#!/usr/bin/env python
# coding: utf-8

# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import argparse
import os
import pipes
import posixpath
import re
import subprocess
import sys
import tempfile
import uuid

CRASHPAD_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            os.pardir)
IS_WINDOWS_HOST = sys.platform.startswith('win')


def _FindGNFromBinaryDir(binary_dir):
  """Attempts to determine the path to a GN binary used to generate the build
  files in the given binary_dir. This is necessary because `gn` might not be in
  the path or might be in a non-standard location, particularly on build
  machines."""

  build_ninja = os.path.join(binary_dir, 'build.ninja')
  if os.path.isfile(build_ninja):
    with open(build_ninja, 'rb') as f:
      # Look for the always-generated regeneration rule of the form:
      #
      # rule gn
      #   command = <gn binary> ... arguments ...
      #
      # to extract the gn binary's full path.
      found_rule_gn = False
      for line in f:
        if line.strip() == 'rule gn':
          found_rule_gn = True
          continue
        if found_rule_gn:
          if len(line) == 0 or line[0] != ' ':
            return None
          if line.startswith('  command = '):
            gn_command_line_parts = line.strip().split(' ')
            if len(gn_command_line_parts) > 2:
              return os.path.join(binary_dir, gn_command_line_parts[2])

  return None


def _BinaryDirTargetOS(binary_dir):
  """Returns the apparent target OS of binary_dir, or None if none appear to be
  explicitly specified."""

  gn_path = _FindGNFromBinaryDir(binary_dir)

  if gn_path:
    # Look for a GN “target_os”.
    popen = subprocess.Popen([gn_path, '--root=' + CRASHPAD_DIR,
                              'args', binary_dir,
                              '--list=target_os', '--short'],
                             shell=IS_WINDOWS_HOST,
                             stdout=subprocess.PIPE, stderr=open(os.devnull))
    value = popen.communicate()[0]
    if popen.returncode == 0:
      match = re.match('target_os = "(.*)"$', value.decode('utf-8'))
      if match:
        return match.group(1)

  # For GYP with Ninja, look for the appearance of “linux-android” in the path
  # to ar. This path is configured by gyp_crashpad_android.py.
  build_ninja_path = os.path.join(binary_dir, 'build.ninja')
  if os.path.exists(build_ninja_path):
    with open(build_ninja_path) as build_ninja_file:
      build_ninja_content = build_ninja_file.read()
      match = re.search('-linux-android(eabi)?-ar$',
                        build_ninja_content,
                        re.MULTILINE)
      if match:
        return 'android'

  return None


def _EnableVTProcessingOnWindowsConsole():
  """Enables virtual terminal processing for ANSI/VT100-style escape sequences
  on a Windows console attached to standard output. Returns True on success.

  Returns False if standard output is not a console or if virtual terminal
  processing is not supported. The feature was introduced in Windows 10.
  """

  import pywintypes
  import win32console
  import winerror

  stdout_console = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)
  try:
    console_mode = stdout_console.GetConsoleMode()
  except pywintypes.error as e:
    if e.winerror == winerror.ERROR_INVALID_HANDLE:
      # Standard output is not a console.
      return False
    raise

  try:
    # From <wincon.h>. This would be
    # win32console.ENABLE_VIRTUAL_TERMINAL_PROCESSING, but it’s too new to be
    # defined there.
    ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004

    stdout_console.SetConsoleMode(console_mode |
                                  ENABLE_VIRTUAL_TERMINAL_PROCESSING)
  except pywintypes.error as e:
    if e.winerror == winerror.ERROR_INVALID_PARAMETER:
      # ANSI/VT100-style escape sequence processing isn’t supported before
      # Windows 10.
      return False
    raise

  return True


def _RunOnAndroidTarget(binary_dir, test, android_device, extra_command_line):
  local_test_path = os.path.join(binary_dir, test)
  MAYBE_UNSUPPORTED_TESTS = (
      'crashpad_client_test',
      'crashpad_handler_test',
      'crashpad_minidump_test',
      'crashpad_snapshot_test',
  )
  if not os.path.exists(local_test_path) and test in MAYBE_UNSUPPORTED_TESTS:
    print('This test is not present and may not be supported, skipping')
    return

  def _adb(*args):
    # Flush all of this script’s own buffered stdout output before running adb,
    # which will likely produce its own output on stdout.
    sys.stdout.flush()

    adb_command = ['adb', '-s', android_device]
    adb_command.extend(args)
    subprocess.check_call(adb_command, shell=IS_WINDOWS_HOST)

  def _adb_push(sources, destination):
    args = list(sources)
    args.append(destination)
    _adb('push', *args)

  def _adb_shell(command_args, env={}):
    # Build a command to execute via “sh -c” instead of invoking it directly.
    # Here’s why:
    #
    # /system/bin/env isn’t normally present prior to Android 6.0 (M), where
    # toybox was introduced (Android platform/manifest 9a2c01e8450b). Instead,
    # set environment variables by using the shell’s internal “export” command.
    #
    # adbd prior to Android 7.0 (N), and the adb client prior to SDK
    # platform-tools version 24, don’t know how to communicate a shell command’s
    # exit status. This was added in Android platform/system/core 606835ae5c4b.
    # With older adb servers and clients, adb will “exit 0” indicating success
    # even if the command failed on the device. This makes
    # subprocess.check_call() semantics difficult to implement directly. As a
    # workaround, have the device send the command’s exit status over stdout and
    # pick it back up in this function.
    #
    # Both workarounds are implemented by giving the device a simple script,
    # which adbd will run as an “sh -c” argument.
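    #
    # For illustration only (the paths here are assumed, not produced by this
    # script): with command_args=['/data/local/tmp/foo/out/crashpad_util_test']
    # and env={'GTEST_COLOR': 'yes'}, the script sent to the device would read:
    #   export GTEST_COLOR=yes; /data/local/tmp/foo/out/crashpad_util_test;
    #   status=${?}; echo "status=${status}"; exit ${status}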
    adb_command = ['adb', '-s', android_device, 'shell']
    script_commands = []
    for k, v in env.items():
      script_commands.append('export %s=%s' % (pipes.quote(k), pipes.quote(v)))
    script_commands.extend([
        ' '.join(pipes.quote(x) for x in command_args),
        'status=${?}',
        'echo "status=${status}"',
        'exit ${status}'])
    adb_command.append('; '.join(script_commands))

    child = subprocess.Popen(adb_command,
                             shell=IS_WINDOWS_HOST,
                             stdin=open(os.devnull),
                             stdout=subprocess.PIPE)

    FINAL_LINE_RE = re.compile('status=(\d+)$')
    final_line = None
    while True:
      # Use readline so that the test output appears “live” when running.
      data = child.stdout.readline().decode('utf-8')
      if data == '':
        break
      if final_line is not None:
        # It wasn’t really the final line.
        print(final_line, end='')
        final_line = None
      if FINAL_LINE_RE.match(data.rstrip()):
        final_line = data
      else:
        print(data, end='')

    if final_line is None:
      # Maybe there was some stderr output after the end of stdout. Old versions
      # of adb, prior to when the exit status could be communicated, smush the
      # two together.
      raise subprocess.CalledProcessError(-1, adb_command)
    status = int(FINAL_LINE_RE.match(final_line.rstrip()).group(1))
    if status != 0:
      raise subprocess.CalledProcessError(status, adb_command)

    child.wait()
    if child.returncode != 0:
      raise subprocess.CalledProcessError(child.returncode, adb_command)

  # /system/bin/mktemp isn’t normally present prior to Android 6.0 (M), where
  # toybox was introduced (Android platform/manifest 9a2c01e8450b). Fake it with
  # a host-generated name. This won’t retry if the name is in use, but with 122
  # bits of randomness, it should be OK. This uses “mkdir” instead of “mkdir -p”
  # because the latter will not indicate failure if the directory already
  # exists.
  device_temp_dir = '/data/local/tmp/%s.%s' % (test, uuid.uuid4().hex)
  _adb_shell(['mkdir', device_temp_dir])

  try:
    # Specify test dependencies that must be pushed to the device. This could be
    # determined automatically in a GN build, following the example used for
    # Fuchsia. Since nothing like that exists for GYP, hard-code it for
    # supported tests.
    test_build_artifacts = [test, 'crashpad_handler']
    test_data = ['test/test_paths_test_data_root.txt']

    if test == 'crashpad_test_test':
      test_build_artifacts.append(
          'crashpad_test_test_multiprocess_exec_test_child')
    elif test == 'crashpad_util_test':
      test_data.append('util/net/testdata/')

    # Establish the directory structure on the device.
    device_out_dir = posixpath.join(device_temp_dir, 'out')
    device_mkdirs = [device_out_dir]
    for source_path in test_data:
      # A trailing slash could reasonably mean to copy an entire directory, but
      # will interfere with what’s needed from the path split. All parent
      # directories of any source_path need to be represented in
      # device_mkdirs, but it’s important that no source_path itself wind up in
      # device_mkdirs, even if source_path names a directory, because that would
      # cause the “adb push” of the directory below to behave incorrectly.
      if source_path.endswith(posixpath.sep):
        source_path = source_path[:-1]

      device_source_path = posixpath.join(device_temp_dir, source_path)
      device_mkdir = posixpath.split(device_source_path)[0]
      if device_mkdir not in device_mkdirs:
        device_mkdirs.append(device_mkdir)
    adb_mkdir_command = ['mkdir', '-p']
    adb_mkdir_command.extend(device_mkdirs)
    _adb_shell(adb_mkdir_command)

    # Push the test binary and any other build output to the device.
    local_test_build_artifacts = []
    for artifact in test_build_artifacts:
      local_test_build_artifacts.append(os.path.join(binary_dir, artifact))
    _adb_push(local_test_build_artifacts, device_out_dir)

    # Push test data to the device.
    for source_path in test_data:
      _adb_push([os.path.join(CRASHPAD_DIR, source_path)],
                posixpath.join(device_temp_dir, source_path))

    # Run the test on the device. Pass the test data root in the environment.
    #
    # Because the test will not run with its standard output attached to a
    # pseudo-terminal device, gtest will not normally enable colored output, so
    # mimic gtest’s own logic for deciding whether to enable color by checking
    # this script’s own standard output connection. The whitelist of TERM values
    # comes from gtest googletest/src/gtest.cc
    # testing::internal::ShouldUseColor().
    env = {'CRASHPAD_TEST_DATA_ROOT': device_temp_dir}
    gtest_color = os.environ.get('GTEST_COLOR')
    if gtest_color in ('auto', None):
      if (sys.stdout.isatty() and
          (os.environ.get('TERM') in
           ('xterm', 'xterm-color', 'xterm-256color', 'screen',
            'screen-256color', 'tmux', 'tmux-256color', 'rxvt-unicode',
            'rxvt-unicode-256color', 'linux', 'cygwin') or
           (IS_WINDOWS_HOST and _EnableVTProcessingOnWindowsConsole()))):
        gtest_color = 'yes'
      else:
        gtest_color = 'no'
    env['GTEST_COLOR'] = gtest_color
    _adb_shell([posixpath.join(device_out_dir, test)] + extra_command_line, env)
  finally:
    _adb_shell(['rm', '-rf', device_temp_dir])


def _GetFuchsiaSDKRoot():
  arch = 'mac-amd64' if sys.platform == 'darwin' else 'linux-amd64'
  return os.path.join(CRASHPAD_DIR, 'third_party', 'fuchsia', 'sdk', arch)


def _GenerateFuchsiaRuntimeDepsFiles(binary_dir, tests):
  """Ensures a <binary_dir>/<test>.runtime_deps file exists for each test."""
  targets_file = os.path.join(binary_dir, 'targets.txt')
  with open(targets_file, 'wb') as f:
    f.write('//:' + '\n//:'.join(tests) + '\n')
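  # For example, for tests ['crashpad_client_test', 'crashpad_util_test'],
  # targets.txt would contain one GN label per line:
  #   //:crashpad_client_test
  #   //:crashpad_util_test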
  gn_path = _FindGNFromBinaryDir(binary_dir)
  subprocess.check_call(
      [gn_path, '--root=' + CRASHPAD_DIR, 'gen', binary_dir,
       '--runtime-deps-list-file=' + targets_file])

  # Run again so that --runtime-deps-list-file isn't in the regen rule. See
  # https://crbug.com/814816.
  subprocess.check_call(
      [gn_path, '--root=' + CRASHPAD_DIR, 'gen', binary_dir])


def _HandleOutputFromFuchsiaLogListener(process, done_message):
  """Pass through the output from |process| (which should be an instance of
  Fuchsia's loglistener) until a special termination |done_message| is
  encountered.

  Also attempts to determine if any tests failed by inspecting the log output,
  and returns False if there were failures.
  """
  success = True
  while True:
    line = process.stdout.readline().rstrip()
    if 'FAILED TEST' in line:
      success = False
    elif done_message in line and 'echo ' not in line:
      break
    print(line)
  return success


def _RunOnFuchsiaTarget(binary_dir, test, device_name, extra_command_line):
  """Runs the given Fuchsia |test| executable on the given |device_name|. The
  device must already be booted.

  Copies the executable and its runtime dependencies as specified by GN to the
  target in /tmp using `netcp`, runs the binary on the target, and logs output
  back to stdout on this machine via `loglistener`.
  """
  sdk_root = _GetFuchsiaSDKRoot()

  # Run loglistener and filter the output to know when the test is done.
  loglistener_process = subprocess.Popen(
      [os.path.join(sdk_root, 'tools', 'loglistener'), device_name],
      stdout=subprocess.PIPE, stdin=open(os.devnull), stderr=open(os.devnull))

  runtime_deps_file = os.path.join(binary_dir, test + '.runtime_deps')
  with open(runtime_deps_file, 'rb') as f:
    runtime_deps = f.read().splitlines()

  def netruncmd(*args):
    """Runs a list of commands on the target device. Each command is escaped
    by using pipes.quote(), and then each command is chained by shell ';'.
    """
    netruncmd_path = os.path.join(sdk_root, 'tools', 'netruncmd')
    final_args = ' ; '.join(' '.join(pipes.quote(x) for x in command)
                            for command in args)
    subprocess.check_call([netruncmd_path, device_name, final_args])

  try:
    unique_id = uuid.uuid4().hex
    test_root = '/tmp/%s_%s' % (test, unique_id)
    tmp_root = test_root + '/tmp'
    staging_root = test_root + '/pkg'

    # Make a staging directory tree on the target.
    directories_to_create = [tmp_root,
                             '%s/bin' % staging_root,
                             '%s/assets' % staging_root]
    netruncmd(['mkdir', '-p'] + directories_to_create)

    def netcp(local_path):
      """Uses `netcp` to copy a file or directory to the device. Files located
      inside the build dir are stored to /pkg/bin, otherwise to /pkg/assets.
      .so files are stored somewhere completely different, into /boot/lib (!).
      This is because the loader service does not yet correctly handle the
      namespace in which the caller is being run, and so can only load .so files
      from a couple hardcoded locations, the only writable one of which is
      /boot/lib, so we copy all .so files there. This bug is filed upstream as
      ZX-1619.
      """
      in_binary_dir = local_path.startswith(binary_dir + '/')
      if in_binary_dir:
        if local_path.endswith('.so'):
          target_path = os.path.join(
              '/boot/lib', local_path[len(binary_dir)+1:])
        else:
          target_path = os.path.join(
              staging_root, 'bin', local_path[len(binary_dir)+1:])
      else:
        relative_path = os.path.relpath(local_path, CRASHPAD_DIR)
        target_path = os.path.join(staging_root, 'assets', relative_path)
      netcp_path = os.path.join(sdk_root, 'tools', 'netcp')
      subprocess.check_call([netcp_path, local_path,
                             device_name + ':' + target_path],
                            stderr=open(os.devnull))

    # Copy runtime deps into the staging tree.
    for dep in runtime_deps:
      local_path = os.path.normpath(os.path.join(binary_dir, dep))
      if os.path.isdir(local_path):
        for root, dirs, files in os.walk(local_path):
          for f in files:
            netcp(os.path.join(root, f))
      else:
        netcp(local_path)

    done_message = 'TERMINATED: ' + unique_id
    namespace_command = [
        'namespace', '/pkg=' + staging_root, '/tmp=' + tmp_root, '/svc=/svc',
        '--replace-child-argv0=/pkg/bin/' + test, '--',
        staging_root + '/bin/' + test] + extra_command_line
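    # netruncmd() chains its two arguments with ';', so the device runs the
    # namespace/test command followed by "echo TERMINATED: <unique_id>"; that
    # trailing echo is what _HandleOutputFromFuchsiaLogListener() watches for
    # to detect that the test run has finished.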
    netruncmd(namespace_command, ['echo', done_message])

    success = _HandleOutputFromFuchsiaLogListener(
        loglistener_process, done_message)
    if not success:
      raise subprocess.CalledProcessError(1, test)
  finally:
    netruncmd(['rm', '-rf', test_root])


def _RunOnIOSTarget(binary_dir, test, is_xcuitest=False):
  """Runs the given iOS |test| app on iPhone 8 with the default OS version."""

  def xctest(binary_dir, test):
    """Returns a dict containing the xctestrun data needed to run an
    XCTest-based test app."""
    test_path = os.path.join(CRASHPAD_DIR, binary_dir)
    module_data = {
        'TestBundlePath': os.path.join(test_path, test + '_module.xctest'),
        'TestHostPath': os.path.join(test_path, test + '.app'),
        'TestingEnvironmentVariables': {
            'DYLD_FRAMEWORK_PATH': '__TESTROOT__/Debug-iphonesimulator:',
            'DYLD_INSERT_LIBRARIES': (
                '__PLATFORMS__/iPhoneSimulator.platform/Developer/'
                'usr/lib/libXCTestBundleInject.dylib'),
            'DYLD_LIBRARY_PATH': '__TESTROOT__/Debug-iphonesimulator',
            'IDEiPhoneInternalTestBundleName': test + '.app',
            'XCInjectBundleInto': '__TESTHOST__/' + test,
        }
    }
    return { test: module_data }

  def xcuitest(binary_dir, test):
    """Returns a dict containing the xctestrun data needed to run an
    XCUITest-based test app."""

    test_path = os.path.join(CRASHPAD_DIR, binary_dir)
    runner_path = os.path.join(test_path, test + '_module-Runner.app')
    bundle_path = os.path.join(runner_path, 'PlugIns', test + '_module.xctest')
    target_app_path = os.path.join(test_path, test + '.app')
    module_data = {
        'IsUITestBundle': True,
        'IsXCTRunnerHostedTestBundle': True,
        'TestBundlePath': bundle_path,
        'TestHostPath': runner_path,
        'UITargetAppPath': target_app_path,
        'DependentProductPaths': [ bundle_path, runner_path, target_app_path ],
        'TestingEnvironmentVariables': {
            'DYLD_FRAMEWORK_PATH': '__TESTROOT__/Debug-iphonesimulator:',
            'DYLD_INSERT_LIBRARIES': (
                '__PLATFORMS__/iPhoneSimulator.platform/Developer/'
                'usr/lib/libXCTestBundleInject.dylib'),
            'DYLD_LIBRARY_PATH': '__TESTROOT__/Debug-iphonesimulator',
            'XCInjectBundleInto': '__TESTHOST__/' + test + '_module-Runner',
        },
    }
    return { test: module_data }

  with tempfile.NamedTemporaryFile() as f:
    import plistlib

    xctestrun_path = f.name
    print(xctestrun_path)
    if is_xcuitest:
      plistlib.writePlist(xcuitest(binary_dir, test), xctestrun_path)
    else:
      plistlib.writePlist(xctest(binary_dir, test), xctestrun_path)

    subprocess.check_call(['xcodebuild', 'test-without-building',
                           '-xctestrun', xctestrun_path, '-destination',
                           'platform=iOS Simulator,name=iPhone 8'])


# This script is primarily used from the waterfall so that the list of tests
# that are run is maintained in-tree, rather than in a separate infrastructure
# location in the recipe.
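#
# Example invocations (out/Default is an assumed GN output directory name):
#   python build/run_tests.py out/Default
#   python build/run_tests.py out/Default crashpad_util_test --gtest_filter=<pattern>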
def main(args):
  parser = argparse.ArgumentParser(description='Run Crashpad unittests.')
  parser.add_argument('binary_dir', help='Root of build dir')
  parser.add_argument('test', nargs='*', help='Specific test(s) to run.')
  parser.add_argument('--gtest_filter',
                      help='GTest filter applied to GTest binary runs.')
  args = parser.parse_args()

  # Tell 64-bit Windows tests where to find 32-bit test executables, for
  # cross-bitted testing. This relies on the fact that the GYP build by default
  # uses {Debug,Release} for the 32-bit build and {Debug,Release}_x64 for the
  # 64-bit build. This is not a universally valid assumption, and if it’s not
  # met, 64-bit tests that require 32-bit build output will disable themselves
  # dynamically.
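  # For example, under that default layout, a 64-bit build in out\Release_x64
  # finds its 32-bit counterpart in out\Release, and CRASHPAD_TEST_32_BIT_OUTPUT
  # is pointed there below if that directory exists.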
  if (sys.platform == 'win32' and args.binary_dir.endswith('_x64') and
      'CRASHPAD_TEST_32_BIT_OUTPUT' not in os.environ):
    binary_dir_32 = args.binary_dir[:-4]
    if os.path.isdir(binary_dir_32):
      os.environ['CRASHPAD_TEST_32_BIT_OUTPUT'] = binary_dir_32

  target_os = _BinaryDirTargetOS(args.binary_dir)
  is_android = target_os == 'android'
  is_fuchsia = target_os == 'fuchsia'
  is_ios = target_os == 'ios'

  tests = [
      'crashpad_client_test',
      'crashpad_handler_test',
      'crashpad_minidump_test',
      'crashpad_snapshot_test',
      'crashpad_test_test',
      'crashpad_util_test',
  ]

  if is_android:
    android_device = os.environ.get('ANDROID_DEVICE')
    if not android_device:
      adb_devices = subprocess.check_output(['adb', 'devices'],
                                            shell=IS_WINDOWS_HOST)
      devices = []
      for line in adb_devices.splitlines():
        line = line.decode('utf-8')
        if (line == 'List of devices attached' or
            re.match('^\* daemon .+ \*$', line) or
            line == ''):
          continue
        (device, ignore) = line.split('\t')
        devices.append(device)
      if len(devices) != 1:
        print("Please set ANDROID_DEVICE to your device's id", file=sys.stderr)
        return 2
      android_device = devices[0]
      print('Using autodetected Android device:', android_device)
  elif is_fuchsia:
    zircon_nodename = os.environ.get('ZIRCON_NODENAME')
    if not zircon_nodename:
      netls = os.path.join(_GetFuchsiaSDKRoot(), 'tools', 'netls')
      popen = subprocess.Popen([netls, '--nowait'], stdout=subprocess.PIPE)
      devices = popen.communicate()[0].splitlines()
      if popen.returncode != 0 or len(devices) != 1:
        print("Please set ZIRCON_NODENAME to your device's hostname",
              file=sys.stderr)
        return 2
      zircon_nodename = devices[0].strip().split()[1]
      print('Using autodetected Fuchsia device:', zircon_nodename)
    _GenerateFuchsiaRuntimeDepsFiles(
        args.binary_dir, [t for t in tests if not t.endswith('.py')])
  elif is_ios:
    tests.append('ios_crash_xcuitests')
  elif IS_WINDOWS_HOST:
    tests.append('snapshot/win/end_to_end_test.py')

  if args.test:
    for t in args.test:
      if t not in tests:
        print('Unrecognized test:', t, file=sys.stderr)
        return 3
    tests = args.test

  for test in tests:
    print('-' * 80)
    print(test)
    print('-' * 80)
    if test.endswith('.py'):
      subprocess.check_call(
          [sys.executable, os.path.join(CRASHPAD_DIR, test), args.binary_dir])
    else:
      extra_command_line = []
      if args.gtest_filter:
        extra_command_line.append('--gtest_filter=' + args.gtest_filter)
      if is_android:
        _RunOnAndroidTarget(args.binary_dir, test, android_device,
                            extra_command_line)
      elif is_fuchsia:
        _RunOnFuchsiaTarget(args.binary_dir, test, zircon_nodename,
                            extra_command_line)
      elif is_ios:
        _RunOnIOSTarget(args.binary_dir, test,
                        is_xcuitest=test.startswith('ios'))
      else:
        subprocess.check_call([os.path.join(args.binary_dir, test)] +
                              extra_command_line)

  return 0


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))