def RunPythonCommandInBuildDir(build_dir, target, command_line_args,
                               server_dir=None):
  """Runs a Python command, with the environment set up from the tree.

  On Windows, first runs cygwin's setup_mount.bat so cygwin-based tools
  work.  Elsewhere, prepends the tree's tools/python directory to
  PYTHONPATH.  On Linux the command is additionally wrapped in a virtual
  X server session so graphical tests can run headless.

  Returns the command's exit code.
  """
  if sys.platform == 'win32':
    python_exe = 'python.exe'
    # Cygwin mount points must exist before any cygwin-based tool runs.
    setup_mount = chromium_utils.FindUpward(build_dir, 'third_party',
                                            'cygwin', 'setup_mount.bat')
    chromium_utils.RunCommand([setup_mount])
  else:
    python_exe = 'python'
    os.environ['PYTHONPATH'] = (
        chromium_utils.FindUpward(build_dir, 'tools', 'python') + ':' +
        os.environ.get('PYTHONPATH', ''))

  if chromium_utils.IsLinux():
    slave_name = SlaveBuildName(build_dir)
    xvfb.StartVirtualX(slave_name,
                       os.path.join(build_dir, '..', 'out', target),
                       server_dir=server_dir)

  # The list of tests is given as arguments.
  result = chromium_utils.RunCommand([python_exe] + list(command_line_args))

  if chromium_utils.IsLinux():
    xvfb.StopVirtualX(slave_name)

  return result
def PerfTest(options):
  """Call run-perf-tests.py, using Python from the tree."""
  build_dir = os.path.abspath(options.build_dir)
  scripts_dir = chromium_utils.FindUpward(build_dir, 'third_party', 'WebKit',
                                          'Tools', 'Scripts')
  runner = os.path.join(scripts_dir, 'run-perf-tests')

  # Base arguments; the target flag (--release / --debug) is appended last.
  command = [
      runner,
      '--time-out-ms=90000',
      '--no-results',
      '--force',
      'inspector',
      '--' + options.target.lower(),
  ]
  if options.platform:
    command += ['--platform', options.platform]

  # Nuke anything that appears to be stale chrome items in the temporary
  # directory from previous test runs (i.e. from crashes or unittest leaks).
  slave_utils.RemoveChromeTemporaryFiles()

  # Run the tests.
  return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
                                                command)
def RunPythonCommandInBuildDir(build_dir, target, command_line_args,
                               server_dir=None, filter_obj=None):
  """Runs a Python command with PYTHONPATH pointing at the tree's helpers.

  On Windows, runs cygwin's setup_mount.bat first so cygwin-based tools
  work.  `target` and `server_dir` are accepted for interface parity with
  other variants of this helper but are not used here.

  Returns the command's exit code.
  """
  if sys.platform == 'win32':
    python_exe = 'python.exe'
    setup_mount = chromium_utils.FindUpward(build_dir, 'third_party',
                                            'cygwin', 'setup_mount.bat')
    chromium_utils.RunCommand([setup_mount])
  else:
    python_exe = 'python'
    tools_python = chromium_utils.FindUpward(build_dir, 'tools', 'python')
    os.environ['PYTHONPATH'] = ':'.join(
        [tools_python, os.environ.get('PYTHONPATH', '')])

  return chromium_utils.RunCommand([python_exe] + list(command_line_args),
                                   filter_obj=filter_obj)
def layout_test(options, args):
  """Parse options and call run-webkit-tests, using Python from the tree."""
  build_dir = os.path.abspath(options.build_dir)
  scripts_dir = chromium_utils.FindUpward(build_dir, 'third_party', 'WebKit',
                                          'Tools', 'Scripts')
  lint_script = os.path.join(scripts_dir, 'lint-test-expectations')
  return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
                                                [lint_script])
def layout_test(options, args):
  """Parse options and call run_webkit_tests.py, using Python from the tree."""
  build_dir = os.path.abspath(options.build_dir)
  tests_dir = chromium_utils.FindUpward(build_dir,
                                        'webkit', 'tools', 'layout_tests')
  command = [
      os.path.join(tests_dir, 'run_webkit_tests.py'),
      '--lint-test-files',
      '--chromium',
  ]
  return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
                                                command)
def main():
  """Looks up the SDK build command for this builder and runs it."""
  option_parser = optparse.OptionParser()
  chromium_utils.AddPropertiesOptions(option_parser)
  options, args = option_parser.parse_args()

  builder_name = options.build_properties.get('buildername', '')
  cmd = SDK_BUILDER_MAP.get(builder_name) or SDK_BUILDER_MAP.get('DEFAULT')

  # Run from native_client_sdk/src/build_tools so the command's relative
  # paths resolve.
  os.chdir(chromium_utils.FindUpward(os.getcwd(), 'src', 'native_client_sdk',
                                     'src', 'build_tools'))
  return chromium_utils.RunCommand(cmd + args)
def archive_layout(options, args): logging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s:%(lineno)-3d' ' %(levelname)s %(message)s', datefmt='%y%m%d %H:%M:%S') chrome_dir = os.path.abspath(options.build_dir) results_dir_basename = os.path.basename(options.results_dir) if options.results_dir is not None: options.results_dir = os.path.abspath( os.path.join(options.build_dir, options.results_dir)) else: options.results_dir = chromium_utils.FindUpward(chrome_dir, RESULT_DIR) print 'Archiving results from %s' % options.results_dir staging_dir = slave_utils.GetStagingDir(chrome_dir) print 'Staging in %s' % staging_dir (actual_file_list, diff_file_list) = _CollectArchiveFiles(options.results_dir) zip_file = chromium_utils.MakeZip(staging_dir, results_dir_basename, actual_file_list, options.results_dir)[1] full_results_json = os.path.join(options.results_dir, 'full_results.json') # Extract the build name of this slave (e.g., 'chrome-release') from its # configuration file if not provided as a param. build_name = options.builder_name or slave_utils.SlaveBuildName(chrome_dir) build_name = re.sub('[ .()]', '_', build_name) last_change = str(slave_utils.SubversionRevision(chrome_dir)) print 'last change: %s' % last_change print 'build name: %s' % build_name print 'host name: %s' % socket.gethostname() # Where to save layout test results. 
dest_parent_dir = os.path.join(config.Archive.www_dir_base, results_dir_basename.replace('-', '_'), build_name) dest_dir = os.path.join(dest_parent_dir, last_change) gs_bucket = options.factory_properties.get('gs_bucket', None) if gs_bucket: gs_base = '/'.join([gs_bucket, build_name, last_change]) gs_acl = options.factory_properties.get('gs_acl', None) slave_utils.GSUtilCopyFile(zip_file, gs_base, gs_acl=gs_acl) slave_utils.GSUtilCopyFile(full_results_json, gs_base, gs_acl=gs_acl) else: slave_utils.MaybeMakeDirectoryOnArchiveHost(dest_dir) slave_utils.CopyFileToArchiveHost(zip_file, dest_dir) slave_utils.CopyFileToArchiveHost(full_results_json, dest_dir) # Not supported on Google Storage yet. _ArchiveFullLayoutTestResults(staging_dir, dest_parent_dir, diff_file_list, options) return 0
def GetBlinkRevision(build_dir, webkit_revision=None):
  """Returns the Blink (WebKit) revision, or None if it cannot be found.

  If a revision is passed in it is returned as-is; otherwise the revision is
  looked up from the third_party/WebKit/Source checkout near build_dir.

  TODO(eyaich): Blink's now folded into Chromium and doesn't have a separate
  revision. Use main_revision and delete GetBlinkRevision and uses.
  """
  if webkit_revision:
    return webkit_revision
  try:
    webkit_dir = chromium_utils.FindUpward(
        os.path.abspath(build_dir), 'third_party', 'WebKit', 'Source')
    return GetRevision(webkit_dir)
  except Exception:
    # Best-effort: any lookup failure just means "unknown revision".
    return None
def SetPageHeap(chrome_dir, exe, enable):
  """Enables or disables page-heap checking in the given executable,
  depending on the 'enable' parameter.

  Locates gflags.exe under tools/memory the first time it is called and
  caches the path in the module-level _gflags_exe.  Raises PageHeapError
  if gflags.exe reports a failure.
  """
  global _gflags_exe
  if _gflags_exe is None:
    # Look up gflags.exe once; reuse the cached path on later calls.
    _gflags_exe = chromium_utils.FindUpward(chrome_dir,
                                            'tools', 'memory', 'gflags.exe')
  if enable:
    command = [_gflags_exe, '/p', '/enable', exe, '/full']
  else:
    command = [_gflags_exe, '/p', '/disable', exe]
  if chromium_utils.RunCommand(command):
    action = 'enable' if enable else 'disable'
    raise PageHeapError('Unable to %s page heap for %s.' % (action, exe))
def main():
  """Runs the SDK build command for this builder with a clean GS environment."""
  parser = optparse.OptionParser()
  chromium_utils.AddPropertiesOptions(parser)
  options, args = parser.parse_args()

  builder = options.build_properties.get('buildername', '')
  cmd = SDK_BUILDER_MAP.get(builder) or SDK_BUILDER_MAP.get('DEFAULT')

  build_tools_dir = chromium_utils.FindUpward(
      os.getcwd(), 'src', 'native_client_sdk', 'src', 'build_tools')
  os.chdir(build_tools_dir)

  # Remove BOTO_CONFIG from the environment -- we want to use the NaCl .boto
  # file that has access to gs://nativeclient-mirror.
  os.environ.pop('AWS_CREDENTIAL_FILE', None)
  os.environ.pop('BOTO_CONFIG', None)

  return chromium_utils.RunCommand(cmd + args)
def main():
  """Parses options and runs test-webkitpy from the tree."""
  parser = optparse.OptionParser()
  parser.add_option('', '--build-dir', default='webkit',
                    help='path to main build directory (the parent of '
                         'the Release or Debug directory)')
  # Note that --target isn't needed for --lint-test-files, but the
  # RunPythonCommandInBuildDir() will get upset if we don't say something.
  parser.add_option('', '--target', default='release',
                    help='DumpRenderTree build configuration (Release or Debug)')
  options, _ = parser.parse_args()

  build_dir = os.path.abspath(options.build_dir)
  scripts_dir = chromium_utils.FindUpward(build_dir, 'third_party', 'WebKit',
                                          'Tools', 'Scripts')
  return slave_utils.RunPythonCommandInBuildDir(
      build_dir, options.target, [os.path.join(scripts_dir, 'test-webkitpy')])
def layout_test(options, args):
  """Parse options and call run-webkit-tests, using Python from the tree.

  Builds the run-webkit-tests command line from the given options, clears
  stale state, toggles Windows page-heap verification around the run when
  requested, and (in the finally clause) converts failing_results.json into
  the file named by --json-test-results.
  """
  build_dir = os.path.abspath(options.build_dir)

  dumprendertree_exe = 'DumpRenderTree.exe'
  if options.driver_name:
    dumprendertree_exe = '%s.exe' % options.driver_name

  # Disable the page heap in case it got left enabled by some previous process.
  try:
    slave_utils.SetPageHeap(build_dir, dumprendertree_exe, False)
  except chromium_utils.PathNotFound:
    # If we don't have gflags.exe, report it but don't worry about it.
    print 'Warning: Couldn\'t disable page heap, if it was already enabled.'

  blink_scripts_dir = chromium_utils.FindUpward(build_dir, 'third_party',
                                                'WebKit', 'Tools', 'Scripts')
  run_blink_tests = os.path.join(blink_scripts_dir, 'run-webkit-tests')

  slave_name = slave_utils.SlaveBuildName(build_dir)

  command = [
      run_blink_tests,
      '--no-show-results',
      '--no-new-test-results',
      '--full-results-html',    # For the dashboards.
      '--clobber-old-results',  # Clobber test results before each run.
      '--exit-after-n-failures', '5000',
      '--exit-after-n-crashes-or-timeouts', '100',
  ]

  # TODO(dpranke): we can switch to always using --debug-rwt-logging
  # after all the bots have WebKit r124789 or later.
  # Probe the runner's --help output to see which verbosity flag it supports.
  capture_obj = slave_utils.RunCommandCaptureFilter()
  slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
                                         [run_blink_tests, '--help'],
                                         filter_obj=capture_obj)
  if '--debug-rwt-logging' in ''.join(capture_obj.lines):
    command.append('--debug-rwt-logging')
  else:
    command.append('--verbose')

  if options.results_directory:
    # Prior to the fix in https://bugs.webkit.org/show_bug.cgi?id=58272,
    # run_blink_tests expects the results directory to be relative to
    # the configuration directory (e.g., src/webkit/Release). The
    # parameter is given to us relative to build_dir, which is where we
    # will run the command from.
    #
    # When 58272 is landed, run_blink_tests will support absolute file
    # paths as well as paths relative to CWD for non-Chromium ports and
    # paths relative to the configuration dir for Chromium ports. As
    # a transitional fix, we convert to an absolute dir, but once the
    # hack in 58272 is removed, we can use results_dir as-is.
    if not os.path.isabs(options.results_directory):
      if options.results_directory.startswith('../../'):
        options.results_directory = options.results_directory[6:]
      options.results_directory = os.path.abspath(
          os.path.join(os.getcwd(), options.results_directory))
    chromium_utils.RemoveDirectory(options.results_directory)
    command.extend(['--results-directory', options.results_directory])

  # Forward the optional flags that were set on the command line.
  if options.target:
    command.extend(['--target', options.target])
  if options.platform:
    command.extend(['--platform', options.platform])
  if options.skipped:
    command.extend(['--skipped', options.skipped])
  if options.no_pixel_tests:
    command.append('--no-pixel-tests')
  if options.batch_size:
    command.extend(['--batch-size', options.batch_size])
  if options.run_part:
    command.extend(['--run-part', options.run_part])
  if options.builder_name:
    command.extend(['--builder-name', options.builder_name])
  if options.build_number:
    command.extend(['--build-number', options.build_number])
  command.extend(['--master-name', slave_utils.GetActiveMaster() or ''])
  command.extend(['--build-name', slave_name])
  if options.step_name:
    command.extend(['--step-name', options.step_name])
  # On Windows, look for the target in an exact location.
  if sys.platform == 'win32':
    command.extend(['--build-directory', build_dir])
  if options.test_results_server:
    command.extend(['--test-results-server', options.test_results_server])
  if options.enable_pageheap:
    # Page-heap verification slows the driver down, so allow more time.
    command.append('--time-out-ms=120000')
  if options.time_out_ms:
    command.extend(['--time-out-ms', options.time_out_ms])
  for filename in options.additional_expectations:
    command.append('--additional-expectations=%s' % filename)
  if options.driver_name:
    command.append('--driver-name=%s' % options.driver_name)
  for additional_drt_flag in options.additional_drt_flag:
    command.append('--additional-drt-flag=%s' % additional_drt_flag)
  for test_list in options.test_list:
    command += ['--test-list', test_list]
  if options.enable_leak_detection:
    command.append('--enable-leak-detection')

  # The list of tests is given as arguments.
  if options.options:
    command.extend(options.options.split(' '))
  command.extend(args)

  # Nuke anything that appears to be stale chrome items in the temporary
  # directory from previous test runs (i.e.- from crashes or unittest leaks).
  slave_utils.RemoveChromeTemporaryFiles()

  try:
    if options.enable_pageheap:
      slave_utils.SetPageHeap(build_dir, dumprendertree_exe, True)
    # Run the the tests
    return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
                                                  command)
  finally:
    # Always turn page heap back off so later processes are unaffected.
    if options.enable_pageheap:
      slave_utils.SetPageHeap(build_dir, dumprendertree_exe, False)

    if options.json_test_results:
      # NOTE(review): this reads options.results_directory unconditionally;
      # presumably --results-directory is always set when --json-test-results
      # is used — confirm against callers.
      results_dir = options.results_directory
      results_json = os.path.join(results_dir, "failing_results.json")

      # If the json results file was not produced, then we produce no output
      # file too and rely on a recipe to handle this as invalid result.
      if os.path.isfile(results_json):
        with open(results_json, 'rb') as f:
          data = f.read()

        # data is in the form of:
        #   ADD_RESULTS(<json object>);
        # but use a regex match to also support a raw json object.
        # NOTE(review): without re.DOTALL, '.' does not cross newlines — this
        # assumes the JSON payload is on a single line.
        m = re.match(
            r'[^({]*'       # From the beginning, take any except '(' or '{'
            r'(?:'
              r'\((.*)\);'  # Expect '(<json>);'
              r'|'          # or
              r'({.*})'     # '<json object>'
            r')$', data)
        assert m is not None
        data = m.group(1) or m.group(2)

        # Validate that the payload really is a JSON object before writing.
        json_data = json.loads(data)
        assert isinstance(json_data, dict)

        with open(options.json_test_results, 'wb') as f:
          f.write(data)
def archive_layout(options, args):
  """Archives layout test results to Google Storage or the archive host.

  Zips the actual results, then uploads the zip, the results tree and the
  JSON summaries either to options.gs_bucket (archived by build number, plus
  a 'latest' copy under results/) or to the static archive host.

  Returns 0 on success (the upload helpers raise on failure).
  """
  logging.basicConfig(level=logging.INFO,
                      format='%(asctime)s %(filename)s:%(lineno)-3d'
                             ' %(levelname)s %(message)s',
                      datefmt='%y%m%d %H:%M:%S')
  chrome_dir = os.path.abspath(options.build_dir)
  # NOTE(review): basename is taken before the None check below — if
  # options.results_dir can actually be None this raises TypeError; confirm
  # the option always has a default.
  results_dir_basename = os.path.basename(options.results_dir)
  if options.results_dir is not None:
    options.results_dir = os.path.abspath(
        os.path.join(options.build_dir, options.results_dir))
  else:
    options.results_dir = chromium_utils.FindUpward(chrome_dir, RESULT_DIR)
  print 'Archiving results from %s' % options.results_dir
  staging_dir = options.staging_dir or slave_utils.GetStagingDir(chrome_dir)
  print 'Staging in %s' % staging_dir
  if not os.path.exists(staging_dir):
    os.makedirs(staging_dir)

  (actual_file_list, diff_file_list) = _CollectArchiveFiles(options.results_dir)
  zip_file = chromium_utils.MakeZip(staging_dir, results_dir_basename,
                                    actual_file_list, options.results_dir)[1]
  # TODO(ojan): Stop separately uploading full_results.json once garden-o-matic
  # switches to using failing_results.json.
  full_results_json = os.path.join(options.results_dir, 'full_results.json')
  failing_results_json = os.path.join(options.results_dir,
                                      'failing_results.json')

  # Extract the build name of this slave (e.g., 'chrome-release') from its
  # configuration file if not provided as a param.
  build_name = options.builder_name or slave_utils.SlaveBuildName(chrome_dir)
  # Spaces, dots and parens are not safe in archive/GS paths.
  build_name = re.sub('[ .()]', '_', build_name)

  wc_dir = os.path.dirname(chrome_dir)
  last_change = slave_utils.GetHashOrRevision(wc_dir)

  # TODO(dpranke): Is it safe to assume build_number is not blank? Should we
  # assert() this ?
  build_number = str(options.build_number)

  print 'last change: %s' % last_change
  print 'build name: %s' % build_name
  print 'build number: %s' % build_number
  print 'host name: %s' % socket.gethostname()

  if options.gs_bucket:
    # Create a file containing last_change revision. This file will be uploaded
    # after all layout test results are uploaded so the client can check this
    # file to see if the upload for the revision is complete.
    # See crbug.com/574272 for more details.
    last_change_file = os.path.join(staging_dir, 'LAST_CHANGE')
    with open(last_change_file, 'w') as f:
      f.write(last_change)

    # Copy the results to a directory archived by build number.
    gs_base = '/'.join([options.gs_bucket, build_name, build_number])
    gs_acl = options.gs_acl
    # These files never change, cache for a year.
    cache_control = "public, max-age=31556926"
    slave_utils.GSUtilCopyFile(zip_file, gs_base, gs_acl=gs_acl,
                               cache_control=cache_control)
    slave_utils.GSUtilCopyDir(options.results_dir, gs_base, gs_acl=gs_acl,
                              cache_control=cache_control)

    # TODO(dpranke): Remove these two lines once clients are fetching the
    # files from the layout-test-results dir.
    slave_utils.GSUtilCopyFile(full_results_json, gs_base, gs_acl=gs_acl,
                               cache_control=cache_control)
    slave_utils.GSUtilCopyFile(failing_results_json, gs_base, gs_acl=gs_acl,
                               cache_control=cache_control)

    # Upload LAST_CHANGE last, as the completion marker (see note above).
    slave_utils.GSUtilCopyFile(last_change_file,
                               gs_base + '/' + results_dir_basename,
                               gs_acl=gs_acl, cache_control=cache_control)

    # And also to the 'results' directory to provide the 'latest' results
    # and make sure they are not cached at all (Cloud Storage defaults to
    # caching w/ a max-age=3600).
    gs_base = '/'.join([options.gs_bucket, build_name, 'results'])
    cache_control = 'no-cache'
    slave_utils.GSUtilCopyFile(zip_file, gs_base, gs_acl=gs_acl,
                               cache_control=cache_control)
    slave_utils.GSUtilCopyDir(options.results_dir, gs_base, gs_acl=gs_acl,
                              cache_control=cache_control)
    slave_utils.GSUtilCopyFile(last_change_file,
                               gs_base + '/' + results_dir_basename,
                               gs_acl=gs_acl, cache_control=cache_control)
  else:
    # Where to save layout test results.
    dest_parent_dir = os.path.join(archive_utils.Config.www_dir_base,
                                   results_dir_basename.replace('-', '_'),
                                   build_name)
    dest_dir = os.path.join(dest_parent_dir, last_change)

    _MaybeMakeDirectoryOnArchiveHost(dest_dir)
    _CopyFileToArchiveHost(zip_file, dest_dir)
    _CopyFileToArchiveHost(full_results_json, dest_dir)
    _CopyFileToArchiveHost(failing_results_json, dest_dir)
    # Not supported on Google Storage yet.
    _ArchiveFullLayoutTestResults(staging_dir, dest_parent_dir,
                                  diff_file_list, options)
  return 0
def layout_test(options, args): """Parse options and call run_webkit_tests.py, using Python from the tree.""" build_dir = os.path.abspath(options.build_dir) # Disable the page heap in case it got left enabled by some previous process. try: slave_utils.SetPageHeap(build_dir, 'DumpRenderTree.exe', False) except chromium_utils.PathNotFound: # If we don't have gflags.exe, report it but don't worry about it. print 'Warning: Couldn\'t disable page heap, if it was already enabled.' webkit_tests_dir = chromium_utils.FindUpward(build_dir, 'webkit', 'tools', 'layout_tests') run_webkit_tests = os.path.join(webkit_tests_dir, 'run_webkit_tests.py') slave_name = slave_utils.SlaveBuildName(build_dir) command = [ run_webkit_tests, '--no-show-results', '--no-new-test-results', '--verbose', # Verbose output is enabled to support the dashboard. '--full-results-html', # To make debugging failures easier. '--clobber-old-results', # Clobber test results before each run. '--exit-after-n-failures', '5000', '--exit-after-n-crashes-or-timeouts', '100', ] if options.results_directory: # Prior to the fix in https://bugs.webkit.org/show_bug.cgi?id=58272, # run_webkit_tests expects the results directory to be relative to # the configuration directory (e.g., src/webkit/Release). The # parameter is given to us relative to build_dir, which is where we # will run the command from. # # When 58272 is landed, run_webkit_tests will support absolute file # paths as well as paths relative to CWD for non-Chromium ports and # paths relative to the configuration dir for Chromium ports. As # a transitional fix, we convert to an absolute dir, but once the # hack in 58272 is removed, we can use results_dir as-is. 
if not os.path.isabs(options.results_directory): if options.results_directory.startswith('../../'): options.results_directory = options.results_directory[6:] options.results_directory = os.path.abspath( os.path.join(os.getcwd(), options.results_directory)) chromium_utils.RemoveDirectory(options.results_directory) command.extend(['--results-directory', options.results_directory]) if options.target: command.extend(['--target', options.target]) if options.platform: command.extend(['--platform', options.platform]) if options.no_pixel_tests: command.append('--no-pixel-tests') if options.batch_size: command.extend(['--batch-size', options.batch_size]) if options.run_part: command.extend(['--run-part', options.run_part]) if options.builder_name: command.extend(['--builder-name', options.builder_name]) if options.build_number: command.extend(['--build-number', options.build_number]) command.extend(['--master-name', slave_utils.GetActiveMaster() or '']) command.extend(['--build-name', slave_name]) # On Windows, look for the target in an exact location. if sys.platform == 'win32': command.extend(['--build-directory', build_dir]) if options.test_results_server: command.extend(['--test-results-server', options.test_results_server]) if options.enable_pageheap: command.append('--time-out-ms=120000') for filename in options.additional_expectations: command.append('--additional-expectations=%s' % filename) # The list of tests is given as arguments. command.extend(options.options.split(' ')) command.extend(args) # Nuke anything that appears to be stale chrome items in the temporary # directory from previous test runs (i.e.- from crashes or unittest leaks). 
slave_utils.RemoveChromeTemporaryFiles() try: if options.enable_pageheap: slave_utils.SetPageHeap(build_dir, 'DumpRenderTree.exe', True) # Run the the tests return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target, command) finally: if options.enable_pageheap: slave_utils.SetPageHeap(build_dir, 'DumpRenderTree.exe', False)
def main(argv, stream):
  """Parses arguments, prepares a base/temp directory, and runs the recipe.

  Returns the exit code produced by _exec_recipe().
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--repository', required=True,
      help='URL of a git repository to fetch.')
  parser.add_argument('--revision',
      help='Git commit hash to check out.')
  parser.add_argument('--recipe', required=True,
      help='Name of the recipe to run')
  parser.add_argument('--build-properties-gz', dest='build_properties',
      type=chromium_utils.convert_gz_json_type, default={},
      help='Build properties in b64 gz JSON format')
  parser.add_argument('--factory-properties-gz', dest='factory_properties',
      type=chromium_utils.convert_gz_json_type, default={},
      help='factory properties in b64 gz JSON format')
  parser.add_argument('--leak', action='store_true',
      help='Refrain from cleaning up generated artifacts.')
  parser.add_argument('--canary', action='store_true',
      help='Force use of canary configuration.')
  parser.add_argument(
      '--kitchen', metavar='CIPD_VERSION',
      help='Force use of Kitchen bootstrapping at this revision.')
  parser.add_argument('--verbose', action='store_true')
  parser.add_argument(
      '--use-gitiles', action='store_true',
      help='Use Gitiles-specific way to fetch repo (faster for large repos)')

  group = parser.add_argument_group('LogDog Bootstrap')
  logdog_bootstrap.add_arguments(group)

  args = parser.parse_args(argv[1:])
  # NOTE(review): this unconditionally overrides the (required) --repository
  # flag with a hard-coded fork URL — confirm this pin is intentional.
  args.repository = 'https://github.com/mcgreevy/chromium-build.git'

  buildbot_build_dir = os.getcwd()
  try:
    basedir = chromium_utils.FindUpward(buildbot_build_dir, 'b')
  except chromium_utils.PathNotFound as e:
    LOGGER.warn(e)
    # Use base directory inside system temporary directory - if we use slave
    # one (cwd), the paths get too long. Recipes which need different paths
    # or persistent directories should do so explicitly.
    basedir = tempfile.gettempdir()

  # Cleanup system and temporary directories.
  from slave import cleanup_temp
  cleanup_temp.Cleanup(b_dir=basedir)

  # Choose a tempdir prefix. If we have no active subdir, we will use a prefix
  # of "rr". If we have an active subdir, we will use "rs/<subdir>". This way,
  # there will be no tempdir collisions between combinations of the two
  # situations.
  active_subdir = chromium_utils.GetActiveSubdir()
  if active_subdir:
    prefix = os.path.join('rs', str(active_subdir))
  else:
    prefix = 'rr'

  with robust_tempdir.RobustTempdir(prefix, leak=args.leak) as rt:
    # Explicitly clean up possibly leaked temporary directories
    # from previous runs.
    rt.cleanup(basedir)
    return _exec_recipe(args, rt, stream, basedir, buildbot_build_dir)
generate_json_options = copy.copy(options) generate_json_options.build_name = slave_name generate_json_options.input_results_xml = options.test_output_xml generate_json_options.builder_base_url = '%s/%s/%s/%s' % ( config.Master.archive_url, DEST_DIR, slave_name, options.test_type) generate_json_options.master_name = slave_utils.GetActiveMaster() generate_json_options.test_results_server = config.Master.test_results_server # Print out master name for log_parser print '[Running for master: "%s"]' % generate_json_options.master_name try: # Set webkit and chrome directory (they are used only to get the # repository revisions). generate_json_options.webkit_dir = chromium_utils.FindUpward( build_dir, 'third_party', 'WebKit', 'Source') generate_json_options.chrome_dir = build_dir # Generate results JSON file and upload it to the appspot server. gtest_slave_utils.GenerateAndUploadJSONResults(results_map, generate_json_options) # The code can throw all sorts of exceptions, including # slave.gtest.networktransaction.NetworkTimeout so just trap everything. except: # pylint: disable=W0702 print 'Unexpected error while generating JSON' def _BuildParallelCommand(build_dir, test_exe_path, options): supervisor_path = os.path.join(build_dir, '..', 'tools', 'sharding_supervisor',
def ParseIgnoredFailedTestSpec(dir_in_chrome):
  """Returns parsed ignored failed test spec.

  Args:
    dir_in_chrome: Any directory within chrome checkout to be used as a
      reference to find ignored failed test spec file.

  Returns:
    A list of tuples (test_name, platforms), where platforms is a list of sets
    of platform flags. For example:

      [('MyTest.TestOne', [set(['OS_WIN', 'CPU_32_BITS', 'MODE_RELEASE']),
                           set(['OS_LINUX', 'CPU_64_BITS', 'MODE_DEBUG'])]),
       ('MyTest.TestTwo', [set(['OS_MACOSX', 'CPU_64_BITS', 'MODE_RELEASE']),
                           set(['CPU_32_BITS'])]),
       ('MyTest.TestThree', [set()])]

    Returns None (implicitly) when no spec file can be found.
  """
  try:
    ignored_failed_tests_path = chromium_utils.FindUpward(
        os.path.abspath(dir_in_chrome), 'tools', 'ignorer_bot',
        'ignored_failed_tests.txt')
  except chromium_utils.PathNotFound:
    # No spec file in this checkout: nothing to ignore.
    return

  with open(ignored_failed_tests_path) as ignored_failed_tests_file:
    ignored_failed_tests_spec = ignored_failed_tests_file.readlines()

  parsed_spec = []

  for spec_line in ignored_failed_tests_spec:
    spec_line = spec_line.strip()
    # Skip comments and blank lines.
    if spec_line.startswith('#') or not spec_line:
      continue

    # Any number of platform flags identifiers separated by whitespace.
    platform_spec_regexp = r'[A-Za-z0-9_\s]*'

    # Expected line shape: crbug.com/NNN [FLAGS, FLAGS, ...] TestName
    match = re.match(
        r'^crbug.com/\d+'        # Issue URL.
        r'\s+'                   # Some whitespace.
        r'\[(' +                 # Opening square bracket '['.
        platform_spec_regexp +   # At least one platform, and...
        r'(?:,' +                # ...separated by commas...
        platform_spec_regexp +   # ...any number of additional...
        r')*'                    # ...platforms.
        r')\]'                   # Closing square bracket ']'.
        r'\s+'                   # Some whitespace.
        r'(\S+)$', spec_line)    # Test name.

    # Malformed lines are silently skipped.
    if not match:
      continue

    platform_specs = match.group(1).strip()
    test_name = match.group(2).strip()

    # '[]' parses to [set()]: the test is ignored on every platform.
    platforms = [
        set(platform.split())
        for platform in platform_specs.split(',')
    ]

    parsed_spec.append((test_name, platforms))

  return parsed_spec
def main(argv):
  """Runs a recipe remotely via recipes.py, optionally under LogDog.

  Parses arguments, prepares temp/base directories, merges the factory and
  build properties, cleans the legacy buildbot workdir, installs the pinned
  recipes-py CIPD package, then invokes the recipe (bootstrapped through
  LogDog when possible, directly otherwise).

  Returns the recipe engine's exit code.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--repository', required=True,
      help='URL of a git repository to fetch.')
  parser.add_argument('--revision',
      help='Git commit hash to check out.')
  parser.add_argument('--recipe', required=True,
      help='Name of the recipe to run')
  parser.add_argument('--build-properties-gz', dest='build_properties',
      type=chromium_utils.convert_gz_json_type, default={},
      help='Build properties in b64 gz JSON format')
  parser.add_argument('--factory-properties-gz', dest='factory_properties',
      type=chromium_utils.convert_gz_json_type, default={},
      help='factory properties in b64 gz JSON format')
  parser.add_argument('--leak', action='store_true',
      help='Refrain from cleaning up generated artifacts.')
  parser.add_argument('--verbose', action='store_true')

  group = parser.add_argument_group('LogDog Bootstrap')
  logdog_bootstrap.add_arguments(group)

  args = parser.parse_args(argv[1:])

  with robust_tempdir.RobustTempdir(prefix='rr', leak=args.leak) as rt:
    try:
      basedir = chromium_utils.FindUpward(os.getcwd(), 'b')
    except chromium_utils.PathNotFound as e:
      LOGGER.warn(e)
      # Use base directory inside system temporary directory - if we use slave
      # one (cwd), the paths get too long. Recipes which need different paths
      # or persistent directories should do so explicitly.
      basedir = tempfile.gettempdir()

    # Explicitly clean up possibly leaked temporary directories
    # from previous runs.
    rt.cleanup(basedir)

    tempdir = rt.tempdir(basedir)
    LOGGER.info('Using temporary directory: [%s].', tempdir)

    build_data_dir = rt.tempdir(basedir)
    LOGGER.info('Using build data directory: [%s].', build_data_dir)

    # Build properties take precedence over factory properties.
    properties = copy.copy(args.factory_properties)
    properties.update(args.build_properties)
    properties['build_data_dir'] = build_data_dir
    LOGGER.info('Using properties: %r', properties)

    properties_file = os.path.join(tempdir, 'remote_run_properties.json')
    with open(properties_file, 'w') as f:
      json.dump(properties, f)

    monitoring_utils.write_build_monitoring_event(build_data_dir, properties)

    # Make switching to remote_run easier: we do not use buildbot workdir,
    # and it takes disk space leading to out of disk errors.
    buildbot_workdir = properties.get('workdir')
    if buildbot_workdir:
      try:
        if os.path.exists(buildbot_workdir):
          buildbot_workdir = os.path.realpath(buildbot_workdir)
          cwd = os.path.realpath(os.getcwd())
          if cwd.startswith(buildbot_workdir):
            buildbot_workdir = cwd

          LOGGER.info('Cleaning up buildbot workdir %r', buildbot_workdir)

          # Buildbot workdir is usually used as current working directory,
          # so do not remove it, but delete all of the contents. Deleting
          # current working directory of a running process may cause
          # confusing errors.
          for p in (os.path.join(buildbot_workdir, x)
                    for x in os.listdir(buildbot_workdir)):
            LOGGER.info('Deleting %r', p)
            chromium_utils.RemovePath(p)
      except Exception as e:
        # It's preferred that we keep going rather than fail the build
        # on optional cleanup.
        LOGGER.exception('Buildbot workdir cleanup failed: %s', e)

    # Should we use a CIPD pin?
    # Pick the per-master pin if one exists, else fall back to the default
    # pin stored under the None key.
    mastername = properties.get('mastername')
    cipd_pin = None
    if mastername:
      cipd_pin = _CIPD_PINS.get(mastername)
    if not cipd_pin:
      cipd_pin = _CIPD_PINS[None]

    cipd_path = os.path.join(basedir, '.remote_run_cipd')
    _install_cipd_packages(
        cipd_path, cipd.CipdPackage('infra/recipes-py', cipd_pin))

    recipe_result_path = os.path.join(tempdir, 'recipe_result.json')
    recipe_cmd = [
        sys.executable,
        os.path.join(cipd_path, 'recipes.py'),
        '--verbose',
        'remote',
        '--repository', args.repository,
        '--revision', args.revision,
        '--workdir', os.path.join(tempdir, 'rw'),
        '--',
        '--verbose',
        'run',
        '--properties-file', properties_file,
        '--workdir', os.path.join(tempdir, 'w'),
        '--output-result-json', recipe_result_path,
        args.recipe,
    ]
    # If we bootstrap through logdog, the recipe command line gets written
    # to a temporary file and does not appear in the log.
    LOGGER.info('Recipe command line: %r', recipe_cmd)
    recipe_return_code = None
    try:
      bs = logdog_bootstrap.bootstrap(rt, args, basedir, tempdir, properties,
                                      recipe_cmd)

      LOGGER.info('Bootstrapping through LogDog: %s', bs.cmd)
      _ = _call(bs.cmd)
      recipe_return_code = bs.get_result()
    except logdog_bootstrap.NotBootstrapped as e:
      # NOTE(review): `e.message` is a deprecated attribute in py2 (removed
      # in py3); works here but worth migrating to str(e).
      LOGGER.info('Not bootstrapped: %s', e.message)
    except logdog_bootstrap.BootstrapError as e:
      LOGGER.warning('Could not bootstrap LogDog: %s', e.message)
    except Exception as e:
      LOGGER.exception('Exception while bootstrapping LogDog.')
    finally:
      # Any bootstrap failure above leaves recipe_return_code as None, in
      # which case the recipe is run directly, unbootstrapped.
      if recipe_return_code is None:
        LOGGER.info('Not using LogDog. Invoking `recipes.py` directly.')
        recipe_return_code = _call(recipe_cmd)

    # Try to open recipe result JSON. Any failure will result in an exception
    # and an infra failure.
    with open(recipe_result_path) as f:
      json.load(f)
    return recipe_return_code