def __init__(self, tests_type):
  """Collects buildbot identity from the environment for results upload.

  Args:
    tests_type: Name of the test suite whose results are being uploaded
        (e.g. 'Chromium_Android_Instrumentation').

  Raises:
    Exception: if BUILDBOT_BUILDNUMBER or BUILDBOT_BUILDERNAME is unset,
        i.e. we are not running under buildbot.
  """
  self._build_number = os.environ.get('BUILDBOT_BUILDNUMBER')
  self._builder_name = os.environ.get('BUILDBOT_BUILDERNAME')
  self._tests_type = tests_type
  if not self._build_number or not self._builder_name:
    # BUG FIX: the two adjacent string literals were missing a separating
    # space, producing "...to the serverfrom your local machine."
    raise Exception('You should not be uploading tests results to the server '
                    'from your local machine.')
  upstream = (tests_type != 'Chromium_Android_Instrumentation')
  if upstream:
    # TODO(frankf): Use factory properties (see buildbot/bb_device_steps.py)
    # This requires passing the actual master name (e.g. 'ChromiumFYI' not
    # 'chromium.fyi').
    from slave import slave_utils
    self._build_name = slave_utils.SlaveBuildName(constants.DIR_SOURCE_ROOT)
    self._master_name = slave_utils.GetActiveMaster()
  else:
    self._build_name = 'chromium-android'
    # Fall back to 'master' when BUILDBOT_BRANCH is unset or empty.
    buildbot_branch = os.environ.get('BUILDBOT_BRANCH') or 'master'
    self._master_name = '%s-%s' % (self._build_name, buildbot_branch)
  self._test_results_map = {}
def __init__(self, options):
  """Constructor.

  Args:
    options: Command-line option object from optparse.  Must provide at
        least build_dir, internal, perf_subdir and build_number.
  """
  self.options = options

  # Do platform-specific config.  is_posix controls whether we ssh/scp to
  # the archive host later on.
  if sys.platform in ('win32', 'cygwin'):
    self.is_posix = False
  elif sys.platform.startswith('darwin'):
    self.is_posix = True
  elif sys.platform.startswith('linux'):
    self.is_posix = True
  else:
    print 'Unknown/unsupported platform.'
    sys.exit(1)

  # Extract the build name of this slave (e.g., 'chrome-release') from its
  # configuration file.
  chrome_dir = os.path.abspath(options.build_dir)
  print 'chrome_dir: %s' % chrome_dir
  build_name = slave_utils.SlaveBuildName(chrome_dir)
  print 'build name: %s' % build_name

  # The 'last change:' line MUST appear for the buildbot output-parser to
  # construct the 'view coverage' link. (See
  # scripts/master/log_parser/archive_command.py)
  wc_dir = os.path.dirname(chrome_dir)
  self.last_change = str(slave_utils.SubversionRevision(wc_dir))
  print 'last change: %s' % self.last_change

  host_name = socket.gethostname()
  print 'host name: %s' % host_name

  # Choose internal vs public archive configuration.
  archive_config = config.Archive()
  if options.internal:
    archive_config.Internal()
  self.archive_host = archive_config.archive_host
  if self.is_posix:
    # Always ssh/scp to the archive host as chrome-bot.
    self.archive_host = 'chrome-bot@' + self.archive_host
  print 'archive host: %s' % self.archive_host

  # Destination subdirectory: explicit override, else the build name,
  # optionally suffixed with the build number.
  if options.perf_subdir:
    self.perf_subdir = options.perf_subdir
  else:
    self.perf_subdir = build_name
  if options.build_number:
    self.perf_subdir = os.path.join(self.perf_subdir, options.build_number)
    print 'build number: %s' % options.build_number
  print 'perf subdir: %s' % self.perf_subdir
  self.archive_path = os.path.join(archive_config.www_dir_base, 'coverage',
                                   self.perf_subdir)
def archive_layout(options, args): logging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s:%(lineno)-3d' ' %(levelname)s %(message)s', datefmt='%y%m%d %H:%M:%S') chrome_dir = os.path.abspath(options.build_dir) results_dir_basename = os.path.basename(options.results_dir) if options.results_dir is not None: options.results_dir = os.path.abspath( os.path.join(options.build_dir, options.results_dir)) else: options.results_dir = chromium_utils.FindUpward(chrome_dir, RESULT_DIR) print 'Archiving results from %s' % options.results_dir staging_dir = slave_utils.GetStagingDir(chrome_dir) print 'Staging in %s' % staging_dir (actual_file_list, diff_file_list) = _CollectArchiveFiles(options.results_dir) zip_file = chromium_utils.MakeZip(staging_dir, results_dir_basename, actual_file_list, options.results_dir)[1] full_results_json = os.path.join(options.results_dir, 'full_results.json') # Extract the build name of this slave (e.g., 'chrome-release') from its # configuration file if not provided as a param. build_name = options.builder_name or slave_utils.SlaveBuildName(chrome_dir) build_name = re.sub('[ .()]', '_', build_name) last_change = str(slave_utils.SubversionRevision(chrome_dir)) print 'last change: %s' % last_change print 'build name: %s' % build_name print 'host name: %s' % socket.gethostname() # Where to save layout test results. 
dest_parent_dir = os.path.join(config.Archive.www_dir_base, results_dir_basename.replace('-', '_'), build_name) dest_dir = os.path.join(dest_parent_dir, last_change) gs_bucket = options.factory_properties.get('gs_bucket', None) if gs_bucket: gs_base = '/'.join([gs_bucket, build_name, last_change]) gs_acl = options.factory_properties.get('gs_acl', None) slave_utils.GSUtilCopyFile(zip_file, gs_base, gs_acl=gs_acl) slave_utils.GSUtilCopyFile(full_results_json, gs_base, gs_acl=gs_acl) else: slave_utils.MaybeMakeDirectoryOnArchiveHost(dest_dir) slave_utils.CopyFileToArchiveHost(zip_file, dest_dir) slave_utils.CopyFileToArchiveHost(full_results_json, dest_dir) # Not supported on Google Storage yet. _ArchiveFullLayoutTestResults(staging_dir, dest_parent_dir, diff_file_list, options) return 0
def main(argv):
  """Uploads the zipped o3d build to the snapshots archive.

  Args:
    argv: unused command-line arguments.

  Returns:
    0 on success.
  """
  source_dir = os.path.join(os.getcwd(), 'o3d')
  stage_dir = slave_utils.GetStagingDir(source_dir)

  # Work out builder name, o3d revision and platform for the archive path.
  bot_name = slave_utils.SlaveBuildName(source_dir)
  revision = str(slave_utils.SubversionRevision(source_dir))
  platform_name = chromium_utils.PlatformName()

  # Upload the build zip from staging to the snapshots area.
  local_zip = os.path.join(stage_dir,
                           'full-build-%s_%s.zip' % (platform_name, revision))
  remote_zip = 'snapshots/o3d/%s/%s.zip' % (revision, bot_name)
  archive_file.UploadFile(local_zip, remote_zip)
  return 0
def __init__(self, options, build_revision):
  """Sets a number of file and directory paths for convenient use.

  Args:
    options: parsed command-line options (src_dir, target, staging_dir,
        build_name, dirs, factory_properties, ...).
    build_revision: revision identifier used to name archived artifacts.
  """
  self.options = options
  self._src_dir = os.path.abspath(options.src_dir)
  self._chrome_dir = os.path.join(self._src_dir, 'chrome')

  build_dir = build_directory.GetBuildOutputDirectory()
  self._build_dir = os.path.join(build_dir, options.target)
  # Pick the per-platform tools/build directory that holds FILES etc.
  if chromium_utils.IsWindows():
    self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build', 'win')
  elif chromium_utils.IsLinux():
    # On Linux, we might have built for chromeos. Archive the same.
    if (options.factory_properties.get('chromeos', None) or
        slave_utils.GypFlagIsOn(options, 'chromeos')):
      self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build',
                                    'chromeos')
    # Or, we might have built for Android.
    elif options.factory_properties.get('target_os') == 'android':
      self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build',
                                    'android')
    else:
      self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build',
                                    'linux')
  elif chromium_utils.IsMac():
    self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build', 'mac')
  else:
    raise NotImplementedError(
        'Platform "%s" is not currently supported.' % sys.platform)
  # Explicit staging dir wins; otherwise derive one from the source tree.
  self._staging_dir = (options.staging_dir or
                       slave_utils.GetStagingDir(self._src_dir))
  if not os.path.exists(self._staging_dir):
    os.makedirs(self._staging_dir)

  self._symbol_dir_base = options.dirs['symbol_dir_base']
  self._www_dir_base = options.dirs['www_dir_base']

  if options.build_name:
    self._build_name = options.build_name
  else:
    self._build_name = slave_utils.SlaveBuildName(self._src_dir)

  # Upload destinations are namespaced per build name.
  self._symbol_dir_base = os.path.join(self._symbol_dir_base,
                                       self._build_name)
  self._www_dir_base = os.path.join(self._www_dir_base, self._build_name)

  self._version_file = os.path.join(self._chrome_dir, 'VERSION')

  self._chromium_revision = chromium_utils.GetBuildSortKey(options)[1]
  self._v8_revision = chromium_utils.GetBuildSortKey(options, project='v8')[1]
  self._v8_revision_git = chromium_utils.GetGitCommit(options, project='v8')

  self.last_change_file = os.path.join(self._staging_dir, 'LAST_CHANGE')
  # The REVISIONS file will record the revisions information of the main
  # components Chromium/WebKit/V8.
  self.revisions_path = os.path.join(self._staging_dir, 'REVISIONS')
  self._build_revision = build_revision
  self._build_path_component = str(self._build_revision)

  # Will be initialized in GetLastBuildRevision.
  self.last_chromium_revision = None
  self.last_v8_revision = None

  self._files_file = os.path.join(self._tool_dir,
                                  archive_utils.FILES_FILENAME)
  self._test_files = self.BuildOldFilesList(TEST_FILE_NAME)

  self._dual_upload = options.factory_properties.get('dual_upload', False)
  self._archive_files = None
def __init__(self, options, build_revision):
  """Sets a number of file and directory paths for convenient use.

  Args:
    options: parsed command-line options (src_dir, target, build_name,
        dirs, default_*_revision overrides, factory_properties, ...).
    build_revision: revision identifier used to name archived artifacts.
  """
  self.options = options
  self._src_dir = os.path.abspath(options.src_dir)
  self._chrome_dir = os.path.join(self._src_dir, 'chrome')
  # TODO: This code should not be grabbing so deeply into WebKit.
  # Worse, this code ends up looking at top-of-tree WebKit
  # instead of the revision in DEPS.
  self._webkit_dir = os.path.join(self._src_dir, 'third_party', 'WebKit',
                                  'Source')
  self._v8_dir = os.path.join(self._src_dir, 'v8')

  build_dir = build_directory.GetBuildOutputDirectory()
  self._build_dir = os.path.join(build_dir, options.target)
  # Pick the per-platform tools/build directory that holds FILES etc.
  if chromium_utils.IsWindows():
    self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build', 'win')
  elif chromium_utils.IsLinux():
    # On Linux, we might have built for chromeos. Archive the same.
    if (options.factory_properties.get('chromeos', None) or
        slave_utils.GypFlagIsOn(options, 'chromeos')):
      self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build',
                                    'chromeos')
    # Or, we might have built for Android.
    elif options.factory_properties.get('target_os') == 'android':
      self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build',
                                    'android')
    else:
      self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build',
                                    'linux')
  elif chromium_utils.IsMac():
    self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build', 'mac')
  else:
    raise NotImplementedError(
        'Platform "%s" is not currently supported.' % sys.platform)
  self._staging_dir = slave_utils.GetStagingDir(self._src_dir)

  self._symbol_dir_base = options.dirs['symbol_dir_base']
  self._www_dir_base = options.dirs['www_dir_base']

  if options.build_name:
    self._build_name = options.build_name
  else:
    self._build_name = slave_utils.SlaveBuildName(self._src_dir)

  # Upload destinations are namespaced per build name.
  self._symbol_dir_base = os.path.join(self._symbol_dir_base,
                                       self._build_name)
  self._www_dir_base = os.path.join(self._www_dir_base, self._build_name)

  self._version_file = os.path.join(self._chrome_dir, 'VERSION')

  # Each component revision can be overridden on the command line;
  # otherwise it is read from the corresponding checkout.
  if options.default_chromium_revision:
    self._chromium_revision = options.default_chromium_revision
  else:
    self._chromium_revision = slave_utils.GetHashOrRevision(
        os.path.dirname(self._chrome_dir))  # src/ instead of src/chrome
  if options.default_webkit_revision:
    self._webkit_revision = options.default_webkit_revision
  else:
    self._webkit_revision = slave_utils.GetHashOrRevision(
        os.path.dirname(self._webkit_dir))  # WebKit/ instead of WebKit/Source
  if options.default_v8_revision:
    self._v8_revision = options.default_v8_revision
  else:
    self._v8_revision = slave_utils.GetHashOrRevision(self._v8_dir)
  self.last_change_file = os.path.join(self._staging_dir, 'LAST_CHANGE')
  # The REVISIONS file will record the revisions information of the main
  # components Chromium/WebKit/V8.
  self.revisions_path = os.path.join(self._staging_dir, 'REVISIONS')
  self._build_revision = build_revision

  # Will be initialized in GetLastBuildRevision.
  self.last_chromium_revision = None
  self.last_webkit_revision = None
  self.last_v8_revision = None

  self._files_file = os.path.join(self._tool_dir,
                                  archive_utils.FILES_FILENAME)
  self._test_files = self.BuildOldFilesList(TEST_FILE_NAME)

  self._dual_upload = options.factory_properties.get('dual_upload', False)
  self._archive_files = None
def layout_test(options, args):
  """Parse options and call run-webkit-tests, using Python from the tree.

  Builds the run-webkit-tests command line from the given options, runs it
  in the build directory, and (when --json-test-results is set) extracts
  the JSON payload from failing_results.json afterwards.

  Args:
    options: parsed command-line options.
    args: extra test names/paths appended to the command line.

  Returns:
    The exit status of run-webkit-tests.
  """
  build_dir = os.path.abspath(options.build_dir)

  dumprendertree_exe = 'DumpRenderTree.exe'
  if options.driver_name:
    dumprendertree_exe = '%s.exe' % options.driver_name

  # Disable the page heap in case it got left enabled by some previous
  # process.
  try:
    slave_utils.SetPageHeap(build_dir, dumprendertree_exe, False)
  except chromium_utils.PathNotFound:
    # If we don't have gflags.exe, report it but don't worry about it.
    print 'Warning: Couldn\'t disable page heap, if it was already enabled.'

  blink_scripts_dir = chromium_utils.FindUpward(build_dir, 'third_party',
                                                'WebKit', 'Tools', 'Scripts')
  run_blink_tests = os.path.join(blink_scripts_dir, 'run-webkit-tests')

  slave_name = slave_utils.SlaveBuildName(build_dir)

  command = [run_blink_tests,
             '--no-show-results',
             '--no-new-test-results',
             '--full-results-html',    # For the dashboards.
             '--clobber-old-results',  # Clobber test results before each run.
             '--exit-after-n-failures', '5000',
             '--exit-after-n-crashes-or-timeouts', '100',
            ]

  # TODO(dpranke): we can switch to always using --debug-rwt-logging
  # after all the bots have WebKit r124789 or later.
  # Probe run-webkit-tests --help to see whether the flag is supported.
  capture_obj = slave_utils.RunCommandCaptureFilter()
  slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
                                         [run_blink_tests, '--help'],
                                         filter_obj=capture_obj)
  if '--debug-rwt-logging' in ''.join(capture_obj.lines):
    command.append('--debug-rwt-logging')
  else:
    command.append('--verbose')

  if options.results_directory:
    # Prior to the fix in https://bugs.webkit.org/show_bug.cgi?id=58272,
    # run_blink_tests expects the results directory to be relative to
    # the configuration directory (e.g., src/webkit/Release). The
    # parameter is given to us relative to build_dir, which is where we
    # will run the command from.
    #
    # When 58272 is landed, run_blink_tests will support absolute file
    # paths as well as paths relative to CWD for non-Chromium ports and
    # paths relative to the configuration dir for Chromium ports. As
    # a transitional fix, we convert to an absolute dir, but once the
    # hack in 58272 is removed, we can use results_dir as-is.
    if not os.path.isabs(options.results_directory):
      if options.results_directory.startswith('../../'):
        options.results_directory = options.results_directory[6:]
      options.results_directory = os.path.abspath(
          os.path.join(os.getcwd(), options.results_directory))
    chromium_utils.RemoveDirectory(options.results_directory)
    command.extend(['--results-directory', options.results_directory])

  # Forward simple option values as run-webkit-tests flags.
  if options.target:
    command.extend(['--target', options.target])
  if options.platform:
    command.extend(['--platform', options.platform])
  if options.skipped:
    command.extend(['--skipped', options.skipped])
  if options.no_pixel_tests:
    command.append('--no-pixel-tests')
  if options.batch_size:
    command.extend(['--batch-size', options.batch_size])
  if options.run_part:
    command.extend(['--run-part', options.run_part])
  if options.builder_name:
    command.extend(['--builder-name', options.builder_name])
  if options.build_number:
    command.extend(['--build-number', options.build_number])
  command.extend(['--master-name', slave_utils.GetActiveMaster() or ''])
  command.extend(['--build-name', slave_name])
  if options.step_name:
    command.extend(['--step-name', options.step_name])
  # On Windows, look for the target in an exact location.
  if sys.platform == 'win32':
    command.extend(['--build-directory', build_dir])
  if options.test_results_server:
    command.extend(['--test-results-server', options.test_results_server])

  if options.enable_pageheap:
    # Page heap slows tests down considerably; allow a longer timeout.
    command.append('--time-out-ms=120000')

  if options.time_out_ms:
    command.extend(['--time-out-ms', options.time_out_ms])

  for filename in options.additional_expectations:
    command.append('--additional-expectations=%s' % filename)

  if options.driver_name:
    command.append('--driver-name=%s' % options.driver_name)

  for additional_drt_flag in options.additional_drt_flag:
    command.append('--additional-drt-flag=%s' % additional_drt_flag)

  for test_list in options.test_list:
    command += ['--test-list', test_list]

  if options.enable_leak_detection:
    command.append('--enable-leak-detection')

  # The list of tests is given as arguments.
  if options.options:
    command.extend(options.options.split(' '))
  command.extend(args)

  # Nuke anything that appears to be stale chrome items in the temporary
  # directory from previous test runs (i.e.- from crashes or unittest leaks).
  slave_utils.RemoveChromeTemporaryFiles()

  try:
    if options.enable_pageheap:
      slave_utils.SetPageHeap(build_dir, dumprendertree_exe, True)
    # Run the tests.
    return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,
                                                  command)
  finally:
    if options.enable_pageheap:
      slave_utils.SetPageHeap(build_dir, dumprendertree_exe, False)

    if options.json_test_results:
      results_dir = options.results_directory
      results_json = os.path.join(results_dir, "failing_results.json")

      # If the json results file was not produced, then we produce no output
      # file too and rely on a recipe to handle this as invalid result.
      if os.path.isfile(results_json):
        with open(results_json, 'rb') as f:
          data = f.read()

        # data is in the form of:
        #   ADD_RESULTS(<json object>);
        # but use a regex match to also support a raw json object.
        m = re.match(
            r'[^({]*'      # From the beginning, take any except '(' or '{'
            r'(?:'
            r'\((.*)\);'   # Expect '(<json>);'
            r'|'           # or
            r'({.*})'      # '<json object>'
            r')$', data)
        assert m is not None
        data = m.group(1) or m.group(2)

        # Sanity-check that the payload really is a JSON object before
        # writing it through verbatim.
        json_data = json.loads(data)
        assert isinstance(json_data, dict)

        with open(options.json_test_results, 'wb') as f:
          f.write(data)
def archive_layout(options, args): logging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s:%(lineno)-3d' ' %(levelname)s %(message)s', datefmt='%y%m%d %H:%M:%S') chrome_dir = os.path.abspath(options.build_dir) results_dir_basename = os.path.basename(options.results_dir) if options.results_dir is not None: options.results_dir = os.path.abspath(os.path.join(options.build_dir, options.results_dir)) else: options.results_dir = chromium_utils.FindUpward(chrome_dir, RESULT_DIR) print 'Archiving results from %s' % options.results_dir staging_dir = options.staging_dir or slave_utils.GetStagingDir(chrome_dir) print 'Staging in %s' % staging_dir if not os.path.exists(staging_dir): os.makedirs(staging_dir) (actual_file_list, diff_file_list) = _CollectArchiveFiles(options.results_dir) zip_file = chromium_utils.MakeZip(staging_dir, results_dir_basename, actual_file_list, options.results_dir)[1] # TODO(ojan): Stop separately uploading full_results.json once garden-o-matic # switches to using failing_results.json. full_results_json = os.path.join(options.results_dir, 'full_results.json') failing_results_json = os.path.join(options.results_dir, 'failing_results.json') # Extract the build name of this slave (e.g., 'chrome-release') from its # configuration file if not provided as a param. build_name = options.builder_name or slave_utils.SlaveBuildName(chrome_dir) build_name = re.sub('[ .()]', '_', build_name) wc_dir = os.path.dirname(chrome_dir) last_change = slave_utils.GetHashOrRevision(wc_dir) # TODO(dpranke): Is it safe to assume build_number is not blank? Should we # assert() this ? build_number = str(options.build_number) print 'last change: %s' % last_change print 'build name: %s' % build_name print 'build number: %s' % build_number print 'host name: %s' % socket.gethostname() if options.gs_bucket: # Create a file containing last_change revision. 
This file will be uploaded # after all layout test results are uploaded so the client can check this # file to see if the upload for the revision is complete. # See crbug.com/574272 for more details. last_change_file = os.path.join(staging_dir, 'LAST_CHANGE') with open(last_change_file, 'w') as f: f.write(last_change) # Copy the results to a directory archived by build number. gs_base = '/'.join([options.gs_bucket, build_name, build_number]) gs_acl = options.gs_acl # These files never change, cache for a year. cache_control = "public, max-age=31556926" slave_utils.GSUtilCopyFile(zip_file, gs_base, gs_acl=gs_acl, cache_control=cache_control) slave_utils.GSUtilCopyDir(options.results_dir, gs_base, gs_acl=gs_acl, cache_control=cache_control) # TODO(dpranke): Remove these two lines once clients are fetching the # files from the layout-test-results dir. slave_utils.GSUtilCopyFile(full_results_json, gs_base, gs_acl=gs_acl, cache_control=cache_control) slave_utils.GSUtilCopyFile(failing_results_json, gs_base, gs_acl=gs_acl, cache_control=cache_control) slave_utils.GSUtilCopyFile(last_change_file, gs_base + '/' + results_dir_basename, gs_acl=gs_acl, cache_control=cache_control) # And also to the 'results' directory to provide the 'latest' results # and make sure they are not cached at all (Cloud Storage defaults to # caching w/ a max-age=3600). gs_base = '/'.join([options.gs_bucket, build_name, 'results']) cache_control = 'no-cache' slave_utils.GSUtilCopyFile(zip_file, gs_base, gs_acl=gs_acl, cache_control=cache_control) slave_utils.GSUtilCopyDir(options.results_dir, gs_base, gs_acl=gs_acl, cache_control=cache_control) slave_utils.GSUtilCopyFile(last_change_file, gs_base + '/' + results_dir_basename, gs_acl=gs_acl, cache_control=cache_control) else: # Where to save layout test results. 
dest_parent_dir = os.path.join(archive_utils.Config.www_dir_base, results_dir_basename.replace('-', '_'), build_name) dest_dir = os.path.join(dest_parent_dir, last_change) _MaybeMakeDirectoryOnArchiveHost(dest_dir) _CopyFileToArchiveHost(zip_file, dest_dir) _CopyFileToArchiveHost(full_results_json, dest_dir) _CopyFileToArchiveHost(failing_results_json, dest_dir) # Not supported on Google Storage yet. _ArchiveFullLayoutTestResults(staging_dir, dest_parent_dir, diff_file_list, options) return 0
def main_linux(options, args):
  """Runs a test binary on Linux with the appropriate environment set up.

  Locates the test executable under the build output directory, configures
  the SUID sandbox, LD_LIBRARY_PATH, an optional Xvfb server and optional
  local HTTP server, runs the test, and optionally uploads JSON results.

  Args:
    options: parsed command-line options.
    args: args[0] is the test executable; the rest are passed through.

  Returns:
    The test command's exit status.

  Raises:
    chromium_utils.MissingArgument: if no test executable was given.
    chromium_utils.PathNotFound: if the executable does not exist (and
        succeed_on_missing_exe is not set).
  """
  if len(args) < 1:
    raise chromium_utils.MissingArgument('Usage: %s' % USAGE)

  build_dir = os.path.normpath(os.path.abspath(options.build_dir))
  slave_name = slave_utils.SlaveBuildName(build_dir)
  # If this is a sub-project build (i.e. there's a 'sconsbuild' in build_dir),
  # look for the test binaries there, otherwise look for the top-level build
  # output.
  # This assumes we never pass a build_dir which might contain build output
  # that we're not trying to test. This is currently a safe assumption since
  # we don't have any builders that do both sub-project and top-level builds
  # (only Modules builders do sub-project builds), so they shouldn't ever
  # have both 'build_dir/sconsbuild' and 'build_dir/../sconsbuild'.
  outdir = None
  if os.path.exists(os.path.join(build_dir, 'sconsbuild')):
    outdir = 'sconsbuild'
  elif os.path.exists(os.path.join(build_dir, 'out')):
    outdir = 'out'

  if outdir:
    bin_dir = os.path.join(build_dir, outdir, options.target)
    src_dir = os.path.join(slave_utils.SlaveBaseDir(build_dir), 'build', 'src')
    os.environ['CR_SOURCE_ROOT'] = src_dir
  else:
    if os.path.exists(os.path.join(build_dir, '..', 'sconsbuild')):
      bin_dir = os.path.join(build_dir, '..', 'sconsbuild', options.target)
    else:
      bin_dir = os.path.join(build_dir, '..', 'out', options.target)

  # Figure out what we want for a special frame buffer directory.
  special_xvfb_dir = None
  if options.special_xvfb == 'auto':
    fp_special_xvfb = options.factory_properties.get('special_xvfb', None)
    fp_chromeos = options.factory_properties.get('chromeos', None)
    if fp_special_xvfb or (
        fp_special_xvfb is None and
        (fp_chromeos or
         slave_utils.GypFlagIsOn(options, 'use_aura') or
         slave_utils.GypFlagIsOn(options, 'chromeos'))):
      special_xvfb_dir = options.special_xvfb_dir
  elif options.special_xvfb:
    special_xvfb_dir = options.special_xvfb_dir

  test_exe = args[0]
  test_exe_path = os.path.join(bin_dir, test_exe)
  if not os.path.exists(test_exe_path):
    if options.factory_properties.get('succeed_on_missing_exe', False):
      print '%s missing but succeed_on_missing_exe used, exiting' % (
          test_exe_path)
      return 0
    msg = 'Unable to find %s' % test_exe_path
    raise chromium_utils.PathNotFound(msg)

  # Decide whether to enable the suid sandbox for Chrome.
  if should_enable_sandbox(CHROME_SANDBOX_PATH):
    print 'Enabling sandbox. Setting environment variable:'
    print ' CHROME_DEVEL_SANDBOX="%s"' % CHROME_SANDBOX_PATH
    os.environ['CHROME_DEVEL_SANDBOX'] = CHROME_SANDBOX_PATH
  else:
    print 'Disabling sandbox. Setting environment variable:'
    print ' CHROME_DEVEL_SANDBOX=""'
    os.environ['CHROME_DEVEL_SANDBOX'] = ''

  # Nuke anything that appears to be stale chrome items in the temporary
  # directory from previous test runs (i.e.- from crashes or unittest leaks).
  slave_utils.RemoveChromeTemporaryFiles()

  os.environ['LD_LIBRARY_PATH'] = '%s:%s/lib:%s/lib.target' % (
      bin_dir, bin_dir, bin_dir)
  # Figure out what we want for a special llvmpipe directory.
  if (options.llvmpipe_dir and os.path.exists(options.llvmpipe_dir)):
    os.environ['LD_LIBRARY_PATH'] += ':' + options.llvmpipe_dir

  # Build the command to run, depending on how the test should be launched.
  if options.parallel:
    command = _BuildParallelCommand(build_dir, test_exe_path, options)
  elif options.run_shell_script:
    command = ['bash', test_exe_path]
  elif options.run_python_script:
    command = [sys.executable, test_exe]
  else:
    command = [test_exe_path]
  command.extend(args[1:])

  results_tracker = None
  if options.generate_json_file:
    results_tracker = gtest_slave_utils.GTestUnexpectedDeathTracker()

    if os.path.exists(options.test_output_xml):
      # remove the old XML output file.
      os.remove(options.test_output_xml)

  try:
    http_server = None
    if options.document_root:
      http_server = start_http_server('linux', build_dir=build_dir,
                                      test_exe_path=test_exe_path,
                                      document_root=options.document_root)
    if options.xvfb:
      xvfb.StartVirtualX(slave_name, bin_dir,
                         with_wm=options.factory_properties.get(
                             'window_manager', True),
                         server_dir=special_xvfb_dir)
    if options.factory_properties.get('asan', False):
      # Pipe test output through the ASan symbolizer and c++filt.
      # NOTE(review): this branch does not pass results_tracker to
      # _RunGTestCommand, so JSON generation below sees no tracked results
      # under ASan -- confirm this is intentional.
      symbolize = os.path.abspath(os.path.join('src', 'tools', 'valgrind',
                                               'asan', 'asan_symbolize.py'))
      pipes = [[sys.executable, symbolize], ['c++filt']]
      result = _RunGTestCommand(command, pipes=pipes)
    else:
      result = _RunGTestCommand(command, results_tracker)
  finally:
    if http_server:
      http_server.StopServer()
    if options.xvfb:
      xvfb.StopVirtualX(slave_name)

  if options.generate_json_file:
    _GenerateJSONForTestResults(options, results_tracker)

  return result
else: sys.stderr.write( 'Unable to generate JSON from XML, using log output.') # The file did not get generated. See if we can generate a results map # from the log output. results_map = results_tracker.GetResultsMap() except Exception, e: # This error will be caught by the following 'not results_map' statement. print 'Error: ', e if not results_map: print 'No data was available to update the JSON results' return build_dir = os.path.abspath(options.build_dir) slave_name = slave_utils.SlaveBuildName(build_dir) generate_json_options = copy.copy(options) generate_json_options.build_name = slave_name generate_json_options.input_results_xml = options.test_output_xml generate_json_options.builder_base_url = '%s/%s/%s/%s' % ( config.Master.archive_url, DEST_DIR, slave_name, options.test_type) generate_json_options.master_name = slave_utils.GetActiveMaster() generate_json_options.test_results_server = config.Master.test_results_server # Print out master name for log_parser print '[Running for master: "%s"]' % generate_json_options.master_name try: # Set webkit and chrome directory (they are used only to get the # repository revisions).
def _Main(options, args, extra_env):
  """Using the target build configuration, run the executable given in the
  first non-option argument, passing any following arguments to that
  executable.

  Args:
    options: Command-line options for this invocation of runtest.py.
    args: Command and arguments for the test.
    extra_env: A dictionary of extra environment variables to set.

  Returns:
    Exit status code.

  Raises:
    chromium_utils.MissingArgument: if no test executable was given.
    chromium_utils.PathNotFound: if the executable does not exist (and
        succeed_on_missing_exe is not set).
  """
  if len(args) < 1:
    raise chromium_utils.MissingArgument('Usage: %s' % USAGE)

  xvfb_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..',
                           'third_party', 'xvfb',
                           platform.architecture()[0])

  # Use a special Xvfb server dir for aura/chromeos builds.
  special_xvfb_dir = None
  fp_chromeos = options.factory_properties.get('chromeos', None)
  if (fp_chromeos or
      slave_utils.GypFlagIsOn(options, 'use_aura') or
      slave_utils.GypFlagIsOn(options, 'chromeos')):
    special_xvfb_dir = xvfb_path

  build_dir = os.path.normpath(os.path.abspath(options.build_dir))
  bin_dir = os.path.join(build_dir, options.target)
  slave_name = options.slave_name or slave_utils.SlaveBuildName(build_dir)

  test_exe = args[0]
  if options.run_python_script:
    test_exe_path = test_exe
  else:
    test_exe_path = os.path.join(bin_dir, test_exe)
  if not os.path.exists(test_exe_path):
    if options.factory_properties.get('succeed_on_missing_exe', False):
      print '%s missing but succeed_on_missing_exe used, exiting' % (
          test_exe_path)
      return 0
    raise chromium_utils.PathNotFound('Unable to find %s' % test_exe_path)

  if sys.platform == 'linux2':
    # Unset http_proxy and HTTPS_PROXY environment variables. When set, this
    # causes some tests to hang. See http://crbug.com/139638 for more info.
    if 'http_proxy' in os.environ:
      del os.environ['http_proxy']
      print 'Deleted http_proxy environment variable.'
    if 'HTTPS_PROXY' in os.environ:
      del os.environ['HTTPS_PROXY']
      print 'Deleted HTTPS_PROXY environment variable.'

    # Path to SUID sandbox binary. This must be installed on all bots.
    extra_env['CHROME_DEVEL_SANDBOX'] = CHROME_SANDBOX_PATH

    extra_env['LD_LIBRARY_PATH'] = ''
    if options.enable_lsan:
      # Use the debug version of libstdc++ under LSan. If we don't, there
      # will be a lot of incomplete stack traces in the reports.
      extra_env['LD_LIBRARY_PATH'] += '/usr/lib/x86_64-linux-gnu/debug:'

    extra_env['LD_LIBRARY_PATH'] += '%s:%s/lib:%s/lib.target' % (
        bin_dir, bin_dir, bin_dir)

  # Build the command to run, depending on how the test should be launched.
  if options.run_shell_script:
    command = ['bash', test_exe_path]
  elif options.run_python_script:
    command = [sys.executable, test_exe]
  else:
    command = _BuildTestBinaryCommand(build_dir, test_exe_path, options)
  command.extend(args[1:])

  # Nuke anything that appears to be stale chrome items in the temporary
  # directory from previous test runs (i.e.- from crashes or unittest leaks).
  slave_utils.RemoveChromeTemporaryFiles()

  log_processor = None
  if _UsingGtestJson(options):
    log_processor = gtest_utils.GTestJSONParser(
        options.build_properties.get('mastername'))

  if options.generate_json_file:
    if os.path.exists(options.test_output_xml):
      # remove the old XML output file.
      os.remove(options.test_output_xml)

  try:
    # TODO(dpranke): checking on test_exe is a temporary hack until we
    # can change the buildbot master to pass --xvfb instead of --no-xvfb
    # for these two steps. See
    # https://code.google.com/p/chromium/issues/detail?id=179814
    start_xvfb = (sys.platform == 'linux2' and
                  (options.xvfb or
                   'layout_test_wrapper' in test_exe or
                   'devtools_perf_test_wrapper' in test_exe))
    if start_xvfb:
      xvfb.StartVirtualX(slave_name, bin_dir,
                         with_wm=(options.factory_properties.get(
                             'window_manager', 'True') == 'True'),
                         server_dir=special_xvfb_dir)

    if _UsingGtestJson(options):
      json_file_name = log_processor.PrepareJSONFile(
          options.test_launcher_summary_output)
      command.append('--test-launcher-summary-output=%s' % json_file_name)

    pipes = []
    # See the comment in main() regarding offline symbolization.
    if options.use_symbolization_script:
      symbolize_command = _GetSanitizerSymbolizeCommand(
          strip_path_prefix=options.strip_path_prefix)
      pipes = [symbolize_command]

    command = _GenerateRunIsolatedCommand(build_dir, test_exe_path, options,
                                          command)
    result = _RunGTestCommand(options, command, extra_env, pipes=pipes)
  finally:
    if start_xvfb:
      xvfb.StopVirtualX(slave_name)
    if _UsingGtestJson(options):
      if options.use_symbolization_script:
        _SymbolizeSnippetsInJSON(options, json_file_name)
      log_processor.ProcessJSONFile(options.build_dir)

  if options.generate_json_file:
    if not _GenerateJSONForTestResults(options, log_processor):
      return 1

  if options.annotate:
    annotation_utils.annotate(options.test_type, result, log_processor)

  return result
def _GenerateJSONForTestResults(options, log_processor): """Generates or updates a JSON file from the gtest results XML and upload the file to the archive server. The archived JSON file will be placed at: www-dir/DEST_DIR/buildname/testname/results.json on the archive server. NOTE: This will be deprecated. Args: options: command-line options that are supposed to have build_dir, results_directory, builder_name, build_name and test_output_xml values. log_processor: An instance of PerformanceLogProcessor or similar class. Returns: True upon success, False upon failure. """ results_map = None try: results_map = gtest_slave_utils.GetResultsMap(log_processor) except Exception as e: # This error will be caught by the following 'not results_map' statement. print 'Error: ', e if not results_map: print 'No data was available to update the JSON results' # Consider this non-fatal. return True build_dir = os.path.abspath(options.build_dir) slave_name = options.builder_name or slave_utils.SlaveBuildName(build_dir) generate_json_options = copy.copy(options) generate_json_options.build_name = slave_name generate_json_options.input_results_xml = options.test_output_xml generate_json_options.builder_base_url = '%s/%s/%s/%s' % ( config.Master.archive_url, DEST_DIR, slave_name, options.test_type) generate_json_options.master_name = options.master_class_name or _GetMaster( ) generate_json_options.test_results_server = config.Master.test_results_server print _GetMasterString(generate_json_options.master_name) generator = None try: if options.revision: generate_json_options.chrome_revision = options.revision else: generate_json_options.chrome_revision = '' if options.webkit_revision: generate_json_options.webkit_revision = options.webkit_revision else: generate_json_options.webkit_revision = '' # Generate results JSON file and upload it to the appspot server. 
generator = gtest_slave_utils.GenerateJSONResults( results_map, generate_json_options) except Exception as e: print 'Unexpected error while generating JSON: %s' % e sys.excepthook(*sys.exc_info()) return False # The code can throw all sorts of exceptions, including # slave.gtest.networktransaction.NetworkTimeout so just trap everything. # Earlier versions of this code ignored network errors, so until a # retry mechanism is added, continue to do so rather than reporting # an error. try: # Upload results JSON file to the appspot server. gtest_slave_utils.UploadJSONResults(generator) except Exception as e: # Consider this non-fatal for the moment. print 'Unexpected error while uploading JSON: %s' % e sys.excepthook(*sys.exc_info()) return True
def __init__(self, options, build_revision):
  """Computes and caches the file and directory paths used while archiving.

  Args:
    options: command-line options carrying src_dir, build_dir, target,
        dirs, default_*_revision and factory_properties.
    build_revision: the revision this archive run is building.
  """
  self.options = options
  self._src_dir = os.path.abspath(options.src_dir)
  self._chrome_dir = os.path.join(self._src_dir, 'chrome')
  # TODO: This code should not be grabbing so deeply into WebKit. Worse,
  # it ends up looking at top-of-tree WebKit instead of the revision in
  # DEPS.
  self._webkit_dir = os.path.join(self._src_dir, 'third_party', 'WebKit',
                                  'Source', 'WebCore')
  self._v8_dir = os.path.join(self._src_dir, 'v8')

  # TODO: need to get the build *output* directory passed in instead so
  # Linux and Mac don't have to walk up a directory to get to the right
  # directory.
  if chromium_utils.IsWindows():
    self._build_dir = os.path.join(options.build_dir, options.target)
    tool_platform = 'win'
  elif chromium_utils.IsLinux():
    self._build_dir = os.path.join(os.path.dirname(options.build_dir),
                                   'out', options.target)
    tool_platform = 'linux'
  elif chromium_utils.IsMac():
    self._build_dir = os.path.join(os.path.dirname(options.build_dir),
                                   'xcodebuild', options.target)
    tool_platform = 'mac'
  else:
    raise NotImplementedError(
        'Platform "%s" is not currently supported.' % sys.platform)
  self._tool_dir = os.path.join(self._chrome_dir, 'tools', 'build',
                                tool_platform)

  self._staging_dir = slave_utils.GetStagingDir(self._src_dir)
  self._build_name = slave_utils.SlaveBuildName(self._src_dir)
  # Per-builder subdirectories of the configured base directories.
  self._symbol_dir_base = os.path.join(options.dirs['symbol_dir_base'],
                                       self._build_name)
  self._www_dir_base = os.path.join(options.dirs['www_dir_base'],
                                    self._build_name)

  self._version_file = os.path.join(self._chrome_dir, 'VERSION')

  # Explicit defaults win; otherwise ask Subversion for each component.
  self._chromium_revision = (options.default_chromium_revision or
                             slave_utils.SubversionRevision(self._chrome_dir))
  self._webkit_revision = (options.default_webkit_revision or
                           slave_utils.SubversionRevision(self._webkit_dir))
  self._v8_revision = (options.default_v8_revision or
                       slave_utils.SubversionRevision(self._v8_dir))

  self.last_change_file = os.path.join(self._staging_dir, 'LAST_CHANGE')
  # The REVISIONS file will record the revisions information of the main
  # components Chromium/WebKit/V8.
  self.revisions_path = os.path.join(self._staging_dir, 'REVISIONS')
  self._build_revision = build_revision
  # Will be initialized in GetLastBuildRevision.
  self.last_chromium_revision = None
  self.last_webkit_revision = None
  self.last_v8_revision = None

  self._files_file = os.path.join(self._tool_dir,
                                  archive_utils.FILES_FILENAME)
  self._test_files = self.BuildOldFilesList(TEST_FILE_NAME)

  self._dual_upload = options.factory_properties.get('dual_upload', False)
  self._archive_files = None
def main(argv): if len(argv) != 3: print 'Usage: prepare_selenium_tests.py <o3d_src_root> <destination>' print 'Exiting...' return 1 # Make given directories absolute before changing the working directory. src_root = os.path.abspath(argv[1]) o3d_dir = os.path.join(src_root, 'o3d') o3d_internal_dir = os.path.join(src_root, 'o3d-internal') destination = os.path.abspath(argv[2]) config_dir = os.path.abspath(os.path.dirname(__file__)) config_file = os.path.join(config_dir, ARCHIVE_CONFIG_NAME) print 'O3D source root:', src_root print 'Destination:', destination print 'Config file:', config_file # Change umask on linux so that outputs (latest file and zip) are readable. if utils.IsLinux(): mask = os.umask(0022) # Build ChangeResolution project. BuildChangeResolution(src_root) # Create test archive. files = GetO3DArchiveFiles(src_root, config_file) zip_name = 'o3d' utils.MakeZip(destination, zip_name, files, src_root) zip_path = os.path.join(destination, zip_name + '.zip') print 'Zip archive created: %s' % zip_path # Find builder name and revision #s. builder_name = slave_utils.SlaveBuildName(o3d_dir) o3d_rev = str(slave_utils.SubversionRevision(o3d_dir)) o3d_internal_rev = str(slave_utils.SubversionRevision(o3d_internal_dir)) package_name = 'test_' + builder_name + '.zip' package_dir = o3d_rev + '_' + o3d_internal_rev package_path = package_dir + '/' + package_name print 'Builder name:', builder_name print 'O3D revision:', o3d_rev print 'O3D-internal revision:', o3d_internal_rev print 'Package path:', package_path # Create latest file. latest_path = os.path.join(destination, 'latest_' + builder_name) file(latest_path, 'w').write(package_path) # Upload files. package_dst = ('snapshots/o3d/test_packages/o3d/' + package_dir + '/' + package_name) latest_dst = 'snapshots/o3d/test_packages/o3d/latest_' + builder_name UploadFile(zip_path, package_dst) UploadFile(latest_path, latest_dst) # Reset the umask on linux. if utils.IsLinux(): os.umask(mask) return 0
def layout_test(options, args): """Parse options and call run_webkit_tests.py, using Python from the tree.""" build_dir = os.path.abspath(options.build_dir) # Disable the page heap in case it got left enabled by some previous process. try: slave_utils.SetPageHeap(build_dir, 'DumpRenderTree.exe', False) except chromium_utils.PathNotFound: # If we don't have gflags.exe, report it but don't worry about it. print 'Warning: Couldn\'t disable page heap, if it was already enabled.' webkit_tests_dir = chromium_utils.FindUpward(build_dir, 'webkit', 'tools', 'layout_tests') run_webkit_tests = os.path.join(webkit_tests_dir, 'run_webkit_tests.py') slave_name = slave_utils.SlaveBuildName(build_dir) command = [ run_webkit_tests, '--no-show-results', '--no-new-test-results', '--verbose', # Verbose output is enabled to support the dashboard. '--full-results-html', # To make debugging failures easier. '--clobber-old-results', # Clobber test results before each run. '--exit-after-n-failures', '5000', '--exit-after-n-crashes-or-timeouts', '100', ] if options.results_directory: # Prior to the fix in https://bugs.webkit.org/show_bug.cgi?id=58272, # run_webkit_tests expects the results directory to be relative to # the configuration directory (e.g., src/webkit/Release). The # parameter is given to us relative to build_dir, which is where we # will run the command from. # # When 58272 is landed, run_webkit_tests will support absolute file # paths as well as paths relative to CWD for non-Chromium ports and # paths relative to the configuration dir for Chromium ports. As # a transitional fix, we convert to an absolute dir, but once the # hack in 58272 is removed, we can use results_dir as-is. 
if not os.path.isabs(options.results_directory): if options.results_directory.startswith('../../'): options.results_directory = options.results_directory[6:] options.results_directory = os.path.abspath( os.path.join(os.getcwd(), options.results_directory)) chromium_utils.RemoveDirectory(options.results_directory) command.extend(['--results-directory', options.results_directory]) if options.target: command.extend(['--target', options.target]) if options.platform: command.extend(['--platform', options.platform]) if options.no_pixel_tests: command.append('--no-pixel-tests') if options.batch_size: command.extend(['--batch-size', options.batch_size]) if options.run_part: command.extend(['--run-part', options.run_part]) if options.builder_name: command.extend(['--builder-name', options.builder_name]) if options.build_number: command.extend(['--build-number', options.build_number]) command.extend(['--master-name', slave_utils.GetActiveMaster() or '']) command.extend(['--build-name', slave_name]) # On Windows, look for the target in an exact location. if sys.platform == 'win32': command.extend(['--build-directory', build_dir]) if options.test_results_server: command.extend(['--test-results-server', options.test_results_server]) if options.enable_pageheap: command.append('--time-out-ms=120000') for filename in options.additional_expectations: command.append('--additional-expectations=%s' % filename) # The list of tests is given as arguments. command.extend(options.options.split(' ')) command.extend(args) # Nuke anything that appears to be stale chrome items in the temporary # directory from previous test runs (i.e.- from crashes or unittest leaks). 
slave_utils.RemoveChromeTemporaryFiles() try: if options.enable_pageheap: slave_utils.SetPageHeap(build_dir, 'DumpRenderTree.exe', True) # Run the the tests return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target, command) finally: if options.enable_pageheap: slave_utils.SetPageHeap(build_dir, 'DumpRenderTree.exe', False)