Example #1
  def _Run(self):
    # Build the Skia libraries in Release mode.
    os.environ['GYP_DEFINES'] = 'skia_static_initializers=0'
    shell_utils.run(['python', 'gyp_skia'])
    shell_utils.run(['make', 'skia_lib', 'BUILDTYPE=Release', '--jobs'])

    # Obtain the dump-static-initializers script.
    print 'Downloading %s' % DUMP_STATIC_INITIALIZERS_URL
    dl = urllib2.urlopen(DUMP_STATIC_INITIALIZERS_URL)
    with open(DUMP_STATIC_INITIALIZERS_FILENAME, 'wb') as f:
      f.write(dl.read())

    # Run the script over the compiled files.
    results = []
    for built_file_name in os.listdir(os.path.join('out', 'Release')):
      if built_file_name.endswith('.a') or built_file_name.endswith('.so'):
        output = shell_utils.run(['python', DUMP_STATIC_INITIALIZERS_FILENAME,
                                  os.path.join('out', 'Release',
                                               built_file_name)])
        matches = re.search(r'Found (\d+) static initializers', output)
        if matches:
          num_found = int(matches.groups()[0])
          if num_found:
            results.append((built_file_name, num_found))
    if results:
      print
      print 'Found static initializers:'
      print
      for result in results:
        print '  %s: %d' % result
      print
      # TODO(borenet): Make this an error once we have no static initializers.
      raise BuildStepWarning('Static initializers found!')
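
The parsing step above hinges on the 'Found N static initializers' summary line. A minimal sketch of that extraction in isolation, assuming the dump script emits one such line per binary; the helper name and sample text below are illustrative, not part of the original build step:

import re

def count_static_initializers(output):
  # Pull the count out of one 'Found N static initializers' summary line;
  # return 0 if no such line is present.
  match = re.search(r'Found (\d+) static initializers', output)
  return int(match.group(1)) if match else 0

sample_output = 'Found 3 static initializers\n...'  # illustrative text only
assert count_static_initializers(sample_output) == 3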
Example #2
  def _Run(self):
    script_path = run_cmd.ResolvablePath('slave', 'skia_slave_scripts', 'utils',
                                         'force_update_checkout.py')
    sync_cmd = ['python', script_path]
    results = run_cmd.run_on_all_slaves_on_all_hosts(sync_cmd)
    failed = []
    for host in results.iterkeys():
      print host
      # If results[host] is a MultiCommandResults instance, then we have results
      # for buildslaves running on that machine, which implies that we were able
      # to log in to the machine successfully.
      if isinstance(results[host], run_cmd.MultiCommandResults):
        # We successfully logged into the buildslave host machine.
        for buildslave in results[host].iterkeys():
          print ' ', buildslave,
          # Check and report the results of the command for each buildslave on
          # this host machine.
          if results[host][buildslave].returncode != 0:
            # If the command failed, print its output.
            failed.append(buildslave)
            print
            results[host][buildslave].print_results(pretty=True)
          else:
            # If the command succeeded, find and print the commit hash we synced
            # to.  If we can't find it, then something must have failed, so
            # print the output and report a failure.
            match = re.search(
                force_update_checkout.GOT_REVISION_PATTERN % (r'(\w+)'),
                results[host][buildslave].stdout)
            if match:
              print '\t%s' % match.group(1)
            else:
              failed.append(buildslave)
              print
              results[host][buildslave].print_results(pretty=True)
      else:
        # We were unable to log into the buildslave host machine.
        if results[host].returncode != 0:
          failed.append(host)
          results[host].print_results(pretty=True)
      print

    if failed:
      print
      print 'Failed to update the following buildslaves:'
      for failed_host in failed:
        print ' ', failed_host

    if failed:
      # TODO(borenet): Make sure that we can log in to all hosts, then make this
      # an error.
      raise BuildStepWarning('Could not update some buildslaves.')
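
The reporting logic above is driven by the shape of the results mapping: a host either maps to a MultiCommandResults-style mapping of buildslave names to per-command results (login succeeded), or to a single result for the host itself (login failed). A minimal sketch of the same traversal with stand-in objects; the stub class and sample data are assumptions, not the run_cmd API:

class _StubResult(object):
  # Stand-in for a single command result (returncode/stdout only).
  def __init__(self, returncode, stdout=''):
    self.returncode = returncode
    self.stdout = stdout

# host -> {buildslave -> result} when login worked, host -> result otherwise.
results = {
    'host-a': {'slave-1': _StubResult(0, 'Skia updated to abc123'),
               'slave-2': _StubResult(1)},
    'host-b': _StubResult(255),
}

failed = []
for host, host_results in results.iteritems():
  if isinstance(host_results, dict):
    failed.extend(s for s, r in host_results.iteritems() if r.returncode != 0)
  elif host_results.returncode != 0:
    failed.append(host)
print sorted(failed)  # ['host-b', 'slave-2']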
Example #3
  def DoRenderSKPs(self, args, render_mode_name, config='8888',
                   write_images=True):
    """Run render_pictures.

    Args:
      args: (list of strings) misc args to append to the command line
      render_mode_name: (string) human-readable description of this particular
          RenderSKPs run (perhaps "default", or "tiled"); used as part
          of the JSON summary filename, and also inserted within the file
      config: (string) which config to run in
      write_images: (boolean) whether to save the generated images (IGNORED)

    Raises:
      BuildStepWarning if there was a problem, but the step should keep going.
      Something else if there was a major problem and we should give up now.
    """
    json_summary_filename = JSON_SUMMARY_FILENAME_FORMATTER % render_mode_name
    json_expectations_devicepath = self._flavor_utils.DevicePathJoin(
        self._device_dirs.PlaybackExpectedSummariesDir(), json_summary_filename)
    if not self._flavor_utils.DevicePathExists(json_expectations_devicepath):
      raise BuildStepWarning('could not find JSON expectations file %s' %
                             json_expectations_devicepath)

    # TODO(stephana): We should probably start rendering whole images too, not
    # just tiles.
    cmd = [
        '--config', config,
        '--descriptions',
            '='.join([DESCRIPTION__BUILDER, self._builder_name]),
            '='.join([DESCRIPTION__RENDER_MODE, render_mode_name]),
        '--mode', 'tile', str(DEFAULT_TILE_X), str(DEFAULT_TILE_Y),
        '--readJsonSummaryPath', json_expectations_devicepath,
        '--readPath', self._device_dirs.SKPDir(),
        '--writeChecksumBasedFilenames',
        '--writeJsonSummaryPath', self._flavor_utils.DevicePathJoin(
            self._device_dirs.PlaybackActualSummariesDir(),
            json_summary_filename),
    ]
    if write_images:
      cmd.extend([
          '--mismatchPath', self._device_dirs.PlaybackActualImagesDir()])
    cmd.extend(args)

    if False:
      # For now, skip --validate on all builders, since it takes more time,
      # and at last check failed on Windows.
      if not os.name == 'nt':
        cmd.append('--validate')

    self._flavor_utils.RunFlavoredCmd('render_pictures', cmd)
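
A hedged usage sketch for DoRenderSKPs, written as a _Run method of the same build step class; the call sites, render mode names, and config value below are illustrative, not the original code:

  def _Run(self):
    # Plain tiled run against the default expectations summary.
    self.DoRenderSKPs(args=[], render_mode_name='default')
    # The same helper can be reused under another summary name and config.
    self.DoRenderSKPs(args=[], render_mode_name='deferred', config='565')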
Example #4
  def _Run(self):
    test_script_path = os.path.join('webkit', 'tools', 'layout_tests',
                                    'run_webkit_tests.sh')
    cmd = [
        test_script_path, '--build-directory', 'out', '--nocheck-sys-deps',
        '--additional-platform-directory=%s' %
            self._flavor_utils.baseline_dir,
        '--no-show-results'
    ]
    if 'new_baseline' in self._args:
      cmd.append('--new-baseline')
    if self._configuration == 'Debug':
      cmd.append('--debug')
    if 'write_results' in self._args:
      cmd.append('--results-directory=%s' % self._flavor_utils.result_dir)
    try:
      shell_utils.run(cmd)
    except Exception as e:
      # Allow this step to fail with a warning, since we expect to see a lot of
      # failures which aren't our fault. Instead, we care about the diffs.
      raise BuildStepWarning(e)
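
For concreteness, this is the command list the step above assembles on a POSIX slave when self._configuration is 'Debug' and both optional step args are present; the two directory paths are hypothetical placeholders:

# All flags below come from the step above; only the two directory paths are
# hypothetical placeholders.
cmd = ['webkit/tools/layout_tests/run_webkit_tests.sh',
       '--build-directory', 'out', '--nocheck-sys-deps',
       '--additional-platform-directory=/path/to/baseline_dir',
       '--no-show-results', '--new-baseline', '--debug',
       '--results-directory=/path/to/result_dir']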
Example #5
  def _Run(self):
    disk_usage_script = run_cmd.ResolvablePath('third_party', 'disk_usage',
                                               'disk_usage.py')
    results = run_cmd.run_on_all_slave_hosts(['python', disk_usage_script])
    failed = []
    over_threshold = False
    print 'Maximum allowed disk usage percent: %d\n' % MAX_DISK_USAGE_PERCENT
    for host in results.iterkeys():
      print host,
      got_result = True
      if results[host].returncode != 0:
        got_result = False
      else:
        try:
          percent_used = get_disk_usage_percent(results[host].stdout)
          print ': %d%%' % percent_used,
          if percent_used > MAX_DISK_USAGE_PERCENT:
            print ' (over threshold)'
            over_threshold = True
          else:
            print
        except (IndexError, ZeroDivisionError):
          got_result = False
      if not got_result:
        failed.append(host)
        print ': failed: ', results[host].stderr

    if failed:
      print
      print 'Failed to get disk usage for the following hosts:'
      for failed_host in failed:
        print ' ', failed_host

    if over_threshold:
      raise BuildStepFailure('Some hosts are over threshold.')

    if failed:
      # TODO(borenet): Make sure that we can log in to all hosts, then make this
      # an error.
      raise BuildStepWarning('Could not log in to some hosts.')
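
get_disk_usage_percent is defined elsewhere in this module. A hedged sketch of one way such a helper could work, consistent with the exceptions handled above (IndexError when fields are missing, ZeroDivisionError when the reported total is zero); the 'used total' output format of disk_usage.py is an assumption:

def get_disk_usage_percent(stdout):
  # Assumed format: the first two whitespace-separated fields are used and
  # total bytes. Raises IndexError if fields are missing and ZeroDivisionError
  # if the total is zero, matching the exceptions caught by the step above.
  fields = stdout.split()
  used, total = float(fields[0]), float(fields[1])
  return int(100 * used / total)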
Example #6
    def _Run(self):
        json_summary_path = misc.GetAbsPath(
            os.path.join(self._gm_actual_dir, run_gm.JSON_SUMMARY_FILENAME))

        # Temporary list of builders who are allowed to fail this step without the
        # bot turning red.
        may_fail_with_warning = []
        # This import must happen after BuildStep.__init__ because it requires that
        # CWD is in PYTHONPATH, and BuildStep.__init__ may change the CWD.
        from gm import display_json_results
        success = display_json_results.Display(json_summary_path)
        print('%s<a href="%s?resultsToLoad=/results/failures&builder=%s">'
              'link</a>' %
              (skia_vars.GetGlobalVariable('latest_gm_failures_preamble'),
               LIVE_REBASELINE_SERVER_BASEURL, self._builder_name))
        if not success:
            if self._builder_name in may_fail_with_warning:
                raise BuildStepWarning('Expectations mismatch in %s!' %
                                       json_summary_path)
            else:
                raise Exception('Expectations mismatch in %s!' %
                                json_summary_path)

    def _Run(self):
        with misc.ChDir(EXTERNAL_SKIA):
            # Check to see whether there is an upstream yet.
            if not UPSTREAM_REMOTE_NAME in shell_utils.run(
                [GIT, 'remote', 'show']):
                try:
                    shell_utils.run([
                        GIT, 'remote', 'add', UPSTREAM_REMOTE_NAME,
                        SKIA_REPO_URL
                    ])
                except shell_utils.CommandFailedException as e:
                    if 'remote %s already exists' % UPSTREAM_REMOTE_NAME in e.output:
                        # Accept this error. The upstream remote name should have been in
                        # the output of git remote show, which would have made us skip this
                        # redundant command anyway.
                        print(
                            '%s was already added. Why did it not show in git remote'
                            ' show?' % UPSTREAM_REMOTE_NAME)
                    else:
                        raise e

            # Update the upstream remote.
            shell_utils.run([GIT, 'fetch', UPSTREAM_REMOTE_NAME])

            # Create a stack of commits to submit, one at a time, until we reach a
            # commit that has already been merged.
            commit_stack = []
            head = git_utils.ShortHash('HEAD')

            print 'HEAD is at %s' % head

            if self._got_revision:
                # Merge the revision that started this build.
                commit = git_utils.ShortHash(self._got_revision)
            else:
                raise Exception('This build has no _got_revision to merge!')

            print(
                'Starting with %s, look for commits that have not been merged to '
                'HEAD' % commit)
            while not git_utils.AIsAncestorOfB(commit, head):
                print 'Adding %s to list of commits to merge.' % commit
                commit_stack.append(commit)
                if git_utils.IsMerge(commit):
                    # Skia's commit history is not linear. There is no obvious way to
                    # merge each branch in, one commit at a time. So just start with the
                    # merge commit.
                    print '%s is a merge. Skipping merge of its parents.' % commit
                    break
                commit = git_utils.ShortHash(commit + '~1')
            else:
                print '%s has already been merged.' % commit

            if len(commit_stack) == 0:
                raise BuildStepWarning(
                    'Nothing to merge; did someone already merge %s?'
                    ' Exiting.' % commit)

            print 'Merging %s commit(s):\n%s' % (len(commit_stack), '\n'.join(
                reversed(commit_stack)))

            # Now we have a list of commits to merge.
            while len(commit_stack) > 0:
                commit_to_merge = commit_stack.pop()

                print 'Attempting to merge ' + commit_to_merge

                # Start the merge.
                try:
                    shell_utils.run(
                        [GIT, 'merge', commit_to_merge, '--no-commit'])
                except shell_utils.CommandFailedException:
                    # Merge conflict. There may be a more elegant solution, but for now,
                    # undo the merge, and allow (/make) a human to do it.
                    git_utils.MergeAbort()
                    raise Exception(
                        'Failed to merge %s. Fall back to manual human '
                        'merge.' % commit_to_merge)

                # Grab the upstream version of SkUserConfig, which will be used to
                # generate Android's version.
                shell_utils.run([
                    GIT, 'checkout', commit_to_merge, '--',
                    UPSTREAM_USER_CONFIG
                ])

                # We don't want to commit the upstream version, so remove it from the
                # index.
                shell_utils.run([GIT, 'reset', 'HEAD', UPSTREAM_USER_CONFIG])

                # Now generate Android.mk and SkUserConfig.h
                gyp_failed = False
                try:
                    gyp_to_android.main()
                except AssertionError as e:
                    print e
                    # Failed to generate the makefiles. Make a human fix the problem.
                    git_utils.MergeAbort()
                    raise Exception(
                        'Failed to generate makefiles for %s. Fall back to '
                        'manual human merge.' % commit_to_merge)
                except SystemExit as e:
                    gyp_failed = True

                if not gyp_failed:
                    git_utils.Add('Android.mk')
                    git_utils.Add(ANDROID_USER_CONFIG)
                    git_utils.Add(os.path.join('tests', 'Android.mk'))
                    git_utils.Add(os.path.join('tools', 'Android.mk'))
                    git_utils.Add(os.path.join('bench', 'Android.mk'))
                    git_utils.Add(os.path.join('gm', 'Android.mk'))
                    git_utils.Add(os.path.join('dm', 'Android.mk'))

                # Remove upstream user config, which is no longer needed.
                os.remove(UPSTREAM_USER_CONFIG)

                # Create a new branch.
                shell_utils.run([REPO, 'start', LOCAL_BRANCH_NAME, '.'])

                try:
                    orig_msg = shell_utils.run(
                        [GIT, 'show', commit_to_merge, '--format="%s"',
                         '-s']).rstrip()
                    message = 'Merge %s into master-skia\n\n' + SKIA_REV_URL
                    if gyp_failed:
                        message += '\n\nFIXME: Failed to generate makefiles!'
                    shell_utils.run([
                        GIT, 'commit', '-m',
                        message % (orig_msg, commit_to_merge)
                    ])
                except shell_utils.CommandFailedException:
                    # It is possible that someone else already did the merge (for example,
                    # if they are testing a build slave). Clean up and exit.
                    RepoAbandon(LOCAL_BRANCH_NAME)
                    raise BuildStepWarning(
                        'Nothing to merge; did someone already merge '
                        '%s?' % commit_to_merge)

                # For some reason, sometimes the bot's authentication from sync_android
                # does not carry over to this step. Authenticate again.
                with GitAuthenticate():
                    # Now push to master-skia branch
                    try:
                        shell_utils.run(
                            [GIT, 'push', MASTER_SKIA_URL, MASTER_SKIA_REFS])
                    except shell_utils.CommandFailedException:
                        # It's possible someone submitted in between our sync and push or
                        # push failed for some other reason. Abandon and let the next
                        # attempt try again.
                        RepoAbandon(LOCAL_BRANCH_NAME)
                        raise BuildStepFailure('git push failed!')

                    # Our branch is no longer needed. Remove it.
                    shell_utils.run([REPO, 'sync', '-j32', '.'])
                    shell_utils.run([REPO, 'prune', '.'])

                # If gyp failed, this probably means there was an error in the gyp
                # files. We still want to push the commit. This way, when it gets
                # fixed with a future commit, we don't remain hung up on this one.
                if gyp_failed:
                    raise BuildStepFailure(
                        'Merged %s, but failed to generate makefiles.'
                        ' Is there a mistake in the gyp files?' %
                        commit_to_merge)
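
The commit walk above leans on a couple of git_utils helpers (ShortHash, AIsAncestorOfB). A hedged sketch of how such helpers can be built from plain git commands; these bodies are assumptions for illustration, not the actual git_utils implementations:

import subprocess

GIT = 'git'

def ShortHash(commit):
  # Resolve any commit expression (e.g. 'HEAD', 'abc123~1') to its
  # abbreviated hash.
  return subprocess.check_output([GIT, 'rev-parse', '--short', commit]).strip()

def AIsAncestorOfB(a, b):
  # 'git merge-base --is-ancestor A B' exits 0 exactly when A is an ancestor
  # of B (available in git 1.8.0 and later).
  return subprocess.call([GIT, 'merge-base', '--is-ancestor', a, b]) == 0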