Exemplo n.º 1
0
    def test_repo_url(self):
        """Use repo: URL to specify summary files."""
        repo_url = 'repo:gm/rebaseline_server/testdata/inputs/skp-summaries'
        comparisons = compare_rendered_pictures.RenderedPicturesComparisons(
            setA_dirs=[posixpath.join(repo_url, 'expectations')],
            setB_dirs=[posixpath.join(repo_url, 'actuals')],
            setA_section=gm_json.JSONKEY_EXPECTEDRESULTS,
            setB_section=gm_json.JSONKEY_ACTUALRESULTS,
            image_diff_db=imagediffdb.ImageDiffDB(self.temp_dir),
            image_base_gs_url='gs://fakebucket/fake/path',
            diff_base_url='/static/generated-images')
        comparisons.get_timestamp = mock_get_timestamp

        # Stub out the repo-revision fields, which change from one test run
        # to the next, so the output stays comparable.
        # pylint: disable=W0212
        for descriptions in (comparisons._setA_descriptions,
                             comparisons._setB_descriptions):
            descriptions[results.KEY__SET_DESCRIPTIONS__REPO_REVISION] = (
                'fake-repo-revision')

        gm_json.WriteToFile(
            comparisons.get_packaged_results_of_type(
                results.KEY__HEADER__RESULTS_ALL),
            os.path.join(self.output_dir_actual,
                         'compare_rendered_pictures.json'))
Exemplo n.º 2
0
def main():
  """Parse command-line options, compare the two configs, write JSON out."""
  logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                      datefmt='%m/%d/%Y %H:%M:%S',
                      level=logging.INFO)
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument(
      '--actuals', default=results.DEFAULT_ACTUALS_DIR,
      help='Directory containing all actual-result JSON files; defaults to '
      '\'%(default)s\' .')
  arg_parser.add_argument(
      'config', nargs=2,
      help='Two configurations to compare (8888, gpu, etc.).')
  arg_parser.add_argument(
      '--outfile', required=True,
      help='File to write result summary into, in JSON format.')
  arg_parser.add_argument(
      '--results', default=results.KEY__HEADER__RESULTS_FAILURES,
      help='Which result types to include. Defaults to \'%(default)s\'; '
      'must be one of ' +
      str([results.KEY__HEADER__RESULTS_FAILURES,
           results.KEY__HEADER__RESULTS_ALL]))
  arg_parser.add_argument(
      '--workdir', default=results.DEFAULT_GENERATED_IMAGES_ROOT,
      help='Directory within which to download images and generate diffs; '
      'defaults to \'%(default)s\' .')
  options = arg_parser.parse_args()
  comparisons = ConfigComparisons(configs=options.config,
                                  actuals_root=options.actuals,
                                  generated_images_root=options.workdir)
  # Package the requested result types and write them to the output file.
  gm_json.WriteToFile(
      comparisons.get_packaged_results_of_type(results_type=options.results),
      options.outfile)
Exemplo n.º 3
0
    def test_endToEnd(self):
        """Generate two SKP sets, run render_pictures over both, compare them."""
        # One SKP changes between the sets, one stays the same, and each set
        # has one SKP the other lacks.
        skp_contents = {
            'before_patch': {
                'changed.skp': 200,
                'unchanged.skp': 100,
                'only-in-before.skp': 128,
            },
            'after_patch': {
                'changed.skp': 201,
                'unchanged.skp': 100,
                'only-in-after.skp': 128,
            },
        }
        for subdir in ('before_patch', 'after_patch'):
            self._generate_skps_and_run_render_pictures(
                subdir=subdir, skpdict=skp_contents[subdir])

        comparisons = compare_rendered_pictures.RenderedPicturesComparisons(
            actuals_root=self._temp_dir,
            subdirs=('before_patch', 'after_patch'),
            generated_images_root=self._temp_dir,
            diff_base_url='/static/generated-images')
        # Pin the timestamp so the emitted JSON is stable across runs.
        comparisons.get_timestamp = mock_get_timestamp

        gm_json.WriteToFile(
            comparisons.get_packaged_results_of_type(
                results.KEY__HEADER__RESULTS_ALL),
            os.path.join(self._output_dir_actual,
                         'compare_rendered_pictures.json'))
Exemplo n.º 4
0
    def RebaselineSubdir(self, subdir, builder):
        """Rebaseline the expectations for a single subdir/builder pair.

        Reads the actual-results summary for this subdir/builder, updates the
        in-memory expectations accordingly (honoring any test/config filters),
        and writes the expectations file back out.

        Args:
          subdir: expectations subdirectory to update
          builder: name of the builder whose actual results we read
        """
        # Read in the actual result summary, and extract all the tests whose
        # results we need to update.
        actuals_url = '/'.join([
            self._actuals_base_url, subdir, builder, subdir,
            self._actuals_filename
        ])
        # In most cases, we won't need to re-record results that are already
        # succeeding, but including the SUCCEEDED results will allow us to
        # re-record expectations if they somehow get out of sync.
        sections = [
            gm_json.JSONKEY_ACTUALRESULTS_FAILED,
            gm_json.JSONKEY_ACTUALRESULTS_SUCCEEDED
        ]
        if self._add_new:
            sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON)
        results_to_update = self._GetActualResults(json_url=actuals_url,
                                                   sections=sections)

        # Read in current expectations.
        expectations_input_filepath = os.path.join(
            self._expectations_root, subdir, self._expectations_input_filename)
        expectations_dict = gm_json.LoadFromFile(expectations_input_filepath)
        expected_results = expectations_dict[gm_json.JSONKEY_EXPECTEDRESULTS]

        # Update the expectations in memory, skipping any tests/configs that
        # the caller asked to exclude.
        skipped_images = []
        if results_to_update:
            # items() (rather than Python-2-only iteritems()) keeps this code
            # portable to Python 3; on Python 2 it merely builds a small list.
            for (image_name, image_results) in results_to_update.items():
                (test,
                 config) = self._image_filename_re.match(image_name).groups()
                if self._tests and test not in self._tests:
                    skipped_images.append(image_name)
                    continue
                if self._configs and config not in self._configs:
                    skipped_images.append(image_name)
                    continue
                if not expected_results.get(image_name):
                    expected_results[image_name] = {}
                expected_results[image_name][
                    gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS] = \
                        [image_results]

        # Write out updated expectations.
        expectations_output_filepath = os.path.join(
            self._expectations_root, subdir,
            self._expectations_output_filename)
        gm_json.WriteToFile(expectations_dict, expectations_output_filepath)

        # For parity with the other RebaselineSubdir variants, report any
        # images skipped due to test/config filters.
        if skipped_images:
            print('Skipped these tests due to test/config filters: %s' %
                  skipped_images)

        # Mark the JSON file as plaintext, so text-style diffs can be applied.
        # Fixes https://code.google.com/p/skia/issues/detail?id=1442
        if self._using_svn:
            self._Call([
                'svn', 'propset', '--quiet', 'svn:mime-type', 'text/x-json',
                expectations_output_filepath
            ])
Exemplo n.º 5
0
 def test_gm(self):
     """Process results of a GM run with the Results object."""
     # Build a Results object from the canned actual/expected GM data in the
     # test input directory; generated diff images go into a temp dir.
     results_obj = results.Results(
         actuals_root=os.path.join(self._input_dir, 'gm-actuals'),
         expected_root=os.path.join(self._input_dir, 'gm-expectations'),
         generated_images_root=self._temp_dir)
     # Pin the timestamp so the emitted JSON is stable across test runs.
     results_obj.get_timestamp = mock_get_timestamp
     # Package ALL results (not just failures) and write them out as JSON.
     gm_json.WriteToFile(
         results_obj.get_packaged_results_of_type(
             results.KEY__HEADER__RESULTS_ALL),
         os.path.join(self._output_dir_actual, 'gm.json'))
Exemplo n.º 6
0
 def test_gm(self):
     """Process results of a GM run with the ConfigComparisons object."""
     # Compare the '8888' and 'gpu' configs within the canned GM actuals.
     results_obj = compare_configs.ConfigComparisons(
         configs=('8888', 'gpu'),
         actuals_root=os.path.join(self._input_dir, 'gm-actuals'),
         generated_images_root=self._temp_dir,
         diff_base_url='/static/generated-images')
     # Pin the timestamp so the emitted JSON is stable across test runs.
     results_obj.get_timestamp = mock_get_timestamp
     # Package ALL results (not just failures) and write them out as JSON.
     gm_json.WriteToFile(
         results_obj.get_packaged_results_of_type(
             results.KEY__HEADER__RESULTS_ALL),
         os.path.join(self._output_dir_actual, 'gm.json'))
Exemplo n.º 7
0
 def test_gm(self):
     """Process results of a GM run with the ExpectationComparisons object."""
     # Image diffs are stored in a temp-dir-backed ImageDiffDB.
     image_diff_db = imagediffdb.ImageDiffDB(storage_root=self.temp_dir)
     results_obj = compare_to_expectations.ExpectationComparisons(
         image_diff_db=image_diff_db,
         actuals_root=os.path.join(self.input_dir, 'gm-actuals'),
         expected_root=os.path.join(self.input_dir, 'gm-expectations'),
         diff_base_url='/static/generated-images')
     # Pin the timestamp so the emitted JSON is stable across test runs.
     results_obj.get_timestamp = mock_get_timestamp
     # Package ALL results (not just failures) and write them out as JSON.
     gm_json.WriteToFile(
         results_obj.get_packaged_results_of_type(
             results.KEY__HEADER__RESULTS_ALL),
         os.path.join(self.output_dir_actual, 'gm.json'))
  def _write_dicts_to_root(meta_dict, root):
    """Write out multiple dictionaries in JSON format.

    Args:
      meta_dict: a builder-keyed meta-dictionary containing all the JSON
                 dictionaries we want to write out
      root: path to root of directory tree within which to write files

    Raises:
      IOError: root does not refer to an existing directory
    """
    if not os.path.isdir(root):
      raise IOError('no directory found at path %s' % root)

    # Each key is a path relative to root; write its dictionary there.
    for rel_path, json_dict in meta_dict.items():
      gm_json.WriteToFile(json_dict, os.path.join(root, rel_path))
Exemplo n.º 9
0
    def _write_dicts_to_root(meta_dict, root, pattern='*.json'):
        """Write each per-builder dictionary in meta_dict into the root tree.

        Security note: this only writes to files that already exist within
        the root path (as found by os.walk() within root), so malformed
        content cannot cause writes outside of root.  The data written to
        those files is not double-checked, though, so it could contain
        poisonous data.

        Args:
          meta_dict: a builder-keyed meta-dictionary containing all the JSON
                     dictionaries we want to write out
          root: path to root of directory tree within which to write files
          pattern: which files to write within root (fnmatch-style pattern)

        Raises:
          IOError: root does not refer to an existing directory
          KeyError: the set of per-builder dictionaries written out was
                    different than expected
        """
        if not os.path.isdir(root):
            raise IOError('no directory found at path %s' % root)
        builders_written = []
        for dirpath, _, filenames in os.walk(root):
            builder = os.path.basename(dirpath)
            # We should never encounter Trybot *expectations*, but if we are
            # writing into the actual-results dir, skip the Trybot actuals.
            # (I don't know why we would ever write into the actual-results
            # dir, though.)
            if builder.endswith('-Trybot'):
                continue
            for filename in fnmatch.filter(filenames, pattern):
                per_builder_dict = meta_dict.get(builder)
                if per_builder_dict is not None:
                    gm_json.WriteToFile(per_builder_dict,
                                        os.path.join(dirpath, filename))
                    builders_written.append(builder)

        # Check: did we write out the set of per-builder dictionaries we
        # expected to?
        expected_builders = sorted(meta_dict.keys())
        builders_written.sort()
        if expected_builders != builders_written:
            raise KeyError(
                'expected to write dicts for builders %s, but actually wrote them '
                'for builders %s' %
                (expected_builders, builders_written))
Exemplo n.º 10
0
def main():
    """Parse options, compare actuals against expectations, write JSON out."""
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO)
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--actuals', default=results.DEFAULT_ACTUALS_DIR,
        help='Directory containing all actual-result JSON files; defaults to '
        '\'%(default)s\' .')
    arg_parser.add_argument(
        '--expectations', default=DEFAULT_EXPECTATIONS_DIR,
        help='Directory containing all expected-result JSON files; defaults to '
        '\'%(default)s\' .')
    arg_parser.add_argument(
        '--ignore-failures-file', default=DEFAULT_IGNORE_FAILURES_FILE,
        help='If a file with this name is found within the EXPECTATIONS dir, '
        'ignore failures for any tests listed in the file; defaults to '
        '\'%(default)s\' .')
    arg_parser.add_argument(
        '--outfile', required=True,
        help='File to write result summary into, in JSON format.')
    arg_parser.add_argument(
        '--results', default=results.KEY__HEADER__RESULTS_FAILURES,
        help='Which result types to include. Defaults to \'%(default)s\'; '
        'must be one of ' + str([
            results.KEY__HEADER__RESULTS_FAILURES,
            results.KEY__HEADER__RESULTS_ALL
        ]))
    arg_parser.add_argument(
        '--workdir', default=results.DEFAULT_GENERATED_IMAGES_ROOT,
        help='Directory within which to download images and generate diffs; '
        'defaults to \'%(default)s\' .')
    options = arg_parser.parse_args()
    comparisons = ExpectationComparisons(
        image_diff_db=imagediffdb.ImageDiffDB(storage_root=options.workdir),
        actuals_root=options.actuals,
        expected_root=options.expectations,
        ignore_failures_file=options.ignore_failures_file)
    # Package the requested result types and write them to the output file.
    gm_json.WriteToFile(
        comparisons.get_packaged_results_of_type(results_type=options.results),
        options.outfile)
Exemplo n.º 11
0
 def test_endToEnd(self):
   """Compare results of two render_pictures runs."""
   # TODO(epoger): Specify image_base_url pointing at the directory on local
   # disk containing our test images, so that we can actually compute pixel
   # diffs.  For now, this test attempts to download images from
   # DEFAULT_IMAGE_BASE_URL, and there aren't any there yet.
   # Compare the canned before_patch/after_patch render_pictures outputs.
   results_obj = compare_rendered_pictures.RenderedPicturesComparisons(
       actuals_root=os.path.join(self._input_dir, 'render_pictures_output'),
       subdirs=('before_patch', 'after_patch'),
       generated_images_root=self._temp_dir,
       diff_base_url='/static/generated-images')
   # Pin the timestamp so the emitted JSON is stable across test runs.
   results_obj.get_timestamp = mock_get_timestamp
   # Package ALL results (not just failures) and write them out as JSON.
   gm_json.WriteToFile(
       results_obj.get_packaged_results_of_type(
           results.KEY__HEADER__RESULTS_ALL),
       os.path.join(self._output_dir_actual, 'compare_rendered_pictures.json'))
Exemplo n.º 12
0
    def RebaselineSubdir(self, subdir, builder):
        """Rebaseline the expectations for one subdir/builder pair.

        Downloads the actual-results summary for this subdir/builder, updates
        the in-memory expectations accordingly (honoring any test/config
        filters), and rewrites the expectations file in place.

        Args:
          subdir: expectations subdirectory to update
          builder: name of the builder whose actual results we read
        """
        # Read in the actual result summary, and extract all the tests whose
        # results we need to update.
        actuals_url = '/'.join([
            self._actuals_base_url, subdir, builder, subdir,
            self._actuals_filename
        ])
        sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED]
        if self._add_new:
            sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON)
        results_to_update = self._GetActualResults(json_url=actuals_url,
                                                   sections=sections)

        # Read in current expectations.
        expectations_json_filepath = os.path.join(self._expectations_root,
                                                  subdir,
                                                  self._expectations_filename)
        expectations_dict = gm_json.LoadFromFile(expectations_json_filepath)

        # Update the expectations in memory, skipping any tests/configs that
        # the caller asked to exclude.
        skipped_images = []
        if results_to_update:
            # items() (rather than Python-2-only iteritems()) keeps this code
            # portable to Python 3; on Python 2 it merely builds a small list.
            for (image_name, image_results) in results_to_update.items():
                (test,
                 config) = self._testname_pattern.match(image_name).groups()
                if self._tests and test not in self._tests:
                    skipped_images.append(image_name)
                    continue
                if self._configs and config not in self._configs:
                    skipped_images.append(image_name)
                    continue
                expectations_dict[gm_json.JSONKEY_EXPECTEDRESULTS] \
                                 [image_name] \
                                 [gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS] = \
                                     [image_results]

        # Write out updated expectations.
        gm_json.WriteToFile(expectations_dict, expectations_json_filepath)

        if skipped_images:
            print('Skipped these tests due to test/config filters: %s' %
                  skipped_images)
Exemplo n.º 13
0
    def test_endToEnd(self):
        """Generate two sets of SKPs, run render_pictures over both, and compare
    the results."""
        setA_subdir = 'before_patch'
        setB_subdir = 'after_patch'
        # Render two SKP sets: one SKP changes between them, one stays the
        # same, and each set has one SKP the other lacks — exercising the
        # changed / unchanged / added / removed comparison cases.
        self._generate_skps_and_run_render_pictures(subdir=setA_subdir,
                                                    skpdict={
                                                        'changed.skp': 200,
                                                        'unchanged.skp': 100,
                                                        'only-in-before.skp':
                                                        128,
                                                    })
        self._generate_skps_and_run_render_pictures(subdir=setB_subdir,
                                                    skpdict={
                                                        'changed.skp': 201,
                                                        'unchanged.skp': 100,
                                                        'only-in-after.skp':
                                                        128,
                                                    })

        results_obj = compare_rendered_pictures.RenderedPicturesComparisons(
            setA_dirs=[os.path.join(self.temp_dir, setA_subdir)],
            setB_dirs=[os.path.join(self.temp_dir, setB_subdir)],
            setA_section=gm_json.JSONKEY_ACTUALRESULTS,
            setB_section=gm_json.JSONKEY_ACTUALRESULTS,
            image_diff_db=imagediffdb.ImageDiffDB(self.temp_dir),
            image_base_gs_url='gs://fakebucket/fake/path',
            diff_base_url='/static/generated-images')
        # Pin the timestamp so the emitted JSON is stable across test runs.
        results_obj.get_timestamp = mock_get_timestamp

        # Overwrite elements within the results that change from one test run
        # to the next.
        # pylint: disable=W0212
        results_obj._setA_descriptions[results.KEY__SET_DESCRIPTIONS__DIR] = [
            'before-patch-fake-dir'
        ]
        results_obj._setB_descriptions[results.KEY__SET_DESCRIPTIONS__DIR] = [
            'after-patch-fake-dir'
        ]

        # Package ALL results (not just failures) and write them out as JSON.
        gm_json.WriteToFile(
            results_obj.get_packaged_results_of_type(
                results.KEY__HEADER__RESULTS_ALL),
            os.path.join(self.output_dir_actual,
                         'compare_rendered_pictures.json'))
    def _set_expected_hash(self, device_name, image_name, hash_value):
        """Record hash_value as the expected hash for one device's image.

        This always writes directly to the expected results file of the
        given device.

        @param device_name The name of the device to write the hash to.
        @param image_name  The name of the image whose hash to set.
        @param hash_value  The value of the hash to set.
        """
        # Load the device's expected-results file as it is in the working tree.
        expectations_path = os.path.join(self._expectations_dir, device_name,
                                         self._expected_name)
        expectations = gm_json.LoadFromFile(expectations_path)

        # Set the specified hash, then write the file back out via gm_json so
        # the formatting stays consistent.
        set_expected_hash_in_json(expectations, image_name, hash_value)
        gm_json.WriteToFile(expectations, expectations_path)
Exemplo n.º 15
0
    def _duplicate_config(self, path, old, new):
        """Duplicates all instances of a config within a GM expectations file.

        For every expectation keyed on the old config, adds an identical
        expectation keyed on the new config; the file is modified in place.

        Params:
          path: path to file which will be modified in place
          old: old config name
          new: new config name
        """
        dic = gm_json.LoadFromFile(file_path=path)
        expected_results = dic[gm_json.JSONKEY_EXPECTEDRESULTS]
        # Snapshot the keys before inserting new entries: on Python 3,
        # mutating a dict while iterating a live keys() view raises
        # RuntimeError (Python 2's keys() already returned a copy).
        for key in list(expected_results.keys()):
            result = expected_results[key]
            (testname, config) = IMAGE_FILENAME_RE.match(key).groups()
            if config == old:
                new_key = '%s_%s.png' % (testname, new)
                expected_results[new_key] = result
        gm_json.WriteToFile(json_dict=dic, file_path=path)
Exemplo n.º 16
0
def Reformat(filename):
  """Rewrite a JSON file in place using gm_json's canonical formatting."""
  print('Reformatting file %s...' % filename)
  contents = gm_json.LoadFromFile(filename)
  gm_json.WriteToFile(contents, filename)
Exemplo n.º 17
0
    def update_results(self, invalidate=False):
        """ Create or update self._results, based on the latest expectations and
    actuals.

    We hold self.results_rlock while we do this, to guarantee that no other
    thread attempts to update either self._results or the underlying files at
    the same time.

    Args:
      invalidate: if True, invalidate self._results immediately upon entry;
                  otherwise, we will let readers see those results until we
                  replace them
    """
        with self.results_rlock:
            if invalidate:
                self._results = None
            # Sync the actual-results checkout to the configured revision, if
            # an actuals repo was configured.
            if self._actuals_repo_url:
                logging.info(
                    'Updating actual GM results in %s to revision %s from repo %s ...'
                    % (self._actuals_dir, self._actuals_repo_revision,
                       self._actuals_repo_url))
                self._actuals_repo.Update(path='.',
                                          revision=self._actuals_repo_revision)

            # We only update the expectations dir if the server was run with a
            # nonzero --reload argument; otherwise, we expect the user to maintain
            # her own expectations as she sees fit.
            #
            # Because the Skia repo is moving from SVN to git, and git does not
            # support updating a single directory tree, we have to update the entire
            # repo checkout.
            #
            # Because Skia uses depot_tools, we have to update using "gclient sync"
            # instead of raw git (or SVN) update.  Happily, this will work whether
            # the checkout was created using git or SVN.
            if self._reload_seconds:
                logging.info(
                    'Updating expected GM results in %s by syncing Skia repo ...'
                    % compare_to_expectations.DEFAULT_EXPECTATIONS_DIR)
                _run_command(['gclient', 'sync'], TRUNK_DIRECTORY)

            # Rebuild the comparison object against the (possibly just
            # updated) actuals and expectations.
            self._results = compare_to_expectations.ExpectationComparisons(
                actuals_root=self._actuals_dir,
                generated_images_root=os.path.join(PARENT_DIRECTORY,
                                                   STATIC_CONTENTS_SUBDIR,
                                                   GENERATED_IMAGES_SUBDIR),
                diff_base_url=posixpath.join(os.pardir, STATIC_CONTENTS_SUBDIR,
                                             GENERATED_IMAGES_SUBDIR),
                builder_regex_list=self._builder_regex_list)

            json_dir = os.path.join(PARENT_DIRECTORY, STATIC_CONTENTS_SUBDIR,
                                    GENERATED_JSON_SUBDIR)
            if not os.path.isdir(json_dir):
                os.makedirs(json_dir)

            # Also write a static JSON summary file for every configured
            # config pair and summary type (e.g. "8888-vs-gpu_all.json").
            for config_pair in self._config_pairs:
                config_comparisons = compare_configs.ConfigComparisons(
                    configs=config_pair,
                    actuals_root=self._actuals_dir,
                    generated_images_root=os.path.join(
                        PARENT_DIRECTORY, STATIC_CONTENTS_SUBDIR,
                        GENERATED_IMAGES_SUBDIR),
                    diff_base_url=posixpath.join(os.pardir,
                                                 GENERATED_IMAGES_SUBDIR),
                    builder_regex_list=self._builder_regex_list)
                for summary_type in SUMMARY_TYPES:
                    gm_json.WriteToFile(
                        config_comparisons.get_packaged_results_of_type(
                            results_type=summary_type),
                        os.path.join(
                            json_dir, '%s-vs-%s_%s.json' %
                            (config_pair[0], config_pair[1], summary_type)))
Exemplo n.º 18
0
    def RebaselineSubdir(self, builder):
        """Rebaseline the expectations for a single builder.

        Reads the actual-results summary for the builder, updates the
        in-memory expectations (allowed digests plus any optional
        reviewed/bugs/notes annotations), and writes the expectations file
        back out.

        Args:
          builder: name of the builder whose actual results we read
        """
        # Read in the actual result summary, and extract all the tests whose
        # results we need to update.
        actuals_url = '/'.join(
            [self._actuals_base_url, builder, self._actuals_filename])
        # Only update results for tests that are currently failing.
        # We don't want to rewrite results for tests that are already succeeding,
        # because we don't want to add annotation fields (such as
        # JSONKEY_EXPECTEDRESULTS_BUGS) except for tests whose expectations we
        # are actually modifying.
        sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED]
        if self._add_new:
            sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON)
        results_to_update = self._GetActualResults(json_url=actuals_url,
                                                   sections=sections)

        # Read in current expectations, creating the expected-results section
        # if it is missing or empty.
        expectations_input_filepath = os.path.join(
            self._expectations_root, builder,
            self._expectations_input_filename)
        expectations_dict = gm_json.LoadFromFile(expectations_input_filepath)
        expected_results = expectations_dict.get(
            gm_json.JSONKEY_EXPECTEDRESULTS)
        if not expected_results:
            expected_results = {}
            expectations_dict[
                gm_json.JSONKEY_EXPECTEDRESULTS] = expected_results

        # Update the expectations in memory, skipping any tests/configs that
        # the caller asked to exclude.
        skipped_images = []
        if results_to_update:
            # items() (rather than Python-2-only iteritems()) keeps this code
            # portable to Python 3; on Python 2 it merely builds a small list.
            for (image_name, image_results) in results_to_update.items():
                (test,
                 config) = self._image_filename_re.match(image_name).groups()
                if self._tests and test not in self._tests:
                    skipped_images.append(image_name)
                    continue
                if self._configs and config not in self._configs:
                    skipped_images.append(image_name)
                    continue
                if not expected_results.get(image_name):
                    expected_results[image_name] = {}
                entry = expected_results[image_name]
                entry[gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS] = \
                    [image_results]
                if self._mark_unreviewed:
                    entry[gm_json.JSONKEY_EXPECTEDRESULTS_REVIEWED] = False
                if self._bugs:
                    entry[gm_json.JSONKEY_EXPECTEDRESULTS_BUGS] = self._bugs
                if self._notes:
                    entry[gm_json.JSONKEY_EXPECTEDRESULTS_NOTES] = self._notes

        # Write out updated expectations.
        expectations_output_filepath = os.path.join(
            self._expectations_root, builder,
            self._expectations_output_filename)
        gm_json.WriteToFile(expectations_dict, expectations_output_filepath)

        # For parity with the other RebaselineSubdir variants, report any
        # images skipped due to test/config filters.
        if skipped_images:
            print('Skipped these tests due to test/config filters: %s' %
                  skipped_images)

        # Mark the JSON file as plaintext, so text-style diffs can be applied.
        # Fixes https://code.google.com/p/skia/issues/detail?id=1442
        if self._using_svn:
            self._Call([
                'svn', 'propset', '--quiet', 'svn:mime-type', 'text/x-json',
                expectations_output_filepath
            ])
Exemplo n.º 19
0
File: server.py Project: elima/skia
    def update_results(self, invalidate=False):
        """ Create or update self._results, based on the latest expectations and
    actuals.

    We hold self.results_rlock while we do this, to guarantee that no other
    thread attempts to update either self._results or the underlying files at
    the same time.

    Args:
      invalidate: if True, invalidate self._results immediately upon entry;
                  otherwise, we will let readers see those results until we
                  replace them
    """
        with self.results_rlock:
            if invalidate:
                self._results = None
            if self._gm_summaries_bucket:
                logging.info(
                    'Updating GM result summaries in %s from gm_summaries_bucket %s ...'
                    % (self._actuals_dir, self._gm_summaries_bucket))

                # Clean out actuals_dir first, in case some builders have gone away
                # since we last ran.
                if os.path.isdir(self._actuals_dir):
                    # Wipe any previously downloaded actuals so this fetch
                    # starts from a clean slate (no stale builder results).
                    shutil.rmtree(self._actuals_dir)

                # Get the list of builders we care about.
                all_builders = download_actuals.get_builders_list(
                    summaries_bucket=self._gm_summaries_bucket)
                if self._builder_regex_list:
                    # Keep only the builders matching at least one of the
                    # user-supplied regexes.  Note that re.match anchors at
                    # the START of the builder name only.
                    matching_builders = []
                    for builder in all_builders:
                        for regex in self._builder_regex_list:
                            if re.match(regex, builder):
                                matching_builders.append(builder)
                                break  # go on to the next builder, no need to try more regexes
                else:
                    # No regex filter supplied: consider every builder.
                    matching_builders = all_builders

                # Download the JSON file for each builder we care about.
                #
                # TODO(epoger): When this is a large number of builders, we would be
                # better off downloading them in parallel!
                for builder in matching_builders:
                    self._gs.download_file(
                        source_bucket=self._gm_summaries_bucket,
                        source_path=posixpath.join(builder,
                                                   self._json_filename),
                        dest_path=os.path.join(self._actuals_dir, builder,
                                               self._json_filename),
                        create_subdirs_if_needed=True)

            # We only update the expectations dir if the server was run with a
            # nonzero --reload argument; otherwise, we expect the user to maintain
            # her own expectations as she sees fit.
            #
            # Because the Skia repo is hosted using git, and git does not
            # support updating a single directory tree, we have to update the entire
            # repo checkout.
            #
            # Because Skia uses depot_tools, we have to update using "gclient sync"
            # instead of raw git commands.
            #
            # TODO(epoger): Fetch latest expectations in some other way.
            # Eric points out that our official documentation recommends an
            # unmanaged Skia checkout, so "gclient sync" will not bring down updated
            # expectations from origin/master-- you'd have to do a "git pull" of
            # some sort instead.
            # However, the live rebaseline_server at
            # http://skia-tree-status.appspot.com/redirect/rebaseline-server (which
            # is probably the only user of the --reload flag!) uses a managed
            # checkout, so "gclient sync" works in that case.
            # Probably the best idea is to avoid all of this nonsense by fetching
            # updated expectations into a temp directory, and leaving the rest of
            # the checkout alone.  This could be done using "git show", or by
            # downloading individual expectation JSON files from
            # skia.googlesource.com .
            if self._reload_seconds:
                logging.info(
                    'Updating expected GM results in %s by syncing Skia repo ...'
                    % compare_to_expectations.DEFAULT_EXPECTATIONS_DIR)
                _run_command(['gclient', 'sync'], TRUNK_DIRECTORY)

            # Compare the freshly downloaded actuals against expectations.
            # diff_base_url is deliberately a RELATIVE url
            # (../static/generated-images) — presumably so it resolves
            # wherever the server is mounted; TODO confirm against callers.
            self._results = compare_to_expectations.ExpectationComparisons(
                image_diff_db=self._image_diff_db,
                actuals_root=self._actuals_dir,
                diff_base_url=posixpath.join(os.pardir, STATIC_CONTENTS_SUBDIR,
                                             GENERATED_IMAGES_SUBDIR),
                builder_regex_list=self._builder_regex_list)

            # Ensure the output directory for pre-generated JSON summaries
            # exists before writing into it.
            json_dir = os.path.join(PARENT_DIRECTORY, STATIC_CONTENTS_SUBDIR,
                                    GENERATED_JSON_SUBDIR)
            if not os.path.isdir(json_dir):
                os.makedirs(json_dir)

            # For every requested pair of configs (e.g. 8888 vs gpu), write
            # one JSON summary file per summary type, named
            # "<configA>-vs-<configB>_<summary_type>.json".
            for config_pair in self._config_pairs:
                config_comparisons = compare_configs.ConfigComparisons(
                    configs=config_pair,
                    actuals_root=self._actuals_dir,
                    generated_images_root=os.path.join(
                        PARENT_DIRECTORY, STATIC_CONTENTS_SUBDIR,
                        GENERATED_IMAGES_SUBDIR),
                    diff_base_url=posixpath.join(os.pardir,
                                                 GENERATED_IMAGES_SUBDIR),
                    builder_regex_list=self._builder_regex_list)
                for summary_type in _GM_SUMMARY_TYPES:
                    gm_json.WriteToFile(
                        config_comparisons.get_packaged_results_of_type(
                            results_type=summary_type),
                        os.path.join(
                            json_dir, '%s-vs-%s_%s.json' %
                            (config_pair[0], config_pair[1], summary_type)))
    def test_endToEnd_withImageBaseGSUrl(self):
        """Generate two sets of SKPs, render both, and compare the results.

        Each rendered set is assigned its own Google Storage base location;
        this test asserts that those locations are reflected as the baseURLs
        of the image sets within the packaged comparison results.
        """
        setA_subdir = 'before_patch'
        setB_subdir = 'after_patch'
        imageA_gs_base = 'superman/kent-camera/pictures'
        imageB_gs_base = 'batman/batarang/pictures'
        # Render the "before" and "after" sets: 'changed.skp' renders
        # differently between the two, and each set contains one SKP that
        # the other lacks.
        self._generate_skps_and_run_render_pictures(
            subdir=setA_subdir,
            skpdict={
                'changed.skp': 200,
                'unchanged.skp': 100,
                'only-in-before.skp': 128,
            },
            image_base_gs_url='gs://%s' % imageA_gs_base)
        self._generate_skps_and_run_render_pictures(
            subdir=setB_subdir,
            skpdict={
                'changed.skp': 201,
                'unchanged.skp': 100,
                'only-in-after.skp': 128,
            },
            image_base_gs_url='gs://%s' % imageB_gs_base)

        results_obj = compare_rendered_pictures.RenderedPicturesComparisons(
            setA_dir=os.path.join(self.temp_dir, setA_subdir),
            setB_dir=os.path.join(self.temp_dir, setB_subdir),
            setA_section=gm_json.JSONKEY_ACTUALRESULTS,
            setB_section=gm_json.JSONKEY_ACTUALRESULTS,
            image_diff_db=imagediffdb.ImageDiffDB(self.temp_dir),
            image_base_gs_url='gs://fakebucket/fake/path',
            diff_base_url='/static/generated-images')
        # Pin the timestamp so the written JSON is stable across test runs.
        results_obj.get_timestamp = mock_get_timestamp

        output_dict = results_obj.get_packaged_results_of_type(
            results.KEY__HEADER__RESULTS_ALL)
        # Assert that the baseURLs are as expected.
        # (assertEqual, not the deprecated assertEquals alias that was
        # removed in Python 3.12.)
        self.assertEqual(
            output_dict[imagepairset.KEY__ROOT__IMAGESETS][
                imagepairset.KEY__IMAGESETS__SET__IMAGE_A][
                    imagepairset.KEY__IMAGESETS__FIELD__BASE_URL],
            'http://storage.cloud.google.com/%s' % imageA_gs_base)
        self.assertEqual(
            output_dict[imagepairset.KEY__ROOT__IMAGESETS][
                imagepairset.KEY__IMAGESETS__SET__IMAGE_B][
                    imagepairset.KEY__IMAGESETS__FIELD__BASE_URL],
            'http://storage.cloud.google.com/%s' % imageB_gs_base)
        # Overwrite elements within the results that change from one test run
        # to the next (the temp-dir paths of the two sets).
        # pylint: disable=W0212
        results_obj._setA_descriptions[results.KEY__SET_DESCRIPTIONS__DIR] = [
            'before-patch-fake-dir'
        ]
        results_obj._setB_descriptions[results.KEY__SET_DESCRIPTIONS__DIR] = [
            'after-patch-fake-dir'
        ]

        gm_json.WriteToFile(
            output_dict,
            os.path.join(self.output_dir_actual,
                         'compare_rendered_pictures.json'))