def _read_builder_dicts_from_root(self, root, pattern='*.json'):
    """Read all JSON dictionaries within a directory tree.

    Skips any dictionaries belonging to a builder we have chosen to ignore.

    Args:
      root: path to root of directory tree
      pattern: which files to read within root (fnmatch-style pattern)

    Returns:
      A meta-dictionary containing all the JSON dictionaries found within
      the directory tree, keyed by builder name (the basename of the
      directory where each JSON dictionary was found).

    Raises:
      IOError if root does not refer to an existing directory
    """
    # I considered making this call _read_dicts_from_root(), but I decided
    # it was better to prune out the ignored builders within the os.walk().
    if not os.path.isdir(root):
        raise IOError('no directory found at path %s' % root)
    dicts_by_builder = {}
    for current_dir, _, files_in_dir in os.walk(root):
        # The builder name is the basename of the directory the file sits in.
        builder = os.path.basename(current_dir)
        for json_filename in fnmatch.filter(files_in_dir, pattern):
            if self._ignore_builder(builder):
                continue
            dicts_by_builder[builder] = gm_json.LoadFromFile(
                os.path.join(current_dir, json_filename))
    return dicts_by_builder
def _read_dicts_from_root(self, root, pattern='*.json'):
    """Read all JSON dictionaries within a directory tree.

    Skips any dictionaries belonging to a builder we have chosen to ignore.

    Args:
      root: path to root of directory tree
      pattern: which files to read within root (fnmatch-style pattern)

    Returns:
      A meta-dictionary containing all the JSON dictionaries found within
      the directory tree, keyed by the builder name of each dictionary.

    Raises:
      IOError if root does not refer to an existing directory
    """
    if not os.path.isdir(root):
        raise IOError('no directory found at path %s' % root)
    meta_dict = {}
    # Bind the unused dirnames component of os.walk() to _, matching the
    # convention used by the sibling _read_*_from_root variants.
    for dirpath, _, filenames in os.walk(root):
        for matching_filename in fnmatch.filter(filenames, pattern):
            builder = os.path.basename(dirpath)
            if self._ignore_builder(builder):
                continue
            fullpath = os.path.join(dirpath, matching_filename)
            meta_dict[builder] = gm_json.LoadFromFile(fullpath)
    return meta_dict
def _read_dicts_from_root(self, root, pattern='*.json'):
    """Read all JSON dictionaries within a directory tree.

    Args:
      root: path to root of directory tree
      pattern: which files to read within root (fnmatch-style pattern)

    Returns:
      A meta-dictionary containing all the JSON dictionaries found within
      the directory tree, keyed by the pathname (relative to root) of each
      JSON dictionary.

    Raises:
      IOError if root does not refer to an existing directory
    """
    if not os.path.isdir(root):
        raise IOError('no directory found at path %s' % root)
    found_dicts = {}
    for abs_dirpath, _, filenames in os.walk(root):
        # Key each dictionary by its path relative to root.
        rel_dirpath = os.path.relpath(abs_dirpath, root)
        for filename in fnmatch.filter(filenames, pattern):
            rel_path = os.path.join(rel_dirpath, filename)
            found_dicts[rel_path] = gm_json.LoadFromFile(
                os.path.join(abs_dirpath, filename))
    return found_dicts
def _read_dicts_from_root(root, pattern='*.json'):
    """Read all JSON dictionaries within a directory tree.

    Skips any dictionaries belonging to Trybot builders (those whose builder
    name ends in '-Trybot'), since we don't maintain baselines for them.

    Args:
      root: path to root of directory tree
      pattern: which files to read within root (fnmatch-style pattern)

    Returns:
      A meta-dictionary containing all the JSON dictionaries found within
      the directory tree, keyed by the builder name of each dictionary.

    Raises:
      IOError if root does not refer to an existing directory
    """
    if not os.path.isdir(root):
        raise IOError('no directory found at path %s' % root)
    meta_dict = {}
    # Bind the unused dirnames component of os.walk() to _.
    for dirpath, _, filenames in os.walk(root):
        for matching_filename in fnmatch.filter(filenames, pattern):
            builder = os.path.basename(dirpath)
            # If we are reading from the collection of actual results, skip over
            # the Trybot results (we don't maintain baselines for them).
            if builder.endswith('-Trybot'):
                continue
            fullpath = os.path.join(dirpath, matching_filename)
            meta_dict[builder] = gm_json.LoadFromFile(fullpath)
    return meta_dict
def RebaselineSubdir(self, subdir, builder):
    """Rebaseline expectations for one subdir/builder combination.

    Downloads the actual-results summary for (subdir, builder), updates the
    expectations file under self._expectations_root in memory (honoring the
    caller's test/config filters), and writes the updated expectations back
    out to disk.

    Args:
      subdir: which expectations subdirectory to rebaseline
      builder: which builder's actual results to read
    """
    # Read in the actual result summary, and extract all the tests whose
    # results we need to update.
    actuals_url = '/'.join([self._actuals_base_url,
                            subdir, builder, subdir,
                            self._actuals_filename])
    # In most cases, we won't need to re-record results that are already
    # succeeding, but including the SUCCEEDED results will allow us to
    # re-record expectations if they somehow get out of sync.
    sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED,
                gm_json.JSONKEY_ACTUALRESULTS_SUCCEEDED]
    if self._add_new:
        sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON)
    results_to_update = self._GetActualResults(json_url=actuals_url,
                                               sections=sections)

    # Read in current expectations.
    expectations_input_filepath = os.path.join(
        self._expectations_root, subdir, self._expectations_input_filename)
    expectations_dict = gm_json.LoadFromFile(expectations_input_filepath)
    expected_results = expectations_dict[gm_json.JSONKEY_EXPECTEDRESULTS]

    # Update the expectations in memory, skipping any tests/configs that
    # the caller asked to exclude.
    skipped_images = []
    if results_to_update:
        for (image_name, image_results) in results_to_update.iteritems():
            (test, config) = self._image_filename_re.match(image_name).groups()
            if self._tests:
                if test not in self._tests:
                    skipped_images.append(image_name)
                    continue
            if self._configs:
                if config not in self._configs:
                    skipped_images.append(image_name)
                    continue
            if not expected_results.get(image_name):
                expected_results[image_name] = {}
            expected_results[image_name]\
                [gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS] = \
                [image_results]

    # Write out updated expectations.
    expectations_output_filepath = os.path.join(
        self._expectations_root, subdir, self._expectations_output_filename)
    gm_json.WriteToFile(expectations_dict, expectations_output_filepath)

    # Mark the JSON file as plaintext, so text-style diffs can be applied.
    # Fixes https://code.google.com/p/skia/issues/detail?id=1442
    if self._using_svn:
        self._Call(['svn', 'propset', '--quiet', 'svn:mime-type',
                    'text/x-json', expectations_output_filepath])

    # BUGFIX: skipped_images was accumulated above but never reported.
    # Surface it so the user can see which images the filters excluded,
    # matching the behavior of the other RebaselineSubdir variant.
    if skipped_images:
        print('Skipped these tests due to test/config filters: %s' %
              skipped_images)
def RebaselineSubdir(self, subdir, builder):
    """Rebaseline the expectations for one subdir/builder pair.

    Pulls the actual-results summary for (subdir, builder), folds the new
    digests into the expectations file in place (honoring the caller's
    test/config filters), and reports any images that were skipped.
    """
    # Build the URL of the actual-results summary and fetch the set of
    # tests whose results we need to update.
    actuals_url = '/'.join([self._actuals_base_url, subdir, builder, subdir,
                            self._actuals_filename])
    sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED]
    if self._add_new:
        sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON)
    results_to_update = self._GetActualResults(json_url=actuals_url,
                                               sections=sections)

    # Load the current expectations from disk.
    expectations_json_filepath = os.path.join(self._expectations_root, subdir,
                                              self._expectations_filename)
    expectations_dict = gm_json.LoadFromFile(expectations_json_filepath)

    # Fold the new results into the in-memory expectations, skipping any
    # tests/configs the caller asked to exclude.
    skipped_images = []
    if results_to_update:
        for image_name, image_results in results_to_update.iteritems():
            test, config = self._testname_pattern.match(image_name).groups()
            if self._tests and test not in self._tests:
                skipped_images.append(image_name)
                continue
            if self._configs and config not in self._configs:
                skipped_images.append(image_name)
                continue
            entry = expectations_dict[gm_json.JSONKEY_EXPECTEDRESULTS][image_name]
            entry[gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS] = \
                [image_results]

    # Persist the updated expectations.
    gm_json.WriteToFile(expectations_dict, expectations_json_filepath)
    if skipped_images:
        print('Skipped these tests due to test/config filters: %s' %
              skipped_images)
def _duplicate_config(self, path, old, new):
    """Duplicates all instances of a config within a GM expectations file.

    Params:
      path: path to file which will be modified in place
      old: old config name
      new: new config name
    """
    dic = gm_json.LoadFromFile(file_path=path)
    expected_results = dic[gm_json.JSONKEY_EXPECTEDRESULTS]
    # Snapshot the keys up front, since we add entries while iterating.
    for key in list(expected_results.keys()):
        (testname, config) = IMAGE_FILENAME_RE.match(key).groups()
        if config != old:
            continue
        new_key = '%s_%s.png' % (testname, new)
        expected_results[new_key] = expected_results[key]
    gm_json.WriteToFile(json_dict=dic, file_path=path)
def _set_expected_hash(self, device_name, image_name, hash_value):
    """Set the expected hash for the image of the given device.

    This always writes directly to the expected results file of the given
    device.

    @param device_name The name of the device to write the hash to.
    @param image_name The name of the image whose hash to set.
    @param hash_value The value of the hash to set.
    """
    # Locate the expected results file as it is in the working tree.
    json_path = os.path.join(self._expectations_dir, device_name,
                             self._expected_name)

    # Load it, record the new hash, and write it back out via gm_json so
    # the formatting stays consistent.
    expectations = gm_json.LoadFromFile(json_path)
    set_expected_hash_in_json(expectations, image_name, hash_value)
    gm_json.WriteToFile(expectations, json_path)
def Display(filepath): """Displays a summary of the results in a JSON file. Returns True if the results are free of any significant failures. filepath: (string) path to JSON file""" # Map labels within the JSON file to the ResultAccumulator for each label. results_map = { gm_json.JSONKEY_ACTUALRESULTS_FAILED: ResultAccumulator(name='ExpectationsMismatch', do_list=True, do_fail=True), gm_json.JSONKEY_ACTUALRESULTS_FAILUREIGNORED: ResultAccumulator(name='IgnoredExpectationsMismatch', do_list=True, do_fail=False), gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON: ResultAccumulator(name='MissingExpectations', do_list=False, do_fail=False), gm_json.JSONKEY_ACTUALRESULTS_SUCCEEDED: ResultAccumulator(name='Passed', do_list=False, do_fail=False), } success = True json_dict = gm_json.LoadFromFile(filepath) actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS] for label, accumulator in results_map.iteritems(): results = actual_results[label] if results: for result in results: accumulator.AddResult(result) print accumulator.GetSummaryLine() if accumulator.ShouldSignalFailure(): success = False print '(results marked with [*] will cause nonzero return value)' return success
def RebaselineSubdir(self, builder):
    """Rebaseline all failing tests for a single builder.

    Reads the actual-results summary for this builder, updates the
    expectations in memory (honoring the caller's test/config filters and
    applying any reviewed/bugs/notes annotations), and writes the
    expectations file back out.

    Args:
      builder: which builder's results to rebaseline
    """
    # Read in the actual result summary, and extract all the tests whose
    # results we need to update.
    actuals_url = '/'.join(
        [self._actuals_base_url, builder, self._actuals_filename])
    # Only update results for tests that are currently failing.
    # We don't want to rewrite results for tests that are already succeeding,
    # because we don't want to add annotation fields (such as
    # JSONKEY_EXPECTEDRESULTS_BUGS) except for tests whose expectations we
    # are actually modifying.
    sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED]
    if self._add_new:
        sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON)
    results_to_update = self._GetActualResults(json_url=actuals_url,
                                               sections=sections)

    # Read in current expectations.
    expectations_input_filepath = os.path.join(
        self._expectations_root, builder, self._expectations_input_filename)
    expectations_dict = gm_json.LoadFromFile(expectations_input_filepath)
    expected_results = expectations_dict.get(gm_json.JSONKEY_EXPECTEDRESULTS)
    if not expected_results:
        expected_results = {}
        expectations_dict[gm_json.JSONKEY_EXPECTEDRESULTS] = expected_results

    # Update the expectations in memory, skipping any tests/configs that
    # the caller asked to exclude.
    skipped_images = []
    if results_to_update:
        for (image_name, image_results) in results_to_update.iteritems():
            (test, config) = self._image_filename_re.match(image_name).groups()
            if self._tests:
                if test not in self._tests:
                    skipped_images.append(image_name)
                    continue
            if self._configs:
                if config not in self._configs:
                    skipped_images.append(image_name)
                    continue
            if not expected_results.get(image_name):
                expected_results[image_name] = {}
            expected_results[image_name]\
                [gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS]\
                = [image_results]
            if self._mark_unreviewed:
                expected_results[image_name]\
                    [gm_json.JSONKEY_EXPECTEDRESULTS_REVIEWED]\
                    = False
            if self._bugs:
                expected_results[image_name]\
                    [gm_json.JSONKEY_EXPECTEDRESULTS_BUGS]\
                    = self._bugs
            if self._notes:
                expected_results[image_name]\
                    [gm_json.JSONKEY_EXPECTEDRESULTS_NOTES]\
                    = self._notes

    # Write out updated expectations.
    expectations_output_filepath = os.path.join(
        self._expectations_root, builder, self._expectations_output_filename)
    gm_json.WriteToFile(expectations_dict, expectations_output_filepath)

    # Mark the JSON file as plaintext, so text-style diffs can be applied.
    # Fixes https://code.google.com/p/skia/issues/detail?id=1442
    if self._using_svn:
        self._Call(['svn', 'propset', '--quiet', 'svn:mime-type',
                    'text/x-json', expectations_output_filepath])

    # BUGFIX: skipped_images was accumulated but never used. Report it so
    # the user can see which images the test/config filters excluded.
    if skipped_images:
        print('Skipped these tests due to test/config filters: %s' %
              skipped_images)
def Reformat(filename):
    """Rewrite a JSON file in place, normalizing its formatting."""
    print('Reformatting file %s...' % filename)
    contents = gm_json.LoadFromFile(filename)
    gm_json.WriteToFile(contents, filename)