def __init__(self, subdirs, actuals_root,
             generated_images_root=results.DEFAULT_GENERATED_IMAGES_ROOT,
             image_base_url=DEFAULT_IMAGE_BASE_URL,
             diff_base_url=None):
  """Loads render_pictures results from a pair of subdirectories.

  Args:
    subdirs: (string, string) tuple; pair of subdirectories within
        actuals_root to compare
    actuals_root: root directory containing all render_pictures-generated
        JSON files
    generated_images_root: directory within which to create all pixel diffs;
        created if it does not already exist
    image_base_url: URL under which all render_pictures result images can
        be found; used both to read images for comparison within this code
        and reported in the ImagePairSet so its consumers know where to
        download the images from
    diff_base_url: base URL within which the client should look for diff
        images; if not specified, defaults to a "file:///" URL
        representation of generated_images_root
  """
  started_at = int(time.time())
  self._image_diff_db = imagediffdb.ImageDiffDB(generated_images_root)
  self._image_base_url = image_base_url
  # Fall back to a local file:// URL when no explicit diff URL was given.
  if diff_base_url:
    self._diff_base_url = diff_base_url
  else:
    self._diff_base_url = download_actuals.create_filepath_url(
        generated_images_root)
  self._load_result_pairs(actuals_root, subdirs)
  self._timestamp = int(time.time())
  logging.info('Results complete; took %d seconds.' %
               (self._timestamp - started_at))
def __init__(self, configs, actuals_root=results.DEFAULT_ACTUALS_DIR,
             generated_images_root=results.DEFAULT_GENERATED_IMAGES_ROOT,
             diff_base_url=None, builder_regex_list=None):
  """
  Args:
    configs: (string, string) tuple; pair of configs to compare
    actuals_root: root directory containing all actual-results.json files
    generated_images_root: directory within which to create all pixel diffs;
        if this directory does not yet exist, it will be created
    diff_base_url: base URL within which the client should look for diff
        images; if not specified, defaults to a "file:///" URL representation
        of generated_images_root
    builder_regex_list: List of regular expressions specifying which builders
        we will process. If None, process all builders.
  """
  super(ConfigComparisons, self).__init__()
  time_start = int(time.time())
  # Fixed: PEP 8 requires 'is not None' rather than '!= None'; '!=' invokes
  # __ne__ and is not a reliable identity test against the None singleton.
  if builder_regex_list is not None:
    self.set_match_builders_pattern_list(builder_regex_list)
  self._image_diff_db = imagediffdb.ImageDiffDB(generated_images_root)
  self._diff_base_url = (
      diff_base_url or
      url_utils.create_filepath_url(generated_images_root))
  self._actuals_root = actuals_root
  self._load_config_pairs(configs)
  self._timestamp = int(time.time())
  logging.info('Results complete; took %d seconds.' %
               (self._timestamp - time_start))
def test_repo_url(self):
  """Use repo: URL to specify summary files."""
  repo_root = 'repo:gm/rebaseline_server/testdata/inputs/skp-summaries'
  comparisons = compare_rendered_pictures.RenderedPicturesComparisons(
      setA_dirs=[posixpath.join(repo_root, 'expectations')],
      setB_dirs=[posixpath.join(repo_root, 'actuals')],
      setA_section=gm_json.JSONKEY_EXPECTEDRESULTS,
      setB_section=gm_json.JSONKEY_ACTUALRESULTS,
      image_diff_db=imagediffdb.ImageDiffDB(self.temp_dir),
      image_base_gs_url='gs://fakebucket/fake/path',
      diff_base_url='/static/generated-images')
  comparisons.get_timestamp = mock_get_timestamp

  # Replace run-to-run-varying values with stable fakes so the written
  # output is deterministic.
  # pylint: disable=W0212
  revision_key = results.KEY__SET_DESCRIPTIONS__REPO_REVISION
  comparisons._setA_descriptions[revision_key] = 'fake-repo-revision'
  comparisons._setB_descriptions[revision_key] = 'fake-repo-revision'

  gm_json.WriteToFile(
      comparisons.get_packaged_results_of_type(
          results.KEY__HEADER__RESULTS_ALL),
      os.path.join(self.output_dir_actual, 'compare_rendered_pictures.json'))
def test_simple(self):
  """Test ImageDiffDB, downloading real known images from Google Storage.

  TODO(epoger): Instead of hitting Google Storage, we should read image
  files from local disk using a file:// IMG_URL_BASE.
  """
  # params for each self-test:
  # 0. expected image locator
  # 1. expected image URL
  # 2. actual image locator
  # 3. actual image URL
  # 4. expected percent_pixels_differing (as a string, to 4 decimal places)
  # 5. expected weighted_diff_measure (as a string, to 4 decimal places)
  # 6. expected perceptual difference (as a string, to 4 decimal places)
  # 7. expected max_diff_per_channel
  selftests = [
      [
          'arcofzorro/16206093933823793653',
          IMG_URL_BASE + 'arcofzorro/16206093933823793653.png',
          'arcofzorro/13786535001616823825',
          IMG_URL_BASE + 'arcofzorro/13786535001616823825.png',
          '0.0662', '0.0113', '0.0662', [255, 255, 247],
      ],
      [
          'gradients_degenerate_2pt/10552995703607727960',
          IMG_URL_BASE + 'gradients_degenerate_2pt/10552995703607727960.png',
          'gradients_degenerate_2pt/11198253335583713230',
          IMG_URL_BASE + 'gradients_degenerate_2pt/11198253335583713230.png',
          '100.0000', '66.6667', '100.0000', [255, 0, 255],
      ],
  ]

  # Add all image pairs to the database
  db = imagediffdb.ImageDiffDB(self._temp_dir)
  for selftest in selftests:
    # Fixed: the return value was previously bound to an unused local
    # ('retval'); nothing reads it, so discard it.
    db.add_image_pair(expected_image_locator=selftest[0],
                      expected_image_url=selftest[1],
                      actual_image_locator=selftest[2],
                      actual_image_url=selftest[3])

  # Fetch each image pair from the database
  for selftest in selftests:
    record = db.get_diff_record(expected_image_locator=selftest[0],
                                actual_image_locator=selftest[2])
    self.assertEqual('%.4f' % record.get_percent_pixels_differing(),
                     selftest[4])
    self.assertEqual('%.4f' % record.get_weighted_diff_measure(),
                     selftest[5])
    self.assertEqual('%.4f' % record.get_perceptual_difference(),
                     selftest[6])
    self.assertEqual(record.get_max_diff_per_channel(), selftest[7])
def test_gm(self):
  """Process results of a GM run with the ExpectationComparisons object."""
  diff_db = imagediffdb.ImageDiffDB(storage_root=self.temp_dir)
  comparisons = compare_to_expectations.ExpectationComparisons(
      image_diff_db=diff_db,
      actuals_root=os.path.join(self.input_dir, 'gm-actuals'),
      expected_root=os.path.join(self.input_dir, 'gm-expectations'),
      diff_base_url='/static/generated-images')
  comparisons.get_timestamp = mock_get_timestamp
  packaged = comparisons.get_packaged_results_of_type(
      results.KEY__HEADER__RESULTS_ALL)
  gm_json.WriteToFile(
      packaged, os.path.join(self.output_dir_actual, 'gm.json'))
def __init__(self, actuals_root, expected_root, generated_images_root):
  """Loads actual and expected results and records a completion timestamp.

  Args:
    actuals_root: root directory containing all actual-results.json files
    expected_root: root directory containing all expected-results.json files
    generated_images_root: directory within which to create all pixel diffs;
        created if it does not already exist
  """
  self._actuals_root = actuals_root
  self._expected_root = expected_root
  self._image_diff_db = imagediffdb.ImageDiffDB(generated_images_root)
  # Must run after the roots and diff db are set up.
  self._load_actual_and_expected()
  self._timestamp = int(time.time())
def main():
  """Self-test: diff two known image pairs and check the recorded metrics."""
  logging.basicConfig(level=logging.INFO)

  # params for each self-test:
  # 0. expected image locator
  # 1. expected image URL
  # 2. actual image locator
  # 3. actual image URL
  # 4. expected percent_pixels_differing (as a string, to 4 decimal places)
  # 5. expected weighted_diff_measure (as a string, to 4 decimal places)
  # 6. expected max_diff_per_channel
  selftests = [
      [
          '16206093933823793653',
          IMAGE_URL_BASE + 'arcofzorro/16206093933823793653.png',
          '13786535001616823825',
          IMAGE_URL_BASE + 'arcofzorro/13786535001616823825.png',
          '0.0662', '0.0113', [255, 255, 247],
      ],
      [
          '10552995703607727960',
          IMAGE_URL_BASE + 'gradients_degenerate_2pt/10552995703607727960.png',
          '11198253335583713230',
          IMAGE_URL_BASE + 'gradients_degenerate_2pt/11198253335583713230.png',
          '100.0000', '66.6667', [255, 0, 255],
      ],
  ]

  # Add all image pairs to the database
  db = imagediffdb.ImageDiffDB('/tmp/ImageDiffDB')
  for selftest in selftests:
    # Fixed: the return value was previously bound to an unused local
    # ('retval'); nothing reads it, so discard it.
    db.add_image_pair(expected_image_locator=selftest[0],
                      expected_image_url=selftest[1],
                      actual_image_locator=selftest[2],
                      actual_image_url=selftest[3])

  # Fetch each image pair from the database
  for selftest in selftests:
    record = db.get_diff_record(expected_image_locator=selftest[0],
                                actual_image_locator=selftest[2])
    assert (('%.4f' % record.get_percent_pixels_differing()) == selftest[4])
    assert (('%.4f' % record.get_weighted_diff_measure()) == selftest[5])
    assert (record.get_max_diff_per_channel() == selftest[6])
  logging.info("Self-test completed successfully!")
def main():
  # Timestamped log lines make it easy to see how long each stage takes.
  logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                      datefmt='%m/%d/%Y %H:%M:%S',
                      level=logging.INFO)
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--actuals', default=results.DEFAULT_ACTUALS_DIR,
      help='Directory containing all actual-result JSON files; defaults to '
           '\'%(default)s\' .')
  parser.add_argument(
      '--expectations', default=DEFAULT_EXPECTATIONS_DIR,
      help='Directory containing all expected-result JSON files; defaults to '
           '\'%(default)s\' .')
  parser.add_argument(
      '--ignore-failures-file', default=DEFAULT_IGNORE_FAILURES_FILE,
      help='If a file with this name is found within the EXPECTATIONS dir, '
           'ignore failures for any tests listed in the file; defaults to '
           '\'%(default)s\' .')
  parser.add_argument(
      '--outfile', required=True,
      help='File to write result summary into, in JSON format.')
  parser.add_argument(
      '--results', default=results.KEY__HEADER__RESULTS_FAILURES,
      help='Which result types to include. Defaults to \'%(default)s\'; '
           'must be one of ' +
           str([results.KEY__HEADER__RESULTS_FAILURES,
                results.KEY__HEADER__RESULTS_ALL]))
  parser.add_argument(
      '--workdir', default=results.DEFAULT_GENERATED_IMAGES_ROOT,
      help='Directory within which to download images and generate diffs; '
           'defaults to \'%(default)s\' .')
  args = parser.parse_args()
  # All downloaded images and generated pixel diffs live under the workdir.
  image_diff_db = imagediffdb.ImageDiffDB(storage_root=args.workdir)
  results_obj = ExpectationComparisons(
      image_diff_db=image_diff_db,
      actuals_root=args.actuals,
      expected_root=args.expectations,
      ignore_failures_file=args.ignore_failures_file)
  # Write the requested slice of results (failures only, or everything)
  # to the output file as JSON.
  gm_json.WriteToFile(
      results_obj.get_packaged_results_of_type(results_type=args.results),
      args.outfile)
def test_endToEnd(self):
  """Generate two sets of SKPs, run render_pictures over both, and compare
  the results."""
  before_dir = 'before_patch'
  after_dir = 'after_patch'
  self._generate_skps_and_run_render_pictures(
      subdir=before_dir,
      skpdict={
          'changed.skp': 200,
          'unchanged.skp': 100,
          'only-in-before.skp': 128,
      })
  self._generate_skps_and_run_render_pictures(
      subdir=after_dir,
      skpdict={
          'changed.skp': 201,
          'unchanged.skp': 100,
          'only-in-after.skp': 128,
      })

  comparisons = compare_rendered_pictures.RenderedPicturesComparisons(
      setA_dirs=[os.path.join(self.temp_dir, before_dir)],
      setB_dirs=[os.path.join(self.temp_dir, after_dir)],
      setA_section=gm_json.JSONKEY_ACTUALRESULTS,
      setB_section=gm_json.JSONKEY_ACTUALRESULTS,
      image_diff_db=imagediffdb.ImageDiffDB(self.temp_dir),
      image_base_gs_url='gs://fakebucket/fake/path',
      diff_base_url='/static/generated-images')
  comparisons.get_timestamp = mock_get_timestamp

  # Replace run-to-run-varying directory paths with stable fakes so the
  # written output is deterministic.
  # pylint: disable=W0212
  comparisons._setA_descriptions[results.KEY__SET_DESCRIPTIONS__DIR] = [
      'before-patch-fake-dir']
  comparisons._setB_descriptions[results.KEY__SET_DESCRIPTIONS__DIR] = [
      'after-patch-fake-dir']

  gm_json.WriteToFile(
      comparisons.get_packaged_results_of_type(
          results.KEY__HEADER__RESULTS_ALL),
      os.path.join(self.output_dir_actual, 'compare_rendered_pictures.json'))
def __init__(self, actuals_root=DEFAULT_ACTUALS_DIR,
             expected_root=DEFAULT_EXPECTATIONS_DIR,
             generated_images_root=DEFAULT_GENERATED_IMAGES_ROOT):
  """Loads actual and expected results, logging how long the load took.

  Args:
    actuals_root: root directory containing all actual-results.json files
    expected_root: root directory containing all expected-results.json files
    generated_images_root: directory within which to create all pixel diffs;
        created if it does not already exist
  """
  started_at = int(time.time())
  self._image_diff_db = imagediffdb.ImageDiffDB(generated_images_root)
  self._actuals_root = actuals_root
  self._expected_root = expected_root
  self._load_actual_and_expected()
  self._timestamp = int(time.time())
  elapsed = self._timestamp - started_at
  logging.info('Results complete; took %d seconds.' % elapsed)
def __init__(self, actuals_root=results.DEFAULT_ACTUALS_DIR,
             expected_root=DEFAULT_EXPECTATIONS_DIR,
             ignore_failures_file=DEFAULT_IGNORE_FAILURES_FILE,
             generated_images_root=results.DEFAULT_GENERATED_IMAGES_ROOT,
             diff_base_url=None, builder_regex_list=None):
  """
  Args:
    actuals_root: root directory containing all actual-results.json files
    expected_root: root directory containing all expected-results.json files
    ignore_failures_file: if a file with this name is found within
        expected_root, ignore failures for any tests listed in the file
    generated_images_root: directory within which to create all pixel diffs;
        if this directory does not yet exist, it will be created
    diff_base_url: base URL within which the client should look for diff
        images; if not specified, defaults to a "file:///" URL representation
        of generated_images_root
    builder_regex_list: List of regular expressions specifying which builders
        we will process. If None, process all builders.
  """
  time_start = int(time.time())
  # Fixed: PEP 8 requires 'is not None' rather than '!= None'; '!=' invokes
  # __ne__ and is not a reliable identity test against the None singleton.
  if builder_regex_list is not None:
    self.set_match_builders_pattern_list(builder_regex_list)
  self._image_diff_db = imagediffdb.ImageDiffDB(generated_images_root)
  self._diff_base_url = (
      diff_base_url or
      url_utils.create_filepath_url(generated_images_root))
  self._actuals_root = actuals_root
  self._expected_root = expected_root
  # Tests listed in the ignore-failures file are exempted from failure
  # reporting; an empty list means no exemptions.
  self._ignore_failures_on_these_tests = []
  if ignore_failures_file:
    self._ignore_failures_on_these_tests = (
        ExpectationComparisons._read_noncomment_lines(
            os.path.join(expected_root, ignore_failures_file)))
  self._load_actual_and_expected()
  self._timestamp = int(time.time())
  logging.info('Results complete; took %d seconds.' %
               (self._timestamp - time_start))
def test_endToEnd(self):
  """Tests ImagePair, using a real ImageDiffDB to download real images.

  TODO(epoger): Either in addition to or instead of this end-to-end test, we
  should perform some tests using either:
  1. a mock ImageDiffDB, or
  2. a real ImageDiffDB that doesn't hit Google Storage looking for input
     image files (maybe a file:// IMG_URL_BASE)
  """
  # params for each self-test:
  #
  # inputs:
  #  0. imageA_relative_URL
  #  1. imageB_relative_URL
  #  2. expectations dict
  #  3. extra_columns dict
  # expected output:
  #  4. expected result of ImagePair.as_dict()
  selftests = [
      # Identical image on both sides: no differenceData is expected.
      [
          # inputs:
          'arcofzorro/16206093933823793653.png',
          'arcofzorro/16206093933823793653.png',
          None,
          {
              'builder': 'MyBuilder',
              'test': 'MyTest',
          },
          # expected output:
          {
              'extraColumns': {
                  'builder': 'MyBuilder',
                  'test': 'MyTest',
              },
              'imageAUrl': 'arcofzorro/16206093933823793653.png',
              'imageBUrl': 'arcofzorro/16206093933823793653.png',
              'isDifferent': False,
          },
      ],
      # Slightly different images; no expectations or extra columns.
      [
          # inputs:
          'arcofzorro/16206093933823793653.png',
          'arcofzorro/13786535001616823825.png',
          None,
          None,
          # expected output:
          {
              'differenceData': {
                  'maxDiffPerChannel': [255, 255, 247],
                  'numDifferingPixels': 662,
                  'percentDifferingPixels': 0.0662,
                  'perceptualDifference': 0.06620300000000157,
                  'diffUrl': 'arcofzorro_16206093933823793653_png_png-vs-' +
                             'arcofzorro_13786535001616823825_png_png.png',
                  'whiteDiffUrl': 'arcofzorro_16206093933823793653_png_png' +
                                  '-vs-arcofzorro_13786535001616823825_png_png.png',
              },
              'imageAUrl': 'arcofzorro/16206093933823793653.png',
              'imageBUrl': 'arcofzorro/13786535001616823825.png',
              'isDifferent': True,
          },
      ],
      # Completely different images, with expectations and extra columns.
      [
          # inputs:
          'gradients_degenerate_2pt/10552995703607727960.png',
          'gradients_degenerate_2pt/11198253335583713230.png',
          {
              'ignoreFailure': True,
              'bugs': [1001, 1002],
          },
          {
              'builder': 'MyBuilder',
              'test': 'MyTest',
          },
          # expected output:
          {
              'differenceData': {
                  'maxDiffPerChannel': [255, 0, 255],
                  'numDifferingPixels': 102400,
                  'percentDifferingPixels': 100.00,
                  'perceptualDifference': 100.00,
                  'diffUrl': 'gradients_degenerate_2pt_10552995703607727960' +
                             '_png_png-vs-gradients_degenerate_2pt_' +
                             '11198253335583713230_png_png.png',
                  'whiteDiffUrl': 'gradients_degenerate_2pt_' +
                                  '10552995703607727960_png_png-vs-' +
                                  'gradients_degenerate_2pt_11198253335583713230' +
                                  '_png_png.png'
              },
              'expectations': {
                  'bugs': [1001, 1002],
                  'ignoreFailure': True,
              },
              'extraColumns': {
                  'builder': 'MyBuilder',
                  'test': 'MyTest',
              },
              'imageAUrl': 'gradients_degenerate_2pt/10552995703607727960.png',
              'imageBUrl': 'gradients_degenerate_2pt/11198253335583713230.png',
              'isDifferent': True,
          },
      ],
      # Test fix for http://skbug.com/2368 -- how do we handle an ImagePair
      # missing one of its images?
      [
          # inputs:
          'arcofzorro/16206093933823793653.png',
          'nonexistentDir/111111.png',
          {
              'ignoreFailure': True,
              'bugs': [1001, 1002],
          },
          {
              'builder': 'MyBuilder',
              'test': 'MyTest',
          },
          # expected output:
          {
              'expectations': {
                  'bugs': [1001, 1002],
                  'ignoreFailure': True,
              },
              'extraColumns': {
                  'builder': 'MyBuilder',
                  'test': 'MyTest',
              },
              'imageAUrl': 'arcofzorro/16206093933823793653.png',
              'imageBUrl': 'nonexistentDir/111111.png',
              'isDifferent': True,
          },
      ],
      # One of the two images is missing, but download_all_images=True so we
      # should download it anyway.
      [
          # inputs:
          None,
          'arcofzorro/13786535001616823825.png',
          None,
          None,
          # expected output:
          {
              'imageAUrl': None,
              'imageBUrl': 'arcofzorro/13786535001616823825.png',
              'isDifferent': True,
          },
      ],
  ]

  # Run every self-test case through a single shared ImageDiffDB and
  # compare each ImagePair's dict form against the expected output above.
  db = imagediffdb.ImageDiffDB(self.temp_dir)
  for selftest in selftests:
    image_pair = imagepair.ImagePair(image_diff_db=db,
                                     imageA_base_url=IMG_URL_BASE,
                                     imageB_base_url=IMG_URL_BASE,
                                     imageA_relative_url=selftest[0],
                                     imageB_relative_url=selftest[1],
                                     expectations=selftest[2],
                                     extra_columns=selftest[3],
                                     download_all_images=True)
    self.assertEqual(image_pair.as_dict(), selftest[4])
def WriteJsonSummary(img_root, nopatch_json, nopatch_images_base_url,
                     withpatch_json, withpatch_images_base_url,
                     output_file_path, gs_output_dir, gs_skp_dir, slave_num,
                     additions_to_sys_path):
  """Outputs the JSON summary of image comparisons.

  Args:
    img_root: (str) The root directory on local disk where we store all
        images.
    nopatch_json: (str) Location of the nopatch render_pictures JSON summary
        file.
    nopatch_images_base_url: (str) URL of directory containing all nopatch
        images.
    withpatch_json: (str) Location of the withpatch render_pictures JSON
        summary file.
    withpatch_images_base_url: (str) URL of directory containing all
        withpatch images.
    output_file_path: (str) The local path to the JSON file that will be
        created by this function which will contain a summary of all file
        differences for this slave.
    gs_output_dir: (str) The directory the JSON summary file and images will
        be outputted to in Google Storage.
    gs_skp_dir: (str) The Google Storage directory that contains the SKPs of
        this cluster telemetry slave.
    slave_num: (str) The number of the cluster telemetry slave that is
        running this script.
    additions_to_sys_path: ([str]) A list of path components to add to
        sys.path; typically used to provide rebaseline_server Python modules.
  """
  # Make the rebaseline_server modules importable before attempting the
  # imports below.
  if additions_to_sys_path:
    for dirpath in additions_to_sys_path:
      if dirpath not in sys.path:
        sys.path.insert(0, dirpath)

  # Modules from skia's gm/ and gm/rebaseline_server/ dirs.
  try:
    import gm_json
    import imagediffdb
  except ImportError:
    print 'sys.path is [%s]' % sys.path
    traceback.print_exc()
    raise Exception(
        'You need to add gm/ and gm/rebaseline_server to sys.path')

  all_image_descriptions_nopatch = GetImageDescriptions(
      gm_json, nopatch_json)
  all_image_descriptions_withpatch = GetImageDescriptions(
      gm_json, withpatch_json)

  # Both summaries must describe exactly the same set of SKPs, or the
  # per-file comparison below would be meaningless.
  assert (len(all_image_descriptions_nopatch) ==
          len(all_image_descriptions_withpatch)), \
      'Number of images in the two JSON summary files are different'
  assert (all_image_descriptions_nopatch.keys() ==
          all_image_descriptions_withpatch.keys()), \
      'SKP filenames in the two JSON summary files are different'

  # Compare checksums in both directories and output differences.
  file_differences = []
  slave_dict = {
      json_summary_constants.JSONKEY_SKPS_LOCATION: gs_skp_dir,
      json_summary_constants.JSONKEY_FAILED_FILES: file_differences,
      json_summary_constants.JSONKEY_FILES_LOCATION_NOPATCH:
          posixpath.join(gs_output_dir, 'slave%s' % slave_num,
                         'nopatch-images'),
      json_summary_constants.JSONKEY_FILES_LOCATION_WITHPATCH:
          posixpath.join(gs_output_dir, 'slave%s' % slave_num,
                         'withpatch-images'),
      json_summary_constants.JSONKEY_FILES_LOCATION_DIFFS:
          posixpath.join(gs_output_dir, 'slave%s' % slave_num, 'diffs'),
      json_summary_constants.JSONKEY_FILES_LOCATION_WHITE_DIFFS:
          posixpath.join(gs_output_dir, 'slave%s' % slave_num, 'whitediffs')
  }
  json_summary = {'slave%s' % slave_num: slave_dict}

  image_diff_db = imagediffdb.ImageDiffDB(storage_root=img_root)

  for image_filepath in all_image_descriptions_nopatch:
    image_desc_nopatch = all_image_descriptions_nopatch[image_filepath]
    image_desc_withpatch = all_image_descriptions_withpatch[image_filepath]

    # Sanity-check that both sides describe the same image with the same
    # checksum algorithm before comparing checksum values.
    algo_nopatch = image_desc_nopatch[JSONKEY_IMAGE_CHECKSUMALGORITHM]
    algo_withpatch = image_desc_withpatch[JSONKEY_IMAGE_CHECKSUMALGORITHM]
    assert algo_nopatch == algo_withpatch, 'Different checksum algorithms'

    imagefile_nopatch = image_desc_nopatch[JSONKEY_IMAGE_FILEPATH]
    imagefile_withpatch = image_desc_withpatch[JSONKEY_IMAGE_FILEPATH]
    assert imagefile_nopatch == imagefile_withpatch, \
        'Different imagefile names'

    skpfile_nopatch = image_desc_nopatch[IMAGE_SOURCE]
    skpfile_withpatch = image_desc_withpatch[IMAGE_SOURCE]
    assert skpfile_nopatch == skpfile_withpatch, 'Different skpfile names'

    checksum_nopatch = image_desc_nopatch[JSONKEY_IMAGE_CHECKSUMVALUE]
    checksum_withpatch = image_desc_withpatch[JSONKEY_IMAGE_CHECKSUMVALUE]
    # Only images whose checksums differ get a full pixel diff.
    if checksum_nopatch != checksum_withpatch:
      # TODO(epoger): It seems silly that we add this DiffRecord to
      # ImageDiffDB and then pull it out again right away, but this is a
      # stepping-stone to using ImagePairSet instead of replicating its
      # behavior here.
      image_locator_base = os.path.splitext(imagefile_nopatch)[0]
      image_locator_nopatch = image_locator_base + '_nopatch'
      image_locator_withpatch = image_locator_base + '_withpatch'
      image_diff_db.add_image_pair(
          expected_image_url=posixpath.join(nopatch_images_base_url,
                                            image_filepath),
          expected_image_locator=image_locator_nopatch,
          actual_image_url=posixpath.join(withpatch_images_base_url,
                                          image_filepath),
          actual_image_locator=image_locator_withpatch)
      diff_record = image_diff_db.get_diff_record(
          expected_image_locator=image_locator_nopatch,
          actual_image_locator=image_locator_withpatch)
      file_differences.append({
          json_summary_constants.JSONKEY_FILE_NAME: imagefile_nopatch,
          json_summary_constants.JSONKEY_SKP_LOCATION:
              posixpath.join(gs_skp_dir, skpfile_nopatch),
          json_summary_constants.JSONKEY_NUM_PIXELS_DIFFERING:
              diff_record.get_num_pixels_differing(),
          json_summary_constants.JSONKEY_PERCENT_PIXELS_DIFFERING:
              diff_record.get_percent_pixels_differing(),
          json_summary_constants.JSONKEY_MAX_DIFF_PER_CHANNEL:
              diff_record.get_max_diff_per_channel(),
          json_summary_constants.JSONKEY_PERCEPTUAL_DIFF:
              diff_record.get_perceptual_difference(),
      })
  # Only record a failure count when at least one file differed.
  if file_differences:
    slave_dict[json_summary_constants.JSONKEY_FAILED_FILES_COUNT] = len(
        file_differences)

  with open(output_file_path, 'w') as f:
    f.write(json.dumps(json_summary, indent=4, sort_keys=True))
def __init__(self, actuals_dir=DEFAULT_ACTUALS_DIR,
             json_filename=DEFAULT_JSON_FILENAME,
             gm_summaries_bucket=DEFAULT_GM_SUMMARIES_BUCKET,
             port=DEFAULT_PORT, export=False, editable=True,
             reload_seconds=0, config_pairs=None, builder_regex_list=None,
             boto_file_path=None,
             imagediffdb_threads=imagediffdb.DEFAULT_NUM_WORKER_THREADS):
  """
  Args:
    actuals_dir: directory under which we will check out the latest actual
        GM results
    json_filename: basename of the JSON summary file to load for each builder
    gm_summaries_bucket: Google Storage bucket to download json_filename
        files from; if None or '', don't fetch new actual-results files
        at all, just compare to whatever files are already in actuals_dir
    port: which TCP port to listen on for HTTP requests
    export: whether to allow HTTP clients on other hosts to access this
        server
    editable: whether HTTP clients are allowed to submit new GM baselines
        (SKP baseline modifications are performed using an entirely
        different mechanism, not affected by this parameter)
    reload_seconds: polling interval with which to check for new results;
        if 0, don't check for new results at all
    config_pairs: List of (string, string) tuples; for each tuple, compare
        actual results of these two configs. If None or empty, don't
        compare configs at all.
    builder_regex_list: List of regular expressions specifying which
        builders we will process. If None, process all builders.
    boto_file_path: Path to .boto file giving us credentials to access
        Google Storage buckets; if None, we will only be able to access
        public GS buckets.
    imagediffdb_threads: How many threads to spin up within imagediffdb.
  """
  self._actuals_dir = actuals_dir
  self._json_filename = json_filename
  self._gm_summaries_bucket = gm_summaries_bucket
  self._port = port
  self._export = export
  self._editable = editable
  self._reload_seconds = reload_seconds
  self._config_pairs = config_pairs or []
  self._builder_regex_list = builder_regex_list
  # NOTE(review): flag consumed elsewhere; presumably limits the size of
  # the result payloads served to clients -- confirm at its usage site.
  self.truncate_results = False
  # With an explicit .boto file we can access private GS buckets;
  # otherwise only public buckets are reachable.
  if boto_file_path:
    self._gs = gs_utils.GSUtils(boto_file_path=boto_file_path)
  else:
    self._gs = gs_utils.GSUtils()
  _create_index(
      file_path=os.path.join(
          PARENT_DIRECTORY, STATIC_CONTENTS_SUBDIR, GENERATED_HTML_SUBDIR,
          "index.html"),
      config_pairs=config_pairs)

  # Reentrant lock that must be held whenever updating EITHER of:
  # 1. self._results
  # 2. the expected or actual results on local disk
  self.results_rlock = threading.RLock()

  # Create a single ImageDiffDB instance that is used by all our differs.
  self._image_diff_db = imagediffdb.ImageDiffDB(
      gs=self._gs,
      storage_root=os.path.join(
          PARENT_DIRECTORY, STATIC_CONTENTS_SUBDIR, GENERATED_IMAGES_SUBDIR),
      num_worker_threads=imagediffdb_threads)

  # This will be filled in by calls to update_results()
  self._results = None
def test_endToEnd_withImageBaseGSUrl(self):
  """Generate two sets of SKPs, run render_pictures over both, and compare
  the results."""
  setA_subdir = 'before_patch'
  setB_subdir = 'after_patch'
  # Distinct (fake) GS bases for each set, so we can verify below that the
  # per-set baseURLs in the output reflect them.
  imageA_gs_base = 'superman/kent-camera/pictures'
  imageB_gs_base = 'batman/batarang/pictures'
  self._generate_skps_and_run_render_pictures(
      subdir=setA_subdir,
      skpdict={
          'changed.skp': 200,
          'unchanged.skp': 100,
          'only-in-before.skp': 128,
      },
      image_base_gs_url='gs://%s' % imageA_gs_base)
  self._generate_skps_and_run_render_pictures(
      subdir=setB_subdir,
      skpdict={
          'changed.skp': 201,
          'unchanged.skp': 100,
          'only-in-after.skp': 128,
      },
      image_base_gs_url='gs://%s' % imageB_gs_base)

  # NOTE(review): this variant passes setA_dir/setB_dir (singular strings),
  # unlike test_endToEnd's setA_dirs/setB_dirs lists -- confirm against the
  # RenderedPicturesComparisons signature.
  results_obj = compare_rendered_pictures.RenderedPicturesComparisons(
      setA_dir=os.path.join(self.temp_dir, setA_subdir),
      setB_dir=os.path.join(self.temp_dir, setB_subdir),
      setA_section=gm_json.JSONKEY_ACTUALRESULTS,
      setB_section=gm_json.JSONKEY_ACTUALRESULTS,
      image_diff_db=imagediffdb.ImageDiffDB(self.temp_dir),
      image_base_gs_url='gs://fakebucket/fake/path',
      diff_base_url='/static/generated-images')
  results_obj.get_timestamp = mock_get_timestamp

  output_dict = results_obj.get_packaged_results_of_type(
      results.KEY__HEADER__RESULTS_ALL)
  # Assert that the baseURLs are as expected.
  self.assertEquals(
      output_dict[imagepairset.KEY__ROOT__IMAGESETS][
          imagepairset.KEY__IMAGESETS__SET__IMAGE_A][
              imagepairset.KEY__IMAGESETS__FIELD__BASE_URL],
      'http://storage.cloud.google.com/%s' % imageA_gs_base)
  self.assertEquals(
      output_dict[imagepairset.KEY__ROOT__IMAGESETS][
          imagepairset.KEY__IMAGESETS__SET__IMAGE_B][
              imagepairset.KEY__IMAGESETS__FIELD__BASE_URL],
      'http://storage.cloud.google.com/%s' % imageB_gs_base)

  # Overwrite elements within the results that change from one test run
  # to the next.
  # pylint: disable=W0212
  results_obj._setA_descriptions[results.KEY__SET_DESCRIPTIONS__DIR] = [
      'before-patch-fake-dir']
  results_obj._setB_descriptions[results.KEY__SET_DESCRIPTIONS__DIR] = [
      'after-patch-fake-dir']

  gm_json.WriteToFile(
      output_dict,
      os.path.join(self.output_dir_actual, 'compare_rendered_pictures.json'))
def test_endToEnd(self):
  """Tests ImagePair, using a real ImageDiffDB to download real images.

  TODO(epoger): Either in addition to or instead of this end-to-end test, we
  should perform some tests using either:
  1. a mock ImageDiffDB, or
  2. a real ImageDiffDB that doesn't hit Google Storage looking for input
     image files (maybe a file:// IMG_URL_BASE)
  """
  # params for each self-test:
  #
  # inputs:
  #  0. imageA_relative_URL
  #  1. imageB_relative_URL
  #  2. expectations dict
  #  3. extra_columns dict
  # expected output:
  #  4. expected result of ImagePair.as_dict()
  selftests = [
      # Identical image on both sides: no differenceData is expected.
      [
          # inputs:
          'arcofzorro/16206093933823793653.png',
          'arcofzorro/16206093933823793653.png',
          None,
          {
              'builder': 'MyBuilder',
              'test': 'MyTest',
          },
          # expected output:
          {
              'extraColumns': {
                  'builder': 'MyBuilder',
                  'test': 'MyTest',
              },
              'imageAUrl': 'arcofzorro/16206093933823793653.png',
              'imageBUrl': 'arcofzorro/16206093933823793653.png',
              'isDifferent': False,
          },
      ],
      # Slightly different images; no expectations or extra columns.
      [
          # inputs:
          'arcofzorro/16206093933823793653.png',
          'arcofzorro/13786535001616823825.png',
          None,
          None,
          # expected output:
          {
              'differenceData': {
                  'maxDiffPerChannel': [255, 255, 247],
                  'numDifferingPixels': 662,
                  'percentDifferingPixels': 0.0662,
                  'perceptualDifference': 0.06620000000000914,
                  'weightedDiffMeasure': 0.01127756555171088,
              },
              'imageAUrl': 'arcofzorro/16206093933823793653.png',
              'imageBUrl': 'arcofzorro/13786535001616823825.png',
              'isDifferent': True,
          },
      ],
      # Completely different images, with expectations and extra columns.
      [
          # inputs:
          'gradients_degenerate_2pt/10552995703607727960.png',
          'gradients_degenerate_2pt/11198253335583713230.png',
          {
              'ignoreFailure': True,
              'bugs': [1001, 1002],
          },
          {
              'builder': 'MyBuilder',
              'test': 'MyTest',
          },
          # expected output:
          {
              'differenceData': {
                  'maxDiffPerChannel': [255, 0, 255],
                  'numDifferingPixels': 102400,
                  'percentDifferingPixels': 100.00,
                  'perceptualDifference': 100.00,
                  'weightedDiffMeasure': 66.66666666666667,
              },
              'expectations': {
                  'bugs': [1001, 1002],
                  'ignoreFailure': True,
              },
              'extraColumns': {
                  'builder': 'MyBuilder',
                  'test': 'MyTest',
              },
              'imageAUrl': 'gradients_degenerate_2pt/10552995703607727960.png',
              'imageBUrl': 'gradients_degenerate_2pt/11198253335583713230.png',
              'isDifferent': True,
          },
      ],
  ]

  # Run every self-test case through a single shared ImageDiffDB and
  # compare each ImagePair's dict form against the expected output above.
  db = imagediffdb.ImageDiffDB(self._temp_dir)
  for selftest in selftests:
    image_pair = imagepair.ImagePair(image_diff_db=db,
                                     base_url=IMG_URL_BASE,
                                     imageA_relative_url=selftest[0],
                                     imageB_relative_url=selftest[1],
                                     expectations=selftest[2],
                                     extra_columns=selftest[3])
    self.assertEqual(image_pair.as_dict(), selftest[4])