def test_run_vmafrc_runner_set_custom_models_enable_transform(self):
    """Run VmafrcQualityRunner with two custom models loaded from the same
    file; the second has enable_transform set, so its scores differ."""
    ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
    self.runner = VmafrcQualityRunner(
        [asset, asset_original],
        None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=None,
        optional_dict={
            'models': [
                'path={}:name=custom_vmaf_0'.format(
                    VmafConfig.model_path("vmaf_v0.6.1.pkl")),
                'path={}:name=custom_vmaf_1:enable_transform'.format(
                    VmafConfig.model_path("vmaf_v0.6.1.pkl")),
            ]
        })
    self.runner.run(parallelize=True)
    results = self.runner.results
    # (result index, score key, expected value)
    expectations = [
        (0, 'VMAFRC_custom_vmaf_0_score', 76.69926875),
        (0, 'VMAFRC_custom_vmaf_1_score', 92.542390144364546),
        (1, 'VMAFRC_custom_vmaf_0_score', 99.94641666666666),
        (1, 'VMAFRC_custom_vmaf_1_score', 100.0),
    ]
    for result_idx, score_key, expected in expectations:
        self.assertAlmostEqual(results[result_idx][score_key], expected, places=4)
def test_run_vmaf_runner_with_notyuv(self):
    """VMAF on 'notyuv' (mp4) inputs, rescaled to a 360x240 quality resolution."""
    ref_path = VmafConfig.test_resource_path("mp4", "Seeking_30_480_1050.mp4")
    dis_path = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
    asset = Asset(
        dataset="test", content_id=0, asset_id=0,
        workdir_root=VmafConfig.workdir_path(),
        ref_path=ref_path,
        dis_path=dis_path,
        asset_dict={
            'yuv_type': 'notyuv',
            'quality_width': 360,
            'quality_height': 240,
        })
    self.runner = VmafQualityRunner(
        [asset],
        None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=None,
        optional_dict={
            'model_filepath': VmafConfig.model_path("vmaf_float_v0.6.1.json"),
        },
    )
    self.runner.run()
    results = self.runner.results
    self.assertAlmostEqual(results[0]['VMAF_score'], 77.61273448644646, places=4)
def test_test_on_dataset_bootstrap_quality_runner(self):
    """Run BootstrapVmafQualityRunner over the sample dataset and verify the
    per-bootstrap-model scores and the aggregate bootstrap VMAF score."""
    from vmaf.routine import run_test_on_dataset
    test_dataset = import_python_file(
        VmafConfig.test_resource_path('dataset_sample.py'))
    test_assets, results = run_test_on_dataset(
        test_dataset, BootstrapVmafQualityRunner, None, None,
        VmafConfig.model_path("vmaf_float_b_v0.6.3.json"),
        parallelize=True,
        aggregate_method=None)
    expecteds = [
        98.7927560599655, 100.0, 100.0, 98.82959541116277,
        99.80711961053976, 98.91713244333198, 100.0, 99.33233498293374,
        98.99337537979711, 99.62668672314118, 99.00885507364698, 100.0,
        97.29492843378944, 100.0, 99.02101642563275, 94.50521964145268,
        95.63007904351339, 98.57370486684022, 100.0, 99.36754906446309,
    ]
    actuals = results[0]['BOOTSTRAP_VMAF_all_models_score']
    assert len(actuals) == len(expecteds), \
        "Expected and actual bootstrap prediction lists do not match in length."
    for idx, expected in enumerate(expecteds):
        self.assertAlmostEqual(actuals[idx], expected, places=4)
    self.assertAlmostEqual(results[0]['BOOTSTRAP_VMAF_score'],
                           99.32876664539778, places=4)
class SpatioTemporalVmafQualityRunner(VmafQualityRunner):
    # Spatio-temporal VMAF variant: reuses the VmafQualityRunner pipeline
    # unchanged, only swapping in the stvmaf_v1 model as the default.
    TYPE = 'STVMAF'  # prefix used in result keys, e.g. STVMAF_score
    VERSION = '1'
    DEFAULT_MODEL_FILEPATH = VmafConfig.model_path("stvmaf", "stvmaf_v1.pkl")
def test_run_vmafossexec_runner_with_ci_and_phone_model(self):
    """VMAFOSSEXEC with the vmaf_rb_v0.6.2 residue-bootstrap model,
    confidence interval and phone model enabled; checks predicted,
    bagging and stddev scores for distorted and pristine assets.

    Fix: the original used a Python-2 `print` statement, which is a
    syntax error under Python 3; converted to the print() function.
    """
    print('test on running VMAFOSSEXEC runner with conf interval and phone model...')
    ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
    self.runner = VmafossExecQualityRunner(
        [asset, asset_original],
        None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=None,
        optional_dict={
            'model_filepath': VmafConfig.model_path("vmaf_rb_v0.6.2", "vmaf_rb_v0.6.2.pkl"),
            'phone_model': True,
            'ci': True,
        },
    )
    self.runner.run()
    results = self.runner.results
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 91.723012127641823, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 100.0, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_bagging_score'], 90.129761531349985, places=3)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_bagging_score'], 100.0, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_stddev_score'], 0.85880437658259945, places=3)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_stddev_score'], 0.0, places=4)
def test_run_vmaf_runner_with_notyuv_jpg(self):
    """VMAF on JPEG ('notyuv') inputs, with a yuv444p intermediate workfile."""
    ref_path = VmafConfig.test_resource_path("test_images", "bikes.jpg")
    dis_path = VmafConfig.test_resource_path("test_images", "bikes_dis.jpg")
    asset = Asset(
        dataset="test", content_id=0, asset_id=0,
        workdir_root=VmafConfig.workdir_path(),
        ref_path=ref_path,
        dis_path=dis_path,
        asset_dict={
            'yuv_type': 'notyuv',
            'quality_width': 512,
            'quality_height': 384,
            'workfile_yuv_type': 'yuv444p',
        })
    self.runner = VmafQualityRunner(
        [asset],
        None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=None,
        optional_dict={
            'model_filepath': VmafConfig.model_path("vmaf_float_v0.6.1.json"),
        },
    )
    self.runner.run()
    results = self.runner.results
    self.assertAlmostEqual(results[0]['VMAF_score'], 63.255016130209064, places=4)
def test_run_vmafossexec_runner_with_ci_and_phone_model(self):
    """VMAFOSSEXEC with the vmaf_rb_v0.6.3 residue-bootstrap model,
    confidence interval and phone model enabled; also checks scores of
    individual bootstrap models (vmaf_0001 .. vmaf_0020).

    Fix: the original used a Python-2 `print` statement, which is a
    syntax error under Python 3; converted to the print() function.
    """
    print('test on running VMAFOSSEXEC runner with conf interval and phone model...')
    ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
    self.runner = VmafossExecQualityRunner(
        [asset, asset_original],
        None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=None,
        optional_dict={
            'model_filepath': VmafConfig.model_path("vmaf_rb_v0.6.3", "vmaf_rb_v0.6.3.pkl"),
            'phone_model': True,
            'ci': True,
        },
    )
    self.runner.run()
    results = self.runner.results
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 91.723012127641823, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 100.0, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_bagging_score'], 90.13159583333334, places=3)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_bagging_score'], 100.0, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_stddev_score'], 0.8371132083333332, places=3)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_stddev_score'], 0.0, places=4)
    # per model score checks
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vmaf_0001_score'], 90.25032499999999, places=3)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vmaf_0002_score'], 88.18534583333333, places=3)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vmaf_0003_score'], 89.04952291666666, places=3)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vmaf_0020_score'], 90.16633958333334, places=3)
def test_test_on_dataset(self):
    """run_test_on_dataset over the sample dataset: verify both the
    predicted VMAF scores and the parsed groundtruth values."""
    from vmaf.routine import run_test_on_dataset
    test_dataset = import_python_file(
        VmafConfig.test_resource_path('dataset_sample.py'))
    test_assets, results = run_test_on_dataset(
        test_dataset, VmafQualityRunner, None, None,
        VmafConfig.model_path("vmaf_float_v0.6.1.json"),
        parallelize=True,
        aggregate_method=None)
    expected_scores = [
        99.142659046424384,
        35.066157497128764,
        97.428042675471147,
        97.427927701008869,
    ]
    for idx, expected in enumerate(expected_scores):
        self.assertAlmostEqual(results[idx]['VMAF_score'], expected, places=4)
    expected_groundtruths = [100, 50, 100, 80]
    for idx, expected in enumerate(expected_groundtruths):
        self.assertAlmostEqual(test_assets[idx].groundtruth, expected, places=4)
def test_from_xml_from_json_and_aggregation(self):
    """Round-trip a Result through XML and JSON, then validate that
    Result.combine_result concatenates per-key score lists in order."""
    ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
    asset_list = [asset, asset_original]
    self.runner = VmafQualityRunner(
        asset_list,
        None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=None,
        optional_dict={
            'model_filepath': VmafConfig.model_path("vmaf_float_v0.6.1.pkl"),
        },
        optional_dict2=None,
    )
    self.runner.run()
    results = self.runner.results

    # serialization must survive a parse/serialize round trip
    xml_string_expected = results[0].to_xml()
    xml_string_recon = Result.from_xml(xml_string_expected).to_xml()
    json_string_expected = results[0].to_json()
    json_string_recon = Result.from_json(json_string_expected).to_json()
    assert xml_string_expected == xml_string_recon, "XML files do not match"
    assert json_string_expected == json_string_recon, "JSON files do not match"

    combined_result = Result.combine_result([results[0], results[1]])

    # the combined result carries exactly the keys of both inputs
    combined_result_keys = list(combined_result.result_dict)
    keys_0 = list(results[0].result_dict)
    keys_1 = list(results[1].result_dict)
    assert set(keys_0) == set(keys_1) == set(combined_result_keys)

    # per key: combination is the in-order concatenation of the inputs
    for key in combined_result_keys:
        combined = combined_result.result_dict[key]
        first = results[0].result_dict[key]
        second = results[1].result_dict[key]
        assert len(combined) == len(first) + len(second)
        assert combined[0] == first[0]
        assert combined[len(first) - 1] == first[len(first) - 1]
        assert combined[len(first)] == second[0]
        assert combined[len(combined) - 1] == second[len(second) - 1]
def test_run_vmafossexec_nonexist_dis_file(self):
    """A missing distorted file must make vmafossexec exit with RC_ARGUMENT_ISSUE."""
    executable = ExternalProgram.vmafossexec
    ref = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
    dis = VmafConfig.resource_path("yuv", "src01_hrc01_576x324_XXX.yuv")
    model = VmafConfig.model_path("other_models", "vmaf_v0.6.0.pkl")
    cmd = "{exe} yuv420p 576 324 {ref} {dis} {model} > /dev/null 2>&1".format(
        exe=executable, ref=ref, dis=dis, model=model)
    self.assertEqual(subprocess.call(cmd, shell=True), self.RC_ARGUMENT_ISSUE)
def test_test_on_dataset_split_test_indices_for_perf_ci(self):
    """run_test_on_dataset with test-index splitting for the performance CI path."""
    from vmaf.routine import run_test_on_dataset
    test_dataset = import_python_file(
        VmafConfig.test_resource_path('dataset_sample.py'))
    test_assets, results = run_test_on_dataset(
        test_dataset,
        VmafQualityRunner,
        None,
        None,
        VmafConfig.model_path("vmaf_float_v0.6.1.json"),
        parallelize=False,
        aggregate_method=None,
        split_test_indices_for_perf_ci=True,
        n_splits_test_indices=10)
    self.assertAlmostEqual(results[0]['VMAF_score'], 99.142659046424384, places=4)
def test_run_vmafossexec_runner_with_ci(self):
    """VMAFOSSEXEC with the vmaf_rb_v0.6.3 residue-bootstrap model and
    ci=True: checks elementary features, aggregate VMAF, bagging/stddev/
    ci95 statistics, and a few individual bootstrap-model scores.

    Fix: the original used a Python-2 `print` statement, which is a
    syntax error under Python 3; converted to the print() function.
    """
    print('test on running VMAFOSSEXEC runner with conf interval...')
    ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
    self.runner = VmafossExecQualityRunner(
        [asset, asset_original],
        None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=None,
        optional_dict={
            'model_filepath': VmafConfig.model_path("vmaf_rb_v0.6.3", "vmaf_rb_v0.6.3.pkl"),
            'ci': True
        },
    )
    self.runner.run()
    results = self.runner.results
    # elementary features, distorted asset
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale0_score'], 0.363420458333, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale1_score'], 0.766647520833, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale2_score'], 0.862854708333, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale3_score'], 0.915971791667, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion2_score'], 3.8953518541666665, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm2_score'], 0.93458777083333333, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_psnr_score'], 30.7550666667, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ssim_score'], 0.86322654166666657, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ms_ssim_score'], 0.9632498125, places=4)
    # elementary features, pristine asset
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale0_score'], 1.0, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale1_score'], 0.999999958333, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale2_score'], 0.999999416667, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale3_score'], 0.999999208333, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_motion2_score'], 3.8953518541666665, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_adm2_score'], 1.0, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_psnr_score'], 60.0, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ssim_score'], 1.0, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ms_ssim_score'], 1.0, places=4)
    # aggregate and bootstrap statistics
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 75.443043750000001, places=3)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 99.958047916666672, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_bagging_score'], 73.10273541666668, places=3)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_bagging_score'], 99.79000416666668, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_stddev_score'], 1.1991330833333333, places=3)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_stddev_score'], 1.3028828125, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ci95_low_score'], 70.82471875, places=3)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ci95_low_score'], 94.79667083333334, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ci95_high_score'], 74.85038125, places=3)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ci95_high_score'], 99.99736666666666, places=4)
    # per model score checks
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vmaf_0001_score'], 73.26853333333334, places=3)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vmaf_0002_score'], 70.38517916666667, places=3)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vmaf_0003_score'], 71.59264583333334, places=3)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vmaf_0020_score'], 73.15570625, places=3)
def test_run_vmafossexec_wrong_subsample(self):
    """--subsample 0 is invalid; vmafossexec must exit with RC_MORE_ARGUMENT_ISSUE.

    Fix: replaced deprecated `assertEquals` (removed in Python 3.12)
    with `assertEqual`.
    """
    exe = ExternalProgram.vmafossexec
    cmd = "{exe} yuv420p 576 324 {ref} {dis} {model} --thread 0 --subsample 0".format(
        exe=exe,
        ref=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv"),
        dis=VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv"),
        model=VmafConfig.model_path("other_models", "vmaf_v0.6.0.pkl"))
    ret = subprocess.call(cmd, shell=True)
    self.assertEqual(ret, self.RC_MORE_ARGUMENT_ISSUE)
def test_run_vmafossexec_odd_resolution(self):
    """Odd frame dimensions (575x323) must be rejected with RC_ARGUMENT_ISSUE.

    Fix: replaced deprecated `assertEquals` (removed in Python 3.12)
    with `assertEqual`.
    """
    exe = VmafConfig.root_path('wrapper', 'vmafossexec')
    cmd = "{exe} yuv420p 575 323 {ref} {dis} {model}".format(
        exe=exe,
        ref=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv"),
        dis=VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv"),
        model=VmafConfig.model_path("vmaf_v0.6.0.pkl"))
    ret = subprocess.call(cmd, shell=True)
    self.assertEqual(ret, self.RC_ARGUMENT_ISSUE)
def test_run_vmafossexec_unknown_pooling(self):
    """An unrecognized --pool value must yield RC_MORE_ARGUMENT_ISSUE.

    Fix: replaced deprecated `assertEquals` (removed in Python 3.12)
    with `assertEqual`.
    """
    exe = VmafConfig.root_path('src', 'libvmaf', 'vmafossexec')
    cmd = "{exe} yuv420p 576 324 {ref} {dis} {model} --pool mean_XXX".format(
        exe=exe,
        ref=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv"),
        dis=VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv"),
        model=VmafConfig.model_path("other_models", "vmaf_v0.6.0.pkl"))
    ret = subprocess.call(cmd, shell=True)
    self.assertEqual(ret, self.RC_MORE_ARGUMENT_ISSUE)
def test_run_vmafossexec(self):
    """vmafossexec happy path: threading and subsampling flags accepted."""
    executable = ExternalProgram.vmafossexec
    ref = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
    dis = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
    model = VmafConfig.model_path("other_models", "vmaf_v0.6.0.json")
    cmd = "{exe} yuv420p 576 324 {ref} {dis} {model} --thread 1 --subsample 2".format(
        exe=executable, ref=ref, dis=dis, model=model)
    self.assertEqual(subprocess.call(cmd, shell=True), self.RC_SUCCESS)
def test_run_vmafossexec_unknown_pooling(self):
    """An unrecognized --pool value must yield RC_MORE_ARGUMENT_ISSUE."""
    executable = ExternalProgram.vmafossexec
    ref = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
    dis = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
    model = VmafConfig.model_path("other_models", "vmaf_v0.6.0.json")
    cmd = "{exe} yuv420p 576 324 {ref} {dis} {model} --pool mean_XXX > /dev/null 2>&1".format(
        exe=executable, ref=ref, dis=dis, model=model)
    self.assertEqual(subprocess.call(cmd, shell=True), self.RC_MORE_ARGUMENT_ISSUE)
def test_run_vmafossexec_nonexist_model_file(self):
    """A missing model file must make vmafossexec exit with RC_VMAF_EXCEPTION.

    Fix: replaced deprecated `assertEquals` (removed in Python 3.12)
    with `assertEqual`.
    """
    exe = ExternalProgram.vmafossexec
    cmd = "{exe} yuv420p 576 324 {ref} {dis} {model}".format(
        exe=exe,
        ref=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv"),
        dis=VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv"),
        model=VmafConfig.model_path("other_models", "vmaf_v0.6.0.pkl_XXX"))
    ret = subprocess.call(cmd, shell=True)
    self.assertEqual(ret, self.RC_VMAF_EXCEPTION)
def test_run_vmafossexec_wrong_thread(self):
    """--thread -1 is invalid; vmafossexec must exit with RC_MORE_ARGUMENT_ISSUE.

    Fix: replaced deprecated `assertEquals` (removed in Python 3.12)
    with `assertEqual`.
    """
    exe = VmafConfig.root_path('wrapper', 'vmafossexec')
    cmd = "{exe} yuv420p 576 324 {ref} {dis} {model} --thread -1 --subsample 2".format(
        exe=exe,
        ref=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv"),
        dis=VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv"),
        model=VmafConfig.model_path("vmaf_v0.6.0.pkl"))
    ret = subprocess.call(cmd, shell=True)
    self.assertEqual(ret, self.RC_MORE_ARGUMENT_ISSUE)
def test_run_vmafossexec_wrong_model_fmt(self):
    """Passing a .pkl.model file where a .pkl is expected must yield RC_LOGIC_ERROR.

    Fix: replaced deprecated `assertEquals` (removed in Python 3.12)
    with `assertEqual`.
    """
    exe = VmafConfig.root_path('wrapper', 'vmafossexec')
    cmd = "{exe} yuv420p 576 324 {ref} {dis} {model}".format(
        exe=exe,
        ref=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv"),
        dis=VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv"),
        model=VmafConfig.model_path("vmaf_v0.6.0.pkl.model"))
    ret = subprocess.call(cmd, shell=True)
    self.assertEqual(ret, self.RC_LOGIC_ERROR)
def test_run_vmafossexec_wrong_model_fmt(self):
    """Passing a .pkl.model file where a .pkl is expected must yield RC_LOGIC_ERROR."""
    executable = ExternalProgram.vmafossexec
    ref = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
    dis = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
    model = VmafConfig.model_path("other_models", "vmaf_v0.6.0.pkl.model")
    cmd = "{exe} yuv420p 576 324 {ref} {dis} {model} > /dev/null 2>&1".format(
        exe=executable, ref=ref, dis=dis, model=model)
    self.assertEqual(subprocess.call(cmd, shell=True), self.RC_LOGIC_ERROR)
def test_run_vmafexec_with_frame_skipping_unequal(self):
    """vmafexec with unequal ref/dist frame skipping; verifies the psnr_y
    aggregate line in the XML output."""
    executable = ExternalProgram.vmafexec
    cmd = ("{exe} --reference {ref} --distorted {dis} --width 576 --height 324 "
           "--pixel_format 420 --bitdepth 8 --xml --feature psnr "
           "--model path={model} --quiet --output {output} "
           "--frame_skip_ref 2 --frame_skip_dist 5").format(
        exe=executable,
        ref=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv"),
        dis=VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv"),
        model=VmafConfig.model_path("other_models", "vmaf_v0.6.0.json"),
        output=self.output_file_path)
    self.assertEqual(subprocess.call(cmd, shell=True), self.RC_SUCCESS)
    with open(self.output_file_path, 'rt') as fo:
        fc = fo.read()
    self.assertTrue('<metric name="psnr_y" min="19.019327" max="21.084954" mean="20.269606" harmonic_mean="20.258113" />' in fc)
def test_run_vmafexec(self):
    """vmafexec happy path; verifies the psnr_y aggregate line in the XML output."""
    executable = ExternalProgram.vmafexec
    cmd = ("{exe} --reference {ref} --distorted {dis} --width 576 --height 324 "
           "--pixel_format 420 --bitdepth 8 --xml --feature psnr "
           "--model path={model} --quiet --output {output}").format(
        exe=executable,
        ref=VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv"),
        dis=VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv"),
        model=VmafConfig.model_path("other_models", "vmaf_v0.6.0.json"),
        output=self.output_file_path)
    self.assertEqual(subprocess.call(cmd, shell=True), self.RC_SUCCESS)
    with open(self.output_file_path, 'rt') as fo:
        fc = fo.read()
    self.assertTrue('<metric name="psnr_y" min="29.640688" max="34.760779" mean="30.755064" harmonic_mean="30.727905" />' in fc)
def test_run_vmafossexec_runner_with_ci(self):
    """VMAFOSSEXEC with the vmaf_rb_v0.6.2 residue-bootstrap model and
    ci=True: checks elementary features, aggregate VMAF, and
    bagging/stddev/ci95 statistics.

    Fix: the original used a Python-2 `print` statement, which is a
    syntax error under Python 3; converted to the print() function.
    """
    print('test on running VMAFOSSEXEC runner with conf interval...')
    ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
    self.runner = VmafossExecQualityRunner(
        [asset, asset_original],
        None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=None,
        optional_dict={
            'model_filepath': VmafConfig.model_path("vmaf_rb_v0.6.2", "vmaf_rb_v0.6.2.pkl"),
            'ci': True
        },
    )
    self.runner.run()
    results = self.runner.results
    # elementary features, distorted asset
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale0_score'], 0.363420458333, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale1_score'], 0.766647520833, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale2_score'], 0.862854708333, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale3_score'], 0.915971791667, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion2_score'], 3.8953518541666665, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm2_score'], 0.93458777083333333, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_psnr_score'], 30.7550666667, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ssim_score'], 0.86322654166666657, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ms_ssim_score'], 0.9632498125, places=4)
    # elementary features, pristine asset
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale0_score'], 1.0, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale1_score'], 0.999999958333, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale2_score'], 0.999999416667, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale3_score'], 0.999999208333, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_motion2_score'], 3.8953518541666665, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_adm2_score'], 1.0, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_psnr_score'], 60.0, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ssim_score'], 1.0, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ms_ssim_score'], 1.0, places=4)
    # aggregate and bootstrap statistics
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 75.443043750000001, places=3)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 99.958047916666672, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_bagging_score'], 73.099946626689174, places=3)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_bagging_score'], 99.686116179979152, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_stddev_score'], 1.2301198477788975, places=3)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_stddev_score'], 1.5917514683608882, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ci95_low_score'], 70.801585803086553, places=3)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ci95_low_score'], 94.784491176494996, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ci95_high_score'], 74.853442421187708, places=3)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ci95_high_score'], 99.992560767034618, places=4)
def setUp(self):
    """Extract Moment noref features from the diffdim image dataset (in
    parallel) and pick a model filename for save/load tests."""
    dataset_path = VmafConfig.test_resource_path('test_image_dataset_diffdim.py')
    dataset = import_python_file(dataset_path)
    assets = read_dataset(dataset)
    _, self.features = run_executors_in_parallel(
        MomentNorefFeatureExtractor,
        assets,
        fifo_mode=True,
        delete_workdir=True,
        parallelize=True,
        result_store=None,
        optional_dict=None,
        optional_dict2=None,
    )
    self.model_filename = VmafConfig.model_path("test_save_load.pkl")
def setUp(self):
    """Extract Moment noref features via a directly-constructed extractor
    run with parallelize=True, and pick a model filename for save/load tests."""
    dataset_path = VmafConfig.test_resource_path('test_image_dataset_diffdim.py')
    dataset = import_python_file(dataset_path)
    assets = read_dataset(dataset)
    extractor = MomentNorefFeatureExtractor(
        assets,
        None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=None,
        optional_dict=None,
        optional_dict2=None,
    )
    extractor.run(parallelize=True)
    self.features = extractor.results
    self.model_filename = VmafConfig.model_path("test_save_load.pkl")
def test_run_vmafossexec_runner_norm_type_none(self):
    """VMAFOSSEXEC with a model trained with norm type 'none'; checks
    elementary features (note: motion, not motion2) and final scores.

    Fix: the original used a Python-2 `print` statement, which is a
    syntax error under Python 3; converted to the print() function.
    """
    print('test on running VMAFOSSEXEC runner with norm type none...')
    ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
    self.runner = VmafossExecQualityRunner(
        [asset, asset_original],
        None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=None,
        optional_dict={
            'model_filepath': VmafConfig.model_path("other_models", "nflxtrain_norm_type_none.pkl"),
        },
    )
    self.runner.run()
    results = self.runner.results
    # elementary features, distorted asset
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale0_score'], 0.363420458333, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale1_score'], 0.766647520833, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale2_score'], 0.862854708333, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_vif_scale3_score'], 0.915971791667, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_motion_score'], 4.04982583333, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_adm2_score'], 0.93458777083333333, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_psnr_score'], 30.7550666667, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ssim_score'], 0.86322654166666657, places=4)
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_ms_ssim_score'], 0.9632498125, places=4)
    # elementary features, pristine asset
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale0_score'], 1.0, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale1_score'], 0.999999958333, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale2_score'], 0.999999416667, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_vif_scale3_score'], 0.999999208333, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_motion_score'], 4.04982583333, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_adm2_score'], 1.0, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_psnr_score'], 60.0, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ssim_score'], 1.0, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_ms_ssim_score'], 1.0, places=4)
    # final predicted scores
    self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 74.253349625150562, places=4)
    self.assertAlmostEqual(results[1]['VMAFOSSEXEC_score'], 77.996338095161946, places=4)
def setUp(self):
    """Run VMAF on a 3-frame checkerboard pair and augment the result dict
    with reshaped per-frame score arrays for downstream array tests."""
    ref_path = VmafConfig.test_resource_path(
        "yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
    dis_path = VmafConfig.test_resource_path(
        "yuv", "checkerboard_1920_1080_10_3_1_0.yuv")
    asset = Asset(
        dataset="test", content_id=0, asset_id=0,
        workdir_root=VmafConfig.workdir_path(),
        ref_path=ref_path,
        dis_path=dis_path,
        asset_dict={'width': 1920, 'height': 1080})
    self.runner = VmafQualityRunner(
        [asset],
        None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=FileSystemResultStore(),
        optional_dict={
            'model_filepath': VmafConfig.model_path("vmaf_float_v0.6.1.json"),
        },
    )
    self.runner.run()
    self.result = self.runner.results[0]
    frame_scores = self.result.result_dict['VMAF_scores']
    n_frames = len(frame_scores)
    # column vector: one score per frame
    self.result.result_dict['VMAF_array_scores'] = frame_scores.reshape(n_frames, 1)
    # two identical model rows stacked (vstack copies, so sharing `row` is safe)
    row = frame_scores.reshape(1, n_frames)
    self.result.result_dict['VMAF_two_models_array_scores'] = np.vstack((row, row))
    # 3D array used to exercise unsupported-shape handling
    self.result.result_dict['VMAF_3D_array_scores'] = np.zeros((1, 1, 1))
def test_test_on_dataset_plot_per_content(self):
    """run_test_on_dataset with per-content plotting; expects exactly 3 png
    figures in the output directory, which is removed afterwards."""
    from vmaf.routine import run_test_on_dataset
    test_dataset = import_python_file(
        VmafConfig.test_resource_path('dataset_sample.py'))
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(1, 1, figsize=[20, 20])
    run_test_on_dataset(
        test_dataset, VmafQualityRunner, ax, None,
        VmafConfig.model_path("vmaf_float_v0.6.1.json"),
        parallelize=False,
        fifo_mode=False,
        aggregate_method=None,
        point_label='asset_id',
        # 'aggregate' plots all contents in one figure;
        # 'per_content' plots a separate figure per content
        do_plot=['aggregate', 'per_content'],
        plot_linear_fit=True,  # adds linear fit line to each plot
    )
    output_dir = VmafConfig.workspace_path("output", "test_output")
    DisplayConfig.show(write_to_dir=output_dir)
    self.assertEqual(len(glob.glob(os.path.join(output_dir, '*.png'))), 3)
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
class BootstrapVmafQualityRunner(VmafQualityRunner):
    """VMAF runner backed by a bootstrapped libsvm nu-SVR model: in addition
    to the point prediction it reports per-bootstrap-model scores, the
    bagging mean, the stddev and the 95% confidence interval bounds."""

    TYPE = "BOOTSTRAP_VMAF"
    VERSION = VmafQualityRunner.VERSION + '-' + 'M' + BootstrapLibsvmNusvrTrainTestModel.VERSION
    ALGO_VERSION = None

    # "vmaf_b_v0.6.3": plain bootstrapping, "vmaf_rb_v0.6.3": residue bootstrapping
    DEFAULT_MODEL_FILEPATH = VmafConfig.model_path("vmaf_b_v0.6.3", "vmaf_b_v0.6.3.pkl")

    def _populate_result_dict(self, feature_result, pred_result):
        # Merge the elementary feature scores with all bootstrap statistics
        # produced by predict_with_model into a single result dict.
        result_dict = {}
        result_dict.update(feature_result.result_dict)  # add feature result
        result_dict[self.get_scores_key()] = pred_result['ys_pred']  # add quality score
        result_dict[self.get_all_models_scores_key()] = pred_result['ys_pred_all_models']  # add quality score from all models
        result_dict[self.get_bagging_scores_key()] = pred_result['ys_pred_bagging']  # add bagging quality score
        result_dict[self.get_stddev_scores_key()] = pred_result['ys_pred_stddev']  # add stddev of bootstrapped quality score
        result_dict[self.get_ci95_low_scores_key()] = pred_result['ys_pred_ci95_low']  # add ci95 of bootstrapped quality score
        result_dict[self.get_ci95_high_scores_key()] = pred_result['ys_pred_ci95_high']  # add ci95 of bootstrapped quality score
        return result_dict

    @classmethod
    def predict_with_model(cls, model, xs, **kwargs):
        """Predict with a bootstrapped model and apply (in this exact order)
        the optional score transform, then score clipping, then a stddev
        rescaling whose slope is estimated by finite differences around the
        bagging score (using DELTA as the perturbation half-width)."""
        DELTA = 1e-2  # half-width of the finite-difference used for the stddev slope
        result = model.predict(xs)
        ys_pred_all_models = result['ys_label_pred_all_models']
        ys_pred = result['ys_label_pred']
        ys_pred_bagging = result['ys_label_pred_bagging']
        ys_pred_stddev = result['ys_label_pred_stddev']
        ys_pred_ci95_low = result['ys_label_pred_ci95_low']
        ys_pred_ci95_high = result['ys_label_pred_ci95_high']
        # perturbed copies of the bagging score, carried through transform/clip
        # so the local slope of the composed mapping can be measured below
        ys_pred_plus = ys_pred_bagging + DELTA
        ys_pred_minus = ys_pred_bagging - DELTA
        do_transform_score = cls._do_transform_score(kwargs)
        if do_transform_score:
            ys_pred_all_models = np.array([
                cls.transform_score(model, ys_pred_some_model)
                for ys_pred_some_model in ys_pred_all_models
            ])
            ys_pred = cls.transform_score(model, ys_pred)
            ys_pred_bagging = cls.transform_score(model, ys_pred_bagging)
            ys_pred_plus = cls.transform_score(model, ys_pred_plus)
            ys_pred_minus = cls.transform_score(model, ys_pred_minus)
            ys_pred_ci95_low = cls.transform_score(model, ys_pred_ci95_low)
            ys_pred_ci95_high = cls.transform_score(model, ys_pred_ci95_high)
        else:
            pass
        if 'disable_clip_score' in kwargs and kwargs[
                'disable_clip_score'] is True:
            pass
        else:
            ys_pred_all_models = np.array([
                cls.clip_score(model, ys_pred_some_model)
                for ys_pred_some_model in ys_pred_all_models
            ])
            ys_pred = cls.clip_score(model, ys_pred)
            ys_pred_bagging = cls.clip_score(model, ys_pred_bagging)
            ys_pred_plus = cls.clip_score(model, ys_pred_plus)
            ys_pred_minus = cls.clip_score(model, ys_pred_minus)
            ys_pred_ci95_low = cls.clip_score(model, ys_pred_ci95_low)
            ys_pred_ci95_high = cls.clip_score(model, ys_pred_ci95_high)
        # stddev score transform is applied after transform, clip, or both, or neither
        slope = ((ys_pred_plus - ys_pred_minus) / (2.0 * DELTA))
        ys_pred_stddev = ys_pred_stddev * slope
        return {
            'ys_pred_all_models': ys_pred_all_models,
            'ys_pred': ys_pred,
            'ys_pred_bagging': ys_pred_bagging,
            'ys_pred_stddev': ys_pred_stddev,
            'ys_pred_ci95_low': ys_pred_ci95_low,
            'ys_pred_ci95_high': ys_pred_ci95_high,
        }

    def get_train_test_model_class(self):
        # override VmafQualityRunner.get_train_test_model_class
        return BootstrapLibsvmNusvrTrainTestModel

    # Result-dict key helpers; plural '*_scores' keys hold per-frame lists,
    # singular '*_score' keys hold the aggregated value.
    @classmethod
    def get_all_models_scores_key(cls):
        return cls.TYPE + '_all_models_scores'

    @classmethod
    def get_all_models_score_key(cls):
        return cls.TYPE + '_all_models_score'

    @classmethod
    def get_bagging_scores_key(cls):
        return cls.TYPE + '_bagging_scores'

    @classmethod
    def get_bagging_score_key(cls):
        return cls.TYPE + '_bagging_score'

    @classmethod
    def get_stddev_scores_key(cls):
        return cls.TYPE + '_stddev_scores'

    @classmethod
    def get_stddev_score_key(cls):
        return cls.TYPE + '_stddev_score'

    @classmethod
    def get_ci95_low_scores_key(cls):
        return cls.TYPE + '_ci95_low_scores'

    @classmethod
    def get_ci95_low_score_key(cls):
        return cls.TYPE + '_ci95_low_score'

    @classmethod
    def get_ci95_high_scores_key(cls):
        return cls.TYPE + '_ci95_high_scores'

    @classmethod
    def get_ci95_high_score_key(cls):
        return cls.TYPE + '_ci95_high_score'
class VmafossExecQualityRunner(QualityRunner):
    """Quality runner backed by the vmafossexec command-line executable.

    ``_generate_result`` shells out to vmafossexec (via
    ``ExternalProgramCaller.call_vmafossexec``) to produce an XML log file;
    ``_get_quality_scores`` parses that log back into per-frame VMAF and
    atom-feature scores.
    """

    TYPE = 'VMAFOSSEXEC'

    # VERSION = '0.3'
    # DEFAULT_MODEL_FILEPATH_DOTMODEL = VmafConfig.model_path("nflxall_vmafv3.pkl.model")

    # VERSION = '0.3.1'
    # DEFAULT_MODEL_FILEPATH_DOTMODEL = VmafConfig.model_path("nflxall_vmafv3a.pkl.model")

    # VERSION = '0.3.2'
    # ALGO_VERSION = 0
    # # DEFAULT_MODEL_FILEPATH_DOTMODEL = VmafConfig.model_path("nflxall_vmafv4.pkl.model")
    # DEFAULT_MODEL_FILEPATH = VmafConfig.model_path("nflxall_vmafv4.pkl")

    VERSION = 'F' + VmafFeatureExtractor.VERSION + '-0.6.1'
    ALGO_VERSION = 2

    # trained with resource/param/vmaf_v6.py on private/user/zli/resource/dataset/dataset/derived/vmafplusstudy_laptop_raw_generalandcornercase.py, MLER, y=x+17
    DEFAULT_MODEL_FILEPATH = VmafConfig.model_path("vmaf_v0.6.1.pkl")

    # atom features that vmafossexec may report per frame in its XML log
    FEATURES = ['adm2', 'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3',
                'motion', 'vif_scale0', 'vif_scale1', 'vif_scale2',
                'vif_scale3', 'vif', 'psnr', 'ssim', 'ms_ssim', 'motion2',
                'bagging', 'stddev', 'ci95_low', 'ci95_high']

    @classmethod
    def get_feature_scores_key(cls, atom_feature):
        """Return the result-dict key for atom_feature's per-frame scores."""
        return "{type}_{atom_feature}_scores".format(
            type=cls.TYPE, atom_feature=atom_feature)

    def _get_option(self, key, default):
        """Return ``self.optional_dict[key]`` if the dict exists and has the
        key, otherwise ``default``.

        Extracted to remove the ten near-identical if/else lookup blocks
        that previously filled ``_generate_result``.
        """
        if self.optional_dict is not None and key in self.optional_dict:
            return self.optional_dict[key]
        return default

    def _generate_result(self, asset):
        # routine to call the command-line executable and generate quality
        # scores in the log file.
        log_file_path = self._get_log_file_path(asset)

        # a 'model_filepath' entry explicitly set to None falls back to the
        # default model, same as an absent key (matches the original
        # three-way check)
        model_filepath = self._get_option('model_filepath', None)
        if model_filepath is None:
            model_filepath = self.DEFAULT_MODEL_FILEPATH

        disable_clip_score = self._get_option('disable_clip_score', False)
        enable_transform_score = self._get_option('enable_transform_score', False)
        phone_model = self._get_option('phone_model', False)
        disable_avx = self._get_option('disable_avx', False)
        n_thread = self._get_option('thread', 0)
        n_subsample = self._get_option('subsample', 1)
        psnr = self._get_option('psnr', True)
        ssim = self._get_option('ssim', True)
        ms_ssim = self._get_option('ms_ssim', True)
        ci = self._get_option('ci', False)

        quality_width, quality_height = asset.quality_width_height
        fmt = self._get_workfile_yuv_type(asset)
        w = quality_width
        h = quality_height
        ref_path = asset.ref_workfile_path
        dis_path = asset.dis_workfile_path
        model = model_filepath
        exe = self._get_exec()
        logger = self.logger

        ExternalProgramCaller.call_vmafossexec(
            fmt, w, h, ref_path, dis_path, model, log_file_path,
            disable_clip_score, enable_transform_score, phone_model,
            disable_avx, n_thread, n_subsample, psnr, ssim, ms_ssim, ci,
            exe, logger)

    def _get_exec(self):
        return None  # signaling default

    def _get_quality_scores(self, asset):
        # routine to read the quality scores from the log file, and return
        # the scores in a dictionary format.
        log_file_path = self._get_log_file_path(asset)
        tree = ElementTree.parse(log_file_path)
        root = tree.getroot()
        scores = []

        # check if vmafossexec returned additional info about the bootstrapped
        # models; bootstrap_model_list_str is a comma-separated string of
        # model names
        vmaf_params = root.findall('params')[0].attrib
        augmented_features = copy.copy(self.FEATURES)
        if 'bootstrap_model_list_str' in vmaf_params:
            bootstrap_model_list_str = vmaf_params['bootstrap_model_list_str']
            bootstrap_model_list = bootstrap_model_list_str.split(',') \
                if len(bootstrap_model_list_str) > 0 else []
            augmented_features += bootstrap_model_list

        feature_scores = [[] for _ in augmented_features]
        for frame in root.findall('frames/frame'):
            scores.append(float(frame.attrib['vmaf']))
            for i_feature, feature in enumerate(augmented_features):
                try:
                    feature_scores[i_feature].append(
                        float(frame.attrib[feature]))
                except KeyError:
                    pass  # some features may be missing

        assert len(scores) != 0

        quality_result = {
            self.get_scores_key(): scores,
        }
        for i_feature, feature in enumerate(augmented_features):
            if len(feature_scores[i_feature]) != 0:
                quality_result[self.get_feature_scores_key(feature)] = \
                    feature_scores[i_feature]
        return quality_result