def test_run_vmaf_runner_with_notyuv_jpg(self):
    """Run VmafQualityRunner on JPEG inputs via the 'notyuv' decode path."""
    ref_path = VmafConfig.test_resource_path("test_images", "bikes.jpg")
    dis_path = VmafConfig.test_resource_path("test_images", "bikes_dis.jpg")
    # sources are not raw YUV; work in yuv444p at 512x384
    asset_dict = {
        'yuv_type': 'notyuv',
        'quality_width': 512,
        'quality_height': 384,
        'workfile_yuv_type': 'yuv444p',
    }
    asset = Asset(dataset="test", content_id=0, asset_id=0,
                  workdir_root=VmafConfig.workdir_path(),
                  ref_path=ref_path, dis_path=dis_path,
                  asset_dict=asset_dict)
    self.runner = VmafQualityRunner(
        [asset], None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=None,
        optional_dict={'model_filepath': VmafConfig.model_path("vmaf_float_v0.6.1.json")},
    )
    self.runner.run()
    score = self.runner.results[0]['VMAF_score']
    self.assertAlmostEqual(score, 63.255016130209064, places=4)
def test_run_vmaf_runner_with_notyuv_gblur(self):
    """VMAF on mp4 inputs with a tiny Gaussian blur applied to the distorted side."""
    ref_path = VmafConfig.test_resource_path("mp4", "Seeking_30_480_1050.mp4")
    dis_path = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
    asset = Asset(dataset="test", content_id=0, asset_id=0,
                  workdir_root=VmafConfig.workdir_path(),
                  ref_path=ref_path, dis_path=dis_path,
                  asset_dict={
                      'yuv_type': 'notyuv',
                      'quality_width': 360,
                      'quality_height': 240,
                      'dis_gblur_cmd': 'sigma=0.01:steps=1',
                  })
    self.runner = VmafQualityRunner([asset], None, fifo_mode=True,
                                    delete_workdir=True, result_store=None)
    self.runner.run()
    score = self.runner.results[0]['VMAF_score']
    self.assertAlmostEqual(score, 77.28938600125885, places=4)
def test_run_vmaf_runner_with_notyuv(self):
    """VMAF on mp4 ('notyuv') inputs with an explicit float model file."""
    ref_path = VmafConfig.test_resource_path("mp4", "Seeking_30_480_1050.mp4")
    dis_path = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
    asset = Asset(dataset="test", content_id=0, asset_id=0,
                  workdir_root=VmafConfig.workdir_path(),
                  ref_path=ref_path, dis_path=dis_path,
                  asset_dict={'yuv_type': 'notyuv',
                              'quality_width': 360,
                              'quality_height': 240})
    self.runner = VmafQualityRunner(
        [asset], None,
        fifo_mode=True, delete_workdir=True, result_store=None,
        optional_dict={'model_filepath': VmafConfig.model_path("vmaf_float_v0.6.1.json")},
    )
    self.runner.run()
    score = self.runner.results[0]['VMAF_score']
    self.assertAlmostEqual(score, 77.61273448644646, places=4)
def test_run_vmaf_runner_with_yuv_lutyuv(self):
    """VMAF on raw YUV inputs with a lutyuv (luma-doubling) filter applied."""
    ref_path = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
    dis_path = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
    asset_dict = {
        'width': 576, 'height': 324,
        'quality_width': 360, 'quality_height': 240,
        'lutyuv_cmd': 'y=2*val',
    }
    asset = Asset(dataset="test", content_id=0, asset_id=0,
                  workdir_root=VmafConfig.workdir_path(),
                  ref_path=ref_path, dis_path=dis_path,
                  asset_dict=asset_dict)
    self.runner = VmafQualityRunner([asset], None, fifo_mode=False,
                                    delete_workdir=True, result_store=None)
    self.runner.run()
    self.assertAlmostEqual(self.runner.results[0]['VMAF_score'],
                           77.18873019841408, places=4)
def setUp(self):
    """Run VMAF once on the checkerboard pair and derive array-valued score entries."""
    ref_path = VmafConfig.test_resource_path(
        "yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
    dis_path = VmafConfig.test_resource_path(
        "yuv", "checkerboard_1920_1080_10_3_1_0.yuv")
    asset = Asset(dataset="test", content_id=0, asset_id=0,
                  workdir_root=VmafConfig.workdir_path(),
                  ref_path=ref_path, dis_path=dis_path,
                  asset_dict={'width': 1920, 'height': 1080})
    self.runner = VmafQualityRunner(
        [asset], None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=FileSystemResultStore(),
    )
    self.runner.run()
    self.result = self.runner.results[0]
    per_frame = self.result.result_dict['VMAF_scores']
    n_frames = len(per_frame)
    # column vector, shape (Nframes, 1)
    self.result.result_dict['VMAF_array_scores'] = per_frame.reshape(n_frames, 1)
    # two identical "model" rows stacked, shape (2, Nframes)
    row = per_frame.reshape(1, n_frames)
    self.result.result_dict['VMAF_two_models_array_scores'] = np.vstack((row, row))
    # 3-D entry used to exercise the unsupported-dimensionality path
    self.result.result_dict['VMAF_3D_array_scores'] = np.zeros((1, 1, 1))
def main():
    """Run VMAF over the configured assets, draw the result graph, and save it.

    Expects the figure title as the first command-line argument.
    """
    title = sys.argv[1]
    assets, dis_info = get_assets()
    runner = VmafQualityRunner(assets, logger=None)
    runner.run()
    fig = draw_graph(runner.results, dis_info, title)
    gpath = get_graph_path()
    fig.savefig(gpath)
    # fix: the original used the Python-2 print statement, a syntax error
    # under Python 3; the call form behaves identically on both.
    print('Saved graph to ' + gpath)
class ResultAggregatingTest(unittest.TestCase):

    def test_from_xml_from_json_and_aggregation(self):
        """Round-trip results through XML/JSON and check Result.combine_result."""
        ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
        self.runner = VmafQualityRunner(
            [asset, asset_original], None,
            fifo_mode=True,
            delete_workdir=True,
            result_store=None,
            optional_dict={'model_filepath': VmafConfig.model_path("vmaf_float_v0.6.1.pkl")},
            optional_dict2=None,
        )
        self.runner.run()
        results = self.runner.results

        # serialization round-trips must be lossless
        xml_string_expected = results[0].to_xml()
        xml_string_recon = Result.from_xml(xml_string_expected).to_xml()
        assert xml_string_expected == xml_string_recon, "XML files do not match"
        json_string_expected = results[0].to_json()
        json_string_recon = Result.from_json(json_string_expected).to_json()
        assert json_string_expected == json_string_recon, "JSON files do not match"

        combined_result = Result.combine_result([results[0], results[1]])

        # check that all keys are there
        combined_keys = [key for key in combined_result.result_dict]
        keys_0 = [key for key in results[0].result_dict]
        keys_1 = [key for key in results[1].result_dict]
        assert set(keys_0) == set(keys_1) == set(combined_keys)

        # per key: combined must be the concatenation of the two inputs
        for key in combined_keys:
            seq_0 = results[0].result_dict[key]
            seq_1 = results[1].result_dict[key]
            seq_c = combined_result.result_dict[key]
            assert len(seq_c) == len(seq_0) + len(seq_1)
            assert seq_c[0] == seq_0[0]
            assert seq_c[len(seq_0) - 1] == seq_0[len(seq_0) - 1]
            assert seq_c[len(seq_0)] == seq_1[0]
            assert seq_c[len(seq_c) - 1] == seq_1[len(seq_1) - 1]
def test_run_vmaf_runner_with_notyuv(self):
    """VMAF on mp4 ('notyuv') inputs scaled to 360x240 (default model)."""
    # fix: Python-2 print statement -> print() call (py3 syntax error otherwise)
    print('test on running VMAF runner...')
    ref_path = VmafConfig.test_resource_path("mp4", "Seeking_30_480_1050.mp4")
    dis_path = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
    asset = Asset(dataset="test", content_id=0, asset_id=0,
                  workdir_root=VmafConfig.workdir_path(),
                  ref_path=ref_path, dis_path=dis_path,
                  asset_dict={
                      'yuv_type': 'notyuv',
                      'quality_width': 360,
                      'quality_height': 240,
                  })
    self.runner = VmafQualityRunner([asset], None, fifo_mode=True,
                                    delete_workdir=True, result_store=None)
    self.runner.run()
    results = self.runner.results
    self.assertAlmostEqual(results[0]['VMAF_score'], 77.635307276411766, places=4)
def test_run_vmaf_runner_with_notyuv_jpg(self):
    """VMAF on JPEG inputs via the 'notyuv' decode path, yuv444p workfiles."""
    # fix: Python-2 print statement -> print() call (py3 syntax error otherwise)
    print('test on running VMAF runner on jpg...')
    ref_path = VmafConfig.test_resource_path("test_images", "bikes.jpg")
    dis_path = VmafConfig.test_resource_path("test_images", "bikes_dis.jpg")
    asset = Asset(dataset="test", content_id=0, asset_id=0,
                  workdir_root=VmafConfig.workdir_path(),
                  ref_path=ref_path, dis_path=dis_path,
                  asset_dict={
                      'yuv_type': 'notyuv',
                      'quality_width': 512,
                      'quality_height': 384,
                      'workfile_yuv_type': 'yuv444p',
                  })
    self.runner = VmafQualityRunner([asset], None, fifo_mode=True,
                                    delete_workdir=True, result_store=None)
    self.runner.run()
    results = self.runner.results
    self.assertAlmostEqual(results[0]['VMAF_score'], 63.27798081002585, places=4)
def test_run_vmafossexec_runner_with_notyuv(self):
    """vmafossexec-based runner on mp4 ('notyuv') inputs scaled to 360x240."""
    ref_path = VmafConfig.test_resource_path("mp4", "Seeking_30_480_1050.mp4")
    dis_path = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
    asset = Asset(dataset="test", content_id=0, asset_id=0,
                  workdir_root=VmafConfig.workdir_path(),
                  ref_path=ref_path, dis_path=dis_path,
                  asset_dict={'yuv_type': 'notyuv',
                              'quality_width': 360,
                              'quality_height': 240})
    self.runner = VmafossExecQualityRunner([asset], None, fifo_mode=True,
                                           delete_workdir=True, result_store=None)
    self.runner.run()
    score = self.runner.results[0]['VMAFOSSEXEC_score']
    self.assertAlmostEqual(score, 77.59110129333334, places=3)
def test_run_psnr_runner_with_notyuv_gblur(self):
    """PSNR of a clip against a (barely) blurred copy of itself."""
    # same file used as both reference and distorted input
    path = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
    asset = Asset(dataset="test", content_id=0, asset_id=0,
                  workdir_root=VmafConfig.workdir_path(),
                  ref_path=path, dis_path=path,
                  asset_dict={'yuv_type': 'notyuv',
                              'quality_width': 720,
                              'quality_height': 480,
                              'dis_gblur_cmd': 'sigma=0.01:steps=1'})
    self.runner = PsnrQualityRunner([asset], None, fifo_mode=False,
                                    delete_workdir=True, result_store=None)
    self.runner.run()
    self.assertAlmostEqual(self.runner.results[0]['PSNR_score'],
                           50.99313338666667, places=4)
class ScoreAggregationTest(unittest.TestCase):
    """Exercises score aggregation over scalar, 1-D, 2-D and 3-D score entries."""

    def setUp(self):
        ref_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
        dis_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_1_0.yuv")
        asset = Asset(dataset="test", content_id=0, asset_id=0,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=ref_path, dis_path=dis_path,
                      asset_dict={'width': 1920, 'height': 1080})
        self.runner = VmafQualityRunner(
            [asset], None,
            fifo_mode=True,
            delete_workdir=True,
            result_store=FileSystemResultStore(),
        )
        self.runner.run()
        self.result = self.runner.results[0]
        per_frame = self.result.result_dict['VMAF_scores']
        n_frames = len(per_frame)
        # column vector, shape (Nframes, 1)
        self.result.result_dict['VMAF_array_scores'] = per_frame.reshape(n_frames, 1)
        # two identical "model" rows stacked, shape (2, Nframes)
        row = per_frame.reshape(1, n_frames)
        self.result.result_dict['VMAF_two_models_array_scores'] = np.vstack((row, row))
        # 3-D entry used to check the dimensionality assertion below
        self.result.result_dict['VMAF_3D_array_scores'] = np.zeros((1, 1, 1))

    def tearDown(self):
        if hasattr(self, 'runner'):
            self.runner.remove_results()

    def test_to_score_str(self):
        self.result.set_score_aggregate_method(np.mean)
        # the following should give same value
        self.assertAlmostEqual(self.result['VMAF_score'], 35.0661575902223, places=4)
        # for a 2-D array, first dimension is # models and second is # frames
        self.assertAlmostEqual(self.result['VMAF_two_models_array_score'][0], 35.0661575902223, places=4)
        self.assertAlmostEqual(self.result['VMAF_two_models_array_score'][1], 35.0661575902223, places=4)
        self.assertAlmostEqual(self.result['VMAF_array_score'][0], 22.97749190550349, places=4)
        self.assertAlmostEqual(self.result['VMAF_array_score'][1], 44.79653061901706, places=4)
        self.assertAlmostEqual(self.result['VMAF_array_score'][2], 37.424450246146364, places=4)
        # a 3-D array must raise: score aggregation accepts only up to 2-D
        with self.assertRaises(AssertionError):
            _ = self.result['VMAF_3D_array_score']
def test_train_test_on_dataset_with_dis1st_thr_with_feature_optional_dict_good(self):
    """Train+test VMAF on the sample dataset, then score the training assets
    using the freshly written model file."""
    from vmaf.routine import train_test_vmaf_on_dataset
    train_dataset = import_python_file(
        VmafConfig.test_resource_path('dataset_sample.py'))
    model_param = import_python_file(
        VmafConfig.test_resource_path('model_param_sample.py'))
    feature_param = import_python_file(
        VmafConfig.test_resource_path('feature_param_sample_with_optional_dict_good.py'))
    # train and test on the same dataset; no plotting axes, no result store
    (train_fassembler, train_assets, train_stats,
     test_fassembler, test_assets, test_stats, _) = train_test_vmaf_on_dataset(
        train_dataset=train_dataset, test_dataset=train_dataset,
        feature_param=feature_param, model_param=model_param,
        train_ax=None, test_ax=None, result_store=None,
        parallelize=True, logger=None, fifo_mode=True,
        output_model_filepath=self.output_model_filepath,
    )
    self.train_fassembler = train_fassembler
    self.assertTrue(os.path.exists(self.output_model_filepath))
    self.assertAlmostEqual(train_stats['ys_label_pred'][0], 90.753010402770798, places=3)
    self.assertAlmostEqual(test_stats['ys_label_pred'][0], 90.753010402770798, places=3)
    # the saved model must be loadable and reproduce per-asset scores
    runner = VmafQualityRunner(
        train_assets, None,
        fifo_mode=True, delete_workdir=True, result_store=None,
        optional_dict={'model_filepath': self.output_model_filepath}
    )
    runner.run(parallelize=True)
    results = runner.results
    expected_scores = [89.55494473011981, 61.01289549048653,
                       90.75301241304798, 89.27013895870179]
    for result, expected in zip(results, expected_scores):
        self.assertAlmostEqual(result['VMAF_score'], expected, places=4)
def test_run_psnr_runner_with_frames_proc(self):
    """PSNR with start/end frame selection and identity proc callbacks."""
    ref_path = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
    dis_path = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
    # frame 2 on both sides, identity callback on the reference
    asset = Asset(dataset="test", content_id=0, asset_id=0,
                  workdir_root=VmafConfig.workdir_path(),
                  ref_path=ref_path, dis_path=dis_path,
                  asset_dict={'width': 576, 'height': 324,
                              'start_frame': 2, 'end_frame': 2,
                              'ref_proc_callback': 'identity'})
    # different frames per side (ref 2 vs dis 6), identity callback on the distorted
    asset2 = Asset(dataset="test", content_id=0, asset_id=0,
                   workdir_root=VmafConfig.workdir_path(),
                   ref_path=ref_path, dis_path=dis_path,
                   asset_dict={'width': 576, 'height': 324,
                               'ref_start_frame': 2, 'ref_end_frame': 2,
                               'dis_start_frame': 6, 'dis_end_frame': 6,
                               'dis_proc_callback': 'identity'})
    self.runner = PsnrQualityRunner([asset, asset2], None, fifo_mode=True,
                                    delete_workdir=True, result_store=None)
    self.runner.run(parallelize=False)
    results = self.runner.results
    self.assertAlmostEqual(results[0]['PSNR_score'], 30.993823, places=4)
    self.assertAlmostEqual(results[1]['PSNR_score'], 19.393160, places=4)
def test_run_vmaf_runner_with_notyuv(self):
    """VMAF on mp4 ('notyuv') inputs scaled to 360x240 (config.ROOT paths)."""
    # fix: Python-2 print statement -> print() call (py3 syntax error otherwise)
    print('test on running VMAF runner...')
    ref_path = config.ROOT + "/python/test/resource/mp4/Seeking_30_480_1050.mp4"
    dis_path = config.ROOT + "/python/test/resource/mp4/Seeking_10_288_375.mp4"
    asset = Asset(dataset="test", content_id=0, asset_id=0,
                  workdir_root=config.ROOT + "/workspace/workdir",
                  ref_path=ref_path, dis_path=dis_path,
                  asset_dict={'yuv_type': 'notyuv',
                              'quality_width': 360,
                              'quality_height': 240,
                              })
    self.runner = VmafQualityRunner(
        [asset], None, fifo_mode=True,
        delete_workdir=True,
        result_store=None
    )
    self.runner.run()
    results = self.runner.results
    self.assertAlmostEqual(results[0]['VMAF_score'], 79.052553444706504, places=4)
def train_test_vmaf_on_dataset(train_dataset, test_dataset, feature_param, model_param,
                               train_ax, test_ax, result_store,
                               parallelize=True, logger=None, fifo_mode=True,
                               output_model_filepath=None,
                               aggregate_method=np.mean,
                               **kwargs):
    """Train a VMAF model on train_dataset and optionally evaluate it on test_dataset.

    Pipeline: read assets -> (fall back to subjective modeling if any asset
    lacks groundtruth) -> assemble features -> train model -> report/plot
    training stats -> optionally save model -> repeat feature/predict/stats
    steps for test_dataset.

    Parameters
    ----------
    train_dataset, test_dataset : dataset modules; test_dataset may be None
        to skip the evaluation phase.
    feature_param, model_param : modules supplying feature_dict and
        model_type/model_param_dict respectively.
    train_ax, test_ax : matplotlib axes for scatter plots, or None to skip.
    result_store : feature result store passed to FeatureAssembler.
    output_model_filepath : if not None, the trained model is written there.
    aggregate_method : per-asset score aggregation (default np.mean).
    kwargs : forwarded to dataset reading, subjective modeling, training and
        prediction; recognizes 'subj_model_class' and 'dataset_reader_class'.

    Returns
    -------
    (train_fassembler, train_assets, train_stats,
     test_fassembler, test_assets, test_stats, model)
    test_* entries are None when test_dataset is None.
    """
    train_assets = read_dataset(train_dataset, **kwargs)
    train_raw_assets = None
    try:
        for train_asset in train_assets:
            assert train_asset.groundtruth is not None
    except AssertionError:
        # no groundtruth, try do subjective modeling
        from sureal.dataset_reader import RawDatasetReader
        from sureal.subjective_model import DmosModel
        subj_model_class = kwargs[
            'subj_model_class'] if 'subj_model_class' in kwargs and kwargs[
            'subj_model_class'] is not None else DmosModel
        dataset_reader_class = kwargs[
            'dataset_reader_class'] if 'dataset_reader_class' in kwargs else RawDatasetReader
        subjective_model = subj_model_class(
            dataset_reader_class(train_dataset))
        subjective_model.run_modeling(**kwargs)
        train_dataset_aggregate = subjective_model.to_aggregated_dataset(
            **kwargs)
        # keep the raw assets so raw groundtruth can be passed to get_stats below
        train_raw_assets = train_assets
        train_assets = read_dataset(train_dataset_aggregate, **kwargs)

    train_fassembler = FeatureAssembler(
        feature_dict=feature_param.feature_dict,
        feature_option_dict=None,
        assets=train_assets,
        logger=logger,
        fifo_mode=fifo_mode,
        delete_workdir=True,
        result_store=result_store,
        optional_dict=None,
        optional_dict2=None,
        parallelize=parallelize,
    )
    train_fassembler.run()
    train_features = train_fassembler.results

    for result in train_features:
        result.set_score_aggregate_method(aggregate_method)

    model_type = model_param.model_type
    model_param_dict = model_param.model_param_dict

    model_class = TrainTestModel.find_subclass(model_type)

    train_xys = model_class.get_xys_from_results(train_features)
    train_xs = model_class.get_xs_from_results(train_features)
    train_ys = model_class.get_ys_from_results(train_features)

    model = model_class(model_param_dict, logger)
    model.train(train_xys, **kwargs)

    # append additional information to model before saving, so that
    # VmafQualityRunner can read and process
    model.append_info('feature_dict', feature_param.feature_dict)
    if 'score_clip' in model_param_dict:
        VmafQualityRunner.set_clip_score(model, model_param_dict['score_clip'])
    if 'score_transform' in model_param_dict:
        VmafQualityRunner.set_transform_score(
            model, model_param_dict['score_transform'])

    # predict back on the training set to compute training statistics
    train_ys_pred = VmafQualityRunner.predict_with_model(
        model, train_xs, **kwargs)['ys_pred']

    raw_groundtruths = None if train_raw_assets is None else \
        list(map(lambda asset: asset.raw_groundtruth, train_raw_assets))

    train_stats = model.get_stats(train_ys['label'],
                                  train_ys_pred,
                                  ys_label_raw=raw_groundtruths)

    log = 'Stats on training data: {}'.format(
        model.format_stats_for_print(train_stats))
    if logger:
        logger.info(log)
    else:
        print(log)

    # save model
    if output_model_filepath is not None:
        model.to_file(output_model_filepath)

    if train_ax is not None:
        train_content_ids = list(
            map(lambda asset: asset.content_id, train_assets))
        model_class.plot_scatter(train_ax,
                                 train_stats,
                                 content_ids=train_content_ids)
        train_ax.set_xlabel('True Score')
        train_ax.set_ylabel("Predicted Score")
        train_ax.grid()
        train_ax.set_title(
            "Dataset: {dataset}, Model: {model}\n{stats}".format(
                dataset=train_dataset.dataset_name,
                model=model.model_id,
                stats=model_class.format_stats_for_plot(train_stats)))

    # === test model on test dataset ===
    if test_dataset is None:
        test_assets = None
        test_stats = None
        test_fassembler = None
    else:
        test_assets = read_dataset(test_dataset, **kwargs)
        test_raw_assets = None
        try:
            for test_asset in test_assets:
                assert test_asset.groundtruth is not None
        except AssertionError:
            # no groundtruth, try do subjective modeling
            from sureal.dataset_reader import RawDatasetReader
            from sureal.subjective_model import DmosModel
            subj_model_class = kwargs[
                'subj_model_class'] if 'subj_model_class' in kwargs and kwargs[
                'subj_model_class'] is not None else DmosModel
            dataset_reader_class = kwargs[
                'dataset_reader_class'] if 'dataset_reader_class' in kwargs else RawDatasetReader
            subjective_model = subj_model_class(
                dataset_reader_class(test_dataset))
            subjective_model.run_modeling(**kwargs)
            test_dataset_aggregate = subjective_model.to_aggregated_dataset(
                **kwargs)
            test_raw_assets = test_assets
            test_assets = read_dataset(test_dataset_aggregate, **kwargs)

        # NOTE(review): unlike the training assembler, this one hard-codes
        # parallelize=True rather than honoring the parameter -- confirm intended.
        test_fassembler = FeatureAssembler(
            feature_dict=feature_param.feature_dict,
            feature_option_dict=None,
            assets=test_assets,
            logger=logger,
            fifo_mode=fifo_mode,
            delete_workdir=True,
            result_store=result_store,
            optional_dict=None,
            optional_dict2=None,
            parallelize=True,
        )
        test_fassembler.run()
        test_features = test_fassembler.results

        for result in test_features:
            result.set_score_aggregate_method(aggregate_method)

        test_xs = model_class.get_xs_from_results(test_features)
        test_ys = model_class.get_ys_from_results(test_features)

        test_ys_pred = VmafQualityRunner.predict_with_model(
            model, test_xs, **kwargs)['ys_pred']

        raw_groundtruths = None if test_raw_assets is None else \
            list(map(lambda asset: asset.raw_groundtruth, test_raw_assets))

        test_stats = model.get_stats(test_ys['label'],
                                     test_ys_pred,
                                     ys_label_raw=raw_groundtruths)

        log = 'Stats on testing data: {}'.format(
            model_class.format_stats_for_print(test_stats))
        if logger:
            logger.info(log)
        else:
            print(log)

        if test_ax is not None:
            test_content_ids = list(
                map(lambda asset: asset.content_id, test_assets))
            model_class.plot_scatter(test_ax,
                                     test_stats,
                                     content_ids=test_content_ids)
            test_ax.set_xlabel('True Score')
            test_ax.set_ylabel("Predicted Score")
            test_ax.grid()
            test_ax.set_title(
                "Dataset: {dataset}, Model: {model}\n{stats}".format(
                    dataset=test_dataset.dataset_name,
                    model=model.model_id,
                    stats=model_class.format_stats_for_plot(test_stats)))

    return train_fassembler, train_assets, train_stats, test_fassembler, test_assets, test_stats, model
def train_test_vmaf_on_dataset(train_dataset, test_dataset, feature_param, model_param,
                               train_ax, test_ax, result_store,
                               parallelize=True, logger=None, fifo_mode=True,
                               output_model_filepath=None,
                               aggregate_method=np.mean,
                               **kwargs):
    """Train a VMAF model on train_dataset and optionally evaluate it on test_dataset.

    Pipeline: read assets -> (fall back to subjective modeling if any asset
    lacks groundtruth) -> assemble features -> train model -> report/plot
    training stats -> optionally save model -> repeat for test_dataset.

    Returns (train_fassembler, train_assets, train_stats,
             test_fassembler, test_assets, test_stats, model); the test_*
    entries are None when test_dataset is None.

    Fixes: the two Python-2 ``print log`` statements (syntax errors under
    Python 3) are replaced with ``print(log)``, which behaves identically on
    Python 2; bare ``map(...)`` results are materialized with ``list(...)``
    for consistency with the sibling revision of this routine.
    """
    train_assets = read_dataset(train_dataset, **kwargs)
    train_raw_assets = None
    try:
        for train_asset in train_assets:
            assert train_asset.groundtruth is not None
    except AssertionError:
        # no groundtruth, try do subjective modeling
        subj_model_class = kwargs['subj_model_class'] if 'subj_model_class' in kwargs and kwargs['subj_model_class'] is not None else DmosModel
        subjective_model = subj_model_class(RawDatasetReader(train_dataset))
        subjective_model.run_modeling(**kwargs)
        train_dataset_aggregate = subjective_model.to_aggregated_dataset(**kwargs)
        # keep raw assets so raw groundtruth can be passed to get_stats below
        train_raw_assets = train_assets
        train_assets = read_dataset(train_dataset_aggregate, **kwargs)

    train_fassembler = FeatureAssembler(
        feature_dict=feature_param.feature_dict,
        feature_option_dict=None,
        assets=train_assets,
        logger=logger,
        fifo_mode=fifo_mode,
        delete_workdir=True,
        result_store=result_store,
        optional_dict=None,
        optional_dict2=None,
        parallelize=parallelize,
    )
    train_fassembler.run()
    train_features = train_fassembler.results

    for result in train_features:
        result.set_score_aggregate_method(aggregate_method)

    model_type = model_param.model_type
    model_param_dict = model_param.model_param_dict

    model_class = TrainTestModel.find_subclass(model_type)

    train_xys = model_class.get_xys_from_results(train_features)
    train_xs = model_class.get_xs_from_results(train_features)
    train_ys = model_class.get_ys_from_results(train_features)

    model = model_class(model_param_dict, logger)
    model.train(train_xys)

    # append additional information to model before saving, so that
    # VmafQualityRunner can read and process
    model.append_info('feature_dict', feature_param.feature_dict)
    if 'score_clip' in model_param_dict:
        VmafQualityRunner.set_clip_score(model, model_param_dict['score_clip'])
    if 'score_transform' in model_param_dict:
        VmafQualityRunner.set_transform_score(model, model_param_dict['score_transform'])

    # NOTE(review): later revisions index ['ys_pred'] on this return value --
    # confirm what predict_with_model returns in this revision before changing.
    train_ys_pred = VmafQualityRunner.predict_with_model(model, train_xs, **kwargs)

    raw_groundtruths = None if train_raw_assets is None else \
        list(map(lambda asset: asset.raw_groundtruth, train_raw_assets))

    train_stats = model.get_stats(train_ys['label'], train_ys_pred, ys_label_raw=raw_groundtruths)

    log = 'Stats on training data: {}'.format(model.format_stats(train_stats))
    if logger:
        logger.info(log)
    else:
        print(log)

    # save model
    if output_model_filepath is not None:
        model.to_file(output_model_filepath)

    if train_ax is not None:
        train_content_ids = list(map(lambda asset: asset.content_id, train_assets))
        model_class.plot_scatter(train_ax, train_stats, train_content_ids)
        train_ax.set_xlabel('True Score')
        train_ax.set_ylabel("Predicted Score")
        train_ax.grid()
        train_ax.set_title("Dataset: {dataset}, Model: {model}\n{stats}".format(
            dataset=train_dataset.dataset_name,
            model=model.model_id,
            stats=model_class.format_stats(train_stats)
        ))

    # === test model on test dataset ===
    if test_dataset is None:
        test_assets = None
        test_stats = None
        test_fassembler = None
    else:
        test_assets = read_dataset(test_dataset, **kwargs)
        test_raw_assets = None
        try:
            for test_asset in test_assets:
                assert test_asset.groundtruth is not None
        except AssertionError:
            # no groundtruth, try do subjective modeling
            subj_model_class = kwargs['subj_model_class'] if 'subj_model_class' in kwargs and kwargs['subj_model_class'] is not None else DmosModel
            subjective_model = subj_model_class(RawDatasetReader(test_dataset))
            subjective_model.run_modeling(**kwargs)
            test_dataset_aggregate = subjective_model.to_aggregated_dataset(**kwargs)
            test_raw_assets = test_assets
            test_assets = read_dataset(test_dataset_aggregate, **kwargs)

        # NOTE(review): parallelize=True is hard-coded here, unlike the
        # training assembler which honors the parameter -- confirm intended.
        test_fassembler = FeatureAssembler(
            feature_dict=feature_param.feature_dict,
            feature_option_dict=None,
            assets=test_assets,
            logger=logger,
            fifo_mode=fifo_mode,
            delete_workdir=True,
            result_store=result_store,
            optional_dict=None,
            optional_dict2=None,
            parallelize=True,
        )
        test_fassembler.run()
        test_features = test_fassembler.results

        for result in test_features:
            result.set_score_aggregate_method(aggregate_method)

        test_xs = model_class.get_xs_from_results(test_features)
        test_ys = model_class.get_ys_from_results(test_features)

        test_ys_pred = VmafQualityRunner.predict_with_model(model, test_xs, **kwargs)

        raw_groundtruths = None if test_raw_assets is None else \
            list(map(lambda asset: asset.raw_groundtruth, test_raw_assets))

        test_stats = model_class.get_stats(test_ys['label'], test_ys_pred, ys_label_raw=raw_groundtruths)

        log = 'Stats on testing data: {}'.format(model_class.format_stats(test_stats))
        if logger:
            logger.info(log)
        else:
            print(log)

        if test_ax is not None:
            test_content_ids = list(map(lambda asset: asset.content_id, test_assets))
            model_class.plot_scatter(test_ax, test_stats, test_content_ids)
            test_ax.set_xlabel('True Score')
            test_ax.set_ylabel("Predicted Score")
            test_ax.grid()
            test_ax.set_title("Dataset: {dataset}, Model: {model}\n{stats}".format(
                dataset=test_dataset.dataset_name,
                model=model.model_id,
                stats=model_class.format_stats(test_stats)
            ))

    return train_fassembler, train_assets, train_stats, test_fassembler, test_assets, test_stats, model
class QualityRunnerTest(unittest.TestCase):

    def tearDown(self):
        # drop any results the runner may have produced
        if hasattr(self, 'runner'):
            self.runner.remove_results()

    def setUp(self):
        self.result_store = FileSystemResultStore()

    def test_run_psnr_runner_with_notyuv_gblur(self):
        """PSNR of a clip against a (barely) blurred copy of itself."""
        # the same file serves as both reference and distorted input
        path = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
        asset = Asset(dataset="test", content_id=0, asset_id=0,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=path, dis_path=path,
                      asset_dict={'yuv_type': 'notyuv',
                                  'quality_width': 720,
                                  'quality_height': 480,
                                  'dis_gblur_cmd': 'sigma=0.01:steps=1'})
        self.runner = PsnrQualityRunner([asset], None, fifo_mode=True,
                                        delete_workdir=True, result_store=None)
        self.runner.run()
        self.assertAlmostEqual(self.runner.results[0]['PSNR_score'],
                               51.12090432666667, places=4)

    def test_run_vmaf_runner_with_notyuv_gblur(self):
        """VMAF on mp4 inputs with gblur applied to the distorted side."""
        ref_path = VmafConfig.test_resource_path("mp4", "Seeking_30_480_1050.mp4")
        dis_path = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
        asset = Asset(dataset="test", content_id=0, asset_id=0,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=ref_path, dis_path=dis_path,
                      asset_dict={'yuv_type': 'notyuv',
                                  'quality_width': 360,
                                  'quality_height': 240,
                                  'dis_gblur_cmd': 'sigma=0.01:steps=1'})
        self.runner = VmafQualityRunner([asset], None, fifo_mode=True,
                                        delete_workdir=True, result_store=None)
        self.runner.run()
        self.assertAlmostEqual(self.runner.results[0]['VMAF_score'],
                               77.28044458354246, places=4)
class QualityRunnerTest(unittest.TestCase):

    def tearDown(self):
        # drop any results the runner may have produced
        if hasattr(self, 'runner'):
            self.runner.remove_results()

    def setUp(self):
        self.result_store = FileSystemResultStore()

    def test_run_psnr_runner_with_notyuv_gblur(self):
        """PSNR of a clip against a (barely) blurred copy of itself."""
        # the same file serves as both reference and distorted input
        path = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
        asset = Asset(dataset="test", content_id=0, asset_id=0,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=path, dis_path=path,
                      asset_dict={'yuv_type': 'notyuv',
                                  'quality_width': 720,
                                  'quality_height': 480,
                                  'dis_gblur_cmd': 'sigma=0.01:steps=1'})
        self.runner = PsnrQualityRunner([asset], None, fifo_mode=False,
                                        delete_workdir=True, result_store=None)
        self.runner.run()
        self.assertAlmostEqual(self.runner.results[0]['PSNR_score'],
                               50.99313338666667, places=4)

    def test_run_vmaf_runner_with_notyuv_gblur(self):
        """VMAF on mp4 inputs with gblur applied to the distorted side."""
        ref_path = VmafConfig.test_resource_path("mp4", "Seeking_30_480_1050.mp4")
        dis_path = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
        asset = Asset(dataset="test", content_id=0, asset_id=0,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=ref_path, dis_path=dis_path,
                      asset_dict={'yuv_type': 'notyuv',
                                  'quality_width': 360,
                                  'quality_height': 240,
                                  'dis_gblur_cmd': 'sigma=0.01:steps=1'})
        self.runner = VmafQualityRunner([asset], None, fifo_mode=True,
                                        delete_workdir=True, result_store=None)
        self.runner.run()
        self.assertAlmostEqual(self.runner.results[0]['VMAF_score'],
                               77.28938600125885, places=4)

    def test_run_vmaf_runner_with_yuv_lutyuv(self):
        """VMAF on raw YUV inputs with a lutyuv (luma-doubling) filter applied."""
        ref_path = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
        dis_path = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
        asset = Asset(dataset="test", content_id=0, asset_id=0,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=ref_path, dis_path=dis_path,
                      asset_dict={'width': 576, 'height': 324,
                                  'quality_width': 360,
                                  'quality_height': 240,
                                  'lutyuv_cmd': 'y=2*val'})
        self.runner = VmafQualityRunner([asset], None, fifo_mode=False,
                                        delete_workdir=True, result_store=None)
        self.runner.run()
        self.assertAlmostEqual(self.runner.results[0]['VMAF_score'],
                               77.18873019841408, places=4)
class QualityRunnerTest(MyTestCase):
    """Integration tests for the quality runners (PSNR / VMAF / vmafossexec)
    covering non-YUV inputs, image inputs, explicit frame ranges, processing
    callbacks, and ffmpeg eq filtering."""

    def setUp(self):
        super().setUp()
        self.result_store = FileSystemResultStore()

    def tearDown(self):
        # Drop results written by the runner under test, then defer to base.
        if hasattr(self, 'runner'):
            self.runner.remove_results()
        super().tearDown()

    def test_run_psnr_runner_with_notyuv(self):
        """PSNR of an mp4 against itself is capped at the 60 dB ceiling."""
        ref_file = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
        dis_file = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
        asset = Asset(dataset="test", content_id=0, asset_id=0,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=ref_file, dis_path=dis_file,
                      asset_dict={
                          'yuv_type': 'notyuv',
                          'quality_width': 720,
                          'quality_height': 480,
                      })

        self.runner = PsnrQualityRunner(
            [asset], None,
            fifo_mode=True,
            delete_workdir=True,
            result_store=None)
        self.runner.run()

        results = self.runner.results
        self.assertAlmostEqual(results[0]['PSNR_score'], 60.0, places=4)

    def test_run_vmaf_runner_with_notyuv(self):
        """VMAF (float v0.6.1 model) on two non-YUV (mp4) inputs."""
        ref_file = VmafConfig.test_resource_path("mp4", "Seeking_30_480_1050.mp4")
        dis_file = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
        asset = Asset(dataset="test", content_id=0, asset_id=0,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=ref_file, dis_path=dis_file,
                      asset_dict={
                          'yuv_type': 'notyuv',
                          'quality_width': 360,
                          'quality_height': 240,
                      })

        self.runner = VmafQualityRunner(
            [asset], None,
            fifo_mode=True,
            delete_workdir=True,
            result_store=None,
            optional_dict={
                'model_filepath': VmafConfig.model_path("vmaf_float_v0.6.1.json"),
            },
        )
        self.runner.run()

        results = self.runner.results
        self.assertAlmostEqual(results[0]['VMAF_score'], 77.61273448644646, places=4)

    def test_run_vmaf_runner_with_notyuv_jpg(self):
        """VMAF on still-image (jpg) inputs, working in yuv444p."""
        ref_file = VmafConfig.test_resource_path("test_images", "bikes.jpg")
        dis_file = VmafConfig.test_resource_path("test_images", "bikes_dis.jpg")
        asset = Asset(dataset="test", content_id=0, asset_id=0,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=ref_file, dis_path=dis_file,
                      asset_dict={
                          'yuv_type': 'notyuv',
                          'quality_width': 512,
                          'quality_height': 384,
                          'workfile_yuv_type': 'yuv444p',
                      })

        self.runner = VmafQualityRunner(
            [asset], None,
            fifo_mode=True,
            delete_workdir=True,
            result_store=None,
            optional_dict={
                'model_filepath': VmafConfig.model_path("vmaf_float_v0.6.1.json"),
            },
        )
        self.runner.run()

        results = self.runner.results
        self.assertAlmostEqual(results[0]['VMAF_score'], 63.255016130209064, places=4)

    def test_run_vmafossexec_runner_with_notyuv(self):
        """vmafossexec on non-YUV (mp4) inputs."""
        ref_file = VmafConfig.test_resource_path("mp4", "Seeking_30_480_1050.mp4")
        dis_file = VmafConfig.test_resource_path("mp4", "Seeking_10_288_375.mp4")
        asset = Asset(dataset="test", content_id=0, asset_id=0,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=ref_file, dis_path=dis_file,
                      asset_dict={
                          'yuv_type': 'notyuv',
                          'quality_width': 360,
                          'quality_height': 240,
                      })

        self.runner = VmafossExecQualityRunner(
            [asset], None,
            fifo_mode=True,
            delete_workdir=True,
            result_store=None)
        self.runner.run()

        results = self.runner.results
        self.assertAlmostEqual(results[0]['VMAFOSSEXEC_score'], 77.59110129333334, places=3)

    def test_run_psnr_runner_with_frames(self):
        """PSNR over explicit frame ranges, including mismatched ref/dis frames."""
        ref_file = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
        dis_file = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
        # Single frame, same index on both sides.
        asset = Asset(dataset="test", content_id=0, asset_id=0,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=ref_file, dis_path=dis_file,
                      asset_dict={
                          'width': 576, 'height': 324,
                          'start_frame': 2, 'end_frame': 2,
                      })
        # Single frame, deliberately different indices on ref vs dis.
        asset2 = Asset(dataset="test", content_id=0, asset_id=0,
                       workdir_root=VmafConfig.workdir_path(),
                       ref_path=ref_file, dis_path=dis_file,
                       asset_dict={
                           'width': 576, 'height': 324,
                           'ref_start_frame': 2, 'ref_end_frame': 2,
                           'dis_start_frame': 6, 'dis_end_frame': 6,
                       })

        self.runner = PsnrQualityRunner(
            [asset, asset2], None,
            fifo_mode=True,
            delete_workdir=True,
            result_store=None)
        self.runner.run(parallelize=False)

        results = self.runner.results
        self.assertAlmostEqual(results[0]['PSNR_score'], 30.993823, places=4)
        self.assertAlmostEqual(results[1]['PSNR_score'], 19.393160, places=4)

    def test_run_psnr_runner_with_frames_proc(self):
        """Same as the frame-range test, but with identity proc callbacks,
        which must not change the scores."""
        ref_file = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
        dis_file = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
        asset = Asset(dataset="test", content_id=0, asset_id=0,
                      workdir_root=VmafConfig.workdir_path(),
                      ref_path=ref_file, dis_path=dis_file,
                      asset_dict={
                          'width': 576, 'height': 324,
                          'start_frame': 2, 'end_frame': 2,
                          'ref_proc_callback': 'identity',
                      })
        asset2 = Asset(dataset="test", content_id=0, asset_id=0,
                       workdir_root=VmafConfig.workdir_path(),
                       ref_path=ref_file, dis_path=dis_file,
                       asset_dict={
                           'width': 576, 'height': 324,
                           'ref_start_frame': 2, 'ref_end_frame': 2,
                           'dis_start_frame': 6, 'dis_end_frame': 6,
                           'dis_proc_callback': 'identity',
                       })

        self.runner = PsnrQualityRunner(
            [asset, asset2], None,
            fifo_mode=True,
            delete_workdir=True,
            result_store=None)
        self.runner.run(parallelize=False)

        results = self.runner.results
        self.assertAlmostEqual(results[0]['PSNR_score'], 30.993823, places=4)
        self.assertAlmostEqual(results[1]['PSNR_score'], 19.393160, places=4)

    def test_run_vmaf_runner_eq_cmd_gamma1d5(self):
        """VMAF with an ffmpeg eq filter (gamma=1.5) applied to both inputs."""
        _ref, _dis, asset, asset_original = set_default_576_324_videos_for_testing()
        asset.asset_dict['eq_cmd'] = 'gamma=1.5'
        asset_original.asset_dict['eq_cmd'] = 'gamma=1.5'

        self.runner = VmafQualityRunner(
            [asset, asset_original], None,
            fifo_mode=False,
            delete_workdir=True,
            result_store=None,
        )
        self.runner.run(parallelize=False)
        results = self.runner.results

        # Distorted vs reference: elementary feature scores.
        self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_scale0_score'],
                               0.34796083546749507, places=4)
        self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_scale1_score'],
                               0.7430727630647906, places=4)
        self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_scale2_score'],
                               0.8432128968501337, places=4)
        self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_scale3_score'],
                               0.9003110626539442, places=4)
        self.assertAlmostEqual(results[0]['VMAF_integer_feature_motion2_score'],
                               3.951617145833333, places=4)
        self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm2_score'],
                               0.9239987660077826, places=4)

        # Reference vs itself: features should be numerically (near-)perfect.
        self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_scale0_score'],
                               1.00000001415, places=4)
        self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_scale1_score'],
                               0.99999972612, places=4)
        self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_scale2_score'],
                               0.999999465724, places=4)
        self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_scale3_score'],
                               0.999999399683, places=4)
        self.assertAlmostEqual(results[1]['VMAF_integer_feature_motion2_score'],
                               3.951617145833333, places=4)
        self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm2_score'],
                               1.0, places=4)

        # Features not produced by this model must be absent from the result.
        with self.assertRaises(KeyError):
            self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_score'],
                                   1.0, places=4)
        with self.assertRaises(KeyError):
            self.assertAlmostEqual(results[1]['VMAF_integer_feature_ansnr_score'],
                                   1.0, places=4)
        with self.assertRaises(KeyError):
            self.assertAlmostEqual(results[1]['VMAF_integer_feature_motion_score'],
                                   1.0, places=4)

        self.assertAlmostEqual(results[0]['VMAF_score'], 72.90549596147889, places=4)
        self.assertAlmostEqual(results[1]['VMAF_score'], 99.946416604585025, places=4)
def test_run_vmaf_runner_eq_cmd_gamma1d5(self): ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing( ) asset.asset_dict['eq_cmd'] = 'gamma=1.5' asset_original.asset_dict['eq_cmd'] = 'gamma=1.5' self.runner = VmafQualityRunner( [asset, asset_original], None, fifo_mode=False, delete_workdir=True, result_store=None, ) self.runner.run(parallelize=False) results = self.runner.results self.assertAlmostEqual( results[0]['VMAF_integer_feature_vif_scale0_score'], 0.34796083546749507, places=4) self.assertAlmostEqual( results[0]['VMAF_integer_feature_vif_scale1_score'], 0.7430727630647906, places=4) self.assertAlmostEqual( results[0]['VMAF_integer_feature_vif_scale2_score'], 0.8432128968501337, places=4) self.assertAlmostEqual( results[0]['VMAF_integer_feature_vif_scale3_score'], 0.9003110626539442, places=4) self.assertAlmostEqual( results[0]['VMAF_integer_feature_motion2_score'], 3.951617145833333, places=4) self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm2_score'], 0.9239987660077826, places=4) self.assertAlmostEqual( results[1]['VMAF_integer_feature_vif_scale0_score'], 1.00000001415, places=4) self.assertAlmostEqual( results[1]['VMAF_integer_feature_vif_scale1_score'], 0.99999972612, places=4) self.assertAlmostEqual( results[1]['VMAF_integer_feature_vif_scale2_score'], 0.999999465724, places=4) self.assertAlmostEqual( results[1]['VMAF_integer_feature_vif_scale3_score'], 0.999999399683, places=4) self.assertAlmostEqual( results[1]['VMAF_integer_feature_motion2_score'], 3.951617145833333, places=4) self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm2_score'], 1.0, places=4) with self.assertRaises(KeyError): self.assertAlmostEqual( results[1]['VMAF_integer_feature_vif_score'], 1.0, places=4) with self.assertRaises(KeyError): self.assertAlmostEqual( results[1]['VMAF_integer_feature_ansnr_score'], 1.0, places=4) with self.assertRaises(KeyError): self.assertAlmostEqual( results[1]['VMAF_integer_feature_motion_score'], 1.0, 
places=4) self.assertAlmostEqual(results[0]['VMAF_score'], 72.90549596147889, places=4) self.assertAlmostEqual(results[1]['VMAF_score'], 99.946416604585025, places=4)