def test_from_xml_from_json_and_aggregation(self):
    """Round-trip a Result through XML and JSON, then verify that
    Result.combine_result concatenates per-frame scores in order."""
    ref_path, dis_path, asset, asset_original = \
        set_default_576_324_videos_for_testing()
    self.runner = VmafQualityRunner(
        [asset, asset_original],
        None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=None,
        optional_dict={
            'model_filepath': VmafConfig.model_path("vmaf_float_v0.6.1.pkl"),
        },
        optional_dict2=None,
    )
    self.runner.run()
    results = self.runner.results

    # serialization must be lossless: serialize -> parse -> serialize again
    xml_string_expected = results[0].to_xml()
    xml_string_recon = Result.from_xml(xml_string_expected).to_xml()
    json_string_expected = results[0].to_json()
    json_string_recon = Result.from_json(json_string_expected).to_json()
    assert xml_string_expected == xml_string_recon, "XML files do not match"
    assert json_string_expected == json_string_recon, "JSON files do not match"

    combined_result = Result.combine_result([results[0], results[1]])

    # check that all keys are there
    combined_result_keys = list(combined_result.result_dict)
    keys_0 = list(results[0].result_dict)
    keys_1 = list(results[1].result_dict)
    assert set(keys_0) == set(keys_1) == set(combined_result_keys)

    # check that the dictionaries have been copied as expected
    for key in combined_result_keys:
        combined_scores = combined_result.result_dict[key]
        scores_0 = results[0].result_dict[key]
        scores_1 = results[1].result_dict[key]
        assert len(combined_scores) == len(scores_0) + len(scores_1)
        # the first segment must mirror results[0] at both ends ...
        assert combined_scores[0] == scores_0[0]
        assert combined_scores[len(scores_0) - 1] == scores_0[len(scores_0) - 1]
        # ... and the second segment must mirror results[1] at both ends
        assert combined_scores[len(scores_0)] == scores_1[0]
        assert combined_scores[len(combined_scores) - 1] == \
            scores_1[len(scores_1) - 1]
def _run_on_asset(self, asset):
    """Run the NIQE feature assembler on the asset, feed the extracted
    features to the no-reference model, and wrap the prediction in a
    Result.

    Overrides Executor._run_on_asset(self, asset); both
    Executor._run_on_asset(self, asset) and
    QualityRunner._read_result(self, asset) get bypassed.
    """
    assembler = self._get_niqe_feature_assembler_instance(asset)
    assembler.run()
    feature_result = assembler.results[0]

    xs = NiqeTrainTestModel.get_xs_from_results([feature_result])
    model = self._load_model(asset)
    ys_pred = model.predict(xs)['ys_label_pred']

    # start from all extracted features, then attach the quality score
    result_dict = dict(feature_result.result_dict)
    result_dict[self.get_scores_key()] = ys_pred
    return Result(asset, self.executor_id, result_dict)
def _run_on_asset(self, asset):
    """Run the VMAF feature assembler on the asset, feed the feature
    vector to the loaded model, and wrap the prediction in a Result.

    Overrides Executor._run_on_asset(self, asset); both
    Executor._run_on_asset(self, asset) and
    QualityRunner._read_result(self, asset) get bypassed.
    """
    vmaf_fassembler = self._get_vmaf_feature_assembler_instance(asset)
    vmaf_fassembler.run()
    feature_result = vmaf_fassembler.results[0]
    model = self._load_model(asset)
    xs = model.get_per_unit_xs_from_a_result(feature_result)

    # Optional prediction flags; both default to False when absent.
    # (Replaces two verbose `is not None and key in dict` lookups.)
    options = self.optional_dict if self.optional_dict is not None else {}
    disable_clip_score = options.get('disable_clip_score', False)
    enable_transform_score = options.get('enable_transform_score', False)

    pred_result = self.predict_with_model(
        model, xs,
        disable_clip_score=disable_clip_score,
        enable_transform_score=enable_transform_score)
    result_dict = self._populate_result_dict(feature_result, pred_result)
    return Result(asset, self.executor_id, result_dict)
def _create_result_list(self, to_assemble_list):
    """Parse each input file into an XML string and deserialize every
    string into a Result object."""
    xml_strings = self._parse_files(to_assemble_list)
    return [Result.from_xml(xml_string) for xml_string in xml_strings]
def _run_on_asset(self, asset):
    """Override Executor._run_on_asset(self, asset): run the VMAF
    feature assembler and expose a single extracted feature as this
    runner's quality score."""
    assembler = self._get_vmaf_feature_assembler_instance(asset)
    assembler.run()
    feature_result = assembler.results[0]
    feature_key = VmafFeatureExtractor.get_scores_key(self.FEATURE_NAME)
    result_dict = {self.get_scores_key(): feature_result[feature_key]}
    return Result(asset, self.executor_id, result_dict)
def assemble(self):
    """Main file assembly logic: collect the input files, parse each
    into a Result, and merge them into one combined Result."""
    file_list = self.create_assembly_file_list(self.to_assemble_input)
    self._assert(file_list)
    results = self._create_result_list(file_list)
    return Result.combine_result(results)
def _create_result_list(self, to_assemble_list):
    """Parse each input file into a JSON object, re-serialize it, and
    deserialize the string into a Result object."""
    parsed_jsons = self._parse_files(to_assemble_list)
    return [Result.from_json(json.dumps(parsed)) for parsed in parsed_jsons]
def _run_on_asset(self, asset):
    """Run the ST-RRED(opt) feature assembler on the asset and rename
    the 'strred' feature entry to this runner's scores key.

    Returns a Result whose dict holds all extracted features plus the
    quality score under self.get_scores_key().
    """
    vmaf_fassembler = self._get_feature_assembler_instance(asset)
    vmaf_fassembler.run()
    feature_result = vmaf_fassembler.results[0]

    result_dict = {}
    # dict.update already copies the items; the original intermediate
    # .copy() was redundant
    result_dict.update(feature_result.result_dict)  # add feature result
    # move the strred score to this runner's key (pop replaces the
    # original add-then-delete sequence)
    strred_key = StrredOptFeatureExtractor.get_scores_key('strred')
    result_dict[self.get_scores_key()] = result_dict.pop(strred_key)
    return Result(asset, self.executor_id, result_dict)
def _run_on_asset(self, asset):
    """Override Executor._run_on_asset(self, asset): run the MS-SSIM
    feature assembler and rename the 'ms_ssim' feature entry to this
    runner's scores key.

    Returns a Result whose dict holds all extracted features plus the
    quality score under self.get_scores_key().
    """
    vmaf_fassembler = self._get_feature_assembler_instance(asset)
    vmaf_fassembler.run()
    feature_result = vmaf_fassembler.results[0]

    result_dict = {}
    # dict.update already copies the items; the original intermediate
    # .copy() was redundant
    result_dict.update(feature_result.result_dict)  # add feature result
    # move the ms_ssim score to this runner's key (pop replaces the
    # original add-then-delete sequence)
    ms_ssim_key = MsSsimFeatureExtractor.get_scores_key('ms_ssim')
    result_dict[self.get_scores_key()] = result_dict.pop(ms_ssim_key)
    return Result(asset, self.executor_id, result_dict)
def _run_on_asset(self, asset):
    """Run the SpEED (Matlab) feature assembler on the asset and expose
    the scale-4 SpEED feature as this runner's quality score.

    Unlike the strred/ms_ssim runners, the speed_4 entry is kept in the
    dict alongside the runner's own scores key (original behavior).
    """
    speed_fassembler = self._get_feature_assembler_instance(asset)
    speed_fassembler.run()
    feature_result = speed_fassembler.results[0]

    result_dict = {}
    # dict.update already copies the items; the original intermediate
    # .copy() was redundant
    result_dict.update(feature_result.result_dict)  # add feature result
    # add SpEED score at scale 4
    result_dict[self.get_scores_key()] = feature_result.result_dict[
        SpEEDMatlabFeatureExtractor.get_scores_key('speed_4')]
    return Result(asset, self.executor_id, result_dict)
def _run_on_asset(self, asset):
    """Run one feature assembler per ensemble member, predict with each
    member model, then average the per-frame predictions into the final
    quality score.

    Overrides Executor._run_on_asset(self, asset); both
    Executor._run_on_asset(self, asset) and
    QualityRunner._read_result(self, asset) get bypassed.
    """
    # each model is associated with a Feature Assembler
    ensemblevmaf_fassemblers = \
        self._get_ensemblevmaf_feature_assembler_instance(asset)
    Nmodels = len(ensemblevmaf_fassemblers)
    assert Nmodels > 0

    # prediction flags are loop-invariant: read them once, not per model
    if self.optional_dict is not None and 'disable_clip_score' in self.optional_dict:
        disable_clip_score = self.optional_dict['disable_clip_score']
    else:
        disable_clip_score = False
    if self.optional_dict is not None and 'enable_transform_score' in self.optional_dict:
        enable_transform_score = self.optional_dict['enable_transform_score']
    else:
        enable_transform_score = False

    pred_result_all_models = []
    result_dict = {}
    for model_ind in range(Nmodels):
        evmaf_fassembler = ensemblevmaf_fassemblers[model_ind]
        evmaf_fassembler.run()
        feature_result = evmaf_fassembler.results[0]
        model = self._load_model(asset)[model_ind]
        xs = model.get_per_unit_xs_from_a_result(feature_result)
        pred_result = self.predict_with_model(
            model, xs,
            disable_clip_score=disable_clip_score,
            enable_transform_score=enable_transform_score)
        result_dict = self._populate_result_dict(
            feature_result, pred_result, result_dict)
        pred_result_all_models.append(pred_result)

    Nframes = self._get_Nframes(pred_result)
    all_model_scores = np.zeros((Nmodels, Nframes))
    all_model_score_names = self.ensemblevmaf_get_scores_key(Nmodels)
    for model_ind in range(Nmodels):
        # add per-model quality score under its own key, and collect it
        # into the matrix used for averaging
        result_dict[all_model_score_names[model_ind]] = \
            pred_result_all_models[model_ind]['ys_pred']
        all_model_scores[model_ind, :] = \
            pred_result_all_models[model_ind]['ys_pred']

    # perform prediction averaging (simple average for now)
    pred_result_all_models_ensemble = np.mean(all_model_scores, axis=0)

    # write results
    result_dict[self.get_scores_key()] = pred_result_all_models_ensemble
    return Result(asset, self.executor_id, result_dict)
def test_todataframe_fromdataframe(self):
    """Verify Result.to_dataframe / Result.from_dataframe round-trip and
    the per-key aggregate values of the dataframe rows."""
    # print statement replaced with the function form (valid in both
    # Python 2 and 3); deprecated assertEquals/assertAlmostEquals
    # aliases replaced with assertEqual/assertAlmostEqual
    print('test on result to/from dataframe...')
    df = self.result.to_dataframe()
    df_vmaf = df.loc[df['scores_key'] == 'VMAF_legacy_scores']
    df_adm = df.loc[df['scores_key'] == 'VMAF_feature_adm_scores']
    df_vif = df.loc[df['scores_key'] == 'VMAF_feature_vif_scores']
    df_ansnr = df.loc[df['scores_key'] == 'VMAF_feature_ansnr_scores']
    df_motion = df.loc[df['scores_key'] == 'VMAF_feature_motion_scores']
    df_adm_den = df.loc[df['scores_key'] == 'VMAF_feature_adm_den_scores']
    self.assertEqual(len(df), 38)
    self.assertEqual(len(df_vmaf), 1)
    self.assertEqual(len(df_adm), 1)
    self.assertEqual(len(df_vif), 1)
    self.assertEqual(len(df_ansnr), 1)
    self.assertEqual(len(df_motion), 1)
    self.assertAlmostEqual(np.mean(df_vmaf.iloc[0]['scores']), 40.421899030550769, places=4)
    self.assertAlmostEqual(np.mean(df_adm.iloc[0]['scores']), 0.78533833333333336, places=4)
    self.assertAlmostEqual(np.mean(df_vif.iloc[0]['scores']), 0.156834666667, places=4)
    self.assertAlmostEqual(np.mean(df_ansnr.iloc[0]['scores']), 7.92623066667, places=4)
    self.assertAlmostEqual(np.mean(df_motion.iloc[0]['scores']), 12.5548366667, places=4)
    self.assertAlmostEqual(np.mean(df_adm_den.iloc[0]['scores']), 2773.8912249999998, places=3)
    self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_legacy_scores', 'scores')), 40.421899030550769, places=4)
    self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_adm_scores', 'scores')), 0.78533833333333336, places=4)
    self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_vif_scores', 'scores')), 0.156834666667, places=4)
    self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_ansnr_scores', 'scores')), 7.92623066667, places=4)
    self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_motion_scores', 'scores')), 12.5548366667, places=4)
    self.assertEqual(df.iloc[0]['dataset'], 'test')
    self.assertEqual(df.iloc[0]['content_id'], 0)
    self.assertEqual(df.iloc[0]['asset_id'], 0)
    self.assertEqual(df.iloc[0]['ref_name'], 'checkerboard_1920_1080_10_3_0_0.yuv')
    self.assertEqual(df.iloc[0]['dis_name'], 'checkerboard_1920_1080_10_3_1_0.yuv')
    self.assertEqual(
        df.iloc[0]['asset'],
        '{"asset_dict": {"height": 1080, "use_path_as_workpath": 1, "width": 1920}, "asset_id": 0, "content_id": 0, "dataset": "test", "dis_path": "checkerboard_1920_1080_10_3_1_0.yuv", "ref_path": "checkerboard_1920_1080_10_3_0_0.yuv", "workdir": ""}')  # noqa
    self.assertEqual(df.iloc[0]['executor_id'], 'VMAF_legacy_VF0.2.4b-1.1')
    Result._assert_asset_dataframe(df)
    recon_result = Result.from_dataframe(df)
    self.assertEqual(self.result, recon_result)
    self.assertTrue(self.result == recon_result)
    self.assertFalse(self.result != recon_result)
def test_todataframe_fromdataframe(self):
    """Verify Result.to_dataframe / Result.from_dataframe round-trip and
    the per-key aggregate values of the dataframe rows."""
    # print statement replaced with the function form (valid in both
    # Python 2 and 3); deprecated assertEquals/assertAlmostEquals
    # aliases replaced with assertEqual/assertAlmostEqual
    print('test on result to/from dataframe...')
    df = self.result.to_dataframe()
    df_vmaf = df.loc[df['scores_key'] == 'VMAF_legacy_scores']
    df_adm = df.loc[df['scores_key'] == 'VMAF_feature_adm_scores']
    df_vif = df.loc[df['scores_key'] == 'VMAF_feature_vif_scores']
    df_ansnr = df.loc[df['scores_key'] == 'VMAF_feature_ansnr_scores']
    df_motion = df.loc[df['scores_key'] == 'VMAF_feature_motion_scores']
    df_adm_den = df.loc[df['scores_key'] == 'VMAF_feature_adm_den_scores']
    self.assertEqual(len(df), 38)
    self.assertEqual(len(df_vmaf), 1)
    self.assertEqual(len(df_adm), 1)
    self.assertEqual(len(df_vif), 1)
    self.assertEqual(len(df_ansnr), 1)
    self.assertEqual(len(df_motion), 1)
    self.assertAlmostEqual(np.mean(df_vmaf.iloc[0]['scores']), 40.421899030550769, places=4)
    self.assertAlmostEqual(np.mean(df_adm.iloc[0]['scores']), 0.78533833333333336, places=4)
    self.assertAlmostEqual(np.mean(df_vif.iloc[0]['scores']), 0.156834666667, places=4)
    self.assertAlmostEqual(np.mean(df_ansnr.iloc[0]['scores']), 7.92623066667, places=4)
    self.assertAlmostEqual(np.mean(df_motion.iloc[0]['scores']), 12.5548366667, places=4)
    self.assertAlmostEqual(np.mean(df_adm_den.iloc[0]['scores']), 2773.8912249999998, places=3)
    self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_legacy_scores', 'scores')), 40.421899030550769, places=4)
    self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_adm_scores', 'scores')), 0.78533833333333336, places=4)
    self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_vif_scores', 'scores')), 0.156834666667, places=4)
    self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_ansnr_scores', 'scores')), 7.92623066667, places=4)
    self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_motion_scores', 'scores')), 12.5548366667, places=4)
    self.assertEqual(df.iloc[0]['dataset'], 'test')
    self.assertEqual(df.iloc[0]['content_id'], 0)
    self.assertEqual(df.iloc[0]['asset_id'], 0)
    self.assertEqual(df.iloc[0]['ref_name'], 'checkerboard_1920_1080_10_3_0_0.yuv')
    self.assertEqual(df.iloc[0]['dis_name'], 'checkerboard_1920_1080_10_3_1_0.yuv')
    self.assertEqual(
        df.iloc[0]['asset'],
        '{"asset_dict": {"height": 1080, "use_path_as_workpath": 1, "width": 1920}, "asset_id": 0, "content_id": 0, "dataset": "test", "dis_path": "checkerboard_1920_1080_10_3_1_0.yuv", "ref_path": "checkerboard_1920_1080_10_3_0_0.yuv", "workdir": ""}')  # noqa
    self.assertEqual(df.iloc[0]['executor_id'], 'VMAF_legacy_VF0.2.4b-1.1')
    Result._assert_asset_dataframe(df)
    recon_result = Result.from_dataframe(df)
    self.assertEqual(self.result, recon_result)
    self.assertTrue(self.result == recon_result)
    self.assertFalse(self.result != recon_result)
def _run_on_asset(self, asset):
    """Override Executor._run_on_asset(self, asset): run the VMAF
    feature assembler, rescale the ordered features, run SVR prediction
    per frame, and return a Result object.

    Both Executor._run_on_asset(self, asset) and
    QualityRunner._read_result(self, asset) get bypassed.
    """
    fassembler = self._get_vmaf_feature_assembler_instance(asset)
    fassembler.run()
    feature_result = fassembler.results[0]

    # =====================================================================
    # bind svmutil lazily if it is not already in scope
    try:
        svmutil
    except NameError:
        from vmaf import svmutil

    # SVR predict
    model = svmutil.svm_load_model(self.SVM_MODEL_FILE)

    # rescale each feature series, keeping the model's expected ordering
    ordered_scaled_scores_list = [
        self._rescale(feature_result[scores_key],
                      self.FEATURE_RESCALE_DICT[scores_key])
        for scores_key in self.SVM_MODEL_ORDERED_SCORES_KEYS
    ]

    # per-frame prediction followed by motion-based post-correction
    scores = []
    for vif, adm, ansnr, motion in zip(*ordered_scaled_scores_list):
        raw_score = svmutil.svm_predict(
            [0], [[vif, adm, ansnr, motion]], model)[0][0]
        scores.append(self._post_correction(motion, raw_score))

    # all feature results, plus the quality score
    result_dict = dict(feature_result.result_dict)
    result_dict[self.get_scores_key()] = scores
    return Result(asset, self.executor_id, result_dict)
def _run_on_asset(self, asset):
    """Override VmafQualityRunner._run_on_asset(self, asset), attaching
    local explanation info alongside the predicted quality score."""
    fassembler = self._get_vmaf_feature_assembler_instance(asset)
    fassembler.run()
    feature_result = fassembler.results[0]
    model = self._load_model(asset)
    xs = model.get_per_unit_xs_from_a_result(feature_result)
    ys_pred = self.predict_with_model(model, xs)

    # use the caller-supplied explainer when present, else a default one
    if self.optional_dict2 is not None and 'explainer' in self.optional_dict2:
        explainer = self.optional_dict2['explainer']
    else:
        explainer = LocalExplainer()
    exps = explainer.explain(model, xs)

    result_dict = dict(feature_result.result_dict)  # add feature result
    result_dict[self.get_scores_key()] = ys_pred  # add quality score
    result_dict[self.get_explanations_key()] = exps  # add local explanations
    return Result(asset, self.executor_id, result_dict)
def _read_result(self, asset):
    """Collect the quality scores for the asset and wrap them in a
    Result object."""
    scores = dict(self._get_quality_scores(asset))
    return Result(asset, self.executor_id, scores)
def load_result(result_file_path):
    """Read a stored result file (a Python-literal dict dump), rebuild
    the dataframe, and reconstruct the Result object from it."""
    with open(result_file_path, "rt") as fh:
        raw_text = fh.read()
    df = pd.DataFrame.from_dict(ast.literal_eval(raw_text))
    return Result.from_dataframe(df)
def load_result(result_file_path):
    """Load a Result previously dumped as a Python-literal dict."""
    with open(result_file_path, "rt") as result_file:
        parsed = ast.literal_eval(result_file.read())
        result = Result.from_dataframe(pd.DataFrame.from_dict(parsed))
    return result