def show_local_explanations(cls, results, indexs=None):
    """Plot local explanations for a list of results.

    :param results: list of Result objects whose result_dict carries the
        explanations generated by this class
    :param indexs: list of frame indices, or None. When None, defaults to
        [1], i.e. the second frame.
    :return: list of lists of figures (one inner list per result)
    """
    exp_key = cls.get_explanations_key()
    score_key = cls.get_scores_key()

    # every result must have been generated by this class
    for result in results:
        assert exp_key in result.result_dict

    frame_indices = [1] if indexs is None else indexs  # default: second frame

    figss = []
    for result in results:
        selected_exps = LocalExplainer.select_from_exps(
            result[exp_key], frame_indices)
        # NOTE(review): assumes the scores support list-based (fancy)
        # indexing, e.g. a numpy array — TODO confirm
        selected_preds = result[score_key][frame_indices]
        num_selected = LocalExplainer.assert_explanations(selected_exps)
        repeated_assets = [result.asset] * num_selected
        figs = LocalExplainer.plot_explanations(
            selected_exps, assets=repeated_assets,
            ys=None, ys_pred=selected_preds)
        figss.append(figs)
    return figss
def test_explain_train_test_model(self):
    """Train a SklearnRandomForestTrainTestModel and verify the local
    explainer's weights, raw features, normalized features and names."""
    model_class = SklearnRandomForestTrainTestModel

    train_dataset = import_python_file(
        config.ROOT +
        '/python/test/resource/test_image_dataset_diffdim.py')
    train_assets = read_dataset(train_dataset)

    _, self.features = run_executors_in_parallel(
        MomentNorefFeatureExtractor,
        train_assets,
        fifo_mode=True,
        delete_workdir=True,
        parallelize=True,
        result_store=None,
        optional_dict=None,
        optional_dict2=None,
    )

    xys = model_class.get_xys_from_results(self.features[:7])
    model = model_class({'norm_type': 'normalize', 'random_state': 0}, None)
    model.train(xys)

    # seed so the explainer's neighbor sampling is reproducible
    np.random.seed(0)

    xs = model_class.get_xs_from_results(self.features[7:])
    explainer = LocalExplainer(neighbor_samples=1000)
    exps = explainer.explain(model, xs)

    # expected (row0, row1) values per feature column
    expected_weights = [(-0.12416, 0.00076),
                        (-0.20931, -0.01245),
                        (0.02322, 0.03673)]
    expected_features = [(107.73501, 35.81638),
                         (13691.23881, 1611.56764),
                         (2084.40542, 328.75389)]
    expected_normalized = [(-0.65527, -3.74922),
                           (-0.68872, -2.79586),
                           (0.08524, -1.32625)]
    for col, (v0, v1) in enumerate(expected_weights):
        self.assertAlmostEqual(exps['feature_weights'][0, col], v0, places=4)
        self.assertAlmostEqual(exps['feature_weights'][1, col], v1, places=4)
    for col, (v0, v1) in enumerate(expected_features):
        self.assertAlmostEqual(exps['features'][0, col], v0, places=4)
        self.assertAlmostEqual(exps['features'][1, col], v1, places=4)
    for col, (v0, v1) in enumerate(expected_normalized):
        self.assertAlmostEqual(
            exps['features_normalized'][0, col], v0, places=4)
        self.assertAlmostEqual(
            exps['features_normalized'][1, col], v1, places=4)

    self.assertEqual(exps['feature_names'],
                     ['Moment_noref_feature_1st_score',
                      'Moment_noref_feature_2nd_score',
                      'Moment_noref_feature_var_score'])
def test_explain_vmaf_results(self):
    """Run the VMAF runner with a local explainer attached and verify the
    quality scores, averaged explanation weights, and feature names."""
    print('test on running VMAF runner with local explainer...')

    ref_path = config.ROOT + "/resource/yuv/src01_hrc00_576x324.yuv"
    dis_path = config.ROOT + "/resource/yuv/src01_hrc01_576x324.yuv"
    asset = Asset(dataset="test", content_id=0, asset_id=0,
                  workdir_root=config.ROOT + "/workspace/workdir",
                  ref_path=ref_path,
                  dis_path=dis_path,
                  asset_dict={'width': 576, 'height': 324})
    # second asset compares the reference against itself
    asset_original = Asset(dataset="test", content_id=0, asset_id=1,
                           workdir_root=config.ROOT + "/workspace/workdir",
                           ref_path=ref_path,
                           dis_path=ref_path,
                           asset_dict={'width': 576, 'height': 324})

    self.runner = VmafQualityRunnerWithLocalExplainer(
        [asset, asset_original],
        None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=None,
        optional_dict2={'explainer': LocalExplainer(neighbor_samples=100)},
    )

    # seed so the explainer's neighbor sampling is reproducible
    np.random.seed(0)
    self.runner.run()
    results = self.runner.results

    self.assertAlmostEqual(results[0]['VMAF_score'], 65.4488588759, places=4)
    self.assertAlmostEqual(results[1]['VMAF_score'], 99.2259317881, places=4)

    expected_feature_names = ['VMAF_feature_adm2_score',
                              'VMAF_feature_motion_score',
                              'VMAF_feature_vif_scale0_score',
                              'VMAF_feature_vif_scale1_score',
                              'VMAF_feature_vif_scale2_score',
                              'VMAF_feature_vif_scale3_score']

    # per-result expected frame-averaged feature weights
    expected_weights_per_result = [
        [0.75441663, 0.06816105, -0.10934421,
         0.22051127, 0.12517884, 0.04639162],
        [0.77096087, 0.01491754, -0.08025557,
         0.2511188, 0.14953561, 0.07960753],
    ]
    for result, expected_weights in zip(results,
                                        expected_weights_per_result):
        weights = np.mean(result['VMAF_scores_exps']['feature_weights'],
                          axis=0)
        for actual, expected in zip(weights, expected_weights):
            self.assertAlmostEqual(actual, expected, places=4)
        self.assertEqual(result['VMAF_scores_exps']['feature_names'],
                         expected_feature_names)
def test_explain_train_test_model(self):
    """Verify LocalExplainer output for MomentRandomForestTrainTestModel
    trained on the luma channel only (dis_u/dis_v removed)."""
    model_class = MomentRandomForestTrainTestModel

    xys = model_class.get_xys_from_results(self.features[:7])
    del xys['dis_u']
    del xys['dis_v']

    model = model_class({'norm_type': 'normalize', 'random_state': 0})
    model.train(xys)

    # seed so the explainer's neighbor sampling is reproducible
    np.random.seed(0)

    xs = model_class.get_xs_from_results(self.features[7:])
    del xs['dis_u']
    del xs['dis_v']

    exps = LocalExplainer(neighbor_samples=1000).explain(model, xs)

    # expected (row0, row1) values per feature column, keyed by exps field
    expected = {
        'feature_weights': [(-0.12416, 0.00076),
                            (-0.20931, -0.01245),
                            (0.02322, 0.03673)],
        'features': [(107.73501, 35.81638),
                     (13691.23881, 1611.56764),
                     (2084.40542, 328.75389)],
        'features_normalized': [(-0.65527, -3.74922),
                                (-0.68872, -2.79586),
                                (0.08524, -1.32625)],
    }
    for key in ('feature_weights', 'features', 'features_normalized'):
        for col, (row0, row1) in enumerate(expected[key]):
            self.assertAlmostEqual(exps[key][0, col], row0, places=4)
            self.assertAlmostEqual(exps[key][1, col], row1, places=4)

    self.assertEqual(exps['feature_names'], ['dis_y'])
def _run_on_asset(self, asset):
    """Override VmafQualityRunner._run_on_asset, attaching local
    explanation info alongside the usual feature and quality scores.

    :param asset: asset to run features, prediction and explanation on
    :return: Result carrying features, predicted scores, and explanations
    """
    fassembler = self._get_vmaf_feature_assembler_instance(asset)
    fassembler.run()
    feature_result = fassembler.results[0]

    model = self._load_model(asset)
    xs = model.get_per_unit_xs_from_a_result(feature_result)
    ys_pred = self.predict_with_model(model, xs)

    # honor a caller-supplied explainer, otherwise use the default
    has_custom_explainer = (self.optional_dict2 is not None
                            and 'explainer' in self.optional_dict2)
    explainer = (self.optional_dict2['explainer'] if has_custom_explainer
                 else LocalExplainer())
    exps = explainer.explain(model, xs)

    result_dict = dict(feature_result.result_dict)  # feature result
    result_dict[self.get_scores_key()] = ys_pred  # quality score
    result_dict[self.get_explanations_key()] = exps  # local explanations
    return Result(asset, self.executor_id, result_dict)
def _run_on_asset(self, asset):
    """Extend VmafQualityRunner._run_on_asset with local explanations.

    Runs the feature assembler, predicts quality with the loaded model,
    explains the prediction, and bundles all three into one Result.
    """
    assembler = self._get_vmaf_feature_assembler_instance(asset)
    assembler.run()
    feature_result = assembler.results[0]

    model = self._load_model(asset)
    per_unit_xs = model.get_per_unit_xs_from_a_result(feature_result)
    predicted_scores = self.predict_with_model(model, per_unit_xs)

    # use the explainer from optional_dict2 if one was provided
    if self.optional_dict2 is not None and \
            'explainer' in self.optional_dict2:
        explainer = self.optional_dict2['explainer']
    else:
        explainer = LocalExplainer()
    explanations = explainer.explain(model, per_unit_xs)

    combined = {}
    combined.update(feature_result.result_dict)  # carry over feature result
    combined[self.get_scores_key()] = predicted_scores  # add quality score
    combined[self.get_explanations_key()] = explanations  # add explanations
    return Result(asset, self.executor_id, combined)
def explain_model_on_dataset(model, test_assets_selected_indexs,
                             test_dataset_filepath):
    """Run feature extraction on selected assets of a dataset, locally
    explain the model's predictions on them, and show the plots.

    :param model: trained model whose model_dict carries 'feature_dict'
    :param test_assets_selected_indexs: indices of assets to explain
    :param test_dataset_filepath: path to the dataset python file
    """

    def print_assets(assets):
        # one "Asset <i>: <name>" line per asset
        lines = ["Asset {i}: {name}".format(
            i=i, name=get_file_name_without_extension(asset.dis_path))
            for i, asset in enumerate(assets)]
        print('\n'.join(lines))

    test_dataset = import_python_file(test_dataset_filepath)
    test_assets = read_dataset(test_dataset)
    print_assets(test_assets)
    print("Assets selected for local explanation: {}".format(
        test_assets_selected_indexs))

    result_store = FileSystemResultStore()
    test_assets = [test_assets[i] for i in test_assets_selected_indexs]

    fassembler = FeatureAssembler(
        feature_dict=model.model_dict['feature_dict'],
        feature_option_dict=None,
        assets=test_assets,
        logger=None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=result_store,
        optional_dict=None,
        optional_dict2=None,
        parallelize=True,
    )
    fassembler.run()
    feature_results = fassembler.results

    test_xs = model.get_xs_from_results(feature_results)
    test_ys = model.get_ys_from_results(feature_results)
    test_ys_pred = model.predict(test_xs)

    explainer = LocalExplainer(neighbor_samples=1000)
    test_exps = explainer.explain(model, test_xs)
    explainer.print_explanations(test_exps, assets=test_assets,
                                 ys=test_ys, ys_pred=test_ys_pred)
    explainer.plot_explanations(test_exps, assets=test_assets,
                                ys=test_ys, ys_pred=test_ys_pred)
    plt.show()
def explain_model_on_dataset(model, test_assets_selected_indexs,
                             test_dataset_filepath):
    """Locally explain *model* on selected assets of a dataset and plot.

    :param model: trained model whose model_dict carries 'feature_dict'
    :param test_assets_selected_indexs: indices of assets to explain
    :param test_dataset_filepath: path to the dataset python file
    """

    def print_assets(assets):
        # prints one line per asset; same output as a single joined print
        for idx, asset in enumerate(assets):
            print("Asset {i}: {name}".format(
                i=idx,
                name=get_file_name_without_extension(asset.dis_path)))

    test_dataset = import_python_file(test_dataset_filepath)
    test_assets = read_dataset(test_dataset)
    print_assets(test_assets)
    print("Assets selected for local explanation: {}".format(
        test_assets_selected_indexs))

    selected_assets = [test_assets[i] for i in test_assets_selected_indexs]

    assembler = FeatureAssembler(
        feature_dict=model.model_dict["feature_dict"],
        feature_option_dict=None,
        assets=selected_assets,
        logger=None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=FileSystemResultStore(),
        optional_dict=None,
        optional_dict2=None,
        parallelize=True,
    )
    assembler.run()

    xs = model.get_xs_from_results(assembler.results)
    ys = model.get_ys_from_results(assembler.results)
    ys_pred = model.predict(xs)

    explainer = LocalExplainer(neighbor_samples=1000)
    exps = explainer.explain(model, xs)
    explainer.print_explanations(exps, assets=selected_assets,
                                 ys=ys, ys_pred=ys_pred)
    explainer.plot_explanations(exps, assets=selected_assets,
                                ys=ys, ys_pred=ys_pred)
    plt.show()