def setUp(self):
    """Fixture: extract dis YUV raw video features for the diff-dim image
    dataset into an HDF5 store, and pick a model save path."""
    dataset_module = import_python_file(
        VmafConfig.test_resource_path('test_image_dataset_diffdim.py'))
    assets = read_dataset(dataset_module)

    self.h5py_filepath = VmafConfig.workdir_path('test.hdf5')
    self.h5py_file = DisYUVRawVideoExtractor.open_h5py_file(self.h5py_filepath)

    # CAN ONLY USE SERIAL MODE FOR DisYRawVideoExtractor
    _, self.features = run_executors_in_parallel(
        DisYUVRawVideoExtractor,
        assets,
        fifo_mode=True,
        delete_workdir=True,
        parallelize=False,
        result_store=None,
        optional_dict=None,
        optional_dict2={'h5py_file': self.h5py_file},
    )

    self.model_filename = VmafConfig.workspace_path("model", "test_save_load.pkl")
def setUp(self):
    """Fixture: extract NIQE no-reference features (train mode) over the
    image dataset, and record the NIQE model parameters for the test body."""
    dataset_module = import_python_file(
        VmafConfig.test_resource_path('test_image_dataset.py'))
    assets = read_dataset(dataset_module)

    # model parameters consumed later by the test
    param = empty_object()
    param.model_type = "NIQE"
    param.model_param_dict = {'patch_size': 96}
    self.param = param

    _, self.features = run_executors_in_parallel(
        NiqeNorefFeatureExtractor,
        assets,
        fifo_mode=True,
        delete_workdir=True,
        parallelize=True,
        result_store=None,
        optional_dict={'mode': 'train'},
        optional_dict2=None,
    )

    self.model_filename = VmafConfig.workspace_path('model', 'test_save_load.pkl')
def test_run_parallel_brisque_noref_fextractor(self):
    """Run the BRISQUE no-reference feature extractor on two NorefAssets in
    parallel and verify the extracted feature scores against golden values.

    Fix: replace the Python-2-only ``print`` statement with the ``print()``
    function form (valid on both Python 2 and 3; the file's ``main`` already
    uses the function form).
    """
    print('test on running BRISQUE noref feature extractor on NorefAssets in parallel...')
    ref_path = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
    dis_path = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
    # distorted asset
    asset = NorefAsset(dataset="test", content_id=0, asset_id=0,
                       workdir_root=VmafConfig.workdir_path(),
                       dis_path=dis_path,
                       asset_dict={'width': 576, 'height': 324})
    # pristine (reference used as "distorted") asset
    asset_original = NorefAsset(dataset="test", content_id=0, asset_id=1,
                                workdir_root=VmafConfig.workdir_path(),
                                dis_path=ref_path,
                                asset_dict={'width': 576, 'height': 324})
    self.fextractors, results = run_executors_in_parallel(
        BrisqueNorefFeatureExtractor,
        [asset, asset_original],
        fifo_mode=True,
        delete_workdir=True,
        parallelize=True,
        result_store=None,
    )
    self.assertAlmostEqual(results[0]['BRISQUE_noref_feature_alpha23_score'], 0.78020833333333384, places=4)
    self.assertAlmostEqual(results[0]['BRISQUE_noref_feature_alpha13_score'], 0.6322500000000002, places=4)
    self.assertAlmostEqual(results[0]['BRISQUE_noref_feature_N34_score'], -0.0071207420215536723, places=4)
    self.assertAlmostEqual(results[1]['BRISQUE_noref_feature_alpha23_score'], 0.87156250000000046, places=4)
    self.assertAlmostEqual(results[1]['BRISQUE_noref_feature_alpha13_score'], 0.82906250000000103, places=4)
    self.assertAlmostEqual(results[1]['BRISQUE_noref_feature_N34_score'], -0.0092448158862212092, places=4)
def test_run_parallel_dis_y_fextractor(self):
    """Run DisYUVRawVideoExtractor (serial mode) over two assets, check the
    mean Y/U planes against golden values, and verify the V plane is absent
    when only channels 'yu' are requested.

    Fixes: Python-2-only ``print`` statement -> ``print()`` function;
    deprecated ``assertAlmostEquals`` alias -> ``assertAlmostEqual`` (the
    block already uses the canonical name elsewhere).
    """
    print('test on running dis YUV raw video extractor in parallel (disabled)...')
    ref_path = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
    dis_path = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
    asset = Asset(dataset="test", content_id=0, asset_id=1,
                  workdir_root=VmafConfig.workdir_path(),
                  ref_path=ref_path, dis_path=dis_path,
                  asset_dict={'width': 576, 'height': 324})
    asset_original = Asset(dataset="test", content_id=0, asset_id=2,
                           workdir_root=VmafConfig.workdir_path(),
                           ref_path=ref_path, dis_path=ref_path,
                           asset_dict={'width': 576, 'height': 324})
    h5py_file = DisYUVRawVideoExtractor.open_h5py_file(self.h5py_filepath)
    optional_dict2 = {'h5py_file': h5py_file}
    self.fextractors, results = run_executors_in_parallel(
        DisYUVRawVideoExtractor,
        [asset, asset_original],
        fifo_mode=True,
        delete_workdir=True,
        parallelize=False,  # Can't run parallel: can't pickle FileID objects
        result_store=None,
        optional_dict={'channels': 'yu'},
        optional_dict2=optional_dict2)
    self.assertAlmostEqual(np.mean(results[0]['dis_y']), 61.332006579182384, places=4)
    self.assertAlmostEqual(np.mean(results[1]['dis_y']), 59.788567297525148, places=4)
    self.assertAlmostEqual(np.mean(results[0]['dis_u']), 115.23227407335962, places=4)
    self.assertAlmostEqual(np.mean(results[1]['dis_u']), 114.49701717535437, places=4)
    # 'v' was not in the requested channels, so its key must be missing
    with self.assertRaises(KeyError):
        np.mean(results[0]['dis_v'])
    DisYUVRawVideoExtractor.close_h5py_file(h5py_file)
def setUp(self):
    """Fixture: extract Moment no-reference features in parallel over the
    diff-dim image dataset."""
    dataset_module = import_python_file(
        config.ROOT + '/python/test/resource/test_image_dataset_diffdim.py')
    assets = read_dataset(dataset_module)
    _, self.features = run_executors_in_parallel(
        MomentNorefFeatureExtractor,
        assets,
        fifo_mode=True,
        delete_workdir=True,
        parallelize=True,
        result_store=None,
        optional_dict=None,
        optional_dict2=None,
    )
def test_run_parallel_moment_noref_fextractor(self):
    """Run the Moment no-reference feature extractor on two NorefAssets in
    parallel and verify the 1st/2nd moment and variance scores.

    Fix: replace the Python-2-only ``print`` statement with the ``print()``
    function form (valid on both Python 2 and 3).
    """
    print('test on running Moment noref feature extractor on NorefAssets in parallel...')
    ref_path = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
    dis_path = VmafConfig.test_resource_path("yuv", "src01_hrc01_576x324.yuv")
    # distorted asset
    asset = NorefAsset(dataset="test", content_id=0, asset_id=0,
                       workdir_root=VmafConfig.workdir_path(),
                       dis_path=dis_path,
                       asset_dict={'width': 576, 'height': 324})
    # pristine asset
    asset_original = NorefAsset(dataset="test", content_id=0, asset_id=1,
                                workdir_root=VmafConfig.workdir_path(),
                                dis_path=ref_path,
                                asset_dict={'width': 576, 'height': 324})
    self.fextractors, results = run_executors_in_parallel(
        MomentNorefFeatureExtractor,
        [asset, asset_original],
        fifo_mode=True,
        delete_workdir=True,
        parallelize=True,
        result_store=None,
    )
    self.assertAlmostEqual(results[0]['Moment_noref_feature_1st_score'], 61.332006624999984)
    self.assertAlmostEqual(results[0]['Moment_noref_feature_2nd_score'], 4798.659574041666)
    self.assertAlmostEqual(results[0]['Moment_noref_feature_var_score'], 1036.8371843488285)
    self.assertAlmostEqual(results[1]['Moment_noref_feature_1st_score'], 59.788567297525134)
    self.assertAlmostEqual(results[1]['Moment_noref_feature_2nd_score'], 4696.668388042271)
    self.assertAlmostEqual(results[1]['Moment_noref_feature_var_score'], 1121.519917231207)
def setUp(self):
    """Fixture: extract Moment no-reference features in parallel and choose
    a filename under the model directory for save/load tests."""
    dataset_module = import_python_file(
        VmafConfig.test_resource_path('test_image_dataset_diffdim.py'))
    assets = read_dataset(dataset_module)
    _, self.features = run_executors_in_parallel(
        MomentNorefFeatureExtractor,
        assets,
        fifo_mode=True,
        delete_workdir=True,
        parallelize=True,
        result_store=None,
        optional_dict=None,
        optional_dict2=None,
    )
    self.model_filename = VmafConfig.model_path("test_save_load.pkl")
def run(self):
    """
    Do all the calculation here.

    Fixes for Python 3 compatibility (behavior unchanged on Python 2):
    tuple-parameter lambdas were removed by PEP 3113, and ``map`` no longer
    returns an indexable list — both replaced with list comprehensions.
    :return:
    """
    # for each FeatureExtractor_type key in feature_dict, find the subclass
    # of FeatureExtractor, run, and put results in a dict
    for fextractor_type in self.feature_dict:
        fextractor_class = FeatureExtractor.find_subclass(fextractor_type)
        _, results = run_executors_in_parallel(
            fextractor_class,
            assets=self.assets,
            fifo_mode=self.fifo_mode,
            delete_workdir=self.delete_workdir,
            parallelize=self.parallelize,
            result_store=self.result_store,
            optional_dict=self.optional_dict,
            optional_dict2=self.optional_dict2,
        )
        self.type2results_dict[fextractor_type] = results

    # assemble an output dict with demanded atom features:
    # one fresh dict per asset (indexed below, so it must be a real list)
    result_dicts = [dict() for _ in self.assets]
    for fextractor_type in self.feature_dict:
        assert fextractor_type in self.type2results_dict
        for atom_feature in self._get_atom_features(fextractor_type):
            scores_key = self._get_scores_key(fextractor_type, atom_feature)
            for result_index, result in enumerate(
                    self.type2results_dict[fextractor_type]):
                result_dicts[result_index][scores_key] = result[scores_key]

    self.results = [
        BasicResult(asset, result_dict)
        for asset, result_dict in zip(self.assets, result_dicts)
    ]
def test_run_parallel_brisque_noref_fextractor(self):
    """Extract BRISQUE noref features for the default 576x324 asset pair in
    parallel and compare against golden scores."""
    ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
    self.fextractors, results = run_executors_in_parallel(
        BrisqueNorefFeatureExtractor,
        [asset, asset_original],
        fifo_mode=True,
        delete_workdir=True,
        parallelize=True,
        result_store=None,
    )
    # (score key, expected for distorted asset, expected for original asset)
    golden = [
        ('BRISQUE_noref_feature_alpha23_score', 0.7640625000000005, 0.8644583333333339),
        ('BRISQUE_noref_feature_alpha13_score', 0.6322500000000002, 0.82906250000000103),
        ('BRISQUE_noref_feature_N34_score', -0.007239876204980851, -0.0092448158862212092),
    ]
    for score_key, dis_expected, orig_expected in golden:
        self.assertAlmostEqual(results[0][score_key], dis_expected, places=4)
        self.assertAlmostEqual(results[1][score_key], orig_expected, places=4)
def test_run_parallel_brisque_noref_fextractor(self):
    """Extract BRISQUE noref features for the default 576x324 asset pair in
    parallel and verify against golden scores.

    Fix: replace the Python-2-only ``print`` statement with the ``print()``
    function form (valid on both Python 2 and 3).
    """
    print('test on running BRISQUE noref feature extractor on NorefAssets in parallel...')
    ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
    self.fextractors, results = run_executors_in_parallel(
        BrisqueNorefFeatureExtractor,
        [asset, asset_original],
        fifo_mode=True,
        delete_workdir=True,
        parallelize=True,
        result_store=None,
    )
    self.assertAlmostEqual(results[0]['BRISQUE_noref_feature_alpha23_score'], 0.78020833333333384, places=4)
    self.assertAlmostEqual(results[0]['BRISQUE_noref_feature_alpha13_score'], 0.6322500000000002, places=4)
    self.assertAlmostEqual(results[0]['BRISQUE_noref_feature_N34_score'], -0.0071207420215536723, places=4)
    self.assertAlmostEqual(results[1]['BRISQUE_noref_feature_alpha23_score'], 0.87156250000000046, places=4)
    self.assertAlmostEqual(results[1]['BRISQUE_noref_feature_alpha13_score'], 0.82906250000000103, places=4)
    self.assertAlmostEqual(results[1]['BRISQUE_noref_feature_N34_score'], -0.0092448158862212092, places=4)
def setUp(self):
    """Fixture: open an HDF5 store and extract dis YUV raw video features
    for the diff-dim image dataset (serial mode only)."""
    dataset_module = import_python_file(
        config.ROOT + '/python/test/resource/test_image_dataset_diffdim.py')
    assets = read_dataset(dataset_module)

    self.h5py_filepath = config.ROOT + '/workspace/workdir/test.hdf5'
    self.h5py_file = DisYUVRawVideoExtractor.open_h5py_file(self.h5py_filepath)

    # CAN ONLY USE SERIAL MODE FOR DisYRawVideoExtractor
    _, self.features = run_executors_in_parallel(
        DisYUVRawVideoExtractor,
        assets,
        fifo_mode=True,
        delete_workdir=True,
        parallelize=False,
        result_store=None,
        optional_dict=None,
        optional_dict2={'h5py_file': self.h5py_file},
    )
# shuffle assets np.random.seed(seed) np.random.shuffle(assets) assets = assets[:(num_train + num_test)] raw_video_h5py_filepath = config.ROOT + '/workspace/workdir/rawvideo.hdf5' raw_video_h5py_file = DisYUVRawVideoExtractor.open_h5py_file(raw_video_h5py_filepath) print '======================== Extract raw YUVs ==============================' _, raw_yuvs = run_executors_in_parallel( DisYUVRawVideoExtractor, assets, fifo_mode=True, delete_workdir=True, parallelize=False, # CAN ONLY USE SERIAL MODE FOR DisYRawVideoExtractor result_store=None, optional_dict=None, optional_dict2={'h5py_file': raw_video_h5py_file}) patch_h5py_filepath = config.ROOT + '/workspace/workdir/patch.hdf5' patch_h5py_file = ToddNoiseClassifierTrainTestModel.open_h5py_file(patch_h5py_filepath) model = ToddNoiseClassifierTrainTestModel( param_dict={ 'seed': seed, 'n_epochs': n_epochs, }, logger=None, optional_dict2={ # for options that won't impact the result # 'checkpoints_dir': config.ROOT + '/workspace/checkpoints_dir',
def main():
    """End-to-end script: extract raw YUV features from the BSDS500 noisy
    dataset, train a ToddNoiseClassifier model, evaluate it, and clean up
    the temporary HDF5 stores."""
    # parameters
    num_train = 500
    num_test = 50
    n_epochs = 30
    seed = 0  # None

    # read input dataset
    dataset_path = VmafConfig.resource_path('dataset', 'BSDS500_noisy_dataset.py')
    dataset = import_python_file(dataset_path)
    assets = read_dataset(dataset)

    # shuffle assets (seeded above, so the train/test split is reproducible),
    # then keep only as many as needed for training + testing
    np.random.seed(seed)
    np.random.shuffle(assets)
    assets = assets[:(num_train + num_test)]

    raw_video_h5py_filepath = VmafConfig.workdir_path('rawvideo.hdf5')
    raw_video_h5py_file = DisYUVRawVideoExtractor.open_h5py_file(
        raw_video_h5py_filepath)

    print(
        '======================== Extract raw YUVs =============================='
    )
    _, raw_yuvs = run_executors_in_parallel(
        DisYUVRawVideoExtractor,
        assets,
        fifo_mode=True,
        delete_workdir=True,
        parallelize=False,  # CAN ONLY USE SERIAL MODE FOR DisYRawVideoExtractor
        result_store=None,
        optional_dict=None,
        optional_dict2={'h5py_file': raw_video_h5py_file})

    # second HDF5 store holds the model's extracted patches
    patch_h5py_filepath = VmafConfig.workdir_path('patch.hdf5')
    patch_h5py_file = ToddNoiseClassifierTrainTestModel.open_h5py_file(
        patch_h5py_filepath)
    model = ToddNoiseClassifierTrainTestModel(
        param_dict={
            'seed': seed,
            'n_epochs': n_epochs,
        },
        logger=None,
        optional_dict2={  # for options that won't impact the result
            # 'checkpoints_dir': VmafConfig.workspace_path('checkpoints_dir'),
            'h5py_file': patch_h5py_file,
        })

    print(
        '============================ Train model ==============================='
    )
    # first num_train results are the training split
    xys = ToddNoiseClassifierTrainTestModel.get_xys_from_results(
        raw_yuvs[:num_train])
    model.train(xys)

    print(
        '=========================== Evaluate model ============================='
    )
    # remaining results are the held-out test split
    xs = ToddNoiseClassifierTrainTestModel.get_xs_from_results(
        raw_yuvs[num_train:])
    ys = ToddNoiseClassifierTrainTestModel.get_ys_from_results(
        raw_yuvs[num_train:])
    result = model.evaluate(xs, ys)
    print("")
    print("f1 test %g, errorrate test %g" % (result['f1'], result['errorrate']))

    # tear down: close both HDF5 stores, then delete the files from disk
    DisYUVRawVideoExtractor.close_h5py_file(raw_video_h5py_file)
    ToddNoiseClassifierTrainTestModel.close_h5py_file(patch_h5py_file)
    os.remove(raw_video_h5py_filepath)
    os.remove(patch_h5py_filepath)
    print('Done.')
def test_explain_train_test_model(self):
    """Train a random-forest model on Moment noref features, run the
    LocalExplainer on the held-out results, and check the explainer's
    weights, features, normalized features, and feature names."""
    model_class = SklearnRandomForestTrainTestModel

    dataset_module = import_python_file(
        config.ROOT + '/python/test/resource/test_image_dataset_diffdim.py')
    assets = read_dataset(dataset_module)
    _, self.features = run_executors_in_parallel(
        MomentNorefFeatureExtractor,
        assets,
        fifo_mode=True,
        delete_workdir=True,
        parallelize=True,
        result_store=None,
        optional_dict=None,
        optional_dict2=None,
    )

    # train on the first 7 results, explain the rest
    xys = model_class.get_xys_from_results(self.features[:7])
    model = model_class({'norm_type': 'normalize', 'random_state': 0}, None)
    model.train(xys)

    np.random.seed(0)
    xs = model_class.get_xs_from_results(self.features[7:])
    explainer = LocalExplainer(neighbor_samples=1000)
    exps = explainer.explain(model, xs)

    # golden values, keyed by explainer output, as (row, col, value) triples
    golden = {
        'feature_weights': [
            (0, 0, -0.12416), (1, 0, 0.00076),
            (0, 1, -0.20931), (1, 1, -0.01245),
            (0, 2, 0.02322), (1, 2, 0.03673),
        ],
        'features': [
            (0, 0, 107.73501), (1, 0, 35.81638),
            (0, 1, 13691.23881), (1, 1, 1611.56764),
            (0, 2, 2084.40542), (1, 2, 328.75389),
        ],
        'features_normalized': [
            (0, 0, -0.65527), (1, 0, -3.74922),
            (0, 1, -0.68872), (1, 1, -2.79586),
            (0, 2, 0.08524), (1, 2, -1.32625),
        ],
    }
    for output_key, triples in golden.items():
        for row, col, expected in triples:
            self.assertAlmostEqual(exps[output_key][row, col], expected, places=4)

    self.assertEqual(exps['feature_names'], [
        'Moment_noref_feature_1st_score',
        'Moment_noref_feature_2nd_score',
        'Moment_noref_feature_var_score'
    ])
# shuffle assets np.random.seed(seed) np.random.shuffle(assets) assets = assets[:(num_train + num_test)] raw_video_h5py_filepath = VmafConfig.workdir_path('rawvideo.hdf5') raw_video_h5py_file = DisYUVRawVideoExtractor.open_h5py_file(raw_video_h5py_filepath) print '======================== Extract raw YUVs ==============================' _, raw_yuvs = run_executors_in_parallel( DisYUVRawVideoExtractor, assets, fifo_mode=True, delete_workdir=True, parallelize=False, # CAN ONLY USE SERIAL MODE FOR DisYRawVideoExtractor result_store=None, optional_dict=None, optional_dict2={'h5py_file': raw_video_h5py_file}) patch_h5py_filepath = VmafConfig.workdir_path('patch.hdf5') patch_h5py_file = ToddNoiseClassifierTrainTestModel.open_h5py_file(patch_h5py_filepath) model = ToddNoiseClassifierTrainTestModel( param_dict={ 'seed': seed, 'n_epochs': n_epochs, }, logger=None, optional_dict2={ # for options that won't impact the result # 'checkpoints_dir': VmafConfig.workspace_path('checkpoints_dir'),