Example #1
    def test_train_test_on_raw_dataset_with_dis1st_thr(self):
        train_dataset = import_python_file(
            VmafConfig.test_resource_path('raw_dataset_sample.py'))
        model_param = import_python_file(
            VmafConfig.test_resource_path('model_param_sample.py'))
        feature_param = import_python_file(
            VmafConfig.test_resource_path('feature_param_sample.py'))

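        # train and test on the same raw dataset here, so the first predicted label should agree between train_stats and test_stats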
        train_fassembler, train_assets, train_stats, test_fassembler, test_assets, test_stats, _ = train_test_vmaf_on_dataset(
            train_dataset=train_dataset,
            test_dataset=train_dataset,
            feature_param=feature_param,
            model_param=model_param,
            train_ax=None,
            test_ax=None,
            result_store=None,
            parallelize=True,
            logger=None,
            fifo_mode=True,
            output_model_filepath=self.output_model_filepath
        )

        self.train_fassembler = train_fassembler
        self.assertTrue(os.path.exists(self.output_model_filepath))
        self.assertAlmostEqual(train_stats['ys_label_pred'][0], 93.565459224020742, places=3)
        self.assertAlmostEqual(test_stats['ys_label_pred'][0], 93.565459224020742, places=3)
Example #2
 def test_persubject_subjective_model_output(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = PerSubjectModel(dataset_reader)
     subjective_model.run_modeling(transform_final={'p1':25, 'p0':-25})
     subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath)
     self.assertTrue(os.path.exists(self.output_dataset_filepath))
     dataset2 = import_python_file(self.output_dataset_filepath)
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('os' not in dis_video)
     self.assertAlmostEquals(dis_video['groundtruth'], 100.0, places=4)
Example #3
 def test_mos_subjective_model_output(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     subjective_model.run_modeling()
     subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath)
     self.assertTrue(os.path.exists(self.output_dataset_filepath))
     dataset2 = import_python_file(self.output_dataset_filepath)
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('os' not in dis_video)
     self.assertAlmostEquals(dis_video['groundtruth'], 4.884615384615385, places=4)
Example #4
 def test_mos_subjective_model_output_custom_resampling(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     subjective_model.run_modeling()
     subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath, resampling_type='lanczos')
     self.assertTrue(os.path.exists(self.output_dataset_filepath))
     dataset2 = import_python_file(self.output_dataset_filepath)
     self.assertFalse(hasattr(dataset2, 'quality_height'))
     self.assertFalse(hasattr(dataset2, 'quality_width'))
     self.assertEquals(dataset2.resampling_type, 'lanczos')
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('os' not in dis_video)
     self.assertAlmostEquals(dis_video['groundtruth'], 4.884615384615385, places=4)
Example #5
    def test_read_dataset_mixed(self):
        dataset_path = VmafConfig.test_resource_path('test_dataset_mixed.py')
        dataset = import_python_file(dataset_path)
        assets = read_dataset(dataset)

        self.assertEquals(len(assets), 4)

        self.assertEqual(assets[0].resampling_type, 'bilinear')
        self.assertEqual(assets[0].ref_yuv_type, 'yuv420p')
        self.assertEqual(assets[0].dis_yuv_type, 'yuv420p')
        self.assertEqual(assets[0].ref_width_height, (1920, 1080))
        self.assertEqual(assets[0].dis_width_height, (1920, 1080))

        self.assertEqual(assets[1].resampling_type, 'bilinear')
        self.assertEqual(assets[1].ref_yuv_type, 'yuv420p')
        self.assertEqual(assets[1].dis_yuv_type, 'notyuv')
        self.assertEqual(assets[1].ref_width_height, (1920, 1080))
        self.assertEqual(assets[1].dis_width_height, None)

        self.assertEqual(assets[2].resampling_type, 'bilinear')
        self.assertEqual(assets[2].ref_yuv_type, 'yuv420p')
        self.assertEqual(assets[2].dis_yuv_type, 'yuv420p')
        self.assertEqual(assets[2].ref_width_height, (720, 480))
        self.assertEqual(assets[2].dis_width_height, (720, 480))

        self.assertEqual(assets[3].resampling_type, 'bilinear')
        self.assertEqual(assets[3].ref_yuv_type, 'yuv420p')
        self.assertEqual(assets[3].dis_yuv_type, 'notyuv')
        self.assertEqual(assets[3].ref_width_height, (720, 480))
        self.assertEqual(assets[3].dis_width_height, None)
Example #6
def main():
    if len(sys.argv) < 3:
        print_usage()
        return 2

    try:
        quality_type = sys.argv[1]
        dataset_filepath = sys.argv[2]
    except ValueError:
        print_usage()
        return 2

    try:
        dataset = import_python_file(dataset_filepath)
    except Exception as e:
        print "Error: " + str(e)
        return 1

    try:
        runner_class = QualityRunner.find_subclass(quality_type)
    except:
        print_usage()
        return 2

    result_store = FileSystemResultStore()

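    # delete any stored results for this dataset / quality runner combination from the result store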
    run_remove_results_for_dataset(result_store, dataset, runner_class)

    return 0
Example #7
 def test_generate_dataset_from_raw_default(self):  # DMOS
     generate_dataset_from_raw(
         raw_dataset_filepath=self.raw_dataset_filepath,
         output_dataset_filepath=self.derived_dataset_path)
     dataset = import_python_file(self.derived_dataset_path)
     self.assertAlmostEqual(dataset.dis_videos[0]['groundtruth'],
                            1.42307692308,
                            places=4)
Example #8
    def from_dataset_file(cls, dataset_filepath, content_ids=None):
        dataset = import_python_file(dataset_filepath)

        if content_ids is not None:
            dataset.dis_videos = filter(lambda dis_video: dis_video['content_id'] in content_ids, dataset.dis_videos)

        dataset_reader = RawDatasetReader(dataset)
        return cls(dataset_reader)
Example #9
    def test_read_dataset_fps_rebuf_indices(self):
        train_dataset_path = VmafConfig.test_resource_path(
            'test_dataset_fps_rebufinds.py')
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        self.assertTrue('fps' in train_assets[0].asset_dict.keys())
        self.assertTrue('rebuf_indices' in train_assets[0].asset_dict.keys())
Example #10
 def test_mos_subjective_model_output_custom_resampling(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     subjective_model.run_modeling()
     subjective_model.to_aggregated_dataset_file(
         self.output_dataset_filepath, resampling_type='lanczos')
     self.assertTrue(os.path.exists(self.output_dataset_filepath))
     dataset2 = import_python_file(self.output_dataset_filepath)
     self.assertFalse(hasattr(dataset2, 'quality_height'))
     self.assertFalse(hasattr(dataset2, 'quality_width'))
     self.assertEquals(dataset2.resampling_type, 'lanczos')
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('os' not in dis_video)
     self.assertAlmostEquals(dis_video['groundtruth'],
                             4.884615384615385,
                             places=4)
Example #11
 def test_mos_subjective_model_normalize_final(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling(normalize_final=True)
     scores = result['quality_scores']
     self.assertAlmostEquals(scores[0], 1.1318646945818083, places=4)
     self.assertAlmostEquals(scores[10], -1.2400334499143002, places=4)
     self.assertAlmostEquals(np.mean(scores), 0.0, places=4)
Example #12
 def test_mos_subjective_model_transform_final(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling(transform_final={'p1': 10, 'p0': 1})
     scores = result['quality_scores']
     self.assertAlmostEquals(scores[0], 49.84615384615385, places=4)
     self.assertAlmostEquals(scores[10], 21.769230769230771, places=4)
     self.assertAlmostEquals(np.mean(scores), 36.44790652385589, places=4)
Example #13
    def test_read_dataset_duration_sec(self):
        train_dataset_path = VmafConfig.test_resource_path('example_dataset_crop_pad_duration_sec.py')
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        self.assertEqual(len(train_assets), 3)
        self.assertEqual(train_assets[0].asset_dict['duration_sec'], 5.0)
        self.assertEqual(train_assets[1].asset_dict['duration_sec'], 5.0)
        self.assertEqual(train_assets[2].asset_dict['duration_sec'], 5.0)
Example #14
    def test_read_dataset_crop_and_pad(self):
        train_dataset_path = VmafConfig.test_resource_path('example_dataset_crop_pad.py')
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        self.assertEquals(len(train_assets), 3)
        self.assertEquals(str(train_assets[0]), "example_0_1_src01_hrc00_576x324_576x324_vs_src01_hrc01_576x324_576x324_q_576x324_crop288:162:144:81")
        self.assertEquals(str(train_assets[1]), "example_0_2_src01_hrc00_576x324_576x324_vs_src01_hrc01_576x324_576x324_q_576x324_padiw+100:ih+100:50:50")
        self.assertEquals(str(train_assets[2]), "example_0_3_src01_hrc00_576x324_576x324_vs_src01_hrc01_576x324_576x324_q_576x324_crop288:162:144:81_padiw+288:ih+162:144:81")
Example #15
 def test_mos_subjective_model(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling()
     scores = result['quality_scores']
     self.assertAlmostEquals(scores[0], 4.884615384615385, places=4)
     self.assertAlmostEquals(scores[10], 2.0769230769230771, places=4)
     self.assertAlmostEquals(np.mean(scores), 3.544790652385589, places=4)
Example #16
    def test_read_dataset_crop_and_pad(self):
        train_dataset_path = VmafConfig.test_resource_path('example_dataset_crop_pad.py')
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        self.assertEqual(len(train_assets), 3)
        self.assertEqual(str(train_assets[0]), "example_0_1_src01_hrc00_576x324_576x324_crop288:162:144:81_vs_src01_hrc01_576x324_576x324_crop288:162:144:81_q_576x324")
        self.assertEqual(str(train_assets[1]), "example_0_2_src01_hrc00_576x324_576x324_padiw+100:ih+100:50:50_vs_src01_hrc01_576x324_576x324_padiw+100:ih+100:50:50_q_576x324")
        self.assertEqual(str(train_assets[2]), "example_0_3_src01_hrc00_576x324_576x324_crop288:162:144:81_padiw+288:ih+162:144:81_vs_src01_hrc01_576x324_576x324_crop288:162:144:81_padiw+288:ih+162:144:81_q_576x324")
Example #17
    def test_read_image_dataset_notyuv(self):
        dataset_path = VmafConfig.test_resource_path('test_image_dataset_notyuv.py')
        dataset = import_python_file(dataset_path)
        assets = read_dataset(dataset)

        self.assertEquals(len(assets), 4)
        self.assertTrue(assets[0].ref_width_height is None)
        self.assertTrue(assets[0].dis_width_height is None)
        self.assertEquals(assets[0].quality_width_height, (1920, 1080))
Example #18
 def test_generate_dataset_from_raw_mos(self):
     generate_dataset_from_raw(
         raw_dataset_filepath=self.raw_dataset_filepath,
         output_dataset_filepath=self.derived_dataset_path,
         subj_model_class=MosModel)
     dataset = import_python_file(self.derived_dataset_path)
     self.assertAlmostEqual(dataset.dis_videos[0]['groundtruth'],
                            1.3076923076923077,
                            places=4)
Example #19
    def test_explain_train_test_model(self):

        model_class = SklearnRandomForestTrainTestModel

        train_dataset_path = VmafConfig.test_resource_path('test_image_dataset_diffdim.py')
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        fextractor = MomentNorefFeatureExtractor(
            train_assets,
            None,
            fifo_mode=True,
            delete_workdir=True,
            result_store=None,
            optional_dict=None,
            optional_dict2=None,
        )
        fextractor.run(parallelize=True)
        self.features = fextractor.results

        xys = model_class.get_xys_from_results(self.features[:7])
        model = model_class({'norm_type':'normalize', 'random_state':0}, None)
        model.train(xys)

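        # fix the RNG seed so LocalExplainer's neighbor sampling is reproducible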
        np.random.seed(0)

        xs = model_class.get_xs_from_results(self.features[7:])
        explainer = LocalExplainer(neighbor_samples=1000)
        exps = explainer.explain(model, xs)

        self.assertAlmostEqual(exps['feature_weights'][0, 0], -0.12416, places=4)
        self.assertAlmostEqual(exps['feature_weights'][1, 0], 0.00076, places=4)
        self.assertAlmostEqual(exps['feature_weights'][0, 1], -0.20931, places=4)
        self.assertAlmostEqual(exps['feature_weights'][1, 1], -0.01245, places=4)
        self.assertAlmostEqual(exps['feature_weights'][0, 2], 0.02322, places=4)
        self.assertAlmostEqual(exps['feature_weights'][1, 2], 0.03673, places=4)

        self.assertAlmostEqual(exps['features'][0, 0], 107.73501, places=4)
        self.assertAlmostEqual(exps['features'][1, 0], 35.81638, places=4)
        self.assertAlmostEqual(exps['features'][0, 1], 13691.23881, places=4)
        self.assertAlmostEqual(exps['features'][1, 1], 1611.56764, places=4)
        self.assertAlmostEqual(exps['features'][0, 2], 2084.40542, places=4)
        self.assertAlmostEqual(exps['features'][1, 2], 328.75389, places=4)

        self.assertAlmostEqual(exps['features_normalized'][0, 0], -0.65527, places=4)
        self.assertAlmostEqual(exps['features_normalized'][1, 0], -3.74922, places=4)
        self.assertAlmostEqual(exps['features_normalized'][0, 1], -0.68872, places=4)
        self.assertAlmostEqual(exps['features_normalized'][1, 1], -2.79586, places=4)
        self.assertAlmostEqual(exps['features_normalized'][0, 2], 0.08524, places=4)
        self.assertAlmostEqual(exps['features_normalized'][1, 2], -1.32625, places=4)

        self.assertEqual(exps['feature_names'],
                         ['Moment_noref_feature_1st_score',
                          'Moment_noref_feature_2nd_score',
                          'Moment_noref_feature_var_score']
                         )
Example #20
 def test_observer_content_aware_subjective_model_subjreject(self):
     dataset = import_python_file(self.dataset_filepath)
     np.random.seed(0)
     info_dict = {
         'selected_subjects': range(5),
     }
     dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
     subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
     with self.assertRaises(AssertionError):
         subjective_model.run_modeling(subject_rejection=True)
Example #21
    def test_read_image_dataset_notyuv(self):
        dataset_path = VmafConfig.test_resource_path(
            'test_image_dataset_notyuv.py')
        dataset = import_python_file(dataset_path)
        assets = read_dataset(dataset)

        self.assertEquals(len(assets), 4)
        self.assertTrue(assets[0].ref_width_height is None)
        self.assertTrue(assets[0].dis_width_height is None)
        self.assertEquals(assets[0].quality_width_height, (1920, 1080))
Example #22
    def test_test_on_dataset_split_test_indices_for_perf_ci(self):
        from vmaf.routine import run_test_on_dataset
        test_dataset = import_python_file(VmafConfig.test_resource_path('dataset_sample.py'))
        test_assets, results = run_test_on_dataset(test_dataset, VmafQualityRunner, None,
                                                   None, None, parallelize=False,
                                                   aggregate_method=None,
                                                   split_test_indices_for_perf_ci=True,
                                                   n_splits_test_indices=10)

        self.assertAlmostEqual(results[0]['VMAF_score'], 99.142659046424384, places=4)
Example #23
    def from_dataset_file(cls, dataset_filepath, content_ids=None):
        dataset = import_python_file(dataset_filepath)

        if content_ids is not None:
            dataset.dis_videos = filter(
                lambda dis_video: dis_video['content_id'] in content_ids,
                dataset.dis_videos)

        dataset_reader = RawDatasetReader(dataset)
        return cls(dataset_reader)
Example #24
def run_vmaf_cv(train_dataset_filepath,
                test_dataset_filepath,
                param_filepath,
                output_model_filepath=None,
                **kwargs):

    result_store_dir = kwargs['result_store_dir'] if 'result_store_dir' in kwargs else VmafConfig.file_result_store_path()

    logger = get_stdout_logger()
    result_store = FileSystemResultStore(result_store_dir)

    train_dataset = import_python_file(train_dataset_filepath)
    test_dataset = import_python_file(test_dataset_filepath) if test_dataset_filepath is not None else None

    param = import_python_file(param_filepath)

    # === plot scatter ===

    nrows = 1
    ncols = 2
    fig, axs = plt.subplots(figsize=(5*ncols, 5*nrows), nrows=nrows, ncols=ncols)

    train_test_vmaf_on_dataset(train_dataset, test_dataset, param, param, axs[0], axs[1],
                               result_store, parallelize=True, logger=None,
                               output_model_filepath=output_model_filepath,
                               **kwargs)

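    # optional axis limits passed via kwargs apply to both the training and testing scatter plots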
    if 'xlim' in kwargs:
        axs[0].set_xlim(kwargs['xlim'])
        axs[1].set_xlim(kwargs['xlim'])

    if 'ylim' in kwargs:
        axs[0].set_ylim(kwargs['ylim'])
        axs[1].set_ylim(kwargs['ylim'])

    bbox = {'facecolor':'white', 'alpha':1, 'pad':20}
    axs[0].annotate('Training Set', xy=(0.1, 0.85), xycoords='axes fraction', bbox=bbox)
    axs[1].annotate('Testing Set', xy=(0.1, 0.85), xycoords='axes fraction', bbox=bbox)

    plt.tight_layout()

    # === clean up ===
    close_logger(logger)
Example #25
    def test_train_test_on_dataset_with_dis1st_thr_with_feature_optional_dict_good(self):
        from vmaf.routine import train_test_vmaf_on_dataset
        train_dataset = import_python_file(
            VmafConfig.test_resource_path('dataset_sample.py'))
        model_param = import_python_file(
            VmafConfig.test_resource_path('model_param_sample.py'))
        feature_param = import_python_file(
            VmafConfig.test_resource_path('feature_param_sample_with_optional_dict_good.py'))

        train_fassembler, train_assets, train_stats, test_fassembler, test_assets, test_stats, _ = train_test_vmaf_on_dataset(
            train_dataset=train_dataset,
            test_dataset=train_dataset,
            feature_param=feature_param,
            model_param=model_param,
            train_ax=None,
            test_ax=None,
            result_store=None,
            parallelize=True,
            logger=None,
            fifo_mode=True,
            output_model_filepath=self.output_model_filepath,
        )

        self.train_fassembler = train_fassembler
        self.assertTrue(os.path.exists(self.output_model_filepath))
        self.assertAlmostEqual(train_stats['ys_label_pred'][0], 90.753010402770798, places=3)
        self.assertAlmostEqual(test_stats['ys_label_pred'][0], 90.753010402770798, places=3)

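        # re-run VMAF on the training assets using the freshly trained model file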
        runner = VmafQualityRunner(
            train_assets,
            None, fifo_mode=True,
            delete_workdir=True,
            result_store=None,
            optional_dict={'model_filepath': self.output_model_filepath}
        )
        runner.run(parallelize=True)
        results = runner.results

        self.assertAlmostEqual(results[0]['VMAF_score'], 89.55494473011981, places=4)
        self.assertAlmostEqual(results[1]['VMAF_score'], 61.01289549048653, places=4)
        self.assertAlmostEqual(results[2]['VMAF_score'], 90.75301241304798, places=4)
        self.assertAlmostEqual(results[3]['VMAF_score'], 89.27013895870179, places=4)
Example #26
    def setUp(self):
        dataset_filepath = config.ROOT + '/python/test/resource/NFLX_dataset_public_raw.py'
        dataset = import_python_file(dataset_filepath)

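        # seed the RNG so the injected subject corruption is deterministic across runs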
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }

        self.dataset_reader = CorruptSubjectRawDatasetReader(
            dataset, input_dict=info_dict)
Example #27
    def setUp(self):
        dataset_filepath = config.ROOT + '/python/test/resource/NFLX_dataset_public_raw.py'
        dataset = import_python_file(dataset_filepath)

        np.random.seed(0)
        info_dict = {
            'corrupt_probability': 0.1,
        }

        self.dataset_reader = CorruptDataRawDatasetReader(dataset,
                                                          input_dict=info_dict)
Example #28
 def test_observer_content_aware_subjective_model_subjreject(self):
     dataset = import_python_file(self.dataset_filepath)
     np.random.seed(0)
     info_dict = {
         'selected_subjects': range(5),
     }
     dataset_reader = CorruptSubjectRawDatasetReader(dataset,
                                                     input_dict=info_dict)
     subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
     with self.assertRaises(AssertionError):
         subjective_model.run_modeling(subject_rejection=True)
Example #29
 def test_mos_subjective_model(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling()
     scores = result['quality_scores']
     self.assertAlmostEquals(scores[0], 4.884615384615385, places=4)
     self.assertAlmostEquals(scores[10], 2.0769230769230771, places=4)
     self.assertAlmostEquals(np.mean(scores), 3.544790652385589, places=4)
     scores_std = result['quality_scores_std']
     self.assertAlmostEquals(np.mean(scores_std), 0.12986637295658307, places=4)
Example #30
def run_vmaf_cv(train_dataset_filepath,
                test_dataset_filepath,
                param_filepath,
                output_model_filepath=None,
                **kwargs):

    logger = get_stdout_logger()
    result_store = FileSystemResultStore()

    train_dataset = import_python_file(train_dataset_filepath)
    test_dataset = import_python_file(test_dataset_filepath) if test_dataset_filepath is not None else None

    param = import_python_file(param_filepath)

    # === plot scatter ===

    nrows = 1
    ncols = 2
    fig, axs = plt.subplots(figsize=(5*ncols, 5*nrows), nrows=nrows, ncols=ncols)

    train_test_vmaf_on_dataset(train_dataset, test_dataset, param, param, axs[0], axs[1],
                               result_store, parallelize=True, logger=None,
                               output_model_filepath=output_model_filepath,
                               **kwargs)

    if 'xlim' in kwargs:
        axs[0].set_xlim(kwargs['xlim'])
        axs[1].set_xlim(kwargs['xlim'])

    if 'ylim' in kwargs:
        axs[0].set_ylim(kwargs['ylim'])
        axs[1].set_ylim(kwargs['ylim'])

    bbox = {'facecolor':'white', 'alpha':1, 'pad':20}
    axs[0].annotate('Training Set', xy=(0.1, 0.85), xycoords='axes fraction', bbox=bbox)
    axs[1].annotate('Testing Set', xy=(0.1, 0.85), xycoords='axes fraction', bbox=bbox)

    plt.tight_layout()

    # === clean up ===
    close_logger(logger)
Example #31
 def _import_dataset_and_filter(cls, dataset_filepath, content_ids,
                                asset_ids):
     dataset = import_python_file(dataset_filepath)
     if content_ids is not None:
         dataset.dis_videos = filter(
             lambda dis_video: dis_video['content_id'] in content_ids,
             dataset.dis_videos)
     if asset_ids is not None:
         dataset.dis_videos = filter(
             lambda dis_video: dis_video['asset_id'] in asset_ids,
             dataset.dis_videos)
     return dataset
Example #32
 def test_mos_subjective_model_output2(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     subjective_model.run_modeling()
     dataset2 = subjective_model.to_aggregated_dataset()
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('os' not in dis_video)
     self.assertAlmostEquals(dis_video['groundtruth'],
                             4.884615384615385,
                             places=4)
Example #33
 def test_mos_subjective_model_transform_final(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling(transform_final={
         'p1': 10,
         'p0': 1
     })
     scores = result['quality_scores']
     self.assertAlmostEquals(scores[0], 49.84615384615385, places=4)
     self.assertAlmostEquals(scores[10], 21.769230769230771, places=4)
     self.assertAlmostEquals(np.mean(scores), 36.44790652385589, places=4)
Example #34
    def setUp(self):
        dataset_filepath = VmafConfig.test_resource_path(
            'NFLX_dataset_public_raw.py')
        dataset = import_python_file(dataset_filepath)

        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }

        self.dataset_reader = SelectSubjectRawDatasetReader(
            dataset, input_dict=info_dict)
Example #35
    def setUp(self):
        dataset_filepath = VmafConfig.test_resource_path(
            'NFLX_dataset_public_raw.py')
        dataset = import_python_file(dataset_filepath)

        np.random.seed(0)
        info_dict = {
            'corrupt_probability': 0.1,
        }

        self.dataset_reader = CorruptDataRawDatasetReader(dataset,
                                                          input_dict=info_dict)
Example #36
    def test_observer_aware_subjective_model_corruptdata(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEquals(np.mean(result['quality_scores']), 3.5573073781669944, places=4) # 3.5482845335713469
        self.assertAlmostEquals(np.var(result['quality_scores']), 1.3559834438740614, places=4) # 1.4355485462027884
Example #37
    def test_zscoresubjrejdmos_subjective_model_corruptdata_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = ZscoringSubjrejDmosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        self.assertAlmostEquals(np.mean(scores), 0, places=4)
        self.assertAlmostEquals(np.var(scores), 0.66405245792414114, places=4) # 1.4012220200639218
Example #38
 def test_import_python_file(self):
     ds = import_python_file(
         VmafConfig.test_resource_path('example_raw_dataset2.py'))
     self.assertEqual(ds.dataset_name, 'example')
     self.assertEqual(ds.yuv_fmt, 'yuv420p')
     self.assertEqual(ds.ref_videos[0]['content_id'], 0)
     self.assertEqual(ds.ref_videos[0]['path'],
                      '/dir/yuv/checkerboard_1920_1080_10_3_0_0.yuv')
     self.assertEqual(ds.dis_videos[3]['content_id'], 1)
     self.assertEqual(ds.dis_videos[3]['asset_id'], 3)
     self.assertEqual(ds.dis_videos[3]['os'], [70, 75, 80, 85, 90])
     self.assertEqual(ds.dis_videos[3]['path'],
                      '/dir/yuv/flat_1920_1080_10.yuv')
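For reference, a minimal sketch of the kind of dataset module that import_python_file loads in the example above; the attribute and field names follow the assertions in test_import_python_file, while the concrete values are purely illustrative.

# hypothetical raw-dataset module, loadable with import_python_file()
dataset_name = 'example'
yuv_fmt = 'yuv420p'

ref_videos = [
    {'content_id': 0, 'path': '/dir/yuv/checkerboard_1920_1080_10_3_0_0.yuv'},
]

dis_videos = [
    {'content_id': 1, 'asset_id': 3, 'os': [70, 75, 80, 85, 90],
     'path': '/dir/yuv/flat_1920_1080_10.yuv'},
]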
Example #39
def run_vmaf_kfold_cv(dataset_filepath,
                      contentid_groups,
                      param_filepath,
                      aggregate_method,
                      ):

    logger = get_stdout_logger()
    result_store = FileSystemResultStore()
    dataset = import_python_file(dataset_filepath)
    param = import_python_file(param_filepath)

    fig, ax = plt.subplots(figsize=(5, 5), nrows=1, ncols=1)

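    # cross-validate on the dataset using the supplied content-id groups as folds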
    cv_on_dataset(dataset, param, param, ax, result_store, contentid_groups,
                  logger, aggregate_method)

    ax.set_xlim([0, 120])
    ax.set_ylim([0, 120])
    plt.tight_layout()

    # === clean up ===
    close_logger(logger)
Example #40
    def test_read_dataset_refdif_start_end_frame(self):
        dataset_path = VmafConfig.test_resource_path('test_read_dataset_dataset3.py')
        dataset = import_python_file(dataset_path)
        assets = read_dataset(dataset)

        self.assertEqual(len(assets), 3)

        self.assertEqual(assets[0].ref_start_end_frame, (250, 250))
        self.assertEqual(assets[0].dis_start_end_frame, (250, 250))
        self.assertEqual(assets[1].ref_start_end_frame, (250, 250))
        self.assertEqual(assets[1].dis_start_end_frame, (200, 210))
        self.assertEqual(assets[2].ref_start_end_frame, (250, 250))
        self.assertEqual(assets[2].dis_start_end_frame, (250, 251))
Example #41
    def test_subjrejdmos_subjective_model_corruptdata_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = SubjrejDmosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        self.assertAlmostEquals(np.mean(scores), 4.0246673158065542, places=4)
        self.assertAlmostEquals(np.var(scores), 1.0932580358187849, places=4) # 1.4012220200639218
Example #42
    def test_zscore_mos_subjective_model_corruptdata_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MosModel(dataset_reader)
        result = subjective_model.run_modeling(zscore_mode=True, subject_rejection=True)
        scores = result['quality_scores']

        self.assertAlmostEquals(np.mean(scores), 0.0, places=4)
        self.assertAlmostEquals(np.var(scores), 0.66670826882879042, places=4)
Example #43
    def test_subjrejmos_subjective_model_corruptdata_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = SubjrejMosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        self.assertAlmostEquals(np.mean(scores), 3.5611814345991566, places=4)
        self.assertAlmostEquals(np.var(scores), 1.1049505732699529, places=4) # 1.4012220200639218
Example #44
    def test_mos_subjective_model_corruptdata(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        self.assertAlmostEquals(np.mean(scores), 3.5447906523855899, places=4)
        self.assertAlmostEquals(np.var(scores), 0.95893305294535369, places=4) # 1.4012220200639218
Example #45
    def test_read_dataset_basic(self):
        dataset_path = VmafConfig.test_resource_path('test_dataset.py')
        dataset = import_python_file(dataset_path)
        assets = read_dataset(dataset)

        self.assertEqual(len(assets), 4)
        self.assertTrue('groundtruth' in assets[0].asset_dict.keys())
        self.assertTrue('os' not in assets[0].asset_dict.keys())
        self.assertEqual(assets[0].quality_width_height, (1920, 1080))
        self.assertEqual(assets[0].resampling_type, 'bicubic')
        self.assertEqual(assets[0].ref_yuv_type, 'yuv420p')
        self.assertEqual(assets[0].dis_yuv_type, 'yuv420p')
        self.assertEqual(assets[1].quality_width_height, (1920, 1080))
        self.assertEqual(assets[1].resampling_type, 'bicubic')
Example #46
    def test_read_dataset_diffyuv(self):
        train_dataset_path = VmafConfig.test_resource_path('test_dataset_diffyuv.py')
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        self.assertEquals(len(train_assets), 4)
        self.assertEquals(train_assets[0].ref_width_height, (1920, 1080))
        self.assertEquals(train_assets[0].dis_width_height, (1920, 1080))
        self.assertEquals(train_assets[0].quality_width_height, (1920, 1080))
        self.assertEquals(train_assets[0].dis_yuv_type, 'yuv420p')
        self.assertEquals(train_assets[2].ref_width_height, (1280, 720))
        self.assertEquals(train_assets[2].dis_width_height, (1280, 720))
        self.assertEquals(train_assets[2].quality_width_height, (1280, 720))
        self.assertEquals(train_assets[2].dis_yuv_type, 'yuv420p10le')
Example #47
    def test_read_dataset_qualitywh2(self):
        train_dataset_path = VmafConfig.test_resource_path('test_image_dataset_diffdim_qualitywh2.py')
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        self.assertTrue('quality_width' in train_assets[0].asset_dict.keys())
        self.assertTrue('quality_height' in train_assets[0].asset_dict.keys())
        self.assertTrue('resampling_type' in train_assets[0].asset_dict.keys())
        self.assertTrue('quality_width' not in train_assets[1].asset_dict.keys())
        self.assertTrue('quality_height' not in train_assets[1].asset_dict.keys())
        self.assertTrue('resampling_type' not in train_assets[1].asset_dict.keys())
        self.assertEqual(train_assets[0].asset_dict['quality_width'], 200)
        self.assertEqual(train_assets[0].asset_dict['quality_height'], 100)
        self.assertEqual(train_assets[0].asset_dict['resampling_type'], 'bicubic')
Example #48
    def test_read_dataset(self):
        train_dataset_path = config.ROOT + '/python/test/resource/test_image_dataset_diffdim.py'
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        self.assertEquals(len(train_assets), 9)
        self.assertTrue('groundtruth' in train_assets[0].asset_dict.keys())
        self.assertTrue('os' not in train_assets[0].asset_dict.keys())
        self.assertTrue('width' in train_assets[0].asset_dict.keys())
        self.assertTrue('height' in train_assets[0].asset_dict.keys())
        self.assertTrue(
            'quality_width' not in train_assets[0].asset_dict.keys())
        self.assertTrue(
            'quality_height' not in train_assets[0].asset_dict.keys())
Example #49
    def test_read_dataset_basic(self):
        dataset_path = VmafConfig.test_resource_path('test_dataset.py')
        dataset = import_python_file(dataset_path)
        assets = read_dataset(dataset)

        self.assertEquals(len(assets), 4)
        self.assertTrue('groundtruth' in assets[0].asset_dict.keys())
        self.assertTrue('os' not in assets[0].asset_dict.keys())
        self.assertEqual(assets[0].quality_width_height, (1920, 1080))
        self.assertEqual(assets[0].resampling_type, 'bilinear')
        self.assertEqual(assets[0].ref_yuv_type, 'yuv420p')
        self.assertEqual(assets[0].dis_yuv_type, 'yuv420p')
        self.assertEqual(assets[1].quality_width_height, (1920, 1080))
        self.assertEqual(assets[1].resampling_type, 'bilinear')
Example #50
    def test_read_dataset_diffyuv(self):
        train_dataset_path = VmafConfig.test_resource_path('test_dataset_diffyuv.py')
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        self.assertEquals(len(train_assets), 4)
        self.assertEquals(train_assets[0].ref_width_height, (1920, 1080))
        self.assertEquals(train_assets[0].dis_width_height, (1920, 1080))
        self.assertEquals(train_assets[0].quality_width_height, (1920, 1080))
        self.assertEquals(train_assets[0].dis_yuv_type, 'yuv420p')
        self.assertEquals(train_assets[2].ref_width_height, (1280, 720))
        self.assertEquals(train_assets[2].dis_width_height, (1280, 720))
        self.assertEquals(train_assets[2].quality_width_height, (1280, 720))
        self.assertEquals(train_assets[2].dis_yuv_type, 'yuv420p10le')
Example #51
    def test_read_dataset_qualitywh2(self):
        train_dataset_path = VmafConfig.test_resource_path('test_image_dataset_diffdim_qualitywh2.py')
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        self.assertTrue('quality_width' in train_assets[0].asset_dict.keys())
        self.assertTrue('quality_height' in train_assets[0].asset_dict.keys())
        self.assertTrue('resampling_type' in train_assets[0].asset_dict.keys())
        self.assertTrue('quality_width' not in train_assets[1].asset_dict.keys())
        self.assertTrue('quality_height' not in train_assets[1].asset_dict.keys())
        self.assertTrue('resampling_type' not in train_assets[1].asset_dict.keys())
        self.assertEqual(train_assets[0].asset_dict['quality_width'], 200)
        self.assertEqual(train_assets[0].asset_dict['quality_height'], 100)
        self.assertEqual(train_assets[0].asset_dict['resampling_type'], 'bicubic')
Example #52
def explain_model_on_dataset(
    model,
    test_assets_selected_indexs,
    test_dataset_filepath,
    result_store_dir=VmafConfig.file_result_store_path()):
    def print_assets(test_assets):
        print('\n'.join(
            map(
                lambda tasset: "Asset {i}: {name}".format(
                    i=tasset[0],
                    name=get_file_name_without_extension(tasset[1].dis_path)),
                enumerate(test_assets))))

    test_dataset = import_python_file(test_dataset_filepath)
    test_assets = read_dataset(test_dataset)
    print_assets(test_assets)
    print("Assets selected for local explanation: {}".format(
        test_assets_selected_indexs))
    result_store = FileSystemResultStore(result_store_dir)
    test_assets = [test_assets[i] for i in test_assets_selected_indexs]
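    # assemble, for the selected assets, the same features the model was trained on (model.model_dict['feature_dict'])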
    test_fassembler = FeatureAssembler(
        feature_dict=model.model_dict['feature_dict'],
        feature_option_dict=None,
        assets=test_assets,
        logger=None,
        fifo_mode=True,
        delete_workdir=True,
        result_store=result_store,
        optional_dict=None,
        optional_dict2=None,
        parallelize=True,
    )
    test_fassembler.run()
    test_feature_results = test_fassembler.results
    test_xs = model.get_xs_from_results(test_feature_results)
    test_ys = model.get_ys_from_results(test_feature_results)
    test_ys_pred = model.predict(test_xs)['ys_label_pred']
    explainer = LocalExplainer(neighbor_samples=1000)
    test_exps = explainer.explain(model, test_xs)

    explainer.print_explanations(test_exps,
                                 assets=test_assets,
                                 ys=test_ys,
                                 ys_pred=test_ys_pred)
    explainer.plot_explanations(test_exps,
                                assets=test_assets,
                                ys=test_ys,
                                ys_pred=test_ys_pred)
    DisplayConfig.show()
Example #53
    def test_read_dataset(self):
        train_dataset_path = VmafConfig.test_resource_path('test_image_dataset_diffdim.py')
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        self.assertEquals(len(train_assets), 9)
        self.assertTrue('groundtruth' in train_assets[0].asset_dict.keys())
        self.assertTrue('os' not in train_assets[0].asset_dict.keys())
        self.assertFalse('width' in train_assets[0].asset_dict.keys())
        self.assertTrue('ref_width' in train_assets[0].asset_dict.keys())
        self.assertTrue('dis_width' in train_assets[0].asset_dict.keys())
        self.assertFalse('height' in train_assets[0].asset_dict.keys())
        self.assertTrue('ref_height' in train_assets[0].asset_dict.keys())
        self.assertTrue('dis_height' in train_assets[0].asset_dict.keys())
        self.assertTrue('quality_width' not in train_assets[0].asset_dict.keys())
        self.assertTrue('quality_height' not in train_assets[0].asset_dict.keys())
Example #54
    def test_test_on_dataset_raw(self):
        test_dataset = import_python_file(
            VmafConfig.test_resource_path('raw_dataset_sample.py'))
        test_assets, results = run_test_on_dataset(test_dataset, VmafQualityRunner, None,
                        None, None,
                        parallelize=True,
                        aggregate_method=None)

        self.assertAlmostEqual(results[0]['VMAF_score'], 99.142659046424384, places=4)
        self.assertAlmostEqual(results[1]['VMAF_score'], 35.066157497128764, places=4)
        self.assertAlmostEqual(results[2]['VMAF_score'], 97.428042675471147, places=4)
        self.assertAlmostEqual(results[3]['VMAF_score'], 97.427927701008869, places=4)
        self.assertAlmostEqual(test_assets[0].groundtruth, 100, places=4)
        self.assertAlmostEqual(test_assets[1].groundtruth, 50, places=4)
        self.assertAlmostEqual(test_assets[2].groundtruth, 100, places=4)
        self.assertAlmostEqual(test_assets[3].groundtruth, 90, places=4)
Example #55
    def setUp(self):

        train_dataset_path = VmafConfig.test_resource_path('test_image_dataset_diffdim.py')
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        runner = MomentNorefFeatureExtractor(
            train_assets,
            None,
            fifo_mode=True,
            delete_workdir=True,
            result_store=None,
            optional_dict=None,
            optional_dict2=None,
        )
        runner.run(parallelize=True)
        self.features = runner.results
Example #56
    def test_observer_content_aware_subjective_model_missingdata(self):

        dataset = import_python_file(self.dataset_filepath)

        np.random.seed(0)
        info_dict = {
            'missing_probability': 0.1,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)

        subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEquals(np.sum(result['content_ambiguity']), 3.9104244772977128, places=4)
        self.assertAlmostEquals(np.var(result['content_ambiguity']), 0.0037713583509767193, places=4)

        self.assertAlmostEquals(np.sum(result['observer_bias']), -0.21903272050455846, places=4)
        self.assertAlmostEquals(np.var(result['observer_bias']), 0.084353684687185043, places=4)

        self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 9.8168943054654481, places=4)
        self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.028159236075789944, places=4)

        self.assertAlmostEquals(np.sum(result['quality_scores']), 280.05548186797336, places=4)
        self.assertAlmostEquals(np.var(result['quality_scores']), 1.4339487982797514, places=4)

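        # repeat the modeling with a higher missing probability (0.5)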
        np.random.seed(0)
        info_dict = {
            'missing_probability': 0.5,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)

        subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEquals(np.sum(result['content_ambiguity']), 2.63184284168883, places=4)
        self.assertAlmostEquals(np.var(result['content_ambiguity']), 0.019164097909450246, places=4)

        self.assertAlmostEquals(np.sum(result['observer_bias']), 0.2263148440748638, places=4)
        self.assertAlmostEquals(np.var(result['observer_bias']), 0.070613033112114504, places=4)

        self.assertAlmostEquals(np.sum(result['observer_inconsistency']), 12.317917502439435, places=4)
        self.assertAlmostEquals(np.var(result['observer_inconsistency']), 0.029455722248727296, places=4)

        self.assertAlmostEquals(np.sum(result['quality_scores']), 280.29962156788139, places=4)
        self.assertAlmostEquals(np.var(result['quality_scores']), 1.4717366222424826, places=4)
Example #57
    def setUp(self):
        train_dataset_path = VmafConfig.test_resource_path("test_image_dataset_diffdim.py")
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        self.h5py_filepath = VmafConfig.workdir_path('test.hdf5')
        self.h5py_file = DisYUVRawVideoExtractor.open_h5py_file(self.h5py_filepath)
        optional_dict2 = {'h5py_file': self.h5py_file}

        fextractor = DisYUVRawVideoExtractor(
            train_assets,
            None,
            fifo_mode=True,
            delete_workdir=True,
            result_store=None,
            optional_dict=None,
            optional_dict2=optional_dict2,
        )
        fextractor.run(parallelize=False) # CAN ONLY USE SERIAL MODE FOR DisYUVRawVideoExtractor
        self.features = fextractor.results