Пример #1
0
 def test_train_test_on_raw_dataset_with_dis1st_thr(self):
     """Train VMAF on the raw sample dataset, re-test on it, and check
     that the predicted labels match on both the train and test passes."""
     train_dataset = import_python_file(
         config.ROOT + '/python/test/resource/raw_dataset_sample.py')
     model_param = import_python_file(
         config.ROOT + '/python/test/resource/model_param_sample.py')
     feature_param = import_python_file(
         config.ROOT + '/python/test/resource/feature_param_sample.py')
     # test set deliberately equals the training set
     (train_fassembler, train_assets, train_stats,
      test_fassembler, test_assets, test_stats, _) = train_test_vmaf_on_dataset(
          train_dataset=train_dataset,
          test_dataset=train_dataset,
          feature_param=feature_param,
          model_param=model_param,
          train_ax=None,
          test_ax=None,
          result_store=None,
          parallelize=True,
          logger=None,
          fifo_mode=True,
          output_model_filepath=self.output_model_filepath)
     self.train_fassembler = train_fassembler
     self.assertTrue(os.path.exists(self.output_model_filepath))
     expected_preds = [93.565459224020742, 60.451618249440827,
                       93.565460383297108, 92.417462071278933]
     self.assertItemsEqual(train_stats['ys_label_pred'], expected_preds)
     self.assertItemsEqual(test_stats['ys_label_pred'], expected_preds)
Пример #2
0
 def test_train_test_on_raw_dataset_with_dis1st_thr(self):
     """End-to-end VMAF train/test on the raw sample dataset (self-test):
     the model file must be written and predictions must match expectations."""
     train_dataset = import_python_file(
         config.ROOT + '/python/test/resource/raw_dataset_sample.py')
     model_param = import_python_file(
         config.ROOT + '/python/test/resource/model_param_sample.py')
     feature_param = import_python_file(
         config.ROOT + '/python/test/resource/feature_param_sample.py')
     outputs = train_test_vmaf_on_dataset(
         train_dataset=train_dataset, test_dataset=train_dataset,
         feature_param=feature_param, model_param=model_param,
         train_ax=None, test_ax=None, result_store=None,
         parallelize=True, logger=None, fifo_mode=True,
         output_model_filepath=self.output_model_filepath)
     train_fassembler, _, train_stats, _, _, test_stats, _ = outputs
     self.train_fassembler = train_fassembler
     self.assertTrue(os.path.exists(self.output_model_filepath))
     expected = [93.565459224020742, 60.451618249440827,
                 93.565460383297108, 92.417462071278933]
     # train and test stats are identical since the datasets coincide
     for stats in (train_stats, test_stats):
         self.assertItemsEqual(stats['ys_label_pred'], expected)
Пример #3
0
 def test_train_test_on_dataset_with_dis1st_thr(self):
     """Train/test VMAF on the (non-raw) sample dataset and spot-check the
     first predicted label on both passes."""
     train_dataset = import_python_file(
         config.ROOT + '/python/test/resource/dataset_sample.py')
     model_param = import_python_file(
         config.ROOT + '/python/test/resource/model_param_sample.py')
     feature_param = import_python_file(
         config.ROOT + '/python/test/resource/feature_param_sample.py')
     outputs = train_test_vmaf_on_dataset(
         train_dataset=train_dataset, test_dataset=train_dataset,
         feature_param=feature_param, model_param=model_param,
         train_ax=None, test_ax=None, result_store=None,
         parallelize=True, logger=None, fifo_mode=True,
         output_model_filepath=self.output_model_filepath,
     )
     train_fassembler = outputs[0]
     train_stats, test_stats = outputs[2], outputs[5]
     self.train_fassembler = train_fassembler
     self.assertTrue(os.path.exists(self.output_model_filepath))
     for stats in (train_stats, test_stats):
         self.assertAlmostEqual(stats['ys_label_pred'][0],
                                90.753010402770798, places=3)
Пример #4
0
 def test_train_test_on_dataset_with_dis1st_thr(self):
     """Train/test VMAF on the sample dataset and compare the full list of
     predicted labels on both the train and test passes."""
     train_dataset = import_python_file(
         config.ROOT + '/python/test/resource/dataset_sample.py')
     model_param = import_python_file(
         config.ROOT + '/python/test/resource/model_param_sample.py')
     feature_param = import_python_file(
         config.ROOT + '/python/test/resource/feature_param_sample.py')
     (train_fassembler, _train_assets, train_stats,
      _test_fassembler, _test_assets, test_stats, _) = \
         train_test_vmaf_on_dataset(
             train_dataset=train_dataset,
             test_dataset=train_dataset,
             feature_param=feature_param,
             model_param=model_param,
             train_ax=None,
             test_ax=None,
             result_store=None,
             parallelize=True,
             logger=None,
             fifo_mode=True,
             output_model_filepath=self.output_model_filepath,
         )
     self.train_fassembler = train_fassembler
     self.assertTrue(os.path.exists(self.output_model_filepath))
     expected_preds = [90.753010402770798, 59.223801498461015,
                       90.753011435798058, 89.270176556597008]
     self.assertItemsEqual(train_stats['ys_label_pred'], expected_preds)
     self.assertItemsEqual(test_stats['ys_label_pred'], expected_preds)
Пример #5
0
def run_vmaf_cv(train_dataset_filepath,
                test_dataset_filepath,
                param_filepath,
                output_model_filepath=None,
                **kwargs):
    """Cross-validate VMAF: train on one dataset, test on another, and
    scatter-plot the two sets side by side.

    test_dataset_filepath may be None, in which case no test set is used.
    Recognized kwargs 'xlim'/'ylim' set the axis limits of both subplots;
    remaining kwargs are forwarded to train_test_vmaf_on_dataset.
    """
    logger = get_stdout_logger()
    result_store = FileSystemResultStore()

    train_dataset = import_python_file(train_dataset_filepath)
    if test_dataset_filepath is None:
        test_dataset = None
    else:
        test_dataset = import_python_file(test_dataset_filepath)

    param = import_python_file(param_filepath)

    # === plot scatter ===
    nrows, ncols = 1, 2
    fig, axs = plt.subplots(figsize=(5 * ncols, 5 * nrows),
                            nrows=nrows, ncols=ncols)

    # NOTE(review): the stdout logger created above is not passed in here
    # (logger=None) — it is only closed at the end; presumably intentional.
    train_test_vmaf_on_dataset(train_dataset, test_dataset, param, param,
                               axs[0], axs[1], result_store,
                               parallelize=True,
                               logger=None,
                               output_model_filepath=output_model_filepath,
                               **kwargs)

    if 'xlim' in kwargs:
        for ax in axs:
            ax.set_xlim(kwargs['xlim'])

    if 'ylim' in kwargs:
        for ax in axs:
            ax.set_ylim(kwargs['ylim'])

    bbox = {'facecolor': 'white', 'alpha': 1, 'pad': 20}
    for ax, label in zip(axs, ('Training Set', 'Testing Set')):
        ax.annotate(label,
                    xy=(0.1, 0.85),
                    xycoords='axes fraction',
                    bbox=bbox)

    plt.tight_layout()

    # === clean up ===
    close_logger(logger)
Пример #6
0
 def test_mos_subjective_model_output(self):
     """Run the MOS model, write the aggregated dataset file, and verify
     the first distorted video carries 'groundtruth' but not raw 'os'."""
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     subjective_model.run_modeling()
     subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath)
     self.assertTrue(os.path.exists(self.output_dataset_filepath))
     dataset2 = import_python_file(self.output_dataset_filepath)
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('os' not in dis_video)
     # assertAlmostEqual instead of the deprecated assertAlmostEquals alias
     self.assertAlmostEqual(dis_video['groundtruth'], 4.884615384615385, places=4)
Пример #7
0
 def test_mos_subjective_model_output(self):
     """MOS model output test: the aggregated dataset file must exist and
     expose 'groundtruth' (not raw 'os') with the expected MOS value."""
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     subjective_model.run_modeling()
     subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath)
     self.assertTrue(os.path.exists(self.output_dataset_filepath))
     dataset2 = import_python_file(self.output_dataset_filepath)
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('os' not in dis_video)
     # assertAlmostEqual instead of the deprecated assertAlmostEquals alias
     self.assertAlmostEqual(dis_video['groundtruth'], 4.884615384615385, places=4)
Пример #8
0
 def test_persubject_subjective_model_output(self):
     """PerSubject model with a final linear transform (25*x - 25): the
     aggregated file is written and the first groundtruth equals 100."""
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = PerSubjectModel(dataset_reader)
     subjective_model.run_modeling(transform_final={'p1': 25, 'p0': -25})
     subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath)
     self.assertTrue(os.path.exists(self.output_dataset_filepath))
     dataset2 = import_python_file(self.output_dataset_filepath)
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('os' not in dis_video)
     # assertAlmostEqual instead of the deprecated assertAlmostEquals alias
     self.assertAlmostEqual(dis_video['groundtruth'], 100.0, places=4)
Пример #9
0
 def test_mos_subjective_model_output_custom_resampling(self):
     """Writing the aggregated dataset with resampling_type='lanczos' keeps
     that setting and drops the quality_width/quality_height attributes."""
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     subjective_model.run_modeling()
     subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath, resampling_type='lanczos')
     self.assertTrue(os.path.exists(self.output_dataset_filepath))
     dataset2 = import_python_file(self.output_dataset_filepath)
     self.assertFalse(hasattr(dataset2, 'quality_height'))
     self.assertFalse(hasattr(dataset2, 'quality_width'))
     # assertEqual/assertAlmostEqual instead of the deprecated
     # assertEquals/assertAlmostEquals aliases
     self.assertEqual(dataset2.resampling_type, 'lanczos')
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('os' not in dis_video)
     self.assertAlmostEqual(dis_video['groundtruth'], 4.884615384615385, places=4)
Пример #10
0
 def test_mos_subjective_model_output_custom_resampling(self):
     """Custom resampling type round-trips through the aggregated dataset
     file; quality dimensions are not written."""
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     subjective_model.run_modeling()
     subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath, resampling_type='lanczos')
     self.assertTrue(os.path.exists(self.output_dataset_filepath))
     dataset2 = import_python_file(self.output_dataset_filepath)
     self.assertFalse(hasattr(dataset2, 'quality_height'))
     self.assertFalse(hasattr(dataset2, 'quality_width'))
     # assertEqual/assertAlmostEqual instead of the deprecated
     # assertEquals/assertAlmostEquals aliases
     self.assertEqual(dataset2.resampling_type, 'lanczos')
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('os' not in dis_video)
     self.assertAlmostEqual(dis_video['groundtruth'], 4.884615384615385, places=4)
Пример #11
0
    def setUp(self):
        """Extract raw-video features from the noisy image dataset into an
        h5py-backed store, shuffle them deterministically, and reset the
        neural-net model class."""
        train_dataset_path = config.ROOT + '/python/test/resource/test_image_dataset_noisy.py'
        train_assets = read_dataset(import_python_file(train_dataset_path))

        self.raw_video_h5py_filepath = config.ROOT + '/workspace/workdir/rawvideo.hdf5'
        self.raw_video_h5py_file = DisYUVRawVideoExtractor.open_h5py_file(
            self.raw_video_h5py_filepath)

        # CAN ONLY USE SERIAL MODE FOR DisYRawVideoExtractor
        _, self.features = run_executors_in_parallel(
            DisYUVRawVideoExtractor,
            train_assets,
            fifo_mode=True,
            delete_workdir=True,
            parallelize=False,
            result_store=None,
            optional_dict=None,
            optional_dict2={'h5py_file': self.raw_video_h5py_file},
        )

        # fixed seed so the shuffled feature order is reproducible
        np.random.seed(0)
        np.random.shuffle(self.features)

        self.patch_h5py_filepath = config.ROOT + '/workspace/workdir/patch.hdf5'
        self.model_filename = config.ROOT + "/workspace/model/test_save_load.pkl"

        NeuralNetTrainTestModel.reset()
Пример #12
0
    def test_test_on_dataset_mle(self):
        """Run VMAF on the raw sample dataset with a MOS subjective model and
        verify both the per-asset VMAF scores and the recovered groundtruths."""
        test_dataset = import_python_file(
            config.ROOT + '/python/test/resource/raw_dataset_sample.py')
        test_assets, results = test_on_dataset(
            test_dataset, VmafQualityRunner, None, None, None,
            parallelize=True,
            aggregate_method=None,
            subj_model_class=MosModel)

        expected_scores = [99.620284242680668, 28.98459923031756,
                           98.860794831997097, 98.860790186334995]
        for result, expected in zip(results, expected_scores):
            self.assertAlmostEqual(result['VMAF_score'], expected, places=4)

        expected_groundtruths = [100, 50, 90, 80]
        for asset, expected in zip(test_assets, expected_groundtruths):
            self.assertAlmostEqual(asset.groundtruth, expected, places=4)
Пример #13
0
def main():
    """Remove stored results for one quality runner type on a dataset.

    Usage: <script> quality_type dataset_filepath
    Returns 0 on success, 1 if the dataset cannot be imported, 2 on usage
    errors (missing arguments or unknown quality type).
    """
    if len(sys.argv) < 3:
        print_usage()
        return 2

    # the length check above guarantees these indexings succeed; the old
    # try/except ValueError around them was dead code (indexing raises
    # IndexError, never ValueError)
    quality_type = sys.argv[1]
    dataset_filepath = sys.argv[2]

    try:
        dataset = import_python_file(dataset_filepath)
    except Exception as e:
        # print(...) with a single argument behaves identically under
        # Python 2's print statement and Python 3's print function
        print("Error: " + str(e))
        return 1

    try:
        runner_class = QualityRunner.find_subclass(quality_type)
    except Exception:
        # was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; narrow it to Exception
        print_usage()
        return 2

    result_store = FileSystemResultStore()

    run_remove_results_for_dataset(result_store, dataset, runner_class)

    return 0
Пример #14
0
def main():
    """Remove stored results for one quality runner type on a dataset.

    Usage: <script> quality_type dataset_filepath
    Returns 0 on success, 1 if the dataset cannot be imported, 2 on usage
    errors (missing arguments or unknown quality type).
    """
    if len(sys.argv) < 3:
        print_usage()
        return 2

    # the length check above guarantees these indexings succeed; the old
    # try/except ValueError around them was dead code (indexing raises
    # IndexError, never ValueError)
    quality_type = sys.argv[1]
    dataset_filepath = sys.argv[2]

    try:
        dataset = import_python_file(dataset_filepath)
    except Exception as e:
        # print(...) with a single argument behaves identically under
        # Python 2's print statement and Python 3's print function
        print("Error: " + str(e))
        return 1

    try:
        runner_class = QualityRunner.find_subclass(quality_type)
    except Exception:
        # was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; narrow it to Exception
        print_usage()
        return 2

    result_store = FileSystemResultStore()

    run_remove_results_for_dataset(result_store, dataset, runner_class)

    return 0
Пример #15
0
def run_vmaf_kfold_cv(dataset_filepath, contentid_groups, param_filepath, aggregate_method):
    """Run k-fold cross-validation of VMAF on one dataset, grouping folds by
    content id, and plot the result on a single 0-120 scatter axis."""
    logger = get_stdout_logger()
    result_store = FileSystemResultStore()

    dataset = import_python_file(dataset_filepath)
    param = import_python_file(param_filepath)

    fig, ax = plt.subplots(figsize=(5, 5), nrows=1, ncols=1)
    cv_on_dataset(dataset, param, param, ax, result_store,
                  contentid_groups, logger, aggregate_method)

    ax.set_xlim([0, 120])
    ax.set_ylim([0, 120])
    plt.tight_layout()

    # === clean up ===
    close_logger(logger)
Пример #16
0
def run_vmaf_cv(train_dataset_filepath, test_dataset_filepath, param_filepath, output_model_filepath=None, **kwargs):
    """Train VMAF on one dataset and test on another, plotting both sets.

    test_dataset_filepath may be None (no test set). 'xlim'/'ylim' kwargs
    set axis limits on both subplots; everything else is forwarded to
    train_test_vmaf_on_dataset.
    """
    logger = get_stdout_logger()
    result_store = FileSystemResultStore()

    train_dataset = import_python_file(train_dataset_filepath)
    if test_dataset_filepath is not None:
        test_dataset = import_python_file(test_dataset_filepath)
    else:
        test_dataset = None

    param = import_python_file(param_filepath)

    # === plot scatter ===
    nrows, ncols = 1, 2
    fig, axs = plt.subplots(figsize=(5 * ncols, 5 * nrows), nrows=nrows, ncols=ncols)

    # NOTE(review): the stdout logger created above is only closed at the
    # end, not forwarded here (logger=None) — presumably intentional.
    train_test_vmaf_on_dataset(train_dataset, test_dataset, param, param,
                               axs[0], axs[1], result_store,
                               parallelize=True,
                               logger=None,
                               output_model_filepath=output_model_filepath,
                               **kwargs)

    if "xlim" in kwargs:
        for ax in axs:
            ax.set_xlim(kwargs["xlim"])

    if "ylim" in kwargs:
        for ax in axs:
            ax.set_ylim(kwargs["ylim"])

    bbox = {"facecolor": "white", "alpha": 1, "pad": 20}
    for ax, label in zip(axs, ("Training Set", "Testing Set")):
        ax.annotate(label, xy=(0.1, 0.85), xycoords="axes fraction", bbox=bbox)

    plt.tight_layout()

    # === clean up ===
    close_logger(logger)
Пример #17
0
 def test_mos_subjective_model_normalize_final(self):
     """With normalize_final=True the MOS quality scores are standardized:
     spot-check two entries and verify the mean is zero."""
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling(normalize_final=True)
     scores = result['quality_scores']
     # assertAlmostEqual instead of the deprecated assertAlmostEquals alias
     self.assertAlmostEqual(scores[0], 1.1318646945818083, places=4)
     self.assertAlmostEqual(scores[10], -1.2400334499143002, places=4)
     self.assertAlmostEqual(np.mean(scores), 0.0, places=4)
Пример #18
0
 def test_mos_subjective_model_transform_final(self):
     """transform_final applies the linear map p1*x + p0 (here 10*x + 1) to
     the final MOS quality scores."""
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling(transform_final={'p1': 10, 'p0': 1})
     scores = result['quality_scores']
     # assertAlmostEqual instead of the deprecated assertAlmostEquals alias
     self.assertAlmostEqual(scores[0], 49.84615384615385, places=4)
     self.assertAlmostEqual(scores[10], 21.769230769230771, places=4)
     self.assertAlmostEqual(np.mean(scores), 36.44790652385589, places=4)
Пример #19
0
 def test_mos_subjective_model_normalize_final(self):
     """normalize_final=True standardizes the MOS quality scores; their mean
     must come out zero."""
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling(normalize_final=True)
     scores = result['quality_scores']
     # assertAlmostEqual instead of the deprecated assertAlmostEquals alias
     self.assertAlmostEqual(scores[0], 1.1318646945818083, places=4)
     self.assertAlmostEqual(scores[10], -1.2400334499143002, places=4)
     self.assertAlmostEqual(np.mean(scores), 0.0, places=4)
Пример #20
0
 def test_mos_subjective_model(self):
     """Baseline MOS model: spot-check two quality scores and their mean."""
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling()
     scores = result['quality_scores']
     # assertAlmostEqual instead of the deprecated assertAlmostEquals alias
     self.assertAlmostEqual(scores[0], 4.884615384615385, places=4)
     self.assertAlmostEqual(scores[10], 2.0769230769230771, places=4)
     self.assertAlmostEqual(np.mean(scores), 3.544790652385589, places=4)
Пример #21
0
 def test_mos_subjective_model(self):
     """MOS model without options: verify selected scores and the overall mean."""
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling()
     scores = result['quality_scores']
     # assertAlmostEqual instead of the deprecated assertAlmostEquals alias
     self.assertAlmostEqual(scores[0], 4.884615384615385, places=4)
     self.assertAlmostEqual(scores[10], 2.0769230769230771, places=4)
     self.assertAlmostEqual(np.mean(scores), 3.544790652385589, places=4)
Пример #22
0
 def test_mos_subjective_model_transform_final(self):
     """Final linear transform 10*x + 1 is applied to the MOS scores."""
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling(transform_final={'p1': 10, 'p0': 1})
     scores = result['quality_scores']
     # assertAlmostEqual instead of the deprecated assertAlmostEquals alias
     self.assertAlmostEqual(scores[0], 49.84615384615385, places=4)
     self.assertAlmostEqual(scores[10], 21.769230769230771, places=4)
     self.assertAlmostEqual(np.mean(scores), 36.44790652385589, places=4)
Пример #23
0
 def test_observer_content_aware_subjective_model_subjreject(self):
     """The MLE model must refuse subject_rejection=True with an AssertionError."""
     dataset = import_python_file(self.dataset_filepath)
     np.random.seed(0)
     info_dict = {
         'selected_subjects': range(5),
     }
     dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
     subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
     with self.assertRaises(AssertionError):
         # the unused `result =` binding was removed; the call is expected
         # to raise before producing a value
         subjective_model.run_modeling(subject_rejection=True)
Пример #24
0
 def test_observer_content_aware_subjective_model_subjreject(self):
     """subject_rejection=True is unsupported by the MLE model: expect AssertionError."""
     dataset = import_python_file(self.dataset_filepath)
     np.random.seed(0)
     info_dict = {
         'selected_subjects': range(5),
     }
     dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
     subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
     with self.assertRaises(AssertionError):
         # the unused `result =` binding was removed; the call is expected
         # to raise before producing a value
         subjective_model.run_modeling(subject_rejection=True)
Пример #25
0
    def setUp(self):
        """Build a dataset reader over the public NFLX raw dataset with the
        first five subjects' scores corrupted (deterministic via seed 0)."""
        dataset_filepath = config.ROOT + '/python/test/resource/NFLX_dataset_public_raw.py'
        dataset = import_python_file(dataset_filepath)

        np.random.seed(0)  # make the corruption reproducible
        info_dict = {'selected_subjects': range(5)}
        self.dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
Пример #26
0
    def setUp(self):
        """Build a dataset reader over the public NFLX raw dataset with 10%
        of the scores randomly dropped (deterministic via seed 0)."""
        dataset_filepath = config.ROOT + '/python/test/resource/NFLX_dataset_public_raw.py'
        dataset = import_python_file(dataset_filepath)

        np.random.seed(0)  # make the missing-data pattern reproducible
        info_dict = {'missing_probability': 0.1}
        self.dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)
Пример #27
0
    def test_explain_train_test_model(self):
        """Train a random-forest model on moment features and verify the
        LocalExplainer's weights, features, normalized features, and names."""
        model_class = SklearnRandomForestTrainTestModel

        train_dataset_path = config.ROOT + '/python/test/resource/' \
                                           'test_image_dataset_diffdim.py'
        train_assets = read_dataset(import_python_file(train_dataset_path))
        _, self.features = run_executors_in_parallel(
            MomentNorefFeatureExtractor,
            train_assets,
            fifo_mode=True,
            delete_workdir=True,
            parallelize=True,
            result_store=None,
            optional_dict=None,
            optional_dict2=None,
        )

        # train on the first 7 results, explain the rest
        xys = model_class.get_xys_from_results(self.features[:7])
        model = model_class({'norm_type': 'normalize', 'random_state': 0}, None)
        model.train(xys)

        np.random.seed(0)

        xs = model_class.get_xs_from_results(self.features[7:])
        explainer = LocalExplainer(neighbor_samples=1000)
        exps = explainer.explain(model, xs)

        # expected values per (row, column) for each explanation field
        expected = {
            'feature_weights': [[-0.12416, -0.20931, 0.02322],
                                [0.00076, -0.01245, 0.03673]],
            'features': [[107.73501, 13691.23881, 2084.40542],
                         [35.81638, 1611.56764, 328.75389]],
            'features_normalized': [[-0.65527, -0.68872, 0.08524],
                                    [-3.74922, -2.79586, -1.32625]],
        }
        for field, rows in expected.items():
            for i, row in enumerate(rows):
                for j, value in enumerate(row):
                    self.assertAlmostEqual(exps[field][i, j], value, places=4)

        self.assertEqual(exps['feature_names'],
                         ['Moment_noref_feature_1st_score',
                          'Moment_noref_feature_2nd_score',
                          'Moment_noref_feature_var_score'])
Пример #28
0
    def test_explain_train_test_model(self):
        """Train an RF model on moment features; check every value the
        LocalExplainer reports against a table of expected numbers."""
        model_class = SklearnRandomForestTrainTestModel

        train_dataset_path = config.ROOT + '/python/test/resource/' \
                                           'test_image_dataset_diffdim.py'
        train_assets = read_dataset(import_python_file(train_dataset_path))
        _, self.features = run_executors_in_parallel(
            MomentNorefFeatureExtractor,
            train_assets,
            fifo_mode=True,
            delete_workdir=True,
            parallelize=True,
            result_store=None,
            optional_dict=None,
            optional_dict2=None,
        )

        # first 7 results train the model; the remainder are explained
        xys = model_class.get_xys_from_results(self.features[:7])
        model = model_class({'norm_type': 'normalize', 'random_state': 0}, None)
        model.train(xys)

        np.random.seed(0)

        xs = model_class.get_xs_from_results(self.features[7:])
        explainer = LocalExplainer(neighbor_samples=1000)
        exps = explainer.explain(model, xs)

        # expected values per (row, column) for each explanation field
        expected = {
            'feature_weights': [[-0.12416, -0.20931, 0.02322],
                                [0.00076, -0.01245, 0.03673]],
            'features': [[107.73501, 13691.23881, 2084.40542],
                         [35.81638, 1611.56764, 328.75389]],
            'features_normalized': [[-0.65527, -0.68872, 0.08524],
                                    [-3.74922, -2.79586, -1.32625]],
        }
        for field, rows in expected.items():
            for i, row in enumerate(rows):
                for j, value in enumerate(row):
                    self.assertAlmostEqual(exps[field][i, j], value, places=4)

        self.assertEqual(exps['feature_names'],
                         ['Moment_noref_feature_1st_score',
                          'Moment_noref_feature_2nd_score',
                          'Moment_noref_feature_var_score'])
Пример #29
0
 def test_train_test_on_dataset_with_dis1st_thr(self):
     """Train VMAF on self.train_dataset with no test set; the model file
     must be written and the training predictions must match."""
     model_param = import_python_file(
         config.ROOT + '/python/test/resource/model_param_sample.py')
     feature_param = import_python_file(
         config.ROOT + '/python/test/resource/feature_param_sample.py')
     outputs = train_test_vmaf_on_dataset(
         train_dataset=self.train_dataset, test_dataset=None,
         feature_param=feature_param, model_param=model_param,
         train_ax=None, test_ax=None, result_store=None,
         parallelize=True, logger=None, fifo_mode=True,
         output_model_filepath=self.output_model_filepath,
     )
     # outputs: (train_fassembler, train_assets, train_stats,
     #           test_fassembler, test_assets, test_stats, _)
     self.train_fassembler = outputs[0]
     train_stats = outputs[2]
     self.assertTrue(os.path.exists(self.output_model_filepath))
     self.assertItemsEqual(train_stats['ys_label_pred'],
                           [90.753010402770798, 59.223801498461015,
                            90.753011435798058, 89.270176556597008])
Пример #30
0
    def test_read_dataset(self):
        """read_dataset yields 9 assets whose dicts carry groundtruth and
        source dimensions but no raw 'os' or quality_* keys."""
        train_dataset_path = config.ROOT + '/python/test/resource/test_image_dataset_diffdim.py'
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        # assertEqual instead of the deprecated assertEquals alias
        self.assertEqual(len(train_assets), 9)
        self.assertTrue('groundtruth' in train_assets[0].asset_dict.keys())
        self.assertTrue('os' not in train_assets[0].asset_dict.keys())
        self.assertTrue('width' in train_assets[0].asset_dict.keys())
        self.assertTrue('height' in train_assets[0].asset_dict.keys())
        self.assertTrue('quality_width' not in train_assets[0].asset_dict.keys())
        self.assertTrue('quality_height' not in train_assets[0].asset_dict.keys())
Пример #31
0
    def test_read_dataset(self):
        """Assets read from the diffdim image dataset expose groundtruth and
        width/height, but not raw 'os' scores or quality_* overrides."""
        train_dataset_path = config.ROOT + '/python/test/resource/test_image_dataset_diffdim.py'
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        # assertEqual instead of the deprecated assertEquals alias
        self.assertEqual(len(train_assets), 9)
        self.assertTrue('groundtruth' in train_assets[0].asset_dict.keys())
        self.assertTrue('os' not in train_assets[0].asset_dict.keys())
        self.assertTrue('width' in train_assets[0].asset_dict.keys())
        self.assertTrue('height' in train_assets[0].asset_dict.keys())
        self.assertTrue('quality_width' not in train_assets[0].asset_dict.keys())
        self.assertTrue('quality_height' not in train_assets[0].asset_dict.keys())
Пример #32
0
 def test_train_test_on_dataset_with_dis1st_thr(self):
     """Train via train_test_on_dataset with no test set; verify the model
     file exists and training predictions match the expected labels."""
     model_param = import_python_file(
         config.ROOT + '/python/test/resource/model_param_sample.py')
     feature_param = import_python_file(
         config.ROOT + '/python/test/resource/feature_param_sample.py')
     outputs = train_test_on_dataset(
         train_dataset=self.train_dataset, test_dataset=None,
         feature_param=feature_param, model_param=model_param,
         train_ax=None, test_ax=None, result_store=None,
         parallelize=True, logger=None, fifo_mode=True,
         output_model_filepath=self.output_model_filepath,
     )
     # outputs: (train_fassembler, train_assets, train_stats,
     #           test_fassembler, test_assets, test_stats)
     self.train_fassembler = outputs[0]
     train_stats = outputs[2]
     self.assertTrue(os.path.exists(self.output_model_filepath))
     self.assertItemsEqual(train_stats['ys_label_pred'],
                           [91.707522376672316, 58.277822562766268,
                            91.707521620497104, 88.307134410232536])
Пример #33
0
    def test_observer_aware_subjective_model_corruptdata(self):
        """MLE-reduced model on data with 5 corrupted subjects: mean/var of
        the recovered quality scores stay close to the clean-data values."""
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        # assertAlmostEqual instead of the deprecated assertAlmostEquals alias;
        # trailing numbers are the uncorrupted-data references
        self.assertAlmostEqual(np.mean(result['quality_scores']), 3.5573073781669944, places=4) # 3.5482845335713469
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.3559834438740614, places=4) # 1.4355485462027884
Пример #34
0
    def test_observer_aware_subjective_model_corruptdata(self):
        """Run the MLE-reduced subjective model on a dataset where some
        subjects' scores are corrupted and check mean/variance."""
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)  # deterministic corruption
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        # use the non-deprecated assertAlmostEqual spelling
        self.assertAlmostEqual(np.mean(result['quality_scores']), 3.5573073781669944, places=4) # 3.5482845335713469
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.3559834438740614, places=4) # 1.4355485462027884
Пример #35
0
    def test_subjrejdmos_subjective_model_corruptdata_subjreject(self):
        """DMOS with subject rejection should tolerate corrupted subjects."""
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)  # deterministic corruption
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = SubjrejDmosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        # assertAlmostEquals is a deprecated alias; use assertAlmostEqual
        self.assertAlmostEqual(np.mean(scores), 4.0246673158065542, places=4)
        self.assertAlmostEqual(np.var(scores), 1.0932580358187849, places=4) # 1.4012220200639218
Пример #36
0
    def test_mos_subjective_model_corruptdata(self):
        """Plain MOS model statistics on a dataset with corrupted subjects."""
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)  # deterministic corruption
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        # assertAlmostEquals is a deprecated alias; use assertAlmostEqual
        self.assertAlmostEqual(np.mean(scores), 3.5447906523855899, places=4)
        self.assertAlmostEqual(np.var(scores), 0.95893305294535369, places=4) # 1.4012220200639218
Пример #37
0
    def test_zscore_mos_subjective_model_corruptdata_subjreject(self):
        """MOS with z-scoring and subject rejection: z-scored output should be
        centered at zero."""
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)  # deterministic corruption
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MosModel(dataset_reader)
        result = subjective_model.run_modeling(zscore_mode=True, subject_rejection=True)
        scores = result['quality_scores']

        # assertAlmostEquals is a deprecated alias; use assertAlmostEqual
        self.assertAlmostEqual(np.mean(scores), 0.0, places=4)
        self.assertAlmostEqual(np.var(scores), 0.66670826882879042, places=4)
Пример #38
0
    def test_subjrejmos_subjective_model_corruptdata_subjreject(self):
        """MOS with subject rejection on a dataset with corrupted subjects."""
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)  # deterministic corruption
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = SubjrejMosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        # assertAlmostEquals is a deprecated alias; use assertAlmostEqual
        self.assertAlmostEqual(np.mean(scores), 3.5611814345991566, places=4)
        self.assertAlmostEqual(np.var(scores), 1.1049505732699529, places=4) # 1.4012220200639218
Пример #39
0
    def test_zscoresubjrejdmos_subjective_model_corruptdata_subjreject(self):
        """Z-scoring + subject-rejecting DMOS: mean should be ~0 after z-scoring."""
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)  # deterministic corruption
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = ZscoringSubjrejDmosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        # assertAlmostEquals is a deprecated alias; use assertAlmostEqual
        self.assertAlmostEqual(np.mean(scores), 0, places=4)
        self.assertAlmostEqual(np.var(scores), 0.66405245792414114, places=4) # 1.4012220200639218
Пример #40
0
    def test_mos_subjective_model_corruptdata(self):
        """MOS model should still produce the expected statistics when some
        subjects' scores are corrupted."""
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)  # deterministic corruption
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        # assertAlmostEquals is deprecated; assertAlmostEqual is canonical
        self.assertAlmostEqual(np.mean(scores), 3.5447906523855899, places=4)
        self.assertAlmostEqual(np.var(scores), 0.95893305294535369, places=4) # 1.4012220200639218
Пример #41
0
    def test_zscoresubjrejdmos_subjective_model_corruptdata_subjreject(self):
        """ZscoringSubjrejDmosModel on a corrupted-subject dataset."""
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)  # deterministic corruption
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = ZscoringSubjrejDmosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        # assertAlmostEquals is deprecated; assertAlmostEqual is canonical
        self.assertAlmostEqual(np.mean(scores), 0, places=4)
        self.assertAlmostEqual(np.var(scores), 0.66405245792414114, places=4) # 1.4012220200639218
Пример #42
0
    def test_subjrejdmos_subjective_model_corruptdata_subjreject(self):
        """SubjrejDmosModel on a corrupted-subject dataset."""
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)  # deterministic corruption
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = SubjrejDmosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        # assertAlmostEquals is deprecated; assertAlmostEqual is canonical
        self.assertAlmostEqual(np.mean(scores), 4.0246673158065542, places=4)
        self.assertAlmostEqual(np.var(scores), 1.0932580358187849, places=4) # 1.4012220200639218
Пример #43
0
    def test_zscore_mos_subjective_model_corruptdata_subjreject(self):
        """MosModel in z-score mode with subject rejection; the z-scored
        quality scores should have ~zero mean."""
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)  # deterministic corruption
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MosModel(dataset_reader)
        result = subjective_model.run_modeling(zscore_mode=True, subject_rejection=True)
        scores = result['quality_scores']

        # assertAlmostEquals is deprecated; assertAlmostEqual is canonical
        self.assertAlmostEqual(np.mean(scores), 0.0, places=4)
        self.assertAlmostEqual(np.var(scores), 0.66670826882879042, places=4)
Пример #44
0
    def test_subjrejmos_subjective_model_corruptdata_subjreject(self):
        """SubjrejMosModel on a corrupted-subject dataset."""
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)  # deterministic corruption
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = SubjrejMosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        # assertAlmostEquals is deprecated; assertAlmostEqual is canonical
        self.assertAlmostEqual(np.mean(scores), 3.5611814345991566, places=4)
        self.assertAlmostEqual(np.var(scores), 1.1049505732699529, places=4) # 1.4012220200639218
Пример #45
0
def run_vmaf_kfold_cv(dataset_filepath,
                      contentid_groups,
                      param_filepath,
                      aggregate_method,
                      ):
    """Run k-fold cross validation of VMAF over a dataset, with folds given
    by contentid_groups, and plot predicted vs. groundtruth scores."""
    dataset = import_python_file(dataset_filepath)
    param = import_python_file(param_filepath)

    logger = get_stdout_logger()
    result_store = FileSystemResultStore()

    fig, ax = plt.subplots(figsize=(5, 5), nrows=1, ncols=1)

    # the same param module is used for both feature and model parameters
    cv_on_dataset(dataset, param, param, ax, result_store, contentid_groups,
                  logger, aggregate_method)

    ax.set_xlim([0, 120])
    ax.set_ylim([0, 120])
    plt.tight_layout()

    # release the stdout logger
    close_logger(logger)
Пример #46
0
 def test_train_test_on_dataset_with_dis1st_thr(self):
     """Train on self.train_dataset with no test set and check that the
     trained model's predicted labels match the expected values."""
     model_param = import_python_file(
         config.ROOT + '/python/test/resource/model_param_sample.py')
     feature_param = import_python_file(
         config.ROOT + '/python/test/resource/feature_param_sample.py')
     (train_fassembler, _, train_stats,
      _, _, _) = train_test_on_dataset(
         train_dataset=self.train_dataset, test_dataset=None,
         feature_param=feature_param, model_param=model_param,
         train_ax=None, test_ax=None, result_store=None,
         parallelize=True,
         logger=None,
         fifo_mode=True,
         output_model_filepath=self.output_model_filepath,
     )
     self.train_fassembler = train_fassembler
     self.assertTrue(os.path.exists(self.output_model_filepath))
     expected_labels = [91.707522376672316, 58.277822562766268,
                        91.707521620497104, 88.307134410232536]
     self.assertItemsEqual(train_stats['ys_label_pred'], expected_labels)
Пример #47
0
    def test_observer_content_aware_subjective_model_missingdata(self):
        """MLE model statistics under two missing-data probabilities.

        The two phases were copy-pasted in the original; they are folded into
        a data-driven loop. assertAlmostEquals (deprecated alias) is replaced
        by assertAlmostEqual. Assertion order is preserved.
        """
        dataset = import_python_file(self.dataset_filepath)

        # (missing_probability, [(result key, expected sum, expected var), ...])
        cases = [
            (0.1, [
                ('content_bias', 0, 0),
                ('content_ambiguity', 3.9104244772977128, 0.0037713583509767193),
                ('observer_bias', -0.21903272050455846, 0.084353684687185043),
                ('observer_inconsistency', 9.8168943054654481, 0.028159236075789944),
                ('quality_scores', 280.05548186797336, 1.4339487982797514),
            ]),
            (0.5, [
                ('content_bias', 0, 0),
                ('content_ambiguity', 2.63184284168883, 0.019164097909450246),
                ('observer_bias', 0.2263148440748638, 0.070613033112114504),
                ('observer_inconsistency', 12.317917502439435, 0.029455722248727296),
                ('quality_scores', 280.29962156788139, 1.4717366222424826),
            ]),
        ]
        for missing_probability, expectations in cases:
            # reseed so each probability sees the same random stream
            np.random.seed(0)
            info_dict = {
                'missing_probability': missing_probability,
            }
            dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)

            subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
            result = subjective_model.run_modeling()

            for key, expected_sum, expected_var in expectations:
                self.assertAlmostEqual(np.sum(result[key]), expected_sum, places=4)
                self.assertAlmostEqual(np.var(result[key]), expected_var, places=4)
Пример #48
0
    def test_observer_content_aware_subjective_model_missingdata(self):
        """Check MLE model statistics with 10% and 50% of scores missing.

        assertAlmostEquals (deprecated unittest alias) replaced by
        assertAlmostEqual; behavior otherwise unchanged.
        """
        dataset = import_python_file(self.dataset_filepath)

        # --- phase 1: 10% missing ---
        np.random.seed(0)
        info_dict = {
            'missing_probability': 0.1,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)

        subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['content_bias']), 0, places=4)
        self.assertAlmostEqual(np.var(result['content_bias']), 0, places=4)

        self.assertAlmostEqual(np.sum(result['content_ambiguity']), 3.9104244772977128, places=4)
        self.assertAlmostEqual(np.var(result['content_ambiguity']), 0.0037713583509767193, places=4)

        self.assertAlmostEqual(np.sum(result['observer_bias']), -0.21903272050455846, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.084353684687185043, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 9.8168943054654481, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.028159236075789944, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 280.05548186797336, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4339487982797514, places=4)

        # --- phase 2: 50% missing (reseeded for the same random stream) ---
        np.random.seed(0)
        info_dict = {
            'missing_probability': 0.5,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)

        subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['content_bias']), 0, places=4)
        self.assertAlmostEqual(np.var(result['content_bias']), 0, places=4)

        self.assertAlmostEqual(np.sum(result['content_ambiguity']), 2.63184284168883, places=4)
        self.assertAlmostEqual(np.var(result['content_ambiguity']), 0.019164097909450246, places=4)

        self.assertAlmostEqual(np.sum(result['observer_bias']), 0.2263148440748638, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.070613033112114504, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 12.317917502439435, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.029455722248727296, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 280.29962156788139, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4717366222424826, places=4)
Пример #49
0
    def test_read_dataset_diffyuv(self):
        """read_dataset should pick up per-asset dimensions and YUV types
        from a dataset whose entries use different resolutions/formats.

        assertEquals (deprecated unittest alias) replaced by assertEqual.
        """
        train_dataset_path = config.ROOT + '/python/test/resource/test_dataset_diffyuv.py'
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        self.assertEqual(len(train_assets), 4)
        self.assertEqual(train_assets[0].ref_width_height, (1920, 1080))
        self.assertEqual(train_assets[0].dis_width_height, (1920, 1080))
        self.assertEqual(train_assets[0].quality_width_height, (1920, 1080))
        self.assertEqual(train_assets[0].yuv_type, 'yuv420p')
        self.assertEqual(train_assets[2].ref_width_height, (1280, 720))
        self.assertEqual(train_assets[2].dis_width_height, (1280, 720))
        self.assertEqual(train_assets[2].quality_width_height, (1280, 720))
        self.assertEqual(train_assets[2].yuv_type, 'yuv420p10le')
Пример #50
0
    def test_read_dataset_qualitywh2(self):
        """Quality width/height and resampling type should only appear in the
        asset dict of entries that explicitly specify them."""
        dataset = import_python_file(
            config.ROOT + '/python/test/resource/test_image_dataset_diffdim_qualitywh2.py')
        assets = read_dataset(dataset)

        for key in ('quality_width', 'quality_height', 'resampling_type'):
            self.assertTrue(key in assets[0].asset_dict.keys())
            self.assertTrue(key not in assets[1].asset_dict.keys())
        self.assertEqual(assets[0].asset_dict['quality_width'], 200)
        self.assertEqual(assets[0].asset_dict['quality_height'], 100)
        self.assertEqual(assets[0].asset_dict['resampling_type'], 'bicubic')
Пример #51
0
    def test_read_dataset_qualitywh2(self):
        """Asset 0 declares quality dimensions and resampling; asset 1 does not."""
        path = config.ROOT + '/python/test/resource/test_image_dataset_diffdim_qualitywh2.py'
        loaded_dataset = import_python_file(path)
        loaded_assets = read_dataset(loaded_dataset)

        first = loaded_assets[0].asset_dict
        second = loaded_assets[1].asset_dict
        self.assertTrue('quality_width' in first.keys())
        self.assertTrue('quality_height' in first.keys())
        self.assertTrue('resampling_type' in first.keys())
        self.assertTrue('quality_width' not in second.keys())
        self.assertTrue('quality_height' not in second.keys())
        self.assertTrue('resampling_type' not in second.keys())
        self.assertEqual(first['quality_width'], 200)
        self.assertEqual(first['quality_height'], 100)
        self.assertEqual(first['resampling_type'], 'bicubic')
Пример #52
0
    def setUp(self):
        """Build a synthetic raw-dataset reader with known generative parameters."""
        dataset = import_python_file(
            config.ROOT + '/python/test/resource/NFLX_dataset_public_raw.py')

        np.random.seed(0)
        # NOTE: the dict-literal evaluation order fixes the random draws —
        # keep the entries in this order.
        info_dict = {
            'quality_scores': np.random.randint(1, 6, 79),
            'observer_bias': np.random.normal(0, 1, 26),
            'observer_inconsistency': np.abs(np.random.normal(0, 0.1, 26)),
            'content_bias': np.zeros(9),
            'content_ambiguity': np.zeros(9),
        }

        self.dataset_reader = SyntheticRawDatasetReader(dataset, input_dict=info_dict)
Пример #53
0
    def test_read_dataset_diffyuv(self):
        """Assets read from the diff-yuv dataset carry their own resolution
        and YUV type (assets 0 and 2 differ).

        assertEquals (deprecated unittest alias) replaced by assertEqual.
        """
        train_dataset_path = config.ROOT + '/python/test/resource/test_dataset_diffyuv.py'
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        self.assertEqual(len(train_assets), 4)
        # asset 0: 1080p, 8-bit
        self.assertEqual(train_assets[0].ref_width_height, (1920, 1080))
        self.assertEqual(train_assets[0].dis_width_height, (1920, 1080))
        self.assertEqual(train_assets[0].quality_width_height, (1920, 1080))
        self.assertEqual(train_assets[0].yuv_type, 'yuv420p')
        # asset 2: 720p, 10-bit
        self.assertEqual(train_assets[2].ref_width_height, (1280, 720))
        self.assertEqual(train_assets[2].dis_width_height, (1280, 720))
        self.assertEqual(train_assets[2].quality_width_height, (1280, 720))
        self.assertEqual(train_assets[2].yuv_type, 'yuv420p10le')
Пример #54
0
def run_vmaf_cv(train_dataset_filepath,
                test_dataset_filepath,
                param_filepath,
                output_model_filepath=None):
    """Train a VMAF model on one dataset, evaluate on another, and plot
    training and testing scatters side by side."""
    logger = get_stdout_logger()
    result_store = FileSystemResultStore()

    train_dataset = import_python_file(train_dataset_filepath)
    test_dataset = import_python_file(test_dataset_filepath)
    param = import_python_file(param_filepath)

    # one scatter plot for the training set, one for the testing set
    nrows, ncols = 1, 2
    fig, axs = plt.subplots(figsize=(5 * ncols, 5 * nrows), nrows=nrows, ncols=ncols)

    # the same param module serves as both feature and model parameters
    train_test_on_dataset(train_dataset, test_dataset, param, param, axs[0], axs[1],
                          result_store, parallelize=False, logger=None,
                          output_model_filepath=output_model_filepath)

    bbox = {'facecolor': 'white', 'alpha': 1, 'pad': 20}
    axs[0].annotate('Training Set', xy=(0.1, 0.85), xycoords='axes fraction', bbox=bbox)
    axs[1].annotate('Testing Set', xy=(0.1, 0.85), xycoords='axes fraction', bbox=bbox)

    plt.tight_layout()

    # release the stdout logger
    close_logger(logger)
Пример #55
0
    def setUp(self):
        """Extract no-ref moment features from the diff-dimension image dataset."""
        dataset = import_python_file(
            config.ROOT + '/python/test/resource/test_image_dataset_diffdim.py')
        assets = read_dataset(dataset)

        _, self.features = run_executors_in_parallel(
            MomentNorefFeatureExtractor,
            assets,
            fifo_mode=True,
            delete_workdir=True,
            parallelize=True,
            result_store=None,
            optional_dict=None,
            optional_dict2=None,
        )
Пример #56
0
    def test_test_on_dataset_raw(self):
        """Run VmafQualityRunner over the raw sample dataset and spot-check
        both the VMAF scores and the groundtruth labels."""
        test_dataset = import_python_file(
            config.ROOT + '/python/test/resource/raw_dataset_sample.py')
        test_assets, results = test_on_dataset(test_dataset, VmafQualityRunner, None,
                                               None, None,
                                               parallelize=True,
                                               aggregate_method=None)

        expected_scores = [99.620284242680668, 28.98459923031756,
                           98.860794831997097, 98.860790186334995]
        for i, expected in enumerate(expected_scores):
            self.assertAlmostEqual(results[i]['VMAF_score'], expected, places=4)
        for i, expected in enumerate([100, 50, 100, 90]):
            self.assertAlmostEqual(test_assets[i].groundtruth, expected, places=4)
Пример #57
0
    def setUp(self):
        """Load the diff-dimension image dataset and extract dis-YUV raw video
        data for every asset into an HDF5 file used by the tests."""
        train_dataset_path = config.ROOT + '/python/test/resource/test_image_dataset_diffdim.py'
        train_dataset = import_python_file(train_dataset_path)
        train_assets = read_dataset(train_dataset)

        # Extracted features are written into this HDF5 file; the open handle
        # is kept on self so it can be closed/cleaned up later.
        self.h5py_filepath = config.ROOT + '/workspace/workdir/test.hdf5'
        self.h5py_file = DisYUVRawVideoExtractor.open_h5py_file(self.h5py_filepath)
        optional_dict2 = {'h5py_file': self.h5py_file}

        _, self.features = run_executors_in_parallel(
            DisYUVRawVideoExtractor,
            train_assets,
            fifo_mode=True,
            delete_workdir=True,
            parallelize=False, # CAN ONLY USE SERIAL MODE FOR DisYRawVideoExtractor
            result_store=None,
            optional_dict=None,
            optional_dict2=optional_dict2,
        )