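The following snippets exercise CorruptSubjectRawDatasetReader from the sureal subjective-quality package (originally part of Netflix's VMAF repository). They are shown without their imports; a plausible import block is sketched below (the exact module paths are assumptions and may differ between versions):

import unittest

import numpy as np

from sureal import config  # assumed: provides the config.ROOT used below
from sureal.tools.misc import import_python_file
from sureal.dataset_reader import CorruptSubjectRawDatasetReader
from sureal.subjective_model import (
    MosModel,
    SubjrejMosModel,
    SubjrejDmosModel,
    ZscoringSubjrejDmosModel,
    MaximumLikelihoodEstimationModel,
    MaximumLikelihoodEstimationModelReduced,
)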
Example #1
class CorruptSubjectDatasetReaderTest(unittest.TestCase):
    def setUp(self):
        dataset_filepath = config.ROOT + '/python/test/resource/NFLX_dataset_public_raw.py'
        dataset = import_python_file(dataset_filepath)

        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }

        self.dataset_reader = CorruptSubjectRawDatasetReader(
            dataset, input_dict=info_dict)

    def test_read_dataset_stats(self):
        self.assertEqual(self.dataset_reader.num_ref_videos, 9)
        self.assertEqual(self.dataset_reader.num_dis_videos, 79)
        self.assertEqual(self.dataset_reader.num_observers, 26)

    def test_opinion_score_2darray(self):
        os_2darray = self.dataset_reader.opinion_score_2darray
        self.assertEqual(os_2darray.shape, (79, 26))
        self.assertAlmostEqual(np.mean(np.std(os_2darray, axis=1)),
                               0.93177573807000225,
                               places=4)

    def test_to_dataset(self):
        dataset = self.dataset_reader.to_dataset()

        old_scores = [
            dis_video['os']
            for dis_video in self.dataset_reader.dataset.dis_videos
        ]
        new_scores = [dis_video['os'] for dis_video in dataset.dis_videos]

        self.assertNotEqual(old_scores, new_scores)
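To run the class above standalone, the usual unittest entry point can be appended:

if __name__ == '__main__':
    unittest.main()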
Example #2
    def test_observer_content_aware_subjective_model_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset,
                                                        input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
        with self.assertRaises(AssertionError):
            subjective_model.run_modeling(subject_rejection=True)
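The assertRaises block documents that MaximumLikelihoodEstimationModel refuses the subject_rejection=True option with an AssertionError; presumably the model's own observer-bias handling makes classical subject rejection redundant.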
Example #3
    def test_opinion_score_2darray_with_corruption_prob4(self):
        info_dict = {
            'selected_subjects': range(5),
            'corrupt_probability': 1.0,
        }
        self.dataset_reader = CorruptSubjectRawDatasetReader(
            self.dataset, input_dict=info_dict)
        os_2darray = self.dataset_reader.opinion_score_2darray
        self.assertEqual(os_2darray.shape, (79, 26))
        self.assertAlmostEqual(np.mean(np.std(os_2darray, axis=1)),
                               0.96532565883975119,
                               places=4)
Example #4
    def run_one_num_subject(num_subject, dataset, seed):
        np.random.seed(seed)
        # Randomly pick num_subject subjects out of all available ones.
        info_dict = {
            'selected_subjects': np.random.permutation(
                len(dataset.dis_videos[0]['os']))[:num_subject],
        }
        dataset_reader = CorruptSubjectRawDatasetReader(
            dataset, input_dict=info_dict)
        # model_class is a free variable, bound in the enclosing scope.
        subjective_model = model_class(dataset_reader)
        result = subjective_model.run_modeling(normalize_final=False)
        return dataset_reader, result
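A hypothetical driver that supplies model_class explicitly and sweeps several subject counts could look like this (the helper name and signature are illustrative only, not part of the original code):

def sweep_num_subjects(dataset, model_class, subject_counts, seed=0):
    # Re-run the modeling for each subject count and collect the results.
    results = {}
    for num_subject in subject_counts:
        np.random.seed(seed)
        info_dict = {
            'selected_subjects': np.random.permutation(
                len(dataset.dis_videos[0]['os']))[:num_subject],
        }
        reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        results[num_subject] = model_class(reader).run_modeling(
            normalize_final=False)
    return results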
Example #5
    def test_zscoresubjrejdmos_subjective_model_corruptdata_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset,
                                                        input_dict=info_dict)
        subjective_model = ZscoringSubjrejDmosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        self.assertAlmostEqual(np.mean(scores), 0, places=4)
        self.assertAlmostEqual(np.var(scores), 0.66405245792414114,
                               places=4)  # 1.4012220200639218
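The zero mean asserted above is what per-subject z-scoring yields. As an illustration (an assumption about the normalization, not the model's actual code), the transform on the (num_dis_videos, num_observers) opinion matrix is roughly:

os_2darray = dataset_reader.opinion_score_2darray
# Normalize each subject's column to zero mean and unit variance.
z_2darray = (os_2darray - np.mean(os_2darray, axis=0)) / np.std(os_2darray, axis=0)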
Example #6
    def test_subjrejdmos_subjective_model_corruptdata_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset,
                                                        input_dict=info_dict)
        subjective_model = SubjrejDmosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        self.assertAlmostEqual(np.mean(scores), 4.0246673158065542, places=4)
        self.assertAlmostEqual(np.var(scores), 1.0932580358187849,
                               places=4)  # 1.4012220200639218
Example #7
    def test_subjrejmos_subjective_model_corruptdata_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset,
                                                        input_dict=info_dict)
        subjective_model = SubjrejMosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        self.assertAlmostEqual(np.mean(scores), 3.5611814345991566, places=4)
        self.assertAlmostEqual(np.var(scores), 1.1049505732699529,
                               places=4)  # 1.4012220200639218
Example #8
    def test_zscore_mos_subjective_model_corruptdata_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset,
                                                        input_dict=info_dict)
        subjective_model = MosModel(dataset_reader)
        result = subjective_model.run_modeling(zscore_mode=True,
                                               subject_rejection=True)
        scores = result['quality_scores']

        self.assertAlmostEqual(np.mean(scores), 0.0, places=4)
        self.assertAlmostEqual(np.var(scores), 0.66670826882879042, places=4)
Example #9
    def test_mos_subjective_model_corruptdata(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset,
                                                        input_dict=info_dict)
        subjective_model = MosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        self.assertAlmostEqual(np.mean(scores), 3.5447906523855899, places=4)
        self.assertAlmostEqual(np.var(scores), 0.95893305294535369,
                               places=4)  # 1.4012220200639218
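For reference, MosModel's quality_scores are essentially per-video mean opinion scores, i.e. (ignoring missing-value handling, and as an illustration rather than the library's exact implementation):

# Mean over the subject axis of the (num_dis_videos, num_observers) matrix.
mos = np.mean(dataset_reader.opinion_score_2darray, axis=1)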
Example #10
    def test_observer_aware_subjective_model_corruptdata(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset,
                                                        input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(
            dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.mean(result['quality_scores']),
                               3.5573073781669944,
                               places=4)  # 3.5482845335713469
        self.assertAlmostEqual(np.var(result['quality_scores']),
                               1.3559834438740614,
                               places=4)  # 1.4355485462027884