Example #1
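A test fixture that loads the raw NFLX public dataset, converts it to a paired-comparison dataset with within-subject pairing, and wraps the result in a PairedCompDatasetReader. Throughout these examples, names such as SurealConfig, import_python_file, RawDatasetReader and MosModel are assumed to be imported from the sureal package (e.g. sureal.config, sureal.tools.misc, sureal.dataset_reader, sureal.subjective_model); the exact module paths are an assumption, since the snippets omit their imports.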
 def setUp(self):
     dataset_filepath = SurealConfig.test_resource_path(
         'NFLX_dataset_public_raw.py')
     dataset = import_python_file(dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     pc_dataset = dataset_reader.to_pc_dataset(pc_type='within_subject')
     self.pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
Example #2
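An alternative constructor that imports a dataset file, optionally filters it by content and asset IDs via a private helper, and builds the instance from a RawDatasetReader.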
 def from_dataset_file(cls,
                       dataset_filepath,
                       content_ids=None,
                       asset_ids=None):
     dataset = cls._import_dataset_and_filter(dataset_filepath, content_ids,
                                              asset_ids)
     dataset_reader = RawDatasetReader(dataset)
     return cls(dataset_reader)
Example #3
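A helper that corrupts raw opinion scores with each probability in corrupt_probs, refits the given subjective model, and measures how far the recovered quality scores drift from the uncorrupted baseline (RMSE or PCC against the standardized baseline scores).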
    def _run_corrupt_probs(dataset,
                           corrupt_probs,
                           model_class,
                           seed,
                           perf_type='rmse'):
        def run_one_corrupt_prob(corrupt_prob, dataset, seed):
            np.random.seed(seed)
            info_dict = {
                'corrupt_probability': corrupt_prob,
            }
            dataset_reader = CorruptDataRawDatasetReader(dataset,
                                                         input_dict=info_dict)
            subjective_model = model_class(dataset_reader)
            try:
                result = subjective_model.run_modeling(normalize_final=False)
            except ValueError as e:
                print('Warning: {}, returning result None'.format(e))
                result = None
            return dataset_reader, result

        inputs = []
        for corrupt_prob in corrupt_probs:
            inputs.append([corrupt_prob, dataset, seed])
        outputs = [run_one_corrupt_prob(*args) for args in inputs]

        result0 = model_class(
            RawDatasetReader(dataset)).run_modeling(normalize_final=False)

        result0_qs = np.array(result0['quality_scores'])
        result0_qs_mean = np.mean(result0_qs)
        result0_qs_std = np.std(result0_qs)
        result0_qs = (result0_qs - result0_qs_mean) / result0_qs_std

        perfs = []
        datasizes = []
        for args, output in zip(inputs, outputs):
            corrupt_prob, dataset, seed = args
            reader, result = output

            if result is None:
                # Modeling failed for this corruption level; record NaN.
                perf = float('NaN')
            else:
                # Normalize against the baseline statistics before comparing.
                result_qs = np.array(result['quality_scores'])
                result_qs = (result_qs - result0_qs_mean) / result0_qs_std
                if perf_type == 'pcc':
                    perf, _ = scipy.stats.pearsonr(result_qs, result0_qs)
                elif perf_type == 'rmse':
                    perf = np.sqrt(
                        np.mean(np.power(result_qs - result0_qs, 2.0)))
                else:
                    assert False, 'Unknown perf_type: {}'.format(perf_type)
            datasize = corrupt_prob
            perfs.append(perf)
            datasizes.append(datasize)
        return datasizes, perfs
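A minimal driver sketch for the helper above, assuming it is reachable as a plain function (in the source it is presumably a static method) and that the names are imported as noted under Example #1; the corruption probabilities are illustrative:

dataset = import_python_file(
    SurealConfig.test_resource_path('NFLX_dataset_public_raw.py'))
datasizes, perfs = _run_corrupt_probs(
    dataset,
    corrupt_probs=[0.0, 0.2, 0.4, 0.6, 0.8],
    model_class=MosModel,
    seed=0,
    perf_type='rmse')
for corrupt_prob, perf in zip(datasizes, perfs):
    # datasizes echoes the corruption probabilities; perf is NaN where modeling failed
    print('corrupt_prob={}: rmse={}'.format(corrupt_prob, perf))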
Example #4
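Checks that MosModel with normalize_final=True produces zero-mean quality scores.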
 def test_mos_subjective_model_normalize_final(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling(normalize_final=True)
     scores = result['quality_scores']
     self.assertAlmostEqual(scores[0], 1.1318646945818083, places=4)
     self.assertAlmostEqual(scores[10], -1.2400334499143002, places=4)
     self.assertAlmostEqual(np.mean(scores), 0.0, places=4)
Example #5
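Checks MosModel with a linear transform_final, i.e. scores mapped through p1 * score + p0.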
 def test_mos_subjective_model_transform_final(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling(transform_final={'p1': 10, 'p0': 1})
     scores = result['quality_scores']
     self.assertAlmostEqual(scores[0], 49.84615384615385, places=4)
     self.assertAlmostEqual(scores[10], 29.076923076923073, places=4)
     self.assertAlmostEqual(np.mean(scores), 35.871794871794876, places=4)
Example #6
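The same normalize_final test as Example #4, evidently against a different dataset fixture, hence the different expected values.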
 def test_mos_subjective_model_normalize_final(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling(normalize_final=True)
     scores = result['quality_scores']
     self.assertAlmostEqual(scores[0], 1.1666952279897338, places=4)
     self.assertAlmostEqual(scores[10], -0.56729217507757768, places=4)
     self.assertAlmostEqual(np.mean(scores), 0.0, places=4)
Example #7
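A variant of Example #3 that corrupts the scores of a randomly selected subset of subjects (each corrupted with probability 0.5) and sweeps the number of affected subjects.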
    def _run_partial_corrupt_nums(dataset,
                                  subject_nums,
                                  model_class,
                                  seed,
                                  perf_type='rmse'):
        def run_one_num_subject(num_subject, dataset, seed):
            np.random.seed(seed)
            info_dict = {
                'selected_subjects': np.random.permutation(
                    len(dataset.dis_videos[0]['os']))[:num_subject],
                'corrupt_probability': 0.5,
            }
            dataset_reader = CorruptSubjectRawDatasetReader(
                dataset, input_dict=info_dict)
            subjective_model = model_class(dataset_reader)
            result = subjective_model.run_modeling(normalize_final=False)
            return dataset_reader, result

        inputs = []
        for subject_num in subject_nums:
            inputs.append([subject_num, dataset, seed])
        outputs = [run_one_num_subject(*args) for args in inputs]

        result0 = model_class(
            RawDatasetReader(dataset)).run_modeling(normalize_final=False)

        result0_qs = np.array(result0['quality_scores'])
        result0_qs_mean = np.mean(result0_qs)
        result0_qs_std = np.std(result0_qs)
        result0_qs = (result0_qs - result0_qs_mean) / result0_qs_std

        perfs = []
        datasizes = []
        for args, output in zip(inputs, outputs):
            subject_num, dataset, seed = args
            reader, result = output

            result_qs = np.array(result['quality_scores'])
            result_qs = (result_qs - result0_qs_mean) / result0_qs_std

            if perf_type == 'pcc':
                perf, _ = scipy.stats.pearsonr(result_qs, result0_qs)
            elif perf_type == 'rmse':
                perf = np.sqrt(np.mean(np.power(result_qs - result0_qs, 2.0)))
            else:
                assert False, 'Unknown perf_type: {}'.format(perf_type)
            # datasize = np.prod(subject_num * len(reader.dataset.dis_videos))
            datasize = np.prod(subject_num)
            perfs.append(perf)
            datasizes.append(datasize)
        return datasizes, perfs
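A sketch of how this sweep might be invoked, this time reporting PCC; the subject counts are illustrative and dataset is assumed loaded as in the sketch after Example #3:

datasizes, perfs = _run_partial_corrupt_nums(
    dataset,
    subject_nums=[4, 8, 12, 16, 20],
    model_class=MosModel,
    seed=0,
    perf_type='pcc')
# here datasizes simply echoes subject_nums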
Example #8
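A baseline MOS test: checks individual quality scores, their mean, and the mean per-score standard deviation.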
 def test_mos_subjective_model(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling()
     scores = result['quality_scores']
     self.assertAlmostEqual(scores[0], 4.884615384615385, places=4)
     self.assertAlmostEqual(scores[10], 2.8076923076923075, places=4)
     self.assertAlmostEqual(np.mean(scores), 3.4871794871794877, places=4)
     scores_std = result['quality_scores_std']
     self.assertAlmostEqual(np.mean(scores_std), 0.13125250408357622, places=4)
Example #9
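The same MOS test as Example #8 against another dataset fixture.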
 def test_mos_subjective_model(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling()
     scores = result['quality_scores']
     self.assertAlmostEqual(scores[0], 4.884615384615385, places=4)
     self.assertAlmostEqual(scores[10], 2.0769230769230771, places=4)
     self.assertAlmostEqual(np.mean(scores), 3.544790652385589, places=4)
     scores_std = result['quality_scores_std']
     self.assertAlmostEqual(np.mean(scores_std), 0.12986637295658307, places=4)
Example #10
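Aggregates the MOS result back into a dataset object and checks that each dis_video now carries groundtruth and groundtruth_std instead of the raw opinion scores ('os').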
 def test_mos_subjective_model_output2(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     subjective_model.run_modeling()
     dataset2 = subjective_model.to_aggregated_dataset()
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('groundtruth_std' in dis_video)
     self.assertTrue('os' not in dis_video)
     self.assertAlmostEqual(dis_video['groundtruth'], 4.884615384615385, places=4)
     self.assertAlmostEqual(dis_video['groundtruth_std'], 0.08461538461538462, places=4)
Example #11
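Runs PerSubjectModel with a transform_final, writes the aggregated dataset to file, and verifies the ground truth read back from that file.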
 def test_persubject_subjective_model_output(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = PerSubjectModel(dataset_reader)
     subjective_model.run_modeling(transform_final={'p1': 25, 'p0': -25})
     subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath)
     self.assertTrue(os.path.exists(self.output_dataset_filepath))
     dataset2 = import_python_file(self.output_dataset_filepath)
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('os' not in dis_video)
     self.assertAlmostEqual(dis_video['groundtruth'], 100.0, places=4)
Example #12
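A helper that subsamples the subject pool at each size in subject_nums, refits the model on every subset, and reports performance against the full-pool baseline together with the number of subjects actually used (taken from the opinion-score matrix width).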
    def _run_subject_nums(dataset,
                          subject_nums,
                          model_class,
                          seed,
                          perf_type='rmse'):
        def run_one_num_subject(num_subject, dataset, seed):
            np.random.seed(seed)
            total_subject = len(dataset.dis_videos[0]['os'])
            info_dict = {
                'selected_subjects':
                np.random.permutation(total_subject)[:num_subject]
            }
            dataset_reader = SelectSubjectRawDatasetReader(
                dataset, input_dict=info_dict)
            subjective_model = model_class(dataset_reader)
            result = subjective_model.run_modeling(normalize_final=False)
            return dataset_reader, result

        inputs = []
        for subject_num in subject_nums:
            inputs.append([subject_num, dataset, seed])
        outputs = [run_one_num_subject(*args) for args in inputs]

        result0 = model_class(
            RawDatasetReader(dataset)).run_modeling(normalize_final=False)

        result0_qs = np.array(result0['quality_scores'])
        result0_qs_mean = np.mean(result0_qs)
        result0_qs_std = np.std(result0_qs)
        result0_qs = (result0_qs - result0_qs_mean) / result0_qs_std

        perfs = []
        datasizes = []
        for output in outputs:
            reader, result = output

            result_qs = np.array(result['quality_scores'])
            result_qs = (result_qs - result0_qs_mean) / result0_qs_std

            if perf_type == 'pcc':
                perf, _ = scipy.stats.pearsonr(result_qs, result0_qs)
            elif perf_type == 'rmse':
                perf = np.sqrt(np.mean(np.power(result_qs - result0_qs, 2.0)))
            else:
                assert False, 'Unknown perf_type: {}'.format(perf_type)
            datasize = reader.opinion_score_2darray.shape[1]
            perfs.append(perf)
            datasizes.append(datasize)
        return datasizes, perfs
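A matching sketch for the subject-subsampling sweep, with illustrative subject counts and dataset assumed loaded as before:

datasizes, perfs = _run_subject_nums(
    dataset,
    subject_nums=[5, 10, 15, 20],
    model_class=MosModel,
    seed=0)
for num_subject, rmse in zip(datasizes, perfs):
    print('{} subjects: rmse={}'.format(num_subject, rmse))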
Example #13
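Writes the aggregated MOS dataset with a custom resampling_type and verifies that quality_width and quality_height are dropped while resampling_type is preserved.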
 def test_mos_subjective_model_output_custom_resampling(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     subjective_model.run_modeling()
     subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath, resampling_type='lanczos')
     self.assertTrue(os.path.exists(self.output_dataset_filepath))
     dataset2 = import_python_file(self.output_dataset_filepath)
     self.assertFalse(hasattr(dataset2, 'quality_height'))
     self.assertFalse(hasattr(dataset2, 'quality_width'))
     self.assertEqual(dataset2.resampling_type, 'lanczos')
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('groundtruth_std' in dis_video)
     self.assertTrue('os' not in dis_video)
     self.assertAlmostEqual(dis_video['groundtruth'], 4.884615384615385, places=4)
     self.assertAlmostEqual(dis_video['groundtruth_std'], 0.08461538461538462, places=4)
Example #14
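A test fixture that loads a partial variant of the NFLX public raw dataset.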
 def setUp(self):
     dataset_filepath = SurealConfig.test_resource_path(
         'NFLX_dataset_public_raw_PARTIAL.py')
     self.dataset = import_python_file(dataset_filepath)
     self.dataset_reader = RawDatasetReader(self.dataset)