Esempio n. 1
0
 def test_observer_aware_subjective_model_subjreject(self):
     """The MLE-reduced model must reject the subject_rejection option.

     Feeds the model a reader that corrupts the first five subjects'
     scores, then asserts that run_modeling raises AssertionError when
     subject_rejection=True is requested (the mode is unsupported).
     """
     dataset = import_python_file(self.dataset_filepath)
     np.random.seed(0)  # reproducible corruption in the reader
     info_dict = {
         'selected_subjects': range(5),
     }
     dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
     subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
     with self.assertRaises(AssertionError):
         # NOTE: the return value was previously bound to an unused
         # `result` variable; the dead assignment is dropped.
         subjective_model.run_modeling(subject_rejection=True)
Esempio n. 2
0
 def test_observer_aware_subjective_model_subjreject(self):
     """The MLE-reduced model must reject the subject_rejection option.

     Feeds the model a reader that corrupts the first five subjects'
     scores, then asserts that run_modeling raises AssertionError when
     subject_rejection=True is requested (the mode is unsupported).
     """
     dataset = import_python_file(self.dataset_filepath)
     np.random.seed(0)  # reproducible corruption in the reader
     info_dict = {
         'selected_subjects': range(5),
     }
     dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
     subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
     with self.assertRaises(AssertionError):
         # NOTE: the return value was previously bound to an unused
         # `result` variable; the dead assignment is dropped.
         subjective_model.run_modeling(subject_rejection=True)
Esempio n. 3
0
    def test_observer_aware_subjective_model_corruptdata(self):
        """MLE-reduced model run on data with five corrupted subjects.

        Checks mean/variance of the recovered quality scores against
        reference values (trailing comments hold the uncorrupted figures).
        """
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)  # reproducible corruption in the reader
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        # assertAlmostEquals is a deprecated alias removed in Python 3.12;
        # use assertAlmostEqual.
        self.assertAlmostEqual(np.mean(result['quality_scores']), 3.5573073781669944, places=4) # 3.5482845335713469
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.3559834438740614, places=4) # 1.4355485462027884
Esempio n. 4
0
    def test_observer_aware_subjective_model_corruptdata(self):
        """MLE-reduced model run on data with five corrupted subjects.

        Checks mean/variance of the recovered quality scores against
        reference values (trailing comments hold the uncorrupted figures).
        """
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)  # reproducible corruption in the reader
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        # assertAlmostEquals is a deprecated alias removed in Python 3.12;
        # use assertAlmostEqual.
        self.assertAlmostEqual(np.mean(result['quality_scores']), 3.5573073781669944, places=4) # 3.5482845335713469
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.3559834438740614, places=4) # 1.4355485462027884
Esempio n. 5
0
    def test_observer_aware_subjective_model_use_log(self):
        """MLE-reduced model with use_log=True: pin bias/inconsistency/scores.

        Uses assertAlmostEqual (assertAlmostEquals is a deprecated alias,
        removed in Python 3.12).
        """
        subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
            self.dataset_filepath)
        result = subjective_model.run_modeling(use_log=True)

        self.assertAlmostEqual(np.sum(result['observer_bias']),
                               -0.082429594509296211,
                               places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']),
                               0.089032585621095089,
                               places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']),
                               15.681766163430936,
                               places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']),
                               0.012565584832977776,
                               places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']),
                               280.2889206910113,
                               places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']),
                               1.4355485462027884,
                               places=4)
Esempio n. 6
0
    def test_observer_aware_subjective_model_with_dscoring(self):
        """MLE-reduced model with dscore_mode=True: pin the summary stats.

        Uses assertAlmostEqual (assertAlmostEquals is a deprecated alias,
        removed in Python 3.12).
        """
        subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
            self.dataset_filepath)
        result = subjective_model.run_modeling(dscore_mode=True)

        self.assertAlmostEqual(np.sum(result['observer_bias']),
                               -0.090840910829083799,
                               places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']),
                               0.089032585621095089,
                               places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']),
                               15.681766163430936,
                               places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']),
                               0.012565584832977776,
                               places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']),
                               298.35293969059796,
                               places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']),
                               1.4163670233392607,
                               places=4)
Esempio n. 7
0
    def test_observer_aware_subjective_model(self):
        """MLE-reduced model with default options: pin the summary stats.

        Uses assertAlmostEqual (assertAlmostEquals is a deprecated alias,
        removed in Python 3.12).
        """
        subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
            self.dataset_filepath)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']),
                               -0.090840910829083799,
                               places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']),
                               0.089032585621095089,
                               places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']),
                               15.681766163430936,
                               places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']),
                               0.012565584832977776,
                               places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']),
                               280.31447815213642,
                               places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']),
                               1.4355485462027884,
                               places=4)
Esempio n. 8
0
    def test_observer_aware_subjective_model_with_dscoring(self):
        """MLE-reduced model with dscore_mode=True: pin the summary stats.

        Uses assertAlmostEqual (assertAlmostEquals is a deprecated alias,
        removed in Python 3.12).
        """
        subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
            self.dataset_filepath)
        result = subjective_model.run_modeling(dscore_mode=True)

        self.assertAlmostEqual(np.sum(result['observer_bias']), -0.090840910829083799, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.089032585621095089, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 15.681766163430936, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.012565584832977776, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 298.35293969059796, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4163670233392607, places=4)
Esempio n. 9
0
    def test_observer_aware_subjective_model_with_zscoring(self):
        """MLE-reduced model with zscore_mode=True.

        After z-scoring, bias and quality-score sums are expected to be 0.
        Uses assertAlmostEqual (assertAlmostEquals is a deprecated alias,
        removed in Python 3.12).
        """
        subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
            self.dataset_filepath)
        result = subjective_model.run_modeling(zscore_mode=True)

        self.assertAlmostEqual(np.sum(result['observer_bias']), 0.0, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.0, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 11.568205661696393, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.0079989301785523791, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 0.0, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 0.80942484781493518, places=4)
Esempio n. 10
0
    def test_observer_aware_subjective_model_use_log(self):
        """MLE-reduced model with use_log=True: pin the summary stats.

        Uses assertAlmostEqual (assertAlmostEquals is a deprecated alias,
        removed in Python 3.12).
        """
        subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
            self.dataset_filepath)
        result = subjective_model.run_modeling(use_log=True)

        self.assertAlmostEqual(np.sum(result['observer_bias']), -0.082429594509296211, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.089032585621095089, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 15.681766163430936, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.012565584832977776, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 280.2889206910113, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4355485462027884, places=4)
Esempio n. 11
0
    def test_observer_aware_subjective_model(self):
        """MLE-reduced model with default options: pin the summary stats.

        Uses assertAlmostEqual (assertAlmostEquals is a deprecated alias,
        removed in Python 3.12).
        """
        subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
            self.dataset_filepath)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']), -0.090840910829083799, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.089032585621095089, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 15.681766163430936, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.012565584832977776, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 280.31447815213642, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4355485462027884, places=4)
Esempio n. 12
0
    def test_observer_aware_subjective_model_with_dscoring_and_zscoring(self):
        """MLE-reduced model with both dscore_mode and zscore_mode enabled.

        After z-scoring, bias and quality-score sums are expected to be 0.
        Uses assertAlmostEqual (assertAlmostEquals is a deprecated alias,
        removed in Python 3.12).
        """
        subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
            self.dataset_filepath)
        result = subjective_model.run_modeling(dscore_mode=True, zscore_mode=True)

        self.assertAlmostEqual(np.sum(result['observer_bias']), 0.0, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.0, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 11.628499078069273, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.0082089371266301642, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 0.0, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 0.80806512456121071, places=4)
Esempio n. 13
0
    def test_observer_aware_subjective_model_with_zscoring(self):
        """MLE-reduced model with zscore_mode=True.

        After z-scoring, bias and quality-score sums are expected to be 0.
        Uses assertAlmostEqual (assertAlmostEquals is a deprecated alias,
        removed in Python 3.12).
        """
        subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
            self.dataset_filepath)
        result = subjective_model.run_modeling(zscore_mode=True)

        self.assertAlmostEqual(np.sum(result['observer_bias']), 0.0, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.0, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 11.568205661696393, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.0079989301785523791, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 0.0, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 0.80942484781493518, places=4)
Esempio n. 14
0
    def test_observer_aware_subjective_model_with_dscoring_and_zscoring(self):
        """MLE-reduced model with both dscore_mode and zscore_mode enabled.

        After z-scoring, bias and quality-score sums are expected to be 0.
        Uses assertAlmostEqual (assertAlmostEquals is a deprecated alias,
        removed in Python 3.12).
        """
        subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
            self.dataset_filepath)
        result = subjective_model.run_modeling(dscore_mode=True, zscore_mode=True)

        self.assertAlmostEqual(np.sum(result['observer_bias']), 0.0, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.0, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 11.628499078069273, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.0082089371266301642, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 0.0, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 0.80806512456121071, places=4)
Esempio n. 15
0
    def test_observer_aware_subjective_model_synthetic(self):
        """MLE-reduced model on synthetically generated raw scores.

        Seeds numpy, synthesizes ground-truth quality/bias/inconsistency
        via SyntheticRawDatasetReader, and pins the recovered statistics.
        Uses assertAlmostEqual (assertAlmostEquals is a deprecated alias,
        removed in Python 3.12).
        """
        np.random.seed(0)  # fixed seed so the synthetic draws are reproducible

        dataset = import_python_file(self.dataset_filepath)
        # Ground-truth parameters injected into the synthetic reader
        # (79 stimuli, 26 observers, 9 contents — sizes match the dataset).
        info_dict = {
            'quality_scores': np.random.uniform(1, 5, 79),
            'observer_bias': np.random.normal(0, 1, 26),
            'observer_inconsistency': np.abs(np.random.uniform(0.4, 0.6, 26)),
            'content_bias': np.zeros(9),
            'content_ambiguity': np.zeros(9),
        }
        dataset_reader = SyntheticRawDatasetReader(dataset,
                                                   input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(
            dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']),
                               -0.90138622499935517,
                               places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']),
                               0.84819162765420342,
                               places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']),
                               12.742288471632817,
                               places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']),
                               0.0047638169604076975,
                               places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']),
                               236.78529213581052,
                               places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']),
                               1.3059726132293354,
                               places=4)
Esempio n. 16
0
    def test_observer_aware_subjective_model_missingdata(self):
        """MLE-reduced model with randomly missing scores (10% and 50%).

        Runs the model twice with the same seed but different missing
        probabilities and pins the recovered statistics for each case.
        Uses assertAlmostEqual (assertAlmostEquals is a deprecated alias,
        removed in Python 3.12).
        """
        dataset = import_python_file(self.dataset_filepath)

        # Case 1: 10% of scores randomly dropped.
        np.random.seed(0)
        info_dict = {
            'missing_probability': 0.1,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']), -0.18504017984241944, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.087350553292201705, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 15.520738471447299, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.010940587327083341, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 279.94975274863879, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4325574378911554, places=4)

        # Case 2: 50% of scores randomly dropped (same seed for comparability).
        np.random.seed(0)
        info_dict = {
            'missing_probability': 0.5,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']), 0.057731868199093525, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.081341845650928557, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 14.996238224489693, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.013666025579465165, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 280.67100837103203, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4637917512768972, places=4)
Esempio n. 17
0
    def test_observer_aware_subjective_model_synthetic(self):
        """MLE-reduced model on synthetically generated raw scores.

        Seeds numpy, synthesizes ground-truth quality/bias/inconsistency
        via SyntheticRawDatasetReader, and pins the recovered statistics.
        Uses assertAlmostEqual (assertAlmostEquals is a deprecated alias,
        removed in Python 3.12).
        """
        np.random.seed(0)  # fixed seed so the synthetic draws are reproducible

        dataset = import_python_file(self.dataset_filepath)
        # Ground-truth parameters injected into the synthetic reader
        # (79 stimuli, 26 observers, 9 contents — sizes match the dataset).
        info_dict = {
            'quality_scores': np.random.uniform(1, 5, 79),
            'observer_bias': np.random.normal(0, 1, 26),
            'observer_inconsistency': np.abs(np.random.uniform(0.4, 0.6, 26)),
            'content_bias': np.zeros(9),
            'content_ambiguity': np.zeros(9),
        }
        dataset_reader = SyntheticRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']), -0.90138622499935517, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.84819162765420342, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 12.742288471632817, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.0047638169604076975, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 236.78529213581052, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.3059726132293354, places=4)
Esempio n. 18
0
    def test_observer_aware_subjective_model_missingdata(self):
        """MLE-reduced model with randomly missing scores (10% and 50%).

        Runs the model twice with the same seed but different missing
        probabilities and pins the recovered statistics for each case.
        Uses assertAlmostEqual (assertAlmostEquals is a deprecated alias,
        removed in Python 3.12).
        """
        dataset = import_python_file(self.dataset_filepath)

        # Case 1: 10% of scores randomly dropped.
        np.random.seed(0)
        info_dict = {
            'missing_probability': 0.1,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']), -0.18504017984241944, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.087350553292201705, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 15.520738471447299, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.010940587327083341, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 279.94975274863879, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4325574378911554, places=4)

        # Case 2: 50% of scores randomly dropped (same seed for comparability).
        np.random.seed(0)
        info_dict = {
            'missing_probability': 0.5,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']), 0.057731868199093525, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.081341845650928557, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 14.996238224489693, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.013666025579465165, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 280.67100837103203, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4637917512768972, places=4)