Example No. 1
    def test_observer_aware_subjective_model_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        # Corrupt the scores of the first five subjects; subject rejection is
        # not supported by this model, so run_modeling is expected to raise.
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        with self.assertRaises(AssertionError):
            subjective_model.run_modeling(subject_rejection=True)
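
These snippets are test methods lifted out of their enclosing test class, so names such as self.dataset_filepath, np and the reader and model classes are defined elsewhere. A minimal scaffold along the following lines would make them runnable; the module paths follow the layout of the sureal package and, together with the dataset path, are assumptions to adapt to your own setup.

# Minimal scaffold (sketch only): import paths assume the sureal package layout;
# the dataset path is a placeholder.
import unittest

import numpy as np

from sureal.tools.misc import import_python_file
from sureal.dataset_reader import (
    CorruptSubjectRawDatasetReader,
    MissingDataRawDatasetReader,
    SyntheticRawDatasetReader,
)
from sureal.subjective_model import MaximumLikelihoodEstimationModelReduced


class ObserverAwareSubjectiveModelTest(unittest.TestCase):

    def setUp(self):
        # Hypothetical path to a raw dataset file in the sureal dataset format.
        self.dataset_filepath = 'resource/dataset/NFLX_dataset_public_raw.py'
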
Example No. 2
    def test_observer_aware_subjective_model_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset,
                                                        input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(
            dataset_reader)
        with self.assertRaises(AssertionError):
            subjective_model.run_modeling(subject_rejection=True)
Example No. 3
    def test_observer_aware_subjective_model_corruptdata(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.mean(result['quality_scores']), 3.5573073781669944, places=4) # 3.5482845335713469
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.3559834438740614, places=4) # 1.4355485462027884
Example No. 4
    def test_observer_aware_subjective_model_missingdata(self):

        dataset = import_python_file(self.dataset_filepath)

        np.random.seed(0)
        # Randomly drop about 10% of the raw scores.
        info_dict = {
            'missing_probability': 0.1,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']), -0.18504017984241944, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.087350553292201705, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 15.520738471447299, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.010940587327083341, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 279.94975274863879, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4325574378911554, places=4)

        np.random.seed(0)
        # Repeat with half of the raw scores missing.
        info_dict = {
            'missing_probability': 0.5,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']), 0.057731868199093525, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.081341845650928557, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 14.996238224489693, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.013666025579465165, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 280.67100837103203, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4637917512768972, places=4)
Example No. 5
    def test_observer_aware_subjective_model_corruptdata(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset,
                                                        input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(
            dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.mean(result['quality_scores']),
                               3.5573073781669944,
                               places=4)  # 3.5482845335713469
        self.assertAlmostEqual(np.var(result['quality_scores']),
                               1.3559834438740614,
                               places=4)  # 1.4355485462027884
Example No. 6
    def test_observer_aware_subjective_model_synthetic(self):

        np.random.seed(0)

        dataset = import_python_file(self.dataset_filepath)
        # Ground-truth parameters used to synthesize the raw scores: 79 stimuli,
        # 26 observers, 9 contents with zero content bias and ambiguity.
        info_dict = {
            'quality_scores': np.random.uniform(1, 5, 79),
            'observer_bias': np.random.normal(0, 1, 26),
            'observer_inconsistency': np.abs(np.random.uniform(0.4, 0.6, 26)),
            'content_bias': np.zeros(9),
            'content_ambiguity': np.zeros(9),
        }
        dataset_reader = SyntheticRawDatasetReader(dataset,
                                                   input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(
            dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']),
                               -0.90138622499935517,
                               places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']),
                               0.84819162765420342,
                               places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']),
                               12.742288471632817,
                               places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']),
                               0.0047638169604076975,
                               places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']),
                               236.78529213581052,
                               places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']),
                               1.3059726132293354,
                               places=4)
Example No. 7
    def test_observer_aware_subjective_model_synthetic(self):

        np.random.seed(0)

        dataset = import_python_file(self.dataset_filepath)
        info_dict = {
            'quality_scores': np.random.uniform(1, 5, 79),
            'observer_bias': np.random.normal(0, 1, 26),
            'observer_inconsistency': np.abs(np.random.uniform(0.4, 0.6, 26)),
            'content_bias': np.zeros(9),
            'content_ambiguity': np.zeros(9),
        }
        dataset_reader = SyntheticRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']), -0.90138622499935517, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.84819162765420342, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 12.742288471632817, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.0047638169604076975, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 236.78529213581052, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.3059726132293354, places=4)
Example No. 8
    def test_observer_aware_subjective_model_missingdata(self):

        dataset = import_python_file(self.dataset_filepath)

        np.random.seed(0)
        info_dict = {
            'missing_probability': 0.1,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset,
                                                     input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(
            dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']),
                               -0.18504017984241944,
                               places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']),
                               0.087350553292201705,
                               places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']),
                               15.520738471447299,
                               places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']),
                               0.010940587327083341,
                               places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']),
                               279.94975274863879,
                               places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']),
                               1.4325574378911554,
                               places=4)

        np.random.seed(0)
        info_dict = {
            'missing_probability': 0.5,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset,
                                                     input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(
            dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']),
                               0.057731868199093525,
                               places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']),
                               0.081341845650928557,
                               places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']),
                               14.996238224489693,
                               places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']),
                               0.013666025579465165,
                               places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']),
                               280.67100837103203,
                               places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']),
                               1.4637917512768972,
                               places=4)
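
Outside a test harness, the same pipeline can be exercised directly. The sketch below uses the same assumed module paths as the scaffold above and a placeholder dataset path; it mirrors the missing-data example rather than prescribing an official workflow.

# Standalone sketch: read a raw dataset, randomly drop half of the scores,
# fit the reduced maximum-likelihood model, and inspect the recovered
# per-observer and per-stimulus parameters.
import numpy as np

from sureal.tools.misc import import_python_file
from sureal.dataset_reader import MissingDataRawDatasetReader
from sureal.subjective_model import MaximumLikelihoodEstimationModelReduced

np.random.seed(0)
dataset = import_python_file('resource/dataset/NFLX_dataset_public_raw.py')  # placeholder path
reader = MissingDataRawDatasetReader(dataset, input_dict={'missing_probability': 0.5})
model = MaximumLikelihoodEstimationModelReduced(reader)
result = model.run_modeling()

print(np.mean(result['quality_scores']))          # recovered per-stimulus quality
print(np.std(result['observer_bias']))            # spread of per-observer bias
print(np.mean(result['observer_inconsistency']))  # average per-observer inconsistency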