Example 1
    def setUp(self):
        """Load the public raw NFLX dataset and wrap it in a reader that
        randomly drops scores with probability 0.1 (seeded RNG, so the
        simulated missing entries are reproducible across runs)."""
        path = config.ROOT + '/python/test/resource/NFLX_dataset_public_raw.py'
        raw_dataset = import_python_file(path)

        # Fix the RNG before the reader samples its missing-data mask.
        np.random.seed(0)
        self.dataset_reader = MissingDataRawDatasetReader(
            raw_dataset, input_dict={'missing_probability': 0.1})
Example 2
 def run_one_missing_prob(missing_prob, dataset, seed):
     """Fit one subjective model on *dataset* with scores randomly dropped
     at probability *missing_prob*, seeding numpy's RNG with *seed* so the
     missing-data mask is reproducible.

     Uses ``model_class`` from the enclosing scope. Returns the tuple
     ``(dataset_reader, result)``; ``result`` is None when modeling raises
     ValueError (e.g. too much data is missing for the fit to proceed).
     """
     np.random.seed(seed)
     info_dict = {
         'missing_probability': missing_prob,
     }
     dataset_reader = MissingDataRawDatasetReader(dataset,
                                                  input_dict=info_dict)
     subjective_model = model_class(dataset_reader)
     try:
         result = subjective_model.run_modeling(normalize_final=False)
     except ValueError as e:
         # Single parenthesized argument: valid in both Python 2 and 3
         # (the original `print '...'` statement is a SyntaxError on Py3).
         print('Warning: {}, return result None'.format(e))
         result = None
     return dataset_reader, result
Example 3
    def test_observer_aware_subjective_model_missingdata(self):
        """MaximumLikelihoodEstimationModelReduced should reproduce known
        per-quantity statistics when 10% and then 50% of the raw scores are
        randomly removed (RNG re-seeded before each run, so the masks are
        deterministic)."""
        dataset = import_python_file(self.dataset_filepath)

        def check(missing_probability, expected):
            # Re-seed so the missing-data mask is identical to the one the
            # reference numbers below were computed with.
            np.random.seed(0)
            dataset_reader = MissingDataRawDatasetReader(
                dataset,
                input_dict={'missing_probability': missing_probability})
            subjective_model = MaximumLikelihoodEstimationModelReduced(
                dataset_reader)
            result = subjective_model.run_modeling()
            for key, (expected_sum, expected_var) in expected.items():
                # assertAlmostEqual is the non-deprecated spelling of
                # assertAlmostEquals; 4 places, as before.
                self.assertAlmostEqual(np.sum(result[key]), expected_sum,
                                       places=4)
                self.assertAlmostEqual(np.var(result[key]), expected_var,
                                       places=4)

        # expected: quantity -> (sum, variance)
        check(0.1, {
            'observer_bias': (-0.18504017984241944, 0.087350553292201705),
            'observer_inconsistency': (15.520738471447299,
                                       0.010940587327083341),
            'quality_scores': (279.94975274863879, 1.4325574378911554),
        })
        check(0.5, {
            'observer_bias': (0.057731868199093525, 0.081341845650928557),
            'observer_inconsistency': (14.996238224489693,
                                       0.013666025579465165),
            'quality_scores': (280.67100837103203, 1.4637917512768972),
        })
Example 4
    def test_observer_content_aware_subjective_model_missingdata(self):
        """Full MaximumLikelihoodEstimationModel (content-aware) should
        reproduce known per-quantity statistics when 10% and then 50% of the
        raw scores are randomly removed (RNG re-seeded before each run)."""
        dataset = import_python_file(self.dataset_filepath)

        def check(missing_probability, expected):
            # Re-seed so the missing-data mask is identical to the one the
            # reference numbers below were computed with.
            np.random.seed(0)
            dataset_reader = MissingDataRawDatasetReader(
                dataset,
                input_dict={'missing_probability': missing_probability})
            subjective_model = MaximumLikelihoodEstimationModel(
                dataset_reader)
            result = subjective_model.run_modeling()
            for key, (expected_sum, expected_var) in expected.items():
                # assertAlmostEqual is the non-deprecated spelling of
                # assertAlmostEquals; 4 places, as before.
                self.assertAlmostEqual(np.sum(result[key]), expected_sum,
                                       places=4)
                self.assertAlmostEqual(np.var(result[key]), expected_var,
                                       places=4)

        # expected: quantity -> (sum, variance)
        check(0.1, {
            'content_bias': (0, 0),
            'content_ambiguity': (3.9104244772977128, 0.0037713583509767193),
            'observer_bias': (-0.21903272050455846, 0.084353684687185043),
            'observer_inconsistency': (9.8168943054654481,
                                       0.028159236075789944),
            'quality_scores': (280.05548186797336, 1.4339487982797514),
        })
        check(0.5, {
            'content_bias': (0, 0),
            'content_ambiguity': (2.63184284168883, 0.019164097909450246),
            'observer_bias': (0.2263148440748638, 0.070613033112114504),
            'observer_inconsistency': (12.317917502439435,
                                       0.029455722248727296),
            'quality_scores': (280.29962156788139, 1.4717366222424826),
        })