def test_observer_aware_subjective_model(self):
    """Run the reduced MLE subjective model on the dataset and pin the
    aggregate statistics (sum/variance) of observer bias, observer
    inconsistency, and recovered quality scores."""
    subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
        self.dataset_filepath)
    result = subjective_model.run_modeling()
    # assertAlmostEqual, not the deprecated assertAlmostEquals alias
    # (the alias was removed in Python 3.12).
    self.assertAlmostEqual(np.sum(result['observer_bias']), -0.090840910829083799, places=4)
    self.assertAlmostEqual(np.var(result['observer_bias']), 0.089032585621095089, places=4)
    self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 15.681766163430936, places=4)
    self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.012565584832977776, places=4)
    self.assertAlmostEqual(np.sum(result['quality_scores']), 280.31447815213642, places=4)
    self.assertAlmostEqual(np.var(result['quality_scores']), 1.4355485462027884, places=4)
def test_observer_aware_subjective_model_use_log(self):
    """Same model as the baseline test, but run with use_log=True; only the
    bias sum and quality-score sum shift, the variances stay the same."""
    subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
        self.dataset_filepath)
    result = subjective_model.run_modeling(use_log=True)
    # assertAlmostEqual, not the deprecated assertAlmostEquals alias
    # (the alias was removed in Python 3.12).
    self.assertAlmostEqual(np.sum(result['observer_bias']), -0.082429594509296211, places=4)
    self.assertAlmostEqual(np.var(result['observer_bias']), 0.089032585621095089, places=4)
    self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 15.681766163430936, places=4)
    self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.012565584832977776, places=4)
    self.assertAlmostEqual(np.sum(result['quality_scores']), 280.2889206910113, places=4)
    self.assertAlmostEqual(np.var(result['quality_scores']), 1.4355485462027884, places=4)
def test_observer_aware_subjective_model_with_dscoring(self):
    """Run the reduced MLE model with dscore_mode=True; observer statistics
    match the baseline, while the quality-score sum/variance shift."""
    subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
        self.dataset_filepath)
    result = subjective_model.run_modeling(dscore_mode=True)
    # assertAlmostEqual, not the deprecated assertAlmostEquals alias
    # (the alias was removed in Python 3.12).
    self.assertAlmostEqual(np.sum(result['observer_bias']), -0.090840910829083799, places=4)
    self.assertAlmostEqual(np.var(result['observer_bias']), 0.089032585621095089, places=4)
    self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 15.681766163430936, places=4)
    self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.012565584832977776, places=4)
    self.assertAlmostEqual(np.sum(result['quality_scores']), 298.35293969059796, places=4)
    self.assertAlmostEqual(np.var(result['quality_scores']), 1.4163670233392607, places=4)
def test_observer_aware_subjective_model_with_dscoring_and_zscoring(self):
    """Run the reduced MLE model with both dscore_mode and zscore_mode enabled;
    z-scoring drives the bias and quality-score sums (and bias variance) to 0."""
    subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
        self.dataset_filepath)
    result = subjective_model.run_modeling(dscore_mode=True, zscore_mode=True)
    # assertAlmostEqual, not the deprecated assertAlmostEquals alias
    # (the alias was removed in Python 3.12).
    self.assertAlmostEqual(np.sum(result['observer_bias']), 0.0, places=4)
    self.assertAlmostEqual(np.var(result['observer_bias']), 0.0, places=4)
    self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 11.628499078069273, places=4)
    self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.0082089371266301642, places=4)
    self.assertAlmostEqual(np.sum(result['quality_scores']), 0.0, places=4)
    self.assertAlmostEqual(np.var(result['quality_scores']), 0.80806512456121071, places=4)
def test_observer_aware_subjective_model_with_zscoring(self):
    """Run the reduced MLE model with zscore_mode=True only; z-scoring zeroes
    the bias statistics and the quality-score sum."""
    subjective_model = MaximumLikelihoodEstimationModelReduced.from_dataset_file(
        self.dataset_filepath)
    result = subjective_model.run_modeling(zscore_mode=True)
    # assertAlmostEqual, not the deprecated assertAlmostEquals alias
    # (the alias was removed in Python 3.12).
    self.assertAlmostEqual(np.sum(result['observer_bias']), 0.0, places=4)
    self.assertAlmostEqual(np.var(result['observer_bias']), 0.0, places=4)
    self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 11.568205661696393, places=4)
    self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.0079989301785523791, places=4)
    self.assertAlmostEqual(np.sum(result['quality_scores']), 0.0, places=4)
    self.assertAlmostEqual(np.var(result['quality_scores']), 0.80942484781493518, places=4)