def test_mos_subjective_model_output2(self):
    """Aggregated dataset keeps the model's 'groundtruth' score and drops raw 'os' scores."""
    dataset = import_python_file(self.dataset_filepath)
    dataset_reader = RawDatasetReader(dataset)
    subjective_model = MosModel(dataset_reader)
    subjective_model.run_modeling()
    dataset2 = subjective_model.to_aggregated_dataset()
    dis_video = dataset2.dis_videos[0]
    # assertIn/assertNotIn give clearer failure messages than assertTrue(x in y)
    self.assertIn('groundtruth', dis_video)
    self.assertNotIn('os', dis_video)
    # assertAlmostEquals is a deprecated alias (removed in Python 3.12)
    self.assertAlmostEqual(dis_video['groundtruth'], 4.884615384615385, places=4)
def test_mos_subjective_model_output_custom_resampling(self):
    """Exporting with a custom resampling_type writes it to the output file and
    omits quality_height/quality_width."""
    dataset = import_python_file(self.dataset_filepath)
    dataset_reader = RawDatasetReader(dataset)
    subjective_model = MosModel(dataset_reader)
    subjective_model.run_modeling()
    subjective_model.to_aggregated_dataset_file(
        self.output_dataset_filepath, resampling_type='lanczos')
    self.assertTrue(os.path.exists(self.output_dataset_filepath))
    dataset2 = import_python_file(self.output_dataset_filepath)
    self.assertFalse(hasattr(dataset2, 'quality_height'))
    self.assertFalse(hasattr(dataset2, 'quality_width'))
    # assertEquals/assertAlmostEquals are deprecated aliases (removed in Python 3.12)
    self.assertEqual(dataset2.resampling_type, 'lanczos')
    dis_video = dataset2.dis_videos[0]
    self.assertIn('groundtruth', dis_video)
    self.assertNotIn('os', dis_video)
    self.assertAlmostEqual(dis_video['groundtruth'], 4.884615384615385, places=4)
def test_mos_subjective_model_transform_final(self):
    """run_modeling applies the linear transform p1*x + p0 to the quality scores."""
    dataset = import_python_file(self.dataset_filepath)
    dataset_reader = RawDatasetReader(dataset)
    subjective_model = MosModel(dataset_reader)
    result = subjective_model.run_modeling(transform_final={'p1': 10, 'p0': 1})
    scores = result['quality_scores']
    # assertAlmostEquals is a deprecated alias (removed in Python 3.12)
    self.assertAlmostEqual(scores[0], 49.84615384615385, places=4)
    self.assertAlmostEqual(scores[10], 21.769230769230771, places=4)
    self.assertAlmostEqual(np.mean(scores), 36.44790652385589, places=4)
def test_mos_subjective_model_normalize_final(self):
    """normalize_final=True z-normalizes the quality scores (mean ~ 0)."""
    dataset = import_python_file(self.dataset_filepath)
    dataset_reader = RawDatasetReader(dataset)
    subjective_model = MosModel(dataset_reader)
    result = subjective_model.run_modeling(normalize_final=True)
    scores = result['quality_scores']
    # assertAlmostEquals is a deprecated alias (removed in Python 3.12)
    self.assertAlmostEqual(scores[0], 1.1318646945818083, places=4)
    self.assertAlmostEqual(scores[10], -1.2400334499143002, places=4)
    self.assertAlmostEqual(np.mean(scores), 0.0, places=4)
def test_mos_subjective_model(self):
    """Baseline MOS model run produces the expected per-video and mean scores."""
    dataset = import_python_file(self.dataset_filepath)
    dataset_reader = RawDatasetReader(dataset)
    subjective_model = MosModel(dataset_reader)
    result = subjective_model.run_modeling()
    scores = result['quality_scores']
    # assertAlmostEquals is a deprecated alias (removed in Python 3.12)
    self.assertAlmostEqual(scores[0], 4.884615384615385, places=4)
    self.assertAlmostEqual(scores[10], 2.0769230769230771, places=4)
    self.assertAlmostEqual(np.mean(scores), 3.544790652385589, places=4)
def test_zscore_mos_subjective_model_corruptdata_subjreject(self):
    """With corrupted subjects, zscore_mode + subject_rejection yields zero-mean scores
    with the expected variance."""
    dataset = import_python_file(self.dataset_filepath)
    np.random.seed(0)  # corruption reader is randomized; fix the seed for determinism
    info_dict = {
        'selected_subjects': range(5),
    }
    dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
    subjective_model = MosModel(dataset_reader)
    result = subjective_model.run_modeling(zscore_mode=True, subject_rejection=True)
    scores = result['quality_scores']
    # assertAlmostEquals is a deprecated alias (removed in Python 3.12)
    self.assertAlmostEqual(np.mean(scores), 0.0, places=4)
    self.assertAlmostEqual(np.var(scores), 0.66670826882879042, places=4)
def test_mos_subjective_model_corruptdata_subjreject(self):
    """Subject rejection reduces the variance inflation caused by corrupted subjects."""
    dataset = import_python_file(self.dataset_filepath)
    np.random.seed(0)  # corruption reader is randomized; fix the seed for determinism
    info_dict = {
        'selected_subjects': range(5),
    }
    dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
    subjective_model = MosModel(dataset_reader)
    result = subjective_model.run_modeling(subject_rejection=True)
    scores = result['quality_scores']
    # assertAlmostEquals is a deprecated alias (removed in Python 3.12)
    self.assertAlmostEqual(np.mean(scores), 3.5611814345991566, places=4)
    self.assertAlmostEqual(np.var(scores), 1.1049505732699529, places=4)  # 1.4012220200639218
def test_mos_subjective_model_corruptdata(self):
    """Without subject rejection, corrupted subjects still yield the expected
    mean/variance for the plain MOS model."""
    dataset = import_python_file(self.dataset_filepath)
    np.random.seed(0)  # corruption reader is randomized; fix the seed for determinism
    info_dict = {
        'selected_subjects': range(5),
    }
    dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
    subjective_model = MosModel(dataset_reader)
    result = subjective_model.run_modeling()
    scores = result['quality_scores']
    # assertAlmostEquals is a deprecated alias (removed in Python 3.12)
    self.assertAlmostEqual(np.mean(scores), 3.5447906523855899, places=4)
    self.assertAlmostEqual(np.var(scores), 0.95893305294535369, places=4)  # 1.4012220200639218