def setUp(self):
    """Build a reader over the AGH TV quality-variation dataset, keeping subjects 0..12."""
    resource_path = SurealConfig.test_resource_path('quality_variation_2017_agh_tv_dataset.py')
    loaded_dataset = import_python_file(resource_path)
    # Fixed seed so any randomized behavior in the reader is reproducible across runs.
    np.random.seed(0)
    subject_selection = {'selected_subjects': list(range(13))}
    self.dataset2_reader = SelectSubjectRawDatasetReader(loaded_dataset, input_dict=subject_selection)
def setUp(self):
    """Build a reader over the os-as-dict test dataset, restricted to subjects 1 and 2."""
    resource_path = SurealConfig.test_resource_path('test_dataset_os_as_dict.py')
    loaded_dataset = import_python_file(resource_path)
    # Fixed seed so any randomized behavior in the reader is reproducible across runs.
    np.random.seed(0)
    # Subject selection passed as an ndarray (exercises array-typed input handling).
    subject_selection = {'selected_subjects': np.array([1, 2])}
    self.dataset2_reader = SelectSubjectRawDatasetReader(loaded_dataset, input_dict=subject_selection)
def setUp(self):
    """Build a reader over the NFLX public raw dataset, keeping the first 5 subjects."""
    resource_path = SurealConfig.test_resource_path('NFLX_dataset_public_raw.py')
    loaded_dataset = import_python_file(resource_path)
    # Fixed seed so any randomized behavior in the reader is reproducible across runs.
    np.random.seed(0)
    # Selection passed as a range object (exercises non-list iterable input).
    subject_selection = {'selected_subjects': range(5)}
    self.dataset_reader = SelectSubjectRawDatasetReader(loaded_dataset, input_dict=subject_selection)
class SelectedSubjectDatasetReaderTest(unittest.TestCase):
    """Tests for SelectSubjectRawDatasetReader over the NFLX public raw dataset."""

    def setUp(self):
        """Load the dataset and keep only the first 5 subjects."""
        resource_path = SurealConfig.test_resource_path('NFLX_dataset_public_raw.py')
        loaded_dataset = import_python_file(resource_path)
        # Fixed seed so any randomized behavior in the reader is reproducible.
        np.random.seed(0)
        self.dataset_reader = SelectSubjectRawDatasetReader(
            loaded_dataset, input_dict={'selected_subjects': range(5)})

    def test_read_dataset_stats(self):
        """Counts of refs/distorted videos stay intact; observers shrink to the selection."""
        reader = self.dataset_reader
        self.assertEqual(reader.num_ref_videos, 9)
        self.assertEqual(reader.num_dis_videos, 79)
        self.assertEqual(reader.num_observers, 5)

    def test_opinion_score_2darray(self):
        """Score matrix is (videos x selected subjects)."""
        scores = self.dataset_reader.opinion_score_2darray
        self.assertEqual(scores.shape, (79, 5))

    def test_to_dataset(self):
        """Round-tripping through to_dataset() yields scores that differ from the originals."""
        converted = self.dataset_reader.to_dataset()
        scores_before = [video['os'] for video in self.dataset_reader.dataset.dis_videos]
        scores_after = [video['os'] for video in converted.dis_videos]
        self.assertNotEqual(scores_before, scores_after)
class SelectedSubjectDatasetReaderTest3(unittest.TestCase):
    """Tests for SelectSubjectRawDatasetReader on a dataset whose 'os' entries are dicts."""

    def setUp(self):
        """Load the AGH TV dataset and keep subjects 0..12."""
        resource_path = SurealConfig.test_resource_path('quality_variation_2017_agh_tv_dataset.py')
        loaded_dataset = import_python_file(resource_path)
        # Fixed seed so any randomized behavior in the reader is reproducible.
        np.random.seed(0)
        self.dataset2_reader = SelectSubjectRawDatasetReader(
            loaded_dataset, input_dict={'selected_subjects': list(range(13))})

    def test_read_dataset_stats_os_as_dict(self):
        """Basic dataset dimensions after subject selection."""
        reader = self.dataset2_reader
        self.assertEqual(reader.num_observers, 13)
        self.assertEqual(reader.num_ref_videos, 20)
        self.assertEqual(reader.num_dis_videos, 320)

    def test_opinion_score_2darray_os_as_dict(self):
        """Dict-keyed scores land in the right cells; missing votes become NaN."""
        scores = self.dataset2_reader.opinion_score_2darray
        self.assertEqual(scores[0, 0], 2.0)
        self.assertEqual(scores[1, 0], 1.0)
        self.assertTrue(np.isnan(scores[2, 0]))
        self.assertTrue(np.isnan(scores[0, 1]))
        self.assertTrue(np.isnan(scores[1, 1]))
        self.assertEqual(scores[2, 1], 1.0)

    def test_to_dataset_os_as_dict(self):
        """Round-tripping through to_dataset() yields scores that differ from the originals."""
        converted = self.dataset2_reader.to_dataset()
        scores_before = [video['os'] for video in self.dataset2_reader.dataset.dis_videos]
        scores_after = [video['os'] for video in converted.dis_videos]
        self.assertNotEqual(scores_before, scores_after)
def run_one_num_subject(num_subject, dataset, seed):
    """Fit the subjective model on a random subset of `num_subject` observers.

    Seeds numpy's RNG with `seed`, picks `num_subject` distinct subjects via a
    random permutation, and runs the model on the reduced dataset.
    Returns (dataset_reader, modeling_result).
    """
    np.random.seed(seed)
    # Total observer count is taken from the first distorted video's score list.
    n_total = len(dataset.dis_videos[0]['os'])
    chosen = np.random.permutation(n_total)[:num_subject]
    reader = SelectSubjectRawDatasetReader(
        dataset, input_dict={'selected_subjects': chosen})
    # NOTE(review): `model_class` is a free variable — presumably bound by an
    # enclosing scope not visible here; confirm against the surrounding code.
    model = model_class(reader)
    outcome = model.run_modeling(normalize_final=False)
    return reader, outcome
def _bootstrap_subjects(cls, dataset, result, n_subj, n_bootstrap, kwargs):
    """Bootstrap over subjects to estimate 95% CIs for quality scores.

    For each of `n_bootstrap` rounds, resamples `n_subj` subjects with
    replacement (seeded by the round index for reproducibility), re-runs the
    parent class's modeling on the resampled data, and re-centers each
    bootstrap result so its mean observer bias matches the reference
    `result` (the offset is subtracted from biases and added to quality
    scores, keeping bias + quality consistent).

    Returns [lower_margin, upper_margin]: distances from the reference
    quality scores down to the 2.5th and up to the 97.5th bootstrap
    percentiles, per distorted video.
    """
    collected = []
    for seed in range(n_bootstrap):
        print(f"Bootstrap with seed {seed}")
        np.random.seed(seed)
        resampled = np.random.choice(range(n_subj), size=n_subj, replace=True)
        reader = SelectSubjectRawDatasetReader(
            dataset, input_dict={'selected_subjects': resampled})
        boot = super(MaximumLikelihoodEstimationModelWithBootstrapping, cls). \
            _run_modeling(reader, **kwargs)
        # Mean shift of the bootstrap observer biases relative to the
        # reference run, evaluated on the resampled subject indices.
        offset = np.mean(
            np.array(boot['observer_bias'])
            - np.array(result['observer_bias'])[resampled])
        boot['observer_bias'] = list(np.array(boot['observer_bias']) - offset)
        boot['quality_scores'] = list(np.array(boot['quality_scores']) + offset)
        collected.append(boot)
    quality_matrix = np.array([entry['quality_scores'] for entry in collected])
    reference = np.array(result['quality_scores'])
    return [
        reference - np.percentile(quality_matrix, 2.5, axis=0),
        np.percentile(quality_matrix, 97.5, axis=0) - reference,
    ]