def setUp(self):
    """Build a paired-comparison dataset reader from the full NFLX raw dataset."""
    path = SurealConfig.test_resource_path('NFLX_dataset_public_raw.py')
    raw_dataset = import_python_file(path)
    raw_reader = RawDatasetReader(raw_dataset)
    # Convert raw opinion scores into within-subject paired comparisons.
    pc_dataset = raw_reader.to_pc_dataset(pc_type='within_subject')
    self.pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
def from_dataset_file(cls, dataset_filepath, content_ids=None, asset_ids=None):
    """Construct an instance from a dataset file.

    Optionally restricts the dataset to the given content_ids and/or
    asset_ids before wrapping it in a RawDatasetReader.
    """
    filtered_dataset = cls._import_dataset_and_filter(
        dataset_filepath, content_ids, asset_ids)
    return cls(RawDatasetReader(filtered_dataset))
def test_mos_subjective_model_normalize_final(self):
    """MOS model with normalize_final=True: scores are standardized (zero mean)."""
    dataset = import_python_file(self.dataset_filepath)
    dataset_reader = RawDatasetReader(dataset)
    subjective_model = MosModel(dataset_reader)
    result = subjective_model.run_modeling(normalize_final=True)
    scores = result['quality_scores']
    # assertAlmostEqual replaces the deprecated assertAlmostEquals alias
    # (removed in Python 3.12); the check is identical.
    self.assertAlmostEqual(scores[0], 1.1318646945818083, places=4)
    self.assertAlmostEqual(scores[10], -1.2400334499143002, places=4)
    self.assertAlmostEqual(np.mean(scores), 0.0, places=4)
def test_mos_subjective_model_transform_final(self):
    """MOS model with transform_final={'p1': 10, 'p0': 1} applied to the scores."""
    dataset = import_python_file(self.dataset_filepath)
    dataset_reader = RawDatasetReader(dataset)
    subjective_model = MosModel(dataset_reader)
    result = subjective_model.run_modeling(transform_final={'p1': 10, 'p0': 1})
    scores = result['quality_scores']
    # assertAlmostEqual replaces the deprecated assertAlmostEquals alias
    # (removed in Python 3.12); the check is identical.
    self.assertAlmostEqual(scores[0], 49.84615384615385, places=4)
    self.assertAlmostEqual(scores[10], 29.076923076923073, places=4)
    self.assertAlmostEqual(np.mean(scores), 35.871794871794876, places=4)
def test_mos_subjective_model_normalize_final(self):
    """MOS model with normalize_final=True: scores are standardized (zero mean)."""
    dataset = import_python_file(self.dataset_filepath)
    dataset_reader = RawDatasetReader(dataset)
    subjective_model = MosModel(dataset_reader)
    result = subjective_model.run_modeling(normalize_final=True)
    scores = result['quality_scores']
    # assertAlmostEqual replaces the deprecated assertAlmostEquals alias
    # (removed in Python 3.12); the check is identical.
    self.assertAlmostEqual(scores[0], 1.1666952279897338, places=4)
    self.assertAlmostEqual(scores[10], -0.56729217507757768, places=4)
    self.assertAlmostEqual(np.mean(scores), 0.0, places=4)
def _run_corrupt_probs(dataset, corrupt_probs, model_class, seed, perf_type='rmse'):
    """Measure model performance as the data-corruption probability grows.

    For each probability in corrupt_probs, corrupts the dataset scores,
    runs model_class, and compares its baseline-standardized quality scores
    to the clean-dataset baseline using PCC or RMSE.

    Returns:
        (datasizes, perfs): the corruption probabilities and the matching
        performance values (NaN where a run failed).
    """
    def run_one_corrupt_prob(corrupt_prob, dataset, seed):
        # Re-seed per run so every corruption level sees the same random stream.
        np.random.seed(seed)
        info_dict = {
            'corrupt_probability': corrupt_prob,
        }
        dataset_reader = CorruptDataRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = model_class(dataset_reader)
        try:
            result = subjective_model.run_modeling(normalize_final=False)
        except ValueError as e:
            # print() form works on both Python 2 and 3 (original used the
            # Python 2 print statement, a syntax error on Python 3).
            print('Warning: {}, return result None'.format(e))
            result = None
        return dataset_reader, result

    inputs = [[corrupt_prob, dataset, seed] for corrupt_prob in corrupt_probs]
    outputs = [run_one_corrupt_prob(*args) for args in inputs]

    # Baseline: model fit on the uncorrupted dataset, scores standardized.
    result0 = model_class(
        RawDatasetReader(dataset)).run_modeling(normalize_final=False)
    result0_qs = np.array(result0['quality_scores'])
    result0_qs_mean = np.mean(result0_qs)
    result0_qs_std = np.std(result0_qs)
    result0_qs = (result0_qs - result0_qs_mean) / result0_qs_std

    perfs = []
    datasizes = []
    for args, output in zip(inputs, outputs):
        corrupt_prob, dataset, seed = args
        reader, result = output
        # Bug fix: check for a failed run BEFORE touching result — the
        # original read result['quality_scores'] first, which raises
        # TypeError when result is None instead of recording NaN.
        if result is None:
            perf = float('NaN')
        else:
            result_qs = np.array(result['quality_scores'])
            result_qs = (result_qs - result0_qs_mean) / result0_qs_std
            if perf_type == 'pcc':
                perf, _ = scipy.stats.pearsonr(result_qs, result0_qs)
            elif perf_type == 'rmse':
                perf = np.sqrt(
                    np.mean(np.power(result_qs - result0_qs, 2.0)))
            else:
                assert False
        perfs.append(perf)
        # "datasize" for this experiment is the corruption probability itself.
        datasizes.append(corrupt_prob)
    return datasizes, perfs
class RawDatasetReaderTest(unittest.TestCase):
    """Tests for RawDatasetReader on the full NFLX public raw dataset."""

    def setUp(self):
        # Load the packaged test dataset and wrap it in a reader.
        dataset_filepath = SurealConfig.test_resource_path(
            'NFLX_dataset_public_raw.py')
        self.dataset = import_python_file(dataset_filepath)
        self.dataset_reader = RawDatasetReader(self.dataset)

    def test_read_dataset_stats(self):
        # Basic dataset dimensions exposed by the reader.
        self.assertEqual(self.dataset_reader.num_ref_videos, 9)
        self.assertEqual(self.dataset_reader.max_content_id_of_ref_videos, 8)
        self.assertEqual(self.dataset_reader.num_dis_videos, 79)
        self.assertEqual(self.dataset_reader.num_observers, 26)

    def test_opinion_score_2darray(self):
        # Raw opinion scores; presumably (dis_videos x observers), since
        # axis=1 std is taken per video — TODO confirm against reader docs.
        os_2darray = self.dataset_reader.opinion_score_2darray
        self.assertAlmostEqual(np.mean(os_2darray), 3.544790652385589, places=4)
        self.assertAlmostEqual(np.mean(np.std(os_2darray, axis=1)),
                               0.64933186478291516, places=4)

    def test_dis_videos_content_ids(self):
        content_ids = self.dataset_reader.content_id_of_dis_videos
        self.assertAlmostEqual(np.mean(content_ids), 3.8607594936708862, places=4)

    def test_disvideo_is_refvideo(self):
        # The first 9 distorted-video entries are the reference videos.
        l = self.dataset_reader.disvideo_is_refvideo
        self.assertTrue(all(l[0:9]))

    def test_ref_score(self):
        self.assertEqual(self.dataset_reader.ref_score, 5.0)

    def test_to_persubject_dataset_wrong_dim(self):
        # A flat 3000-element array has the wrong shape, so the conversion
        # must raise AssertionError; the assertEqual inside the context is
        # then never reached.
        with self.assertRaises(AssertionError):
            dataset = self.dataset_reader.to_persubject_dataset(np.zeros(3000))
            self.assertEqual(len(dataset.dis_videos), 2054)

    def test_to_persubject_dataset(self):
        # 79 videos x 26 subjects flatten into 2054 per-subject entries.
        dataset = self.dataset_reader.to_persubject_dataset(np.zeros([79, 26]))
        self.assertEqual(len(dataset.dis_videos), 2054)
class RawDatasetReaderPartialTest(unittest.TestCase):
    """Tests for RawDatasetReader on the PARTIAL NFLX public raw dataset."""

    def setUp(self):
        # Load the partial (incomplete-scores) variant of the test dataset.
        dataset_filepath = SurealConfig.test_resource_path(
            'NFLX_dataset_public_raw_PARTIAL.py')
        self.dataset = import_python_file(dataset_filepath)
        self.dataset_reader = RawDatasetReader(self.dataset)

    def test_read_dataset_stats(self):
        # The partial dataset has fewer reference and distorted videos.
        self.assertEqual(self.dataset_reader.num_ref_videos, 7)
        self.assertEqual(self.dataset_reader.max_content_id_of_ref_videos, 8)
        self.assertEqual(self.dataset_reader.num_dis_videos, 51)
        self.assertEqual(self.dataset_reader.num_observers, 26)

    def test_opinion_score_2darray(self):
        os_2darray = self.dataset_reader.opinion_score_2darray
        self.assertAlmostEqual(np.mean(os_2darray), 3.4871794871794872, places=4)
        self.assertAlmostEqual(np.mean(np.std(os_2darray, axis=1)),
                               0.65626252041788125, places=4)

    def test_dis_videos_content_ids(self):
        content_ids = self.dataset_reader.content_id_of_dis_videos
        self.assertAlmostEqual(np.mean(content_ids), 3.9215686274509802, places=4)

    def test_disvideo_is_refvideo(self):
        # The first 7 distorted-video entries are the reference videos.
        l = self.dataset_reader.disvideo_is_refvideo
        self.assertTrue(all(l[0:7]))

    def test_ref_score(self):
        self.assertEqual(self.dataset_reader.ref_score, 5.0)

    def test_to_persubject_dataset_wrong_dim(self):
        # Wrong-shaped input must raise AssertionError; the assertEqual
        # inside the context is then never reached.
        with self.assertRaises(AssertionError):
            dataset = self.dataset_reader.to_persubject_dataset(np.zeros(3000))
            self.assertEqual(len(dataset.dis_videos), 2054)

    def test_to_persubject_dataset(self):
        # NOTE(review): passes a (79, 26) array although this dataset has 51
        # dis videos; the expected length 1326 == 51 * 26, so the reader
        # appears to use only the rows it needs — confirm.
        dataset = self.dataset_reader.to_persubject_dataset(np.zeros([79, 26]))
        self.assertEqual(len(dataset.dis_videos), 1326)
def test_mos_subjective_model(self):
    """Plain MOS model run: spot-check quality scores and their std estimates."""
    dataset = import_python_file(self.dataset_filepath)
    dataset_reader = RawDatasetReader(dataset)
    subjective_model = MosModel(dataset_reader)
    result = subjective_model.run_modeling()
    scores = result['quality_scores']
    # assertAlmostEqual replaces the deprecated assertAlmostEquals alias
    # (removed in Python 3.12); the check is identical.
    self.assertAlmostEqual(scores[0], 4.884615384615385, places=4)
    self.assertAlmostEqual(scores[10], 2.8076923076923075, places=4)
    self.assertAlmostEqual(np.mean(scores), 3.4871794871794877, places=4)
    scores_std = result['quality_scores_std']
    self.assertAlmostEqual(np.mean(scores_std), 0.13125250408357622, places=4)
def _run_partial_corrupt_nums(dataset, subject_nums, model_class, seed, perf_type='rmse'):
    """Evaluate model robustness when a subset of subjects is corrupted.

    For each count in subject_nums, randomly selects that many subjects,
    corrupts their scores with probability 0.5, runs model_class, and
    compares its baseline-standardized quality scores against the clean
    baseline using PCC or RMSE.

    Returns:
        (datasizes, perfs): corrupted-subject counts and matching
        performance values.
    """
    def _run_with_corrupted_subjects(count, dataset, seed):
        # Re-seed per run so every count draws the same random subject subset.
        np.random.seed(seed)
        info_dict = {
            'selected_subjects': np.random.permutation(
                len(dataset.dis_videos[0]['os']))[:count],
            'corrupt_probability': 0.5,
        }
        reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        model = model_class(reader)
        outcome = model.run_modeling(normalize_final=False)
        return reader, outcome

    runs = [[count, dataset, seed] for count in subject_nums]
    outcomes = [_run_with_corrupted_subjects(*args) for args in runs]

    # Baseline: model fit on the uncorrupted dataset, scores standardized.
    baseline = model_class(
        RawDatasetReader(dataset)).run_modeling(normalize_final=False)
    baseline_qs = np.array(baseline['quality_scores'])
    baseline_mean = np.mean(baseline_qs)
    baseline_std = np.std(baseline_qs)
    baseline_qs = (baseline_qs - baseline_mean) / baseline_std

    perfs = []
    datasizes = []
    for run_args, outcome_pair in zip(runs, outcomes):
        count, dataset, seed = run_args
        reader, outcome = outcome_pair
        qs = np.array(outcome['quality_scores'])
        qs = (qs - baseline_mean) / baseline_std
        if perf_type == 'pcc':
            perf, _ = scipy.stats.pearsonr(qs, baseline_qs)
        elif perf_type == 'rmse':
            perf = np.sqrt(np.mean(np.power(qs - baseline_qs, 2.0)))
        else:
            assert False
        perfs.append(perf)
        # "datasize" here is just the number of corrupted subjects.
        datasizes.append(np.prod(count))
    return datasizes, perfs
def test_mos_subjective_model(self):
    """Plain MOS model run: spot-check quality scores and their std estimates."""
    dataset = import_python_file(self.dataset_filepath)
    dataset_reader = RawDatasetReader(dataset)
    subjective_model = MosModel(dataset_reader)
    result = subjective_model.run_modeling()
    scores = result['quality_scores']
    # assertAlmostEqual replaces the deprecated assertAlmostEquals alias
    # (removed in Python 3.12); the check is identical.
    self.assertAlmostEqual(scores[0], 4.884615384615385, places=4)
    self.assertAlmostEqual(scores[10], 2.0769230769230771, places=4)
    self.assertAlmostEqual(np.mean(scores), 3.544790652385589, places=4)
    scores_std = result['quality_scores_std']
    self.assertAlmostEqual(np.mean(scores_std), 0.12986637295658307, places=4)
def test_persubject_subjective_model_output(self):
    """PerSubjectModel aggregated output: writes a dataset file with 'groundtruth'."""
    dataset = import_python_file(self.dataset_filepath)
    dataset_reader = RawDatasetReader(dataset)
    subjective_model = PerSubjectModel(dataset_reader)
    subjective_model.run_modeling(transform_final={'p1': 25, 'p0': -25})
    subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath)
    self.assertTrue(os.path.exists(self.output_dataset_filepath))
    dataset2 = import_python_file(self.output_dataset_filepath)
    dis_video = dataset2.dis_videos[0]
    # The aggregated file carries 'groundtruth' and drops the raw 'os' scores.
    self.assertTrue('groundtruth' in dis_video)
    self.assertTrue('os' not in dis_video)
    # assertAlmostEqual replaces the deprecated assertAlmostEquals alias
    # (removed in Python 3.12); the check is identical.
    self.assertAlmostEqual(dis_video['groundtruth'], 100.0, places=4)
def test_mos_subjective_model_output2(self):
    """MOS model in-memory aggregation: 'groundtruth'/'groundtruth_std' replace 'os'."""
    dataset = import_python_file(self.dataset_filepath)
    dataset_reader = RawDatasetReader(dataset)
    subjective_model = MosModel(dataset_reader)
    subjective_model.run_modeling()
    dataset2 = subjective_model.to_aggregated_dataset()
    dis_video = dataset2.dis_videos[0]
    self.assertTrue('groundtruth' in dis_video)
    self.assertTrue('groundtruth_std' in dis_video)
    self.assertTrue('os' not in dis_video)
    # assertAlmostEqual replaces the deprecated assertAlmostEquals alias
    # (removed in Python 3.12); the check is identical.
    self.assertAlmostEqual(dis_video['groundtruth'], 4.884615384615385, places=4)
    self.assertAlmostEqual(dis_video['groundtruth_std'], 0.08461538461538462, places=4)
def _run_subject_nums(dataset, subject_nums, model_class, seed, perf_type='rmse'):
    """Evaluate model performance versus the number of selected subjects.

    For each count in subject_nums, selects a random subject subset, runs
    model_class on it, and compares its baseline-standardized quality
    scores against the full-dataset baseline using PCC or RMSE.

    Returns:
        (datasizes, perfs): realized subject counts and matching
        performance values.
    """
    def _run_for_subject_count(count, dataset, seed):
        # Re-seed per run so every count draws from the same random stream.
        np.random.seed(seed)
        total_subjects = len(dataset.dis_videos[0]['os'])
        info_dict = {
            'selected_subjects': np.random.permutation(total_subjects)[:count]
        }
        reader = SelectSubjectRawDatasetReader(dataset, input_dict=info_dict)
        model = model_class(reader)
        outcome = model.run_modeling(normalize_final=False)
        return reader, outcome

    runs = [[count, dataset, seed] for count in subject_nums]
    outcomes = [_run_for_subject_count(*args) for args in runs]

    # Baseline: model fit on the complete dataset, scores standardized.
    baseline = model_class(
        RawDatasetReader(dataset)).run_modeling(normalize_final=False)
    baseline_qs = np.array(baseline['quality_scores'])
    baseline_mean = np.mean(baseline_qs)
    baseline_std = np.std(baseline_qs)
    baseline_qs = (baseline_qs - baseline_mean) / baseline_std

    perfs = []
    datasizes = []
    for reader, outcome in outcomes:
        qs = np.array(outcome['quality_scores'])
        qs = (qs - baseline_mean) / baseline_std
        if perf_type == 'pcc':
            perf, _ = scipy.stats.pearsonr(qs, baseline_qs)
        elif perf_type == 'rmse':
            perf = np.sqrt(np.mean(np.power(qs - baseline_qs, 2.0)))
        else:
            assert False
        perfs.append(perf)
        # Number of subjects actually present in the subsampled reader.
        datasizes.append(reader.opinion_score_2darray.shape[1])
    return datasizes, perfs
def test_mos_subjective_model_output_custom_resampling(self):
    """MOS aggregated file with a custom resampling_type recorded in the output."""
    dataset = import_python_file(self.dataset_filepath)
    dataset_reader = RawDatasetReader(dataset)
    subjective_model = MosModel(dataset_reader)
    subjective_model.run_modeling()
    subjective_model.to_aggregated_dataset_file(
        self.output_dataset_filepath, resampling_type='lanczos')
    self.assertTrue(os.path.exists(self.output_dataset_filepath))
    dataset2 = import_python_file(self.output_dataset_filepath)
    # quality_height/quality_width are not carried over to the output file.
    self.assertFalse(hasattr(dataset2, 'quality_height'))
    self.assertFalse(hasattr(dataset2, 'quality_width'))
    # assertEqual/assertAlmostEqual replace the deprecated assertEquals/
    # assertAlmostEquals aliases (removed in Python 3.12); checks identical.
    self.assertEqual(dataset2.resampling_type, 'lanczos')
    dis_video = dataset2.dis_videos[0]
    self.assertTrue('groundtruth' in dis_video)
    self.assertTrue('groundtruth_std' in dis_video)
    self.assertTrue('os' not in dis_video)
    self.assertAlmostEqual(dis_video['groundtruth'], 4.884615384615385, places=4)
    self.assertAlmostEqual(dis_video['groundtruth_std'], 0.08461538461538462, places=4)
def setUp(self):
    """Load the partial NFLX raw dataset and wrap it in a reader."""
    path = SurealConfig.test_resource_path('NFLX_dataset_public_raw_PARTIAL.py')
    self.dataset = import_python_file(path)
    self.dataset_reader = RawDatasetReader(self.dataset)
class RawDatasetReaderPCTest(unittest.TestCase):
    """Tests converting a raw dataset into a paired-comparison (PC) dataset."""
    # NOTE(review): a second class with this same name appears later in the
    # file; if both live in one module the later definition shadows this one
    # -- confirm which version is intended.

    def setUp(self):
        dataset_filepath = SurealConfig.test_resource_path(
            'NFLX_dataset_public_raw.py')
        dataset = import_python_file(dataset_filepath)
        self.dataset_reader = RawDatasetReader(dataset)

    def test_dataset_to_pc_dataset(self):
        # Default conversion; the 3-D array presumably indexes
        # (subject, video, video) pairwise preferences -- TODO confirm.
        pc_dataset = self.dataset_reader.to_pc_dataset()
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 8242)
        self.assertEqual(np.nanmean(opinion_score_3darray), 0.816039603960396)
        self.assertEqual(np.nanmin(opinion_score_3darray), 0.5)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)

    def test_dataset_to_pc_dataset_within_subject(self):
        # Within-subject pairing yields many more comparisons.
        pc_dataset = self.dataset_reader.to_pc_dataset(
            pc_type='within_subject')
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 80106)
        self.assertEqual(np.nanmean(opinion_score_3darray), 0.8050935185278244)
        self.assertEqual(np.nanmin(opinion_score_3darray), 0.5)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)

    def test_dataset_to_pc_dataset_coin_toss(self):
        # Coin-toss tiebreaking removes the 0.5 "tie" entries: all values 1.0.
        pc_dataset = self.dataset_reader.to_pc_dataset(
            tiebreak_method='coin_toss')
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 8242)
        self.assertEqual(np.nanmean(opinion_score_3darray), 1.0)
        self.assertEqual(np.nanmin(opinion_score_3darray), 1.0)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)

    def test_dataset_to_pc_dataset_random(self):
        import random
        random.seed(0)
        pc_dataset = self.dataset_reader.to_pc_dataset(randomness_level=0.5)
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 8242)
        # check: python2 values seem to fluctuate quite a bit
        self.assertAlmostEqual(np.nanmean(opinion_score_3darray), 0.85, delta=0.1)
        self.assertEqual(np.nanmin(opinion_score_3darray), 0.5)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)

    def test_dataset_to_pc_dataset_sampling_rate(self):
        import random
        random.seed(0)
        # Subsampling to 10% of comparisons shrinks the total accordingly.
        pc_dataset = self.dataset_reader.to_pc_dataset(sampling_rate=0.1)
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 844)
        self.assertAlmostEqual(np.nanmean(opinion_score_3darray), 0.85, delta=0.1)
        self.assertEqual(np.nanmin(opinion_score_3darray), 0.5)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)

    def test_dataset_to_pc_dataset_per_asset_sampling_rates(self):
        import random
        random.seed(0)
        # Full sampling for the first 39 assets, 10% for the remaining 40.
        pc_dataset = self.dataset_reader.to_pc_dataset(
            per_asset_sampling_rates=np.hstack(
                [np.ones(39), np.ones(40) * 0.1]))
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 4550)
        self.assertAlmostEqual(np.nanmean(opinion_score_3darray), 0.85, delta=0.1)
        self.assertEqual(np.nanmin(opinion_score_3darray), 0.5)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)
class RawDatasetReaderPCTest(unittest.TestCase):
    """Tests converting a raw dataset into a paired-comparison (PC) dataset.

    Extended variant: also covers cointoss_rate, sampling_rate > 1, and the
    per-asset cointoss/noise/mean-score options.
    """
    # NOTE(review): this class shares its name with an earlier one in the
    # file; if both live in one module this later definition shadows the
    # earlier -- confirm which version is intended.

    def setUp(self):
        dataset_filepath = SurealConfig.test_resource_path('NFLX_dataset_public_raw.py')
        dataset = import_python_file(dataset_filepath)
        self.dataset_reader = RawDatasetReader(dataset)

    def test_dataset_to_pc_dataset(self):
        # Default conversion; exact aggregate values pinned.
        pc_dataset = self.dataset_reader.to_pc_dataset()
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 8242)
        self.assertEqual(np.nanmean(opinion_score_3darray), 0.816039603960396)
        self.assertEqual(np.nanmin(opinion_score_3darray), 0.5)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)

    def test_dataset_to_pc_dataset_within_subject(self):
        pc_dataset = self.dataset_reader.to_pc_dataset(pc_type='within_subject')
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 80106)
        self.assertEqual(np.nanmean(opinion_score_3darray), 0.8050935185278244)
        self.assertEqual(np.nanmin(opinion_score_3darray), 0.5)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)

    def test_dataset_to_pc_dataset_coin_toss(self):
        # Coin-toss tiebreaking removes the 0.5 "tie" entries: all values 1.0.
        pc_dataset = self.dataset_reader.to_pc_dataset(tiebreak_method='coin_toss')
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 8242)
        self.assertEqual(np.nanmean(opinion_score_3darray), 1.0)
        self.assertEqual(np.nanmin(opinion_score_3darray), 1.0)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)

    def test_dataset_to_pc_dataset_random(self):
        import random
        random.seed(0)
        pc_dataset = self.dataset_reader.to_pc_dataset(cointoss_rate=0.5)
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 8242)
        # check: python2 values seem to fluctuate quite a bit
        self.assertAlmostEqual(float(np.nanmean(opinion_score_3darray)),
                               0.8966492602262838, delta=0.01)
        self.assertEqual(np.nanmin(opinion_score_3darray), 0.5)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)

    def test_dataset_to_pc_dataset_sampling_rate(self):
        import random
        random.seed(0)
        # Subsampling to 10% of comparisons shrinks the total accordingly.
        pc_dataset = self.dataset_reader.to_pc_dataset(sampling_rate=0.1)
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 844)
        self.assertAlmostEqual(float(np.nanmean(opinion_score_3darray)),
                               0.8107588856868396, delta=0.0001)
        self.assertEqual(np.nanmin(opinion_score_3darray), 0.5)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)

    def test_dataset_to_pc_dataset_sampling_rate_greater_than_1(self):
        import random
        random.seed(0)
        # A rate > 1 matches the full-sampling aggregates; presumably it is
        # clamped to 1.0 -- TODO confirm in to_pc_dataset.
        pc_dataset = self.dataset_reader.to_pc_dataset(sampling_rate=2.1)
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 8242)
        self.assertAlmostEqual(float(np.nanmean(opinion_score_3darray)),
                               0.816039603960396, delta=0.0001)
        self.assertEqual(np.nanmin(opinion_score_3darray), 0.5)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)

    def test_dataset_to_pc_dataset_per_asset_sampling_rates(self):
        import random
        random.seed(0)
        # Full sampling for the first 39 assets, 10% for the remaining 40.
        pc_dataset = self.dataset_reader.to_pc_dataset(per_asset_sampling_rates=np.hstack([np.ones(39), np.ones(40) * 0.1]))
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 4546)
        self.assertAlmostEqual(float(np.nanmean(opinion_score_3darray)),
                               0.8116303960042811, delta=0.01)
        self.assertEqual(np.nanmin(opinion_score_3darray), 0.5)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)

    def test_dataset_to_pc_dataset_per_asset_cointoss_rates(self):
        import random
        random.seed(0)
        pc_dataset = self.dataset_reader.to_pc_dataset(per_asset_cointoss_rates=np.hstack([np.ones(39), np.ones(40) * 0.1]))
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 8242)
        self.assertAlmostEqual(float(np.nanmean(opinion_score_3darray)),
                               0.91102022769979, delta=0.01)
        self.assertEqual(np.nanmin(opinion_score_3darray), 0.5)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)

    def test_dataset_to_pc_dataset_per_asset_noise_levels(self):
        import random
        random.seed(0)
        # With per-asset noise applied, all entries become 1.0 here.
        pc_dataset = self.dataset_reader.to_pc_dataset(per_asset_noise_levels=np.hstack([np.ones(39), np.ones(40) * 0.1]))
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 8242)
        self.assertAlmostEqual(float(np.nanmean(opinion_score_3darray)),
                               1.0, delta=0.0001)
        self.assertEqual(np.nanmin(opinion_score_3darray), 1.0)
        self.assertEqual(np.nanmax(opinion_score_3darray), 1.0)

    def test_dataset_to_pc_dataset_within_subject_per_asset_mean_scores(self):
        # Forcing equal mean scores (all ones) makes every comparison a tie (0.5).
        pc_dataset = self.dataset_reader.to_pc_dataset(pc_type='within_subject', per_asset_mean_scores=np.ones(79))
        pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
        opinion_score_3darray = pc_dataset_reader.opinion_score_3darray
        self.assertEqual(np.nansum(opinion_score_3darray), 80106)
        self.assertEqual(np.nanmean(opinion_score_3darray), 0.5)
        self.assertEqual(np.nanmin(opinion_score_3darray), 0.5)
        self.assertEqual(np.nanmax(opinion_score_3darray), 0.5)