Example #1
 def test_persubject_subjective_model_output(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = PerSubjectModel(dataset_reader)
     subjective_model.run_modeling(transform_final={'p1':25, 'p0':-25})
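     # 'p1'/'p0' are presumably linear-mapping coefficients, i.e.
     # score' = p1 * score + p0, so a raw score of 5 maps to
     # 25 * 5 - 25 = 100, matching the assertion below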
     subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath)
     self.assertTrue(os.path.exists(self.output_dataset_filepath))
     dataset2 = import_python_file(self.output_dataset_filepath)
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('os' not in dis_video)
     self.assertAlmostEqual(dis_video['groundtruth'], 100.0, places=4)
Example #2
 def test_mos_subjective_model_output(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
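     # the MOS model recovers per-video mean opinion scores ('groundtruth')
     # plus a per-video uncertainty ('groundtruth_std'), asserted below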
     subjective_model.run_modeling()
     subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath)
     self.assertTrue(os.path.exists(self.output_dataset_filepath))
     dataset2 = import_python_file(self.output_dataset_filepath)
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('groundtruth_std' in dis_video)
     self.assertTrue('os' not in dis_video)
     self.assertAlmostEqual(dis_video['groundtruth'], 4.884615384615385, places=4)
     self.assertAlmostEqual(dis_video['groundtruth_std'], 0.06389710663783135, places=4)
Example #3
 def _import_dataset_and_filter(cls, dataset_filepath, content_ids, asset_ids):
     dataset = import_python_file(dataset_filepath)
     if content_ids is not None:
         dataset.dis_videos = [dis_video for dis_video in dataset.dis_videos if dis_video['content_id'] in content_ids]
     if asset_ids is not None:
         dataset.dis_videos = [dis_video for dis_video in dataset.dis_videos if dis_video['asset_id'] in asset_ids]
     return dataset
Example #4
 def setUp(self):
     dataset_filepath = SurealConfig.test_resource_path(
         'NFLX_dataset_public_raw.py')
     dataset = import_python_file(dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     pc_dataset = dataset_reader.to_pc_dataset(pc_type='within_subject')
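     # to_pc_dataset presumably synthesizes paired-comparison data from the
     # raw scores; 'within_subject' suggests only scores from the same
     # subject are paired against each other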
     self.pc_dataset_reader = PairedCompDatasetReader(pc_dataset)
Example #5
 def test_mos_subjective_model_output_custom_resampling(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     subjective_model.run_modeling()
     subjective_model.to_aggregated_dataset_file(self.output_dataset_filepath, resampling_type='lanczos')
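     # the custom resampling_type is expected to be carried through into the
     # aggregated output dataset (asserted below)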
     self.assertTrue(os.path.exists(self.output_dataset_filepath))
     dataset2 = import_python_file(self.output_dataset_filepath)
     self.assertFalse(hasattr(dataset2, 'quality_height'))
     self.assertFalse(hasattr(dataset2, 'quality_width'))
     self.assertEqual(dataset2.resampling_type, 'lanczos')
     dis_video = dataset2.dis_videos[0]
     self.assertTrue('groundtruth' in dis_video)
     self.assertTrue('groundtruth_std' in dis_video)
     self.assertTrue('os' not in dis_video)
     self.assertAlmostEqual(dis_video['groundtruth'], 4.884615384615385, places=4)
     self.assertAlmostEqual(dis_video['groundtruth_std'], 0.08461538461538462, places=4)
Example #6
 def test_mos_subjective_model_normalize_final(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling(normalize_final=True)
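     # normalize_final=True presumably z-scores the recovered quality scores,
     # hence the zero-mean assertion below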
     scores = result['quality_scores']
     self.assertAlmostEqual(scores[0], 1.1666952279897338, places=4)
     self.assertAlmostEqual(scores[10], -0.56729217507757768, places=4)
     self.assertAlmostEqual(np.mean(scores), 0.0, places=4)
Example #7
 def test_mos_subjective_model_transform_final(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling(transform_final={'p1': 10, 'p0': 1})
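     # linear transform score' = 10 * score + 1: e.g. the raw MOS of
     # 4.884615... from Example #15 maps to 49.846153..., as asserted below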
     scores = result['quality_scores']
     self.assertAlmostEqual(scores[0], 49.84615384615385, places=4)
     self.assertAlmostEqual(scores[10], 29.076923076923073, places=4)
     self.assertAlmostEqual(np.mean(scores), 35.871794871794876, places=4)
Example #8
 def test_mos_subjective_model_normalize_final(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling(normalize_final=True)
     scores = result['quality_scores']
     self.assertAlmostEqual(scores[0], 1.1318646945818083, places=4)
     self.assertAlmostEqual(scores[10], -1.2400334499143002, places=4)
     self.assertAlmostEqual(np.mean(scores), 0.0, places=4)
Example #9
    def setUp(self):
        dataset2_filepath = SurealConfig.test_resource_path('test_dataset_os_as_dict.py')
        dataset2 = import_python_file(dataset2_filepath)

        np.random.seed(0)
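        # 'selected_subjects' presumably restricts the reader to the scores
        # of the listed subjects (here, subjects 1 and 2 only)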
        info_dict2 = {
            'selected_subjects': np.array([1, 2]),
        }

        self.dataset2_reader = SelectSubjectRawDatasetReader(dataset2, input_dict=info_dict2)
Example #10
    def setUp(self):
        dataset2_filepath = SurealConfig.test_resource_path('quality_variation_2017_agh_tv_dataset.py')
        dataset2 = import_python_file(dataset2_filepath)

        np.random.seed(0)
        info_dict2 = {
            'selected_subjects': list(range(13)),
        }

        self.dataset2_reader = SelectSubjectRawDatasetReader(dataset2, input_dict=info_dict2)
Example #11
    def setUp(self):
        dataset_filepath = SurealConfig.test_resource_path('NFLX_dataset_public_raw.py')
        dataset = import_python_file(dataset_filepath)

        np.random.seed(0)
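        # 'selected_dis_videos' presumably restricts the reader to the first
        # 15 distorted videos of the dataset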
        info_dict = {
            'selected_dis_videos': range(15),
        }

        self.dataset_reader = SelectDisVideoRawDatasetReader(dataset, input_dict=info_dict)
Example #12
    def setUp(self):
        dataset_filepath = SurealConfig.test_resource_path('NFLX_dataset_public_raw.py')
        dataset = import_python_file(dataset_filepath)

        np.random.seed(0)
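        # CorruptSubjectRawDatasetReader presumably randomizes the scores of
        # the selected subjects (hence the seeded RNG above)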
        info_dict = {
            'selected_subjects': range(5),
        }

        self.dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
Example #13
 def test_observer_content_aware_subjective_model_subjreject(self):
     dataset = import_python_file(self.dataset_filepath)
     np.random.seed(0)
     info_dict = {
         'selected_subjects': range(5),
     }
     dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
     subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
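     # subject rejection is presumably unsupported by the MLE model, so
     # run_modeling is expected to assert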
     with self.assertRaises(AssertionError):
         subjective_model.run_modeling(subject_rejection=True)
Example #14
    def setUp(self):
        dataset_filepath = SurealConfig.test_resource_path('NFLX_dataset_public_raw.py')
        dataset = import_python_file(dataset_filepath)

        np.random.seed(0)
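        # 'corrupt_probability' presumably corrupts each opinion score
        # independently with the given probability (0.1)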
        info_dict = {
            'corrupt_probability': 0.1,
        }

        self.dataset_reader = CorruptDataRawDatasetReader(dataset, input_dict=info_dict)
Example #15
 def test_mos_subjective_model(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling()
     scores = result['quality_scores']
     self.assertAlmostEqual(scores[0], 4.884615384615385, places=4)
     self.assertAlmostEqual(scores[10], 2.8076923076923075, places=4)
     self.assertAlmostEqual(np.mean(scores), 3.4871794871794877, places=4)
     scores_std = result['quality_scores_std']
     self.assertAlmostEqual(np.mean(scores_std), 0.13125250408357622, places=4)
Example #16
 def test_mos_subjective_model(self):
     dataset = import_python_file(self.dataset_filepath)
     dataset_reader = RawDatasetReader(dataset)
     subjective_model = MosModel(dataset_reader)
     result = subjective_model.run_modeling()
     scores = result['quality_scores']
     self.assertAlmostEqual(scores[0], 4.884615384615385, places=4)
     self.assertAlmostEqual(scores[10], 2.0769230769230771, places=4)
     self.assertAlmostEqual(np.mean(scores), 3.544790652385589, places=4)
     scores_std = result['quality_scores_std']
     self.assertAlmostEqual(np.mean(scores_std), 0.12986637295658307, places=4)
Example #17
    def test_observer_aware_subjective_model_corruptdata(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.mean(result['quality_scores']), 3.5573073781669944, places=4) # 3.5482845335713469
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.3559834438740614, places=4) # 1.4355485462027884
Example #18
 def _import_dataset_and_filter(cls, dataset_filepath, content_ids,
                                asset_ids):
     dataset = import_python_file(dataset_filepath)
     if content_ids is not None:
         # wrap filter() in list(): in Python 3 it returns a lazy iterator,
         # which would break later indexing into dis_videos
         dataset.dis_videos = list(filter(
             lambda dis_video: dis_video['content_id'] in content_ids,
             dataset.dis_videos))
     if asset_ids is not None:
         dataset.dis_videos = list(filter(
             lambda dis_video: dis_video['asset_id'] in asset_ids,
             dataset.dis_videos))
     return dataset
Example #19
    def test_mos_subjective_model_corruptdata(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        self.assertAlmostEqual(np.mean(scores), 3.5447906523855899, places=4)
        self.assertAlmostEqual(np.var(scores), 0.95893305294535369, places=4) # 1.4012220200639218
Example #20
    def test_subjrejmos_subjective_model_corruptdata_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = SubjrejMosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        self.assertAlmostEqual(np.mean(scores), 3.5611814345991566, places=4)
        self.assertAlmostEqual(np.var(scores), 1.1049505732699529, places=4) # 1.4012220200639218
Example #21
    def test_subjrejdmos_subjective_model_corruptdata_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = SubjrejDmosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        self.assertAlmostEqual(np.mean(scores), 4.0246673158065542, places=4)
        self.assertAlmostEqual(np.var(scores), 1.0932580358187849, places=4) # 1.4012220200639218
Example #22
    def test_zscoresubjrejdmos_subjective_model_corruptdata_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = ZscoringSubjrejDmosModel(dataset_reader)
        result = subjective_model.run_modeling()
        scores = result['quality_scores']

        self.assertAlmostEqual(np.mean(scores), 0, places=4)
        self.assertAlmostEqual(np.var(scores), 0.66405245792414114, places=4) # 1.4012220200639218
Example #23
    def test_zscore_mos_subjective_model_corruptdata_subjreject(self):
        dataset = import_python_file(self.dataset_filepath)
        np.random.seed(0)
        info_dict = {
            'selected_subjects': range(5),
        }
        dataset_reader = CorruptSubjectRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MosModel(dataset_reader)
        result = subjective_model.run_modeling(zscore_mode=True, subject_rejection=True)
        scores = result['quality_scores']

        self.assertAlmostEqual(np.mean(scores), 0.0, places=4)
        self.assertAlmostEqual(np.var(scores), 0.66670826882879042, places=4)
Example #24
    def setUp(self):
        dataset_filepath = SurealConfig.test_resource_path('NFLX_dataset_public_raw.py')
        dataset = import_python_file(dataset_filepath)

        np.random.seed(0)
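        # SyntheticRawDatasetReader presumably regenerates opinion scores from
        # the supplied per-video quality, per-observer bias and inconsistency,
        # and per-content bias and ambiguity, instead of using the dataset's
        # real scores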
        info_dict = {
            'quality_scores': np.random.randint(1, 6, 79),
            'observer_bias': np.random.normal(0, 1, 26),
            'observer_inconsistency': np.abs(np.random.normal(0, 0.1, 26)),
            'content_bias': np.zeros(9),
            'content_ambiguity': np.zeros(9),
        }

        self.dataset_reader = SyntheticRawDatasetReader(dataset, input_dict=info_dict)
Example #25
    def test_observer_content_aware_subjective_model_missingdata(self):

        dataset = import_python_file(self.dataset_filepath)

        np.random.seed(0)
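        # 'missing_probability' presumably drops each opinion score
        # independently with the given probability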
        info_dict = {
            'missing_probability': 0.1,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)

        subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['content_ambiguity']), 3.9104244772977128, places=4)
        self.assertAlmostEqual(np.var(result['content_ambiguity']), 0.0037713583509767193, places=4)

        self.assertAlmostEqual(np.sum(result['observer_bias']), -0.21903272050455846, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.084353684687185043, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 9.8168943054654481, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.028159236075789944, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 280.05548186797336, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4339487982797514, places=4)

        np.random.seed(0)
        info_dict = {
            'missing_probability': 0.5,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)

        subjective_model = MaximumLikelihoodEstimationModel(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['content_ambiguity']), 2.63184284168883, places=4)
        self.assertAlmostEqual(np.var(result['content_ambiguity']), 0.019164097909450246, places=4)

        self.assertAlmostEqual(np.sum(result['observer_bias']), 0.2263148440748638, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.070613033112114504, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 12.317917502439435, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.029455722248727296, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 280.29962156788139, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4717366222424826, places=4)
Example #26
def visualize_pc_dataset(dataset_filepath):

    dataset = import_python_file(dataset_filepath)
    dataset_reader = PairedCompDatasetReader(dataset)
    tensor_pvs_pvs_subject = dataset_reader.opinion_score_3darray
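    # opinion_score_3darray is presumably a PVS x PVS x subject tensor whose
    # entry [j, j', s] records subject s preferring PVS j over PVS j'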

    plt.figure()
    # plot the rate of winning x, 0 <= x <= 1.0, of one PVS compared against another PVS
    mtx_pvs_pvs = np.nansum(tensor_pvs_pvs_subject, axis=2) \
                  / (np.nansum(tensor_pvs_pvs_subject, axis=2) +
                     np.nansum(tensor_pvs_pvs_subject, axis=2).transpose())
    plt.imshow(mtx_pvs_pvs, interpolation='nearest')
    plt.title(r'Paired Comparison Winning Rate')
    plt.ylabel(r"PVS ($j$)")
    plt.xlabel(r"PVS ($j'$) [Compared Against]")
    plt.set_cmap('jet')
    plt.colorbar()
    plt.tight_layout()
Example #27
    def test_observer_aware_subjective_model_missingdata(self):

        dataset = import_python_file(self.dataset_filepath)

        np.random.seed(0)
        info_dict = {
            'missing_probability': 0.1,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']), -0.18504017984241944, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.087350553292201705, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 15.520738471447299, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.010940587327083341, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 279.94975274863879, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4325574378911554, places=4)

        np.random.seed(0)
        info_dict = {
            'missing_probability': 0.5,
        }
        dataset_reader = MissingDataRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']), 0.057731868199093525, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.081341845650928557, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 14.996238224489693, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.013666025579465165, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 280.67100837103203, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.4637917512768972, places=4)
Example #28
    def test_observer_aware_subjective_model_synthetic(self):

        np.random.seed(0)

        dataset = import_python_file(self.dataset_filepath)
        info_dict = {
            'quality_scores': np.random.uniform(1, 5, 79),
            'observer_bias': np.random.normal(0, 1, 26),
            'observer_inconsistency': np.abs(np.random.uniform(0.4, 0.6, 26)),
            'content_bias': np.zeros(9),
            'content_ambiguity': np.zeros(9),
        }
        dataset_reader = SyntheticRawDatasetReader(dataset, input_dict=info_dict)
        subjective_model = MaximumLikelihoodEstimationModelReduced(dataset_reader)
        result = subjective_model.run_modeling()

        self.assertAlmostEqual(np.sum(result['observer_bias']), -0.90138622499935517, places=4)
        self.assertAlmostEqual(np.var(result['observer_bias']), 0.84819162765420342, places=4)

        self.assertAlmostEqual(np.sum(result['observer_inconsistency']), 12.742288471632817, places=4)
        self.assertAlmostEqual(np.var(result['observer_inconsistency']), 0.0047638169604076975, places=4)

        self.assertAlmostEqual(np.sum(result['quality_scores']), 236.78529213581052, places=4)
        self.assertAlmostEqual(np.var(result['quality_scores']), 1.3059726132293354, places=4)
Example #29
def run_subjective_models(dataset_filepath,
                          subjective_model_classes,
                          do_plot=None,
                          **kwargs):
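    """Run each model class in subjective_model_classes on the dataset and
    optionally plot raw scores, recovered quality scores, subject
    bias/inconsistency, and content ambiguity. do_plot may be 'all' or a
    list of plot names; recognized kwargs include 'dataset_reader_class',
    'dataset_reader_info_dict' and 'plot_type' ('errorbar' or 'bar')."""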

    if do_plot is None:
        do_plot = []

    dataset_reader_class = kwargs.get('dataset_reader_class', RawDatasetReader)
    dataset_reader_info_dict = kwargs.get('dataset_reader_info_dict', {})
    plot_type = kwargs.get('plot_type', 'errorbar')

    colors = ['black', 'gray', 'blue', 'red'] * 2

    dataset = import_python_file(dataset_filepath)
    dataset_reader = dataset_reader_class(dataset,
                                          input_dict=dataset_reader_info_dict)

    # materialize as lists (not lazy map objects) so they can be zipped over
    # multiple times in the plotting blocks below
    subjective_models = [subjective_model_class(dataset_reader)
                         for subjective_model_class in subjective_model_classes]

    results = [subjective_model.run_modeling(**kwargs)
               for subjective_model in subjective_models]

    if do_plot == 'all' or 'raw_scores' in do_plot:
        # ===== plot raw scores
        plt.figure(figsize=(5, 2.5))
        mtx = dataset_reader.opinion_score_2darray.T
        S, E = mtx.shape
        plt.imshow(mtx, interpolation='nearest')
        # xs = np.array(range(S)) + 1
        # my_xticks = map(lambda x: "#{}".format(x), xs)
        # plt.yticks(np.array(xs), my_xticks, rotation=0)
        plt.title(r'Raw Opinion Scores ($x_{es}$)')
        plt.xlabel(r'Impaired Video Encodes ($e$)')
        plt.ylabel(r'Test Subjects ($s$)')
        plt.set_cmap('gray')
        plt.tight_layout()

    if do_plot == 'all' or 'quality_scores' in do_plot:
        # ===== plot quality scores =====
        bar_width = 0.4
        fig, ax_quality = plt.subplots(figsize=(10, 2.5), nrows=1)
        xs = None
        shift_count = 0
        my_xticks = None
        for subjective_model, result in zip(subjective_models, results):
            if 'quality_scores' in result:
                quality = result['quality_scores']
                xs = range(len(quality))

                # plt.plot(result['quality_scores'], label=subjective_model.TYPE)

                if plot_type == 'bar':
                    ax_quality.bar(np.array(xs) + shift_count * bar_width,
                                   quality,
                                   width=bar_width,
                                   color=colors[shift_count],
                                   label=subjective_model.TYPE)
                elif plot_type == 'errorbar':
                    if 'quality_scores_std' in result:
                        quality_error = np.array(
                            result['quality_scores_std']) * 1.96  # 95% C.I.
                        ax_quality.errorbar(np.array(xs) +
                                            shift_count * bar_width + 0.2,
                                            quality,
                                            yerr=quality_error,
                                            fmt='.',
                                            color=colors[shift_count],
                                            label=subjective_model.TYPE)
                    else:
                        ax_quality.plot(np.array(xs) +
                                        shift_count * bar_width + 0.2,
                                        quality,
                                        '.',
                                        color=colors[shift_count],
                                        label=subjective_model.TYPE)
                else:
                    raise AssertionError(
                        "Unknown plot_type: {}".format(plot_type))

                ax_quality.set_xlabel(r'Impaired Video Encodes ($e$)')
                ax_quality.set_title(r'Recovered Quality Score ($x_e$)')
                ax_quality.set_xlim([min(xs), max(xs) + 1])
                shift_count += 1
        ax_quality.grid()
        ax_quality.legend(loc=1, ncol=2, frameon=True)
        plt.tight_layout()

    if do_plot == 'all' or 'subject_scores' in do_plot:

        # ===== plot subject bias and inconsistency =====
        bar_width = 0.4
        figsize = (5, 3.5)
        # figsize = (7, 10)
        fig, (ax_bias, ax_inconsty) = plt.subplots(figsize=figsize,
                                                   nrows=2,
                                                   sharex=True)
        xs = None
        shift_count = 0
        my_xticks = None
        for subjective_model, result in zip(subjective_models, results):

            if 'observer_bias' in result:
                bias = result['observer_bias']
                xs = range(len(bias))

                if plot_type == 'bar':
                    ax_bias.bar(np.array(xs) + shift_count * bar_width,
                                bias,
                                width=bar_width,
                                color=colors[shift_count],
                                label=subjective_model.TYPE)
                elif plot_type == 'errorbar':
                    if 'observer_bias_std' in result:
                        bias_error = np.array(
                            result['observer_bias_std']) * 1.96  # 95% C.I.
                        ax_bias.errorbar(np.array(xs) +
                                         shift_count * bar_width + 0.2,
                                         bias,
                                         yerr=bias_error,
                                         fmt='.',
                                         color=colors[shift_count],
                                         label=subjective_model.TYPE)
                    else:
                        ax_bias.plot(np.array(xs) + shift_count * bar_width +
                                     0.2,
                                     bias,
                                     '.',
                                     color=colors[shift_count],
                                     label=subjective_model.TYPE)
                else:
                    raise AssertionError(
                        "Unknown plot_type: {}".format(plot_type))

                ax_inconsty.set_xlim([min(xs), max(xs) + 1])
                ax_bias.set_title(r'Subject Bias ($b_s$)')
                ax_bias.grid()

                if 'observers' in result:
                    observers = result['observers']
                    assert len(bias) == len(observers)
                    my_xticks = observers
                    plt.xticks(np.array(xs) + 0.01, my_xticks, rotation=90)

            if 'observer_inconsistency' in result:
                inconsty = result['observer_inconsistency']
                xs = range(len(inconsty))

                if plot_type == 'bar':
                    ax_inconsty.bar(np.array(xs) + shift_count * bar_width,
                                    inconsty,
                                    width=bar_width,
                                    color=colors[shift_count],
                                    label=subjective_model.TYPE)
                elif plot_type == 'errorbar':
                    if 'observer_inconsistency_std' in result:
                        inconsistency_error = np.array(
                            result['observer_inconsistency_std']
                        ) * 1.96  # 95% C.I.
                        ax_inconsty.errorbar(np.array(xs) +
                                             shift_count * bar_width + 0.2,
                                             inconsty,
                                             yerr=inconsistency_error,
                                             fmt='.',
                                             color=colors[shift_count],
                                             label=subjective_model.TYPE)
                    else:
                        ax_inconsty.plot(np.array(xs) +
                                         shift_count * bar_width + 0.2,
                                         inconsty,
                                         '.',
                                         color=colors[shift_count],
                                         label=subjective_model.TYPE)
                else:
                    raise AssertionError(
                        "Unknown plot_type: {}".format(plot_type))

                ax_inconsty.set_xlim([min(xs), max(xs) + 1])
                ax_inconsty.set_title(r'Subject Inconsistency ($v_s$)')
                ax_inconsty.legend(loc=2, ncol=2, frameon=True)
                ax_inconsty.grid()

            if 'observer_bias' in result:
                shift_count += 1

        if xs and my_xticks is None:
            my_xticks = ["#{}".format(x + 1) for x in xs]
            plt.xticks(np.array(xs) + 0.3, my_xticks, rotation=90)
        plt.tight_layout()

    if do_plot == 'all' or 'content_scores' in do_plot:

        # ===== plot content ambiguity =====
        bar_width = 0.4
        fig, ax_ambgty = plt.subplots(figsize=(5, 3.5), nrows=1)
        xs = None
        shift_count = 0
        for subjective_model, result in zip(subjective_models, results):
            if 'content_ambiguity' in result:
                ambgty = result['content_ambiguity']
                xs = range(len(ambgty))

                if plot_type == 'bar':
                    ax_ambgty.bar(np.array(xs) + shift_count * bar_width,
                                  ambgty,
                                  width=bar_width,
                                  color=colors[shift_count],
                                  label=subjective_model.TYPE)
                elif plot_type == 'errorbar':
                    if 'content_ambiguity_std' in result:
                        ambiguity_error = np.array(
                            result['content_ambiguity_std']) * 1.96  # 95% C.I.
                        ax_ambgty.errorbar(np.array(xs) +
                                           shift_count * bar_width + 0.2,
                                           ambgty,
                                           yerr=ambiguity_error,
                                           fmt='.',
                                           color=colors[shift_count],
                                           label=subjective_model.TYPE)
                    else:
                        ax_ambgty.plot(np.array(xs) + shift_count * bar_width +
                                       0.2,
                                       ambgty,
                                       '.',
                                       color=colors[shift_count],
                                       label=subjective_model.TYPE)
                else:
                    raise AssertionError(
                        "Unknown plot_type: {}".format(plot_type))

                shift_count += 1
                ax_ambgty.set_title(r'Content Ambiguity ($a_c$)')
                ax_ambgty.grid()
        if xs:
            my_xticks = ['' for _ in range(len(xs))]
            for ref_video in dataset_reader.dataset.ref_videos:
                my_xticks[ref_video['content_id']] = ref_video['content_name']
            # rotation = 75
            rotation = 90
            plt.xticks(np.array(xs) + 0.01, my_xticks, rotation=rotation)
        ax_ambgty.legend(loc=1, ncol=2, frameon=True)
        plt.tight_layout()

    return dataset, subjective_models, results
Example #30
 def setUp(self):
     dataset_filepath = SurealConfig.test_resource_path(
         'NFLX_dataset_public_raw_PARTIAL.py')
     self.dataset = import_python_file(dataset_filepath)
     self.dataset_reader = RawDatasetReader(self.dataset)