Пример #1
0
 def test_table_calc_results(self):
     """Calc should return the results of the first row whose conditions all match."""
     first_row = Row([IsNotNone(), IsGreatThan(0)],
                     [Result(2), Result(0)])
     fallback_row = Row([IsNone(), IsEqual(0)], [Result(10), Result(1)])
     self.table.rows = [first_row, fallback_row]
     # Both inputs satisfy the first row's conditions.
     self.assertEqual(self.table.calc([3, 2]), [2, 0])
     # None/0 fail the first row and fall through to the second.
     self.assertEqual(self.table.calc([None, 0]), [10, 1])
Пример #2
0
class MatchStruct(Thread):
    """Thread that drives a Match plus its Result and MatchRatio workers for one URL."""

    def __init__(self, url, ratio_percents):
        super(MatchStruct, self).__init__()
        self.url = url
        self.match = Match(url)
        self.result = Result(url)
        self.ratio = MatchRatio(url, ratio_percents)

    def run(self):
        """Start all three workers and poll once per second until any of them stops."""
        self.match.start()
        # Fetch the identifying keys shared by the ratio and result workers.
        xhash, id_sport, id_match, id_version = match_data_request(self.url)
        self.ratio.add_keys(xhash, id_sport, id_match, id_version)
        self.result.add_keys(xhash, id_sport, id_match)
        self.ratio.start()
        self.result.start()
        # is_alive() replaces the deprecated camelCase isAlive(), which was
        # removed in Python 3.9 (is_alive exists since Python 2.6).
        while self.match.is_alive() and self.result.is_alive() and self.ratio.is_alive():
            print(self.match.is_alive() and self.result.is_alive() and self.ratio.is_alive())
            print(self.result.is_alive())
            print(self.ratio.is_alive())
            time.sleep(1)

    def print_match(self):
        """Delegate display to each worker."""
        self.match.show_match()
        self.result.show_result()
        self.ratio.show_ratious()
Пример #3
0
    def _run_on_asset(self, asset):
        """Bypass Executor._run_on_asset and QualityRunner._read_result:
        assemble VMAF features, predict quality with the trained model,
        clip the score, and wrap everything in a Result."""
        assembler = self._get_vmaf_feature_assembler_instance(asset)
        assembler.run()
        feature_result = assembler.results[0]

        model = self._load_model()
        xs = TrainTestModel.get_perframe_xs_from_result(feature_result)
        # Predict, then apply 'score_clip'.
        ys_pred = self.clip_score(model, model.predict(xs))

        # Merge all raw feature scores with the predicted quality score.
        result_dict = dict(feature_result.result_dict)
        result_dict[self.get_scores_key()] = ys_pred
        return Result(asset, self.executor_id, result_dict)
Пример #4
0
    def _run_on_asset(self, asset):
        """Bypass Executor._run_on_asset and QualityRunner._read_result:
        run the feature assembler, predict quality with the loaded model,
        and return a Result holding both features and predicted scores."""
        assembler = self._get_vmaf_feature_assembler_instance(asset)
        assembler.run()
        feature_result = assembler.results[0]
        model = self._load_model(asset)
        xs = model.get_per_unit_xs_from_a_result(feature_result)

        # Optional flags default to False when the key (or the dict) is absent.
        opts = self.optional_dict if self.optional_dict is not None else {}
        disable_clip_score = opts.get('disable_clip_score', False)
        enable_transform_score = opts.get('enable_transform_score', False)

        ys_pred = self.predict_with_model(
            model,
            xs,
            disable_clip_score=disable_clip_score,
            enable_transform_score=enable_transform_score)

        result_dict = dict(feature_result.result_dict)  # feature scores
        result_dict[self.get_scores_key()] = ys_pred  # predicted quality score
        return Result(asset, self.executor_id, result_dict)
Пример #5
0
    def _run_on_asset(self, asset):
        """Bypass Executor._run_on_asset: the quality score is simply the
        named feature's score pulled out of the assembled feature result."""
        assembler = self._get_vmaf_feature_assembler_instance(asset)
        assembler.run()
        feature_result = assembler.results[0]
        feature_key = VmafFeatureExtractor.get_scores_key(self.FEATURE_NAME)
        result_dict = {self.get_scores_key(): feature_result[feature_key]}
        return Result(asset, self.executor_id, result_dict)
Пример #6
0
 def _run_on_asset(self, asset):
     """Bypass Executor._run_on_asset: copy the assembled feature scores
     into the result and expose the ms_ssim feature score under this
     runner's own scores key.

     Returns a Result for the given asset.
     """
     vmaf_fassembler = self._get_feature_assembler_instance(asset)
     vmaf_fassembler.run()
     feature_result = vmaf_fassembler.results[0]
     # dict(...) already copies the entries, so the original's explicit
     # .copy() before update() was redundant.
     result_dict = dict(feature_result.result_dict)
     # pop() both fetches the ms_ssim score and removes the now-redundant
     # feature entry in one step (the original assigned then deleted).
     ms_ssim_key = MsSsimFeatureExtractor.get_scores_key('ms_ssim')
     result_dict[self.get_scores_key()] = result_dict.pop(ms_ssim_key)
     return Result(asset, self.executor_id, result_dict)
Пример #7
0
    def load(self, asset, executor_id):
        """Load a stored Result for (asset, executor_id), or None when the
        result file does not exist.

        The file holds a Python dict literal; ast.literal_eval parses it
        safely (no code execution) and the dict is round-tripped through a
        DataFrame to rebuild the Result.
        """
        import pandas as pd
        import ast
        path = self._get_result_file_path2(asset, executor_id)
        if not os.path.isfile(path):
            return None
        with open(path, "rt") as fh:
            payload = ast.literal_eval(fh.read())
        return Result.from_dataframe(pd.DataFrame.from_dict(payload))
Пример #8
0
    def load(self, asset, executor_id):
        """Load a previously stored Result for (asset, executor_id).

        Returns None when no result file exists. The stored file contains a
        Python dict literal; ast.literal_eval parses it safely (no code
        execution), and the dict is round-tripped through a DataFrame to
        rebuild the Result.
        """
        import pandas as pd
        import ast
        result_file_path = self._get_result_file_path2(asset, executor_id)

        if not os.path.isfile(result_file_path):
            return None

        with open(result_file_path, "rt") as result_file:
            df = pd.DataFrame.from_dict(ast.literal_eval(result_file.read()))
            result = Result.from_dataframe(df)
        return result
Пример #9
0
    def test_todataframe_fromdataframe(self):
        """Round-trip the Result through to_dataframe / from_dataframe and
        verify frame contents, per-key score means, asset metadata, and that
        the reconstructed Result compares equal to the original."""
        # print(...) is valid in both Python 2 and 3; the original used the
        # Python-2-only print statement.
        print('test on result to/from dataframe...')
        df = self.result.to_dataframe()
        df_vmaf = df.loc[df['scores_key'] == 'VMAF_legacy_scores']
        df_adm = df.loc[df['scores_key'] == 'VMAF_feature_adm_scores']
        df_vif = df.loc[df['scores_key'] == 'VMAF_feature_vif_scores']
        df_ansnr = df.loc[df['scores_key'] == 'VMAF_feature_ansnr_scores']
        df_motion = df.loc[df['scores_key'] == 'VMAF_feature_motion_scores']
        df_adm_den = df.loc[df['scores_key'] == 'VMAF_feature_adm_den_scores']
        # assertEqual/assertAlmostEqual replace the deprecated *Equals aliases.
        self.assertEqual(len(df), 37)
        self.assertEqual(len(df_vmaf), 1)
        self.assertEqual(len(df_adm), 1)
        self.assertEqual(len(df_vif), 1)
        self.assertEqual(len(df_ansnr), 1)
        self.assertEqual(len(df_motion), 1)
        self.assertAlmostEqual(np.mean(df_vmaf.iloc[0]['scores']), 44.4942308947, places=4)
        self.assertAlmostEqual(np.mean(df_adm.iloc[0]['scores']), 0.813856666667, places=4)
        self.assertAlmostEqual(np.mean(df_vif.iloc[0]['scores']), 0.156834666667, places=4)
        self.assertAlmostEqual(np.mean(df_ansnr.iloc[0]['scores']), 7.92623066667, places=4)
        self.assertAlmostEqual(np.mean(df_motion.iloc[0]['scores']), 12.5548366667, places=4)
        self.assertAlmostEqual(np.mean(df_adm_den.iloc[0]['scores']), 30814.9100813, places=3)
        self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_legacy_scores', 'scores')), 44.4942308947, places=4)
        self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_adm_scores', 'scores')), 0.813856666667, places=4)
        self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_vif_scores', 'scores')), 0.156834666667, places=4)
        self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_ansnr_scores', 'scores')), 7.92623066667, places=4)
        self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_motion_scores', 'scores')), 12.5548366667, places=4)
        self.assertEqual(df.iloc[0]['dataset'], 'test')
        self.assertEqual(df.iloc[0]['content_id'], 0)
        self.assertEqual(df.iloc[0]['asset_id'], 0)
        self.assertEqual(df.iloc[0]['ref_name'], 'checkerboard_1920_1080_10_3_0_0.yuv')
        self.assertEqual(df.iloc[0]['dis_name'], 'checkerboard_1920_1080_10_3_1_0.yuv')
        self.assertEqual(
            df.iloc[0]['asset'],
            '{"asset_dict": {"height": 1080, "use_path_as_workpath": 1, "width": 1920}, "asset_id": 0, "content_id": 0, "dataset": "test", "dis_path": "checkerboard_1920_1080_10_3_1_0.yuv", "ref_path": "checkerboard_1920_1080_10_3_0_0.yuv", "workdir": ""}')
        self.assertEqual(df.iloc[0]['executor_id'], 'VMAF_legacy_V1.2')

        Result._assert_asset_dataframe(df)

        # Reconstruction must compare equal via ==, != and assertEqual alike.
        recon_result = Result.from_dataframe(df)
        self.assertEqual(self.result, recon_result)
        self.assertTrue(self.result == recon_result)
        self.assertFalse(self.result != recon_result)
Пример #10
0
    def test_todataframe_fromdataframe(self):
        """Round-trip the Result through to_dataframe / from_dataframe and
        verify frame contents, per-key score means, asset metadata, and that
        the reconstructed Result compares equal to the original."""
        # print(...) is valid in both Python 2 and 3; the original used the
        # Python-2-only print statement.
        print('test on result to/from dataframe...')
        df = self.result.to_dataframe()
        df_vmaf = df.loc[df['scores_key'] == 'VMAF_legacy_scores']
        df_adm = df.loc[df['scores_key'] == 'VMAF_feature_adm_scores']
        df_vif = df.loc[df['scores_key'] == 'VMAF_feature_vif_scores']
        df_ansnr = df.loc[df['scores_key'] == 'VMAF_feature_ansnr_scores']
        df_motion = df.loc[df['scores_key'] == 'VMAF_feature_motion_scores']
        df_adm_den = df.loc[df['scores_key'] == 'VMAF_feature_adm_den_scores']
        # assertEqual/assertAlmostEqual replace the deprecated *Equals aliases.
        self.assertEqual(len(df), 37)
        self.assertEqual(len(df_vmaf), 1)
        self.assertEqual(len(df_adm), 1)
        self.assertEqual(len(df_vif), 1)
        self.assertEqual(len(df_ansnr), 1)
        self.assertEqual(len(df_motion), 1)
        self.assertAlmostEqual(np.mean(df_vmaf.iloc[0]['scores']), 44.4942308947, places=4)
        self.assertAlmostEqual(np.mean(df_adm.iloc[0]['scores']), 0.813856666667, places=4)
        self.assertAlmostEqual(np.mean(df_vif.iloc[0]['scores']), 0.156834666667, places=4)
        self.assertAlmostEqual(np.mean(df_ansnr.iloc[0]['scores']), 7.92623066667, places=4)
        self.assertAlmostEqual(np.mean(df_motion.iloc[0]['scores']), 12.5548366667, places=4)
        self.assertAlmostEqual(np.mean(df_adm_den.iloc[0]['scores']), 30814.9100813, places=3)
        self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_legacy_scores', 'scores')), 44.4942308947, places=4)
        self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_adm_scores', 'scores')), 0.813856666667, places=4)
        self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_vif_scores', 'scores')), 0.156834666667, places=4)
        self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_ansnr_scores', 'scores')), 7.92623066667, places=4)
        self.assertAlmostEqual(np.mean(Result.get_unique_from_dataframe(df, 'VMAF_feature_motion_scores', 'scores')), 12.5548366667, places=4)
        self.assertEqual(df.iloc[0]['dataset'], 'test')
        self.assertEqual(df.iloc[0]['content_id'], 0)
        self.assertEqual(df.iloc[0]['asset_id'], 0)
        self.assertEqual(df.iloc[0]['ref_name'], 'checkerboard_1920_1080_10_3_0_0.yuv')
        self.assertEqual(df.iloc[0]['dis_name'], 'checkerboard_1920_1080_10_3_1_0.yuv')
        self.assertEqual(
            df.iloc[0]['asset'],
            '{"asset_dict": {"height": 1080, "use_path_as_workpath": 1, "width": 1920}, "asset_id": 0, "content_id": 0, "dataset": "test", "dis_path": "checkerboard_1920_1080_10_3_1_0.yuv", "ref_path": "checkerboard_1920_1080_10_3_0_0.yuv", "workdir": ""}')
        self.assertEqual(df.iloc[0]['executor_id'], 'VMAF_legacy_V1.2')

        Result._assert_asset_dataframe(df)

        # Reconstruction must compare equal via ==, != and assertEqual alike.
        recon_result = Result.from_dataframe(df)
        self.assertEqual(self.result, recon_result)
        self.assertTrue(self.result == recon_result)
        self.assertFalse(self.result != recon_result)
Пример #11
0
    def generate_test_result(self,
                             test_collection: cri.CrCollection,
                             verbose=1,
                             save_to_instance_key=None,
                             exp_key=None,
                             verbose_short_name=None,
                             description='',
                             workers=4,
                             use_multiprocessing=False,
                             params=None) -> Result:
        """
        Generates a Result based on predictions against test_collection.

        When save_to_instance_key is not None, the results are saved to
            <model_key>/<save_to_instance_key>/cr_result.json
        """
        model = self.get_model()
        test_gen = self.get_test_generator(test_collection)
        test_gen.reset()
        # if (verbose): had redundant parentheses.
        if verbose:
            print('Generating predictions for {}'.format(
                self.get_key()).center(80, '-'))
        predictions = model.predict_generator(
            test_gen,
            steps=len(test_gen),
            workers=workers,
            use_multiprocessing=use_multiprocessing,
            verbose=1)

        cr_codes = cri.extract_cr_codes(test_gen.filenames)

        # Default short name: model key plus the analysis timestamp.
        if verbose_short_name is None:
            short_name = self.get_key()
            short_name += ' analyzed on {}'.format(
                datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        else:
            short_name = verbose_short_name

        if params is None:
            params = dict()
        result = Result.from_predictions(predictions, cr_codes, params,
                                         short_name, description)
        if save_to_instance_key:
            result.save(self.get_key(), save_to_instance_key, exp_key)
        return result
Пример #12
0
 def start_threads(self, url):
     """Start the Match/MatchRatio/Result workers for a URL and return the match node."""
     node = Match(url)
     ratio_worker = MatchRatio(url, self.ratio_percents)
     result_worker = Result(url)
     # The match thread must finish before the identifying codes can be read.
     node.start()
     node.join()
     # Request the identifying codes (xhash + ids) here.
     xhash, id_sport, id_match, id_version = node.return_keys()
     ratio_worker.add_keys(xhash, id_sport, id_match, id_version)
     result_worker.add_keys(xhash, id_sport, id_match)
     ratio_worker.start()
     result_worker.start()
     node.add_data(ratio_worker, result_worker)
     return node
Пример #13
0
    def _run_on_asset(self, asset):
        """Bypass Executor._run_on_asset and QualityRunner._read_result:
        assemble VMAF features, rescale them, score each frame with the
        libsvm model, apply the motion-based post correction, and return a
        Result carrying both the features and the quality scores."""
        assembler = self._get_vmaf_feature_assembler_instance(asset)
        assembler.run()
        feature_result = assembler.results[0]

        # =====================================================================

        # SVR predict: load the pre-trained model.
        model = self.svmutil.svm_load_model(self.SVM_MODEL_FILE)

        # Rescale each feature stream into the range the model was trained
        # on, preserving the model's expected feature order.
        ordered_scaled_scores_list = [
            self._rescale(feature_result[scores_key],
                          self.FEATURE_RESCALE_DICT[scores_key])
            for scores_key in self.SVM_MODEL_ORDERED_SCORES_KEYS
        ]

        scores = []
        for vif, adm, ansnr, motion in zip(*ordered_scaled_scores_list):
            raw_score = self.svmutil.svm_predict(
                [0], [[vif, adm, ansnr, motion]], model)[0][0]
            scores.append(self._post_correction(motion, raw_score))

        result_dict = dict(feature_result.result_dict)  # all feature scores
        result_dict[self.get_scores_key()] = scores  # predicted quality scores
        return Result(asset, self.executor_id, result_dict)
Пример #14
0
 def extract(self,
             response: requests.Response,
             img_num,
             soup=None) -> Optional[Result]:
     """Parse a WeChat article page into a Result (title, description, images)."""

     def _value(line: str) -> str:
         # Right-hand side of `var x = "...";` without the quotes/semicolon.
         return line.split(" = ", maxsplit=1)[-1][1:-2]

     data = response.text
     start_end = re.search(pattern="var msg_title = .*?    var msg_link = ",
                           string=data,
                           flags=re.S)
     if not start_end:
         return Empty
     title = ""
     description = ""
     img = []
     # All three CDN-url variables yield an image URL the same way.
     image_prefixes = ("var msg_cdn_url", "var cdn_url_1_1", "var cdn_url_235_1")
     for raw_line in start_end.group().split("\n"):
         text = raw_line.strip()
         if text.startswith("var msg_title"):
             title = _value(text)
         elif text.startswith("var msg_desc"):
             description = _value(text)
         elif len(img) < img_num and text.startswith(image_prefixes):
             img.append(Img(url=_value(text)))
     if img_num > 0 and not img:
         # Fall back to the generic WeChat placeholder image.
         img.append(
             Img(url=
                 "https://res.wx.qq.com/a/wx_fed/assets/res/OTE0YTAw.png",
                 width=180,
                 height=180))
     return Result(title=title,
                   keywords=None,
                   description=description,
                   img=img,
                   limit_img_num=img_num)
Пример #15
0
    def _run_on_asset(self, asset):
        """Like VmafQualityRunner._run_on_asset, but additionally attaches
        local explanations produced by a LocalExplainer (or by an explainer
        supplied via optional_dict2['explainer'])."""
        assembler = self._get_vmaf_feature_assembler_instance(asset)
        assembler.run()
        feature_result = assembler.results[0]
        model = self._load_model(asset)
        xs = model.get_per_unit_xs_from_a_result(feature_result)
        ys_pred = self.predict_with_model(model, xs)

        opts = self.optional_dict2
        if opts is not None and 'explainer' in opts:
            explainer = opts['explainer']
        else:
            explainer = LocalExplainer()
        exps = explainer.explain(model, xs)

        result_dict = dict(feature_result.result_dict)  # feature scores
        result_dict[self.get_scores_key()] = ys_pred  # quality score
        result_dict[self.get_explanations_key()] = exps  # local explanations
        return Result(asset, self.executor_id, result_dict)
Пример #16
0
 def __init__(self, url, ratio_percents):
     """Create the match, result, and ratio workers for a single URL."""
     super(MatchStruct, self).__init__()
     self.url = url
     # Worker construction order is irrelevant; none are started here.
     self.ratio = MatchRatio(url, ratio_percents)
     self.result = Result(url)
     self.match = Match(url)
Пример #17
0
            try:
                to = 10 if self.engine.getOption('http-proxy') is None else 20
                response = urlopen(req, timeout=to)
            except HTTPError, e:
                self._addError(e.code, target.getAbsoluteUrl())
                return
            except URLError, e:
                self._addError(e.reason, target.getAbsoluteUrl())
                return
            except:
                self._addError('Unknown', target.getAbsoluteUrl())
                return
            else:
                result = self.processResponse(response.read().lower(), pl)
                for r in result:
                    self.results.append(Result(target, k, pl, r))

    def _checkStoredInjections(self):
        for r in self.results:
            # At this state injections in Result obj are not
            # compacted yet so it will only be 1st injected param
            url, data = r.target.getPayloadedUrl(r.first_param, "")
            
            # In case of proxy 
            if self.engine.getOption('http-proxy') is not None:
                proxy = ProxyHandler({'http': self.engine.getOption('http-proxy')})
                opener = build_opener(proxy)
                install_opener(opener)
            
            # Some headers
            if self.engine.getOption('ua') is not None:
Пример #18
0
 def _read_result(self, asset):
     """Collect this executor's quality scores for the asset into a Result."""
     scores = dict(self._get_quality_scores(asset))
     return Result(asset, self.executor_id, scores)
Пример #19
0
 def _read_result(self, asset):
     """Collect this executor's feature scores for the asset into a Result."""
     scores = dict(self._get_feature_scores(asset))
     return Result(asset, self.executor_id, scores)
Пример #20
0
def main():
    """Interactive image labeler: browse patient image collections, optionally
    overlaying model predictions and class-activation maps, or export all
    plots to disk with --export."""
    global metadata, results, predictions, percentages, image_collection, LABELS, show_cam, cam_fm, show_predictions, index

    metadata = cri.load_metadata()
    for p in metadata:
        if 'label' in p:
            print(p['label'])

    parser = argparse.ArgumentParser()
    # Fixed typo ("predicitons") and the missing space at the line join.
    description = 'Start in prediction mode. Note that in predictions mode, ' \
        'you can press the spacebar to use the predictions to label the images'
    parser.add_argument('-P',
                        '--predictions',
                        help=description,
                        action='store_true')
    description = 'Show class activation maps in prediction mode'
    parser.add_argument('-C', '--cam', help=description, action='store_true')
    description = 'Export all plots'
    parser.add_argument('-E',
                        '--export',
                        help=description,
                        action='store_true')
    args = parser.parse_args()

    show_cam = args.cam
    show_predictions = args.predictions or args.cam  # CAM implies predictions

    if show_predictions:
        if args.cam:
            # CAM mode additionally requires saved model weights.
            def _output_filter(e, m, i):
                result = paths.get_test_result_path(e, m, i)
                weights = paths.get_weights_path(e, m, i)
                return os.path.exists(result) and os.path.exists(weights)
        else:

            def _output_filter(e, m, i):
                result = paths.get_test_result_path(e, m, i)
                return os.path.exists(result)

    if show_predictions:
        output_key = paths.select_output(_output_filter)
        if not output_key:
            return None
        e, m, i = output_key
        result = Result.load(exp_key=e, model_key=m, instance_key=i)
        result_dict = result.data

        p = result_dict['predictions']
        import json
        print('Predictions: {}'.format(json.dumps(p, indent=4)))

        # hotfix
        if cri.is_tri_label_result(result_dict):
            LABELS = [None, 'oap', 'in', 'obs']

        # Re-key predictions/percentages by cr_code. The loop variable is
        # named `pred` so it no longer shadows the loaded `result` above.
        predictions = {}
        percentages = {}
        for basename, pred in p.items():
            cr_code = cri.extract_cr_code(basename)
            predictions[cr_code] = pred['prediction']
            percentages[cr_code] = pred['percentages']

        # Group by (dataset, patient, ...) triple. Only the keys are needed,
        # so iterate the dict directly instead of .items().
        image_collection = {}
        for cr_code in predictions:
            cr = cri.parse_cr_code(cr_code, match=False)
            image_collection[tuple(cr[:3])] = []

        # get list of patients then add all of their images (not just from predictions)
        for cr_code in metadata.keys():
            cr = cri.parse_cr_code(cr_code)
            if tuple(cr[:3]) in image_collection:
                image_collection[tuple(cr[:3])].append(cr_code)
    else:
        image_collection = collections.defaultdict(list)
        for cr_code in metadata.keys():
            cr = cri.parse_cr_code(cr_code)
            image_collection[tuple(cr[:3])].append(cr_code)

    if show_cam:
        try:
            print('Loading {} for CAM analysis'.format(output_key))
            fm = FineModel.load_by_key(m)
            fm.load_weights(exp_key=e, instance_key=i)
        except Exception as err:
            # Chain the original exception so the root cause stays visible.
            raise RuntimeError('Failed to load corresponding model weights') from err
        cam_fm = fm

    image_collection = sorted(image_collection.items())

    fig.canvas.mpl_connect('key_press_event', on_key_press)
    fig.canvas.mpl_connect('button_press_event', on_button_press)

    plt.subplots_adjust(top=0.95,
                        bottom=0.05,
                        right=1,
                        left=0,
                        hspace=0.2,
                        wspace=0)

    if args.export:
        export_dir = os.path.abspath('labeler_exports')
        os.makedirs(export_dir, exist_ok=True)
        print('Exporting all images to {}'.format(export_dir))
        for i in tqdm(range(len(image_collection))):
            index = i  # global index drives update()
            update()
            patient = image_collection[i]
            basename = '[{:03d}] D{:02d}_P{:08d}.png'.format(
                i, patient[0][0], patient[0][1])
            path = os.path.join(export_dir, basename)
            plt.savefig(path,
                        dpi=320,
                        transparent=False,
                        bbox_inches=None,
                        pad_inches=0.1)
    else:
        update()
        plt.show()

    cri.save_metadata(metadata)