def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(model=model,
                                session_init=sessinit,
                                input_names=['input', 'label'],
                                output_names=[
                                    'wrong-top1', 'wrong-top5', 'res-top5',
                                    'label', 'logits'
                                ])
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5 = RatioCounter(), RatioCounter()
    top5s = []
    labels = []
    logits = []
    for top1, top5, top5_pred, label, logit in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
        top5s.extend(top5_pred.tolist())
        labels.extend(label.tolist())
        logits.extend(logit.tolist())
    with open("top5_resnet2x.json", "w") as f:
        json.dump(top5s, f)

    with open("labels_resnet2x.json", "w") as f:
        json.dump(labels, f)

    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
    return acc1.ratio, acc5.ratio
Example #2
def run_test(path, input):
    param_dict = np.load(path).item()

    pred_config = PredictConfig(
        model=Model(),
        input_var_names=['input'],
        session_init=ParamRestore(param_dict),
        session_config=get_default_sess_config(0.9),
        output_var_names=['output']  # output:0 is the probability distribution
    )
    predict_func = get_predict_func(pred_config)

    import cv2
    im = cv2.imread(input)
    assert im is not None
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    im = cv2.resize(im, (224, 224))
    im = np.reshape(im, (1, 224, 224, 3)).astype('float32')
    im = im - 110
    outputs = predict_func([im])[0]
    prob = outputs[0]
    ret = prob.argsort()[-10:][::-1]
    print(ret)

    meta = ILSVRCMeta().get_synset_words_1000()
    print([meta[k] for k in ret])
Example #3
def run_test(path, input):
    param_dict = np.load(path).item()

    pred_config = PredictConfig(
        model=Model(),
        input_data_mapping=[0],
        session_init=ParamRestore(param_dict),
        output_var_names=['output:0', 'pool5/MaxPool:0'])
    predict_func = get_predict_func(pred_config)

    im = cv2.imread(input)
    assert im is not None
    im = im.astype('float32')
    im = cv2.resize(im, (224, 224)).reshape((1, 224, 224, 3))
    im = im - 110
    raw_out = predict_func([im])
    tfout = raw_out[1][0]

    from tensorio import read_value
    dumpf = 'dump.tensortxt'
    with open(dumpf) as f:
        name, arr = read_value(f)
    os.unlink(dumpf)
    hout = arr[:, :, :, 0]
    diff = hout - tfout
    maxdiff = np.abs(diff).max()
    print "Diff:", maxdiff
    assert maxdiff < 1e-3
    return  # early return after the numerical check; the classification printout below is unreachable

    prob = raw_out[0][0]
    ret = prob.argsort()[-10:][::-1]
    print(ret)
    meta = ILSVRCMeta().get_synset_words_1000()
    print([meta[k] for k in ret])
Example #4
def critic_predict_dataflow(ctrl, data, log_dir, model_dir, vs_name):
    """
    Prediction on a dataflow, used for testing a large batch of data
    """
    ckpt = tf.train.latest_checkpoint(model_dir)
    if not ckpt:
        outputs = [0] * len(data[0])
        logger.info("No model exists. Do not sort")
        return outputs
    model = critic_factory(ctrl, is_train=False, vs_name=vs_name)
    ds_val = critic_dataflow_factory(ctrl, data, is_train=False)
    output_names = ['{}/predicted_accuracy:0'.format(vs_name)]

    session_config = None
    if ctrl.critic_type == CriticTypes.LSTM:
        session_config = tf.ConfigProto(device_count={'GPU': 0})
    pred_config = PredictConfig(
        model=model,
        input_names=model.input_names,
        output_names=output_names,
        session_creator=NewSessionCreator(config=session_config),
        session_init=SaverRestore(ckpt))

    #with tf.Graph().as_default():
    predictor = SimpleDatasetPredictor(pred_config, ds_val)
    outputs = []
    for o in predictor.get_result():
        outputs.extend(o[0])
    return outputs
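
A minimal usage sketch for the function above; `ctrl`, `data`, and the directories are placeholders whose exact format comes from the surrounding project's critic_dataflow_factory and is not shown in this listing:

# Hypothetical call; ctrl, data and the directories are placeholders.
scores = critic_predict_dataflow(ctrl, data,
                                 log_dir='/tmp/critic_log',
                                 model_dir='/tmp/critic_model',
                                 vs_name='critic')
# One predicted accuracy per sample; all zeros when no checkpoint exists yet.
ranking = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)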
Example #5
File: infer.py | Project: haelkemary/xy_net
    def run(self):
        model_path = self.inf_model_path

        MODEL_MAKER = Model_NP_XY if self.model_mode == 'np+xy' else Model_NP_DIST

        pred_config = PredictConfig(
            model=MODEL_MAKER(),
            session_init=get_model_loader(model_path),
            input_names=self.eval_inf_input_tensor_names,
            output_names=self.eval_inf_output_tensor_names)
        predictor = OfflinePredictor(pred_config)

        for norm_target in self.inf_norm_codes:
            norm_dir = '%s/%s/' % (self.inf_norm_root_dir, norm_target)
            norm_save_dir = '%s/%s/' % (self.inf_output_dir, norm_target)

            # TODO: cache list to check later norm dir has same number of files
            file_list = glob.glob('%s/*%s' % (norm_dir, self.inf_imgs_ext))
            file_list.sort()  # ensure same order

            rm_n_mkdir(norm_save_dir)
            for filename in file_list:
                filename = os.path.basename(filename)
                basename = filename.split('.')[0]
                print(basename, norm_target, end=' ', flush=True)

                ##
                img = cv2.imread(norm_dir + filename)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                ##
                pred_map = self.__gen_prediction(img, predictor)
                sio.savemat('%s/%s.mat' % (norm_save_dir, basename),
                            {'result': [pred_map]})
                print('FINISH')
Example #6
File: run_vinmec.py | Project: tmquan/COVID
def eval(model, sessinit, dataflow):
    """
    Evaluate a classification model on the dataset. It assumes the model inputs are
    named "image" and "label", and that the graph contains an "estim" output tensor.
    """
    evaluator_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['image', 'label'],
        output_names=['estim']
    )

    stat = BinaryStatistics()

    # This does not have a visible improvement over naive predictor,
    # but will have an improvement if image_dtype is set to float32.
    evaluator = OfflinePredictor(evaluator_config)
    for dp in dataflow:
        image = dp[0]
        label = dp[1]
        estim = evaluator(image, label)[0]
        stat.feed((estim + 0.5).astype(np.int32), label)

    print('_precision: \t{}'.format(stat.precision))
    print('_recall: \t{}'.format(stat.recall))
    print('_f1_score: \t{}'.format(2 * (stat.precision * stat.recall) / (1 * stat.precision + stat.recall)))
    print('_f2_score: \t{}'.format(5 * (stat.precision * stat.recall) / (4 * stat.precision + stat.recall)))
    pass
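
A sketch of how this evaluator might be driven; the dataflow factory, the model instance, and the checkpoint path below are placeholders rather than parts of the project above:

# Hypothetical driver for eval(); build_val_dataflow(), model and the path are placeholders.
from tensorpack.dataflow import BatchData
from tensorpack.tfutils.sessinit import get_model_loader

val_df = BatchData(build_val_dataflow(), 64, remainder=True)
val_df.reset_state()  # required before iterating a DataFlow manually
eval(model, get_model_loader('/path/to/checkpoint'), val_df)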
Example #7
File: DQN.py | Project: Jothecat/tensorpack
def eval_model_multiprocess(model_path, romfile):
    M = Model()
    cfg = PredictConfig(model=M,
                        input_data_mapping=[0],
                        session_init=SaverRestore(model_path),
                        output_var_names=['fct/output:0'])

    class Worker(ParallelPredictWorker):
        def __init__(self, idx, gpuid, config, outqueue):
            super(Worker, self).__init__(idx, gpuid, config)
            self.outq = outqueue

        def run(self):
            player = AtariPlayer(AtariDriver(romfile, viz=0),
                                 action_repeat=ACTION_REPEAT)
            global NUM_ACTIONS
            NUM_ACTIONS = player.driver.get_num_actions()

            self._init_runtime()

            tot_reward = 0
            que = deque(maxlen=30)
            while True:
                s = player.current_state()
                outputs = self.func([[s]])
                action_value = outputs[0][0]
                act = action_value.argmax()
                #print action_value, act
                if random.random() < 0.01:
                    act = random.choice(range(player.driver.get_num_actions()))
                if len(que) == que.maxlen \
                        and que.count(que[0]) == que.maxlen:
                    act = 1
                que.append(act)
                #print(act)
                reward, isOver = player.action(act)
                tot_reward += reward
                if isOver:
                    self.outq.put(tot_reward)
                    tot_reward = 0

    NR_PROC = min(multiprocessing.cpu_count() // 2, 10)
    procs = []
    q = multiprocessing.Queue()
    for k in range(NR_PROC):
        procs.append(Worker(k, -1, cfg, q))
    ensure_proc_terminate(procs)
    for k in procs:
        k.start()
    stat = StatCounter()
    try:
        EVAL_EPISODE = 50
        for _ in tqdm(range(EVAL_EPISODE)):
            r = q.get()
            stat.feed(r)
    finally:
        logger.info("Average Score: {}. Max Score: {}".format(
            stat.average, stat.max))
Example #8
def do_visualize(model, model_path, nr_visualize=100, output_dir='output'):
    """
    Visualize some intermediate results (proposals, raw predictions) inside the pipeline.
    """
    df = get_train_dataflow()
    df.reset_state()

    pred = OfflinePredictor(
        PredictConfig(model=model,
                      session_init=SmartInit(model_path),
                      input_names=['image', 'gt_boxes', 'gt_labels'],
                      output_names=[
                          'generate_{}_proposals/boxes'.format(
                              'fpn' if cfg.MODE_FPN else 'rpn'),
                          'generate_{}_proposals/scores'.format(
                              'fpn' if cfg.MODE_FPN else 'rpn'),
                          'fastrcnn_all_scores',
                          'output/boxes',
                          'output/scores',
                          'output/labels',
                      ]))

    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    fs.mkdir_p(output_dir)
    with tqdm.tqdm(total=nr_visualize) as pbar:
        for idx, dp in itertools.islice(enumerate(df), nr_visualize):
            img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp[
                'gt_labels']

            rpn_boxes, rpn_scores, all_scores, \
                final_boxes, final_scores, final_labels = pred(
                    img, gt_boxes, gt_labels)

            # draw groundtruth boxes
            gt_viz = draw_annotation(img, gt_boxes, gt_labels)
            # draw best proposals for each groundtruth, to show recall
            proposal_viz, good_proposals_ind = draw_proposal_recall(
                img, rpn_boxes, rpn_scores, gt_boxes)
            # draw the scores for the above proposals
            score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind],
                                         all_scores[good_proposals_ind])

            results = [
                DetectionResult(*args)
                for args in zip(final_boxes, final_scores, final_labels,
                                [None] * len(final_labels))
            ]
            final_viz = draw_final_outputs(img, results)

            viz = tpviz.stack_patches(
                [gt_viz, proposal_viz, score_viz, final_viz], 2, 2)

            if os.environ.get('DISPLAY', None):
                tpviz.interactive_imshow(viz)
            cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
            pbar.update()
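
A brief invocation sketch, mirroring the dataset registration used by init_predictor later in this listing; it assumes the FasterRCNN example's surrounding module (register_coco, cfg, finalize_configs, ResNetFPNModel), and the checkpoint path is a placeholder:

# Hypothetical call; the checkpoint path is a placeholder.
register_coco(cfg.DATA.BASEDIR)        # same dataset registration as in init_predictor below
finalize_configs(is_training=False)
do_visualize(ResNetFPNModel(), '/path/to/checkpoint', nr_visualize=50, output_dir='viz')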
Example #9
def export(args):
    model = AttentionOCR()
    predcfg = PredictConfig(
        model=model,
        session_init=SmartInit(args.checkpoint_path),
        input_names=model.get_inferene_tensor_names()[0],
        output_names=model.get_inferene_tensor_names()[1])

    ModelExporter(predcfg).export_compact(args.pb_path, optimize=False)
Example #10
def test_checkpoint(args):

    model = AttentionOCR()
    predcfg = PredictConfig(model=model,
                            session_init=SmartInit(args.checkpoint_path),
                            input_names=model.get_inferene_tensor_names()[0],
                            output_names=model.get_inferene_tensor_names()[1])

    predictor = OfflinePredictor(predcfg)
    list_dict = []
    with open("result/model-500000-512.txt", "w") as f:
        ned = 0.
        count = 0
        for filename in os.listdir(args.img_folder)[500:]:
            results = {}
            img_path = os.path.join(args.img_folder, filename)
            print("----> image path: ", img_path)
            name = filename.split('_')[0]
            image = cv2.imread(img_path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            height, width = image.shape[:2]
            points = [[0, 0], [width - 1, 0], [width - 1, height - 1],
                      [0, height - 1]]

            image = preprocess(image, points, cfg.image_size)

            before = time.time()
            preds, probs = predictor(np.expand_dims(image, axis=0),
                                     np.ones([1, cfg.seq_len + 1], np.int32),
                                     False, 1.)
            print(preds)
            print(probs)

            after = time.time()
            text, confidence = label2str(preds[0], probs[0], cfg.label_dict)
            print("Text: ", text)
            print("Label: ", name)
            print("confidence: ", confidence)
            print("cal_sim: ", cal_sim(text, name))
            ned += cal_sim(text, name)
            count += 1
            print("-------------------------------")
            f.write("Path: {}".format(img_path))
            f.write("\n")
            f.write("Text: {}".format(text))
            f.write("\n")
            f.write("Label: {}".format(name))
            f.write("\n")
            f.write("Confidence: {}".format(confidence))
            f.write("\n")
            f.write("1-N.E.D: {}".format(cal_sim(text, name)))
            f.write("\n")
            f.write("---------------------------------------------")
            f.write("\n")
        f.write("Total {} Images | Average NED: {}".format(count, ned / count))
Example #11
    def load_model(self):
        print('Loading Model...')
        model_path = self.model_path
        model_constructor = self.get_model()
        pred_config = PredictConfig(model=model_constructor(
            self.nr_types, self.input_shape, self.mask_shape, self.input_norm),
                                    session_init=get_model_loader(model_path),
                                    input_names=self.input_tensor_names,
                                    output_names=self.output_tensor_names)
        self.predictor = OfflinePredictor(pred_config)
Example #12
    def __init__(self, model):
        config = PredictConfig(
            inputs_desc=[InputDesc(tf.float32, (None, None, 128), 'features'),
                         InputDesc(tf.int32, (None,), 'length')],
            tower_func=_tower_func,
            session_init=SaverRestore(model),
            input_names=['features', 'length'],
            output_names=['prediction'])

        super(FeaturePredictor, self).__init__(config)
Example #13
def assemble_func(config_module, checkpoint_path):
    model = config_module.Model()
    pred_config = PredictConfig(
        model=model,
        input_data_mapping=[0, 1],
        session_init=SaverRestore(checkpoint_path),
        output_var_names=['pred'],
    )
    predict_func = get_predict_func(pred_config)
    return predict_func
Example #14
def play_model(model_path):
    player = get_player(viz=0.01)
    cfg = PredictConfig(
            model=Model(),
            input_data_mapping=[0],
            session_init=SaverRestore(model_path),
            output_var_names=['fct/output:0'])
    predfunc = get_predict_func(cfg)
    while True:
        score = play_one_episode(player, predfunc)
        print("Total:", score)
Example #15
File: eval.py | Project: hewr1993/nn_expr
def assemble_func(config_module, checkpoint_path):
    model = config_module.Model()
    pred_config = PredictConfig(
        model=model,
        input_var_names=['input'],
        session_init=SaverRestore(checkpoint_path),
        session_config=get_default_sess_config(0.5),
        output_var_names=['score', 'boxes'],
    )
    predict_func = get_predict_func(pred_config)
    return predict_func
Example #16
def eval_model_multithread(model_path):
    cfg = PredictConfig(
            model=Model(),
            input_data_mapping=[0],
            session_init=SaverRestore(model_path),
            output_var_names=['fct/output:0'])
    p = get_player(); del p # set NUM_ACTIONS
    func = get_predict_func(cfg)
    NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
    mean, max = eval_with_funcs([func] * NR_PROC)
    logger.info("Average Score: {}; Max Score: {}".format(mean, max))
Example #17
    def __init__(self, model):
        config = PredictConfig(
            inputs_desc=[InputDesc(tf.float32, (None, 32, 32), 'input')],
            tower_func=_tower_func,
            session_init=SaverRestore(model),
            input_names=['input'],
            # TODO cannot choose max3. Fix this
            output_names=['max3/output', 'labels'])
        # output_names=['max3/output', 'labels'])

        super(CharacterPredictor, self).__init__(config)
Example #18
    def create_predict_config(self, session_init):
        """
        Returns:
            a :class:`PredictConfig` to be used for inference.
            The predictor will take inputs and return probabilities.

        Examples:

            pred = OfflinePredictor(model.create_predict_config(get_model_loader(args.load)))
            prob = pred(NCHW_image)[0]  # Nx1000 probabilities
        """
        return PredictConfig(model=self, input_names=['input'], output_names=['prob'], session_init=session_init)
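
The returned config can also be handed to tensorpack's ModelExporter to produce a frozen graph, as later examples in this listing do with their own configs; a brief sketch, where `model` is an instance of this class and the paths are placeholders:

# Hypothetical export of the config returned above; paths are placeholders.
from tensorpack.tfutils.export import ModelExporter
from tensorpack.tfutils.sessinit import get_model_loader

predcfg = model.create_predict_config(get_model_loader('/path/to/checkpoint'))
ModelExporter(predcfg).export_compact('/path/to/frozen.pb', optimize=False)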
Example #19
    def inference_config(self, args) -> PredictConfig:
        loss_name = (self.validation_total_cost_var
                     if args.validation is not None else self.total_cost_var)
        min_file = os.path.join(args.save,
                                f"min-{loss_name}.data-00000-of-00001")
        model = self.inference_model(args)
        return PredictConfig(
            model=model,
            input_names=[i.name for i in model.inputs()],
            output_names=model.outputs(),
            session_init=SaverRestore(min_file),
        )
Example #20
def critic_predictor(ctrl, model_dir, vs_name):
    """
    Create an OfflinePredictorWithSaver for test-time use.
    """
    model = critic_factory(ctrl, is_train=False, vs_name=vs_name)
    output_names = ['{}/predicted_accuracy:0'.format(vs_name)]
    session_config = None
    if ctrl.critic_type == CriticTypes.LSTM:
        session_config = tf.ConfigProto(device_count={'GPU': 0})
    pred_config = PredictConfig(
        model=model,
        input_names=model.input_names,
        output_names=output_names,
        session_creator=NewSessionCreator(config=session_config))
    if model_dir:
        ckpt = tf.train.latest_checkpoint(model_dir)
        logger.info("Loading {} predictor from {}".format(vs_name, ckpt))
        if ckpt:
            pred_config.session_init = SaverRestore(ckpt)
    predictor = OfflinePredictorWithSaver(pred_config)
    return predictor
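
A minimal usage sketch; `ctrl`, the model directory, and the feed values are placeholders defined by the surrounding project:

# Hypothetical call; ctrl, the model directory and the feed are placeholders.
predictor = critic_predictor(ctrl, '/tmp/critic_model', 'critic')
predicted_acc = predictor(*feed)[0]  # feed must match model.input_names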
Example #21
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(model=model,
                                session_init=sessinit,
                                input_names=['input', 'label'],
                                output_names=['wrong-top1', 'wrong-top5'])
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5 = RatioCounter(), RatioCounter()
    for top1, top5 in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
Example #22
def test(net,
         session_init,
         val_dataflow,
         do_calc_flops=False,
         extended_log=False):
    """
    Main test routine.

    Parameters
    ----------
    net : obj
        Model.
    session_init : SessionInit
        Session initializer.
    val_dataflow : DataFlow
        Dataflow that provides the validation data.
    do_calc_flops : bool, default False
        Whether to calculate the model complexity (FLOPs).
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    """
    pred_config = PredictConfig(
        model=net,
        session_init=session_init,
        input_names=["input", "label"],
        output_names=["wrong-top1", "wrong-top5"]
    )
    err_top1 = RatioCounter()
    err_top5 = RatioCounter()

    tic = time.time()
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(val_dataflow), device="/gpu:0"))

    for _ in tqdm.trange(val_dataflow.size()):
        err_top1_val, err_top5_val = pred()
        batch_size = err_top1_val.shape[0]
        err_top1.feed(err_top1_val.sum(), batch_size)
        err_top5.feed(err_top5_val.sum(), batch_size)

    err_top1_val = err_top1.ratio
    err_top5_val = err_top5.ratio

    if extended_log:
        logging.info("Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})".format(
            top1=err_top1_val, top5=err_top5_val))
    else:
        logging.info("Test: err-top1={top1:.4f}\terr-top5={top5:.4f}".format(
            top1=err_top1_val, top5=err_top5_val))
    logging.info("Time cost: {:.4f} sec".format(
        time.time() - tic))

    if do_calc_flops:
        calc_flops(model=net)
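
A sketch of driving this test routine, assuming an ImageNet-style validation DataFlow; the dataflow factory, network instance, and checkpoint path are placeholders:

# Hypothetical driver for test(); build_imagenet_val_dataflow(), net and the path are placeholders.
from tensorpack.dataflow import BatchData
from tensorpack.tfutils.sessinit import get_model_loader

val_df = BatchData(build_imagenet_val_dataflow(), 100, remainder=False)
test(net=net,
     session_init=get_model_loader('/path/to/checkpoint'),
     val_dataflow=val_df,
     do_calc_flops=False,
     extended_log=True)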
Example #23
File: infer.py | Project: okunator/hover_net
    def run(self):

        if self.inf_auto_find_chkpt:
            print('-----Auto Selecting Checkpoint Basing On "%s" Through "%s" Comparison' % \
                        (self.inf_auto_metric, self.inf_auto_comparator))
            model_path, stat = get_best_chkpts(self.save_dir,
                                               self.inf_auto_metric,
                                               self.inf_auto_comparator)
            print('Selecting: %s' % model_path)
            print('Having Following Statistics:')
            for key, value in stat.items():
                print('\t%s: %s' % (key, value))
        else:
            model_path = self.inf_model_path

        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model=model_constructor(),
            session_init=get_model_loader(model_path),
            input_names=self.eval_inf_input_tensor_names,
            output_names=self.eval_inf_output_tensor_names)
        predictor = OfflinePredictor(pred_config)

        for data_dir_set in self.inf_data_list:
            data_root_dir = data_dir_set[0]
            data_out_code = data_dir_set[1]

            for subdir in data_dir_set[2:]:
                data_dir = '%s/%s/' % (data_root_dir, subdir)
                save_dir = '%s/%s/%s' % (self.inf_output_dir, data_out_code,
                                         subdir)

                file_list = glob.glob('%s/*%s' % (data_dir, self.inf_imgs_ext))
                file_list.sort()  # ensure same order

                rm_n_mkdir(save_dir)
                for filename in file_list:
                    filename = os.path.basename(filename)
                    basename = filename.split('.')[0]
                    print(data_dir, basename, end=' ', flush=True)

                    ##
                    img = cv2.imread(data_dir + filename)
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                    ##
                    pred_map = self.__gen_prediction(img, predictor)
                    sio.savemat('%s/%s.mat' % (save_dir, basename),
                                {'result': [pred_map]})
                    print('FINISH')
Example #24
    def load_model(self):
        """
        Loads the model and checkpoints according to the model stated in config.py
        """

        print('Loading Model...')
        model_path = self.model_path
        model_constructor = self.get_model()
        pred_config = PredictConfig(model=model_constructor(
            self.nr_types, self.input_shape, self.mask_shape, self.input_norm),
                                    session_init=get_model_loader(model_path),
                                    input_names=self.input_tensor_names,
                                    output_names=self.output_tensor_names)
        self.predictor = OfflinePredictor(pred_config)
Example #25
    def load_model(self):
        """Loads the model and checkpoints"""
        print("Loading Model...")
        model_path = self.model_path
        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model=model_constructor(
                self.nr_types, self.patch_input_shape, self.patch_output_shape, self.input_norm
            ),
            session_init=get_model_loader(model_path),
            input_names=self.input_tensor_names,
            output_names=self.output_tensor_names,
        )
        self.predictor = OfflinePredictor(pred_config)
Example #26
    def run(self, save_only):
        if self.inf_auto_find_chkpt:
            # pick the numerically largest sub-directory of save_dir as the model path
            chkpt_dirs = [name for name in os.listdir(self.save_dir)
                          if os.path.isdir(os.path.join(self.save_dir, name))]
            self.inf_model_path = os.path.join(self.save_dir, str(max(int(x) for x in chkpt_dirs)))
            print(f"Inference model path: <{self.inf_model_path}>")
            print('-----Auto Selecting Checkpoint Basing On "%s" Through "%s" Comparison' % \
                        (self.inf_auto_metric, self.inf_auto_comparator))
            model_path, stat = get_best_chkpts(self.inf_model_path, self.inf_auto_metric, self.inf_auto_comparator)
            print('Selecting: %s' % model_path)
            print('Having Following Statistics:')
            for key, value in stat.items():
                print('\t%s: %s' % (key, value))
        else:
            model_path = self.inf_model_path

        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model        = model_constructor(),
            session_init = get_model_loader(model_path),
            input_names  = self.eval_inf_input_tensor_names,
            output_names = self.eval_inf_output_tensor_names)
        predictor = OfflinePredictor(pred_config)
        
        if save_only:
            exporter = ModelExporter(pred_config)
            rm_n_mkdir(self.model_export_dir)
            print ('{}/compact.pb'.format(self.model_export_dir))
            exporter.export_compact(filename='{}/compact.pb'.format(self.model_export_dir))
            exporter.export_serving(os.path.join(self.model_export_dir, 'serving'), signature_name='serving_default')
            return

        for num, data_dir in enumerate(self.inf_data_list):
            save_dir = os.path.join(self.inf_output_dir, str(num))

            file_list = glob.glob(os.path.join(data_dir, '*{}'.format(self.inf_imgs_ext)))
            file_list.sort() # ensure same order

            rm_n_mkdir(save_dir)
            for filename in file_list:
                filename = os.path.basename(filename)
                basename = filename.split('.')[0]
                print(data_dir, basename, end=' ', flush=True)

                ##
                img = cv2.imread(os.path.join(data_dir, filename))
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                ##
                pred_map = self.__gen_prediction(img, predictor)
                sio.savemat(os.path.join(save_dir,'{}.mat'.format(basename)), {'result':[pred_map]})
                print(f"Finished. {datetime.now().strftime('%H:%M:%S.%f')}")
Example #27
    def run(self):

        if self.inf_auto_find_chkpt:
            print(
                '-----Auto Selecting Checkpoint Basing On "%s" Through "%s" Comparison'
                % (self.inf_auto_metric, self.inf_auto_comparator))
            model_path, stat = get_best_chkpts(self.save_dir,
                                               self.inf_auto_metric,
                                               self.inf_auto_comparator)
            print("Selecting: %s" % model_path)
            print("Having Following Statistics:")
            for key, value in stat.items():
                print("\t%s: %s" % (key, value))
        else:
            model_path = self.inf_model_path

        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model=model_constructor(),
            session_init=get_model_loader(model_path),
            input_names=self.eval_inf_input_tensor_names,
            output_names=self.eval_inf_output_tensor_names,
            create_graph=False,
        )
        predictor = OfflinePredictor(pred_config)

        for data_dir in self.inf_data_list:
            save_dir = self.inf_output_dir + "/raw/"
            file_list = glob.glob("%s/*%s" % (data_dir, self.inf_imgs_ext))
            file_list.sort()  # ensure same order

            rm_n_mkdir(save_dir)
            for filename in file_list:
                start = time.time()
                filename = os.path.basename(filename)
                basename = filename.split(".")[0]
                print(data_dir, basename, end=" ", flush=True)

                ##
                img = cv2.imread(data_dir + filename)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                pred_map = self.__gen_prediction(img, predictor)

                np.save("%s/%s.npy" % (save_dir, basename), [pred_map])
                end = time.time()
                diff = str(round(end - start, 2))
                print("FINISH. TIME: %s" % diff)
Example #28
def init_predictor():
    register_coco(cfg.DATA.BASEDIR)
    MODEL = ResNetFPNModel()
    finalize_configs(is_training=False)

    predcfg = PredictConfig(
        model=MODEL,
        #session_init=SmartInit("/home/jetson/Documents/trained_model/500000_17/checkpoint"),
        session_init=SmartInit(
            "/home/jetson/Documents/trained_model/255000_04.01/checkpoint"),
        input_names=MODEL.get_inference_tensor_names()[0],
        output_names=MODEL.get_inference_tensor_names()[1])

    predictor = OfflinePredictor(predcfg)

    return predictor
Example #29
def eval_on_ILSVRC12(model, scale, sessinit, dataflow):
    pred_config = PredictConfig(model=model,
                                session_init=sessinit,
                                input_names=['input', 'label'],
                                output_names=[
                                    'wrong-scale%03d-top1' % scale,
                                    'wrong-scale%03d-top5' % scale
                                ])
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5 = RatioCounter(), RatioCounter()
    for top1, top5 in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
    print('Top1/Top5 Acc: %.1f/%.1f' %
          (100 - 100 * acc1.ratio, 100 - 100 * acc5.ratio))
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(model=model,
                                session_init=sessinit,
                                input_names=['input', 'label'],
                                output_names=['wrong-top1',
                                              'wrong-top5'])  # this builds the prediction graph
    pred = SimpleDatasetPredictor(
        pred_config,
        dataflow)  # Simply create one predictor and run it on the DataFlow.
    acc1, acc5 = RatioCounter(), RatioCounter()  # counters for the ratio of wrong predictions
    for top1, top5 in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))  # 输出误差