Example #1
def critic_predict_dataflow(ctrl, data, log_dir, model_dir, vs_name):
    """
    Prediction on a dataflow, used for testing a large batch of data
    """
    ckpt = tf.train.latest_checkpoint(model_dir)
    if not ckpt:
        outputs = [0] * len(data[0])
        logger.info("No model exists. Do not sort")
        return outputs
    model = critic_factory(ctrl, is_train=False, vs_name=vs_name)
    ds_val = critic_dataflow_factory(ctrl, data, is_train=False)
    output_names = ['{}/predicted_accuracy:0'.format(vs_name)]

    session_config = None
    if ctrl.critic_type == CriticTypes.LSTM:
        # run the LSTM critic on CPU only (hide all GPUs from the session)
        session_config = tf.ConfigProto(device_count={'GPU': 0})
    pred_config = PredictConfig(
        model=model,
        input_names=model.input_names,
        output_names=output_names,
        session_creator=NewSessionCreator(config=session_config),
        session_init=SaverRestore(ckpt))

    predictor = SimpleDatasetPredictor(pred_config, ds_val)
    outputs = []
    for o in predictor.get_result():
        outputs.extend(o[0])
    return outputs
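The snippets on this page all follow the same tensorpack idiom: declare what to feed and fetch in a PredictConfig, wrap it in a SimpleDatasetPredictor, and iterate over per-batch results. A minimal sketch of that idiom; MyModel, the checkpoint path and the downstream process() call are placeholders, not names from the example above:

from tensorpack import PredictConfig, SimpleDatasetPredictor, SaverRestore

pred_config = PredictConfig(
    model=MyModel(),                      # placeholder: a tensorpack ModelDesc subclass
    session_init=SaverRestore('/path/to/checkpoint'),
    input_names=['input', 'label'],       # must match the model's declared inputs
    output_names=['logits'])              # any tensor name defined in the graph
pred = SimpleDatasetPredictor(pred_config, dataflow)  # dataflow: any tensorpack DataFlow
for outputs in pred.get_result():         # one tuple of numpy arrays per batch
    process(outputs[0])                   # placeholder downstream handling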
Example #2
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(model=model,
                                session_init=sessinit,
                                input_names=['input', 'label'],
                                output_names=[
                                    'wrong-top1', 'wrong-top5', 'res-top5',
                                    'label', 'logits'
                                ])
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5 = RatioCounter(), RatioCounter()
    top5s = []
    labels = []
    logits = []
    for top1, top5, top5pred, label, logit in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
        top5s.extend(top5pred.tolist())
        labels.extend(label.tolist())
        logits.extend(logit.tolist())
    with open("top5_resnet2x.json", "w") as f:
        json.dump(top5s, f)

    with open("labels_resnet2x.json", "w") as f:
        json.dump(labels, f)

    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
    return acc1.ratio, acc5.ratio
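Driving eval_on_ILSVRC12 takes a batched validation dataflow and a session initializer. A hedged driver sketch using tensorpack's stock ILSVRC12 loader, where the Model class and both paths are assumptions:

from tensorpack import BatchData, SaverRestore
from tensorpack.dataflow import dataset, imgaug, AugmentImageComponent

augs = [imgaug.ResizeShortestEdge(256), imgaug.CenterCrop((224, 224))]
ds = dataset.ILSVRC12('/path/to/ILSVRC12', 'val', shuffle=False)  # placeholder path
ds = AugmentImageComponent(ds, augs)     # resize/crop only the image component
ds = BatchData(ds, 128, remainder=True)  # keep the final partial batch
eval_on_ILSVRC12(Model(), SaverRestore('/path/to/checkpoint'), ds)  # Model is assumed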
Example #3
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(model=model,
                                session_init=sessinit,
                                input_names=['input', 'label'],
                                output_names=['wrong-top1', 'wrong-top5'])
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5 = RatioCounter(), RatioCounter()
    for top1, top5 in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
Example #4
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5 = RatioCounter(), RatioCounter()
    for top1, top5 in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
Example #5
def eval_on_ILSVRC12(model, scale, sessinit, dataflow):
    pred_config = PredictConfig(model=model,
                                session_init=sessinit,
                                input_names=['input', 'label'],
                                output_names=[
                                    'wrong-scale%03d-top1' % scale,
                                    'wrong-scale%03d-top5' % scale
                                ])
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5 = RatioCounter(), RatioCounter()
    for top1, top5 in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
    print('Top1/Top5 Acc: %.1f/%.1f' %
          (100 - 100 * acc1.ratio, 100 - 100 * acc5.ratio))
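Because the output names embed the scale, one checkpoint can be scored at several input resolutions by rebuilding the dataflow per scale. A hedged calling loop; Model, the checkpoint path and the get_val_dataflow helper are assumptions:

from tensorpack import SaverRestore

for scale in [224, 288, 352]:      # illustrative scales
    ds = get_val_dataflow(scale)   # hypothetical: batched val dataflow at this scale
    eval_on_ILSVRC12(Model(), scale, SaverRestore('/path/to/checkpoint'), ds)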
Example #6
def eval_on_ILSVRC12(model, sessinit, dataflow):
    # builds the prediction graph
    pred_config = PredictConfig(model=model,
                                session_init=sessinit,
                                input_names=['input', 'label'],
                                output_names=['wrong-top1', 'wrong-top5'])
    # create one predictor and run it on the DataFlow
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    # RatioCounter: a counter for the ratio of something
    acc1, acc5 = RatioCounter(), RatioCounter()
    for top1, top5 in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
    # print the error rates
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
Example #7
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5', 'attack_success'])
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5, succ = RatioCounter(), RatioCounter(), RatioCounter()
    for top1, top5, num_succ in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
        succ.feed(num_succ.sum(), batch_size)
        # Uncomment to monitor the metrics during evaluation
        # print("Top1 Error: {}".format(acc1.ratio))
        # print("Attack Success Rate: {}".format(succ.ratio))
    print("Top1 Error: {}".format(acc1.ratio))
    print("Attack Success Rate: {}".format(succ.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
Example #8
def eval_on_iNaturalist(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['label', 'logits', 'wrong-top1', 'wrong-top3'])
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc3 = RatioCounter(), RatioCounter()
    for label, logits, top1, top3 in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc3.feed(top3.sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top3 Error: {}".format(acc3.ratio))
Example #9
def eval_on_iNaturalist(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['label', 'logits', 'wrong-top1', 'wrong-top3']
    )
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc3 = RatioCounter(), RatioCounter()
    for label, logits, top1, top3 in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc3.feed(top3.sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top3 Error: {}".format(acc3.ratio))
Example #10
def feature_extraction_on_AVA2012(model, sessinit, dataflow, data_format,
                                  output_name, batch_size_times, feature_dir):
    # set the configuration during the prediction process
    # and apply the SimpleDatasetPredictor to extract the output_name
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        # NOTE: the names in input_names & output_names depend on the definitions in the loaded model
        input_names=['input', 'label'],
        output_names=[output_name])
    pred = SimpleDatasetPredictor(pred_config, dataflow)

    cnt = 0
    rep_dict = {output_name: []}
    for outp in pred.get_result():
        rep = outp[0]
        # convert NCHW feature maps to NHWC before saving
        if (len(rep.shape) == 4) and (data_format == 'NCHW'):
            rep = np.transpose(rep, (0, 2, 3, 1))
        rep_dict[output_name].append(rep)

        # stop early once enough batches have been collected
        cnt += 1
        if cnt >= batch_size_times:
            rep_dict[output_name] = np.concatenate(rep_dict[output_name],
                                                   axis=0)
            print('    early stop after collecting enough reps')
            break

    with open(
            "{0}/reps_{1}.pkl".format(feature_dir,
                                      output_name.replace('/', '-')),
            'wb') as output_stream:
        print('--> save {} to a local pkl file ...'.format(output_name))
        print('    shape : ', end='')
        print(rep_dict[output_name].shape)
        pickle.dump(rep_dict, output_stream, protocol=2)
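Reading the features back is the mirror image of the writer above; the directory and tensor name below are placeholders:

import pickle

# the file name replaces '/' with '-', but the dict key is the original tensor name
with open('features/reps_linear-output.pkl', 'rb') as f:
    rep_dict = pickle.load(f)
reps = rep_dict['linear/output']   # hypothetical output_name
print(reps.shape)                  # (N, H, W, C) after the NCHW -> NHWC transpose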
Example #11
def test_on_iNaturalist(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['label', 'logits']
    )
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    # NOTE: assumes the dataflow yields one image per datapoint, so that
    # label[0] and logits[0] refer to that single example
    with open('./result_val.csv', 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['id', 'predicted'])
        for label, logits in pred.get_result():
            # top-3 class indices, highest score first
            pred_logits = logits[0].argsort()[-3:][::-1].tolist()
            row = [label[0], ' '.join(str(p) for p in pred_logits)]
            print(row)
            writer.writerow(row)
Example #12
parser.add_argument('--output_type',  # flag name inferred from args.output_type below
                    choices=['label', 'label-prob', 'raw'])
parser.add_argument('--top', default=1, type=int)
args = parser.parse_args()

get_config_func = imp.load_source('config_script', args.config).get_config

# TODO not sure if this script is still working

with tf.Graph().as_default() as G:
    train_config = get_config_func()
    config = PredictConfig(
        inputs=train_config.inputs,
        input_dataset_mapping=[train_config.inputs[0]],  # assume first component is image
        get_model_func=train_config.get_model_func,
        session_init=sessinit.SaverRestore(args.model),
        output_var_names=['output:0']
    )

    ds = ImageFromFile(args.images, 3, resize=(227, 227))
    ds = BatchData(ds, 128, remainder=True)
    predictor = SimpleDatasetPredictor(config, ds)
    res = predictor.get_all_result()

    if args.output_type == 'label':
        for r in res:
            print(r[0].argsort(axis=1)[:, -args.top:][:, ::-1])
    elif args.output_type == 'label-prob':   # matches the 'label-prob' choice above
        raise NotImplementedError
    elif args.output_type == 'raw':
        print(res)
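input_dataset_mapping, get_model_func and output_var_names come from a pre-1.0 tensorpack API (hence the TODO above). In later tensorpack releases, roughly the same configuration would be spelled as below; the Model class is an assumption:

config = PredictConfig(
    model=Model(),                        # placeholder ModelDesc subclass
    session_init=sessinit.SaverRestore(args.model),
    input_names=['input'],                # feed only the image component
    output_names=['output'])              # tensor name from the model's graph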
Example #13
def viz_CAM(model,
            sessinit,
            name,
            dataflow,
            CAM_dir,
            save_PKL=False,
            save_REP=False):
    # set the configuration during the prediction process
    # and apply the SimpleDatasetPredictor to extract the output_names
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        # NOTE: the names in input_names & output_names depend on the definitions in the loaded model
        input_names=['input', 'label'],
        output_names=[
            'wrong-top1', 'group3/block1/ReLU_output', 'linear_C2/W'
        ],
        return_input=True)
    pred = SimpleDatasetPredictor(pred_config, dataflow)

    # create or clear CAM_dir for the output of results of CAM visualization
    CAM_dir = '{}{}'.format(CAM_dir, name)
    if os.path.isdir(CAM_dir):
        print('--> clear the existing results in the directory {}'.format(
            CAM_dir))
        os.system('rm -r {}'.format(CAM_dir))
    os.system('mkdir -p {}'.format(CAM_dir))

    # for ease of file management, we save jpgs, pkls and reps
    # into three different directories
    print('--> during viz_CAM, we will generate the jpg', end='')
    os.system('mkdir -p {}'.format(CAM_dir + '/jpg'))
    if save_PKL:
        print(', pkl', end='')
        os.system('mkdir -p {}'.format(CAM_dir + '/pkl'))
    if save_REP:
        print(', rep', end='')
        os.system('mkdir -p {}'.format(CAM_dir + '/rep'))
    print(' files for further usage')

    # get the img_lab_list for proper formation of result recording
    img_lab_list = dataset.AVA2012Meta().get_image_list(name)[0]

    cnt = 0
    for inp, outp in pred.get_result():
        images, labels = inp
        wrongs, convmaps, W = outp
        batch = wrongs.shape[0]

        for i in range(batch):
            convmap = convmaps[i, :, :, :]  # 512 x 7 x 7
            weight0 = W[:, 0].T  # 512 x 1 for negative
            mergedmap0_7x7 = np.matmul(weight0, convmap.reshape(
                (512, -1))).reshape(7, 7)
            mergedmap0 = cv2.resize(mergedmap0_7x7, (224, 224))
            heatmap0 = viz.intensity_to_rgb(mergedmap0)
            blend0 = images[i] * 0.5 + heatmap0 * 0.5

            weight1 = W[:, 1].T  # 512 x 1 for positive
            mergedmap1_7x7 = np.matmul(weight1, convmap.reshape(
                (512, -1))).reshape(7, 7)
            mergedmap1 = cv2.resize(mergedmap1_7x7, (224, 224))
            heatmap1 = viz.intensity_to_rgb(mergedmap1)
            blend1 = images[i] * 0.5 + heatmap1 * 0.5

            concat = np.concatenate(
                (images[i], heatmap0, blend0, heatmap1, blend1), axis=1)

            imgName, lab01 = img_lab_list[cnt]
            assert lab01 == labels[i], \
                '*** in viz_CAM: lab01 ({0}) != labels[i] ({1}) in image {2}'.format(lab01, labels[i], imgName)

            # save image of CAM visualization
            cv2.imwrite('{0}/jpg/cam_{1}_{2}_{3}.jpg'.format(CAM_dir, os.path.splitext(imgName)[0], \
                lab01, int(wrongs[i])), concat)
            # add @20171123: for CAMCrop
            if save_PKL:
                with open(
                        '{0}/pkl/{1}.pkl'.format(CAM_dir,
                                                 os.path.splitext(imgName)[0]),
                        'wb') as output_stream:
                    pickle.dump(
                        {
                            "GT01": lab01,
                            "CAM0": mergedmap0_7x7,
                            "CAM1": mergedmap1_7x7
                        }, output_stream)

            if save_REP:
                with open(
                        '{0}/rep/{1}.rep'.format(CAM_dir,
                                                 os.path.splitext(imgName)[0]),
                        'wb') as output_stream:
                    pickle.dump({
                        "convmap": convmap,
                        "W": W,
                        "GT01": lab01
                    }, output_stream)

            cnt += 1

    print('=== Finished CAM_viz on all images of the AVA2012 validation set')
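The heart of the loop above is the class activation map arithmetic: each of the 7x7 spatial positions holds a 512-channel feature vector, which is dotted with one class's classifier weights. A standalone numpy sketch of the same computation (random values stand in for real features and weights):

import numpy as np

convmap = np.random.rand(512, 7, 7)   # one image's conv feature maps
W = np.random.rand(512, 2)            # linear_C2 weights: 512 inputs, 2 classes
cam_pos = np.matmul(W[:, 1], convmap.reshape(512, -1)).reshape(7, 7)
# as above, this 7x7 map is then resized to 224x224, colorized and blended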
Example #14
def eval_on_AVA2012(model,
                    sessinit,
                    dataflow,
                    repeat_times,
                    fusion_method='GlobalAverage',
                    output_predictions=True):
    pred_config = PredictConfig(model=model,
                                session_init=sessinit,
                                input_names=['input', 'label'],
                                output_names=['softmax-logits', 'label'])

    # add @ 20171127
    def DeMaxMin_GlobalAverage(dps):
        res01 = []

        for cls in range(2):
            s = dps[:, cls]
            s_sum_denoise = np.sum(s) - np.max(s) - np.min(s)
            res01.append(s_sum_denoise / (s.shape[0] - 2))

        return res01

    # add @ 20171127
    def Median(dps):
        res01 = []

        for cls in range(2):
            res01.append(np.median(dps[:, cls]))

        return res01

    # fuse the repeated predictions for each image and compare with the ground truth
    def accuracyEstimation(log01_, GTs01_):
        batch_size = log01_.shape[0]
        acc01_ = np.zeros((int(batch_size // repeat_times), ))
        y_True_Pred = np.zeros((int(batch_size // repeat_times), 2))
        avg_p = np.zeros((int(batch_size // repeat_times), log01_.shape[1]))

        for i in range(acc01_.shape[0]):
            # change @ 20171127 : refine the fusion approaches
            if fusion_method == 'GlobalAverage':
                avgLog01__ = np.average(
                    log01_[(i * repeat_times):((i + 1) * repeat_times), :],
                    axis=0)
            elif fusion_method == 'DeMaxMin_Average':
                assert repeat_times >= 3, '*** DeMaxMin_Average needs repeat_times >= 3 ***'
                avgLog01__ = DeMaxMin_GlobalAverage(
                    log01_[(i * repeat_times):((i + 1) * repeat_times), :])
            elif fusion_method == 'Median':
                avgLog01__ = Median(
                    log01_[(i * repeat_times):((i + 1) * repeat_times), :])

            pred01__ = 0 if avgLog01__[0] > avgLog01__[1] else 1
            # TODO: confidence gap? or what if aesthetic_level > 2
            # GTs01_ vs pred01__
            acc01_[i] = int(pred01__ == GTs01_[(i * repeat_times)])
            # add @ 20171122
            y_True_Pred[i, 0] = GTs01_[(i * repeat_times)]
            y_True_Pred[i, 1] = pred01__
            # add @ 20171201
            avg_p[i, :] = avgLog01__[:]

        return acc01_, y_True_Pred, avg_p

    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1 = RatioCounter()
    y_True_Pred_list = []
    interval_results = []

    # add @ 20180709: to estimate the time consumption
    elapsed_times = []
    for pred_res in pred.get_result():
        # each image goes through the prediction pipeline repeat_times times,
        # so consecutive groups of repeat_times rows belong to one image
        logs01 = pred_res[0]
        GTs01 = pred_res[1]
        batch_size = logs01.shape[0]
        assert batch_size % repeat_times == 0, \
            '*** batch_size % repeat_times != 0, which makes the accuracyEstimation difficult ***'

        start_time = time.time()
        # change @ 20171122
        # change @ 20171201
        acc01, y_True_Pred_, avg_p = accuracyEstimation(logs01, GTs01)
        elapsed_times.append(time.time() - start_time)

        y_True_Pred_list.append(y_True_Pred_)

        acc1.feed(acc01.sum(), acc01.shape[0])

        # add @ 20171201
        interval_results.append(np.hstack((y_True_Pred_, avg_p)))

    # performance exhibition
    print("--> detailed performance exhibition")
    print("    Top1 Accuracy: {}".format(acc1.ratio))

    # add @ 20171122
    y_True_Pred_Matrix = np.vstack(y_True_Pred_list)
    conf_matrix = confusion_matrix(y_True_Pred_Matrix[:, 0],
                                   y_True_Pred_Matrix[:, 1])
    print("    Confusion matrix is:")
    print(conf_matrix)
    print("        Accuracy of Negative Prediction: ",
          (conf_matrix[0, 0]) / (conf_matrix[0, 0] + conf_matrix[1, 0]))
    print("        Accuracy of Positive Prediction: ",
          (conf_matrix[1, 1]) / (conf_matrix[0, 1] + conf_matrix[1, 1]))
    print("        Recall of Negative Instances   : ",
          (conf_matrix[0, 0]) / (conf_matrix[0, 0] + conf_matrix[0, 1]))
    print("        Recall of Positive Instances   : ",
          (conf_matrix[1, 1]) / (conf_matrix[1, 0] + conf_matrix[1, 1]))

    # add @ 20171201
    if output_predictions:
        print(
            '    and save interval_results to ./interval_results_AVA2012.pkl for further investigation ...'
        )
        with open('./interval_results_AVA2012.pkl', 'wb') as output_stream:
            pickle.dump({'interval_results': interval_results}, output_stream)

    # add @ 20180709: exhibit the information of time consumption
    print('--> average time consumption per image : {0:.3f}ms'.format( \
          1000 * np.sum(elapsed_times) / y_True_Pred_Matrix.shape[0]))
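Each fusion method collapses the repeat_times softmax rows for one image into a single (negative, positive) pair. A worked example of DeMaxMin_GlobalAverage with repeat_times = 4 (numbers are illustrative):

import numpy as np

dps = np.array([[0.9, 0.1],
                [0.2, 0.8],
                [0.6, 0.4],
                [0.5, 0.5]])
# class 0: drop max 0.9 and min 0.2, average the rest: (0.6 + 0.5) / 2 = 0.55
# class 1: drop max 0.8 and min 0.1, average the rest: (0.4 + 0.5) / 2 = 0.45
# 0.55 > 0.45, so the fused prediction is class 0 (negative)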
Example #15
parser.add_argument('--output_type',  # flag name inferred from args.output_type below
                    choices=['label', 'label-prob', 'raw'])
parser.add_argument('--top', default=1, type=int)
args = parser.parse_args()

get_config_func = imp.load_source('config_script', args.config).get_config

# TODO not sure if this script is still working

with tf.Graph().as_default() as G:
    train_config = get_config_func()
    config = PredictConfig(
        inputs=train_config.inputs,
        input_dataset_mapping=[train_config.inputs[0]],  # assume first component is image
        get_model_func=train_config.get_model_func,
        session_init=sessinit.SaverRestore(args.model),
        output_var_names=['output:0'])

    ds = ImageFromFile(args.images, 3, resize=(227, 227))
    ds = BatchData(ds, 128, remainder=True)
    predictor = SimpleDatasetPredictor(config, ds)
    res = predictor.get_all_result()

    if args.output_type == 'label':
        for r in res:
            print(r[0].argsort(axis=1)[:, -args.top:][:, ::-1])
    elif args.output_type == 'label-prob':   # matches the 'label-prob' choice above
        raise NotImplementedError
    elif args.output_type == 'raw':
        print(res)