Example #1
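A minimal import header for this snippet, assuming the pre-0.4 PyTorch API that `volatile=True` implies; `util`, `mc`, `single_layer_similar_heatmap_visual`, and `check_dir` are project-local helpers and are not reproduced here:

import os

import numpy as np
from torch.autograd import Variable  # pre-0.4 PyTorch API, matching volatile= below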
def validate(net, val_dataloader, epoch, save_change_map_dir, save_roc_dir):

    net.eval()
    cont_conv5_total, cont_fc_total, cont_embedding_total, num = 0.0, 0.0, 0.0, 0.0
    metric_for_conditions = util.init_metric_for_class_for_cmu(1)
    for batch_idx, batch in enumerate(val_dataloader):
        inputs1, input2, targets, filename, height, width = batch
        height, width, filename = height.numpy()[0], width.numpy()[0], filename[0]
        inputs1, input2, targets = inputs1.cuda(), input2.cuda(), targets.cuda()
        # volatile=True marks inference-only Variables (pre-0.4 PyTorch API);
        # volatility propagates through the graph, so wrapping the inputs suffices.
        inputs1, inputs2, targets = Variable(inputs1, volatile=True), Variable(
            input2, volatile=True), Variable(targets)
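        # Siamese forward pass: each head returns a (t0, t1) feature pair.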
        out_conv5, out_fc, out_embedding = net(inputs1, inputs2)
        out_conv5_t0, out_conv5_t1 = out_conv5
        out_fc_t0, out_fc_t1 = out_fc
        out_embedding_t0, out_embedding_t1 = out_embedding
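        # Per-layer L2 distance heatmaps, written under save_change_map_dir.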
        conv5_distance_map = single_layer_similar_heatmap_visual(
            out_conv5_t0, out_conv5_t1, save_change_map_dir, epoch, filename,
            'conv5', 'l2')
        fc_distance_map = single_layer_similar_heatmap_visual(
            out_fc_t0, out_fc_t1, save_change_map_dir, epoch, filename, 'fc',
            'l2')
        embedding_distance_map = single_layer_similar_heatmap_visual(
            out_embedding_t0, out_embedding_t1, save_change_map_dir, epoch,
            filename, 'embedding', 'l2')
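        # RMS contrast of each distance map, accumulated for a dataset-wide mean.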
        cont_conv5 = mc.RMS_Contrast(conv5_distance_map)
        cont_fc = mc.RMS_Contrast(fc_distance_map)
        cont_embedding = mc.RMS_Contrast(embedding_distance_map)
        cont_conv5_total += cont_conv5
        cont_fc_total += cont_fc
        cont_embedding_total += cont_embedding
        num += 1
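        # Score the embedding-layer distance map against the ground-truth mask.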
        prob_change = embedding_distance_map[0][0]
        gt = targets.data.cpu().numpy()
        FN, FP, posNum, negNum = mc.eval_image_rewrite(gt[0],
                                                       prob_change,
                                                       cl_index=1)
        metric_for_conditions[0]['total_fp'] += FP
        metric_for_conditions[0]['total_fn'] += FN
        metric_for_conditions[0]['total_posnum'] += posNum
        metric_for_conditions[0]['total_negnum'] += negNum
    # Mean RMS contrast over the validation set, computed once after the loop.
    cont_conv5_mean = cont_conv5_total / num
    cont_fc_mean = cont_fc_total / num
    cont_embedding_mean = cont_embedding_total / num

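    # Sweep 256 thresholds in [0, 1] and pick the F-measure-maximizing point.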
    thresh = np.array(range(0, 256)) / 255.0
    conds = metric_for_conditions.keys()
    for cond_name in conds:
        total_posnum = metric_for_conditions[cond_name]['total_posnum']
        total_negnum = metric_for_conditions[cond_name]['total_negnum']
        total_fn = metric_for_conditions[cond_name]['total_fn']
        total_fp = metric_for_conditions[cond_name]['total_fp']
        metric_dict = mc.pxEval_maximizeFMeasure(total_posnum,
                                                 total_negnum,
                                                 total_fn,
                                                 total_fp,
                                                 thresh=thresh)
        metric_for_conditions[cond_name].setdefault('metric', metric_dict)
        metric_for_conditions[cond_name].setdefault('contrast_conv5',
                                                    cont_conv5_mean)
        metric_for_conditions[cond_name].setdefault('contrast_fc',
                                                    cont_fc_mean)
        metric_for_conditions[cond_name].setdefault('contrast_embedding',
                                                    cont_embedding_mean)

    f_score_total = 0.0
    for cond_name in conds:
        m = metric_for_conditions[cond_name]['metric']
        pr, recall, f_score = m['precision'], m['recall'], m['MaxF']
        roc_save_epoch_dir = os.path.join(save_roc_dir, str(epoch))
        check_dir(roc_save_epoch_dir)
        mc.save_PTZ_metric2disk(metric_for_conditions[cond_name],
                                roc_save_epoch_dir)
        roc_save_dir = os.path.join(roc_save_epoch_dir,
                                    '_' + str(cond_name) + '_roc.png')
        mc.plotPrecisionRecall(pr, recall, roc_save_dir, benchmark_pr=None)
        f_score_total += f_score

    print(f_score_total / len(conds))
    return f_score_total / len(conds)
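
A minimal call sketch, assuming a loader that yields (img_t0, img_t1, mask, filename, height, width) tuples as unpacked above; names and paths are illustrative placeholders:

# Hypothetical usage; val_loader and both directories are placeholders.
mean_max_f = validate(net, val_loader, epoch=0,
                      save_change_map_dir='./output/change_maps',
                      save_roc_dir='./output/roc')
print('mean MaxF over conditions:', mean_max_f)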
Example #2
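As in Example #1, only the framework imports are sketched; `util`, `mc`, `dates` (segmentation-map decoding), `metric`, `single_layer_similar_heatmap_visual`, and `check_dir` are project-local:

import os

import cv2
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable  # pre-0.4 PyTorch API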
def validate(net, val_dataloader, epoch, save_valid_imags_dir,
             save_spatial_att_dir, save_roc_dir):

    net.eval()
    gts, preds = [], []
    metric_for_class = util.init_metric_for_class_for_cd2014(number_class='2')
    for batch_idx, batch in enumerate(val_dataloader):

        inputs1, input2, targets, filename, height, width = batch
        height, width, filename = height.numpy()[0], width.numpy()[0], filename[0]
        # Split "<root>/.../<file>" into root, sub-directory (minus its last
        # three characters), and file name.
        root_name = filename[:filename.find('/')]
        sub_name = filename[filename.find('/') + 1:filename.rfind('/') - 3]
        real_name = filename[filename.rfind('/') + 1:]
        inputs1, input2, targets = inputs1.cuda(), input2.cuda(), targets.cuda()
        # volatile=True marks inference-only Variables (pre-0.4 PyTorch API).
        inputs1, inputs2, targets = Variable(inputs1, volatile=True), Variable(
            input2, volatile=True), Variable(targets)
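        # Forward pass: segmentation logits plus (t0, t1) feature pairs at two depths.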
        seg_out, output_conv5, output_fc = net(inputs1, inputs2)
        # Per-pixel class probabilities for the (single) image in the batch.
        prob_seg = F.softmax(seg_out, dim=1).data.cpu().numpy()[0]
        #### similar_distance_map ####
        out_conv5_t0, out_conv5_t1 = output_conv5
        out_fc_t0, out_fc_t1 = output_fc
        single_layer_similar_heatmap_visual(out_conv5_t0, out_conv5_t1,
                                            save_spatial_att_dir, epoch,
                                            filename, 'conv5')
        single_layer_similar_heatmap_visual(out_fc_t0, out_fc_t1,
                                            save_spatial_att_dir, epoch,
                                            filename, 'fc')
        #### seg prediction ###
        seg_pred = np.squeeze(seg_out.data.max(1)[1].cpu().numpy(), axis=0)
        pred_rgb = dates.decode_segmap(seg_pred, plot=False)[:, :, ::-1]
        pred_rgb_rescal = cv2.resize(pred_rgb, (width, height))
        preds.append(seg_pred)
        gt = targets.data.cpu().numpy()
        for gt_ in gt:
            gts.append(gt_)
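        # Accumulate per-class pixel statistics (two classes: presumably
        # unchanged vs. changed, per the CD2014 setup).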
        for i in range(len(prob_seg)):
            prob_cl = prob_seg[i]
            FN, FP, posNum, negNum = mc.eval_image_rewrite(gt[0], prob_cl, i)
            metric_for_class[root_name][i]['total_fp'] += FP
            metric_for_class[root_name][i]['total_fn'] += FN
            metric_for_class[root_name][i]['total_posnum'] += posNum
            metric_for_class[root_name][i]['total_negnum'] += negNum

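        # Write the colorized prediction to save_valid_imags_dir/epoch_<N>/<root>/<sub>/.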
        save_valid_dir = os.path.join(save_valid_imags_dir,
                                      'epoch_' + str(epoch))
        check_dir(save_valid_dir)
        root_dir = os.path.join(save_valid_dir, root_name)
        sub_dir = os.path.join(save_valid_dir, root_name, sub_name)
        check_dir(root_dir)
        check_dir(sub_dir)
        save_fig_dir = os.path.join(sub_dir,
                                    real_name[:real_name.find('.')] + '.jpg')
        cv2.imwrite(save_fig_dir, pred_rgb_rescal)

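    # As in Example #1: sweep 256 thresholds and maximize the F-measure per
    # condition and class.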
    thresh = np.array(range(0, 256)) / 255.0
    conds = metric_for_class.keys()
    for cond_name in conds:
        for i in range(2):
            total_posnum = metric_for_class[cond_name][i]['total_posnum']
            total_negnum = metric_for_class[cond_name][i]['total_negnum']
            total_fn = metric_for_class[cond_name][i]['total_fn']
            total_fp = metric_for_class[cond_name][i]['total_fp']
            metric_dict = mc.pxEval_maximizeFMeasure(total_posnum,
                                                     total_negnum,
                                                     total_fn,
                                                     total_fp,
                                                     thresh=thresh)
            metric_for_class[cond_name][i].setdefault('metric', metric_dict)
    f_score_total = 0.0
    for cond_name in conds:
        for i in range(2):
            m = metric_for_class[cond_name][i]['metric']
            pr, recall, f_score = m['precision'], m['recall'], m['MaxF']
            roc_save_epoch_dir = os.path.join(save_roc_dir, str(epoch))
            check_dir(roc_save_epoch_dir)
            roc_save_epoch_cat_dir = os.path.join(roc_save_epoch_dir,
                                                  cond_name)
            check_dir(roc_save_epoch_cat_dir)
            mc.save_metric2disk(metric_for_class[cond_name],
                                roc_save_epoch_cat_dir)
            roc_save_dir = os.path.join(
                roc_save_epoch_cat_dir,
                '_' + metric_for_class[cond_name][i]['name'] + '_roc.png')
            mc.plotPrecisionRecall(pr, recall, roc_save_dir, benchmark_pr=None)
            f_score_total += f_score
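    # Aggregate segmentation scores and per-class IoU over the whole set.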
    score, class_iou = metric.scores(gts, preds, n_class=2)
    for k, v in score.items():
        print(k, v)

    for i in range(2):
        print(i, class_iou[i])

    print(f_score_total / (2 * len(conds)))
    return score['Mean IoU :']
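
A matching call sketch for this variant; the loader contract is the same and all three output directories are placeholders:

# Hypothetical usage; every path below is illustrative.
mean_iou = validate(net, val_loader, epoch=0,
                    save_valid_imags_dir='./output/preds',
                    save_spatial_att_dir='./output/attention',
                    save_roc_dir='./output/roc')
print('Mean IoU:', mean_iou)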