示例#1
0
def experimenter(dataset, clfs, splits, n_experiments):
    """Run repeated experiments over every (split, classifier) pair.

    dataset: (name, dataset_object) pair; the object must expose download()
        and the fold-generator methods named in `splits`.
    clfs: iterable of (name, classifier) pairs.
    splits: iterable of (name, method_name) pairs; each method yields
        (X_train, y_train, X_test, y_test) folds.
    n_experiments: number of full repetitions; results are persisted and
        scored after each repetition.
    """
    dataset_name, dataset_obj = dataset
    print("### Dataset: ", dataset_name, "###")
    write_in_file("execution_time", f"{dataset_name}\n")
    dataset_obj.download()
    print("Performing Experiments.")
    for experiment in range(1, n_experiments + 1):
        results = []
        print("## Experiment N.", experiment, "##")
        for split_name, split_method in splits:
            for clf_name, clf_obj in clfs:
                print(split_name, clf_name)
                write_in_file("execution_time", f"{split_name} - {clf_name}\n")
                fold_iter = getattr(dataset_obj, split_method)()
                for fold_number, (X_train, y_train, X_test,
                                  y_test) in enumerate(fold_iter, start=1):
                    write_in_file("execution_time", f"{fold_number}: ")
                    print("fold_number: ", fold_number)
                    y_pred, y_proba = run_train_test(clf_obj, X_train, y_train,
                                                     X_test)
                    results.append([
                        dataset_name, split_name, clf_name, fold_number,
                        y_test, y_pred, y_proba
                    ])
        saved_results = persist_results.save_results(results)
        metrics.scores(saved_results)
示例#2
0
def val_e_sigmoid(model, valoader, nclass=2, nogpu=False):
    """Validate a sigmoid-head model: threshold channel 0 at 0.5 and report
    IoU against both the regular mask and the edge mask.

    Returns (miou, eiou), each the first element of scores().
    """
    model.eval()
    gts, preds, gts_e = [], [], []
    for img_id, (img, gt_mask, _, gt_emask, _) in enumerate(valoader):
        gt_mask = gt_mask.numpy()[0]
        gt_emask = gt_emask.numpy()[0]
        # torch.no_grad() replaces the removed `volatile=True` flag and
        # matches the inference style used by val_e().
        with torch.no_grad():
            if nogpu:
                img = Variable(img)
            else:
                img = Variable(img.cuda())
            out_pred_map = model(img)
            out_pred_map = nn.Sigmoid()(out_pred_map)
        # Get hard prediction
        if nogpu:
            soft_pred = out_pred_map.data.numpy()[0]
        else:
            soft_pred = out_pred_map.data.cpu().numpy()[0]

        # Crop padded prediction to the GT size, then hard-threshold at 0.5.
        soft_pred = soft_pred[:, :gt_mask.shape[0], :gt_mask.shape[1]]
        soft_pred[soft_pred >= 0.5] = 1
        soft_pred[soft_pred < 0.5] = 0
        hard_pred = soft_pred[0].astype(np.uint8)
        for gt_, gte_, pred_ in zip(gt_mask, gt_emask, hard_pred):
            gts.append(gt_)
            gts_e.append(gte_)
            preds.append(pred_)
    miou, _ = scores(gts, preds, n_class=nclass)
    eiou, _ = scores(gts_e, preds, n_class=nclass)
    return miou, eiou
def val_e(model, valoader, nclass=2, nogpu=False, Centroids=False):
    """Validate `model` on `valoader`, reporting IoU against both the
    regular mask and the edge mask.

    Centroids=True means the model returns (prediction, centroids); the
    centroids are ignored here. Returns (miou, eiou).
    """
    model.eval()
    gts, preds, gts_e = [], [], []
    for img_id, batch in enumerate(valoader):
        img, gt_mask, _, gt_emask, name = batch
        gt_mask = gt_mask.numpy()[0]
        gt_emask = gt_emask.numpy()[0]
        with torch.no_grad():
            tensor = Variable(img) if nogpu else Variable(img.cuda())
            output = model(tensor)
            if Centroids:
                # Model returns (prediction, centroids); keep the prediction.
                output = output[0]
            raw = output.data if nogpu else output.data.cpu()
            soft_pred = raw.numpy()[0]
            # Per-pixel argmax over the class channel gives hard labels.
            hard_pred = np.argmax(soft_pred, axis=0).astype(np.uint8)
            for gt_row, gte_row, pred_row in zip(gt_mask, gt_emask,
                                                 hard_pred):
                gts.append(gt_row)
                gts_e.append(gte_row)
                preds.append(pred_row)
    miou, _ = scores(gts, preds, n_class=nclass)
    eiou, _ = scores(gts_e, preds, n_class=nclass)
    return miou, eiou
示例#4
0
def validate(model, valloader, n_class):
    """Run one validation pass over `valloader`.

    Returns (average loss, scores(...) result for the accumulated
    ground truths and predictions).
    """
    losses = AverageMeter()
    model.eval()
    gts, preds = [], []
    for i, (images, labels) in enumerate(valloader):
        images = Variable(images.cuda())
        labels = Variable(labels.cuda())
        outputs = model(images)
        # Some models return (logits, aux); keep only the logits. This
        # check (and everything below) must run per batch — the original
        # had it dedented out of the loop, so loss and predictions were
        # only accumulated for the final batch.
        if isinstance(outputs, tuple):
            outputs = outputs[0]

        loss = cross_entropy2d(outputs, labels)
        # .item() replaces the removed 0-dim indexing `loss.data[0]`.
        losses.update(loss.item(), images.size(0))

        gt = labels.data.cpu().numpy()
        pred = outputs.data.max(1)[1].cpu().numpy()

        for gt_, pred_ in zip(gt, pred):
            gts.append(gt_)
            preds.append(pred_)
    score = scores(gts, preds, n_class=n_class)

    return losses.avg, score
def val(model, valoader, nclass=21, nogpu=False):
    """Compute the mean IoU of `model` over `valoader`.

    Puts the model in eval mode for the pass and restores train mode
    before returning. Returns miou (first element of scores()).
    """
    model.eval()
    gts, preds = [], []
    for img_id, (img, gt_mask, _) in tqdm(enumerate(valoader)):
        gt_mask = gt_mask.numpy()[0]
        # torch.no_grad() replaces the removed `volatile=True` flag.
        with torch.no_grad():
            if nogpu:
                img = Variable(img)
            else:
                img = Variable(img.cuda())
            out_pred_map = model(img)

        # Get hard prediction
        if nogpu:
            soft_pred = out_pred_map.data.numpy()[0]
        else:
            soft_pred = out_pred_map.data.cpu().numpy()[0]

        # Crop padded prediction to the GT size before the argmax.
        soft_pred = soft_pred[:, :gt_mask.shape[0], :gt_mask.shape[1]]
        hard_pred = np.argmax(soft_pred, axis=0).astype(np.uint8)
        for gt_, pred_ in zip(gt_mask, hard_pred):
            gts.append(gt_)
            preds.append(pred_)
    miou, _ = scores(gts, preds, n_class=nclass)
    model.train()

    return miou
示例#6
0
def test_large_img(args):
    """Segment a large image at several crop scales and fuse by majority vote.

    Loads a snapshot trained under DataParallel (stripping the 'module.'
    key prefix), writes per-scale color masks to args.tempdir, the fused
    label map to args.out_path and its color visualization to
    args.vis_out_path, and — when args.img_label_path is given — prints the
    evaluation scores.
    """
    # Weights were saved from a DataParallel wrapper: every key carries a
    # 'module.' prefix that must be stripped before load_state_dict().
    model = get_model(args.model_path.split('/')[-2], 5)
    state_dict = torch.load(args.model_path).state_dict()
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # strip the leading 'module.'
        new_state_dict[name] = v
    model.load_state_dict(new_state_dict)
    model = DataParallel(model.cuda(),
                         device_ids=[i for i in range(len(args.gpu))])

    model.cuda()
    model.eval()

    pred_labels_list = []
    for crop_scale in args.crop_scales:
        pred_labels_single = process_single_scale(args, model, crop_scale)
        pred_labels_list.append(pred_labels_single)
        color_mask = segmap(pred_labels_single)
        test_id = os.path.basename(args.img_path)[0]
        misc.imsave(
            os.path.join(args.tempdir,
                         "%s_temp_scale_%d.png" % (test_id, crop_scale)),
            color_mask)

    if (len(args.crop_scales) == 1):
        pred_labels = pred_labels_list[0]
    else:
        # Per-pixel majority vote across the per-scale predictions.
        average_map = np.zeros_like(pred_labels_list[0])
        for i in range(average_map.shape[0]):
            for j in range(average_map.shape[1]):
                pre_list = []
                for index in range(len(args.crop_scales)):
                    pre_list.append(pred_labels_list[index][i][j])
                most_label = np.argmax(np.bincount(pre_list))
                average_map[i][j] = most_label
        pred_labels = average_map

    misc.imsave(args.out_path, np.asarray(pred_labels, dtype=np.uint8))
    color_mask = segmap(pred_labels)
    misc.imsave(args.vis_out_path, color_mask)

    if args.img_label_path is not None:
        gts = misc.imread(args.img_label_path)
        score, class_iou = scores(gts, pred_labels, n_class=n_classes)

        # Python 3 print calls (the originals were Python 2 statements,
        # a syntax error under Python 3).
        for k, v in score.items():
            print(k, v)

        for i in range(n_classes):
            print(i, class_iou[i])
def val(model, valoader, epoch, nclass=2, use_cuda=True):
    """Evaluate `model` on `valoader` and return the per-class IoU table
    (the second element of scores()).

    On every 20th epoch the prediction and ground-truth masks are also
    written out via save_picture().
    """
    model.eval()
    gts, preds = [], []
    for img_id, (img, gt_mask, _, new_id) in enumerate(valoader):
        gt_mask = gt_mask.numpy()[0]
        net_input = img.cuda() if use_cuda else img
        out_pred_map = model(net_input)
        # Pull the per-class score map back to numpy.
        if use_cuda:
            soft_pred = out_pred_map.data.cpu().numpy()[0]
        else:
            soft_pred = out_pred_map.data.numpy()[0]

        # Crop padding to the GT size, then take the per-pixel argmax.
        soft_pred = soft_pred[:, :gt_mask.shape[0], :gt_mask.shape[1]]
        hard_pred = np.argmax(soft_pred, axis=0).astype(np.uint8)

        # Accumulate whole masks (one entry per image, not per row).
        gts.append(gt_mask)
        preds.append(hard_pred)

        if epoch % 20 == 0:
            save_picture(hard_pred, new_id, mask=True, generate=True)
            save_picture(gt_mask, new_id, mask=True, generate=False)

    _, miou = scores(gts, preds, n_class=nclass)

    return miou
示例#8
0
def main():
    """Load a trained generator snapshot and report mean IoU on the
    corrosion test set.

    Command line: dataset_dir snapshot [--val_orig] [--norm]
    """
    home_dir = os.path.dirname(os.path.realpath(__file__))
    # Removed: a dead `test_img_list = os` line that bound the os module,
    # and an unused hard-coded dataset_dir constant (args.dataset_dir is
    # what is actually used below).

    parser = argparse.ArgumentParser()
    parser.add_argument("dataset_dir",
                        help="A directory containing img (Images) \
                        and cls (GT Segmentation) folder")
    parser.add_argument("snapshot", help="Snapshot with the saved model")
    parser.add_argument("--val_orig",
                        help="Do Inference on original size image.\
                        Otherwise, crop to 321x321 like in training ",
                        action='store_true')
    parser.add_argument("--norm", help="Normalize the test images",
                        action='store_true')
    args = parser.parse_args()

    if args.val_orig:
        # Original-size inference: optional normalization, random 321x321 crop.
        img_transform = transforms.Compose([ToTensor()])
        if args.norm:
            img_transform = transforms.Compose(
                [ToTensor(), NormalizeOwn(dataset='corrosion')])
        label_transform = transforms.Compose(
            [IgnoreLabelClass(), ToTensorLabel()])
        co_transform = transforms.Compose([RandomSizedCrop((321, 321))])

        testset = Corrosion(home_dir, args.dataset_dir,
                            img_transform=img_transform,
                            label_transform=label_transform,
                            co_transform=co_transform, train_phase=False)
        testloader = DataLoader(testset, batch_size=1)
    else:
        # Padded inference path.
        img_transform = transforms.Compose([ZeroPadding(), ToTensor()])
        if args.norm:
            img_transform = transforms.Compose(
                [ZeroPadding(),
                 ToTensor(),
                 NormalizeOwn(dataset='corrosion')])
        label_transform = transforms.Compose(
            [IgnoreLabelClass(), ToTensorLabel()])

        testset = Corrosion(home_dir, args.dataset_dir,
                            img_transform=img_transform,
                            label_transform=label_transform,
                            train_phase=False)
        testloader = DataLoader(testset, batch_size=1)

    generator = deeplabv2.ResDeeplab()
    # Explicit error instead of `assert` (asserts vanish under `python -O`).
    if not os.path.isfile(args.snapshot):
        raise FileNotFoundError(args.snapshot)
    snapshot = torch.load(args.snapshot)

    # Strip the DataParallel 'module.' prefix from the saved keys.
    saved_net = {
        k.partition('module.')[2]: v
        for k, v in snapshot['state_dict'].items()
    }
    print('Snapshot Loaded')
    generator.load_state_dict(saved_net)
    generator.eval()
    generator = nn.DataParallel(generator).cuda()
    print('Generator Loaded')
    n_classes = 2

    gts, preds = [], []

    print('Prediction Goint to Start')

    # TODO: Crop out the padding before prediction
    for img_id, (img, gt_mask, _) in enumerate(testloader):
        print("Generating Predictions for Image {}".format(img_id))
        gt_mask = gt_mask.numpy()[0]
        img = Variable(img.cuda())
        out_pred_map = generator(img)

        # Get hard prediction
        soft_pred = out_pred_map.data.cpu().numpy()[0]
        soft_pred = soft_pred[:, :gt_mask.shape[0], :gt_mask.shape[1]]
        hard_pred = np.argmax(soft_pred, axis=0).astype(np.uint8)

        for gt_, pred_ in zip(gt_mask, hard_pred):
            gts.append(gt_)
            preds.append(pred_)
    score, class_iou = scores(gts, preds, n_class=n_classes)

    print("Mean IoU: {}".format(score))
示例#9
0
def evaluate_discriminator():
    """Evaluate a trained generator/discriminator pair on the corrosion
    test set.

    Saves colorized generator and discriminator predictions plus overlay
    visualizations under results/, and prints the mean IoU of the
    generator's predictions.

    Command line: dataset_dir snapshot_g snapshot_d [--val_orig] [--norm]
    """
    home_dir = os.path.dirname(os.path.realpath(__file__))

    parser = argparse.ArgumentParser()
    parser.add_argument("dataset_dir",
                        help="A directory containing img (Images) \
                        and cls (GT Segmentation) folder")
    parser.add_argument("snapshot_g",
                        help="Snapshot with the saved generator model")
    parser.add_argument("snapshot_d",
                        help="Snapshot with the saved discriminator model")
    parser.add_argument("--val_orig",
                        help="Do Inference on original size image.\
                        Otherwise, crop to 320x320 like in training ",
                        action='store_true')
    parser.add_argument("--norm", help="Normalize the test images",
                        action='store_true')
    args = parser.parse_args()

    if args.val_orig:
        img_transform = transforms.Compose([ToTensor()])
        if args.norm:
            img_transform = transforms.Compose(
                [ToTensor(), NormalizeOwn(dataset='corrosion')])
        label_transform = transforms.Compose(
            [IgnoreLabelClass(), ToTensorLabel()])
        co_transform = transforms.Compose([ResizedImage3((320, 320))])

        testset = Corrosion(home_dir, args.dataset_dir,
                            img_transform=img_transform,
                            label_transform=label_transform,
                            co_transform=co_transform, train_phase=False)
        testloader = DataLoader(testset, batch_size=1)
    else:
        img_transform = transforms.Compose([ZeroPadding(), ToTensor()])
        if args.norm:
            # (was `img_transform = img_transform = ...` — duplicated target)
            img_transform = transforms.Compose(
                [ZeroPadding(),
                 ToTensor(),
                 NormalizeOwn(dataset='corrosion')])
        label_transform = transforms.Compose(
            [IgnoreLabelClass(), ToTensorLabel()])

        testset = Corrosion(home_dir, args.dataset_dir,
                            img_transform=img_transform,
                            label_transform=label_transform,
                            train_phase=False)
        testloader = DataLoader(testset, batch_size=1)

    generator = unet.AttU_Net()
    print(args.snapshot_g)
    # Explicit errors instead of asserts (asserts vanish under `python -O`).
    if not os.path.isfile(args.snapshot_g):
        raise FileNotFoundError(args.snapshot_g)
    snapshot_g = torch.load(args.snapshot_g)

    discriminator = Dis(in_channels=2)
    print(args.snapshot_d)
    if not os.path.isfile(args.snapshot_d):
        raise FileNotFoundError(args.snapshot_d)
    snapshot_d = torch.load(args.snapshot_d)

    # Strip the DataParallel 'module.' prefix from the saved keys.
    saved_net = {
        k.partition('module.')[2]: v
        for k, v in snapshot_g['state_dict'].items()
    }
    print('Generator Snapshot Loaded')
    generator.load_state_dict(saved_net)
    generator.eval()
    generator = nn.DataParallel(generator).cuda()
    print('Generator Loaded')

    saved_net_d = {
        k.partition('module.')[2]: v
        for k, v in snapshot_d['state_dict'].items()
    }
    print('Discriminator Snapshot Loaded')
    discriminator.load_state_dict(saved_net_d)
    discriminator.eval()
    discriminator = nn.DataParallel(discriminator).cuda()
    print('discriminator Loaded')
    n_classes = 2

    gts, preds = [], []
    print('Prediction Goint to Start')
    colorize = VOCColorize()
    palette = make_palette(2)
    IMG_DIR = osp.join(args.dataset_dir, 'corrosion/JPEGImages')
    # TODO: Crop out the padding before prediction
    for img_id, (img, gt_mask, _, gte_mask, name) in enumerate(testloader):
        print("Generating Predictions for Image {}".format(img_id))
        gt_mask = gt_mask.numpy()[0]
        img = Variable(img.cuda())
        img_path = osp.join(IMG_DIR, name[0] + '.jpg')
        print(img_path)
        img_array = cv2.imread(img_path)
        img_array = cv2.resize(img_array, (320, 320),
                               interpolation=cv2.INTER_AREA)
        img_array = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
        out_pred_map = generator(img)

        # Get hard prediction from the generator.
        soft_pred = out_pred_map.data.cpu().numpy()[0]
        soft_pred = soft_pred[:, :gt_mask.shape[0], :gt_mask.shape[1]]
        hard_pred = np.argmax(soft_pred, axis=0).astype(np.uint8)

        # Discriminator confidence on the generator's output map.
        dis_conf = discriminator(out_pred_map)
        dis_confsmax = nn.Softmax2d()(dis_conf)
        dis_soft_pred = dis_confsmax.data.cpu().numpy()[0]
        dis_hard_pred = np.argmax(dis_soft_pred, axis=0).astype(np.uint8)

        # `np.int` was removed in NumPy 1.24; plain `int` is equivalent.
        output = np.asarray(hard_pred, dtype=int)
        filename = os.path.join('results', '{}.png'.format(name[0]))
        color_file = Image.fromarray(
            colorize(output).transpose(1, 2, 0), 'RGB')
        color_file.save(filename)

        masked_im = Image.fromarray(vis_seg(img_array, output, palette))
        masked_im.save(filename[0:-4] + '_vis.png')

        # Discriminator output.
        dis_output = np.asarray(dis_hard_pred, dtype=int)
        dis_filename = os.path.join('results',
                                    '{}_dis.png'.format(name[0][0:-4]))
        dis_color_file = Image.fromarray(
            colorize(dis_output).transpose(1, 2, 0), 'RGB')
        dis_color_file.save(dis_filename)

        for gt_, pred_ in zip(gt_mask, hard_pred):
            gts.append(gt_)
            preds.append(pred_)
    score, class_iou = scores(gts, preds, n_class=n_classes)
    print("Mean IoU: {}".format(score))
示例#10
0
def validate(net, val_dataloader, epoch, save_valid_imags_dir,
             save_spatial_att_dir, save_roc_dir):
    """Validate a two-input change-detection network.

    For every batch: runs the net, saves similarity heatmaps and a rescaled
    RGB prediction image, and accumulates per-condition/per-class FP/FN
    counts. Afterwards computes PR/F-measure metrics per condition, writes
    ROC plots, prints the segmentation scores and the average max-F score,
    and returns score['Mean IoU :'].
    """
    net.eval()
    gts, preds = [], []
    metric_for_class = util.init_metric_for_class_for_cd2014(number_class='2')
    for batch_idx, batch in enumerate(val_dataloader):

        inputs1, input2, targets, filename, height, width = batch
        height, width, filename = (height.numpy()[0], width.numpy()[0],
                                   filename[0])
        # filename looks like '<root>/<sub...>/<real_name>'; split into the
        # pieces used for metric bookkeeping and output paths.
        root_name = filename[:filename.find('/')]
        sub_name = filename[filename.find('/') + 1:filename.rfind('/') - 3]
        real_name = filename[filename.rfind('/') + 1:]
        inputs1, input2, targets = (inputs1.cuda(), input2.cuda(),
                                    targets.cuda())
        # torch.no_grad() replaces the removed `volatile=True` flag.
        with torch.no_grad():
            inputs1, inputs2, targets = (Variable(inputs1), Variable(input2),
                                         Variable(targets))
            seg_out, output_conv5, output_fc = net(inputs1, inputs2)
        # Explicit dim=1 (class channel); the implicit default is deprecated.
        prob_seg = F.softmax(seg_out, dim=1).data.cpu().numpy()[0]
        #### similar_distance_map ####
        out_conv5_t0, out_conv5_t1 = output_conv5
        out_fc_t0, out_fc_t1 = output_fc
        single_layer_similar_heatmap_visual(out_conv5_t0, out_conv5_t1,
                                            save_spatial_att_dir, epoch,
                                            filename, 'conv5')
        single_layer_similar_heatmap_visual(out_fc_t0, out_fc_t1,
                                            save_spatial_att_dir, epoch,
                                            filename, 'fc')
        #### seg prediction ###
        seg_pred = np.squeeze(seg_out.data.max(1)[1].cpu().numpy(), axis=0)
        pred_rgb = dates.decode_segmap(seg_pred, plot=False)[:, :, ::-1]
        pred_rgb_rescal = cv2.resize(pred_rgb, (width, height))
        preds.append(seg_pred)
        gt = targets.data.cpu().numpy()
        for gt_ in gt:
            gts.append(gt_)
        # Accumulate per-class FP/FN/pos/neg counts for this condition.
        for i in range(len(prob_seg)):
            prob_cl = prob_seg[i]
            FN, FP, posNum, negNum = mc.eval_image_rewrite(gt[0], prob_cl, i)
            metric_for_class[root_name][i]['total_fp'] += FP
            metric_for_class[root_name][i]['total_fn'] += FN
            metric_for_class[root_name][i]['total_posnum'] += posNum
            metric_for_class[root_name][i]['total_negnum'] += negNum

        save_valid_dir = os.path.join(save_valid_imags_dir,
                                      'epoch_' + str(epoch))
        check_dir(save_valid_dir)
        root_dir = os.path.join(save_valid_dir, root_name)
        sub_dir = os.path.join(save_valid_dir, root_name, sub_name)
        check_dir(root_dir)
        check_dir(sub_dir)
        save_fig_dir = os.path.join(sub_dir,
                                    real_name[:real_name.find('.')] + '.jpg')
        cv2.imwrite(save_fig_dir, pred_rgb_rescal)

    # Sweep 256 thresholds and attach PR/F-measure metrics per condition.
    thresh = np.array(range(0, 256)) / 255.0
    conds = metric_for_class.keys()
    for cond_name in conds:
        for i in range(2):
            total_posnum = metric_for_class[cond_name][i]['total_posnum']
            total_negnum = metric_for_class[cond_name][i]['total_negnum']
            total_fn = metric_for_class[cond_name][i]['total_fn']
            total_fp = metric_for_class[cond_name][i]['total_fp']
            metric_dict = mc.pxEval_maximizeFMeasure(total_posnum,
                                                     total_negnum,
                                                     total_fn,
                                                     total_fp,
                                                     thresh=thresh)
            metric_for_class[cond_name][i].setdefault('metric', metric_dict)

    f_score_total = 0.0
    for cond_name in conds:
        for i in range(2):
            metric = metric_for_class[cond_name][i]['metric']
            pr, recall, f_score = (metric['precision'], metric['recall'],
                                   metric['MaxF'])
            roc_save_epoch_dir = os.path.join(save_roc_dir, str(epoch))
            check_dir(roc_save_epoch_dir)
            roc_save_epoch_cat_dir = os.path.join(roc_save_epoch_dir,
                                                  cond_name)
            check_dir(roc_save_epoch_cat_dir)
            mc.save_metric2disk(metric_for_class[cond_name],
                                roc_save_epoch_cat_dir)
            roc_save_dir = os.path.join(
                roc_save_epoch_cat_dir,
                '_' + metric_for_class[cond_name][i]['name'] + '_roc.png')
            mc.plotPrecisionRecall(pr, recall, roc_save_dir, benchmark_pr=None)
            f_score_total += f_score

    score, class_iou = metric.scores(gts, preds, n_class=2)
    # Python 3 print calls (originals were Python 2 statements).
    for k, v in score.items():
        print(k, v)

    for i in range(2):
        print(i, class_iou[i])

    print(f_score_total / (2 * len(conds)))
    return score['Mean IoU :']
示例#11
0
def main():
    """Score a saved results CSV, running relative to this file's directory
    so the hard-coded CSV path resolves regardless of the caller's CWD."""
    abspath = os.path.abspath(__file__)
    dname = os.path.dirname(abspath)
    os.chdir(dname)

    metrics.scores("2021.07.06_21.41.08.csv")

# NOTE(review): an over-indented orphan fragment followed main() here —
# a tail of a val()-style evaluation loop referencing `model`, `img`,
# `use_cuda`, `gt_mask`, `epoch`, `new_id` and `nclass` with no enclosing
# function. It was an IndentationError that prevented the file from
# parsing, so it has been removed; the complete loop exists in val() above.

if __name__ == '__main__':
    # Tiny smoke test: IoU of a hand-made prediction against a 4x4 mask.
    ground_truth = np.array([[0, 0, 0, 0],
                             [0, 1, 1, 0],
                             [0, 0, 1, 0],
                             [0, 0, 0, 0]])
    prediction = np.array([[0, 0, 0, 0],
                           [0, 1, 0, 0],
                           [0, 1, 0, 0],
                           [0, 0, 0, 0]])
    _, dic = scores(ground_truth, prediction, n_class=2)
    print(dic)