Example #1
def predict_img(net, full_img, gpu=False):
    # split the resized image into left/right squares, predict a mask for each
    # half, merge them back to full width, then refine with a dense CRF
    img = resize_and_crop(full_img)

    left = get_square(img, 0)
    right = get_square(img, 1)

    right = normalize(right)
    left = normalize(left)

    right = np.transpose(right, axes=[2, 0, 1])
    left = np.transpose(left, axes=[2, 0, 1])

    X_l = torch.FloatTensor(left).unsqueeze(0)
    X_r = torch.FloatTensor(right).unsqueeze(0)

    if gpu:
        X_l = Variable(X_l, volatile=True).cuda()
        X_r = Variable(X_r, volatile=True).cuda()
    else:
        X_l = Variable(X_l, volatile=True)
        X_r = Variable(X_r, volatile=True)

    y_l = F.sigmoid(net(X_l))
    y_r = F.sigmoid(net(X_r))
    y_l = F.upsample_bilinear(y_l, scale_factor=2).data[0][0].cpu().numpy()
    y_r = F.upsample_bilinear(y_r, scale_factor=2).data[0][0].cpu().numpy()

    y = merge_masks(y_l, y_r, 1918)
    yy = dense_crf(np.array(full_img).astype(np.uint8), y)

    return yy > 0.5
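Every example on this page calls a dense_crf(image, probability_map) helper that is not shown. Below is a minimal sketch of what such a helper could look like for the single-channel call in Example #1, built on the pydensecrf package; the kernel parameters (sxy, srgb, compat) and the number of mean-field iterations are assumptions, not values taken from the original repository.

import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax

def dense_crf(img, prob):
    # img:  H x W x 3 uint8 image
    # prob: H x W float array of foreground probabilities
    h, w = prob.shape

    # build a 2-class (background, foreground) softmax for the unary term
    softmax = np.stack([1.0 - prob, prob]).astype(np.float32)
    unary = unary_from_softmax(softmax)  # negative log-probabilities, shape (2, h*w)

    d = dcrf.DenseCRF2D(w, h, 2)
    d.setUnaryEnergy(np.ascontiguousarray(unary))
    d.addPairwiseGaussian(sxy=3, compat=3)  # smoothness kernel (assumed values)
    d.addPairwiseBilateral(sxy=80, srgb=13,
                           rgbim=np.ascontiguousarray(img), compat=10)  # appearance kernel (assumed values)

    Q = d.inference(5)  # 5 mean-field iterations (assumed)
    return np.array(Q)[1].reshape(h, w)  # refined foreground probability map

With this signature, dense_crf(np.array(full_img).astype(np.uint8), y) in Example #1 returns a refined H x W probability map, which is then thresholded at 0.5.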
Example #2
def val(epoch, crf=True):
    fcn_model.eval()
    total_ious = []
    pixel_accs = []
    for iter, batch in enumerate(eval_loader):
        inputs = Variable(batch['image'])
        output = fcn_model(inputs)
        output = output.data.cpu().numpy()  # (N, n_class, h, w) class scores
        if crf:
            # refine each probability map with a fully connected CRF
            crf_output = np.zeros(output.shape)
            images = inputs.data.cpu().numpy().astype(np.uint8)
            for i, (image, prob_map) in enumerate(zip(images, output)):
                image = image.transpose(1, 2, 0)  # CHW -> HWC for the CRF
                crf_output[i] = dense_crf(image, prob_map)
            output = crf_output

        N, _, h, w = output.shape
        pred = output.transpose(0, 2, 3, 1).reshape(-1, n_class).argmax(axis=1).reshape(N, h, w)
        target = batch['label'].cpu().numpy().reshape(N, h, w)

        for p, t in zip(pred, target):
            total_ious.append(iou(p, t))
            pixel_accs.append(pixel_acc(p, t))

    # average the per-class IoUs and pixel accuracies over the validation set
    total_ious = np.array(total_ious).T  # n_class * val_len
    ious = np.nanmean(total_ious, axis=1)
    pixel_accs = np.array(pixel_accs).mean()
    print("epoch{}, pix_acc: {}, meanIoU: {}, IoUs: {}".format(epoch, pixel_accs, np.nanmean(ious), ious))
    IU_scores[epoch] = ious
    np.save(os.path.join(score_dir, "meanIU"), IU_scores)
    pixel_scores[epoch] = pixel_accs
    np.save(os.path.join(score_dir, "meanPixel"), pixel_scores)
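Example #2 relies on iou and pixel_acc helpers that are defined elsewhere. The sketch below is one plausible implementation, consistent only with how they are used above: iou returns one value per class, with NaN for classes missing from both prediction and target so that np.nanmean skips them, and pixel_acc returns a scalar. n_class is assumed to be a module-level constant, as in the example.

import numpy as np

def iou(pred, target):
    # per-class intersection over union for a single image
    ious = []
    for cls in range(n_class):
        pred_mask = pred == cls
        target_mask = target == cls
        union = np.logical_or(pred_mask, target_mask).sum()
        if union == 0:
            ious.append(float('nan'))  # class absent everywhere: ignored by nanmean
        else:
            intersection = np.logical_and(pred_mask, target_mask).sum()
            ious.append(intersection / union)
    return ious

def pixel_acc(pred, target):
    # fraction of pixels whose predicted class matches the ground truth
    return (pred == target).sum() / float(target.size)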
Example #3
def eval_net(net, dataset, gpu=False):
    tot = 0
    for i, b in enumerate(dataset):
        X = b[0]
        y = b[1]

        X = torch.FloatTensor(X).unsqueeze(0)
        y = torch.ByteTensor(y).unsqueeze(0)

        if gpu:
            X = Variable(X, volatile=True).cuda()
            y = Variable(y, volatile=True).cuda()
        else:
            X = Variable(X, volatile=True)
            y = Variable(y, volatile=True)

        y_pred = net(X)

        y_pred = (F.sigmoid(y_pred) > 0.6).float()
        # y_pred = F.sigmoid(y_pred).float()

        dice = dice_coeff(y_pred, y.float()).data[0]
        tot += dice

        if 1:  # visualize input, target, prediction, and CRF-refined output
            X = X.data.squeeze(0).cpu().numpy()
            X = np.transpose(X, axes=[1, 2, 0])
            y = y.data.squeeze(0).cpu().numpy()
            y_pred = y_pred.data.squeeze(0).squeeze(0).cpu().numpy()
            print(y_pred.shape)

            fig = plt.figure()
            ax1 = fig.add_subplot(1, 4, 1)
            ax1.imshow(X)
            ax2 = fig.add_subplot(1, 4, 2)
            ax2.imshow(y)
            ax3 = fig.add_subplot(1, 4, 3)
            ax3.imshow((y_pred > 0.5))


            Q = dense_crf(((X*255).round()).astype(np.uint8), y_pred)
            ax4 = fig.add_subplot(1, 4, 4)
            print(Q)
            ax4.imshow(Q > 0.5)
            plt.show()
    return tot / (i + 1)  # average Dice over the dataset (i is zero-based)
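eval_net above averages a dice_coeff metric that is defined elsewhere. The sketch below is a soft Dice coefficient matching how it is called here (a float prediction and a float target of the same shape, returning a one-element result whose value is read with .data[0] under the legacy Variable API); the smoothing term eps is an assumption.

def dice_coeff(pred, target, eps=1e-7):
    # soft Dice: 2 * (pred . target) / (sum(pred) + sum(target))
    pred_flat = pred.view(-1)
    target_flat = target.view(-1)
    intersection = (pred_flat * target_flat).sum()
    return (2.0 * intersection + eps) / (pred_flat.sum() + target_flat.sum() + eps)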
def main(test_args):

    testset = "/mnt/iusers01/eee01/mchiwml4/CamVid/test"
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(mean, std)])
    test_dataset = DataLoader(Loaddata(testset,
                                       transform=transform,
                                       target_transform=MaskToTensor()),
                              batch_size=1,
                              shuffle=False,
                              num_workers=8)

    label_num = 11
    model = deeplab_v2.Deeplab_Resnet(label_num)
    model = model.cuda()
    model.load_state_dict(torch.load(test_args.load_param))
    model.eval()

    total = np.zeros((label_num, ))

    running_metrics = runningScore(label_num)

    for j, data in enumerate(test_dataset):
        inputs, labels = data
        inputs = Variable(inputs.cuda())

        outputs = model(inputs)

        H = inputs.size()[2]
        W = inputs.size()[3]
        interp_resize = nn.Upsample(size=(int(H), int(W)), mode='bilinear')
        output = interp_resize(outputs[3])
        output = F.softmax(output, dim=1)
        output = output.data.cpu().numpy()

        if test_args.crf:
            crf_output = np.zeros(output.shape)
            images = inputs.data.cpu().numpy().astype(np.uint8)
            for i, (image, prob_map) in enumerate(zip(images, output)):
                image = image.transpose(1, 2, 0)
                crf_output[i] = dense_crf(image, prob_map)
            output = crf_output

        pred = np.argmax(output, axis=1)
        gt = labels.numpy()

        running_metrics.update(gt, pred)

        for i in range(label_num):
            mask = gt == i  # ground truth mask of class i
            total[i] += np.sum(mask)  # total number of pixels of class i (tp + fn)

    score, class_iou, class_acc = running_metrics.get_scores()

    for k, v in score.items():
        print(k, v)
    print('class iou: ')
    for i in range(label_num):
        print(i, class_iou[i])
    print('class acc: ')
    for i in range(label_num):
        print(i, class_acc[i])

    print('number of pixels:')
    print(total)
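The runningScore object used in main is also not defined in this listing. One plausible confusion-matrix based implementation, consistent with the (score, class_iou, class_acc) tuple that get_scores() returns here, is sketched below; the keys of the score dictionary are assumptions.

import numpy as np

class runningScore(object):
    # accumulates a confusion matrix over batches and derives overall accuracy,
    # mean IoU, per-class IoU and per-class accuracy

    def __init__(self, n_classes):
        self.n_classes = n_classes
        self.confusion = np.zeros((n_classes, n_classes), dtype=np.int64)

    def update(self, gt, pred):
        # gt, pred: integer label arrays of identical shape
        valid = (gt >= 0) & (gt < self.n_classes)
        hist = np.bincount(
            self.n_classes * gt[valid].astype(int) + pred[valid],
            minlength=self.n_classes ** 2,
        ).reshape(self.n_classes, self.n_classes)
        self.confusion += hist

    def get_scores(self):
        hist = self.confusion.astype(np.float64)
        tp = np.diag(hist)
        with np.errstate(divide='ignore', invalid='ignore'):
            class_acc = tp / hist.sum(axis=1)                            # per-class recall
            class_iou = tp / (hist.sum(axis=1) + hist.sum(axis=0) - tp)  # per-class IoU
        score = {
            'overall_acc': tp.sum() / hist.sum(),
            'mean_acc': np.nanmean(class_acc),
            'mean_iou': np.nanmean(class_iou),
        }
        return score, class_iou, class_acc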
        
# normalized output for threshold
#        nor_output = np.zeros(output.shape)
#        for idx in range(output.shape[0]):
#            max_per = output[idx].max()
#            if max_per <= 0:
#                max_per = 1
#            nor_output[idx] = output[idx] / float(max_per)

#        print(output.max(1).max(1))
        # normalize all maps by the global maximum: output.max() is the max over
        # all maps, while .max(1).max(1) would give each map's own max; dividing
        # by the global max leaves the relative ordering of values unchanged
        max_all = output.max()
        for idx in range(output.shape[0]):
            output[idx] = output[idx] / float(max_all)
        nocrf_output = output
        output = dense_crf(img_, output)
        crf_mask = np.argmax(output, axis=0)

# for gathering algorithm
#        mask = statis(nocrf_output, crf_mask, i[:-1])
#        mask = cluster(nocrf_output, crf_mask, i[:-1], mode='kmeans')

#        nocrf_output = mask

#        img_arr = np.array(Image.open(img_path), dtype=np.uint8)
#        output = seed_dense_crf(img_arr, mask)

#        ##########################   no_remove   ######################
#        thre = 0.3
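Unlike the single-channel call in Example #1, the val function in Example #2, main, and the fragment above all pass a full (n_class, H, W) probability map to dense_crf. A multi-class variant of the earlier sketch, again built on pydensecrf with assumed kernel parameters, could look like this:

import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax

def dense_crf(img, probs):
    # img:   H x W x 3 uint8 image
    # probs: (n_class, H, W) float array of class probabilities
    n_classes, h, w = probs.shape
    unary = unary_from_softmax(probs.astype(np.float32))  # (n_classes, h*w)

    d = dcrf.DenseCRF2D(w, h, n_classes)
    d.setUnaryEnergy(np.ascontiguousarray(unary))
    d.addPairwiseGaussian(sxy=3, compat=3)  # smoothness kernel (assumed values)
    d.addPairwiseBilateral(sxy=80, srgb=13,
                           rgbim=np.ascontiguousarray(img), compat=10)  # appearance kernel (assumed values)

    Q = d.inference(5)
    return np.array(Q).reshape(n_classes, h, w)  # refined class probabilities

Taking the argmax over the first axis of the returned array, as the examples do, then gives the CRF-refined label map.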