Example #1
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()

    gpu0 = args.gpu

    model = Res_Deeplab(num_classes=args.num_classes)

    saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda(gpu0)

    testloader = data.DataLoader(VOCDataSet(args.data_dir,
                                            args.data_list,
                                            crop_size=(505, 505),
                                            mean=IMG_MEAN,
                                            scale=False,
                                            mirror=False),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    interp = nn.Upsample(size=(505, 505), mode='bilinear')
    data_list = []

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % index)
        images, label, size, name = batch
        images = Variable(images, volatile=True)
        h, w, c = size[0].numpy()
        images075 = nn.Upsample(size=(int(h * 0.75), int(w * 0.75)),
                                mode='bilinear')(images)
        images05 = nn.Upsample(size=(int(h * 0.5), int(w * 0.5)),
                               mode='bilinear')(images)

        out100 = model(images.cuda(args.gpu))
        out075 = model(images075.cuda(args.gpu))
        out05 = model(images05.cuda(args.gpu))
        o_h, o_w = out100.size()[2:]
        interpo1 = nn.Upsample(size=(o_h, o_w), mode='bilinear')
        out_max = torch.max(torch.stack(
            [out100, interpo1(out075),
             interpo1(out05)]),
                            dim=0)[0]

        output = interp(out_max).cpu().data[0].numpy()

        output = output[:, :h, :w]
        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.int)

        gt = np.asarray(label[0].numpy()[:h, :w], dtype=np.int)

        # show_all(gt, output)
        data_list.append([gt.flatten(), output.flatten()])

    get_iou(data_list, args.num_classes)
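
The helper get_iou is referenced throughout these examples but never shown. A minimal sketch of what it presumably does with the accumulated [gt.flatten(), output.flatten()] pairs (mean IoU from a confusion matrix); the function body, the ignore_label value, and the reduction are assumptions, not the original implementation:

import numpy as np

def get_iou_sketch(data_list, num_classes, ignore_label=255):
    """Hypothetical stand-in for get_iou: mean IoU via a confusion matrix."""
    hist = np.zeros((num_classes, num_classes), dtype=np.int64)
    for gt, pred in data_list:
        mask = gt != ignore_label                        # drop void pixels
        hist += np.bincount(num_classes * gt[mask] + pred[mask],
                            minlength=num_classes ** 2).reshape(num_classes,
                                                                num_classes)
    # per-class IoU = TP / (TP + FP + FN); guard against empty classes
    iou = np.diag(hist) / np.maximum(hist.sum(axis=1) + hist.sum(axis=0)
                                     - np.diag(hist), 1)
    print('mean IoU: {:.4f}'.format(iou.mean()))
    return iou.mean()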
Example #2
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()

    model = Res_Deeplab(num_classes=args.num_classes)

    saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.eval()

    if torch.cuda.is_available():
        args.device = torch.device('cuda')
        model = model.cuda()
    else:
        args.device = torch.device('cpu')

    print('The model is running on {}'.format(args.device))

    testloader = data.DataLoader(VOCDataSet(args.data_dir,
                                            args.data_list,
                                            hue_value=args.hue_value,
                                            crop_size=(505, 505),
                                            mean=IMG_MEAN,
                                            scale=False,
                                            mirror=False),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    interp = nn.Upsample(size=(505, 505), mode='bilinear')
    data_list = []

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % index)
        image, label, size, name = batch
        image = image.to(device=args.device)
        size = size[0].numpy()
        output = model(Variable(image, volatile=True))
        output = interp(output).cpu().data[0].numpy()

        output = output[:, :size[0], :size[1]]
        gt = np.asarray(label[0].numpy()[:size[0], :size[1]], dtype=np.int)

        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.int)

        # show_all(gt, output)
        data_list.append([gt.flatten(), output.flatten()])

    get_iou(data_list, args.num_classes, args.hue_value)
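
On PyTorch 0.4 and later, Variable(..., volatile=True) is deprecated; the equivalent inference guard is torch.no_grad(). A minimal rewrite of the loop body above under that assumption:

        with torch.no_grad():                 # replaces volatile=True
            output = model(image)             # image is already on args.device
        output = interp(output).cpu().numpy()[0]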
Example #3
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()

    gpu0 = args.gpu

    model = Res_Deeplab(num_classes=args.num_classes)

    saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda(gpu0)

    testloader = data.DataLoader(VOCDataSet(args.data_dir,
                                            args.data_list,
                                            crop_size=(505, 505),
                                            mean=IMG_MEAN,
                                            scale=False,
                                            mirror=False),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    interp = nn.Upsample(size=(505, 505), mode='bilinear')
    data_list = []

    with open('result.txt', 'w') as f:

        for index, batch in enumerate(testloader, 0):
            #print('%d processd'%(index))
            image, label, size, name = batch
            size = size[0].numpy()  # size of the original input image
            #print("size:",size)
            output = model(Variable(image, volatile=True).cuda(gpu0))
            #print("model output size: ",output.size())
            output = interp(output).cpu().data[0].numpy()

            output = output[:, :size[0], :size[1]]
            gt = np.asarray(label[0].numpy()[:size[0], :size[1]], dtype=np.int)

            output = output.transpose(1, 2, 0)
            prob = softmax(output, axis=2)
            entropy = (-prob * np.log(prob)).mean() * 100
            output = np.asarray(np.argmax(output, axis=2), dtype=np.int)

            # show_all(gt, output)
            data_list.append([gt.flatten(), output.flatten()])

            iou = get_iou(data_list, args.num_classes)
            data_list = []
            print('{:.4f}, {:.4f}'.format(entropy.item(), iou.item()))
            f.write('{:.8f}, {:.8f}\n'.format(entropy.item(), iou.item()))
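
The entropy line above folds the average over classes and pixels into a single .mean() and scales by 100, and it assumes softmax comes from scipy.special. A sketch of a per-pixel entropy map computed from the same transposed logits (the epsilon guard against log(0) is an addition):

from scipy.special import softmax

prob = softmax(output, axis=2)                             # (H, W, C) probabilities
entropy_map = -(prob * np.log(prob + 1e-12)).sum(axis=2)   # (H, W) entropy in nats
mean_entropy = entropy_map.mean()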
Example #4
def iou():
    """Create the model and start the evaluation process."""
    args = get_arguments()

    gpu0 = args.gpu

    model = Res_Deeplab(num_classes=args.num_classes)

    saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda(gpu0)

    testloader = data.DataLoader(VOCDataSet(args.data_dir,
                                            args.data_list,
                                            crop_size=(505, 505),
                                            mean=IMG_MEAN,
                                            scale=False,
                                            mirror=False),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    interp = nn.Upsample(size=(505, 505), mode='bilinear', align_corners=True)
    data_list = []

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            #print('%d processd'%(index))
            sys.stdout.flush()
        image, label, size, name = batch
        size = size[0].numpy()
        with torch.no_grad():
            output = model(Variable(image).cuda(gpu0))
            output = interp(output).cpu().data[0].numpy()

        output = output[:, :size[0], :size[1]]
        gt = np.asarray(label[0].numpy()[:size[0], :size[1]], dtype=np.int)

        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.int)

        # show_all(gt, output)
        data_list.append([gt.flatten(), output.flatten()])

    return get_iou(data_list, args.num_classes)
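
Since this variant returns the score instead of printing it, it can be driven from other code. A minimal usage sketch, assuming get_iou returns a scalar mean IoU:

if __name__ == '__main__':
    mean_iou = iou()
    print('mean IoU: {:.4f}'.format(mean_iou))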
Example #5
def main():
    """Create the model and start the evaluation process."""
    args = get_arguments()

    gpu0 = args.gpu

    model = Res_Deeplab(num_classes=args.num_classes)

    saved_state_dict = torch.load(args.restore_from)
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda(gpu0)

    testloader = data.DataLoader(VOCDataSet(args.data_dir,
                                            args.data_list,
                                            crop_size=(505, 505),
                                            mean=IMG_MEAN,
                                            scale=False,
                                            mirror=False),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    interp = nn.Upsample(size=(505, 505), mode='bilinear')
    sigmoid = nn.Sigmoid()
    data_list = []

    for index, batch in enumerate(testloader):
        print('%d processed' % index)  # log every image
        image, size, name = batch
        size = size[0].numpy()
        output = model(Variable(image, volatile=True).cuda(gpu0))
        output = interp(output).cpu().data[0]  # (C, H, W) tensor on the CPU

        output = output[:, :size[0], :size[1]]  # crop padding back to the original size

        result = torchvision.transforms.ToPILImage()(output.float())
        path = os.path.join(SR_dir, name[0])
        result.save(path)
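
Note that torchvision.transforms.ToPILImage only accepts tensors with 1 or 3 channels, so the conversion above works only if the model emits that many. For a multi-class segmentation output, saving the argmax as an 8-bit label mask is a common alternative; a sketch (torch.argmax with a dim argument requires PyTorch 0.4+):

from PIL import Image

mask = output.argmax(dim=0).byte().numpy()   # (H, W) class indices as uint8
Image.fromarray(mask).save(path)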
Example #6
def main():
    """Create the model and start the evaluation process."""

    gpu0 = 0

    model = Res_Deeplab(num_classes=NUM_CLASSES)
    
    saved_state_dict = torch.load(RESTORE_FROM)
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda(gpu0)

    testloader = data.DataLoader(VOCDataSet(DATA_DIRECTORY, DATA_LIST_PATH, crop_size=(321, 321), mean=IMG_MEAN, scale=False, mirror=False), 
                                    batch_size=1, shuffle=False, pin_memory=True)

    interp = nn.Upsample(size=(321, 321), mode='bilinear', align_corners=True)  # resized to match the 321x321 model input
    data_list = []

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % index)
        torch.save(batch, SAVE_TO + '/batch' + str(index) + '.pth') # Save the batch
        image, label, size, name = batch
        size = size[0].numpy()
        output = model(Variable(image, volatile=True).cuda(gpu0))

        output = interp(output)
        torch.save(output, SAVE_TO + '/prediction' + str(index) + '.pth') #Save b11 prediction

        output = output.cpu().data[0].numpy()

        output = output[:,:size[0],:size[1]]
        gt = np.asarray(label[0].numpy()[:size[0],:size[1]], dtype=np.int)
        
        output = output.transpose(1,2,0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.int)

        data_list.append([gt.flatten(), output.flatten()])

    get_iou(data_list, NUM_CLASSES)
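
The batches and predictions dumped with torch.save above can be restored later with torch.load for offline inspection, e.g.:

batch = torch.load(SAVE_TO + '/batch0.pth')            # (image, label, size, name)
prediction = torch.load(SAVE_TO + '/prediction0.pth')  # interpolated logits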
Example #7
def main():
    """Create the model and start the training."""

    print("=====> Set GPU for training")
    if args.cuda:
        print("====> use gpu id: '{}'".format(args.gpus))
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
        if not torch.cuda.is_available():
            raise Exception(
                "No GPU found or Wrong gpu id, please run without --cuda")

    args.seed = random.randint(1, 10000)
    print("Random Seed: ", args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    cudnn.enabled = True

    # Create network.

    print("=====> Building network")
    model = Res_Deeplab(num_classes=args.num_classes)
    # For a small batch size, it is better to keep
    # the statistics of the BN layers (running means and variances)
    # frozen, and to not update the values provided by the pre-trained model.
    # If is_training=True, the statistics will be updated during the training.
    # Note that is_training=False still updates the BN parameters gamma (scale)
    # and beta (offset) if they are present in the var_list of the optimiser definition.

    print("=====> Loading pretrained weights")
    saved_state_dict = torch.load(args.restore_from)
    new_params = model.state_dict().copy()
    for i in saved_state_dict:
        #Scale.layer5.conv2d_list.3.weight
        i_parts = i.split('.')
        # print i_parts
        if args.num_classes != 21 or i_parts[1] != 'layer5':
            new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
    model.load_state_dict(new_params)

    model.train()
    model.cuda()

    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    print('====> Computing network parameters')
    total_parameters = netParams(model)
    print('Total network parameters: ' + str(total_parameters))

    print("=====> Preparing training data")
    trainloader = data.DataLoader(VOCDataSet(args.data_dir,
                                             args.data_list,
                                             max_iters=args.num_steps *
                                             args.batch_size,
                                             crop_size=input_size,
                                             scale=args.random_scale,
                                             mirror=args.random_mirror,
                                             mean=IMG_MEAN),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=2,
                                  pin_memory=True)

    optimizer = optim.SGD([{
        'params': get_1x_lr_params_NOscale(model),
        'lr': args.learning_rate
    }, {
        'params': get_10x_lr_params(model),
        'lr': 10 * args.learning_rate
    }],
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()

    interp = nn.Upsample(size=input_size, mode='bilinear')

    logFileLoc = args.snapshot_dir + args.logFile
    if os.path.isfile(logFileLoc):
        logger = open(logFileLoc, 'a')
    else:
        logger = open(logFileLoc, 'w')
        logger.write("Parameters: %s" % (str(total_paramters)))
        logger.write("\n%s\t\t%s" % ('iter', 'Loss(train)'))
    logger.flush()

    print("=====> Begin to train")
    for i_iter, batch in enumerate(trainloader):
        images, labels, _, _ = batch
        images = Variable(images).cuda()

        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)
        pred = interp(model(images))
        loss = loss_calc(pred, labels)
        loss.backward()
        optimizer.step()

        print('iter = ', i_iter, 'of', args.num_steps, 'completed, loss = ',
              loss.data.cpu().numpy())
        logger.write("\n%d\t\t%.5f" % (i_iter, loss.data.cpu().numpy()))
        logger.flush()
        if i_iter >= args.num_steps - 1:
            print('save model ...')
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'VOC12_scenes_' + str(args.num_steps) + '.pth'))
            break

        if i_iter % args.save_pred_every == 0 and i_iter != 0:
            print('taking snapshot ...')
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'VOC12_scenes_' + str(i_iter) + '.pth'))

    logger.close()
    end = timeit.default_timer()
    print(end - start, 'seconds')
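
adjust_learning_rate is not defined in the snippet. DeepLab-style training commonly decays the learning rate with a 'poly' schedule; a sketch consistent with the two parameter groups above (the power value and the helper bodies are assumptions):

def lr_poly(base_lr, i_iter, max_iter, power=0.9):
    return base_lr * ((1 - float(i_iter) / max_iter) ** power)

def adjust_learning_rate(optimizer, i_iter):
    """Hypothetical poly decay; the second group stays at 10x the base rate."""
    lr = lr_poly(args.learning_rate, i_iter, args.num_steps)
    optimizer.param_groups[0]['lr'] = lr
    optimizer.param_groups[1]['lr'] = 10 * lr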
Example #8
def main():
    """Create the model and start the training."""

    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    cudnn.enabled = True

    # Create network.
    model = Res_Deeplab(num_classes=args.num_classes)
    # For a small batch size, it is better to keep
    # the statistics of the BN layers (running means and variances)
    # frozen, and to not update the values provided by the pre-trained model.
    # If is_training=True, the statistics will be updated during the training.
    # Note that is_training=False still updates the BN parameters gamma (scale)
    # and beta (offset) if they are present in the var_list of the optimiser definition.

    saved_state_dict = torch.load(args.restore_from)
    new_params = model.state_dict().copy()
    for i in saved_state_dict:
        #Scale.layer5.conv2d_list.3.weight
        i_parts = i.split('.')
        # print i_parts
        if args.num_classes != 21 or i_parts[1] != 'layer5':
            new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
    model.load_state_dict(new_params)
    #model.float()
    #model.eval() # use_global_stats = True
    model.train()
    model.cuda()

    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    trainloader = data.DataLoader(VOCDataSet(args.data_dir,
                                             args.data_list,
                                             max_iters=args.num_steps *
                                             args.batch_size,
                                             crop_size=input_size,
                                             scale=args.random_scale,
                                             mirror=args.random_mirror,
                                             mean=IMG_MEAN),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=0,
                                  pin_memory=True)

    optimizer = optim.SGD([{
        'params': get_1x_lr_params_NOscale(model),
        'lr': args.learning_rate
    }, {
        'params': get_10x_lr_params(model),
        'lr': 10 * args.learning_rate
    }],
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()

    interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)

    for i_iter, batch in enumerate(trainloader):
        images, labels, _, _ = batch
        images = Variable(images).cuda()

        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)
        pred = interp(model(images))
        loss = loss_calc(pred, labels)
        loss.backward()
        optimizer.step()

        print('iter = ', i_iter, 'of', args.num_steps, 'completed, loss = ',
              loss.data.cpu().numpy())
        sys.stdout.flush()

        if i_iter >= args.num_steps - 1:
            print('save model ...')
            sys.stdout.flush()
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'VOC12_scenes_' + str(args.num_steps) + '.pth'))
            break

        if i_iter % args.save_pred_every == 0 and i_iter != 0:
            print('taking snapshot ...')
            sys.stdout.flush()
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'VOC12_scenes_' + str(i_iter) + '.pth'))

    end = timeit.default_timer()
    print(end - start, 'seconds')
    sys.stdout.flush()
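
The optimizer in this and the previous example splits parameters with get_1x_lr_params_NOscale and get_10x_lr_params, neither of which is shown. Judging by the names and the layer5 handling in the weight-loading loop, a plausible sketch is backbone parameters at the base rate and the classifier head at 10x; the bodies are assumptions:

def get_1x_lr_params_NOscale(model):
    """Hypothetical: yield backbone parameters (everything except layer5)."""
    for name, param in model.named_parameters():
        if 'layer5' not in name and param.requires_grad:
            yield param

def get_10x_lr_params(model):
    """Hypothetical: yield the classifier (layer5) parameters."""
    for name, param in model.named_parameters():
        if 'layer5' in name and param.requires_grad:
            yield param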
Example #9
def main():
    """Create the model and start the training."""

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    cudnn.enabled = True
    gpu = args.gpu

    # Create network.
    model = Res_Deeplab(num_classes=args.num_classes)
    # For a small batch size, it is better to keep
    # the statistics of the BN layers (running means and variances)
    # frozen, and to not update the values provided by the pre-trained model.
    # If is_training=True, the statistics will be updated during the training.
    # Note that is_training=False still updates the BN parameters gamma (scale)
    # and beta (offset) if they are present in the var_list of the optimiser definition.

    saved_state_dict = torch.load(args.restore_from)
    new_params = model.state_dict().copy()
    for i in saved_state_dict:
        #Scale.layer5.conv2d_list.3.weight
        i_parts = i.split('.')
        # print i_parts
        if args.num_classes != 21 or i_parts[1] != 'layer5':
            new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
    model.load_state_dict(new_params)
    #model.float()
    #model.eval() # use_global_stats = True
    model.train()
    model.cuda(args.gpu)

    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    trainloader = data.DataLoader(VOCDataSet(args.data_dir,
                                             args.data_list,
                                             max_iters=args.num_steps *
                                             args.iter_size,
                                             crop_size=input_size,
                                             scale=args.random_scale,
                                             mirror=args.random_mirror,
                                             mean=IMG_MEAN),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=1,
                                  pin_memory=True)

    optimizer = optim.SGD([{
        'params': get_1x_lr_params_NOscale(model),
        'lr': args.learning_rate
    }, {
        'params': get_10x_lr_params(model),
        'lr': 10 * args.learning_rate
    }],
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()

    b_loss = 0
    for i_iter, batch in enumerate(trainloader):

        images, labels, _, _ = batch
        images, labels = Variable(images), labels.numpy()
        h, w = images.size()[2:]
        images075 = nn.Upsample(size=(int(h * 0.75), int(w * 0.75)),
                                mode='bilinear')(images)
        images05 = nn.Upsample(size=(int(h * 0.5), int(w * 0.5)),
                               mode='bilinear')(images)

        out = model(images.cuda(args.gpu))
        out075 = model(images075.cuda(args.gpu))
        out05 = model(images05.cuda(args.gpu))
        o_h, o_w = out.size()[2:]
        interpo1 = nn.Upsample(size=(o_h, o_w), mode='bilinear')
        interpo2 = nn.Upsample(size=(h, w), mode='bilinear')
        out_max = interpo2(
            torch.max(torch.stack([out, interpo1(out075),
                                   interpo1(out05)]),
                      dim=0)[0])

        loss = loss_calc(out_max, labels, args.gpu)
        d1, d2 = float(labels.shape[1]), float(labels.shape[2])
        loss100 = loss_calc(
            out,
            nd.zoom(labels, (1.0, out.size()[2] / d1, out.size()[3] / d2),
                    order=0), args.gpu)
        loss075 = loss_calc(
            out075,
            nd.zoom(labels,
                    (1.0, out075.size()[2] / d1, out075.size()[3] / d2),
                    order=0), args.gpu)
        loss05 = loss_calc(
            out05,
            nd.zoom(labels, (1.0, out05.size()[2] / d1, out05.size()[3] / d2),
                    order=0), args.gpu)
        loss_all = (loss + loss100 + loss075 + loss05) / args.iter_size
        loss_all.backward()
        b_loss += loss_all.data.cpu().numpy()

        b_iter = i_iter // args.iter_size  # number of completed optimizer steps

        if b_iter >= args.num_steps - 1:
            print('save model ...')
            optimizer.step()
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'VOC12_scenes_' + str(args.num_steps) + '.pth'))
            break

        if i_iter % args.iter_size == 0 and i_iter != 0:
            print('iter =', b_iter, 'of', args.num_steps, 'completed, loss =', b_loss)
            optimizer.step()
            adjust_learning_rate(optimizer, b_iter)
            optimizer.zero_grad()
            b_loss = 0

        if i_iter % (args.save_pred_every *
                     args.iter_size) == 0 and b_iter != 0:
            print('taking snapshot ...')
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'VOC12_scenes_' + str(b_iter) + '.pth'))

    end = timeit.default_timer()
    print(end - start, 'seconds')
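
loss_calc appears in all three training examples but is not defined here. For Pascal VOC segmentation it is typically a 2D cross-entropy that ignores the void label 255; a sketch under that assumption (this example passes labels as NumPy arrays, the earlier ones as tensors):

def loss_calc(pred, label, gpu=0):
    """Hypothetical: cross-entropy over (N, C, H, W) logits, void pixels ignored."""
    if isinstance(label, np.ndarray):
        label = torch.from_numpy(label)
    label = label.long().cuda(gpu)
    criterion = nn.CrossEntropyLoss(ignore_index=255)
    return criterion(pred, label)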
Example #10
# load the state dictionary of the model
# IMPORTANT! HAS TO BE CHANGED HERE, USE PATH TO YOUR BEST TRAINED MODEL
pathToTrainedModel = '/root/20000StepsDefaultParametersBatch6/VOC12_scenes_20000.pth'
saved_state_dict = torch.load(pathToTrainedModel)
model.load_state_dict(saved_state_dict)

# no training/updating weights here
model.eval()

model.cuda()

# DataLoader from the PyTorch DeepLab code; uses the Pascal VOC dataset
trainloader = data.DataLoader(VOCDataSet(DATA_DIRECTORY,
                                         DATA_LIST_PATH,
                                         max_iters=10,
                                         crop_size=input_size,
                                         scale=False,
                                         mirror=False,
                                         mean=IMG_MEAN),
                              batch_size=BATCH_SIZE,
                              shuffle=False,
                              num_workers=0,
                              pin_memory=True)

interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)

# Iterate over the trainloader (max_iters=10 batches, as configured above)
for i_iter, batch in enumerate(trainloader):
    # get the images and labels (ground truth) of each batch
    images, labels, _, _ = batch
    images = Variable(images).cuda()  # move the batch to the GPU
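
The snippet is cut off at this point. Going by Examples #1 to #6, the loop body would presumably continue along these lines (a sketch mirroring the earlier evaluation loops, not the original continuation):

    output = model(images)
    output = interp(output).cpu().data[0].numpy()
    pred = np.argmax(output.transpose(1, 2, 0), axis=2)
    # ...compare pred against labels, accumulate pairs for get_iou, etc.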