Example #1
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image

# VGG16_FCN32s, VGG16_FCN8s, UNet and class_map are defined elsewhere
# in this project.


def inference(args, dataloader):
    if str(args.model).lower() == 'fcn32s':
        model = VGG16_FCN32s(n_classes=7)
        model.load_state_dict(
            torch.load(f'{args.model_path}/best_fcn32s.pth',
                       map_location='cpu'))
    elif str(args.model).lower() == 'fcn8s':
        model = VGG16_FCN8s(n_classes=7)
        model.load_state_dict(
            torch.load(f'{args.model_path}/best_fcn8s.pth',
                       map_location='cpu'))
    else:
        model = UNet(n_channels=3, n_classes=7)
        model.load_state_dict(
            torch.load(f'{args.model_path}/best_unet.pth', map_location='cpu'))
    #model = nn.DataParallel(model)
    model.eval()
    model.cuda()

    with torch.no_grad():
        for idx, (images, path) in enumerate(dataloader):
            b = images.size(0)

            # Logits -> per-pixel class indices. The softmax is redundant
            # before argmax but kept for readability.
            predict = model(images.cuda())
            predict = F.softmax(predict.permute(0, 2, 3, 1), dim=-1)
            predict = torch.argmax(predict, dim=-1)
            predict = predict.cpu().numpy()

            # Map each class index to its RGB colour and save the mask.
            for s in range(b):
                pred_img = np.zeros((512, 512, 3), dtype=np.uint8)
                for c in range(len(class_map)):
                    pred_img[predict[s] == c] = class_map[c]
                pred_img = Image.fromarray(pred_img)
                pred_img.save(path[s])
            print(f'\t[{(idx + 1) * b}/{len(dataloader.dataset)}]', end='  \r')
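A minimal sketch of how inference() might be driven (SegTestDataset, the paths and the argument values below are assumptions, not part of the original):

from types import SimpleNamespace
from torch.utils.data import DataLoader

# Hypothetical driver: SegTestDataset is a placeholder dataset yielding
# (image_tensor, output_path) pairs, matching what inference() unpacks.
args = SimpleNamespace(model='unet', model_path='./checkpoints')
loader = DataLoader(SegTestDataset('./test/images', './predictions'),
                    batch_size=4, shuffle=False)
inference(args, loader)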
Example #2
import os
from os.path import exists
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from torchvision import transforms

# UNet, Dataset_unet and the ToTensor used below are defined elsewhere
# in this project.

batch_size = 1
test_image_dir = '../test/images/'
test_label_dir = '../test/labels/'
checkpoints_dir = '../checkpoints/'
# save_path = 'test_results/'
# if not exists(save_path):
#     os.mkdir(save_path)

net = UNet(n_channels=3, n_classes=1)
net.cuda()
net.eval()

transform1 = transforms.Compose([ToTensor()])
test_dataset = Dataset_unet(test_image_dir,
                            test_label_dir,
                            transform=transform1)
dataloader = DataLoader(test_dataset, batch_size=batch_size)
dataset_sizes = len(test_dataset)
batch_num = dataset_sizes // batch_size

# Evaluate every fifth checkpoint: CP1.pth, CP6.pth, ..., CP146.pth.
for checkpoint in range(1, 31):
    net.load_state_dict(
        torch.load(checkpoints_dir + 'CP' + str(5 * checkpoint - 4) + '.pth'))

    Sensitivity = 0
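The snippet is cut off after the Sensitivity accumulator. A plausible continuation (entirely an assumption, not from the original) would binarize the single-channel output and accumulate recall over the loader:

    # Hypothetical continuation: accumulate sensitivity (recall) per checkpoint.
    with torch.no_grad():
        for images, labels in dataloader:
            probs = torch.sigmoid(net(images.cuda()))
            preds = (probs > 0.5).float().cpu()
            tp = ((preds == 1) & (labels == 1)).sum().item()
            fn = ((preds == 0) & (labels == 1)).sum().item()
            Sensitivity += tp / max(tp + fn, 1)
    print('CP{}: sensitivity = {:.4f}'.format(
        5 * checkpoint - 4, Sensitivity / batch_num))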
Example #3
    (options, args) = parser.parse_args()
    return options


if __name__ == '__main__':

    args = get_args()
    print(args)

    writer = SummaryWriter(args.tensorboard)

    have_gpu = torch.cuda.is_available()
    print('GPU available: {}'.format(have_gpu))

    net = UNet(n_channels=3, n_classes=1)
    net.eval()

    if have_gpu and args.gpu:
        net = net.cuda()
        print('Using GPU!')

    predict(validate_image_dir=args.imagedir,
            validate_label_dir=args.gt,
            validate_boundary_dir=args.bd,
            checkpoints_dir=args.checkpoint,
            net=net,
            batch_size=args.batchsize,
            gpu=args.gpu)

    ## tensorboard --logdir=./log* --port=8008
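The option definitions feeding parse_args() are cut off at the top of this snippet. A plausible get_args() matching the attributes used above would be (option names and defaults are assumptions):

from optparse import OptionParser

def get_args():
    # Hypothetical option set, inferred from the args.* attributes above.
    parser = OptionParser()
    parser.add_option('--imagedir', dest='imagedir', help='validation image directory')
    parser.add_option('--gt', dest='gt', help='ground-truth label directory')
    parser.add_option('--bd', dest='bd', help='boundary label directory')
    parser.add_option('--checkpoint', dest='checkpoint', help='checkpoint directory')
    parser.add_option('--batchsize', dest='batchsize', type='int', default=1)
    parser.add_option('--gpu', dest='gpu', action='store_true', default=False)
    parser.add_option('--tensorboard', dest='tensorboard', default='./log')
    (options, args) = parser.parse_args()
    return options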
Example #4
# normalize: a transforms.Normalize instance defined earlier in the file.
img_transforms = transforms.Compose([
    transforms.ToTensor(),
    normalize,
])

gpu = 0
checkpoint_path = "3/checkpoint_80.pth"

torch.manual_seed(1)
if gpu is not None:
    torch.cuda.manual_seed(1)

model = UNet(3, 31)
print("loaded model!")

if gpu is not None:
    model = model.cuda(gpu)
    print("model to gpu")
if os.path.isfile(checkpoint_path):
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint['state_dict'])
    print("loaded checkpoint {}".format(checkpoint_path))


def main():
    print("model")
    print(model)
    img = Image.open("test/zjf_7.jpg").convert('RGB')
    img_tensor = img_transforms(img)
    input_batch = torch.unsqueeze(img_tensor, 0)  # add batch dim: (1, 3, H, W)
    if gpu is not None:
        input_batch = input_batch.cuda(gpu, non_blocking=True)
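The snippet ends mid-function. A plausible continuation of main() (an assumption, not from the original) would run the forward pass and save the per-pixel argmax as a mask:

    # Hypothetical continuation: forward pass, per-pixel argmax, save mask.
    with torch.no_grad():
        output = model(input_batch)  # (1, 31, H, W) logits
    pred = output.argmax(dim=1).squeeze(0).cpu().numpy().astype('uint8')
    Image.fromarray(pred).save('test/zjf_7_pred.png')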
Example #5
# data_amp, test_data, batch_size and num_epochs come from the (omitted)
# data-loading code above this snippet.
test_label_mask = data_amp['test_label_instance']
num_test_instances = len(test_data)

test_data = torch.from_numpy(test_data).type(torch.FloatTensor)
test_label = torch.from_numpy(test_label_mask).type(torch.LongTensor)
# test_data = test_data.view(num_test_instances, 1, -1)
# test_label = test_label.view(num_test_instances, 2)

test_dataset = TensorDataset(test_data, test_label)
test_data_loader = DataLoader(dataset=test_dataset,
                              batch_size=batch_size,
                              shuffle=False)

unet = UNet(n_classes=7)
unet = unet.cuda()

criterion = nn.CrossEntropyLoss(reduction='sum').cuda()  # sum over pixels (size_average=False in older PyTorch)
optimizer = torch.optim.Adam(unet.parameters(), lr=0.005)
# Halve the learning rate at each milestone epoch.
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer,
    milestones=[10, 20, 30, 40, 60, 70, 80, 90, 100, 110, 120, 130,
                140, 150, 160, 170, 180, 190, 200, 250, 300],
    gamma=0.5)
train_loss = np.zeros([num_epochs, 1])
test_loss = np.zeros([num_epochs, 1])
train_acc = np.zeros([num_epochs, 1])
test_acc = np.zeros([num_epochs, 1])
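The snippet stops after allocating the bookkeeping arrays. A skeleton of the epoch loop that would fill them and step the scheduler might look like this (a sketch; train_data_loader and the loop body are assumptions, not from the original):

for epoch in range(num_epochs):
    unet.train()
    for data, labels in train_data_loader:  # hypothetical training loader
        output = unet(data.cuda())
        loss = criterion(output, labels.cuda())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss[epoch] += loss.item()
    scheduler.step()  # applies the milestone-based LR decay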
Example #6
def main():
    net = UNet(3, n_classes=3)
    #net.load_state_dict(torch.load("./MODEL.pth"))
    #print("Model loaded.")
    if len(args['snapshot']) == 0:
        # net.load_state_dict(torch.load(os.path.join(ckpt_path, 'cityscapes (coarse)-psp_net', 'xx.pth')))
        curr_epoch = 1
        args['best_record'] = {
            'epoch': 0,
            'iter': 0,
            'val_loss': 1e10,
            'acc': 0,
            'acc_cls': 0,
            'mean_iu': 0,
            'fwavacc': 0
        }
    else:
        print('training resumes from ' + args['snapshot'])
        net.load_state_dict(
            torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'])))
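        # The parsing below assumes the snapshot filename alternates
        # keys and values, with epoch, iter, val_loss, acc, acc_cls,
        # mean_iu and fwavacc at split positions 1, 3, 5, 7, 9, 11, 13.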
        split_snapshot = args['snapshot'].split('_')
        curr_epoch = int(split_snapshot[1]) + 1
        args['best_record'] = {
            'epoch': int(split_snapshot[1]),
            'iter': int(split_snapshot[3]),
            'val_loss': float(split_snapshot[5]),
            'acc': float(split_snapshot[7]),
            'acc_cls': float(split_snapshot[9]),
            'mean_iu': float(split_snapshot[11]),
            'fwavacc': float(split_snapshot[13])
        }
    net.cuda().train()

    mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    train_joint_transform = joint_transforms.Compose([
        joint_transforms.Scale(args['longer_size']),
        joint_transforms.RandomRotate(10),
        joint_transforms.RandomHorizontallyFlip()
    ])
    sliding_crop = joint_transforms.SlidingCrop(args['crop_size'],
                                                args['stride_rate'],
                                                ignore_label)
    train_input_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    val_input_transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(*mean_std)
    ])
    target_transform = extended_transforms.MaskToTensor()
    visualize = standard_transforms.Compose([
        # Scale was renamed Resize in newer torchvision releases.
        standard_transforms.Resize(args['val_img_display_size']),
        standard_transforms.ToTensor()
    ])

    train_set = Retinaimages('training',
                             joint_transform=train_joint_transform,
                             sliding_crop=sliding_crop,
                             transform=train_input_transform,
                             target_transform=target_transform)
    train_loader = DataLoader(train_set,
                              batch_size=args['train_batch_size'],
                              num_workers=2,
                              shuffle=True)
    val_set = Retinaimages('validate',
                           transform=val_input_transform,
                           sliding_crop=sliding_crop,
                           target_transform=target_transform)
    val_loader = DataLoader(val_set,
                            batch_size=1,
                            num_workers=2,
                            shuffle=False)

    criterion = CrossEntropyLoss2d(size_average=True).cuda()

    # Biases get twice the base learning rate and no weight decay.
    optimizer = optim.SGD([
        {'params': [param for name, param in net.named_parameters()
                    if name.endswith('bias')],
         'lr': 2 * args['lr']},
        {'params': [param for name, param in net.named_parameters()
                    if not name.endswith('bias')],
         'lr': args['lr'],
         'weight_decay': args['weight_decay']},
    ], momentum=args['momentum'], nesterov=True)

    if len(args['snapshot']) > 0:
        optimizer.load_state_dict(
            torch.load(
                os.path.join(ckpt_path, exp_name, 'opt_' + args['snapshot'])))
        optimizer.param_groups[0]['lr'] = 2 * args['lr']
        optimizer.param_groups[1]['lr'] = args['lr']

    check_mkdir(ckpt_path)
    check_mkdir(os.path.join(ckpt_path, exp_name))
    with open(os.path.join(ckpt_path, exp_name, '_1.txt'), 'w') as f:
        f.write(str(args) + '\n\n')

    train(train_loader, net, criterion, optimizer, curr_epoch, args,
          val_loader, visualize, val_set)
Example #7
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=args.batchsize,
                                         shuffle=True,
                                         num_workers=4)

torch.manual_seed(1)
if args.gpu is not None:
    torch.cuda.manual_seed(1)
    cudnn.deterministic = True
    cudnn.benchmark = False  # benchmark=True would defeat the determinism requested above

model = UNet(3, 31)
print("loaded model!")

if args.gpu is not None:
    model = model.cuda(args.gpu)
    print("model to gpu")
if os.path.isfile(args.checkpoint):
    checkpoint = torch.load(args.checkpoint)
    model.load_state_dict(checkpoint['state_dict'])
    print("loaded checkpoint '{}'".format(args.checkpoint))

#criterion = nn.MSELoss()
criterion = nn.L1Loss()
optimizer = torch.optim.SGD(model.parameters(),
                            args.lr,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay)
#optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.5, 0.999))
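The snippet stops after the optimizer setup. A minimal optimization step with the L1 criterion might look like this (a sketch under the snippet's own names; reusing testloader here is purely illustrative):

model.train()
for images, targets in testloader:
    if args.gpu is not None:
        images = images.cuda(args.gpu, non_blocking=True)
        targets = targets.cuda(args.gpu, non_blocking=True)
    output = model(images)
    loss = criterion(output, targets)  # mean absolute error
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()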