示例#1
0
def main(train_args):
    """Train ResNetDUCHDC on CamVid with class-weighted 2D cross-entropy.

    train_args must provide: batch_size, lr, momentum, weight_decay,
    load_param, load_optim, epoch_num, save_folder.
    """
    train_dir = "/mnt/iusers01/eee01/mchiwml4/CamVid/train"
    val_dir = "/mnt/iusers01/eee01/mchiwml4/CamVid/val"

    # Per-image tensor conversion + channel normalisation (mean/std are module-level).
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    # Applied jointly to image and mask so the flip keeps them aligned.
    joint_transform = jointtransform.Compose([
        jointtransform.RandomHorizontallyFlip(),
    ])

    train_dataset = DataLoader(
        Loaddata(train_dir,
                 transform=input_transform,
                 target_transform=MaskToTensor(),
                 joint_transform=joint_transform),
        batch_size=train_args.batch_size,
        shuffle=True,
        num_workers=8,
    )
    valid_dataset = DataLoader(
        Loaddata(val_dir,
                 transform=input_transform,
                 target_transform=MaskToTensor()),
        batch_size=1,
        shuffle=True,
        num_workers=8,
    )

    label_num = 11  # CamVid: 11 labelled classes
    model = ducmodel.ResNetDUCHDC(label_num).cuda()

    # class_weight is module-level; weights rebalance the loss across classes.
    weight = torch.Tensor(class_weight).cuda()
    criterion = CrossEntropyLoss2d(weight=weight).cuda()

    lr_ = train_args.lr
    # Pretrained backbone trains at the base LR, the new layers at 10x.
    optimizer = optim.SGD(
        [
            {'params': get_1x_lr_params(model), 'lr': lr_},
            {'params': get_10x_lr_params(model), 'lr': lr_ * 10},
        ],
        momentum=train_args.momentum,
        weight_decay=train_args.weight_decay,
    )

    # Optionally resume model and/or optimizer state from checkpoints.
    if train_args.load_param is not None:
        model.load_state_dict(torch.load(train_args.load_param))
    if train_args.load_optim is not None:
        optimizer.load_state_dict(torch.load(train_args.load_optim))

    train_model(model, criterion, optimizer, lr_, train_args.epoch_num,
                train_dataset, valid_dataset, train_args.save_folder)
示例#2
0
def main(train_args):
    """Train FCDenseNet103 (Tiramisu) on CamVid with class-weighted 2D cross-entropy.

    train_args must provide: batch_size, lr, weight_decay, load_param,
    load_optim, epoch_num, save_folder.
    """
    train_dir = "/mnt/iusers01/eee01/mchiwml4/CamVid/train"
    val_dir = "/mnt/iusers01/eee01/mchiwml4/CamVid/val"

    # Per-image tensor conversion + channel normalisation (mean/std are module-level).
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    # Crop + flip are applied jointly to image and mask so they stay aligned.
    joint_transform = jointtransform.Compose([
        jointtransform.RandomCrop(224),
        jointtransform.RandomHorizontallyFlip(),
    ])

    train_dataset = DataLoader(
        Loaddata(train_dir,
                 transform=input_transform,
                 target_transform=MaskToTensor(),
                 joint_transform=joint_transform),
        batch_size=train_args.batch_size,
        shuffle=True,
        num_workers=8,
    )
    valid_dataset = DataLoader(
        Loaddata(val_dir,
                 transform=input_transform,
                 target_transform=MaskToTensor()),
        batch_size=1,
        shuffle=True,
        num_workers=8,
    )

    label_num = 11  # CamVid: 11 labelled classes
    model = fcdensenetmodel.FCDenseNet103(label_num).cuda()

    # class_weight is module-level; weights rebalance the loss across classes.
    weight = torch.Tensor(class_weight).cuda()
    criterion = CrossEntropyLoss2d(weight=weight).cuda()

    lr_ = train_args.lr
    optimizer = optim.RMSprop(model.parameters(),
                              lr=lr_,
                              weight_decay=train_args.weight_decay)
    # Multiply the LR by 0.995 each scheduler step.
    exp_lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.995)

    # Optionally resume model and/or optimizer state from checkpoints.
    if train_args.load_param is not None:
        model.load_state_dict(torch.load(train_args.load_param))
    if train_args.load_optim is not None:
        optimizer.load_state_dict(torch.load(train_args.load_optim))

    train_model(model, criterion, optimizer, exp_lr_scheduler,
                train_args.epoch_num, train_dataset, valid_dataset,
                train_args.save_folder)
示例#3
0
def main(test_args):
    """Evaluate a trained LinkNet on the CamVid test split.

    Reports inference speed, overall/per-class IoU and accuracy, and the
    per-class pixel counts.  test_args must provide load_param (path to a
    saved state_dict).
    """
    testset = "/mnt/iusers01/eee01/mchiwml4/CamVid/test"
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(mean, std)])
    test_dataset = DataLoader(Loaddata(testset,
                                       transform=transform,
                                       target_transform=MaskToTensor()),
                              batch_size=1,
                              shuffle=False,
                              num_workers=8)

    label_num = 11  # CamVid: 11 labelled classes
    model = linknetmodel.linknet(label_num)
    model = model.cuda()
    model.load_state_dict(torch.load(test_args.load_param))
    model.eval()

    # total[i] = number of ground-truth pixels of class i (tp + fn)
    total = np.zeros((label_num, ))

    running_metrics = runningScore(label_num)

    time_elapsed = 0
    for j, data in enumerate(test_dataset):

        inputs, labels = data
        inputs = inputs.cuda()

        # `Variable(..., volatile=True)` is a no-op since PyTorch 0.4;
        # torch.no_grad() is the supported way to skip autograd bookkeeping.
        # CUDA launches are asynchronous, so synchronize around the timed
        # region to measure actual inference time rather than launch time.
        torch.cuda.synchronize()
        since = time.time()
        with torch.no_grad():
            outputs = model(inputs)
        torch.cuda.synchronize()
        time_elapsed += time.time() - since

        pred = outputs.data.max(1)[1].cpu().numpy()
        gt = labels.numpy()
        running_metrics.update(gt, pred)

        for i in range(label_num):
            mask = gt == i  # ground truth mask of class i
            total[i] += np.sum(
                mask)  # total number of pixels of class i (tp+fn)

    print('Inference speed: {:.0f}ms, {:.0f}fps '.format(
        time_elapsed / len(test_dataset) * 1000,
        1 / (time_elapsed / len(test_dataset))))

    score, class_iou, class_acc = running_metrics.get_scores()

    for k, v in score.items():
        print(k, v)
    print('class iou: ')
    for i in range(label_num):
        print(i, class_iou[i])
    print('class acc: ')
    for i in range(label_num):
        print(i, class_acc[i])

    print('number of pixels:')
    print(total)
示例#4
0
def main(test_args):
    """Evaluate a trained ResNetDUCHDC on the CamVid test split.

    Prints per-image progress, overall/per-class IoU and accuracy, and the
    per-class pixel counts.  test_args must provide load_param (path to a
    saved state_dict).
    """
    testset = "/mnt/iusers01/eee01/mchiwml4/CamVid/test"
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    test_dataset = DataLoader(
        Loaddata(testset, transform=transform, target_transform=MaskToTensor()),
        batch_size=1, shuffle=False, num_workers=8)

    label_num = 11  # CamVid: 11 labelled classes
    model = ducmodel.ResNetDUCHDC(label_num)
    model = model.cuda()
    model.load_state_dict(torch.load(test_args.load_param))
    model.eval()

    # total[i] = number of ground-truth pixels of class i (tp + fn)
    total = np.zeros((label_num,))
    running_metrics = runningScore(label_num)
    for j, data in enumerate(test_dataset):
        inputs, labels = data
        inputs = inputs.cuda()

        # Inference only: disable autograd bookkeeping (the original built a
        # full graph per image, wasting GPU memory during evaluation).
        with torch.no_grad():
            outputs = model(inputs)

        pred = outputs.data.max(1)[1].cpu().numpy()
        gt = labels.numpy()

        running_metrics.update(gt, pred)
        print(j)  # per-image progress indicator
        for i in range(label_num):
            mask = gt == i  # ground truth mask of class i
            total[i] += np.sum(mask)

    score, class_iou, class_acc = running_metrics.get_scores()

    for k, v in score.items():
        print(k, v)
    print('class iou: ')
    for i in range(label_num):
        print(i, class_iou[i])
    print('class acc: ')
    for i in range(label_num):
        print(i, class_acc[i])

    print('number of pixels:')
    print(total)
示例#5
0
def main(test_args):
    """Evaluate Deeplab-ResNet on the CamVid test split, optionally refined by CRF.

    The model returns multiple heads; head [3] is upsampled to input size,
    softmaxed, optionally passed through dense_crf, then argmaxed per pixel.
    test_args must provide load_param (state_dict path) and crf (bool).
    """
    testset = "/mnt/iusers01/eee01/mchiwml4/CamVid/test"
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(mean, std)])
    test_dataset = DataLoader(Loaddata(testset,
                                       transform=transform,
                                       target_transform=MaskToTensor()),
                              batch_size=1,
                              shuffle=False,
                              num_workers=8)

    label_num = 11  # CamVid: 11 labelled classes
    model = deeplab_v2.Deeplab_Resnet(label_num)
    model = model.cuda()
    model.load_state_dict(torch.load(test_args.load_param))
    model.eval()

    # total[i] = number of ground-truth pixels of class i (tp + fn)
    total = np.zeros((label_num, ))

    running_metrics = runningScore(label_num)

    for j, data in enumerate(test_dataset):
        inputs, labels = data
        inputs = inputs.cuda()

        # Inference only: disable autograd bookkeeping (the original built a
        # full graph per image, wasting GPU memory during evaluation).
        with torch.no_grad():
            outputs = model(inputs)

            H = inputs.size()[2]
            W = inputs.size()[3]
            # Upsample the coarse head [3] back to the input resolution.
            interp_resize = nn.Upsample(size=(int(H), int(W)), mode='bilinear')
            output = interp_resize(outputs[3])
            output = F.softmax(output, dim=1)
            output = output.data.cpu().numpy()

        if test_args.crf:
            crf_output = np.zeros(output.shape)
            # NOTE(review): inputs are normalized floats; casting straight to
            # uint8 for the CRF looks lossy — confirm dense_crf's expected input.
            images = inputs.data.cpu().numpy().astype(np.uint8)
            for i, (image, prob_map) in enumerate(zip(images, output)):
                image = image.transpose(1, 2, 0)  # CHW -> HWC for the CRF
                crf_output[i] = dense_crf(image, prob_map)
            output = crf_output

        pred = np.argmax(output, axis=1)
        gt = labels.numpy()

        running_metrics.update(gt, pred)

        for i in range(label_num):
            mask = gt == i  # ground truth mask of class i
            total[i] += np.sum(
                mask)  # total number of pixels of class i (tp+fn)

    score, class_iou, class_acc = running_metrics.get_scores()

    for k, v in score.items():
        print(k, v)
    print('class iou: ')
    for i in range(label_num):
        print(i, class_iou[i])
    print('class acc: ')
    for i in range(label_num):
        print(i, class_acc[i])

    print('number of pixels:')
    print(total)
示例#6
0

# CamVid split locations and shared preprocessing.
trainset = "/mnt/iusers01/eee01/mchiwml4/CamVid/train"
validset = "/mnt/iusers01/eee01/mchiwml4/CamVid/val"
testset = "/mnt/iusers01/eee01/mchiwml4/CamVid/test"

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean, std),
])

# Applied jointly to image and mask so the flip keeps them aligned.
train_joint_transform = jointtransform.Compose([
    jointtransform.RandomHorizontallyFlip(),
])

# Build each dataset once and reuse it for both its loader and its length —
# the original instantiated Loaddata twice per split, re-scanning the data
# directory just to call len().
# NOTE(review): assumes dataset length is independent of transforms — confirm.
_train_data = Loaddata(trainset,
                       transform=transform,
                       target_transform=MaskToTensor(),
                       joint_transform=train_joint_transform)
_valid_data = Loaddata(validset,
                       transform=transform,
                       target_transform=MaskToTensor())

train_dataset = DataLoader(_train_data, batch_size=4, shuffle=True,
                           num_workers=8)
valid_dataset = DataLoader(_valid_data, batch_size=1, shuffle=True,
                           num_workers=8)

train_size = len(_train_data)
valid_size = len(_valid_data)

def get_1x_lr_params(model):
    """Collect parameters of the pretrained down-sampling stages.

    These are the parameter groups trained at the base learning rate
    (the 1x group of the SGD optimizer above).

    NOTE(review): this definition is truncated in this chunk — further
    appends and the return statement are outside the visible region.
    """
    b = []

    # Parameter iterators of the early backbone stages.
    b.append(model.down1.parameters())
    b.append(model.down2.parameters())
    b.append(model.down3.parameters())
示例#7
0
import torch
import torch.nn.functional as F
import numpy as np
import utils.jointtransform as jointtransform

# CamVid split locations and shared preprocessing.
trainset = "/mnt/iusers01/eee01/mchiwml4/CamVid/train"
validset = "/mnt/iusers01/eee01/mchiwml4/CamVid/val"
testset = "/mnt/iusers01/eee01/mchiwml4/CamVid/test"

transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize(mean, std)])
# Applied jointly to image and mask so the flip keeps them aligned.
train_joint_transform = jointtransform.Compose(
    [jointtransform.RandomHorizontallyFlip()])

# Build each dataset once and reuse it for both its loader and its length —
# the original instantiated Loaddata twice per split, re-scanning the data
# directory just to call len().
# NOTE(review): assumes dataset length is independent of transforms — confirm.
_train_data = Loaddata(trainset,
                       transform=transform,
                       target_transform=MaskToTensor(),
                       joint_transform=train_joint_transform)
_valid_data = Loaddata(validset,
                       transform=transform,
                       target_transform=MaskToTensor())

train_dataset = DataLoader(_train_data, batch_size=4, shuffle=True,
                           num_workers=8)
valid_dataset = DataLoader(_valid_data, batch_size=1, shuffle=True,
                           num_workers=8)

train_size = len(_train_data)
valid_size = len(_valid_data)
test_size = len(Loaddata(testset))  # length only; no loader needed for test here
示例#8
0
        # Pixel-wise NLL over 2D maps; class index 11 is excluded from the
        # loss (presumably the unlabeled/void CamVid class — confirm).
        # NOTE(review): nn.NLLLoss2d is deprecated in modern PyTorch;
        # nn.NLLLoss handles 2D targets directly.
        self.nll_loss = nn.NLLLoss2d(weight, size_average, ignore_index=11)

    def forward(self, inputs, targets):
        """2D cross-entropy: log-softmax over the class dim, then weighted NLL."""
        log_probs = F.log_softmax(inputs, dim=1)
        return self.nll_loss(log_probs, targets)


# CamVid training split and its preprocessing pipeline.
trainset = "/mnt/iusers01/eee01/mchiwml4/CamVid/train"

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean, std),
])

# Applied jointly to image and mask so the flip keeps them aligned.
train_joint_transform = jointtransform.Compose([
    jointtransform.RandomHorizontallyFlip(),
])

train_dataset = DataLoader(
    Loaddata(trainset,
             transform=transform,
             target_transform=MaskToTensor(),
             joint_transform=train_joint_transform),
    batch_size=3,
    shuffle=True,
    num_workers=0,
)

label_num = 11  # CamVid: 11 labelled classes
model = tiramisu_nobias.FCDenseNet103(label_num)
# Warm-start from the pretrained checkpoint before moving to the GPU.
model.load_state_dict(torch.load(
    '/mnt/iusers01/eee01/mchiwml4/pycode/segmentation/net_data/camvid/tiramisu_pretrain/net_params795.pth'
))
model = model.cuda()
# class_weight is module-level; weights rebalance the loss across classes.
weight = torch.Tensor(class_weight).cuda()
criterion = CrossEntropyLoss2d(weight=weight).cuda()