Example #1
def train(args, train_dataloader, valid_dataloader):

    if str(args.model).lower() == 'fcn32s':
        model = VGG16_FCN32s(n_classes=7)
    elif str(args.model).lower() == 'fcn8s':
        model = VGG16_FCN8s(n_classes=7)
    else:
        model = UNet(n_channels=3, n_classes=7)
    # model = nn.DataParallel(model, device_ids=[0, 1])  # optional multi-GPU
    model.to(args.device)

    # loss: per-class weights (raw estimates were
    # 0.79, 0.14, 1.0, 0.73, 2.74, 1.04, 132, 0)
    weight = torch.tensor([0.79, 0.14, 1.0, 0.73, 2.74, 1.04, 1.0])
    criterion = nn.CrossEntropyLoss(weight).to(args.device)

    # optim
    optimizer = optim.SGD(model.parameters(),
                          lr=1e-3,
                          momentum=0.9,
                          weight_decay=5e-4)
    if str(args.model).lower() == 'fcn32s':
        milestones = [1, 10, 20, 50]
    elif str(args.model).lower() == 'fcn8s':
        milestones = [1, 10, 20, 60]
    else:
        milestones = [25, 50, 80]
    train_scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=milestones, gamma=0.2)  #learning rate decay

    best_iou = 0
    for epoch in range(args.epochs):
        print(f"\tEpoch {epoch}")

        loss, acc, iou = _run_train(args, train_dataloader, model, criterion,
                                    optimizer)
        print("\t train loss:{:.5f}, acc:{:.3f}, iou:{:.2f}".format(
            loss, acc, iou))

        loss, acc, iou = _run_eval(args, valid_dataloader, model, criterion)
        print("\t valid loss:{:.5f}, acc:{:.3f}, iou:{:.2f}".format(
            loss, acc, iou))

        if epoch in milestones:
            torch.save(model.state_dict(),
                       f"./result/{epoch}_{args.model}.pth")
            print('\t [Info] saved milestone weights')
        if epoch > milestones[1] and iou > best_iou:
            best_iou = iou
            torch.save(model.state_dict(), f"./result/best_{args.model}.pth")
            print('\t [Info] saved best weights')
Example #2
def main():

    train_dataset = MHP('/root/dataset/LV-MHP-v2/train', n_classes=59)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=12,
                              shuffle=True,
                              num_workers=0)
    model = UNet(n_channels=3, n_classes=59).cuda()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()

    writer = tbx.SummaryWriter(log_dir="logs")

    n_epochs = 10000
    for epoch in range(n_epochs):

        train_epoch(train_loader, model, criterion, optimizer, epoch, writer)

        # save a checkpoint every epoch (assumes ./checkpoints exists)
        state = {'state_dict': model.state_dict()}
        filename = 'checkpoints/{0:05d}.pth.tar'.format(epoch)
        torch.save(state, filename)
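`train_epoch` is likewise not shown in Example #2. A minimal sketch, assuming the MHP loader yields `(image, mask)` pairs and that the helper logs an average loss to the tensorboardX writer; the logging tag is made up:

def train_epoch(loader, model, criterion, optimizer, epoch, writer):
    # Hypothetical helper matching the call above; not the original code.
    model.train()
    running = 0.0
    for images, masks in loader:
        images, masks = images.cuda(), masks.cuda()
        optimizer.zero_grad()
        loss = criterion(model(images), masks)
        loss.backward()
        optimizer.step()
        running += loss.item()
    writer.add_scalar('train/loss', running / len(loader), epoch)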
Example #3

    # Create training dataset (default_transform is defined earlier in the
    # original script)

    train_dataset = dset.ImageFolder(root=args.train_dir,
                                     transform=default_transform)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    # Create validation dataset

    valid_data = dset.ImageFolder(root=args.valid_dir,
                                  transform=default_transform)
    valid_loader = torch.utils.data.DataLoader(valid_data,
                                               batch_size=64,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    model = UNet(n_channels=3, n_classes=3, bilinear=True)
    model.to(args.device)
    optimizer = torch.optim.Adam(params=model.parameters(),
                                 lr=args.lr,
                                 weight_decay=0)

    logger = None  # train_256 below expects `logger` even when not resuming
    state_dict = {'itr': 0}

    if args.resume:
        print('Loading weights & resuming from iteration {}'.format(
            args.checkpoint))
        model, optimizer, logger = load_UNET_checkpoint(
            model, optimizer, '256', args)
        state_dict['itr'] = args.checkpoint

    for epoch in range(args.num_epochs):
        train_256(epoch, state_dict, model, optimizer, train_loader,
                  valid_loader, args, logger)
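Examples #3 and #4 both resume through `load_UNET_checkpoint`, passing a resolution tag ('256' here, '128' below). A plausible sketch, assuming the checkpoint bundles model and optimizer state; the path layout and dict keys are guesses:

def load_UNET_checkpoint(model, optimizer, tag, args):
    # Hypothetical loader; the real helper and its file layout are not shown.
    path = 'checkpoints/unet_{}_{}.pth'.format(tag, args.checkpoint)  # assumed
    ckpt = torch.load(path, map_location=args.device)
    model.load_state_dict(ckpt['model'])
    optimizer.load_state_dict(ckpt['optimizer'])
    logger = ckpt.get('logger')  # whatever logging object was saved
    return model, optimizer, logger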
Example #4

    default_transform = transforms.Compose([
                            transforms.Resize(args.image_size),
                            transforms.ToTensor()
                        ])

    # Create training dataset
    
    train_dataset = dset.ImageFolder(root=args.train_dir, transform=default_transform)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,
                                               shuffle=True, num_workers=args.workers,
                                               pin_memory=True, drop_last=True)

    # Create validation dataset
    
    valid_data = dset.ImageFolder(root=args.valid_dir, transform=default_transform)
    valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=64,
                                               shuffle=True, num_workers=args.workers,
                                               pin_memory=True, drop_last=True)
    
    model = UNet(n_channels=3, n_classes=3, bilinear=True)
    model.to(args.device)
    optimizer = torch.optim.Adam(params=model.parameters(), lr=args.lr, weight_decay=0)

    logger = None  # train below expects `logger` even when not resuming
    state_dict = {'itr': 0}

    if args.resume:
        print('Loading weights & resuming from iteration {}'.format(args.checkpoint))
        model, optimizer, logger = load_UNET_checkpoint(model, optimizer, '128', args)
        state_dict['itr'] = args.checkpoint
    
    for epoch in range(args.num_epochs):
        train(epoch, state_dict, model, optimizer, train_loader, valid_loader, args, logger)
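The per-epoch drivers `train_256` and `train` share a signature but are not shown. A heavily hedged sketch of one epoch: `dset.ImageFolder` yields `(image, class)` pairs, so the reconstruction target below is a placeholder, not the original objective:

def train(epoch, state_dict, model, optimizer, train_loader,
          valid_loader, args, logger):
    # Hypothetical epoch loop; the loss and target construction are
    # placeholders (the real objective is not shown).
    model.train()
    for images, _ in train_loader:
        images = images.to(args.device)
        output = model(images)
        loss = torch.nn.functional.l1_loss(output, images)  # placeholder
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        state_dict['itr'] += 1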
Example #5
test_data = torch.from_numpy(test_data).type(torch.FloatTensor)
test_label = torch.from_numpy(test_label_mask).type(torch.LongTensor)
# test_data = test_data.view(num_test_instances, 1, -1)
# test_label = test_label.view(num_test_instances, 2)

test_dataset = TensorDataset(test_data, test_label)
test_data_loader = DataLoader(dataset=test_dataset,
                              batch_size=batch_size,
                              shuffle=False)

unet = UNet(n_classes=7)
unet = unet.cuda()

criterion = nn.CrossEntropyLoss(reduction='sum').cuda()  # size_average=False is deprecated
optimizer = torch.optim.Adam(unet.parameters(), lr=0.005)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                 milestones=[
                                                     10, 20, 30, 40, 60, 70,
                                                     80, 90, 100, 110, 120,
                                                     130, 140, 150, 160, 170,
                                                     180, 190, 200, 250, 300
                                                 ],
                                                 gamma=0.5)
train_loss = np.zeros([num_epochs, 1])
test_loss = np.zeros([num_epochs, 1])
train_acc = np.zeros([num_epochs, 1])
test_acc = np.zeros([num_epochs, 1])

for epoch in range(num_epochs):
    print('Epoch:', epoch)
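    # The loop body is truncated in the source. A sketch of a plausible
    # continuation, evaluating on test_data_loader with the summed-loss
    # criterion defined above (assumed, not the original body):
    unet.eval()
    total_loss, correct, total = 0.0, 0, 0
    with torch.no_grad():
        for data, label in test_data_loader:
            data, label = data.cuda(), label.cuda()
            logits = unet(data)                      # (N, 7, H, W)
            total_loss += criterion(logits, label).item()
            preds = logits.argmax(dim=1)
            correct += (preds == label).sum().item()
            total += label.numel()
    test_loss[epoch] = total_loss / total
    test_acc[epoch] = correct / total
    scheduler.step()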
Example #6
cudnn.benchmark = True

model = UNet(3, 31)
print("loaded model!")

if args.gpu is not None:
    model = model.cuda(args.gpu)
    print("model to gpu")
if os.path.isfile(args.checkpoint):
    checkpoint = torch.load(args.checkpoint)
    model.load_state_dict(checkpoint['state_dict'])
    print("loaded checkpoint '{}'".format(args.checkpoint))

#criterion = nn.MSELoss()
criterion = nn.L1Loss()
optimizer = torch.optim.SGD(model.parameters(),
                            args.lr,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay)
#optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.5, 0.999))


def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = args.lr * (0.1**(epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
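`adjust_learning_rate` is defined here but its call site falls outside the snippet; typical usage is once per epoch, as in this sketch (the epoch count is an assumption):

for epoch in range(90):  # epoch count assumed
    adjust_learning_rate(optimizer, epoch)
    # ...run one training epoch with model, criterion, optimizer...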


loss_train = []
loss_test = []
Example #7
# `dataset` and `sampler` come from earlier in the original script (not shown)
dataloader = torch.utils.data.DataLoader(dataset,
                                         sampler=sampler,
                                         batch_size=32)

import sys

import numpy as np

from unet.unet_model import UNet

unet = UNet(1, 1)

n_epochs = 10

n_samples_per_epoch = 100000

all_epoch_avg_losses = []

unet = unet.cuda()

optimizer = torch.optim.Adam(unet.parameters())

for e in range(n_epochs):
    losses = []
    # note: zip() caps the number of *batches* per epoch here, not samples
    for (x, y), ii in zip(dataloader, range(n_samples_per_epoch)):
        x = x.cuda()
        y = y.cuda()[..., 0]
        yy = y.type(torch.float) * 2 - 1   # map {0, 1} labels to {-1, +1}

        p = unet.forward(x)[:, 0]
        # logistic loss: -log(sigmoid(p * yy)) with +/-1 targets
        loss = -torch.nn.LogSigmoid()(p * yy).mean()
        losses.append(loss.detach().cpu().numpy())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # record the epoch's average loss
    all_epoch_avg_losses.append(np.mean(losses))