# Example 1 (scraped snippet; score: 0)
                          num_workers=1)

# Validation set: raw images and their labeled masks live in sibling
# directories under args.valdir; `mytransforms` is defined in the part of
# this script above the visible fragment.
val_dataset = Dataset(os.path.join(args.valdir, 'Image/'),
                      os.path.join(args.valdir, 'LabeledImage/'), mytransforms)
val_loader = DataLoader(val_dataset, batch_size=1, shuffle=True, num_workers=1)

# FC-DenseNet103 (Tiramisu) segmentation model; args.classes sets the
# number of output classes.
model = FCDenseNet103(args.classes)

#model=model.cuda()
# Wrap for multi-GPU data parallelism, then move parameters to GPU.
model = nn.DataParallel(model).cuda()

optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)
#optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=1e-4)
# NOTE(review): NLLLoss2d is deprecated in newer PyTorch releases in favor
# of nn.NLLLoss — confirm against the pinned torch version.
criterion = nn.NLLLoss2d()

# Resume from a specific checkpoint (epoch 0, loss 0.178 per the filename).
train_utils.load_weights(model, 'weights/weights-0-0.178-0.000.pth')

for epoch in range(args.epochs):

    print('\n _______________________________________________')
    trn_loss, trn_err = train_utils.train(model, train_loader, optimizer,
                                          criterion, epoch)
    print('Epoch {:d}\nTrain - Loss: {:.4f}'.format(epoch, trn_loss))

    # Checkpoint every epoch; the final 0 is a placeholder error value.
    train_utils.save_weights(model, epoch, float(trn_loss), 0)

    ### Adjust Lr ###
    train_utils.adjust_learning_rate(args.lr, args.decay, optimizer, epoch,
                                     DECAY_EVERY_N_EPOCHS)

    ### Validate ###
# Example 2 (scraped snippet; score: 0)
def main():
    """Train a DenseNet on CIFAR-10 and log per-epoch results to CSV.

    Command-line flags select the architecture (growth rate, depth,
    reduction, bottleneck), training length, optimizer, and the directory
    (--save) where train.csv / test.csv are written.  Passing
    --existingWeights resumes from a checkpoint and appends to existing logs.
    """

    def _str2bool(value):
        # argparse's `type=bool` is a classic pitfall: bool('False') is True,
        # so ANY non-empty string would enable the flag.  Parse the text
        # explicitly; the CLI stays backward-compatible for true/false input.
        if isinstance(value, bool):
            return value
        return str(value).strip().lower() in ('true', '1', 'yes', 'y')

    parser = argparse.ArgumentParser()
    parser.add_argument('--nClasses', type=int, default=10)  #CIFAR
    parser.add_argument('--reduction', type=float, default=1.0)  #no reduction
    parser.add_argument('--bottleneck', type=_str2bool, default=False)
    parser.add_argument('--growthRate', type=int, default=12)
    parser.add_argument('--modelDepth', type=int, default=40)
    parser.add_argument('--batchSize', type=int, default=64)
    parser.add_argument('--nEpochs', type=int, default=2)
    parser.add_argument('--no-cuda', action='store_true')
    parser.add_argument('--save', type=str, default=RESULTS_PATH)
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--existingWeights', type=str, default=None)
    parser.add_argument('--sessionName',
                        type=str,
                        default=train_utils.get_rand_str(5))
    parser.add_argument('--opt',
                        type=str,
                        default='sgd',
                        choices=('sgd', 'adam', 'rmsprop'))

    args = parser.parse_args()

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    setproctitle.setproctitle(args.save)  # process name shown in ps/top

    torch.manual_seed(args.seed)
    if args.cuda:
        print("Using CUDA")
        torch.cuda.manual_seed(args.seed)

    # CIFAR-10 per-channel mean/std used for input normalization.
    normMean = [0.49139968, 0.48215827, 0.44653124]
    normStd = [0.24703233, 0.24348505, 0.26158768]
    normTransform = transforms.Normalize(normMean, normStd)

    trainTransform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normTransform
    ])
    testTransform = transforms.Compose([transforms.ToTensor(), normTransform])

    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    print("Kwargs: " + str(kwargs))
    trainLoader = DataLoader(dset.CIFAR10(root=CIFAR10_PATH,
                                          train=True,
                                          download=True,
                                          transform=trainTransform),
                             batch_size=args.batchSize,
                             shuffle=True,
                             **kwargs)
    testLoader = DataLoader(dset.CIFAR10(root=CIFAR10_PATH,
                                         train=False,
                                         download=True,
                                         transform=testTransform),
                            batch_size=args.batchSize,
                            shuffle=False,
                            **kwargs)

    net = DenseNet(growthRate=args.growthRate,
                   depth=args.modelDepth,
                   reduction=args.reduction,
                   bottleneck=args.bottleneck,
                   nClasses=args.nClasses)

    if args.existingWeights:
        print("Loading existing weights: %s" % args.existingWeights)
        startEpoch = train_utils.load_weights(net, args.existingWeights)
        endEpoch = startEpoch + args.nEpochs
        print('Resume training at epoch: {}'.format(startEpoch))
        # BUG FIX: the original tested `args.save + 'train.csv'` (no path
        # separator), which never matched the file opened below with
        # os.path.join, so resumed runs always truncated their logs.
        if os.path.exists(os.path.join(args.save, 'train.csv')):  #assume test.csv exists
            print("Found existing train.csv")
            append_write = 'a'  # append if already exists
        else:
            print("Creating new train.csv")
            append_write = 'w'  # make a new file if not
        trainF = open(os.path.join(args.save, 'train.csv'), append_write)
        testF = open(os.path.join(args.save, 'test.csv'), append_write)
    else:
        print("Training new model from scratch")
        startEpoch = 1
        endEpoch = args.nEpochs
        trainF = open(os.path.join(args.save, 'train.csv'), 'w')
        testF = open(os.path.join(args.save, 'test.csv'), 'w')

    print('  + Number of params: {}'.format(
        sum(p.data.nelement() for p in net.parameters())))
    if args.cuda:
        net = net.cuda()

    if args.opt == 'sgd':
        optimizer = optim.SGD(net.parameters(),
                              lr=1e-1,
                              momentum=0.9,
                              weight_decay=1e-4)
    elif args.opt == 'adam':
        optimizer = optim.Adam(net.parameters(), weight_decay=1e-4)
    elif args.opt == 'rmsprop':
        optimizer = optim.RMSprop(net.parameters(), weight_decay=1e-4)

    # finally guarantees the CSV logs are flushed/closed even if an epoch
    # raises (the original leaked both handles on any exception).
    try:
        print("Training....")
        for epoch in range(startEpoch, endEpoch + 1):
            since = time.time()
            train_utils.adjust_opt(args.opt, optimizer, epoch)
            train_utils.train(epoch,
                              net,
                              trainLoader,
                              optimizer,
                              trainF,
                              sessionName=args.sessionName)
            train_utils.test(epoch, net, testLoader, optimizer, testF)
            time_elapsed = time.time() - since
            print('Time {:.0f}m {:.0f}s\n'.format(time_elapsed // 60,
                                                  time_elapsed % 60))
            if epoch != 1:
                # NOTE(review): shell-interpolates args.save; fine for a local
                # research script, but do not expose to untrusted input.
                os.system('./plot.py {} &'.format(args.save))
    finally:
        trainF.close()
        testF.close()
# Example 3 (scraped snippet; score: 0)
from pathlib import Path

# BUG FIX: `torch` is used below (torch.utils.data.DataLoader) but was
# never imported in the original snippet.
import torch
import torchvision
import torchvision.transforms as transforms

from datasets import camvid
from datasets import joint_transforms
from models import tiramisu
import utils.imgs
import utils.training as train_utils

# Location of the CamVid dataset on disk.
CAMVID_PATH = Path('/home/jingwenlai/data', 'CamVid/CamVid')
batch_size = 2

# Normalize with the dataset's precomputed per-channel statistics.
normalize = transforms.Normalize(mean=camvid.mean, std=camvid.std)
test_dset = camvid.CamVid(CAMVID_PATH,
                          'test',
                          joint_transform=None,
                          transform=transforms.Compose(
                              [transforms.ToTensor(), normalize]))
test_loader = torch.utils.data.DataLoader(test_dset,
                                          batch_size=batch_size,
                                          shuffle=False)

print("Test: %d" % len(test_loader.dataset.imgs))

# 12-class FC-DenseNet67; restore the latest checkpoint, which also yields
# the epoch it was saved at.
model = tiramisu.FCDenseNet67(n_classes=12).cuda()
model_weights = ".weights/latest.th"
startEpoch = train_utils.load_weights(model, model_weights)
print("load_weights, return epoch: ", startEpoch)

# Render predictions for 10 samples from the test set.
train_utils.view_sample_predictions(model, test_loader, n=10)
# Example 4 (scraped snippet; score: 0)
    #print("Targets: ", targets.size())
    #
    #utils.imgs.view_image(inputs[0])
    #utils.imgs.view_annotated(targets[0])

    #%%
    # Training hyperparameters: base learning rate, multiplicative decay
    # factor, and how often (in epochs) the decay is applied.
    LR = 1e-4
    LR_DECAY = 0.995
    DECAY_EVERY_N_EPOCHS = 1

    # Fixed seed for reproducible CUDA weight initialization.
    torch.cuda.manual_seed(0)

    # n_classes and weight_filename come from the enclosing (unseen) scope.
    model = tiramisu.FCDenseNet67(n_classes=n_classes).cuda()

    if weight_filename is not None:
        train_utils.load_weights(model,
                                 os.path.join(WEIGHTS_PATH, weight_filename))
        print('pretrained weights loaded')
        # Epoch number is the second '-'-separated field of the filename
        # (e.g. 'weights-12-...' resumes at epoch 13).
        start_epoch = int(weight_filename.split('-')[1]) + 1
    else:
        model.apply(train_utils.weights_init)
        print('train from beginning')
        start_epoch = 0

    optimizer = torch.optim.RMSprop(model.parameters(),
                                    lr=LR,
                                    weight_decay=1e-4)

    #criterion = train_utils.FocalLoss2d(gamma)
    # if FocalLoss2d: delete the last layer of the tiramisu
    # print('focal loss with gamma = ',gamma)
# Dataset sizes; the loaders are built earlier in this (unseen) script.
print("Val: %d" % len(val_loader.dataset.imgs))
print("Test: %d" % len(test_loader.dataset.imgs))
print("Classes: %d" % len(train_loader.dataset.classes))

# Sanity-check one batch's tensor shapes before running.
inputs, targets = next(iter(train_loader))
print("Inputs: ", inputs.size())
print("Targets: ", targets.size())

# hyperparameters
# Read from the `hyper` dict defined elsewhere in this script —
# presumably loaded from a config file (TODO confirm).
LR = hyper["learning_rate"]
LR_DECAY = hyper["lr_decay"]
DECAY_EVERY_N_EPOCHS = hyper["decay_per_n_epoch"]
N_EPOCHS = hyper["n_epoch"]

# load combined model
# Aleatoric-uncertainty variant of FC-DenseNet57, restored from a fixed
# checkpoint; `dropout` comes from the unseen part of this script.
model_path = "./.weights/weights-combined-11-27987.752-0.343.pth"
model = tiramisu.FCDenseNet57_aleatoric(n_classes=12, dropout=dropout).cuda()
load_weights(model, model_path)

if __name__ == "__main__":
    print("Visualizing ...")
    # Make visualization
    # Render per-sample predictions with uncertainty for at most the first
    # 101 samples (i runs 0..100 inclusive).
    i = 0
    for inputs, targets in train_dset:
        print("{} - th".format(i))
        train_utils.view_sample_predictions_with_uncertainty(
            model, inputs, targets, i)
        i += 1
        if i > 100:
            break
# Example 6 (scraped snippet; score: 0)
def test(epoch=1, n=1):
    """Evaluate the model on the test set and visualize sample predictions.

    Relies on module-level ``model``, ``test_loader`` and ``criterion``.

    Args:
        epoch: epoch number forwarded to ``train_utils.test`` for logging
            (default 1, matching the previous hard-coded value).
        n: number of sample predictions to render (default 1, as before).
    """
    train_utils.test(model, test_loader, criterion, epoch=epoch)
    train_utils.view_sample_predictions(model, test_loader, n=n)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Pytorch Tiramisu Training")
    parser.add_argument('--lr', default=1e-4, type=float, help="learning rate")
    parser.add_argument('--resume',
                        '-r',
                        action="store_true",
                        help="resume from checkpoint")
    args = parser.parse_args()

    startEpoch = 0
    if args.resume:
        # load_weights returns the epoch the checkpoint was saved at.
        startEpoch = train_utils.load_weights(model, '.weights/latest.th')
        print("load weights for model , start from startEpoch")
        train(startEpoch)

    # BUG FIX: the original format string ended in '%3f' (minimum field
    # width 3, NOT 3 decimal places) and had no trailing newline, so every
    # epoch's row ran together on a single CSV line.  `with` also
    # guarantees the log is closed if an epoch raises.
    with open('train_val_log.csv', 'w') as epoch_logger:
        for epoch in range(startEpoch, N_EPOCHS + 1):
            train_loss, train_acc = train(epoch)
            val_loss, val_acc = val(epoch)

            epoch_logger.write(
                "%d, %.3f, %.3f, %.3f, %.3f\n" %
                (epoch, train_loss, train_acc, val_loss, val_acc))
# Example 7 (scraped snippet; score: 0)
# Per-channel statistics used to normalize input images.
mean = [0.41189489566336, 0.4251328133025, 0.4326707089857]
std = [0.27413549931506, 0.28506257482912, 0.28284674400252]

normalize = transforms.Normalize(mean=mean, std=std)

# Joint transform: center-crop image and label together to 512x224.
test_joint_transformer = transforms.Compose(
    [joint_transforms.JointCenterCrop((512, 224))])

# Shirts test split, with tensor conversion + normalization on the image.
test_dset = shirts.Shirts(CAMVID_PATH,
                          'test',
                          joint_transform=test_joint_transformer,
                          transform=transforms.Compose(
                              [transforms.ToTensor(), normalize]))
test_loader = torch.utils.data.DataLoader(test_dset,
                                          batch_size=1,
                                          shuffle=False)

print("Test: %d" % len(test_loader.dataset.imgs))
print("Classes: %d" % len(test_loader.dataset.classes))

# Restore the trained binary-class model and run it over the full test set.
fpath = os.path.join(WEIGHTS_PATH, "weights-20-0.217-0.000.pth")
torch.cuda.manual_seed(0)
model = tiramisu.FCDenseNet00(n_classes=2).cuda()
train_utils.load_weights(model, fpath)
model.eval()
train_utils.test_set_predictions(model, test_loader)