Example #1
def main(args):
    # Network Builders
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(weights=args.weights_encoder)
    net_decoder_1 = builder.build_decoder(weights=args.weights_decoder_1)
    net_decoder_2 = builder.build_decoder(arch='c1',
                                          weights=args.weights_decoder_2)

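    # ignore_index=-1 skips unlabeled pixels; optional class weights counter imbalance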
    if args.weighted_class:
        crit = nn.NLLLoss(ignore_index=-1, weight=args.class_weight)
    else:
        crit = nn.NLLLoss(ignore_index=-1)

    # Dataset and Loader
    dataset_train = GTA(root=args.root_gta, cropSize=args.imgSize, is_train=1)
    dataset_val = CityScapes('val', root=args.root_cityscapes, cropSize=args.imgSize,
                             max_sample=args.num_val, is_train=0)
    dataset_val_2 = BDD('val', root=args.root_bdd, cropSize=args.imgSize,
                        max_sample=args.num_val, is_train=0)

    loader_val = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=args.batch_size_eval,
        shuffle=False,
        num_workers=int(args.workers),
        drop_last=True)
    loader_val_2 = torch.utils.data.DataLoader(
        dataset_val_2,
        batch_size=args.batch_size_eval,
        shuffle=False,
        num_workers=int(args.workers),
        drop_last=True)
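    # dataset_train is only used here to define the nominal epoch length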
    args.epoch_iters = int(len(dataset_train) / args.batch_size)
    print('1 Epoch = {} iters'.format(args.epoch_iters))

    # load nets into gpu
    if args.num_gpus > 1:
        net_encoder = nn.DataParallel(net_encoder,
                                      device_ids=range(args.num_gpus))
        net_decoder_1 = nn.DataParallel(net_decoder_1,
                                        device_ids=range(args.num_gpus))
        net_decoder_2 = nn.DataParallel(net_decoder_2,
                                        device_ids=range(args.num_gpus))

    nets = (net_encoder, net_decoder_1, net_decoder_2, crit)
    for net in nets:
        net.cuda()

    history = {split: {'epoch': [], 'err': [], 'acc': [], 'mIoU': []}
               for split in ('train', 'val', 'val_2')}

    # eval
    evaluate(nets, loader_val, loader_val_2, history, 0, args)
    print('Evaluation Done!')
Example #2
def main():
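    # color palette used to render per-pixel class indices as an RGB image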
    colors = loadmat('colormap.mat')['colors']

    dataset = GTA(root='/home/selfdriving/datasets/GTA_full', is_train=0)
    h_s, w_s = 720, 1312
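    # (h_s, w_s): label-map resolution for the selected dataset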

    #dataset = CityScapes('val', root='/home/selfdriving/datasets/cityscapes_full', is_train=0)
    #h_s, w_s = 720, 1440

    #dataset = BDD('val',root='/home/selfdriving/datasets/bdd100k', is_train=0)
    #h_s, w_s = 720, 1280

    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=16,
                                         shuffle=True,
                                         num_workers=8,
                                         drop_last=False)

    count = 0
    statistics = np.zeros([19], dtype=np.int64)
    mean_image = torch.zeros([19, h_s, w_s]).to(torch.long)
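    # statistics[c] counts pixels of class c over the whole dataset;
    # mean_image[c] counts, per pixel location, how often class c occurs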
    for batch_data in tqdm(loader):
        (imgs, segs, infos) = batch_data
        count += imgs.size(0)
        for i in range(19):
            statistics[i] += torch.sum(segs == i).item()
            mean_image[i] += torch.sum(segs == i, dim=0)

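    # normalize counts into per-pixel class frequencies: pred[c, y, x] in [0, 1]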
    pred = mean_image.cpu().numpy() / count
    pred_ = np.argmax(pred, axis=0)
    pred_color = colorEncode(pred_, colors).astype(np.uint8)

    #print(entropy(np.ones(19)))
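    # per-pixel entropy of the class frequencies; high values mark uncertain regions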
    entropies = entropy(pred)
    plt.imshow(entropies, cmap='hot')
    plt.colorbar()
    plt.savefig('./entropies.png')
    plt.clf()

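    # frequency of the dominant class at each pixel (a crude confidence map)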
    max_vals = np.max(pred, axis=0)
    plt.imshow(max_vals, cmap='hot')
    plt.colorbar()
    plt.savefig('./max_vals.png')

    imsave('./mean.png', pred_color)
Example #3
def main(args):
    # Network Builders
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(weights=args.weights_encoder)
    net_decoder_1 = builder.build_decoder(weights=args.weights_decoder_1)
    net_decoder_2 = builder.build_decoder(arch='c1',
                                          weights=args.weights_decoder_2)

    if args.weighted_class:
        crit = nn.NLLLoss(ignore_index=-1, weight=args.class_weight)
    else:
        crit = nn.NLLLoss(ignore_index=-1)

    # Dataset and Loader
    dataset_train = GTA(root=args.root_gta,
                        cropSize=args.imgSize,
                        is_train=1,
                        random_mask=args.mask)
    dataset_val = CityScapes('val',
                             root=args.root_cityscapes,
                             cropSize=args.imgSize,
                             max_sample=args.num_val,
                             is_train=0)
    dataset_val_2 = BDD('val',
                        root=args.root_bdd,
                        cropSize=args.imgSize,
                        max_sample=args.num_val,
                        is_train=0)

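    # drop_last=True keeps every batch full, so per-epoch iteration counts stay fixed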
    loader_train = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=int(args.workers),
                                               drop_last=True)
    loader_val = torch.utils.data.DataLoader(dataset_val,
                                             batch_size=args.batch_size_eval,
                                             shuffle=False,
                                             num_workers=int(args.workers),
                                             drop_last=True)
    loader_val_2 = torch.utils.data.DataLoader(dataset_val_2,
                                               batch_size=args.batch_size_eval,
                                               shuffle=False,
                                               num_workers=int(args.workers),
                                               drop_last=True)
    args.epoch_iters = int(len(dataset_train) / args.batch_size)
    print('1 Epoch = {} iters'.format(args.epoch_iters))

    # load nets into gpu
    if args.num_gpus > 1:
        net_encoder = nn.DataParallel(net_encoder,
                                      device_ids=range(args.num_gpus))
        net_decoder_1 = nn.DataParallel(net_decoder_1,
                                        device_ids=range(args.num_gpus))
        net_decoder_2 = nn.DataParallel(net_decoder_2,
                                        device_ids=range(args.num_gpus))

    nets = (net_encoder, net_decoder_1, net_decoder_2, crit)
    for net in nets:
        net.cuda()

    # Set up optimizers
    optimizers = create_optimizers(nets, args)

    # Main loop
    history = {
        split: {
            'epoch': [],
            'err': [],
            'acc': [],
            'mIoU': []
        }
        for split in ('train', 'val', 'val_2')
    }

    # optional initial eval
    # evaluate(nets, loader_val, loader_val_2, history, 0, args)
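    # each epoch: train, optionally evaluate, checkpoint, and adjust the learning rate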
    for epoch in range(1, args.num_epoch + 1):
        train(nets, loader_train, optimizers, history, epoch, args)

        # Evaluation
        if epoch % args.eval_epoch == 0:
            evaluate(nets, loader_val, loader_val_2, history, epoch, args)

        # checkpointing
        checkpoint(nets, history, args)

        # adjust learning rate
        adjust_learning_rate(optimizers, epoch, args)

    print('Training Done!')
Example #4
def main(args):
    # Network Builders
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(weights=args.weights_encoder)
    net_decoder_1 = builder.build_decoder(weights=args.weights_decoder_1,
                                          use_softmax=True)
    net_decoder_2 = builder.build_decoder(weights=args.weights_decoder_2,
                                          use_softmax=True)

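    # note: CrossEntropyLoss applies log-softmax internally and expects raw logits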
    if args.weighted_class:
        crit1 = nn.CrossEntropyLoss(ignore_index=-1, weight=args.class_weight)
    else:
        crit1 = nn.CrossEntropyLoss(ignore_index=-1)

    # Dataset and Loader
    dataset_train_sup = GTA(root=args.root_gta,
                            cropSize=args.imgSize,
                            max_sample=-1,
                            is_train=1,
                            random_mask=args.mask)
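    # split the supervised set into gamma1 near-equal chunks (remainder to chunk 0)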
    split_lengths = [len(dataset_train_sup) // args.gamma1] * args.gamma1
    split_lengths[0] += len(dataset_train_sup) % args.gamma1
    split_dataset_train_sup = torch.utils.data.random_split(
        dataset_train_sup, split_lengths)
    dataset_train_unsup = CityScapes('train',
                                     root=args.root_cityscapes,
                                     cropSize=args.imgSize,
                                     max_sample=-1,
                                     is_train=1)
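    # likewise split the unlabeled Cityscapes training set into gamma2 chunks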
    split_lengths = [len(dataset_train_unsup) // args.gamma2] * args.gamma2
    split_lengths[0] += len(dataset_train_unsup) % args.gamma2
    split_dataset_train_unsup = torch.utils.data.random_split(
        dataset_train_unsup, split_lengths)
    dataset_val = CityScapes('val',
                             root=args.root_cityscapes,
                             cropSize=args.imgSize,
                             max_sample=args.num_val,
                             is_train=0)

    loader_val = torch.utils.data.DataLoader(dataset_val,
                                             batch_size=args.batch_size_eval,
                                             shuffle=False,
                                             num_workers=int(args.workers),
                                             drop_last=True)

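    # an epoch covers one supervised chunk plus one unlabeled chunk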
    args.epoch_iters = int(
        (len(dataset_train_sup) // args.gamma1 +
         len(dataset_train_unsup) // args.gamma2) / args.batch_size)
    print('1 Epoch = {} iters'.format(args.epoch_iters))

    # load nets into gpu
    if args.num_gpus > 1:
        net_encoder = nn.DataParallel(net_encoder,
                                      device_ids=range(args.num_gpus))
        net_decoder_1 = nn.DataParallel(net_decoder_1,
                                        device_ids=range(args.num_gpus))
        net_decoder_2 = nn.DataParallel(net_decoder_2,
                                        device_ids=range(args.num_gpus))

    nets = (net_encoder, net_decoder_1, net_decoder_2, crit1)
    for net in nets:
        net.cuda()

    # Set up optimizers
    optimizers = create_optimizers(nets, args)

    # Main loop
    history = {
        split: {
            'epoch': [],
            'err': [],
            'acc': [],
            'mIoU': []
        }
        for split in ('train', 'val')
    }

    # optional initial eval
    # evaluate(nets, loader_val, history, 0, args)
    for epoch in range(1, args.num_epoch + 1):
        train(nets, split_dataset_train_sup, split_dataset_train_unsup,
              optimizers, history, epoch, args)

        # Evaluation and visualization
        if epoch % args.eval_epoch == 0:
            evaluate(nets, loader_val, history, epoch, args)

            # checkpointing
            checkpoint(nets, history, args)

        # adjust learning rate
        adjust_learning_rate(optimizers, epoch, args)

    print('Training Done!')