Example 1
0
def main(config_file, resume):
    """Train FCN32s according to *config_file*.

    Args:
        config_file: path to a YAML training configuration.
        resume: checkpoint path to continue from, or a falsy value to
            start from VGG16-pretrained weights.
    """
    cfg = load_config(config_file)
    # Echo the effective configuration to stderr for the run log.
    yaml.safe_dump(cfg, sys.stderr, default_flow_style=False)

    use_cuda = torch.cuda.is_available()

    # Fixed seed for reproducibility, on CPU and (when present) GPU.
    seed = 1
    torch.manual_seed(seed)
    if use_cuda:
        torch.cuda.manual_seed(seed)

    # 1. dataset
    loader_opts = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        DatasetV1(split='train', transform=True, aug=cfg['aug']),
        batch_size=cfg.get('batch_size', 1),
        shuffle=True,
        **loader_opts)
    valid_loader = torch.utils.data.DataLoader(
        DatasetV1(split='valid', transform=True, aug=cfg['aug']),
        batch_size=1,
        shuffle=False,
        **loader_opts)

    # 2. model
    n_class = len(DatasetV1.class_names)
    model = torchfcn.models.FCN32s(n_class=n_class, nodeconv=True)
    start_epoch = 0
    if resume:
        # Restore weights and the epoch counter from the checkpoint.
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
    else:
        # Fresh run: initialize the encoder from pretrained VGG16.
        vgg16 = torchfcn.models.VGG16(pretrained=True)
        model.copy_params_from_vgg16(vgg16, copy_fc8=False, init_upscore=False)
    if use_cuda:
        model = model.cuda()

    # 3. optimizer — class is selected by name from the config.
    optim_cls = getattr(torch.optim, cfg['optimizer'])
    optim = optim_cls(model.parameters(), lr=cfg['lr'],
                      weight_decay=cfg['weight_decay'])
    if resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=use_cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=valid_loader,
        out=cfg['out'],
        max_iter=cfg['max_iteration'],
    )
    trainer.epoch = start_epoch
    # Reconstruct the absolute iteration count from completed epochs.
    trainer.iteration = start_epoch * len(train_loader)
    trainer.train()
Example 2
0
def main():
    """Train FCN16s on the satellite dataset (CLI entry point).

    Fixes a latent NameError: the resume branch had been commented out,
    but the optimizer section still read ``checkpoint`` whenever
    ``--resume`` was passed.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('-g', '--gpu', type=int, required=True, help='gpu id')
    parser.add_argument('--resume', help='checkpoint path')
    # configurations (same configuration as original work)
    # https://github.com/shelhamer/fcn.berkeleyvision.org
    parser.add_argument(
        '--max-iteration', type=int, default=100000, help='max iteration'
    )
    parser.add_argument(
        '--lr', type=float, default=1.0e-12, help='learning rate',
    )
    parser.add_argument(
        '--weight-decay', type=float, default=0.0005, help='weight decay',
    )
    parser.add_argument(
        '--momentum', type=float, default=0.99, help='momentum',
    )
    parser.add_argument(
        '--pretrained-model',
        help='pretrained model of FCN32s',
    )
    args = parser.parse_args()

    # Record run metadata so the config.yaml snapshot is reproducible.
    args.model = 'FCN16s'
    args.git_hash = git_hash()

    now = datetime.datetime.now()
    args.out = osp.join(here, 'logs', now.strftime('%Y%m%d_%H%M%S.%f'))

    os.makedirs(args.out)
    with open(osp.join(args.out, 'config.yaml'), 'w') as f:
        yaml.safe_dump(args.__dict__, f, default_flow_style=False)

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    cuda = torch.cuda.is_available()

    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset

    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.SatelliteDataset(split='train', transform=True),
        batch_size=1, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.SatelliteDataset(
            split='val', transform=True),
        batch_size=1, shuffle=False, **kwargs)

    # 2. model

    model = torchfcn.models.FCN16s(n_class=2)
    start_epoch = 0
    start_iteration = 0
    if args.resume:
        # BUG FIX: restore the checkpoint-loading that had been commented
        # out; without it, `optim.load_state_dict(checkpoint[...])` below
        # raised NameError whenever --resume was given.
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    # NOTE(review): fresh runs still start from random init; the FCN32s
    # initialization via --pretrained-model remains disabled, as before.
    if cuda:
        model = model.cuda()

    # 3. optimizer
    # Biases get twice the lr and no weight decay (original FCN recipe).
    optim = torch.optim.SGD(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': args.lr * 2, 'weight_decay': 0},
        ],
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay)
    if args.resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=args.out,
        max_iter=args.max_iteration,
        interval_validate=4000,
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
Example 3
0
def main():
    """Train FCN8sInstance for instance segmentation from a YAML config."""
    args = parse_args()
    gpu = args.gpu
    cfg = config.get_config(args.config)
    if cfg['no_inst']:
        assert cfg['n_max_per_class'] == 1, "no_inst implies n_max_per_class=1.  Please change " \
                                            "the value accordingly."

    out = get_log_dir(osp.basename(__file__).replace(
        '.py', ''), args.config, config.create_config_copy(cfg),
        parent_directory=osp.dirname(osp.abspath(__file__)))

    logger.info('logdir: {}'.format(out))
    resume = args.resume

    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()

    # Seed torch and numpy so dataset shuffling is reproducible too.
    torch.manual_seed(1337)
    np.random.seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset
    train_loader, val_loader, train_loader_for_val = get_dataset_loaders(cuda, cfg)
    # BUG FIX: the filter previously tested the truthiness of the whole
    # 'semantic' assignment container instead of the entry for sem_id,
    # so every train id passed.  Index by sem_id, mirroring the
    # 'instance' lookup below.
    semantic_train_ids = [ci for ci, sem_id in enumerate(train_loader.dataset.train_id_list)
                          if train_loader.dataset.train_id_assignments['semantic'][sem_id]]
    # TODO(allie): handle the case where we don't have consecutive semantic vals.
    number_per_instance_class = cfg['n_max_per_class']
    # One slot per semantic-only class; n_max_per_class slots for classes
    # that carry instance labels.
    n_instances_by_semantic_id = [
        number_per_instance_class if train_loader.dataset.train_id_assignments['instance'][sem_id]
        else 1 for sem_id in semantic_train_ids]

    problem_config = instance_utils.InstanceProblemConfig(
        semantic_vals=semantic_train_ids,
        n_instances_by_semantic_id=n_instances_by_semantic_id,
        void_value=train_loader.dataset.void_value)

    # 2. model
    model = torchfcn.models.FCN8sInstance(
        problem_config.n_classes, map_to_semantic=cfg['map_to_semantic'],
        semantic_instance_class_list=problem_config.semantic_instance_class_list)

    start_epoch = 0
    start_iteration = 0
    if resume:
        # Restore weights and progress counters from the checkpoint.
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        # Fresh run: initialize from pretrained VGG16.
        vgg16 = torchfcn.models.VGG16(pretrained=True)
        model.copy_params_from_vgg16(vgg16)
    if cuda:
        model = model.cuda()

    # 3. optimizer
    optim = torch.optim.Adam(model.parameters(), lr=cfg['lr'], weight_decay=cfg['weight_decay'])

    if resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    writer = SummaryWriter(log_dir=out)
    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=out,
        max_iter=cfg.get('max_iteration'),
        interval_validate=cfg.get('interval_validate', len(train_loader)),
        tensorboard_writer=writer,
        matching_loss=cfg.get('matching'),
        recompute_loss_at_optimal_permutation=cfg.get('recompute_optimal_loss'),
        size_average=cfg.get('size_average'),
        train_loader_for_val=train_loader_for_val
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    logger.info('Starting training.')
    trainer.train()
Example 4
0
def main():
    """K-fold training of FCN8s on the AirLab dataset (CLI entry point).

    Fix: the resume checkpoint was ``torch.load``-ed twice per fold (once
    for the model, once for the optimizer); the second load is removed
    and the already-loaded checkpoint is reused.
    """
    # Detect whether we are on the developer's notebook via a hashed MAC
    # address, to shrink the dataset and disable CUDA for quick runs.
    m = hashlib.sha256()
    m.update(str(uuid.getnode()).encode('utf-8'))
    on_my_notebook = m.digest() in nb_hashs

    args = argument_parsing()

    args.model = 'FCN8s'
    args.git_hash = git_hash(
    )  # This is a nice idea: Makes results reproducible by logging current git commit.

    args.use_cuda = prepare_cuda(args, torch_seed=42)
    args.use_cuda = False if on_my_notebook else args.use_cuda

    settings_to_logfile(args)

    print("Output folder:\n{}".format(args.out))

    for k in range(args.k_fold):
        print("Training fold {}/{}".format(k, args.k_fold))

        out = osp.join(args.out, "fold_{}".format(k))
        # Prepare Dataset
        root = osp.expanduser(
            '~/Daten/datasets/cmu-airlab/assignment-task-5/data')
        if on_my_notebook:
            root = "../data"
        kwargs = {
            'num_workers': 8,
            'pin_memory': True
        } if args.use_cuda else {}

        train_dst = AirLabClassSegBase(root,
                                       transform=True,
                                       max_len=3 if on_my_notebook else None,
                                       k_fold=args.k_fold,
                                       k_fold_val=k,
                                       use_augmented=False)

        test_dst = AirLabClassSegBase(root,
                                      val=True,
                                      transform=True,
                                      max_len=3 if on_my_notebook else None,
                                      k_fold=args.k_fold,
                                      k_fold_val=k,
                                      use_augmented=False)

        # NOTE(review): the training loader does not shuffle — confirm
        # this is intentional (shuffling is usual for training).
        train_loader = DataLoader(train_dst,
                                  batch_size=5,
                                  shuffle=False,
                                  **kwargs)
        val_loader = DataLoader(test_dst,
                                batch_size=1,
                                shuffle=False,
                                **kwargs)

        # Check for checkpoint.
        start_epoch = 0
        start_iteration = 0
        checkpoint = None
        if args.resume:
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            start_iteration = checkpoint['iteration']

        # Prepare model. Load weights from checkpoint if available.
        fcn_model = prepare_model(args,
                                  freeze_cnn_weights=True,
                                  checkpoint=checkpoint)

        # Prepare optimizer and learning rate scheduler.
        # Biases get twice the lr and no weight decay (FCN recipe).
        optim = torch.optim.SGD([
            {
                'params': get_parameters(fcn_model, bias=False)
            },
            {
                'params': get_parameters(fcn_model, bias=True),
                'lr': args.lr * 2,
                'weight_decay': 0
            },
        ],
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

        if args.resume:
            # FIX: reuse the checkpoint loaded above instead of reading
            # the file from disk a second time.
            optim.load_state_dict(checkpoint['optim_state_dict'])

        scheduler = MultiStepLR(optim,
                                milestones=[64, 67, 70],
                                gamma=0.1,
                                last_epoch=start_epoch - 1)

        weight_unfreezer = prepare_weight_unfreezer(optim,
                                                    fcn_model,
                                                    cnn_weights_frozen=True)
        model_refiner = prepare_model_refinement(fcn_model)

        trainer = torchfcn.Trainer(cuda=args.use_cuda,
                                   model=fcn_model,
                                   optimizer=optim,
                                   lr_scheduler=scheduler,
                                   train_loader=train_loader,
                                   val_loader=val_loader,
                                   out=out,
                                   max_epoch=args.max_epoch,
                                   interval_val_viz=5,
                                   epoch_callback_tuples=[(70,
                                                           weight_unfreezer)])

        trainer.epoch = start_epoch
        trainer.iteration = start_iteration
        trainer.train()
Example 5
0
def main():
    """Train FCN32s on SBD/VOC2011 (CLI entry point).

    Fix: a debug leftover unconditionally overwrote ``args.resume`` with
    a machine-local absolute checkpoint path, so the ``--resume`` flag
    (and fresh VGG16 initialization) could never take effect.  The
    hard-coded override is removed.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter, )
    parser.add_argument('-g', '--gpu', default='0', type=int, help='gpu id')
    parser.add_argument('--resume', help='checkpoint path')
    # configurations (same configuration as original work)
    # https://github.com/shelhamer/fcn.berkeleyvision.org
    parser.add_argument('--max-iteration',
                        type=int,
                        default=100000,
                        help='max iteration')
    parser.add_argument(
        '--lr',
        type=float,
        default=1.0e-10,
        help='learning rate',
    )
    parser.add_argument(
        '--weight-decay',
        type=float,
        default=0.0005,
        help='weight decay',
    )
    parser.add_argument(
        '--momentum',
        type=float,
        default=0.99,
        help='momentum',
    )
    args = parser.parse_args()

    args.model = 'FCN32s'
    # args.git_hash = git_hash()

    # Timestamped output directory under ./logs; snapshot the config.
    now = datetime.datetime.now()
    args.out = osp.join(here, 'logs', now.strftime('%Y%m%d_%H%M%S.%f'))

    os.makedirs(args.out)
    with open(osp.join(args.out, 'config.yaml'), 'w') as f:
        yaml.safe_dump(args.__dict__, f, default_flow_style=False)

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    cuda = torch.cuda.is_available()

    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset
    root = osp.expanduser('/media/atr/Seagate Expansion Drive/dataset')
    # root = osp.expanduser('~/data/datasets')
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = DataLoader(torchfcn.datasets.SBDClassSeg(root,
                                                            split='train',
                                                            transform=True),
                              batch_size=1,
                              shuffle=True,
                              **kwargs)
    val_loader = DataLoader(torchfcn.datasets.VOC2011ClassSeg(
        root, split='seg11valid', transform=True),
                            batch_size=1,
                            shuffle=False,
                            **kwargs)

    # 2. model
    # FIX: removed the hard-coded `args.resume = "/home/atr/..."` debug
    # override here; resuming is now controlled by the --resume flag.
    model = torchfcn.models.FCN32s(n_class=21)
    start_epoch = 0
    start_iteration = 0
    if args.resume:
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        vgg16 = torchfcn.models.VGG16(pretrained=True)
        model.copy_params_from_vgg16(vgg16)
    if cuda:
        model = model.cuda()
        # device_ids = [0,1]
        # model = model.cuda(device_ids[0])
        # model = nn.DataParallel(model,device_ids=device_ids)

    # 3. optimizer
    # Biases get twice the lr and no weight decay (original FCN recipe).
    optim = torch.optim.SGD([
        {
            'params': get_parameters(model, bias=False)
        },
        {
            'params': get_parameters(model, bias=True),
            'lr': args.lr * 2,
            'weight_decay': 0
        },
    ],
                            lr=args.lr,
                            momentum=args.momentum,
                            weight_decay=args.weight_decay)

    if args.resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=args.out,
        max_iter=args.max_iteration,
        interval_validate=4000,
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
Example 6
0
def main():
    """CLI entry point: train FCN32s on VOC2012 class segmentation."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--gpu', type=int, required=True)
    parser.add_argument('-c', '--config', type=int, default=1,
                        choices=configurations.keys())
    parser.add_argument('--resume', help='Checkpoint path')
    args = parser.parse_args()

    gpu = args.gpu
    cfg = configurations[args.config]
    out = get_log_dir('fcn32s', args.config, cfg)
    resume = args.resume

    # Pin the process to the requested GPU before touching CUDA.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()

    # Deterministic seeding on CPU and, when available, GPU.
    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset

    root = '/opt/visualai/rkdoshi/pytorch-fcn/data/datasets'
    loader_opts = {'num_workers': 8, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.VOC2012ClassSeg(root, split='train', transform=True),
        batch_size=1, shuffle=True, **loader_opts)
    val_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.VOC2012ClassSeg(root, split='val', transform=True),
        batch_size=1, shuffle=False, **loader_opts)

    # 2. model

    model = torchfcn.models.FCN32s(n_class=21)
    start_epoch, start_iteration = 0, 0
    if resume:
        # Resume weights and progress counters from a saved checkpoint.
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        # Otherwise start from an ImageNet-pretrained VGG16 backbone.
        model.copy_params_from_vgg16(torchfcn.models.VGG16(pretrained=True))
    if cuda:
        model = model.cuda()

    # 3. optimizer
    # Biases train with doubled learning rate and no weight decay,
    # following the original FCN training recipe.
    optim = torch.optim.SGD(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': cfg['lr'] * 2, 'weight_decay': 0},
        ],
        lr=cfg['lr'],
        momentum=cfg['momentum'],
        weight_decay=cfg['weight_decay'])
    if resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=out,
        max_iter=cfg['max_iteration'],
        interval_validate=cfg.get('interval_validate', len(train_loader)),
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
Example 7
0
def main():
    """Train FCN8s for semantic or instance segmentation (CLI entry point).

    Fix: the three Python-2 ``print`` statements are converted to the
    ``print()`` function so the module parses under Python 3 (and still
    runs under Python 2 for these single-argument calls).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--gpu', type=int, required=True)
    parser.add_argument('-c', '--config', type=int, default=1,
                        choices=configurations.keys())
    parser.add_argument('--instance',
                        action='store_true',
                        help='Use instance labels, else use class labels.')
    parser.add_argument('--resume', help='Checkpoint path')
    args = parser.parse_args()

    gpu = args.gpu
    cfg = configurations[args.config]
    out = get_log_dir('fcn8s', args.config, cfg)
    print('Running experiment {}'.format(out))
    resume = args.resume

    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()

    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset

    root = osp.expanduser('~/data/datasets')
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    if args.instance:
        print('Beginning instance segmentation.')
        train_dataset = torchfcn.datasets.SBDInstSeg
        val_dataset = torchfcn.datasets.VOC2011InstSeg
    else:
        print('Beginning semantic segmentation.')
        train_dataset = torchfcn.datasets.SBDClassSeg
        val_dataset = torchfcn.datasets.VOC2011ClassSeg
    train_loader = torch.utils.data.DataLoader(
        train_dataset(root, split='train', transform=True),
        batch_size=1, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        val_dataset(root, split='seg11valid', transform=True),
        batch_size=1, shuffle=False, **kwargs)

    # 2. model

    model = torchfcn.models.FCN8s(n_class=cfg['num_classes'])
    start_epoch = 0
    start_iteration = 0
    if resume:
        # Restore weights and progress counters from the checkpoint.
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        # Fresh run: initialize from a pretrained FCN16s model.
        fcn16s = torchfcn.models.FCN16s()
        fcn16s.load_state_dict(torch.load(cfg['fcn16s_pretrained_model']))
        model.copy_params_from_fcn16s(fcn16s)
    if cuda:
        model = model.cuda()

    # 3. optimizer
    # Biases get twice the lr and no weight decay (original FCN recipe).
    optim = torch.optim.SGD(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': cfg['lr'] * 2, 'weight_decay': 0},
        ],
        lr=cfg['lr'],
        momentum=cfg['momentum'],
        weight_decay=cfg['weight_decay'])
    if resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    tensorboard_writer = SummaryWriter(log_dir=out, comment='')

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=out,
        max_iter=cfg['max_iteration'],
        interval_validate=cfg.get('interval_validate', len(train_loader)),
        tensorboard_writer=tensorboard_writer,
        interval_train_loss=100,
        n_class=cfg['num_classes']
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
Example 8
0
def main():
    """Train FCN32s (optionally without deconv) on SBD class segmentation."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--out')
    parser.add_argument('--resume')
    parser.add_argument('--no-deconv', action='store_true')
    args = parser.parse_args()

    cuda = torch.cuda.is_available()

    out = args.out
    resume = args.resume
    deconv = not args.no_deconv

    # Fixed training hyper-parameters for this experiment.
    seed = 1
    max_iter = 100000

    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    # 1. dataset

    root = osp.expanduser('~/data/datasets')
    loader_opts = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.SBDClassSeg(root, split='train', transform=True),
        batch_size=1, shuffle=True, **loader_opts)
    val_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.VOC2011ClassSeg(
            root, split='seg11valid', transform=True),
        batch_size=1, shuffle=False, **loader_opts)

    # 2. model

    model = torchfcn.models.FCN32s(n_class=21, deconv=deconv)
    start_epoch = 0
    if resume:
        # Continue a previous run: restore weights and the epoch counter.
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
    else:
        # Fresh run: seed the conv layers from a locally cached VGG16
        # checkpoint (torchvision weights file).
        pth_file = osp.expanduser('~/data/models/torch/vgg16-00b39a1b.pth')
        vgg16 = torchvision.models.vgg16()
        vgg16.load_state_dict(torch.load(pth_file))
        torchfcn.utils.copy_params_vgg16_to_fcn32s(vgg16, model,
                                                   init_upscore=False)
    if cuda:
        model = model.cuda()

    # 3. optimizer
    # FIXME: per-parameter groups (separate lr/decay for conv weights,
    # biases and the deconv layer) made the loss stop decreasing, so a
    # single flat parameter group is used for now.
    optim = torch.optim.SGD(
        model.parameters(),
        lr=1e-10,
        momentum=0.99,
        weight_decay=0.0005)
    if resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=out,
        max_iter=max_iter,
    )
    trainer.epoch = start_epoch
    # Derive the absolute iteration count from completed epochs.
    trainer.iteration = start_epoch * len(train_loader)
    trainer.train()
Example 9
0
def main():
    """Train FCN32s (9 classes) on the ECP facades dataset."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('-g', '--gpu', type=int, required=True, help='gpu id')
    parser.add_argument('--resume', help='checkpoint path')
    # configurations (same configuration as original work)
    # https://github.com/shelhamer/fcn.berkeleyvision.org
    parser.add_argument('--max-iteration', type=int, default=100000,
                        help='max iteration')
    parser.add_argument('--lr', type=float, default=1.0e-10,
                        help='learning rate')
    parser.add_argument('--weight-decay', type=float, default=0.0005,
                        help='weight decay')
    parser.add_argument('--momentum', type=float, default=0.99,
                        help='momentum')
    args = parser.parse_args()

    # Record run metadata alongside the CLI arguments.
    args.model = 'FCN32s'
    args.git_hash = git_hash()

    # Timestamped output directory under ./logs; snapshot the config.
    now = datetime.datetime.now()
    args.out = osp.join(here, 'logs', now.strftime('%Y%m%d_%H%M%S.%f'))
    os.makedirs(args.out)
    with open(osp.join(args.out, 'config.yaml'), 'w') as f:
        yaml.safe_dump(args.__dict__, f, default_flow_style=False)

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    cuda = torch.cuda.is_available()

    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset

    root = osp.expanduser('~/facades_datasets/5.ECP')
    # root = osp.expanduser('~/facades_datasets/1.CMP/CMP_base')

    loader_opts = {'num_workers': 4, 'pin_memory': True} if cuda else {}

    # use our dataset and defined transformations
    dataset_train = facade_dataset.ECP_Dataset(root, split='train')
    dataset_val = facade_dataset.ECP_Dataset(root, split='val')

    # define training and validation data loaders
    loader_train = torch.utils.data.DataLoader(
        dataset_train, batch_size=8, shuffle=True, **loader_opts)
    loader_val = torch.utils.data.DataLoader(
        dataset_val, batch_size=1, shuffle=False, **loader_opts)

    # 2. model
    model = torchfcn.models.FCN32s(n_class=9)
    start_epoch = 0
    start_iteration = 0
    if args.resume:
        # Resume weights and progress counters from a saved checkpoint.
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        # Otherwise start from an ImageNet-pretrained VGG16 backbone.
        model.copy_params_from_vgg16(torchfcn.models.VGG16(pretrained=True))
    if cuda:
        model = model.cuda()

    # 3. optimizer
    # Biases get twice the lr and no weight decay (original FCN recipe).
    optim = torch.optim.SGD(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': args.lr * 2, 'weight_decay': 0},
        ],
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay)
    if args.resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=loader_train,
        val_loader=loader_val,
        out=args.out,
        max_iter=args.max_iteration,
        interval_validate=5000,
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
Example 10
0
def main():
    """Train FCN32s on CityScapes, using DataParallel on multi-GPU hosts."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--gpu', type=str, required=True)
    parser.add_argument('-c', '--config', type=int, default=1,
                        choices=configurations.keys())
    parser.add_argument('--resume', help='Checkpoint path')
    args = parser.parse_args()

    gpu = args.gpu
    cfg = configurations[args.config]
    out = get_log_dir('fcn32s', args.config, cfg)

    resume = args.resume

    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()

    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # Scale the batch with the number of visible GPUs.
    n_gpus = torch.cuda.device_count()
    batch_size = 1 if n_gpus == 1 else 2 * n_gpus

    # 1. dataset

    root = osp.expanduser('~/data/datasets')
    loader_opts = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.CityScapesClassSeg(
            root, split=['train'], transform=True, preprocess=False,
        ), batch_size=batch_size, shuffle=True, **loader_opts
    )
    val_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.CityScapesClassSeg(
            root, split=['val'], transform=True, preprocess=False,
        ), batch_size=batch_size, shuffle=False, **loader_opts
    )

    # 2. model

    model = torchfcn.models.FCN32s(n_class=20)
    start_epoch, start_iteration = 0, 0
    if resume:
        # Resume weights and progress counters from a saved checkpoint.
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        # Otherwise start from an ImageNet-pretrained VGG16 backbone.
        model.copy_params_from_vgg16(torchfcn.models.VGG16(pretrained=True))
    if cuda:
        if n_gpus == 1:
            model = model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # 3. optimizer (Adam; biases get doubled lr and no weight decay)
    # NOTE(review): get_parameters() receives the possibly
    # DataParallel-wrapped model — confirm it traverses the .module
    # attribute when more than one GPU is in use.
    optim = torch.optim.Adam(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': cfg['lr'] * 2, 'weight_decay': 0},
        ],
        lr=cfg['lr'],
        weight_decay=cfg['weight_decay'])
    if resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=out,
        max_iter=cfg['max_iteration'],
        nEpochs=10,
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
Example 11
0
def main(config_file):
    """Train an FC-DenseNet on SBD 'train', validating on VOC2011 'seg11valid'.

    Args:
        config_file: path to a YAML config with keys ``optimizer``, ``lr``,
            ``weight_decay`` and optionally ``resume`` (checkpoint path).
    """
    # safe_load refuses arbitrary Python object construction from the YAML
    # file, and the with-block closes the handle (yaml.load(open(...))
    # both leaked the file object and allowed unsafe tags).
    with open(config_file) as f:
        config = yaml.safe_load(f)

    # Derive the log directory name from the config file name plus every
    # KEY-value pair, so distinct configs get distinct output dirs.
    prefix = osp.splitext(osp.basename(config_file))[0]
    out = '%s' % prefix
    for key, value in config.items():
        out += '_%s-%s' % (key.upper(), str(value))
    out = osp.join(here, 'logs', out)
    if not osp.exists(out):
        os.makedirs(out)
    config['out'] = out[len(here) + 1:]  # store path relative to `here`
    config['config_file'] = config_file
    # Persist the resolved config next to the logs for reproducibility.
    with open(osp.join(out, 'config.yaml'), 'w') as f:
        yaml.safe_dump(config, f, default_flow_style=False)

    cuda = torch.cuda.is_available()

    seed = 1
    max_iter = 1000000

    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    # 1. dataset

    root = osp.expanduser('~/data/datasets')
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.SBDClassSeg(root, split='train', transform=True),
        batch_size=1, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.VOC2011ClassSeg(
            root, split='seg11valid', transform=True),
        batch_size=1, shuffle=False, **kwargs)

    # 2. model

    model = torchfcdense.models.FCDense(
        depths=[4, 5, 7, 10, 12, 15, 12, 10, 7, 5, 4],
        growth_rates=16,
        n_classes=21,  # 20 VOC object classes + background
        drop_rate=0.2,
    )
    start_epoch = 0
    checkpoint = None  # set only when resuming; reused by the optimizer below
    if config.get('resume'):
        checkpoint = torch.load(config['resume'])
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
    if cuda:
        model = model.cuda()

    # 3. optimizer

    # The optimizer class is chosen by name from the config
    # (e.g. 'Adam' -> torch.optim.Adam).
    optim = getattr(torch.optim, config['optimizer'])(
        model.parameters(),
        lr=config['lr'],
        weight_decay=config['weight_decay'])
    if checkpoint is not None:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=config['out'],
        max_iter=max_iter,
        size_average=True,
    )
    trainer.epoch = start_epoch
    # Approximate the resumed iteration count from the epoch (the
    # checkpoint in this variant does not store 'iteration').
    trainer.iteration = start_epoch * len(train_loader)
    trainer.train()
Esempio n. 12
0
def main(config_file, resume):
    """Train FCN32s on SBD 'train', validating on VOC2011 'seg11valid'.

    ``config_file`` is parsed by ``load_config_file``; ``resume`` is an
    optional checkpoint path used to continue an interrupted run.
    """
    config, out = load_config_file(config_file)

    use_gpu = torch.cuda.is_available()

    torch.manual_seed(1337)
    if use_gpu:
        torch.cuda.manual_seed(1337)

    # 1. dataset

    root = osp.expanduser('~/data/datasets')
    loader_kwargs = dict(num_workers=4, pin_memory=True) if use_gpu else {}
    train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.SBDClassSeg(root, split='train', transform=True),
        batch_size=1, shuffle=True, **loader_kwargs)
    val_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.VOC2011ClassSeg(
            root, split='seg11valid', transform=True),
        batch_size=1, shuffle=False, **loader_kwargs)

    # 2. model

    model = torchfcn.models.FCN32s(n_class=21)
    start_epoch, start_iteration = 0, 0
    checkpoint = torch.load(resume) if resume else None
    if checkpoint is None:
        # Fresh run: initialize the conv stack from a pretrained VGG16.
        vgg16 = torchfcn.models.VGG16(pretrained=True)
        model.copy_params_from_vgg16(vgg16)
    else:
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    if use_gpu:
        model = model.cuda()

    # 3. optimizer

    # Biases are given twice the learning rate and no weight decay.
    optim = torch.optim.SGD(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': config['lr'] * 2, 'weight_decay': 0},
        ],
        lr=config['lr'],
        momentum=config['momentum'],
        weight_decay=config['weight_decay'])
    if checkpoint is not None:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=use_gpu,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=out,
        max_iter=config['max_iteration'],
        # Default: validate once per training epoch.
        interval_validate=config.get('interval_validate', len(train_loader)),
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
Esempio n. 13
0
def main(config_file, resume):
    """Train FCN32s (Adam) on an APC2016 dataset variant selected by config.

    Args:
        config_file: path parsed by ``load_config_file``; expects keys
            ``lr``, ``weight_decay``, ``max_iteration``, ``interval_validate``,
            ``nodeconv`` and optionally ``dataset`` ('v2' default, or 'v3').
        resume: optional checkpoint path to continue a previous run.
    """
    cfg, out = load_config_file(config_file)

    cuda = torch.cuda.is_available()

    # BUGFIX: on a CPU-only machine device_count() is 0, which previously
    # made batch_size 0 (invalid DataLoader) and max_iteration // 0 crash.
    # Clamp to at least one device; GPU behavior is unchanged.
    batch_size = max(1, torch.cuda.device_count()) * 3
    # max_iteration is specified in samples; convert to optimizer steps.
    max_iter = cfg['max_iteration'] // batch_size

    torch.manual_seed(1)
    if cuda:
        torch.cuda.manual_seed(1)

    # 1. dataset

    cfg['dataset'] = cfg.get('dataset', 'v2')
    if cfg['dataset'] == 'v2':
        dataset_class = torchfcn.datasets.APC2016V2
    elif cfg['dataset'] == 'v3':
        dataset_class = torchfcn.datasets.APC2016V3
    else:
        raise ValueError('Unsupported dataset: %s' % cfg['dataset'])

    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        dataset_class(split='train', transform=True),
        batch_size=batch_size, shuffle=True, **kwargs)
    valid_loader = torch.utils.data.DataLoader(
        dataset_class(split='valid', transform=True),
        batch_size=batch_size, shuffle=False, **kwargs)

    # 2. model

    n_class = len(train_loader.dataset.class_names)
    model = torchfcn.models.FCN32s(n_class=n_class, nodeconv=cfg['nodeconv'])
    start_epoch = 0
    checkpoint = None  # set only when resuming; reused by the optimizer below
    if resume:
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
    else:
        # fc8 is task-specific and the upscore layer is left as constructed,
        # so neither is copied/re-initialized from VGG16.
        vgg16 = torchfcn.models.VGG16(pretrained=True)
        model.copy_params_from_vgg16(vgg16, copy_fc8=False, init_upscore=False)
    if cuda:
        if torch.cuda.device_count() == 1:
            model = model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # 3. optimizer

    optim = torch.optim.Adam(model.parameters(),
                             lr=cfg['lr'],
                             weight_decay=cfg['weight_decay'])
    if checkpoint is not None:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=valid_loader,
        out=out,
        max_iter=max_iter,
        interval_validate=cfg['interval_validate'],
    )
    trainer.epoch = start_epoch
    # Approximate the resumed iteration count from the epoch (this
    # checkpoint variant does not store 'iteration').
    trainer.iteration = start_epoch * len(train_loader)
    trainer.train()
Esempio n. 14
0
def main():
    """CLI entry point: train FCN32s on SBD with the original FCN recipe.

    Hyperparameter defaults follow
    https://github.com/shelhamer/fcn.berkeleyvision.org
    """
    # Default output dir is timestamped in UTC+8 — presumably the author's
    # local timezone; NOTE(review): confirm before changing.
    now = datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=8)))
    outpath_default = osp.join(here, 'logs', now.strftime('%Y%m%d_%H%M%S'))
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('-g', '--gpu', type=int, required=True, help='gpu id')
    parser.add_argument('--resume', help='checkpoint path')
    # configurations (same configuration as original work)
    # https://github.com/shelhamer/fcn.berkeleyvision.org
    # TODO input model name from args
    parser.add_argument('--max-iteration', type=int, default=100000, help='max iteration')
    parser.add_argument('--lr', type=float, default=1.0e-10, help='learning rate',)
    parser.add_argument('--weight-decay', type=float, default=0.0005, help='weight decay',)
    parser.add_argument('--momentum', type=float, default=0.99, help='momentum',)
    parser.add_argument('--root', type=str, default='~/data/datasets', help='the directory contains folder "VOC"')
    parser.add_argument('--out', type=str, default=outpath_default, help='directory to store output logs and weights')
    args = parser.parse_args()

    # Record run metadata alongside the CLI options.
    args.model = 'FCN32s'
    args.git_hash = git_hash()

    if not os.path.isdir(args.out):
        # exist_ok guards against a concurrent run creating the same dir.
        os.makedirs(args.out, exist_ok=True)
        print("\tdirectory {} created".format(args.out))  # fixed 'dirctory' typo

    # Snapshot the full configuration for reproducibility.
    with open(osp.join(args.out, 'config.yaml'), 'w') as f:
        yaml.safe_dump(args.__dict__, f, default_flow_style=False)

    # Must be set before the first CUDA call so the chosen GPU is used.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    cuda = torch.cuda.is_available()

    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset

    root = osp.expanduser(args.root)
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.SBDClassSeg(root, split='train', transform=True),
        batch_size=1, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.VOC2011ClassSeg(
            root, split='seg11valid', transform=True),
        batch_size=1, shuffle=False, **kwargs)

    # 2. model

    model = torchfcn.models.FCN32s(n_class=21)
    start_epoch = 0
    start_iteration = 0
    checkpoint = None  # set only when resuming; reused by the optimizer below
    if args.resume:
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        # Fresh run: initialize the conv stack from a pretrained VGG16.
        vgg16 = torchfcn.models.VGG16(pretrained=True)
        model.copy_params_from_vgg16(vgg16)
    if cuda:
        model = model.cuda()

    # 3. optimizer

    # Biases are given twice the learning rate and no weight decay,
    # following the original FCN training recipe.
    optim = torch.optim.SGD(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': args.lr * 2, 'weight_decay': 0},
        ],
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay)
    if checkpoint is not None:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=args.out,
        max_iter=args.max_iteration,
        interval_validate=4000,
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
Esempio n. 15
0
def main():
    """CLI entry point: train FCN32s on APC2016V2 with Adam.

    Command-line options:
        --out: directory for logs and weights.
        --resume: optional checkpoint path to continue a previous run.
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): --out has no default; if omitted, `out` is None and
    # the Trainer receives it unchanged — confirm intended behavior.
    parser.add_argument('--out')
    parser.add_argument('--resume')
    args = parser.parse_args()

    out = args.out
    resume = args.resume
    cuda = torch.cuda.is_available()

    seed = 1
    # BUGFIX: on a CPU-only machine device_count() is 0, which previously
    # made batch_size 0 (invalid DataLoader) and 150000 // 0 crash.
    # Clamp to at least one device; GPU behavior is unchanged.
    batch_size = max(1, torch.cuda.device_count()) * 3
    # The iteration budget is specified in samples; convert to steps.
    max_iter = 150000 // batch_size

    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    # 1. dataset

    root = osp.expanduser('~/data/datasets')
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.APC2016V2(root, train=True, transform=True),
        batch_size=batch_size, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.APC2016V2(root, train=False, transform=True),
        batch_size=batch_size, shuffle=False, **kwargs)

    # 2. model

    n_class = len(train_loader.dataset.class_names)
    model = torchfcn.models.FCN32s(n_class=n_class)
    start_epoch = 0
    checkpoint = None  # set only when resuming; reused by the optimizer below
    if resume:
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
    else:
        # Fresh run: initialize from locally cached torchvision VGG16 weights.
        pth_file = osp.expanduser('~/data/models/torch/vgg16-00b39a1b.pth')
        vgg16 = torchvision.models.vgg16()
        vgg16.load_state_dict(torch.load(pth_file))
        torchfcn.utils.copy_params_vgg16_to_fcn32s(vgg16,
                                                   model,
                                                   copy_fc8=True,
                                                   init_upscore=False)
    if cuda:
        if torch.cuda.device_count() == 1:
            model = model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # 3. optimizer

    optim = torch.optim.Adam(model.parameters(), lr=1e-5, weight_decay=0.0005)
    if checkpoint is not None:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=out,
        max_iter=max_iter,
    )
    trainer.epoch = start_epoch
    # Approximate the resumed iteration count from the epoch (this
    # checkpoint variant does not store 'iteration').
    trainer.iteration = start_epoch * len(train_loader)
    trainer.train()