Example #1
def main(cfg, _log):
    init_seed(cfg.seed)

    _log.info("=> fetching img pairs.")
    train_set, valid_set = get_dataset(cfg)

    _log.info('{} samples found: {} train and {} test'.format(
        len(valid_set) + len(train_set), len(train_set), len(valid_set)))

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=cfg.train.batch_size,
                                               num_workers=cfg.train.workers,
                                               pin_memory=True,
                                               shuffle=True)

    max_test_batch = 4
    if isinstance(valid_set, torch.utils.data.ConcatDataset):
        valid_loader = [
            torch.utils.data.DataLoader(s,
                                        batch_size=min(max_test_batch,
                                                       cfg.train.batch_size),
                                        num_workers=min(4, cfg.train.workers),
                                        pin_memory=True,
                                        shuffle=False)
            for s in valid_set.datasets
        ]
        valid_size = sum(len(l) for l in valid_loader)
    else:
        valid_loader = torch.utils.data.DataLoader(
            valid_set,
            batch_size=min(max_test_batch, cfg.train.batch_size),
            num_workers=min(4, cfg.train.workers),
            pin_memory=True,
            shuffle=False)
        valid_size = len(valid_loader)

    # an epoch_size/valid_size of 0 means "use the full loader"; clamp both
    # so they never exceed the number of available batches
    if cfg.train.epoch_size == 0:
        cfg.train.epoch_size = len(train_loader)
    if cfg.train.valid_size == 0:
        cfg.train.valid_size = valid_size
    cfg.train.epoch_size = min(cfg.train.epoch_size, len(train_loader))
    cfg.train.valid_size = min(cfg.train.valid_size, valid_size)

    model = get_model(cfg.model)
    loss = get_loss(cfg.loss)
    trainer = get_trainer(cfg.trainer)(train_loader, valid_loader, model, loss,
                                       _log, cfg.save_root, cfg.train)

    # fine-tune: freeze every parameter outside the pyramid layers
    for name, param in model.named_parameters():
        if "pyramid" not in name:
            param.requires_grad = False
        else:
            print(name, param.requires_grad)

    # load the pretrained weights and apply them to the model
    epoch, weights = load_checkpoint('checkpoints/Sintel/pwclite_ar.tar')
    model.load_state_dict(weights)

    trainer.model = model
    trainer.train()
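
Example #1 calls a load_checkpoint helper that the snippet never defines; a minimal sketch, assuming the .tar file is a torch checkpoint dict with 'epoch' and 'state_dict' keys (both key names are assumptions, not confirmed by the source):

import torch

def load_checkpoint(path):
    # Sketch only: assumes a dict with 'epoch' and 'state_dict' entries;
    # adjust the keys to the actual checkpoint layout.
    ckpt = torch.load(path, map_location='cpu')
    return ckpt['epoch'], ckpt['state_dict']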
Example #2
def main(cfg, _log):
    init_seed(cfg.seed)

    _log.info("=> fetching img pairs.")
    train_set, valid_set = get_dataset(cfg)

    _log.info('{} samples found: {} train and {} test'.format(
        len(valid_set) + len(train_set), len(train_set), len(valid_set)))

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=cfg.train.batch_size,
                                               num_workers=cfg.train.workers,
                                               pin_memory=True,
                                               shuffle=True)

    max_test_batch = 4
    if isinstance(valid_set, torch.utils.data.ConcatDataset):
        valid_loader = [
            torch.utils.data.DataLoader(s,
                                        batch_size=min(max_test_batch,
                                                       cfg.train.batch_size),
                                        num_workers=min(4, cfg.train.workers),
                                        pin_memory=True,
                                        shuffle=False)
            for s in valid_set.datasets
        ]
        valid_size = sum(len(l) for l in valid_loader)
    else:
        valid_loader = torch.utils.data.DataLoader(
            valid_set,
            batch_size=min(max_test_batch, cfg.train.batch_size),
            num_workers=min(4, cfg.train.workers),
            pin_memory=True,
            shuffle=False)
        valid_size = len(valid_loader)

    if cfg.train.epoch_size == 0:
        cfg.train.epoch_size = len(train_loader)
    if cfg.train.valid_size == 0:
        cfg.train.valid_size = valid_size
    cfg.train.epoch_size = min(cfg.train.epoch_size, len(train_loader))
    cfg.train.valid_size = min(cfg.train.valid_size, valid_size)

    model = get_model(cfg.model)
    loss = get_loss(cfg.loss)
    trainer = get_trainer(cfg.trainer)(train_loader, valid_loader, model, loss,
                                       _log, cfg.save_root, cfg.train)

    trainer.train()
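
Both examples seed their run via init_seed(cfg.seed) without showing it; a minimal sketch of what such a helper typically does (the project's actual implementation may differ):

import random

import numpy as np
import torch

def init_seed(seed):
    # Seed every RNG the training pipeline may touch for repeatable runs.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)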
Example #3
parser.add_argument(
    '--log-interval',
    type=int,
    default=100,
    metavar='N',
    help='how many batches to wait before logging training status')
parser.add_argument('--resume',
                    default='',
                    type=str,
                    metavar='PATH',
                    help='path to latest checkpoint (default: none)')
args = parser.parse_args()

torch.backends.cudnn.benchmark = True

source_dataset, target_dataset = get_dataset(args.task)

source_loader = torch.utils.data.DataLoader(source_dataset,
                                            batch_size=args.batch_size,
                                            shuffle=True,
                                            num_workers=0)

target_loader = torch.utils.data.DataLoader(target_dataset,
                                            batch_size=args.batch_size,
                                            shuffle=True,
                                            num_workers=0)

model = models.Net(task=args.task).cuda()

if args.task == 's2m':
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)  # lr/momentum assumed; the call is truncated in the source

# The source splices in the tail of an accuracy helper here; a minimal
# surrounding function is reconstructed so the fragment stays coherent.
def evaluate(model, loader):
    correct, count = 0, 0
    with torch.no_grad():
        for data_t, target_t in loader:
            pred = model(data_t.cuda()).argmax(dim=1, keepdim=True).cpu()
            correct += pred.eq(target_t.view_as(pred)).sum().item()
            count += len(target_t)
    return correct * 1.0 / count


### DDM in batch
if args.model == 'ddm':
    criterion_cel = nn.CrossEntropyLoss()

    model_f = models.Net_f(task=task, outdim=outdim).cuda()
    model_c = models.Net_c_cway(task=task, outdim=outdim).cuda()
    optimizer_f = torch.optim.Adam(model_f.parameters(), 0.001)
    optimizer_c = torch.optim.Adam(model_c.parameters(), 0.001)

    drift_num = 0
    source_dataset, target_dataset = get_dataset(task, drift_num)
    source_loader = torch.utils.data.DataLoader(source_dataset,
                                                batch_size=batch_size,
                                                shuffle=False,
                                                num_workers=0)
    target_loader = torch.utils.data.DataLoader(target_dataset,
                                                batch_size=batch_size,
                                                shuffle=False,
                                                num_workers=0)

    train_xs, train_ys = [], []
    train_xt, train_yt = [], []

    dl_source = iter(source_loader)
    dl_target = iter(target_loader)
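
The snippet stops right after creating the manual iterators; a hypothetical continuation showing how the empty buffers might be filled batch by batch (only the names come from the snippet, the loop itself is an assumption):

    # Hypothetical continuation: pull aligned batches from both domains
    # until either loader runs out.
    while True:
        try:
            xs, ys = next(dl_source)
            xt, yt = next(dl_target)
        except StopIteration:
            break
        train_xs.append(xs)
        train_ys.append(ys)
        train_xt.append(xt)
        train_yt.append(yt)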
Example #5
    # init parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', default='configs/sky_timelapse.json')
    args = parser.parse_args()
    with open(args.config) as f:
        cfg = EasyDict(json.load(f))
    # init seed
    init_seed(cfg.train.seed)

    # init logger
    if cfg.save_root == '':
        curr_time = datetime.datetime.now().strftime("%y%m%d%H%M%S")
        cfg.save_root = Path(
            cfg.train.checkpoints) / cfg.train.name / curr_time
        cfg.save_root.makedirs_p()
    logger = init_logger(log_dir=cfg.save_root)
    logger.info('=> training: will save everything to {}'.format(
        cfg.save_root))

    # show configurations
    cfg_str = pprint.pformat(cfg)
    logger.info('=> configurations \n ' + cfg_str)

    # create datasets
    train_loader, valid_loader = get_dataset(cfg)

    # train
    TrainFramework = get_trainer(cfg.trainer)
    trainer = TrainFramework(cfg, train_loader, valid_loader, logger)
    trainer.train()
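
Example #5 depends on an init_logger(log_dir=...) helper that is not shown; a minimal sketch using Python's standard logging module (logger name, file name, and format are assumptions):

import logging
from pathlib import Path

def init_logger(log_dir):
    # Sketch only: logs to the console and to <log_dir>/train.log.
    logger = logging.getLogger('train')
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    for handler in (logging.StreamHandler(),
                    logging.FileHandler(Path(log_dir) / 'train.log')):
        handler.setFormatter(fmt)
        logger.addHandler(handler)
    return logger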