# --- continuation of the training DataLoader construction (call opened before this chunk) ---
sampler=InfiniteSampler(len(dataset_train)),  # presumably an endless sampler so next(iterator_train) below never exhausts — confirm in caller
    num_workers=args.n_threads))
print(len(dataset_train))

# Partial-convolution U-Net; one extra guide channel is fed in when depth guidance is enabled.
model = PConvUNet(input_guides=1 if use_depth else 0).to(device)

if args.finetune:
    # Fine-tuning phase: use the (presumably lower) fine-tune LR and freeze encoder BatchNorm.
    lr = args.lr_finetune
    model.freeze_enc_bn = True
else:
    lr = args.lr
start_iter = 0

# Optimize only parameters still requiring gradients (some may be frozen in fine-tune mode).
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
# Inpainting loss built on VGG16 perceptual features.
criterion = InpaintingLoss(VGG16FeatureExtractor()).to(device)

if args.resume:
    # Restore model + optimizer state from checkpoint; load_ckpt returns the iteration to resume at.
    start_iter = load_ckpt(args.resume, [('model', model)], [('optimizer', optimizer)])
    # Force the configured LR — the checkpointed optimizer may carry a stale learning rate.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    print('Starting from iter ', start_iter)

# Iteration-based (not epoch-based) training loop; body continues past this chunk.
for i in tqdm(range(start_iter, args.max_iter)):
    model.train()
    # Each batch is (image, mask, ground truth), moved to the target device.
    image, mask, gt = [x.to(device) for x in next(iterator_train)]
    if args.mask_root is not None:
        # NOTE(review): looks like channel 3 is a guide map (e.g. depth) and 0-2 are RGB —
        # confirm against the dataset layout; `guide` is not used within the visible lines.
        guide = image[:, 3:4, :, :]
        image = image[:, 0:3, :, :]
# Validation loader: random sampling over the validation set, fixed 4 worker processes.
loader_val = data.DataLoader(dataset_val, batch_size=mini_batch,
                             sampler=RandomSampler(data_source=dataset_val),
                             num_workers=4)
# Loader dict in the {"train": ..., "valid": ...} shape the Catalyst runner below consumes;
# assumes loader_train was built earlier in the file — confirm.
loaders = {"train": loader_train, "valid": loader_val}
print('model')
# model, criterion, optimizer, scheduler
# (alternative backbones kept for reference)
#model = vgg13().cuda()
#model = resnet18(pretrained=False, progress=True).cuda()
#model = inception_v3().cuda()
model = resnext101_32x8d().cuda()
#criterion = CustomCriterion().cuda()
# Inpainting loss built on VGG16 perceptual features.
criterion = InpaintingLoss(VGG16FeatureExtractor()).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0.0)
# Halve the learning rate at each listed epoch milestone.
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=[3, 6, 9, 12, 18, 24, 30, 40, 50, 60, 70, 80, 90], gamma=.5)
print('training')
# model training — Catalyst supervised runner; the runner.train(...) call continues past this chunk.
runner = dl.SupervisedRunner()
logdir = './logdir'
runner.train(model=model, criterion=criterion, optimizer=optimizer, scheduler=scheduler,