# NOTE(review): this line looks like a truncated near-duplicate of the chunk
# on the following line (same DataLoader / optimizer / resume / training-loop
# setup), differing only in constructing
# PConvUNet(input_guides=1 if use_depth else 0) instead of PConvUNet().
# It starts mid-expression — the opening `iterator_train = iter(` is missing,
# leaving an unbalanced `))` — and ends with a dangling
# `if args.mask_root is not None:` whose body has been cut off, so it cannot
# be valid Python as-is. Presumably the result of a bad merge/paste.
# TODO(review): confirm which variant is intended and remove the other.
data.DataLoader(dataset_train, batch_size=args.batch_size, sampler=InfiniteSampler(len(dataset_train)), num_workers=args.n_threads)) print(len(dataset_train)) model = PConvUNet(input_guides=1 if use_depth else 0).to(device) if args.finetune: lr = args.lr_finetune model.freeze_enc_bn = True else: lr = args.lr start_iter = 0 optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr) criterion = InpaintingLoss(VGG16FeatureExtractor()).to(device) if args.resume: start_iter = load_ckpt(args.resume, [('model', model)], [('optimizer', optimizer)]) for param_group in optimizer.param_groups: param_group['lr'] = lr print('Starting from iter ', start_iter) for i in tqdm(range(start_iter, args.max_iter)): model.train() image, mask, gt = [x.to(device) for x in next(iterator_train)] if args.mask_root is not None:
# Wrap the training DataLoader in a plain iterator; InfiniteSampler yields
# indices forever, so the training loop below can pull batches with next()
# for as many iterations as it needs without re-creating the loader.
iterator_train = iter(data.DataLoader(
    dataset_train, batch_size=args.batch_size,
    sampler=InfiniteSampler(len(dataset_train)),
    num_workers=args.n_threads))
print(len(dataset_train))

model = PConvUNet().to(device)

# Fine-tuning runs at a lower learning rate and sets freeze_enc_bn on the
# model (presumably freezing the encoder's BatchNorm layers — confirm
# against the PConvUNet implementation).
if args.finetune:
    lr = args.lr_finetune
    model.freeze_enc_bn = True
else:
    lr = args.lr

start_iter = 0
# Only parameters that still require gradients are optimized, so any
# frozen layers are skipped by Adam.
optimizer = torch.optim.Adam(
    filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
criterion = InpaintingLoss(VGG16FeatureExtractor()).to(device)

# Optionally resume model/optimizer state from a checkpoint. The learning
# rate is re-applied afterwards because the restored optimizer state may
# carry the LR the checkpoint was saved with.
if args.resume:
    start_iter = load_ckpt(
        args.resume, [('model', model)], [('optimizer', optimizer)])
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    print('Starting from iter ', start_iter)

for i in tqdm(range(start_iter, args.max_iter)):
    model.train()
    # Each batch is a (masked image, mask, ground truth) triple.
    image, mask, gt = [x.to(device) for x in next(iterator_train)]