# ----- Training setup: data pipeline, generator, optimizer -----
dataRoot = args.dataRoot
maskRoot = args.maskRoot

imgData = GetData(dataRoot, maskRoot, loadSize, cropSize)
data_loader = DataLoader(
    imgData,
    batch_size=batchSize,
    shuffle=True,
    num_workers=args.numOfWorkers,
    drop_last=False,
    pin_memory=True,
)

num_epochs = args.train_epochs

# Generator: 4 input channels, 3 output channels
# (presumably RGB + mask -> RGB; confirm against LBAMModel's definition).
netG = LBAMModel(4, 3)
if args.pretrained != '':
    # map_location='cpu' lets a GPU-saved checkpoint load on CPU-only
    # machines; the model is moved to GPU below when `cuda` is set.
    netG.load_state_dict(torch.load(args.pretrained, map_location='cpu'))

numOfGPUs = torch.cuda.device_count()
if cuda:
    netG = netG.cuda()
    if numOfGPUs > 1:
        # Replicate across all visible GPUs.
        netG = nn.DataParallel(netG, device_ids=range(numOfGPUs))

count = 1

# Betas (0.5, 0.9) instead of the Adam defaults — common for GAN training.
G_optimizer = optim.Adam(netG.parameters(), lr=0.0001, betas=(0.5, 0.9))
# ----- Evaluation setup: data pipeline, frozen generator -----
maskRoot = args.maskRoot
savePath = args.savePath
# exist_ok=True is race-free: no crash if the directory appears between a
# check and the create (replaces the exists()/makedirs() pair).
os.makedirs(savePath, exist_ok=True)

imgData = GetData(dataRoot, maskRoot, loadSize, cropSize)
data_loader = DataLoader(
    imgData,
    batch_size=batchSize,
    shuffle=True,
    num_workers=args.numOfWorkers,
    drop_last=False,
)

num_epochs = 10

# Generator: 4 input channels, 3 output channels
# (presumably RGB + mask -> RGB; confirm against LBAMModel's definition).
netG = LBAMModel(4, 3)
if args.pretrained != '':
    # map_location='cpu' lets a GPU-saved checkpoint load on CPU-only
    # machines before the model is moved to the GPU below.
    netG.load_state_dict(torch.load(args.pretrained, map_location='cpu'))
else:
    print('No pretrained model provided!')

# NOTE(review): the `if cuda:` guard was commented out in the original, so
# this moves the model to GPU unconditionally and will fail on CPU-only
# hosts — confirm whether the guard should be restored.
netG = netG.cuda()

# Freeze every generator weight — no gradients are needed here.
for param in netG.parameters():
    param.requires_grad = False

print('OK!')