def loadD(num, model_path):
    """Load discriminator checkpoint number `num` from `model_path`.

    Builds a fresh `_netD_1(1, 100, 3, 64, 0)` on the module-level `device`,
    restores its weights from `<model_path>netD_epoch_<num>.pth` (mapping the
    tensors to CPU when the module-level `CUDA` flag is off), and returns it.
    """
    checkpoint = model_path + 'netD_epoch_{}.pth'.format(num)
    net = models._netD_1(1, 100, 3, 64, 0)
    net = net.to(device)
    if CUDA:
        weights = torch.load(checkpoint)
    else:
        # No GPU available: remap every stored tensor onto CPU storage.
        weights = torch.load(checkpoint, map_location=lambda storage, loc: storage)
    net.load_state_dict(weights)
    return net
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # bring images to (-1,1) ]) dataset = NewDogCat(root=opt.dataRoot, transform=transform) dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize, collate_fn=my_collate_fn, shuffle=True, num_workers=opt.workers) # load models if opt.model == 1: netG = models._netG_1(ngpu, nz, nc, ngf, n_extra_g) netD = models._netD_1(ngpu, nz, nc, ndf, n_extra_d) elif opt.model == 2: netG = models._netG_2(ngpu, nz, nc, ngf) netD = models._netD_2(ngpu, nz, nc, ndf) netG.apply(weights_init) if opt.netG != '': netG.load_state_dict(torch.load(opt.netG)) print(netG) netD.apply(weights_init) if opt.netD != '': netD.load_state_dict(torch.load(opt.netD)) print(netD) criterion = nn.BCELoss()
# Plain ImageFolder dataset: every image under opt.dataRoot, resized so its
# shorter side is opt.imageSize, then normalized from [0,1] to (-1,1).
dataset = dset.ImageFolder(
    root=opt.dataRoot,
    transform=transforms.Compose([
        # transforms.Scale was deprecated (renamed Resize in torchvision 0.2)
        # and later removed; Resize behaves identically here.
        transforms.Resize(opt.imageSize),
        # transforms.CenterCrop(opt.imageSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # bring images to (-1,1)
    ])
)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchSize,
                                         shuffle=True,
                                         num_workers=opt.workers)

# load models — architecture variant selected by opt.model.
if opt.model == 1:
    netG = models._netG_1(ngpu, nz, nc, ngf, n_extra_g)
    netD = models._netD_1(ngpu, nz, nc, ndf, n_extra_d)
elif opt.model == 2:
    netG = models._netG_2(ngpu, nz, nc, ngf)
    netD = models._netD_2(ngpu, nz, nc, ndf)
else:
    # Fail fast: without this branch an unknown opt.model leaves netG/netD
    # undefined and crashes below with a confusing NameError.
    raise ValueError('unsupported opt.model: {}'.format(opt.model))

# Random weight init, then optionally resume from a checkpoint path.
netG.apply(weights_init)
if opt.netG != '':
    netG.load_state_dict(torch.load(opt.netG))
print(netG)

netD.apply(weights_init)
if opt.netD != '':
    netD.load_state_dict(torch.load(opt.netD))
print(netD)

criterion = nn.BCELoss()  # binary real/fake discriminator loss