])
dataset = NewDogCat(root=opt.dataRoot, transform=transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                         collate_fn=my_collate_fn,
                                         shuffle=True, num_workers=opt.workers)
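# `my_collate_fn` is passed to the DataLoader above but is not defined in this
# excerpt. A minimal sketch of such a collate function, assuming NewDogCat
# returns None for samples it fails to load (an illustrative guess, not
# necessarily the implementation used here):
import torch
from torch.utils.data.dataloader import default_collate

def my_collate_fn(batch):
    # drop samples the dataset could not decode
    batch = [sample for sample in batch if sample is not None]
    if len(batch) == 0:
        return torch.Tensor()          # empty batch; the training loop should skip it
    return default_collate(batch)      # stack the surviving samples as usual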
# NOTE: the statement opening this dataset constructor is missing from the
# excerpt; an ImageFolder over opt.dataRoot is assumed here.
from torchvision.datasets import ImageFolder
dataset = ImageFolder(
    root=opt.dataRoot,
    transform=transforms.Compose([
        transforms.Scale(opt.imageSize),  # renamed transforms.Resize in newer torchvision
        # transforms.CenterCrop(opt.imageSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # bring images to (-1,1)
    ])
)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                         shuffle=True, num_workers=opt.workers)

# load models
if opt.model == 1:
    netG = models._netG_1(ngpu, nz, nc, ngf, n_extra_g)
    netD = models._netD_1(ngpu, nz, nc, ndf, n_extra_d)
elif opt.model == 2:
    netG = models._netG_2(ngpu, nz, nc, ngf)
    netD = models._netD_2(ngpu, nz, nc, ndf)

netG.apply(weights_init)
if opt.netG != '':  # resume the generator from a checkpoint if one was given
    netG.load_state_dict(torch.load(opt.netG))
print(netG)

netD.apply(weights_init)
if opt.netD != '':  # resume the discriminator from a checkpoint if one was given
    netD.load_state_dict(torch.load(opt.netD))
print(netD)

criterion = nn.BCELoss()  # binary cross-entropy for the real/fake objective
criterion_MSE = nn.MSELoss()
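# `weights_init` is applied to both networks above but not shown in this
# excerpt. The customary DCGAN initialization looks like the sketch below;
# this is an assumption about the function used here, not a verbatim copy.
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)   # conv weights ~ N(0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)   # batchnorm scale ~ N(1, 0.02)
        m.bias.data.fill_(0)               # batchnorm shift = 0

# netG.apply(weights_init) / netD.apply(weights_init) call this function
# recursively on every submodule, so each Conv and BatchNorm layer is reset.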