# Example 1
                                         shuffle=True,
                                         num_workers=int(opt.workers))

# Hyper-parameters, all coerced to int from the parsed command-line options.
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
num_classes = int(opt.num_classes)
nc = 3  # image channels (RGB)

# Pick the architecture variant once; both nets follow the same choice.
use_cifar = opt.dataset == 'cifar10'

# Generator: build, initialize weights, optionally resume from checkpoint.
netG = _netG_CIFAR10(ngpu, nz) if use_cifar else _netG(ngpu, nz)
netG.apply(weights_init)
if opt.netG != '':
    netG.load_state_dict(torch.load(opt.netG))
print(netG)

# Discriminator: same treatment as the generator.
netD = _netD_CIFAR10(ngpu, num_classes) if use_cifar else _netD(ngpu, num_classes)
netD.apply(weights_init)
if opt.netD != '':
    netD.load_state_dict(torch.load(opt.netD))
print(netD)
# Example 2
    def __init__(self, params):
        """Build an AC-GAN training harness from parsed options.

        Sets up reproducibility (RNG seeds), the dataset/dataloader, the
        generator/discriminator pair (optionally resumed from checkpoints),
        the two loss heads, reusable tensor placeholders, a fixed
        evaluation-noise batch conditioned on random class labels, and an
        Adam optimizer per network.

        Args:
            params: parsed options object; attributes read here include
                ngpu, gpu_id, outf, manualSeed, cuda, dataset, dataroot,
                imageSize, batchSize, workers, nz, ngf, ndf, num_classes,
                netG, netD, lr, beta1. (Presumably an argparse Namespace
                — confirm against the caller.)

        Raises:
            NotImplementedError: if params.dataset is neither 'imagenet'
                nor 'cifar10'.
        """
        self.params = params
        # Pin the process to one GPU via the environment when a single
        # GPU is requested.
        if params.ngpu == 1:
            os.environ['CUDA_VISIBLE_DEVICES'] = str(params.gpu_id)
        # Best-effort creation of the output folder; an already-existing
        # folder raises OSError, which is swallowed deliberately.
        try:
            os.makedirs(params.outf)
        except OSError:
            pass
        # Seed python's and torch's RNGs for reproducibility; draw a
        # random seed when none was supplied.
        if params.manualSeed is None:
            params.manualSeed = random.randint(1, 10000)
        print("Random Seed: ", params.manualSeed)
        random.seed(params.manualSeed)
        torch.manual_seed(params.manualSeed)
        if params.cuda:
            torch.cuda.manual_seed_all(params.manualSeed)

        # Let cuDNN auto-tune convolution algorithms (input sizes are
        # fixed by imageSize, so benchmarking pays off).
        cudnn.benchmark = True

        if torch.cuda.is_available() and not params.cuda:
            print(
                "WARNING: You have a CUDA device, so you should probably run with --cuda"
            )

        # dataset
        if params.dataset == 'imagenet':
            # Folder dataset; classes_idx=(10, 20) is an argument of the
            # project-local ImageFolder wrapper (not torchvision's),
            # presumably restricting to a slice of class indices — confirm.
            self.dataset = ImageFolder(
                root=params.dataroot,
                transform=transforms.Compose([
                    # NOTE(review): transforms.Scale is the pre-0.4
                    # torchvision name for Resize — assumes an old
                    # torchvision install; confirm before upgrading.
                    transforms.Scale(params.imageSize),
                    transforms.CenterCrop(params.imageSize),
                    transforms.ToTensor(),
                    # Map [0, 1] pixel values to [-1, 1].
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                ]),
                classes_idx=(10, 20))
        elif params.dataset == 'cifar10':
            self.dataset = dset.CIFAR10(root=params.dataroot,
                                        download=True,
                                        transform=transforms.Compose([
                                            transforms.Scale(params.imageSize),
                                            transforms.ToTensor(),
                                            transforms.Normalize(
                                                (0.5, 0.5, 0.5),
                                                (0.5, 0.5, 0.5)),
                                        ]))
        else:
            raise NotImplementedError("No such dataset {}".format(
                params.dataset))

        assert self.dataset
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=params.batchSize,
            shuffle=True,
            num_workers=int(params.workers))

        # some hyper parameters
        self.ngpu = int(params.ngpu)  # number of GPUs
        self.nz = int(params.nz)  # latent (noise) vector size
        self.ngf = int(params.ngf)  # generator feature-map width
        self.ndf = int(params.ndf)  # discriminator feature-map width
        self.num_classes = int(params.num_classes)
        self.nc = 3  # image channels (RGB)

        # Define the generator and initialize the weights; optionally
        # resume from a checkpoint path given in params.netG.
        if params.dataset == 'imagenet':
            self.netG = _netG(self.ngpu, self.nz)
        else:
            self.netG = _netG_CIFAR10(self.ngpu, self.nz)
        self.netG.apply(weights_init)
        if params.netG != '':
            self.netG.load_state_dict(torch.load(params.netG))
        print(self.netG)
        # Define the discriminator and initialize the weights; optionally
        # resume from a checkpoint path given in params.netD.
        if params.dataset == 'imagenet':
            self.netD = _netD(self.ngpu, self.num_classes)
        else:
            self.netD = _netD_CIFAR10(self.ngpu, self.num_classes)
        self.netD.apply(weights_init)
        if params.netD != '':
            self.netD.load_state_dict(torch.load(params.netD))
        print(self.netD)
        # loss functions: BCE for the real/fake head, NLL for the
        # auxiliary classification head (AC-GAN).
        self.dis_criterion = nn.BCELoss()
        self.aux_criterion = nn.NLLLoss()

        # tensor placeholders, allocated once and reused every iteration
        self.input = torch.FloatTensor(params.batchSize, 3, params.imageSize,
                                       params.imageSize)
        self.noise = torch.FloatTensor(params.batchSize, self.nz, 1, 1)
        # fixed noise batch, used to visualize generator progress
        self.eval_noise = torch.FloatTensor(params.batchSize, self.nz, 1,
                                            1).normal_(0, 1)
        self.dis_label = torch.FloatTensor(params.batchSize)
        self.aux_label = torch.LongTensor(params.batchSize)
        self.real_label = 1
        self.fake_label = 0

        # if using cuda, move the networks, criteria and placeholders
        # to the GPU
        if params.cuda:
            self.netD.cuda()
            self.netG.cuda()
            self.dis_criterion.cuda()
            self.aux_criterion.cuda()
            self.input, self.dis_label, self.aux_label = self.input.cuda(
            ), self.dis_label.cuda(), self.aux_label.cuda()
            self.noise, self.eval_noise = self.noise.cuda(
            ), self.eval_noise.cuda()

        # define variables
        # NOTE(review): Variable is the pre-0.4 PyTorch autograd wrapper,
        # deprecated on modern PyTorch — confirm the targeted version.
        self.input = Variable(self.input)
        self.noise = Variable(self.noise)
        self.eval_noise = Variable(self.eval_noise)
        self.dis_label = Variable(self.dis_label)
        self.aux_label = Variable(self.aux_label)
        # noise for evaluation: draw one random class per sample and
        # overwrite the first num_classes entries of each noise vector
        # with its one-hot encoding, so every eval image is conditioned
        # on a known label.
        self.eval_noise_ = np.random.normal(0, 1, (params.batchSize, self.nz))
        self.eval_label = np.random.randint(0, self.num_classes,
                                            params.batchSize)
        self.eval_onehot = np.zeros((params.batchSize, self.num_classes))
        self.eval_onehot[np.arange(params.batchSize), self.eval_label] = 1
        self.eval_noise_[np.arange(params.batchSize), :self.
                         num_classes] = self.eval_onehot[np.arange(
                             params.batchSize)]
        self.eval_noise_ = (torch.from_numpy(self.eval_noise_))
        self.eval_noise.data.copy_(
            self.eval_noise_.view(params.batchSize, self.nz, 1, 1))
        # setup optimizer: one Adam per network, sharing lr and beta1
        self.optimizerD = optim.Adam(self.netD.parameters(),
                                     lr=params.lr,
                                     betas=(params.beta1, 0.999))
        self.optimizerG = optim.Adam(self.netG.parameters(),
                                     lr=params.lr,
                                     betas=(params.beta1, 0.999))