def __init__(self, args):
		# parameters
		self.epoch = args.epoch
		self.sample_num = 64
		self.batch_size = args.batch_size
		self.save_dir = args.save_dir
		self.result_dir = args.result_dir
		self.dataset = args.dataset
		self.dataroot_dir = args.dataroot_dir
		self.log_dir = args.log_dir
		self.gpu_mode = args.gpu_mode
		self.model_name = args.gan_type
		self.lambda_ = 0.25
		self.n_critic = 5  # number of critic iterations per generator iteration

		# networks init
		self.G = generator(self.dataset)
		self.D = discriminator(self.dataset)
		self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
		self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

		if self.gpu_mode:
			self.G.cuda()
			self.D.cuda()

		print('---------- Networks architecture -------------')
		utils.print_network(self.G)
		utils.print_network(self.D)
		print('-----------------------------------------------')

		# load dataset
		data_dir = os.path.join( self.dataroot_dir, self.dataset )
		if self.dataset == 'mnist':
			self.data_loader = DataLoader(datasets.MNIST(data_dir, train=True, download=True,
														 transform=transforms.Compose(
															 [transforms.ToTensor()])),
										  batch_size=self.batch_size, shuffle=True)
		elif self.dataset == 'fashion-mnist':
			self.data_loader = DataLoader(
				datasets.FashionMNIST(data_dir, train=True, download=True, transform=transforms.Compose(
					[transforms.ToTensor()])),
				batch_size=self.batch_size, shuffle=True)
		elif self.dataset == 'celebA':
			self.data_loader = utils.CustomDataLoader(data_dir, transform=transforms.Compose(
				[transforms.CenterCrop(160), transforms.Scale(64), transforms.ToTensor()]), batch_size=self.batch_size,
												 shuffle=True)
		self.z_dim = 62

		# fixed noise
		if self.gpu_mode:
			self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
		else:
			self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)
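The Variable(..., volatile=True) idiom above (and transforms.Scale further down) is PyTorch/torchvision <= 0.3 API: volatile was removed in PyTorch 0.4, and Scale was renamed Resize. A minimal sketch of the modern equivalent of the fixed-noise block, assuming only stock PyTorch:

import torch

z_dim, batch_size = 62, 64
with torch.no_grad():                      # replaces volatile=True
    sample_z = torch.rand(batch_size, z_dim)
if torch.cuda.is_available():              # replaces the gpu_mode branch
    sample_z = sample_z.cuda()

Example #2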
    def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 64
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.dataroot_dir = args.dataroot_dir
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type

        # EBGAN parameters
        self.pt_loss_weight = 0.1
        self.margin = max(1, self.batch_size / 64.)  # margin for loss function
        # usually margin of 1 is enough, but for large batch size it must be larger than 1
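        # e.g. batch_size=64 -> margin = 1.0; batch_size=256 -> margin = 4.0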

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(),
                                      lr=args.lrG,
                                      betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(),
                                      lr=args.lrD,
                                      betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.MSE_loss = nn.MSELoss().cuda()
        else:
            self.MSE_loss = nn.MSELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load dataset
        data_dir = os.path.join(self.dataroot_dir, self.dataset)
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(datasets.FashionMNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.CustomDataLoader(
                data_dir,
                transform=transforms.Compose([
                    transforms.CenterCrop(160),
                    transforms.Scale(64),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
        self.z_dim = 62

        # fixed noise
        if self.gpu_mode:
            self.sample_z_ = Variable(torch.rand(
                (self.batch_size, self.z_dim)).cuda(),
                                      volatile=True)
        else:
            self.sample_z_ = Variable(torch.rand(
                (self.batch_size, self.z_dim)),
                                      volatile=True)
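A minimal sketch (not this repository's train step) of how the EBGAN pieces above are usually combined: the discriminator is an autoencoder whose reconstruction MSE is the energy, and the fake energy is hinged with the margin; pt_loss_weight would scale the pulling-away term on the generator side.

import torch
import torch.nn.functional as F

def ebgan_d_loss(x, x_recon, g, g_recon, margin):
    d_real = F.mse_loss(x_recon, x)        # energy of real samples
    d_fake = F.mse_loss(g_recon, g)        # energy of generated samples
    return d_real + torch.clamp(margin - d_fake, min=0)

Example #3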
    def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 49
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.dataroot_dir = args.dataroot_dir
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.multi_gpu = args.multi_gpu
        self.num_workers = args.num_workers
        self.model_name = args.gan_type
        self.centerBosphorus = args.centerBosphorus
        self.loss_option = args.loss_option
        if len(args.loss_option) > 0:
            self.model_name = self.model_name + '_' + args.loss_option
            self.loss_option = args.loss_option.split(',')
        if len(args.comment) > 0:
            self.model_name = self.model_name + '_' + args.comment
        self.lambda_ = 0.25
        self.n_critic = args.n_critic
        self.n_gen = args.n_gen
        self.c = 0.01  # for wgan
        self.nDaccAvg = args.nDaccAvg
        if 'wass' in self.loss_option:
            self.n_critic = 5

        # makedirs
        temp_save_dir = os.path.join(self.save_dir, self.dataset,
                                     self.model_name)
        if not os.path.exists(temp_save_dir):
            os.makedirs(temp_save_dir)
        else:
            print('[warning] path exists: ' + temp_save_dir)
        temp_result_dir = os.path.join(self.result_dir, self.dataset,
                                       self.model_name)
        if not os.path.exists(temp_result_dir):
            os.makedirs(temp_result_dir)
        else:
            print('[warning] path exists: ' + temp_result_dir)

        # save args
        timestamp = time.strftime('%b_%d_%Y_%H;%M')
        with open(
                os.path.join(temp_save_dir,
                             self.model_name + '_' + timestamp + '_args.pkl'),
                'wb') as fhandle:
            pickle.dump(args, fhandle)

        # load dataset
        data_dir = os.path.join(self.dataroot_dir, self.dataset)
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(datasets.FashionMNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.CustomDataLoader(
                data_dir,
                transform=transforms.Compose([
                    transforms.CenterCrop(160),
                    transforms.Scale(64),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
        elif self.dataset == 'MultiPie' or self.dataset == 'miniPie':
            self.data_loader = DataLoader(utils.MultiPie(
                data_dir,
                transform=transforms.Compose([
                    transforms.Scale(100),
                    transforms.RandomCrop(96),
                    transforms.ToTensor()
                ])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
            self.Nd = 337  # 200
            self.Np = 9
            self.Ni = 20
            self.Nz = 50
        elif self.dataset == 'CASIA-WebFace':
            self.data_loader = utils.CustomDataLoader(
                data_dir,
                transform=transforms.Compose([
                    transforms.Scale(100),
                    transforms.RandomCrop(96),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
            self.Nd = 10885
            self.Np = 13
            self.Ni = 20
            self.Nz = 50
        elif self.dataset == 'Bosphorus':
            self.data_loader = DataLoader(utils.Bosphorus(
                data_dir,
                use_image=True,
                fname_cache=args.fname_cache,
                transform=transforms.ToTensor(),
                shape=128,
                image_shape=256,
                center=self.centerBosphorus,
                use_colorPCL=True),
                                          batch_size=self.batch_size,
                                          shuffle=True,
                                          num_workers=self.num_workers)
            self.num_id = 105
            self.num_c_expr = len(self.data_loader.dataset.posecodemap)
            self.dim_fx = 320

        # networks init
        self.G = generator2d3d(self.dim_fx,
                               self.num_id,
                               self.num_c_expr,
                               nOutputCh={
                                   '2d': 3,
                                   '3d': 1
                               })
        self.D2d = discriminator2d(self.num_id, self.num_c_expr, nInputCh=3)
        self.D3d = discriminator3d(self.num_id, self.num_c_expr, nInputCh=1)
        self.G_optimizer = optim.Adam(self.G.parameters(),
                                      lr=args.lrG,
                                      betas=(args.beta1, args.beta2))
        self.D2d_optimizer = optim.Adam(self.D2d.parameters(),
                                        lr=args.lrD,
                                        betas=(args.beta1, args.beta2))
        self.D3d_optimizer = optim.Adam(self.D3d.parameters(),
                                        lr=args.lrD,
                                        betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D2d.cuda()
            self.D3d.cuda()
            self.CE_loss = nn.CrossEntropyLoss().cuda()
            self.BCE_loss = nn.BCELoss().cuda()
            self.MSE_loss = nn.MSELoss().cuda()
            self.L1_loss = nn.L1Loss().cuda()

            if self.multi_gpu:
                gpus = [0, 1]
                self.G = torch.nn.DataParallel(self.G, device_ids=gpus).cuda()
                self.D2d = torch.nn.DataParallel(self.D2d,
                                                 device_ids=gpus).cuda()
                self.D3d = torch.nn.DataParallel(self.D3d,
                                                 device_ids=gpus).cuda()
        else:
            self.CE_loss = nn.CrossEntropyLoss()
            self.BCE_loss = nn.BCELoss()
            self.MSE_loss = nn.MSELoss()
            self.L1_loss = nn.L1Loss()
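A sketch of the multi-GPU wrapping above in isolation; it assumes at least two visible GPUs and splits each batch across them.

import torch
import torch.nn as nn

net = nn.Linear(128, 10)
if torch.cuda.device_count() > 1:
    net = nn.DataParallel(net, device_ids=[0, 1]).cuda()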
Example #4
    def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.dataroot_dir = args.dataroot_dir
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.num_workers = args.num_workers
        self.model_name = args.gan_type
        self.use_GP = args.use_GP
        if self.use_GP:
            self.model_name = self.model_name + '_GP'
        if len(args.comment) > 0:
            self.model_name = self.model_name + '_' + args.comment
        self.lambda_ = 0.25
        self.sample_num = 16

        if self.dataset == 'MultiPie' or self.dataset == 'miniPie':
            self.Nd = 337  # 200
            self.Np = 9
            self.Ni = 20
            self.Nz = 50
        elif self.dataset == 'Bosphorus':
            self.Nz = 50
        elif self.dataset == 'CASIA-WebFace':
            self.Nd = 10885
            self.Np = 13
            self.Ni = 20
            self.Nz = 50

        if not os.path.exists(self.result_dir + '/' + self.dataset + '/' +
                              self.model_name):
            os.makedirs(self.result_dir + '/' + self.dataset + '/' +
                        self.model_name)
        if not os.path.exists(
                os.path.join(self.save_dir, self.dataset, self.model_name)):
            os.makedirs(
                os.path.join(self.save_dir, self.dataset, self.model_name))

        # load dataset
        data_dir = os.path.join(self.dataroot_dir, self.dataset)
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(datasets.FashionMNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.CustomDataLoader(
                data_dir,
                transform=transforms.Compose([
                    transforms.CenterCrop(160),
                    transforms.Scale(64),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
        elif self.dataset == 'MultiPie' or self.dataset == 'miniPie':
            self.data_loader = DataLoader(utils.MultiPie(
                data_dir,
                transform=transforms.Compose([
                    transforms.Scale(100),
                    transforms.RandomCrop(96),
                    transforms.ToTensor()
                ])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'CASIA-WebFace':
            self.data_loader = utils.CustomDataLoader(
                data_dir,
                transform=transforms.Compose([
                    transforms.Scale(100),
                    transforms.RandomCrop(96),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
        elif self.dataset == 'Bosphorus':
            self.data_loader = DataLoader(utils.Bosphorus(
                data_dir,
                skipCodes=['YR', 'PR', 'CR'],
                transform=transforms.ToTensor(),
                shape=128,
                image_shape=256),
                                          batch_size=self.batch_size,
                                          shuffle=True,
                                          num_workers=self.num_workers)
            self.Nid = 105
            self.Npcode = len(self.data_loader.dataset.posecodemap)

        # fixed samples for reconstruction visualization
        print('Generating fixed sample for visualization...')
        nSamples = self.sample_num
        sample_x3D_s = []
        for iB, (sample_x3D_, _) in enumerate(self.data_loader):
            sample_x3D_s.append(sample_x3D_)
            if iB > nSamples // self.batch_size:
                break
        self.sample_x3D_ = torch.cat(sample_x3D_s)[:nSamples, :, :, :]

        fname = os.path.join(self.result_dir, self.dataset, self.model_name,
                             'sampleGT.npy')
        self.sample_x3D_.numpy().squeeze().dump(fname)

        if self.gpu_mode:
            self.sample_x3D_ = Variable(self.sample_x3D_.cuda(), volatile=True)
        else:
            self.sample_x3D_ = Variable(self.sample_x3D_, volatile=True)

        # networks init
        self.AE = AE3D()

        self.optimizer = optim.Adam(self.AE.parameters(),
                                    lr=args.lrG,
                                    betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.AE.cuda()
            self.CE_loss = nn.CrossEntropyLoss().cuda()
            self.BCE_loss = nn.BCELoss().cuda()
            self.MSE_loss = nn.MSELoss().cuda()
            self.L1_loss = nn.L1Loss().cuda()
        else:
            self.CE_loss = nn.CrossEntropyLoss()
            self.BCE_loss = nn.BCELoss()
            self.MSE_loss = nn.MSELoss()
            self.L1_loss = nn.L1Loss()

        print('init done')
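A standalone sketch of the fixed-sample collection loop above: batches are accumulated until at least nSamples items have been gathered, then the concatenation is truncated.

import torch
from torch.utils.data import DataLoader, TensorDataset

loader = DataLoader(TensorDataset(torch.randn(100, 1, 8, 8)), batch_size=16)
nSamples, chunks = 49, []
for iB, (x,) in enumerate(loader):
    chunks.append(x)
    if iB > nSamples // 16:
        break
fixed = torch.cat(chunks)[:nSamples]       # shape: (49, 1, 8, 8)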
Example #5
    def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 49
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.dataroot_dir = args.dataroot_dir
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.num_workers = args.num_workers
        self.model_name = args.gan_type
        self.centerBosphorus = args.centerBosphorus
        self.loss_option = args.loss_option
        if len(args.loss_option) > 0:
            self.model_name = self.model_name + '_' + args.loss_option
            self.loss_option = args.loss_option.split(',')
        if len(args.comment) > 0:
            self.model_name = self.model_name + '_' + args.comment
        self.lambda_ = 0.25
        self.n_critic = args.n_critic
        self.n_gen = args.n_gen
        self.c = 0.01  # for wgan
        self.nDaccAvg = args.nDaccAvg
        if 'wass' in self.loss_option:
            self.n_critic = 5

        # makedirs
        temp_save_dir = os.path.join(self.save_dir, self.dataset,
                                     self.model_name)
        if not os.path.exists(temp_save_dir):
            os.makedirs(temp_save_dir)
        else:
            print('[warning] path exists: ' + temp_save_dir)
        temp_result_dir = os.path.join(self.result_dir, self.dataset,
                                       self.model_name)
        if not os.path.exists(temp_result_dir):
            os.makedirs(temp_result_dir)
        else:
            print('[warning] path exists: ' + temp_result_dir)

        # save args
        timestamp = time.strftime('%b_%d_%Y_%H;%M')
        with open(
                os.path.join(temp_save_dir,
                             self.model_name + '_' + timestamp + '_args.pkl'),
                'wb') as fhandle:
            pickle.dump(args, fhandle)

        # load dataset
        data_dir = os.path.join(self.dataroot_dir, self.dataset)
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(datasets.FashionMNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.CustomDataLoader(
                data_dir,
                transform=transforms.Compose([
                    transforms.CenterCrop(160),
                    transforms.Scale(64),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
        elif self.dataset == 'MultiPie' or self.dataset == 'miniPie':
            self.data_loader = DataLoader(utils.MultiPie(
                data_dir,
                transform=transforms.Compose([
                    transforms.Scale(100),
                    transforms.RandomCrop(96),
                    transforms.ToTensor()
                ])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
            self.Nd = 337  # 200
            self.Np = 9
            self.Ni = 20
            self.Nz = 50
        elif self.dataset == 'CASIA-WebFace':
            self.data_loader = utils.CustomDataLoader(
                data_dir,
                transform=transforms.Compose([
                    transforms.Scale(100),
                    transforms.RandomCrop(96),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
            self.Nd = 10885
            self.Np = 13
            self.Ni = 20
            self.Nz = 50
        elif self.dataset == 'Bosphorus':
            # inclCodes = ['LFAU_9', 'LFAU_10', 'LFAU_12', 'LFAU_12L',
            #              'LFAU_12R', 'LFAU_22', 'LFAU_27', 'LFAU_34',
            #              'N_N', 'UFAU_2', 'UFAU_4', 'UFAU_43']
            inclCodes = []

            self.data_loader = DataLoader(utils.Bosphorus(
                data_dir,
                use_image=True,
                fname_cache=args.fname_cache,
                transform=transforms.ToTensor(),
                shape=128,
                image_shape=256,
                center=self.centerBosphorus,
                inclCodes=inclCodes),
                                          batch_size=self.batch_size,
                                          shuffle=True,
                                          num_workers=self.num_workers)
            self.Nid = 105
            self.Npcode = len(self.data_loader.dataset.posecodemap)
            self.Nz = 50

        # networks init
        self.G = generator(self.Nid, self.Npcode, self.Nz)
        self.D = discriminator(self.Nid, self.Npcode)
        self.G_optimizer = optim.Adam(self.G.parameters(),
                                      lr=args.lrG,
                                      betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(),
                                      lr=args.lrD,
                                      betas=(args.beta1, args.beta2))

        if hasattr(args, 'comment1'):
            return  # skip the fixed-sample and loss setup below
        # fixed samples for reconstruction visualization
        path_sample = os.path.join(self.result_dir, self.dataset,
                                   self.model_name, 'fixed_sample')
        if args.interpolate or args.generate:
            print('skipping fixed sample : interpolate/generate')
        elif not os.path.exists(path_sample):
            print('Generating fixed sample for visualization...')
            os.makedirs(path_sample)
            nSamples = self.sample_num - self.Npcode
            nPcodes = self.Npcode
            sample_x2D_s = []
            sample_x3D_s = []
            for iB, (sample_x3D_, sample_y_,
                     sample_x2D_) in enumerate(self.data_loader):
                sample_x2D_s.append(sample_x2D_)
                sample_x3D_s.append(sample_x3D_)
                if iB > nSamples // self.batch_size:
                    break
            sample_x2D_s = torch.cat(sample_x2D_s)[:nSamples, :, :, :]
            sample_x3D_s = torch.cat(sample_x3D_s)[:nSamples, :, :, :]
            sample_x2D_s = torch.split(sample_x2D_s, 1)
            sample_x3D_s = torch.split(sample_x3D_s, 1)
            sample_x2D_s += (sample_x2D_s[0], ) * nPcodes
            sample_x3D_s += (sample_x3D_s[0], ) * nPcodes
            # sample_x2D_s = [[x] * nPcodes for x in sample_x2D_s]
            # sample_x3D_s = [[x] * nPcodes for x in sample_x3D_s]
            # flatten = lambda l: [item for sublist in l for item in sublist]
            self.sample_x2D_ = torch.cat(sample_x2D_s)
            self.sample_x3D_ = torch.cat(sample_x3D_s)
            # sample_x2D_s = [sample_x2D_s[0][0].unsqueeze(0)] * nSamples
            self.sample_pcode_ = torch.zeros(nSamples + nPcodes, self.Npcode)
            self.sample_pcode_[:nSamples, 0] = 1
            for iS in range(nPcodes):
                ii = iS % self.Npcode
                self.sample_pcode_[iS + nSamples, ii] = 1
            self.sample_z_ = torch.rand(nSamples + nPcodes, self.Nz)

            nSpS = int(math.ceil(math.sqrt(nSamples +
                                           nPcodes)))  # num samples per side
            fname = os.path.join(path_sample, 'sampleGT.png')
            utils.save_images(
                self.sample_x2D_[:nSpS * nSpS, :, :, :].numpy().transpose(
                    0, 2, 3, 1), [nSpS, nSpS], fname)

            fname = os.path.join(path_sample, 'sampleGT_2D.npy')
            self.sample_x2D_.numpy().dump(fname)
            fname = os.path.join(path_sample, 'sampleGT_3D.npy')
            self.sample_x3D_.numpy().dump(fname)
            fname = os.path.join(path_sample, 'sampleGT_z.npy')
            self.sample_z_.numpy().dump(fname)
            fname = os.path.join(path_sample, 'sampleGT_pcode.npy')
            self.sample_pcode_.numpy().dump(fname)
        else:
            print('Loading fixed sample for visualization...')
            fname = os.path.join(path_sample, 'sampleGT_2D.npy')
            with open(fname, 'rb') as fhandle:  # binary mode: the files hold pickled arrays
                self.sample_x2D_ = torch.Tensor(pickle.load(fhandle))
            fname = os.path.join(path_sample, 'sampleGT_3D.npy')
            with open(fname, 'rb') as fhandle:
                self.sample_x3D_ = torch.Tensor(pickle.load(fhandle))
            fname = os.path.join(path_sample, 'sampleGT_z.npy')
            with open(fname, 'rb') as fhandle:
                self.sample_z_ = torch.Tensor(pickle.load(fhandle))
            fname = os.path.join(path_sample, 'sampleGT_pcode.npy')
            with open(fname, 'rb') as fhandle:
                self.sample_pcode_ = torch.Tensor(pickle.load(fhandle))

        if not args.interpolate and not args.generate:
            if self.gpu_mode:
                self.sample_x2D_ = Variable(self.sample_x2D_.cuda(),
                                            volatile=True)
                self.sample_z_ = Variable(self.sample_z_.cuda(), volatile=True)
                self.sample_pcode_ = Variable(self.sample_pcode_.cuda(),
                                              volatile=True)
            else:
                self.sample_x2D_ = Variable(self.sample_x2D_, volatile=True)
                self.sample_z_ = Variable(self.sample_z_, volatile=True)
                self.sample_pcode_ = Variable(self.sample_pcode_,
                                              volatile=True)

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.CE_loss = nn.CrossEntropyLoss().cuda()
            self.BCE_loss = nn.BCELoss().cuda()
            self.MSE_loss = nn.MSELoss().cuda()
            self.L1_loss = nn.L1Loss().cuda()
        else:
            self.CE_loss = nn.CrossEntropyLoss()
            self.BCE_loss = nn.BCELoss()
            self.MSE_loss = nn.MSELoss()
            self.L1_loss = nn.L1Loss()
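A standalone sketch of the one-hot pcode block above: the first nSamples rows all get code 0, and the trailing nPcodes rows enumerate every code once.

import torch

nSamples, nPcodes = 30, 19
pcode = torch.zeros(nSamples + nPcodes, nPcodes)
pcode[:nSamples, 0] = 1
for iS in range(nPcodes):
    pcode[iS + nSamples, iS % nPcodes] = 1

Example #6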
	def __init__(self, args):
		print( 'init DRecon2DGAN...' )
		t_start = time.time()
		# parameters
		self.epoch = args.epoch
		self.sample_num = 49 
		self.batch_size = args.batch_size
		self.save_dir = args.save_dir
		self.result_dir = args.result_dir
		self.dataset = args.dataset
		self.dataroot_dir = args.dataroot_dir
		self.log_dir = args.log_dir
		self.gpu_mode = args.gpu_mode
		self.num_workers = args.num_workers
		self.model_name = args.gan_type
		self.centerBosphorus = args.centerBosphorus
		self.loss_option = args.loss_option
		if len(args.loss_option) > 0:
			self.model_name = self.model_name + '_' + args.loss_option
			self.loss_option = args.loss_option.split(',')
		if len(args.comment) > 0:
			self.model_name = self.model_name + '_' + args.comment
		self.lambda_ = 0.25
		self.n_critic = args.n_critic
		self.n_gen = args.n_gen
		self.c = 0.01 # for wgan
		self.nDaccAvg = args.nDaccAvg
		if 'wass' in self.loss_option:
			self.n_critic = 5

		# makedirs
		temp_save_dir = os.path.join(self.save_dir, self.dataset, self.model_name)
		if not os.path.exists(temp_save_dir):
			os.makedirs(temp_save_dir)
		else:
			print('[warning] path exists: '+temp_save_dir)
		temp_result_dir = os.path.join(self.result_dir, self.dataset, self.model_name)
		if not os.path.exists(temp_result_dir):
			os.makedirs(temp_result_dir)
		else:
			print('[warning] path exists: '+temp_result_dir)

		# save args
		timestamp = time.strftime('%b_%d_%Y_%H;%M')
		with open(os.path.join(temp_save_dir, self.model_name + '_' + timestamp + '_args.pkl'), 'wb') as fhandle:
			pickle.dump(args, fhandle)


		# load dataset
		data_dir = os.path.join( self.dataroot_dir, self.dataset )
		if self.dataset == 'mnist':
			self.data_loader = DataLoader(datasets.MNIST(data_dir, train=True, download=True,
																		  transform=transforms.Compose(
																			  [transforms.ToTensor()])),
														   batch_size=self.batch_size, shuffle=True)
		elif self.dataset == 'fashion-mnist':
			self.data_loader = DataLoader(
				datasets.FashionMNIST(data_dir, train=True, download=True, transform=transforms.Compose(
					[transforms.ToTensor()])),
				batch_size=self.batch_size, shuffle=True)
		elif self.dataset == 'celebA':
			self.data_loader = utils.CustomDataLoader(data_dir, transform=transforms.Compose(
				[transforms.CenterCrop(160), transforms.Scale(64), transforms.ToTensor()]), batch_size=self.batch_size,
												 shuffle=True)
		elif self.dataset == 'MultiPie' or self.dataset == 'miniPie':
			self.data_loader = DataLoader( utils.MultiPie(data_dir,
					transform=transforms.Compose(
					[transforms.Scale(100), transforms.RandomCrop(96), transforms.ToTensor()])),
				batch_size=self.batch_size, shuffle=True) 
			self.Nd = 337 # 200
			self.Np = 9
			self.Ni = 20
			self.Nz = 50
		elif self.dataset == 'CASIA-WebFace':
			self.data_loader = utils.CustomDataLoader(data_dir, transform=transforms.Compose(
				[transforms.Scale(100), transforms.RandomCrop(96), transforms.ToTensor()]), batch_size=self.batch_size,
												 shuffle=True)
			self.Nd = 10885 
			self.Np = 13
			self.Ni = 20
			self.Nz = 50
		elif self.dataset == 'Bosphorus':
			self.data_loader = DataLoader( utils.Bosphorus(data_dir, use_image=True, fname_cache=args.fname_cache,
											transform=transforms.ToTensor(),
											shape=128, image_shape=256, center=self.centerBosphorus,
											use_colorPCL=True),
											batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)
			self.Nid = 105
			self.Npcode = len(self.data_loader.dataset.posecodemap)
			self.Nz = 50

		# networks init
		self.G = generator(self.Nid, self.Npcode, nOutputCh=3)
		self.D = discriminator2D(self.Nid, self.Npcode, nInputCh=3)
		self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
		self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

		# fixed samples for reconstruction visualization
		path_sample = os.path.join( self.result_dir, self.dataset, self.model_name, 'fixed_sample' )
		if args.interpolate: # or args.generate:
			print( 'skipping fixed sample : interpolate/generate' )
		elif not os.path.exists( path_sample ):
			print( 'Generating fixed sample for visualization...' )
			os.makedirs( path_sample )
			nPcodes = self.Npcode
			nSamples = self.sample_num-nPcodes*3 # 13 people with fixed pcode, but note that 3 people with all pcodes will be added
			list_sample_x2Ds_raw = []
			list_sample_x3Ds_raw = []
			for iB, (sample_x3D_,sample_y_,sample_x2D_) in enumerate(self.data_loader):
				list_sample_x2Ds_raw.append( sample_x2D_ )
				list_sample_x3Ds_raw.append( sample_x3D_ )
				if iB > (nSamples+3) // self.batch_size:
					break
			# store different people for fixed pcode
			list_sample_x2Ds_raw = torch.split( torch.cat(list_sample_x2Ds_raw),1)
			list_sample_x3Ds_raw = torch.split( torch.cat(list_sample_x3Ds_raw),1)
			sample_x2D_s = list_sample_x2Ds_raw[:nSamples]
			sample_x3D_s = list_sample_x3Ds_raw[:nSamples]
			#sample_x2D_s = torch.cat( list_sample_x2Ds_raw )[:nSamples,:,:,:]
			#sample_x3D_s = torch.cat( list_sample_x3Ds_raw )[:nSamples,:,:,:]
			#sample_x2D_s = torch.split( sample_x2D_s, 1 )
			#sample_x3D_s = torch.split( sample_x3D_s, 1 )

			# add 3 people for all pcodes
			for i in range(3):
				sample_x2D_s += list_sample_x2Ds_raw[nSamples+i:nSamples+i+1]*nPcodes
				sample_x3D_s += list_sample_x3Ds_raw[nSamples+i:nSamples+i+1]*nPcodes

			# concat all people
			self.sample_x2D_ = torch.cat( sample_x2D_s )
			self.sample_x3D_ = torch.cat( sample_x3D_s )

			# make pcodes
			self.sample_pcode_ = torch.zeros( nSamples+nPcodes*3, nPcodes )
			self.sample_pcode_[:nSamples,-1]=1 # N ( neutral )
			for iS in range( nPcodes*3 ):
				ii = iS%self.Npcode
				self.sample_pcode_[iS+nSamples,ii] = 1
	
			nSpS = int(math.ceil( math.sqrt( nSamples+nPcodes*3 ) )) # num samples per side
			fname = os.path.join( path_sample, 'sampleGT.png')
			utils.save_images(self.sample_x2D_[:nSpS*nSpS,:,:,:].numpy().transpose(0,2,3,1), [nSpS,nSpS],fname)
	
			fname = os.path.join( path_sample, 'sampleGT_2D.npy')
			self.sample_x2D_.numpy().dump( fname )
			fname = os.path.join( path_sample, 'sampleGT_3D.npy')
			self.sample_x3D_.numpy().dump( fname )
			fname = os.path.join( path_sample, 'sampleGT_pcode.npy')
			self.sample_pcode_.numpy().dump( fname )
		else:
			print( 'Loading fixed sample for visualization...' )
			fname = os.path.join( path_sample, 'sampleGT_2D.npy')
		with open( fname, 'rb' ) as fhandle: # binary mode: the files hold pickled arrays
			self.sample_x2D_ = torch.Tensor(pickle.load( fhandle ))
		fname = os.path.join( path_sample, 'sampleGT_3D.npy')
		with open( fname, 'rb' ) as fhandle:
			self.sample_x3D_ = torch.Tensor(pickle.load( fhandle ))
		fname = os.path.join( path_sample, 'sampleGT_pcode.npy')
		with open( fname, 'rb' ) as fhandle:
			self.sample_pcode_ = torch.Tensor( pickle.load( fhandle ))

		if not args.interpolate: # and not args.generate:
			if self.gpu_mode:
				self.sample_x2D_ = Variable(self.sample_x2D_.cuda(), volatile=True)
				self.sample_pcode_ = Variable(self.sample_pcode_.cuda(), volatile=True)
			else:
				self.sample_x2D_ = Variable(self.sample_x2D_, volatile=True)
				self.sample_pcode_ = Variable(self.sample_pcode_, volatile=True)


		if self.gpu_mode:
			self.G.cuda()
			self.D.cuda()
			self.CE_loss = nn.CrossEntropyLoss().cuda()
			self.BCE_loss = nn.BCELoss().cuda()
			self.MSE_loss = nn.MSELoss().cuda()
			self.L1_loss = nn.L1Loss().cuda()
		else:
			self.CE_loss = nn.CrossEntropyLoss()
			self.BCE_loss = nn.BCELoss()
			self.MSE_loss = nn.MSELoss()
			self.L1_loss = nn.L1Loss()

		# print('---------- Networks architecture -------------')
		# utils.print_network(self.G)
		# utils.print_network(self.D)
		# print('-----------------------------------------------')

		print('init DRecon2DGAN done: {}sec'.format(time.time()-t_start) )
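A small round-trip sketch of the sample caching used above: ndarray.dump pickles the array to disk, so it must be read back through pickle in binary mode (the file name here is only for the demo).

import pickle
import torch

arr = torch.rand(2, 3).numpy()
arr.dump('sampleGT_demo.npy')              # pickles the ndarray to disk
with open('sampleGT_demo.npy', 'rb') as fh:
    restored = torch.Tensor(pickle.load(fh))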
Example #7
    def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 19
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.dataroot_dir = args.dataroot_dir
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.num_workers = args.num_workers
        self.model_name = args.gan_type
        self.loss_option = args.loss_option
        if len(args.loss_option) > 0:
            self.model_name = self.model_name + '_' + args.loss_option
            self.loss_option = args.loss_option.split(',')
        if len(args.comment) > 0:
            self.model_name = self.model_name + '_' + args.comment
        self.lambda_ = 0.25

        if self.dataset == 'MultiPie' or self.dataset == 'miniPie':
            self.Nd = 337  # 200
            self.Np = 9
            self.Ni = 20
            self.Nz = 50
        elif self.dataset == 'Bosphorus':
            self.Nz = 50
        elif self.dataset == 'CASIA-WebFace':
            self.Nd = 10885
            self.Np = 13
            self.Ni = 20
            self.Nz = 50

        if not os.path.exists(self.result_dir + '/' + self.dataset + '/' +
                              self.model_name):
            os.makedirs(self.result_dir + '/' + self.dataset + '/' +
                        self.model_name)
        if not os.path.exists(
                os.path.join(self.save_dir, self.dataset, self.model_name)):
            os.makedirs(
                os.path.join(self.save_dir, self.dataset, self.model_name))

        # load dataset
        data_dir = os.path.join(self.dataroot_dir, self.dataset)
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(datasets.FashionMNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.CustomDataLoader(
                data_dir,
                transform=transforms.Compose([
                    transforms.CenterCrop(160),
                    transforms.Scale(64),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
        elif self.dataset == 'MultiPie' or self.dataset == 'miniPie':
            self.data_loader = DataLoader(utils.MultiPie(
                data_dir,
                transform=transforms.Compose([
                    transforms.Scale(100),
                    transforms.RandomCrop(96),
                    transforms.ToTensor()
                ])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'CASIA-WebFace':
            self.data_loader = utils.CustomDataLoader(
                data_dir,
                transform=transforms.Compose([
                    transforms.Scale(100),
                    transforms.RandomCrop(96),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
        elif self.dataset == 'Bosphorus':
            self.data_loader = DataLoader(utils.Bosphorus(
                data_dir,
                use_image=True,
                skipCodes=['YR', 'PR', 'CR'],
                transform=transforms.ToTensor(),
                shape=128,
                image_shape=256),
                                          batch_size=self.batch_size,
                                          shuffle=True,
                                          num_workers=self.num_workers)
            self.Nid = 105
            self.Npcode = len(self.data_loader.dataset.posecodemap)

        # fixed samples for reconstruction visualization
        print('Generating fixed sample for visualization...')
        nPcodes = self.Npcode // 4
        nSamples = self.sample_num - nPcodes
        sample_x2D_s = []
        sample_x3D_s = []
        for iB, (sample_x3D_, sample_y_,
                 sample_x2D_) in enumerate(self.data_loader):
            sample_x2D_s.append(sample_x2D_)
            sample_x3D_s.append(sample_x3D_)
            if iB > nSamples // self.batch_size:
                break
        sample_x2D_s = torch.cat(sample_x2D_s)[:nSamples, :, :, :]
        sample_x3D_s = torch.cat(sample_x3D_s)[:nSamples, :, :, :]
        sample_x2D_s = torch.split(sample_x2D_s, 1)
        sample_x3D_s = torch.split(sample_x3D_s, 1)
        sample_x2D_s += (sample_x2D_s[0], ) * nPcodes
        sample_x3D_s += (sample_x3D_s[0], ) * nPcodes
        self.sample_x2D_ = torch.cat(sample_x2D_s)
        self.sample_x3D_ = torch.cat(sample_x3D_s)
        self.sample_pcode_ = torch.zeros(nSamples + nPcodes, self.Npcode)
        self.sample_pcode_[:nSamples, 0] = 1
        for iS in range(nPcodes):
            ii = iS % self.Npcode
            self.sample_pcode_[iS + nSamples, ii] = 1
        self.sample_z_ = torch.rand(nSamples + nPcodes, self.Nz)

        fname = os.path.join(self.result_dir, self.dataset, self.model_name,
                             'samples.png')
        nSpS = int(math.ceil(math.sqrt(nSamples +
                                       nPcodes)))  # num samples per side
        utils.save_images(
            self.sample_x2D_[:nSpS * nSpS, :, :, :].numpy().transpose(
                0, 2, 3, 1), [nSpS, nSpS], fname)

        fname = os.path.join(self.result_dir, self.dataset, self.model_name,
                             'sampleGT.npy')
        self.sample_x3D_.numpy().squeeze().dump(fname)

        if self.gpu_mode:
            self.sample_x2D_ = Variable(self.sample_x2D_.cuda(), volatile=True)
            self.sample_x3D_ = Variable(self.sample_x3D_.cuda(), volatile=True)
            self.sample_z_ = Variable(self.sample_z_.cuda(), volatile=True)
            self.sample_pcode_ = Variable(self.sample_pcode_.cuda(),
                                          volatile=True)
        else:
            self.sample_x2D_ = Variable(self.sample_x2D_, volatile=True)
            self.sample_x3D_ = Variable(self.sample_x3D_, volatile=True)
            self.sample_z_ = Variable(self.sample_z_, volatile=True)
            self.sample_pcode_ = Variable(self.sample_pcode_, volatile=True)

        # networks init
        self.G_2Dto3D = generator2Dto3D(self.Nid, self.Npcode, self.Nz)
        self.D_3D = discriminator3D(self.Nid, self.Npcode)

        self.G_2Dto3D_optimizer = optim.Adam(self.G_2Dto3D.parameters(),
                                             lr=args.lrG,
                                             betas=(args.beta1, args.beta2))
        self.D_3D_optimizer = optim.Adam(self.D_3D.parameters(),
                                         lr=args.lrD,
                                         betas=(args.beta1, args.beta2))

        self.G_3Dto2D = generator3Dto2D(self.Nid, self.Npcode, self.Nz)
        self.D_2D = discriminator2D(self.Nid, self.Npcode)

        self.G_3Dto2D_optimizer = optim.Adam(self.G_3Dto2D.parameters(),
                                             lr=args.lrG,
                                             betas=(args.beta1, args.beta2))
        self.D_2D_optimizer = optim.Adam(self.D_2D.parameters(),
                                         lr=args.lrD,
                                         betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G_2Dto3D.cuda()
            self.G_3Dto2D.cuda()
            self.D_3D.cuda()
            self.D_2D.cuda()
            self.CE_loss = nn.CrossEntropyLoss().cuda()
            self.BCE_loss = nn.BCELoss().cuda()
            self.MSE_loss = nn.MSELoss().cuda()
            self.L1_loss = nn.L1Loss().cuda()
        else:
            self.CE_loss = nn.CrossEntropyLoss()
            self.BCE_loss = nn.BCELoss()
            self.MSE_loss = nn.MSELoss()
            self.L1_loss = nn.L1Loss()

        print('init done')
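The grid-size computation above in isolation: samples are tiled on the smallest square grid that can hold them, e.g. 19 samples fit a 5x5 grid.

import math

n = 19
nSpS = int(math.ceil(math.sqrt(n)))        # 5: num samples per side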
Example #8
    def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 16
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.dataroot_dir = args.dataroot_dir
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type
        if len(args.comment) > 0:
            self.model_name += '_' + args.comment

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(),
                                      lr=args.lrG,
                                      betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(),
                                      lr=args.lrD,
                                      betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.BCE_loss = nn.BCELoss().cuda()
        else:
            self.BCE_loss = nn.BCELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load dataset
        data_dir = os.path.join(self.dataroot_dir, self.dataset)
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(datasets.FashionMNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.CustomDataLoader(
                data_dir,
                transform=transforms.Compose([
                    transforms.CenterCrop(160),
                    transforms.Scale(64),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
        elif self.dataset == 'MultiPie' or self.dataset == 'miniPie':
            self.data_loader = DataLoader(utils.MultiPie(
                data_dir,
                transform=transforms.Compose([
                    transforms.Scale(100),
                    transforms.RandomCrop(96),
                    transforms.ToTensor()
                ]),
                cam_ids=[51]),
                                          batch_size=self.batch_size,
                                          shuffle=True)

        self.z_dim = 62

        # fixed noise
        if self.gpu_mode:
            self.sample_z_ = Variable(torch.rand(
                (self.batch_size, self.z_dim)).cuda(),
                                      volatile=True)
        else:
            self.sample_z_ = Variable(torch.rand(
                (self.batch_size, self.z_dim)),
                                      volatile=True)
Example #9
    def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 16
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.dataroot_dir = args.dataroot_dir
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type
        if len(args.comment) > 0:
            self.model_name = self.model_name + '_' + args.comment
        self.lambda_ = 0.25

        if self.dataset == 'MultiPie' or self.dataset == 'miniPie':
            self.Nd = 337  # 200
            self.Np = 9
            self.Ni = 20
            self.Nz = 50
        elif self.dataset == 'CASIA-WebFace':
            self.Nd = 10885
            self.Np = 13
            self.Ni = 20
            self.Nz = 50

        # networks init
        self.G = generator(self.Nz, self.Nd, self.Np, self.Ni)
        self.D = Encoder('D', self.Nd, self.Np, self.Ni)
        self.G_optimizer = optim.Adam(self.G.parameters(),
                                      lr=args.lrG,
                                      betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(),
                                      lr=args.lrD,
                                      betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.CE_loss = nn.CrossEntropyLoss().cuda()
            self.BCE_loss = nn.BCELoss().cuda()
            self.MSE_loss = nn.MSELoss().cuda()
        else:
            self.CE_loss = nn.CrossEntropyLoss()
            self.BCE_loss = nn.BCELoss()
            self.MSE_loss = nn.MSELoss()

        # print('---------- Networks architecture -------------')
        # utils.print_network(self.G)
        # utils.print_network(self.D)
        # print('-----------------------------------------------')

        # load dataset
        data_dir = os.path.join(self.dataroot_dir, self.dataset)
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(datasets.FashionMNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.CustomDataLoader(
                data_dir,
                transform=transforms.Compose([
                    transforms.CenterCrop(160),
                    transforms.Scale(64),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
        elif self.dataset == 'MultiPie' or self.dataset == 'miniPie':
            self.data_loader = DataLoader(utils.MultiPie(
                data_dir,
                transform=transforms.Compose([
                    transforms.Scale(100),
                    transforms.RandomCrop(96),
                    transforms.ToTensor()
                ])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'CASIA-WebFace':
            self.data_loader = utils.CustomDataLoader(
                data_dir,
                transform=transforms.Compose([
                    transforms.Scale(100),
                    transforms.RandomCrop(96),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
        # fixed samples for reconstruction visualization
        nSamples = self.Np * self.Ni
        sample_x_s = []
        for iB, (sample_x_, sample_y_) in enumerate(self.data_loader):
            sample_x_s.append(sample_x_)
            break  # one batch suffices: the line below replicates its first image
        sample_x_s = [sample_x_s[0][0].unsqueeze(0)] * nSamples
        self.sample_x_ = torch.cat(sample_x_s)[:nSamples, :, :, :]
        self.sample_pose_ = torch.zeros(nSamples, self.Np)
        self.sample_illum_ = torch.zeros(nSamples, self.Ni)
        for iS in range(self.Np * self.Ni):
            ii = iS % self.Ni
            ip = iS // self.Ni
            self.sample_pose_[iS, ip] = 1
            self.sample_illum_[iS, ii] = 1
        self.sample_z_ = torch.rand(nSamples, self.Nz)

        if self.gpu_mode:
            self.sample_x_ = Variable(self.sample_x_.cuda(), volatile=True)
            self.sample_z_ = Variable(self.sample_z_.cuda(), volatile=True)
            self.sample_pose_ = Variable(self.sample_pose_.cuda(),
                                         volatile=True)
            self.sample_illum_ = Variable(self.sample_illum_.cuda(),
                                          volatile=True)
        else:
            self.sample_x_ = Variable(self.sample_x_, volatile=True)
            self.sample_z_ = Variable(self.sample_z_, volatile=True)
            self.sample_pose_ = Variable(self.sample_pose_, volatile=True)
            self.sample_illum_ = Variable(self.sample_illum_, volatile=True)
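A standalone sketch of the pose/illumination one-hot grid above: row iS selects pose iS // Ni and illumination iS % Ni, so all (pose, illum) pairs are enumerated exactly once.

import torch

Np, Ni = 9, 20
pose = torch.zeros(Np * Ni, Np)
illum = torch.zeros(Np * Ni, Ni)
for iS in range(Np * Ni):
    pose[iS, iS // Ni] = 1
    illum[iS, iS % Ni] = 1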
Example #10
    def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 16
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.dataroot_dir = args.dataroot_dir
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type

        if self.dataset == 'MultiPie' or self.dataset == 'miniPie':
            self.Nd = 346  # 200
            self.Np = 9
            self.Ni = 20
            self.Nz = 50
        elif self.dataset == 'CASIA-WebFace':
            self.Nd = 10885
            self.Np = 13
            self.Ni = 20
            self.Nz = 50

        # networks init
        # self.G = generator(self.Nz, self.Nd, self.Np, self.Ni)
        self.G = generator(0, 0, 0, 0)
        # self.G = DiscoganGenerator()
        self.D = Encoder('D', self.Nd, self.Np, self.Ni)
        self.G_optimizer = optim.Adam(self.G.parameters(),
                                      lr=args.lrG,
                                      betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(),
                                      lr=args.lrD,
                                      betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.CE_loss = nn.CrossEntropyLoss().cuda()
            self.MSE_loss = nn.MSELoss().cuda()
        else:
            self.CE_loss = nn.CrossEntropyLoss()
            self.MSE_loss = nn.MSELoss()

        # print('---------- Networks architecture -------------')
        # utils.print_network(self.G)
        # utils.print_network(self.D)
        # print('-----------------------------------------------')

        # load dataset
        data_dir = os.path.join(self.dataroot_dir, self.dataset)
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(datasets.FashionMNIST(
                data_dir,
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.CustomDataLoader(
                data_dir,
                transform=transforms.Compose([
                    transforms.CenterCrop(160),
                    transforms.Scale(64),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
        elif self.dataset == 'MultiPie' or self.dataset == 'miniPie':
            self.data_loader = DataLoader(utils.MultiPie(
                data_dir,
                transform=transforms.Compose([
                    transforms.Scale(100),
                    transforms.RandomCrop(96),
                    transforms.ToTensor()
                ]),
                cam_ids=[51]),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'CASIA-WebFace':
            self.data_loader = utils.CustomDataLoader(
                data_dir,
                transform=transforms.Compose([
                    transforms.Scale(100),
                    transforms.RandomCrop(96),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)

        # fixed samples for visualization (first batch)
        for iB, (sample_x_, sample_y_) in enumerate(self.data_loader):
            self.sample_x_ = sample_x_
            break
        if self.gpu_mode:
            self.sample_x_ = Variable(self.sample_x_.cuda(), volatile=True)
        else:
            self.sample_x_ = Variable(self.sample_x_, volatile=True)