Exemplo n.º 1
0
 def test_selected_curve(self):
     '''
     Synthesize interpolation results along the selected curve of branches.

     For every test batch, each image is paired with a randomly permuted
     "reference" image from the same batch; the branches listed in
     ``self.args.branch_list`` are then enabled one after another
     (cumulatively), and the source image, every intermediate output and the
     reference image are written side by side to ``self.args.save_dir``.
     :return: None; one ``<save_dir>/<batch_idx>.jpg`` montage per batch.
     '''
     util.mkdir(self.args.save_dir)
     # one branch per grouped attribute plus one residual branch
     n_branch = len(self.args.attr.split(',')) + 1
     test_model = model.model_deploy(
         n_branch=n_branch,
         model_path=self.args.model_path,
         label=self.args.pth_label).eval().cuda()
     _, test_dataset = self.load_dataset()
     loader = DataLoader(test_dataset,
                         batch_size=self.args.batch_size,
                         shuffle=False)
     for i, data in enumerate(loader):
         img, _ = data
         img = util.toVariable(img).cuda()
         # random within-batch permutation provides the reference images
         idx = torch.randperm(img.size(0)).cuda()
         img_ref = img[idx]
         img_out = [img]
         v = torch.zeros(img.size(0), n_branch)
         for j in self.args.branch_list:
             # previously-set entries stay at 1, so branch effects accumulate
             # over the loop; note v is rebound to a cuda Variable on the
             # first iteration, so later writes hit that Variable in place
             v[:, j:j + 1] = 1
             v = util.toVariable(v).cuda()
             out_now = test_model(img, img_ref, v)
             img_out += [out_now]
         img_out += [img_ref]
         img_out = torch.cat(img_out)
         img_out = tensorWriter.untransformTensor(img_out.data.cpu())
         tensorWriter.writeTensor('{}/{}.jpg'.format(self.args.save_dir, i),
                                  img_out,
                                  nRow=img.size(0))
Exemplo n.º 2
0
    def attribute_manipulation(self):
        '''
        Perform attribute manipulation on a set of test images.

        Reference images matching ``self.args.filter_target_attr`` are drawn
        one at a time; for each test batch and each reference, the branch
        ``self.args.branch_idx`` is activated with ``self.args.strength`` and
        the manipulated outputs are appended next to the input images. The
        references themselves are saved once as ``reference.jpg``.
        :return: None; montages are written to ``self.args.save_dir``.
        '''
        from data.attributeDataset import Dataset_testing_filtered, Dataset_testing
        util.mkdir(self.args.save_dir)

        n_branch = len(self.args.attr.split(
            ',')) + 1  # n groupped attribute + 1 residual attribute.
        test_model = model.model_deploy(
            n_branch=n_branch,
            model_path=self.args.model_path,
            label=self.args.pth_label).eval().cuda()
        # test images either come from the standard split or from a folder
        # of jpg files supplied on the command line
        if self.args.test_folder is None:
            _, test_dataset = self.load_dataset()
        else:
            image_list = glob.glob(self.args.test_folder + '/*.jpg')
            test_dataset = Dataset_testing(image_list)
        ref_dataset = Dataset_testing_filtered(self.args.data_dir,
                                               self.args.filter_target_attr,
                                               n_samples=self.args.n_ref)
        loader = DataLoader(test_dataset,
                            batch_size=self.args.batch_size,
                            shuffle=True)
        ref_loader = DataLoader(ref_dataset, batch_size=1, shuffle=True)
        # for data, ref_data in zip(loader, ref_loader):
        # save all reference images once, stacked vertically (nRow=1)
        img_out = [tmp for tmp in ref_loader]
        img_out = torch.cat(img_out)
        img_out = tensorWriter.untransformTensor(img_out.data.cpu())
        tensorWriter.writeTensor('{}/reference.jpg'.format(self.args.save_dir),
                                 img_out,
                                 nRow=1)

        for i, data in enumerate(loader):
            img, _ = data
            img = util.toVariable(img).cuda()
            img_out = [img]
            for ref_data in ref_loader:
                print('proceesing the {}-th batch'.format(i))
                img_ref = ref_data
                img_ref = util.toVariable(img_ref).cuda()
                # enable only the selected branch, scaled by the requested
                # manipulation strength
                v = torch.zeros(img.size(0), n_branch)
                v[:, self.args.branch_idx:self.args.branch_idx +
                  1] = self.args.strength
                v = util.toVariable(v).cuda()
                # the single reference (batch 1) is broadcast to the batch
                img_ref_now = img_ref.expand_as(img)
                out_now = test_model(img, img_ref_now, v)
                img_out += [out_now]
                # img_out += [img_ref]
            img_out = torch.cat(img_out)
            img_out = tensorWriter.untransformTensor(img_out.data.cpu())
            tensorWriter.writeTensor('{}/{}.jpg'.format(self.args.save_dir, i),
                                     img_out,
                                     nRow=img.size(0))
            # NOTE(review): enumerate reassigns i each iteration, so this
            # increment only shifts the break test below — the loop stops
            # once (batch index + 1) exceeds n_test
            i += 1
            if i > self.args.n_test:
                break
Exemplo n.º 3
0
 def optimize_parameters(self, vgg_feat, gt):
     '''
     Run one training step: move the inputs to the GPU, forward the VGG
     features, backpropagate against the ground truth and update the model.

     :param vgg_feat: list of VGG feature tensors for the current batch
     :param gt: list of ground-truth tensors aligned with ``vgg_feat``
     :return: the network prediction ``w``
     '''
     self.model.train()
     self.optimizer.zero_grad()
     # wrap every input as a frozen (no-grad) cuda Variable
     feats = []
     for feat in vgg_feat:
         feats += [util.toVariable(feat, requires_grad=False).cuda()]
     targets = []
     for target in gt:
         targets += [util.toVariable(target, requires_grad=False).cuda()]
     w = self.forward(feats)
     self.backward_G(w, targets)
     self.optimizer.step()
     return w
Exemplo n.º 4
0
    def backward_recon_adam(self,
                            img,
                            net,
                            target_feature,
                            layerList,
                            lr=1e-3,
                            iter=500,
                            tv=10):
        '''
        Reconstruct an image whose features match ``target_feature`` by
        optimizing the image pixels directly with Adam.

        :param img: the initial image tensor (moved to the GPU internally)
        :param net: feature extractor; ``net.forward`` returns a list of
            feature maps indexable by the entries of ``layerList``
        :param target_feature: the target features, a list aligned with
            ``net``'s outputs
        :param layerList: indices of the layers whose features are matched
        :param lr: Adam learning rate
        :param iter: number of optimization steps
        :param tv: weight of the total-variation regularizer
        :return: the optimized image (a CUDA Variable)
        '''
        _img = util.toVariable(img.cuda(), requires_grad=True)
        target_feature = [
            util.toVariable(t.cuda(), requires_grad=False)
            for t in target_feature
        ]

        optim = torch.optim.Adam([_img], lr=lr)
        optim.n_steps = 0  # step counter stashed on the optimizer for logging

        MSELoss = torch.nn.MSELoss(size_average=False).cuda()
        tv_loss = TVLoss2()

        def step():
            # closure evaluated by optim.step: zero the image gradient,
            # compute the feature-matching + TV loss, and backpropagate
            if _img.grad is not None:
                _img.grad.data.fill_(0)
            feat = net.forward(_img)
            loss_all = []
            for layer in layerList:
                loss_all += [MSELoss(feat[layer], target_feature[layer])]
            losstv = tv_loss(_img)
            loss = sum(loss_all) + tv * losstv
            loss.backward()
            if optim.n_steps % 25 == 0:  # log every 25 steps
                msg = 'lossall=%f, ' % loss.data[0]
                for idx, l in enumerate(loss_all):
                    msg += 'loss=%f, ' % l.data[0]
                msg += 'loss_tv=%f' % losstv.data[0]
                print(msg)
            optim.n_steps += 1
            return loss

        for i in range(iter):
            optim.step(step)
        # the result is the optimized Variable itself; the original rebound
        # img = _img.cuda() on every iteration, which is loop-invariant, so
        # it is hoisted out of the loop here
        return _img.cuda()
Exemplo n.º 5
0
def train():
    '''
    Train the Facelet model on (image, feature-displacement) pairs.

    Builds the dataset from ``args.input_path`` / ``args.npz_path``, extracts
    VGG features for each batch and optimizes the Facelet network. Losses are
    printed every 10 steps, and checkpoints labelled ``args.effect`` are saved
    every ``args.snapshot`` steps, at each epoch end, and once more after
    training.
    '''
    import traceback  # local: only needed for per-batch error reporting

    image_path = args.input_path
    gt_path = args.npz_path
    npz_list, image_list = walk_data.glob_image_from_npz(gt_path, image_path, '*_%s.npz' % args.effect)
    trainingSet = Dataset(image_list=image_list, npz_list=npz_list)
    dataloader = DataLoader(trainingSet, batch_size=args.batch_size, shuffle=True, num_workers=4)

    vgg = torch.nn.DataParallel(VGG()).cuda()
    args.pretrained = False  # Facelet itself is trained from scratch here
    facelet = Facelet(args)
    facelet = facelet.cuda()

    global_step = 0

    if args.pretrain_path is not None:
        facelet.load(args.pretrain_path, args.pretrain_label)

    for epoch in range(args.epoch):
        for idx, data in enumerate(tqdm(dataloader), 0):
            try:
                image, gt = data
                vgg_feat = vgg.forward(util.toVariable(image).cuda())
                _ = facelet.optimize_parameters(vgg_feat, gt)
                if global_step % 10 == 0:
                    facelet.print_current_errors(epoch=epoch, i=idx)
                if global_step > 0 and global_step % args.snapshot == 0:
                    facelet.save(label=args.effect)
                global_step += 1
            except Exception:
                # best-effort: skip a failing batch, but report the full
                # traceback (not just str(e)) so failures stay diagnosable
                print('error while processing batch {}:'.format(idx))
                traceback.print_exc()
        facelet.save(label=args.effect)

    facelet.save(label=args.effect)
Exemplo n.º 6
0
def train():
    '''
    Train the Facelet model on (image, feature-displacement) pairs.

    Same pipeline as the try/except variant of ``train``: builds the dataset
    from ``args.input_path`` / ``args.npz_path``, extracts VGG features per
    batch and optimizes the Facelet network, printing losses every 10 steps
    and checkpointing (label ``args.effect``) every ``args.snapshot`` steps,
    at each epoch end, and once more after training.
    '''
    image_path = args.input_path
    gt_path = args.npz_path
    npz_list, image_list = walk_data.glob_image_from_npz(gt_path, image_path, '*_%s.npz' % args.effect)
    trainingSet = Dataset(image_list=image_list, npz_list=npz_list)
    dataloader = DataLoader(trainingSet, batch_size=args.batch_size, shuffle=True, num_workers=4)
    vgg = torch.nn.DataParallel(VGG()).cuda()
    args.pretrained = False  # Facelet itself is trained from scratch here
    facelet = Facelet(args)
    facelet = facelet.cuda()
    global_step = 0
    if args.pretrain_path is not None:
        facelet.load(args.pretrain_path, args.pretrain_label)
    for epoch in range(args.epoch):
        for idx, data in enumerate(tqdm(dataloader), 0):
            image, gt = data
            vgg_feat = vgg.forward(util.toVariable(image).cuda())
            w = facelet.optimize_parameters(vgg_feat, gt)
            if global_step % 10 == 0:
                facelet.print_current_errors(epoch=epoch, i=idx)
            # guard with global_step > 0 so no useless snapshot is written at
            # step 0 -- consistent with the sibling training entry point
            if global_step > 0 and global_step % args.snapshot == 0:
                facelet.save(label=args.effect)
            global_step += 1
        facelet.save(label=args.effect)
    facelet.save(label=args.effect)
Exemplo n.º 7
0
 def save_samples(self, global_step=0):
     '''
     Save qualitative samples for the current training step.

     Two montages are written: per-pair interpolation grids under
     ``samples/single`` (via ``interp_test``), and a cumulative-branch
     interpolation of the current batch under ``interp_curve``.

     :param global_step: training step used to name the output jpg files
     :return: None
     '''
     # evaluation mode for all sub-networks while sampling
     self.encoder.eval()
     self.interp_net.eval()
     self.discrim.eval()
     self.decoder.eval()
     n_pairs = 20
     save_path_single = os.path.join(self.opt.save_dir, 'samples/single')
     util.mkdir(save_path_single)
     map_single = []
     n_branches = self.interp_net.module.n_branch
     # pairwise interpolation maps for consecutive test images
     for i in range(n_pairs):
         img1, _ = self.test_dataset[i]
         img2, _ = self.test_dataset[i + 1]
         img1 = util.toVariable(img1).cuda()
         img2 = util.toVariable(img2).cuda()
         map_single += [self.interp_test(img1, img2)]
     map_single = torch.cat(map_single, dim=0)
     map_single = vs.untransformTensor(map_single)
     vs.writeTensor(os.path.join(save_path_single, '%d.jpg' % global_step),
                    map_single,
                    nRow=2)
     ##################################################
     save_path = os.path.join(self.opt.save_dir, 'interp_curve')
     util.mkdir(save_path)
     im_out = [self.image.data.cpu()]
     # baseline: all-zero select vector (no branch active)
     v = torch.zeros(self.image.size(0), n_branches)
     v = util.toVariable(v).cuda()
     feat = self.interp_net(self.feat, self.feat_permute, v)
     out_now = self.decoder(feat)
     im_out += [out_now.data.cpu()]
     for i in range(n_branches):
         log(i)
         # cumulative activation: branches 0..i are all enabled
         v = torch.zeros(self.image.size(0), n_branches)
         v[:, 0:i + 1] = 1
         v = util.toVariable(v).cuda()
         feat = self.interp_net(self.feat.detach(),
                                self.feat_permute.detach(), v)
         out_now = self.decoder(feat.detach())
         im_out += [out_now.data.cpu()]
     im_out += [self.image_permute.data.cpu()]
     im_out = [util.toVariable(tmp) for tmp in im_out]
     im_out = torch.cat(im_out, dim=0)
     im_out = vs.untransformTensor(im_out.data.cpu())
     vs.writeTensor('%s/%d.jpg' % (save_path, global_step),
                    im_out,
                    nRow=self.image.size(0))
Exemplo n.º 8
0
    def _generate_select_vector(self, n_branches, type='uniform'):
        '''
        Generate the select vector that chooses the interpolation curve.

        :param n_branches: number of attribute branches
        :param type: sampling strategy, one of
            'one_attr_randsample' -- one-hot row per sample with a single
                                     randomly-chosen attribute
            'uniform'             -- entries drawn uniformly from [0, 1)
            'uniform_binarize'    -- uniform samples binarized at 0.5
            'select_all'          -- all branches enabled (all ones)
            'select_none'         -- no branch enabled (all zeros)
        :return: nSample x select_dims cuda Variable indicating which
            attributes to transfer
        :raises NotImplementedError: for 'one_attr_batch' or unknown types
        '''
        if type == 'one_attr_randsample':
            rows = []
            for _ in range(self.image.size(0)):
                # pick one attribute index uniformly at random
                chosen = torch.randperm(n_branches)[0]
                one_hot_vec = torch.zeros(1, n_branches)
                one_hot_vec[:, chosen] = 1
                rows += [one_hot_vec]
            selected_vector = torch.cat(rows, dim=0)
        elif type == 'one_attr_batch':
            # one common attribute per batch -- not implemented yet.
            # (the original raised the NotImplemented singleton, which is a
            # TypeError, not a catchable NotImplementedError)
            raise NotImplementedError("type='one_attr_batch' is not supported")
        elif type == 'uniform':
            selected_vector = torch.rand(self.image.size(0), n_branches)
        elif type == 'uniform_binarize':
            selected_vector = (torch.rand(self.image.size(0), n_branches)
                               > 0.5).float()
        elif type == 'select_all':
            selected_vector = torch.ones(self.image.size(0), n_branches)
        elif type == 'select_none':
            selected_vector = torch.zeros(self.image.size(0), n_branches)
        else:
            raise NotImplementedError('unknown select-vector type: %r' % type)
        return util.toVariable(selected_vector).cuda()
Exemplo n.º 9
0
    def interp_test(self, img1, img2):
        '''
        Testing the interpolation effect between a pair of images.

        Builds one row per attribute branch (strengths 0, 0.5, 1 applied to a
        single branch) plus a final row with all branches interpolated
        together; each row is [img1 | three interpolations | img2].

        :param img1: source image (C x H x W, unbatched; unsqueezed here)
        :param img2: reference image (same shape as img1)
        :return: a torch image that combines the interpolation results,
            rows concatenated vertically
        '''
        img1 = img1.unsqueeze(0)
        img2 = img2.unsqueeze(0)
        feat1 = self.encoder(img1)
        feat2 = self.encoder(img2)
        result_map = []
        n_branches = self.interp_net.module.n_branch
        for attr_idx in range(n_branches):
            result_row = [img1.data.cpu()]
            for strength in [0, 0.5, 1]:
                # NOTE(review): this vector is n_branches + 1 wide while the
                # all-attribute loop below uses n_branches -- one of the two
                # sizes looks inconsistent; confirm against interp_net
                attr_vec = torch.zeros(1, n_branches + 1)
                attr_vec[:, attr_idx] = strength
                attr_vec = util.toVariable(attr_vec).cuda()
                interp_feat = self.interp_net(feat1, feat2, attr_vec)
                out_tmp = self.decoder(interp_feat)
                result_row += [out_tmp.data.cpu()]
            result_row += [img2.data.cpu()]
            result_row = torch.cat(result_row, dim=3)  # concat along width
            result_map += [result_row]
        result_row = [img1.data.cpu()]
        # interpolate all the attributes
        for strength in [0, 0.5, 1]:
            attr_vec = torch.ones(1, n_branches) * strength
            attr_vec = util.toVariable(attr_vec).cuda()
            interp_feat = self.interp_net(feat1, feat2, attr_vec)
            out_tmp = self.decoder(interp_feat)
            result_row += [out_tmp.data.cpu()]
        result_row += [img2.data.cpu()]
        result_row = torch.cat(result_row, dim=3)
        result_map += [result_row]

        result_map = torch.cat(result_map, dim=2)  # stack rows along height
        return result_map
Exemplo n.º 10
0
    def backward_recon_1feat(self,
                             img,
                             net,
                             target_feature,
                             lr=1,
                             iter=500,
                             tv=10):
        '''
        Reconstruct an image matching a single target feature map by
        optimizing the image pixels with L-BFGS.

        :param img: the initial image tensor (moved to the GPU internally)
        :param net: feature extractor; ``net.forward`` returns one feature map
        :param target_feature: the single target feature (detached here)
        :param lr: L-BFGS learning rate
        :param iter: maximum number of L-BFGS iterations
        :param tv: weight of the total-variation regularizer
        :return: the optimized image (a CUDA Variable)
        '''
        _img = util.toVariable(img.cuda(), requires_grad=True)
        target_feature = target_feature.detach()

        optim = torch.optim.LBFGS([_img], lr=lr, max_iter=iter)
        optim.n_steps = 0  # step counter stashed on the optimizer for logging

        MSELoss = torch.nn.MSELoss(size_average=False).cuda()
        tv_loss = TVLoss2()

        def step():
            # closure evaluated by optim.step: zero the image gradient,
            # compute the feature + TV loss, and backpropagate
            if _img.grad is not None:
                _img.grad.data.fill_(0)
            feat = net.forward(_img)
            loss_all = MSELoss(feat, target_feature)
            losstv = tv_loss(_img)
            loss = loss_all + tv * losstv
            loss.backward()
            if optim.n_steps % 25 == 0:  # log every 25 steps
                # loss_all is a single scalar loss here (unlike the
                # multi-layer variant, which logs a list); the original
                # iterated over it with enumerate, a copy-paste bug
                msg = 'lossall=%f, ' % loss.data[0]
                msg += 'loss=%f, ' % loss_all.data[0]
                msg += 'loss_tv=%f' % losstv.data[0]
                print(msg)
            optim.n_steps += 1
            return loss

        optim.step(step)
        return _img.cuda()
Exemplo n.º 11
0
    def optimize_parameters(self, global_step):
        '''
        Run one adversarial training step over the current batch.

        Updates, in order: decoder, discriminator, KGTransform, and (every
        ``self.opt.n_discrim`` steps) the encoder and interpolation network.
        The intermediate graphs are retained so each loss can reuse the shared
        forward pass.

        :param global_step: current global step, used to throttle the
            encoder/interp update relative to the discriminator
        :return: None; losses are accumulated in ``self.loss``
        '''
        self.encoder.train()
        self.interp_net.train()
        self.discrim.train()
        self.decoder.train()
        self.KGTransform.train()
        self.loss = OrderedDict()
        ''' define v '''
        # random select vector and a random within-batch permutation that
        # provides the "reference" sample for each image
        self.v = util.toVariable(self.generate_select_vector()).cuda()
        self.rand_idx = torch.randperm(self.image.size(0)).cuda()
        self.image_permute = self.image[self.rand_idx]
        self.attr_permute = []
        for att in self.attribute:
            self.attr_permute += [att[self.rand_idx]]
        ''' compute the target attributes '''
        # linear interpolation of each attribute towards its permuted
        # counterpart, weighted by the corresponding entry of v
        self.attr_interp = []
        for i, (att, attp) in enumerate(zip(self.attribute,
                                            self.attr_permute)):
            self.attr_interp += [att + self.v[:, i:i + 1] * (attp - att)]
        ''' pre-computed variables '''
        self.feat = self.encoder(self.image)
        self.feat_permute = self.feat[self.rand_idx]
        self.feat_interp = self.interp_net(self.feat, self.feat_permute,
                                           self.v)

        # retain_graph=True keeps the shared forward graph alive for the
        # subsequent backward passes below
        self.zero_grad()
        self.compute_dec_loss().backward(retain_graph=True)
        self.optim_decoder.step()

        self.zero_grad()
        self.compute_discrim_loss().backward(retain_graph=True)
        self.optim_discrim.step()

        self.zero_grad()
        self.compute_KGTransform_loss().backward(retain_graph=True)
        self.optim_KGTransform.step()

        # the generator side (encoder + interp net) is updated only once
        # every n_discrim discriminator updates
        if global_step % self.opt.n_discrim == 0:
            self.zero_grad()
            self.compute_enc_int_loss().backward()
            self.optim_encoder.step()
            self.optim_interp.step()
Exemplo n.º 12
0
 def compute_discrim_loss(self):
     '''
     Compute the discriminator loss: WGAN critic term + gradient penalty +
     an attribute-classification term.

     :return: the accumulated scalar stored in ``self.loss['discrim']``
     '''
     self.loss['discrim'] = 0
     # critic scores on real features and on interpolated (fake) features;
     # both are detached so only the discriminator receives gradients
     discrim_real, real_attr = self.discrim(self.feat.detach())
     discrim_interp, interp_attr = self.discrim(self.feat_interp.detach())
     ''' gradient penality '''
     # WGAN-GP style penalty evaluated on random points between real and
     # interpolated features
     gp_interpolate = self.random_interpolate(self.feat.data,
                                              self.feat_interp.data)
     gp_interpolate = util.toVariable(gp_interpolate, requires_grad=True)
     discrim_gp_interpolated, _ = self.discrim(gp_interpolate)
     self.loss['discrim_gp'] = util.gradient_penalty(
         gp_interpolate, discrim_gp_interpolated) * 100.
     self.loss['discrim'] += self.loss['discrim_gp']
     ''' the GAN loss '''
     # Wasserstein critic objective: fake score minus real score
     self.loss['discrim_gan'] = (discrim_interp - discrim_real).mean()
     self.loss['discrim'] += self.loss['discrim_gan']
     ''' the attribute classification loss '''
     # NOTE(review): classification is applied to interp_attr against the
     # real (detached) attributes; real_attr is unused here -- confirm this
     # pairing is intentional
     att_detach = [att.detach() for att in self.attribute]
     self.loss['discrim_cls'] = classification_loss_list(
         interp_attr, att_detach)
     self.loss['discrim'] += self.loss['discrim_cls']
     return self.loss['discrim']
Exemplo n.º 13
0
 def forward(self, fy, img=None):
     '''
     Forward the feature list through the model and, when an image is given,
     add the model output onto it as a residual.

     :param fy: list of feature tensors; each is wrapped as a Variable
     :param img: optional base image the prediction is added to
     :return: the model output, plus ``img`` when ``img`` is not None
     '''
     fy = [util.toVariable(f) for f in fy]
     y = self.model.forward(fy)
     # the original added img unconditionally, which raised a TypeError with
     # the declared default img=None; only add when a base image is supplied
     if img is not None:
         y = y + img
     return y
Exemplo n.º 14
0
 def set_input(self, input):
     '''
     Unpack an (image, attribute-list) pair and store both on the instance
     as cuda Variables.

     :param input: a 2-tuple of (image tensor, list of attribute tensors)
     '''
     image, attributes = input
     self.image = util.toVariable(image).cuda()
     wrapped = []
     for att in attributes:
         wrapped += [util.toVariable(att.cuda())]
     self.attribute = wrapped