Example #1
    def discrim_l2(self, y, y_, step):
        """
        facial semantic feature loss: parse both images into semantic maps,
        extract their edges, and compute an L1 distance in pixel space
        :param y: input photo, numpy array  [H, W, C]
        :param y_: generated image, tensor  [B, C, W, H]
        :param step: train step
        :return: l1 loss in pixel space
        """
        img1 = parse_evaluate(y.astype(np.uint8),
                              cp=self.parsing,
                              cuda=self.cuda)
        # convert the generated [B, C, W, H] tensor to an [H, W, C] array in 0-255
        y_ = y_.cpu().detach().numpy()
        y_ = np.squeeze(y_, axis=0)
        y_ = np.swapaxes(y_, 0, 2) * 255
        img2 = parse_evaluate(y_.astype(np.uint8),
                              cp=self.parsing,
                              cuda=self.cuda)
        edge_img1 = utils.img_edge(img1).astype(np.float32)
        edge_img2 = utils.img_edge(img2).astype(np.float32)
        w_g = 1.0
        w_r = 1.0

        if step % self.args.eval_prev_freq == 0:
            path = os.path.join(self.prev_path, "l2_{0}.jpg".format(step))
            edge1_v3 = 255. - ops.fill_grey(edge_img1)
            edge2_v3 = 255. - ops.fill_grey(edge_img2)
            merge = ops.merge_4image(y,
                                     y_,
                                     edge1_v3,
                                     edge2_v3,
                                     transpose=False)
            cv2.imwrite(path, merge)
        return np.mean(np.abs(w_r * edge_img1 - w_g * edge_img2))
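The loss compares edge maps extracted from the two face-parsing results. A minimal sketch of an edge extractor in that spirit (assuming utils.img_edge wraps something like OpenCV's Canny; the function name and threshold values below are illustrative, not taken from the repo):

    import cv2
    import numpy as np

    def img_edge_sketch(parsing_map, low=50, high=150):
        # Hypothetical edge extractor: turn a face-parsing result into a
        # binary edge image; threshold values are illustrative only.
        img = parsing_map.astype(np.uint8)
        if img.ndim == 3:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return cv2.Canny(img, low, high)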
Example #2
 def capture(path, tensor1, tensor2, parse, cuda):
     """
     imitator snapshot
     :param cuda: use gpu
     :param path: save path
     :param tensor1: input photo
     :param tensor2: generated image
     :param parse: parse checkpoint's path
     """
     img1 = ops.tensor_2_image(tensor1)[0].swapaxes(0, 1).astype(np.uint8)
     img2 = ops.tensor_2_image(tensor2)[0].swapaxes(0, 1).astype(np.uint8)
     img1 = cv2.resize(img1, (512, 512), interpolation=cv2.INTER_LINEAR)
     img3 = utils.faceparsing_ndarray(img1, parse, cuda)
     img4 = utils.img_edge(img3)
     img4 = 255 - ops.fill_gray(img4)
     image = ops.merge_4image(img1, img2, img3, img4, transpose=False)
     cv2.imwrite(path, image)
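The snapshot tiles the photo, the generated image, the parsing result, and the edge map into one preview; ops.fill_gray is presumably what expands the single-channel edge map to three channels so it can sit next to the colour images. A minimal sketch of such a helper (the name fill_gray_sketch and its exact behaviour are assumptions, not the repo's implementation):

    import numpy as np

    def fill_gray_sketch(img):
        # Hypothetical stand-in for ops.fill_gray: replicate a single-channel
        # image across three channels so it can be tiled next to BGR images.
        if img.ndim == 2:
            img = img[:, :, np.newaxis]
        if img.shape[2] == 1:
            img = np.repeat(img, 3, axis=2)
        return img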
Example #3
 def output(self, x, refer, step):
     """
     capture the evaluation result for preview
     :param x: face parameters with grad, torch tensor [b, params]
     :param refer: reference picture
     :param step: train step
     """
     self.write(x)
     y_ = self.imitator(x)
     y_ = y_.cpu().detach().numpy()
     y_ = np.squeeze(y_, axis=0)
     y_ = np.swapaxes(y_, 0, 2) * 255
     y_ = y_.astype(np.uint8)
     im1 = self.l2_c[0]
     im2 = self.l2_c[1]
     np_im1 = im1.cpu().detach().numpy()
     np_im2 = im2.cpu().detach().numpy()
     f_im1 = ops.fill_gray(np_im1)
     f_im2 = ops.fill_gray(np_im2)
     image_ = ops.merge_4image(refer, y_, f_im1, f_im2, transpose=False)
     path = os.path.join(self.prev_path, "eval_{0}.jpg".format(step))
     cv2.imwrite(path, image_)
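Examples #1 and #3 both convert the imitator's [B, C, W, H] output tensor back into an [H, W, C] uint8 array before writing it with OpenCV. The same conversion as a standalone helper (the name tensor_to_image_sketch is hypothetical):

    import numpy as np
    import torch

    def tensor_to_image_sketch(t):
        # [B, C, W, H] float tensor in [0, 1] -> [H, W, C] uint8 image
        arr = t.cpu().detach().numpy()
        arr = np.squeeze(arr, axis=0)        # drop batch dim -> [C, W, H]
        arr = np.swapaxes(arr, 0, 2) * 255.  # -> [H, W, C], scale to 0-255
        return arr.astype(np.uint8)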
Example #4
 def capture(self, tensor1, tensor2, name, step, cuda):
     """
     extractor snapshot
     :param tensor1: input photo
     :param tensor2: generated image
     :param cuda: use gpu to speed up
     :param step: train step
     :param name: picture name
     """
     path = "{1}/{2}_{0}.jpg".format(step, self.prev_path, name[3:-6])
     orig_path = os.path.join(self.args.path_to_dataset + "2", name)
     img3 = cv2.imread(orig_path)
     img4 = utils.faceparsing_ndarray(img3, self.args.parsing_checkpoint, cuda)
     # map the normalized tensors back to inverted 0-255 grayscale images
     image1 = 255 - tensor1.cpu().detach().numpy() * 255
     image2 = 255 - tensor2.cpu().detach().numpy() * 255
     shape = image1.shape
     if len(shape) == 2:
         image1 = image1[:, :, np.newaxis]
         image2 = image2[:, :, np.newaxis]
     img1 = ops.fill_gray(image1)
     img2 = ops.fill_gray(image2)
     img = ops.merge_4image(img1, img2, img3, img4)
     cv2.imwrite(path, img)
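Every example ends by tiling four images into a single preview with ops.merge_4image. A minimal 2x2 tiling sketch, assuming all four inputs are [H, W, 3] arrays of the same size (the repo's ops.merge_4image also takes transpose and show flags and may resize its inputs):

    import numpy as np

    def merge_4image_sketch(im1, im2, im3, im4):
        # Hypothetical 2x2 grid: top row im1|im2, bottom row im3|im4
        top = np.concatenate([im1, im2], axis=1)
        bottom = np.concatenate([im3, im4], axis=1)
        return np.concatenate([top, bottom], axis=0)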
Example #5
File: main.py  Project: bluesea/face-nn
            p2 = os.path.join(path, "a-" + file)
            al = align.face_features(p, p2)
            ev = utils.parse_evaluate(al, args.parsing_checkpoint, cuda=cuda)
            p = os.path.join(path, "b-" + file)
            cv2.imwrite(p, ev)
            ev = 255 - utils.img_edge(ev)
            p = os.path.join(path, "c-" + file)
            cv2.imwrite(p, ev)
    elif args.phase == "dataset":
        dataset = FaceDataset(args, "test")
        dataset.pre_process(cuda)
    elif args.phase == "preview":
        log.info("preview picture")
        path = "../export/regular/model.jpg"
        img = cv2.imread(path)
        img2 = utils.parse_evaluate(img, args.parsing_checkpoint, cuda)
        img3 = utils.img_edge(img2)
        img3_ = ops.fill_grey(img3)
        img4 = align.face_features(path)
        log.info("{0} {1} {2} {3}".format(img.shape, img2.shape, img3_.shape,
                                          img4.shape))
        ops.merge_4image(img, img2, img3_, img4, show=True)
    elif args.phase == "evaluate":
        log.info("evaluation mode start")
        evl = Evaluate(args, cuda=cuda)
        img = cv2.imread(args.eval_image).astype(np.float32)
        x_ = evl.itr_train(img)
        evl.output(x_, img)
    else:
        log.error("not known phase %s", args.phase)
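The snippet dispatches on args.phase; the attributes used above (parsing_checkpoint, eval_image, path_to_dataset) suggest a command-line entry point roughly like the following sketch (flag names and choices are assumptions, not taken from the repo's actual argument parser):

    import argparse

    parser = argparse.ArgumentParser(description="face-nn entry point (sketch)")
    parser.add_argument("--phase", choices=["dataset", "preview", "evaluate"])
    parser.add_argument("--parsing_checkpoint", help="path to the face-parsing model")
    parser.add_argument("--eval_image", help="photo used in the evaluate phase")
    parser.add_argument("--path_to_dataset", help="root of the training dataset")
    args = parser.parse_args()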