Example #1
    def __init__(self, args):
        super(NeuralStyleTransfer, self).__init__(args=args)

        # Perceptual style loss computed on VGG features.
        self._style_loss = loss.VGGStyleLoss(transfer_mode=args.transfer_mode,
                                             resize=True)

        # Load the pre-generated stroke vectors (produced by demo.py).
        npzfile = np.load(args.vector_file)

        # Stroke shape, color, and alpha parameters; the strokes are grouped
        # into an m_grid x m_grid grid of patches.
        self.x_ctt = torch.tensor(npzfile['x_ctt']).to(device)
        self.x_color = torch.tensor(npzfile['x_color']).to(device)
        self.x_alpha = torch.tensor(npzfile['x_alpha']).to(device)
        self.m_grid = int(np.sqrt(self.x_ctt.shape[0]))

        self.anchor_id = self.x_ctt.shape[1] - 1

        # Content image: BGR -> RGB, scaled to [0, 1], resized to the patch
        # grid, and split into a per-patch batch.
        img_ = cv2.imread(args.content_img_path, cv2.IMREAD_COLOR)
        img_ = cv2.cvtColor(img_, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.
        self.img_ = cv2.resize(img_, (128 * self.m_grid, 128 * self.m_grid))
        self.img_batch = utils.img2patches(self.img_, self.m_grid).to(device)

        # Style image: RGB in [0, 1], downsized to 128 x 128, lightly blurred,
        # and converted to an NCHW tensor.
        style_img = cv2.imread(args.style_img_path, cv2.IMREAD_COLOR)
        self.style_img_ = cv2.cvtColor(style_img, cv2.COLOR_BGR2RGB).astype(
            np.float32) / 255.
        self.style_img = cv2.blur(cv2.resize(self.style_img_, (128, 128)),
                                  (2, 2))
        self.style_img = torch.tensor(self.style_img).permute(
            [2, 0, 1]).unsqueeze(0).to(device)
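Both the content patches and the stroke groups above come in units of an m_grid x m_grid grid, and the split itself is done by utils.img2patches. Below is a minimal, hypothetical sketch of that kind of grid split, assuming m_grid * m_grid non-overlapping 128 x 128 patches stacked into an NCHW batch; the project's own helper may differ, e.g. in patch size, overlap, or normalization.

import numpy as np
import torch

def split_into_patches(img, m_grid, patch_size=128):
    # img: (H, W, 3) float array with H = W = patch_size * m_grid.
    patches = []
    for i in range(m_grid):
        for j in range(m_grid):
            patch = img[i * patch_size:(i + 1) * patch_size,
                        j * patch_size:(j + 1) * patch_size, :]
            # HWC -> CHW for each patch.
            patches.append(torch.tensor(patch).permute(2, 0, 1))
    # Stack into an (m_grid**2, 3, patch_size, patch_size) batch.
    return torch.stack(patches, dim=0)

img = np.random.rand(128 * 3, 128 * 3, 3).astype(np.float32)
batch = split_into_patches(img, m_grid=3)   # torch.Size([9, 3, 128, 128])

This mirrors the two-argument call in Example #1; Example #3 below instead passes the renderer's out_size as an explicit patch size.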
Example #2
    def __init__(self, args):
        super(NeuralStyleTransfer, self).__init__(args=args)

        self._style_loss = loss.VGGStyleLoss(transfer_mode=args.transfer_mode, resize=True)

        print('loading pre-generated vector file...')
        if not os.path.exists(args.vector_file):
            exit('vector file does not exist, please check --vector_file, or run demo.py first')
        npzfile = np.load(args.vector_file)

        self.x_ctt = torch.tensor(npzfile['x_ctt']).to(device)
        self.x_color = torch.tensor(npzfile['x_color']).to(device)
        self.x_alpha = torch.tensor(npzfile['x_alpha']).to(device)
        self.m_grid = int(np.sqrt(self.x_ctt.shape[0]))

        self.anchor_id = self.x_ctt.shape[1] - 1

        img_ = cv2.imread(args.content_img_path, cv2.IMREAD_COLOR)
        img_ = cv2.cvtColor(img_, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.
        self.img_ = cv2.resize(img_, (128*self.m_grid, 128*self.m_grid))
        self.img_batch = utils.img2patches(self.img_, self.m_grid).to(device)

        style_img = cv2.imread(args.style_img_path, cv2.IMREAD_COLOR)
        self.style_img_ = cv2.cvtColor(style_img, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.
        self.style_img = cv2.blur(cv2.resize(self.style_img_, (128, 128)), (2, 2))
        self.style_img = torch.tensor(self.style_img).permute([2, 0, 1]).unsqueeze(0).to(device)
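Both variants expect an argparse-style args object with at least the fields referenced above. A minimal, hypothetical way to assemble one (paths and values are placeholders, and the parent class's __init__ will likely require additional options not visible in these snippets):

import argparse

args = argparse.Namespace(
    transfer_mode=1,                    # assumed value; forwarded to loss.VGGStyleLoss
    vector_file='output/strokes.npz',   # hypothetical path to the pre-generated .npz
    content_img_path='content.jpg',     # hypothetical path
    style_img_path='style.jpg',         # hypothetical path
)
nst = NeuralStyleTransfer(args=args)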
Example #3
    def __init__(self, args):
        super(Painter, self).__init__(args=args)

        # Painting is done block-wise on an m_grid x m_grid grid of patches.
        self.m_grid = args.m_grid

        # Total stroke budget for the whole image.
        self.max_m_strokes = args.max_m_strokes

        # Target image: BGR -> RGB, scaled to [0, 1], resized so each grid
        # cell matches the renderer's output size.
        self.img_path = args.img_path
        self.img_ = cv2.imread(args.img_path, cv2.IMREAD_COLOR)
        self.img_ = (
            cv2.cvtColor(self.img_, cv2.COLOR_BGR2RGB).astype(np.float32) /
            255.0)
        self.input_aspect_ratio = self.img_.shape[0] / self.img_.shape[1]
        self.img_ = cv2.resize(
            self.img_,
            (self.net_G.out_size * args.m_grid,
             self.net_G.out_size * args.m_grid),
            interpolation=cv2.INTER_AREA,
        )

        # Distribute the stroke budget evenly across the grid cells.
        self.m_strokes_per_block = int(args.max_m_strokes /
                                       (args.m_grid * args.m_grid))

        self.img_batch = utils.img2patches(self.img_, args.m_grid,
                                           self.net_G.out_size).to(device)

        self.final_rendered_images = None
Example #4
    def __init__(self, args):
        super(Painter, self).__init__(args=args)

        self.m_grid = args.m_grid

        self.max_m_strokes = args.max_m_strokes

        self.img_path = args.img_path
        self.img_ = cv2.imread(args.img_path, cv2.IMREAD_COLOR)
        self.img_ = cv2.cvtColor(self.img_, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.
        self.img_ = cv2.resize(self.img_, (128 * args.m_grid, 128 * args.m_grid))

        self.m_strokes_per_block = int(args.max_m_strokes / (args.m_grid * args.m_grid))

        self.img_batch = utils.img2patches(self.img_, args.m_grid).to(device)

        self.final_rendered_images = None
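As with the examples above, Painter takes an argparse-style args object. A minimal, hypothetical set of fields based on these snippets (grid size and stroke budget are placeholder values, and the parent class will likely require more options, e.g. for the neural renderer net_G):

import argparse

args = argparse.Namespace(
    img_path='test_images/apple.jpg',   # hypothetical path
    m_grid=3,                           # placeholder: 3 x 3 grid of patches
    max_m_strokes=500,                  # placeholder stroke budget
)
painter = Painter(args=args)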