# Example 1
    def __init__(self, args):
        """Set up a neural-style-transfer run from pre-generated stroke vectors.

        Loads the VGG style loss, the stroke-parameter arrays saved in
        ``args.vector_file``, and the content/style images, moving all
        tensors to the module-level ``device``.

        Args:
            args: parsed options namespace; this method reads
                ``transfer_mode``, ``vector_file``, ``content_img_path``
                and ``style_img_path``.
        """
        super(NeuralStyleTransfer, self).__init__(args=args)

        self._style_loss = loss.VGGStyleLoss(transfer_mode=args.transfer_mode,
                                             resize=True)

        # Fail early with an actionable message rather than letting np.load
        # raise a bare FileNotFoundError (matches the sibling variant's check).
        if not os.path.exists(args.vector_file):
            exit('vector file does not exist, pls check --vector_file, '
                 'or run demo.py first')
        npzfile = np.load(args.vector_file)

        # Stroke parameters: positions/shape, color, and alpha channels.
        self.x_ctt = torch.tensor(npzfile['x_ctt']).to(device)
        self.x_color = torch.tensor(npzfile['x_color']).to(device)
        self.x_alpha = torch.tensor(npzfile['x_alpha']).to(device)
        # Patches were saved as a flat m_grid*m_grid batch; recover the grid
        # side length (assumes a square grid — TODO confirm against the saver).
        self.m_grid = int(np.sqrt(self.x_ctt.shape[0]))

        # Index of the last anchor along dim 1 of x_ctt.
        self.anchor_id = self.x_ctt.shape[1] - 1

        # Content image: BGR -> RGB, scale to [0, 1], resize to one
        # 128x128 tile per grid cell, then split into per-cell patches.
        img_ = cv2.imread(args.content_img_path, cv2.IMREAD_COLOR)
        img_ = cv2.cvtColor(img_, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.
        self.img_ = cv2.resize(img_, (128 * self.m_grid, 128 * self.m_grid))
        self.img_batch = utils.img2patches(self.img_, self.m_grid).to(device)

        # Style image: same color/scale treatment, fixed 128x128 size, then a
        # light 2x2 box blur before converting to an NCHW tensor.
        style_img = cv2.imread(args.style_img_path, cv2.IMREAD_COLOR)
        self.style_img_ = cv2.cvtColor(style_img, cv2.COLOR_BGR2RGB).astype(
            np.float32) / 255.
        self.style_img = cv2.blur(cv2.resize(self.style_img_, (128, 128)),
                                  (2, 2))
        self.style_img = torch.tensor(self.style_img).permute(
            [2, 0, 1]).unsqueeze(0).to(device)
# Example 2
    def __init__(self, args):
        """Set up a neural-style-transfer run from pre-generated stroke vectors.

        Loads the VGG style loss, the stroke-parameter arrays saved in
        ``args.vector_file``, and the content/style images, moving all
        tensors to the module-level ``device``.

        Args:
            args: parsed options namespace; this method reads
                ``transfer_mode``, ``vector_file``, ``content_img_path``
                and ``style_img_path``.
        """
        super(NeuralStyleTransfer, self).__init__(args=args)

        self._style_loss = loss.VGGStyleLoss(transfer_mode=args.transfer_mode, resize=True)

        print('loading pre-generated vector file...')
        # Fail early with an actionable message ("is False" replaced with the
        # idiomatic "not"; "fist" typo in the message corrected to "first").
        if not os.path.exists(args.vector_file):
            exit('vector file does not exist, pls check --vector_file, or run demo.py first')
        npzfile = np.load(args.vector_file)

        # Stroke parameters: positions/shape, color, and alpha channels.
        self.x_ctt = torch.tensor(npzfile['x_ctt']).to(device)
        self.x_color = torch.tensor(npzfile['x_color']).to(device)
        self.x_alpha = torch.tensor(npzfile['x_alpha']).to(device)
        # Patches were saved as a flat m_grid*m_grid batch; recover the grid
        # side length (assumes a square grid — TODO confirm against the saver).
        self.m_grid = int(np.sqrt(self.x_ctt.shape[0]))

        # Index of the last anchor along dim 1 of x_ctt.
        self.anchor_id = self.x_ctt.shape[1] - 1

        # Content image: BGR -> RGB, scale to [0, 1], resize to one
        # 128x128 tile per grid cell, then split into per-cell patches.
        img_ = cv2.imread(args.content_img_path, cv2.IMREAD_COLOR)
        img_ = cv2.cvtColor(img_, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.
        self.img_ = cv2.resize(img_, (128*self.m_grid, 128*self.m_grid))
        self.img_batch = utils.img2patches(self.img_, self.m_grid).to(device)

        # Style image: same color/scale treatment, fixed 128x128 size, then a
        # light 2x2 box blur before converting to an NCHW tensor.
        style_img = cv2.imread(args.style_img_path, cv2.IMREAD_COLOR)
        self.style_img_ = cv2.cvtColor(style_img, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.
        self.style_img = cv2.blur(cv2.resize(self.style_img_, (128, 128)), (2, 2))
        self.style_img = torch.tensor(self.style_img).permute([2, 0, 1]).unsqueeze(0).to(device)