def embed_ids_batch(self, ids: np.ndarray) -> torch.Tensor:
    """Embed a whole batch of ids at once and return the result on the CPU."""
    ids = torch.tensor(ids)
    with torch.no_grad():
        if cuda_utils.CUDA_ENABLED and self.use_cuda_if_available:
            ids = ids.cuda()
        return run_model(self.model, ids, -1).cpu()
Example #2
def generate(
    trained_model: torch.nn.Module, img_size: list, y: np.ndarray, temp=0.8, cuda=False
) -> np.ndarray:
    trained_model.eval()
    generated_image = np.zeros([y.shape[0], 1] + img_size, dtype="float32")
    # Note: torch.from_numpy() shares memory with the source NumPy array, so
    # changes to either object are reflected in the other; this also makes the
    # conversion back to a NumPy array at the end cheap.
    generated_image, y = torch.from_numpy(generated_image), torch.from_numpy(y)
    if cuda:
        y, generated_image = y.cuda(), generated_image.cuda()

    p_bar = ProgressBar()
    print("Generating images...")
    with torch.no_grad():
        for r in p_bar(range(img_size[0])):
            for c in range(img_size[1]):
                # Predict the distribution over pixel values, sharpen it with
                # the temperature, and sample the pixel at position (r, c).
                out = trained_model(generated_image, y)
                p = torch.exp(out)[:, :, r, c]
                p = torch.pow(p, 1 / temp)
                p = p / torch.sum(p, -1, keepdim=True)
                sample = p.multinomial(1)
                generated_image[:, :, r, c] = sample.float() / (out.shape[1] - 1)

    utils.clearline()
    utils.clearline()
    return (255 * generated_image.cpu().numpy()).astype("uint8")
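The tip in the comment above is easy to verify in isolation. A minimal, self-contained sketch (the variable names are illustrative, not from the code above):

import numpy as np
import torch

np_array = np.zeros(3, dtype="float32")
shared = torch.from_numpy(np_array)  # shares memory with np_array
shared[0] = 1.0                      # a write through the tensor...
print(np_array[0])                   # ...is visible in the array: 1.0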
def predict(self, img: np.ndarray) -> np.ndarray:
    img = self.transform_test(img)
    if self.args.cuda:
        img = img.cuda()
    with torch.no_grad():
        output = self.model(img)
    pred = output.cpu().numpy()
    return pred
Example #4
def _to_torch(image: np.ndarray) -> _t.Tuple[_torch.Tensor, tuple]:
    shape = image.shape
    transform = _torchvision.transforms.Compose(
        [_transforms.Resize(320), _transforms.ToTensor()]
    )
    # Resize, convert to a float tensor, and add a batch dimension.
    image = transform(image)
    image.unsqueeze_(0)
    image = image.type(_torch.FloatTensor)
    # Variable is a no-op wrapper in PyTorch >= 0.4; it is kept here only for
    # compatibility with the surrounding code.
    image = Variable(image.cuda())
    return image, shape
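Since Variable has been a no-op since PyTorch 0.4, the same preprocessing can be written without it. A hedged sketch, assuming the input is a PIL image and the same 320-pixel resize; the name to_torch and the device argument are illustrative, not part of the original API:

import torch
import torchvision.transforms as transforms
from PIL import Image

def to_torch(image: Image.Image, device: str = "cuda"):
    # Resize, convert to a float tensor in [0, 1], and add a batch dimension.
    transform = transforms.Compose([transforms.Resize(320), transforms.ToTensor()])
    tensor = transform(image).unsqueeze(0).to(device)
    return tensor, image.size  # (width, height) of the original image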
Example #5
def predict_(self, inp: np.ndarray):
    org = np.array(inp)
    inp = self.trans(inp)
    inp = torch.unsqueeze(inp, 0)
    if self.use_gpu:
        inp = inp.cuda()
    with torch.no_grad():
        output = self.model(inp)
        output = self.getImage(output, self.getImageMode)
    # Replicate the single-channel output into three identical channels.
    output = output[:, :, np.newaxis]
    output = np.concatenate((output, output, output), axis=-1)
    return output[:, :, 0], output * 255
Example #6
def model_inference_helper(x_0: np.ndarray, x_1: np.ndarray):
    """Input: two frames x_0 and x_1; Output: the model output y_0."""
    x_0 = torch.from_numpy(x_0).type(args.dtype)
    x_1 = torch.from_numpy(x_1).type(args.dtype)
    y_0 = torch.FloatTensor()

    intWidth = x_0.size(2)
    intHeight = x_0.size(1)
    channel = x_0.size(0)
    assert channel == 3, "input frame's channel is not equal to 3."

    # Pad each spatial dimension up to a multiple of 128;
    # (x >> 7) << 7 rounds x down to the nearest multiple of 128.
    if intWidth != ((intWidth >> 7) << 7):
        intWidth_pad = ((intWidth >> 7) + 1) << 7  # round up to a multiple of 128
        intPaddingLeft = int((intWidth_pad - intWidth) / 2)
        intPaddingRight = intWidth_pad - intWidth - intPaddingLeft
    else:
        intWidth_pad = intWidth
        intPaddingLeft = 32
        intPaddingRight = 32

    if intHeight != ((intHeight >> 7) << 7):
        intHeight_pad = ((intHeight >> 7) + 1) << 7  # round up to a multiple of 128
        intPaddingTop = int((intHeight_pad - intHeight) / 2)
        intPaddingBottom = intHeight_pad - intHeight - intPaddingTop
    else:
        intHeight_pad = intHeight
        intPaddingTop = 32
        intPaddingBottom = 32

    # torch.set_grad_enabled(False)
    x_0 = Variable(torch.unsqueeze(x_0, 0))
    x_1 = Variable(torch.unsqueeze(x_1, 0))
    x_0 = torch.nn.ReplicationPad2d(
        [intPaddingLeft, intPaddingRight, intPaddingTop, intPaddingBottom])(x_0)
    x_1 = torch.nn.ReplicationPad2d(
        [intPaddingLeft, intPaddingRight, intPaddingTop, intPaddingBottom])(x_1)

    # if use_cuda:
    x_0 = x_0.cuda()
    x_1 = x_1.cuda()

    # y_s, offset, filter = model(torch.stack((X0, X1), dim=0))
    y_s, _, _ = model(torch.stack((x_0, x_1), dim=0))
    y_0 = y_s[args.save_which]

    torch.cuda.empty_cache()

    # Normalize y_0 to a list of NumPy arrays so the crop below works in both cases.
    if not isinstance(y_0, list):
        y_0 = [y_0.data.cpu().numpy()]
    else:
        y_0 = [item.data.cpu().numpy() for item in y_0]
    # Undo the padding, rescale to [0, 255], and move channels last.
    y_0 = [
        np.transpose(
            255.0 * item.clip(0, 1.0)[
                0, :,
                intPaddingTop:intPaddingTop + intHeight,
                intPaddingLeft:intPaddingLeft + intWidth,
            ],
            (1, 2, 0),
        ) for item in y_0
    ]

    return y_0
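The bit-shift padding above rounds each spatial dimension up to the next multiple of 128. A small standalone sketch with hypothetical frame sizes (the helper name is illustrative, not part of the code above):

def pad_to_multiple_of_128(size: int) -> tuple:
    # (size >> 7) << 7 rounds size down to the nearest multiple of 128.
    if size != ((size >> 7) << 7):
        padded = ((size >> 7) + 1) << 7
        before = (padded - size) // 2
        after = padded - size - before
    else:
        padded, before, after = size, 32, 32
    return padded, before, after

print(pad_to_multiple_of_128(1280))  # (1280, 32, 32): already a multiple of 128
print(pad_to_multiple_of_128(720))   # (768, 24, 24): padded up to 768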
def get_tensor_from_array(arr: np.ndarray) -> torch.Tensor:
    # torch.Tensor(arr) copies the data and casts it to float32.
    arr = torch.Tensor(arr)
    if torch.cuda.is_available():
        arr = arr.cuda()
    return arr
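For code that should run with or without a GPU, the same conversion can take an explicit device. A hedged variant; the device parameter and the use of torch.as_tensor are an assumption, not part of the original API:

import numpy as np
import torch

def get_tensor_from_array_on(arr: np.ndarray, device: torch.device) -> torch.Tensor:
    # torch.as_tensor avoids a copy when arr is already float32 and the target
    # device is the CPU; otherwise it copies like torch.Tensor(arr).
    return torch.as_tensor(arr, dtype=torch.float32, device=device)

# Usage:
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# t = get_tensor_from_array_on(np.ones((2, 3), dtype="float32"), device)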