Example #1
    def _get_y(self, idx):
        joints_file = self.dataset.get_joint_from_id(idx)

        joints = torch.tensor(joints_file["joints"])
        mask = torch.tensor(joints_file["mask"]).type(torch.bool)
        return (
            geometry.normalize_pixel_coordinates(joints, self.max_h,
                                                 self.max_w),
            mask,
        )
Example #2
def _get_window_grid_kernel2d(h: int, w: int) -> torch.Tensor:
    '''Helper function, which generates a kernel with window
    coordinates, residual to the window center.
    Args:
         h (int): kernel height
         w (int): kernel width
    Returns:
        conv_kernel (torch.Tensor) [2x1xhxw]
    '''
    window_grid2d = create_meshgrid(h, w, False)
    window_grid2d = normalize_pixel_coordinates(window_grid2d, h, w)
    conv_kernel = window_grid2d.permute(3, 0, 1, 2)
    return conv_kernel
Example #3
def _get_window_grid_kernel2d(h: int, w: int, device: torch.device = torch.device('cpu')) -> torch.Tensor:
    r"""Helper function, which generates a kernel to with window coordinates,
       residual to window center.

    Args:
         h: kernel height.
         w: kernel width.
         device: device on which to generate the kernel.

    Returns:
        conv_kernel [2x1xhxw]
    """
    window_grid2d = create_meshgrid(h, w, False, device=device)
    window_grid2d = normalize_pixel_coordinates(window_grid2d, h, w)
    conv_kernel = window_grid2d.permute(3, 0, 1, 2)
    return conv_kernel
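
A quick sketch of what this kernel looks like for a 3x3 window, assuming create_meshgrid and normalize_pixel_coordinates come from kornia (kornia.utils and kornia.geometry.conversions, respectively):

kernel = _get_window_grid_kernel2d(3, 3)
print(kernel.shape)  # torch.Size([2, 1, 3, 3])

# Channel 0 holds x offsets and channel 1 holds y offsets, each normalized
# to [-1, 1] relative to the window center:
print(kernel[0, 0])  # every row reads    [-1., 0., 1.]
print(kernel[1, 0])  # every column reads [-1., 0., 1.]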
Example #4
File: demo.py Project: m4nh/RAFT
def demo(args):
    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(args.model))

    model = model.module
    model.to(DEVICE)
    model.eval()

    with torch.no_grad():
        images = glob.glob(os.path.join(args.path, '*.png')) + \
            glob.glob(os.path.join(args.path, '*.jpg')) + \
            glob.glob(os.path.join(args.path, '*.tiff'))

        images = sorted(images)
        for imfile1, imfile2 in zip(images[:-1], images[1:]):
            image1 = load_image(imfile1)
            image2 = load_image(imfile2)

            padder = InputPadder(image1.shape)
            image1, image2 = padder.pad(image1, image2)

            t1 = time.perf_counter()
            flow_low, flow_up = model(image1, image2, iters=5, test_mode=True)
            t2 = time.perf_counter()
            print("Time: ", t2 - t1)
            # warp image2 onto image1 using the predicted flow
            height, width = image1.shape[-2:]
            grid = geometry.create_meshgrid(height,
                                            width,
                                            normalized_coordinates=False).to(
                                                image1.device)
            print("SPODSAOPDA", flow_up.shape, grid.shape, grid.min(),
                  grid.max())
            grid = flow_up.permute(0, 2, 3, 1) + grid
            flow_up_norm = geometry.normalize_pixel_coordinates(
                grid, height, width)  # BxHxWx2

            image1_warped = F.grid_sample(image2,
                                          flow_up_norm,
                                          align_corners=True)

            view(image1_warped, 'img2_warped')
            view(image1, 'img1')
            viz(image1, flow_up)
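
The warping step inside the loop can be isolated into a small helper. A sketch, assuming geometry is the same kornia namespace the demo uses:

import torch
import torch.nn.functional as F
from kornia import geometry

def warp_by_flow(image2: torch.Tensor, flow: torch.Tensor) -> torch.Tensor:
    # Backward-warp image2 (Bx3xHxW) with a dense flow field (Bx2xHxW).
    _, _, h, w = image2.shape
    grid = geometry.create_meshgrid(h, w, normalized_coordinates=False).to(image2.device)
    grid = flow.permute(0, 2, 3, 1) + grid  # BxHxWx2, pixel coordinates
    grid_norm = geometry.normalize_pixel_coordinates(grid, h, w)
    return F.grid_sample(image2, grid_norm, align_corners=True)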
Example #5
def conv_soft_argmax2d(input: torch.Tensor,
                       kernel_size: Tuple[int, int] = (3, 3),
                       stride: Tuple[int, int] = (1, 1),
                       padding: Tuple[int, int] = (1, 1),
                       temperature: Union[torch.Tensor, float] = torch.tensor(1.0),
                       normalized_coordinates: bool = True,
                       eps: float = 1e-8,
                       output_value: bool = False) -> Union[torch.Tensor,
                                                            Tuple[torch.Tensor, torch.Tensor]]:
    r"""Function that computes the convolutional spatial Soft-Argmax 2D over the windows
    of a given input heatmap. Function has two outputs: argmax coordinates and the softmaxpooled heatmap values
    themselves. On each window, the function computed is

    .. math::
        ij(X) = \frac{\sum_{(i,j) \in X} (i,j) \cdot \exp(x_{ij} / T)}{\sum_{(i,j) \in X} \exp(x_{ij} / T)}

    .. math::
        val(X) = \frac{\sum_{(i,j) \in X} x_{ij} \cdot \exp(x_{ij} / T)}{\sum_{(i,j) \in X} \exp(x_{ij} / T)}

    where :math:`T` is the temperature.

    Args:
        input (torch.Tensor): the input heatmap with shape :math:`(N, C, H_{in}, W_{in})`.
        kernel_size (Tuple[int,int]): the size of the window.
        stride (Tuple[int,int]): the stride of the window.
        padding (Tuple[int,int]): input zero padding.
        temperature (torch.Tensor): factor to apply to input. Default is 1.
        normalized_coordinates (bool): whether to return the coordinates normalized in the range of [-1, 1]. Otherwise,
                                       it will return the coordinates in the range of the input shape. Default is True.
        eps (float): small value to avoid zero division. Default is 1e-8.
        output_value (bool): if True, the softmax-pooled values are returned together with the coordinates;
                             if False, only the coordinates are returned.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, 2, H_{out}, W_{out})`, :math:`(N, C, H_{out}, W_{out})`, where

         .. math::
                  H_{out} = \left\lfloor\frac{H_{in}  + 2 \times \text{padding}[0] -
                  (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor

         .. math::
                  W_{out} = \left\lfloor\frac{W_{in}  + 2 \times \text{padding}[1] -
                  (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor

    Examples::
        >>> input = torch.randn(20, 16, 50, 32)
        >>> nms_coords, nms_val = conv_soft_argmax2d(input, (3,3), (2,2), (1,1))
    """
    if not torch.is_tensor(input):
        raise TypeError("Input type is not a torch.Tensor. Got {}"
                        .format(type(input)))

    if not len(input.shape) == 4:
        raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
                         .format(input.shape))

    if temperature <= 0:
        raise ValueError("Temperature should be positive float or tensor. Got: {}"
                         .format(temperature))

    b, c, h, w = input.shape
    kx, ky = kernel_size
    device: torch.device = input.device
    dtype: torch.dtype = input.dtype
    input = input.view(b * c, 1, h, w)

    center_kernel: torch.Tensor = _get_center_kernel2d(kx, ky, device).to(dtype)
    window_kernel: torch.Tensor = _get_window_grid_kernel2d(kx, ky, device).to(dtype)

    # applies exponential normalization trick
    # https://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/
    # https://github.com/pytorch/pytorch/blob/bcb0bb7e0e03b386ad837015faba6b4b16e3bfb9/aten/src/ATen/native/SoftMax.cpp#L44
    x_max = F.adaptive_max_pool2d(input, (1, 1))

    # max is detached to prevent undesired backprop loops in the graph
    x_exp = ((input - x_max.detach()) / temperature).exp()

    # F.avg_pool2d(.., divisor_override = 1.0) - proper way for sum pool in PyTorch 1.2.
    # Not available yet in version 1.0, so let's do manually
    pool_coef: float = float(kx * ky)

    # softmax denominator
    den = pool_coef * F.avg_pool2d(x_exp, kernel_size, stride=stride, padding=padding) + eps

    x_softmaxpool = pool_coef * F.avg_pool2d(x_exp * input,
                                             kernel_size,
                                             stride=stride,
                                             padding=padding) / den
    x_softmaxpool = x_softmaxpool.view(b, c, x_softmaxpool.size(2), x_softmaxpool.size(3))

    # We need to output also coordinates
    # Pooled window center coordinates
    grid_global: torch.Tensor = create_meshgrid(h, w, False, device).to(
        dtype).permute(0, 3, 1, 2)

    grid_global_pooled = F.conv2d(grid_global,
                                  center_kernel,
                                  stride=stride,
                                  padding=padding)

    # Coordinates of maxima residual to window center
    # prepare kernel
    coords_max: torch.Tensor = F.conv2d(x_exp,
                                        window_kernel,
                                        stride=stride,
                                        padding=padding)

    coords_max = coords_max / den.expand_as(coords_max)
    coords_max = coords_max + grid_global_pooled.expand_as(coords_max)
    # [:,:, 0, ...] is x
    # [:,:, 1, ...] is y

    if normalized_coordinates:
        coords_max = normalize_pixel_coordinates(coords_max.permute(0, 2, 3, 1), h, w)
        coords_max = coords_max.permute(0, 3, 1, 2)

    # Back B*C -> (b, c)
    coords_max = coords_max.view(b, c, 2, coords_max.size(2), coords_max.size(3))

    if output_value:
        return coords_max, x_softmaxpool
    return coords_max
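
As a sanity check, a hedged example of recovering an isolated peak, assuming the helper kernels and kornia imports referenced by conv_soft_argmax2d are in scope:

import torch

# A single strong response at (row=2, col=3) in a 5x7 heatmap.
heatmap = torch.zeros(1, 1, 5, 7)
heatmap[0, 0, 2, 3] = 10.0

coords, vals = conv_soft_argmax2d(heatmap, (3, 3), (1, 1), (1, 1),
                                  normalized_coordinates=False,
                                  output_value=True)
# coords has shape (1, 1, 2, 5, 7); channel 0 is x, channel 1 is y.
# At the output cell over the peak, the soft-argmax lands near (x=3, y=2).
print(coords[0, 0, :, 2, 3])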
Example #6
def warp_frame_depth(image_src: torch.Tensor, depth_dst: torch.Tensor,
                     src_trans_dst: torch.Tensor,
                     camera_matrix: torch.Tensor) -> torch.Tensor:
    """Warp a tensor from a source to destination frame by the depth in the destination.

    Compute 3d points from the depth, transform them using given transformation, then project the point cloud to an
    image plane.

    Args:
        image_src (torch.Tensor): image tensor in the source frame with shape (BxDxHxW).
        depth_dst (torch.Tensor): depth tensor in the destination frame with shape (Bx1xHxW).
        src_trans_dst (torch.Tensor): transformation matrix from destination to source with shape (Bx4x4).
        camera_matrix (torch.Tensor): tensor containing the camera intrinsics with shape (Bx3x3).

    Return:
        torch.Tensor: the warped tensor in the source frame with shape (BxDxHxW).

    """
    if not isinstance(image_src, torch.Tensor):
        raise TypeError(
            f"Input image_src type is not a torch.Tensor. Got {type(image_src)}."
        )

    if not len(image_src.shape) == 4:
        raise ValueError(
            f"Input image_src musth have a shape (B, D, H, W). Got: {image_src.shape}"
        )

    if not isinstance(depth_dst, torch.Tensor):
        raise TypeError(
            f"Input depht_dst type is not a torch.Tensor. Got {type(depth_dst)}."
        )

    if not (len(depth_dst.shape) == 4 and depth_dst.shape[-3] == 1):
        raise ValueError(
            f"Input depth_dst must have a shape (B, 1, H, W). Got: {depth_dst.shape}"
        )

    if not isinstance(src_trans_dst, torch.Tensor):
        raise TypeError(f"Input src_trans_dst type is not a torch.Tensor. "
                        f"Got {type(src_trans_dst)}.")

    if not (len(src_trans_dst.shape) == 3 and src_trans_dst.shape[-2:] == (4, 4)):
        raise ValueError(f"Input src_trans_dst must have a shape (B, 4, 4). "
                         f"Got: {src_trans_dst.shape}.")

    if not isinstance(camera_matrix, torch.Tensor):
        raise TypeError(f"Input camera_matrix type is not a torch.Tensor. "
                        f"Got {type(camera_matrix)}.")

    if not (len(camera_matrix.shape) == 3 and camera_matrix.shape[-2:] == (3, 3)):
        raise ValueError(f"Input camera_matrix must have a shape (B, 3, 3). "
                         f"Got: {camera_matrix.shape}.")
    # unproject destination points to the 3d camera frame
    points_3d_dst: torch.Tensor = depth_to_3d(depth_dst,
                                              camera_matrix)  # Bx3xHxW

    # rearrange points to BxHxWx3 for the transform below
    points_3d_dst = points_3d_dst.permute(0, 2, 3, 1)  # BxHxWx3

    # apply the transformation to map the 3d points from destination to source
    points_3d_src = transform_points(src_trans_dst[:, None],
                                     points_3d_dst)  # BxHxWx3

    # project back to pixels
    camera_matrix_tmp: torch.Tensor = camera_matrix[:, None, None]  # Bx1x1x3x3
    points_2d_src: torch.Tensor = project_points(points_3d_src,
                                                 camera_matrix_tmp)  # BxHxWx2

    # normalize points to the range [-1, 1]
    height, width = depth_dst.shape[-2:]
    points_2d_src_norm: torch.Tensor = normalize_pixel_coordinates(
        points_2d_src, height, width)  # BxHxWx2

    return F.grid_sample(image_src, points_2d_src_norm,
                         align_corners=True)  # type: ignore
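
A minimal smoke test, assuming the kornia helpers used above (depth_to_3d, transform_points, project_points, normalize_pixel_coordinates) are imported: with an identity transform and a shared camera, the warp should approximately reproduce image_src.

import torch

B, H, W = 1, 4, 6
image_src = torch.rand(B, 3, H, W)
depth_dst = torch.ones(B, 1, H, W)
src_trans_dst = torch.eye(4)[None]  # Bx4x4 identity transform
camera_matrix = torch.tensor([[[W / 2.0, 0.0, W / 2.0],
                               [0.0, H / 2.0, H / 2.0],
                               [0.0, 0.0, 1.0]]])

warped = warp_frame_depth(image_src, depth_dst, src_trans_dst, camera_matrix)
print(torch.allclose(warped, image_src, atol=1e-4))  # expected: True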