Example #1
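A helper that ranks all views by the squared distance between their camera centres and the centre view's camera centre, then returns the indices of nr_neighbours views according to the configured selection strategy (closest, furthest, or a mix of both).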
    def get_view_neighbours(self, cameras, center_view, nr_neighbours):
        if nr_neighbours == 0:
            return []

        # Invert the (B, 3, 4) camera matrices on the GPU to recover each
        # view's camera centre (camlocs) and inverted leading 3x3 block (invKRs).
        cameras = cameras.cuda()
        B = cameras.shape[0]
        camlocs = cameras.new_empty(B, 3, 1)
        invKRs = cameras.new_empty(B, 3, 3)
        MYTH.InvertCams_gpu(cameras, invKRs, camlocs)

        # Squared distance from every camera centre to the centre view's camera centre.
        distances = ((camlocs - camlocs[center_view:center_view + 1, :, :])
                     .pow(2).sum(dim=1).sum(dim=1))
        distances = [d.item() for d in distances]

        # View indices sorted by increasing distance; orders[0] is the centre view itself (distance zero).
        orders = sorted(range(len(distances)), key=distances.__getitem__)
        if nr_neighbours >= len(distances):
            return orders
        if self._neighbour_selection == "closest":
            # Skip orders[0] (the centre view) and take the closest views.
            return orders[1:1 + nr_neighbours]
        elif self._neighbour_selection == "furthest":
            return orders[-nr_neighbours:]
        elif self._neighbour_selection == "mixed":
            # Half of the neighbours from the closest views, the rest from the furthest.
            half = nr_neighbours // 2
            return orders[1:1 + half] + orders[-(nr_neighbours - half):]
        else:
            raise ValueError(
                "Unsupported neighbourhood selection approach '%s'" %
                self._neighbour_selection)
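
Judging from the CPU fallback in Examples #2 and #4 below, MYTH.InvertCams_gpu appears to take (..., 3, 4) camera matrices P = [K*R | K*t] and fill invKRs with (K*R)^-1 and camlocs with the camera centre -(K*R)^-1 * K*t. A pure-PyTorch sketch of that computation (the function name is illustrative, not part of MYTH):

import torch

def invert_cams_reference(cameras):
    # cameras: (B, 3, 4) projection matrices P = [K*R | K*t].
    # Mirrors what MYTH.InvertCams_gpu appears to compute, based on the
    # CPU fallback paths in Examples #2 and #4 below.
    invKRs = torch.inverse(cameras[:, :3, :3]).contiguous()  # (B, 3, 3), (K*R)^-1
    camlocs = -torch.bmm(invKRs, cameras[:, :3, 3:4])         # (B, 3, 1), camera centres
    return invKRs, camlocs

With this sketch, the call MYTH.InvertCams_gpu(cameras, invKRs, camlocs) above could be reproduced as invKRs, camlocs = invert_cams_reference(cameras), except that the CUDA version writes into preallocated output tensors.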
Example #2
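The forward pass of a depth-reprojection op: the per-view cameras are inverted (their intrinsics are rescaled first when scale != 1.0), a MYTH CUDA kernel reprojects the N neighbour depth maps, and pixels the kernel never writes are reset to zero.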
    def forward(ctx, depths, cameras, scale):
        B = depths.shape[0]
        N = depths.shape[1]
        H = depths.shape[3]
        W = depths.shape[4]

        # Initialise the output with a large sentinel; pixels the kernel never
        # writes keep the sentinel and are reset to zero below.
        sentinel = 1e9
        output_depth = depths.new_full((B, N, 1, H, W), fill_value=sentinel)

        camlocs = depths.new_empty(B, N, 3, 1)
        invKRs = depths.new_empty(B, N, 3, 3)
        if scale != 1.0:
            # Rescale the intrinsics (the first two rows of each 3x4 camera) and
            # invert the cameras one by one; note that assigning into
            # cameras[b][n] writes the scaled values back into the input tensor.
            for b in range(B):
                for n in range(N):
                    cameras[b][n] = cameras[b][n].clone()
                    cameras[b][n][:2, :] = cameras[b][n][:2, :] * scale
                    invKRs[b][n] = torch.inverse(
                        cameras[b][n][:3, :3]).contiguous()
                    camlocs[b][n] = -torch.mm(invKRs[b][n],
                                              cameras[b][n][:3, 3:4])
        else:
            # Batched inversion of all B*N cameras in a single CUDA call.
            MYTH.InvertCams_gpu(cameras.reshape(-1, 3, 4),
                                invKRs.reshape(-1, 3, 3),
                                camlocs.reshape(-1, 3, 1))

        MYTH.DepthReprojectionNeighbours_updateOutput_gpu(
            depths, output_depth, cameras, invKRs, camlocs)

        # Pixels that still hold the sentinel were never written by the kernel; mark them as invalid (zero depth).
        output_depth[output_depth > sentinel / 10] = 0

        return output_depth
Example #3
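The forward pass of a patched reprojection op that warps trusted depths and colours from the neighbouring views onto the centre view's image grid (the output resolution is taken from color_center): the neighbour cameras are inverted in one batched call, the MYTH kernel fills the depth and colour outputs, and everything needed for the backward pass is saved on ctx.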
    def forward(ctx, trusted_depth_center, color_center, camera_center,
                trusted_depths_neighbours, colors_neighbours,
                cameras_neighbours):
        B = colors_neighbours.shape[0]
        N = colors_neighbours.shape[1]
        C = colors_neighbours.shape[2]
        H_out = color_center.shape[2]
        W_out = color_center.shape[3]

        # Depth output initialised to a sentinel (unwritten pixels are zeroed
        # below); colour output initialised to zero.
        sentinel = 1e9
        output_depth = trusted_depth_center.new_full((B, N, 1, H_out, W_out),
                                                     fill_value=sentinel)
        output_color = color_center.new_full((B, N, C, H_out, W_out),
                                             fill_value=0.0)

        # Invert all B*N neighbour cameras in a single batched CUDA call.
        invKRs_neighbours = cameras_neighbours.new_empty(B, N, 3, 3)
        camlocs_neighbours = cameras_neighbours.new_empty(B, N, 3, 1)
        MYTH.InvertCams_gpu(cameras_neighbours.reshape(-1, 3, 4),
                            invKRs_neighbours.reshape(-1, 3, 3),
                            camlocs_neighbours.reshape(-1, 3, 1))

        MYTH.PatchedReprojectionNeighbours_updateOutput_gpu(
            trusted_depths_neighbours, colors_neighbours, output_depth,
            output_color, camera_center, invKRs_neighbours, camlocs_neighbours)

        # Zero out pixels that were never written and still hold the sentinel.
        output_depth[output_depth > sentinel / 10] = 0

        # Stash everything backward() will need; the clones protect the saved
        # tensors against later in-place modification by the caller.
        ctx.save_for_backward(trusted_depths_neighbours, output_depth,
                              camera_center.clone(), invKRs_neighbours.clone(),
                              camlocs_neighbours.clone(),
                              colors_neighbours.clone())

        return output_depth, output_color
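
The forward(ctx, ...) signatures and the ctx.save_for_backward calls in Examples #2-#5 indicate these snippets are the forward halves of torch.autograd.Function subclasses. A minimal, self-contained sketch of that pattern (the class name and the body are placeholders, not the project's real kernels):

import torch

class ReprojectionSketch(torch.autograd.Function):
    # Placeholder op illustrating the Function pattern used in these examples.
    @staticmethod
    def forward(ctx, depths):
        output = depths.clamp(min=0.0)    # stand-in for a MYTH CUDA kernel call
        ctx.save_for_backward(depths)     # stash inputs needed by backward()
        return output

    @staticmethod
    def backward(ctx, grad_output):
        (depths,) = ctx.saved_tensors
        # Gradient of the stand-in op: pass gradients only where the input was positive.
        return grad_output * (depths > 0).to(grad_output.dtype)

# Callers go through .apply() rather than calling forward() directly:
# reprojected = ReprojectionSketch.apply(depths)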
Example #4
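Structurally the same as Example #2, but for the "complete bound" variant: after inverting the (optionally rescaled) cameras, the inputs are saved for the backward pass and the MYTH kernel fills the preallocated reprojected tensor using the depth range parameters dmin, dmax and dstep.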
    def forward(ctx, depths, reprojected, cameras, scale, dmin, dmax, dstep):
        B = depths.shape[0]
        N = depths.shape[1]
        H = depths.shape[3]
        W = depths.shape[4]

        camlocs = depths.new_empty(B, N, 3, 1)
        invKRs = depths.new_empty(B, N, 3, 3)
        if scale != 1.0:
            # Rescale the intrinsics (the first two rows of each 3x4 camera) and
            # invert the cameras one by one; note that assigning into
            # cameras[b][n] writes the scaled values back into the input tensor.
            for b in range(B):
                for n in range(N):
                    cameras[b][n] = cameras[b][n].clone()
                    cameras[b][n][:2, :] = cameras[b][n][:2, :] * scale
                    invKRs[b][n] = torch.inverse(
                        cameras[b][n][:3, :3]).contiguous()
                    camlocs[b][n] = -torch.mm(invKRs[b][n],
                                              cameras[b][n][:3, 3:4])
        else:
            # Batched inversion of all B*N cameras in a single CUDA call.
            MYTH.InvertCams_gpu(cameras.reshape(-1, 3, 4),
                                invKRs.reshape(-1, 3, 3),
                                camlocs.reshape(-1, 3, 1))

        ctx.save_for_backward(depths, cameras, camlocs, invKRs)
        # The CUDA kernel writes the bounded reprojection into `reprojected` in place.
        MYTH.DepthReprojectionNonzeroCompleteBound_updateOutput_gpu(
            depths, reprojected, cameras, invKRs, camlocs, dmin, dmax, dstep)

        return reprojected
Example #5
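The patched counterpart of the previous example: only the centre-view cameras are inverted, and the MYTH kernel computes a per-neighbour bounds volume at the centre view's resolution from the centre depth map, the neighbour depth maps and the depth range parameters.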
    def forward(ctx, depth_center, camera_center, depths_neighbours,
                cameras_neighbours, dmin, dmax, dstep):
        B = depths_neighbours.shape[0]
        N = depths_neighbours.shape[1]
        H_center = depth_center.shape[2]
        W_center = depth_center.shape[3]

        # Invert the centre-view cameras to get their inverse K*R blocks and camera centres.
        invKR_center = camera_center.new_empty(B, 3, 3)
        camloc_center = camera_center.new_empty(B, 3, 1)
        MYTH.InvertCams_gpu(camera_center, invKR_center, camloc_center)

        bounds_neighbours = depth_center.new_zeros(
            (B, N, 1, H_center, W_center))

        # The CUDA kernel fills bounds_neighbours in place from the centre and
        # neighbour depth maps, using the depth range [dmin, dmax] with step dstep.
        MYTH.PatchedReprojectionCompleteBound_updateOutput_gpu(
            depth_center, depths_neighbours, bounds_neighbours,
            cameras_neighbours, invKR_center, camloc_center, dmin, dmax, dstep)

        return bounds_neighbours