Code example #1
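A debug step function that runs the model forward without gradients, prints the predicted pose vectors for each item in the batch, and visualizes the target image, predicted depth, per-scale warped references, and a back-projected point cloud rendered through the Pangolin bindings (pango). Free names such as args, model, loss_fn, restack, and the Pangolin display handles dcam/scam are assumed to come from the enclosing scope of the original file.
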
    def step_fn(step, inputs):

        # Forward pass and loss
        with torch.no_grad():
            loss, data = utils.forward_pass(model, loss_fn, inputs)

        print("loss %f" % loss.item())

        print(data.keys())

        print(data["pose"].shape)
        for i in range(args.batch):
            print(list(data["pose"][i, 0, :].cpu().detach().numpy()))
            print(list(data["pose"][i, 1, :].cpu().detach().numpy()))
            print("--")

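        # Stack the batch vertically, then place target images and depth maps side by side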
        depth_img = viz.tensor2depthimg(
            torch.cat((*data["depth"][0][:, 0], ), dim=0))
        tgt_img = viz.tensor2img(torch.cat((*data["tgt"], ), dim=1))
        img = np.concatenate((tgt_img, depth_img), axis=1)

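        # Build a preview image for each prediction scale from the warped references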
        warp_imgs = []
        #diff_imgs = []
        for warp, diff in zip(data["warp"], data["diff"]):
            warp = restack(restack(warp, 1, -1), 0, -2)
            diff = restack(restack(diff, 1, -1), 0, -2)
            warp_imgs.append(viz.tensor2img(warp))
            #diff_imgs.append(viz.tensor2diffimg(diff))

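        # Back-project the predicted depth through the intrinsics K into a colored 3D point cloud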
        world = reconstruction.depth_to_3d_points(data["depth"][0], data["K"])
        points = world[0, :].view(3, -1).transpose(
            1, 0).cpu().detach().numpy().astype(np.float64)
        colors = (data["tgt"][0, :].view(3, -1).transpose(
            1, 0).cpu().detach().numpy().astype(np.float64) + 1) / 2

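        # Display loop: ESC (or closing the Pangolin window) quits, any other key advances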
        loop = True
        while loop:
            key = cv2.waitKey(10)
            if key == 27 or pango.ShouldQuit():
                exit()
            elif key != -1:
                loop = False
            cv2.imshow("target and depth", img)
            #for i, (warp, diff) in enumerate(zip(warp_imgs, diff_imgs)):
            for i, warp in enumerate(warp_imgs):
                cv2.imshow("warp scale: %d" % i, warp)
                #cv2.imshow("diff scale: %d" % i, diff)

            gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
            gl.glClearColor(1.0, 1.0, 1.0, 1.0)
            dcam.Activate(scam)
            gl.glPointSize(5)
            pango.DrawPoints(points, colors)
            pose = np.identity(4)
            pose[:3, 3] = 0
            gl.glLineWidth(1)
            gl.glColor3f(0.0, 0.0, 1.0)
            pango.DrawCamera(pose, 0.5, 0.75, 0.8)
            pango.FinishFrame()
Code example #2
File: debug.py, Project: ErikOrjehag/sfmnet
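The same debug step as example #1, except the point cloud is shown through an Open3D Visualizer instead of Pangolin. Note it uses the legacy Open3D interface (o3d.open3d.Vector3dVector and an argument-less vis.update_geometry()); point_cloud and vis are assumed to be created in the enclosing scope.
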
    def step_fn(step, inputs):

        # Forward pass and loss
        with torch.no_grad():
            loss, data = utils.forward_pass(model, loss_fn, inputs)

        print("loss %f" % loss.item())

        print(data.keys())

        print(data["pose"].shape)
        for i in range(4):
            print(list(data["pose"][i, 0, :].cpu().detach().numpy()))
            print(list(data["pose"][i, 1, :].cpu().detach().numpy()))
            print("--")

        depth_img = viz.tensor2depthimg(
            torch.cat((*data["depth"][0][:, 0], ), dim=0))
        tgt_img = viz.tensor2img(torch.cat((*data["tgt"], ), dim=1))
        img = np.concatenate((tgt_img, depth_img), axis=1)

        warp_imgs = []
        diff_imgs = []
        for warp, diff in zip(data["warp"], data["diff"]):
            warp = restack(restack(warp, 1, -1), 0, -2)
            diff = restack(restack(diff, 1, -1), 0, -2)
            warp_imgs.append(viz.tensor2img(warp))
            diff_imgs.append(viz.tensor2diffimg(diff))

        world = inverse_warp.depth_to_3d_points(data["depth"][0], data["K"])
        points = world[0, :].view(3, -1).transpose(
            1, 0).cpu().detach().numpy().astype(np.float64)
        colors = (data["tgt"][0, :].view(3, -1).transpose(
            1, 0).cpu().detach().numpy().astype(np.float64) + 1) / 2

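        # Feed the new points and colors to the Open3D point cloud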
        point_cloud.points = o3d.open3d.Vector3dVector(points)
        point_cloud.colors = o3d.open3d.Vector3dVector(colors)
        vis.add_geometry(point_cloud)

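        # Display loop: ESC quits, any other key advances to the next step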
        loop = True
        while loop:
            key = cv2.waitKey(10)
            if key == 27:
                exit()
            elif key != -1:
                loop = False
            vis.update_geometry()
            vis.poll_events()
            vis.update_renderer()
            cv2.imshow("target and depth", img)
            for i, (warp, diff) in enumerate(zip(warp_imgs, diff_imgs)):
                cv2.imshow("warp scale: %d" % i, warp)
                cv2.imshow("diff scale: %d" % i, diff)
Code example #3
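A stand-alone main that walks a SequenceDataset, inverse-warps a reference frame into the target view from the ground-truth dense depth and pose, and displays the warp alongside an Open3D point cloud of the back-projected world points (using the legacy top-level o3d.Visualizer API).
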
def main():
    dataset = SequenceDataset(sys.argv[1])

    vis = o3d.Visualizer()
    vis.create_window()
    point_cloud = o3d.geometry.PointCloud()
    vis.add_geometry(point_cloud)

    for i in range(0, len(dataset)):
        tgt, refs, K, Kinv, sparse, dense, tgt_pose, ref_pose = dataset[i]

        # The original snippet printed tgt_pose and exited here, leaving the
        # rest unreachable and pose undefined; as a sketch we assume the
        # project's relative_transform helper gives the target-to-reference pose.
        pose = relative_transform(tgt_pose, ref_pose[1])
        pose_b = pose.unsqueeze(0)
        dense_b = dense.unsqueeze(0).unsqueeze(0)
        ref_b = refs[1].unsqueeze(0)
        K_b = K.unsqueeze(0)

        warp, valid, world, projpixel, grid = inverse_warp.inverse_warp(
            ref_b, dense_b, pose_b, K_b)

        # Visualization
        colors = (ref_b[0, :].view(3, -1).transpose(1, 0) + 1) / 2
        points = world[0, :3].view(3, -1).transpose(1, 0)
        point_cloud.points = o3d.open3d.Vector3dVector(points)
        point_cloud.colors = o3d.open3d.Vector3dVector(colors)
        vis.add_geometry(point_cloud)

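        # Concatenate previous reference, target, and next reference into one image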
        img = torch.cat((refs[0], tgt, refs[1]), dim=1)

        while cv2.waitKey(10) != 27:
            vis.update_geometry()
            vis.poll_events()
            vis.update_renderer()
            cv2.imshow("img", viz.tensor2img(img))
            cv2.imshow("dense", viz.tensor2depthimg(dense))
            cv2.imshow("warp", viz.tensor2img(warp.squeeze(0)))
Code example #4
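A small inspector callback that caches the camera intrinsics in a global K and displays the reference/target image strip together with the sparse ground-truth depth.
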
def sfm_inspector(data):
    global K
    K = utils.torch_to_numpy(data["K"])
    img = torch.cat((data["refs"][0], data["tgt"], data["refs"][1]), dim=1)
    cv2.imshow("img", viz.tensor2img(img))
    cv2.imshow("gt_sparse", viz.tensor2depthimg(data["gt_sparse"]))
Code example #5
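A trainer debug hook: it converts the predicted pose vectors into homogeneous transforms, rescales the ground-truth trajectory to the predicted scale, and draws the predicted (red) and ground-truth (green) trajectories plus the point cloud through a custom renderer wrapper.
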
    def _step_fn(self, step, inputs):

        # Forward pass and loss
        with torch.no_grad():
            loss, data = utils.forward_pass(self.model, self.loss_fn, inputs)

        print(f"loss {loss.item():.3f}")

        for i in range(data["pose"].shape[1]):
            pose = list(data["pose"][0, i, :].cpu().detach().numpy())
            #print("pose %d -> x: %.6f, y: %.6f, z: %.6f, rx: %.6f, ry: %.6f, rz: %.6f" % (i, *pose))

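        # Predicted pose vectors to 4x4 homogeneous transforms (T1 is the identity target frame)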
        poses = data["pose"]
        T0 = utils.torch_to_numpy(
            geometry.to_homog_matrix(geometry.pose_vec2mat(
                poses[:, 1])).squeeze(0))
        T1 = np.identity(4)
        T1[:3, 3] = 0
        T2 = utils.torch_to_numpy(
            geometry.to_homog_matrix(geometry.pose_vec2mat(
                poses[:, 0])).squeeze(0))

        T_gt = utils.torch_to_numpy(data["T"].squeeze(0))
        T0_gt = T_gt[0]
        T1_gt = np.identity(4)
        T1_gt[:3, 3] = 0
        T2_gt = T_gt[1]

        Ta, Tb, Tc = T0.copy(), T1.copy(), T2.copy()
        Ta_gt, Tb_gt, Tc_gt = T0_gt.copy(), T1_gt.copy(), T2_gt.copy()

        # Trajectory
        if self.prev_tgt_i != data["tgt_i"] - 1 or self.scale is None:
            self.positions = []  # New sequence!
            self.positions_gt = []
        self.scale = np.linalg.norm(Tc_gt[:3, -1] - Ta_gt[:3, -1]
                                    ) / np.linalg.norm(Tc[:3, -1] - Ta[:3, -1])
        self.prev_tgt_i = data["tgt_i"]

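        # Rescale the ground-truth translations to match the predicted trajectory scale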
        Ta_gt[:3, -1] /= self.scale
        Tc_gt[:3, -1] /= self.scale

        print(Tc_gt)
        print(Tc)

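        # Rebase the stored trajectories so the latest frame is the origin, then append the new pose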
        if len(self.positions) == 0:
            self.positions = [Ta, Tb, Tc]
            self.positions_gt = [Ta_gt, Tb_gt, Tc_gt]
        else:
            inv = np.linalg.pinv(self.positions[-1])
            self.positions = [inv @ T for T in self.positions]
            self.positions.append(Tc)

            inv_gt = np.linalg.pinv(self.positions_gt[-1])
            self.positions_gt = [inv_gt @ T_gt for T_gt in self.positions_gt]
            self.positions_gt.append(Tc_gt)

        # Debug images
        depth_img = viz.tensor2depthimg(data["depth"][0][0, 0])
        tgt_img = viz.tensor2img(data["tgt"][0])
        img = np.concatenate((tgt_img, depth_img), axis=1)
        tgtrefs = viz.tensor2img(
            torch.cat((data["refs"][0, 0], data["tgt"][0], data["refs"][0, 1]),
                      dim=1))

        points, colors = to_points_3d(data["tgt"][0], data["depth"][0],
                                      data["K"])

        loop = True
        while loop:
            key = cv2.waitKey(10)
            if key == 27 or self.renderer.should_quit():
                exit()
            elif key != -1:
                loop = False

            cv2.imshow("target and depth", img)
            cv2.imshow("target and refs", tgtrefs)

            self.renderer.clear_screen()
            self.renderer.draw_points(points, colors)
            line = [T[:3, 3] for T in self.positions]
            line_gt = [T[:3, 3] for T in self.positions_gt]
            self.renderer.draw_line(line, color=(1., 0., 0.))
            self.renderer.draw_line(line_gt, color=(0., 1., 0.))
            #self.renderer.draw_cameras([T0], color=(1.,0.,0.))
            #self.renderer.draw_cameras([T1], color=(0.,1.,0.))
            #self.renderer.draw_cameras([T2], color=(0.,0.,1.))
            self.renderer.finish_frame()
Code example #6
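The ground-truth branch of the dataset loader (sparse and dense depth read from .npy files next to the image; note that gt_pose is packed as a zero placeholder), followed by a __main__ block that steps through the dataset and displays the images and depth maps until ESC is pressed.
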
                if self.load_gt:
                    sparse = np.load(paths[0][:-4] + ".npy").astype(np.float32)
                    dense = np.load(paths[0][:-4] + "_dense.npy").astype(
                        np.float32)
                    tgt_pose = self.poses[i][tgt]
                    ref_pose = [self.poses[i][tgt - 1], self.poses[i][tgt + 1]]
                    data = {
                        **data, "gt_sparse": torch.tensor(sparse).unsqueeze(0),
                        "gt_dense": torch.tensor(dense).unsqueeze(0),
                        "gt_pose": torch.tensor(0)
                    }
                return data


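# Quick visual sanity check of a dataset sample and its ground-truth depth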
if __name__ == '__main__':
    dataset = SequenceDataset(sys.argv[1])
    for i in range(0, len(dataset)):
        data = dataset[i]
        print(data["gt_sparse"].shape)

        #print(relative_transform(tgt_pose, ref_pose[0]))
        #print(relative_transform(tgt_pose, ref_pose[1]))
        #print("---")
        img = torch.cat((data["refs"][0], data["tgt"], data["refs"][1]), dim=1)
        cv2.imshow("img", viz.tensor2img(img))
        cv2.imshow("gt_sparse", viz.tensor2depthimg(data["gt_sparse"]))
        cv2.imshow("gt_dense", viz.tensor2depthimg(data["gt_dense"]))

        key = cv2.waitKey(0)
        if key == 27:
            break