Example #1
    def _step_fn(self, step, inputs):

        tgt_i = inputs['tgt_i'].item()

        # Forward pass and loss
        with torch.no_grad():
            loss, data = utils.forward_pass(self.model, self.loss_fn, inputs)

        # New sequence
        if self.prev_tgt_i != tgt_i - 1:
            print("\n" + "=" * 20 + "\nNew sequence\n" + "=" * 20 + "\n")
            if self.gt_poses:  # nothing to evaluate before the first sequence
                self.ates += evaluation.eval_path(self.gt_poses,
                                                  self.pred_poses)
            self.gt_poses = []
            self.pred_poses = []

        print(f"{step}/{len(self.loader)-1} - {tgt_i}")

        # Accumulated every step: predicted/GT poses and depth metrics
        poses = data["pose"]
        T_pred = utils.torch_to_numpy(
            geometry.to_homog_matrix(geometry.pose_vec2mat(
                poses[:, 0])).squeeze(0))
        self.pred_poses.append(T_pred)
        T_gt = utils.torch_to_numpy(data["T"].squeeze(0))[1]
        self.gt_poses.append(T_gt)

        gt_depth = data["gt_sparse"]
        pred_depth = data["depth"][0]
        metrics = evaluation.eval_depth(gt_depth, pred_depth)

        self.metrics = utils.dict_append(self.metrics, metrics)

        self.prev_tgt_i = tgt_i
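Every example in this listing funnels tensors through a torch_to_numpy helper whose definition is not shown. A minimal sketch of what such a utility typically looks like, assuming it only needs to detach, move to CPU, and convert (the variadic form matches the two-argument call in Example #3):

import torch

def torch_to_numpy(*tensors):
    # Detach from the autograd graph, move to host memory, convert to NumPy.
    arrays = tuple(t.detach().cpu().numpy() for t in tensors)
    return arrays[0] if len(arrays) == 1 else arrays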
Example #2
    def _compute_debug(self, loss, data):

        if True:  # visualize matcher inlier weights and compare with cv2 homography
            x = utils.torch_to_numpy(data["x"][self.b].transpose(0, 1))
            w = utils.torch_to_numpy(data["w"][self.b])
            ap, bp = x[:, :2], x[:, 2:]
            print(w.shape)
            inliers = (w > 0.99)
            self.img_matches.append(
                viz.draw_text(
                    "inliers fcons",
                    viz.draw_matches(self.img, self.warp, ap, bp, inliers)))

            src_pts = np.float32(ap).reshape(-1, 1, 2)
            dst_pts = np.float32(bp).reshape(-1, 1, 2)
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 3.0)
            mask = mask.flatten().astype(bool)  # np.bool was removed in NumPy 1.24
            self.img_matches.append(
                viz.draw_text(
                    "CV2 find h**o",
                    viz.draw_matches(self.img, self.warp, ap, bp, mask)))

            img_warped = cv2.warpPerspective(
                self.img, M, (self.img.shape[1], self.img.shape[0]))
            ap_warped = cv2.perspectiveTransform(src_pts, M).squeeze(1)
            self.img_matches.append(
                viz.draw_text(
                    "CV2 warp",
                    viz.draw_matches(img_warped,
                                     self.warp,
                                     ap_warped,
                                     bp,
                                     mask,
                                     draw_outliers=False)))

            H_pred = data["H_pred"][self.b].inverse()
            H = H_pred / H_pred[2, 2]
            H = utils.torch_to_numpy(H)

            img_H_warped = cv2.warpPerspective(
                self.img, H, (self.img.shape[1], self.img.shape[0]))
            ap_H_warped = cv2.perspectiveTransform(src_pts, H).squeeze(1)
            self.img_matches.append(
                viz.draw_text(
                    "H warp",
                    viz.draw_matches(img_H_warped,
                                     self.warp,
                                     ap_H_warped,
                                     bp,
                                     mask,
                                     draw_outliers=False)))
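The division by H_pred[2, 2] above matters because a homography is only defined up to scale; normalizing makes it directly comparable with cv2's estimate. A toy check with made-up numbers (not taken from the example):

import numpy as np

H = np.array([[2., 0., 4.],
              [0., 2., 6.],
              [0., 0., 2.]])
Hn = H / H[2, 2]                       # same projective map, canonical scale
p = np.array([1., 1., 1.])             # a point in homogeneous coordinates
q1, q2 = H @ p, Hn @ p
print(q1[:2] / q1[2], q2[:2] / q2[2])  # identical Cartesian results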
Example #3
    def get_advantage(self,
                      value_net,
                      gamma,
                      gae_lam,
                      dtype,
                      device,
                      mode='reward'):
        gae_delta = np.zeros((self.eps_len, 1))
        adv_eps = np.zeros((self.eps_len, 1))
        # Bootstrap mask: V(S_T) is zeroed when the final state is terminal
        status = np.ones((self.eps_len, 1))
        status[-1] = self.not_terminal
        prev_adv = 0

        for t in reversed(range(self.eps_len)):
            # Get value for current and next state
            obs_tensor = torch.Tensor(self.obs_eps[t]).to(dtype).to(device)
            next_obs_tensor = torch.Tensor(
                self.next_obs_eps[t]).to(dtype).to(device)
            current_val, next_val = torch_to_numpy(value_net(obs_tensor),
                                                   value_net(next_obs_tensor))

            # Calculate delta and advantage
            if mode == 'reward':
                gae_delta[t] = (self.rew_eps[t] +
                                gamma * next_val * status[t] - current_val)
            elif mode == 'cost':
                gae_delta[t] = (self.cost_eps[t] +
                                gamma * next_val * status[t] - current_val)
            adv_eps[t] = gae_delta[t] + gamma * gae_lam * prev_adv

            # Update previous advantage
            prev_adv = adv_eps[t]

        # Get target for value function
        obs_eps_tensor = torch.Tensor(self.obs_eps).to(dtype).to(device)
        vtarg_eps = torch_to_numpy(value_net(obs_eps_tensor)) + adv_eps

        return adv_eps, vtarg_eps
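get_advantage is a standard GAE computation: delta_t = r_t + gamma * V(s_{t+1}) * not_terminal - V(s_t), accumulated backwards as A_t = delta_t + gamma * lambda * A_{t+1}. A self-contained numeric sanity check of that recursion, with hand-picked values rather than the class's buffers:

import numpy as np

gamma, gae_lam = 0.99, 0.95
rew = np.array([1.0, 1.0, 1.0])
val = np.array([0.5, 0.5, 0.5, 0.0])       # V(s_0..s_2), V(s_3) = 0 (terminal)
deltas = rew + gamma * val[1:] - val[:-1]  # TD errors
adv, prev = np.zeros(3), 0.0
for t in reversed(range(3)):
    adv[t] = deltas[t] + gamma * gae_lam * prev
    prev = adv[t]
print(adv)  # each entry is a discounted sum of the later TD errors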
Example #4
    def _compute_debug(self, loss, data):

        if True:  # match descriptors using pytorch
            ids, mask = brute_force_match(self.AF, self.BF)
            ids = utils.torch_to_numpy(ids[self.b])
            mask = utils.torch_to_numpy(mask[self.b])
            print(self.p1[mask].shape)
            self.img_matches.append(
                viz.draw_text(
                    "PyTorch Matcher",
                    viz.draw_matches(self.img, self.warp, self.p1[mask],
                                     self.p2[ids][mask])))
            print(ids.shape, mask.sum())

        if True:  # debug match using ids
            ids = utils.torch_to_numpy(data["ids"][self.b])
            mask = utils.torch_to_numpy(data["mask"][self.b])
            APh = utils.torch_to_numpy(data["APh"][self.b])
            p1h = utils.torch_to_numpy(data["APh"][self.b].transpose(0, 1))
            self.img_matches.append(
                viz.draw_text(
                    "Matched ids",
                    viz.draw_matches(self.img, self.warp, self.p1[ids][mask],
                                     self.p2[mask])))
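brute_force_match is not included in this listing. One plausible implementation of a mutual-nearest-neighbour matcher over (B, C, N) descriptor tensors, consistent with how ids and mask are indexed above (an assumption, not the project's actual code):

import torch

def brute_force_match(AF, BF):
    # AF, BF: (B, C, N) and (B, C, M) descriptor tensors.
    dist = torch.cdist(AF.transpose(1, 2), BF.transpose(1, 2))  # (B, N, M)
    ids = dist.argmin(dim=2)                   # best B index for each A point
    back = dist.argmin(dim=1)                  # best A index for each B point
    n = torch.arange(ids.shape[1], device=ids.device)
    mask = back.gather(1, ids) == n.unsqueeze(0)  # keep mutual matches only
    return ids, mask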
Example #5
def sfm_inspector(data):
    global K
    K = utils.torch_to_numpy(data["K"])
    img = torch.cat((data["refs"][0], data["tgt"], data["refs"][1]), dim=1)
    cv2.imshow("img", viz.tensor2img(img))
    cv2.imshow("gt_sparse", viz.tensor2depthimg(data["gt_sparse"]))
Example #6

    for x, y in train_loader:
        im_shape = x[0].squeeze().shape  # (28, 28) for MNIST
        fig, axs = plt.subplots(2, 8)
        for i in range(16):
            im = utils.torch_to_numpy(x[i].squeeze())
            row, col = divmod(i, 8)
            axs[row, col].imshow(im, cmap='gray')
            axs[row, col].set_title(y[i].item(), fontsize=20)
            axs[row, col].set_xticks([])
            axs[row, col].set_yticks([])
        break
    plt.suptitle('samples from the MNIST dataset', fontsize=20)
    plt.subplots_adjust(hspace=0.5, wspace=0.5)
    plt.tight_layout()
    plt.show()

    # CNN CLASSIFIER
    cnn_clf(train_loader, test_loader)

    n = np.prod(im_shape)
    X_train = utils.torch_to_numpy(mnist_trainset.data.data).reshape(
        (len(mnist_trainset), n))
    y_train = utils.torch_to_numpy(mnist_trainset.targets.data).reshape(
        (len(mnist_trainset), ))
    X_test = utils.torch_to_numpy(mnist_testset.data.data).reshape(
        (len(mnist_testset), n))
    y_test = utils.torch_to_numpy(mnist_testset.targets.data).reshape(
        (len(mnist_testset), ))

    X_train = normalize(X_train)
    X_test = normalize(X_test)

    # MLP CLASSIFIER
    mlp_clf(X_train, X_test, y_train, y_test)

    # Logistic Regression CLASSIFIER
    lr_clf(X_train, X_test, y_train, y_test)
Example #7
raw_path = sys.argv[1]
export_path = sys.argv[2]

print(f"raw: {raw_path}")
print(f"export: {export_path}")

dataset = Lyft(raw_path)
N = len(dataset)

prev_tgt_i = None
seq_i = -1
poses = []

for i, data in enumerate(dataset):
    tgt_img = np.uint8(
        utils.torch_to_numpy(data["tgt"]).transpose((1, 2, 0)) * 255)
    tgt_T = utils.torch_to_numpy(data["tgt_T"])
    T = utils.torch_to_numpy(data["T"])
    K = utils.torch_to_numpy(data["K"])
    sparse = utils.torch_to_numpy(data["gt_sparse"].squeeze(0))
    tgt_i = data["tgt_i"]

    if prev_tgt_i != tgt_i - 1:
        if poses:
            poses = np.array([pose[:3, :].flatten() for pose in poses])
            p = f"{export_path}/seq{seq_i:010}"
            np.savetxt(f"{p}/poses.txt", poses)
            poses = []
        seq_i += 1
        print(f"new sequence: {seq_i:010}")
Example #8
    def run_traj(self, env, policy, value_net, cvalue_net, running_stat,
                 score_queue, cscore_queue, gamma, c_gamma, gae_lam, c_gae_lam,
                 dtype, device, constraint):

        batch_idx = 0

        cost_ret_hist = []

        avg_eps_len = 0
        num_eps = 0

        while batch_idx < self.batch_size:
            obs = env.reset()
            if running_stat is not None:
                obs = running_stat.normalize(obs)
            ret_eps = 0
            cost_ret_eps = 0

            for t in range(self.max_eps_len):
                act = policy.get_act(torch.Tensor(obs).to(dtype).to(device))
                act = torch_to_numpy(act).squeeze()
                next_obs, rew, done, info = env.step(act)

                if constraint == 'velocity':
                    if 'y_velocity' not in info:
                        cost = np.abs(info['x_velocity'])
                    else:
                        cost = np.sqrt(info['x_velocity']**2 +
                                       info['y_velocity']**2)
                elif constraint == 'circle':
                    cost = info['cost']

                ret_eps += rew
                cost_ret_eps += (c_gamma**t) * cost

                if running_stat is not None:
                    next_obs = running_stat.normalize(next_obs)

                # Store in episode buffer
                self.obs_eps[t] = obs
                self.act_eps[t] = act
                self.next_obs_eps[t] = next_obs
                self.rew_eps[t] = rew
                self.cost_eps[t] = cost

                obs = next_obs

                self.eps_len += 1
                batch_idx += 1

                # Record episode return when it terminates or is truncated
                if done or t == self.max_eps_len - 1:
                    if done:
                        self.not_terminal = 0
                    score_queue.append(ret_eps)
                    cscore_queue.append(cost_ret_eps)
                    cost_ret_hist.append(cost_ret_eps)

                    num_eps += 1
                    avg_eps_len += (self.eps_len - avg_eps_len) / num_eps

                if done or batch_idx == self.batch_size:
                    break

            # Store episode buffer
            self.obs_eps = self.obs_eps[:self.eps_len]
            self.next_obs_eps = self.next_obs_eps[:self.eps_len]
            self.act_eps = self.act_eps[:self.eps_len]
            self.rew_eps = self.rew_eps[:self.eps_len]
            self.cost_eps = self.cost_eps[:self.eps_len]

            # Calculate advantage
            adv_eps, vtarg_eps = self.get_advantage(value_net,
                                                    gamma,
                                                    gae_lam,
                                                    dtype,
                                                    device,
                                                    mode='reward')
            cadv_eps, cvtarg_eps = self.get_advantage(cvalue_net,
                                                      c_gamma,
                                                      c_gae_lam,
                                                      dtype,
                                                      device,
                                                      mode='cost')

            # Update batch buffer
            start_idx, end_idx = self.ptr, self.ptr + self.eps_len
            self.obs_buf[start_idx:end_idx] = self.obs_eps
            self.act_buf[start_idx:end_idx] = self.act_eps
            self.vtarg_buf[start_idx:end_idx] = vtarg_eps
            self.adv_buf[start_idx:end_idx] = adv_eps
            self.cvtarg_buf[start_idx:end_idx] = cvtarg_eps
            self.cadv_buf[start_idx:end_idx] = cadv_eps

            # Update pointer
            self.ptr = end_idx

            # Reset episode buffers for the next episode
            self.obs_eps = np.zeros((self.max_eps_len, self.obs_dim),
                                    dtype=np.float32)
            self.next_obs_eps = np.zeros((self.max_eps_len, self.obs_dim),
                                         dtype=np.float32)
            self.act_eps = np.zeros((self.max_eps_len, self.act_dim),
                                    dtype=np.float32)
            self.rew_eps = np.zeros((self.max_eps_len, 1), dtype=np.float32)
            self.cost_eps = np.zeros((self.max_eps_len, 1), dtype=np.float32)
            self.eps_len = 0
            self.not_terminal = 1

        avg_cost = np.mean(cost_ret_hist)
        std_cost = np.std(cost_ret_hist)

        # Normalize advantage functions
        self.adv_buf = (self.adv_buf -
                        self.adv_buf.mean()) / (self.adv_buf.std() + 1e-6)
        self.cadv_buf = (self.cadv_buf -
                         self.cadv_buf.mean()) / (self.cadv_buf.std() + 1e-6)

        return {
            'states': self.obs_buf,
            'actions': self.act_buf,
            'v_targets': self.vtarg_buf,
            'advantages': self.adv_buf,
            'cv_targets': self.cvtarg_buf,
            'c_advantages': self.cadv_buf,
            'avg_cost': avg_cost,
            'std_cost': std_cost,
            'avg_eps_len': avg_eps_len
        }
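The update avg_eps_len += (eps_len - avg_eps_len) / num_eps is the standard incremental mean, so no episode-length history has to be stored. A quick check against np.mean with toy numbers:

import numpy as np

lens = [7, 3, 10, 5]
avg, n = 0.0, 0
for L in lens:
    n += 1
    avg += (L - avg) / n    # incremental mean, as in run_traj
print(avg, np.mean(lens))   # both 6.25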
Example #9
    def _step_fn(self, step, inputs):

        # Forward pass and loss
        with torch.no_grad():
            loss, data = utils.forward_pass(self.model, self.loss_fn, inputs)

        print(f"loss {loss.item():.3f}")

        for i in range(data["pose"].shape[1]):
            pose = list(data["pose"][0, i, :].cpu().detach().numpy())
            #print("pose %d -> x: %.6f, y: %.6f, z: %.6f, rx: %.6f, ry: %.6f, rz: %.6f" % (i, *pose))

        poses = data["pose"]
        T0 = utils.torch_to_numpy(
            geometry.to_homog_matrix(geometry.pose_vec2mat(
                poses[:, 1])).squeeze(0))
        T1 = np.identity(4)
        T1[:3, 3] = 0
        T2 = utils.torch_to_numpy(
            geometry.to_homog_matrix(geometry.pose_vec2mat(
                poses[:, 0])).squeeze(0))

        T_gt = utils.torch_to_numpy(data["T"].squeeze(0))
        T0_gt = T_gt[0]
        T1_gt = np.identity(4)
        T1_gt[:3, 3] = 0
        T2_gt = T_gt[1]

        Ta, Tb, Tc = T0.copy(), T1.copy(), T2.copy()
        Ta_gt, Tb_gt, Tc_gt = T0_gt.copy(), T1_gt.copy(), T2_gt.copy()

        # Trajectory
        if self.prev_tgt_i != data["tgt_i"] - 1 or self.scale is None:
            self.positions = []  # New sequence!
            self.positions_gt = []
        self.scale = (np.linalg.norm(Tc_gt[:3, -1] - Ta_gt[:3, -1]) /
                      np.linalg.norm(Tc[:3, -1] - Ta[:3, -1]))
        self.prev_tgt_i = data["tgt_i"]

        Ta_gt[:3, -1] /= self.scale
        Tc_gt[:3, -1] /= self.scale

        print(Tc_gt)
        print(Tc)

        if len(self.positions) == 0:
            self.positions = [Ta, Tb, Tc]
            self.positions_gt = [Ta_gt, Tb_gt, Tc_gt]
        else:
            inv = np.linalg.pinv(self.positions[-1])
            self.positions = [inv @ T for T in self.positions]
            self.positions.append(Tc)

            inv_gt = np.linalg.pinv(self.positions_gt[-1])
            self.positions_gt = [inv_gt @ T_gt for T_gt in self.positions_gt]
            self.positions_gt.append(Tc_gt)

        # Debug images
        depth_img = viz.tensor2depthimg(data["depth"][0][0, 0])
        tgt_img = viz.tensor2img(data["tgt"][0])
        img = np.concatenate((tgt_img, depth_img), axis=1)
        tgtrefs = viz.tensor2img(
            torch.cat((data["refs"][0, 0], data["tgt"][0], data["refs"][0, 1]),
                      dim=1))

        points, colors = to_points_3d(data["tgt"][0], data["depth"][0],
                                      data["K"])

        loop = True
        while loop:
            key = cv2.waitKey(10)
            if key == 27 or self.renderer.should_quit():
                exit()
            elif key != -1:
                loop = False

            cv2.imshow("target and depth", img)
            cv2.imshow("target and refs", tgtrefs)

            self.renderer.clear_screen()
            self.renderer.draw_points(points, colors)
            line = [T[:3, 3] for T in self.positions]
            line_gt = [T[:3, 3] for T in self.positions_gt]
            self.renderer.draw_line(line, color=(1., 0., 0.))
            self.renderer.draw_line(line_gt, color=(0., 1., 0.))
            #self.renderer.draw_cameras([T0], color=(1.,0.,0.))
            #self.renderer.draw_cameras([T1], color=(0.,1.,0.))
            #self.renderer.draw_cameras([T2], color=(0.,0.,1.))
            self.renderer.finish_frame()
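The self.scale ratio above compensates for the scale ambiguity of monocular pose estimates: predicted translations are only known up to a global factor, so ground truth is rescaled by the ratio of distances travelled between the two reference frames. A toy illustration with hypothetical 4x4 poses:

import numpy as np

T_pred_a, T_pred_c = np.eye(4), np.eye(4)
T_pred_c[:3, 3] = [0.1, 0.0, 0.2]    # predicted motion, arbitrary units
T_gt_a, T_gt_c = np.eye(4), np.eye(4)
T_gt_c[:3, 3] = [1.0, 0.0, 2.0]      # metric ground-truth motion
scale = (np.linalg.norm(T_gt_c[:3, -1] - T_gt_a[:3, -1]) /
         np.linalg.norm(T_pred_c[:3, -1] - T_pred_a[:3, -1]))
print(scale)  # 10.0: dividing GT translations by this matches the prediction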
Example #10
    def _debug_step(self, loss, data):

        while True:
            print(f"b = {self.b}")

            self.prel1 = utils.torch_to_numpy(
                data["A"]["Prel"][self.b].transpose(0, 1))
            self.prel2 = utils.torch_to_numpy(
                data["B"]["Prel"][self.b].transpose(0, 1))

            self.prelflat = np.concatenate(
                (self.prel1.flatten(), self.prel2.flatten()))

            self.img = viz.tensor2img(data["img"][self.b])
            self.warp = viz.tensor2img(data["warp"][self.b])

            self.AF = data["A"]["F"]
            self.BF = data["B"]["F"]
            self.B = self.BF.shape[0]
            self.des1 = utils.torch_to_numpy(self.AF[self.b].transpose(0, 1))
            self.des2 = utils.torch_to_numpy(self.BF[self.b].transpose(0, 1))

            self.s1 = utils.torch_to_numpy(data["A"]["S"][self.b])
            self.s2 = utils.torch_to_numpy(data["B"]["S"][self.b])

            self.p1 = utils.torch_to_numpy(data["A"]["P"][self.b].transpose(
                0, 1))
            self.p2 = utils.torch_to_numpy(data["B"]["P"][self.b].transpose(
                0, 1))

            self.img_matches = []
            self.inliers = []

            if True:  # cv2 match descriptor
                bf = cv2.BFMatcher.create(cv2.NORM_L2, crossCheck=True)
                matches = bf.match(self.des1, self.des2)
                #matches = sorted(matches, key = lambda x: -x.distance)
                print(self.des1.shape, len(matches))
                #matches = matches[:20]
                kp1 = [cv2.KeyPoint(xy[0], xy[1], 2) for xy in self.p1]
                kp2 = [cv2.KeyPoint(xy[0], xy[1], 2) for xy in self.p2]
                self.img_matches.append(
                    viz.draw_text(
                        "CV2 BFMatcher",
                        cv2.drawMatches(self.img,
                                        kp1,
                                        self.warp,
                                        kp2,
                                        matches,
                                        flags=2,
                                        outImg=None)))

            self._compute_debug(loss, data)

            self.img_matches = np.concatenate(
                [img for img in self.img_matches])

            plt.ion()
            fig = plt.figure(1)
            plt.clf()

            while True:
                key = cv2.waitKey(10)
                if key == 27:  # esc
                    print("exit")
                    exit()
                elif key == 119:  # w
                    self.b = min(self.b + 1, self.B - 1)
                    break
                elif key == 115:  # s
                    self.b = max(self.b - 1, 0)
                    break
                elif key == 32:  # space
                    print("next")
                    return

                cv2.imshow("matches", self.img_matches)

                plt.hist(self.prelflat, 200, (0., 1.), color=(0, 0, 1))
                #plt.hist(inliers, 10, (0.,1.), color=(0,0,1))

                fig.canvas.flush_events()
Example #11

    def forward(self, logits, targets):
        y_pred, y_true = logits[2], targets[2]
        self.preds.append(torch_to_numpy(y_pred))
        self.targets.append(torch_to_numpy(y_true))