def update_internal_state(self, habitat_observation):
    """Feed one RGB-D observation to ORB-SLAM2 and update pose and map.

    Runs the SLAM frontend on the observation's RGB-D pair; on successful
    tracking, refreshes ``self.pose6D``, ``self.trajectory_history``,
    ``self.current_obstacles`` and accumulates ``self.map2DObstacles``.

    Returns:
        bool: True when tracking succeeded and state was updated,
        False when the SLAM frame failed or tracking is lost.
    """
    rgb, depth = self.rgb_d_from_observation(habitat_observation)

    try:
        self.slam.process_image_rgbd(rgb, depth, self.cur_time)
        self.tracking_is_OK = str(self.slam.get_tracking_state()) == "OK"
    except Exception:
        # Narrowed from `BaseException`, which would also swallow
        # KeyboardInterrupt/SystemExit; SLAM binding errors are treated
        # as a lost frame.
        print("Warning!!!! ORBSLAM processing frame error")
        self.tracking_is_OK = False

    # Guard clause: nothing else to update when tracking failed.
    if not self.tracking_is_OK:
        return False

    trajectory_history = np.array(self.slam.get_trajectory_points())

    # Last trajectory row is [timestamp, 12 pose entries]; drop the
    # timestamp, reshape to 3x4 and homogenize into a 1x4x4 pose matrix.
    self.pose6D = homogenize_p(
        torch.from_numpy(trajectory_history[-1])[1:].view(3, 4).to(
            self.device)).view(1, 4, 4)
    self.trajectory_history = trajectory_history

    current_obstacles = self.mapper(
        torch.from_numpy(depth).to(self.device).squeeze(),
        self.pose6D).to(self.device)
    self.current_obstacles = current_obstacles

    # Accumulate into the persistent 2D obstacle map (element-wise max).
    self.map2DObstacles = torch.max(
        self.map2DObstacles,
        current_obstacles.unsqueeze(0).unsqueeze(0),
    )
    return True
# Example 2
    def update_internal_state(self, habitat_observation):
        """Process one observation: run ORB-SLAM2, update the pose,
        detect unseen obstacles, and accumulate the 2D obstacle map.

        On tracking failure the agent is re-seeded from the observation's
        ground-truth GPS + compass via ``self.reset``; mapping then
        proceeds with the previous ``self.pose6D``.

        Returns:
            bool: always True (failure is handled internally by reset).
        """
        # Base class stores the raw observation (sets self.obs).
        super(ORBSLAM2Agent, self).update_internal_state(habitat_observation)

        # Remember the episode's start pose the first time we see it.
        if self.start_location is None:
            self.start_location = habitat_observation['gps']
        if self.start_rotation is None:
            self.start_rotation = habitat_observation['compass']

        self.cur_time += self.timestep
        rgb, depth = self.rgb_d_from_observation(habitat_observation)
        t = time.time()
        try:
            self.slam.process_image_rgbd(rgb, depth, self.cur_time)
            if self.timing:
                print(time.time() - t, "ORB_SLAM2")
            self.tracking_is_OK = str(self.slam.get_tracking_state()) == "OK"
        except Exception:
            # Narrowed from `BaseException` (which would also swallow
            # KeyboardInterrupt/SystemExit). Failure is recovered by the
            # reset below, so the frame error is deliberately silent.
            self.tracking_is_OK = False
        if not self.tracking_is_OK:
            # Re-seed the agent pose from ground-truth GPS + compass.
            self.reset(
                np.append(habitat_observation['gps'],
                          habitat_observation['compass']))
        t = time.time()
        if self.tracking_is_OK:
            trajectory_history = np.array(self.slam.get_trajectory_points())
            # Last row is [timestamp, 12 pose entries] -> 1x4x4 pose.
            self.pose6D = homogenize_p(
                torch.from_numpy(trajectory_history[-1])[1:].view(3, 4).to(
                    self.device)).view(1, 4, 4)
            self.trajectory_history = trajectory_history
            if len(self.position_history) > 1:
                previous_step = get_distance(
                    self.pose6D.view(4, 4),
                    torch.from_numpy(self.position_history[-1]).view(4, 4).to(
                        self.device),
                )
                if self.action_history[-1] == HabitatSimActions.MOVE_FORWARD:
                    # A forward action that produced (almost) no motion
                    # means we bumped into an obstacle the map misses.
                    self.unseen_obstacle = (
                        previous_step.item() <= 0.001
                    )  # hardcoded threshold for not moving
        current_obstacles = self.mapper(
            torch.from_numpy(depth).to(self.device).squeeze(),
            self.pose6D).to(self.device)
        self.current_obstacles = current_obstacles
        # Accumulate into the persistent map via element-wise max.
        self.map2DObstacles = torch.max(
            self.map2DObstacles,
            current_obstacles.unsqueeze(0).unsqueeze(0))
        if self.timing:
            print(time.time() - t, "Mapping")
        return True
    def update_internal_state(self, habitat_observation):
        """Process one observation: run ORB-SLAM2, update the pose,
        detect unseen obstacles, and accumulate the 2D obstacle map.

        On tracking failure the agent is fully reset; mapping then
        proceeds with the previous ``self.pose6D``.

        Returns:
            bool: always True (failure is handled internally by reset).
        """
        super(ORBSLAM2AgentAimas,
              self).update_internal_state(habitat_observation)
        self.cur_time += self.timestep
        rgb, depth = self.rgb_d_from_observation(habitat_observation)
        t = time.time()
        try:
            self.slam.process_image_rgbd(rgb, depth, self.cur_time)
            if self.timing:
                print(time.time() - t, "ORB_SLAM2")
            self.tracking_is_OK = str(self.slam.get_tracking_state()) == "OK"
        except Exception:
            # Narrowed from `BaseException`, which would also swallow
            # KeyboardInterrupt/SystemExit; a frame error is treated as
            # lost tracking and recovered by the reset below.
            print("Warning!!!! ORBSLAM processing frame error")
            self.tracking_is_OK = False
        if not self.tracking_is_OK:
            self.reset()
        t = time.time()
        # Re-aim toward the goal relative to the (possibly reset) pose.
        self.set_offset_to_goal(habitat_observation)
        if self.tracking_is_OK:
            trajectory_history = np.array(self.slam.get_trajectory_points())
            # Last row is [timestamp, 12 pose entries] -> 1x4x4 pose.
            self.pose6D = homogenize_p(
                torch.from_numpy(trajectory_history[-1])[1:].view(3, 4).to(
                    self.device)).view(1, 4, 4)
            self.trajectory_history = trajectory_history
            if len(self.position_history) > 1:
                previous_step = get_distance(
                    self.pose6D.view(4, 4),
                    torch.from_numpy(self.position_history[-1]).view(4, 4).to(
                        self.device),
                )
                if self.action_history[-1] == SimulatorActions.MOVE_FORWARD:
                    # A forward action that produced (almost) no motion
                    # means we bumped into an obstacle the map misses.
                    self.unseen_obstacle = (
                        previous_step.item() <= 0.001
                    )  # hardcoded threshold for not moving

        current_obstacles = self.mapper(
            torch.from_numpy(depth).to(self.device).squeeze(),
            self.pose6D).to(self.device)
        self.current_obstacles = current_obstacles
        # Accumulate into the persistent map via element-wise max.
        self.map2DObstacles = torch.max(
            self.map2DObstacles,
            current_obstacles.unsqueeze(0).unsqueeze(0))
        if self.timing:
            print(time.time() - t, "Mapping")
        return True