Code example #1
def test_change_state():
    random_state = np.random.get_state()
    np.random.seed(233)
    scene_graph = habitat_sim.SceneGraph()
    agent = habitat_sim.Agent(scene_graph.get_root_node().create_child())

    for _ in range(100):
        state = agent.state
        state.position += np.random.uniform(-1, 1, size=3)
        state.rotation *= quat_from_angle_axis(np.random.uniform(0, 2 * np.pi),
                                               np.array([0.0, 1.0, 0.0]))
        for v in state.sensor_states.values():
            v.position += np.random.uniform(-1, 1, size=3)
            v.rotation *= quat_from_angle_axis(np.random.uniform(0, 2 * np.pi),
                                               np.array([1.0, 1.0, 1.0]))

        agent.set_state(state, infer_sensor_states=False)
        new_state = agent.state

        _check_state_same(state, new_state)

        for k, v in state.sensor_states.items():
            assert k in new_state.sensor_states
            _check_state_same(v, new_state.sensor_states[k])

    np.random.set_state(random_state)
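All of these snippets build rotations with quat_from_angle_axis (typically imported from habitat_sim.utils.common). For reference, it returns a unit quaternion from a rotation angle in radians and an axis; a minimal reimplementation, sketched here under the assumption that the numpy-quaternion package backs it, would be:

import numpy as np
import quaternion  # the numpy-quaternion package


def quat_from_angle_axis(theta: float, axis: np.ndarray) -> quaternion.quaternion:
    # unit quaternion: w = cos(theta / 2), (x, y, z) = sin(theta / 2) * axis
    axis = axis / np.linalg.norm(axis)
    return quaternion.quaternion(np.cos(theta / 2.0), *(np.sin(theta / 2.0) * axis))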
Code example #2
def place_agent(sim):
    # place our agent in the scene
    agent_state = habitat_sim.AgentState()
    agent_state.position = [5.0, 0.0, 1.0]
    agent_state.rotation = quat_from_angle_axis(
        math.radians(70), np.array([0, 1.0, 0])) * quat_from_angle_axis(
            math.radians(-20), np.array([1.0, 0, 0]))
    agent = sim.initialize_agent(0, agent_state)
    return agent.scene_node.transformation_matrix()
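Note the composition order above: for quaternions acting on vectors as q v q⁻¹, the right-hand factor applies first, so the agent is pitched 20° down about X and the result is then yawed 70° about Y. A quick sanity check (assuming the habitat_sim.utils.common helpers):

import math
import numpy as np
from habitat_sim.utils.common import quat_from_angle_axis, quat_rotate_vector

q = quat_from_angle_axis(math.radians(70), np.array([0, 1.0, 0])) \
    * quat_from_angle_axis(math.radians(-20), np.array([1.0, 0, 0]))
# the identity orientation faces -Z; the combined rotation tilts that gaze
# 20 degrees downward, then swings it 70 degrees counterclockwise about +Y
print(quat_rotate_vector(q, np.array([0.0, 0.0, -1.0])))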
Code example #3
    def get_observation(self, *args: Any, observations, episode,
                        **kwargs: Any):
        if self.previous_episode != episode:
            self.T_world_prev = SE3_Noise()

        new_agent_state = self._sim.get_agent_state()
        T_world_curr = SE3_Noise(new_agent_state.rotation,
                                 new_agent_state.position)
        self.T_world_prev = T_world_curr.inverse() * self.T_world_prev

        noise_transform = SE3_Noise(
            quat_from_angle_axis(np.random.uniform(-np.pi / 36, np.pi / 36),
                                 geo.UP),
            np.array([1.0, 0.0, 1.0]) * np.random.uniform(-0.02, 0.02),
        )

        T_world_curr_noisy = self.T_world_prev * (self.T_world_prev *
                                                  noise_transform).inverse()
        self.T_curr_prev = T_world_curr_noisy
        self.previous_episode = episode

        agent_position = T_world_curr_noisy.trans
        rotation_world_agent = T_world_curr_noisy.rot
        goal_position = np.array(episode.goals[0].position, dtype=np.float32)

        return self._compute_pointgoal(agent_position, rotation_world_agent,
                                       goal_position)
Code example #4
File: test_physics.py Project: xjwang-cs/habitat-sim
def test_kinematics(sim):
    cfg_settings = examples.settings.default_sim_settings.copy()

    cfg_settings[
        "scene"] = "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"
    # enable the physics simulator: also clears available actions to no-op
    cfg_settings["enable_physics"] = True
    cfg_settings["depth_sensor"] = True

    # test loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    sim.reconfigure(hab_cfg)
    assert sim.get_physics_object_library_size() > 0

    # test adding an object to the world
    object_id = sim.add_object(0)
    assert len(sim.get_existing_object_ids()) > 0

    # test kinematics
    I = np.identity(4)

    # test get and set translation
    sim.set_translation(np.array([0, 1.0, 0]), object_id)
    assert np.allclose(sim.get_translation(object_id), np.array([0, 1.0, 0]))

    # test get and set transform
    sim.set_transformation(I, object_id)
    assert np.allclose(sim.get_transformation(object_id), I)

    # test get and set rotation
    Q = quat_from_angle_axis(np.pi, np.array([0, 1.0, 0]))
    expected = np.eye(4)
    expected[0:3, 0:3] = quaternion.as_rotation_matrix(Q)
    sim.set_rotation(quat_to_magnum(Q), object_id)
    assert np.allclose(sim.get_transformation(object_id), expected)
    assert np.allclose(quat_from_magnum(sim.get_rotation(object_id)), Q)

    # test object removal
    old_object_id = sim.remove_object(object_id)
    assert len(sim.get_existing_object_ids()) == 0
    assert old_object_id == object_id

    object_id = sim.add_object(0)

    prev_time = 0.0
    for _ in range(2):
        # do some kinematics here (todo: translating or rotating instead of absolute)
        sim.set_translation(np.random.rand(3), object_id)
        T = sim.get_transformation(object_id)

        # test getting observation
        obs = sim.step(
            random.choice(list(hab_cfg.agents[0].action_space.keys())))

        # check that time is increasing in the world
        assert sim.get_world_time() > prev_time
        prev_time = sim.get_world_time()
Code example #5
File: new_actions.py Project: zeta1999/habitat-sim
    def _strafe_impl(scene_node: habitat_sim.SceneNode, forward_amount: float,
                     strafe_angle: float):
        forward_ax = (
            np.array(scene_node.absolute_transformation().rotation_scaling())
            @ habitat_sim.geo.FRONT)
        rotation = quat_from_angle_axis(np.deg2rad(strafe_angle),
                                        habitat_sim.geo.UP)
        move_ax = quat_rotate_vector(rotation, forward_ax)

        scene_node.translate_local(move_ax * forward_amount)
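The function above is only the shared implementation; in the full new_actions.py tutorial it is invoked from SceneNodeControl subclasses registered with habitat_sim.registry.register_move_fn. A minimal sketch of such a registration (the class and spec names here are illustrative, not the tutorial's exact code):

import attr
import habitat_sim


@attr.s(auto_attribs=True, slots=True)
class StrafeActuationSpec:
    forward_amount: float
    strafe_angle: float


@habitat_sim.registry.register_move_fn(body_action=True)
class StrafeLeft(habitat_sim.SceneNodeControl):
    def __call__(self, scene_node: habitat_sim.SceneNode,
                 actuation_spec: StrafeActuationSpec) -> None:
        _strafe_impl(scene_node, actuation_spec.forward_amount,
                     actuation_spec.strafe_angle)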
Code example #6
    def initialize_agent(self, agent_id, initial_state=None):
        agent = self.get_agent(agent_id=agent_id)
        if initial_state is None:
            initial_state = AgentState()
            if self.pathfinder.is_loaded:
                initial_state.position = self.pathfinder.get_random_navigable_point()
                initial_state.rotation = quat_from_angle_axis(
                    np.random.uniform(0, 2.0 * np.pi), np.array([0, 1, 0]))

        agent.set_state(initial_state, is_initial=True)
        self._last_state = agent.state
        return agent
Code example #7
def test_kinematics():
    cfg_settings = examples.settings.default_sim_settings.copy()

    cfg_settings[
        "scene"] = "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"
    # enable the physics simulator: also clears available actions to no-op
    cfg_settings["enable_physics"] = True
    cfg_settings["depth_sensor"] = True

    # test loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        obj_mgr = sim.get_object_template_manager()

        assert obj_mgr.get_num_templates() > 0

        # test adding an object to the world
        # get handle for object 0, used to test
        obj_handle_list = obj_mgr.get_template_handles("cheezit")
        object_id = sim.add_object_by_handle(obj_handle_list[0])
        assert len(sim.get_existing_object_ids()) > 0

        # test setting the motion type

        assert sim.set_object_motion_type(
            habitat_sim.physics.MotionType.STATIC, object_id)
        assert (sim.get_object_motion_type(object_id) ==
                habitat_sim.physics.MotionType.STATIC)
        assert sim.set_object_motion_type(
            habitat_sim.physics.MotionType.KINEMATIC, object_id)
        assert (sim.get_object_motion_type(object_id) ==
                habitat_sim.physics.MotionType.KINEMATIC)

        # test kinematics
        I = np.identity(4)

        # test get and set translation
        sim.set_translation(np.array([0, 1.0, 0]), object_id)
        assert np.allclose(sim.get_translation(object_id),
                           np.array([0, 1.0, 0]))

        # test object SceneNode
        object_node = sim.get_object_scene_node(object_id)
        assert np.allclose(sim.get_translation(object_id),
                           object_node.translation)

        # test get and set transform
        sim.set_transformation(I, object_id)
        assert np.allclose(sim.get_transformation(object_id), I)

        # test get and set rotation
        Q = quat_from_angle_axis(np.pi, np.array([0, 1.0, 0]))
        expected = np.eye(4)
        expected[0:3, 0:3] = quaternion.as_rotation_matrix(Q)
        sim.set_rotation(quat_to_magnum(Q), object_id)
        assert np.allclose(sim.get_transformation(object_id), expected)
        assert np.allclose(quat_from_magnum(sim.get_rotation(object_id)), Q)

        # test object removal
        sim.remove_object(object_id)
        assert len(sim.get_existing_object_ids()) == 0

        obj_handle_list = obj_mgr.get_template_handles("cheezit")
        object_id = sim.add_object_by_handle(obj_handle_list[0])

        prev_time = 0.0
        for _ in range(2):
            # do some kinematics here (todo: translating or rotating instead of absolute)
            sim.set_translation(np.random.rand(3), object_id)
            T = sim.get_transformation(object_id)  # noqa : F841

            # test getting observation
            sim.step(random.choice(list(
                hab_cfg.agents[0].action_space.keys())))

            # check that time is increasing in the world
            assert sim.get_world_time() > prev_time
            prev_time = sim.get_world_time()

        sim.remove_object(object_id)

        # test attaching/detaching an Agent to/from physics simulation
        agent_node = sim.agents[0].scene_node
        obj_handle_list = obj_mgr.get_template_handles("cheezit")
        object_id = sim.add_object_by_handle(obj_handle_list[0], agent_node)
        sim.set_translation(np.random.rand(3), object_id)
        assert np.allclose(agent_node.translation,
                           sim.get_translation(object_id))
        sim.remove_object(object_id, False)  # don't delete the agent's node
        assert agent_node.translation

        # test get/set RigidState
        object_id = sim.add_object_by_handle(obj_handle_list[0])
        targetRigidState = habitat_sim.bindings.RigidState(
            mn.Quaternion(), np.array([1.0, 2.0, 3.0]))
        sim.set_rigid_state(targetRigidState, object_id)
        objectRigidState = sim.get_rigid_state(object_id)
        assert np.allclose(objectRigidState.translation,
                           targetRigidState.translation)
        assert objectRigidState.rotation == targetRigidState.rotation
Code example #8
    def init_physics_test_scene(self, num_objects):
        object_position = np.array(
            [-0.569043, 2.04804, 13.6156]
        )  # above the castle table

        # turn agent toward the object
        print("turning agent toward the physics!")
        agent_state = self._sim.get_agent(0).get_state()
        agent_to_obj = object_position - agent_state.position
        agent_local_forward = np.array([0, 0, -1.0])
        flat_to_obj = np.array([agent_to_obj[0], 0.0, agent_to_obj[2]])
        flat_dist_to_obj = np.linalg.norm(flat_to_obj)
        flat_to_obj /= flat_dist_to_obj
        # move the agent closer to the objects if too far (this will be projected back to floor in set)
        if flat_dist_to_obj > 3.0:
            agent_state.position = object_position - flat_to_obj * 3.0
        # signed turn angle about +Y: 2D cross product (det) and dot product on the X-Z plane
        det = (
            flat_to_obj[0] * agent_local_forward[2]
            - agent_local_forward[0] * flat_to_obj[2]
        )
        turn_angle = math.atan2(det, np.dot(agent_local_forward, flat_to_obj))
        agent_state.rotation = quat_from_angle_axis(turn_angle, np.array([0, 1.0, 0]))
        # need to move the sensors too
        for sensor in agent_state.sensor_states:
            agent_state.sensor_states[sensor].rotation = agent_state.rotation
            agent_state.sensor_states[
                sensor
            ].position = agent_state.position + np.array([0, 1.5, 0])
        self._sim.get_agent(0).set_state(agent_state)

        # hard coded dimensions of maximum bounding box for all 3 default objects:
        max_union_bb_dim = np.array([0.125, 0.19, 0.26])

        # add some objects in a grid
        object_lib_size = self._sim.get_physics_object_library_size()
        object_init_grid_dim = (3, 1, 3)
        object_init_grid = {}
        assert (
            object_lib_size > 0
        ), "!!!No objects loaded in library, aborting object instancing example!!!"

        # clear the objects if we are re-running this initializer
        for old_obj_id in self._sim.get_existing_object_ids():
            self._sim.remove_object(old_obj_id)

        for obj_id in range(num_objects):
            # rand_obj_index = random.randint(0, object_lib_size - 1)
            # rand_obj_index = 0  # overwrite for specific object only
            rand_obj_index = self._sim_settings.get("test_object_index")
            if rand_obj_index < 0:  # get random object on -1
                rand_obj_index = random.randint(0, object_lib_size - 1)
            object_init_cell = (
                random.randint(-object_init_grid_dim[0], object_init_grid_dim[0]),
                random.randint(-object_init_grid_dim[1], object_init_grid_dim[1]),
                random.randint(-object_init_grid_dim[2], object_init_grid_dim[2]),
            )
            while object_init_cell in object_init_grid:
                object_init_cell = (
                    random.randint(-object_init_grid_dim[0], object_init_grid_dim[0]),
                    random.randint(-object_init_grid_dim[1], object_init_grid_dim[1]),
                    random.randint(-object_init_grid_dim[2], object_init_grid_dim[2]),
                )
            object_id = self._sim.add_object(rand_obj_index)
            object_init_grid[object_init_cell] = object_id
            object_offset = np.array(
                [
                    max_union_bb_dim[0] * object_init_cell[0],
                    max_union_bb_dim[1] * object_init_cell[1],
                    max_union_bb_dim[2] * object_init_cell[2],
                ]
            )
            self._sim.set_translation(object_position + object_offset, object_id)
            print(
                "added object: "
                + str(object_id)
                + " of type "
                + str(rand_obj_index)
                + " at: "
                + str(object_position + object_offset)
                + " | "
                + str(object_init_cell)
            )
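The turn-toward-object math above (repeated again in examples #10, #14, and #18) can be distilled into a small helper; this is a sketch of the same computation, not a function from the project:

import math
import numpy as np
from habitat_sim.utils.common import quat_from_angle_axis


def yaw_to_face(agent_position, target_position):
    # signed angle on the X-Z plane from the agent's default forward (-Z)
    # to the flattened agent-to-target direction, as a rotation about +Y
    to_target = np.array(target_position) - np.array(agent_position)
    flat = np.array([to_target[0], 0.0, to_target[2]])
    flat /= np.linalg.norm(flat)
    forward = np.array([0.0, 0.0, -1.0])
    det = flat[0] * forward[2] - forward[0] * flat[2]
    return quat_from_angle_axis(math.atan2(det, np.dot(forward, flat)),
                                np.array([0.0, 1.0, 0.0]))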
Code example #9
    def step(self, action, only_allowed=True):
        """
        All angle calculations in this function are w.r.t. the habitat coordinate
        frame, on the X-Z plane, where +Y is upward, -Z is forward and +X is rightward.
        Angle 0 corresponds to +X, angle 90 to +Z, and angle 270 to -Z.

        :param action: action to be taken
        :param only_allowed: if true, then can't step anywhere except allowed locations
        :return:
        Dict of observations
        """
        assert self._is_episode_active, (
            "episode is not active, environment not RESET or "
            "STOP action called previously")

        self._previous_step_collided = False
        # STOP: 0, FORWARD: 1, LEFT: 2, RIGHT: 3
        if action == HabitatSimActions.STOP:
            self._is_episode_active = False
        else:
            prev_position_index = self._receiver_position_index
            prev_rotation_angle = self._rotation_angle
            if action == HabitatSimActions.MOVE_FORWARD:
                # the agent initially faces -Z by default
                self._previous_step_collided = True  # assume collision until a neighbor in the facing direction is found
                for neighbor in self.graph[self._receiver_position_index]:
                    p1 = self.graph.nodes[
                        self._receiver_position_index]['point']
                    p2 = self.graph.nodes[neighbor]['point']
                    direction = int(
                        np.around(
                            np.rad2deg(np.arctan2(p2[2] - p1[2],
                                                  p2[0] - p1[0])))) % 360
                    if direction == self.get_orientation():
                        self._receiver_position_index = neighbor
                        self._previous_step_collided = False
                        break
            elif action == HabitatSimActions.TURN_LEFT:
                # agent rotates counterclockwise, so turning left means increasing rotation angle by 90
                self._rotation_angle = (self._rotation_angle + 90) % 360
            elif action == HabitatSimActions.TURN_RIGHT:
                self._rotation_angle = (self._rotation_angle - 90) % 360

            if self.config.CONTINUOUS_VIEW_CHANGE:
                intermediate_observations = list()
                fps = self.config.VIEW_CHANGE_FPS
                if action == HabitatSimActions.MOVE_FORWARD:
                    prev_position = np.array(
                        self.graph.nodes[prev_position_index]['point'])
                    current_position = np.array(self.graph.nodes[
                        self._receiver_position_index]['point'])
                    for i in range(1, fps):
                        intermediate_position = prev_position + i / fps * (
                            current_position - prev_position)
                        self.set_agent_state(
                            intermediate_position.tolist(),
                            quat_from_angle_axis(
                                np.deg2rad(self._rotation_angle),
                                np.array([0, 1, 0])))
                        sim_obs = self._sim.get_sensor_observations()
                        observations = self._sensor_suite.get_observations(
                            sim_obs)
                        intermediate_observations.append(observations)
                else:
                    for i in range(1, fps):
                        if action == HabitatSimActions.TURN_LEFT:
                            intermediate_rotation = prev_rotation_angle + i / fps * 90
                        elif action == HabitatSimActions.TURN_RIGHT:
                            intermediate_rotation = prev_rotation_angle - i / fps * 90
                        self.set_agent_state(
                            list(self.graph.nodes[
                                self._receiver_position_index]['point']),
                            quat_from_angle_axis(
                                np.deg2rad(intermediate_rotation),
                                np.array([0, 1, 0])))
                        sim_obs = self._sim.get_sensor_observations()
                        observations = self._sensor_suite.get_observations(
                            sim_obs)
                        intermediate_observations.append(observations)

            if not self.config.USE_RENDERED_OBSERVATIONS:
                self.set_agent_state(
                    list(self.graph.nodes[self._receiver_position_index]
                         ['point']),
                    quat_from_angle_axis(np.deg2rad(self._rotation_angle),
                                         np.array([0, 1, 0])))
            else:
                self._sim.set_agent_state(
                    list(self.graph.nodes[self._receiver_position_index]
                         ['point']),
                    quat_from_angle_axis(np.deg2rad(self._rotation_angle),
                                         np.array([0, 1, 0])))
        self._episode_step_count += 1

        # log debugging info
        logging.debug(
            'After taking action {}, s,r: {}, {}, orientation: {}, location: {}'
            .format(action, self._source_position_index,
                    self._receiver_position_index, self.get_orientation(),
                    self.graph.nodes[self._receiver_position_index]['point']))

        sim_obs = self._get_sim_observation()
        if self.config.USE_RENDERED_OBSERVATIONS:
            self._sim.set_sensor_observations(sim_obs)
        self._prev_sim_obs = sim_obs
        observations = self._sensor_suite.get_observations(sim_obs)
        if self.config.CONTINUOUS_VIEW_CHANGE:
            observations['intermediate'] = intermediate_observations

        return observations
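Under this convention the edge direction between graph nodes is measured on the X-Z plane with arctan2, so an offset along +X maps to 0, +Z to 90, -X to 180, and -Z to 270:

import numpy as np

for offset in ([1, 0, 0], [0, 0, 1], [-1, 0, 0], [0, 0, -1]):
    dx, _, dz = offset
    print(offset, int(np.around(np.rad2deg(np.arctan2(dz, dx)))) % 360)
# prints 0, 90, 180, 270 respectively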
Code example #10
    def run(self):

        scene = self.sim.semantic_scene
        objects = scene.objects
        object_dict = {}
        for obj in objects:
            if obj is None or obj.category is None:
                continue
            if obj.category.name() not in object_dict:
                object_dict[str(obj.category.name())] = 0
        for obj in objects:
            if (obj is None or obj.category is None or
                    obj.category.name() in self.ignore_classes):  # not in self.include_classes
                continue

            if object_dict[str(obj.category.name())] > 5:
                continue
            object_dict[str(obj.category.name())] += 1
            print(object_dict[str(obj.category.name())])
            # st()
            #if self.verbose:
            print(f"Object name is: {obj.category.name()}")
            # Calculate distance to object center
            obj_center = obj.obb.to_aabb().center
            #print(obj_center)
            obj_center = np.expand_dims(obj_center, axis=0)
            #print(obj_center)
            distances = np.sqrt(np.sum((self.nav_pts - obj_center)**2, axis=1))

            # Get points with r_min < dist < r_max
            valid_pts = self.nav_pts[np.where(
                (distances > self.radius_min) * (distances < self.radius_max))]
            # if not valid_pts:
            # continue

            # plot valid points that we happen to select
            # self.plot_navigable_points(valid_pts)

            # Bin points based on angles [vertical_angle (10 deg/bin), horizontal_angle (10 deg/bin)]
            valid_pts_shift = valid_pts - obj_center

            dz = valid_pts_shift[:, 2]
            dx = valid_pts_shift[:, 0]
            dy = valid_pts_shift[:, 1]

            # Get yaw for binning
            valid_yaw = np.degrees(np.arctan2(dz, dx))

            # pitch calculation
            dxdz_norm = np.sqrt((dx * dx) + (dz * dz))
            valid_pitch = np.degrees(np.arctan2(dy, dxdz_norm))

            # binning yaw around object
            nbins = 18
            bins = np.linspace(-180, 180, nbins + 1)
            bin_yaw = np.digitize(valid_yaw, bins)

            num_valid_bins = np.unique(bin_yaw).size

            if num_valid_bins == 0:
                continue

            spawns_per_bin = int(self.num_views / num_valid_bins) + 2
            if self.verbose:
                print(f'spawns_per_bin: {spawns_per_bin}')

            if self.visualize:
                import matplotlib.cm as cm
                colors = iter(cm.rainbow(np.linspace(0, 1, nbins)))
                plt.figure(2)
                plt.clf()
                print(np.unique(bin_yaw))
                for bi in range(nbins):
                    cur_bi = np.where(bin_yaw == (bi + 1))
                    points = valid_pts[cur_bi]
                    x_sample = points[:, 0]
                    z_sample = points[:, 2]
                    plt.plot(z_sample, x_sample, 'o', color=next(colors))
                plt.plot(obj_center[:, 2],
                         obj_center[:, 0],
                         'x',
                         color='black')
                plt.show()

            action = "do_nothing"
            episodes = []
            valid_pts_selected = []
            cnt = 0
            for b in range(nbins):

                # get all angle indices in the current bin range
                # st()
                inds_bin_cur = np.where(
                    bin_yaw == (b + 1))  # bins start 1 so need +1
                if inds_bin_cur[0].size == 0:
                    continue

                for s in range(spawns_per_bin):
                    # st()
                    s_ind = np.random.choice(inds_bin_cur[0])
                    #s_ind = inds_bin_cur[0][0]
                    pos_s = valid_pts[s_ind]
                    valid_pts_selected.append(pos_s)
                    agent_state = habitat_sim.AgentState()
                    agent_state.position = pos_s + np.array([0, 1.5, 0])

                    # YAW calculation - rotate to object
                    agent_to_obj = np.squeeze(
                        obj_center) - agent_state.position
                    agent_local_forward = np.array([0, 0, -1.0])  # y, z, x
                    flat_to_obj = np.array(
                        [agent_to_obj[0], 0.0, agent_to_obj[2]])
                    flat_dist_to_obj = np.linalg.norm(flat_to_obj)
                    flat_to_obj /= flat_dist_to_obj

                    det = (flat_to_obj[0] * agent_local_forward[2] -
                           agent_local_forward[0] * flat_to_obj[2])
                    turn_angle = math.atan2(
                        det, np.dot(agent_local_forward, flat_to_obj))
                    quat_yaw = quat_from_angle_axis(turn_angle,
                                                    np.array([0, 1.0, 0]))

                    # Set agent yaw rotation to look at object
                    agent_state.rotation = quat_yaw

                    # change sensor state to default
                    # need to move the sensors too
                    #print(self.agent.state.sensor_states)
                    for sensor in self.agent.state.sensor_states:
                        # st()
                        self.agent.state.sensor_states[
                            sensor].rotation = agent_state.rotation
                        self.agent.state.sensor_states[
                            sensor].position = agent_state.position  # + np.array([0, 1.5, 0]) # ADDED IN UP TOP
                        # print("PRINT", self.agent.state.sensor_states[sensor].rotation)

                    # Calculate Pitch from head to object
                    turn_pitch = np.degrees(
                        math.atan2(agent_to_obj[1], flat_dist_to_obj))
                    num_turns = np.abs(
                        np.floor(turn_pitch / self.rot_interval)
                    ).astype(
                        int
                    )  # compute number of times to move head up or down by rot_interval
                    #print("MOVING HEAD ", num_turns, " TIMES")
                    movement = "look_up" if turn_pitch > 0 else "look_down"

                    # initiate agent
                    self.agent.set_state(agent_state)
                    self.sim.step(action)

                    # Rotate "head" of agent up or down based on calculated pitch angle to object to center it in view
                    if num_turns == 0:
                        pass
                    else:
                        for turn in range(num_turns):
                            # st()
                            self.sim.step(movement)
                            if self.verbose:
                                for sensor in self.agent.state.sensor_states:
                                    print(self.agent.state.
                                          sensor_states[sensor].rotation)

                    # get observations after centering
                    observations = self.sim.step(action)

                    # Assuming all sensors have same rotation and position
                    observations["rotations"] = self.agent.state.sensor_states[
                        'color_sensor'].rotation  #agent_state.rotation
                    observations["positions"] = self.agent.state.sensor_states[
                        'color_sensor'].position

                    mainobj_id = obj.id
                    main_id = int(mainobj_id[1:])
                    semantic = observations["semantic_sensor"]
                    mask = np.zeros_like(semantic)
                    mask[semantic == main_id] = 1
                    obj_is_visible = np.sum(mask) > 0

                    if self.is_valid_datapoint(observations,
                                               obj) and obj_is_visible:
                        if self.verbose:
                            print("episode is valid......")
                        episodes.append(observations)
                        if self.visualize:
                            rgb = observations["color_sensor"]
                            semantic = observations["semantic_sensor"]
                            depth = observations["depth_sensor"]
                            self.display_sample(rgb,
                                                semantic,
                                                depth,
                                                mainobj=obj,
                                                visualize=False)
                    # if self.visualize:
                    #         rgb = observations["color_sensor"]
                    #         semantic = observations["semantic_sensor"]
                    #         depth = observations["depth_sensor"]
                    #         self.display_sample(rgb, semantic, depth, mainobj=obj, visualize=False)

                    #print("agent_state: position", self.agent.state.position, "rotation", self.agent.state.rotation)

                    cnt += 1

            if len(episodes) >= self.num_views:
                print(f'num episodes: {len(episodes)}')
                data_folder = obj.category.name() + '_' + obj.id
                data_path = os.path.join(self.basepath, data_folder)
                print("Saving to ", data_path)
                os.mkdir(data_path)
                flat_obs = np.random.choice(episodes,
                                            self.num_views,
                                            replace=False)
                viewnum = 0
                for obs in flat_obs:
                    self.save_datapoint(self.agent, obs, data_path, viewnum,
                                        obj, True)
                    viewnum += 1
            else:
                print(f"Not enough episodes: f{len(episodes)}")

            if self.visualize:
                valid_pts_selected = np.vstack(valid_pts_selected)
                self.plot_navigable_points(valid_pts_selected)
Code example #11
File: asset_viewer.py Project: jturner65/habitat-sim
    # Create observations array
    observations = []

    # @markdown Set how long the resultant video should be, in seconds.  The object will make 1 full revolution during this time.
    video_length = 4.8  # @param {type:"slider", min:1.0, max:20.0, step:0.1}
    # Sim time step
    time_step = 1.0 / 60.0
    # Amount to rotate per frame to make 1 full rotation
    rot_amount = 2 * math.pi / (video_length / time_step)

    # simulate with updated camera at each frame
    start_time = sim.get_world_time()
    while sim.get_world_time() - start_time < video_length:
        sim.step_physics(time_step)
        # rotate the agent to rotate the camera
        agent_state.rotation *= ut.quat_from_angle_axis(
            rot_amount, np.array([0, 1.0, 0]))
        agent.set_state(agent_state)

        observations.append(sim.get_sensor_observations())

    # video rendering of carousel view
    video_prefix = clip_short_name + "_scene_view"
    if make_video:
        vut.make_video(
            observations,
            "color_sensor_3rd_person",
            "color",
            output_path + video_prefix,
            open_vid=show_video,
            video_dims=[1280, 720],
        )
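With the defaults above, the carousel spans video_length / time_step = 4.8 × 60 = 288 physics steps, so each frame turns the agent by

rot_amount = 2 * math.pi / 288  # ≈ 0.0218 rad ≈ 1.25 degrees per frame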
Code example #12
def vis_panorama(env,
                 num,
                 model,
                 goals,
                 log=False,
                 forward_only=False,
                 classes=None):
    device = get_device(model)
    rotations = [
        hutil.quat_from_angle_axis(a, np.array([0, 1, 0]))
        for a in np.linspace(0, 2 * np.pi, endpoint=False, num=num)
    ]
    pos, rot = env.agent_state()
    ims = []
    dists = min_dists(env, goals)
    cols = 224
    scale = (4.0 / num) - 0.05
    rng = int(scale * cols / 2)
    for r in rotations:
        env.set_agent_state(pos, r * rot)
        obs = env.get_observation()['rgb']
        vals = model(numpy_to_imgnet(obs).unsqueeze(0).to(device))
        if forward_only:
            max_vals = vals[0, :, 0].detach().cpu().numpy()
        else:
            max_vals = vals.max(axis=2).values.squeeze().detach().cpu().numpy()
        env.step(0)
        dist_diff = -(min_dists(env, goals) - dists)
        im = obs[0] if len(obs.shape) == 4 else obs
        im = im[:, (cols // 2) - rng:(cols // 2) + rng, :]
        # im[:,-1] = 0
        if log:
            max_vals = np.log(max_vals)
        ims.append((im, max_vals, dist_diff))

    im, max_vals, dist_diff = util.unzip(reversed(ims))
    joined = np.concatenate(im, axis=1)
    fig, axes = plt.subplots(6,
                             1,
                             gridspec_kw={
                                 'hspace': 0,
                                 "wspace": 0,
                                 'height_ratios': [6, 0.5, 0.5, 0.5, 0.5, 0.5]
                             })
    fig.subplots_adjust(hspace=0, wspace=0)
    imax = axes[0]
    # imax.axis('on')
    pltaxes = axes[1:]
    imax.set_xlim((0,joined.shape[1]))
    imax.set_ylim((joined.shape[0],0))

    # search for the right figure height
    low, high = 8, 9
    for _ in range(20):
        mid = (high+low)/2
        fig.set_figheight(mid)
        fig.canvas.draw()
        imwidth = imax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()).width
        axwidth = pltaxes[0].get_window_extent().transformed(fig.dpi_scale_trans.inverted()).width
        if imwidth == axwidth:
            print('eq')
            high = mid
        else:
            print('low')
            low = mid

    fig.set_figheight(high)
    fig.savefig('vis/test.pdf',bbox_inches='tight',pad_inches=0.0)

    imax.axis('on')
    imax.tick_params(
        axis='x',  # changes apply to the x-axis
        which='both',  # both major and minor ticks are affected
        bottom=False,  # ticks along the bottom edge are off
        top=False,  # ticks along the top edge are off
        labelbottom=False)  # labels along the bottom edge are off

    imax.tick_params(
        axis='y',  # changes apply to the y-axis
        which='both',  # both major and minor ticks are affected
        left=False,  # ticks along the left edge are off
        labelleft=False)  # labels along the left edge are off

    vals = np.stack(max_vals).transpose()
    for ax, va in zip(pltaxes, vals):
        ax.imshow(va[None, :],
                  extent=[0, 12, 0, 1],
                  aspect='auto',
                  cmap='Wistia')
        ax.set_xlim((0, 12))
        ax.set_ylim((0,1))
        for i, v in enumerate(va):
            ax.text(i + 0.5,
                    0.45,
                    '%0.2f' % (v),
                    fontdict={'size': 16},
                    horizontalalignment='center',
                    verticalalignment='center')
        ax.axis('on')
        ax.tick_params(
            axis='x',  # changes apply to the x-axis
            which='both',  # both major and minor ticks are affected
            bottom=False,  # ticks along the bottom edge are off
            top=False,  # ticks along the top edge are off
            labelbottom=False)  # labels along the bottom edge are off

        ax.tick_params(
            axis='y',  # changes apply to the y-axis
            which='both',  # both major and minor ticks are affected
            left=False,  # ticks along the left edge are off
            labelleft=False)  # labels along the left edge are off

    ratio = joined.shape[0] / joined.shape[1]
    imax.imshow(joined)
    # fig.text(0.12, 0.41, 'Bed', horizontalalignment='right', fontdict={'size': 16})
    # fig.text(0.12, 0.34, 'Chair', horizontalalignment='right', fontdict={'size': 16})
    # fig.text(0.12, 0.271, 'Couch', horizontalalignment='right',
             # fontdict={'size': 16})
    # fig.text(0.12, 0.205, 'D. Table', horizontalalignment='right',
             # fontdict={'size': 16})
    # fig.text(0.12, 0.132, 'Toilet', horizontalalignment='right',
             # fontdict={'size': 16})
    # ratio = joined.shape[1] / (joined.shape[0] * 11 / 6)
    ratio = joined.shape[1] / (joined.shape[0] * 8.5 / 6)

    height = 8
    # searched for right high to deal with text shift
    # searched_height = 8.051948547363281
    # fig.set_figheight(searched_height)
    fig.set_figheight(height)
    fig.set_figwidth(height * ratio)
    fig.savefig('vis/test1.pdf',bbox_inches='tight',pad_inches=0.0)


    # set the agent back to where it was before
    env.set_agent_state(pos, rot)

    ims = list(reversed(ims))
    ims = [(im, val, cor) for im, val, cor in ims]
    annotated = []
    dists = np.array([[v, d] for _, v, d in ims])
    corrs = np.array(
        [np.corrcoef(dists[:, 0, i], dists[:, 1, i])[0, 1] for i in range(5)])
    return fig, corrs
Code example #13
File: SE3.py Project: NicoGrande/habitat-api
    def __init__(self, rot=None, trans=None):
        self.rot = rot if rot is not None else quat_from_angle_axis(0.0, geo.UP)
        self.trans = trans if trans is not None else np.array([0.0, 0.0, 0.0])
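This example shows only the constructor; the SE3 objects used in example #3 also need composition and inverse. A sketch of how those could look for a quaternion rot and translation trans (an assumption, not the project's actual code):

import numpy as np
from habitat_sim import geo
from habitat_sim.utils.common import quat_from_angle_axis, quat_rotate_vector


class SE3:
    def __init__(self, rot=None, trans=None):
        self.rot = rot if rot is not None else quat_from_angle_axis(0.0, geo.UP)
        self.trans = trans if trans is not None else np.zeros(3)

    def __mul__(self, other):
        # composition: `other` is applied first, then `self`
        return SE3(self.rot * other.rot,
                   quat_rotate_vector(self.rot, other.trans) + self.trans)

    def inverse(self):
        inv = self.rot.inverse()
        return SE3(inv, -quat_rotate_vector(inv, self.trans))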
Code example #14
    def get_midpoint_obj_conf(self):

        scene = self.sim.semantic_scene
        objects = scene.objects
        print(objects)
        #objects = random.sample(list(objects), self.num_objects_per_episode)
        xyz_obj_mids = []
        print(self.num_objects_per_episode)
        print(len(objects))
        count = 0
        while count < self.num_objects_per_episode:
            print("GETTING OBJECT #", count)
            obj_ind = np.random.randint(low=0, high=len(objects))
            obj = objects[obj_ind]
            if (obj is None or obj.category is None or
                    obj.category.name() not in self.include_classes):
                continue
            # st()
            if self.verbose:
                print(f"Object name is: {obj.category.name()}")
            # Calculate distance to object center
            obj_center = obj.obb.to_aabb().center
            #print(obj_center)
            obj_center = np.expand_dims(obj_center, axis=0)
            #print(obj_center)
            distances = np.sqrt(np.sum((self.nav_pts - obj_center)**2, axis=1))

            # Get points with r_min < dist < r_max
            valid_pts = self.nav_pts[np.where(
                (distances > self.radius_min) * (distances < self.radius_max))]
            # if not valid_pts:
            # continue

            # plot valid points that we happen to select
            # self.plot_navigable_points(valid_pts)

            # Bin points based on angles [vertical_angle (10 deg/bin), horizontal_angle (10 deg/bin)]
            valid_pts_shift = valid_pts - obj_center

            dz = valid_pts_shift[:, 2]
            dx = valid_pts_shift[:, 0]
            dy = valid_pts_shift[:, 1]

            # Get yaw for binning
            valid_yaw = np.degrees(np.arctan2(dz, dx))

            # pitch calculation
            dxdz_norm = np.sqrt((dx * dx) + (dz * dz))
            valid_pitch = np.degrees(np.arctan2(dy, dxdz_norm))

            # binning yaw around object
            nbins = 18
            bins = np.linspace(-180, 180, nbins + 1)
            bin_yaw = np.digitize(valid_yaw, bins)

            num_valid_bins = np.unique(bin_yaw).size

            # if self.visualize:
            #     import matplotlib.cm as cm
            #     colors = iter(cm.rainbow(np.linspace(0, 1, nbins)))
            #     #plt.figure(2)
            #     #plt.clf()
            #     for bi in range(nbins):
            #         cur_bi = np.where(bin_yaw==(bi+1))
            #         points = valid_pts[cur_bi]
            #         x_sample = points[:,0]
            #         z_sample = points[:,2]
            #         plt.plot(z_sample, x_sample, 'o', color = next(colors))
            #     plt.plot(obj_center[:,2], obj_center[:,0], 'x', color = 'black')
            #     plt.show()
            #     plt.pause(0.5)
            #     plt.close()

            action = "do_nothing"
            episodes = []
            valid_pts_selected = []
            bin_inds = list(range(nbins))
            bin_inds = random.sample(bin_inds, len(bin_inds))
            # b_inds_notempty = []
            # # get rid of empty bins
            # for b in bin_inds:
            #     inds_bin_cur = np.where(bin_yaw==(b+1))
            #     if not inds_bin_cur[0].size == 0:
            #         b_inds_notempty.append(b)

            for b in bin_inds:  # b_inds_notempty:
                # get all angle indices in the current bin range
                inds_bin_cur = np.where(
                    bin_yaw == (b + 1))  # bins start at 1 so need +1
                if inds_bin_cur[0].size == 0:
                    continue

                # st()
                s_ind = np.random.choice(inds_bin_cur[0])
                #s_ind = inds_bin_cur[0][0]
                pos_s = valid_pts[s_ind]
                valid_pts_selected.append(pos_s)
                agent_state = habitat_sim.AgentState()
                agent_state.position = pos_s + np.array([0, 1.5, 0])

                # YAW calculation - rotate to object
                agent_to_obj = np.squeeze(obj_center) - agent_state.position
                agent_local_forward = np.array([0, 0, -1.0])  # y, z, x
                flat_to_obj = np.array([agent_to_obj[0], 0.0, agent_to_obj[2]])
                flat_dist_to_obj = np.linalg.norm(flat_to_obj)
                flat_to_obj /= flat_dist_to_obj

                det = (flat_to_obj[0] * agent_local_forward[2] -
                       agent_local_forward[0] * flat_to_obj[2])
                turn_angle = math.atan2(
                    det, np.dot(agent_local_forward, flat_to_obj))
                quat_yaw = quat_from_angle_axis(turn_angle,
                                                np.array([0, 1.0, 0]))

                # Set agent yaw rotation to look at object
                agent_state.rotation = quat_yaw

                # change sensor state to default
                # need to move the sensors too
                print(self.agent.state.sensor_states)
                for sensor in self.agent.state.sensor_states:
                    # st()
                    self.agent.state.sensor_states[
                        sensor].rotation = agent_state.rotation
                    self.agent.state.sensor_states[
                        sensor].position = agent_state.position  # + np.array([0, 1.5, 0]) # ADDED IN UP TOP
                    # print("PRINT", self.agent.state.sensor_states[sensor].rotation)

                # NOTE: for finding an object, i dont think we'd want to center it
                # Calculate Pitch from head to object
                turn_pitch = np.degrees(
                    math.atan2(agent_to_obj[1], flat_dist_to_obj))
                num_turns = np.abs(
                    np.floor(turn_pitch / self.rot_interval)
                ).astype(
                    int
                )  # compute number of times to move head up or down by rot_interval
                print("MOVING HEAD ", num_turns, " TIMES")
                movement = "look_up" if turn_pitch > 0 else "look_down"

                # initiate agent
                self.agent.set_state(agent_state)
                self.sim.step(action)

                # Rotate "head" of agent up or down based on calculated pitch angle to object to center it in view
                if num_turns == 0:
                    pass
                else:
                    for turn in range(num_turns):
                        # st()
                        self.sim.step(movement)
                        if self.verbose:
                            for sensor in self.agent.state.sensor_states:
                                print(self.agent.state.sensor_states[sensor].
                                      rotation)

                # get observations after centering
                observations = self.sim.step(action)

                # ================== Mask R-CNN ==================

                im = observations["color_sensor"]
                im = Image.fromarray(im, mode="RGBA")
                im = cv2.cvtColor(np.asarray(im), cv2.COLOR_RGB2BGR)

                # plt.imshow(im)
                # plt.show()

                outputs = self.maskrcnn(im)

                v = Visualizer(im[:, :, ::-1],
                               MetadataCatalog.get(
                                   self.cfg_det.DATASETS.TRAIN[0]),
                               scale=1.2)
                out = v.draw_instance_predictions(
                    outputs['instances'].to("cpu"))
                seg_im = out.get_image()

                if self.visualize:
                    plt.imshow(seg_im)
                    plt.show()

                pred_masks = outputs['instances'].pred_masks
                pred_boxes = outputs['instances'].pred_boxes.tensor
                pred_classes = outputs['instances'].pred_classes
                pred_scores = outputs['instances'].scores

                maskrcnn_to_catname = {
                    56: "chair",
                    59: "bed",
                    61: "toilet",
                    57: "couch",
                    58: "indoor-plant",
                    72: "refrigerator",
                    62: "tv",
                    60: "dining-table"
                }

                obj_ids = []
                obj_catids = []
                obj_scores = []
                obj_masks = []
                obj_all_catids = []
                obj_all_scores = []
                obj_all_boxes = []
                for segs in range(len(pred_masks)):
                    if pred_classes[segs].item() in maskrcnn_to_catname:
                        if pred_scores[segs] >= 0.80:
                            obj_ids.append(segs)
                            obj_catids.append(pred_classes[segs].item())
                            obj_scores.append(pred_scores[segs].item())
                            obj_masks.append(pred_masks[segs])

                            obj_all_catids.append(pred_classes[segs].item())
                            obj_all_scores.append(pred_scores[segs].item())
                            y, x = torch.where(pred_masks[segs])
                            pred_box = torch.Tensor(
                                [min(y), min(x),
                                 max(y), max(x)])  # ymin, xmin, ymax, xmax
                            obj_all_boxes.append(pred_box)

                print("MASKS ", len(pred_masks))
                print("VALID ", len(obj_scores))
                print(obj_scores)
                print(pred_scores.shape)

                translation_ = self.agent.state.sensor_states[
                    'depth_sensor'].position
                quaternion_ = self.agent.state.sensor_states[
                    'depth_sensor'].rotation
                rotation_ = quaternion.as_rotation_matrix(quaternion_)
                T_world_cam = np.eye(4)
                T_world_cam[0:3, 0:3] = rotation_
                T_world_cam[0:3, 3] = translation_

                if not obj_masks:
                    continue
                else:

                    # randomly choose a high confidence object
                    obj_mask_focus = random.choice(obj_masks)

                    depth = observations["depth_sensor"]

                    xs, ys = np.meshgrid(
                        np.linspace(-1 * 256 / 2., 1 * 256 / 2., 256),
                        np.linspace(1 * 256 / 2., -1 * 256 / 2., 256))
                    depth = depth.reshape(1, 256, 256)
                    xs = xs.reshape(1, 256, 256)
                    ys = ys.reshape(1, 256, 256)

                    xys = np.vstack(
                        (xs * depth, ys * depth, -depth, np.ones(depth.shape)))
                    xys = xys.reshape(4, -1)
                    xy_c0 = np.matmul(np.linalg.inv(self.K), xys)
                    xyz = xy_c0.T[:, :3].reshape(256, 256, 3)

                    #xyz = self.get_point_cloud_from_z(depth, self.camera_matrix, scale=1)

                    xyz_obj_masked = xyz[obj_mask_focus]

                    xyz_obj_masked = np.matmul(
                        rotation_, xyz_obj_masked.T) + translation_.reshape(
                            3, 1)
                    xyz_obj_mid = np.mean(xyz_obj_masked, axis=1)

                    print("MIDPOINT=", xyz_obj_mid)

                    xyz_obj_mids.append(xyz_obj_mid)

                    count += 1

                    break  # got an object

                #################################

                # if self.visualize:
                #         rgb = observations["color_sensor"]
                #         semantic = observations["semantic_sensor"]
                #         depth = observations["depth_sensor"]
                #         self.display_sample(rgb, semantic, depth, mainobj=obj, visualize=False)

                #print("agent_state: position", self.agent.state.position, "rotation", self.agent.state.rotation)

        xyz_obj_mids = np.array(xyz_obj_mids)

        return xyz_obj_mids
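The backprojection above multiplies a centered pixel grid by the inverse of self.K, which is set up elsewhere in the class. For a square 256×256 sensor with a 90° horizontal FOV (an assumed configuration, purely for illustration), the matching 4×4 intrinsics matrix would be:

import numpy as np

hfov = np.deg2rad(90.0)
focal = (256 / 2.0) / np.tan(hfov / 2.0)  # 128 pixels
K = np.array([
    [focal, 0.0, 0.0, 0.0],
    [0.0, focal, 0.0, 0.0],
    [0.0, 0.0, 1.0, 0.0],
    [0.0, 0.0, 0.0, 1.0],
])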
Code example #15
File: evaluate.py Project: MatthewChang/video-dqn
    cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"
    return DefaultPredictor(cfg)


def degree_to_rad(deg):
    return deg / 180 * np.pi


map_resolution = 1500
success_distance = 1
MAX_STEPS = 500

NUM_ROTATIONS = 12
# angs = np.linspace(0, 2 * np.pi, num=NUM_ROTATIONS, endpoint=False)
angs = np.linspace(0, 2 * np.pi, num=NUM_ROTATIONS + 1)[1:]
rotations = [hutil.quat_from_angle_axis(a, np.array([0, 1, 0])) for a in angs]


# The identity quaternion (1, 0, 0, 0) points toward the negative z direction,
# and the rotation angle is counterclockwise around the y axis, toward negative x
def check_movement(config, env, start_ang, planner=None, rng=random):
    points = []
    for _ in range(100):
        dist = rng.uniform(0.9, 2)
        ang = rng.uniform(-degree_to_rad(7), degree_to_rad(7)) + start_ang
        translation = np.array([-math.sin(ang), 0, -math.cos(ang)]) * dist
        points.append(translation + env.pos)

    if len(points) == 0: return None
    point_index = planner.reachable_nearby(points)
    if point_index is not None:
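A quick check of the convention stated in the comment above check_movement, using the same helpers (quat_rotate_vector is the habitat_sim.utils.common counterpart to the hutil import style used here):

import numpy as np
from habitat_sim.utils.common import quat_from_angle_axis, quat_rotate_vector

forward = np.array([0.0, 0.0, -1.0])  # the identity orientation looks down -Z
q = quat_from_angle_axis(np.pi / 2, np.array([0.0, 1.0, 0.0]))
print(quat_rotate_vector(q, forward))  # ~[-1, 0, 0]: 90 deg counterclockwise faces -X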
Code example #16
    assert np.linalg.norm(s2.position - s1.position - expected.delta_pos) < 1e-5
    assert (
        angle_between_quats(s2.rotation * expected.delta_rot.inverse(), s1.rotation)
        < 1e-5
    )


default_body_control_testdata = [
    ("move_backward", ExpectedDelta(delta_pos=0.25 * habitat_sim.geo.BACK)),
    ("move_forward", ExpectedDelta(delta_pos=0.25 * habitat_sim.geo.FRONT)),
    ("move_right", ExpectedDelta(delta_pos=0.25 * habitat_sim.geo.RIGHT)),
    ("move_left", ExpectedDelta(delta_pos=0.25 * habitat_sim.geo.LEFT)),
    (
        "turn_right",
        ExpectedDelta(
            delta_rot=quat_from_angle_axis(np.deg2rad(10.0), habitat_sim.geo.GRAVITY)
        ),
    ),
    (
        "turn_left",
        ExpectedDelta(
            delta_rot=quat_from_angle_axis(np.deg2rad(10.0), habitat_sim.geo.UP)
        ),
    ),
]


@pytest.mark.parametrize("action,expected", default_body_control_testdata)
def test_default_body_controls(action, expected):
    scene_graph = habitat_sim.SceneGraph()
    agent_config = habitat_sim.AgentConfiguration()
Code example #17
def main(dataset):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config-path",
        type=str,
        default='baselines/config/{}/train_telephone/pointgoal_rgb.yaml'.format(dataset)
    )
    parser.add_argument(
        "opts",
        default=None,
        nargs=argparse.REMAINDER,
        help="Modify config options from command line",
    )
    args = parser.parse_args()

    config = get_config(args.config_path, opts=args.opts)
    config.defrost()
    config.TASK_CONFIG.SIMULATOR.AGENT_0.SENSORS = ["RGB_SENSOR", "DEPTH_SENSOR"]
    config.freeze()
    simulator = None
    scene_obs = defaultdict(dict)
    num_obs = 0
    scene_obs_dir = 'data/scene_observations/' + dataset
    os.makedirs(scene_obs_dir, exist_ok=True)
    metadata_dir = 'data/metadata/' + dataset
    for scene in os.listdir(metadata_dir):
        scene_obs = dict()
        scene_metadata_dir = os.path.join(metadata_dir, scene)
        points, graph = load_metadata(scene_metadata_dir)
        if dataset == 'replica':
            scene_mesh_dir = os.path.join('data/scene_datasets', dataset, scene, 'habitat/mesh_semantic.ply')
        else:
            scene_mesh_dir = os.path.join('data/scene_datasets', dataset, scene, scene + '.glb')

        for node in graph.nodes():
            agent_position = graph.nodes()[node]['point']
            for angle in [0, 90, 180, 270]:
                agent_rotation = quat_to_coeffs(quat_from_angle_axis(np.deg2rad(angle), np.array([0, 1, 0]))).tolist()
                goal_radius = 0.00001
                goal = NavigationGoal(
                    position=agent_position,
                    radius=goal_radius
                )
                episode = NavigationEpisode(
                    goals=[goal],
                    episode_id=str(0),
                    scene_id=scene_mesh_dir,
                    start_position=agent_position,
                    start_rotation=agent_rotation,
                    info={'sound': 'telephone'}
                )

                episode_sim_config = merge_sim_episode_config(config.TASK_CONFIG.SIMULATOR, episode)
                if simulator is None:
                    simulator = SoundSpaces(episode_sim_config)
                simulator.reconfigure(episode_sim_config)

                obs, rotation_index = simulator.step(None)
                scene_obs[(node, rotation_index)] = obs
                num_obs += 1

        print('Total number of observations: {}'.format(num_obs))
        with open(os.path.join(scene_obs_dir, '{}.pkl'.format(scene)), 'wb') as fo:
            pickle.dump(scene_obs, fo)
    simulator.close()
    del simulator
Code example #18
    def run(self):

        scene = self.sim.semantic_scene
        objects = scene.objects
        id = 0
        for obj in objects:
            if (obj is None or obj.category is None or
                    obj.category.name() not in self.include_classes):
                continue

            if self.verbose:
                print(f"Object name is: {obj.category.name()}")
            # Calculate distance to object center
            obj_center = obj.obb.to_aabb().center
            obj_center = np.expand_dims(obj_center, axis=0)
            distances = np.sqrt(np.sum((self.nav_pts - obj_center)**2, axis=1))

            # Get points with r_min < dist < r_max
            valid_pts = self.nav_pts[np.where(
                (distances > self.radius_min) * (distances < self.radius_max))]

            # Bin points based on angles [vertical_angle (10 deg/bin), horizontal_angle (10 deg/bin)]
            valid_pts_shift = valid_pts - obj_center

            dz = valid_pts_shift[:, 2]
            dx = valid_pts_shift[:, 0]
            dy = valid_pts_shift[:, 1]

            # Get yaw for binning
            valid_yaw = np.degrees(np.arctan2(dz, dx))
            nbins = 200
            bins = np.linspace(-180, 180, nbins + 1)
            bin_yaw = np.digitize(valid_yaw, bins)

            num_valid_bins = np.unique(bin_yaw).size

            spawns_per_bin = 1  #int(self.num_views / num_valid_bins) + 2
            print(f'spawns_per_bin: {spawns_per_bin}')

            if self.visualize:
                import matplotlib.cm as cm
                colors = iter(cm.rainbow(np.linspace(0, 1, nbins)))
                plt.figure(2)
                plt.clf()
                print(np.unique(bin_yaw))
                for bi in range(nbins):
                    cur_bi = np.where(bin_yaw == (bi + 1))
                    points = valid_pts[cur_bi]
                    x_sample = points[:, 0]
                    z_sample = points[:, 2]
                    plt.plot(z_sample, x_sample, 'o', color=next(colors))
                plt.plot(obj_center[:, 2],
                         obj_center[:, 0],
                         'x',
                         color='black')
                plt.show()

            action = "do_nothing"
            episodes = []
            valid_pts_selected = []
            cnt = 0
            texts_images = []
            texts_depths = []
            rots_cam0 = []
            camXs_T_camX0_4x4 = []
            camX0_T_camXs_4x4 = []
            origin_T_camXs = []
            origin_T_camXs_t = []
            rots_cam0.append([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])
            rots_origin = []
            for b in range(nbins):

                # get all angle indices in the current bin range
                inds_bin_cur = np.where(
                    bin_yaw == (b + 1))  # bins start 1 so need +1

                if inds_bin_cur[0].size == 0:
                    print("Continuing")
                    break

                for s in range(spawns_per_bin):
                    if b == 0:
                        np.random.seed(1)
                        s_ind = np.random.choice(inds_bin_cur[0])
                        pos_s = valid_pts[s_ind]
                        pos_s_prev = pos_s
                    else:
                        # get closest valid position for smooth trajectory
                        pos_s_cur_all = valid_pts[inds_bin_cur[0]]
                        distances_to_prev = np.sqrt(
                            np.sum((pos_s_cur_all - pos_s_prev)**2, axis=1))
                        argmin_s_ind_cur = np.argmin(distances_to_prev)
                        s_ind = inds_bin_cur[0][argmin_s_ind_cur]
                        pos_s = valid_pts[s_ind]
                        pos_s_prev = pos_s

                    valid_pts_selected.append(pos_s)
                    agent_state = habitat_sim.AgentState()
                    agent_state.position = pos_s + np.array([0, 1.5, 0])

                    # YAW calculation - rotate to object
                    agent_to_obj = np.squeeze(
                        obj_center) - agent_state.position
                    agent_local_forward = np.array([0, 0, -1.0])  # agent faces -Z
                    flat_to_obj = np.array(
                        [agent_to_obj[0], 0.0, agent_to_obj[2]])
                    flat_dist_to_obj = np.linalg.norm(flat_to_obj)
                    flat_to_obj /= flat_dist_to_obj

                    det = (flat_to_obj[0] * agent_local_forward[2] -
                           agent_local_forward[0] * flat_to_obj[2])
                    turn_angle = math.atan2(
                        det, np.dot(agent_local_forward, flat_to_obj))
                    quat_yaw = quat_from_angle_axis(turn_angle,
                                                    np.array([0, 1.0, 0]))

                    # Set agent yaw rotation to look at object
                    agent_state.rotation = quat_yaw

                    # change sensor state to default
                    # need to move the sensors too
                    if self.verbose:
                        print(self.agent.state.sensor_states)
                    for sensor in self.agent.state.sensor_states:
                        # st()
                        self.agent.state.sensor_states[
                            sensor].rotation = agent_state.rotation
                        self.agent.state.sensor_states[
                            sensor].position = agent_state.position  # 1.5 m height offset already added above

                    # Calculate Pitch from head to object
                    turn_pitch = np.degrees(
                        math.atan2(agent_to_obj[1], flat_dist_to_obj))
                    # number of times to move the head up or down by rot_interval
                    num_turns = int(abs(np.floor(turn_pitch / self.rot_interval)))
                    if self.verbose:
                        print("MOVING HEAD ", num_turns, " TIMES")
                    movement = "look_up" if turn_pitch > 0 else "look_down"

                    # initiate agent
                    self.agent.set_state(agent_state)
                    self.sim.step(action)

                    # Rotate "head" of agent up or down based on calculated pitch angle to object to center it in view
                    if num_turns > 0:
                        for turn in range(num_turns):
                            # st()
                            self.sim.step(movement)
                            if self.verbose:
                                for sensor in self.agent.state.sensor_states:
                                    print(self.agent.state.
                                          sensor_states[sensor].rotation)

                    # get observations after centering
                    observations = self.sim.step(action)

                    # Assuming all sensors have same rotation and position
                    observations["rotations"] = self.agent.state.sensor_states[
                        'color_sensor'].rotation  #agent_state.rotation
                    observations["positions"] = self.agent.state.sensor_states[
                        'color_sensor'].position

                    if self.is_valid_datapoint(observations, obj):
                        if self.verbose:
                            print("episode is valid......")
                        episodes.append(observations)
                        if self.visualize:
                            rgb = observations["color_sensor"]
                            semantic = observations["semantic_sensor"]
                            depth = observations["depth_sensor"]
                            self.display_sample(rgb,
                                                semantic,
                                                depth,
                                                mainobj=obj,
                                                visualize=False)

                    if cnt > 0:

                        origin_T_camX = quaternion.as_rotation_matrix(
                            episodes[cnt]["rotations"])
                        camX0_T_camX = np.matmul(camX0_T_origin, origin_T_camX)

                        origin_T_camXs.append(origin_T_camX)
                        origin_T_camXs_t.append(episodes[cnt]["positions"])

                        origin_T_camX_4x4 = np.eye(4)
                        origin_T_camX_4x4[0:3, 0:3] = origin_T_camX
                        origin_T_camX_4x4[:3, 3] = episodes[cnt]["positions"]
                        camX0_T_camX_4x4 = np.matmul(camX0_T_origin_4x4,
                                                     origin_T_camX_4x4)
                        camX_T_camX0_4x4 = self.safe_inverse_single(
                            camX0_T_camX_4x4)

                        camXs_T_camX0_4x4.append(camX_T_camX0_4x4)
                        camX0_T_camXs_4x4.append(camX0_T_camX_4x4)

                        # # trans_camX0_T_camX = episodes[cnt]["positions"] - episodes[0]["positions"]

                        # r_camX_T_camX0, t_camX_T_camX0, = self.split_rt_single(camX_T_camX0_4x4)
                        # r_camX_T_camX0_quat = quaternion.as_float_array(quaternion.from_rotation_matrix(r_camX_T_camX0))

                        # # print(t_camX_T_camX0)
                        # # print(trans_camX0_T_camX)

                        # # full_trans1 = np.hstack((cnt, trans, quat_diff1))
                        # full_trans_camX_T_camX0 = np.hstack((cnt, t_camX_T_camX0, r_camX_T_camX0_quat))

                        # # rots1.append(list(full_trans1))
                        # rots_cam0.append(list(full_trans_camX_T_camX0))
                        # # rots_origin.append(list(full_trans_origin))

                        camX0_T_camX_4x4 = self.safe_inverse_single(
                            camX_T_camX0_4x4)
                        origin_T_camX_4x4 = np.matmul(origin_T_camX0_4x4,
                                                      camX0_T_camX_4x4)
                        r_origin_T_camX, t_origin_T_camX = self.split_rt_single(
                            origin_T_camX_4x4)

                        if self.verbose:
                            print(r_origin_T_camX)
                            print(origin_T_camX)

                    else:
                        origin_T_camX0_quat = episodes[0]["rotations"]
                        origin_T_camX0 = quaternion.as_rotation_matrix(
                            origin_T_camX0_quat)
                        camX0_T_origin = np.linalg.inv(origin_T_camX0)
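                        # naming convention (assumed from usage): A_T_B maps frame-B
                        # coordinates into frame A, so camX0_T_origin takes world
                        # (origin) points into the first camera's frame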
                        # camX0_T_origin = self.safe_inverse_single(origin_T_camX0)

                        origin_T_camXs.append(origin_T_camX0)
                        origin_T_camXs_t.append(episodes[0]["positions"])

                        origin_T_camX0_4x4 = np.eye(4)
                        origin_T_camX0_4x4[0:3, 0:3] = origin_T_camX0
                        origin_T_camX0_4x4[:3, 3] = episodes[0]["positions"]
                        camX0_T_origin_4x4 = self.safe_inverse_single(
                            origin_T_camX0_4x4)

                        camXs_T_camX0_4x4.append(np.eye(4))

                        # r0 = quaternion.as_rotation_vector(episodes[0]["rotations"])
                        # quat_diff_origin = quaternion.as_float_array(episodes[0]["rotations"]) #quaternion.as_float_array(self.quaternion_from_two_vectors(self.origin_rot_vector,r0))
                        # full_trans_origin = np.hstack((cnt, episodes[0]["positions"], quat_diff_origin))
                        # rots_origin.append(list(full_trans_origin))
                    cnt += 1
                    view_id += 1

            print("Generated ", len(episodes), " views.")
            if len(episodes) < self.min_orbslam_views:
                print("NOT ENOUGH VIEWS FOR ORBSLAM")
                continue

            if self.save_imgs:
                with open(self.orbslam_path + "/" + 'rgb.txt', 'w') as f:
                    for item in texts_images:
                        f.write("%s\n" % item)

                with open(self.orbslam_path + "/" + 'depth.txt', 'w') as f:
                    for item in texts_depths:
                        f.write("%s\n" % item)

                # with open(self.orbslam_path + "/" + 'CamTraj1.txt', 'w') as f:
                #     for item in rots1:
                #         f.write("%s\n" % item)

                # with open(self.orbslam_path + "/" + 'CamTraj.txt', 'w') as f:
                #     for item in rots_cam0:
                #         f.write("%s\n" % item)

            ############ ORBSLAM ################
            if self.do_orbslam:
                camXs_T_camX0_4x4 = np.array(camXs_T_camX0_4x4)
                origin_T_camXs = np.array(origin_T_camXs)
                origin_T_camXs_t = np.array(origin_T_camXs_t)
                camX0_T_camXs_4x4 = np.array(camX0_T_camXs_4x4)

                orbslam_dir = "/home/nel/ORB_SLAM2"
                executable = os.path.join(orbslam_dir,
                                          "Examples/RGB-D/rgbd_tum")
                vocabulary_file = os.path.join(orbslam_dir,
                                               "Vocabulary/ORBvoc.txt")
                settings_file = os.path.join(orbslam_dir,
                                             "Examples/RGB-D/REPLICA.yaml")
                data_dir = os.path.join(orbslam_dir, "Examples/RGB-D/replica")
                associations_file = os.path.join(
                    orbslam_dir, "Examples/RGB-D/associations/replica.txt")
                associations_executable = os.path.join(
                    orbslam_dir, "Examples/RGB-D/associations/associate.py")
                rgb_file = os.path.join(orbslam_dir,
                                        "Examples/RGB-D/replica/rgb.txt")
                depth_file = os.path.join(orbslam_dir,
                                          "Examples/RGB-D/replica/depth.txt")

                # associate rgb and depth files
                print("ASSOCIATING FILES")
                associate(rgb_file, depth_file, associations_file)

                # run ORBSLAM2
                print("RUNNING ORBSLAM2...")
                try:
                    output = subprocess.run([
                        executable, vocabulary_file, settings_file, data_dir,
                        associations_file
                    ],
                                            capture_output=True,
                                            text=True,
                                            check=True,
                                            timeout=800)
                except (subprocess.TimeoutExpired,
                        subprocess.CalledProcessError) as err:
                    print("ORBSLAM2 TIMED OUT OR FAILED:", err)
                    continue

                # load camera rotation and translation estimated by orbslam - these are wrt first frame
                camXs_T_camX0_orb_output = np.loadtxt(self.ORBSLAM_Cam_Traj)
                print(output)

                assert len(episodes) == camXs_T_camX0_orb_output.shape[
                    0], f"{camXs_T_camX0_orb_output.shape[0]}, {len(episodes)}"

                camXs_T_camX0_quant = []
                camXs_T_camX0 = []
                for i in range(camXs_T_camX0_orb_output.shape[0]):
                    cur = camXs_T_camX0_orb_output[i]
                    camX_T_camX0_quant = np.quaternion(cur[7], cur[4], cur[5],
                                                       cur[6])
                    camX_T_camX0 = quaternion.as_rotation_matrix(
                        camX_T_camX0_quant
                    )  # need to negate the rotation because the axes are flipped
                    camXs_T_camX0_quant.append(camX_T_camX0_quant)
                    camXs_T_camX0.append(camX_T_camX0)
                camXs_T_camX0 = np.array(camXs_T_camX0)
                camXs_T_camX0_quant = np.array(camXs_T_camX0_quant)

                t = []
                for i in range(camXs_T_camX0_orb_output.shape[0]):
                    cur = camXs_T_camX0_orb_output[i]
                    t_cur = np.array([-cur[1], -cur[2], -cur[3]
                                      ])  # note: x possibly should not be inverted
                    t.append(t_cur)
                t = np.array(t)

                camXs_T_camX0_4x4_orb = []
                for i in range(camXs_T_camX0_orb_output.shape[0]):
                    # assert len(episodes) == camXs_T_camX0_orb_output.shape[0], f"{camXs_T_camX0_orb_output.shape[0]}, {len(episodes)}"
                    # get estimated 4x4
                    camX_T_camX0_4x4_orb = np.eye(4)
                    camX_T_camX0_4x4_orb[0:3, 0:3] = camXs_T_camX0[i]
                    camX_T_camX0_4x4_orb[:3, 3] = t[i]

                    # invert
                    camX0_T_camX_4x4_orb = self.safe_inverse_single(
                        camX_T_camX0_4x4_orb)

                    # convert to origin coordinates
                    origin_T_camX_4x4 = np.matmul(origin_T_camX0_4x4,
                                                  camX0_T_camX_4x4_orb)
                    r_origin_T_camX_orb, t_origin_T_camX_orb = self.split_rt_single(
                        origin_T_camX_4x4)
                    r_origin_T_camX_orb_quat = quaternion.from_rotation_matrix(
                        r_origin_T_camX_orb)

                    # save
                    episodes[i]["positions_orb"] = t_origin_T_camX_orb
                    episodes[i]["rotations_orb"] = r_origin_T_camX_orb_quat
                    camXs_T_camX0_4x4_orb.append(camX_T_camX0_4x4_orb)
                camXs_T_camX0_4x4_orb = np.array(camXs_T_camX0_4x4_orb)

                ## PLOTTING #########

                # x_orbslam = []
                # y_orbslam = []
                # z_orbslam = []
                # for i in range(camXs_T_camX0_orb_output.shape[0]):
                #     camX_T_camX0_4x4_orb_r, camX_T_camX0_4x4_orb_t = self.split_rt_single(camXs_T_camX0_4x4_orb[i])
                #     x_orbslam.append(camX_T_camX0_4x4_orb_t[0])
                #     y_orbslam.append(camX_T_camX0_4x4_orb_t[1])
                #     z_orbslam.append(camX_T_camX0_4x4_orb_t[2])
                # x_orbslam = np.array(x_orbslam)
                # y_orbslam = np.array(y_orbslam)
                # z_orbslam = np.array(z_orbslam)

                # x_gt = []
                # y_gt = []
                # z_gt = []
                # for i in range(camXs_T_camX0_orb_output.shape[0]):
                #     camX_T_camX0_4x4_r, camX_T_camX0_4x4_t = self.split_rt_single(camXs_T_camX0_4x4[i])
                #     x_gt.append(camX_T_camX0_4x4_t[0])
                #     y_gt.append(camX_T_camX0_4x4_t[1])
                #     z_gt.append(camX_T_camX0_4x4_t[2])
                # x_gt = np.array(x_gt)
                # y_gt = np.array(y_gt)
                # z_gt = np.array(z_gt)

                print("PLOTTING")

                x_orbslam = []
                y_orbslam = []
                z_orbslam = []
                for i in range(camXs_T_camX0_orb_output.shape[0]):
                    x_orbslam.append(episodes[i]["positions_orb"][0])
                    y_orbslam.append(episodes[i]["positions_orb"][1])
                    z_orbslam.append(episodes[i]["positions_orb"][2])
                x_orbslam = np.array(x_orbslam)
                y_orbslam = np.array(y_orbslam)
                z_orbslam = np.array(z_orbslam)

                x_gt = []
                y_gt = []
                z_gt = []
                for i in range(camXs_T_camX0_orb_output.shape[0]):
                    x_gt.append(episodes[i]["positions"][0])
                    y_gt.append(episodes[i]["positions"][1])
                    z_gt.append(episodes[i]["positions"][2])
                x_gt = np.array(x_gt)
                y_gt = np.array(y_gt)
                z_gt = np.array(z_gt)

                from mpl_toolkits.mplot3d import Axes3D
                plt.figure()
                ax = plt.axes(projection='3d')
                ax.plot3D(x_orbslam, y_orbslam, z_orbslam, 'green')
                ax.plot3D(x_gt, y_gt, z_gt, 'blue')
                ax.set_xlabel('x')
                ax.set_ylabel('y')
                ax.set_zlabel('z')
                # plt.plot(-np.array(x_orbslam), np.array(y_orbslam), label='ORB-SLAM', color='green', linestyle='dashed')
                # plt.plot(x_gt, y_gt, label='GT', color='blue', linestyle='solid')
                # plt.legend()
                plt_name = 'maps/' + str(
                    self.ep_idx) + '_' + str(view_id) + '_' + 'map.png'
                plt.savefig(plt_name)

            if not self.do_orbslam:
                if len(episodes) >= self.num_views:
                    print(f'num episodes: {len(episodes)}')
                    data_folder = obj.category.name() + '_' + obj.id
                    data_path = os.path.join(self.basepath, data_folder)
                    print("Saving to ", data_path)
                    os.mkdir(data_path)
                    np.random.seed(1)
                    flat_obs = np.random.choice(episodes,
                                                self.num_views,
                                                replace=False)
                    viewnum = 0
                    for obs in flat_obs:
                        self.save_datapoint(self.agent, obs, data_path,
                                            viewnum, obj.id, True)
                        viewnum += 1

                else:
                    print(f"Not enough episodes: f{len(episodes)}")

            # len(episodes) is sometimes greater than camXs_T_camX0_orb_output.shape[0],
            # so we sample only from the episodes that have an ORB-SLAM pose estimate
            else:
                num_valid_episodes = camXs_T_camX0_orb_output.shape[0]
                if num_valid_episodes >= self.num_views:
                    print(f'num episodes: {num_valid_episodes}')
                    data_folder = obj.category.name() + '_' + obj.id
                    data_path = os.path.join(self.basepath, data_folder)
                    print("Saving to ", data_path)
                    os.mkdir(data_path)
                    np.random.seed(1)
                    flat_obs = np.random.choice(episodes[:num_valid_episodes],
                                                self.num_views,
                                                replace=False)
                    viewnum = 0
                    for obs in flat_obs:
                        self.save_datapoint(self.agent, obs, data_path,
                                            viewnum, obj.id, True)
                        viewnum += 1

                else:
                    print(f"Not enough episodes: f{len(episodes)}")

            if self.visualize:
                valid_pts_selected = np.vstack(valid_pts_selected)
                self.plot_navigable_points(valid_pts_selected)
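
Example #18 above repeatedly inverts 4x4 camera poses with self.safe_inverse_single, which is not shown in the snippet. For a rigid transform [R|t] the inverse has the closed form [R^T | -R^T t], so a plausible standalone sketch (only the name comes from the snippet; the implementation is an assumption):

import numpy as np

def safe_inverse_single(a):
    # Invert a 4x4 rigid-body transform without np.linalg.inv:
    # [R t; 0 1]^-1 = [R^T, -R^T @ t; 0 1].
    r, t = a[:3, :3], a[:3, 3]
    inv = np.eye(4)
    inv[:3, :3] = r.T
    inv[:3, 3] = -r.T @ t
    return inv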
Code example #19
def test_kinematics():
    cfg_settings = examples.settings.default_sim_settings.copy()

    cfg_settings[
        "scene"] = "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"
    # enable the physics simulator: also clears available actions to no-op
    cfg_settings["enable_physics"] = True
    cfg_settings["depth_sensor"] = True

    # test loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        # get the rigid object attributes manager, which manages
        # templates used to create objects
        obj_template_mgr = sim.get_object_template_manager()
        obj_template_mgr.load_configs("data/objects/example_objects/", True)
        assert obj_template_mgr.get_num_templates() > 0
        # get the rigid object manager, which provides direct
        # access to objects
        rigid_obj_mgr = sim.get_rigid_object_manager()

        # test adding an object to the world
        # get handle for object 0, used to test
        obj_handle_list = obj_template_mgr.get_template_handles("cheezit")
        cheezit_box = rigid_obj_mgr.add_object_by_template_handle(
            obj_handle_list[0])
        assert rigid_obj_mgr.get_num_objects() > 0
        assert (len(rigid_obj_mgr.get_object_handles()) ==
                rigid_obj_mgr.get_num_objects())

        # test setting the motion type
        cheezit_box.motion_type = habitat_sim.physics.MotionType.STATIC
        assert cheezit_box.motion_type == habitat_sim.physics.MotionType.STATIC
        cheezit_box.motion_type = habitat_sim.physics.MotionType.KINEMATIC
        assert cheezit_box.motion_type == habitat_sim.physics.MotionType.KINEMATIC

        # test kinematics
        I = np.identity(4)

        # test get and set translation
        cheezit_box.translation = [0.0, 1.0, 0.0]
        assert np.allclose(cheezit_box.translation, np.array([0.0, 1.0, 0.0]))

        # test object SceneNode
        assert np.allclose(cheezit_box.translation,
                           cheezit_box.root_scene_node.translation)

        # test get and set transform
        cheezit_box.transformation = I
        assert np.allclose(cheezit_box.transformation, I)

        # test get and set rotation
        Q = quat_from_angle_axis(np.pi, np.array([0.0, 1.0, 0.0]))
        expected = np.eye(4)
        expected[0:3, 0:3] = quaternion.as_rotation_matrix(Q)
        cheezit_box.rotation = quat_to_magnum(Q)
        assert np.allclose(cheezit_box.transformation, expected)
        assert np.allclose(quat_from_magnum(cheezit_box.rotation), Q)

        # test object removal
        rigid_obj_mgr.remove_object_by_id(cheezit_box.object_id)

        assert rigid_obj_mgr.get_num_objects() == 0

        obj_handle_list = obj_template_mgr.get_template_handles("cheezit")
        cheezit_box = rigid_obj_mgr.add_object_by_template_handle(
            obj_handle_list[0])

        prev_time = 0.0
        for _ in range(2):
            # do some kinematics here (todo: translating or rotating instead of absolute)
            cheezit_box.translation = np.random.rand(3)
            T = cheezit_box.transformation  # noqa : F841

            # test getting observation
            sim.step(random.choice(list(
                hab_cfg.agents[0].action_space.keys())))

            # check that time is increasing in the world
            assert sim.get_world_time() > prev_time
            prev_time = sim.get_world_time()

        rigid_obj_mgr.remove_object_by_id(cheezit_box.object_id)

        # test attaching/detaching an Agent to/from physics simulation
        agent_node = sim.agents[0].scene_node
        obj_handle_list = obj_template_mgr.get_template_handles("cheezit")
        cheezit_agent = rigid_obj_mgr.add_object_by_template_handle(
            obj_handle_list[0], agent_node)

        cheezit_agent.translation = np.random.rand(3)
        assert np.allclose(agent_node.translation, cheezit_agent.translation)
        rigid_obj_mgr.remove_object_by_id(
            cheezit_agent.object_id,
            delete_object_node=False)  # don't delete the agent's node
        assert agent_node.translation

        # test get/set RigidState
        cheezit_box = rigid_obj_mgr.add_object_by_template_handle(
            obj_handle_list[0])
        targetRigidState = habitat_sim.bindings.RigidState(
            mn.Quaternion(), np.array([1.0, 2.0, 3.0]))
        cheezit_box.rigid_state = targetRigidState
        objectRigidState = cheezit_box.rigid_state
        assert np.allclose(objectRigidState.translation,
                           targetRigidState.translation)
        assert objectRigidState.rotation == targetRigidState.rotation
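
Example #19 round-trips rotations through Magnum via quat_to_magnum / quat_from_magnum from habitat_sim.utils.common. A quick sanity check of that round trip (a standalone sketch, assuming only those utilities are importable):

import numpy as np
import quaternion
from habitat_sim.utils.common import (quat_from_angle_axis,
                                      quat_from_magnum, quat_to_magnum)

# Converting to a Magnum quaternion and back should preserve the
# rotation up to floating-point precision.
q = quat_from_angle_axis(np.pi / 3, np.array([0.0, 1.0, 0.0]))
q_roundtrip = quat_from_magnum(quat_to_magnum(q))
assert np.allclose(quaternion.as_float_array(q_roundtrip),
                   quaternion.as_float_array(q))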
Code example #20
    def run(self):
        object_centers, cat_ids_objects = self.get_midpoint_obj_conf()

        # check if any two 3d centers are too close to each other
        print("OBJ CENTERS BEFORE=", object_centers.shape[0])
        object_centers_unique = []
        cat_ids_objects_unique = []
        object_centers_unique.append(object_centers[-1, :])
        cat_ids_objects_unique.append(cat_ids_objects[-1])
        for i in range(object_centers.shape[0] - 1):
            dists = np.sqrt(
                np.sum((object_centers[i, :] - object_centers[i + 1:])**2,
                       axis=1))
            far = dists > self.closeness_threshold
            if np.all(far):
                object_centers_unique.append(object_centers[i, :])
                cat_ids_objects_unique.append(cat_ids_objects[i])
        object_centers_unique = np.array(object_centers_unique)
        print("OBJ CENTERS AFTER=", object_centers_unique.shape[0])

        # Print all categories in the house that are relevant
        print("OBJ CAT IDs UNIQUE=", cat_ids_objects_unique)
        objs_in_house = []
        scene = self.sim.semantic_scene
        objects = scene.objects
        for obj in objects:
            if (obj is None or obj.category is None
                    or obj.category.name() not in self.include_classes):
                continue
            objs_in_house.append(obj.category.name())
        print("ALL CATEGORIES IN HOUSE=", objs_in_house)

        if self.visualize_obj:
            plt.figure(1)
            x_sample = self.nav_pts[:, 0]
            z_sample = self.nav_pts[:, 2]
            plt.plot(z_sample, x_sample, 'o', color='red')
            plt.plot(object_centers_unique[:, 2],
                     object_centers_unique[:, 0],
                     'x',
                     color='green')

            plt.figure(2)
            x_sample = self.nav_pts[:, 0]
            z_sample = self.nav_pts[:, 2]
            plt.plot(z_sample, x_sample, 'o', color='red')
            plt.plot(object_centers[:, 2],
                     object_centers[:, 0],
                     'x',
                     color='green')
            plt.show()

        for obj_idx in range(object_centers_unique.shape[0]):

            # Calculate distance to object center
            obj_center = object_centers_unique[obj_idx, :]

            #print(obj_center)
            #obj_center = np.expand_dims(object_centers_unique, axis=0)
            obj_center = np.reshape(obj_center, (1, 3))

            print("OBJECT CENTER: ", obj_center)
            #print(self.nav_pts.shape)
            #print(obj_center)
            distances = np.sqrt(np.sum((self.nav_pts - obj_center)**2, axis=1))

            # Get points with r_min < dist < r_max
            valid_pts = self.nav_pts[np.where(
                (distances > self.radius_min) * (distances < self.radius_max))]
            # if not valid_pts:
            # continue

            # plot valid points that we happen to select
            # self.plot_navigable_points(valid_pts)

            # Bin candidate points by yaw angle around the object
            valid_pts_shift = valid_pts - obj_center

            dz = valid_pts_shift[:, 2]
            dx = valid_pts_shift[:, 0]
            dy = valid_pts_shift[:, 1]

            # Get yaw for binning
            valid_yaw = np.degrees(np.arctan2(dz, dx))

            # pitch calculation
            dxdz_norm = np.sqrt((dx * dx) + (dz * dz))
            valid_pitch = np.degrees(np.arctan2(dy, dxdz_norm))

            # binning yaw around object
            nbins = 18
            bins = np.linspace(-180, 180, nbins + 1)
            bin_yaw = np.digitize(valid_yaw, bins)

            num_valid_bins = np.unique(bin_yaw).size

            spawns_per_bin = int(self.num_views / num_valid_bins) + 2
            print(f'spawns_per_bin: {spawns_per_bin}')

            if self.visualize:
                print("PLOTTING")
                import matplotlib.cm as cm
                colors = iter(cm.rainbow(np.linspace(0, 1, nbins)))
                plt.figure(2)
                plt.clf()
                print(np.unique(bin_yaw))
                for bi in range(nbins):
                    cur_bi = np.where(bin_yaw == (bi + 1))
                    points = valid_pts[cur_bi]
                    x_sample = points[:, 0]
                    z_sample = points[:, 2]
                    plt.plot(z_sample, x_sample, 'o', color=next(colors))
                plt.plot(obj_center[:, 2],
                         obj_center[:, 0],
                         'x',
                         color='black')
                plt.show()

            action = "do_nothing"
            episodes = []
            valid_pts_selected = []
            cnt = 0
            for b in range(nbins):

                # get all angle indices in the current bin range
                # st()
                inds_bin_cur = np.where(
                    bin_yaw == (b + 1))  # bins start 1 so need +1
                if inds_bin_cur[0].size == 0:
                    print("NO BINS")
                    continue

                for s in range(spawns_per_bin):
                    # st()
                    s_ind = np.random.choice(inds_bin_cur[0])
                    #s_ind = inds_bin_cur[0][0]
                    pos_s = valid_pts[s_ind]
                    valid_pts_selected.append(pos_s)
                    agent_state = habitat_sim.AgentState()
                    agent_state.position = pos_s + np.array([0, 1.5, 0])

                    # YAW calculation - rotate to object
                    agent_to_obj = np.squeeze(
                        obj_center) - agent_state.position
                    agent_local_forward = np.array([0, 0, -1.0])  # agent faces -Z
                    flat_to_obj = np.array(
                        [agent_to_obj[0], 0.0, agent_to_obj[2]])
                    flat_dist_to_obj = np.linalg.norm(flat_to_obj)
                    flat_to_obj /= flat_dist_to_obj

                    det = (flat_to_obj[0] * agent_local_forward[2] -
                           agent_local_forward[0] * flat_to_obj[2])
                    turn_angle = math.atan2(
                        det, np.dot(agent_local_forward, flat_to_obj))
                    quat_yaw = quat_from_angle_axis(turn_angle,
                                                    np.array([0, 1.0, 0]))

                    # Set agent yaw rotation to look at object
                    agent_state.rotation = quat_yaw

                    # change sensor state to default
                    # need to move the sensors too
                    for sensor in self.agent.state.sensor_states:
                        # st()
                        self.agent.state.sensor_states[
                            sensor].rotation = agent_state.rotation
                        self.agent.state.sensor_states[
                            sensor].position = agent_state.position  # 1.5 m height offset already added above
                        # print("PRINT", self.agent.state.sensor_states[sensor].rotation)

                    # Calculate Pitch from head to object
                    turn_pitch = np.degrees(
                        math.atan2(agent_to_obj[1], flat_dist_to_obj))
                    # number of times to move the head up or down by rot_interval
                    num_turns = int(abs(np.floor(turn_pitch / self.rot_interval)))
                    #print("MOVING HEAD ", num_turns, " TIMES")
                    movement = "look_up" if turn_pitch > 0 else "look_down"

                    # initiate agent
                    self.agent.set_state(agent_state)
                    self.sim.step(action)

                    # Rotate "head" of agent up or down based on calculated pitch angle to object to center it in view
                    if num_turns > 0:
                        for turn in range(num_turns):
                            # st()
                            self.sim.step(movement)
                            if self.verbose:
                                for sensor in self.agent.state.sensor_states:
                                    print(self.agent.state.
                                          sensor_states[sensor].rotation)

                    # get observations after centering
                    observations = self.sim.step(action)

                    # Assuming all sensors have same rotation and position
                    observations["rotations"] = self.agent.state.sensor_states[
                        'color_sensor'].rotation  #agent_state.rotation
                    observations["positions"] = self.agent.state.sensor_states[
                        'color_sensor'].position

                    if self.verbose:
                        print("episode is valid......")
                    episodes.append(observations)

                    if self.visualize:
                        im = observations["color_sensor"]
                        im = Image.fromarray(im, mode="RGBA")
                        im = cv2.cvtColor(np.asarray(im), cv2.COLOR_RGBA2RGB)
                        plt.imshow(im)
                        plt.show()

                    cnt += 1

            print(f'num episodes: {len(episodes)}')
            if len(episodes) >= self.num_views:
                data_folder = 'obj' + str(
                    obj_idx)  #obj.category.name() + '_' + obj.id
                data_path = os.path.join(self.basepath, data_folder)
                print("Saving to ", data_path)
                os.mkdir(data_path)
                flat_obs = np.random.choice(episodes,
                                            self.num_views,
                                            replace=False)
                viewnum = 0
                for obs in flat_obs:
                    self.save_datapoint(self.agent, obs, data_path, viewnum,
                                        obj_idx, True)
                    viewnum += 1
            else:
                print(f"Not enough episodes: f{len(episodes)}")

            if self.visualize:
                valid_pts_selected = np.vstack(valid_pts_selected)
                self.plot_navigable_points(valid_pts_selected)
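
The sampling strategy in example #20 (and in #18 and #22) is the same three-step recipe: filter navigable points to an annulus around the object, bin them by yaw, then spawn a few agents per bin. A condensed sketch of the filtering and binning steps under the snippet's conventions (X/Z is the ground plane; np.digitize bin indices start at 1):

import numpy as np

def bin_candidate_views(nav_pts, obj_center, r_min, r_max, nbins=18):
    # Keep navigable points whose distance to the object lies in (r_min, r_max).
    dists = np.linalg.norm(nav_pts - obj_center, axis=1)
    valid = nav_pts[(dists > r_min) & (dists < r_max)]
    # Yaw of each candidate around the object, digitized into nbins bins.
    shift = valid - obj_center
    yaw = np.degrees(np.arctan2(shift[:, 2], shift[:, 0]))
    bins = np.linspace(-180, 180, nbins + 1)
    return valid, np.digitize(yaw, bins)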
Code example #21
def _check_state_expected(s1, s2, expected: ExpectedDelta):
    assert np.linalg.norm(s2.position - s1.position -
                          expected.delta_pos) < 1e-5
    assert (angle_between_quats(s2.rotation * expected.delta_rot.inverse(),
                                s1.rotation) < 1e-5)


default_body_control_testdata = [
    ("move_backward", ExpectedDelta(delta_pos=0.25 * habitat_sim.geo.BACK)),
    ("move_forward", ExpectedDelta(delta_pos=0.25 * habitat_sim.geo.FRONT)),
    ("move_right", ExpectedDelta(delta_pos=0.25 * habitat_sim.geo.RIGHT)),
    ("move_left", ExpectedDelta(delta_pos=0.25 * habitat_sim.geo.LEFT)),
    (
        "turn_right",
        ExpectedDelta(delta_rot=quat_from_angle_axis(np.deg2rad(10.0),
                                                     habitat_sim.geo.GRAVITY)),
    ),
    (
        "turn_left",
        ExpectedDelta(delta_rot=quat_from_angle_axis(np.deg2rad(10.0),
                                                     habitat_sim.geo.UP)),
    ),
]


@pytest.mark.parametrize("action,expected", default_body_control_testdata)
def test_default_body_controls(action, expected):
    scene_graph = habitat_sim.SceneGraph()
    agent_config = habitat_sim.AgentConfiguration()
    agent_config.action_space = dict(
        move_backward=habitat_sim.ActionSpec(
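
The snippet above is cut off mid-definition in the source. Based on the default_body_control_testdata table, the action space presumably continues with one ActionSpec per tested action; a hedged sketch of the completed dict (amounts inferred from the expected 0.25 m and 10 degree deltas, not confirmed by the source):

agent_config.action_space = dict(
    move_backward=habitat_sim.ActionSpec(
        "move_backward", habitat_sim.ActuationSpec(amount=0.25)),
    move_forward=habitat_sim.ActionSpec(
        "move_forward", habitat_sim.ActuationSpec(amount=0.25)),
    move_right=habitat_sim.ActionSpec(
        "move_right", habitat_sim.ActuationSpec(amount=0.25)),
    move_left=habitat_sim.ActionSpec(
        "move_left", habitat_sim.ActuationSpec(amount=0.25)),
    turn_right=habitat_sim.ActionSpec(
        "turn_right", habitat_sim.ActuationSpec(amount=10.0)),
    turn_left=habitat_sim.ActionSpec(
        "turn_left", habitat_sim.ActuationSpec(amount=10.0)),
)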
Code example #22
    def run(self):

        scene = self.sim.semantic_scene
        objects = scene.objects
        for obj in objects:
            if (obj is None or obj.category is None
                    or obj.category.name() not in self.include_classes):
                continue
            # st()
            if self.verbose:
                print(f"Object name is: {obj.category.name()}")
            # Calculate distance to object center
            obj_center = obj.obb.to_aabb().center
            #print(obj_center)
            obj_center = np.expand_dims(obj_center, axis=0)
            #print(obj_center)
            distances = np.sqrt(np.sum((self.nav_pts - obj_center)**2, axis=1))

            # Get points with r_min < dist < r_max
            valid_pts = self.nav_pts[np.where(
                (distances > self.radius_min) * (distances < self.radius_max))]
            # if not valid_pts:
            # continue

            # plot valid points that we happen to select
            # self.plot_navigable_points(valid_pts)

            # Bin candidate points by yaw angle around the object
            valid_pts_shift = valid_pts - obj_center

            valid_yaw = np.degrees(
                np.arctan2(valid_pts_shift[:, 2], valid_pts_shift[:, 0]))

            nbins = 18
            bins = np.linspace(-180, 180, nbins + 1)
            bin_yaw = np.digitize(valid_yaw, bins)

            num_valid_bins = np.unique(bin_yaw).size

            spawns_per_bin = int(self.num_views / num_valid_bins) + 2
            print(f'spawns_per_bin: {spawns_per_bin}')

            if self.visualize:
                import matplotlib.cm as cm
                colors = iter(cm.rainbow(np.linspace(0, 1, nbins)))
                plt.figure(2)
                plt.clf()
                print(np.unique(bin_yaw))
                for bi in range(nbins):
                    cur_bi = np.where(bin_yaw == (bi + 1))
                    points = valid_pts[cur_bi]
                    x_sample = points[:, 0]
                    z_sample = points[:, 2]
                    plt.plot(z_sample, x_sample, 'o', color=next(colors))
                plt.plot(obj_center[:, 2],
                         obj_center[:, 0],
                         'x',
                         color='black')
                plt.show()

            # 1. angle calculation: https://math.stackexchange.com/questions/2521886/how-to-find-angle-between-2-points-in-3d-space
            # 2. angle quantization (round each angle to the nearest bin, 163 --> 160, 47 --> 50, etc.) - STILL NEED
            '''
            # 3. loop around the sphere and get views
            for vert_angle in vertical_angle:
                for hori_angle in horizontal_angle:
                    # for i in range(num_view_per_angle)
                    valid_pts_bin[vert_angle, hori_angle][np.random.choice()]
                    # spawn agent
                    # rotate the agent with our calculated angle (so that the agent looks at the object)
                    # if is_valid_datapoint: save it to views
                    # self.display_sample(rgb, semantic, depth)
            '''

            action = "do_nothing"
            # flat_views = [] #flatview corresponds to moveup=100
            # any_views = []
            episodes = []
            # spawns_per_bin = 2
            valid_pts_selected = []
            for b in range(nbins):

                # get all angle indices in the current bin range
                # st()
                inds_bin_cur = np.where(
                    bin_yaw == (b + 1))  # bins start 1 so need +1
                if inds_bin_cur[0].size == 0:
                    continue

                for s in range(spawns_per_bin):
                    # st()
                    s_ind = np.random.choice(inds_bin_cur[0])
                    pos_s = valid_pts[s_ind]
                    valid_pts_selected.append(pos_s)
                    agent_state = habitat_sim.AgentState()
                    agent_state.position = pos_s

                    agent_to_obj = np.squeeze(
                        obj_center) - agent_state.position
                    agent_local_forward = np.array([0, 0, -1.0])  # agent faces -Z
                    flat_to_obj = np.array(
                        [agent_to_obj[0], 0.0, agent_to_obj[2]])
                    flat_dist_to_obj = np.linalg.norm(flat_to_obj)
                    flat_to_obj /= flat_dist_to_obj

                    det = (flat_to_obj[0] * agent_local_forward[2] -
                           agent_local_forward[0] * flat_to_obj[2])
                    turn_angle = math.atan2(
                        det, np.dot(agent_local_forward, flat_to_obj))
                    quat_yaw = quat_from_angle_axis(turn_angle,
                                                    np.array([0, 1.0, 0]))
                    agent_state.rotation = quat_yaw

                    # change sensor state to default
                    # need to move the sensors too
                    for sensor in agent_state.sensor_states:
                        agent_state.sensor_states[
                            sensor].rotation = agent_state.rotation
                        agent_state.sensor_states[
                            sensor].position = agent_state.position + np.array(
                                [0, 1.5, 0])

                    # agent_state.rotation = rot

                    self.agent.set_state(agent_state)
                    observations = self.sim.step(action)

                    observations["rotations"] = agent_state.rotation
                    observations["positions"] = agent_state.position

                    if self.is_valid_datapoint(observations, obj):
                        if self.verbose:
                            print("episode is valid......")
                        episodes.append(observations)
                        if self.visualize:
                            rgb = observations["color_sensor"]
                            semantic = observations["semantic_sensor"]
                            depth = observations["depth_sensor"]
                            self.display_sample(rgb,
                                                semantic,
                                                depth,
                                                mainobj=obj,
                                                visualize=False)
                    # if self.visualize:
                    #         rgb = observations["color_sensor"]
                    #         semantic = observations["semantic_sensor"]
                    #         depth = observations["depth_sensor"]
                    #         self.display_sample(rgb, semantic, depth, mainobj=obj, visualize=False)

                    #print("agent_state: position", self.agent.state.position, "rotation", self.agent.state.rotation)

            if len(episodes) >= self.num_views:
                print(f'num episodes: {len(episodes)}')
                data_folder = obj.category.name() + '_' + obj.id
                data_path = os.path.join(self.basepath, data_folder)
                print("Saving to ", data_path)
                os.mkdir(data_path)
                flat_obs = np.random.choice(episodes,
                                            self.num_views,
                                            replace=False)
                viewnum = 0
                for obs in flat_obs:
                    self.save_datapoint(self.agent, obs, data_path, viewnum,
                                        obj.id, True)
                    viewnum += 1
            else:
                print(f"Not enough episodes: f{len(episodes)}")

            if self.visualize:
                valid_pts_selected = np.vstack(valid_pts_selected)
                self.plot_navigable_points(valid_pts_selected)
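
Examples #18, #20, and #22 all share the same look-at computation: flatten the agent-to-object vector onto the ground plane, take the signed angle against the agent's local forward (atan2 of the 2D cross product and the dot product), and wrap it into a yaw quaternion about +Y. A self-contained sketch of that shared logic (the function name is hypothetical):

import math
import numpy as np
from habitat_sim.utils.common import quat_from_angle_axis

def yaw_to_face(agent_position, obj_center):
    # Vector to the object, projected onto the X/Z ground plane.
    to_obj = np.squeeze(obj_center) - agent_position
    flat = np.array([to_obj[0], 0.0, to_obj[2]])
    flat /= np.linalg.norm(flat)
    forward = np.array([0.0, 0.0, -1.0])  # agent's local forward is -Z
    # Signed angle about +Y: atan2(2D cross product, dot product).
    det = flat[0] * forward[2] - forward[0] * flat[2]
    return quat_from_angle_axis(math.atan2(det, float(np.dot(forward, flat))),
                                np.array([0.0, 1.0, 0.0]))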