Code example #1
    def set_state(self, state: AgentState, reset_sensors: bool = True):
        r"""Sets the agent's state

        :param state: The state to set the agent to
        :param reset_sensors: Whether or not to reset the sensors to their
            default intrinsic/extrinsic parameters before setting their
            extrinsic state
        """
        habitat_sim.errors.assert_obj_valid(self.body)

        if isinstance(state.rotation, list):
            state.rotation = quat_from_coeffs(state.rotation)

        self.body.object.reset_transformation()

        self.body.object.translate(state.position)
        self.body.object.rotation = quat_to_magnum(state.rotation)

        if reset_sensors:
            for _, v in self._sensors.items():
                v.set_transformation_from_spec()

        for k, v in state.sensor_states.items():
            assert k in self._sensors
            if isinstance(v.rotation, list):
                v.rotation = quat_from_coeffs(v.rotation)

            s = self._sensors[k]

            s.node.reset_transformation()
            s.node.translate(
                quat_rotate_vector(state.rotation.inverse(),
                                   v.position - state.position))
            s.node.rotation = quat_to_magnum(state.rotation.inverse() *
                                             v.rotation)
Code example #2
File: agent.py Project: jturner65/habitat-sim
    def set_state(
        self,
        state: AgentState,
        reset_sensors: bool = True,
        infer_sensor_states: bool = True,
        is_initial: bool = False,
    ) -> None:
        r"""Sets the agent's state

        :param state: The state to set the agent to
        :param reset_sensors: Whether or not to reset the sensors to their
            default intrinsic/extrinsic parameters before setting their extrinsic state.
        :param infer_sensor_states: Whether or not to infer the location of sensors based on
            the new location of the agent base state.
        :param is_initial: Whether this state is the initial state of the
            agent in the scene. Used for resetting the agent at a later time.

        Setting ``reset_sensors`` to :py:`False`
        allows the agent base state to be moved and the new
        sensor locations inferred without changing the configuration of the sensors
        with respect to the base state of the agent.

        Setting ``infer_sensor_states``
        to :py:`False` is useful if you'd like to directly control
        the state of a sensor instead of moving the agent.

        """
        attr.validate(state)
        habitat_sim.errors.assert_obj_valid(self.body)

        if isinstance(state.rotation, (list, np.ndarray)):
            state.rotation = quat_from_coeffs(state.rotation)

        self.body.object.reset_transformation()

        self.body.object.translate(state.position)
        self.body.object.rotation = quat_to_magnum(state.rotation)

        if reset_sensors:
            for _, v in self._sensors.items():
                v.set_transformation_from_spec()

        if not infer_sensor_states:
            for k, v in state.sensor_states.items():
                assert k in self._sensors
                if isinstance(v.rotation, list):
                    v.rotation = quat_from_coeffs(v.rotation)

                s = self._sensors[k]

                s.node.reset_transformation()
                s.node.translate(
                    quat_rotate_vector(state.rotation.inverse(),
                                       v.position - state.position))
                s.node.rotation = quat_to_magnum(state.rotation.inverse() *
                                                 v.rotation)

        if is_initial:
            self.initial_state = state
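
A minimal usage sketch of set_state (hedged: it assumes a configured habitat_sim.Simulator named sim, the habitat_sim.agent.AgentState class, and the numpy-quaternion package; the position and rotation values are purely illustrative):

import numpy as np
import quaternion  # numpy-quaternion, registers np.quaternion
import habitat_sim

# `sim` is assumed to be an already-configured habitat_sim.Simulator
agent = sim.get_agent(0)

new_state = habitat_sim.agent.AgentState()
new_state.position = np.array([1.0, 0.0, -2.0])          # world-frame position
new_state.rotation = np.quaternion(1.0, 0.0, 0.0, 0.0)   # identity rotation
# move the agent base and reset the sensors to their default extrinsics
agent.set_state(new_state, reset_sensors=True)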
Code example #3
    def find_path(self, goal_pos: np.ndarray) -> List[Any]:
        r"""Finds the sequence of actions that greedily follow the geodesic
        shortest path from the agent's current position to get to the goal

        :param goal_pos: The position of the goal
        :return: The list of actions to take. Ends with :py:`None`.

        This is roughly equivalent to just calling `next_action_along()` until
        it returns :py:`None`, but is faster.

        .. note-warning::

            Do not use this method if the agent has actuation noise.
            Instead, use :ref:`next_action_along` to find the action
            to take in a given state, then take that action, and repeat!
        """
        self.reset()

        state = self.agent.state
        path = self.impl.find_path(
            quat_to_magnum(state.rotation), state.position, goal_pos
        )

        if len(path) == 0:
            raise errors.GreedyFollowerError()

        path = list(map(lambda v: self.action_mapping[v], path))

        return path
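
A hedged usage sketch of the follower (assumes a configured habitat_sim.Simulator named sim whose agent uses the default discrete action space, and that Simulator.make_greedy_follower is available as in recent habitat-sim releases; goal_radius and the goal point are illustrative):

# `sim` is assumed to be an already-configured habitat_sim.Simulator
follower = sim.make_greedy_follower(agent_id=0, goal_radius=0.25)
goal_pos = sim.pathfinder.get_random_navigable_point()

actions = follower.find_path(goal_pos)  # e.g. ["move_forward", ..., None]
for action in actions:
    if action is None:  # the returned path ends with None
        break
    sim.step(action)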
Code example #4
File: test_physics.py Project: xjwang-cs/habitat-sim
def test_kinematics(sim):
    cfg_settings = examples.settings.default_sim_settings.copy()

    cfg_settings[
        "scene"] = "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"
    # enable the physics simulator: also clears available actions to no-op
    cfg_settings["enable_physics"] = True
    cfg_settings["depth_sensor"] = True

    # test loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    sim.reconfigure(hab_cfg)
    assert sim.get_physics_object_library_size() > 0

    # test adding an object to the world
    object_id = sim.add_object(0)
    assert len(sim.get_existing_object_ids()) > 0

    # test kinematics
    I = np.identity(4)

    # test get and set translation
    sim.set_translation(np.array([0, 1.0, 0]), object_id)
    assert np.allclose(sim.get_translation(object_id), np.array([0, 1.0, 0]))

    # test get and set transform
    sim.set_transformation(I, object_id)
    assert np.allclose(sim.get_transformation(object_id), I)

    # test get and set rotation
    Q = quat_from_angle_axis(np.pi, np.array([0, 1.0, 0]))
    expected = np.eye(4)
    expected[0:3, 0:3] = quaternion.as_rotation_matrix(Q)
    sim.set_rotation(quat_to_magnum(Q), object_id)
    assert np.allclose(sim.get_transformation(object_id), expected)
    assert np.allclose(quat_from_magnum(sim.get_rotation(object_id)), Q)

    # test object removal
    old_object_id = sim.remove_object(object_id)
    assert len(sim.get_existing_object_ids()) == 0
    assert old_object_id == object_id

    object_id = sim.add_object(0)

    prev_time = 0.0
    for _ in range(2):
        # do some kinematics here (todo: translating or rotating instead of absolute)
        sim.set_translation(np.random.rand(3), object_id)
        T = sim.get_transformation(object_id)

        # test getting observation
        obs = sim.step(
            random.choice(list(hab_cfg.agents[0].action_space.keys())))

        # check that time is increasing in the world
        assert sim.get_world_time() > prev_time
        prev_time = sim.get_world_time()
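
A small standalone check of the rotation math these assertions rely on (assuming the habitat_sim.utils.common helpers and the numpy-quaternion package used throughout these examples): a rotation of pi about +Y flips the X and Z axes.

import numpy as np
import quaternion  # numpy-quaternion
from habitat_sim.utils.common import quat_from_angle_axis

Q = quat_from_angle_axis(np.pi, np.array([0, 1.0, 0]))
R = quaternion.as_rotation_matrix(Q)
# pi about +Y maps x -> -x and z -> -z, leaving y unchanged
assert np.allclose(R, np.diag([-1.0, 1.0, -1.0]))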
Code example #5
    def step_cas(self, vel_control, dt=1.0 / 30.0):

        agent_state = self._default_agent.state
        self._num_total_frames += 1
        self._last_state = self._default_agent.get_state()

        previous_rigid_state = habitat_sim.RigidState(
            quat_to_magnum(agent_state.rotation), agent_state.position)
        # manually integrate the rigid state
        target_rigid_state = vel_control.integrate_transform(
            dt, previous_rigid_state)

        # snap rigid state to navmesh and set state to object/agent
        # calls pathfinder.try_step or self.pathfinder.try_step_no_sliding
        end_pos = self.step_filter(previous_rigid_state.translation,
                                   target_rigid_state.translation)

        # set the computed state
        agent_state.position = end_pos
        agent_state.rotation = quat_from_magnum(target_rigid_state.rotation)
        self._default_agent.set_state(agent_state)

        # Check if a collision occurred
        dist_moved_before_filter = (target_rigid_state.translation -
                                    previous_rigid_state.translation).dot()
        dist_moved_after_filter = (end_pos -
                                   previous_rigid_state.translation).dot()

        # NB: There are some cases where ||filter_end - end_pos|| > 0 when a
        # collision _didn't_ happen. One such case is going up stairs.  Instead,
        # we check to see if the amount moved after the application of the filter
        # is _less_ than the amount moved before the application of the filter
        EPS = 0.2
        collided = (dist_moved_after_filter + EPS) < dist_moved_before_filter
        # print("collided", collided)
        # step physics by dt
        step_start_time = time.time()
        super().step_world(dt)
        self._previous_step_time = time.time() - step_start_time

        observations = self.get_sensor_observations()
        # Whether or not the action taken resulted in a collision
        observations["collided"] = collided

        return observations
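
A standalone sketch of the collision heuristic used above (the function name and the plain-numpy types are illustrative, not part of habitat-sim): a step counts as a collision when the navmesh filter shortened the requested displacement by more than a small tolerance.

import numpy as np

def collided_after_filter(start, target, filtered_end, eps=0.2):
    # squared length of the step the controller requested
    dist_before_filter = np.dot(target - start, target - start)
    # squared length of the step the navmesh filter actually allowed
    dist_after_filter = np.dot(filtered_end - start, filtered_end - start)
    return (dist_after_filter + eps) < dist_before_filter

# e.g. a 1 m forward step truncated to 0.2 m by the filter reads as a collision
print(collided_after_filter(np.zeros(3),
                            np.array([0.0, 0.0, -1.0]),
                            np.array([0.0, 0.0, -0.2])))  # True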
Code example #6
    def next_action_along(self, goal_pos: np.ndarray) -> Any:
        r"""Find the next action to greedily follow the geodesic shortest path
        from the agent's current position to get to the goal

        :param goal_pos: The position of the goal
        :return: The action to take
        """
        if self.last_goal is None or not np.allclose(goal_pos, self.last_goal):
            self.reset()
            self.last_goal = goal_pos

        state = self.agent.state
        next_act = self.impl.next_action_along(quat_to_magnum(state.rotation),
                                               state.position, goal_pos)

        if next_act == GreedyFollowerCodes.ERROR:
            raise errors.GreedyFollowerError()
        else:
            return self.action_mapping[next_act]
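
A hedged sketch of the loop the warning in code example #3 recommends when the agent has actuation noise (assumes sim, follower, and goal_pos were set up as in the sketch after example #3, and that the follower returns None on arrival):

action = follower.next_action_along(goal_pos)
while action is not None:
    sim.step(action)                               # actually take the action
    action = follower.next_action_along(goal_pos)  # re-plan from the new state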
Code example #7
def update(env, state, a, delta, vel_control, time_step):
    dt = time_step
    # angle = quaternion.as_euler_angles(env.habitat_env._sim.get_agent_state().rotation)

    agent_forward = quat_to_magnum(
        env.habitat_env._sim.get_agent_state().rotation).transform_vector(
            mn.Vector3(0, 0, -1.0))

    # print("linear velocity", state.v)
    # print("angular velocity", state.yaw)
    state.x = env.habitat_env._sim.get_agent_state().position[0]
    state.y = -env.habitat_env._sim.get_agent_state().position[2]
    # state.yaw = -angle[1]
    state.yaw = math.atan2(agent_forward[0], agent_forward[2])

    vel_control.linear_velocity = np.array([0, 0, -a])
    # local up is y
    vel_control.angular_velocity = np.array([0, delta, 0])

    print("lin", a)
    print("vel", delta)

    obs, reward, done, info = env.step(vel_control)
    return obs, info, state
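
A minimal, self-contained sketch of the heading computation above (assuming the magnum bindings and the habitat_sim.utils.common helpers used throughout these examples): rotate the agent's local forward axis (-Z in Habitat's convention) into world space and take its yaw about +Y.

import math
import numpy as np
import magnum as mn
from habitat_sim.utils.common import quat_from_angle_axis, quat_to_magnum

rot = quat_from_angle_axis(np.pi / 2, np.array([0, 1.0, 0]))  # illustrative rotation
agent_forward = quat_to_magnum(rot).transform_vector(mn.Vector3(0, 0, -1.0))
yaw = math.atan2(agent_forward[0], agent_forward[2])
print(yaw)  # approximately -pi/2 for this rotation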
Code example #8
        agent_pos = agent_state.position
        # topdown map at agent position
        top_down_map = maps.get_topdown_map(
            sim.pathfinder, height=agent_pos[1], meters_per_pixel=meters_per_pixel
        )
        recolor_map = np.array(
            [[255, 255, 255], [128, 128, 128], [0, 0, 0]], dtype=np.uint8
        )
        top_down_map = recolor_map[top_down_map]
        grid_dimensions = (top_down_map.shape[0], top_down_map.shape[1])
        # convert world agent position to maps module grid point
        agent_grid_pos = maps.to_grid(
            agent_pos[2], agent_pos[0], grid_dimensions, pathfinder=sim.pathfinder
        )
        agent_forward = utils.quat_to_magnum(
            sim.agents[0].get_state().rotation
        ).transform_vector(mn.Vector3(0, 0, -1.0))
        agent_orientation = math.atan2(agent_forward[0], agent_forward[2])
        # draw the agent and trajectory on the map
        maps.draw_agent(
            top_down_map, agent_grid_pos, agent_orientation, agent_radius_px=8
        )
        print("\nDisplay topdown map with agent:")
        display_map(top_down_map)


# %%
# @markdown ##Saving the NavMesh

# fmt: off
# @markdown An existing NavMesh can be saved with *Pathfinder.save_nav_mesh(filename)*
Code example #9
def reference_path_example(mode):
    """
    Saves a video of a shortest path follower agent navigating from a start
    position to a goal. Agent follows the ground truth reference path by
    navigating to intermediate viewpoints en route to goal.
    Args:
        mode: 'geodesic_path' or 'greedy'
    """
    config = habitat.get_config(
        config_paths="configs/test/habitat_r2r_vln_test_continuous.yaml")
    config.defrost()
    config.TASK.MEASUREMENTS.append("TOP_DOWN_MAP")
    config.TASK.SENSORS.append("HEADING_SENSOR")
    config.freeze()
    with SimpleRLEnv(config=config) as env:
        print("Environment creation successful")
        sim_time = 30  # @param {type:"integer"}
        continuous_nav = True  # @param {type:"boolean"}
        if continuous_nav:
            control_frequency = 10  # @param {type:"slider", min:1, max:30, step:1}
            frame_skip = 6  # @param {type:"slider", min:1, max:30, step:1}
        fps = control_frequency * frame_skip
        print("fps = " + str(fps))
        control_sequence = []
        # create and configure a new VelocityControl structure
        vel_control = habitat_sim.physics.VelocityControl()
        vel_control.controlling_lin_vel = True
        vel_control.lin_vel_is_local = True
        vel_control.controlling_ang_vel = True
        vel_control.ang_vel_is_local = True

        for episode in range(6):
            env.reset()
            print(env.habitat_env.current_episode)
            episode_id = env.habitat_env.current_episode.episode_id
            print(
                f"Agent stepping around inside environment. Episode id: {episode_id}"
            )

            dirname = os.path.join(IMAGE_DIR, "vln_reference_path_example",
                                   mode, "%02d" % episode)
            if os.path.exists(dirname):
                shutil.rmtree(dirname)
            os.makedirs(dirname)

            images = []
            steps = 0

            reference_path = env.habitat_env.current_episode.reference_path

            waypoints = np.array(reference_path)

            for i in range(waypoints.shape[1] - 1):
                if np.abs(np.linalg.norm(waypoints[i + 1, :] -
                                         waypoints[i, :])) < 5:
                    waypoints = np.delete(waypoints, (i + 1), axis=0)

            waypoints = waypoints[0::10]
            x_wp = waypoints[:, 0]
            y_wp = -waypoints[:, 2]
            z_wp = waypoints[:, 1]

            ax = x_wp
            ay = y_wp

            cx, cy, cyaw, ck, s_profile, s = cubic_spline_planner.calc_spline_course(
                ax, ay, ds=0.01)

            goal = [cx[-1], cy[-1], cyaw[-1]]

            agent_forward = quat_to_magnum(
                env.habitat_env._sim.get_agent_state(
                ).rotation).transform_vector(mn.Vector3(0, 0, -1.0))

            angle = quaternion.as_euler_angles(
                env.habitat_env._sim.get_agent_state().rotation)
            init_x = env.habitat_env._sim.get_agent_state().position[0]
            init_y = -env.habitat_env._sim.get_agent_state().position[2]
            # init_yaw = angle[1]

            init_yaw = math.atan2(agent_forward[0], agent_forward[2])

            state = State(x=init_x, y=init_y, yaw=init_yaw)

            time_step = 1.0 / (30)
            T = 500
            t, x, y, yaw, vel, omeg, images = closed_loop_prediction(
                env, state, cx, cy, cyaw, ck, s_profile, goal, images,
                vel_control, time_step)
            images_to_video(images,
                            dirname,
                            str(episode_id),
                            fps=int(1.0 / time_step))
            images = []
Code example #10
def test_kinematics():
    cfg_settings = examples.settings.default_sim_settings.copy()

    cfg_settings[
        "scene"] = "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"
    # enable the physics simulator: also clears available actions to no-op
    cfg_settings["enable_physics"] = True
    cfg_settings["depth_sensor"] = True

    # test loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        obj_mgr = sim.get_object_template_manager()

        assert obj_mgr.get_num_templates() > 0

        # test adding an object to the world
        # get handle for object 0, used to test
        obj_handle_list = obj_mgr.get_template_handles("cheezit")
        object_id = sim.add_object_by_handle(obj_handle_list[0])
        assert len(sim.get_existing_object_ids()) > 0

        # test setting the motion type

        assert sim.set_object_motion_type(
            habitat_sim.physics.MotionType.STATIC, object_id)
        assert (sim.get_object_motion_type(object_id) ==
                habitat_sim.physics.MotionType.STATIC)
        assert sim.set_object_motion_type(
            habitat_sim.physics.MotionType.KINEMATIC, object_id)
        assert (sim.get_object_motion_type(object_id) ==
                habitat_sim.physics.MotionType.KINEMATIC)

        # test kinematics
        I = np.identity(4)

        # test get and set translation
        sim.set_translation(np.array([0, 1.0, 0]), object_id)
        assert np.allclose(sim.get_translation(object_id),
                           np.array([0, 1.0, 0]))

        # test object SceneNode
        object_node = sim.get_object_scene_node(object_id)
        assert np.allclose(sim.get_translation(object_id),
                           object_node.translation)

        # test get and set transform
        sim.set_transformation(I, object_id)
        assert np.allclose(sim.get_transformation(object_id), I)

        # test get and set rotation
        Q = quat_from_angle_axis(np.pi, np.array([0, 1.0, 0]))
        expected = np.eye(4)
        expected[0:3, 0:3] = quaternion.as_rotation_matrix(Q)
        sim.set_rotation(quat_to_magnum(Q), object_id)
        assert np.allclose(sim.get_transformation(object_id), expected)
        assert np.allclose(quat_from_magnum(sim.get_rotation(object_id)), Q)

        # test object removal
        sim.remove_object(object_id)
        assert len(sim.get_existing_object_ids()) == 0

        obj_handle_list = obj_mgr.get_template_handles("cheezit")
        object_id = sim.add_object_by_handle(obj_handle_list[0])

        prev_time = 0.0
        for _ in range(2):
            # do some kinematics here (todo: translating or rotating instead of absolute)
            sim.set_translation(np.random.rand(3), object_id)
            T = sim.get_transformation(object_id)  # noqa : F841

            # test getting observation
            sim.step(random.choice(list(
                hab_cfg.agents[0].action_space.keys())))

            # check that time is increasing in the world
            assert sim.get_world_time() > prev_time
            prev_time = sim.get_world_time()

        sim.remove_object(object_id)

        # test attaching/detaching an Agent to/from physics simulation
        agent_node = sim.agents[0].scene_node
        obj_handle_list = obj_mgr.get_template_handles("cheezit")
        object_id = sim.add_object_by_handle(obj_handle_list[0], agent_node)
        sim.set_translation(np.random.rand(3), object_id)
        assert np.allclose(agent_node.translation,
                           sim.get_translation(object_id))
        sim.remove_object(object_id, False)  # don't delete the agent's node
        assert agent_node.translation

        # test get/set RigidState
        object_id = sim.add_object_by_handle(obj_handle_list[0])
        targetRigidState = habitat_sim.bindings.RigidState(
            mn.Quaternion(), np.array([1.0, 2.0, 3.0]))
        sim.set_rigid_state(targetRigidState, object_id)
        objectRigidState = sim.get_rigid_state(object_id)
        assert np.allclose(objectRigidState.translation,
                           targetRigidState.translation)
        assert objectRigidState.rotation == targetRigidState.rotation
Code example #11
def test_kinematics():
    cfg_settings = examples.settings.default_sim_settings.copy()

    cfg_settings[
        "scene"] = "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"
    # enable the physics simulator: also clears available actions to no-op
    cfg_settings["enable_physics"] = True
    cfg_settings["depth_sensor"] = True

    # test loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)
    with habitat_sim.Simulator(hab_cfg) as sim:
        # get the rigid object attributes manager, which manages
        # templates used to create objects
        obj_template_mgr = sim.get_object_template_manager()
        obj_template_mgr.load_configs("data/objects/example_objects/", True)
        assert obj_template_mgr.get_num_templates() > 0
        # get the rigid object manager, which provides direct
        # access to objects
        rigid_obj_mgr = sim.get_rigid_object_manager()

        # test adding an object to the world
        # get handle for object 0, used to test
        obj_handle_list = obj_template_mgr.get_template_handles("cheezit")
        cheezit_box = rigid_obj_mgr.add_object_by_template_handle(
            obj_handle_list[0])
        assert rigid_obj_mgr.get_num_objects() > 0
        assert (len(rigid_obj_mgr.get_object_handles()) ==
                rigid_obj_mgr.get_num_objects())

        # test setting the motion type
        cheezit_box.motion_type = habitat_sim.physics.MotionType.STATIC
        assert cheezit_box.motion_type == habitat_sim.physics.MotionType.STATIC
        cheezit_box.motion_type = habitat_sim.physics.MotionType.KINEMATIC
        assert cheezit_box.motion_type == habitat_sim.physics.MotionType.KINEMATIC

        # test kinematics
        I = np.identity(4)

        # test get and set translation
        cheezit_box.translation = [0.0, 1.0, 0.0]
        assert np.allclose(cheezit_box.translation, np.array([0.0, 1.0, 0.0]))

        # test object SceneNode
        assert np.allclose(cheezit_box.translation,
                           cheezit_box.root_scene_node.translation)

        # test get and set transform
        cheezit_box.transformation = I
        assert np.allclose(cheezit_box.transformation, I)

        # test get and set rotation
        Q = quat_from_angle_axis(np.pi, np.array([0.0, 1.0, 0.0]))
        expected = np.eye(4)
        expected[0:3, 0:3] = quaternion.as_rotation_matrix(Q)
        cheezit_box.rotation = quat_to_magnum(Q)
        assert np.allclose(cheezit_box.transformation, expected)
        assert np.allclose(quat_from_magnum(cheezit_box.rotation), Q)

        # test object removal
        rigid_obj_mgr.remove_object_by_id(cheezit_box.object_id)

        assert rigid_obj_mgr.get_num_objects() == 0

        obj_handle_list = obj_template_mgr.get_template_handles("cheezit")
        cheezit_box = rigid_obj_mgr.add_object_by_template_handle(
            obj_handle_list[0])

        prev_time = 0.0
        for _ in range(2):
            # do some kinematics here (todo: translating or rotating instead of absolute)
            cheezit_box.translation = np.random.rand(3)
            T = cheezit_box.transformation  # noqa : F841

            # test getting observation
            sim.step(random.choice(list(
                hab_cfg.agents[0].action_space.keys())))

            # check that time is increasing in the world
            assert sim.get_world_time() > prev_time
            prev_time = sim.get_world_time()

        rigid_obj_mgr.remove_object_by_id(cheezit_box.object_id)

        # test attaching/detaching an Agent to/from physics simulation
        agent_node = sim.agents[0].scene_node
        obj_handle_list = obj_template_mgr.get_template_handles("cheezit")
        cheezit_agent = rigid_obj_mgr.add_object_by_template_handle(
            obj_handle_list[0], agent_node)

        cheezit_agent.translation = np.random.rand(3)
        assert np.allclose(agent_node.translation, cheezit_agent.translation)
        rigid_obj_mgr.remove_object_by_id(
            cheezit_agent.object_id,
            delete_object_node=False)  # don't delete the agent's node
        assert agent_node.translation

        # test get/set RigidState
        cheezit_box = rigid_obj_mgr.add_object_by_template_handle(
            obj_handle_list[0])
        targetRigidState = habitat_sim.bindings.RigidState(
            mn.Quaternion(), np.array([1.0, 2.0, 3.0]))
        cheezit_box.rigid_state = targetRigidState
        objectRigidState = cheezit_box.rigid_state
        assert np.allclose(objectRigidState.translation,
                           targetRigidState.translation)
        assert objectRigidState.rotation == targetRigidState.rotation
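
A small standalone check of the conversion pair these assertions rely on (assuming the habitat_sim.utils.common helpers used throughout): quat_to_magnum and quat_from_magnum round-trip a quaternion up to float32 precision.

import numpy as np
from habitat_sim.utils.common import (quat_from_angle_axis, quat_from_magnum,
                                      quat_to_magnum)

q = quat_from_angle_axis(np.pi / 3, np.array([0, 1.0, 0]))
q_round_trip = quat_from_magnum(quat_to_magnum(q))
# magnum stores float32 components, so compare with a tolerance
assert abs(q_round_trip - q) < 1e-6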
Code example #12
def reference_path_example(mode):
    """
    Saves a video of a shortest path follower agent navigating from a start
    position to a goal. Agent follows the ground truth reference path by
    navigating to intermediate viewpoints en route to goal.
    Args:
        mode: 'geodesic_path' or 'greedy'
    """

    show_waypoint_indicators = False
    config = habitat.get_config(
        config_paths="configs/test/habitat_r2r_vln_test.yaml")
    config.defrost()
    config.TASK.MEASUREMENTS.append("TOP_DOWN_MAP")
    config.TASK.SENSORS.append("HEADING_SENSOR")
    config.freeze()

    split = 'train'
    vln_data_path = '/home/mirshad7/habitat-lab/data/datasets/vln/mp3d/r2r/v1/' + split + '/' + split + '.json.gz'
    with gzip.open(vln_data_path, "rt") as f:
        deserialized = json.loads(f.read())

    val_ids = {}
    for i in range(len(deserialized['episodes'])):
        val_ids[deserialized['episodes'][i]['episode_id']] = i

    new_data_dict = {}
    new_data_dict['episodes'] = {}
    new_data_dict['instruction_vocab'] = deserialized['instruction_vocab']
    new_data_list = []
    save_fig = False
    steps_dict = {}

    with SimpleRLEnv(config=config) as env:
        print("Environment creation successful")

        # obj_attr_mgr = env.habitat_env._sim.get_object_template_manager()
        # remove_all_objects(env.habitat_env._sim)
        sim_time = 30  # @param {type:"integer"}
        continuous_nav = True  # @param {type:"boolean"}
        if continuous_nav:
            control_frequency = 10  # @param {type:"slider", min:1, max:30, step:1}
            frame_skip = 6  # @param {type:"slider", min:1, max:30, step:1}
        fps = control_frequency * frame_skip
        print("fps = " + str(fps))
        control_sequence = []
        for action in range(int(sim_time * control_frequency)):
            if continuous_nav:
                # allow forward velocity and y rotation to vary
                control_sequence.append({
                    "forward_velocity":
                    random.random() * 2.0,  # [0,2)
                    "rotation_velocity":
                    (random.random() - 0.5) * 2.0,  # [-1,1)
                })
            else:
                control_sequence.append(random.choice(action_names))

        # create and configure a new VelocityControl structure
        vel_control = habitat_sim.physics.VelocityControl()
        vel_control.controlling_lin_vel = True
        vel_control.lin_vel_is_local = True
        vel_control.controlling_ang_vel = True
        vel_control.ang_vel_is_local = True

        vis_ids = []

        collided_trajectories = []
        trajectory_without_collision = True

        for episode in range(len(deserialized['episodes'])):
            counter = 0
            env.reset()
            episode_id = env.habitat_env.current_episode.episode_id
            print(
                f"Agent stepping around inside environment. Episode id: {episode_id}"
            )

            dirname = os.path.join(IMAGE_DIR, "vln_reference_path_example",
                                   mode, "%02d" % episode)
            if os.path.exists(dirname):
                shutil.rmtree(dirname)
            os.makedirs(dirname)

            images = []
            steps = 0
            reference_path = env.habitat_env.current_episode.reference_path + [
                env.habitat_env.current_episode.goals[0].position
            ]

            # manually control the object's kinematic state via velocity integration
            time_step = 1.0 / (30)

            x = []
            y = []
            yaw = []
            vel = []
            omega = []

            continuous_path_follower = ContinuousPathFollower(
                env.habitat_env._sim, reference_path, waypoint_threshold=0.4)
            max_time = 30.0
            done = False
            EPS = 1e-4
            prev_pos = np.linalg.norm(
                env.habitat_env._sim.get_agent_state().position)
            if show_waypoint_indicators:
                for vis_id in vis_ids:
                    env.habitat_env._sim.remove_object(vis_id)
                vis_ids = setup_path_visualization(env.habitat_env._sim,
                                                   continuous_path_follower)

            while continuous_path_follower.progress < 1.0:
                # print("done",done)
                if done:
                    break
                if counter == 150:
                    counter = 0
                    collided_trajectories.append(
                        env.habitat_env.current_episode.episode_id)
                    trajectory_without_collision = False
                    break
                continuous_path_follower.update_waypoint()

                if show_waypoint_indicators:
                    env.habitat_env._sim.set_translation(
                        continuous_path_follower.waypoint, vis_ids[0])

                agent_state = env.habitat_env._sim.get_agent_state()
                pos = np.linalg.norm(
                    env.habitat_env._sim.get_agent_state().position)

                if abs(pos - prev_pos) < EPS:
                    counter += 1
                previous_rigid_state = habitat_sim.RigidState(
                    quat_to_magnum(agent_state.rotation), agent_state.position)

                v, w = track_waypoint(
                    continuous_path_follower.waypoint,
                    previous_rigid_state,
                    vel_control,
                    dt=time_step,
                )

                observations, reward, done, info = env.step(vel_control)
                # print(observations)
                # save_map(observations, info, images)
                prev_pos = pos
                x.append(env.habitat_env._sim.get_agent_state().position[0])
                y.append(-env.habitat_env._sim.get_agent_state().position[2])
                yaw.append(
                    quaternion.as_euler_angles(
                        env.habitat_env._sim.get_agent_state().rotation)[1])
                vel.append(v)
                omega.append(w)
                steps += 1

            if save_fig:  # pragma: no cover
                plt.close()
                plt.subplots(1)
                plt.plot(x, y, "xb", label="input")
                plt.grid(True)
                plt.axis("equal")
                plt.xlabel("x[m]")
                plt.ylabel("y[m]")
                plt.legend()
                pose_title = dirname + 'pose.png'
                plt.savefig(pose_title)

                plt.subplots(1)
                plt.plot(yaw, "-r", label="yaw")
                plt.grid(True)
                plt.legend()
                plt.xlabel("line length[m]")
                plt.ylabel("yaw angle [rad]")
                yaw_title = dirname + 'yaw.png'
                plt.savefig(yaw_title)

                plt.subplots(1)
                plt.plot(vel, "-r", label="vel")
                plt.grid(True)
                plt.legend()
                plt.xlabel("line length[m]")
                plt.ylabel("v_reference [m/s]")
                vel_title = dirname + 'vel.png'
                plt.savefig(vel_title)

                plt.subplots(1)
                plt.plot(omega, "-r", label="omega_reference")
                plt.grid(True)
                plt.legend()
                plt.xlabel("line length[m]")
                plt.ylabel("omega_reference [rad/s]")
                omega_title = dirname + 'omega.png'
                plt.savefig(omega_title)

                x = []
                y = []
                yaw = []
                vel = []
                omega = []

            if trajectory_without_collision:
                ids = val_ids[episode_id]
                single_data_dict = deserialized['episodes'][ids]
                new_data_list.append(single_data_dict)
            trajectory_without_collision = True
            print(f"Navigated to goal in {steps} steps.")
            steps_dict[episode] = steps
            # images_to_video(images, dirname, str(episode_id), fps = int (1.0/time_step))
            images = []

        steps_path = '/home/mirshad7/habitat-lab/train_steps.json.gz'
        # new_data_dict['episodes'] = new_data_list
        # path = '/home/mirshad7/habitat-lab/data/datasets/vln/mp3d/r2r/robo_vln/train/train.json.gz'
        # compress_json.dump(new_data_dict, path)
        compress_json.dump(steps_dict, steps_path)
        print("collided trajectories:", collided_trajectories)
Code example #13
def RunSimulation(envNum, record, path, Coords, seed):

    settings = {"image_height": 720, "image_width": 1080, "random_seed": seed}
    # pick a scene file to load
    scene_file = path

    if record:
        os.system("mkdir CapturedFrames")
        os.system(("mkdir CapturedFrames/Environment" + str(envNum)))

    # --------------------------
    # Simulator Setup
    # --------------------------

    # create a SimulatorConfiguration object
    sim_cfg = habitat_sim.SimulatorConfiguration()
    sim_cfg.scene.id = scene_file
    # configure the simulator to initialize a PhysicsManager
    sim_cfg.enable_physics = True
    # configure the simulator to load a specific physics configuration file (define paths to your object models here)
    sim_cfg.physics_config_file = "data/default.phys_scene_config.json"
    # pick this based on your device
    relative_camera_position = [0.0, 0, 0.0]
    # Note: all sensors must have the same resolution
    sensors = {
        "color": {
            "sensor_type": habitat_sim.SensorType.COLOR,
            "resolution": [settings["image_height"], settings["image_width"]],
            "position": relative_camera_position,
        },
        "depth": {
            "sensor_type": habitat_sim.SensorType.DEPTH,
            "resolution": [settings["image_height"], settings["image_width"]],
            "position": relative_camera_position,
        },
    }

    sensor_specs = []
    for sensor_uuid, sensor_params in sensors.items():
        sensor_spec = habitat_sim.SensorSpec()
        sensor_spec.uuid = sensor_uuid
        sensor_spec.sensor_type = sensor_params["sensor_type"]
        sensor_spec.resolution = sensor_params["resolution"]
        sensor_spec.position = sensor_params["position"]

        sensor_specs.append(sensor_spec)

    # setup the agent/camera
    agent_cfg = habitat_sim.agent.AgentConfiguration()
    agent_cfg.sensor_specifications = sensor_specs

    agent_cfg.action_space = {
        "move_forward":
        habitat_sim.agent.ActionSpec(
            "move_forward", habitat_sim.agent.ActuationSpec(amount=0.2)),
        "turn_left":
        habitat_sim.agent.ActionSpec(
            "turn_left", habitat_sim.agent.ActuationSpec(amount=10.0)),
    }

    # generate the full simulator configuration
    combined_cfg = habitat_sim.Configuration(sim_cfg, [agent_cfg])

    # initialize the simulator
    sim = habitat_sim.Simulator(combined_cfg)
    sim.agents[0].controls.move_filter_fn = sim._scene_collision_step_filter

    # set random seed
    random.seed(settings["random_seed"])
    sim.seed(settings["random_seed"])

    # initialize the agent state
    sim.initialize_agent(0)
    # ----------------------------------
    # Quadrotor Prototype Functionality
    # ----------------------------------

    # assuming the first entry in the object list corresponds to the quadrotor model
    quadrotor_id = sim.add_object(0)

    # the object will not be affected by forces such as gravity and will remain static until explicitly moved
    sim.set_object_motion_type(habitat_sim.physics.MotionType.KINEMATIC,
                               quadrotor_id)

    ## Randomly initialize the Ball

    initial_height = 0.00001
    possible_initial_point = sim.pathfinder.get_random_navigable_point(
    ) + np.array([0, initial_height, 0])
    sim.set_translation(possible_initial_point, quadrotor_id)
    while (sim.contact_test(quadrotor_id)):
        possible_initial_point = sim.pathfinder.get_random_navigable_point(
        ) + np.array([0, initial_height, 0])
        sim.set_translation(possible_initial_point, quadrotor_id)

    # place the object in the air
    # sim.set_translation(np.array([-0.569043, 2.04804, 13.6156]), quadrotor_id)
    sim.agents[0].scene_node.translation = sim.get_translation(quadrotor_id)

    # static_3rd_person_camera_position = np.array([-0.569043, 2.04804, 12.6156])
    static_3rd_person_camera_position = np.array(
        sim.get_translation(quadrotor_id)) - np.array([0, 0, 2])

    initTranslation = sim.get_translation(quadrotor_id)

    for frame in range(len(Coords)):

        dx = Coords[frame][0]
        dy = Coords[frame][1]
        dz = 0

        dispInFrame = np.array([dx, dy, dz])
        currentTranslation = np.array(sim.get_translation(quadrotor_id))

        sim.set_translation(dispInFrame + currentTranslation, quadrotor_id)

        if (sim.contact_test(quadrotor_id)):
            print("Collided at frame : ", frame)
            break

        if record:

            # get/save 1st person images
            # agent_observations = sim.get_sensor_observations()
            #save_observation(agent_observations, frame, "agent_")

            # move camera to 3rd person view
            sim.get_agent(0).scene_node.translation = np.array(
                static_3rd_person_camera_position)
            sim.get_agent(0).scene_node.rotation = ut.quat_to_magnum(
                ut.quat_look_at(sim.get_translation(quadrotor_id),
                                static_3rd_person_camera_position))

            agent_observations = sim.get_sensor_observations()
            save_observation(agent_observations, frame, envNum, "3rdperson_")

            # reset the agent for the next step
            sim.get_agent(0).scene_node.translation = sim.get_translation(
                quadrotor_id)
            sim.get_agent(0).scene_node.rotation = sim.get_rotation(
                quadrotor_id)

    if record:
        MakeVideo(envNum)

    sim.close()
    del sim