Example #1
    def update_base(self):
        ctrl_freq = self._sim.ctrl_freq

        before_trans_state = self._capture_robot_state(self._sim)

        trans = self._sim.robot.sim_obj.transformation
        rigid_state = habitat_sim.RigidState(
            mn.Quaternion.from_matrix(trans.rotation()), trans.translation)

        target_rigid_state = self.base_vel_ctrl.integrate_transform(
            1 / ctrl_freq, rigid_state)
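        # Filter the integrated translation against the navmesh so the base
        # stays on walkable area.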
        end_pos = self._sim.step_filter(rigid_state.translation,
                                        target_rigid_state.translation)

        target_trans = mn.Matrix4.from_(
            target_rigid_state.rotation.to_matrix(), end_pos)
        self._sim.robot.sim_obj.transformation = target_trans

        if not self._config.get("ALLOW_DYN_SLIDE", True):
            # Check whether the arm collides with anything in the new robot state.
            # If so, revert to the previous transform.
            self._sim.internal_step(-1)
            colls = self._sim.get_collisions()
            did_coll, _ = rearrange_collision(colls, self._sim.snapped_obj_id,
                                              False)
            if did_coll:
                # Don't allow the step; revert to the previous state.
                self._set_robot_state(self._sim, before_trans_state)
                self._sim.robot.sim_obj.transformation = trans
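
For reference, here is a minimal, self-contained sketch of the velocity-integration step that update_base relies on. It only exercises habitat_sim.physics.VelocityControl.integrate_transform with illustrative velocities and an identity start pose; there is no simulator, robot, or navmesh filtering involved, and the chosen numbers are assumptions rather than values from the example above.

import magnum as mn
import habitat_sim

# Local-frame linear and angular velocity control, mirroring the setup that
# update_base's base_vel_ctrl is expected to use.
vel_control = habitat_sim.physics.VelocityControl()
vel_control.controlling_lin_vel = True
vel_control.lin_vel_is_local = True
vel_control.controlling_ang_vel = True
vel_control.ang_vel_is_local = True

# Illustrative command: 1 m/s forward (habitat's forward is -z) and 0.5 rad/s yaw.
vel_control.linear_velocity = mn.Vector3(0.0, 0.0, -1.0)
vel_control.angular_velocity = mn.Vector3(0.0, 0.5, 0.0)

# Integrate a single 1/30 s control step starting from the identity pose.
start = habitat_sim.RigidState(
    mn.Quaternion.rotation(mn.Rad(0.0), mn.Vector3(0.0, 1.0, 0.0)),
    mn.Vector3(0.0, 0.0, 0.0),
)
target = vel_control.integrate_transform(1.0 / 30.0, start)
print(target.translation, target.rotation)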
Example #2
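                    # Discrete-action branch: apply the action through the agent's
                    # controls with navmesh filtering disabled (apply_filter=False).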
                    habitat_sim.errors.assert_obj_valid(v)
                    agent.controls.action(
                        v.object,
                        discrete_action.name,
                        discrete_action.actuation,
                        apply_filter=False,
                    )

        # simulate and collect frames
        for _frame in range(frame_skip):
            if continuous_nav:
                # Integrate the velocity and apply the transform.
                # Note: this can be done at a higher frequency for more accuracy
                agent_state = agent.state
                previous_rigid_state = habitat_sim.RigidState(
                    utils.quat_to_magnum(agent_state.rotation), agent_state.position
                )

                # manually integrate the rigid state
                target_rigid_state = vel_control.integrate_transform(
                    time_step, previous_rigid_state
                )

                # snap rigid state to navmesh and set state to object/agent
                # calls pathfinder.try_step or self.pathfinder.try_step_no_sliding
                end_pos = sim.step_filter(
                    previous_rigid_state.translation, target_rigid_state.translation
                )

                # apply the filtered position and the integrated rotation back
                # to the agent
                agent_state.position = end_pos
                agent_state.rotation = utils.quat_from_magnum(
                    target_rigid_state.rotation
                )
                agent.set_state(agent_state)


def reference_path_example(mode):
    """
    Saves a video of a shortest-path-follower agent navigating from a start
    position to a goal. The agent follows the ground-truth reference path by
    navigating to intermediate viewpoints en route to the goal.

    Args:
        mode: 'geodesic_path' or 'greedy'
    """

    show_waypoint_indicators = False
    config = habitat.get_config(
        config_paths="configs/test/habitat_r2r_vln_test.yaml")
    config.defrost()
    config.TASK.MEASUREMENTS.append("TOP_DOWN_MAP")
    config.TASK.SENSORS.append("HEADING_SENSOR")
    config.freeze()

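    # Load the raw R2R episodes for this split and index them by episode_id so
    # that episodes whose rollouts finish without getting stuck can be collected
    # into new_data_list below.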
    split = 'train'
    vln_data_path = '/home/mirshad7/habitat-lab/data/datasets/vln/mp3d/r2r/v1/' + split + '/' + split + '.json.gz'
    with gzip.open(vln_data_path, "rt") as f:
        deserialized = json.loads(f.read())

    val_ids = {}
    for i, ep in enumerate(deserialized['episodes']):
        val_ids[ep['episode_id']] = i

    new_data_dict = {}
    new_data_dict['episodes'] = {}
    new_data_dict['instruction_vocab'] = deserialized['instruction_vocab']
    new_data_list = []
    save_fig = False
    steps_dict = {}

    with SimpleRLEnv(config=config) as env:
        print("Environment creation successful")

        # obj_attr_mgr = env.habitat_env._sim.get_object_template_manager()
        # remove_all_objects(env.habitat_env._sim)
        sim_time = 30  # @param {type:"integer"}
        continuous_nav = True  # @param {type:"boolean"}
        # control_frequency and frame_skip are used below regardless of the
        # navigation mode, so define them unconditionally.
        control_frequency = 10  # @param {type:"slider", min:1, max:30, step:1}
        frame_skip = 6  # @param {type:"slider", min:1, max:30, step:1}
        fps = control_frequency * frame_skip
        print("fps = " + str(fps))
        control_sequence = []
        for action in range(int(sim_time * control_frequency)):
            if continuous_nav:
                # allow forward velocity and y rotation to vary
                control_sequence.append({
                    "forward_velocity": random.random() * 2.0,  # [0, 2)
                    "rotation_velocity": (random.random() - 0.5) * 2.0,  # [-1, 1)
                })
            else:
                control_sequence.append(random.choice(action_names))

        # create and configure a new VelocityControl structure
        vel_control = habitat_sim.physics.VelocityControl()
        vel_control.controlling_lin_vel = True
        vel_control.lin_vel_is_local = True
        vel_control.controlling_ang_vel = True
        vel_control.ang_vel_is_local = True
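        # With *_is_local=True the velocities are interpreted in the agent's body
        # frame: linear velocity along its local axes (forward is -z) and angular
        # velocity about them.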

        vis_ids = []

        collided_trajectories = []
        trajectory_without_collision = True

        for episode in range(len(deserialized['episodes'])):
            counter = 0
            env.reset()
            episode_id = env.habitat_env.current_episode.episode_id
            print(
                f"Agent stepping around inside environment. Episode id: {episode_id}"
            )

            dirname = os.path.join(IMAGE_DIR, "vln_reference_path_example",
                                   mode, "%02d" % episode)
            if os.path.exists(dirname):
                shutil.rmtree(dirname)
            os.makedirs(dirname)

            images = []
            steps = 0
            reference_path = env.habitat_env.current_episode.reference_path + [
                env.habitat_env.current_episode.goals[0].position
            ]

            # manually control the agent's kinematic state via velocity integration
            time_step = 1.0 / 30

            x = []
            y = []
            yaw = []
            vel = []
            omega = []

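            # ContinuousPathFollower (defined elsewhere, not shown here) exposes a
            # moving waypoint along reference_path and a progress fraction in
            # [0, 1] that the loop below polls.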
            continuous_path_follower = ContinuousPathFollower(
                env.habitat_env._sim, reference_path, waypoint_threshold=0.4)
            max_time = 30.0
            done = False
            EPS = 1e-4
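            # Distance of the agent from the origin; if it stops changing (within
            # EPS) for 150 consecutive steps, the episode is treated as stuck and
            # skipped (see counter below).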
            prev_pos = np.linalg.norm(
                env.habitat_env._sim.get_agent_state().position)
            if show_waypoint_indicators:
                for vis_id in vis_ids:
                    env.habitat_env._sim.remove_object(vis_id)
                vis_ids = setup_path_visualization(env.habitat_env._sim,
                                                   continuous_path_follower)

            while continuous_path_follower.progress < 1.0:
                # print("done",done)
                if done:
                    break
                if counter == 150:
                    counter = 0
                    collided_trajectories.append(
                        env.habitat_env.current_episode.episode_id)
                    trajectory_without_collision = False
                    break
                continuous_path_follower.update_waypoint()

                if show_waypoint_indicators:
                    env.habitat_env._sim.set_translation(
                        continuous_path_follower.waypoint, vis_ids[0])

                agent_state = env.habitat_env._sim.get_agent_state()
                pos = np.linalg.norm(agent_state.position)

                if abs(pos - prev_pos) < EPS:
                    counter += 1
                previous_rigid_state = habitat_sim.RigidState(
                    quat_to_magnum(agent_state.rotation), agent_state.position)

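                # track_waypoint (defined elsewhere, not shown here) computes the
                # linear and angular velocity commands (v, w) toward the current
                # waypoint; vel_control is passed along so the command drives
                # env.step below.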
                v, w = track_waypoint(
                    continuous_path_follower.waypoint,
                    previous_rigid_state,
                    vel_control,
                    dt=time_step,
                )

                observations, reward, done, info = env.step(vel_control)
                # print(observations)
                # save_map(observations, info, images)
                prev_pos = pos
                agent_state = env.habitat_env._sim.get_agent_state()
                x.append(agent_state.position[0])
                y.append(-agent_state.position[2])
                yaw.append(
                    quaternion.as_euler_angles(agent_state.rotation)[1])
                vel.append(v)
                omega.append(w)
                steps += 1

            if save_fig:  # pragma: no cover
                plt.close()
                plt.subplots(1)
                plt.plot(x, y, "xb", label="position")
                plt.grid(True)
                plt.axis("equal")
                plt.xlabel("x [m]")
                plt.ylabel("y [m]")
                plt.legend()
                pose_title = os.path.join(dirname, 'pose.png')
                plt.savefig(pose_title)

                plt.subplots(1)
                plt.plot(yaw, "-r", label="yaw")
                plt.grid(True)
                plt.legend()
                plt.xlabel("step")
                plt.ylabel("yaw angle [rad]")
                yaw_title = os.path.join(dirname, 'yaw.png')
                plt.savefig(yaw_title)

                plt.subplots(1)
                plt.plot(vel, "-r", label="v_reference")
                plt.grid(True)
                plt.legend()
                plt.xlabel("step")
                plt.ylabel("v_reference [m/s]")
                vel_title = os.path.join(dirname, 'vel.png')
                plt.savefig(vel_title)

                plt.subplots(1)
                plt.plot(omega, "-r", label="omega_reference")
                plt.grid(True)
                plt.legend()
                plt.xlabel("step")
                plt.ylabel("omega_reference [rad/s]")
                omega_title = os.path.join(dirname, 'omega.png')
                plt.savefig(omega_title)

                x = []
                y = []
                yaw = []
                vel = []
                omega = []

            if trajectory_without_collision:
                ids = val_ids[episode_id]
                single_data_dict = deserialized['episodes'][ids]
                new_data_list.append(single_data_dict)
            trajectory_without_collision = True
            print(f"Navigated to goal in {steps} steps.")
            steps_dict[episode] = steps
            # images_to_video(images, dirname, str(episode_id), fps = int (1.0/time_step))
            images = []

        steps_path = '/home/mirshad7/habitat-lab/train_steps.json.gz'
        # new_data_dict['episodes'] = new_data_list
        # path = '/home/mirshad7/habitat-lab/data/datasets/vln/mp3d/r2r/robo_vln/train/train.json.gz'
        # compress_json.dump(new_data_dict, path)
        compress_json.dump(steps_dict, steps_path)
        print("collided trajectories:", collided_trajectories)