Code Example #1
File: asset_viewer.py Project: jturner65/habitat-sim
 def on_sim_click(b):
     observations = simulate(sim, dt=dt)
     vut.make_video(
         observations,
         "color_sensor_1st_person",
         "color",
         output_path + prefix,
         open_vid=show_video,
     )
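Nearly every excerpt in this list calls a local simulate(...) helper together with vut.make_video(...) from habitat_sim.utils.viz_utils; the helper itself is never shown. A minimal sketch, assuming the form used throughout the habitat-sim example notebooks, is:

from habitat_sim.utils import viz_utils as vut  # video utility used by the excerpts


def simulate(sim, dt=1.0, get_frames=True):
    # step physics at 60 Hz for roughly dt seconds, optionally collecting sensor frames
    observations = []
    start_time = sim.get_world_time()
    while sim.get_world_time() < start_time + dt:
        sim.step_physics(1.0 / 60.0)
        if get_frames:
            observations.append(sim.get_sensor_observations())
    return observations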
Code Example #2
def run_test(test_id, args):
    print("----- Running %s -----" % test_id)
    start_time = time.time()
    data = benchmarks[test_id]["test"]()
    physics_step_times = data["physics_step_times"]
    graphics_render_times = data["graphics_render_times"]
    collisions = data["collisions"]
    observations = data["observations"]
    end_time = time.time()

    test_log = ""
    test_log += "Test: %s completed" % test_id + "\n"
    test_log += "\t Description: " + benchmarks[test_id]["description"] + "\n"
    test_log += (
        "\t ========================= Performance ======================== " + "\n"
    )
    test_log += "\t| Number of frames: " + str(len(physics_step_times)) + "\n"
    test_log += (
        "\t| Average time per physics step: "
        + str(sum(physics_step_times) / len(physics_step_times))
        + "\n"
    )
    test_log += (
        "\t| Average time per frame render: "
        + str(sum(graphics_render_times) / len(graphics_render_times))
        + "\n"
    )
    test_log += "\t| Maximum Collisions: " + str(max(collisions)) + "\n"
    test_log += "\t| Total time for test: " + str(end_time - start_time) + "\n"
    test_log += "\t|----- END of %s -----" % test_id + "\n"
    test_log += (
        "\t ============================================================== " + "\n"
    )
    test_log += "\n"

    print(test_log)
    if args.make_video:
        vut.make_video(
            observations,
            "rgba_camera_1stperson",
            "color",
            output_path + test_id,
            open_vid=args.show_video,
        )

    for graph_name in args.graph:
        if graph_name in data:
            create_frame_line_plot(test_id, data[graph_name], graph_name)

    return test_log
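run_test also relies on a create_frame_line_plot helper (driven by args.graph) that is not included in this excerpt. Purely as an illustration of what such a helper might look like, and not the project's actual implementation, a matplotlib-based sketch could be:

import matplotlib.pyplot as plt


def create_frame_line_plot(test_id, frame_data, graph_name):
    # hypothetical stand-in: plot one value per frame and save the figure next to the videos
    plt.figure()
    plt.plot(range(len(frame_data)), frame_data)
    plt.xlabel("frame")
    plt.ylabel(graph_name)
    plt.title("%s: %s" % (test_id, graph_name))
    plt.savefig(output_path + test_id + "_" + graph_name + ".png")
    plt.close()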
Code Example #3
    # load some object templates from configuration files
    sphere_template_id = obj_templates_mgr.load_configs(
        str(os.path.join(data_path, "test_assets/objects/sphere")))[0]

    # add an object to the scene
    id_1 = sim.add_object(sphere_template_id)
    sim.set_translation(np.array([2.50, 0, 0.2]), id_1)

    # simulate
    observations = simulate(sim, dt=1.5, get_frames=make_video)

    if make_video:
        vut.make_video(
            observations,
            "rgba_camera_1stperson",
            "color",
            output_path + "sim_basics",
            open_vid=show_video,
        )

    # [/basics]

    remove_all_objects(sim)
    # %%
    # [dynamic_control]

    observations = []
    obj_templates_mgr.load_configs(str(os.path.join(data_path, "objects")))
    # search for an object template by key sub-string
    cheezit_template_handle = obj_templates_mgr.get_template_handles(
        "data/objects/cheezit")[0]
Code Example #4
                # NB: There are some cases where ||filter_end - end_pos|| > 0 when a
                # collision _didn't_ happen. One such case is going up stairs.  Instead,
                # we check to see if the amount moved after the application of the filter
                # is _less_ than the amount moved before the application of the filter
                EPS = 1e-5
                collided = (dist_moved_after_filter + EPS) < dist_moved_before_filter

            # run any dynamics simulation
            sim.step_physics(time_step)

            # render observation
            observations.append(sim.get_sensor_observations())

    print("frames = " + str(len(observations)))
    # video rendering with embedded 1st person view
    if do_make_video:
        # use the video utility to render the observations
        vut.make_video(
            observations=observations,
            primary_obs="color_sensor",
            primary_obs_type="color",
            video_file=output_directory + "continuous_nav",
            fps=fps,
            open_vid=show_video,
        )

    sim.reset()

# [/embodied_agent_navmesh]
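For context, the dist_moved_before_filter and dist_moved_after_filter values compared in the excerpt above are produced just before that check by integrating the velocity command and snapping the result back onto the navmesh. A rough sketch of that step, following the habitat-sim continuous navigation tutorial (vel_control, time_step and the agent state are assumed from the surrounding code, which is not part of this excerpt):

import habitat_sim
from habitat_sim.utils import common as utils

# integrate the commanded velocity for one time step
agent_state = sim.agents[0].state
previous_rigid_state = habitat_sim.RigidState(
    utils.quat_to_magnum(agent_state.rotation), agent_state.position
)
target_rigid_state = vel_control.integrate_transform(time_step, previous_rigid_state)

# snap the proposed position back onto the navmesh
end_pos = sim.step_filter(
    previous_rigid_state.translation, target_rigid_state.translation
)

# squared displacement with and without the navmesh filter; the excerpt above
# flags a collision when the filtered move is shorter than the unfiltered one
dist_moved_before_filter = (
    target_rigid_state.translation - previous_rigid_state.translation
).dot()
dist_moved_after_filter = (end_pos - previous_rigid_state.translation).dot()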
Code Example #5
vel_control.controlling_lin_vel = True
vel_control.controlling_ang_vel = True

# simulate forward only with damped angular velocity (reset angular velocity to 0 after each step)
observations += simulate(sim, dt=1.0, get_frames=make_video)

vel_control.angular_velocity = np.array([0.0, -1.25, 0])

# simulate forward and turn
observations += simulate(sim, dt=2.0, get_frames=make_video)

vel_control.controlling_ang_vel = False
vel_control.controlling_lin_vel = False

# simulate settling
observations += simulate(sim, dt=3.0, get_frames=make_video)

# remove the agent's body while preserving the SceneNode
sim.remove_object(id_1, False)

# video rendering with embedded 1st person view
if make_video:
    vut.make_video(
        observations,
        "rgba_camera_1stperson",
        "color",
        output_path + "4_robot_control_grass",
        open_vid=show_video
    )

# %%
Code Example #6
        # simulate for 4 seconds
        simulate(sim, dt=4, get_frames=True, data=data)

        for cur_id in cur_ids:
            vel_control = sim.get_object_velocity_control(cur_id)
            vel_control.controlling_ang_vel = True
            vel_control.angular_velocity = np.array([-1.56, 0, 0])

        # simulate for 4 seconds
        simulate(sim, dt=4, get_frames=True, data=data)
    return data


if __name__ == "__main__":
    # List of objects you want to execute the VHACD tests on.
    obj_paths = [
        "test_assets/objects/chair.glb",
        "test_assets/objects/donut.glb",
    ]
    # create and show the video
    observations = []
    for obj_path in obj_paths:
        observations += runVHACDSimulation(obj_path)["observations"]
    vut.make_video(
        observations,
        "rgba_camera_1stperson",
        "color",
        output_path + "VHACD_vid_1",
        open_vid=True,
    )
Code Example #7
            "dims": overlay_dims,
            "pos": (10, 30 + overlay_dims[1]),
            "border": 2,
        },
        {
            "obs": "semantic_sensor_1st_person",
            "type": "semantic",
            "dims": overlay_dims,
            "pos": (10, 50 + overlay_dims[1] * 2),
            "border": 2,
        },
    ]
    print("overlay_settings = " + str(overlay_settings))

    vut.make_video(
        observations=observations,
        # primary_obs="color_sensor_3rd_person",
        primary_obs="color_sensor_1st_person",
        primary_obs_type="color",
        video_file=output_path + video_prefix,
        fps=int(1.0 / time_step),
        # open_vid=show_video,
        open_vid=False,
        overlay_settings=overlay_settings,
        depth_clip=10.0,
    )

# remove locobot while leaving the agent node for later use
sim.remove_object(locobot_id, delete_object_node=False)
remove_all_objects(sim)
Code Example #8
    sim,
    duration=2.0,
    agent_vel=np.array([0.4, 0.0, 0.0]),
    look_rotation_vel=-10.0,
    get_frames=make_video,
)

# %% [markdown]
# ## End the episode. Render the episode observations to a video.
# %%

if make_video:
    vut.make_video(
        observations,
        "rgba_camera",
        "color",
        output_path + "episode",
        open_vid=show_video,
    )

# %% [markdown]
# ## Write a replay to file and do some cleanup.
# gfx replay files are written as JSON.
# %%

sim.gfx_replay_manager.write_saved_keyframes_to_file(replay_filepath)

remove_all_objects(sim)

# %% [markdown]
# ## Reconfigure simulator for replay playback.
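The excerpt stops at this header, before any playback code. A sketch of what typically follows, assuming the gfx replay API shown in the habitat-sim replay tutorial (method names should be verified against the installed habitat-sim version):

# assumed API: rebuild the simulator with a playback-oriented configuration,
# then load the saved keyframes and render an observation per keyframe
sim.reconfigure(playback_cfg)  # playback_cfg: hypothetical configuration for playback
player = sim.gfx_replay_manager.read_keyframes_from_file(replay_filepath)
assert player is not None
playback_observations = []
for frame in range(player.get_num_keyframes()):
    player.set_keyframe_index(frame)
    playback_observations.append(sim.get_sensor_observations())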
Code Example #9
sphere_template.half_length = 100
# add an object to the scene
id_1 = sim.add_object(sphere_template.ID)
sim.set_translation(np.array([-3.914907932281494, 1.7, -1.7014040946960449]), id_1)


def place_agent(sim):
    # place our agent in the scene
    agent_state = habitat_sim.AgentState()
    agent_state.position = [-3.914907932281494, 0.7, -0.7014040946960449]
    # identity rotation (a zero quaternion is not a valid rotation)
    agent_state.rotation = np.quaternion(1, 0, 0, 0)
    agent = sim.initialize_agent(0, agent_state)
    return agent.scene_node.transformation_matrix()


place_agent(sim)
# simulate
observations = simulate(sim, dt=1.5, get_frames=make_video)
if make_video:
    vut.make_video(
        observations,
        "semantic_camera_1stperson",
        "semantic",
        output_path + "sim_basics_sem",
        open_vid=show_video,
    )
if make_video:
    vut.make_video(
        observations,
        "rgba_camera_1stperson",
        "color",
        output_path + "sim_basics_rgb",
        open_vid=show_video,
    )
# %%
Code Example #10
def test_fetch_robot_wrapper(fixed_base):
    # set this to output test results as video for easy investigation
    produce_debug_video = False
    observations = []
    cfg_settings = examples.settings.default_sim_settings.copy()
    cfg_settings["scene"] = "NONE"
    cfg_settings["enable_physics"] = True

    # loading the physical scene
    hab_cfg = examples.settings.make_cfg(cfg_settings)

    with habitat_sim.Simulator(hab_cfg) as sim:
        obj_template_mgr = sim.get_object_template_manager()
        rigid_obj_mgr = sim.get_rigid_object_manager()

        # setup the camera for debug video (looking at 0,0,0)
        sim.agents[0].scene_node.translation = [0.0, -1.0, 2.0]

        # add a ground plane
        cube_handle = obj_template_mgr.get_template_handles("cubeSolid")[0]
        cube_template_cpy = obj_template_mgr.get_template_by_handle(cube_handle)
        cube_template_cpy.scale = np.array([5.0, 0.2, 5.0])
        obj_template_mgr.register_template(cube_template_cpy)
        ground_plane = rigid_obj_mgr.add_object_by_template_handle(cube_handle)
        ground_plane.translation = [0.0, -0.2, 0.0]
        ground_plane.motion_type = habitat_sim.physics.MotionType.STATIC

        # compute a navmesh on the ground plane
        navmesh_settings = habitat_sim.NavMeshSettings()
        navmesh_settings.set_defaults()
        sim.recompute_navmesh(sim.pathfinder, navmesh_settings, True)
        sim.navmesh_visualization = True

        # add the robot to the world via the wrapper
        robot_path = "data/robots/hab_fetch/robots/hab_fetch.urdf"
        fetch = fetch_robot.FetchRobot(robot_path, sim, fixed_base=fixed_base)
        fetch.reconfigure()
        assert fetch.get_robot_sim_id() == 1  # 0 is the ground plane
        print(fetch.get_link_and_joint_names())
        observations += simulate(sim, 1.0, produce_debug_video)

        # retract the arm
        observations += fetch._interpolate_arm_control(
            [1.2299035787582397, 2.345386505126953],
            [fetch.params.arm_joints[1], fetch.params.arm_joints[3]],
            1,
            30,
            produce_debug_video,
        )

        # ready the arm
        observations += fetch._interpolate_arm_control(
            [-0.45, 0.1],
            [fetch.params.arm_joints[1], fetch.params.arm_joints[3]],
            1,
            30,
            produce_debug_video,
        )

        # setting arm motor positions
        fetch.arm_motor_pos = np.zeros(len(fetch.params.arm_joints))
        observations += simulate(sim, 1.0, produce_debug_video)

        # set base ground position from navmesh
        # NOTE: because the navmesh floats above the collision geometry we should see a pop/settle with dynamics and no fixed base
        target_base_pos = sim.pathfinder.snap_point(fetch.sim_obj.translation)
        fetch.base_pos = target_base_pos
        assert fetch.base_pos == target_base_pos
        observations += simulate(sim, 1.0, produce_debug_video)
        if fixed_base:
            assert np.allclose(fetch.base_pos, target_base_pos)
        else:
            assert not np.allclose(fetch.base_pos, target_base_pos)

        # arm joint queries and setters
        print(f" Arm joint velocities = {fetch.arm_velocity}")
        fetch.arm_joint_pos = np.ones(len(fetch.params.arm_joints))
        fetch.arm_motor_pos = np.ones(len(fetch.params.arm_joints))
        print(f" Arm joint positions (should be ones) = {fetch.arm_joint_pos}")
        print(f" Arm joint limits = {fetch.arm_joint_limits}")
        fetch.arm_motor_pos = fetch.arm_motor_pos
        observations += simulate(sim, 1.0, produce_debug_video)

        # test gripper state
        fetch.open_gripper()
        observations += simulate(sim, 1.0, produce_debug_video)
        assert fetch.is_gripper_open
        assert not fetch.is_gripper_closed
        fetch.close_gripper()
        observations += simulate(sim, 1.0, produce_debug_video)
        assert fetch.is_gripper_closed
        assert not fetch.is_gripper_open

        # halfway open
        fetch.set_gripper_target_state(0.5)
        observations += simulate(sim, 0.5, produce_debug_video)
        assert not fetch.is_gripper_open
        assert not fetch.is_gripper_closed

        # kinematic open/close (checked before simulation)
        fetch.gripper_joint_pos = fetch.params.gripper_open_state
        assert np.allclose(fetch.gripper_joint_pos, fetch.params.gripper_open_state)
        assert fetch.is_gripper_open
        observations += simulate(sim, 0.2, produce_debug_video)
        fetch.gripper_joint_pos = fetch.params.gripper_closed_state
        assert fetch.is_gripper_closed
        observations += simulate(sim, 0.2, produce_debug_video)

        # end effector queries
        print(f" End effector link id = {fetch.ee_link_id}")
        print(f" End effector local offset = {fetch.ee_local_offset}")
        print(f" End effector transform = {fetch.ee_transform}")
        print(
            f" End effector translation (at current state) = {fetch.calculate_ee_forward_kinematics(fetch.sim_obj.joint_positions)}"
        )
        invalid_ef_target = np.array([100.0, 200.0, 300.0])
        print(
            f" Clip end effector target ({invalid_ef_target}) to reach = {fetch.clip_ee_to_workspace(invalid_ef_target)}"
        )

        # produce some test debug video
        if produce_debug_video:
            from habitat_sim.utils import viz_utils as vut

            vut.make_video(
                observations,
                "color_sensor",
                "color",
                "test_fetch_robot_wrapper__fixed_base=" + str(fixed_base),
                open_vid=True,
            )
Code Example #11
                "border": 2,
            },
            {
                "obs": "depth_camera_1stperson",
                "type": "depth",
                "dims": overlay_dims,
                "pos": (10, 30 + overlay_dims[1]),
                "border": 2,
            },
        ]

        vut.make_video(
            observations=observations,
            primary_obs="rgba_camera_3rdperson",
            primary_obs_type="color",
            video_file=output_path + video_prefix,
            fps=60,
            open_vid=show_video,
            overlay_settings=overlay_settings,
            depth_clip=10.0,
        )
        vut.make_video(
            observations=observations,
            primary_obs="semantic_camera_1stperson",
            primary_obs_type="semantic",
            video_file=output_path + video_prefix + "semantic",
            fps=60,
            open_vid=show_video,
            overlay_settings=overlay_settings,
            depth_clip=10.0,
        )
Code Example #12
# %% [markdown]
# ## Load the Select Scene and Simulate!
# This cell will load the scene selected above, simulate, and produce a visualization.

# %%
global selected_scene
if sim_settings["scene"] != selected_scene:
    sim_settings["scene"] = selected_scene
    make_simulator_from_settings(sim_settings)

observations = []
start_time = sim.get_world_time()
while sim.get_world_time() < start_time + 4.0:
    sim.agents[0].scene_node.rotate(mn.Rad(mn.math.pi_half / 60.0),
                                    mn.Vector3(0, 1, 0))
    sim.step_physics(1.0 / 60.0)
    if make_video:
        observations.append(sim.get_sensor_observations())

# video rendering of carousel view
video_prefix = "ReplicaCAD_scene_view"
if make_video:
    vut.make_video(
        observations,
        "color_sensor_1st_person",
        "color",
        output_path + video_prefix,
        open_vid=show_video,
        video_dims=[1280, 720],
    )
Code Example #13
# habitat_sim.physics.MotionType options: KINEMATIC / STATIC / DYNAMIC

# drop some dynamic objects
id_2 = sim.add_object_by_handle(iconsphere_template_handle)
sim.set_translation(np.array([2.4, -0.64, 0.28]), id_2)
sim.set_object_motion_type(habitat_sim.physics.MotionType.KINEMATIC, id_2)

id_3 = sim.add_object_by_handle(iconsphere_template_handle)
sim.set_translation(np.array([2.4, -0.64, -0.28]), id_3)
sim.set_object_motion_type(habitat_sim.physics.MotionType.DYNAMIC, id_3)
id_4 = sim.add_object_by_handle(iconsphere_template_handle)
sim.set_translation(np.array([2.4, -0.3, 0]), id_4)

id_5 = sim.add_object_by_handle(iconsphere_template_handle)
sim.set_translation(np.array([2.4, -0.10, -0.28]), id_5)

# simulate
observations = simulate(sim, dt=2, get_frames=True)

if make_video:
    vut.make_video(
        observations,
        "rgba_camera_1stperson",
        "color",
        output_path + "2_kinematic_interactions_3STATIC",
        open_vid=show_video
    )

# %%

Code Example #14
observations = simulate(sim, dt=1.5, get_frames=make_video)
# %%
print(observations[0]['depth_camera_1stperson'].shape)
# %%
"""
for img in observations:
    #90 images/frames 
    for img_type in img:
        #dict: depth,rgb,semantic
            #each value of key is numpy.ndarray = [544,720]
 """       
if make_video:
    vut.make_video(
        observations,
        "rgba_camera_1stperson",
        "color",
        output_path + "1_sim_basics",
        open_vid=show_video,
    )


# %%
"""Forces and torques can be applied to the object"""
remove_all_objects(sim)
observations = []
# add a sphere template
sphere_template = prim_templates_mgr.get_default_UVsphere_template(is_wireframe=False)
sphere_template_handle = sphere_template.handle

# add 5 cubes
cheezit_template = prim_templates_mgr.get_default_cylinder_template(is_wireframe=False)
Code Example #15
while sim.get_world_time() < start_time + dt:
    # manually control the object's kinematic state
    sim.set_translation(sim.get_translation(id_1) + np.array([0, 0, 0.01]), id_1)
    sim.set_rotation(
        mn.Quaternion.rotation(mn.Rad(0.025), np.array([0, 0, -1.0]))
        * sim.get_rotation(id_1),
        id_1,
    )
    sim.step_physics(1.0 / 60.0)
    observations.append(sim.get_sensor_observations())

if make_video:
    vut.make_video(
        observations,
        "semantic_camera_1stperson",
        "semantic",
        output_path + "3_kinematic_update_z",
        open_vid=show_video,
    )
# %%
# get object VelocityControl structure and setup control
vel_control = sim.get_object_velocity_control(id_1)
vel_control.linear_velocity = np.array([0, 0, -1.0])
vel_control.angular_velocity = np.array([4.0, 0, 0])
vel_control.controlling_lin_vel = True
vel_control.controlling_ang_vel = True

observations = simulate(sim, dt=2.0, get_frames=True)

# reverse linear direction
vel_control.linear_velocity = np.array([0, 0, 1.0])
Code Example #16
# %%
"""——————————————————————Simulation——————————————————————"""
start_time = sim.get_world_time()
dt = 10
while sim.get_world_time() < start_time + dt:
    # apply an anti-gravity force and a small torque to each box
    for box_id in box_ids:
        sim.apply_force(anti_grav_force, np.array([0, 0.0, 0]), box_id)
        sim.apply_torque(np.array([0, 0.01, 0]), box_id)

    # kinematic update: translate and rotate id_2 directly each step
    sim.set_translation(sim.get_translation(id_2) + np.array([0, 0, 0.01]), id_2)
    sim.set_rotation(
        mn.Quaternion.rotation(mn.Rad(0.0125), np.array([0, 0, -1.0]))
        * sim.get_rotation(id_2),
        id_2,
    )

    sim.step_physics(1.0 / 60.0)
    observations.append(sim.get_sensor_observations())

if make_video:
    vut.make_video(
        observations,
        "rgba_camera_1stperson",
        "color",
        output_path + "mid-term_1_rgb",
        open_vid=show_video,
    )

# %%