Example #1
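# Example 1: drive a JR2 Kinova (with head) around a stadium scene among
# randomly placed YCB cracker boxes, rendering the robot camera each step.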
import numpy as np

from gibson2.core.physics.interactive_objects import YCBObject
from gibson2.core.physics.robot_locomotors import JR2_Kinova_Head  # fork-specific robot; import path assumed
from gibson2.core.physics.scene import StadiumScene
from gibson2.core.render.profiler import Profiler
from gibson2.core.simulator import Simulator
from gibson2.utils.utils import parse_config


def main():
    config = parse_config('../configs/jr2_reaching.yaml')
    s = Simulator(mode='gui', image_width=512, image_height=512)
    #scene = BuildingScene('Rs',
    #                      build_graph=True,
    #                      pybullet_load_texture=True)
    scene = StadiumScene()
    s.import_scene(scene)
    jr = JR2_Kinova_Head(config)
    #turtlebot = Turtlebot(config)
    s.import_robot(jr)

    for _ in range(10):
        obj = YCBObject('003_cracker_box')
        s.import_object(obj)
        obj.set_position_orientation(np.random.uniform(low=0, high=2, size=3),
                                     [0, 0, 0, 1])

    for i in range(10000):
        with Profiler('Simulator step'):
            jr.apply_action([0, 0, 0.1, 0.1, 0, 0, 0, 0, 0])
            s.step()
            rgb = s.renderer.render_robot_cameras(modes=('rgb',))

    s.disconnect()
Example #2
import logging
import os

import numpy as np
import pybullet as p

import gibson2
from gibson2.core.render.profiler import Profiler
from gibson2.envs.hand_drawer_env import HandDrawerEnv  # fork-specific env; import path is a guess


def main():
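    # Run HandDrawerEnv with sampled actions: the first 24 entries are zeroed
    # and the trailing pose entries are pinned to fixed values each step.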
    config_filename = os.path.join(os.path.dirname(gibson2.__file__),
                                   '../examples/configs/hand_drawer.yaml')
    env = HandDrawerEnv(config_file=config_filename, mode='gui')
    for j in range(1000):
        env.reset()
        for i in range(100):
            with Profiler('Environment action step'):
                action = env.action_space.sample()
                action[0:24] = 0
                action[-7] = 0.05
                action[-6] = 1
                action[-5] = 1.05
                action[-4:] = p.getQuaternionFromEuler([-np.pi / 2, 0, np.pi])
                state, reward, done, info = env.step(action)
                # env.save_seg_image_external('seg/'+str(j*100+i)+'.jpg')
                # env.save_rgb_image('rgb/'+str(j*100+i)+'.jpg')
                # env.save_depth_image_external('depth/'+str(j*100+i)+'.jpg')
                # env.save_normal_image('normal/'+str(j*100+i)+'.jpg')
                # env.save_external_image('external/'+str(j*100+i)+'.jpg')

                if done:
                    logging.info(
                        "Episode finished after {} timesteps".format(i + 1))
                    break
Example #3
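# Example 3: render a mesh with MeshRendererG2G (frames returned as GPU
# tensors), profile 3000 renders, then show the RGB and normal buffers.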
import os
import sys

import matplotlib.pyplot as plt
import numpy as np

from gibson2.core.render.mesh_renderer.mesh_renderer_tensor import MeshRendererG2G  # import path assumed
from gibson2.core.render.profiler import Profiler
from gibson2.utils.assets_utils import get_model_path


def main():
    if len(sys.argv) > 1:
        model_path = sys.argv[1]
    else:
        model_path = os.path.join(get_model_path('Rs'), 'mesh_z_up.obj')

    renderer = MeshRendererG2G(width=512, height=512, device_idx=0)
    renderer.load_object(model_path)
    renderer.add_instance(0)

    print(renderer.visual_objects, renderer.instances)
    print(renderer.materials_mapping, renderer.mesh_materials)
    camera_pose = np.array([0, 0, 1.2])
    view_direction = np.array([1, 0, 0])
    renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
    renderer.set_fov(90)
    for i in range(3000):
        with Profiler('Render'):
            frame = renderer.render(modes=('rgb', 'normal', '3d'))

    print(frame)
    img_np = frame[0].flip(0).data.cpu().numpy().reshape(
        renderer.height, renderer.width, 4)
    normal_np = frame[1].flip(0).data.cpu().numpy().reshape(
        renderer.height, renderer.width, 4)
    plt.imshow(np.concatenate([img_np, normal_np], axis=1))
    plt.show()
Example #4
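# Example 4: JR2 Kinova reaching setup with a translucent sphere marker as
# the target; steps the simulator while rendering the robot camera.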
import pybullet as p

from gibson2.core.physics.interactive_objects import VisualMarker
from gibson2.core.physics.robot_locomotors import JR2_Kinova
from gibson2.core.physics.scene import StadiumScene
from gibson2.core.render.profiler import Profiler
from gibson2.core.simulator import Simulator
from gibson2.utils.utils import parse_config


def main():
    config = parse_config('../configs/jr2_reaching.yaml')
    s = Simulator(mode='gui', image_width=512, image_height=512)

    scene = StadiumScene()
    s.import_scene(scene)

    jr = JR2_Kinova(config)
    s.import_robot(jr)

    marker = VisualMarker(visual_shape=p.GEOM_SPHERE,
                          rgba_color=[0, 0, 1, 0.3],
                          radius=0.5)
    s.import_object(marker)
    marker.set_position([0, 4, 1])

    for i in range(10000):
        with Profiler('Simulator step'):
            jr.apply_action([0, 0, 0, 0, 0, 0, 0])
            s.step()
            rgb = s.renderer.render_robot_cameras(modes=('rgb',))

    s.disconnect()
Example #5
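# Example 5: run NavigateRandomEnv with randomly sampled actions for 10
# short episodes, profiling each environment step.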
import os

import gibson2
from gibson2.core.render.profiler import Profiler
from gibson2.envs.locomotor_env import NavigateRandomEnv


def main():
    config_filename = os.path.join(os.path.dirname(gibson2.__file__),
                                   '../examples/configs/turtlebot_demo.yaml')
    nav_env = NavigateRandomEnv(config_file=config_filename, mode='gui')
    for j in range(10):
        nav_env.reset()
        for i in range(100):
            with Profiler('Env action step'):
                action = nav_env.action_space.sample()
                state, reward, done, info = nav_env.step(action)
                if done:
                    print("Episode finished after {} timesteps".format(i + 1))
                    break
Example #6
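# Example 6: run NavigateRandomEnv with a fixed action, printing the
# 'close_to_goal' entry of the observation at every step.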
import os

import gibson2
from gibson2.core.render.profiler import Profiler
from gibson2.envs.locomotor_env import NavigateRandomEnv


def main():
    config_filename = os.path.join(os.path.dirname(gibson2.__file__),
                                   '../examples/configs/master_config.yaml')
    nav_env = NavigateRandomEnv(config_file=config_filename, mode='gui')
    for j in range(100):
        nav_env.reset()
        for i in range(1000):
            with Profiler('Env action step'):
                action = nav_env.action_space.sample()
                action = [1, 1, 0, 0, 0, 0, 0]  # override the sample with a fixed action
                state, reward, done, info = nav_env.step(action)
                print(state['close_to_goal'])
                #nav_env.set_camera([np.random.choice([0,1,2,3,4,5,6,7,8])])
                if done:
                    print("Episode finished after {} timesteps".format(i + 1))
                    break
Example #7
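# Example 7: run NavigateRandomHeightEnv (targets at random heights) with a
# JR2 reaching config and a fixed action.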
import logging
import os

import gibson2
from gibson2.core.render.profiler import Profiler
from gibson2.envs.locomotor_env import NavigateRandomHeightEnv  # fork-specific env; import path is a guess


def main():
    config_filename = os.path.join(os.path.dirname(gibson2.__file__),
                                   '../examples/configs/jr2_reaching.yaml')
    nav_env = NavigateRandomHeightEnv(config_file=config_filename,
                                      mode='pbgui',
                                      random_height=True)
    for j in range(10):
        nav_env.reset()
        for i in range(100):
            with Profiler('Environment action step'):
                action = nav_env.action_space.sample()
                action = [0, 0, 1, 1, 1, 1, 1]  # override the sample with a fixed action
                state, reward, done, info = nav_env.step(action)
                if done:
                    logging.info(
                        "Episode finished after {} timesteps".format(i + 1))
                    break
Example #8
import numpy as np

from gibson2.core.physics.interactive_objects import YCBObject
from gibson2.core.physics.robot_locomotors import Turtlebot
from gibson2.core.physics.scene import BuildingScene
from gibson2.core.render.profiler import Profiler
from gibson2.core.simulator import Simulator
from gibson2.utils.utils import parse_config


def main():
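    # Load the Rs building scene, scatter YCB cracker boxes, and drive a
    # Turtlebot forward while rendering its camera.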
    config = parse_config('../configs/turtlebot_demo.yaml')
    s = Simulator(mode='gui', image_width=512, image_height=512)
    scene = BuildingScene('Rs',
                          build_graph=True,
                          pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    for _ in range(10):
        obj = YCBObject('003_cracker_box')
        obj.load()
        obj.set_position_orientation(np.random.uniform(low=0, high=2, size=3),
                                     [0, 0, 0, 1])

    for i in range(10000):
        with Profiler('Simulator step'):
            turtlebot.apply_action([0.1, 0.1])
            s.step()
            rgb = s.renderer.render_robot_cameras(modes=('rgb',))

    s.disconnect()
Example #9
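# Example 9: drive a Turtlebot through the 'Bolton' building scene,
# profiling simulation and robot-camera rendering at each step.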
import numpy as np
import pybullet as p

from gibson2.core.physics.robot_locomotors import Turtlebot
from gibson2.core.physics.scene import BuildingScene
from gibson2.core.render.profiler import Profiler
from gibson2.core.simulator import Simulator
from gibson2.utils.utils import parse_config

config = parse_config('../configs/turtlebot_p2p_nav.yaml')

s = Simulator(mode='gui', resolution=512, render_to_tensor=False)
scene = BuildingScene('Bolton')
s.import_scene(scene)
turtlebot = Turtlebot(config)
s.import_robot(turtlebot)

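# Move the scene's ground plane far below the building, presumably so it
# does not interfere with the building's own floor mesh.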
p.resetBasePositionAndOrientation(scene.ground_plane_mjcf[0],
                                  posObj=[0, 0, -10],
                                  ornObj=[0, 0, 0, 1])

for i in range(2000):
    with Profiler('simulator step'):
        turtlebot.apply_action([0.1, 0.1])
        s.step()
        rgb = s.renderer.render_robot_cameras(modes=('rgb',))

s.disconnect()
Example #11
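# Example 11: keyboard teleoperation loop for a Gibson environment, adapted
# from gym.utils.play; 'r' restarts the episode and i/j/k/l move the robot.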
import time

from gibson2.core.render.profiler import Profiler


def play(env, transpose=True, zoom=None, callback=None, keys_to_action=None):
    """Allows one to play the game using keyboard.

    To simply play the game use:

        play(gym.make("Pong-v3"))
        play(env)

    The above code also works if env is wrapped, so it's particularly useful
    for verifying that the frame-level preprocessing does not render the game
    unplayable.

    If you wish to plot real-time statistics as you play, you can use
    gym.utils.play.PlayPlot. Here's sample code for plotting the reward
    for the last 5 seconds of gameplay:

        def callback(obs_t, obs_tp1, action, rew, done, info):
            return [rew,]
        env_plotter = EnvPlotter(callback, 30 * 5, ["reward"])

        env = gym.make("Pong-v3")
        play(env, callback=env_plotter.callback)


    Arguments
    ---------
    env: gym.Env
        Environment to use for playing.
    transpose: bool
        If True, the observation output is transposed.
        Defaults to True.
    zoom: float
        Make the screen edge this many times bigger.
    callback: lambda or None
        If a callback is provided, it will be executed after
        every step. It takes the following input:
            obs_t: observation before performing action
            obs_tp1: observation after performing action
            action: action that was executed
            rew: reward that was received
            done: whether the environment is done or not
            info: debug info
    keys_to_action: dict: tuple(int) -> int or None
        Mapping from keys pressed to action performed.
        For example, if pressing 'w' and space at the same time is supposed
        to trigger action number 2, then the keys_to_action dict would look
        like this:

            {
                # ...
                tuple(sorted((ord('w'), ord(' ')))): 2,
                # ...
            }
        If None, the default keys_to_action mapping for that env is used, if provided.
    """

    obs_s = env.observation_space
    #assert type(obs_s) == gym.spaces.box.Box
    #assert len(obs_s.shape) == 2 or (len(obs_s.shape) == 3 and obs_s.shape[2] in [1,3])

    if keys_to_action is None:
        if hasattr(env, 'get_keys_to_action'):
            keys_to_action = env.get_keys_to_action()
        elif hasattr(env.unwrapped, 'get_keys_to_action'):
            keys_to_action = env.unwrapped.get_keys_to_action()
        else:
            raise ValueError(
                "env has no get_keys_to_action method; "
                "please pass keys_to_action explicitly")
    relevant_keys = set(sum(map(list, keys_to_action.keys()), []))

    pressed_keys = []
    running = True
    env_done = True

    record_num = 0
    record_total = 0
    obs = env.reset()
    prev_obs = obs  # ensure prev_obs is defined for the callback before any key press
    do_restart = False
    last_keys = []  # keys held last frame, used to prevent repeated triggers
    while running:
        if do_restart:
            do_restart = False
            env.reset()
            pressed_keys = []
            continue
        if len(pressed_keys) == 0:
            action = keys_to_action[()]
            with Profiler("Play Env: step"):
                start = time.time()
                obs, rew, env_done, info = env.step(action)
                record_total += time.time() - start
                record_num += 1
            #print(info['sensor'])
            print("Play mode: reward %f" % rew)
        for p_key in pressed_keys:
            action = keys_to_action[(p_key, )]
            prev_obs = obs
            with Profiler("Play Env: step"):
                start = time.time()
                obs, rew, env_done, info = env.step(action)
                record_total += time.time() - start
                record_num += 1
            print("Play mode: reward %f" % rew)
        if callback is not None:
            callback(prev_obs, obs, action, rew, env_done, info)
        # process pygame events
        key_codes = env.get_key_pressed(relevant_keys)
        #print("Key codes", key_codes)
        pressed_keys = []

        for key in key_codes:
            if key == ord('r') and key not in last_keys:
                do_restart = True
            if key == ord('j') and key not in last_keys:
                env.robot.turn_left()
            if key == ord('l') and key not in last_keys:
                env.robot.turn_right()
            if key == ord('i') and key not in last_keys:
                env.robot.move_forward()
            if key == ord('k') and key not in last_keys:
                env.robot.move_backward()
            if key not in relevant_keys:
                continue
            pressed_keys.append(key)

        last_keys = key_codes
Example #12
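# Example 12: interactive MeshRenderer viewer; drag the mouse to rotate the
# view direction, use w/a/s/d to translate the camera, and q to quit.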
import os
import sys

import cv2
import numpy as np

from gibson2.core.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer  # import path assumed
from gibson2.core.render.profiler import Profiler
from gibson2.utils.assets_utils import get_model_path


def main():
    global _mouse_ix, _mouse_iy, down, view_direction

    if len(sys.argv) > 1:
        model_path = sys.argv[1]
    else:
        model_path = os.path.join(get_model_path('Rs'), 'mesh_z_up.obj')

    renderer = MeshRenderer(width=512, height=512)
    renderer.load_object(model_path)
    renderer.add_instance(0)

    print(renderer.visual_objects, renderer.instances)
    print(renderer.materials_mapping, renderer.mesh_materials)
    camera_pose = np.array([0, 0, 1.2])
    view_direction = np.array([1, 0, 0])
    renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
    renderer.set_fov(90)

    px = 0
    py = 0

    _mouse_ix, _mouse_iy = -1, -1
    down = False

    def change_dir(event, x, y, flags, param):
        global _mouse_ix, _mouse_iy, down, view_direction
        if event == cv2.EVENT_LBUTTONDOWN:
            _mouse_ix, _mouse_iy = x, y
            down = True
        if event == cv2.EVENT_MOUSEMOVE:
            if down:
                dx = (x - _mouse_ix) / 100.0
                dy = (y - _mouse_iy) / 100.0
                _mouse_ix = x
                _mouse_iy = y
                r1 = np.array([[np.cos(dy), 0, np.sin(dy)], [0, 1, 0],
                               [-np.sin(dy), 0, np.cos(dy)]])
                r2 = np.array([[np.cos(-dx), -np.sin(-dx), 0],
                               [np.sin(-dx), np.cos(-dx), 0], [0, 0, 1]])
                view_direction = r1.dot(r2).dot(view_direction)
        elif event == cv2.EVENT_LBUTTONUP:
            down = False

    cv2.namedWindow('test')
    cv2.setMouseCallback('test', change_dir)

    while True:
        with Profiler('Render'):
            frame = renderer.render(modes=('rgb', 'normal', '3d'))
        cv2.imshow(
            'test',
            cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR))
        q = cv2.waitKey(1)
        if q == ord('w'):
            px += 0.05
        elif q == ord('s'):
            px -= 0.05
        elif q == ord('a'):
            py += 0.05
        elif q == ord('d'):
            py -= 0.05
        elif q == ord('q'):
            break
        camera_pose = np.array([px, py, 1.2])
        renderer.set_camera(camera_pose, camera_pose + view_direction,
                            [0, 0, 1])

    renderer.release()
Example #13
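# Example 13: Fetch inverse-kinematics demo; arrow keys and z/x move a
# target marker that the arm follows via iterative IK.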
import numpy as np
import pybullet as p

from gibson2.core.physics.robot_locomotors import Fetch
from gibson2.core.physics.scene import EmptyScene
from gibson2.core.render.profiler import Profiler
from gibson2.core.simulator import Simulator
from gibson2.external.pybullet_tools.utils import (  # helper module path assumed
    get_joint_positions, get_max_limits, get_min_limits, get_sample_fn,
    joints_from_names, set_joint_positions)
from gibson2.utils.utils import parse_config


def main():
    config = parse_config('../configs/fetch_p2p_nav.yaml')
    s = Simulator(mode='gui', timestep=1 / 240.0)
    scene = EmptyScene()
    s.import_scene(scene)
    fetch = Fetch(config)
    s.import_robot(fetch)

    robot_id = fetch.robot_ids[0]

    arm_joints = joints_from_names(robot_id, [
        'torso_lift_joint', 'shoulder_pan_joint', 'shoulder_lift_joint',
        'upperarm_roll_joint', 'elbow_flex_joint', 'forearm_roll_joint',
        'wrist_flex_joint', 'wrist_roll_joint'
    ])
    #finger_joints = joints_from_names(robot_id, ['l_gripper_finger_joint', 'r_gripper_finger_joint'])
    fetch.robot_body.reset_position([0, 0, 0])
    fetch.robot_body.reset_orientation([0, 0, 1, 0])
    x, y, z = fetch.get_end_effector_position()
    #set_joint_positions(robot_id, finger_joints, [0.04,0.04])
    print(x, y, z)

    visual_marker = p.createVisualShape(p.GEOM_SPHERE, radius=0.02)
    marker = p.createMultiBody(baseVisualShapeIndex=visual_marker)

    #max_limits = [0,0] + get_max_limits(robot_id, arm_joints) + [0.05,0.05]
    #min_limits = [0,0] + get_min_limits(robot_id, arm_joints) + [0,0]
    #rest_position = [0,0] + list(get_joint_positions(robot_id, arm_joints)) + [0.04,0.04]
    max_limits = [0, 0] + get_max_limits(robot_id, arm_joints)
    min_limits = [0, 0] + get_min_limits(robot_id, arm_joints)
    rest_position = [0, 0] + list(get_joint_positions(robot_id, arm_joints))
    joint_range = list(np.array(max_limits) - np.array(min_limits))
    joint_range = [item + 1 for item in joint_range]
    jd = [0.1 for item in joint_range]
    print(max_limits)
    print(min_limits)

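    # Iterative IK: reseed the arm at a random configuration, then repeatedly
    # call pybullet's IK until the end effector is within `threshold` of the
    # target or `maxIter` is reached. Indices 2:10 of the solution are the
    # eight arm joints (the first two entries appear to belong to the wheels).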
    def accurateCalculateInverseKinematics(robotid, endEffectorId, targetPos,
                                           threshold, maxIter):
        sample_fn = get_sample_fn(robotid, arm_joints)
        set_joint_positions(robotid, arm_joints, sample_fn())
        it = 0
        while it < maxIter:
            jointPoses = p.calculateInverseKinematics(robotid,
                                                      endEffectorId,
                                                      targetPos,
                                                      lowerLimits=min_limits,
                                                      upperLimits=max_limits,
                                                      jointRanges=joint_range,
                                                      restPoses=rest_position,
                                                      jointDamping=jd)
            set_joint_positions(robotid, arm_joints, jointPoses[2:10])
            ls = p.getLinkState(robotid, endEffectorId)
            newPos = ls[4]

            dist = np.linalg.norm(np.array(targetPos) - np.array(newPos))
            if dist < threshold:
                break

            it += 1

        print("Num iter: " + str(it) + ", threshold: " + str(dist))
        return jointPoses

    while True:
        with Profiler("Simulation step"):
            fetch.robot_body.reset_position([0, 0, 0])
            fetch.robot_body.reset_orientation([0, 0, 1, 0])
            threshold = 0.01
            maxIter = 100
            joint_pos = accurateCalculateInverseKinematics(
                robot_id, fetch.parts['gripper_link'].body_part_index,
                [x, y, z], threshold, maxIter)[2:10]

            #set_joint_positions(robot_id, finger_joints, [0.04, 0.04])
            s.step()
            keys = p.getKeyboardEvents()
            for k, v in keys.items():
                if (k == p.B3G_RIGHT_ARROW and (v & p.KEY_IS_DOWN)):
                    x += 0.01
                if (k == p.B3G_LEFT_ARROW and (v & p.KEY_IS_DOWN)):
                    x -= 0.01
                if (k == p.B3G_UP_ARROW and (v & p.KEY_IS_DOWN)):
                    y += 0.01
                if (k == p.B3G_DOWN_ARROW and (v & p.KEY_IS_DOWN)):
                    y -= 0.01
                if (k == ord('z') and (v & p.KEY_IS_DOWN)):
                    z += 0.01
                if (k == ord('x') and (v & p.KEY_IS_DOWN)):
                    z -= 0.01
            p.resetBasePositionAndOrientation(marker, [x, y, z], [0, 0, 0, 1])

    s.disconnect()
Example #14
import os
import sys

import cv2
import numpy as np
import pybullet as p

from gibson2.core.physics.scene import BuildingScene
from gibson2.core.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer  # import path assumed
from gibson2.core.render.profiler import Profiler
from gibson2.utils.assets_utils import get_model_path


def main():
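    # First load the scene in pybullet to query its traversability graph
    # (random floor points and a shortest path), then open an interactive
    # MeshRenderer viewer on the same model.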
    global _mouse_ix, _mouse_iy, down, view_direction

    ###################################################################################
    # Load the scene to get the traversable area (this whole block can be disabled    #
    # for performance).                                                               #
    ###################################################################################
    p.connect(p.GUI)
    p.setGravity(0, 0, -9.8)
    p.setTimeStep(1. / 240.)

    scene = BuildingScene('Allensville',
                          build_graph=True,
                          pybullet_load_texture=True)
    objects = scene.load()

    random_floor = scene.get_random_floor()
    p1 = scene.get_random_point_floor(random_floor)[1]
    p2 = scene.get_random_point_floor(random_floor)[1]
    shortest_path, geodesic_distance = scene.get_shortest_path(
        random_floor, p1[:2], p2[:2], entire_path=True)
    print('random point 1:', p1)
    print('random point 2:', p2)
    print('geodesic distance between p1 and p2', geodesic_distance)
    print('shortest path from p1 to p2:', shortest_path)

    p.disconnect()
    ###################################################################################

    if len(sys.argv) > 1:
        model_path = sys.argv[1]
    else:
        model_path = os.path.join(get_model_path('Allensville'),
                                  'mesh_z_up.obj')

    # set width, height (has to be equal for panorama)
    renderer = MeshRenderer(width=512, height=512)
    renderer.load_object(model_path)
    renderer.add_instance(0)

    print(renderer.visual_objects, renderer.instances)
    print(renderer.materials_mapping, renderer.mesh_materials)

    # set camera_pose / view direction
    camera_pose = np.array([0, 0, 1.2])
    view_direction = np.array([1, 0, 0])
    renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
    renderer.set_fov(90)

    px = 0
    py = 0

    _mouse_ix, _mouse_iy = -1, -1
    down = False

    def change_dir(event, x, y, flags, param):
        global _mouse_ix, _mouse_iy, down, view_direction
        if event == cv2.EVENT_LBUTTONDOWN:
            _mouse_ix, _mouse_iy = x, y
            down = True
        if event == cv2.EVENT_MOUSEMOVE:
            if down:
                dx = (x - _mouse_ix) / 100.0
                dy = (y - _mouse_iy) / 100.0
                _mouse_ix = x
                _mouse_iy = y
                r1 = np.array([[np.cos(dy), 0, np.sin(dy)], [0, 1, 0],
                               [-np.sin(dy), 0, np.cos(dy)]])
                r2 = np.array([[np.cos(-dx), -np.sin(-dx), 0],
                               [np.sin(-dx), np.cos(-dx), 0], [0, 0, 1]])
                view_direction = r1.dot(r2).dot(view_direction)
        elif event == cv2.EVENT_LBUTTONUP:
            down = False

    cv2.namedWindow('test')
    cv2.setMouseCallback('test', change_dir)

    while True:
        with Profiler('Render'):
            frame = renderer.render(modes=('rgb', 'normal', '3d'))
        cv2.imshow(
            'test',
            cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR))
        q = cv2.waitKey(1)
        if q == ord('w'):
            px += 0.05
        elif q == ord('s'):
            px -= 0.05
        elif q == ord('a'):
            py += 0.05
        elif q == ord('d'):
            py -= 0.05
        elif q == ord('q'):
            break
        camera_pose = np.array([px, py, 1.2])
        renderer.set_camera(camera_pose, camera_pose + view_direction,
                            [0, 0, 1])

    renderer.release()
Example #15
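# Fragment: the setup is truncated in the source; it assumes a running
# Simulator `s` and a previously created InteractiveObj `obj`. The snippet
# arranges cabinet URDFs, box shapes, and YCB cracker boxes, then steps.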
obj.set_position([-2, 0, 0.5])
obj = InteractiveObj(
    filename='/data4/mdv0/cabinet/0007/part_objs/cabinet_0007.urdf')
s.import_interactive_object(obj)
obj.set_position([-2, 2, 0.5])
obj = InteractiveObj(
    filename='/data4/mdv0/cabinet/0004/part_objs/cabinet_0004.urdf')
s.import_interactive_object(obj)
obj.set_position([-2.1, 1.6, 2])
obj = InteractiveObj(
    filename='/data4/mdv0/cabinet/0004/part_objs/cabinet_0004.urdf')
s.import_interactive_object(obj)
obj.set_position([-2.1, 0.4, 2])
obj = BoxShape([-2.05, 1, 0.5], [0.35, 0.6, 0.5])
s.import_interactive_object(obj)
obj = BoxShape([-2.45, 1, 1.5], [0.01, 2, 1.5])
s.import_interactive_object(obj)
p.createConstraint(0, -1, obj.body_id, -1, p.JOINT_FIXED, [0, 0, 1],
                   [-2.55, 1, 1.5], [0, 0, 0])
obj = YCBObject('003_cracker_box')
s.import_object(obj)
p.resetBasePositionAndOrientation(obj.body_id, [-2, 1, 1.2], [0, 0, 0, 1])
obj = YCBObject('003_cracker_box')
s.import_object(obj)
p.resetBasePositionAndOrientation(obj.body_id, [-2, 2, 1.2], [0, 0, 0, 1])

for i in range(1000):
    with Profiler("Simulation: step"):
        s.step()

s.disconnect()