Example #1
    def __init__(self):
        config = parse_config('../../configs/turtlebot_demo.yaml')
        hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                   'background', 'probe_02.hdr')
        hdr_texture2 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                    'background', 'probe_03.hdr')
        light_modulation_map_filename = os.path.join(gibson2.ig_dataset_path,
                                                     'scenes', 'Rs_int',
                                                     'layout',
                                                     'floor_lighttype_0.png')
        background_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                          'background', 'urban_street_01.jpg')

        settings = MeshRendererSettings(enable_shadow=False, enable_pbr=False)

        self.s = Simulator(mode='headless',
                           image_width=400,
                           image_height=400,
                           rendering_settings=settings)
        scene = StaticIndoorScene('Rs')
        self.s.import_scene(scene)
        #self.s.import_ig_scene(scene)
        self.robot = Turtlebot(config)
        self.s.import_robot(self.robot)

        for _ in range(5):
            obj = YCBObject('003_cracker_box')
            self.s.import_object(obj)
            obj.set_position_orientation(
                np.random.uniform(low=0, high=2, size=3), [0, 0, 0, 1])
        print(self.s.renderer.instances)
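
A minimal driver sketch for the class above (the class name Demo and the step count are assumptions; the individual calls mirror the other examples on this page):

# hypothetical usage of the demo class defined in Example #1
demo = Demo()
for _ in range(100):
    demo.robot.apply_action([0.1, 0.1])  # constant wheel velocities, as in Example #3
    demo.s.step()  # advance physics and rendering by one step
    rgb = demo.s.renderer.render_robot_cameras(modes=('rgb'))
demo.s.disconnect()
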
Example #2
def test_tensor_render_rendering():
    w = 800
    h = 600
    setting = MeshRendererSettings(enable_pbr=False, msaa=True)
    renderer = MeshRendererG2G(w, h, rendering_settings=setting)
    test_dir = os.path.join(gibson2.assets_path, 'test')
    renderer.load_object(os.path.join(test_dir, 'mesh/bed1a77d92d64f5cbbaaae4feed64ec1_new.obj'))
    renderer.add_instance(0)

    renderer.set_camera([0, 0, 1.2], [0, 1, 1.2], [0, 1, 0])
    renderer.set_fov(90)
    tensor, tensor2 = renderer.render(modes=('rgb', 'normal'))

    img_np = tensor.flip(0).data.cpu().numpy().reshape(h, w, 4)
    img_np2 = tensor2.flip(0).data.cpu().numpy().reshape(h, w, 4)

    # plt.subplot(1,2,1)
    # plt.imshow(img_np)
    # plt.subplot(1,2,2)
    # plt.imshow(img_np2)
    # plt.show()
    assert (np.allclose(np.mean(img_np.astype(np.float32), axis=(0, 1)),
                       np.array([131.71548, 128.34981, 121.81708, 255.86292]), rtol=1e-3))

    # print(np.mean(img_np.astype(np.float32), axis = (0,1)))
    # print(np.mean(img_np2.astype(np.float32), axis = (0,1)))
    renderer.release()
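
If the tensors rendered above need to be inspected outside the test, a small sketch (assuming Pillow is available, and that the values sit roughly in the [0, 255] range implied by the assert) saves them as images:

# save the GPU-rendered frames from Example #2 to disk (illustrative only)
from PIL import Image
Image.fromarray(np.clip(img_np[:, :, :3], 0, 255).astype(np.uint8)).save('tensor_rgb.png')
Image.fromarray(np.clip(img_np2[:, :, :3], 0, 255).astype(np.uint8)).save('tensor_normal.png')
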
Example #3
def main():
    config = parse_config('../configs/turtlebot_demo.yaml')
    settings = MeshRendererSettings(enable_shadow=False, msaa=False)
    s = Simulator(mode='gui',
                  image_width=256,
                  image_height=256,
                  rendering_settings=settings)

    scene = StaticIndoorScene('Rs',
                              build_graph=True,
                              pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    for _ in range(10):
        obj = YCBObject('003_cracker_box')
        s.import_object(obj)
        obj.set_position_orientation(np.random.uniform(low=0, high=2, size=3),
                                     [0, 0, 0, 1])

    print(s.renderer.instances)

    for i in range(10000):
        with Profiler('Simulator step'):
            turtlebot.apply_action([0.1, 0.1])
            s.step()
            rgb = s.renderer.render_robot_cameras(modes=('rgb'))
    s.disconnect()
Example #4
def test_import_igsdf(scene_name, scene_source):
    hdr_texture = os.path.join(
        gibson2.ig_dataset_path, 'scenes', 'background', 'probe_02.hdr')
    hdr_texture2 = os.path.join(
        gibson2.ig_dataset_path, 'scenes', 'background', 'probe_03.hdr')

    if scene_source == "IG":
        scene_dir = get_ig_scene_path(scene_name)
    elif scene_source == "CUBICASA":
        scene_dir = get_cubicasa_scene_path(scene_name)
    else:
        scene_dir = get_3dfront_scene_path(scene_name)

    light_modulation_map_filename = os.path.join(
        scene_dir, 'layout', 'floor_lighttype_0.png')
    background_texture = os.path.join(
        gibson2.ig_dataset_path, 'scenes', 'background', 
        'urban_street_01.jpg')

    scene = InteractiveIndoorScene(
                    scene_name, 
                    texture_randomization=False, 
                    object_randomization=False,
                    scene_source=scene_source)

    settings = MeshRendererSettings(env_texture_filename=hdr_texture,
                                    env_texture_filename2=hdr_texture2,
                                    env_texture_filename3=background_texture,
                                    light_modulation_map_filename=light_modulation_map_filename,
                                    enable_shadow=True, msaa=True,
                                    light_dimming_factor=1.0)
    s = Simulator(mode='iggui', image_width=960,
                  image_height=720, device_idx=0, rendering_settings=settings)

    s.import_ig_scene(scene)
    fpss = []

    np.random.seed(0)
    _,(px,py,pz) = scene.get_random_point()
    s.viewer.px = px
    s.viewer.py = py
    s.viewer.pz = 1.7
    s.viewer.update()
    
    for i in range(3000):
        if i == 2500:
            logId = p.startStateLogging(loggingType=p.STATE_LOGGING_PROFILE_TIMINGS, fileName='trace_beechwood')
        start = time.time()
        s.step()
        end = time.time()
        print("Elapsed time: ", end - start)
        print("Frequency: ", 1 / (end - start))
        fpss.append(1 / (end - start))
    p.stopStateLogging(logId)
    s.disconnect()
    print("end")
    
    plt.plot(fpss)
    plt.show()
Example #5
    def __init__(self,
                 config_file,
                 scene_id=None,
                 mode='headless',
                 action_timestep=1 / 10.0,
                 physics_timestep=1 / 240.0,
                 render_to_tensor=False,
                 device_idx=0):
        """
        :param config_file: config_file path
        :param scene_id: override scene_id in config file
        :param mode: headless or gui mode
        :param action_timestep: environment executes action per action_timestep second
        :param physics_timestep: physics timestep for pybullet
        :param render_to_tensor: whether to render the camera images directly to GPU tensors
        :param device_idx: which GPU to run the simulation and rendering on
        """
        self.config = parse_config(config_file)
        if scene_id is not None:
            self.config['scene_id'] = scene_id

        self.mode = mode
        self.action_timestep = action_timestep
        self.physics_timestep = physics_timestep
        self.texture_randomization_freq = self.config.get(
            'texture_randomization_freq', None)
        self.object_randomization_freq = self.config.get(
            'object_randomization_freq', None)
        self.object_randomization_idx = 0
        self.num_object_randomization_idx = 10

        enable_shadow = self.config.get('enable_shadow', False)
        enable_pbr = self.config.get('enable_pbr', True)
        texture_scale = self.config.get('texture_scale', 1.0)

        settings = MeshRendererSettings(enable_shadow=enable_shadow,
                                        enable_pbr=enable_pbr,
                                        msaa=False,
                                        texture_scale=texture_scale)

        self.simulator = Simulator(
            mode=mode,
            physics_timestep=physics_timestep,
            render_timestep=action_timestep,
            image_width=self.config.get('image_width', 128),
            image_height=self.config.get('image_height', 128),
            vertical_fov=self.config.get('vertical_fov', 90),
            device_idx=device_idx,
            render_to_tensor=render_to_tensor,
            rendering_settings=settings)
        self.load()
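
A hedged usage sketch for a class with this constructor (the subclass name NavEnv is illustrative; the config path and scene id follow the other examples on this page):

# hypothetical instantiation of an environment built on the constructor above
env = NavEnv(config_file='../../configs/turtlebot_demo.yaml',
             scene_id='Rs_int',  # overrides scene_id in the YAML config
             mode='headless',
             action_timestep=1 / 10.0,
             physics_timestep=1 / 240.0)
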
Example #6
    def __init__(self, robot='turtlebot', scene='Rs_int'):
        config = parse_config('../../configs/turtlebot_demo.yaml')
        hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                   'background', 'probe_02.hdr')
        hdr_texture2 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                    'background', 'probe_03.hdr')
        light_modulation_map_filename = os.path.join(gibson2.ig_dataset_path,
                                                     'scenes', 'Rs_int',
                                                     'layout',
                                                     'floor_lighttype_0.png')
        background_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                          'background', 'urban_street_01.jpg')

        scene = InteractiveIndoorScene(scene,
                                       texture_randomization=False,
                                       object_randomization=False)
        #scene._set_first_n_objects(5)
        scene.open_all_doors()

        settings = MeshRendererSettings(
            env_texture_filename=hdr_texture,
            env_texture_filename2=hdr_texture2,
            env_texture_filename3=background_texture,
            light_modulation_map_filename=light_modulation_map_filename,
            enable_shadow=True,
            msaa=True,
            light_dimming_factor=1.0,
            optimized=True)

        self.s = Simulator(mode='headless',
                           image_width=400,
                           image_height=400,
                           rendering_settings=settings)
        self.s.import_ig_scene(scene)

        if robot == 'turtlebot':
            self.robot = Turtlebot(config)
        else:
            self.robot = Fetch(config)

        self.s.import_robot(self.robot)

        for _ in range(5):
            obj = YCBObject('003_cracker_box')
            self.s.import_object(obj)
            obj.set_position_orientation(
                np.random.uniform(low=0, high=2, size=3), [0, 0, 0, 1])
        print(self.s.renderer.instances)
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--scene',
                        type=str,
                        help='Name of the scene in the iG Dataset')
    args = parser.parse_args()
    settings = MeshRendererSettings(enable_shadow=True, msaa=False)
    s = Simulator(mode='gui',
                  image_width=256,
                  image_height=256,
                  rendering_settings=settings)

    scene = iGSDFScene(args.scene)
    s.import_ig_scene(scene)

    for i in range(10000):
        with Profiler('Simulator step'):
            s.step()
    s.disconnect()
Example #8
def test_render_pbr_optimized():
    hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes', 'background',
                               'quattro_canti_4k.hdr')
    model_path = os.path.join(get_ig_model_path('sink', 'sink_1'), 'shape',
                              'visual')
    settings = MeshRendererSettings(msaa=True,
                                    enable_shadow=True,
                                    env_texture_filename=hdr_texture,
                                    env_texture_filename3=hdr_texture,
                                    optimized=True)
    renderer = MeshRenderer(width=1024,
                            height=1024,
                            vertical_fov=90,
                            rendering_settings=settings)
    renderer.set_light_position_direction([0, 0, 10], [0, 0, 0])
    i = 0

    for fn in os.listdir(model_path):
        if fn.endswith('obj'):
            renderer.load_object(os.path.join(model_path, fn), scale=[1, 1, 1])
            renderer.add_instance(i)
            i += 1
            renderer.instances[-1].use_pbr = True
            renderer.instances[-1].use_pbr_mapping = True

    renderer.set_camera([1.5, 1.5, 1.5], [0, 0, 0], [0, 0, 1], cache=True)
    frame = renderer.render(modes=('rgb', 'normal'))

    Image.fromarray((255 * np.concatenate(frame, axis=1)[:, :, :3]).astype(
        np.uint8)).save('test_render_optimized.png')
    renderer.set_camera([1.49, 1.49, 1.49], [0, 0.05, 0.05], [0, 0, 1],
                        cache=True)  # simulate camera movement
    frame = renderer.render(modes=('optical_flow', 'scene_flow'))
    plt.subplot(1, 2, 1)
    plt.imshow(np.abs(frame[0][:, :, :3]) / np.max(np.abs(frame[0][:, :, :3])))
    plt.subplot(1, 2, 2)
    plt.imshow(np.abs(frame[1][:, :, :3]) / np.max(np.abs(frame[1][:, :, :3])))
    plt.savefig('test_render_optimized_flow.png')

    renderer.release()
Example #9
def main():
    config = parse_config('../configs/turtlebot_demo.yaml')
    settings = MeshRendererSettings()
    s = Simulator(mode='gui',
                  image_width=256,
                  image_height=256,
                  rendering_settings=settings)

    scene = StaticIndoorScene('Rs',
                              build_graph=True,
                              pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)

    for i in range(10000):
        with Profiler('Simulator step'):
            turtlebot.apply_action([0.1, -0.1])
            s.step()
            lidar = s.renderer.get_lidar_all()
            print(lidar.shape)
            # TODO: visualize lidar scan

    s.disconnect()
Example #10
def main():
    config = parse_config(os.path.join(gibson2.example_config_path, 'turtlebot_demo.yaml')) #robot configuration file path
    settings = MeshRendererSettings() #generating renderer settings object
    
    #generating simulator object
    s = Simulator(mode='headless', #simulating without gui 
    #s = Simulator(mode='gui', #simulating with gui
                  image_width=64, #robot camera pixel width
                  image_height=48, #robot camera pixel height
                  vertical_fov=75, #robot camera vertical field of view in degrees (floor to ceiling)
                  rendering_settings=settings)
    
    #generating scene object
    scene = InteractiveIndoorScene('Benevolence_1_int', #scene name: Benevolence, floor number: 1, does it include interactive objects: yes (int). I pick Benevolence on purpose as it is the most memory friendly scene in the iGibson dataset. 
                              build_graph=True, #builds the connectivity graph over the given top-down traversability map (floor plan)
                              waypoint_resolution=0.1, #radial distance between 2 consecutive waypoints (10 cm)
                              trav_map_resolution=0.1,
                              trav_map_erosion=2,
                              should_open_all_doors=True,
                              pybullet_load_texture=True) #do you want to include texture and material properties? (you need this for object interaction)

    s.import_ig_scene(scene) #loading the scene object in the simulator object
    turtlebot = Turtlebot(config) #generating the robot object
    s.import_robot(turtlebot) #loading the robot object in the simulator object
    init_pos = turtlebot.get_position() #getting the initial position of the robot base [X:meters, Y:meters, Z:meters] (base: robot's main body. it may have links and the associated joints too. links and joints have positions and orientations as well.)
    init_or = turtlebot.get_rpy() #getting the initial Euler angles of the robot base [Roll: radians, Pitch: radians, Yaw: radians]

    #sampling random goal states in a desired room of the apartment
    np.random.seed(0)

    encoder = keras.models.load_model('./CVAE_encoder')
    decoder = keras.models.load_model('./CVAE_decoder')

    for j in range(30):

        goal1 = scene.get_random_point_by_room_type('living_room')[1] #sampling random points in the living room
        goal2 = scene.get_random_point_by_room_type('living_room')[1]    
        goal3 = scene.get_random_point_by_room_type('living_room')[1]    
        goal4 = init_pos
    
        path1 = scene.get_shortest_path(0,init_pos[0:2],goal1[0:2],entire_path=True)[0] #generate the "entire" a* path between the initial, sub-goal, and terminal goal nodes
        path2 = scene.get_shortest_path(0,goal1[0:2],goal2[0:2],entire_path=True)[0] 
        path3 = scene.get_shortest_path(0,goal2[0:2],goal3[0:2],entire_path=True)[0] 
        path4 = scene.get_shortest_path(0,goal3[0:2],goal4[0:2],entire_path=True)[0] 
    
        rnd_path = np.append(path1,path2[1:],axis=0)
        rnd_path = np.append(rnd_path,path3[1:],axis=0)
        rnd_path = np.append(rnd_path,path4[1:],axis=0)

        #fitting a bezier curve through the a* waypoints and sampling 2000 points from that curve
        path_nodes = np.asfortranarray([rnd_path[:,0].tolist(),rnd_path[:,1].tolist()])
        smt_crv = bezier.Curve.from_nodes(path_nodes)
        s_vals = np.linspace(0,1,2000)
        smt_path = smt_crv.evaluate_multi(s_vals)
        
        #correcting the initial orientation of the robot
        delta_pos = smt_path[:,1] - smt_path[:,0] #direction vector between 2 consecutive waypoints
        delta_yaw = np.arctan2(delta_pos[1],delta_pos[0]) #the yaw angle of the robot base while following the sampled bezier path
        delta_qua = e2q(init_or[0],init_or[1],delta_yaw) #transforming robot base Euler angles to quaternion
        turtlebot.set_position_orientation([smt_path[0,0],smt_path[1,0],init_pos[2]], delta_qua) #setting the robot base position and the orientation

        episode_path = os.getcwd() + '/episodes/episode_' + str(j).zfill(2) #path of the new episode folder
        #os.makedirs(episode_path) #creating a new folder for the episode
     
        gtrth_pth_str = episode_path + '/episode_' + str(j).zfill(2) + 'ground_truth_path.csv' #path of the ground truth path CSV file
        #np.savetxt(gtrth_pth_str, np.atleast_2d(smt_path).T, delimiter=",")
       
        for i in range(len(smt_path[0])-1):
        with Profiler('Simulator step'): #iGibson simulation loop requires this context manager
            
                rgb_camera = np.array(s.renderer.render_robot_cameras(modes='rgb')) #probing RGB data, you can also probe rgbd or even optical flow if robot has that property in its config file (.yaml file)
                robot_img = rgb_camera[0,:,:,0] #taking a single channel of the first robot camera image as the encoder input
                encoded = encoder.predict(np.expand_dims(robot_img, axis=0))
                decoded = decoder.predict(encoded[0])
                robot_img = decoded[0,:,:,0]
                robot_img = robot_img.astype('uint8')
                robot_img = Image.fromarray(robot_img)
                img_str = './iGib_recons' + '/episode_' + str(j).zfill(2) + '_frame_' + str(i).zfill(5) + '.jpeg'
                robot_img.save(img_str)
                #lidar = s.renderer.get_lidar_all() #probing 360 degrees lidar data

                delta_pos = smt_path[:,i+1] - smt_path[:,i] #direction vector between 2 consecutive waypoints
                delta_yaw = np.arctan2(delta_pos[1],delta_pos[0]) #the yaw angle of the robot base while following the sampled bezier path
                delta_qua = e2q(init_or[0],init_or[1],delta_yaw) #transforming robot base Euler angles to quaternion
                turtlebot.set_position_orientation([smt_path[0,i],smt_path[1,i],init_pos[2]], delta_qua) #setting the robot base position and the orientation
                velcmd1, velcmd2 = PID_path_track(delta_pos, delta_qua) #wheel velocity commands from the (externally defined) PID path-tracking controller
                turtlebot.set_motor_velocity([velcmd1,velcmd2])

                s.step() #proceed one step ahead in simulation time
    

    s.disconnect()
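
The helper e2q used above is not defined in this example; a minimal stand-in (an assumption, not the author's implementation) can be built on pybullet's Euler-to-quaternion conversion, which returns the [x, y, z, w] order expected by set_position_orientation:

import pybullet as p

def e2q(roll, pitch, yaw):
    # convert base Euler angles in radians to an [x, y, z, w] quaternion
    return p.getQuaternionFromEuler([roll, pitch, yaw])
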
Example #11
def benchmark(render_to_tensor=False,
              resolution=512,
              obj_num=100,
              optimized=True):

    n_frame = 200

    if optimized:
        settings = MeshRendererSettings(msaa=True, optimized=True)
        renderer = MeshRenderer(width=resolution,
                                height=resolution,
                                vertical_fov=90,
                                rendering_settings=settings)
    else:
        settings = MeshRendererSettings(msaa=True, optimized=False)
        renderer = MeshRenderer(width=resolution,
                                height=resolution,
                                vertical_fov=90,
                                rendering_settings=settings)

    renderer.load_object('plane/plane_z_up_0.obj', scale=[3, 3, 3])
    renderer.add_instance(0)
    renderer.instances[-1].use_pbr = True
    renderer.instances[-1].use_pbr_mapping = True
    renderer.set_pose([0, 0, -1.5, 1, 0, 0.0, 0.0], -1)

    model_path = sys.argv[1]

    px = 1
    py = 1
    pz = 1

    camera_pose = np.array([px, py, pz])
    view_direction = np.array([-1, -1, -1])
    renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
    theta = 0
    r = 6
    scale = 1
    i = 1

    obj_count_x = int(np.sqrt(obj_num))

    for fn in os.listdir(model_path):
        if fn.endswith('obj') and 'processed' in fn:
            renderer.load_object(os.path.join(model_path, fn),
                                 scale=[scale, scale, scale])
            for obj_i in range(obj_count_x):
                for obj_j in range(obj_count_x):
                    renderer.add_instance(i)
                    renderer.set_pose([
                        obj_i - obj_count_x / 2., obj_j - obj_count_x / 2., 0,
                        0.7071067690849304, 0.7071067690849304, 0.0, 0.0
                    ], -1)
                    renderer.instances[-1].use_pbr = True
                    renderer.instances[-1].use_pbr_mapping = True

            i += 1

    print(renderer.visual_objects, renderer.instances)
    print(renderer.materials_mapping, renderer.mesh_materials)

    start = time.time()
    for i in range(n_frame):
        px = r * np.sin(theta)
        py = r * np.cos(theta)
        theta += 0.01
        camera_pose = np.array([px, py, pz])
        renderer.set_camera(camera_pose, [0, 0, 0], [0, 0, 1])

        frame = renderer.render(modes=('rgb', 'normal'))
        #print(frame)
        cv2.imshow(
            'test',
            cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR))
        cv2.waitKey(1)
    elapsed = time.time() - start
    print('{} fps'.format(n_frame / elapsed))
    return obj_num, n_frame / elapsed
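
A small driver sketch for the benchmark above (the model directory is read from sys.argv[1] inside the function, so it has to be supplied on the command line; the sweep over object counts is illustrative):

# e.g. python renderer_benchmark.py /path/to/model_dir  (script name is an assumption)
results = [benchmark(resolution=512, obj_num=n, optimized=True)
           for n in (25, 100, 225)]  # perfect squares, since obj_count_x = sqrt(obj_num)
for obj_num, fps in results:
    print('{} objects: {:.1f} fps'.format(obj_num, fps))
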
Example #12
def main():
    global _mouse_ix, _mouse_iy, down, view_direction

    if len(sys.argv) > 1:
        model_path = sys.argv[1]
    else:
        model_path = os.path.join(get_scene_path('Rs_int'), 'mesh_z_up.obj')
    settings = MeshRendererSettings(msaa=True, enable_shadow=True)
    renderer = MeshRenderer(width=1024, height=1024,  vertical_fov=70, rendering_settings=settings)
    renderer.set_light_position_direction([0,0,10], [0,0,0])

    i = 0

    v = []
    for fn in os.listdir(model_path):
        if fn.endswith('obj'):
            vertices, faces = load_obj_np(os.path.join(model_path, fn))
            v.append(vertices)

    v = np.vstack(v)
    print(v.shape)
    xlen = np.max(v[:,0]) - np.min(v[:,0])
    ylen = np.max(v[:,1]) - np.min(v[:,1])
    scale = 2.0/(max(xlen, ylen))

    for fn in os.listdir(model_path):
        if fn.endswith('obj'):
            renderer.load_object(os.path.join(model_path, fn), scale=[scale, scale, scale])
            renderer.add_instance(i)
            i += 1

    print(renderer.visual_objects, renderer.instances)
    print(renderer.materials_mapping, renderer.mesh_materials)

    px = 1
    py = 1
    pz = 1

    camera_pose = np.array([px, py, pz])
    view_direction = np.array([-1, -1, -1])
    renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])

    _mouse_ix, _mouse_iy = -1, -1
    down = False

    def change_dir(event, x, y, flags, param):
        global _mouse_ix, _mouse_iy, down, view_direction
        if event == cv2.EVENT_LBUTTONDOWN:
            _mouse_ix, _mouse_iy = x, y
            down = True
        if event == cv2.EVENT_MOUSEMOVE:
            if down:
                dx = (x - _mouse_ix) / 100.0
                dy = (y - _mouse_iy) / 100.0
                _mouse_ix = x
                _mouse_iy = y
                r1 = np.array([[np.cos(dy), 0, np.sin(dy)], [0, 1, 0], [-np.sin(dy), 0, np.cos(dy)]])
                r2 = np.array([[np.cos(-dx), -np.sin(-dx), 0], [np.sin(-dx), np.cos(-dx), 0], [0, 0, 1]])
                view_direction = r1.dot(r2).dot(view_direction)
        elif event == cv2.EVENT_LBUTTONUP:
            down = False

    cv2.namedWindow('test')
    cv2.setMouseCallback('test', change_dir)

    while True:
        with Profiler('Render'):
            frame = renderer.render(modes=('rgb', 'normal'))
        cv2.imshow('test', cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR))

        q = cv2.waitKey(1)
        if q == ord('w'):
            px += 0.1
        elif q == ord('s'):
            px -= 0.1
        elif q == ord('a'):
            py += 0.1
        elif q == ord('d'):
            py -= 0.1
        elif q == ord('q'):
            break

        camera_pose = np.array([px, py, 1])
        renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])

    renderer.release()
Example #13
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--scene',
                        type=str,
                        help='Name of the scene in the iG Dataset')
    parser.add_argument('--save_dir',
                        type=str,
                        help='Directory to save the frames.',
                        default='misc')
    parser.add_argument('--seed', type=int, default=15, help='Random seed.')
    parser.add_argument('--domain_rand',
                        dest='domain_rand',
                        action='store_true')
    parser.add_argument('--domain_rand_interval',
                        dest='domain_rand_interval',
                        type=int,
                        default=50)
    parser.add_argument('--object_rand',
                        dest='object_rand',
                        action='store_true')
    args = parser.parse_args()

    # hdr_texture1 = os.path.join(
    # gibson2.ig_dataset_path, 'scenes', 'background', 'photo_studio_01_2k.hdr')
    hdr_texture1 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                'background', 'probe_03.hdr')
    hdr_texture2 = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                'background', 'probe_02.hdr')
    light_map = os.path.join(get_ig_scene_path(args.scene), 'layout',
                             'floor_lighttype_0.png')

    background_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                      'background', 'urban_street_01.jpg')

    settings = MeshRendererSettings(env_texture_filename=hdr_texture1,
                                    env_texture_filename2=hdr_texture2,
                                    env_texture_filename3=background_texture,
                                    light_modulation_map_filename=light_map,
                                    enable_shadow=True,
                                    msaa=True,
                                    skybox_size=36.,
                                    light_dimming_factor=0.8)

    s = Simulator(mode='headless',
                  image_width=1080,
                  image_height=720,
                  vertical_fov=60,
                  rendering_settings=settings)

    random.seed(args.seed)
    scene = InteractiveIndoorScene(args.scene,
                                   texture_randomization=args.domain_rand,
                                   object_randomization=args.object_rand)

    s.import_ig_scene(scene)

    traj_path = os.path.join(get_ig_scene_path(args.scene), 'misc',
                             'tour_cam_trajectory.txt')
    save_dir = os.path.join(get_ig_scene_path(args.scene), args.save_dir)
    os.makedirs(save_dir, exist_ok=True)
    tmp_dir = os.path.join(save_dir, 'tmp')
    os.makedirs(tmp_dir, exist_ok=True)

    with open(traj_path, 'r') as fp:
        points = [l.rstrip().split(',') for l in fp.readlines()]

    for _ in range(60):
        s.step()
    s.sync()

    for i in range(len(points)):
        if args.domain_rand and i % args.domain_rand_interval == 0:
            scene.randomize_texture()
        x, y, dir_x, dir_y = [float(p) for p in points[i]]
        z = 1.7
        tar_x = x + dir_x
        tar_y = y + dir_y
        tar_z = 1.4
        # cam_loc = np.array([x, y, z])
        s.renderer.set_camera([x, y, z], [tar_x, tar_y, tar_z], [0, 0, 1])

        with Profiler('Render'):
            frame = s.renderer.render(modes=('rgb'))
        img = Image.fromarray(
            (255 * np.concatenate(frame, axis=1)[:, :, :3]).astype(np.uint8))
        img.save(os.path.join(tmp_dir, '{:05d}.png'.format(i)))

    cmd = 'ffmpeg -i {t}/%5d.png -y -an -c:v libx264 -crf 18 -preset veryslow -r 30 {s}/tour.mp4'.format(
        t=tmp_dir, s=save_dir)
    subprocess.call(cmd, shell=True)
    cmd = 'rm -r {}'.format(tmp_dir)
    subprocess.call(cmd, shell=True)

    s.disconnect()
Example #14
def benchmark_scene(scene_name, optimized=False, import_robot=True):
    config = parse_config(os.path.join(gibson2.root_path, '../test/test.yaml'))
    assets_version = get_ig_assets_version()
    print('assets_version', assets_version)
    scene = InteractiveIndoorScene(scene_name,
                                   texture_randomization=False,
                                   object_randomization=False)
    settings = MeshRendererSettings(msaa=False,
                                    enable_shadow=False,
                                    optimized=optimized)
    s = Simulator(
        mode='headless',
        image_width=512,
        image_height=512,
        device_idx=0,
        rendering_settings=settings,
    )
    s.import_ig_scene(scene)
    if import_robot:
        turtlebot = Turtlebot(config)
        s.import_robot(turtlebot)

    s.renderer.use_pbr(use_pbr=True, use_pbr_mapping=True)
    fps = []
    physics_fps = []
    render_fps = []
    obj_awake = []
    for i in range(2000):
        # if i % 100 == 0:
        #     scene.randomize_texture()
        start = time.time()
        s.step()
        if import_robot:
            # apply random actions
            turtlebot.apply_action(turtlebot.action_space.sample())
        physics_end = time.time()
        if import_robot:
            _ = s.renderer.render_robot_cameras(modes=('rgb'))
        else:
            _ = s.renderer.render(modes=('rgb'))
        end = time.time()

        #print("Elapsed time: ", end - start)
        print("Render Frequency: ", 1 / (end - physics_end))
        print("Physics Frequency: ", 1 / (physics_end - start))
        print("Step Frequency: ", 1 / (end - start))
        fps.append(1 / (end - start))
        physics_fps.append(1 / (physics_end - start))
        render_fps.append(1 / (end - physics_end))
        obj_awake.append(s.body_links_awake)
    s.disconnect()
    plt.figure(figsize=(7, 25))

    ax = plt.subplot(6, 1, 1)
    plt.hist(render_fps)
    ax.set_xlabel('Render fps')
    ax.set_title(
        'Scene {} version {}\noptimized {} num_obj {}\n import_robot {}'.
        format(scene_name, assets_version, optimized, scene.get_num_objects(),
               import_robot))
    ax = plt.subplot(6, 1, 2)
    plt.hist(physics_fps)
    ax.set_xlabel('Physics fps')
    ax = plt.subplot(6, 1, 3)
    plt.hist(fps)
    ax.set_xlabel('Step fps')
    ax = plt.subplot(6, 1, 4)
    plt.plot(render_fps)
    ax.set_xlabel('Render fps with time')
    ax.set_ylabel('fps')
    ax = plt.subplot(6, 1, 5)
    plt.plot(physics_fps)
    ax.set_xlabel('Physics fps with time, converge to {}'.format(
        np.mean(physics_fps[-100:])))
    ax.set_ylabel('fps')
    ax = plt.subplot(6, 1, 6)
    plt.plot(obj_awake)
    ax.set_xlabel('Num object links awake, converge to {}'.format(
        np.mean(obj_awake[-100:])))

    plt.savefig('scene_benchmark_{}_o_{}_r_{}.pdf'.format(
        scene_name, optimized, import_robot))
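
A hedged sketch of sweeping this benchmark over a few configurations (the scene names are ones used elsewhere on this page; each call writes its own PDF of histograms):

for scene_name in ['Rs_int', 'Benevolence_1_int']:
    for optimized in (False, True):
        benchmark_scene(scene_name, optimized=optimized, import_robot=True)
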
Example #15
#obj3.set_position([0,0,1.2])

obj4 = ArticulatedObject(filename=carpet)
s.import_object(obj4)
obj4.set_position([0, 1, 0])

np.random.seed(0)
for _ in range(10):
    pt = scene.get_random_point_by_room_type('living_room')[1]
    print('random point in living_room', pt)

##############################################adding a fetch robot###################################################
config = parse_config(
    os.path.join(gibson2.example_config_path, 'fetch_motion_planning.yaml'))

settings = MeshRendererSettings(enable_shadow=False, msaa=False)
#turtlebot = Turtlebot(config)
#robot=turtlebot
#position=[1,1,0]
fetchbot = Fetch(config)
s.import_robot(fetchbot)
fetchbot.set_position([0, 1, 0])

##################################################################################################################
##tried changing robot control to position and torque but failed; from the documentation it seems the Turtlebot only##
############################################supports joint velocity###############################################
##################################################################################################################
#robot.load()
#robot.set_position(position)
#robot.robot_specific_reset()
#robot.keep_still()
Example #16
def main():
    global _mouse_ix, _mouse_iy, down, view_direction

    args = parser.parse_args()
    model_path = args.input_dir
    print(model_path)

    model_id = os.path.basename(model_path)
    category = os.path.basename(os.path.dirname(model_path))

    hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes', 'background',
                               'probe_03.hdr')
    settings = MeshRendererSettings(env_texture_filename=hdr_texture,
                                    enable_shadow=True,
                                    msaa=True)

    s = Simulator(mode='headless',
                  image_width=1800,
                  image_height=1200,
                  vertical_fov=70,
                  rendering_settings=settings)

    s.renderer.set_light_position_direction([0, 0, 10], [0, 0, 0])

    s.renderer.load_object('plane/plane_z_up_0.obj', scale=[3, 3, 3])
    s.renderer.add_instance(0)
    s.renderer.set_pose([0, 0, -1.5, 1, 0, 0.0, 0.0], -1)

    ###########################
    # Get center and scale
    ###########################
    bbox_json = os.path.join(model_path, 'misc', 'metadata.json')
    with open(bbox_json, 'r') as fp:
        bbox_data = json.load(fp)
        scale = 1.5 / max(bbox_data['bbox_size'])
        center = -scale * np.array(bbox_data['base_link_offset'])

    urdf_path = os.path.join(model_path, '{}.urdf'.format(model_id))
    print(urdf_path)
    obj = ArticulatedObject(filename=urdf_path, scale=scale)
    s.import_object(obj)
    obj.set_position(center)
    s.sync()

    _mouse_ix, _mouse_iy = -1, -1
    down = False

    theta, r = 0, 1.5

    px = r * np.sin(theta)
    py = r * np.cos(theta)
    pz = 1
    camera_pose = np.array([px, py, pz])
    s.renderer.set_camera(camera_pose, [0, 0, 0], [0, 0, 1])

    num_views = 6
    save_dir = os.path.join(model_path, 'visualizations')
    os.makedirs(save_dir, exist_ok=True)
    for i in range(num_views):
        theta += np.pi * 2 / (num_views + 1)
        obj.set_orientation([0., 0., 1.0, np.cos(theta / 2)])
        s.sync()
        with Profiler('Render'):
            frame = s.renderer.render(modes=('rgb'))
        img = Image.fromarray(
            (255 * np.concatenate(frame, axis=1)[:, :, :3]).astype(np.uint8))
        img.save(os.path.join(save_dir, '{:02d}.png'.format(i)))

    if which('ffmpeg') is not None:
        cmd = 'ffmpeg -framerate 2 -i {s}/%2d.png -y -r 16 -c:v libx264 -pix_fmt yuvj420p {s}/{m}.mp4'.format(
            s=save_dir, m=model_id)
        subprocess.call(cmd, shell=True)
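
Example #16 calls parser.parse_args() but the parser itself is defined outside the excerpt; a minimal sketch of what it likely looks like (only --input_dir is actually read by the code above):

# assumed module-level argument parser for Example #16
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', type=str, help='path to the object model directory')
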
Example #17
    def __init__(self,
                 gravity=9.8,
                 physics_timestep=1 / 120.0,
                 render_timestep=1 / 30.0,
                 mode='gui',
                 image_width=128,
                 image_height=128,
                 vertical_fov=90,
                 device_idx=0,
                 render_to_tensor=False,
                 rendering_settings=MeshRendererSettings()):
        """
        :param gravity: gravity on z direction.
        :param physics_timestep: timestep of physical simulation, p.stepSimulation()
        :param render_timestep: timestep of rendering, and Simulator.step() function
        :param mode: choose mode from gui, headless, iggui (only open iGibson UI), or pbgui (only open pybullet UI)
        :param image_width: width of the camera image
        :param image_height: height of the camera image
        :param vertical_fov: vertical field of view of the camera image in degrees
        :param device_idx: GPU device index to run rendering on
        :param render_to_tensor: Render to GPU tensors
        :param rendering_settings: rendering setting
        """
        # physics simulator
        self.gravity = gravity
        self.physics_timestep = physics_timestep
        self.render_timestep = render_timestep
        self.mode = mode

        # TODO: remove this, currently used for testing only
        self.objects = []

        plt = platform.system()
        if plt == 'Darwin' and self.mode == 'gui':
            self.mode = 'iggui'  # for mac os disable pybullet rendering
            logging.warn(
                'Rendering both iggui and pbgui is not supported on mac, choose either pbgui or '
                'iggui. Default to iggui.')

        self.use_pb_renderer = False
        self.use_ig_renderer = False

        if self.mode in ['gui', 'iggui']:
            self.use_ig_renderer = True

        if self.mode in ['gui', 'pbgui']:
            self.use_pb_renderer = True

        # renderer
        self.image_width = image_width
        self.image_height = image_height
        self.vertical_fov = vertical_fov
        self.device_idx = device_idx
        self.render_to_tensor = render_to_tensor
        self.optimized_renderer = rendering_settings.optimized
        self.rendering_settings = rendering_settings
        self.viewer = None
        self.load()

        self.class_name_to_class_id = get_class_name_to_class_id()
        self.body_links_awake = 0
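
A minimal construction sketch for the Simulator signature above (values mirror the defaults; scene and robot import would follow as in the other examples):

sim = Simulator(mode='headless',
                physics_timestep=1 / 120.0,  # pybullet step size
                render_timestep=1 / 30.0,    # simulated time advanced per Simulator.step()
                image_width=256,
                image_height=256)
for _ in range(30):  # roughly one second of simulated time
    sim.step()
sim.disconnect()
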
Example #18
def main():
    global _mouse_ix, _mouse_iy, down, view_direction

    model_path = sys.argv[1]
    print(model_path)

    model_id = os.path.basename(model_path)
    category = os.path.basename(os.path.dirname(model_path))

    hdr_texture = os.path.join(
                gibson2.ig_dataset_path, 'scenes', 'background', 
                'photo_studio_01_2k.hdr')
    settings = MeshRendererSettings(env_texture_filename=hdr_texture,
               enable_shadow=True, msaa=True,
               light_dimming_factor=1.5)

    s = Simulator(mode='headless', 
            image_width=1800, image_height=1200, 
            vertical_fov=70, rendering_settings=settings
            )

    s.renderer.set_light_position_direction([0,0,10], [0,0,0])

    s.renderer.load_object('plane/plane_z_up_0.obj', scale=[3,3,3])
    s.renderer.add_instance(0)
    s.renderer.set_pose([0,0,-1.5,1, 0, 0.0, 0.0], -1)


    v = []
    mesh_path = os.path.join(model_path, 'shape/visual')
    for fn in os.listdir(mesh_path):
        if fn.endswith('obj'):
            vertices, faces = load_obj_np(os.path.join(mesh_path, fn))
            v.append(vertices)

    v = np.vstack(v)
    print(v.shape)
    xlen = np.max(v[:,0]) - np.min(v[:,0])
    ylen = np.max(v[:,1]) - np.min(v[:,1])
    zlen = np.max(v[:,2]) - np.min(v[:,2])
    scale = 1.5/(max([xlen, ylen, zlen]))
    center = np.mean(v, axis=0)
    centered_v = v - center

    center = (np.max(v, axis=0) + np.min(v, axis=0)) / 2.

    urdf_path = os.path.join(model_path, '{}.urdf'.format(model_id))
    print(urdf_path)
    obj = ArticulatedObject(filename=urdf_path, scale=scale)
    s.import_object(obj)
    obj.set_position(center)
    s.sync()
    print(s.renderer.visual_objects, s.renderer.instances)

    _mouse_ix, _mouse_iy = -1, -1
    down = False

    theta,r = 0,1.5

    px = r*np.sin(theta)
    py = r*np.cos(theta)
    pz = 1
    camera_pose = np.array([px, py, pz])
    s.renderer.set_camera(camera_pose, [0,0,0], [0, 0, 1])

    num_views = 6 
    save_dir = os.path.join(model_path, 'visualizations')
    for i in range(num_views):
        theta += np.pi*2/(num_views+1)
        obj.set_orientation([0., 0., 1.0, np.cos(theta/2)])
        s.sync()
        with Profiler('Render'):
            frame = s.renderer.render(modes=('rgb'))
        img = Image.fromarray((
                255*np.concatenate(frame, axis=1)[:,:,:3]).astype(np.uint8))
        img.save(os.path.join(save_dir, '{:02d}.png'.format(i)))

    cmd = 'ffmpeg -framerate 2 -i {s}/%2d.png -y -r 16 -c:v libx264 -pix_fmt yuvj420p {s}/{m}.mp4'.format(s=save_dir,m=model_id)
    subprocess.call(cmd, shell=True)
    cmd = 'rm {}/??.png'.format(save_dir)
    subprocess.call(cmd, shell=True)
Example #19
    AWAKE = 1


hdr_texture = os.path.join(gibson2.ig_dataset_path, 'scenes', 'background',
                           'probe_02.hdr')
hdr_texture2 = os.path.join(gibson2.ig_dataset_path, 'scenes', 'background',
                            'probe_03.hdr')
light_modulation_map_filename = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                             'Rs_int', 'layout',
                                             'floor_lighttype_0.png')
background_texture = os.path.join(gibson2.ig_dataset_path, 'scenes',
                                  'background', 'urban_street_01.jpg')

NamedRenderingPresets = {
    'NO_PBR':
    MeshRendererSettings(enable_pbr=False, enable_shadow=False),
    'PBR_NOSHADOW':
    MeshRendererSettings(enable_pbr=True, enable_shadow=True),
    'PBR_SHADOW_MSAA':
    MeshRendererSettings(enable_pbr=True, enable_shadow=True, msaa=True),
    'NO_PBR_OPT':
    MeshRendererSettings(enable_pbr=False, enable_shadow=False,
                         optimized=True),
    'PBR_NOSHADOW_OPT':
    MeshRendererSettings(enable_pbr=True, enable_shadow=True, optimized=True),
    'PBR_SHADOW_MSAA_OPT':
    MeshRendererSettings(enable_pbr=True,
                         enable_shadow=True,
                         msaa=True,
                         optimized=True),
    'HQ_WITH_BG_OPT':