def render(self, mode='human', close=False):
    if mode != "rgb_array":
        return np.array([])
    camera_target_pos = self.camera_target_pos
    if self.debug:
        self._cam_dist = p.readUserDebugParameter(self.dist_slider)
        self._cam_yaw = p.readUserDebugParameter(self.yaw_slider)
        self._cam_pitch = p.readUserDebugParameter(self.pitch_slider)
        x = p.readUserDebugParameter(self.x_slider)
        y = p.readUserDebugParameter(self.y_slider)
        z = p.readUserDebugParameter(self.z_slider)
        camera_target_pos = (x, y, z)
        # self._cam_roll = p.readUserDebugParameter(self.roll_slider)
    # TODO: recompute view_matrix and proj_matrix only in debug mode
    view_matrix1 = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=camera_target_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=self._cam_roll,
        upAxisIndex=2)
    proj_matrix1 = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._width) / self._height,
        nearVal=0.1, farVal=100.0)
    (_, _, px1, _, _) = p.getCameraImage(width=self._width,
                                         height=self._height,
                                         viewMatrix=view_matrix1,
                                         projectionMatrix=proj_matrix1,
                                         renderer=self.renderer)
    rgb_array1 = np.array(px1)
    if self.multi_view:
        # adding a second camera on the other side of the robot
        view_matrix2 = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=(0.316, 0.316, -0.105),
            distance=1.05,
            yaw=32,
            pitch=-13,
            roll=0,
            upAxisIndex=2)
        proj_matrix2 = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(self._width) / self._height,
            nearVal=0.1, farVal=100.0)
        (_, _, px2, _, _) = p.getCameraImage(width=self._width,
                                             height=self._height,
                                             viewMatrix=view_matrix2,
                                             projectionMatrix=proj_matrix2,
                                             renderer=self.renderer)
        rgb_array2 = np.array(px2)
        rgb_array_res = np.concatenate(
            (rgb_array1[:, :, :3], rgb_array2[:, :, :3]), axis=2)
    else:
        rgb_array_res = rgb_array1[:, :, :3]
    return rgb_array_res
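# Hedged companion sketch (an addition, not from the source): with
# multi_view=True the render above returns an (H, W, 6) array, the two RGB
# views stacked along the channel axis; they can be recovered by splitting
# that axis.
import numpy as np

def split_views(obs):
    """Split a 6-channel multi-view observation back into two RGB images."""
    return obs[:, :, :3], obs[:, :, 3:]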
def _get_observation(self):
    look = [
        1.9 + self._cameraRandom * np.random.uniform(-0.05, 0.05),
        0.5 + self._cameraRandom * np.random.uniform(-0.05, 0.05),
        1 + self._cameraRandom * np.random.uniform(-0.05, 0.05)
    ]
    roll = -10
    pitch = -35
    yaw = 110
    look_2 = [
        -0.3 + self._cameraRandom * np.random.uniform(-0.05, 0.05),
        0.5 + self._cameraRandom * np.random.uniform(-0.05, 0.05),
        1.3 + self._cameraRandom * np.random.uniform(-0.05, 0.05)
    ]
    # print("Camera 1:\tx: {:.4f}m\ty: {:.4f}m\tz: {:.4f}m".format(look[0], look[1], look[2]))
    # print("Camera 2:\tx: {:.4f}m\ty: {:.4f}m\tz: {:.4f}m".format(look_2[0], look_2[1], look_2[2]))
    pitch_2 = -56
    yaw_2 = 245
    roll_2 = 0
    distance = 1.
    _view_matrix_2 = p.computeViewMatrixFromYawPitchRoll(
        look_2, distance, yaw_2, pitch_2, roll_2, 2)
    _view_matrix = p.computeViewMatrixFromYawPitchRoll(
        look, distance, yaw, pitch, roll, 2)
    img_arr = p.getCameraImage(width=self._width,
                               height=self._height,
                               viewMatrix=_view_matrix,
                               projectionMatrix=self._proj_matrix,
                               shadow=True,
                               lightDirection=[1, 1, 1],
                               renderer=p.ER_BULLET_HARDWARE_OPENGL)
    rgb_1 = np.reshape(img_arr[2], (self._height, self._width, 4))[:, :, :3]
    if self._single_img:
        return rgb_1
    img_arr_2 = p.getCameraImage(width=self._width,
                                 height=self._height,
                                 viewMatrix=_view_matrix_2,
                                 projectionMatrix=self._proj_matrix,
                                 shadow=True,
                                 lightDirection=[1, 1, 1],
                                 renderer=p.ER_BULLET_HARDWARE_OPENGL)
    rgb_2 = np.reshape(img_arr_2[2], (self._height, self._width, 4))[:, :, :3]
    # stack the two views vertically, e.g. (256, 128, 3)
    return np.concatenate([rgb_1, rgb_2], axis=0)
def render(self, close=False):
    # render the rgb and depth image
    camera_target_pos = self.camera_target_pos
    if self.debug:
        self._cam_dist = p.readUserDebugParameter(self.dist_slider)
        self._cam_yaw = p.readUserDebugParameter(self.yaw_slider)
        self._cam_pitch = p.readUserDebugParameter(self.pitch_slider)
        x = p.readUserDebugParameter(self.x_slider)
        y = p.readUserDebugParameter(self.y_slider)
        z = p.readUserDebugParameter(self.z_slider)
        camera_target_pos = (x, y, z)
        # self._cam_roll = p.readUserDebugParameter(self.roll_slider)
    # TODO: recompute view_matrix and proj_matrix only in debug mode
    view_matrix1 = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=camera_target_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=self._cam_roll,
        upAxisIndex=2)
    proj_matrix1 = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
        nearVal=0.1, farVal=100.0)
    (_, _, rgb, depth, _) = p.getCameraImage(
        width=RENDER_WIDTH, height=RENDER_HEIGHT,
        viewMatrix=view_matrix1, projectionMatrix=proj_matrix1,
        renderer=self.renderer)
    rgb_array1 = np.array(rgb)
    depth_array1 = np.array(depth)
    far = 100.
    near = 0.05
    depth_array1 = far * near / (far - (far - near) * depth_array1)
    if self.multi_view:
        # adding a second camera on the other side of the robot
        view_matrix2 = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=(0.316, 0.316, -0.105),
            distance=1.05,
            yaw=32,
            pitch=-13,
            roll=0,
            upAxisIndex=2)
        proj_matrix2 = p.computeProjectionMatrixFOV(
            fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
            nearVal=0.1, farVal=100.0)
        (_, _, px2, _, _) = p.getCameraImage(
            width=RENDER_WIDTH, height=RENDER_HEIGHT,
            viewMatrix=view_matrix2, projectionMatrix=proj_matrix2,
            renderer=self.renderer)
        rgb_array2 = np.array(px2)
        rgb_array_res = np.concatenate(
            (rgb_array1[:, :, :3], rgb_array2[:, :, :3]), axis=2)
    else:
        rgb_array_res = rgb_array1[:, :, :3]
    (ft_reading, pos) = self._kuka.getObservation()  # render the F/T sensor data
    return rgb_array_res, depth_array1, ft_reading, pos
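# A minimal standalone sketch (an addition, not from the source) of the depth
# conversion used in render() above: PyBullet's getCameraImage returns a
# non-linear OpenGL z-buffer in [0, 1], and metric depth is recovered with
# far * near / (far - (far - near) * z). The near/far defaults below are the
# values hard-coded in render() and must match the projection that produced
# the buffer.
import numpy as np

def linearize_depth(z_buffer, near=0.05, far=100.0):
    """Convert a PyBullet z-buffer array to metric depth (same shape)."""
    z = np.asarray(z_buffer)
    return far * near / (far - (far - near) * z)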
def render(self, mode='human', close=False):
    if mode != "rgb_array":
        return np.array([])
    camera_target_pos = self.camera_target_pos
    if self.debug:
        self._cam_dist = p.readUserDebugParameter(self.dist_slider)
        self._cam_yaw = p.readUserDebugParameter(self.yaw_slider)
        self._cam_pitch = p.readUserDebugParameter(self.pitch_slider)
        x = p.readUserDebugParameter(self.x_slider)
        y = p.readUserDebugParameter(self.y_slider)
        z = p.readUserDebugParameter(self.z_slider)
        camera_target_pos = (x, y, z)
    # TODO: recompute view_matrix and proj_matrix only in debug mode
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=camera_target_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=self._cam_roll,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
        nearVal=0.1, farVal=100.0)
    (_, _, px1, _, _) = p.getCameraImage(
        width=RENDER_WIDTH, height=RENDER_HEIGHT,
        viewMatrix=view_matrix, projectionMatrix=proj_matrix,
        renderer=self.renderer)
    rgb_array = np.array(px1)
    rgb_array_res = rgb_array[:, :, :3]
    # in first-person view, stack the observation from the car camera
    if self.fpv:
        # move camera
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=(self.robot_pos[0] - 0.25,
                                  self.robot_pos[1], 0.15),
            distance=0.3,
            yaw=self._cam_yaw,
            pitch=-17,
            roll=self._cam_roll,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=90, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
            nearVal=0.1, farVal=100.0)
        # get and stack image
        (_, _, px1, _, _) = p.getCameraImage(
            width=RENDER_WIDTH, height=RENDER_HEIGHT,
            viewMatrix=view_matrix, projectionMatrix=proj_matrix,
            renderer=self.renderer)
        rgb_array = np.array(px1)
        rgb_array_res = np.dstack([rgb_array_res, rgb_array[:, :, :3]])
    return rgb_array_res
def saveImage(lastTime, imageCount, display, ax, o1, cam, dist, yaw, pitch,
              camTargetPos, wall_id, on):
    current = current_milli_time()
    if (current - lastTime) < 100:
        return lastTime, imageCount
    img_arr = []
    img_arr2 = []
    rgb = []
    if display == "fp" or display == "both":
        camPos = [
            camTargetPos[0] - dist * math.cos(o1),
            camTargetPos[1] - dist * math.sin(o1)
        ]
        # fade the wall when the camera would be outside the arena bounds
        if wall_id > -1 and (abs(camPos[0]) > 4 or abs(camPos[1]) > 5):
            p.changeVisualShape(wall_id, -1, rgbaColor=[1, 1, 1, 0.4])
        # roll, upAxisIndex, pixelWidth/pixelHeight and projectionMatrix
        # are module-level globals
        viewMatrixFP = p.computeViewMatrixFromYawPitchRoll(
            camTargetPos, dist, -90 + (o1 * 180 / math.pi), -35, roll,
            upAxisIndex)
        img_arr = p.getCameraImage(pixelWidth, pixelHeight, viewMatrixFP,
                                   projectionMatrix, shadow=1,
                                   lightDirection=[1, 1, 1],
                                   renderer=p.ER_BULLET_HARDWARE_OPENGL,
                                   flags=p.ER_NO_SEGMENTATION_MASK)
        if wall_id > -1:
            p.changeVisualShape(wall_id, -1, rgbaColor=[1, 1, 1, 1])
    if display == "tp" or display == "both":
        print(camTargetPos, dist, yaw, pitch, roll, upAxisIndex)
        viewMatrixTP = p.computeViewMatrixFromYawPitchRoll(
            camTargetPos, dist, yaw, pitch, roll, upAxisIndex)
        img_arr2 = p.getCameraImage(pixelWidth, pixelHeight, viewMatrixTP,
                                    projectionMatrix, shadow=1,
                                    lightDirection=[1, 1, 1],
                                    renderer=p.ER_BULLET_HARDWARE_OPENGL,
                                    flags=p.ER_NO_SEGMENTATION_MASK)
    if display:
        if display == "fp":
            rgb = img_arr[2]  # [:800, 200:1400, :]
        elif display == "tp":
            rgb = img_arr2[2]  # [:800, 200:1400, :]
        if "light" not in on:
            rgb = np.divide(rgb, 2)
        # plt.imsave("logs/"+str(imageCount)+".jpg", arr=np.reshape(rgb, (800, 1200, 4)) * (1. / 255.))
        plt.imsave("logs/" + str(imageCount) + ".jpg",
                   arr=np.reshape(rgb, (pixelHeight, pixelWidth, 4)) * (1. / 255.))
    return current, imageCount + 1
def save_obs(dataset_dir, camera, num_obs, scene_id, is_valset=False):
    """
    In:
        dataset_dir: string, the directory to save observations.
        camera: Camera instance.
        num_obs: int, number of observations to be made in the current scene
            with the camera moving round a circle above the origin.
        scene_id: int, indicating the scene to observe, used to
            (1) index files to be saved (2) change camera distance and pitch angle.
    Out:
        None.
    Purpose:
        Save RGB and instance-level segmentation mask as files. (Depth is
        captured by make_obs but not written here.)
    """
    for i in range(num_obs):
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=(0.0, 0.0, 0.0),
            distance=0.7 - scene_id * 0.005,
            yaw=i * (360 / num_obs),  # move round a circle orbit
            pitch=-20 - scene_id * 0.5,
            roll=0,
            upAxisIndex=2,
        )
        rgb_obs, depth_obs, mask_obs = make_obs(camera, view_matrix)
        rgb_name = dataset_dir + "rgb/" + str(i + scene_id * num_obs) + "_rgb.png"
        image.write_rgb(rgb_obs.astype(np.uint8), rgb_name)
        mask_name = dataset_dir + "gt/" + str(i + scene_id * num_obs) + "_gt.png"
        image.write_mask(mask_obs, mask_name)
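# A hedged driver sketch for save_obs above. It assumes a Camera instance from
# the surrounding module and a scene that is rebuilt elsewhere per scene_id;
# generate_dataset and num_scenes are illustrative names. The rgb/ and gt/
# subfolders match the paths save_obs builds, and dataset_dir should end with
# a slash since save_obs concatenates with "+".
import os

def generate_dataset(dataset_dir, camera, num_scenes=10, num_obs=20):
    os.makedirs(os.path.join(dataset_dir, "rgb"), exist_ok=True)
    os.makedirs(os.path.join(dataset_dir, "gt"), exist_ok=True)
    for scene_id in range(num_scenes):
        # (re)populate the scene for this id here, then record the orbit
        save_obs(dataset_dir, camera, num_obs, scene_id)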
def callback(data):
    # rospy.loginfo(rospy.get_caller_id() + "I heard %s", data.position)
    global old_position
    if data.position == old_position:
        return
    old_position = data.position
    print("CHANGE, moving... {}".format(data.position))
    # get the joint states position and move rbx1;
    # forces must be a per-joint list (the original passed a one-element
    # set literal {0.5}, which does not match the number of joints)
    joint_indices = list(range(1, rbx1_num_joints - 1))
    bullet.setJointMotorControlArray(rbx1_id,
                                     joint_indices,
                                     bullet.POSITION_CONTROL,
                                     targetPositions=data.position,
                                     forces=[0.5] * len(joint_indices))
    bullet.resetDebugVisualizerCamera(1.3, 180, -41, [0.52, -0.2, -0.33])
    base_pos, orn = bullet.getBasePositionAndOrientation(rbx1_id)
    view_matrix = bullet.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=tuple(map(sum, zip(base_pos, cam_pos_offset))),
        distance=cam_dist,
        yaw=cam_yaw,
        pitch=cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = bullet.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
        nearVal=0.1, farVal=1.5)
    (_, _, px, _, _) = bullet.getCameraImage(
        width=RENDER_WIDTH, height=RENDER_HEIGHT,
        viewMatrix=view_matrix, projectionMatrix=proj_matrix,
        renderer=bullet.ER_BULLET_HARDWARE_OPENGL)
def test(num_runs=100, shadow=1):
    times = np.zeros(num_runs)
    yaw_gen = itertools.cycle(range(0, 360, 10))
    for i, yaw in zip(range(num_runs), yaw_gen):
        pybullet.stepSimulation()
        start = time.time()
        viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(
            camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
        aspect = pixelWidth / pixelHeight
        projectionMatrix = pybullet.computeProjectionMatrixFOV(
            fov, aspect, nearPlane, farPlane)
        img_arr = pybullet.getCameraImage(
            pixelWidth, pixelHeight, viewMatrix, projectionMatrix,
            shadow=shadow, lightDirection=[1, 1, 1],
            renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
        # renderer=pybullet.ER_TINY_RENDERER)
        stop = time.time()
        duration = stop - start
        fps = 1. / duration if duration else 0
        times[i] = fps
    # times holds per-frame FPS samples, so this reports the mean frame rate
    print("mean: {0} for {1} runs".format(np.mean(times), num_runs))
    print("")
def _setup_viewing_camera(self):
    """
    Sets up the viewing camera that is used for the render function.

    :return:
    """
    if self._pybullet_client_w_o_goal_id is not None:
        client = self._pybullet_client_w_o_goal_id
    else:
        client = self._pybullet_client_full_id
    self._cam_dist = 0.6
    self._cam_yaw = 0
    self._cam_pitch = -60
    self._render_width = 320
    self._render_height = 240
    base_pos = [0, 0, 0]
    self.view_matrix = pybullet.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=0,
        upAxisIndex=2,
        physicsClientId=client)
    self.proj_matrix = pybullet.computeProjectionMatrixFOV(
        fov=60,
        aspect=float(self._render_width) / self._render_height,
        nearVal=0.1,
        farVal=100.0,
        physicsClientId=client)
    return
def run_example():
    """Simple example to save the rendered image in a folder called toremove"""
    dim = 5
    env = Shapes3D(gui=0, env_dim=dim)
    env.reset()
    env.add_sphere(0.2, [1, 1, 1, 1], [1, 1, 0.2])
    env.add_cube([0.2, 0.2, 0.2], [0, 0, 1, 1], [-1, 1, 0.2])
    env.add_capsule(0.2, 1, [1, 0, 0, 1], [-1, -1, 0.7])
    env.add_cylinder(0.4, 1.5, [1, 0, 0, 1], [1, -1, 0.75])
    intrinsic = p.computeProjectionMatrixFOV(fov=45, aspect=1, nearVal=.1,
                                             farVal=dim * 2)
    for i in itertools.count(0, 1):
        extrinsic = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=[1, 1, 1],
            distance=2.5,
            yaw=i * 5,
            pitch=-17,
            roll=0,
            upAxisIndex=2)
        img, depth, segm = env.compute_render(320, 320, intrinsic, extrinsic)
        if i % 100 == 0:
            # Save image
            plt.imsave("%i.png" % i, img)
        if i > 1000:
            break
    env.destroy()
def render(self):
    camTargetPos = [0, 0, 0]
    cameraUp = [0, 1, 0]
    cameraPos = [0, 1, 0]
    yaw = -90  # + self.step_counter % 360
    pitch = 90
    roll = -180
    upAxisIndex = 2
    camDistance = 0.5
    pixelWidth = 640
    pixelHeight = 480
    nearPlane = 0.01
    farPlane = 100
    fov = 60
    viewMatrix = p.computeViewMatrixFromYawPitchRoll(
        camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
    aspect = pixelWidth / pixelHeight
    projectionMatrix = p.computeProjectionMatrixFOV(
        fov, aspect, nearPlane, farPlane)
    img_arr = p.getCameraImage(pixelWidth, pixelHeight, viewMatrix,
                               projectionMatrix)
    w = img_arr[0]    # width of the image, in pixels
    h = img_arr[1]    # height of the image, in pixels
    rgb = img_arr[2]  # color data RGB
    dep = img_arr[3]  # depth data
    print("w=", w, "h=", h)
    np_img_arr = np.reshape(rgb, (h, w, 4))
    np_img_arr = np_img_arr * (1. / 255.)
    env_image = np_img_arr
    # plt.imshow(env_image)
    # plt.show()
    # env_image = np.random.randn(300, 300)  # TODO: actual image
    return env_image
def render_advance():
    main_start = time.time()
    for pitch in range(90, 180, 10):
        start = time.time()
        viewMatrix = p.computeViewMatrixFromYawPitchRoll(
            camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
        aspect = pixelWidth / pixelHeight
        projectionMatrix = p.computeProjectionMatrixFOV(
            fov, aspect, nearPlane, farPlane)
        img_arr = p.getCameraImage(pixelWidth, pixelHeight, viewMatrix,
                                   projectionMatrix,
                                   lightDirection=[0, 1, 0],
                                   renderer=p.ER_TINY_RENDERER)
        stop = time.time()
        print("renderImage %f" % (stop - start))
        w, h, rgb, _, _ = img_arr
        print('width = %d height = %d' % (w, h))
        np_img_arr = np.reshape(rgb, (h, w, 4))
        np_img_arr = np_img_arr / 255.
        plt.imshow(np_img_arr, interpolation='none')
        # plt.show()
        plt.pause(0.001)
def render(self, mode='rgb'):
    """
    Return the RGB image.

    :param mode:
    :return:
    """
    camera_target_position = self.camera_target_pos
    view_matrix1 = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=camera_target_position,
        distance=2.,
        yaw=145,    # degrees
        pitch=-36,  # degrees
        roll=0,
        upAxisIndex=2)
    proj_matrix1 = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
        nearVal=0.1, farVal=100.0)
    (_, _, px, depth, mask) = p.getCameraImage(width=RENDER_WIDTH,
                                               height=RENDER_HEIGHT,
                                               viewMatrix=view_matrix1,
                                               projectionMatrix=proj_matrix1,
                                               renderer=p.ER_TINY_RENDERER)
    px, depth, mask = np.array(px), np.array(depth), np.array(mask)
    return px
def view_at(self, target, distance, yaw, pitch, roll=0., up='z'):
    """
    Move camera to a specified position in space.

    Args:
        target (vec3): Target focus point in Cartesian world coordinates.
        distance (float): Distance from eye to focus point.
        yaw (float): Yaw angle in degrees left / right around up-axis.
        pitch (float): Pitch in degrees up / down.
        roll (float): Roll in degrees around forward vector.
        up (char): Axis up, one of x, y, z.
    """
    self._view_mat = pb.computeViewMatrixFromYawPitchRoll(
        target, distance, yaw, pitch, roll, 'xyz'.index(up),
        physicsClientId=self.client_id)
    self.infos.update(
        dict(target=tuple(target),
             distance=float(distance),
             yaw=float(yaw),
             pitch=float(pitch),
             roll=float(roll)))
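# A hedged usage sketch for view_at (the helper name `snapshot`, the pose and
# the image size are illustrative, not from the source): pair the stored view
# matrix with any projection matrix and hand both to getCameraImage.
import pybullet as pb

def snapshot(cam, width=320, height=240):
    """Render one frame through a camera exposing view_at/_view_mat/client_id."""
    cam.view_at(target=[0.5, 0.0, 0.2], distance=1.5, yaw=30., pitch=-45.)
    proj = pb.computeProjectionMatrixFOV(fov=60, aspect=width / height,
                                         nearVal=0.1, farVal=10.)
    _, _, rgb, _, _ = pb.getCameraImage(width, height,
                                        viewMatrix=cam._view_mat,
                                        projectionMatrix=proj,
                                        physicsClientId=cam.client_id)
    return rgb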
def render(self, mode="rgb_array", close=False): """ Render function :param mode: :param close: :return: """ if mode != "rgb_array": return np.array([]) cam_pos = [0.65, 0.236, 0.0] # distance = 1.2 # For fancy view # pitch = 230 # For fancy view distance = 1.2 pitch = 230 yaw = -56 roll = 0 upAxisIndex = 2 viewMat = p.computeViewMatrixFromYawPitchRoll(cam_pos, distance, pitch, yaw, roll, upAxisIndex) projMatrix = [ 0.75, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0 ] img = p.getCameraImage(width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=viewMat, projectionMatrix=projMatrix, renderer=self._p.ER_BULLET_HARDWARE_OPENGL) rgb_array = np.array(img[2], dtype=np.uint8) rgb_array = np.reshape(rgb_array, (RENDER_HEIGHT, RENDER_WIDTH, 4)) rgb_array = rgb_array[:, :, :3] return rgb_array
def render(self):
    # The camera target [-0.5, 0, 0] sits half a meter from the robot base at
    # [0, 0, 0]; a distance of 1.5 m above the table top suits a robot whose
    # longest reach is about 1 m.
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=[-0.5, 0, 0],
        distance=1.5,
        yaw=90,
        pitch=-90,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(fov=60,
                                               aspect=float(960) / 720,
                                               nearVal=0.1,
                                               farVal=100.0)
    (width, height, rgbPixels, depthPixels, SegMaskBuffer) = p.getCameraImage(
        width=960, height=720,
        viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL)
    rgb_array = np.array(rgbPixels, dtype=np.uint8)
    rgb_array = np.reshape(rgb_array, (height, width, 4))
    # These previews default to enabled (1); pass 0 to disable them.
    p.configureDebugVisualizer(p.COV_ENABLE_RGB_BUFFER_PREVIEW, 1)
    p.configureDebugVisualizer(p.COV_ENABLE_DEPTH_BUFFER_PREVIEW, 1)
    p.configureDebugVisualizer(p.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, 1)
    # return a tuple of RGB data and depth values
    return (rgb_array, depthPixels)
def _prepare_scene(self):
    """Build an example scene."""
    table = self._client.loadURDF(
        "table/table.urdf", flags=pb.URDF_USE_MATERIAL_COLORS_FROM_MTL)
    self._client.resetBasePositionAndOrientation(table, [0.4, 0.04, -0.7],
                                                 [0, 0, 0, 1])
    kuka = self._client.loadSDF("kuka_iiwa/kuka_with_gripper2.sdf")[0]
    self._client.resetBasePositionAndOrientation(kuka, [0.0, 0.0, 0.0],
                                                 [0, 0, 0, 1])
    Q = [
        0.006418, 0.413184, -0.011401, -1.589317, 0.005379, 1.137684,
        -0.006539, 0.000048, -0.299912, 0.000000, -0.000043, 0.299960,
        0.000000, -0.000200
    ]
    for i, q in enumerate(Q):
        self._client.resetJointState(kuka, i, q)
    self._proj_mat = pb.computeProjectionMatrixFOV(fov=60, aspect=1.0,
                                                   nearVal=0.1, farVal=10)
    self._view_mat = pb.computeViewMatrixFromYawPitchRoll((0.4, 0, 0), 2.0,
                                                          0, -40, 0, 2)
def setCameraPose(self, idx=0):
    assert idx < len(self.camera_poses)
    self.camera_idx = idx
    camera_pose = self.camera_poses[self.camera_idx]
    self.camTargetPos = camera_pose.position
    self.roll = camera_pose.rpy[0]
    self.pitch = camera_pose.rpy[1]
    self.yaw = camera_pose.rpy[2]
    self.camDistance = camera_pose.distance
    self.viewMatrix = p.computeViewMatrixFromYawPitchRoll(
        self.camTargetPos,
        self.camDistance,
        yaw=self.yaw,
        pitch=self.pitch,
        roll=self.roll,
        upAxisIndex=2,
        physicsClientId=self._id)
    p.resetDebugVisualizerCamera(cameraDistance=self.camDistance,
                                 cameraYaw=self.yaw,
                                 cameraPitch=self.pitch,
                                 cameraTargetPosition=self.camTargetPos)
    self.setCameraParams()
def render(self, mode='human'):
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=[0.7, 0, 0.05],
        distance=.7,
        yaw=90,
        pitch=-70,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(fov=60,
                                               aspect=float(960) / 720,
                                               nearVal=0.1,
                                               farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(width=960, height=720,
                                        viewMatrix=view_matrix,
                                        projectionMatrix=proj_matrix,
                                        renderer=p.ER_BULLET_HARDWARE_OPENGL)
    rgb_array = np.array(px, dtype=np.uint8)
    rgb_array = np.reshape(rgb_array, (720, 960, 4))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
def __init__(self,
             target_pos=(0, 0, 0),
             pitch=-30.0,
             yaw=60,
             roll=0,
             cam_dist=20,
             width=480,
             height=320,
             up_axis=2,
             near_plane=0.01,
             far_plane=100,
             fov=60):
    self.target_pos = target_pos
    self.pitch = pitch
    self.yaw = yaw
    self.roll = roll
    self.cam_dist = cam_dist
    self.width = width
    self.height = height
    self.up_axis = up_axis
    self.near_plane = near_plane
    self.far_plane = far_plane
    self.fov = fov
    self.view_mat = p.computeViewMatrixFromYawPitchRoll(
        target_pos, cam_dist, yaw, pitch, roll, up_axis)
    aspect = width / height
    self.proj_mat = p.computeProjectionMatrixFOV(fov, aspect, near_plane,
                                                 far_plane)
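# A minimal usage sketch for the constructor above (the class is unnamed in
# the snippet; `grab_frame` and this access pattern are assumptions): the
# precomputed matrices are simply handed to getCameraImage.
import pybullet as p

def grab_frame(cam):
    """Render one frame with the matrices stored by the camera object above."""
    _, _, rgb, depth, seg = p.getCameraImage(cam.width, cam.height,
                                             viewMatrix=cam.view_mat,
                                             projectionMatrix=cam.proj_mat)
    return rgb, depth, seg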
def run_example():
    times = []
    dim = 5
    env = Shapes3D(gui=1, env_dim=dim)
    env.reset()
    env.add_sphere(0.2, [1, 1, 1, 1], [1, 1, 0.2])
    env.add_cube([0.2, 0.2, 0.2], [0, 0, 1, 1], [-1, 1, 0.2])
    env.add_capsule(0.2, 1, [1, 0, 0, 1], [-1, -1, 0.7])
    env.add_cylinder(0.4, 1.5, [1, 0, 0, 1], [1, -1, 0.75])
    intrinsic = p.computeProjectionMatrixFOV(fov=45, aspect=1, nearVal=.1,
                                             farVal=dim * 2)
    for i in itertools.count(0, 1):
        start = time.time()
        extrinsic = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=[1, 1, 1],
            distance=2.5,
            yaw=i * 5,
            pitch=-17,
            roll=0,
            upAxisIndex=2)
        env.compute_render(320, 320, intrinsic, extrinsic)
        end = time.time()
        print("rendering took: %f" % (end - start))
        times.append(end - start)
        if i > 1000:
            break
    env.reset()  # Not needed, testing purposes only
    env.destroy()
    # average over the actual number of renders, not a hard-coded 1000
    print("avg of %f per render" % (sum(times) / len(times)))
def _render(self, mode, close=False):
    if mode == "human":
        self.isRender = True
    if mode != "rgb_array":
        return np.array([])
    base_pos = [0, 0, 0]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos = self.robot.body_xyz
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(width=self._render_width,
                                        height=self._render_height,
                                        viewMatrix=view_matrix,
                                        projectionMatrix=proj_matrix,
                                        renderer=p.ER_BULLET_HARDWARE_OPENGL)
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
def render(self, mode="human"): view_matrix = p.computeViewMatrixFromYawPitchRoll( cameraTargetPosition=[0.55, 0.35, 0.2], distance=1.2, yaw=0, pitch=-70, roll=0, upAxisIndex=2, ) proj_matrix = p.computeProjectionMatrixFOV(fov=60, aspect=float(960) / 720, nearVal=0.1, farVal=100.0) (_, _, px, _, _) = p.getCameraImage( width=960, height=720, viewMatrix=view_matrix, projectionMatrix=proj_matrix, renderer=p.ER_BULLET_HARDWARE_OPENGL, ) rgb_array = np.array(px, dtype=np.uint8) rgb_array = np.reshape(rgb_array, (720, 960, 4)) rgb_array = rgb_array[:, :, :3] if is_notebook() and os.path.exists("/tmp/render.txt"): display(rgb_data) return rgb_array
def get_camera_image(self):
    """Get the camera image of the scene.

    Returns
    -------
    tuple
        Three arrays corresponding to rgb, depth, and segmentation image.
    """
    upAxisIndex = 2
    camDistance = 500
    pixelWidth = 350
    pixelHeight = 700
    camTargetPos = [0, 80, 0]
    far = camDistance
    near = -far
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        camTargetPos, camDistance, 0, 90, 0, upAxisIndex)
    projection_matrix = p.computeProjectionMatrix(-90, 60, 150, -150, near,
                                                  far)
    # Get depth values using the OpenGL renderer
    width, height, rgbImg, depthImg, segImg = p.getCameraImage(
        pixelWidth, pixelHeight, view_matrix, projection_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL)
    return rgbImg, depthImg, segImg
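# A hedged post-processing sketch (an addition, not from the source): reshape
# the raw RGBA buffer returned by get_camera_image to (height, width, 4) and
# drop the alpha channel. The 350x700 defaults mirror the pixelWidth and
# pixelHeight hard-coded above.
import numpy as np

def to_rgb_array(rgbImg, width=350, height=700):
    """Convert a PyBullet RGBA buffer into an (H, W, 3) uint8 array."""
    return np.reshape(np.array(rgbImg, dtype=np.uint8),
                      (height, width, 4))[:, :, :3]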
def __init__(self,
             name,
             target,
             distance,
             roll,
             pitch,
             yaw,
             up_idx=2,
             image_width=1024 // 8,   # integer division keeps pixel sizes ints
             image_height=768 // 8,
             fov=45,
             near_plane=0.1,
             far_plane=10):
    '''
    Create camera matrix for a particular position in the simulation.
    Task definitions should produce these and
    '''
    self.name = name
    self.matrix = np.array(
        pb.computeViewMatrixFromYawPitchRoll(target,
                                             distance,
                                             yaw=yaw,
                                             pitch=pitch,
                                             roll=roll,
                                             upAxisIndex=up_idx))
    self.image_height = image_height
    self.image_width = image_width
    self.aspect_ratio = self.image_width / self.image_height
    self.fov = fov
    self.near_plane = near_plane
    self.far_plane = far_plane
    self.projection_matrix = np.array(
        pb.computeProjectionMatrixFOV(self.fov, self.aspect_ratio,
                                      self.near_plane, self.far_plane))
def render(self, mode="rgb_array", close=False): if mode != "rgb_array": return np.array([]) cam_dist = 1.3 cam_yaw = 180 cam_pitch = -40 RENDER_HEIGHT = 720 RENDER_WIDTH = 960 base_pos, orn = p.getBasePositionAndOrientation(self.robot_id) view_matrix = p.computeViewMatrixFromYawPitchRoll( cameraTargetPosition=base_pos, distance=cam_dist, yaw=cam_yaw, pitch=cam_pitch, roll=0, upAxisIndex=2) proj_matrix = p.computeProjectionMatrixFOV(fov=60, aspect=float(RENDER_WIDTH) / RENDER_HEIGHT, nearVal=0.1, farVal=100.0) (_, _, px, _, _) = p.getCameraImage(width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix, projectionMatrix=proj_matrix, renderer=p.ER_BULLET_HARDWARE_OPENGL) #renderer=self._p.ER_TINY_RENDERER) rgb_array = np.array(px, dtype=np.uint8) rgb_array = np.reshape(rgb_array, (RENDER_HEIGHT, RENDER_WIDTH, 4)) rgb_array = rgb_array[:, :, :3] return rgb_array
def _render(self, mode, close):
    if mode == "human":
        self.isRender = True
    if mode != "rgb_array":
        return np.array([])
    base_pos = [0, 0, 0]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos = self.robot.body_xyz
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=self._cam_dist,
        yaw=self._cam_yaw,
        pitch=self._cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=self._render_width,
        height=self._render_height,
        viewMatrix=view_matrix,
        projectionMatrix=proj_matrix,
        renderer=p.ER_BULLET_HARDWARE_OPENGL)
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
def birds_eye(agent: Agent, width=640, height=480) -> np.ndarray:
    position, _ = pybullet.getBasePositionAndOrientation(agent.vehicle_id)
    position = np.array([position[0], position[1], 3.0])
    view_matrix = pybullet.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=position,
        distance=3.0,
        yaw=0,
        pitch=-90,
        roll=0,
        upAxisIndex=2)
    proj_matrix = pybullet.computeProjectionMatrixFOV(
        fov=90, aspect=float(width) / height, nearVal=0.01, farVal=100.0)
    _, _, rgb_image, _, _ = pybullet.getCameraImage(
        width=width, height=height,
        renderer=pybullet.ER_BULLET_HARDWARE_OPENGL,
        viewMatrix=view_matrix,
        projectionMatrix=proj_matrix)
    rgb_array = np.reshape(rgb_image, (height, width, -1))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
def render_map(self):
    base_pos = [0, 0, -3]
    if hasattr(self, 'robot'):
        if hasattr(self.robot, 'body_xyz'):
            base_pos[0] = self.robot.body_xyz[0]
            base_pos[1] = self.robot.body_xyz[1]
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=35,
        yaw=0,
        pitch=-89,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(width=self._render_width,
                                        height=self._render_height,
                                        viewMatrix=view_matrix,
                                        projectionMatrix=proj_matrix,
                                        renderer=p.ER_BULLET_HARDWARE_OPENGL)
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
def render(self, mode='human'):
    base_pos = [2, 1.4, 3.5]
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=1.0,
        yaw=0,
        pitch=-80,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(fov=60,
                                               aspect=float(480) / 480,
                                               nearVal=0.1,
                                               farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(width=480, height=480,
                                        viewMatrix=view_matrix,
                                        projectionMatrix=proj_matrix,
                                        renderer=p.ER_BULLET_HARDWARE_OPENGL)
    rgb_array = np.array(px)
    rgb_array = rgb_array[:, :, :3]
    if mode == "rgb_array":
        return rgb_array
    elif mode == 'human':
        if self.viewer is None:
            self.viewer = rendering.SimpleImageViewer()
        self.viewer.imshow(rgb_array)
        return self.viewer.isopen
def render_physics(self):
    robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=robot_pos,
        distance=self.tracking_camera["distance"],
        yaw=self.tracking_camera["yaw"],
        pitch=self.tracking_camera["pitch"],
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height,
        nearVal=0.1, farVal=100.0)
    with Profiler("render physics: Get camera image"):
        (_, _, px, _, _) = p.getCameraImage(width=self._render_width,
                                            height=self._render_height,
                                            viewMatrix=view_matrix,
                                            projectionMatrix=proj_matrix,
                                            renderer=p.ER_TINY_RENDERER)
    # getCameraImage returns rows of pixels, so reshape as (height, width, -1);
    # the original swapped width and height here
    rgb_array = np.array(px).reshape(
        (self._render_height, self._render_width, -1))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array
def step(self, action):
    p.stepSimulation()
    start = time.time()
    yaw = next(self.iter)
    viewMatrix = p.computeViewMatrixFromYawPitchRoll(
        camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
    aspect = pixelWidth / pixelHeight
    projectionMatrix = p.computeProjectionMatrixFOV(fov, aspect, nearPlane,
                                                    farPlane)
    img_arr = p.getCameraImage(pixelWidth, pixelHeight, viewMatrix,
                               projectionMatrix, shadow=1,
                               lightDirection=[1, 1, 1],
                               renderer=p.ER_BULLET_HARDWARE_OPENGL)
    # renderer=pybullet.ER_TINY_RENDERER)
    self._observation = img_arr[2]
    return np.array(self._observation), 0, 0, {}
def test(num_runs=300, shadow=1, log=True, plot=False):
    if log:
        logId = pybullet.startStateLogging(
            pybullet.STATE_LOGGING_PROFILE_TIMINGS, "renderTimings")
    if plot:
        plt.ion()
        img = np.random.rand(200, 320)
        # img = np.random.standard_normal((50, 100))
        image = plt.imshow(img, interpolation='none', animated=True,
                           label="blah")
        ax = plt.gca()
    times = np.zeros(num_runs)
    yaw_gen = itertools.cycle(range(0, 360, 10))
    for i, yaw in zip(range(num_runs), yaw_gen):
        pybullet.stepSimulation()
        start = time.time()
        viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(
            camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
        aspect = pixelWidth / pixelHeight
        projectionMatrix = pybullet.computeProjectionMatrixFOV(
            fov, aspect, nearPlane, farPlane)
        img_arr = pybullet.getCameraImage(
            pixelWidth, pixelHeight, viewMatrix, projectionMatrix,
            shadow=shadow, lightDirection=[1, 1, 1],
            renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
        # renderer=pybullet.ER_TINY_RENDERER)
        stop = time.time()
        duration = stop - start
        fps = 1. / duration if duration else 0
        times[i] = fps
        if plot:
            rgb = img_arr[2]
            image.set_data(rgb)
            ax.plot([0])
            # plt.draw()
            # plt.show()
            plt.pause(0.01)
    # times holds per-frame FPS samples, so the mean is a frame rate
    mean_fps = float(np.mean(times))
    print("mean: {0} for {1} runs".format(mean_fps, num_runs))
    print("")
    if log:
        pybullet.stopStateLogging(logId)
    return mean_fps
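# Illustrative driver for the benchmark above (assumes a connected client and
# a populated scene, like the surrounding script): compare shadowed vs.
# unshadowed rendering throughput.
fps_on = test(num_runs=300, shadow=1, log=False)
fps_off = test(num_runs=300, shadow=0, log=False)
print("shadow on: %.1f FPS, shadow off: %.1f FPS" % (fps_on, fps_off))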
def _reset(self):
    """Environment reset called at the beginning of an episode."""
    # Set the camera settings.
    look = [0.23, 0.2, 0.54]
    distance = 1.
    pitch = -56 + self._cameraRandom * np.random.uniform(-3, 3)
    yaw = 245 + self._cameraRandom * np.random.uniform(-3, 3)
    roll = 0
    self._view_matrix = p.computeViewMatrixFromYawPitchRoll(
        look, distance, yaw, pitch, roll, 2)
    fov = 20. + self._cameraRandom * np.random.uniform(-2, 2)
    aspect = self._width / self._height
    near = 0.01
    far = 10
    self._proj_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)

    self._attempted_grasp = False
    self._env_step = 0
    self.terminated = 0

    p.resetSimulation()
    p.setPhysicsEngineParameter(numSolverIterations=150)
    p.setTimeStep(self._timeStep)
    p.loadURDF(os.path.join(self._urdfRoot, "plane.urdf"), [0, 0, -1])
    p.loadURDF(os.path.join(self._urdfRoot, "table/table.urdf"),
               0.5000000, 0.00000, -.820000, 0.000000, 0.000000, 0.0, 1.0)
    p.setGravity(0, 0, -10)
    self._kuka = kuka.Kuka(urdfRootPath=self._urdfRoot,
                           timeStep=self._timeStep)
    self._envStepCounter = 0
    p.stepSimulation()

    # Choose the objects in the bin.
    urdfList = self._get_random_object(self._numObjects, self._isTest)
    self._objectUids = self._randomly_place_objects(urdfList)
    self._observation = self._get_observation()
    return np.array(self._observation)
roll = 0
upAxisIndex = 2
camDistance = 4
pixelWidth = 320
pixelHeight = 200
nearPlane = 0.01
farPlane = 100
fov = 60

main_start = time.time()
while True:
    for yaw in range(0, 360, 10):
        pybullet.stepSimulation()
        start = time.time()
        viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(
            camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
        aspect = pixelWidth / pixelHeight
        projectionMatrix = pybullet.computeProjectionMatrixFOV(
            fov, aspect, nearPlane, farPlane)
        img_arr = pybullet.getCameraImage(
            pixelWidth, pixelHeight, viewMatrix, projectionMatrix,
            shadow=1, lightDirection=[1, 1, 1],
            renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
        stop = time.time()
        print("renderImage %f" % (stop - start))
        w = img_arr[0]    # width of the image, in pixels
        h = img_arr[1]    # height of the image, in pixels
        rgb = img_arr[2]  # color data RGB
        dep = img_arr[3]  # depth data
        # print(rgb)
        print('width = %d height = %d' % (w, h))
        # note that sending the data to matplotlib with imshow is really slow,
        # so we use set_data