Example #1
import os
import time

import pybullet as p
import pybullet_data


def test(args):
	p.connect(p.GUI)
	p.setAdditionalSearchPath(pybullet_data.getDataPath())
	fileName = os.path.join("mjcf", args.mjcf)
	print("fileName")
	print(fileName)
	p.loadMJCF(fileName)
	while (1):
		p.stepSimulation()
		p.getCameraImage(320,240)
		time.sleep(0.01)
Example #2
	def _render(self, mode, close):
		if (mode=="human"):
			self.isRender = True
		if mode != "rgb_array":
			return np.array([])
		
		base_pos=[0,0,0]
		if (hasattr(self,'robot')):
			if (hasattr(self.robot,'body_xyz')):
				base_pos = self.robot.body_xyz
		
		view_matrix = p.computeViewMatrixFromYawPitchRoll(
			cameraTargetPosition=base_pos,
			distance=self._cam_dist,
			yaw=self._cam_yaw,
			pitch=self._cam_pitch,
			roll=0,
			upAxisIndex=2)
		proj_matrix = p.computeProjectionMatrixFOV(
			fov=60, aspect=float(self._render_width)/self._render_height,
			nearVal=0.1, farVal=100.0)
		(_, _, px, _, _) = p.getCameraImage(width=self._render_width,
			height=self._render_height,
			viewMatrix=view_matrix,
			projectionMatrix=proj_matrix,
			renderer=p.ER_BULLET_HARDWARE_OPENGL)
		rgb_array = np.array(px)
		rgb_array = rgb_array[:, :, :3]
		return rgb_array
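Note: np.array(px) only comes back as a (height, width, 4) image when pybullet is built with NumPy support; otherwise getCameraImage returns a flat buffer. A hedged variant of the last lines above that reshapes explicitly, mirroring the later examples on this page:

rgb_array = np.array(px, dtype=np.uint8)
rgb_array = np.reshape(rgb_array, (self._render_height, self._render_width, 4))
rgb_array = rgb_array[:, :, :3]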
 def getExtendedObservation(self):
    
    #camEyePos = [0.03,0.236,0.54]
    #distance = 1.06
    #pitch=-56
    #yaw = 258
    #roll=0
    #upAxisIndex = 2
    #camInfo = p.getDebugVisualizerCamera()
    #print("width,height")
    #print(camInfo[0])
    #print(camInfo[1])
    #print("viewMatrix")
    #print(camInfo[2])
    #print("projectionMatrix")
    #print(camInfo[3])
    #viewMat = camInfo[2]
    #viewMat = p.computeViewMatrixFromYawPitchRoll(camEyePos,distance,yaw, pitch,roll,upAxisIndex)
    viewMat = [-0.5120397806167603, 0.7171027660369873, -0.47284144163131714, 0.0, -0.8589617609977722, -0.42747554183006287, 0.28186774253845215, 0.0, 0.0, 0.5504802465438843, 0.8348482847213745, 0.0, 0.1925382763147354, -0.24935829639434814, -0.4401884973049164, 1.0]
    #projMatrix = camInfo[3]#[0.7499999403953552, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]
    projMatrix = [0.75, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]
    
    img_arr = p.getCameraImage(width=self._width,height=self._height,viewMatrix=viewMat,projectionMatrix=projMatrix)
    rgb=img_arr[2]
    np_img_arr = np.reshape(rgb, (self._height, self._width, 4))
    self._observation = np_img_arr
    return self._observation
 def _get_observation(self):
   """Return the observation as an image.
   """
   img_arr = p.getCameraImage(width=self._width,
                                     height=self._height,
                                     viewMatrix=self._view_matrix,
                                     projectionMatrix=self._proj_matrix)
   rgb = img_arr[2]
   np_img_arr = np.reshape(rgb, (self._height, self._width, 4))
   return np_img_arr[:, :, :3]
Example #5
 def step(self,action):
     p.stepSimulation()
     start = time.time()
     yaw = next(self.iter)
     viewMatrix = p.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
     aspect = pixelWidth / pixelHeight
     projectionMatrix = p.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane)
     img_arr = p.getCameraImage(pixelWidth, pixelHeight, viewMatrix,
         projectionMatrix, shadow=1,lightDirection=[1,1,1],
         renderer=p.ER_BULLET_HARDWARE_OPENGL)
         #renderer=pybullet.ER_TINY_RENDERER)
     self._observation = img_arr[2]
     return np.array(self._observation), 0, 0, {}
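This step() relies on module-level camera constants defined elsewhere in its file. A hedged sketch of plausible values (illustrative only, not taken from the original source, but in line with the other examples on this page):

camTargetPos = [0, 0, 0]
camDistance = 4
pitch = -10.0
roll = 0
upAxisIndex = 2
fov = 60
nearPlane = 0.01
farPlane = 100
pixelWidth = 320
pixelHeight = 240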
Example #6
def test(num_runs=300, shadow=1, log=True, plot=False):
    if log:
        logId = pybullet.startStateLogging(pybullet.STATE_LOGGING_PROFILE_TIMINGS, "renderTimings")

    if plot:
        plt.ion()

        img = np.random.rand(200, 320)
        #img = np.random.standard_normal((50,100))
        image = plt.imshow(img,interpolation='none',animated=True,label="blah")
        ax = plt.gca()


    times = np.zeros(num_runs)
    yaw_gen = itertools.cycle(range(0,360,10))
    for i, yaw in zip(range(num_runs),yaw_gen):
        pybullet.stepSimulation()
        start = time.time()
        viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
        aspect = pixelWidth / pixelHeight
        projectionMatrix = pybullet.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane)
        img_arr = pybullet.getCameraImage(pixelWidth, pixelHeight, viewMatrix,
            projectionMatrix, shadow=shadow,lightDirection=[1,1,1],
            renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
            #renderer=pybullet.ER_TINY_RENDERER)
        stop = time.time()
        duration = (stop - start)
        if duration:
            fps = 1. / duration
            #print("fps=", fps)
        else:
            fps = 0
            #print("fps=", fps)
        #print("duration=", duration)
        #print("fps=", fps)
        times[i] = fps

        if plot:
            rgb = img_arr[2]
            image.set_data(rgb)#np_img_arr)
            ax.plot([0])
            #plt.draw()
            #plt.show()
            plt.pause(0.01)

    mean_time = float(np.mean(times))
    print("mean: {0} for {1} runs".format(mean_time, num_runs))
    print("")
    if log:
        pybullet.stopStateLogging(logId)
    return mean_time
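A minimal hedged usage sketch for the test() benchmark above; it assumes the module's imports (pybullet, numpy, matplotlib, itertools) and camera constants are already in place:

if __name__ == "__main__":
    pybullet.connect(pybullet.GUI)
    test(num_runs=100, shadow=1, log=False, plot=True)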
Example #7
def generate_random_object(view_matrix, projection_matrix):
    p.resetSimulation()
    hw = 128
    # Load object
    p.loadURDF("plane.urdf")
    # x_bound: [0.5, 0.65]
    # y_bound: [-0.17, 0.21]
    obj_x = random.uniform(0.55, 0.6)
    obj_y = random.uniform(-0.1, 0.15)

    obj_pos = [obj_x, obj_y, 0.02]
    obj_rot = random.uniform(-math.pi / 3, math.pi / 3)
    obj_euler_orient = [math.pi / 2, 0, obj_rot]
    obj_quater_orient = p.getQuaternionFromEuler(obj_euler_orient)
    obj = p.loadURDF("./0002/0002.urdf", obj_pos, obj_quater_orient)

    # Get image of the objects without loading the robot
    _, _, _, _, seg_img = p.getCameraImage(width=hw,
                                           height=hw,
                                           viewMatrix=view_matrix,
                                           projectionMatrix=projection_matrix)

    seg_img = np.reshape(seg_img, (hw, hw, 1))
    return seg_img, obj, obj_pos, obj_rot
    def render(self, mode='human'):
        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=[0, 0, 0],
            distance=5,
            yaw=90,
            pitch=-20,
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(fov=60,
                                                   aspect=float(960) / 720,
                                                   nearVal=0.1,
                                                   farVal=100.0)
        (_, _, px, _,
         _) = p.getCameraImage(width=960,
                               height=720,
                               viewMatrix=view_matrix,
                               projectionMatrix=proj_matrix,
                               renderer=p.ER_BULLET_HARDWARE_OPENGL)

        rgb_array = np.array(px, dtype=np.uint8)
        rgb_array = np.reshape(rgb_array, (720, 960, 4))

        rgb_array = rgb_array[:, :, :3]
        return rgb_array
 def getCameraImage(self):
     """Return camera image from CAMERA_JOINT_INDEX.
     """
     # compute eye and target position for viewMatrix
     pos, orn, _, _, _, _ = p.getLinkState(self.robotId,
                                           self.CAMERA_JOINT_INDEX)
     cameraPos = np.array(pos)
     cameraMat = np.array(p.getMatrixFromQuaternion(orn)).reshape(3, 3).T
     eyePos = cameraPos + self.CAMERA_EYE_SCALE * cameraMat[
         self.CAMERA_EYE_INDEX]
     targetPos = cameraPos + self.CAMERA_TARGET_SCALE * cameraMat[
         self.CAMERA_EYE_INDEX]
     up = self.CAMERA_UP_SCALE * cameraMat[self.CAMERA_UP_INDEX]
     # p.addUserDebugLine(eyePos, targetPos, lineColorRGB=[1, 0, 0], lifeTime=0.1) # red line for camera vector
     # p.addUserDebugLine(eyePos, eyePos + up * 0.5, lineColorRGB=[0, 0, 1], lifeTime=0.1) # blue line for up vector
     viewMatrix = p.computeViewMatrix(eyePos, targetPos, up)
     image = p.getCameraImage(self.CAMERA_PIXEL_WIDTH,
                              self.CAMERA_PIXEL_HEIGHT,
                              viewMatrix,
                              self.projectionMatrix,
                              shadow=1,
                              lightDirection=[1, 1, 1],
                              renderer=p.ER_BULLET_HARDWARE_OPENGL)
     return image
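The method above expects self.projectionMatrix to have been prepared beforehand. A hedged sketch of how it might be set up (for example in __init__); the field of view and clipping values are illustrative:

self.projectionMatrix = p.computeProjectionMatrixFOV(
    fov=60.0,
    aspect=float(self.CAMERA_PIXEL_WIDTH) / self.CAMERA_PIXEL_HEIGHT,
    nearVal=0.01,
    farVal=10.0)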
    def render(self, mode="human"):
        view_mat = p.computeViewMatrix(
            cameraTargetPosition=[0, 0, 0.2],
            cameraEyePosition=[2, 0, 0.2],
            cameraUpVector=[0, 0, 1],
        )
        nearVal = 0.05
        farVal = 100
        proj_mat = p.computeProjectionMatrixFOV(fov=45,
                                                aspect=1,
                                                nearVal=nearVal,
                                                farVal=farVal)
        (_, _, rgb_px, _, _) = p.getCameraImage(
            width=512,
            height=512,
            viewMatrix=view_mat,
            projectionMatrix=proj_mat,
            renderer=p.ER_BULLET_HARDWARE_OPENGL,
        )
        rgb_array = np.array(rgb_px, "uint8")
        rgb_array = rgb_array.reshape(512, 512, 4)
        rgb_array = rgb_array[:, :, :3]

        return rgb_array
def _get_seg_mask_for_target(target_position, cam_position, world=None):
    """
    Calculates the view and projection matrices and returns the segmentation mask.
    The segmentation mask indicates, for every pixel, which object is visible.
    :param cam_position: The position of the camera as a list of x,y,z and orientation as quaternion
    :param target_position: The position the camera should point at, as a list of x,y,z and orientation as quaternion
    :return: The segmentation mask from the camera position
    """
    world, world_id = _world_and_id(world)
    # TODO: Might depend on robot cameras, if so please add these camera parameters to RobotDescription object
    # TODO: of your robot with a CameraDescription object.
    fov = 300
    aspect = 256 / 256
    near = 0.2
    far = 10

    view_matrix = p.computeViewMatrix(cam_position[0], target_position[0],
                                      [0, 0, -1])
    projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)
    return p.getCameraImage(256,
                            256,
                            view_matrix,
                            projection_matrix,
                            physicsClientId=world_id)[4]
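A hedged usage sketch: both arguments are [position, orientation] pairs, so the function's [0] indexing picks out the xyz part. The values below are illustrative and assume a world has already been set up for _world_and_id to resolve:

seg_mask = _get_seg_mask_for_target(
    target_position=[[0.0, 0.0, 0.0], [0, 0, 0, 1]],
    cam_position=[[1.0, 1.0, 1.0], [0, 0, 0, 1]])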
Example #12
    def render_physics(self):
        robot_pos, _ = p.getBasePositionAndOrientation(self.robot_tracking_id)

        view_matrix = p.computeViewMatrixFromYawPitchRoll(
            cameraTargetPosition=robot_pos,
            distance=self.tracking_camera["distance"],
            yaw=self.tracking_camera["yaw"],
            pitch=self.tracking_camera["pitch"],
            roll=0,
            upAxisIndex=2)
        proj_matrix = p.computeProjectionMatrixFOV(
            fov=60,
            aspect=float(self._render_width) / self._render_height,
            nearVal=0.1,
            farVal=100.0)
        with Profiler("render physics: Get camera image"):
            (_, _, px, _, _) = p.getCameraImage(width=self._render_width,
                                                height=self._render_height,
                                                viewMatrix=view_matrix,
                                                projectionMatrix=proj_matrix,
                                                renderer=p.ER_TINY_RENDERER)
        rgb_array = np.array(px)
        rgb_array = rgb_array[:, :, :3]
        return rgb_array
Example #13
def get_camera_image(cam_target_pos, cam_dist, cam_yaw, cam_pitch, cam_roll,
                     fov, render_width, render_height, nearval, farval):
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=cam_target_pos,
        distance=cam_dist,
        yaw=cam_yaw,
        pitch=cam_pitch,
        roll=cam_roll,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(fov=fov,
                                               aspect=float(render_width) /
                                               float(render_height),
                                               nearVal=nearval,
                                               farVal=farval)
    (_, _, px, _, _) = p.getCameraImage(width=render_width,
                                        height=render_height,
                                        renderer=p.ER_BULLET_HARDWARE_OPENGL,
                                        viewMatrix=view_matrix,
                                        projectionMatrix=proj_matrix)
    rgb_array = np.reshape(np.array(px, dtype=np.uint8),
                           (render_height, render_width, -1))
    rgb_array = rgb_array[:, :, :3]

    return rgb_array
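A hedged usage sketch with illustrative camera parameters (assumes a connected pybullet client, since the hardware OpenGL renderer is requested):

rgb = get_camera_image(cam_target_pos=[0, 0, 0], cam_dist=3.0, cam_yaw=50,
                       cam_pitch=-35, cam_roll=0, fov=60,
                       render_width=320, render_height=240,
                       nearval=0.1, farval=100.0)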
Example #14
def plot_humanoid():
    camTargetPos = [0, 0, 0]
    cameraUp = [0, 0, 1]
    cameraPos = [1, 1, 1]
    p.setGravity(0, 0, -10)
    pitch = -10.0
    roll = 0
    upAxisIndex = 2
    camDistance = 3
    pixelWidth = 1080
    pixelHeight = 790
    nearPlane = 0.01
    farPlane = 100
    fov = 90
    cubeStartPos = [0, 0, 1]
    cubeStartOrientation = p.getQuaternionFromEuler([0, 0, 0])
    yaw = 0

    for r in range(2):
        for c in range(2):
            yaw += 60
            pylab.figure(figsize=(10, 5))
            viewMatrix = p.computeViewMatrixFromYawPitchRoll(
                camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
            aspect = pixelWidth / pixelHeight
            projectionMatrix = p.computeProjectionMatrixFOV(
                fov, aspect, nearPlane, farPlane)
            img_arr = p.getCameraImage(pixelWidth, pixelHeight, viewMatrix,
                                       projectionMatrix)
            w = img_arr[0]
            h = img_arr[1]
            rgb = img_arr[2]
            dep = img_arr[3]
            np_img_arr = np.reshape(rgb, (h, w, 4))
            np_img_arr = np_img_arr * (1. / 255.)
            pylab.imshow(np_img_arr, interpolation='none', animated=True)
Example #15
    def get_camera_image(self):
        image = np.zeros((100, 100, 4))
        proj_matrix = p.computeProjectionMatrixFOV(fov=80,
                                                   aspect=1,
                                                   nearVal=0.01,
                                                   farVal=100)
        pos, ori = [
            list(l)
            for l in p.getBasePositionAndOrientation(self.id, self.client)
        ]
        pos[2] = 0.2

        # Rotate camera direction
        rot_mat = np.array(p.getMatrixFromQuaternion(ori)).reshape(3, 3)
        camera_vec = np.matmul(rot_mat, [1, 0, 0])
        up_vec = np.matmul(rot_mat, np.array([0, 0, 1]))
        view_matrix = p.computeViewMatrix(pos, pos + camera_vec, up_vec)

        w, h, rgb, depth, self.segmask = p.getCameraImage(
            100, 100, view_matrix, proj_matrix)
        rgb = rgb[:, :, :-1] / 255.
        rgbd = np.concatenate([rgb, np.expand_dims(depth, 2)], axis=2)

        return rgbd
Example #16
pixelWidth = 320
pixelHeight = 200
nearPlane = 0.01
farPlane = 100

fov = 60

main_start = time.time()
while(1): 
  for yaw in range (0,360,10) :
    pybullet.stepSimulation()
    start = time.time()
    viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
    aspect = pixelWidth / pixelHeight
    projectionMatrix = pybullet.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane)
    img_arr = pybullet.getCameraImage(pixelWidth, pixelHeight, viewMatrix,projectionMatrix, shadow=1,lightDirection=[1,1,1],renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
    stop = time.time()
    print ("renderImage %f" % (stop - start))

    w=img_arr[0] #width of the image, in pixels
    h=img_arr[1] #height of the image, in pixels
    rgb=img_arr[2] #color data RGB
    dep=img_arr[3] #depth data
    #print(rgb)
    print ('width = %d height = %d' % (w,h))

    #note that sending the data using imshow to matplotlib is really slow, so we use set_data

    #plt.imshow(rgb,interpolation='none')
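As the comment above notes, recreating the image with imshow on every frame is slow; a hedged sketch of the set_data pattern also used in Example #6:

import matplotlib.pyplot as plt

plt.ion()
image = plt.imshow(rgb, interpolation='none', animated=True)
# inside the render loop, update the existing image instead of calling imshow again
image.set_data(rgb)
plt.pause(0.01)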

Example #17
    def __init__(self,
                 display=False,
                 steps_to_roll=10,
                 episode_len=600,
                 pause_frac=0.66,
                 n_cells=1000):

        self.display = display
        if self.display:
            p.connect(p.GUI)
        else:
            p.connect(p.DIRECT)

        # set up the world, endEffector is the tip of the left finger
        self.baxterId, self.endEffectorId, self.objectId = setUpWorld()

        # change friction  of object
        p.changeDynamics(self.objectId, -1, lateralFriction=1)

        # save the state of the environment for future reset (save time on URDF loading)
        self.savestate = p.saveState()

        # get joint ranges
        self.lowerLimits, self.upperLimits, self.jointRanges, self.restPoses = getJointRanges(
            self.baxterId, includeFixed=False)
        # deal with display
        if self.display:
            p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
            p.resetDebugVisualizerCamera(2., 180, 0., [0.52, 0.2, np.pi / 4.])
            p.getCameraImage(320, 200, renderer=p.ER_BULLET_HARDWARE_OPENGL)
            sleep(1.)

        # number of pybullet steps to roll at each gym step
        self.steps_to_roll = steps_to_roll

        # number of gym steps to roll before the end of the movement
        self.move_len = int(episode_len * pause_frac)

        # number of pybullet steps to roll after the movement keeping the same action
        self.pause_steps = (episode_len - self.move_len) * steps_to_roll

        # counts the number of gym steps
        self.step_counter = 0

        # variable to store grasping orientation
        self.diff_or_at_grab = None

        # create the discretisation for the goal spaces (quaternion spaces)
        bounds = [[-1, 1], [-1, 1], [-1, 1], [-1, 1]]
        self.cvt = utils.CVT(n_cells, bounds)

        self.grasped = False

        # observation and action space
        self.observation_space = spaces.Dict({
            'observation':
            spaces.Box(-1, 1, shape=(34, )),
            'desired_goal':
            spaces.Discrete(n_cells),
            'achieved_goal':
            spaces.Discrete(n_cells)
        })
        self.action_space = spaces.Box(-1, 1, shape=(8, ))
    jointOffsets.append(0)
    jointOffsets.append(-0.7)
    jointOffsets.append(0.7)

maxForceId = p.addUserDebugParameter("maxForce", 0, 100, 20)

for j in range(p.getNumJoints(quadruped)):
    p.changeDynamics(quadruped, j, linearDamping=0, angularDamping=0)
    info = p.getJointInfo(quadruped, j)
    #print(info)
    jointName = info[1]
    jointType = info[2]
    if (jointType == p.JOINT_PRISMATIC or jointType == p.JOINT_REVOLUTE):
        jointIds.append(j)

p.getCameraImage(480, 320)
p.setRealTimeSimulation(0)

joints = []

with open(pd.getDataPath() + "/laikago/data1.txt", "r") as filestream:
    for line in filestream:
        print("line=", line)
        maxForce = p.readUserDebugParameter(maxForceId)
        currentline = line.split(",")
        #print (currentline)
        #print("-----")
        frame = currentline[0]
        t = currentline[1]
        #print("frame[",frame,"]")
        joints = currentline[2:14]
        images_depth5 = []
        images_rgb6 = []
        images_depth6 = []
        images_rgb7 = []
        images_depth7 = []
        images_rgb8 = []
        images_depth8 = []

        for i in range(0, 200):
            print(i)
            # p.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING)
            if i % 4 == 0:  # 30 frames a second:
                # camera 1
                images = p.getCameraImage(width,
                                          height,
                                          view_matrix_camera_1,
                                          projection_matrix,
                                          shadow=True,
                                          renderer=p.ER_BULLET_HARDWARE_OPENGL)
                rgb_opengl = np.reshape(images[2],
                                        (height, width, 4)) * 1. / 255.
                depth_buffer_opengl = np.reshape(images[3], [width, height])
                depth_opengl = far * near / (
                    far - (far - near) * depth_buffer_opengl)
                images_rgb1.append(rgb_opengl)
                images_depth1.append(depth_opengl)

                # camera 2
                images = p.getCameraImage(width,
                                          height,
                                          view_matrix_camera_2,
                                          projection_matrix,
Example #20
                                positionGain=0.03,
                                velocityGain=1)


if __name__ == "__main__":

    eef_pos, eef_orn = p.getLinkState(kukaId, kukaEndEffectorIndex)[4:6]
    eef_orn = p.getEulerFromQuaternion(eef_orn)
    eef_pos = np.array(eef_pos)
    eef_orn = np.array(eef_orn)

    while True:
        images = p.getCameraImage(
            width,
            height,
            view_matrix,
            projection_matrix,
            # shadow=True,
            renderer=p.ER_BULLET_HARDWARE_OPENGL)

        # get the uid of the clicked object
        object_uid = -1
        mouse_events = p.getMouseEvents()
        for e in mouse_events:
            if ((e[0] == 2) and (e[3] == 0)
                    and (e[4] & p.KEY_WAS_TRIGGERED)):  # left-click check
                mouse_x = e[1]
                mouse_y = e[2]
                object_uid = get_object_uid(
                    mouse_x, mouse_y)  # -1 for no object was clicked
    def _step(self,action):
        done = False
        #reward (float): amount of reward achieved by the previous action. The scale varies between environments, but the goal is always to increase your total reward.
        #done (boolean): whether it's time to reset the environment again. Most (but not all) tasks are divided up into well-defined episodes, and done being True indicates the episode has terminated. (For example, perhaps the pole tipped too far, or you lost your last life.)
        #observation (object): an environment-specific object representing your observation of the environment. For example, pixel data from a camera, joint angles and joint velocities of a robot, or the board state in a board game.
        #info (dict): diagnostic information useful for debugging. It can sometimes be useful for learning (for example, it might contain the raw probabilities behind the environment's last state change). However, official evaluations of your agent are not allowed to use this for learning.
        def convertSensor(finger_index):
            if finger_index == self.indexId:
                return random.uniform(-1,1)
                #return 0
            else:
                #return random.uniform(-1,1)
                return 0
        def convertAction(action):
            #converted = (action-30)/10
            #converted = (action-16)/10
            if action == 6:
                converted = -1
            elif action == 25:
                converted = 1
            #print("action ",action)
            #print("converted ",converted)
            return converted

        aspect = 1
        camTargetPos = [0,0,0]
        yaw = 40
        pitch = 10.0
        roll=0
        upAxisIndex = 2
        camDistance = 4
        pixelWidth = 320
        pixelHeight = 240
        nearPlane = 0.0001
        farPlane = 0.022
        lightDirection = [0,1,0]
        lightColor = [1,1,1]#optional
        fov = 50  #10 or 50
        hand_po = pb.getBasePositionAndOrientation(self.agent)
        ho = pb.getQuaternionFromEuler([0.0, 0.0, 0.0])        
       
        # So when trying to modify the physics of the engine, we run into some problems. If we leave
        # angular damping at default (0.04) then the hand rotates when moving up and down, due to torque.
        # If we set angularDamping to 100.0 then the hand will bounce off into the background due to 
        # all the stored energy, when it makes contact with the object. The below set of parameters seem
        # to have a reasonably consistent performance in keeping the hand level and not inducing unwanted
        # behavior during contact. 

        pb.changeDynamics(self.agent, linkIndex=-1, angularDamping=0.9999)

        if action == 65298 or action == 0: #down
            pb.changeConstraint(self.hand_cid,(hand_po[0][0]+self.move,hand_po[0][1],hand_po[0][2]),ho, maxForce=self.tinyForce)
        elif action == 65297 or action == 1: #up            
            pb.changeConstraint(self.hand_cid,(hand_po[0][0]-self.move,hand_po[0][1],hand_po[0][2]),ho, maxForce=self.tinyForce)
        elif action == 65295 or action == 2: #left            
            pb.changeConstraint(self.hand_cid,(hand_po[0][0],hand_po[0][1]+self.move,hand_po[0][2]),ho, maxForce=self.tinyForce)
        elif action== 65296 or action == 3: #right            
            pb.changeConstraint(self.hand_cid,(hand_po[0][0],hand_po[0][1]-self.move,hand_po[0][2]),ho, maxForce=self.tinyForce)
        elif action == 44 or action == 4: #<        
            pb.changeConstraint(self.hand_cid,(hand_po[0][0],hand_po[0][1],hand_po[0][2]+self.move),ho, maxForce=self.tinyForce)            
        elif action == 46 or action == 5: #>            
            pb.changeConstraint(self.hand_cid,(hand_po[0][0],hand_po[0][1],hand_po[0][2]-self.move),ho, maxForce=self.tinyForce)
        elif action >= 6 and action <= 7:
            # keeps the hand from moving towards origin
            pb.changeConstraint(self.hand_cid,(hand_po[0][0],hand_po[0][1],hand_po[0][2]),ho, maxForce=200)
            if action == 7:
                action = 25 #bad kludge redo all this code

            index = convertAction(action) # action = 6 or 25 due to kludge -> return -1 or 1                        

            # Getting positions of the index joints to use for moving to a relative position
            joint17Pos = pb.getJointState(self.agent, 17)[0]
            joint19Pos = pb.getJointState(self.agent, 19)[0]
            joint21Pos = pb.getJointState(self.agent, 21)[0]
            # need to keep the multiplier relatively small otherwise the joint will continue to move
            # when you take other actions
            finger_jump = 0.1
            newJoint17Pos = joint17Pos + index*finger_jump
            newJoint19Pos = joint19Pos + index*finger_jump
            newJoint21Pos = joint21Pos + index*finger_jump
            
            # following values found by experimentation
            if newJoint17Pos <= -0.7:
                newJoint17Pos = -0.7
            elif newJoint17Pos >= 0.57:
                newJoint17Pos = 0.57
            if newJoint19Pos <= 0.13:
                newJoint19Pos = 0.13
            elif newJoint19Pos >= 0.42:
                newJoint19Pos = 0.42            
            if newJoint21Pos <= -0.8:
                newJoint21Pos = -0.8
            elif newJoint21Pos >= 0.58:
                newJoint21Pos = 0.58

            pb.setJointMotorControl2(self.agent,17,pb.POSITION_CONTROL,newJoint17Pos)
            pb.setJointMotorControl2(self.agent,19,pb.POSITION_CONTROL,newJoint19Pos)
            pb.setJointMotorControl2(self.agent,21,pb.POSITION_CONTROL,newJoint21Pos)

        if self.downCameraOn: viewMatrix = down_view()
        else: viewMatrix = self.ahead_view()
        projectionMatrix = pb.computeProjectionMatrixFOV(fov,aspect,nearPlane,farPlane)
        w,h,img_arr,depths,mask = pb.getCameraImage(200,200, viewMatrix,projectionMatrix, lightDirection,lightColor,renderer=pb.ER_TINY_RENDERER)
        #w,h,img_arr,depths,mask = pb.getCameraImage(200,200, viewMatrix,projectionMatrix, lightDirection,lightColor,renderer=pb.ER_BULLET_HARDWARE_OPENGL)
        new_obs = np.absolute(depths-1.0)
        new_obs[new_obs > 0] =1
        self.current_observation = new_obs.flatten()
        info = [42] #answer to life,TODO use real values
        pb.stepSimulation()

        self.steps +=1
        #reward if moving towards the object or touching the object
        reward = 0
        max_steps = 1000
        if self.is_touching():
            touch_reward = 10
            if 'debug' in self.options and self.options['debug'] == True:
                print("TOUCHING!!!!")
        else:
            touch_reward = 0
        obj_po = pb.getBasePositionAndOrientation(self.obj_to_classify)
        distance = math.sqrt(sum([(xi-yi)**2 for xi,yi in zip(obj_po[0],hand_po[0])])) #TODO faster euclidean
        #distance =  np.linalg.norm(obj_po[0],hand_po[0])
        #print("distance:",distance)
        if distance < self.prev_distance:
            reward += 1 * (max_steps - self.steps)
        elif distance > self.prev_distance:
            reward -= 10
        reward -= distance
        reward += touch_reward
        self.prev_distance = distance
        if self.steps >= max_steps or self.is_touching():
            done = True
        return self.current_observation,reward,done,info
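The comment block at the top of _step paraphrases the standard Gym step() contract; a hedged sketch of how a caller typically consumes it (env and action are hypothetical):

obs, reward, done, info = env.step(action)
if done:
    obs = env.reset()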
upAxisIndex = 2
camDistance = 4
pixelWidth = 1920
pixelHeight = 1080
nearPlane = 0.01
farPlane = 1000

fov = 60

main_start = time.time()
for pitch in range (0,360,10) :
    start = time.time()
    viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
    aspect = pixelWidth / pixelHeight
    projectionMatrix = pybullet.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane)
    img_arr = pybullet.getCameraImage(pixelWidth, pixelHeight, viewMatrix,projectionMatrix, [0,1,0])
    stop = time.time()
    print ("renderImage %f" % (stop - start))

    w=img_arr[0] #width of the image, in pixels
    h=img_arr[1] #height of the image, in pixels
    rgb=img_arr[2] #color data RGB
    dep=img_arr[3] #depth data

    print('width = %d height = %d' % (w, h))

    #note that sending the data to matplotlib is really slow
    #show
    plt.imshow(rgb,interpolation='none')
    #plt.show()
    plt.pause(0.01)
Example #23
import pybullet as p
import time

p.connect(p.GUI)
p.loadURDF("table/table.urdf", 0.5000000,0.00000,-.820000,0.000000,0.000000,0.0,1.0)
p.setGravity(0,0,-10)
arm = p.loadURDF("widowx/widowx.urdf",useFixedBase=True)

p.resetBasePositionAndOrientation(arm,[-0.098612,-0.000726,-0.194018],[0.000000,0.000000,0.000000,1.000000])
  
	
while (1):
	p.stepSimulation()
	time.sleep(0.01)
	#p.saveWorld("test.py")
	viewMat = p.getDebugVisualizerCamera()[2]
	#projMatrix = [0.7499999403953552, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]
	projMatrix = p.getDebugVisualizerCamera()[3]
	width=640
	height=480
	img_arr = p.getCameraImage(width=width,height=height,viewMatrix=viewMat,projectionMatrix=projMatrix)
oneOverWidth = float(1)/float(width)
oneOverHeight = float(1)/float(height)
dHor = [horizon[0] * oneOverWidth,horizon[1] * oneOverWidth,horizon[2] * oneOverWidth]
dVer = [vertical[0] * oneOverHeight,vertical[1] * oneOverHeight,vertical[2] * oneOverHeight]
        
lendHor = math.sqrt(dHor[0]*dHor[0]+dHor[1]*dHor[1]+dHor[2]*dHor[2]) 
lendVer = math.sqrt(dVer[0]*dVer[0]+dVer[1]*dVer[1]+dVer[2]*dVer[2])

cornersX = [0,width,width,0]
cornersY = [0,0,height,height]
corners3D=[]

imgW = int(width/10)
imgH = int(height/10)

img = p.getCameraImage(imgW,imgH, renderer=p.ER_BULLET_HARDWARE_OPENGL)
rgbBuffer=img[2]
depthBuffer = img[3]
print("rgbBuffer.shape=",rgbBuffer.shape)
print("depthBuffer.shape=",depthBuffer.shape)

#disabling rendering temporarily makes adding objects faster
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING,0)
p.configureDebugVisualizer(p.COV_ENABLE_GUI,0)
p.configureDebugVisualizer(p.COV_ENABLE_TINY_RENDERER,0)
visualShapeId = p.createVisualShape(shapeType=p.GEOM_SPHERE, rgbaColor=[1,1,1,1], radius=0.03 )
collisionShapeId = -1 #p.createCollisionShape(shapeType=p.GEOM_MESH, fileName="duck_vhacd.obj", collisionFramePosition=shift,meshScale=meshScale)

for i in range (4):
	w = cornersX[i]
	h = cornersY[i]
Example #25
import pybullet as p
useEGL = False
useEGLGUI = False

if useEGL:
	if useEGLGUI:
		p.connect(p.GUI, "window_backend=2")
	else:	
		p.connect(p.DIRECT)
		p.loadPlugin("eglRendererPlugin")
else:
	p.connect(p.GUI)

p.loadURDF("threecubes.urdf", flags=p.URDF_USE_MATERIAL_COLORS_FROM_MTL)
while (1):
	
	viewmat= [0.642787516117096, -0.4393851161003113, 0.6275069713592529, 0.0, 0.766044557094574, 0.36868777871131897, -0.5265407562255859, 0.0, -0.0, 0.8191521167755127, 0.5735764503479004, 0.0, 2.384185791015625e-07, 2.384185791015625e-07, -5.000000476837158, 1.0]
	projmat= [0.7499999403953552, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0, -0.02000020071864128, 0.0]

	p.getCameraImage(64,64, viewMatrix=viewmat, projectionMatrix=projmat, flags=p.ER_NO_SEGMENTATION_MASK )
	p.stepSimulation()
	
                           baseInertialFramePosition=[0, 0, 0],
                           baseCollisionShapeIndex=collisionShapeId,
                           baseVisualShapeIndex=visualShapeId,
                           basePosition=[((-rangex / 2) + i * 2) * meshScale[0] * 2,
                                         (-rangey / 2 + j) * meshScale[1] * 4, 1],
                           useMaximalCoordinates=False)
    p.changeVisualShape(mb, -1, rgbaColor=[1, 1, 1, 1])
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
p.stopStateLogging(logId)
p.setGravity(0, 0, -10)
p.setRealTimeSimulation(1)

colors = [[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [1, 1, 1, 1]]
currentColor = 0

p.getCameraImage(64, 64, renderer=p.ER_BULLET_HARDWARE_OPENGL)

while (1):

  mouseEvents = p.getMouseEvents()
  for e in mouseEvents:
    if ((e[0] == 2) and (e[3] == 0) and (e[4] & p.KEY_WAS_TRIGGERED)):
      mouseX = e[1]
      mouseY = e[2]
      rayFrom, rayTo = getRayFromTo(mouseX, mouseY)
      rayInfo = p.rayTest(rayFrom, rayTo)
      #p.addUserDebugLine(rayFrom,rayTo,[1,0,0],3)
      for l in range(len(rayInfo)):
        hit = rayInfo[l]
        objectUid = hit[0]
        if (objectUid >= 0):
import pybullet as p
p.connect(p.GUI)
r2d2 = p.loadURDF("r2d2.urdf", [0, 0, 1])
for l in range(p.getNumJoints(r2d2)):
  print(p.getJointInfo(r2d2, l))

p.loadURDF("r2d2.urdf", [2, 0, 1])
p.loadURDF("r2d2.urdf", [4, 0, 1])

p.getCameraImage(320, 200, flags=p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX)
segLinkIndex = 1
verbose = 0

while (1):
  keys = p.getKeyboardEvents()
  #for k in keys:
  #	print("key=",k,"state=", keys[k])
  if ord('1') in keys:
    state = keys[ord('1')]
    if (state & p.KEY_WAS_RELEASED):
      verbose = 1 - verbose
  if ord('s') in keys:
    state = keys[ord('s')]
    if (state & p.KEY_WAS_RELEASED):
      segLinkIndex = 1 - segLinkIndex
      #print("segLinkIndex=",segLinkIndex)
  flags = 0
  if (segLinkIndex):
    flags = p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX

  img = p.getCameraImage(320, 200, flags=flags)
p.loadURDF('cube_small.urdf', basePosition=[0.0, 0.0, 0.025])
cube_trans = p.loadURDF('cube_small.urdf', basePosition=[0.0, 0.1, 0.025])
p.changeVisualShape(cube_trans,-1,rgbaColor=[1,1,1,0.1])
width = 128
height = 128

fov = 60
aspect = width / height
near = 0.02
far = 1

view_matrix = p.computeViewMatrix([0, 0, 0.5], [0, 0, 0], [1, 0, 0])
projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)

# Get depth values using the OpenGL renderer
images = p.getCameraImage(width, height, view_matrix, projection_matrix, shadow=True,renderer=p.ER_BULLET_HARDWARE_OPENGL)
rgb_opengl= np.reshape(images[2], (height, width, 4))*1./255.
depth_buffer_opengl = np.reshape(images[3], [width, height])
depth_opengl = far * near / (far - (far - near) * depth_buffer_opengl)
seg_opengl = np.reshape(images[4], [width, height])*1./255.
time.sleep(1)

# Get depth values using Tiny renderer
images = p.getCameraImage(width, height, view_matrix, projection_matrix, shadow=True, renderer=p.ER_TINY_RENDERER)
depth_buffer_tiny = np.reshape(images[3], [width, height])
depth_tiny = far * near / (far - (far - near) * depth_buffer_tiny)
rgb_tiny= np.reshape(images[2], (height, width, 4))*1./255.
seg_tiny = np.reshape(images[4],[width,height])*1./255.
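In both renderers the depth buffer is non-linear; the expression far * near / (far - (far - near) * depth_buffer) converts it to metric depth along the camera axis. A hedged helper capturing that conversion:

def depth_buffer_to_meters(depth_buffer, near, far):
    # linearize the OpenGL-style depth buffer returned by getCameraImage
    return far * near / (far - (far - near) * depth_buffer)

depth_check = depth_buffer_to_meters(depth_buffer_opengl, near, far)  # same values as depth_opengl above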


Example #29
    def get_camera_view(self,
                        camera_pose,
                        camera,
                        target_position=None,
                        occlusion_threshold=0.01,
                        rendering_ratio=1.0):
        visible_tracks = []
        rot = quaternion_matrix(camera_pose.quaternion())
        trans = translation_matrix(camera_pose.position().to_array().flatten())
        if target_position is None:
            target = translation_matrix([0.0, 0.0, 1000.0])
            target = translation_from_matrix(np.dot(np.dot(trans, rot),
                                                    target))
        else:
            target = target_position.position().to_array()
        view_matrix = p.computeViewMatrix(camera_pose.position().to_array(),
                                          target, [0, 0, 1])

        width = camera.width
        height = camera.height

        projection_matrix = p.computeProjectionMatrixFOV(
            camera.fov,
            float(width) / height, camera.clipnear, camera.clipfar)

        rendered_width = int(width / rendering_ratio)
        rendered_height = int(height / rendering_ratio)

        width_ratio = width / rendered_width
        height_ratio = height / rendered_height

        if self.use_gui is True:
            camera_image = p.getCameraImage(
                rendered_width,
                rendered_height,
                viewMatrix=view_matrix,
                renderer=p.ER_BULLET_HARDWARE_OPENGL,
                projectionMatrix=projection_matrix)
        else:
            camera_image = p.getCameraImage(rendered_width,
                                            rendered_height,
                                            viewMatrix=view_matrix,
                                            renderer=p.ER_TINY_RENDERER,
                                            projectionMatrix=projection_matrix)

        rgb_image = cv2.resize(np.array(camera_image[2]),
                               (width, height))[:, :, :3]
        depth_image = np.array(camera_image[3], np.float32).reshape(
            (rendered_height, rendered_width))

        far = camera.clipfar
        near = camera.clipnear
        real_depth_image = far * near / (far - (far - near) * depth_image)

        mask_image = camera_image[4]
        unique, counts = np.unique(np.array(mask_image).flatten(),
                                   return_counts=True)

        for sim_id, count in zip(unique, counts):
            if sim_id > 0:
                cv_mask = np.array(mask_image.copy())
                cv_mask[cv_mask != sim_id] = 0
                cv_mask[cv_mask == sim_id] = 255
                xmin, ymin, w, h = cv2.boundingRect(cv_mask.astype(np.uint8))
                mask = cv_mask[ymin:ymin + h, xmin:xmin + w]
                visible_area = w * h + 1
                screen_area = rendered_width * rendered_height + 1
                if screen_area - visible_area == 0:
                    confidence = 1.0
                else:
                    confidence = visible_area / float(screen_area -
                                                      visible_area)
                #TODO compute occlusion score as a ratio between visible 2d bbox and projected 2d bbox areas
                if confidence > occlusion_threshold:

                    depth = real_depth_image[int(ymin + h / 2.0)][int(xmin +
                                                                      w / 2.0)]
                    xmin = int(xmin * width_ratio)
                    ymin = int(ymin * height_ratio)
                    w = int(w * width_ratio)
                    h = int(h * height_ratio)

                    id = self.reverse_entity_id_map[sim_id]
                    scene_node = self.get_entity(id)

                    det = Detection(int(xmin),
                                    int(ymin),
                                    int(xmin + w),
                                    int(ymin + h),
                                    id,
                                    confidence,
                                    depth=depth,
                                    mask=mask)
                    track = SceneNode(detection=det)
                    track.static = scene_node.static
                    track.id = id
                    track.mask = det.mask
                    track.shapes = scene_node.shapes
                    track.pose = scene_node.pose
                    track.label = scene_node.label
                    track.description = scene_node.description
                    visible_tracks.append(track)
        return rgb_image, real_depth_image, visible_tracks
Example #30
table = p.loadURDF("table/table.urdf", table_pos, flags = flags, useFixedBase=useFixedBase)
xarm = p.loadURDF("xarm/xarm6_robot.urdf", flags = flags, useFixedBase=useFixedBase)

jointIds = []
paramIds = []

for j in range(p.getNumJoints(xarm)):
  p.changeDynamics(xarm, j, linearDamping=0, angularDamping=0)
  info = p.getJointInfo(xarm, j)
  #print(info)
  jointName = info[1]
  jointType = info[2]
  if (jointType == p.JOINT_PRISMATIC or jointType == p.JOINT_REVOLUTE):
    jointIds.append(j)
    paramIds.append(p.addUserDebugParameter(jointName.decode("utf-8"), -4, 4, 0))
  
skip_cam_frames = 10  

while (1):
	p.stepSimulation()
	for i in range(len(paramIds)):
		c = paramIds[i]
		targetPos = p.readUserDebugParameter(c)
		p.setJointMotorControl2(xarm, jointIds[i], p.POSITION_CONTROL, targetPos, force=5 * 240.)
	skip_cam_frames -= 1
	if (skip_cam_frames<0):
		p.getCameraImage(320,200, renderer=p.ER_BULLET_HARDWARE_OPENGL )
		skip_cam_frames = 10
	time.sleep(1./240.)
	
Example #31
print("rpy=\"", eul[0], eul[1], eul[2], "\" xyz=\"", mPos[0], mPos[1], mPos[2], "\"")


shift=0
gui = 1


frame=0
states=[]
states.append(p.saveState())
#observations=[]
#observations.append(obs)
				
running = True
reverting = False
p.getCameraImage(320,200)#,renderer=p.ER_BULLET_HARDWARE_OPENGL )
				
while	(1):

	updateCam = 0
	keys = p.getKeyboardEvents()
	for k,v in keys.items():
		if (reverting or (k == p.B3G_LEFT_ARROW and (v&p.KEY_WAS_TRIGGERED))):
			reverting=True
			stateIndex = len(states)-1
			#print("prestateIndex=",stateIndex)
			time.sleep(timestep)
			updateCam=1
			if (stateIndex>0):
				stateIndex-=1
				states=states[:stateIndex+1]
def run_simulation(dt,
                   t_stance,
                   duration,
                   f,
                   A_lat,
                   th0_lat,
                   th0_abd,
                   thf_abd,
                   z_rotation,
                   girdle_friction,
                   body_friction,
                   last_link_friction,
                   leg_friction,
                   Kp_lat,
                   Kp_r_abd,
                   Kp_l_abd,
                   Kp_flex,
                   Kv_lat,
                   Kv_r_abd,
                   Kv_l_abd,
                   Kv_flex,
                   keep_in_check=True,
                   graphic_mode=False,
                   plot_graph_joint=False,
                   plot_graph_COM=False,
                   video_logging=False,
                   video_name="./video/dump/dump"):
    # dt, t_stance, duration = time parameter, dt MUST be the same used to create the model
    # A,f1,n,t2_off,delta_off,bias = parameters for the generation of the torques
    # theta_rg0, theta_lg0, A_lat_0, theta_lat_0= parameters for setting the starting position
    # girdle_friction, body_friction, leg_friction = lateral friction values
    ##########################################
    ####### PYBULLET SET-UP ##################
    if graphic_mode:
        physicsClient = p.connect(p.GUI)
    else:
        physicsClient = p.connect(p.DIRECT)
    p.setTimeStep(dt)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    p.setGravity(0, 0, -9.81)
    ### plane set-up
    planeID = p.loadURDF("plane100.urdf", [0, 0, 0])
    p.changeDynamics(
        planeID,
        linkIndex=-1,
        lateralFriction=1,
        spinningFriction=1,
        rollingFriction=1,
        #restitution = 0.5,
        #contactStiffness = 0,
        #contactDamping = 0
    )
    ##########################################
    ####### MODEL SET-UP #####################
    model = crw.Crawler(
        urdf_path="/home/fra/Uni/Tesi/crawler",
        dt_simulation=dt,
        base_position=[0, 0, 0.005],
        base_orientation=[0, 0, sin(z_rotation / 2),
                          cos(z_rotation / 2)],
        mass_distribution=True)
    # for i,elem in enumerate(model.links_state_array):
    #     print("Link %d  "%i, elem["world_com_trn"])
    # print(model.links_state_array)
    # Set motion damping ("air friction")
    for index in range(-1, model.num_joints):
        p.changeDynamics(model.Id,
                         index,
                         linearDamping=0.001,
                         angularDamping=0.001)
    # Girdle link dynamic properties
    p.changeDynamics(
        model.Id,
        linkIndex=-1,
        lateralFriction=girdle_friction,
        spinningFriction=girdle_friction / 10000,
        rollingFriction=girdle_friction / 10000,
        restitution=0.1,
        #contactStiffness = 0,
        #contactDamping = 0
    )
    # Body dynamic properties
    for i in range(1, model.num_joints - 4, 2):
        p.changeDynamics(
            model.Id,
            linkIndex=i,
            lateralFriction=body_friction,
            spinningFriction=body_friction / 50,
            rollingFriction=body_friction / 100,
            restitution=0.1,
            #contactStiffness = 0,
            #contactDamping = 0
        )
    # # Last link dynamic properties
    p.changeDynamics(
        model.Id,
        linkIndex=(model.control_indices[0][-1] + 1),
        lateralFriction=last_link_friction,
        spinningFriction=last_link_friction / 10000,
        rollingFriction=last_link_friction / 10000,
        restitution=0.1,
        #contactStiffness = 0,
        #contactDamping = 0
    )
    # Legs dynamic properties
    for i in range(model.num_joints - 3, model.num_joints, 2):
        p.changeDynamics(
            model.Id,
            linkIndex=i,
            lateralFriction=leg_friction,
            spinningFriction=leg_friction / 10000,
            rollingFriction=leg_friction / 10000,
            restitution=0.1,
            #contactStiffness = 0,
            #contactDamping = 0
        )
    # Dorsal joints "compliance"
    model.turn_off_crawler()
    for i in range(1, model.control_indices[0][-1] + 1, 2):
        p.setJointMotorControl2(model.Id,
                                i,
                                controlMode=p.VELOCITY_CONTROL,
                                targetVelocity=0,
                                force=0.05)
    # Starting position
    model.set_bent_position(theta_rg=th0_abd,
                            theta_lg=-thf_abd,
                            A_lat=A_lat,
                            theta_lat_0=th0_lat)
    ##########################################
    ####### CONTROLLER PARAMETERS ############
    Kp = model.generate_Kp(Kp_lat, Kp_r_abd, Kp_l_abd, Kp_flex)
    Kv = model.generate_Kv(Kv_lat, Kv_r_abd, Kv_l_abd, Kv_flex)
    model.set_low_pass_lateral_qa(fc=20)
    model.set_low_pass_tau(fc=90)
    ##########################################
    ####### MISCELLANEOUS ####################
    if graphic_mode:
        p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
        p.resetDebugVisualizerCamera(cameraDistance=0.11,
                                     cameraYaw=50,
                                     cameraPitch=-60,
                                     cameraTargetPosition=[0, 0, 0])
    if (video_logging and graphic_mode):
        video_writer = imageio.get_writer(video_name, fps=int(1 / dt))
    ##########################################
    ####### PERFORMANCE EVALUATION ###########
    loss = 0.0
    max_loss = 10000000.0  # value to set when something goes wrong, if constraints are "hard"
    max_COM_height = 4 * model.body_sphere_radius
    max_COM_speed = 10  #TODO: check this value
    n_mean = 6
    ##########################################
    ####### TIME SERIES CREATION #############
    steps = int(duration / dt)
    steps_stance = int(t_stance / dt)
    num_lat = len(model.control_indices[0])
    f_walk = 1 / (2 * t_stance)
    # Torques evolution
    q_des_time_array, qd_des_time_array, qdd_des_time_array = model.generate_trajectory_time_array(
        duration=duration,
        steps=steps,
        f=f,
        A_lat=A_lat,
        th0_lat=th0_lat,
        th0_abd=th0_abd,
        thf_abd=thf_abd,
        include_base=True)
    print("QDD_DES SHAPE: ", qdd_des_time_array.shape)
    print(qdd_des_time_array[model.mask_act + model.mask_both_legs])
    # Data array initialization
    p_COM_time_array = np.zeros((steps, 3))
    v_COM_time_array = np.zeros((steps, 3))
    tau_time_array = np.zeros((steps, model.state.nqd))
    joint_power_time_array = np.zeros((steps, model.state.nqd))
    q_time_array = np.zeros((steps, model.state.nq))
    qd_time_array = np.zeros((steps, model.state.nqd))
    qdd_time_array = np.zeros((steps, model.state.nqd))
    eq_time_array = np.zeros((steps, model.state.nq))
    # Integrator for the work done by each joint
    joint_power_integrator = crw.Integrator_Forward_Euler(
        dt, np.zeros(model.state.nqd))
    joint_energy_time_array = np.zeros((steps, model.state.nqd))
    ##########################################
    ####### RUN SIMULATION ###################
    t = 0.0
    # let the legs fall and leave the time to check the starting position
    for i in range(100):
        p.stepSimulation()
        if graphic_mode:
            time.sleep(dt)
    # Initial Condition
    model.state.update()
    model.integrator_lateral_qa.reset(model.state.q[model.mask_act_shifted])
    model.free_right_foot()
    model.fix_left_foot()
    model.fix_tail()
    # walk and record data
    for i in range(steps):
        # UPDATE CONSTRAINTS
        if not (i % steps_stance):
            model.invert_feet()
            print("step")
            for tmp in range(10):
                p.stepSimulation()
        # UPDATE STATE
        model.state.update()
        p_COM_curr = np.array(model.COM_position_world())
        v_COM_curr = np.array(model.COM_velocity_world())
        # ADD FRAME TO VIDEO
        if (graphic_mode and video_logging):
            img = p.getCameraImage(800,
                                   640,
                                   renderer=p.ER_BULLET_HARDWARE_OPENGL)[2]
            video_writer.append_data(img)
        # ACTUATE MODEL AND STEP SIMULATION
        tau_des, eq = model.solve_computed_torque_control(
            q_des=q_des_time_array[i],
            qd_des=qd_des_time_array[i],
            qdd_des=qdd_des_time_array[i],
            Kp=Kp,
            Kv=Kv,
            #rho=rho,
            verbose=False)
        tau_applied = model.apply_torques(tau_des=tau_des, filtered=True)
        # UPDATE TIME-ARRAYS
        if plot_graph_joint:
            q_time_array[i] = model.state.q.copy()
            qd_time_array[i] = model.state.qd.copy()
            qdd_time_array[i] = model.state.qdd.copy()
            p_COM_time_array[i] = p_COM_curr
            v_COM_time_array[i] = v_COM_curr
            eq_time_array[i] = q_des_time_array[i] - q_time_array[i]
            tau_time_array[i] = tau_applied
            for j, (v_j, tau_j) in enumerate(zip(model.state.qd, tau_applied)):
                joint_power_time_array[i, j] = abs(v_j * tau_j)
            joint_energy = joint_power_integrator.integrate(
                joint_power_time_array[i])
            joint_energy_time_array[i] = joint_energy
        # print("eq:      ", np.round(eq[model.mask_act],2), np.round(eq[model.mask_both_legs],2))
        # print("tau:     ", np.round(tau_applied[model.mask_act],2), np.round(tau_applied[model.mask_both_legs],2))
        # print("qdd_des: ", np.round(qdd_des_time_array[i][model.mask_act],2), np.round(qdd_des_time_array[i][model.mask_both_legs],2))
        #UPDATE LOSS - VARIATIONAL VERSION -d(x-displacement) + d(total energy expenditure)
        # loss += (
        #     -100*(p_COM_time_array[i][0]-p_COM_time_array[i-1][0]) +
        #     0.1*(joint_power_time_array[i]).dot(joint_power_time_array[i])
        #     )
        # STEP SIMULATION
        p.stepSimulation()
        if graphic_mode:
            time.sleep(dt)
        t += dt
        # CHECK SIMULATION to keep the model from getting schwifty
        if keep_in_check:
            # check COM height, model shouldn't fly or go through the ground
            if (p_COM_curr[-1] > (max_COM_height)) or (p_COM_curr[-1] < 0.0):
                print("\nLook at me I'm jumping\n")
                loss += max_loss
                break
            if (np.linalg.norm(np.mean(v_COM_time_array[i - n_mean:i + 1], 0))
                    > max_COM_speed):
                print("\nFaster than light. WOOOOOOOSH\n")
                loss += max_loss
                break
    p.disconnect()
    ####### END SIMULATION ###################
    if (video_logging and graphic_mode):
        video_writer.close()
    ##########################################
    ####### PLOT DATA ########################
    if plot_graph_joint:
        fig_lat, axs_lat = plt.subplots(2, 3)
        for i in range(0, num_lat):
            #axs_lat[i//3,i%3].plot(qd_time_array[:,model.mask_act][:,i], color="xkcd:dark yellow", label="qd")
            #axs_lat[i//3,i%3].plot(qdd_time_array[:,model.mask_act][:,i], color="xkcd:pale yellow", label="qdd")
            #axs_lat[i//3,i%3].plot(joint_power_time_array[:,model.mask_act][:,i], color="xkcd:light salmon", label="Power")
            #axs_lat[i//3,i%3].plot(joint_energy_time_array[:,model.mask_act][:,i], color="xkcd:dark salmon", label="Energy")
            axs_lat[i // 3, i % 3].plot(tau_time_array[:, model.mask_act][:,
                                                                          i],
                                        color="xkcd:salmon",
                                        label="Torque")
            axs_lat[i // 3,
                    i % 3].plot(q_time_array[:, model.mask_act_shifted][:, i],
                                color="xkcd:yellow",
                                label="q")
            axs_lat[i // 3,
                    i % 3].plot(q_des_time_array[:, model.mask_act_shifted][:,
                                                                            i],
                                color="xkcd:dark teal",
                                label="q_des")
            #axs_lat[i//3,i%3].plot(eq_time_array[:,model.mask_act_shifted][:,i], color="xkcd:red", label="error")
            #axs_lat[i//3,i%3].set_title("Lateral joint %d" %i)
        handles_lat, labels_lat = axs_lat[0, 0].get_legend_handles_labels()
        fig_lat.legend(handles_lat, labels_lat, loc='center right')
        #
        fig_girdle, axs_girdle = plt.subplots(2, 2)
        for i in range(0, len(model.mask_both_legs)):
            #axs_girdle[i//2,i%2].plot(qd_time_array[:,model.mask_both_legs][:,i], color="xkcd:dark yellow", label="qd")
            #axs_girdle[i//2,i%2].plot(qdd_time_array[:,model.mask_both_legs][:,i], color="xkcd:pale yellow", label="qdd")
            #axs_girdle[i//2,i%2].plot(joint_power_time_array[:,model.mask_both_legs][:,i], color="xkcd:light salmon", label="Power")
            #axs_girdle[i//2,i%2].plot(joint_energy_time_array[:,model.mask_both_legs][:,i], color="xkcd:dark salmon", label="Energy")
            axs_girdle[i // 2, i % 2].plot(tau_time_array[:, model.mask_both_legs][:, i],
                                           color="xkcd:salmon", label="Torque")
            axs_girdle[i // 2, i % 2].plot(q_time_array[:, model.mask_both_legs_shifted][:, i],
                                           color="xkcd:yellow", label="q")
            axs_girdle[i // 2, i % 2].plot(q_des_time_array[:, model.mask_both_legs_shifted][:, i],
                                           color="xkcd:dark teal", label="q_des")
            axs_girdle[i // 2, i % 2].plot(eq_time_array[:, model.mask_both_legs_shifted][:, i],
                                           color="xkcd:red", label="error")
        axs_girdle[0, 0].set_title("Right abduction")
        axs_girdle[0, 1].set_title("Right flexion")
        axs_girdle[1, 0].set_title("Left abduction")
        axs_girdle[1, 1].set_title("Left flexion")
        handles_girdle, labels_girdle = axs_girdle[0, 0].get_legend_handles_labels()
        fig_girdle.legend(handles_girdle, labels_girdle, loc='center right')
        #
        plt.show()
        #
    if plot_graph_COM:
        fig_COM = plt.figure()
        axs_COM = fig_COM.add_subplot(111)
        axs_COM.plot(p_COM_time_array[:, 0],
                     p_COM_time_array[:, 1],
                     color="xkcd:teal")
        axs_COM.set_title("COM x-y trajectory")
        axs_COM.set_aspect('equal')
        fig_COM_3D = plt.figure()
        axs_COM_3D = fig_COM_3D.add_subplot(111, projection='3d')
        axs_COM_3D.plot(p_COM_time_array[:, 0],
                        p_COM_time_array[:, 1],
                        p_COM_time_array[:, 2],
                        color="xkcd:teal")
        axs_COM_3D.set_title("COM 3D trajectory")
        axs_COM_3D.set_aspect('equal')
        #
        plt.show()
    return loss
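On some Matplotlib versions, set_aspect('equal') on a 3D axes raises NotImplementedError; a minimal workaround sketch (assuming Matplotlib >= 3.3, where set_box_aspect is available) scales the axes box to the COM trajectory's data ranges instead:

import numpy as np

# Hypothetical helper, not part of the original script: emulate an equal-aspect
# 3D plot by matching the axes box to the data extent.
def set_equal_3d_aspect(ax, xyz):
    extents = np.ptp(xyz, axis=0)      # x/y/z range of the trajectory
    extents[extents == 0] = 1e-9       # avoid a degenerate zero-sized box
    ax.set_box_aspect(extents)

# e.g. set_equal_3d_aspect(axs_COM_3D, p_COM_time_array[:, :3])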
Exemple #33
0
import pybullet as p
p.connect(p.DIRECT)
p.loadPlugin("eglRendererPlugin")
p.loadSDF("newsdf.sdf")
while (1):
  p.getCameraImage(320, 240, flags=p.ER_NO_SEGMENTATION_MASK)
  p.stepSimulation()
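Since this example renders in p.DIRECT mode, nothing is displayed on screen; a minimal sketch (assuming NumPy and imageio are installed) of turning the returned RGB buffer into an image file:

import numpy as np
import imageio

# Sketch: getCameraImage returns (width, height, rgb, depth, seg); reshape the
# flat RGBA buffer and drop the alpha channel before saving.
width, height, rgb, depth, seg = p.getCameraImage(320, 240,
                                                  flags=p.ER_NO_SEGMENTATION_MASK)
rgba = np.reshape(np.array(rgb, dtype=np.uint8), (height, width, 4))
imageio.imwrite("frame.png", rgba[:, :, :3])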
Exemple #34
0
	jointOffsets.append(-0.7)
	jointOffsets.append(0.7)

maxForceId = p.addUserDebugParameter("maxForce", 0, 100, 20)

for j in range(p.getNumJoints(quadruped)):
	p.changeDynamics(quadruped, j, linearDamping=0, angularDamping=0)
	info = p.getJointInfo(quadruped, j)
	#print(info)
	jointName = info[1]
	jointType = info[2]
	if (jointType == p.JOINT_PRISMATIC or jointType == p.JOINT_REVOLUTE):
		jointIds.append(j)

p.getCameraImage(480, 320)
p.setRealTimeSimulation(0)

joints = []

with open(pd.getDataPath() + "/laikago/data1.txt", "r") as filestream:
	for line in filestream:
		print("line=", line)
		maxForce = p.readUserDebugParameter(maxForceId)
		currentline = line.split(",")
		#print (currentline)
		#print("-----")
		frame = currentline[0]
		t = currentline[1]
		#print("frame[",frame,"]")
		joints = currentline[2:14]
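		# Sketch of the usual continuation (not part of the original snippet):
		# feed each recorded joint value to POSITION_CONTROL using the
		# jointIds/jointOffsets collected above; jointDirections is an assumed
		# per-joint list of +/-1 signs.
		for j in range(12):
			targetPos = float(joints[j])
			p.setJointMotorControl2(quadruped, jointIds[j], p.POSITION_CONTROL,
				jointDirections[j] * targetPos + jointOffsets[j], force=maxForce)
		p.stepSimulation()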
Exemple #35
0
import pybullet as p

p.connect(p.GUI)
plugin = p.loadPlugin(
    "d:/develop/bullet3/bin/pybullet_testplugin_vs2010_x64_debug.dll",
    "_testPlugin")
print("plugin=", plugin)
p.loadURDF("r2d2.urdf")

while (1):
    p.getCameraImage(320, 200)
Exemple #36
0
import pybullet as p
import time
import pybullet_data
from hamstir_gym.modder import Modder
from tqdm import tqdm

mod = Modder()
physicsClient = p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
# planeId = p.loadURDF("plane.urdf")
room = p.loadURDF("./hamstir_gym/data/room12x12.urdf")
mod.load(room)
# mod.randomize()
#
pbar = tqdm()
frames = 0
try:
    while True:
        p.stepSimulation()
        p.getCameraImage(128, 128, renderer=p.ER_BULLET_HARDWARE_OPENGL)
        mod.randomize()
        # time.sleep(1/20.)
        frames += 1
        pbar.update(1)  # tqdm's update() adds to the counter, so advance by one frame
except:
    print('bye')
Exemple #37
0
main_start = time.time()
while (1):
    for yaw in range(0, 360, 10):
        pybullet.stepSimulation()
        start = time.time()

        viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(
            camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
        aspect = pixelWidth / pixelHeight
        projectionMatrix = pybullet.computeProjectionMatrixFOV(
            fov, aspect, nearPlane, farPlane)
        img_arr = pybullet.getCameraImage(
            pixelWidth,
            pixelHeight,
            viewMatrix,
            projectionMatrix,
            shadow=1,
            lightDirection=[1, 1, 1],
            renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
        stop = time.time()
        print("renderImage %f" % (stop - start))

        w = img_arr[0]  #width of the image, in pixels
        h = img_arr[1]  #height of the image, in pixels
        rgb = img_arr[2]  #color data RGB
        dep = img_arr[3]  #depth data

        print('width = %d height = %d' % (w, h))

        #note that sending the data to matplotlib is really slow
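Per the note above, pushing every frame through a fresh matplotlib call is slow; a sketch of the usual workaround, reusing a single imshow artist (assuming matplotlib is available and pixelWidth/pixelHeight from the loop above):

import numpy as np
import matplotlib.pyplot as plt

# Sketch: create the figure once, then only update the image data per frame.
plt.ion()
fig, ax = plt.subplots()
im = ax.imshow(np.zeros((pixelHeight, pixelWidth, 4), dtype=np.uint8))

def show_frame(rgb):
    im.set_data(np.reshape(np.array(rgb, dtype=np.uint8), (pixelHeight, pixelWidth, 4)))
    fig.canvas.flush_events()

# inside the render loop: show_frame(img_arr[2])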
Exemple #38
0
                            linkParentIndices=indices,
                            linkJointTypes=jointTypes,
                            linkJointAxis=axis)
  p.changeDynamics(boxId, -1, spinningFriction=0.001, rollingFriction=0.001, linearDamping=0.0)
  print(p.getNumJoints(boxId))
  for joint in range(p.getNumJoints(boxId)):
    targetVelocity = 10
    if (i % 2):
      targetVelocity = -10
    p.setJointMotorControl2(boxId,
                            joint,
                            p.VELOCITY_CONTROL,
                            targetVelocity=targetVelocity,
                            force=100)
  segmentStart = segmentStart - 1.1

p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
while (1):
  camData = p.getDebugVisualizerCamera()
  viewMat = camData[2]
  projMat = camData[3]
  p.getCameraImage(256,
                   256,
                   viewMatrix=viewMat,
                   projectionMatrix=projMat,
                   renderer=p.ER_BULLET_HARDWARE_OPENGL)
  keys = p.getKeyboardEvents()
  p.stepSimulation()
  #print(keys)
  time.sleep(0.01)
Exemple #39
0
    def get_observation(self):
        # get observation
        img_info = p.getCameraImage(
            width=self.w,
            height=self.h,
            viewMatrix=self.view_matrix,
            projectionMatrix=self.proj_matrix,
            shadow=-1,
            flags=p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX,
            renderer=p.ER_TINY_RENDERER)
        img = img_info[2][:, :, :3]

        if self.opt.add_mask:
            mask = (img_info[4] > 10000000)
            mask_id_label = [
                234881025, 301989889, 285212673, 268435457, 318767105,
                335544321, 201326593, 218103809, 167772161
            ]
            for item in mask_id_label:
                mask = mask * (img_info[4] != item)

            box_mask_label = [
                117440516, 234881028, 184549380, 33554436, 167772164, 50331652,
                134217732, 16777220, 67108868, 150994948, 251658244, 234881028,
                100663300, 218103812, 83886084, 268435460
            ]
            for item in box_mask_label:
                mask = mask * (img_info[4] != item)

            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            img[mask] = [127, 151, 182]

        if self.opt.write_img:
            cv2.imwrite(
                os.path.join(self.log_path,
                             '{:06d}.jpg'.format(self.seq_num - 1)), img)
        else:
            self.img_buffer.append(img)
            np.save(
                os.path.join(self.log_root,
                             'state_{}.npy'.format(self.opt.load_embedding)),
                img)

        if self.epoch_num > self.opt.start_write:
            cv2.imwrite(
                os.path.join(self.log_path,
                             '{:06d}.jpg'.format(self.seq_num - 1)), img)

        self.observation = img

        if self.opt.observation == 'joint_pose':
            self.observation = np.array([
                p.getJointState(self.robotId, i)[0]
                for i in range(self.robotEndEffectorIndex)
            ])
        elif self.opt.observation == 'end_pos':
            self.observation = np.array(p.getLinkState(self.robotId, 7)[0])
        elif self.opt.observation == 'before_cnn':
            self.observation = np.array(self.observation)
        self.start_pos = p.getLinkState(self.robotId, 7)[0]

        return self.get_reward()
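The hard-coded mask_id_label / box_mask_label constants above come from the packed segmentation values: with ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX each pixel is objectUniqueId + ((linkIndex + 1) << 24). A small decoding sketch (assuming NumPy):

import numpy as np

# Sketch: split a packed segmentation buffer into body ids and link indices.
def decode_segmentation(seg):
    seg = np.asarray(seg)
    valid = seg >= 0                          # -1 marks background pixels
    body_id = np.where(valid, seg & ((1 << 24) - 1), -1)
    link_index = np.where(valid, (seg >> 24) - 1, -1)
    return body_id, link_index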
Exemple #40
0
pixelWidth = 320
pixelHeight = 220
camTargetPos = [0,0,0]
camDistance = 4
pitch = -10.0
roll=0
upAxisIndex = 2


while (p.isConnected()):
    for yaw in range (0,360,10) :
        start = time.time()
        p.stepSimulation()
        stop = time.time()
        print ("stepSimulation %f" % (stop - start))

        #viewMatrix = [1.0, 0.0, -0.0, 0.0, -0.0, 0.1736481785774231, -0.9848078489303589, 0.0, 0.0, 0.9848078489303589, 0.1736481785774231, 0.0, -0.0, -5.960464477539063e-08, -4.0, 1.0]
        viewMatrix = p.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
        projectionMatrix  = [1.0825318098068237, 0.0, 0.0, 0.0, 0.0, 1.732050895690918, 0.0, 0.0, 0.0, 0.0, -1.0002000331878662, -1.0, 0.0, 0.0, -0.020002000033855438, 0.0]

        start = time.time()
        img_arr = p.getCameraImage(pixelWidth, pixelHeight, viewMatrix=viewMatrix, projectionMatrix=projectionMatrix, shadow=1,lightDirection=[1,1,1])
        stop = time.time()
        print ("renderImage %f" % (stop - start))
        #time.sleep(.1)
        #print("img_arr=",img_arr)


p.unloadPlugin(plugin)
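If the depth buffer in img_arr[3] is needed in metres, it first has to be linearized; a sketch using the near/far planes implied by the hard-coded projection matrix above (approximately 0.01 and 100):

import numpy as np

# Sketch: convert pybullet's normalized depth buffer to metric depth.
def linearize_depth(depth_buffer, near=0.01, far=100.0):
    depth_buffer = np.asarray(depth_buffer)
    return far * near / (far - (far - near) * depth_buffer)

# e.g. depth_m = linearize_depth(np.reshape(img_arr[3], (pixelHeight, pixelWidth)))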
Exemple #41
0
def main(args, no_connect=False):
    print(args.visual)
    save_path = join(SAVE_ROOT_DIR, args.demo)

    demo_path = join(DEMOS_DIR, args.play)
    demo_info = os.path.basename(demo_path).split('_')
    demo_num = int(demo_info[2].split('.')[0])
    print('DEMO NUM', demo_num)
    args.env = demo_info[1]
    # args.env = 'bowlenv1_occ'
    module = importlib.import_module('bullet.simenv.' + args.env)
    envClass = getattr(module, 'UserEnv')
    env = envClass()
    cid = -1
    if demo_path is None:
        cid = p.connect(p.SHARED_MEMORY)

    if cid<0 and not no_connect:
        cid = p.connect(p.GUI)
    p.resetSimulation()
    #disable rendering during loading makes it much faster
    p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
    # Env is all loaded up here
    h, o = env.load()
    print('Total Number:', p.getNumBodies())
    p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)

    p.setGravity(0.000000,0.000000,0.000000)
    p.setGravity(0,0,-10)

    ##show this for 10 seconds
    #now = time.time()
    #while (time.time() < now+10):
    #	p.stepSimulation()
    p.setRealTimeSimulation(1)

    # Replay and generate object centeric log.
    recordID = 0
    log = readLogFile(demo_path)
    recordNum = len(log)
    itemNum = len(log[0])
    print('record num:', recordNum)
    print('item num:', itemNum)
    init_setup = True
    startID = log[0][2]
    imgCount = 0
    timeCount = 0
    if args.visual:
        rgb_folder, depth_folder, seg_folder, flow_folder, info_folder, base_folder =  create_data_folders(args.visual, save_path)
    else:
        rgb_folder, depth_folder, seg_folder, info_folder, base_folder = create_data_folders(args.visual, save_path)
    
    image_buffer = []
    rgb_writer = imageio.get_writer('{}/{}.mp4'.format(
                    rgb_folder, demo_num), fps=SAMPLE_RATE)
    end_effector_trajectory = []
    # Save high level plan
    # objects of interest
    objects = env.objects()
    obj_start_pose = None
    current_label = -1

    excluded_ids = ['kuka', 'pr2_gripper','pr2_cid', 'pr2_cid2']
    relevant_ids = {val: key for key, val in h.items() if key not in excluded_ids}
    # relevant_ids = {h.cubeid: 'cube1', h.bowlid: 'bowl'}
    relevant_ids_names = {val: str(key) for key, val in relevant_ids.items()}
    object_trajectories = {key: [] for key in relevant_ids.keys()}
    init_object_poses = {}
    # Set arm invisible
    # set_obj_invisible(objects[0])
    set_obj_invisible(1)

    # Optical flow related
    prev = None
    hsv = np.zeros((IMG_SIZE, IMG_SIZE, 3))
    hsv[...,1] = 255

    # For record
    info_record = {}
    for objseq, obj in enumerate(objects):
        info_record[objseq] = []
    cam_pose = getPose(4)[:3]
    # cam_pose[0] -= 0.4
    for i, record in enumerate(log):
        Id = record[2]

        if i != 0 and Id == startID and init_setup:
            init_setup = False
            obj_start_pose = [getPose(x) for x in objects[1:]]
        if init_setup:
            pos = [record[3],record[4],record[5]]
            orn = [record[6],record[7],record[8],record[9]]
            if Id == 6:
                pos = [0.9032135611658497, -0.3798930514455489, 0.6500000245783583]
            if Id == 7:
                pos = [0.9032135611658497, -0.6830514455489, 0.6500000245783583]

            p.resetBasePositionAndOrientation(Id,pos,orn)
            if Id in relevant_ids.keys():
                # if Id == 5:
                #     set_trace()
                init_object_poses[Id] = pos + orn

            numJoints = p.getNumJoints(Id)
            for i in range (numJoints):
                jointInfo = p.getJointInfo(Id,i)
                qIndex = jointInfo[3]
                if qIndex > -1:
                    p.resetJointState(Id,i,record[qIndex-7+17])
        elif Id == h.kuka:  # can also be objects[0]
            numJoints = p.getNumJoints(Id)
            for i in range(numJoints):
                jointInfo = p.getJointInfo(Id,i)
                qIndex = jointInfo[3]
                if i not in (env.o.kukaobject.lf_id, env.o.kukaobject.rf_id) and qIndex > -1:
                    p.setJointMotorControl2(Id, jointIndex=i, controlMode=p.POSITION_CONTROL,
                                            targetPosition=record[qIndex-7+17], targetVelocity=0,
                                            force=env.o.kukaobject.maxForce,
                                            positionGain=0.12,
                                            velocityGain=1)
            rf = record[p.getJointInfo(Id, env.o.kukaobject.rf_id)[3]-7+17]
            lf = record[p.getJointInfo(Id, env.o.kukaobject.lf_id)[3]-7+17]
            position = max(abs(rf), abs(lf))
            if position > 1e-4:
                env.o.kukaobject.instant_close_gripper()
            else:
                env.o.kukaobject.instant_open_gripper()
            env.o.kukaobject.gripper_centered()

            p.setGravity(0.000000,0.000000,-10.000000)
            p.stepSimulation()
            time.sleep(0.003)


        if Id == startID:
            # Don't take pic from the first frame 0
            if timeCount % SAMPLE_RATE == 1 and timeCount // SAMPLE_RATE > 0:
                info = o.kukaobject.get_gripper_pose()
                info.append(o.kukaobject.is_gripper_open)
                # 0, the glory number reserved for kuka arm
                info_record[0].append(np.array(info))
                for objseq, obj in enumerate(objects[1:]):
                    objseq += 1
                    pose = np.array(getPose(obj))
                    info_record[objseq].append(pose)

                viewMat = p.computeViewMatrixFromYawPitchRoll( cam_pose,   #TARGET_POSITION, #getPose(objects[-1])[:3],
                                                              distance=CAMERA_DISTANCE,
                                                              yaw=YAW,
                                                              pitch=PITCH, 
                                                              roll=0,
                                                              upAxisIndex=2)
                projMat = p.computeProjectionMatrixFOV(FOV, aspect=1, nearVal=NEARVAL, farVal=FARVAL)
                results = p.getCameraImage(width=IMG_SIZE,
                                           height=IMG_SIZE,
                                           viewMatrix=viewMat,
                                           projectionMatrix=projMat)

                ### DEBUG for 3D RECONSTRUCTION#####
                # viewMat2 = p.computeViewMatrixFromYawPitchRoll(getPose(4)[:3],
                #                                               distance=CAMERA_DISTANCE,
                #                                               yaw=-10,
                #                                               pitch=-10, 
                #                                               roll=0,
                #                                               upAxisIndex=2)
                # projMat2 = p.computeProjectionMatrixFOV(FOV, aspect=1, nearVal=NEARVAL, farVal=FARVAL)
                # results2 = p.getCameraImage(width=IMG_SIZE,
                #                            height=IMG_SIZE,
                #                            viewMatrix=viewMat2,
                #                            projectionMatrix=projMat2)

                # vin = np.linalg.pinv(np.reshape(projMat, [4,4]).dot(np.reshape(viewMat, [4,4])))
                # vin2 = np.linalg.pinv(np.reshape(projMat2, [4,4]).dot(np.reshape(viewMat2, [4,4])))
                # pin = np.linalg.pinv(np.reshape(projMat, [4,4]))
                # pin2 = np.linalg.pinv(np.reshape(projMat2, [4,4]))                

                # d1 = results[3]
                # d2 = results2[3]
                # d1 = np.swapaxes(copy(d1), 0, 1)
                # d2 = np.swapaxes(copy(d2), 0, 1)
                # d1_r = tinyDepth_no_rescale(copy(d1), NEARVAL, FARVAL)
                # d2_r = tinyDepth_no_rescale(copy(d2), NEARVAL, FARVAL)
                # # d2_r2 = linDepth(d2, NEARVAL, FARVAL)
                # # d2_r3 = nonLinearDepth(d2, NEARVAL, FARVAL)


                # print("tiny: ",d2_r[120,120])



                # w1, h1 = 193, 133
                # w2, h2 = 123, 143
                # vun1 = vin.dot(np.array([w1, h1, d1[w1, h1], 1]))
                # vun2 = vin2.dot(np.array([w2, h2, d2[w2, h2], 1]))
                # pun1 = pin.dot(np.array([w1, h1, d1[w1, h1], 1]))
                # pun2 = pin2.dot(np.array([w2, h2, d2[w2, h2], 1]))
                # pun1 /= pun1[-1]
                # pun2 /= pun2[-1]
                # vun1 /= vun1[-1]
                # vun2 /= vun2[-1]

                # w1_bowl, h1_bowl = 89, 136
                # w2_bowl, h2_bowl = 34, 117
                # vun1_bowl = vin.dot(np.array([w1_bowl, h1_bowl, d1[w1_bowl, h1_bowl], 1]))
                # vun2_bowl = vin2.dot(np.array([w2_bowl, h2_bowl, d2[w2_bowl, h2_bowl], 1]))
                # pun1_bowl = pin.dot(np.array([w1_bowl, h1_bowl, d1[w1_bowl, h1_bowl], 1]))
                # pun2_bowl = pin2.dot(np.array([w2_bowl, h2_bowl, d2[w2_bowl, h2_bowl], 1]))
                # # pun1_bowl /= pun1_bowl[-1]
                # # pun2_bowl /= pun2_bowl[-1]
                # # vun1_bowl /= vun1_bowl[-1]
                # # vun2_bowl /= vun2_bowl[-1]


                # vun1_bowl[2] = d1_r[w1_bowl, h1_bowl]
                # vun2_bowl[2] = d2_r[w2_bowl, h2_bowl]
                # vun1[2] = d1_r[w1, h1]
                # vun2[2] = d2_r[w2, h2]


                # dist1_z = d1_r[w1_bowl, h1_bowl] - d1_r[w1, h1]
                # dist2_z = d2_r[w2_bowl, h2_bowl] - d2_r[w2, h2]


                # dist1 = pun1_bowl[:2] - pun1[:2]
                # dist2 = pun2_bowl[:2] - pun2[:2]


 

                # w1 = 71
                # h1 = 122
                # x_cube_1 = (w1 - 120) * d2_r[w1,h1] / 3.732/100
                # y_cube_1 = (h1 - 120) * d2_r[w1,h1] / 3.732/100
                # z_cube_1 = d1_r[w1, h1]
                # p3d_cube_1 = np.array([x_cube_1, y_cube_1, z_cube_1])
                # x_red_1 = (120 - 120) * d2_r[120,120] / 3.732/100
                # y_red_1 = (120 - 120) * d2_r[120,120] / 3.732/100
                # z_red_1 = d1_r[120, 120]
                # p3d_red_1 = np.array([x_red_1, y_red_1, z_red_1])

                # w2 = 136
                # h2 = 128
                # x_cube_2 = (120 - 120) * d2_r[120,120] / 3.732/100
                # y_cube_2 = (120 - 120) * d2_r[120,120] / 3.732/100
                # z_cube_2 = d2_r[120, 120]
                # p3d_cube_2 = np.array([x_cube_2, y_cube_2, z_cube_2])
                # x_red_2 = (w2 - 120) * d2_r[w2,h2] / 3.732/100
                # y_red_2 = (h2 - 120) * d2_r[w2,h2] / 3.732/100
                # z_red_2 = d2_r[w2, h2]
                # p3d_red_2 = np.array([x_red_2, y_red_2, z_red_2])

                # # print(np.linalg.norm(p3d_red_1 - p3d_cube_1))
                # # print(np.linalg.norm(p3d_red_2 - p3d_cube_2))


                # persp = np.reshape(projMat, [4,4])
                # persp[0:2, 2] = 120
                # alpha, s, x0, beta, y0 = persp[0, 0], persp[0,1], persp[0,2], persp[1,1], persp[1,2]
                # K = np.array([[alpha, s, x0], [0, beta, y0], [0,0,-1]])
                # I_m = np.eye(3)
                # I_m[1,1] = -1.0
                # I_h = np.eye(3)
                # I_h[1,2] = 120.0
                # Ks = I_h.dot(I_m).dot(K)
                # cx = Ks[0,2]
                # cy = Ks[1,2]
                # fx = Ks[0,0]*100
                # fy = Ks[1,1]*100


                # w_eeright = 131
                # h_eeright = 88
                # w_eeleft= 103
                # h_eeleft = 88
                # # w_eeleft= 120
                # # h_eeleft = 120                

                # x_eeleft_1 = (w_eeleft - cx) * d2_r[w_eeleft,h_eeleft] / fx
                # y_eeleft_1 = (h_eeleft - cy + 120) * d2_r[w_eeleft,h_eeleft] / fy
                # z_eeleft_1 = d1_r[w_eeleft, h1]
                # p3d_eeleft_1 = np.array([x_eeleft_1, y_eeleft_1, z_eeleft_1])

                # x_eeright_1 = (w_eeright - cx) * d2_r[w_eeright,h_eeright] / fx
                # y_eeright_1 = (h_eeright - cy + 120) * d2_r[w_eeright,h_eeright] / fy
                # z_eeright_1 = d1_r[w_eeright, h_eeright]
                # p3d_eeright_1 = np.array([x_eeright_1, y_eeright_1, z_eeright_1])

                # w_eeright = 140
                # h_eeright = 92
                # w_eeleft= 133
                # h_eeleft = 89

                # x_eeleft_2 = (w_eeleft - cx) * d2_r[w_eeleft,h_eeleft] / fx
                # y_eeleft_2 = (h_eeleft - cy + 120) * d2_r[w_eeleft,h_eeleft] / fy
                # z_eeleft_2 = d2_r[w_eeleft, h_eeleft]
                # p3d_eeleft_2 = np.array([x_eeleft_2, y_eeleft_2, z_eeleft_2])

                # x_eeright_2 = (w_eeright - cx) * d2_r[w_eeright,h_eeright] / fx
                # y_eeright_2 = (h_eeright - cy + 120) * d2_r[w_eeright,h_eeright] / fy
                # z_eeright_2 = d2_r[w_eeright, h_eeright]
                # p3d_eeright_2 = np.array([x_eeright_2, y_eeright_2, z_eeright_2])
                
                # print(np.linalg.norm(p3d_eeright_1 - p3d_eeleft_1))
                # print(np.linalg.norm(p3d_eeright_2 - p3d_eeleft_2))

                # plt.figure()
                # plt.imshow(results[2])
                # plt.figure()
                # plt.imshow(results2[2])
                # # plt.figure()
                # # plt.imshow(results[3])
                # # plt.figure()
                # # plt.imshow(results2[3])

                # # plt.show()
                # set_trace()
                ############# END DEBUG #############
                # set_trace()  # debugging breakpoint from the source, disabled so the replay does not pause on every saved frame
                rgb_writer.append_data(results[2])
                
                for key in relevant_ids.keys():
                    object_trajectories[key].append(getPose(key))


                link_state = p.getLinkState(o.kukaobject.kukaId, o.kukaobject.kukaEndEffectorIndex)
                end_effector_trajectory.append(np.array(link_state[0] + link_state[1]))

                # Note: we can compute flow online
                imageio.imwrite('{0}/{1}_{2:05d}.png'.format(
                    rgb_folder, demo_num, imgCount), results[2])
                imageio.imwrite('{0}/{1}_{2:05d}.png'.format(
                    depth_folder, demo_num, imgCount), results[3])
                imageio.imwrite('{0}/{1}_{2:05d}.png'.format(
                    seg_folder, demo_num, imgCount), results[4])
                np.save('{0}/{1}_{2:05d}_mask.npy'.format(
                    seg_folder, demo_num, imgCount), results[4])
                # With visual flag, save visualizable imgs
                if args.visual:
                    cur = cv2.cvtColor(results[2], cv2.COLOR_BGR2GRAY)
                    # First frame
                    if timeCount // SAMPLE_RATE == 1:
                        prev = cur
                    flow = compute_optical_flow(prev, cur)

                    # Visualize optical flow
                    mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
                    hsv[...,0] = ang*180/np.pi/2
                    hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
                    hsv = hsv.astype(np.uint8)
                    bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)

                    scipy.misc.imsave('{}/flow_{}_{}.png'.format(
                        flow_folder, demo_num, imgCount), bgr)
                    prev = cur

                imgCount += 1
            timeCount += 1

    print("Saving files to ", base_folder)
    np.save('{}/{}_ee.npy'.format(
        base_folder, demo_num), np.array(end_effector_trajectory))

    with open('{}/{}_objects.json'.format(base_folder, demo_num), 'w') as f:
        json.dump(object_trajectories, f)

    with open('{}/{}_init_object_poses.json'.format(base_folder, demo_num), 'w') as f:
        json.dump(init_object_poses, f)
    with open('{}/{}_relevant_ids_names.json'.format(base_folder, demo_num), 'w') as f:
        json.dump(relevant_ids_names, f)
    # Check if consistent with the above params
    camera_params = {"cameraTargetPosition": TARGET_POSITION, "distance": CAMERA_DISTANCE,
                     "yaw": YAW, "pitch": PITCH, "roll": 0.0, "upAxisIndex": 2,
                     "nearPlane": NEARVAL, "farPlane": FARVAL, "fov": FOV, "aspect": 1}
    with open('{}/{}_camera_params.json'.format(base_folder, demo_num), 'w') as f:
        json.dump(camera_params, f)


    if args.visual:
        for i in range(len(objects)):
            with open('{}/test_{}_{}.txt'.format(
                    info_folder, demo_num, i), 'w') as f:
                print_list = [' '.join(map(str, x)) + '\n' for x in info_record[i]]
                f.writelines(print_list)
    else:
        torch.save(info_record, '{}/info-{}-.tch'.format(info_folder, demo_num))
    rgb_writer.close()
    return 0
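compute_optical_flow is referenced above but not shown; a plausible sketch of such a helper using OpenCV's dense Farneback flow (the parameter values are common defaults, not taken from the source):

import cv2

def compute_optical_flow(prev_gray, cur_gray):
    # Sketch: dense optical flow between two grayscale frames; returns an
    # (H, W, 2) array of per-pixel displacements, as consumed by cartToPolar above.
    return cv2.calcOpticalFlowFarneback(prev_gray, cur_gray, None,
                                        pyr_scale=0.5, levels=3, winsize=15,
                                        iterations=3, poly_n=5, poly_sigma=1.2,
                                        flags=0)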
Exemple #42
0
                                     RGB=False,
                                     title='seg_' + str(img_counter).zfill(4),
                                     time_stamp=False)
            sensor.camera.save_image(depth_equilibrium,
                                     path,
                                     RGB=False,
                                     title='depth_' +
                                     str(img_counter).zfill(4),
                                     time_stamp=False)
            img_counter += 1

            if args.debug:
                camera_info = p.getDebugVisualizerCamera()
                _, _, debug_rgb, _, _ = p.getCameraImage(
                    640,
                    480,
                    camera_info[2],
                    camera_info[3],
                    renderer=p.ER_BULLET_HARDWARE_OPENGL)
                sensor.camera.save_image(debug_rgb,
                                         path,
                                         RGB=True,
                                         title='debug_' +
                                         str(img_counter).zfill(4),
                                         time_stamp=False)
                # sensor.camera.show_image(debug_rgb, title="Debug", save=False)

            if args.show_image:
                sensor.camera.show_image(rgb_img, title='Raw RGB', save=False)
                sensor.camera.show_image(tactile_img,
                                         title='Tactile RGB',
                                         save=False)
Exemple #43
0
import pybullet as p
p.connect(p.GUI)
plane = p.loadURDF("plane.urdf")
visualData = p.getVisualShapeData(plane, p.VISUAL_SHAPE_DATA_TEXTURE_UNIQUE_IDS)
print(visualData)
curTexUid = visualData[0][8]
print(curTexUid)
texUid = p.loadTexture("tex256.png")
print("texUid=", texUid)

p.changeVisualShape(plane, -1, textureUniqueId=texUid)

for i in range(100):
  p.getCameraImage(320, 200)
p.changeVisualShape(plane, -1, textureUniqueId=curTexUid)

for i in range(100):
  p.getCameraImage(320, 200)
Exemple #44
0
    def computeVisibilities(self, world_name, camera_id):
        """
        """
        visibilities = {}
        mean_distances_from_center = {}
        nb_pixel = {}
        relative_sizes = {}
        if camera_id in self.ctx.worlds()[world_name].scene().nodes():
            camera_node = self.ctx.worlds()[world_name].scene().nodes(
            )[camera_id]
            position = [
                camera_node.position.pose.position.x,
                camera_node.position.pose.position.y,
                camera_node.position.pose.position.z
            ]
            orientation = [
                camera_node.position.pose.orientation.x,
                camera_node.position.pose.orientation.y,
                camera_node.position.pose.orientation.z,
                camera_node.position.pose.orientation.w
            ]
            euler = tf.transformations.euler_from_quaternion(orientation)
            view_matrix = p.computeViewMatrixFromYawPitchRoll(
                position, -0.5, math.degrees(euler[2]), math.degrees(euler[1]),
                math.degrees(euler[1]), 2)
            fov = float(self.ctx.worlds()
                        [world_name].scene().nodes().get_node_property(
                            camera_id, "hfov"))
            clipnear = float(self.ctx.worlds()
                             [world_name].scene().nodes().get_node_property(
                                 camera_id, "clipnear"))
            clipfar = float(self.ctx.worlds()
                            [world_name].scene().nodes().get_node_property(
                                camera_id, "clipfar"))
            aspect = float(self.ctx.worlds()
                           [world_name].scene().nodes().get_node_property(
                               camera_id, "aspect"))
            proj_matrix = p.computeProjectionMatrixFOV(40.0, aspect, clipnear,
                                                       clipfar)
            width, height, rgb, depth, seg = p.getCameraImage(
                self.width,
                self.height,
                viewMatrix=view_matrix,
                projectionMatrix=proj_matrix,
                flags=p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX)
            max_nb_pixel = 0
            background_nb_pixel = 0
            max_visibility = 0
            r = min(width, height)
            for line in range(0, height):
                for col in range(0, width):
                    pixel = seg[line][col]
                    if (pixel >= 0):
                        bullet_id = pixel & ((1 << 24) - 1)
                        if bullet_id in self.reverse_node_id_map:
                            uwds_id = self.reverse_node_id_map[bullet_id]
                            if uwds_id != self.ctx.worlds()[world_name].scene(
                            ).root_id():
                                if uwds_id not in mean_distances_from_center:
                                    mean_distances_from_center[uwds_id] = 0.0
                                line_dist = (line - (line / 2.0))
                                col_dist = (col - (col / 2.0))
                                #dist_from_center = math.sqrt(line_dist*line_dist+col_dist*col_dist)
                                dist_from_center = math.sqrt(col_dist *
                                                             col_dist)
                                mean_distances_from_center[
                                    uwds_id] += dist_from_center
                                if uwds_id not in nb_pixel:
                                    nb_pixel[uwds_id] = 0
                                nb_pixel[uwds_id] += 1
                                if uwds_id not in visibilities:
                                    visibilities[uwds_id] = 0
                                visibilities[uwds_id] += 1 - min(
                                    1, dist_from_center / r)
                                if max_visibility < visibilities[uwds_id]:
                                    max_visibility = visibilities[uwds_id]
                    else:
                        background_nb_pixel += 1.0

            if len(mean_distances_from_center) > 0:
                print "camera <%s> :" % self.worlds[world_name].scene.nodes[
                    camera_id].name
                for node_id, mean_dist in mean_distances_from_center.items():
                    mean_distances_from_center[
                        node_id] = mean_dist / nb_pixel[node_id]
                    camera_node = self.worlds[world_name].scene.nodes[
                        camera_id]
                    object_node = self.worlds[world_name].scene.nodes[node_id]
                    if mean_distances_from_center[node_id] < r:
                        visibilities[node_id] = 1 - mean_distances_from_center[
                            node_id] / r
                    else:
                        visibilities[node_id] = 0
                    if visibilities[node_id] > self.min_treshold:
                        print " - see object <%s> with %5f confidence" % (
                            object_node.name, visibilities[node_id])
                    else:
                        del visibilities[node_id]
        return visibilities
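The pixel-by-pixel Python loop above dominates the runtime for larger images; a vectorized sketch of the same id decoding and pixel counting (assuming NumPy and the seg buffer returned by getCameraImage):

import numpy as np

# Sketch: count visible pixels per bullet body id in one pass.
def count_pixels_per_body(seg):
    seg = np.asarray(seg).ravel()
    foreground = seg[seg >= 0]
    bullet_ids = foreground & ((1 << 24) - 1)   # strip the link-index bits
    ids, counts = np.unique(bullet_ids, return_counts=True)
    return dict(zip(ids.tolist(), counts.tolist()))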
Exemple #45
0
    def __init__(self, quadruped):
        self.q = quadruped
        # self.base_link = p.getLinkState(self.q, 0)
        self.base_link = "chassis"
        self.bl_link = "RL_lower_leg"
        self.br_link = "RR_lower_leg"
        self.fl_link = "FL_lower_leg"
        self.fr_link = "FR_lower_leg"
        self.prepare_kin_dyn()
        self._BuildJointNameToIdDict()
        self._BuildMotorIdList()

        self.kpp = 1000
        self.kdp = 100

        self.kpw = 200
        self.kdw = 20

        #enable collision between lower legs
        for j in range(p.getNumJoints(quadruped)):
            print(p.getJointInfo(quadruped, j))

        #2,5,8 and 11 are the lower legs
        lower_legs = [2, 5, 8, 11]
        for l0 in lower_legs:
            for l1 in lower_legs:
                if (l1 > l0):
                    enableCollision = 1
                    print("collision for pair", l0, l1,
                          p.getJointInfo(quadruped, l0)[12],
                          p.getJointInfo(quadruped, l1)[12], "enabled=",
                          enableCollision)
                    p.setCollisionFilterPair(quadruped, quadruped, l0, l1,
                                             enableCollision)

        jointIds = []
        paramIds = []
        jointOffsets = []
        jointDirections = [-1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1]
        jointAngles = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        self._motor_direction = jointDirections
        self.jointDirections = jointDirections

        for i in range(4):
            jointOffsets.append(0)
            jointOffsets.append(-0.7)
            jointOffsets.append(0.7)

        maxForceId = p.addUserDebugParameter("maxForce", 0, 100, 20)

        for j in range(p.getNumJoints(quadruped)):
            p.changeDynamics(quadruped, j, linearDamping=0, angularDamping=0)
            info = p.getJointInfo(quadruped, j)
            print(info)
            jointName = info[1]
            jointType = info[2]
            if (jointType == p.JOINT_PRISMATIC
                    or jointType == p.JOINT_REVOLUTE):
                jointIds.append(j)
        self.jointIds = jointIds

        p.getCameraImage(480, 320)
        p.setRealTimeSimulation(0)
Exemple #46
0
physicsClient = p.connect(p.GUI)

p.setGravity(0,0,0)
bearStartPos1 = [-3.3,0,0]
bearStartOrientation1 = p.getQuaternionFromEuler([0,0,0])
bearId1 = p.loadURDF("plane.urdf", bearStartPos1, bearStartOrientation1)
bearStartPos2 = [0,0,0]
bearStartOrientation2 = p.getQuaternionFromEuler([0,0,0])
bearId2 = p.loadURDF("teddy_large.urdf",bearStartPos2, bearStartOrientation2)
textureId = p.loadTexture("checker_grid.jpg")
#p.changeVisualShape(objectUniqueId=0, linkIndex=-1, textureUniqueId=textureId)
#p.changeVisualShape(objectUniqueId=1, linkIndex=-1, textureUniqueId=textureId)


useRealTimeSimulation = 1

if (useRealTimeSimulation):
	p.setRealTimeSimulation(1)

while 1:
	if (useRealTimeSimulation):
		camera = p.getDebugVisualizerCamera()
		viewMat = camera[2]
		projMat = camera[3]
		#An example of setting the view matrix for the projective texture.
		#viewMat = p.computeViewMatrix(cameraEyePosition=[7,0,0], cameraTargetPosition=[0,0,0], cameraUpVector=[0,0,1])
		p.getCameraImage(300, 300, renderer=p.ER_BULLET_HARDWARE_OPENGL, flags=p.ER_USE_PROJECTIVE_TEXTURE, projectiveTextureView=viewMat, projectiveTextureProj=projMat)
		p.setGravity(0,0,0)
	else:
		p.stepSimulation()

Exemple #47
0
while 1:
    viewMatrix = p.computeViewMatrix(cameraEyePosition=[0, 0, 2],
                                     cameraTargetPosition=[0, 0, 0],
                                     cameraUpVector=[0, 1, 0])

    projectionMatrix = p.computeProjectionMatrixFOV(fov=60.0,
                                                    aspect=1.0,
                                                    nearVal=0.02,
                                                    farVal=3.1)

    width, height, rgbImg, depthImg, segImg = p.getCameraImage(
        width=480,
        height=480,
        viewMatrix=viewMatrix,
        projectionMatrix=projectionMatrix,
        shadow=True,
        renderer=p.ER_BULLET_HARDWARE_OPENGL)
    model = core.Model.load('/home/developer/garage/grocery_detector.pth', \
     ['ambrosia','apple','banana','bottle','cereal','coke',\
       'lipton','lysol','milk','nutella','orange','oreo','pepsi'])

    rgbImg = Image.fromarray(rgbImg).convert('RGB')
    predictions = model.predict(rgbImg)
    camera_view = cv2.cvtColor(np.array(rgbImg), cv2.COLOR_RGB2BGR)
    # camera_view = np.array(rgbImg)

    labels, boxes, scores = predictions
    boxes = boxes.numpy()
    scores = scores.numpy()
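A sketch of a typical follow-up step, drawing the predicted boxes and scores onto camera_view (the 0.5 threshold and colours are arbitrary assumptions, not from the source):

# Sketch: overlay detections on the BGR frame prepared above.
for label, box, score in zip(labels, boxes, scores):
    if score < 0.5:                              # arbitrary confidence cut-off
        continue
    x1, y1, x2, y2 = [int(v) for v in box]
    cv2.rectangle(camera_view, (x1, y1), (x2, y2), (0, 255, 0), 2)
    cv2.putText(camera_view, "{} {:.2f}".format(label, score),
                (x1, max(y1 - 5, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
cv2.imshow("detections", camera_view)
cv2.waitKey(1)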
Exemple #48
0
    env.render = True
    reward_sums = []

    for epd in range(4):
        obs =  env.reset()
        done = False
        sum_rewards = 0.0
        step = 0
        while not done:
            action = population.elite_population[epd % 8]\
                    .get_action(obs)
            obs, reward, done, info = env.step(action)
            time.sleep(0.01)
            sum_rewards += reward
            
            img = p.getCameraImage(512,512)


            skimage.io.imsave("./imgs/epd{}step{}.png".format(epd, step), img[2])

            step += 1
        reward_sums.append(sum_rewards)


    print("average sum of rewards: {:.3e} min: {:.3e} max: {:.3e}"\
            .format(np.mean(reward_sums), np.min(reward_sums), np.max(reward_sums)))


    import pdb; pdb.set_trace();

Exemple #49
0
pixelWidth = 320
pixelHeight = 240
nearPlane = 0.01
farPlane = 1000
lightDirection = [0,1,0]
lightColor = [1,1,1]#optional argument
fov = 60

#img_arr = pybullet.renderImage(pixelWidth, pixelHeight)
#renderImage(w, h, view[16], projection[16])
#img_arr = pybullet.renderImage(pixelWidth, pixelHeight, cameraPos, camTargetPos, cameraUp, nearPlane, farPlane)
for pitch in range (0,360,10) :
    viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
    aspect = pixelWidth / pixelHeight
    projectionMatrix = pybullet.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane)
    img_arr = pybullet.getCameraImage(pixelWidth, pixelHeight, viewMatrix,projectionMatrix, lightDirection,lightColor)
    w=img_arr[0]
    h=img_arr[1]
    rgb=img_arr[2]
    dep=img_arr[3]
    #print 'width = %d height = %d' % (w,h)
    # reshape creates np array
    np_img_arr = np.reshape(rgb, (h, w, 4))
    np_img_arr = np_img_arr*(1./255.)
    #show
    plt.imshow(np_img_arr,interpolation='none')
    
    plt.pause(0.01)

pybullet.resetSimulation()
Exemple #50
0
	#render Camera at 10Hertz
	if (nowTime-lastTime>.1):
		ls = p.getLinkState(car,zed_camera_joint, computeForwardKinematics=True)
		camPos = ls[0]
		camOrn = ls[1]
		camMat = p.getMatrixFromQuaternion(camOrn)
		upVector = [0,0,1]
		forwardVec = [camMat[0],camMat[3],camMat[6]]
		#sideVec =  [camMat[1],camMat[4],camMat[7]]
		camUpVec =  [camMat[2],camMat[5],camMat[8]]
		camTarget = [camPos[0]+forwardVec[0]*10,camPos[1]+forwardVec[1]*10,camPos[2]+forwardVec[2]*10]
		camUpTarget = [camPos[0]+camUpVec[0],camPos[1]+camUpVec[1],camPos[2]+camUpVec[2]]
		viewMat = p.computeViewMatrix(camPos, camTarget, camUpVec)
		projMat = camInfo[3]
		#p.getCameraImage(320,200,viewMatrix=viewMat,projectionMatrix=projMat, flags=p.ER_NO_SEGMENTATION_MASK, renderer=p.ER_BULLET_HARDWARE_OPENGL)
		p.getCameraImage(320,200,viewMatrix=viewMat,projectionMatrix=projMat, renderer=p.ER_BULLET_HARDWARE_OPENGL)
		lastTime=nowTime
	
	nowControlTime = time.time()
	
	nowLidarTime = time.time()
	#lidar at 20Hz
	if (nowLidarTime-lastLidarTime>.3):
		#print("Lidar!")
		numThreads=0
		results = p.rayTestBatch(rayFrom,rayTo,numThreads, parentObjectUniqueId=car, parentLinkIndex=hokuyo_joint)
		for i in range (numRays):
			hitObjectUid=results[i][0]
			hitFraction = results[i][2]
			hitPosition = results[i][3]
			if (hitFraction==1.):
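				# Sketch of the usual continuation (not part of the original snippet):
				# redraw the full ray in a "miss" colour; rayMissColor, rayHitColor
				# and rayIds are assumed to be set up alongside rayFrom/rayTo.
				p.addUserDebugLine(rayFrom[i], rayTo[i], rayMissColor,
					replaceItemUniqueId=rayIds[i],
					parentObjectUniqueId=car, parentLinkIndex=hokuyo_joint)
			else:
				localHitTo = [rayFrom[i][0] + hitFraction * (rayTo[i][0] - rayFrom[i][0]),
					rayFrom[i][1] + hitFraction * (rayTo[i][1] - rayFrom[i][1]),
					rayFrom[i][2] + hitFraction * (rayTo[i][2] - rayFrom[i][2])]
				p.addUserDebugLine(rayFrom[i], localHitTo, rayHitColor,
					replaceItemUniqueId=rayIds[i],
					parentObjectUniqueId=car, parentLinkIndex=hokuyo_joint)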
Exemple #51
0
import pybullet as p
import time
p.connect(p.GUI)
p.loadURDF("plane.urdf")
sphereUid = p.loadURDF("sphere_transparent.urdf", [0, 0, 2])

redSlider = p.addUserDebugParameter("red", 0, 1, 1)
greenSlider = p.addUserDebugParameter("green", 0, 1, 0)
blueSlider = p.addUserDebugParameter("blue", 0, 1, 0)
alphaSlider = p.addUserDebugParameter("alpha", 0, 1, 0.5)

while (1):
  red = p.readUserDebugParameter(redSlider)
  green = p.readUserDebugParameter(greenSlider)
  blue = p.readUserDebugParameter(blueSlider)
  alpha = p.readUserDebugParameter(alphaSlider)
  p.changeVisualShape(sphereUid, -1, rgbaColor=[red, green, blue, alpha])
  p.getCameraImage(320,
                   200,
                   flags=p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX,
                   renderer=p.ER_BULLET_HARDWARE_OPENGL)
  time.sleep(0.01)
Exemple #52
0
p.setAdditionalSearchPath(pybullet_data.getDataPath())
plane = p.loadURDF("plane100.urdf")
cube = p.loadURDF("cube.urdf", [0, 0, 1])
viewMatrix = p.computeViewMatrix(
    cameraEyePosition=[2, 1, 3],
    cameraTargetPosition=[0, 0, 0],
    cameraUpVector=[0, 1, 0])

projectionMatrix = p.computeProjectionMatrixFOV(
    fov=45.0,
    aspect=1.0,
    nearVal=0.1,
    farVal=3.1)
width, height, rgbImg, depthImg, segImg = p.getCameraImage(
    width=224, 
    height=224,
    viewMatrix=viewMatrix,
    projectionMatrix=projectionMatrix)
imgW = width #int(width / 10)
imgH = height #int(height / 10)
#Dimg = Image.fromarray(depthImg, 'F')
depth_img_buffer = np.reshape(depthImg, [imgW, imgH])
pointCloud = np.empty([int(imgH), int(imgW), 4])
projectionMatrix = np.asarray(projectionMatrix).reshape([4,4],order='F')
viewMatrix = np.asarray(viewMatrix).reshape([4,4],order='F')
tran_pix_world = np.linalg.inv(np.matmul(projectionMatrix, viewMatrix))

for h in range(0, imgH):
    for w in range(0, imgW):
        x = (2*w - imgW)/imgW
        y = -(2*h - imgH)/imgH  # be careful! depth and its corresponding position
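        # Sketch of the usual continuation (not in the original snippet):
        # unproject the pixel through the inverse of projection*view to get a
        # world-space point for the cloud.
        z = 2 * depth_img_buffer[h, w] - 1
        pixPos = np.asarray([x, y, z, 1])
        position = np.matmul(tran_pix_world, pixPos)
        pointCloud[h, w, :] = position / position[3]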
Exemple #53
0
    def reset(self, target=None):
        self.img_buffer = []
        try:
            self.destroy()
        except:
            print("initial start!")
        self.physical_id = self.p

        self.p.setAdditionalSearchPath(pybullet_data.getDataPath())
        self.p.setGravity(0, 0, -9.8)
        self.p.setTimeStep(1 / 250.)

        # epoch_max = -1
        # for file in os.listdir (self.log_root):
        #     if ('epoch' in file) and ('txt' not in file):
        #         epoch_max = max (epoch_max, int (file.split ('-')[1]))
        # self.epoch_num = epoch_max + 1
        # print ('epoch_max:{}'.format (epoch_max))

        self.epoch_num += 1
        print('epoch_max:{}'.format(self.epoch_num))

        lock = 1
        while (lock):
            try:
                self.log_path = safe_path(
                    os.path.join(self.log_root,
                                 'epoch-{}'.format(self.epoch_num)))
                self.log_info = open(
                    os.path.join(self.log_root,
                                 'epoch-{}.txt'.format(self.epoch_num)), 'w')
                lock = 0
            except:
                self.epoch_num += 1

        # self.evaluator.update (img_path=self.log_path, start_id=0)
        self.seq_num = 0
        self.init_dmp()
        self.init_motion()
        self.init_rl()
        self.reset_obj()

        for j in range(6):
            index_ = self.robot.activeGripperJointIndexList[j]
            p.resetJointState(self.robotId, index_, 0, 0)

        if self.opt.use_embedding:
            action_p = random.random()

            action_p = int(action_p * len(self.opt.embedding_list))

            if self.opt.fine_tune:
                fine_tune_list = self.opt.fine_tune_list
                action_p = random.random()
                action_p = int(action_p * len(fine_tune_list))
                action_p = np.where(
                    np.array(self.opt.embedding_list) ==
                    fine_tune_list[action_p])[0][0]

            if target is not None:
                action_p = np.where(
                    np.array(self.opt.embedding_list) == target)[0][0]

            if self.opt.nlp_embedding:
                self.opt.load_embedding = self.opt.embedding_list[action_p]
                self.action_embedding = self.embedding_data[
                    self.opt.load_embedding]
            else:
                self.action_embedding = np.array([0] * self.opt.embedding_dim)
                self.action_embedding[action_p] = 1
                self.opt.load_embedding = self.opt.embedding_list[action_p]

        self.init_grasp()

        img_info = p.getCameraImage(
            width=self.w,
            height=self.h,
            viewMatrix=self.view_matrix,
            projectionMatrix=self.proj_matrix,
            shadow=-1,
            flags=p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX,
            renderer=p.ER_TINY_RENDERER)
        observation = img_info[2][:, :, :3]

        if self.opt.add_mask:
            mask = (img_info[4] > 10000000)
            mask_id_label = [
                234881025, 301989889, 285212673, 268435457, 318767105,
                335544321, 201326593, 218103809, 167772161
            ]
            for item in mask_id_label:
                mask = mask * (img_info[4] != item)

            box_mask_label = [
                117440516, 234881028, 184549380, 33554436, 167772164, 50331652,
                134217732, 16777220, 67108868, 150994948, 251658244, 234881028,
                100663300, 218103812, 83886084, 268435460
            ]
            for item in box_mask_label:
                mask = mask * (img_info[4] != item)

            observation = cv2.cvtColor(observation, cv2.COLOR_RGB2BGR)
            observation[mask] = [127, 151, 182]

        if self.opt.observation == 'joint_pose':
            observation = np.array([
                p.getJointState(self.robotId, i)[0]
                for i in range(self.robotEndEffectorIndex)
            ])
        elif self.opt.observation == 'end_pos':
            observation = np.array(p.getLinkState(self.robotId, 7)[0])
        elif self.opt.observation == 'before_cnn':
            observation = np.array(observation)

        if self.opt.use_embedding:
            self.observation = [self.action_embedding, observation]
        else:
            self.observation = observation

        self.info = ''
        return self.observation