Code Example #1
    def get_current_detailed_config(self):
        # Get the object states
        gpos, gquat = p.getBasePositionAndOrientation(self.green_block,
                                                      physicsClientId=0)
        rpos, rquat = p.getBasePositionAndOrientation(self.red_block,
                                                      physicsClientId=0)

        g_linear_vel, g_angular_velocity = p.getBaseVelocity(self.green_block,
                                                             physicsClientId=0)
        r_linear_vel, r_angular_velocity = p.getBaseVelocity(self.red_block,
                                                             physicsClientId=0)

        # Convert quat to euler
        geuler = p.getEulerFromQuaternion(gquat)
        reuler = p.getEulerFromQuaternion(rquat)

        # Format into a config vector
        return {
            "object_1":
            np.concatenate(
                [gpos, gquat, geuler, g_linear_vel, g_angular_velocity]),
            "object_2":
            np.concatenate(
                [rpos, rquat, reuler, r_linear_vel, r_angular_velocity])
        }
Code Example #2
File: graphMarking.py Project: yoo-bit/hopBot
def recordData():
    xList.append(p.getBasePositionAndOrientation(robotId)[0][0])
    zList.append(p.getBasePositionAndOrientation(robotId)[0][2])
    pitchList.append(p.getBasePositionAndOrientation(robotId)[1][1])
    bodyAngleList.append(p.getJointState(robotId, 0)[0])
    heightList.append(p.getBasePositionAndOrientation(robotId)[0][2])
    xf = (p.getLinkState(robotId, 3)[0][0] -
          p.getLinkState(robotId, 0)[0][0])
    xdList.append(p.readUserDebugParameter(xd))
    xfList.append(xf)
    xfd = (p.getBaseVelocity(robotId)[0][0] * 0.39 / 2 +
           p.readUserDebugParameter(footFeedbackGain) *
           (p.getBaseVelocity(robotId)[0][0]))
    desiredForwardSpeedX = getDesiredSpeed(
                                currentPosition=(
                                 p.getBasePositionAndOrientation(robotId)[0][0]
                                ),
                                currentSpeed=p.getBaseVelocity(robotId)[0][0],
                                targetPosition=xd,
                                kp=kPosition,
                                kv=kVelocity,
                                maxForwardSpeed=maxSpeed)
    xVdList.append(desiredForwardSpeedX)
    xVelocityList.append(p.getBaseVelocity(robotId)[0][0])
    yVelocityList.append(p.getBaseVelocity(robotId)[0][1])
Code Example #3
    def update(self):

        pos = np.array(pybullet.getBasePositionAndOrientation(self._demon)[0])
        vel = np.array(pybullet.getBaseVelocity(self._demon)[0])
        pos = pos + (vel * self._dt)
        pos[2] = 0.5

        # Need to do this so the intersections are computed
        for i in range(self.sim_steps):
            for particle in self._particles:
                pos, ori = [
                    np.array(_)
                    for _ in pybullet.getBasePositionAndOrientation(particle)
                ]
                lin_vel, ang_vel = pybullet.getBaseVelocity(particle)
                pos[-1] = self._wall_heights[0]
                pybullet.resetBasePositionAndOrientation(particle,
                                                         posObj=pos,
                                                         ornObj=ori)
                # noise = np.random.normal(loc=0,scale=0.1,size=3)
                pybullet.resetBaseVelocity(particle, lin_vel, ang_vel)
            pybullet.stepSimulation()

        for particle in self._particles:
            target_base_vel = pybullet.getBaseVelocity(particle)[0]
            updated_vel = target_base_vel + np.random.normal(
                loc=0., size=(3, ), scale=0.75)
            updated_vel[-1] = 0.
            pybullet.resetBaseVelocity(particle, linearVelocity=updated_vel)

        reward = self.computeReward(state=None)
        self.__reward = reward
Code Example #4
    def getObservation(self):

        if (self._flat_obs):
            obs = np.array(
                self._obs_stack).flatten() / 255.0  ## Normalize to [0,1]
        else:
            obs = np.array(
                self.getlocalMapObservation()) / 255.0  ## Normalize to [0,1]
        # print ("obs 1: ", obs)
        # obs = np.array(self.getlocalMapObservation()).flatten()
        # obs = np.array(self.getlocalMapObservation()).flatten()
        # print ("obs 2: ", obs)
        # print ("self._obs_stack: ", self._obs_stack)
        if (self._include_true_state):
            pos = np.array(
                pybullet.getBasePositionAndOrientation(self._demon)[0])
            particle = self._get_target()
            posT = np.array(
                pybullet.getBasePositionAndOrientation(particle)[0])
            goalDirection = posT - pos
            vel_d = np.array(pybullet.getBaseVelocity(particle)[0])
            base_vel = pybullet.getBaseVelocity(self._demon)[0]
            obs_ = np.array([
                pos[0], pos[1], base_vel[0], base_vel[1], goalDirection[0],
                goalDirection[1], vel_d[0], vel_d[1]
            ]).flatten()
            #             print ("obs_ :", obs_)
            obs = [obs_, obs]
        return obs
Code Example #5
File: block_push.py Project: UM-ARM-Lab/tampc
    def _observe_info(self, visualize=True):
        info = {}

        # number of wall contacts
        info['wc'] = 0
        if self.level > 0:
            for wallId in self.walls:
                contactInfo = p.getContactPoints(self.blockId, wallId)
                info['wc'] += len(contactInfo)

        # block velocity
        v, va = p.getBaseVelocity(self.blockId)
        info['bv'] = np.linalg.norm(v)
        info['bva'] = np.linalg.norm(va)

        # pusher velocity
        v, va = p.getBaseVelocity(self.pusherId)
        info['pv'] = np.linalg.norm(v)
        info['pva'] = np.linalg.norm(va)

        # block-pusher distance
        x, y, _ = self._observe_pusher()
        xb, yb, theta = self._observe_block()

        along, from_center = pusher_pos_along_face((xb, yb), theta, (x, y))
        info['pusher dist'] = from_center - DIST_FOR_JUST_TOUCHING
        info['pusher along'] = along

        self._observe_additional_info(info, visualize)
        self._sim_step += 1

        for key, value in info.items():
            if key not in self._contact_info:
                self._contact_info[key] = []
            self._contact_info[key].append(value)
Code Example #6
 def computeReward(self, state=None):
     import numpy as np
     goalDir = self.getTargetDirection()
     # goalDir = goalDir / np.sqrt((goalDir*goalDir).sum(axis=0))
     # print ("goalDir: ", goalDir)
     agentVel = np.array(p.getBaseVelocity(self._agent)[0])
     # agentSpeed = np.sqrt((agentVel*agentVel).sum(axis=0))
     velDiff = goalDir - agentVel
     diffMag = np.sqrt((velDiff*velDiff).sum(axis=0))
     # agentVel = agentVel / agentSpeed
     # print ("agentVel: ", agentVel)
     # agentSpeedDiff = (1 - agentSpeed)
     ### heading towards goal
     # reward = np.dot(goalDir, agentVel) + np.exp(agentSpeedDiff*agentSpeedDiff * -2.0)
     reward = np.exp((diffMag*diffMag) * -2.0)
     
     llc_dir = np.array([self._llc_target[0], self._llc_target[1], 0])
     ### normalize
     # llc_dir = llc_dir / np.sqrt((llc_dir*llc_dir).sum(axis=0))
     # relative_llc_goal_state = (llc_dir-self._last_state[:3])
     des_change = llc_dir - p.getBaseVelocity(self._agent)[0]
     # des_change = (self._last_pose + llc_dir) - np.array(p.getBasePositionAndOrientation(self._agent)[0])
     # print ("self._last_state[1][:3] - p.getBaseVelocity(self._agent)[0]: ", self._last_state[1][:3] - p.getBaseVelocity(self._agent)[0])
     # llc_reward = np.dot(agentDir, llc_dir) - 1
     # llc_reward = -(agentDir*llc_dir).sum(axis=0)
     # llc_reward = np.exp((llc_reward*llc_reward) * -2.0)
     # print ("des_change: ", des_change*des_change)
     llc_reward = -(des_change*des_change).sum(axis=0)
     # print ("llc_reward: ", llc_reward)
     return llc_reward
Code Example #7
File: bouncer.py Project: mbhs-systems/pf-mdcalc
def simulate(sp):
    # Global body params
    visual_shape_id = -1
    use_maximal_coordinates = 0
    sim_time = 5

    # Create target (i.e. pollen) shape and body
    target = p.createCollisionShape(p.GEOM_SPHERE,
                                    radius=sp['t_rad'],
                                    physicsClientId=sp['pci'])
    tuid = p.createMultiBody(sp['t_m'],
                             target,
                             visual_shape_id,
                             sp['t_pos'],
                             useMaximalCoordinates=use_maximal_coordinates,
                             physicsClientId=sp['pci'])
    p.changeDynamics(tuid, -1, restitution=1 - 0.0001)

    # Create projectile (i.e. air) shape and body
    proj_rad = 0.15
    proj_mass = 0.01
    proj_pos = [0, 0, 0.9]
    proj = p.createCollisionShape(p.GEOM_SPHERE,
                                  radius=sp['p_rad'],
                                  physicsClientId=sp['pci'])
    puid = p.createMultiBody(sp['p_m'],
                             proj,
                             visual_shape_id,
                             sp['p_pos'],
                             useMaximalCoordinates=use_maximal_coordinates,
                             physicsClientId=sp['pci'])
    p.changeDynamics(puid, -1, restitution=1 - 0.0001)

    # Give projectile initial velocity
    p.resetBaseVelocity(puid, sp['lv'], sp['av'], physicsClientId=sp['pci'])

    # Start simulation
    # ignore gravity since this is orientation-agnostic
    p.setGravity(0, 0, 0, physicsClientId=sp['pci'])

    first = True
    while first or time.time() - start < sp['sim_time']:
        p.stepSimulation(physicsClientId=sp['pci'])
        foo = len(p.getContactPoints(physicsClientId=sp['pci']))
        if foo > 0 and first:
            start = time.time()
            first = False
        keys = p.getKeyboardEvents()
        time.sleep(0.001)

    print('bv' + str(sp['pci']) + ': ' +
          str(p.getBaseVelocity(tuid, physicsClientId=sp['pci'])))
    return p.getBaseVelocity(tuid, physicsClientId=sp['pci'])
Code Example #8
    def cost_function(self, tau, des_a, leg_down):
        time_step = 1/1000.0                    # duration of a single step
        rid = self.robot_id
        state_id = p.saveState()               # save the current simulation state
        md = p.TORQUE_CONTROL   # torque control mode
        leg_id_li = [0, 1, 2, 4, 5, 6]
        for i in range(6):
            p.setJointMotorControl2(rid, leg_id_li[i], md, tau[i])
        # 1. Get the state at the previous time step
        # body linear and angular velocity
        b_lv, b_av = np.array(p.getBaseVelocity(rid))
        # swing-leg joint velocities
        if leg_down == 'left':
            vel_a = p.getJointState(rid, 4)[1]
            vel_b = p.getJointState(rid, 5)[1]
            vel_c = p.getJointState(rid, 6)[1]
        else:
            vel_a = p.getJointState(rid, 0)[1]
            vel_b = p.getJointState(rid, 1)[1]
            vel_c = p.getJointState(rid, 2)[1]

        # 2. Step the simulation once
        p.stepSimulation()      # advance the simulation by one step (default step is 1/240 s)
        # 3. Get the velocities and other state after the step
        # body linear acceleration
        acc_bv = (np.array(p.getBaseVelocity(self.robot_id)[0]) - b_lv)/time_step
        # swing-leg joint accelerations
        if leg_down == 'left':
            acc_a = (p.getJointState(rid, 0)[1] - vel_a) / time_step
            acc_b = (p.getJointState(rid, 1)[1] - vel_b) / time_step
            acc_c = (p.getJointState(rid, 2)[1] - vel_c) / time_step
        else:
            acc_a = (p.getJointState(rid, 4)[1] - vel_a) / time_step
            acc_b = (p.getJointState(rid, 5)[1] - vel_b) / time_step
            acc_c = (p.getJointState(rid, 6)[1] - vel_c) / time_step
        acc_foot = np.array([acc_a, acc_b, acc_c])
        # body angular acceleration
        acc_ba = (np.array(p.getBaseVelocity(self.robot_id)[1]) - b_av)/time_step
        # 4. Compute the cost against the desired accelerations
        w_com = 25
        w_bod = np.array([20, 60, 14])
        w_foot = 1
        c_com = np.linalg.norm(w_com * (acc_bv - des_a['com']))
        c_bod = np.linalg.norm(w_bod * (acc_ba - des_a['body']))
        c_foo = np.linalg.norm(w_foot * (acc_foot - des_a['foot']))
        # c_tau = 0.001 * sum(abs(tau))           # sum of joint torques
        cost = c_com + c_bod + c_foo
        # Ground contact state: PyBullet seems to expose only the normal force, not tangential forces
        #contact_list = p.getContactPoints(rid)
        # 5. Restore the saved state and return the cost
        p.restoreState(state_id)
        return cost
Code Example #9
    def get_variable_state(self, variable_name):
        """

        :param variable_name: (str) variable name to query about the object.
        :return: (nd.array, float or int) returns the corresponding value of
                                          the variable.
        """
        if variable_name == 'type':
            return self._type_id
        elif variable_name == 'cartesian_position':
            position, orientation = pybullet.getBasePositionAndOrientation(
                self._block_ids[0],
                physicsClientId=self._pybullet_client_ids[0])
            position = np.array(position)
            position[-1] -= WorldConstants.FLOOR_HEIGHT
            return position

        elif variable_name == 'cylindrical_position':
            position, orientation = pybullet.getBasePositionAndOrientation(
                self._block_ids[0],
                physicsClientId=self._pybullet_client_ids[0])
            position = np.array(position)
            position[-1] -= WorldConstants.FLOOR_HEIGHT
            return cart2cyl(position)

        elif variable_name == 'orientation':
            position, orientation = pybullet.getBasePositionAndOrientation(
                self._block_ids[0],
                physicsClientId=self._pybullet_client_ids[0])
            position = np.array(position)
            position[-1] -= WorldConstants.FLOOR_HEIGHT
            return orientation
        elif variable_name == 'linear_velocity':
            linear_velocity, angular_velocity = pybullet.getBaseVelocity(
                self._block_ids[0],
                physicsClientId=self._pybullet_client_ids[0])
            return linear_velocity

        elif variable_name == 'angular_velocity':
            linear_velocity, angular_velocity = pybullet.getBaseVelocity(
                self._block_ids[0],
                physicsClientId=self._pybullet_client_ids[0])
            return angular_velocity

        elif variable_name == 'mass':
            return self._mass
        elif variable_name == 'size':
            return self._size
        elif variable_name == 'color':
            return self._color
        elif variable_name == 'friction':
            return self._lateral_friction
Code Example #10
File: main.py Project: yoo-bit/hopBot
def controlBodyAttitude():
    kp = p.readUserDebugParameter(balanceKp)
    kv = p.readUserDebugParameter(balanceKv)
    # Balance along X axis
    pitchAngle = p.getBasePositionAndOrientation(robotId)[1][1]
    pitchAngleDerivative = p.getBaseVelocity(robotId)[1][1]
    torque = -kp * (pitchAngle - 0) - kv * pitchAngleDerivative
    sendTorqueControl(0, -torque)
    # Balance along Y axis
    rollAngle = p.getBasePositionAndOrientation(robotId)[1][0]
    rollAngleDerivative = p.getBaseVelocity(robotId)[1][0]
    torque2 = -kp * (rollAngle - 0) - kv * rollAngleDerivative
    sendTorqueControl(1, -torque2)
Code Example #11
    def getObservation(self):
        observation = []
        # get ee pose
        state = p.getLinkState(bodyUniqueId=self.neobotixschunkUid,
                               linkIndex=self.endEffectorIndex,
                               computeLinkVelocity=1,
                               computeForwardKinematics=1)
        pos = state[0]
        orn = state[1]
        if len(state) > 6:
            vel = state[6]
        euler = p.getEulerFromQuaternion(orn)
        observation.extend(list(pos))
        observation.extend(list(euler))
        # observation.extend(list(vel))
        # get base pose
        basepos, baseorn = p.getBasePositionAndOrientation(
            self.neobotixschunkUid)
        baseeul = p.getEulerFromQuaternion(baseorn)
        observation.extend(list(basepos))
        observation.extend(list(baseeul))

        # get joint positions
        # joint_s = p.getJointStates(bodyUniqueId=self.neobotixschunkUid,  jointIndices=list(self.armIndex))
        for i in self.armIndex:
            joints = p.getJointState(bodyUniqueId=self.neobotixschunkUid,
                                     jointIndex=i)
            observation.append((joints[0]))

        # get base linear and angular vel
        basev = p.getBaseVelocity(self.neobotixschunkUid)
        observation.extend(list(basev[0][0:2]))
        observation.append(basev[1][2])

        return observation
Code Example #12
    def getExtendedObservation(self):
        # get robot observation
        self._observation = self._icub.getObservation()

        # get object position and transform it wrt hand c.o.m. frame
        objPos, objOrn = p.getBasePositionAndOrientation(self._objID)
        objEuler = p.getEulerFromQuaternion(objOrn)  #roll, pitch, yaw

        objLVel, objAVel = p.getBaseVelocity(self._objID)

        self._observation.extend(list(objPos))
        self._observation.extend(list(objEuler))
        self._observation.extend(list(objLVel))
        self._observation.extend(list(objAVel))

        # relative object position wrt hand c.o.m. frame
        invHandPos, invHandOrn = p.invertTransform(
            self._observation[:3],
            p.getQuaternionFromEuler(self._observation[3:6]))

        objPosInHand, objOrnInHand = p.multiplyTransforms(
            invHandPos, invHandOrn, objPos, objOrn)

        objEulerInHand = p.getEulerFromQuaternion(objOrnInHand)
        self._observation.extend(list(objPosInHand))
        self._observation.extend(list(objEulerInHand))

        #relLVel = np.array(objAVel) - np.array(self._observation[6:9])
        #self._observation.extend(list(relLVel))

        self._observation.extend(list(self._tg_pose))

        return np.array(self._observation)
Code Example #13
def collect_observations(drone):
	#print("ordered_joint_indices")
	#print(ordered_joint_indices)

	jointStates = p.getJointStates(drone,ordered_joint_indices)
	j = np.array([current_relative_position(jointStates, drone, *jtuple) for jtuple in ordered_joints]).flatten()
	#print("j")
	#print(j)
	body_xyz, (qx, qy, qz, qw) = p.getBasePositionAndOrientation(drone)
	#print("body_xyz")
	#print(body_xyz, qx,qy,qz,qw)
	z = body_xyz[2]
	dummy.distance = body_xyz[0]
	if dummy.initial_z==None:
		dummy.initial_z = z
	(vx, vy, vz), _ = p.getBaseVelocity(drone)
	more = np.array([z-dummy.initial_z, 0.1*vx, 0.1*vy, 0.1*vz, qx, qy, qz, qw])
	# rcont = p.getContactPoints(drone, -1, right_foot, -1)
	# #print("rcont")
	# #print(rcont)
	# lcont = p.getContactPoints(drone, -1, left_foot, -1)
	# #print("lcont")
	# #print(lcont)
	# feet_contact = np.array([len(rcont)>0, len(lcont)>0])
	# return np.clip( np.concatenate([more] + [j] + [feet_contact]), -5, +5)
	return np.clip( np.concatenate([more] + [j]), -5, +5)
Code Example #14
 def collect_observations(self, human):
   #print("ordered_joint_indices")
   #print(ordered_joint_indices)
   
   
   jointStates = p.getJointStates(human,self.ordered_joint_indices)
   j = np.array([self.current_relative_position(jointStates, human, *jtuple) for jtuple in self.ordered_joints]).flatten()
   #print("j")
   #print(j)
   body_xyz, (qx, qy, qz, qw) = p.getBasePositionAndOrientation(human)
   #print("body_xyz")
   #print(body_xyz, qx,qy,qz,qw)
   z = body_xyz[2]
   self.distance = body_xyz[0]
   if self.initial_z==None:
     self.initial_z = z
   (vx, vy, vz), _ = p.getBaseVelocity(human)
   more = np.array([z-self.initial_z, 0.1*vx, 0.1*vy, 0.1*vz, qx, qy, qz, qw])
   rcont = p.getContactPoints(human, -1, self.right_foot, -1)
   #print("rcont")
   #print(rcont)
   lcont = p.getContactPoints(human, -1, self.left_foot, -1)
   #print("lcont")
   #print(lcont)
   feet_contact = np.array([len(rcont)>0, len(lcont)>0])
   return np.clip( np.concatenate([more] + [j] + [feet_contact]), -5, +5)
Code Example #15
 def compute_reward(self):
     robotPos, robotOrn = p.getBasePositionAndOrientation(self.botId)
     robotEuler = p.getEulerFromQuaternion(robotOrn)
     linear, angular = p.getBaseVelocity(self.botId)
     r = sum(linear) - sum(angular)
     # receive a bonus of 1 for balancing and pay a small cost proportional to speed
     return r  #1.0 - abs(self.vt) * 0.05
Code Example #16
def fitness(paramlist, robotID):
    reward = 0
    for ij in range(7200):
        sleep(1 / 240)
        r_hip = sine_signal(paramlist[0], paramlist[1], ij, paramlist[2])
        r_knee = sine_signal(paramlist[3], paramlist[4], ij, paramlist[5],
                             pi / 2)
        l_hip = sine_signal(paramlist[0], paramlist[1], ij, paramlist[2], pi)
        l_knee = sine_signal(paramlist[3], paramlist[4], ij, paramlist[5],
                             3 * pi / 2)
        action = [r_hip, r_knee, l_hip, l_knee, l_hip, l_knee, r_hip, r_knee]
        for joint, action in zip(joints, action):
            p.setJointMotorControl2(robotID,
                                    joint,
                                    p.POSITION_CONTROL,
                                    action,
                                    force=max_torque,
                                    maxVelocity=max_ang_vel)
        p.stepSimulation()
        lin_vel, _ = p.getBaseVelocity(robotID)
        _, orientQuaternion = p.getBasePositionAndOrientation(robotID)
        orientRad = p.getEulerFromQuaternion(orientQuaternion)

        reward += lin_vel[1]

        if abs(orientRad[0]) >= 5 or abs(orientRad[1]) >= 5 or abs(
                orientRad[2]) >= 1.57:
            reset()
            return reward
    reset()
    return reward
Code Example #17
File: pybullet.py Project: Xtuden-com/kubric
def set_position(object_idx, position):
    # reuse existing quaternion
    _, quaternion = pb.getBasePositionAndOrientation(object_idx)
    # resetBasePositionAndOrientation zeroes out velocities, but we wish to conserve them
    velocity, angular_velocity = pb.getBaseVelocity(object_idx)
    pb.resetBasePositionAndOrientation(object_idx, position, quaternion)
    pb.resetBaseVelocity(object_idx, velocity, angular_velocity)
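The comment above is the whole point of this helper: `resetBasePositionAndOrientation` zeroes a body's velocity, so the velocity is read first and re-applied after the teleport. Below is a minimal usage sketch, not part of the kubric source; it assumes a DIRECT client and the `sphere2.urdf` asset from `pybullet_data`, and the final print only shows that the velocity survives.

import pybullet as pb
import pybullet_data

pb.connect(pb.DIRECT)
pb.setAdditionalSearchPath(pybullet_data.getDataPath())
sphere = pb.loadURDF("sphere2.urdf", [0, 0, 1])
pb.resetBaseVelocity(sphere, linearVelocity=[1, 0, 0])

set_position(sphere, [2, 0, 1])       # teleport the body to a new position
print(pb.getBaseVelocity(sphere)[0])  # linear velocity is still (1.0, 0.0, 0.0)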
Code Example #18
 def get_base_velocity_world(self):
     """ Returns the velocity of the base in the world frame.
     Returns:
         np.array((6,1)) with the translation and angular velocity
     """
     vel, orn = p.getBaseVelocity(self.robot_id)
     return np.array(vel + orn).reshape(6, 1)
Code Example #19
    def get_state(self):
        # Returns a pinocchio like representation of the q, dq matrixes
        q = zero(self.nq)
        dq = zero(self.nv)

        if not self.useFixedBase:
            pos, orn = p.getBasePositionAndOrientation(self.robot_id)
            q[:3] = pos
            q[3:7] = orn

            vel, orn = p.getBaseVelocity(self.robot_id)
            dq[:3] = vel
            dq[3:6] = orn

            # Pinocchio assumes base velocity to be in body frame -> rotate.
            rot = np.array(p.getMatrixFromQuaternion(q[3:7])).reshape((3, 3))
            dq[0:3] = rot.T.dot(dq[0:3])
            dq[3:6] = rot.T.dot(dq[3:6])

        # Query the joint readings.
        joint_states = p.getJointStates(self.robot_id, self.bullet_joint_ids)

        if not self.useFixedBase:
            for i in range(self.nj):
                q[5 + self.pinocchio_joint_ids[i]] = joint_states[i][0]
                dq[4 + self.pinocchio_joint_ids[i]] = joint_states[i][1]
        else:
            for i in range(self.nj):
                q[self.pinocchio_joint_ids[i] - 1] = joint_states[i][0]
                dq[self.pinocchio_joint_ids[i] - 1] = joint_states[i][1]

        return q, dq
Code Example #20
    def is_static(self):
        """Checks if env is static, used for checking if action finished.

        However, this won't work in PyBullet (at least v2.8.4) since soft
        bodies cause this code to hang. Therefore, look at the task's
        `def_IDs` list, which by design will have all IDs of soft bodies.
        Furthermore, for the bag tasks, the beads generally move around, so
        for those, just use a hard cutoff limit (outside this method).
        """
        if self.is_softbody_env():
            assert len(self.task.def_IDs) > 0, 'Did we forget to add to def_IDs?'
            v = [np.linalg.norm(p.getBaseVelocity(i)[0]) for i in self.objects
                    if i not in self.task.def_IDs]
        else:
            v = [np.linalg.norm(p.getBaseVelocity(i)[0]) for i in self.objects]
        return all(np.array(v) < 1e-2)
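The docstring above explains when the velocity check is applicable; in that case the predicate is typically polled while stepping the simulation until the scene settles. A minimal sketch of such a settle loop, assuming an `env` object exposing the `is_static()` above and an already connected client (`wait_until_static` and its arguments are illustrative, not part of the original code):

import time
import pybullet as p

def wait_until_static(env, timeout=5.0, step_sleep=1.0 / 240.0):
    # Step the simulation until every tracked body's velocity norm drops
    # below the is_static() threshold, or until the timeout expires.
    deadline = time.time() + timeout
    while not env.is_static() and time.time() < deadline:
        p.stepSimulation()
        time.sleep(step_sleep)
    return env.is_static()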
Code Example #21
File: control.py Project: ajaygunalan/BipedControl
def imu_state(id=0):
    if SIMULATION == True:
        _, quat = p.getBasePositionAndOrientation(bipedId)
        _, v_angular = p.getBaseVelocity(bipedId)
        return R.from_quat(quat), np.array(v_angular)
    else:
        return np.array([]), np.array([])
Code Example #22
 def is_static(self):
     """Return true if objects are no longer moving."""
     v = [
         np.linalg.norm(p.getBaseVelocity(i)[0])
         for i in self.obj_ids['rigid']
     ]
     return all(np.array(v) < 5e-3)
Code Example #23
 def compute_observation(self):
     pos, orn = p.getBasePositionAndOrientation(self.copter)
     linear, _ = p.getBaseVelocity(self.copter)
     norm_linear = [l / 7 for l in linear]
     norm_pos = [pos[0] / 4, pos[1] / 4, pos[2] / 4 - 1]
     observation = [*linear, *pos, *orn]
     return observation
Code Example #24
    def _compute_reward(self):
        quadruped_pos, quadruped_orientation = p.getBasePositionAndOrientation(
            self.quadruped_id)
        quadruped_linear_vel, quadruped_angular_vel = p.getBaseVelocity(
            self.quadruped_id)

        joint_torques = []
        for j in self.quadruped_joint_ids:
            joint_torques.append(p.getJointState(self.quadruped_id, j)[3])

        vel_x = quadruped_linear_vel[0]
        vel_y = quadruped_linear_vel[1]
        vel_yaw = quadruped_angular_vel[2]

        quadruped_orientation = p.getEulerFromQuaternion(quadruped_orientation)

        if vel_x < self.target_velocity:
            rew_vel_x = vel_x
        else:
            rew_vel_x = (2 * self.target_velocity) - vel_x

        reward = 1 * rew_vel_x - 0.01 * np.abs(vel_y) \
                 - 0.01 * np.abs(vel_yaw) \
                 - 0.01 * np.abs(quadruped_orientation[0]) - 0.01 * np.abs(quadruped_orientation[1]) \
                 - 0.0001 * np.linalg.norm(self.prev_torques - joint_torques)

        self.prev_torques = np.asarray(joint_torques)

        return reward
Code Example #25
    def get_obs(self):
        # Base position and orientation
        torso_pos, torso_orient = p.getBasePositionAndOrientation(self.robotId)

        # Target position and orientation
        target_pos, _ = p.getBasePositionAndOrientation(self.targetId)

        # Base velocity and angular velocity
        torso_pos_, torso_orient_ = p.getBaseVelocity(self.robotId)

        # Joint states and velocities
        q, q_, _, _ = zip(*p.getJointStates(self.robotId, self.joint_ids))

        obs_dict = {
            'torso_pos': torso_pos,
            'torso_quat': torso_orient,
            'target_pos': target_pos[0:2],
            'q': q,
            'torso_vel': torso_pos_,
            'torso_angvel': torso_orient_,
            'q_vel': q_
        }

        obs_arr = np.concatenate([v for v in obs_dict.values()
                                  ]).astype(np.float32)

        return obs_arr, obs_dict
Code Example #26
    def computeReward(self, state=None):

        pos = np.array(pybullet.getBasePositionAndOrientation(self._demon)[0])

        particle = self._get_target()
        posT = np.array(pybullet.getBasePositionAndOrientation(particle)[0])

        goalDirection = posT - pos
        goalDistance = np.sqrt((goalDirection * goalDirection).sum(axis=0))
        goalDir = self.getTargetDirection()
        agentVel = np.array(pybullet.getBaseVelocity(self._demon)[0])
        velDiff = goalDir - agentVel
        diffMag = np.sqrt((velDiff * velDiff).sum(axis=0))
        # heading towards goal
        reward = np.exp((diffMag * diffMag) * -2.0) + np.exp(
            (goalDistance * goalDistance) * -2.0)
        """
        if (goalDistance < 1.5):
            # Reached goal
            reward = reward + self._map_width
        """
        # Check contacts with obstacles
        """
        for box_id in self._blocks:
            contacts = p.getContactPoints(self._agent, box_id)
            # print ("contacts: ", contacts)
            if len(contacts) > 0:
                reward = reward + -self._map_width
                break
        """
        return reward
Code Example #27
File: unicycle.py Project: jhoonjeong/RL_unicycle
    def get_state(self):
        # state of the center of column
        center_position, center_angle = p.getBasePositionAndOrientation(
            self.unicycleUid)
        center_velocity, center_angular_velocity = p.getBaseVelocity(
            self.unicycleUid)

        # state of joint points (only get angular velocities of links)
        link_states = []
        for i in self.jointinfo:
            link_states.append(
                p.getLinkState(self.unicycleUid,
                               linkIndex=i,
                               computeLinkVelocity=1)[7])
        state = np.concatenate(
            (center_position, center_angle, center_velocity,
             center_angular_velocity, link_states[0], link_states[1]),
            axis=None)

        # fall down or out of ground ends this episode
        out_of_ground = (np.abs(state[0]) > self.floor_r) or (np.abs(state[1])
                                                              > self.floor_r)
        fall_down = state[2] < self.down or state[2] > 1.5
        done = fall_down or out_of_ground

        reward = 1
        if done:  # Penalty
            reward = 0

        if self.get_image:
            return self.img_from_state(state.flatten()), done, reward
        else:
            return state.flatten(), done, reward
Code Example #28
File: humanoid.py Project: tcel2/bullet3
    def collect_observations(self, human):
        #print("ordered_joint_indices")
        #print(ordered_joint_indices)

        jointStates = p.getJointStates(human, self.ordered_joint_indices)
        j = np.array([
            self.current_relative_position(jointStates, human, *jtuple)
            for jtuple in self.ordered_joints
        ]).flatten()
        #print("j")
        #print(j)
        body_xyz, (qx, qy, qz, qw) = p.getBasePositionAndOrientation(human)
        #print("body_xyz")
        #print(body_xyz, qx,qy,qz,qw)
        z = body_xyz[2]
        self.distance = body_xyz[0]
        if self.initial_z == None:
            self.initial_z = z
        (vx, vy, vz), _ = p.getBaseVelocity(human)
        more = np.array(
            [z - self.initial_z, 0.1 * vx, 0.1 * vy, 0.1 * vz, qx, qy, qz, qw])
        rcont = p.getContactPoints(human, -1, self.right_foot, -1)
        #print("rcont")
        #print(rcont)
        lcont = p.getContactPoints(human, -1, self.left_foot, -1)
        #print("lcont")
        #print(lcont)
        feet_contact = np.array([len(rcont) > 0, len(lcont) > 0])
        return np.clip(np.concatenate([more] + [j] + [feet_contact]), -5, +5)
Code Example #29
    def get_state(self):
        # Returns a pinocchio like representation of the q, dq matrixes
        q = zero(self.nq)
        dq = zero(self.nv)

        pos, orn = p.getBasePositionAndOrientation(self.robot_id)
        q[:3, 0] = np.array(pos).reshape(3, 1)
        q[3:7, 0] = np.array(orn).reshape(4, 1)

        vel, orn = p.getBaseVelocity(self.robot_id)
        dq[:3, 0] = np.array(vel).reshape(3, 1)
        dq[3:6, 0] = np.array(orn).reshape(3, 1)

        # Pinocchio assumes the base velocity to be in the body frame -> rotate.
        rot = np.matrix(p.getMatrixFromQuaternion(q[3:7])).reshape((3, 3))
        dq[0:3] = rot.T.dot(dq[0:3])
        dq[3:6] = rot.T.dot(dq[3:6])

        # Query the joint readings.
        joint_states = p.getJointStates(self.robot_id, self.bullet_joint_ids)

        for i in range(self.nj):
            q[5 + self.pinocchio_joint_ids[i], 0] = joint_states[i][0]
            dq[4 + self.pinocchio_joint_ids[i], 0] = joint_states[i][1]

        return q, dq
Code Example #30
 def _getObservation(self):
     allJoints = [j.value for j in Joints]
     jointStates = p.getJointStates(self.anymal, allJoints)
     position = np.array([js[0] for js in jointStates])
     velocity = np.array([js[1] for js in jointStates])
     global lastVelocity
     if self.t == 0.0:
         lastVelocity = velocity
     acceleration = (velocity - lastVelocity) * SIMULATIONFREQUENCY
     lastVelocity = velocity
     basePosition, baseOrientation = p.getBasePositionAndOrientation(self.anymal)
     baseOrientation_Matrix = np.array(p.getMatrixFromQuaternion(baseOrientation)).reshape(3, 3)
     baseOrientationVector = np.matmul(baseOrientation_Matrix, [0, 0, -1])
     baseVelocity, baseAngularVelocity = p.getBaseVelocity(self.anymal)
     observationAsArray = np.concatenate([
         position,
         velocity,
         basePosition,
         baseOrientation,
         baseOrientationVector,
         baseVelocity,
         baseAngularVelocity,
         acceleration
     ])
     observationAsDict = {
         "position": position,
         "velocity": velocity,
         "basePosition": basePosition,
         "baseOrientation": baseOrientation,
         "baseOrientationVector": baseOrientationVector,
         "baseVelocity": baseVelocity,
         "baseAngularVelocity": baseAngularVelocity,
         "acceleration": acceleration
     }
     return observationAsArray, observationAsDict
Code Example #31
    def step(self, action):
        self.take_step(action, robot_arm='right', gains=self.config('robot_gains'), forces=self.config('robot_forces'), human_gains=0.0005)

        robot_force_on_human, cup_force_on_human = self.get_total_force()
        total_force_on_human = robot_force_on_human + cup_force_on_human
        reward_water, water_mouth_velocities, water_hit_human_reward = self.get_water_rewards()
        end_effector_velocity = np.linalg.norm(p.getBaseVelocity(self.cup, physicsClientId=self.id)[0])
        obs = self._get_obs([cup_force_on_human], [robot_force_on_human, cup_force_on_human])

        # Get human preferences
        preferences_score = self.human_preferences(end_effector_velocity=end_effector_velocity, total_force_on_human=robot_force_on_human, tool_force_at_target=cup_force_on_human, food_hit_human_reward=water_hit_human_reward, food_mouth_velocities=water_mouth_velocities)

        cup_pos, cup_orient = p.getBasePositionAndOrientation(self.cup, physicsClientId=self.id)
        cup_pos, cup_orient = p.multiplyTransforms(cup_pos, cup_orient, [0, 0.06, 0], p.getQuaternionFromEuler([np.pi/2.0, 0, 0], physicsClientId=self.id), physicsClientId=self.id)
        cup_top_center_pos, _ = p.multiplyTransforms(cup_pos, cup_orient, self.cup_top_center_offset, [0, 0, 0, 1], physicsClientId=self.id)
        reward_distance = -np.linalg.norm(self.target_pos - np.array(cup_top_center_pos)) # Penalize distances between top of cup and mouth
        reward_action = -np.sum(np.square(action)) # Penalize actions
        # Encourage robot to have a tilted end effector / cup
        cup_euler = p.getEulerFromQuaternion(cup_orient, physicsClientId=self.id)
        reward_tilt = -abs(cup_euler[0] + np.pi/2) if self.robot_type == 'jaco' else -abs(cup_euler[0] - np.pi/2)

        reward = self.config('distance_weight')*reward_distance + self.config('action_weight')*reward_action + self.config('cup_tilt_weight')*reward_tilt + self.config('drinking_reward_weight')*reward_water + preferences_score

        if self.gui and reward_water != 0:
            print('Task success:', self.task_success, 'Water reward:', reward_water)

        info = {'total_force_on_human': total_force_on_human, 'task_success': int(self.task_success >= self.total_water_count*self.config('task_success_threshold')), 'action_robot_len': self.action_robot_len, 'action_human_len': self.action_human_len, 'obs_robot_len': self.obs_robot_len, 'obs_human_len': self.obs_human_len}
        done = False

        return obs, reward, done, info
Code Example #32
def dumpStateToFile(file):
	for i in  range (p.getNumBodies()):
		pos,orn = p.getBasePositionAndOrientation(i)
		linVel,angVel = p.getBaseVelocity(i)
		txtPos = "pos="+str(pos)+"\n"
		txtOrn = "orn="+str(orn)+"\n"
		txtLinVel = "linVel"+str(linVel)+"\n"
		txtAngVel = "angVel"+str(angVel)+"\n"
		file.write(txtPos)
		file.write(txtOrn)
		file.write(txtLinVel)
		file.write(txtAngVel)
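A hypothetical call site for the helper above, assuming a connected client with bodies already loaded (the file name is illustrative):

with open("world_state.txt", "w") as f:
    dumpStateToFile(f)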
Code Example #33
File: unittests.py Project: bulletphysics/bullet3
 def test_rolling_friction(self):
   import pybullet as p
   p.connect(p.DIRECT)
   p.loadURDF("plane.urdf")
   sphere = p.loadURDF("sphere2.urdf", [0, 0, 1])
   p.resetBaseVelocity(sphere, linearVelocity=[1, 0, 0])
   p.changeDynamics(sphere, -1, linearDamping=0, angularDamping=0)
   #p.changeDynamics(sphere,-1,rollingFriction=0)
   p.setGravity(0, 0, -10)
   for i in range(1000):
     p.stepSimulation()
   vel = p.getBaseVelocity(sphere)
   self.assertLess(vel[0][0], 1e-10)
   self.assertLess(vel[0][1], 1e-10)
   self.assertLess(vel[0][2], 1e-10)
   self.assertLess(vel[1][0], 1e-10)
   self.assertLess(vel[1][1], 1e-10)
   self.assertLess(vel[1][2], 1e-10)
   p.disconnect()
Code Example #34
File: robot_bases.py Project: AndrewMeadows/bullet3
	def speed(self):
		if self.bodyPartIndex == -1:
			(vx, vy, vz), _ = p.getBaseVelocity(self.bodies[self.bodyIndex])
		else:
			# unpack the link state; name the angular yaw rate separately so it does not shadow the linear vy
			(x, y, z), (a, b, c, d), _, _, _, _, (vx, vy, vz), (vr, vp, vyaw) = p.getLinkState(self.bodies[self.bodyIndex], self.bodyPartIndex, computeLinkVelocity=1)
		return np.array([vx, vy, vz])
Code Example #35
File: testBike.py Project: AndrewMeadows/bullet3
		
		updateCam=1
		time.sleep(timestep)
		pts = p.getContactPoints()
		#print("numPoints=",len(pts))
		#for point in pts:
		#	print("Point:bodyA=", point[1],"bodyB=",point[2],"linkA=",point[3],"linkB=",point[4],"dist=",point[8],"force=",point[9])
		
		states.append(p.saveState())
		#observations.append(obs)
		stateIndex = len(states)
		#print("stateIndex =",stateIndex )
		frame += 1
	if (updateCam):
		distance=5
		yaw = 0
		humanPos, humanOrn = p.getBasePositionAndOrientation(bike)
		humanBaseVel = p.getBaseVelocity(bike)
		#print("frame",frame, "humanPos=",humanPos, "humanVel=",humanBaseVel)
		if (gui):
			
			camInfo = p.getDebugVisualizerCamera()
			curTargetPos = camInfo[11]
			distance=camInfo[10]
			yaw = camInfo[8]
			pitch=camInfo[9]
			targetPos = [0.95*curTargetPos[0]+0.05*humanPos[0],0.95*curTargetPos[1]+0.05*humanPos[1],curTargetPos[2]]
			p.resetDebugVisualizerCamera(distance,yaw,pitch,targetPos);