Code example #1
    def __init__(self, host, ur_frame, base_frame, arm_name, init_posej=None):
        self.ur_robot = None
        try:
            self.vr_init_pose = None
            self.ur_init_pose = None
            self.time = time.time()
            self.scale = 1  # ratio of robot movement distance to arm movement
            self.ur_frame = ur_frame
            self.base_frame = base_frame
            # self.angle = None
            self.last_gripper_value = 0
            self.stop_vr = False
            self.gripper = [False, False]
            self.force_mode = [False, False]

            self.tran_base_to_ur = TransformerTool(target_frame=ur_frame,
                                                   source_frame=base_frame)
            self.ur_robot = URRobot(host)

            if init_posej is not None:
                self.movej(init_posej)

            # rospy.Subscriber(
            #     name=arm_name + "/vr_pose4", data_class=Pose, callback=self.on_sub_pose, queue_size=3)
            # rospy.Subscriber(name=arm_name + "/gripper", data_class=Int32,
            #                  callback=self.on_sub_gripper, queue_size=1)
        except Exception as e:
            print(e)
            if self.ur_robot is not None:
                self.ur_robot.close()
            exit()
Code example #2
    def __init__(self):
        self.target_on_base = None
        self.rgb_image = None
        self.depth_image = None
        self.rbt = URRobot('192.168.31.53')
        self.compute_Q = False
        self.noise_eps = 0
        self.random_eps = 0
        self.use_target_net = False
        self.exploit = False
        self.block_gripper = True  #reach:true pick:false
        #self.detection_storage = []
        seed = 0
        set_global_seeds(seed)
        policy_file = "/home/lu/liboyao/policy_best_reach.pkl"
        # Load policy.
        with open(policy_file, 'rb') as f:
            self.policy = pickle.load(f)
Code example #3
    def __init__(self):
        rospy.Subscriber(name='/tag_detections',
                         data_class=AprilTagDetectionArray,
                         callback=self.on_sub,
                         queue_size=1)  # queue_size: number of messages buffered when the subscriber cannot keep up
        #image_sub = rospy.Subscriber("/camera/rgb/image_raw",data_class=Image,callback=self.rgb_callback)
        self.target_on_base = None
        self.rbt = URRobot('192.168.31.53')

        self.gripper = Robotiq_Two_Finger_Gripper(self.rbt)
        #self.gripper.gripper_action(0)

        self.pos_init = [0.1, -0.3, 0.3, 0, pi, 0]
        self.rbt.movel(self.pos_init, 0.3, 0.3)
        #self.gripper.gripper_action(255)
        time.sleep(2)
        self.compute_Q = False
        self.noise_eps = 0
        self.random_eps = 0
        self.use_target_net = False
        self.exploit = False
        self.has_object = True
        self.block_gripper = False  #reach:true pick:false
        self.detection_target_storage = []
        #self.detection_object_storage = []
        self.gripper_state = [0, 0]
        self.pick = False
        self.object_reach = False
        #self.pick = True

        #seed = 0
        #set_global_seeds(seed)
        policy_file = "./policy_best_picknplace.pkl"

        # Load policy.
        with open(policy_file, 'rb') as f:
            self.policy = pickle.load(f)
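
Note: pos_init above uses the UR pose convention: [x, y, z, rx, ry, rz], a Cartesian position in metres followed by an axis-angle rotation vector. A minimal sketch of that convention (variable names below are illustrative only):

from math import pi

# [x, y, z]: position in the base frame, in metres.
# [rx, ry, rz]: rotation vector (rotation axis scaled by the angle, in
# radians); [0, pi, 0] is a half-turn about the base y-axis, the start
# orientation used here.
pos_init = [0.1, -0.3, 0.3, 0, pi, 0]
position, rotvec = pos_init[:3], pos_init[3:]
print(position, rotvec)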
Code example #4
class ur5_reach:
    def __init__(self):
        self.target_on_base = None
        self.rgb_image = None
        self.depth_image = None
        self.rbt = URRobot('192.168.31.53')
        self.compute_Q = False
        self.noise_eps = 0
        self.random_eps = 0
        self.use_target_net = False
        self.exploit = False
        self.block_gripper = True  #reach:true pick:false
        #self.detection_storage = []
        seed = 0
        set_global_seeds(seed)
        policy_file = "/home/lu/liboyao/policy_best_reach.pkl"
        # Load policy.
        with open(policy_file, 'rb') as f:
            self.policy = pickle.load(f)

    def rgb_callback(self, imgmessage):
        rgb_bridge = CvBridge()
        self.rgb_image = rgb_bridge.imgmsg_to_cv2(imgmessage, "bgr8")
        detect = ColorDetection(self.rgb_image)
        msg_to_send = Floats()
        msg_to_send.data = detect.image_uv(2)
        mypub = rospy.Publisher("uvtopic", Floats, queue_size=1)
        mypub.publish(msg_to_send)

    def depth_callback(self, depthmessage):
        dep_bridge = CvBridge()
        self.depth_image = dep_bridge.imgmsg_to_cv2(depthmessage,
                                                    "passthrough")
        #cv2.imwrite('/home/lu/liboyao/depth.png',depth_image)

    def on_sub(self, u, v):
        # (u, v): pixel coordinates of the detected target, assumed to be
        # supplied by the caller (e.g. from the color detection in rgb_callback).
        z = self.depth_image[v][u] + 100  # raw depth at (v, u) plus a fixed offset of 100
        u /= 1.0
        v /= 1.0
        z /= 1000.0  # mm -> m
        print(u, v, z)
        M_in = np.array([[570.342, 0, 319.5], [0, 570.342, 239.5], [0, 0, 1]])  # camera intrinsics
        uv1 = np.array([[u, v, 1]]) * z  # uv1 = [u, v, 1] * z
        xy1 = np.dot(np.linalg.inv(M_in), uv1.T)  # xy1 = [x, y, z]: point in camera coordinates
        pose_on_camera = xy1
        #detection=ColorDetection(rgbimage, depthimage) # build a 'method'
        #pose_on_camera_x,pose_on_camera_y,pose_on_camera_z=detection.image2camera(2) # 2 is orange
        #pose_on_camera=[pose_on_camera_x,pose_on_camera_y,pose_on_camera_z]
        self.target_on_base = self.camera2base(pose_on_camera)
        #print(pose_on_camera)
        #print(self.target_on_base)
        '''detections = data.detections

        for i in range(len(detections)):
            if detections[i].id == 12:
                self.detection_target = detections[i]
                self.detection_storage.append([self.detection_target.pose.pose.position.x,self.detection_target.pose.pose.position.y,self.detection_target.pose.pose.position.z])
                break

        pose_on_camera = self.detection_storage[0]

        self.target_on_base = self.camera2base(pose_on_camera)'''

        pose_rbt = self.rbt.getl()

        o = [-pose_rbt[1] + 0.75, pose_rbt[0] + 0.74,
             pose_rbt[2] + 0.65]  # grip_pos: end-effector position
        ag = o  # achieved goal (grip_pos / object_pos)
        g = [
            -self.target_on_base[1] + 0.76, self.target_on_base[0] + 0.74,
            self.target_on_base[2] + 0.64
        ]  # goal position

        action = self.policy.get_actions(
            o,
            ag,
            g,
            compute_Q=self.compute_Q,
            noise_eps=self.noise_eps if not self.exploit else 0.,
            random_eps=self.random_eps if not self.exploit else 0.,
            use_target_net=self.use_target_net)

        if self.compute_Q:
            u, Q = action
        else:
            u = action

        if u.ndim == 1:
            # The non-batched case should still have a reasonable shape.
            u = u.reshape(1, -1)
        pos_ctrl, gripper_ctrl = u[0, :3], u[0, 3]
        pos_ctrl *= 0.033  # scale the simulated action to a real-world displacement
        rot_ctrl = [
            1., 0., 1., 0.
        ]  # fixed rotation of the end effector, expressed as a quaternion

        gripper_ctrl = np.array([gripper_ctrl, gripper_ctrl])
        assert gripper_ctrl.shape == (2, )
        if self.block_gripper:
            gripper_ctrl = np.zeros_like(gripper_ctrl)

        action = np.concatenate([pos_ctrl, rot_ctrl, gripper_ctrl])  # [pos(3), quat(4), gripper(2)]
        action, _ = np.split(action, (1 * 7, ))
        action = action.reshape(1, 7)
        pos_delta = action[:, :3]

        act_on_base = pose_rbt[:3]
        her_pose = [act_on_base[0], act_on_base[1],
                    act_on_base[2]]  # end-effector position in the UR3 base frame

        # Rule-based reference pose for the target
        rule_pose = [0, 0, 0, 0, 0, 0]
        rule_pose[0:3] = [
            self.target_on_base[0], self.target_on_base[1],
            self.target_on_base[2]
        ]
        rule_pose[3] = pose_rbt[3]
        rule_pose[4] = pose_rbt[4]
        rule_pose[5] = pose_rbt[5]  # rx, ry, rz held fixed

        # Check whether the target is reached
        if sum(
                np.square(
                    np.asarray(her_pose) - np.asarray(
                        [rule_pose[0], rule_pose[1], rule_pose[2]]))) < 0.0005:
            print("reach the target")
            return

        act_on_base[0] += pos_delta[0][1]  # left/right
        act_on_base[1] -= pos_delta[0][0]  # forward/backward
        act_on_base[2] += pos_delta[0][2]  # up/down
        robot_delta = [
            act_on_base[0], act_on_base[1], act_on_base[2], pose_rbt[3],
            pose_rbt[4], pose_rbt[5]
        ]
        self.rbt.movel(tpose=robot_delta, acc=0.1, vel=0.15)

        print('----------pos_delta----------')
        print(pos_delta)
        print('----------robot_pose----------')
        print([act_on_base[0], act_on_base[1], act_on_base[2]])
        print('----------rule_pose----------')
        print(rule_pose)

    def camera2base(self, pose_on_camera):
        Tm = np.array([[-0.9992, 0.0082, 0.0383, 0.0183],
                       [-0.0036, 0.9543, -0.2987, -0.2376],
                       [-0.0390, -0.2986, -0.9536, 1.1127], [0, 0, 0, 1.0000]])
        #pose_on_base=np.dot([pose_on_camera.x,pose_on_camera.y,pose_on_camera.z,1],Tm.T)
        pose_on_base = np.dot(
            [pose_on_camera[0], pose_on_camera[1], pose_on_camera[2], 1], Tm.T)
        pose_on_base[0] = pose_on_base[0] - 0.03
        pose_on_base[1] = pose_on_base[1] * 0.98 - 0.01
        pose_on_base[2] = pose_on_base[2] * 0.94 + 0.09
        return pose_on_base

    def rbt_init(self, pos_init):
        self.rbt.movel(pos_init, 0.1, 0.15)
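
Note: the back-projection in on_sub is the standard pinhole camera model. A minimal standalone sketch (pixel_to_camera is an illustrative name; the intrinsics fx = fy = 570.342, cx = 319.5, cy = 239.5 are taken from M_in above):

import numpy as np

fx = fy = 570.342
cx, cy = 319.5, 239.5

def pixel_to_camera(u, v, z):
    # inv(M_in) @ ([u, v, 1] * z) reduces to this closed form.
    return np.array([(u - cx) * z / fx, (v - cy) * z / fy, z])

print(pixel_to_camera(320.0, 240.0, 1.0))  # a point near the optical axis, 1 m away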
Code example #5
import numpy as np
from urx import URRobot

rbt = URRobot('192.168.31.53')


def camera2base(pose_on_camera):
    Tm = np.array([[-0.9992, 0.0082, 0.0383, 0.0183],
                   [-0.0036, 0.9543, -0.2987, -0.2376],
                   [-0.0390, -0.2986, -0.9536, 1.1127], [0, 0, 0, 1.0000]])
    #pose_on_base=np.dot([pose_on_camera.x,pose_on_camera.y,pose_on_camera.z,1],Tm.T)
    pose_on_base = np.dot(
        [pose_on_camera[0], pose_on_camera[1], pose_on_camera[2], 1], Tm.T)
    pose_on_base[0] = pose_on_base[0] - 0.03
    pose_on_base[1] = pose_on_base[1] * 0.98 - 0.01
    pose_on_base[2] = pose_on_base[2] * 0.94 + 0.09
    return pose_on_base


pose_on_camera = [0.12036043, -0.04154134, 1.215]
pose_on_base = camera2base(pose_on_camera)
destination = [pose_on_base[0], pose_on_base[1], pose_on_base[2], 3.14, 0, 0]
print(destination)
rbt.movel(destination, 0.1, 0.1)
rbt.close()
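
For reference, the 4x4 hand-eye matrix Tm packs a 3x3 rotation R (top-left block) and a translation t (last column), so the transform is R·p + t; the per-axis adjustments applied afterwards appear to be empirical calibration corrections. A minimal sketch (camera2base_rt is an illustrative name):

import numpy as np

R = np.array([[-0.9992, 0.0082, 0.0383],
              [-0.0036, 0.9543, -0.2987],
              [-0.0390, -0.2986, -0.9536]])
t = np.array([0.0183, -0.2376, 1.1127])

def camera2base_rt(p_cam):
    # Identical to np.dot([x, y, z, 1], Tm.T)[:3] before the empirical corrections.
    return np.dot(R, np.asarray(p_cam)) + t

print(camera2base_rt([0.12036043, -0.04154134, 1.215]))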
Code example #6
class VRController:
    def __init__(self, host, ur_frame, base_frame, arm_name, init_posej=None):
        self.ur_robot = None
        try:
            self.vr_init_pose = None
            self.ur_init_pose = None
            self.time = time.time()
            self.scale = 1  # ratio of robot movement distance to arm movement
            self.ur_frame = ur_frame
            self.base_frame = base_frame
            # self.angle = None
            self.last_gripper_value = 0
            self.stop_vr = False
            self.gripper = [False, False]
            self.force_mode = [False, False]

            self.tran_base_to_ur = TransformerTool(target_frame=ur_frame,
                                                   source_frame=base_frame)
            self.ur_robot = URRobot(host)

            if init_posej is not None:
                self.movej(init_posej)

            # rospy.Subscriber(
            #     name=arm_name + "/vr_pose4", data_class=Pose, callback=self.on_sub_pose, queue_size=3)
            # rospy.Subscriber(name=arm_name + "/gripper", data_class=Int32,
            #                  callback=self.on_sub_gripper, queue_size=1)
        except Exception as e:
            print(e)
            if self.ur_robot is not None:
                self.ur_robot.close()
            exit()

    def movej(self, pose):
        self.ur_robot.movej(pose, acc=0.5, vel=1)

    def on_recv_data(self, data):
        # euler_pose = data['euler_pose']
        quaternion_pose = data['quaternion_pose']
        touched = data['touched']

        pose = Pose()
        pose.position.x = quaternion_pose[0]
        pose.position.y = quaternion_pose[1]
        pose.position.z = quaternion_pose[2]
        pose.orientation.x = quaternion_pose[3]
        pose.orientation.y = quaternion_pose[4]
        pose.orientation.z = quaternion_pose[5]
        pose.orientation.w = quaternion_pose[6]

        self.change_status(touched['circleY'])
        self.grip(touched['gripper'])
        self.on_sub_pose(pose)

    def close(self):
        self.ur_robot.close()

    def set_force_mode(self, value):
        if value == self.force_mode[1]:
            self.force_mode[0] = False
        else:
            self.force_mode[1] = value
            self.force_mode[0] = True

    def grip(self, data):
        if data == self.last_gripper_value:
            self.gripper[0] = False
            return

        self.gripper[0] = True
        self.gripper[1] = data == 1

        # self.ur_robot.set_digital_out(8, data.data != 1)
        self.last_gripper_value = data

    def on_sub_gripper(self, data):
        self.grip(data.data)

    def mat2rvec(self, mat):
        # Log map of SO(3): convert a rotation matrix to a rotation vector.
        theta = math.acos((mat[0, 0] + mat[1, 1] + mat[2, 2] - 1.0) * 0.5)
        if theta < 0.001:
            return [0, 0, 0]
        else:
            rx = theta * (mat[2, 1] - mat[1, 2]) / (2 * math.sin(theta))
            ry = theta * (mat[0, 2] - mat[2, 0]) / (2 * math.sin(theta))
            rz = theta * (mat[1, 0] - mat[0, 1]) / (2 * math.sin(theta))
            return [rx, ry, rz]

    def change_status(self, data):
        if data != 0:
            self.begin_control()
        else:
            self.stop_control()

        # if data > 0:
        #     self.set_force_mode(True)
        # else:
        #     self.set_force_mode(False)

    def on_sub_pose(self, data):
        try:
            current_pose = self.rvec_pose_from_msg_pose(data)

            ###########################################
            # Adjust the orientation in the world frame
            rotation_matrix = tf.transformations.quaternion_matrix([
                data.orientation.x, data.orientation.y, data.orientation.z,
                data.orientation.w
            ])
            target_rotation_matrix = np.dot(
                rotation_matrix,
                np.array([[0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0],
                          [0, 0, 0, 1]]))
            angle = self.mat2rvec(target_rotation_matrix)

            # if self.ur_frame == "right_arm_base":
            #     self.angle[2] -= 180 / math.pi

            current_pose[3] = angle[0]
            current_pose[4] = angle[1]
            current_pose[5] = angle[2]

            # self.angle = current_pose[3:6]
            ################################################

            if self.stop_vr:
                self.vr_init_pose = None
                self.ur_robot.send_program(
                    'servoj(get_actual_joint_positions(),t=0.05,lookahead_time=0.03,gain=100)'
                )
                return
            elif self.vr_init_pose is None:
                self.vr_init_pose = current_pose

                pose = self.ur_robot.getl()
                self.ur_init_pose = self.transform_pose(
                    TransformerTool(target_frame=self.base_frame,
                                    source_frame=self.ur_frame),
                    pose=pose)

            target_pose = []
            for i in range(0, 3):
                target_pose.append(self.ur_init_pose[i] +
                                   (current_pose[i] - self.vr_init_pose[i]) *
                                   self.scale)

            target_pose.append(angle[0])
            target_pose.append(angle[1])
            target_pose.append(angle[2])

            target_pose = self.transform_pose(self.tran_base_to_ur,
                                              pose=target_pose)

            self.move_arm_robot(target_pose)

        except Exception as e:
            print(e)
            # self.ur_robot.close()

    def rvec_pose_from_msg_pose(self, pose):
        quaternion = (pose.orientation.x, pose.orientation.y,
                      pose.orientation.z, pose.orientation.w)
        # tran = TransformerTool()
        rvec = self.tran_base_to_ur.quat2rvec(quaternion)
        return [
            pose.position.x, pose.position.y, pose.position.z, rvec[0],
            rvec[1], rvec[2]
        ]

    def stop_control(self):
        # self.ur_robot.stopj()
        self.stop_vr = True

    def begin_control(self):
        self.stop_vr = False

    def move_arm_robot(self, target_pose):
        time_limit = 0.25
        joint_speed_limit = [30.0, 30.0, 60.0, 90.0, 90.0, 120.0]  # deg/s
        joint_max_distance = []
        for i in range(len(joint_speed_limit)):
            joint_max_distance.append(joint_speed_limit[i] / 180 * math.pi *
                                      time_limit)

        # Stop moving when the tool is far from the robot base frame. (A
        # stopgap: it would be better to stop when no valid path can be
        # planned, but no way has been found yet to check whether planning
        # succeeded.)
        if math.sqrt(
                math.pow(target_pose[0], 2) + math.pow(target_pose[1], 2) +
                math.pow(target_pose[2], 2)) > 0.65:
            self.ur_robot.send_program(
                'servoj(get_actual_joint_positions(),t={},lookahead_time=0.1,gain=100)'
                .format(time_limit))
            print('too long')
            return

        urscript = URScript()

        # Control the gripper
        if self.gripper[0]:
            urscript.add_line_to_program('set_tool_digital_out(0,{})'.format(
                self.gripper[1]))
        # urscript.add_line_to_program(
        #     'textmsg("-------------------------------------------------")')

        urscript.add_line_to_program('pi={}'.format(math.pi))  # define the pi constant

        # urscript.add_line_to_program('textmsg("pose: [{},{},{},{},{},{}])")'.format(
        #     target_pose[0], target_pose[1], target_pose[2], target_pose[3], target_pose[4], target_pose[5]))

        # Get the current joint angles
        urscript.add_line_to_program(
            'current_joint=get_actual_joint_positions()')

        # Force mode
        # if self.force_mode[0]:
        #     if self.force_mode[1]:
        #         urscript.add_line_to_program(
        #             'force_mode(target_pose,[0,0,0,1.1,1.1,1.1],[0,0,0,0,0,0],2,[100,100,100,20,20,20])')
        #         urscript.add_line_to_program('sleep(0.1)')
        #     else:
        #         urscript.add_line_to_program('end_force_mode()')

        urscript.add_line_to_program('inverse_kin=current_joint')
        urscript.add_line_to_program(
            'inverse_kin=get_inverse_kin(p[{},{},{},{},{},{}],maxPositionError=0.02,maxOrientationError=0.02)'
            .format(target_pose[0], target_pose[1], target_pose[2],
                    target_pose[3], target_pose[4], target_pose[5]))

        # urscript.add_line_to_program('textmsg("inverse kin:",inverse_kin)')

        # If the computed joint values differ too much from the current ones,
        # hold the current state (only the first few joints are checked; a stopgap).
        urscript.add_line_to_program('i=0')
        urscript.add_line_to_program('while i<3:')
        urscript.add_line_to_program(
            '  if norm(inverse_kin[i]-current_joint[i])>1:')
        urscript.add_line_to_program('    inverse_kin=current_joint')
        # urscript.add_line_to_program('    textmsg(i,": distance too big")')
        urscript.add_line_to_program('    break')
        urscript.add_line_to_program('  end')
        urscript.add_line_to_program('  i=i+1')
        urscript.add_line_to_program('end')

        # Add speed limits
        urscript.add_line_to_program(
            'joint_max_distance=[{},{},{},{},{},{}]'.format(
                joint_max_distance[0], joint_max_distance[1],
                joint_max_distance[2], joint_max_distance[3],
                joint_max_distance[4], joint_max_distance[5]))

        # urscript.add_line_to_program('textmsg("current joint:",current_joint)')

        # Add joint limits
        urscript.add_line_to_program('i=0')
        urscript.add_line_to_program('while i<6:')

        # Add joint speed limits
        urscript.add_line_to_program(
            '  if inverse_kin[i]-current_joint[i]>joint_max_distance[i]:')
        urscript.add_line_to_program(
            '    inverse_kin[i]=current_joint[i]+joint_max_distance[i]')
        urscript.add_line_to_program(
            '  elif inverse_kin[i]-current_joint[i]<-joint_max_distance[i]:')
        urscript.add_line_to_program(
            '    inverse_kin[i]=current_joint[i]-joint_max_distance[i]')
        urscript.add_line_to_program('  end')

        # Add joint angle limits
        urscript.add_line_to_program(
            '  if i<5 and norm(inverse_kin[i])>{}:'.format(math.pi * 2 - 0.2))
        urscript.add_line_to_program('    inverse_kin[i]=current_joint[i]')
        # urscript.add_line_to_program('    textmsg("***: ",i)')
        urscript.add_line_to_program('  end')

        urscript.add_line_to_program('  i=i+1')
        urscript.add_line_to_program('end')

        # urscript.add_line_to_program('textmsg("target joint:",inverse_kin)')

        # urscript.add_line_to_program(
        #     'if norm(inverse_kin[0])<pi*2 and norm(inverse_kin[1])<pi*2:')
        urscript.add_line_to_program(
            'servoj(inverse_kin,t={},lookahead_time=0.05,gain=100)'.format(
                time_limit))
        # urscript.add_line_to_program('end')

        prog = urscript()  # render the accumulated URScript program to a string
        # prog = "servoj(get_inverse_kin(p[{},{},{},{},{},{}]),t=0.30,lookahead_time=0.00,gain=100)".format(
        #     target_pose[0], target_pose[1], target_pose[2], target_pose[3], target_pose[4], target_pose[5])

        self.ur_robot.secmon.send_program(prog)

    def transform_pose(self, tran, pose):
        src_quat = tran.rvec2quat(pose[3:6])

        src_pose = Pose()
        src_pose.position.x = pose[0]
        src_pose.position.y = pose[1]
        src_pose.position.z = pose[2]
        src_pose.orientation.x = src_quat[0]
        src_pose.orientation.y = src_quat[1]
        src_pose.orientation.z = src_quat[2]
        src_pose.orientation.w = src_quat[3]

        target_pose = tran.transformPose(pose=src_pose)

        result = self.rvec_pose_from_msg_pose(target_pose)

        ##################################################
        # if self.ur_frame == "right_arm_base":
        #     print([target_pose.orientation.x, target_pose.orientation.y,
        #            target_pose.orientation.z, target_pose.orientation.w])
        #     if result[5] < -3:
        #         target_pose.orientation.x *= -1
        #         target_pose.orientation.y *= -1
        #         target_pose.orientation.z *= -1
        #         target_pose.orientation.w *= -1
        #         inverse_pose = self.rvec_pose_from_msg_pose(target_pose)
        #         print("inverse rvec: " + str(inverse_pose[3:6]))
        #         return inverse_pose
        ##################################################

        return result
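
A quick sanity check of mat2rvec (the log map from a rotation matrix to a rotation vector): a rotation of theta about the z-axis should come back as [0, 0, theta]. A minimal sketch using the same formula:

import math
import numpy as np

theta = 0.5
mat = np.array([[math.cos(theta), -math.sin(theta), 0.0],
                [math.sin(theta), math.cos(theta), 0.0],
                [0.0, 0.0, 1.0]])

angle = math.acos((mat[0, 0] + mat[1, 1] + mat[2, 2] - 1.0) * 0.5)
rvec = [angle * (mat[2, 1] - mat[1, 2]) / (2 * math.sin(angle)),
        angle * (mat[0, 2] - mat[2, 0]) / (2 * math.sin(angle)),
        angle * (mat[1, 0] - mat[0, 1]) / (2 * math.sin(angle))]
print(rvec)  # -> approximately [0.0, 0.0, 0.5]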
Code example #7
class ur5_reach:
    def __init__(self):
        rospy.Subscriber(name='/tag_detections',
                         data_class=AprilTagDetectionArray,
                         callback=self.on_sub,
                         queue_size=1)  # queue_size: number of messages buffered when the subscriber cannot keep up
        #image_sub = rospy.Subscriber("/camera/rgb/image_raw",data_class=Image,callback=self.rgb_callback)
        self.target_on_base = None
        self.rbt = URRobot('192.168.31.53')

        self.gripper = Robotiq_Two_Finger_Gripper(self.rbt)
        #self.gripper.gripper_action(0)

        self.pos_init = [0.1, -0.3, 0.3, 0, pi, 0]
        self.rbt.movel(self.pos_init, 0.3, 0.3)
        #self.gripper.gripper_action(255)
        time.sleep(2)
        self.compute_Q = False
        self.noise_eps = 0
        self.random_eps = 0
        self.use_target_net = False
        self.exploit = False
        self.has_object = True
        self.block_gripper = False  #reach:true pick:false
        self.detection_target_storage = []
        #self.detection_object_storage = []
        self.gripper_state = [0, 0]
        self.pick = False
        self.object_reach = False
        #self.pick = True

        #seed = 0
        #set_global_seeds(seed)
        policy_file = "./policy_best_picknplace.pkl"

        # Load policy.
        with open(policy_file, 'rb') as f:
            self.policy = pickle.load(f)

    def detection_object_by_color(self):
        #u,v=detect_ball()
        # NOTE: u, v and z are assumed to be provided by a detector such as
        # the commented-out detect_ball() above; they are not defined here.
        return Real_XY_of_BOX_on_Camera(u, v, z)

    # Robot pose transfer (rbt_base -> frame_base)
    # UR5 -> Base
    def on_sub(self, data):
        # Transform the target (kinect2_rgb_optical_frame -> frame_base) to get g
        # Camera -> Base
        detections = data.detections

        for i in range(len(detections)):
            if detections[i].id == 12:
                self.detection_target = detections[i]
                self.detection_target_storage.append([
                    self.detection_target.pose.pose.position.x,
                    self.detection_target.pose.pose.position.y,
                    self.detection_target.pose.pose.position.z
                ])
            '''
            if detections[i].id == 13:
                self.detection_object = detections[i]
                self.detection_object_storage.append([self.detection_object.pose.pose.position.x, self.detection_object.pose.pose.position.y, self.detection_object.pose.pose.position.z])
            '''

        pose_on_camera_target = self.detection_target_storage[0]
        #pose_on_camera_object = self.detection_object_storage[0]
        pose_on_camera_object = self.detection_object_by_color().ravel()

        self.target_on_base = self.camera2base(pose_on_camera_target)
        self.object_on_base = self.camera2base(pose_on_camera_object)

        pose_rbt = self.rbt.getl()

        grip_pos = [
            -pose_rbt[1] + 0.75, pose_rbt[0] + 0.74, pose_rbt[2] + 0.65
        ]  # grip_pos: end-effector position
        target_pos = [
            -self.target_on_base[1] + 0.75, self.target_on_base[0] + 0.74,
            self.target_on_base[2] + 0.70
        ]  # goal position; the goal sits 5 cm above the gripper tip on the z-axis

        if self.pick and sum(
                abs(
                    np.asarray(grip_pos) - np.asarray([
                        -self.object_on_base[1] + 0.75,
                        self.object_on_base[0] + 0.74,
                        self.object_on_base[2] + 0.625
                    ]))) < 0.1:
            object_pos = grip_pos  # object position
            object_rel_pos = [0, 0, 0]
            gripper_state = [0.025, 0.025]
            self.object_reach = True

        if self.object_reach:
            object_pos = grip_pos  # object position
            object_rel_pos = [0, 0, 0]
            gripper_state = [0.025, 0.025]
        else:
            object_pos = [
                -self.object_on_base[1] + 0.75, self.object_on_base[0] + 0.74,
                self.object_on_base[2] + 0.625
            ]  # object position
            object_rel_pos = [
                object_pos[0] - grip_pos[0], object_pos[1] - grip_pos[1],
                object_pos[2] - grip_pos[2]
            ]
            gripper_state = self.gripper_state

        achieved_goal = np.squeeze(object_pos.copy())

        o = [
            np.concatenate([
                grip_pos,
                np.asarray(object_pos).ravel(),
                np.asarray(object_rel_pos).ravel(), gripper_state
            ])
        ]  # observation: grip_pos, object_pos, object_rel_pos, gripper_state
        ag = [achieved_goal]  # achieved goal (object position)
        g = [target_pos]  # desired goal

        print(o)

        # Run the policy to get an action / position delta (in the base frame).
        # The action is in simulation units: 1 action unit = 0.033 simulated
        # observation units = 0.033 real-world units.
        action = self.policy.get_actions(
            o,
            ag,
            g,
            compute_Q=self.compute_Q,
            noise_eps=self.noise_eps if not self.exploit else 0.,
            random_eps=self.random_eps if not self.exploit else 0.,
            use_target_net=self.use_target_net)

        if self.compute_Q:
            u, Q = action
        else:
            u = action

        if u.ndim == 1:
            # The non-batched case should still have a reasonable shape.
            u = u.reshape(1, -1)
        pos_ctrl, gripper_ctrl = u[0, :3], u[0, 3]
        if gripper_ctrl < 0:
            self.pick = True
        else:
            self.pick = False

        pos_ctrl *= 0.033  # simulation action -> real-world action (1 sim action = 0.033 real action)
        gripper_ctrl *= 0.7636
        rot_ctrl = [
            1., 0., 1., 0.
        ]  # fixed rotation of the end effector, expressed as a quaternion

        gripper_ctrl = np.array([gripper_ctrl, gripper_ctrl])
        assert gripper_ctrl.shape == (2, )
        if self.block_gripper:
            gripper_ctrl = np.zeros_like(gripper_ctrl)

        action = np.concatenate([pos_ctrl, rot_ctrl, gripper_ctrl])  # [pos(3), quat(4), gripper(2)]
        action, gripper_st = np.split(action, (1 * 7, ))
        action = action.reshape(1, 7)

        pos_delta = action[:, :3]
        #pos_delta = pos_ctrl
        # Accumulate the gripper opening; clamped to [0, 0.05] below.
        self.gripper_state = [self.gripper_state[0] + gripper_st[0],
                              self.gripper_state[1] + gripper_st[1]]
        if self.gripper_state[0] > 0.05:
            self.gripper_state = [0.05, 0.05]
        if self.gripper_state[0] < 0:
            self.gripper_state = [0, 0]

        act_on_base = pose_rbt[:3]
        her_pose = [act_on_base[0], act_on_base[1],
                    act_on_base[2]]  # end-effector position in the UR3 base frame

        # Rule-based reference pose for the target
        rule_pose = [0, 0, 0, 0, 0, 0]
        rule_pose[0:3] = [
            self.target_on_base[0], self.target_on_base[1],
            self.target_on_base[2]
        ]
        rule_pose[3] = pose_rbt[3]
        rule_pose[4] = pose_rbt[4]
        rule_pose[5] = pose_rbt[5]  # rx, ry, rz held fixed

        # Check whether the target is reached
        '''
        rule_pose_np = np.asarray(rule_pose[:3]).ravel()
        her_pose_np = np.asarray(her_pose[:3]).ravel()
        if np.matmul((rule_pose_np-her_pose_np).T, rule_pose_np-her_pose_np) < 0.02:
        '''
        if abs(her_pose[0] - rule_pose[0]) + abs(her_pose[1] -
                                                 rule_pose[1]) < 0.04:
            print("reach the target")
            return

        act_on_base[0] += pos_delta[0][1]  # left/right
        act_on_base[1] -= pos_delta[0][0]  # forward/backward
        act_on_base[2] += pos_delta[0][2]  # up/down
        robot_delta = [
            act_on_base[0], act_on_base[1], act_on_base[2], pose_rbt[3],
            pose_rbt[4], pose_rbt[5]
        ]

        self.rbt.movel(tpose=robot_delta, acc=0.1, vel=0.25)
        self.gripper.gripper_action(int(255 - 5100 * self.gripper_state[0]))

    def camera2base(self, pose_on_camera):
        Tm = np.array([[-0.9992, 0.0082, 0.0383, 0.0183],
                       [-0.0036, 0.9543, -0.2987, -0.2376],
                       [-0.0390, -0.2986, -0.9536, 1.1127], [0, 0, 0, 1.0000]])
        pose_on_base = np.dot(
            [pose_on_camera[0], pose_on_camera[1], pose_on_camera[2], 1], Tm.T)
        pose_on_base[0] = pose_on_base[0] - 0.03
        pose_on_base[1] = pose_on_base[1] * 0.98 - 0.01
        pose_on_base[2] = pose_on_base[2] * 0.94 + 0.09
        return pose_on_base
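
The final gripper_action call maps the accumulated gripper_state onto the Robotiq command range (0 = fully open, 255 = fully closed). A minimal sketch of that linear mapping (gripper_command is an illustrative name):

def gripper_command(state):
    # state: accumulated finger opening, clamped to [0, 0.05] above;
    # 255 - 5100 * state maps 0.0 -> 255 (closed) and 0.05 -> 0 (open).
    return int(255 - 5100 * state)

print(gripper_command(0.0))   # 255: fully closed
print(gripper_command(0.05))  # 0: fully open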
Code example #8
from urx import URRobot
from urx.robotiq_two_finger_gripper import Robotiq_Two_Finger_Gripper

rbt = URRobot('192.168.31.53')
robotiqgrip = Robotiq_Two_Finger_Gripper(rbt)
pos_init = [0.1, -0.4, 0.35, 2, 0, 0]
robotiqgrip.open_gripper()
rbt.movel(pos_init, 0.2, 0.2)
print("Robot is moving...")
rbt.close()