Example #1
0
    def __init__(self, use_simulation=True, which_camera='inclined'):
        """Set up detection/motion modules and start the peg-transfer loop.

        use_simulation: when True, images come from recordings and no live
            camera is started.
        which_camera: camera name used to select the calibration files.
        """
        self.use_simulation = use_simulation

        # Calibration transforms: robot-to-camera and pegboard-to-camera.
        trc_path = root + 'calibration_files/Trc_' + which_camera + '_PSM1.npy'
        tpc_path = root + 'calibration_files/Tpc_' + which_camera + '.npy'
        self.Trc = np.load(trc_path)  # robot to camera
        self.Tpc = np.load(tpc_path)  # pegboard to camera

        # The live camera is only needed outside simulation.
        if not use_simulation:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()

        # Vision / motion helper modules.
        self.block = BlockDetection3D(self.Tpc)
        self.gp = GraspingPose3D()
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionSingleArmOptimized()
        self.dvrk_model = dvrkKinematics()

        # Pick/place peg pairs: first six left-to-right, last six back.
        self.action_list = np.array(
            [[1, 7], [0, 6], [3, 8], [2, 9], [5, 11], [4, 10],
             [7, 1], [6, 0], [8, 3], [9, 2], [11, 5], [10, 4]])
        self.action_order = 0

        # Latest captured images and the state-machine stack.
        self.color = []
        self.point = []
        self.state = ['initialize']
        self.main()  # enter the task state machine immediately
    def __init__(self):
        """Create the motion/vision objects and preload the joint trajectories."""
        # Hardware bridges and the ball detector.
        self.dvrk = dvrkMotionBridgeP()
        self.zivid = ZividCapture()
        self.BD = BallDetection()

        # Trajectory used for estimating the transform.
        traj_dir = root + 'experiment/0_trajectory_extraction/'
        path_transform = traj_dir + 'short_traj_random.npy'
        self.joint_traj_tranform = self.load_trajectory(path_transform)

        # Verification trajectory.
        # (alternative recording: 'verification_traj_random_sampling_10000.npy')
        path_verify = traj_dir + 'verification_traj_insertion_50_.npy'
        self.joint_traj = self.load_trajectory(path_verify)
Example #3
0
    def __init__(self):
        """Create hardware bridges and load the peg-transfer verification trajectory."""
        # objects
        root = '/home/hwangmh/pycharmprojects/FLSpegtransfer/'
        model_dir = 'motion/'  # renamed from `dir`, which shadowed the builtin
        self.dvrk = dvrkMotionBridgeC(18,
                                      3,
                                      root + model_dir + 'model_red_peg.out',
                                      use_history=True)
        self.zivid = ZividCapture()
        self.BD = BallDetection()

        # Load trajectory
        filename = root + 'experiment/0_trajectory_extraction/verification_traj_peg_transfer.npy'
        self.joint_traj = self.load_trajectory(filename)
    def __init__(self, use_simulation=True, which_camera='inclined'):
        """Initialize the dual-arm peg-transfer task and run main() immediately.

        use_simulation: use recorded images instead of a live camera.
        which_camera: camera name selecting the calibration files.
        """
        self.use_simulation = use_simulation

        # Calibration transforms for both arms and the pegboard.
        calib = root + 'calibration_files/'
        self.Trc1 = np.load(calib + 'Trc_' + which_camera + '_PSM1.npy')  # robot (PSM1) to camera
        self.Trc2 = np.load(calib + 'Trc_' + which_camera + '_PSM2.npy')  # robot (PSM2) to camera
        self.Tpc = np.load(calib + 'Tpc_' + which_camera + '.npy')  # pegboard to camera

        # The live camera is only needed outside simulation.
        if not use_simulation:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()

        # Vision / motion modules; one grasping-pose estimator per arm.
        self.block = BlockDetection3D(self.Tpc)
        self.gp = {arm: GraspingPose3D(which_arm=arm) for arm in ('PSM1', 'PSM2')}
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionDualArm()
        self.dvrk_model = dvrkKinematics()

        # Action pairs [[pick pair], [place pair]]:
        # first three left-to-right, last three right-to-left.
        self.action_list = np.array([[[0, 1], [7, 8]],
                                     [[2, 3], [6, 11]],
                                     [[4, 5], [9, 10]],
                                     [[7, 8], [0, 1]],
                                     [[6, 11], [2, 3]],
                                     [[9, 10], [4, 5]]])
        self.action_order = 0

        # Latest captured images and the state-machine stack.
        self.color = []
        self.point = []
        self.state = ['initialize']
        self.main()
Example #5
0
    def __init__(self, use_simulation=True, use_controller=True, use_optimization=True, optimizer='cubic', which_camera='inclined', which_arm='PSM1'):
        """Single-arm peg-transfer task runner; configures modules then calls main().

        use_simulation: use recorded images instead of a live camera.
        use_controller / use_optimization / optimizer: forwarded to the motion layer.
        which_camera: camera name selecting the calibration files.
        which_arm: arm name selecting its robot-to-camera calibration.
        """
        self.use_simulation = use_simulation

        # Calibration transforms.
        self.Trc = np.load(root + 'calibration_files/Trc_' + which_camera + '_' + which_arm + '.npy')  # robot to camera
        self.Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera + '.npy')  # pegboard to camera

        # Stereo-camera calibration experiment, kept for reference:
        # self.Trc_stereo = np.load(root + 'calibration_files/Trc_' + 'stereo' + '_'+which_arm+'.npy')  # robot to camera
        # self.Trc_stereo[:3, -1] += np.array([0.00, 0.003, 0.00])
        # self.Tc1c2 = np.linalg.inv(self.Trc).dot(self.Trc_stereo)    # cam1 = inclined, cam2 = stereo

        # The live camera is only needed outside simulation.
        if not self.use_simulation:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()

        # Vision / motion modules.
        self.block = BlockDetection3D(self.Tpc)
        self.gp = GraspingPose3D(dist_gps=5, dist_pps=5, which_arm=which_arm)
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionSingleArm(
            use_controller=use_controller, use_optimization=use_optimization,
            optimizer=optimizer, which_arm=which_arm)
        self.zivid_utils = ZividUtils(which_camera='inclined')

        # Pick/place peg pairs: first six left-to-right, last six back.
        self.action_list = np.array(
            [[1, 7], [0, 6], [3, 8], [2, 9], [5, 11], [4, 10],
             [7, 1], [6, 0], [8, 3], [9, 2], [11, 5], [10, 4]])
        self.action_order = 0

        # Latest captured images and the state-machine stack.
        self.color = []
        self.point = []
        self.state = ['initialize']
        self.main()
Example #6
0
import sys
for p in sys.path:
    if p == '/opt/ros/kinetic/lib/python2.7/dist-packages':
        sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
import numpy as np
from FLSpegtransfer.vision.ZividCapture import ZividCapture
from FLSpegtransfer.vision.BlockDetection2D import BlockDetection2D
from FLSpegtransfer.vision.VisualizeDetection import VisualizeDetection
from FLSpegtransfer.vision.GraspingPose import GraspingPose

# Detection pipeline objects; the visualizer and grasping-pose estimator
# both reuse the block detector.
bd = BlockDetection2D()
zivid = ZividCapture()
vd = VisualizeDetection(bd)
gp = GraspingPose(bd)

# Cropped image buffers; empty lists mean "no image yet".
img_color = []
img_depth = []
while True:
    image = np.load('../../record/image.npy')
    depth = np.load('../../record/depth.npy')
    point = np.load('../../record/point.npy')
    img_color, img_depth, img_point = zivid.img_crop(image, depth, point)
    # zivid.capture_3Dimage()
    # img_color, img_depth, img_point = BD.img_crop(zivid.color, zivid.depth, zivid.point)
    img_color = cv2.cvtColor(img_color, cv2.COLOR_RGB2BGR)
    if img_color == [] or img_depth == [] or img_point == []:
        pass
    else:
        # find block
        pose_blks, img_blks, img_pegs, peg_pnts = bd.find_blocks(
class FLSPegTransfer:
    """Dual-arm FLS peg-transfer task driver.

    Detects blocks/pegs in camera (or recorded) images, computes grasping
    and placing poses for PSM1/PSM2, and executes pick-and-place motions via
    a small state machine that is started from __init__.
    """

    def __init__(self, use_simulation=True, which_camera='inclined'):
        """Load calibrations, build modules, and immediately run main().

        use_simulation: when True, read recorded images; no camera is started.
        which_camera: camera name used to select the calibration files.
        """
        self.use_simulation = use_simulation

        # load transform
        self.Trc1 = np.load(root + 'calibration_files/Trc_' + which_camera +
                            '_PSM1.npy')  # robot to camera
        self.Trc2 = np.load(root + 'calibration_files/Trc_' + which_camera +
                            '_PSM2.npy')  # robot to camera
        self.Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera +
                           '.npy')  # pegboard to camera

        # import modules (live camera only outside simulation)
        if use_simulation:
            pass
        else:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()
        self.block = BlockDetection3D(self.Tpc)
        # one grasping-pose estimator per arm
        self.gp = {
            'PSM1': GraspingPose3D(which_arm='PSM1'),
            'PSM2': GraspingPose3D(which_arm='PSM2')
        }
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionDualArm()
        self.dvrk_model = dvrkKinematics()

        # action ordering: each entry is [[pick pair], [place pair]]
        self.action_list = np.array([
            [[0, 1], [7, 8]],
            [[2, 3], [6, 11]],
            [[4, 5], [9, 10]],  # left to right
            [[7, 8], [0, 1]],
            [[6, 11], [2, 3]],
            [[9, 10], [4, 5]]
        ])  # right to left
        self.action_order = 0

        # data members: latest images and the state-machine stack
        self.color = []
        self.point = []
        self.state = ['initialize']
        self.main()

    def transform_task2robot(self,
                             point,
                             delta=False,
                             inverse=False,
                             which_arm='PSM1'):
        """Transform a 3-D point between task (pegboard, mm) and robot (m) frames.

        point: 3-vector; task-frame mm when inverse is False, robot-frame m
            when inverse is True.
        delta: when True, transform a displacement (rotation only, no
            translation offset is added).
        inverse: direction of the transform.
        which_arm: 'PSM1'/0 or 'PSM2'/1 selects the arm calibration;
            raises ValueError for anything else.
        """
        point = np.array(point)
        if inverse == False:
            Tcp = np.linalg.inv(self.Tpc)  # (mm)
            Tcp[:3, -1] = Tcp[:3, -1] * 0.001  # (m)
            # accepts the arm name or its index
            if which_arm == 'PSM1' or which_arm == 0:
                Trp = self.Trc1.dot(Tcp)
            elif which_arm == 'PSM2' or which_arm == 1:
                Trp = self.Trc2.dot(Tcp)
            else:
                raise ValueError
            Rrp = Trp[:3, :3]
            trp = Trp[:3, -1]
            if delta == False:
                transformed = Rrp.dot(point * 0.001) + trp
            else:
                transformed = Rrp.dot(point * 0.001)
        else:
            if which_arm == 'PSM1':
                Tcr = np.linalg.inv(self.Trc1)  # (m)
            elif which_arm == 'PSM2':
                Tcr = np.linalg.inv(self.Trc2)  # (m)
            else:
                raise ValueError
            Tcr[:3, -1] = Tcr[:3, -1] * 1000  # (mm)
            Tpr = self.Tpc.dot(Tcr)
            Rpr = Tpr[:3, :3]
            tpr = Tpr[:3, -1]
            if delta == False:
                transformed = Rpr.dot(point * 1000) + tpr
            else:
                transformed = Rpr.dot(point * 1000)
        return transformed

    @classmethod
    def transform(cls, points, T):
        """Apply homogeneous transform T (4x4) to an (N, 3) array of points."""
        R = T[:3, :3]
        t = T[:3, -1]
        return R.dot(points.T).T + t.T

    def move_blocks(self):
        """Execute the pick and place motions for both arms' current poses."""
        # [n, ang, x,y,z, seen]
        # (usage below treats index 0 as rotation, 1: as position)
        gp_pick1 = self.gp['PSM1'].pose_grasping
        gp_place1 = self.gp['PSM1'].pose_placing
        gp_pick2 = self.gp['PSM2'].pose_grasping
        gp_place2 = self.gp['PSM2'].pose_placing

        # pick-up motion
        gp_pick_robot1 = self.transform_task2robot(gp_pick1[1:],
                                                   which_arm='PSM1')  # [x,y,z]
        gp_pick_robot2 = self.transform_task2robot(gp_pick2[1:],
                                                   which_arm='PSM2')
        print(gp_pick_robot1, gp_pick_robot2, gp_pick1[0], gp_pick2[0])
        print("pick_above_block")
        self.dvrk_motion.move_above_block(pos1=gp_pick_robot1,
                                          rot1=gp_pick1[0],
                                          pos2=gp_pick_robot2,
                                          rot2=gp_pick2[0])
        print("pick_block")
        self.dvrk_motion.pick_block(pos1=gp_pick_robot1,
                                    rot1=gp_pick1[0],
                                    pos2=gp_pick_robot2,
                                    rot2=gp_pick2[0])

        # place motion
        gp_place_robot1 = self.transform_task2robot(gp_place1[1:],
                                                    which_arm='PSM1')
        gp_place_robot2 = self.transform_task2robot(gp_place2[1:],
                                                    which_arm='PSM2')
        print(gp_place_robot1, gp_place_robot2, gp_place1[0], gp_place2[0])
        print("place_above_block")
        self.dvrk_motion.move_above_block(pos1=gp_place_robot1,
                                          rot1=gp_place1[0],
                                          pos2=gp_place_robot2,
                                          rot2=gp_place2[0])

        # visual servoing prior to drop (currently disabled)
        # delta, seen = self.place_servoing()
        # if seen == True:    # if there is a block,
        #     delta_robot = self.transform_task2robot(delta, delta=True)
        self.dvrk_motion.drop_block(pos1=gp_place_robot1,
                                    rot1=gp_place1[0],
                                    pos2=gp_place_robot2,
                                    rot2=gp_place2[0])

    def place_servoing(self):
        """Estimate the correction (task frame) between the held block and its peg.

        Returns (delta, seen): delta is [] when too few block points are visible.
        NOTE(review): action_list entries here are [[pick pair], [place pair]],
        so nb_place below is a *pair* of peg indices; indexing pnt_pegs with it
        yields two points. Looks inherited from the single-arm version —
        confirm before re-enabling the (currently commented-out) caller.
        """
        # get current position
        self.update_images()
        pose_blk, pnt_blk, pnt_mask = self.block.find_block_servo(
            self.color, self.point)
        if len(pnt_blk) < 400:
            delta = []
            seen = False
        else:
            # self.vd.plot3d(pnt_blocks=pnt_blk, pnt_masks=pnt_mask, pnt_pegs=self.block.pnt_pegs)
            _, T = pose_blk
            nb_place = self.action_list[self.action_order][1]
            p_peg = self.block.pnt_pegs[nb_place] + np.array(
                [0.0, 0.0, -5])  # sub-mm above peg
            p_fid = [0, 0, 15]
            p_fid = T[:3, :3].dot(p_fid) + T[:3, -1]  # w.r.t task coordinate
            delta = p_peg - p_fid
            seen = True
        return delta, seen

    def update_images(self):
        """Refresh self.color / self.point from recordings (simulation) or camera."""
        if self.use_simulation:
            # after the first three actions the task runs right-to-left ("rhp")
            if self.action_order <= 2:
                self.color = np.load(
                    'record/peg_transfer_kit_capture/img_color_inclined.npy')
                self.point = np.load(
                    'record/peg_transfer_kit_capture/img_point_inclined.npy')
            else:
                self.color = np.load(
                    'record/peg_transfer_kit_capture/img_color_inclined_rhp.npy'
                )
                self.point = np.load(
                    'record/peg_transfer_kit_capture/img_point_inclined_rhp.npy'
                )
        else:
            self.color, _, self.point = self.zivid.capture_3Dimage(color='BGR')

    def main(self):
        """Task state machine; the newest state sits at self.state[0]."""
        while True:
            if self.state[0] == 'initialize':
                print('')
                print('* State:', self.state[0])
                # define ROI
                self.dvrk_motion.move_origin()

                # random move when NN model uses history
                # self.dvrk_motion.move_random()
                self.update_images()

                # find pegs
                self.block.find_pegs(img_color=self.color,
                                     img_point=self.point)
                self.state.insert(0, 'update_image')

            elif self.state[0] == 'update_image':
                print('')
                print('* State:', self.state[0])
                self.update_images()
                print('image updated.')
                self.state.insert(0, 'plan_action')

            elif self.state[0] == 'plan_action':
                print('')
                print('* State:', self.state[0])
                # all action pairs done -> exit
                if self.action_order == len(self.action_list):
                    self.state.insert(0, 'exit')
                else:
                    self.state.insert(0, 'find_block')
                    print("action pair #", self.action_list[self.action_order])

            elif self.state[0] == 'find_block':
                print('')
                print('* State:', self.state[0])
                # the two arms take opposite elements of each pick/place pair
                nb_pick1 = self.action_list[self.action_order][0][1]  # PSM1
                nb_pick2 = self.action_list[self.action_order][0][0]  # PSM2
                nb_place1 = self.action_list[self.action_order][1][1]  # PSM1
                nb_place2 = self.action_list[self.action_order][1][0]  # PSM2
                pose_blk_pick1, pnt_blk_pick1, pnt_mask_pick1 = self.block.find_block(
                    block_number=nb_pick1,
                    img_color=self.color,
                    img_point=self.point)
                pose_blk_place1, pnt_blk_place1, pnt_mask_place1 = self.block.find_block(
                    block_number=nb_place1,
                    img_color=self.color,
                    img_point=self.point)
                pose_blk_pick2, pnt_blk_pick2, pnt_mask_pick2 = self.block.find_block(
                    block_number=nb_pick2,
                    img_color=self.color,
                    img_point=self.point)
                pose_blk_place2, pnt_blk_place2, pnt_mask_place2 = self.block.find_block(
                    block_number=nb_place2,
                    img_color=self.color,
                    img_point=self.point)

                # check if there is block to move
                # (both pick pegs occupied, both place pegs free)
                if pose_blk_pick1 != [] and pose_blk_place1 == []\
                        and pose_blk_pick2 != [] and pose_blk_place2 == []:
                    print('A block to move was detected.')
                    # find grasping & placing pose
                    self.gp['PSM1'].find_grasping_pose(
                        pose_blk=pose_blk_pick1,
                        peg_point=self.block.pnt_pegs[nb_pick1])
                    self.gp['PSM1'].find_placing_pose(
                        peg_point=self.block.pnt_pegs[nb_place1])
                    self.gp['PSM2'].find_grasping_pose(
                        pose_blk=pose_blk_pick2,
                        peg_point=self.block.pnt_pegs[nb_pick2])
                    self.gp['PSM2'].find_placing_pose(
                        peg_point=self.block.pnt_pegs[nb_place2])
                    # visualize
                    # pnt_grasping = np.array(self.gp.pose_grasping)[1:]
                    # self.vd.plot3d(pnt_blk_pick, pnt_mask_pick, self.block.pnt_pegs, [pnt_grasping])
                    self.state.insert(0, 'move_block')
                else:
                    print('No block to move was detected. Skip this order.')
                    self.action_order += 1
                    self.state.insert(0, 'plan_action')

            elif self.state[0] == 'move_block':
                print('')
                print('* State:', self.state[0])
                self.move_blocks()
                self.action_order += 1
                # re-home when switching from left-to-right to right-to-left
                if self.action_order == 3:
                    self.dvrk_motion.move_origin()
                self.state.insert(0, 'update_image')

            elif self.state[0] == 'exit':
                print("task completed.")
                self.dvrk_motion.move_origin()
                exit()
# Capture-and-motion smoke test: display one Zivid 3-D capture, then stream
# captures while commanding random dVRK wrist rotations.
from FLSpegtransfer.vision.ZividCapture import ZividCapture
from FLSpegtransfer.motion.deprecated.dvrkMotionBridgeP import dvrkMotionBridgeP
import FLSpegtransfer.utils.CmnUtil as U
import sys
import os
# Drop ROS Kinetic's python2 packages so the python3 cv2 is imported below.
for p in sys.path:
    if p == '/opt/ros/kinetic/lib/python2.7/dist-packages':
        sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
import numpy as np

zc = ZividCapture()
zc.start()

# check images
img_color, img_depth, img_point = zc.capture_3Dimage(color='BGR')
zc.display_rgb(img_color)
zc.display_depthmap(img_point)
zc.display_pointcloud(img_point, img_color)

dvrk = dvrkMotionBridgeP()
sampling = 100  # number of random poses to command
for i in range(sampling):
    # Random wrist orientation (deg, per axis), converted to a quaternion.
    r1 = np.random.uniform(-40, 40)
    r2 = np.random.uniform(-40, 40)
    r3 = np.random.uniform(-40, 40)
    rot1 = [r1, r2, r3]
    q1 = U.euler_to_quaternion(rot1, unit='deg')
    jaw1 = [10 * np.pi / 180.]  # jaw opening (rad)
    dvrk.set_pose(rot1=q1, jaw1=jaw1)
    # NOTE(review): the captured images are overwritten each iteration and
    # never stored — presumably only exercising capture timing; confirm.
    img_color, img_depth, img_point = zc.capture_3Dimage(img_crop=False)
Example #9
0
class FLSPegTransfer:
    """Single-arm FLS peg-transfer task driver.

    Runs a small state machine (started from __init__) that detects blocks on
    a pegboard, computes grasping/placing poses, and commands the dVRK arm.
    """

    def __init__(self, use_simulation=True, which_camera='inclined'):
        """Load calibrations, build vision/motion modules, then run main().

        use_simulation: when True, read recorded images; no camera is started.
        which_camera: camera name used to select the calibration files.
        """
        self.use_simulation = use_simulation

        # load transform
        self.Trc = np.load(root + 'calibration_files/Trc_' + which_camera + '_PSM1.npy')  # robot to camera
        self.Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera + '.npy')  # pegboard to camera

        # import modules (live camera only outside simulation)
        if use_simulation:
            pass
        else:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()
        self.block = BlockDetection3D(self.Tpc)
        self.gp = GraspingPose3D()
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionSingleArmOptimized()
        self.dvrk_model = dvrkKinematics()

        # action ordering: [pick peg, place peg];
        # first six left-to-right, last six right-to-left
        self.action_list = np.array([[1, 7], [0, 6], [3, 8], [2, 9], [5, 11], [4, 10],  # left to right
                                     [7, 1], [6, 0], [8, 3], [9, 2], [11, 5], [10, 4]])  # right to left
        self.action_order = 0

        # data members: latest images and the state-machine stack
        self.color = []
        self.point = []
        self.state = ['initialize']
        self.main()

    def transform_task2robot(self, point, delta=False, inverse=False):
        """Transform a 3-D point between task (pegboard, mm) and robot (m) frames.

        point: 3-vector; task-frame mm when inverse is False, robot-frame m
            when inverse is True.
        delta: when True, transform a displacement (rotation only, no
            translation offset is added).
        inverse: direction of the transform.
        Returns the transformed 3-vector (m forward, mm inverse).
        """
        point = np.array(point)
        if inverse == False:
            Tcp = np.linalg.inv(self.block.Tpc)  # (mm)
            Tcp[:3, -1] = Tcp[:3, -1] * 0.001  # (m)
            Trp = self.Trc.dot(Tcp)
            Rrp = Trp[:3, :3]
            trp = Trp[:3, -1]
            if delta == False:
                transformed = Rrp.dot(point * 0.001) + trp
            else:
                transformed = Rrp.dot(point * 0.001)
        else:
            Tcr = np.linalg.inv(self.Trc)   # (m)
            Tcr[:3, -1] = Tcr[:3, -1] * 1000  # (mm)
            Tpr = self.block.Tpc.dot(Tcr)
            Rpr = Tpr[:3, :3]
            tpr = Tpr[:3, -1]
            if delta == False:
                transformed = Rpr.dot(point * 1000) + tpr
            else:
                transformed = Rpr.dot(point * 1000)
        return transformed

    @classmethod
    def transform(cls, points, T):
        """Apply homogeneous transform T (4x4) to an (N, 3) array of points."""
        R = T[:3, :3]
        t = T[:3, -1]
        return R.dot(points.T).T + t.T

    def move_blocks(self):
        """Execute pick and transfer motions for the current grasp/place poses."""
        # [n, ang, x,y,z, seen]
        # (usage below treats index 0 as rotation, 1: as position)
        gp_pick = self.gp.pose_grasping
        gp_place = self.gp.pose_placing
        gp_pick_robot = self.transform_task2robot(gp_pick[1:])  # [x,y,z]
        gp_place_robot = self.transform_task2robot(gp_place[1:])
        # the first action of each direction starts from the origin pose
        if self.action_order in (0, 6):
            print("go and pick")
            self.dvrk_motion.go_pick(pos_pick=gp_pick_robot, rot_pick=gp_pick[0])
        else:
            print("return to peg")
            self.dvrk_motion.return_to_peg(pos_pick=gp_pick_robot, rot_pick=gp_pick[0])
        print("transfer block")
        self.dvrk_motion.transfer_block(pos_pick=gp_pick_robot, rot_pick=gp_pick[0],
                                        pos_place=gp_place_robot, rot_place=gp_place[0])

        # visual servoing prior to drop (currently disabled)
        # delta, seen = self.place_servoing()
        # if seen == True:    # if there is a block,
        #     delta_robot = self.transform_task2robot(delta, delta=True)
        # delta_robot = np.array([0.0, 0.0, 0.0])
        # self.dvrk_motion.drop_block(pos=gp_place_robot + delta_robot, rot=gp_place[0])

    def place_servoing(self):
        """Estimate the correction (mm, task frame) between the held block and its peg.

        Returns (delta, seen): delta is [] when too few block points are visible.
        """
        # get current position
        self.update_images()
        pose_blk, pnt_blk, pnt_mask = self.block.find_block_servo(self.color, self.point)
        if len(pnt_blk) < 400:  # too few points: block not visible
            delta = []
            seen = False
        else:
            # self.vd.plot3d(pnt_blocks=pnt_blk, pnt_masks=pnt_mask, pnt_pegs=self.block.pnt_pegs)
            _, T = pose_blk
            nb_place = self.action_list[self.action_order][1]
            p_peg = self.block.pnt_pegs[nb_place] + np.array([0.0, 0.0, -5])  # sub-mm above peg
            p_fid = [0, 0, 15]
            p_fid = T[:3, :3].dot(p_fid) + T[:3, -1]     # w.r.t task coordinate
            delta = p_peg - p_fid
            seen = True
        return delta, seen

    def update_images(self):
        """Refresh self.color / self.point from recordings (simulation) or camera."""
        if self.use_simulation:
            # after the first six actions the task runs right-to-left ("rhp")
            if self.action_order <= 5:
                self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined.npy')
                self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined.npy')
            else:
                self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined_rhp.npy')
                self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined_rhp.npy')
        else:
            self.color, _, self.point = self.zivid.capture_3Dimage(color='BGR')

    def main(self):
        """Task state machine; the newest state sits at self.state[0]."""
        while True:
            if self.state[0] == 'initialize':
                print('')
                print('* State:', self.state[0])
                # define ROI
                self.dvrk_motion.move_origin()

                # random move when NN model uses history
                # self.dvrk_motion.move_random()
                self.update_images()

                # find pegs
                self.block.find_pegs(img_color=self.color, img_point=self.point)
                self.state.insert(0, 'update_image')

            elif self.state[0] == 'update_image':
                print('')
                print('* State:', self.state[0])
                self.update_images()
                print('image updated.')
                self.state.insert(0, 'plan_action')

            elif self.state[0] == 'plan_action':
                print('')
                print('* State:', self.state[0])
                # all actions done -> exit
                if self.action_order == len(self.action_list):
                    self.state.insert(0, 'exit')
                else:
                    self.state.insert(0, 'find_block')
                    print("action pair #", self.action_list[self.action_order])

            elif self.state[0] == 'find_block':
                print('')
                print('* State:', self.state[0])
                nb_pick = self.action_list[self.action_order][0]
                nb_place = self.action_list[self.action_order][1]
                pose_blk_pick, pnt_blk_pick, pnt_mask_pick = self.block.find_block(
                    block_number=nb_pick, img_color=self.color, img_point=self.point)
                pose_blk_place, pnt_blk_place, pnt_mask_place = self.block.find_block(
                    block_number=nb_place, img_color=self.color, img_point=self.point)

                # check if there is block to move
                # (pick peg occupied, place peg free)
                if pose_blk_pick != [] and pose_blk_place == []:
                    print('A block to move was detected.')
                    # find grasping & placing pose
                    self.gp.find_grasping_pose(pose_blk=pose_blk_pick, peg_point=self.block.pnt_pegs[nb_pick])
                    self.gp.find_placing_pose(peg_point=self.block.pnt_pegs[nb_place])
                    # visualize
                    # pnt_grasping = np.array(self.gp.pose_grasping)[1:]
                    # self.vd.plot3d(pnt_blk_pick, pnt_mask_pick, self.block.pnt_pegs, [pnt_grasping])
                    self.state.insert(0, 'move_block')
                else:
                    print('No block to move was detected. Skip this order.')
                    self.action_order += 1
                    self.state.insert(0, 'plan_action')

            elif self.state[0] == 'move_block':
                print('')
                print('* State:', self.state[0])
                self.move_blocks()
                # re-home when switching from left-to-right to right-to-left
                if self.action_order == 5:
                    self.dvrk_motion.move_origin()
                self.action_order += 1
                self.state.insert(0, 'update_image')

            elif self.state[0] == 'exit':
                print("task completed.")
                gp_place = self.gp.pose_placing
                gp_place_robot = self.transform_task2robot(gp_place[1:])
                # BUG FIX: the rotation must come from the placing pose angle
                # (gp_place[0]); previously the x-coordinate of the robot-frame
                # position (gp_place_robot[0]) was passed as rot_place.
                self.dvrk_motion.return_to_origin(pos_place=gp_place_robot, rot_place=gp_place[0])
                # self.dvrk_motion.move_origin()
                exit()
class FLSPegTransferDualArm:
    """FLS peg-transfer task with both PSM arms picking/placing simultaneously.

    A stack-based state machine drives the task:
        initialize -> update_image -> plan_action -> find_block -> move_block
    repeated for every pick/place pair in ``self.action_list``; ``state[0]`` is
    always the active state.
    """
    def __init__(self, use_simulation=True, use_controller=True, use_optimization=True, optimizer='cubic', which_camera='inclined'):
        self.use_simulation = use_simulation

        # load calibration transforms (4x4 homogeneous matrices)
        self.Trc1 = np.load(root + 'calibration_files/Trc_' + which_camera + '_PSM1.npy')  # robot to camera
        # self.Trc1[:3, -1] += np.array([0.002, 0.00, 0.00])
        self.Trc2 = np.load(root + 'calibration_files/Trc_' + which_camera + '_PSM2.npy')  # robot to camera
        self.Trc2[:3, -1] += np.array([-0.001, 0.00, 0.00])  # hand-tuned offset for PSM2 calibration
        self.Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera + '.npy')  # pegboard to camera

        # import modules (camera only when running on hardware)
        if self.use_simulation:
            pass
        else:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()
        self.block = BlockDetection3D(self.Tpc)
        self.gp = {'PSM1': GraspingPose3D(dist_pps=5, dist_gps=5, which_arm='PSM1'),
                   'PSM2': GraspingPose3D(dist_pps=5, dist_gps=5, which_arm='PSM2')}
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion\
            = dvrkPegTransferMotionDualArm(use_controller=use_controller, use_optimization=use_optimization, optimizer=optimizer)

        # action ordering: each row is [[pick_PSM2, pick_PSM1], [place_PSM2, place_PSM1]]
        self.action_list = np.array([[[0, 1], [7, 8]],
                                     [[2, 3], [6, 11]],
                                     [[4, 5], [9, 10]],  # left to right
                                     [[7, 8], [0, 1]],
                                     [[6, 11], [2, 3]],
                                     [[9, 10], [4, 5]]])  # right to left
        self.action_order = 0

        # data members
        self.color = []  # latest color image
        self.point = []  # latest point cloud
        self.state = ['initialize']  # state stack; state[0] is the active state
        self.main()

    def transform_task2robot(self, point, delta=False, inverse=False, which_arm='PSM1'):
        """Convert a 3D point between the task (pegboard) frame and a robot frame.

        Args:
            point: 3-vector (array-like).
            delta: if True, apply rotation only (treat *point* as a displacement).
            inverse: if False task->robot, if True robot->task.
            which_arm: 'PSM1'/'PSM2' (0/1 also accepted in the forward direction).

        Returns:
            np.ndarray: transformed 3-vector.

        Raises:
            ValueError: unknown *which_arm*.
        """
        point = np.array(point)
        if inverse == False:
            Tcp = np.linalg.inv(self.Tpc)
            if which_arm=='PSM1' or which_arm==0:
                Trp = self.Trc1.dot(Tcp)
            elif which_arm=='PSM2' or which_arm==1:
                Trp = self.Trc2.dot(Tcp)
            else:
                raise ValueError
            Rrp = Trp[:3, :3]
            trp = Trp[:3, -1]
            if delta == False:
                transformed = Rrp.dot(point) + trp
            else:
                transformed = Rrp.dot(point)
        else:
            if which_arm=='PSM1':
                Tcr = np.linalg.inv(self.Trc1)   # (m)
            elif which_arm=='PSM2':
                Tcr = np.linalg.inv(self.Trc2)   # (m)
            else:
                raise ValueError
            Tpr = self.Tpc.dot(Tcr)
            Rpr = Tpr[:3, :3]
            tpr = Tpr[:3, -1]
            if delta == False:
                transformed = Rpr.dot(point) + tpr
            else:
                transformed = Rpr.dot(point)
        return transformed

    @classmethod
    def transform(cls, points, T):
        """Apply homogeneous transform T (4x4) to an (N, 3) array of points."""
        R = T[:3, :3]
        t = T[:3, -1]
        return R.dot(points.T).T + t.T

    def move_blocks(self):
        """Execute one simultaneous pick-and-place with both arms, with a
        visual-servoing correction just before dropping the blocks."""
        # grasping/placing poses: [ang, x, y, z] (mm, task frame)
        gp_pick1 = self.gp['PSM1'].pose_grasping
        gp_place1 = self.gp['PSM1'].pose_placing
        gp_pick2 = self.gp['PSM2'].pose_grasping
        gp_place2 = self.gp['PSM2'].pose_placing
        gp_place1 = np.array(gp_place1)
        gp_place2 = np.array(gp_place2)

        # pick-up and place motion (task mm -> robot m)
        gp_pick_robot1 = self.transform_task2robot(gp_pick1[1:]*0.001, which_arm='PSM1')  # [x,y,z]
        gp_pick_robot2 = self.transform_task2robot(gp_pick2[1:]*0.001, which_arm='PSM2')
        gp_place_robot1 = self.transform_task2robot(gp_place1[1:]*0.001, which_arm='PSM1')
        gp_place_robot2 = self.transform_task2robot(gp_place2[1:]*0.001, which_arm='PSM2')
        if self.action_order == 0 or self.action_order == 3:
            # first action of each direction starts from the origin pose
            self.dvrk_motion.go_pick(pos_pick1=gp_pick_robot1, rot_pick1=gp_pick1[0], pos_pick2=gp_pick_robot2, rot_pick2=gp_pick2[0])
        else:
            self.dvrk_motion.return_to_peg(pos_pick1=gp_pick_robot1, rot_pick1=gp_pick1[0], pos_pick2=gp_pick_robot2, rot_pick2=gp_pick2[0])
        self.dvrk_motion.transfer_block(pos_pick1=gp_pick_robot1, rot_pick1=gp_pick1[0], pos_place1=gp_place_robot1, rot_place1=gp_place1[0],
                                        pos_pick2=gp_pick_robot2, rot_pick2=gp_pick2[0], pos_place2=gp_place_robot2, rot_place2=gp_place2[0])

        # visual correction prior to drop
        delta1, seen1, delta2, seen2 = self.place_servoing()
        # BUGFIX: delta2 was previously transformed with the default
        # which_arm='PSM1'; each correction must use its own arm's frame.
        delta_rob1 = self.transform_task2robot(delta1, delta=True, which_arm='PSM1')
        delta_rob1[2] = 0.0  # temporary: ignore height correction
        delta_rob2 = self.transform_task2robot(delta2, delta=True, which_arm='PSM2')
        delta_rob2[2] = 0.0  # temporary
        self.dvrk_motion.servoing_block(pos_place1=gp_place_robot1+delta_rob1, rot_place1=gp_place1[0], pos_place2=gp_place_robot2+delta_rob2, rot_place2=gp_place2[0])

    def place_servoing(self):
        """Measure each held block's offset from its target peg.

        Returns:
            (delta1, seen1, delta2, seen2): per-arm correction (m, task frame)
            and whether the block was seen; zero correction when unseen.
        """
        # get current position
        self.update_images()
        nb_place1 = self.action_list[self.action_order][1][1]  # PSM1
        nb_place2 = self.action_list[self.action_order][1][0]  # PSM2
        pose_blk1, pnt_blk1, pnt_mask1 = self.block.find_block_servo(self.color, self.point, self.block.pnt_pegs[nb_place1])
        pose_blk2, pnt_blk2, pnt_mask2 = self.block.find_block_servo(self.color, self.point, self.block.pnt_pegs[nb_place2])
        if len(pnt_blk1) < 400:  # too few points: block not visible
            delta1 = [0.0, 0.0, 0.0]
            seen1 = False
        else:
            # self.vd.plot3d(pnt_blocks=[pnt_blk1], pnt_masks=[pnt_mask1], pnt_pegs=self.block.pnt_pegs)
            _, T1 = pose_blk1
            p_peg1 = self.block.pnt_pegs[nb_place1] + np.array([0.0, 0.0, -5])  # sub-mm above peg
            p_fid1 = [0, 0, 15]
            p_fid1 = T1[:3, :3].dot(p_fid1) + T1[:3, -1]  # w.r.t task coordinate
            delta1 = (p_peg1 - p_fid1) * 0.001  # unit to (m)
            seen1 = True
        if len(pnt_blk2) < 400:
            delta2 = [0.0, 0.0, 0.0]
            seen2 = False
        else:
            # self.vd.plot3d(pnt_blocks=[pnt_blk2], pnt_masks=[pnt_mask2], pnt_pegs=self.block.pnt_pegs)
            _, T2 = pose_blk2
            p_peg2 = self.block.pnt_pegs[nb_place2] + np.array([0.0, 0.0, -5])  # sub-mm above peg
            p_fid2 = [0, 0, 15]
            p_fid2 = T2[:3, :3].dot(p_fid2) + T2[:3, -1]  # w.r.t task coordinate
            delta2 = (p_peg2 - p_fid2) * 0.001  # unit to (m)
            seen2 = True
        return delta1, seen1, delta2, seen2

    def update_images(self):
        """Refresh self.color / self.point from recorded captures (simulation)
        or from the Zivid camera (hardware)."""
        if self.use_simulation:
            import cv2
            if self.action_order <= 2:
                self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined.npy')
                self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined.npy')
                cv2.imwrite("img_inclined_detection.png", self.color)
            else:
                # right-half-plane capture for the return direction
                self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined_rhp.npy')
                self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined_rhp.npy')
        else:
            self.color, _, self.point = self.zivid.capture_3Dimage(color='BGR')

    def main(self):
        """State-machine loop; runs until all actions are done, then exits."""
        while True:
            if self.state[0] == 'initialize':
                print('')
                print('* State:', self.state[0])
                # define ROI
                self.dvrk_motion.move_origin()
                self.update_images()

                # find pegs
                self.block.find_pegs(img_color=self.color, img_point=self.point)
                self.state.insert(0, 'update_image')

            elif self.state[0] == 'update_image':
                print('')
                print('* State:', self.state[0])
                self.update_images()
                print('image updated.')
                self.state.insert(0, 'plan_action')

            elif self.state[0] == 'plan_action':
                print('')
                print('* State:', self.state[0])
                if self.action_order == len(self.action_list):
                    self.state.insert(0, 'exit')
                else:
                    self.state.insert(0, 'find_block')
                    print("action pair #", self.action_list[self.action_order])

            elif self.state[0] == 'find_block':
                print('')
                print('* State:', self.state[0])
                nb_pick1 = self.action_list[self.action_order][0][1]    # PSM1
                nb_pick2 = self.action_list[self.action_order][0][0]    # PSM2
                nb_place1 = self.action_list[self.action_order][1][1]   # PSM1
                nb_place2 = self.action_list[self.action_order][1][0]   # PSM2

                pose_blk_pick1, pnt_blk_pick1, pnt_mask_pick1 = self.block.find_block(
                    block_number=nb_pick1, img_color=self.color, img_point=self.point)
                pose_blk_pick2, pnt_blk_pick2, pnt_mask_pick2 = self.block.find_block(
                    block_number=nb_pick2, img_color=self.color, img_point=self.point)

                # check if there is a block to move for each arm
                if pose_blk_pick1 != [] and pose_blk_pick2 != []:
                    print('A block to move was detected.')
                    # find grasping & placing pose
                    self.gp['PSM1'].find_grasping_pose(pose_blk=pose_blk_pick1, peg_point=self.block.pnt_pegs[nb_pick1])
                    self.gp['PSM1'].find_placing_pose(peg_point=self.block.pnt_pegs[nb_place1])
                    self.gp['PSM2'].find_grasping_pose(pose_blk=pose_blk_pick2, peg_point=self.block.pnt_pegs[nb_pick2])
                    self.gp['PSM2'].find_placing_pose(peg_point=self.block.pnt_pegs[nb_place2])
                    # (debug) visualize detected blocks / grasping points:
                    # pnt_grasping = np.array(self.gp['PSM1'].pose_grasping)[1:]
                    # self.vd.plot3d([pnt_blk_pick1], [pnt_mask_pick1], self.block.pnt_pegs, [pnt_grasping])
                    # pnt_grasping = np.array(self.gp['PSM2'].pose_grasping)[1:]
                    # self.vd.plot3d([pnt_blk_pick2], [pnt_mask_pick2], self.block.pnt_pegs, [pnt_grasping])
                    self.state.insert(0, 'move_block')
                else:
                    print('No block to move was detected. Skip this order.')
                    self.action_order += 1
                    self.state.insert(0, 'plan_action')

            elif self.state[0] == 'move_block':
                print('')
                print('* State:', self.state[0])
                self.move_blocks()
                self.action_order += 1
                if self.action_order == 3:
                    # finished the left-to-right half; re-home before returning
                    self.dvrk_motion.move_origin()
                self.state.insert(0, 'update_image')

            elif self.state[0] == 'exit':
                print("task completed.")
                self.dvrk_motion.move_origin()
                exit()
Example #11
0
    def __init__(self, use_simulation=True, which_camera='inclined'):
        """Set up detection, grasp-planning, and motion modules for the
        hand-over peg-transfer task, then start the pick worker thread and run
        the place loop on the calling thread.

        Args:
            use_simulation: if True, skip starting the Zivid camera.
            which_camera: camera name used to select calibration files.
        """
        self.use_simulation = use_simulation

        # load transform (4x4 calibration matrices)
        self.Trc1 = np.load(root + 'calibration_files/Trc_' + which_camera +
                            '_PSM1.npy')  # robot to camera
        self.Trc2 = np.load(root + 'calibration_files/Trc_' + which_camera +
                            '_PSM2.npy')  # robot to camera
        self.Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera +
                           '.npy')  # pegboard to camera

        # import modules (camera only needed on real hardware)
        if use_simulation:
            pass
        else:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()
        self.block = BlockDetection3D(self.Tpc)
        # self.gp = [GraspingPose3D(which_arm='PSM1'), GraspingPose3D(which_arm='PSM2')]
        self.gp = {
            'PSM1': GraspingPose3D(which_arm='PSM1'),
            'PSM2': GraspingPose3D(which_arm='PSM2')
        }
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionHandOver()
        self.dvrk_model = dvrkKinematics()

        # action ordering (pick, place) pair: [pick_arm, pick_peg, place_arm, place_peg]
        self.action_list = [
            ['PSM2', 1, 'PSM1', 7],  # left to right
            ['PSM2', 0, 'PSM1', 6],
            ['PSM2', 3, 'PSM1', 8],
            ['PSM2', 2, 'PSM1', 9],
            ['PSM2', 5, 'PSM1', 11],
            ['PSM2', 4, 'PSM1', 10],
            ['PSM1', 7, 'PSM2', 1],  # right to left
            ['PSM1', 6, 'PSM2', 0],
            ['PSM1', 8, 'PSM2', 3],
            ['PSM1', 9, 'PSM2', 2],
            ['PSM1', 11, 'PSM2', 5],
            ['PSM1', 10, 'PSM2', 4]
        ]
        self.action_order = [-1, -1]  # [pick action, place action]

        # data members (filled by update_images / initialize)
        self.color = []
        self.point = []
        self.state = []  # [pick state, place state]
        self.pnt_handover = []

        # event objects used to synchronize the pick and place threads
        self.event_handover = threading.Event()
        self.event_waiting = threading.Event()

        # parallelize: pick runs on a worker thread, place on this thread
        self.initialize()

        # self.thread1 = threading.Thread(target=self.PSM1)
        self.thread2 = threading.Thread(target=self.run_pick)
        # self.thread1.start()
        self.thread2.start()
        self.run_place()
class dvrkCalibration():
    """Collect dVRK joint-calibration data by commanding joint trajectories and
    measuring the actual tool pose with a Zivid camera plus ball detection.

    Two modes are supported (see collect_data_joint):
      * transform='unknown': record desired/actual tool positions to estimate
        the robot-to-camera transform (Trc).
      * transform='known': record desired/actual joint angles for calibration.
    """
    def __init__(self):
        # objects (hardware bridge, camera, and ball-based pose estimator)
        self.dvrk = dvrkMotionBridgeP()
        self.zivid = ZividCapture()
        self.BD = BallDetection()

        # Load trajectory used to estimate the transform (q1-q3 only)
        filename_transform = root + 'experiment/0_trajectory_extraction/short_traj_random.npy'
        self.joint_traj_tranform = self.load_trajectory(filename_transform)
        # filename = root + 'experiment/0_trajectory_extraction/verification_traj_random_sampling_10000.npy'
        filename = root + 'experiment/0_trajectory_extraction/verification_traj_insertion_50_.npy'
        self.joint_traj = self.load_trajectory(filename)

    def load_trajectory(self, filename):
        """Load an (N, 6) joint trajectory from *filename*, plot its Cartesian
        path and per-joint angles, and return the raw array.

        NOTE: blocks on plt.show() until the plot windows are closed.
        """
        joint = np.load(filename)
        # forward kinematics of the first three joints only (wrist ignored)
        pos = np.array([
            self.BD.fk_position(q[0],
                                q[1],
                                q[2],
                                0,
                                0,
                                0,
                                L1=self.BD.L1,
                                L2=self.BD.L2,
                                L3=0,
                                L4=0) for q in joint
        ])
        q1 = joint[:, 0]
        q2 = joint[:, 1]
        q3 = joint[:, 2]
        q4 = joint[:, 3]
        q5 = joint[:, 4]
        q6 = joint[:, 5]

        # Create 3D plot of the Cartesian path
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        plt.plot(pos[:, 0], pos[:, 1], pos[:, 2], 'b.-')
        ax.set_xlabel('X Label')
        ax.set_ylabel('Y Label')
        ax.set_zlabel('Z Label')
        print('data length: ', len(joint))
        plt.show()

        # Create 2D plot for joint angles (q3 is prismatic: mm, others: deg)
        plt.subplot(611)
        plt.plot(q1 * 180. / np.pi, 'b-')
        plt.ylabel(r'q1 ($^\circ$)')
        plt.subplot(612)
        plt.plot(q2 * 180. / np.pi, 'b-')
        plt.ylabel(r'q2 ($^\circ$)')
        plt.subplot(613)
        plt.plot(q3, 'b-')
        plt.ylabel('q3 (mm)')
        plt.subplot(614)
        plt.plot(q4 * 180. / np.pi, 'b-')
        plt.ylabel(r'q4 ($^\circ$)')
        plt.subplot(615)
        plt.plot(q5 * 180. / np.pi, 'b-')
        plt.ylabel(r'q5 ($^\circ$)')
        plt.subplot(616)
        plt.plot(q6 * 180. / np.pi, 'b-')
        plt.ylabel(r'q6 ($^\circ$)')
        plt.xlabel('(step)')
        plt.show()
        return joint

    def exp0_get_transform(self):
        """Move only q1-q3 through the transform trajectory to estimate Trc."""
        jaw1 = [5. * np.pi / 180.]
        self.dvrk.set_pose(jaw1=jaw1)
        j1 = self.joint_traj_tranform[:, 0]
        j2 = self.joint_traj_tranform[:, 1]
        j3 = self.joint_traj_tranform[:, 2]
        j4 = np.zeros_like(j1)
        j5 = np.zeros_like(j1)
        j6 = np.zeros_like(j1)
        self.collect_data_joint(j1, j2, j3, j4, j5, j6, transform='unknown')

    def exp1_move_all_joints(self):
        """Replay the full 6-joint verification trajectory."""
        jaw1 = [5. * np.pi / 180.]
        self.dvrk.set_pose(jaw1=jaw1)
        j1 = self.joint_traj[:, 0]
        j2 = self.joint_traj[:, 1]
        j3 = self.joint_traj[:, 2]
        j4 = self.joint_traj[:, 3]
        j5 = self.joint_traj[:, 4]
        j6 = self.joint_traj[:, 5]
        self.collect_data_joint(j1, j2, j3, j4, j5, j6, transform='known')

    def exp2_move_q4_q5_q6(self):
        """Move only the wrist joints; hold q1-q3 at their current values."""
        jaw1 = [5. * np.pi / 180.]
        self.dvrk.set_pose(jaw1=jaw1)
        j4 = self.joint_traj[:, 3]
        j5 = self.joint_traj[:, 4]
        j6 = self.joint_traj[:, 5]
        j1 = np.ones_like(j4) * self.dvrk.act_joint1[0]
        j2 = np.ones_like(j4) * self.dvrk.act_joint1[1]
        j3 = np.ones_like(j4) * self.dvrk.act_joint1[2]
        self.collect_data_joint(j1, j2, j3, j4, j5, j6, transform='known')

    def exp3_move_q4_only(self):
        """Move only q4; hold q1-q3 at their current values, q5/q6 at zero."""
        jaw1 = [5. * np.pi / 180.]
        self.dvrk.set_pose(jaw1=jaw1)
        j4 = self.joint_traj[:, 3]
        j5 = np.zeros_like(j4)
        j6 = np.zeros_like(j4)
        j1 = np.ones_like(j4) * self.dvrk.act_joint1[0]
        j2 = np.ones_like(j4) * self.dvrk.act_joint1[1]
        j3 = np.ones_like(j4) * self.dvrk.act_joint1[2]
        self.collect_data_joint(j1, j2, j3, j4, j5, j6, transform='known')

    def exp4_move_q5_only(self):
        """Move only q5; hold q1-q3 at their current values, q4/q6 at zero."""
        jaw1 = [5. * np.pi / 180.]
        self.dvrk.set_pose(jaw1=jaw1)
        j5 = self.joint_traj[:, 4]
        j4 = np.zeros_like(j5)
        j6 = np.zeros_like(j5)
        j1 = np.ones_like(j5) * self.dvrk.act_joint1[0]
        j2 = np.ones_like(j5) * self.dvrk.act_joint1[1]
        j3 = np.ones_like(j5) * self.dvrk.act_joint1[2]
        self.collect_data_joint(j1, j2, j3, j4, j5, j6, transform='known')

    def exp5_move_q6_only(self):
        """Move only q6; hold q1-q3 at their current values, q4/q5 at zero."""
        jaw1 = [5. * np.pi / 180.]
        self.dvrk.set_pose(jaw1=jaw1)
        j6 = self.joint_traj[:, 5]
        j4 = np.zeros_like(j6)
        j5 = np.zeros_like(j6)
        j1 = np.ones_like(j6) * self.dvrk.act_joint1[0]
        j2 = np.ones_like(j6) * self.dvrk.act_joint1[1]
        j3 = np.ones_like(j6) * self.dvrk.act_joint1[2]
        self.collect_data_joint(j1, j2, j3, j4, j5, j6, transform='known')

    def collect_data_joint(self,
                           j1,
                           j2,
                           j3,
                           j4,
                           j5,
                           j6,
                           transform='known'):  # j1, ..., j6: joint trajectory
        """Command the trajectory sample-by-sample and record measurements.

        For transform='known', saves desired/actual joint angles
        (q_des_raw.npy / q_act_raw.npy); for transform='unknown', saves
        desired/actual positions and the fitted rigid transform (Trc.npy).
        Data is saved in the finally block, so partial runs are preserved.
        """
        try:
            time_st = time.time()  # (sec)
            time_stamp = []
            q_des = []
            q_act = []
            pos_des = []
            pos_act = []
            assert len(j1) == len(j2) == len(j3) == len(j4) == len(j5) == len(
                j6)
            for qd1, qd2, qd3, qd4, qd5, qd6 in zip(j1, j2, j3, j4, j5, j6):
                joint1 = [qd1, qd2, qd3, qd4, qd5, qd6]
                self.dvrk.set_joint(joint1=joint1)

                # Capture image from Zivid
                self.zivid.capture_3Dimage()
                img_color, img_depth, img_point = self.BD.img_crop(
                    self.zivid.image, self.zivid.depth, self.zivid.point)
                img_color = cv2.cvtColor(img_color, cv2.COLOR_RGB2BGR)
                img_color_org = np.copy(img_color)

                # Find balls
                pbs = self.BD.find_balls(img_color_org, img_depth, img_point)
                img_color = self.BD.overlay_balls(img_color, pbs)

                # Find tool position, joint angles, and overlay
                if pbs[0] == [] or pbs[1] == []:
                    # Primary balls not visible: no measurement this sample.
                    # BUGFIX: 'pt' was previously left unbound here (NameError
                    # on the first sample) or stale from an earlier iteration,
                    # silently corrupting transform='unknown' data.
                    pt = None
                    qa1 = 0.0
                    qa2 = 0.0
                    qa3 = 0.0
                    qa4 = 0.0
                    qa5 = 0.0
                    qa6 = 0.0
                else:
                    # Find tool position, joint angles, and overlay
                    pt = self.BD.find_tool_position(
                        pbs[0], pbs[1])  # tool position of pitch axis
                    pt = np.array(pt) * 0.001  # (m)
                    if transform == 'known':
                        pt = self.BD.Rrc.dot(pt) + self.BD.trc
                        qa1, qa2, qa3 = self.BD.ik_position(pt)

                        # Find tool orientation, joint angles, and overlay
                        count_pbs = [pbs[2], pbs[3], pbs[4], pbs[5]]
                        if count_pbs.count([]) >= 2:
                            qa4 = 0.0
                            qa5 = 0.0
                            qa6 = 0.0
                        else:
                            Rm = self.BD.find_tool_orientation(
                                pbs[2], pbs[3], pbs[4],
                                pbs[5])  # orientation of the marker
                            qa4, qa5, qa6 = self.BD.ik_orientation(
                                qa1, qa2, Rm)
                            img_color = self.BD.overlay_tool(
                                img_color, [qa1, qa2, qa3, qa4, qa5, qa6],
                                (0, 255, 0))

                # Append data pairs
                if transform == 'known':
                    # joint angles (zeros when the balls were not detected)
                    q_des.append([qd1, qd2, qd3, qd4, qd5, qd6])
                    q_act.append([qa1, qa2, qa3, qa4, qa5, qa6])
                    time_stamp.append(time.time() - time_st)
                    print('index: ', len(q_des), '/', len(j1))
                    print('t_stamp: ', time.time() - time_st)
                    print('q_des: ', [qd1, qd2, qd3, qd4, qd5, qd6])
                    print('q_act: ', [qa1, qa2, qa3, qa4, qa5, qa6])
                    print(' ')
                elif transform == 'unknown':
                    if pt is None:
                        # cannot pair a measurement with this sample; skip it
                        print('balls not detected; sample skipped')
                    else:
                        # positions (desired from FK of q1-q3, actual from camera)
                        pos_des_temp = self.BD.fk_position(q1=qd1,
                                                           q2=qd2,
                                                           q3=qd3,
                                                           q4=0,
                                                           q5=0,
                                                           q6=0,
                                                           L1=self.BD.L1,
                                                           L2=self.BD.L2,
                                                           L3=0,
                                                           L4=0)
                        pos_des.append(pos_des_temp)
                        pos_act.append(pt)
                        print('index: ', len(pos_des), '/', len(j1))
                        print('pos_des: ', pos_des_temp)
                        print('pos_act: ', pt)
                        print(' ')

                # Visualize
                cv2.imshow("images", img_color)
                cv2.waitKey(1) & 0xFF
                # cv2.waitKey(0)
        finally:
            # Save data to a file (also on interrupt/error, to keep partial runs)
            if transform == 'known':
                np.save('q_des_raw', q_des)
                np.save('q_act_raw', q_act)
            elif transform == 'unknown':
                # Get transform from robot to camera
                np.save('pos_des', pos_des)
                np.save('pos_act', pos_act)
                T = U.get_rigid_transform(np.array(pos_act), np.array(pos_des))
                np.save('Trc', T)
            np.save('t_stamp_raw', time_stamp)
            print("Data is successfully saved")
Example #13
0
                    pbr[2], pbg, pbb, pby,
                    which_arm)  # orientation of the marker
                qp4, qp5, qp6 = dvrkKinematics.ik_orientation(qp1, qp2, Rm)
            q_phy = [qp1, qp2, qp3, qp4, qp5, qp6]
        else:
            q_phy = []
    return pt, q_phy


# configurations for this calibration/trajectory-replay script
use_Trc = True
which_camera = 'inclined'
which_arm = 'PSM1'

# define objects: camera, robot controller, calibration transforms, detector
zivid = ZividCapture(which_camera=which_camera)
zivid.start()
dvrk = dvrkController(arm_name='/' + which_arm, comp_hysteresis=True)
# dvrk = dvrkArm('/'+which_arm)
Trc = np.load(root + 'calibration_files/Trc_' + which_camera + '_' +
              which_arm + '.npy')  # robot-to-camera transform (4x4)
Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera + '.npy')  # pegboard-to-camera (4x4)
bd = BallDetectionRGBD(Trc=Trc, Tpc=Tpc, which_camera=which_camera)

# pre-recorded joint trajectories: raw and cubic-optimized variants
traj = np.load("traj.npy", allow_pickle=True)
traj_opt = np.load("traj_opt_cubic.npy", allow_pickle=True)

# # run motion (linear)
# for tt in traj:
#     for qs in tt:
#         dvrk.set_joint(qs[0], wait_callback=True)
class FLSPegTransferHandover:
    def __init__(self,
                 use_simulation=True,
                 use_controller=True,
                 use_optimization=True,
                 optimizer='cubic',
                 which_camera='inclined'):
        """Set up detection, grasp-planning, and motion modules for the
        hand-over peg-transfer task, then start the pick and place threads.

        Args:
            use_simulation: if True, replay recorded images instead of the camera.
            use_controller / use_optimization / optimizer: forwarded to the
                dual-arm motion module.
            which_camera: camera name used to select calibration files.
        """
        self.use_simulation = use_simulation

        # load transform (4x4 calibration matrices)
        self.Trc1 = np.load(root + 'calibration_files/Trc_' + which_camera +
                            '_PSM1.npy')  # robot to camera
        self.Trc2 = np.load(root + 'calibration_files/Trc_' + which_camera +
                            '_PSM2.npy')  # robot to camera
        # hand-tuned offset for PSM2 calibration — TODO confirm still needed
        self.Trc2[:3, -1] += np.array([-0.001, 0.00, 0.00])
        self.Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera +
                           '.npy')  # pegboard to camera
        # import modules (camera only needed on real hardware)
        if use_simulation:
            pass
        else:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()
        self.block = BlockDetection3D(self.Tpc)
        self.gp = {
            'PSM1': GraspingPose3D(dist_pps=5, dist_gps=5, which_arm='PSM1'),
            'PSM2': GraspingPose3D(dist_pps=5, dist_gps=5, which_arm='PSM2')
        }
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion\
            = dvrkPegTransferMotionHandOver(use_controller=use_controller, use_optimization=use_optimization, optimizer=optimizer)
        self.dvrk_model = dvrkKinematics()

        # action ordering (pick, place) pair: [pick_arm, pick_peg, place_arm, place_peg]
        self.action_list = [
            ['PSM2', 1, 'PSM1', 7],  # left to right
            ['PSM2', 0, 'PSM1', 6],
            ['PSM2', 3, 'PSM1', 8],
            ['PSM2', 2, 'PSM1', 9],
            ['PSM2', 5, 'PSM1', 11],
            ['PSM2', 4, 'PSM1', 10],
            ['PSM1', 7, 'PSM2', 1],  # right to left
            ['PSM1', 6, 'PSM2', 0],
            ['PSM1', 8, 'PSM2', 3],
            ['PSM1', 9, 'PSM2', 2],
            ['PSM1', 11, 'PSM2', 5],
            ['PSM1', 10, 'PSM2', 4]
        ]
        self.action_order = 0

        # data members (filled by update_images / initialize)
        self.color = []
        self.point = []
        self.state = ['initialize', 'initialize']  # [pick state, place state]

        # event objects used to synchronize the pick and place threads
        self.event_handover = threading.Event()
        self.event_waiting = threading.Event()
        self.event_blockready = threading.Event()

        # parallelize: pick and place each run on their own worker thread
        self.initialize()
        self.thread1 = threading.Thread(target=self.run_pick)
        self.thread2 = threading.Thread(target=self.run_place)
        self.thread1.start()
        self.thread2.start()
        # self.run_place()

    def initialize(self):
        """Home both arms, capture an image, locate the pegs, and compute the
        hand-over point, then arm both state machines."""
        self.dvrk_motion.move_origin()
        # self.dvrk_motion.move_random(which_arm='PSM1')    # random move when NN model uses history
        self.update_images()
        self.block.find_pegs(img_color=self.color, img_point=self.point)
        # hand-over point: centroid of pegs 3, 6, 9 shifted 10 (units of
        # pnt_pegs, presumably mm — TODO confirm) in -y
        self.pnt_handover = (self.block.pnt_pegs[3] + self.block.pnt_pegs[6] +
                             self.block.pnt_pegs[9]) / 3 + np.array(
                                 [0.0, -10.0, 0.0])
        self.state = ['plan_action', 'plan_action']

    def transform_task2robot(self,
                             point,
                             delta=False,
                             inverse=False,
                             which_arm='PSM1'):
        point = np.array(point)
        if inverse == False:
            Tcp = np.linalg.inv(self.Tpc)
            if which_arm == 'PSM1' or which_arm == 0:
                Trp = self.Trc1.dot(Tcp)
            elif which_arm == 'PSM2' or which_arm == 1:
                Trp = self.Trc2.dot(Tcp)
            else:
                raise ValueError
            Rrp = Trp[:3, :3]
            trp = Trp[:3, -1]
            if delta == False:
                transformed = Rrp.dot(point) + trp
            else:
                transformed = Rrp.dot(point)
        else:
            if which_arm == 'PSM1':
                Tcr = np.linalg.inv(self.Trc1)
            elif which_arm == 'PSM2':
                Tcr = np.linalg.inv(self.Trc2)
            else:
                raise ValueError
            Tpr = self.Tpc.dot(Tcr)
            Rpr = Tpr[:3, :3]
            tpr = Tpr[:3, -1]
            if delta == False:
                transformed = Rpr.dot(point) + tpr
            else:
                transformed = Rpr.dot(point)
        return transformed

    def place_servoing(self, nb_place_corr):
        """Estimate the correction (m, task frame) between the dropped block
        and peg *nb_place_corr*; returns (delta, seen)."""
        self.update_images()
        pose_blk, pnt_blk, pnt_mask = self.block.find_block_servo(
            self.color, self.point, self.block.pnt_pegs[nb_place_corr])
        # too few block points means the block is not visible near the peg
        if len(pnt_blk) < 400:
            return [0.0, 0.0, 0.0], False
        # self.vd.plot3d(pnt_blocks=[pnt_blk], pnt_masks=[pnt_mask], pnt_pegs=self.block.pnt_pegs)
        _, T = pose_blk
        # target point sits sub-mm above the peg
        p_peg = self.block.pnt_pegs[nb_place_corr] + np.array([0.0, 0.0, -5])
        # block fiducial expressed in the task coordinate frame
        p_fid = T[:3, :3].dot([0, 0, 15]) + T[:3, -1]
        return (p_peg - p_fid) * 0.001, True  # mm -> m

    def update_images(self):
        """Refresh self.color / self.point from recorded captures (simulation)
        or from the Zivid camera (hardware)."""
        if not self.use_simulation:
            self.color, _, self.point = self.zivid.capture_3Dimage(color='BGR')
            return
        # NOTE(review): both phases currently replay the same recorded capture;
        # the branch is kept so the second half can switch to other images.
        if self.action_order <= 5:
            self.color = np.load(
                'record/peg_transfer_kit_capture/img_color_inclined_.npy')
            self.point = np.load(
                'record/peg_transfer_kit_capture/img_point_inclined_.npy')
            # self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined_rhp.npy')
            # self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined_rhp.npy')
        else:
            self.color = np.load(
                'record/peg_transfer_kit_capture/img_color_inclined_.npy')
            self.point = np.load(
                'record/peg_transfer_kit_capture/img_point_inclined_.npy')
            # self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined_rhp.npy')
            # self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined_rhp.npy')
    def run_pick(self):
        """Pick-side state machine for the hand-over peg transfer.

        Loops forever over three states: 'plan_action' (advance through
        self.action_list, terminating the process with exit() when done),
        'find_block' (detect the block to pick and compute grasp/hand-over
        poses), and 'move_block' (pick, carry to the hand-over point, and
        hand off to the place thread via threading events).
        """
        while True:
            if self.state[0] == 'plan_action':
                if self.action_order == len(self.action_list):
                    print("Pick Task Completed!")
                    exit()
                elif self.action_order == 6:
                    # halfway point (transfer direction reverses): signal the
                    # place thread, then wait until it is ready again
                    self.event_blockready.set()
                    self.event_blockready.clear()
                    self.event_waiting.clear()
                    self.event_waiting.wait()
                print('')
                print("Action order: ", self.action_order, "started")
                self.state[0] = 'find_block'

            elif self.state[0] == 'find_block':
                # find block
                self.update_images()
                nb_pick = self.action_list[self.action_order][1]
                arm_pick = self.action_list[self.action_order][0]
                pose_blk_pick, pnt_blk_pick, pnt_mask_pick = self.block.find_block(
                    block_number=nb_pick,
                    img_color=self.color,
                    img_point=self.point)

                # check if there is block to move
                if pose_blk_pick != []:
                    # find grasping & placing pose
                    self.gp[arm_pick].find_grasping_pose(
                        pose_blk=pose_blk_pick,
                        peg_point=self.block.pnt_pegs[nb_pick])
                    # the "placing" target here is the hand-over point, not
                    # the final peg
                    self.gp[arm_pick].find_placing_pose(
                        peg_point=self.pnt_handover)

                    # # visualize
                    # pnt_grasping = np.array(self.gp[arm_pick].pose_grasping)[1:]
                    # self.vd.plot3d([pnt_blk_pick], [pnt_mask_pick], self.block.pnt_pegs, [pnt_grasping])

                    self.state[0] = 'move_block'
                else:  # no block detected
                    print("No block detected for pick-up, Skip this order.")
                    self.action_order += 1
                    self.state[0] = 'plan_action'

            elif self.state[0] == 'move_block':
                # print(' ')
                # print("action order ", self.action_order)
                # print("action pair #", self.action_list[self.action_order[0]])
                # print(' ')
                arm_pick = self.action_list[self.action_order][0]
                gp_pick = self.gp[
                    arm_pick].pose_grasping  # [n, ang, x,y,z, seen]
                gp_place = self.gp[
                    arm_pick].pose_placing  # [n, ang, x,y,z, seen]
                gp_place = np.array(gp_place)
                # poses are in task-frame (mm); convert to robot frame (m)
                gp_pick_robot = self.transform_task2robot(
                    gp_pick[1:] * 0.001, which_arm=arm_pick)  # [x,y,z]
                gp_place_robot = self.transform_task2robot(
                    gp_place[1:] * 0.001, which_arm=arm_pick)  # [x,y,z]

                # pick up block and move to center
                self.dvrk_motion.go_pick(pos_pick=gp_pick_robot,
                                         rot_pick=gp_pick[0],
                                         which_arm=arm_pick)
                self.dvrk_motion.go_handover(pos_pick=gp_pick_robot,
                                             rot_pick=gp_pick[0],
                                             pos_place=gp_place_robot,
                                             rot_place=gp_place[0],
                                             which_arm=arm_pick)
                # signal the place thread that the block is presented, then
                # wait for it to grasp
                self.event_blockready.set()
                self.event_handover.wait(
                )  # until block is grasped by other gripper
                self.event_handover.clear()

                # escape from hand-over
                self.dvrk_motion.move_jaw(jaw='open', which_arm=arm_pick)
                self.dvrk_motion.move_upright(pos=[
                    gp_place_robot[0], gp_place_robot[1],
                    self.dvrk_motion.height_handover +
                    self.dvrk_motion.offset_handover
                ],
                                              rot=gp_place[0],
                                              jaw='open',
                                              which_arm=arm_pick)
                # let the place thread proceed to placing
                self.event_handover.set()
                self.action_order += 1
                self.state[0] = 'plan_action'

    def run_place(self):
        """Place-side state machine for the hand-over peg transfer.

        Mirrors run_pick on the receiving arm: waits for the block at the
        hand-over point, grasps it, places it on the target peg, and applies
        a vision-based servoing correction before release.
        """
        while True:
            if self.state[1] == 'plan_action':
                if self.action_order == len(self.action_list):
                    self.dvrk_motion.move_origin()
                    print("Place Task Completed!")
                    exit()
                elif self.action_order == 6:
                    # halfway point: re-home and release the pick thread
                    self.dvrk_motion.move_origin()
                    self.event_waiting.set()
                self.state[1] = 'find_block'

            elif self.state[1] == 'find_block':
                arm_place = self.action_list[self.action_order][2]
                self.dvrk_motion.handover_ready(which_arm=arm_place)
                self.event_blockready.wait(
                )  # until block is ready to hand-over
                self.event_blockready.clear()
                if self.use_simulation:
                    # the hand-over pose has its own recorded capture ("_ho")
                    self.color = np.load(
                        'record/peg_transfer_kit_capture/img_color_inclined_ho.npy'
                    )
                    self.point = np.load(
                        'record/peg_transfer_kit_capture/img_point_inclined_ho.npy'
                    )
                else:
                    self.update_images()

                nb_place = self.action_list[self.action_order][3]
                arm_place = self.action_list[self.action_order][2]
                nb_place_corr = nb_place
                pose_blk_pick, pnt_blk_pick, pnt_mask_pick = self.block.find_block_servo_handover(
                    img_color=self.color, img_point=self.point)

                # check if there is block to move
                if pose_blk_pick != []:
                    # find grasping & placing pose
                    self.gp[arm_place].find_grasping_pose_handover(
                        pose_blk=pose_blk_pick, which_arm=arm_place)
                    self.gp[arm_place].find_placing_pose(
                        peg_point=self.block.pnt_pegs[nb_place])

                    # visualize
                    # pnt_grasping = np.array(self.gp[arm_place].pose_grasping)[1:]
                    # np.save('pnt_blk_pick', pnt_blk_pick)
                    # np.save('pnt_mask_pick', pnt_mask_pick)
                    # np.save('block_pnt_pegs', self.block.pnt_pegs)
                    # np.save('pnt_grasping', pnt_grasping)
                    # self.vd.plot3d([pnt_blk_pick], [pnt_mask_pick], self.block.pnt_pegs, [pnt_grasping])
                    self.state[1] = 'move_block'
                else:
                    print("No block detected for hand-over, Skip this order.")
                    # unblock the pick thread so it can skip this order too
                    self.event_handover.set()
                    self.state[1] = 'plan_action'

            elif self.state[1] == 'move_block':
                # [n, ang, x,y,z, seen]
                # nb_place = self.action_list[self.action_order][3]
                arm_place = self.action_list[self.action_order][2]
                gp_pick = self.gp[arm_place].pose_grasping
                gp_place = self.gp[arm_place].pose_placing
                gp_place = np.array(gp_place)
                # task-frame (mm) -> robot frame (m)
                gp_pick_robot = self.transform_task2robot(
                    gp_pick[1:] * 0.001, which_arm=arm_place)  # [x,y,z]
                gp_place_robot = self.transform_task2robot(
                    gp_place[1:] * 0.001, which_arm=arm_place)  # [x,y,z]

                # pick (hand-over)
                self.dvrk_motion.pick_block(pos=gp_pick_robot,
                                            rot=gp_pick[0],
                                            which_arm=arm_place)
                # handshake: tell the pick thread we grasped, then wait for it
                # to open its jaw and move clear
                self.event_handover.set()
                self.event_handover.clear()
                self.event_handover.wait()  # until other gripper escape
                self.event_handover.clear()

                # place
                self.dvrk_motion.go_place(pos_place=gp_place_robot,
                                          rot_place=gp_place[0],
                                          which_arm=arm_place)

                # visual correction
                # (nb_place_corr persists from the preceding 'find_block' state)
                delta, seen = self.place_servoing(nb_place_corr)
                delta_rob = self.transform_task2robot(delta, delta=True)
                # NOTE(review): the z correction is deliberately zeroed —
                # confirm this is still wanted
                delta_rob[2] = 0.0  # temporary
                print(delta_rob)
                self.dvrk_motion.servoing_block(pos_place=gp_place_robot +
                                                delta_rob,
                                                rot_place=gp_place[0],
                                                which_arm=arm_place)
                print('')
                print("Action completed")
                self.state[1] = 'plan_action'
import sys
for p in sys.path:
    if p == '/opt/ros/kinetic/lib/python2.7/dist-packages':
        sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
import numpy as np
from FLSpegtransfer.vision.ZividCapture import ZividCapture
from FLSpegtransfer.vision.BlockDetection3D import BlockDetection3D
from FLSpegtransfer.vision.PegboardCalibration import PegboardCalibration
from FLSpegtransfer.vision.VisualizeDetection import VisualizeDetection
from FLSpegtransfer.vision.GraspingPose3D import GraspingPose3D

# Script: capture a pegboard scene and shrink the Zivid crop window to the
# detected board region of interest.
zivid = ZividCapture()
zivid.start()
peg = PegboardCalibration()
bd = BlockDetection3D()  # NOTE(review): constructed without a Tpc transform, unlike the class usage — confirm the default is intended
vd = VisualizeDetection()
gp = GraspingPose3D()

# define region of interest
# recorded frames below are immediately overwritten by the live capture;
# presumably kept as an offline toggle — comment out capture_3Dimage to
# replay them instead
color = np.load('../../record/color_new.npy')
depth = np.load('../../record/depth_new.npy')
point = np.load('../../record/point_new.npy')
# color = cv2.cvtColor(color, cv2.COLOR_RGB2BGR)
color, depth, point = zivid.capture_3Dimage(color='BGR')
ycr, hcr, xcr, wcr = peg.define_boundary(color)
# pad the detected board boundary by 200 px on each side of the crop window
dx = 200
dy = 200
zivid.ycr = ycr - dy
zivid.hcr = hcr + 2 * dy
zivid.xcr = xcr - dx
from FLSpegtransfer.vision.ZividCapture import ZividCapture
import cv2
import numpy as np

# Script: capture one 3-D frame from the Zivid camera and save the color,
# depth, and point-cloud images to disk.
which_camera = "inclined"
zivid = ZividCapture(which_camera=which_camera)
zivid.start()

# while True:
#     image = zc.capture_2Dimage(color='BGR')
#     cv2.imshow("", image)
#     cv2.waitKey(1)

# check images
img_color, img_depth, img_point = zivid.capture_3Dimage(color='BGR')

# zivid.display_rgb(img_color, block=False)
# zivid.display_rgb(img_color, block=True)
# zivid.display_depthmap(img_point)
# zivid.display_pointcloud(img_point, img_color)

# save to disk; NOTE(review): img_color is saved without the camera suffix,
# unlike depth/point — confirm the asymmetry is intended
cv2.imwrite("color_" + which_camera + ".png", img_color)
np.save("img_color", img_color)
np.save("img_depth_" + which_camera, img_depth)
np.save("img_point_" + which_camera, img_point)
Example #17
0
import sys
for p in sys.path:
    if p == '/opt/ros/kinetic/lib/python2.7/dist-packages':
        sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
import numpy as np
from FLSpegtransfer.vision.ZividCapture import ZividCapture
from FLSpegtransfer.vision.BallDetectionRGBD import BallDetectionRGBD
from FLSpegtransfer.motion.dvrkKinematics import dvrkKinematics

# Script: repeatedly detect colored balls in a recorded frame and print the
# pairwise distances between the first three red detections (debug loop;
# runs forever on the same static images).
BD = BallDetectionRGBD()
zivid = ZividCapture()
# zivid.start()

while True:
    color = np.load('ball_img_zivid_color.npy')
    point = np.load('ball_img_zivid_point.npy')
    # point = np.load('fiducial_images/0/point.npy')
    # color, depth, point = zivid.capture_3Dimage(color='BGR')

    # Find balls
    pbr = BD.find_balls(color, point, 'red')
    # NOTE(review): green/blue/yellow detections are unused below — confirm
    pbg = BD.find_balls(color, point, 'green')
    pbb = BD.find_balls(color, point, 'blue')
    pby = BD.find_balls(color, point, 'yellow')

    # pairwise distances between the first three detected red balls
    pbr = np.array(pbr)
    d1 = np.linalg.norm(pbr[0][:3] - pbr[1][:3])
    d2 = np.linalg.norm(pbr[0][:3] - pbr[2][:3])
    d3 = np.linalg.norm(pbr[1][:3] - pbr[2][:3])
    print(d1, d2, d3)
import cv2
import numpy as np
from FLSpegtransfer.vision.ZividCapture import ZividCapture
from FLSpegtransfer.vision.PegboardCalibration import PegboardCalibration

# Script: capture a frame, set the Zivid crop window around the detected
# pegboard boundary, then run pegboard registration.
zivid = ZividCapture(which_camera='inclined')
zivid.start()
peg = PegboardCalibration()

# define region of interest
color, depth, point = zivid.capture_3Dimage(color='BGR')
ycr, hcr, xcr, wcr = peg.define_boundary(color)
# pad the detected boundary by 200 px on each side of the crop window
dx = 200
dy = 200
zivid.ycr = ycr - dy
zivid.hcr = hcr + 2 * dy
zivid.xcr = xcr - dx
zivid.wcr = wcr + 2 * dx

# pegboard registration if necessary
peg.registration_pegboard(color, point)
Example #19
0
        R38 = R03.T.dot(Rb)
        r12 = R38[0, 1]
        r22 = R38[1, 1]
        r31 = R38[2, 0]
        r32 = R38[2, 1]
        r33 = R38[2, 2]
        q4 = np.arctan2(-r22, -r12)  # (rad)
        q6 = np.arctan2(-r31, -r33)
        q5 = np.arctan2(r32, np.sqrt(r31**2 + r33**2))
        return q4, q5, q6


if __name__ == "__main__":
    from FLSpegtransfer.vision.ZividCapture import ZividCapture
    BD = BallDetection()
    zivid = ZividCapture()
    while True:
        try:
            zivid.capture_3Dimage()
            img_color, img_depth, img_point = BD.img_crop(
                zivid.image, zivid.depth, zivid.point)
            img_color = cv2.cvtColor(img_color, cv2.COLOR_RGB2BGR)
            img_color_org = np.copy(img_color)

            # Find balls and overlay
            pbs = BD.find_balls(img_color, img_depth, img_point)
            img_color = BD.overlay_balls(img_color, pbs)

            # Find tool position, joint angles, and overlay
            if pbs[0] == [] or pbs[1] == []:
                pass
Example #20
0
class FLSPegTransfer():
    """Dual-arm (PSM1/PSM2) peg-transfer task with a hand-over between arms.

    A pick thread (run_pick) and a place thread (run_place) cooperate through
    the shared self.state list and threading.Event objects: the pick arm
    carries a block to a hand-over point where the place arm grasps it and
    places it on the target peg, with a final visual-servoing correction.
    """

    def __init__(self, use_simulation=True, which_camera='inclined'):
        # when True, camera captures are replayed from recorded .npy files
        self.use_simulation = use_simulation

        # load transform
        self.Trc1 = np.load(root + 'calibration_files/Trc_' + which_camera +
                            '_PSM1.npy')  # robot to camera
        self.Trc2 = np.load(root + 'calibration_files/Trc_' + which_camera +
                            '_PSM2.npy')  # robot to camera
        self.Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera +
                           '.npy')  # pegboard to camera

        # import modules
        if use_simulation:
            pass
        else:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()
        self.block = BlockDetection3D(self.Tpc)
        # self.gp = [GraspingPose3D(which_arm='PSM1'), GraspingPose3D(which_arm='PSM2')]
        # one grasping-pose estimator per arm, keyed by arm name
        self.gp = {
            'PSM1': GraspingPose3D(which_arm='PSM1'),
            'PSM2': GraspingPose3D(which_arm='PSM2')
        }
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionHandOver()
        self.dvrk_model = dvrkKinematics()

        # action ordering (pick, place) pair
        # each entry: [pick arm, pick peg number, place arm, place peg number]
        self.action_list = [
            ['PSM2', 1, 'PSM1', 7],  # left to right
            ['PSM2', 0, 'PSM1', 6],
            ['PSM2', 3, 'PSM1', 8],
            ['PSM2', 2, 'PSM1', 9],
            ['PSM2', 5, 'PSM1', 11],
            ['PSM2', 4, 'PSM1', 10],
            ['PSM1', 7, 'PSM2', 1],  # right to left
            ['PSM1', 6, 'PSM2', 0],
            ['PSM1', 8, 'PSM2', 3],
            ['PSM1', 9, 'PSM2', 2],
            ['PSM1', 11, 'PSM2', 5],
            ['PSM1', 10, 'PSM2', 4]
        ]
        self.action_order = [-1, -1]  # [pick action, place action]

        # data members
        self.color = []
        self.point = []
        self.state = []  # [pick state, place state]
        self.pnt_handover = []

        # event
        # events synchronizing the pick thread and the place (main) thread
        self.event_handover = threading.Event()
        self.event_waiting = threading.Event()

        # parallelize
        self.initialize()

        # self.thread1 = threading.Thread(target=self.PSM1)
        # pick runs on a worker thread; place runs on the calling thread
        self.thread2 = threading.Thread(target=self.run_pick)
        # self.thread1.start()
        self.thread2.start()
        self.run_place()

    def initialize(self):
        """Home the arms, capture an initial image, locate the pegs, and set
        the hand-over point as the centroid of pegs 3, 6, and 9."""
        self.dvrk_motion.move_origin()
        # self.dvrk_motion.move_random(which_arm='PSM1')    # random move when NN model uses history
        self.update_images()
        self.block.find_pegs(img_color=self.color, img_point=self.point)
        self.pnt_handover = (self.block.pnt_pegs[3] + self.block.pnt_pegs[6] +
                             self.block.pnt_pegs[9]) / 3
        self.state = ['plan_action', 'plan_action']

    def transform_task2robot(self,
                             point,
                             delta=False,
                             inverse=False,
                             which_arm='PSM1'):
        """Transform a 3-vector between the task (pegboard) frame and a robot
        frame.

        point: task-frame input in (mm) when inverse is False; robot-frame
            input in (m) when inverse is True.
        delta: when True, treat point as a displacement (rotation only, no
            translation offset).
        inverse: False maps task -> robot (m); True maps robot -> task (mm).
        which_arm: 'PSM1'/'PSM2' (0/1 also accepted on the forward mapping);
            selects which robot-to-camera calibration is used.
        Raises ValueError on an unrecognized arm.
        """
        point = np.array(point)
        if inverse == False:
            Tcp = np.linalg.inv(self.Tpc)  # (mm)
            Tcp[:3, -1] = Tcp[:3, -1] * 0.001  # (m)
            if which_arm == 'PSM1' or which_arm == 0:
                Trp = self.Trc1.dot(Tcp)
            elif which_arm == 'PSM2' or which_arm == 1:
                Trp = self.Trc2.dot(Tcp)
            else:
                raise ValueError
            Rrp = Trp[:3, :3]
            trp = Trp[:3, -1]
            if delta == False:
                transformed = Rrp.dot(point * 0.001) + trp
            else:
                transformed = Rrp.dot(point * 0.001)
        else:
            if which_arm == 'PSM1':
                Tcr = np.linalg.inv(self.Trc1)  # (m)
            elif which_arm == 'PSM2':
                Tcr = np.linalg.inv(self.Trc2)  # (m)
            else:
                raise ValueError
            Tcr[:3, -1] = Tcr[:3, -1] * 1000  # (mm)
            Tpr = self.Tpc.dot(Tcr)
            Rpr = Tpr[:3, :3]
            tpr = Tpr[:3, -1]
            if delta == False:
                transformed = Rpr.dot(point * 1000) + tpr
            else:
                transformed = Rpr.dot(point * 1000)
        return transformed

    def place_servoing(self, nb_place):
        """Measure the task-frame (mm) offset between the carried block's
        fiducial and a point just above peg *nb_place*.

        Returns (delta, seen); delta is [] and seen is False when too few
        block points are visible to servo.
        """
        # get current position
        self.update_images()
        (_, T), pnt_blk, pnt_mask = self.block.find_block_servo(
            self.color, self.point)
        if len(pnt_blk) < 400:
            delta = []
            seen = False
        else:
            # self.vd.plot3d(pnt_blocks=pnt_blk, pnt_masks=pnt_mask, pnt_pegs=self.block.pnt_pegs)
            p_peg = self.block.pnt_pegs[nb_place] + np.array(
                [0.0, 0.0, -5])  # sub-mm above peg
            p_fid = [0, 0, 15]
            p_fid = T[:3, :3].dot(p_fid) + T[:3, -1]  # w.r.t task coordinate
            delta = p_peg - p_fid
            seen = True
        return delta, seen

    def update_images(self):
        """Refresh self.color and self.point from recorded files (simulation)
        or a live Zivid capture.

        NOTE(review): both simulation branches currently load identical
        files — the action_order condition has no effect; confirm whether a
        second capture was meant for the reverse direction.
        """
        if self.use_simulation:
            if self.action_order[0] <= 6 and self.action_order[1] <= 5:
                self.color = np.load(
                    'record/peg_transfer_kit_capture/img_color_inclined_.npy')
                self.point = np.load(
                    'record/peg_transfer_kit_capture/img_point_inclined_.npy')
                # self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined_rhp.npy')
                # self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined_rhp.npy')
            else:
                self.color = np.load(
                    'record/peg_transfer_kit_capture/img_color_inclined_.npy')
                self.point = np.load(
                    'record/peg_transfer_kit_capture/img_point_inclined_.npy')
                # self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined_rhp.npy')
                # self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined_rhp.npy')
        else:
            self.color, _, self.point = self.zivid.capture_3Dimage(color='BGR')

    def run_pick(self):
        """Pick-side state machine (runs on the worker thread).

        States: 'plan_action'/'no_block' (advance through action_list,
        terminating the process when done), 'wait' (hold until the place
        thread has grasped), 'find_block', 'move_block' (pick and carry to
        the hand-over point), and 'hand_off' (release and move clear).
        """
        while True:
            time.sleep(0.2)
            if self.state[0] == 'plan_action' or self.state[0] == 'no_block':
                if self.action_order[0] == len(self.action_list) - 1:
                    print("Pick Task Completed!")
                    exit()
                else:
                    if self.state[0] == 'no_block':
                        print(
                            "No block detected for pick-up, Skip this order.")
                        # NOTE(review): this advances the *place* order on a
                        # failed pick — confirm index [1] (not [0]) is intended
                        self.action_order[1] += 1
                    arm_pick = self.action_list[self.action_order[0]][0]
                    arm_pick_next = self.action_list[self.action_order[0] +
                                                     1][0]
                    print('')
                    print("(" + arm_pick + ")", " Pick order: ",
                          self.action_order[0], "th completed")
                    if arm_pick != arm_pick_next:
                        # arm switch (direction reversal): wait until the
                        # place thread signals it finished its half
                        self.event_waiting.clear()
                        self.event_waiting.wait()
                    self.action_order[0] += 1
                    self.state[0] = 'find_block'

            elif self.state[0] == 'wait':
                if self.state[1] == 'picked_hand_over':
                    self.state[0] = 'hand_off'
                    self.state[1] = 'wait'

            elif self.state[0] == 'find_block':
                # find block
                self.update_images()
                nb_pick = self.action_list[self.action_order[0]][1]
                arm_pick = self.action_list[self.action_order[0]][0]
                pose_blk_pick, pnt_blk_pick, pnt_mask_pick = self.block.find_block(
                    block_number=nb_pick,
                    img_color=self.color,
                    img_point=self.point)

                # check if there is block to move
                if pose_blk_pick != []:
                    # find grasping & placing pose
                    self.gp[arm_pick].find_grasping_pose(
                        pose_blk=pose_blk_pick,
                        peg_point=self.block.pnt_pegs[nb_pick])
                    # "placing" target is the hand-over point for this arm
                    self.gp[arm_pick].find_placing_pose(
                        peg_point=self.pnt_handover)
                    self.state[0] = 'move_block'
                else:  # no block detected
                    self.state[0] = 'no_block'

            elif self.state[0] == 'move_block':
                # print(' ')
                # print("action order ", self.action_order)
                # print("action pair #", self.action_list[self.action_order[0]])
                # print(' ')
                arm_pick = self.action_list[self.action_order[0]][0]
                gp_pick = self.gp[
                    arm_pick].pose_grasping  # [n, ang, x,y,z, seen]
                gp_place = self.gp[
                    arm_pick].pose_placing  # [n, ang, x,y,z, seen]
                gp_pick_robot = self.transform_task2robot(
                    gp_pick[1:], which_arm=arm_pick)  # [x,y,z]
                gp_place_robot = self.transform_task2robot(
                    gp_place[1:], which_arm=arm_pick)  # [x,y,z]

                # pick up block and move to center
                self.dvrk_motion.move_upright(pos=[
                    gp_pick_robot[0], gp_pick_robot[1],
                    self.dvrk_motion.height_ready
                ],
                                              rot=gp_pick[0],
                                              jaw='close',
                                              which_arm=arm_pick)
                self.dvrk_motion.pick_block(pos=gp_pick_robot,
                                            rot=gp_pick[0],
                                            which_arm=arm_pick)
                self.dvrk_motion.move_upright(pos=[
                    gp_pick_robot[0], gp_pick_robot[1],
                    self.dvrk_motion.height_ready
                ],
                                              rot=gp_pick[0],
                                              jaw='close',
                                              which_arm=arm_pick,
                                              interpolate=True)
                self.dvrk_motion.move_upright(pos=[
                    gp_place_robot[0], gp_place_robot[1],
                    self.dvrk_motion.height_ready
                ],
                                              rot=gp_place[0],
                                              jaw='close',
                                              which_arm=arm_pick)
                self.state[0] = 'ready_hand_over'
            elif self.state[0] == 'hand_off':  # escape from hand-over
                arm_pick = self.action_list[self.action_order[0]][0]
                gp_place = self.gp[
                    arm_pick].pose_placing  # [n, ang, x,y,z, seen]
                gp_place_robot = self.transform_task2robot(
                    gp_place[1:], which_arm=arm_pick)  # [x,y,z]
                self.dvrk_motion.move_jaw(jaw='open', which_arm=arm_pick)
                self.dvrk_motion.move_upright(pos=[
                    gp_place_robot[0], gp_place_robot[1],
                    self.dvrk_motion.height_ready +
                    self.dvrk_motion.offset_handover
                ],
                                              rot=gp_place[0],
                                              jaw='open',
                                              which_arm=arm_pick)
                self.state[1] = 'place_block'
                self.state[0] = 'plan_action'

    def run_place(self):
        """Place-side state machine (runs on the calling/main thread).

        States: 'plan_action'/'no_block', 'wait' (for the pick thread to
        present the block), 'find_block' (detect it at the hand-over point),
        'move_block' (grasp it), and 'place_block' (place on the target peg
        with a visual-servoing correction, then return toward the hand-over
        ready pose).
        """
        while True:
            time.sleep(0.1)
            if self.state[1] == 'plan_action' or self.state[1] == 'no_block':
                if self.action_order[1] == len(self.action_list) - 1:
                    self.dvrk_motion.move_origin()
                    print("Place Task Completed!")
                    exit()
                else:
                    if self.state[1] == 'no_block':
                        print(
                            "No block detected for hand-over, Skip this order."
                        )
                        # unblock the pick thread so it can skip too
                        self.event_handover.set()
                    arm_place = self.action_list[self.action_order[1]][2]
                    arm_place_next = self.action_list[self.action_order[1] +
                                                      1][2]
                    print('')
                    print("(" + arm_place + ")", " Place order: ",
                          self.action_order[1], "th completed")
                    if arm_place != arm_place_next:
                        # direction switch: re-home, release the pick thread,
                        # and resync the place order with the pick order
                        self.dvrk_motion.move_origin()
                        self.event_waiting.set()
                        self.action_order[1] = self.action_order[0]
                    self.action_order[1] += 1
                    self.state[1] = 'wait'

            elif self.state[1] == 'wait':
                if self.state[0] == 'ready_hand_over':
                    self.state[1] = 'find_block'
                    self.state[0] = 'wait'
                elif self.state[0] == 'hand_off':
                    self.state[1] = 'place_block'

            elif self.state[1] == 'find_block':
                # find block
                if self.use_simulation:
                    # hand-over pose has its own recorded capture ("_ho")
                    self.color = np.load(
                        'record/peg_transfer_kit_capture/img_color_inclined_ho.npy'
                    )
                    self.point = np.load(
                        'record/peg_transfer_kit_capture/img_point_inclined_ho.npy'
                    )
                    # self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined.npy')
                    # self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined.npy')
                else:
                    self.update_images()
                nb_place = self.action_list[self.action_order[1]][3]
                arm_place = self.action_list[self.action_order[1]][2]
                pose_blk_pick, pnt_blk_pick, pnt_mask_pick = self.block.find_block_servo(
                    img_color=self.color, img_point=self.point)
                # pose_blk_place, pnt_blk_place, pnt_mask_place = self.block.find_block(
                #     block_number=nb_place, img_color=self.color, img_point=self.point)

                # check if there is block to move
                if pose_blk_pick != []:
                    # find grasping & placing pose
                    self.gp[arm_place].find_grasping_pose_handover(
                        pose_blk=pose_blk_pick)
                    self.gp[arm_place].find_placing_pose(
                        peg_point=self.block.pnt_pegs[nb_place])
                    # visualize
                    # pnt_grasping = np.array(self.gp[0].pose_grasping)[1:]
                    # self.vd.plot3d(pnt_blk_pick, pnt_mask_pick, self.block.pnt_pegs, [pnt_grasping])
                    self.state[1] = 'move_block'
                else:
                    self.state[1] = 'no_block'

            elif self.state[1] == 'move_block':
                # [n, ang, x,y,z, seen]
                arm_place = self.action_list[self.action_order[1]][2]
                gp_pick = self.gp[arm_place].pose_grasping
                gp_pick_robot = self.transform_task2robot(
                    gp_pick[1:], which_arm=arm_place)  # [x,y,z]

                # pick (hand-over)
                self.dvrk_motion.move_upright(pos=[
                    gp_pick_robot[0], gp_pick_robot[1],
                    gp_pick_robot[2] + self.dvrk_motion.offset_handover
                ],
                                              rot=gp_pick[0],
                                              jaw='open',
                                              which_arm=arm_place)
                self.dvrk_motion.pick_block(pos=gp_pick_robot,
                                            rot=gp_pick[0],
                                            which_arm=arm_place)
                self.state[1] = 'picked_hand_over'

            elif self.state[1] == 'place_block':
                # place
                nb_place = self.action_list[self.action_order[1]][3]
                arm_place = self.action_list[self.action_order[1]][2]
                gp_place = self.gp[arm_place].pose_placing
                gp_place_robot = self.transform_task2robot(
                    gp_place[1:], which_arm=arm_place)  # [x,y,z]
                pnt_handover_robot = self.transform_task2robot(
                    self.pnt_handover, which_arm=arm_place)  # [x,y,z]
                self.dvrk_motion.move_upright(pos=[
                    gp_place_robot[0], gp_place_robot[1],
                    self.dvrk_motion.height_ready
                ],
                                              rot=gp_place[0],
                                              jaw='close',
                                              which_arm=arm_place)
                # visual servoing prior to drop
                delta, seen = self.place_servoing(nb_place)
                if seen == True:  # if there is a block,
                    delta_robot = self.transform_task2robot(delta, delta=True)
                else:
                    delta_robot = [0.0, 0.0, 0.0]
                self.dvrk_motion.drop_block(pos=gp_place_robot + delta_robot,
                                            rot=gp_place[0],
                                            which_arm=arm_place)
                if arm_place == 'PSM1':  # at the first half
                    self.dvrk_motion.move_upright(pos=[
                        pnt_handover_robot[0] - 0.02, pnt_handover_robot[1],
                        self.dvrk_motion.height_ready_handover
                    ],
                                                  rot=0.0,
                                                  jaw='close',
                                                  which_arm=arm_place)
                else:  # at the last half
                    self.dvrk_motion.move_upright(pos=[
                        pnt_handover_robot[0] + 0.02, pnt_handover_robot[1],
                        self.dvrk_motion.height_ready_handover
                    ],
                                                  rot=0.0,
                                                  jaw='close',
                                                  which_arm=arm_place)
                self.state[1] = 'plan_action'
Example #21
0
class FLSPegTransfer:
    def __init__(self, use_simulation=True, use_controller=True, use_optimization=True, optimizer='cubic', which_camera='inclined', which_arm='PSM1'):
        """Set up detection, grasping, and motion modules, then run the task loop.

        NOTE: calls self.main() at the end, so constructing this object blocks
        until the peg-transfer task finishes.
        """
        self.use_simulation = use_simulation

        # calibration transforms
        self.Trc = np.load(root + f'calibration_files/Trc_{which_camera}_{which_arm}.npy')  # robot to camera
        self.Tpc = np.load(root + f'calibration_files/Tpc_{which_camera}.npy')  # pegboard to camera

        # self.Trc_stereo = np.load(root + 'calibration_files/Trc_' + 'stereo' + '_'+which_arm+'.npy')  # robot to camera
        # self.Trc_stereo[:3, -1] += np.array([0.00, 0.003, 0.00])
        # self.Tc1c2 = np.linalg.inv(self.Trc).dot(self.Trc_stereo)    # cam1 = inclined, cam2 = stereo

        # the camera is only needed on the real system
        if not self.use_simulation:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()

        # perception / planning / motion modules
        self.block = BlockDetection3D(self.Tpc)
        self.gp = GraspingPose3D(dist_gps=5, dist_pps=5, which_arm=which_arm)
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionSingleArm(
            use_controller=use_controller, use_optimization=use_optimization,
            optimizer=optimizer, which_arm=which_arm)
        self.zivid_utils = ZividUtils(which_camera='inclined')

        # [pick peg, place peg] pairs: first six left-to-right, last six right-to-left
        self.action_list = np.array([[1, 7], [0, 6], [3, 8], [2, 9], [5, 11], [4, 10],
                                     [7, 1], [6, 0], [8, 3], [9, 2], [11, 5], [10, 4]])
        self.action_order = 0

        # latest captured images and the state-machine stack
        self.color = []
        self.point = []
        self.state = ['initialize']
        self.main()

    def transform_task2robot(self, point, delta=False, inverse=False):
        point = np.array(point)
        if inverse == False:
            Tcp = np.linalg.inv(self.block.Tpc)
            Trp = self.Trc.dot(Tcp)
            Rrp = Trp[:3, :3]
            trp = Trp[:3, -1]
            if delta == False:
                transformed = Rrp.dot(point) + trp
            else:
                transformed = Rrp.dot(point)
        else:
            Tcr = np.linalg.inv(self.Trc)
            Tpr = self.block.Tpc.dot(Tcr)
            Rpr = Tpr[:3, :3]
            tpr = Tpr[:3, -1]
            if delta == False:
                transformed = Rpr.dot(point) + tpr
            else:
                transformed = Rpr.dot(point)
        return transformed

    @classmethod
    def transform(cls, points, T):
        R = T[:3, :3]
        t = T[:3, -1]
        return R.dot(points.T).T + t.T

    def move_blocks(self):
        """Execute one pick-and-place action, then visually correct the placement.

        Uses the grasping/placing poses previously computed in 'find_block'
        (stored on self.gp). Poses are [angle, x, y, z] in the task frame;
        positions are scaled by 0.001 — presumably mm -> m (TODO confirm).
        """
        # [n, ang, x,y,z, seen]
        gp_pick = self.gp.pose_grasping
        gp_place = np.array(self.gp.pose_placing)

        # pick-up and place motion (task frame -> robot frame)
        gp_pick_robot = self.transform_task2robot(gp_pick[1:]*0.001)  # [x,y,z]
        gp_place_robot = self.transform_task2robot(gp_place[1:]*0.001)

        # orders 0 and 6 are the first move of each half (see action_list ordering)
        if self.action_order == 0 or self.action_order == 6:
            self.dvrk_motion.go_pick(pos_pick=gp_pick_robot, rot_pick=gp_pick[0])
        else:
            self.dvrk_motion.return_to_peg(pos_pick=gp_pick_robot, rot_pick=gp_pick[0])
        self.dvrk_motion.transfer_block(pos_pick=gp_pick_robot, rot_pick=gp_pick[0],
                                        pos_place=gp_place_robot, rot_place=gp_place[0])

        # visual correction: re-detect the placed block and nudge toward the peg
        delta, seen = self.place_servoing()
        delta_rob = self.transform_task2robot(delta, delta=True)
        delta_rob[2] = 0.0  # temporary: correct in the horizontal plane only
        self.dvrk_motion.servoing_block(pos_place=gp_place_robot+delta_rob, rot_place=gp_place[0])

    def place_servoing(self):
        # get current position
        self.update_images()
        nb_place = self.action_list[self.action_order][1]
        pose_blk, pnt_blk, pnt_mask = self.block.find_block_servo(self.color, self.point, self.block.pnt_pegs[nb_place])
        if len(pnt_blk) < 400:
            delta = [0.0, 0.0, 0.0]
            seen = False
        else:
            # self.vd.plot3d(pnt_blocks=[pnt_blk], pnt_masks=[pnt_mask], pnt_pegs=self.block.pnt_pegs)
            _, T = pose_blk
            p_peg = self.block.pnt_pegs[nb_place] + np.array([0.0, 0.0, -5])  # sub-mm above peg
            p_fid = [0, 0, 15]
            p_fid = T[:3, :3].dot(p_fid) + T[:3,-1]     # w.r.t task coordinate
            delta = (p_peg - p_fid)*0.001   # unit to (m)
            seen = True
        return delta, seen

    def update_images(self):
        """Refresh self.color / self.point, either from recorded captures
        (simulation) or from a live Zivid capture (real system)."""
        if not self.use_simulation:
            self.color, _, self.point = self.zivid.capture_3Dimage(color='BGR')
            return
        # simulation: the first half of the task uses one recorded capture,
        # the second half uses another
        if self.action_order <= 5:
            self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined.npy')
            self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined.npy')
        else:
            self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined_rhp.npy')
            self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined_rhp.npy')

    def main(self):
        """State-machine loop driving the full peg-transfer task.

        States live on a stack (self.state; index 0 is the current state) and
        each branch pushes the next state with insert(0, ...).
        NOTE(review): the stack only ever grows — states are never popped.
        """
        while True:
            if self.state[0] == 'initialize':
                print('')
                print('* State:', self.state[0])
                # define ROI
                self.dvrk_motion.move_origin()

                # random move when NN model uses history
                # self.dvrk_motion.move_random()
                self.update_images()

                # find pegs
                self.block.find_pegs(img_color=self.color, img_point=self.point)
                self.state.insert(0, 'update_image')
                print ("started")
                time.sleep(3)
                st = time.time()  # task-completion timer; read in the 'exit' state

            elif self.state[0] == 'update_image':
                # refresh color/point images before planning the next action
                print('')
                print('* State:', self.state[0])
                self.update_images()
                print ('image updated.')
                self.state.insert(0, 'plan_action')

            elif self.state[0] == 'plan_action':
                print('')
                print('* State:', self.state[0])
                # all action pairs consumed -> finish; otherwise look for the block
                if self.action_order == len(self.action_list):
                    self.state.insert(0, 'exit')
                else:
                    self.state.insert(0, 'find_block')
                    print("action pair #", self.action_list[self.action_order])

            elif self.state[0] == 'find_block':
                print('')
                print('* State:', self.state[0])
                nb_pick = self.action_list[self.action_order][0]
                nb_place = self.action_list[self.action_order][1]
                pose_blk_pick, pnt_blk_pick, pnt_mask_pick = self.block.find_block(
                    block_number=nb_pick, img_color=self.color, img_point=self.point)
                pose_blk_place, pnt_blk_place, pnt_mask_place = self.block.find_block(
                    block_number=nb_place, img_color=self.color, img_point=self.point)

                # check if there is block to move:
                # a block must sit on the pick peg and the place peg must be empty
                if pose_blk_pick != [] and pose_blk_place == []:
                    print('A block to move was detected.')
                    # find grasping & placing pose
                    self.gp.find_grasping_pose(pose_blk=pose_blk_pick, peg_point=self.block.pnt_pegs[nb_pick])
                    self.gp.find_placing_pose(peg_point=self.block.pnt_pegs[nb_place])
                    # visualize
                    # pnt_grasping = np.array(self.gp.pose_grasping)[1:]
                    # self.vd.plot3d([pnt_blk_pick], [pnt_mask_pick], self.block.pnt_pegs, [pnt_grasping])
                    # pnt_placing = np.array(self.gp.pose_placing)[1:]
                    # self.vd.plot3d([pnt_blk_place], [pnt_mask_pick], self.block.pnt_pegs, [pnt_placing])

                    # visualize all blocks and all grasping points
                    # self.block.find_block_all(img_color=self.color, img_point=self.point)
                    # self.gp.find_grasping_pose_all(pose_blks=self.block.pose_blks, peg_points=self.block.pnt_pegs)
                    # pnt_graspings = np.array(self.gp.pose_grasping)[:6, 2:5]
                    # self.vd.plot3d(self.block.pnt_blks, self.block.pnt_masks, self.block.pnt_pegs, pnt_graspings)
                    self.state.insert(0, 'move_block')
                else:
                    print('No block to move was detected. Skip this order.')
                    self.action_order += 1
                    self.state.insert(0, 'plan_action')

            elif self.state[0] == 'move_block':
                print('')
                print('* State:', self.state[0])
                self.move_blocks()
                # order 5 is the last of the first half: return to origin before switching direction
                if self.action_order == 5:
                    self.dvrk_motion.move_origin()
                self.action_order += 1
                self.state.insert(0, 'update_image')

            elif self.state[0] == 'exit':
                print("task completed.")
                self.dvrk_motion.move_origin()
                t_comp = time.time() - st
                print (t_comp)
                # NOTE(review): hardcoded user-specific output path — parameterize before reuse
                np.save('/home/hwangmh/t_comp', t_comp)

                # timing breakdown collected by the motion module
                print(self.dvrk_motion.time_motion)
                print (np.sum(self.dvrk_motion.time_motion))
                print(self.dvrk_motion.time_computing)
                print (np.sum(self.dvrk_motion.time_computing))
                # NOTE(review): debug leftover — drops into the debugger on every completed run
                import pdb; pdb.set_trace()
                exit()