import threading

# arm1, arm2, and zivid are assumed to be already-initialized arm and
# ZividCapture camera objects (their construction is not shown in this snippet).


def run():
    while True:
        arm1.set_pose(pos=[0.05, 0.0, -0.15], rot=[0.0, 0.0, 0.0, 1.0])
        zivid.capture_3Dimage()
        arm1.set_pose(pos=[-0.05, 0.0, -0.15], rot=[0.0, 0.0, 0.0, 1.0])
        zivid.capture_3Dimage()
        arm1.set_pose(pos=[0.05, 0.0, -0.15], rot=[0.0, 0.0, 0.0, 1.0])
        zivid.capture_3Dimage()
        arm1.set_pose(pos=[-0.05, 0.0, -0.15], rot=[0.0, 0.0, 0.0, 1.0])
        zivid.capture_3Dimage()
        arm1.set_pose(pos=[0.05, 0.0, -0.15], rot=[0.0, 0.0, 0.0, 1.0])
        zivid.capture_3Dimage()
        arm1.set_pose(pos=[-0.05, 0.0, -0.15], rot=[0.0, 0.0, 0.0, 1.0])
        print("completed_arm1")


th = threading.Thread(target=run)
th.start()
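# The main thread drives arm2 in parallel while the background thread drives arm1.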

while True:
    arm2.set_pose(pos=[0.05, 0.0, -0.15], rot=[0.0, 0.0, 0.0, 1.0])
    zivid.capture_3Dimage()
    arm2.set_pose(pos=[-0.05, 0.0, -0.15], rot=[0.0, 0.0, 0.0, 1.0])
    zivid.capture_3Dimage()
    arm2.set_pose(pos=[0.05, 0.0, -0.15], rot=[0.0, 0.0, 0.0, 1.0])
    zivid.capture_3Dimage()
    arm2.set_pose(pos=[-0.05, 0.0, -0.15], rot=[0.0, 0.0, 0.0, 1.0])
    zivid.capture_3Dimage()
    arm2.set_pose(pos=[0.05, 0.0, -0.15], rot=[0.0, 0.0, 0.0, 1.0])
    zivid.capture_3Dimage()
    arm2.set_pose(pos=[-0.05, 0.0, -0.15], rot=[0.0, 0.0, 0.0, 1.0])
    print("completed_arm2")


# This snippet assumes numpy (np), dvrkArm, BlockDetection3D, ZividCapture,
# transform_task2robot, and the package path `root` are imported from the
# FLSpegtransfer package (imports not shown).
which_camera = 'inclined'
which_arm = 'PSM2'
dvrk = dvrkArm('/' + which_arm)
Trc = np.load(root + '/calibration_files/Trc_' + which_camera + '_' +
              which_arm + '.npy')
Tpc = np.load(root + '/calibration_files/Tpc_' + which_camera + '.npy')
bd = BlockDetection3D(Tpc)
# dvrk = dvrkController(comp_hysteresis=False, stop_collision=False)
zivid = ZividCapture(which_camera=which_camera)
zivid.start()
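
# Capture a point cloud, detect the pegs, then move the arm to a point just
# above each of the 12 detected pegs (task-frame points are converted to
# meters and transformed into the robot frame).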
while True:
    # move_origin(dvrk, which_arm)
    img_color, _, img_point = zivid.capture_3Dimage(color='BGR')
    bd.find_pegs(img_color, img_point)
    for i in range(12):
        import pdb; pdb.set_trace()  # pause before moving to each peg
        pr = transform_task2robot(bd.pnt_pegs[i] * 0.001, Trc=Trc, Tpc=Tpc)
        # move_ready(dvrk, which_arm)
        if which_arm == 'PSM1':
            # dvrk.set_arm_position(pos1=[pr[0], pr[1], pr[2]+0.005])
            dvrk.set_pose_interpolate(pos=[pr[0], pr[1], pr[2] + 0.01],
                                      rot=[0.0, 0.0, 0.0, 1.0])
        elif which_arm == 'PSM2':
            # dvrk.set_arm_position(pos2=[pr[0], pr[1], pr[2] + 0.005])
            dvrk.set_pose_interpolate(pos=[pr[0], pr[1], pr[2] + 0.01],
                                      rot=[0.0, 0.0, 0.0, 1.0])
from FLSpegtransfer.vision.ZividCapture import ZividCapture
from FLSpegtransfer.motion.deprecated.dvrkMotionBridgeP import dvrkMotionBridgeP
import FLSpegtransfer.utils.CmnUtil as U
import sys
import os

# Remove the ROS Kinetic Python 2 path so the system OpenCV is imported
# instead of the ROS-bundled cv2.
ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if ros_path in sys.path:
    sys.path.remove(ros_path)
import cv2
import numpy as np

zc = ZividCapture()
zc.start()

# check images
img_color, img_depth, img_point = zc.capture_3Dimage(color='BGR')
zc.display_rgb(img_color)
zc.display_depthmap(img_point)
zc.display_pointcloud(img_point, img_color)

dvrk = dvrkMotionBridgeP()
sampling = 100
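# Command random wrist orientations (within ±40° per Euler angle) and capture
# a 3D image at each pose.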
for i in range(sampling):
    r1 = np.random.uniform(-40, 40)
    r2 = np.random.uniform(-40, 40)
    r3 = np.random.uniform(-40, 40)
    rot1 = [r1, r2, r3]
    q1 = U.euler_to_quaternion(rot1, unit='deg')
    jaw1 = [10 * np.pi / 180.]
    dvrk.set_pose(rot1=q1, jaw1=jaw1)
    img_color, img_depth, img_point = zc.capture_3Dimage(img_crop=False)
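

# The class below assumes numpy (np), threading, and the FLSpegtransfer vision,
# motion, and path modules (ZividCapture, BlockDetection3D, GraspingPose3D,
# VisualizeDetection, PegboardCalibration, dvrkPegTransferMotionHandOver,
# dvrkKinematics, and the package root path `root`) are imported at module
# level; those imports are not shown in this snippet.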
class FLSPegTransferHandover:
    def __init__(self,
                 use_simulation=True,
                 use_controller=True,
                 use_optimization=True,
                 optimizer='cubic',
                 which_camera='inclined'):
        self.use_simulation = use_simulation

        # load transform
        self.Trc1 = np.load(root + 'calibration_files/Trc_' + which_camera +
                            '_PSM1.npy')  # robot to camera
        self.Trc2 = np.load(root + 'calibration_files/Trc_' + which_camera +
                            '_PSM2.npy')  # robot to camera
        self.Trc2[:3, -1] += np.array([-0.001, 0.00, 0.00])  # manual correction: shift the PSM2 calibration translation by -1 mm in x
        self.Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera +
                           '.npy')  # pegboard to camera
        # import modules
        if use_simulation:
            pass
        else:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()
        self.block = BlockDetection3D(self.Tpc)
        self.gp = {
            'PSM1': GraspingPose3D(dist_pps=5, dist_gps=5, which_arm='PSM1'),
            'PSM2': GraspingPose3D(dist_pps=5, dist_gps=5, which_arm='PSM2')
        }
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionHandOver(
            use_controller=use_controller,
            use_optimization=use_optimization,
            optimizer=optimizer)
        self.dvrk_model = dvrkKinematics()

        # action ordering: each entry is [pick_arm, pick_peg, place_arm, place_peg]
        self.action_list = [
            ['PSM2', 1, 'PSM1', 7],  # left to right
            ['PSM2', 0, 'PSM1', 6],
            ['PSM2', 3, 'PSM1', 8],
            ['PSM2', 2, 'PSM1', 9],
            ['PSM2', 5, 'PSM1', 11],
            ['PSM2', 4, 'PSM1', 10],
            ['PSM1', 7, 'PSM2', 1],  # right to left
            ['PSM1', 6, 'PSM2', 0],
            ['PSM1', 8, 'PSM2', 3],
            ['PSM1', 9, 'PSM2', 2],
            ['PSM1', 11, 'PSM2', 5],
            ['PSM1', 10, 'PSM2', 4]
        ]
        self.action_order = 0

        # data members
        self.color = []
        self.point = []
        self.state = ['initialize', 'initialize']  # [pick state, place state]

        # events for synchronizing the pick and place threads
        self.event_handover = threading.Event()    # hand-over handshake (block grasped / gripper escaped)
        self.event_waiting = threading.Event()     # place thread signals pick thread to resume at the halfway point
        self.event_blockready = threading.Event()  # pick thread signals a block is presented for hand-over

        # parallelize
        self.initialize()
        self.thread1 = threading.Thread(target=self.run_pick)
        self.thread2 = threading.Thread(target=self.run_place)
        self.thread1.start()
        self.thread2.start()
        # self.run_place()

    def initialize(self):
        self.dvrk_motion.move_origin()
        # self.dvrk_motion.move_random(which_arm='PSM1')    # random move when NN model uses history
        self.update_images()
        self.block.find_pegs(img_color=self.color, img_point=self.point)
        # hand-over point: centroid of pegs 3, 6, and 9, shifted by -10 (mm) along y in the task frame
        self.pnt_handover = (self.block.pnt_pegs[3] + self.block.pnt_pegs[6] +
                             self.block.pnt_pegs[9]) / 3 + np.array([0.0, -10.0, 0.0])
        self.state = ['plan_action', 'plan_action']

    def transform_task2robot(self,
                             point,
                             delta=False,
                             inverse=False,
                             which_arm='PSM1'):
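        """Map a point between the task (pegboard) frame and a robot frame.

        inverse=False: task -> robot (via the camera frame, Trc . Tcp);
        inverse=True: robot -> task. delta=True applies the rotation only, so
        the input is treated as a displacement rather than a position.
        which_arm selects the PSM1 or PSM2 calibration (Trc1 / Trc2).
        """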
        point = np.array(point)
        if inverse == False:
            Tcp = np.linalg.inv(self.Tpc)
            if which_arm == 'PSM1' or which_arm == 0:
                Trp = self.Trc1.dot(Tcp)
            elif which_arm == 'PSM2' or which_arm == 1:
                Trp = self.Trc2.dot(Tcp)
            else:
                raise ValueError
            Rrp = Trp[:3, :3]
            trp = Trp[:3, -1]
            if delta == False:
                transformed = Rrp.dot(point) + trp
            else:
                transformed = Rrp.dot(point)
        else:
            if which_arm == 'PSM1':
                Tcr = np.linalg.inv(self.Trc1)
            elif which_arm == 'PSM2':
                Tcr = np.linalg.inv(self.Trc2)
            else:
                raise ValueError
            Tpr = self.Tpc.dot(Tcr)
            Rpr = Tpr[:3, :3]
            tpr = Tpr[:3, -1]
            if delta == False:
                transformed = Rpr.dot(point) + tpr
            else:
                transformed = Rpr.dot(point)
        return transformed

    def place_servoing(self, nb_place_corr):
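        """Re-detect the block near peg `nb_place_corr` and return the (x, y, z)
        correction (in meters) from the block fiducial to the peg, plus a flag
        indicating whether the block was seen."""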
        # get current position
        self.update_images()
        pose_blk, pnt_blk, pnt_mask = self.block.find_block_servo(
            self.color, self.point, self.block.pnt_pegs[nb_place_corr])
        if len(pnt_blk) < 400:
            delta = [0.0, 0.0, 0.0]
            seen = False
        else:
            # self.vd.plot3d(pnt_blocks=[pnt_blk], pnt_masks=[pnt_mask], pnt_pegs=self.block.pnt_pegs)
            _, T = pose_blk
            p_peg = self.block.pnt_pegs[nb_place_corr] + np.array(
                [0.0, 0.0, -5])  # sub-mm above peg
            p_fid = [0, 0, 15]
            p_fid = T[:3, :3].dot(p_fid) + T[:3, -1]  # w.r.t task coordinate
            delta = (p_peg - p_fid) * 0.001  # unit to (m)
            seen = True
        return delta, seen

    def update_images(self):
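        # In simulation, load recorded captures from disk; otherwise grab a
        # fresh frame from the Zivid camera.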
        if self.use_simulation:
            if self.action_order <= 5:
                self.color = np.load(
                    'record/peg_transfer_kit_capture/img_color_inclined_.npy')
                self.point = np.load(
                    'record/peg_transfer_kit_capture/img_point_inclined_.npy')
                # self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined_rhp.npy')
                # self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined_rhp.npy')
            else:
                self.color = np.load(
                    'record/peg_transfer_kit_capture/img_color_inclined_.npy')
                self.point = np.load(
                    'record/peg_transfer_kit_capture/img_point_inclined_.npy')
                # self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined_rhp.npy')
                # self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined_rhp.npy')
        else:
            self.color, _, self.point = self.zivid.capture_3Dimage(color='BGR')

    def run_pick(self):
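        """Pick-side thread: detect the block for the current action, grasp it,
        and carry it to the hand-over point, synchronizing with run_place via
        the threading events."""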
        while True:
            if self.state[0] == 'plan_action':
                if self.action_order == len(self.action_list):
                    print("Pick Task Completed!")
                    exit()
                elif self.action_order == 6:
                    self.event_blockready.set()
                    self.event_blockready.clear()
                    self.event_waiting.clear()
                    self.event_waiting.wait()
                print('')
                print("Action order: ", self.action_order, "started")
                self.state[0] = 'find_block'

            elif self.state[0] == 'find_block':
                # find block
                self.update_images()
                nb_pick = self.action_list[self.action_order][1]
                arm_pick = self.action_list[self.action_order][0]
                pose_blk_pick, pnt_blk_pick, pnt_mask_pick = self.block.find_block(
                    block_number=nb_pick,
                    img_color=self.color,
                    img_point=self.point)

                # check if there is block to move
                if pose_blk_pick != []:
                    # find grasping & placing pose
                    self.gp[arm_pick].find_grasping_pose(
                        pose_blk=pose_blk_pick,
                        peg_point=self.block.pnt_pegs[nb_pick])
                    self.gp[arm_pick].find_placing_pose(
                        peg_point=self.pnt_handover)

                    # # visualize
                    # pnt_grasping = np.array(self.gp[arm_pick].pose_grasping)[1:]
                    # self.vd.plot3d([pnt_blk_pick], [pnt_mask_pick], self.block.pnt_pegs, [pnt_grasping])

                    self.state[0] = 'move_block'
                else:  # no block detected
                    print("No block detected for pick-up, Skip this order.")
                    self.action_order += 1
                    self.state[0] = 'plan_action'

            elif self.state[0] == 'move_block':
                # print(' ')
                # print("action order ", self.action_order)
                # print("action pair #", self.action_list[self.action_order[0]])
                # print(' ')
                arm_pick = self.action_list[self.action_order][0]
                gp_pick = self.gp[arm_pick].pose_grasping   # [n, ang, x, y, z, seen]
                gp_place = self.gp[arm_pick].pose_placing   # [n, ang, x, y, z, seen]
                gp_place = np.array(gp_place)
                gp_pick_robot = self.transform_task2robot(
                    gp_pick[1:] * 0.001, which_arm=arm_pick)  # [x,y,z]
                gp_place_robot = self.transform_task2robot(
                    gp_place[1:] * 0.001, which_arm=arm_pick)  # [x,y,z]

                # pick up block and move to center
                self.dvrk_motion.go_pick(pos_pick=gp_pick_robot,
                                         rot_pick=gp_pick[0],
                                         which_arm=arm_pick)
                self.dvrk_motion.go_handover(pos_pick=gp_pick_robot,
                                             rot_pick=gp_pick[0],
                                             pos_place=gp_place_robot,
                                             rot_place=gp_place[0],
                                             which_arm=arm_pick)
                self.event_blockready.set()
                self.event_handover.wait()  # until block is grasped by other gripper
                self.event_handover.clear()

                # escape from hand-over
                self.dvrk_motion.move_jaw(jaw='open', which_arm=arm_pick)
                self.dvrk_motion.move_upright(
                    pos=[gp_place_robot[0], gp_place_robot[1],
                         self.dvrk_motion.height_handover + self.dvrk_motion.offset_handover],
                    rot=gp_place[0],
                    jaw='open',
                    which_arm=arm_pick)
                self.event_handover.set()
                self.action_order += 1
                self.state[0] = 'plan_action'

    def run_place(self):
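        """Place-side thread: receive the block at hand-over, place it on the
        target peg, and apply a visual-servoing correction."""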
        while True:
            if self.state[1] == 'plan_action':
                if self.action_order == len(self.action_list):
                    self.dvrk_motion.move_origin()
                    print("Place Task Completed!")
                    exit()
                elif self.action_order == 6:
                    self.dvrk_motion.move_origin()
                    self.event_waiting.set()
                self.state[1] = 'find_block'

            elif self.state[1] == 'find_block':
                arm_place = self.action_list[self.action_order][2]
                self.dvrk_motion.handover_ready(which_arm=arm_place)
                self.event_blockready.wait()  # until block is ready to hand-over
                self.event_blockready.clear()
                if self.use_simulation:
                    self.color = np.load(
                        'record/peg_transfer_kit_capture/img_color_inclined_ho.npy')
                    self.point = np.load(
                        'record/peg_transfer_kit_capture/img_point_inclined_ho.npy')
                else:
                    self.update_images()

                nb_place = self.action_list[self.action_order][3]
                arm_place = self.action_list[self.action_order][2]
                nb_place_corr = nb_place
                pose_blk_pick, pnt_blk_pick, pnt_mask_pick = self.block.find_block_servo_handover(
                    img_color=self.color, img_point=self.point)

                # check if there is block to move
                if pose_blk_pick != []:
                    # find grasping & placing pose
                    self.gp[arm_place].find_grasping_pose_handover(
                        pose_blk=pose_blk_pick, which_arm=arm_place)
                    self.gp[arm_place].find_placing_pose(
                        peg_point=self.block.pnt_pegs[nb_place])

                    # visualize
                    # pnt_grasping = np.array(self.gp[arm_place].pose_grasping)[1:]
                    # np.save('pnt_blk_pick', pnt_blk_pick)
                    # np.save('pnt_mask_pick', pnt_mask_pick)
                    # np.save('block_pnt_pegs', self.block.pnt_pegs)
                    # np.save('pnt_grasping', pnt_grasping)
                    # self.vd.plot3d([pnt_blk_pick], [pnt_mask_pick], self.block.pnt_pegs, [pnt_grasping])
                    self.state[1] = 'move_block'
                else:
                    print("No block detected for hand-over, Skip this order.")
                    self.event_handover.set()
                    self.state[1] = 'plan_action'

            elif self.state[1] == 'move_block':
                # [n, ang, x,y,z, seen]
                # nb_place = self.action_list[self.action_order][3]
                arm_place = self.action_list[self.action_order][2]
                gp_pick = self.gp[arm_place].pose_grasping
                gp_place = self.gp[arm_place].pose_placing
                gp_place = np.array(gp_place)
                gp_pick_robot = self.transform_task2robot(
                    gp_pick[1:] * 0.001, which_arm=arm_place)  # [x,y,z]
                gp_place_robot = self.transform_task2robot(
                    gp_place[1:] * 0.001, which_arm=arm_place)  # [x,y,z]

                # pick (hand-over)
                self.dvrk_motion.pick_block(pos=gp_pick_robot,
                                            rot=gp_pick[0],
                                            which_arm=arm_place)
                self.event_handover.set()
                self.event_handover.clear()
                self.event_handover.wait()  # until the other gripper escapes
                self.event_handover.clear()

                # place
                self.dvrk_motion.go_place(pos_place=gp_place_robot,
                                          rot_place=gp_place[0],
                                          which_arm=arm_place)

                # visual correction
                delta, seen = self.place_servoing(nb_place_corr)
                delta_rob = self.transform_task2robot(delta, delta=True, which_arm=arm_place)
                delta_rob[2] = 0.0  # temporary
                print(delta_rob)
                self.dvrk_motion.servoing_block(pos_place=gp_place_robot + delta_rob,
                                                rot_place=gp_place[0],
                                                which_arm=arm_place)
                print('')
                print("Action completed")
                self.state[1] = 'plan_action'
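# As above, the class below assumes numpy (np), time, and the FLSpegtransfer
# modules (ZividCapture, ZividUtils, BlockDetection3D, GraspingPose3D,
# VisualizeDetection, PegboardCalibration, dvrkPegTransferMotionSingleArm, and
# the package root path `root`) are imported at module level.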
class FLSPegTransfer:
    def __init__(self, use_simulation=True, use_controller=True, use_optimization=True, optimizer='cubic', which_camera='inclined', which_arm='PSM1'):
        self.use_simulation = use_simulation

        # load transform
        self.Trc = np.load(root + 'calibration_files/Trc_' + which_camera + '_'+which_arm+'.npy')  # robot to camera
        self.Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera + '.npy')  # pegboard to camera

        # self.Trc_stereo = np.load(root + 'calibration_files/Trc_' + 'stereo' + '_'+which_arm+'.npy')  # robot to camera
        # self.Trc_stereo[:3, -1] += np.array([0.00, 0.003, 0.00])
        # self.Tc1c2 = np.linalg.inv(self.Trc).dot(self.Trc_stereo)    # cam1 = inclined, cam2 = stereo

        # import modules
        if self.use_simulation:
            pass
        else:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()
        self.block = BlockDetection3D(self.Tpc)
        self.gp = GraspingPose3D(dist_gps=5, dist_pps=5, which_arm=which_arm)
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionSingleArm(use_controller=use_controller,
                                                          use_optimization=use_optimization,
                                                          optimizer=optimizer, which_arm=which_arm)
        self.zivid_utils = ZividUtils(which_camera='inclined')

        # action ordering
        self.action_list = np.array([[1, 7], [0, 6], [3, 8], [2, 9], [5, 11], [4, 10],  # left to right
                                     [7, 1], [6, 0], [8, 3], [9, 2], [11, 5], [10, 4]])  # right to left
        self.action_order = 0

        # data members
        self.color = []
        self.point = []
        self.state = ['initialize']
        self.main()

    def transform_task2robot(self, point, delta=False, inverse=False):
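        """Map a point between the task (pegboard) frame and the robot frame;
        delta=True applies the rotation only, inverse=True maps robot -> task."""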
        point = np.array(point)
        if inverse == False:
            Tcp = np.linalg.inv(self.block.Tpc)
            Trp = self.Trc.dot(Tcp)
            Rrp = Trp[:3, :3]
            trp = Trp[:3, -1]
            if delta == False:
                transformed = Rrp.dot(point) + trp
            else:
                transformed = Rrp.dot(point)
        else:
            Tcr = np.linalg.inv(self.Trc)
            Tpr = self.block.Tpc.dot(Tcr)
            Rpr = Tpr[:3, :3]
            tpr = Tpr[:3, -1]
            if delta == False:
                transformed = Rpr.dot(point) + tpr
            else:
                transformed = Rpr.dot(point)
        return transformed

    @classmethod
    def transform(cls, points, T):
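        # Apply a 4x4 homogeneous transform T (rotation + translation) to an
        # (N, 3) array of points.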
        R = T[:3, :3]
        t = T[:3, -1]
        return R.dot(points.T).T + t.T

    def move_blocks(self):
        # [n, ang, x,y,z, seen]
        gp_pick = self.gp.pose_grasping
        gp_place = np.array(self.gp.pose_placing)

        # pick-up and place motion
        gp_pick_robot = self.transform_task2robot(gp_pick[1:]*0.001)  # [x,y,z]
        gp_place_robot = self.transform_task2robot(gp_place[1:]*0.001)

        if self.action_order == 0 or self.action_order == 6:
            self.dvrk_motion.go_pick(pos_pick=gp_pick_robot, rot_pick=gp_pick[0])
        else:
            self.dvrk_motion.return_to_peg(pos_pick=gp_pick_robot, rot_pick=gp_pick[0])
        self.dvrk_motion.transfer_block(pos_pick=gp_pick_robot, rot_pick=gp_pick[0],
                                        pos_place=gp_place_robot, rot_place=gp_place[0])

        # visual correction
        delta, seen = self.place_servoing()
        delta_rob = self.transform_task2robot(delta, delta=True)
        delta_rob[2] = 0.0  # temporary
        self.dvrk_motion.servoing_block(pos_place=gp_place_robot+delta_rob, rot_place=gp_place[0])

    def place_servoing(self):
        # get current position
        self.update_images()
        nb_place = self.action_list[self.action_order][1]
        pose_blk, pnt_blk, pnt_mask = self.block.find_block_servo(self.color, self.point, self.block.pnt_pegs[nb_place])
        if len(pnt_blk) < 400:
            delta = [0.0, 0.0, 0.0]
            seen = False
        else:
            # self.vd.plot3d(pnt_blocks=[pnt_blk], pnt_masks=[pnt_mask], pnt_pegs=self.block.pnt_pegs)
            _, T = pose_blk
            p_peg = self.block.pnt_pegs[nb_place] + np.array([0.0, 0.0, -5])  # sub-mm above peg
            p_fid = [0, 0, 15]
            p_fid = T[:3, :3].dot(p_fid) + T[:3,-1]     # w.r.t task coordinate
            delta = (p_peg - p_fid)*0.001   # unit to (m)
            seen = True
        return delta, seen

    def update_images(self):
        if self.use_simulation:
            if self.action_order <= 5:
                self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined.npy')
                self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined.npy')
            else:
                self.color = np.load('record/peg_transfer_kit_capture/img_color_inclined_rhp.npy')
                self.point = np.load('record/peg_transfer_kit_capture/img_point_inclined_rhp.npy')
        else:
            self.color, _, self.point = self.zivid.capture_3Dimage(color='BGR')

    def main(self):
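        """State-machine loop: initialize -> update_image -> plan_action ->
        find_block -> move_block -> update_image -> ... until exit."""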
        while True:
            if self.state[0] == 'initialize':
                print('')
                print('* State:', self.state[0])
                # define ROI
                self.dvrk_motion.move_origin()

                # random move when NN model uses history
                # self.dvrk_motion.move_random()
                self.update_images()

                # find pegs
                self.block.find_pegs(img_color=self.color, img_point=self.point)
                self.state.insert(0, 'update_image')
                print ("started")
                time.sleep(3)
                st = time.time()

            elif self.state[0] == 'update_image':
                print('')
                print('* State:', self.state[0])
                self.update_images()
                print('image updated.')
                self.state.insert(0, 'plan_action')

            elif self.state[0] == 'plan_action':
                print('')
                print('* State:', self.state[0])
                if self.action_order == len(self.action_list):
                    self.state.insert(0, 'exit')
                else:
                    self.state.insert(0, 'find_block')
                    print("action pair #", self.action_list[self.action_order])

            elif self.state[0] == 'find_block':
                print('')
                print('* State:', self.state[0])
                nb_pick = self.action_list[self.action_order][0]
                nb_place = self.action_list[self.action_order][1]
                pose_blk_pick, pnt_blk_pick, pnt_mask_pick = self.block.find_block(
                    block_number=nb_pick, img_color=self.color, img_point=self.point)
                pose_blk_place, pnt_blk_place, pnt_mask_place = self.block.find_block(
                    block_number=nb_place, img_color=self.color, img_point=self.point)

                # check if there is block to move
                if pose_blk_pick != [] and pose_blk_place == []:
                    print('A block to move was detected.')
                    # find grasping & placing pose
                    self.gp.find_grasping_pose(pose_blk=pose_blk_pick, peg_point=self.block.pnt_pegs[nb_pick])
                    self.gp.find_placing_pose(peg_point=self.block.pnt_pegs[nb_place])
                    # visualize
                    # pnt_grasping = np.array(self.gp.pose_grasping)[1:]
                    # self.vd.plot3d([pnt_blk_pick], [pnt_mask_pick], self.block.pnt_pegs, [pnt_grasping])
                    # pnt_placing = np.array(self.gp.pose_placing)[1:]
                    # self.vd.plot3d([pnt_blk_place], [pnt_mask_pick], self.block.pnt_pegs, [pnt_placing])

                    # visualize all blocks and all grasping points
                    # self.block.find_block_all(img_color=self.color, img_point=self.point)
                    # self.gp.find_grasping_pose_all(pose_blks=self.block.pose_blks, peg_points=self.block.pnt_pegs)
                    # pnt_graspings = np.array(self.gp.pose_grasping)[:6, 2:5]
                    # self.vd.plot3d(self.block.pnt_blks, self.block.pnt_masks, self.block.pnt_pegs, pnt_graspings)
                    self.state.insert(0, 'move_block')
                else:
                    print('No block to move was detected. Skip this order.')
                    self.action_order += 1
                    self.state.insert(0, 'plan_action')

            elif self.state[0] == 'move_block':
                print('')
                print('* State:', self.state[0])
                self.move_blocks()
                if self.action_order == 5:
                    self.dvrk_motion.move_origin()
                self.action_order += 1
                self.state.insert(0, 'update_image')

            elif self.state[0] == 'exit':
                print("task completed.")
                self.dvrk_motion.move_origin()
                t_comp = time.time() - st  # total elapsed task time
                print(t_comp)
                np.save('/home/hwangmh/t_comp', t_comp)

                # timing statistics collected by the motion module
                print(self.dvrk_motion.time_motion)
                print(np.sum(self.dvrk_motion.time_motion))
                print(self.dvrk_motion.time_computing)
                print(np.sum(self.dvrk_motion.time_computing))
                import pdb; pdb.set_trace()
                exit()
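

# Minimal usage sketch (hypothetical entry point): the task starts directly
# from __init__, and use_simulation=True replays recorded captures instead of
# the live camera.
if __name__ == "__main__":
    FLSPegTransfer(use_simulation=True, use_controller=True, use_optimization=True,
                   optimizer='cubic', which_camera='inclined', which_arm='PSM1')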