def __init__(self, use_simulation=True, which_camera='inclined'):
    """Load calibration, build vision/planning/motion modules, and start the task loop.

    Parameters
    ----------
    use_simulation : bool
        When True, skip the physical Zivid camera entirely.
    which_camera : str
        Camera identifier used to select the calibration files.
    """
    self.use_simulation = use_simulation

    # calibration transforms (paths built from the selected camera)
    trc_path = root + 'calibration_files/Trc_' + which_camera + '_PSM1.npy'
    tpc_path = root + 'calibration_files/Tpc_' + which_camera + '.npy'
    self.Trc = np.load(trc_path)  # robot to camera
    self.Tpc = np.load(tpc_path)  # pegboard to camera

    # the 3D camera is only brought up on real hardware
    if not use_simulation:
        self.zivid = ZividCapture(which_camera=which_camera)
        self.zivid.start()

    # vision / grasp-planning / motion modules
    self.block = BlockDetection3D(self.Tpc)
    self.gp = GraspingPose3D()
    self.vd = VisualizeDetection()
    self.pegboard = PegboardCalibration()
    self.dvrk_motion = dvrkPegTransferMotionSingleArmOptimized()
    self.dvrk_model = dvrkKinematics()

    # action ordering: (pick peg, place peg) pairs; the return pass is the
    # left-to-right list with each pair swapped
    left_to_right = [[1, 7], [0, 6], [3, 8], [2, 9], [5, 11], [4, 10]]
    right_to_left = [[place, pick] for pick, place in left_to_right]
    self.action_list = np.array(left_to_right + right_to_left)
    self.action_order = 0

    # data members
    self.color = []
    self.point = []
    self.state = ['initialize']
    self.main()
def registration_pegboard(self, img_color, img_point):
    """Register the observed pegboard point cloud to the pegboard model and save Tpc.

    Segments the board by red-color masking, lifts the masked pixels to 3D
    points, then runs point-cloud registration against the stored model and
    writes the resulting transform to 'Tpc.npy'.
    """
    # color masking in HSV space (thresholds come from instance state)
    hsv = cv2.cvtColor(img_color, cv2.COLOR_BGR2HSV)
    red_mask = cv2.inRange(hsv, self.lower_red, self.upper_red)

    # largest external contour is taken as the board outline
    contours, _ = cv2.findContours(red_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    board_fill = np.zeros_like(img_color)
    cv2.drawContours(board_fill, contours, 0, (255, 255, 255), -1)

    # 3D points inside the filled contour, NaNs dropped, mm -> m
    board_pts = img_point[np.all(board_fill == [255, 255, 255], axis=-1)]
    board_pts = BlockDetection3D.remove_nan(board_pts) * 0.001  # (m)
    board_cloud = o3d.geometry.PointCloud()
    board_cloud.points = o3d.utility.Vector3dVector(board_pts)

    # load the pegboard model and rescale it to meters as well
    model_cloud = o3d.io.read_point_cloud(root + 'img/peg_board.pcd')
    model_pts = PCLRegistration.convert(model_cloud)[1] * 0.001  # to (m)
    model_cloud = PCLRegistration.convert(model_pts)[0]
    print(board_pts)
    print(model_pts)

    # registration: board (source) onto model (target)
    Tpc = PCLRegistration.registration(source=board_cloud, target=model_cloud)
    PCLRegistration.display_registration(board_cloud, model_cloud, Tpc)
    print('Tpc= ', Tpc)
    np.save('Tpc.npy', Tpc)
    print('Tpc saved')
def __init__(self, use_simulation=True, which_camera='inclined'):
    """Load per-arm calibration, build dual-arm modules, and start the task loop.

    Parameters
    ----------
    use_simulation : bool
        When True, skip the physical Zivid camera entirely.
    which_camera : str
        Camera identifier used to select the calibration files.
    """
    self.use_simulation = use_simulation

    # calibration transforms: one robot-to-camera transform per arm
    trc1_path = root + 'calibration_files/Trc_' + which_camera + '_PSM1.npy'
    trc2_path = root + 'calibration_files/Trc_' + which_camera + '_PSM2.npy'
    tpc_path = root + 'calibration_files/Tpc_' + which_camera + '.npy'
    self.Trc1 = np.load(trc1_path)  # robot to camera (PSM1)
    self.Trc2 = np.load(trc2_path)  # robot to camera (PSM2)
    self.Tpc = np.load(tpc_path)    # pegboard to camera

    # the 3D camera is only brought up on real hardware
    if not use_simulation:
        self.zivid = ZividCapture(which_camera=which_camera)
        self.zivid.start()

    # vision / grasp-planning / motion modules (one grasp planner per arm)
    self.block = BlockDetection3D(self.Tpc)
    self.gp = {
        'PSM1': GraspingPose3D(which_arm='PSM1'),
        'PSM2': GraspingPose3D(which_arm='PSM2'),
    }
    self.vd = VisualizeDetection()
    self.pegboard = PegboardCalibration()
    self.dvrk_motion = dvrkPegTransferMotionDualArm()
    self.dvrk_model = dvrkKinematics()

    # action ordering: each entry pairs (pick pegs, place pegs) for both arms;
    # the return pass reuses the left-to-right list with pairs swapped
    left_to_right = [
        [[0, 1], [7, 8]],
        [[2, 3], [6, 11]],
        [[4, 5], [9, 10]],
    ]
    right_to_left = [[place, pick] for pick, place in left_to_right]
    self.action_list = np.array(left_to_right + right_to_left)
    self.action_order = 0

    # data members
    self.color = []
    self.point = []
    self.state = ['initialize']
    self.main()
def __init__(self, use_simulation=True, use_controller=True, use_optimization=True,
             optimizer='cubic', which_camera='inclined', which_arm='PSM1'):
    """Load calibration for the chosen arm, build the modules, and start the task loop.

    Parameters
    ----------
    use_simulation : bool
        When True, skip the physical Zivid camera entirely.
    use_controller, use_optimization : bool
        Forwarded to the single-arm motion module.
    optimizer : str
        Trajectory optimizer selection, forwarded to the motion module.
    which_camera : str
        Camera identifier used to select the calibration files.
    which_arm : str
        Arm name ('PSM1'/'PSM2') used to select the robot-to-camera calibration.
    """
    self.use_simulation = use_simulation

    # calibration transforms, keyed by camera and arm
    trc_path = root + 'calibration_files/Trc_' + which_camera + '_' + which_arm + '.npy'
    tpc_path = root + 'calibration_files/Tpc_' + which_camera + '.npy'
    self.Trc = np.load(trc_path)  # robot to camera
    self.Tpc = np.load(tpc_path)  # pegboard to camera
    # NOTE(review): a disabled stereo-camera calibration path existed here:
    # self.Trc_stereo = np.load(root + 'calibration_files/Trc_' + 'stereo' + '_'+which_arm+'.npy')
    # self.Trc_stereo[:3, -1] += np.array([0.00, 0.003, 0.00])
    # self.Tc1c2 = np.linalg.inv(self.Trc).dot(self.Trc_stereo)  # cam1 = inclined, cam2 = stereo

    # the 3D camera is only brought up on real hardware
    if not self.use_simulation:
        self.zivid = ZividCapture(which_camera=which_camera)
        self.zivid.start()

    # vision / grasp-planning / motion modules
    self.block = BlockDetection3D(self.Tpc)
    self.gp = GraspingPose3D(dist_gps=5, dist_pps=5, which_arm=which_arm)
    self.vd = VisualizeDetection()
    self.pegboard = PegboardCalibration()
    self.dvrk_motion = dvrkPegTransferMotionSingleArm(
        use_controller=use_controller,
        use_optimization=use_optimization,
        optimizer=optimizer,
        which_arm=which_arm,
    )
    self.zivid_utils = ZividUtils(which_camera='inclined')

    # action ordering: (pick peg, place peg) pairs; the return pass is the
    # left-to-right list with each pair swapped
    left_to_right = [[1, 7], [0, 6], [3, 8], [2, 9], [5, 11], [4, 10]]
    right_to_left = [[place, pick] for pick, place in left_to_right]
    self.action_list = np.array(left_to_right + right_to_left)
    self.action_order = 0

    # data members
    self.color = []
    self.point = []
    self.state = ['initialize']
    self.main()
def __init__(self, use_simulation=True, which_camera='inclined'): self.use_simulation = use_simulation # load transform self.Trc1 = np.load(root + 'calibration_files/Trc_' + which_camera + '_PSM1.npy') # robot to camera self.Trc2 = np.load(root + 'calibration_files/Trc_' + which_camera + '_PSM2.npy') # robot to camera self.Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera + '.npy') # pegboard to camera # import modules if use_simulation: pass else: self.zivid = ZividCapture(which_camera=which_camera) self.zivid.start() self.block = BlockDetection3D(self.Tpc) # self.gp = [GraspingPose3D(which_arm='PSM1'), GraspingPose3D(which_arm='PSM2')] self.gp = { 'PSM1': GraspingPose3D(which_arm='PSM1'), 'PSM2': GraspingPose3D(which_arm='PSM2') } self.vd = VisualizeDetection() self.pegboard = PegboardCalibration() self.dvrk_motion = dvrkPegTransferMotionHandOver() self.dvrk_model = dvrkKinematics() # action ordering (pick, place) pair self.action_list = [ ['PSM2', 1, 'PSM1', 7], # left to right ['PSM2', 0, 'PSM1', 6], ['PSM2', 3, 'PSM1', 8], ['PSM2', 2, 'PSM1', 9], ['PSM2', 5, 'PSM1', 11], ['PSM2', 4, 'PSM1', 10], ['PSM1', 7, 'PSM2', 1], # right to left ['PSM1', 6, 'PSM2', 0], ['PSM1', 8, 'PSM2', 3], ['PSM1', 9, 'PSM2', 2], ['PSM1', 11, 'PSM2', 5], ['PSM1', 10, 'PSM2', 4] ] self.action_order = [-1, -1] # [pick action, place action] # data members self.color = [] self.point = [] self.state = [] # [pick state, place state] self.pnt_handover = [] # event self.event_handover = threading.Event() self.event_waiting = threading.Event() # parallelize self.initialize() # self.thread1 = threading.Thread(target=self.PSM1) self.thread2 = threading.Thread(target=self.run_pick) # self.thread1.start() self.thread2.start() self.run_place()
# Script: bring up the Zivid camera, detect the pegboard boundary in a captured
# color image, and set the camera's crop region around it.
import sys
# Drop the ROS Kinetic Python 2.7 path so the Python 3 cv2 is imported instead.
# NOTE(review): removing from sys.path while iterating it can skip the entry
# following a removal — works here when the path appears once; confirm intent.
for p in sys.path:
    if p == '/opt/ros/kinetic/lib/python2.7/dist-packages':
        sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
import numpy as np
from FLSpegtransfer.vision.ZividCapture import ZividCapture
from FLSpegtransfer.vision.BlockDetection3D import BlockDetection3D
from FLSpegtransfer.vision.PegboardCalibration import PegboardCalibration
from FLSpegtransfer.vision.VisualizeDetection import VisualizeDetection
from FLSpegtransfer.vision.GraspingPose3D import GraspingPose3D

# module instances used by the script
zivid = ZividCapture()
zivid.start()
peg = PegboardCalibration()
bd = BlockDetection3D()
vd = VisualizeDetection()
gp = GraspingPose3D()

# define region of interest
# NOTE(review): these recorded arrays are immediately overwritten by the live
# capture below — presumably kept as an offline fallback; confirm.
color = np.load('../../record/color_new.npy')
depth = np.load('../../record/depth_new.npy')
point = np.load('../../record/point_new.npy')
# color = cv2.cvtColor(color, cv2.COLOR_RGB2BGR)
color, depth, point = zivid.capture_3Dimage(color='BGR')
# pegboard boundary in the color image: (y, height, x, width) crop values
ycr, hcr, xcr, wcr = peg.define_boundary(color)
# widen the crop by a margin of 200 px on each side
dx = 200
dy = 200
zivid.ycr = ycr - dy
zivid.hcr = hcr + 2 * dy
zivid.xcr = xcr - dx
else: Tcr = np.linalg.inv(Trc) Tpr = Tpc.dot(Tcr) Rpr = Tpr[:3, :3] tpr = Tpr[:3, -1] transformed = Rpr.dot(point) + tpr return transformed which_camera = 'inclined' which_arm = 'PSM2' dvrk = dvrkArm('/' + which_arm) Trc = np.load(root + '/calibration_files/Trc_' + which_camera + '_' + which_arm + '.npy') Tpc = np.load(root + '/calibration_files/Tpc_' + which_camera + '.npy') bd = BlockDetection3D(Tpc) # dvrk = dvrkController(comp_hysteresis=False, stop_collision=False) zivid = ZividCapture(which_camera=which_camera) zivid.start() while True: # move_origin(dvrk, which_arm) img_color, _, img_point = zivid.capture_3Dimage(color='BGR') bd.find_pegs(img_color, img_point) for i in range(12): import pdb pdb.set_trace() pr = transform_task2robot(bd.pnt_pegs[i] * 0.001, Trc=Trc, Tpc=Tpc) # move_ready(dvrk, which_arm) if which_arm == 'PSM1': # dvrk.set_arm_position(pos1=[pr[0], pr[1], pr[2]+0.005]) dvrk.set_pose_interpolate(pos=[pr[0], pr[1], pr[2] + 0.01],