Example #1
    def __init__(self, use_simulation=True, which_camera='inclined'):
        """Set up the single-arm peg-transfer task and run it.

        Loads the calibration transforms, constructs the vision and motion
        modules, fixes the pick/place ordering, then enters self.main().
        """
        self.use_simulation = use_simulation

        # Calibration transforms: robot->camera (Trc) and pegboard->camera (Tpc).
        calib_dir = root + 'calibration_files/'
        self.Trc = np.load(f"{calib_dir}Trc_{which_camera}_PSM1.npy")
        self.Tpc = np.load(f"{calib_dir}Tpc_{which_camera}.npy")

        # The camera capture module is only needed on real hardware.
        if not self.use_simulation:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()

        # Vision / planning / motion modules.
        self.block = BlockDetection3D(self.Tpc)
        self.gp = GraspingPose3D()
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionSingleArmOptimized()
        self.dvrk_model = dvrkKinematics()

        # (pick, place) peg-index pairs: first the left-to-right passes,
        # then the same pairs mirrored for the right-to-left return.
        left_to_right = [[1, 7], [0, 6], [3, 8], [2, 9], [5, 11], [4, 10]]
        right_to_left = [[dst, src] for src, dst in left_to_right]
        self.action_list = np.array(left_to_right + right_to_left)
        self.action_order = 0

        # Data members updated while the task runs.
        self.color = []
        self.point = []
        self.state = ['initialize']
        self.main()
    def __init__(self, use_simulation=True, which_camera='inclined'):
        """Set up the dual-arm peg-transfer task and run it.

        Loads per-arm calibration transforms, builds one grasping-pose
        estimator per PSM arm, fixes the paired action ordering, then
        enters self.main().
        """
        self.use_simulation = use_simulation

        # Calibration transforms: one robot->camera per arm, plus pegboard->camera.
        calib_dir = root + 'calibration_files/'
        self.Trc1 = np.load(f"{calib_dir}Trc_{which_camera}_PSM1.npy")
        self.Trc2 = np.load(f"{calib_dir}Trc_{which_camera}_PSM2.npy")
        self.Tpc = np.load(f"{calib_dir}Tpc_{which_camera}.npy")

        # The camera capture module is only needed on real hardware.
        if not use_simulation:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()

        # Vision / planning / motion modules; one grasping-pose object per arm.
        self.block = BlockDetection3D(self.Tpc)
        self.gp = {arm: GraspingPose3D(which_arm=arm) for arm in ('PSM1', 'PSM2')}
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionDualArm()
        self.dvrk_model = dvrkKinematics()

        # Each entry is a [pick-pair, place-pair] of peg indices; the second
        # half mirrors the first for the right-to-left return passes.
        left_to_right = [[[0, 1], [7, 8]], [[2, 3], [6, 11]], [[4, 5], [9, 10]]]
        right_to_left = [[dst, src] for src, dst in left_to_right]
        self.action_list = np.array(left_to_right + right_to_left)
        self.action_order = 0

        # Data members updated while the task runs.
        self.color = []
        self.point = []
        self.state = ['initialize']
        self.main()
Example #3
    def __init__(self, use_simulation=True, use_controller=True, use_optimization=True, optimizer='cubic', which_camera='inclined', which_arm='PSM1'):
        """Set up the single-arm peg-transfer task with trajectory options.

        Loads the calibration transforms for the chosen arm, constructs the
        vision and motion modules (the motion module receives the controller /
        optimizer flags), fixes the pick/place ordering, then enters
        self.main().
        """
        self.use_simulation = use_simulation

        # Calibration transforms: robot->camera for the chosen arm, pegboard->camera.
        calib_dir = root + 'calibration_files/'
        self.Trc = np.load(f"{calib_dir}Trc_{which_camera}_{which_arm}.npy")
        self.Tpc = np.load(f"{calib_dir}Tpc_{which_camera}.npy")

        # The camera capture module is only needed on real hardware.
        if not self.use_simulation:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()

        # Vision / planning / motion modules.
        self.block = BlockDetection3D(self.Tpc)
        self.gp = GraspingPose3D(dist_gps=5, dist_pps=5, which_arm=which_arm)
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionSingleArm(
            use_controller=use_controller,
            use_optimization=use_optimization,
            optimizer=optimizer,
            which_arm=which_arm,
        )
        self.zivid_utils = ZividUtils(which_camera='inclined')

        # (pick, place) peg-index pairs: first the left-to-right passes,
        # then the same pairs mirrored for the right-to-left return.
        left_to_right = [[1, 7], [0, 6], [3, 8], [2, 9], [5, 11], [4, 10]]
        right_to_left = [[dst, src] for src, dst in left_to_right]
        self.action_list = np.array(left_to_right + right_to_left)
        self.action_order = 0

        # Data members updated while the task runs.
        self.color = []
        self.point = []
        self.state = ['initialize']
        self.main()
Example #4
    def __init__(self, use_simulation=True, which_camera='inclined'):
        """Set up the dual-arm hand-over peg-transfer task and run it.

        Loads per-arm calibration transforms, builds one grasping-pose
        estimator per PSM arm, fixes the (pick-arm, pick-peg, place-arm,
        place-peg) action ordering, then runs the pick loop on a worker
        thread while the place loop runs on the calling thread.
        """
        self.use_simulation = use_simulation

        # Calibration transforms: one robot->camera per arm, plus pegboard->camera.
        calib_dir = root + 'calibration_files/'
        self.Trc1 = np.load(f"{calib_dir}Trc_{which_camera}_PSM1.npy")
        self.Trc2 = np.load(f"{calib_dir}Trc_{which_camera}_PSM2.npy")
        self.Tpc = np.load(f"{calib_dir}Tpc_{which_camera}.npy")

        # The camera capture module is only needed on real hardware.
        if not use_simulation:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()

        # Vision / planning / motion modules; one grasping-pose object per arm.
        self.block = BlockDetection3D(self.Tpc)
        self.gp = {arm: GraspingPose3D(which_arm=arm) for arm in ('PSM1', 'PSM2')}
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionHandOver()
        self.dvrk_model = dvrkKinematics()

        # Each action is [pick_arm, pick_peg, place_arm, place_peg]; the
        # second half mirrors the first with the arms swapped for the
        # right-to-left return passes.
        peg_pairs = [(1, 7), (0, 6), (3, 8), (2, 9), (5, 11), (4, 10)]
        self.action_list = (
            [['PSM2', src, 'PSM1', dst] for src, dst in peg_pairs]
            + [['PSM1', dst, 'PSM2', src] for src, dst in peg_pairs]
        )
        self.action_order = [-1, -1]  # [pick action, place action]

        # Data members updated while the task runs.
        self.color = []
        self.point = []
        self.state = []  # [pick state, place state]
        self.pnt_handover = []

        # Events coordinating the hand-over between the two loops.
        self.event_handover = threading.Event()
        self.event_waiting = threading.Event()

        self.initialize()

        # Pick runs on a worker thread; place runs on this (calling) thread.
        self.thread2 = threading.Thread(target=self.run_pick)
        self.thread2.start()
        self.run_place()
Example #5
import sys
for p in sys.path:
    if p == '/opt/ros/kinetic/lib/python2.7/dist-packages':
        sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
import numpy as np
from FLSpegtransfer.vision.ZividCapture import ZividCapture
from FLSpegtransfer.vision.BlockDetection2D import BlockDetection2D
from FLSpegtransfer.vision.VisualizeDetection import VisualizeDetection
from FLSpegtransfer.vision.GraspingPose import GraspingPose

# Build the 2-D detection pipeline: block detector, camera capture,
# visualization, and grasping-pose estimation (vd and gp read from bd).
bd = BlockDetection2D()
zivid = ZividCapture()
vd = VisualizeDetection(bd)
gp = GraspingPose(bd)

# Cropped color/depth images; refilled from recorded data inside the loop below.
img_color = []
img_depth = []
while True:
    image = np.load('../../record/image.npy')
    depth = np.load('../../record/depth.npy')
    point = np.load('../../record/point.npy')
    img_color, img_depth, img_point = zivid.img_crop(image, depth, point)
    # zivid.capture_3Dimage()
    # img_color, img_depth, img_point = BD.img_crop(zivid.color, zivid.depth, zivid.point)
    img_color = cv2.cvtColor(img_color, cv2.COLOR_RGB2BGR)
    if img_color == [] or img_depth == [] or img_point == []:
        pass
    else:
        # find block
        pose_blks, img_blks, img_pegs, peg_pnts = bd.find_blocks(
for p in sys.path:
    if p == '/opt/ros/kinetic/lib/python2.7/dist-packages':
        sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
import numpy as np
from FLSpegtransfer.vision.ZividCapture import ZividCapture
from FLSpegtransfer.vision.BlockDetection3D import BlockDetection3D
from FLSpegtransfer.vision.PegboardCalibration import PegboardCalibration
from FLSpegtransfer.vision.VisualizeDetection import VisualizeDetection
from FLSpegtransfer.vision.GraspingPose3D import GraspingPose3D

# Build the 3-D detection pipeline: camera capture, pegboard calibration,
# block detector, visualization, and grasping-pose estimation.
zivid = ZividCapture()
zivid.start()
peg = PegboardCalibration()
bd = BlockDetection3D()
vd = VisualizeDetection()
gp = GraspingPose3D()

# define region of interest
color = np.load('../../record/color_new.npy')
depth = np.load('../../record/depth_new.npy')
point = np.load('../../record/point_new.npy')
# color = cv2.cvtColor(color, cv2.COLOR_RGB2BGR)
# NOTE(review): the three recorded arrays loaded above are immediately
# overwritten by this live capture — the np.load results appear unused.
color, depth, point = zivid.capture_3Dimage(color='BGR')
# Detect the pegboard boundary in the color image, then pad it by
# (dx, dy) pixels on every side and store the crop window on the capture
# object so subsequent captures are cropped to this ROI.
ycr, hcr, xcr, wcr = peg.define_boundary(color)
dx = 200
dy = 200
zivid.ycr = ycr - dy
zivid.hcr = hcr + 2 * dy
zivid.xcr = xcr - dx
zivid.wcr = wcr + 2 * dx
    else:
        raise ValueError
    ref_waypoints = np.concatenate((q0, qw1, qw2, qf), axis=0).reshape(4, 6)
    return q_pos, ref_waypoints, cal_time


# load transform
which_camera = 'inclined'
# Calibration transforms: robot->camera (Trc, PSM1 arm) and pegboard->camera (Tpc).
Trc = np.load(root + 'calibration_files/Trc_' + which_camera +
              '_PSM1.npy')  # robot to camera
Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera +
              '.npy')  # pegboard to camera

# Vision / planning modules and the two trajectory optimizers compared below.
block = BlockDetection3D(Tpc)
gp = GraspingPose3D(dist_gps=5.5, dist_pps=5.5)
vd = VisualizeDetection()
pegboard = PegboardCalibration()
dvrk_model = dvrkKinematics()
motion_opt_2wp = PegMotionOptimizer_2wp()
motion_opt_cubic = CubicOptimizer_2wp()

# action ordering
action_list = np.array([
    [1, 7],
    [0, 6],
    [3, 8],
    [2, 9],
    [5, 11],
    [4, 10],  # left to right
    [7, 1],
    [6, 0],