def compute_lift(self, q0, q1, H=25):
        dim = 6
        robot = dvrkKinematics()
        qp = self._new_qp(H, a_prev=np.zeros(6))
        qp.constrain_q(0, q0)
        qp.constrain_v(0, np.zeros(6))
        qp.constrain_q(H, q1)
        x = qp.solve()

        p0 = robot.fk(q0)[:3,3]
        p1 = robot.fk(q1)[:3,3]

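        # Shrink the horizon one step at a time: each shorter QP keeps the start
        # and goal constraints and softly ties every intermediate knot, via the
        # previous solution's joints and the straight-line interpolation between
        # p0 and p1, to the desired lift path. Stop at the shortest horizon that
        # still yields a solution.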
        for h in range(H-1, 3, -1):
            #print(f"solve h={h}, lift")
            # for iter_no in range(3):
            qp = self._new_qp(h, a_prev=np.zeros(6))
            qp.constrain_q(0, q0)
            qp.constrain_v(0, np.zeros(6))
            qp.constrain_q(h, q1)
            for t in range(1,h):
                q_prev = x[qp._qidx(t):qp._qidx(t+1)]
                p_prev = (p0*(h-t) + p1*t)/h
                qp.constrain_pos(t, q_prev, p_prev, tolerance=1e-3/4, penalty=1e+3)

            x_prev = None # interpolate(x, H, self.t_step, h, self.t_step, n_slack=(h-1)*4)
            x_next = qp.solve(x_prev) #x if iter_no > 0 else None)
            if x_next is None:
                break
            x,H = x_next,h
        
        return x,H
Example #2
    def __init__(self, use_simulation=True, which_camera='inclined'):
        self.use_simulation = use_simulation

        # load transform
        self.Trc = np.load(root + 'calibration_files/Trc_' + which_camera + '_PSM1.npy')  # robot to camera
        self.Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera + '.npy')  # pegboard to camera

        # import modules
        if use_simulation:
            pass
        else:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()
        self.block = BlockDetection3D(self.Tpc)
        self.gp = GraspingPose3D()
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionSingleArmOptimized()
        self.dvrk_model = dvrkKinematics()

        # action ordering: (pick peg, place peg) pairs
        self.action_list = np.array([[1, 7], [0, 6], [3, 8], [2, 9], [5, 11], [4, 10],  # left to right
                                     [7, 1], [6, 0], [8, 3], [9, 2], [11, 5], [10, 4]])  # right to left
        self.action_order = 0

        # data members
        self.color = []
        self.point = []
        self.state = ['initialize']
        self.main()
    def compute_drop(self, q2, q3, H=25):
        robot = dvrkKinematics()
        qp = self._new_qp(H, a_next=np.zeros(6))
        qp.constrain_q(0, q2)
        qp.constrain_q(H, q3)
        qp.constrain_v(H, np.zeros(6))
        x = qp.solve()

        p2 = robot.fk(q2)[:3,3]
        p3 = robot.fk(q3)[:3,3]

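        # Same horizon-shrinking scheme as compute_lift, except the rest condition
        # sits at the end of the motion (zero final velocity, a_next) and the
        # intermediate knots track the interpolation between p2 and p3.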
        for h in range(H-1, 3, -1):
            #print(f"solve h={h}, drop")
            qp = self._new_qp(h, a_next=np.zeros(6))
            qp.constrain_q(0, q2)
            qp.constrain_q(h, q3)
            qp.constrain_v(h, np.zeros(6))
            for t in range(1,h):
                q_prev = x[qp._qidx(t):qp._qidx(t+1)]
                p_prev = (p2*(h-t) + p3*t)/h
                qp.constrain_pos(t, q_prev, p_prev, tolerance=1e-3/4, penalty=1e+3)

            x_next = qp.solve() # x if iter_no > 0 else None)
            if x_next is None:
                break
            x,H = x_next,h
        
        return x, H
Example #4
    def __init__(self):
        super(dvrkMotionBridgeC, self).__init__()
        root = "/home/hwangmh/pycharmprojects/FLSpegtransfer/"
        dir = "calibration_files/model_grey_history_peg_sampled/"
        # self.NNController = dvrkNNController(root+dir+"model_hysteresis.out", nb_ensemble=10)
        self.NNController = dvrkNNController(root + dir + "model_grey_history_peg_sampled.out", nb_ensemble=10)
        self.dvrk_model = dvrkKinematics()
    def __init__(self, H, t_step, max_vel, max_acc, max_jerk=None, a_prev=None, a_next=None, minimize="a"):
        assert minimize == "v" or minimize == "a" or minimize == "j"
        self.robot = dvrkKinematics()
        self.max_vel = max_vel # [1.0, 1.0, 1.0, 8.0, 8.0, 8.0]
        self.max_acc = max_acc # [1.0, 1.0, 1.0, 8.0, 8.0, 8.0]
        self.minimize = minimize
        self.max_jerk = max_jerk
        self.t_step = t_step
        self.H = H
        self.dim = 6

        # H+1 q, H+1 v, H a
        self._n_vars = (H+1) * self.dim * 3 - self.dim  # total number of decision variables
        self._v0 = (H+1) * self.dim         # start index of the velocity block
        self._a0 = (H+1) * self.dim * 2     # start index of the acceleration block
        self.init(a_prev, a_next)
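# A minimal indexing sketch (not from the original snippets) for the flat decision
# vector laid out above. It assumes the q block starts at index 0, which is
# consistent with compute_lift reading x[qp._qidx(t):qp._qidx(t+1)], but the
# _qidx helper itself is not shown here.
import numpy as np

H, dim = 4, 6
n_vars = (H + 1) * dim * 3 - dim          # (H+1) q knots, (H+1) v knots, H a knots
v0 = (H + 1) * dim                        # first index of the velocity block
a0 = (H + 1) * dim * 2                    # first index of the acceleration block

x = np.zeros(n_vars)                      # stand-in for a solved trajectory
t = 2
q_t = x[t * dim:(t + 1) * dim]            # joint configuration at knot t
v_t = x[v0 + t * dim:v0 + (t + 1) * dim]  # joint velocity at knot t
a_t = x[a0 + t * dim:a0 + (t + 1) * dim]  # joint acceleration at knot t (t < H)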
    def __init__(self, use_simulation=True, which_camera='inclined'):
        self.use_simulation = use_simulation

        # load transform
        self.Trc1 = np.load(root + 'calibration_files/Trc_' + which_camera +
                            '_PSM1.npy')  # robot to camera
        self.Trc2 = np.load(root + 'calibration_files/Trc_' + which_camera +
                            '_PSM2.npy')  # robot to camera
        self.Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera +
                           '.npy')  # pegboard to camera

        # import modules
        if use_simulation:
            pass
        else:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()
        self.block = BlockDetection3D(self.Tpc)
        self.gp = {
            'PSM1': GraspingPose3D(which_arm='PSM1'),
            'PSM2': GraspingPose3D(which_arm='PSM2')
        }
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionDualArm()
        self.dvrk_model = dvrkKinematics()

        # action ordering: ([pick pegs], [place pegs]) pairs for the two arms
        self.action_list = np.array([
            [[0, 1], [7, 8]],
            [[2, 3], [6, 11]],
            [[4, 5], [9, 10]],  # left to right
            [[7, 8], [0, 1]],
            [[6, 11], [2, 3]],
            [[9, 10], [4, 5]]
        ])  # right to left
        self.action_order = 0

        # data members
        self.color = []
        self.point = []
        self.state = ['initialize']
        self.main()
Example #7
    def __init__(self, comp_hysteresis=False, stop_collision=False):
        super(dvrkController, self).__init__()

        # Members
        self.stop_collision = stop_collision
        self.comp_hysteresis = comp_hysteresis
        self.curr_threshold_low = np.array([[0.1, 0.1, 0.18], [0.1, 0.1, 0.18]])
        self.curr_threshold_high = self.curr_threshold_low*1.5
        self._curr_threshold = self.curr_threshold_low
        self.filter = LPF(fc=3, fs=100, order=2, nb_axis=3)

        # Load models
        if stop_collision:
            self.force_est = dvrkCurrEstDNN(history=50, nb_ensemble=10, nb_axis=3)  # torque prediction model
            for i in range(100):     # model start-up by padding dummy histories
                self.detect_collision()
        if comp_hysteresis:
            self.hyst_comp = dvrkHystCompDNN(history=6)  # hysteresis compensation model
        self.dvrk_model = dvrkKinematics()
    def __init__(self, model='None'):

        # load model
        self.model = model
        if self.model=='neural net':
            from FLSpegtransfer.motion.NNModel import NNModel
            self.NN = NNModel(ensemble=3, horizon=4, iter=3, alpha=0.5, model='forward')
        elif self.model=='linear':
            from FLSpegtransfer.motion.LinearModel import LinearModel
            self.LM = LinearModel(4)

        # define instances
        self.dvrk = dvrkMotionBridgeP()
        self.dvrk_model = dvrkKinematics()

        # limit joints
        self.joint_limits_min = np.deg2rad([-80, -60, 0.08, -100, -90, -100])
        self.joint_limits_min[2] = 0.08   # joint 3 (insertion) is prismatic: limit in meters
        self.joint_limits_max = np.deg2rad([80, 60, 0.245, 100, 90, 100])
        self.joint_limits_max[2] = 0.245  # overwrite the deg2rad value with the metric limit
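    # A minimal sketch (assumed usage, not in the original snippet) of clamping a
    # commanded joint vector to the limits defined above; clip_joints is a
    # hypothetical helper name.
    def clip_joints(self, q):
        return np.clip(q, self.joint_limits_min, self.joint_limits_max)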
Example #9
    def __init__(self, use_controller):
        # motion library
        self.dvrk = dvrkController(arm_name='/PSM1',
                                   comp_hysteresis=True,
                                   stop_collision=False)
        self.use_controller = use_controller
        # self.dvrk = dvrkArm('/PSM1')
        self.dvrk_model = dvrkKinematics()
        self.motion_opt_2wp = CubicOptimizer_2wp()
        self.motion_opt_1wp = CubicOptimizer_1wp()

        # Motion variables
        self.pos_org1 = [0.060, 0.0, -0.095]
        self.rot_org1 = [0.0, 0.0, 0.0, 1.0]
        self.jaw_org1 = [0.0]
        self.pos_org2 = [-0.060, 0.0, -0.095]
        self.rot_org2 = [0.0, 0.0, 0.0, 1.0]
        self.jaw_org2 = [0.0]

        self.offset_grasp = [-0.003, +0.003]
        self.height_ready = -0.125
        self.height_drop = -0.14
        self.jaw_opening = [np.deg2rad(40)]
        self.jaw_closing = [np.deg2rad(0)]
        # constrain the last two variables of block i to be equal (their
        # difference is forced to zero via the new equality row)
        const[i * H + H - 2] = 1.0
        const[i * H + H - 1] = -1.0
        A_eq = np.insert(A_eq, len(A_eq), const, axis=0)  # append the row to A_eq
        b_eq = np.insert(b_eq, len(b_eq), 0.0)            # right-hand side: 0
    return Q, c, A_eq, b_eq


def solve_QP(Q, c, A_eq, b_eq):
    # Stack a KKT-style system for min 0.5*x^T Q x + c^T x with A_eq x = b_eq.
    # Note the identity block where a standard KKT matrix has a zero block: together
    # with the least-squares solve it treats the equality constraints softly rather
    # than enforcing them exactly.
    A = np.block([[Q, A_eq.T], [A_eq, np.eye(np.shape(A_eq)[0])]])
    b = np.concatenate((-c, b_eq))
    result = np.linalg.lstsq(A, b, rcond=None)  # rcond=None silences the FutureWarning
    return result[0][:np.shape(Q)[0]]  # keep only the primal variables x

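# A tiny call sketch for solve_QP above; the *_demo names are made up for
# illustration. Because of the identity block and the least-squares solve, the
# returned point generally does not satisfy A_eq x = b_eq exactly.
Q_demo = np.diag([2.0, 2.0])        # cost 0.5 * x^T Q x = x0^2 + x1^2
c_demo = np.zeros(2)
A_demo = np.array([[1.0, 1.0]])     # intended constraint: x0 + x1 = 1
b_demo = np.array([1.0])
x_demo = solve_QP(Q_demo, c_demo, A_demo, b_demo)
print(x_demo)                       # primal variables only (length 2)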

from FLSpegtransfer.motion.dvrkKinematics import dvrkKinematics
dvrk_model = dvrkKinematics()

# pose information of pick & place
pos1 = np.array(
    [0.14635528297124054, -0.02539699498070678, -0.15560356171404516])
rot1 = np.array([0.0, 0.0, 0.2985186426947612, 0.9544038034101067])
joint1 = dvrk_model.pose_to_joint(pos1, rot1)

pos2 = np.array([0.14635528297124054, -0.02539699498070678, -0.12])
rot2 = np.array([0.0, 0.0, 0.2985186426947612, 0.9544038034101067])
joint2 = dvrk_model.pose_to_joint(pos2, rot2)

pos3 = np.array([0.11511456574520817, -0.03390017669363312, -0.12])
rot3 = np.array([0.0, 0.0, 0.0, 1.0])
joint3 = dvrk_model.pose_to_joint(pos3, rot3)
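# A small sanity-check sketch (not part of the original): if pose_to_joint returns
# a single 6-element joint vector, then fk (used the same way in compute_lift and
# compute_drop above) should map it back onto the commanded position.
p_check = dvrk_model.fk(joint1)[:3, 3]
print(p_check, pos1)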
Example #11
    def __init__(self, use_simulation=True, which_camera='inclined'):
        self.use_simulation = use_simulation

        # load transform
        self.Trc1 = np.load(root + 'calibration_files/Trc_' + which_camera +
                            '_PSM1.npy')  # robot to camera
        self.Trc2 = np.load(root + 'calibration_files/Trc_' + which_camera +
                            '_PSM2.npy')  # robot to camera
        self.Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera +
                           '.npy')  # pegboard to camera

        # import modules
        if use_simulation:
            pass
        else:
            self.zivid = ZividCapture(which_camera=which_camera)
            self.zivid.start()
        self.block = BlockDetection3D(self.Tpc)
        # self.gp = [GraspingPose3D(which_arm='PSM1'), GraspingPose3D(which_arm='PSM2')]
        self.gp = {
            'PSM1': GraspingPose3D(which_arm='PSM1'),
            'PSM2': GraspingPose3D(which_arm='PSM2')
        }
        self.vd = VisualizeDetection()
        self.pegboard = PegboardCalibration()
        self.dvrk_motion = dvrkPegTransferMotionHandOver()
        self.dvrk_model = dvrkKinematics()

        # action ordering: (pick arm, pick peg, place arm, place peg)
        self.action_list = [
            ['PSM2', 1, 'PSM1', 7],  # left to right
            ['PSM2', 0, 'PSM1', 6],
            ['PSM2', 3, 'PSM1', 8],
            ['PSM2', 2, 'PSM1', 9],
            ['PSM2', 5, 'PSM1', 11],
            ['PSM2', 4, 'PSM1', 10],
            ['PSM1', 7, 'PSM2', 1],  # right to left
            ['PSM1', 6, 'PSM2', 0],
            ['PSM1', 8, 'PSM2', 3],
            ['PSM1', 9, 'PSM2', 2],
            ['PSM1', 11, 'PSM2', 5],
            ['PSM1', 10, 'PSM2', 4]
        ]
        self.action_order = [-1, -1]  # [pick action, place action]

        # data members
        self.color = []
        self.point = []
        self.state = []  # [pick state, place state]
        self.pnt_handover = []

        # event
        self.event_handover = threading.Event()
        self.event_waiting = threading.Event()

        # parallelize
        self.initialize()

        # self.thread1 = threading.Thread(target=self.PSM1)
        self.thread2 = threading.Thread(target=self.run_pick)
        # self.thread1.start()
        self.thread2.start()
        self.run_place()
Example #12
    def __init__(self):
        self.arm1 = dvrkArm('/PSM1')
        self.arm2 = dvrkArm('/PSM2')
        self.dvrk_kin = dvrkKinematics()