Example #1
def explore(arm):
    """ Use this to extract positions after we move the dVRK. """
    pos, rot = U.pos_rot_arm(arm, nparrays=True)
    print("pos, rot: {}, {}".format(pos, rot))
    pos[2] += 0.02
    U.move(arm, pos, rot)
    # Re-query so we print where the arm actually ended up, not the target.
    pos, rot = U.pos_rot_arm(arm, nparrays=True)
    print("pos, rot: {}, {}".format(pos, rot))
Example #2
def get_in_good_starting_position(arm, which='arm1'):
    """ 
    Only meant so we get a good starting position, to compensate for how
    commanding the dVRK to go to a position/rotation doesn't actually work that
    well, particularly for the rotations. Some human direct touch is needed.
    """
    assert which == 'arm1'
    pos, rot = U.pos_rot_arm(arm, nparrays=True)
    print("(starting method) starting position and rotation:")
    print(pos, rot)
    U.move(arm, HOME_POS_ARM1, HOME_ROT_ARM1, speed='slow')
    time.sleep(2)
    print("(starting method) position and rotation after moving:")
    pos, rot = U.pos_rot_arm(arm, nparrays=True)
    print(pos, rot)
    print("(Goal was: {} and {}".format(HOME_POS_ARM1, HOME_ROT_ARM1))
    R = U.rotation_matrix_3x3_axis(angle=180, axis='z')
    print("With desired rotation matrix:\n{}".format(R))
    print("Now exiting...")
    sys.exit()
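
For reference, a minimal sketch of what `U.rotation_matrix_3x3_axis(angle=180, axis='z')` presumably returns (the real helper lives in the utilities module; this is just the standard z-axis rotation matrix, with the angle given in degrees):

import numpy as np

def rotation_matrix_3x3_axis_z(angle):
    """ Standard rotation about the z-axis; `angle` is in degrees. """
    theta = np.deg2rad(angle)
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0.],
                     [s,  c, 0.],
                     [0., 0., 1.]])

# For angle=180 this is (up to floating point) diag(-1, -1, 1).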
Example #3
def test(arm):
    """ Use to see if we can get it to move it to roughly the right target. """
    spos, srot = starts[0]
    tpos, trot = targets[1]

    U.move(arm, spos, srot)
    print("moved: {}".format(U.pos_rot_arm(arm, nparrays=True)))
    time.sleep(2)

    spos[2] += 0.02
    U.move(arm, spos, srot)
    print("moved: {}".format(U.pos_rot_arm(arm, nparrays=True)))
    time.sleep(2)

    U.move(arm, tpos, trot)
    print("moved: {}".format(U.pos_rot_arm(arm, nparrays=True)))
    time.sleep(2)

    trot[1] = 80
    U.move(arm, tpos, trot)
    print("moved: {}".format(U.pos_rot_arm(arm, nparrays=True)))
Example #4
def get_tip_needle(arm, d, idx, rot_targ):
    """ 
    Now the user has to click to get the needle tips. We click twice, one for
    the left and one for the right, and return the camera coordinates.
    """
    global CENTERS, image
    pos, rot = U.pos_rot_arm(arm, nparrays=True)
    old_len = len(CENTERS)

    # Same click procedure for the left and then the right camera image.
    for side in ('left', 'right'):
        w = "{}-th rot_targ: {}, {} camera. Drag boxes on needle tip, then SPACE.".format(
            idx, rot_targ, side)
        image = np.copy(d.left['raw'] if side == 'left' else d.right['raw'])
        cv2.namedWindow(w, cv2.WINDOW_NORMAL)
        cv2.setMouseCallback(w, click)
        cv2.resizeWindow(w, 1800, 2600)
        cv2.imshow(w, image)
        key = cv2.waitKey(0)
        if key in U.ESC_KEYS:
            sys.exit()
        cv2.destroyAllWindows()

    assert len(CENTERS) == old_len + 2
    assert len(POINTS) % 2 == 0
    needle_tip_c = U.camera_pixels_to_camera_coords(CENTERS[-2],
                                                    CENTERS[-1],
                                                    nparrays=True)
    return needle_tip_c
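
The `click` mouse callback registered above is defined elsewhere in the script. A minimal sketch of how such a callback could maintain the global `POINTS` and `CENTERS` lists (two clicked corners per dragged box, one stored center per box), consistent with the asserts in `get_tip_needle`:

import cv2

POINTS  = []  # Two clicked corners per dragged box.
CENTERS = []  # One (x, y) box center per completed drag.

def click(event, x, y, flags, param):
    """ Hypothetical callback: drag a box, record its corners and center. """
    global POINTS, CENTERS
    if event == cv2.EVENT_LBUTTONDOWN:
        POINTS.append((x, y))
    elif event == cv2.EVENT_LBUTTONUP:
        POINTS.append((x, y))
        (x0, y0), (x1, y1) = POINTS[-2], POINTS[-1]
        CENTERS.append(((x0 + x1) // 2, (y0 + y1) // 2))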
Example #5
def evaluate(arm, d, args, wrist_map_c2l, wrist_map_c2r, wrist_map_l2r):
    """ Stage 2. 
    
    Testing stage. At a high level, move the dVRK end-effector to various
    locations and once again explicitly record needle tips. Steps:

    1. Start with the dVRK gripping a needle. Try different orientations, to
    ensure that performance is orientation-invariant. This time, we should have
    the suturing phantom there! For now we still have the click interface to
    accurately compute psi.

    2. To get psi, images pop up from left and right cameras. Press ESC to
    terminate the program. If we want to proceed, then click and drag boxes of
    target locations for the needle tip.
    
    3. This time, we let the needle move. We do NOT explicitly compute an offset
    vector because we do not know where the dVRK's end-effector goes.
    """
    global CENTERS, image
    which = 'left' if args.arm == 1 else 'right'
    pname = args.path_offsets + '/offsets_{}_arm_data.p'.format(which)
    R = wrist_map_c2l if which == 'left' else wrist_map_c2r
    offsets = U.load_pickle_to_list(pname)
    print("loaded offsets, length: {}".format(len(offsets)))

    # Do stuff here just to grip the needle and get set up for computing phi/psi.
    arm.open_gripper(50)
    name = "Left image for evaluation stage. MOVE DVRK, then press any key"
    U.call_wait_key( cv2.imshow(name, d.left['raw']) )
    arm.close_gripper()
    pos,rot = U.pos_rot_arm(arm, nparrays=True)
    pos[2] += args.z_height
    U.move(arm, pos, rot)
    time.sleep(2)

    # The user now clicks stuff. LEFT CAMERA, then RIGHT CAMERA.
    click_stuff('left')
    click_stuff('right')
    assert len(CENTERS) == 4, "Error, len(CENTERS): {}".format(len(CENTERS))
    assert len(POINTS) == 8, "Error, len(POINTS): {}".format(len(POINTS))

    # New to this method: (eventually) move the dVRK to a different location.
    print("Not moving to a new spot, for now.")

    # Now we can actually get phi/psi.
    pos_g,rot_g = U.pos_rot_arm(arm, nparrays=True)
    camera_tip  = U.camera_pixels_to_camera_coords(CENTERS[-4], CENTERS[-2], nparrays=True)
    camera_grip = U.camera_pixels_to_camera_coords(CENTERS[-3], CENTERS[-1], nparrays=True)
    ct_h        = np.concatenate( (camera_tip,np.ones(1)) )
    cg_h        = np.concatenate( (camera_grip,np.ones(1)) )
    base_t      = R.dot(ct_h)
    base_g      = R.dot(cg_h)
    camera_dist = np.linalg.norm( camera_tip-camera_grip )

    # Distance based on the click interface (should be the same as the camera
    # one) and based on the dVRK, using its cartesian position function (this
    # will be different). Unfortunately the dVRK one is the easiest to use in
    # real applications.
    base_dist_click = np.linalg.norm( base_t-base_g )
    base_dist_dvrk  = np.linalg.norm( base_t-pos_g )
    assert np.abs(base_dist_click - camera_dist) < 1e-5

    # Compute phi and psi based on the STARTING configuration.
    phi_c, psi_c = compute_phi_and_psi(d=base_dist_click)
    phi_d, psi_d = compute_phi_and_psi(d=base_dist_dvrk)

    # --------------------------------------------------------------------------
    # Compute the offset vector wrt the base. The order of subtraction shouldn't
    # matter as long as we are consistent in real applications, where we'd do
    # (pos_n_tip_wrt_base) - (offset).
    #
    # NOTE: this is only the offset vector wrt the starting position that we
    # have, but there are two hypotheses.
    #
    #   1. That we can avoid computing these if we pre-compute them beforehand
    #   (as we have code for), so maybe we don't need a click interface.
    #   2. That getting the needle to go somewhere is just a matter of doing the
    #   arithmetic specified earlier in the comments!
    #
    # For now we'll compute the two offsets wrt the base because we might as
    # well use them to simplify the task (though this only simplifies detecting
    # the needle tip and where the dVRK grips it).
    # --------------------------------------------------------------------------
    offset_wrt_base_click = base_t - base_g
    offset_wrt_base_dvrk  = base_t - pos_g
    offset_saved = get_offset(offsets, psi=psi_c, kind='click', which=which, debug=True)

    print("offset_wrt_base_click: {}".format(offset_wrt_base_click))
    print("offset_wrt_base_dvrk:  {}".format(offset_wrt_base_dvrk))
    print("offset_saved:          {}".format(offset_saved))
Example #6
def offsets(arm, d, args, wrist_map_c2l, wrist_map_c2r, wrist_map_l2r):
    """ Stage 1. 
    
    1. Before running the python script, get the setup as shown in the image in
    the README where the needle is on the foam and the dVRK (w/SNAP) is about to
    grip it. 
    
    2. Then, this code waits until the user has manipulated the master tools to
    pick up the needle at some location. EDIT: actually it's easier to just move
    directly and then assume we translate up by a few mm; I'm not getting
    responses from the master tools, for some reason.
    
    3. Images will pop up. Press ESC to terminate the program. Otherwise, we
    click and crop: (a) the end-point of the needle AND (b) the place where the
    dVRK grasps the needle. The latter is mainly to see if this coincides with
    the dVRK's actual position. MUST BE DONE IN ORDER!
    
    4. This saves into a pickle file by appending, so in theory we continually
    add data points. (The main catch: if we change the setup, make sure we're
    NOT counting the older points.)
    
    Repeat the process by moving the master tools again, and go to step 2.
    """
    global CENTERS, image
    which = 'left' if args.arm == 1 else 'right'
    pname = args.path_offsets + '/offsets_{}_arm_data.p'.format(which)
    R = wrist_map_c2l if which == 'left' else wrist_map_c2r
    num_offsets = 0

    while True:
        # Just an overall view before grabbing needle. OK to only see the left
        # image. And BTW, this is when we change the dVRK to get more data.
        arm.open_gripper(50)
        print("\nNow starting new offset with {} offsets so far...".format(num_offsets))
        name = "Left image w/{} offsets so far. MOVE DVRK, press any key".format(num_offsets)
        U.call_wait_key( cv2.imshow(name, d.left['raw']) )
        arm.close_gripper()
        pos,rot = U.pos_rot_arm(arm, nparrays=True)
        pos[2] += args.z_height
        U.move(arm, pos, rot)
        time.sleep(2)

        # The user now clicks stuff. LEFT CAMERA, then RIGHT CAMERA.
        click_stuff('left')
        click_stuff('right')

        # Compute data to store. We stored left tip, left grip, right tip, right grip.
        pos_g,rot_g = U.pos_rot_arm(arm, nparrays=True)
        assert len(CENTERS) % 4 == 0, "Error, len(CENTERS): {}".format(len(CENTERS))
        camera_tip  = U.camera_pixels_to_camera_coords(CENTERS[-4], CENTERS[-2], nparrays=True)
        camera_grip = U.camera_pixels_to_camera_coords(CENTERS[-3], CENTERS[-1], nparrays=True)

        # Map stuff to stuff. The `h` in `xyz_h` refers to homogeneous coordinates.
        ct_h = np.concatenate( (camera_tip,np.ones(1)) )
        cg_h = np.concatenate( (camera_grip,np.ones(1)) )
        base_t = R.dot(ct_h)
        base_g = R.dot(cg_h)
        camera_dist = np.linalg.norm( camera_tip-camera_grip )

        # Distance based on the click interface (should be the same as the
        # camera one) and based on the dVRK, using its cartesian position
        # function (this will be different). Unfortunately the dVRK one is the
        # easiest to use in real applications.
        base_dist_click = np.linalg.norm( base_t-base_g )
        base_dist_dvrk  = np.linalg.norm( base_t-pos_g )
        assert np.abs(base_dist_click - camera_dist) < 1e-5

        # Compute the offset vector wrt the base frame. The order of subtraction
        # shouldn't matter as long as we are consistent in real applications,
        # where we'd do (pos_n_tip_wrt_base) - (offset).
        offset_wrt_base_click = base_t - base_g
        offset_wrt_base_dvrk  = base_t - pos_g

        phi_c, psi_c = compute_phi_and_psi(d=base_dist_click)
        phi_d, psi_d = compute_phi_and_psi(d=base_dist_dvrk)

        # Bells and whistles.
        base = 'base_'+which+'_'
        info = {}
        info['pos_g_dvrk'] = pos_g
        info['rot_g_dvrk'] = rot_g
        info['camera_tip'] = camera_tip
        info['camera_grip'] = camera_grip
        info[base+'tip'] = base_t
        info[base+'grip'] = base_g
        info['camera_dist'] = camera_dist 
        info[base+'dist_click'] = base_dist_click
        info[base+'dist_dvrk'] = base_dist_dvrk
        info[base+'offset_click'] = offset_wrt_base_click
        info[base+'offset_dvrk'] = offset_wrt_base_dvrk
        info['phi_click_deg'] = phi_c
        info['psi_click_mm'] = psi_c
        info['phi_dvrk_deg'] = phi_d
        info['psi_dvrk_mm'] = psi_d

        num_offsets += 1
        U.store_pickle(fname=pname, info=info, mode='a')
        num_total = U.get_len_of_pickle(pname)

        print("Computed and saved {} offset vectors in this session".format(num_offsets))
        print("We have {} items total (including prior sessions)".format(num_total))
        print("  pos: {}\n  rot: {}".format(pos_g, rot_g))
        print("  for tip, CENTER coords (left,right): {}, {}".format(CENTERS[-4], CENTERS[-2]))
        print("  for grip, CENTER coords (left,right): {}, {}".format(CENTERS[-3], CENTERS[-1]))
        print("  camera_tip:  {}".format(camera_tip))
        print("  camera_grip: {}".format(camera_grip))
        print("  base_{}_tip:  {}".format(which, base_t))
        print("  base_{}_grip: {}".format(which, base_g))
        print("  camera_dist (mm):      {:.2f}".format(camera_dist*1000.))
        print("  base_dist_camera (mm): {:.2f}".format(base_dist_click*1000.))
        print("  base_dist_dvrk (mm):   {:.2f}".format(base_dist_dvrk*1000.))
        print("  camera, phi: {:.2f}, psi: {:.2f}".format(phi_c, psi_c))
        print("  base,   phi: {:.2f}, psi: {:.2f}".format(phi_d, psi_d))
Example #7
def collect_data(arm, R, wrist_map_c2l, d):
    """ Collects data points to determine which rotation matrix we're using.

    The `t_st` and `R_st` define the rigid body from the tool to the arm base.
    Don't forget that the needle must be gripped with SNAP so that the tip
    vector `n_t` remains consistent wrt the tool (t) frame.

    Needs to be called directly after `collect_tip_data()` so that the position
    of the needle tip wrt the TOOL frame is the same as earlier when we were
    explicitly computing/estimating that in `collect_tip_data()`. We don't
    actually need the `n_t` data here as that's needed in the LAST step.
    """
    R_z = R
    pos, rot = U.pos_rot_arm(arm, nparrays=True)
    t_st = pos
    data = defaultdict(list)

    # We'll test out these equally-spaced values. Note 180 = -180.
    yaws = [-30, 0, 30]
    pitches = [-30, 0, 30]
    rolls = [-160, -180, 160]
    idx = 0

    # --------------------------------------------------------------------------
    # I think it helps the dVRK to move to the first rotation *incrementally*.
    print("We first incrementally move to the starting rotation ...")
    for rot_step in ([0, 0, -160], [0, -30, -160], [-30, -30, -160]):
        U.move(arm, pos=t_st, rot=rot_step)
        time.sleep(1)
        pos, rot = U.pos_rot_arm(arm, nparrays=True)
        print("we have pos, rot: {}, {}".format(pos, rot))

    print("(End of incrementally moving to start)\n")
    # --------------------------------------------------------------------------

    # NOW begin the loop over different possible rotations.
    for alpha in yaws:
        for beta in pitches:
            for gamma in rolls:
                idx += 1
                rot_targ = [alpha, beta, gamma]
                U.move(arm, pos=t_st, rot=rot_targ)
                time.sleep(2)
                pos, rot = U.pos_rot_arm(arm, nparrays=True)

                # A human now clicks on the windows to get needle TIP position.
                # Actually we probably shouldn't use this specific map as we may
                # get better calibration later, but it doesn't hurt to include.
                needle_tip_c = get_tip_needle(arm, d, idx, rot_targ)
                needle_tip_c_h = np.concatenate((needle_tip_c, np.ones(1)))
                needle_tip_l = wrist_map_c2l.dot(needle_tip_c_h)

                # Bells and whistles, a bit inefficient but whatever.
                data['pos_tool_wrt_s_targ'].append(t_st)
                data['pos_tool_wrt_s_code'].append(pos)
                data['rot_tool_wrt_s_targ'].append(rot_targ)
                data['rot_tool_wrt_s_code'].append(rot)
                data['pos_ntip_wrt_c_clicks'].append(needle_tip_c)
                data['pos_ntip_wrt_s'].append(needle_tip_l)

                print("\nAdding {}-th data point".format(idx))
                print("TARGET (yaw,pitch,roll):  {}".format(rot_targ))
                print("Actual (yaw,pitch,roll):  {}".format(rot))
                print("Target pos (i.e., t_st):  {}".format(t_st))
                print("Actual pos from command:  {}".format(pos))

    return data
Example #8
def collect_tip_data(arm, R_real, R_desired, wrist_map_c2l, d):
    """ Collects data points on the needle tips.
    
    We want to be at rotation [0, 0, 180] (equivalently, -180 for the last part,
    the `roll`) so that we can assume we know the rotation matrix. Assumes that
    the roll belongs to the z-axis. Due to imperfections, we won't be exact.

    The goal here is to get as good an estimate as possible of the position of
    the needle TIP wrt the TOOL frame. We'll run this through several different
    positions, each time clicking to get an estimate, and then we average those
    together.
    """
    data = defaultdict(list)
    home_pos = np.copy(HOME_POS_ARM1)
    home_rot = np.copy(HOME_ROT_ARM1)
    assert (home_rot[0] == 0 and home_rot[1] == 0 and home_rot[2] == 180)
    idx = 0

    # Slightly adjust position to get different data points in x-y dims.
    deltas_meters = [-0.009, -0.003, 0.003, 0.009]

    # NOW begin the loop over different possible positions. Obviously we need to
    # always go to the `home_rot`, though some imprecisions will result.
    for dx in deltas_meters:
        for dy in deltas_meters:
            idx += 1
            pos_tool_wrt_base_targ = [
                home_pos[0] + dx, home_pos[1] + dy, home_pos[2]
            ]
            U.move(arm, pos_tool_wrt_base_targ, home_rot)
            time.sleep(2)
            pos_tool_wrt_base_code, rot_tool_wrt_base_code = U.pos_rot_arm(
                arm, nparrays=True)

            # A human now clicks on the windows to get needle TIP position.
            # Unfortunately this calibration may be quite erroneous, but if we
            # save all the files we might be able to re-run w/out re-clicking.
            needle_tip_c = get_tip_needle(arm, d, idx, home_rot)
            needle_tip_c_h = np.concatenate((needle_tip_c, np.ones(1)))
            needle_tip_l = wrist_map_c2l.dot(needle_tip_c_h)

            # Compute the needle tip wrt TOOL frame; approximate with R_z(roll).
            roll = rot_tool_wrt_base_code[2]
            R = U.rotation_matrix_3x3_axis(angle=roll, axis='z')
            Rinv = np.linalg.inv(R)
            needle_tip_r = Rinv.dot(needle_tip_l - pos_tool_wrt_base_code)

            # Bells and whistles, a bit inefficient but whatever.
            data['pos_tool_wrt_s_targ'].append(pos_tool_wrt_base_targ)
            data['pos_tool_wrt_s_code'].append(pos_tool_wrt_base_code)
            data['rot_tool_wrt_s_targ'].append(home_rot)
            data['rot_tool_wrt_s_code'].append(rot_tool_wrt_base_code)
            data['pos_ntip_wrt_c'].append(needle_tip_c)
            data['pos_ntip_wrt_s'].append(needle_tip_l)
            data['pos_ntip_wrt_r'].append(needle_tip_r)

            print("\nAdding {}-th data point for `collect_tip_data()`.".format(
                idx))
            print("pos_tool_wrt_s_targ:  {}".format(
                np.array(pos_tool_wrt_base_targ)))
            print("pos_tool_wrt_s_code:  {}".format(pos_tool_wrt_base_code))
            print("rot_tool_wrt_s_targ:  {}".format(home_rot))
            print("rot_tool_wrt_s_code:  {}".format(rot_tool_wrt_base_code))
            print("pos_ntip wrt_c:       {}".format(needle_tip_c))
            print("pos_ntip_wrt_s:       {}".format(needle_tip_l))
            print("pos_ntip_wrt_r:       {}".format(needle_tip_r))

    # This was the whole point of the method.
    print("\nFinished collecting data for computing needle_tip_r.")
    n_t = np.zeros(3)
    for idx, item in enumerate(data['pos_ntip_wrt_r']):
        print("{}, {}".format(item, idx))
        n_t += item
    n_t /= len(data['pos_ntip_wrt_r'])
    print("average `n_t` to use later: {}\n".format(n_t))
    return data
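
Once the averaged `n_t` is available, the forward relation implied by the computation above (inverting `n_t = R_z(roll)^{-1} (tip_base - pos_tool)`) would let us predict the needle tip in the base frame from the tool pose alone, with no clicking. A sketch, reusing the same utilities module:

def predict_tip_in_base(n_t, pos_tool, roll_deg):
    """ Sketch: predicted needle tip wrt the base frame, given the averaged
    n_t and the tool pose, approximating the rotation with R_z(roll). """
    R = U.rotation_matrix_3x3_axis(angle=roll_deg, axis='z')
    return R.dot(n_t) + pos_tool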
Example #9
    wrist_map_l2r = U.load_pickle_to_list(PATH_CALIB + 'wrist_map_l2r.p',
                                          squeeze=True)

    # We're on stage 1, 2, or 3. ***ADJUST THIS***.
    stage = 3

    if stage == 1:
        get_in_good_starting_position(arm1)

    elif stage == 2:
        assert not os.path.isfile(ROT_FILE)
        assert not os.path.isfile(TIP_FILE)
        arm1.open_gripper(60)
        time.sleep(2)  # PUT NEEDLE INSIDE THE GRIPPER, WITH SNAP!
        arm1.close_gripper()
        pos, rot = U.pos_rot_arm(arm1, nparrays=True)
        print("starting position and rotation:")
        print(pos, rot)
        print("HOME_POS_ARM1: {}".format(HOME_POS_ARM1))
        print("HOME_ROT_ARM1: {}".format(HOME_ROT_ARM1))
        R_real = U.rotation_matrix_3x3_axis(angle=rot[2], axis='z')
        R_desired = U.rotation_matrix_3x3_axis(angle=180, axis='z')
        print("With actual rotation matrix:\n{}".format(R_real))
        print("With desired rotation matrix:\n{}".format(R_desired))

        # Get position of the needle TIP wrt the TOOL frame.
        tip_data = collect_tip_data(arm1, R_real, R_desired, wrist_map_c2l, d)
        U.store_pickle(fname=TIP_FILE, info=tip_data)

        # Now collect data from different rotations, using SAME needle grip.
        # This is why the code must be run sequentially in the same stage.
Example #10
def collect_guidelines(args, arm, d):
    """ Gather statistics about the workspace on how safe we can set things.
    Save things in a pickle file specified by the `directory` parameter. Click
    the ESC key to exit the program and restart if I've made an error. BTW, the
    four poses we collect will be the four "home" positions that I use later,
    though with more z-coordinate offset.

    Some information:

        `yaw` must be limited in [-180,180]  # But actually, [-90,90] is OK.
        `pitch` must be limited in [-50,50]  # I _think_ ...
        `roll` must be limited in [-180,180] # I think ...

    Remember, if I change the numbers, it doesn't impact the code until
    `guidelines.p` is re-built!
    """
    # Add stuff we should already know, particularly the rotation ranges.
    info = {}
    info['min_yaw'] = 30
    info['max_yaw'] = 150
    info['min_pitch'] = 40
    info['max_pitch'] = 80

    # Roll is annoying because of the values the dVRK provides.
    info['roll_neg_ubound'] = -140  # (-180, roll_neg_ubound)
    info['roll_pos_lbound'] = 140  # (roll_pos_lbound, 180)
    info['min_pos_roll'] = 40
    info['max_pos_roll'] = 180
    info['min_neg_roll'] = -180
    info['max_neg_roll'] = -150

    # Move arm to positions to determine approximately safe ranges for x,y,z
    # values. All the `pos_{lr,ll,ul,ur}` are in robot coordinates.
    U.call_wait_key(
        cv2.imshow("Left camera (move to lower right corner now!)",
                   d.left['raw']))
    info['pos_lr'], info['rot_lr'] = U.pos_rot_arm(arm)
    U.call_wait_key(
        cv2.imshow("Left camera (move to lower left corner now!)",
                   d.left['raw']))
    info['pos_ll'], info['rot_ll'] = U.pos_rot_arm(arm)
    U.call_wait_key(
        cv2.imshow("Left camera (move to upper left corner now!)",
                   d.left['raw']))
    info['pos_ul'], info['rot_ul'] = U.pos_rot_arm(arm)
    U.call_wait_key(
        cv2.imshow("Left camera (move to upper right corner now!)",
                   d.left['raw']))
    info['pos_ur'], info['rot_ur'] = U.pos_rot_arm(arm)

    # So P[:,0] is a vector of the x's, P[:,1] vector of y's, P[:,2] vector of z's.
    p_lr = np.squeeze(np.array(info['pos_lr']))
    p_ll = np.squeeze(np.array(info['pos_ll']))
    p_ul = np.squeeze(np.array(info['pos_ul']))
    p_ur = np.squeeze(np.array(info['pos_ur']))
    P = np.vstack((p_lr, p_ll, p_ul, p_ur))

    # Get ranges. This is a bit of a heuristic but generally good I think.
    info['min_x'] = np.min([p_lr[0], p_ll[0], p_ul[0], p_ur[0]])
    info['max_x'] = np.max([p_lr[0], p_ll[0], p_ul[0], p_ur[0]])
    info['min_y'] = np.min([p_lr[1], p_ll[1], p_ul[1], p_ur[1]])
    info['max_y'] = np.max([p_lr[1], p_ll[1], p_ul[1], p_ur[1]])

    # For z, we fit a plane. See https://stackoverflow.com/a/1400338/3287820
    # Find (alpha, beta, gamma) s.t. f(x,y) = alpha*x + beta*y + gamma = z.
    A = np.zeros((3, 3))  # Must be symmetric!
    A[0, 0] = np.sum(P[:, 0] * P[:, 0])
    A[0, 1] = np.sum(P[:, 0] * P[:, 1])
    A[0, 2] = np.sum(P[:, 0])
    A[1, 0] = np.sum(P[:, 0] * P[:, 1])
    A[1, 1] = np.sum(P[:, 1] * P[:, 1])
    A[1, 2] = np.sum(P[:, 1])
    A[2, 0] = np.sum(P[:, 0])
    A[2, 1] = np.sum(P[:, 1])
    A[2, 2] = P.shape[0]

    b = np.array([
        np.sum(P[:, 0] * P[:, 2]),
        np.sum(P[:, 1] * P[:, 2]),
        np.sum(P[:, 2])
    ])

    x = np.linalg.solve(A, b)  # Solve the normal equations directly.
    info['z_alpha'] = x[0]
    info['z_beta'] = x[1]
    info['z_gamma'] = x[2]

    # Sanity checks before saving stuff.
    assert info['min_x'] < info['max_x']
    assert info['min_y'] < info['max_y']
    assert P.shape == (4, 3)

    print("\nThe key/val pairings for {}.".format(args.directory))
    keys = sorted(info.keys())
    for key in keys:
        print("{:20} ==> {}".format(key, info[key]))
    print("")
    print("P:\n{}".format(P))
    print("A:\n{}".format(A))
    print("x:\n{}".format(x))
    print("b:\n{}".format(b))
    U.store_pickle(fname=args.directory, info=info, mode='w')
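
The 3x3 system solved above is just the normal equations for a least-squares plane fit z = alpha*x + beta*y + gamma; as a sanity check, the same coefficients can be obtained directly with `np.linalg.lstsq`:

import numpy as np

def fit_plane_lstsq(P):
    """ Least-squares plane fit z = alpha*x + beta*y + gamma, where P is an
    (N, 3) array of (x, y, z) points, N >= 3. Returns (alpha, beta, gamma). """
    M = np.column_stack((P[:, 0], P[:, 1], np.ones(P.shape[0])))
    coeffs, _, _, _ = np.linalg.lstsq(M, P[:, 2], rcond=None)
    return coeffs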
Example #11
# Recorded (position, rotation) pose snapshots. Note that only the last `w1`
# binding takes effect; the earlier ones are kept around for reference.
w1 = [(-0.125, 0.068, -0.065), (-178.8, 20.9, 143.9)]
w1 = [(-0.131, 0.069, -0.063), (-178.4, 19.0, 142.8)]
w1 = [(-0.130, 0.067, -0.063), (-178.2, 19.3, 138.2)]
w1 = [(-0.132, 0.068, -0.063), (-179.0, 18.9, 137.7)]

v0 = [(-0.103, 0.039, -0.042), (-170.9, 21.1, 130.5)]
u0 = [( 0.038, 0.032, -0.114), (-1.8, 16.2, -79.8)]



if __name__ == "__main__":
    arm1, arm2, d = U.init()
    arm1.close_gripper()
    arm2.close_gripper()

    print(U.pos_rot_arm(arm1, nparrays=True))
    print(U.pos_rot_arm(arm2, nparrays=True))

    # The x* and y* waypoints below are presumably defined elsewhere in the
    # full script (not shown in this snippet), in the same format as `w1`.
    U.move(arm1, x7[0],  x7[1])
    U.move(arm1, x11[0], x11[1])
    U.move(arm1, x9[0],  x9[1])
    U.move(arm1, x12[0], x12[1])

    U.move(arm2, y0[0], y0[1])
    arm2.open_gripper(70)
    time.sleep(2)
    U.move(arm2, y1[0], y1[1])
    arm2.open_gripper(-10)  # A negative angle presumably clamps the gripper shut.
    time.sleep(2)

    arm1.open_gripper(70)
Example #12

def explore(arm):
    """ Use this to extract positions after we move the dVRK. """
    pos, rot = U.pos_rot_arm(arm, nparrays=True)
    print("pos, rot: {}, {}".format(pos, rot))
    pos[2] += 0.02
    U.move(arm, pos, rot)
    # Re-query so we print where the arm actually ended up, not the target.
    pos, rot = U.pos_rot_arm(arm, nparrays=True)
    print("pos, rot: {}, {}".format(pos, rot))


if __name__ == "__main__":
    arm1, arm2, d = U.init()
    arm1.close_gripper()

    #explore(arm1)
    #test(arm1)

    print("start {}".format(U.pos_rot_arm(arm1, nparrays=True)))

    # This will fail unless the arm is already starting near that position.
    pos, rot = [0.039, 0.045, -0.074], [45.7, 64.7, 62.2]
    U.move(arm1, pos, rot)
    print(U.pos_rot_arm(arm1, nparrays=True))
    time.sleep(2)

    #rot = [79.14, 45.98, 57.77] # fails a lot
    rot = [60., 50., 60.]
    U.move(arm1, pos, rot)
    print(U.pos_rot_arm(arm1, nparrays=True))