Code example #1
def find_specific_spot(image):
    """ 
    Given thresholded image, have to figure out _where_ the end-effector tip is located.
    Parameter `image` should be a thresholded image from HSV stuff.
    To make it easier we should also have a bounding box condition.
    Note: to detect contours, we need grayscale (monochrome) images.
    """
    image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    img = image.copy()
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    utilities.call_wait_key(cv2.imshow("Grayscale image", image))

    # Detect contours *inside* the bounding box heuristic.
    # (xx, yy) is the top-left corner of the box; (ww, hh) is its width and height, in pixels.
    xx, yy, ww, hh = 650, 25, 800, 825
    (cnts, _) = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contained_cnts = []
    print("number of cnts: {}".format(len(cnts)))

    for c in cnts:
        try:
            # Find the centroids of the contours in _pixel_space_. :)
            M = cv2.moments(c)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            # Enforce it to be within bounding box.
            print(xx,cX,yy,cY)
            if (xx < cX < xx+ww) and (yy < cY < yy+hh):
                print("appending!")
                contained_cnts.append(c)
        except ZeroDivisionError:
            # Degenerate contour with zero area (M["m00"] == 0); skip it.
            pass
    print("number of contained contours: {}".format(len(contained_cnts)))

    contours = sorted(contained_cnts, key=cv2.contourArea, reverse=True)[:1]
    processed = []
    for c in contours:
        try:
            M = cv2.moments(c)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02*peri, True)
            processed.append( (cX,cY,approx,peri) )
        except ZeroDivisionError:
            # Degenerate contour with zero area; skip it.
            pass
    print("number of processed contours: {}".format(len(processed)))

    for i, (cX, cY, approx, peri) in enumerate(processed):
        cv2.circle(img, (cX,cY), 50, (0,0,255))
        cv2.drawContours(img, [approx], -1, (0,255,0), 3)
        cv2.putText(img=img, text=str(i), org=(cX,cY), 
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1, color=(0,0,0), thickness=2)
    utilities.call_wait_key(cv2.imshow("With contours!", img))
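The `(cnts, _) = cv2.findContours(...)` unpacking above assumes an OpenCV version that returns two values (2.x or 4.x); OpenCV 3.x returns three, so that line would raise a ValueError there. If the function needs to run across versions, a small compatibility helper along these lines (a sketch, not part of the original code) keeps the rest unchanged:

import cv2

def find_contours_compat(binary_img, mode=cv2.RETR_TREE,
                         method=cv2.CHAIN_APPROX_SIMPLE):
    """Return the contour list regardless of OpenCV version.

    OpenCV 2.x and 4.x return (contours, hierarchy), while 3.x returns
    (image, contours, hierarchy); taking the second-to-last element of the
    result works for all three.
    """
    results = cv2.findContours(binary_img.copy(), mode, method)
    return results[-2]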
Code example #2
def proof_of_concept_part_one(left_im, right_im, left_contours, right_contours, arm):
    """ 
    See my notes. I'm effectively trying to argue why my DL way with automatic trajectory
    collection will work. Maybe this will work as a screenshot or picture sequence in an
    eventual paper? I hope ...
    """
    center_left  = get_single_contour_center(left_im,  left_contours)
    center_right = get_single_contour_center(right_im, right_contours)
    cv2.destroyAllWindows()
    camera_pt = utils.camera_pixels_to_camera_coords(center_left, center_right)
    print("(left, right) = ({}, {})".format(center_left, center_right))
    print("(cx,cy,cz) = ({:.4f}, {:.4f}, {:.4f})".format(*camera_pt))

    # Get rotation set to -90 to start.
    pos, rot = utils.lists_of_pos_rot_from_frame(arm.get_current_cartesian_position())
    utils.move(arm, pos, [-90, 10, -170], 'Slow') 

    # Now do some manual movement.
    arm.open_gripper(90)
    utils.call_wait_key( cv2.imshow("(left image) move to a point while keeping the yaw at roughly -90", left_im) )
    pos, rot = utils.lists_of_pos_rot_from_frame(arm.get_current_cartesian_position())
    utils.move(arm, pos, [-90, 10, -170], 'Slow')  # Because we probably adjusted angle by mistake.
    pos2, rot2 = utils.lists_of_pos_rot_from_frame(arm.get_current_cartesian_position())

    # At this point, pos2, rot2 should be the -90 degree yaw configuration that can grip the target.
    print("pos,rot -90 yaw:\n({:.4f}, {:.4f}, {:.4f}), ({:.4f}, {:.4f}, {:.4f})".format(
        pos2[0],pos2[1],pos2[2],rot2[0],rot2[1],rot2[2]))

    # Next, MANUALLY move to the reverse angle, +90 yaw. This SHOULD use a different position,
    # and it takes some practice to find a good location. The reason for manual movement is
    # that when the code was told to go to +90 yaw, it went to +180 for some reason ...
    utils.call_wait_key( cv2.imshow("(left image) move to +90 yaw where it can grip the seed", left_im) )
    pos3, rot3 = utils.lists_of_pos_rot_from_frame(arm.get_current_cartesian_position())
    print("pos,rot after manual +90 yaw change:\n({:.4f}, {:.4f}, {:.4f}), ({:.4f}, {:.4f}, {:.4f})".format(pos3[0],pos3[1],pos3[2],rot3[0],rot3[1],rot3[2]))

    # Automatic correction in case we made slight error.
    utils.move(arm, pos3, [90, 0, -165], 'Slow') 
    pos4, rot4 = utils.lists_of_pos_rot_from_frame(arm.get_current_cartesian_position())

    # Now pos4, rot4 should be the +90 degree yaw configuration that can grip the target.
    print("pos,rot +90 yaw:\n({:.4f}, {:.4f}, {:.4f}), ({:.4f}, {:.4f}, {:.4f})".format(
        pos4[0],pos4[1],pos4[2],rot4[0],rot4[1],rot4[2]))
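The call to `utils.camera_pixels_to_camera_coords(center_left, center_right)` maps a matched pair of left/right pixels to a 3D point in the camera frame. That utility is not shown here; the following is only a sketch of how such a mapping could be implemented with OpenCV's triangulation, assuming rectified stereo projection matrices `P_left` and `P_right` are available from calibration (an assumption, not something defined in this code):

import numpy as np
import cv2

def pixels_to_camera_coords(left_px, right_px, P_left, P_right):
    """Triangulate one (left, right) pixel correspondence into a 3D camera-frame point.

    `left_px` and `right_px` are (x, y) pixel tuples; `P_left` and `P_right` are
    the 3x4 projection matrices of the rectified stereo pair.
    """
    left  = np.array(left_px,  dtype=np.float64).reshape(2, 1)
    right = np.array(right_px, dtype=np.float64).reshape(2, 1)
    point_h = cv2.triangulatePoints(P_left, P_right, left, right)  # homogeneous, shape (4, 1)
    return (point_h[:3] / point_h[3]).ravel()                      # (cx, cy, cz)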
Code example #3
def detect_color_direct(image):
    """
    http://www.pyimagesearch.com/2014/08/04/opencv-python-color-detection/

    These are Adrian's boundaries in BGR format:

        boundaries = [
            ([17, 15, 100],  [50, 56, 200]),  # Red
            ([86, 31, 4],    [220, 88, 50]),  # Blue
            ([25, 146, 190], [62, 174, 250]), # Yellow
            ([103, 86, 65],  [145, 133, 128]) # Gray
        ]

    Unfortunately these boundaries are hard to tune. They assume BGR channel order, by the
    way ... and I tried swapping the B and R values for the yellow range, but that doesn't
    work either. When OpenCV reads images from disk (e.g. `cv2.imread(...)`) they are in
    BGR order, but that is not the case with `self.bridge.imgmsg_to_cv2(msg, "rgb8")`,
    which returns the image in RGB order.
    """
    # Adrian's yellow boundaries with the B and R channels swapped (i.e., treated as RGB).
    lower  = np.array([190, 146, 25], dtype='uint8')
    upper  = np.array([250, 174, 62], dtype='uint8')
    mask   = cv2.inRange(image, lower, upper)
    output = cv2.bitwise_and(image, image, mask=mask)
    utilities.call_wait_key(cv2.imshow("left image", output))
    return output
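Since the docstring's main source of confusion is channel order (BGR from `cv2.imread` versus RGB from `imgmsg_to_cv2(msg, "rgb8")`), one way to sidestep it, shown below only as a sketch and not as what the original code does, is to convert any RGB frame to BGR once and then reuse Adrian's published BGR boundaries unchanged:

import numpy as np
import cv2

def detect_yellow_bgr(image, image_is_rgb=True):
    """Apply Adrian's yellow BGR boundaries after normalizing the channel order.

    If the frame came from the ROS bridge with "rgb8" encoding, convert it to BGR
    first so the published boundaries can be used as-is.
    """
    if image_is_rgb:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    lower = np.array([25, 146, 190], dtype='uint8')   # yellow (BGR)
    upper = np.array([62, 174, 250], dtype='uint8')
    mask = cv2.inRange(image, lower, upper)
    return cv2.bitwise_and(image, image, mask=mask)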
Code example #4
def detect_color_hsv(frame):
    """ 
    The original image is stored in RGB (not BGR as is usual) because of the way
    we designed the autolab data collector. Hence, the use of `cv2.COLOR_RGB2HSV`.

    The color ranges are tricky. Follow the approach from:
    http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html#object-tracking
    which recommends picking a single target hue and then taking a range of roughly
    +/- 10 around that center.

    My heuristics:

    Yellow tape:
        lower = np.array([0, 90, 90])
        upper = np.array([80, 255, 255])

    Red tape:
    """
    # Reference hue centers on OpenCV's [0, 180] hue scale. TRICKY! These alone probably
    # can't be used directly; the explicit `lower`/`upper` bounds below are what we use.
    colors = {
            'yellow': 30,
            'green':  60,
            'blue':   120
    }
    lower = np.array([110,  70,  70])
    upper = np.array([180, 255, 255])

    # Convert from RGB (not BGR) to hsv and apply our chosen thresholding.
    frame = cv2.bilateralFilter(frame, 7, 13, 13)
    utilities.call_wait_key(cv2.imshow("After first filter", frame))
    #frame = cv2.medianBlur(frame, 9)
    #utilities.call_wait_key(cv2.imshow("After second filter", frame))
    hsv   = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
    mask  = cv2.inRange(hsv, lower, upper)
    utilities.call_wait_key(cv2.imshow("Does the mask make sense?", mask))
    res   = cv2.bitwise_and(frame, frame, mask=mask)

    # Let's inspect the output and pray it works.
    utilities.call_wait_key(cv2.imshow("Does it detect the desired color?", res))
    res   = cv2.medianBlur(res, 9)
    utilities.call_wait_key(cv2.imshow("res but with blur on this", res))
    return res
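The tutorial linked in the docstring recommends choosing a single target hue and thresholding at roughly +/- 10 around it. The following is a minimal sketch of that recipe using the hue centers from the `colors` dictionary above; the saturation and value floors are arbitrary assumptions that would need per-setup tuning, and hues near the red wrap-around (0/180) would need special handling:

import numpy as np
import cv2

def hsv_range_for(hue_center, hue_tol=10, sat_min=70, val_min=70):
    """Build (lower, upper) HSV bounds around a target hue (OpenCV hue is in [0, 180])."""
    lower = np.array([max(hue_center - hue_tol, 0),   sat_min, val_min])
    upper = np.array([min(hue_center + hue_tol, 180), 255,     255])
    return lower, upper

# Example: bounds for the 'blue' hue target (120), then threshold an HSV image with them.
lower, upper = hsv_range_for(120)
# mask = cv2.inRange(hsv, lower, upper)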
Code example #5
    print("current arm position: {}".format(
        arm.get_current_cartesian_position()))
    arm.close_gripper()

    ## ---------------------------------------------------------------------------------
    ## This tests calibration. Progressively accumulate 36 (or 35...) values, and do the
    ## same for the left image. Then, with this data, we redo the entire process, but this
    ## time without the repeated skipping that was needed before.
    ## Note: this used to be its own method, but the callback code made that confusing,
    ## so for now it lives inline here.
    ## ---------------------------------------------------------------------------------

    # The right image will typically tell us how many contours we can expect (34, 35, 36??).
    cv2.imwrite("images/right_image.jpg", d.right_image)
    cv2.imwrite("images/left_image.jpg", d.left_image)
    utils.call_wait_key(
        cv2.imshow("RIGHT camera, used for contours", d.right_image_proc))
    utils.call_wait_key(
        cv2.imshow("left camera, used for contours", d.left_image_proc))

    # Don't forget to load our trained neural network and rigid bodies!
    f_network = load_model(PARAMS['modeldir'])
    net_mean = PARAMS['X_mean']
    net_std = PARAMS['X_std']

    ## ---------------------------------------------------------------------------------
    ## If we're using the random forest (or other) corrector, load it here. For now,
    ## we assume the arm is using one of our five pre-determined yaw values.
    ## ---------------------------------------------------------------------------------
    RF_correctors = {}
    if args.use_rf_correctors:
        head = 'config/mapping_results/rf_human_guided_yesz_v'
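The snippet loads the trained network together with `X_mean` and `X_std`, which suggests the model expects standardized inputs. The actual prediction code is not shown here; the following is only a hedged sketch of how those parameters would typically be applied, assuming `f_network` is a Keras-style model with a `predict` method (an assumption, not confirmed by this snippet):

import numpy as np

def predict_with_network(f_network, x, net_mean, net_std):
    """Standardize one input vector with the stored mean/std, then run the network.

    The reshape gives the (1, num_features) batch shape that Keras-style models expect.
    """
    x = (np.array(x, dtype=np.float64) - net_mean) / net_std
    return f_network.predict(x.reshape(1, -1))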
Code example #6
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02*peri, True)
            processed.append( (cX,cY,approx,peri) )
        except ZeroDivisionError:
            # Degenerate contour with zero area; skip it.
            pass
    print("number of processed contours: {}".format(len(processed)))

    for i, (cX, cY, approx, peri) in enumerate(processed):
        cv2.circle(img, (cX,cY), 50, (0,0,255))
        cv2.drawContours(img, [approx], -1, (0,255,0), 3)
        cv2.putText(img=img, text=str(i), org=(cX,cY), 
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1, color=(0,0,0), thickness=2)
    utilities.call_wait_key(cv2.imshow("With contours!", img))
 

if __name__ == "__main__":
    arm, _, d = utilities.initializeRobots()
    arm.close_gripper()
    print("current arm position: {}".format(arm.get_current_cartesian_position()))

    utilities.call_wait_key(cv2.imshow("Bounding Box for Contours (LEFT IMAGE)", d.left_image_bbox))
    frame = d.left_image.copy()

    utilities.call_wait_key(cv2.imshow("RIGHT IMAGE", d.right_image_bbox))
    frame = d.right_image.copy()

    res = detect_color_hsv(frame)
    #detect_color_direct(frame)
    find_specific_spot(res)
Code example #7
    assert not os.path.exists(IMDIR), "IMDIR: {}".format(IMDIR)
    os.makedirs(IMDIR)
    PARAMS = pickle.load(
            open('config/mapping_results/auto_params_matrices_v'+IN_VERSION+'.p', 'r')
    )
    rotation = sample_rotation(args.fixed_yaw)
    print("Our starting rotation: {}".format(rotation))
    
    # Now get the robot arm initialized and test it out!
    arm, _, d = utils.initializeRobots()
    print("current arm position: {}".format(arm.get_current_cartesian_position()))
    utils.home(arm, rot=rotation)
    print("current arm position: {}".format(arm.get_current_cartesian_position()))
    arm.close_gripper()
    cv2.imwrite("images/left_image.jpg", d.left_image)
    utils.call_wait_key(cv2.imshow("left camera, used for contours", d.left_image_proc))

    ## ---------------------------------------------------------------------------------------
    ## PART ONE: Get the contours. Manual work here to cut down on tedium in the second part.
    ## Note: we only need the left image for this, not both.
    ## ---------------------------------------------------------------------------------------
    left_im = d.left_image.copy()
    left_c = get_good_contours(left_im, d.left_contours, args.max_num_add)
    cv2.destroyAllWindows()

    ## ---------------------------------------------------------------------------------------
    ## PART TWO: Explicitly fit the point to the upper-left corner of the grid. This is
    ## cheating (just as assuming a known z-coordinate is), but it is acceptable for a baseline.
    ## The z-plane information is loaded into the `info` dictionary here.
    ## ---------------------------------------------------------------------------------------
    info = pickle.load( open(args.guidelines_dir,'r') )
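The `info` dictionary loaded above comes from `collect_guidelines` (code example #9), which stores the fitted workspace plane as `z_alpha`, `z_beta`, and `z_gamma`. As a minimal sketch, assuming those keys are present as in example #9, the plane can be evaluated to obtain a z-coordinate for a target (x, y) in robot space:

def z_from_plane(info, x, y):
    """Evaluate the fitted workspace plane z = alpha*x + beta*y + gamma."""
    return info['z_alpha'] * x + info['z_beta'] * y + info['z_gamma']

# Hypothetical usage with a target position in robot coordinates:
# target_z = z_from_plane(info, target_x, target_y)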
Code example #8
def motion_planning(contours_by_size, img, arm, rotations):
    """ The open loop.
    
    Parameters
    ----------
    contours_by_size: [list]
        A list of contours, arranged from largest area to smallest area.
    img: [np.array]
        Image the camera sees, in BGR form (not RGB).
    arm: [dvrk arm]
        Represents the arm we're using for the DVRK.
    rotations: [list]
        A list of rotations, one per place to visit, used when commanding the arm.
    """
    print("Identified {} contours but will keep top {}.".format(len(contours_by_size), TOPK_CONTOURS))
    img_for_drawing = img.copy()
    contours = list(contours_by_size)
    cv2.drawContours(img_for_drawing, contours, -1, (0,255,0), 3)
    places_to_visit = []

    # Iterate and find centers. We'll make the robot move to these centers in a sequence.
    # Note that duplicate contours should be detected beforehand.
    for i,cnt in enumerate(contours):
        M = cv2.moments(cnt)
        if M["m00"] == 0: continue
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        places_to_visit.append((cX,cY))

    # Keep only the top K places to visit and impose an ordering preference: right to left
    # (i.e., descending pixel x-coordinate).
    places_to_visit = places_to_visit[:TOPK_CONTOURS]
    places_to_visit = sorted(places_to_visit, key=lambda x:x[0], reverse=True)

    # Number the places to visit in an image so I see them.
    for i,(cX,cY) in enumerate(places_to_visit):
        cv2.putText(img=img_for_drawing, text=str(i), org=(cX,cY), 
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX, 
                    fontScale=1, color=(0,0,0), thickness=2)
        cv2.circle(img=img_for_drawing, center=(cX,cY), radius=3, color=(0,0,255), thickness=-1)
        print(i,cX,cY)

    # Show the image with contours and exact centers; exit if it does not look good.
    # If it does, save it, which provides a record in case poor perception causes problems later.
    trial = len([fname for fname in os.listdir(IMAGE_DIR) if '.png' in fname])
    cv2.imshow("Top-K contours (exit if not looking good), trial index is {}".format(trial), img_for_drawing)
    utils.call_wait_key()
    cv2.imwrite(IMAGE_DIR+"/im_"+str(trial).zfill(3)+".png", img_for_drawing)
    cv2.destroyAllWindows()

    # Given points in `places_to_visit`, must figure out the robot points for each.
    robot_points_to_visit = []
    print("\nHere's where we'll be visiting (left_pixels, robot_point):")
    for left_pixels in places_to_visit:
        robot_points_to_visit.append(
                utils.left_pixel_to_robot_prediction(
                        left_pt=left_pixels,
                        params=PARAMETERS,
                        better_rf=RF_REGRESSOR,
                        ARM1_XOFFSET=ARM1_XOFFSET,
                        ARM1_YOFFSET=ARM1_YOFFSET,
                        ARM1_ZOFFSET=ARM1_ZOFFSET
                )
        )
        print("({}, {})\n".format(left_pixels, robot_points_to_visit[-1]))

    # With the robot points, command the arm to _move_ to them, applying vertical offsets here.
    # Using `linear_interpolation` is slower and jerkier than just moving directly.
    arm.open_gripper(degree=90, time_sleep=2)

    for (robot_pt, rot) in zip(robot_points_to_visit, rotations):
        # Go to the point. Note: we switch to the target rotation at the home position first,
        # then translate to the target.
        pos = [robot_pt[0], robot_pt[1], robot_pt[2]+ZOFFSET_SAFETY]
        utils.move(arm, HOME_POS, rot, SPEED_CLASS) 
        utils.move(arm, pos,      rot, SPEED_CLASS)

        # Lower, close, and raise the end-effector.
        pos[2] -= ZOFFSET_SAFETY
        utils.move(arm, pos, rot, SPEED_CLASS)
        arm.open_gripper(degree=CLOSE_ANGLE, time_sleep=2) # Need >=2 seconds!
        pos[2] += ZOFFSET_SAFETY
        utils.move(arm, pos, rot, SPEED_CLASS)

        # Back to the home position.
        utils.move(arm, HOME_POS, rot, SPEED_CLASS)
        arm.open_gripper(degree=90, time_sleep=2) # Need >=2 seconds!
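A minimal usage sketch for `motion_planning`, under stated assumptions: a thresholded left-camera image `proc_image`, a data collector `d`, and an `arm` are assumed to already exist (none are defined here), the contours are sorted by area to form `contours_by_size`, and a single nominal rotation (the +90 yaw configuration from code example #2) is repeated once per target. This is not the original calling code:

import cv2

# Hypothetical setup; `proc_image`, `d`, `arm`, and TOPK_CONTOURS are assumed to exist.
(cnts, _) = cv2.findContours(proc_image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours_by_size = sorted(cnts, key=cv2.contourArea, reverse=True)
rotations = [[90, 0, -165] for _ in range(TOPK_CONTOURS)]  # one rotation per target
motion_planning(contours_by_size, d.left_image.copy(), arm, rotations)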
Code example #9
def collect_guidelines(arm, d, directory):
    """ 
    Gather statistics about the workspace on how safe we can set things. Save things in a 
    pickle file specified by the `directory` parameter. Click the ESC key to exit the 
    program and restart. BTW, the four poses we collect will be the four "home" positions 
    that I use later, though with more z-coordinate offset.

    Some information:

        `yaw` must be limited in [-180,180]  # But actually, [-90,90] is all we need.
        `pitch` must be limited in [-50,50]  # I _think_ ...
        `roll` must be limited in [-180,180] # I think ...

    Remember, if I change the numbers, it doesn't impact the code until re-building `guidelines.p`!!
    """
    # First, add stuff that we should already know, particularly the rotation ranges.
    info = {}
    info['min_yaw']   = -90
    info['max_yaw']   =  90
    info['min_pitch'] = -15
    info['max_pitch'] =  25
    info['roll_neg_ubound'] = -150 # (-180, roll_neg_ubound)
    info['roll_pos_lbound'] =  150 # (roll_pos_lbound, 180)
    info['min_roll'] = -180
    info['max_roll'] = -150

    # Move the arm to positions to determine approximately safe ranges for x,y,z values.
    # And to be clear, all the `pos_{lr,ll,ul,ur}` are in robot coordinates.
    utils.call_wait_key(cv2.imshow("Left camera (move to lower right corner now!)", d.left_image))
    pos_lr = arm.get_current_cartesian_position()
    utils.call_wait_key(cv2.imshow("Left camera (move to lower left corner now!)", d.left_image))
    pos_ll = arm.get_current_cartesian_position()
    utils.call_wait_key(cv2.imshow("Left camera (move to upper left corner now!)", d.left_image))
    pos_ul = arm.get_current_cartesian_position()
    utils.call_wait_key(cv2.imshow("Left camera (move to upper right corner now!)", d.left_image))
    pos_ur = arm.get_current_cartesian_position()

    # Save these so that we can use them for `home` positions.
    info['home_lr'] = pos_lr
    info['home_ll'] = pos_ll
    info['home_ul'] = pos_ul
    info['home_ur'] = pos_ur
    
    # So P[:,0] is a vector of the x's, P[:,1] vector of y's, P[:,2] vector of z's.
    p_lr = np.squeeze(np.array( pos_lr.position[:3] ))
    p_ll = np.squeeze(np.array( pos_ll.position[:3] ))
    p_ul = np.squeeze(np.array( pos_ul.position[:3] ))
    p_ur = np.squeeze(np.array( pos_ur.position[:3] ))
    P = np.vstack((p_lr, p_ll, p_ul, p_ur))

    # Get the x and y ranges. This is a bit of a heuristic, but it generally works well.
    info['min_x'] = np.min( [p_lr[0], p_ll[0], p_ul[0], p_ur[0]] )
    info['max_x'] = np.max( [p_lr[0], p_ll[0], p_ul[0], p_ur[0]] )
    info['min_y'] = np.min( [p_lr[1], p_ll[1], p_ul[1], p_ur[1]] )
    info['max_y'] = np.max( [p_lr[1], p_ll[1], p_ul[1], p_ur[1]] )

    # For z, we fit a plane via least squares. See https://stackoverflow.com/a/1400338/3287820
    # Find (alpha, beta, gamma) s.t. f(x,y) = alpha*x + beta*y + gamma = z.
    # A and b below are the normal equations (A = D^T D, b = D^T z) for the design
    # matrix D whose rows are [x_i, y_i, 1].
    A = np.zeros((3,3)) # Must be symmetric!
    A[0,0] = np.sum(P[:,0] * P[:,0])
    A[0,1] = np.sum(P[:,0] * P[:,1])
    A[0,2] = np.sum(P[:,0])
    A[1,0] = np.sum(P[:,0] * P[:,1])
    A[1,1] = np.sum(P[:,1] * P[:,1])
    A[1,2] = np.sum(P[:,1])
    A[2,0] = np.sum(P[:,0])
    A[2,1] = np.sum(P[:,1])
    A[2,2] = P.shape[0]

    b = np.array(
            [np.sum(P[:,0] * P[:,2]), 
             np.sum(P[:,1] * P[:,2]), 
             np.sum(P[:,2])]
    )

    x = np.linalg.solve(A, b)  # Solve the 3x3 normal equations (equivalent to inv(A).dot(b)).
    info['z_alpha'] = x[0]
    info['z_beta']  = x[1]
    info['z_gamma'] = x[2]

    # Sanity checks before saving stuff.
    assert info['min_x'] < info['max_x']
    assert info['min_y'] < info['max_y']
    assert P.shape == (4,3)
    for key in info:
        print("key,val = {},{}".format(key, info[key]))
    print("P:\n{}".format(P))
    print("A:\n{}".format(A))
    print("x:\n{}".format(x))
    print("b:\n{}".format(b))

    pickle.dump(info, open(directory, 'w'))
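The normal-equations construction above can also be written directly with NumPy's least-squares solver, which should recover the same (alpha, beta, gamma); a minimal sketch, not part of the original code, reusing the `P` matrix of corner positions:

import numpy as np

def fit_z_plane(P):
    """Fit z = alpha*x + beta*y + gamma to the rows of P (shape (N, 3))."""
    D = np.column_stack((P[:, 0], P[:, 1], np.ones(P.shape[0])))  # design matrix [x, y, 1]
    coeffs, _, _, _ = np.linalg.lstsq(D, P[:, 2], rcond=None)
    return coeffs  # (alpha, beta, gamma)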