def get_good_contours(image, image_contours, max_num_add):
    """Interactively select up to `max_num_add` contours from `image_contours`.

    Each candidate (cX, cY, approx, peri) tuple is drawn on `image` and shown
    to the user in a resizable window. Pressing ESC rejects the candidate;
    any other key accepts it. Accepted tuples are collected and returned.

    Parameters
    ----------
    image : the image the contours were detected in (mutated: annotations
        are drawn onto it so later prompts show prior selections).
    image_contours : iterable of (cX, cY, approx, peri) tuples.
    max_num_add : stop once this many contours have been accepted.

    Returns
    -------
    list of accepted (cX, cY, approx, peri) tuples.
    """
    selected = []
    num_added = 0
    window = "ESC if duplicate/undesired, other key to proceed."

    for (cX, cY, approx, peri) in image_contours:
        # Skip centroids that fall inside the excluded pixel region.
        if utils.filter_point(cX, cY, xlower=500, xupper=1500,
                              ylower=50, yupper=1000):
            continue

        # Mark the candidate: an outer ring, a filled center dot, and the
        # approximated contour polygon.
        cv2.circle(image, (cX, cY), 50, (0, 0, 255), 1)
        cv2.circle(image, (cX, cY), 4, (0, 0, 255), -1)
        cv2.drawContours(image, [approx], -1, (0, 255, 0), 3)

        cv2.namedWindow(window, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(window, 2000, 4000)
        cv2.imshow(window, image)
        keypress = cv2.waitKey(0)

        if keypress not in utils.ESC_KEYS:
            selected.append((cX, cY, approx, peri))
            num_added += 1
            # Stamp the running count on the image so the user can follow
            # which contours have already been kept.
            cv2.putText(img=image, text="{}".format(num_added), org=(cX, cY),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
                        color=(255, 0, 0), thickness=2)
        if num_added == max_num_add:
            break
    return selected
def get_contours(cnts, img):
    """Interactively pick the first acceptable ellipse-fitted contour.

    For each contour in `cnts`, computes its polygonal approximation and
    pixel-space centroid, filters out centroids inside the excluded region,
    fits and draws an ellipse, and asks the user to accept or skip it.

    Parameters
    ----------
    cnts : iterable of OpenCV contours (point arrays).
    img : image to draw candidate ellipses on (mutated).

    Returns
    -------
    (cX, cY, peri, approx, ellipse) for the first contour the user accepts,
    or None if every candidate is filtered, degenerate, or rejected.
    """
    for c in cnts:
        # Approximate the contour and find its centroid in _pixel_space_.
        # Narrow try blocks replace the original bare `except: pass`, which
        # silently swallowed everything (even KeyboardInterrupt).
        try:
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)
            M = cv2.moments(c)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
        except ZeroDivisionError:
            # Degenerate contour with zero area (m00 == 0); skip it.
            continue

        if utils.filter_point(cX, cY, xlower=700, xupper=1300,
                              ylower=500, yupper=800):
            continue

        # Now fit an ellipse! fitEllipse needs at least 5 points and raises
        # cv2.error otherwise; skip contours it rejects.
        try:
            ellipse = cv2.fitEllipse(c)
        except cv2.error:
            continue
        cv2.ellipse(img, ellipse, (0, 255, 0), 2)

        img_copy = img.copy()
        cv2.imshow("Is this ellipse good? ESC to skip it, else return it.", img_copy)
        firstkey = cv2.waitKey(0)
        if firstkey not in utils.ESC_KEYS:
            return (cX, cY, peri, approx, ellipse)
    return None
def get_single_contour_center(image, contours, return_contour=False):
    """Interactively find a single contour and return its center (as a list).

    Shows each candidate on `image`; the first one the user accepts (any key
    other than ESC) is returned immediately. Returns [cX, cY, approx, peri]
    when `return_contour` is True, otherwise [cX, cY]. Implicitly returns
    None when every candidate is filtered out or rejected.
    """
    for (cX, cY, approx, peri) in contours:
        # Skip centroids inside the excluded pixel window.
        if utils.filter_point(cX, cY, xlower=700, xupper=1400,
                              ylower=500, yupper=1000):
            continue

        # Visualize the candidate: outer ring, filled center dot, polygon.
        cv2.circle(image, (cX, cY), 50, (0, 0, 255), 1)
        cv2.circle(image, (cX, cY), 4, (0, 0, 255), -1)
        cv2.drawContours(image, [approx], -1, (0, 255, 0), 3)

        cv2.imshow("ESC if duplicate/undesired, other key to proceed", image)
        key = cv2.waitKey(0)

        # Return outright on the first acceptance.
        if key not in utils.ESC_KEYS:
            return [cX, cY, approx, peri] if return_contour else [cX, cY]
def calibrateImage(contours, img, arm1, outfile):
    """ Perform camera calibration using both images. This code saves the camera
    pixels (cX,cY) and the robot coordinates (the (pos,rot) for ONE arm) all in
    one pickle file. Then, not in this code, we run regression to get our
    desired mapping from pixel space to robot space. Whew. It's manual, but worth
    it. I put numbers to indicate how many we've saved. DO ONE SAVE PER CONTOUR
    so that I can get a correspondence with left and right images after arranging
    pixels in the correct ordering (though I don't think I have to do that).
    """
    # Home the arm before starting so every contour begins from the same pose.
    utils.move(arm1, HOME_POS, ROTATION, 'Fast')
    arm1.close_gripper()
    print("(after calling `home`) psm1 current position: {}".format(
        arm1.get_current_cartesian_position()))
    print("len(contours): {}".format(len(contours)))
    num_saved = 0

    for i, (cX, cY, approx, peri) in enumerate(contours):
        # Drop centroids inside the excluded pixel region (positional args:
        # xlower, xupper, ylower, yupper).
        if utils.filter_point(cX, cY, 500, 1500, 75, 1000):
            continue
        # Fresh copy each iteration so annotations don't accumulate.
        image = img.copy()

        # Deal with the image and get a visual. Keep clicking ESC key until we
        # see a circle.
        cv2.circle(image, (cX, cY), 50, (0, 0, 255))
        cv2.drawContours(image, [approx], -1, (0, 255, 0), 3)
        cv2.putText(img=image, text=str(num_saved), org=(cX, cY),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
                    color=(0, 0, 0), thickness=2)
        cv2.imshow("Contour w/{} saved so far out of {}.".format(num_saved, i), image)
        key1 = cv2.waitKey(0)

        if key1 not in utils.ESC_KEYS:
            # We have a circle. Move arm to target. The rotation is off, but we
            # command it to rotate. (Position is re-commanded at the current
            # frame; only the rotation changes here.)
            frame = arm1.get_current_cartesian_position()
            utils.move(arm=arm1, pos=frame.position[:3], rot=ROTATION,
                       SPEED_CLASS='Slow')

            # Now the human re-positions it to the center.
            cv2.imshow("Here's where we are after generic movement + rotation. Now correct it!", image)
            key2 = cv2.waitKey(0)

            # Get position and orientation of the arm, save, & reset.
            pos, rot = utils.lists_of_pos_rot_from_frame(
                arm1.get_current_cartesian_position())
            a1 = (pos, rot, cX, cY)
            print("contour {}, a1={}".format(i, a1))
        else:
            print("(not storing contour {} on the left)".format(i))

        # Re-home between contours so the next measurement starts clean.
        utils.move(arm1, HOME_POS, ROTATION, 'Fast')
        arm1.close_gripper()

        # Only store this contour if both keys were not escape keys.
        # NOTE(review): despite the comment above, only `key1` is tested here —
        # `key2` is read but never checked. Confirm whether an ESC on the second
        # prompt should also discard the contour.
        if key1 not in utils.ESC_KEYS:
            utils.storeData(outfile, a1)
            num_saved += 1
    cv2.destroyAllWindows()
arm.close_gripper() print("current arm position: {}".format( arm.get_current_cartesian_position())) # I will be IGNORING the random forest here, because it doesn't really work. params = pickle.load( open('config/mapping_results/params_matrices_v' + VERSION + '.p', 'r')) # Use the d.left_image for calibration. cv2.imwrite("images/left_image.jpg", d.left_image) image_original = cv2.imread("images/left_image.jpg") num_added = 0 # Iterate through valid contours from the _left_ camera (we'll simulate right camera). for i, (cX, cY, approx, peri) in enumerate(d.left_contours): if utils.filter_point(cX, cY, 500, 1500, 50, 1000): continue if num_added == MAX_NUM_ADD: break image = image_original.copy() cv2.circle(img=image, center=(cX, cY), radius=50, color=(0, 0, 255), thickness=1) cv2.circle(img=image, center=(cX, cY), radius=4, color=(0, 0, 255), thickness=-1)
'config/mapping_results/random_forest_predictor_v' + IN_VERSION + '.p', 'r')) else: better_rf = None # Use the d.left_image for calibration. Originally I used a saved image, but it should # be determined here since the paper location and camera might adjust slightly. cv2.imwrite("images/left_image.jpg", d.left_image) image_original = cv2.imread("images/left_image.jpg") num_added = 0 # Iterate through valid contours from the _left_ camera (we'll simulate right camera). for i, (cX, cY, approx, peri) in enumerate(d.left_contours): if utils.filter_point(cX, cY, xlower=500, xupper=1500, ylower=50, yupper=1000): continue if num_added == MAX_NUM_ADD: break image = image_original.copy() cv2.circle(img=image, center=(cX, cY), radius=50, color=(0, 0, 255), thickness=1) cv2.circle(img=image, center=(cX, cY), radius=4,