def callback(self, data):
    """ROS image callback: detect the marker and publish its pose.

    data: sensor_msgs/Image, converted to an OpenCV BGR frame via cv_bridge.
    Publishes the grayscale frame on self.image_pub and the marker pose
    (relative to the image centre) on self.x/y/theta_pose_pub.
    """
    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        # BUG FIX: without this return, cv_image is undefined below and
        # the callback dies with a NameError instead of skipping the frame.
        print(e)
        return

    # Display the image
    #cv2.imshow("Image window", cv_image)

    # BUG FIX: the frame is "bgr8", so use the BGR conversion code.
    # The original used COLOR_RGB2GRAY, which swaps the R and B weights.
    frame_gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)

    # Tracker parameters: n-fold marker order, correlation kernel size,
    # and the tracker's internal scale factor.
    order = 10
    kernel_size = 25
    scale_factor = 100
    mt = MarkerTracker(order, kernel_size, scale_factor)
    self.pose = mt.locate_marker(frame_gray)

    # Draw a 200x200 box centred on the detected marker and show it.
    cv2.rectangle(cv_image,
                  (self.pose.x + 100, self.pose.y + 100),
                  (self.pose.x - 100, self.pose.y - 100),
                  (0, 255, 0), 3)
    cv2.imshow("Box", cv_image)
    cv2.waitKey(3)
    print("{} {} {} {} {} {}".format("x-pose:", self.pose.x,
                                     "y-pose:", self.pose.y,
                                     "theta:", self.pose.theta))

    height, width = cv_image.shape[:2]
    try:
        self.image_pub.publish(
            self.bridge.cv2_to_imgmsg(frame_gray, "8UC1"))
        # Pose is published relative to the image centre.
        self.x_pose_pub.publish(self.pose.x - (width / 2))
        self.y_pose_pub.publish(self.pose.y - (height / 2))
        self.theta_pose_pub.publish(self.pose.theta)
    except CvBridgeError as e:
        print(e)
class CameraDriver:
    """Runs the marker tracker on incoming frames and shows an annotated view.

    Workflow: the caller stores a grayscale frame in self.current_frame,
    then calls process_frame(), show_processed_frame() and
    handle_keyboard_events() once per iteration.
    """

    def __init__(self, marker_order, marker_size, scaling_parameter):
        # Storage for image processing.
        self.current_frame = None
        self.processed_frame = None
        self.running = True
        # Initialize trackers.
        self.tracker = MarkerTracker(marker_order, marker_size, scaling_parameter)
        self.location = MarkerPose(None, None, None, None, None)

    def open_image_window(self):
        # OpenCV 2.x constant namespace (cv2.cv) — matches the rest of the file.
        cv2.namedWindow('filterdemo', cv2.cv.CV_WINDOW_AUTOSIZE)

    def process_frame(self):
        """Locate the marker in the current grayscale frame."""
        frame_gray = self.current_frame
        # Colour copy used only for annotation in show_processed_frame().
        self.processed_frame = cv2.cvtColor(self.current_frame, cv2.cv.CV_GRAY2BGR)
        # Previous marker location is unknown, search in the entire image.
        # BUG FIX: the original assigned the tracker's return value to
        # self.current_frame, destroying the raw frame that
        # handle_keyboard_events() saves to disk. The pose is read from
        # the tracker instead (same as the other CameraDriver in this file).
        self.tracker.locate_marker(frame_gray)
        self.location = self.tracker.pose

    def show_processed_frame(self):
        """Draw the marker position and heading onto the processed frame."""
        xm = self.location.x
        ym = self.location.y
        orientation = self.location.theta
        # Thin circle for a low-confidence detection, thick for a good one.
        if self.location.quality < 0.9:
            cv2.circle(self.processed_frame, (xm, ym), 4, (55, 55, 255), 1)
        else:
            cv2.circle(self.processed_frame, (xm, ym), 4, (55, 55, 255), 3)
        # 50 px heading indicator along the marker orientation.
        xm2 = int(xm + 50 * math.cos(orientation))
        ym2 = int(ym + 50 * math.sin(orientation))
        cv2.line(self.processed_frame, (xm, ym), (xm2, ym2), (255, 0, 0), 2)
        cv2.imshow('filterdemo', self.processed_frame)

    def reset_location(self):
        # Reset all markers locations, forcing a full search on the next iteration.
        self.location = MarkerPose(None, None, None, None, None)

    def handle_keyboard_events(self):
        # Listen for keyboard events and take relevant actions.
        key = cv2.waitKey(100)
        # Discard higher order bit, http://permalink.gmane.org/gmane.comp.lib.opencv.devel/410
        key = key & 0xff
        if key == 27:  # Esc
            self.running = False
        if key == 114:  # R
            print("Resetting")
            self.reset_location()
        if key == 115:  # S
            # save image
            print("Saving image")
            filename = strftime("%Y-%m-%d %H-%M-%S")
            cv2.imwrite("output/%s.png" % filename, self.current_frame)
class TrackerInWindowMode:
    """Tracks a marker inside a small window cropped around its last
    known position, avoiding a full-image search on every frame.
    """

    def __init__(self, order=7, defaultKernelSize=21):
        # Fixed size of the search window, in pixels.
        self.window_width = 100
        self.window_height = 100
        # Pre-allocated buffers for the cropped grayscale / colour windows.
        self.frameGray = np.zeros((self.window_height, self.window_width, 1), dtype=np.float32)
        self.originalImage = np.zeros((self.window_height, self.window_width, 3), dtype=np.float32)
        # 2500 is the tracker's scale factor for the reduced window.
        self.markerTracker = MarkerTracker(order, defaultKernelSize, 2500)
        self.trackerIsInitialized = False
        # (x, y, width, height) of the window inside the full frame.
        self.subImagePosition = None
        self.reducedImage = None

    def crop_frame(self, frame, last_marker_location_x, last_marker_location_y):
        """Extract the search window centred on the last marker location.

        frame: full colour image; the crop is clamped so the window
        always lies entirely inside the frame.
        """
        if not self.trackerIsInitialized:
            self.reducedImage = np.zeros((self.window_height, self.window_width, 3), frame.dtype)
        # Top-left corner of the window, centred on the last known position.
        # NOTE(review): Python 2 integer division assumed here — under
        # Python 3 '/' would yield floats and break the slicing below.
        x_corner_pos = last_marker_location_x - self.window_width / 2
        y_corner_pos = last_marker_location_y - self.window_height / 2
        # Ensure that extracted window is inside the original image.
        if x_corner_pos < 1:
            x_corner_pos = 1
        if y_corner_pos < 1:
            y_corner_pos = 1
        if x_corner_pos > frame.shape[1] - self.window_width:
            x_corner_pos = frame.shape[1] - self.window_width
        if y_corner_pos > frame.shape[0] - self.window_height:
            y_corner_pos = frame.shape[0] - self.window_height
        try:
            self.subImagePosition = (x_corner_pos, y_corner_pos, self.window_width, self.window_height)
            # Slice the window out of the frame: rows then columns.
            self.reducedImage = frame[self.subImagePosition[1]:self.subImagePosition[1]+self.subImagePosition[3],
                                      self.subImagePosition[0]:self.subImagePosition[0]+self.subImagePosition[2], :]
            self.frameGray = cv2.cvtColor(self.reducedImage, cv2.cv.CV_RGB2GRAY)
        # NOTE(review): bare except deliberately swallows all errors here and
        # only dumps diagnostics; a crop failure leaves stale buffers in place.
        except:
            print("frame: ", frame.dtype)
            print("originalImage: ", self.originalImage.shape[0], self.originalImage.shape[1], self.originalImage)
            print("frameGray: ", self.frameGray.shape[0], self.frameGray.shape[1], self.frameGray.dtype)
            print "Unexpected error:", sys.exc_info()[0]
            pass

    def locate_marker(self):
        """Run the tracker on the cropped window and return the marker pose
        in full-frame coordinates, annotating the window as a side effect.
        """
        (xm, ym) = self.markerTracker.locate_marker(self.frameGray)
        red_color = (55, 55, 255)
        blue_color = (255, 0, 0)
        orientation = self.markerTracker.orientation
        # Mark the detection and draw a 50 px heading line on the window.
        cv2.circle(self.reducedImage, (xm, ym), 4, red_color, 2)
        xm2 = int(xm + 50*math.cos(orientation))
        ym2 = int(ym + 50*math.sin(orientation))
        cv2.line(self.reducedImage, (xm, ym), (xm2, ym2), blue_color, 2)
        # Translate window coordinates back into full-frame coordinates.
        xm = xm + self.subImagePosition[0]
        ym = ym + self.subImagePosition[1]
        return MarkerPose(xm, ym, orientation, self.markerTracker.quality, self.markerTracker.order)

    def show_cropped_image(self):
        # Intentionally a no-op; display is handled elsewhere.
        pass
def callback(self, data):
    """ROS image callback: detect the marker and publish a MarkerLocation.

    data: sensor_msgs/Image, converted to an OpenCV BGR frame via cv_bridge.
    Publishes the marker pose (relative to the image centre) on
    self.marker_pose_pub and the annotated grayscale frame on self.image_pub.
    """
    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        # BUG FIX: without this return, cv_image is undefined below and
        # the callback dies with a NameError instead of skipping the frame.
        print(e)
        return

    # Display the image
    # cv2.imshow("Image window", cv_image)

    # BUG FIX: the frame is "bgr8", so use the BGR conversion code.
    # The original used COLOR_RGB2GRAY, which swaps the R and B weights.
    frame_gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)

    # Tracker parameters: n-fold marker order, correlation kernel size,
    # and the tracker's internal scale factor.
    order = 9
    kernel_size = 25
    scale_factor = 100
    mt = MarkerTracker(order, kernel_size, scale_factor)
    self.pose = mt.locate_marker(frame_gray)

    # Draw a 200x200 box centred on the detected marker (on the gray frame
    # that gets published below).
    cv2.rectangle(frame_gray,
                  (self.pose.x + 100, self.pose.y + 100),
                  (self.pose.x - 100, self.pose.y - 100),
                  (0, 255, 0), 3)
    # cv2.imshow("Box",cv_image)
    # cv2.waitKey(3)
    # print("{} {} {} {} {} {}".format("x-pose:", self.pose.x, "y-pose:", self.pose.y, "theta:", self.pose.theta))

    height, width = cv_image.shape[:2]
    try:
        # position out: pose relative to the image centre.
        currX = self.pose.x - (width / 2)
        currY = self.pose.y - (height / 2)
        msg = MarkerLocation(x=currX, y=currY,
                             theta=self.pose.theta,
                             quality=self.pose.quality)
        self.marker_pose_pub.publish(msg)
        # camera feed with marker detection
        self.image_pub.publish(
            self.bridge.cv2_to_imgmsg(frame_gray, '8UC1'))
    except CvBridgeError as e:
        print(e)
# main function
if __name__ == "__main__":
    print("Running..")
    print("Loading image..")
    # Load the test image as grayscale (flag 0).
    img = cv2.imread('marker_image.jpg', 0)
    # BUG FIX: cv2.imread returns None on failure instead of raising;
    # fail early with a clear message rather than crashing in the tracker.
    if img is None:
        raise IOError("could not read 'marker_image.jpg'")

    # Number of n-fold edges:
    order = 3
    kernel_size = 25
    scale_factor = 100
    mt = MarkerTracker(order, kernel_size, scale_factor)

    # Repeatedly locate the marker and evaluate its orientation/quality.
    # NOTE(review): this loop has no exit condition — presumably a
    # benchmark/soak loop; confirm before reusing. Stop with Ctrl-C.
    while True:
        mt.locate_marker(img)
        mt.determine_marker_orientation(img)
        mt.determine_marker_quality(img)
class TrackerInWindowMode:
    """Tracks a marker inside a small window cropped around its last
    known position, avoiding a full-image search on every frame.
    """

    def __init__(self, order=7, defaultKernelSize=21):
        # Fixed size of the search window, in pixels.
        self.window_width = 100
        self.window_height = 100
        # Pre-allocated buffers for the cropped grayscale / colour windows.
        self.frameGray = np.zeros((self.window_height, self.window_width, 1), dtype=np.float32)
        self.originalImage = np.zeros(
            (self.window_height, self.window_width, 3), dtype=np.float32)
        # 2500 is the tracker's scale factor for the reduced window.
        self.markerTracker = MarkerTracker(order, defaultKernelSize, 2500)
        self.trackerIsInitialized = False
        # (x, y, width, height) of the window inside the full frame.
        self.subImagePosition = None
        self.reducedImage = None

    def crop_frame(self, frame, last_marker_location_x, last_marker_location_y):
        """Extract the search window centred on the last marker location.

        frame: full colour image; the crop is clamped so the window
        always lies entirely inside the frame.
        """
        if not self.trackerIsInitialized:
            self.reducedImage = np.zeros(
                (self.window_height, self.window_width, 3), frame.dtype)
        # Top-left corner of the window, centred on the last known position.
        # FIX: floor division keeps the offsets integral under Python 3 as
        # well (identical result for ints under Python 2).
        x_corner_pos = last_marker_location_x - self.window_width // 2
        y_corner_pos = last_marker_location_y - self.window_height // 2
        # Ensure that extracted window is inside the original image.
        if x_corner_pos < 1:
            x_corner_pos = 1
        if y_corner_pos < 1:
            y_corner_pos = 1
        if x_corner_pos > frame.shape[1] - self.window_width:
            x_corner_pos = frame.shape[1] - self.window_width
        if y_corner_pos > frame.shape[0] - self.window_height:
            y_corner_pos = frame.shape[0] - self.window_height
        try:
            self.subImagePosition = (x_corner_pos, y_corner_pos,
                                     self.window_width, self.window_height)
            # Slice the window out of the frame: rows then columns.
            self.reducedImage = frame[
                self.subImagePosition[1]:self.subImagePosition[1] + self.subImagePosition[3],
                self.subImagePosition[0]:self.subImagePosition[0] + self.subImagePosition[2], :]
            self.frameGray = cv2.cvtColor(self.reducedImage, cv2.cv.CV_RGB2GRAY)
        # FIX: narrowed the bare 'except:' (which also swallowed
        # KeyboardInterrupt/SystemExit) to Exception; a crop failure is
        # still logged and otherwise ignored, leaving stale buffers.
        except Exception:
            # FIX: single-argument %-formatted prints — the original
            # multi-argument print(...) calls printed tuples under Python 2.
            print("frame: %s" % frame.dtype)
            print("originalImage: %s %s %s" % (self.originalImage.shape[0],
                                               self.originalImage.shape[1],
                                               self.originalImage))
            print("frameGray: %s %s %s" % (self.frameGray.shape[0],
                                           self.frameGray.shape[1],
                                           self.frameGray.dtype))
            print("Unexpected error: %s" % sys.exc_info()[0])

    def locate_marker(self):
        """Run the tracker on the cropped window and return the marker pose
        in full-frame coordinates, annotating the window as a side effect.
        """
        (xm, ym) = self.markerTracker.locate_marker(self.frameGray)
        red_color = (55, 55, 255)
        blue_color = (255, 0, 0)
        orientation = self.markerTracker.orientation
        # Mark the detection and draw a 50 px heading line on the window.
        cv2.circle(self.reducedImage, (xm, ym), 4, red_color, 2)
        xm2 = int(xm + 50 * math.cos(orientation))
        ym2 = int(ym + 50 * math.sin(orientation))
        cv2.line(self.reducedImage, (xm, ym), (xm2, ym2), blue_color, 2)
        # Translate window coordinates back into full-frame coordinates.
        xm = xm + self.subImagePosition[0]
        ym = ym + self.subImagePosition[1]
        return MarkerPose(xm, ym, orientation,
                          self.markerTracker.quality, self.markerTracker.order)

    def show_cropped_image(self):
        # Intentionally a no-op; display is handled elsewhere.
        pass
class CameraDriver:
    """Runs the marker tracker on incoming frames and shows an annotated view.

    A simple busy flag skips process_frame() calls that arrive while the
    previous frame is still being displayed; the flag is cleared at the
    end of show_processed_frame().
    """

    def __init__(self, marker_order, marker_size, scaling_parameter):
        # Storage for image processing.
        self.current_frame = None
        self.processed_frame = None
        self.running = True
        # Guards against re-entrant processing; cleared in show_processed_frame().
        self.busy = False
        # Initialize trackers.
        self.tracker = MarkerTracker(marker_order, marker_size, scaling_parameter)
        self.location = MarkerPose(None, None, None, None, None)

    def process_frame(self):
        """Locate the marker in the current grayscale frame (skips if busy)."""
        # FIX: idiomatic truth test instead of '== False'.
        if not self.busy:
            self.busy = True
            self.processed_frame = self.current_frame
            frame_gray = self.current_frame
            # OpenCV 2.x keeps conversion codes under cv2.cv; 3.x+ moved
            # them to the cv2 namespace.
            if LooseVersion(cv2.__version__).version[0] == 2:
                self.processed_frame = cv2.cvtColor(self.current_frame, cv2.cv.CV_GRAY2BGR)
            else:
                self.processed_frame = cv2.cvtColor(self.current_frame, cv2.COLOR_GRAY2BGR)
            # Previous marker location is unknown, search in the entire image.
            # FIX: dropped the unused 'current_pose' local; the pose is
            # read from the tracker below.
            self.tracker.locate_marker(frame_gray)
            self.location = self.tracker.pose
        else:
            print('skipping')

    def show_processed_frame(self):
        """Draw the detection, show the frame and handle key presses."""
        xm = self.location.x
        ym = self.location.y
        orientation = self.location.theta
        # Thin circle for a low-confidence detection, thick for a good one.
        if self.location.quality < 0.9:
            cv2.circle(self.processed_frame, (xm, ym), 4, (55, 55, 255), 1)
        else:
            cv2.circle(self.processed_frame, (xm, ym), 4, (55, 55, 255), 3)
        # 50 px heading indicator along the marker orientation.
        xm2 = int(xm + 50 * math.cos(orientation))
        ym2 = int(ym + 50 * math.sin(orientation))
        cv2.line(self.processed_frame, (xm, ym), (xm2, ym2), (255, 0, 0), 2)
        cv2.imshow('filterdemo', self.processed_frame)

        key = cv2.waitKey(1)
        # Discard higher order bit, http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_image_display/py_image_display.html
        key = key & 0xff
        if key == 27:  # Esc
            self.running = False
        if key == 114:  # R
            print("Resetting")
            self.reset_location()
        if key == 115:  # S
            # save image
            print("Saving image")
            filename = strftime("%Y-%m-%d %H-%M-%S")
            cv2.imwrite("markerlocator_image_%s.png" % filename, self.current_frame)
        # Release the busy flag so process_frame() accepts the next frame.
        self.busy = False

    def reset_location(self):
        # Reset all markers locations, forcing a full search on the next iteration.
        self.location = MarkerPose(None, None, None, None, None)