def locate_marker(self, frame):
    """Locate the marker in *frame* and return its MarkerPose.

    The frame is convolved with the real and imaginary marker kernels;
    the pixel with the strongest combined (squared-magnitude) response
    is taken as the marker location, after which orientation and
    quality are determined for that location.
    """
    self.frame_real = frame.copy()
    self.frame_imag = frame.copy()

    # Calculate convolution and determine response strength.
    self.frame_real = cv2.filter2D(self.frame_real, cv2.CV_32F, self.mat_real)
    self.frame_imag = cv2.filter2D(self.frame_imag, cv2.CV_32F, self.mat_imag)
    real_sq = cv2.multiply(self.frame_real, self.frame_real, dtype=cv2.CV_32F)
    imag_sq = cv2.multiply(self.frame_imag, self.frame_imag, dtype=cv2.CV_32F)
    response = cv2.add(real_sq, imag_sq, dtype=cv2.CV_32F)

    # The location of the strongest response is the marker candidate.
    _min_val, _max_val, _min_loc, peak = cv2.minMaxLoc(response)
    self.last_marker_location = peak
    self.determine_marker_orientation(frame)
    self.determine_marker_quality(frame)
    self.pose = MarkerPose(peak[0], peak[1], self.orientation, self.quality, self.order)
    return self.pose
def __init__(self, marker_orders=None, default_kernel_size=21,
             scaling_parameter=2500, downscale_factor=1):
    """Initialize the camera driver and one MarkerTracker per marker order.

    Parameters:
        marker_orders: iterable of marker orders to track (defaults to [6]).
        default_kernel_size: kernel size handed to each MarkerTracker.
        scaling_parameter: scaling parameter handed to each MarkerTracker.
        downscale_factor: factor by which incoming frames are downscaled.
    """
    # Fix for the shared-mutable-default pitfall: the previous default
    # of [6] was one list object shared across every instance.
    if marker_orders is None:
        marker_orders = [6]

    # Open output window.
    if show_image is True:
        cv2.namedWindow('filterdemo', cv2.WINDOW_AUTOSIZE)

    # Select the camera where the images should be grabbed from.
    set_camera_focus()
    self.camera = cv2.VideoCapture(0)
    self.set_camera_resolution()

    # Storage for image processing.
    self.current_frame = None
    self.processed_frame = None
    self.running = True
    self.downscale_factor = downscale_factor

    # Storage for trackers.
    self.trackers = []
    self.old_locations = []

    # Initialize trackers.
    for marker_order in marker_orders:
        temp = MarkerTracker(marker_order, default_kernel_size, scaling_parameter)
        temp.track_marker_with_missing_black_leg = False
        self.trackers.append(temp)
        self.old_locations.append(MarkerPose(None, None, None, None, None))
def __init__(self, markerOrders=None, defaultKernelSize=21, scalingParameter=2500):
    """Initialize the camera driver and one ImageAnalyzer per marker order.

    Parameters:
        markerOrders: iterable of marker orders to track (defaults to [7, 8]).
        defaultKernelSize: kernel size handed to each marker tracker.
        scalingParameter: scaling parameter handed to each marker tracker.
    """
    # Fix for the shared-mutable-default pitfall: the previous default
    # of [7, 8] was one list object shared across every instance.
    if markerOrders is None:
        markerOrders = [7, 8]

    # Open output window.
    cv.NamedWindow('filterdemo', cv.CV_WINDOW_AUTOSIZE)
    self.setFocus()
    # Select the camera where the images should be grabbed from.
    self.camera = cv.CaptureFromCAM(0)
    # NOTE(review): setResolution() was disabled in the original; left off here.

    # Storage for image processing.
    self.currentFrame = None
    self.processedFrame = None
    self.running = True

    # Storage for trackers.
    self.trackers = []
    self.oldLocations = []

    # Initialize trackers.
    for markerOrder in markerOrders:
        temp = ImageAnalyzer(downscaleFactor=1)
        temp.addMarkerToTrack(markerOrder, defaultKernelSize, scalingParameter)
        self.trackers.append(temp)
        self.oldLocations.append(MarkerPose(None, None, None, None))
    self.cnt = 0
    self.defaultOrientation = 0
def processFrame(self):
    """Run a full-image search for every tracked marker and record each pose."""
    for idx, analyzer in enumerate(self.trackers):
        # Previous marker location is unknown, search in the entire image.
        self.processedFrame = analyzer.analyzeImage(self.currentFrame)
        tracker = analyzer.markerTrackers[0]
        self.oldLocations[idx] = MarkerPose(analyzer.markerLocationsX[0],
                                            analyzer.markerLocationsY[0],
                                            tracker.orientation,
                                            tracker.quality)
def __init__(self, marker_order, marker_size, scaling_parameter):
    """Set up a single-marker tracker with no known location yet."""
    # Storage for image processing.
    self.current_frame = None
    self.processed_frame = None
    self.running = True

    # Initialize the tracker; the location starts out entirely unknown.
    self.tracker = MarkerTracker(marker_order, marker_size, scaling_parameter)
    self.location = MarkerPose(None, None, None, None, None)
def main():
    """Demonstrate perspective correction of a point and of a pose."""
    # Correct a point using four image/world point correspondences.
    pointLocationsInImage = [[197, 136], [168, 403], [449, 169], [420, 347]]
    realCoordinates = [[0, 0], [0, 4], [6, 0], [6, 4]]
    perspectiveConverter = PerspectiveCorrecter(pointLocationsInImage, realCoordinates)
    # Fixed: the original used a bare Python 2 print statement here while
    # using the print() function below; the statement form is a syntax
    # error under Python 3.
    print(perspectiveConverter.convert([294, 269]))

    # Identity mapping: the converted pose should match the input pose.
    pointLocationsInImage = [[0, 0], [0, 4], [6, 0], [6, 4]]
    realCoordinates = [[0, 0], [0, 4], [6, 0], [6, 4]]
    perspectiveConverter = PerspectiveCorrecter(pointLocationsInImage, realCoordinates)
    pose = perspectiveConverter.convertPose(MarkerPose(50, 120, 10, 0.2))
    print("%8.3f %8.3f %8.3f %8.3f" % (pose.x, pose.y, pose.theta, pose.quality))
def processFrame(self):
    """Locate every tracked marker.

    Uses a windowed search around the previous fix when one exists,
    and a full-image search otherwise.
    """
    for k in range(len(self.trackers)):
        previous = self.oldLocations[k]
        if previous.x is None:
            # Previous marker location is unknown, search in the entire image.
            self.processedFrame = self.trackers[k].analyzeImage(self.currentFrame)
            markerX = self.trackers[k].markerLocationsX[0]
            markerY = self.trackers[k].markerLocationsY[0]
            order = self.trackers[k].markerTrackers[0].order
            self.oldLocations[k] = MarkerPose(markerX, markerY,
                                              self.defaultOrientation, 0, order)
        else:
            # Search for marker around the old location.
            self.windowedTrackers[k].cropFrame(self.currentFrame,
                                               previous.x, previous.y)
            self.oldLocations[k] = self.windowedTrackers[k].locateMarker()
            self.windowedTrackers[k].showCroppedImage()
def convertPose(self, pose):
    """Perspective-correct a MarkerPose.

    The pose is expressed as two points: its location and a point a
    short distance ahead along its heading.  Both points are run
    through the perspective correction, and the corrected orientation
    is recovered from the vector between the two corrected points.
    """
    dist = 10
    start = self.convert([pose.x, pose.y])
    end = self.convert([pose.x + dist * math.cos(pose.theta),
                        pose.y + dist * math.sin(pose.theta)])
    corrected_theta = math.atan2(end[1] - start[1], end[0] - start[0])
    return MarkerPose(start[0], start[1], corrected_theta, pose.quality, pose.order)
def locate_marker(self):
    """Locate the marker in the cropped grayscale frame.

    Draws debug overlays (position circle and heading line) on the
    reduced image and returns the pose translated to full-image
    coordinates.
    """
    red_color = (55, 55, 255)
    blue_color = (255, 0, 0)

    (xm, ym) = self.markerTracker.locate_marker(self.frameGray)
    heading = self.markerTracker.orientation

    # Mark the detection and draw a line showing the marker heading.
    cv2.circle(self.reducedImage, (xm, ym), 4, red_color, 2)
    tip = (int(xm + 50 * math.cos(heading)), int(ym + 50 * math.sin(heading)))
    cv2.line(self.reducedImage, (xm, ym), tip, blue_color, 2)

    # Offset by the crop window origin to get full-image coordinates.
    xm = xm + self.subImagePosition[0]
    ym = ym + self.subImagePosition[1]
    return MarkerPose(xm, ym, heading, self.markerTracker.quality,
                      self.markerTracker.order)
def __init__(self, markerOrders=None, defaultKernelSize=21,
             scalingParameter=2500, cameraDevice=0):
    """Initialize the capture source and one tracker pair per marker order.

    Parameters:
        markerOrders: iterable of marker orders to track (defaults to [7, 8]).
        defaultKernelSize: kernel size handed to each tracker.
        scalingParameter: scaling parameter handed to each tracker.
        cameraDevice: integer camera index, or a path to a video file.
    """
    # Fix for the shared-mutable-default pitfall: the previous default
    # of [7, 8] was one list object shared across every instance.
    if markerOrders is None:
        markerOrders = [7, 8]

    # Open output windows.
    cv.NamedWindow('filterdemo', cv.CV_WINDOW_NORMAL)
    cv.NamedWindow('temp_kernel', cv.CV_WINDOW_NORMAL)
    cv.NamedWindow('small_image', cv.CV_WINDOW_NORMAL)

    self.cameraDevice = cameraDevice
    # An integer names a camera device and we need to set some
    # properties on it; anything else is treated as a video file path.
    if isinstance(cameraDevice, numbers.Integral):
        self.setFocus()
        # Select the camera where the images should be grabbed from.
        self.camera = cv.CaptureFromCAM(cameraDevice)
    else:
        self.camera = cv.CaptureFromFile(cameraDevice)
    self.setResolution()

    # Storage for image processing.
    self.currentFrame = None
    self.processedFrame = None
    self.running = True

    # Storage for trackers.
    self.trackers = []
    self.windowedTrackers = []
    self.oldLocations = []

    # Initialize trackers.
    for markerOrder in markerOrders:
        temp = ImageAnalyzer(downscaleFactor=1)
        temp.addMarkerToTrack(markerOrder, defaultKernelSize, scalingParameter)
        self.trackers.append(temp)
        self.windowedTrackers.append(
            TrackerInWindowMode(markerOrder, defaultKernelSize))
        self.oldLocations.append(MarkerPose(None, None, None, None))
    self.cnt = 0
    self.defaultOrientation = 0
def __init__(self, marker_orders=None, default_kernel_size=14,
             scaling_parameter=2500, downscale_factor=1):
    """Initialize the ROS-driven marker locator.

    Subscribes to the image and compass-heading topics and creates one
    MarkerTracker per requested marker order.

    Parameters:
        marker_orders: iterable of marker orders to track (defaults to [4]).
        default_kernel_size: kernel size handed to each MarkerTracker.
        scaling_parameter: scaling parameter handed to each MarkerTracker.
        downscale_factor: factor by which incoming frames are downscaled.
    """
    # Fix for the shared-mutable-default pitfall: the previous default
    # of [4] was one list object shared across every instance.
    if marker_orders is None:
        marker_orders = [4]

    # Open output window.
    if show_image is True:
        cv2.namedWindow('filterdemo', cv2.WINDOW_AUTOSIZE)

    # Storage for image processing.
    self.current_frame = None
    self.processed_frame = None
    self.running = True
    self.downscale_factor = downscale_factor
    self.cv_image = None

    # Storage for trackers.
    self.trackers = []
    self.old_locations = []

    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber(markerlocator_sub_topic, Image,
                                      self.callback)
    self.heading_sub = rospy.Subscriber(
        "/mavros/global_position/compass_hdg", Float64, self.callback_heading)
    self.heading = 0

    # Camera parameters.
    # NOTE(review): presumably horizontal field of view [rad] and image
    # width [px] — confirm against the camera configuration.
    self.fov = 1.3962634
    self.pix_w = 800

    # Initialize trackers.
    for marker_order in marker_orders:
        temp = MarkerTracker(marker_order, default_kernel_size, scaling_parameter)
        self.trackers.append(temp)
        self.old_locations.append(MarkerPose(None, None, None, None, None))
def locateMarker(self):
    """Locate the marker in the cropped grayscale frame.

    Draws debug overlays (position circle and heading line) on the
    reduced image and returns the pose translated to full-image
    coordinates.
    """
    redColor = (55, 55, 255)
    blueColor = (255, 0, 0)

    (xm, ym) = self.markerTracker.locateMarker(self.frameGray)
    theta = self.markerTracker.orientation

    # Mark the detection and draw a line showing the marker heading.
    cv.Circle(self.reducedImage, (xm, ym), 4, redColor, 2)
    tip = (int(xm + 50 * math.cos(theta)), int(ym + 50 * math.sin(theta)))
    cv.Line(self.reducedImage, (xm, ym), tip, blueColor, 2)

    # Offset by the crop window origin to get full-image coordinates.
    xm = xm + self.subImagePosition[0]
    ym = ym + self.subImagePosition[1]
    return MarkerPose(xm, ym, theta, self.markerTracker.quality,
                      self.markerTracker.order)
def resetAllLocations(self):
    """Forget every marker location, forcing a full-image search next time."""
    # Reset each slot in place so existing references to the list stay valid.
    for idx, _ in enumerate(self.trackers):
        self.oldLocations[idx] = MarkerPose(None, None, None, None)
def reset_location(self):
    """Forget the marker location, forcing a full search on the next iteration."""
    self.location = MarkerPose(None, None, None, None, None)