Beispiel #1
0
    def callback(self, data):
        """ROS image callback: locate an n-fold marker in the frame and
        publish the grayscale debug image plus the marker pose.

        :param data: sensor_msgs/Image message from the subscribed topic.
        """
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            # Conversion failed: without a frame there is nothing to
            # process, so bail out instead of hitting an UnboundLocalError
            # on cv_image below.
            print(e)
            return

        frame_gray = cv2.cvtColor(cv_image, cv2.COLOR_RGB2GRAY)
        order = 10          # n-fold symmetry order of the marker
        kernel_size = 25    # side length of the detection kernel
        scale_factor = 100
        mt = MarkerTracker(order, kernel_size, scale_factor)
        self.pose = mt.locate_marker(frame_gray)
        # Draw a 200x200 box centred on the detected marker position.
        cv2.rectangle(cv_image, (self.pose.x + 100, self.pose.y + 100),
                      (self.pose.x - 100, self.pose.y - 100), (0, 255, 0), 3)
        cv2.imshow("Box", cv_image)
        cv2.waitKey(3)
        print("{} {} {} {} {} {}".format("x-pose:", self.pose.x, "y-pose:",
                                         self.pose.y, "theta:",
                                         self.pose.theta))
        height, width = cv_image.shape[:2]
        try:
            self.image_pub.publish(
                self.bridge.cv2_to_imgmsg(frame_gray, "8UC1"))
            # Publish the pose relative to the image centre.
            self.x_pose_pub.publish(self.pose.x - (width / 2))
            self.y_pose_pub.publish(self.pose.y - (height / 2))
            self.theta_pose_pub.publish(self.pose.theta)
        except CvBridgeError as e:
            print(e)
    def __init__(self, marker_orders=None, default_kernel_size=21,
                 scaling_parameter=2500, downscale_factor=1):
        """Open the camera and create one MarkerTracker per requested order.

        :param marker_orders: iterable of n-fold marker orders to track;
            defaults to [6].
        :param default_kernel_size: kernel size handed to each tracker.
        :param scaling_parameter: scaling parameter handed to each tracker.
        :param downscale_factor: factor by which frames are downscaled.
        """
        # None sentinel instead of a shared mutable default argument;
        # [6] is the effective default.
        if marker_orders is None:
            marker_orders = [6]

        # Open output window.
        if show_image is True:
            cv2.namedWindow('filterdemo', cv2.WINDOW_AUTOSIZE)

        # Select the camera where the images should be grabbed from.
        set_camera_focus()
        self.camera = cv2.VideoCapture(0)
        self.set_camera_resolution()

        # Storage for image processing.
        self.current_frame = None
        self.processed_frame = None
        self.running = True
        self.downscale_factor = downscale_factor

        # Storage for trackers.
        self.trackers = []
        self.old_locations = []

        # Initialize trackers.
        for marker_order in marker_orders:
            temp = MarkerTracker(marker_order, default_kernel_size,
                                 scaling_parameter)
            temp.track_marker_with_missing_black_leg = False
            self.trackers.append(temp)
            self.old_locations.append(MarkerPose(None, None, None, None, None))
    def __init__(self,
                 marker_orders=None,
                 default_kernel_size=21,
                 scaling_parameter=2500,
                 downscale_factor=1):
        """Open the camera and create one MarkerTracker per requested order.

        :param marker_orders: iterable of n-fold marker orders to track;
            defaults to [6].
        :param default_kernel_size: kernel size handed to each tracker.
        :param scaling_parameter: scaling parameter handed to each tracker.
        :param downscale_factor: factor by which frames are downscaled.
        """
        # None sentinel instead of a shared mutable default argument;
        # [6] is the effective default.
        if marker_orders is None:
            marker_orders = [6]

        # Open output window.
        if show_image is True:
            cv2.namedWindow('filterdemo', cv2.WINDOW_AUTOSIZE)

        # Select the camera where the images should be grabbed from.
        set_camera_focus()
        self.camera = cv2.VideoCapture(0)
        self.set_camera_resolution()

        # Storage for image processing.
        self.current_frame = None
        self.processed_frame = None
        self.running = True
        self.downscale_factor = downscale_factor

        # Storage for trackers.
        self.trackers = []
        self.old_locations = []

        # Initialize trackers.
        for marker_order in marker_orders:
            temp = MarkerTracker(marker_order, default_kernel_size,
                                 scaling_parameter)
            temp.track_marker_with_missing_black_leg = False
            self.trackers.append(temp)
            self.old_locations.append(MarkerPose(None, None, None, None, None))
 def __init__(self, order = 7, defaultKernelSize = 21):
     """Set up a fixed 100x100 tracking window and its MarkerTracker.

     :param order: n-fold symmetry order of the marker to track.
     :param defaultKernelSize: kernel size for the underlying tracker.
     """
     #cv.NamedWindow('reducedWindow', cv.CV_WINDOW_AUTOSIZE)
     self.windowWidth = 100
     self.windowHeight = 100
     # Pre-allocate legacy-cv float images for the cropped window:
     # 1-channel grayscale and 3-channel colour.
     self.frameGray = cv.CreateImage ((self.windowWidth, self.windowHeight), cv.IPL_DEPTH_32F, 1)
     self.originalImage = cv.CreateImage((self.windowWidth, self.windowHeight), cv.IPL_DEPTH_32F, 3)
     self.markerTracker = MarkerTracker(order, defaultKernelSize, 2500)
     self.trackerIsInitialized = False
     # Position of the cropped sub-image, set on the first crop.
     self.subImagePosition = None
     pass
Beispiel #5
0
 def __init__(self, order=7, defaultKernelSize=21):
     """Set up a fixed 100x100 tracking window and its MarkerTracker.

     :param order: n-fold symmetry order of the marker to track.
     :param defaultKernelSize: kernel size for the underlying tracker.
     """
     self.window_width = 100
     self.window_height = 100
     # Pre-allocated float buffers for the cropped window
     # (grayscale and colour).
     self.frameGray = np.zeros((self.window_height, self.window_width, 1),
                               dtype=np.float32)
     self.originalImage = np.zeros(
         (self.window_height, self.window_width, 3), dtype=np.float32)
     self.markerTracker = MarkerTracker(order, defaultKernelSize, 2500)
     self.trackerIsInitialized = False
     # Set on the first crop; (x, y, width, height) of the sub-image.
     self.subImagePosition = None
     self.reducedImage = None
    def __init__(self, marker_order, marker_size, scaling_parameter):
        """Create the driver state and its single marker tracker.

        :param marker_order: n-fold symmetry order of the marker.
        :param marker_size: kernel size for the tracker.
        :param scaling_parameter: scaling parameter for the tracker.
        """
        # The single tracker plus its last known marker location.
        self.tracker = MarkerTracker(marker_order, marker_size,
                                     scaling_parameter)
        self.location = MarkerPose(None, None, None, None, None)

        # Frame buffers and run flag for the processing loop.
        self.current_frame = None
        self.processed_frame = None
        self.running = True
class CameraDriver:
    """Grabs camera frames, locates a single n-fold marker in each frame
    and displays the annotated result in an OpenCV window."""

    def __init__(self, marker_order, marker_size, scaling_parameter):
        """Set up frame storage and the single marker tracker.

        :param marker_order: n-fold symmetry order of the marker.
        :param marker_size: kernel size handed to the tracker.
        :param scaling_parameter: scaling parameter handed to the tracker.
        """
        # Storage for image processing.
        self.current_frame = None
        self.processed_frame = None
        self.running = True

        # Initialize trackers.
        self.tracker = MarkerTracker(marker_order, marker_size,
                                     scaling_parameter)
        self.location = MarkerPose(None, None, None, None, None)

    def open_image_window(self):
        """Open the output window used by show_processed_frame()."""
        cv2.namedWindow('filterdemo', cv2.cv.CV_WINDOW_AUTOSIZE)

    def process_frame(self):
        """Search the entire current frame for the marker.

        Leaves a BGR drawing copy in self.processed_frame and the
        detected pose in self.location.
        """
        frame_gray = self.current_frame
        # BGR copy used as the drawing canvas for the overlay.
        self.processed_frame = cv2.cvtColor(self.current_frame,
                                            cv2.cv.CV_GRAY2BGR)

        # Previous marker location is unknown, search in the entire image.
        self.current_frame = self.tracker.locate_marker(frame_gray)
        self.location = self.tracker.pose

    def show_processed_frame(self):
        """Draw the marker position and orientation, then show the frame."""
        xm = self.location.x
        ym = self.location.y
        orientation = self.location.theta
        # Thin circle for low-quality detections, thick for confident ones.
        if self.location.quality < 0.9:
            cv2.circle(self.processed_frame, (xm, ym), 4, (55, 55, 255), 1)
        else:
            cv2.circle(self.processed_frame, (xm, ym), 4, (55, 55, 255), 3)
        # Orientation indicator: a 50 px line along the marker heading.
        xm2 = int(xm + 50 * math.cos(orientation))
        ym2 = int(ym + 50 * math.sin(orientation))
        cv2.line(self.processed_frame, (xm, ym), (xm2, ym2), (255, 0, 0), 2)
        cv2.imshow('filterdemo', self.processed_frame)

    def reset_location(self):
        # Reset all markers locations, forcing a full search on the next iteration.
        self.location = MarkerPose(None, None, None, None, None)

    def handle_keyboard_events(self):
        """React to Esc (quit), R (reset tracking) and S (save frame)."""
        key = cv2.waitKey(100)
        # Discard higher order bit, http://permalink.gmane.org/gmane.comp.lib.opencv.devel/410
        key = key & 0xff
        if key == 27:  # Esc
            self.running = False
        if key == 114:  # R
            print("Resetting")
            self.reset_location()
        if key == 115:  # S
            # save image
            print("Saving image")
            filename = strftime("%Y-%m-%d %H-%M-%S")
            cv2.imwrite("output/%s.png" % filename, self.current_frame)
class TrackerInWindowMode:
    """Tracks a marker inside a small window cropped around its last
    known position, avoiding a full-frame search on every frame."""

    def __init__(self, order=7, defaultKernelSize=21):
        # Fixed size of the search window in pixels.
        self.window_width = 100
        self.window_height = 100
        # Pre-allocated buffers for the cropped window.
        self.frameGray = np.zeros((self.window_height, self.window_width, 1), dtype=np.float32)
        self.originalImage = np.zeros((self.window_height, self.window_width, 3), dtype=np.float32)
        self.markerTracker = MarkerTracker(order, defaultKernelSize, 2500)
        self.trackerIsInitialized = False
        # (x, y, width, height) of the cropped sub-image, set by crop_frame.
        self.subImagePosition = None
        self.reducedImage = None

    def crop_frame(self, frame, last_marker_location_x, last_marker_location_y):
        """Crop a window around the last marker location (clamped to the
        image borders) and store its grayscale version in self.frameGray."""
        if not self.trackerIsInitialized:
            self.reducedImage = np.zeros((self.window_height, self.window_width, 3), frame.dtype)
        # NOTE(review): '/' here is float division on Python 3 but integer
        # division on Python 2; this file contains Python 2 print
        # statements, so Python 2 semantics are presumably intended.
        x_corner_pos = last_marker_location_x - self.window_width / 2
        y_corner_pos = last_marker_location_y - self.window_height / 2
        # Ensure that extracted window is inside the original image.
        if x_corner_pos < 1:
            x_corner_pos = 1
        if y_corner_pos < 1:
            y_corner_pos = 1
        if x_corner_pos > frame.shape[1] - self.window_width:
            x_corner_pos = frame.shape[1] - self.window_width
        if y_corner_pos > frame.shape[0] - self.window_height:
            y_corner_pos = frame.shape[0] - self.window_height
        try:
            self.subImagePosition = (x_corner_pos, y_corner_pos, self.window_width, self.window_height)
            # Slice out the window: rows y..y+h, columns x..x+w.
            self.reducedImage = frame[self.subImagePosition[1]:self.subImagePosition[1]+self.subImagePosition[3],
                                      self.subImagePosition[0]:self.subImagePosition[0]+self.subImagePosition[2],
                                      :]
            self.frameGray = cv2.cvtColor(self.reducedImage, cv2.cv.CV_RGB2GRAY)
        except:
            # NOTE(review): bare except silently swallows all errors,
            # including KeyboardInterrupt; kept as-is for behavior parity.
            print("frame: ", frame.dtype)
            print("originalImage: ", self.originalImage.shape[0], self.originalImage.shape[1], self.originalImage)
            print("frameGray: ", self.frameGray.shape[0], self.frameGray.shape[1], self.frameGray.dtype)
            print "Unexpected error:", sys.exc_info()[0]
            pass

    def locate_marker(self):
        """Locate the marker inside the cropped window, draw position and
        orientation on the window, and return the pose in full-frame
        coordinates."""
        (xm, ym) = self.markerTracker.locate_marker(self.frameGray)

        red_color = (55, 55, 255)
        blue_color = (255, 0, 0)

        orientation = self.markerTracker.orientation
        cv2.circle(self.reducedImage, (xm, ym), 4, red_color, 2)
        # 50 px orientation indicator line.
        xm2 = int(xm + 50*math.cos(orientation))
        ym2 = int(ym + 50*math.sin(orientation))
        cv2.line(self.reducedImage, (xm, ym), (xm2, ym2), blue_color, 2)

        # Translate the window coordinates back to full-frame coordinates.
        xm = xm + self.subImagePosition[0]
        ym = ym + self.subImagePosition[1]

        return MarkerPose(xm, ym, orientation, self.markerTracker.quality, self.markerTracker.order)

    def show_cropped_image(self):
        # Intentionally a no-op in this variant.
        pass
 def __init__(self, order=7, defaultKernelSize=21):
     """Set up a fixed 100x100 tracking window and its MarkerTracker.

     :param order: n-fold symmetry order of the marker to track.
     :param defaultKernelSize: kernel size for the underlying tracker.
     """
     self.window_width = 100
     self.window_height = 100
     # Pre-allocated float buffers for the cropped window.
     self.frameGray = np.zeros((self.window_height, self.window_width, 1), dtype=np.float32)
     self.originalImage = np.zeros((self.window_height, self.window_width, 3), dtype=np.float32)
     self.markerTracker = MarkerTracker(order, defaultKernelSize, 2500)
     self.trackerIsInitialized = False
     # Set on the first crop; (x, y, width, height) of the sub-image.
     self.subImagePosition = None
     self.reducedImage = None
class CameraDriver:
	"""Grabs camera frames, locates a single n-fold marker in each frame
	and displays the annotated result in an OpenCV window."""

	def __init__(self, marker_order, marker_size, scaling_parameter):

		# Storage for image processing.
		self.current_frame = None
		self.processed_frame = None
		self.running = True

		# Initialize trackers.
		self.tracker = MarkerTracker(marker_order, marker_size, scaling_parameter)
		self.location = MarkerPose(None, None, None, None, None)

	def open_image_window(self):
		"""Open the output window used by show_processed_frame()."""
		cv2.namedWindow('filterdemo', cv2.cv.CV_WINDOW_AUTOSIZE)

	def process_frame(self):
		"""Search the entire current frame for the marker and store the
		detected pose in self.location."""
		self.processed_frame = self.current_frame
		frame_gray = self.current_frame
		# BGR copy used as the drawing canvas for the overlay.
		self.processed_frame = cv2.cvtColor(self.current_frame, cv2.cv.CV_GRAY2BGR)
		
		# Previous marker location is unknown, search in the entire image.
		self.current_frame = self.tracker.locate_marker(frame_gray)
		self.location = self.tracker.pose

	def show_processed_frame(self):
		"""Draw the marker position and orientation, then show the frame."""
		xm = self.location.x
		ym = self.location.y
		orientation = self.location.theta
		# Thin circle for low-quality detections, thick for confident ones.
		if self.location.quality < 0.9:
			cv2.circle(self.processed_frame, (xm, ym), 4, (55, 55, 255), 1)
		else:
			cv2.circle(self.processed_frame, (xm, ym), 4, (55, 55, 255), 3)
		# 50 px orientation indicator line.
		xm2 = int(xm + 50 * math.cos(orientation))
		ym2 = int(ym + 50 * math.sin(orientation))
		cv2.line(self.processed_frame, (xm, ym), (xm2, ym2), (255, 0, 0), 2)
		cv2.imshow('filterdemo', self.processed_frame)

	def reset_location(self):
		# Reset all markers locations, forcing a full search on the next iteration.
		self.location = MarkerPose(None, None, None, None, None)

	def handle_keyboard_events(self):
		"""React to Esc (quit), R (reset tracking) and S (save frame)."""
		key = cv2.waitKey(100)
		# Discard higher order bit, http://permalink.gmane.org/gmane.comp.lib.opencv.devel/410
		key = key & 0xff
		if key == 27:  # Esc
			self.running = False
		if key == 114:  # R
			print("Resetting")
			self.reset_location()
		if key == 115:  # S
			# save image
			print("Saving image")
			filename = strftime("%Y-%m-%d %H-%M-%S")
			cv2.imwrite("output/%s.png" % filename, self.current_frame)
 def __init__(self, order = 7, defaultKernelSize = 21):
     """Set up a fixed 100x100 tracking window and its MarkerTracker.

     :param order: n-fold symmetry order of the marker to track.
     :param defaultKernelSize: kernel size for the underlying tracker.
     """
     #cv.NamedWindow('reducedWindow', cv.CV_WINDOW_AUTOSIZE)
     self.windowWidth = 100
     self.windowHeight = 100
     # Pre-allocate legacy-cv float images for the cropped window.
     self.frameGray = cv.CreateImage ((self.windowWidth, self.windowHeight), cv.IPL_DEPTH_32F, 1)
     self.originalImage = cv.CreateImage((self.windowWidth, self.windowHeight), cv.IPL_DEPTH_32F, 3)
     self.markerTracker = MarkerTracker(order, defaultKernelSize, 2500)
     self.trackerIsInitialized = False
     # Set on the first crop; (x, y, width, height) of the sub-image.
     self.subImagePosition = None
     pass
 def __init__(self, order = 7, defaultKernelSize = 21):
     """Set up a fixed 100x100 tracking window and its MarkerTracker.

     :param order: n-fold symmetry order of the marker to track.
     :param defaultKernelSize: kernel size for the underlying tracker.
     """
     #cv.NamedWindow('reducedWindow', cv.CV_WINDOW_AUTOSIZE)
     self.windowWidth = 100
     self.windowHeight = 100
     # Pre-allocated float buffers for the cropped window.
     self.frameGray = np.zeros((self.windowHeight, self.windowWidth,1), dtype=np.float32)
     self.originalImage = np.zeros((self.windowHeight, self.windowWidth,3), dtype=np.float32)
     self.markerTracker = MarkerTracker(order, defaultKernelSize, 2500)
     self.trackerIsInitialized = False
     # Set on the first crop; (x, y, width, height) of the sub-image.
     self.subImagePosition = None
     pass
	def __init__(self, marker_order, marker_size, scaling_parameter):
		"""Create the driver state and its single marker tracker.

		:param marker_order: n-fold symmetry order of the marker.
		:param marker_size: kernel size for the tracker.
		:param scaling_parameter: scaling parameter for the tracker.
		"""
		# Storage for image processing.
		self.current_frame = None
		self.processed_frame = None
		self.running = True

		# Initialize trackers.
		self.tracker = MarkerTracker(marker_order, marker_size, scaling_parameter)
		self.location = MarkerPose(None, None, None, None, None)
Beispiel #14
0
    def callback(self, data):
        """ROS image callback: locate an n-fold marker in the frame and
        publish the marker pose plus the grayscale debug image.

        :param data: sensor_msgs/Image message from the subscribed topic.
        """
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            # Conversion failed: without a frame there is nothing to
            # process, so bail out instead of hitting an UnboundLocalError
            # on cv_image below.
            print(e)
            return

        frame_gray = cv2.cvtColor(cv_image, cv2.COLOR_RGB2GRAY)
        order = 9           # n-fold symmetry order of the marker
        kernel_size = 25    # side length of the detection kernel
        scale_factor = 100
        mt = MarkerTracker(order, kernel_size, scale_factor)
        self.pose = mt.locate_marker(frame_gray)
        # Draw a 200x200 box centred on the detected marker position.
        cv2.rectangle(frame_gray, (self.pose.x + 100, self.pose.y + 100),
                      (self.pose.x - 100, self.pose.y - 100), (0, 255, 0), 3)

        height, width = cv_image.shape[:2]
        try:
            # Marker position relative to the image centre.
            currX = self.pose.x - (width / 2)
            currY = self.pose.y - (height / 2)
            msg = MarkerLocation(x=currX,
                                 y=currY,
                                 theta=self.pose.theta,
                                 quality=self.pose.quality)
            self.marker_pose_pub.publish(msg)

            # Camera feed with marker detection overlay.
            self.image_pub.publish(
                self.bridge.cv2_to_imgmsg(frame_gray, '8UC1'))
        except CvBridgeError as e:
            print(e)
Beispiel #15
0
    def __init__(self,
                 marker_orders=None,
                 default_kernel_size=14,
                 scaling_parameter=2500,
                 downscale_factor=1):
        """Set up ROS subscriptions and one MarkerTracker per order.

        :param marker_orders: iterable of n-fold marker orders to track;
            defaults to [4].
        :param default_kernel_size: kernel size handed to each tracker.
        :param scaling_parameter: scaling parameter handed to each tracker.
        :param downscale_factor: factor by which frames are downscaled.
        """
        # None sentinel instead of a shared mutable default argument;
        # [4] is the effective default.
        if marker_orders is None:
            marker_orders = [4]

        # Open output window.
        if show_image is True:
            cv2.namedWindow('filterdemo', cv2.WINDOW_AUTOSIZE)

        # Storage for image processing.
        self.current_frame = None
        self.processed_frame = None
        self.running = True
        self.downscale_factor = downscale_factor
        self.cv_image = None

        # Storage for trackers.
        self.trackers = []
        self.old_locations = []

        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber(markerlocator_sub_topic, Image,
                                          self.callback)
        self.heading_sub = rospy.Subscriber(
            "/mavros/global_position/compass_hdg", Float64,
            self.callback_heading)
        self.heading = 0

        # Camera parameters: horizontal field of view (radians) and
        # image width in pixels.
        self.fov = 1.3962634
        self.pix_w = 800

        # Initialize trackers.
        for marker_order in marker_orders:
            temp = MarkerTracker(marker_order, default_kernel_size,
                                 scaling_parameter)
            self.trackers.append(temp)
            self.old_locations.append(MarkerPose(None, None, None, None, None))
class TrackerInWindowMode:
    """Legacy-cv variant: tracks a marker inside a 100x100 window cropped
    around its last known position, avoiding a full-frame search."""

    def __init__(self, order=7, defaultKernelSize=21):
        #cv.NamedWindow('reducedWindow', cv.CV_WINDOW_AUTOSIZE)
        # Fixed size of the search window in pixels.
        self.windowWidth = 100
        self.windowHeight = 100
        # Pre-allocate legacy-cv float images for the cropped window.
        self.frameGray = cv.CreateImage((self.windowWidth, self.windowHeight),
                                        cv.IPL_DEPTH_32F, 1)
        self.originalImage = cv.CreateImage(
            (self.windowWidth, self.windowHeight), cv.IPL_DEPTH_32F, 3)
        self.markerTracker = MarkerTracker(order, defaultKernelSize, 2500)
        self.trackerIsInitialized = False
        # (x, y, width, height) of the cropped sub-image, set by cropFrame.
        self.subImagePosition = None
        pass

    def cropFrame(self, frame, lastMarkerLocationX, lastMarkerLocationY):
        """Crop a window around the last marker location (clamped to the
        image borders) and store its grayscale version in self.frameGray."""
        if (not self.trackerIsInitialized):
            self.markerTracker.allocateSpaceGivenFirstFrame(self.originalImage)
            self.reducedImage = cv.CreateImage(
                (self.windowWidth, self.windowHeight), frame.depth, 3)
        xCornerPos = lastMarkerLocationX - self.windowWidth / 2
        yCornerPos = lastMarkerLocationY - self.windowHeight / 2
        # Ensure that extracted window is inside the original image.
        if (xCornerPos < 1):
            xCornerPos = 1
        if (yCornerPos < 1):
            yCornerPos = 1
        if (xCornerPos > frame.width - self.windowWidth):
            xCornerPos = frame.width - self.windowWidth
        if (yCornerPos > frame.height - self.windowHeight):
            yCornerPos = frame.height - self.windowHeight
        try:
            self.subImagePosition = (xCornerPos, yCornerPos, self.windowWidth,
                                     self.windowHeight)
            self.reducedImage = cv.GetSubRect(frame, self.subImagePosition)
            cv.ConvertScale(self.reducedImage, self.originalImage)
            cv.CvtColor(self.originalImage, self.frameGray, cv.CV_RGB2GRAY)
        except:
            # NOTE(review): bare except silently swallows all errors,
            # including KeyboardInterrupt; kept as-is for behavior parity.
            print("frame: ", frame.depth)
            print("originalImage: ", self.originalImage.height,
                  self.originalImage.width, self.originalImage)
            print("frameGray: ", self.frameGray.height, self.frameGray.width,
                  self.frameGray.depth)
            print "Unexpected error:", sys.exc_info()[0]
            #quit(0)
            pass

    def locateMarker(self):
        """Locate the marker inside the cropped window, draw position and
        orientation on it, and return the pose in full-frame coordinates."""
        (xm, ym) = self.markerTracker.locateMarker(self.frameGray)
        #xm = 50
        #ym = 50
        #cv.Line(self.reducedImage, (0, ym), (self.originalImage.width, ym), (0, 0, 255)) # B, G, R
        #cv.Line(self.reducedImage, (xm, 0), (xm, self.originalImage.height), (0, 0, 255))

        redColor = (55, 55, 255)
        blueColor = (255, 0, 0)

        orientation = self.markerTracker.orientation
        cv.Circle(self.reducedImage, (xm, ym), 4, redColor, 2)
        # 50 px orientation indicator line.
        xm2 = int(xm + 50 * math.cos(orientation))
        ym2 = int(ym + 50 * math.sin(orientation))
        cv.Line(self.reducedImage, (xm, ym), (xm2, ym2), blueColor, 2)

        # Translate the window coordinates back to full-frame coordinates.
        xm = xm + self.subImagePosition[0]
        ym = ym + self.subImagePosition[1]
        #print((xm, ym))

        #return [xm, ym, orientation, self.markerTracker.quality]
        return MarkerPose(xm, ym, orientation, self.markerTracker.quality,
                          self.markerTracker.order)

    def showCroppedImage(self):
        # Intentionally a no-op in this variant.
        pass
Beispiel #17
0
class CameraDriver:
    """Camera driver variant with a busy flag so that a frame arriving
    while the previous one is still being processed is skipped."""

    def __init__(self, marker_order, marker_size, scaling_parameter):

        # Storage for image processing.
        self.current_frame = None
        self.processed_frame = None
        self.running = True
        # Guards against re-entrant processing; cleared at the end of
        # show_processed_frame().
        self.busy = False

        # Initialize trackers.
        self.tracker = MarkerTracker(marker_order, marker_size,
                                     scaling_parameter)
        self.location = MarkerPose(None, None, None, None, None)

    def process_frame(self):
        """Search the entire current frame for the marker unless a
        previous frame is still being processed."""
        if self.busy == False:
            self.busy = True
            self.processed_frame = self.current_frame
            frame_gray = self.current_frame
            # OpenCV 2.x exposes colour codes under the legacy cv2.cv names.
            if LooseVersion(cv2.__version__).version[0] == 2:
                self.processed_frame = cv2.cvtColor(self.current_frame,
                                                    cv2.cv.CV_GRAY2BGR)
            else:
                self.processed_frame = cv2.cvtColor(self.current_frame,
                                                    cv2.COLOR_GRAY2BGR)

            # Previous marker location is unknown, search in the entire image.
            #self.current_frame = self.tracker.locate_marker(frame_gray)
            current_pose = self.tracker.locate_marker(frame_gray)
            self.location = self.tracker.pose
        else:
            print 'skipping'

    def show_processed_frame(self):
        """Draw the marker, show the frame, handle key presses and clear
        the busy flag.

        NOTE(review): the busy flag set in process_frame() is only cleared
        here, so this method must be called after every processed frame.
        """
        xm = self.location.x
        ym = self.location.y
        orientation = self.location.theta
        # Thin circle for low-quality detections, thick for confident ones.
        if self.location.quality < 0.9:
            cv2.circle(self.processed_frame, (xm, ym), 4, (55, 55, 255), 1)
        else:
            cv2.circle(self.processed_frame, (xm, ym), 4, (55, 55, 255), 3)
        # 50 px orientation indicator line.
        xm2 = int(xm + 50 * math.cos(orientation))
        ym2 = int(ym + 50 * math.sin(orientation))
        cv2.line(self.processed_frame, (xm, ym), (xm2, ym2), (255, 0, 0), 2)

        cv2.imshow('filterdemo', self.processed_frame)
        key = cv2.waitKey(1)

        # Discard higher order bit, http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_image_display/py_image_display.html
        key = key & 0xff
        if key == 27:  # Esc
            self.running = False
        if key == 114:  # R
            print("Resetting")
            self.reset_location()
        if key == 115:  # S
            # save image
            print("Saving image")
            filename = strftime("%Y-%m-%d %H-%M-%S")
            cv2.imwrite("markerlocator_image_%s.png" % filename,
                        self.current_frame)
        self.busy = False

    def reset_location(self):
        # Reset all markers locations, forcing a full search on the next iteration.
        self.location = MarkerPose(None, None, None, None, None)
class CameraDriver:
	"""Camera driver variant with a busy flag so that a frame arriving
	while the previous one is still being processed is skipped."""

	def __init__(self, marker_order, marker_size, scaling_parameter):

		# Storage for image processing.
		self.current_frame = None
		self.processed_frame = None
		self.running = True
		# Guards against re-entrant processing; cleared at the end of
		# show_processed_frame().
		self.busy = False

		# Initialize trackers.
		self.tracker = MarkerTracker(marker_order, marker_size, scaling_parameter)
		self.location = MarkerPose(None, None, None, None, None)

	def process_frame(self):
		"""Search the entire current frame for the marker unless a
		previous frame is still being processed."""
		if self.busy == False:
			self.busy = True
			self.processed_frame = self.current_frame
			frame_gray = self.current_frame
			# OpenCV 2.x exposes colour codes under the legacy cv2.cv names.
			if LooseVersion(cv2.__version__).version[0] == 2:
				self.processed_frame = cv2.cvtColor(self.current_frame, cv2.cv.CV_GRAY2BGR)
			else:
				self.processed_frame = cv2.cvtColor(self.current_frame, cv2.COLOR_GRAY2BGR)
	
			# Previous marker location is unknown, search in the entire image.
			#self.current_frame = self.tracker.locate_marker(frame_gray)
			current_pose = self.tracker.locate_marker(frame_gray)
			self.location = self.tracker.pose
		else:
			print 'skipping'

	def show_processed_frame(self):
		"""Draw the marker, show the frame, handle key presses and clear
		the busy flag.

		NOTE(review): the busy flag set in process_frame() is only cleared
		here, so this method must be called after every processed frame.
		"""
		xm = self.location.x
		ym = self.location.y
		orientation = self.location.theta
		# Thin circle for low-quality detections, thick for confident ones.
		if self.location.quality < 0.9:
			cv2.circle(self.processed_frame, (xm, ym), 4, (55, 55, 255), 1)
		else:
			cv2.circle(self.processed_frame, (xm, ym), 4, (55, 55, 255), 3)
		# 50 px orientation indicator line.
		xm2 = int(xm + 50 * math.cos(orientation))
		ym2 = int(ym + 50 * math.sin(orientation))
		cv2.line(self.processed_frame, (xm, ym), (xm2, ym2), (255, 0, 0), 2)

		cv2.imshow('filterdemo', self.processed_frame)
		key = cv2.waitKey(1)

		# Discard higher order bit, http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_image_display/py_image_display.html
		key = key & 0xff
		if key == 27:  # Esc
			self.running = False
		if key == 114:  # R
			print("Resetting")
			self.reset_location()
		if key == 115:  # S
			# save image
			print("Saving image")
			filename = strftime("%Y-%m-%d %H-%M-%S")
			cv2.imwrite("markerlocator_image_%s.png" % filename, self.current_frame)
		self.busy = False

	def reset_location(self):
		# Reset all markers locations, forcing a full search on the next iteration.
		self.location = MarkerPose(None, None, None, None, None)
Beispiel #19
0
from MarkerTracker import MarkerTracker
import numpy as np
import cv2
import cv

from time import sleep

# Symmetry-detector kernel demo: build the order-n and order-1 kernels,
# rotate the order-n kernel by a fixed phase and threshold its real part.
kernelSize = 31
order=5

# Use the shared `order` constant (was a repeated literal 5).
markerTracker = MarkerTracker(order, kernelSize, 1)

(kernel_order_n_Real, kernel_order_n_Imag) = markerTracker.generateSymmetryDetectorKernel(order, kernelSize)
(kernel_order_1_Real, kernel_order_1_Imag) = markerTracker.generateSymmetryDetectorKernel(1, kernelSize)

# Combine real and imaginary parts into complex-valued kernels.
kernel_order_1_array = np.array(kernel_order_1_Real + 1j*kernel_order_1_Imag, dtype=complex)
kernel_order_n_array = np.array(kernel_order_n_Real + 1j*kernel_order_n_Imag, dtype=complex)

# Threshold at 40% of the kernel's maximum magnitude.
absolute = np.absolute(kernel_order_n_array)
threshold = 0.4*absolute.max()


# Fixed phase rotation of roughly 0.1*pi radians.
phase = np.exp(0.1*3.14*1j)

t1_temp = kernel_order_n_array*np.power(phase, order)
t1 = t1_temp.real > threshold

# NOTE(review): t2_temp is computed identically to t1_temp; only the
# threshold sign differs below. Possibly a different phase was intended.
t2_temp = kernel_order_n_array*np.power(phase, order)
t2 = t2_temp.real < -threshold

# +1 where strongly positive, -1 where strongly negative, 0 elsewhere.
img_t1_t2_diff = t1.astype(np.float32)-t2.astype(np.float32)
Beispiel #20
0
 def addMarkerToTrack(self, order, kernelSize, scaleFactor):
     """Register an additional marker to track.

     :param order: n-fold symmetry order of the new marker.
     :param kernelSize: kernel size for its tracker.
     :param scaleFactor: scaling factor for its tracker.
     """
     self.markerTrackers.append(
         MarkerTracker(order, kernelSize, scaleFactor))
     # Location starts at the origin until the first detection.
     self.markerLocationsX.append(0)
     self.markerLocationsY.append(0)
     # Force the per-marker sub-trackers to be re-initialized.
     self.subClassesInitialized = False
class TrackerInWindowMode:
    """Legacy-cv variant: tracks a marker inside a 100x100 window cropped
    around its last known position, avoiding a full-frame search."""

    def __init__(self, order = 7, defaultKernelSize = 21):
        #cv.NamedWindow('reducedWindow', cv.CV_WINDOW_AUTOSIZE)
        # Fixed size of the search window in pixels.
        self.windowWidth = 100
        self.windowHeight = 100
        # Pre-allocate legacy-cv float images for the cropped window.
        self.frameGray = cv.CreateImage ((self.windowWidth, self.windowHeight), cv.IPL_DEPTH_32F, 1)
        self.originalImage = cv.CreateImage((self.windowWidth, self.windowHeight), cv.IPL_DEPTH_32F, 3)
        self.markerTracker = MarkerTracker(order, defaultKernelSize, 2500)
        self.trackerIsInitialized = False
        # (x, y, width, height) of the cropped sub-image, set by cropFrame.
        self.subImagePosition = None
        pass

    def cropFrame(self, frame, lastMarkerLocationX, lastMarkerLocationY):
        """Crop a window around the last marker location (clamped to the
        image borders) and store its grayscale version in self.frameGray."""
        if(not self.trackerIsInitialized):
            self.markerTracker.allocateSpaceGivenFirstFrame(self.originalImage)
            self.reducedImage = cv.CreateImage((self.windowWidth, self.windowHeight), frame.depth, 3)
        xCornerPos = lastMarkerLocationX - self.windowWidth / 2
        yCornerPos = lastMarkerLocationY - self.windowHeight / 2
        # Ensure that extracted window is inside the original image.
        if(xCornerPos < 1):
            xCornerPos = 1
        if(yCornerPos < 1):
            yCornerPos = 1
        if(xCornerPos > frame.width - self.windowWidth):
            xCornerPos = frame.width - self.windowWidth
        if(yCornerPos > frame.height - self.windowHeight):
            yCornerPos = frame.height - self.windowHeight
        try:
            self.subImagePosition = (xCornerPos, yCornerPos, self.windowWidth, self.windowHeight)
            self.reducedImage = cv.GetSubRect(frame, self.subImagePosition)
            cv.ConvertScale(self.reducedImage, self.originalImage)
            cv.CvtColor(self.originalImage, self.frameGray, cv.CV_RGB2GRAY)
        except:
            # NOTE(review): bare except silently swallows all errors,
            # including KeyboardInterrupt; kept as-is for behavior parity.
            print("frame: ", frame.depth)
            print("originalImage: ", self.originalImage.height, self.originalImage.width, self.originalImage)
            print("frameGray: ", self.frameGray.height, self.frameGray.width, self.frameGray.depth)
            print "Unexpected error:", sys.exc_info()[0]
            #quit(0)
            pass

    def locateMarker(self):
        """Locate the marker inside the cropped window, draw position and
        orientation on it, and return the pose in full-frame coordinates."""
        (xm, ym) = self.markerTracker.locateMarker(self.frameGray)
        #xm = 50
        #ym = 50
        #cv.Line(self.reducedImage, (0, ym), (self.originalImage.width, ym), (0, 0, 255)) # B, G, R
        #cv.Line(self.reducedImage, (xm, 0), (xm, self.originalImage.height), (0, 0, 255))

        redColor = (55, 55, 255)
        blueColor = (255, 0, 0)

        orientation = self.markerTracker.orientation
        cv.Circle(self.reducedImage, (xm, ym), 4, redColor, 2)
        # 50 px orientation indicator line.
        xm2 = int(xm + 50*math.cos(orientation))
        ym2 = int(ym + 50*math.sin(orientation))
        cv.Line(self.reducedImage, (xm, ym), (xm2, ym2), blueColor, 2)

        # Translate the window coordinates back to full-frame coordinates.
        xm = xm + self.subImagePosition[0]
        ym = ym + self.subImagePosition[1]
        #print((xm, ym))

        #return [xm, ym, orientation, self.markerTracker.quality]
        return MarkerPose(xm, ym, orientation, self.markerTracker.quality, self.markerTracker.order)

    def showCroppedImage(self):
        # Intentionally a no-op in this variant.
        pass
Beispiel #22
0
class TrackerInWindowMode:
    """Tracks a marker inside a small window cropped around its last
    known position, avoiding a full-frame search on every frame."""

    def __init__(self, order=7, defaultKernelSize=21):
        # Fixed size of the search window in pixels.
        self.window_width = 100
        self.window_height = 100
        # Pre-allocated float buffers for the cropped window.
        self.frameGray = np.zeros((self.window_height, self.window_width, 1),
                                  dtype=np.float32)
        self.originalImage = np.zeros(
            (self.window_height, self.window_width, 3), dtype=np.float32)
        self.markerTracker = MarkerTracker(order, defaultKernelSize, 2500)
        self.trackerIsInitialized = False
        # (x, y, width, height) of the cropped sub-image, set by crop_frame.
        self.subImagePosition = None
        self.reducedImage = None

    def crop_frame(self, frame, last_marker_location_x,
                   last_marker_location_y):
        """Crop a window around the last marker location (clamped to the
        image borders) and store its grayscale version in self.frameGray."""
        if not self.trackerIsInitialized:
            self.reducedImage = np.zeros(
                (self.window_height, self.window_width, 3), frame.dtype)
        # NOTE(review): '/' is float division on Python 3 but integer
        # division on Python 2; this file contains Python 2 print
        # statements, so Python 2 semantics are presumably intended.
        x_corner_pos = last_marker_location_x - self.window_width / 2
        y_corner_pos = last_marker_location_y - self.window_height / 2
        # Ensure that extracted window is inside the original image.
        if x_corner_pos < 1:
            x_corner_pos = 1
        if y_corner_pos < 1:
            y_corner_pos = 1
        if x_corner_pos > frame.shape[1] - self.window_width:
            x_corner_pos = frame.shape[1] - self.window_width
        if y_corner_pos > frame.shape[0] - self.window_height:
            y_corner_pos = frame.shape[0] - self.window_height
        try:
            self.subImagePosition = (x_corner_pos, y_corner_pos,
                                     self.window_width, self.window_height)
            # Slice out the window: rows y..y+h, columns x..x+w.
            self.reducedImage = frame[
                self.subImagePosition[1]:self.subImagePosition[1] +
                self.subImagePosition[3],
                self.subImagePosition[0]:self.subImagePosition[0] +
                self.subImagePosition[2], :]
            self.frameGray = cv2.cvtColor(self.reducedImage,
                                          cv2.cv.CV_RGB2GRAY)
        except:
            # NOTE(review): bare except silently swallows all errors,
            # including KeyboardInterrupt; kept as-is for behavior parity.
            print("frame: ", frame.dtype)
            print("originalImage: ", self.originalImage.shape[0],
                  self.originalImage.shape[1], self.originalImage)
            print("frameGray: ", self.frameGray.shape[0],
                  self.frameGray.shape[1], self.frameGray.dtype)
            print "Unexpected error:", sys.exc_info()[0]
            pass

    def locate_marker(self):
        """Locate the marker inside the cropped window, draw position and
        orientation on it, and return the pose in full-frame coordinates."""
        (xm, ym) = self.markerTracker.locate_marker(self.frameGray)

        red_color = (55, 55, 255)
        blue_color = (255, 0, 0)

        orientation = self.markerTracker.orientation
        cv2.circle(self.reducedImage, (xm, ym), 4, red_color, 2)
        # 50 px orientation indicator line.
        xm2 = int(xm + 50 * math.cos(orientation))
        ym2 = int(ym + 50 * math.sin(orientation))
        cv2.line(self.reducedImage, (xm, ym), (xm2, ym2), blue_color, 2)

        # Translate the window coordinates back to full-frame coordinates.
        xm = xm + self.subImagePosition[0]
        ym = ym + self.subImagePosition[1]

        return MarkerPose(xm, ym, orientation, self.markerTracker.quality,
                          self.markerTracker.order)

    def show_cropped_image(self):
        # Intentionally a no-op in this variant.
        pass
# main function
# Loads a test image and repeatedly runs the marker locator on it
# (Python 2 script: uses print statements).
if __name__ == "__main__":
	print "Running.."

	print 'Loading image..'
	# Read the test image as grayscale (flag 0).
	img = cv2.imread('marker_image.jpg', 0)
	''' plt.imshow(img, cmap='gray', interpolation='bicubic')
	#plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
	#plt.show()
	#print "image plotted.." '''

	# Number of n-fold edges:
	order = 3
	kernel_size = 25
	scale_factor = 100

	#print 'Finding marker..'
	mt = MarkerTracker(order, kernel_size, scale_factor)
	# NOTE(review): infinite loop -- there is no break or exit condition,
	# so the script runs until interrupted. Presumably used for profiling.
	while True:
		mt.locate_marker(img)
		mt.determine_marker_orientation(img)
		mt.determine_marker_quality(img)
		#print 'Marker found..'

		#print 'Plotting marker..'
		#img2 = mt.extract_window_around_maker_location(img)
		#plt.imshow(img2, cmap='gray', interpolation='bicubic')
		#plt.xticks([]), plt.yticks([])  # to hide tick values on X and Y axis
		#plt.show()