Example #1
class App:
    def __init__(self, src):
        self.cap = video.create_capture(src)
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        self.rect_sel = common.RectSelector('plane', self.on_rect)

    def on_rect(self, rect):
        self.tracker.clear()
        self.tracker.add_target(self.frame, rect)

    def run(self):
        img = cv2.imread('image.jpg')
        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            w, h = getsize(self.frame)
            vis = np.zeros((h, w * 2, 3), np.uint8)
            vis[:h, :w] = self.frame

            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:, w:] = target.image
                draw_keypoints(vis[:, w:], target.keypoints)
                x0, y0, x1, y1 = target.rect
                cv2.rectangle(vis, (x0 + w, y0), (x1 + w, y1), (0, 255, 0), 2)

            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    #cv2.fillPoly(vis, [np.int32(tracked.quad)], 255)
                    cv2.polylines(vis, [np.int32(tracked.quad)], True,
                                  (255, 0, 0), 2)

                    # cast to int: cv2 drawing functions reject float coordinates
                    xt, yt = np.int32(tracked.quad[3])
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(vis, 'Planar track', (xt, yt), font, 1,
                                (0, 0, 255), 2)
                    cv2.circle(vis, (xt, yt), 30, (0, 255, 0), 2)

                    #for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0), np.int32(tracked.p1)):
                    #    cv2.line(vis, (x0+w, y0), (x1, y1), (0, 255, 0))
            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv2.imshow('plane', vis)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == 27:
                break
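For context, the upstream OpenCV plane_tracker.py demo starts this App from a small __main__ block. A minimal launcher sketch (assuming App is defined as above and cv2 is imported; the video source falls back to the default webcam):

if __name__ == '__main__':
    import sys
    try:
        video_src = sys.argv[1]
    except IndexError:
        video_src = 0  # default webcam
    App(video_src).run()
    cv2.destroyAllWindows()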
Example #2
    def __init__(self, src):
        self.cap = cv2.VideoCapture(src)
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        self.rect_sel = common.RectSelector('plane', self.on_rect)
Example #3
    def __init__(self, src):
        self.cap = video.create_capture(src, presets['book'])
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        self.rect_sel = common.RectSelector('plane', self.on_rect)
Example #4
class App:
    def __init__(self, src):
        self.cap = video.create_capture(src, presets['book'])
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv.namedWindow('plane', cv.WINDOW_NORMAL)
        cv.createTrackbar('focal', 'plane', 25, 50, common.nothing)
        cv.resizeWindow('plane', 1920, 1080)
        self.rect_sel = common.RectSelector('plane', self.on_rect)

    def on_rect(self, rect):
        self.tracker.add_target(self.frame, rect)

    def run(self):
        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            vis = self.frame.copy()
            if playing:
                tracked = self.tracker.track(self.frame)
                for tr in tracked:
                    cv.polylines(vis, [np.int32(tr.quad)], True, (255, 255, 255), 2)
                    for (x, y) in np.int32(tr.p1):
                        cv.circle(vis, (x, y), 2, (255, 255, 255))
                    self.draw_overlay(vis, tr)

            self.rect_sel.draw(vis)
            cv.imshow('plane', vis)
            ch = cv.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.tracker.clear()
            if ch == 27:
                break

    def draw_overlay(self, vis, tracked):
        x0, y0, x1, y1 = tracked.target.rect
        quad_3d = np.float32([[x0, y0, 0], [x1, y0, 0], [x1, y1, 0], [x0, y1, 0]])
        fx = 0.5 + cv.getTrackbarPos('focal', 'plane') / 50.0
        h, w = vis.shape[:2]
        K = np.float64([[fx*w, 0, 0.5*(w-1)],
                        [0, fx*w, 0.5*(h-1)],
                        [0.0,0.0,      1.0]])
        dist_coef = np.zeros(4)
        _ret, rvec, tvec = cv.solvePnP(quad_3d, tracked.quad, K, dist_coef)
        verts = ar_verts * [(x1-x0), (y1-y0), -(x1-x0)*0.3] + (x0, y0, 0)
        verts = cv.projectPoints(verts, rvec, tvec, K, dist_coef)[0].reshape(-1, 2)
        for i, j in ar_edges:
            (x0, y0), (x1, y1) = verts[i], verts[j]
            cv.line(vis, (int(x0), int(y0)), (int(x1), int(y1)), (255, 255, 0), 2)
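Note that ar_verts and ar_edges are module-level constants describing the wireframe that draw_overlay projects onto the tracked plane. A sketch of plausible definitions, modeled on OpenCV's plane_ar.py sample (treat the exact coordinates as an assumption, not part of this example):

import numpy as np

# A unit cube with a ridge on top ("house" wireframe), as in plane_ar.py;
# each edge is a pair of indices into ar_verts.
ar_verts = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0],
                       [0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0, 1],
                       [0, 0.5, 2], [1, 0.5, 2]])
ar_edges = [(0, 1), (1, 2), (2, 3), (3, 0),
            (4, 5), (5, 6), (6, 7), (7, 4),
            (0, 4), (1, 5), (2, 6), (3, 7),
            (4, 8), (5, 9), (8, 9)]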
Example #5
class App:
    def __init__(self, src):
        self.cap = video.create_capture(src, presets['book'])
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        cv2.createTrackbar('focal', 'plane', 25, 50, common.nothing)
        self.rect_sel = common.RectSelector('plane', self.on_rect)

    def on_rect(self, rect):
        self.tracker.add_target(self.frame, rect)

    def run(self):
        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            vis = self.frame.copy()
            if playing:
                tracked = self.tracker.track(self.frame)
                for tr in tracked:
                    cv2.polylines(vis, [np.int32(tr.quad)], True, (255, 255, 255), 2)
                    for (x, y) in np.int32(tr.p1):
                        cv2.circle(vis, (x, y), 2, (255, 255, 255))
                    self.draw_overlay(vis, tr)

            self.rect_sel.draw(vis)
            cv2.imshow('plane', vis)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.tracker.clear()
            if ch == 27:
                break

    def draw_overlay(self, vis, tracked):
        x0, y0, x1, y1 = tracked.target.rect
        quad_3d = np.float32([[x0, y0, 0], [x1, y0, 0], [x1, y1, 0], [x0, y1, 0]])
        fx = 0.5 + cv2.getTrackbarPos('focal', 'plane') / 50.0
        h, w = vis.shape[:2]
        K = np.float64([[fx*w, 0, 0.5*(w-1)],
                        [0, fx*w, 0.5*(h-1)],
                        [0.0,0.0,      1.0]])
        dist_coef = np.zeros(4)
        ret, rvec, tvec = cv2.solvePnP(quad_3d, tracked.quad, K, dist_coef)
        verts = ar_verts * [(x1-x0), (y1-y0), -(x1-x0)*0.3] + (x0, y0, 0)
        verts = cv2.projectPoints(verts, rvec, tvec, K, dist_coef)[0].reshape(-1, 2)
        for i, j in ar_edges:
            (x0, y0), (x1, y1) = verts[i], verts[j]
            cv2.line(vis, (int(x0), int(y0)), (int(x1), int(y1)), (255, 255, 0), 2)
Example #6
    def __init__(self, src):
        self.cap = video.create_capture(src)
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        cv2.createTrackbar('focal', 'plane', 25, 50, common.nothing)
        self.rect_sel = common.RectSelector('plane', self.on_rect)
Example #7
    def __init__(self, src):
        image = cv.imread(src, 1)
        small = cv.resize(image, (0,0), fx=0.125, fy=0.125)  # downscale, since the source photo is high resolution
        self.frame = small
        self.paused = False
        self.tracker = PlaneTracker()

        cv.namedWindow('Selected Region')
        # cv.setMouseCallback('Selected Region', self.draw_circle_corner)
        self.rect_sel = common.RectSelector('Selected Region', self.on_rect)
Example #8
    def __init__(self, src):
        self.cap = video.create_capture(src, presets['book'])
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv.namedWindow('plane', cv.WINDOW_NORMAL)
        cv.createTrackbar('focal', 'plane', 25, 50, common.nothing)
        cv.resizeWindow('plane', 1920, 1080)
        self.rect_sel = common.RectSelector('plane', self.on_rect)
Example #9
class CompareImage:
    """
    This lets you compare multiple template images to an arbitrary image and report True or False
    for a 'match', where the match is fairly loosely defined.

    The class doesn't have to be too accurate, as long as there are few false negatives.
    """

    def __init__(self, features_detect=1500):
        self.tracker = PlaneTracker(0.025, 10, max_features_detect=features_detect)
        self.__templates = []  # Keep track of templates being tracked

    def add_template(self, img):
        """
        This adds a template image to compare other images against. It adds the whole image
        (and, optionally, smaller sub-regions of it) so that more matches can be captured.
        """
        if img is None: return

        h, w, _ = img.shape
        self.__templates.append(img)

        # Track the whole image
        self.tracker.add_target(img, (0, 0, w, h))

        # # Track the center quarter of the image
        # self.tracker.add_target(img, (int(w * .25),
        #                               int(h * .25),
        #                               int(w * .75),
        #                               int(h * .75)))
        #
        # # Track the right half of the image
        # self.tracker.add_target(img, (int(w * .5),
        #                               int(h * .5),
        #                               int(w * 1),
        #                               int(h * 1)))
        #
        # # Track the left half of the image
        # self.tracker.add_target(img, (int(w * 0),
        #                               int(h * 0),
        #                               int(w * .5),
        #                               int(h * .5)))

    def get_template(self):
        return self.__templates

    def is_match(self, img, min_match_ratio):
        """ This will compare the img to the images that are currently being compared"""
        self.tracker.track(img)

        for tracked in self.tracker.history[0]:
            if tracked.match_ratio >= min_match_ratio:
                return True

        return False
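A minimal usage sketch for this class (the file names and the 0.5 match-ratio threshold below are illustrative placeholders, not values from the original code):

import cv2

comparer = CompareImage()
comparer.add_template(cv2.imread('template.png'))   # hypothetical template image
if comparer.is_match(cv2.imread('query.png'), min_match_ratio=0.5):
    print('query image matches one of the templates')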
Example #10
class App:
    def __init__(self, src):
        self.cap = video.create_capture(src, presets['book'])
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv.namedWindow('plane')
        self.rect_sel = common.RectSelector('plane', self.on_rect)

    def on_rect(self, rect):
        self.tracker.clear()
        self.tracker.add_target(self.frame, rect)

    def run(self):
        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            w, h = getsize(self.frame)
            vis = np.zeros((h, w * 2, 3), np.uint8)
            vis[:h, :w] = self.frame
            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:, w:] = target.image
                draw_keypoints(vis[:, w:], target.keypoints)
                x0, y0, x1, y1 = target.rect
                cv.rectangle(vis, (x0 + w, y0), (x1 + w, y1), (0, 255, 0), 2)

            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    cv.polylines(vis, [np.int32(tracked.quad)], True,
                                 (255, 255, 255), 2)
                    for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0),
                                                  np.int32(tracked.p1)):
                        cv.line(vis, (x0 + w, y0), (x1, y1), (0, 255, 0))
            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv.imshow('plane', vis)
            ch = cv.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == 27:
                break
Example #11
class App:
    def __init__(self, src):
        self.cap = video.create_capture(src, presets['book'])
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        cv2.moveWindow('plane', 100, 100)
        self.rect_sel = common.RectSelector('plane', self.on_rect)

    def on_rect(self, rect):
        self.tracker.clear()
        self.tracker.addTargetSamples(self.frame, rect)

    def run(self):
        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            w, h = getsize(self.frame)
            vis = np.zeros((h, w*2, 3), np.uint8)
            vis[:h,:w] = self.frame
            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:,w:] = target.image
                draw_keypoints(vis[:,w:], target.keypoints)
                x0, y0, x1, y1 = target.rect
                cv2.rectangle(vis, (x0+w, y0), (x1+w, y1), (0, 255, 0), 2)

            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    cv2.polylines(vis, [np.int32(tracked.quad)], True, (255, 255, 255), 2)
                    for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0), np.int32(tracked.p1)):
                        cv2.line(vis, (x0+w, y0), (x1, y1), (0, 255, 0))
            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv2.imshow('plane', vis)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == 27:
                break
Example #12
    def __init__(self, src):
        self.cap = video.create_capture(src, presets["book"])
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow("plane")
        self.rect_sel = common.RectSelector("plane", self.on_rect)
Example #13
    def __init__(self, src):
        self.cap = video.create_capture(src)
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        self.rect_sel = common.RectSelector('plane', self.on_rect)
Example #14
    def __init__(self, src):
        self.cap = video.create_capture(src, presets['book'])
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        cv2.createTrackbar('focal', 'plane', 25, 50, common.nothing)
        self.rect_sel = common.RectSelector('plane', self.on_rect)
Example #15
    def __init__(self, src):
        cv.namedWindow(WIN_NAME)
        cv.moveWindow(WIN_NAME, 0, 0)
        self.cap = video.create_capture(src, presets['book'])
        self.frame = None
        self.auto_output_frame = None
        self.paused = False
        self.tracker = PlaneTracker()
        self.rect_sel = common.RectSelector('plane', self.on_rect)
        self.user_selected_rect = None
        self.focal_length = 0.0
        self.horizontal_angel = 0.0
        self.known_distance = 200
        self.KNOWN_WIDTH = 40
        self.KNOWN_HEIGHT = 40
        self.msg = None
        self.serial = 0
        self.auto_save = False
        self.auto_serial = 0
Example #16
    def __init__(self, src):
        self.drone = libardrone.ARDrone(is_ar_drone_2=True)
        #self.cap = video.create_capture(src)
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        cv2.createTrackbar('focal', 'plane', 25, 50, common.nothing)
        self.rect_sel = common.RectSelector('plane', self.on_rect)
Example #17
    def __init__(self):
        self.template = None
        self.paused = False
        self.recognised = False
        self.tracker = PlaneTracker()
        self.character = ""

        # Add template images to tracker
        scarlet = cv2.imread(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'cluedo_images/scarlet.png')))
        mustard = cv2.imread(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'cluedo_images/mustard.png')))
        plum = cv2.imread(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'cluedo_images/plum.png')))
        peacock = cv2.imread(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'cluedo_images/peacock.png')))

        scarlet_rect = (0,0,scarlet.shape[1],scarlet.shape[0])
        mustard_rect = (0,0,mustard.shape[1],mustard.shape[0])
        plum_rect = (0,0,plum.shape[1],plum.shape[0])
        peacock_rect = (0,0,peacock.shape[1],peacock.shape[0])

        self.tracker.add_target(scarlet.copy(), scarlet_rect,'scarlet')
        self.tracker.add_target(mustard.copy(), mustard_rect,'mustard')
        self.tracker.add_target(plum.copy(), plum_rect,'plum')
        self.tracker.add_target(peacock.copy(), peacock_rect,'peacock')
Example #18
class characterDetection:
    def __init__(self):
        self.template = None
        self.paused = False
        self.recognised = False
        self.tracker = PlaneTracker()
        self.character = ""

        # Add template images to tracker
        scarlet = cv2.imread(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'cluedo_images/scarlet.png')))
        mustard = cv2.imread(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'cluedo_images/mustard.png')))
        plum = cv2.imread(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'cluedo_images/plum.png')))
        peacock = cv2.imread(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'cluedo_images/peacock.png')))

        scarlet_rect = (0,0,scarlet.shape[1],scarlet.shape[0])
        mustard_rect = (0,0,mustard.shape[1],mustard.shape[0])
        plum_rect = (0,0,plum.shape[1],plum.shape[0])
        peacock_rect = (0,0,peacock.shape[1],peacock.shape[0])

        self.tracker.add_target(scarlet.copy(), scarlet_rect,'scarlet')
        self.tracker.add_target(mustard.copy(), mustard_rect,'mustard')
        self.tracker.add_target(plum.copy(), plum_rect,'plum')
        self.tracker.add_target(peacock.copy(), peacock_rect,'peacock')

    def checkPoster(self, cv_image):
        if not self.recognised:
            self.frame = cv_image.copy()
            w, h = getsize(self.frame)
            vis = np.zeros((h, w, 3), np.uint8)

            vis[:h,:w] = self.frame

            tracked = self.tracker.track(self.frame)
            print(len(tracked))
            if len(tracked) > 0:
                rospy.loginfo("3")
                for tracked_ob in tracked:
                    rospy.loginfo ('Found ' + tracked_ob.target.data)
                    self.character = tracked_ob.target.data

                    # Calculate Homography
                    h, status = cv2.findHomography(tracked_ob.p0, tracked_ob.p1)
                    self.recognised = True
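If the homography computed in checkPoster is needed for more than the recognised flag, it can map the template rectangle into camera-frame coordinates. A small sketch of such a helper (project_rect is a hypothetical name; it assumes a 3x3 homography like the one returned by cv2.findHomography above):

import numpy as np
import cv2

def project_rect(H, rect):
    # Map the corners of rect = (x0, y0, x1, y1) through the homography H.
    x0, y0, x1, y1 = rect
    corners = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]]).reshape(-1, 1, 2)
    return cv2.perspectiveTransform(corners, H).reshape(-1, 2)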
Example #19
    def __init__(self, src):
        #self.cap = video.create_capture(src, presets['book'])
	self.image_pub = rospy.Publisher("image_topic_2",Image, queue_size=10)
	self.image_sub = rospy.Subscriber("/camera/rgb/image_raw",Image,self.callback)
	self.bridge = CvBridge()
	#dtype, n_channels = br.encoding_as_cvtype2('8UC3')
	#this.im = np.ndarray(shape=(480, 640, n_channels), dtype=dtype)
	self.cv_image = None
        self.frame = None
	#self.count = 1
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        self.rect_sel = common.RectSelector('plane', self.on_rect)
	
	self.tracker.clear()

	rospack = rospkg.RosPack()	
	labPath = rospack.get_path('laboratorio3')

	files = ['Cercano', 'Medio', 'Lejano','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15']
	
	for f in files:
		rectanguloFile = os.path.join(labPath, 'detectarCono/rect{}.pkl'.format(f))
		frameFile = os.path.join(labPath, 'detectarCono/frame{}.pkl'.format(f))
	
		with open(rectanguloFile, 'rb') as f:
			rectangulo = pickle.load(f)
		with open(frameFile, 'rb') as f:
			frame = pickle.load(f)
		self.tracker.add_target(frame, rectangulo)

	self.pubVel = rospy.Publisher('/cmd_vel', Twist , queue_size=10)
	self.pubNavegacion = rospy.Publisher('/laboratorio3/exploration', String , queue_size=10)

	self.reiniciar_exploracion_timer = False
Example #20
class App:
    def __init__(self, src):
        self.cap = video.create_capture(src, presets['book'])
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv.namedWindow('plane')
        self.rect_sel = common.RectSelector('plane', self.on_rect)

    def on_rect(self, rect):
        self.tracker.clear()
        self.tracker.add_target(self.frame, rect)

    def run(self):
        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            blue = cv.cvtColor(self.frame, cv.COLOR_BGR2Luv)
            # Display the resulting frame
            cv.imshow('self.frame', blue)

            w, h = getsize(self.frame)
            vis = np.zeros((h, w * 2, 3), np.uint8)
            vis[:h, :w] = self.frame
            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:, w:] = target.image
                draw_keypoints(vis[:, w:], target.keypoints)
                x0, y0, x1, y1 = target.rect
                #cv.rectangle(vis, (x0+w, y0), (x1+w, y1), (255, 0, 0), 2)
                center_x = int((x0 + x1 + 2 * w) / 2)
                center_y = int((y0 + y1) / 2)
                r_x = int(abs((x0 - x1) / 2))
                r_y = int(abs((y0 - y1) / 2))
                radius = int(math.sqrt((r_x * r_x) + (r_y * r_y)))

                cv.circle(vis, (center_x, center_y), radius, (0, 0, 255), 2)
            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    cv.polylines(vis, [np.int32(tracked.quad)], True,
                                 (255, 0, 255), 2)
                    for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0),
                                                  np.int32(tracked.p1)):
                        cv.line(vis, (x0 + w, y0), (x1, y1), (255, 0, 0))
            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv.imshow('plane', vis)
            ch = cv.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == 27:
                break
Example #21
class App:
    def __init__(self, src):
        self.cap = video.create_capture(src)
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        self.rect_sel = common.RectSelector('plane', self.on_rect)

    def on_rect(self, rect):
        self.tracker.clear()
        self.tracker.add_target(self.frame, rect)

    def run(self):
        def find_line(p0, p1):
            # line through p0 and p1 in slope/intercept form: y = k*x + b
            x0, y0 = p0
            x1, y1 = p1
            k = (y1 - y0) / (x1 - x0)
            b = y0 - k * x0
            return k, b

        def cross(l0, l1):
            k0, b0 = l0
            k1, b1 = l1
            x = (b1 - b0) / (k0 - k1)
            y = k0 * x + b0
            return x, y

        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            w, h = getsize(self.frame)
            vis = np.zeros((h, w * 2, 3), np.uint8)
            vis[:h, :w] = self.frame
            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:, w:] = target.image
                draw_keypoints(vis[:, w:], target.keypoints)
                x0, y0, x1, y1 = target.rect
                cv2.rectangle(vis, (x0 + w, y0), (x1 + w, y1), (0, 255, 0), 2)

            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    cv2.polylines(vis, [np.int32(tracked.quad)], True,
                                  (255, 255, 255), 2)
                    # print([np.int32(tracked.quad)])
                    p0, p1, p2, p3 = np.int32(tracked.quad)
                    l0 = find_line(p0, p2)
                    l1 = find_line(p1, p3)
                    x, y = cross(l0, l1)
                    z = 3 * 180 / (p3[1] - p0[1]) * 1
                    print(x, y, z)
            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv2.imshow('plane', vis)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == 27:
                break
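The slope/intercept form used by find_line and cross above fails for vertical or horizontal diagonals (division by zero). A more robust alternative, sketched here with homogeneous coordinates (intersect is a hypothetical helper, not part of the original example):

import numpy as np

def intersect(p0, p2, p1, p3):
    # Lines through (p0, p2) and (p1, p3) as homogeneous cross products;
    # their cross product is the intersection point (w == 0 means the lines are parallel).
    a = np.cross(np.append(p0, 1.0), np.append(p2, 1.0))
    b = np.cross(np.append(p1, 1.0), np.append(p3, 1.0))
    x, y, w = np.cross(a, b)
    return x / w, y / w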
Example #22
class App:
    def __init__(self, src):
        image = cv.imread(src, 1)
        small = cv.resize(image, (0,0), fx=0.125, fy=0.125)  # downscale, since the source photo is high resolution
        self.frame = small
        self.paused = False
        self.tracker = PlaneTracker()

        cv.namedWindow('Selected Region')
        # cv.setMouseCallback('Selected Region', self.draw_circle_corner)
        self.rect_sel = common.RectSelector('Selected Region', self.on_rect)

    def on_rect(self, rect):
        self.tracker.clear()
        self.tracker.add_target(self.frame, rect)

    def run_original(self):
        width_region = None  # measured width of the selected region (set below)

        while True:
            playing = not self.paused and not self.rect_sel.dragging
            # flag used to quit imaging, when the size detected.
            flag = 0
            # if playing or self.frame is None:
            #     # ret, frame = self.cap.read()
            #     if not ret:
            #         break
            #     self.frame = frame.copy()
            # size of the frame (image) - used to draw rectangle
            w, h = getsize(self.frame)
            vis = np.zeros((h, w*2, 3), np.uint8)
            # copy to the image (original)
            vis[:h,:w] = self.frame
            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:,w:] = target.image
                draw_keypoints(vis[:,w:], target.keypoints)
                x0, y0, x1, y1 = target.rect # rectangle coordinates
                cv.rectangle(vis, (x0+w, y0), (x1+w, y1), (0, 255, 0), 2) # green line drawn
                # finding the width of the region
                width_region = distance(x0, x1, y0, y1)
                flag = 1 # set flag = 1, quit the program!
            self.rect_sel.draw(vis)
            cv.imshow('Selected Region', vis) # show the image
            ch = cv.waitKey(1)
            if ch == 27 or flag == 1:
                print("Quitting")
                return width_region

    def run(self, Cover_Dimensions, Actual_Dimensions):
        '''
        Runs the program, of selection of the image's region. Approximates the
        dimensions of the cover based on given dimensions.
        '''
        while True:
            # flag used to quit imaging, when the size detected.
            flag = 0
            playing = not self.rect_sel.dragging

            # get size of the book cover
            w, h = getsize(self.frame)
            vis = np.zeros((h, w*2, 3), np.uint8)
            vis[:h,:w] = self.frame

            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:,w:] = target.image
                draw_keypoints(vis[:,w:], target.keypoints)
                x0, y0, x1, y1 = target.rect # get the selected region coordinates
                cv.rectangle(vis, (x0+w, y0), (x1+w, y1), (0, 255, 0), 2)

                Region_Dimensions = [x1 - x0, y1 - y0]
                print("Region_Dimensions:", Region_Dimensions)
                # Ratio of the cover dimensions (width) to region dimensions (width)
                # similarly for height
                ratio_width = float(Cover_Dimensions)/(Region_Dimensions[0])

                # Hard coded the actual width and height of the region.
                # Actual width (in cm) = 14
                # Actual height (in cm) = 1.1
                width_of_book = ratio_width * Actual_Dimensions[0]
                # height_of_book = ratio_height * Actual_Dimensions[1]

            # Optional (useful in case of video source)
            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    cv.polylines(vis, [np.int32(tracked.quad)], True, (255, 255, 255), 2)
                    for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0), np.int32(tracked.p1)):
                        cv.line(vis, (x0+w, y0), (x1, y1), (0, 255, 0))
                        print("Detected")
                        # break when the region is detected.
                        flag = 1
                        break
            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv.imshow('Selected Region', vis)
            cv.waitKey(1)
            if flag == 1:
                print("Dimensions of the book : ", width_of_book)
                return
Example #23
import numpy as np
import cv2
from common import getsize, draw_keypoints
from plane_tracker import PlaneTracker

face_cascade = cv2.CascadeClassifier(
    'haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_eye.xml')

searchFor = cv2.imread('myPic.jpg', 0)  # Load Search Image
#searchForGray = cv2.cvtColor(searchFor, cv2.COLOR_BGR2GRAY) # Change to GRAY
searchHeight, searchWidth = searchFor.shape

tracker = PlaneTracker()
tracker.add_target(searchFor, (0, 0, searchWidth, searchHeight))

cap = cv2.VideoCapture(0)  # Init Camera

while (True):
    # Get Frame
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0),
                      2)  # Draw Faces On img

        # Crop Face
        cropped = img[y:y + h, x:x + w]
        #croppedGray = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
Example #24
def track_from_file(input_image_file, check_images_dir, result_dir):
    print "Tracking", input_image_file
    tracker = PlaneTracker()

    # ------------ INPUT IMAGE
    input_image = cv2.imread(input_image_file)
    input_image = cv2.resize(input_image, (0,0), fx=SCALE_DOWN_COEF, fy=SCALE_DOWN_COEF)
    # input_image = rotateImage(input_image, -90)
    input_image_gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)

    height, width = input_image.shape[:2]
    # ------------

    tracker.clear()
    tracker.add_target(input_image_gray, (0, 0, width, height))

    # ----------------
    for fn in os.listdir(check_images_dir):
        # print os.path.join(imagesDir, fn)
        filename = os.path.join(check_images_dir, fn)

        if os.path.isfile(filename):
            print "Checking", filename

            fName, fExtension = os.path.splitext(fn)
            inpfName, _ = os.path.splitext(os.path.split(input_image_file)[1])

            # print fn, fName, fExtension
            inFile = os.path.join(check_images_dir, fn)
            # print 'inFile =', inFile

            resFile = os.path.join(result_dir, inpfName + 'r' + fName + fExtension)
            # print 'resFile =', resFile

            # DO SMTH HERE inFile and resFile
            im = cv2.imread(inFile)
            im = cv2.resize(im, (0,0), fx=SCALE_DOWN_COEF, fy=SCALE_DOWN_COEF)
            # im = rotateImage(im, -90)
            imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)


            # im = rotateImage(imgray, -90)

            # draw corners
            # for i in range(len(corners)):
            # print i, corners[i][0]
            #     cv2.circle(im, (corners[i][0][0], corners[i][0][1]), 50, (0,255,0), 8)


            # track & match features
            tracked = tracker.track(imgray)

            if tracked:
                print "Tracked generation started"

                resImage = np.zeros((height, width * 2, 3), np.uint8)
                resImage[:height, :width] = input_image
                resImage[:height, width:width * 2] = im

                # keypoints = tracked[0].target.keypoints
                # resImage = cv2.drawKeypoints(resImage, keypoints, color=(0, 255, 0),
                #                                     flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

                for tr in tracked:
                    for p0, p1 in zip(tr.p0, tr.p1):

                        from_point = (int(p0[0]), int(p0[1]))
                        to_point = (int(p1[0] + width), int(p1[1]))
                        cv2.line(resImage, from_point, to_point, color=(255, 0, 0))

                cv2.imwrite(resFile, rotateImage(resImage, -90))

                print 'images ', input_image_file, 'and', filename, ' are similar', ' len.p0=', len(
                    tracked[0].p0), ' len.p1=', len(tracked[0].p1)

                print_apropriate_room(filename)
                # else:
                #     print 'images are not similar'
                print "Tracked generation finished"
Example #25
class App:
    def __init__(self, src):
        self.drone = libardrone.ARDrone(is_ar_drone_2=True)
        #self.cap = video.create_capture(src)
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        cv2.createTrackbar('focal', 'plane', 25, 50, common.nothing)
        self.rect_sel = common.RectSelector('plane', self.on_rect)

    def on_rect(self, rect):
        self.tracker.add_target(self.frame, rect)

    def run(self):
        try:
            while True:
                playing = not self.paused and not self.rect_sel.dragging
                if playing or self.frame is None:
                    #ret, frame = self.cap.read()
                    #if not ret:
                    #    break
                    self.frame = cv2.cvtColor(self.drone.get_image(),
                                              cv2.COLOR_BGR2RGB)

                vis = self.frame.copy()
                if playing:
                    tracked = self.tracker.track(self.frame)
                    for tr in tracked:
                        cv2.polylines(vis, [np.int32(tr.quad)], True,
                                      (255, 255, 255), 2)
                        for (x, y) in np.int32(tr.p1):
                            cv2.circle(vis, (x, y), 2, (255, 255, 255))
                        self.draw_overlay(vis, tr)

                self.rect_sel.draw(vis)
                cv2.imshow('plane', vis)
                ch = cv2.waitKey(1)
                if ch == ord(' '):
                    self.paused = not self.paused
                if ch == ord('c'):
                    self.tracker.clear()
                if ch == 27:
                    break
        finally:
            cv2.destroyAllWindows()
            self.drone.emergency()
            self.drone.reset()
            self.drone.halt()

    def draw_overlay(self, vis, tracked):
        x0, y0, x1, y1 = tracked.target.rect
        quad_3d = np.float32([[x0, y0, 0], [x1, y0, 0], [x1, y1, 0],
                              [x0, y1, 0]])
        fx = 0.5 + cv2.getTrackbarPos('focal', 'plane') / 50.0
        h, w = vis.shape[:2]
        K = np.float64([[fx * w, 0, 0.5 * (w - 1)], [0, fx * w, 0.5 * (h - 1)],
                        [0.0, 0.0, 1.0]])
        dist_coef = np.zeros(4)
        ret, rvec, tvec = cv2.solvePnP(quad_3d, tracked.quad, K, dist_coef)
        verts = ar_verts * [(x1 - x0),
                            (y1 - y0), -(x1 - x0) * 0.3] + (x0, y0, 0)
        verts = cv2.projectPoints(verts, rvec, tvec, K,
                                  dist_coef)[0].reshape(-1, 2)
        for i, j in ar_edges:
            (x0, y0), (x1, y1) = verts[i], verts[j]
            cv2.line(vis, (int(x0), int(y0)), (int(x1), int(y1)),
                     (255, 255, 0), 2)
Example #26
class App:
    def __init__(self, src):
        #self.cap = video.create_capture(src, presets['book'])
	self.image_pub = rospy.Publisher("image_topic_2",Image, queue_size=10)
	self.image_sub = rospy.Subscriber("/camera/rgb/image_raw",Image,self.callback)
	self.bridge = CvBridge()
	#dtype, n_channels = br.encoding_as_cvtype2('8UC3')
	#this.im = np.ndarray(shape=(480, 640, n_channels), dtype=dtype)
	self.cv_image = None
        self.frame = None
	#self.count = 1
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        self.rect_sel = common.RectSelector('plane', self.on_rect)
	
	self.tracker.clear()

	rospack = rospkg.RosPack()	
	labPath = rospack.get_path('laboratorio3')

	files = ['Cercano', 'Medio', 'Lejano','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15']
	
	for f in files:
		rectanguloFile = os.path.join(labPath, 'detectarCono/rect{}.pkl'.format(f))
		frameFile = os.path.join(labPath, 'detectarCono/frame{}.pkl'.format(f))
	
		with open(rectanguloFile, 'rb') as f:
			rectangulo = pickle.load(f)
		with open(frameFile, 'rb') as f:
			frame = pickle.load(f)
		self.tracker.add_target(frame, rectangulo)

	self.pubVel = rospy.Publisher('/cmd_vel', Twist , queue_size=10)
	self.pubNavegacion = rospy.Publisher('/laboratorio3/exploration', String , queue_size=10)

	self.reiniciar_exploracion_timer = False
	#rospy.init_node('talker', anonymous=True)

    def callback(self,data):
        try:
	    self.cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
	    #print(self.cv_image)
        except CvBridgeError as e:
            print(e)
	#(rows,cols,channels) = self.cv_image.shape
        #if cols > 60 and rows > 60 :
        #    cv2.circle(self.cv_image, (50,50), 10, 255)

        #cv2.imshow("Image window", self.cv_image)
        #cv2.waitKey(3)

        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(self.cv_image, "bgr8"))
        except CvBridgeError as e:
            print(e)
    
    def on_rect(self, rect):
        #self.tracker.clear()
        #self.tracker.add_target(self.frame, rect)
	#print('ON_RECT')
	#with open('rect'+str(self.count)+'.pkl', 'wb') as f:
	#	pickle.dump(rect, f, pickle.HIGHEST_PROTOCOL)
	#with open('frame'+str(self.count)+'.pkl', 'wb') as f:
	#	pickle.dump(self.frame, f, pickle.HIGHEST_PROTOCOL)
	#self.count = self.count + 1
	self.tracker.clear()

	rospack = rospkg.RosPack()	
	labPath = rospack.get_path('laboratorio3')

	files = ['Cercano', 'Medio', 'Lejano','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15']
	
	for f in files:
		rectanguloFile = os.path.join(labPath, 'detectarCono/rect{}.pkl'.format(f))
		frameFile = os.path.join(labPath, 'detectarCono/frame{}.pkl'.format(f))
	
		with open(rectanguloFile, 'rb') as f:
			rectangulo = pickle.load(f)
		with open(frameFile, 'rb') as f:
			frame = pickle.load(f)
		self.tracker.add_target(frame, rectangulo)
	
    def reiniciar_exploracion(self, event):
	self.reiniciar_exploracion_timer = False
	self.pubNavegacion.publish('START')

    def run(self):
	direccion = None
        while not rospy.is_shutdown():
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
		#print(self.cv_image)
                ret = True
		frame = self.cv_image
		#print(self.cv_image)
		#print("**************************** frame")
		#print(frame)
		#ret, frame = self.cap.read()
		if not ret:
                    break
                self.frame = frame.copy()
	    
            w, h = getsize(self.frame)
            vis = np.zeros((h, w*2, 3), np.uint8)
            vis[:h,:w] = self.frame
            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:,w:] = target.image
                draw_keypoints(vis[:,w:], target.keypoints)
                x0, y0, x1, y1 = target.rect
		#print(x0,y0,x1,y1)
                cv2.rectangle(vis, (x0+w, y0), (x1+w, y1), (0, 255, 0), 2)
	    
            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
		    tracked = tracked[0]
		    if not self.reiniciar_exploracion_timer:
		    	self.pubNavegacion.publish('STOP')
			self.reiniciar_exploracion_timer = True
		    	rospy.Timer(rospy.Duration(15), self.reiniciar_exploracion)
		    #Aca se imprimen los 4 puntos que genera
		    #print(str(np.int32(tracked.quad[0])))
		    #print(str(np.int32(tracked.quad[1])))
		    #print(str(np.int32(tracked.quad[2])))
		    #print(str(np.int32(tracked.quad[3])))
		    #Este es el punto medio del poligono que genera. 
		    ptoMedio = (np.int32(tracked.quad[0]) + np.int32(tracked.quad[1]) + np.int32(tracked.quad[2]) + np.int32(tracked.quad[3]))/4
		    direccion = (ptoMedio[0]-320)/-320.0
		    twist = Twist(Vector3(15,0,0),Vector3(0,0,direccion))
		    self.pubVel.publish(twist)
		    
                    cv2.polylines(vis, [np.int32(tracked.quad)], True, (255, 255, 255), 2)
                    for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0), np.int32(tracked.p1)):
                        cv2.line(vis, (x0+w, y0), (x1, y1), (0, 255, 0))
		else:
		    if self.reiniciar_exploracion_timer:
			direccion = direccion or 0.5
		    	twist = Twist(Vector3(0 if direccion > 0.2 else 10,0,0),Vector3(0,0, direccion if direccion > 0.2 else 0))
		    	self.pubVel.publish(twist)

            draw_keypoints(vis, self.tracker.frame_points)
	    
            self.rect_sel.draw(vis)
            cv2.imshow('plane', vis)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == 27:
                break
Example #27
class App(object):
    def __init__(self, src):
        cv.namedWindow(WIN_NAME)
        cv.moveWindow(WIN_NAME, 0, 0)
        self.cap = video.create_capture(src, presets['book'])
        self.frame = None
        self.auto_output_frame = None
        self.paused = False
        self.tracker = PlaneTracker()
        self.rect_sel = common.RectSelector('plane', self.on_rect)
        self.user_selected_rect = None
        self.focal_length = 0.0
        self.horizontal_angel = 0.0
        self.known_distance = 200
        self.KNOWN_WIDTH = 40
        self.KNOWN_HEIGHT = 40
        self.msg = None
        self.serial = 0
        self.auto_save = False
        self.auto_serial = 0

    def on_rect(self, rect):
        self.tracker.clear()
        self.tracker.add_target(self.frame, rect)
        # rasmus hacked {
        self.user_selected_rect = rect
        x0, y0, x1, y1 = rect
        crop_img = self.frame[y0:y1, x0:x1]
        #print('type: {}, {}, {}'.format(rect, type(rect), type(rect[0])))
        fn = FRAME_FN
        self.write_file(fn, self.frame)
        fn = 'rect.jpg'
        self.write_file(fn, crop_img)
        self.save_config(rect)
        # rasmus hacked }

    def save_config(self, rect):
        fn = FRAME_FN
        # elements of 'rect' are numpy.int16; convert them to plain int
        # so that json can serialise them
        rects = list(map(int, rect))
        data = {'fn': fn, 'rect': rects}
        print(data)
        j = json.dumps(data)  # now j is a string to keep json data
        with open('homo.json', 'w') as homo:
            homo.write(j)

    def load_setting(self, fn):
        data = myutil.read_jsonfile(fn, debug=True)
        try:
            tmp = data['auto_save']
            self.auto_save = tmp
        except:
            pass
        print('setting loaded, auto_save: {}'.format(self.auto_save))

    def load_config(self, fn):
        data = myutil.read_jsonfile(fn, debug=True)
        #print(data['fn'])
        fn = data.get('fn')
        frame = np.zeros((640, 480, 3), np.uint8)
        if myutil.isfile(fn):
            frame = cv.imread(fn)
        else:
            print('file not found: {}'.format(fn))
            return None

        rect = data['rect']
        self.focal_length = self.calc_focallength(rect)

        rect = tuple(map(np.int16, rect))
        print(rect)
        self.tracker.clear()
        self.tracker.add_target(frame, rect)
        print('preset loaded')
        return frame

    def calc_focallength(self, rect):
        # initialise the known distance from the camera to the object,
        # a preset distance taken from the sample captures
        self.known_distance = 200
        # the known object width (here, the piece of paper), in mm
        width = rect[2] - rect[0]
        focalLength = (width * self.known_distance) / self.KNOWN_WIDTH
        print('width: {} focal length: {:.2f}'.format(width, focalLength))
        return focalLength

    def distance_to_camera(self, knownWidth, focalLength, perWidth):
        # compute and return the distance from the marker to the camera
        if perWidth == 0.0:
            return 0.0
        return (knownWidth * focalLength) / perWidth

    @staticmethod
    def get_dist2d(p1, p2):
        return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)

    @staticmethod
    def write_file(fn, frame):
        cv.imwrite(fn, frame)
        print('output to %s' % fn)

    @staticmethod
    def get_slope(p1, p2):
        delta_x = float(p2[0] - p1[0])
        delta_y = float(p2[1] - p1[1])
        if delta_x == 0:
            return -1.0
        return math.fabs(delta_y / delta_x)

    @staticmethod
    def get_degree(slope):
        if slope < 0.0:
            return 90.0
        deg = math.atan(slope) * 180.0 / math.pi
        return deg

    def show_wtf(self, wtf):
        print('wtf: {} -- {}'.format(wtf, self.user_selected_rect))

    def check_wtf(self, wtf, debug=False):
        self.msg = None
        for ww in wtf:
            xx, yy = ww
            if xx > 640 or yy > 480 or xx < 0 or yy < 0:
                if debug:
                    print('OOB')
                return False
        ang1 = self.get_degree(self.get_slope(wtf[0], wtf[1]))
        ang2 = self.get_degree(self.get_slope(wtf[2], wtf[3]))
        if ang1 > 30.0 or ang2 > 30:
            if debug:
                print('h ang')
            return False

        self.horizontal_angel = ang1
        dist = self.get_dist2d(wtf[0], wtf[1])
        h_dist = dist * math.cos(self.horizontal_angel * math.pi / 180.0)
        depth = self.distance_to_camera(self.KNOWN_WIDTH, self.focal_length,
                                        h_dist)
        #print('ang{:4.2f} dist{:4.2f}'.format(self.horizontal_angel, dist), end=' ')
        #print('hdist {:4.2f}'.format(h_dist), end=' ')
        self.msg = 'depth {:4.2f} mm'.format(depth)

        s1 = self.get_slope(wtf[1], wtf[2])
        s2 = self.get_slope(wtf[0], wtf[3])
        if s1 < 0.0 or s2 < 0.0:
            if debug:
                print('t slopy')
            return False
        ang1 = self.get_degree(s1)
        ang2 = self.get_degree(s2)
        if ang1 < 60.0 or ang2 < 60.0:
            if debug:
                print('too s')
            return False

        return True

    def draw_result(self, img, wtf):
        cv.putText(img,
                   self.msg, (50, 50),
                   cv.FONT_HERSHEY_SIMPLEX,
                   1.0, (0, 0, 0),
                   lineType=cv.LINE_AA)
        cv.polylines(img, [np.int32(wtf)], True, (255, 255, 255), 2)

    def run(self):
        setting_fn = 'setting.json'
        if myutil.isfile(setting_fn):
            print('setting exists')
            self.load_setting(setting_fn)
        # load preset frame and rect
        if myutil.isfile('homo.json'):
            print('preset exists')
            tmp_frame = self.load_config('homo.json')
            #self.frame = tmp_frame

        print('test: {}'.format(
            self.distance_to_camera(self.KNOWN_WIDTH, self.focal_length, 135)))

        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()
                self.auto_output_frame = frame.copy()

            w, h = getsize(self.frame)
            vis = np.zeros((h, w * 2, 3), np.uint8)
            # if tmp_frame:
            #     vis[:,w:] = tmp_frame
            vis[:h, :w] = self.frame
            if len(self.tracker.targets) > 0:
                target = self.tracker.targets[0]
                vis[:, w:] = target.image
                draw_keypoints(vis[:, w:], target.keypoints)
                x0, y0, x1, y1 = target.rect
                cv.rectangle(vis, (x0 + w, y0), (x1 + w, y1), (0, 255, 0), 2)

            is_ok_to_export = False
            if playing:
                tracked = self.tracker.track(self.frame)
                if len(tracked) > 0:
                    tracked = tracked[0]
                    wtf = np.int32(tracked.quad)
                    if self.check_wtf(wtf):
                        self.draw_result(vis, wtf)
                        cv.fillPoly(vis, [wtf], (255, 0, 0))
                        for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0),
                                                      np.int32(tracked.p1)):
                            cv.line(vis, (x0 + w, y0), (x1, y1), (0, 255, 0))
                        is_ok_to_export = True
                        if self.auto_save:
                            self.draw_result(self.auto_output_frame, wtf)
                            fn = 'autosave_{:04d}.png'.format(self.auto_serial)
                            self.save_image(fn, self.auto_output_frame)
                            self.auto_serial += 1

            draw_keypoints(vis, self.tracker.frame_points)

            self.rect_sel.draw(vis)
            cv.imshow(WIN_NAME, vis)
            ch = cv.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            elif ch == 27:
                break
            elif ch == ord('s'):
                fn = 'saved_{:04d}.png'.format(self.serial)
                self.serial += 1
                self.save_image(fn, vis)

    def save_image(self, fn, img):
        print('save image to {}'.format(fn))
        cv.imwrite(fn, img)
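calc_focallength and distance_to_camera above implement the standard pinhole similar-triangles relation: focal = pixel_width * known_distance / known_width, and distance = known_width * focal / pixel_width. A quick numeric sanity check with illustrative values (the 135 px width is an arbitrary example, not measured data):

KNOWN_WIDTH = 40        # mm, matching App.__init__
known_distance = 200    # mm, matching App.__init__
width_px = 135          # example observed width of the target in pixels

focal = width_px * known_distance / KNOWN_WIDTH   # 675.0
distance = KNOWN_WIDTH * focal / width_px          # 200.0, recovers known_distance
print(focal, distance)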
Example #28
    def __init__(self, features_detect=1500):
        self.tracker = PlaneTracker(0.025, 10, max_features_detect=features_detect)
        self.__templates = []  # Keep track of templates being tracked