Code example #1
class Tracker(object):
    def __init__(self):
        self.cap = cv2.VideoCapture(0)
        self.frame = None
        self.paused = False
        self.tracker = PoseEstimator()

        cv2.namedWindow('Augmented Reality', cv2.WINDOW_NORMAL)
        self.roi_selector = ROISelector('Augmented Reality', self.on_rect)

        self.overlay_vertices = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0],
                                            [1, 0, 0], [0.5, 0.5, 4]])
        self.overlay_edges = [(0, 1), (1, 2), (2, 3), (3, 0),
                              (0, 4), (1, 4), (2, 4), (3, 4)]
        self.color_base = (0, 255, 0)
        self.color_lines = (0, 0, 0)

        self.graphics_counter = 0
        self.time_counter = 0
Code example #2
class Tracker(object):
    def __init__(self, capId, scaling_factor, win_name):
        self.cap = cv2.VideoCapture(capId)
        self.rect = None
        self.win_name = win_name
        self.scaling_factor = scaling_factor
        self.tracker = PoseEstimator()

        ret, frame = self.cap.read()
        self.rect = None
        self.frame = cv2.resize(frame,
                                None,
                                fx=scaling_factor,
                                fy=scaling_factor,
                                interpolation=cv2.INTER_AREA)

        self.roi_selector = ROISelector(win_name, self.frame, self.set_rect)
        self.overlay_vertices = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0],
                                            [1, 0, 0], [0.5, 0.5, 4]])
        self.overlay_edges = [(0, 1), (1, 2), (2, 3), (3, 0), (0, 4), (1, 4),
                              (2, 4), (3, 4)]
        self.color_base = (0, 255, 0)
        self.color_lines = (0, 0, 0)
Code example #3
class Tracker(object):
    def __init__(self):
        self.cap = cv2.VideoCapture(0)
        self.frame = None
        self.paused = False
        self.tracker = PoseEstimator()

        cv2.namedWindow('Augmented Reality')
        self.roi_selector = ROISelector('Augmented Reality', self.on_rect)

        self.overlay_vertices = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0],
                                            [1, 0, 0], [0.5, 0.5, 4]])
        self.overlay_edges = [(0, 1), (1, 2), (2, 3), (3, 0), (0, 4), (1, 4),
                              (2, 4), (3, 4)]
        self.color_base = (0, 255, 0)
        self.color_lines = (0, 0, 0)

        self.time_counter = 0
        self.frame_jump = 1
        self.degrees_counter = 0
        self.step_size_degrees = 3
        self.radius = 1.0
        self.xm = self.radius
        self.ym = 0
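
Code example #3 only shows the state set up in __init__; the degrees_counter, step_size_degrees, radius, xm and ym fields suggest that the pyramid apex is driven along a circle. The update itself is not part of the excerpt, so the following per-frame step is only a hedged sketch of how those fields could be used, in the style of examples #4 and #5:

import math

# Hypothetical per-frame update (not in the excerpt): advance the angle every
# frame_jump frames and place the apex on a circle of the given radius.
def update_apex(self):
    self.time_counter += 1
    if not self.time_counter % self.frame_jump:
        self.degrees_counter = (self.degrees_counter +
                                self.step_size_degrees) % 360
    angle = math.radians(self.degrees_counter)
    self.xm = self.radius * math.cos(angle)
    self.ym = self.radius * math.sin(angle)
    # The apex would then sit at [0.5 + self.xm, 0.5 + self.ym, 4] in
    # overlay_vertices, analogous to the moving apex in examples #4 and #5.
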
Code example #4
class Tracker(object):
    def __init__(self):
        self.cap = cv2.VideoCapture(0)
        self.frame = None
        self.paused = False
        self.tracker = PoseEstimator()

        cv2.namedWindow('Augmented Reality')
        self.roi_selector = ROISelector('Augmented Reality', self.on_rect)

        self.overlay_vertices = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0],
                                            [1, 0, 0], [0.5, 0.5, 4]])
        self.overlay_edges = [(0, 1), (1, 2), (2, 3), (3, 0), (0, 4), (1, 4),
                              (2, 4), (3, 4)]
        self.color_base = (0, 255, 0)
        self.color_lines = (0, 0, 0)

        self.graphics_counter = 0.5
        self.time_counter = 0
        self.sign = 1
        self.frame_jump = 3
        self.movement_size = 1
        self.step_size = 0.1

    def on_rect(self, rect):
        self.tracker.add_target(self.frame, rect)

    def start(self):
        while True:
            is_running = not self.paused and self.roi_selector.selected_rect is None
            if is_running or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break

                # Downscale the captured frame before tracking
                scaling_factor = 0.5
                frame = cv2.resize(frame,
                                   None,
                                   fx=scaling_factor,
                                   fy=scaling_factor,
                                   interpolation=cv2.INTER_AREA)

                self.frame = frame.copy()

            img = self.frame.copy()
            if is_running:
                tracked = self.tracker.track_target(self.frame)
                for item in tracked:
                    cv2.polylines(img, [np.int32(item.quad)], True,
                                  self.color_lines, 2)
                    for (x, y) in np.int32(item.points_cur):
                        cv2.circle(img, (x, y), 2, self.color_lines)

                    self.overlay_graphics(img, item)

            self.roi_selector.draw_rect(img)
            cv2.imshow('Augmented Reality', img)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.tracker.clear_targets()
            if ch == 27:
                break

    def overlay_graphics(self, img, tracked):
        # 3D coordinates of the tracked planar target (z = 0)
        x_start, y_start, x_end, y_end = tracked.target.rect
        quad_3d = np.float32([[x_start, y_start, 0], [x_end, y_start, 0],
                              [x_end, y_end, 0], [x_start, y_end, 0]])

        # Approximate camera matrix and estimate the pose of the plane
        h, w = img.shape[:2]
        K = np.float64([[w, 0, 0.5 * (w - 1)], [0, w, 0.5 * (h - 1)],
                        [0, 0, 1.0]])
        dist_coef = np.zeros(4)
        ret, rvec, tvec = cv2.solvePnP(quad_3d, tracked.quad, K, dist_coef)

        # Slide the pyramid apex back and forth every frame_jump frames
        self.time_counter += 1
        if not self.time_counter % self.frame_jump:
            self.graphics_counter += self.sign * self.step_size
            if abs(self.graphics_counter) >= self.movement_size:
                self.sign *= -1

        self.overlay_vertices = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0],
                                            [1, 0, 0],
                                            [self.graphics_counter, 0.5, 4]])

        # Scale the overlay to the target rectangle and project it into the image
        verts = self.overlay_vertices * [
            (x_end - x_start), (y_end - y_start), -(x_end - x_start) * 0.3
        ] + (x_start, y_start, 0)
        verts = cv2.projectPoints(verts, rvec, tvec, K,
                                  dist_coef)[0].reshape(-1, 2)

        # Fill the base and the four side faces of the pyramid
        verts_floor = np.int32(verts).reshape(-1, 2)
        cv2.drawContours(img, [verts_floor[:4]], -1, self.color_base, -3)
        cv2.drawContours(img, [np.vstack((verts_floor[:2], verts_floor[4:5]))],
                         -1, (0, 255, 0), -3)
        cv2.drawContours(img,
                         [np.vstack((verts_floor[1:3], verts_floor[4:5]))], -1,
                         (255, 0, 0), -3)
        cv2.drawContours(img,
                         [np.vstack((verts_floor[2:4], verts_floor[4:5]))], -1,
                         (0, 0, 150), -3)
        cv2.drawContours(img, [
            np.vstack((verts_floor[3:4], verts_floor[0:1], verts_floor[4:5]))
        ], -1, (255, 255, 0), -3)

        # Draw the wireframe edges on top of the filled faces
        for i, j in self.overlay_edges:
            (x_start, y_start), (x_end, y_end) = verts[i], verts[j]
            cv2.line(img, (int(x_start), int(y_start)),
                     (int(x_end), int(y_end)), self.color_lines, 2)
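
Code example #4 is a complete Tracker class. A minimal driver for it might look like the following; it assumes cv2, numpy (as np), PoseEstimator and ROISelector are imported at module level, since the excerpt itself omits the imports:

if __name__ == '__main__':
    # Opens camera 0 and the 'Augmented Reality' window; Space pauses,
    # 'c' clears the tracked targets, Esc exits the loop.
    tracker = Tracker()
    tracker.start()
    tracker.cap.release()
    cv2.destroyAllWindows()
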
Code example #5
class Tracker(object):
    def __init__(self, capId, scaling_factor, win_name):
        self.cap = cv2.VideoCapture(capId)
        self.rect = None
        self.win_name = win_name
        self.scaling_factor = scaling_factor
        self.tracker = PoseEstimator()

        ret, frame = self.cap.read()
        self.rect = None
        self.frame = cv2.resize(frame,
                                None,
                                fx=scaling_factor,
                                fy=scaling_factor,
                                interpolation=cv2.INTER_AREA)

        self.roi_selector = ROISelector(win_name, self.frame, self.set_rect)

        self.overlay_vertices = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0],
                                            [1, 0, 0], [0.5, 0.5, 4]])
        self.overlay_edges = [(0, 1), (1, 2), (2, 3), (3, 0), (0, 4), (1, 4),
                              (2, 4), (3, 4)]
        self.color_base = (0, 255, 0)
        self.color_lines = (0, 0, 0)

        self.graphics_counter = 0
        self.time_counter = 0

    def set_rect(self, rect):
        self.rect = rect
        self.tracker.add_target(self.frame, rect)

    def start(self):
        paused = False
        while True:
            if not paused or self.frame is None:
                ret, frame = self.cap.read()
                if not ret: break

                frame = cv2.resize(frame,
                                   None,
                                   fx=self.scaling_factor,
                                   fy=self.scaling_factor,
                                   interpolation=cv2.INTER_AREA)

                self.frame = frame.copy()

            img = self.frame.copy()
            if not paused:
                tracked = self.tracker.track_target(self.frame)
                for item in tracked:
                    cv2.polylines(img, [np.int32(item.quad)], True,
                                  self.color_lines, 2)
                    for (x, y) in np.int32(item.points_cur):
                        cv2.circle(img, (x, y), 2, self.color_lines)

                    self.overlay_graphics(img, item)

            self.roi_selector.draw_rect(img, self.rect)
            cv2.imshow(self.win_name, img)
            ch = cv2.waitKey(1)
            if ch == ord(' '): paused = not paused
            if ch == ord('c'): self.tracker.clear_targets()
            if ch == 27: break

    def overlay_graphics(self, img, tracked):
        x_start, y_start, x_end, y_end = tracked.target.rect
        quad_3d = np.float32([[x_start, y_start, 0], [x_end, y_start, 0],
                              [x_end, y_end, 0], [x_start, y_end, 0]])
        h, w = img.shape[:2]
        K = np.float64([[w, 0, 0.5 * (w - 1)], [0, w, 0.5 * (h - 1)],
                        [0, 0, 1.0]])
        dist_coef = np.zeros(4)
        ret, rvec, tvec = cv2.solvePnP(quad_3d, tracked.quad, K, dist_coef)

        # Cycle the apex height (0-7) every 20 frames
        self.time_counter += 1
        if not self.time_counter % 20:
            self.graphics_counter = (self.graphics_counter + 1) % 8

        self.overlay_vertices = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0],
                                            [1, 0, 0],
                                            [0.5, 0.5, self.graphics_counter]])

        verts = self.overlay_vertices * [
            (x_end - x_start), (y_end - y_start), -(x_end - x_start) * 0.3
        ] + (x_start, y_start, 0)
        verts = cv2.projectPoints(verts, rvec, tvec, K,
                                  dist_coef)[0].reshape(-1, 2)

        verts_floor = np.int32(verts).reshape(-1, 2)
        cv2.drawContours(img, [verts_floor[:4]], -1, self.color_base, -3)
        cv2.drawContours(img, [np.vstack((verts_floor[:2], verts_floor[4:5]))],
                         -1, (0, 255, 0), -3)
        cv2.drawContours(img,
                         [np.vstack((verts_floor[1:3], verts_floor[4:5]))], -1,
                         (255, 0, 0), -3)
        cv2.drawContours(img,
                         [np.vstack((verts_floor[2:4], verts_floor[4:5]))], -1,
                         (0, 0, 150), -3)
        cv2.drawContours(img, [
            np.vstack((verts_floor[3:4], verts_floor[0:1], verts_floor[4:5]))
        ], -1, (255, 255, 0), -3)

        for i, j in self.overlay_edges:
            (x_start, y_start), (x_end, y_end) = verts[i], verts[j]
            cv2.line(img, (int(x_start), int(y_start)),
                     (int(x_end), int(y_end)), self.color_lines, 2)
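
The variant in code example #5 takes the capture device id, the scaling factor and the window name as constructor arguments instead of hard-coding them. A corresponding entry point could look like this; the concrete values (device 0, factor 0.8, window title) are illustrative assumptions, not part of the excerpt:

if __name__ == '__main__':
    tracker = Tracker(capId=0, scaling_factor=0.8, win_name='Augmented Reality')
    tracker.start()
    tracker.cap.release()
    cv2.destroyAllWindows()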