# Example no. 1
 def __init__(self, capture=None, face_img_path=None, should_mirror=False):
     """Set up the window, capture source, face tracker and display flags."""
     self._window_name = 'FaceOff'
     self._window_manager = WindowManager(self.on_keypress)
     # Fall back to the default camera (device 0) when no capture is given.
     if capture is None:
         capture = cv2.VideoCapture(0)
     self._capture_manager = CaptureManager(capture)
     self._should_mirror = should_mirror
     self._face_tracker = FaceTracker()
     # Runtime toggles (changed from the keyboard handler).
     self._show_face_rect = False
     self._swap_face = True
     # Optional template image used for face swapping; None disables it.
     self._template_face = (cv2.imread(face_img_path)
                            if face_img_path is not None else None)
# Example no. 2
    def __init__(self):
        """Create window/capture managers and reset the detection counters."""
        # Process frames on a thread pool by default (toggled at runtime).
        self._thread_mode = True

        self._windowManager = WindowManager(
            'Minimum Viable Product', self.on_keypress)

        # Detection statistics.
        self._amount_frames = 0
        self._success_finding_contours = 0

        # Capture source: device index 0.  A DroidCam stream URL such as
        # 'http://192.168.55.78:4747/video' can be used instead.
        source = 0
        self._captureManager = CaptureManager(
            cv2.VideoCapture(source), self._windowManager, False)
# Example no. 3
    def __init__(self, window_manager):
        """Initialise recording state and open the default capture device."""
        # Recording parameters (set by start()).
        self._record_face = 0
        self._record_count = 0

        # Worker-thread state.
        self._stopped = True
        self._processed = False

        # Frame statistics.
        self._amount_frames = 0
        self._success_finding_contours = 0
        self._original_capture = None

        # Collected ROI frames, keyed by an encoded face/sample index.
        self._data = {}

        # Capture source: device index 0.  A DroidCam stream URL such as
        # 'http://192.168.55.51:4747/video' can be used instead.
        source = 0
        self._capture_manager = CaptureManager(cv2.VideoCapture(source))
        self._window_manager = window_manager
# Example no. 4
  def test_manager(self):
    """Exercise CaptureManager: two short tasks, one long task, shutdown."""
    sniffer = BaseSnifferDevice()
    manager = CaptureManager(controller=sniffer)
    time.sleep(1)

    # Two short back-to-back capture tasks.
    for i in range(2):
      name = 'test%d' % i
      manager.start_new_task(name, 'test owner', 'localhost')
      time.sleep(2)
      manager.stop_task(name)

    time.sleep(1)

    # One long-running capture task.
    manager.start_new_task('long test', 'test owner', 'localhost')
    time.sleep(60)
    manager.stop_task('long test')
    time.sleep(1)

    # Dump every finished task and its traces.
    for task in manager.get_finished_tasks():
      print('Finished: id %s, start %s, stop %s' % (task.id, task.start_time, task.stop_time))
      for trace in manager.get_trace_list_by_task_id(task.id):
        print('- T: %s' % trace)

    manager.shutdown()
    time.sleep(2)
# Example no. 5
class ImageDataRecorder(object):
    """Record ROI frames from a camera on a background thread.

    Each captured frame is run through ``processing.process_and_detect``;
    every frame that yields an ROI is stored in ``self._data`` until
    ``count`` samples have been collected (see start()/get()/stop()).
    """
    def __init__(self, window_manager):
        # Recording parameters; set by start(), reset by stop().
        self._record_face = 0
        self._record_count = 0
        # _stopped gates the worker loop; _processed reports it is running.
        self._stopped = True
        self._processed = False

        # Per-session statistics.
        self._amount_frames = 0
        self._success_finding_contours = 0
        self._original_capture = None

        # Collected ROI frames, keyed by success_index * 10 + face id
        # (see get()).
        self._data = dict()

        # DroidCam URL
        # url = 'http://192.168.55.51:4747/video'
        url = 0
        self._capture_manager = CaptureManager(cv2.VideoCapture(url))
        self._window_manager = window_manager

    @property
    def original_capture(self):
        """Most recent raw frame read from the capture device."""
        return self._original_capture

    @property
    def data(self):
        """Dict of recorded ROI frames keyed by the encoded sample index."""
        return self._data

    # from CaptureManager
    @property
    def processing_capture(self):
        """Processed (annotated) frame exposed by the capture manager."""
        return self._capture_manager.processed_frame

    @property
    def roi_capture(self):
        """Current region-of-interest frame from the capture manager."""
        return self._capture_manager.roi_frame

    def release_frame(self):
        """Release the current frame back to the capture manager."""
        self._capture_manager.release_frame()

    # Thread
    @property
    def active(self):
        """True while a recording session is in progress."""
        return not self._stopped

    @property
    def processed(self):
        """True while the worker loop in get() is running."""
        return self._processed

    def start(self, face=0, count=100):
        """Start recording `count` ROI samples for face id `face`.

        Spawns a worker thread running get() and returns self.
        """
        print('Start recording')

        self._record_face = face
        self._record_count = count
        self._stopped = False

        # TODO: Thread Refactoring
        Thread(target=self.get, args=()).start()
        return self

    def get(self):
        """Worker loop: read frames, detect ROIs and store them in _data."""
        self._processed = True
        self._amount_frames = 0
        self._success_finding_contours = 0

        while not self._stopped:
            self._capture_manager.enter_frame()

            self._original_capture = self._capture_manager.original_frame
            if self._original_capture is None:
                # NOTE(review): skips exit_frame() for this iteration —
                # confirm CaptureManager tolerates enter_frame() twice.
                # self.stop()
                continue

            if self._capture_manager.roi_frame is None:
                self._capture_manager.roi_frame = self._original_capture

            # print("Recording: " + str(self._success_finding_contours))
            # print("Count: " + str(self._record_count))
            self._amount_frames += 1
            self._capture_manager.processed_frame, roi_frame = \
                processing.process_and_detect(self._original_capture, self._window_manager)

            if roi_frame is not None:
                self._capture_manager.roi_frame = roi_frame
                # NOTE(review): key scheme index*10+face is collision-free
                # only while face < 10 — verify against callers of start().
                self._data[self._success_finding_contours * 10 + self._record_face] = self._capture_manager.roi_frame

                self._success_finding_contours += 1

                # Overlay success rate and sample count on the frame.
                color_yellow = (0, 255, 255)
                percent_success_finding = round((self._success_finding_contours / self._amount_frames) * 100, 2)
                cv2.putText(self._capture_manager.processed_frame, str(percent_success_finding) + "%",
                            (15, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, color_yellow, 2)
                cv2.putText(self._capture_manager.processed_frame, str(self._success_finding_contours),
                            (15, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, color_yellow, 2)

            self._capture_manager.exit_frame()

            if self._success_finding_contours >= self._record_count:
                print('Stop recording')
                self.stop()

        self._processed = False

    def stop(self):
        """Stop the worker loop and reset the recording parameters."""
        self._record_face = 0
        self._record_count = 0
        # self._data.clear()
        self._stopped = True

    def write_image(self, filename):
        """Save the current frame to `filename` via the capture manager."""
        self._capture_manager.write_image(filename)

    def is_writing_video(self):
        """Return whether the capture manager is writing a video file."""
        return self._capture_manager.is_writing_video

    def start_writing_video(self, filename):
        """Begin writing captured video to `filename`."""
        self._capture_manager.start_writing_video(filename)

    def stop_writing_video(self):
        """Stop writing captured video."""
        self._capture_manager.stop_writing_video()
# Example no. 6
class VideoCaptureApp(object):
    """Webcam app that tracks faces and optionally swaps in a template face.

    Keys: m = toggle mirroring, s = toggle face swap, f = toggle the face
    rectangle overlay, Esc = quit.
    """

    def __init__(self, capture=None, face_img_path=None, should_mirror=False):
        self._window_manager = WindowManager(self.on_keypress)
        # Default to the first camera device when no capture is supplied.
        self._capture_manager = CaptureManager(cv2.VideoCapture(0)) \
                if capture is None else CaptureManager(capture)
        self._window_name = 'FaceOff'
        self._should_mirror = should_mirror
        self._face_tracker = FaceTracker()
        self._show_face_rect = False
        self._swap_face = True
        # Template image pasted over detected faces; None disables swapping.
        self._template_face = None
        if face_img_path is not None:
            self._template_face = cv2.imread(face_img_path)

    def run(self):
        """Main loop: grab a frame, detect/swap faces, show the result."""
        self._window_manager.create_window(self._window_name)
        while self._window_manager.is_window_created(self._window_name):
            self._capture_manager.enter_frame()
            frame = self._capture_manager.frame
            if frame is None:
                # BUG FIX: was a Python 2 `print` statement, which is a
                # syntax error under Python 3.
                print("get None frame!")
                break

            # Detect faces in the current frame.
            self._face_tracker.update(frame)
            face_num = len(self._face_tracker.faces)
            face_rect = None if face_num == 0 else \
                    self._face_tracker.faces[0].face_rect
            if self._show_face_rect:
                txt_str = 'face_num: {}'.format(face_num)
                for face in self._face_tracker.faces:
                    x, y, w, h = face.face_rect
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),
                                  2)
                    # only show the 1st face
                    break

            if face_rect is not None and self._swap_face and \
                    self._template_face is not None:
                x, y, w, h = face_rect
                template_face = self._template_face.copy()
                template_face = cv2.resize(template_face, (w, h))
                # Blend the template over the face; a plain paste would be
                # frame[y:y+h, x:x+w] = template_face.
                mask = 255 * np.ones(template_face.shape, template_face.dtype)
                # BUG FIX: use integer division — cv2.seamlessClone requires
                # an integral center point, and w / 2 is a float on Python 3.
                center = (x + w // 2, y + h // 2)
                frame = cv2.seamlessClone(template_face, frame, mask, center,
                                          cv2.MIXED_CLONE)

            # Mirror horizontally for a selfie-style view if requested.
            if self._should_mirror:
                frame = cv2.flip(frame, 1)
            # Draw the face-count text overlay.
            if self._show_face_rect:
                cv2.putText(frame, txt_str, (50, 100),
                            cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 255, 0), 2)
            self._window_manager.show_window(self._window_name, frame)

            self._capture_manager.exit_frame()
            self._window_manager.process_event()

    def on_keypress(self, keycode):
        """Handle key presses: m=mirror, s=swap, f=rectangle, Esc=quit."""
        if keycode == ord('m'):
            self._should_mirror = not self._should_mirror
        elif keycode == ord('s'):
            self._swap_face = not self._swap_face
        elif keycode == ord('f'):
            self._show_face_rect = not self._show_face_rect
        elif keycode == 27:
            # Escape destroys the window, which ends run()'s loop condition.
            self._window_manager.destroy_all_window()
# Example no. 7
class ImageDataRecorder(object):
    """Record a fixed rectangular ROI from a camera on a background thread.

    Unlike the detection-based variant, get() here crops a hard-coded
    rectangle from every frame and stores it in ``self._data`` until
    ``count`` samples have been collected (see start()/get()/stop()).
    """
    def __init__(self, window_manager):
        # Recording parameters; set by start(), reset by stop().
        self._record_face = 0
        self._record_count = 0
        # _stopped gates the worker loop; _processed reports it is running.
        self._stopped = True
        self._processed = False

        # Per-session statistics.
        self._amount_frames = 0
        self._success_finding_contours = 0
        self._original_capture = None

        # Collected ROI frames, keyed by face id * 10 + sample index
        # (see get()).
        self._data = dict()

        # DroidCam URL
        # url = 'http://192.168.55.129:4747/video'
        url = 1
        self._capture_manager = CaptureManager(cv2.VideoCapture(url))
        self._window_manager = window_manager

    @property
    def original_capture(self):
        """Most recent raw frame read from the capture device."""
        return self._original_capture

    @property
    def data(self):
        """Dict of recorded ROI frames keyed by the encoded sample index."""
        return self._data

    # from CaptureManager
    @property
    def processing_capture(self):
        """Processed (annotated) frame exposed by the capture manager."""
        return self._capture_manager.processed_frame

    @property
    def roi_capture(self):
        """Current region-of-interest frame from the capture manager."""
        return self._capture_manager.roi_frame

    def release_frame(self):
        """Release the current frame back to the capture manager."""
        self._capture_manager.release_frame()

    # Thread
    @property
    def active(self):
        """True while a recording session is in progress."""
        return not self._stopped

    @property
    def processed(self):
        """True while the worker loop in get() is running."""
        return self._processed

    def start(self, face=0, count=100):
        """Start recording `count` ROI samples for face id `face`.

        Spawns a worker thread running get() and returns self.
        """
        print('Start recording')

        self._record_face = face
        self._record_count = count
        self._stopped = False

        # TODO: Thread Refactoring
        Thread(target=self.get, args=()).start()
        return self

    def get(self):
        """Worker loop: crop a fixed ROI from each frame and store it."""
        self._processed = True
        self._amount_frames = 0
        self._success_finding_contours = 0

        while not self._stopped:
            self._capture_manager.enter_frame()

            original_capture = self._capture_manager.original_frame
            if original_capture is None:
                # NOTE(review): skips exit_frame() for this iteration —
                # confirm CaptureManager tolerates enter_frame() twice.
                # self.stop()
                continue

            # (h, w) = original_capture.shape[:2]
            # center = (w / 2, h / 2)
            # M = cv2.getRotationMatrix2D(center, 270, 1.0)
            # self._original_capture = cv2.warpAffine(original_capture, M, (h, w))

            self._original_capture = original_capture

            self._amount_frames += 1

            height, width, _ = self._original_capture.shape

            # Draw the fixed capture rectangle on a copy of the frame.
            self._capture_manager.processed_frame = self._original_capture.copy(
            )
            cv2.rectangle(self._capture_manager.processed_frame, (440, 300),
                          (width - 400, height - 250), (0, 0, 255), 3)
            # cv2.rectangle(self._capture_manager.processed_frame,
            #               (80, 120), (width - 0, height - 260), (0, 0, 255), 3)

            # Crop the same fixed rectangle as the ROI sample.
            self._capture_manager.roi_frame = self._original_capture[
                300:height - 250, 440:width - 400]
            # self._capture_manager.roi_frame = self._original_capture[120:height - 260, 80:width - 0]
            # NOTE(review): key scheme face*10+index collides once the
            # sample index reaches 10, yet start() defaults count=100 —
            # verify intended key encoding against consumers of data.
            self._data[
                self._record_face * 10 + self.
                _success_finding_contours] = self._capture_manager.roi_frame

            self._success_finding_contours += 1
            '''
            if self._capture_manager.roi_frame is None:
                self._capture_manager.roi_frame = self._original_capture

            # print("Recording: " + str(self._success_finding_contours))
            # print("Count: " + str(self._record_count))
            
            self._capture_manager.processed_frame, roi_frame = \
                processing.process_and_detect(self._original_capture, self._window_manager)

            if roi_frame is not None:
                self._capture_manager.roi_frame = roi_frame
                self._data[self._success_finding_contours * 10 + self._record_face] = self._capture_manager.roi_frame

                self._success_finding_contours += 1

                color_yellow = (0, 255, 255)
                percent_success_finding = round((self._success_finding_contours / self._amount_frames) * 100, 2)
                cv2.putText(self._capture_manager.processed_frame, str(percent_success_finding) + "%",
                            (15, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, color_yellow, 2)
                cv2.putText(self._capture_manager.processed_frame, str(self._success_finding_contours),
                            (15, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, color_yellow, 2)
            '''

            self._capture_manager.exit_frame()

            if self._success_finding_contours >= self._record_count:
                print('Stop recording')
                self.stop()

        self._processed = False

    def stop(self):
        """Stop the worker loop and reset the recording parameters."""
        self._record_face = 0
        self._record_count = 0
        # self._data.clear()
        self._stopped = True

    def write_image(self, filename):
        """Save the current frame to `filename` via the capture manager."""
        self._capture_manager.write_image(filename)

    def is_writing_video(self):
        """Return whether the capture manager is writing a video file."""
        return self._capture_manager.is_writing_video

    def start_writing_video(self, filename):
        """Begin writing captured video to `filename`."""
        self._capture_manager.start_writing_video(filename)

    def stop_writing_video(self):
        """Stop writing captured video."""
        self._capture_manager.stop_writing_video()
# Example no. 8
class MVP(object):
    """Minimum Viable Product: threaded capture and rectangle detection.

    Frames go through a grayscale -> bilateral filter -> Canny ->
    morphological-close pipeline whose parameters come from window
    trackbars, then are scanned for rectangle-like contours.

    Keys: space toggles threaded processing, tab toggles screencast
    recording, Esc quits.
    """

    def __init__(self):
        # Dispatch frame processing to a thread pool by default.
        self._thread_mode = True

        self._windowManager = WindowManager('Minimum Viable Product',
                                            self.on_keypress)

        # Statistics behind the on-screen detection success percentage.
        self._amount_frames = 0
        self._success_finding_contours = 0
        # DroidCam URL
        # url = 'http://192.168.55.78:4747/video'
        url = 0
        self._captureManager = CaptureManager(
            cv2.VideoCapture(url), self._windowManager, False)

    def run(self):
        """Run the main loop."""

        threadn = cv2.getNumberOfCPUs()
        pool = ThreadPool(processes=threadn)
        pending = deque()

        # TODO: Camera Calibration, Video Stabilization

        self._windowManager.create_window()

        while self._windowManager.is_window_created:
            self._captureManager.enter_frame()
            original = self._captureManager.original
            self._captureManager.frame = original

            # Drain results the pool has finished, in FIFO order.
            while len(pending) > 0 and pending[0].ready():
                output = pending.popleft().get()
                cv2.putText(output, "threaded      :  " + str(self._thread_mode),
                            (15, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
                self._captureManager.frame = output
                self._captureManager.exit_frame()

            # BUG FIX: `original` is None when the camera returned no frame;
            # the previous code then crashed on original.copy().
            if original is not None and len(pending) < threadn:
                if self._thread_mode:
                    task = pool.apply_async(self.process_and_detect, (original.copy(),))
                else:
                    # Synchronous fallback wrapped to look like an async task.
                    task = DummyTask(self.process_and_detect(original))
                pending.append(task)

            self._captureManager.exit_frame()
            self._windowManager.process_events()

    def process_and_detect(self, src):
        """Process one BGR frame and return it annotated with detections."""
        self._amount_frames += 1

        # TODO: Image Preprocessor: removing shadows, small blobs, noise, enhancing, etc
        processing = self.image_processing_template_one(src)

        # Detect on the binary image; draw onto a BGR copy of it.
        output = cv2.cvtColor(processing, cv2.COLOR_GRAY2BGR)
        success_detect = self.try_detect(input=processing, output=output, post_detect_fn=self.post_detect_draw)

        if not success_detect:
            # TODO: image_processing_template_two
            pass

        # TODO: Get 4-contours square counts If zero
        # TODO: [For 3D] Wrapping & Transformations
        return output

    def image_processing_template_one(self, src):
        """Pipeline: grayscale -> bilateral filter -> Canny -> close."""
        gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
        filtering = self.image_filtering(gray)
        edges = self.edge_detection(filtering)
        closed = self.morphological_transformations(edges)
        return closed

    def image_filtering(self, src):
        """Bilateral-filter `src` with trackbar-controlled parameters."""
        diameter = self._windowManager.get_trackbar_value('diameter(for bilateralFilter)')
        sigma_color = self._windowManager.get_trackbar_value('sigmaColor(for bilateralFilter)')
        sigma_space = self._windowManager.get_trackbar_value('sigmaSpace(for bilateralFilter)')
        filtering = cv2.bilateralFilter(src, diameter, sigma_color, sigma_space)
        return filtering

    def edge_detection(self, src):
        """Run Canny edge detection with trackbar-controlled thresholds."""
        threshold_min = self._windowManager.get_trackbar_value('threshold min(for Canny edge detection)')
        threshold_max = self._windowManager.get_trackbar_value('threshold max(for Canny edge detection)')
        edges = cv2.Canny(src, threshold_min, threshold_max)
        return edges

    def morphological_transformations(self, edges):
        """Close small gaps in `edges` with a trackbar-sized rect kernel."""
        kernel_size = self._windowManager.get_trackbar_value('kernel size(morphological structuring element)')
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size))
        closed = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
        return closed

    def try_detect(self, input, output, post_detect_fn):
        """Try the rectangle detectors in order; return overall success."""
        success_standard = self.detect_standard_rects(input, output, post_detect_fn)
        if not success_standard:
            # TODO: detect_rects_by_lines
            pass

        return success_standard

    def detect_standard_rects(self, input, output, post_detect_fn):
        """Find polygonal contours on `input` and draw them onto `output`.

        Returns True when at least one contour matched the trackbar-set
        area and edge-count thresholds.
        """
        contour_area_points = self._windowManager.get_trackbar_value('Contour area min amount points (*100)')
        approx_edges_amount = self._windowManager.get_trackbar_value('Approx edges amount')

        # BUG FIX: cv2.findContours returns (image, contours, hierarchy) on
        # OpenCV 3.x but only (contours, hierarchy) on 4.x; taking the last
        # two elements works with both versions.
        contours, hierarchy = cv2.findContours(
            input, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]

        rects_count = 0
        for cont in contours:
            if cv2.contourArea(cont) > contour_area_points * 100:
                arc_len = cv2.arcLength(cont, True)
                approx = cv2.approxPolyDP(cont, 0.1 * arc_len, True)
                if len(approx) == approx_edges_amount:
                    post_detect_fn(output, approx)
                    rects_count += 1

        if rects_count > 0:
            self._success_finding_contours += 1

        # Overlay the running detection success rate.
        color_yellow = (0, 255, 255)
        percent_success_finding = round((self._success_finding_contours / self._amount_frames) * 100, 2)
        cv2.putText(output, str(percent_success_finding) + "%",
                    (15, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, color_yellow, 2)

        return rects_count > 0

    @staticmethod
    def post_detect_draw(output, approx):
        """Draw one approximated contour onto `output` in blue."""
        cv2.drawContours(output, [approx], -1, (255, 0, 0), 2)

    def on_keypress(self, keycode):
        """Handle a keypress.

        space  -> Toggle threaded processing mode.
        tab    -> Start/stop recording a screencast.
        escape -> Quit.

        """
        if keycode == 32:  # space
            # self._captureManager.write_image('screenshot.png')
            self._thread_mode = not self._thread_mode
        elif keycode == 9:  # tab
            if not self._captureManager.is_writing_video:
                self._captureManager.start_writing_video(
                    'screencast.avi')
            else:
                self._captureManager.stop_writing_video()
        elif keycode == 27:  # escape
            self._windowManager.destroy_window()
# Example no. 9
    filename = 'cap-%s-%s.btt' % (
        time.strftime('%y%m%d_%H%M%S', time.localtime(start_time)),
        time.strftime('%y%m%d_%H%M%S', time.localtime(stop_time)))
    return filename


def _epoch_time_to_human_readable(timestamp):
    return time.strftime('%x %X', time.localtime(timestamp))


def _task_list_to_string(task_list):
    str_list = []
    for task in task_list:
        task_dict = task.to_dict()
        str_list.append(task_dict)
    return str_list


def sigint_handler(signum, frame):
    """SIGINT handler: shut down the capture manager and exit cleanly.

    The first parameter was renamed from ``signal`` to ``signum`` so it no
    longer shadows the ``signal`` module; the interpreter always invokes
    signal handlers positionally, so this is backward compatible.
    """
    print('Shutting down service.')
    # Relies on the module-level capture_manager created under __main__.
    capture_manager.shutdown()

    sys.exit(0)


if __name__ == "__main__":
    # Install a Ctrl-C handler so the sniffer is shut down cleanly.
    signal.signal(signal.SIGINT, sigint_handler)
    sniffer = EllisysController()
    # NOTE(review): split_interval=600 presumably rotates capture files
    # every 600 seconds — confirm against CaptureManager.
    capture_manager = CaptureManager(sniffer, split_interval=600)
    # Flask app (defined at module level) listens on all interfaces.
    app.run(host='0.0.0.0', port=5000)