Example No. 1
    def test_ffmpeg(self):
        """Runs two FFmpeg subprocesses: the first encodes raw 24-bit frames into raw MPEG-4 video stream,
        the second decodes that stream and feeds the reader again with raw 24-bit frames. The latter triggers
        detection of simple shapes on an image. The number of shapes detected is counted and signals to end
        the test.
        """

        width = 480
        height = 360
        encoder_frame_buffer = FrameBuffer(10, width, height)
        decoder_frame_buffer = FrameBuffer(10, width, height)

        encoder_frame_queue = Queue(1)
        decoder_frame_queue = Queue(1)
        artist_subscribe_queue = Queue(1)
        decoder_subscribe_queue = Queue(1)

        log_queue = Queue()
        getLogger().addHandler(QueueHandler(log_queue))

        stop_process_event = Event()

        latch = CountDownLatch(100)

        encoder = FFmpegEncoder("encoder", stop_process_event, log_queue, encoder_frame_queue, encoder_frame_buffer,
                                ['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-f', 'rawvideo', '-pix_fmt',
                                 'rgb24', '-s', '{}x{}'.format(width, height), '-i', '-', '-an', '-f', 'm4v',
                                 '-'], None, PIPE)
        decoder = FFmpegDecoder("decoder", stop_process_event, log_queue, decoder_frame_queue, decoder_frame_buffer,
                                ['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-f', 'm4v', '-i', '-', '-f',
                                 'rawvideo', '-pix_fmt', 'rgb24', '-'], None, PIPE)

        artist = Artist("artist", stop_process_event, log_queue, encoder_frame_queue, encoder_frame_buffer)
        conductor = WorkPublish(Thread, "conductor", stop_process_event, log_queue, artist_subscribe_queue,
                                encoder_frame_buffer)

        processes = [LogHandler(Thread, "logger", stop_process_event, log_queue, filename=None),
                     artist,
                     conductor,
                     encoder,
                     decoder,
                     Copy(Thread, "copier", stop_process_event, log_queue, encoder.stdout, decoder.stdin),
                     ShapeDetector(Process, "detector", stop_process_event, log_queue, decoder_frame_queue,
                                   decoder_frame_buffer),
                     ShapeCounter(Thread, "counter", stop_process_event, log_queue, decoder_subscribe_queue,
                                  decoder_frame_buffer, latch)]

        artist.subscribe(artist_subscribe_queue)
        decoder.subscribe(decoder_subscribe_queue)

        for process in processes:
            process.start()

        try:
            self.assertTrue(latch.wait(15))
        finally:
            stop_process_event.set()
            for process in processes:
                process.join(30)
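
A minimal standalone sketch of the same encode/decode round trip, using only the subprocess module and the FFmpeg commands shown above; the single black test frame and the chained pipes are assumptions made for illustration, not part of the project's reader classes.

from subprocess import PIPE, Popen

width, height = 480, 360
encode_cmd = ['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-f', 'rawvideo',
              '-pix_fmt', 'rgb24', '-s', '{}x{}'.format(width, height),
              '-i', '-', '-an', '-f', 'm4v', '-']
decode_cmd = ['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-f', 'm4v',
              '-i', '-', '-f', 'rawvideo', '-pix_fmt', 'rgb24', '-']

# Chain the encoder's stdout straight into the decoder's stdin.
encoder = Popen(encode_cmd, stdin=PIPE, stdout=PIPE)
decoder = Popen(decode_cmd, stdin=encoder.stdout, stdout=PIPE)

frame = bytes(width * height * 3)             # one black 24-bit RGB frame
encoder.stdin.write(frame)
encoder.stdin.close()                         # EOF flushes the encoded stream

decoded = decoder.stdout.read(width * height * 3)
assert len(decoded) == width * height * 3     # the frame survived the round trip
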
Example No. 2
    def test_frame_buffer(self):
        frame_buffer = FrameBuffer(10, 1, 1, 1)

        length = len(frame_buffer.frames)
        frame_cycle = cycle(range(length))
        for i in range(-1, length * 3):
            _, frame_index = frame_buffer.select_next_ready(i)
            self.assertEqual(next(frame_cycle), frame_index)
Example No. 3
    def test_frame_buffer_expire(self):
        getLogger(FrameBuffer.__name__).setLevel(ERROR)

        frame_buffer = FrameBuffer(1, 1, 1, 1)
        for frame in frame_buffer.frames:
            frame.header.epoch = time()
            frame.latch.next()
        frame, frame_index = frame_buffer.select_next_ready()
        self.assertIsNone(frame)

        for frame in frame_buffer.frames:
            frame.header.epoch -= 60
        frame, frame_index = frame_buffer.select_next_ready()
        self.assertIsNotNone(frame)
Example No. 4
    def _new_frame(self, frame_in, payload: Payload, stop_event,
                   frame_buffer_in: FrameBuffer, frame_buffer_out: FrameBuffer,
                   *args, **kwargs):
        self._subscribers_lock.acquire()
        try:
            max_subscribers = len(self._subscribers)
            if max_subscribers == 0:  # Release the frame as no one has subscribed
                return

            frame_out, frame_index = frame_buffer_out.select_next_ready(
                self.__last_frame_index)
            self.__last_frame_index = frame_index
            if frame_out is None:
                raise BufferError

            self._incoming_frame(frame_in, frame_out, stop_event, *args,
                                 **kwargs)

            # Enforce confirmation from all subscribers before returning the frame to the buffer
            frame_out.latch.next()
            frame_out.latch.next(max_subscribers)

            payload = Payload(self.name, frame_index)
            count = self._publish(payload)

            # Return the frame to the buffer even if not every subscriber received it
            # (a standalone sketch of this confirmation rule follows the example).
            while count < max_subscribers:
                frame_out.latch.next()
                count += 1
        finally:
            self._subscribers_lock.release()
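
The confirmation rule in the comments above can be shown in isolation. This is a hypothetical stand-in for the frame latch, not the project's implementation: a frame is released back to the buffer only after every subscriber has acknowledged it.

import threading

class ConfirmationLatch:
    """Counts subscriber confirmations and releases the frame once all arrive."""

    def __init__(self, subscribers):
        self._remaining = subscribers
        self._lock = threading.Lock()
        self.released = threading.Event()

    def confirm(self):
        with self._lock:
            self._remaining -= 1
            if self._remaining <= 0:
                self.released.set()       # the frame may return to READY

latch = ConfirmationLatch(subscribers=3)
for _ in range(3):
    latch.confirm()                       # one acknowledgement per subscriber
assert latch.released.is_set()
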
Example No. 5
    def _next_frame(self, frame_queue, frame_buffer: FrameBuffer, *args, **kwargs):
        frame, frame_index = frame_buffer.select_next_ready(self.__last_frame_index)
        self.__last_frame_index = frame_index
        if frame is None:
            raise BufferError

        return frame_index if self._new_frame(frame, frame_queue, frame_buffer, *args, **kwargs) else None
Example No. 6
    def _create_encoder(self, camera_config, camera_name, frame_buffer_out, buffer_size,
                        detection_sieve, visual_effects, visual_effects_queue):
        if 'encoder' not in camera_config['ffmpeg']:
            return None, None, None

        encoder_queue = Queue(1)
        encoder = FFmpegEncoder(camera_name, self._stop_events[0], self._log_queue, encoder_queue,
                                frame_buffer_out, camera_config['ffmpeg']['encoder'], self._config_path,
                                DEVNULL if 'output' in camera_config else PIPE,
                                kwargs={'log_level': self._args.log_level})
        self._processes.append(encoder)
        detection_sieve.subscribe(visual_effects_queue)
        visual_effects.subscribe(encoder_queue)

        if 'output' in camera_config:
            return encoder, None, None

        mpegts_buffer = FrameBuffer(buffer_size, int(camera_config['width'] / 4), 188, 1)

        mpegts_reader = MpegTSReader(camera_name, self._stop_events[0], self._log_queue,
                                     encoder.stdout, mpegts_buffer,
                                     kwargs={'log_level': self._args.log_level})
        self._processes.append(mpegts_reader)

        return encoder, mpegts_reader, mpegts_buffer
Example No. 7
    def __init_frame_keeper(self, camera_config):
        self.__dict = {}
        frame_index = 0
        for entry in camera_config['detect']:
            coco_class = next(iter(entry))
            idx = COCO_CLASSES.index(coco_class)
            self.__dict[idx] = KeepData(frame_index, 0, 0)
            frame_index += 1

        self.__frame_keeper = FrameBuffer(frame_index, camera_config['width'],
                                          camera_config['height'])
Example No. 8
    def test_shape_detection(self):
        """Tests TensorFlow object detection using the trained model of simple geometric shapes.
        The detector recognises shapes drawn on a frame image, the sieve filters those having confidence
        above 50%, lastly shapes are counted signalling to end the test.
        """

        with EnvironmentVarGuard() as env:
            env.set("TF_CPP_MIN_LOG_LEVEL", "3")
            env.set("CORAL_VISIBLE_DEVICES", "")
            env.set("CUDA_VISIBLE_DEVICES", "")

            frame_buffer = FrameBuffer(10, 100, 100)

            frame_queue = Queue(1)
            subscriber_queue = Queue(1)
            detection_sieve_queue = Queue(1)

            log_queue = Queue()
            getLogger().addHandler(QueueHandler(log_queue))

            stop_process_event = Event()

            latch = CountDownLatch(100)

            artist = Artist("artist", stop_process_event, log_queue, frame_queue, frame_buffer)

            detection_sieve = DetectionSieve("sieve", stop_process_event, log_queue,
                                             detection_sieve_queue, frame_buffer,
                                             self._create_filters(), RateLimiter())

            processes = [artist, detection_sieve,
                         LogHandler(Thread, "logger", stop_process_event, log_queue, filename=None),
                         ShapeCounter(Thread, "counter", stop_process_event, log_queue, subscriber_queue,
                                      frame_buffer, latch)]

            processes += create_object_detectors(Process, stop_process_event, log_queue, frame_queue,
                                                 {artist.name: frame_buffer}, self._get_model_path())

            artist.subscribe(detection_sieve_queue)
            detection_sieve.subscribe(subscriber_queue)

            for process in processes:
                process.start()

            try:
                self.assertTrue(latch.wait(15))
            finally:
                stop_process_event.set()
                for process in processes:
                    process.join(30)
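
A hypothetical illustration of the 50% confidence rule mentioned in the docstring; the real filter objects come from self._create_filters(), whose interface is not shown in this example.

def confidence_filter(detections, threshold=0.5):
    """Keep only detections whose confidence exceeds the threshold."""
    return [d for d in detections if d['confidence'] > threshold]

detections = [{'label': 'circle', 'confidence': 0.92},
              {'label': 'square', 'confidence': 0.31}]
assert confidence_filter(detections) == [{'label': 'circle', 'confidence': 0.92}]
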
Example No. 9
    def test_numpy_stream(self):
        """Tests shared memory usage across processes. One process fills a frame buffer
        with random data, while another performs simple math operations on the given random image,
        comparing with predicted result.
        """

        frame_buffer = FrameBuffer(5, 10, 10, 1, 'd')

        frame_queue = Queue()

        log_queue = Queue()
        getLogger().addHandler(QueueHandler(log_queue))

        stop_process_event = Event()

        log_handler = LogHandler(Process,
                                 "logger",
                                 stop_process_event,
                                 log_queue,
                                 filename=None)
        reader = NumpyRead(Process, "reader", stop_process_event, log_queue,
                           frame_queue, frame_buffer)
        worker = NumpyWork(Process, "worker", stop_process_event, log_queue,
                           frame_queue, frame_buffer)

        log_handler.start()
        reader.start()
        worker.start()

        try:
            # Wait till the last frame in the buffer is used.
            self.assertTrue(frame_buffer.frames[-1].latch.wait(
                State.PUBLISH, 5))
        finally:
            stop_process_event.set()
            reader.join(30)
            worker.join(30)
            log_handler.join(30)

        self.assertEqual(len(frame_buffer.frames),
                         frame_buffer.status[State.PUBLISH])
        self.assertEqual(first=len(frame_buffer.frames),
                         second=worker.matches,
                         msg="Not all frames were processed")
Example No. 10
    def _next_frame(self, frame_queue, stop_event, frame_buffer: FrameBuffer):
        frame, frame_index = frame_buffer.select_next_ready(
            self._last_frame_index)
        self._last_frame_index = frame_index
        if frame is None:
            return None

        image_shape, image_np = frame.get_numpy_image()

        # Fill image buffer with random data
        data = np.random.randn(*image_shape)
        np.copyto(image_np, data)

        # Sum all pixels and apply a randomly chosen operation to the sum
        x = np.sum(image_np).item()
        y = random.random()
        operation = random.choice(OPERATIONS)
        expected_result = simple_math(operation, x, y)

        frame.header.epoch = time()
        frame.latch.next()

        return Action(frame_index, operation, y, expected_result)
Example No. 11
    def test_snapshot(self):
        width = 100
        height = 100
        frame_buffer = FrameBuffer(10, width, height)
        frame_queue = Queue(1)

        log_queue = Queue()
        getLogger().addHandler(QueueHandler(log_queue))

        stop_process_event = Event()

        effect = MockEffect()
        effect.draw_rect = MagicMock()

        snapshot = Snapshot("snapshot", stop_process_event, log_queue,
                            frame_queue, frame_buffer,
                            self._create_detect_config(width,
                                                       height), [effect])
        processes = [
            snapshot,
            LogHandler(Thread,
                       "logger",
                       stop_process_event,
                       log_queue,
                       filename=None)
        ]

        for process in processes:
            process.start()

        try:
            frame_index = 0
            frame = frame_buffer.frames[frame_index]

            frame.header.detections[0].label = COCO_CLASSES.index('book')
            frame.header.detections[0].bounding_box.x_min = 1
            frame.header.detections[0].bounding_box.y_min = 2
            frame.header.detections[0].bounding_box.x_max = 3
            frame.header.detections[0].bounding_box.y_max = 4
            frame.header.epoch = time()

            frame.latch.next()
            frame.latch.next()

            payload = Payload(None, frame_index)
            frame_queue.put(payload)

            self.assertTrue(
                frame.latch.wait_for(State.READY, stop_process_event.is_set,
                                     10))

            with self.assertRaises(AssertionError):
                snapshot.get('person')

            self.assertIsNotNone(snapshot.get('book'))

            effect.draw_rect.assert_called_with(1, 2, 3, 4)
        finally:
            stop_process_event.set()
            for process in processes:
                process.join(30)
Example No. 12
def prepare_shape_model(groups):
    frame_buffer = FrameBuffer(10, 300, 300)

    frame_queue = Queue(1)
    subscriber_queue = Queue(1)

    log_queue = CountableQueue()
    getLogger().addHandler(QueueHandler(log_queue))

    stop_logging_event = Event()

    log_handler = LogHandler(Thread,
                             "logger",
                             stop_logging_event,
                             log_queue,
                             filename=None)
    log_handler.start()

    for group, count in groups.items():
        path = os.path.abspath(
            os.path.join(
                Path(__file__).parent.parent.parent.parent,
                'build/test/model'))
        os.makedirs(os.path.join(path, "images", group), exist_ok=True)
        os.makedirs(os.path.join(path, "annotations"), exist_ok=True)

        stop_process_event = Event()

        latch = CountDownLatch(count)

        artist = Artist("artist", stop_process_event, log_queue, frame_queue,
                        frame_buffer)
        processes = [
            artist,
            ShapeDetector(Thread, "detector", stop_process_event, log_queue,
                          frame_queue, frame_buffer),
            Classifier(Thread,
                       "classifier",
                       stop_process_event,
                       log_queue,
                       subscriber_queue,
                       frame_buffer,
                       path,
                       group,
                       latch,
                       kwargs={'log_level': DEBUG})
        ]
        artist.subscribe(subscriber_queue)

        for process in processes:
            process.start()

        try:
            latch.wait()
        finally:
            stop_process_event.set()
            for process in processes:
                process.join(30)

    stop_logging_event.set()
    log_queue.join()
Example No. 13
    def _setup(self):
        self._processes = []
        self._stop_events += [Event()]
        self._frame_queue = Queue()

        all_semaphores = {}
        for camera in self._config['cameras']:
            camera_name = next(iter(camera))
            camera_config = camera[camera_name]

            buffer_size = 10
            frame_buffer_in = FrameBuffer(buffer_size, camera_config['width'], camera_config['height'])
            frame_buffer_out = FrameBuffer(buffer_size, camera_config['width'], camera_config['height'])

            decoder_stop_event = Event()
            decoder_queue_semaphore = BoundedSemaphore(1)
            all_semaphores[camera_name] = decoder_queue_semaphore
            decoder_queue = BalancedQueue(self._frame_queue, {camera_name: decoder_queue_semaphore}, camera_name)
            decoder = FFmpegDecoder(camera_name, decoder_stop_event, self._log_queue, decoder_queue,
                                    frame_buffer_in, camera_config['ffmpeg']['decoder'], self._config_path,
                                    kwargs={'log_level': self._args.log_level})
            self._processes.append(decoder)
            self._stop_events.append(decoder_stop_event)

            filters = self._create_filters(camera_config)
            detection_sieve_queue = Queue(1)
            detection_sieve = DetectionSieve(camera_name, self._stop_events[0], self._log_queue,
                                             detection_sieve_queue, frame_buffer_in, filters, decoder.rate_limiter,
                                             kwargs={'log_level': self._args.log_level})
            self._processes.append(detection_sieve)
            decoder.subscribe(detection_sieve_queue)

            visual_effects_queue = Queue(1)
            visual_effects = VisualEffects(camera_name, self._stop_events[0], self._log_queue,
                                           visual_effects_queue, frame_buffer_in, frame_buffer_out,
                                           self._create_effects(camera_config),
                                           kwargs={'log_level': self._args.log_level})
            self._processes.append(visual_effects)

            encoder, mpegts_reader, mpegts_buffer \
                = self._create_encoder(camera_config, camera_name, frame_buffer_out, buffer_size,
                                       detection_sieve, visual_effects, visual_effects_queue)

            mqtt = self._create_mqtt(camera_config, camera_name, frame_buffer_in, decoder,
                                     decoder_stop_event, detection_sieve)

            snapshot_queue = Queue(1)
            snapshot = Snapshot(camera_name, self._stop_events[0], self._log_queue, snapshot_queue,
                                frame_buffer_in, camera_config,
                                self._create_effects(camera_config),
                                kwargs={'topic': self.app_name, 'log_level': self._args.log_level})
            self._processes.append(snapshot)
            detection_sieve.subscribe(snapshot_queue)

            self._cameras[camera_name] = Camera(frame_buffer_in, frame_buffer_out,
                                                decoder, encoder, detection_sieve, mqtt, snapshot,
                                                visual_effects, visual_effects_queue,
                                                MotionJpeg.create_buffer(buffer_size),
                                                mpegts_reader, mpegts_buffer)

        self._detectors += create_object_detectors(Process, self._stop_events[0], self._log_queue,
                                                   BalancedQueue(self._frame_queue, all_semaphores),
                                                   {n: c.frame_buffer_in for n, c in self._cameras.items()},
                                                   self._args.model_path,
                                                   kwargs={'log_level': self._args.log_level})
        self._processes += self._detectors
Example No. 14
    def test_subscribe(self):
        """Tests the coherence among the processes sharing the same frame buffer.
        The queues tying the processes are of limited size (1) and the workers are a bit slower,
        than the reader. The reader has to drop frames if any of the workers is busy handling previous
        frame. The frame state latch must get back to READY state, if neither the main worker nor subscriber
        can pick up the next frame. This ensures the buffer will never overflow.
        """

        width = 500
        height = 500
        frame_buffer_in = FrameBuffer(10, width, height)
        frame_buffer_out = FrameBuffer(10, width, height)

        frame_queue = Queue(1)
        subscriber_queue = Queue(1)
        subscriber_queue1 = Queue(1)
        subscriber_queue2 = Queue(1)
        subscriber_queue3 = Queue(1)

        log_queue = Queue()
        getLogger().addHandler(QueueHandler(log_queue))

        stop_process_event = Event()

        latch = CountDownLatch(100)

        effects = [CopyHeaderEffect(), CopyImageEffect(), DrawEffect()]

        artist = Artist("artist", stop_process_event, log_queue, frame_queue,
                        frame_buffer_in)
        conductor = VisualEffects("conductor", stop_process_event, log_queue,
                                  subscriber_queue, frame_buffer_in,
                                  frame_buffer_out, effects)

        processes = [
            artist, conductor,
            LogHandler(Thread,
                       "logger",
                       stop_process_event,
                       log_queue,
                       filename=None),
            ShapeDetector(Process, "detector", stop_process_event, log_queue,
                          frame_queue, frame_buffer_in),
            ShapeCounter(Thread, "counter1", stop_process_event, log_queue,
                         subscriber_queue1, frame_buffer_out, latch),
            ShapeCounter(Thread, "counter2", stop_process_event, log_queue,
                         subscriber_queue2, frame_buffer_out, latch),
            ShapeCounter(Thread, "counter3", stop_process_event, log_queue,
                         subscriber_queue3, frame_buffer_out, latch)
        ]

        artist.subscribe(subscriber_queue)
        conductor.subscribe(subscriber_queue1)
        conductor.subscribe(subscriber_queue2)
        conductor.subscribe(subscriber_queue3)

        for process in processes:
            process.start()

        try:
            self.assertTrue(latch.wait(15))
        finally:
            stop_process_event.set()
            for process in processes:
                process.join(30)

            conductor.unsubscribe(subscriber_queue1)
            conductor.unsubscribe(subscriber_queue2)
            conductor.unsubscribe(subscriber_queue3)
            artist.unsubscribe(subscriber_queue)
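
A minimal illustration of the drop-when-busy behaviour described in the docstring; the helper below is an assumption for clarity, the real decision is made through the frame latch and the publish logic.

from queue import Full, Queue

subscriber_queue = Queue(maxsize=1)       # same capacity as the queues in the test

def publish_or_drop(queue, frame_index):
    """Offer a frame to a subscriber; drop it rather than block on a full queue."""
    try:
        queue.put_nowait(frame_index)
        return True                       # the subscriber will confirm the frame
    except Full:
        return False                      # subscriber busy: the frame returns to READY

assert publish_or_drop(subscriber_queue, 0) is True
assert publish_or_drop(subscriber_queue, 1) is False   # still holding frame 0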