Example #1
    def start(self):
        if not self.stopped:
            return
        super().start()
        mouse_source, keyboard_source, engagement_source = self.sources

        initial_keyboard_event = keyboard.KeyboardEvent(keyboard.KEY_UP, 0)
        initial_mouse_event = mouse.ButtonEvent(event_type=mouse.UP,
                                                button=0,
                                                time=time.time())
        self.subscriptions = [
            mouse_source.output.pipe(
                operators.start_with(initial_mouse_event)).pipe(
                    operators.combine_latest(
                        keyboard_source.output.pipe(
                            operators.start_with(initial_keyboard_event)),
                        engagement_source.output)).pipe(
                            operators.throttle_first(0.1))  # in seconds
            .subscribe(self.update)
        ]

        if self.window is None:
            self.window = Window(points=self.points_in_buffer,
                                 toggle_callback=self.toggle_recording)

        self.window.show()
        self.window.activateWindow()
        self.window.raise_()
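Example #1 seeds each input with start_with so that combine_latest can fire before the user has touched anything, then throttle_first caps the update rate. A minimal sketch of the same chaining pattern, assuming plain Subjects and string payloads in place of the project's event sources:

from rx import operators
from rx.subject import Subject

mouse_events, keyboard_events, engagement_events = Subject(), Subject(), Subject()

subscription = mouse_events.pipe(
    operators.start_with("mouse-idle"),    # seed so combine_latest can fire immediately
    operators.combine_latest(
        keyboard_events.pipe(operators.start_with("keyboard-idle")),
        engagement_events),
    operators.throttle_first(0.1),         # at most one combined update per 100 ms
).subscribe(print)

engagement_events.on_next("engaged")   # first combined tuple is emitted here
mouse_events.on_next("mouse-moved")    # arrives inside the 100 ms window, so it is suppressed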
Example #2
    def start(self):
        if not self.stopped:
            return
        super().start()
        video_source, = self.sources

        self.subscriptions = [
            video_source.output.pipe(
                operators.throttle_first(0.1))  # in seconds
            .subscribe(self.process_frame),
        ]
Example #3
    def start(self):
        if not self.stopped:
            return
        super().start()
        mouse_source, = self.sources

        self.subscriptions = [
            mouse_source.output.pipe(
                operators.throttle_first(self.window_duration)).subscribe(
                    self._subj.on_next),
        ]
Example #4
def main():
    loop = asyncio.get_event_loop()
    io_scheduler = AsyncIOThreadSafeScheduler(loop=loop)
    scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())

    semaphore = Subject()

    semaphore_stream = semaphore.pipe(
        ops.flat_map(lambda _: rx.of(True).pipe(
            ops.delay(ARGS.block_time, scheduler=scheduler),
            ops.start_with(False))), ops.start_with(True))

    video_stream_observable = rx.using(
        lambda: VideoStreamDisposable(),
        lambda d: rx.from_iterable(video_stream_iterable(d.cap)))

    gated_video_stream = video_stream_observable.pipe(
        ops.subscribe_on(scheduler),
        ops.sample(1 / ARGS.fps),  # sample frames based on fps
        ops.combine_latest(semaphore_stream),
        ops.filter(lambda tup: tup[1]),  # proceed only if semaphore allows
        ops.map(lambda tup: tup[0])  # take only frame
    )

    disposable = gated_video_stream.pipe(
        ops.filter(has_face),  # filter frames without faces
        ops.map(lambda frame: Image.fromarray(
            cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))),  # map frame to PIL image
        ops.map(lambda img: img.resize(
            (640, 360))),  # resize image (inference will be faster)
        ops.observe_on(io_scheduler),
        ops.map(lambda img: ImageFacesPair(img, analyse_frame(img))
                ),  # analyse frame for faces
        ops.filter(lambda img_faces_pair: any([
            face.top_prediction.confidence > ARGS.threshold
            for face in img_faces_pair.faces
        ])),  # proceed only if there is a known face in the frame
        ops.throttle_first(1),
        ops.flat_map(unlock_request),  # unlock the door
        ops.do_action(
            on_next=lambda _: semaphore.on_next(True)
        )  # trigger semaphore which will block stream for "block-seconds" seconds (doors are unlocked for that long after unlock request)
    ).subscribe(on_error=lambda e: logger.exception(e))

    try:
        loop.run_forever()
    except Exception as e:
        logger.exception(e)
        logger.info("Smart lock face recognition engine shutdown")
        disposable.dispose()
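The semaphore gating in Example #4 can be reduced to a small self-contained sketch (the 2-second block time and the string payloads are illustrative, not taken from the original project): each value pushed into the semaphore subject closes the gate immediately via start_with(False) and reopens it after the delay.

import time
import rx
from rx import operators as ops
from rx.subject import Subject

semaphore = Subject()
gate = semaphore.pipe(
    ops.flat_map(lambda _: rx.of(True).pipe(
        ops.delay(2.0),            # reopen the gate after 2 seconds
        ops.start_with(False))),   # ...but close it immediately
    ops.start_with(True))          # the gate starts open

frames = Subject()
frames.pipe(
    ops.combine_latest(gate),
    ops.filter(lambda pair: pair[1]),   # drop frames while the gate is closed
    ops.map(lambda pair: pair[0]),
).subscribe(print)

frames.on_next("frame-1")    # printed: the gate starts open
semaphore.on_next(True)      # closes the gate for 2 seconds
frames.on_next("frame-2")    # not printed now; combine_latest surfaces it again
                             # once the gate emits True
time.sleep(3)                # keep the main thread alive so the reopening is observable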
Example #5
    def add_by_user_id(self, userid: str, sniff_config: Config):
        assert userid == sniff_config.userid
        assert userid not in self.observable_map
        filtered_notification = self.attend_notifier.pipe(
            operators.filter(lambda uid: uid == userid)).pipe(
                operators.throttle_first(1))
        context = AttendancenStateContext(
            userid,
            False,
            filtered_notification,
            absence_due_second=sniff_config.absence_due_second)
        self.observable_map[sniff_config.userid] = context.get_observable()
        self.observable_map[sniff_config.userid].subscribe(
            self.handle_state_change)
Example #6
    def configure_subscriptions(self, connected):
        if connected:
            self.subjects.image_producer.pipe(
                operators.observe_on(self.feed_scheduler),
                operators.buffer_with_count(self.batch_size),
                bp_operator(BackPressure.DROP, 5),
                operators.take_until(self._stop),
            ).subscribe(ErrorToConsoleObserver(self.feed_image))

            self.inference_comm.back_pressure_chan.pipe(
                operators.subscribe_on(self.process_scheduler),
                operators.take_until(self._stop)).subscribe(
                    ErrorToConsoleObserver(self.update_back_pressure_status))

            # report error when image source is still producing when back pressuring
            self.subjects.analyzer_back_pressure_detected.pipe(
                operators.combine_latest(self.subjects.image_producer),
                operators.filter(lambda x: x[0]),
                operators.throttle_first(1.0),
                operators.take_until(self._stop),
            ).subscribe(
                ErrorToConsoleObserver(lambda x: self.logger.warning(
                    "Image is feeding while back-pressure is detected. Please slow down the FPS"
                )))

            self.inference_comm.result_chan.pipe(
                operators.subscribe_on(self.process_scheduler),
                operators.take_until(self._stop)).subscribe(
                    ErrorToConsoleObserver(self.result_processing))

            self.inference_comm.error_chan.pipe(
                operators.take_until(self._stop)).subscribe(
                    ErrorToConsoleObserver(lambda err: self.logger.error(err)))
            self.inference_comm.connection_chan.pipe(
                operators.take_until(self._stop)).subscribe(
                    ErrorToConsoleObserver(lambda connected: self.logger.info(
                        "GRPC Remote analyzer connected" if connected else
                        "GRPC Remote analyzer disconnected")))
            self.inference_comm.stats_chan.pipe(
                operators.take_until(self._stop)).subscribe(
                    ErrorToConsoleObserver(lambda x: self.logger.info(
                        f"Processed {x.frame} frames. Average {x.processTime / x.frame} secs"
                    )))
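A recurring idiom in Example #6 (and in Example #7 below) is using throttle_first purely to rate-limit log output, so a flood of identical events produces one message per interval. Reduced to its core, with an illustrative logger and interval that are not part of the original project:

import logging
from rx import operators
from rx.subject import Subject

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("backpressure")

backpressure_events = Subject()
backpressure_events.pipe(
    operators.filter(lambda detected: detected),
    operators.throttle_first(1.0),   # at most one warning per second
).subscribe(lambda _: logger.warning(
    "Back-pressure detected, please slow down the producer"))

for _ in range(100):
    backpressure_events.on_next(True)   # logged once, not 100 times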
Example #7
    def start(self):
        # report more image when back pressure
        self.subjects.image_producer.pipe(
            operators.observe_on(self.scheduler),
            operators.combine_latest(
                self.subjects.analyzer_back_pressure_detected),
            operators.filter(
                lambda x: x[1]),  # only operate when back pressure
            operators.buffer_with_time(1.0),  # in 1 sec
            operators.filter(lambda x: len(x) > 3),  # more than 3 emission
            operators.throttle_first(3.0),  # report every 3 seconds
            operators.take_until(self._stop),
        ).subscribe(self.report_back_pressure_emission)

        self.subjects.image_producer.pipe(
            operators.observe_on(
                self.scheduler),  # prevent blocking the upstream subject
            operators.filter(self.back_pressure_barrier),
            operators.buffer_with_count(5),
            bp_drop_report_full(self.subjects.analyzer_back_pressure_detected,
                                3, 1),
            operators.take_until(self._stop),
        ).subscribe(ErrorToConsoleObserver(self.produce_fake_analyze_data))
        super(TestAnalyzer, self).start()
Example #8
    def create():
        return xs.pipe(ops.throttle_first(200))
Example #9
    def create():
        return xs.pipe(ops.throttle_first(200))
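The two create() factories above (Examples #8 and #9) are the inner functions used by RxPY-style virtual-time tests. A minimal sketch of how such a test drives them with a TestScheduler (the tick values below are illustrative, not the original test data):

from rx import operators as ops
from rx.testing import TestScheduler, ReactiveTest

on_next = ReactiveTest.on_next

scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
    on_next(210, 1), on_next(250, 2), on_next(500, 3))

def create():
    return xs.pipe(ops.throttle_first(200))

results = scheduler.start(create)
# 210 passes, 250 falls inside the 200-tick window opened at 210,
# and 500 starts a new window
assert [m.time for m in results.messages] == [210, 500]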
Example #10
# Operate on the data streams
print('Find all even numbers')
some_data = rx.of(1, 2, 3, 4, 5, 6, 7, 8)
some_data2 = rx.from_iterable(range(10, 20))
some_data.pipe(
    op.merge(some_data2),
    op.filter(lambda i: i % 2 == 0),
    # op.map(lambda i: i * 2)
).subscribe(lambda i: print(i))

# throttle_first: within each 3-second window, only the first emission is passed on
# (debounce, commented out below, would instead emit only after 3 seconds of silence)
print('Suppress duplicate emissions')
ob = Subject()
ob.pipe(
    op.throttle_first(3)
    # op.debounce(3)
).subscribe(
    on_next=lambda i: print(i),
    on_completed=lambda: print('Completed')
)

print('press Enter to emit a timestamp, type anything else to exit')
while True:
    s = input()
    if s == '':
        ob.on_next(datetime.datetime.now().time())
    else:
        ob.on_completed()
        break
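For the snippet above, the practical difference between the active throttle_first(3) and the commented-out debounce(3) can be seen with a short script (the timings are illustrative):

import time
from rx import operators as op
from rx.subject import Subject

clicks = Subject()
clicks.pipe(op.throttle_first(3)).subscribe(
    lambda i: print('throttle_first ->', i))   # passes the first item, then mutes for 3 s
clicks.pipe(op.debounce(3)).subscribe(
    lambda i: print('debounce ->', i))         # waits for 3 s of silence, then emits the last item

for i in range(3):
    clicks.on_next(i)     # rapid burst: throttle_first prints 0 immediately
    time.sleep(0.5)
time.sleep(4)             # once the burst goes quiet, debounce prints 2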