Example #1
def main():
    loop = asyncio.get_event_loop()
    io_scheduler = AsyncIOThreadSafeScheduler(loop=loop)
    scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())

    video_stream_observable = rx.using(
        lambda: VideoStreamDisposable(),
        lambda d: rx.from_iterable(video_stream_iterable(d.cap)))

    disposable = video_stream_observable.pipe(
        ops.subscribe_on(scheduler),
        ops.sample(1 / ARGS.fps),  # sample frames based on fps
        ops.filter(has_face),  # filter frames without faces
        ops.map(lambda frame: Image.fromarray(
            cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))),  # map frame to PIL image
        ops.observe_on(io_scheduler),
        ops.map(lambda img: ImageFacesPair(img, analyse_frame(img))),  # analyse faces on frame
        ops.filter(
            lambda img_faces_pair: any(
                ARGS.min_confidence <= face.top_prediction.confidence <= ARGS.max_confidence
                for face in img_faces_pair.faces
            )
        ),  # proceed only if min_confidence <= person_confidence <= max_confidence
        ops.do_action(on_next=save_frame)).subscribe(
            on_error=lambda e: logger.exception(e))

    try:
        loop.run_forever()
    except Exception as e:
        logger.exception(e)
        logger.info("Data collector shutdown")
        disposable.dispose()
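
Example #1 relies on a VideoStreamDisposable resource and a video_stream_iterable helper that are not shown. A minimal sketch of what they could look like, assuming OpenCV as the frame source (the names come from the example; the bodies are assumptions):

import cv2
import rx


class VideoStreamDisposable:
    """Hypothetical resource: opens a capture device, releases it on dispose()."""

    def __init__(self):
        self.cap = cv2.VideoCapture(0)  # camera index 0 is an assumption

    def dispose(self):
        self.cap.release()


def video_stream_iterable(cap):
    # yield frames until the capture device stops delivering them
    while True:
        ok, frame = cap.read()
        if not ok:
            return
        yield frame


# rx.using ties the lifetime of the capture device to the subscription
frames = rx.using(
    lambda: VideoStreamDisposable(),
    lambda d: rx.from_iterable(video_stream_iterable(d.cap)))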
Example #2
        def create():
            def create_resource():
                # resource factory: bump the counter, then fail via the _raise helper
                dispose_invoked[0] += 1
                raise _raise(ex)

            def create_observable(d):
                # observable factory: bump the counter and return a stream that never terminates
                create_invoked[0] += 1
                return rx.never()

            return rx.using(create_resource, create_observable)
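
This and the following fragments are taken from unit-test style code; the surrounding harness (the counters, ex, _raise, scheduler) is not shown. A self-contained sketch of how such a fragment is typically driven, assuming RxPY's rx.testing utilities (the assertions reflect what this particular fragment should produce, not the original test):

import rx
from rx.testing import TestScheduler, ReactiveTest

on_error = ReactiveTest.on_error


def test_using_resource_factory_throws():
    scheduler = TestScheduler()
    dispose_invoked = [0]
    create_invoked = [0]
    ex = Exception("ex")

    def create():
        def create_resource():
            dispose_invoked[0] += 1
            raise ex  # the resource factory fails before any observable exists

        def create_observable(d):
            create_invoked[0] += 1
            return rx.never()

        return rx.using(create_resource, create_observable)

    # TestScheduler.start subscribes at 200 virtual ticks by default
    results = scheduler.start(create)

    assert results.messages == [on_error(200, ex)]
    assert dispose_invoked[0] == 1  # the factory ran once and raised
    assert create_invoked[0] == 0   # the observable factory was never reached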
Example #3
        def create():
            def create_resource():
                # resource factory: bump the counter and hand back a mock disposable
                dispose_invoked[0] += 1
                disp[0] = MockDisposable(scheduler)
                return disp[0]

            def create_observable(d):
                # observable factory: bump the counter, then fail via the _raise helper
                create_invoked[0] += 1
                _raise(ex)

            return rx.using(create_resource, create_observable)
Example #4
        def create():
            def create_resource():
                # resource factory: bump the counter and hand back a mock disposable
                dispose_invoked[0] += 1
                disp[0] = MockDisposable(scheduler)
                return disp[0]

            def create_observable(d):
                # observable factory: remember the resource and return a cold stream
                # that emits once at 100 ticks and errors at 200 ticks
                _d[0] = d
                create_invoked[0] += 1
                xs[0] = scheduler.create_cold_observable(
                    on_next(100, scheduler.clock), on_error(200, ex))
                return xs[0]

            return rx.using(create_resource, create_observable)
Example #5
        def create():
            def create_resources():
                # resource factory: bump the counter and return None as the resource
                dispose_invoked[0] += 1
                disp[0] = None
                return disp[0]

            def create_observable(d):
                # observable factory: remember the resource and return a cold stream
                # that emits once at 100 ticks and completes at 200 ticks
                _d[0] = d
                create_invoked[0] += 1
                xs[0] = scheduler.create_cold_observable(
                    on_next(100, scheduler.clock), on_completed(200))
                return xs[0]

            return rx.using(create_resources, create_observable)
Example #6
def main():
    loop = asyncio.get_event_loop()
    io_scheduler = AsyncIOThreadSafeScheduler(loop=loop)
    scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())

    semaphore = Subject()

    semaphore_stream = semaphore.pipe(
        ops.flat_map(lambda _: rx.of(True).pipe(
            ops.delay(ARGS.block_time, scheduler=scheduler),
            ops.start_with(False))), ops.start_with(True))

    video_stream_observable = rx.using(
        lambda: VideoStreamDisposable(),
        lambda d: rx.from_iterable(video_stream_iterable(d.cap)))

    gated_video_stream = video_stream_observable.pipe(
        ops.subscribe_on(scheduler),
        ops.sample(1 / ARGS.fps),  # sample frames based on fps
        ops.combine_latest(semaphore_stream),
        ops.filter(lambda tup: tup[1]),  # proceed only if semaphore allows
        ops.map(lambda tup: tup[0])  # take only frame
    )

    disposable = gated_video_stream.pipe(
        ops.filter(has_face),  # filter frames without faces
        ops.map(lambda frame: Image.fromarray(
            cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))),  # map frame to PIL image
        ops.map(lambda img: img.resize(
            (640, 360))),  # resize image (inference will be faster)
        ops.observe_on(io_scheduler),
        ops.map(lambda img: ImageFacesPair(img, analyse_frame(img))),  # analyse frame for faces
        ops.filter(lambda img_faces_pair: any(
            face.top_prediction.confidence > ARGS.threshold
            for face in img_faces_pair.faces
        )),  # proceed only if there is a known face in the frame
        ops.throttle_first(1),
        ops.flat_map(unlock_request),  # unlock the door
        ops.do_action(
            on_next=lambda _: semaphore.on_next(True)
        )  # trigger semaphore which will block stream for "block-seconds" seconds (doors are unlocked for that long after unlock request)
    ).subscribe(on_error=lambda e: logger.exception(e))

    try:
        loop.run_forever()
    except Exception as e:
        logger.exception(e)
        logger.info("Smart lock face recognition engine shutdown")
        disposable.dispose()
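
The semaphore trick above can be reproduced in isolation. A minimal sketch that gates a plain interval stream instead of video frames (BLOCK_TIME and the interval period are assumptions; after each delivered item the gate closes for BLOCK_TIME seconds):

import time

import rx
import rx.operators as ops
from rx.subject import Subject

BLOCK_TIME = 2.0  # seconds the gate stays closed after a delivered item

semaphore = Subject()

# emits False as soon as the semaphore fires, then True again after BLOCK_TIME
gate = semaphore.pipe(
    ops.flat_map(lambda _: rx.of(True).pipe(
        ops.delay(BLOCK_TIME),
        ops.start_with(False))),
    ops.start_with(True))  # the gate starts open

source = rx.interval(0.5)  # stand-in for the sampled video stream

source.pipe(
    ops.combine_latest(gate),
    ops.filter(lambda tup: tup[1]),  # drop items while the gate is closed
    ops.map(lambda tup: tup[0]),
    ops.do_action(on_next=lambda _: semaphore.on_next(True)),  # close the gate
).subscribe(on_next=print)

time.sleep(6)  # roughly one item should be printed per BLOCK_TIME window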
Example #7
def using_file(file, mode, to_observable):
    def open_file():
        opened_file = open(file, mode)
        return _DisposableFile(opened_file)

    return using(open_file, lambda f: to_observable(f.opened_file))
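
_DisposableFile is not defined above; a minimal sketch, assuming it only needs to expose the open file and close it on dispose():

class _DisposableFile:
    """Hypothetical wrapper pairing an open file with a dispose() that closes it."""

    def __init__(self, opened_file):
        self.opened_file = opened_file

    def dispose(self):
        self.opened_file.close()


# hypothetical usage: stream the lines of a file, closing it when the stream ends
# using_file("data.txt", "r", lambda f: rx.from_iterable(f)).subscribe(print)

The second snippet of this example demonstrates the create/dispose timing of rx.using directly: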
import rx
import rx.operators as ops
from rx.subject import Subject
import datetime
import time
from rx.disposable import Disposable


def resource():
    print("create resource at {}".format(datetime.datetime.now()))

    def dispose():
        print("dispose resource at {}".format(datetime.datetime.now()))

    return Disposable(dispose)


rx.using(resource, lambda r: rx.just(1).pipe(ops.delay(0.2))).subscribe(
    on_next=lambda i: print("on_next {}".format(i)),
    on_error=lambda e: print("on_error: {}".format(e)),
    on_completed=lambda: print("on_completed")
)
time.sleep(0.5)  # keep the main thread alive until the delayed emission has been delivered
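
Running this snippet should print the resource-creation message immediately, then after roughly 0.2 seconds on_next 1 followed by on_completed, and finally the dispose message, since rx.using disposes the resource once the inner subscription terminates.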