Example #1
def main():
    loop = asyncio.get_event_loop()
    io_scheduler = AsyncIOThreadSafeScheduler(loop=loop)
    scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())

    video_stream_observable = rx.using(
        lambda: VideoStreamDisposable(),
        lambda d: rx.from_iterable(video_stream_iterable(d.cap)))

    disposable = video_stream_observable.pipe(
        ops.subscribe_on(scheduler),
        ops.sample(1 / ARGS.fps),  # sample frames based on fps
        ops.filter(has_face),  # filter frames without faces
        ops.map(lambda frame: Image.fromarray(
            cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))),  # map frame to PIL image
        ops.observe_on(io_scheduler),
        ops.map(lambda img: ImageFacesPair(
            img, analyse_frame(img))),  # analyse faces on frame
        ops.filter(
            lambda img_faces_pair: any(
                ARGS.min_confidence <= face.top_prediction.confidence <= ARGS.max_confidence
                for face in img_faces_pair.faces)
        ),  # proceed only if min_confidence <= person_confidence <= max_confidence
        ops.do_action(on_next=save_frame)).subscribe(
            on_error=lambda e: logger.exception(e))

    try:
        loop.run_forever()
    except Exception as e:
        logger.exception(e)
        logger.info("Data collector shutdown")
        disposable.dispose()
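
Stripped of the camera and face-analysis helpers, the same rx.using + ops.sample skeleton can be sketched on its own. Everything below (the fake resource, the ticking source, the timings) is invented for illustration; only the operator chain mirrors the example above.

import time

import rx
import rx.operators as ops
from rx.disposable import Disposable


def make_resource():
    # hypothetical stand-in for VideoStreamDisposable: acquired on subscribe,
    # released together with the subscription
    print("resource acquired")
    return Disposable(lambda: print("resource released"))


# rx.using ties the resource's lifetime to the subscription, while
# ops.sample(0.5) forwards only the newest item from each 500 ms window.
subscription = rx.using(
    make_resource,
    lambda res: rx.interval(0.1)  # stand-in for the camera frame iterable
).pipe(
    ops.take_until(rx.timer(2)),
    ops.sample(0.5)
).subscribe(on_next=print, on_error=print)

time.sleep(3)
subscription.dispose()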
Example #2
def grouped_sample(
    key_mapper: ops.Mapper,
    sampler: Union[ops.timedelta, float, rx.Observable],
):
    """
    Combination of "group_by", "flat_map" and "sample": groups an observable sequence by the
    "key_mapper" function, maps each resulting observable sequence with the "sample" operator,
    and flattens them back into a single observable sequence.
    """
    return rx.pipe(
        ops.group_by(key_mapper),
        ops.flat_map(lambda x: x.pipe(ops.sample(sampler))),
    )
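
A hypothetical use of the composed operator above: a ticking source grouped by parity, so that each parity group is sampled independently once per second. The source, key function and timings are invented for illustration.

import time

# assumes grouped_sample, rx and ops from the definition above
rx.interval(0.05).pipe(
    ops.take_until(rx.timer(1.5)),
    grouped_sample(key_mapper=lambda i: i % 2, sampler=1.0),
).subscribe(print)
time.sleep(2)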
Example #3
def main():
    loop = asyncio.get_event_loop()
    io_scheduler = AsyncIOThreadSafeScheduler(loop=loop)
    scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())

    semaphore = Subject()

    semaphore_stream = semaphore.pipe(
        ops.flat_map(lambda _: rx.of(True).pipe(
            ops.delay(ARGS.block_time, scheduler=scheduler),
            ops.start_with(False))), ops.start_with(True))

    video_stream_observable = rx.using(
        lambda: VideoStreamDisposable(),
        lambda d: rx.from_iterable(video_stream_iterable(d.cap)))

    gated_video_stream = video_stream_observable.pipe(
        ops.subscribe_on(scheduler),
        ops.sample(1 / ARGS.fps),  # sample frames based on fps
        ops.combine_latest(semaphore_stream),
        ops.filter(lambda tup: tup[1]),  # proceed only if semaphore allows
        ops.map(lambda tup: tup[0])  # take only frame
    )

    disposable = gated_video_stream.pipe(
        ops.filter(has_face),  # filter frames without faces
        ops.map(lambda frame: Image.fromarray(
            cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))),  # map frame to PIL image
        ops.map(lambda img: img.resize(
            (640, 360))),  # resize image (inference will be faster)
        ops.observe_on(io_scheduler),
        ops.map(lambda img: ImageFacesPair(
            img, analyse_frame(img))),  # analyse frame for faces
        ops.filter(lambda img_faces_pair: any(
            face.top_prediction.confidence > ARGS.threshold
            for face in img_faces_pair.faces
        )),  # proceed only if there is a known face in the frame
        ops.throttle_first(1),
        ops.flat_map(unlock_request),  # unlock the door
        ops.do_action(
            on_next=lambda _: semaphore.on_next(True)
        )  # trigger the semaphore, which blocks the stream for "block-seconds"
           # seconds (the doors stay unlocked for that long after an unlock request)
    ).subscribe(on_error=lambda e: logger.exception(e))

    try:
        loop.run_forever()
    except Exception as e:
        logger.exception(e)
        logger.info("Smart lock face recognition engine shutdown")
        disposable.dispose()
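
The semaphore trick above can be isolated into a few lines: a Subject closes a gate whenever a value gets through, and the gate reopens on its own after a delay. The ticking source and the one-second block time below are invented; only the operator pattern is taken from the example.

import time

import rx
import rx.operators as ops
from rx.subject import Subject

block = Subject()

# gate stream: starts open (True); each block signal closes it (False)
# and reopens it (True) one second later
gate = block.pipe(
    ops.flat_map(lambda _: rx.of(True).pipe(
        ops.delay(1.0),
        ops.start_with(False))),
    ops.start_with(True))

rx.interval(0.2).pipe(
    ops.take_until(rx.timer(3)),
    ops.combine_latest(gate),
    ops.filter(lambda tup: tup[1]),  # drop ticks while the gate is closed
    ops.map(lambda tup: tup[0]),  # keep only the tick value
    ops.do_action(on_next=lambda _: block.on_next(True)),  # close the gate after each pass
).subscribe(print)

time.sleep(4)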
Example #4
groups = rx.from_(range(3)).pipe(
    ops.group_by(key_selector)
)
groups.subscribe(subscribe_group_observable)


# Sample: A way to grab items and emit latest values at certain points
print('-- Sample')

# an interval value is emitted every 10 ms (the time unit is seconds)
rx.interval(0.01).pipe(
    # run for 1 second
    ops.take_until(rx.timer(1)),
    # print the latest received value every 100 ms
    ops.sample(0.1)
).subscribe(print)
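# prints the newest counter value about every 100 ms,
# e.g. 8, 18, 28, ... (exact values depend on timing)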
time.sleep(2)


# Max
print('-- Max')
rx.from_([1, 2, 3, 4, 12, 3, 3, -10]).pipe(
    ops.max(lambda x, y: x - y)
).subscribe(print)
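# 12  (the largest value under the numeric comparer)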
Example #5
def create():
    return rx.never().pipe(ops.sample(1))
Example #6
def create():
    return rx.throw(ex).pipe(ops.sample(0))
Example #7
def create():
    return rx.empty().pipe(ops.sample(0))
Example #8
def create():
    return xs.pipe(ops.sample(50))
Example #9
import time
import rx
from rx import operators as ops

# https://www.youtube.com/watch?v=HLtZek0OhQA&list=PL1A418Fn3fORAnrEIwMiP04vsLzzusEjK&index=6

rx.from_(['abc', 'def', 'ghi']).subscribe(print)
# abc
# def
# ghi
print()

# an interval value is emitted every second (the time unit is seconds)
rx.interval(1).pipe(
    # run until 30 seconds have passed
    ops.take_until(rx.timer(30)),
    # print the latest value every 3 seconds
    ops.sample(3)).subscribe(print)
# 1
# 4
# 7
# 10
# 13
# 16
# 19
# 22
# 25
# 28
time.sleep(40)


import rx
import rx.operators as ops
from rx.subject import Subject

numbers = Subject()
sampler = Subject()
numbers.pipe(ops.sample(sampler=sampler)).subscribe(
    on_next=lambda i: print("on_next {}".format(i)),
    on_error=lambda e: print("on_error: {}".format(e)),
    on_completed=lambda: print("on_completed"))

numbers.on_next(1)
numbers.on_next(2)
sampler.on_next(True)
numbers.on_next(3)
numbers.on_next(4)
numbers.on_next(5)
sampler.on_next(True)
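
# expected output: sample(sampler=...) emits the latest value received from
# the numbers subject each time the sampler fires
# on_next 2
# on_next 5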