Example #1
class WriteOptions(object):
    def __init__(
        self,
        write_type: WriteType = WriteType.batching,
        batch_size=1_000,
        flush_interval=1_000,
        jitter_interval=0,
        retry_interval=1_000,
        write_scheduler=ThreadPoolScheduler(max_workers=1)
    ) -> None:
        """
        Creates the write API configuration.

        :param write_type: the method of writing (batching, asynchronous, synchronous)
        :param batch_size: the number of data points to collect in a batch
        :param flush_interval: flush data at least within this interval (milliseconds)
        :param jitter_interval: adds a random delay to flushes, primarily to avoid large write spikes for users
               running a large number of client instances; e.g., a jitter of 5s and a flush interval of 10s means
               flushes happen every 10-15s
        :param retry_interval: the time to wait before retrying an unsuccessful write
        :param write_scheduler: the scheduler on which batched writes are executed
        """
        self.write_type = write_type
        self.batch_size = batch_size
        self.flush_interval = flush_interval
        self.jitter_interval = jitter_interval
        self.retry_interval = retry_interval
        self.write_scheduler = write_scheduler
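
The jitter_interval above adds a random delay on top of the flush interval. A minimal sketch of how such a delay might be computed (an illustration only, not the library's actual implementation):

import random

def jitter_delay(flush_interval_ms: int, jitter_interval_ms: int) -> float:
    # fixed flush interval plus a uniformly distributed random jitter, in milliseconds
    return flush_interval_ms + random.random() * jitter_interval_ms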
Example #2
def main():
    loop = asyncio.get_event_loop()
    io_scheduler = AsyncIOThreadSafeScheduler(loop=loop)
    scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())

    video_stream_observable = rx.using(
        lambda: VideoStreamDisposable(),
        lambda d: rx.from_iterable(video_stream_iterable(d.cap)))

    disposable = video_stream_observable.pipe(
        ops.subscribe_on(scheduler),
        ops.sample(1 / ARGS.fps),  # sample frames based on fps
        ops.filter(has_face),  # filter frames without faces
        ops.map(lambda frame: Image.fromarray(
            cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))),  # map frame to PIL image
        ops.observe_on(io_scheduler),
        ops.map(lambda img: ImageFacesPair(img, analyse_frame(img))),  # analyse faces on frame
        ops.filter(
            lambda img_faces_pair: any(
                ARGS.min_confidence <= face.top_prediction.confidence <= ARGS.max_confidence
                for face in img_faces_pair.faces
            )
        ),  # proceed only if min_confidence <= person_confidence <= max_confidence
        ops.do_action(on_next=save_frame)).subscribe(
            on_error=lambda e: logger.exception(e))

    try:
        loop.run_forever()
    except Exception as e:
        logger.exception(e)
        logger.info("Data collector shutdown")
        disposable.dispose()
Example #3
def main():
    data_scheduler = ThreadPoolScheduler(1)

    with \
            cam_srv.exposure, cam_srv.focus, \
            main_proc.motion, \
            threads.RepeatingTimer(
                function=main_proc.process,
                interval=0,
                name="ProcessThread",
            ) as process_thread:
        main_proc.processed_imgs.subscribe(robot_finder.find)
        robot_finder.found.subscribe(prepare_data)
        ready_data.pipe(
            rx.operators.observe_on(data_scheduler)).subscribe(save_data)

        robot_finder.found.subscribe(cam_srv.ParamOptimizer.on_found)
        robot_finder.not_found.subscribe(on_not_found)

        robot_finder.found.subscribe(move)

        process_thread.start()
        main_proc.run_gui()

        process_thread.join()
Example #4
 def __init__(
     self,
     edit_speed_profile_presenter: EditSpeedProfilePresenter,
     preferences_presenter: PreferencesPresenter,
     has_supported_kraken_interactor: HasSupportedKrakenInteractor,
     get_status_interactor: GetStatusInteractor,
     set_speed_profile_interactor: SetSpeedProfileInteractor,
     settings_interactor: SettingsInteractor,
     check_new_version_interactor: CheckNewVersionInteractor,
     speed_profile_changed_subject: SpeedProfileChangedSubject,
     speed_step_changed_subject: SpeedStepChangedSubject,
     composite_disposable: CompositeDisposable,
 ) -> None:
     _LOG.debug("init MainPresenter ")
     self.main_view: MainViewInterface = MainViewInterface()
     self._edit_speed_profile_presenter = edit_speed_profile_presenter
     self._preferences_presenter = preferences_presenter
     self._scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())
     self._has_supported_kraken_interactor = has_supported_kraken_interactor
     self._get_status_interactor: GetStatusInteractor = get_status_interactor
     self._set_speed_profile_interactor: SetSpeedProfileInteractor = set_speed_profile_interactor
     self._settings_interactor = settings_interactor
     self._check_new_version_interactor = check_new_version_interactor
     self._speed_profile_changed_subject = speed_profile_changed_subject
     self._speed_step_changed_subject = speed_step_changed_subject
     self._composite_disposable: CompositeDisposable = composite_disposable
     self._profile_selected: Dict[str, SpeedProfile] = {}
     self._should_update_fan_speed: bool = False
     self._legacy_firmware_dialog_shown: bool = False
     self.application_quit: Callable = lambda *args: None  # will be set by the Application
Example #5
    def __init__(self,
                 url: str,
                 token: str,
                 org: str,
                 data_bucket: str,
                 meta_bucket: str,
                 workers: int = cpu_count()):
        super().__init__()
        self.client = InfluxDBClient(url=url, token=token, org=org)
        self.url = url
        self.token = token
        self.org = org

        if not self.check_bucket_exists(data_bucket):
            raise KeyError(f"Data bucket {data_bucket} does not exist")
        if not self.check_bucket_exists(meta_bucket):
            raise KeyError(f"Meta bucket {meta_bucket} does not exist")

        self.data_bucket = data_bucket
        self.meta_bucket = meta_bucket

        # write with the batching API, using sane-looking defaults
        self.api = self.client.write_api(
            write_options=WriteOptions(batch_size=200,
                                       flush_interval=2000,
                                       jitter_interval=100,
                                       retry_interval=2000,
                                       write_scheduler=ThreadPoolScheduler(
                                           max_workers=workers)))
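
check_bucket_exists is assumed by the snippet above. A minimal sketch using influxdb-client's buckets API (an illustration, not the original helper):

    def check_bucket_exists(self, bucket_name: str) -> bool:
        # find_bucket_by_name returns None when no bucket matches
        return self.client.buckets_api().find_bucket_by_name(bucket_name) is not None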
Example #6
class Gaia:
    _pool = ThreadPoolScheduler(5)
    _client_factory = GaiaClientFactory()
    _stream_client_factory = GaiaStreamClientFactory()

    @classmethod
    def connect(cls, url: str, credentials: GaiaCredentials) -> 'GaiaRef':
        config = GaiaConfig(
            url,
            HttpSensorFunction(url, credentials, cls._pool,
                               cls._client_factory),
            HttpSensorStream(url, credentials, cls._pool,
                             cls._stream_client_factory))
        return GaiaRef(config, config.functionProcessor,
                       config.streamProcessor)

    @classmethod
    def login(cls, url: str,
              credentials: UsernamePasswordCredentials) -> 'GaiaRef':
        headers = {'Content-Type': 'application/json'}
        response = requests.post(f"{url}/api/auth/access",
                                 json=repr(credentials),
                                 headers=headers)
        response.raise_for_status()
        return Gaia.connect(
            url, JWTCredentials(LoggedIn(response.json()).access_token))
Example #7
def rx_multi_threads(count_times=100000,
                     clear_or_nice_or_rx=ClearNiceRx.clear):
    start_time = timer()

    # size the ThreadPoolScheduler to the number of CPUs
    thread_count = multiprocessing.cpu_count()
    thread_pool_scheduler = ThreadPoolScheduler(thread_count)
    print("CPU count is: {0}".format(thread_count))
    range_list = range(count_times)
    chunks = split_list(range_list, int(count_times / thread_count) + 1)
    i: int = 0
    subscriber = [None] * thread_count
    task_complete_counter = 0

    for cpu_c in chunks:
        subscriber[i] = rx.from_(cpu_c).pipe(
            ops.map(lambda a: calculate_date_faster(day_matrix='45;12;1;29;2;', years_count=200)),
            ops.subscribe_on(thread_pool_scheduler)
        ).subscribe(
            # e.g. print("Next date is: {0}; Elapsed time: {1}".format(s[0], s[1]))
            on_next=lambda s: final_time(start_time),
            on_error=lambda error: print(error),
            on_completed=lambda: final_time(start_time)
        )
        i = i + 1

    end_time = timer()
    print("everything is done {}".format(task_complete_counter))
    print(
        'Total executions: {0}; average time: {1} microsec; total elapsed time: {2}'
        .format(count_times, 0, timedelta(seconds=end_time - start_time)))
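
split_list, calculate_date_faster and final_time are assumed by the snippet above. A minimal hypothetical sketch of the chunking helper:

def split_list(items, chunk_size):
    # split an iterable into consecutive chunks of at most chunk_size elements
    items = list(items)
    return [items[i:i + chunk_size] for i in range(0, len(items), chunk_size)]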
Example #8
 def test_threadpool_now_units(self):
     scheduler = ThreadPoolScheduler()
     diff = scheduler.now
     sleep(1.1)
     diff = scheduler.now - diff
     assert timedelta(milliseconds=1000) < diff < timedelta(
         milliseconds=1300)
Example #9
 def __init__(self):
     super().__init__()
     self.config = config.SettingAccessor(self.config_prefix)
     self.fps = self.config["fps"]
     self.images = self._source_images()
     self._stop = subject.Subject()
     self.running = False
     self.feed_scheduler = ThreadPoolScheduler()
Example #10
 def __init__(self) -> None:
     self._subject = Subject()
     self._scheduler = ThreadPoolScheduler(max_workers=1)
     obs = self._subject.pipe(ops.observe_on(self._scheduler))
     self._disposable = obs \
         .pipe(ops.window_with_time_or_count(count=5, timespan=datetime.timedelta(milliseconds=10_000)),
               ops.flat_map(lambda x: self._window_to_group(x)),
               ops.map(mapper=lambda x: self._retryable(data=x, delay=self._jitter_delay(jitter_interval=1000))),
               ops.merge_all()) \
         .subscribe(self._result, self._error, self._on_complete)
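
The _window_to_group, _retryable and _jitter_delay helpers are assumed by the snippet above. A minimal hypothetical sketch of the windowing step (the real implementation in influxdb-client groups by batch key and does more):

 def _window_to_group(self, window):
     # collapse each time/count window into a single list-valued emission
     return window.pipe(ops.to_iterable())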
Example #11
 def __init__(self, set_overclock_interactor: SetOverclockInteractor,
              composite_disposable: CompositeDisposable) -> None:
     LOG.debug("init EditOverclockProfilePresenter")
     self._set_overclock_interactor = set_overclock_interactor
     self._composite_disposable: CompositeDisposable = composite_disposable
     self.view: EditOverclockProfileViewInterface = EditOverclockProfileViewInterface()
     self._profile = OverclockProfile()
     self._overclock = Overclock()
     self._scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())
     self._gpu_index: int = 0
Example #12
def run(onComplete=lambda: None):
    limit = 10 ** 22
    optimal_thread_count = multiprocessing.cpu_count()
    pool_scheduler = ThreadPoolScheduler(optimal_thread_count)

    count = rx.from_iterable(pandigital_step_numbers()) \
        .pipe(
        ops.take_while(lambda n: n < limit),
        ops.count(),
    ) \
        .run()

    onComplete(count)
Example #13
 def __init__(self,
              concurrency_per_group,
              delay_seconds=0,
              description=None):
     self.scheduler = ThreadPoolScheduler(concurrency_per_group)
     self.request_scheduler = ThreadPoolScheduler(10)
     self._requests = Subject()
     self._output_subject = Subject()
     self._output = self._output_subject.pipe(share())
     self._description = description
     self._subscription = self._requests.pipe(
         observe_on(self.scheduler),
         group_by(lambda r: r['concurrency_group']),
         flat_map(lambda concurrency_group: concurrency_group.pipe(
             map(lambda r: r['request']),
             merge(max_concurrent=concurrency_per_group),
             delay(delay_seconds))),
         take_until_disposed()).subscribe(
             on_next=lambda request: self._output_subject.on_next(request),
             on_error=lambda error: logging.exception(
                 'Error in {} request stream'.format(self)),
             on_completed=lambda: self.dispose(),
             scheduler=self.scheduler)
Example #14
def main():
    loop = asyncio.get_event_loop()
    io_scheduler = AsyncIOThreadSafeScheduler(loop=loop)
    scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())

    semaphore = Subject()

    semaphore_stream = semaphore.pipe(
        ops.flat_map(lambda _: rx.of(True).pipe(
            ops.delay(ARGS.block_time, scheduler=scheduler),
            ops.start_with(False))), ops.start_with(True))

    video_stream_observable = rx.using(
        lambda: VideoStreamDisposable(),
        lambda d: rx.from_iterable(video_stream_iterable(d.cap)))

    gated_video_stream = video_stream_observable.pipe(
        ops.subscribe_on(scheduler),
        ops.sample(1 / ARGS.fps),  # sample frames based on fps
        ops.combine_latest(semaphore_stream),
        ops.filter(lambda tup: tup[1]),  # proceed only if semaphore allows
        ops.map(lambda tup: tup[0])  # take only frame
    )

    disposable = gated_video_stream.pipe(
        ops.filter(has_face),  # filter frames without faces
        ops.map(lambda frame: Image.fromarray(
            cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))),  # map frame to PIL image
        ops.map(lambda img: img.resize(
            (640, 360))),  # resize image (inference will be faster)
        ops.observe_on(io_scheduler),
        ops.map(lambda img: ImageFacesPair(img, analyse_frame(img))),  # analyse frame for faces
        ops.filter(lambda img_faces_pair: any(
            face.top_prediction.confidence > ARGS.threshold
            for face in img_faces_pair.faces
        )),  # proceed only if there is a known face in the frame
        ops.throttle_first(1),
        ops.flat_map(unlock_request),  # unlock the door
        ops.do_action(
            on_next=lambda _: semaphore.on_next(True)
        )  # trigger the semaphore, which blocks the stream for "block-seconds" seconds
           # (doors stay unlocked for that long after an unlock request)
    ).subscribe(on_error=lambda e: logger.exception(e))

    try:
        loop.run_forever()
    except Exception as e:
        logger.exception(e)
        logger.info("Smart lock face recognition engine shutdown")
        disposable.dispose()
Example #15
def main():
    logging.info("in main")

    optimal_thread_count = multiprocessing.cpu_count()
    thread_pool = ThreadPoolScheduler(optimal_thread_count)
    logging.info(f"using {optimal_thread_count} threads")

    observable = MyObservable(thread_pool)
    subject = MySubject()
    observer = MyObserver()

    observable.subscribe(subject)
    subject.subscribe(observer)

    observable.start()

    input('Press <enter> to quit\n')
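
MyObservable, MySubject and MyObserver are assumed by the snippet above. A minimal hypothetical sketch (names and behaviour are illustrative, not the original classes):

import logging
from rx.core import Observer
from rx.subject import Subject

class MyObservable(Subject):
    """Hypothetical hot source that emits integers from a pool thread."""

    def __init__(self, scheduler):
        super().__init__()
        self._scheduler = scheduler

    def start(self):
        # schedule the emissions on one of the pool's worker threads
        self._scheduler.schedule(lambda sched, state: [self.on_next(i) for i in range(5)])

class MySubject(Subject):
    """Hypothetical pass-through subject."""

class MyObserver(Observer):
    def on_next(self, value):
        logging.info("received %s", value)

    def on_error(self, error):
        logging.exception(error)

    def on_completed(self):
        logging.info("completed")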
Example #16
    def __init__(self, stub):
        self._stub = stub
        self._fid_list = KiwoomOpenApiPlusRealType.get_fids_by_realtype('주식시세')  # '주식시세' = stock quotes (real-time data type)

        self._request_observer = QueueBasedIterableObserver()
        self._request_iterator = iter(self._request_observer)
        self._response_iterator = self._stub.BidirectionalRealCall(self._request_iterator)
        self._response_subject = Subject()
        self._response_scheduler_max_workers = 8
        self._response_scheduler = ThreadPoolScheduler(self._response_scheduler_max_workers)
        self._buffered_response_iterator = QueueBasedBufferedIterator(self._response_iterator)
        self._response_observable = rx.from_iterable(self._buffered_response_iterator, self._response_scheduler)
        self._response_subscription = self._response_observable.subscribe(self._response_subject)

        self._subjects_by_code = {}

        self.initialize()
Example #17
 def __init__(self, concurrency_per_group, description=None):
     self.scheduler = ThreadPoolScheduler(concurrency_per_group)
     self._requests = Subject()
     self._output = ReplaySubject()
     self._description = description
     self._subscription = self._requests.pipe(
         group_by(lambda r: r['concurrency_group']),
         flat_map(lambda concurrency_group: concurrency_group.pipe(
             map(lambda r: r['request']),
             merge(max_concurrent=concurrency_per_group)))).subscribe(
                 on_next=lambda request: self._output.on_next(request),
                 on_error=lambda error: logging.exception(
                     'Error in {} request stream'.format(self)),
                  on_completed=lambda: logging.error(
                      '{} request stream unexpectedly completed'.format(self)),
                 scheduler=self.scheduler)
Example #18
def main():
    pool_scheduler = ThreadPoolScheduler(2)
    writer = rx.subject.Subject()
    reader = rx.from_iterable(read())

    # Subjects behave differently than Observables w.r.t. subscribe_on
    # subscribe_on doesn't work since on_next() is called on original thread
    # observe_on ensures we don't block the thread calling on_next()
    writer.pipe(
        ops.observe_on(pool_scheduler),
    ).subscribe(write)

    reader.subscribe(print_thread("reader"), scheduler=pool_scheduler)

    for i in range(10):
        writer.on_next(i)
        sleep(1)
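
The read(), write and print_thread helpers (along with the usual rx, ops, sleep and threading imports) are assumed by the snippet above. A minimal hypothetical sketch of the helpers:

from threading import current_thread

def read():
    # hypothetical source of values for the reader observable
    yield from range(3)

def write(value):
    print("writer", value, current_thread().name)

def print_thread(tag):
    def inner(value):
        print(tag, value, current_thread().name)

    return inner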
Example #19
    def __init__(self, stub):
        self._stub = stub

        request = KiwoomOpenApiPlusService_pb2.ListenRequest()
        self._response_iterator = self._stub.OrderListen(request)
        self._response_subject = Subject()
        self._response_scheduler_max_workers = 8
        self._response_scheduler = ThreadPoolScheduler(self._response_scheduler_max_workers)
        self._buffered_response_iterator = QueueBasedBufferedIterator(self._response_iterator)
        self._response_observable = rx.from_iterable(self._buffered_response_iterator, self._response_scheduler)
        self._response_subscription = self._response_observable.subscribe(self._response_subject)

        self._observable = Subject()
        self._subscription = self._response_subject.pipe(
            self.filter_chejan_response(),
            self.convert_to_dict(),
        ).subscribe(self._observable)
Example #20
class MainWindow(QMainWindow):

    core_controller = None
    plugins = None
    draw_data_scheduler = ThreadPoolScheduler(1)

    def __init__(self):
        QMainWindow.__init__(self)
        self.setWindowTitle('vizzero')

        self.core_controller = CoreController()
        self.plugins = [Handsim(self.core_controller), RecordHandFixed(self.core_controller)]

        vbox = QVBoxLayout(self)
        window = QWidget()
        window.setLayout(vbox)

        self.myo_canvas = RealtimeCanvas(self.core_controller)
        self.myo_canvas.native.setParent(window)
        self.sensor_controls = SensorControls(self.core_controller.sensor_controller)
        vbox.addWidget(self.sensor_controls)
        vbox.addWidget(self.myo_canvas.native)

        self.node_proc = None

        self.tabs = Tabs(self.core_controller, self.plugins)
        splitter1 = QSplitter(Qt.Horizontal)
        splitter1.addWidget(self.tabs)
        splitter1.addWidget(window)
        splitter1.setSizes([70, 30])

        self.setCentralWidget(splitter1)

        self.core_controller.sensor_controller.rx_sensor_data_subject\
            .subscribe(self.myo_canvas.feed_data, scheduler=self.draw_data_scheduler)

    def closeEvent(self, event):
        for plugin in self.plugins:
            try:
                plugin.destroy()
            except Exception:
                # keep tearing down the remaining plugins even if one fails
                pass
Example #21
 def __init__(
     self,
     edit_fan_profile_presenter: EditFanProfilePresenter,
     edit_overclock_profile_presenter: EditOverclockProfilePresenter,
     historical_data_presenter: HistoricalDataPresenter,
     preferences_presenter: PreferencesPresenter,
     get_status_interactor: GetStatusInteractor,
     set_power_limit_interactor: SetPowerLimitInteractor,
     set_overclock_interactor: SetOverclockInteractor,
     set_fan_speed_interactor: SetFanSpeedInteractor,
     settings_interactor: SettingsInteractor,
     check_new_version_interactor: CheckNewVersionInteractor,
     speed_step_changed_subject: SpeedStepChangedSubject,
     fan_profile_changed_subject: FanProfileChangedSubject,
     overclock_profile_changed_subject: OverclockProfileChangedSubject,
     composite_disposable: CompositeDisposable,
 ) -> None:
     LOG.debug("init MainPresenter ")
     self.main_view: MainViewInterface = MainViewInterface()
     self._edit_fan_profile_presenter = edit_fan_profile_presenter
     self._edit_overclock_profile_presenter = edit_overclock_profile_presenter
     self._historical_data_presenter = historical_data_presenter
     self._preferences_presenter = preferences_presenter
     self._scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())
     self._get_status_interactor: GetStatusInteractor = get_status_interactor
     self._set_power_limit_interactor = set_power_limit_interactor
     self._set_overclock_interactor = set_overclock_interactor
     self._settings_interactor = settings_interactor
     self._check_new_version_interactor = check_new_version_interactor
     self._set_fan_speed_interactor = set_fan_speed_interactor
     self._speed_step_changed_subject = speed_step_changed_subject
     self._fan_profile_changed_subject = fan_profile_changed_subject
     self._overclock_profile_changed_subject = overclock_profile_changed_subject
     self._composite_disposable: CompositeDisposable = composite_disposable
     self._fan_profile_selected: Optional[FanProfile] = None
     self._fan_profile_applied: Optional[FanProfile] = None
     self._overclock_profile_selected: Optional[OverclockProfile] = None
     self._overclock_profile_applied: Optional[OverclockProfile] = None
     self._latest_status: Optional[Status] = None
     self._gpu_index: int = 0
Example #22
 def __init__(
     self,
     preferences_presenter: PreferencesPresenter,
     x52_driver_interactor: X52DriverInteractor,
     udev_interactor: UdevInteractor,
     settings_interactor: SettingsInteractor,
     check_new_version_interactor: CheckNewVersionInteractor,
     composite_disposable: CompositeDisposable,
 ) -> None:
     _LOG.debug("init MainPresenter ")
     self.main_view: MainViewInterface = MainViewInterface()
     self._preferences_presenter = preferences_presenter
     self._scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())
     self._x52_driver_interactor = x52_driver_interactor
     self._udev_interactor = udev_interactor
     self._settings_interactor = settings_interactor
     self._check_new_version_interactor = check_new_version_interactor
     self._composite_disposable: CompositeDisposable = composite_disposable
     self._profile_selected: Optional[Union[X52ProProfile,
                                            X52Profile]] = None
     self._last_applied_profile: Optional[Union[X52ProProfile,
                                                X52Profile]] = None
     self._driver_list: List[X52Driver] = []
     self._driver_index = 0
Example #23
import multiprocessing
from threading import current_thread

import rx
from rx import operators as ops
from rx.scheduler import CurrentThreadScheduler, ThreadPoolScheduler


def print_thread(tag):
    def inner(x):
        print(tag, x, current_thread().name)

    return inner


pool_scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())

rx.from_iterable(range(10)).pipe(
    # Runs on main thread
    ops.do_action(print_thread(0)),
    # Switch threads to unused one from pool
    ops.flat_map(lambda x: rx.just(x, pool_scheduler).pipe(
        ops.do_action(print_thread(1)), )),
    # This executes on the same thread from the pool as in the flat_map
    ops.do_action(print_thread(2)),
).subscribe()

print("This may be printed on any line *after* the final MainThread print!")
Example #24
import multiprocessing
import random
import time
from threading import current_thread

import rx
from rx import operators as ops
from rx.scheduler import ThreadPoolScheduler


def intense_calculation(value):
    # sleep for a random duration between 0.5 and 2.0 seconds
    # to simulate a long-running calculation
    time.sleep(random.randint(5, 20) * .1)
    return value


# calculate number of CPU's and add 1,
# then create a ThreadPoolScheduler with that number of threads
optimal_thread_count = multiprocessing.cpu_count() + 1
pool_scheduler = ThreadPoolScheduler(optimal_thread_count)

# Create TASK 1
rx.from_(["Alpha", "Beta", "Gamma", "Delta", "Epsilon"
          ]).pipe(ops.map(lambda s: intense_calculation(s)),
                  ops.subscribe_on(pool_scheduler)).subscribe(
                      on_next=lambda s: print("TASK 1: {0} {1}".format(
                          current_thread().name, s)),
                      on_error=lambda e: print(e),
                      on_completed=lambda: print("TASK 1 done!"))

# Create TASK 2
rx.range(1, 10).pipe(ops.map(lambda s: intense_calculation(s)),
                     ops.subscribe_on(pool_scheduler)).subscribe(
                         on_next=lambda i: print("TASK 2: {0} {1}".format(
                             current_thread().name, i)),
                         on_error=lambda e: print(e),
                         on_completed=lambda: print("TASK 2 done!"))

# keep the main thread alive while the tasks run on the pool threads
input("Press Enter to exit\n")
Example #25
 def test_threadpool_now(self):
     scheduler = ThreadPoolScheduler()
     diff = scheduler.now - default_now()
     assert abs(diff) < timedelta(milliseconds=1)
Example #26
        f"> Choose what shop(s) you want to search by typing the respective number(s):\n{shops_picker}\n>> "
    )

    options = Options()
    options.add_argument("--disable-extensions")
    options.add_argument("--disable-gpu")
    options.add_argument("--disable-infobars")

    prefs = {"profile.managed_default_content_settings.images": 2}

    options.add_experimental_option("prefs", prefs)
    options.headless = True

    shops_list = shops_input.split(",")

    scheduler = ThreadPoolScheduler(len(shops_list))

    shops = rx.from_(shops_list).pipe(
        ops.map(
            lambda shop: re.findall(rf"\[{shop}\]\s(\w+)", shops_picker)[0]),
        ops.map(lambda shop: {
            **CONFIG.get(shop),
            "priceRegexp": PRICES_REGEXP.get(shop),
        }),
        ops.filter(lambda el: el),
    )

    search_term = shops.pipe(
        ops.pluck("innerChar"),
        ops.map(lambda inner_char: inner_char.join(search_term_input.split())),
    )
Example #27
def main(
    base_dir: Path = "",
    storage_dir: Path = "",
    filter_regex: str = r".*(?:jpg|JPG|JPEG|jpeg)$",
    copy_only: bool = False,
    dry_run: bool = False,
) -> None:
    """Organise image files from one location to another.

    This application allows you to specify a base directory from which to
    recursively search for image files, and to organise those files, based on
    the date they were taken, into a collection of year and month folders.  The
    application will respect albums which already exist based on the presence
    of an album name.

    By setting the --copy-only flag, this application will copy, rather than
    the default move, files when organising them.

    Arguments:
        base_dir: The location from which the application should search for
            image files.

        storage_dir: The location from which the application should create the
            archive of organised files.

        filter_regex: The python Regular Expression used to select files to
            operate on.

        copy_only: A flag to request that we make copies of files, rather than
            moving them.

        dry_run: A flag to print the proposed changes only, without actually
            doing anything.

    """
    operation_complete = Event()
    operation_failed = Event()

    if not storage_dir:
        storage_dir = base_dir

    worker_pool = ThreadPoolScheduler(3)
    failed_results: List[FailedTarget] = []

    # Use this to pull errors out of the stream.
    failed_record_filter = partial(filter_errors,
                                   error_collection=failed_results)

    # Identify targets
    file_listing_shared = get_files(base_dir, filter_regex, worker_pool).pipe(
        operators.filter(failed_record_filter),
        operators.publish(),
    )

    # Load targets from disk
    loaded_files = load_file_content(file_listing_shared).pipe(
        operators.filter(failed_record_filter), )

    enriched_files = loaded_files.pipe(
        generate_file_metadata,
        operators.filter(failed_record_filter),
        generate_image_metadata,
        operators.filter(failed_record_filter),
    )
    #  hashed_files = generate_file_metadata(file_listing)
    #  files_with_metadata = generate_image_metadata(hashed_files)

    files_with_move_path = generate_move_path(
        enriched_files,
        storage_dir).pipe(operators.filter(failed_record_filter), )

    if dry_run:
        files_with_move_path.subscribe(
            on_next=dry_run_print,
            on_error=lambda err: handle_error(err, operation_failed),
            on_completed=operation_complete.set,
        )

        file_listing_shared.connect()

        while not any(
            (operation_complete.is_set(), operation_failed.is_set())):
            typer.echo("Waiting for processing to complete.", err=True)
            sleep(1)

        typer.echo(
            f"Encountered {len(failed_results)} Records that failed to process:"
        )
        for fail in failed_results:
            typer.secho(fail, fg=typer.colors.RED)

        typer.echo("Operation completed.")

        return

    moved_files = files_with_move_path.pipe(
        operators.map(
            lambda target: fo.migrate_file_target(target, copy_only)),
        operators.filter(failed_record_filter),
        operators.map(fo.clear_empty_directories),
        operators.filter(failed_record_filter),
    )

    moved_files.subscribe(
        on_next=lambda target: typer.echo(
            f"{'Copied' if copy_only else 'Moved'} "
            f"{target.file_path} to {target.target_move_path}.", ),
        on_error=lambda err: handle_error(err, operation_failed),
        on_completed=operation_complete.set,
    )

    file_listing_shared.connect()

    while not any((operation_complete.is_set(), operation_failed.is_set())):
        typer.echo("Waiting for processing to complete.", err=True)
        sleep(1)

    typer.echo("Operation completed.")

    typer.echo(
        f"Encountered {len(failed_results)} Records that failed to process:")
    for fail in failed_results:
        typer.secho(fail, fg=typer.colors.RED)
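
main() reads like a Typer command. A minimal hedged sketch of how it might be wired up (assuming the typer package provides the CLI entry point, which the excerpt does not show):

if __name__ == "__main__":
    typer.run(main)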
Example #28

# Write, Read, Commit, Step are domain-specific types assumed to be defined elsewhere
import rx
from rx.core.typing import Observer, Scheduler
from rx.operators import map  # rx's map operator (shadows the builtin here)
from rx.scheduler import ThreadPoolScheduler


def make_TM(observer: Observer, _: Scheduler):
    observer.on_next(Write(1, "x"))
    observer.on_next(Read(2, "y"))
    observer.on_next(Read(1, "x"))
    observer.on_next(Commit(1))
    observer.on_next(Read(2, "x"))
    observer.on_next(Write(2, "y"))
    observer.on_next(Commit(2))
    return observer


def a(step: Step):
    pass


TM = rx.create(make_TM)
pool = ThreadPoolScheduler(4)

TM.pipe(map(a)).subscribe(on_next=lambda e: print(e))
#
# TM.pipe(
#     subscribe_on(pool),
# ).subscribe(
#     on_next=lambda e: print(e)
# )

if __name__ == "__main__":
    pass
Example #29
                if highest in to_process:
                    to_process.remove(highest)


if __name__ == "__main__":
    from AccountLiquidator import NullAccountLiquidator
    from Context import default_context
    from Observables import create_backpressure_skipping_observer, log_subscription_error
    from Wallet import default_wallet

    from rx.scheduler import ThreadPoolScheduler

    if default_wallet is None:
        raise Exception("No wallet")

    pool_scheduler = ThreadPoolScheduler(2)

    def fetch_prices(context):
        group = Group.load(context)

        def _fetch_prices(_):
            return group.fetch_token_prices()

        return _fetch_prices

    def fetch_margin_accounts(context):
        def _fetch_margin_accounts(_):
            group = Group.load(context)
            return MarginAccount.load_all_for_group_with_open_orders(
                context, context.program_id, group)
Example #30
 def __init__(self) -> None:
     self._scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())