def _start_refresh(self) -> None:
    _LOG.debug("start refresh")
    refresh_interval = self._settings_interactor.get_int('settings_refresh_interval')
    self._composite_disposable.add(
        rx.interval(refresh_interval, scheduler=self._scheduler).pipe(
            operators.start_with(0),
            operators.subscribe_on(self._scheduler),
            operators.flat_map(lambda _: self._get_status()),
            operators.observe_on(GtkScheduler(GLib)),
        ).subscribe(on_next=self._update_status,
                    on_error=self._handle_refresh_error))
def requestQuotes(self, tickers):
    logger.debug(f'QuoteObserver.requestQuotes({tickers})')
    self.quoteSubscription = create(
        lambda o, s: beginStreamingQuotes(tickers, o, s)).pipe(
            op.subscribe_on(config.pool_scheduler),
            op.observe_on(config.pool_scheduler),
            op.do_action(lambda q: logger.debug(f'QO: {q}')),
            op.group_by(lambda q: q['symbol']),
        ).subscribe(
            on_next=self.handleQuote,
            on_error=lambda e: logger.debug(e),
            on_completed=lambda: logger.debug(
                'QuoteObserver subscription completed'))
def addFile(self, files):
    # rx.create passes (observer, scheduler); the original annotated the first
    # argument as rx.typing.Subscription, but it is the observer being driven.
    def _createObserver(observer: rx.typing.Observer, scheduler) -> None:
        for file in files:
            print("file:%s" % file)
            with open(file, mode='r') as f:
                content = f.readlines()
            observer.on_next(Class.LogInfo(file, "".join(content)))
        observer.on_completed()

    rx.create(_createObserver).pipe(
        ops.subscribe_on(scheduler.ThreadPoolScheduler()),
        # a bare scheduler is not a pipe operator; deliver results on the
        # Qt main loop via observe_on instead
        ops.observe_on(qtScheduler.QtScheduler()),
    ).subscribe(on_next=lambda value: self.handlerLogInfoResult(value))
def __init__(self, obs_stream, symbol):
    logger.debug(f'Stock.__init__({symbol})')
    self.obs_stream = obs_stream
    self.symbol = symbol
    self.price = ()
    self.stockSubscription = self.obs_stream.pipe(
        op.subscribe_on(config.pool_scheduler),
        op.observe_on(config.pool_scheduler),
        op.do_action(lambda s: logger.debug(f'STK: {s}')),
    ).subscribe(
        on_next=self.handleQuote,
        on_error=lambda e: logger.debug(e),
        on_completed=lambda: logger.debug('Stock subscription completed'))
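# Hedged sketch, not part of the snippets above: since QuoteObserver groups
# quotes by symbol, its handleQuote plausibly receives one GroupedObservable
# per symbol and could hand each group to a Stock like the one above;
# `self.stocks` is an assumption made only for illustration.
def handleQuote(self, group):
    self.stocks[group.key] = Stock(obs_stream=group, symbol=group.key)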
def _set_fan_speed(self, gpu_index: int, speed: int = 100,
                   manual_control: bool = True) -> None:
    self._composite_disposable.add(
        self._set_fan_speed_interactor.execute(
            gpu_index, speed, manual_control).pipe(
                operators.subscribe_on(self._scheduler),
                operators.observe_on(GtkScheduler(GLib)),
        ).subscribe(on_error=lambda e: (
            LOG.exception(f"Set cooling error: {str(e)}"),
            self.main_view.set_statusbar_text('Error applying fan profile!'))))
def _retryable(self, data: str, delay: timedelta):
    return rx.of(data).pipe(
        ops.subscribe_on(self._write_options.write_scheduler),
        # apply the delay if it's specified
        ops.delay(duetime=delay, scheduler=self._write_options.write_scheduler),
        # invoke the HTTP call
        ops.map(lambda x: self._http(x)),
        # if there is an error, then retry
        ops.catch(handler=lambda exception, source: self._retry_handler(
            exception, source, data)),
    )
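# Hedged sketch of a _retry_handler that would fit the catch() above; the real
# client's retry policy differs, and the ConnectionError check and 5-second
# back-off are assumptions. It assumes the same imports as _retryable (rx,
# timedelta): retryable errors re-enter _retryable with a delay, everything
# else is propagated via rx.throw.
def _retry_handler(self, exception, source, data):
    if isinstance(exception, ConnectionError):  # assumed retry condition
        return self._retryable(data, delay=timedelta(seconds=5))
    return rx.throw(exception)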
def _set_speed_profile(self, profile: SpeedProfile) -> None:
    observable = self._set_speed_profile_interactor \
        .execute(profile.channel, self._get_profile_data(profile))
    self._composite_disposable.add(
        observable.pipe(
            operators.subscribe_on(self._scheduler),
            operators.observe_on(GtkScheduler(GLib)),
        ).subscribe(
            on_next=lambda _: self._update_current_speed_profile(profile),
            on_error=lambda e: (
                _LOG.exception("Set cooling error: %s", str(e)),
                self.main_view.set_statusbar_text(
                    'Error applying %s speed profile!' % profile.channel))))
def main():
    loop = asyncio.get_event_loop()
    io_scheduler = AsyncIOThreadSafeScheduler(loop=loop)
    scheduler = ThreadPoolScheduler(multiprocessing.cpu_count())

    semaphore = Subject()
    semaphore_stream = semaphore.pipe(
        ops.flat_map(lambda _: rx.of(True).pipe(
            ops.delay(ARGS.block_time, scheduler=scheduler),
            ops.start_with(False))),
        ops.start_with(True))

    video_stream_observable = rx.using(
        lambda: VideoStreamDisposable(),
        lambda d: rx.from_iterable(video_stream_iterable(d.cap)))

    gated_video_stream = video_stream_observable.pipe(
        ops.subscribe_on(scheduler),
        ops.sample(1 / ARGS.fps),  # sample frames based on fps
        ops.combine_latest(semaphore_stream),
        ops.filter(lambda tup: tup[1]),  # proceed only if semaphore allows
        ops.map(lambda tup: tup[0])  # take only frame
    )

    disposable = gated_video_stream.pipe(
        ops.filter(has_face),  # filter frames without faces
        ops.map(lambda frame: Image.fromarray(
            cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))),  # map frame to PIL image
        ops.map(lambda img: img.resize(
            (640, 360))),  # resize image (inference will be faster)
        ops.observe_on(io_scheduler),
        ops.map(lambda img: ImageFacesPair(
            img, analyse_frame(img))),  # analyse frame for faces
        ops.filter(lambda img_faces_pair: any([
            face.top_prediction.confidence > ARGS.threshold
            for face in img_faces_pair.faces
        ])),  # proceed only if there is a known face in the frame
        ops.throttle_first(1),
        ops.flat_map(unlock_request),  # unlock the door
        ops.do_action(
            on_next=lambda _: semaphore.on_next(True)
        )  # trigger semaphore which will block stream for "block-seconds"
           # seconds (doors are unlocked for that long after unlock request)
    ).subscribe(on_error=lambda e: logger.exception(e))

    try:
        loop.run_forever()
    except Exception as e:
        logger.exception(e)

    logger.info("Smart lock face recognition engine shutdown")
    disposable.dispose()
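# Standalone, hedged sketch of the semaphore gating idiom used above; the
# 2-second block time and the 0.5-second tick source are assumptions. Pushing
# True into `semaphore` closes the gate immediately (start_with(False)) and
# reopens it once ops.delay fires, so ticks are suppressed for the block time.
import time
import rx
from rx import operators as ops
from rx.subject import Subject
from rx.scheduler import ThreadPoolScheduler

pool = ThreadPoolScheduler(2)
semaphore = Subject()
gate = semaphore.pipe(
    ops.flat_map(lambda _: rx.of(True).pipe(
        ops.delay(2.0, scheduler=pool),  # reopen after 2 seconds
        ops.start_with(False))),         # close as soon as True arrives
    ops.start_with(True))                # the gate starts open

rx.interval(0.5).pipe(
    ops.combine_latest(gate),
    ops.filter(lambda tup: tup[1]),  # pass ticks only while the gate is open
    ops.map(lambda tup: tup[0]),
).subscribe(print)

semaphore.on_next(True)  # close the gate for 2 seconds
time.sleep(5)            # keep the process alive for the demo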
def _schedule_lighting_setting(self, settings: LightingSettings) -> None:
    _LOG.info(
        "Setting lighting: [ Channel: %s, Mode: %s, Speed: %s, Direction: %s, Colors: %s ]",
        settings.channel.value, settings.mode.name, settings.speed_or_default,
        settings.direction_or_default, settings.colors.values())
    self._composite_disposable.add(
        self._set_lighting_interactor.execute(settings).pipe(
            operators.subscribe_on(self._scheduler),
            operators.observe_on(GtkScheduler(GLib)),
        ).subscribe(
            on_next=lambda _: self._on_lighting_setting_complete(settings),
            on_error=lambda e: _LOG.exception("Lighting apply error: %s", str(e))))
def on_mfd_brightness_value_changed(self, widget: Any, *_: Any) -> None:
    brightness = int(widget.get_value())
    if brightness != self._last_applied_profile.mfd_brightness:
        self._last_applied_profile.mfd_brightness = brightness
        self._composite_disposable.add(
            self._x52_driver_interactor.set_mfd_brightness(
                self._driver_list[self._driver_index], brightness).pipe(
                    operators.subscribe_on(self._scheduler),
                    operators.observe_on(GtkScheduler(GLib)),
            ).subscribe(on_error=lambda e: self._handle_generic_set_result(
                e, "LED brightness")))
    if brightness != self._profile_selected.mfd_brightness:
        self._profile_selected.mfd_brightness = brightness
        self._profile_selected.save()
def on_overclock_apply_button_clicked(self, *_: Any) -> None:
    if self._overclock_profile_selected:
        self._overclock_profile_applied = self._overclock_profile_selected
        self._refresh_overclock_profile_ui(
            profile_id=self._overclock_profile_selected.id)
        assert self._latest_status is not None
        self._composite_disposable.add(
            self._set_overclock_interactor.execute(
                self._gpu_index,
                self._latest_status.gpu_status_list[
                    self._gpu_index].overclock.perf_level_max,
                self._overclock_profile_applied.gpu,
                self._overclock_profile_applied.memory).pipe(
                    operators.subscribe_on(self._scheduler),
                    operators.observe_on(GtkScheduler(GLib)),
            ).subscribe(on_next=self._handle_set_overclock_result,
                        on_error=self._handle_set_overclock_result))
def test_create_and_run_all_supported_algorithms(self):
    sys = ActorSystem("multiprocTCPBase", logDefs=log_helper.EVOGIL_LOG_CONFIG)
    test_cases = run_config.algorithms
    for test_case in test_cases:
        with self.subTest(algorithm=test_case):
            algo_factory, _ = prepare(test_case, "ZDT1")
            algorithm = algo_factory()
            simple_simulation = StepsRun(1)
            result = list(
                simple_simulation.create_job(algorithm)
                .pipe(ops.subscribe_on(NewThreadScheduler()),
                      ops.to_iterable())
                .run()
            )
            self.assertEqual(1, len(result))
            self.assertIsInstance(result[0], ProgressMessage)
    sys.shutdown()
def _update_mfd_date_time(self) -> None:
    _LOG.debug("update_mfd_date_time")
    if self._driver_list:
        self._composite_disposable.add(
            self._x52_driver_interactor.set_date_time(
                self._driver_list[self._driver_index],
                self._profile_selected.clock_1_use_local_time,
                (self._profile_selected.clock_1_use_24h,
                 self._profile_selected.clock_2_use_24h,
                 self._profile_selected.clock_3_use_24h),
                timedelta(minutes=self._profile_selected.clock_2_offset),
                timedelta(minutes=self._profile_selected.clock_3_offset),
                self._profile_selected.date_format).pipe(
                    operators.subscribe_on(self._scheduler),
                    operators.observe_on(GtkScheduler(GLib)),
            ).subscribe(on_error=lambda e:
                        self._handle_generic_set_result(e, "Date")))
def read_config_from_file(filename, file_response, scheduler=None):
    read_request, read_response = filename.pipe(
        ops.map(lambda i: file.Read(id='config', path=i)),
        file.read(file_response),
    )
    if scheduler is not None:
        read_request = read_request.pipe(
            ops.subscribe_on(scheduler),
        )
    config = read_response.pipe(
        ops.filter(lambda i: i.id == "config"),
        ops.flat_map(lambda i: i.data),
        parse_config(),
    )
    return config, read_request
def makinage(aio_scheduler, sources):
    def on_error(e):
        raise e

    config, read_request, http_request = read_config_from_args(
        sources.argv.argv,
        sources.file.response,
        sources.http.response,
        scheduler=aio_scheduler
    )

    first_config = rx.concat(config.pipe(ops.take(1)), rx.never())

    kafka_source = sources.kafka.response.pipe(
        trace_observable("kafka source1"),
        ops.replay(),
        ops.ref_count(),
        trace_observable("kafka source2"),
    )
    kafka_source.subscribe(on_error=on_error)

    kafka_request = first_config.pipe(
        ops.flat_map(lambda i: create_operators(
            i, config,
            kafka_source,
            sources.kafka.feedback.pipe(ops.share()),
        )),
        ops.subscribe_on(aio_scheduler),
        trace_observable("makinage"),
    )

    '''
    config.pipe(ops.subscribe_on(aio_scheduler)).subscribe(
        on_next=print,
        on_error=print,
    )
    '''

    return MakiNageSink(
        file=file.Sink(request=read_request),
        http=http.Sink(request=http_request),
        kafka=kafka.Sink(request=kafka_request),
    )
def run_metaepoch(self):
    node_jobs = []
    for node in self.level_nodes[2]:
        node_jobs.append(node.run_metaepoch())
    for node in self.level_nodes[1]:
        node_jobs.append(node.run_metaepoch())
    for node in self.level_nodes[0]:
        node_jobs.append(node.run_metaepoch())
        # _plot_node(node, 'r', [[0, 1], [0, 3]])
    node_costs = []
    for node_job in node_jobs:
        node_job.pipe(
            ops.subscribe_on(NewThreadScheduler()),
            ops.map(lambda message: self._update_cost(message)),
            ops.sum(),
            ops.do_action(on_next=lambda cost: node_costs.append(cost)),
        ).run()
    # self.cost += max(node_costs)
    self.cost += sum(node_costs)
def run(self):
    print('Starting server {}'.format(self._server))
    self._subscription = rx.create(self._receive).pipe(
        ops.subscribe_on(NewThreadScheduler()),
        ops.map(lambda msg: self._mapper(msg)),
        ops.filter(lambda gfx: gfx is not None),
    ).subscribe(lambda gfx: self._entities.append(gfx))
    print('Start')
    while True:
        with canvas(self._virtual) as draw:
            for entity in self._entities:
                entity.render(draw)
                entity.update()
        self._entities[:] = [ent for ent in self._entities
                             if not ent.can_destroy()]
        time.sleep(0.010)
def _initUI(self):
    self.mainLayout = QHBoxLayout(self)
    # self.treeModel = QFileSystemModel(self)
    # self.treeView = QTreeView(self)
    self.setAcceptDrops(True)
    #
    # self.treeView.setModel(self.treeModel)
    # self.treeView.setSortingEnabled(True)
    # self.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
    # self.treeView.customContextMenuRequested.connect(self.showContextMenu)
    # self.mainLayout.addWidget(self.treeView)

    self.editTabsView = QTabWidget(self)
    self.editTabsView.setTabsClosable(True)
    self.editTabsView.setMovable(True)
    self.editTabsView.setDocumentMode(True)
    self.editTabsView.tabCloseRequested.connect(self.tabCloseRequested)

    self.contentLayout = QVBoxLayout(self)
    self.topHandlerLayout = QHBoxLayout(self)
    self.input = QLineEdit(self)
    self.searchButton = QPushButton(self)
    self.searchButton.clicked.connect(self.handerFilter)
    self.searchButton.setText("搜索当前文件")  # "Search current file"
    self.searchButtonAll = QPushButton(self)
    self.searchButtonAll.clicked.connect(self.handerFilterAll)
    self.searchButtonAll.setText("搜索全部文件")  # "Search all files"
    self.topHandlerLayout.addWidget(self.input, 1)
    self.topHandlerLayout.addWidget(self.searchButton)
    self.topHandlerLayout.addWidget(self.searchButtonAll)
    self.contentLayout.addLayout(self.topHandlerLayout)
    self.contentLayout.addWidget(self.editTabsView, 1)
    self.mainLayout.addLayout(self.contentLayout, 1)
    self.setLayout(self.mainLayout)
    self._initMenus()

    RxBus.instance.register(self, Class.LogInfo).pipe(
        ops.subscribe_on(scheduler.ThreadPoolScheduler()),
        # a bare scheduler is not a pipe operator; switch to the Qt thread
        # with observe_on
        ops.observe_on(qtScheduler.QtScheduler()),
    ).subscribe(on_next=lambda value: self.handlerLogInfo(value))
def startFilter(self, index):
    searchTag = self.input.displayText()
    print(searchTag)
    filterData = list()
    if index == -1:
        print("全部")  # "all"
        for i in range(self.editTabsView.count()):
            currentTab = self.editTabsView.widget(i)
            data = currentTab.toPlainText()
            filterData.append(data)
    else:
        data = self.editTabsView.currentWidget().toPlainText()
        filterData.append(data)

    Fiter().filter(searchTag, filterData).pipe(
        ops.subscribe_on(scheduler.ThreadPoolScheduler()),
        # as above, wrap the Qt scheduler in observe_on
        ops.observe_on(qtScheduler.QtScheduler()),
    ).subscribe(
        on_next=lambda filterResult: self.handlerFilterResult(filterResult),
        on_error=lambda e: self.handlerFilterErrorResult(e),
    )
def test_imga_cost_calculation(self):
    final_driver, problem_mod = prepare("IMGA+NSGAII", "ZDT1")
    imga = final_driver()
    steps_run = StepsRun(4)
    total_costs = []
    islands_costs = []

    def on_imga_result(result):
        total_costs.append(result.cost)
        islands_costs.append(
            sum([island.driver.cost for island in imga.islands]))

    steps_run.create_job(imga).pipe(
        ops.subscribe_on(NewThreadScheduler()),
        ops.do_action(on_next=on_imga_result),
    ).run()

    self.assertListEqual(total_costs, islands_costs)
def on_led_status_selected(self, widget: Any, *_: Any) -> None:
    active = widget.get_active()
    if active >= 0:
        assert self._profile_selected is not None
        enum_value = widget.get_model()[active][0]
        attr_name = widget.get_model()[active][2]
        old_led_status = getattr(self._profile_selected, attr_name)
        last_applied_led_status = getattr(self._last_applied_profile, attr_name)
        new_led_status = type(old_led_status)(enum_value)
        if last_applied_led_status != new_led_status:
            setattr(self._last_applied_profile, attr_name, new_led_status)
            self._composite_disposable.add(
                self._x52_driver_interactor.set_led_status(
                    self._driver_list[self._driver_index],
                    new_led_status, attr_name).pipe(
                        operators.subscribe_on(self._scheduler),
                        operators.observe_on(GtkScheduler(GLib)),
                ).subscribe(on_error=lambda e: self._handle_generic_set_result(
                    e, "LED status")))
        if old_led_status != new_led_status:
            setattr(self._profile_selected, attr_name, new_led_status)
            self._profile_selected.save()
def _check_supported_kraken(self) -> None:
    self._composite_disposable.add(
        self._has_supported_kraken_interactor.execute().pipe(
            operators.subscribe_on(self._scheduler),
            operators.observe_on(GtkScheduler(GLib)),
        ).subscribe(on_next=self._has_supported_kraken_result))
def deepspeech_server(aio_scheduler, sources):
    argv = sources.argv.argv
    stt = sources.httpd.route
    stt_response = sources.deepspeech.text
    ds_logs = sources.deepspeech.log

    http_ds_error, route_ds_error = make_error_router()

    args = parse_arguments(argv)

    read_request, read_response = args.pipe(
        ops.map(lambda i: file.Read(id='config', path=i.value)),
        file.read(sources.file.response),
    )
    read_request = read_request.pipe(
        ops.subscribe_on(aio_scheduler),
    )
    config = parse_config(read_response)

    logs_config = config.pipe(
        ops.flat_map(lambda i: rx.from_(i.log.level, scheduler=ImmediateScheduler())),
        ops.map(lambda i: logging.SetLevel(logger=i.logger, level=i.level)),
    )
    logs = rx.merge(logs_config, ds_logs)

    ds_stt = stt.pipe(
        ops.flat_map(lambda i: i.request),
        ops.map(lambda i: deepspeech.SpeechToText(data=i.data, context=i.context)),
    )

    # config is hot; the combine operator keeps its last value
    # until logging is initialized
    ds_arg = config.pipe(
        ops.map(lambda i: deepspeech.Initialize(
            model=i.deepspeech.model,
            scorer=deepspeech.Scorer(
                scorer=getattr(i.deepspeech, 'scorer', None),
                lm_alpha=getattr(i.deepspeech, 'lm_alpha', None),
                lm_beta=getattr(i.deepspeech, 'lm_beta', None),
            ),
            beam_width=getattr(i.deepspeech, 'beam_width', None),
        )),
    )
    ds = rx.merge(ds_stt, ds_arg)

    http_init = config.pipe(
        ops.flat_map(lambda i: rx.from_([
            httpd.Initialize(request_max_size=i.server.http.request_max_size),
            httpd.AddRoute(
                methods=['POST'],
                path='/stt',
                id='stt',
                headers=MultiDict([('Content-Type', 'text/plain')]),
            ),
            httpd.StartServer(
                host=i.server.http.host,
                port=i.server.http.port),
        ])),
    )

    http_response = stt_response.pipe(
        route_ds_error(
            error_map=lambda e: httpd.Response(
                data="Speech to text error".encode('utf-8'),
                context=e.args[0].context,
                status=500
            )),
        ops.map(lambda i: httpd.Response(
            data=i.text.encode('utf-8'),
            context=i.context,
        )),
    )
    http = rx.merge(http_init, http_response, http_ds_error)

    return DeepspeechSink(
        file=file.Sink(request=read_request),
        logging=logging.Sink(request=logs),
        deepspeech=deepspeech.Sink(speech=ds),
        httpd=httpd.Sink(control=http)
    )
def intense_calculation(value):
    # sleep for a random short duration between 0.5 and 2.0 seconds
    # to simulate a long-running calculation
    time.sleep(random.randint(5, 20) * 0.1)
    return value

# Calculate the number of CPUs, then create a ThreadPoolScheduler
# with that number of threads
optimal_thread_count = multiprocessing.cpu_count()
print(Fore.YELLOW + f'number of CPUs {optimal_thread_count}')
pool_scheduler = ThreadPoolScheduler(optimal_thread_count)

# Create Process 1
rx.of("Alpha", "Beta", "Gamma", "Delta", "Epsilon").pipe(
    ops.map(lambda s: intense_calculation(s)),
    ops.subscribe_on(pool_scheduler)
).subscribe(
    on_next=lambda s: print(Fore.BLUE + "PROCESS 1: {0} {1}".format(
        current_thread().name, s)),
    on_error=lambda e: print(e),
    on_completed=lambda: print(Fore.GREEN + "PROCESS 1 done!"),
)

# Create Process 2
rx.range(1, 10).pipe(
    ops.map(lambda s: intense_calculation(s)),
    ops.subscribe_on(pool_scheduler)
).subscribe(
    on_next=lambda i: print(Fore.YELLOW + "PROCESS 2: {0} {1}".format(
        current_thread().name, i)),
    on_error=lambda e: print(e),
    on_completed=lambda: print(Fore.GREEN + "PROCESS 2 done!"),
)
def fetch_margin_accounts(context):
    def _fetch_margin_accounts(_):
        group = Group.load(context)
        return MarginAccount.load_all_for_group_with_open_orders(
            context, context.program_id, group)
    return _fetch_margin_accounts

liquidation_processor = LiquidationProcessor(default_context,
                                             NullAccountLiquidator(),
                                             NullWalletBalancer())

print("Starting margin account fetcher subscription")
margin_account_interval = 60
margin_account_subscription = rx.interval(margin_account_interval).pipe(
    ops.subscribe_on(pool_scheduler),
    ops.start_with(-1),
    ops.map(fetch_margin_accounts(default_context)),
).subscribe(
    create_backpressure_skipping_observer(
        on_next=liquidation_processor.update_margin_accounts,
        on_error=log_subscription_error))

print("Starting price fetcher subscription")
price_interval = 2
price_subscription = rx.interval(price_interval).pipe(
    ops.subscribe_on(pool_scheduler),
    ops.map(fetch_prices(default_context))
).subscribe(
    create_backpressure_skipping_observer(
        on_next=liquidation_processor.update_prices,
        on_error=log_subscription_error))
def create():
    return xs.pipe(ops.subscribe_on(scheduler))
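# Minimal, self-contained sketch of what the helper above exercises (the
# values and names are illustrative, not from the snippet): subscribe_on
# decides where the subscription side effects run, so the rx.of emissions
# happen on the new thread even though subscribe() is called from the main
# thread. The final sleep only keeps the process alive long enough to see it.
import time
import rx
from rx import operators as ops
from rx.scheduler import NewThreadScheduler
from threading import current_thread

rx.of(1, 2, 3).pipe(
    ops.subscribe_on(NewThreadScheduler()),
).subscribe(on_next=lambda i: print(current_thread().name, i))
time.sleep(0.5)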
def transform(self):
    return ops.flat_map(lambda x: rx.of(x).pipe(
        ops.map(self.operation),
        ops.subscribe_on(self.scheduler)))
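# Hedged standalone version of the operator above; `parallel`, the squaring
# operation, and the pool size are assumptions for illustration. Each element
# becomes a one-item observable subscribed on the pool, so the operation runs
# concurrently and results may arrive out of order.
import multiprocessing
import time
import rx
from rx import operators as ops
from rx.scheduler import ThreadPoolScheduler

pool = ThreadPoolScheduler(multiprocessing.cpu_count())

def parallel(operation, scheduler):
    return ops.flat_map(lambda x: rx.of(x).pipe(
        ops.map(operation),
        ops.subscribe_on(scheduler)))

rx.range(0, 4).pipe(
    parallel(lambda x: x * x, pool),
).subscribe(print)
time.sleep(1)  # keep the process alive for the pool threads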
print("Building network...") net = network.RamanAINetwork(cfg.network.structure) print("Begin training...") stream = train.train_net(net, train_dataset, valid_dataset, test_dataset, cfg) handler = utils.str_to_obj( f"train_stream_handlers.{cfg.train_stream_handler}") # main loop scheduler loop = asyncio.new_event_loop() main_scheduler = AsyncIOThreadSafeScheduler(loop) s = scheduler.ThreadPoolScheduler() observer = handler(cfg, cfg_dir) stream.pipe(operators.subscribe_on(s), ).subscribe(observer) if observer.plot_sub: def plot_task(data): f: plt.Figure = plt.gcf() f.clear() ax = f.add_subplot(211) ax.plot(data["epoch"], data["train"], c="red") if data["valid"]: ax.plot(data["epoch"], data["valid"], c="blue") ax = f.add_subplot(212) if data["test_output"]: ax.plot(data["test_output"]) plt.draw() plt.show(block=False)
def _get_lighting_modes(self) -> Observable:
    return self._get_lighting_modes_interactor.execute().pipe(
        operators.subscribe_on(self._scheduler),
        operators.observe_on(GtkScheduler(GLib)),
    )
def intense_calculation(value):
    # sleep for a random short duration between 0.5 and 2.0 seconds
    # to simulate a long-running calculation
    time.sleep(random.randint(5, 20) * .1)
    return value

# calculate the number of CPUs and add 1,
# then create a ThreadPoolScheduler with that number of threads
optimal_thread_count = multiprocessing.cpu_count() + 1
pool_scheduler = ThreadPoolScheduler(optimal_thread_count)

# Create TASK 1
rx.from_(["Alpha", "Beta", "Gamma", "Delta", "Epsilon"]).pipe(
    ops.map(lambda s: intense_calculation(s)),
    ops.subscribe_on(pool_scheduler)
).subscribe(
    on_next=lambda s: print("TASK 1: {0} {1}".format(current_thread().name, s)),
    on_error=lambda e: print(e),
    on_completed=lambda: print("TASK 1 done!"))

# Create TASK 2
rx.range(1, 10).pipe(
    ops.map(lambda s: intense_calculation(s)),
    ops.subscribe_on(pool_scheduler)
).subscribe(
    on_next=lambda i: print("TASK 2: {0} {1}".format(current_thread().name, i)),
    on_error=lambda e: print(e),
    on_completed=lambda: print("TASK 2 done!"))

# Create TASK 3, which is infinite (the snippet was truncated here; the rest
# follows the canonical RxPY ThreadPoolScheduler example)
rx.interval(1.0).pipe(
    ops.map(lambda i: i * 100),
    ops.observe_on(pool_scheduler),
    ops.map(lambda s: intense_calculation(s)),
).subscribe(
    on_next=lambda i: print("TASK 3: {0} {1}".format(current_thread().name, i)),
    on_error=lambda e: print(e))

input("Press Enter key to exit\n")
import rx
from rx import operators as ops
import multiprocessing
import rx.scheduler as scheduler

thread_count = multiprocessing.cpu_count()
thread_pool_scheduler = scheduler.ThreadPoolScheduler(thread_count)

rx.of(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).pipe(
    ops.filter(lambda i: i % 2 == 0),
    ops.subscribe_on(thread_pool_scheduler)).subscribe(lambda i: print(1))

rx.of(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).pipe(
    ops.filter(lambda i: i % 2 == 0),
    ops.subscribe_on(thread_pool_scheduler)).subscribe(lambda i: print(2))

rx.of(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).pipe(
    ops.filter(lambda i: i % 2 == 0),
    ops.subscribe_on(thread_pool_scheduler)).subscribe(lambda i: print(3))

rx.of(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).pipe(
    ops.filter(lambda i: i % 2 == 0),
    ops.subscribe_on(thread_pool_scheduler)).subscribe(lambda i: print(4))

print("AAAAA")
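# Hedged contrast sketch (not part of the script above; names and values are
# illustrative): subscribe_on moves the source's subscription onto a
# scheduler, while observe_on only switches where downstream callbacks run.
# Here there is no subscribe_on, so emissions stay on the main thread and
# only the receiving side hops to the pool.
import time
import rx
from rx import operators as ops
import rx.scheduler as scheduler
from threading import current_thread

pool = scheduler.ThreadPoolScheduler(2)
rx.of(1, 2).pipe(
    ops.do_action(lambda i: print("emit on", current_thread().name)),
    ops.observe_on(pool),
).subscribe(lambda i: print("receive on", current_thread().name))
time.sleep(0.5)  # keep the process alive for the pool threads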