# Imports inferred from usage in this excerpt (the original file header is elided).
import json

import pywingchun
from rx.subject import Subject


class AlgoOrder(pywingchun.AlgoOrder):
    registry = {}
    __params_schema__ = None
    __status_schema__ = None

    def __init__(self, **kwargs):
        order_id = kwargs.pop("order_id", 0)
        self.sender_uid = kwargs.pop("sender_uid", 0)
        self.active_orders = {}
        self.active = False
        pywingchun.AlgoOrder.__init__(self, order_id)
        self.subject = Subject()

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        cls.registry[cls.__name__] = cls

    def __repr__(self):
        return '%s(%r,%r)' % (self.__class__, self.params, self.status)

    @classmethod
    def types(cls):
        return list(cls.registry.keys())

    def dumps(self):
        dct = {
            "order_id": self.order_id,
            "sender_uid": self.sender_uid,
            "algo_type": self.type,
            "status": self.status,
            "params": self.params,
            "active": self.active
        }
        return json.dumps(dct)

    @classmethod
    def create(cls, type, **kwargs):
        return cls.registry[type](**kwargs)

    def on_start(self, ctx):
        self.active = True

    def on_stop(self, ctx):
        self.active = False

    def on_modify(self, ctx, msg):
        raise NotImplementedError

    def on_child_trade(self, ctx, trade):
        raise NotImplementedError

    def on_child_order(self, ctx, order):
        if self.active:
            order_id = order.order_id
            if order_id in self.active_orders:
                pass

    def on_quote(self, ctx, quote):
        raise NotImplementedError

    def on_order_report(self, ctx, report_msg):
        raise NotImplementedError

    @property
    def type(self):
        return self.__class__.__name__

    @property
    def sent(self):
        return self.order_id > 0

    def send_notice(self):
        print("send notice .......")
        self.subject.on_next(self)
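
# Usage sketch (hypothetical): subclasses register themselves through
# __init_subclass__, so AlgoOrder.create() can build any of them by name.
# "TwapOrder" and its behaviour are illustrative, not part of the source.
class TwapOrder(AlgoOrder):
    def on_quote(self, ctx, quote):
        pass  # slice the parent order against incoming quotes


assert "TwapOrder" in AlgoOrder.types()
order = AlgoOrder.create("TwapOrder", order_id=1, sender_uid=42)
order.subject.subscribe(lambda o: print("notice from", o.type))
order.send_notice()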
class StateManagement(metaclass=Singleton):
    def __init__(self):
        self.initRequest = Subject()
        self.addSegmentConfigRequest = Subject()
        self.removeSegmentConfigRequest = Subject()
        self.updateSegmentConfigRequest = Subject()
        self.generateManipulatorRequest = Subject()
        self.updateTensionsRequest = Subject()
        self.computeStateRequest = Subject()

        segment_config_repo_op = ops.merge(
            self.initRequest.pipe(
                ops.map(Repo().publish_init_segments_config)
            ),
            self.addSegmentConfigRequest.pipe(
                ops.map(Repo().add_segment)
            ),
            self.removeSegmentConfigRequest.pipe(
                ops.map(Repo().remove_segment)
            ),
            self.updateSegmentConfigRequest.pipe(
                ops.map(Repo().update_segment_config)
            )
        )
        self._segment_configs_stream = Observable().pipe(
            segment_config_repo_op,
        )
        # TODO
        self._segment_configs_err_stream = Observable().pipe(
            segment_config_repo_op,
            # RxPY 3 exposes just() at module level (Observable.just was RxPY 1)
            ops.flat_map(lambda x: rx.just(Repo().get_error()))
        )
        self._tension_inputs_stream = Observable().pipe(
            ops.merge(
                self.generateManipulatorRequest.pipe(
                    ops.map(Repo().generate_manipulator)
                ),
                self.updateTensionsRequest.pipe(
                    ops.map(Repo().updateTensions)
                ),
            )
        )
        compute_state_result = self.computeStateRequest.pipe(
            ops.map(Repo().computeTensions)
        )
        self._text_result_stream = compute_state_result.pipe(
            ops.map(format_manipulator)
        )
        self._graph_stream = compute_state_result.pipe()

    def request_init_segment_configs(self):
        self.initRequest.on_next(0)

    def request_add_segment_config(self):
        self.addSegmentConfigRequest.on_next(0)

    def request_remove_segment_config(self, key):
        self.removeSegmentConfigRequest.on_next(key)

    def request_update_segment_config(self, config):
        self.updateSegmentConfigRequest.on_next(config)

    def request_generate_manipulator(self):
        self.generateManipulatorRequest.on_next(0)

    def request_update_tensions(self, tensions):
        self.updateTensionsRequest.on_next(tensions)

    def request_compute_state(self):
        self.computeStateRequest.on_next(0)

    @property
    def segment_configs_stream(self):
        return self._segment_configs_stream

    @property
    def segment_configs_err_stream(self):
        return self._segment_configs_err_stream

    @property
    def tension_inputs_stream(self):
        return self._tension_inputs_stream

    @property
    def graph_stream(self):
        return self._graph_stream

    @property
    def text_result_stream(self):
        return self._text_result_stream

    def __del__(self):
        for v in self.__dict__.values():
            if isinstance(v, Subject):
                v.dispose()
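
# Usage sketch (hypothetical): consumers subscribe to the exposed streams,
# then drive the pipeline through the request_* methods; the Singleton
# metaclass means every caller shares one StateManagement instance.
sm = StateManagement()
sm.text_result_stream.subscribe(lambda text: print(text))
sm.segment_configs_err_stream.subscribe(lambda err: print("error:", err))
sm.request_init_segment_configs()
sm.request_compute_state()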
import rx
from rx.subject import Subject, AsyncSubject, BehaviorSubject, ReplaySubject

# A Subject is both an Observer and an Observable.
print('--------Subject---------')
subject = Subject()
subject.on_next(1)
subject.subscribe(lambda i: print(i))
subject.on_next(2)
subject.on_next(3)
subject.on_next(4)
subject.on_completed()

# A ReplaySubject caches all values; if given a buffer size, it caches
# only the most recent values.
print('--------ReplaySubject---------')
subject = ReplaySubject()
subject.on_next(1)
subject.subscribe(lambda i: print(i))
subject.on_next(2)
subject.on_next(3)
subject.on_next(4)
subject.on_completed()

# A BehaviorSubject caches the last emitted value, unless the Observable
# has already completed.
print('--------BehaviorSubject---------')
subject = BehaviorSubject(0)
subject.on_next(1)
subject.on_next(2)
subject.subscribe(lambda i: print(i))
subject.on_next(3)
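
# AsyncSubject is imported above but never demonstrated; a minimal sketch:
# it caches only the final value and delivers it when the subject completes.
print('--------AsyncSubject---------')
subject = AsyncSubject()
subject.on_next(1)
subject.subscribe(lambda i: print(i))
subject.on_next(2)
subject.on_next(3)
subject.on_completed()  # only now is the final value (3) delivered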
class Axis:
    """
    Representation of an axis which can read values or write values.

    Axes have fixed schema: in this sense, you can always expect to receive
    the same shape data back from the axis. In most cases, axes record single
    points, but you can produce any Python primitive, as well as `np.ndarray`s
    and `pd.DataFrame`s if it is appropriate. See also the schema module for
    type hinting Arrays.

    Axes are fundamentally asynchronous, since they represent actual hardware
    resources that exist over I/O. Additionally, measurements may take finite
    time, and in the case of event stream axes, you may not know when values
    will be produced.
    """
    raw_value_stream: Optional[Subject]
    _type_def: TypeDefinition

    async def shutdown(self):
        raise NotImplementedError

    def collect_state(self):
        return None

    def receive_state(self, state):
        pass

    def append_point_to_history(self, point):
        self.collected_xs.append(point["time"])
        self.collected_ys.append(point["value"])

    def reset_history(self):
        self.collected_xs = []
        self.collected_ys = []

    def __init__(self, name: str, schema: type):
        self.name = name
        self.schema = schema
        self._type_def = TypeDefinition.from_type(schema)
        self.raw_value_stream = Subject()

        # for scalar schemas we can provide a stream of values
        if schema in (float, int):
            self.collected_xs = []
            self.collected_ys = []
            self.raw_value_stream.subscribe(self.append_point_to_history)

    def emit(self, value):
        if self.raw_value_stream:
            self.raw_value_stream.on_next(
                {"value": value, "time": datetime.datetime.now().timestamp()}
            )

    @property
    def type_def(self):
        """
        We allow overriding this because in the case of subaxes the type
        is derived from the parent.
        """
        return self._type_def

    async def trigger(self):
        return

    async def settle(self):
        raise NotImplementedError

    # We use a two level API in order to make the code here more
    # straightforward. *_internal methods are virtual and set the internal
    # behavior for an axis. The high level API provides synchronous
    # (if available) and asynchronous bindings which also handle emitting
    # values for subscribers.
    async def write_internal(self, value):
        raise NotImplementedError

    async def read_internal(self) -> Any:
        raise NotImplementedError

    # declared synchronous so that sync_read()/sync_write() below can call
    # them without awaiting
    def sync_write_internal(self, value):
        raise NotImplementedError

    def sync_read_internal(self) -> Any:
        raise NotImplementedError

    # In general, you do not need to implement the top level methods, unless
    # you need to control how values are emitted. You should be able to
    # implement the low level API above and be a client to the high level
    # API below.
    async def write(self, value):
        value = await self.write_internal(value)
        self.emit(value)
        return value

    async def read(self):
        value = await self.read_internal()
        self.emit(value)
        return value

    def sync_read(self):
        value = self.sync_read_internal()
        self.emit(value)
        return value

    def sync_write(self, value):
        value = self.sync_write_internal(value)
        self.emit(value)
        return value
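
# Illustrative subclass (hypothetical, not from the source): a software-only
# axis backed by a plain variable, showing the minimal *_internal surface and
# the value stream that the high level read()/write() API feeds.
import asyncio


class VirtualAxis(Axis):
    def __init__(self, name: str):
        super().__init__(name, float)
        self._value = 0.0

    async def read_internal(self):
        return self._value

    async def write_internal(self, value):
        self._value = value
        return value


async def demo():
    axis = VirtualAxis("temperature")
    axis.raw_value_stream.subscribe(lambda p: print(p["value"], p["time"]))
    await axis.write(21.5)
    print(await axis.read())

asyncio.run(demo())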
class Context:
    def __init__(self, config_path: str):
        deepkit.globals.last_context = self
        self.log_lock = Lock()
        self.defined_metrics = {}
        self.log_subject = Subject()
        self.metric_subject = Subject()
        self.speed_report_subject = Subject()

        self.client = deepkit.client.Client(config_path)
        self.wait_for_connect()
        atexit.register(self.shutdown)

        self.last_iteration_time = 0
        self.last_batch_time = 0
        self.job_iteration = 0
        self.job_iterations = 0
        self.seconds_per_iteration = 0
        self.seconds_per_iterations = []

        self.debugger_controller = None

        if deepkit.utils.in_self_execution():
            self.job_controller = JobController()

        self.debugger_controller = JobDebuggerController()

        def on_connect(connected):
            if connected:
                if deepkit.utils.in_self_execution():
                    asyncio.run_coroutine_threadsafe(
                        self.client.register_controller(
                            'job/' + self.client.job_id, self.job_controller),
                        self.client.loop)

                asyncio.run_coroutine_threadsafe(
                    self.client.register_controller(
                        'job/' + self.client.job_id + '/debugger',
                        self.debugger_controller),
                    self.client.loop)

        self.client.connected.subscribe(on_connect)

        def on_log(data: List):
            if len(data) == 0:
                return
            packed = ''
            for d in data:
                packed += d
            self.client.job_action('log', ['main_0', packed])

        self.log_subject.pipe(buffer(interval(1))).subscribe(on_log)

        if len(deepkit.globals.last_logs.getvalue()) > 0:
            self.log_subject.on_next(deepkit.globals.last_logs.getvalue())

        def on_metric(data: List):
            if len(data) == 0:
                return
            packed = {}
            for d in data:
                if d['id'] not in packed:
                    packed[d['id']] = []
                packed[d['id']].append(d['row'])
            for i, v in packed.items():
                self.client.job_action('channelData', [i, v])

        self.metric_subject.pipe(buffer(interval(1))).subscribe(on_metric)

        def on_speed_report(rows):
            # only save the latest value, each second
            if len(rows) == 0:
                return
            self.client.job_action('streamJsonFile', ['.deepkit/speed.csv', [rows[-1]]])

        self.speed_report_subject.pipe(buffer(interval(1))).subscribe(on_speed_report)

        p = psutil.Process()
        self.client.job_action('streamJsonFile', [
            '.deepkit/hardware/main_0.csv',
            [[
                'time', 'cpu', 'memory', 'network_rx', 'network_tx',
                'block_write', 'block_read'
            ]]
        ])

        def on_hardware_metrics(dummy):
            net = psutil.net_io_counters()
            disk = psutil.disk_io_counters()
            data = [
                time.time(),
                (p.cpu_percent(interval=None) / 100) / psutil.cpu_count(),
                p.memory_percent() / 100,
                net.bytes_recv,
                net.bytes_sent,
                disk.write_bytes,
                disk.read_bytes,
            ]
            self.client.job_action('streamJsonFile', ['.deepkit/hardware/main_0.csv', [data]])

        interval(1).subscribe(on_hardware_metrics)

    def wait_for_connect(self):
        async def wait():
            await self.client.connecting

        asyncio.run_coroutine_threadsafe(wait(), self.client.loop).result()

    def shutdown(self):
        self.metric_subject.on_completed()
        self.log_subject.on_completed()
        self.client.shutdown()

    def epoch(self, current: int, total: Optional[int]):
        self.iteration(current, total)

    def iteration(self, current: int, total: Optional[int]):
        self.job_iteration = current
        if total:
            self.job_iterations = total

        now = time.time()
        if self.last_iteration_time:
            self.seconds_per_iterations.append({
                'diff': now - self.last_iteration_time,
                'when': now,
            })

        self.last_iteration_time = now
        self.last_batch_time = now

        # remove all entries older than twenty seconds
        self.seconds_per_iterations = [
            x for x in self.seconds_per_iterations if (now - x['when']) < 20
        ]
        self.seconds_per_iterations = self.seconds_per_iterations[-30:]

        if len(self.seconds_per_iterations) > 0:
            diffs = [x['diff'] for x in self.seconds_per_iterations]
            self.seconds_per_iteration = sum(diffs) / len(diffs)

        if self.seconds_per_iteration:
            self.client.patch('secondsPerIteration', self.seconds_per_iteration)

        self.client.patch('iteration', self.job_iteration)
        if total:
            self.client.patch('iterations', self.job_iterations)

        iterations_left = self.job_iterations - self.job_iteration
        if iterations_left > 0:
            self.client.patch('eta', self.seconds_per_iteration * iterations_left)
        else:
            self.client.patch('eta', 0)

    def step(self, current: int, total: int = None, size: int = None):
        self.client.patch('step', current)

        now = time.time()
        x = self.job_iteration + (current / total)
        speed_per_second = size / (now - self.last_batch_time) if self.last_batch_time else size

        if self.last_batch_time:
            self.seconds_per_iterations.append({
                'diff': (now - self.last_batch_time) * total,
                'when': now
            })

        # remove all entries older than twenty seconds
        self.seconds_per_iterations = [
            x for x in self.seconds_per_iterations if (now - x['when']) < 20
        ]
        self.seconds_per_iterations = self.seconds_per_iterations[-30:]

        if len(self.seconds_per_iterations) > 0:
            diffs = [x['diff'] for x in self.seconds_per_iterations]
            self.seconds_per_iteration = sum(diffs) / len(diffs)
            iterations_left = self.job_iterations - self.job_iteration
            self.client.patch('eta', self.seconds_per_iteration * iterations_left)

        self.last_batch_time = now

        if self.seconds_per_iteration:
            self.client.patch('secondsPerIteration', self.seconds_per_iteration)

        self.client.patch('speed', speed_per_second)
        self.speed_report_subject.on_next([x, now, speed_per_second])

        if total:
            self.client.patch('steps', total)

    def set_title(self, s: str):
        self.client.patch('title', s)

    def set_info(self, name: str, value: any):
        self.client.patch('infos.' + name, value)

    def set_description(self, description: any):
        self.client.patch('description', description)

    def add_tag(self, tag: str):
        self.client.job_action('addTag', [tag])

    def rm_tag(self, tag: str):
        self.client.job_action('rmTag', [tag])

    def set_parameter(self, name: str, value: any):
        self.client.patch('config.parameters.' + name, value)

    def define_metric(self, name: str, options: dict):
        self.defined_metrics[name] = {}
        self.client.job_action('defineMetric', [name, options])

    def debug_snapshot(self, graph: dict):
        self.client.job_action('debugSnapshot', [graph])

    def add_file(self, path: str):
        self.client.job_action(
            'uploadFile',
            [path, base64.b64encode(open(path, 'rb').read()).decode('utf8')])

    def add_file_content(self, path: str, content: bytes):
        self.client.job_action(
            'uploadFile', [path, base64.b64encode(content).decode('utf8')])

    def set_model_graph(self, graph: dict):
        self.client.job_action('setModelGraph', [graph])

    def metric(self, name: str, x, y):
        if name not in self.defined_metrics:
            self.define_metric(name, {})

        if not isinstance(y, list):
            y = [y]

        self.metric_subject.on_next({'id': name, 'row': [x, time.time()] + y})
        self.client.patch('channels.' + name + '.lastValue', y)

    def log(self, s: str):
        self.log_subject.on_next(s)
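
# Usage sketch (hypothetical config path and metric names): the Context
# batches log and metric emissions through its Subjects and flushes them
# once per second via the buffered subscriptions set up in __init__.
ctx = Context('./deepkit.yml')
ctx.set_title('mnist baseline')
for epoch in range(10):
    ctx.epoch(epoch + 1, total=10)
    ctx.metric('loss', epoch, 1.0 / (epoch + 1))
    ctx.log('finished epoch {}\n'.format(epoch + 1))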
# Imports inferred from usage in this excerpt (the original file header is elided).
import sys
from os.path import dirname, join

import pygame

from rx import operators as ops
from rx.scheduler.mainloop import PyGameScheduler
from rx.subject import Subject


def main():
    pygame.init()

    size = 500, 500
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption("Rx for Python rocks")

    black = 0, 0, 0
    background = pygame.Surface(screen.get_size())
    background.fill(black)  # fill the background black
    background = background.convert()  # prepare for faster blitting

    scheduler = PyGameScheduler(pygame)

    mousemove = Subject()

    color = "white"
    base = dirname(__file__)
    files = [
        join(base, img % color) for img in [
            "chess_rook_%s.png",
            "chess_knight_%s.png",
            "chess_bishop_%s.png",
            "chess_king_%s.png",
            "chess_queen_%s.png",
            "chess_bishop_%s.png",
            "chess_knight_%s.png",
            "chess_rook_%s.png",
        ]
    ]
    images = [pygame.image.load(image).convert_alpha() for image in files]

    old = [None] * len(images)
    draw = []
    erase = []

    def handle_image(i, image):
        imagerect = image.get_rect()

        def on_next(ev):
            imagerect.top = ev[1]
            imagerect.left = ev[0] + i * 32

            if old[i]:
                erase.append(old[i])

            old[i] = imagerect.copy()
            draw.append((image, imagerect.copy()))

        def on_error(err):
            print("Got error: %s" % err)
            sys.exit()

        # each piece follows the mouse with a slightly longer delay,
        # producing the trailing "snake" effect
        mousemove.pipe(ops.delay(0.1 * i, scheduler=scheduler)).subscribe(
            on_next, on_error=on_error)

    for i, image in enumerate(images):
        handle_image(i, image)

    while True:
        for event in pygame.event.get():
            if event.type == pygame.MOUSEMOTION:
                pos = event.pos
                mousemove.on_next(pos)
            elif event.type == pygame.QUIT:
                sys.exit()

        if len(draw):
            update = []

            for rect in erase:
                screen.blit(background, (rect.x, rect.y), rect)
                update.append(rect)

            for image, rect in draw:
                screen.blit(image, rect)
                update.append(rect)

            pygame.display.update(update)
            pygame.display.flip()
            draw = []
            erase = []

        scheduler.run()
class WriteApi(AbstractClient):

    def __init__(self, influxdb_client, write_options: WriteOptions = WriteOptions()) -> None:
        self._influxdb_client = influxdb_client
        self._write_service = WriteService(influxdb_client.api_client)
        self._write_options = write_options
        if self._write_options.write_type is WriteType.batching:
            self._subject = Subject()
            observable = self._subject.pipe(ops.observe_on(self._write_options.write_scheduler))
            self._disposable = observable \
                .pipe(ops.window_with_time_or_count(count=write_options.batch_size,
                                                    timespan=timedelta(milliseconds=write_options.flush_interval)),
                      ops.flat_map(lambda v: _window_to_group(v)),
                      ops.map(mapper=lambda x: self._retryable(data=x, delay=self._jitter_delay())),
                      ops.merge_all()) \
                .subscribe(self._on_next, self._on_error, self._on_complete)
        else:
            self._subject = None
            self._disposable = None

    def write(self, bucket: str, org: str,
              record: Union[str, List['str'], Point, List['Point'], Observable],
              write_precision: WritePrecision = DEFAULT_WRITE_PRECISION) -> None:
        """
        Writes time-series data into InfluxDB.

        :param str org: specifies the destination organization for writes; takes either the ID or name
                        interchangeably; if both orgID and org are specified, org takes precedence. (required)
        :param str bucket: specifies the destination bucket for writes (required)
        :param WritePrecision write_precision: specifies the precision for the unix timestamps within
                                               the body line-protocol
        :param record: Points, line protocol, or an RxPY Observable to write
        """
        if self._write_options.write_type is WriteType.batching:
            return self._write_batching(bucket, org, record, write_precision)

        final_string = ''

        if isinstance(record, str):
            final_string = record

        if isinstance(record, Point):
            final_string = record.to_line_protocol()

        if isinstance(record, list):
            lines = []
            for item in record:
                if isinstance(item, str):
                    lines.append(item)
                if isinstance(item, Point):
                    lines.append(item.to_line_protocol())
            final_string = '\n'.join(lines)

        _async_req = True if self._write_options.write_type == WriteType.asynchronous else False

        return self._post_write(_async_req, bucket, org, final_string, write_precision)

    def flush(self):
        # TODO
        pass

    def __del__(self):
        if self._subject:
            self._subject.on_completed()
            self._subject.dispose()
            self._subject = None

            # wait for writing to finish
            while not self._disposable.is_disposed:
                sleep(0.1)

        if self._disposable:
            self._disposable = None

    def _write_batching(self, bucket, org, data, precision=DEFAULT_WRITE_PRECISION):
        _key = _BatchItemKey(bucket, org, precision)
        if isinstance(data, str):
            self._subject.on_next(_BatchItem(key=_key, data=data))
        elif isinstance(data, Point):
            self._subject.on_next(_BatchItem(key=_key, data=data.to_line_protocol()))
        elif isinstance(data, list):
            for item in data:
                self._write_batching(bucket, org, item, precision)
        elif isinstance(data, Observable):
            data.subscribe(lambda it: self._write_batching(bucket, org, it, precision))
        return None

    def _http(self, batch_item: _BatchItem):
        logger.debug("http post to: %s", batch_item)

        self._post_write(False, batch_item.key.bucket, batch_item.key.org,
                         batch_item.data, batch_item.key.precision)

        return _BatchResponse(data=batch_item)

    def _post_write(self, _async_req, bucket, org, body, precision):
        return self._write_service.post_write(org=org, bucket=bucket, body=body.encode("utf-8"),
                                              precision=precision, async_req=_async_req,
                                              content_encoding="identity",
                                              content_type="text/plain; charset=utf-8")

    def _retryable(self, data: str, delay: timedelta):
        return rx.of(data).pipe(
            ops.delay(duetime=delay, scheduler=self._write_options.write_scheduler),
            ops.map(lambda x: self._http(x)),
            ops.catch(handler=lambda exception, source: self._retry_handler(exception, source, data)),
        )

    def _retry_handler(self, exception, source, data):
        if isinstance(exception, ApiException):
            if exception.status == 429 or exception.status == 503:
                _delay = self._jitter_delay() + timedelta(milliseconds=self._write_options.retry_interval)
                return self._retryable(data, delay=_delay)

        return rx.just(_BatchResponse(exception=exception, data=data))

    def _jitter_delay(self):
        return timedelta(milliseconds=random() * self._write_options.jitter_interval)

    @staticmethod
    def _on_next(response: _BatchResponse):
        if response.exception:
            logger.error("The batch item wasn't processed successfully because: %s", response.exception)
        else:
            logger.debug("The batch item: %s was processed successfully.", response)

    @staticmethod
    def _on_error(ex):
        logger.error("unexpected error during batching: %s", ex)

    def _on_complete(self):
        self._disposable.dispose()
        logger.info("the batching processor was disposed")
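
# Usage sketch (hypothetical wiring; the exact InfluxDBClient/WriteOptions
# constructor arguments are assumed, not confirmed by this excerpt):
client = InfluxDBClient(url="http://localhost:9999", token="my-token", org="my-org")
write_api = WriteApi(influxdb_client=client,
                     write_options=WriteOptions(write_type=WriteType.batching,
                                                batch_size=500,
                                                flush_interval=1000))
point = Point("mem").tag("host", "host1").field("used_percent", 23.4)
write_api.write(bucket="my-bucket", org="my-org", record=point)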
class _NativeAppClient:
    def __init__(self):
        self.__rawPluginOutputStream = Subject()
        self.__connectionStatusStream = Subject()

        self.__speechStream = with_latest_from(
            self.__rawPluginOutputStream.pipe(
                map(lambda dehydratedMsgDict: rehydrateMessage(dehydratedMsgDict))),
            self.__connectionStatusStream,
        ).pipe(
            map(lambda combinedTuple: {
                **combinedTuple[0],
                **combinedTuple[1]
            }),
            merge(self.__connectionStatusStream),
        )

        # TODO - need to trigger NVDA to startup, if it isn't already
        #      - first need to check if NVDA is installed + if the plugin is
        asyncio.create_task(self.__startListeningForOutput())

    @property
    def speechStream(self):
        return self.__speechStream

    # TODO - add streams for other vitals checks.
    #      - Consumer to choose frequencies?
    #      - Is NVDA running?

    async def __startListeningForOutput(self):
        await self.__connectToOutputServer()
        asyncio.create_task(self.__pollForOutput())

    async def __connectToOutputServer(self):
        self.__serverConnection = None  # remove a potential previous, broken connection
        await asyncio.sleep(_NVDA_EXPECTED_STARTUP_TIME + _PADDING_FOR_NVDA_STARTUP_TIME)
        while True:
            try:
                # seems to wait ~3s before an exception is raised
                self.__serverConnection = Client(_address, _authkey)
                self.__connectionStatusStream.on_next({OutputKeys.IS_CONNECTED: True})
                break
            except ConnectionRefusedError:
                # the listener hasn't been set up yet
                self.__connectionStatusStream.on_next({OutputKeys.IS_CONNECTED: False})
                await asyncio.sleep(0)

    async def __pollForOutput(self):
        while True:
            if self.__serverConnection and self.__serverConnection.poll():
                try:
                    dehydratedMsgDict = self.__serverConnection.recv()
                    self.__rawPluginOutputStream.on_next(dehydratedMsgDict)
                except ConnectionResetError:  # NVDA/the plugin was terminated
                    self.__connectionStatusStream.on_next({OutputKeys.IS_CONNECTED: False})
                    await self.__connectToOutputServer()  # in case it restarts
            else:
                await asyncio.sleep(0)
import rx
import rx.operators as ops
from rx.subject import Subject

first = Subject()
second = Subject()

# amb mirrors whichever source emits first; the other is discarded.
first.pipe(ops.amb(second)).subscribe(
    on_next=lambda i: print("on_next {}".format(i)),
    on_error=lambda e: print("on_error: {}".format(e)),
    on_completed=lambda: print("on_completed")
)

first.on_next(1)      # first wins the race
second.on_next(2)     # ignored
first.on_completed()
class Stomp:
    def __init__(self, url):
        self.ws = WebSocket(url)

        # Stomp streams
        self.rx_status = Subject()
        self.rx_frame = Subject()
        self.rx_message = Subject()
        self.rx_receipt = Subject()
        self.rx_error = Subject()

        # Assign streams
        self.ws.rx_on_open.subscribe(on_next=self.__on_open)
        self.ws.rx_on_data.subscribe(on_next=self.__on_data)
        self.ws.rx_on_close.subscribe(on_next=self.__on_close)
        self.ws.rx_on_ping.subscribe(on_next=self.__on_ping)
        self.ws.rx_on_pong.subscribe(on_next=self.__on_pong)
        self.ws.rx_on_count_message.subscribe(on_next=self.__on_count_message)
        self.ws.rx_on_error.subscribe(on_next=self.__on_error)
        self.ws.rx_on_message.subscribe(on_next=self.__on_message)

    def transmit(self, frame: Frame):
        self.ws.send(frame.build())

    def connect(self):
        pass

    def send(self):
        pass

    def subscribe(self):
        pass

    def unsubscribe(self):
        pass

    def disconnect(self):
        pass

    def __on_open(self):
        pass

    def __on_close(self):
        pass

    def __on_data(self):
        pass

    def __on_ping(self):
        pass

    def __on_pong(self):
        pass

    def __on_count_message(self):
        pass

    def __on_error(self):
        pass

    def __on_message(self, c, message):
        self.rx_frame.on_next(Frame.parse(message))
# Imports inferred from usage; LOGGER, config, and ipc_shutdown_event are
# assumed to come from the surrounding module.
import asyncio

import websockets


async def _listen(websocket, path):
    async for message in websocket:
        LOGGER.debug(f"<<<< MSG_RECV: {message}")
        try:
            ipc_resolvers[message]()
        except Exception as e:
            LOGGER.error(f"Failed to process IPC instruction {message}", e)


def get_server():
    return websockets.serve(
        _listen,
        config.get("qt", "ipc_ws_bind_address"),
        config.getint("qt", "ipc_ws_port"),
        origins=None,
    )


def start_listener():
    LOGGER.info("Starting IPC Listener Thread")
    _el.run_until_complete(ipc_server)
    _el.run_forever()


_el = asyncio.get_event_loop()
ipc_resolvers = {"closeApplication": lambda: ipc_shutdown_event.on_next(True)}
ipc_server = get_server()
class WorkQueue(object):
    def __init__(self, concurrency_per_group, description=None):
        self.scheduler = ThreadPoolScheduler(concurrency_per_group)
        self._requests = Subject()
        self._output = ReplaySubject()
        self._description = description
        self._subscription = self._requests.pipe(
            group_by(lambda r: r['concurrency_group']),
            flat_map(lambda concurrency_group: concurrency_group.pipe(
                map(lambda r: r['request']),
                merge(max_concurrent=concurrency_per_group)))
        ).subscribe(
            on_next=lambda request: self._output.on_next(request),
            on_error=lambda error: logging.exception(
                'Error in {} request stream'.format(self)),
            on_completed=lambda: logging.error(
                '{} request stream unexpectedly completed'.format(self)),
            scheduler=self.scheduler)

    def enqueue(self,
                observable: Observable,
                group: str = None,
                retries: int = 0,
                description: str = None):
        # Provide a function returning a callable?
        description = description or str(observable)
        key = '{}({})'.format(description, random.random())

        def log_status(status):
            logging.debug(str({
                'WorkQueue': str(self),
                'group': group,
                'key': key,
                status: description
            }))

        log_status('ENQUEUED')
        error: Optional[Exception] = None

        def handle_error(e):
            log_status('FAILED')
            nonlocal error
            error = e
            return of({'key': key, 'error': e})

        def throw_if_error(request):
            if error:
                return throw(error)
            else:
                return of(request)

        def extract_value(value):
            if type(value) == Observable:
                return value
            else:
                return of(value)

        request = of(True).pipe(
            do_action(lambda _: log_status('STARTED')),
            flat_map(lambda _: observable.pipe(
                flat_map(extract_value),
                map(lambda value: {'key': key, 'value': value}),
                retry_with_backoff(
                    retries=retries,
                    description='{}.enqueue(group={}, description={})'.format(
                        self, group, description)),
                catch(handler=lambda e, o: handle_error(e)),
            )),
            concat(of({'key': key, 'complete': True}).pipe(
                do_action(lambda _: log_status('COMPLETED')))))

        result_stream = self._output.pipe(
            filter(lambda request: request['key'] == key),
            flat_map(lambda request: throw_if_error(request)),
            take_while(lambda request: not request.get('complete')),
            flat_map(lambda request: of(request.get('value'))))
        self._requests.on_next({'request': request, 'concurrency_group': group})
        return result_stream

    def dispose(self):
        if self._subscription:
            self._subscription.dispose()

    def __str__(self):
        return 'WorkQueue({})'.format(self._description) if self._description else super().__str__()
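
# Usage sketch (hypothetical work items): jobs in the same concurrency group
# share that group's limit, and enqueue() returns a stream of the job's values.
import rx

queue = WorkQueue(concurrency_per_group=2, description='exports')
result = queue.enqueue(
    rx.of('tile-1'),  # the unit of work, expressed as an observable
    group='exports',
    retries=2,
    description='export tile-1')
result.subscribe(
    on_next=lambda value: print('got', value),
    on_error=lambda e: print('failed:', e))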
def test_parent_before_source():
    source = Subject()
    parent = Subject()
    actual_value = None

    def on_next(i):
        nonlocal actual_value
        actual_value = i

    disposable = parent.pipe(
        rxsci.ops.with_latest_from(source),
    ).subscribe(
        on_next=on_next,
        on_error=lambda e: print(e),
    )

    parent.on_next(1)
    assert actual_value is None

    source.on_next('a')
    assert actual_value is None

    parent.on_next(2)
    assert actual_value == (2, 'a')
    parent.on_next(3)
    assert actual_value == (3, 'a')

    actual_value = None
    source.on_next('b')
    assert actual_value is None

    parent.on_next(4)
    assert actual_value == (4, 'b')
class Trainer(object):  # type: Type[Trainer]
    def __init__(self, classifier_name='MLP'):
        super(Trainer, self).__init__()

        # Classifier initialiser
        self.classifier_name = classifier_name
        classifiers = {
            'RandomForest': RandomForestClassifier(n_estimators=10),
            'MLP': MLPClassifier(
                solver='adam',
                alpha=1,
                # hidden_layer_sizes=(100, 100, 100, 100, 100),
                verbose=True,
                shuffle=True,
                warm_start=True,
                learning_rate='adaptive',
            ),
            'SVC': SVC(gamma='auto'),
            'KNN': KNeighborsClassifier(n_neighbors=3),
            'AdaBoost': AdaBoostClassifier(
                n_estimators=100,
                learning_rate=0.05,
                random_state=0,
            )
        }
        self.cls = classifiers[classifier_name]
        self.pca = PCA(n_components=2)
        self._is_open = True

        # Observables and Subjects
        self.prediction = Subject()
        self.status = Subject()
        self.identifiers = Subject()

        # disposal handler
        self.subscriptions = []
        self.subscriptions.append(self.prediction)
        self.subscriptions.append(self.status)
        self.subscriptions.append(self.identifiers)

        # Training Params
        self.training_wait_time = 3
        self.recording_time = 10
        self._is_recording_data = False
        self.is_trained = False
        self._is_training = False
        self.current_training_target = None
        self.recorded_data = []
        self.samples = []
        self.targets = []
        self.accumulative_samples = []
        self.accumulative_targets = []

        # Prediction Params
        self.prediction_wait_time = 0.25
        self.current_data = []

        # Scoring Params
        self.training_summary = []

        # Prediction initialiser
        self._identifiers = []
        # self._init_thread(target=self._initialise_classifier, args=(classifier_name,))

    @staticmethod
    def _init_thread(target, args=()):
        Thread(target=target, args=args).start()

    def add_data(self, data):  # type: (Trainer, Any) -> None
        self.current_data = data
        if self._is_recording_data:
            self.recorded_data.append(data)
            self.samples.append(data)
            self.targets.append(self.current_training_target)
            self.accumulative_samples.append(data)
            self.accumulative_targets.append(self.current_training_target)
        else:
            self.predict()

    def _initialise_classifier(self, classifier_name):
        self.status.on_next('Initialising Trainer...')
        for i in range(int(len(os.listdir('./data')) / 2)):
            arm_down_data = np.load('./data/arm_down_processor_' + str(i + 1) + '.npy')
            for data in arm_down_data:
                # print(data)
                self.accumulative_samples.append(data[1])
                self.accumulative_targets.append(1)
                self.samples.append(data[1])
                self.targets.append(1)
            arm_up_data = np.load('./data/arm_up_processor_' + str(i + 1) + '.npy')
            for data in arm_up_data:
                self.accumulative_samples.append(data[1])
                self.accumulative_targets.append(0)
                self.samples.append(data[1])
                self.targets.append(0)
        # print(np.array(self.samples).shape)
        if classifier_name == 'RandomForest':
            self._init_thread(self._random_forest.__wrapped__, args=(self,))
        elif classifier_name == 'MLP':
            self._init_thread(self._mlp.__wrapped__, args=(self,))
        elif classifier_name == 'SVC':
            self._init_thread(self._svc.__wrapped__, args=(self,))
        elif classifier_name == 'KNN':
            self._init_thread(self._knn.__wrapped__, args=(self,))
        elif classifier_name == 'AdaBoost':
            self._init_thread(self._adaboost.__wrapped__, args=(self,))

    def train(self, identifier_name):  # type: (Trainer, Any) -> None
        if self._is_open:
            for identifier in self._identifiers:
                # == rather than `is`: string identity is not guaranteed
                if identifier['name'] == identifier_name:
                    self.current_training_target = identifier['target']
                    identifier['training_count'] += 1
                    self._update_identifiers()
                    break
            if self.classifier_name == 'RandomForest':
                print('RandomForest')
                self._init_thread(target=self._random_forest)
            elif self.classifier_name == 'MLP':
                print('MLP')
                self._init_thread(target=self._mlp)
            elif self.classifier_name == 'SVC':
                print('SVC')
                self._init_thread(target=self._svc)
            elif self.classifier_name == 'KNN':
                print('KNN')
                self._init_thread(target=self._knn)

    def _training(func):
        @wraps(func)
        def wrapper(self):
            if not self._is_training:
                # Set training flag and status
                identifier_name = 'arm_up'
                for identifier in self._identifiers:
                    if identifier['target'] == self.current_training_target:
                        identifier_name = identifier['name']
                        break
                self.status.on_next('Training for {0}...'.format(identifier_name))
                self._is_training = True
                sleep(self.training_wait_time)

                self.status.on_next('Recording data...')
                self.recorded_data = []
                self.samples = []
                self.targets = []
                self._is_recording_data = True
                sleep(self.recording_time)
                self._is_recording_data = False
                np.save('./data/' + self.get_next_processor_label(identifier_name),
                        self.recorded_data)

                self.status.on_next('Scoring data...')
                score = self.cls.score(self.samples, self.targets)
                print('Current Score is {0}'.format(score))

                self.status.on_next('Fitting data...')
                start_time = time()
                func(self)
                training_time = time() - start_time
                print(training_time)
                print('Current Score is {0}'.format(
                    self.cls.score(self.samples, self.targets)))

                self.training_summary.append({
                    'score': score,
                    'identifier_name': identifier_name,
                    'time_elapse': training_time,
                    'total_sample_size': len(self.accumulative_samples),
                    'total_target_size': len(self.accumulative_targets),
                    'classifier_name': self.classifier_name
                })
                self._is_training = False
                self.status.on_next('Training Complete')

        return wrapper

    @_training
    def _random_forest(self):
        self.cls.n_estimators += 1
        self.cls.fit(self.accumulative_samples, self.accumulative_targets)
        self.is_trained = True

    @_training
    def _mlp(self):
        self.cls.fit(self.accumulative_samples, self.accumulative_targets)
        print(self.cls.loss_)
        self.is_trained = True

    @_training
    def _svc(self):
        self.cls.fit(self.accumulative_samples, self.accumulative_targets)
        self.is_trained = True

    @_training
    def _knn(self):
        self.cls.fit(self.accumulative_samples, self.accumulative_targets)
        self.is_trained = True

    @_training
    def _adaboost(self):
        self.cls.fit(self.accumulative_samples, self.accumulative_targets)
        self.is_trained = True

    def predict(self):  # type: (Trainer) -> None
        def _predict():
            if self.is_trained and not self._is_training:
                self.status.on_next('Predicting...')
                # try:
                #     if self.classifier_name == 'SVC':
                #         self.pca.fit_transform(self.)
                print(np.array(self.current_data).shape)
                train_data = np.array(self.current_data).reshape(1, -1)
                prediction = self.cls.predict(train_data)[0]
                for identifier in self._identifiers:
                    if prediction == identifier['target']:
                        self.prediction.on_next(identifier['name'])
                # except:
                #     print('An error occurred on prediction, prediction not performed!')

        if self._is_open:
            self._init_thread(target=_predict)

    # State Management
    def add_identifier(self, identifier_name):  # type: (Trainer, str) -> str
        identifier = {
            'name': identifier_name,
            'target': len(self._identifiers),
            'connector_index': 0,
            'processor_index': int(len(os.listdir('./data')) / 2),
            'training_count': 0
        }
        self._identifiers.append(identifier)
        self._update_identifiers()
        return identifier_name

    def _update_identifiers(self):  # type: (Trainer) -> None
        self.identifiers.on_next(self._identifiers)

    def get_next_connector_label(self, identifier_name):  # type: (Trainer, Any) -> Any
        for identifier in self._identifiers:
            if identifier['name'] == identifier_name:
                identifier['connector_index'] += 1
                self._update_identifiers()
                # str() added: concatenating an int here raised TypeError
                return identifier_name + '_connector_' + str(identifier['connector_index'])

    def get_next_processor_label(self, identifier_name):  # type: (Trainer, Any) -> Any
        for identifier in self._identifiers:
            if identifier['name'] == identifier_name:
                identifier['processor_index'] += 1
                self._update_identifiers()
                return identifier_name + '_processor_' + str(identifier['processor_index'])

    def close(self):  # type: (Trainer) -> None
        self._is_open = False
        sleep(0.5)
        if len(self.training_summary) > 0:
            with open('./neurosky/score.json', 'r') as file:
                saved_data = json.loads(file.read())
            saved_data.append(self.training_summary)
            data_to_save = json.dumps(saved_data)
            with open('./neurosky/score.json', 'w+') as file:
                file.write(data_to_save)
        for subscription in self.subscriptions:
            try:
                subscription.dispose()
            except DisposedException:
                pass
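
# Usage sketch (hypothetical; assumes the ./data directory the class itself
# relies on): feature vectors flow in through add_data(); while recording
# they become training samples, otherwise a prediction is emitted on the
# `prediction` Subject.
trainer = Trainer(classifier_name='MLP')
trainer.status.subscribe(lambda s: print('status:', s))
trainer.prediction.subscribe(lambda name: print('predicted:', name))
trainer.add_identifier('arm_up')
trainer.add_identifier('arm_down')
trainer.train('arm_up')      # records for recording_time seconds, then fits
trainer.add_data([0.1] * 8)  # outside recording, this triggers predict()
trainer.close()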
class ModbusPumpControl(SerialControl):
    serial: ModbusSerialClient

    def __init__(self, name="pump_control"):
        super().__init__(name)
        self.pump_control_config = self.config["pumpControl"]
        self.control_delay = self.pump_control_config["controlDelay"]
        self.tq = Subject()
        self.scheduler = NewThreadScheduler()
        self.update_subject = Subject()

        def on_next(job):
            try:
                job()
            except Exception as ex:
                self.logger.error(ex)

        self.tq.pipe(operators.observe_on(self.scheduler)).subscribe(
            on_next,
            lambda ex: self.logger.error(ex),
            lambda: self.serial.close())
        self.state = [0.0, 0.0]
        self.enable_remote_control(True)

    def create_serial_connection(self):
        self.pump_control_config = self.config["pumpControl"]
        port = self.pump_control_config["port"]
        baud = self.pump_control_config["baud"]
        timeout = self.pump_control_config["timeout"]
        client = ModbusSerialClient(method="rtu", port=port, baudrate=baud, timeout=timeout)
        client.connect()
        return client

    def enable_remote_control(self, enable=True):
        def _enable_remote_control(unit, enable=True):
            self.tq.on_next(lambda: self.serial.write_coil(
                0x1004, 1 if enable else 0, unit=unit))
            self.tq.on_next(lambda: time.sleep(self.control_delay))

        _enable_remote_control(
            self.pump_control_config["endpoint"]["slurry"]["address"], enable)
        _enable_remote_control(
            self.pump_control_config["endpoint"]["clear"]["address"], enable)

    def set_speed(self, slurry_speed, clear_speed):
        if slurry_speed is not None and slurry_speed != self.state[0]:
            self.ctrl_pump(
                self.pump_control_config["endpoint"]["slurry"]["address"],
                slurry_speed)
            self.logger.debug(f"Slurry pump speed updated: {slurry_speed}")
            self.state[0] = slurry_speed
        if clear_speed is not None and clear_speed != self.state[1]:
            self.ctrl_pump(
                self.pump_control_config["endpoint"]["clear"]["address"],
                clear_speed)
            self.logger.debug(f"Clear pump speed updated: {clear_speed}")
            self.state[1] = clear_speed

    def ctrl_pump(self, unit, speed):
        def start_pump(unit, enable=True):
            self.tq.on_next(lambda: self.serial.write_coil(
                0x1001, 1 if enable else 0, unit=unit))
            self.tq.on_next(lambda: time.sleep(self.control_delay))

        def direction(unit, direction=True):
            self.tq.on_next(lambda: self.serial.write_coil(
                0x1003, 65280 if direction else 0, unit=unit))
            self.tq.on_next(lambda: time.sleep(self.control_delay))

        def rate(unit, speed):
            # the float speed is split into two 16-bit register words
            buffer = struct.pack("f", math.fabs(speed))
            lb = struct.unpack("<H", buffer[0:2])[0]
            hb = struct.unpack("<H", buffer[2:4])[0]
            self.tq.on_next(lambda: self.serial.write_registers(
                0x3001, [hb, lb], unit=unit))
            self.tq.on_next(lambda: time.sleep(self.control_delay))

        # stop the pump first, otherwise the direction cannot be adjusted
        start_pump(unit, False)
        rate(unit, speed)
        if speed == 0:
            return
        direction(unit, speed > 0)
        start_pump(unit, True)

    def on_command(self, x):
        assert isinstance(x, ModbusPumpControlCommand)
        self.set_speed(x.slurry_pump, x.clear_pump)
        self.update_subject.on_next(x)

    def on_subscribe(self, observer, scheduler=None):
        self.update_subject.subscribe(observer, scheduler)
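
# A standalone sketch of the register encoding used in rate() above: the float
# speed is serialized, split into two 16-bit words, and written high word
# first. The round trip below verifies the encoding (the class packs with
# native "f"; "<f" is equivalent on little-endian hosts).
import struct

speed = 12.5
buffer = struct.pack("<f", speed)
lb, hb = struct.unpack("<HH", buffer)  # low word, high word
registers = [hb, lb]  # order sent via write_registers(0x3001, [hb, lb])

decoded = struct.unpack("<f", struct.pack("<HH", lb, hb))[0]
assert abs(decoded - speed) < 1e-6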
# Imports inferred from usage in this excerpt.
import datetime

import rx
import rx.operators as op
from rx.subject import Subject

# Operating on data streams
print('find all even numbers')
some_data = rx.of(1, 2, 3, 4, 5, 6, 7, 8)
some_data2 = rx.from_iterable(range(10, 20))
some_data.pipe(
    op.merge(some_data2),
    op.filter(lambda i: i % 2 == 0),
    # op.map(lambda i: i * 2)
).subscribe(lambda i: print(i))

# debounce-style operators: only values spaced further apart than the
# window are emitted
print('suppress duplicate emissions')
ob = Subject()
ob.pipe(
    op.throttle_first(3)
    # op.debounce(3)
).subscribe(
    on_next=lambda i: print(i),
    on_completed=lambda: print('Completed')
)

print('press enter to print, press any other key to exit')
while True:
    s = input()
    if s == '':
        ob.on_next(datetime.datetime.now().time())
    else:
        ob.on_completed()
        break