class OrderMatcher(Observer):
    def __init__(self, loop: asyncio.AbstractEventLoop,
                 collection: core.AgnosticCollection, channel: RobustChannel):
        super().__init__()
        self.loop = loop
        self.collection = collection
        self.channel = channel
        self.tradeNotif = Subject()
        self.tradeNotifier = self.tradeNotif.subscribe(
            TradeNotifier(self.loop, self.channel))

    def on_next(self, message: Tuple[IncomingMessage, dict]):
        async def asyncOrder():
            qmsg = message[0]
            order = message[1]
            orderCounter = order['orderCounter']
            orderCounter.labels(order['action'], order['stock'])
            await self.matchOrder(order)
            qmsg.ack()
            latency = time.time() - order['eventTime']
            eventLatency = order['eventLatency']
            eventLatency.labels('ORDER', 'SUCCESS').observe(latency)
            eventProgress = order['eventProgress']
            eventProgress.labels('ORDER').dec()

        self.loop.create_task(asyncOrder())

    # Match incoming order to existing orders in MongoDB and send any matched
    # order to the trade observable
    async def matchOrder(self, order):
        # Contra side: a BUY matches SELL orders priced at or below the bid,
        # a SELL matches BUY orders priced at or above the offer.
        dictFilter = {
            'price': {'$lte': order['price']}
        } if order['action'] == 'BUY' else {
            'price': {'$gte': order['price']}
        }
        dictFilter.update({
            'action': 'BUY' if order['action'] == 'SELL' else 'SELL',
            'stock': order['stock'],
            'selected': False,
            'status': {'$nin': ['2', 'R']}
        })
        # Price-time priority: best price first, oldest first on ties.
        dictSort = {
            'price': 1 if order['action'] == 'BUY' else -1,
            'timestamp': 1
        }
        orderContra = await self.collection.find_one_and_update(
            dictFilter, {'$set': {'selected': True}},
            sort=list(dictSort.items()),
            return_document=True)
        while orderContra is not None and order['status'] != '2':
            leaveVol = order['vol'] - order['cumVol']
            contraLeaveVol = orderContra['vol'] - orderContra['cumVol']
            matchVol = contraLeaveVol if contraLeaveVol <= leaveVol else leaveVol
            contraMatchVol = leaveVol if leaveVol <= contraLeaveVol else contraLeaveVol
            order['cumVol'] += matchVol
            orderContra['cumVol'] += contraMatchVol
            order['status'] = '2' if order['cumVol'] == order['vol'] else '1'
            orderContra['status'] = '2' if orderContra['cumVol'] == orderContra['vol'] else '1'
            await self.collection.find_one_and_update(
                {'orderId': order['orderId']},
                {'$set': {
                    'cumVol': order['cumVol'],
                    'status': order['status']
                }})
            tradeInit = {
                'action': order['action'],
                'account': order['account'],
                'stock': order['stock'],
                'price': order['price'],
                'vol': matchVol
            }
            self.tradeNotif.on_next(tradeInit)
            await self.collection.find_one_and_update(
                {'orderId': orderContra['orderId']},
                {'$set': {
                    'cumVol': orderContra['cumVol'],
                    'status': orderContra['status'],
                    'selected': False
                }})
            tradeContra = {
                'action': orderContra['action'],
                'account': orderContra['account'],
                'stock': orderContra['stock'],
                'price': orderContra['price'],
                'vol': contraMatchVol
            }
            self.tradeNotif.on_next(tradeContra)
            if order['status'] != '2':
                orderContra = await self.collection.find_one_and_update(
                    dictFilter, {'$set': {'selected': True}},
                    sort=list(dictSort.items()),
                    return_document=True)
        await self.collection.find_one_and_update(
            {'orderId': order['orderId']},
            {'$set': {'selected': False}})
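The body of the matching loop reduces to taking the smaller of the two remaining volumes and pushing one trade notification per side onto the Subject. A minimal, self-contained sketch of just that step (hypothetical order dicts; no MongoDB, AMQP, or Prometheus metrics):

# Sketch only: the core matching step with hypothetical in-memory orders.
from rx.subject import Subject

trade_notif = Subject()
trade_notif.subscribe(lambda t: print('trade:', t))

order = {'action': 'BUY', 'stock': 'ABC', 'price': 10.0, 'vol': 100, 'cumVol': 0}
contra = {'action': 'SELL', 'stock': 'ABC', 'price': 9.9, 'vol': 60, 'cumVol': 0}

# Matched volume is the smaller of the two remaining (leave) volumes.
match_vol = min(order['vol'] - order['cumVol'], contra['vol'] - contra['cumVol'])
for side in (order, contra):
    side['cumVol'] += match_vol
    side['status'] = '2' if side['cumVol'] == side['vol'] else '1'  # '2' = filled
    trade_notif.on_next({'action': side['action'], 'stock': side['stock'],
                         'price': side['price'], 'vol': match_vol})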
class Session(object):
    def __init__(self, parent):
        self.parent = parent
        self.removeHandler = None
        self.subject = Subject()
        self.count = 0

    def connect(self, observer):
        #
        # We connect the given observer to the subject first, before performing any kind
        # of initialization which will register an event handler. This is done to ensure
        # we don't have a time gap between adding the handler and connecting the user's
        # subject, e.g. when the ImmediateScheduler is used.
        #
        # [OK] Use of unsafe Subscribe: called on a known subject implementation.
        #
        connection = self.subject.subscribe(observer)
        self.count += 1
        if self.count == 1:
            try:
                self.initialize()
            except Exception as e:
                self.count -= 1
                connection.dispose()
                observer.onError(e)
                return Disposable.empty()

        def dispose():
            connection.dispose()
            with self.parent.gate:
                self.count -= 1
                if self.count == 0:
                    self.parent.scheduler.schedule(self.removeHandler.dispose)
                    self.parent.session = None

        return Disposable.create(dispose)

    def initialize(self):
        #
        # When the ref count goes to zero, no-one should be able to perform operations on
        # the session object anymore, because it gets nulled out.
        #
        assert self.removeHandler is None
        self.removeHandler = SingleAssignmentDisposable()

        #
        # Conversion code is supposed to be a pure function and shouldn't be run on the
        # scheduler, but the add handler call should. Notice the scheduler can be the
        # ImmediateScheduler, causing synchronous invocation. This is the default when
        # no SynchronizationContext is found (see QueryLanguage.Events.cs and search for
        # the GetSchedulerForCurrentContext method).
        #
        onNext = self.parent.getHandler(self.subject.onNext)
        self.parent.scheduler.scheduleWithState(onNext, self.addHandler)

    def addHandler(self, scheduler, onNext):
        try:
            removeHandler = self.parent.addHandler(onNext)
        except Exception as e:
            self.subject.onError(e)
        else:
            self.removeHandler.disposable = removeHandler

        #
        # We don't propagate the exception to the OnError channel upon Dispose. This is
        # not possible at this stage, because we've already auto-detached in the base
        # class Producer implementation. Even if we would switch the OnError and auto-
        # detach calls, it wouldn't work because the remove handler logic is scheduled
        # on the given scheduler, causing asynchrony. We can't block waiting for the
        # remove handler to run on the scheduler.
        #
        return Disposable.empty()
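The Session above hand-rolls reference counting so the underlying event handler is attached on the first subscription and detached after the last one. In current RxPY the same attach/detach-on-refcount behaviour can be sketched with publish() plus ref_count(); the print handlers below are stand-ins for parent.addHandler / the remove handler:

# Sketch only: refcounted handler attach/detach via publish() + ref_count().
import rx
from rx import operators as ops
from rx.disposable import Disposable

def subscribe(observer, scheduler=None):
    print('handler added')                               # stands in for addHandler
    observer.on_next('event')
    return Disposable(lambda: print('handler removed'))  # stands in for removeHandler

shared = rx.create(subscribe).pipe(ops.publish(), ops.ref_count())

d1 = shared.subscribe(print)   # 'handler added' fires once, on the first subscriber
d2 = shared.subscribe(print)   # a second subscriber reuses the same handler
d1.dispose()
d2.dispose()                   # 'handler removed' fires when the count reaches zero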
class DirectionSensor(EdgeThing):
    def __init__(self,
                 width,
                 height,
                 class_id,
                 class_label,
                 coords,
                 config_uri: str = '',
                 properties_str: str = '',
                 tag_group_dir: str = './definitions/TagGroup',
                 thing_class_dir: str = './definitions/ThingClass',
                 tag_groups: tuple = tuple(),
                 thing_cls: tuple = tuple()):
        super().__init__(config_uri, properties_str, tag_group_dir,
                         thing_class_dir, tag_groups, thing_cls)
        self.class_id = class_id
        self.class_label = class_label
        self.running = True
        self.total_frames = 0
        self.coords = coords
        self.results = {}
        self.confidence_threshold = 0.65
        self.polygon = None
        self.line = None
        self.trend = []
        self.most = 0
        self.counter = Counter()
        self.trend_window = 81
        self.width = width
        self.height = height
        self.__detection_box_cls = class_from_thing_input(
            self.dr, self.thing, 'DetectionBoxData')
        self.__direction_cls = class_from_thing_output(self.dr, self.thing,
                                                       'DirectionSensor')
        self.__input_subject = Subject()
        self.__input_subject.subscribe(observer=self.process_data)
        self.__output_subject = Subject()
        # NOTE: process_count is not defined in this excerpt; it is presumably
        # defined elsewhere in the original module and feeds process_direction
        # below, which handles the emitted DirectionData.
        self.__output_subject.subscribe(observer=self.process_count)
        self.trackers = ObjectTrackers()
        self.opposite = ""
        self.samples_quantity = 10
        self.threshold_displacement = 20
        self.config_env()

    def config_env(self):
        # Scale the configured line endpoints (given as percentages) to pixel
        # coordinates and buffer the line into a detection polygon.
        line = ((int(self.coords[0][0] * self.width / 100),
                 int(self.coords[0][1] * self.height / 100)),
                (int(self.coords[1][0] * self.width / 100),
                 int(self.coords[1][1] * self.height / 100)))
        line = get_line(line)
        self.polygon = line.buffer(5)
        self.polygon = get_polygon(list(self.polygon.exterior.coords))

    def get_direction(self, direction):
        # self.direction ('U'/'D' for vertical, 'L'/'R' for horizontal) is the
        # configured counting axis; it is expected to be set by the surrounding
        # application configuration.
        if self.direction in ['U', 'D']:
            if direction > 0:
                return 'D'
            elif direction < 0:
                return 'U'
        else:
            if direction > 0:
                return 'R'
            elif direction < 0:
                return 'L'
        return ''

    def process_data(self, data):
        if data.flow_state == FlowState.ALIVE and data.data.size() > 0:
            trackers = []
            for nvp in data.data:
                if nvp.name == 'data':
                    for inner_nvp in nvp.value.nvp_seq:
                        box_nvp_seq = inner_nvp.value.nvp_seq
                        box_data = Map()
                        for box_nvp in box_nvp_seq:
                            box_data[box_nvp.name] = getattr(
                                box_nvp.value,
                                tag_to_value_property_map[box_nvp.name])
                        res, trackable_obj = self.process_box(box_data)
                        if res:
                            trackers.append(trackable_obj)
            self.trackers.similarity(trackers)
            for track_id, v in self.trackers.trackers.items():
                if len(v.centroids) > self.samples_quantity:
                    centroids = v.centroids
                    if self.direction in ["U", "D"]:
                        y = [c[1] for c in centroids]
                        dire = sum(y[-2:]) / 2 - y[0]
                    else:
                        x = [c[0] for c in centroids]
                        dire = sum(x[-2:]) / 2 - x[0]
                    if math.sqrt(dire**2) < self.threshold_displacement:  # i.e. abs(dire)
                        continue
                    direction = self.get_direction(dire)
                    self.__output_subject.on_next(
                        DirectionData(data.flow_id, track_id, direction))

    def process_box(self, box_data):
        xmin = int(box_data['x1'] * self.width)
        xmax = int(box_data['x2'] * self.width)
        ymin = int(box_data['y1'] * self.height)
        ymax = int(box_data['y2'] * self.height)
        c_x = int((xmin + xmax) / 2.0)
        c_y = int((ymin + ymax) / 2.0)
        point = Point([c_x, c_y])
        if self.polygon.contains(point):
            return True, TrackableObject([xmin, ymin, xmax, ymax],
                                         box_data['obj_id'],
                                         centroids=[(c_x, c_y)])
        else:
            return False, None

    def process_direction(self, direction_data):
        direction_obj = self.__direction_cls()
        direction_obj.class_id = self.class_id
        direction_obj.class_label = self.class_label
        direction_obj.id = direction_data.id
        direction_obj.direction = direction_data.direction
        write_tag(self.thing,
                  'DirectionSensor',
                  as_nvp_seq(direction_obj),
                  flow=direction_data.flow_id)

    def run(self):
        log.info('Running')
        selector = self.thing.select('DetectionBoxData')
        while not self.terminate:
            samples = selector.read_iot_nvp(1000)
            for sample in samples:
                self.__input_subject.on_next(sample)
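The displacement test in process_data reduces to comparing the mean of the last two centroid coordinates with the first one along the configured axis, then thresholding the result. A standalone sketch with hypothetical centroid data (vertical counting mode):

# Sketch only: the direction/displacement test in isolation.
import math

centroids = [(100, 40), (101, 55), (99, 72), (100, 90)]   # (x, y) per frame
threshold_displacement = 20

y = [c[1] for c in centroids]
dire = sum(y[-2:]) / 2 - y[0]        # mean of the last two samples minus the first
if math.sqrt(dire ** 2) >= threshold_displacement:        # i.e. abs(dire)
    direction = 'D' if dire > 0 else 'U'
    print(direction)                 # -> 'D' (object moving down the frame)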
from rx.subject import Subject

c = Subject()
c.subscribe(on_next=lambda i: print("on_next called with {}".format(i)))
c.on_next("ss")
class RxImpTest(unittest.TestCase):

    TEST_TOPIC = '/topic/test'

    def setUp(self):
        self.inSubject = Subject()
        self.outSubject = Subject()
        self.rxImp = RxImp(self.inSubject, self.outSubject)

    def test_messagesSubscribeOnCall(self):
        # `scheduler` is presumably a module-level test scheduler defined
        # alongside these tests.
        mockObs = MockObserver(scheduler=scheduler)
        self.outSubject.pipe(
            map(lambda x: self.rxImp._mapIncoming(x)),
            map(lambda x: json.loads(x.payload))).subscribe(mockObs)
        self.outSubject.subscribe(self.inSubject)
        self.rxImp.observableCall(RxImpTest.TEST_TOPIC, 253).subscribe()
        time.sleep(0.5)
        self.assertTrue(len(mockObs.messages) == 1)
        self.assertTrue(mockObs.messages[0].value.value == 253)

    def test_simpleConnect(self):
        mockObs = MockObserver(scheduler=scheduler)

        def handler(args):
            return of(args)

        self.rxImp.registerCall(RxImpTest.TEST_TOPIC, lambda x: handler(x))
        self.outSubject.subscribe(self.inSubject)
        self.rxImp.observableCall(RxImpTest.TEST_TOPIC, 1).subscribe(mockObs)
        time.sleep(0.5)
        self.assertTrue(len(mockObs.messages) == 2)
        self.assertTrue(mockObs.messages[0].value.value == 1)

    def test_signalsComplete(self):
        mockObs = MockObserver(scheduler=scheduler)
        mockObs2 = MockObserver(scheduler=scheduler)
        subject = Subject()
        subject.subscribe(mockObs2)

        def handler(args):
            return interval(0.01).pipe(
                take(10), finally_action(lambda: subject.on_completed()))

        self.rxImp.registerCall(RxImpTest.TEST_TOPIC, lambda x: handler(x))
        self.outSubject.subscribe(self.inSubject)
        self.rxImp.observableCall(RxImpTest.TEST_TOPIC,
                                  1).pipe(take(5)).subscribe(mockObs)
        time.sleep(0.1)
        print(mockObs.messages)
        self.assertTrue(len(mockObs.messages) == 6)
        print(mockObs2.messages)
        self.assertTrue(len(mockObs2.messages) == 1)

    def test_ordersMessages(self):
        mockObs = MockObserver(scheduler=scheduler)
        self.outSubject.pipe(
            map(lambda x: self.rxImp._mapIncoming(x)),
            map(lambda x: json.loads(x.payload))).subscribe(mockObs)

        def handleCall(msg):
            nextMsg = RxImpMessage(RxImpTest.TEST_TOPIC, 0,
                                   RxImpMessage.STATE_NEXT, 253, msg.id)
            cmplMsg = RxImpMessage(RxImpTest.TEST_TOPIC, 1,
                                   RxImpMessage.STATE_COMPLETE, None, msg.id)
            # Deliver the COMPLETE message before the NEXT message to check
            # that incoming messages are re-ordered by their counter.
            self.inSubject.on_next(self.rxImp._mapOutgoing(cmplMsg))
            self.inSubject.on_next(self.rxImp._mapOutgoing(nextMsg))

        self.rxImp.observableCall(RxImpTest.TEST_TOPIC, 253).subscribe()
        time.sleep(0.5)
        self.assertTrue(len(mockObs.messages) == 1)
        self.assertTrue(mockObs.messages[0].value.value == 253)
import rx
from rx.subject import Subject, AsyncSubject, BehaviorSubject, ReplaySubject

# A Subject is both an Observer and an Observable
print('--------Subject---------')
subject = Subject()
subject.on_next(1)
subject.subscribe(lambda i: print(i))
subject.on_next(2)
subject.on_next(3)
subject.on_next(4)
subject.on_completed()

# A ReplaySubject buffers all values; if a buffer size is given, only the most
# recent values are kept
print('--------ReplaySubject---------')
subject = ReplaySubject()
subject.on_next(1)
subject.subscribe(lambda i: print(i))
subject.on_next(2)
subject.on_next(3)
subject.on_next(4)
subject.on_completed()

# A BehaviorSubject caches the last emitted value, unless the Observable has
# already completed
print('--------BehaviorSubject---------')
subject = BehaviorSubject(0)
subject.on_next(1)
subject.on_next(2)
subject.subscribe(lambda i: print(i))
subject.on_next(3)
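AsyncSubject is imported above but never exercised; a small demo in the same style (an AsyncSubject replays only its final value, and only after on_completed() is called):

# AsyncSubject emits only the last value, and only once the stream completes
print('--------AsyncSubject---------')
subject = AsyncSubject()
subject.on_next(1)
subject.subscribe(lambda i: print(i))
subject.on_next(2)
subject.on_next(3)
subject.on_completed()  # the subscriber now receives 3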
video_subject = Subject()


def sender(kind, data):
    # print('simulated send', kind, data)
    video_subject.on_next((kind, data))


# ----- save the recording -----
def fname_record1():
    # f'{datetime.now().isoformat()}'
    dt_str = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    return f'手工解码数据源{id_vehicle}_{dt_str}'  # "manually decoded data source"


# subscribe
# ---------- save ----------
recorder = Recorder(fname_record1, event_need_record)
video_subject.subscribe(lambda args: recorder.on_frame(*args))

# ---------- playback ----------
video_subject.pipe(
    # processing: keep only image data
    ops.filter(lambda args: args[0] == 'h264'),
    # keep only the frame part
    ops.map(lambda args: args[1])).subscribe(show_frame264)


async def client_mock():
    '''Simulate client actions: start recording 30 seconds after the live
    stream starts, then stop recording 30 seconds later.'''
    print('client actions')
    await asyncio.sleep(20)
    print('simulating the user starting a recording')
    event_need_record._loop.call_soon_threadsafe(event_need_record.set)
class Axis:
    """
    Representation of an axis which can read values or write values.

    Axes have fixed schema: in this sense, you can always expect to receive the
    same shape data back from the axis. In most cases, axes record single
    points, but you can produce any Python primitive, as well as `np.ndarray`s
    and `pd.DataFrame`s if it is appropriate. See also the schema module for
    type hinting Arrays.

    Axes are fundamentally asynchronous, since they represent actual hardware
    resources that exist over I/O. Additionally, measurements may take finite
    time, and in the case of event stream axes, you may not know when values
    will be produced.
    """

    raw_value_stream: Optional[Subject]
    _type_def: TypeDefinition

    async def shutdown(self):
        raise NotImplementedError

    def collect_state(self):
        return None

    def receive_state(self, state):
        pass

    def append_point_to_history(self, point):
        self.collected_xs.append(point["time"])
        self.collected_ys.append(point["value"])

    def reset_history(self):
        self.collected_xs = []
        self.collected_ys = []

    def __init__(self, name: str, schema: type):
        self.name = name
        self.schema = schema
        self._type_def = TypeDefinition.from_type(schema)
        self.raw_value_stream = Subject()

        # for scalar schemas we can provide a stream of values
        if schema in (float, int):
            self.collected_xs = []
            self.collected_ys = []
            self.raw_value_stream.subscribe(self.append_point_to_history)

    def emit(self, value):
        if self.raw_value_stream:
            self.raw_value_stream.on_next(
                {"value": value, "time": datetime.datetime.now().timestamp()}
            )

    @property
    def type_def(self):
        """
        We allow overriding this because in the case of subaxes the type is
        derived from the parent.
        """
        return self._type_def

    async def trigger(self):
        return

    async def settle(self):
        raise NotImplementedError

    # We use a two level API in order to make the code here
    # more straightforward. *_internal methods are virtual
    # and set the internal behavior for an axis
    # the high level API provides synchronous (if available)
    # and asynchronous bindings which also handle emitting
    # values for subscribers
    async def write_internal(self, value):
        raise NotImplementedError

    async def read_internal(self) -> Any:
        raise NotImplementedError

    async def sync_write_internal(self, value):
        raise NotImplementedError

    async def sync_read_internal(self) -> Any:
        raise NotImplementedError

    # in general, you do not need to implement
    # the top level methods, unless you need to control how
    # values are emitted. You should be able to implement the
    # low level API above and be a client to the high level API
    # below
    async def write(self, value):
        value = await self.write_internal(value)
        self.emit(value)
        return value

    async def read(self):
        value = await self.read_internal()
        self.emit(value)
        return value

    def sync_read(self):
        value = self.sync_read_internal()
        self.emit(value)
        return value

    def sync_write(self, value):
        value = self.sync_write_internal(value)
        self.emit(value)
        return value
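The scalar-schema history mechanism above is just a Subject carrying timestamped points; a minimal standalone sketch of that pattern, outside the Axis class and without the TypeDefinition machinery:

# Sketch only: emit timestamped points into a Subject and collect a history.
import datetime
from rx.subject import Subject

raw_value_stream = Subject()
collected_xs, collected_ys = [], []

def append_point_to_history(point):
    collected_xs.append(point["time"])
    collected_ys.append(point["value"])

raw_value_stream.subscribe(append_point_to_history)

for value in (0.1, 0.2, 0.3):
    raw_value_stream.on_next(
        {"value": value, "time": datetime.datetime.now().timestamp()})

print(collected_ys)   # [0.1, 0.2, 0.3]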
class ModbusPumpControl(SerialControl):
    serial: ModbusSerialClient

    def __init__(self, name="pump_control"):
        super().__init__(name)
        self.pump_control_config = self.config["pumpControl"]
        self.control_delay = self.pump_control_config["controlDelay"]
        self.tq = Subject()
        self.scheduler = NewThreadScheduler()
        self.update_subject = Subject()

        def on_next(job):
            try:
                job()
            except Exception as ex:
                self.logger.error(ex)

        # All serial writes are pushed into tq as callables and executed on the
        # scheduler thread, keeping Modbus I/O off the caller's thread.
        self.tq.pipe(operators.observe_on(self.scheduler)).subscribe(
            on_next, lambda ex: self.logger.error(ex),
            lambda: self.serial.close())
        self.state = [0.0, 0.0]
        self.enable_remote_control(True)

    def create_serial_connection(self):
        self.pump_control_config = self.config["pumpControl"]
        port = self.pump_control_config["port"]
        baud = self.pump_control_config["baud"]
        timeout = self.pump_control_config["timeout"]
        client = ModbusSerialClient(method="rtu",
                                    port=port,
                                    baudrate=baud,
                                    timeout=timeout)
        client.connect()
        return client

    def enable_remote_control(self, enable=True):
        def _enable_remote_control(unit, enable=True):
            self.tq.on_next(lambda: self.serial.write_coil(
                0x1004, 1 if enable else 0, unit=unit))
            self.tq.on_next(lambda: time.sleep(self.control_delay))

        _enable_remote_control(
            self.pump_control_config["endpoint"]["slurry"]["address"], enable)
        _enable_remote_control(
            self.pump_control_config["endpoint"]["clear"]["address"], enable)

    def set_speed(self, slurry_speed, clear_speed):
        if slurry_speed is not None and slurry_speed != self.state[0]:
            self.ctrl_pump(
                self.pump_control_config["endpoint"]["slurry"]["address"],
                slurry_speed)
            self.logger.debug(f"Slurry pump speed updated: {slurry_speed}")
            self.state[0] = slurry_speed
        if clear_speed is not None and clear_speed != self.state[1]:
            self.ctrl_pump(
                self.pump_control_config["endpoint"]["clear"]["address"],
                clear_speed)
            self.logger.debug(f"Clear pump speed updated: {clear_speed}")
            self.state[1] = clear_speed

    def ctrl_pump(self, unit, speed):
        def start_pump(unit, enable=True):
            self.tq.on_next(lambda: self.serial.write_coil(
                0x1001, 1 if enable else 0, unit=unit))
            self.tq.on_next(lambda: time.sleep(self.control_delay))

        def direction(unit, direction=True):
            self.tq.on_next(lambda: self.serial.write_coil(
                0x1003, 65280 if direction else 0, unit=unit))
            self.tq.on_next(lambda: time.sleep(self.control_delay))

        def rate(unit, speed):
            buffer = struct.pack("f", math.fabs(speed))
            lb = struct.unpack("<H", buffer[0:2])[0]
            hb = struct.unpack("<H", buffer[2:4])[0]
            self.tq.on_next(lambda: self.serial.write_registers(
                0x3001, [hb, lb], unit=unit))
            self.tq.on_next(lambda: time.sleep(self.control_delay))

        # stop pump first otherwise cannot adjust direction
        start_pump(unit, False)
        rate(unit, speed)
        if speed == 0:
            return
        direction(unit, speed > 0)
        start_pump(unit, True)

    def on_command(self, x):
        assert isinstance(x, ModbusPumpControlCommand)
        self.set_speed(x.slurry_pump, x.clear_pump)
        self.update_subject.on_next(x)

    def on_subscribe(self, observer, scheduler=None):
        self.update_subject.subscribe(observer, scheduler=scheduler)
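All Modbus writes above are funnelled through self.tq so they run sequentially away from the caller's thread. The same pattern in isolation, with hypothetical print/sleep jobs standing in for serial writes:

# Sketch only: a Subject used as a work queue, observed on a scheduler thread.
import time
from rx import operators
from rx.subject import Subject
from rx.scheduler import NewThreadScheduler

tq = Subject()

def on_next(job):
    try:
        job()
    except Exception as ex:
        print('job failed:', ex)

tq.pipe(operators.observe_on(NewThreadScheduler())).subscribe(on_next)

tq.on_next(lambda: print('write coil'))
tq.on_next(lambda: time.sleep(0.1))        # stands in for control_delay
tq.on_next(lambda: print('write registers'))
time.sleep(0.5)                             # give the worker thread time to drain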