def _load_fun_parameters(fun, is_method=False): signature = inspect.signature(fun) if signature.return_annotation == inspect.Signature.empty: raise ValueError('Must supply return value') params = OrderedDict() for i, (_, param) in enumerate(signature.parameters.items()): param: inspect.Parameter if is_method and i == 0: continue if param.annotation == inspect.Parameter.empty: trc().debug('%s', signature) raise ValueError(f'No type for `{param.name}`') if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD: params[param.name] = param.annotation elif param.kind == inspect.Parameter.VAR_POSITIONAL: params[param.name] = typing.List[param.annotation] elif param.kind == inspect.Parameter.KEYWORD_ONLY: params[param.name] = param.annotation elif param.kind == inspect.Parameter.VAR_KEYWORD: params[param.name] = typing.Dict[str, param.annotation] else: raise ValueError(f'Not supported: {param}') return params, signature.return_annotation
async def main_client(addr):
    """Demo client: connect, fan a get/put out over two channels, log replies.

    Failures are logged rather than propagated.  The original bare
    ``except:`` also swallowed ``CancelledError`` and ``KeyboardInterrupt``,
    which breaks task cancellation in async code — narrowed to ``Exception``.
    """
    try:
        trc().debug('%s', 'created')
        await asyncio.sleep(0.33)
        trc().debug('%s', 'connecting')
        async with await Client.connect(
                *addr, 'rate_limiter', '0.1.0',
                {'Authorize': 'Bearer: 123123'}) as client:
            client: Client
            chan1 = client.channel()
            chan2 = client.channel()
            # Issue both requests concurrently, then flush both channels.
            xx = await asyncio.gather(chan1.write(Request('get')),
                                      chan2.write(Request('put', {'a': 'b'})))
            await asyncio.gather(chan1.sync(), chan2.sync())
            trc('xx').debug('%s', xx)
            yy = await asyncio.gather(chan1.read(), chan2.read())
            trc('yy').debug('%s', yy)
            await asyncio.sleep(1)
            yy = await asyncio.gather(chan1.close(), chan2.close())
    except Exception:
        trc().exception('$%')
def test_api_1(self):
    """Round-trip a Packet through the JSON into/from mapper pair."""
    packet = Packet(None, Service('ratelimiter'))
    walker_from = Walker(JSON_FROM)
    walker_into = Walker(JSON_INTO)
    fac_from = walker_from.resolve(Packet)
    fac_into = walker_into.resolve(Packet)
    json_from, = walker_from.mappers(fac_from)
    json_into, = walker_into.mappers(fac_into)

    serialized = json_into.serialize(packet)
    trc('test_api_1').debug('%s', serialized)
    restored = json_from.serialize(serialized)
    trc('test_api_2').debug('%s', restored)

    self.assertEqual(
        OrderedDict([('type', 'service'), ('stream', None), ('body', {
            'version': '0.1.0',
            'name': 'ratelimiter',
            'proto': '0.1.0'
        })]),
        serialized)
    self.assertEqual(packet, restored)
def push_push_assign(self):
    """Greedily pair pending jobs with workers that have spare capacity.

    Each pairing updates the job/worker bookkeeping tables and queues the
    job on ``jobs_pending_assign``; finally the assign push is re-armed.
    """
    def spread(iter_obj: Iterable[Tuple[str, int]]):
        # Yield each worker id once per unit of spare capacity.
        for wid, capa in iter_obj:
            for i in range(capa):
                yield wid

    def eat(obj: Deque[RPCKey]):
        # Destructively drain the pending-job deque (pop from the right).
        while len(obj):
            try:
                yield obj.pop()
            except IndexError:
                return

    # Spare capacity per worker, clamped at zero for overloaded workers.
    w_caps = ((wid, max(
        0, self.workers[wid].load.capacity - len(self.workers_jobs[wid])))
              for wid, wst in self.workers.items())

    # zip() advances the left (capacity) side first and stops without
    # touching the right side once it is exhausted — so eat() must stay
    # lazy: jobs are only popped while a free slot actually exists.
    jobs_workers = zip(spread(w_caps), eat(self.jobs_pending))

    for wid, jid in jobs_workers:
        trc('1').debug('%s %s', wid, jid)
        self.jobs[jid].started = time_now()
        self.jobs[jid].attempts += 1
        self.jobs_workers[jid] = wid
        self.workers_jobs[wid].append(jid)
        self.jobs_pending_assign.push(SchedKey.now(jid))

    reset(self.push_assign, 0)
def _job_new(self, jid: RPCKey, jreq: RequestType):
    """Register a freshly received job and queue it for assignment."""
    trc('0').debug('%s %s', jid, jreq)
    state = JobState(jreq, time_now())
    self.jobs[jid] = state
    self.jobs_pending.push(jid)
    # Kick the assignment pass immediately.
    reset(self.push_push_assign, 0)
def _job_clean(self, jid: RPCKey) -> str:
    """Detach job *jid* from its worker and return that worker's id."""
    trc('0').error('%s', jid)
    wid = self.jobs_workers.pop(jid)
    self.workers_jobs[wid].remove(jid)
    # Freed a slot — re-run the assignment pass.
    reset(self.push_push_assign, 0)
    return wid
def send(self, packet: Packet):
    """Push *packet* to the socket registered for its address.

    Unknown addresses are logged (with a dump of known clients) and the
    packet is dropped.  A broken pipe evicts the client.
    """
    # todo: KeyError if the address had been subsequently removed
    addr = _norm_addr(packet.addr)

    if addr not in self._fd_clients:
        log_tr_net_raw_err.error('Drop %s %s %s', self, addr, packet)
        for known in self._fd_clients.keys():
            log_tr_net_raw_err.getChild('dump').error('%s', known)
        _log_called_from(log_tr_net_raw_err.getChild('tb'))
        return

    sock = self._fd_clients[addr]
    log_tr_net_raw_out.debug('[%d] %s %s', len(packet.data), addr,
                             packet.data)
    try:
        sent = sock.send(packet.pack())
        trc('return').debug('%s', sent)
        return sent
    except BrokenPipeError as e:
        # Peer is gone: drop the client and record the lost payload.
        self._clean_client_id(addr, str(e))
        log_tr_net_raw_err.error('[%d] %s %s', len(packet.data), addr,
                                 packet.data)
def parse_response(self):
    """Build the request envelope, invoke the bound method, return its result."""
    envelope = bottle_envelope(self.operation, request)
    trc('0').debug('%s', envelope)
    result = self.method.call(self.endpoint, envelope)
    trc('1').debug('%s', result)
    return result
def _test_worker_startup(self, par_conf: WorkerConf):
    """Boot a worker, poll the broker until every slot reports free,
    then terminate the worker and assert a clean exit."""
    conf = ClusterConf()
    ub = 'udp://127.0.0.1:5678'
    uw = 'udp://127.0.0.1:5789'
    pidw = self._worker(
        uw, conf, ub, worker_function, None, par_conf=par_conf
    )
    with self.ps.timer(20.) as tr, client_transport(
            Worker[Request, Response], uw, ClientConfig(horz=False)) as br:
        x: Optional[WorkerMetric] = None
        slept = 0.
        expected_free = par_conf.processes * par_conf.threads
        # while the WorkerBroker is starting up, it's being bombarded
        # by loads of messages from Workers.
        # it's thus missing the message
        while True:
            try:
                x = br.metrics()
            except TimeoutError:
                x = None
            if x is not None and x.workers_free >= expected_free:
                trc('slept1').error('%s', slept)
                break
            slept = tr.sleep(0.3)
    pidw.send_signal(SIGTERM)
    self.assertEqual(wait_all(pidw, max_wait=2), [0])
def test_gen_2(self):
    """Serialize the auto-generated Executioner API spec and render it as YAML."""
    serialized = OAS(paths=auto_actor(Executioner)).serialize()
    trc('0').debug('%s', pformat(serialized))
    rendered = generate_yaml(serialized)
    trc('1').debug('%s', '\n' + rendered)
def bk_done(self, brid: RPCKey, jid: RPCKey, ok: bool,
            res: Optional[ResponseType] = None):
    """Broker-side completion callback from a worker.

    Acknowledges the worker first, then validates the epoch / job /
    owning-worker triple before recording success or resigning the job.
    Guard order is load-bearing: the ack must go out even for stale or
    duplicate notifications.
    """
    w = service(Worker[self.cls_req, self.cls_res], sender())
    # Always acknowledge, even if the result is discarded below.
    w.done_ack(brid, jid)

    if self._brid_check(brid):
        return
    if jid not in self.jobs:
        trc('1').error('%s', jid)
        return
    if self.jobs[jid].finished:
        # Duplicate completion — the job is already settled.
        trc('2').warning('%s', jid)
        return

    # Whatever happens next, the job is no longer pending either way.
    if jid in self.jobs_pending_assign:
        del self.jobs_pending_assign[jid]
    if jid in self.jobs_pending_resign:
        del self.jobs_pending_resign[jid]

    # Ignore completions from a worker that no longer owns the job.
    if jid not in self.jobs_workers or sender() != self.jobs_workers[jid]:
        return

    self._worker_pings(brid, sender())

    if ok:
        self._job_done(jid, res)
    else:
        self._job_resign(jid)
def get_metrics():
    """Fetch and log broker metrics over a short-lived front transport."""
    cfg = ClientConfig(horz=False, timeout_total=3)
    with client_transport(Broker[Request, Response], ub_front, cfg) as mr:
        mr: Broker[Request, Response]
        metrics = mr.metrics()
        trc().error('%s', metrics)
        return metrics
def bk_announce(self, wbrid: Optional[RPCKey], load: WorkerLoad):
    """Handle a worker heartbeat: register newcomers, resync stale broker
    ids, refresh capacity, and fan out configuration changes."""
    wid = sender()
    trc('1').debug('%s', wid)

    if wid not in self.workers:
        self._worker_new(RPCKey.new(), wid, load)

    brid = self.workers_brids[wid]

    if wbrid != brid:
        # Worker announced under an old/unknown epoch — push the current one.
        s = service(Worker[self.cls_req, self.cls_res], wid,
                    ClientConfig(timeout_total=1))
        s.registered(self.workers_brids[wid])

    # todo only ping workers through announce
    wst = self.workers[wid]
    capacity_changed = wst.load.capacity != load.capacity
    wst.load.capacity = load.capacity
    self._worker_pings(brid, wid)

    if capacity_changed:
        self._worker_conf_changed()
def test_decl_1(self):
    """Exercise @rpc/@infer declarations over the return shapes
    (async-iterable, iterable, scalar) and a kitchen-sink signature,
    then collect them into method maps.

    NOTE(review): ends in ``assert False`` — looks like a deliberate
    fail-to-show-logs debugging aid; confirm before relying on it.
    """
    @rpc()
    @infer(JSON_INTO, JSON_FROM)
    async def func1(a: int, b: str) -> AsyncIterable[Header]:
        pass

    @rpc()
    @infer(JSON_INTO, JSON_FROM)
    def func2(a: int, b: str) -> Iterable[Header]:
        pass

    @rpc()
    @infer(JSON_INTO, JSON_FROM)
    def func3(a: int, b: str) -> Header:
        pass

    @rpc()
    @infer(JSON_INTO, JSON_FROM)
    def func4(a: int, b: str) -> AsyncIterable[Header]:
        pass

    # Undecorated/unannotated control case.
    def func5(a: int, b) -> AsyncIterable[Header]:
        pass

    @rpc(is_method=False)
    @infer(JSON_INTO, JSON_FROM)
    def func6(a: int, *b: int, d: int = 6,
              **kwargs: Header) -> AsyncIterable[Header]:
        pass

    # signature(func6).bind() -> raises TypeError

    def bind_pars(fun, *args, **kwargs):
        # Bind call arguments against fun's signature, defaults applied.
        r = signature(fun).bind(*args, **kwargs)
        r.apply_defaults()
        return r.arguments

    args = bind_pars(func6, 1, 2, 3, k={'name': 'b', 'value': None}, d=5)

    map_argsm1, = get_serializers(JSON_FROM, Args(Methodist.meth1))
    map_args6, = get_serializers(JSON_FROM, Args(func6))

    trc().debug('%s', args)
    trc().debug('%s', map_args6.serialize(args))
    # trc().debug('%s %s', fac_args6, fac_ret6)
    # Serialized args must still bind against the original signature.
    trc().debug('%s', signature(func6).bind(**map_args6.serialize(args)))

    rr = collect(Methodist).to_methods(JSON_INTO, JSON_FROM)
    rr2 = collect(Methodist()).to_methods(JSON_INTO, JSON_FROM)

    trc().debug('%s', rr)
    trc().debug('%s', rr2)

    assert False
async def __aenter__(self):
    """Enter the client context.

    - create a channel for reading data
    - return a ClientChanneler
    :return:
    """
    # NOTE: the docstring above was previously a bare string expression
    # placed AFTER the first statement, so it was silently discarded and
    # never became the method's __doc__.  Moved to the top.
    trc().debug('')
    return self
def main(**kwargs):
    """Entry point: log the argspec of a sample coroutine, then run the app."""
    async def googly():
        pass

    spec = inspect.getfullargspec(googly)
    trc('brute').debug('%s', spec)
    asyncio.run(main_async())
def from_url(cls, url) -> 'Transport':
    """Instantiate the Transport registered for *url*'s scheme.

    :raises ValueError: when no transport is registered for the scheme.
    """
    parsed: ParseResult = urlparse(url)
    trc().debug('%s %s', url, parsed)
    if parsed.scheme not in TRANSPORT_MAP:
        raise ValueError(
            f'Could not find transport for scheme `{parsed.scheme}`')
    return TRANSPORT_MAP[parsed.scheme](url)
def gc(self) -> float:
    """Age every worker by one missed ping; drop those that hit zero.

    Returns the heartbeat interval (float) so the caller can re-arm
    this garbage-collection task.
    """
    for wid in list(self.workers.keys()):
        self.workers[wid].pings_remaining -= 1
        trc().debug('%s %s', wid, self.workers[wid])
        if self.workers[wid].pings_remaining <= 0:
            self._worker_lost(wid)
    # NOTE(review): the flattened original made the return's nesting
    # ambiguous; placed after the loop (full sweep, then return interval)
    # — confirm against history.
    return self.conf.heartbeat
def _get_rpc(self, name): if name not in self.rpcs: _log_traceback(trc('1')) trc('0').error('%s %s %s %s', self.chan.origin, self.group, name, self.rpcs.keys()) raise InvalidFingerprintError('name') rpc_def = self.rpcs[name] return rpc_def
def _worker_new(self, wbrid: RPCKey, wid: Origin, load: WorkerLoad):
    """Register a previously unseen worker with a fresh ping budget."""
    trc().debug('%s %s %s', wbrid, wid, load)
    # Start from zero occupancy; only the announced capacity is trusted.
    fresh_load = WorkerLoad(occupied=0, capacity=load.capacity)
    self.workers[wid] = WorkerState(self.conf.max_pings, fresh_load)
    self.workers_jobs[wid] = []
    self.workers_brids[wid] = wbrid
    self._worker_conf_changed()
async def __aexit__(self, *args):
    """Tear down the channel: cancel the pump tasks and close both streams.

    The writer is now closed even if closing the reader raises
    (try/finally), so a failure on one side can no longer leak the other.
    """
    trc().debug('')
    self.sender_task.cancel()
    self.receiver_task.cancel()
    self.sender_task = None
    self.receiver_task = None
    try:
        await self.reader.__aexit__(*args)
    finally:
        await self.writer.__aexit__(*args)
def test_body_1(self):
    """Round-trip a length-prefixed JSON binary frame in both directions:
    varint length + JSON body + trailing bytes -> BinaryNext, and back."""
    fac_from = AnyAnyWith(
        BinaryFromVarInt(),
        AnyAnyWith(
            BinaryFromBytes(
                AnyAnyAttr('val', This(int)),
                AnyAnyAttr('next', This())
            ),
            AnyFromStruct(
                [
                    AnyAnyField(
                        'val',
                        BinaryFromJson(
                            AnyAnyAttr('val')
                        )
                    ),
                    AnyAnyField('next', AnyAnyAttr('next'))
                ],
                cls=BinaryNext
            )
        )
    )
    fac_into = AnyAnyWith(
        BinaryIntoJson(This()),
        BinaryIntoConcat([
            AnyAnyWith(
                AnyAnyLen(This()),
                BinaryIntoVarInt(),
            ),
            This()
        ])
    )
    bin_from, = Walker(BINARY_FROM).mappers(fac_from)
    bin_into, = Walker(BINARY_INTO).mappers(fac_into)

    x = bin_from.serialize(b'\x02{}abc')
    # Fixed: pass the value as a lazy %-argument (as every other log call
    # in this file does) rather than as the log message itself.
    trc('0').debug('%s', x)
    self.assertEqual(
        BinaryNext({}, b'abc'),
        x,
    )

    # A length prefix larger than the buffer must report BUFFER_NEEDED.
    try:
        x = bin_from.serialize(b'\x55{}abc')
    except SerializationError as e:
        self.assertEqual(BUFFER_NEEDED, e.reason)

    self.assertEqual(
        b'\x04null',
        bin_into.serialize(None)
    )
def _worker_lost(self, wid: Origin):
    """Forget a dead worker, resigning every job it was running."""
    trc().debug('%s', wid)
    for jid in list(self.workers_jobs[wid]):
        self._job_resign(jid)
    for table in (self.workers, self.workers_jobs, self.workers_brids):
        del table[wid]
    self._worker_conf_changed()
def done_ack(self, brid: RPCKey, jid: RPCKey):
    """Worker-side handler for the broker's acknowledgement of a done report.

    A mismatched broker id means we are talking to a new broker epoch:
    schedule an immediate re-announce and ignore the ack.  Otherwise drop
    the cached result and the pending-done entry for *jid*.
    """
    if brid != self.brid:
        trc('brid').error('%s != %s %s', brid, self.brid, jid)
        # Re-announce straight away so the new broker learns about us.
        reset(self.push_announce, 0)
        return
    if jid in self.jobs_res:
        self._evict(jid)
    if jid in self.jobs_pending_done:
        del self.jobs_pending_done[jid]
def bottle_connect(oas: OAS, endpoints: Endpoints, methods: Methods,
                   app: Bottle):
    """Mount every OAS operation on *app* as a BottleEndpoint callback."""
    for path_item in oas.paths:
        # OAS `{param}` syntax -> bottle's route-parameter syntax.
        bottle_path = convert_path_parameters(path_item.path)
        for http_method, operation in path_item.operations.items():
            trc().debug('%s', bottle_path)
            op_id = operation.operation_id
            app.route(bottle_path, method=http_method,
                      callback=BottleEndpoint(operation, endpoints[op_id],
                                              methods[op_id]))
def actor_create(el: EventLoop, sr: SignalRunner, cls, cls_inst,
                 bind_urls: Dict[str, str], horizon_each=60.,
                 name: Optional[str] = None) -> 'Actor':
    """Assemble an Actor from *cls*: one transport per group, then the
    RPC, regular, socket-IO and signal runners bound to *cls_inst*."""
    if isinstance(bind_urls, list):
        # Legacy call style: a single-URL list maps to the default group.
        assert len(bind_urls) == 1, bind_urls
        bind_urls = {DEFAULT_GROUP: bind_urls[0]}

    rpcs = RPCEntrySet.from_cls(cls).bind(cls_inst)
    regs = RegularEntrySet.from_cls(cls).bind(cls_inst)
    sios = SocketIOEntrySet.from_cls(cls).bind(cls_inst)
    sigs = SignalEntrySet.from_cls(cls).bind(cls_inst)

    transport_groups = sorted(bind_urls.keys())
    rpc_groups = sorted(rpcs.groups())

    # Every group that declares RPCs must have a transport bound to it.
    unbound = set(rpc_groups) - set(transport_groups)
    assert len(unbound) == 0, unbound

    act = Actor(el, name=name)

    for grp in transport_groups:
        trc('tran').debug('%s %s', grp, bind_urls[grp])
        tran_ref = act.add_transport(grp, bind_urls[grp])
        if grp not in rpc_groups:
            continue
        grp_rpcs = rpcs.by_group(grp)
        act.add(RPCGroupRunner(act, grp, tran_ref, grp_rpcs.serde, grp_rpcs,
                               horizon_each=horizon_each))

    act.add(RegularRunner(act, el, regs), 'regular')
    act.add(SocketIORunner(act, el, sios))
    act.add(sr.add(act, sigs.to_signal_map()), 'signal')
    return act
def resign(self, jid: RPCKey, reason: Optional[str] = None) -> bool:
    """Queue job *jid* for resignation from its current worker.

    :param jid: job to resign.
    :param reason: free-form note, logged only when the request is refused.
    :return: False when the job is unknown or has no assigned worker;
        True when the resignation was scheduled.
    """
    if jid not in self.jobs:
        trc('0').debug('%s %s', jid, reason)
        return False
    if jid not in self.jobs_workers:
        trc('1').debug('%s %s', jid, reason)
        return False
    self.jobs_pending_resign.push(SchedKey.now(jid))
    # Trigger the resign push immediately.
    reset(self.push_resign, 0)
    return True
def auto_any(cls) -> schema.Schema:
    """Map a Python type to its OpenAPI schema object.

    Handles primitives, date/time types, typed containers, dataclasses
    and Optional[...]; anything else raises NotImplementedError.
    """
    if inspect.isclass(cls):
        # NB: bool is checked before int, and datetime before date —
        # each is a subclass of the latter and would otherwise be shadowed.
        if issubclass(cls, bool):
            return schema.BooleanSchema()
        elif issubclass(cls, float):
            return schema.NumberSchema()
        elif issubclass(cls, int):
            return schema.IntSchema()
        elif issubclass(cls, str):
            return schema.StringSchema()
        elif issubclass(cls, datetime):
            return schema.StringSchema(format='date-time')
        elif issubclass(cls, date):
            return schema.StringSchema(format='date')
        elif issubclass(cls, timedelta):
            return schema.NumberSchema(format='double')
        elif issubclass(cls, List):
            # NOTE(review): assumes a parameterized List[...] that still
            # passes isclass() — get_args on a bare `list` yields ()
            # and would fail this unpack; confirm the typing version
            # this targets.
            vt, = get_args(cls)
            subschema = auto_any(vt)
            return schema.ArraySchema(items=subschema)
        elif issubclass(cls, Dict):
            kt, vt = get_args(cls)
            if not issubclass(kt, str):
                # JSON objects can only carry string keys.
                raise NotImplementedError(('class3', cls, kt))
            subschema = auto_any(vt)
            return ObjectSchema(additional=subschema, )
        elif is_dataclass(cls):
            return auto_dataclass(cls)
        else:
            raise NotImplementedError(('class2', cls))
    else:
        if is_optional_type(cls):
            # Optional[X]: build the schema for X and mark it nullable.
            item, _ = get_last_args(cls)
            trc('1').debug('%s %s', item, _)
            r = auto_any(item)
            if isinstance(r, schema.Nullable):
                r.nullable = True
            else:
                raise NotImplementedError('can not be nullable')
            trc('2').debug('%s', r)
            return r
        else:
            raise NotImplementedError(('class', cls))
def call(self, endpoint: Endpoint, envelope: RequestEnvelope) -> JsonAny:
    """Deserialize an HTTP envelope into kwargs, invoke the endpoint
    callable, and serialize the response when the endpoint declares one.

    Returns None for endpoints without a declared response.
    """
    kwargs = {}
    # NOTE(review): `endpoint.arguments` is iterated for objects with
    # `.name` here yet also indexed by name below — presumably a custom
    # mapping that iterates its values; confirm against its definition.
    arguments_missed = set([x.name for x in endpoint.arguments])
    trc('4').debug('%s', endpoint)

    # Deserialize every supplied parameter through its declared mapper.
    for p in envelope.parameters:
        arguments_missed.remove(p.name)
        kwargs[p.name] = endpoint.arguments[p.name].mapper.serialize(
            p.value)

    trc('1').debug('%s', endpoint.body)
    if endpoint.body:
        kwargs[endpoint.body_name] = endpoint.body_mapper.serialize(
            envelope.body)

    # Anything not supplied must have a declared default.
    for x in arguments_missed:
        assert endpoint.arguments[x].default is not MISSING
        kwargs[x] = endpoint.arguments[x].default

    trc('0').debug('%s', kwargs)
    ret = self.callable(**kwargs)
    trc('2').debug('%s', endpoint.response)

    if endpoint.response:
        return endpoint.response_mapper.serialize(ret)
def bk_assign(self, brid: RPCKey, jid: RPCKey, ok: bool):
    """Broker-side handler for a worker's reply to a job assignment."""
    sdr = sender()
    # Reject replies from unknown workers, stale epochs, or for jobs we
    # no longer track / never assigned to this sender.
    if sdr not in self.workers:
        trc('3').error('%s', sdr)
        return
    if self._brid_check(brid):
        return
    if jid not in self.jobs:
        trc('1').error('%s', jid)
        return
    if jid not in self.jobs_workers:
        trc('2').error('%s', jid)
        return
    if jid not in self.workers_jobs[sdr]:
        trc('5').error('%s', jid)
        return

    if jid in self.jobs_pending_assign:
        del self.jobs_pending_assign[jid]

    self._worker_pings(brid, sdr)

    if not ok:
        # Assignment declined: cancel any pending resign and give the
        # job back to the pending pool.
        if jid in self.jobs_pending_resign:
            del self.jobs_pending_resign[jid]
        self._job_resign(jid)