def message(self, task: Job) -> List[NextJob]:
    try:
        # todo: how do we match the jobs to the plugin that executes them?
        # todo: we could just pass the plugin to the worker executor;
        # todo: we would somehow like to define a list of task executors that are called from there.
        if task.task.val.get('b') == 'd':
            ctr = task.task.val.get('ctr')
            if ctr < 100:
                return [
                    NextJob(Id(uuid.uuid4().hex),
                            Body({'b': 'd', 'ctr': ctr + 1}))
                ]
        return []
    except Exception:
        logging.getLogger('then.plugin.process').exception(
            f'Unhandled with {task.id}')
        return [
            NextJob(Id(uuid.uuid4().hex),
                    Body({
                        '_error': True,
                        'rule': task.rule.val,
                        'body': task.task.val,
                    }))
        ]
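# Hedged usage sketch for the contract above: message() maps one Job to its
# follow-up NextJobs, and any exception is converted into a single '_error'
# job instead of propagating. `plugin` is any instance of the class defining
# message(); NextJob exposing .id/.task is an assumption here.
def demo_counter_chain(plugin):
    job = Job(Id(uuid.uuid4().hex), Body({}), Body({'b': 'd', 'ctr': 0}))
    hops = 0
    while True:
        nxt = plugin.message(job)
        if not nxt:
            break  # the chain ends once ctr reaches 100
        job = Job(nxt[0].id, job.rule, nxt[0].task)
        hops += 1
    assert hops == 100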
def test_queue():
    c = TestQueueChannel()
    cs = TestQueueStateChannel()
    m = Structure.deserialize('./triggers')
    q = Queue(Config(3, 1), c, cs, m)
    q.task_match(Id('j-1'), Body({'a': 'f', 'b': 'd'}))
    q.worker_ping(Id('w-1'),
                  Filter(Body({'type': 'Const', 'body': {'a': 'f'}})),
                  Capacity(0, 0, 5))
    print(q.workers)
    q.tick()
    q.tick()
    print(q.pending_ack)
    q.tick()
    print(q.pending_ack)
    print(c.load())
    print(cs.load())
    print(q.workers)
def polled(self, pr: List[bool]):
    s_mg, *other = pr
    if s_mg:
        for addr, buffer in _recv_parse_buffer(sock_recv):
            msg = deserialize_json(buffer)
            addr = Address(*addr)
            known_hosts[Id(msg['w'])] = addr
            if msg['t'] == 'p':
                q.worker_ping(Id(msg['w']), Filter(Body(msg['f'])),
                              capacity_json_from(msg))
            elif msg['t'] == 'ja':
                q.worker_job_ack(Id(msg['w']), capacity_json_from(msg),
                                 Id(msg['ji']))
            elif msg['t'] == 'jn':
                q.worker_job_nack(Id(msg['w']), capacity_json_from(msg),
                                  Id(msg['ji']))
            elif msg['t'] == 'jf':
                q.worker_job_finish(
                    Id(msg['w']), capacity_json_from(msg), Id(msg['ji']),
                    [NextJob(Id(x['i']), Body(x['t'])) for x in msg['ps']])
            else:
                assert False, (addr, msg)
    if any(other):
        tfs.polled(other)
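# Wire messages dispatched above, selected by 't': 'p' ping (filter under
# 'f'), 'ja'/'jn' job ack/nack for job 'ji', and 'jf' finish with follow-up
# jobs under 'ps'. capacity_json_from is referenced but not defined in this
# section; a hedged sketch, assuming the capacity travels under a 'c' key as
# the positional triple seen in Capacity(0, 0, 5) in test_queue():
def capacity_json_from(msg) -> Capacity:
    return Capacity(*msg['c'])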
def repair_done(self):
    # For every DONE job: drop stale ASSIGNED/RUNNING/PENDING entries and
    # re-create any subsequent jobs that are not known yet.
    assigned = self.list_assigned()
    pending = self.list_pending()
    running = self.list_running()
    done = self._list(self.DONE)
    for job_id in done:
        if job_id in [x for x, _ in assigned]:
            os.unlink(self._path(self.ASSIGNED, job_id))
            logging.getLogger('server.jobs.repair.done').error(
                f'done but assigned {job_id}')
        if job_id in [x for x, _ in running]:
            os.unlink(self._path(self.RUNNING, job_id))
            logging.getLogger('server.jobs.repair.done').error(
                f'done but running {job_id}')
        if job_id in pending:
            os.unlink(self._path(self.PENDING, job_id))
            logging.getLogger('server.jobs.repair.done').error(
                f'done but pending {job_id}')
        with open(self._path(self.DONE, job_id)) as f_in:
            obj = yaml.safe_load(f_in)
        subs = [(Id(x['i']), x) for x in obj['s']]
        for sub_id, sub in subs:
            if sub_id not in self:
                logging.getLogger('server.jobs.repair.done').error(
                    f'done but unknown subsequent {job_id} {sub_id}')
                self.create(Job(sub_id, Body(sub['r']), Body(sub['t'])))
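# Shape of a DONE record as consumed by repair_done (hedged, inferred from
# the parsing above): 's' lists the subsequent jobs with id 'i', rule 'r'
# and task 't'; the per-context files read by _get_assigned below also carry
# the worker id under 'w'.
#
#   w: w-1
#   s:
#     - i: 9f2c...
#       r: {type: Const, body: {a: f}}
#       t: {b: d, ctr: 1}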
def _task_match(self, id: Id, task: Body):
    # Job creation is idempotent: job ids derive from the task id plus the
    # rule index. If the matcher configuration changed between attempts at
    # creating a job, the derivation may or may not stay idempotent; I don't
    # personally think that is important:
    #   1) adding a rule will create a job at index n+1;
    #   2) deleting a match will forget to re-create the match.
    for i, rule in enumerate(self.matcher.match(task)):
        yield Job(Id(id.id + '_' + str(i)), rule, task)
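# Sketch of the idempotency claim above, modeled on test_queue(): with an
# unchanged matcher the derived job ids ('<task-id>_<rule-index>') are
# deterministic, so retrying task_match cannot duplicate jobs. Assumes
# _task_match lives on Queue as constructed in test_queue().
def test_task_match_idempotent():
    q = Queue(Config(3, 1), TestQueueChannel(), TestQueueStateChannel(),
              Structure.deserialize('./triggers'))
    first = [j.id for j in q._task_match(Id('t-1'), Body({'a': 'f', 'b': 'd'}))]
    second = [j.id for j in q._task_match(Id('t-1'), Body({'a': 'f', 'b': 'd'}))]
    assert first == second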
def _get_assigned(self, context, job_id: Id) -> Optional[Id]:
    try:
        with open(self._path(context, job_id), 'r') as f_in:
            obj = yaml.safe_load(f_in)
        return Id(obj['w'])
    except KeyError:
        logging.getLogger('server.jobs').error(
            f'[3] could not deserialize {job_id}')
        return None
    except FileNotFoundError:
        return None
def polled(self, pr: List[bool]):
    s, *clients = pr
    if s:
        conn, addr = self.sock.accept()
        self.clients.append(conn)
        logging.getLogger('server.frontend').debug(f'client {addr}')
    if any(clients):
        for client_sock in [b for a, b in zip(clients, self.clients) if a]:
            # todo: ensure conn timeouts
            # todo: ensure conn timeout err handling
            # todo: ensure conn gc
            client_sock: socket.socket
            for _, buffer in _recv_parse_buffer(client_sock):
                msg = deserialize_json(buffer)
                if msg['t'] == 'j':
                    self.q.task_match(Id(msg['i']), Body(msg['b']))
                    client_sock.send(
                        pack_bytes(
                            serialize_json({'t': 'ja', 'i': msg['i'], 'o': True})))
                elif msg['t'] == 'l':
                    client_sock.send(
                        pack_bytes(
                            serialize_json([rule_json_to(x) for x in self.s.list()])))
                elif msg['t'] == 'w':
                    client_sock.send(
                        pack_bytes(
                            serialize_json([
                                worker_json_to(x, self.q.ws.capacity_get(x.id))
                                for x in self.q.ws
                            ])))
                else:
                    logging.getLogger('tcp_front').error(str(msg))
                    client_sock.close()
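# Client-side counterpart of the frontend protocol above (hedged sketch):
# submit a task with 't': 'j' and block for the matching 'ja' ack. Host and
# port are examples; pack_bytes/serialize_json/deserialize_json and
# _recv_parse_buffer are the same helpers used by the server loop.
def submit_task(host: str, port: int, task_id: str, body: dict):
    with socket.create_connection((host, port)) as s:
        s.send(pack_bytes(serialize_json({'t': 'j', 'i': task_id, 'b': body})))
        for _, buffer in _recv_parse_buffer(s):
            msg = deserialize_json(buffer)
            assert msg['t'] == 'ja' and msg['i'] == task_id
            return msg['o']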
def worker_bin(worker_id, herz, ttl, wait, workdir, max_capacity, listen,
               port, filter, worker_path, worker_config):
    # We initialize the matcher types, etc. in one single entity (in general).
    sock_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock_send.settimeout(0)

    filter = Filter(Body({'type': 'Const', 'body': {}})) \
        if filter is None else Filter(Body(filter))
    assert 'type' in filter.body.val
    assert 'body' in filter.body.val

    set_logging()
    logging.getLogger('worker').setLevel(logging.DEBUG)

    worker_id = Id(worker_id)
    c = Config(max_pings=ttl, min_acks=wait, herz=herz)

    wpc: Type[WorkerPlugin]
    w: Worker

    # todo: the current worker impl favours the job-based queue;
    # todo: workers that just pass the jobs to docker, for example, must have a separate impl.
    uwc = UDPWorkerChannel(worker_id, filter, sock_send, Address(listen, port))

    def func_capacity():
        return w.capacity

    worker_config = Body({}) if worker_config is None else Body(worker_config)
    wpc = import_dyn(worker_path)
    wp = wpc.init(UDPWorkerPluginChannel(func_capacity, uwc), worker_config)
    w = Worker(c, uwc, wp, filter, max_capacity)

    logging.getLogger('worker').error(f'Started as {worker_id}')
    logging.getLogger('worker').error(f'Connecting to {listen}:{port}')

    class ClientPollingUnit(PollingUnit):
        def poll(self, max_sleep) -> Optional[List[bool]]:
            return select_helper([sock_send] + wp.poll(), max_sleep)

        def tick(self):
            w.tick()

        def polled(self, pr: List[bool]):
            ssend, *others = pr
            if ssend:
                for addr, buffer in _recv_parse_buffer(sock_send):
                    # when we parse the packets, we may learn the mapping
                    msg = deserialize_json(buffer)
                    if msg['t'] == 'j':
                        w.job_assign(
                            Job(Id(msg['i']), Body(msg['r']), Body(msg['b'])))
                    elif msg['t'] == 'jfa':
                        w.job_finish_ack(Id(msg['i']))
                    else:
                        assert False, (addr, msg)
            if any(others):
                wp.polled(others)

    p = ClientPollingUnit(1. / c.herz)
    try:
        p.run()
    except KeyboardInterrupt:
        logging.getLogger('worker').info('Stopping')
    except Exception:
        logging.getLogger('worker').exception('Must not happen')
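# Hedged invocation sketch for worker_bin (values are examples only;
# 'then.plugin.process' is guessed from the logger name in message() above,
# and import_dyn presumably resolves worker_path to a WorkerPlugin class):
#
#   worker_bin(worker_id=uuid.uuid4().hex, herz=10, ttl=3, wait=1,
#              workdir='./work', max_capacity=5, listen='127.0.0.1',
#              port=9999, filter=None, worker_path='then.plugin.process',
#              worker_config=None)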
def task_finished(self, task_id, task_payload: List[NextJob]):
    self.comm.job_finished(Id(task_id), task_payload)
def rule_json_from(x):
    return Rule(Id(x['i']), Filter(Body(x['f'])), Body(x['b']))
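# rule_json_to is used by the frontend 'l' reply above but not shown in this
# section; a hedged inverse of rule_json_from, assuming Rule exposes
# .id/.filter/.body matching the constructor order:
def rule_json_to(x: Rule) -> dict:
    return {'i': x.id.id, 'f': x.filter.body.val, 'b': x.body.val}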
def worker_json_from(x):
    return Worker(Id(x['i']), Filter(Body(x['f'])))
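# worker_json_to(worker, capacity) backs the frontend 'w' reply above; a
# hedged inverse of worker_json_from. How Capacity serializes is not shown,
# so the 'c' triple mirrors the assumption made in capacity_json_from.
def worker_json_to(x: Worker, c: Capacity) -> dict:
    return {'i': x.id.id, 'f': x.filter.body.val,
            'c': [c.pending, c.running, c.free]}  # field names hypothetical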
def _list(self) -> Iterable[Id]:
    suffix = '.yaml'
    for x in os.listdir(self.workdir):
        if not x.startswith('.') and x.endswith(suffix):
            yield Id(x[:-len(suffix)])
def _list(self, context) -> List[Id]:
    suffix = '.yaml'
    files = os.listdir(os.path.join(self.workdir, context))
    files = [x for x in files if not x.startswith('.') and x.endswith(suffix)]
    return [Id(x[:-len(suffix)]) for x in files]
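# On-disk layout assumed by the listing and repair helpers above (hedged;
# the actual directory names behind PENDING/ASSIGNED/RUNNING/DONE are not
# shown): one '<job-id>.yaml' per job under each state directory, with
# dot-files ignored.
#
#   workdir/
#     pending/9f2c....yaml
#     assigned/...
#     running/...
#     done/...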