def read(self):
    'Read and return a message'
    gn = str(self.seqno)
    self.seqno += 1
    seq = self.group.get(gn)
    if not seq:
        log.error(f'failed to get {gn} from {self.group}')
        return

    attrs = dict(seq.attrs)
    msg = Message(form='FLOW',
                  origin=attrs.pop("origin"),
                  granule=attrs.pop("granule"),
                  seqno=attrs.pop("seqno"))
    # JSON can not serialize numpy integers, so coerce them
    for k, v in attrs.items():
        if isinstance(v, numpy.int64):
            attrs[k] = int(v)
    msg.label = json.dumps(attrs)

    payload = list()
    for ind, slot in sorted(seq.items()):
        tn = slot.attrs['pbtype']
        type_name = tn.rsplit('.', 1)[-1]
        topb = getattr(self.topb, type_name)
        pbobj = topb(slot, self.pbmod)
        a = Any()
        a.Pack(pbobj)
        payload.append(a.SerializeToString())
    msg.payload = payload
    return msg
def read(self):
    'Read and return a message'
    # start at 1 because writer doesn't save anything for BOT
    self.seqno += 1

    # fixme: should iterate seqnos over subgroups?
    gn = self.seqno_interp % self.seqno
    seq = self.group.get(gn)
    if not seq:
        log.info(
            f'TensReader: failed to get {gn} from {self.group}, iteration over')
        return

    tens = seq.get("tensors")
    attrs = dict(tens.attrs)
    log.debug(f'tensor attributes: {attrs}')
    msg = Message(form='FLOW',
                  origin=attrs.pop("origin"),
                  granule=attrs.pop("granule"),
                  seqno=attrs.pop("seqno"))
    # JSON can not serialize numpy integers, so coerce them
    for k, v in attrs.items():
        if isinstance(v, numpy.int64):
            attrs[k] = int(v)

    umd = seq.get("metadata")
    if umd:
        attrs["metadata"] = dict(umd.attrs)

    # fixme: the lines between flow and file handler are too damn
    # blurred!
    attrs['flow'] = 'DAT'
    msg.label_object = attrs

    partnums = [int(p) for p in tens.keys()]
    ntens = len(partnums)
    maxpart = max(partnums)
    nparts = maxpart + 1
    payload = [None] * nparts
    tensors = list()
    for part, ds in tens.items():
        part = int(part)
        # fixme: there are more TENS attrs which might be needed if
        # the file wasn't written by writer.TensWriter!
        md = dict(ds.attrs)
        dtype = str(ds.dtype)
        log.debug(
            f'TENS part {part}/{nparts} {dtype} {type(ds.dtype)} {ds.shape}')
        md.update(dict(shape=ds.shape,
                       dtype=ds.dtype.kind,
                       word=ds.dtype.alignment,
                       part=part))
        tensors.append(md)
        payload[part] = ds[:].tobytes()
    msg.payload = payload
    log.debug(f'read {msg}')
    return msg
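
# A minimal sketch, not part of the original source, of how a consumer
# might rebuild numpy arrays from a TENS message such as read() above
# produces.  It assumes the per-part metadata follows the convention seen
# here and in flow-send-tens: "dtype" is the numpy kind character, "word"
# is the item size in bytes (e.g. 'u' + 1 -> uint8) and "shape" is the
# array shape.  The helper name and its tensors_metadata argument are
# illustrative, not part of the zio API.
def _example_unpack_tens(msg, tensors_metadata):
    'Hypothetical helper: turn TENS payload parts back into numpy arrays.'
    import numpy
    arrays = []
    for md in tensors_metadata:
        part = md['part']
        dt = numpy.dtype(f"{md['dtype']}{md['word']}")  # e.g. 'u1'
        arr = numpy.frombuffer(msg.payload[part], dtype=dt)
        arrays.append(arr.reshape(md['shape']))
    return arrays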
def flow_depos(ctx, pipe, nsend, name, address):
    '''
    An actor with a flow client sending depo messages.
    '''
    log.debug(f'actor: flow_depos({nsend}, "{name}", "{address}")')
    pipe.signal()

    port = Port(name, zmq.CLIENT, '')
    port.connect(address)
    port.online(None)           # peer not needed if port only direct connects
    flow = Flow(port)

    fobj = dict(flow='BOT', direction='extract', credit=3, stream=name)
    msg = Message(seqno=0, form='FLOW', label=json.dumps(fobj))
    log.debug(f'flow_depos {name} send BOT:\n{msg}')
    flow.send_bot(msg)
    msg = flow.recv_bot(1000)
    log.debug(f'flow_depos {name} got BOT:\n{msg}')
    assert (msg)

    for count in range(nsend):
        depo = pb.Depo(ident=count,
                       pos=pb.Point(x=1, y=2, z=3),
                       time=100.0,
                       charge=1000.0,
                       trackid=12345,
                       pdg=11,
                       extent_long=9.6,
                       extent_tran=6.9)
        a = Any()
        a.Pack(depo)
        msg = Message(form='FLOW',
                      seqno=count + 1,
                      label=json.dumps({'flow': 'DAT'}),
                      payload=[a.SerializeToString()])
        log.debug(
            f'flow_depos {name} put: {count}/{nsend}[{flow.credit}]:\n{msg}')
        flow.put(msg)
        log.debug(f'flow_depos {name} again [{flow.credit}]')

    log.debug(f'flow_depos {name} send EOT')
    flow.send_eot(Message(seqno=nsend + 1))
    log.debug(f'flow_depos {name} recv EOT')
    flow.recv_eot()
    log.debug(f'flow_depos {name} wait for quit signal')
    pipe.recv()                 # wait for signal to quit
    log.debug(f'flow_depos {name} exiting')
    return
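
# A minimal sketch, not part of the original source, of the receiving side
# of the payload built above: each payload frame is a serialized
# google.protobuf.Any wrapping a pb.Depo, so a taker can unpack it with the
# standard protobuf Any API.  The helper name is illustrative only; Any and
# pb.Depo are the same objects used in flow_depos().
def _example_unpack_depo(msg):
    'Hypothetical helper: recover the first Depo from a DAT message.'
    a = Any()
    a.ParseFromString(msg.payload[0])
    depo = pb.Depo()
    assert a.Unpack(depo)       # Unpack() returns False if the Any holds another type
    return depo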
def send_tens(number, connect, shape, verbosity, attrs):
    '''
    Generate and flow some TENS messages.
    '''
    import zmq
    from zio import Port, Message, Node
    from zio.flow import Flow

    log.level = getattr(logging, verbosity.upper(), logging.INFO)
    msg_attr = attrify(attrs)

    cnode = Node("flow-send-tens")
    cport = cnode.port("output", zmq.CLIENT)
    cport.connect(connect)
    cnode.online()
    cflow = Flow(cport)

    shape = list(map(int, shape.split(',')))
    size = 1
    for s in shape:
        size *= s

    attr = dict(credit=2, direction="extract", **msg_attr)
    bot = Message(label=json.dumps(attr))
    cflow.send_bot(bot)
    bot = cflow.recv_bot(5000)
    log.debug('flow-send-tens: BOT handshake done')
    assert (bot)

    tens_attr = dict(shape=shape, word=1, dtype='u')  # unsigned char
    attr["TENS"] = dict(tensors=[tens_attr],
                        metadata=dict(source="gen-tens"))
    label = json.dumps(attr)
    payload = [b'X' * size]
    for count in range(number):
        msg = Message(label=label, payload=payload)
        cflow.put(msg)
        log.debug(f'flow-send-tens: {count}: {msg}')

    log.debug('flow-send-tens: send EOT')
    cflow.send_eot(Message())
    log.debug('flow-send-tens: recv EOT (waiting)')
    cflow.recv_eot()
    log.debug('flow-send-tens: going offline')
    cnode.offline()
    log.debug('flow-send-tens: end')
def recv_tens(number, connect, verbosity, attrs):
    '''
    Client to recv flow of TENS messages.
    '''
    import zmq
    from zio import Port, Message, Node
    from zio.flow import Flow

    log.level = getattr(logging, verbosity.upper(), logging.INFO)
    msg_attr = attrify(attrs)

    cnode = Node("flow-recv-tens")
    cport = cnode.port("input", zmq.CLIENT)
    cport.connect(connect)
    cnode.online()
    cflow = Flow(cport)

    attr = dict(credit=2, direction="inject", **msg_attr)
    bot = Message(label=json.dumps(attr))
    cflow.send_bot(bot)
    bot = cflow.recv_bot(5000)
    log.debug('flow-recv-tens: BOT handshake done')
    assert (bot)

    count = 0
    while True:
        if number > 0 and count == number:
            break
        count += 1
        msg = cflow.get()
        log.info(f'flow-recv-tens: {count}: {msg}')
        if msg is None:
            cflow.send_eot()
            cnode.offline()
            log.debug('flow-recv-tens: EOT while receiving')
            return

    log.debug('flow-recv-tens: send EOT')
    cflow.send_eot(Message())
    log.debug('flow-recv-tens: recv EOT (waiting)')
    cflow.recv_eot()
    log.debug('flow-recv-tens: going offline')
    cnode.offline()
    log.debug('flow-recv-tens: end')
def test_conversation(self):
    # normally, we use .bot() but here we are synchronous with
    # both endpoints so have to break up the steps of at least one
    # endpoint.
    self.cflow.send_bot()       # this can pretend to be async

    sbot = self.sflow.bot()
    assert (sbot)
    assert (sbot.form == 'FLOW')

    cbot = self.cflow.recv()
    assert (cbot)
    assert (cbot.form == 'FLOW')

    # here, server is giver, should start with no credit
    assert (self.sflow.credit == 0)
    assert (self.sflow.total_credit == TestFlow.credit)

    # here, client is taker, should start with all credit
    assert (self.cflow.credit == TestFlow.credit)
    assert (self.cflow.total_credit == TestFlow.credit)

    log.debug("flow BOT handshake done")
    assert (self.cflow.sm.state == "READY")
    assert (self.sflow.sm.state == "READY")

    # this also imitates PAY
    self.cflow.begin()
    log.debug("client flow began")
    assert (self.cflow.sm.state == "taking_HANDSOUT")

    self.sflow.begin()
    log.debug("server flow began")
    assert (self.sflow.sm.state == "giving_GENEROUS")

    for count in range(10):
        log.debug(f"test_flow: server put in {self.sflow.sm.state}")
        dat = Message(form='FLOW')
        self.sflow.put(dat)

        log.debug(f"test_flow: client get in {self.cflow.sm.state}")
        dat = self.cflow.get()
        # flow protocol: BOT=0, DAT=1+
        assert (dat.seqno == 1 + count)

    # normally, when a flow explicitly sends EOT the other end
    # will recv the EOT when it's trying to recv another message
    # (PAY or DAT).
    self.cflow.eotsend()

    should_be_eot = self.sflow.recv()
    assert (should_be_eot)

    self.sflow.eotsend()
    expected = self.cflow.eotrecv()
    assert (expected)
def test_sendrecv(self):
    sport = self.snode.port("sport")
    cport = self.cnode.port("cport")

    lobj = dict(foo='bar')
    msg = Message(form='TEST', label_object=lobj)
    log.debug(f'{msg}')
    cport.send(msg)

    log.debug('now recv')
    msg2 = sport.recv()
    assert (msg2)
    assert (msg2.form == 'TEST')
    lobj2 = msg2.label_object
    assert (lobj2)
    assert (lobj2 == lobj)
def test_conversation(self):
    # cflow is recver
    bot = Message(label='{"credit":2,"direction":"inject"}')
    self.cflow.send_bot(bot)
    bot = self.sflow.recv_bot(1000)
    assert (bot)
    assert (self.sflow.credit == 0)
    assert (self.sflow.total_credit == 2)

    # sflow is sender
    bot = Message(label='{"credit":2,"direction":"extract"}')
    self.sflow.send_bot(bot)
    bot = self.cflow.recv_bot(1000)
    assert (bot)
    assert (self.cflow.credit == 2)
    assert (self.cflow.total_credit == 2)

    self.cflow.flush_pay()
    assert (self.cflow.credit == 0)
    c = self.sflow.slurp_pay()
    assert (c == 2)
    assert (self.sflow.credit == 2)

    for count in range(10):
        # note: seqno normally should be sequential
        self.sflow.put(Message(coord=CoordHeader(seqno=100 + count)))
        self.sflow.put(Message(coord=CoordHeader(seqno=200 + count)))
        dat = self.cflow.get()
        assert (dat.seqno == 100 + count)
        dat = self.cflow.get()
        assert (dat.seqno == 200 + count)

    # normally, when a flow explicitly sends EOT the other end will
    # recv the EOT when it's trying to recv another message (PAY or
    # DAT).  In this test things are synchronous and so we explicitly
    # recv_eot().
    self.cflow.send_eot(Message())
    surprise = self.sflow.recv_eot(1000)
    assert (surprise)
    self.sflow.send_eot(Message())
    expected = self.cflow.recv_eot(1000)
    assert (expected)
def file_handler(ctx, pipe, filename, *wargs):
    '''An actor that marshals messages from socket to file.

    Parameters
    ----------
    filename : string
        Name of an HDF file in which to write
    wargs : tuple of args
        wargs[0] : string (address pattern)
            An f-string formatted with a "port" parameter that should
            resolve to a legal ZeroMQ socket address.  When a
            successful bind() can be done on the result, the resolved
            address is returned through the pipe.  If no successful
            address can be bound, an empty string is returned as an
            error indicator.
    '''
    wargs = list(wargs)
    addrpat = wargs.pop(0)

    log.debug(f'actor: writer("{filename}", "{addrpat}")')

    fp = h5py.File(filename, 'w')
    log.debug(f'writer: opened {filename}')
    pipe.signal()

    pull = ctx.socket(PULL)
    minport, maxport = 49999, 65000
    for port in range(minport, maxport):
        writer_addr = addrpat.format(port=port)
        try:
            pull.bind(writer_addr)
        except Exception:
            continue            # port in use, try the next one
        log.debug(f'writer: bind to {writer_addr}')
        pipe.send_string(writer_addr)
        break
    else:
        pipe.send_string('')    # no port could be bound
        return

    flow_writer = dict()

    poller = Poller()
    poller.register(pipe, POLLIN)
    poller.register(pull, POLLIN)
    while True:
        for which, _ in poller.poll():

            if not which or which == pipe:  # signal exit
                log.debug(f'writer: {filename} exiting')
                fp.close()
                return

            # o.w. we have flow
            data = pull.recv()
            if not data:
                continue
            msg = Message(encoded=data)
            fobj = objectify(msg)
            path = fobj.pop("hdfgroup")  # must be supplied
            msg.label = json.dumps(fobj)
            log.debug(f'writer: {filename}:/{path} writing: {msg}')

            fw = flow_writer.get(path, None)
            if fw is None:
                sg = fp.get(path, None) or fp.create_group(path)
                # note: in principle/future, TensWriter type can be
                # made an arg to support other message formats.
                fw = flow_writer[path] = TensWriter(sg, *wargs)

            fw.save(msg)
            # log.debug(f'writer: flush {filename}')
            fp.flush()
    return
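
# A minimal sketch, not part of the original source, of a producer feeding
# this actor: connect a PUSH socket to the address the actor reports back
# through its pipe and ship encoded FLOW messages whose label carries the
# required "hdfgroup" key.  It assumes Message.encode() is the inverse of
# the Message(encoded=...) constructor used above; the helper name and
# default group are illustrative only.
def _example_feed_writer(ctx, writer_addr, group="mygroup"):
    'Hypothetical helper: send one message to the file_handler PULL socket.'
    import json
    from zmq import PUSH
    from zio import Message
    push = ctx.socket(PUSH)
    push.connect(writer_addr)
    msg = Message(form='FLOW',
                  label=json.dumps(dict(flow='DAT', hdfgroup=group)))
    push.send(msg.encode())
    push.close()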
def test_flow_string(self):
    msg = Message(label='{"extra":42}')
    msg.label = stringify('DAT', **objectify(msg))
    fobj = objectify(msg)
    assert (fobj["extra"] == 42)
    assert (fobj["flow"] == "DAT")
def file_handler(ctx, pipe, filename, addrpat, wargs):
    '''An actor that marshals messages from socket to file.

    Parameters
    ----------
    filename : string
        Name of an HDF file in which to write
    addrpat : string
        An f-string formatted with a "port" parameter that should
        resolve to a legal ZeroMQ socket address.  When a successful
        bind() can be done on the result, the resolved address is
        returned through the pipe.  If no successful address can be
        bound, an empty string is returned as an error indicator.
    wargs : tuple
        Args passed to Writer.
    '''
    log.debug(f'actor: writer("{filename}", "{addrpat}")')

    fp = h5py.File(filename, 'w')
    log.debug(f'opened {filename}')
    pipe.signal()

    log.debug('make writer PULL socket')
    pull = ctx.socket(PULL)
    minport, maxport = 49999, 65000
    for port in range(minport, maxport):
        writer_addr = addrpat.format(port=port)
        try:
            pull.bind(writer_addr)
        except Exception:
            continue            # port in use, try the next one
        log.debug(f'writer bind to {writer_addr}')
        pipe.send_string(writer_addr)
        break
    else:
        pipe.send_string('')    # no port could be bound
        return

    flow_writer = dict()

    poller = Poller()
    poller.register(pipe, POLLIN)
    poller.register(pull, POLLIN)
    while True:
        for which, _ in poller.poll():
            if not which:
                return
            if which == pipe:   # signal exit
                log.debug(f'writer for {filename} exiting')
                return

            # o.w. we have flow
            data = pull.recv()
            if not data:
                continue
            msg = Message(encoded=data)
            fobj = objectify(msg)
            path = fobj.pop("hdfgroup")  # must be supplied
            msg.label = json.dumps(fobj)
            log.debug(f'{filename}:/{path} writing:\n{msg}')

            fw = flow_writer.get(path, None)
            if fw is None:
                sg = fp.get(path, None) or fp.create_group(path)
                fw = flow_writer[path] = Writer(sg, *wargs)

            fw.save(msg)
    return