def handle(self, *args, **options):
    bic = options['destination'][0]

    # Set up the queue where packets delivered by the router for this
    # BIC wait until they are read.
    qname = "{}_{}".format(bic, "send")
    queue = RedisSMQ(host="127.0.0.1", qname=qname)

    # Receive a payment packet
    try:
        msg = queue.receiveMessage().execute()
        # Process the YAML payload
        packet = yaml.safe_load(msg['message'])
        self.success("Payment packet received: {}".format(
            self.format_payment(packet)))
        queue.deleteMessage(id=msg['id']).execute()
    except NoMessageInQueue:
        self.notice("No payment packets for {}".format(bic))
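
# Usage sketch: a minimal way to exercise the receiver above by pushing
# one YAML-encoded test packet onto the "<BIC>_send" queue it reads
# from. Assumes a Redis server on 127.0.0.1; the queue-name pattern and
# the RedisSMQ/YAML calls mirror the command, while the BIC and the
# packet fields (sender_bic, receiver_bic, amount) are hypothetical
# placeholders.
import yaml
from rsmq import RedisSMQ

bic = "AAAAFIHH"  # hypothetical destination BIC
queue = RedisSMQ(host="127.0.0.1", qname="{}_{}".format(bic, "send"))
# Create the queue if it does not exist yet; exceptions(False) makes the
# command return False instead of raising if it already exists.
queue.createQueue(delay=0).vt(20).exceptions(False).execute()

packet = {"sender_bic": "BBBBFIHH", "receiver_bic": bic, "amount": "10.00"}
queue.sendMessage().message(yaml.safe_dump(packet)).execute()
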
from rsmq import RedisSMQ


class MsgSender:
    def __init__(self, host, port='6379', qname='message_sender'):
        self.queue = RedisSMQ(host=host, port=port, qname=qname)
        self.msg = []
        try:
            # Delete the queue if it already exists
            self.queue.deleteQueue().exceptions(False).execute()
        except Exception as e:
            print(e)
        try:
            # Create the queue
            self.queue.createQueue(delay=0).vt(0).maxsize(-1).execute()
        except Exception as e:
            print(e)

    def send_result(self, result):
        message_id = self.queue.sendMessage(delay=0).message(
            str(result)).execute()
        self.msg.append(message_id)
        # Cap the backlog at 20 messages: once exceeded, delete the
        # oldest message from the queue and forget its id.
        if len(self.msg) > 20:
            rt = self.queue.deleteMessage(id=self.msg[0]).execute()
            if rt:
                print("RedisSMQ send_result block")
            del self.msg[0]
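
# Usage sketch, assuming a Redis server on 127.0.0.1 and MsgSender as
# defined above; the payload dicts are hypothetical placeholders.
# Sending more than 20 results exercises the backlog-trimming branch in
# send_result().
sender = MsgSender(host='127.0.0.1')
for i in range(25):
    sender.send_result({'step': i, 'status': 'ok'})
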
import logging
import time
from threading import Thread

from rsmq import RedisSMQ
from rsmq.consumer import RedisSMQConsumerThread

# Module-level configuration; the concrete value here is an assumption,
# the original defines DEFAULT elsewhere.
DEFAULT = {'server': '127.0.0.1'}


class RSMQueue(object):
    # Note: class-level list, shared by all instances.
    _msg = []

    def __init__(self, qname, host=DEFAULT['server']):
        self.host = host
        self.qname = qname
        self.queue = RedisSMQ(host=host, qname=qname)
        self.consumer = None
        self.callback = None
        try:
            self.queue.createQueue(delay=0).maxsize(-1).vt(0).execute()
        except Exception as e:
            logging.error('[Exception] RSMQueue createQueue: %s', e)
            print('[Exception] RSMQueue createQueue: %s' % e)

    def set_callback(self, callback):
        self.callback = callback

    def publish(self, message):
        message_id = self.queue.sendMessage(delay=0).message(message).execute()
        self._msg.append(message_id)
        # Keep only the newest message in the queue: delete the previous
        # one before forgetting its id.
        if len(self._msg) > 1:
            try:
                self.queue.deleteMessage(id=self._msg[0]).execute()
            except Exception as e:
                logging.error('[Exception] RSMQueue publish: %s', e)
                print('[Exception] RSMQueue publish: %s' % e)
            del self._msg[0]
        return message_id

    def deleteMessage(self, mid):
        return self.queue.deleteMessage(id=mid).execute()

    def subscribe1(self, qname, callback):
        self.consumer = RedisSMQConsumerThread(qname, callback,
                                               host=DEFAULT['server'])
        self.consumer.start()
        return self.consumer

    def receiveMessage(self, callback):
        try:
            # popMessage() returns a dict with 'id', 'message', 'rc', 'ts'
            rt = self.queue.popMessage().execute()
            if callback and callable(callback):
                callback(rt['message'])
        except Exception as e:
            print('[Exception] receiveMessage', e)

    def subscribe(self, callback, obj, freq=10):
        queue = self.queue

        def f(callback):
            # Poll the queue roughly `freq` times per second and hand
            # each message to the callback together with `obj`.
            while True:
                try:
                    rt = queue.popMessage().execute()
                    if rt['id'] and callback and callable(callback):
                        callback(rt['message'], obj)
                except Exception:
                    pass
                time.sleep(1 / freq)

        t = Thread(target=f, args=(callback,))
        t.start()
        return t

    def cancel_subscribe(self):
        if self.consumer:
            self.consumer.stop()

    def peak(self):
        def _peak(message, obj):
            print("\t\tpeak", message)
            time.sleep(0.1)
            return False

        self.subscribe(_peak, None)
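
# Usage sketch, assuming RSMQueue as defined above and a Redis server at
# DEFAULT['server']; the queue name, callback and payload are
# hypothetical placeholders.
def on_message(message, obj):
    print('got', message, 'context:', obj)

q = RSMQueue('telemetry')
worker = q.subscribe(on_message, obj={'source': 'demo'}, freq=5)
q.publish('hello world')
# Note: the polling thread returned by subscribe() loops forever and is
# not a daemon thread, so the process only exits when it is killed.
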
def handle(self, *args, **options):
    # Load the configuration
    self.success("Booting the router by reading configuration...")
    # FIXME: Configuration needs to be better deployed (and not hardwired)
    with open("csm.yaml") as fh:
        config = yaml.load(fh.read(), Loader=yaml.FullLoader)
    bics = [x['bic'] for x in config['participants']]
    self.success("Found PSPs with BICs: {}".format(", ".join(bics)))

    # Set up queues for all the PSPs
    self.success("Setting up interface for each PSP...")
    for psp in config['participants']:
        bic = psp['bic']
        name = psp['name']
        for direction in ['send', 'recv']:
            qname = "{}_{}".format(bic, direction)
            queue = RedisSMQ(host="127.0.0.1", qname=qname)
            try:
                queue.createQueue(delay=0, vt=20, quiet=True).execute()
            except QueueAlreadyExists:
                pass
            self.QUEUES.setdefault(bic, {})
            self.QUEUES[bic][direction] = queue
        self.success("Interface set up for {} ({})".format(bic, name))

    # Start the event loop, trying to read messages from the different
    # queues.
    # FIXME: This is a completely naive way to do this, but it is
    # intentional and will be switched over to Kafka at a later stage.
    self.success("Listening for payment packets...")
    while True:
        for bic, queues in self.QUEUES.items():
            # Receive a payment packet
            try:
                queue = queues['recv']
                msg = queue.receiveMessage().execute()
                # Process the YAML payload
                packet = yaml.safe_load(msg['message'])
                self.success("Payment packet received: {}".format(
                    self.format_payment(packet)))
                queue.deleteMessage(id=msg['id']).execute()
            except NoMessageInQueue:
                self.notice("No payment packets for {} [{}]".format(
                    bic, time.asctime()))
                continue

            # Authorise the payment packet; if it is not authorised,
            # just drop it.
            # FIXME: The payment packet should be an object and we
            # should have methods for routing etc. around that. [Or
            # maybe not, as we have a routing service for the routing.
            # But the payment packet should certainly be an object.]
            routserv = RoutingService()
            if not routserv.authorise(packet):
                # FIXME: Non-authorised packets should be returned to
                # sender. The router would need to have more in the
                # payment packet to describe what a returned packet is,
                # so we will need unified packet types.
                self.success("Payment packet authorisation failed: {}".format(
                    routserv.format_payment(packet)))
                continue  # we just drop the non-authorised packet
            self.success("Payment packet authorisation succeeded: {}".format(
                routserv.format_payment(packet)))

            # Route the packet by finding out what the destination
            # interface is.
            destination_bic = routserv.route(packet)
            if not destination_bic:
                self.error("No destination for payment packet {}".format(
                    routserv.format_payment(packet)))
                continue
            self.success("Routing payment packet to destination: {}".format(
                routserv.format_payment(packet)))

            # Pass the message along to the destination BIC.
            qname = "{}_{}".format(destination_bic, "send")
            queue = RedisSMQ(host="127.0.0.1", qname=qname)
            message_id = queue.sendMessage().message(
                yaml.safe_dump(packet)).execute()
            self.success("Payment packet sent: {}".format(
                routserv.format_payment(packet)))

        time.sleep(1)  # just so we don't use _all_ CPU
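
# Usage sketch for feeding the router above. It expects a csm.yaml whose
# participants carry 'bic' and 'name' keys, for example:
#
#   participants:
#     - {bic: AAAAFIHH, name: Test Bank A}
#     - {bic: BBBBFIHH, name: Test Bank B}
#
# With the router running, a packet dropped onto "<BIC>_recv" is picked
# up, authorised, routed, and forwarded to the destination's
# "<BIC>_send" queue. The BICs and packet fields below are hypothetical
# placeholders; the real fields depend on what RoutingService expects.
import yaml
from rsmq import RedisSMQ

packet = {"sender_bic": "AAAAFIHH", "receiver_bic": "BBBBFIHH",
          "amount": "10.00"}
queue = RedisSMQ(host="127.0.0.1", qname="AAAAFIHH_recv")
queue.sendMessage().message(yaml.safe_dump(packet)).execute()
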