def __init__(self, ctx, device_type=zmq.QUEUE, in_type=None, out_type=None):
    self.ctx = ctx
    # XXX: super(ThreadDevice, self).__init__(device_type, in_type, out_type)
    # but ZMQThreadDevice is an old-style class (yay!).
    ZMQThreadDevice.__init__(self, device_type, in_type, out_type)
class Heart(object):
    """A basic heart object for responding to a HeartMonitor.

    This is a simple wrapper with defaults for the most common
    Device model for responding to heartbeats.

    It simply builds a threadsafe zmq.FORWARDER Device, defaulting to using
    SUB/XREQ for in/out.

    You can specify the XREQ's IDENTITY via the optional heart_id argument.
    """

    device = None
    id = None

    def __init__(self, in_addr, out_addr, in_type=zmq.SUB, out_type=zmq.XREQ, heart_id=None):
        self.device = ThreadDevice(zmq.FORWARDER, in_type, out_type)
        self.device.daemon = True
        self.device.connect_in(in_addr)
        self.device.connect_out(out_addr)
        if in_type == zmq.SUB:
            self.device.setsockopt_in(zmq.SUBSCRIBE, "")
        if heart_id is None:
            heart_id = str(uuid.uuid4())
        self.device.setsockopt_out(zmq.IDENTITY, heart_id)
        self.id = heart_id

    def start(self):
        return self.device.start()
class Heart(object):
    """A basic heart object for responding to a HeartMonitor.

    This is a simple wrapper with defaults for the most common
    Device model for responding to heartbeats.

    It simply builds a threadsafe zmq.FORWARDER Device, defaulting to using
    SUB/DEALER for in/out.

    You can specify the DEALER's IDENTITY via the optional heart_id argument.
    """

    device = None
    id = None

    def __init__(self, in_addr, out_addr, in_type=zmq.SUB, out_type=zmq.DEALER, heart_id=None):
        self.device = ThreadDevice(zmq.FORWARDER, in_type, out_type)
        # do not allow the device to share global Context.instance,
        # which is the default behavior in pyzmq > 2.1.10
        self.device.context_factory = zmq.Context

        self.device.daemon = True
        self.device.connect_in(in_addr)
        self.device.connect_out(out_addr)
        if in_type == zmq.SUB:
            self.device.setsockopt_in(zmq.SUBSCRIBE, b"")
        if heart_id is None:
            heart_id = uuid.uuid4().bytes
        self.device.setsockopt_out(zmq.IDENTITY, heart_id)
        self.id = heart_id

    def start(self):
        return self.device.start()
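# Usage sketch for the Heart above (not from the original source): the two
# endpoints and the monitor sockets are assumptions chosen for illustration.
# A HeartMonitor-style process publishes pings on a PUB socket and collects
# pongs on a ROUTER socket; the Heart's FORWARDER device echoes every ping
# back, and the ROUTER sees it prefixed with the Heart's IDENTITY.
import time
import zmq

ctx = zmq.Context.instance()
ping = ctx.socket(zmq.PUB)
ping.bind("tcp://127.0.0.1:5555")        # hypothetical ping endpoint
pong = ctx.socket(zmq.ROUTER)
pong.bind("tcp://127.0.0.1:5556")        # hypothetical pong endpoint

heart = Heart("tcp://127.0.0.1:5555", "tcp://127.0.0.1:5556")
heart.start()

time.sleep(0.2)                           # let the SUB/DEALER connections establish
ping.send(b"ping")
ident, msg = pong.recv_multipart()        # ident == heart.id, msg == b"ping"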
def setup_zmq(frontend_port, backend_port):
    device = ThreadDevice(device_type=zmq.QUEUE, in_type=zmq.ROUTER, out_type=zmq.DEALER)
    # Set the high-water marks to 1 to constrain queueing in the REQ/REP pattern
    device.setsockopt_in(zmq.SNDHWM, 1)
    device.setsockopt_out(zmq.RCVHWM, 1)
    device.bind_in("tcp://127.0.0.1:{}".format(frontend_port))
    device.bind_out("tcp://127.0.0.1:{}".format(backend_port))
    return device
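# Hypothetical usage of setup_zmq (the port numbers are placeholders): a REQ
# client talks to the ROUTER frontend and a REP worker serves the DEALER
# backend, with the QUEUE device brokering between them.
import zmq

device = setup_zmq(5559, 5560)
device.start()

ctx = zmq.Context.instance()
worker = ctx.socket(zmq.REP)
worker.connect("tcp://127.0.0.1:5560")
client = ctx.socket(zmq.REQ)
client.connect("tcp://127.0.0.1:5559")

client.send(b"hello")
worker.send(worker.recv())                # echo the request back
assert client.recv() == b"hello"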
class Forwarder(object):
    def __init__(self, input_addr, output_addr):
        self.device = ThreadDevice(zmq.FORWARDER, in_type=zmq.SUB, out_type=zmq.PUB)
        self.device.bind_in(input_addr)
        self.device.bind_out(output_addr)
        # subscribe to everything so all incoming messages are forwarded
        self.device.setsockopt_in(zmq.SUBSCRIBE, b"")

    def start(self):
        self.device.start()

    def stop(self):
        # terminating the device's (private) context unblocks the forwarder
        # thread so that join() can return
        self.device._context.term()
        self.device.join()
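# Minimal sketch of driving the Forwarder above; the tcp endpoints are
# assumptions for illustration. Publishers connect PUB sockets to the input
# address and subscribers connect SUB sockets to the output address; because
# the device's SUB side subscribes to everything, topic filtering happens at
# the final subscriber.
import time
import zmq

fwd = Forwarder("tcp://127.0.0.1:6000", "tcp://127.0.0.1:6001")
fwd.start()

ctx = zmq.Context.instance()
pub = ctx.socket(zmq.PUB)
pub.connect("tcp://127.0.0.1:6000")
sub = ctx.socket(zmq.SUB)
sub.setsockopt(zmq.SUBSCRIBE, b"sensor")
sub.connect("tcp://127.0.0.1:6001")

time.sleep(0.2)                           # give the PUB/SUB links time to join
pub.send(b"sensor temperature=21.5")
print(sub.recv())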
def start_proxy():
    logger.info("Starting proxy")
    device = ThreadDevice(zmq.FORWARDER, zmq.XSUB, zmq.XPUB)
    device.bind_in(PROXY_BACKEND)
    device.bind_out(PROXY_FRONTEND)
    device.start()
    return device
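# Sketch of how clients attach to this XSUB/XPUB proxy; the addresses below
# stand in for the module-level PROXY_BACKEND / PROXY_FRONTEND constants,
# which are not shown in the snippet above. Unlike a plain SUB/PUB FORWARDER,
# XSUB/XPUB forwards subscription messages upstream, so publishers only emit
# topics that somebody has actually subscribed to.
import zmq

ctx = zmq.Context.instance()
pub = ctx.socket(zmq.PUB)
pub.connect("tcp://127.0.0.1:7000")       # assumed value of PROXY_BACKEND
sub = ctx.socket(zmq.SUB)
sub.setsockopt(zmq.SUBSCRIBE, b"events")
sub.connect("tcp://127.0.0.1:7001")       # assumed value of PROXY_FRONTEND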
def __init__(self, in_addr, out_addr, in_type=zmq.SUB, out_type=zmq.XREQ, heart_id=None):
    self.device = ThreadDevice(zmq.FORWARDER, in_type, out_type)
    self.device.daemon = True
    self.device.connect_in(in_addr)
    self.device.connect_out(out_addr)
    if in_type == zmq.SUB:
        self.device.setsockopt_in(zmq.SUBSCRIBE, "")
    if heart_id is None:
        heart_id = str(uuid.uuid4())
    self.device.setsockopt_out(zmq.IDENTITY, heart_id)
    self.id = heart_id
def start(self):
    # Launch two proxy sockets, one for remote communication, and one for in-process communication.
    self.device_thread = ThreadDevice(zmq.FORWARDER, zmq.XSUB, zmq.XPUB)
    self.device_thread.bind_in("tcp://*:" + str(MessageProxy.SUB_PORT))
    self.device_thread.bind_out("tcp://*:" + str(MessageProxy.PUB_PORT))
    self.device_thread.start()

    # TODO Finish in-process communication
    """
    self.local_frontend = self.context.socket(zmq.XSUB)
    self.local_frontend.bind("inproc://" + MessageProxy.INPROC_SUB_ID)
    self.local_backend = self.context.socket(zmq.XPUB)
    self.local_backend.bind("inproc://" + MessageProxy.INPROC_PUB_ID)
    zmq.device(zmq.FORWARDER, self.local_frontend, self.local_backend)
    """

    self.initialized = True
def main():
    """Main Function"""
    device = ThreadDevice(zmq.FORWARDER, zmq.SUB, zmq.PUB)
    device.bind_in("tcp://127.0.0.1:5555")
    device.bind_out("tcp://127.0.0.1:5556")
    device.setsockopt_in(zmq.SUBSCRIBE, "")
    device.start()

    router = ThreadRouter()
    router.bind("tcp://127.0.0.1:5560")
    router.start()
def __init__(
    self,
    in_addr,
    out_addr,
    mon_addr=None,
    in_type=zmq.SUB,
    out_type=zmq.DEALER,
    mon_type=zmq.PUB,
    heart_id=None,
    curve_serverkey=None,
    curve_secretkey=None,
    curve_publickey=None,
):
    if mon_addr is None:
        self.device = ThreadDevice(zmq.FORWARDER, in_type, out_type)
    else:
        self.device = ThreadMonitoredQueue(in_type, out_type, mon_type, in_prefix=b"", out_prefix=b"")
    # do not allow the device to share global Context.instance,
    # which is the default behavior in pyzmq > 2.1.10
    self.device.context_factory = zmq.Context

    self.device.daemon = True
    self.device.connect_in(in_addr)
    self.device.connect_out(out_addr)
    if curve_serverkey:
        self.device.setsockopt_in(zmq.CURVE_SERVERKEY, curve_serverkey)
        self.device.setsockopt_in(zmq.CURVE_PUBLICKEY, curve_publickey)
        self.device.setsockopt_in(zmq.CURVE_SECRETKEY, curve_secretkey)
        self.device.setsockopt_out(zmq.CURVE_SERVERKEY, curve_serverkey)
        self.device.setsockopt_out(zmq.CURVE_PUBLICKEY, curve_publickey)
        self.device.setsockopt_out(zmq.CURVE_SECRETKEY, curve_secretkey)
        if mon_addr is not None:
            self.device.setsockopt_mon(zmq.CURVE_SERVERKEY, curve_publickey)
            self.device.setsockopt_mon(zmq.CURVE_PUBLICKEY, curve_publickey)
            self.device.setsockopt_mon(zmq.CURVE_SECRETKEY, curve_secretkey)
    if mon_addr is not None:
        self.device.connect_mon(mon_addr)
    if in_type == zmq.SUB:
        self.device.setsockopt_in(zmq.SUBSCRIBE, b"")
    if heart_id is None:
        heart_id = uuid.uuid4().bytes
    self.device.setsockopt_out(zmq.IDENTITY, heart_id)
    self.id = heart_id
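# Hedged sketch of constructing the CURVE-enabled Heart above. The endpoint
# addresses are placeholders, and in a real deployment the server public key
# would come from the hub/monitor's configuration rather than being generated
# next to the client keypair. Requires libzmq built with CURVE support.
import zmq

server_public, _server_secret = zmq.curve_keypair()   # normally owned by the monitor/hub
client_public, client_secret = zmq.curve_keypair()    # this heart's keypair

heart = Heart(
    "tcp://127.0.0.1:5555",               # ping (SUB) endpoint, placeholder
    "tcp://127.0.0.1:5556",               # pong (DEALER) endpoint, placeholder
    curve_serverkey=server_public,
    curve_publickey=client_public,
    curve_secretkey=client_secret,
)
heart.start()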
def __init__(self, in_addr, out_addr, in_type=zmq.SUB, out_type=zmq.DEALER, heart_id=None):
    self.device = ThreadDevice(zmq.FORWARDER, in_type, out_type)
    # do not allow the device to share global Context.instance,
    # which is the default behavior in pyzmq > 2.1.10
    self.device.context_factory = zmq.Context

    self.device.daemon = True
    self.device.connect_in(in_addr)
    self.device.connect_out(out_addr)
    if in_type == zmq.SUB:
        self.device.setsockopt_in(zmq.SUBSCRIBE, b"")
    if heart_id is None:
        heart_id = uuid.uuid4().bytes
    self.device.setsockopt_out(zmq.IDENTITY, heart_id)
    self.id = heart_id
def run_proxy(context):
    proxy = ThreadDevice(zmq.QUEUE, zmq.XSUB, zmq.XPUB)
    proxy.bind_in(ADDR_SUB)
    # proxy.setsockopt_in(zmq.SUBSCRIBE, '')
    proxy.bind_out(ADDR_PUB)
    proxy.start()
def run(self, *args, **kwargs):
    frontend_address = kwargs["frontend-address"]
    assert frontend_address
    backend_address = kwargs["backend-address"]
    assert backend_address

    device = ThreadDevice(zmq.QUEUE, zmq.ROUTER, zmq.DEALER)
    device.bind_in(frontend_address)
    device.setsockopt_in(zmq.IDENTITY, "ROUTER")
    device.bind_out(backend_address)
    device.setsockopt_out(zmq.IDENTITY, "DEALER")
    device.start()
class MessageProxy(object):
    """
    The MessageProxy wraps a ZeroMQ proxy server (XSUB socket) which forwards
    'publish' messages from Python Analysis objects to subscribers
    (Scala Analysis objects) in the streamer process.
    """

    DEFAULT_NUM_THREADS = 1

    # In-process communication settings
    INPROC_PUB_ID = "msg_proxy_pub"
    INPROC_SUB_ID = "msg_proxy_sub"

    # Remote communication settings
    PUB_PORT = settings.PUB_PORT
    SUB_PORT = settings.SUB_PORT
    # TODO: This will only work if the Python front-end and the streamer process are running on the same machine
    DEFAULT_BIND_HOST = "localhost"

    def __init__(self, host=DEFAULT_BIND_HOST, num_threads=DEFAULT_NUM_THREADS):
        self.initialized = False
        self.context = zmq.Context(num_threads)
        self.host = host
        self.device_thread = None

    def start(self):
        # Launch two proxy sockets, one for remote communication, and one for in-process communication.
        self.device_thread = ThreadDevice(zmq.FORWARDER, zmq.XSUB, zmq.XPUB)
        self.device_thread.bind_in("tcp://*:" + str(MessageProxy.SUB_PORT))
        self.device_thread.bind_out("tcp://*:" + str(MessageProxy.PUB_PORT))
        self.device_thread.start()

        # TODO Finish in-process communication
        """
        self.local_frontend = self.context.socket(zmq.XSUB)
        self.local_frontend.bind("inproc://" + MessageProxy.INPROC_SUB_ID)
        self.local_backend = self.context.socket(zmq.XPUB)
        self.local_backend.bind("inproc://" + MessageProxy.INPROC_PUB_ID)
        zmq.device(zmq.FORWARDER, self.local_frontend, self.local_backend)
        """

        self.initialized = True

    def _get_pub_addr(self, remote=True):
        if remote:
            return "tcp://" + self.host + ":" + str(MessageProxy.SUB_PORT)
        else:
            return "inproc://" + MessageProxy.INPROC_SUB_ID

    def _get_sub_addr(self, remote=True):
        if remote:
            return "tcp://" + self.host + ":" + str(MessageProxy.PUB_PORT)
        else:
            return "inproc://" + MessageProxy.INPROC_PUB_ID

    def get_publisher(self, remote=True):
        """
        :param remote: True if the client wishes to publish to remote services, False otherwise.
        :return: A Publisher object constructed using this MessageProxy's ZMQ Context
        """
        return Publisher.get_publisher(self.context, self._get_pub_addr(remote))

    def get_subscriber(self, tag=None, remote=True):
        """
        :param tag: The tag the client will use to subscribe
        :param remote: True if the client wishes to subscribe to remote services, False otherwise.
        :return: A Subscriber object constructed using this MessageProxy's ZMQ Context
        """
        return Subscriber.get_subscriber(self.context, self._get_sub_addr(remote), tag)
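# Hypothetical wiring sketch for MessageProxy: the Publisher and Subscriber
# helpers come from the surrounding project, so only the methods defined in
# the class above are exercised here, and the tag value is made up.
proxy = MessageProxy()
proxy.start()

publisher = proxy.get_publisher(remote=True)
subscriber = proxy.get_subscriber(tag="results", remote=True)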
def __init__(self, input_addr, output_addr):
    self.device = ThreadDevice(zmq.FORWARDER, in_type=zmq.SUB, out_type=zmq.PUB)
    self.device.bind_in(input_addr)
    self.device.bind_out(output_addr)
    self.device.setsockopt_in(zmq.SUBSCRIBE, b"")
class ZmqCentral(object):
    def __init__(self, opts):
        self.opts = opts
        self.info = {}

    def announceConnect(self, moduleName, params):
        logging.info('module %s connected', moduleName)
        self.injectStream.send('central.connect.%s:%s'
                               % (moduleName, json.dumps(params)))

    def announceDisconnect(self, moduleName):
        logging.info('module %s disconnected', moduleName)
        self.injectStream.send('central.disconnect.%s:%s'
                               % (moduleName,
                                  json.dumps({'timestamp': str(getTimestamp())})))

    def logMessage(self, msg, posixTime=None, attachmentDir='-'):
        mlog = self.messageLog
        mlog.write('@@@ %d %d %s ' % (getTimestamp(posixTime), len(msg), attachmentDir))
        mlog.write(msg)
        mlog.write('\n')

    def logMessageWithAttachments0(self, msg):
        parsed = parseMessage(msg)
        posixTime = time.time()

        # construct attachment directory
        dt = datetime.datetime.utcfromtimestamp(posixTime)
        dateText = dt.strftime('%Y-%m-%d')
        timeText = dt.strftime('%H-%M-%S') + '.%06d' % dt.microsecond
        uniq = '%08x' % random.getrandbits(32)
        attachmentSuffix = os.path.join('attachments', dateText, timeText,
                                        parsed['topic'], uniq)
        attachmentPath = os.path.join(self.logDir, attachmentSuffix)
        os.makedirs(attachmentPath)

        # write attachments to attachment directory
        for attachment in parsed['attachments']:
            fullName = os.path.join(attachmentPath, attachment.get_filename())
            open(fullName, 'wb').write(attachment.get_payload())

        # log message with a pointer to the attachment directory
        self.logMessage(':'.join((parsed['topic'], parsed['json'])),
                        posixTime, attachmentSuffix)

    def logMessageWithAttachments(self, msg):
        try:
            return self.logMessageWithAttachments0(msg)
        except:  # pylint: disable=W0702
            self.logException('logging message with attachments')

    def handleHeartbeat(self, params):
        moduleName = params['module'].encode('utf-8')
        now = getTimestamp()
        oldInfo = self.info.get(moduleName, None)
        if oldInfo:
            if oldInfo.get('pub', None) != params.get('pub', None):
                self.announceDisconnect(moduleName)
                self.announceConnect(moduleName, params)
        else:
            self.announceConnect(moduleName, params)
        self.info[moduleName] = params
        keepalive = params.get('keepalive', DEFAULT_KEEPALIVE_US)
        params['timeout'] = now + keepalive
        return 'ok'

    def handleInfo(self):
        return self.info

    def logException(self, whileClause):
        errClass, errObject, errTB = sys.exc_info()[:3]
        errText = '%s.%s: %s' % (errClass.__module__, errClass.__name__, str(errObject))
        logging.warning(''.join(traceback.format_tb(errTB)))
        logging.warning(errText)
        logging.warning('[error while %s at time %s]', whileClause, getTimestamp())

    def handleMessages(self, messages):
        for msg in messages:
            if hasAttachments(msg):
                self.logMessageWithAttachments(msg)
            else:
                self.logMessage(msg)
            if msg.startswith('central.heartbeat.'):
                try:
                    _topic, body = msg.split(':', 1)
                    self.handleHeartbeat(json.loads(body))
                except:  # pylint: disable=W0702
                    self.logException('handling heartbeat')

    def handleRpcCall(self, messages):
        for msg in messages:
            try:
                call = json.loads(msg)
                callId = call['id']
            except:  # pylint: disable=W0702
                self.rpcStream.send(json.dumps({'result': None,
                                                'error': 'malformed request'}))
            try:
                method = call['method']
                _params = call['params']
                if method == 'info':
                    result = self.handleInfo()
                else:
                    raise ValueError('unknown method %s' % method)
                self.rpcStream.send(json.dumps({'result': result,
                                                'error': None,
                                                'id': callId}))
            except:  # pylint: disable=W0702
                self.logException('handling rpc message')
                errClass, errObject = sys.exc_info()[:2]
                errText = '%s.%s: %s' % (errClass.__module__, errClass.__name__, str(errObject))
                self.rpcStream.send(json.dumps({'result': None,
                                                'error': errText,
                                                'id': callId}))

    def handleDisconnectTimer(self):
        now = getTimestamp()
        disconnectModules = []
        for moduleName, entry in self.info.iteritems():
            timeout = entry.get('timeout', None)
            if timeout is not None and now > timeout:
                disconnectModules.append(moduleName)
        for moduleName in disconnectModules:
            self.announceDisconnect(moduleName)
            del self.info[moduleName]

    def readyLog(self, pathTemplate, timestamp):
        if '%s' in pathTemplate:
            timeText = timestamp.strftime('%Y-%m-%d-%H-%M-%S')
            logFile = pathTemplate % timeText
        else:
            logFile = pathTemplate
        if not os.path.exists(self.logDir):
            os.makedirs(self.logDir)
        logPath = os.path.join(self.logDir, logFile)
        if '%s' in pathTemplate:
            latestPath = os.path.join(self.logDir, pathTemplate % 'latest')
            if os.path.islink(latestPath):
                os.unlink(latestPath)
            os.symlink(logFile, latestPath)
        return logPath

    def start(self):
        # open log files
        now = datetime.datetime.utcnow()
        self.logDir = os.path.abspath(self.opts.logDir)
        self.messageLogPath = self.readyLog(self.opts.messageLog, now)
        self.messageLog = open(self.messageLogPath, 'a')
        self.consoleLogPath = self.readyLog(self.opts.consoleLog, now)

        rootLogger = logging.getLogger()
        rootLogger.setLevel(logging.DEBUG)
        fmt = logging.Formatter('%(asctime)s - %(levelname)-7s - %(message)s')
        fmt.converter = time.gmtime
        fh = logging.FileHandler(self.consoleLogPath)
        fh.setFormatter(fmt)
        fh.setLevel(logging.DEBUG)
        rootLogger.addHandler(fh)
        if self.opts.foreground:
            ch = logging.StreamHandler()
            ch.setLevel(logging.DEBUG)
            ch.setFormatter(fmt)
            rootLogger.addHandler(ch)

        # daemonize
        if self.opts.foreground:
            logging.info('staying in foreground')
        else:
            logging.info('daemonizing')
            pid = os.fork()
            if pid != 0:
                os._exit(0)
            os.setsid()
            pid = os.fork()
            if pid != 0:
                os._exit(0)
            os.chdir('/')
            os.close(1)
            os.close(2)
            nullFd = os.open('/dev/null', os.O_RDWR)
            os.dup2(nullFd, 1)
            os.dup2(nullFd, 2)

        try:
            # set up zmq
            self.context = zmq.Context.instance()
            self.rpcStream = ZMQStream(self.context.socket(zmq.REP))
            self.rpcStream.bind(self.opts.rpcEndpoint)
            self.rpcStream.on_recv(self.handleRpcCall)

            self.forwarder = ThreadDevice(zmq.FORWARDER, zmq.SUB, zmq.PUB)
            self.forwarder.setsockopt_in(zmq.IDENTITY, THIS_MODULE)
            self.forwarder.setsockopt_out(zmq.IDENTITY, THIS_MODULE)
            self.forwarder.setsockopt_in(zmq.SUBSCRIBE, '')
            self.forwarder.setsockopt_out(zmq.HWM, self.opts.highWaterMark)
            self.forwarder.bind_in(self.opts.subscribeEndpoint)
            self.forwarder.bind_in(INJECT_ENDPOINT)
            self.forwarder.bind_out(self.opts.publishEndpoint)
            self.forwarder.bind_out(MONITOR_ENDPOINT)
            for entry in self.opts.subscribeTo:
                try:
                    moduleName, endpoint = entry.split('@')
                    endpoint = parseEndpoint(endpoint)
                except ValueError:
                    raise ValueError('--subscribeTo argument "%s" is not in the format "<moduleName>@<endpoint>"' % entry)
                self.forwarder.connect_in(endpoint)
                self.info[moduleName] = {'module': moduleName, 'pub': endpoint}
            self.forwarder.start()
            time.sleep(0.1)  # wait for forwarder to bind sockets

            self.monStream = ZMQStream(self.context.socket(zmq.SUB))
            self.monStream.setsockopt(zmq.SUBSCRIBE, '')
            self.monStream.connect(MONITOR_ENDPOINT)
            self.monStream.on_recv(self.handleMessages)

            self.injectStream = ZMQStream(self.context.socket(zmq.PUB))
            self.injectStream.connect(INJECT_ENDPOINT)

            self.disconnectTimer = ioloop.PeriodicCallback(self.handleDisconnectTimer, 5000)
            self.disconnectTimer.start()
        except:  # pylint: disable=W0702
            errClass, errObject, errTB = sys.exc_info()[:3]
            errText = '%s.%s: %s' % (errClass.__module__, errClass.__name__, str(errObject))
            logging.error(''.join(traceback.format_tb(errTB)))
            logging.error(errText)
            logging.error('[error during startup -- exiting]')
            sys.exit(1)

    def shutdown(self):
        self.messageLog.flush()