def start_logPublisher(self, publish):
    """Activate a ZMQ log publisher and register this node with the camwatcher.

    Binds a PUBHandler on ``tcp://*:<publish>``, announces the node to the
    configured camwatcher over a REQ socket, then removes the pre-existing
    first handler so all logging flows over the PUB socket only.

    :param publish: TCP port number to bind the log PUB socket on
    :return: the root logger, now publishing over ZMQ
    Exits the process if the camwatcher cannot be reached.
    """
    log = logging.getLogger()
    log.info('Activating log publisher on port {}'.format(publish))
    zmq_log_handler = PUBHandler("tcp://*:{}".format(publish))
    zmq_log_handler.setFormatter(
        logging.Formatter(fmt='{asctime}|{message}', style='{'))
    zmq_log_handler.root_topic = self.nodename
    log.addHandler(zmq_log_handler)
    if self.camwatcher:
        handoff = {'node': self.nodename,
                   'log': self.publish_log,
                   'video': self.publish_cam,
                   'host': socket.gethostname()}
        msg = "CameraUp|" + json.dumps(handoff)
        try:
            with zmq.Context().socket(zmq.REQ) as sock:
                log.debug('connecting to ' + self.camwatcher)
                sock.connect(self.camwatcher)
                sock.send(msg.encode("ascii"))
                sock.recv()  # REQ/REP handshake; reply content is unused
        except Exception as ex:
            # BUG FIX: '…' + ex raised TypeError (str + Exception), masking
            # the real error; let logging do the interpolation instead.
            log.exception('Unable to connect with camwatcher: %s', ex)
            sys.exit()
    # assumes handlers[0] is the original console handler — TODO confirm
    log.handlers.remove(log.handlers[0])  # OK, all logging over PUB socket only
    log.setLevel(logging.INFO)
    return log
def add_zmq_handler(root_logger):
    """Attach a ZMQ PUBHandler publishing to ``LOG_ADDR`` to *root_logger*."""
    from zmq.log.handlers import PUBHandler
    root_logger.addHandler(PUBHandler(LOG_ADDR))
def installZMQlogger(port=5800, name=None, clear=True, level=None, logger=None):
    """Add a ZMQ PUB logging handler to a Python logger.

    :param port: local TCP port the PUB socket connects to
    :param name: logger name handed to ``removeZMQlogger`` when *clear* is true
    :param clear: remove previously-installed ZMQ handlers first
    :param level: if given, set the logger's level
    :param logger: target logger (root logger when ``None``)
    :return: the logger the handler was attached to
    """
    if clear:
        removeZMQlogger(name)
    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    # BUG FIX: a PUB socket only sends, so the relevant high-water mark is
    # SNDHWM; setting RCVHWM here had no effect on queueing behavior.
    pub.setsockopt(zmq.SNDHWM, 10)
    pub.connect('tcp://127.0.0.1:%i' % port)
    if logger is None:
        logger = logging.getLogger()
    if level is not None:
        logger.setLevel(level)
    handler = PUBHandler(pub)
    pstr = 'pid %d: ' % os.getpid()
    # Per-level formats: INFO stays terse; the rest carry file/line context.
    handler.formatters = {
        logging.DEBUG: logging.Formatter(
            pstr + "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
        logging.INFO: logging.Formatter(pstr + "%(message)s\n"),
        logging.WARN: logging.Formatter(
            pstr + "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
        logging.ERROR: logging.Formatter(
            pstr + "%(levelname)s %(filename)s:%(lineno)d - %(message)s - %(exc_info)s\n"),
        logging.CRITICAL: logging.Formatter(
            pstr + "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n")}
    logger.addHandler(handler)
    logger.debug('installZMQlogger: handler installed')  # first message always is discarded
    return logger
def start_stats(root_topic="snowpear"):
    """Bind a PUB socket at ``options.stats_endpoint`` and publish root-logger
    records under *root_topic*."""
    context = zmq.Context(cpu_count())
    stats_sock = context.socket(zmq.PUB)
    stats_sock.bind(options.stats_endpoint)
    stats_handler = PUBHandler(stats_sock)
    stats_handler.root_topic = root_topic
    logging.getLogger().addHandler(stats_handler)
def network_handler(protocol='tcp', endpoint='*', port='4547'):
    """Return a PUBHandler bound to ``protocol://endpoint:port``.

    Prints a warning when the endpoint is already in use.
    """
    ctx = zmq.Context()
    pub_sock = ctx.socket(zmq.PUB)
    address = '%s://%s:%s' % (protocol, endpoint, port)
    try:
        pub_sock.bind(address)
    except zmq.error.ZMQError:
        # NOTE(review): on bind failure the handler is still returned over an
        # unbound socket, so records are silently dropped — confirm intended.
        print("Logger::Network logger endpoint is already in use!")
    return PUBHandler(pub_sock)
def forward_logging(self):
    """Mirror this app's log records to ``self.log_url`` over a PUB socket,
    doing nothing when no URL is configured."""
    if not self.log_url:
        return
    self.log.info("Forwarding logging to %s" % self.log_url)
    pub_sock = zmq.Context.instance().socket(zmq.PUB)
    pub_sock.connect(self.log_url)
    zmq_handler = PUBHandler(pub_sock)
    zmq_handler.root_topic = 'controller'
    zmq_handler.setLevel(self.log_level)
    self.log.addHandler(zmq_handler)
def start_logging(self):
    """Extend base start_logging: additionally publish records to
    ``Global.log_url`` (topic 'controller') when one is configured."""
    super(IPControllerApp, self).start_logging()
    log_url = self.master_config.Global.log_url
    if log_url:
        pub_sock = self.factory.context.socket(zmq.PUB)
        pub_sock.connect(log_url)
        zmq_handler = PUBHandler(pub_sock)
        zmq_handler.root_topic = 'controller'
        zmq_handler.setLevel(self.log_level)
        self.log.addHandler(zmq_handler)
def __init__(self, ip="127.0.0.1", port=8000):
    """Bind a PUB socket on ``ip:port`` and route the "LogPublisher" logger
    through it with a file/line-prefixed format."""
    self._logger = logging.getLogger("LogPublisher")
    self._logger.setLevel(logging.DEBUG)
    self.ctx = zmq.Context()
    self.socket = self.ctx.socket(zmq.PUB)
    self.socket.bind("tcp://{}:{}".format(ip, port))
    self.format = logging.Formatter(
        "[%(filename)s:%(lineno)d] %(levelname)s %(message)s")
    self.handler = PUBHandler(self.socket)
    self.handler.setFormatter(self.format)
    self._logger.addHandler(self.handler)
def __init__(self, host, port=config.PUBSUB_LOGGER_PORT):
    """Connect a PUB socket to ``host:port`` and publish this host's log
    records through it."""
    self._logger = logging.getLogger(socket.gethostname())
    self._logger.setLevel(logging.DEBUG)
    self.ctx = zmq.Context()
    self.pub = self.ctx.socket(zmq.PUB)
    self.pub.connect('tcp://{0}:{1}'.format(host, port))
    # A PUBHandler stands in for a console StreamHandler; per-level formats
    # come from the module-level `formatters` table.
    self._handler = PUBHandler(self.pub)
    self._handler.formatters = formatters
    self._logger.addHandler(self._handler)
def zmq_external_logger(host='localhost', port='8899'):
    """Connect a PUB socket to ``host:port``, attach a PUBHandler (topic
    'logging') to the root logger, and return that logger."""
    ctx = zmq.Context()
    pub_sock = ctx.socket(zmq.PUB)
    pub_sock.connect('tcp://{0}:{1}'.format(host, port))
    zmq_handler = PUBHandler(pub_sock)
    zmq_handler.root_topic = 'logging'
    root = logging.getLogger()
    root.addHandler(zmq_handler)
    return root
def log_worker(port, interval=1, level=logging.DEBUG):
    """Worker loop: connect a PUB socket to the local log port and emit a
    randomly-leveled record every *interval* seconds.

    :param port: local TCP port to connect the PUB socket to
    :param interval: seconds to sleep between records
    :param level: level assigned to this worker's logger
    Never returns (infinite loop).
    """
    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    pub.connect('tcp://127.0.0.1:%i' % port)
    logger = logging.getLogger(str(os.getpid()))
    logger.setLevel(level)
    handler = PUBHandler(pub)
    logger.addHandler(handler)
    # BUG FIX: was a Python 2 print statement — a SyntaxError under Python 3
    # and inconsistent with the print() calls used elsewhere in this file.
    print("starting logger at %i with level=%s" % (os.getpid(), level))
    while True:
        level = random.choice(LOG_LEVELS)
        logger.log(level, "Hello from %i!" % os.getpid())
        time.sleep(interval)
def PublishLogging(self, LoggingName="zmq.auth", root_topic="zmq.auth"):
    """
    Route the named python logger through this object's publishing socket.

    :param LoggingName: Name of the python logger service
    :type LoggingName: str
    :param root_topic: the topic given with message. is appended with .<LEVEL>
    :type root_topic: str
    :return:
    """
    pub_handler = PUBHandler(self.Logger)
    pub_handler.root_topic = root_topic
    # DEBUG and INFO share the same timestamped single-line format.
    timestamp_fmt = logging.Formatter(
        fmt='%(asctime)s\t%(levelname)s: %(message)s', datefmt='%H:%M:%S')
    pub_handler.formatters[logging.DEBUG] = timestamp_fmt
    pub_handler.formatters[logging.INFO] = timestamp_fmt
    logging.getLogger(LoggingName).addHandler(pub_handler)
def main():
    """Broker loop: republish every message arriving on the PULL socket
    (port 5558) to PUB subscribers (port 5557); also mirrors root-logger
    records over the same PUB socket. Never returns."""
    ctx = zmq.Context()
    publisher = ctx.socket(zmq.PUB)
    publisher.bind("tcp://*:5557")
    logging.getLogger().addHandler(PUBHandler(publisher))
    print("Network Manager CNVSS Broker listening")
    collector = ctx.socket(zmq.PULL)
    collector.bind("tcp://*:5558")
    while True:
        message = collector.recv()
        print("Publishing update %s" % message)
        publisher.send(message)
import time
import logging
import zmq
from zmq.log.handlers import PUBHandler
import logjson
from collections import defaultdict

# In this demo, we create our own socket.
# NOTE: a PUSH socket is used here (not PUB), so records go to a single
# pulling consumer rather than being broadcast.
ctx = zmq.Context()
socket = ctx.socket(zmq.PUSH)
socket.connect('tcp://127.0.0.1:12345')

handler = PUBHandler(socket)
handler.setLevel('INFO')
# Override all the level formatters to use JSON
handler.formatters = defaultdict(logjson.JSONFormatter)

# Root logger passes everything; the handler's INFO level does the filtering.
logging.basicConfig(level='DEBUG')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)

# Emit one record per second for 100 seconds.
for i in range(100):
    logger.info('blah')
    time.sleep(1)
import time
import logging
from zmq.log.handlers import PUBHandler
import logjson
from collections import defaultdict

# Passing an address string makes PUBHandler create and bind its own socket.
handler = PUBHandler('tcp://127.0.0.1:12345')
handler.setLevel('INFO')
# Override all the level formatters to use JSON
handler.formatters = defaultdict(logjson.JSONFormatter)

# Root logger passes everything; the handler's INFO level does the filtering.
logging.basicConfig(level='DEBUG')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)

# Emit one record per second for 100 seconds.
for i in range(100):
    logger.info('blah')
    time.sleep(1)
def main(settings):
    """
    The :meth:`main` method for worker processes.

    Here we will:

     - create a :class:`ZmqMgmt` instance

     - create a :class:`Fetcher` instance

     - initialize and instantiate the extractor chain

    The `settings` have to be loaded already.
    """
    # create my own identity
    identity = "worker:%s:%s" % (socket.gethostname(), os.getpid())

    ctx = zmq.Context()
    io_loop = IOLoop.instance()

    # initialize the logging subsystem: all records from this process are
    # published on the ZEROMQ_LOGGING endpoint under topic "spyder.worker"
    log_pub = ctx.socket(zmq.PUB)
    log_pub.connect(settings.ZEROMQ_LOGGING)
    zmq_logging_handler = PUBHandler(log_pub)
    zmq_logging_handler.root_topic = "spyder.worker"
    logger = logging.getLogger()
    logger.addHandler(zmq_logging_handler)
    logger.setLevel(settings.LOG_LEVEL_WORKER)

    logger.info("process::Starting up another worker")

    mgmt = create_worker_management(settings, ctx, io_loop)

    logger.debug("process::Initializing fetcher, extractor and scoper")
    # fetcher and extractor share the context and the logging handler
    fetcher = create_worker_fetcher(settings, mgmt, ctx, zmq_logging_handler,
                                    io_loop)
    fetcher.start()
    extractor = create_worker_extractor(settings, mgmt, ctx,
                                        zmq_logging_handler, io_loop)
    extractor.start()

    def quit_worker(raw_msg):
        """
        When the worker should quit, stop the io_loop after 2 seconds.
        """
        msg = MgmtMessage(raw_msg)
        if ZMQ_SPYDER_MGMT_WORKER_QUIT == msg.data:
            logger.info("process::We have been asked to shutdown, do so")
            # delay the stop so the QUIT ack below can still go out
            DelayedCallback(io_loop.stop, 2000, io_loop).start()
            ack = MgmtMessage(topic=ZMQ_SPYDER_MGMT_WORKER,
                              identity=identity,
                              data=ZMQ_SPYDER_MGMT_WORKER_QUIT_ACK)
            # NOTE(review): reaches into mgmt's private _out_stream — confirm
            # there is no public send API on the management object
            mgmt._out_stream.send_multipart(ack.serialize())

    mgmt.add_callback(ZMQ_SPYDER_MGMT_WORKER, quit_worker)
    mgmt.start()

    # notify the master that we are online
    msg = MgmtMessage(topic=ZMQ_SPYDER_MGMT_WORKER, identity=identity,
                      data=ZMQ_SPYDER_MGMT_WORKER_AVAIL)
    mgmt._out_stream.send_multipart(msg.serialize())

    def handle_shutdown_signal(_sig, _frame):
        """
        Called from the os when a shutdown signal is fired.
        """
        msg = MgmtMessage(data=ZMQ_SPYDER_MGMT_WORKER_QUIT)
        quit_worker(msg.serialize())
        # zmq 2.1 stops blocking calls, restart the ioloop
        io_loop.start()

    # handle kill signals
    signal.signal(signal.SIGINT, handle_shutdown_signal)
    signal.signal(signal.SIGTERM, handle_shutdown_signal)

    logger.info("process::waiting for action")

    # this will block until the worker quits
    try:
        io_loop.start()
    except ZMQError:
        logger.debug("Caught a ZMQError. Hopefully during shutdown")
        logger.debug(traceback.format_exc())

    # orderly teardown of the subsystems, then the zmq context
    for mod in [fetcher, extractor, mgmt]:
        mod.close()

    logger.info("process::Houston: Worker down")
    ctx.term()
def __init__(self, puuid):
    """Initialize a flow process: load config, connect redis and ZMQ logging,
    pick a link manager based on ``self.type``, then enter ``self.run()``.

    :param puuid: unique id of this process; used for redis keys, the log
        file name and the ZMQ log topic
    """
    self.config = Config_parser(
        os.path.join(os.environ['FLOW_CONFIG'],
                     'easyFlow_conf.json')).get_config()
    try:
        self._serv_config = redis.Redis(
            unix_socket_path=self.config.redis.project.unix_socket_path,
            decode_responses=True)
    # NOTE(review): bare except — any failure (not just a missing socket)
    # triggers the TCP fallback; consider narrowing to redis errors
    except:  # fallback using TCP instead of unix_socket
        self._serv_config = redis.StrictRedis(
            self.config.redis.project.host,
            self.config.redis.project.port,
            self.config.redis.project.db,
            charset="utf-8",
            decode_responses=True)
    signal.signal(signal.SIGUSR1, self.sig_handler)
    self._alert_manager = Alert_manager()
    self.puuid = puuid
    self.pid = os.getpid()
    self._p = psutil.Process()
    self.custom_message = ""
    # redis key this process polls for commands
    self._keyCommands = 'command_' + self.puuid
    self.state = 'running'
    self.logger = None
    # file handler: one log file per process, named after its puuid
    logging.basicConfig(format='%(levelname)s[%(asctime)s]: %(message)s')
    self.logger = logging.getLogger(__name__)
    self.logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(levelname)s[%(asctime)s]: %(message)s')
    self._log_handler = logging.FileHandler(
        os.path.join(os.environ['FLOW_LOGS'], '{}.log'.format(self.puuid)))
    self._log_handler.setLevel(logging.INFO)
    self._log_handler.setFormatter(formatter)
    self.logger.addHandler(self._log_handler)
    # ZMQ handler: publish this process's records under topic <puuid>
    pub = zmqContext().socket(zmqPUB)
    pub.connect('tcp://{}:{}'.format(self.config.server.host,
                                     self.config.zmq.port))
    self._pubhandler = PUBHandler(pub)
    self._pubhandler.root_topic = self.puuid
    self._pubhandler.setLevel(logging.INFO)
    self.logger.addHandler(self._pubhandler)
    self.update_config()
    self._metadata_interface = Process_metadata_interface()
    self._buffer_metadata_interface = Buffer_metadata_interface()
    # backdate timestamps so the first loop iteration refreshes/reloads
    self.last_refresh = time.time(
    ) - self.state_refresh_rate  # ensure a refresh
    self.last_reload = time.time(
    ) - self.state_refresh_rate  # ensure a reload
    self._processStat = ProcessStat(
        self.config.default_project.process.buffer_time_resolution_in_sec,
        self.config.default_project.process.buffer_time_spanned_in_min)
    self.push_p_info()
    # choose the link manager implementation from the process type
    # (presumably self.type/projectUUID/custom_config are set by
    # update_config() above — TODO confirm)
    if self.type == 'multiplexer_in':
        self.logger.debug('Using multiplexer_in link manager')
        self._link_manager = Multiple_link_manager(self.projectUUID,
                                                   self.puuid,
                                                   self.custom_config,
                                                   self.logger,
                                                   multi_in=True)
    elif self.type == 'multiplexer_out':
        self.logger.debug('Using multiplexer_out link manager')
        self._link_manager = Multiple_link_manager(self.projectUUID,
                                                   self.puuid,
                                                   self.custom_config,
                                                   self.logger,
                                                   multi_in=False)
    elif self.type == 'switch':
        self.logger.debug('Using switch link manager')
        self._link_manager = Multiple_link_manager(self.projectUUID,
                                                   self.puuid,
                                                   self.custom_config,
                                                   self.logger,
                                                   multi_in=False,
                                                   is_switch=True)
    else:
        self._link_manager = Link_manager(self.projectUUID, self.puuid,
                                          self.custom_config, self.logger)
    # do not log to zmq by default
    self.log_to_zmq(False)
    self.pre_run()
    # NOTE: run() is entered from the constructor, so __init__ does not
    # return until the process loop ends
    self.run()
import logging, logging.handlers
import os
import zmq
from zmq.log.handlers import PUBHandler

# Module-level logger; level 1 lets every record through so filtering is
# left entirely to the subscriber side.
_logger = logging.getLogger(__name__)
_logger.setLevel(1)

#LOG_LEVELS = (logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, logging.CRITICAL)

# Publish all records over ZMQ to the SIP logging host on the standard
# logging port (requires SIP_HOSTNAME in the environment).
ctx = zmq.Context()
pub = ctx.socket(zmq.PUB)
port = logging.handlers.DEFAULT_TCP_LOGGING_PORT
pub.connect('tcp://%s:%i' % (os.environ['SIP_HOSTNAME'], port))
handler = PUBHandler(pub)
_logger.addHandler(handler)


def debug(msg):
    """ Log a DEBUG level message """
    _logger.debug(msg)


def info(msg):
    """ Log an INFO level message """
    _logger.info(msg)
def main(settings): """ Main method for master processes. """ # create my own identity identity = "master:%s:%s" % (socket.gethostname(), os.getpid()) ctx = zmq.Context() io_loop = IOLoop.instance() # initialize the logging subsystem log_pub = ctx.socket(zmq.PUB) log_pub.connect(settings.ZEROMQ_LOGGING) zmq_logging_handler = PUBHandler(log_pub) zmq_logging_handler.root_topic = "spyder.master" logger = logging.getLogger() logger.addHandler(zmq_logging_handler) logger.setLevel(settings.LOG_LEVEL_MASTER) logger.info("process::Starting up the master") mgmt = create_master_management(settings, ctx, io_loop) frontier = create_frontier(settings, zmq_logging_handler) publishing_socket = ctx.socket(zmq.PUSH) publishing_socket.setsockopt(zmq.HWM, settings.ZEROMQ_MASTER_PUSH_HWM) publishing_socket.bind(settings.ZEROMQ_MASTER_PUSH) receiving_socket = ctx.socket(zmq.SUB) receiving_socket.setsockopt(zmq.SUBSCRIBE, "") receiving_socket.bind(settings.ZEROMQ_MASTER_SUB) master = ZmqMaster(settings, identity, receiving_socket, publishing_socket, mgmt, frontier, zmq_logging_handler, settings.LOG_LEVEL_MASTER, io_loop) def handle_shutdown_signal(_sig, _frame): """ Called from the os when a shutdown signal is fired. """ master.shutdown() # zmq 2.1 stops blocking calls, restart the ioloop io_loop.start() # handle kill signals signal.signal(signal.SIGINT, handle_shutdown_signal) signal.signal(signal.SIGTERM, handle_shutdown_signal) if settings.MASTER_CALLBACK: callback = import_class(settings.MASTER_CALLBACK) callback(settings, ctx, io_loop, frontier) mgmt.start() master.start() # this will block until the master stops try: io_loop.start() except ZMQError: logger.debug("Caught a ZMQError. Hopefully during shutdown") logger.debug(traceback.format_exc()) master.close() mgmt.close() logger.info("process::Master is down.") log_pub.close() ctx.term()