def start_logPublisher(self, publish):
    log = logging.getLogger()
    log.info('Activating log publisher on port {}'.format(publish))
    zmq_log_handler = PUBHandler("tcp://*:{}".format(publish))
    zmq_log_handler.setFormatter(
        logging.Formatter(fmt='{asctime}|{message}', style='{'))
    zmq_log_handler.root_topic = self.nodename
    log.addHandler(zmq_log_handler)
    if self.camwatcher:
        handoff = {'node': self.nodename,
                   'log': self.publish_log,
                   'video': self.publish_cam,
                   'host': socket.gethostname()}
        msg = "CameraUp|" + json.dumps(handoff)
        try:
            with zmq.Context().socket(zmq.REQ) as sock:
                log.debug('connecting to ' + self.camwatcher)
                sock.connect(self.camwatcher)
                sock.send(msg.encode("ascii"))
                resp = sock.recv().decode("ascii")
        except Exception as ex:
            log.exception('Unable to connect with camwatcher: %s', ex)
            sys.exit()
    log.handlers.remove(log.handlers[0])  # OK, all logging over PUB socket only
    log.setLevel(logging.INFO)
    return log
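The handshake above expects a REP peer listening at self.camwatcher that acknowledges the "CameraUp|{json}" message. A minimal sketch of such a responder, assuming a hypothetical endpoint and reply text (neither is specified by the snippet above):

import json
import zmq

ctx = zmq.Context()
rep = ctx.socket(zmq.REP)
rep.bind("tcp://*:5566")  # hypothetical camwatcher endpoint

while True:
    msg = rep.recv().decode("ascii")
    command, _, payload = msg.partition("|")
    if command == "CameraUp":
        handoff = json.loads(payload)  # keys: node, log, video, host
        print("camera node online:", handoff["node"])
    rep.send(b"OK")  # any reply releases the waiting REQ socket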
def installZMQlogger(port=5800, name=None, clear=True, level=None, logger=None):
    """ Add a ZMQ logging handler to a Python logger """
    if clear:
        removeZMQlogger(name)
    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    pub.setsockopt(zmq.SNDHWM, 10)  # cap queued outgoing log records
    pub.connect('tcp://127.0.0.1:%i' % port)
    if logger is None:
        logger = logging.getLogger()
    if level is not None:
        logger.setLevel(level)
    handler = PUBHandler(pub)
    pid = os.getpid()
    pstr = 'pid %d: ' % pid
    handler.formatters = {
        logging.DEBUG: logging.Formatter(
            pstr + "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
        logging.INFO: logging.Formatter(pstr + "%(message)s\n"),
        logging.WARN: logging.Formatter(
            pstr + "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
        logging.ERROR: logging.Formatter(
            pstr + "%(levelname)s %(filename)s:%(lineno)d - %(message)s - %(exc_info)s\n"),
        logging.CRITICAL: logging.Formatter(
            pstr + "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
    }
    logger.addHandler(handler)
    # a PUB socket drops messages sent before the connection is established,
    # so this first record is typically discarded
    logger.debug('installZMQlogger: handler installed')
    return logger
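PUBHandler publishes each record as a two-frame ZeroMQ message: a topic frame (the handler's root_topic joined with the level name, e.g. b"INFO" or b"myapp.ERROR") and the formatted record text. A minimal collector sketch for the publishers in this section; the port matches installZMQlogger above, everything else is an assumption:

import zmq

ctx = zmq.Context()
sub = ctx.socket(zmq.SUB)
sub.bind('tcp://127.0.0.1:5800')    # the workers connect(), so the sink binds
sub.setsockopt(zmq.SUBSCRIBE, b'')  # subscribe to every topic/level

while True:
    topic, message = sub.recv_multipart()
    print(topic.decode(), message.decode(), end='')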
def start_stats(root_topic="snowpear"):
    context = zmq.Context(cpu_count())
    pub = context.socket(zmq.PUB)
    pub.bind(options.stats_endpoint)
    handler = PUBHandler(pub)
    handler.root_topic = root_topic
    logger = logging.getLogger()
    logger.addHandler(handler)
def __init__(self, interface_or_socket, context=None):
    PUBHandler.__init__(self, interface_or_socket, context)
    self.formatters = {
        logging.DEBUG: logging.Formatter(
            "%(asctime)-6s: %(name)s - %(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
        logging.INFO: logging.Formatter("%(asctime)-6s: %(name)s - %(message)s\n"),
        logging.WARN: logging.Formatter(
            "%(asctime)-6s: %(name)s - %(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
        logging.ERROR: logging.Formatter(
            "%(asctime)-6s: %(name)s - %(levelname)s %(filename)s:%(lineno)d - %(message)s - %(exc_info)s\n"),
        logging.CRITICAL: logging.Formatter(
            "%(asctime)-6s: %(name)s - %(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
    }
def forward_logging(self):
    if self.log_url:
        self.log.info("Forwarding logging to %s" % self.log_url)
        context = zmq.Context.instance()
        lsock = context.socket(zmq.PUB)
        lsock.connect(self.log_url)
        handler = PUBHandler(lsock)
        handler.root_topic = 'controller'
        handler.setLevel(self.log_level)
        self.log.addHandler(handler)
def start_logging(self):
    super(IPControllerApp, self).start_logging()
    if self.master_config.Global.log_url:
        context = self.factory.context
        lsock = context.socket(zmq.PUB)
        lsock.connect(self.master_config.Global.log_url)
        handler = PUBHandler(lsock)
        handler.root_topic = 'controller'
        handler.setLevel(self.log_level)
        self.log.addHandler(handler)
def __init__(self, ip="127.0.0.1", port=8000): self._logger = logging.getLogger("LogPublisher") self._logger.setLevel(logging.DEBUG) self.ctx = zmq.Context() self.socket = self.ctx.socket(zmq.PUB) self.socket.bind("tcp://{}:{}".format(ip, port)) self.handler = PUBHandler(self.socket) self.format = logging.Formatter( "[%(filename)s:%(lineno)d] %(levelname)s %(message)s") self.handler.setFormatter(self.format) self._logger.addHandler(self.handler)
def zmq_external_logger(host='localhost', port='8899'):
    '''Publish logging messages over a zmq.PUB socket.'''
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socket.connect('tcp://{0}:{1}'.format(host, port))
    handler = PUBHandler(socket)
    handler.root_topic = 'logging'
    logger = logging.getLogger()
    logger.addHandler(handler)
    return logger
def run(self):
    context = zmq.Context()
    pub = context.socket(zmq.PUB)
    pub.connect(self.add_log)
    self.log = logging.getLogger()
    self.log.setLevel(logging.DEBUG)
    handler = PUBHandler(pub)
    handler.formatters = formatters
    self.log.addHandler(handler)
    self.log.debug('start_sink')

    self.receiver = context.socket(zmq.PULL)
    self.receiver.bind(self.add_rec)
    self.controller = context.socket(zmq.PUB)
    self.controller.bind(self.add_pub)
    # socket to sinks
    self.main = context.socket(zmq.PUSH)
    self.main.connect(self.add_push)

    # message from main: start
    self.receiver.recv()

    # measure time!
    t_start = time.time()
    results = []
    for task_nbr in range(self.tasks):
        raw_json_data = self.receiver.recv()
        res = json.loads(raw_json_data,
                         object_hook=MyOptResult.unserialize_object)
        results.append(res)
    t_end = time.time()
    t_duration = t_end - t_start

    self.log.debug('Collected {count} results'.format(count=len(results)))
    self.log.debug('Total elapsed time: {duration} s'.format(duration=t_duration))
    self.controller.send(b'KILL')
    self.handle_results(results)
    time.sleep(1)
    self.main.send(b'0')
def add_zmq_handler(root_logger):
    """Create a ZMQ PUBHandler bound to LOG_ADDR and add it to
    root_logger's handlers.
    """
    from zmq.log.handlers import PUBHandler
    zmq_handler = PUBHandler(LOG_ADDR)
    root_logger.addHandler(zmq_handler)
def PublishLogging(self, LoggingName="zmq.auth", root_topic="zmq.auth"):
    """ Publish the given Python logger through the publishing service.

    :param LoggingName: name of the Python logger
    :type LoggingName: str
    :param root_topic: topic attached to each message; the level name is
        appended as ``.<LEVEL>``
    :type root_topic: str
    """
    handler = PUBHandler(self.Logger)
    handler.root_topic = root_topic
    handler.formatters[logging.DEBUG] = logging.Formatter(
        fmt='%(asctime)s\t%(levelname)s: %(message)s', datefmt='%H:%M:%S')
    handler.formatters[logging.INFO] = logging.Formatter(
        fmt='%(asctime)s\t%(levelname)s: %(message)s', datefmt='%H:%M:%S')
    l = logging.getLogger(LoggingName)
    l.addHandler(handler)
def network_handler(protocol='tcp', endpoint='*', port='4547'):
    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    try:
        pub.bind('%s://%s:%s' % (protocol, endpoint, port))
    except zmq.error.ZMQError:
        print("Logger::Network logger endpoint is already in use!")
    handler = PUBHandler(pub)
    return handler
class LogPublisher(object):
    """ Centralized log publisher, used in MP.Process.run() to send log
    messages to a log collector.
    """

    def __init__(self, ip="127.0.0.1", port=8000):
        self._logger = logging.getLogger("LogPublisher")
        self._logger.setLevel(logging.DEBUG)
        self.ctx = zmq.Context()
        self.socket = self.ctx.socket(zmq.PUB)
        self.socket.bind("tcp://{}:{}".format(ip, port))
        self.handler = PUBHandler(self.socket)
        self.format = logging.Formatter(
            "[%(filename)s:%(lineno)d] %(levelname)s %(message)s")
        self.handler.setFormatter(self.format)
        self._logger.addHandler(self.handler)

    @property
    def logger(self):
        return self._logger
def __init__(self, host, port=config.PUBSUB_LOGGER_PORT):
    self._logger = logging.getLogger(socket.gethostname())
    self._logger.setLevel(logging.DEBUG)
    self.ctx = zmq.Context()
    self.pub = self.ctx.socket(zmq.PUB)
    self.pub.connect('tcp://{0}:{1}'.format(host, port))
    # create console handler and set level to info
    # handler = logging.StreamHandler(sys.stdout)
    self._handler = PUBHandler(self.pub)
    self._handler.formatters = formatters
    self._logger.addHandler(self._handler)
def _start_logger(context):
    global log
    controller = context.socket(zmq.PUB)
    controller.bind(ADD_LOG_CONTROLLER)
    l = Logger(ADD_LOGGING, ADD_LOG_LH_CONTROLLER)
    l.start()
    time.sleep(2)
    pub = context.socket(zmq.PUB)
    pub.connect(ADD_LH_LOGGING)
    log = logging.getLogger('main')
    log.setLevel(logging.DEBUG)
    handler = PUBHandler(pub)
    handler.formatters = formatters
    log.addHandler(handler)
    return controller
def log_worker(port, interval=1, level=logging.DEBUG):
    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    pub.connect('tcp://127.0.0.1:%i' % port)
    logger = logging.getLogger(str(os.getpid()))
    logger.setLevel(level)
    handler = PUBHandler(pub)
    logger.addHandler(handler)
    print("starting logger at %i with level=%s" % (os.getpid(), level))
    while True:
        level = random.choice(LOG_LEVELS)
        logger.log(level, "Hello from %i!" % os.getpid())
        time.sleep(interval)
def main():
    ctx = zmq.Context()
    publisher = ctx.socket(zmq.PUB)
    publisher.bind("tcp://*:5557")
    handler = PUBHandler(publisher)
    logger = logging.getLogger()
    logger.addHandler(handler)
    print("Network Manager CNVSS Broker listening")
    collector = ctx.socket(zmq.PULL)
    collector.bind("tcp://*:5558")
    while True:
        message = collector.recv()
        print("Publishing update %s" % message)
        publisher.send(message)
def __init__(self, engine, *args, **kwargs):
    PUBHandler.__init__(self, *args, **kwargs)
    self.engine = engine
def main(settings):
    """ Main method for master processes. """
    # create my own identity
    identity = "master:%s:%s" % (socket.gethostname(), os.getpid())

    ctx = zmq.Context()
    io_loop = IOLoop.instance()

    # initialize the logging subsystem
    log_pub = ctx.socket(zmq.PUB)
    log_pub.connect(settings.ZEROMQ_LOGGING)
    zmq_logging_handler = PUBHandler(log_pub)
    zmq_logging_handler.root_topic = "spyder.master"
    logger = logging.getLogger()
    logger.addHandler(zmq_logging_handler)
    logger.setLevel(settings.LOG_LEVEL_MASTER)
    logger.info("process::Starting up the master")

    mgmt = create_master_management(settings, ctx, io_loop)
    frontier = create_frontier(settings, zmq_logging_handler)

    publishing_socket = ctx.socket(zmq.PUSH)
    publishing_socket.setsockopt(zmq.HWM, settings.ZEROMQ_MASTER_PUSH_HWM)
    publishing_socket.bind(settings.ZEROMQ_MASTER_PUSH)

    receiving_socket = ctx.socket(zmq.SUB)
    receiving_socket.setsockopt(zmq.SUBSCRIBE, "")
    receiving_socket.bind(settings.ZEROMQ_MASTER_SUB)

    master = ZmqMaster(settings, identity, receiving_socket,
                       publishing_socket, mgmt, frontier,
                       zmq_logging_handler, settings.LOG_LEVEL_MASTER,
                       io_loop)

    def handle_shutdown_signal(_sig, _frame):
        """ Called from the os when a shutdown signal is fired. """
        master.shutdown()
        # zmq 2.1 stops blocking calls, restart the ioloop
        io_loop.start()

    # handle kill signals
    signal.signal(signal.SIGINT, handle_shutdown_signal)
    signal.signal(signal.SIGTERM, handle_shutdown_signal)

    if settings.MASTER_CALLBACK:
        callback = import_class(settings.MASTER_CALLBACK)
        callback(settings, ctx, io_loop, frontier)

    mgmt.start()
    master.start()

    # this will block until the master stops
    try:
        io_loop.start()
    except ZMQError:
        logger.debug("Caught a ZMQError. Hopefully during shutdown")
        logger.debug(traceback.format_exc())

    master.close()
    mgmt.close()

    logger.info("process::Master is down.")
    log_pub.close()
    ctx.term()
def register_zmq_handler(cls, zmq_socket):  # pragma: no cover
    handler = PUBHandler(zmq_socket)
    handler.root_topic = "logger"
    logger = logging.getLogger()
    logger.addHandler(handler)
import logging
import logging.handlers
import os

import zmq
from zmq.log.handlers import PUBHandler

_logger = logging.getLogger(__name__)
_logger.setLevel(1)

# LOG_LEVELS = (logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, logging.CRITICAL)

ctx = zmq.Context()
pub = ctx.socket(zmq.PUB)
port = logging.handlers.DEFAULT_TCP_LOGGING_PORT
pub.connect('tcp://%s:%i' % (os.environ['SIP_HOSTNAME'], port))
handler = PUBHandler(pub)
_logger.addHandler(handler)


def debug(msg):
    """ Log a DEBUG level message """
    _logger.debug(msg)


def info(msg):
    """ Log an INFO level message """
    _logger.info(msg)
import time
import logging
from collections import defaultdict

import zmq
from zmq.log.handlers import PUBHandler

import logjson

# In this demo, we create our own socket.
ctx = zmq.Context()
socket = ctx.socket(zmq.PUSH)
socket.connect('tcp://127.0.0.1:12345')

handler = PUBHandler(socket)
handler.setLevel('INFO')
# Override all the level formatters to use JSON
handler.formatters = defaultdict(logjson.JSONFormatter)

logging.basicConfig(level='DEBUG')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)

for i in range(100):
    logger.info('blah')
    time.sleep(1)
import time
import logging
from collections import defaultdict

from zmq.log.handlers import PUBHandler

import logjson

# Here PUBHandler creates (and binds) the PUB socket itself from the
# interface string, instead of being handed a pre-made socket.
handler = PUBHandler('tcp://127.0.0.1:12345')
handler.setLevel('INFO')
# Override all the level formatters to use JSON
handler.formatters = defaultdict(logjson.JSONFormatter)

logging.basicConfig(level='DEBUG')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)

for i in range(100):
    logger.info('blah')
    time.sleep(1)
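logjson in the two demos above is a local helper module, not a published package. A minimal stand-in that would satisfy defaultdict(logjson.JSONFormatter) could look like this; the exact field set is an assumption:

# logjson.py -- hypothetical stand-in for the local module used above
import json
import logging


class JSONFormatter(logging.Formatter):
    """Render each log record as one JSON object per line."""

    def format(self, record):
        payload = {
            'time': self.formatTime(record),
            'name': record.name,
            'level': record.levelname,
            'message': record.getMessage(),
        }
        if record.exc_info:
            payload['exc_info'] = self.formatException(record.exc_info)
        return json.dumps(payload)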
class Process(metaclass=ABCMeta):

    def __init__(self, puuid):
        self.config = Config_parser(
            os.path.join(os.environ['FLOW_CONFIG'],
                         'easyFlow_conf.json')).get_config()
        try:
            self._serv_config = redis.Redis(
                unix_socket_path=self.config.redis.project.unix_socket_path,
                decode_responses=True)
        except:
            # fallback using TCP instead of unix_socket
            self._serv_config = redis.StrictRedis(
                self.config.redis.project.host,
                self.config.redis.project.port,
                self.config.redis.project.db,
                charset="utf-8",
                decode_responses=True)
        signal.signal(signal.SIGUSR1, self.sig_handler)
        self._alert_manager = Alert_manager()

        self.puuid = puuid
        self.pid = os.getpid()
        self._p = psutil.Process()
        self.custom_message = ""
        self._keyCommands = 'command_' + self.puuid
        self.state = 'running'

        self.logger = None
        logging.basicConfig(format='%(levelname)s[%(asctime)s]: %(message)s')
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        formatter = logging.Formatter('%(levelname)s[%(asctime)s]: %(message)s')
        self._log_handler = logging.FileHandler(
            os.path.join(os.environ['FLOW_LOGS'],
                         '{}.log'.format(self.puuid)))
        self._log_handler.setLevel(logging.INFO)
        self._log_handler.setFormatter(formatter)
        self.logger.addHandler(self._log_handler)

        pub = zmqContext().socket(zmqPUB)
        pub.connect('tcp://{}:{}'.format(self.config.server.host,
                                         self.config.zmq.port))
        self._pubhandler = PUBHandler(pub)
        self._pubhandler.root_topic = self.puuid
        self._pubhandler.setLevel(logging.INFO)
        self.logger.addHandler(self._pubhandler)

        self.update_config()
        self._metadata_interface = Process_metadata_interface()
        self._buffer_metadata_interface = Buffer_metadata_interface()
        self.last_refresh = time.time() - self.state_refresh_rate  # ensure a refresh
        self.last_reload = time.time() - self.state_refresh_rate  # ensure a reload
        self._processStat = ProcessStat(
            self.config.default_project.process.buffer_time_resolution_in_sec,
            self.config.default_project.process.buffer_time_spanned_in_min)
        self.push_p_info()

        if self.type == 'multiplexer_in':
            self.logger.debug('Using multiplexer_in link manager')
            self._link_manager = Multiple_link_manager(
                self.projectUUID, self.puuid, self.custom_config,
                self.logger, multi_in=True)
        elif self.type == 'multiplexer_out':
            self.logger.debug('Using multiplexer_out link manager')
            self._link_manager = Multiple_link_manager(
                self.projectUUID, self.puuid, self.custom_config,
                self.logger, multi_in=False)
        elif self.type == 'switch':
            self.logger.debug('Using switch link manager')
            self._link_manager = Multiple_link_manager(
                self.projectUUID, self.puuid, self.custom_config,
                self.logger, multi_in=False, is_switch=True)
        else:
            self._link_manager = Link_manager(
                self.projectUUID, self.puuid, self.custom_config, self.logger)

        # do not log to zmq by default
        self.log_to_zmq(False)

        self.pre_run()
        self.run()

    def update_config(self):
        configData = self._serv_config.get('config_' + self.puuid)
        if configData is None:  # already updated, should not happen
            return
        configData = json.loads(configData)
        self.custom_config = configData['custom_config']
        self._serv_config.delete('config_' + self.puuid)
        self.state_refresh_rate = self.config.web.refresh_metadata_interval_in_sec
        self.projectUUID = configData.get('projectUUID', 'No projectUUID')
        self.name = configData.get('name', 'No name')
        self.type = configData.get('type', None)
        self.description = configData.get('description', '')
        self.bulletin_level = configData.get('bulletin_level', 'WARNING')
        if self.logger:  # update logging level
            if self.bulletin_level == 'DEBUG':
                self.logger.setLevel(logging.DEBUG)
                self._log_handler.setLevel(logging.DEBUG)
                self._pubhandler.setLevel(logging.DEBUG)
            elif self.bulletin_level == 'INFO':
                self.logger.setLevel(logging.INFO)
                self._log_handler.setLevel(logging.INFO)
                self._pubhandler.setLevel(logging.INFO)
            elif self.bulletin_level == 'WARNING':
                self.logger.setLevel(logging.WARNING)
                self._pubhandler.setLevel(logging.WARNING)
                self._log_handler.setLevel(logging.WARNING)
            elif self.bulletin_level == 'ERROR':
                self.logger.setLevel(logging.ERROR)
                self._pubhandler.setLevel(logging.ERROR)
                self._log_handler.setLevel(logging.ERROR)
        self.x = configData.get('x', 0)
        self.y = configData.get('y', 0)
        self.config = Config_parser('config/easyFlow_conf.json',
                                    self.projectUUID).get_config()

    def sig_handler(self, signum, frame):
        self.logger.debug('Signal received')
        self.push_p_info()

    def reload(self):
        self.logger.debug('Reloading configuration and connections')
        self.update_config()
        self._link_manager.update_connections(self.custom_config)

    def change_name(self, name):
        self.logger.info('Changing process name')
        self.name = name

    def get_uuid(self):
        return self.puuid

    def get_system_info(self):
        self.logger.debug("Getting process's system info")
        to_ret = {}
        to_ret['cpu_load'] = self._p.cpu_percent()
        to_ret['memory_load'] = self._p.memory_info().rss
        to_ret['pid'] = self.pid
        to_ret['state'] = self.state
        to_ret['custom_message'] = self.custom_message
        return to_ret

    def get_representation(self, full=False):
        pInfo = objToDictionnary(self, full=full, to_ignore=['logger'])
        dStat = self._processStat.get_dico()
        dStat.update(self.get_system_info())
        pInfo['stats'] = dStat
        pInfo['representationTimestamp'] = time.time()
        return pInfo

    # push current process info to redis depending on the refresh rate
    def push_p_info(self):
        now = time.time()
        if now - self.last_refresh > self.state_refresh_rate:
            self.logger.debug('Pushing process info to redis')
            self.last_refresh = now
            self.timestamp = now
            self._metadata_interface.push_info(self.get_representation())

    def push_process_start(self):
        self.logger.debug('Sending that process has started')
        self._alert_manager.send_alert(
            title=self.name,
            content='{state}[{pid}] ({now})'.format(
                now=time.strftime('%H:%M:%S'), pid=self.pid, state="started"),
            mType='info',
            group=self.projectUUID + '_processes')

    def process_commands(self):
        self.logger.debug('Processing inbound commands')
        while True:
            rawCommand = self._serv_config.rpop(self._keyCommands)
            if rawCommand is not None:  # there is a message
                jCommand = json.loads(rawCommand)
                self.apply_operation(jCommand['operation'],
                                     jCommand.get('data', None))
            else:
                break

    '''
    - Process incoming commands
    - Push self info
    - Process messages
    '''
    def run(self):
        self.push_process_start()
        while True:
            # process incoming commands
            self.process_commands()
            # send info about current module state
            self.push_p_info()

            if self.state == 'running':
                # process flowItems
                flowItem = self._link_manager.get_flowItem()
                if flowItem is not None:
                    # FIXME SHOULD WE LOG HERE? PERFS ISSUE?
                    self._processStat.register_processing(flowItem)
                    self.process_message(flowItem.message(), flowItem.channel)
                    self._processStat.register_processed()
                else:
                    self.logger.debug(
                        'No message, sleeping %s sec',
                        self.config.default_project.process.pooling_time_interval_get_message)
                    time.sleep(self.config.default_project.process.pooling_time_interval_get_message)
            else:  # process paused
                time.sleep(self.config.default_project.process.pooling_time_interval_get_message)

    def forward(self, msg, channel=0):
        flowItem = FlowItem(msg, channel=channel)
        if self._link_manager.push_flowItem(flowItem):
            self._processStat.register_forward(flowItem)

    def apply_operation(self, operation, data):
        self.logger.debug('Applying operation: %s', operation)
        if operation == 'reload':
            # this condition prevents multiple reloads in case of a
            # buffered reload operation
            if time.time() - self.last_reload > self.config.processes.max_reload_interval:
                self.reload()
                self.last_reload = time.time()
                self._alert_manager.send_alert(
                    title=self.name,
                    content='got reloaded ({now})'.format(
                        now=time.strftime('%H:%M:%S')),
                    mType='info')
        elif operation == 'pause':
            self.pause()
        elif operation == 'play':
            self.play()
        elif operation == 'shutdown':
            self.shutdown()
        elif operation == 'log_to_zmq':
            self.log_to_zmq(True)
        elif operation == 'stop_log_to_zmq':
            self.log_to_zmq(False)
        else:
            pass

    def pause(self):
        self.logger.warning('Pausing process')
        self.state = 'paused'

    def play(self):
        self.logger.info('Playing process')
        self.state = 'running'

    def shutdown(self):
        sys.exit(0)

    def log_to_zmq(self, should_log):
        # toggle the ZMQ PUBHandler: CRITICAL effectively mutes it
        if should_log:
            self._pubhandler.setLevel(self.logger.getEffectiveLevel())
            self.logger.info('Started logging to ZMQ')
        else:
            self._pubhandler.setLevel(logging.CRITICAL)
            self.logger.info('Stopped logging to ZMQ')

    # can be used to add variables in processes
    def pre_run(self):
        pass

    @abstractmethod
    def process_message(self, msg, channel=0):
        pass
def main(settings):
    """
    The :meth:`main` method for worker processes.

    Here we will:

    - create a :class:`ZmqMgmt` instance
    - create a :class:`Fetcher` instance
    - initialize and instantiate the extractor chain

    The `settings` have to be loaded already.
    """
    # create my own identity
    identity = "worker:%s:%s" % (socket.gethostname(), os.getpid())

    ctx = zmq.Context()
    io_loop = IOLoop.instance()

    # initialize the logging subsystem
    log_pub = ctx.socket(zmq.PUB)
    log_pub.connect(settings.ZEROMQ_LOGGING)
    zmq_logging_handler = PUBHandler(log_pub)
    zmq_logging_handler.root_topic = "spyder.worker"
    logger = logging.getLogger()
    logger.addHandler(zmq_logging_handler)
    logger.setLevel(settings.LOG_LEVEL_WORKER)
    logger.info("process::Starting up another worker")

    mgmt = create_worker_management(settings, ctx, io_loop)

    logger.debug("process::Initializing fetcher, extractor and scoper")
    fetcher = create_worker_fetcher(settings, mgmt, ctx, zmq_logging_handler,
                                    io_loop)
    fetcher.start()
    extractor = create_worker_extractor(settings, mgmt, ctx,
                                        zmq_logging_handler, io_loop)
    extractor.start()

    def quit_worker(raw_msg):
        """ When the worker should quit, stop the io_loop after 2 seconds. """
        msg = MgmtMessage(raw_msg)
        if ZMQ_SPYDER_MGMT_WORKER_QUIT == msg.data:
            logger.info("process::We have been asked to shutdown, do so")
            DelayedCallback(io_loop.stop, 2000, io_loop).start()
            ack = MgmtMessage(topic=ZMQ_SPYDER_MGMT_WORKER,
                              identity=identity,
                              data=ZMQ_SPYDER_MGMT_WORKER_QUIT_ACK)
            mgmt._out_stream.send_multipart(ack.serialize())

    mgmt.add_callback(ZMQ_SPYDER_MGMT_WORKER, quit_worker)
    mgmt.start()

    # notify the master that we are online
    msg = MgmtMessage(topic=ZMQ_SPYDER_MGMT_WORKER, identity=identity,
                      data=ZMQ_SPYDER_MGMT_WORKER_AVAIL)
    mgmt._out_stream.send_multipart(msg.serialize())

    def handle_shutdown_signal(_sig, _frame):
        """ Called from the os when a shutdown signal is fired. """
        msg = MgmtMessage(data=ZMQ_SPYDER_MGMT_WORKER_QUIT)
        quit_worker(msg.serialize())
        # zmq 2.1 stops blocking calls, restart the ioloop
        io_loop.start()

    # handle kill signals
    signal.signal(signal.SIGINT, handle_shutdown_signal)
    signal.signal(signal.SIGTERM, handle_shutdown_signal)

    logger.info("process::waiting for action")

    # this will block until the worker quits
    try:
        io_loop.start()
    except ZMQError:
        logger.debug("Caught a ZMQError. Hopefully during shutdown")
        logger.debug(traceback.format_exc())

    for mod in [fetcher, extractor, mgmt]:
        mod.close()

    logger.info("process::Houston: Worker down")
    ctx.term()