Example No. 1
 def router_main(self):
     """
     Kick off router with logging and settings import
     """
     setup_logger('eventmq')
     import_settings()
     self.start(frontend_addr=conf.FRONTEND_ADDR,
                backend_addr=conf.BACKEND_ADDR)
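Each of these entry points follows the same pattern: configure logging, pull settings into the conf module, then hand control to start(). Below is a minimal launcher sketch of how such an entry point might be wired into a script; the import path is an assumption and the snippet is illustrative rather than taken from the eventmq sources.

    # Hypothetical launcher sketch; the import path is assumed, not confirmed.
    from eventmq.router import Router

    def main():
        router = Router()
        router.router_main()  # sets up logging, loads settings, then blocks in start()

    if __name__ == '__main__':
        main()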
Example No. 2
 def pub_main(self):
     """
     Kick off PubSub with logging and settings import
     """
     setup_logger('eventmq')
     import_settings(section='publisher')
     self.start(incoming_addr=conf.PUBLISHER_INCOMING_ADDR,
                outgoing_addr=conf.PUBLISHER_OUTGOING_ADDR)
Example No. 3
 def scheduler_main(self):
     """
     Kick off scheduler with logging and settings import
     """
     setup_logger("eventmq")
     import_settings()
     self.__init__()
     self.start(addr=conf.SCHEDULER_ADDR)
Example No. 4
 def router_main(self):
     """
     Kick off router with logging and settings import
     """
     setup_logger('eventmq')
     import_settings()
     setup_wal_logger('eventmq-wal', conf.WAL)
     self.start(frontend_addr=conf.FRONTEND_ADDR,
                backend_addr=conf.BACKEND_ADDR,
                administrative_addr=conf.ADMINISTRATIVE_ADDR)
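This variant also wires up a write-ahead log and an administrative socket, all driven by values on conf. As a rough illustration (and leaving out the WAL logger setup), the same attributes that import_settings() reads from the config file can be assigned on the conf module directly before starting the router; the conf import path, addresses, and log path below are placeholders and assumptions, not eventmq defaults.

    # Illustration only; the conf import path, addresses, and WAL path are placeholders.
    from eventmq import conf
    from eventmq.router import Router

    conf.FRONTEND_ADDR = 'tcp://127.0.0.1:47291'        # placeholder client-facing address
    conf.BACKEND_ADDR = 'tcp://127.0.0.1:47290'         # placeholder job-manager-facing address
    conf.ADMINISTRATIVE_ADDR = 'tcp://127.0.0.1:47293'  # placeholder management address
    conf.WAL = '/var/log/eventmq/wal.log'               # placeholder write-ahead log path

    router = Router()
    router.start(frontend_addr=conf.FRONTEND_ADDR,
                 backend_addr=conf.BACKEND_ADDR,
                 administrative_addr=conf.ADMINISTRATIVE_ADDR)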
Example No. 5
    def jobmanager_main(self, broker_addr=None):
        """
        Kick off jobmanager with logging and settings import

        Args:
            broker_addr (str): The address of the broker to connect to.
        """
        setup_logger('')
        import_settings()
        import_settings(section='jobmanager')

        # If this manager was passed explicit options, favor those
        if self.queues:
            conf.QUEUES = self.queues

        if broker_addr:
            conf.WORKER_ADDR = broker_addr

        self.start(addr=conf.WORKER_ADDR, queues=conf.QUEUES)
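Because explicit options win over the config file here, a caller can point a manager at a specific broker and queue set without editing settings. A hypothetical usage sketch (the import path, broker address, and queue names are placeholders; the constructor kwargs come from the JobManager __init__ shown in the last example below):

    # Hypothetical usage sketch; all values are placeholders.
    from eventmq.jobmanager import JobManager

    manager = JobManager(queues=('default', 'billing'), skip_signal=True)
    manager.jobmanager_main(broker_addr='tcp://broker.internal:47290')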
Example No. 6
    def __init__(self, *args, **kwargs):
        super(Router, self).__init__(*args, **kwargs)  # Creates _meta

        setup_logger("eventmq")

        self.name = generate_device_name()
        logger.info('EventMQ Version {}'.format(__version__))
        logger.info('Initializing Router {}...'.format(self.name))

        self.poller = poller.Poller()

        self.incoming = receiver.Receiver()
        self.outgoing = receiver.Receiver()
        self.administrative_socket = receiver.Receiver()

        self.poller.register(self.incoming, poller.POLLIN)
        self.poller.register(self.outgoing, poller.POLLIN)
        self.poller.register(self.administrative_socket, poller.POLLIN)

        self.status = STATUS.ready

        #: Tracks the last time the worker queues were cleaned of dead workers
        self._meta['last_worker_cleanup'] = 0

        #: JobManager address by queue name. The lists here are Last Recently
        #: Used queues where a worker is popped off when given a job, and
        #: appended when one finishes. There is one entry per available
        #: worker slot, so you may see duplicate addresses.
        #:
        #: Example:
        #:     {'default': ['w1', 'w2', 'w1', 'w4']}
        self.queues = {}

        #: Worker metadata, keyed by worker. Details such as queue membership
        #: and the timestamp of the last message received from that worker are
        #: stored here.
        #:
        #: **Keys**
        #:  * ``queues``: list() of queue names and priorities the worker
        #:    belongs to. e.g. (10, 'default')
        #:  * ``hb``: monotonic timestamp of the last received message from
        #:    worker
        #:  * ``available_slots``: int count of jobs this manager can still
        #:    process.
        self.workers = {}

        #: Message buffer. Holds messages that can't be sent yet because there
        #: are no workers available to take the job.
        self.waiting_messages = {}

        # Key: Queue.name, Value: # of messages sent to workers on that queue
        # Includes REQUESTS in flight but not REQUESTS queued
        self.processed_message_counts = {}

        # Same as above but Key: Worker.uuid
        self.processed_message_counts_by_worker = {}

        #: Tracks the last time the scheduler queue was cleaned out of dead
        #: schedulers
        self._meta['last_scheduler_cleanup'] = 0

        #: Queue for schedulers to use:
        self.scheduler_queue = []

        #: Scheduler clients. Clients are able to send SCHEDULE commands that
        #: need to be routed to a scheduler, which will keep track of time and
        #: run the job.
        #: Contains dictionaries:
        #:     self.schedulers[<scheduler_zmq_id>] = {
        #:       'hb': <last_recv_heartbeat>,
        #:     }
        self.schedulers = {}

        #: Latency tracking dictionary
        #: Key: msgid of each REQUEST received and forwarded to a worker
        #: Value: (timestamp, queue_name)
        self.job_latencies = {}

        #: Executed function tracking dictionary
        #: Key: msgid of each REQUEST received and forwarded to a worker
        #: Value: (function_name, queue_name)
        self.executed_functions = {}

        #: Set to True when the router should die.
        self.received_disconnect = False

        # Tests skip setting the signals.
        if not kwargs.pop('skip_signal', False):
            signal.signal(signal.SIGHUP, self.sighup_handler)
            signal.signal(signal.SIGUSR1, self.handle_pdb)
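The self.queues structure documented above behaves like a rotation of available worker slots: a slot is popped when a job is handed out and appended back when the job finishes. Purely as an illustration of that data structure (this is not router code, and whether the pop happens at the head or the tail is an implementation detail; the sketch uses the head):

    # Illustration of the self.queues rotation described above; not eventmq code.
    queues = {'default': ['w1', 'w2', 'w1', 'w4']}

    worker = queues['default'].pop(0)   # hand the next job to the slot at the head
    # ... the job runs on that worker ...
    queues['default'].append(worker)    # return the slot once the job finishes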
Example No. 7
    def __init__(self, *args, **kwargs):
        """
        .. note::

           All args are optional unless otherwise noted.

        Args:
            name (str): unique name of this instance. By default a uuid will be
                 generated.
            queues (tuple): List of queue names to listen on.
            skip_signal (bool): Don't register the signal handlers. Useful for
                 testing.
        """
        super(JobManager, self).__init__(*args, **kwargs)

        setup_logger("eventmq")

        #: Define the name of this JobManager instance. Useful to know when
        #: referring to the logs.
        self.name = kwargs.pop('name', None) or generate_device_name()
        logger.info('EventMQ Version {}'.format(__version__))
        logger.info('Initializing JobManager {}...'.format(self.name))

        #: Optional override for how many jobs this manager runs concurrently
        concurrent_jobs = kwargs.pop('concurrent_jobs', None)
        if concurrent_jobs is not None:
            conf.CONCURRENT_JOBS = concurrent_jobs

        #: List of queues that this job manager is listening on
        self.queues = kwargs.pop('queues', None)

        if not kwargs.pop('skip_signal', False):
            # handle any sighups by reloading config
            signal.signal(signal.SIGHUP, self.sighup_handler)
            signal.signal(signal.SIGTERM, self.sigterm_handler)
            signal.signal(signal.SIGINT, self.sigterm_handler)
            signal.signal(signal.SIGQUIT, self.sigterm_handler)
            signal.signal(signal.SIGUSR1, self.handle_pdb)

        #: JobManager starts out by INFORMing the router of its existence,
        #: then telling the router that it is READY. The reply will be the unit
        #: of work.
        # Despite the name, jobs are received on this socket
        self.outgoing = Sender(name=self.name)

        self.poller = Poller()

        #: Stats and monitoring information

        #: Jobs in flight tracks all jobs currently executing.
        #: Key: msgid, Value: The message with all the details of the job
        self.jobs_in_flight = {}

        #: Running total number of REQUEST messages received on the broker
        self.total_requests = 0
        #: Running total number of READY messages sent to the broker
        self.total_ready_sent = 0
        #: Keep track of what pids are servicing our requests
        #: Key: pid, Value: # of jobs completed on the process with that pid
        self.pid_distribution = {}

        #: Setup worker queues
        self._mp_manager = MPManager()
        self.request_queue = self._mp_manager.Queue()
        self.finished_queue = self._mp_manager.Queue()
        self._setup()
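Pulling the documented arguments together, constructing a manager with an explicit name, queue list, and concurrency limit might look like the sketch below; the import path and all values are placeholders rather than recommended settings.

    # Construction sketch; import path and values are assumptions/placeholders.
    from eventmq.jobmanager import JobManager

    manager = JobManager(name='jm-01',               # otherwise a device name is generated
                         queues=('default',),
                         concurrent_jobs=4,          # copied into conf.CONCURRENT_JOBS
                         skip_signal=True)           # skip signal handlers, e.g. in tests
    manager.jobmanager_main()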