Example No. 1
 def __init__(self, bus, objpath, store):
     dbus.service.Object.__init__(self, bus, objpath)
     self.store = store
     #: maps agentid (ex. inject-:1.234) to object path (ex:
     #: /agent/inject)
     self.clients = {}
     self.exiting = False
     #: locks[domain] is a set of (lockid, selector) whose processing
     #: has started (might even be finished). Allows several agents that
     #: perform the same stateless computation to run in parallel
     self.locks = defaultdict(set)
     signal.signal(signal.SIGTERM, self.sigterm_handler)
     #: maps agentids to their names
     self.agentnames = {}
     #: maps agentids to their serialized configuration - output altering
     #: options only
     self.agents_output_altering_options = {}
     #: maps agentids to their serialized configuration
     self.agents_full_config_txts = {}
     #: monotonically increasing user request counter
     self.userrequestid = 0
     #: number of descriptors
     self.descriptor_count = 0
     #: count descriptors marked as processed/processable by each uniquely
     #: configured agent
     self.descriptor_handled_count = {}
     #: uniq_conf_clients[(agent_name, config_txt)] = [agent_id, ...]
     self.uniq_conf_clients = defaultdict(list)
     #: retry_counters[(agent_name, config_txt, domain, selector)] = \
     #:     number of remaining retries
     self.retry_counters = defaultdict(dict)
     self.sched = Sched(self._sched_inject)
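
The locks bookkeeping above (locks[domain] holding (lockid, selector) pairs) is what lets several identically configured stateless agents run in parallel without duplicating work. A minimal sketch of that pattern, assuming a hypothetical try_lock() helper rather than the project's actual lock method:

    from collections import defaultdict

    # Hypothetical helper illustrating the locks[domain] bookkeeping: a lock
    # is granted only the first time a (lockid, selector) pair is requested
    # for a domain, so the same stateless computation is never started twice,
    # while distinct selectors can still be processed in parallel.
    def try_lock(locks, lockid, domain, selector):
        key = (lockid, selector)
        if key in locks[domain]:
            return False            # processing already started (or finished)
        locks[domain].add(key)      # mark processing as started
        return True

    locks = defaultdict(set)
    assert try_lock(locks, "inject|{}", "default", "/sample/1")      # granted
    assert not try_lock(locks, "inject|{}", "default", "/sample/1")  # duplicate
    assert try_lock(locks, "inject|{}", "default", "/sample/2")      # new selector
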
Example No. 2
 def __init__(self, options):
     Bus.__init__(self)
     #: stores currently held locks [(lockid, domain, selector)]
     self.locks = defaultdict(set)
     #: Next available agent id. Never decreases.
     self.agent_count = 0
     self.store = RAMStorage()  # TODO add support for DiskStorage ?
     # TODO save internal state at bus exit (only useful with DiskStorage)
     #: maps agentid (ex. inject-12) to agentdesc
     self.agent_descs = {}
     #: maps agentid to agent instance
     self.agents = {}
     self.threads = []
     #: maps agentids to their serialized configuration - output altering
     #: options only
     self.agents_output_altering_options = {}
     #: maps agentids to their serialized configuration
     self.agents_full_config_txts = {}
     #: monotonically increasing user request counter
     self.userrequestid = 0
     #: retry_counters[(agent_name, config_txt, domain, selector)] = \
     #:     number of remaining retries
     self.retry_counters = defaultdict(dict)
     self.sched = Sched(self._sched_inject)
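
The agent_count, agents and per-agent configuration dictionaries above suggest how registration is expected to work: each agent receives an id derived from its name and the never-decreasing counter (e.g. "inject-12"), and its serialized configurations are stored per id. A minimal sketch under those assumptions; register_agent() and its signature are hypothetical, not the project's API:

    # Hypothetical registration sketch using only the attributes declared
    # above; _BusState stands in for the bus instance.
    class _BusState(object):
        def __init__(self):
            self.agent_count = 0
            self.agents = {}
            self.agents_full_config_txts = {}
            self.agents_output_altering_options = {}

    def register_agent(bus, name, agent, config_txt, output_altering_txt):
        agent_id = "%s-%d" % (name, bus.agent_count)   # e.g. "inject-12"
        bus.agent_count += 1                           # never reused, never decreases
        bus.agents[agent_id] = agent
        bus.agents_full_config_txts[agent_id] = config_txt
        bus.agents_output_altering_options[agent_id] = output_altering_txt
        return agent_id

    bus = _BusState()
    print(register_agent(bus, "inject", object(), "{}", "{}"))   # inject-0
    print(register_agent(bus, "inject", object(), "{}", "{}"))   # inject-1
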
Example No. 3
    def __init__(self, store, server_addr, heartbeat_interval=0):
        self.store = store
        #: maps agent_id (ex. inject-:1.234) to object path (ex: /agent/inject)
        self.clients = {}
        self.exiting = False
        #: locks[domain] is a set of (lockid, selector) whose processing
        #: has started (might even be finished). Allows several agents that
        #: perform the same stateless computation to run in parallel
        self.locks = defaultdict(set)
        signal.signal(signal.SIGTERM, self._sigterm_handler)
        #: maps agent_id to agent name
        self.agentnames = {}
        #: maps agent_id to agent's serialized configuration - output altering
        #: options only
        self.agents_output_altering_options = {}
        #: maps agent_id to agent's serialized configuration
        self.agents_full_config_txts = {}
        #: monotonically increasing user request counter
        self.userrequestid = 0
        #: number of descriptors
        self.descriptor_count = 0
        #: count descriptors marked as processed/processable by each uniquely
        #: configured agent
        self.descriptor_handled_count = {}
        #: uniq_conf_clients[(agent_name, config_txt)] = [agent_id, ...]
        self.uniq_conf_clients = defaultdict(list)
        #: retry_counters[(agent_name, config_txt, domain, selector)] = \
        #:     number of remaining retries
        self.retry_counters = defaultdict(dict)
        self.sched = Sched(self._sched_inject)
        #: last published agent id
        self.last_published_id = 0
        #: bus session id, to make sure agents were not registered to another
        #: bus master (ex. which has exited)
        self.session_id = os.urandom(5).encode('hex')

        # Connects to the rabbitmq server
        self.server_addr = (
            server_addr + "/%2F?connection_attempts=200&heartbeat_interval=" +
            str(heartbeat_interval))
        self.params = pika.URLParameters(self.server_addr)

        # Retry until the rabbitmq server accepts the connection
        connected = False
        while not connected:
            try:
                self.connection = pika.BlockingConnection(self.params)
                connected = True
            except pika.exceptions.ConnectionClosed:
                log.warning("Cannot connect to rabbitmq at: %s. Retrying...",
                            self.server_addr)
                time.sleep(0.5)

        self.channel = self.connection.channel()

        # Create the registration queue
        self.channel.queue_declare(queue="registration_queue")
        self.channel.queue_purge(queue="registration_queue")
        # Create the exchange for signals publish(master)/subscribe(slave)
        self.signal_exchange = self.channel.exchange_declare(
            exchange='rebus_signals', exchange_type='fanout')

        # Create the rpc queue
        self.channel.queue_declare(queue='rebus_master_rpc_highprio')
        self.channel.queue_purge(queue='rebus_master_rpc_highprio')
        self.channel.basic_consume(self._rpc_callback,
                                   queue='rebus_master_rpc_highprio',
                                   arguments={'x-priority': 1})
        self.channel.queue_declare(queue='rebus_master_rpc_lowprio')
        self.channel.queue_purge(queue='rebus_master_rpc_lowprio')
        self.channel.basic_consume(self._rpc_callback,
                                   queue='rebus_master_rpc_lowprio',
                                   arguments={'x-priority': 0})
        # bus is now ready to serve requests, publish registration IDs
        self._publish_ids(10000)
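
For reference, a slave/agent process would connect with the same kind of URL parameters and announce itself on the registration queue declared above. A minimal sketch using the pika 1.x BlockingConnection API (the snippet itself targets the older 0.x basic_consume signature); the queue name comes from the code above, while the broker URL and message body are assumptions:

    import pika

    # Hypothetical agent-side counterpart: connect, re-declare the queue
    # (declaration is idempotent) and publish a registration message that
    # the master can pick up from registration_queue.
    url = ("amqp://guest:guest@localhost:5672"
           "/%2F?connection_attempts=200&heartbeat=0")
    connection = pika.BlockingConnection(pika.URLParameters(url))
    channel = connection.channel()
    channel.queue_declare(queue="registration_queue")
    channel.basic_publish(exchange="",                      # default exchange
                          routing_key="registration_queue",
                          body=b"register inject")
    connection.close()
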