def create_connection(self): try: self.uri = pyro.create_uri(self.address, self.port, "ForArbiter", self.__class__.use_ssl) self.con = pyro.getProxy(self.uri) pyro.set_timeout(self.con, self.timeout) except Pyro_exp_pack , exp: self.con = None logger.log('Error : in creation connexion for %s : %s' % (self.get_name(), str(exp)))
def pynag_con_init(self, id, type="scheduler"): # Get teh good links tab for looping.. links = self.get_links_from_type(type) if links is None: logger.log("DBG: Type unknown for connection! %s" % type) return if type == "scheduler": # If sched is not active, I do not try to init # it is just useless is_active = links[id]["active"] if not is_active: return # If we try to connect too much, we slow down our tests if self.is_connection_try_too_close(links[id]): return # Ok, we can now update it links[id]["last_connection"] = time.time() # DBG: print "Init connection with", links[id]['uri'] running_id = links[id]["running_id"] # DBG: print "Running id before connection", running_id uri = links[id]["uri"] links[id]["con"] = Pyro.core.getProxyForURI(uri) try: # intial ping must be quick pyro.set_timeout(links[id]["con"], 5) links[id]["con"].ping() new_run_id = links[id]["con"].get_running_id() # data transfert can be longer pyro.set_timeout(links[id]["con"], 120) # The schedulers have been restart : it has a new run_id. # So we clear all verifs, they are obsolete now. if new_run_id != running_id: print "[%s] New running id for the %s %s : %s (was %s)" % ( self.name, type, links[id]["name"], new_run_id, running_id, ) links[id]["broks"].clear() # we must ask for a enw full broks if # it's a scheduler if type == "scheduler": print "[%s] I ask for a broks generation to the scheduler %s" % (self.name, links[id]["name"]) links[id]["con"].fill_initial_broks() # else: # print "I do nto ask for brok generation" links[id]["running_id"] = new_run_id except (Pyro.errors.ProtocolError, Pyro.errors.CommunicationError), exp: logger.log("[%s] Connexion problem to the %s %s : %s" % (self.name, type, links[id]["name"], str(exp))) links[id]["con"] = None return
def pynag_con_init(self, id, type='scheduler'): # Get teh good links tab for looping.. links = self.get_links_from_type(type) if links is None: logger.log('DBG: Type unknown for connexion! %s' % type) return if type == 'scheduler': # If sched is not active, I do not try to init # it is just useless is_active = links[id]['active'] if not is_active: return # If we try to connect too much, we slow down our tests if self.is_connexion_try_too_close(links[id]): return # Ok, we can now update it links[id]['last_connexion'] = time.time() # DBG: print "Init connexion with", links[id]['uri'] running_id = links[id]['running_id'] # DBG: print "Running id before connexion", running_id uri = links[id]['uri'] links[id]['con'] = Pyro.core.getProxyForURI(uri) try: # intial ping must be quick pyro.set_timeout(links[id]['con'], 5) links[id]['con'].ping() new_run_id = links[id]['con'].get_running_id() # data transfert can be longer pyro.set_timeout(links[id]['con'], 120) # The schedulers have been restart : it has a new run_id. # So we clear all verifs, they are obsolete now. if new_run_id != running_id: print "[%s] New running id for the %s %s : %s (was %s)" % ( self.name, type, links[id]['name'], new_run_id, running_id) links[id]['broks'].clear() # we must ask for a enw full broks if # it's a scheduler if type == 'scheduler': print "[%s] I ask for a broks generation to the scheduler %s" % ( self.name, links[id]['name']) links[id]['con'].fill_initial_broks() # else: # print "I do nto ask for brok generation" links[id]['running_id'] = new_run_id except (Pyro.errors.ProtocolError, Pyro.errors.CommunicationError), exp: logger.log("[%s] Connexion problem to the %s %s : %s" % (self.name, type, links[id]['name'], str(exp))) links[id]['con'] = None return
def put_conf(self, conf): if self.con is None: self.create_connection() #print "Connection is OK, now we put conf", conf #print "Try to put conf:", conf try: pyro.set_timeout(self.con, self.data_timeout) self.con.put_conf(conf) pyro.set_timeout(self.con, self.timeout) return True except Pyro_exp_pack , exp: self.con = None #print ''.join(Pyro.util.getPyroTraceback(exp)) return False
def put_conf(self, conf): if self.con is None: self.create_connexion() #print "Connexion is OK, now we put conf", conf #print "Try to put conf:", conf try: pyro.set_timeout(self.con, self.data_timeout) print "DBG: put conf to", self.con.__dict__ self.con.put_conf(conf) pyro.set_timeout(self.con, self.timeout) return True except Pyro_exp_pack, exp: self.con = None #print ''.join(Pyro.util.getPyroTraceback(exp)) return False
def create_connection(self): try: self.uri = pyro.create_uri(self.address, self.port, "ForArbiter", self.__class__.use_ssl) # By default Pyro got problem in connect() function that can take # long seconds to raise a timeout. And even with the _setTimeout() # call. So we change the whole default connect() timeout socket.setdefaulttimeout(self.timeout) self.con = pyro.getProxy(self.uri) # But the multiprocessing module is not copatible with it! # so we must disable it imadiatly after socket.setdefaulttimeout(None) pyro.set_timeout(self.con, self.timeout) except Pyro_exp_pack , exp: # But the multiprocessing module is not copatible with it! # so we must disable it imadiatly after socket.setdefaulttimeout(None) self.con = None logger.log('Error : in creation connection for %s : %s' % (self.get_name(), str(exp)))
def put_conf(self, conf): if self.con is None: self.create_connection() #print "Connection is OK, now we put conf", conf #print "Try to put conf:", conf # Maybe the connexion was not ok, bail out if not self.con: return False try: pyro.set_timeout(self.con, self.data_timeout) self.con.put_conf(conf) pyro.set_timeout(self.con, self.timeout) return True except Pyro_exp_pack, exp: self.con = None logger.error(''.join(PYRO_VERSION < "4.0" and Pyro.util.getPyroTraceback(exp) or Pyro.util.getPyroTraceback())) return False
def create_connection(self): try: self.uri = pyro.create_uri(self.arb_satmap['address'], self.arb_satmap['port'], "ForArbiter", self.__class__.use_ssl) # By default Pyro got problem in connect() function that can take # long seconds to raise a timeout. And even with the _setTimeout() # call. So we change the whole default connect() timeout socket.setdefaulttimeout(self.timeout) self.con = pyro.getProxy(self.uri) # But the multiprocessing module is not copatible with it! # so we must disable it imadiatly after socket.setdefaulttimeout(None) pyro.set_timeout(self.con, self.timeout) except Pyro_exp_pack, exp: # But the multiprocessing module is not compatible with it! # so we must disable it imadiatly after socket.setdefaulttimeout(None) self.con = None logger.error("Creating connection for %s: %s" % (self.get_name(), str(exp)))
def put_conf(self, conf): if self.con is None: self.create_connection() #print "Connection is OK, now we put conf", conf #print "Try to put conf:", conf # Maybe the connexion was not ok, bail out if not self.con: return False try: pyro.set_timeout(self.con, self.data_timeout) self.con.put_conf(conf) pyro.set_timeout(self.con, self.timeout) return True except Pyro_exp_pack, exp: self.con = None logger.error(''.join( PYRO_VERSION < "4.0" and Pyro.util.getPyroTraceback(exp) or Pyro.util.getPyroTraceback())) return False
# NOTE(review): fragment of a check_shinken-style plugin script — the
# leading raise belongs to an if/elif chain whose head lies outside
# this excerpt.
    raise SystemExit(CRITICAL)
elif options.target not in daemon_types:
    print 'CRITICAL - target', options.target, 'is not a Shinken daemon!'
    parser.print_help()
    raise SystemExit(CRITICAL)

# Build the Pyro URI of the arbiter to interrogate
uri = pyro.create_uri(options.hostname, options.portnum, PYRO_OBJECT, options.ssl)
# Set the default socket connection to the timeout, by default it's 10s
socket.setdefaulttimeout(float(options.timeout))
con = None
try:
    con = Pyro.core.getProxyForURI(uri)
    pyro.set_timeout(con, options.timeout)
except Exception, exp:
    # Any failure reaching the arbiter is a CRITICAL plugin result
    print "CRITICAL : the Arbiter is not reachable : (%s)." % exp
    raise SystemExit(CRITICAL)

if options.daemon:
    # We just want a check for a single satellite daemon
    # Only OK or CRITICAL here
    daemon_name = options.daemon
    try:
        result = con.get_satellite_status(options.target, daemon_name)
    except Exception, exp:
        print "CRITICAL : the Arbiter is not reachable : (%s)." % exp
        raise SystemExit(CRITICAL)
def create_connection(self):
    """Build the arbiter URI, open a Pyro proxy on it and arm the
    proxy timeout.

    NOTE: no error handling here — any Pyro failure propagates to the
    caller. Assignment order is kept so a partial failure leaves the
    same attributes set as before.
    """
    self.uri = pyro.create_uri(self.address, self.port,
                               "ForArbiter", self.__class__.use_ssl)
    self.con = pyro.getProxy(self.uri)
    pyro.set_timeout(self.con, self.timeout)
# NOTE(review): this excerpt shows the Broker class only up to
# pynag_con_init(); the class very likely continues past this chunk.
class Broker(BaseSatellite):
    """Broker satellite daemon.

    Keeps link tables toward the other daemons (arbiters, pollers,
    reactionners and — filled elsewhere — schedulers), buffers broks
    and external commands, and manages the Pyro connections to them.
    """

    properties = BaseSatellite.properties.copy()
    properties.update({
        'pidfile': PathProp(default='brokerd.pid'),
        'port': IntegerProp(default='7772'),
        'local_log': PathProp(default='brokerd.log'),
    })

    def __init__(self, config_file, is_daemon, do_replace, debug, debug_file):
        super(Broker, self).__init__('broker', config_file, is_daemon, do_replace, debug, debug_file)
        # Our arbiters
        self.arbiters = {}
        # Our pollers and reactionners
        self.pollers = {}
        self.reactionners = {}
        # Modules are loaded one time only
        self.have_modules = False
        # Queue of external_commands given by modules;
        # they will be processed by the arbiter
        self.external_commands = []
        # All broks to manage
        self.broks = []  # broks to manage
        # broks raised this turn, that need to be put in self.broks
        self.broks_internal_raised = []
        # default communication timeout, in seconds
        self.timeout = 1.0

    # Schedulers have some queues. We can simplify the call by adding
    # elements into the proper queue just by looking at their type
    # Brok -> self.broks
    # TODO: better tag ID?
    # External commands -> self.external_commands
    def add(self, elt):
        """Dispatch an incoming element (brok / external command /
        inter-module message) into the matching internal queue."""
        cls_type = elt.__class__.my_type
        if cls_type == 'brok':
            # For brok, we TAG brok with our instance_id
            elt.instance_id = 0
            self.broks_internal_raised.append(elt)
            return
        elif cls_type == 'externalcommand':
            # NOTE(review): this logs the ExternalCommand *class*
            # __dict__, not the received command — looks unintended,
            # verify against the upstream code.
            logger.debug("Enqueuing an external command '%s'" % str(ExternalCommand.__dict__))
            self.external_commands.append(elt)
        # Maybe we got a Message from the modules, it's way to ask something
        # like from now a full data from a scheduler for example.
        elif cls_type == 'message':
            # We got a message, great!
            logger.debug(str(elt.__dict__))
            if elt.get_type() == 'NeedData':
                data = elt.get_data()
                # Full instance id means: I got no data for this scheduler
                # so give me all dumbass!
                if 'full_instance_id' in data:
                    c_id = data['full_instance_id']
                    source = elt.source
                    logger.info('The module %s is asking me to get all initial data from the scheduler %d' % (source, c_id))
                    # so we just reset the connection and the running_id,
                    # it will just get all new things
                    try:
                        self.schedulers[c_id]['con'] = None
                        self.schedulers[c_id]['running_id'] = 0
                    except KeyError:
                        # maybe this instance was not known, forget it
                        logger.warning("the module %s ask me a full_instance_id for an unknown ID (%d)!" % (source, c_id))
            # Maybe a module tells me that it's dead, I must log its last words...
            if elt.get_type() == 'ICrash':
                data = elt.get_data()
                logger.error('the module %s just crash! Please look at the traceback:' % data['name'])
                logger.error(data['trace'])
                # The module death will be looked for elsewhere and restarted.

    # Get the good tabs for links by the kind. If unknown, return None
    def get_links_from_type(self, type):
        """Return the links table for this satellite kind, or None when
        the kind is unknown."""
        t = {'scheduler': self.schedulers, 'arbiter': self.arbiters,
             'poller': self.pollers, 'reactionner': self.reactionners}
        if type in t:
            return t[type]
        return None

    # Call by arbiter to get our external commands
    def get_external_commands(self):
        """Hand over (and reset) the buffered external commands."""
        res = self.external_commands
        self.external_commands = []
        return res

    # Check if we do not connect to often to this
    def is_connection_try_too_close(self, elt):
        """True when the last connection attempt on this link is less
        than 5 seconds old — throttles reconnection tries."""
        now = time.time()
        last_connection = elt['last_connection']
        if now - last_connection < 5:
            return True
        return False

    # initialize or re-initialize connection with scheduler or
    # arbiter if type == arbiter
    def pynag_con_init(self, id, type='scheduler'):
        """(Re)open the Pyro connection to links[id], clearing its brok
        queue (and requesting a full generation from a scheduler) when
        the remote running id changed."""
        # Get the good links tab for looping..
        links = self.get_links_from_type(type)
        if links is None:
            # NOTE(review): this message was split across lines in the
            # excerpt; reconstructed as a single-line format string.
            logger.debug('Type unknown for connection! %s' % type)
            return

        if type == 'scheduler':
            # If sched is not active, I do not try to init
            # it is just useless
            is_active = links[id]['active']
            if not is_active:
                return

        # If we try to connect too much, we slow down our tests
        if self.is_connection_try_too_close(links[id]):
            return

        # Ok, we can now update it
        links[id]['last_connection'] = time.time()

        running_id = links[id]['running_id']
        uri = links[id]['uri']
        try:
            # bound the connect() itself: Pyro can ignore the proxy timeout
            socket.setdefaulttimeout(3)
            links[id]['con'] = Pyro.core.getProxyForURI(uri)
            socket.setdefaulttimeout(None)
        except Pyro_exp_pack, exp:
            # But the multiprocessing module is not compatible with it!
            # so we must disable it immediately after
            socket.setdefaulttimeout(None)
            logger.info("Connection problem to the %s %s: %s" % (type, links[id]['name'], str(exp)))
            links[id]['con'] = None
            return

        try:
            # initial ping must be quick
            pyro.set_timeout(links[id]['con'], 5)
            links[id]['con'].ping()
            new_run_id = links[id]['con'].get_running_id()
            # data transfer can be longer
            pyro.set_timeout(links[id]['con'], 120)

            # The schedulers have been restarted: it has a new run_id.
            # So we clear all verifs, they are obsolete now.
            if new_run_id != running_id:
                logger.debug("[%s] New running id for the %s %s: %s (was %s)" % (self.name, type, links[id]['name'], new_run_id, running_id))
                links[id]['broks'].clear()
                # we must ask for a new full broks if
                # it's a scheduler
                if type == 'scheduler':
                    logger.debug("[%s] I ask for a broks generation to the scheduler %s" % (self.name, links[id]['name']))
                    links[id]['con'].fill_initial_broks(self.name)
            # Ok all is done, we can save this new running id
            links[id]['running_id'] = new_run_id
        except Pyro_exp_pack, exp:
            logger.info("Connection problem to the %s %s: %s" % (type, links[id]['name'], str(exp)))
            links[id]['con'] = None
            return
# NOTE(review): fragment of a check_shinken-style plugin script — the
# leading lines belong to an if/elif chain whose head lies outside
# this excerpt.
    parser.print_help()
    raise SystemExit, CRITICAL
elif options.target not in daemon_types:
    print 'CRITICAL - target %s is not a Shinken daemon!' % options.target
    parser.print_help()
    raise SystemExit, CRITICAL

# Build the Pyro URI of the arbiter to interrogate
uri = pyro.create_uri(options.hostname, options.portnum, PYRO_OBJECT, options.ssl)
# Set the default socket connection to the timeout, by default it's 10s
socket.setdefaulttimeout(float(options.timeout))
con = None
try:
    con = Pyro.core.getProxyForURI(uri)
    pyro.set_timeout(con, float(options.timeout))
except Exception, exp:
    # NOTE(review): this branch exits via sys.exit() while the later
    # ones use `raise SystemExit` — same effect, inconsistent style.
    print "CRITICAL : the Arbiter is not reachable : (%s)." % exp
    sys.exit(CRITICAL)

if options.daemon:
    # We just want a check for a single satellite daemon
    # Only OK or CRITICAL here
    daemon_name = options.daemon
    try:
        result = con.get_satellite_status(options.target, daemon_name)
    except Exception, exp:
        print "CRITICAL : the Arbiter is not reachable : (%s)." % exp
        raise SystemExit, CRITICAL
def create_connexion(self):
    """Build the arbiter URI, open a Pyro proxy on it and arm the
    proxy timeout.

    NOTE: no error handling here — any Pyro failure propagates to the
    caller. Assignment order is kept so a partial failure leaves the
    same attributes set as before.
    """
    self.uri = pyro.create_uri(self.address, self.port,
                               "ForArbiter", self.__class__.use_ssl)
    self.con = pyro.getProxy(self.uri)
    pyro.set_timeout(self.con, self.timeout)
# NOTE(review): fragment of a check_shinken-style plugin script — the
# leading raise belongs to an if/elif chain whose head lies outside
# this excerpt, and the chunk is cut short at the trailing `if result:`.
    raise SystemExit(CRITICAL)
elif options.target not in daemon_types:
    print 'CRITICAL - target', options.target, 'is not a Shinken daemon!'
    parser.print_help()
    raise SystemExit(CRITICAL)

# Build the Pyro URI of the arbiter to interrogate
uri = pyro.create_uri(options.hostname, options.portnum, PYRO_OBJECT, options.ssl)
# Set the default socket connection to the timeout, by default it's 10s
socket.setdefaulttimeout(float(options.timeout))
con = None
try:
    con = Pyro.core.getProxyForURI(uri)
    pyro.set_timeout(con, options.timeout)
except Exception, exp:
    # Any failure reaching the arbiter is a CRITICAL plugin result
    print "CRITICAL : the Arbiter is not reachable : (%s)." % exp
    raise SystemExit(CRITICAL)

if options.daemon:
    # We just want a check for a single satellite daemon
    # Only OK or CRITICAL here
    daemon_name = options.daemon
    try:
        result = con.get_satellite_status(options.target, daemon_name)
    except Exception, exp:
        print "CRITICAL : the Arbiter is not reachable : (%s)." % exp
        raise SystemExit(CRITICAL)
    if result: