Example #1
    def check_alive_instances(self):
        # Only for external
        for inst in self.instances:
            if inst not in self.to_restart:
                if inst.is_external and not inst.process.is_alive():
                    logger.error("The external module %s goes down unexpectedly!" % inst.get_name())
                    logger.info("Setting the module %s to restart" % inst.get_name())
                    # We clean its queues, they are no longer useful
                    inst.clear_queues(self.manager)
                    self.to_restart.append(inst)
                    # Ok, no need to look at queue size now
                    continue

                # Now look at the max queue size. If above the limit, the module probably has a huge
                # problem, so we bail out. It's not a perfect solution, more a watchdog
                # If max_queue_size is 0, don't check this
                if self.max_queue_size == 0:
                    continue
                # Ok, go launch the dog!
                queue_size = 0
                try:
                    queue_size = inst.to_q.qsize()
                except Exception, exp:
                    pass
                if queue_size > self.max_queue_size:
                    logger.error("The external module %s got a too high brok queue size (%s > %s)!" % (inst.get_name(), queue_size, self.max_queue_size))
                    logger.info("Setting the module %s to restart" % inst.get_name())
                    # We clean its queues, they are no longer useful
                    inst.clear_queues(self.manager)
                    self.to_restart.append(inst)
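
The qsize() call above is wrapped in a try/except because multiprocessing queues do not implement qsize() on every platform (notably macOS, where sem_getvalue() is missing). A minimal, self-contained sketch of the same watchdog pattern, assuming nothing about Shinken's module instances:

import multiprocessing

MAX_QUEUE_SIZE = 1000  # hypothetical limit, playing the role of self.max_queue_size

def queue_is_overloaded(q, max_size=MAX_QUEUE_SIZE):
    if max_size == 0:  # 0 means "do not check", as in the module above
        return False
    try:
        size = q.qsize()
    except NotImplementedError:  # e.g. macOS lacks sem_getvalue()
        return False
    return size > max_size

q = multiprocessing.Queue()
assert queue_is_overloaded(q) is False
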
Example #2
 def add(self, elt):
     cls_type = elt.__class__.my_type
     if cls_type == 'brok':
         # For brok, we TAG brok with our instance_id
         elt.instance_id = 0
         self.broks_internal_raised.append(elt)
         return
     elif cls_type == 'externalcommand':
         logger.debug("Enqueuing an external command '%s'" % str(ExternalCommand.__dict__))
         self.external_commands.append(elt)
     # Maybe we got a Message from the modules; it's a way to ask for something,
     # like the full data from a scheduler for example.
     elif cls_type == 'message':
         # We got a message, great!
         logger.debug(str(elt.__dict__))
         if elt.get_type() == 'NeedData':
             data = elt.get_data()
             # Full instance id means: I got no data for this scheduler
             # so give me all dumbass!
             if 'full_instance_id' in data:
                 c_id = data['full_instance_id']
                 source = elt.source
                 logger.info('The module %s is asking me to get all initial data from the scheduler %d' % (source, c_id))
                 # so we just reset the connection and the running_id, it will just get all new things
                 try:
                     self.schedulers[c_id]['con'] = None
                     self.schedulers[c_id]['running_id'] = 0
                 except KeyError:  # maybe this instance was not known, forget it
                     logger.warning("the module %s ask me a full_instance_id for an unknown ID (%d)!" % (source, c_id))
         # Maybe a module tells me that it's dead, I must log its last words...
         if elt.get_type() == 'ICrash':
             data = elt.get_data()
             logger.error('the module %s just crashed! Please look at the traceback:' % data['name'])
             logger.error(data['trace'])
Example #3
    def hook_save_retention(self, daemon):
        """
        main function that is called in the retention creation pass
        """
        logger.debug("[MemcacheRetention] asking me to update the retention objects")

        all_data = daemon.get_retention_data()

        hosts = all_data['hosts']
        services = all_data['services']


        # Now the memcache method
        for h_name in hosts:
            try:
                h = hosts[h_name]
                key = self.normalize_key("HOST-%s" % h_name)
                val = cPickle.dumps(h)
                self.mc.set(key, val)
            except Exception, exp:
                logger.error("[MemcacheRetention] error while saving host %s: %s" % (h_name, exp))

        for (h_name, s_desc) in services:
            try:
                key = self.normalize_key("SERVICE-%s,%s" % (h_name, s_desc))
                s = services[(h_name, s_desc)]
                val = cPickle.dumps(s)
                self.mc.set(key, val)
            except Exception, exp:
                logger.error("[MemcacheRetention] error while saving service %s,%s: %s" % (h_name, s_desc, exp))

        self.mc.disconnect_all()
        logger.info("Retention information updated in Memcache")
Example #4
 def is_me(self, lookup_name):
     logger.info("And arbiter is launched with the hostname:%s "
                 "from an arbiter point of view of addr:%s", self.host_name, socket.getfqdn())
     if lookup_name:
         return lookup_name == self.get_name()
     else:
         return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname()
Example #5
def dark():
    r"""
                       .-.
                      |_:_|
                     /(_Y_)\
                    ( \/M\/ )
 '.               _.'-/'-'\-'._
   ':           _/.--'[[[[]'--.\_
     ':        /_'  : |::"| :  '.\
       ':     //   ./ |oUU| \.'  :\
         ':  _:'..' \_|___|_/ :   :|
           ':.  .'  |_[___]_|  :.':\
            [::\ |  :  | |  :   ; : \
             '-'   \/'.| |.' \  .;.' |
             |\_    \  '-'   :       |
             |  \    \ .:    :   |   |
             |   \    | '.   :    \  |
             /       \   :. .;       |
            /     |   |  :__/     :  \\
           |  |   |    \:   | \   |   ||
          /    \  : :  |:   /  |__|   /|
      snd |     : : :_/_|  /'._\  '--|_\
          /___.-/_|-'   \  \
                         '-'

"""
    logger.info(dark.__doc__)
Example #6
    def hook_load_retention(self, daemon):

        # Now the new redis way :)
        logger.info("[RedisRetention] asking me to load retention objects")

        # We got list of loaded data from retention server
        ret_hosts = {}
        ret_services = {}

        # We must load the data and format it as the scheduler wants :)
        for h in daemon.hosts:
            key = self._get_host_key(h.host_name)
            val = self.rc.get(key)
            if val is not None:
                val = cPickle.loads(val)
                ret_hosts[h.host_name] = val

        for s in daemon.services:
            key = self._get_service_key(s.host.host_name,
                                        s.service_description)
            val = self.rc.get(key)
            if val is not None:
                val = cPickle.loads(val)
                ret_services[(s.host.host_name, s.service_description)] = val

        all_data = {'hosts': ret_hosts, 'services': ret_services}

        # Ok, now load them into the scheduler :)
        daemon.restore_retention_data(all_data)

        logger.info("[RedisRetention] Retention objects loaded successfully.")

        return True
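
_get_host_key() and _get_service_key() are defined elsewhere in the module. Judging from the memcache variant (Example #3), they presumably build flat string keys along these lines (a hypothetical sketch; the real module may add a configurable prefix):

    def _get_host_key(self, host_name):
        # e.g. "HOST-server1"
        return "HOST-%s" % host_name

    def _get_service_key(self, host_name, service_description):
        # e.g. "SERVICE-server1,Load"
        return "SERVICE-%s,%s" % (host_name, service_description)
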
Example #7
    def main(self):
        self.set_proctitle(self.name)

        # Daemon like init
        self.debug_output = []
        self.modules_dir = modulesctx.get_modulesdir()
        self.modules_manager = ModulesManager('webui', self.find_modules_path(), [])
        self.modules_manager.set_modules(self.modules)
        # We can now output some previously silenced debug output
        self.do_load_modules()
        for inst in self.modules_manager.instances:
            f = getattr(inst, 'load', None)
            if f and callable(f):
                f(self)
        for s in self.debug_output:
            print s
        del self.debug_output

        # Check if the Bottle view dir really exist
        if not os.path.exists(bottle.TEMPLATE_PATH[0]):
            logger.error("[WebUI] The view path do not exist at %s" % bottle.TEMPLATE_PATH)
            sys.exit(2)

        # Check directories
        # We check that the needed directories exist. If not, try to create them
        for d in [self.photo_dir, self.share_dir, self.config_dir]:
            logger.debug("[WebUI] Checking dir: %s", d)
            if not os.path.exists(d):
                try:
                    os.mkdir(d)
                    logger.info("[WebUI] Created dir: %s", d)
                except Exception, exp:
                    logger.error("[WebUI] Dir creation failed: %s", exp)
Example #8
    def main(self):
        try:
            self.load_config_file()
            # Setting log level
            logger.setLevel(self.log_level)
            # Force the debug level if the daemon is said to start with such level
            if self.debug:
                logger.setLevel('DEBUG')
            
            self.look_for_early_exit()
            self.do_daemon_init_and_start()
            self.load_modules_manager()
            self.http_daemon.register(self.interface)
            self.http_daemon.register(self.istats)

            #self.inject = Injector(self.sched)
            #self.http_daemon.register(self.inject)

            self.http_daemon.unregister(self.interface)
            self.uri = self.http_daemon.uri
            logger.info("[scheduler] General interface is at: %s", self.uri)
            self.do_mainloop()
        except Exception, exp:
            self.print_unrecoverable(traceback.format_exc())
            raise
Example #9
 def init(self):
     logger.info("Try to open SQLite database at %s" % (self.uri))
     try:
         self.db = sqlite3.connect(self.uri, check_same_thread=False)
     except Exception, e:
         logger.error("Error %s:" % e)
         raise
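
check_same_thread=False allows the connection opened in init() to be used from threads other than the one that created it; SQLite then relies on the daemon to serialize access itself. A short usage sketch under that assumption:

import sqlite3

db = sqlite3.connect(':memory:', check_same_thread=False)
db.execute("CREATE TABLE IF NOT EXISTS logs (time INTEGER, line TEXT)")
db.execute("INSERT INTO logs VALUES (?, ?)", (0, 'startup'))
db.commit()
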
Example #10
def publish_archive(archive):
    # Now really publish it
    api_key = CONFIG['shinken.io']['api_key']
    c = prepare_curl_connection('/push', post=1, verbose=1)
    c.setopt(c.HTTPPOST, [("api_key", api_key),
                          ("data",
                           (c.FORM_FILE, str(archive),
                            c.FORM_CONTENTTYPE, "application/x-gzip"))
                          ])
    response = StringIO()
    c.setopt(pycurl.WRITEFUNCTION, response.write)

    try:
        c.perform()
    except pycurl.error as exp:
        logger.error("There was a critical error : %s", exp)
        sys.exit(2)
    r = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    if r != 200:
        logger.error("There was a critical error : %s", response.getvalue())
        sys.exit(2)
    else:
        ret  = json.loads(response.getvalue().replace('\\/', '/'))
        status = ret.get('status')
        text   = ret.get('text')
        if status == 200:
            logger.info(text)
        else:
            logger.error(text)
            sys.exit(2)
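
prepare_curl_connection() is not shown here; it presumably returns a pycurl.Curl handle pointed at the shinken.io API. A hypothetical sketch (the base URL and exact option set are assumptions):

import pycurl

def prepare_curl_connection(path, post=0, verbose=0):
    c = pycurl.Curl()
    c.setopt(pycurl.URL, 'https://shinken.io' + path)  # assumed base URL
    if post:
        c.setopt(pycurl.POST, 1)
    if verbose:
        c.setopt(pycurl.VERBOSE, 1)
    return c
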
Example #11
    def launch_new_checks(self):
        for chk in self.checks:
            if chk.status == 'queue':
                logger.info("[Android SMS] Launching SMS for command %s" % chk.command)

                elts = chk.command.split(' ')

                # Check the command call first
                if len(elts) < 3:
                    chk.exit_status = 2
                    chk.get_outputs('The android SMS call %s is not valid. Should be android_sms PHONENUMBER TEXT' % chk.command, 8012)
                    chk.status = 'done'
                    chk.execution_time = 0.1
                    continue

                # Should be android_sms PHONE TEXT
                phone = elts[1]
                text = ' '.join(elts[2:])

                # Go call the SMS :)
                try:
                    self.android.smsSend(phone, text)
                except Exception, exp:
                    chk.exit_status = 2
                    chk.get_outputs('The android SMS to %s got an error %s' % (phone, exp), 8012)
                    chk.status = 'done'
                    chk.execution_time = 0.1
                    continue

                logger.info("[Android SMS] Send SMS %s to %s" % text, str(phone))
                # And finish the notification
                chk.exit_status = 1
                chk.get_outputs('SMS sent to %s' % phone, 8012)
                chk.status = 'done'
                chk.execution_time = 0.01
Example #12
def get_instance(plugin):
    name = plugin.get_name()
    logger.info("Get a Syslog broker for plugin %s" % (name))

    # syslog.syslog priority defaults to (LOG_INFO | LOG_USER)
    facility = syslog.LOG_USER
    priority = syslog.LOG_INFO

    # Get configuration values, if any
    if hasattr(plugin, 'facility'):
        facility = plugin.facility
    if hasattr(plugin, 'priority'):
        priority = plugin.priority

    # Ensure config values have a string type compatible with
    # SysLogHandler.encodePriority
    if type(facility) in types.StringTypes:
        facility = types.StringType(facility)
    if type(priority) in types.StringTypes:
        priority = types.StringType(priority)

    # Convert facility / priority (integers or strings) to aggregated
    # priority value
    sh = SysLogHandler()
    try:
        priority = sh.encodePriority(facility, priority)
    except TypeError, e:
        logger.error("[%s] Couldn't get syslog priority, "
                     "reverting to defaults" % (name))
Example #13
    def hook_late_configuration(self, arb):
        """ Read config and fill database """
        mac_resol = MacroResolver()
        mac_resol.init(arb.conf)
        for serv in arb.conf.services:
            if serv.check_command.command.module_type == 'snmp_booster':
                try:
                    # Serialize service
                    dict_serv = dict_serialize(serv,
                                               mac_resol,
                                               self.datasource)
                except Exception as exp:
                    logger.error("[SnmpBooster] [code 0907] [%s,%s] "
                                 "%s" % (serv.host.get_name(),
                                         serv.get_name(),
                                         str(exp)))
                    continue

                # We want to make a diff between arbiter insert and poller insert. Some backend may need it.
                try:
                    self.db_client.update_service_init(dict_serv['host'],
                                                       dict_serv['service'],
                                                       dict_serv)
                except Exception as exp:
                    logger.error("[SnmpBooster] [code 0909] [%s,%s] "
                                 "%s" % (dict_serv['host'],
                                         dict_serv['service'],
                                         str(exp)))
                    continue

        logger.info("[SnmpBooster] [code 0908] Done parsing")

        # Disconnect from database
        self.db_client.disconnect()
Example #14
    def manage_log_brok(self, brok):
        """
        Parse a Shinken log brok to enqueue a log line for Index insertion
        """
        d = date.today()
        index_name = self.index_prefix + "-" + d.strftime("%Y.%m.%d")

        line = brok.data["log"]
        if re.match("^\[[0-9]*\] [A-Z][a-z]*.:", line):
            # Match log which NOT have to be stored
            logger.warning("[elastic-logs] do not store: %s", line)
            return

        logline = Logline(line=line)
        logline_dict = logline.as_dict()
        logline_dict.update({"@timestamp": datetime.utcfromtimestamp(int(logline_dict["time"])).isoformat() + "Z"})
        values = {"_index": index_name, "_type": "shinken-logs", "_source": logline_dict}

        # values = logline.as_dict()
        if logline.logclass != LOGCLASS_INVALID:
            logger.debug("[elastic-logs] store log line values: %s", values)
            self.logs_cache.append(values)
        else:
            logger.info("[elastic-logs] This line is invalid: %s", line)

        return
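
The dicts appended to self.logs_cache already have the _index/_type/_source shape expected by the elasticsearch-py bulk helpers, so a flush routine elsewhere in the module can presumably hand the cache over directly. A sketch under that assumption:

from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk

def commit_logs(es_hosts, logs_cache):
    # Send every cached action in a single bulk request, then empty the cache
    es = Elasticsearch(es_hosts)
    ok, errors = bulk(es, logs_cache)
    del logs_cache[:]
    return ok
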
Example #15
    def manage_service_check_resultup_brok(self, b):
        """If a host is defined locally (in shinken) and not in GLPI,
           we must not edit GLPI data!
        """
        if 'plugin_monitoring_servicescatalogs_id' not in b.data and\
           'plugin_monitoring_services_id'         not in b.data:
            return list()

        logger.info("GLPI : data in DB %s " % b.data)
        new_data = copy.deepcopy(b.data)
        new_data['last_check'] = time.strftime('%Y-%m-%d %H:%M:%S')
        del new_data['perf_data']
        del new_data['output']
        del new_data['latency']
        del new_data['execution_time']
        try:
            new_data['id'] = b.data['plugin_monitoring_servicescatalogs_id']
            del new_data['plugin_monitoring_servicescatalogs_id']
            table = 'glpi_plugin_monitoring_servicescatalogs'
        except KeyError:
            new_data['id'] = b.data['plugin_monitoring_services_id']
            del new_data['plugin_monitoring_services_id']
            table = 'glpi_plugin_monitoring_services'

        where_clause = {'id' : new_data['id']}
        #print "Update service : ", new_data
        query = self.db_backend.create_update_query(table, new_data, where_clause)
        return [query]
Example #16
def load_config(app):
    global params
    
    import os
    from webui2.config_parser import config_parser
    try:
        currentdir = os.path.dirname(os.path.realpath(__file__))
        configuration_file = "%s/%s" % (currentdir, 'plugin.cfg')
        logger.info("[WebUI-logs] Plugin configuration file: %s", configuration_file)
        scp = config_parser('#', '=')
        z = params.copy()
        z.update(scp.parse_config(configuration_file))
        params = z

        params['logs_type'] = [item.strip() for item in params['logs_type'].split(',')]
        if len(params['logs_hosts']) > 0:
            params['logs_hosts'] = [item.strip() for item in params['logs_hosts'].split(',')]
        if len(params['logs_services']) > 0:
            params['logs_services'] = [item.strip() for item in params['logs_services'].split(',')]
        
        logger.info("[WebUI-logs] configuration loaded.")
        logger.info("[WebUI-logs] configuration, fetching types: %s", params['logs_type'])
        logger.info("[WebUI-logs] configuration, hosts: %s", params['logs_hosts'])
        logger.info("[WebUI-logs] configuration, services: %s", params['logs_services'])
        return True
    except Exception, exp:
        logger.warning("[WebUI-logs] configuration file (%s) not available: %s", configuration_file, str(exp))
        return False
Example #17
    def hook_save_retention(self, daemon):
        """
        main function that is called in the retention creation pass
        """

        try:
            self.max_workers = cpu_count()
        except NotImplementedError:
            pass

        t0 = time.time()
        logger.debug("[MongodbRetention] asking me to update the retention objects")

        all_data = daemon.get_retention_data()

        processes = []
        for i in xrange(self.max_workers):
            proc = Process(target=self.job, args=(all_data, i, self.max_workers))
            proc.start()
            processes.append(proc)

        # Allow 30s to join the sub-processes, should be enough
        for proc in processes:
            proc.join(30)

        logger.info("Retention information updated in Mongodb (%.2fs)" % (time.time() - t0))
Example #18
    def get_objects(self):
        if not hasattr(self, 'conn'):
            logger.error("[MySQLImport]: Problem during init phase")
            return {}

        # Create variables for result
        r = {}

        cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)

        # For all parameters
        for k, v in self.reqlist.iteritems():
            r[k] = []

            if v is not None:
                result_set = {}
                logger.info("[MySQLImport]: Getting %s configuration from database" % (k))

                try:
                    cursor.execute(v)
                    result_set = cursor.fetchall()
                except MySQLdb.Error, e:
                    logger.error("[MySQLImport]: Error %d: %s" % (e.args[0], e.args[1]))

                # Create set with result
                for row in result_set:
                    h = {}
                    for column in row:
                        if row[column]:
                            h[column] = row[column]
                    r[k].append(h)

        return r
Example #19
 def init_http(self):
     logger.info("[WS_Arbiter] Starting WS arbiter http socket")
     try:
         self.srv = run(host=self.host, port=self.port, server='wsgirefselect')
     except Exception, e:
         logger.error("[WS_Arbiter] Exception : %s" % str(e))
         raise
Example #20
def get_coffee():
    r"""

                        (
                          )     (
                           ___...(-------)-....___
                       .-""       )    (          ""-.
                 .-'``'|-._             )         _.-|
                /  .--.|   `""---...........---""`   |
               /  /    |                             |
               |  |    |                             |
                \  \   |                             |
                 `\ `\ |                             |
                   `\ `|                             |
                   _/ /\                             /
                  (__/  \                           /
               _..---""` \                         /`""---.._
            .-'           \                       /          '-.
           :               `-.__             __.-'              :
           :                  ) ""---...---"" (                 :
            '._               `"--...___...--"`              _.'
          jgs \""--..__                              __..--""/
               '._     "'"----.....______.....----"'"     _.'
                  `""--..,,_____            _____,,..--""`
                                `"'"----"'"`


"""
    logger.info(get_coffee.__doc__)
Example #21
    def hook_save_retention(self, daemon):
        log_mgr = logger
        logger.info("[PickleRetentionGeneric] asking me to update the retention objects")

        # Now the flat file method
        try:
            # Open a file near the path, with .tmp extension
            # so in case of a problem, we do not lose the old one
            f = open(self.path + ".tmp", "wb")

            # We get interesting retention data from the daemon it self
            all_data = daemon.get_retention_data()

            # And we save it on file :)

            # s = cPickle.dumps(all_data)
            # s_compress = zlib.compress(s)
            cPickle.dump(all_data, f, protocol=cPickle.HIGHEST_PROTOCOL)
            # f.write(s_compress)
            f.close()

            # Now move the .tmp file to the real path
            shutil.move(self.path + ".tmp", self.path)
        except IOError, exp:
            log_mgr.log("Error: retention file creation failed, %s" % str(exp))
            return
Example #22
    def hook_save_retention(self, daemon):
        """
        main function that is called in the retention creation pass
        """
        logger.debug("[RedisRetention] asking me to update retention objects")

        all_data = daemon.get_retention_data()

        hosts = all_data['hosts']
        services = all_data['services']

        # Now the Redis way
        for h_name in hosts:
            h = hosts[h_name]
            key = self._get_host_key(h_name)
            val = cPickle.dumps(h)
            if self.expire_time:
                self.rc.set(key, val, ex=self.expire_time)
            else:
                self.rc.set(key, val)

        for (h_name, s_desc) in services:
            s = services[(h_name, s_desc)]
            key = self._get_service_key(h_name, s_desc)
            val = cPickle.dumps(s)
            if self.expire_time:
                self.rc.set(key, val, ex=self.expire_time)
            else:
                self.rc.set(key, val)
        logger.info("Retention information updated in Redis")
Example #23
    def hook_late_configuration(self, arb):
        # We will return external commands to the arbiter, so
        # it can just manage it easily and in a generic way
        ext_cmds = []

        # If the file does not exist, we launch the command
        # and we bail out
        if not self._is_file_existing():
            self._launch_command()
            return

        self._is_mapping_file_changed()
        self._update_mapping()
        additions, removed = self._got_mapping_changes()

        for (father_k, son_k) in additions:
            son_type, son_name = son_k
            father_type, father_name = father_k
            logger.info("[Hot dependencies] Linked son : %s and its father: %s" % (son_name, father_name))
            if son_type == 'host' and father_type == 'host':
                son = arb.conf.hosts.find_by_name(son_name)
                father = arb.conf.hosts.find_by_name(father_name)
                if son is not None and father is not None:
                    logger.debug("[Hot dependencies] Found! %s %s" % (son_name, father_name))
                    if not son.is_linked_with_host(father):
                        logger.debug("[Hot dependencies] Doing simple link between %s and %s" % (son.get_name(), father.get_name()))
                        # Add a dep link between the son and the father
                        son.add_host_act_dependency(father, ['w', 'u', 'd'], None, True)
                else:
                    logger.debug("[Hot dependencies] Missing one of %s %s" % (son_name, father_name))
Example #24
    def commit_and_rotate_log_db(self):
        """Submit a commit or rotate the complete database file.

        This function is called whenever the mainloop doesn't handle a request.
        The database updates are committed every second.
        Every day at 00:05 the database contents with a timestamp of past days
        are moved to their own datafiles (one for each day). We wait until 00:05
        because in a distributed environment even after 00:00 (on the broker host)
        we might receive data from other hosts with a timestamp dating from yesterday.
        """
        if self.read_only:
            return
        now = time.time()
        if self.next_log_db_commit <= now:
            self.commit()
            logger.debug("[Logstore SQLite] commit.....")
            self.next_log_db_commit = now + 1
        if self.next_log_db_rotate <= now:
            logger.info("[Logstore SQLite] at %s we rotate the database file" % time.asctime(time.localtime(now)))
            # Take the current database file
            # Move the messages into daily files
            self.log_db_do_archive()

            today = datetime.date.today()
            today0005 = datetime.datetime(today.year, today.month, today.day, 0, 5, 0)
            if now < time.mktime(today0005.timetuple()):
                nextrotation = today0005
            else:
                nextrotation = today0005 + datetime.timedelta(days=1)

            # See you tomorrow
            self.next_log_db_rotate = time.mktime(nextrotation.timetuple())
            logger.info("[Logstore SQLite] next rotation at %s " % time.asctime(time.localtime(self.next_log_db_rotate)))
Example #25
def get_instance(plugin):
    logger.info("Get a RawSocket broker for plugin %s" % plugin.get_name())

    #Catch errors
    #path = plugin.path
    instance = RawSocket_broker(plugin)
    return instance
Example #26
    def main(self):
        self.set_proctitle(self.name)
        self.set_exit_handler()

        self.open()

        input = [self.fifo]

        while not self.interrupted:
            if input == []:
                time.sleep(1)
                continue
            try:
                inputready, outputready, exceptready = select.select(input, [], [], 1)
            except select.error, e:
                if e.args[0] == errno.EINTR:
                    logger.info("[%s] Received exit signal. Bailing out." % self.get_name())
                    return
            
            for s in inputready:
                ext_cmds = self.get()

                if ext_cmds:
                    for ext_cmd in ext_cmds:
                        self.from_q.put(ext_cmd)
                else:
                    self.fifo = self.open()
                    if self.fifo is not None:
                        input = [self.fifo]
                    else:
                        input = []
Example #27
    def process_check_result(self, databuffer, IV):
        # 208 is the size of fixed received data ... NSCA packets are 208+512 (720) or 208+4096 (4304)
        if not databuffer:
            logger.warning("[NSCA] Received an empty NSCA packet")
            return

        logger.debug("[NSCA] Received NSCA packet: %s", binascii.hexlify(databuffer))

        payload_length = len(databuffer) - 208
        if payload_length != 512 and payload_length != 4096:
            logger.warning("[NSCA] Received packet with unusual payload length: %d.", payload_length)
            
        if self.payload_length != -1 and payload_length != self.payload_length:
            logger.warning("[NSCA] Dropping packet with incorrect payload length.")
            return
            
        (timestamp, rc, hostname, service, output) = self.read_check_result(databuffer, IV, payload_length)
        current_time = time.time()
        check_result_age = current_time - timestamp
        if timestamp > current_time and self.check_future_packet:
            logger.warning("[NSCA] Dropping packet with future timestamp.")
        elif check_result_age > self.max_packet_age:
            logger.info(
                "[NSCA] Dropping packet with stale timestamp - packet was %s seconds old. Timestamp: %s for %s/%s" % \
                (check_result_age, timestamp, hostname, service))
        else:
            self.post_command(timestamp, rc, hostname, service, output)
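
read_check_result() is not shown. For reference, the classic NSCA transport XOR-encrypts the whole packet with the 128-byte IV (and the password, when one is set) before the fixed C struct is unpacked; a hedged sketch of that decryption step, assuming encryption method 1 (simple XOR):

from itertools import cycle, izip  # izip is Python 2; use zip on Python 3

def xor_decrypt(databuffer, IV, password=''):
    # NSCA method 1: XOR each byte with the repeated IV, then the password
    out = [chr(ord(c) ^ ord(k)) for c, k in izip(databuffer, cycle(IV))]
    if password:
        out = [chr(ord(c) ^ ord(k)) for c, k in izip(out, cycle(password))]
    return ''.join(out)
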
Example #28
    def do_pynag_con_init(self, id, type='scheduler'):
        # Get the good links tab for looping..
        links = self.get_links_from_type(type)
        if links is None:
            logger.debug('Type unknown for connection! %s', type)
            return

        if type == 'scheduler':
            # If sched is not active, I do not try to init
            # it is just useless
            is_active = links[id]['active']
            if not is_active:
                return

        # If we try to connect too much, we slow down our tests
        if self.is_connection_try_too_close(links[id]):
            return

        # Ok, we can now update it
        links[id]['last_connection'] = time.time()

        # DBG: print "Init connection with", links[id]['uri']
        running_id = links[id]['running_id']
        # DBG: print "Running id before connection", running_id
        uri = links[id]['uri']
        try:
            con = links[id]['con'] = HTTPClient(uri=uri, strong_ssl=links[id]['hard_ssl_name_check'])
        except HTTPExceptions, exp:
            # But the multiprocessing module is not compatible with it!
            # so we must disable it immediately after
            logger.info("Connection problem to the %s %s: %s", type, links[id]['name'], str(exp))
            links[id]['con'] = None
            return
Example #29
def get_instance(plugin):
    """ Return a module instance for the plugin manager """
    logger.info("Get a NSCA arbiter module for plugin %s" % plugin.get_name())

    host = getattr(plugin, 'host', '127.0.0.1')
    if host == '*':
        host = ''
    
    port = int(getattr(plugin, 'port', '5667'))
    buffer_length = int(getattr(plugin, 'buffer_length', '4096'))
    payload_length = int(getattr(plugin, 'payload_length', '-1'))
    encryption_method = int(getattr(plugin, 'encryption_method', '0'))

    backlog = int(getattr(plugin, 'backlog', '10'))

    password = getattr(plugin, 'password', '')
    if password == "" and encryption_method != 0:
        logger.error("[NSCA] No password specified whereas there is a encryption_method defined")
        logger.warning("[NSCA] Setting password to dummy to avoid crash!")
        password = "******"

    max_packet_age = min(int(getattr(plugin, 'max_packet_age', '30')), 900)
    check_future_packet = bool(getattr(plugin, 'check_future_packet', 0))

    instance = NSCA_arbiter(plugin, host, port,
            buffer_length, payload_length, encryption_method, password, max_packet_age, check_future_packet,
            backlog)
    return instance
Example #30
    def __init__(self, mod_conf, pub_endpoint, serialize_to):
        from zmq import Context, PUB

        BaseModule.__init__(self, mod_conf)
        self.pub_endpoint = pub_endpoint
        self.serialize_to = serialize_to
        logger.info("[Zmq Broker] Binding to endpoint " + self.pub_endpoint)

        # This doesn't work properly in init()
        # sometimes it ends up being called several
        # times and the address becomes already in use.
        self.context = Context()
        self.s_pub = self.context.socket(PUB)
        self.s_pub.bind(self.pub_endpoint)

        # Load the correct serialization function
        # depending on the serialization method
        # chosen in the configuration.
        if self.serialize_to == "msgpack":
            from msgpack import Packer

            packer = Packer(default=encode_monitoring_data)
            self.serialize = lambda msg: packer.pack(msg)
        elif self.serialize_to == "json":
            self.serialize = lambda msg: json.dumps(msg, cls=SetEncoder)
        else:
            raise Exception("[Zmq Broker] No valid serialization method defined (Got " + str(self.serialize_to) + ")!")
Example #31
def load_config(app):
    global params

    logger.info("[WebUI-worldmap] loading configuration ...")

    properties = {
        'worldmap-zoom': '{"default_zoom": 16}',
        'worldmap-lng': '{"default_lng": 5.080625}',
        'worldmap-lat': '{"default_lat": 45.054148}',
        'worldmap-hosts': '{"hosts_level": [1,2,3,4,5]}',
        'worldmap-services': '{"services_level": [1,2,3,4,5]}',
        'worldmap-layer': '{"layer": ""}',
    }

    for p, default in properties.items():
        params.update(
            json.loads(app.prefs_module.get_ui_common_preference(p, default)))

    logger.info("[WebUI-worldmap] configuration loaded.")
    logger.info("[WebUI-worldmap] configuration, params: %s", params)
Example #32
 def manage_update_service_status_brok(self, b):
     # Update the service scheduled-downtime status
     host_name = b.data['host_name']
     service_description = b.data['service_description']
     service_id = host_name + '/' + service_description
     if service_id not in self.services_downtime:
         logger.info(
             "[RawSocket] received service status update for an unknown service: %s",
             service_id)
         logger.info(
             "[RawSocket] setting service status for unknown service: %s",
             service_id)
         self.services_downtime[service_id] = b.data[
             'in_scheduled_downtime']
     else:
         logger.info(
             "[RawSocket] received service status update: %s - downtime=%s",
              service_id, b.data['in_scheduled_downtime'])
         self.services_downtime[service_id] = b.data[
             'in_scheduled_downtime']
Example #33
    def manage_initial_broks_done_brok(self, b):
        if self.con is None:
            return
        logger.info(
            "[Active Directory UI] AD/LDAP: manage_initial_broks_done_brok, go for pictures"
        )

        searchScope = ldap.SCOPE_SUBTREE
        ## retrieve all attributes - again adjust to your needs - see documentation for more options
        #retrieveAttributes = ["userPrincipalName", "thumbnailPhoto", "samaccountname", "email"]

        logger.info("[Active Directory UI] Contacts? %d" %
                    len(self.app.datamgr.get_contacts()))

        for c in self.app.datamgr.get_contacts():
            logger.debug(
                "[Active Directory UI] Doing photo lookup for contact: %s" %
                c.get_name())
            elts = self.find_contact_entry(c)

            if elts is None:
                logger.warning("[Active Directory UI] No ldap entry for %s" %
                               c.get_name())
                continue

            # Ok, try to get photo from the entry
            try:
                photo = elts[self.photo_attr][0]
                try:
                    p = os.path.join(self.app.photo_dir, c.get_name() + '.jpg')
                    f = open(p, 'wb')
                    f.write(photo)
                    f.close()
                    logger.info("[Active Directory UI] Photo wrote for %s" %
                                c.get_name())
                except Exception, exp:
                    logger.error("[Active Directory UI] Cannot write %s : %s" %
                                 (p, str(exp)))
            except KeyError:
                logger.warning("[Active Directory UI] No photo for %s" %
                               c.get_name())
Example #34
    def tasks_are_working(self):
        MAX_FAILED_LOOPS = 3
        GOOD_ENOUGH_FINISHED_TASKS_PERCENTAGE = 4.5
        GOOD_ENOUGH_FINISHED_TASKS_NUMBER = 5

        logger.info('[SnmpPoller] tasks_are_working self.failed_loops=%d',
                    self.failed_loops)
        if self.failed_loops == -1:
            self.failed_loops = 0
            return True

        if self.last_unfinished_tasks > self.unfinished_tasks:
            logger.info(
                '[SnmpPoller] tasks_are_working last=%d <- unfinished=%d',
                self.last_unfinished_tasks, self.unfinished_tasks)
            self.last_unfinished_tasks = self.unfinished_tasks
            return True
        else:
            self.failed_loops += 1
            if self.failed_loops >= MAX_FAILED_LOOPS:
                logger.info(
                    "[SnmpPoller] tasks_are_working (self.failed_loops >= MAX_FAILED_LOOPS!!) unfinished_tasks=%d init_unfinished_tasks=%d",
                    self.unfinished_tasks, self.init_unfinished_tasks)
                try:
                    pct_unfinished_tasks = 100 * self.unfinished_tasks / float(
                        self.init_unfinished_tasks)
                except ZeroDivisionError:
                    logger.warning(
                        "[SnmpPoller] tasks_are_working: unfinished_tasks=%d init_unfinished_tasks=%d",
                        self.unfinished_tasks, self.init_unfinished_tasks)
                    pct_unfinished_tasks = 0

                if pct_unfinished_tasks < GOOD_ENOUGH_FINISHED_TASKS_PERCENTAGE or self.unfinished_tasks < GOOD_ENOUGH_FINISHED_TASKS_NUMBER:
                    raise GoodEnoughSnmpPollerException(
                        "Unfinished tasks: %d (%.2f%%)" %
                        (self.unfinished_tasks, pct_unfinished_tasks))
                else:
                    return False
            else:
                return True
Example #35
    def join_with_timeout(self, timeout):
        logger.info("[SnmpPoller] join_with_timeout starts... tasks: %d",
                    self.unfinished_tasks)
        self.last_unfinished_tasks = self.unfinished_tasks
        self.init_unfinished_tasks = self.unfinished_tasks

        self.all_tasks_done.acquire()
        try:
            endtime = time.time() + timeout
            while self.unfinished_tasks:
                self.all_tasks_done.wait(1)  # let task start
                remaining = endtime - time.time()
                logger.info(
                    "[SnmpPoller] time remaining: %s unfinished tasks: %d",
                    remaining, self.unfinished_tasks)
                syslog.syslog(
                    syslog.LOG_DEBUG,
                    "[SnmpPoller] time remaining: %s unfinished tasks: %d" %
                    (remaining, self.unfinished_tasks))
                if not self.tasks_are_working():
                    raise TaskAreNotWorkingSnmpPollerException(
                        "[SnmpPoller] tasks are not working!")

                if remaining <= 0.0:
                    logger.info("[SnmpPoller] timeout!")
                    raise TimeoutSnmpPollerException(
                        "[SnmpPoller] polling timeout!")
                self.all_tasks_done.wait(5)
        except Exception, exc:
            logger.warning(
                "[SnmpPoller] join_with_timeout Exception -> (%s: %s)!",
                type(exc), str(exc))
            try:
                self.all_tasks_done.release()
            # except RuntimeError, exc:
            except Exception, exc:
                logger.warning(
                    "[SnmpPoller] join_with_timeout->Exception->self.all_tasks_done.release RuntimeError (type=%s: %s)!",
                    type(exc), exc)
Example #36
    def __init__(self, mod_conf):
        try:
            import pymongo
        except ImportError:
            logger.error('[WebUI-MongoDBPreferences] Can not import pymongo. '
                         'Please install it with a 3.x+ version from '
                         'https://pypi.python.org/pypi/pymongo')
            raise

        self.uri = getattr(mod_conf, 'uri', 'mongodb://localhost')
        logger.info('[WebUI-MongoDBPreferences] mongo uri: %s' % self.uri)

        self.replica_set = getattr(mod_conf, 'replica_set', None)
        if self.replica_set and int(pymongo.version[0]) < 3:
            logger.error(
                '[WebUI-MongoDBPreferences] Can not initialize module with '
                'replica_set because your pymongo lib is too old. '
                'Please install it with a 3.x+ version from '
                'https://pypi.python.org/pypi/pymongo')
            return None

        self.database = getattr(mod_conf, 'database', 'shinken')
        self.username = getattr(mod_conf, 'username', None)
        self.password = getattr(mod_conf, 'password', None)
        logger.info('[WebUI-MongoDBPreferences] database: %s, user: %s',
                    self.database, self.username)

        self.mongodb_fsync = getattr(mod_conf, 'mongodb_fsync',
                                     "True") == "True"

        self.is_connected = False
        self.con = None
        self.db = None

        logger.info(
            "[WebUI-MongoDBPreferences] Trying to open a Mongodb connection to %s, database: %s"
            % (self.uri, self.database))
        self.open()
Example #37
    def __init__(self, mod_conf):
        self.uri = getattr(mod_conf, "uri", "mongodb://localhost")
        logger.info("[WebUI-MongoDBPreferences] mongo uri: %s", self.uri)

        self.replica_set = getattr(mod_conf, "replica_set", None)
        if self.replica_set and int(pymongo.version[0]) < 3:
            logger.error(
                "[WebUI-MongoDBPreferences] Can not initialize module with "
                "replica_set because your pymongo lib is too old. "
                "Please install it with a 3.x+ version from "
                "https://pypi.python.org/pypi/pymongo")
            return

        self.database = getattr(mod_conf, "database", "shinken")
        self.username = getattr(mod_conf, "username", None)
        self.password = getattr(mod_conf, "password", None)
        logger.info("[WebUI-MongoDBPreferences] database: %s, user: %s",
                    self.database, self.username)

        self.mongodb_fsync = getattr(mod_conf, "mongodb_fsync",
                                     "True") == "True"

        self.is_connected = False
        self.con = None
        self.db = None

        if self.uri:
            logger.info(
                "[WebUI-MongoDBPreferences] Trying to open a Mongodb connection to %s, database: %s",
                self.uri, self.database)
            self.open()
        else:
            logger.warning(
                "You do not have any MongoDB connection for user's preferences storage module installed. "
                "The Web UI dashboard and user's preferences will not be saved."
            )
Example #38
    def init(self):
        if not os.path.exists(self.path):
            logger.info(
                "[Quorum] Trying to create a quorum file %s of %s size" %
                (self.path, self.total_size))
            # Opening the file in create mode, and filling it with zeros up to the full size
            fd = os.open(self.path, os.O_CREAT | os.O_WRONLY)
            os.write(fd, '\0' * self.total_size)
            os.close(fd)
        else:
            # Assume that the file is quite as big as it should
            f = os.open(self.path, os.O_RDWR)
            end = os.lseek(f, 0, os.SEEK_END)
            # if the end offset is too small, try to increase it
            if end < self.total_size:
                logger.info(
                    "[Quorum] Trying to increase the quorum file %s of %s size"
                    % (self.path, self.total_size))
                for p in range(end, self.total_size):
                    os.write(f, '\0')
            os.close(f)

        # Ok here we got a valid file
        logger.info("[Quorum] Quorum file checked")
Example #39
    def check_authentication(self, username, password):
        logger.info("[WebUI] Checking authentication for user: %s", username)
        self.user_picture = None

        c = self.datamgr.get_contact(username)
        if not c:
            logger.error(
                "[WebUI] You need to have a contact having the same name as your user: %s",
                username)
            return False

        logger.info("[WebUI] Requesting authentication for user: %s", username)
        r = self.auth_module.check_auth(username, password)
        if r:
            user = User.from_contact(c,
                                     picture=self.user_picture,
                                     use_gravatar=self.gravatar)
            self.user_picture = user.picture
            logger.info("[WebUI] User picture: %s", self.user_picture)
            return True

        logger.warning("[WebUI] The user '%s' has not been authenticated.",
                       username)
        return False
Example #40
 def init(self):
     logger.info("I open the HOT dependency module")
Example #41
def get_instance(plugin):
    logger.info("[Dummy Arbiter] Get a Dummy arbiter module for plugin %s",
                plugin.get_name())
    instance = Dummy_arbiter(plugin)
    return instance
Example #42
    def setup_new_conf(self):
        conf = self.new_conf
        self.new_conf = None
        self.cur_conf = conf
        # Got our name from the globals
        if 'receiver_name' in conf['global']:
            name = conf['global']['receiver_name']
        else:
            name = 'Unnamed receiver'
        self.name = name
        logger.load_obj(self, name)
        self.direct_routing = conf['global']['direct_routing']

        g_conf = conf['global']

        # If we've got something in the schedulers, we do not want it anymore
        for sched_id in conf['schedulers']:

            already_got = False

            # We may already have this conf id, but with another address
            if sched_id in self.schedulers:
                new_addr = conf['schedulers'][sched_id]['address']
                old_addr = self.schedulers[sched_id]['address']
                new_port = conf['schedulers'][sched_id]['port']
                old_port = self.schedulers[sched_id]['port']
                # All of them should match for it to be ok :)
                if new_addr == old_addr and new_port == old_port:
                    already_got = True

            if already_got:
                logger.info("[%s] We already got the conf %d (%s)" %
                            (self.name, sched_id,
                             conf['schedulers'][sched_id]['name']))
                wait_homerun = self.schedulers[sched_id]['wait_homerun']
                actions = self.schedulers[sched_id]['actions']
                external_commands = self.schedulers[sched_id][
                    'external_commands']
                con = self.schedulers[sched_id]['con']

            s = conf['schedulers'][sched_id]
            self.schedulers[sched_id] = s

            if s['name'] in g_conf['satellitemap']:
                s.update(g_conf['satellitemap'][s['name']])
            uri = 'http://%s:%s/' % (s['address'], s['port'])

            self.schedulers[sched_id]['uri'] = uri
            if already_got:
                self.schedulers[sched_id]['wait_homerun'] = wait_homerun
                self.schedulers[sched_id]['actions'] = actions
                self.schedulers[sched_id][
                    'external_commands'] = external_commands
                self.schedulers[sched_id]['con'] = con
            else:
                self.schedulers[sched_id]['wait_homerun'] = {}
                self.schedulers[sched_id]['actions'] = {}
                self.schedulers[sched_id]['external_commands'] = []
                self.schedulers[sched_id]['con'] = None
            self.schedulers[sched_id]['running_id'] = 0
            self.schedulers[sched_id]['active'] = s['active']

            # Do not connect if we are a passive satellite
            if self.direct_routing and not already_got:
                # And then we connect to it :)
                self.pynag_con_init(sched_id)

        logger.debug("[%s] Sending us configuration %s" % (self.name, conf))

        if not self.have_modules:
            self.modules = mods = conf['global']['modules']
            self.have_modules = True
            logger.info("We received modules %s " % mods)

        # Set our giving timezone from arbiter
        use_timezone = conf['global']['use_timezone']
        if use_timezone != 'NOTSET':
            logger.info("Setting our timezone to %s" % use_timezone)
            os.environ['TZ'] = use_timezone
            time.tzset()

        # Now create the external commander. It's just here to dispatch
        # the commands to schedulers
        e = ExternalCommandManager(None, 'receiver')
        e.load_receiver(self)
        self.external_command = e
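
The shape of the conf dict handed to setup_new_conf() can be read off the accesses above; a minimal illustration of what the arbiter presumably sends (all values are made up):

conf = {
    'global': {
        'receiver_name': 'receiver-1',
        'direct_routing': True,
        'satellitemap': {},   # optional per-scheduler address/port overrides
        'modules': [],
        'use_timezone': 'NOTSET',
    },
    'schedulers': {
        0: {'name': 'scheduler-1', 'address': '127.0.0.1',
            'port': 7768, 'active': True},
    },
}
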
Example #43
class Broker(BaseSatellite):
    properties = BaseSatellite.properties.copy()
    properties.update({
        'pidfile':   PathProp(default='brokerd.pid'),
        'port':      IntegerProp(default=7772),
        'local_log': PathProp(default='brokerd.log'),
    })


    def __init__(self, config_file, is_daemon, do_replace, debug, debug_file, profile=''):

        super(Broker, self).__init__('broker', config_file, is_daemon, do_replace, debug,
                                     debug_file)

        # Our arbiters
        self.arbiters = {}

        # Our pollers, reactionners and receivers
        self.pollers = {}
        self.reactionners = {}
        self.receivers = {}

        # Modules are loaded only once
        self.have_modules = False

        # Can have a queue of external_commands given by modules
        # will be processed by arbiter
        self.external_commands = []

        # All broks to manage
        self.broks = deque()  # broks to manage
        self.external_module_broks = deque()  # broks during this loop to send to external modules
        self.broks_lock = threading.RLock()  # to manage lock when managing broks
        # broks raised this turn and that needs to be put in self.broks
        self.broks_internal_raised = []
        # broks raised by the arbiters, we need a lock so the push can be in parallel
        # to our current activities and won't lock the arbiter
        self.arbiter_broks = []
        self.arbiter_broks_lock = threading.RLock()

        self.timeout = 1.0

        self.istats = IStats(self)


    # Schedulers have some queues. We can simplify the call by adding
    # elements into the proper queue just by looking at their type
    # Brok -> self.broks
    # TODO: better tag ID?
    # External commands -> self.external_commands
    def add(self, elt):
        cls_type = elt.__class__.my_type
        if cls_type == 'brok':
            # For brok, we TAG brok with our instance_id
            elt.instance_id = 0
            self.broks_internal_raised.append(elt)
            return
        elif cls_type == 'externalcommand':
            logger.debug("Enqueuing an external command '%s'", str(ExternalCommand.__dict__))
            self.external_commands.append(elt)
        # Maybe we got a Message from the modules; it's a way to ask for something,
        # like the full data from a scheduler for example.
        elif cls_type == 'message':
            # We got a message, great!
            logger.debug(str(elt.__dict__))
            if elt.get_type() == 'NeedData':
                data = elt.get_data()
                # Full instance id means: I got no data for this scheduler
                # so give me all dumbass!
                if 'full_instance_id' in data:
                    c_id = data['full_instance_id']
                    source = elt.source
                    logger.info('The module %s is asking me to get all initial data '
                                'from the scheduler %d',
                                source, c_id)
                    # so we just reset the connection and the running_id,
                    # it will just get all new things
                    try:
                        self.schedulers[c_id]['con'] = None
                        self.schedulers[c_id]['running_id'] = 0
                    except KeyError:  # maybe this instance was not known, forget it
                        logger.warning("the module %s ask me a full_instance_id "
                                       "for an unknown ID (%d)!", source, c_id)
            # Maybe a module tells me that it's dead, I must log its last words...
            if elt.get_type() == 'ICrash':
                data = elt.get_data()
                logger.error('the module %s just crashed! Please look at the traceback:',
                             data['name'])
                logger.error(data['trace'])

                # The module death will be looked for elsewhere and restarted.


    # Get the good tabs for links by the kind. If unknown, return None
    def get_links_from_type(self, d_type):
        t = {'scheduler':   self.schedulers,
             'arbiter':     self.arbiters,
             'poller':      self.pollers,
             'reactionner': self.reactionners,
             'receiver':    self.receivers
             }
        if d_type in t:
            return t[d_type]
        return None


    # Check that we do not connect too often to this
    def is_connection_try_too_close(self, elt):
        now = time.time()
        last_connection = elt['last_connection']
        if now - last_connection < 5:
            return True
        return False


    # wrapper function for the real function do_
    # just for timing the connection
    def pynag_con_init(self, id, type='scheduler'):
        _t = time.time()
        r = self.do_pynag_con_init(id, type)
        statsmgr.timing('con-init.%s' % type, time.time() - _t, 'perf')
        return r


    # initialize or re-initialize connection with scheduler or
    # arbiter if type == arbiter
    def do_pynag_con_init(self, id, type='scheduler'):
        # Get the good links tab for looping..
        links = self.get_links_from_type(type)
        if links is None:
            logger.debug('Type unknown for connection! %s', type)
            return

        # default timeout for daemons like pollers/reactionners/...
        timeout = 3
        data_timeout = 120

        if type == 'scheduler':
            # If sched is not active, I do not try to init
            # it is just useless
            is_active = links[id]['active']
            if not is_active:
                return
            # schedulers also got real timeout to respect
            timeout = links[id]['timeout']
            data_timeout = links[id]['data_timeout']

        # If we try to connect too much, we slow down our tests
        if self.is_connection_try_too_close(links[id]):
            return

        # Ok, we can now update it
        links[id]['last_connection'] = time.time()

        # DBG: print "Init connection with", links[id]['uri']
        running_id = links[id]['running_id']
        # DBG: print "Running id before connection", running_id
        uri = links[id]['uri']
        try:
            con = links[id]['con'] = HTTPClient(uri=uri,
                                                strong_ssl=links[id]['hard_ssl_name_check'],
                                                timeout=timeout, data_timeout=data_timeout)
        except HTTPExceptions as exp:
            # Connection creation failed: log it and reset the link
            logger.info("Connection problem to the %s %s: %s", type, links[id]['name'], str(exp))
            links[id]['con'] = None
            return

        try:
            # initial ping must be quick
            con.get('ping')
            new_run_id = con.get('get_running_id')
            new_run_id = float(new_run_id)
            # data transfer can be longer

            # The scheduler has been restarted: it has a new run_id.
            # So we clear all pending broks; they are obsolete now.
            if new_run_id != running_id:
                logger.debug("[%s] New running id for the %s %s: %s (was %s)",
                             self.name, type, links[id]['name'], new_run_id, running_id)
                del links[id]['broks'][:]
                # we must ask for a new full broks if
                # it's a scheduler
                if type == 'scheduler':
                    logger.debug("[%s] I ask for a broks generation to the scheduler %s",
                                 self.name, links[id]['name'])
                    con.get('fill_initial_broks', {'bname': self.name}, wait='long')
            # Ok all is done, we can save this new running id
            links[id]['running_id'] = new_run_id
        except HTTPExceptions as exp:
            logger.info("Connection problem to the %s %s: %s", type, links[id]['name'], str(exp))
            links[id]['con'] = None
            return
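
The connection handshake above reduces to: ping for liveness, fetch the remote
running_id, and resynchronize if the daemon was restarted. A condensed sketch of
that logic (illustrative only; it reuses the HTTPClient get() calls shown above):

    def resync_if_restarted(link, con, my_name, is_scheduler):
        # Quick liveness check, then compare running ids
        con.get('ping')
        new_run_id = float(con.get('get_running_id'))
        if new_run_id != link['running_id']:
            # The remote daemon restarted: its queued broks are obsolete
            del link['broks'][:]
            if is_scheduler:
                # Ask the scheduler to regenerate its initial broks
                con.get('fill_initial_broks', {'bname': my_name}, wait='long')
        link['running_id'] = new_run_id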
Exemplo n.º 44
0
 def hook_late_configuration(self, conf):
     logger.info("[Dummy Arbiter] Dummy in hook late config")
Exemplo n.º 45
0
def get_instance(plugin):
    logger.info("Get an PNP UI module for plugin %s" % plugin.get_name())

    instance = PNP_Webui(plugin)
    return instance
Exemplo n.º 46
0
    def resolve_daterange(self, dateranges, entry):
        #print "Trying to resolve ", entry

        res = re.search(
            '(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2}) / (\d+)[\s\t]*([0-9:, -]+)',
            entry)
        if res is not None:
            #print "Good catch 1"
            (syear, smon, smday, eyear, emon, emday, skip_interval,
             other) = res.groups()
            dateranges.append(
                CalendarDaterange(syear, smon, smday, 0, 0, eyear, emon, emday,
                                  0, 0, skip_interval, other))
            return

        res = re.search('(\d{4})-(\d{2})-(\d{2}) / (\d+)[\s\t]*([0-9:, -]+)',
                        entry)
        if res is not None:
            #print "Good catch 2"
            (syear, smon, smday, skip_interval, other) = res.groups()
            eyear = syear
            emon = smon
            emday = smday
            dateranges.append(
                CalendarDaterange(syear, smon, smday, 0, 0, eyear, emon, emday,
                                  0, 0, skip_interval, other))
            return

        res = re.search(
            '(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2})[\s\t]*([0-9:, -]+)',
            entry)
        if res is not None:
            #print "Good catch 3"
            (syear, smon, smday, eyear, emon, emday, other) = res.groups()
            dateranges.append(
                CalendarDaterange(syear, smon, smday, 0, 0, eyear, emon, emday,
                                  0, 0, 0, other))
            return

        res = re.search('(\d{4})-(\d{2})-(\d{2})[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            #print "Good catch 4"
            (syear, smon, smday, other) = res.groups()
            eyear = syear
            emon = smon
            emday = smday
            dateranges.append(
                CalendarDaterange(syear, smon, smday, 0, 0, eyear, emon, emday,
                                  0, 0, 0, other))
            return

        res = re.search(
            '([a-z]*) ([\d-]+) ([a-z]*) - ([a-z]*) ([\d-]+) ([a-z]*) / (\d+)[\s\t]*([0-9:, -]+)',
            entry)
        if res is not None:
            #print "Good catch 5"
            (swday, swday_offset, smon, ewday, ewday_offset, emon,
             skip_interval, other) = res.groups()
            dateranges.append(
                MonthWeekDayDaterange(0, smon, 0, swday, swday_offset, 0, emon,
                                      0, ewday, ewday_offset, skip_interval,
                                      other))
            return

        res = re.search(
            '([a-z]*) ([\d-]+) - ([a-z]*) ([\d-]+) / (\d+)[\s\t]*([0-9:, -]+)',
            entry)
        if res is not None:
            #print "Good catch 6"
            (t0, smday, t1, emday, skip_interval, other) = res.groups()
            if t0 in Daterange.weekdays and t1 in Daterange.weekdays:
                swday = t0
                ewday = t1
                swday_offset = smday
                ewday_offset = emday
                dateranges.append(
                    WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0,
                                     ewday, ewday_offset, skip_interval,
                                     other))
                return
            elif t0 in Daterange.months and t1 in Daterange.months:
                smon = t0
                emon = t1
                dateranges.append(
                    MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0,
                                       0, skip_interval, other))
                return
            elif t0 == 'day' and t1 == 'day':
                dateranges.append(
                    MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0,
                                      skip_interval, other))
                return

        res = re.search(
            '([a-z]*) ([\d-]+) - ([\d-]+) / (\d+)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            #print "Good catch 7"
            (t0, smday, emday, skip_interval, other) = res.groups()
            if t0 in Daterange.weekdays:
                swday = t0
                swday_offset = smday
                ewday = swday
                ewday_offset = emday
                dateranges.append(
                    WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0,
                                     ewday, ewday_offset, skip_interval,
                                     other))
                return
            elif t0 in Daterange.months:
                smon = t0
                emon = smon
                dateranges.append(
                    MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0,
                                       0, skip_interval, other))
                return
            elif t0 == 'day':
                dateranges.append(
                    MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0,
                                      skip_interval, other))
                return

        res = re.search(
            '([a-z]*) ([\d-]+) ([a-z]*) - ([a-z]*) ([\d-]+) ([a-z]*) [\s\t]*([0-9:, -]+)',
            entry)
        if res is not None:
            #print "Good catch 8"
            (swday, swday_offset, smon, ewday, ewday_offset, emon,
             other) = res.groups()
            #print "Debug:", (swday, swday_offset, smon, ewday, ewday_offset, emon, other)
            dateranges.append(
                MonthWeekDayDaterange(0, smon, 0, swday, swday_offset, 0, emon,
                                      0, ewday, ewday_offset, 0, other))
            return

        res = re.search('([a-z]*) ([\d-]+) - ([\d-]+)[\s\t]*([0-9:, -]+)',
                        entry)
        if res is not None:
            #print "Good catch 9"
            (t0, smday, emday, other) = res.groups()
            if t0 in Daterange.weekdays:
                swday = t0
                swday_offset = smday
                ewday = swday
                ewday_offset = emday
                dateranges.append(
                    WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0,
                                     ewday, ewday_offset, 0, other))
                return
            elif t0 in Daterange.months:
                smon = t0
                emon = smon
                dateranges.append(
                    MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0,
                                       0, 0, other))
                return
            elif t0 == 'day':
                dateranges.append(
                    MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0, 0,
                                      other))
                return

        res = re.search(
            '([a-z]*) ([\d-]+) - ([a-z]*) ([\d-]+)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            #print "Good catch 10"
            (t0, smday, t1, emday, other) = res.groups()
            if t0 in Daterange.weekdays and t1 in Daterange.weekdays:
                swday = t0
                ewday = t1
                swday_offset = smday
                ewday_offset = emday
                dateranges.append(
                    WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0,
                                     ewday, ewday_offset, 0, other))
                return
            elif t0 in Daterange.months and t1 in Daterange.months:
                smon = t0
                emon = t1
                dateranges.append(
                    MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0,
                                       0, 0, other))
                return
            elif t0 == 'day' and t1 == 'day':
                dateranges.append(
                    MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0, 0,
                                      other))
                return

        res = re.search('([a-z]*) ([\d-]+) ([a-z]*)[\s\t]*([0-9:, -]+)', entry)
        if res is not None:
            #print "Good catch 11"
            (t0, swday_offset, t1, other) = res.groups()
            if t0 in Daterange.weekdays and t1 in Daterange.months:
                swday = t0
                smon = t1
                emon = smon
                ewday = swday
                ewday_offset = swday_offset
                dateranges.append(
                    MonthWeekDayDaterange(0, smon, 0, swday, swday_offset, 0,
                                          emon, 0, ewday, ewday_offset, 0,
                                          other))
                return

        res = re.search('([a-z]*) ([\d-]+)[\s\t]+([0-9:, -]+)', entry)
        if res is not None:
            #print "Good catch 12"
            (t0, smday, other) = res.groups()
            if t0 in Daterange.weekdays:
                swday = t0
                swday_offset = smday
                ewday = swday
                ewday_offset = swday_offset
                dateranges.append(
                    WeekDayDaterange(0, 0, 0, swday, swday_offset, 0, 0, 0,
                                     ewday, ewday_offset, 0, other))
                return
            if t0 in Daterange.months:
                smon = t0
                emon = smon
                emday = smday
                dateranges.append(
                    MonthDateDaterange(0, smon, smday, 0, 0, 0, emon, emday, 0,
                                       0, 0, other))
                return
            if t0 == 'day':
                emday = smday
                dateranges.append(
                    MonthDayDaterange(0, 0, smday, 0, 0, 0, 0, emday, 0, 0, 0,
                                      other))
                return

        res = re.search('([a-z]*)[\s\t]+([0-9:, -]+)', entry)
        if res is not None:
            #print "Good catch 13"
            (t0, other) = res.groups()
            if t0 in Daterange.weekdays:
                day = t0
                dateranges.append(StandardDaterange(day, other))
                return
        logger.info("[timeentry::%s] no match for %s" %
                    (self.get_name(), entry))
        self.invalid_entries.append(entry)
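
For reference, here are hypothetical timeperiod entries that some of the "catches"
above would match (standard Nagios daterange syntax; dates and times are purely
illustrative):

    # catch 1:  '2007-01-01 - 2008-02-01 / 3  00:00-24:00'  -> CalendarDaterange with skip
    # catch 2:  '2007-01-01 / 5  00:00-24:00'               -> CalendarDaterange, start == end
    # catch 4:  '2007-01-01  00:00-24:00'                   -> CalendarDaterange, single day
    # catch 5:  'monday 3 january - thursday 4 february / 2  00:00-24:00'
    #                                                       -> MonthWeekDayDaterange
    # catch 10: 'day 1 - day 15  00:00-24:00'               -> MonthDayDaterange
    # catch 13: 'tuesday  00:00-24:00'                      -> StandardDaterange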
Exemplo n.º 47
0
 def init(self):
     logger.info("[Dummy Broker] Initialization of the dummy broker module")
Exemplo n.º 48
0
def get_instance(mod_conf):
    logger.info("[Dummy Broker] Get a Dummy broker module for plugin %s",
                mod_conf.get_name())
    instance = Dummy_broker(mod_conf)
    return instance
Exemplo n.º 49
0
    def check_auth(self, username, password):
        """ Check username/password.
            If there is submodules, this method calls them one by one until one of them returns
            True. If no submodule can authenticate the user, then we try with internal
            authentication methods: htpasswd file, then contact password.

            This method returns a User object if authentication succeeded, else it returns None
        """
        self._user_login = None
        self._authenticator = None
        self._session = None
        self._user_info = None
        logger.info("[WebUI] Authenticating user '%s'", username)

        if self.modules:
            for mod in self.modules:
                try:
                    logger.info("[WebUI] Authenticating user '%s' with %s",
                                username, mod.get_name())
                    if mod.check_auth(username, password):
                        logger.info("[WebUI] User '%s' is authenticated by %s",
                                    username, mod.get_name())
                        self._authenticator = mod.get_name()
                        self._user_login = username

                        # Session identifier ?
                        f = getattr(mod, 'get_session', None)
                        if f and callable(f):
                            self._session = mod.get_session()
                            logger.info("[WebUI] User session: %s",
                                        self._session)

                        # User information ?
                        f = getattr(mod, 'get_user_info', None)
                        if f and callable(f):
                            self._user_info = mod.get_user_info()
                            logger.info("[WebUI] User info: %s",
                                        self._user_info)
                except Exception as exp:
                    logger.warning("[WebUI] The mod %s raised an exception: %s",
                                   mod.get_name(), str(exp))
                    logger.warning("[WebUI] Back trace: %s",
                                   traceback.format_exc())

        if not self._user_login:
            logger.info("[WebUI] Internal htpasswd authentication")
            if self.app.htpasswd_file and self.check_apache_htpasswd_auth(
                    username, password):
                self._authenticator = 'htpasswd'
                self._user_login = username

        if not self._user_login:
            logger.info("[WebUI] Internal alignak backend authentication")
            if self.app.alignak_backend_endpoint:
                if self.check_alignak_auth(username, password):
                    self._authenticator = 'alignak'
                    self._user_login = username
                    self._session = self.app.frontend.get_logged_user_token()
                    self._user_info = self.app.frontend.get_logged_user()

        if not self._user_login:
            logger.info("[WebUI] Internal contact authentication")
            if self.check_cfg_password_auth(username, password):
                self._authenticator = 'contact'
                self._user_login = username

        if self._user_login:
            logger.info("[WebUI] user authenticated thanks to %s",
                        self._authenticator)
            return self._user_login

        return None
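
The submodule contract used above is duck-typed: any object exposing
check_auth(username, password), and optionally get_session() and get_user_info(),
can act as an authenticator. A minimal hypothetical submodule, for illustration
only (not a real WebUI module):

    class StaticAuthModule(object):
        """Hypothetical authenticator accepting a single hard-coded account."""
        def __init__(self, login, password):
            self._login = login
            self._password = password

        def get_name(self):
            return 'static-auth'

        def check_auth(self, username, password):
            return username == self._login and password == self._password

        def get_session(self):
            # Optional: only called if present and callable
            return 'session-%s' % self._login

        def get_user_info(self):
            # Optional as well
            return {'login': self._login, 'role': 'user'}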
Exemplo n.º 50
0
 def init(self):
     logger.info(
         "[Dummy Arbiter] Initialization of the dummy arbiter module")
Exemplo n.º 51
0
 def init(self):
     logger.info("[NRPEPoller] Initialization of the nrpe poller module")
     self.i_am_dying = False
Exemplo n.º 52
0
 def __init__(self, modules, app):
     self.modules = modules
     self.app = app
     if not modules:
         logger.info("[WebUI] No module for %s. %s" %
                     (self.__class__.__name__, self._custom_log))
Exemplo n.º 53
0
def get_instance(mod_conf):
    logger.info("[NRPEPoller] Get a nrpe poller module for plugin %s" % mod_conf.get_name())
    return Nrpe_poller(mod_conf)
Exemplo n.º 54
0
    def open(self):
        """Open a connection to the mongodb server and check the connection by updating a documetn in a collection"""
        try:
            from pymongo import MongoClient
        except ImportError:
            logger.error(
                "[WebUI-MongoDBPreferences] Cannot import pymongo.MongoClient"
            )
            raise

        try:
            if self.replica_set:
                self.con = MongoClient(self.uri,
                                       replicaSet=self.replica_set,
                                       fsync=self.mongodb_fsync)
            else:
                self.con = MongoClient(self.uri, fsync=self.mongodb_fsync)
            logger.info("[WebUI-MongoDBPreferences] connected to mongodb: %s",
                        self.uri)

            self.db = getattr(self.con, self.database)
            logger.info(
                "[WebUI-MongoDBPreferences] connected to the database: %s",
                self.database)

            if self.username and self.password:
                self.db.authenticate(self.username, self.password)
                logger.info(
                    "[WebUI-MongoDBPreferences] user authenticated: %s",
                    self.username)

            # Upsert a test document in the collection to confirm the connection works
            logger.info(
                "[WebUI-MongoDBPreferences] updating connection test item in the collection ..."
            )
            self.db.ui_user_preferences.update_one(
                {"_id": "test-ui_prefs"}, {"$set": {
                    "last_test": time.time()
                }},
                upsert=True)
            logger.info(
                "[WebUI-MongoDBPreferences] updated connection test item")

            self.is_connected = True
            logger.info(
                "[WebUI-MongoDBPreferences] database connection established")
        except Exception as exp:
            logger.error("[WebUI-MongoDBPreferences] Exception: %s", str(exp))
            logger.debug(
                "[WebUI-MongoDBPreferences] Back trace of this kill: %s",
                traceback.format_exc())
            # Depending on exception type, should raise ...
            self.is_connected = False
            raise

        return self.is_connected
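
The connection probe itself is plain pymongo usage. A standalone sketch of the same
upsert-based check, assuming a local MongoDB server and an illustrative database
name:

    import time
    from pymongo import MongoClient

    con = MongoClient('mongodb://localhost:27017/')  # illustrative URI
    db = con['shinken']                              # illustrative database name
    # Upsert a test document: this round-trips to the server, so it fails
    # fast if the connection is not actually usable
    db.ui_user_preferences.update_one(
        {'_id': 'test-ui_prefs'},
        {'$set': {'last_test': time.time()}},
        upsert=True)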
Exemplo n.º 55
0
def get_instance(plugin):
    logger.info(
        "[Logstore SQLite] Get an LogStore Sqlite module for plugin %s" %
        plugin.get_name())
    instance = LiveStatusLogStoreSqlite(plugin)
    return instance
Exemplo n.º 56
0
 def do_loop_turn(self):
     logger.info("[Dummy Arbiter] Raise a external command as example")
     e = ExternalCommand('Viva la revolution')
     self.from_q.put(e)
     time.sleep(1)
Exemplo n.º 57
0
    def setup_new_conf(self):
        conf = self.new_conf
        self.new_conf = None
        self.cur_conf = conf
        # Got our name from the globals
        if 'receiver_name' in conf['global']:
            name = conf['global']['receiver_name']
        else:
            name = 'Unnamed receiver'
        self.name = name
        self.api_key = conf['global']['api_key']
        self.secret = conf['global']['secret']
        self.http_proxy = conf['global']['http_proxy']
        self.statsd_host = conf['global']['statsd_host']
        self.statsd_port = conf['global']['statsd_port']
        self.statsd_prefix = conf['global']['statsd_prefix']
        self.statsd_enabled = conf['global']['statsd_enabled']
        
        statsmgr.register(self, self.name, 'receiver',
                          api_key=self.api_key, secret=self.secret,
                          http_proxy=self.http_proxy,
                          statsd_host=self.statsd_host, statsd_port=self.statsd_port,
                          statsd_prefix=self.statsd_prefix,
                          statsd_enabled=self.statsd_enabled)
        logger.load_obj(self, name)
        self.direct_routing = conf['global']['direct_routing']
        self.accept_passive_unknown_check_results = conf['global']['accept_passive_unknown_check_results']

        g_conf = conf['global']

        # Refresh the schedulers configuration, keeping live state for those we already know
        for sched_id in conf['schedulers']:

            already_got = False

            # We may already have this conf id, but with another address
            if sched_id in self.schedulers:
                new_addr = conf['schedulers'][sched_id]['address']
                old_addr = self.schedulers[sched_id]['address']
                new_port = conf['schedulers'][sched_id]['port']
                old_port = self.schedulers[sched_id]['port']
                # Address and port must both match for it to be the same conf :)
                if new_addr == old_addr and new_port == old_port:
                    already_got = True

            if already_got:
                logger.info("[%s] We already got the conf %d (%s)", self.name, sched_id, conf['schedulers'][sched_id]['name'])
                wait_homerun = self.schedulers[sched_id]['wait_homerun']
                actions = self.schedulers[sched_id]['actions']
                external_commands = self.schedulers[sched_id]['external_commands']
                con = self.schedulers[sched_id]['con']

            s = conf['schedulers'][sched_id]
            self.schedulers[sched_id] = s

            if s['name'] in g_conf['satellitemap']:
                s.update(g_conf['satellitemap'][s['name']])

            proto = 'http'
            if s['use_ssl']:
                proto = 'https'
            uri = '%s://%s:%s/' % (proto, s['address'], s['port'])

            self.schedulers[sched_id]['uri'] = uri
            if already_got:
                self.schedulers[sched_id]['wait_homerun'] = wait_homerun
                self.schedulers[sched_id]['actions'] = actions
                self.schedulers[sched_id]['external_commands'] = external_commands
                self.schedulers[sched_id]['con'] = con
            else:
                self.schedulers[sched_id]['wait_homerun'] = {}
                self.schedulers[sched_id]['actions'] = {}
                self.schedulers[sched_id]['external_commands'] = []
                self.schedulers[sched_id]['con'] = None
            self.schedulers[sched_id]['running_id'] = 0
            self.schedulers[sched_id]['active'] = s['active']
            self.schedulers[sched_id]['timeout'] = s['timeout']
            self.schedulers[sched_id]['data_timeout'] = s['data_timeout']
            
            # Do not connect if we are a passive satellite
            if self.direct_routing and not already_got:
                # And then we connect to it :)
                self.pynag_con_init(sched_id)
        logger.debug("[%s] Sending us configuration %s", self.name, conf)

        if not self.have_modules:
            self.modules = mods = conf['global']['modules']
            self.have_modules = True
            logger.info("We received modules %s ", mods)

        # Set our giving timezone from arbiter
        use_timezone = conf['global']['use_timezone']
        if use_timezone != 'NOTSET':
            logger.info("Setting our timezone to %s", use_timezone)
            os.environ['TZ'] = use_timezone
            time.tzset()
Exemplo n.º 58
0
        try:
            # initial ping must be quick
            con.get('ping')
            new_run_id = con.get('get_running_id')
            new_run_id = float(new_run_id)

            # The scheduler has been restarted: it has a new run_id.
            # So we clear all pending broks; they are obsolete now.
            if new_run_id != running_id:
                logger.debug("[%s] New running id for the %s %s: %s (was %s)",
                             self.name, type, links[id]['name'], new_run_id, running_id)
                del links[id]['broks'][:]
                # we must ask for a new full broks if
                # it's a scheduler
                if type == 'scheduler':
                    logger.debug("[%s] I ask for a broks generation to the scheduler %s",
                                 self.name, links[id]['name'])
                    con.get('fill_initial_broks', {'bname': self.name}, wait='long')
            # Ok all is done, we can save this new running id
            links[id]['running_id'] = new_run_id
        except HTTPExceptions as exp:
            logger.info("Connection problem to the %s %s: %s", type, links[id]['name'], str(exp))
            links[id]['con'] = None
            return
        except KeyError as exp:
            logger.info("The %s '%s' is not initialized: %s", type, links[id]['name'], str(exp))
            links[id]['con'] = None
            traceback.print_stack()
            return

        logger.info("Connection OK to the %s %s", type, links[id]['name'])


    # Get a brok. Our role is to put it in the modules
    # DO NOT CHANGE data of b!!!
    # REF: doc/broker-modules.png (4-5)
    def manage_brok(self, b):
        # Call all modules if they catch the call
        for mod in self.modules_manager.get_internal_instances():
            try:
                mod.manage_brok(b)
            except Exception as exp:
                # A module must not be able to kill the broker: log and go on
                logger.warning("The module %s raised an exception: %s",
                               mod.get_name(), str(exp))
Exemplo n.º 59
0
    def hook_early_configuration(self, arb):
        logger.info("[IpTag] in hook early config")
        for h in arb.conf.hosts:
            if not hasattr(h, 'address') and not hasattr(h, 'host_name'):
                continue

            if h.get_name() in self.ignore_hosts:
                logger.debug("[IP Tag] Ignoring host %s" % h.get_name())
                continue

            # The address to resolve
            addr = None

            # By default take the address, if not, take host_name
            if not hasattr(h, 'address'):
                addr = h.host_name
            else:
                addr = h.address

            logger.debug("[IP Tag] Looking for %s" % h.get_name())
            logger.debug("[IP Tag] Address is %s" % str(addr))
            h_ip = None
            try:
                IP(addr)
                # If we reach here, it was a real IP :)
                h_ip = addr
            except ValueError:
                # not a literal IP address, try name resolution below
                pass

            # Ok, try again with name resolution
            if not h_ip:
                try:
                    h_ip = socket.gethostbyname(addr)
                except socket.error:
                    # name resolution failed
                    pass

            # Ok, maybe we succeed :)
            logger.debug("[IP Tag] Host ip is: %s" % str(h_ip))
            # If we got an ip that match and the object do not already got
            # the property, tag it!
            if h_ip and h_ip in self.ip_range:
                logger.debug("[IP Tag] Is in the range")
                # 4 cases: append, prepend, replace and set
                # append will join with the value if it exists (at the END)
                # prepend will join with the value if it exists (at the BEGINNING)
                # replace will set the value only if the property does NOT already exist
                # set will assign the value even if the property exists
                if self.method == 'append':
                    orig_v = getattr(h, self.property, '')
                    logger.debug("[IP Tag] Orig_v: %s" % str(orig_v))
                    # filter empty parts so an unset property does not produce a leading comma
                    new_v = ','.join([v for v in (orig_v, self.value) if v])
                    logger.debug("[IP Tag] New value: %s" % new_v)
                    setattr(h, self.property, new_v)

                # Same but we put the value before
                if self.method == 'prepend':
                    orig_v = getattr(h, self.property, '')
                    logger.debug("[IP Tag] Orig_v: %s" % str(orig_v))
                    new_v = ','.join([v for v in (self.value, orig_v) if v])
                    logger.debug("[IP Tag] New value: %s" % new_v)
                    setattr(h, self.property, new_v)

                if self.method == 'replace':
                    if not hasattr(h, self.property):
                        # Ok, set the value!
                        setattr(h, self.property, self.value)

                if self.method == 'set':
                    setattr(h, self.property, self.value)
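
Summarized, the four tagging methods reduce to a small dispatch on the property
value. A hedged restatement for illustration (apply_tag is a hypothetical helper,
not part of the module):

    def apply_tag(obj, prop, value, method):
        # Condensed version of the four methods handled above
        orig = getattr(obj, prop, '')
        if method == 'append':
            setattr(obj, prop, ','.join(v for v in (orig, value) if v))
        elif method == 'prepend':
            setattr(obj, prop, ','.join(v for v in (value, orig) if v))
        elif method == 'replace':
            # Only set the value if the property does not already exist
            if not hasattr(obj, prop):
                setattr(obj, prop, value)
        elif method == 'set':
            # Always assign, overwriting any existing value
            setattr(obj, prop, value)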
Exemplo n.º 60
0
    def setup_new_conf(self):
        conf = self.new_conf
        self.cur_conf = conf
        # Got our name from the globals
        g_conf = conf['global']
        if 'broker_name' in g_conf:
            name = g_conf['broker_name']
        else:
            name = 'Unnamed broker'
        self.name = name
        props_to_get = ['broks_batch', 'api_key', 'secret', 'http_proxy',
                        'statsd_host', 'statsd_port', 'statsd_prefix',
                        'statsd_enabled', 'statsd_interval', 'statsd_types',
                        'statsd_pattern']
        for prop in props_to_get:
            v = g_conf[prop]
            setattr(self, prop, v)
        self.harakiri_threshold = parse_memory_expr(g_conf['harakiri_threshold'])

        if self.harakiri_threshold is not None:
            self.raw_conf = self.new_conf
        else:
            self.raw_conf = None
        self.new_conf = None
        if self.aggressive_memory_management:
            free_memory()
        # We got a name so we can update the logger and the stats global objects
        logger.load_obj(self, name)
        statsmgr.register(self, name, 'broker',
                          api_key=self.api_key,
                          secret=self.secret,
                          http_proxy=self.http_proxy,
                          statsd_host=self.statsd_host,
                          statsd_port=self.statsd_port,
                          statsd_prefix=self.statsd_prefix,
                          statsd_enabled=self.statsd_enabled,
                          statsd_interval=self.statsd_interval,
                          statsd_types=self.statsd_types,
                          statsd_pattern=self.statsd_pattern)

        logger.debug("[%s] Sending us configuration %s", self.name, conf)
        # If we've got something in the schedulers, we do not
        # want it anymore
        # self.schedulers.clear()
        for sched_id in conf['schedulers']:
            # Must check if we already have it, so we do not override our broks
            already_got = False

            # We may already have this conf id, but with another address
            if sched_id in self.schedulers:
                new_addr = conf['schedulers'][sched_id]['address']
                old_addr = self.schedulers[sched_id]['address']
                new_port = conf['schedulers'][sched_id]['port']
                old_port = self.schedulers[sched_id]['port']
                # Address and port must both match for it to be the same conf :)
                if new_addr == old_addr and new_port == old_port:
                    already_got = True

            if already_got:
                broks = self.schedulers[sched_id]['broks']
                running_id = self.schedulers[sched_id]['running_id']
            else:
                broks = []
                running_id = 0
            s = conf['schedulers'][sched_id]
            self.schedulers[sched_id] = s

            # replacing scheduler address and port by those defined in satellitemap
            if s['name'] in g_conf['satellitemap']:
                s = dict(s)  # make a copy
                s.update(g_conf['satellitemap'][s['name']])
            proto = 'http'
            if s['use_ssl']:
                proto = 'https'
            uri = '%s://%s:%s/' % (proto, s['address'], s['port'])
            self.schedulers[sched_id]['uri'] = uri

            self.schedulers[sched_id]['broks'] = broks
            self.schedulers[sched_id]['instance_id'] = s['instance_id']
            self.schedulers[sched_id]['running_id'] = running_id
            self.schedulers[sched_id]['active'] = s['active']
            self.schedulers[sched_id]['last_connection'] = 0
            self.schedulers[sched_id]['timeout'] = s['timeout']
            self.schedulers[sched_id]['data_timeout'] = s['data_timeout']

        logger.info("We have our schedulers: %s ", self.schedulers)

        # Now get arbiter
        for arb_id in conf['arbiters']:
            # Must look if we already have it
            already_got = arb_id in self.arbiters
            if already_got:
                broks = self.arbiters[arb_id]['broks']
            else:
                broks = []
            a = conf['arbiters'][arb_id]
            self.arbiters[arb_id] = a

            # replacing arbiter address and port by those defined in satellitemap
            if a['name'] in g_conf['satellitemap']:
                a = dict(a)  # make a copy
                a.update(g_conf['satellitemap'][a['name']])

            proto = 'http'
            if a['use_ssl']:
                proto = 'https'
            uri = '%s://%s:%s/' % (proto, a['address'], a['port'])
            self.arbiters[arb_id]['uri'] = uri

            self.arbiters[arb_id]['broks'] = broks
            self.arbiters[arb_id]['instance_id'] = 0  # No use so all to 0
            self.arbiters[arb_id]['running_id'] = 0
            self.arbiters[arb_id]['last_connection'] = 0

            # We do not connect to the arbiter. Connection hangs

        logger.info("We have our arbiters: %s ", self.arbiters)

        # Now for pollers
        for pol_id in conf['pollers']:
            # Must look if we already have it
            already_got = pol_id in self.pollers
            if already_got:
                broks = self.pollers[pol_id]['broks']
                running_id = self.pollers[pol_id]['running_id']
            else:
                broks = []
                running_id = 0
            p = conf['pollers'][pol_id]
            self.pollers[pol_id] = p

            # replacing poller address and port by those defined in satellitemap
            if p['name'] in g_conf['satellitemap']:
                p = dict(p)  # make a copy
                p.update(g_conf['satellitemap'][p['name']])

            proto = 'http'
            if p['use_ssl']:
                proto = 'https'

            uri = '%s://%s:%s/' % (proto, p['address'], p['port'])
            self.pollers[pol_id]['uri'] = uri

            self.pollers[pol_id]['broks'] = broks
            self.pollers[pol_id]['instance_id'] = 0  # No use so all to 0
            self.pollers[pol_id]['running_id'] = running_id
            self.pollers[pol_id]['last_connection'] = 0

        logger.info("We have our pollers: %s", self.pollers)

        # Now reactionners
        for rea_id in conf['reactionners']:
            # Must look if we already have it
            already_got = rea_id in self.reactionners
            if already_got:
                broks = self.reactionners[rea_id]['broks']
                running_id = self.reactionners[rea_id]['running_id']
            else:
                broks = []
                running_id = 0

            r = conf['reactionners'][rea_id]
            self.reactionners[rea_id] = r

            # replacing reactionner address and port by those defined in satellitemap
            if r['name'] in g_conf['satellitemap']:
                r = dict(r)  # make a copy
                r.update(g_conf['satellitemap'][r['name']])

            proto = 'http'
            if r['use_ssl']:
                proto = 'https'
            uri = '%s://%s:%s/' % (proto, r['address'], r['port'])
            self.reactionners[rea_id]['uri'] = uri

            self.reactionners[rea_id]['broks'] = broks
            self.reactionners[rea_id]['instance_id'] = 0  # No use so all to 0
            self.reactionners[rea_id]['running_id'] = running_id
            self.reactionners[rea_id]['last_connection'] = 0

        logger.info("We have our reactionners: %s", self.reactionners)

        # Now receivers
        for rec_id in conf['receivers']:
            # Must look if we already have it
            already_got = rec_id in self.receivers
            if already_got:
                broks = self.receivers[rec_id]['broks']
                running_id = self.receivers[rec_id]['running_id']
            else:
                broks = []
                running_id = 0

            r = conf['receivers'][rec_id]
            self.receivers[rec_id] = r

            # replacing receiver address and port by those defined in satellitemap
            if r['name'] in g_conf['satellitemap']:
                r = dict(r)  # make a copy
                r.update(g_conf['satellitemap'][r['name']])

            proto = 'http'
            if r['use_ssl']:
                proto = 'https'
            uri = '%s://%s:%s/' % (proto, r['address'], r['port'])
            self.receivers[rec_id]['uri'] = uri

            self.receivers[rec_id]['broks'] = broks
            self.receivers[rec_id]['instance_id'] = 0  # No use so all to 0
            self.receivers[rec_id]['running_id'] = running_id
            self.receivers[rec_id]['last_connection'] = 0

        if not self.have_modules:
            self.modules = mods = conf['global']['modules']
            self.have_modules = True
            logger.info("We received modules %s ", mods)

            # Ok now start, or restart them!
            # Set modules, init them and start external ones
            self.modules_manager.set_modules(self.modules)
            self.do_load_modules()
            self.modules_manager.start_external_instances()

        # Set our giving timezone from arbiter
        use_timezone = conf['global']['use_timezone']
        if use_timezone != 'NOTSET':
            logger.info("Setting our timezone to %s", use_timezone)
            os.environ['TZ'] = use_timezone
            time.tzset()

        # Connection init with Schedulers
        for sched_id in self.schedulers:
            self.pynag_con_init(sched_id, type='scheduler')

        for pol_id in self.pollers:
            self.pynag_con_init(pol_id, type='poller')

        for rea_id in self.reactionners:
            self.pynag_con_init(rea_id, type='reactionner')
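
The satellitemap override used for every satellite type above follows a single
pattern: copy the configuration entry, overlay any address/port defined in
satellitemap, then build the URI. A self-contained sketch with illustrative values:

    g_conf = {'satellitemap': {'scheduler-1': {'address': '10.0.0.5', 'port': 9999}}}
    s = {'name': 'scheduler-1', 'address': 'localhost', 'port': 7768, 'use_ssl': False}

    s = dict(s)  # copy, so the stored configuration entry stays untouched
    s.update(g_conf['satellitemap'].get(s['name'], {}))

    proto = 'https' if s['use_ssl'] else 'http'
    uri = '%s://%s:%s/' % (proto, s['address'], s['port'])
    print(uri)  # -> http://10.0.0.5:9999/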