Example #1
    def hook_load_retention(self, daemon):

        # Now the new redis way :)
        logger.debug("[RedisRetention] asking me to load the retention objects")

        # We get the list of loaded data from the retention server
        ret_hosts = {}
        ret_services = {}

        # We must load the data and format it as the scheduler wants :)
        for h in daemon.hosts:
            key = "HOST-%s" % h.host_name
            if self.sentinel_servers:
                for attempt in range(self.wait_for_failover):
                    try:
                        val = self.mc.get(key)
                        break
                    except:
                        logger.warning("[RedisRetention] Couldn't connect to redis server, trying to reconnect...")
                        time.sleep(1)
                else:
                    logger.error("[RedisRetention] Timeout connecting to redis server!")
            else:
                val = self.mc.get(key)
            if val is not None:
                # redis returns unicode, but we sent a string, so we are ok
                #val = str(unicode(val))
                val = cPickle.loads(val)
                ret_hosts[h.host_name] = val

        for s in daemon.services:
            key = "SERVICE-%s,%s" % (s.host.host_name, s.service_description)
            # spaces are not allowed in a memcache key, so replace them with a SPACE token
            key = key.replace(' ', 'SPACE')
            #print "Using key", key
            if self.sentinel_servers:
                for attempt in range(self.wait_for_failover):
                    try:
                        val = self.mc.get(key)
                        break
                    except:
                        logger.warning("[RedisRetention] Couldn't connect to redis server, trying to reconnect...")
                        time.sleep(1)
                else:
                    logger.error("[RedisRetention] Timeout connecting to redis server!")
            else:
                val = self.mc.get(key)
            if val is not None:
                #val = str(unicode(val))
                val = cPickle.loads(val)
                ret_services[(s.host.host_name, s.service_description)] = val

        all_data = {'hosts': ret_hosts, 'services': ret_services}

        # Ok, now we load them into the scheduler :)
        daemon.restore_retention_data(all_data)

        logger.info("[RedisRetention] Retention objects loaded successfully.")

        return True
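Note on the retry branch above: it relies on Python's for/else, where the else clause runs only when the loop finishes without hitting break, i.e. when every attempt failed. Also note that if every attempt raises, val is left unbound (or keeps the value from the previous host), so the following "if val is not None" test is unsafe. Below is a minimal, hedged sketch of the same idiom with an explicit fallback; the helper name and signature are illustrative, not part of the module.

import time
import logging

logger = logging.getLogger(__name__)

def get_with_retry(client, key, attempts=3, delay=1.0):
    """Fetch key from a redis-like client, retrying on any error.

    Returns None when every attempt failed, so callers can safely test
    the result instead of relying on a possibly unbound variable.
    """
    val = None
    for _ in range(attempts):
        try:
            val = client.get(key)
            break                     # success: the else clause is skipped
        except Exception as err:
            logger.warning("get(%r) failed (%s), retrying...", key, err)
            time.sleep(delay)
    else:
        # Reached only when the loop ended without break, i.e. all attempts failed
        logger.error("Giving up on key %r after %d attempts", key, attempts)
    return val

With such a helper, the sentinel branch of the hook would reduce to something like val = get_with_retry(self.mc, key, self.wait_for_failover).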
Example #2
    def main(self):
        global app

        # Change process name (seen in ps or top)
        self.set_proctitle(self.name)

        # It's an external module, so we need to be sure that we manage
        # the signals
        self.set_exit_handler()

        # Go for Http open :)
        self.init_http()

        # We fill the global variable with our Queue() link
        # to the arbiter, because the page handler must be a plain
        # function, not a class method
        app = self

        # We will loop forever on the http socket
        input = [self.srv.socket]

        # Main blocking loop
        while not self.interrupted:
            input = [self.srv.socket]
            try:
                inputready, _, _ = select.select(input, [], [], 1)
            except select.error, e:
                logger.warning("[WS_Arbiter] Exception: %s", str(e))
                continue
            for s in inputready:
                # If it's a web request, ask the webserver to do it
                if s == self.srv.socket:
                    self.srv.handle_request()
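The 1-second timeout passed to select.select is what lets this loop notice self.interrupted between requests instead of blocking forever on the socket. A standalone sketch of the same pattern, using a plain TCP echo server instead of the arbiter's HTTP server (all names are illustrative):

import select
import socket

def serve_until(should_stop, host='127.0.0.1', port=0):
    """Accept connections and echo one read per client until should_stop() is true."""
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((host, port))
    srv.listen(5)
    while not should_stop():
        # The 1s timeout guarantees should_stop() is re-checked regularly
        readable, _, _ = select.select([srv], [], [], 1)
        for s in readable:
            if s is srv:
                client, _ = srv.accept()
                client.sendall(client.recv(1024))
                client.close()
    srv.close()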
Example #3
def get_page(name, type):
    global params

    # user = app.check_user_authentication()

    logger.debug("[WebUI-cvhost], get_page for %s, type: '%s'", name, type)

    try:
        currentdir = os.path.dirname(os.path.realpath(__file__))
        configuration_file = "%s/%s.cfg" % (currentdir, type)
        logger.debug("Plugin configuration file: %s", configuration_file)
        scp = config_parser('#', '=')
        z = params.copy()
        z.update(scp.parse_config(configuration_file))
        params = z

        logger.debug("[WebUI-cvhost] configuration loaded.")
        logger.debug("[WebUI-cvhost] configuration, load: %s (%s)", params['svc_load_name'], params['svc_load_used'])
        logger.debug("[WebUI-cvhost] configuration, cpu: %s (%s)", params['svc_cpu_name'], params['svc_cpu_used'])
        logger.debug("[WebUI-cvhost] configuration, disk: %s (%s)", params['svc_dsk_name'], params['svc_dsk_used'])
        logger.debug("[WebUI-cvhost] configuration, memory: %s (%s)", params['svc_mem_name'], params['svc_mem_used'])
        logger.debug("[WebUI-cvhost] configuration, network: %s (%s)", params['svc_net_name'], params['svc_net_used'])
        # logger.info("[WebUI-cvhost] configuration, printer: %s (%s)", params['svc_prn_name'], params['svc_prn_used'])
    except Exception, exp:
        logger.warning("[WebUI-cvhost] configuration file (%s) not available or bad formed: %s", configuration_file, str(exp))
        app.redirect404()
        all_perfs = {}
        all_states = {}
        return {'app': app, 'config': type, 'all_perfs':all_perfs, 'all_states':all_states}
Example #4
 def add(self, elt):
     cls_type = elt.__class__.my_type
     if cls_type == 'brok':
         # For brok, we TAG brok with our instance_id
         elt.instance_id = 0
         self.broks_internal_raised.append(elt)
         return
     elif cls_type == 'externalcommand':
         logger.debug("Enqueuing an external command '%s'" % str(ExternalCommand.__dict__))
         self.external_commands.append(elt)
     # Maybe we got a Message from the modules; it's a way to ask for something,
     # like full data from a scheduler, for example.
     elif cls_type == 'message':
         # We got a message, great!
         logger.debug(str(elt.__dict__))
         if elt.get_type() == 'NeedData':
             data = elt.get_data()
             # Full instance id means: I got no data for this scheduler
             # so give me all dumbass!
             if 'full_instance_id' in data:
                 c_id = data['full_instance_id']
                 source = elt.source
                 logger.info('The module %s is asking me to get all initial data from the scheduler %d' % (source, c_id))
                 # so we just reset the connection and the running_id, it will just get all new things
                 try:
                     self.schedulers[c_id]['con'] = None
                     self.schedulers[c_id]['running_id'] = 0
                 except KeyError:  # maybe this instance was not known, forget it
                     logger.warning("the module %s ask me a full_instance_id for an unknown ID (%d)!" % (source, c_id))
         # Maybe a module tells me that it's dead; I must log its last words...
         if elt.get_type() == 'ICrash':
             data = elt.get_data()
             logger.error('the module %s just crash! Please look at the traceback:' % data['name'])
             logger.error(data['trace'])
Example #5
    def get_new_broks(self, type='scheduler'):
        # Get the right links table for looping..
        links = self.get_links_from_type(type)
        if links is None:
            logger.debug('Type unknown for connection! %s' % type)
            return

        # We check for new checks in each scheduler and put
        # the result in new_checks
        for sched_id in links:
            try:
                con = links[sched_id]['con']
                if con is not None:  # None = not initialized
                    t0 = time.time()
                    tmp_broks = con.get_broks(self.name)
                    logger.debug("%s Broks get in %s" % (len(tmp_broks), time.time() - t0))
                    for b in tmp_broks.values():
                        b.instance_id = links[sched_id]['instance_id']

                    # Ok, we can add these broks to our queues
                    self.add_broks_to_queue(tmp_broks.values())

                else:  # no con? make the connection
                    self.pynag_con_init(sched_id, type=type)
            # Ok, con is not known, so we create it
            except KeyError, exp:
                logger.debug("Key error for get_broks : %s" % str(exp))
                try:
                    logger.debug(''.join(Pyro.util.getPyroTraceback(exp)))
                except:
                    pass
                self.pynag_con_init(sched_id, type=type)
            except Pyro.errors.ProtocolError, exp:
                logger.warning("Connection problem to the %s %s: %s" % (type, links[sched_id]['name'], str(exp)))
                links[sched_id]['con'] = None
Example #6
    def get_live_data_log(self):
        """Like get_live_data, but for log objects"""
        # finalize the filter stacks
        self.mongo_time_filter_stack.and_elements(self.mongo_time_filter_stack.qsize())
        self.mongo_filter_stack.and_elements(self.mongo_filter_stack.qsize())
        if self.use_aggressive_sql:
            # Be aggressive, get preselected data from sqlite and do less
            # filtering in python. But: only a subset of Filter:-attributes
            # can be mapped to columns in the logs-table, for the others
            # we must use "always-true"-clauses. This can result in
            # funny and potentially ineffective sql-statements
            mongo_filter_func = self.mongo_filter_stack.get_stack()
        else:
            # Be conservative, get everything from the database between
            # two dates and apply the Filter:-clauses in python
            mongo_filter_func = self.mongo_time_filter_stack.get_stack()
        dbresult = []
        mongo_filter = mongo_filter_func()
        logger.debug("[Logstore MongoDB] Mongo filter is %s" % str(mongo_filter))
        # We can apply the filterstack here as well. we have columns and filtercolumns.
        # the only additional step is to enrich log lines with host/service-attributes
        # A timerange can be useful for a faster preselection of lines

        filter_element = eval('{ ' + mongo_filter + ' }')
        logger.debug("[LogstoreMongoDB] Mongo filter is %s" % str(filter_element))
        columns = ['logobject', 'attempt', 'logclass', 'command_name', 'comment', 'contact_name', 'host_name', 'lineno', 'message', 'plugin_output', 'service_description', 'state', 'state_type', 'time', 'type']
        if not self.is_connected == CONNECTED:
            logger.warning("[LogStoreMongoDB] sorry, not connected")
        else:
            dbresult = [Logline([(c,) for c in columns], [x[col] for col in columns]) for x in self.db[self.collection].find(filter_element).sort([(u'time', pymongo.ASCENDING), (u'lineno', pymongo.ASCENDING)])]
        return dbresult
Example #7
    def get_live_data(self, cs):
        """Find the objects which match the request.

        This function scans a list of objects (hosts, services, etc.) and
        applies the filter functions first. The remaining objects are
        converted to simple dicts which have only the keys that were
        requested through Column: attributes. """
        # We will use prefiltercolumns here for some serious speedup.
        # For example, if nagvis wants Filter: host_groups >= hgxy
        # we don't have to use the whole list of hostgroups in
        # the innermost loop
        # Filter: host_groups >= linux-servers
        # host_groups is a service attribute
        # We can get all services of all hosts of all hostgroups and filter at the end
        # But it would save a lot of time to already filter the hostgroups. This means host_groups must be hard-coded
        # Also host_name, but then we must filter the second step.
        # And a mixture host_groups/host_name with FilterAnd/Or? Must have several filter functions

        handler = self.objects_get_handlers.get(self.table, None)
        if not handler:
            logger.warning("[Livestatus Query] Got unhandled table: %s" % (self.table))
            return []

        result = handler(self, cs)
        # Now we have a list of full objects (Host, Service, ....)

        #if self.limit:
        #    if isinstance(res, list):
        #        res = res[:self.limit]
        #    else:
        #        res = list(res)[:self.limit]

        return result
Example #8
    def load(self):
        now = int(time.time())
        # We get all modules file with .py
        modules_files = [fname[:-3] for fname in os.listdir(self.modules_path)
                         if fname.endswith(".py")]

        # And directories
        modules_files.extend([fname for fname in os.listdir(self.modules_path)
                               if os.path.isdir(os.path.join(self.modules_path, fname))])

        # Now we try to load them
        # So first we add their dir into the sys.path
        if not self.modules_path in sys.path:
            sys.path.append(self.modules_path)

        # We try to import them, but we keep only the ones of
        # our type
        del self.imported_modules[:]
        for fname in modules_files:
            #print "Try to load", fname
            try:
                m = __import__(fname)
                if not hasattr(m, 'properties'):
                    continue

                # We want to keep only the modules of our type
                if self.modules_type in m.properties['daemons']:
                    self.imported_modules.append(m)
            except Exception, exp:
                logger.warning("Importing module %s: %s" % (fname, exp))
Example #9
    def get_instances(self):
        self.clear_instances()
        for (mod_conf, module) in self.modules_assoc:
            mod_conf.properties = module.properties.copy()
            try:
                inst = module.get_instance(mod_conf)
                if not isinstance(inst, BaseModule):
                    raise TypeError('Returned instance is not of type BaseModule (%s) !'
                                    % type(inst))
            except Exception as err:
                logger.error("The module %s raised an exception %s, I remove it! traceback=%s",
                             mod_conf.get_name(), err, traceback.format_exc())
            else:
                # Tell the instance which module type it is loaded into
                inst.set_loaded_into(self.modules_type)
                self.instances.append(inst)


        for inst in self.instances:
            # External are not init now, but only when they are started
            if not inst.is_external and not self.try_instance_init(inst):
                # If the init failed, we put it in the restart queue
                logger.warning("The module '%s' failed to init, I will try to restart it later",
                               inst.get_name())
                self.to_restart.append(inst)

        return self.instances
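The try/except/else above is what keeps a failed module out of self.instances: the else branch only runs when get_instance and the type check raised nothing. A minimal illustration of the same structure, independent of the real module classes (BaseModule here is a stand-in):

import logging

logger = logging.getLogger(__name__)

class BaseModule(object):
    """Stand-in for the real BaseModule base class used by the snippet."""

def build_instance(module, mod_conf):
    """Return a validated module.get_instance(mod_conf), or None on failure."""
    try:
        inst = module.get_instance(mod_conf)
        if not isinstance(inst, BaseModule):
            raise TypeError('Returned instance is not of type BaseModule (%s)!' % type(inst))
    except Exception as err:
        logger.error("The module raised an exception, I remove it! %s", err)
        return None
    else:
        # Reached only when the try block completed without raising
        return inst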
Example #10
    def set_ui_user_preference(self, user, key, value):
        if not self.is_connected:
            if not self.open():
                logger.error("[WebUI-MongoDBPreferences] error during initialization, no database connection!")
                return None

        if not user:
            logger.warning("[WebUI-MongoDBPreferences] error set_ui_user_preference, no user!")
            return None

        try:
            # check if a collection exists for this user
            u = self.db.ui_user_preferences.find_one({'_id': user.get_name()})
            if not u:
                # no collection for this user? create a new one
                self.db.ui_user_preferences.save({'_id': user.get_name(), key: value})

            r = self.db.ui_user_preferences.update({'_id': user.get_name()}, {'$set': {key: value}})
            # Maybe there was no doc there, if so, create an empty one
            if not r:
                # Maybe the user exists; if so, get the whole user entry
                u = self.db.ui_user_preferences.find_one({'_id': user.get_name()})
                if not u:
                    logger.debug ("[WebUI-MongoDBPreferences] No user entry for %s, I create a new one", user.get_name())
                    self.db.ui_user_preferences.save({'_id': user.get_name(), key: value})
                else:  # ok, it was just the key that was missing, just update it and save it
                    u[key] = value
                    logger.debug ("[WebUI-MongoDBPreferences] Just saving the new key in the user pref")
                    self.db.ui_user_preferences.save(u)
        except Exception, e:
            logger.warning("[WebUI-MongoDBPreferences] Exception: %s", str(e))
            self.is_connected = False
            return None
Example #11
def load_config(app):
    global params
    
    import os
    from webui2.config_parser import config_parser
    try:
        currentdir = os.path.dirname(os.path.realpath(__file__))
        configuration_file = "%s/%s" % (currentdir, 'plugin.cfg')
        logger.info("[WebUI-logs] Plugin configuration file: %s", configuration_file)
        scp = config_parser('#', '=')
        z = params.copy()
        z.update(scp.parse_config(configuration_file))
        params = z

        params['logs_type'] = [item.strip() for item in params['logs_type'].split(',')]
        if len(params['logs_hosts']) > 0:
            params['logs_hosts'] = [item.strip() for item in params['logs_hosts'].split(',')]
        if len(params['logs_services']) > 0:
            params['logs_services'] = [item.strip() for item in params['logs_services'].split(',')]
        
        logger.info("[WebUI-logs] configuration loaded.")
        logger.info("[WebUI-logs] configuration, fetching types: %s", params['logs_type'])
        logger.info("[WebUI-logs] configuration, hosts: %s", params['logs_hosts'])
        logger.info("[WebUI-logs] configuration, services: %s", params['logs_services'])
        return True
    except Exception, exp:
        logger.warning("[WebUI-logs] configuration file (%s) not available: %s", configuration_file, str(exp))
        return False
Example #12
def get_instance(plugin):
    """ Return a module instance for the plugin manager """
    logger.info("Get a NSCA arbiter module for plugin %s" % plugin.get_name())

    host = getattr(plugin, 'host', '127.0.0.1')
    if host == '*':
        host = ''
    
    port = int(getattr(plugin, 'port', '5667'))
    buffer_length = int(getattr(plugin, 'buffer_length', '4096'))
    payload_length = int(getattr(plugin, 'payload_length', '-1'))
    encryption_method = int(getattr(plugin, 'encryption_method', '0'))

    backlog = int(getattr(plugin, 'backlog', '10'))

    password = getattr(plugin, 'password', '')
    if password == "" and encryption_method != 0:
        logger.error("[NSCA] No password specified whereas there is a encryption_method defined")
        logger.warning("[NSCA] Setting password to dummy to avoid crash!")
        password = "******"

    max_packet_age = min(int(getattr(plugin, 'max_packet_age', '30')), 900)
    check_future_packet = bool(getattr(plugin, 'check_future_packet', 0))

    instance = NSCA_arbiter(plugin, host, port,
            buffer_length, payload_length, encryption_method, password, max_packet_age, check_future_packet,
            backlog)
    return instance
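Every NSCA option above is read with getattr(plugin, name, default) and then cast to int. A small hypothetical helper in the same spirit (not part of the module) keeps the cast and the error logging in one place:

import logging

logger = logging.getLogger(__name__)

def plugin_int(plugin, name, default):
    """Read an integer option from a plugin configuration object, falling back to default."""
    raw = getattr(plugin, name, default)
    try:
        return int(raw)
    except (TypeError, ValueError):
        logger.warning("[NSCA] invalid value %r for option %s, using %s", raw, name, default)
        return int(default)

# e.g. port = plugin_int(plugin, 'port', 5667)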
Example #13
def show_logs():
    user = checkauth()    

    message,db = getdb(params['db_name'])
    if not db:
        return {
            'app': app,
            'user': user, 
            'message': message,
            'params': params,
            'records': []
        }

    records=[]

    try:
        logger.warning("[Logs] Fetching records from database: %s (max %d)" % (params['logs_type'], params['logs_limit']))
        for log in db.logs.find({ "$and" : [ { "type" : { "$in": params['logs_type'] }}, { "host_name" : { "$in": params['logs_hosts'] }}, { "service_description" : { "$in": params['logs_services'] }}  ]}).sort("time", -1).limit(params['logs_limit']):
            records.append({
                "date" : int(log["time"]),
                "host" : log['host_name'],
                "service" : log['service_description'],
                "message" : log['message']
            })
        message = "%d records fetched from database." % len(records)
        logger.debug("[Logs] %d records fetched from database." % len(records))
    except Exception, exp:
        logger.error("[Logs] Exception when querying database: %s" % (str(exp)))
Example #14
def decode_values(pktype, plen, buf):
    nvalues = short.unpack_from(buf, header.size)[0]
    off = header.size + short.size + nvalues
    valskip = double.size

    # check the packet head
    if ((valskip + 1) * nvalues + short.size + header.size) != plen:
        return []
    if double.size != number.size:
        return []

    result = []
    for dstype in map(ord, buf[header.size + short.size:off]):
        if (dstype == DS_TYPE_COUNTER or dstype == DS_TYPE_DERIVE or dstype == DS_TYPE_ABSOLUTE):
            v = (dstype, number.unpack_from(buf, off)[0])
            result.append(v)
            off += valskip
        elif dstype == DS_TYPE_GAUGE:
            v = (dstype, double.unpack_from(buf, off)[0])
            result.append(v)
            off += valskip
        else:
            logger.warning("[Collectd] DS type %i unsupported" % dstype)

    return result
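header, short, number and double are pre-compiled struct.Struct objects in the collectd module; the snippet only shows how they are used. The formats below are assumptions for illustration (collectd's binary protocol uses network byte order for the part header and integer values, and a little-endian double for gauges), chosen so that the size arithmetic in the snippet still holds:

import struct

# Assumed layouts; double.size == number.size == 8 matches the check in the snippet
header = struct.Struct("!2H")   # part type + part length
short = struct.Struct("!H")     # value count (nvalues)
number = struct.Struct("!Q")    # COUNTER / DERIVE / ABSOLUTE values
double = struct.Struct("<d")    # GAUGE values

# unpack_from reads at an offset without slicing the buffer, e.g.:
#   nvalues = short.unpack_from(buf, header.size)[0]
#   value = double.unpack_from(buf, off)[0]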
Example #15
    def manage_initial_broks_done_brok(self, b):
        if self.con is None:
            return
        logger.info("[Active Directory UI] AD/LDAP: manage_initial_broks_done_brok, go for pictures")

        searchScope = ldap.SCOPE_SUBTREE
        ## retrieve all attributes - again adjust to your needs - see documentation for more options
        #retrieveAttributes = ["userPrincipalName", "thumbnailPhoto", "samaccountname", "email"]

        logger.info("[Active Directory UI] Contacts? %d" % len(self.app.datamgr.get_contacts()))

        for c in self.app.datamgr.get_contacts():
            logger.debug("[Active Directory UI] Doing photo lookup for contact: %s" % c.get_name())
            elts = self.find_contact_entry(c)

            if elts is None:
                logger.warning("[Active Directory UI] No ldap entry for %s" % c.get_name())
                continue

            # Ok, try to get photo from the entry
            try:
                photo = elts[self.photo_attr][0]
                try:
                    p = os.path.join(self.app.photo_dir, c.get_name()+'.jpg')
                    f = open(p, 'wb')
                    f.write(photo)
                    f.close()
                    logger.info("[Active Directory UI] Photo wrote for %s" % c.get_name())
                except Exception, exp:
                    logger.error("[Active Directory UI] Cannot write %s : %s" % (p, str(exp)))
            except KeyError:
                logger.warning("[Active Directory UI] No photo for %s" % c.get_name())
Example #16
    def load(self):
        if self.modules_path not in sys.path:
            sys.path.append(self.modules_path)

        modules_files = [fname
                         for fname in listdir(self.modules_path)
                         if isdir(join(self.modules_path, fname))]

        del self.imported_modules[:]
        for mod_name in modules_files:
            mod_file = abspath(join(self.modules_path, mod_name, 'module.py'))
            mod_dir = os.path.normpath(os.path.dirname(mod_file))
            mod = self.try_load(mod_name, mod_dir)
            if not mod:
                continue
            try:
                is_our_type = self.modules_type in mod.properties['daemons']
            except Exception as err:
                logger.warning("Bad module file for %s : cannot check its properties['daemons']"
                               "attribute : %s", mod_file, err)
            else:  # We want to keep only the modules of our type
                if is_our_type:
                    self.imported_modules.append(mod)

        # Now we want to find in these modules the ones we are looking for
        del self.modules_assoc[:]
        for mod_conf in self.modules:
            module_type = uniform_module_type(mod_conf.module_type)
            for module in self.imported_modules:
                if uniform_module_type(module.properties['type']) == module_type:
                    self.modules_assoc.append((mod_conf, module))
                    break
            else:  # No module is suitable, we emit a Warning
                logger.warning("The module type %s for %s was not found in modules!",
                               module_type, mod_conf.get_name())
Example #17
 def create_pack(self, buf, name):
     if not json:
         logger.warning("[Pack] cannot load the pack file '%s': missing json lib", name)
         return
     # Ok, go compile the code
     try:
         d = json.loads(buf)
         if "name" not in d:
             logger.error("[Pack] no name in the pack '%s'", name)
             return
         p = Pack({})
         p.pack_name = d["name"]
         p.description = d.get("description", "")
         p.macros = d.get("macros", {})
         p.templates = d.get("templates", [p.pack_name])
         p.path = d.get("path", "various/")
         p.doc_link = d.get("doc_link", "")
         p.services = d.get("services", {})
         p.commands = d.get("commands", [])
         if not p.path.endswith("/"):
             p.path += "/"
         # Ok, add it
         self[p.id] = p
     except ValueError, exp:
         logger.error("[Pack] error in loading pack file '%s': '%s'", name, exp)
Example #18
 def create_pack(self, buf, name):
     if not json:
         logger.warning("[Pack] cannot load the pack file '%s': missing json lib", name)
         return
     # Ok, go compile the code
     try:
         d = json.loads(buf)
         if not 'name' in d:
             logger.error("[Pack] no name in the pack '%s'", name)
             return
         p = Pack({})
         p.pack_name = d['name']
         p.description = d.get('description', '')
         p.macros = d.get('macros', {})
         p.templates = d.get('templates', [p.pack_name])
         p.path = d.get('path', 'various/')
         p.doc_link = d.get('doc_link', '')
         p.services = d.get('services', {})
         p.commands = d.get('commands', [])
         if not p.path.endswith('/'):
             p.path += '/'
         # Ok, add it
         self[p.id] = p
     except ValueError, exp:
         logger.error("[Pack] error in loading pack file '%s': '%s'", name, exp)
Example #19
    def linkify_hg_by_realms(self, realms):
        # Now we explode the realm value if we've got one
        # The group realm must not override a host one (warning?)
        for hg in self:
            if not hasattr(hg, 'realm'):
                continue

            # Maybe the value is void?
            if not hg.realm.strip():
                continue

            r = realms.find_by_name(hg.realm.strip())
            if r is not None:
                hg.realm = r
                logger.debug("[hostgroups] %s is in %s realm", hg.get_name(), r.get_name())
            else:
                err = "the hostgroup %s got an unknown realm '%s'" % (hg.get_name(), hg.realm)
                hg.configuration_errors.append(err)
                hg.realm = None
                continue

            for h in hg:
                if h is None:
                    continue
                if h.realm is None or h.got_default_realm:  # default value not hasattr(h, 'realm'):
                    logger.debug("[hostgroups] apply a realm %s to host %s from a hostgroup rule (%s)",  \
                        hg.realm.get_name(), h.get_name(), hg.get_name())
                    h.realm = hg.realm
                else:
                    if h.realm != hg.realm:
                        logger.warning("[hostgroups] host %s it not in the same realm than it's hostgroup %s",  \
                            h.get_name(), hg.get_name())
Example #20
    def commit_logs(self):
        """
        Periodically called (commit_period), this method prepares a batch of queued logs (commit_volume) to insert them into the DB
        """
        if not self.logs_cache:
            return

        if not self.is_connected == CONNECTED:
            if not self.open():
                logger.warning("[mongo-logs] log commiting failed")
                logger.warning("[mongo-logs] %d lines to insert in database", len(self.logs_cache))
                return

        logger.debug("[mongo-logs] commiting ...")

        logger.debug("[mongo-logs] %d lines to insert in database (max insertion is %d lines)", len(self.logs_cache), self.commit_volume)

        # Flush all the stored log lines
        logs_to_commit = 1
        now = time.time()
        some_logs = []
        while True:
            try:
                # result = self.db[self.logs_collection].insert_one(self.logs_cache.popleft())
                some_logs.append(self.logs_cache.popleft())
                logs_to_commit = logs_to_commit + 1
                if logs_to_commit >= self.commit_volume:
                    break
            except IndexError:
                logger.debug("[mongo-logs] prepared all available logs for commit")
                break
            except Exception, exp:
                logger.error("[mongo-logs] exception: %s", str(exp))
Example #21
 def get_module(self, mod_name):
     if self.modules_dir and self.modules_dir not in sys.path:
         sys.path.append(self.modules_dir)
     try:
         return importlib.import_module('.module', mod_name)
     except ImportError as err:
         logger.warning('Cannot import %s as a package (%s) ; trying as bare module..',
                        mod_name, err)
     mod_dir = abspath(join(self.modules_dir, mod_name))
     mod_file = join(mod_dir, 'module.py')
     if os.path.exists(mod_file):
         # important, equivalent to importing mod_name from module.py:
         load_it = lambda: imp.load_source(mod_name, mod_file)
     else:
         load_it = lambda: imp.load_compiled(mod_name, mod_file+'c')
     # We add this dir to sys.path so the module can load local files too
     if mod_dir not in sys.path:
         sys.path.append(mod_dir)
     try:
         return load_it()
     except Exception as err:
         logger.warning("Importing module %s failed: %s ; backtrace=%s",
                        mod_name, err, traceback.format_exc())
         sys.path.remove(mod_dir)
         raise
Example #22
    def manage_log_brok(self, brok):
        """
        Parse a Shinken log brok to enqueue a log line for Index insertion
        """
        d = date.today()
        index_name = self.index_prefix + "-" + d.strftime("%Y.%m.%d")

        line = brok.data["log"]
        if re.match("^\[[0-9]*\] [A-Z][a-z]*.:", line):
            # Match log which NOT have to be stored
            logger.warning("[elastic-logs] do not store: %s", line)
            return

        logline = Logline(line=line)
        logline_dict = logline.as_dict()
        logline_dict.update({"@timestamp": datetime.utcfromtimestamp(int(logline_dict["time"])).isoformat() + "Z"})
        values = {"_index": index_name, "_type": "shinken-logs", "_source": logline_dict}

        # values = logline.as_dict()
        if logline.logclass != LOGCLASS_INVALID:
            logger.debug("[elastic-logs] store log line values: %s", values)
            self.logs_cache.append(values)
        else:
            logger.info("[elastic-logs] This line is invalid: %s", line)

        return
Example #23
 def manage_initial_host_status_brok(self, b):
     try:
         logger.debug("[Checks forward] initial host status: %s" % str(b.data['customs']))
         self.cache_host_entities_id[b.data['host_name']] = b.data['customs']['_ENTITIESID']
         if self.cache_host_entities_id[b.data['host_name']] in self.glpi_entities:
             logger.info("[Checks forward] host %s checks will be forwarded (entity: %s)" % (b.data['host_name'], self.cache_host_entities_id[b.data['host_name']]))
     except:
         logger.warning("[Checks forward] no entity Id for host: %s" % (b.data['host_name']))
Example #24
 def remove_twins(self):
     for id in self.twins:
         i = self.items[id]
         type = i.__class__.my_type
         logger.warning("[items] %s.%s is already defined '%s'" % (type, i.get_name(), getattr(i, 'imported_from', "unknown source")))
         del self[id]  # bye bye
     # do not remove the twins list itself, we may still want to look at it, so just void it
     self.twins = []
Example #25
 def remove_twins(self):
     for id in self.twins:
         i = self.items[id]
         type = i.__class__.my_type
         logger.warning("[items] %s.%s is already defined" % (type, i.get_name()))
         del self[id]  # bye bye
     # do not remove the twins list itself, we may still want to look at it, so just void it
     self.twins = []
Example #26
 def init(self):
     try:
         self.con = socket.socket()
         self.con.settimeout(10)
         self.con.connect((self.host, self.port))
     except Exception:
         logger.warning("[RawSocket broker] Failed to connect to host %s and port %d!"
                        % (self.host, self.port))
Example #27
 def get_module(self, name):
     mod_path = os.path.join(self.modulesdir, name, 'module.py')
     try:
         r = imp.load_source(name, mod_path)
     except:
         logger.warning('The module %s cannot be founded or load' % mod_path)
         raise
     return r
Example #28
def get_instance(plugin):
    logger.info("[GLPIdb Broker] Get a Glpi broker for plugin %s" % plugin.get_name())

    # First try to import
    try:
        from glpidb_broker import Glpidb_broker
    except ImportError, exp:
        logger.warning("[GLPIdb Broker] Warning: the plugin type %s is unavailable: %s" % (properties['type'], exp))
        return None
Example #29
    def is_correct(self):
        b = True
        for dr in self.dateranges:
            b &= dr.is_correct()

        # Warn about incorrect entries
        for e in self.invalid_entries:
            logger.warning("[timeperiod::%s] invalid entry '%s'" % (self.get_name(), e))
        return b
Example #30
def get_instance(plugin):
    logger.info("[Host Perfdata Broker] Get a Host Perfdata broker for plugin %s" % plugin.get_name())

    # First try to import
    try:
        from host_perfdata_broker import Host_perfdata_broker
    except ImportError, exp:
        logger.warning("[Host Perfdata Broker] Warning: the plugin type %s is unavailable: %s" % ('host_perfdata', exp))
        return None
Example #31
    def set_ui_user_preference(self, user, key, value):
        if not self.is_connected:
            if not self.open():
                logger.error(
                    "[WebUI-MongoDBPreferences] error during initialization, no database connection!"
                )
                return None

        if not user:
            logger.warning(
                "[WebUI-MongoDBPreferences] error set_ui_user_preference, no user!"
            )
            return None

        try:
            # check if a collection exists for this user
            u = self.db.ui_user_preferences.find_one({'_id': user.get_name()})
            if not u:
                # no collection for this user? create a new one
                self.db.ui_user_preferences.save({
                    '_id': user.get_name(),
                    key: value
                })

            r = self.db.ui_user_preferences.update({'_id': user.get_name()},
                                                   {'$set': {
                                                       key: value
                                                   }})
            # Maybe there was no doc there, if so, create an empty one
            if not r:
                # Maybe the user exists; if so, get the whole user entry
                u = self.db.ui_user_preferences.find_one(
                    {'_id': user.get_name()})
                if not u:
                    logger.debug(
                        "[WebUI-MongoDBPreferences] No user entry for %s, I create a new one",
                        user.get_name())
                    self.db.ui_user_preferences.save({
                        '_id': user.get_name(),
                        key: value
                    })
                else:  # ok, it was just the key that was missing, just update it and save it
                    u[key] = value
                    logger.debug(
                        "[WebUI-MongoDBPreferences] Just saving the new key in the user pref"
                    )
                    self.db.ui_user_preferences.save(u)
        except Exception, e:
            logger.warning("[WebUI-MongoDBPreferences] Exception: %s", str(e))
            self.is_connected = False
            return None
Example #32
def get_page(name):
    global params

    # user = app.check_user_authentication()

    config = 'default'
    if '/' in name:
        config = name.split('/')[1]
        name = name.split('/')[0]

    # Find host type if provided in parameters ...
    # @mohierf: not yet implemented ...
    type = app.request.query.get('type', 'default')

    logger.debug("[WebUI-cvhost], get_page for %s (%s)", name,
                 app.request.query_string)

    try:
        currentdir = os.path.dirname(os.path.realpath(__file__))
        configuration_file = "%s/%s.cfg" % (currentdir, config)
        logger.debug("Plugin configuration file: %s", configuration_file)
        scp = config_parser('#', '=')
        z = params.copy()
        z.update(scp.parse_config(configuration_file))
        params = z

        logger.debug("[WebUI-cvhost] configuration loaded.")
        logger.debug("[WebUI-cvhost] configuration, load: %s (%s)",
                     params['svc_load_name'], params['svc_load_used'])
        logger.debug("[WebUI-cvhost] configuration, cpu: %s (%s)",
                     params['svc_cpu_name'], params['svc_cpu_used'])
        logger.debug("[WebUI-cvhost] configuration, disk: %s (%s)",
                     params['svc_dsk_name'], params['svc_dsk_used'])
        logger.debug("[WebUI-cvhost] configuration, memory: %s (%s)",
                     params['svc_mem_name'], params['svc_mem_used'])
        logger.debug("[WebUI-cvhost] configuration, network: %s (%s)",
                     params['svc_net_name'], params['svc_net_used'])
        # logger.debug("[WebUI-cvhost] configuration, printer: %s (%s)", params['svc_prn_name'], params['svc_prn_used'])
    except Exception, exp:
        logger.warning(
            "[WebUI-cvhost] configuration file (%s) not available: %s",
            configuration_file, str(exp))
        all_perfs = {}
        all_states = {}
        return {
            'app': app,
            'config': config,
            'all_perfs': all_perfs,
            'all_states': all_states
        }
Example #33
    def get_ui_external_links(self):
        logger.debug("[WebUI] Fetching UI external links ...")

        lst = []
        for mod in self.modules_manager.get_internal_instances():
            try:
                f = getattr(mod, 'get_external_ui_link', None)
                if f and callable(f):
                    lst.append(f())
            except Exception, exp:
                logger.warning("[WebUI] Warning: The mod %s raise an exception: %s, I'm tagging it to restart later", mod.get_name(), str(exp))
                logger.debug("[WebUI] Exception type: %s", type(exp))
                logger.debug("Back trace of this kill: %s", traceback.format_exc())
                self.modules_manager.set_to_restart(mod)
Example #34
    def main(self):
        self.set_proctitle(self.name)
        self.set_exit_handler()

        # Open database connection
        self.open()

        db_commit_next_time = time.time()
        db_test_connection = time.time()

        while not self.interrupted:
            logger.debug("[krill-hostevents] queue length: %s",
                         self.to_q.qsize())
            now = time.time()

            # DB connection test ?
            if self.db_test_period and db_test_connection < now:
                logger.debug(
                    "[krill-hostevents] Testing database connection ...")
                # Test connection every 5 seconds ...
                db_test_connection = now + self.db_test_period
                if self.is_connected == DISCONNECTED:
                    logger.warning(
                        "[krill-hostevents] Trying to connect database ...")
                    self.open()

            # Logs commit ?
            if db_commit_next_time < now:
                logger.debug("[krill-hostevents] Logs commit time ...")
                # Commit periodically ...
                db_commit_next_time = now + self.commit_period
                self.commit_logs()

            # Logs rotation ?
            if self.next_logs_rotation < now:
                logger.debug("[krill-hostevents] Logs rotation time ...")
                self.rotate_logs()

            # Broks management ...
            l = self.to_q.get()
            for b in l:
                b.prepare()
                self.manage_brok(b)

            logger.debug("[krill-hostevents] time to manage %s broks (%3.4fs)",
                         len(l),
                         time.time() - now)

        # Close database connection
        self.close()
Example #35
    def get_ui_common_preference(self, key):
        if not self.is_connected:
            if not self.open():
                logger.error(
                    "[WebUI-MongoDBPreferences] error during initialization, no database connection!"
                )
                return None

        try:
            e = self.db.ui_user_preferences.find_one({'_id': 'shinken-global'})
        except Exception, e:
            logger.warning("[WebUI-MongoDBPreferences] Exception: %s", str(e))
            self.is_connected = False
            return None
Example #36
    def is_correct(self):
        state = True
        cls = self.__class__

        for prop, entry in cls.properties.items():
            if not hasattr(self, prop) and entry.required:
                # This should raise an error afterwards?
                # Log the issue
                logger.warning("%s arbiterlink is missing %s property",
                               self.get_name(), prop)
                self.debug("%s arbiterlink is missing %s property" %
                           (self.get_name(), prop))
                state = False  # Bad boy...
        return state
Example #37
    def load_retention_objects(self, sched):
        logger.debug(
            "[PickleRetention] asking me to load the retention objects")

        # Now the old flat file way :(
        logger.debug("[PickleRetention]Reading from retention_file %s" %
                     self.path)
        try:
            f = open(self.path, 'rb')
            all_data = cPickle.load(f)
            f.close()
        except (EOFError, ValueError, IOError), exp:
            logger.warning(repr(exp))
            return False
Example #38
 def execute_query(self, query, do_debug=False):
     """Just run the query
     TODO: finish catch
     """
     if do_debug:
         logger.debug("[MysqlDB]I run query %s" % query)
     try:
         self.db_cursor.execute(query)
         self.db.commit()
         return True
     except IntegrityError, exp:
         logger.warning("[MysqlDB] A query raised an integrity error:" \
               " %s, %s" % (query, exp))
         return False
Example #39
    def find_contact_entry(self, contact):
        if not self.active:
            return None

        if not contact:
            return None

        # First we try to connect, because there is no "KEEP ALIVE" option
        # available, so we will get a drop after one day...
        self.connect()

        logger.info("[Active Directory UI] AD/LDAP: search for contact %s" %
                    contact.get_name())
        searchScope = ldap.SCOPE_SUBTREE
        ## retrieve all attributes
        #retrieveAttributes = ["userPrincipalName", "thumbnailPhoto", "samaccountname", "email"]

        cname = contact.get_name()
        email = contact.email
        searchFilter = self.search_format % (cname, email)
        logger.info("[Active Directory UI] Filter %s" % str(searchFilter))
        try:
            ldap_result_id = self.con.search(self.basedn, searchScope,
                                             searchFilter,
                                             self.retrieveAttributes)
            result_set = []
            while 1:
                result_type, result_data = self.con.result(ldap_result_id, 0)
                if (result_data == []):
                    logger.warning("[Active Directory UI] No result for %s" %
                                   cname)
                    return None

                if result_type == ldap.RES_SEARCH_ENTRY:
                    (_, elts) = result_data[0]
                    if self.mode == 'openldap':
                        elts['dn'] = str(result_data[0][0])
                    try:
                        account_name = elts[self.name_id][0]
                    except Exception:
                        account_name = str(result_data[0])
                    # Got a result, try to get photo to write file
                    logger.info(
                        "[Active Directory UI] Find account principalname %s" %
                        account_name)
                    return elts
        except ldap.LDAPError, e:
            logger.error("[Active Directory UI] Ldap error: %s, %s" %
                         (e, str(e.__dict__)))
            return None
Example #40
    def is_correct(self):
        b = True
        for dr in self.dateranges:
            d = dr.is_correct()
            if not d:
                logger.error("[timeperiod::%s] invalid daterange ",
                             self.get_name())
            b &= d

        # Warn about incorrect entries
        for e in self.invalid_entries:
            logger.warning("[timeperiod::%s] invalid entry '%s'",
                           self.get_name(), e)
        return b
Example #41
    def check_auth(self, user, password):
        # If we do not have an ldap uri, no auth :)
        if not self.ldap_uri:
            return False

        logger.debug(
            "[Active Directory UI] Trying to auth by ldap with user %s and password %s"
            % (user, password))

        c = self.app.datamgr.get_contact(user)

        if not c:
            logger.warning(
                "[Active Directory UI] AD/Ldap: invalid user %s (not founded)"
                % user)
            return False

        # first we need to find the principalname of this entry
        # because it can be a user name like j.gabes, but we should auth by ldap
        # with [email protected] for example
        elts = self.find_contact_entry(c)

        try:
            # On AD, take the uid / principalname
            if self.mode == 'ad':
                # Maybe the entry is void....
                if self.auth_key in elts:
                    account_name = elts[self.auth_key][0]
            else:  # For openldap, use the full DN
                account_name = elts[self.auth_key]
        except KeyError:
            logger.warning(
                "[Active Directory UI] Cannot find the %s entry, so use the user entry"
                % self.auth_key)
            account_name = user

        local_con = ldap.initialize(self.ldap_uri)
        local_con.set_option(ldap.OPT_REFERRALS, 0)

        # Any errors will throw an ldap.LDAPError exception
        # or related exception so you can ignore the result
        try:
            local_con.simple_bind_s(account_name, password)
            logger.info(
                "[Active Directory UI] AD/Ldap Connection done with user %s and password %s"
                % (user, password))
            return True
        except ldap.LDAPError, exp:
            logger.error("[Active Directory UI] Ldap auth error: %s" %
                         str(exp))
Example #42
    def push_external_commands_to_schedulers(self):
        # If we are not in direct routing mode, just bail out after
        # faking resolution of the commands
        if not self.direct_routing:
            self.external_commands.extend(self.unprocessed_external_commands)
            self.unprocessed_external_commands = []
            return

        commands_to_process = self.unprocessed_external_commands
        self.unprocessed_external_commands = []

        # Now get all external commands and dispatch them to the
        # right schedulers
        for ext_cmd in commands_to_process:
            self.external_command.resolve_command(ext_cmd)

        # Now for all alive schedulers, send the commands
        for sched_id in self.schedulers:
            sched = self.schedulers[sched_id]
            extcmds = sched['external_commands']
            cmds = [extcmd.cmd_line for extcmd in extcmds]
            con = sched.get('con', None)
            sent = False
            if not con:
                logger.warning("The scheduler is not connected %s", sched)
                self.pynag_con_init(sched_id)
                con = sched.get('con', None)

            # If there are commands and the scheduler is alive
            if len(cmds) > 0 and con:
                logger.debug("Sending %d commands to scheduler %s", len(cmds),
                             sched)
                try:
                    # con.run_external_commands(cmds)
                    con.post('run_external_commands', {'cmds': cmds})
                    sent = True
                # Not connected or sched is gone
                except (HTTPExceptions, KeyError), exp:
                    logger.debug('manage_returns exception:: %s,%s ',
                                 type(exp), str(exp))
                    self.pynag_con_init(sched_id)
                    return
                except AttributeError, exp:  # the scheduler must  not be initialized
                    logger.debug('manage_returns exception:: %s,%s ',
                                 type(exp), str(exp))
                except Exception, exp:
                    logger.error(
                        "A satellite raised an unknown exception: %s (%s)",
                        exp, type(exp))
                    raise
Example #43
    def manage_finished_checks(self):
        to_del = []

        # First look for checks in timeout
        for check in self.checks:
            if check.status == 'launched':
                check.con.look_for_timeout()

        # Now we look for finished checks
        for check in self.checks:
            # First manage check in error, bad formed
            if check.status == 'done':
                to_del.append(check)
                self.returns_queue.put(check)

            # Then we check for good checks
            elif check.status == 'launched' and check.con.is_done():
                con = check.con
                # unlink our object from the original check;
                # this might be necessary to allow the check to be
                # serializable again..
                del check.con
                if con.readwrite_error and check.retried < 2:
                    logger.warning(
                        '%s: Got an IO error (%s), retrying 1 more time.. (cur=%s)',
                        check.command, con.message, check.retried)
                    check.retried += 1
                    check.status = 'queue'
                    continue

                if check.retried:
                    logger.info('%s: Successfully retried check :)',
                                check.command)

                check.status = 'done'
                check.exit_status = con.rc
                check.get_outputs(con.message, 8012)
                check.execution_time = con.execution_time

                # and set this check for deleting
                # and try to send it
                to_del.append(check)
                self.returns_queue.put(check)

        # And delete finished checks
        for chk in to_del:
            self.checks.remove(chk)

        # count the number of checks still in the list
        return len(self.checks)
Example #44
    def __init__(self, modules, app):
        """ Because it wouldn't make sense to use many submodules in this
            MetaModule, we only use the first one in the list of modules.
        """
        super(HelpdeskMetaModule, self).__init__(modules=modules, app=app)

        self.app = app
        self.module = None
        if modules:
            if len(modules) > 1:
                logger.warning(
                    '[WebUI] Too much helpdesk modules declared (%s > 1). Using %s.',
                    len(modules), modules[0])
            self.module = modules[0]
Example #45
    def check_bad_dispatch(self):
        for elt in self.elements:
            if hasattr(elt, 'conf'):
                # If element has a conf, I do not care, it's a good dispatch
                # If dead: I do not ask it something, it won't respond..
                if elt.conf is None and elt.reachable:
                    # print "Ask", elt.get_name() , 'if it got conf'
                    if elt.have_conf():
                        logger.warning("The element %s have a conf and should "
                                       "not have one! I ask it to idle now",
                                       elt.get_name())
                        elt.active = False
                        elt.wait_new_conf()
                        # I do not care whether the order was sent or not; if not,
                        # the next loop will resend it
                    # else:
                    #    print "No conf"

        # I ask satellites which sched_id they manage. If I do not agree, I ask
        # them to remove it
        for satellite in self.satellites:
            kind = satellite.get_my_type()
            if satellite.reachable:
                cfg_ids = satellite.managed_confs  # what_i_managed()
                # I do not care about satellites that do nothing, they already
                # do what I want :)
                if len(cfg_ids) != 0:
                    id_to_delete = []
                    for cfg_id in cfg_ids:
                        # DBG print kind, ":", satellite.get_name(), "manage cfg id:", cfg_id
                        # Ok, we search for realms that have the conf
                        for r in self.realms:
                            if cfg_id in r.confs:
                                # Ok, we've got the realm; we check its to_satellites_managed_by
                                # to see if the reactionner is in it. If not, we remove the sched_id for it
                                if satellite not in r.to_satellites_managed_by[kind][cfg_id]:
                                    id_to_delete.append(cfg_id)
                    # Maybe we removed all cfg_id of this reactionner
                    # We can put it idle, no active and wait_new_conf
                    if len(id_to_delete) == len(cfg_ids):
                        satellite.active = False
                        logger.info("I ask %s to wait a new conf", satellite.get_name())
                        satellite.wait_new_conf()
                    else:
                        # It is not fully idle, just less cfg
                        for id in id_to_delete:
                            logger.info("I ask to remove configuration N%d from %s",
                                        id, satellite.get_name())
                            satellite.remove_from_conf(id)
Example #46
    def check_cfg_password_auth(self, username, password):
        """ Embedded authentication with password stored in contact definition.
            Function imported from auth-cfg-password module.
        """
        logger.info("[WebUI-auth-cfg-password] Authenticating user '%s'",
                    username)

        c = self.app.datamgr.get_contact(name=username)
        if not c:
            c = self.app.datamgr.get_contacts()
            if not c:
                logger.error("[WebUI] the WebUI do not know any user! "
                             "Are you sure it is correctly initialized?")
            else:
                logger.error(
                    "[WebUI-auth-cfg-password] You need to have a contact "
                    "having the same name as your user: %s", username)
            self.app.request.environ['MSG'] = "You are not allowed to connect."
            return False
        p = None
        if isinstance(c, dict):
            p = c.get('password', None)
        else:
            p = c.password

        # basic checks
        if not p:
            logger.error(
                "[WebUI-auth-cfg-password] User %s does not have a password: connection refused",
                username)
            self.app.request.environ['MSG'] = "No user password set"
            return False

        if p == 'NOPASSWORDSET':
            logger.error(
                "[WebUI-auth-cfg-password] User %s still has the default password: connection refused",
                username)
            self.app.request.environ['MSG'] = "Default user password set"
            return False

        if p == password:
            logger.info("[WebUI-auth-cfg-password] Authenticated")
            return True

        self.app.request.environ['MSG'] = "Access denied"
        logger.warning(
            "[WebUI-auth-cfg-password] Authentication failed, password mismatch")
        return False
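
For reference, the checks above can be exercised outside the WebUI; a minimal sketch, assuming a bare contact object that only carries a password attribute (SimpleContact and the sample credentials below are hypothetical):

# Hypothetical stand-in for a contact definition, for illustration only.
class SimpleContact(object):
    def __init__(self, contact_name, password):
        self.contact_name = contact_name
        self.password = password


def cfg_password_matches(contact, candidate):
    # Mirror the checks above: refuse a missing or default password,
    # then compare the stored value with the submitted one.
    if contact is None or not contact.password:
        return False
    if contact.password == 'NOPASSWORDSET':
        return False
    return contact.password == candidate


admin = SimpleContact('admin', 's3cr3t')
print(cfg_password_matches(admin, 's3cr3t'))   # True
print(cfg_password_matches(admin, 'wrong'))    # False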
Example #47
0
    def main(self):
        self.set_proctitle(self.name)

        self.set_exit_handler()
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.setblocking(0)
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind((self.host, self.port))
        server.listen(self.backlog)
        input = [server]
        databuffer = {}
        IVs = {}

        while not self.interrupted:
            # outputready and exceptready unused
            inputready, _, _ = select.select(input, [], [], 1)
            for s in inputready:
                if s == server:
                    # handle the server socket
                    try:
                        client, _ = server.accept()
                        iv = self.send_init_packet(client)
                        IVs[client] = iv
                        input.append(client)
                    except Exception as e:
                        logger.warning("[NSCA] Exception on socket connecting: %s", str(e))
                        continue
                else:
                    # handle all other sockets
                    try:
                        data = s.recv(self.buffer_length)
                        if s in databuffer:
                            databuffer[s] += data
                        else:
                            databuffer[s] = data
                    except Exception as e:
                        logger.warning("[NSCA] Exception on socket receiving: %s", str(e))
                        continue
                        
                    if len(data) == 0:
                        self.process_check_result(databuffer[s], IVs[s])
                        try:
                            # Closed socket
                            del databuffer[s]
                            del IVs[s]
                        except:
                            pass
                        s.close()
                        input.remove(s)
Example #48
0
    def manage_brok_thread(self):
        logger.debug("[WebUI] manage_brok_thread start ...")

        while True:
            start = time.clock()
            l = self.to_q.get()

            # try to relaunch dead module
            self.check_and_del_zombie_modules()

            logger.debug(
                "[WebUI] manage_brok_thread got %d broks, queue length: %d",
                len(l), self.to_q.qsize())
            for b in l:
                b.prepare()
                self.wait_for_no_readers()
                try:
                    self.rg.manage_brok(b)

                    # Question: should we send broks to internal modules?
                    # No internal WebUI module has anything to do with broks!
                    for mod in self.modules_manager.get_internal_instances():
                        try:
                            mod.manage_brok(b)
                        except Exception, exp:
                            logger.warning(
                                "[WebUI] The module %s raised an exception: %s, I'm tagging it to restart later",
                                mod.get_name(), str(exp))
                            logger.debug("[WebUI] %s - Exception type: %s",
                                         self.name, type(exp))
                            logger.debug("[WebUI] Back trace of this kill: %s",
                                         traceback.format_exc())
                            self.modules_manager.set_to_restart(mod)
                except Exception, exp:
                    logger.error("[WebUI] manage_brok_thread exception")
                    msg = Message(id=0,
                                  type='ICrash',
                                  data={
                                      'name': self.get_name(),
                                      'exception': exp,
                                      'trace': traceback.format_exc()
                                  })
                    self.from_q.put(msg)
                    # wait 2 sec so we know that the broker got our message, and die
                    time.sleep(2)
                    # No need to raise here, we are in a thread, exit!
                    os._exit(2)
                finally:
Example #49
0
    def get_user_auth(self):
        logger.warning("[WebUI] Deprecated - Getting authenticated user ...")
        self.user_picture = None

        username = webui_app.request.get_cookie("user",
                                                secret=self.auth_secret)
        if not username and not self.allow_anonymous:
            return None
        contact = self.datamgr.get_contact(username or 'anonymous')
        if not contact:
            return None

        user = User.from_contact(contact, self.user_picture, self.gravatar)
        self.user_picture = user.picture
        return user
Example #50
0
 def try_best_load(cls, name, package=None):
     try:
         mod = importlib.import_module(name, package)
     except Exception as err:
         logger.warning("Cannot import %s : %s",
                        '%s.%s' % (package, name) if package else name, err)
         return
     # if the module has a 'properties' attribute and a 'get_instance'
     # function, then we are happy and we'll use it:
     try:
         mod.properties
         mod.get_instance
     except AttributeError:
         return
     return mod
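
A hedged stand-alone version of the same probing pattern, using hasattr instead of bare attribute access (probe_module is an illustrative name, not part of the module loader):

import importlib
import logging

logger = logging.getLogger(__name__)


def probe_module(name, package=None):
    """Return the imported module only if it looks like a Shinken-style module."""
    try:
        mod = importlib.import_module(name, package)
    except Exception as err:
        logger.warning("Cannot import %s: %s", name, err)
        return None
    # A usable module must expose both attributes, as checked above.
    if hasattr(mod, 'properties') and hasattr(mod, 'get_instance'):
        return mod
    return None


# 'json' imports fine but has no 'properties'/'get_instance', so it is rejected.
print(probe_module('json'))  # None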
Example #51
0
    def read_and_parse_sms(self):
        # Get only unread SMS of the inbox
        SMSmsgs = self.android.smsGetMessages(True, 'inbox').result
        to_mark = []
        cmds = []
        for message in SMSmsgs:
            # Read the message
            body = message['body'].encode('utf8', 'ignore')
            to_mark.append(message['_id'])
            logger.info('[Android SMS] Addr type : %s' % str(type(message['address'])))
            logger.info('[Android SMS] Message type: %s ' % str(type(body)))
            logger.info('[Android SMS] Message content : %s' % str(message))
            if body.startswith(('ack', 'Ack', 'ACK')):
                elts = body.split(' ')

                if len(elts) <= 1:
                    logger.warning("[Android SMS] Bad message length")
                    continue

                # Ok, look for host or host/service
                raw = elts[1]
                if '/' in raw:
                    elts = raw.split('/')
                    # If not service desc, bail out
                    if len(elts) == 1:
                        continue
                    hname = elts[0]
                    sdesc = ' '.join(elts[1:])
                    extcmd = 'ACKNOWLEDGE_SVC_PROBLEM;%s;%s;1;1;1;SMSPhoneAck;None\n' % (hname, sdesc)
                    e = ExternalCommand(extcmd)
                    cmds.append(e)
                else:
                    hname = raw
                    extcmd = 'ACKNOWLEDGE_HOST_PROBLEM;%s;1;1;1;SMSPhoneAck;None\n' % hname
                    e = ExternalCommand(extcmd)
                    cmds.append(e)

        # Mark all read messages as read
        r = self.android.smsMarkMessageRead(to_mark, True)

        logger.info("[Android SMS] Raise messages: %s" % str(cmds))
        for cmd in cmds:
            try:
                # Under android we got a queue here
                self.returns_queue.put(cmd)
            except IOError, exp:
                logger.error("[Android SMS] %d eiting: %s" % (self.id, str(exp)))
                sys.exit(2)
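
The body parsing above boils down to a small grammar, 'ack <host>' or 'ack <host>/<service description>'; a minimal sketch of just that step (sms_body_to_extcmd and the host/service names are made up for illustration):

def sms_body_to_extcmd(body):
    """Turn an 'ack <host>[/<service>]' SMS body into an external command string."""
    elts = body.split(' ')
    if len(elts) <= 1 or not elts[0].lower().startswith('ack'):
        return None
    raw = elts[1]
    if '/' in raw:
        parts = raw.split('/')
        hname = parts[0]
        sdesc = ' '.join(parts[1:])
        return 'ACKNOWLEDGE_SVC_PROBLEM;%s;%s;1;1;1;SMSPhoneAck;None\n' % (hname, sdesc)
    return 'ACKNOWLEDGE_HOST_PROBLEM;%s;1;1;1;SMSPhoneAck;None\n' % raw


print(sms_body_to_extcmd('ack web01/HTTP'))
print(sms_body_to_extcmd('ack web01'))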
Example #52
0
    def manage_log_brok(self, b):
        """ Intercept log type brok and append state change to the SLA database if needed """

        data = b.data
        line = data['log']

        if re.match("^\[[0-9]*\] [A-Z][a-z]*.:", line):
            # Match log which NOT have to be stored
            return

        try:
            logline = Logline(line=line)
            values = logline.as_dict()
            if values['state_type'] != 'HARD' or values['logclass'] != 1:
                return

            if logline.logclass != LOGCLASS_INVALID:
                logger.debug(
                    '[hokuto-log-cacher] %s %s %s.%s' %
                    (values['time'], values['state'], values['host_name'],
                     values['service_description']))
                with sqlite3.connect(self.db_path) as conn:
                    if not self.check_db_has_sla(
                            conn):  # Abort if the table doesn't exist.
                        # This may happen if hokuto was just installed and the broker receives data
                        # before Hokuto initializes the database
                        logger.warning(
                            "[hokuto-log-cacher] A log brok was skipped: hokuto's database wasn't ready to receive it. Launching Hokuto once should solve this problem."
                        )
                        return
                    row = conn.execute(
                        "SELECT state FROM sla WHERE host_name=? AND service_description=? ORDER BY time DESC LIMIT 1",
                        (values['host_name'],
                         values['service_description'])).fetchone()
                    #lastState = Sla.query\
                    #               .filter_by(host_name = values['host_name'], service_description = values['service_description'])\
                    #               .order_by(Sla.time.desc())\
                    #               .first()

                    if row is None or row[0] != values['state']:
                        conn.execute(
                            'INSERT INTO sla (host_name, service_description, state, time) VALUES (?, ?, ?, ?)',
                            (values['host_name'],
                             values['service_description'], values['state'],
                             values['time']))

        except Exception, exp:
            logger.error("[hokuto-log-cacher] %s" % str(exp))
Example #53
0
class DBOracle(DB):
    """Manage connection and query execution against Oracle databases."""
    def __init__(self, user, password, database, table_prefix=''):
        self.user = user
        self.password = password
        self.database = database
        self.table_prefix = table_prefix

    def connect_database(self):
        """Create the database connection
        TODO: finish (begin :) ) error catch and conf parameters...
        """

        connstr = '%s/%s@%s' % (self.user, self.password, self.database)

        self.db = connect_function(connstr)
        self.db_cursor = self.db.cursor()
        self.db_cursor.arraysize = 50

    def execute_query(self, query):
        """ Execute a query against an Oracle database.
        """
        logger.debug("[DBOracle] Execute Oracle query %s\n" % (query))
        try:
            self.db_cursor.execute(query)
            self.db.commit()
        except IntegrityError_exp, exp:
            logger.warning("[DBOracle] Warning: a query raised an integrity error: "
                           "%s, %s" % (query, exp))
        except ProgrammingError_exp, exp:
            logger.warning("[DBOracle] Warning: a query raised a programming error: "
                           "%s, %s" % (query, exp))
Example #54
0
    def check_alignak_auth(self, username, password):
        ''' Embedded authentication against Alignak backend.
        '''
        logger.info("[WebUI-auth-alignak] Authenticating user '%s'", username)

        try:
            self.app.frontend.logout()
            self.app.frontend.login(username, password)
            logger.info("[WebUI-auth-alignak] Authenticated")
            return True
        except:
            logger.error("[WebUI-auth-alignak] could not connect to Alignak backend")
            return False

        logger.warning("[WebUI-auth-alignak] Authentication failed, password mismatch")
        return False
Example #55
0
 def _format_csv_value(self, value):
     if isinstance(value, list):
         return self.separators.list.join(str(x) for x in value)
     elif isinstance(value, bool):
         return '1' if value else '0'
     else:
         try:
             return str(value)
         except UnicodeEncodeError as err:
             logger.warning('UnicodeEncodeError on str() of: %r : %s' %
                            (value, err))
             return value.encode('utf-8', 'replace')
         except Exception as err:
             logger.warning('Unexpected error on str() of: %r : %s' %
                            (value, err))
             return ''
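
The formatting rules are easy to check in isolation; an illustrative rewrite with a plain ';' separator standing in for self.separators.list, which normally comes from the exporter's configuration:

def format_csv_value(value, list_sep=';'):
    """Render a value the way the CSV exporter above does."""
    if isinstance(value, list):
        return list_sep.join(str(x) for x in value)
    if isinstance(value, bool):
        return '1' if value else '0'
    return str(value)


print(format_csv_value(['CRITICAL', 'WARNING']))  # CRITICAL;WARNING
print(format_csv_value(True))                     # 1
print(format_csv_value(3.14))                     # 3.14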
Example #56
0
    def is_correct(self):
        state = True
        properties = self.__class__.properties

        # Raise all previously seen errors, like unknown contacts and co
        if self.configuration_errors != []:
            state = False
            for err in self.configuration_errors:
                logger.error("[item::%s] %s", self.get_name(), err)

        for prop, entry in properties.items():
            if not hasattr(self, prop) and entry.required:
                logger.warning("[item::%s] %s property is missing", self.get_name(), prop)
                state = False

        return state
Example #57
0
    def lockable_function(self, f):
        logger.warning("[WebUI] lockable_function, someone want lock!!!!")

        def lock_version(**args):
            self.wait_for_no_writers()
            try:
                return f(**args)
            finally:
                # We can remove ourselves as a reader now. It's NOT an atomic
                # operation, so we REALLY need a lock here (yes, I tried without
                # one and got inaccurate values there....)
                self.global_lock.acquire()
                self.nb_readers -= 1
                self.global_lock.release()

        return lock_version
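
The wrapper above is the reader half of a simple readers/writers scheme: wait_for_no_writers() registers the caller as a reader and the finally block unregisters it under the global lock. A small self-contained sketch of that idea (ReadGuard is an illustrative class, not the WebUI's own):

import threading


class ReadGuard(object):
    """Minimal reader counter protected by a lock, for illustration."""
    def __init__(self):
        self.global_lock = threading.Lock()
        self.nb_readers = 0

    def lockable_function(self, f):
        def lock_version(**kwargs):
            with self.global_lock:
                self.nb_readers += 1          # register as a reader
            try:
                return f(**kwargs)
            finally:
                with self.global_lock:
                    self.nb_readers -= 1      # unregister, even on error

        return lock_version


guard = ReadGuard()
read_data = guard.lockable_function(lambda **kw: 'some data')
print(read_data())        # some data
print(guard.nb_readers)   # 0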
Example #58
0
 def get_module(self, name):
     mod_dir = os.path.abspath(os.path.join(self.modules_dir, name))
     if mod_dir not in sys.path:
         sys.path.append(mod_dir)
     mod_path = os.path.join(self.modules_dir, name, 'module.py')
     if not os.path.exists(mod_path):
         mod_path = os.path.join(self.modules_dir, name, 'module.pyc')
     try:
         if mod_path.endswith('.py'):
             r = imp.load_source(name, mod_path)
         else:
             r = imp.load_compiled(name, mod_path)
     except:
         logger.warning('The module %s cannot be found or loaded', mod_path)
         raise
     return r
Example #59
0
    def __init__(self, modconf):
        BaseModule.__init__(self, modconf)
        self.plugins = []
        # Change: the var folder is not defined relative to '.', but relative to
        # ../var from the process name (shinken-broker). Previously the default for
        # database_file was calculated from '.', so the behavior changed depending
        # on where you ran the command from.
        self.database_file = getattr(
            modconf, 'database_file',
            os.path.join(os.path.abspath('.'), 'livestatus.db'))
        self.archive_path = getattr(
            modconf, 'archive_path',
            os.path.join(os.path.dirname(self.database_file), 'archives'))
        # Create the archive directory if it does not exist yet
        try:
            os.stat(self.archive_path)
        except OSError:
            os.mkdir(self.archive_path)
        max_logs_age = getattr(modconf, 'max_logs_age', '365')
        maxmatch = re.match(r'^(\d+)([dwmy]*)$', max_logs_age)
        if maxmatch is None:
            logger.warning(
                "[Logstore SQLite] Wrong format for max_logs_age: must be "
                "<number>[d|w|m|y] or <number>, not %s" % max_logs_age)
            return None
        else:
            if not maxmatch.group(2):
                self.max_logs_age = int(maxmatch.group(1))
            elif maxmatch.group(2) == 'd':
                self.max_logs_age = int(maxmatch.group(1))
            elif maxmatch.group(2) == 'w':
                self.max_logs_age = int(maxmatch.group(1)) * 7
            elif maxmatch.group(2) == 'm':
                self.max_logs_age = int(maxmatch.group(1)) * 31
            elif maxmatch.group(2) == 'y':
                self.max_logs_age = int(maxmatch.group(1)) * 365
        self.use_aggressive_sql = (getattr(modconf, 'use_aggressive_sql',
                                           '0') == '1')
        self.read_only = (getattr(modconf, 'read_only', '0') == '1')

        # This stack is used to create a full-blown select-statement
        self.sql_filter_stack = LiveStatusSqlStack()
        # This stack is used to create a minimal select-statement which
        # selects only by time >= and time <=
        self.sql_time_filter_stack = LiveStatusSqlStack()

        # Now sleep one second, so that we won't get lineno collisions with the last second
        time.sleep(1)
        Logline.lineno = 0
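
For reference, the max_logs_age option is a <number>[d|w|m|y] string converted to days; a hedged stand-alone sketch of that conversion (with the unit restricted to a single letter, which is slightly stricter than the regex above):

import re


def parse_max_logs_age(raw):
    """Return the retention period in days, or None if the format is wrong."""
    m = re.match(r'^(\d+)([dwmy]?)$', str(raw))
    if m is None:
        return None
    number = int(m.group(1))
    unit = m.group(2) or 'd'
    # Same conversion factors as the elif chain above.
    factors = {'d': 1, 'w': 7, 'm': 31, 'y': 365}
    return number * factors[unit]


print(parse_max_logs_age('365'))   # 365
print(parse_max_logs_age('4w'))    # 28
print(parse_max_logs_age('1y'))    # 365
print(parse_max_logs_age('oops'))  # None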
Example #60
0
    def expand_hosts_expression(self, pattern, hosts, services, running=False):
        error = None
        node = DependencyNode()
        node.operand = '&'
        elts = pattern.split(',')
        # Flags is the left part of the first ':' character
        flags = elts[0].strip().split(":")[0]
        # Name is the right part of the first ':' character
        name = ":".join(elts[0].strip().split(":")[1:])
        permissive = "p" in flags

        # Look if we have a service
        if len(elts) > 1:
            got_service = True
            service_description = ",%s" % elts[1].strip()
        else:
            got_service = False
            service_description = ""

        if "g" in flags:
            expanded_hosts, error = self.lookup_hosts_by_group(name, hosts)
        elif "r" in flags:
            expanded_hosts, error = self.lookup_hosts_by_regex(name, hosts)
        else:
            error = "Business rule uses unknown host expansion type"

        if error is not None:
            node.configuration_errors.append(error)
            return node

        for host_name in expanded_hosts:
            expr = "%s%s" % (host_name, service_description)
            o = self.eval_cor_pattern(expr, hosts, services, running)

            if not o.is_valid():
                if got_service is True and permissive is True:
                    # Invalid node is not added (error is ignored).
                    logger.warning(
                        "Business rule got an unknown service for %s. Ignored."
                        % host_name)
                else:
                    # Add the invalid node for error to be reported by arbiter.
                    node.sons.append(o)
            else:
                node.sons.append(o)

        return node
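
For reference, the pattern handled here has the form <flags>:<name>[,<service description>], with 'g' selecting a host group, 'r' a host-name regex and 'p' making unknown services non-fatal. A minimal sketch of just the flag/name/service split (split_hosts_expression is an illustrative helper, not part of the class):

def split_hosts_expression(pattern):
    """Split a business-rule expansion pattern into (flags, name, service)."""
    elts = pattern.split(',')
    head = elts[0].strip()
    flags = head.split(':')[0]                 # left of the first ':'
    name = ':'.join(head.split(':')[1:])       # right of the first ':'
    service = elts[1].strip() if len(elts) > 1 else ''
    return flags, name, service


print(split_hosts_expression('g:linux-servers,HTTP'))  # ('g', 'linux-servers', 'HTTP')
print(split_hosts_expression('r:web.*'))               # ('r', 'web.*', '')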