def hook_save_retention(self, daemon):
        """
        main function that is called in the retention creation pass
        """

        try:
            self.max_workers = cpu_count()
        except NotImplementedError:
            pass

        t0 = time.time()
        logger.debug("[MongodbRetention] asking me to update the retention objects")

        all_data = daemon.get_retention_data()

        processes = []
        for i in xrange(self.max_workers):
            proc = Process(target=self.job, args=(all_data, i, self.max_workers))
            proc.start()
            processes.append(proc)

        # Allow 30s to join the sub-processes, should be enough
        for proc in processes:
            proc.join(30)

        logger.info("Retention information updated in Mongodb (%.2fs)" % (time.time() - t0))
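The snippet spawns one process per CPU and passes each worker its index and the worker count, which suggests job() partitions all_data by index modulo. A minimal, self-contained sketch of that pattern (job's real body also writes to MongoDB and is not shown here):

# Sketch only: assumes job() splits the retention data by index modulo,
# which is implied by the (all_data, i, self.max_workers) arguments above.
from multiprocessing import Process, cpu_count

def job(all_hosts, index, workers):
    # Keep only the host names assigned to this worker
    mine = [h for n, h in enumerate(sorted(all_hosts)) if n % workers == index]
    print('worker %d handles %s' % (index, mine))

if __name__ == '__main__':
    hosts = {'web01': {}, 'web02': {}, 'db01': {}, 'db02': {}}
    workers = cpu_count()
    procs = [Process(target=job, args=(hosts, i, workers)) for i in range(workers)]
    for p in procs:
        p.start()
    for p in procs:
        p.join(30)  # same 30s join budget as the snippet above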
Example #2
def inventor(look_at):
    # Dump the local inventory
    inventory = CONFIG['paths']['inventory']
    logger.debug("dumping inventory %s" % inventory)

    # Get all sub-directories
    for d in os.listdir(inventory):
        if os.path.exists(os.path.join(inventory, d, 'package.json')):
            if not look_at or d in look_at:
                print d
            # If asked, dump the content.json content
            if look_at and d in look_at:
                content_p = os.path.join(inventory, d, 'content.json')
                if not os.path.exists(content_p):
                    logger.error('Missing %s file' % content_p)
                    continue
                try:
                    j = json.loads(open(content_p, 'r').read())
                except Exception, exp:
                    logger.error('Bad %s file "%s"' % (content_p, exp))
                    continue
                # Do not reuse 'd' here: it would clobber the directory name
                # used by the outer loop
                for entry in j:
                    s = ''
                    if entry['type'] == '5':  # tar typeflag '5' == directory
                        s += '(d)'
                    else:
                        s += '(f)'
                    s += entry['name']
                    print s
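The 'type' field checked above comes straight from the tar header typeflag, where '5' marks a directory (cf. tarfile.DIRTYPE); everything else is rendered as a plain file. A tiny standalone version of the rendering:

def render(entry):
    # tar typeflag '5' == directory (see tarfile.DIRTYPE); anything else
    # is shown as a regular file
    prefix = '(d)' if entry['type'] == '5' else '(f)'
    return prefix + entry['name']

print(render({'type': '5', 'name': 'etc/'}))       # -> (d)etc/
print(render({'type': '0', 'name': 'etc/a.cfg'}))  # -> (f)etc/a.cfg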
Example #3
    def manage_service_check_result_brok(self, b):
        data = b.data
        name = "%s.%s" % (
            self.illegal_char.sub('_', data['host_name']),
            self.illegal_char.sub('_', data['service_description'])
        )

        post_data = []

        post_data.extend(
            self.get_check_result_perfdata_points(
                data['perf_data'],
                data['last_chk'],
                name
            )
        )

        post_data.extend(
            self.get_state_update_points(data, name)
        )

        try:
            logger.debug("[influxdb broker] Launching: %s" % str(post_data))
        except UnicodeEncodeError:
            pass

        self.buffer.extend(post_data)
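self.illegal_char is defined elsewhere in the broker module; a plausible definition (hypothetical, not taken from the source) that keeps the measurement name free of separators:

import re

# Hypothetical stand-in for self.illegal_char: replace anything that is
# not alphanumeric, underscore or dash
illegal_char = re.compile(r'[^a-zA-Z0-9_\-]')

name = "%s.%s" % (illegal_char.sub('_', 'srv 01'),
                  illegal_char.sub('_', 'Disk /usr'))
print(name)  # -> srv_01.Disk__usr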
Example #4
def get_page():
    commands_list = []

    try:
        # Get the lists of information for the commands; each form key
        # may be repeated, hence getall()
        time_stamp_list = request.forms.getall(key="time_stamp")
        logger.debug("[WS_Arbiter] time_stamp_list: %s" % (time_stamp_list))
        host_name_list = request.forms.getall(key="host_name")
        logger.debug("[WS_Arbiter] host_name_list: %s" % (host_name_list))
        service_description_list = request.forms.getall(key="service_description")
        logger.debug("[WS_Arbiter] service_description_list: %s" % (service_description_list))
        return_code_list = request.forms.getall(key="return_code")
        logger.debug("[WS_Arbiter] return_code_list: %s" % (return_code_list))
        output_list = request.forms.getall(key="output")
        logger.debug("[WS_Arbiter] output_list: %s" % (output_list))
        commands_list = get_commands(
            time_stamp_list, host_name_list, service_description_list, return_code_list, output_list
        )
    except Exception, e:
        logger.error("[WS_Arbiter] failed to get the lists: %s" % str(e))
        commands_list = []
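For context, bottle's request.forms.getall() collects every occurrence of a repeated form key, so a client submits one set of fields per check result. A sketch of the client side (the URL path and port are assumptions; adjust them to the WS_Arbiter route actually configured):

import requests  # assumed available on the client side

payload = [
    ('time_stamp', '1700000000'), ('host_name', 'web01'),
    ('service_description', 'HTTP'), ('return_code', '0'),
    ('output', 'OK - 200'),
    ('time_stamp', '1700000002'), ('host_name', 'web02'),
    ('service_description', 'HTTP'), ('return_code', '2'),
    ('output', 'CRITICAL - timeout'),
]
# Hypothetical endpoint: each repeated key becomes one of the *_list values above
requests.post('http://localhost:7760/push_check_result', data=payload)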
Example #5
    def get_live_data_log(self):
        """Like get_live_data, but for log objects"""
        # finalize the filter stacks
        self.mongo_time_filter_stack.and_elements(self.mongo_time_filter_stack.qsize())
        self.mongo_filter_stack.and_elements(self.mongo_filter_stack.qsize())
        if self.use_aggressive_sql:
            # Be aggressive: preselect as much as possible in the database
            # and do less filtering in Python. But only a subset of the
            # Filter: attributes can be mapped to fields of the logs
            # collection; for the others we must use always-true clauses,
            # which can result in odd and potentially inefficient queries.
            mongo_filter_func = self.mongo_filter_stack.get_stack()
        else:
            # Be conservative: get everything from the database between two
            # dates and apply the Filter: clauses in Python
            mongo_filter_func = self.mongo_time_filter_stack.get_stack()
        dbresult = []
        mongo_filter = mongo_filter_func()
        # We can apply the filter stack here as well: we have columns and
        # filtercolumns. The only additional step is to enrich log lines
        # with host/service attributes. A timerange can be useful for a
        # faster preselection of lines.

        filter_element = eval('{ ' + mongo_filter + ' }')
        logger.debug("[LogStoreMongoDB] Mongo filter is %s" % str(filter_element))
        columns = ['logobject', 'attempt', 'logclass', 'command_name', 'comment',
                   'contact_name', 'host_name', 'lineno', 'message', 'plugin_output',
                   'service_description', 'state', 'state_type', 'time', 'type']
        if self.is_connected != CONNECTED:
            logger.warning("[LogStoreMongoDB] sorry, not connected")
        else:
            dbresult = [Logline([(c,) for c in columns], [x[col] for col in columns])
                        for x in self.db[self.collection]
                        .find(filter_element)
                        .sort([(u'time', pymongo.ASCENDING), (u'lineno', pymongo.ASCENDING)])]
        return dbresult
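The eval() above only turns a pre-rendered string into a pymongo filter document. A sketch of building the conservative time-range variant directly as a dict, which would avoid eval entirely (the 'time' field name is taken from the columns list above):

def time_range_filter(start, end):
    # Equivalent filter document built without eval()
    return {'time': {'$gte': start, '$lt': end}}

print(time_range_filter(1700000000, 1700086400))
# -> {'time': {'$gte': 1700000000, '$lt': 1700086400}}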
Example #6
    def add(self, elt):
        cls_type = elt.__class__.my_type
        if cls_type == 'brok':
            # For broks, we TAG the brok with our instance_id
            elt.instance_id = 0
            self.broks_internal_raised.append(elt)
            return
        elif cls_type == 'externalcommand':
            logger.debug("Enqueuing an external command '%s'" % str(elt.__dict__))
            self.external_commands.append(elt)
        # Maybe we got a Message from the modules: it's a way to ask something,
        # like the full data of a scheduler for example.
        elif cls_type == 'message':
            # We got a message, great!
            logger.debug(str(elt.__dict__))
            if elt.get_type() == 'NeedData':
                data = elt.get_data()
                # A full_instance_id means: I got no data for this scheduler,
                # so give me everything
                if 'full_instance_id' in data:
                    c_id = data['full_instance_id']
                    source = elt.source
                    logger.info('The module %s is asking me to get all initial data from the scheduler %d' % (source, c_id))
                    # So we just reset the connection and the running_id,
                    # and it will fetch everything anew
                    try:
                        self.schedulers[c_id]['con'] = None
                        self.schedulers[c_id]['running_id'] = 0
                    except KeyError:  # maybe this instance was not known, forget it
                        logger.warning("the module %s asked me for a full_instance_id for an unknown ID (%d)!" % (source, c_id))
            # Maybe a module tells me that it's dead: I must log its last words...
            if elt.get_type() == 'ICrash':
                data = elt.get_data()
                logger.error('the module %s just crashed! Please look at the traceback:' % data['name'])
                logger.error(data['trace'])
Example #7
def set_value(obj_ref, output=None, perfdata=None, return_code=None):
    obj = get_object(obj_ref)
    if not obj:
        return
    output = output or obj.output
    perfdata = perfdata or obj.perf_data
    if return_code is None:
        return_code = obj.state_id

    logger.debug("[trigger] Setting %s %s %s for object %s" % (output, perfdata, return_code, obj.get_full_name()))

    if perfdata:
        output = output + ' | ' + perfdata

    now = time.time()
    cls = obj.__class__
    i = obj.launch_check(now, force=True)
    for chk in obj.checks_in_progress:
        if chk.id == i:
            logger.debug("[trigger] I found the check I want to change")
            c = chk
            # Now we 'transform the check into a result',
            # so exit_status, output and status are eaten by the host
            c.exit_status = return_code
            c.get_outputs(output, obj.max_plugins_output_length)
            c.status = 'waitconsume'
            c.check_time = now
            # IMPORTANT: tag this check as coming from a trigger, so we
            # do not loop infinitely on trigger checks!
            c.from_trigger = True
Example #8
    def get_metric_and_value(self, perf_data):
        res = []
        metrics = PerfDatas(perf_data)

        for e in metrics:
            try:
                logger.debug("[Graphite UI] grokking: %s" % str(e))
            except UnicodeEncodeError:
                pass

            name = self.illegal_char.sub('_', e.name)
            name = self.multival.sub(r'.*', name)

            # Get the metric value and its threshold values if they exist
            name_value = {name: (e.value, e.uom)}
            if e.warning and e.critical:
                name_value[name + '_warn'] = e.warning
                name_value[name + '_crit'] = e.critical
            # Bail out if the metric carries no value
            if e.value == '':
                continue
            try:
                logger.debug("[Graphite UI] Got in the end: %s, %s" % (name, e.value))
            except UnicodeEncodeError:
                pass
            for key, value in name_value.items():
                res.append((key, value))
        return res
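A worked example of the pairing above, with a tiny stand-in parser for Nagios perfdata ('label=value[uom];warn;crit'); Shinken's real PerfDatas class handles many more corner cases:

import re

PERF = re.compile(r"(?P<name>[^=\s]+)=(?P<value>[\d.]+)(?P<uom>[a-zA-Z%]*)"
                  r"(?:;(?P<warn>[\d.]*))?(?:;(?P<crit>[\d.]*))?")

def parse(perf_data):
    res = []
    for m in PERF.finditer(perf_data):
        name = m.group('name')
        res.append((name, (float(m.group('value')), m.group('uom'))))
        if m.group('warn') and m.group('crit'):
            res.append((name + '_warn', float(m.group('warn'))))
            res.append((name + '_crit', float(m.group('crit'))))
    return res

print(parse('rta=0.5ms;100;200 pl=0%;40;80'))
# -> [('rta', (0.5, 'ms')), ('rta_warn', 100.0), ('rta_crit', 200.0),
#     ('pl', (0.0, '%')), ('pl_warn', 40.0), ('pl_crit', 80.0)]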
Example #9
    def manage_log_brok(self, brok):
        """
        Parse a Shinken log brok to enqueue a log line for Index insertion
        """
        d = date.today()
        index_name = self.index_prefix + "-" + d.strftime("%Y.%m.%d")

        line = brok.data["log"]
        if re.match(r"^\[[0-9]*\] [A-Z][a-z]*.:", line):
            # Matches log lines which do NOT have to be stored
            logger.warning("[elastic-logs] do not store: %s", line)
            return

        logline = Logline(line=line)
        logline_dict = logline.as_dict()
        logline_dict.update({"@timestamp": datetime.utcfromtimestamp(int(logline_dict["time"])).isoformat() + "Z"})
        values = {"_index": index_name, "_type": "shinken-logs", "_source": logline_dict}

        # values = logline.as_dict()
        if logline.logclass != LOGCLASS_INVALID:
            logger.debug("[elastic-logs] store log line values: %s", values)
            self.logs_cache.append(values)
        else:
            logger.info("[elastic-logs] This line is invalid: %s", line)

        return
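The skip-regex targets timestamped level lines such as '[1402515279] Info : ...'; alert lines fall through and get stored. A quick standalone check:

import re

skip = re.compile(r"^\[[0-9]*\] [A-Z][a-z]*.:")
for line in ("[1402515279] Info : Loading configuration",
             "[1402515279] SERVICE ALERT: web01;HTTP;CRITICAL;HARD;3;timeout"):
    print('%s -> %s' % ('skip' if skip.match(line) else 'store', line))
# skip -> [1402515279] Info : Loading configuration
# store -> [1402515279] SERVICE ALERT: web01;HTTP;CRITICAL;HARD;3;timeout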
Example #10
    def get_new_broks(self, type='scheduler'):
        # Get the good links tab for looping..
        links = self.get_links_from_type(type)
        if links is None:
            logger.debug('Type unknown for connection! %s', type)
            return

        # We check for new broks from each scheduler and
        # add them to our queues
        for sched_id in links:
            try:
                con = links[sched_id]['con']
                if con is not None:  # None = not initialized
                    t0 = time.time()
                    # Before making a call that can take long, do a simple ping to be sure it is alive
                    con.get('ping')
                    tmp_broks = con.get('get_broks', {'bname':self.name}, wait='long')
                    try:
                        _t = base64.b64decode(tmp_broks)
                        _t = zlib.decompress(_t)
                        tmp_broks = cPickle.loads(_t)
                    except (TypeError, zlib.error, cPickle.PickleError), exp:
                        logger.error('Cannot load broks data from %s : %s', links[sched_id]['name'], exp)
                        links[sched_id]['con'] = None
                        continue
                    logger.debug("Got %d broks in %.2fs", len(tmp_broks), time.time() - t0)
                    for b in tmp_broks.values():
                        b.instance_id = links[sched_id]['instance_id']
                    # Ok, we can add these broks to our queues
                    self.add_broks_to_queue(tmp_broks.values())

                else:  # no con? make the connection
                    self.pynag_con_init(sched_id, type=type)
Example #11
def show_minemap():
    user = app.request.environ['USER']

    # Apply search filter if one exists ...
    search = app.request.query.get('search', "type:host")
    if "type:host" not in search:
        search = "type:host " + search
    logger.debug("[WebUI-minemap] search parameters '%s'", search)
    items = app.datamgr.search_hosts_and_services(search, user, get_impacts=False)

    # Fetch elements per page preference for user, default is 25
    elts_per_page = app.prefs_module.get_ui_user_preference(user, 'elts_per_page', 25)

    # We want to limit the number of elements
    step = int(app.request.GET.get('step', elts_per_page))
    start = int(app.request.GET.get('start', '0'))
    end = int(app.request.GET.get('end', start + step))
        
    # If we overflow, come back to normal
    total = len(items)
    if start > total:
        start = 0
        end = step

    navi = app.helper.get_navi(total, start, step=step)

    return {'navi': navi, 'search_string': search, 'items': items[start:end], 'page': "minemap"}
Example #12
    def open(self):
        """
        Connect to the configured MongoDB URI.

        Executes an 'ismaster' command right away to force an immediate
        connection, because we need to know whether the DB server is
        available.

        Also resets the log rotation time to force a log rotation.
        """
        self.con = MongoClient(self.uri, connect=False)
        logger.info("[mongo-logs] trying to connect MongoDB: %s", self.uri)
        try:
            result = self.con.admin.command("ismaster")
            logger.info("[mongo-logs] connected to MongoDB, admin: %s", result)
            logger.debug("[mongo-logs] server information: %s", self.con.server_info())

            self.db = getattr(self.con, self.database)
            logger.info("[mongo-logs] connected to the database: %s (%s)", self.database, self.db)

            self.is_connected = CONNECTED
            self.next_logs_rotation = time.time()

            logger.info('[mongo-logs] database connection established')
        except ConnectionFailure as e:
            logger.error("[mongo-logs] Server is not available: %s", str(e))
            return False
        except Exception as e:
            logger.error("[mongo-logs] Could not open the database: %s", str(e))
            raise MongoLogsError

        return True
Example #13
    def do_pynag_con_init(self, id, type='scheduler'):
        # Get the good links tab for looping..
        links = self.get_links_from_type(type)
        if links is None:
            logger.debug('Type unknown for connection! %s', type)
            return

        if type == 'scheduler':
            # If sched is not active, I do not try to init
            # it is just useless
            is_active = links[id]['active']
            if not is_active:
                return

        # If we try to connect too much, we slow down our tests
        if self.is_connection_try_too_close(links[id]):
            return

        # Ok, we can now update it
        links[id]['last_connection'] = time.time()

        # DBG: print "Init connection with", links[id]['uri']
        running_id = links[id]['running_id']
        # DBG: print "Running id before connection", running_id
        uri = links[id]['uri']
        try:
            con = links[id]['con'] = HTTPClient(uri=uri, strong_ssl=links[id]['hard_ssl_name_check'])
        except HTTPExceptions, exp:
            # The connection failed: log it, drop the connection object
            # and retry on a later pass
            logger.info("Connection problem to the %s %s: %s", type, links[id]['name'], str(exp))
            links[id]['con'] = None
            return
Example #14
    def post(self, path, args, wait='short'):
        size = 0
        # Take args, pickle them and then compress the result
        for (k,v) in args.iteritems():
            args[k] = zlib.compress(cPickle.dumps(v), 2)
            size += len(args[k])
        # Ok go for it!
        logger.debug('Posting to %s: %sB' % (self.uri+path, size))
        
        c = self.con
        c.setopt(pycurl.HTTPGET, 0)
        c.setopt(c.POST, 1)

        # The TIMEOUT depends on whether we are waiting for a long query or not:
        # long: data_timeout, e.g. for huge brok receptions
        # short: timeout, e.g. for a simple "ok" connection
        if wait == 'short':
            c.setopt(c.TIMEOUT, self.timeout)
        else:
            c.setopt(c.TIMEOUT, self.data_timeout)
        #if proxy:
        #    c.setopt(c.PROXY, proxy)
        # Pycurl wants a list of tuples as args
        postargs = [(k,v) for (k,v) in args.iteritems()]
        c.setopt(c.HTTPPOST, postargs)
        c.setopt(c.URL, str(self.uri+path))
        # Ok now manage the response
        response = StringIO()
        c.setopt(pycurl.WRITEFUNCTION, response.write)
        #c.setopt(c.VERBOSE, 1)
        try:
            c.perform()
        except pycurl.error, error:
            errno, errstr = error
            raise HTTPException('Connection error to %s : %s' % (self.uri, errstr))
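The receiving side reverses the two per-argument steps (the same unpack pattern appears in get_new_broks in Example #10 above):

import zlib
import cPickle  # on Python 3: import pickle as cPickle

original = {'bname': 'broker-master'}
wire = zlib.compress(cPickle.dumps(original), 2)         # what post() sends
assert cPickle.loads(zlib.decompress(wire)) == original  # what the peer does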
Example #15
    def get_graph_uris(self, elt, graphstart=None, graphend=None, duration=None, source='detail'):
        ''' Aggregate the get_graph_uris results of all the submodules.
            The source parameter defines the context of the call:
            are we displaying graphs for the element detail page (detail)
            or for a widget in the dashboard (dashboard)?

            If duration is not None, we consider it a number of seconds to
            graph and we call the module's get_relative_graph_uris.

            If get_relative_graph_uris is not a module function, we compute
            graphstart and graphend and call the module's get_graph_uris.

            If graphstart and graphend are not None, we call the module's
            get_graph_uris.
        '''
        uris = []
        for mod in self.modules:
            if not duration:
                uris.extend(mod.get_graph_uris(elt, graphstart, graphend, source))
            else:
                f = getattr(mod, 'get_relative_graph_uris', None)
                if f and callable(f):
                    uris.extend(f(elt, duration, source))
                else:
                    graphend = time.time()
                    graphstart = graphend - duration
                    uris.extend(mod.get_graph_uris(elt, graphstart, graphend, source))
                
            logger.debug("[WebUI] Got graphs: %s", uris)

        for uri in uris:
            uri['img_src'] = '/graph?url=' + urllib.quote(uri['img_src'])

        return uris
Example #16
def allperfs(obj_ref):
    """ Get all perfdatas from a service or a host
    """
    obj = get_object(obj_ref)
    p = PerfDatas(obj.perf_data)
    logger.debug("[trigger] I get all perfdatas")
    return dict([(metric.name, p[metric.name]) for metric in p])
Example #17
    def linkify_hg_by_realms(self, realms):
        # Now we explode the realm value if we've got one
        # The group realm must not override a host one (warning?)
        for hg in self:
            if not hasattr(hg, 'realm'):
                continue

            # Maybe the value is void?
            if not hg.realm.strip():
                continue

            r = realms.find_by_name(hg.realm.strip())
            if r is not None:
                hg.realm = r
                logger.debug("[hostgroups] %s is in %s realm", hg.get_name(), r.get_name())
            else:
                err = "the hostgroup %s got an unknown realm '%s'" % (hg.get_name(), hg.realm)
                hg.configuration_errors.append(err)
                hg.realm = None
                continue

            for h in hg:
                if h is None:
                    continue
                if h.realm is None or h.got_default_realm:  # i.e. no explicit realm was set on the host
                    logger.debug("[hostgroups] apply a realm %s to host %s from a hostgroup rule (%s)",
                                 hg.realm.get_name(), h.get_name(), hg.get_name())
                    h.realm = hg.realm
                else:
                    if h.realm != hg.realm:
                        logger.warning("[hostgroups] host %s is not in the same realm as its hostgroup %s",
                                       h.get_name(), hg.get_name())
Example #18
    def hook_save_retention(self, daemon):
        """
        main function that is called in the retention creation pass
        """
        logger.debug("[MemcacheRetention] asking me to update the retention objects")

        all_data = daemon.get_retention_data()

        hosts = all_data['hosts']
        services = all_data['services']


        # Save the hosts
        for h_name in hosts:
            try:
                h = hosts[h_name]
                key = self.normalize_key("HOST-%s" % h_name)
                val = cPickle.dumps(h)
                self.mc.set(key, val)
            except Exception:
                logger.error("[MemcacheRetention] error while saving host %s" % h_name)

        # Save the services
        for (h_name, s_desc) in services:
            try:
                key = self.normalize_key("SERVICE-%s,%s" % (h_name, s_desc))
                s = services[(h_name, s_desc)]
                val = cPickle.dumps(s)
                self.mc.set(key, val)
            except Exception:
                logger.error("[MemcacheRetention] error while saving service %s,%s" % (h_name, s_desc))

        self.mc.disconnect_all()
        logger.info("Retention information updated in Memcache")
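The matching restore pass would read the same 'HOST-<name>' and 'SERVICE-<host>,<desc>' keys back and unpickle them; a sketch with a plain dict standing in for the memcache client so it runs anywhere:

import cPickle  # on Python 3: import pickle as cPickle

mc = {}  # stand-in for memcache.Client
mc['HOST-web01'] = cPickle.dumps({'state': 'UP'})  # what the save pass wrote

host = cPickle.loads(mc['HOST-web01'])
print(host)  # -> {'state': 'UP'}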
Example #19
    def commit_and_rotate_log_db(self):
        """Submit a commit or rotate the complete database file.

        This function is called whenever the mainloop doesn't handle a request.
        The database updates are committed every second.
        Every day at 00:05 the database contents with a timestamp of past days
        are moved to their own datafiles (one for each day). We wait until 00:05
        because in a distributed environment even after 00:00 (on the broker host)
        we might receive data from other hosts with a timestamp dating from yesterday.
        """
        if self.read_only:
            return
        now = time.time()
        if self.next_log_db_commit <= now:
            self.commit()
            logger.debug("[Logstore SQLite] commit.....")
            self.next_log_db_commit = now + 1
        if self.next_log_db_rotate <= now:
            logger.info("[Logstore SQLite] at %s we rotate the database file" % time.asctime(time.localtime(now)))
            # Take the current database file
            # Move the messages into daily files
            self.log_db_do_archive()

            today = datetime.date.today()
            today0005 = datetime.datetime(today.year, today.month, today.day, 0, 5, 0)
            if now < time.mktime(today0005.timetuple()):
                nextrotation = today0005
            else:
                nextrotation = today0005 + datetime.timedelta(days=1)

            # See you tomorrow
            self.next_log_db_rotate = time.mktime(nextrotation.timetuple())
            logger.info("[Logstore SQLite] next rotation at %s " % time.asctime(time.localtime(self.next_log_db_rotate)))
Example #20
    def hook_save_retention(self, daemon):
        """
        main function that is called in the retention creation pass
        """
        logger.debug("[RedisRetention] asking me to update retention objects")

        all_data = daemon.get_retention_data()

        hosts = all_data['hosts']
        services = all_data['services']

        # Save the hosts
        for h_name in hosts:
            h = hosts[h_name]
            key = self._get_host_key(h_name)
            val = cPickle.dumps(h)
            if self.expire_time:
                self.rc.set(key, val, ex=self.expire_time)
            else:
                self.rc.set(key, val)

        for (h_name, s_desc) in services:
            s = services[(h_name, s_desc)]
            key = self._get_service_key(h_name, s_desc)
            val = cPickle.dumps(s)
            if self.expire_time:
                self.rc.set(key, val, ex=self.expire_time)
            else:
                self.rc.set(key, val)
        logger.info("Retention information updated in Redis")
Example #21
    def init(self):
        """
        Called by Scheduler to say 'let's prepare yourself guy'
        """
        logger.debug("Initialization of the memcache module")
        #self.return_queue = self.properties['from_queue']
        self.mc = memcache.Client(['%s:%s' % (self.server, self.port)], debug=0)
Example #22
def grab_package(pname):
    cprint('Grabbing : ', end='')
    cprint('%s' % pname, 'green')

    # Now really grab it
    proxy = CONFIG['shinken.io']['proxy']
    api_key = CONFIG['shinken.io']['api_key']

    # Ok, we will fetch the file with a 10s timeout
    c = pycurl.Curl()
    c.setopt(c.POST, 0)
    c.setopt(c.CONNECTTIMEOUT, 10)
    c.setopt(c.TIMEOUT, 10)
    if proxy:
        c.setopt(c.PROXY, proxy)

    c.setopt(c.URL, str('shinken.io/grab/%s' % pname))
    response = StringIO()
    c.setopt(pycurl.WRITEFUNCTION, response.write)
    #c.setopt(c.VERBOSE, 1)
    c.perform()
    r = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    if r != 200:
        logger.error("There was a critical error : %s" % response.getvalue())
        sys.exit(2)
    else:
        ret = response.getvalue()
        logger.debug("CURL result len : %d " % len(ret))
        return ret
Example #23
def get_instance(plugin):
    logger.debug("[MySQLImport]: Get MySQL importer instance for plugin %s" % plugin.get_name())
    if not MySQLdb:
        raise Exception('Missing module python-mysqldb. Please install it.')
    host = plugin.host
    login = plugin.login
    password = plugin.password
    database = plugin.database
    # Each req* option may hold an SQL query for one object type
    reqlist = {}
    for object_type in ('hosts', 'commands', 'timeperiods', 'notificationways',
                        'services', 'servicegroups', 'contacts', 'contactgroups',
                        'hostgroups', 'hostdependencies', 'servicedependencies',
                        'realms', 'schedulers', 'pollers', 'brokers',
                        'reactionners', 'receivers'):
        reqlist[object_type] = getattr(plugin, 'req' + object_type, None)

    instance = MySQL_importer_arbiter(plugin, host, login, password, database, reqlist)
    return instance
Example #24
    def process_check_result(self, databuffer, IV):
        # 208 is the size of fixed received data ... NSCA packets are 208+512 (720) or 208+4096 (4304)
        if not databuffer:
            logger.warning("[NSCA] Received an empty NSCA packet")
            return

        logger.debug("[NSCA] Received NSCA packet: %s", binascii.hexlify(databuffer))

        payload_length = len(databuffer) - 208
        if payload_length not in (512, 4096):
            logger.warning("[NSCA] Received packet with unusual payload length: %d.", payload_length)

        if self.payload_length != -1 and payload_length != self.payload_length:
            logger.warning("[NSCA] Dropping packet with incorrect payload length.")
            return

        (timestamp, rc, hostname, service, output) = self.read_check_result(databuffer, IV, payload_length)
        current_time = time.time()
        check_result_age = current_time - timestamp
        if timestamp > current_time and self.check_future_packet:
            logger.warning("[NSCA] Dropping packet with future timestamp.")
        elif check_result_age > self.max_packet_age:
            logger.info(
                "[NSCA] Dropping packet with stale timestamp - packet was %s seconds old. Timestamp: %s for %s/%s" % \
                (check_result_age, timestamp, hostname, service))
        else:
            self.post_command(timestamp, rc, hostname, service, output)
Example #25
def get_commands(time_stamps, hosts, services, return_codes, outputs):
    """Composing a command list based on the information received in
    POST request"""

    commands = []

    current_time_stamp = int(time.time())

    def _compose_command(t, h, s, r, o):
        """Simple function to create a command from the inputs"""
        if not s:
            cmd = '[%s] PROCESS_HOST_CHECK_RESULT;%s;%s;%s' % (t if t is not None else current_time_stamp, h, r, o)
        else:
            cmd = '[%s] PROCESS_SERVICE_CHECK_RESULT;%s;%s;%s;%s' % (t if t is not None else current_time_stamp, h, s, r, o)
        logger.debug("[Ws_arbiter] CMD: %s" % (cmd))
        commands.append(cmd)

    # Trivial case: empty command list
    if (return_codes is None or len(return_codes) == 0):
        return commands

    # Sanity check: if we get N return codes, we must have N hosts.
    # The other values could be None
    if (len(return_codes) != len(hosts)):
        logger.error("[Ws_arbiter] number of return codes (%d) does not match number of hosts (%d)" % (len(return_codes), len(hosts)))
        abort(400, "number of return codes does not match number of hosts")

    map(_compose_command, time_stamps, hosts, services, return_codes, outputs)
    logger.debug("[Ws_arbiter] commands = %s" % (str(commands)))
    return commands
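What _compose_command produces for one host check and one service check, given a fixed timestamp:

t = 1700000000
print('[%s] PROCESS_HOST_CHECK_RESULT;%s;%s;%s' % (t, 'web01', 0, 'OK'))
# -> [1700000000] PROCESS_HOST_CHECK_RESULT;web01;0;OK
print('[%s] PROCESS_SERVICE_CHECK_RESULT;%s;%s;%s;%s' % (t, 'web01', 'HTTP', 2, 'timeout'))
# -> [1700000000] PROCESS_SERVICE_CHECK_RESULT;web01;HTTP;2;timeout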
Example #26
    def set_ui_user_preference(self, user, key, value):
        if not self.is_connected:
            if not self.open():
                logger.error("[WebUI-MongoDBPreferences] error during initialization, no database connection!")
                return None

        if not user:
            logger.warning("[WebUI-MongoDBPreferences] error set_ui_user_preference, no user!")
            return None

        try:
            # Check if a document exists for this user
            u = self.db.ui_user_preferences.find_one({'_id': user.get_name()})
            if not u:
                # No document for this user? Create a new one
                self.db.ui_user_preferences.save({'_id': user.get_name(), key: value})

            r = self.db.ui_user_preferences.update({'_id': user.get_name()}, {'$set': {key: value}})
            # Maybe there was no doc there; if so, create an empty one
            if not r:
                # Maybe the user exists; if so, get the whole user entry
                u = self.db.ui_user_preferences.find_one({'_id': user.get_name()})
                if not u:
                    logger.debug("[WebUI-MongoDBPreferences] No user entry for %s, I create a new one", user.get_name())
                    self.db.ui_user_preferences.save({'_id': user.get_name(), key: value})
                else:  # ok, it was just the key that was missing; update it and save
                    u[key] = value
                    logger.debug("[WebUI-MongoDBPreferences] Just saving the new key in the user pref")
                    self.db.ui_user_preferences.save(u)
        except Exception, e:
            logger.warning("[WebUI-MongoDBPreferences] Exception: %s", str(e))
            self.is_connected = False
            return None
Example #27
    def hook_tick(self, brok):
        """Each second the broker calls the hook_tick function
        Every tick try to flush the buffer
        """

        if self.buffer == []:
            return

        # TODO: why do we need this?
        if self.ticks >= self.tick_limit:
            # If the number of ticks where data was not sent successfully
            # to the raw socket reaches the tick limit, reset the buffer
            # and the tick counter
            self.buffer = []
            self.ticks = 0
            return

        # Approximate memory size (shallow sizes of the buffered lines)
        if sum(x.__sizeof__() for x in self.buffer) > self.max_buffer_size:
            logger.debug("[RawSocket broker] Buffer size exceeded. I delete %d lines"
                         % self.lines_deleted)
            self.buffer = self.buffer[self.lines_deleted:]

        self.ticks += 1

        try:
            self.con.sendall('\n'.join(self.buffer).encode('UTF-8') + '\n')
        except IOError, err:
            logger.error("[RawSocket broker] Failed sending to the Raw network socket! IOError:%s"
                         % str(err))
            self.init()
            return
Example #28
    def manage_unknown_service_check_result_brok(self, b):
        data = b.data

        tags = {
            "host_name": data['host_name'],
            "service_description": data['service_description']
        }

        post_data = []

        post_data.extend(
            self.get_check_result_perfdata_points(
                b.data['perf_data'],
                b.data['time_stamp'],
                tags=tags
            )
        )

        try:
            logger.debug(
                "[influxdb broker] Generated points: %s" % str(post_data))
        except UnicodeEncodeError:
            pass

        self.extend_buffer(post_data)
Example #29
def do_recheck():
    # Get the information for the command
    time_stamp          = request.forms.get('time_stamp', int(time.time()))
    host_name           = request.forms.get('host_name', '')
    service_description = request.forms.get('service_description', '')
    logger.debug("[WS_Arbiter] Timestamp '%s' - host: '%s', service: '%s'" % (
        time_stamp, host_name, service_description))

    if not host_name:
        abort(400, 'Missing parameter host_name')

    if service_description:
        # SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time>
        command = '[%s] SCHEDULE_FORCED_SVC_CHECK;%s;%s;%s\n' % (time_stamp,
                                                                 host_name,
                                                                 service_description,
                                                                 time_stamp)
    else:
        # SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time>
        command = '[%s] SCHEDULE_FORCED_HOST_CHECK;%s;%s\n' % (time_stamp,
                                                               host_name,
                                                               time_stamp)

    # We check for auth if it's not anonymously allowed
    check_auth()

    # Add the command to the main queue
    logger.debug("[WS_Arbiter] command = %s" % command)
    ext = ExternalCommand(command)
    app.from_q.put(ext)
Example #30
    def manage_initial_broks_done_brok(self, b):
        if self.con is None:
            return
        logger.info("[Active Directory UI] AD/LDAP: manage_initial_broks_done_brok, go for pictures")

        searchScope = ldap.SCOPE_SUBTREE
        ## retrieve all attributes - again adjust to your needs - see documentation for more options
        #retrieveAttributes = ["userPrincipalName", "thumbnailPhoto", "samaccountname", "email"]

        logger.info("[Active Directory UI] Contacts? %d" % len(self.app.datamgr.get_contacts()))

        for c in self.app.datamgr.get_contacts():
            logger.debug("[Active Directory UI] Doing photo lookup for contact: %s" % c.get_name())
            elts = self.find_contact_entry(c)

            if elts is None:
                logger.warning("[Active Directory UI] No ldap entry for %s" % c.get_name())
                continue

            # Ok, try to get photo from the entry
            try:
                photo = elts[self.photo_attr][0]
                try:
                    p = os.path.join(self.app.photo_dir, c.get_name()+'.jpg')
                    f = open(p, 'wb')
                    f.write(photo)
                    f.close()
                    logger.info("[Active Directory UI] Photo written for %s" % c.get_name())
                except Exception, exp:
                    logger.error("[Active Directory UI] Cannot write %s : %s" % (p, str(exp)))
            except KeyError:
                logger.warning("[Active Directory UI] No photo for %s" % c.get_name())
Example #31
    def setup_new_conf(self):
        conf = self.new_conf
        self.new_conf = None
        self.cur_conf = conf
        # Got our name from the globals
        if 'receiver_name' in conf['global']:
            name = conf['global']['receiver_name']
        else:
            name = 'Unnamed receiver'
        self.name = name
        logger.load_obj(self, name)
        self.direct_routing = conf['global']['direct_routing']

        g_conf = conf['global']

        # Go through each scheduler of the new conf
        for sched_id in conf['schedulers']:

            already_got = False

            # We may already have this conf id, but with another address
            if sched_id in self.schedulers:
                new_addr = conf['schedulers'][sched_id]['address']
                old_addr = self.schedulers[sched_id]['address']
                new_port = conf['schedulers'][sched_id]['port']
                old_port = self.schedulers[sched_id]['port']
                # Both must be identical for it to be the same conf :)
                if new_addr == old_addr and new_port == old_port:
                    already_got = True

            if already_got:
                logger.info("[%s] We already got the conf %d (%s)" %
                            (self.name, sched_id,
                             conf['schedulers'][sched_id]['name']))
                wait_homerun = self.schedulers[sched_id]['wait_homerun']
                actions = self.schedulers[sched_id]['actions']
                external_commands = self.schedulers[sched_id][
                    'external_commands']
                con = self.schedulers[sched_id]['con']

            s = conf['schedulers'][sched_id]
            self.schedulers[sched_id] = s

            if s['name'] in g_conf['satellitemap']:
                s.update(g_conf['satellitemap'][s['name']])

            proto = 'http'
            if s['use_ssl']:
                proto = 'https'
            uri = '%s://%s:%s/' % (proto, s['address'], s['port'])

            self.schedulers[sched_id]['uri'] = uri
            if already_got:
                self.schedulers[sched_id]['wait_homerun'] = wait_homerun
                self.schedulers[sched_id]['actions'] = actions
                self.schedulers[sched_id][
                    'external_commands'] = external_commands
                self.schedulers[sched_id]['con'] = con
            else:
                self.schedulers[sched_id]['wait_homerun'] = {}
                self.schedulers[sched_id]['actions'] = {}
                self.schedulers[sched_id]['external_commands'] = []
                self.schedulers[sched_id]['con'] = None
            self.schedulers[sched_id]['running_id'] = 0
            self.schedulers[sched_id]['active'] = s['active']

            # Do not connect if we are a passive satellite
            if self.direct_routing and not already_got:
                # And then we connect to it :)
                self.pynag_con_init(sched_id)

        logger.debug("[%s] We received the configuration %s" % (self.name, conf))

        if not self.have_modules:
            self.modules = mods = conf['global']['modules']
            self.have_modules = True
            logger.info("We received modules %s " % mods)

        # Set the timezone the arbiter gave us
        use_timezone = conf['global']['use_timezone']
        if use_timezone != 'NOTSET':
            logger.info("Setting our timezone to %s" % use_timezone)
            os.environ['TZ'] = use_timezone
            time.tzset()

        # Now create the external commander. It's just here to dispatch
        # the commands to schedulers
        e = ExternalCommandManager(None, 'receiver')
        e.load_receiver(self)
        self.external_command = e
Example #32
    def set_proxy(self, proxy):
        if proxy:
            logger.debug('PROXY SETTING PROXY %s', proxy)
            self.get_con.setopt(pycurl.PROXY, proxy)
            self.post_con.setopt(pycurl.PROXY, proxy)
            self.put_con.setopt(pycurl.PROXY, proxy)
Example #33
    def check_auth(self, username, password):
        """ Check username/password.
            If there are submodules, this method calls them one by one until
            one of them returns True. If no submodule can authenticate the
            user, we fall back to the internal authentication methods:
            htpasswd file, then contact password.

            This method returns the user login if authentication succeeded, else None
        """
        self._user_login = None
        self._authenticator = None
        self._session = None
        self._user_info = None
        logger.info("[WebUI] Authenticating user '%s'", username)

        self.app.request.environ['MSG'] = "Unable to authenticate a user"

        if self.modules:
            for mod in self.modules:
                try:
                    logger.info("[WebUI] Authenticating user '%s' with %s",
                                username, mod.get_name())
                    if mod.check_auth(username, password):
                        logger.debug(
                            "[WebUI] User '%s' is authenticated thanks to %s",
                            username, mod.get_name())
                        self._authenticator = mod.get_name()
                        self._user_login = username

                        # Session identifier ?
                        f = getattr(mod, 'get_session', None)
                        if f and callable(f):
                            self._session = mod.get_session()
                            logger.info("[WebUI] User session: %s",
                                        self._session)

                        # User information ?
                        f = getattr(mod, 'get_user_info', None)
                        if f and callable(f):
                            self._user_info = mod.get_user_info()
                            logger.info("[WebUI] User info: %s",
                                        self._user_info)
                except Exception as exp:
                    logger.warning("[WebUI] Exception: %s", str(exp))
                    logger.warning("[WebUI] Back trace: %s",
                                   traceback.format_exc())

        if not self._user_login:
            logger.info("[WebUI] Internal htpasswd authentication")
            if self.app.htpasswd_file and self.check_apache_htpasswd_auth(
                    username, password):
                self._authenticator = 'htpasswd'
                self._user_login = username

        if not self._user_login:
            logger.info("[WebUI] Internal contact authentication")
            if self.check_cfg_password_auth(username, password):
                self._authenticator = 'contact'
                self._user_login = username

        if self._user_login:
            logger.info("[WebUI] user authenticated thanks to %s",
                        self._authenticator)
            self.app.request.environ['MSG'] = "Welcome to the WebUI"
            return self._user_login

        return None
Example #34
    def check_apache_htpasswd_auth(self, username, password):
        """ Embedded authentication with password in Apache htpasswd file.
            Function imported from auth-htpasswd module.
        """
        logger.info("[WebUI-auth-htpasswd] Authenticating user '%s'", username)

        try:
            f = open(self.app.htpasswd_file, 'r')
            for line in f.readlines():
                line = line.strip()
                # Bypass bad lines
                if ':' not in line:
                    continue
                if line.startswith('#'):
                    continue
                elts = line.split(':')
                name = elts[0]
                my_hash = elts[1]

                if my_hash[:5] == '$apr1' or my_hash[:3] == '$1$':
                    h = my_hash.split('$')
                    magic = h[1]
                    salt = h[2]
                elif my_hash[0] == '$':
                    h = my_hash.split('$')
                    magic = h[1]
                    salt = None
                else:
                    magic = None
                    salt = my_hash[:2]

                # If we match the username, look at the crypt
                if name == username:
                    valid_hash = False
                    if md5_available and magic == 'apr1':
                        valid_hash = (apache_md5_crypt(password,
                                                       salt) == my_hash)
                    elif md5_available and magic == '1':
                        valid_hash = (unix_md5_crypt(password,
                                                     salt) == my_hash)
                    elif passlib_available and magic is not None and magic.startswith('2'):
                        valid_hash = bcrypt.verify(password, my_hash)
                    elif passlib_available and magic == '5':
                        valid_hash = sha256_crypt.verify(password, my_hash)
                    elif passlib_available and magic == '6':
                        valid_hash = sha512_crypt.verify(password, my_hash)
                    elif magic is None:
                        valid_hash = (crypt.crypt(password, salt) == my_hash)

                    if valid_hash:
                        logger.info("[WebUI-auth-htpasswd] Authenticated")
                        return True
                else:
                    logger.debug(
                        "[WebUI-auth-htpasswd] Authentication failed, "
                        "invalid name: %s / %s", name, username)
        except Exception as exp:
            logger.info(
                "[WebUI-auth-htpasswd] Authentication against apache passwd "
                "file failed, exception: %s", str(exp))
        finally:
            try:
                f.close()
            except Exception:
                pass

        return False
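To exercise the '$apr1' branch above, a test entry can be generated with passlib (assuming passlib is installed; apr_md5_crypt is the Apache MD5 scheme):

from passlib.hash import apr_md5_crypt

entry = 'alice:' + apr_md5_crypt.hash('s3cret')
print(entry)  # e.g. alice:$apr1$rBhVmTRb$...
print(apr_md5_crypt.verify('s3cret', entry.split(':', 1)[1]))  # -> True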
Example #35
def user_login():
    logger.debug("[WebUI] user login request, remote user enabled: %s: %s",
                 app.remote_user_enable, app.remote_user_variable)
    for header in app.request.headers:
        logger.debug("[WebUI] X request header: %s = %s", header,
                     app.request.headers[header])

    err = app.request.GET.get('error', None)
    if err:
        logger.warning("[WebUI] login page with error message: %s", err)

    cookie_value = app.request.get_cookie(app.session_cookie,
                                          secret=app.auth_secret)
    if cookie_value:
        logger.info("[WebUI] user login request, existing cookie found: %s",
                    cookie_value)
        bottle.redirect(app.get_url("Dashboard"))

    elif app.remote_user_enable in ['1', '2']:
        logger.debug("[WebUI] user login request, no existing cookie found")
        if not err:
            user_name = None
            if app.remote_user_enable == '1':
                logger.debug("[WebUI] search %s in request headers",
                             app.remote_user_variable)
                if app.remote_user_variable in app.request.headers:
                    user_name = app.request.headers[app.remote_user_variable]
                    logger.debug(
                        "[WebUI] remote user found in request headers: %s",
                        user_name)

            if app.remote_user_enable == '2':
                logger.debug("[WebUI] search %s in WSGI environment",
                             app.remote_user_variable)
                if app.remote_user_variable in app.request.environ:
                    user_name = app.request.environ[app.remote_user_variable]
                    logger.debug(
                        "[WebUI] remote user found in WSGI environment: %s",
                        user_name)

            if not user_name:
                logger.warning(
                    "[WebUI] remote user is enabled but no authenticated "
                    "user name was found")
                bottle.redirect(app.get_url("GetLogin"))

            c = app.datamgr.get_contact(name=user_name)
            if c:
                cookie_value = {
                    'login': user_name,
                    'session': app.user_session,
                    'info': app.user_info
                }
                app.response.set_cookie(str(app.session_cookie),
                                        cookie_value,
                                        secret=app.auth_secret,
                                        path='/')
                bottle.redirect(app.get_url("Dashboard"))

    logger.info("[WebUI] session user message - get: %s",
                app.request.environ.get('MSG', 'None...'))

    return {
        'msg_text': err,
        'login_text': app.login_text,
        'company_logo': app.company_logo
    }
Example #36
    def dispatch(self):
        # Ok, we have passed through dispatch at least once, so from now on errors are real errors
        self.first_dispatch_done = True

        # If no dispatch is needed, do not dispatch :)
        if not self.dispatch_ok:
            for r in self.realms:
                conf_to_dispatch = [
                    cfg for cfg in r.confs.values() if not cfg.is_assigned
                ]
                nb_conf = len(conf_to_dispatch)
                if nb_conf > 0:
                    logger.info("Dispatching Realm %s" % r.get_name())
                    logger.info('[%s] Dispatching %d/%d configurations' %
                                (r.get_name(), nb_conf, len(r.confs)))

                # Now we get in scheds all scheduler of this realm and upper so
                # we will send them conf (in this order)
                scheds = self.get_scheduler_ordered_list(r)

                if nb_conf > 0:
                    print_string = '[%s] Schedulers order: %s' % (
                        r.get_name(), ','.join([s.get_name() for s in scheds]))
                    logger.info(print_string)

                # Try to send only for alive members
                scheds = [s for s in scheds if s.alive]

                # Now we do the real job
                # every_one_need_conf = False
                for conf in conf_to_dispatch:
                    logger.info('[%s] Dispatching configuration %s' %
                                (r.get_name(), conf.id))

                    # If there are no alive schedulers, that's not good...
                    if len(scheds) == 0:
                        logger.info(
                            '[%s] but there are no alive schedulers in this realm!'
                            % r.get_name())

                    # We need to loop until the conf is assigned
                    # or until there are no more schedulers available
                    while True:
                        try:
                            sched = scheds.pop()
                        except IndexError:  # No more schedulers.. not good, no loop
                            # need_loop = False
                            # The conf does not need to be dispatch
                            cfg_id = conf.id
                            for kind in ('reactionner', 'poller', 'broker',
                                         'receiver'):
                                r.to_satellites[kind][cfg_id] = None
                                r.to_satellites_need_dispatch[kind][
                                    cfg_id] = False
                                r.to_satellites_managed_by[kind][cfg_id] = []
                            break

                        logger.info(
                            '[%s] Trying to send conf %d to scheduler %s' %
                            (r.get_name(), conf.id, sched.get_name()))
                        if not sched.need_conf:
                            logger.info(
                                '[%s] The scheduler %s does not need a conf, sorry'
                                % (r.get_name(), sched.get_name()))
                            continue

                        # We tag conf with the instance_name = scheduler_name
                        instance_name = sched.scheduler_name
                        # We give this configuration a new 'flavor'
                        conf.push_flavor = random.randint(1, 1000000)
                        # REF: doc/shinken-conf-dispatching.png (3)
                        # REF: doc/shinken-scheduler-lost.png (2)
                        override_conf = sched.get_override_configuration()
                        satellites_for_sched = r.get_satellites_links_for_scheduler(
                        )
                        s_conf = r.serialized_confs[conf.id]
                        # Prepare the conf before sending it
                        conf_package = {
                            'conf': s_conf,
                            'override_conf': override_conf,
                            'modules': sched.modules,
                            'satellites': satellites_for_sched,
                            'instance_name': sched.scheduler_name,
                            'push_flavor': conf.push_flavor,
                            'skip_initial_broks': sched.skip_initial_broks,
                        }

                        t1 = time.time()
                        is_sent = sched.put_conf(conf_package)
                        logger.debug("Conf sent in %.2fs" % (time.time() - t1))
                        if not is_sent:
                            logger.warning(
                                '[%s] configuration dispatching error for scheduler %s'
                                % (r.get_name(), sched.get_name()))
                            continue

                        logger.info(
                            '[%s] Dispatch OK of conf in scheduler %s' %
                            (r.get_name(), sched.get_name()))

                        sched.conf = conf
                        sched.push_flavor = conf.push_flavor
                        sched.need_conf = False
                        conf.is_assigned = True
                        conf.assigned_to = sched

                        # We update all data for this scheduler
                        sched.managed_confs = {conf.id: conf.push_flavor}

                        # Now we generate the conf for satellites:
                        cfg_id = conf.id
                        for kind in ('reactionner', 'poller', 'broker',
                                     'receiver'):
                            r.to_satellites[kind][
                                cfg_id] = sched.give_satellite_cfg()
                            r.to_satellites_need_dispatch[kind][cfg_id] = True
                            r.to_satellites_managed_by[kind][cfg_id] = []

                        # Ok, the conf is dispatched, no more loop for this
                        # configuration
                        break

            # We popped every conf to dispatch, so there should be none left...
            conf_to_dispatch = [
                cfg for cfg in self.conf.confs.values() if not cfg.is_assigned
            ]
            nb_missed = len(conf_to_dispatch)
            if nb_missed > 0:
                logger.warning(
                    "Not all scheduler configurations were dispatched, %d are missing"
                    % nb_missed)
            else:
                logger.info(
                    "OK, all scheduler configurations are dispatched :)")
                self.dispatch_ok = True

            # Schedulers left without a conf after a successful dispatch are
            # flagged as not needing one, so they do not trigger useless re-dispatches
            if self.dispatch_ok:
                for sched in self.schedulers.items.values():
                    if sched.conf is None:
                        # print "Tagging sched", sched.get_name(), "so it do not ask anymore for conf"
                        sched.need_conf = False

            arbiters_cfg = {}
            for arb in self.arbiters:
                arbiters_cfg[arb.id] = arb.give_satellite_cfg()

            # We send the satellites their conf the "new" way, so they see only what we want
            for r in self.realms:
                for cfg in r.confs.values():
                    cfg_id = cfg.id
                    # flavor is the push number of this configuration sent to a scheduler
                    flavor = cfg.push_flavor
                    for kind in ('reactionner', 'poller', 'broker',
                                 'receiver'):
                        if r.to_satellites_need_dispatch[kind][cfg_id]:
                            cfg_for_satellite_part = r.to_satellites[kind][
                                cfg_id]

                            # make copies of potential_react list for sort
                            satellites = []
                            for satellite in r.get_potential_satellites_by_type(
                                    kind):
                                satellites.append(satellite)
                            satellites.sort(alive_then_spare_then_deads)

                            # Only keep satellites that are alive and reachable
                            satellites = [
                                s for s in satellites
                                if s.alive and s.reachable
                            ]

                            # If we are dispatching to brokers, rotate the list so a
                            # different broker is popped first for each scheduler;
                            # this smooths the load. But the spares must stay at the end ;)
                            if kind == "broker":
                                nospare = [
                                    s for s in satellites if not s.spare
                                ]
                                # Only rotate if there is at least one non-spare broker
                                if len(nospare) != 0:
                                    idx = cfg_id % len(nospare)
                                    spares = [s for s in satellites if s.spare]
                                    # Rotate the non-spare list by idx, then put
                                    # the spares back at the end
                                    new_satellites = nospare[idx:]
                                    for _b in nospare[:idx]:
                                        if _b not in new_satellites:
                                            new_satellites.append(_b)
                                    satellites = new_satellites
                                    satellites.extend(spares)

                            # Dump the order where we will send conf
                            satellite_string = "[%s] Dispatching %s satellite with order: " % (
                                r.get_name(), kind)
                            for satellite in satellites:
                                satellite_string += '%s (spare:%s), ' % (
                                    satellite.get_name(), str(satellite.spare))
                            logger.info(satellite_string)

                            # Now we dispatch the cfg to everyone that asks for it
                            nb_cfg_sent = 0
                            for satellite in satellites:
                                # Send only if we need, and if we can
                                if nb_cfg_sent < r.get_nb_of_must_have_satellites(
                                        kind) and satellite.alive:
                                    satellite.cfg['schedulers'][
                                        cfg_id] = cfg_for_satellite_part
                                    if satellite.manage_arbiters:
                                        satellite.cfg[
                                            'arbiters'] = arbiters_cfg

                                    # Brokers should have poller/reactionners links too
                                    if kind == "broker":
                                        r.fill_broker_with_poller_reactionner_links(
                                            satellite)

                                    is_sent = False
                                    # Maybe this satellite already got this configuration, so skip it
                                    if satellite.do_i_manage(cfg_id, flavor):
                                        logger.info(
                                            '[%s] Skipping configuration %d for the %s %s: it already has it'
                                            % (r.get_name(), cfg_id, kind,
                                               satellite.get_name()))
                                        is_sent = True
                                    else:  # ok, it really needs it :)
                                        logger.info(
                                            '[%s] Trying to send configuration to %s %s'
                                            % (r.get_name(), kind,
                                               satellite.get_name()))
                                        is_sent = satellite.put_conf(
                                            satellite.cfg)

                                    if is_sent:
                                        satellite.active = True
                                        logger.info(
                                            '[%s] Dispatch OK of configuration %s to %s %s'
                                            % (r.get_name(), cfg_id, kind,
                                               satellite.get_name()))
                                        # We change the satellite configuration, update our data
                                        satellite.known_conf_managed_push(
                                            cfg_id, flavor)

                                        nb_cfg_sent += 1
                                        r.to_satellites_managed_by[kind][
                                            cfg_id].append(satellite)

                                        # If we got a broker, the conf_id must be sent to only ONE
                                        # broker, so here it's done, we are happy.
                                        if kind == "broker":
                                            break

                                        # If receiver, we must send the hostnames of this configuration
                                        if kind == 'receiver':
                                            hnames = [
                                                h.get_name() for h in cfg.hosts
                                            ]
                                            logger.debug(
                                                "[%s] Sending %s hostnames to the receiver %s"
                                                % (r.get_name(), len(hnames),
                                                   satellite.get_name()))
                                            satellite.push_host_names(
                                                cfg_id, hnames)
                            # else: we already have enough satellites, the
                            # next ones are considered spares
                            if nb_cfg_sent == r.get_nb_of_must_have_satellites(
                                    kind):
                                logger.info("[%s] OK, no more %s sent need" %
                                            (r.get_name(), kind))
                                r.to_satellites_need_dispatch[kind][
                                    cfg_id] = False

            # And now we dispatch receivers. It's easier, they need ONE conf
            # in all their life :)
            for r in self.realms:
                for rec in r.receivers:
                    if rec.need_conf:
                        logger.info(
                            '[%s] Trying to send configuration to receiver %s'
                            % (r.get_name(), rec.get_name()))
                        is_sent = rec.put_conf(rec.cfg)
                        if is_sent:
                            rec.active = True
                            rec.need_conf = False
                            logger.info(
                                '[%s] Dispatch OK of configuration to receiver %s'
                                % (r.get_name(), rec.get_name()))
                        else:
                            logger.error(
                                '[%s] Dispatching failed for receiver %s' %
                                (r.get_name(), rec.get_name()))
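A minimal standalone sketch of the broker load-smoothing used above (function and attribute names are illustrative; only the 'spare' flag comes from the code): rotating the non-spare list by the configuration id makes a different broker come first for each conf, while spares stay last.

def smooth_broker_order(satellites, cfg_id):
    # Split the satellites into workers and spares (spares stay last)
    nospare = [s for s in satellites if not s.spare]
    spares = [s for s in satellites if s.spare]
    if not nospare:
        return spares
    # Rotate by cfg_id so each configuration tries a different broker first
    idx = cfg_id % len(nospare)
    return nospare[idx:] + nospare[:idx] + spares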
Example #37
    def execute_query(self, query):
        """Just run the query"""
        logger.debug("[SqliteDB] Info: I run query '%s'" % query)
        self.db_cursor.execute(query)
        self.db.commit()
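A self-contained usage sketch of the execute-then-commit pattern above, using the standard sqlite3 module (the database and table here are illustrative, not part of the module above):

import sqlite3

db = sqlite3.connect(':memory:')
db_cursor = db.cursor()
# Same pattern as execute_query: run the statement, then commit
db_cursor.execute("CREATE TABLE logs (time INTEGER, message TEXT)")
db.commit()
db_cursor.execute("INSERT INTO logs VALUES (?, ?)", (0, 'hello'))
db.commit()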
Example #38
    def handle_request_and_fail(self, data):
        """Execute the livestatus request.

        This function creates a LiveStatusRequest method, calls the parser,
        handles the execution of the request and formatting of the result.

        """
        request = LiveStatusRequest(data, self.datamgr, self.query_cache,
                                    self.db, self.pnp_path, self.return_queue,
                                    self.counters)
        request.parse_input(data)
        if sorted([q.my_type
                   for q in request.queries]) == ['command', 'query', 'wait']:
            # The Multisite way
            for query in [
                    q for q in request.queries if q.my_type == 'command'
            ]:
                result = query.launch_query()
                response = query.response
                response.format_live_data(result, query.columns, query.aliases)
                output, keepalive = response.respond()
            output = [q for q in request.queries if q.my_type == 'wait'
                      ] + [q for q in request.queries if q.my_type == 'query']
        elif sorted([q.my_type for q in request.queries]) == ['query', 'wait']:
            # The Thruk way
            output = [q for q in request.queries if q.my_type == 'wait'
                      ] + [q for q in request.queries if q.my_type == 'query']
            keepalive = True
        elif sorted([q.my_type
                     for q in request.queries]) == ['command', 'query']:
            for query in [
                    q for q in request.queries if q.my_type == 'command'
            ]:
                result = query.launch_query()
                response = query.response
                response.format_live_data(result, query.columns, query.aliases)
                output, keepalive = response.respond()
            for query in [q for q in request.queries if q.my_type == 'query']:
                # This was a simple query, respond immediately
                result = query.launch_query()
                # Now bring the retrieved information to a form which can be sent back to the client
                response = query.response
                response.format_live_data(result, query.columns, query.aliases)
                output, keepalive = response.respond()

        elif sorted([q.my_type for q in request.queries]) == ['query']:
            for query in [q for q in request.queries if q.my_type == 'query']:
                # This was a simple query, respond immediately
                result = query.launch_query()
                # Now bring the retrieved information to a form which can be sent back to the client
                response = query.response
                response.format_live_data(result, query.columns, query.aliases)
                output, keepalive = response.respond()
        elif sorted([q.my_type for q in request.queries]) == ['command']:
            for query in [
                    q for q in request.queries if q.my_type == 'command'
            ]:
                result = query.launch_query()
                response = query.response
                response.format_live_data(result, query.columns, query.aliases)
                output, keepalive = response.respond()
        elif [q.my_type for q in request.queries
              if q.my_type != 'command'] == []:
            # Only external commands. Thruk uses it when it sends multiple
            # objects into a downtime.
            for query in [
                    q for q in request.queries if q.my_type == 'command'
            ]:
                result = query.launch_query()
                response = query.response
                response.format_live_data(result, query.columns, query.aliases)
                output, keepalive = response.respond()
        else:
            # We currently do not handle this kind of composed request
            output = ""
            logger.error(
                "[Livestatus] We currently do not handle this kind of composed request: %s"
                % sorted([q.my_type for q in request.queries]))

        logger.debug("[Livestatus] Request duration %.4fs" %
                     (time.time() - request.tic))
        return output, keepalive
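The branching above keys on the sorted list of query types; a toy sketch of that classification (the type names come from the code above, everything else is illustrative):

def classify(my_types):
    # Sort so the composition check is order-independent, as done above
    kinds = sorted(my_types)
    if kinds == ['command', 'query', 'wait']:
        return 'multisite'
    if kinds == ['query', 'wait']:
        return 'thruk'
    if kinds in (['command', 'query'], ['query'], ['command']):
        return 'simple'
    if kinds and all(k == 'command' for k in kinds):
        return 'commands-only'
    return 'unsupported'

print classify(['wait', 'query'])  # -> thruk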
Example #39
def do_publish(to_pack='.'):
    logger.debug("WILL CALL PUBLISH.py with %s" % to_pack)
    archive = create_archive(to_pack)
    publish_archive(archive)
Example #40
    def setup_new_conf(self):
        conf = self.new_conf
        self.new_conf = None
        self.cur_conf = conf
        # Got our name from the globals
        if 'receiver_name' in conf['global']:
            name = conf['global']['receiver_name']
        else:
            name = 'Unnamed receiver'
        self.name = name
        self.api_key = conf['global']['api_key']
        self.secret = conf['global']['secret']
        self.http_proxy = conf['global']['http_proxy']
        self.statsd_host = conf['global']['statsd_host']
        self.statsd_port = conf['global']['statsd_port']
        self.statsd_prefix = conf['global']['statsd_prefix']
        self.statsd_enabled = conf['global']['statsd_enabled']

        statsmgr.register(self, self.name, 'receiver',
                          api_key=self.api_key, secret=self.secret, http_proxy=self.http_proxy,
                          statsd_host=self.statsd_host, statsd_port=self.statsd_port,
                          statsd_prefix=self.statsd_prefix, statsd_enabled=self.statsd_enabled)
        logger.load_obj(self, name)
        self.direct_routing = conf['global']['direct_routing']
        self.accept_passive_unknown_check_results = \
            conf['global']['accept_passive_unknown_check_results']

        g_conf = conf['global']

        # If we've got something in the schedulers, we do not want it anymore
        for sched_id in conf['schedulers']:

            already_got = False

            # We may already have this conf id, but with another address
            if sched_id in self.schedulers:
                new_addr = conf['schedulers'][sched_id]['address']
                old_addr = self.schedulers[sched_id]['address']
                new_port = conf['schedulers'][sched_id]['port']
                old_port = self.schedulers[sched_id]['port']
                # Address and port must both match for it to be the same conf :)
                if new_addr == old_addr and new_port == old_port:
                    already_got = True

            if already_got:
                logger.info("[%s] We already got the conf %d (%s)",
                            self.name, sched_id, conf['schedulers'][sched_id]['name'])
                wait_homerun = self.schedulers[sched_id]['wait_homerun']
                actions = self.schedulers[sched_id]['actions']
                external_commands = self.schedulers[sched_id]['external_commands']
                con = self.schedulers[sched_id]['con']

            s = conf['schedulers'][sched_id]
            self.schedulers[sched_id] = s

            if s['name'] in g_conf['satellitemap']:
                s.update(g_conf['satellitemap'][s['name']])

            proto = 'http'
            if s['use_ssl']:
                proto = 'https'
            uri = '%s://%s:%s/' % (proto, s['address'], s['port'])

            self.schedulers[sched_id]['uri'] = uri
            if already_got:
                self.schedulers[sched_id]['wait_homerun'] = wait_homerun
                self.schedulers[sched_id]['actions'] = actions
                self.schedulers[sched_id]['external_commands'] = external_commands
                self.schedulers[sched_id]['con'] = con
            else:
                self.schedulers[sched_id]['wait_homerun'] = {}
                self.schedulers[sched_id]['actions'] = {}
                self.schedulers[sched_id]['external_commands'] = []
                self.schedulers[sched_id]['con'] = None
            self.schedulers[sched_id]['running_id'] = 0
            self.schedulers[sched_id]['active'] = s['active']
            self.schedulers[sched_id]['timeout'] = s['timeout']
            self.schedulers[sched_id]['data_timeout'] = s['data_timeout']

            # Do not connect if we are a passive satellite
            if self.direct_routing and not already_got:
                # And then we connect to it :)
                self.pynag_con_init(sched_id)

        logger.debug("[%s] Sending us configuration %s", self.name, conf)

        if not self.have_modules:
            self.modules = mods = conf['global']['modules']
            self.have_modules = True
            logger.info("We received modules %s ", mods)

        # Set the timezone given by the arbiter
        use_timezone = conf['global']['use_timezone']
        if use_timezone != 'NOTSET':
            logger.info("Setting our timezone to %s", use_timezone)
            os.environ['TZ'] = use_timezone
            time.tzset()
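A compact sketch of the 'already got this conf' test used above (the dict layout is taken from the code; none of this is a public API):

def already_got(known, incoming, sched_id):
    # A scheduler conf is only considered known if both the address
    # and the port match what we already hold for that id
    if sched_id not in known:
        return False
    old, new = known[sched_id], incoming[sched_id]
    return new['address'] == old['address'] and new['port'] == old['port']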
Example #41
def user_login():
    logger.debug("[WebUI] user login request, remote user enabled: %s: %s",
                 app.remote_user_enable, app.remote_user_variable)
    for header in app.request.headers:
        logger.debug("[WebUI] X request header: %s = %s", header,
                     app.request.headers[header])

    err = app.request.GET.get('error', None)
    if err:
        logger.warning("[WebUI] login page with error message: %s", err)

    cookie_value = app.request.get_cookie(app.session_cookie,
                                          secret=app.auth_secret)
    if cookie_value:
        logger.info("[WebUI] user login request, existing cookie found: %s",
                    cookie_value)
        # For Alignak backend
        if app.alignak_backend_endpoint:
            if 'session' in cookie_value:
                app.user_session = cookie_value['session']
                # For alignak backend
                try:
                    if app.frontend.connect(app.user_session):
                        bottle.redirect(app.get_url("Dashboard"))
                except Exception:
                    pass

                app.response.set_cookie(str(app.session_cookie),
                                        '',
                                        secret=app.auth_secret,
                                        path='/')
                bottle.redirect("/user/login")

        bottle.redirect(app.get_url("Dashboard"))

    elif app.remote_user_enable in ['1', '2']:
        logger.debug("[WebUI] user login request, no existing cookie found")
        if not err:
            user_name = None
            if app.remote_user_enable == '1':
                logger.debug("[WebUI] search %s in request headers",
                             app.remote_user_variable)
                if app.remote_user_variable in app.request.headers:
                    user_name = app.request.headers[app.remote_user_variable]
                    logger.debug(
                        "[WebUI] remote user found in request headers: %s",
                        user_name)

            if app.remote_user_enable == '2':
                logger.debug("[WebUI] search %s in WSGI environment",
                             app.remote_user_variable)
                if app.remote_user_variable in app.request.environ:
                    user_name = app.request.environ[app.remote_user_variable]
                    logger.debug(
                        "[WebUI] remote user found in WSGI environment: %s",
                        user_name)

            if not user_name:
                logger.warning(
                    "[WebUI] remote user is enabled but no authenticated user name was found"
                )
                bottle.redirect(app.get_url("GetLogin"))

            c = app.datamgr.get_contact(name=user_name)
            if c:
                cookie_value = {
                    'login': user_name,
                    'session': app.user_session,
                    'info': app.user_info
                }
                app.response.set_cookie(str(app.session_cookie),
                                        cookie_value,
                                        secret=app.auth_secret,
                                        path='/')
                bottle.redirect(app.get_url("Dashboard"))

    return {
        'msg_text': err,
        'login_text': app.login_text,
        'company_logo': app.company_logo,
        'path': app.request.query.get('path', '')
    }
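A minimal sketch of the signed-cookie round trip used above, with bottle's secret= support (the cookie name, secret and routes are illustrative):

import bottle

@bottle.route('/set')
def set_session():
    # With secret=, bottle signs the cookie so it cannot be forged
    bottle.response.set_cookie('user_session', {'login': 'admin'},
                               secret='change_me', path='/')
    return 'cookie set'

@bottle.route('/get')
def get_session():
    # Returns None if the cookie is missing or its signature is invalid
    value = bottle.request.get_cookie('user_session', secret='change_me')
    return 'found %s' % value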
Example #42
def install_package(pname, raw):
    logger.debug("Installing the package %s (size:%d)" % (pname, len(raw)))
    if len(raw) == 0:
        logger.error('The package %s cannot be found' % pname)
        return
    tmpdir = os.path.join(tempfile.gettempdir(), pname)
    logger.debug("Unpacking the package into %s" % tmpdir)

    if os.path.exists(tmpdir):
        logger.debug("Removing previous tmp dir %s" % tmpdir)
        shutil.rmtree(tmpdir)
    logger.debug("Creating temporary dir %s" % tmpdir)
    os.mkdir(tmpdir)

    package_content = []

    # open a file with the content
    f = StringIO(raw)
    tar_file = tarfile.open(fileobj=f, mode="r")
    logger.debug("Tar file contents:")
    for i in tar_file.getmembers():
        path = i.name
        if path == '.':
            continue
        if path.startswith('/') or '..' in path:
            logger.error("SECURITY: the path %s seems dangerous!" % path)
            return
        # Adding all files into the package_content list
        package_content.append({
            'name': i.name,
            'mode': i.mode,
            'type': i.type,
            'size': i.size
        })
        logger.debug("\t%s" % path)
    # Extract all in the tmpdir
    tar_file.extractall(tmpdir)
    tar_file.close()

    # Now we look at the package.json that will give us our name and co
    package_json_p = os.path.join(tmpdir, 'package.json')
    if not os.path.exists(package_json_p):
        logger.error("Error : bad archive : Missing file %s" % package_json_p)
        return None
    package_json = read_package_json(open(package_json_p))
    logger.debug("Package.json content %s " % package_json)

    modules_dir = CONFIG['paths']['modules']
    share_dir = CONFIG['paths']['share']
    packs_dir = CONFIG['paths']['packs']
    etc_dir = CONFIG['paths']['etc']
    doc_dir = CONFIG['paths']['doc']
    inventory_dir = CONFIG['paths']['inventory']
    libexec_dir = CONFIG['paths'].get(
        'libexec', os.path.join(CONFIG['paths']['lib'], 'libexec'))
    test_dir = CONFIG['paths'].get('test', '/__DONOTEXISTS__')
    for d in (modules_dir, share_dir, packs_dir, doc_dir, inventory_dir):
        if not os.path.exists(d):
            logger.error("The installation directory %s is missing!" % d)
            return

    # Now install the package from $TMP$/share/* to $SHARE$/*
    p_share = os.path.join(tmpdir, 'share')
    logger.debug("TMPDIR:%s aahre_dir:%s pname:%s" %
                 (tmpdir, share_dir, pname))
    if os.path.exists(p_share):
        logger.info("Installing the share package data")
        # shutil will create the target dir
        _copytree(p_share, share_dir)
        logger.info("Copy done in the share directory %s" % share_dir)

    logger.debug("TMPDIR:%s modules_dir:%s pname:%s" %
                 (tmpdir, modules_dir, pname))
    # Now install the package from $TMP$/module/* to $MODULES$/pname/*
    p_module = os.path.join(tmpdir, 'module')
    if os.path.exists(p_module):
        logger.info("Installing the module package data")
        mod_dest = os.path.join(modules_dir, pname)
        if os.path.exists(mod_dest):
            logger.info("Removing previous module install at %s" % mod_dest)

            shutil.rmtree(mod_dest)
        # shutil will create the target dir
        shutil.copytree(p_module, mod_dest)
        logger.info("Copy done in the module directory %s" % mod_dest)

    p_doc = os.path.join(tmpdir, 'doc')
    logger.debug("TMPDIR:%s doc_dir:%s pname:%s" % (tmpdir, doc_dir, pname))
    # Now install the package from $TMP$/doc/* to $DOC$/source/89_packages/pname/*
    if os.path.exists(p_doc):
        logger.info("Installing the doc package data")
        doc_dest = os.path.join(doc_dir, 'source', '89_packages', pname)
        if os.path.exists(doc_dest):
            logger.info("Removing previous doc install at %s" % doc_dest)

            shutil.rmtree(doc_dest)
        # shutil will create the target dir
        shutil.copytree(p_doc, doc_dest)
        logger.info("Copy done in the doc directory %s" % doc_dest)

    # Now install the pack from $TMP$/pack/* to $PACKS$/pname/*
    p_pack = os.path.join(tmpdir, 'pack')
    if os.path.exists(p_pack):
        logger.info("Installing the pack package data")
        pack_dest = os.path.join(packs_dir, pname)
        if os.path.exists(pack_dest):
            logger.info("Removing previous pack install at %s" % pack_dest)
            shutil.rmtree(pack_dest)
        # shutil will create the target dir
        shutil.copytree(p_pack, pack_dest)
        logger.info("Copy done in the pack directory %s" % pack_dest)

    # Now install the etc from $TMP$/etc/* to $ETC$/etc/*
    p_etc = os.path.join(tmpdir, 'etc')
    if os.path.exists(p_etc):
        logger.info("Merging the etc package data into your etc directory")
        # We don't use shutil.copytree because it NEEDS etc_dir to be non-existent...
        # Come on guys..... cp is not as terrible as this...
        _copytree(p_etc, etc_dir)
        logger.info("Copy done in the etc directory %s" % etc_dir)

    # Now install the tests from $TMP$/test/* to $TESTS$/test/*
    # if the latter is specified in the configuration file (optional)
    p_tests = os.path.join(tmpdir, 'test')
    if os.path.exists(p_tests) and os.path.exists(test_dir):
        logger.info("Merging the test package data into your test directory")
        # We don't use shutil.copytree because it NEEDS test_dir to be non-existent...
        # Come on guys..... cp is not as terrible as this...
        logger.debug("COPYING %s into %s" % (p_tests, test_dir))
        _copytree(p_tests, test_dir)
        logger.info("Copy done in the test directory %s" % test_dir)

    # Now install the libexec things from $TMP$/libexec/* to $LIBEXEC$/*
    # but also chmod a+x the plugins copied
    p_libexec = os.path.join(tmpdir, 'libexec')
    if os.path.exists(p_libexec) and os.path.exists(libexec_dir):
        logger.info(
            "Merging the libexec package data into your libexec directory")
        logger.debug("COPYING %s into %s" % (p_libexec, libexec_dir))
        # Before be sure all files in there are +x
        _chmodplusx(p_libexec)
        _copytree(p_libexec, libexec_dir)
        logger.info("Copy done in the libexec directory %s" % libexec_dir)

    # then save the package.json into the inventory dir
    p_inv = os.path.join(inventory_dir, pname)
    if not os.path.exists(p_inv):
        os.mkdir(p_inv)
    shutil.copy2(package_json_p, os.path.join(p_inv, 'package.json'))
    # and the package content
    cont = open(os.path.join(p_inv, 'content.json'), 'w')
    cont.write(json.dumps(package_content))
    cont.close()

    # We now clean (rm) the tmpdir we don't need any more
    try:
        shutil.rmtree(tmpdir, ignore_errors=True)
    except OSError:
        # cannot remove? not a crime
        pass

    # THE END, output all is OK :D
    cprint('OK ', 'green', end='')
    cprint('%s' % pname)
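The SECURITY check above refuses absolute paths and parent-directory traversal before extraction; a standalone sketch of that guard (the helper name is illustrative):

import tarfile

def safe_members(tar_file):
    # Yield only members whose paths cannot escape the extraction dir
    for member in tar_file.getmembers():
        path = member.name
        if path == '.':
            continue
        if path.startswith('/') or '..' in path:
            raise ValueError("SECURITY: the path %s seems dangerous!" % path)
        yield member

# usage: tar_file.extractall(tmpdir, members=safe_members(tar_file))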
Example #43
    def push_external_commands_to_schedulers(self):
        # If we are not in direct routing mode, just bail out after
        # faking resolving the commands
        if not self.direct_routing:
            self.external_commands.extend(self.unprocessed_external_commands)
            self.unprocessed_external_commands = []
            return

        # Now get all external commands and put them into the
        # right schedulers
        for ext_cmd in self.unprocessed_external_commands:
            self.external_command.resolve_command(ext_cmd)
            self.external_commands.append(ext_cmd)

        # And clean the previous one
        self.unprocessed_external_commands = []

        # Now for all alive schedulers, send the commands
        for sched_id in self.schedulers:
            sched = self.schedulers[sched_id]
            extcmds = sched['external_commands']
            cmds = [extcmd.cmd_line for extcmd in extcmds]
            con = sched.get('con', None)
            sent = False
            if not con:
                logger.warning("The scheduler is not connected" % sched)
                self.pynag_con_init(sched_id)
                con = sched.get('con', None)

            # If there are commands and the scheduler is alive
            if len(cmds) > 0 and con:
                logger.debug("Sending %d commands to scheduler %s" %
                             (len(cmds), sched))
                try:
                    con.run_external_commands(cmds)
                    sent = True
                # Not connected or sched is gone
                except (Pyro_exp_pack, KeyError), exp:
                    logger.debug('manage_returns exception:: %s,%s ' %
                                 (type(exp), str(exp)))
                    try:
                        logger.debug(
                            ''.join(PYRO_VERSION < "4.0"
                                    and Pyro.util.getPyroTraceback(exp)
                                    or Pyro.util.getPyroTraceback()))
                    except:
                        pass
                    self.pynag_con_init(sched_id)
                    return
                except AttributeError, exp:  # the scheduler may not be initialized yet
                    logger.debug('manage_returns exception:: %s,%s ' %
                                 (type(exp), str(exp)))
                except Exception, exp:
                    logger.error(
                        "A satellite raised an unknown exception: %s (%s)" %
                        (exp, type(exp)))
                    try:
                        logger.debug(
                            ''.join(PYRO_VERSION < "4.0"
                                    and Pyro.util.getPyroTraceback(exp)
                                    or Pyro.util.getPyroTraceback()))
                    except:
                        pass
                    raise
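A small sketch of the reconnect-then-send guard used above (the scheduler dict layout is assumed from the code; the reconnect callable is illustrative):

def send_commands(sched, cmds, reconnect):
    # Try to reuse the existing connection; reconnect once if it is gone
    con = sched.get('con', None)
    if not con:
        reconnect()
        con = sched.get('con', None)
    # Send only if there is something to send and a live link
    if cmds and con:
        con.run_external_commands(cmds)
        return True
    return False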
Example #44
    c.setopt(pycurl.WRITEFUNCTION, response.write)
    #c.setopt(c.VERBOSE, 1)
    try:
        c.perform()
    except pycurl.error, exp:
        logger.error("There was a critical error : %s" % exp)
        return ''

    r = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    if r != 200:
        logger.error("There was a critical error : %s" % response.getvalue())
        sys.exit(2)
    else:
        ret = response.getvalue()
        logger.debug("CURL result len : %d " % len(ret))
        return ret


def grab_local(d):
    # First try to look if the directory we are trying to pack is valid
    to_pack = os.path.abspath(d)
    if not os.path.exists(to_pack):
        err = "Error : the directory to install is missing %s" % to_pack
        logger.error(err)
        raise Exception(err)

    package_json_p = os.path.join(to_pack, 'package.json')
    if not os.path.exists(package_json_p):
        logger.error("Error : Missing file %s" % package_json_p)
        sys.exit(2)
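A self-contained sketch of the pycurl fetch pattern used above (Python 2; the error handling shown in the code above is omitted here):

import pycurl
from StringIO import StringIO

def fetch(url):
    response = StringIO()
    c = pycurl.Curl()
    c.setopt(pycurl.URL, url)
    # Collect the body into the StringIO buffer
    c.setopt(pycurl.WRITEFUNCTION, response.write)
    c.perform()
    code = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    return code, response.getvalue()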
Example #45
    def wait_new_conf(self):
        logger.debug("Arbiter wants me to wait for a new configuration")
        self.app.sched.die()
        super(IForArbiter, self).wait_new_conf()
Example #46
    def __init__(self,
                 sqlite_cursor=None,
                 sqlite_row=None,
                 line=None,
                 srcdict=None):
        if srcdict is not None:
            for col in Logline.columns:
                logger.info("[Livestatus Log Lines] Set %s, %s" %
                            (col, srcdict[col]))
                setattr(self, col, srcdict[col])
        elif sqlite_cursor is not None and sqlite_row is not None:
            for idx, col in enumerate(sqlite_cursor):
                if col[0] == 'class':
                    setattr(self, 'logclass', sqlite_row[idx])
                else:
                    setattr(self, col[0], sqlite_row[idx])
        elif line is not None:
            if isinstance(line, unicode):
                line = line.encode('UTF-8').rstrip()

            # [1278280765] SERVICE ALERT: test_host_0
            if line[0] != '[' or line[11] != ']':
                logger.warning("[Livestatus Log Lines] Invalid line: %s" %
                               line)
                raise LoglineWrongFormat
            else:
                service_states = {
                    'OK': 0,
                    'WARNING': 1,
                    'CRITICAL': 2,
                    'UNKNOWN': 3,
                    'RECOVERY': 0
                }
                host_states = {
                    'UP': 0,
                    'DOWN': 1,
                    'UNREACHABLE': 2,
                    'UNKNOWN': 3,
                    'RECOVERY': 0
                }

                # type is 0:info, 1:state, 2:program, 3:notification, 4:passive, 5:command
                logobject = LOGOBJECT_INFO
                logclass = LOGCLASS_INVALID
                attempt, state = [0] * 2
                command_name, comment, contact_name, host_name, message, plugin_output, service_description, state_type = [
                    ''
                ] * 8
                time = line[1:11]
                first_type_pos = line.find(' ') + 1
                last_type_pos = line.find(':')
                first_detail_pos = last_type_pos + 2
                type = line[first_type_pos:last_type_pos]
                options = line[first_detail_pos:]
                message = line
                if type == 'CURRENT SERVICE STATE':
                    logobject = LOGOBJECT_SERVICE
                    logclass = LOGCLASS_STATE
                    host_name, service_description, state, state_type, attempt, plugin_output = options.split(
                        ';', 5)
                elif type == 'INITIAL SERVICE STATE':
                    logobject = LOGOBJECT_SERVICE
                    logclass = LOGCLASS_STATE
                    host_name, service_description, state, state_type, attempt, plugin_output = options.split(
                        ';', 5)
                elif type == 'SERVICE ALERT':
                    # SERVICE ALERT: srv-40;Service-9;CRITICAL;HARD;1;[Errno 2] No such file or directory
                    logobject = LOGOBJECT_SERVICE
                    logclass = LOGCLASS_ALERT
                    host_name, service_description, state, state_type, attempt, plugin_output = options.split(
                        ';', 5)
                    state = service_states[state]
                elif type == 'SERVICE DOWNTIME ALERT':
                    logobject = LOGOBJECT_SERVICE
                    logclass = LOGCLASS_ALERT
                    host_name, service_description, state_type, comment = options.split(
                        ';', 3)
                elif type == 'SERVICE FLAPPING ALERT':
                    logobject = LOGOBJECT_SERVICE
                    logclass = LOGCLASS_ALERT
                    host_name, service_description, state_type, comment = options.split(
                        ';', 3)

                elif type == 'CURRENT HOST STATE':
                    logobject = LOGOBJECT_HOST
                    logclass = LOGCLASS_STATE
                    host_name, state, state_type, attempt, plugin_output = options.split(
                        ';', 4)
                elif type == 'INITIAL HOST STATE':
                    logobject = LOGOBJECT_HOST
                    logclass = LOGCLASS_STATE
                    host_name, state, state_type, attempt, plugin_output = options.split(
                        ';', 4)
                elif type == 'HOST ALERT':
                    logobject = LOGOBJECT_HOST
                    logclass = LOGCLASS_ALERT
                    host_name, state, state_type, attempt, plugin_output = options.split(
                        ';', 4)
                    state = host_states[state]
                elif type == 'HOST DOWNTIME ALERT':
                    logobject = LOGOBJECT_HOST
                    logclass = LOGCLASS_ALERT
                    host_name, state_type, comment = options.split(';', 2)
                elif type == 'HOST FLAPPING ALERT':
                    logobject = LOGOBJECT_HOST
                    logclass = LOGCLASS_ALERT
                    host_name, state_type, comment = options.split(';', 2)

                elif type == 'SERVICE NOTIFICATION':
                    # test_contact;test_host_0;test_ok_0;CRITICAL;notify-service;i am CRITICAL  <-- normal
                    # SERVICE NOTIFICATION: test_contact;test_host_0;test_ok_0;DOWNTIMESTART (OK);notify-service;OK
                    logobject = LOGOBJECT_SERVICE
                    logclass = LOGCLASS_NOTIFICATION
                    contact_name, host_name, service_description, state_type, command_name, check_plugin_output = options.split(
                        ';', 5)
                    if '(' in state_type:  # downtime/flapping/etc-notifications take the type UNKNOWN
                        state_type = 'UNKNOWN'
                    state = service_states[state_type]
                elif type == 'HOST NOTIFICATION':
                    # test_contact;test_host_0;DOWN;notify-host;i am DOWN
                    logobject = LOGOBJECT_HOST
                    logclass = LOGCLASS_NOTIFICATION
                    contact_name, host_name, state_type, command_name, check_plugin_output = options.split(
                        ';', 4)
                    if '(' in state_type:
                        state_type = 'UNKNOWN'
                    state = host_states[state_type]

                elif type == 'PASSIVE SERVICE CHECK':
                    logobject = LOGOBJECT_SERVICE
                    logclass = LOGCLASS_PASSIVECHECK
                    host_name, service_description, state, check_plugin_output = options.split(
                        ';', 3)
                elif type == 'PASSIVE HOST CHECK':
                    logobject = LOGOBJECT_HOST
                    logclass = LOGCLASS_PASSIVECHECK
                    host_name, state, check_plugin_output = options.split(
                        ';', 2)
                elif type == 'SERVICE EVENT HANDLER':
                    # SERVICE EVENT HANDLER: test_host_0;test_ok_0;CRITICAL;SOFT;1;eventhandler
                    logobject = LOGOBJECT_SERVICE
                    host_name, service_description, state, state_type, attempt, command_name = options.split(
                        ';', 5)
                    state = service_states[state]

                elif type == 'HOST EVENT HANDLER':
                    logobject = LOGOBJECT_HOST
                    host_name, state, state_type, attempt, command_name = options.split(
                        ';', 4)
                    state = host_states[state]

                elif type == 'EXTERNAL COMMAND':
                    logobject = LOGOBJECT_INFO
                    logclass = LOGCLASS_COMMAND
                elif type.startswith('starting...') or \
                     type.startswith('shutting down...') or \
                     type.startswith('Bailing out') or \
                     type.startswith('active mode...') or \
                     type.startswith('standby mode...') or \
                     type.startswith('Warning'):
                    logobject = LOGOBJECT_INFO
                    logclass = LOGCLASS_PROGRAM
                else:
                    logger.debug("[Livestatus Log Lines] Does not match")
                    pass

                Logline.id += 1
                self.lineno = Logline.id
                setattr(self, 'logobject', int(logobject))
                setattr(self, 'attempt', int(attempt))
                setattr(self, 'logclass', int(logclass))
                setattr(self, 'command_name', command_name)
                setattr(self, 'comment', comment)
                setattr(self, 'contact_name', contact_name)
                setattr(self, 'host_name', host_name)
                setattr(self, 'message', message)
                # Fix a mismatch in the number of fields between old and new databases
                setattr(self, 'options', '')
                setattr(self, 'plugin_output', plugin_output)
                setattr(self, 'service_description', service_description)
                setattr(self, 'state', state)
                setattr(self, 'state_type', state_type)
                setattr(self, 'time', int(time))
                setattr(self, 'type', type)
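A standalone sketch of the '[epoch] TYPE: details' framing parsed above (the function name is illustrative; the slicing mirrors the code):

def split_logline(line):
    # A valid line starts with '[', holds a 10-digit epoch, then ']'
    if line[0] != '[' or line[11] != ']':
        raise ValueError("invalid line: %s" % line)
    time_stamp = int(line[1:11])
    first_type_pos = line.find(' ') + 1
    last_type_pos = line.find(':')
    log_type = line[first_type_pos:last_type_pos]
    options = line[last_type_pos + 2:]
    return time_stamp, log_type, options

print split_logline('[1278280765] SERVICE ALERT: host;svc;OK;HARD;1;fine')
# -> (1278280765, 'SERVICE ALERT', 'host;svc;OK;HARD;1;fine')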
Example #47
    def setup_new_conf(self):
        conf = self.new_conf
        self.new_conf = None
        self.cur_conf = conf
        # Got our name from the globals
        g_conf = conf['global']
        if 'broker_name' in g_conf:
            name = g_conf['broker_name']
        else:
            name = 'Unnamed broker'
        self.name = name
        logger.load_obj(self, name)

        logger.debug("[%s] Sending us configuration %s" % (self.name, conf))
        # If we've got something in the schedulers, we do not
        # want it anymore
        # self.schedulers.clear()
        for sched_id in conf['schedulers']:
            # Must check if we already have it so we do not overwrite our broks
            already_got = False

            # We may already have this conf id, but with another address
            if sched_id in self.schedulers:
                new_addr = conf['schedulers'][sched_id]['address']
                old_addr = self.schedulers[sched_id]['address']
                new_port = conf['schedulers'][sched_id]['port']
                old_port = self.schedulers[sched_id]['port']
                # Address and port must both match for it to be the same conf :)
                if new_addr == old_addr and new_port == old_port:
                    already_got = True

            if already_got:
                broks = self.schedulers[sched_id]['broks']
                running_id = self.schedulers[sched_id]['running_id']
            else:
                broks = {}
                running_id = 0
            s = conf['schedulers'][sched_id]
            self.schedulers[sched_id] = s

            # replacing scheduler address and port by those defined in satellitemap
            if s['name'] in g_conf['satellitemap']:
                s = dict(s)  # make a copy
                s.update(g_conf['satellitemap'][s['name']])
            uri = pyro.create_uri(s['address'], s['port'], 'Broks',
                                  self.use_ssl)
            self.schedulers[sched_id]['uri'] = uri

            self.schedulers[sched_id]['broks'] = broks
            self.schedulers[sched_id]['instance_id'] = s['instance_id']
            self.schedulers[sched_id]['running_id'] = running_id
            self.schedulers[sched_id]['active'] = s['active']
            self.schedulers[sched_id]['last_connection'] = 0

        logger.info("We have our schedulers: %s " % self.schedulers)

        # Now get arbiter
        for arb_id in conf['arbiters']:
            # Must look if we already have it
            already_got = arb_id in self.arbiters
            if already_got:
                broks = self.arbiters[arb_id]['broks']
            else:
                broks = {}
            a = conf['arbiters'][arb_id]
            self.arbiters[arb_id] = a

            # replacing arbiter address and port by those defined in satellitemap
            if a['name'] in g_conf['satellitemap']:
                a = dict(a)  # make a copy
                a.update(g_conf['satellitemap'][a['name']])
            uri = pyro.create_uri(a['address'], a['port'], 'Broks',
                                  self.use_ssl)
            self.arbiters[arb_id]['uri'] = uri

            self.arbiters[arb_id]['broks'] = broks
            self.arbiters[arb_id]['instance_id'] = 0  # No use so all to 0
            self.arbiters[arb_id]['running_id'] = 0
            self.arbiters[arb_id]['last_connection'] = 0

            # We do not connect to the arbiter. Connection hangs

        logger.info("We have our arbiters: %s " % self.arbiters)

        # Now for pollers
        for pol_id in conf['pollers']:
            # Must look if we already have it
            already_got = pol_id in self.pollers
            if already_got:
                broks = self.pollers[pol_id]['broks']
                running_id = self.pollers[pol_id]['running_id']
            else:
                broks = {}
                running_id = 0
            p = conf['pollers'][pol_id]
            self.pollers[pol_id] = p

            # replacing poller address and port by those defined in satellitemap
            if p['name'] in g_conf['satellitemap']:
                p = dict(p)  # make a copy
                p.update(g_conf['satellitemap'][p['name']])
            uri = pyro.create_uri(p['address'], p['port'], 'Broks',
                                  self.use_ssl)
            self.pollers[pol_id]['uri'] = uri

            self.pollers[pol_id]['broks'] = broks
            self.pollers[pol_id]['instance_id'] = 0  # No use so all to 0
            self.pollers[pol_id]['running_id'] = running_id
            self.pollers[pol_id]['last_connection'] = 0


        logger.info("We have our pollers: %s" % self.pollers)

        # Now reactionners
        for rea_id in conf['reactionners']:
            # Must look if we already have it
            already_got = rea_id in self.reactionners
            if already_got:
                broks = self.reactionners[rea_id]['broks']
                running_id = self.reactionners[rea_id]['running_id']
            else:
                broks = {}
                running_id = 0

            r = conf['reactionners'][rea_id]
            self.reactionners[rea_id] = r

            # replacing reactionner address and port by those defined in satellitemap
            if r['name'] in g_conf['satellitemap']:
                r = dict(r)  # make a copy
                r.update(g_conf['satellitemap'][r['name']])
            uri = pyro.create_uri(r['address'], r['port'], 'Broks',
                                  self.use_ssl)
            self.reactionners[rea_id]['uri'] = uri

            self.reactionners[rea_id]['broks'] = broks
            self.reactionners[rea_id]['instance_id'] = 0  # No use so all to 0
            self.reactionners[rea_id]['running_id'] = running_id
            self.reactionners[rea_id]['last_connection'] = 0


        logger.info("We have our reactionners: %s" % self.reactionners)

        if not self.have_modules:
            self.modules = mods = conf['global']['modules']
            self.have_modules = True
            logger.info("We received modules %s " % mods)

            # Ok now start, or restart them!
            # Set modules, init them and start external ones
            self.modules_manager.set_modules(self.modules)
            self.do_load_modules()
            self.modules_manager.start_external_instances()

        # Set the timezone given by the arbiter
        use_timezone = conf['global']['use_timezone']
        if use_timezone != 'NOTSET':
            logger.info("Setting our timezone to %s" % use_timezone)
            os.environ['TZ'] = use_timezone
            time.tzset()

        # Connection init with Schedulers
        for sched_id in self.schedulers:
            self.pynag_con_init(sched_id, type='scheduler')

        for pol_id in self.pollers:
            self.pynag_con_init(pol_id, type='poller')

        for rea_id in self.reactionners:
            self.pynag_con_init(rea_id, type='reactionner')
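A small sketch of the satellitemap override applied above: the arbiter's map wins over the address/port shipped in the satellite entry (the data here is illustrative):

def apply_satellitemap(entry, satellitemap):
    if entry['name'] in satellitemap:
        entry = dict(entry)  # copy, so the stored conf stays untouched
        entry.update(satellitemap[entry['name']])
    return entry

s = {'name': 'sched-1', 'address': '10.0.0.1', 'port': 7768}
print apply_satellitemap(s, {'sched-1': {'address': '192.168.0.1'}})
# -> address is now 192.168.0.1, port unchanged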
Example #48
    def setup_new_conf(self):
        pk = self.new_conf
        conf_raw = pk['conf']
        override_conf = pk['override_conf']
        modules = pk['modules']
        satellites = pk['satellites']
        instance_name = pk['instance_name']
        push_flavor = pk['push_flavor']
        skip_initial_broks = pk['skip_initial_broks']

        t0 = time.time()
        conf = cPickle.loads(conf_raw)
        logger.debug("Conf received at %d. Unserialized in %d secs" %
                     (t0, time.time() - t0))

        self.new_conf = None

        # Tag the conf with our data
        self.conf = conf
        self.conf.push_flavor = push_flavor
        self.conf.instance_name = instance_name
        self.conf.skip_initial_broks = skip_initial_broks

        self.cur_conf = conf
        self.override_conf = override_conf
        self.modules = modules
        self.satellites = satellites
        #self.pollers = self.app.pollers

        if self.conf.human_timestamp_log:
            logger.set_human_format()

        # Now We create our pollers
        for pol_id in satellites['pollers']:
            # Must look if we already have it
            already_got = pol_id in self.pollers
            p = satellites['pollers'][pol_id]
            self.pollers[pol_id] = p

            if p['name'] in override_conf['satellitemap']:
                p = dict(p)  # make a copy
                p.update(override_conf['satellitemap'][p['name']])

            uri = pyro.create_uri(p['address'], p['port'], 'Schedulers',
                                  self.use_ssl)
            self.pollers[pol_id]['uri'] = uri
            self.pollers[pol_id]['last_connection'] = 0

        # First mix conf and override_conf to have our definitive conf
        for prop in self.override_conf:
            #print "Overriding the property %s with value %s" % (prop, self.override_conf[prop])
            val = self.override_conf[prop]
            setattr(self.conf, prop, val)

        if self.conf.use_timezone != '':
            logger.debug("Setting our timezone to %s" %
                         str(self.conf.use_timezone))
            os.environ['TZ'] = self.conf.use_timezone
            time.tzset()

        if len(self.modules) != 0:
            logger.debug("I've got %s modules" % str(self.modules))

        # TODO: if the scheduler had previous modules instantiated, it must clean them!
        self.modules_manager.set_modules(self.modules)
        self.do_load_modules()

        # give it an interface
        # But first remove previous interface if exists
        if self.ichecks is not None:
            logger.debug(
                "Disconnecting previous Check Interface from pyro_daemon")
            self.pyro_daemon.unregister(self.ichecks)
        # Now create and connect it
        self.ichecks = IChecks(self.sched)
        self.uri = self.pyro_daemon.register(self.ichecks, "Checks")
        logger.debug("The Checks Interface uri is: %s" % self.uri)

        # Same for Broks
        if self.ibroks is not None:
            logger.debug(
                "Disconnecting previous Broks Interface from pyro_daemon")
            self.pyro_daemon.unregister(self.ibroks)
        # Create and connect it
        self.ibroks = IBroks(self.sched)
        self.uri2 = self.pyro_daemon.register(self.ibroks, "Broks")
        logger.debug("The Broks Interface uri is: %s" % self.uri2)

        logger.info("Loading configuration.")
        self.conf.explode_global_conf()

        # we give sched its conf
        self.sched.reset()
        self.sched.load_conf(self.conf)
        self.sched.load_satellites(self.pollers, self.reactionners)

        # We must update our Config dict macro with good value
        # from the config parameters
        self.sched.conf.fill_resource_macros_names_macros()
        #print "DBG: got macros", self.sched.conf.macros

        # Creating the Macroresolver Class & unique instance
        m = MacroResolver()
        m.init(self.conf)

        #self.conf.dump()
        #self.conf.quick_debug()

        # Now create the external commander
        # it's an applier: its role is not to dispatch commands,
        # but to apply them
        e = ExternalCommandManager(self.conf, 'applyer')

        # The scheduler needs to know about external commands to
        # activate them if necessary
        self.sched.load_external_command(e)

        # The external command manager needs the sched because it can raise checks
        e.load_scheduler(self.sched)

        # We clear our schedulers managed (it's us :) )
        # and set ourself in it
        self.schedulers = {self.conf.instance_id: self.sched}
Example #49
class Ip_Tag_Arbiter(BaseModule):
    def __init__(self,
                 mod_conf,
                 ip_range,
                 prop,
                 value,
                 method,
                 ignore_hosts=None):
        BaseModule.__init__(self, mod_conf)
        self.ip_range = IP(ip_range)
        self.property = prop
        self.value = value
        self.method = method
        if ignore_hosts:
            self.ignore_hosts = ignore_hosts.split(', ')
            logger.debug("[IP Tag] Ignoring hosts : %s" % self.ignore_hosts)
        else:
            self.ignore_hosts = []
        self.pool_size = int(getattr(mod_conf, 'pool_size', '1'))

    # Called by Arbiter to say 'let's prepare yourself guy'
    def init(self):
        logger.info("[IP Tag] Initialization of the ip range tagger module")

    def hook_early_configuration(self, arb):
        logger.info("[IpTag] in hook late config")

        # Get a pool for gevent jobs
        if Pool:
            pool = Pool(100)
        else:
            pool = None

        for h in arb.conf.hosts:
            if not hasattr(h, 'address') and not hasattr(h, 'host_name'):
                continue

            if h.get_name() in self.ignore_hosts:
                logger.debug("[IP Tag] Ignoring host %s" % h.get_name())
                continue

            # The address to resolve
            addr = None

            # By default take the address, if not, take host_name
            if not hasattr(h, 'address'):
                addr = h.host_name
            else:
                addr = h.address

            logger.debug("[IP Tag] Looking for %s" % h.get_name())
            logger.debug("[IP Tag] Address is %s" % str(addr))
            h_ip = None
            try:
                IP(addr)
                # If we reach here, it was a real IP :)
                h_ip = addr
            except:
                pass

            if pool:
                pool.spawn(self.job, h, h_ip, addr)
            else:
                self.job(h, h_ip, addr)

        # Now wait for all jobs to finish if needed
        if pool:
            pool.join()

    # Main job, will manage each host in asynchronous mode thanks to gevent
    def job(self, h, h_ip, addr):
        # Ok, try again with name resolution
        if not h_ip:
            try:
                h_ip = socket.gethostbyname(addr)
            except Exception, exp:
                pass

        # Ok, maybe we succeed :)
        logger.debug("[IP Tag] Host ip is: %s" % str(h_ip))
        # If we got an IP that matches the range and the object does not
        # already have the property, tag it!
        if h_ip and h_ip in self.ip_range:
            logger.debug("[IP Tag] Is in the range")
            # 4 cases: append, prepend, replace and set
            # append will join with the value if it exists (at the END)
            # prepend will join with the value if it exists (at the BEGINNING)
            # replace will set the value only if the property does NOT exist
            # set will put the value even if the property exists
            if self.method == 'append':
                orig_v = getattr(h, self.property, '')
                new_v = ','.join([orig_v, self.value])
                setattr(h, self.property, new_v)

            # Same but we put before
            if self.method == 'prepend':
                orig_v = getattr(h, self.property, '')
                new_v = ','.join([self.value, orig_v])
                setattr(h, self.property, new_v)

            if self.method == 'replace':
                if not hasattr(h, self.property):
                    # Ok, set the value!
                    setattr(h, self.property, self.value)

            if self.method == 'set':
                setattr(h, self.property, self.value)
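
A minimal, runnable sketch of how the four tagging methods above transform a host property; the FakeHost stand-in and the property values are hypothetical, not part of Shinken:

class FakeHost(object):
    pass

h = FakeHost()
h.poller_tag = 'dmz'

# append: existing value first, new value joined at the end
setattr(h, 'poller_tag', ','.join([getattr(h, 'poller_tag', ''), 'lan']))
print(h.poller_tag)            # dmz,lan

# prepend: new value joined at the beginning
setattr(h, 'poller_tag', ','.join(['core', getattr(h, 'poller_tag', '')]))
print(h.poller_tag)            # core,dmz,lan

# replace: only sets the value when the property is missing
if not hasattr(h, 'realm'):
    setattr(h, 'realm', 'DMZ')

# set: unconditional overwrite
setattr(h, 'poller_tag', 'lan')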
Example #50
class Broker(BaseSatellite):

    properties = BaseSatellite.properties.copy()
    properties.update({
        'pidfile': PathProp(default='brokerd.pid'),
        'port': IntegerProp(default='7772'),
        'local_log': PathProp(default='brokerd.log'),
    })

    def __init__(self, config_file, is_daemon, do_replace, debug, debug_file):

        super(Broker, self).__init__('broker', config_file, is_daemon,
                                     do_replace, debug, debug_file)

        # Our arbiters
        self.arbiters = {}

        # Our pollers and reactionners
        self.pollers = {}
        self.reactionners = {}

        # Modules are loaded only once
        self.have_modules = False

        # We can have a queue of external commands given by modules;
        # they will be processed by the arbiter
        self.external_commands = []

        # All broks to manage
        self.broks = []  # broks to manage
        # broks raised this turn that need to be put in self.broks
        self.broks_internal_raised = []

        self.timeout = 1.0

    # Schedulers have some queues. We can simplify the calls by adding
    # elements to the proper queue just by looking at their type
    # Brok -> self.broks
    # TODO: better tag ID?
    # External commands -> self.external_commands
    def add(self, elt):
        cls_type = elt.__class__.my_type
        if cls_type == 'brok':
            # For brok, we TAG brok with our instance_id
            elt.instance_id = 0
            self.broks_internal_raised.append(elt)
            return
        elif cls_type == 'externalcommand':
            logger.debug("Enqueuing an external command '%s'" %
                         str(ExternalCommand.__dict__))
            self.external_commands.append(elt)
        # Maybe we got a Message from the modules: it's a way to ask for
        # something, like the full data of a scheduler for example.
        elif cls_type == 'message':
            # We got a message, great!
            logger.debug(str(elt.__dict__))
            if elt.get_type() == 'NeedData':
                data = elt.get_data()
                # full_instance_id means: I got no data for this scheduler,
                # so give me everything!
                if 'full_instance_id' in data:
                    c_id = data['full_instance_id']
                    source = elt.source
                    logger.info(
                        'The module %s is asking me to get all initial data from the scheduler %d'
                        % (source, c_id))
                    # so we just reset the connection and the running_id, it will just get all new things
                    try:
                        self.schedulers[c_id]['con'] = None
                        self.schedulers[c_id]['running_id'] = 0
                    except KeyError:  # maybe this instance was not known, forget it
                        logger.warning(
                            "the module %s asked me for a full_instance_id for an unknown ID (%d)!"
                            % (source, c_id))
            # Maybe a module tells me that it's dead, I must log its last words...
            if elt.get_type() == 'ICrash':
                data = elt.get_data()
                logger.error(
                    'the module %s just crashed! Please look at the traceback:' %
                    data['name'])
                logger.error(data['trace'])

                # The module death will be looked for elsewhere and restarted.

    # Get the right links dict for the given kind. If unknown, return None
    def get_links_from_type(self, type):
        t = {'scheduler': self.schedulers, 'arbiter': self.arbiters,
             'poller': self.pollers, 'reactionner': self.reactionners}
        if type in t:
            return t[type]
        return None

    # Called by the arbiter to get our external commands
    def get_external_commands(self):
        res = self.external_commands
        self.external_commands = []
        return res

    # Check that we did not try to connect to this satellite too recently
    def is_connection_try_too_close(self, elt):
        now = time.time()
        last_connection = elt['last_connection']
        if now - last_connection < 5:
            return True
        return False
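
    # Usage sketch (hypothetical values): a satellite link is a dict that
    # keeps a 'last_connection' timestamp, so a second attempt within five
    # seconds of the last one is skipped:
    #   link = {'last_connection': time.time()}
    #   self.is_connection_try_too_close(link)   # -> True, too soon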

    # Initialize or re-initialize the connection with a scheduler
    # (or an arbiter if type == 'arbiter')
    def pynag_con_init(self, id, type='scheduler'):
        # Get the right links dict for this type
        links = self.get_links_from_type(type)
        if links is None:
            logger.debug('Type unknown for connection! %s' % type)
            return

        if type == 'scheduler':
            # If sched is not active, I do not try to init
            # it is just useless
            is_active = links[id]['active']
            if not is_active:
                return

        # If we try to connect too much, we slow down our tests
        if self.is_connection_try_too_close(links[id]):
            return

        # Ok, we can now update it
        links[id]['last_connection'] = time.time()

        # DBG: print "Init connection with", links[id]['uri']
        running_id = links[id]['running_id']
        # DBG: print "Running id before connection", running_id
        uri = links[id]['uri']

        try:
            socket.setdefaulttimeout(3)
            links[id]['con'] = Pyro.core.getProxyForURI(uri)
            socket.setdefaulttimeout(None)
        except Pyro_exp_pack, exp:
            # The global socket timeout is not compatible with the
            # multiprocessing module, so we must disable it immediately after
            socket.setdefaulttimeout(None)
            logger.info("Connection problem to the %s %s: %s" %
                        (type, links[id]['name'], str(exp)))
            links[id]['con'] = None
            return

        try:
            # initial ping must be quick
            pyro.set_timeout(links[id]['con'], 5)
            links[id]['con'].ping()
            new_run_id = links[id]['con'].get_running_id()
            # data transfer can be longer
            pyro.set_timeout(links[id]['con'], 120)

            # The scheduler has been restarted: it has a new running_id.
            # So we clear all its broks; they are obsolete now.
            if new_run_id != running_id:
                logger.debug("[%s] New running id for the %s %s: %s (was %s)" %
                             (self.name, type, links[id]['name'], new_run_id,
                              running_id))
                links[id]['broks'].clear()
                # we must ask for a new full broks if
                # it's a scheduler
                if type == 'scheduler':
                    logger.debug(
                        "[%s] I ask for a broks generation to the scheduler %s"
                        % (self.name, links[id]['name']))
                    links[id]['con'].fill_initial_broks(self.name)
            # Ok all is done, we can save this new running id
            links[id]['running_id'] = new_run_id
        except Pyro_exp_pack, exp:
            logger.info("Connection problem to the %s %s: %s" %
                        (type, links[id]['name'], str(exp)))
            links[id]['con'] = None
            return
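
A reduced sketch of the my_type dispatch used by Broker.add() above: elements are routed on a class-level tag instead of isinstance() checks (the stub classes below are hypothetical stand-ins, not Shinken classes):

class BrokStub(object):
    my_type = 'brok'

class ExternalCommandStub(object):
    my_type = 'externalcommand'

def route(elt, broks, external_commands):
    # Same routing rule as Broker.add(), reduced to its core
    cls_type = elt.__class__.my_type
    if cls_type == 'brok':
        broks.append(elt)
    elif cls_type == 'externalcommand':
        external_commands.append(elt)

broks, cmds = [], []
route(BrokStub(), broks, cmds)
route(ExternalCommandStub(), broks, cmds)
print("%d brok(s), %d command(s)" % (len(broks), len(cmds)))   # 1 brok(s), 1 command(s)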
Example #51
    def get_graph_uris(self, elt, graphstart, graphend, source='detail'):
        # Ugly to hard-code such values, but where else should I put them?
        fontsize = {'detail': '8', 'dashboard': '18'}
        if not elt:
            return []

        t = elt.__class__.my_type
        r = []

        # Handling Graphite variables
        data_source = ""
        graphite_pre = ""
        graphite_post = ""
        if self.graphite_data_source:
            data_source = ".%s" % self.graphite_data_source
        if t == 'host':
            if "_GRAPHITE_PRE" in elt.customs:
                graphite_pre = "%s." % elt.customs["_GRAPHITE_PRE"]
        elif t == 'service':
            if "_GRAPHITE_PRE" in elt.host.customs:
                graphite_pre = "%s." % elt.host.customs["_GRAPHITE_PRE"]
            if "_GRAPHITE_POST" in elt.customs:
                graphite_post = ".%s" % elt.customs["_GRAPHITE_POST"]

        # Format the start & end time (and not only the date)
        d = datetime.fromtimestamp(graphstart)
        d = d.strftime('%H:%M_%Y%m%d')
        e = datetime.fromtimestamp(graphend)
        e = e.strftime('%H:%M_%Y%m%d')
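        # e.g. a graphstart near 2015-01-01 00:00 (local time) yields
        # '00:00_20150101'; Graphite accepts this HH:MM_YYYYMMDD form
        # in its from= and until= parameters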

        filename = elt.check_command.get_name().split('!')[0] + '.graph'

        # Do we have a template for the given source?
        thefile = os.path.join(self.templates_path, source, filename)

        # If not, try to use the one from the parent folder
        if not os.path.isfile(thefile):
            # In case of CHECK_NRPE, the real check name is the second argument
            if len(elt.check_command.get_name().split('!')) > 1:
                filename = elt.check_command.get_name().split(
                    '!')[0] + '_' + elt.check_command.get_name().split(
                        '!')[1] + '.graph'
                thefile = os.path.join(self.templates_path, source, filename)
            if not os.path.isfile(thefile):
                thefile = os.path.join(self.templates_path, filename)

        logger.debug("[ui-graphite] template=%s", thefile)
        if os.path.isfile(thefile):
            # Read the template file as a Python template string
            with open(thefile, 'r') as template_file:
                template_html = template_file.read()

            html = Template(template_html)
            # Build the dict to instantiate the template string
            values = {}
            if t == 'host':
                values['host'] = graphite_pre + self.illegal_char.sub(
                    "_", elt.host_name) + data_source
                values['service'] = '__HOST__'
            if t == 'service':
                values['host'] = graphite_pre + self.illegal_char.sub(
                    "_", elt.host.host_name) + data_source
                values['service'] = self.illegal_char.sub(
                    "_", elt.service_description) + graphite_post
            values['uri'] = self.uri
            # Split, we may have several images.
            for img in html.substitute(values).split('\n'):
                if not img == "":
                    v = {}
                    v['link'] = self.uri
                    v['img_src'] = img.replace(
                        '"', "'") + "&from=" + d + "&until=" + e
                    v['img_src'] = self._replaceFontSize(
                        v['img_src'], fontsize[source])
                    r.append(v)
            # No need to continue, we have the images already.
            return r

        # If no template is present, fall back to the usual way

        if t == 'host':
            couples = self.get_metric_and_value(elt.perf_data)

            # If no values, we can exit now
            if len(couples) == 0:
                return []

            # Replace every non-alphanumeric character with '_'
            host_name = self.illegal_char.sub('_', elt.host_name)

            # Send a bulk of all metrics at once
            for (metric, _) in couples:
                uri = self.uri + 'render/?width=586&height=308&lineMode=connected&from=' + d + "&until=" + e
                if re.search(r'_warn|_crit', metric):
                    continue
                target = "&target=%s%s%s.__HOST__.%s" % (
                    graphite_pre, host_name, data_source, metric)
                uri += target + target + "_warn" + target + "_crit"
                v = {}
                v['link'] = self.uri
                v['img_src'] = uri
                v['img_src'] = self._replaceFontSize(v['img_src'],
                                                     fontsize[source])
                r.append(v)

            return r
        if t == 'service':
            couples = self.get_metric_and_value(elt.perf_data)

            # If no values, we can exit now
            if len(couples) == 0:
                return []

            # Replace every non-alphanumeric character with '_'
            desc = self.illegal_char.sub('_', elt.service_description)
            host_name = self.illegal_char.sub('_', elt.host.host_name)

            # Send a bulk of all metrics at once
            for (metric, value) in couples:
                uri = self.uri + 'render/?width=586&height=308&lineMode=connected&from=' + d + "&until=" + e
                if re.search(r'_warn|_crit', metric):
                    continue
                elif value[1] == '%':
                    uri += "&yMin=0&yMax=100"
                target = "&target=%s%s%s.%s.%s%s" % (graphite_pre, host_name,
                                                     data_source, desc, metric,
                                                     graphite_post)
                uri += target + target + "_warn" + target + "_crit"
                v = {}
                v['link'] = self.uri
                v['img_src'] = uri
                v['img_src'] = self._replaceFontSize(v['img_src'],
                                                     fontsize.get(source, 10))
                r.append(v)
            return r

        # Oops, bad type?
        return []
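
A minimal sketch of the template substitution performed above: the placeholder names $uri, $host and $service come from the values dict built in get_graph_uris, while the template line itself is hypothetical:

from string import Template

tpl = Template("${uri}render/?target=${host}.${service}.rta&width=586")
print(tpl.substitute(uri='http://graphite.local/', host='srv1', service='Ping'))
# http://graphite.local/render/?target=srv1.Ping.rta&width=586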
Example #52
        # REF: doc/broker-modules.png (3)
        # We put into the external queues the broks that were not already sent
        t0 = time.time()
        # We are sending broks as a big list, more efficient than one by one
        queues = self.modules_manager.get_external_to_queues()
        to_send = [
            b for b in self.broks if getattr(b, 'need_send_to_ext', True)
        ]

        for q in queues:
            q.put(to_send)

        # No more need to send them
        for b in to_send:
            b.need_send_to_ext = False
        logger.debug("Time to send %s broks (%d secs)" %
                     (len(to_send), time.time() - t0))

        # We must add new broks at the end of the list, so we reverse it
        self.broks.reverse()

        start = time.time()
        while len(self.broks) != 0:
            now = time.time()
            # Do not 'manage' more than 1s, we must get new broks
            # every 1s
            if now - start > 1:
                break

            b = self.broks.pop()
            # Ok, we can get the brok and do something with it
            # REF: doc/broker-modules.png (4-5)
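
The reverse-then-pop pattern above, as a standalone sketch: reversing puts the oldest broks at the tail so pop() is O(1), and the one-second budget makes the loop yield regularly to fetch fresh broks:

import time

work = list(range(100000))
work.reverse()                     # oldest items now sit at the tail
start = time.time()
while len(work) != 0:
    now = time.time()
    if now - start > 1:
        break                      # time budget exhausted; resume next turn
    item = work.pop()              # O(1) removal from the end of the list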
Example #53
    def open(self):
        try:
            from pymongo import MongoClient
        except ImportError:
            logger.error(
                '[WebUI-MongoDBPreferences] Can not import pymongo.MongoClient'
            )
            raise

        try:
            if self.replica_set:
                self.con = MongoClient(self.uri,
                                       replicaSet=self.replica_set,
                                       fsync=self.mongodb_fsync)
            else:
                self.con = MongoClient(self.uri, fsync=self.mongodb_fsync)
            logger.info("[WebUI-MongoDBPreferences] connected to mongodb: %s",
                        self.uri)

            self.db = getattr(self.con, self.database)
            logger.info(
                "[WebUI-MongoDBPreferences] connected to the database: %s",
                self.database)

            if self.username and self.password:
                self.db.authenticate(self.username, self.password)
                logger.info(
                    "[WebUI-MongoDBPreferences] user authenticated: %s",
                    self.username)

            # Check if a document exists in the preferences collection ...
            logger.info(
                '[WebUI-MongoDBPreferences] searching connection test item in the collection ...'
            )
            u = self.db.ui_user_preferences.find_one({'_id': 'shinken-test'})
            if not u:
                # No document ... create a new one!
                logger.debug(
                    '[WebUI-MongoDBPreferences] not found connection test item in the collection'
                )
                r = self.db.ui_user_preferences.save({
                    '_id': 'shinken-test',
                    'last_test': time.time()
                })
                logger.info(
                    '[WebUI-MongoDBPreferences] created connection test item')
            else:
                # Found document ... update!
                logger.debug(
                    '[WebUI-MongoDBPreferences] found connection test item in the collection'
                )
                r = self.db.ui_user_preferences.update(
                    {'_id': 'shinken-test'},
                    {'$set': {
                        'last_test': time.time()
                    }})
                logger.info(
                    '[WebUI-MongoDBPreferences] updated connection test item')

            self.is_connected = True
            logger.info(
                '[WebUI-MongoDBPreferences] database connection established')
        except Exception, e:
            logger.error("[WebUI-MongoDBPreferences] Exception: %s", str(e))
            logger.debug("[WebUI-MongoDBPreferences] Exception type: %s",
                         type(e))
            logger.debug(
                "[WebUI-MongoDBPreferences] Back trace of this kill: %s",
                traceback.format_exc())
            # Depending on exception type, should raise ...
            self.is_connected = False
            raise
Example #54
def get_instance(plugin):
    logger.debug("[Graphite UI]Get an GRAPHITE UI module for plugin %s" %
                 plugin.get_name())

    instance = Graphite_Webui(plugin)
    return instance
Example #55
    def __call__(self, *args):
        logger.debug("Calling %s with arguments %s" % (self.f.func_name, args))
        return self.f(*args)
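
A hedged reconstruction of the decorator class this __call__ belongs to; only the __call__ body above comes from the source, while the class name, the __init__ and the example usage are assumptions:

from shinken.log import logger

class logged(object):
    def __init__(self, f):
        self.f = f

    def __call__(self, *args):
        logger.debug("Calling %s with arguments %s" % (self.f.func_name, args))
        return self.f(*args)

@logged
def add(a, b):
    return a + b

add(1, 2)   # logs: Calling add with arguments (1, 2)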
Example #56
    except Exception, e:
        logger.error("[Ws_arbiter] failed to get the lists: %s" % str(e))
        commands_list = []

    # We check for auth if it's not anonymously allowed
    if app.username != 'anonymous':
        basic = parse_auth(request.environ.get('HTTP_AUTHORIZATION', ''))
        # Maybe the user did not even send credentials. If so, bail out
        if not basic:
            abort(401, 'Authentication required')
        # Maybe the credentials are wrong?
        if basic[0] != app.username or basic[1] != app.password:
            abort(403, 'Authentication denied')

    # Add the commands to the main queue
    logger.debug("[Ws_arbiter] commands =  %s" % str(sorted(commands_list)))
    for c in sorted(commands_list):
        ext = ExternalCommand(c)
        app.from_q.put(ext)

    # All is OK here; the handler will return a 200 code


# This module will open an HTTP service, where a user can send a command, like a check
# return.
class Ws_arbiter(BaseModule):
    def __init__(self, modconf):
        BaseModule.__init__(self, modconf)
        try:
            self.username = getattr(modconf, 'username', 'anonymous')
            self.password = getattr(modconf, 'password', '')
Example #57
        try:
            p = PerfDatas(s.perf_data)
            for m in p:
                if m.name and m.value is not None:
                    logger.debug("[WebUI-cvhost], metric '%s' = %s, uom: %s",
                                 m.name, m.value, m.uom)
                    if re.search(params['svc_dsk_used'], m.name) and re.match(
                            params['svc_dsk_uom'], m.uom):
                        all[m.name] = m.value
                        logger.debug("[WebUI-cvhost], got '%s' = %s", m.name,
                                     m.value)
        except Exception, exp:
            logger.warning("[WebUI-cvhost] get_disks, exception: %s", str(exp))

    logger.debug("[WebUI-cvhost], get_disks %s", all)
    return state, all


def get_memory(h):
    all = {}
    state = 'UNKNOWN'

    s = _findServiceByName(h, params['svc_mem_name'])
    if s:
        logger.debug("[WebUI-cvhost], found %s", s.get_full_name())
        state = s.state

        try:
            p = PerfDatas(s.perf_data)
            for m in p:
Example #58
                logger.error(
                    "[WebUI-MongoDBPreferences] error during initialization, no database connection!"
                )
                return None

        try:
            e = self.db.ui_user_preferences.find_one({'_id': 'shinken-global'})
        except Exception, e:
            logger.warning("[WebUI-MongoDBPreferences] Exception: %s", str(e))
            self.is_connected = False
            return None

        # Maybe it's a new entry or this parameter is missing; bail out
        if not e or key not in e:
            logger.debug(
                "[WebUI-MongoDBPreferences] new parameter or not stored preference: %s",
                key)
            return None

        return e.get(key)

    # We will fetch the user preference entry from the MongoDB database
    # and return the key they are asking us for
    def get_ui_user_preference(self, user, key):
        if not self.is_connected:
            if not self.open():
                logger.error(
                    "[WebUI-MongoDBPreferences] error during initialization, no database connection!"
                )
                return None
Example #59
# You should have received a copy of the GNU Affero General Public License
# along with Shinken.  If not, see <http://www.gnu.org/licenses/>.

import os

from config_parser import ConfigParser

from shinken.log import logger

# Get the plugin's parameters from its configuration file (not useful currently, but for future ideas...)
params = {'fake': "fake"}

plugin_name = os.path.splitext(os.path.basename(__file__))[0]
currentdir = os.path.dirname(os.path.realpath(__file__))
configuration_file = "%s/%s" % (currentdir, 'plugin.cfg')
logger.debug("Plugin configuration file: %s", configuration_file)
try:
    scp = ConfigParser('#', '=')
    params = scp.parse_config(configuration_file)

    # mongo_host = params['mongo_host']
    params['fake'] = params['fake']

    logger.debug("WebUI plugin '%s', configuration loaded.", plugin_name)
    # logger.debug("Plugin %s configuration, database: %s (%s)",
    # plugin_name, params['mongo_host'], params['mongo_port'])
except Exception as exp:
    logger.warning(
        "WebUI plugin '%s', configuration file (%s) not available: %s",
        plugin_name, configuration_file, str(exp))
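
A hypothetical plugin.cfg matching the parser arguments above ('#' starts a comment, '=' separates key and value); parse_config is assumed to return a flat dict of the parsed pairs:

#   # plugin parameters
#   fake = not_so_fake
#
# scp = ConfigParser('#', '=')
# params = scp.parse_config(configuration_file)
# params['fake']   ->   'not_so_fake'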
Example #60
def get_page(name):
    global params

    # user = app.check_user_authentication()

    config = 'default'
    if '/' in name:
        config = name.split('/')[1]
        name = name.split('/')[0]

    # Find host type if provided in parameters ...
    # @mohierf: not yet implemented ...
    type = app.request.query.get('type', 'default')

    logger.debug("[WebUI-cvhost], get_page for %s (%s)", name,
                 app.request.query_string)

    try:
        currentdir = os.path.dirname(os.path.realpath(__file__))
        configuration_file = "%s/%s.cfg" % (currentdir, config)
        logger.debug("Plugin configuration file: %s", configuration_file)
        scp = config_parser('#', '=')
        z = params.copy()
        z.update(scp.parse_config(configuration_file))
        params = z

        logger.debug("[WebUI-cvhost] configuration loaded.")
        logger.debug("[WebUI-cvhost] configuration, load: %s (%s)",
                     params['svc_load_name'], params['svc_load_used'])
        logger.debug("[WebUI-cvhost] configuration, cpu: %s (%s)",
                     params['svc_cpu_name'], params['svc_cpu_used'])
        logger.debug("[WebUI-cvhost] configuration, disk: %s (%s)",
                     params['svc_dsk_name'], params['svc_dsk_used'])
        logger.debug("[WebUI-cvhost] configuration, memory: %s (%s)",
                     params['svc_mem_name'], params['svc_mem_used'])
        logger.debug("[WebUI-cvhost] configuration, network: %s (%s)",
                     params['svc_net_name'], params['svc_net_used'])
        # logger.debug("[WebUI-cvhost] configuration, printer: %s (%s)", params['svc_prn_name'], params['svc_prn_used'])
    except Exception, exp:
        logger.warning(
            "[WebUI-cvhost] configuration file (%s) not available: %s",
            configuration_file, str(exp))
        all_perfs = {}
        all_states = {}
        return {
            'app': app,
            'config': config,
            'all_perfs': all_perfs,
            'all_states': all_states
        }
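
The copy()/update() overlay used above, as a standalone sketch: values from the per-config file win, while plugin defaults survive for any key the file omits (the keys and values here are illustrative):

defaults = {'svc_cpu_name': 'Cpu', 'svc_mem_name': 'Memory'}
from_file = {'svc_cpu_name': 'CPU_Usage'}

merged = defaults.copy()
merged.update(from_file)          # file values override, defaults retained
print(merged['svc_cpu_name'])     # CPU_Usage
print(merged['svc_mem_name'])     # Memory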