Example #1
    def check(self, sched, config):
        while True:
            try:
                warnRange = config["triggerrate_warn"]
                warn = self.parse_range(warnRange)
                critRange = config["triggerrate_crit"]
                crit = self.parse_range(critRange)
            except Exception:
                logger.critical("Unable to read config.ini in %s" % self.nagiosResult.serviceName)
                self.nagiosResult.status_code = CRITICAL
                # Without valid ranges the rest of this check cannot run
                yield self.nagiosResult
                continue

            wmin, wmax = warn
            cmin, cmax = crit

            self.triggerRateValues = self.interpreter.getTriggerRate()
            self.lastupdate = self.triggerRateValues.date
            self.trate = self.triggerRateValues.triggerRate

            if self.trate <= cmin or self.trate >= cmax:
                self.nagiosResult.status_code = CRITICAL
            elif self.trate <= wmin or self.trate >= wmax:
                self.nagiosResult.status_code = WARNING
            else:
                self.nagiosResult.status_code = OK

            if self.lastupdate:
                # Timestamp is in GPS time.  Naively treat it as a local
                # timestamp, then correct for the LabVIEW-determined offset
                # between pc clock time (UTC time) and GPS time.

                # Read offset from file
                dt_config = EConfigParser()
                dt_config.read(TIME_DIFF_INI)
                offset = dt_config.ifgetint("HiSPARC", "time_difference", 1e6)

                # Naively calculate timestamp and adjust for pc / gps offset
                t = calendar.timegm(self.lastupdate.timetuple())
                t += offset
                # Calculate time difference between UTC trigger and 'UTC now'
                dt = time.time() - t
            else:
                # Never updated, make dt very large
                dt = 1e6

            # If last update was significantly longer than time between monitor
            # upload checks, detector is probably stalled
            interval = int(config["triggerrate_interval"])
            if abs(dt) > (2 * interval):
                self.nagiosResult.description = (
                    "No recent triggers. Trigger rate: %.2f. "
                    "Last update: %d seconds ago" % (self.trate, dt)
                )
                self.nagiosResult.status_code = CRITICAL
            else:
                self.nagiosResult.description = "Trigger rate: %.2f. Last update: %d seconds ago" % (self.trate, dt)
            yield self.nagiosResult
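Note: check() above relies on self.parse_range to turn the configured warn/crit values into (min, max) pairs. That helper is not shown in these examples; a minimal sketch of what it might do, assuming the config stores ranges as 'low:high' strings (an assumption, not the actual HiSPARC format), is:

def parse_range(range_str):
    # Hypothetical helper: split a 'low:high' string such as '0.1:10'
    # into a (0.1, 10.0) tuple of floats.
    low, high = range_str.split(':')
    return float(low), float(high)

# Usage sketch: wmin, wmax = parse_range('0.1:10')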
Example #2
    def __init__(self):
        self.scheduler = sched.scheduler(time.time, time.sleep)
        self.checker = Checker()
        self.config = EConfigParser()
        self.config.read([CONFIG_INI, PERSISTENT_INI])
        # Time between checks in seconds
        self.timeBetweenChecks = self.config.ifgetint(
            'Update', 'IntervalBetweenChecks', 1800)
        # Start and stop time of the interval in which checks may be
        # performed on a day (in seconds since midnight) e.g. 7200 is 2am
        self.timeStartCheckInterval = self.config.ifgetint(
            'Update', 'CheckerIntervalStartTime', 0)
        self.timeStopCheckInterval = self.config.ifgetint(
            'Update', 'CheckerIntervalStopTime', 24 * 60 * 60)
        # Boolean determining whether there will be an initial delay
        self.checkerInitialDelay = self.config.ifgetint(
            'Update', 'CheckerInitialDelay', 0)

        # Setup the log mode
        log_dirname = '../../persistent/logs/updater/'
        # Making sure the directory exists
        if not os.access(log_dirname, os.F_OK):
            os.makedirs(log_dirname)
        log_filename = os.path.join(log_dirname, 'updater')

        # Remove any existing handlers
        logger.handlers = []

        # Add file handler
        handler = TimedRotatingFileHandler(log_filename, when='midnight',
                                           backupCount=14, utc=True)
        handler.setFormatter(formatter_file)
        logger.addHandler(handler)

        # Add handler which prints to the screen
        handler = logging.StreamHandler()
        handler.setFormatter(formatter_screen)
        logger.addHandler(handler)

        # Default logging level
        logger.setLevel(level=logging.DEBUG)

        # Logging level for the handlers
        for i, target in enumerate(['File', 'Screen']):
            log_level = self.config.ifgetstr('Logging', '%sLevel' % target,
                                             'debug')
            if log_level in LEVELS:
                logger.handlers[i].setLevel(level=LEVELS[log_level])
                logger.info('%s logging level set to %s' % (target, log_level))
            else:
                logger.warning("Illegal %s logging level '%s' in config, "
                               "using debug" % (target, log_level))
Example #3
    def __init__(self):
        self.config = EConfigParser()
        self.config.read([CONFIG_INI, PERSISTENT_INI, PASSWORD_INI])

        # Setup the log mode
        log_dirname = '../../persistent/logs/hsmonitor/'
        # Making sure the directory exists
        if not os.access(log_dirname, os.F_OK):
            os.makedirs(log_dirname)
        log_filename = os.path.join(log_dirname, 'hsmonitor')

        # Remove any existing handlers
        logger.handlers = []

        # Add file handler
        handler = TimedConcurrentRotatingFileHandler(
            log_filename, when='midnight', backupCount=14, utc=True)
        handler.setFormatter(formatter_file)
        logger.addHandler(handler)

        # Add handler which prints to the screen
        handler = logging.StreamHandler()
        handler.setFormatter(formatter_screen)
        logger.addHandler(handler)

        # Default logging level
        logger.setLevel(level=logging.DEBUG)

        # Logging level for the handlers
        for i, target in enumerate(['File', 'Screen']):
            log_level = self.config.ifgetstr('Logging', '%sLevel' % target,
                                             'debug')
            if log_level in LEVELS:
                logger.handlers[i].setLevel(level=LEVELS[log_level])
                logger.info('%s logging level set to %s' % (target, log_level))
            else:
                logger.warning("Illegal %s logging level '%s' in config, "
                               "using debug" % (target, log_level))

        # List of all the threads
        self.hsThreads = []

        # Assume one server (datastore); if a local server is also
        # specified it will be added.
        self.numServers = 1
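Note: EConfigParser is a project-specific class; the ifgetint/ifgetstr/ifgetfloat calls in these examples read as "get the option, falling back to a default if it is missing". A rough sketch of that idea (not the actual EConfigParser implementation) could be:

try:
    from configparser import ConfigParser, NoOptionError, NoSectionError
except ImportError:  # Python 2, as used by this codebase
    from ConfigParser import ConfigParser, NoOptionError, NoSectionError


class FallbackConfigParser(ConfigParser):
    # Sketch of 'get or default' helpers in the spirit of EConfigParser.
    def ifgetstr(self, section, option, default):
        try:
            return self.get(section, option)
        except (NoSectionError, NoOptionError):
            return default

    def ifgetint(self, section, option, default):
        try:
            return self.getint(section, option)
        except (NoSectionError, NoOptionError, ValueError):
            return default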
Example #4
    def __init__(self):
        # Setup the log mode
        setLogMode(MODE_BOTH)

        # Read the configuration file
        try:
            self.cfg = EConfigParser()
            self.cfg.read([CONFIG_INI_PATH1, CONFIG_INI_PATH2,
                           CONFIG_INI_PATH3])
        except Exception:
            log("HsMonitor: Cannot open the config file!", severity=2)
            return
        else:
            log("HsMonitor: Initialize variables.")

            # List of all the threads
            self.hsThreads = []
        # Assume one server (datastore); if a local server is also
        # specified it will be added.
        self.numServers = 1
Example #5
    def __init__(self):
        # setup the log mode
        log_filename = 'log-testfornagiospushfromhisparc'
        handler = logging.handlers.TimedRotatingFileHandler(
            log_filename, when='midnight', backupCount=14)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(level=logging.DEBUG)

        # read the configuration file
        try:
            self.cfg = EConfigParser()
            self.cfg.read([CONFIG_INI_PATH1, CONFIG_INI_PATH2])
        except Exception:
            logger.error("Cannot open the config file!")
            return
        else:
            logger.debug("Initilize variables")

            # list of all the threads
            self.hsThreads = []
        # Assume one server (eventwarehouse); if a local server is also
        # specified it will be added.
        self.numServers = 1
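Note: the handlers in these snippets attach module-level formatter objects (formatter, formatter_file, formatter_screen) whose definitions are not shown. Assuming they are plain logging.Formatter instances, they might look something like this (the format strings are illustrative only):

import logging

# Illustrative formatters; the actual format strings used by the
# HiSPARC modules are not shown in these examples.
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
formatter_file = formatter
formatter_screen = logging.Formatter('%(levelname)s: %(message)s')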
Example #6
class HsMonitor:
    def __init__(self):
        # Setup the log mode
        setLogMode(MODE_BOTH)

        # Read the configuration file
        try:
            self.cfg = EConfigParser()
            self.cfg.read([CONFIG_INI_PATH1, CONFIG_INI_PATH2,
                           CONFIG_INI_PATH3])
        except Exception:
            log("HsMonitor: Cannot open the config file!", severity=2)
            return
        else:
            log("HsMonitor: Initialize variables.")

            # List of all the threads
            self.hsThreads = []
        # Assume one server (datastore); if a local server is also
        # specified it will be added.
        self.numServers = 1

    def startAll(self):
        """Setup and start all threads."""
        try:
            # Create StorageManager and Interpreter for BufferListener
            storMan = StorageManager()
            interpr = Interpreter(storMan)

            # Create BufferListener
            buffLis = self.createBufferListener(interpr)

            if buffLis.conn:
                self.hsThreads.append(buffLis)

            # Check scheduler
            # Get the nagios configuration section from config file
            nagiosConf = self.cfg.itemsdict('NagiosPush')
            machine = re.search(r'([a-z0-9]+)\.zip',
                                self.cfg.get('Station', 'Certificate'))
            nagiosConf['machine_name'] = machine.group(1)
            checkSched = self.createCheckScheduler(interpr, nagiosConf)
            eventRate = checkSched.getEventRate()
            storMan.addObserver(eventRate)
            self.hsThreads.append(checkSched)

            # Uploader central
            up = self.createUploader(0, "Upload-datastore", nagiosConf)
            self.hsThreads.append(up)
            storMan.addObserver(up)
            up.setNumServer(self.numServers)

            # Try local server
            try:
                up2 = self.createUploader(1, "Upload-local", nagiosConf)
                self.hsThreads.append(up2)
                storMan.addObserver(up2)
                self.numServers += 1
                up.setNumServer(self.numServers)
                up2.setNumServer(self.numServers)
            except Exception as msg:
                log("HsMonitor: Error while parsing local server: %s." % msg)
                log("HsMonitor: Will not upload to local server!")

            # Set number of servers for our own StorageManager
            storMan.setNumServer(self.numServers)
            storMan.clearOldUploadedEvents()

            # Start all threads, running their run() function.
            for thread in self.hsThreads:
                thread.start()

        except Exception as msg:
            log("Error HsMonitor: %s" % msg, severity=2)
            exit(1)
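Note: startAll registers the event-rate check and the uploaders as observers of the StorageManager via addObserver. The observer machinery itself is not shown; a minimal sketch of the pattern as it appears to be used here (class and method names are hypothetical) is:

class Observable(object):
    # Hypothetical minimal observer pattern, for illustration only.
    def __init__(self):
        self.observers = []

    def addObserver(self, observer):
        self.observers.append(observer)

    def notifyObservers(self, event):
        # A StorageManager-like object would call this when new events
        # arrive, so uploaders and checks can react.
        for observer in self.observers:
            observer.update(event)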
Example #7
class HsMonitor(object):

    def __init__(self):
        # setup the log mode
        log_filename = 'log-testfornagiospushfromhisparc'
        handler = logging.handlers.TimedRotatingFileHandler(
            log_filename, when='midnight', backupCount=14)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(level=logging.DEBUG)

        # read the configuration file
        try:
            self.cfg = EConfigParser()
            self.cfg.read([CONFIG_INI_PATH1, CONFIG_INI_PATH2])
        except Exception:
            logger.error("Cannot open the config file!")
            return
        else:
            logger.debug("Initilize variables")

            # list of all the threads
            self.hsThreads = []
        # Assume one server (eventwarehouse); if a local server is also
        # specified it will be added.
        self.numServers = 1

    def createBufferListener(self, interpreter):
        # get the information from configuration file
        bufferdb = {}
        bufferdb['host'] = self.cfg.get('BufferDB', 'Host')
        bufferdb['db'] = self.cfg.get('BufferDB', 'DB')
        bufferdb['user'] = self.cfg.get('BufferDB', 'Username')
        bufferdb['password'] = self.cfg.get('BufferDB', 'Password')
        bufferdb['poll_interval'] = self.cfg.get('BufferDB', 'Poll_Interval')
        bufferdb['poll_limit'] = self.cfg.get('BufferDB', 'Poll_Limit')
        bufferdb['keep_buffer_data'] = self.cfg.get('BufferDB',
                                                    'KeepBufferData')

        # create an instance of BufferListener class
        buffLis = BufferListener.BufferListener(bufferdb, interpreter)

        if not buffLis:
            logger.error("Cannot connect to the buffer database!")
            return None
        # TODO better error handling

        return buffLis

    def createCheckScheduler(self, interpreter):
        # get the nagios configuration section from config file
        nagiosConf = self.cfg.itemsdict('NagiosPush')
        machine = re.search(r'([a-z0-9]+)\.zip',
                            self.cfg.get('Station', 'Certificate'))
        if machine is None:
            nagiosConf['machine_name'] = 'test32bit'
        else:
            nagiosConf['machine_name'] = machine.group(1)
        checkSched = CheckScheduler(nagiosConf, interpreter)
        return checkSched

    def createUploader(self, serverID, section_name, numServers):
        # TODO create default values if parameter doesn't exist
        stationID = self.cfg.get("Station", "StationID")
        url = self.cfg.get(section_name, "URL")
        passw = self.cfg.get(section_name, "Password")
        minbs = self.cfg.ifgetint(section_name, "MinBatchSize", 50)
        maxbs = self.cfg.ifgetint(section_name, "MaxBatchSize", 50)
        if minbs > maxbs:
            raise Exception("Minimum batch size must not exceed maximum")
        minwait = self.cfg.ifgetfloat(section_name, "MinWait", 1.0)
        maxwait = self.cfg.ifgetfloat(section_name, "MaxWait", 60.0)
        up = Uploader(serverID, numServers, stationID, passw, url,
                      minwait, maxwait, minbs, maxbs)
        return up
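Note: createUploader hands the min/max batch sizes and wait times to the Uploader, whose implementation is not shown. As a rough, purely illustrative sketch (get_events and send_batch are hypothetical callables), those parameters could drive a batching loop with capped backoff like this:

import time


def upload_loop(get_events, send_batch, min_batch=50, max_batch=50,
                min_wait=1.0, max_wait=60.0):
    # Hypothetical sketch of how batch-size and wait limits might be used.
    wait = min_wait
    while True:
        batch = get_events(limit=max_batch)
        if len(batch) < min_batch:
            # Not enough events yet: wait longer, up to max_wait.
            wait = min(wait * 2, max_wait)
        elif send_batch(batch):
            # Successful upload: return to the short wait.
            wait = min_wait
        else:
            # Upload failed: back off, up to max_wait.
            wait = min(wait * 2, max_wait)
        time.sleep(wait)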
Example #8
    def check(self, sched, config):
        while True:
            try:
                warnRange = config['triggerrate_warn']
                warn = self.parse_range(warnRange)
                critRange = config['triggerrate_crit']
                crit = self.parse_range(critRange)
            except Exception:
                logger.critical('Unable to read config.ini in %s' %
                                self.nagiosResult.serviceName)
                self.nagiosResult.status_code = CRITICAL
                # Without valid ranges the rest of this check cannot run
                yield self.nagiosResult
                continue

            wmin, wmax = warn
            cmin, cmax = crit

            self.triggerRateValues = self.interpreter.getTriggerRate()
            self.lastupdate = self.triggerRateValues.date
            self.trate = self.triggerRateValues.triggerRate

            if self.trate <= cmin or self.trate >= cmax:
                self.nagiosResult.status_code = CRITICAL
            elif self.trate <= wmin or self.trate >= wmax:
                self.nagiosResult.status_code = WARNING
            else:
                self.nagiosResult.status_code = OK

            if self.lastupdate:
                # Timestamp is in GPS time.  Naively treat it as a local
                # timestamp, then subtract the LabVIEW-determined offset
                # between pc clock time (UTC time) and GPS time.

                # Read offset from DAQ config.
                # This offset is (DAQ GPS time - PC clock time).
                # (the number of seconds GPS time is ahead of the PC clock)
                # Note: this offset changed sign between v3 and v4 of the DAQ
                dt_config = EConfigParser()
                dt_config.read(TIME_DIFF_INI)
                offset = dt_config.ifgetint('HiSPARC', 'time_difference', 1e6)

                # calculate event (trigger) time and transform to local PC time
                t_trigger = calendar.timegm(self.lastupdate.timetuple())
                t_trigger -= offset

                # Calculate time difference between current and trigger time
                dt = time.time() - t_trigger
            else:
                # Never updated, make dt very large
                dt = 1e6

            # If last update was significantly longer than time between monitor
            # upload checks, detector is probably stalled
            interval = int(config['triggerrate_interval'])
            if abs(dt) > (2 * interval):
                self.nagiosResult.description = ("No recent triggers. "
                                                 "Trigger rate: %.2f. Last "
                                                 "update: %d seconds ago" %
                                                 (self.trate, dt))
                self.nagiosResult.status_code = CRITICAL
            else:
                self.nagiosResult.description = ("Trigger rate: %.2f. Last "
                                                 "update: %d seconds ago" %
                                                 (self.trate, dt))
            yield self.nagiosResult
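Note: the offset bookkeeping above (offset = DAQ GPS time - PC clock time) means the trigger timestamp has to be shifted back onto the PC clock before it can be compared with time.time(). A small worked example of that arithmetic, with made-up values:

import calendar
import datetime
import time

# Suppose the last trigger carries the GPS timestamp 2024-01-01 12:00:00
# and the DAQ reports GPS time running 18 seconds ahead of the PC clock.
last_update = datetime.datetime(2024, 1, 1, 12, 0, 0)  # GPS timestamp
offset = 18                                            # GPS - PC, in seconds

t_trigger = calendar.timegm(last_update.timetuple())   # naive epoch seconds
t_trigger -= offset                                    # shift onto the PC clock
dt = time.time() - t_trigger                           # seconds since trigger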
Example #9
class HsMonitor(object):

    """The HiSPARC Monitor

    This process spawns several threads to perform tasks.

    - BufferListener: read messages from the MySQL database.
    - CheckScheduler: report status to Nagios.
    - Uploader: upload messages to a datastore server.

    """

    def __init__(self):
        self.config = EConfigParser()
        self.config.read([CONFIG_INI, PERSISTENT_INI, PASSWORD_INI])

        # Setup the log mode
        log_dirname = '../../persistent/logs/hsmonitor/'
        # Making sure the directory exists
        if not os.access(log_dirname, os.F_OK):
            os.makedirs(log_dirname)
        log_filename = os.path.join(log_dirname, 'hsmonitor')

        # Remove any existing handlers
        logger.handlers = []

        # Add file handler
        handler = TimedConcurrentRotatingFileHandler(
            log_filename, when='midnight', backupCount=14, utc=True)
        handler.setFormatter(formatter_file)
        logger.addHandler(handler)

        # Add handler which prints to the screen
        handler = logging.StreamHandler()
        handler.setFormatter(formatter_screen)
        logger.addHandler(handler)

        # Default logging level
        logger.setLevel(level=logging.DEBUG)

        # Logging level for the handlers
        for i, target in enumerate(['File', 'Screen']):
            log_level = self.config.ifgetstr('Logging', '%sLevel' % target,
                                             'debug')
            if log_level in LEVELS:
                logger.handlers[i].setLevel(level=LEVELS[log_level])
                logger.info('%s logging level set to %s' % (target, log_level))
            else:
                logger.warning("Illegal %s logging level '%s' in config, "
                               "using debug" % (target, log_level))

        # List of all the threads
        self.hsThreads = []

        # Assume one server (datastore); if a local server is also
        # specified it will be added.
        self.numServers = 1

    def startAll(self):
        """Setup and start all threads."""
        try:
            # Create StorageManager and Interpreter for BufferListener
            storMan = StorageManager()
            interpr = Interpreter(storMan)

            # Create BufferListener
            buffLis = self.createBufferListener(interpr)

            if buffLis.conn:
                self.hsThreads.append(buffLis)

            # Check scheduler
            # Get the nagios configuration section from config file
            nagiosConf = self.config.itemsdict('NagiosPush')
            machine = re.search(r'([a-z0-9]+)\.zip',
                                self.config.get('Station', 'Certificate'))
            nagiosConf['machine_name'] = machine.group(1)
            checkSched = self.createCheckScheduler(interpr, nagiosConf)
            eventRate = checkSched.getEventRate()
            storMan.addObserver(eventRate)
            self.hsThreads.append(checkSched)

            # Uploader central
            up = self.createUploader(0, "Upload-datastore", nagiosConf)
            self.hsThreads.append(up)
            storMan.addObserver(up)
            up.setNumServer(self.numServers)

            # Try local server
            #try:
            #    up2 = self.createUploader(1, "Upload-local", nagiosConf)
            #    self.hsThreads.append(up2)
            #    storMan.addObserver(up2)
            #    self.numServers += 1
            #    up.setNumServer(self.numServers)
            #    up2.setNumServer(self.numServers)
            #except Exception, msg:
            #    logger.debug("Error while parsing local server: %s." % msg)
            #    logger.debug("Will not upload to a local server.")#

            # Set number of servers for our own StorageManager
            storMan.setNumServer(self.numServers)
            storMan.clearOldUploadedEvents()

            # Start all threads, running their run() function.
            for thread in self.hsThreads:
                thread.start()

        except Exception as msg:
            logger.critical("Error HsMonitor: %s" % msg)
            sys.exit(1)
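Note: machine_name is taken from the station certificate filename with a regular expression (the dot is best escaped so only a literal '.zip' matches). For example, with a hypothetical certificate name:

import re

# Hypothetical config value: Certificate = station501.zip
match = re.search(r'([a-z0-9]+)\.zip', 'station501.zip')
print(match.group(1))  # -> station501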
Example #10
class Updater(object):

    def __init__(self):
        self.scheduler = sched.scheduler(time.time, time.sleep)
        self.checker = Checker()
        self.config = EConfigParser()
        self.config.read([CONFIG_INI, PERSISTENT_INI])
        # Time between checks in seconds
        self.timeBetweenChecks = self.config.ifgetint(
            'Update', 'IntervalBetweenChecks', 1800)
        # Start and stop time of the interval in which checks may be
        # performed on a day (in seconds since midnight) e.g. 7200 is 2am
        self.timeStartCheckInterval = self.config.ifgetint(
            'Update', 'CheckerIntervalStartTime', 0)
        self.timeStopCheckInterval = self.config.ifgetint(
            'Update', 'CheckerIntervalStopTime', 24 * 60 * 60)
        # Boolean determining whether there will be an initial delay
        self.checkerInitialDelay = self.config.ifgetint(
            'Update', 'CheckerInitialDelay', 0)

        # Setup the log mode
        log_dirname = '../../persistent/logs/updater/'
        # Making sure the directory exists
        if not os.access(log_dirname, os.F_OK):
            os.makedirs(log_dirname)
        log_filename = os.path.join(log_dirname, 'updater')

        # Remove any existing handlers
        logger.handlers = []

        # Add file handler
        handler = TimedRotatingFileHandler(log_filename, when='midnight',
                                           backupCount=14, utc=True)
        handler.setFormatter(formatter_file)
        logger.addHandler(handler)

        # Add handler which prints to the screen
        handler = logging.StreamHandler()
        handler.setFormatter(formatter_screen)
        logger.addHandler(handler)

        # Default logging level
        logger.setLevel(level=logging.DEBUG)

        # Logging level for the handlers
        for i, target in enumerate(['File', 'Screen']):
            log_level = self.config.ifgetstr('Logging', '%sLevel' % target,
                                             'debug')
            if log_level in LEVELS:
                logger.handlers[i].setLevel(level=LEVELS[log_level])
                logger.info('%s logging level set to %s' % (target, log_level))
            else:
                logger.warning("Illegal %s logging level '%s' in config, "
                               "using debug" % (target, log_level))

    def checkIfUpdateToInstall(self):
        """Check if there is already an admin update to install

        Also check if you are currently in user or admin mode

        """
        is_admin = checkFiles.checkIfAdmin()
        currentAdmin = self.config.ifgetint("Version", "CurrentAdmin", 0)
        currentUser = self.config.ifgetint("Version", "CurrentUser", 0)

        logger.info("You are Administrator: %s" % is_admin)
        logger.info("Current Admin Version: %s" % currentAdmin)
        logger.info("Current User Version:  %s" % currentUser)

        if is_admin:
            location = "../../persistent/downloads"
            found, file_found = checkFiles.checkIfNewerFileExists(
                location, ADMINUPDATE_NAME, int(currentAdmin))
            if found:
                logger.info("Found: %s" % file_found)
                os.system(".\\runAdminUpdate.bat "
                          "../../persistent/downloads/%s" % file_found)

    def calculateInitialDelay(self):
        if self.checkerInitialDelay == 1:
            # Calculate the current time and the times at which today's
            # check interval starts and stops
            now = int(time.time())
            last_midnight = now - (now % 86400)
            today_at_starttime = last_midnight + self.timeStartCheckInterval
            today_at_stoptime = last_midnight + self.timeStopCheckInterval

            # Check if you are allowed to update already
            # (in the interval between starttime and stoptime)
            if today_at_starttime < now < today_at_stoptime:
                today_random_moment = random.randint(now, today_at_stoptime)
                return today_random_moment - now
            else:
                tomorrow_at_start = today_at_starttime + 86400
                tomorrow_at_stop = today_at_stoptime + 86400
                tomorrow_random_moment = random.randint(tomorrow_at_start,
                                                        tomorrow_at_stop)
                return tomorrow_random_moment - now
        else:
            return 0

    def performOneUpdateCheck(self):
        """First check for and update to install

        The first check may be delayed to ensure it occurs during a
        certain period of the day.

        """
        delay = self.calculateInitialDelay()
        self.scheduler.enter(delay, 1, self.checker.checkForUpdates, [])
        self.scheduler.run()

    def performContinuousCheck(self):
        """Continuously check if there for new updates from the server

        Checks are performed on an interval determined by 'timeBetweenChecks'.

        """
        while True:
            self.scheduler.enter(self.timeBetweenChecks, 1,
                                 self.checker.checkForUpdates, [])
            self.scheduler.run()
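Note: performContinuousCheck drives the standard-library sched module: each pass queues one checkForUpdates call timeBetweenChecks seconds ahead and then blocks in run() until it has fired. A standalone sketch of that pattern (tick stands in for checker.checkForUpdates):

import sched
import time


def tick():
    print('checking for updates')  # stand-in for checker.checkForUpdates


scheduler = sched.scheduler(time.time, time.sleep)
for _ in range(3):                   # the real loop runs forever
    scheduler.enter(5, 1, tick, [])  # fire tick() five seconds from now
    scheduler.run()                  # blocks until the queued event fires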