Example #1
    def get_tablenames(project):
        """
        List the available tablenames for one particular project.

        The names are discovered via a directory listing of the
        project's raw subdirectory.

        ex: Datalogger/get_tablenames/{projectname}
        <projectname> has to be something from Datalogger/get_projects

        returns:
        json(list of possible tablenames for given project)
        """
        tablenames = DataLogger.get_tablenames(basedir, project)
        return json.dumps(tablenames)
Example #2
def main():
    """
    Archive every matching project/tablename combination for every
    datestring in the configured date range.

    Projects and tablenames are optionally filtered by the command line
    arguments args.project and args.tablename; everything else relies on
    module-level state (startdate, args, archive).
    """
    for datestring in tuple(DataLogger.datewalker(startdate, args.enddate)):
        start_ts, stop_ts = DataLogger.get_ts_for_datestring(datestring)
        logging.debug("working on datestring %s (from %s to %s)", datestring, start_ts, stop_ts)
        for project in DataLogger.get_projects(args.basedir):
            if args.project is not None and project != args.project:
                logging.debug("skipping project %s", project)
                continue
            logging.debug("working on project %s", project)
            for tablename in DataLogger.get_tablenames(args.basedir, project):
                if args.tablename is not None and tablename != args.tablename:
                    logging.debug("skipping tablename %s", tablename)
                    continue
                # BUG FIX: this debug line used to sit directly after the
                # "continue" above and was therefore unreachable; it now runs
                # for every processed tablename, mirroring the project loop.
                logging.debug("working on tablename %s", tablename)
                archive(project, tablename, datestring)
Example #3
def main():
    project = "nagios"
    for tablename in DataLogger.get_tablenames(BASEDIR, project):
        datalogger = DataLogger(BASEDIR, project, tablename)
        for datestring in datewalker("2015-04-01", "2015-09-23"):
            print datestring, tablename
            try:
                caches = datalogger.get_caches(datestring)
                #for cachetype, cachedata in caches.items():
                #    print "Caches for %s" % cachetype
                #    for key, filename in cachedata["keys"].items():
                #        print "\tfound %s in\n\t\t%s" % (key, filename)
                # there should be only one tsa file
                #print "Number of caches TimeseriesArray objects:", len(caches["tsa"]["keys"])
                #print "Number of caches TimeseriesArrayStats objects:", len(caches["tsastat"]["keys"])
                #print "Number of caches Timeseries objects:", len(caches["ts"]["keys"])
                #print "Number of caches TimeseriesStats objects:", len(caches["tsstat"]["keys"])
                if len(caches["tsa"]["keys"]) == 0:
                    print datestring, "TimeseriesArray cache missing"
                    datalogger.load_tsa(datestring)
                else:
                    #datalogger[datestring] # read from raw, and store tsa and ts caches
                    if len(caches["tsa"]["keys"]) != len(caches["tsastat"]["keys"]):
                        print datestring, "TimeseriesArrayStats caches missing"
                        datalogger.load_tsastats(datestring)
                    else:
                        if len(caches["ts"]["keys"]) != len(caches["tsstat"]["keys"]):
                            print datestring, "Number ob Timeseries and TimeseriesStats should be the same"
                        if len(caches["ts"]["keys"]) > len(caches["tsstat"]["keys"]):
                            print datestring, "some missing TimeseriesStats"
            except tilak_datalogger.DataLoggerRawFileMissing as exc:
                #logging.exception(exc)
                logging.info("%s no RAW Data available", datestring)
                pass
            except StandardError as exc:
                logging.exception(exc)
                pass
Example #4
 def setUp(self):
     """Build a DataLogger fixture from the first project/tablename found under /var/rrd."""
     basedir = "/var/rrd"
     # pick the first available project and its first table as the fixture target
     project = DataLogger.get_projects(basedir)[0]
     tablename = DataLogger.get_tablenames(basedir, project)[0]
     self.basedir = basedir
     self.datestring = "2015-11-30"
     self.project = project
     self.tablename = tablename
     self.datalogger = DataLogger(basedir, project, tablename)