Example #1
def loadStorageSchemas(conf_file):
    """Parse the storage-schemas configuration file into a list of
    PatternSchema objects, ending with the catch-all default schema."""
    schema_list = []
    config = OrderedConfigParser()
    config.read(conf_file)

    for section in config.sections():
        options = dict(config.items(section))

        pattern = options.get('pattern')
        xff = float(options.get('xfilesfactor'))
        agg = options.get('aggregationmethod')
        retentions = options.get('retentions').split(',')
        archives = [Archive.fromString(s).getTuple() for s in retentions]
        cache_retention = kenshin.RetentionParser.parse_time_str(
            options.get('cacheretention'))
        metrics_max_num = options.get('metricsperfile')
        cache_ratio = 1.2

        # Warn about invalid retention/xff combinations, but keep going.
        try:
            kenshin.validate_archive_list(archives, xff)
        except kenshin.InvalidConfig:
            log.err("Invalid schema found in %s." % section)

        schema = PatternSchema(section, pattern, xff, agg, archives,
                               int(cache_retention), int(metrics_max_num),
                               float(cache_ratio))
        schema_list.append(schema)

    # The default schema matches anything no configured pattern caught.
    schema_list.append(defaultSchema)
    return schema_list
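The option names read above map directly onto a Graphite-style storage-schemas INI file. A hypothetical section (the section name and all values are illustrative, not taken from the source; only the keys are grounded in the code above) could look like:

[metrics]
pattern = ^metrics\.
xfilesfactor = 0.5
aggregationmethod = average
retentions = 10s:1d,60s:30d
cacheretention = 10min
metricsperfile = 40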
Example #2
    def stopService(self):
        # Flush every file cache before shutting down so buffered
        # datapoints are not lost.
        try:
            file_cache_idxs = MetricCache.getAllFileCaches()
            writeCachedDataPointsWhenStop(file_cache_idxs)
        except Exception as e:
            log.err('write error when stopping service: %s' % e)
        Service.stopService(self)
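This method overrides the shutdown hook of Twisted's Service base class. A minimal sketch of that lifecycle, assuming a hypothetical WriterService (only Service and its hooks come from Twisted; everything else here is illustrative):

from twisted.application.service import Service

class WriterService(Service):
    # Hypothetical skeleton of the pattern used above.
    def startService(self):
        Service.startService(self)
        # kick off background writers here

    def stopService(self):
        # flush pending data first, then let the base class
        # mark the service as stopped
        Service.stopService(self)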
Example #3
    def __call__(self, *args, **kwargs):
        # Invoke every registered handler; one failing handler must
        # not keep the rest from running.
        for h in self.handlers:
            try:
                h(*args, **kwargs)
            except Exception as e:
                log.err(None,
                        "Exception %s in %s event handler: args=%s, kwargs=%s"
                        % (e, self.name, args, kwargs))
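Handlers would be registered on the dispatcher before it is invoked. A small usage sketch, assuming a hypothetical Event class that carries this __call__ plus a handlers list and an addHandler method:

cache_full = Event('cacheFull')
cache_full.addHandler(lambda: log.msg('cache is full'))  # illustrative handler
cache_full()  # runs every handler; a failing one is logged, not raised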
Example #4
def writeCachedDataPointsWhenStop(file_cache_idxs):
    # Shutdown path: drain every cache unconditionally so buffered
    # datapoints reach disk; log per-file errors instead of raising.
    pop_func = MetricCache.pop
    for schema_name, file_idx in file_cache_idxs:
        datapoints = pop_func(schema_name, file_idx, int(time.time()), False)
        if datapoints:
            file_path = getFilePath(schema_name, file_idx)
            try:
                kenshin.update(file_path, datapoints)
            except Exception as e:
                log.err('Error writing to %s: %s' % (file_path, e))
Example #5
def writeForever():
    # Main loop of the writer thread: keep flushing writable caches
    # while the reactor is up.
    while reactor.running:
        write = False
        try:
            file_cache_idxs = MetricCache.writableFileCaches()
            if file_cache_idxs:
                write = writeCachedDataPoints(file_cache_idxs)
        except Exception as e:
            log.err('write error: %s' % e)
        # Sleep only when there was nothing to write or an error
        # occurred; otherwise loop again immediately.
        if not write:
            time.sleep(1)
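writeForever blocks in a loop, so it cannot run on the reactor's main thread. Carbon-style daemons usually launch it in the reactor thread pool; a sketch under that assumption (startWriter is a hypothetical name):

from twisted.internet import reactor

def startWriter():
    # Run the blocking write loop off the main thread; it exits on
    # its own once reactor.running turns False at shutdown.
    reactor.callInThread(writeForever)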
Example #6
    def put(self, pos_idx, datapoint):
        with self.lock:
            try:
                base_idx = self.base_idxs[pos_idx]
                ts, val = datapoint

                self.max_ts = max(self.max_ts, ts)
                if self.start_ts is None:
                    # First datapoint: anchor the cache window on a
                    # resolution-aligned timestamp.
                    self.start_ts = ts - ts % self.resolution
                    idx = base_idx
                else:
                    # Later datapoints wrap around the ring buffer.
                    # Integer division keeps idx an int on Python 3 as
                    # well (plain / was floor division on Python 2).
                    offset = (ts - self.start_ts) // self.resolution
                    idx = base_idx + (self.start_offset + offset) % self.cache_size

                self.points[idx] = val
            except Exception as e:
                log.err('put error in FileCache: %s' % e)
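The index arithmetic implements a per-metric ring buffer keyed on timestamp. A worked example with made-up values (resolution=10, cache_size=6, base_idx=0, start_offset=0, start_ts=100):

resolution, cache_size = 10, 6            # hypothetical values
base_idx, start_offset, start_ts = 0, 0, 100

for ts in (100, 130, 170):
    offset = (ts - start_ts) // resolution
    idx = base_idx + (start_offset + offset) % cache_size
    print(ts, idx)  # (100, 0), (130, 3), (170, 1): ts=170 wraps around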
Example #7
    def put(self, pos_idx, datapoint):
        # Same logic as Example #6, with debug logging around the
        # ring-buffer bookkeeping.
        log.debug("retention: %s, cache_size: %s, points_num: %s" %
                  (self.retention, self.cache_size, self.points_num))
        with self.lock:
            try:
                base_idx = self.base_idxs[pos_idx]
                ts, val = datapoint

                self.max_ts = max(self.max_ts, ts)
                if self.start_ts is None:
                    self.start_ts = ts - ts % self.resolution
                    idx = base_idx
                else:
                    # Integer division keeps idx an int (see Example #6).
                    offset = (ts - self.start_ts) // self.resolution
                    idx = base_idx + (self.start_offset + offset) % self.cache_size

                log.debug("put idx: %s, ts: %s, start_ts: %s, "
                          "start_offset: %s, retention: %s" %
                          (idx, ts, self.start_ts, self.start_offset,
                           self.retention))
                self.points[idx] = val
            except Exception as e:
                log.err('put error in FileCache: %s' % e)
Example #8
def writeCachedDataPoints(file_cache_idxs):
    # Normal write path: pop each writable cache and persist its
    # datapoints, recording instrumentation for every update.
    pop_func = MetricCache.pop
    for schema_name, file_idx in file_cache_idxs:
        datapoints = pop_func(schema_name, file_idx)
        file_path = getFilePath(schema_name, file_idx)

        try:
            t1 = time.time()
            kenshin.update(file_path, datapoints)
            update_time = time.time() - t1
        except Exception as e:
            log.err('Error writing to %s: %s' % (file_path, e))
            instrumentation.incr('errors')
        else:
            point_cnt = len(datapoints)
            instrumentation.incr('committedPoints', point_cnt)
            instrumentation.append('updateTimes', update_time)

            if settings.LOG_UPDATES:
                log.updates("wrote %d datapoints for %s in %.5f secs" %
                            (point_cnt, schema_name, update_time))

    # Returning True tells writeForever (Example #5) to loop again
    # without sleeping.
    return True
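instrumentation.incr and instrumentation.append accumulate the daemon's self-reported stats. A minimal sketch of such counters, purely illustrative and not rurouni's actual instrumentation module:

counters = {}
timings = {}

def incr(stat, amount=1):
    # e.g. incr('committedPoints', point_cnt) above
    counters[stat] = counters.get(stat, 0) + amount

def append(stat, value):
    # e.g. append('updateTimes', update_time) above
    timings.setdefault(stat, []).append(value)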