Example #1
0
    def get(self, end_ts=None, clear=False):
        """Read datapoints out of the ring buffer for every metric.

        Args:
            end_ts: optional end timestamp; when given, the read window ends
                at the offset derived from it, otherwise at the current write
                head (start_offset + points_num, modulo cache_size).
            clear: when True, zero out the returned points and advance
                start_ts/start_offset past the returned window.

        Returns:
            [] when the cache is empty, otherwise an iterable of
            (timestamp, per-metric-values-tuple) pairs.
        """
        with self.lock:
            if self.metricEmpty():
                return []
            begin_offset = self.start_offset
            if end_ts:
                end_offset = self.get_offset(end_ts)
            else:
                # No explicit end: read everything buffered so far.
                end_offset = (begin_offset + self.points_num) % self.cache_size

            log.debug("begin_offset: %s, end_offset: %s end_ts: %s, clear: %s" %
                      (begin_offset, end_offset, end_ts, clear,))

            # One slot per metric; rs[i] collects the slice for metric i.
            rs = [None] * self.metrics_max_num
            if begin_offset < end_offset:
                # Contiguous window: a single slice per metric.
                length = end_offset - begin_offset
                for i, base_idx in enumerate(self.base_idxs):
                    begin_idx = base_idx + begin_offset
                    end_idx = base_idx + end_offset
                    val = self.points[begin_idx: end_idx]
                    rs[i] = val
                    if clear:
                        self.clearPoint(begin_idx, end_idx)
            else:
                # wrap around
                # NOTE(review): begin_offset == end_offset also lands here and
                # returns the full cache_size window — presumably intentional
                # for a full buffer; confirm against the writer side.
                length = self.cache_size - begin_offset + end_offset
                for i, base_idx in enumerate(self.base_idxs):
                    begin_idx = base_idx + begin_offset
                    end_idx = base_idx + end_offset
                    # Tail segment (up to the end of this metric's region)...
                    val = self.points[begin_idx: base_idx+self.cache_size]
                    # ...followed by the head segment from the region start.
                    val += self.points[base_idx: begin_idx]
                    rs[i] = val
                    if clear:
                        self.clearPoint(begin_idx, base_idx+self.cache_size)
                        self.clearPoint(base_idx, end_idx)

            # timestamps
            # One timestamp per returned point, spaced by the resolution.
            timestamps = [self.start_ts + i * self.resolution
                          for i in range(length)]

            if clear:
                # Advance the window start past what we just handed out.
                next_ts = timestamps[-1] + self.resolution
                if self.max_ts < next_ts:
                    # Everything buffered has been consumed: reset the cache
                    # so the next put() re-anchors start_ts.
                    self.start_ts = None
                    self.start_offset = 0
                else:
                    self.start_ts = next_ts
                    self.start_offset = end_offset

            return zip(timestamps, zip(*rs))
Example #2
0
 def stringReceived(self, data):
     """Decode a pickled batch of datapoints and hand each one on.

     Args:
         data: pickled payload, expected to unpickle to an iterable of
             (metric, (timestamp, value)) pairs.

     Malformed payloads are logged and dropped; malformed individual
     datapoints are logged and skipped.
     """
     try:
         # SECURITY NOTE: pickle.loads on network data can execute arbitrary
         # code — acceptable only on a trusted network.
         datapoints = pickle.loads(data)
         log.debug(datapoints)
     except Exception:
         log.listener("invalid pickle received from %s, ignoring" %
                      self.peerName)
         # Bug fix: without this return, `datapoints` is unbound below and
         # the loop raises NameError instead of ignoring the bad payload.
         return
     for metric, (timestamp, value) in datapoints:
         try:
             # Normalize to (int, float); reject anything non-numeric.
             datapoint = int(timestamp), float(value)
         except Exception as e:
             log.debug("error in pickle receiver for: %s, error: %s" %
                       (metric, e))
             continue
         self.metricReceived(metric, datapoint)
Example #3
0
    def put(self, pos_idx, datapoint):
        """Store one (timestamp, value) datapoint into this file's ring buffer.

        Args:
            pos_idx: position index of the metric within this file; used to
                look up the metric's base index in the flat points array.
            datapoint: (timestamp, value) pair.

        Errors are logged via log.err rather than raised, so a bad datapoint
        cannot take down the receiver.
        """
        log.debug("retention: %s, cache_size: %s, points_num: %s" %
                  (self.retention, self.cache_size, self.points_num))
        with self.lock:
            try:
                base_idx = self.base_idxs[pos_idx]
                ts, val = datapoint

                self.max_ts = max(self.max_ts, ts)
                if self.start_ts is None:
                    # First point: anchor start_ts down to a resolution boundary.
                    self.start_ts = ts - ts % self.resolution
                    idx = base_idx
                else:
                    # Bug fix: use floor division. True division ('/') returns
                    # a float on Python 3, producing a float list index that
                    # raises TypeError — which the broad except below would
                    # silently swallow, dropping every datapoint. '//' is
                    # identical for Python 2 ints, so behavior there is kept.
                    offset = (ts - self.start_ts) // self.resolution
                    idx = base_idx + (self.start_offset + offset) % self.cache_size

                log.debug("put idx: %s, ts: %s, start_ts: %s, start_offset: %s, retention: %s" %
                          (idx, ts, self.start_ts, self.start_offset, self.retention))
                self.points[idx] = val
            except Exception as e:
                log.err('put error in FileCache: %s' % e)
Example #4
0
def writeCachedDataPoints(file_cache_idxs):
    """Flush cached datapoints to disk for each (schema_name, file_idx) pair.

    Pops each file's datapoints from MetricCache, writes them with
    kenshin.update, and records success/error counters and update timings.
    Always returns True.
    """
    pop = MetricCache.pop
    for schema_name, file_idx in file_cache_idxs:
        points = pop(schema_name, file_idx)
        path = getFilePath(schema_name, file_idx)

        try:
            started = time.time()
            log.debug('filepath: %s, datapoints: %s' % (path, points))
            kenshin.update(path, points)
            elapsed = time.time() - started
        except Exception as e:
            # Count the failure but keep flushing the remaining files.
            log.err('Error writing to %s: %s' % (path, e))
            instrumentation.incr('errors')
        else:
            written = len(points)
            instrumentation.incr('committedPoints', written)
            instrumentation.append('updateTimes', elapsed)

            if settings.LOG_UPDATES:
                log.updates("wrote %d datapoints for %s in %.5f secs" %
                            (written, schema_name, elapsed))

    return True
Example #5
0
 def put(self, metric, datapoint):
     """Route *datapoint* to the FileCache slot that owns *metric*."""
     log.debug("MetricCache received (%s, %s)" % (metric, datapoint))
     schema_name, file_idx, pos_idx = self.getMetricIdx(metric)
     self.schema_caches[schema_name][file_idx].put(pos_idx, datapoint)