コード例 #1
0
 def _get_or_create_unaggregated_timeseries(self, metrics, version=3):
     """Fetch the raw (unaggregated) timeseries payload for each metric.

     Missing metrics are created with an empty payload first; creation
     and retrieval are batched into one non-transactional pipeline
     round-trip. Metrics with an empty payload map to None.
     """
     pipe = self._client.pipeline(transaction=False)
     for metric in metrics:
         hash_key = self._metric_key(metric)
         field = self._unaggregated_field(version)
         # Two queued commands per metric: create-if-absent, then read.
         pipe.hsetnx(hash_key, field, "")
         pipe.hget(hash_key, field)
     # Replies arrive as (hsetnx-flag, hget-payload) pairs, one per metric.
     reply_pairs = utils.grouper(pipe.execute(), 2)
     result = {}
     for metric, (_created, payload) in six.moves.zip(metrics, reply_pairs):
         # Empty payload means the metric was just created: report None.
         result[metric] = payload if payload else None
     return result
コード例 #2
0
ファイル: redis.py プロジェクト: luo-zn/gnocchi
 def _get_or_create_unaggregated_timeseries(self, metrics, version=3):
     """Return the unaggregated-data blob for every metric in *metrics*.

     A metric that does not exist yet is created empty within the same
     pipeline; its entry in the returned mapping is None.
     """
     pipe = self._client.pipeline(transaction=False)
     for m in metrics:
         mkey = self._metric_key(m)
         fkey = self._unaggregated_field(version)
         pipe.hsetnx(mkey, fkey, "")  # create the field if it is absent
         pipe.hget(mkey, fkey)        # then read whatever is stored
     # Pair up the (hsetnx, hget) replies; the hsetnx flag is discarded
     # and an empty-string blob is normalized to None.
     pairs = utils.grouper(pipe.execute(), 2)
     return {m: (blob or None)
             for m, (_flag, blob) in six.moves.zip(metrics, pairs)}
コード例 #3
0
def bulk_delete(conn, bucket, objects):
    """Delete *objects* (an iterable of keys) from *bucket* in batches.

    S3's DeleteObjects call accepts at most 1000 keys per request, so
    the keys are chunked accordingly.
    """
    # TODO(jd) Parallelize?
    deleted = 0
    for chunk in utils.grouper(objects, 1000):
        payload = {
            'Objects': [{'Key': key} for key in chunk],
            # FIXME(jd) Use Quiet mode, but s3rver does not seem to
            # support it
            # 'Quiet': True,
        }
        resp = conn.delete_objects(Bucket=bucket, Delete=payload)
        deleted += len(resp['Deleted'])
    LOG.debug('%s objects deleted, %s objects skipped',
              deleted, len(objects) - deleted)
コード例 #4
0
ファイル: s3.py プロジェクト: luo-zn/gnocchi
def bulk_delete(conn, bucket, objects):
    """Remove every key in *objects* from *bucket*, 1000 keys at a time."""
    # NOTE(jd) The maximum object to delete at once is 1000
    # TODO(jd) Parallelize?
    deleted = 0
    for batch in utils.grouper(objects, 1000):
        keys = [{'Key': name} for name in batch]
        response = conn.delete_objects(
            Bucket=bucket,
            Delete={
                'Objects': keys,
                # FIXME(jd) Use Quiet mode, but s3rver does not seem to
                # support it
                # 'Quiet': True,
            })
        deleted += len(response['Deleted'])
    LOG.debug('%s objects deleted, %s objects skipped',
              deleted, len(objects) - deleted)
コード例 #5
0
import sys

from gnocchi import utils


# Estimated worst-case storage cost of a single aggregated point, in bytes.
WORST_CASE_BYTES_PER_POINT = 8.04


# Arguments must come in <granularity> <timespan> pairs.
if len(sys.argv[1:]) % 2 != 0:
    print("Usage: %s <granularity> <timespan> ... <granularity> <timespan>"
          % sys.argv[0])
    sys.exit(1)


def sizeof_fmt(num, suffix='B'):
    """Render *num* as a human-readable size using binary (1024) prefixes."""
    value = num
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= 1024.0
    # Anything beyond Zi falls through to the largest prefix.
    return "%.1f%s%s" % (value, 'Yi', suffix)


size = 0
for gran_arg, span_arg in utils.grouper(sys.argv[1:], 2):
    granularity = utils.to_timespan(gran_arg)
    timespan = utils.to_timespan(span_arg)
    # Number of points this (granularity, timespan) pair will store.
    points = timespan.total_seconds() / granularity.total_seconds()
    cursize = points * WORST_CASE_BYTES_PER_POINT
    size += cursize
    print("%s over %s = %d points = %s"
          % (gran_arg, span_arg, points, sizeof_fmt(cursize)))

print("Total: " + sizeof_fmt(size))
コード例 #6
0
# Estimated worst-case storage cost of a single aggregated point, in bytes.
WORST_CASE_BYTES_PER_POINT = 8.04


# One <number of agg methods> argument, then <granularity> <timespan> pairs.
if (len(sys.argv) - 2) % 2:
    print("Usage: %s <number of agg methods> <granularity> <timespan> ..."
          % sys.argv[0])
    sys.exit(1)


def sizeof_fmt(num, suffix='B'):
    """Format *num* with IEC binary prefixes (Ki, Mi, ... up to Yi)."""
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if -1024.0 < num < 1024.0:
            return "%3.1f%s%s" % (num, prefix, suffix)
        num /= 1024.0
    # Exhausted every prefix: report in yobibytes.
    return "%.1f%s%s" % (num, 'Yi', suffix)


size = 0
agg_methods = int(sys.argv[1])
for gran_arg, span_arg in utils.grouper(sys.argv[2:], 2):
    granularity = utils.to_timespan(gran_arg)
    timespan = utils.to_timespan(span_arg)
    # timedelta / timedelta division yields the (float) number of points.
    points = timespan / granularity
    cursize = points * WORST_CASE_BYTES_PER_POINT
    size += cursize
    print("%s over %s = %d points = %s"
          % (gran_arg, span_arg, points, sizeof_fmt(cursize)))

# Each aggregation method stores its own copy of every series.
size *= agg_methods

print("Total: " + sizeof_fmt(size))