Code example #1
File: injector.py Project: yi-cloud/gnocchi
def _inject(inc,
            coord,
            store,
            idx,
            metrics,
            measures,
            archive_policy_name="low",
            process=False,
            interval=None):
    LOG.info("Creating %d metrics", metrics)
    with utils.StopWatch() as sw:
        metric_ids = [
            idx.create_metric(uuid.uuid4(), "admin", archive_policy_name).id
            for _ in range(metrics)
        ]
    LOG.info("Created %d metrics in %.2fs", metrics, sw.elapsed())

    LOG.info("Generating %d measures per metric for %d metrics… ", measures,
             metrics)
    now = numpy.datetime64(utils.utcnow())
    with utils.StopWatch() as sw:
        measures = {
            m_id: [
                incoming.Measure(now + numpy.timedelta64(s, "s"),
                                 random.randint(-999999, 999999))
                for s in range(measures)
            ]
            for m_id in metric_ids
        }
    LOG.info("… done in %.2fs", sw.elapsed())

    interval_timer = utils.StopWatch().start()

    while True:
        interval_timer.reset()
        with utils.StopWatch() as sw:
            inc.add_measures_batch(measures)
        total_measures = sum(map(len, measures.values()))
        LOG.info("Pushed %d measures in %.2fs", total_measures, sw.elapsed())

        if process:
            c = chef.Chef(coord, inc, idx, store)

            with utils.StopWatch() as sw:
                for s in inc.iter_sacks():
                    c.process_new_measures_for_sack(s, blocking=True)
            LOG.info("Processed %d sacks in %.2fs", inc.NUM_SACKS,
                     sw.elapsed())
            LOG.info("Speed: %.2f measures/s",
                     float(total_measures) / sw.elapsed())

        if interval is None:
            break
        time.sleep(max(0, interval - interval_timer.elapsed()))

    return total_measures
Code example #2
File: __init__.py Project: aneeshkp/gnocchi
    def _get_unaggregated_timeserie_and_unserialize(
            self, metric, block_size, back_window):
        """Retrieve unaggregated timeserie for a metric and unserialize it.

        Returns a gnocchi.carbonara.BoundTimeSerie object. If the data cannot
        be retrieved, returns None.

        """
        with utils.StopWatch() as sw:
            raw_measures = (
                self._get_unaggregated_timeserie(
                    metric)
            )
        if not raw_measures:
            return
        LOG.debug(
            "Retrieve unaggregated measures "
            "for %s in %.2fs",
            metric.id, sw.elapsed())
        try:
            return carbonara.BoundTimeSerie.unserialize(
                raw_measures, block_size, back_window)
        except carbonara.InvalidData:
            raise CorruptionError(
                "Data corruption detected for %s "
                "unaggregated timeserie" % metric.id)
Code example #3
File: metricd.py Project: amar266/gnocchi
    def __init__(self, worker_id, conf):
        super(MetricProcessor,
              self).__init__(worker_id, conf,
                             conf.metricd.metric_processing_delay)
        self._tasks = []
        self.group_state = None
        self.sacks_with_measures_to_process = set()
        # This stores the last time the processor did a scan on all the
        # sacks it is responsible for.
        self._last_full_sack_scan = utils.StopWatch().start()
Code example #4
    def run(self):
        self._configure()
        # Delay startup so workers are jittered.
        time.sleep(self.startup_delay)

        while not self._shutdown.is_set():
            with utils.StopWatch() as timer:
                self._run_job()
            self._shutdown.wait(max(0, self.interval_delay - timer.elapsed()))
        self._shutdown_done.set()
Code example #5
    def __init__(self, worker_id, conf):
        super(MetricProcessor, self).__init__(
            worker_id, conf, conf.metricd.metric_processing_delay)
        self._tasks = []
        self.group_state = None
        self.sacks_with_measures_to_process = set()
        # This stores the last time the processor did a scan on all the
        # sacks it is responsible for.
        self._last_full_sack_scan = utils.StopWatch().start()
        # Only update the list of sacks to process every
        # metric_processing_delay seconds.
        self._get_sacks_to_process = cachetools.func.ttl_cache(
            ttl=conf.metricd.metric_processing_delay
        )(self._get_sacks_to_process)
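Example #5 re-wraps a bound method with cachetools.func.ttl_cache so that repeated calls made within metric_processing_delay seconds return the cached result instead of re-scanning. A minimal sketch of that wrapping pattern, using a made-up Worker class and _expensive_scan method that are not part of gnocchi:

import cachetools.func


class Worker(object):
    def __init__(self, delay):
        # Re-wrap the bound method: calls made within `delay` seconds
        # reuse the cached result instead of running the scan again.
        self._expensive_scan = cachetools.func.ttl_cache(
            ttl=delay)(self._expensive_scan)

    def _expensive_scan(self):
        print("scanning...")
        return {"sack-0", "sack-1"}


w = Worker(delay=60)
w._expensive_scan()  # runs the scan
w._expensive_scan()  # served from the cache until the TTL expires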
Code example #6
File: metricd.py Project: rabi/gnocchi
    def run(self):
        self._configure()
        # Delay startup so workers are jittered.
        time.sleep(self.startup_delay)

        while not self._shutdown.is_set():
            with utils.StopWatch() as timer:
                try:
                    self._run_job()
                except Exception:
                    LOG.error("Unexpected error during %s job",
                              self.name,
                              exc_info=True)
            self._wake_up.wait(max(0, self.interval_delay - timer.elapsed()))
            self._wake_up.clear()
        self._shutdown_done.set()
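Examples #4 and #6, like the injector loop in example #1, use the stopwatch to keep a fixed cadence: the job is timed, and the loop then waits only for whatever is left of interval_delay, so a slow run does not push every following run later. A stripped-down sketch of that pacing pattern using only the standard library (run, job, interval and shutdown are placeholder names for this sketch, not gnocchi APIs):

import time


def run(job, interval, shutdown):
    """Call job() on a fixed cadence until the shutdown event is set."""
    while not shutdown.is_set():
        started = time.monotonic()
        job()
        elapsed = time.monotonic() - started
        # Wait only for the remainder of the interval (never a negative
        # delay); waiting on the event instead of sleeping lets another
        # thread or a signal handler stop the loop promptly.
        shutdown.wait(max(0, interval - elapsed))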
Code example #7
File: test_utils.py Project: yungjinzhou/gnocchi
    def test_start_stop(self):
        watch = utils.StopWatch()
        watch.start()
        watch.stop()
Code example #8
File: test_utils.py Project: yungjinzhou/gnocchi
    def test_no_states(self):
        watch = utils.StopWatch()
        self.assertRaises(RuntimeError, watch.stop)
Code example #9
File: test_utils.py Project: yungjinzhou/gnocchi
    def test_context_manager(self):
        with utils.StopWatch() as watch:
            pass
        self.assertGreater(watch.elapsed(), 0)
Code example #10
File: test_utils.py Project: yungjinzhou/gnocchi
    def test_elapsed(self):
        watch = utils.StopWatch()
        watch.start()
        watch.stop()
        elapsed = watch.elapsed()
        self.assertAlmostEqual(elapsed, watch.elapsed())
Code example #11
File: test_utils.py Project: yungjinzhou/gnocchi
    def test_no_elapsed(self):
        watch = utils.StopWatch()
        self.assertRaises(RuntimeError, watch.elapsed)
Code example #12
File: __init__.py Project: tanyunshi/gnocchi
    def __enter__(self):
        self.sw = utils.StopWatch()
        self.sw.start()
        return self
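Taken together, the tests in examples #7 to #11 and the __enter__ helper in example #12 pin down the interface these snippets rely on: start(), stop(), reset() and elapsed(), with stop() and elapsed() raising RuntimeError when the watch was never started, plus use as a context manager. A minimal sketch of a class with that behaviour, assuming monotonic timing; this is an illustration, not gnocchi's actual implementation:

import time


class StopWatch(object):
    """Track elapsed time between start() and stop()."""

    def __init__(self):
        self._started_at = None
        self._stopped_at = None

    def start(self):
        self._started_at = time.monotonic()
        self._stopped_at = None
        return self

    def reset(self):
        # Restart timing from "now", as the injector loop in example #1 does.
        return self.start()

    def stop(self):
        if self._started_at is None:
            raise RuntimeError("StopWatch has not been started")
        self._stopped_at = time.monotonic()
        return self

    def elapsed(self):
        if self._started_at is None:
            raise RuntimeError("StopWatch has not been started")
        end = (self._stopped_at if self._stopped_at is not None
               else time.monotonic())
        return end - self._started_at

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()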
Code example #13
File: __init__.py Project: aneeshkp/gnocchi
    def _compute_and_store_timeseries(self, metric, measures):
        # NOTE(mnaser): The metric could have been handled by
        #               another worker, ignore if no measures.
        if len(measures) == 0:
            LOG.debug("Skipping %s (already processed)", metric)
            return

        measures.sort(order='timestamps')

        agg_methods = list(metric.archive_policy.aggregation_methods)
        block_size = metric.archive_policy.max_block_size
        back_window = metric.archive_policy.back_window
        definition = metric.archive_policy.definition
        # NOTE(sileht): We keep one more block to calculate the rate of change
        # correctly
        if any(filter(lambda x: x.startswith("rate:"), agg_methods)):
            back_window += 1

        try:
            ts = self._get_unaggregated_timeserie_and_unserialize(
                metric, block_size=block_size, back_window=back_window)
        except MetricDoesNotExist:
            try:
                self._create_metric(metric)
            except MetricAlreadyExists:
                # Created in the mean time, do not worry
                pass
            ts = None
        except CorruptionError as e:
            LOG.error(e)
            ts = None

        if ts is None:
            # This is the first time we treat measures for this
            # metric, or data are corrupted, create a new one
            ts = carbonara.BoundTimeSerie(block_size=block_size,
                                          back_window=back_window)
            current_first_block_timestamp = None
        else:
            current_first_block_timestamp = ts.first_block_timestamp()

        # NOTE(jd) This is Python, where you need a hack like this
        # to pass a variable around a closure, sorry.
        computed_points = {"number": 0}

        def _map_add_measures(bound_timeserie):
            # NOTE(gordc): bound_timeserie is the entire set of
            # unaggregated measures matching the largest
            # granularity. The following takes only the points
            # affected by new measures for a specific granularity.
            tstamp = max(bound_timeserie.first, measures['timestamps'][0])
            new_first_block_timestamp = bound_timeserie.first_block_timestamp()
            computed_points['number'] = len(bound_timeserie)
            for d in definition:
                ts = bound_timeserie.group_serie(
                    d.granularity, carbonara.round_timestamp(
                        tstamp, d.granularity))

                self._map_in_thread(
                    self._add_measures,
                    ((aggregation, d, metric, ts,
                        current_first_block_timestamp,
                        new_first_block_timestamp)
                        for aggregation in agg_methods))

        with utils.StopWatch() as sw:
            ts.set_values(measures,
                          before_truncate_callback=_map_add_measures)

        number_of_operations = (len(agg_methods) * len(definition))
        perf = ""
        elapsed = sw.elapsed()
        if elapsed > 0:
            perf = " (%d points/s, %d measures/s)" % (
                ((number_of_operations * computed_points['number']) /
                    elapsed),
                ((number_of_operations * len(measures)) / elapsed)
            )
        LOG.debug("Computed new metric %s with %d new measures "
                  "in %.2f seconds%s",
                  metric.id, len(measures), elapsed, perf)

        self._store_unaggregated_timeserie(metric, ts.serialize())
Code example #14
    def compute_and_store_timeseries(self, metric, measures):
        # NOTE(mnaser): The metric could have been handled by
        #               another worker, ignore if no measures.
        if len(measures) == 0:
            LOG.debug("Skipping %s (already processed)", metric)
            return

        measures = numpy.sort(measures, order='timestamps')

        agg_methods = list(metric.archive_policy.aggregation_methods)
        block_size = metric.archive_policy.max_block_size
        back_window = metric.archive_policy.back_window
        definition = metric.archive_policy.definition
        # NOTE(sileht): We keep one more block to calculate the rate of change
        # correctly
        if any(filter(lambda x: x.startswith("rate:"), agg_methods)):
            back_window += 1

        with utils.StopWatch() as sw:
            raw_measures = (
                self._get_or_create_unaggregated_timeseries(
                    [metric])[metric]
            )
        LOG.debug("Retrieve unaggregated measures for %s in %.2fs",
                  metric.id, sw.elapsed())

        if raw_measures is None:
            ts = None
        else:
            try:
                ts = carbonara.BoundTimeSerie.unserialize(
                    raw_measures, block_size, back_window)
            except carbonara.InvalidData:
                LOG.error("Data corruption detected for %s "
                          "unaggregated timeserie, creating a new one",
                          metric.id)
                ts = None

        if ts is None:
            # This is the first time we treat measures for this
            # metric, or data are corrupted, create a new one
            ts = carbonara.BoundTimeSerie(block_size=block_size,
                                          back_window=back_window)
            current_first_block_timestamp = None
        else:
            current_first_block_timestamp = ts.first_block_timestamp()

        # NOTE(jd) This is Python, where you need a hack like this
        # to pass a variable around a closure, sorry.
        computed_points = {"number": 0}

        def _map_add_measures(bound_timeserie):
            # NOTE(gordc): bound_timeserie is the entire set of
            # unaggregated measures matching the largest
            # granularity. The following takes only the points
            # affected by new measures for a specific granularity.
            tstamp = max(bound_timeserie.first, measures['timestamps'][0])
            new_first_block_timestamp = bound_timeserie.first_block_timestamp()
            computed_points['number'] = len(bound_timeserie)

            for granularity, aggregations in itertools.groupby(
                    # No need to sort the aggregations, they are already
                    # sorted by granularity.
                    metric.archive_policy.aggregations,
                    ATTRGETTER_GRANULARITY):
                ts = bound_timeserie.group_serie(
                    granularity, carbonara.round_timestamp(
                        tstamp, granularity))

                self._add_measures(metric, aggregations, ts,
                                   current_first_block_timestamp,
                                   new_first_block_timestamp)

        with utils.StopWatch() as sw:
            ts.set_values(measures,
                          before_truncate_callback=_map_add_measures)

        number_of_operations = (len(agg_methods) * len(definition))
        perf = ""
        elapsed = sw.elapsed()
        if elapsed > 0:
            perf = " (%d points/s, %d measures/s)" % (
                ((number_of_operations * computed_points['number']) /
                    elapsed),
                ((number_of_operations * len(measures)) / elapsed)
            )
        LOG.debug("Computed new metric %s with %d new measures "
                  "in %.2f seconds%s",
                  metric.id, len(measures), elapsed, perf)

        self._store_unaggregated_timeseries([(metric, ts.serialize())])