Example #1
    def flush(self):

        if self._trs_to_flush is not None:
            log.debug("A flush is already in progress, not doing anything")
            return

        to_flush = []
        # Do we have something to do?
        now = datetime.now()
        for tr in self._transactions:
            if tr.time_to_flush(now):
                to_flush.append(tr)

        count = len(to_flush)
        should_log = self._flush_count + 1 <= FLUSH_LOGGING_INITIAL or \
            (self._flush_count + 1) % FLUSH_LOGGING_PERIOD == 0
        if count > 0:
            if should_log:
                log.info(
                    "Flushing %s transaction%s during flush #%s" %
                    (count, util.plural(count), str(self._flush_count + 1)))
            else:
                log.debug(
                    "Flushing %s transaction%s during flush #%s" %
                    (count, util.plural(count), str(self._flush_count + 1)))

            timer = util.Timer()
            self._trs_to_flush = to_flush
            self.flush_next()
            # The emit time is reported on the next run.
            dimensions = self._set_dimensions({
                'component': 'monasca-agent',
                'service': 'monitoring'
            })
            emit_measurement = metrics.Measurement('monasca.emit_time_sec',
                                                   time.time(), timer.step(),
                                                   dimensions)
            MetricTransaction([emit_measurement],
                              headers={'Content-Type': 'application/json'})
        else:
            if should_log:
                log.info("No transaction to flush during flush #%s" %
                         str(self._flush_count + 1))
            else:
                log.debug("No transaction to flush during flush #%s" %
                          str(self._flush_count + 1))

        if self._flush_count + 1 == FLUSH_LOGGING_INITIAL:
            log.info(
                "First flushes done, next flushes will be logged every %s flushes."
                % FLUSH_LOGGING_PERIOD)

        self._flush_count += 1

        check_status.ForwarderStatus(
            queue_length=self._total_count,
            queue_size=self._total_size,
            flush_count=self._flush_count,
            transactions_received=self._transactions_received,
            transactions_flushed=self._transactions_flushed).persist()
Example #2
    def flush(self):

        if self._trs_to_flush is not None:
            log.debug("A flush is already in progress, not doing anything")
            return

        to_flush = []
        # Do we have something to do?
        now = datetime.now()
        for tr in self._transactions:
            if tr.time_to_flush(now):
                to_flush.append(tr)

        count = len(to_flush)
        should_log = self._flush_count + 1 <= FLUSH_LOGGING_INITIAL or \
            (self._flush_count + 1) % FLUSH_LOGGING_PERIOD == 0
        if count > 0:
            if should_log:
                log.info("Flushing %s transaction%s during flush #%s" %
                         (count, util.plural(count), str(self._flush_count + 1)))
            else:
                log.debug("Flushing %s transaction%s during flush #%s" %
                          (count, util.plural(count), str(self._flush_count + 1)))

            timer = util.Timer()
            self._trs_to_flush = to_flush
            self.flush_next()
            # The emit time is reported on the next run.
            dimensions = self._set_dimensions({'component': 'monasca-agent', 'service': 'monitoring'})
            emit_measurement = metrics.Measurement('monasca.emit_time_sec', time.time(), timer.step(), dimensions)
            MetricTransaction([emit_measurement], headers={'Content-Type': 'application/json'})
        else:
            if should_log:
                log.info("No transaction to flush during flush #%s" % str(self._flush_count + 1))
            else:
                log.debug("No transaction to flush during flush #%s" % str(self._flush_count + 1))

        if self._flush_count + 1 == FLUSH_LOGGING_INITIAL:
            log.info("First flushes done, next flushes will be logged every %s flushes." %
                     FLUSH_LOGGING_PERIOD)

        self._flush_count += 1

        check_status.ForwarderStatus(queue_length=self._total_count,
                                     queue_size=self._total_size,
                                     flush_count=self._flush_count,
                                     transactions_received=self._transactions_received,
                                     transactions_flushed=self._transactions_flushed).persist()
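Examples #1 and #2 gate their informational logging on the flush counter: the first FLUSH_LOGGING_INITIAL flushes are logged at INFO, and after that only every FLUSH_LOGGING_PERIOD-th flush is. The sketch below isolates just that throttle; the constant values and the plural() helper are illustrative assumptions, not the definitions from the monasca-agent module.

import logging

log = logging.getLogger(__name__)

# Assumed values for illustration only; the real constants are defined
# elsewhere in the agent and may differ.
FLUSH_LOGGING_INITIAL = 10   # log the first N flushes at INFO
FLUSH_LOGGING_PERIOD = 20    # afterwards, log every Nth flush at INFO


def plural(count):
    # Stand-in for util.plural(): empty string for exactly one item, "s" otherwise.
    return "" if count == 1 else "s"


def log_flush(flush_number, transaction_count):
    # Mirrors the should_log test above: INFO for the initial flushes and for
    # every FLUSH_LOGGING_PERIOD-th flush, DEBUG for everything in between.
    should_log = (flush_number <= FLUSH_LOGGING_INITIAL or
                  flush_number % FLUSH_LOGGING_PERIOD == 0)
    log_func = log.info if should_log else log.debug
    log_func("Flushing %s transaction%s during flush #%s",
             transaction_count, plural(transaction_count), flush_number)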
Example #3
    def tr_error(self, tr):
        tr.inc_error_count()
        tr.compute_next_flush(self._MAX_WAIT_FOR_REPLAY)
        log.warn(
            "Transaction %d in error (%s error%s), it will be replayed after %s"
            % (tr.get_id(), tr.get_error_count(),
               util.plural(tr.get_error_count()), tr.get_next_flush()))
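Example #3 only shows the forwarder's error path: the transaction's error count is bumped, its next flush time is pushed back, and the replay is logged. The Transaction class itself is not part of this excerpt, so the sketch below is purely illustrative: it assumes a simple linear backoff capped at a maximum replay delay, which is not necessarily what compute_next_flush does in monasca-agent.

from datetime import datetime, timedelta


class SketchTransaction(object):
    # Hypothetical stand-in for the transaction object used in tr_error();
    # the method names mirror the calls above, the backoff policy is assumed.
    def __init__(self, tr_id):
        self._id = tr_id
        self._error_count = 0
        self._next_flush = datetime.now()

    def get_id(self):
        return self._id

    def inc_error_count(self):
        self._error_count += 1

    def get_error_count(self):
        return self._error_count

    def compute_next_flush(self, max_wait):
        # Assumed policy: wait one extra second per recorded error, but never
        # longer than max_wait (a timedelta).
        delay = min(timedelta(seconds=self._error_count), max_wait)
        self._next_flush = datetime.now() + delay

    def get_next_flush(self):
        return self._next_flush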
Example #4
    def flush(self):
        try:
            self.flush_count += 1
            self.log_count += 1

            metrics = self.aggregator.flush()
            count = len(metrics)
            if self.flush_count % FLUSH_LOGGING_PERIOD == 0:
                self.log_count = 0
            if count:
                try:
                    emitter.http_emitter(metrics, log, self.api_host)
                except Exception:
                    log.exception("Error running emitter.")

            should_log = self.flush_count <= FLUSH_LOGGING_INITIAL \
                or self.log_count <= FLUSH_LOGGING_COUNT
            log_func = log.info
            if not should_log:
                log_func = log.debug
            log_func("Flush #%s: flushed %s metric%s" %
                     (self.flush_count, count, util.plural(count)))
            if self.flush_count == FLUSH_LOGGING_INITIAL:
                log.info(
                    "First flushes done, %s flushes will be logged every %s flushes."
                    % (FLUSH_LOGGING_COUNT, FLUSH_LOGGING_PERIOD))
        except Exception:
            log.exception("Error flushing metrics")
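Examples #4 through #7 throttle their logging differently from the forwarder: log_count is incremented on every flush and reset to zero every FLUSH_LOGGING_PERIOD flushes, so INFO logging re-opens for a short window at the start of each period (and stays on for the first FLUSH_LOGGING_INITIAL flushes overall). The small simulation below reproduces just that windowing, with assumed constant values.

# Assumed values for illustration; the real constants may differ.
FLUSH_LOGGING_INITIAL = 10
FLUSH_LOGGING_PERIOD = 70
FLUSH_LOGGING_COUNT = 5


def info_logged_flushes(num_flushes):
    # Returns the flush numbers that would be logged at INFO, following the
    # same counter updates as the flush() methods above.
    flush_count = 0
    log_count = 0
    logged = []
    for _ in range(num_flushes):
        flush_count += 1
        log_count += 1
        if flush_count % FLUSH_LOGGING_PERIOD == 0:
            log_count = 0
        if flush_count <= FLUSH_LOGGING_INITIAL or log_count <= FLUSH_LOGGING_COUNT:
            logged.append(flush_count)
    return logged


# With the values above, flushes 1-10 come out at INFO, then a short burst at
# the start of each period (70-75, 140-145, ...); everything else goes to DEBUG.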
Example #5
    def flush(self):
        try:
            self.flush_count += 1
            self.log_count += 1

            metrics = self.aggregator.flush()
            count = len(metrics)
            if self.flush_count % FLUSH_LOGGING_PERIOD == 0:
                self.log_count = 0
            if count:
                try:
                    emitter.http_emitter(metrics, log, self.api_host)
                except Exception:
                    log.exception("Error running emitter.")

            should_log = self.flush_count <= FLUSH_LOGGING_INITIAL or self.log_count <= FLUSH_LOGGING_COUNT
            log_func = log.info
            if not should_log:
                log_func = log.debug
            log_func(
                "Flush #%s: flushed %s metric%s" %
                (self.flush_count,
                 count,
                 util.plural(count)))
            if self.flush_count == FLUSH_LOGGING_INITIAL:
                log.info(
                    "First flushes done, %s flushes will be logged every %s flushes." %
                    (FLUSH_LOGGING_COUNT, FLUSH_LOGGING_PERIOD))
        except Exception:
            log.exception("Error flushing metrics")
Example #6
    def flush(self):
        try:
            self.flush_count += 1
            self.log_count += 1

            metrics = self.aggregator.flush()
            count = len(metrics)
            if self.flush_count % FLUSH_LOGGING_PERIOD == 0:
                self.log_count = 0
            if count:
                try:
                    emitter.http_emitter(metrics, log, self.api_host)
                except Exception:
                    log.exception("Error running emitter.")

            events = self.aggregator.flush_events()
            event_count = len(events)
            if event_count:
                log.warn('Event received but events are not available in the monasca api')

            should_log = self.flush_count <= FLUSH_LOGGING_INITIAL or self.log_count <= FLUSH_LOGGING_COUNT
            log_func = log.info
            if not should_log:
                log_func = log.debug
            log_func(
                "Flush #%s: flushed %s metric%s and %s event%s" %
                (self.flush_count,
                 count,
                 util.plural(count),
                 event_count,
                 util.plural(event_count)))
            if self.flush_count == FLUSH_LOGGING_INITIAL:
                log.info(
                    "First flushes done, %s flushes will be logged every %s flushes." %
                    (FLUSH_LOGGING_COUNT, FLUSH_LOGGING_PERIOD))

            # Persist a status message.
            packet_count = self.aggregator.total_count
            packets_per_second = self.aggregator.packets_per_second(self.interval)
            check_status.MonascaStatsdStatus(flush_count=self.flush_count,
                                             packet_count=packet_count,
                                             packets_per_second=packets_per_second,
                                             metric_count=count,
                                             event_count=event_count).persist()

        except Exception:
            log.exception("Error flushing metrics")
Example #7
    def flush(self):
        try:
            self.flush_count += 1
            self.log_count += 1

            metrics = self.aggregator.flush()
            count = len(metrics)
            if self.flush_count % FLUSH_LOGGING_PERIOD == 0:
                self.log_count = 0
            if count:
                try:
                    emitter.http_emitter(metrics, log, self.api_host)
                except Exception:
                    log.exception("Error running emitter.")

            events = self.aggregator.flush_events()
            event_count = len(events)
            if event_count:
                log.warn(
                    'Event received but events are not available in the monasca api'
                )

            should_log = self.flush_count <= FLUSH_LOGGING_INITIAL or self.log_count <= FLUSH_LOGGING_COUNT
            log_func = log.info
            if not should_log:
                log_func = log.debug
            log_func("Flush #%s: flushed %s metric%s and %s event%s" %
                     (self.flush_count, count, util.plural(count), event_count,
                      util.plural(event_count)))
            if self.flush_count == FLUSH_LOGGING_INITIAL:
                log.info(
                    "First flushes done, %s flushes will be logged every %s flushes."
                    % (FLUSH_LOGGING_COUNT, FLUSH_LOGGING_PERIOD))

            # Persist a status message.
            packet_count = self.aggregator.total_count
            packets_per_second = self.aggregator.packets_per_second(
                self.interval)
            check_status.MonascaStatsdStatus(
                flush_count=self.flush_count,
                packet_count=packet_count,
                packets_per_second=packets_per_second,
                metric_count=count,
                event_count=event_count).persist()

        except Exception:
            log.exception("Error flushing metrics")
Example #8
    def tr_error(self, tr):
        tr.inc_error_count()
        tr.compute_next_flush(self._MAX_WAIT_FOR_REPLAY)
        log.warn(
            "Transaction %d in error (%s error%s), it will be replayed after %s" %
            (tr.get_id(),
             tr.get_error_count(),
             util.plural(tr.get_error_count()),
             tr.get_next_flush()))