from random import random
from monasca_agent.common.util import Watchdog  # import path as used in the Reporter examples below


def busy_run():
    w = Watchdog(5)
    w.reset()
    x = 0
    # Never reset inside the loop: the 5-second watchdog is expected to
    # terminate the process, so the return below is never reached.
    while True:
        x = random()
    return x
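busy_run never resets the watchdog after the loop starts, so its unreachable return only makes sense when the function runs in a process the watchdog is allowed to kill. A minimal harness sketch under that assumption (the multiprocessing wrapper and the 10-second margin are illustrative, not taken from the project's test-suite):

import multiprocessing


def run_and_check(target, margin=10):
    # Run `target` in a child process and report whether it ended on its
    # own (presumably via the watchdog) before `margin` seconds passed.
    p = multiprocessing.Process(target=target)
    p.start()
    p.join(margin)
    if p.is_alive():
        p.terminate()
        return False
    return True


if __name__ == "__main__":
    # Expected under the assumption above: the 5-second watchdog fires well
    # before the 10-second margin, so this prints True.
    print("busy_run stopped by watchdog:", run_and_check(busy_run))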
Example #3
    def __init__(self, interval, aggregator, api_host, use_watchdog=False, event_chunk_size=None):
        threading.Thread.__init__(self)
        self.interval = int(interval)
        self.finished = threading.Event()
        self.aggregator = aggregator
        self.flush_count = 0
        self.log_count = 0

        self.watchdog = None
        if use_watchdog:
            from monasca_agent.common.util import Watchdog
            self.watchdog = Watchdog(WATCHDOG_TIMEOUT)

        self.api_host = api_host
        self.event_chunk_size = event_chunk_size or EVENT_CHUNK_SIZE
Example #4
import os


def use_lots_of_memory():
    # Skip this step on Travis CI.
    if os.environ.get('TRAVIS', False):
        return
    a = CollectorDaemon(12345, {})
    # Two-argument form: timeout plus what appears to be a memory ceiling
    # (see the sketch after this example).
    a._watchdog = Watchdog(30, 50)
    a._tr_manager = MemoryHogTxManager()
    a.run()
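The two-argument call above pairs the timeout with what appears to be a memory ceiling (the 50 in Watchdog(30, 50)); the parameter's name and units are not shown in these examples, so the following is only a sketch under that assumption. The usage pattern is the same as for the plain timeout: keep calling reset() while the work is healthy.

from monasca_agent.common.util import Watchdog


def guarded_accumulate(chunks):
    # Hypothetical limits mirroring Watchdog(30, 50) above: a 30-second
    # timeout and a memory ceiling assumed to be the second argument.
    w = Watchdog(30, 50)
    w.reset()
    data = []
    for chunk in chunks:
        data.append(chunk)
        w.reset()  # reset on every unit of progress
    return data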
Example #6
class Reporter(threading.Thread):
    """The reporter periodically sends the aggregated metrics to the
    server.
    """

    def __init__(self, interval, aggregator, api_host, use_watchdog=False, event_chunk_size=None):
        threading.Thread.__init__(self)
        self.interval = int(interval)
        self.finished = threading.Event()
        self.aggregator = aggregator
        self.flush_count = 0
        self.log_count = 0

        self.watchdog = None
        if use_watchdog:
            from monasca_agent.common.util import Watchdog
            self.watchdog = Watchdog(WATCHDOG_TIMEOUT)

        self.api_host = api_host
        self.event_chunk_size = event_chunk_size or EVENT_CHUNK_SIZE

    @staticmethod
    def serialize_metrics(metrics):
        return json.dumps({"series": metrics})

    def stop(self):
        log.info("Stopping reporter")
        self.finished.set()

    def run(self):

        log.info("Reporting to %s every %ss" % (self.api_host, self.interval))
        log.debug("Watchdog enabled: %s" % bool(self.watchdog))

        # Persist a start-up message.
        check_status.MonascaStatsdStatus().persist()

        while not self.finished.isSet():  # Use camel case isSet for 2.4 support.
            self.finished.wait(self.interval)
            self.flush()
            if self.watchdog:
                self.watchdog.reset()

        # Clean up the status messages.
        log.debug("Stopped reporter")
        check_status.MonascaStatsdStatus.remove_latest_status()

    def flush(self):
        try:
            self.flush_count += 1
            self.log_count += 1

            metrics = self.aggregator.flush()
            count = len(metrics)
            if self.flush_count % FLUSH_LOGGING_PERIOD == 0:
                self.log_count = 0
            if count:
                try:
                    emitter.http_emitter(metrics, log, self.api_host)
                except Exception:
                    log.exception("Error running emitter.")

            events = self.aggregator.flush_events()
            event_count = len(events)
            if event_count:
                log.warn('Event received but events are not available in the monasca api')

            should_log = self.flush_count <= FLUSH_LOGGING_INITIAL or self.log_count <= FLUSH_LOGGING_COUNT
            log_func = log.info
            if not should_log:
                log_func = log.debug
            log_func(
                "Flush #%s: flushed %s metric%s and %s event%s" %
                (self.flush_count,
                 count,
                 util.plural(count),
                 event_count,
                 util.plural(event_count)))
            if self.flush_count == FLUSH_LOGGING_INITIAL:
                log.info(
                    "First flushes done, %s flushes will be logged every %s flushes." %
                    (FLUSH_LOGGING_COUNT, FLUSH_LOGGING_PERIOD))

            # Persist a status message.
            packet_count = self.aggregator.total_count
            packets_per_second = self.aggregator.packets_per_second(self.interval)
            check_status.MonascaStatsdStatus(flush_count=self.flush_count,
                                             packet_count=packet_count,
                                             packets_per_second=packets_per_second,
                                             metric_count=count,
                                             event_count=event_count).persist()

        except Exception:
            log.exception("Error flushing metrics")
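Reporter only needs an aggregator exposing flush(), flush_events(), total_count, and packets_per_second(), plus the module-level constants and the emitter/check_status helpers it references. A minimal wiring sketch, where make_aggregator() and do_other_work() are hypothetical stand-ins for the surrounding daemon and the api_host URL is illustrative:

aggregator = make_aggregator()            # hypothetical factory
reporter = Reporter(interval=10,
                    aggregator=aggregator,
                    api_host="http://localhost:8080/v2.0",  # illustrative
                    use_watchdog=True)
reporter.start()          # run(): wait interval, flush, reset the watchdog
try:
    do_other_work()       # hypothetical: the daemon's own main loop
finally:
    reporter.stop()       # sets the finished event; run() exits after the wait
    reporter.join()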
Example #10
def fast_tornado():
    # Run the daemon with a 6-second watchdog and a mocked transaction
    # manager, so the run loop finishes quickly.
    a = CollectorDaemon(12345, {})
    a._watchdog = Watchdog(6)
    a._tr_manager = MockTxManager()
    a.run()
Example #11
import time


def normal_run():
    # Reset the 2-second watchdog once per second; it never fires and the
    # loop runs to completion.
    w = Watchdog(2)
    w.reset()
    for i in range(5):
        time.sleep(1)
        w.reset()
Example #12
import urllib.request as url  # assumed alias; the original import of 'url' is not shown


def hanging_net():
    # Never reset after the first call: if the request blocks, the
    # 5-second watchdog is expected to end the process before it returns.
    w = Watchdog(5)
    w.reset()
    x = url.urlopen("http://localhost:31834")
    print("ERROR Net call returned", x)
    return True