Beispiel #1
0
    def _check_memory(self):
        """Log process memory usage roughly every 30 seconds of wall time.

        Tracks the process VSZ via ProcessMemoryInfo and logs the delta
        since the last check, the delta since startup, and an approximate
        per-message memory cost.
        """
        if not self.pmi:
            # First call: take a baseline snapshot of the process size.
            self.pmi = ProcessMemoryInfo()
            self.last_vsz = self.pmi.vsz
            self.initial_vsz = self.pmi.vsz

        utc = datetime.datetime.utcnow()
        check = self.last_time is None
        if self.last_time:
            diff = utc - self.last_time
            # BUG FIX: timedelta.seconds ignores the .days component, so a
            # gap of exactly N days read as 0; total_seconds() is the true
            # elapsed time.
            if diff.total_seconds() > 30:
                check = True
        if check:
            self.last_time = utc
            self.pmi.update()
            diff = (self.pmi.vsz - self.last_vsz) / 1000
            idiff = (self.pmi.vsz - self.initial_vsz) / 1000
            self.total_processed += self.processed
            per_message = 0
            if self.total_processed:
                # Average memory growth per message since startup.
                per_message = idiff / self.total_processed
            LOG.debug("%20s %6dk/%6dk ram, "
                      "%3d/%4d msgs @ %6dk/msg" %
                      (self.name, diff, idiff, self.processed,
                       self.total_processed, per_message))
            self.last_vsz = self.pmi.vsz
            self.processed = 0
Beispiel #2
0
 def process_request(self, request: HttpRequest) -> HttpResponse:
     """Snapshot memory state around the wrapped request.

     Starts class tracking for all installed models (plus any extra
     classes on the panel), records a 'before' snapshot, delegates to
     the parent panel, then records the 'after' snapshot and annotated
     tracker statistics.
     """
     tracker = ClassTracker()
     self._tracker = tracker
     for model in apps.get_models() + self.classes:
         tracker.track_class(model)
     tracker.create_snapshot('before')
     self.record_stats({'before': ProcessMemoryInfo()})

     response = super(MemoryPanel, self).process_request(request)

     self.record_stats({'after': ProcessMemoryInfo()})
     tracker.create_snapshot('after')
     stats = tracker.stats
     stats.annotate()
     self.record_stats({'stats': stats})
     return response
Beispiel #3
0
    def _check_memory(self):
        """Log process memory usage roughly every 30 seconds of wall time.

        Reports, per consumer name and exchange, the VSZ delta since the
        last check, the delta since startup, and an approximate
        per-message memory cost.
        """
        if not self.pmi:
            # First call: take a baseline snapshot of the process size.
            self.pmi = ProcessMemoryInfo()
            self.last_vsz = self.pmi.vsz
            self.initial_vsz = self.pmi.vsz

        utc = datetime.datetime.utcnow()
        check = self.last_time is None
        if self.last_time:
            diff = utc - self.last_time
            # BUG FIX: timedelta.seconds ignores the .days component;
            # total_seconds() is the true elapsed time.
            if diff.total_seconds() > 30:
                check = True
        if check:
            self.last_time = utc
            self.pmi.update()
            diff = (self.pmi.vsz - self.last_vsz) / 1000
            idiff = (self.pmi.vsz - self.initial_vsz) / 1000
            self.total_processed += self.processed
            per_message = 0
            if self.total_processed:
                # Average memory growth per message since startup.
                per_message = idiff / self.total_processed
            _get_child_logger().debug("%20s %20s %6dk/%6dk ram, "
                      "%3d/%4d msgs @ %6dk/msg" %
                      (self.name, self.exchange, diff, idiff, self.processed,
                      self.total_processed, per_message))
            self.last_vsz = self.pmi.vsz
            self.processed = 0
Beispiel #4
0
 def profiler_handler(req):
     """Wrap *handler* with before/after memory profiling.

     Takes ProcessMemoryInfo and ClassTracker snapshots around the
     wrapped handler call and stores them in ``self.stats``; the
     snapshots are recorded even when the handler raises.
     """
     self._tracker.create_snapshot('before')
     before = ProcessMemoryInfo()
     req_before = get_memory_object(req.registry)
     try:
         result = handler(req)
     finally:
         # Runs on success and failure alike; the original bare
         # ``except: raise`` clause added nothing beyond this finally.
         after = ProcessMemoryInfo()
         self._tracker.create_snapshot('after')
         class_stats = self._tracker.stats
         class_stats.annotate()
         self.stats = dict(
             before=before,
             after=after,
             class_stats=class_stats,
             req_before=req_before,
             req_after=get_memory_object(req.registry),
         )
     return result
Beispiel #5
0
def _getProcessMemory():
    """Return the resident set size (RSS) of the current process."""
    info = ProcessMemoryInfo()
    return info.rss
Beispiel #6
0
 def process_response(self, request, response):
     """Record the post-request memory snapshot and annotated stats."""
     self.record_stats({'after': ProcessMemoryInfo()})
     self._tracker.create_snapshot('after')
     class_stats = self._tracker.stats
     class_stats.annotate()
     self.record_stats({'stats': class_stats})
Beispiel #7
0
 def process_request(self, request):
     """Start class tracking and record the pre-request memory state."""
     tracker = ClassTracker()
     self._tracker = tracker
     for model in apps.get_models() + self.classes:
         tracker.track_class(model)
     tracker.create_snapshot('before')
     self.record_stats({'before': ProcessMemoryInfo()})
Beispiel #8
0
class NovaConsumer(kombu.mixins.ConsumerMixin):
    """Kombu consumer that persists raw nova notifications.

    Consumes monitor.info / monitor.error messages from the "nova"
    topic exchange, stores each raw notification via
    views.process_raw_data, and periodically logs the process memory
    footprint.
    """

    def __init__(self, name, connection, deployment, durable, queue_arguments):
        self.connection = connection
        self.deployment = deployment
        self.durable = durable
        self.queue_arguments = queue_arguments
        self.name = name
        # Memory-tracking state used by _check_memory().
        self.last_time = None
        self.pmi = None
        self.processed = 0
        self.total_processed = 0

    def _create_exchange(self, name, type, exclusive=False, auto_delete=False):
        """Build a kombu Exchange honoring the consumer's durability."""
        return kombu.entity.Exchange(name,
                                     type=type,
                                     exclusive=exclusive,
                                     durable=self.durable,
                                     auto_delete=auto_delete)

    def _create_queue(self,
                      name,
                      nova_exchange,
                      routing_key,
                      exclusive=False,
                      auto_delete=False):
        """Build a kombu Queue bound to *nova_exchange*.

        BUG FIX: the original forwarded ``auto_delete=exclusive`` and
        ``exclusive=auto_delete`` (swapped). Harmless for the default
        False/False callers in this class, but wrong for any other
        arguments.
        """
        return kombu.Queue(name,
                           nova_exchange,
                           durable=self.durable,
                           auto_delete=auto_delete,
                           exclusive=exclusive,
                           queue_arguments=self.queue_arguments,
                           routing_key=routing_key)

    def get_consumers(self, Consumer, channel):
        """Return one kombu Consumer covering the monitor queues."""
        nova_exchange = self._create_exchange("nova", "topic")

        nova_queues = [
            self._create_queue('monitor.info', nova_exchange, 'monitor.info'),
            self._create_queue('monitor.error', nova_exchange, 'monitor.error')
        ]

        return [Consumer(queues=nova_queues, callbacks=[self.on_nova])]

    def _process(self, message):
        """Persist one message, ack it, and run post-processing."""
        routing_key = message.delivery_info['routing_key']

        body = str(message.body)
        args = (routing_key, json.loads(body))
        asJson = json.dumps(args)

        # save raw and ack the message
        raw = views.process_raw_data(self.deployment, args, asJson)

        if raw:
            self.processed += 1
            message.ack()
            views.post_process(raw, args[1])

        self._check_memory()

    def _check_memory(self):
        """Log process memory usage roughly every 30 seconds."""
        if not self.pmi:
            # First call: take a baseline snapshot of the process size.
            self.pmi = ProcessMemoryInfo()
            self.last_vsz = self.pmi.vsz
            self.initial_vsz = self.pmi.vsz

        utc = datetime.datetime.utcnow()
        check = self.last_time is None
        if self.last_time:
            diff = utc - self.last_time
            # BUG FIX: timedelta.seconds ignores .days; use the true
            # elapsed time.
            if diff.total_seconds() > 30:
                check = True
        if check:
            self.last_time = utc
            self.pmi.update()
            diff = (self.pmi.vsz - self.last_vsz) / 1000
            idiff = (self.pmi.vsz - self.initial_vsz) / 1000
            self.total_processed += self.processed
            per_message = 0
            if self.total_processed:
                # Average memory growth per message since startup.
                per_message = idiff / self.total_processed
            LOG.debug("%20s %6dk/%6dk ram, "
                      "%3d/%4d msgs @ %6dk/msg" %
                      (self.name, diff, idiff, self.processed,
                       self.total_processed, per_message))
            self.last_vsz = self.pmi.vsz
            self.processed = 0

    def on_nova(self, body, message):
        """Message callback: log failures and re-raise."""
        try:
            self._process(message)
        except Exception as e:
            # ``except X as e`` replaces the Python-2-only comma syntax
            # and works on Python 2.6+ and 3.
            LOG.debug("Problem: %s\nFailed message body:\n%s" %
                      (e, json.loads(str(message.body))))
            raise
Beispiel #9
0
def root():
    """Return an overview dict containing process memory information."""
    return dict(processinfo=ProcessMemoryInfo())
Beispiel #10
0
 def process_request(self, request):
     """Begin class tracking and snapshot the pre-request memory state."""
     tracker = ClassTracker()
     self._tracker = tracker
     for model in get_models() + self.classes:
         tracker.track_class(model, keep=True)
     tracker.create_snapshot('before')
     self._before = ProcessMemoryInfo()
Beispiel #11
0
class Consumer(kombu.mixins.ConsumerMixin):
    """Kombu consumer that persists notifications from a topic exchange.

    Consumes from the queues described by *topics*, stores each raw
    notification via views.process_raw_data, runs the matching
    post-process hook, and periodically records memory/throughput
    figures into the shared *stats* dict.
    """

    def __init__(self,
                 name,
                 connection,
                 deployment,
                 durable,
                 queue_arguments,
                 exchange,
                 topics,
                 connect_max_retries=10,
                 stats=None):
        self.connect_max_retries = connect_max_retries
        self.retry_attempts = 0
        self.connection = connection
        self.deployment = deployment
        self.durable = durable
        self.queue_arguments = queue_arguments
        self.name = name
        # Memory-tracking state used by _check_memory().
        self.last_time = None
        self.pmi = None
        self.processed = 0
        self.total_processed = 0
        self.topics = topics
        self.exchange = exchange
        # Default created per-instance to avoid a shared mutable default.
        if stats is not None:
            self.stats = stats
        else:
            self.stats = dict()
        signal.signal(signal.SIGTERM, self._shutdown)

    def _create_exchange(self, name, type, exclusive=False, auto_delete=False):
        """Build the topic exchange honoring the consumer's durability."""
        return message_service.create_exchange(name,
                                               exchange_type=type,
                                               exclusive=exclusive,
                                               durable=self.durable,
                                               auto_delete=auto_delete)

    def _create_queue(self,
                      name,
                      nova_exchange,
                      routing_key,
                      exclusive=False,
                      auto_delete=False):
        """Build a queue bound to *nova_exchange*.

        BUG FIX: the original forwarded ``auto_delete=exclusive`` and
        ``exclusive=auto_delete`` (swapped). Harmless for the default
        False/False callers here, but wrong for any other arguments.
        """
        return message_service.create_queue(
            name,
            nova_exchange,
            durable=self.durable,
            auto_delete=auto_delete,
            exclusive=exclusive,
            queue_arguments=self.queue_arguments,
            routing_key=routing_key)

    def get_consumers(self, Consumer, channel):
        """Return one kombu Consumer covering all configured topics."""
        exchange = self._create_exchange(self.exchange, "topic")

        queues = [
            self._create_queue(topic['queue'], exchange, topic['routing_key'])
            for topic in self.topics
        ]

        return [Consumer(queues=queues, callbacks=[self.on_nova])]

    def _process(self, message):
        """Persist one message, ack it, and run post-processing."""
        routing_key = message.delivery_info['routing_key']

        body = str(message.body)
        args = (routing_key, json.loads(body))
        asJson = json.dumps(args)
        # save raw and ack the message
        raw, notif = views.process_raw_data(self.deployment, args, asJson,
                                            self.exchange)

        self.processed += 1
        message.ack()
        POST_PROCESS_METHODS[raw.get_name()](raw, notif)

        self._check_memory()

    def _check_memory(self):
        """Log and record memory/throughput stats roughly every 30s."""
        if not self.pmi:
            # First call: take a baseline snapshot of the process size.
            self.pmi = ProcessMemoryInfo()
            self.last_vsz = self.pmi.vsz
            self.initial_vsz = self.pmi.vsz

        utc = datetime.datetime.utcnow()
        check = self.last_time is None
        if self.last_time:
            diff = utc - self.last_time
            # BUG FIX: timedelta.seconds ignores .days; use the true
            # elapsed time.
            if diff.total_seconds() > 30:
                check = True
        if check:
            self.last_time = utc
            self.pmi.update()
            diff = (self.pmi.vsz - self.last_vsz) / 1000
            idiff = (self.pmi.vsz - self.initial_vsz) / 1000
            self.total_processed += self.processed
            per_message = 0
            if self.total_processed:
                # Average memory growth per message since startup.
                per_message = idiff / self.total_processed
            _get_child_logger().debug(
                "%20s %20s %6dk/%6dk ram, "
                "%3d/%4d msgs @ %6dk/msg" %
                (self.name, self.exchange, diff, idiff, self.processed,
                 self.total_processed, per_message))
            self.stats['timestamp'] = utc
            self.stats['total_processed'] = self.total_processed
            self.stats['processed'] = self.processed
            self.last_vsz = self.pmi.vsz
            self.processed = 0

    def on_nova(self, body, message):
        """Message callback: ack malformed messages, re-raise the rest."""
        try:
            self._process(message)
        except ValueError as e:
            # ``except X as e`` replaces the Python-2-only comma syntax.
            _get_child_logger().error(
                "Error: %s\nMalformed message body found : \n%s" %
                (e, str(message.body)))
            # Mark message as read to avoid re-reading the malformed message.
            message.ack()
        except Exception as e:
            _get_child_logger().error("Problem: %s\nFailed message body:\n%s" %
                                      (e, str(message.body)))
            raise
Beispiel #12
0
        def track_memory_wrapper(*args, **kwargs):
            """Run *fn* while tracking process memory and class churn.

            Takes before/after ProcessMemoryInfo and ClassTracker
            snapshots around the call, then writes a resource/class
            report to *path* (or stdout when no path is given).
            """
            memory_info = {}
            tracker = ClassTracker()
            for cls in apps.get_models() + [Context, Template]:
                # track all models from registered apps, plus some standard
                # Django ones
                tracker.track_class(cls)

            try:
                tracker.create_snapshot("before")
                memory_info["before"] = ProcessMemoryInfo()
                result = fn(*args, **kwargs)
                memory_info["after"] = ProcessMemoryInfo()
                tracker.create_snapshot("after")
                memory_info["stats"] = tracker.stats
                memory_info["stats"].annotate()
                return result

            finally:
                try:
                    # BUG FIX: if fn() raised, the "after" snapshot was never
                    # taken and the original finally block died with a
                    # KeyError, masking the real exception. Only report when
                    # the run completed.
                    if "after" in memory_info and "stats" in memory_info:
                        # record a whole bunch of memory statistics...
                        resources = [
                            ("resident set size", memory_info["after"].rss),
                            ("virtual size", memory_info["after"].vsz),
                        ]
                        resources.extend(memory_info["after"] - memory_info["before"])
                        resources = [(k, pp(v)) for k, v in resources]
                        resources.extend(memory_info["after"].os_specific)

                        # record each tracked class as of the final snapshot...
                        classes_stats = []
                        stats = memory_info["stats"]
                        snapshot = stats.snapshots[-1]
                        for class_name in stats.tracked_classes:
                            # history is a list of (timestamp, n_instances)
                            # tuples updated on every creation/deletion
                            history = [n for _, n in stats.history[class_name]]
                            if history:
                                classes_stats.append({
                                    "name": class_name,
                                    "n_instances": len(history),
                                    "min_instances": min(history),
                                    "max_instances": max(history),
                                    "size": pp(snapshot.classes.get(
                                        class_name, {}).get("sum", 0)),
                                })

                        if not path:
                            stream = sys.stdout
                            needs_close = False
                        else:
                            stream = open(path, "w")
                            needs_close = True
                        try:
                            print("\nRESOURCES", file=stream)
                            for k, v in resources:
                                print(f"{k:<26}: {v:>10}", file=stream)
                            print("\nCLASSES", file=stream)
                            for class_stats in classes_stats:
                                print(
                                    "{name}: created/deleted {n_instances} times for a min/max of {min_instances}/{max_instances} instances: {size:>10}"
                                    .format(**class_stats),
                                    file=stream,
                                )
                        finally:
                            # BUG FIX: the original evaluated ``stream.closed``
                            # (a no-op attribute read) instead of closing the
                            # file. Never close sys.stdout.
                            if needs_close:
                                stream.close()
                finally:
                    # Always stop tracking, even if reporting failed.
                    tracker.detach_all_classes()
Beispiel #13
0
class NovaConsumer(kombu.mixins.ConsumerMixin):
    """Kombu consumer that stores raw nova notifications.

    Consumes monitor.info / monitor.error messages from the "nova"
    topic exchange, stores each raw notification via
    views.process_raw_data, and periodically logs the process memory
    footprint. Messages are always acked, even on processing failure.
    """

    def __init__(self, name, connection, deployment, durable, queue_arguments):
        self.connection = connection
        self.deployment = deployment
        self.durable = durable
        self.queue_arguments = queue_arguments
        self.name = name
        # Memory-tracking state used by _check_memory().
        self.last_time = None
        self.pmi = None
        self.processed = 0
        self.total_processed = 0

    def _create_exchange(self, name, type, exclusive=False, auto_delete=False):
        """Build a kombu Exchange honoring the consumer's durability."""
        return kombu.entity.Exchange(name, type=type, exclusive=exclusive,
                                     durable=self.durable, auto_delete=auto_delete)

    def _create_queue(self, name, nova_exchange, routing_key, exclusive=False,
                     auto_delete=False):
        """Build a kombu Queue bound to *nova_exchange*.

        BUG FIX: the original forwarded ``auto_delete=exclusive`` and
        ``exclusive=auto_delete`` (swapped). Harmless for the default
        False/False callers here, but wrong for any other arguments.
        """
        return kombu.Queue(name, nova_exchange, durable=self.durable,
                           auto_delete=auto_delete, exclusive=exclusive,
                           queue_arguments=self.queue_arguments,
                           routing_key=routing_key)

    def get_consumers(self, Consumer, channel):
        """Return one kombu Consumer covering the monitor queues."""
        nova_exchange = self._create_exchange("nova", "topic")

        nova_queues = [
            self._create_queue('monitor.info', nova_exchange, 'monitor.info'),
            self._create_queue('monitor.error', nova_exchange, 'monitor.error')
        ]

        return [Consumer(queues=nova_queues, callbacks=[self.on_nova])]

    def _process(self, message):
        """Persist one raw message and update the processed counter."""
        routing_key = message.delivery_info['routing_key']

        body = str(message.body)
        args = (routing_key, json.loads(body))
        asJson = json.dumps(args)

        raw = views.process_raw_data(self.deployment, args, asJson)
        if raw:
            self.processed += 1

        self._check_memory()

    def _check_memory(self):
        """Log process memory usage roughly every 30 seconds."""
        if not self.pmi:
            # First call: take a baseline snapshot of the process size.
            self.pmi = ProcessMemoryInfo()
            self.last_vsz = self.pmi.vsz
            self.initial_vsz = self.pmi.vsz

        utc = datetime.datetime.utcnow()
        check = self.last_time is None
        if self.last_time:
            diff = utc - self.last_time
            # BUG FIX: timedelta.seconds ignores .days; use the true
            # elapsed time.
            if diff.total_seconds() > 30:
                check = True
        if check:
            self.last_time = utc
            self.pmi.update()
            diff = (self.pmi.vsz - self.last_vsz) / 1000
            idiff = (self.pmi.vsz - self.initial_vsz) / 1000
            self.total_processed += self.processed
            per_message = 0
            if self.total_processed:
                # Average memory growth per message since startup.
                per_message = idiff / self.total_processed
            LOG.debug("%20s %6dk/%6dk ram, "
                      "%3d/%4d msgs @ %6dk/msg" %
                      (self.name, diff, idiff, self.processed,
                      self.total_processed, per_message))
            self.last_vsz = self.pmi.vsz
            self.processed = 0

    def on_nova(self, body, message):
        """Message callback: log failures, then ack unconditionally."""
        try:
            self._process(message)
        except Exception as e:
            # ``except X as e`` replaces the Python-2-only comma syntax.
            LOG.exception("Problem %s" % e)
        # Deliberately acked even on failure so the message is not redelivered.
        message.ack()
Beispiel #14
0
 def process_response(self, request, response):
     """Capture the post-request memory state and tracker snapshot."""
     self._tracker.create_snapshot('after')
     self._after = ProcessMemoryInfo()
Beispiel #15
0
def process():
    """Return a process overview: memory info plus current threads."""
    return dict(
        info=ProcessMemoryInfo(),
        threads=get_current_threads(),
    )
Beispiel #16
0
class Consumer(kombu.mixins.ConsumerMixin):
    """Kombu consumer that persists notifications from a topic exchange.

    Consumes from the queues described by *topics*, stores each raw
    notification via views.process_raw_data, runs the matching
    post-process hook, and periodically logs the process memory
    footprint.
    """

    def __init__(self, name, connection, deployment, durable, queue_arguments,
                 exchange, topics, connect_max_retries=10):
        self.connect_max_retries = connect_max_retries
        self.retry_attempts = 0
        self.connection = connection
        self.deployment = deployment
        self.durable = durable
        self.queue_arguments = queue_arguments
        self.name = name
        # Memory-tracking state used by _check_memory().
        self.last_time = None
        self.pmi = None
        self.processed = 0
        self.total_processed = 0
        self.topics = topics
        self.exchange = exchange
        signal.signal(signal.SIGTERM, self._shutdown)

    def _create_exchange(self, name, type, exclusive=False, auto_delete=False):
        """Build the topic exchange honoring the consumer's durability."""
        return message_service.create_exchange(name, exchange_type=type,
                                               exclusive=exclusive,
                                               durable=self.durable,
                                               auto_delete=auto_delete)

    def _create_queue(self, name, nova_exchange, routing_key, exclusive=False,
                     auto_delete=False):
        """Build a queue bound to *nova_exchange*.

        BUG FIX: the original forwarded ``auto_delete=exclusive`` and
        ``exclusive=auto_delete`` (swapped). Harmless for the default
        False/False callers here, but wrong for any other arguments.
        """
        return message_service.create_queue(
            name, nova_exchange, durable=self.durable,
            auto_delete=auto_delete, exclusive=exclusive,
            queue_arguments=self.queue_arguments,
            routing_key=routing_key)

    def get_consumers(self, Consumer, channel):
        """Return one kombu Consumer covering all configured topics."""
        exchange = self._create_exchange(self.exchange, "topic")

        queues = [self._create_queue(topic['queue'], exchange,
                                     topic['routing_key'])
                  for topic in self.topics]

        return [Consumer(queues=queues, callbacks=[self.on_nova])]

    def _process(self, message):
        """Persist one message, ack it, and run post-processing."""
        routing_key = message.delivery_info['routing_key']

        body = str(message.body)
        args = (routing_key, json.loads(body))
        asJson = json.dumps(args)
        # save raw and ack the message
        raw, notif = views.process_raw_data(
            self.deployment, args, asJson, self.exchange)

        self.processed += 1
        message.ack()
        POST_PROCESS_METHODS[raw.get_name()](raw, notif)

        self._check_memory()

    def _check_memory(self):
        """Log process memory usage roughly every 30 seconds."""
        if not self.pmi:
            # First call: take a baseline snapshot of the process size.
            self.pmi = ProcessMemoryInfo()
            self.last_vsz = self.pmi.vsz
            self.initial_vsz = self.pmi.vsz

        utc = datetime.datetime.utcnow()
        check = self.last_time is None
        if self.last_time:
            diff = utc - self.last_time
            # BUG FIX: timedelta.seconds ignores .days; use the true
            # elapsed time.
            if diff.total_seconds() > 30:
                check = True
        if check:
            self.last_time = utc
            self.pmi.update()
            diff = (self.pmi.vsz - self.last_vsz) / 1000
            idiff = (self.pmi.vsz - self.initial_vsz) / 1000
            self.total_processed += self.processed
            per_message = 0
            if self.total_processed:
                # Average memory growth per message since startup.
                per_message = idiff / self.total_processed
            _get_child_logger().debug("%20s %20s %6dk/%6dk ram, "
                      "%3d/%4d msgs @ %6dk/msg" %
                      (self.name, self.exchange, diff, idiff, self.processed,
                      self.total_processed, per_message))
            self.last_vsz = self.pmi.vsz
            self.processed = 0

    def on_nova(self, body, message):
        """Message callback: log failures and re-raise."""
        try:
            self._process(message)
        except Exception as e:
            # ``except X as e`` replaces the Python-2-only comma syntax.
            _get_child_logger().debug("Problem: %s\nFailed message body:\n%s" %
                      (e, json.loads(str(message.body))))
            raise
Beispiel #17
0
class NovaConsumer(object):
    def __init__(self, name, connection, deployment, durable):
        """Set up the nova exchange, its queues, and tracking state."""
        self.connection = connection
        self.deployment = deployment
        self.durable = durable
        self.name = name
        # Memory-tracking state used by _check_memory().
        self.last_time = None
        self.pmi = None
        self.processed = 0
        self.total_processed = 0
        self.channel = connection.channel()

        self.nova_exchange = kombu.entity.Exchange(
            "nova", type="topic", exclusive=False,
            durable=self.durable, auto_delete=False)

        def _bind_queue(queue_name, routing_key):
            # Durable, shared, non-expiring queue bound to the nova exchange.
            return kombu.Queue(queue_name, self.nova_exchange,
                               durable=self.durable, auto_delete=False,
                               exclusive=False, routing_key=routing_key)

        self.nova_queues = [
            _bind_queue("stacktash.notifications.info", 'notifications.info'),
            _bind_queue("stacktash.notifications.error", 'notifications.error'),
        ]

    def run(self):
        """Consume notifications forever, yielding to eventlet each pass."""
        self.consumer = Consumer(
            channel=self.channel,
            queues=self.nova_queues,
            callbacks=[self.on_nova],
        )
        while True:
            self.consumer.consume()
            self.connection.drain_events()
            # Cooperative yield so other green threads can run.
            eventlet.sleep(0)

    def _process(self, body, message):
        """Persist one raw notification and update the processed counter."""
        routing_key = message.delivery_info['routing_key']
        # Fail fast if the payload is not JSON-serializable.
        json.dumps((routing_key, body))

        args = (routing_key, json.loads(str(message.body)))
        asJson = json.dumps(args)

        raw = dbapi.process_raw_data(self.deployment, args, asJson)
        if raw:
            self.processed += 1

        self._check_memory()

    def _check_memory(self):
        """Log process memory usage roughly every 30 seconds of wall time.

        Tracks the process VSZ via ProcessMemoryInfo and logs the delta
        since the last check, the delta since startup, and an approximate
        per-message memory cost.
        """
        if not self.pmi:
            # First call: take a baseline snapshot of the process size.
            self.pmi = ProcessMemoryInfo()
            self.last_vsz = self.pmi.vsz
            self.initial_vsz = self.pmi.vsz

        utc = datetime.datetime.utcnow()
        check = self.last_time is None
        if self.last_time:
            diff = utc - self.last_time
            # BUG FIX: timedelta.seconds ignores the .days component;
            # total_seconds() is the true elapsed time.
            if diff.total_seconds() > 30:
                check = True
        if check:
            self.last_time = utc
            self.pmi.update()
            diff = (self.pmi.vsz - self.last_vsz) / 1000
            idiff = (self.pmi.vsz - self.initial_vsz) / 1000
            self.total_processed += self.processed
            per_message = 0
            if self.total_processed:
                # Average memory growth per message since startup.
                per_message = idiff / self.total_processed
            LOG.debug("%20s %6dk/%6dk ram, "
                      "%3d/%4d msgs @ %6dk/msg" %
                      (self.name, diff, idiff, self.processed,
                      self.total_processed, per_message))
            self.last_vsz = self.pmi.vsz
            self.processed = 0

    def on_nova(self, body, message):
        try:
            self._process(body, message)
        except Exception, e:
            LOG.exception("Problem %s" % e)
        finally: