Example 1

                                                           '(after timeout!)' if targetkey in
                                                           self.outstanding_requests else ''),
                                                           logLevel=logging.DEBUG)
                d = g.gather()
                curtime = datetime.datetime.now().isoformat()
                self.outstanding_requests[targetkey] = [d, curtime, 0, g]
                d.addCallback(handle_success, g.context)
                d.addErrback(handle_errors, g.context)
            else:
                self.outstanding_requests[targetkey][2] += 1
                self.log_msg('Skipping: an outstanding request to "%s" (%s) has been pending since %s.' %
                             (g.context, hostname, self.outstanding_requests[targetkey][1]),
                             logLevel=logging.DEBUG)


provideSubscriptionAdapter(subscription_factory(MetricsDaemonProcess), adapts=(Proc,))
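
The loop above keeps at most one in-flight metrics request per target: outstanding_requests maps a target key to [deferred, start_time, skip_count, gatherer], a new gather() is started only when no entry exists (or the previous one timed out), and otherwise the skip counter is bumped and the attempt is logged. The snippet below isolates that de-duplication pattern with hypothetical names (request, start_gather); it is a sketch of the technique, not the project's MetricsDaemonProcess.

import datetime
from twisted.internet import defer

outstanding = {}  # target key -> [deferred, started_at (ISO), skip_count]

def request(target, start_gather):
    # start_gather is a hypothetical callable returning a Deferred
    if target in outstanding:
        outstanding[target][2] += 1       # count and skip the duplicate attempt
        return outstanding[target][0]     # reuse the in-flight deferred
    d = start_gather(target)
    outstanding[target] = [d, datetime.datetime.now().isoformat(), 0]

    def _done(result):
        outstanding.pop(target, None)     # allow the next request for this target
        return result

    d.addBoth(_done)
    return d

# usage: request('host-1', lambda t: defer.succeed({'cpu': 0.3}))

Dropping the entry in an addBoth handler is what keeps requests from piling up per target; the original code presumably does the same in its handle_success/handle_errors callbacks, which are not shown in this excerpt.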


class VirtualComputeMetricGatherer(Adapter):
    """Gathers VM metrics using IVirtualizationContainerSubmitter"""

    implements(IMetricsGatherer)
    context(IManageable)

    @defer.inlineCallbacks
    def gather(self):
        self._killhook = defer.Deferred()
        yield self.gather_vms()
        yield self.gather_phy()

    def kill(self):
Example 2

        config = get_config()
        self.interval = config.getint('db', 'pack_interval')

    @defer.inlineCallbacks
    def run(self):
        while True:
            try:
                if not self.paused:
                    yield self.pack()
            except Exception:
                # log the failure but keep the periodic pack loop running
                import traceback
                traceback.print_exc()

            yield async_sleep(self.interval)

    @db.ro_transact
    def pack(self):
        storage_type = get_config().get('db', 'storage_type')

        if storage_type == 'zeo':
            print "[db_pack] zeo pack not implemented yet, please setup cron to run bin/zeopack -u db/socket"
        elif storage_type == 'embedded':
            d = db.get_db()
            d.pack(time.time())


provideSubscriptionAdapter(subscription_factory(PackDaemonProcess),
                           adapts=(Proc, ))
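
The run() loop above relies on an async_sleep helper that is not shown in this excerpt. In Twisted such a helper is commonly built on task.deferLater, which returns a Deferred that fires after the given delay without blocking the reactor; the sketch below is an assumption about what the helper looks like, not the project's actual implementation.

from twisted.internet import reactor, task

def async_sleep(seconds):
    # Deferred that fires with None after `seconds`, so inlineCallbacks code
    # can `yield async_sleep(n)` instead of blocking the reactor with time.sleep(n)
    return task.deferLater(reactor, seconds, lambda: None)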
Example 3

        for line in format_(summary_):
            logger.info(line)
        log.msg('Profiling memory done', system=self.__name__)
        return defer.succeed(None)

    def track_changes(self):
        log.msg('Profiling memory (tracking changes)...', system=self.__name__)
        logger.info('Change summary follows')
        summary_ = self.summary_tracker.diff()
        for line in format_(summary_):
            logger.info(line)
        log.msg('Profiling memory (tracking changes) done', system=self.__name__)
        return defer.succeed(None)


provideSubscriptionAdapter(subscription_factory(MemoryProfilerDaemonProcess), adapts=(Proc,))


def format_(rows, limit=15, sort='size', order='descending'):
    """Format the rows as a summary.

    Keyword arguments:
    limit -- the maximum number of elements to be listed
    sort  -- sort elements by 'size', 'type', or '#'
    order -- sort 'ascending' or 'descending'

    Heavily based on pympler.summary.print_
    """
    localrows = []
    for row in rows:
        localrows.append(list(row))
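
format_ is cut off here; only the copy into localrows survives. Based on its docstring and on pympler's summarize() output (rows of the form [type_name, count, total_size]), the remainder typically sorts by the requested column, truncates to `limit`, and renders fixed-width lines. The continuation below is a sketch under those assumptions, not the original function body.

    # hypothetical continuation of format_, assuming [type_name, count, total_size] rows
    keys = {'type': 0, '#': 1, 'size': 2}
    localrows.sort(key=lambda row: row[keys[sort]],
                   reverse=(order == 'descending'))
    for type_name, count, total_size in localrows[:limit]:
        yield '%-60s %10d %13d' % (type_name, count, total_size)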
Example 5

        if not self.try_index(searcher, model, event):
            log.msg("cannot (un)index %s %s" % (model, type(event).__name__), system="indexer")

    def try_index(self, searcher, model, event):
        path = canonical_path(model)
        op = 'un' if IModelDeletedEvent.providedBy(event) else ''

        log.msg("%sindexing %s %s" % (op, path, type(event).__name__), system="indexer")

        objs, unresolved_path = traverse_path(db.get_root()['oms_root'], path)
        if unresolved_path and not IModelDeletedEvent.providedBy(event):
            return False

        obj = objs[-1]

        try:
            if IModelDeletedEvent.providedBy(event):
                searcher.unindex_object(obj)
            else:
                searcher._index_object(obj)
        except NotYet:
            return False

        log.msg("%sindexed %s %s" % (op, path, type(event).__name__), system="indexer")
        return True

    def reindex(self):
        ReindexAction(None).execute(DetachedProtocol(), object())

provideSubscriptionAdapter(subscription_factory(IndexerDaemonProcess), adapts=(Proc,))
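
The try_index method above leans on the traverse_path contract: it returns the objects resolved along the path together with any segments that could not be resolved, so a non-empty unresolved_path means the target does not exist (acceptable only for delete events, where the object is gone by design). The toy function below mimics that return shape on plain dicts purely to make the contract concrete; it is an illustration, not the project's traverse_path.

def traverse_path_demo(root, path):
    # returns (objects_resolved_along_the_path, unresolved_segments)
    objs, current = [], root
    segments = [s for s in path.split('/') if s]
    for i, segment in enumerate(segments):
        if not isinstance(current, dict) or segment not in current:
            return objs, segments[i:]   # leftover segments: target is missing
        current = current[segment]
        objs.append(current)
    return objs, []

# traverse_path_demo({'computes': {'vm1': {}}}, 'computes/vm1') -> ([{'vm1': {}}, {}], [])
# traverse_path_demo({'computes': {}}, 'computes/vm1')          -> ([{}], ['vm1'])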
Example 6

            yield async_sleep(self.interval)

    @defer.inlineCallbacks
    def ping_check(self):

        @db.ro_transact
        def get_computes():
            oms_root = db.get_root()['oms_root']
            res = [(i, i.hostname)
                   for i in map(follow_symlinks, oms_root['computes'].listcontent())
                   if ICompute.providedBy(i)]

            return res

        ping_actions = []
        for i, hostname in (yield get_computes()):
            action = PingCheckAction(i)
            d = action.execute(DetachedProtocol(), object())
            ping_actions.append((hostname, d))

        def handle_errors(e, c):
            e.trap(Exception)
            log.msg("Got exception when pinging compute '%s': %s" % (c, e), system='ping-check')
            if get_config().getboolean('debug', 'print_exceptions'):
                log.err(system='ping-check')

        for c, deferred in ping_actions:
            deferred.addErrback(handle_errors, c)

provideSubscriptionAdapter(subscription_factory(PingCheckDaemonProcess), adapts=(Proc,))
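
The loop above attaches a per-compute errback and then lets the deferreds run on their own; the inlineCallbacks generator returns without waiting for them. If a caller needed ping_check to complete only after every ping has settled, the same (hostname, deferred) pairs could additionally be gathered into a DeferredList, roughly as sketched below (an add-on illustration, not part of the original method).

from twisted.internet import defer

@defer.inlineCallbacks
def wait_for_pings(ping_actions):
    # ping_actions is a list of (hostname, deferred) pairs as built above
    results = yield defer.DeferredList([d for _, d in ping_actions],
                                       consumeErrors=True)
    defer.returnValue(results)  # list of (success, result-or-Failure) pairs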