Beispiel #1
0
    def do_loop_turn(self):
        """Run one turn of the broker main loop.

        A turn: snapshot queue sizes, prune dead modules, (re)load the
        configuration if the arbiter sent a new one, collect broks from
        the satellite daemons, fan a copy out to external module queues,
        then manage broks internally for at most ~1 second before
        sleeping the remainder of the turn.
        """
        loop_time = time.time()
        # Snapshot queue lengths under the lock; other threads (modules)
        # may be appending concurrently.
        with self.broks_lock:
            nb_broks = len(self.broks)
            # NOTE(review): nb_external_broks is captured here but never
            # used below — candidate for removal, confirm no other reader.
            nb_external_broks = len(self.external_module_broks)
        logger.debug("[Broks] Begin Loop: managing queue broks [%d]" % nb_broks)

        self.broks_done = 0
        # FIXME: Does it come from a structure only known from enterprise ?
        # for mod in self.modules_manager.get_internal_instances():
        #     self.local_module_stats[mod.get_name()] = 0

        # Dump modules Queues size (debug only)
        external_modules = [
            external_module for external_module in self.modules_manager.instances
            if external_module.is_external
        ]
        for external_module in external_modules:
            try:
                logger.debug("[Broks] External Queue len (%s): %s" % (
                    external_module.get_name(), external_module.to_q.qsize()
                ))
            # qsize() can fail (e.g. dead sub-process queue); purely
            # informational, so only log at debug level and carry on.
            except Exception as exp:
                logger.debug("External Queue len (%s): Exception! %s" % (external_module.get_name(), exp))

        # Begin to clean modules: reap module processes that died
        self.check_and_del_zombie_modules()

        # Maybe the arbiter ask us to wait for a new conf
        # If true, we must restart all...
        if self.cur_conf is None:
            # Clean previous run from useless objects and close modules
            self.clean_previous_run()

            self.wait_for_initial_conf()
            # we may have been interrupted or so; then
            # just return from this loop turn
            if not self.new_conf:
                return
            self.setup_new_conf()

        # Now we check if arbiter speak to us in the pyro_daemon.
        # If so, we listen for it
        # When it pushes conf to us, we reinit connections
        self.watch_for_new_conf(0.0)
        if self.new_conf:
            # Graceful mode: spawn a child to take over, then this
            # process exits instead of reloading in place.
            if self.graceful_enabled and self.switch_process() is True:
                # Child successfully spawned, we're exiting
                return
            self.setup_new_conf()

        # Maybe the last loop we raised some broks internally we should
        # integrate them in broks
        self.interger_internal_broks()
        # Also reap broks sent from the arbiters
        self.interger_arbiter_broks()

        # Main job, go get broks in our distants daemons
        types = ['scheduler', 'poller', 'reactionner', 'receiver']
        for _type in types:
            _t = time.time()
            # And from schedulers
            self.get_new_broks(type=_type)
            statsmgr.timing('core.broker.get-new-broks.%s' % _type, time.time() - _t,
                            'perf')

        # We will works this turn with a copy of the broks, so we won't be
        # impacted by possible other threads (modules or so)
        with self.broks_lock:
            broks = copy.copy(self.broks)
            to_send = list(self.external_module_broks)
            # Reset the shared queues; we now own the snapshots above.
            self.broks = deque()
            self.external_module_broks = deque()

        # and for external queues
        # REF: doc/broker-modules.png (3)
        # We put to external queues broks that was not already send
        t0 = time.time()
        # We are sending broks as a big list, more efficient than one by one
        queues = self.modules_manager.get_external_to_queues()

        for q in queues:
            try:
                q.put(to_send)
            # we catch but the kill detector on the next loop will detect the
            # fail module and will manage it
            except Exception:
                logger.error(
                    'FAIL TO PUSH DATA TO EXTERNAL MODULE  this module will '
                    'be detected and restart.'
                )

        statsmgr.timing('core.broker.put-to-external-queue', time.time() - t0, 'perf')
        logger.debug("[Broks] Time to send [%s] broks to module ([%.3f] secs)" % (len(to_send), time.time() - t0))

        start = time.time()
        while len(broks) != 0:
            now = time.time()

            # Do not 'manage' more than 1s, we must get new broks
            # every 1s
            if now - start > 1:
                # so we must remerge our last broks with the main broks to do not
                # lost them
                with self.broks_lock:
                    logger.debug(
                        'Cannot manage all remaining broks [%d] in a loop '
                        'turn, push bask this broks in the queue.' % len(broks)
                    )
                    # extendleft keeps the unprocessed broks ahead of any
                    # broks appended by other threads meanwhile.
                    self.broks.extendleft(broks)
                break

            try:
                b = broks.pop()
            except IndexError:  # no more broks, maybe a daemon stop, not a problem, catch it
                break

            # Ok, we can get the brok, and doing something with it
            # REF: doc/broker-modules.png (4-5)
            # We un serialize the brok before consume it
            b.prepare()
            _t = time.time()
            self.manage_brok(b)
            statsmgr.timing('core.broker.manage-brok', time.time() - _t, 'perf')

        # Maybe external modules raised 'objects' we should get them
        nb_object_get = self.get_objects_from_from_queues()
        logger.debug(
            '[stats] nb object get control queues of external module [%d]' %
            nb_object_get
        )

        # Say to modules it's a new tick :)
        self.hook_point('tick')

        logger.debug('[stats] broks done this loop %d/%d' % (self.broks_done, nb_broks))

        # Sleep the remainder of the ~1s turn, clamped to [0.01, 1.0]s.
        time.sleep(max(0.01, min(1.0, 1.0 - (time.time() - loop_time))))
        # Checks if memory consumption did not exceed allowed thresold
        self.check_memory_usage()
Beispiel #2
0
                    def f_wrapper():
                        """HTTP dispatch wrapper around the registered function *f*.

                        Gathers the declared argument names (*args*) from the
                        bottle request — GET query parameters, or zlib-compressed
                        safe-pickled POST form fields — falling back to the
                        registered defaults. Optionally serializes the call behind
                        *lock*, JSON-encodes the result, and records per-phase
                        timings (args/lock/call/json/global) into statsmgr.

                        Returns the JSON string; raises Exception when a required
                        argument is missing and has no registered default.
                        """
                        t0 = time.time()
                        args_time = aqu_lock_time = calling_time = json_time = 0
                        # A handler may opt out of serialization with f.need_lock = False
                        need_lock = getattr(f, 'need_lock', True)

                        # Warning : put the bottle.response set inside the wrapper
                        # because outside it will break bottle
                        d = {}
                        method = getattr(f, 'method', 'get').lower()
                        for aname in args:
                            v = None
                            if method == 'post':
                                v = bottle.request.forms.get(aname, None)
                                # Post args are zlibed and cPickled (but in
                                # safemode)
                                if v is not None:
                                    v = zlib.decompress(v)
                                    v = SafeUnpickler.loads(v)
                            elif method == 'get':
                                v = bottle.request.GET.get(aname, None)
                            if v is None:
                                # Maybe we got a default value?
                                default_args = self.registered_fun_defaults.get(fname, {})
                                if aname not in default_args:
                                    raise Exception('Missing argument %s' % aname)
                                v = default_args[aname]
                            d[aname] = v

                        t1 = time.time()
                        args_time = t1 - t0

                        if need_lock:
                            logger.debug("HTTP: calling lock for %s", fname)
                            lock.acquire()

                        t2 = time.time()
                        aqu_lock_time = t2 - t1

                        try:
                            ret = f(**d)
                        # Always release the lock, even when f() raises
                        finally:
                            if need_lock:
                                lock.release()

                        t3 = time.time()
                        calling_time = t3 - t2

                        # BUGFIX: removed dead local `encode = getattr(f, 'encode', ...)`
                        # — it was computed and never used; output is always JSON.
                        j = json.dumps(ret)
                        t4 = time.time()
                        json_time = t4 - t3

                        global_time = t4 - t0
                        # BUGFIX: added the missing space between the two adjacent
                        # literals so the log reads "[aqu_lock:%s] [calling:%s]".
                        logger.debug("Debug perf: %s [args:%s] [aqu_lock:%s] "
                                     "[calling:%s] [json:%s] [global:%s]",
                                     fname, args_time, aqu_lock_time, calling_time, json_time,
                                     global_time)
                        lst = [('args', args_time), ('aqulock', aqu_lock_time),
                               ('calling', calling_time), ('json', json_time),
                               ('global', global_time)]
                        # increase the stats timers
                        for (k, _t) in lst:
                            statsmgr.timing('http.%s.%s' % (fname, k), _t, 'perf')

                        return j
Beispiel #3
0
 def pynag_con_init(self, id, type='scheduler'):
     """Initialize a connection to a satellite daemon, timing the attempt.

     Thin wrapper: delegates to do_pynag_con_init and records the elapsed
     time under the 'con-init.<type>' perf metric before returning the
     delegate's result.
     """
     started_at = time.time()
     result = self.do_pynag_con_init(id, type)
     elapsed = time.time() - started_at
     statsmgr.timing('con-init.%s' % type, elapsed, 'perf')
     return result
Beispiel #4
0
                def f_wrapper():
                    """HTTP dispatch wrapper around the registered function *f*.

                    Collects the declared argument names (*args*) from the
                    bottle request — GET query parameters, or zlib-compressed
                    safe-pickled POST form fields — falling back to registered
                    defaults, then calls f (behind *lock* unless opted out),
                    JSON-encodes the result and records per-phase timings.
                    """
                    t0 = time.time()
                    args_time = aqu_lock_time = calling_time = json_time = 0
                    # A handler may opt out of serialization with f.need_lock = False
                    need_lock = getattr(f, 'need_lock', True)

                    # Warning : put the bottle.response set inside the wrapper
                    # because outside it will break bottle
                    d = {}
                    method = getattr(f, 'method', 'get').lower()
                    for aname in args:
                        v = None
                        if method == 'post':
                            v = bottle.request.forms.get(aname, None)
                            # Post args are zlibed and cPickled (but in
                            # safemode)
                            if v is not None:
                                v = zlib.decompress(v)
                                v = SafeUnpickler.loads(v)
                        elif method == 'get':
                            v = bottle.request.GET.get(aname, None)
                        if v is None:
                            # Maybe we got a default value?
                            default_args = self.registered_fun_defaults.get(
                                fname, {})
                            if aname not in default_args:
                                raise Exception('Missing argument %s' % aname)
                            v = default_args[aname]
                        d[aname] = v

                    t1 = time.time()
                    args_time = t1 - t0

                    if need_lock:
                        logger.debug("HTTP: calling lock for %s", fname)
                        lock.acquire()

                    t2 = time.time()
                    aqu_lock_time = t2 - t1

                    try:
                        ret = f(**d)
                    # Always call the lock release if need
                    finally:
                        # Ok now we can release the lock
                        if need_lock:
                            lock.release()

                    t3 = time.time()
                    calling_time = t3 - t2

                    # NOTE(review): 'encode' is computed but never used below;
                    # the response is always JSON. Candidate for removal.
                    encode = getattr(f, 'encode', 'json').lower()
                    j = json.dumps(ret)
                    t4 = time.time()
                    json_time = t4 - t3

                    global_time = t4 - t0
                    # NOTE(review): the two adjacent string literals concatenate
                    # without a space: "...[aqu_lock:%s][calling:%s]..."
                    logger.debug(
                        "Debug perf: %s [args:%s] [aqu_lock:%s]"
                        "[calling:%s] [json:%s] [global:%s]", fname, args_time,
                        aqu_lock_time, calling_time, json_time, global_time)
                    lst = [('args', args_time), ('aqulock', aqu_lock_time),
                           ('calling', calling_time), ('json', json_time),
                           ('global', global_time)]
                    # increase the stats timers
                    for (k, _t) in lst:
                        statsmgr.timing('http.%s.%s' % (fname, k), _t, 'perf')

                    # NOTE(review): json.dumps(d) here can raise if an argument
                    # value is not JSON-serializable — confirm inputs are safe.
                    logger.info(
                        "[api]:{func}, [args]:{args} [cost]:{cost}".format(
                            func=fname, args=json.dumps(d), cost=calling_time))
                    return j
Beispiel #5
0
 def pynag_con_init(self, id, type="scheduler"):
     """Initialize a connection to a satellite daemon, timing the attempt.

     Delegates the real work to do_pynag_con_init and reports how long it
     took under the "con-init.<type>" perf metric, then returns whatever
     the delegate returned.
     """
     begin = time.time()
     outcome = self.do_pynag_con_init(id, type)
     statsmgr.timing("con-init.%s" % type, time.time() - begin, "perf")
     return outcome
Beispiel #6
0
        # NOTE(review): fragment of a broker loop-turn method — the enclosing
        # `def` is outside this view, so the code is documented in place only.
        # Reload configuration if the arbiter pushed a new one.
        if self.new_conf:
            self.setup_new_conf()

        # Maybe the last loop we raised some broks internally
        # we should integrate them in broks
        self.interger_internal_broks()
        # Also reap broks sent from the arbiters
        self.interger_arbiter_broks()

        # Main job, go get broks in our distants daemons
        types = ["scheduler", "poller", "reactionner", "receiver"]
        for _type in types:
            _t = time.time()
            # And from schedulers
            self.get_new_broks(type=_type)
            statsmgr.timing("core.broker.get-new-broks.%s" % _type, time.time() - _t, "perf")

        # Sort the brok list by id
        self.broks.sort(sort_by_ids)

        # and for external queues
        # REF: doc/broker-modules.png (3)
        # We put to external queues broks that was not already send
        t0 = time.time()
        # We are sending broks as a big list, more efficient than one by one
        ext_modules = self.modules_manager.get_external_instances()
        # Broks default to being forwarded unless flagged need_send_to_ext=False.
        to_send = [b for b in self.broks if getattr(b, "need_send_to_ext", True)]

        # Send our pack to all external modules to_q queue so they can get the whole packet
        # beware, the sub-process/queue can be die/close, so we put to restart the whole module
        # instead of killing ourself :)
Beispiel #7
0
        # NOTE(review): fragment of a broker loop-turn method — the enclosing
        # `def` starts before this view and the send logic continues after it.
        # Reload configuration if the arbiter pushed a new one.
        if self.new_conf:
            self.setup_new_conf()

        # Maybe the last loop we raised some broks internally
        # we should integrate them in broks
        self.interger_internal_broks()
        # Also reap broks sent from the arbiters
        self.interger_arbiter_broks()

        # Main job, go get broks in our distants daemons
        types = ['scheduler', 'poller', 'reactionner', 'receiver']
        for _type in types:
            _t = time.time()
            # And from schedulers
            self.get_new_broks(type=_type)
            statsmgr.timing('core.broker.get-new-broks.%s' % _type,
                            time.time() - _t, 'perf')

        # Sort the brok list by id
        self.broks.sort(sort_by_ids)

        # and for external queues
        # REF: doc/broker-modules.png (3)
        # We put to external queues broks that was not already send
        t0 = time.time()
        # We are sending broks as a big list, more efficient than one by one
        ext_modules = self.modules_manager.get_external_instances()
        # Broks default to being forwarded unless flagged need_send_to_ext=False.
        to_send = [
            b for b in self.broks if getattr(b, 'need_send_to_ext', True)
        ]

        # Send our pack to all external modules to_q queue so they can get the whole packet