Code example #1
    def _get_object(self, o_type, o_name=None):
        """Get an object from the scheduler

        Returns None if the required object type (`o_type`) is not known.
        Else returns the serialized object if found. The object is searched first with
        o_name as its name and then with o_name as its uuid.

        :param o_type: searched object type
        :type o_type: str
        :param o_name: searched object name
        :type o_name: str
        :return: serialized object
        :rtype: str
        """
        try:
            o_found = None
            o_list = self._get_objects(o_type)
            if o_list:
                if o_name is None:
                    return serialize(o_list, True) if o_list else None
                # We expected a name...
                o_found = o_list.find_by_name(o_name)
                if not o_found:
                    # ... but perhaps we got an object uuid
                    o_found = o_list[o_name]
        except Exception:  # pylint: disable=broad-except
            return None
        return serialize(o_found, True) if o_found else None
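The lookup above searches by name first and only then treats `o_name` as a uuid key, with any exception collapsed into a `None` result. A minimal, self-contained sketch of that fallback pattern (the `DummyItems` container below is hypothetical and only stands in for the Alignak items list):

    # Hypothetical container: indexed by uuid, with a side index by name.
    class DummyItems(dict):
        def __init__(self, items_by_uuid):
            super().__init__(items_by_uuid)
            self._by_name = {item['name']: item for item in items_by_uuid.values()}

        def find_by_name(self, name):
            return self._by_name.get(name)

    items = DummyItems({'a1b2c3': {'name': 'localhost', 'address': '127.0.0.1'}})

    def lookup(o_name):
        # Search by name first...
        found = items.find_by_name(o_name)
        if not found:
            # ... then fall back to a uuid lookup
            found = items.get(o_name)
        return found

    assert lookup('localhost') is lookup('a1b2c3')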
Code example #2
    def _get_object(self, o_type, o_name=None):
        """Get an object from the scheduler

        Returns None if the required object type (`o_type`) is not known.
        Else returns the serialized object if found. The object is searched first with
        o_name as its name and then with o_name as its uuid.

        :param o_type: searched object type
        :type o_type: str
        :param o_name: searched object name
        :type o_name: str
        :return: serialized object
        :rtype: str
        """
        try:
            o_found = None
            o_list = self._get_objects(o_type)
            if o_list:
                if o_name is None:
                    return serialize(o_list, True) if o_list else None
                # We expected a name...
                o_found = o_list.find_by_name(o_name)
                if not o_found:
                    # ... but perhaps we got an object uuid
                    o_found = o_list[o_name]
        except Exception:  # pylint: disable=broad-except
            return None
        return serialize(o_found, True) if o_found else None
Code example #3
    def _checks(self, do_checks=False, do_actions=False, poller_tags=None,
                reactionner_tags=None, worker_name='none', module_types=None):
        """Get checks from scheduler, used by poller or reactionner when they are
        in active mode (passive = False)

        This function is not intended for external use. Let the poller and reactionner
        manage all this stuff by themselves ;)

        :param do_checks: used for poller to get checks
        :type do_checks: bool
        :param do_actions: used for reactionner to get actions
        :type do_actions: bool
        :param poller_tags: poller tags to filter on this poller
        :type poller_tags: list
        :param reactionner_tags: reactionner tags to filter on this reactionner
        :type reactionner_tags: list
        :param worker_name: name of the requesting worker (so that the scheduler adds it to the action objects)
        :type worker_name: str
        :param module_types: Module type to filter actions/checks
        :type module_types: list
        :return: serialized check/action list
        :rtype: str
        """
        if poller_tags is None:
            poller_tags = ['None']
        if reactionner_tags is None:
            reactionner_tags = ['None']
        if module_types is None:
            module_types = ['fork']
        do_checks = (do_checks == 'True')
        do_actions = (do_actions == 'True')
        res = self.app.sched.get_to_run_checks(do_checks, do_actions, poller_tags, reactionner_tags,
                                               worker_name, module_types)

        return serialize(res, True)
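Note the `do_checks = (do_checks == 'True')` coercion: the endpoint is reached over HTTP, so boolean parameters usually arrive as the strings 'True'/'False', while the Python default stays a real `False`. A tiny illustration of why the comparison, rather than `bool()`, is used:

    def as_bool(param):
        # Only the literal string 'True' counts as true; the string 'False'
        # and the default value False both map to False.
        return param == 'True'

    assert as_bool('True') is True
    assert as_bool('False') is False      # bool('False') would be True
    assert as_bool(False) is False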
Code example #4
    def post(self, path, args, wait=False):
        """POST an HTTP request to a daemon

        :param path: path to do the request
        :type path: str
        :param args: args to add in the request
        :type args: dict
        :param wait: True for a long timeout
        :type wait: bool
        :return: Content of the HTTP response if server returned 200
        :rtype: str
        """
        uri = self.make_uri(path)
        timeout = self.make_timeout(wait)
        for (key, value) in list(args.items()):
            args[key] = serialize(value, True)
        try:
            logger.debug("post: %s, timeout: %s, params: %s", uri, timeout,
                         args)
            rsp = self._requests_con.post(uri,
                                          json=args,
                                          timeout=timeout,
                                          verify=self.strong_ssl)
            logger.debug("got: %d - %s", rsp.status_code, rsp.text)
            if rsp.status_code != 200:
                raise HTTPClientDataException(rsp.status_code, rsp.text, uri)
            return rsp.content
        except (requests.Timeout, requests.ConnectTimeout):
            raise HTTPClientTimeoutException(timeout, uri)
        except requests.ConnectionError as exp:
            raise HTTPClientConnectionException(uri, exp.args[0])
        except Exception as exp:
            raise HTTPClientException('Request error to %s: %s' % (uri, exp))
Code example #5
    def post(self, path, args, wait='short'):
        """Do a POST HTTP request

        :param path: path to do the request
        :type path: str
        :param args: args to add in the request
        :type args: dict
        :param wait: timeout policy (short / long)
        :type wait: str
        :return: Content of the HTTP response if server returned 200
        :rtype: str
        """
        uri = self.make_uri(path)
        timeout = self.make_timeout(wait)
        for (key, value) in list(args.items()):
            args[key] = serialize(value, True)
        try:
            rsp = self._requests_con.post(uri, json=args, timeout=timeout, verify=self.strong_ssl)
            if rsp.status_code != 200:
                raise Exception("HTTP POST not OK: %s ; text=%r" % (rsp.status_code, rsp.text))
        except (requests.Timeout, requests.ConnectTimeout):
            raise HTTPClientTimeoutException(timeout, uri)
        except requests.ConnectionError as exp:
            raise HTTPClientConnectionException(uri, exp.args[0])
        except Exception as err:
            raise HTTPClientException('Request error to %s: %s' % (uri, err))
        return rsp.content
Code example #6
File: client.py Project: Alignak-monitoring/alignak
    def post(self, path, args, wait=False):
        """POST an HTTP request to a daemon

        :param path: path to do the request
        :type path: str
        :param args: args to add in the request
        :type args: dict
        :param wait: True for a long timeout
        :type wait: bool
        :return: Content of the HTTP response if server returned 200
        :rtype: str
        """
        uri = self.make_uri(path)
        timeout = self.make_timeout(wait)
        for (key, value) in list(args.items()):
            args[key] = serialize(value, True)
        try:
            logger.debug("post: %s, timeout: %s, params: %s", uri, timeout, args)
            rsp = self._requests_con.post(uri, json=args, timeout=timeout, verify=self.strong_ssl)
            logger.debug("got: %d - %s", rsp.status_code, rsp.text)
            if rsp.status_code != 200:
                raise HTTPClientDataException(rsp.status_code, rsp.text, uri)
            return rsp.content
        except (requests.Timeout, requests.ConnectTimeout):
            raise HTTPClientTimeoutException(timeout, uri)
        except requests.ConnectionError as exp:
            raise HTTPClientConnectionException(uri, exp.args[0])
        except Exception as exp:
            raise HTTPClientException('Request error to %s: %s' % (uri, exp))
Code example #7
    def get_broks(self, bname):  # pylint: disable=W0613
        """Get broks from the daemon

        :return: Brok list serialized
        :rtype: dict
        """
        with self.app.lock:
            res = self.app.get_broks()

        return serialize(res, True)
Code example #8
File: generic_interface.py Project: jbiousse/alignak
    def _events(self):
        """Get the monitoring events from the daemon

        This is used by the arbiter to get the monitoring events from all its satellites

        :return: Events list serialized
        :rtype: list
        """
        with self.app.events_lock:
            res = self.app.get_events()
        return serialize(res, True)
Code example #9
    def _events(self):
        """Get the monitoring events from the daemon

        This is used by the arbiter to get the monitoring events from all its satellites

        :return: Events list serialized
        :rtype: list
        """
        with self.app.events_lock:
            res = self.app.get_events()
        return serialize(res, True)
Code example #10
File: generic_interface.py Project: jbiousse/alignak
    def _broks(self, broker_name):  # pylint: disable=unused-argument
        """Get the broks from the daemon

        This is used by the brokers to get the broks list of a daemon

        :return: Brok list serialized
        :rtype: dict
        """
        with self.app.broks_lock:
            res = self.app.get_broks()
        return serialize(res, True)
Code example #11
    def _broks(self, broker_name):  # pylint: disable=unused-argument
        """Get the broks from the daemon

        This is used by the brokers to get the broks list of a daemon

        :return: Brok list serialized
        :rtype: dict
        """
        with self.app.broks_lock:
            res = self.app.get_broks()
        return serialize(res, True)
Code example #12
    def serialize(self):
        """This function serialize into a simple dict object.
        It is used when transferring data to other daemons over the network (http)

        Here we directly return all attributes

        :return: json representation of a DependencyNode
        :rtype: dict
        """
        return {'operand': self.operand, 'sons': [serialize(elem) for elem in self.sons],
                'of_values': self.of_values, 'is_of_mul': self.is_of_mul,
                'not_value': self.not_value}
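The dict above flattens a business-rule tree: scalar attributes are copied as-is and the child nodes in `sons` are serialized recursively. A hypothetical `MiniNode` (illustrative only, not the Alignak `DependencyNode`, and calling each child's own `serialize()` instead of the module-level helper) shows the resulting shape:

    class MiniNode:
        # Illustrative stand-in for a dependency-rule node.
        def __init__(self, operand, sons=None, of_values=('0', '0', '0'),
                     is_of_mul=False, not_value=False):
            self.operand = operand
            self.sons = sons or []
            self.of_values = list(of_values)
            self.is_of_mul = is_of_mul
            self.not_value = not_value

        def serialize(self):
            return {'operand': self.operand,
                    'sons': [son.serialize() for son in self.sons],
                    'of_values': self.of_values,
                    'is_of_mul': self.is_of_mul,
                    'not_value': self.not_value}

    tree = MiniNode('&', sons=[MiniNode('host'), MiniNode('service')])
    print(tree.serialize()['sons'][0]['operand'])  # -> 'host'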
Code example #13
    def get_returns(self, sched_id):
        """Get actions returns (serialized)
        for the scheduler with _id = sched_id

        :param sched_id: id of the scheduler
        :type sched_id: int
        :return: serialized list
        :rtype: str
        """
        with self.app.lock:
            ret = self.app.get_return_for_passive(sched_id)
            return serialize(ret, True)
Code example #14
    def _results(self, scheduler_instance_id):
        """Get the results of the executed actions for the scheduler which instance id is provided

        Calling this method for daemons that are not configured as passive does not make sense.
        Indeed, this service should only be exposed on poller and reactionner daemons.

        :param scheduler_instance_id: instance id of the scheduler
        :type scheduler_instance_id: string
        :return: serialized list
        :rtype: str
        """
        with self.app.lock:
            res = self.app.get_results_from_passive(scheduler_instance_id)
        return serialize(res, True)
Code example #15
    def get_external_commands(self):
        """Get the external commands from the daemon (internal)
        Use a lock for this call (not a global one, just for this method)

        :return: serialized external command list
        :rtype: str
        """
        if hasattr(self.app, 'external_commands_lock'):
            with self.app.external_commands_lock:
                cmds = self.app.get_external_commands()
                raw = serialize(cmds, True)
        else:
            raw = []
        return raw
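The `hasattr` guard lets the method degrade gracefully on daemons that never defined the dedicated lock. A small sketch of that guard pattern, with a hypothetical `DummyApp` standing in for the daemon:

    import threading

    class DummyApp:
        # Hypothetical daemon exposing a per-method lock and a command getter.
        external_commands_lock = threading.Lock()

        def get_external_commands(self):
            return ['[1234567890] RESTART_PROGRAM']

    app = DummyApp()
    if hasattr(app, 'external_commands_lock'):
        with app.external_commands_lock:
            cmds = app.get_external_commands()
    else:
        cmds = []
    print(cmds)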
Code example #16
File: generic_interface.py Project: jbiousse/alignak
    def _results(self, scheduler_instance_id):
        """Get the results of the executed actions for the scheduler which instance id is provided

        Calling this method for daemons that are not configured as passive does not make sense.
        Indeed, this service should only be exposed on poller and reactionner daemons.

        :param scheduler_instance_id: instance id of the scheduler
        :type scheduler_instance_id: string
        :return: serialized list
        :rtype: str
        """
        with self.app.lock:
            res = self.app.get_results_from_passive(scheduler_instance_id)
        return serialize(res, True)
Code example #17
    def serialize(self):
        """This function serialize into a simple dict object.
        It is used when transferring data to other daemons over the network (http)

        Here we directly return all attributes

        :return: json representation of a DependencyNode
        :rtype: dict
        """
        return {
            'operand': self.operand,
            'sons': [serialize(elem) for elem in self.sons],
            'of_values': self.of_values,
            'is_of_mul': self.is_of_mul,
            'not_value': self.not_value
        }
Code example #18
File: brok.py Project: Alignak-monitoring/alignak
    def __init__(self, params, parsing=True):
        # pylint: disable=unused-argument
        """
        :param params: initialization parameters
        :type params: dict
        :param parsing: not used but necessary for serialization/unserialization
        :type parsing: bool
        """
        self.uuid = params.get('uuid', get_a_new_object_id())
        self.prepared = params.get('prepared', False)
        self.creation_time = params.get('creation_time', time.time())
        self.type = params.get('type', u'unknown')
        self.instance_id = params.get('instance_id', None)

        # Need to behave differently when un-serializing
        if 'uuid' in params:
            self.data = params['data']
        else:
            self.data = serialize(params['data'])
Code example #19
File: brok.py Project: jbiousse/alignak
    def __init__(self, params, parsing=True):
        # pylint: disable=unused-argument
        """
        :param params: initialization parameters
        :type params: dict
        :param parsing: not used but necessary for serialization/unserialization
        :type parsing: bool
        """
        self.uuid = params.get('uuid', get_a_new_object_id())
        self.prepared = params.get('prepared', False)
        self.creation_time = params.get('creation_time', time.time())
        self.type = params.get('type', u'unknown')
        self.instance_id = params.get('instance_id', None)

        # Need to behave differently when un-serializing
        if 'uuid' in params:
            self.data = params['data']
        else:
            self.data = serialize(params['data'])
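Both Brok constructors branch on the presence of 'uuid': a freshly created brok carries raw `data` that still needs serializing, while a brok rebuilt from another daemon already has a uuid and pre-serialized data. A self-contained illustration of that branch (the `MiniBrok` class and the JSON-based `fake_serialize` helper are stand-ins, not the Alignak implementations):

    import json
    import time
    import uuid

    def fake_serialize(data):
        # Stand-in for the Alignak serialize() helper: plain JSON here.
        return json.dumps(data)

    class MiniBrok:
        # Illustrative only, not the Alignak Brok class.
        def __init__(self, params):
            self.uuid = params.get('uuid', uuid.uuid4().hex)
            self.type = params.get('type', 'unknown')
            self.creation_time = params.get('creation_time', time.time())
            # A 'uuid' key means we are re-building an object received from
            # another daemon, so its data is already serialized.
            if 'uuid' in params:
                self.data = params['data']
            else:
                self.data = fake_serialize(params['data'])

    fresh = MiniBrok({'type': 'log', 'data': {'level': 'info'}})
    rebuilt = MiniBrok({'uuid': fresh.uuid, 'type': fresh.type, 'data': fresh.data})
    assert rebuilt.data == fresh.data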
Code example #20
File: scheduler_interface.py Project: jgmel/alignak
    def get_checks(self,
                   do_checks=False,
                   do_actions=False,
                   poller_tags=None,
                   reactionner_tags=None,
                   worker_name='none',
                   module_types=None):
        """Get checks from scheduler, used by poller or reactionner (active ones)

        :param do_checks: used for poller to get checks
        :type do_checks: bool
        :param do_actions: used for reactionner to get actions
        :type do_actions: bool
        :param poller_tags: poller tags to filter on this poller
        :type poller_tags: list
        :param reactionner_tags: reactionner tags to filter on this reactionner
        :type reactionner_tags: list
        :param worker_name: name of the requesting worker (so that the scheduler adds it to the action objects)
        :type worker_name: str
        :param module_types: Module type to filter actions/checks
        :type module_types: list
        :return: serialized check/action list
        :rtype: str
        """
        if poller_tags is None:
            poller_tags = ['None']
        if reactionner_tags is None:
            reactionner_tags = ['None']
        if module_types is None:
            module_types = ['fork']
        do_checks = (do_checks == 'True')
        do_actions = (do_actions == 'True')
        res = self.app.sched.get_to_run_checks(do_checks, do_actions,
                                               poller_tags, reactionner_tags,
                                               worker_name, module_types)
        # Count actions got by the poller/reactionner
        if do_checks:
            self.app.nb_pulled_checks += len(res)
        if do_actions:
            self.app.nb_pulled_actions += len(res)
        # self.app.sched.nb_checks_send += len(res)

        return serialize(res, True)
Code example #21
    def __init__(self, params, parsing=True):
        if not parsing:
            if params is None:
                return
            for key, value in list(params.items()):
                setattr(self, key, value)

            if not hasattr(self, 'uuid'):
                self.uuid = uuid.uuid4().hex
            return
        self.uuid = params.get('uuid', uuid.uuid4().hex)
        self.type = params['type']
        self.instance_id = params.get('instance_id', None)
        # Again need to behave differently when un-serializing
        if 'uuid' in params:
            self.data = params['data']
        else:
            self.data = serialize(params['data'])
        self.prepared = params.get('prepared', False)
        self.creation_time = params.get('creation_time', time.time())
Code example #22
    def _checks(self,
                do_checks=False,
                do_actions=False,
                poller_tags=None,
                reactionner_tags=None,
                worker_name='none',
                module_types=None):
        """Get checks from scheduler, used by poller or reactionner when they are
        in active mode (passive = False)

        This function is not intended for external use. Let the poller and reactionner
        manage all this stuff by themselves ;)

        :param do_checks: used for poller to get checks
        :type do_checks: bool
        :param do_actions: used for reactionner to get actions
        :type do_actions: bool
        :param poller_tags: poller tags to filter on this poller
        :type poller_tags: list
        :param reactionner_tags: reactionner tags to filter on this reactionner
        :type reactionner_tags: list
        :param worker_name: name of the requesting worker (so that the scheduler adds it to the action objects)
        :type worker_name: str
        :param module_types: Module type to filter actions/checks
        :type module_types: list
        :return: serialized check/action list
        :rtype: str
        """
        if poller_tags is None:
            poller_tags = ['None']
        if reactionner_tags is None:
            reactionner_tags = ['None']
        if module_types is None:
            module_types = ['fork']
        do_checks = (do_checks == 'True')
        do_actions = (do_actions == 'True')
        res = self.app.sched.get_to_run_checks(do_checks, do_actions,
                                               poller_tags, reactionner_tags,
                                               worker_name, module_types)

        return serialize(res, True)
Code example #23
    def _broks(self, broker_name):
        """Get the broks from a scheduler, used by brokers

        This is used by the brokers to get the broks list of a scheduler

        :param broker_name: broker name, used to filter broks
        :type broker_name: str
        :return: serialized brok list
        :rtype: dict
        """
        logger.debug("Getting broks for %s from the scheduler", broker_name)
        for broker_link in list(self.app.brokers.values()):
            if broker_name == broker_link.name:
                break
        else:
            logger.warning("Requesting broks for an unknown broker: %s", broker_name)
            return {}

        # Now get the broks for this specific broker
        with self.app.broks_lock:
            res = self.app.get_broks(broker_name)

        return serialize(res, True)
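The `for ... else` above is the standard Python idiom for "search, and bail out if nothing matched": the `else` branch runs only when the loop completes without hitting `break`. A short, self-contained illustration with hypothetical broker names:

    known_brokers = ['broker-master', 'broker-north']

    def find_broker(name):
        for broker in known_brokers:
            if broker == name:
                break
        else:
            # Reached only when the loop finished without a break
            return None
        return broker

    assert find_broker('broker-north') == 'broker-north'
    assert find_broker('broker-unknown') is None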
Code example #24
    def _broks(self, broker_name):
        """Get the broks from a scheduler, used by brokers

        This is used by the brokers to get the broks list of a scheduler

        :param broker_name: broker name, used to filter broks
        :type broker_name: str
        :return: serialized brok list
        :rtype: dict
        """
        logger.debug("Getting broks for %s from the scheduler", broker_name)
        for broker_link in list(self.app.brokers.values()):
            if broker_name == broker_link.name:
                break
        else:
            logger.warning("Requesting broks for an unknown broker: %s",
                           broker_name)
            return {}

        # Now get the broks for this specific broker
        with self.app.broks_lock:
            res = self.app.get_broks(broker_name)

        return serialize(res, True)
Code example #25
File: scheduler_interface.py Project: jgmel/alignak
    def get_broks(self, bname):
        """Get broks from scheduler, used by brokers

        :param bname: broker name, used to filter broks
        :type bname: str
        :return: serialized brok list
        :rtype: dict
        """
        # Maybe it was not registered as it should have been;
        # if so, do it now
        if bname not in self.app.sched.brokers:
            self.fill_initial_broks(bname)
        elif not self.app.sched.brokers[bname]['initialized']:
            self.fill_initial_broks(bname)

        if bname not in self.app.sched.brokers:
            return {}

        # Now get the broks for this specific broker
        res = self.app.sched.get_broks(bname)

        # We no longer have a full broks list in the queue
        self.app.sched.brokers[bname]['has_full_broks'] = False
        return serialize(res, True)
Code example #26
File: dispatcher.py Project: andy-freefly/alignak
    def prepare_dispatch(self):
        # pylint:disable=too-many-branches, too-many-statements, too-many-locals
        """
        Prepare dispatch, so prepare for each daemon (schedulers, brokers, receivers, reactionners,
        pollers)

        This function will only prepare something if self.new_to_dispatch is False
        It will reset the first_dispatch_done flag

        A DispatcherError exception is raised if a configuration is already prepared! Unset the
        new_to_dispatch flag before calling!

        :return: None
        """
        if self.new_to_dispatch:
            raise DispatcherError("A configuration is already prepared!")

        # So we are preparing a new dispatching...
        self.new_to_dispatch = True
        self.first_dispatch_done = False

        # Update Alignak name for all the satellites
        for daemon_link in self.all_daemons_links:
            daemon_link.cfg.update(
                {'alignak_name': self.alignak_conf.alignak_name})

        logger.info("Preparing realms dispatch:")

        # Prepare the arbiters configuration
        master_arbiter_cfg = arbiters_cfg = {}
        for arbiter_link in self.get_satellites_list('arbiters'):
            # # If not me and not a spare arbiter...
            # if arbiter_link == self.arbiter_link:
            #     # I exclude myself from the dispatching, I have my configuration ;)
            #     continue

            if not arbiter_link.active:
                # I exclude the daemons that are not active
                continue

            arbiter_cfg = arbiter_link.cfg
            arbiter_cfg.update({
                'managed_hosts_names':
                [h.get_name() for h in self.alignak_conf.hosts],
                'modules':
                serialize(arbiter_link.modules, True),
                'managed_conf_id':
                self.alignak_conf.instance_id,
                'push_flavor':
                ''
            })

            # Hash the configuration
            cfg_string = json.dumps(arbiter_cfg,
                                    sort_keys=True).encode('utf-8')
            arbiter_cfg['hash'] = hashlib.sha1(cfg_string).hexdigest()

            # Update the arbiters list, but do not include the whole conf
            arbiters_cfg[arbiter_link.uuid] = arbiter_cfg['self_conf']

            # Not for the master arbiter...
            if arbiter_link != self.arbiter_link:
                arbiter_cfg.update({
                    'arbiters':
                    master_arbiter_cfg,
                    'whole_conf':
                    self.alignak_conf.spare_arbiter_conf,
                })

                # Hash the whole configuration
                try:
                    s_conf_part = json.dumps(arbiter_cfg,
                                             sort_keys=True).encode('utf-8')
                except UnicodeDecodeError:
                    pass
                arbiter_cfg['hash'] = hashlib.sha1(s_conf_part).hexdigest()

            # Dump the configuration part size
            pickled_conf = pickle.dumps(arbiter_cfg)
            logger.info('   arbiter configuration size: %d bytes',
                        sys.getsizeof(pickled_conf))

            # The configuration is assigned to the arbiter
            # todo: perhaps this should be done in the realms (like schedulers and satellites)?
            arbiter_link.cfg = arbiter_cfg
            arbiter_link.cfg_to_manage = self.alignak_conf
            arbiter_link.push_flavor = arbiter_cfg['push_flavor']
            arbiter_link.hash = arbiter_cfg['hash']
            arbiter_link.need_conf = False
            arbiter_link.configuration_sent = False

            # If this is me, the master arbiter...
            if arbiter_link == self.arbiter_link:
                # The master arbiter configuration for the other satellites
                master_arbiter_cfg = {
                    self.arbiter_link.uuid: arbiter_cfg['self_conf']
                }

            logger.info('   arbiter configuration prepared for %s',
                        arbiter_link.name)

        # main_realm = self.alignak_conf.realms.find_by_name('All')
        # all_realms = main_realm.all_sub_members
        # for realm_uuid in all_realms:
        #     realm = self.alignak_conf.realms[realm_uuid]
        #     logger.info("- realm %s: %s", realm_uuid, realm)

        for realm in self.alignak_conf.realms:
            logger.info("- realm %s: %d configuration part(s)", realm.name,
                        len(realm.parts))

            # parts_to_dispatch is a list of configuration parts built when
            # the configuration is split into parts for the realms and their schedulers
            # Only get the parts that are not yet assigned to a scheduler
            parts_to_dispatch = [
                cfg for cfg in list(realm.parts.values())
                if not cfg.is_assigned
            ]
            if not parts_to_dispatch:
                logger.info('  no configuration to dispatch for this realm!')
                continue

            logger.info(" preparing the dispatch for schedulers:")

            # Now we get all the schedulers of this realm and upper
            # schedulers = self.get_scheduler_ordered_list(realm)
            schedulers = realm.get_potential_satellites_by_type(
                self.get_satellites_list('schedulers'), 'scheduler')
            if not schedulers:
                logger.error('  no available schedulers in this realm (%s)!',
                             realm)
                continue
            logger.info("  realm schedulers: %s",
                        ','.join([s.get_name() for s in schedulers]))

            for cfg_part in parts_to_dispatch:
                logger.info("  .assigning configuration part %s (%s), name:%s",
                            cfg_part.instance_id, cfg_part.uuid,
                            cfg_part.config_name)

                # we need to loop until the configuration part is assigned to a scheduler
                # or no more scheduler is available
                while True:
                    try:
                        scheduler_link = schedulers.pop()
                    except IndexError:  # No more schedulers.. not good, no loop
                        # The configuration part does not need to be dispatched anymore
                        # todo: should be managed inside the Realm class!
                        logger.error("No more scheduler link: %s", realm)
                        for sat_type in ('reactionner', 'poller', 'broker',
                                         'receiver'):
                            realm.to_satellites[sat_type][
                                cfg_part.instance_id] = None
                            realm.to_satellites_need_dispatch[sat_type][cfg_part.instance_id] = \
                                False
                            realm.to_satellites_managed_by[sat_type][
                                cfg_part.instance_id] = []
                        break

                    # if scheduler_link.manage_sub_realms:
                    #     logger.warning('[%s] The scheduler %s is configured to manage sub realms.'
                    #                    ' This is not yet possible, sorry!',
                    #                    realm.name, scheduler_link.name)
                    #     scheduler_link.manage_sub_realms = False
                    #     continue

                    if not scheduler_link.need_conf:
                        logger.info(
                            '[%s] The scheduler %s do not need any configuration, sorry',
                            realm.name, scheduler_link.name)
                        continue

                    logger.debug(
                        "   preparing configuration part '%s' for the scheduler '%s'",
                        cfg_part.instance_id, scheduler_link.name)
                    logger.debug("   - %d hosts, %d services",
                                 len(cfg_part.hosts), len(cfg_part.services))

                    # Serialization and hashing
                    s_conf_part = serialize(realm.parts[cfg_part.instance_id])
                    try:
                        s_conf_part = s_conf_part.encode('utf-8')
                    except UnicodeDecodeError:
                        pass
                    cfg_part.push_flavor = hashlib.sha1(
                        s_conf_part).hexdigest()

                    # We generate the scheduler configuration for the satellites:
                    # ---
                    sat_scheduler_cfg = scheduler_link.give_satellite_cfg()
                    sat_scheduler_cfg.update({
                        'managed_hosts_names':
                        [h.get_name() for h in cfg_part.hosts],
                        'managed_conf_id':
                        cfg_part.instance_id,
                        'push_flavor':
                        cfg_part.push_flavor
                    })
                    # Generate a configuration hash
                    cfg_string = json.dumps(sat_scheduler_cfg,
                                            sort_keys=True).encode('utf-8')
                    sat_scheduler_cfg['hash'] = hashlib.sha1(
                        cfg_string).hexdigest()

                    logger.debug(' satellite scheduler configuration: %s',
                                 sat_scheduler_cfg)
                    for sat_type in ('reactionner', 'poller', 'broker',
                                     'receiver'):
                        realm.to_satellites[sat_type][
                            cfg_part.instance_id] = sat_scheduler_cfg
                        realm.to_satellites_need_dispatch[sat_type][
                            cfg_part.instance_id] = True
                        realm.to_satellites_managed_by[sat_type][
                            cfg_part.instance_id] = []
                    # ---

                    scheduler_link.cfg.update({
                        # Global instance configuration
                        'instance_id':
                        scheduler_link.instance_id,
                        'instance_name':
                        scheduler_link.name,
                        'schedulers': {
                            scheduler_link.uuid: sat_scheduler_cfg
                        },
                        'arbiters':
                        arbiters_cfg if scheduler_link.manage_arbiters else {},
                        'satellites':
                        realm.get_links_for_a_scheduler(
                            self.pollers, self.reactionners, self.brokers),
                        'modules':
                        serialize(scheduler_link.modules, True),
                        'conf_part':
                        serialize(realm.parts[cfg_part.instance_id]),
                        'managed_conf_id':
                        cfg_part.instance_id,
                        'push_flavor':
                        cfg_part.push_flavor,
                        'override_conf':
                        scheduler_link.get_override_configuration()
                    })

                    # Hash the whole configuration
                    cfg_string = json.dumps(scheduler_link.cfg,
                                            sort_keys=True).encode('utf-8')
                    scheduler_link.cfg['hash'] = hashlib.sha1(
                        cfg_string).hexdigest()

                    # Dump the configuration part size
                    pickled_conf = pickle.dumps(scheduler_link.cfg)
                    logger.info("   scheduler configuration size: %d bytes",
                                sys.getsizeof(pickled_conf))
                    logger.info("   scheduler satellites:")
                    satellites = realm.get_links_for_a_scheduler(
                        self.pollers, self.reactionners, self.brokers)
                    for sat_type in satellites:
                        logger.info("   - %s", sat_type)
                        for sat_link_uuid in satellites[sat_type]:
                            satellite = satellites[sat_type][sat_link_uuid]
                            logger.info("   %s", satellite['name'])

                    # The configuration part is assigned to a scheduler
                    cfg_part.is_assigned = True
                    cfg_part.scheduler_link = scheduler_link
                    scheduler_link.cfg_to_manage = cfg_part
                    scheduler_link.push_flavor = cfg_part.push_flavor
                    scheduler_link.hash = scheduler_link.cfg['hash']
                    scheduler_link.need_conf = False
                    scheduler_link.configuration_sent = False

                    logger.info('   configuration %s (%s) assigned to %s',
                                cfg_part.instance_id, cfg_part.push_flavor,
                                scheduler_link.name)

                    # The configuration part is assigned to a scheduler, no need to go further ;)
                    break

            logger.info(" preparing the dispatch for satellites:")
            for cfg_part in list(realm.parts.values()):
                logger.info("  .configuration part %s (%s), name:%s",
                            cfg_part.instance_id, cfg_part.uuid,
                            cfg_part.config_name)
                for sat_type in ('reactionner', 'poller', 'broker',
                                 'receiver'):
                    if cfg_part.instance_id not in realm.to_satellites_need_dispatch[
                            sat_type]:
                        logger.warning("   nothing to dispatch for %ss",
                                       sat_type)
                        return

                    if not realm.to_satellites_need_dispatch[sat_type][
                            cfg_part.instance_id]:
                        logger.warning("   no need to dispatch to %ss",
                                       sat_type)
                        return

                    # Get the list of the concerned satellites
                    satellites = realm.get_potential_satellites_by_type(
                        self.satellites, sat_type)
                    if satellites:
                        logger.info(
                            "  realm %ss: %s", sat_type,
                            ','.join([s.get_name() for s in satellites]))
                    else:
                        logger.info("   no %s satellites", sat_type)

                    # Now we dispatch the cfg to every satellite that asks for it
                    nb_cfg_prepared = 0
                    for sat_link in satellites:
                        if not sat_link.active:
                            # I exclude the daemons that are not active
                            continue

                        if nb_cfg_prepared > realm.get_nb_of_must_have_satellites(
                                sat_type):
                            logger.warning(
                                "Too much configuration parts prepared "
                                "for the expected satellites count. "
                                "Realm: %s, satellite: %s - prepared: %d out of %d",
                                realm.name, sat_link.name, nb_cfg_prepared,
                                realm.get_nb_of_must_have_satellites(sat_type))
                            # Fred - 2018-07-20 - temporary disable this error raising!
                            # raise DispatcherError("Too much configuration parts prepared "
                            #                       "for the expected satellites count. "
                            #                       "This should never happen!")

                        logger.info(
                            "   preparing configuration part '%s' for the %s '%s'",
                            cfg_part.instance_id, sat_type, sat_link.name)

                        sat_link.cfg.update({
                            # Global instance configuration
                            'arbiters':
                            arbiters_cfg if sat_link.manage_arbiters else {},
                            'modules':
                            serialize(sat_link.modules, True),
                            'managed_conf_id':
                            'see_my_schedulers',
                            'global_conf':
                            self.global_conf
                        })
                        sat_link.cfg['schedulers'].update({
                            cfg_part.uuid:
                            realm.to_satellites[sat_type][cfg_part.instance_id]
                        })

                        # Brokers should have pollers and reactionners links too
                        if sat_type == "broker":
                            sat_link.cfg.update({
                                'satellites':
                                realm.get_links_for_a_broker(
                                    self.pollers, self.reactionners,
                                    self.receivers, self.alignak_conf.realms,
                                    sat_link.manage_sub_realms)
                            })

                        # Hash the whole configuration
                        cfg_string = json.dumps(sat_link.cfg,
                                                sort_keys=True).encode('utf-8')
                        sat_link.cfg['hash'] = hashlib.sha1(
                            cfg_string).hexdigest()

                        # Dump the configuration part size
                        pickled_conf = pickle.dumps(sat_link.cfg)
                        logger.info('   %s configuration size: %d bytes',
                                    sat_type, sys.getsizeof(pickled_conf))

                        # The configuration part is assigned to a satellite
                        sat_link.cfg_to_manage = cfg_part
                        sat_link.push_flavor = cfg_part.push_flavor
                        sat_link.hash = sat_link.cfg['hash']
                        sat_link.need_conf = False
                        sat_link.configuration_sent = False

                        logger.info('   configuration %s (%s) assigned to %s',
                                    cfg_part.instance_id, cfg_part.push_flavor,
                                    sat_link.name)

                        nb_cfg_prepared += 1
                        realm.to_satellites_managed_by[sat_type][
                            cfg_part.instance_id].append(sat_link)

                        # I've got enough satellites, the next ones are considered useless!
                        if nb_cfg_prepared == realm.get_nb_of_must_have_satellites(
                                sat_type):
                            logger.info("   no more %s needed in this realm.",
                                        sat_type)
                            realm.to_satellites_need_dispatch[sat_type][
                                cfg_part.instance_id] = False

        nb_missed = len([
            cfg for cfg in list(self.alignak_conf.parts.values())
            if not cfg.is_assigned
        ])
        if nb_missed > 0:
            logger.warning(
                "Some configuration parts are not dispatched, %d are missing",
                nb_missed)
        else:
            logger.info("All configuration parts are assigned "
                        "to schedulers and their satellites :)")

        # When the dispatch is ok, schedulers that received no configuration part do not need one,
        # so they must not raise dispatching errors when they are left unused
        for scheduler_link in self.schedulers:
            if not scheduler_link.cfg_to_manage:
                # "so it do not ask anymore for conf"
                logger.warning('The scheduler %s do not need a configuration!',
                               scheduler_link.name)
                scheduler_link.need_conf = False
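Throughout `prepare_dispatch()` the same fingerprinting idiom recurs: a configuration dict is dumped to JSON with `sort_keys=True`, so the dump is deterministic, and hashed with SHA-1 into `push_flavor`/`hash`. A minimal sketch of that idiom with illustrative configuration values:

    import hashlib
    import json

    def conf_hash(cfg):
        # Sorted keys make the JSON dump, and hence the digest, deterministic.
        cfg_string = json.dumps(cfg, sort_keys=True).encode('utf-8')
        return hashlib.sha1(cfg_string).hexdigest()

    cfg_a = {'instance_name': 'scheduler-master', 'push_flavor': ''}
    cfg_b = {'push_flavor': '', 'instance_name': 'scheduler-master'}
    assert conf_hash(cfg_a) == conf_hash(cfg_b)  # key order does not matter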