Example #1
    def init(self):
        print "I connect to NDO database"
        self.db = DBMysql(self.host,
                          self.user,
                          self.password,
                          self.database,
                          self.character_set,
                          table_prefix='nagios_')
        self.connect_database()

        # Cache for hosts and services
        # will be flushed when we get a new instance id
        # or something like that
        self.services_cache = {}
        self.hosts_cache = {}
Example #2
    def init(self):
        logger.log("I connect to NDO database")
        self.db = DBMysql(self.host, self.user, self.password, self.database, 
                          self.character_set, table_prefix='nagios_', port=self.port)
        self.connect_database()

        # Cache for hosts and services
        # The structure is as follows:
        # First the instance id, then the host / (host, service desc) to access the wanted data
        self.services_cache_sync = {}
        self.hosts_cache_sync = {}

        # We need to search for centreon-specific fields, like long_output
        query = u"select TABLE_NAME from information_schema.columns where TABLE_SCHEMA='ndo' and TABLE_NAME='nagios_servicestatus' and COLUMN_NAME='long_output';"
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            self.centreon_version = False
        else:
            self.centreon_version = True
            logger.log("[MySQL/NDO] Using the centreon version")

        # Cache for database id
        # In order not to query the database every time
        self.database_id_cache = {}

        # Mapping service_id in Shinken and in database
        # Because we can't access host_name from a service every time :(
        self.mapping_service_id = {}

        # Todo list to manage brok
        self.todo = []
Example #3
    def init(self):
        from shinken.db_mysql import DBMysql
        logger.info("[glpidb] Creating a mysql backend : %s (%s)" % (self.host, self.database))
        self.db_backend = DBMysql(self.host, self.user, self.password, self.database, self.character_set)

        logger.info("[glpidb] Connecting to database ...")
        self.db_backend.connect_database()
        logger.info("[glpidb] Connected")
Example #4
    def init(self):
        print "I connect to NDO database"
        self.db = DBMysql(self.host, self.user, self.password, self.database, self.character_set, table_prefix='nagios_')
        self.connect_database()

        # Cache for hosts and services
        # will be flushed when we get a new instance id
        # or something like that
        self.services_cache = {}
        self.hosts_cache = {}
Example #5
    def __init__(self, modconf, host=None, user=None, password=None, database=None, character_set=None, database_path=None):
        # Mapping for name of data, rename attributes and transform function
        self.mapping = {
           # Host
           'host_check_result': {
               'plugin_monitoring_services_id': {'transform': None},
               'event': {'transform': None},
               'perf_data': {'transform': None},
               'output': {'transform': None},
               'state': {'transform': None},
               'latency': {'transform': None},
               'execution_time': {'transform': None},
               'state_type': {'transform': None},
               },
           # Service
           'service_check_result': {
               'plugin_monitoring_services_id': {'transform': None},
               'plugin_monitoring_servicescatalogs_id': {'transform': None},
               'event': {'transform': None},
               'perf_data': {'transform': None},
               'output': {'transform': None},
               'state': {'transform': None},
               'latency': {'transform': None},
               'execution_time': {'transform': None},
               'state_type': {'transform': None},
               }
           }
        # Last state of check
        #self.checkstatus = {
        #    '0': None,
        #    }
        BaseModule.__init__(self, modconf)
        self.host = host
        self.user = user
        self.password = password
        self.database = database
        self.character_set = character_set
        self.database_path = database_path

        from shinken.db_mysql import DBMysql
        logger.info("[GLPIdb Broker] Creating a mysql backend")
        self.db_backend = DBMysql(host, user, password, database, character_set)
Example #6
class TestConfig(ShinkenTest):
    # setUp is inherited from ShinkenTest

    def create_db(self):
        self.db = DBMysql(host='localhost',
                          user='******',
                          password='******',
                          database='merlin',
                          character_set='utf8')

    def test_connect_database(self):
        if not DBMysql:
            return
        self.create_db()
        try:
            self.db.connect_database()
        except Exception:  # arg, no database here? sic!
            pass

    def test_execute_query(self):
        if not DBMysql:
            return
        self.create_db()
        try:
            self.db.connect_database()
            q = "DELETE FROM service WHERE instance_id = '0'"
            self.db.execute_query(q)
        except Exception:
            pass
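
Both test examples guard on DBMysql being importable before running anything. A minimal sketch of the guarded import this implies (the fallback to None is an assumption; only the import path appears in the other examples):

try:
    from shinken.db_mysql import DBMysql
except ImportError:  # MySQL bindings not installed
    DBMysql = None   # the tests then skip via 'if not DBMysql: return'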
Example #7
class TestConfig(ShinkenTest):
    # setUp is inherited from ShinkenTest

    def create_db(self):
        self.db = DBMysql(host='localhost', user='******', password='******', database='merlin', character_set='utf8')

    def test_connect_database(self):
        if not DBMysql:
            return
        self.create_db()
        try:
            self.db.connect_database()
        except Exception:  # arg, no database here? sic!
            pass

    def test_execute_query(self):
        if not DBMysql:
            return
        self.create_db()
        try:
            self.db.connect_database()
            q = "DELETE FROM service WHERE instance_id = '0'"
            self.db.execute_query(q)
        except Exception:
            pass
Example #8
    def init(self):
        logger.info("I connect to NDO database")
        self.db = DBMysql(self.host, self.user, self.password, self.database,
                          self.character_set, table_prefix=self.prefix,
                          port=self.port)
        self.connect_database()

        # Cache for hosts and services
        # The structure is as follows:
        # First the instance id then the host / (host,service desc)
        # to access the wanted data
        self.services_cache_sync = {}
        self.hosts_cache_sync = {}

        # We need to search for centreon-specific fields, like long_output
        query = u"select TABLE_NAME from information_schema.columns " \
                "where TABLE_SCHEMA='ndo' and " \
                "TABLE_NAME='%sservicestatus' and " \
                "COLUMN_NAME='long_output';" % self.prefix

        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            self.centreon_version = False
        else:
            self.centreon_version = True
            logger.info("[MySQL/NDO] Using the centreon version")

        # Cache for database id
        # In order not to query the database every time
        self.database_id_cache = {}

        # Mapping service_id in Shinken and in database
        # Because we can't access host_name from a service every time :(
        self.mapping_service_id = {}

        # Todo list to manage brok
        self.todo = []
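
Example #8 interpolates the table prefix directly into the information_schema query with %. As a hedged alternative, here is the same lookup with MySQLdb parameter binding (this assumes direct access to a MySQLdb cursor, which the DBMysql wrapper shown here does not expose; credentials are placeholders):

import MySQLdb

conn = MySQLdb.connect(host='localhost', user='shinken', passwd='secret',
                       db='ndo', charset='utf8')
cursor = conn.cursor()
prefix = 'nagios_'
# Let the driver quote the values instead of building the SQL by hand
cursor.execute(
    "SELECT TABLE_NAME FROM information_schema.columns "
    "WHERE TABLE_SCHEMA=%s AND TABLE_NAME=%s AND COLUMN_NAME=%s",
    ('ndo', prefix + 'servicestatus', 'long_output'))
centreon_version = cursor.fetchone() is not None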
Example #9
    def __init__(self, modconf, host=None, user=None, password=None, database=None, character_set=None, database_path=None):
        #Mapping for name of data, rename attributes and transform function
        self.mapping = {
           #Host
           'host_check_result' : {
               'plugin_monitoring_services_id' : {'transform' : None},
               'event' : {'transform' : None},
               'perf_data' : {'transform' : None},
               'output' : {'transform' : None},
               'state' : {'transform' : None},
               'latency' : {'transform' : None},
               'execution_time' : {'transform' : None},
               'state_type' : {'transform' : None},
               },
           #Service
           'service_check_result' : {
               'plugin_monitoring_services_id' : {'transform' : None},
               'plugin_monitoring_servicescatalogs_id' : {'transform' : None},
               'event' : {'transform' : None},
               'perf_data' : {'transform' : None},
               'output' : {'transform' : None},
               'state' : {'transform' : None},
               'latency' : {'transform' : None},
               'execution_time' : {'transform' : None},
               'state_type' : {'transform' : None},
               }
           }
        # Last state of check
#        self.checkstatus = {
#           '0' : None,
#           }
        BaseModule.__init__(self, modconf)
        self.host = host
        self.user = user
        self.password = password
        self.database = database
        self.character_set = character_set
        self.database_path = database_path
        
        from shinken.db_mysql import DBMysql
        print "Creating a mysql backend"
        self.db_backend = DBMysql(host, user, password, database, character_set)
Example #10
    def __init__(self, modconf, backend, host=None, user=None, password=None, database=None, character_set=None, database_path=None):
        # Mapping for name of data, rename attributes and transform function
        self.mapping = {
            # Program status
            'program_status': {'program_start': {'transform': None},
                                'pid': {'transform': None},
                                'last_alive': {'transform': None},
                                'is_running': {'transform': None},
                                'instance_id': {'transform': None},
                                },
            # Program status update (every 10s)
            'update_program_status': {'program_start': {'transform': None},
                                'pid': {'transform': None},
                                'last_alive': {'transform': None},
                                'is_running': {'transform': None},
                                'instance_id': {'transform': None},
                                },
            # Host
            'initial_host_status': {
                'id': {'transform': None},
                'instance_id': {'transform': None},
                'host_name': {'transform': None},
                'alias': {'transform': None},
                'display_name': {'transform': None},
                'address': {'transform': None},
                'contact_groups': {'transform': None},
                'contacts': {'transform': None},
                'initial_state': {'transform': None},
                'max_check_attempts': {'transform': None},
                'check_interval': {'transform': None},
                'retry_interval': {'transform': None},
                'active_checks_enabled': {'transform': None},
                'passive_checks_enabled': {'transform': None},
                'obsess_over_host': {'transform': None},
                'check_freshness': {'transform': None},
                'freshness_threshold': {'transform': None},
                'event_handler_enabled': {'transform': None},
                'low_flap_threshold': {'transform': None},
                'high_flap_threshold': {'transform': None},
                'flap_detection_enabled': {'transform': None},
                'process_perf_data': {'transform': None},
                'notification_interval': {'transform': None},
                'first_notification_delay': {'transform': None},
                'notifications_enabled': {'transform': None},
                'notes': {'transform': None},
                'notes_url': {'transform': None},
                'action_url': {'transform': None},
                'last_chk': {'transform': None, 'name': 'last_check'},
                'next_chk': {'transform': None, 'name': 'next_check'},
                'attempt': {'transform': None, 'name': 'current_attempt'},
                'state_id': {'transform': None, 'name': 'current_state'},
                'state_type_id': {'transform': None, 'name': 'state_type'},
                'current_event_id': {'transform': None},
                'last_event_id': {'transform': None},
                'last_state_id': {'transform': None, 'name': 'last_state'},
                'last_state_change': {'transform': None},
                'last_hard_state_change': {'transform': None},
                'last_hard_state': {'transform': last_hard_state_to_int},
                'is_flapping': {'transform': None},
                'flapping_comment_id': {'transform': None},
                'percent_state_change': {'transform': None},
                'problem_has_been_acknowledged': {'transform': None},
                'acknowledgement_type': {'transform': None},
                'check_type': {'transform': None},
                'has_been_checked': {'transform': None},
                'should_be_scheduled': {'transform': None},
                'last_problem_id': {'transform': None},
                'current_problem_id': {'transform': None},
                'execution_time': {'transform': None},
                'last_notification': {'transform': None},
                'current_notification_number': {'transform': None},
                'current_notification_id': {'transform': None},
                'check_flapping_recovery_notification': {'transform': None},
                'scheduled_downtime_depth': {'transform': None},
                'pending_flex_downtime': {'transform': None},
                },
            'update_host_status': {
                'id': {'transform': None},
                'instance_id': {'transform': None},
                'host_name': {'transform': None},
                'alias': {'transform': None},
                'display_name': {'transform': None},
                'address': {'transform': None},
                'initial_state': {'transform': None},
                'max_check_attempts': {'transform': None},
                'check_interval': {'transform': None},
                'retry_interval': {'transform': None},
                'active_checks_enabled': {'transform': None},
                'passive_checks_enabled': {'transform': None},
                'obsess_over_host': {'transform': None},
                'check_freshness': {'transform': None},
                'freshness_threshold': {'transform': None},
                'event_handler_enabled': {'transform': None},
                'low_flap_threshold': {'transform': None},
                'high_flap_threshold': {'transform': None},
                'flap_detection_enabled': {'transform': None},
                'process_perf_data': {'transform': None},
                'notification_interval': {'transform': None},
                'first_notification_delay': {'transform': None},
                'notifications_enabled': {'transform': None},
                'notes': {'transform': None},
                'notes_url': {'transform': None},
                'action_url': {'transform': None},
                'last_chk': {'transform': None, 'name': 'last_check'},
                'next_chk': {'transform': None, 'name': 'next_check'},
                'attempt': {'transform': None, 'name': 'current_attempt'},
                'state_id': {'transform': None, 'name': 'current_state'},
                'state_type_id': {'transform': None, 'name': 'state_type'},
                'current_event_id': {'transform': None},
                'last_event_id': {'transform': None},
                'last_state_id': {'transform': None, 'name': 'last_state'},
                'last_state_change': {'transform': None},
                'last_hard_state_change': {'transform': None},
                'last_hard_state': {'transform': last_hard_state_to_int},
                'is_flapping': {'transform': None},
                'flapping_comment_id': {'transform': None},
                'percent_state_change': {'transform': None},
                'problem_has_been_acknowledged': {'transform': None},
                'acknowledgement_type': {'transform': None},
                'check_type': {'transform': None},
                'has_been_checked': {'transform': None},
                'should_be_scheduled': {'transform': None},
                'last_problem_id': {'transform': None},
                'current_problem_id': {'transform': None},
                'execution_time': {'transform': None},
                'last_notification': {'transform': None},
                'current_notification_number': {'transform': None},
                'current_notification_id': {'transform': None},
                'check_flapping_recovery_notification': {'transform': None},
                'scheduled_downtime_depth': {'transform': None},
                'pending_flex_downtime': {'transform': None},
                },
            'host_check_result': {
                'latency': {'transform': None},
                'last_time_unreachable': {'transform': None},
                'attempt': {'transform': None, 'name': 'current_attempt'},
                'check_type': {'transform': None},
                'state_type_id': {'transform': None, 'name': 'state_type'},
                'execution_time': {'transform': None},
                'start_time': {'transform': None},
                'acknowledgement_type': {'transform': None},
                'return_code': {'transform': None},
                'last_time_down': {'transform': None},
                'instance_id': {'transform': None},
                'long_output': {'transform': None},
                'end_time': {'transform': None},
                'last_chk': {'transform': None, 'name': 'last_check'},
                'timeout': {'transform': None},
                'output': {'transform': None},
                'state_id': {'transform': None, 'name': 'current_state'},
                'last_time_up': {'transform': None},
                'early_timeout': {'transform': None},
                'perf_data': {'transform': None},
                'host_name': {'transform': None},
                },
            'host_next_schedule': {
                'instance_id': {'transform': None},
                'next_chk': {'transform': None, 'name': 'next_check'},
                'host_name': {'transform': None},
                },
            # Service
            'initial_service_status': {
                'id': {'transform': None},
                'instance_id': {'transform': None},
                'host_name': {'transform': None},
                'service_description': {'transform': None},
                'display_name': {'transform': None},
                'is_volatile': {'transform': None},
                'initial_state': {'transform': None},
                'max_check_attempts': {'transform': None},
                'check_interval': {'transform': None},
                'retry_interval': {'transform': None},
                'active_checks_enabled': {'transform': None},
                'passive_checks_enabled': {'transform': None},
                'obsess_over_service': {'transform': None},
                'check_freshness': {'transform': None},
                'freshness_threshold': {'transform': None},
                'event_handler_enabled': {'transform': None},
                'low_flap_threshold': {'transform': None},
                'high_flap_threshold': {'transform': None},
                'flap_detection_enabled': {'transform': None},
                'process_perf_data': {'transform': None},
                'notification_interval': {'transform': None},
                'first_notification_delay': {'transform': None},
                'notifications_enabled': {'transform': None},
                'notes': {'transform': None},
                'notes_url': {'transform': None},
                'action_url': {'transform': None},
                'last_chk': {'transform': None, 'name': 'last_check'},
                'next_chk': {'transform': None, 'name': 'next_check'},
                'attempt': {'transform': None, 'name': 'current_attempt'},
                'state_id': {'transform': None, 'name': 'current_state'},
                'current_event_id': {'transform': None},
                'last_event_id': {'transform': None},
                'last_state_id': {'transform': None, 'name': 'last_state'},
                'last_state_change': {'transform': None},
                'last_hard_state_change': {'transform': None},
                'last_hard_state': {'transform': last_hard_state_to_int},
                'state_type_id': {'transform': None, 'name': 'state_type'},
                'is_flapping': {'transform': None},
                'flapping_comment_id': {'transform': None},
                'percent_state_change': {'transform': None},
                'problem_has_been_acknowledged': {'transform': None},
                'acknowledgement_type': {'transform': None},
                'check_type': {'transform': None},
                'has_been_checked': {'transform': None},
                'should_be_scheduled': {'transform': None},
                'last_problem_id': {'transform': None},
                'current_problem_id': {'transform': None},
                'execution_time': {'transform': None},
                'last_notification': {'transform': None},
                'current_notification_number': {'transform': None},
                'current_notification_id': {'transform': None},
                'check_flapping_recovery_notification': {'transform': None},
                'scheduled_downtime_depth': {'transform': None},
                'pending_flex_downtime': {'transform': None},
                },
            'update_service_status': {
                'id': {'transform': None},
                'instance_id': {'transform': None},
                'host_name': {'transform': None},
                'service_description': {'transform': None},
                'display_name': {'transform': None},
                'is_volatile': {'transform': None},
                'initial_state': {'transform': None},
                'max_check_attempts': {'transform': None},
                'check_interval': {'transform': None},
                'retry_interval': {'transform': None},
                'active_checks_enabled': {'transform': None},
                'passive_checks_enabled': {'transform': None},
                'obsess_over_service': {'transform': None},
                'check_freshness': {'transform': None},
                'freshness_threshold': {'transform': None},
                'event_handler_enabled': {'transform': None},
                'low_flap_threshold': {'transform': None},
                'high_flap_threshold': {'transform': None},
                'flap_detection_enabled': {'transform': None},
                'process_perf_data': {'transform': None},
                'notification_interval': {'transform': None},
                'first_notification_delay': {'transform': None},
                'notifications_enabled': {'transform': None},
                'notes': {'transform': None},
                'notes_url': {'transform': None},
                'action_url': {'transform': None},
                'last_chk': {'transform': None, 'name': 'last_check'},
                'next_chk': {'transform': None, 'name': 'next_check'},
                'attempt': {'transform': None, 'name': 'current_attempt'},
                'state_id': {'transform': None, 'name': 'current_state'},
                'current_event_id': {'transform': None},
                'last_event_id': {'transform': None},
                'last_state_id': {'transform': None, 'name': 'last_state'},
                'last_state_change': {'transform': None},
                'last_hard_state_change': {'transform': None},
                'last_hard_state': {'transform': last_hard_state_to_int},
                'state_type_id': {'transform': None, 'name': 'state_type'},
                'is_flapping': {'transform': None},
                'flapping_comment_id': {'transform': None},
                'percent_state_change': {'transform': None},
                'problem_has_been_acknowledged': {'transform': None},
                'acknowledgement_type': {'transform': None},
                'check_type': {'transform': None},
                'has_been_checked': {'transform': None},
                'should_be_scheduled': {'transform': None},
                'last_problem_id': {'transform': None},
                'current_problem_id': {'transform': None},
                'execution_time': {'transform': None},
                'last_notification': {'transform': None},
                'current_notification_number': {'transform': None},
                'current_notification_id': {'transform': None},
                'check_flapping_recovery_notification': {'transform': None},
                'scheduled_downtime_depth': {'transform': None},
                'pending_flex_downtime': {'transform': None},
                },
            'service_check_result': {
                'check_type': {'transform': None},
                'last_time_critical': {'transform': None},
                'last_time_warning': {'transform': None},
                'latency': {'transform': None},
                'last_chk': {'transform': None, 'name': 'last_check'},
                'last_time_ok': {'transform': None},
                'end_time': {'transform': None},
                'last_time_unknown': {'transform': None},
                'execution_time': {'transform': None},
                'start_time': {'transform': None},
                'return_code': {'transform': None},
                'output': {'transform': None},
                'service_description': {'transform': None},
                'early_timeout': {'transform': None},
                'attempt': {'transform': None, 'name': 'current_attempt'},
                'state_type_id': {'transform': None, 'name': 'state_type'},
                'acknowledgement_type': {'transform': None},
                'instance_id': {'transform': None},
                'long_output': {'transform': None},
                'host_name': {'transform': None},
                'timeout': {'transform': None},
                'state_id': {'transform': None, 'name': 'current_state'},
                'perf_data': {'transform': None},
                },
            'service_next_schedule': {
                'next_chk': {'transform': None, 'name': 'next_check'},
                'service_description': {'transform': None},
                'instance_id': {'transform': None},
                'host_name': {'transform': None},
                },

            # Contact
            'initial_contact_status': {
                'service_notifications_enabled': {'transform': None},
                'can_submit_commands': {'transform': None},
                'contact_name': {'transform': None},
                'id': {'transform': None},
                'retain_status_information': {'transform': None},
                'address1': {'transform': None},
                'address2': {'transform': None},
                'address3': {'transform': None},
                'address4': {'transform': None},
                'address5': {'transform': None},
                'address6': {'transform': None},
                #'service_notification_commands': {'transform': get_objs_names},
                'pager': {'transform': None},
                #'host_notification_period': {'transform': get_obj_name},
                'host_notifications_enabled': {'transform': None},
                #'host_notification_commands': {'transform': get_objs_names},
                #'service_notification_period': {'transform': get_obj_name},
                'email': {'transform': None},
                'alias': {'transform': None},
                'host_notification_options': {'transform': list_to_comma},
                'service_notification_options': {'transform': list_to_comma},
                },
            # Contact group
            'initial_contactgroup_status': {
                'contactgroup_name': {'transform': None},
                'alias': {'transform': None},
                'instance_id': {'transform': None},
                'id': {'transform': None},
                'members': {'transform': None},
                },
            # Host group
            'initial_hostgroup_status': {
                'hostgroup_name': {'transform': None},
                'notes': {'transform': None},
                'instance_id': {'transform': None},
                'action_url': {'transform': None},
                'notes_url': {'transform': None},
                'members': {'transform': None},
                'id': {'transform': None},
                }
            }
        BaseModule.__init__(self, modconf)
        self.backend = backend
        self.host = host
        self.user = user
        self.password = password
        self.database = database
        self.character_set = character_set
        self.database_path = database_path


        # Now get a backend_db of our backend type
        if backend == 'mysql':
            #from mysql_backend import Mysql_backend
            from shinken.db_mysql import DBMysql
            print "Creating a mysql backend"
            self.db_backend = DBMysql(host, user, password, database, character_set)

        if backend == 'sqlite':
            #from sqlite_backend import Sqlite_backend
            from shinken.db_sqlite import DBSqlite
            print "Creating a sqlite backend"
            self.db_backend = DBSqlite(self.database_path)
Example #11
class Ndodb_Mysql_broker(BaseModule):

    """ This Class is a plugin for the Shinken Broker. It is in charge
    to brok information into the database. For the moment
    only Mysql is supported. This code is __imported__ from Broker.
    The managed_brok function is called by Broker for manage the broks. It calls
    the manage_*_brok functions that create queries, and then run queries.

    """

    def __init__(self, conf):
        BaseModule.__init__(self, conf)
        # Mapping for name of data and transform function
        self.mapping = {
            'program_status': {
                'program_start': {'name': 'program_start_time', 'transform': de_unixify},
                'pid': {'name': 'process_id', 'transform': None},
                'last_alive': {'name': 'status_update_time', 'transform': de_unixify},
                'is_running': {'name': 'is_currently_running', 'transform': None},
                'last_log_rotation': {'name': 'last_log_rotation', 'transform': de_unixify},
                'last_command_check': {'name': 'last_command_check', 'transform': de_unixify}
                },
            }

        self.host = conf.host
        self.user = conf.user
        self.password = conf.password
        self.database = conf.database
        self.character_set = conf.character_set
        self.port = int(getattr(conf, 'port', '3306'))
        self.prefix = getattr(conf, 'prefix', 'nagios_')

        # Centreon's ndo adds some fields, like long_output,
        # that are not in the vanilla ndo
        self.centreon_version = False
        self.synchronize_database_id = int(conf.synchronize_database_id)

    # Called by Broker so we can do init stuff
    # TODO: add conf param to get pass with init
    # Conf from arbiter!
    def init(self):
        logger.info("I connect to NDO database")
        self.db = DBMysql(self.host, self.user, self.password, self.database,
                          self.character_set, table_prefix=self.prefix,
                          port=self.port)
        self.connect_database()

        # Cache for hosts and services
        # The structure is as follows:
        # First the instance id then the host / (host,service desc)
        # to access the wanted data
        self.services_cache_sync = {}
        self.hosts_cache_sync = {}

        # We need to search for centreon-specific fields, like long_output
        query = u"select TABLE_NAME from information_schema.columns " \
                "where TABLE_SCHEMA='ndo' and " \
                "TABLE_NAME='%sservicestatus' and " \
                "COLUMN_NAME='long_output';" % self.prefix

        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            self.centreon_version = False
        else:
            self.centreon_version = True
            logger.info("[MySQL/NDO] Using the centreon version")

        # Cache for database id
        # In order not to query the database every time
        self.database_id_cache = {}

        # Mapping service_id in Shinken and in database
        # Because we can't access host_name from a service every time :(
        self.mapping_service_id = {}

        # Todo list to manage brok
        self.todo = []

    # Get a brok, parse it, and put it in the database
    # We call functions like manage_ TYPEOFBROK _brok that return us queries
    def manage_brok(self, b):
        # We need to do some brok mod, so we copy it
        new_b = copy.deepcopy(b)

        # If we synchronize, we must look for an id change
        if self.synchronize_database_id != 0 and 'instance_id' in new_b.data:
            # If we use database sync, we have to synchronize the database id,
            # so we wait for the instance name
            if 'instance_name' not in new_b.data:
                self.todo.append(new_b)
                return

            # We convert the id so it is written properly in the database,
            # using the instance_name to reuse the instance_id already in the database.
            else:
                new_b.data['instance_id'] = self.convert_id(
                    new_b.data['instance_id'], new_b.data['instance_name']
                    )

                self.todo.append(new_b)
                for brok in self.todo:
                    # We have to put the good instance ID to all brok waiting
                    # in the list then execute the query
                    brok.data['instance_id'] = new_b.data['instance_id']
                    queries = BaseModule.manage_brok(self, brok)
                    if queries is not None:
                        for q in queries:
                            self.db.execute_query(q)
                # We've finished managing the todo list, so we empty it
                self.todo = []
                return

        # Executed if we don't synchronize or there is no instance_id
        queries = BaseModule.manage_brok(self, new_b)

        if queries is not None:
            for q in queries:
                self.db.execute_query(q)
            return

    # Create the database connection
    # An exception is raised if an arg is bad.
    def connect_database(self):
        try:
            self.db.connect_database()
        except _mysql_exceptions.OperationalError as exp:
            logger.info(
                "[MySQL/NDO] Module raised an exception: %s. "
                "Please check the arguments!" % exp)
            raise
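
The docstring of Example #11 describes the broker convention: manage_brok resolves a manage_<type>_brok handler from the brok type, collects the SQL strings it returns, and executes them. A minimal sketch of that dispatch, outside any class (only manage_brok, brok.type, and execute_query appear in the examples; the rest is illustrative):

def dispatch_brok(module, brok):
    # Resolve e.g. 'program_status' -> module.manage_program_status_brok
    handler = getattr(module, 'manage_%s_brok' % brok.type, None)
    if handler is None:
        return
    queries = handler(brok)  # handlers return a list of SQL strings
    for q in queries or []:
        module.db.execute_query(q)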
Example #12
class Glpidb_broker(BaseModule):
    def __init__(self,
                 modconf,
                 host=None,
                 user=None,
                 password=None,
                 database=None,
                 character_set=None,
                 database_path=None):
        # Mapping for name of data, rename attributes and transform function
        self.mapping = {
            # Host
            'host_check_result': {
                'plugin_monitoring_services_id': {
                    'transform': None
                },
                'event': {
                    'transform': None
                },
                'perf_data': {
                    'transform': None
                },
                'output': {
                    'transform': None
                },
                'state': {
                    'transform': None
                },
                'latency': {
                    'transform': None
                },
                'execution_time': {
                    'transform': None
                },
                'state_type': {
                    'transform': None
                },
            },
            # Service
            'service_check_result': {
                'plugin_monitoring_services_id': {
                    'transform': None
                },
                'plugin_monitoring_servicescatalogs_id': {
                    'transform': None
                },
                'event': {
                    'transform': None
                },
                'perf_data': {
                    'transform': None
                },
                'output': {
                    'transform': None
                },
                'state': {
                    'transform': None
                },
                'latency': {
                    'transform': None
                },
                'execution_time': {
                    'transform': None
                },
                'state_type': {
                    'transform': None
                },
            }
        }
        # Last state of check
        #self.checkstatus = {
        #    '0': None,
        #    }
        BaseModule.__init__(self, modconf)
        self.host = host
        self.user = user
        self.password = password
        self.database = database
        self.character_set = character_set
        self.database_path = database_path

        from shinken.db_mysql import DBMysql
        logger.info("[GLPIdb Broker] Creating a mysql backend")
        self.db_backend = DBMysql(host, user, password, database,
                                  character_set)

    # Called by Broker so we can do init stuff
    # TODO: add conf param to get pass with init
    # Conf from arbiter!
    def init(self):
        logger.info("[GLPIdb Broker] I connect to Glpi database")
        self.db_backend.connect_database()

    def preprocess(self, type, brok, checkst):
        new_brok = copy.deepcopy(brok)
        # Only preprocess if we can apply a mapping
        if type in self.mapping:
            logger.debug("[GLPIdb Broker] brok data: %s" % str(brok.data))
            try:
                s = brok.data['service_description'].split('-')
                try:
                    if 'businessrules' in s[2]:
                        new_brok.data[
                            'plugin_monitoring_servicescatalogs_id'] = s[1]
                except:
                    new_brok.data['plugin_monitoring_services_id'] = s[1]
                    new_brok.data['event'] = brok.data['output']
            except:
                try:
                    s = brok.data['host_name'].split('-')
                    new_brok.data['plugin_monitoring_services_id'] = s[1]
                    new_brok.data['event'] = brok.data['output']
                except:
                    pass
            to_del = []
            to_add = []
            mapping = self.mapping[brok.type]
            for prop in new_brok.data:
                # ex: 'name': 'program_start_time', 'transform'
                if prop in mapping:
                    logger.debug("[GLPIdb Broker] Got a prop to change: %s" %
                                 prop)
                    val = new_brok.data[prop]
                    if mapping[prop]['transform'] is not None:
                        logger.info(
                            "[GLPIdb Broker] Call function for type %s and prop %s"
                            % (type, prop))
                        f = mapping[prop]['transform']
                        val = f(val)
                    name = prop
                    if 'name' in mapping[prop]:
                        name = mapping[prop]['name']
                    to_add.append((name, val))
                    to_del.append(prop)
                else:
                    to_del.append(prop)
            for prop in to_del:
                del new_brok.data[prop]
            for (name, val) in to_add:
                new_brok.data[name] = val
        else:
            print "No preprocess type", brok.type
            print brok.data
        return new_brok

    # Get a brok, parse it, and put it in the database
    # We call functions like manage_ TYPEOFBROK _brok that return us queries
    def manage_brok(self, b):
        type = b.type
        # To update check in glpi_plugin_monitoring_hosts
        manager = 'manage_' + type + 'up_brok'
        if hasattr(self, manager):
            new_b = self.preprocess(type, b, 0)
            f = getattr(self, manager)
            queries = f(new_b)
            # Ok, we've got queries, now: run them!
            for q in queries:
                self.db_backend.execute_query(q)
        manager = 'manage_' + type + '_brok'
        if hasattr(self, manager):
            new_b = self.preprocess(type, b, '1')
            if 'host_name' in new_b.data:
                if 'plugin_monitoring_services_id' not in new_b.data:
                    return
            f = getattr(self, manager)
            queries = f(new_b)
            # Ok, we've got queries, now: run them!
            for q in queries:
                self.db_backend.execute_query(q)
            return

    ## Host result
    ## def manage_host_check_result_brok(self, b):
    ##     logger.info("GLPI: data in DB %s " % b)
    ##     b.data['date'] = time.strftime('%Y-%m-%d %H:%M:%S')
    ##     query = self.db_backend.create_insert_query('glpi_plugin_monitoring_serviceevents', b.data)
    ##     return [query]

    ## Host result
    ## def manage_host_check_resultup_brok(self, b):
    ##     logger.info("GLPI: data in DB %s " % b)
    ##     new_data = copy.deepcopy(b.data)
    ##     new_data['last_check'] = time.strftime('%Y-%m-%d %H:%M:%S')
    ##     new_data['id'] = b.data['plugin_monitoring_services_id']
    ##     del new_data['plugin_monitoring_services_id']
    ##     del new_data['perf_data']
    ##     del new_data['output']
    ##     del new_data['latency']
    ##     del new_data['execution_time']
    ##     where_clause = {'id': new_data['id']}
    ##     query = self.db_backend.create_update_query('glpi_plugin_monitoring_services', new_data, where_clause)
    ##     return [query]

    # Service result
    def manage_service_check_result_brok(self, b):
        logger.debug("[GLPIdb Broker] Data in DB %s" % b)
        try:
            b.data['plugin_monitoring_servicescatalogs_id']
            return ''
        except:
            b.data['date'] = time.strftime('%Y-%m-%d %H:%M:%S')
            logger.debug("[GLPIdb Broker] Add event service: %s" % str(b.data))
            query = self.db_backend.create_insert_query(
                'glpi_plugin_monitoring_serviceevents', b.data)
            return [query]
        return ''

    # Service result
    def manage_service_check_resultup_brok(self, b):
        """If a host is defined locally (in shinken) and not in GLPI,
           we must not edit GLPI datas!
        """
        if 'plugin_monitoring_servicescatalogs_id' not in b.data and\
           'plugin_monitoring_services_id'         not in b.data:
            return list()

        logger.info("GLPI: data in DB %s " % str(b.data))
        new_data = copy.deepcopy(b.data)
        new_data['last_check'] = time.strftime('%Y-%m-%d %H:%M:%S')
        del new_data['perf_data']
        del new_data['output']
        del new_data['latency']
        del new_data['execution_time']
        try:
            new_data['id'] = b.data['plugin_monitoring_servicescatalogs_id']
            del new_data['plugin_monitoring_servicescatalogs_id']
            table = 'glpi_plugin_monitoring_servicescatalogs'
        except:
            new_data['id'] = b.data['plugin_monitoring_services_id']
            del new_data['plugin_monitoring_services_id']
            table = 'glpi_plugin_monitoring_services'

        where_clause = {'id': new_data['id']}
        logger.debug("[GLPIdb Broker] Update service: %s" % str(new_data))
        query = self.db_backend.create_update_query(table, new_data,
                                                    where_clause)
        return [query]
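
The preprocess step in Example #12 (and the program_status handling later in Example #14) walks a mapping that optionally transforms a value and renames its key, dropping everything unmapped. A self-contained sketch of that rename/transform loop with made-up data (the str transform here is only for illustration; the examples use transforms like de_unixify or None):

mapping = {'last_chk': {'transform': None, 'name': 'last_check'},
           'state_id': {'transform': str, 'name': 'current_state'}}
data = {'last_chk': 1234567890, 'state_id': 2, 'unmapped': 'dropped'}

out = {}
for prop, val in data.items():
    rule = mapping.get(prop)
    if rule is None:
        continue                       # unmapped props are deleted
    if rule['transform'] is not None:
        val = rule['transform'](val)   # optional value conversion
    out[rule.get('name', prop)] = val  # optional key rename
# out == {'last_check': 1234567890, 'current_state': '2'}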
Example #13
    def create_db(self):
        self.db = DBMysql(host='localhost', user='******', password='******', database='merlin', character_set='utf8')
Example #14
class Ndodb_Mysql_broker(BaseModule):
    def __init__(self, conf):
        BaseModule.__init__(self, conf)
        # Mapping for name of data and transform function
        self.mapping = {
            'program_status' : {
                                'program_start' : {'name' : 'program_start_time', 'transform' : de_unixify},
                                'pid' : {'name' : 'process_id', 'transform' : None},
                                'last_alive' : {'name' : 'status_update_time', 'transform' : de_unixify},
                                'is_running' : {'name' : 'is_currently_running', 'transform' : None}
                                },
            }
        
        self.host = conf.host
        self.user = conf.user
        self.password = conf.password
        self.database = conf.database
        self.character_set = conf.character_set
        self.port = int(getattr(conf, 'port', '3306'))
        
        # Centreon's ndo adds some fields, like long_output, that are not in the vanilla ndo
        self.centreon_version = False
        self.synchronise_database_id = int(conf.synchronise_database_id)


    # Called by Broker so we can do init stuff
    # TODO : add conf param to get pass with init
    # Conf from arbiter!
    def init(self):
        logger.log("I connect to NDO database")
        self.db = DBMysql(self.host, self.user, self.password, self.database, 
                          self.character_set, table_prefix='nagios_', port=self.port)
        self.connect_database()

        # Cache for hosts and services
        # The structure is as follows:
        # First the instance id, then the host / (host, service desc) to access the wanted data
        self.services_cache_sync = {}
        self.hosts_cache_sync = {}

        # We need to search for centreon-specific fields, like long_output
        query = u"select TABLE_NAME from information_schema.columns where TABLE_SCHEMA='ndo' and TABLE_NAME='nagios_servicestatus' and COLUMN_NAME='long_output';"
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            self.centreon_version = False
        else:
            self.centreon_version = True
            logger.log("[MySQL/NDO] Using the centreon version")

        # Cache for database id
        # In order not to query the database every time
        self.database_id_cache = {}

        # Mapping service_id in Shinken and in database
        # Because we can't access host_name from a service every time :(
        self.mapping_service_id = {}

        # Todo list to manage brok
        self.todo = []


    # Get a brok, parse it, and put it in the database
    # We call functions like manage_ TYPEOFBROK _brok that return us queries
    def manage_brok(self, b):
        # We need to do some brok mod, so we copy it
        new_b = copy.deepcopy(b)
    
        # If we synchronise, we must look for an id change
        if self.synchronise_database_id != 0 and 'instance_id' in new_b.data:
            # If we use database sync, we have to synchronise the database id,
            # so we wait for the instance name
            if 'instance_name' not in new_b.data:
                self.todo.append(new_b)
                return
                  
            # We convert the id so it is written properly in the database,
            # using the instance_name to reuse the instance_id already in the database.
            else:
                new_b.data['instance_id'] = self.convert_id(new_b.data['instance_id'], new_b.data['instance_name'])
                self.todo.append(new_b)
                for brok in self.todo:
                    # We have to put the good instance ID to all brok waiting
                    # in the list then execute the query
                    brok.data['instance_id'] = new_b.data['instance_id']
                    queries = BaseModule.manage_brok(self, brok)
                    if queries is not None:
                        for q in queries:
                            self.db.execute_query(q)
                # We've finished managing the todo list, so we empty it
                self.todo = []
                return

        # Executed if we don't synchronise or there is no instance_id
        queries = BaseModule.manage_brok(self, new_b)

        if queries is not None:
            for q in queries:
                self.db.execute_query(q)
            return



    # Create the database connection
    # An exception is raised if an arg is bad.
    def connect_database(self):
        try:
            self.db.connect_database()
        except _mysql_exceptions.OperationalError as exp:
            logger.log("[MySQL/NDO] Module raised an exception: %s. Please check the arguments!" % exp)
            raise


    # Query the database to get the proper instance_id
    def get_instance_id(self, name):
        query1 = u"SELECT  max(instance_id) + 1 from nagios_instances"
        query2 = u"SELECT instance_id from nagios_instances where instance_name = '%s';" % name

        self.db.execute_query(query1)
        row1 = self.db.fetchone()

        self.db.execute_query(query2)
        row2 = self.db.fetchone()

        if len(row1) < 1:
            return -1
        # We are the first process writing in the database
        elif row1[0] is None:
            return 1
        # No previous instance found: return the max
        elif row2 is None:
            return row1[0]
        # Return the previous instance
        else:
            return row2[0]



    def convert_id(self, id, name):
        # Look if we have already encountered this id
        if id in self.database_id_cache:
            return self.database_id_cache[id]
        else:
            data_id = 1
            # If we disabled the database sync, we use the in-brok instance_id
            if self.synchronise_database_id == 0:
                data_id = id
            # Else: we query the database to get a new one
            else:
                data_id = self.get_instance_id(name)
            # cache this!
            self.database_id_cache[id] = data_id
            return data_id



    def get_host_object_id_by_name_sync(self, host_name, instance_id):
        # First look in cache.
        if instance_id in self.hosts_cache_sync:
            if host_name in self.hosts_cache_sync[instance_id]:
                return self.hosts_cache_sync[instance_id][host_name]

        # Not in cache, not good
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='1' and instance_id='%s'" % (host_name,instance_id)
        self.db.execute_query(query)
        row = self.db.fetchone ()
        if row is None or len(row) < 1:
            return 0
        else:
            if instance_id not in self.hosts_cache_sync:
                self.hosts_cache_sync[instance_id] = {}
            self.hosts_cache_sync[instance_id][host_name] = row[0]
            return row[0]



    def get_contact_object_id_by_name_sync(self, contact_name, instance_id):
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='10' and instance_id='%s'" % (contact_name, instance_id)
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            return row[0]



    def get_hostgroup_object_id_by_name_sync(self, hostgroup_name, instance_id):
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='3' and instance_id='%s'" % (hostgroup_name,instance_id)
        self.db.execute_query(query)
        row = self.db.fetchone ()
        if row is None or len(row) < 1:
            return 0
        else:
            return row[0]



    def get_max_hostgroup_id_sync(self): 
        query = u"SELECT max(hostgroup_id) + 1 from nagios_hostgroups"
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            return row[0]



    def get_service_object_id_by_name_sync(self, host_name, service_description, instance_id):
        if instance_id in self.services_cache_sync:
            if (host_name, service_description) in self.services_cache_sync[instance_id]:
                return self.services_cache_sync[instance_id][(host_name, service_description)]

        # else: not in cache :(
        query = u"SELECT object_id from nagios_objects where name1='%s' and name2='%s' and objecttype_id='2' and instance_id='%s'" % (host_name, service_description, instance_id)
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            if instance_id not in self.services_cache_sync:
                self.services_cache_sync[instance_id] = {}
            self.services_cache_sync[instance_id][(host_name, service_description)] = row[0]
            return row[0]



    def get_servicegroup_object_id_by_name_sync(self, servicegroup_name, instance_id):
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='4' and instance_id='%s'" % (servicegroup_name,instance_id)
        self.db.execute_query(query)
        row = self.db.fetchone ()
        if row is None or len(row) < 1:
            return 0
        else:
            return row[0]
 
   
    
    def get_max_servicegroup_id_sync(self):
        query = u"SELECT max(servicegroup_id) + 1 from nagios_servicegroups"
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            return row[0]


        
    def get_contactgroup_object_id_by_name_sync(self, contactgroup_name, instance_id):
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='11' and instance_id='%s'" % (contactgroup_name, instance_id)
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            return row[0]



    def get_max_contactgroup_id_sync(self):
        query = u"SELECT max(contactgroup_id) + 1 from nagios_contactgroups"
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            return row[0]



    # Ok, we are at launch time and a scheduler wants only its own data, OK...
    # So create several queries for all the tables we need to clean of
    # our instance_id.
    # This brok must be sent at the beginning of a scheduler session;
    # if not, BAD THINGS MAY HAPPEN :)
    def manage_clean_all_my_instance_id_brok(self, b):
        instance_id = b.data['instance_id']
        tables = ['commands', 'contacts', 'contactgroups', 'hosts',
                  'hostescalations', 'hostgroups', 'notifications',
                  'services',  'serviceescalations', 'programstatus',
                  'servicegroups', 'timeperiods', 'hostgroup_members',
                  'contactgroup_members', 'objects', 'hoststatus',
                  'servicestatus', 'instances', 'servicegroup_members']
        res = []
        for table in tables:
            q = "DELETE FROM %s WHERE instance_id = '%s' " % ('nagios_'+table, instance_id)
            res.append(q)

        # We also clean cache, because we are not sure about this data now
        logger.log("[MySQL/NDO] Flushing caches (clean from instance %d)" % instance_id)
        self.services_cache_sync = {}
        self.hosts_cache_sync = {}

        return res



    # Program status is... the status of the program? :)
    # Like pid, daemon mode, last activity, etc.
    # We already cleaned the database, so we INSERT
    # TODO : fill nagios_instances
    def manage_program_status_brok(self, b):
        new_b = copy.deepcopy(b)

        # Must delete me first
        query_delete_instance = u"DELETE FROM %s WHERE instance_name = '%s' " % ('nagios_instances', b.data['instance_name'])

        query_instance = self.db.create_insert_query('instances', {
            'instance_name': new_b.data['instance_name'],
            'instance_description': new_b.data['instance_name'],
            'instance_id': new_b.data['instance_id']
        })

        to_del = ['instance_name', 'command_file', 'check_external_commands', 'check_service_freshness',
                  'check_host_freshness']
        to_add = []
        mapping = self.mapping['program_status']
        for prop in new_b.data:
            # e.g. mapping entry: 'program_start' -> {'name': 'program_start_time', 'transform': de_unixify}
            if prop in mapping:
                #print "Got a prop to change", prop
                val = new_b.data[prop]
                if mapping[prop]['transform'] is not None:
                    f = mapping[prop]['transform']
                    val = f(val)
                new_name = mapping[prop]['name']
                to_add.append((new_name, val))
                to_del.append(prop)
        for prop in to_del:
            del new_b.data[prop]
        for (name, val) in to_add:
            new_b.data[name] = val
        query = self.db.create_insert_query('programstatus', new_b.data)
        return [query_delete_instance, query_instance, query]
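    # Illustrative sketch (not in the original module): with the mapping
    # declared in __init__, a brok entry such as
    #   {'program_start': 1433822138, 'pid': 4242}
    # is rewritten to
    #   {'program_start_time': de_unixify(1433822138), 'process_id': 4242}
    # before create_insert_query turns it into SQL (values are hypothetical).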



    # TODO : fill nagios_instances
    def manage_update_program_status_brok(self, b):
        new_b = copy.deepcopy(b)
        to_del = ['instance_name', 'command_file', 'check_external_commands', 'check_service_freshness',
                  'check_host_freshness']
        to_add = []
        mapping = self.mapping['program_status']
        for prop in new_b.data:
            # e.g. mapping entry: 'program_start' -> {'name': 'program_start_time', 'transform': de_unixify}
            if prop in mapping:
                #print "Got a prop to change", prop
                val = new_b.data[prop]
                if mapping[prop]['transform'] is not None:
                    f = mapping[prop]['transform']
                    val = f(val)
                new_name = mapping[prop]['name']
                to_add.append((new_name, val))
                to_del.append(prop)
        for prop in to_del:
            del new_b.data[prop]
        for (name, val) in to_add:
            new_b.data[name] = val
        where_clause = {'instance_id' : new_b.data['instance_id']}
        query = self.db.create_update_query('programstatus', new_b.data, where_clause)
        return [query]



    # A host has just been created and the database is clean, so we INSERT it
    def manage_initial_host_status_brok(self, b):
        
        data = b.data

        # First add to nagios_objects
        objects_data = {'instance_id' : data['instance_id'], 'objecttype_id' : 1,
                        'name1' : data['host_name'], 'is_active' : data['active_checks_enabled']
                        }
        object_query = self.db.create_insert_query('objects', objects_data)
        self.db.execute_query(object_query)

        host_id = self.get_host_object_id_by_name_sync(data['host_name'],data['instance_id'])

        #print "DATA:", data
        hosts_data = { 'instance_id' : data['instance_id'],
                      'host_object_id' : host_id, 'alias' : data['alias'],
                      'display_name' : data['display_name'], 'address' : data['address'],
                      'failure_prediction_options' : '0', 'check_interval' : data['check_interval'],
                      'retry_interval' : data['retry_interval'], 'max_check_attempts' : data['max_check_attempts'],
                      'first_notification_delay' : data['first_notification_delay'], 'notification_interval' : data['notification_interval'],
                      'flap_detection_enabled' : data['flap_detection_enabled'], 'low_flap_threshold' : data['low_flap_threshold'],
                      'high_flap_threshold' : data['high_flap_threshold'], 'process_performance_data' : data['process_perf_data'],
                      'freshness_checks_enabled' : data['check_freshness'], 'freshness_threshold' : data['freshness_threshold'],
                      'passive_checks_enabled' : data['passive_checks_enabled'], 'event_handler_enabled' : data['event_handler_enabled'],
                      'active_checks_enabled' : data['active_checks_enabled'], 'notifications_enabled' : data['notifications_enabled'],
                      'obsess_over_host' : data['obsess_over_host'], 'notes' : data['notes'], 'notes_url' : data['notes_url'],
            }

        #print "HOST DATA", hosts_data
        query = self.db.create_insert_query('hosts', hosts_data)

        # Now create a hoststatus entry
        hoststatus_data = {'instance_id' : data['instance_id'],
                           'host_object_id' : host_id,
                           'normal_check_interval' : data['check_interval'],
                           'retry_check_interval' : data['retry_interval'], 'max_check_attempts' : data['max_check_attempts'],
                           'current_state' : data['state_id'], 'state_type' : data['state_type_id'],
                           'passive_checks_enabled' : data['passive_checks_enabled'], 'event_handler_enabled' : data['event_handler_enabled'],
                           'active_checks_enabled' : data['active_checks_enabled'], 'notifications_enabled' : data['notifications_enabled'],
                           'obsess_over_host' : data['obsess_over_host'],'process_performance_data' : data['process_perf_data'],
                           'check_type' : 0, 'current_check_attempt' : data['attempt'],
                           'execution_time' : data['execution_time'], 'latency' : data['latency'],
                           'output' : data['output'], 'perfdata' : data['perf_data'],'last_check' : de_unixify(data['last_chk']),
                           'last_hard_state_change' :  de_unixify(data['last_hard_state_change']),
                           'last_state_change' :  de_unixify(data['last_state_change']),
                           'last_notification' : de_unixify(data['last_notification']),
                           'current_notification_number' : data['current_notification_number'],
                           'problem_has_been_acknowledged' : data['problem_has_been_acknowledged'], 'acknowledgement_type' : data['acknowledgement_type'],
                           # set check to 1 so nagvis is happy
                           'has_been_checked' : 1, 'percent_state_change' : data['percent_state_change'], 'is_flapping' : data['is_flapping'],
                           'flap_detection_enabled' : data['flap_detection_enabled'],
                           }

        # Centreon adds some fields
        if self.centreon_version:
            hoststatus_data['long_output'] = data['long_output']

        hoststatus_query = self.db.create_insert_query('hoststatus' , hoststatus_data)

        return [query, hoststatus_query]
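    # Illustrative sketch (not in the original module): the pattern above is
    # "insert the generic nagios_objects row synchronously, read back its
    # object_id, then queue the typed rows". For a host 'srv-1' (hypothetical)
    # on instance 0:
    #   nagios_objects    <- instance_id=0, objecttype_id=1, name1='srv-1'
    #   nagios_hosts      <- host_object_id=<looked-up id>, alias, intervals, ...
    #   nagios_hoststatus <- host_object_id=<looked-up id>, current state, ...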



    # A service has just been created and the database is clean, so we INSERT it
    def manage_initial_service_status_brok(self, b):
        #new_b = copy.deepcopy(b)

        data = b.data
        # First add to nagios_objects
        objects_data = {'instance_id' : data['instance_id'], 'objecttype_id' : 2,
                        'name1' : data['host_name'], 'name2' : data['service_description'], 'is_active' : data['active_checks_enabled']
                        }
        object_query = self.db.create_insert_query('objects', objects_data)
        self.db.execute_query(object_query)

        host_id = self.get_host_object_id_by_name_sync(data['host_name'],data['instance_id'])
        service_id = self.get_service_object_id_by_name_sync(data['host_name'], data['service_description'],data['instance_id'])
        
        # TODO : Include with the service cache.
        self.mapping_service_id[data['id']] = service_id
        

        #print "DATA:", data
        #print "HOST ID:", host_id
        #print "SERVICE ID:", service_id
        services_data = { 'instance_id' : data['instance_id'],
                      'service_object_id' : service_id, 'host_object_id' : host_id,
                      'display_name' : data['display_name'],
                      'failure_prediction_options' : '0', 'check_interval' : data['check_interval'],
                      'retry_interval' : data['retry_interval'], 'max_check_attempts' : data['max_check_attempts'],
                      'first_notification_delay' : data['first_notification_delay'], 'notification_interval' : data['notification_interval'],
                      'flap_detection_enabled' : data['flap_detection_enabled'], 'low_flap_threshold' : data['low_flap_threshold'],
                      'high_flap_threshold' : data['high_flap_threshold'], 'process_performance_data' : data['process_perf_data'],
                      'freshness_checks_enabled' : data['check_freshness'], 'freshness_threshold' : data['freshness_threshold'],
                      'passive_checks_enabled' : data['passive_checks_enabled'], 'event_handler_enabled' : data['event_handler_enabled'],
                      'active_checks_enabled' : data['active_checks_enabled'], 'notifications_enabled' : data['notifications_enabled'],
                      'obsess_over_service' : data['obsess_over_service'], 'notes' : data['notes'], 'notes_url' : data['notes_url']
            }

        #print "HOST DATA", hosts_data
        query = self.db.create_insert_query('services', services_data)

        # Now create a servicestatus entry
        servicestatus_data = {'instance_id' : data['instance_id'],
                              'service_object_id' : service_id,
                              'normal_check_interval' : data['check_interval'],
                              'retry_check_interval' : data['retry_interval'], 'max_check_attempts' : data['max_check_attempts'],
                              'current_state' : data['state_id'], 'state_type' : data['state_type_id'],
                              'passive_checks_enabled' : data['passive_checks_enabled'], 'event_handler_enabled' : data['event_handler_enabled'],
                              'active_checks_enabled' : data['active_checks_enabled'], 'notifications_enabled' : data['notifications_enabled'],
                              'obsess_over_service' : data['obsess_over_service'],'process_performance_data' : data['process_perf_data'],

                              'check_type' : 0, 'current_check_attempt' : data['attempt'],
                              'execution_time' : data['execution_time'], 'latency' : data['latency'],
                              'output' : data['output'], 'perfdata' : data['perf_data'], 'last_check' : de_unixify(data['last_chk']),
                              'last_hard_state_change' :  de_unixify(data['last_hard_state_change']),
                              'last_state_change' :  de_unixify(data['last_state_change']),
                              'last_notification' : de_unixify(data['last_notification']),
                              'current_notification_number' : data['current_notification_number'],
                              'problem_has_been_acknowledged' : data['problem_has_been_acknowledged'], 'acknowledgement_type' : data['acknowledgement_type'],
                              # set check to 1 so nagvis is happy
                              'has_been_checked' : 1, 'percent_state_change' : data['percent_state_change'], 'is_flapping' : data['is_flapping'],
                              'flap_detection_enabled' : data['flap_detection_enabled'],
                              }

        # Centreon adds some fields
        if self.centreon_version:
            servicestatus_data['long_output'] = data['long_output']

        servicestatus_query = self.db.create_insert_query('servicestatus' , servicestatus_data)

        return [query, servicestatus_query]



    # A new hostgroup? Insert it
    # We also handle the members property, a list of (host.id, host_name)
    # tuples: each becomes a row in hostgroup_members linking the host
    # object_id to the hostgroup_id
    def manage_initial_hostgroup_status_brok(self, b):
        data = b.data

        # First add to nagios_objects
        objects_data = {'instance_id' : data['instance_id'], 'objecttype_id' : 3,
                        'name1' : data['hostgroup_name'], 'is_active' : 1
                        }
        object_query = self.db.create_insert_query('objects', objects_data)
        self.db.execute_query(object_query)

        hostgroup_id = self.get_hostgroup_object_id_by_name_sync(data['hostgroup_name'],data['instance_id'])
        
        # We can't get the id of the hostgroup from the database because
        # we have not inserted it yet!
        # So we compute a suitable id here and use it for both the
        # hostgroup and the hostgroup_members rows

        hostgroups_data = { 'hostgroup_id' : hostgp_id, 'instance_id' :  data['instance_id'],
                           'config_type' : 0, 'hostgroup_object_id' : hostgroup_id,
                           'alias' : data['alias']
            }

        query = self.db.create_insert_query('hostgroups', hostgroups_data)
        res = [query]

        # Ok, the hostgroups table is up to date, now we add relations
        # between hosts and hostgroups
        for (h_id, h_name) in b.data['members']:
            host_id = self.get_host_object_id_by_name_sync(h_name,data['instance_id'])

            hostgroup_members_data = {'instance_id' : data['instance_id'], 'hostgroup_id' : hostgp_id,
                                      'host_object_id' : host_id}
            q = self.db.create_insert_query('hostgroup_members', hostgroup_members_data)
            res.append(q)
        return res
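    # Illustrative sketch (not in the original module): if nagios_hostgroups
    # currently holds ids 1..4, get_max_hostgroup_id_sync() returns 5, and
    # both the nagios_hostgroups row and every nagios_hostgroup_members row
    # for this group are written with hostgroup_id = 5. Two brokers inserting
    # concurrently could race for the same id; an auto-increment column would
    # avoid that.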



    # A new servicegroup? Insert it
    # We also handle the members property, a list of (service.id, service_name)
    # tuples: each becomes a row in servicegroup_members linking the service
    # object_id to the servicegroup_id
    def manage_initial_servicegroup_status_brok(self, b):
        data = b.data

        # First add to nagios_objects
        objects_data = {'instance_id' : data['instance_id'], 'objecttype_id' : 4,
                        'name1' : data['servicegroup_name'], 'is_active' : 1
                        }
        object_query = self.db.create_insert_query('objects', objects_data)
        self.db.execute_query(object_query)

        servicegroup_id = self.get_servicegroup_object_id_by_name_sync(data['servicegroup_name'],data['instance_id'])
        svcgp_id = self.get_max_servicegroup_id_sync()
        


        servicegroups_data = {'servicegroup_id' : svcgp_id, 'instance_id' :  data['instance_id'],
                           'config_type' : 0, 'servicegroup_object_id' : servicegroup_id,
                           'alias' : data['alias']
            }

        query = self.db.create_insert_query('servicegroups', servicegroups_data)
        res = [query]
        
                
        # Ok, the servicegroups table is up to date, now we add relations
        # between service and servicegroups
        for (s_id, s_name) in b.data['members']:
            # TODO : Include with the service cache.
            service_id = self.mapping_service_id[s_id]
            servicegroup_members_data = {'instance_id' : data['instance_id'], 'servicegroup_id' : svcgp_id,
                                         'service_object_id' : service_id}
            q = self.db.create_insert_query('servicegroup_members', servicegroup_members_data)
            res.append(q)
        return res



    # Same as the service result, but for a host result
    def manage_host_check_result_brok(self, b):
        data = b.data
        #print "DATA", data
        host_id = self.get_host_object_id_by_name_sync(data['host_name'],data['instance_id'])

        # Only the host is impacted
        where_clause = {'host_object_id' : host_id}
        host_check_data = {'instance_id' : data['instance_id'],
                           'check_type' : 0, 'is_raw_check' : 0, 'current_check_attempt' : data['attempt'],
                           'state' : data['state_id'], 'state_type' : data['state_type_id'],
                           'start_time' : data['start_time'], 'start_time_usec' : 0,
                           'execution_time' : data['execution_time'], 'latency' : data['latency'],
                           'return_code' : data['return_code'], 'output' : data['output'],
                           'perfdata' : data['perf_data']
        }
        # Centreon adds some fields
        if self.centreon_version:
            host_check_data['long_output'] = data['long_output']

        query = self.db.create_update_query('hostchecks', host_check_data, where_clause)

        # Now hoststatus
        hoststatus_data = {'instance_id' : data['instance_id'],
                           'check_type' : 0, 'current_check_attempt' : data['attempt'],
                           'current_state' : data['state_id'], 'state_type' : data['state_type_id'],
                           'execution_time' : data['execution_time'], 'latency' : data['latency'],
                           'output' : data['output'], 'perfdata' : data['perf_data'], 'last_check' : de_unixify(data['last_chk']),
                           'percent_state_change' : data['percent_state_change'],
        }
        # Centreon adds some fields
        if self.centreon_version:
            hoststatus_data['long_output'] = data['long_output']

        hoststatus_query = self.db.create_update_query('hoststatus' , hoststatus_data, where_clause)

        return [query, hoststatus_query]
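    # Illustrative sketch (not in the original module): a host check result
    # therefore produces two UPDATEs keyed on host_object_id, one refreshing
    # nagios_hostchecks and one refreshing nagios_hoststatus; nothing is
    # inserted, so the initial_host_status brok must have been handled first.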



    # The next schedule gets its own brok; catch it and just update
    # the next_check with it
    def manage_host_next_schedule_brok(self, b):
        data = b.data
        
        host_id = self.get_host_object_id_by_name_sync(data['host_name'],data['instance_id'])

        # Only the host is impacted
        where_clause = {'host_object_id' : host_id}

        # Just update the host status
        hoststatus_data = {'next_check' : de_unixify(data['next_chk'])}
        hoststatus_query = self.db.create_update_query('hoststatus' , hoststatus_data, where_clause)

        return [hoststatus_query]



    # Same as the host result, but for a service result
    def manage_service_check_result_brok(self, b):
        data = b.data
        #print "DATA", data
        service_id = self.get_service_object_id_by_name_sync(data['host_name'], data['service_description'],data['instance_id'])
        
        
        # Only the service is impacted
        where_clause = {'service_object_id' : service_id}
        service_check_data = {'instance_id' : data['instance_id'],
                           'check_type' : 0, 'current_check_attempt' : data['attempt'],
                           'state' : data['state_id'], 'state_type' : data['state_type_id'],
                           'start_time' : data['start_time'], 'start_time_usec' : 0,
                           'execution_time' : data['execution_time'], 'latency' : data['latency'],
                           'return_code' : data['return_code'], 'output' : data['output'],
                           'perfdata' : data['perf_data']
        }

        # Centreon adds some fields
        if self.centreon_version:
            service_check_data['long_output'] = data['long_output']

        query = self.db.create_update_query('servicechecks', service_check_data, where_clause)

        # Now servicestatus
        servicestatus_data = {'instance_id' : data['instance_id'],
                              'check_type' : 0, 'current_check_attempt' : data['attempt'],
                              'current_state' : data['state_id'], 'state_type' : data['state_type_id'],
                              'execution_time' : data['execution_time'], 'latency' : data['latency'],
                              'output' : data['output'], 'perfdata' : data['perf_data'], 'last_check' : de_unixify(data['last_chk']),
                              'percent_state_change' : data['percent_state_change'],
        }

        # Centreon adds some fields
        if self.centreon_version:
            servicestatus_data['long_output'] = data['long_output']

        servicestatus_query = self.db.create_update_query('servicestatus' , servicestatus_data, where_clause)

        return [query, servicestatus_query]



    # The next schedule gets its own brok; catch it and just update
    # the next_check with it
    def manage_service_next_schedule_brok(self, b):
        data = b.data
        #print "DATA", data
        service_id = self.get_service_object_id_by_name_sync(data['host_name'], data['service_description'],data['instance_id'])
        
        
        # Only the service is impacted
        where_clause = {'service_object_id' : service_id}

        # Just update the service status
        servicestatus_data = {'next_check' : de_unixify(data['next_chk'])}
        servicestatus_query = self.db.create_update_query('servicestatus' , servicestatus_data, where_clause)

        return [servicestatus_query]



    # Ok, the host is updated
    def manage_update_host_status_brok(self, b):
        data = b.data
        
        host_id = self.get_host_object_id_by_name_sync(data['host_name'],data['instance_id'])
            

        hosts_data = {'instance_id' : data['instance_id'],
                      'failure_prediction_options' : '0', 'check_interval' : data['check_interval'],
                      'retry_interval' : data['retry_interval'], 'max_check_attempts' : data['max_check_attempts'],
                      'first_notification_delay' : data['first_notification_delay'], 'notification_interval' : data['notification_interval'],
                      'flap_detection_enabled' : data['flap_detection_enabled'], 'low_flap_threshold' : data['low_flap_threshold'],
                      'high_flap_threshold' : data['high_flap_threshold'], 'process_performance_data' : data['process_perf_data'],
                      'freshness_checks_enabled' : data['check_freshness'], 'freshness_threshold' : data['freshness_threshold'],
                      'passive_checks_enabled' : data['passive_checks_enabled'], 'event_handler_enabled' : data['event_handler_enabled'],
                      'active_checks_enabled' : data['active_checks_enabled'], 'notifications_enabled' : data['notifications_enabled'],
                      'obsess_over_host' : data['obsess_over_host'], 'notes' : data['notes'], 'notes_url' : data['notes_url']
            }
        # Only the host is impacted
        where_clause = {'host_object_id' : host_id}

        query = self.db.create_update_query('hosts', hosts_data, where_clause)

        # Now update the hoststatus entry
        hoststatus_data = {'instance_id' : data['instance_id'],
                           'host_object_id' : host_id,
                           'normal_check_interval' : data['check_interval'],
                           'retry_check_interval' : data['retry_interval'], 'max_check_attempts' : data['max_check_attempts'],
                           'current_state' : data['state_id'], 'state_type' : data['state_type_id'],
                           'passive_checks_enabled' : data['passive_checks_enabled'], 'event_handler_enabled' : data['event_handler_enabled'],
                           'active_checks_enabled' : data['active_checks_enabled'], 'notifications_enabled' : data['notifications_enabled'],
                           'obsess_over_host' : data['obsess_over_host'],'process_performance_data' : data['process_perf_data'],
                           'check_type' : 0, 'current_check_attempt' : data['attempt'],
                           'execution_time' : data['execution_time'], 'latency' : data['latency'],
                           'output' : data['output'], 'perfdata' : data['perf_data'],'last_check' : de_unixify(data['last_chk']),
                           'last_hard_state_change' :  de_unixify(data['last_hard_state_change']),
                           'last_state_change' :  de_unixify(data['last_state_change']),
                           'last_notification' : de_unixify(data['last_notification']),
                           'current_notification_number' : data['current_notification_number'],
                           'problem_has_been_acknowledged' : data['problem_has_been_acknowledged'], 'acknowledgement_type' : data['acknowledgement_type'],
                           # set check to 1 so nagvis is happy
                           'has_been_checked' : 1, 'is_flapping' : data['is_flapping'], 'percent_state_change' : data['percent_state_change'], 
                           'flap_detection_enabled' : data['flap_detection_enabled'],
                           }

        # Centreon adds some fields
        if self.centreon_version:
            hoststatus_data['long_output'] = data['long_output']

        hoststatus_query = self.db.create_update_query('hoststatus' , hoststatus_data, where_clause)

        return [query, hoststatus_query]



    # Ok, the service is updated
    def manage_update_service_status_brok(self, b):
        data = b.data

        service_id = self.get_service_object_id_by_name_sync(data['host_name'], data['service_description'],data['instance_id'])


        services_data = {'instance_id' : data['instance_id'],
                      'display_name' : data['display_name'],
                      'failure_prediction_options' : '0', 'check_interval' : data['check_interval'],
                      'retry_interval' : data['retry_interval'], 'max_check_attempts' : data['max_check_attempts'],
                      'first_notification_delay' : data['first_notification_delay'], 'notification_interval' : data['notification_interval'],
                      'flap_detection_enabled' : data['flap_detection_enabled'], 'low_flap_threshold' : data['low_flap_threshold'],
                      'high_flap_threshold' : data['high_flap_threshold'], 'process_performance_data' : data['process_perf_data'],
                      'freshness_checks_enabled' : data['check_freshness'], 'freshness_threshold' : data['freshness_threshold'],
                      'passive_checks_enabled' : data['passive_checks_enabled'], 'event_handler_enabled' : data['event_handler_enabled'],
                      'active_checks_enabled' : data['active_checks_enabled'], 'notifications_enabled' : data['notifications_enabled'],
                      'obsess_over_service' : data['obsess_over_service'], 'notes' : data['notes'], 'notes_url' : data['notes_url']
            }

        # Only the service is impacted
        where_clause = {'service_object_id' : service_id, 'instance_id' : data['instance_id']}
        # where_clause = {'host_name' : data['host_name']}
        query = self.db.create_update_query('services', services_data, where_clause)

        # Now update the servicestatus entry
        servicestatus_data = {'instance_id' : data['instance_id'],
                              'service_object_id' : service_id,
                              'normal_check_interval' : data['check_interval'],
                              'retry_check_interval' : data['retry_interval'], 'max_check_attempts' : data['max_check_attempts'],
                              'current_state' : data['state_id'], 'state_type' : data['state_type_id'],
                              'passive_checks_enabled' : data['passive_checks_enabled'], 'event_handler_enabled' : data['event_handler_enabled'],
                              'active_checks_enabled' : data['active_checks_enabled'], 'notifications_enabled' : data['notifications_enabled'],
                              'obsess_over_service' : data['obsess_over_service'],'process_performance_data' : data['process_perf_data'],

                              'check_type' : 0, 'current_check_attempt' : data['attempt'],
                              'execution_time' : data['execution_time'], 'latency' : data['latency'],
                              'output' : data['output'], 'perfdata' : data['perf_data'], 'last_check' : de_unixify(data['last_chk']),
                              'last_hard_state_change' :  de_unixify(data['last_hard_state_change']),
                              'last_state_change' :  de_unixify(data['last_state_change']),
                              'last_notification' : de_unixify(data['last_notification']),
                              'current_notification_number' : data['current_notification_number'],
                              'problem_has_been_acknowledged' : data['problem_has_been_acknowledged'], 'acknowledgement_type' : data['acknowledgement_type'],
                              # set check to 1 so nagvis is happy
                              'has_been_checked' : 1, 'is_flapping' : data['is_flapping'], 'percent_state_change' : data['percent_state_change'],
                              'flap_detection_enabled' : data['flap_detection_enabled'],
                              }

        # Centreon adds some fields
        if self.centreon_version:
            servicestatus_data['long_output'] = data['long_output']

        where_clause = {'service_object_id' : service_id}
        servicestatus_query = self.db.create_update_query('servicestatus' , servicestatus_data, where_clause)

        return [query, servicestatus_query]




    # A contact has just been created and the database is clean, so we INSERT it
    def manage_initial_contact_status_brok(self, b):
        data = b.data
        
        # First add to nagios_objects
        objects_data = {'instance_id' : data['instance_id'], 'objecttype_id' : 10,
                        'name1' : data['contact_name'], 'is_active' : 1
                        }
        object_query = self.db.create_insert_query('objects', objects_data)
        self.db.execute_query(object_query)

        contact_obj_id = self.get_contact_object_id_by_name_sync(data['contact_name'],data['instance_id'])
        
        contacts_data = {'instance_id' : data['instance_id'],
                      'contact_object_id' : contact_obj_id,
                      'alias' : data['alias'],
                      'email_address' : data['email'], 'pager_address' : data['pager'],
                      'host_notifications_enabled' : data['host_notifications_enabled'],
                      'service_notifications_enabled' : data['service_notifications_enabled'],
            }

        #print "HOST DATA", hosts_data
        query = self.db.create_insert_query('contacts', contacts_data)
        return [query]



    # A new contact group? Insert it
    def manage_initial_contactgroup_status_brok(self, b):
        data = b.data

        # First add to nagios_objects
        objects_data = {'instance_id' : data['instance_id'], 'objecttype_id' : 11,
                        'name1' : data['contactgroup_name'], 'is_active' : 1
                        }
        object_query = self.db.create_insert_query('objects', objects_data)
        self.db.execute_query(object_query)

        contactgroup_id = self.get_contactgroup_object_id_by_name_sync(data['contactgroup_name'],data['instance_id'])
        ctcgp_id = self.get_max_contactgroup_id_sync()

        contactgroups_data = {'contactgroup_id' : ctcgp_id, 'instance_id' :  data['instance_id'],
                           'config_type' : 0,
                           'contactgroup_object_id' : contactgroup_id,
                           'alias' : data['alias']
            }

        query = self.db.create_insert_query('contactgroups', contactgroups_data)
        res = [query]

        # Ok, the contactgroups table is up to date, now we add relations
        # between contacts and contactgroups
        for (c_id, c_name) in b.data['members']:
            
            contact_obj_id = self.get_contact_object_id_by_name_sync(c_name,data['instance_id'])
            
            contactgroup_members_data = {'instance_id' : data['instance_id'],
                                         'contactgroup_id' : ctcgp_id,
                                         'contact_object_id' : contact_obj_id}
            q = self.db.create_insert_query('contactgroup_members', contactgroup_members_data)
            res.append(q)
        return res



    # A notification has just been created, we INSERT it
    def manage_notification_raise_brok(self, b):

        data = b.data
        #print "CREATING A NOTIFICATION", data
        if data['service_description'] != '':
             service_id = self.get_service_object_id_by_name_sync(data['host_name'], data['service_description'],data['instance_id'])
        else:
             host_id = self.get_host_object_id_by_name_sync(data['host_name'],data['instance_id'])

        notification_data = {'instance_id' :  data['instance_id'],
                             'start_time' : de_unixify(data['start_time']),
                             'end_time' : de_unixify(data['end_time']),
                             'state' : data['state']                             
                             }
        
        query = self.db.create_insert_query('notifications', notification_data)
        return [query]
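    # Illustrative sketch (not in the original module): the resulting row in
    # nagios_notifications only carries instance_id, the start/end times and
    # the state; which host or service was notified is not recorded here.

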
class Glpidb_broker(BaseModule):
    def __init__(self, modconf):
        BaseModule.__init__(self, modconf)

        self.hosts_cache = {}
        self.services_cache = {}

        # Database configuration
        self.host = getattr(modconf, 'host', '127.0.0.1')
        self.user = getattr(modconf, 'user', 'shinken')
        self.password = getattr(modconf, 'password', 'shinken')
        self.database = getattr(modconf, 'database', 'glpidb')
        self.character_set = getattr(modconf, 'character_set', 'utf8')
        logger.info("[glpidb] using '%s' database on %s (user = %s)", self.database, self.host, self.user)

        # Database tables update configuration
        self.update_availability = (getattr(modconf, 'update_availability', '0') == '1')
        self.update_shinken_state = (getattr(modconf, 'update_shinken_state', '0') == '1')
        self.update_services_events = (getattr(modconf, 'update_services_events', '0') == '1')
        self.update_hosts = (getattr(modconf, 'update_hosts', '0') == '1')
        self.update_services = (getattr(modconf, 'update_services', '0') == '1')
        self.update_acknowledges = (getattr(modconf, 'update_acknowledges', '0') == '1')
        logger.info("[glpidb] updating availability: %s", self.update_availability)
        logger.info("[glpidb] updating Shinken state: %s", self.update_shinken_state)
        logger.info("[glpidb] updating services events: %s", self.update_services_events)
        logger.info("[glpidb] updating hosts states: %s", self.update_hosts)
        logger.info("[glpidb] updating services states: %s", self.update_services)
        logger.info("[glpidb] updating acknowledges states: %s", self.update_acknowledges)

    def init(self):
        from shinken.db_mysql import DBMysql
        logger.info("[glpidb] Creating a mysql backend : %s (%s)" % (self.host, self.database))
        self.db_backend = DBMysql(self.host, self.user, self.password, self.database, self.character_set)

        logger.info("[glpidb] Connecting to database ...")
        self.db_backend.connect_database()
        logger.info("[glpidb] Connected")

    # Get a brok, parse it, and put it in the database
    def manage_brok(self, b):
        # Build initial host state cache
        if b.type == 'initial_host_status':
            host_name = b.data['host_name']
            logger.debug("[glpidb] initial host status : %s", host_name)

            try:
                logger.debug("[glpidb] initial host status : %s : %s", host_name, b.data['customs'])
                self.hosts_cache[host_name] = {'hostsid': b.data['customs']['_HOSTID'], 'itemtype': b.data['customs']['_ITEMTYPE'], 'items_id': b.data['customs']['_ITEMSID'] }
            except KeyError:
                self.hosts_cache[host_name] = {'items_id': None}
                logger.debug("[glpidb] no custom _HOSTID and/or _ITEMTYPE and/or _ITEMSID for %s", host_name)

            logger.debug("[glpidb] initial host status : %s is %s", host_name, self.hosts_cache[host_name]['items_id'])

        # Build initial service state cache
        if b.type == 'initial_service_status':
            host_name = b.data['host_name']
            service_description = b.data['service_description']
            service_id = host_name+"/"+service_description
            logger.debug("[glpidb] initial service status : %s", service_id)

            if host_name not in self.hosts_cache or self.hosts_cache[host_name]['items_id'] is None:
                logger.debug("[glpidb] initial service status, host is not defined in Glpi : %s.", host_name)
                return

            try:
                logger.debug("[glpidb] initial service status : %s : %s", service_id, b.data['customs'])
                self.services_cache[service_id] = {'itemtype': b.data['customs']['_ITEMTYPE'], 'items_id': b.data['customs']['_ITEMSID'] }
            except KeyError:
                self.services_cache[service_id] = {'items_id': None}
                logger.debug("[glpidb] no custom _ITEMTYPE and/or _ITEMSID for %s", service_id)

            logger.debug("[glpidb] initial service status : %s is %s", service_id, self.services_cache[service_id]['items_id'])

        # Manage host check result if host is defined in Glpi DB
        if b.type == 'host_check_result':
            host_name = b.data['host_name']
            logger.debug("[glpidb] host check result: %s: %s", host_name, b.data)

            # Update Shinken state table
            if self.update_shinken_state:
                self.record_shinken_state(host_name, '', b)

            # Update availability
            if self.update_availability:
                self.record_availability(host_name, '', b)

            if host_name in self.hosts_cache and self.hosts_cache[host_name]['items_id'] is not None:
                start = time.time()
                self.record_host_check_result(b)
                logger.debug("[glpidb] host check result: %s, %d seconds", host_name, time.time() - start)

        # Manage service check result if service is defined in Glpi DB
        if b.type == 'service_check_result':
            host_name = b.data['host_name']
            service_description = b.data['service_description']
            service_id = host_name+"/"+service_description
            logger.debug("[glpidb] service check result: %s", service_id)

            # Update Shinken state table
            if self.update_shinken_state:
                self.record_shinken_state(host_name, service_description, b)

            # Update availability
            if self.update_availability:
                self.record_availability(host_name, service_description, b)

            if host_name in self.hosts_cache and self.hosts_cache[host_name]['items_id'] is not None:
                if service_id in self.services_cache and self.services_cache[service_id]['items_id'] is not None:
                    start = time.time()
                    self.record_service_check_result(b)
                    logger.debug("[glpidb] service check result: %s, %d seconds", service_id, time.time() - start)

        return
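    # Illustrative sketch (not in the original module): for a brok with
    #   b.type = 'service_check_result', host_name = 'srv-1',
    #   service_description = 'HTTP'   (hypothetical names)
    # the method updates the Shinken state and availability tables when
    # enabled, then records the check result only if both 'srv-1' and
    # 'srv-1/HTTP' were found in the GLPI caches at startup.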

    ## Host result
    def record_host_check_result(self, b):
        host_name = b.data['host_name']
        host_cache = self.hosts_cache[host_name]
        logger.debug("[glpidb] record host check result: %s: %s", host_name, b.data)

        # Escape SQL fields ...
        # b.data['output'] = MySQLdb.escape_string(b.data['output'])
        # b.data['long_output'] = MySQLdb.escape_string(b.data['long_output'])
        # b.data['perf_data'] = MySQLdb.escape_string(b.data['perf_data'])

        if self.update_hosts:
            data = {}
            data['event'] = ("%s \n %s", b.data['output'], b.data['long_output']) if (len(b.data['long_output']) > 0) else b.data['output']
            data['state'] = b.data['state']
            data['state_type'] = b.data['state_type']
            data['last_check'] = datetime.datetime.fromtimestamp( int(b.data['last_chk']) ).strftime('%Y-%m-%d %H:%M:%S')
            data['perf_data'] = b.data['perf_data']
            data['latency'] = b.data['latency']
            data['execution_time'] = b.data['execution_time']
            data['is_acknowledged'] = '1' if b.data['problem_has_been_acknowledged'] else '0'

            where_clause = {'items_id': host_cache['items_id'], 'itemtype': host_cache['itemtype']}
            query = self.db_backend.create_update_query('glpi_plugin_monitoring_hosts', data, where_clause)
            try:
                self.db_backend.execute_query(query)
            except Exception as exp:
                logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

        # Update acknowledge table if host becomes UP
        #if self.update_acknowledges and b.data['state_id'] == 0 and b.data['last_state_id'] != 0:
        # Update acknowledge table if host is UP
        if self.update_acknowledges and b.data['state_id'] == 0:
            data = {}
            data['end_time'] = datetime.datetime.fromtimestamp( int(b.data['last_chk']) ).strftime('%Y-%m-%d %H:%M:%S')
            data['expired'] = '1'

            where_clause = {'items_id': host_cache['items_id'], 'itemtype': "PluginMonitoringHost"}
            query = self.db_backend.create_update_query('glpi_plugin_monitoring_acknowledges', data, where_clause)
            logger.debug("[glpidb] acknowledge query: %s", query)
            try:
                self.db_backend.execute_query(query)
            except Exception as exp:
                logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

    ## Service result
    def record_service_check_result(self, b):
        host_name = b.data['host_name']
        service_description = b.data['service_description']
        service_id = host_name+"/"+service_description
        service_cache = self.services_cache[service_id]
        logger.debug("[glpidb] service check result: %s: %s", service_id, b.data)

        # Escape SQL fields ...
        # b.data['output'] = MySQLdb.escape_string(b.data['output'])
        # b.data['long_output'] = MySQLdb.escape_string(b.data['long_output'])
        # b.data['perf_data'] = MySQLdb.escape_string(b.data['perf_data'])

        # Insert into serviceevents log table
        if self.update_services_events:
            data = {}
            data['plugin_monitoring_services_id'] = service_cache['items_id']
            data['date'] = datetime.datetime.fromtimestamp( int(b.data['last_chk']) ).strftime('%Y-%m-%d %H:%M:%S')
            data['event'] = ("%s \n %s", b.data['output'], b.data['long_output']) if (len(b.data['long_output']) > 0) else b.data['output']
            data['state'] = b.data['state']
            data['state_type'] = b.data['state_type']
            data['perf_data'] = b.data['perf_data']
            data['latency'] = b.data['latency']
            data['execution_time'] = b.data['execution_time']

            query = self.db_backend.create_insert_query('glpi_plugin_monitoring_serviceevents', data)
            try:
                self.db_backend.execute_query(query)
            except Exception as exp:
                logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

        # Update service state table
        if self.update_services:
            data = {}
            data['event'] = ("%s \n %s", b.data['output'], b.data['long_output']) if (len(b.data['long_output']) > 0) else b.data['output']
            data['state'] = b.data['state']
            data['state_type'] = b.data['state_type']
            data['last_check'] = datetime.datetime.fromtimestamp( int(b.data['last_chk']) ).strftime('%Y-%m-%d %H:%M:%S')
            data['is_acknowledged'] = '1' if b.data['problem_has_been_acknowledged'] else '0'

            where_clause = {'id': service_cache['items_id']}
            table = 'glpi_plugin_monitoring_services'
            if service_cache['itemtype'] == 'ServiceCatalog':
                table = 'glpi_plugin_monitoring_servicescatalogs'
            query = self.db_backend.create_update_query(table, data, where_clause)
            try:
                self.db_backend.execute_query(query)
            except Exception as exp:
                logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

        # Update acknowledge table if service becomes OK
        #if self.update_acknowledges and b.data['state_id'] == 0 and b.data['last_state_id'] != 0:
        # Update acknowledge table if service is OK
        if self.update_acknowledges and b.data['state_id'] == 0:
            data = {}
            data['end_time'] = datetime.datetime.fromtimestamp( int(b.data['last_chk']) ).strftime('%Y-%m-%d %H:%M:%S')
            data['expired'] = '1'

            where_clause = {'items_id': service_cache['items_id'], 'itemtype': "PluginMonitoringService"}
            query = self.db_backend.create_update_query('glpi_plugin_monitoring_acknowledges', data, where_clause)
            logger.debug("[glpidb] acknowledge query: %s", query)
            try:
                self.db_backend.execute_query(query)
            except Exception as exp:
                logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

    ## Update the Shinken state of all hosts/services
    def record_shinken_state(self, hostname, service, b):
        # Insert/update in shinken state table
        logger.debug("[glpidb] record shinken state: %s/%s: %s", hostname, service, b.data)

        # Test if a record already exists
        exists = None
        query = "SELECT COUNT(*) AS nbRecords FROM `glpi_plugin_monitoring_shinkenstates` WHERE hostname='%s' AND service='%s';" % (hostname, service)
        try:
            self.db_backend.execute_query(query)
            res = self.db_backend.fetchone()
            exists = (res[0] > 0)
        except Exception as exp:
            # Stop updating the Shinken state table because it does not exist or is malformed ...
            self.update_shinken_state = False
            logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

        # Escape SQL fields ...
        # b.data['output'] = MySQLdb.escape_string(b.data['output'])
        # b.data['long_output'] = MySQLdb.escape_string(b.data['long_output'])
        # b.data['perf_data'] = MySQLdb.escape_string(b.data['perf_data'])

        data = {}
        data['hostname'] = hostname
        data['service'] = service
        data['state'] = b.data['state_id']
        data['state_type'] = b.data['state_type']
        data['last_output'] = ("%s \n %s", b.data['output'], b.data['long_output']) if (len(b.data['long_output']) > 0) else b.data['output']
        data['last_check'] = datetime.datetime.fromtimestamp( int(b.data['last_chk']) ).strftime('%Y-%m-%d %H:%M:%S')
        data['last_perfdata'] = b.data['perf_data']
        data['is_ack'] = '1' if b.data['problem_has_been_acknowledged'] else '0'

        if exists:
            where_clause = {'hostname': hostname, 'service': service}
            query = self.db_backend.create_update_query('glpi_plugin_monitoring_shinkenstates', data, where_clause)
            try:
                self.db_backend.execute_query(query)
            except Exception as exp:
                logger.error("[glpidb] error '%s' when executing query: %s", exp, query)
        else:
            query = self.db_backend.create_insert_query('glpi_plugin_monitoring_shinkenstates', data)
            try:
                self.db_backend.execute_query(query)
            except Exception as exp:
                logger.error("[glpidb] error '%s' when executing query: %s", exp, query)
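    # Illustrative sketch (not in the original module): this is a manual
    # "upsert": the SELECT COUNT(*) above decides between an UPDATE and an
    # INSERT. With a unique key on (hostname, service), MySQL's
    # INSERT ... ON DUPLICATE KEY UPDATE could do it in one query; the
    # two-step form avoids assuming anything about the table's indexes.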

    ## Update hosts/services availability
    def record_availability(self, hostname, service, b):
        # Insert/update in the availability table
        logger.debug("[glpidb] record availability: %s/%s: %s", hostname, service, b.data)
        # if hostname.startswith('sim'):
            # logger.warning("[glpidb] record availability: %s/%s: %s", hostname, service, b.data)

        # Host check brok:
        # ----------------
        # {'last_time_unreachable': 0, 'last_problem_id': 1, 'check_type': 1, 'retry_interval': 1, 'last_event_id': 1, 'problem_has_been_acknowledged': False, 'last_state': 'DOWN', 'latency': 0, 'last_state_type': 'HARD', 'last_hard_state_change': 1433822140, 'last_time_up': 1433822140, 'percent_state_change': 0.0, 'state': 'UP', 'last_chk': 1433822138, 'last_state_id': 0, 'end_time': 0, 'timeout': 0, 'current_event_id': 1, 'execution_time': 0, 'start_time': 0, 'return_code': 0, 'state_type': 'HARD', 'output': '', 'in_checking': False, 'early_timeout': 0, 'in_scheduled_downtime': False, 'attempt': 1, 'state_type_id': 1, 'acknowledgement_type': 1, 'last_state_change': 1433822140.825969, 'last_time_down': 1433821584, 'instance_id': 0, 'long_output': '', 'current_problem_id': 0, 'host_name': 'sim-0003', 'check_interval': 60, 'state_id': 0, 'has_been_checked': 1, 'perf_data': u''}
        #
        # Interesting information ...
        # 'state_id': 0 / 'state': 'UP' / 'state_type': 'HARD'
        # 'last_state_id': 0 / 'last_state': 'UP' / 'last_state_type': 'HARD'
        # 'last_time_unreachable': 0 / 'last_time_up': 1433152221 / 'last_time_down': 0
        # 'last_chk': 1433152220 / 'last_state_change': 1431420780.184517
        # 'in_scheduled_downtime': False

        # Service check brok:
        # -------------------
        # {'last_problem_id': 0, 'check_type': 0, 'retry_interval': 2, 'last_event_id': 0, 'problem_has_been_acknowledged': False, 'last_time_critical': 0, 'last_time_warning': 0, 'end_time': 0, 'last_state': 'OK', 'latency': 0.2347090244293213, 'last_time_unknown': 0, 'last_state_type': 'HARD', 'last_hard_state_change': 1433736035, 'percent_state_change': 0.0, 'state': 'OK', 'last_chk': 1433785101, 'last_state_id': 0, 'host_name': u'shinken24', 'has_been_checked': 1, 'check_interval': 5, 'current_event_id': 0, 'execution_time': 0.062339067459106445, 'start_time': 0, 'return_code': 0, 'state_type': 'HARD', 'output': 'Ok : memory consumption is 37%', 'service_description': u'Memory', 'in_checking': False, 'early_timeout': 0, 'in_scheduled_downtime': False, 'attempt': 1, 'state_type_id': 1, 'acknowledgement_type': 1, 'last_state_change': 1433736035.927526, 'instance_id': 0, 'long_output': u'', 'current_problem_id': 0, 'last_time_ok': 1433785103, 'timeout': 0, 'state_id': 0, 'perf_data': u'cached=13%;;;0%;100% buffered=1%;;;0%;100% consumed=37%;80%;90%;0%;100% used=53%;;;0%;100% free=46%;;;0%;100% swap_used=0%;;;0%;100% swap_free=100%;;;0%;100% buffered_abs=36076KB;;;0KB;2058684KB used_abs=1094544KB;;;0KB;2058684KB cached_abs=284628KB;;;0KB;2058684KB consumed_abs=773840KB;;;0KB;2058684KB free_abs=964140KB;;;0KB;2058684KB total_abs=2058684KB;;;0KB;2058684KB swap_total=392188KB;;;0KB;392188KB swap_used=0KB;;;0KB;392188KB swap_free=392188KB;;;0KB;392188KB'}
        #
        # Interesting information ...
        # 'state_id': 0 / 'state': 'OK' / 'state_type': 'HARD'
        # 'last_state_id': 0 / 'last_state': 'OK' / 'last_state_type': 'HARD'
        # 'last_time_critical': 0 / 'last_time_warning': 0 / 'last_time_unknown': 0 / 'last_time_ok': 1433785103
        # 'last_chk': 1433785101 / 'last_state_change': 1433736035.927526
        # 'in_scheduled_downtime': False

        # Only for simulated hosts ...
        # if not hostname.startswith('sim'):
            # return

        # Only for host check ...
        # if not service is '':
            # return

        # Ignoring SOFT states ...
        # if b.data['state_type_id']==0:
            # logger.warning("[glpidb] record availability for: %s/%s, but no HARD state, ignoring ...", hostname, service)


        midnight = datetime.datetime.combine(datetime.date.today(), datetime.time.min)
        midnight_timestamp = time.mktime(midnight.timetuple())
        # Number of seconds elapsed today ...
        seconds_today = int(b.data['last_chk']) - midnight_timestamp
        # Number of seconds since the state changed (recomputed below)
        since_last_state = int(b.data['last_state_change']) - seconds_today
        # Scheduled downtime
        scheduled_downtime = bool(b.data['in_scheduled_downtime'])
        # Day
        day = datetime.date.today().strftime('%Y-%m-%d')
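        # Illustrative sketch (not in the original module): if midnight was
        # at timestamp 1000000 and the check ran at 1000600, seconds_today
        # is 600, i.e. the check happened 10 minutes into the current day
        # (timestamps are hypothetical).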

        # Database table
        # --------------
        # `hostname` varchar(255) CHARACTER SET latin1 DEFAULT NULL,
        # `service` varchar(255) CHARACTER SET latin1 DEFAULT NULL,
        # `day` DATE DEFAULT NULL,
        # `is_downtime` tinyint(1) DEFAULT '0',
        # `daily_0` int(6) DEFAULT '0',                 Up/Ok
        # `daily_1` int(6) DEFAULT '0',                 Down/Warning
        # `daily_2` int(6) DEFAULT '0',                 Unreachable/Critical
        # `daily_3` int(6) DEFAULT '0',                 Unknown
        # `daily_4` int(6) DEFAULT '86400',             Unchecked
        # `daily_9` int(6) DEFAULT '0',                 Downtime
        # --------------

        # Test if a record for the current day already exists
        exists = False
        res = None
        query = """SELECT id, hostname, service, day, is_downtime,
                    daily_0, daily_1, daily_2, daily_3, daily_4,
                    first_check_state, first_check_timestamp,
                    last_check_state, last_check_timestamp
                    FROM `glpi_plugin_monitoring_availabilities`
                    WHERE hostname='%s' AND service='%s' AND day='%s';""" % (hostname, service, day)
        try:
            self.db_backend.execute_query(query)
            res = self.db_backend.fetchone()
            logger.debug("[glpidb] record availability, select query result: %s", res)
                # (9L, 'sim-0001', '', datetime.date(2015, 6, 9), 0, 0L, 0L, 0L, 0L, 86400L, 1, 1433854693L, 1, 1433854693L)
            exists = True if res is not None else False
        except Exception as exp:
            # Stop updating availability because the table does not exist or is malformed ...
            self.update_availability = False
            logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

        # Configure recorded data
        data = {}
        data['hostname'] = hostname
        data['service'] = service
        data['day'] = day
        data['is_downtime'] = '1' if bool(b.data['in_scheduled_downtime']) else '0'
        # All state counters start at a duration of 0 seconds.
        data['daily_0'] = 0
        data['daily_1'] = 0
        data['daily_2'] = 0
        data['daily_3'] = 0
        data['daily_4'] = 0

        current_state = b.data['state']
        current_state_id = b.data['state_id']
        last_state = b.data['last_state']
        last_check_state = res[12] if res else 3
        last_check_timestamp = res[13] if res else midnight_timestamp
        since_last_state = 0
        logger.debug("[glpidb] current state: %s, last state: %s", current_state, last_state)

        # Host check
        if service=='':
            last_time_unreachable = b.data['last_time_unreachable']
            last_time_up = b.data['last_time_up']
            last_time_down = b.data['last_time_down']
            last_state_change = b.data['last_state_change']
            # Overridden just below: use the current time, not the brok timestamp
            last_state_change = int(time.time())

            if current_state == 'UP':
                since_last_state = int(last_state_change - last_check_timestamp)

            elif current_state == 'UNREACHABLE':
                since_last_state = int(last_state_change - last_check_timestamp)

            elif current_state == 'DOWN':
                since_last_state = int(last_state_change - last_check_timestamp)
        # Service check
        else:
            last_state_change = int(time.time())
            since_last_state = int(last_state_change - last_check_timestamp)

        # Update existing record
        if exists:
            data = {
                    'is_downtime': res[4],
                    'daily_0': res[5], 'daily_1': res[6], 'daily_2': res[7], 'daily_3': res[8], 'daily_4': res[9]
                    }

            logger.debug("[glpidb] current data: %s", data)

            # Update record
            if since_last_state > seconds_today:
                # Last state changed before today ...

                # Current state duration for all seconds of today
                data["daily_%d" % current_state_id] = seconds_today
            else:
                # Increase current state duration with seconds since last state
                data["daily_%d" % b.data['state_id']] += (since_last_state)

            # Unchecked state for all day duration minus all states duration
            data['daily_4'] = 86400
            for value in [ data['daily_0'], data['daily_1'], data['daily_2'], data['daily_3'] ]:
                data['daily_4'] -= value
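            # Illustrative sketch (not in the original module): if a host was
            # UP for 600s and DOWN for 120s so far today, then
            #   daily_0 = 600, daily_1 = 120, daily_2 = daily_3 = 0
            #   daily_4 = 86400 - 600 - 120 = 85680
            # i.e. the rest of the day is still counted as unchecked.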

            # Last check state and timestamp
            data['last_check_state'] = current_state_id
            data['last_check_timestamp'] = int(b.data['last_chk'])

            where_clause = {'hostname': hostname, 'service': service, 'day': day}
            query = self.db_backend.create_update_query('glpi_plugin_monitoring_availabilities', data, where_clause)
            logger.debug("[glpidb] record availability, update query: %s", query)
            try:
                self.db_backend.execute_query(query)
            except Exception as exp:
                logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

        # Create record
        else:
            # First check state and timestamp
            data['first_check_state'] = current_state_id
            data['first_check_timestamp'] = int(b.data['last_chk'])

            # Last check state and timestamp
            data['last_check_state'] = current_state_id
            data['last_check_timestamp'] = int(b.data['last_chk'])

            # Ignore computed values because it is the first check received today!
            data['daily_4'] = 86400

            query = self.db_backend.create_insert_query('glpi_plugin_monitoring_availabilities', data)
            logger.debug("[glpidb] record availability, insert query: %s", query)
            try:
                self.db_backend.execute_query(query)
            except Exception as exp:
                logger.error("[glpidb] error '%s' when executing query: %s", exp, query)

    def main(self):
        self.set_proctitle(self.name)
        self.set_exit_handler()
        while not self.interrupted:
            logger.debug("[glpidb] queue length: %s", self.to_q.qsize())
            start = time.time()
            l = self.to_q.get()
            for b in l:
                b.prepare()
                self.manage_brok(b)

            logger.debug("[glpidb] time to manage %s broks (%d secs)", len(l), time.time() - start)
Ejemplo n.º 16
class Ndodb_Mysql_broker(BaseModule):

    """ This Class is a plugin for the Shinken Broker. It is in charge
    to brok information into the database. For the moment
    only Mysql is supported. This code is __imported__ from Broker.
    The managed_brok function is called by Broker for manage the broks. It calls
    the manage_*_brok functions that create queries, and then run queries.

    """

    def __init__(self, conf):
        BaseModule.__init__(self, conf)
        # Mapping for name of data and transform function
        self.mapping = {
            'program_status': {
                'program_start': {'name': 'program_start_time', 'transform': de_unixify},
                'pid': {'name': 'process_id', 'transform': None},
                'last_alive': {'name': 'status_update_time', 'transform': de_unixify},
                'is_running': {'name': 'is_currently_running', 'transform': None},
                'last_log_rotation': {'name': 'last_log_rotation', 'transform': de_unixify},
                'last_command_check': {'name': 'last_command_check', 'transform': de_unixify}
                },
            }

        self.host = conf.host
        self.user = conf.user
        self.password = conf.password
        self.database = conf.database
        self.character_set = conf.character_set
        self.port = int(getattr(conf, 'port', '3306'))
        self.prefix = getattr(conf, 'prefix', 'nagios_')

        # Centreon NDO adds some fields, like long_output,
        # that are not in the vanilla NDO
        self.centreon_version = False
        self.synchronize_database_id = int(conf.synchronize_database_id)

    # Called by Broker so we can do init stuff
    # TODO: add conf param to get pass with init
    # Conf from arbiter!
    def init(self):
        logger.info("I connect to NDO database")
        self.db = DBMysql(self.host, self.user, self.password, self.database,
                          self.character_set, table_prefix=self.prefix,
                          port=self.port)
        self.connect_database()

        # Cache for hosts and services
        # The structure is as follows: first keyed by instance id, then by
        # host name / (host, service description) to access the wanted data
        self.services_cache_sync = {}
        self.hosts_cache_sync = {}

        # We need to search for Centreon-specific fields, like long_output.
        # Note: TABLE_SCHEMA is hardcoded to 'ndo' here, which assumes the
        # NDO database is actually named 'ndo'.
        query = u"select TABLE_NAME from information_schema.columns " \
                "where TABLE_SCHEMA='ndo' and " \
                "TABLE_NAME='%sservicestatus' and " \
                "COLUMN_NAME='long_output';" % self.prefix

        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            self.centreon_version = False
        else:
            self.centreon_version = True
            logger.info("[MySQL/NDO] Using the centreon version")

        # Cache for database id
        # In order not to query the database every time
        self.database_id_cache = {}

        # Mapping between service_id in Shinken and in the database,
        # because we can't access host_name from a service every time :(
        self.mapping_service_id = {}

        # Pending broks waiting for their instance id to be converted,
        # keyed by the original instance id
        self.todo = {}

    # Get a brok, parse it, and put it in the database.
    # We call functions like manage_ TYPEOFBROK _brok that return queries
    def manage_brok(self, b):
        # We need to do some brok mod, so we copy it
        new_b = copy.deepcopy(b)

        # If we synchronize, we must look for id changes
        if self.synchronize_database_id != 0 and 'instance_id' in new_b.data:
            # If we use database sync, we have to synchronize database id
            # so we wait for the instance name
            brok_id = new_b.data['instance_id']
            converted_instance_id = self.convert_id(brok_id)
            if converted_instance_id is not None:
                new_b.data['instance_id'] = converted_instance_id
                queries = BaseModule.manage_brok(self, new_b)
                if queries is not None:
                    for q in queries:
                        self.db.execute_query(q)

            if converted_instance_id is None:
                if brok_id in self.todo:
                    self.todo[brok_id].append(new_b)
                else:
                    self.todo[brok_id] = [new_b]

            if converted_instance_id is None and 'instance_name' in new_b.data:
                converted_brok_id = self.get_instance_id(new_b.data['instance_name'])
                self.database_id_cache[brok_id] = converted_brok_id
                # We have to set the correct instance ID on every brok
                # waiting in the list, then execute their queries
                for brok in self.todo[brok_id]:
                    brok.data['instance_id'] = converted_brok_id
                    queries = BaseModule.manage_brok(self, brok)
                    if queries is not None:
                        for q in queries:
                            self.db.execute_query(q)
                # We have finished managing the todo list, so we empty it
                self.todo[brok_id] = []

            return

        # Executed if we don't synchronize or there is no instance_id
        queries = BaseModule.manage_brok(self, new_b)

        if queries is not None:
            for q in queries:
                self.db.execute_query(q)
            return

    # Create the database connection.
    # An exception is raised if an argument is bad.
    def connect_database(self):
        try:
            self.db.connect_database()
        except _mysql_exceptions.OperationalError as exp:
            logger.error("[MySQL/NDO] Module raised an exception: %s. "
                         "Please check the arguments!" % exp)
            raise
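
The todo dictionary above implements a small deferred-processing pattern: broks whose instance id cannot be converted yet are buffered under their raw id until the instance_name arrives, then flushed in order. The same idea in isolation, with illustrative names that are not part of the module:

class DeferredSync(object):
    def __init__(self):
        self.mapping = {}   # raw id -> converted id
        self.todo = {}      # raw id -> list of pending events

    def push(self, raw_id, event):
        # Process immediately if the mapping is already known
        if raw_id in self.mapping:
            return [(self.mapping[raw_id], event)]
        self.todo.setdefault(raw_id, []).append(event)
        return []

    def resolve(self, raw_id, converted_id):
        # Once the mapping is known, flush every buffered event
        self.mapping[raw_id] = converted_id
        return [(converted_id, e) for e in self.todo.pop(raw_id, [])]

sync = DeferredSync()
sync.push(1, 'program_status')   # buffered: mapping still unknown
print sync.resolve(1, 42)        # -> [(42, 'program_status')]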
Ejemplo n.º 17
class Ndodb_Mysql_broker(BaseModule):
    def __init__(self, conf):
        BaseModule.__init__(self, conf)
        # Mapping for name of data and transform function
        self.mapping = {
            "program_status": {
                "program_start": {"name": "program_start_time", "transform": de_unixify},
                "pid": {"name": "process_id", "transform": None},
                "last_alive": {"name": "status_update_time", "transform": de_unixify},
                "is_running": {"name": "is_currently_running", "transform": None},
            }
        }

        self.host = conf.host
        self.user = conf.user
        self.password = conf.password
        self.database = conf.database
        self.character_set = conf.character_set

    # Called by Broker so we can do init stuff
    # TODO : add conf param to get pass with init
    # Conf from arbiter!
    def init(self):
        print "I connect to NDO database"
        self.db = DBMysql(
            self.host, self.user, self.password, self.database, self.character_set, table_prefix="nagios_"
        )
        self.connect_database()

        # Cache for hosts and services;
        # will be flushed when we get a new instance id
        self.services_cache = {}
        self.hosts_cache = {}

    # Get a brok, parse it, and put it in the database.
    # We call functions like manage_ TYPEOFBROK _brok that return queries
    def manage_brok(self, b):
        # We've got a problem with instance_id == 0, so we add 1 everywhere
        if "instance_id" in b.data:
            b.data["instance_id"] = b.data["instance_id"] + 1
        # print "(Ndo) I search manager:", manager
        queries = BaseModule.manage_brok(self, b)
        if queries is not None:
            for q in queries:
                self.db.execute_query(q)
            return
        # print "(ndodb)I don't manage this brok type", b

    # Create the database connection
    # TODO : finish (begin :) ) error catch and conf parameters...
    def connect_database(self):
        self.db.connect_database()

    def get_host_object_id_by_name(self, host_name):
        # First look in cache.
        if host_name in self.hosts_cache:
            return self.hosts_cache[host_name]

        # Not in cache, not good
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='1'" % host_name
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            self.hosts_cache[host_name] = row[0]
            return row[0]

    def get_hostgroup_object_id_by_name(self, hostgroup_name):
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='3'" % hostgroup_name
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            return row[0]

    def get_service_object_id_by_name(self, host_name, service_description):
        # first look in cache
        if (host_name, service_description) in self.services_cache:
            return self.services_cache[(host_name, service_description)]

        # else: not in cache :(
        query = u"SELECT object_id from nagios_objects where name1='%s' and name2='%s' and objecttype_id='2'" % (
            host_name,
            service_description,
        )
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            self.services_cache[(host_name, service_description)] = row[0]
            return row[0]

    def get_servicegroup_object_id_by_name(self, servicegroup_name):
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='4'" % servicegroup_name
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            return row[0]
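
    # Note: the lookup queries above interpolate names straight into the SQL
    # string, which breaks (and is injectable) if a name contains a quote.
    # A hedged alternative, assuming access to the underlying MySQLdb-style
    # cursor (which this DBMysql wrapper may not expose), would be a
    # parameterized query, e.g.:
    #   cursor.execute("SELECT object_id FROM nagios_objects "
    #                  "WHERE name1=%s AND objecttype_id='1'", (host_name,))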

    # Ok, we are at launch and a scheduler asks for a clean start, OK...
    # So we create several queries to delete the rows matching our
    # instance_id in all the tables we use.
    # This brok must be sent at the beginning of a scheduler session;
    # if not, BAD THINGS MAY HAPPEN :)
    def manage_clean_all_my_instance_id_brok(self, b):
        instance_id = b.data["instance_id"]
        tables = [
            "commands",
            "contacts",
            "contactgroups",
            "hosts",
            "hostescalations",
            "hostgroups",
            "notifications",
            "services",
            "serviceescalations",
            "programstatus",
            "servicegroups",
            "timeperiods",
            "hostgroup_members",
            "contactgroup_members",
            "objects",
            "hoststatus",
            "servicestatus",
            "instances",
            "servicegroup_members",
        ]
        res = []
        for table in tables:
            q = "DELETE FROM %s WHERE instance_id = '%s' " % ("nagios_" + table, instance_id)
            res.append(q)

        # We also clean cache, because we are not sure about this data now
        print "[MySQL/NDO] Flushing caches"
        self.services_cache = {}
        self.hosts_cache = {}

        return res

    # Program status is .. the status of the program? :)
    # Like pid, daemon mode, last activity, etc.
    # We already cleaned the database, so we insert

    # TODO : fill nagios_instances
    def manage_program_status_brok(self, b):
        new_b = copy.deepcopy(b)

        # Must delete me first
        query_delete_instance = u"DELETE FROM %s WHERE instance_name = '%s' " % (
            "nagios_instances",
            b.data["instance_name"],
        )

        query_instance = self.db.create_insert_query(
            "instances",
            {
                "instance_name": new_b.data["instance_name"],
                "instance_description": new_b.data["instance_name"],
                "instance_id": new_b.data["instance_id"],
            },
        )

        to_del = ["instance_name", "command_file"]
        to_add = []
        mapping = self.mapping["program_status"]
        for prop in new_b.data:
            # ex : 'name' : 'program_start_time', 'transform'
            if prop in mapping:
                # print "Got a prop to change", prop
                val = new_b.data[prop]
                if mapping[prop]["transform"] is not None:
                    f = mapping[prop]["transform"]
                    val = f(val)
                new_name = mapping[prop]["name"]
                to_add.append((new_name, val))
                to_del.append(prop)
        for prop in to_del:
            del new_b.data[prop]
        for (name, val) in to_add:
            new_b.data[name] = val
        query = self.db.create_insert_query("programstatus", new_b.data)
        return [query_delete_instance, query_instance, query]

    # TODO : fill nagios_instances
    def manage_update_program_status_brok(self, b):
        new_b = copy.deepcopy(b)
        to_del = ["instance_name", "command_file"]
        to_add = []
        mapping = self.mapping["program_status"]
        for prop in new_b.data:
            # ex : 'name' : 'program_start_time', 'transform'
            if prop in mapping:
                # print "Got a prop to change", prop
                val = new_b.data[prop]
                if mapping[prop]["transform"] is not None:
                    f = mapping[prop]["transform"]
                    val = f(val)
                new_name = mapping[prop]["name"]
                to_add.append((new_name, val))
                to_del.append(prop)
        for prop in to_del:
            del new_b.data[prop]
        for (name, val) in to_add:
            new_b.data[name] = val
        where_clause = {"instance_id": new_b.data["instance_id"]}
        query = self.db.create_update_query("programstatus", new_b.data, where_clause)
        return [query]

    # A host has just been created, the database is clean, we INSERT it
    def manage_initial_host_status_brok(self, b):
        # new_b = copy.deepcopy(b)

        data = b.data

        # First add to nagios_objects
        objects_data = {
            "instance_id": data["instance_id"],
            "objecttype_id": 1,
            "name1": data["host_name"],
            "is_active": data["active_checks_enabled"],
        }
        object_query = self.db.create_insert_query("objects", objects_data)
        self.db.execute_query(object_query)

        host_id = self.get_host_object_id_by_name(data["host_name"])

        # print "DATA:", data
        hosts_data = {
            "host_id": data["id"],
            "instance_id": data["instance_id"],
            "host_object_id": host_id,
            "alias": data["alias"],
            "display_name": data["display_name"],
            "address": data["address"],
            "failure_prediction_options": "0",
            "check_interval": data["check_interval"],
            "retry_interval": data["retry_interval"],
            "max_check_attempts": data["max_check_attempts"],
            "first_notification_delay": data["first_notification_delay"],
            "notification_interval": data["notification_interval"],
            "flap_detection_enabled": data["flap_detection_enabled"],
            "low_flap_threshold": data["low_flap_threshold"],
            "high_flap_threshold": data["high_flap_threshold"],
            "process_performance_data": data["process_perf_data"],
            "freshness_checks_enabled": data["check_freshness"],
            "freshness_threshold": data["freshness_threshold"],
            "passive_checks_enabled": data["passive_checks_enabled"],
            "event_handler_enabled": data["event_handler_enabled"],
            "active_checks_enabled": data["active_checks_enabled"],
            "notifications_enabled": data["notifications_enabled"],
            "obsess_over_host": data["obsess_over_host"],
            "notes": data["notes"],
            "notes_url": data["notes_url"],
        }

        # print "HOST DATA", hosts_data
        query = self.db.create_insert_query("hosts", hosts_data)

        # Now create a hoststatus entry
        hoststatus_data = {
            "instance_id": data["instance_id"],
            "host_object_id": host_id,
            "normal_check_interval": data["check_interval"],
            "retry_check_interval": data["retry_interval"],
            "max_check_attempts": data["max_check_attempts"],
            "current_state": data["state_id"],
            "state_type": data["state_type_id"],
            "passive_checks_enabled": data["passive_checks_enabled"],
            "event_handler_enabled": data["event_handler_enabled"],
            "active_checks_enabled": data["active_checks_enabled"],
            "notifications_enabled": data["notifications_enabled"],
            "obsess_over_host": data["obsess_over_host"],
            "process_performance_data": data["process_perf_data"],
            "check_type": 0,
            "current_check_attempt": data["attempt"],
            "execution_time": data["execution_time"],
            "latency": data["latency"],
            "output": data["output"],
            "perfdata": data["perf_data"],
            "last_check": de_unixify(data["last_chk"]),
            "last_hard_state_change": de_unixify(data["last_hard_state_change"]),
            "problem_has_been_acknowledged": data["problem_has_been_acknowledged"],
            "acknowledgement_type": data["acknowledgement_type"],
            # set check to 1 so nagvis is happy
            "has_been_checked": 1,
        }
        hoststatus_query = self.db.create_insert_query("hoststatus", hoststatus_data)

        return [query, hoststatus_query]

    # A service has just been created, the database is clean, we INSERT it
    def manage_initial_service_status_brok(self, b):
        # new_b = copy.deepcopy(b)

        data = b.data
        # First add to nagios_objects
        objects_data = {
            "instance_id": data["instance_id"],
            "objecttype_id": 2,
            "name1": data["host_name"],
            "name2": data["service_description"],
            "is_active": data["active_checks_enabled"],
        }
        object_query = self.db.create_insert_query("objects", objects_data)
        self.db.execute_query(object_query)

        host_id = self.get_host_object_id_by_name(data["host_name"])
        service_id = self.get_service_object_id_by_name(data["host_name"], data["service_description"])

        # print "DATA:", data
        # print "HOST ID:", host_id
        # print "SERVICE ID:", service_id
        services_data = {
            "service_id": data["id"],
            "instance_id": data["instance_id"],
            "service_object_id": service_id,
            "host_object_id": host_id,
            "display_name": data["display_name"],
            "failure_prediction_options": "0",
            "check_interval": data["check_interval"],
            "retry_interval": data["retry_interval"],
            "max_check_attempts": data["max_check_attempts"],
            "first_notification_delay": data["first_notification_delay"],
            "notification_interval": data["notification_interval"],
            "flap_detection_enabled": data["flap_detection_enabled"],
            "low_flap_threshold": data["low_flap_threshold"],
            "high_flap_threshold": data["high_flap_threshold"],
            "process_performance_data": data["process_perf_data"],
            "freshness_checks_enabled": data["check_freshness"],
            "freshness_threshold": data["freshness_threshold"],
            "passive_checks_enabled": data["passive_checks_enabled"],
            "event_handler_enabled": data["event_handler_enabled"],
            "active_checks_enabled": data["active_checks_enabled"],
            "notifications_enabled": data["notifications_enabled"],
            "obsess_over_service": data["obsess_over_service"],
            "notes": data["notes"],
            "notes_url": data["notes_url"],
        }

        # print "HOST DATA", hosts_data
        query = self.db.create_insert_query("services", services_data)

        # Now create a servicestatus entry
        servicestatus_data = {
            "instance_id": data["instance_id"],
            "service_object_id": service_id,
            "normal_check_interval": data["check_interval"],
            "retry_check_interval": data["retry_interval"],
            "max_check_attempts": data["max_check_attempts"],
            "current_state": data["state_id"],
            "state_type": data["state_type_id"],
            "passive_checks_enabled": data["passive_checks_enabled"],
            "event_handler_enabled": data["event_handler_enabled"],
            "active_checks_enabled": data["active_checks_enabled"],
            "notifications_enabled": data["notifications_enabled"],
            "obsess_over_service": data["obsess_over_service"],
            "process_performance_data": data["process_perf_data"],
            "check_type": 0,
            "current_check_attempt": data["attempt"],
            "execution_time": data["execution_time"],
            "latency": data["latency"],
            "output": data["output"],
            "perfdata": data["perf_data"],
            "last_check": de_unixify(data["last_chk"]),
            "last_hard_state_change": de_unixify(data["last_hard_state_change"]),
            "problem_has_been_acknowledged": data["problem_has_been_acknowledged"],
            "acknowledgement_type": data["acknowledgement_type"],
            # set check to 1 so nagvis is happy
            "has_been_checked": 1,
        }
        servicestatus_query = self.db.create_insert_query("servicestatus", servicestatus_data)

        return [query, servicestatus_query]

    # A new hostgroup? Insert it.
    # We need to handle the members property (host.id, host_name):
    # it feeds the hostgroup_members table with host and hostgroup ids.
    def manage_initial_hostgroup_status_brok(self, b):
        data = b.data

        # First add to nagios_objects
        objects_data = {
            "instance_id": data["instance_id"],
            "objecttype_id": 3,
            "name1": data["hostgroup_name"],
            "is_active": 1,
        }
        object_query = self.db.create_insert_query("objects", objects_data)
        self.db.execute_query(object_query)

        hostgroup_id = self.get_hostgroup_object_id_by_name(data["hostgroup_name"])

        hostgroups_data = {
            "hostgroup_id": data["id"],
            "instance_id": data["instance_id"],
            "config_type": 0,
            "hostgroup_object_id": hostgroup_id,
            "alias": data["alias"],
        }

        query = self.db.create_insert_query("hostgroups", hostgroups_data)
        res = [query]

        # Ok, the hostgroups table is up to date, now we add relations
        # between hosts and hostgroups
        for (h_id, h_name) in b.data["members"]:
            host_id = self.get_host_object_id_by_name(h_name)
            hostgroup_members_data = {
                "instance_id": data["instance_id"],
                "hostgroup_id": data["id"],
                "host_object_id": host_id,
            }
            q = self.db.create_insert_query("hostgroup_members", hostgroup_members_data)
            res.append(q)
        return res

    # A new servicegroup? Insert it.
    # We need to handle the members property (service.id, service_name):
    # it feeds the servicegroup_members table with service and servicegroup ids.
    def manage_initial_servicegroup_status_brok(self, b):
        data = b.data

        # First add to nagios_objects
        objects_data = {
            "instance_id": data["instance_id"],
            "objecttype_id": 4,
            "name1": data["servicegroup_name"],
            "is_active": 1,
        }
        object_query = self.db.create_insert_query("objects", objects_data)
        self.db.execute_query(object_query)

        servicegroup_id = self.get_servicegroup_object_id_by_name(data["servicegroup_name"])

        servicegroups_data = {
            "servicegroup_id": data["id"],
            "instance_id": data["instance_id"],
            "config_type": 0,
            "servicegroup_object_id": servicegroup_id,
            "alias": data["alias"],
        }

        query = self.db.create_insert_query("servicegroups", servicegroups_data)
        res = [query]

        # Ok, the servicegroups table is up to date, now we add relations
        # between services and servicegroups
        for (s_id, s_name) in b.data["members"]:
            servicegroup_members_data = {
                "instance_id": data["instance_id"],
                "servicegroup_id": data["id"],
                "service_object_id": s_id,
            }
            q = self.db.create_insert_query("servicegroup_members", servicegroup_members_data)
            res.append(q)
        return res

    # Same as the service check result, but for hosts
    def manage_host_check_result_brok(self, b):
        data = b.data
        # print "DATA", data
        host_id = self.get_host_object_id_by_name(data["host_name"])
        # Only the host is impacted
        where_clause = {"host_object_id": host_id}
        host_check_data = {
            "instance_id": data["instance_id"],
            "check_type": 0,
            "is_raw_check": 0,
            "current_check_attempt": data["attempt"],
            "state": data["state_id"],
            "state_type": data["state_type_id"],
            "start_time": data["start_time"],
            "start_time_usec": 0,
            "execution_time": data["execution_time"],
            "latency": data["latency"],
            "return_code": data["return_code"],
            "output": data["output"],
            "perfdata": data["perf_data"],
        }
        query = self.db.create_update_query("hostchecks", host_check_data, where_clause)

        # Now hoststatus
        hoststatus_data = {
            "instance_id": data["instance_id"],
            "check_type": 0,
            "current_check_attempt": data["attempt"],
            "current_state": data["state_id"],
            "state_type": data["state_type_id"],
            "execution_time": data["execution_time"],
            "latency": data["latency"],
            "output": data["output"],
            "perfdata": data["perf_data"],
            "last_check": de_unixify(data["last_chk"]),
        }
        hoststatus_query = self.db.create_update_query("hoststatus", hoststatus_data, where_clause)

        return [query, hoststatus_query]

    # The next schedule gets its own brok:
    # catch it and just update next_check with it
    def manage_host_next_schedule_brok(self, b):
        data = b.data
        host_id = self.get_host_object_id_by_name(data["host_name"])
        # Only the host is impacted
        where_clause = {"host_object_id": host_id}

        # Just update the host status
        hoststatus_data = {"next_check": de_unixify(data["next_chk"])}
        hoststatus_query = self.db.create_update_query("hoststatus", hoststatus_data, where_clause)

        return [hoststatus_query]

    # Same as the host check result, but for services
    def manage_service_check_result_brok(self, b):
        data = b.data
        # print "DATA", data
        service_id = self.get_service_object_id_by_name(data["host_name"], data["service_description"])

        # Only the service is impacted
        where_clause = {"service_object_id": service_id}
        service_check_data = {
            "instance_id": data["instance_id"],
            "check_type": 0,
            "current_check_attempt": data["attempt"],
            "state": data["state_id"],
            "state_type": data["state_type_id"],
            "start_time": data["start_time"],
            "start_time_usec": 0,
            "execution_time": data["execution_time"],
            "latency": data["latency"],
            "return_code": data["return_code"],
            "output": data["output"],
            "perfdata": data["perf_data"],
        }
        query = self.db.create_update_query("servicechecks", service_check_data, where_clause)

        # Now servicestatus
        servicestatus_data = {
            "instance_id": data["instance_id"],
            "check_type": 0,
            "current_check_attempt": data["attempt"],
            "current_state": data["state_id"],
            "state_type": data["state_type_id"],
            "execution_time": data["execution_time"],
            "latency": data["latency"],
            "output": data["output"],
            "perfdata": data["perf_data"],
            "last_check": de_unixify(data["last_chk"]),
        }

        servicestatus_query = self.db.create_update_query("servicestatus", servicestatus_data, where_clause)

        return [query, servicestatus_query]

    # The next schedule gets its own brok:
    # catch it and just update next_check with it
    def manage_service_next_schedule_brok(self, b):
        data = b.data
        # print "DATA", data
        service_id = self.get_service_object_id_by_name(data["host_name"], data["service_description"])

        # Only the service is impacted
        where_clause = {"service_object_id": service_id}

        # Just update the service status
        servicestatus_data = {"next_check": de_unixify(data["next_chk"])}
        servicestatus_query = self.db.create_update_query("servicestatus", servicestatus_data, where_clause)

        return [servicestatus_query]

    # Ok the host is updated
    def manage_update_host_status_brok(self, b):
        data = b.data
        host_id = self.get_host_object_id_by_name(data["host_name"])

        hosts_data = {
            "instance_id": data["instance_id"],
            "failure_prediction_options": "0",
            "check_interval": data["check_interval"],
            "retry_interval": data["retry_interval"],
            "max_check_attempts": data["max_check_attempts"],
            "first_notification_delay": data["first_notification_delay"],
            "notification_interval": data["notification_interval"],
            "flap_detection_enabled": data["flap_detection_enabled"],
            "low_flap_threshold": data["low_flap_threshold"],
            "high_flap_threshold": data["high_flap_threshold"],
            "process_performance_data": data["process_perf_data"],
            "freshness_checks_enabled": data["check_freshness"],
            "freshness_threshold": data["freshness_threshold"],
            "passive_checks_enabled": data["passive_checks_enabled"],
            "event_handler_enabled": data["event_handler_enabled"],
            "active_checks_enabled": data["active_checks_enabled"],
            "notifications_enabled": data["notifications_enabled"],
            "obsess_over_host": data["obsess_over_host"],
            "notes": data["notes"],
            "notes_url": data["notes_url"],
        }
        # Only the host is impacted
        where_clause = {"host_object_id": host_id}

        query = self.db.create_update_query("hosts", hosts_data, where_clause)

        # Now update the hoststatus entry
        hoststatus_data = {
            "instance_id": data["instance_id"],
            "host_object_id": host_id,
            "normal_check_interval": data["check_interval"],
            "retry_check_interval": data["retry_interval"],
            "max_check_attempts": data["max_check_attempts"],
            "current_state": data["state_id"],
            "state_type": data["state_type_id"],
            "passive_checks_enabled": data["passive_checks_enabled"],
            "event_handler_enabled": data["event_handler_enabled"],
            "active_checks_enabled": data["active_checks_enabled"],
            "notifications_enabled": data["notifications_enabled"],
            "obsess_over_host": data["obsess_over_host"],
            "process_performance_data": data["process_perf_data"],
            "check_type": 0,
            "current_check_attempt": data["attempt"],
            "execution_time": data["execution_time"],
            "latency": data["latency"],
            "output": data["output"],
            "perfdata": data["perf_data"],
            "last_check": de_unixify(data["last_chk"]),
            "last_hard_state_change": de_unixify(data["last_hard_state_change"]),
            "problem_has_been_acknowledged": data["problem_has_been_acknowledged"],
            "acknowledgement_type": data["acknowledgement_type"],
            # set check to 1 so nagvis is happy
            "has_been_checked": 1,
        }
        hoststatus_query = self.db.create_update_query("hoststatus", hoststatus_data, where_clause)

        return [query, hoststatus_query]

    # Ok the service is updated
    def manage_update_service_status_brok(self, b):
        data = b.data

        service_id = self.get_service_object_id_by_name(data["host_name"], data["service_description"])

        services_data = {
            "instance_id": data["instance_id"],
            "display_name": data["display_name"],
            "failure_prediction_options": "0",
            "check_interval": data["check_interval"],
            "retry_interval": data["retry_interval"],
            "max_check_attempts": data["max_check_attempts"],
            "first_notification_delay": data["first_notification_delay"],
            "notification_interval": data["notification_interval"],
            "flap_detection_enabled": data["flap_detection_enabled"],
            "low_flap_threshold": data["low_flap_threshold"],
            "high_flap_threshold": data["high_flap_threshold"],
            "process_performance_data": data["process_perf_data"],
            "freshness_checks_enabled": data["check_freshness"],
            "freshness_threshold": data["freshness_threshold"],
            "passive_checks_enabled": data["passive_checks_enabled"],
            "event_handler_enabled": data["event_handler_enabled"],
            "active_checks_enabled": data["active_checks_enabled"],
            "notifications_enabled": data["notifications_enabled"],
            "obsess_over_service": data["obsess_over_service"],
            "notes": data["notes"],
            "notes_url": data["notes_url"],
        }

        # Only the service is impacted
        where_clause = {"service_object_id": service_id, "service_id": data["id"]}
        # where_clause = {'host_name' : data['host_name']}
        query = self.db.create_update_query("services", services_data, where_clause)

        # Now update the servicestatus entry
        servicestatus_data = {
            "instance_id": data["instance_id"],
            "service_object_id": service_id,
            "normal_check_interval": data["check_interval"],
            "retry_check_interval": data["retry_interval"],
            "max_check_attempts": data["max_check_attempts"],
            "current_state": data["state_id"],
            "state_type": data["state_type_id"],
            "passive_checks_enabled": data["passive_checks_enabled"],
            "event_handler_enabled": data["event_handler_enabled"],
            "active_checks_enabled": data["active_checks_enabled"],
            "notifications_enabled": data["notifications_enabled"],
            "obsess_over_service": data["obsess_over_service"],
            "process_performance_data": data["process_perf_data"],
            "check_type": 0,
            "current_check_attempt": data["attempt"],
            "execution_time": data["execution_time"],
            "latency": data["latency"],
            "output": data["output"],
            "perfdata": data["perf_data"],
            "last_check": de_unixify(data["last_chk"]),
            "last_hard_state_change": de_unixify(data["last_hard_state_change"]),
            "problem_has_been_acknowledged": data["problem_has_been_acknowledged"],
            "acknowledgement_type": data["acknowledgement_type"],
            # set check to 1 so nagvis is happy
            "has_been_checked": 1,
        }

        where_clause = {"service_object_id": service_id}
        servicestatus_query = self.db.create_update_query("servicestatus", servicestatus_data, where_clause)

        return [query, servicestatus_query]

    # A contact has just been created, the database is clean, we INSERT it
    def manage_initial_contact_status_brok(self, b):
        # new_b = copy.deepcopy(b)
        data = b.data
        # print "DATA:", data

        contacts_data = {
            "contact_id": data["id"],
            "instance_id": data["instance_id"],
            "contact_object_id": data["id"],
            "alias": data["alias"],
            "email_address": data["email"],
            "pager_address": data["pager"],
            "host_notifications_enabled": data["host_notifications_enabled"],
            "service_notifications_enabled": data["service_notifications_enabled"],
        }

        # print "HOST DATA", hosts_data
        query = self.db.create_insert_query("contacts", contacts_data)
        return [query]

    # A new contactgroup? Insert it.
    # We need to handle the members property (contact.id, contact_name):
    # it feeds the contactgroup_members table with contact and contactgroup ids.
    def manage_initial_contactgroup_status_brok(self, b):
        data = b.data

        contactgroups_data = {
            "contactgroup_id": data["id"],
            "instance_id": data["instance_id"],
            "config_type": 0,
            "contactgroup_object_id": data["id"],
            "alias": data["alias"],
        }

        query = self.db.create_insert_query("contactgroups", contactgroups_data)
        res = [query]

        # Ok, the contactgroups table is up to date, now we add relations
        # between contacts and contactgroups
        for (c_id, c_name) in b.data["members"]:
            # print c_name
            contactgroup_members_data = {
                "instance_id": data["instance_id"],
                "contactgroup_id": data["id"],
                "contact_object_id": c_id,
            }
            q = self.db.create_insert_query("contactgroup_members", contactgroup_members_data)
            res.append(q)
        return res
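
The get_*_object_id_by_name helpers above all follow the same memoized-lookup shape: check a local dict first, fall back to a database query, cache only the hits, and return 0 when nothing is found. The pattern in isolation, with a stand-in fetch function instead of the real SELECT:

def make_cached_lookup(fetch):
    cache = {}
    def lookup(key):
        if key in cache:
            return cache[key]
        row = fetch(key)          # stand-in for execute_query + fetchone
        if row is None or len(row) < 1:
            return 0              # same 'not found' convention as above
        cache[key] = row[0]
        return row[0]
    return lookup

get_id = make_cached_lookup(lambda name: (7,) if name == 'srv1' else None)
print get_id('srv1')    # 7 (fetched, then cached)
print get_id('srv1')    # 7 (served from the cache)
print get_id('other')   # 0 (not found, deliberately not cached)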
Ejemplo n.º 18
class Glpidb_broker(BaseModule):
    def __init__(self, modconf, host=None, user=None, password=None, database=None, character_set=None, database_path=None):
        #Mapping for name of data, rename attributes and transform function
        self.mapping = {
           #Host
           'host_check_result' : {
               'plugin_monitoring_services_id' : {'transform' : None},
               'event' : {'transform' : None},
               'perf_data' : {'transform' : None},
               'output' : {'transform' : None},
               'state' : {'transform' : None},
               'latency' : {'transform' : None},
               'execution_time' : {'transform' : None},
               'state_type' : {'transform' : None},
               },
           #Service
           'service_check_result' : {
               'plugin_monitoring_services_id' : {'transform' : None},
               'plugin_monitoring_servicescatalogs_id' : {'transform' : None},
               'event' : {'transform' : None},
               'perf_data' : {'transform' : None},
               'output' : {'transform' : None},
               'state' : {'transform' : None},
               'latency' : {'transform' : None},
               'execution_time' : {'transform' : None},
               'state_type' : {'transform' : None},
               }
           }
        # Last state of check
#        self.checkstatus = {
#           '0' : None,
#           }
        BaseModule.__init__(self, modconf)
        self.host = host
        self.user = user
        self.password = password
        self.database = database
        self.character_set = character_set
        self.database_path = database_path
        
        from shinken.db_mysql import DBMysql
        print "Creating a mysql backend"
        self.db_backend = DBMysql(host, user, password, database, character_set)



    #Called by Broker so we can do init stuff
    #TODO : add conf param to get pass with init
    #Conf from arbiter!
    def init(self):
        print "I connect to Glpi database"
        self.db_backend.connect_database()




    def preprocess(self, type, brok, checkst):
        new_brok = copy.deepcopy(brok)        
        #Only preprocess if we can apply a mapping
        if type in self.mapping:
            #print "brok data : ", brok.data
            try:
                # service_description is expected to look like
                # 'xxx-<id>' or 'xxx-<id>-businessrules...'
                s = brok.data['service_description'].split('-')
                try:
                    if 'businessrules' in s[2]:
                        new_brok.data['plugin_monitoring_servicescatalogs_id'] = s[1]
                except:
                    # No third part: a plain service, not a business rule
                    new_brok.data['plugin_monitoring_services_id'] = s[1]
                    new_brok.data['event'] = brok.data['output']
            except:
                # No service_description: try the same parsing on host_name
                try:
                    s = brok.data['host_name'].split('-')
                    new_brok.data['plugin_monitoring_services_id'] = s[1]
                    new_brok.data['event'] = brok.data['output']
                except:
                    pass
            to_del = []
            to_add = []
            mapping = self.mapping[brok.type]
            for prop in new_brok.data:
                # ex: 'name': 'program_start_time', 'transform'
                if prop in mapping:
                    #print "Got a prop to change", prop
                    val = new_brok.data[prop]
                    if mapping[prop]['transform'] is not None:
                        print "Call function for", type, prop
                        f = mapping[prop]['transform']
                        val = f(val)
                    name = prop
                    if 'name' in mapping[prop]:
                        name = mapping[prop]['name']
                    to_add.append((name, val))
                    to_del.append(prop)
                else:
                    to_del.append(prop)
            for prop in to_del:
                del new_brok.data[prop]
            for (name, val) in to_add:
                new_brok.data[name] = val
        else:
            print "No preprocess type", brok.type
            print brok.data
        return new_brok



    #Get a brok, parse it, and put it in the database.
    #We call functions like manage_ TYPEOFBROK _brok that return queries
    def manage_brok(self, b):
        type = b.type
        # To update check in glpi_plugin_monitoring_hosts
        manager = 'manage_'+type+'up_brok'
        if hasattr(self, manager):
            new_b = self.preprocess(type, b, 0)
            f = getattr(self, manager)
            queries = f(new_b)
            #Ok, we've got queries, now: run them!
            for q in queries:
                self.db_backend.execute_query(q)
        manager = 'manage_'+type+'_brok'
        if hasattr(self, manager):
            new_b = self.preprocess(type, b, '1')
            if 'host_name' in new_b.data:
                if 'plugin_monitoring_services_id' not in new_b.data:
                    return
            f = getattr(self, manager)
            queries = f(new_b)
            #Ok, we've got queries, now: run them!
            for q in queries:
                self.db_backend.execute_query(q)
            return


    #Host result
    #def manage_host_check_result_brok(self, b):
        #logger.info("GLPI : data in DB %s " % b)
        #b.data['date'] = time.strftime('%Y-%m-%d %H:%M:%S')
        #query = self.db_backend.create_insert_query('glpi_plugin_monitoring_serviceevents', b.data)
        #return [query]


    #Host result
    #def manage_host_check_resultup_brok(self, b):
        #logger.info("GLPI : data in DB %s " % b)
        #new_data = copy.deepcopy(b.data)
        #new_data['last_check'] = time.strftime('%Y-%m-%d %H:%M:%S')
        #new_data['id'] = b.data['plugin_monitoring_services_id']
        #del new_data['plugin_monitoring_services_id']
        #del new_data['perf_data']
        #del new_data['output']
        #del new_data['latency']
        #del new_data['execution_time']
        #where_clause = {'id' : new_data['id']}
        #query = self.db_backend.create_update_query('glpi_plugin_monitoring_services', new_data, where_clause)
        #return [query]

    #Service result
    def manage_service_check_result_brok(self, b):
        #logger.info("GLPI : data in DB %s " % b)
        if 'plugin_monitoring_servicescatalogs_id' in b.data:
            # Services catalog (business rules) entries are not logged as events
            return []
        b.data['date'] = time.strftime('%Y-%m-%d %H:%M:%S')
        #print "Add event service : ", b.data
        query = self.db_backend.create_insert_query('glpi_plugin_monitoring_serviceevents', b.data)
        return [query]

    #Service result
    def manage_service_check_resultup_brok(self, b):
        """If a host is defined locally (in shinken) and not in GLPI,
           we must not edit GLPI datas !
        """
        if 'plugin_monitoring_servicescatalogs_id' not in b.data and\
           'plugin_monitoring_services_id'         not in b.data:
            return list()

        logger.info("GLPI : data in DB %s " % b.data)
        new_data = copy.deepcopy(b.data)
        new_data['last_check'] = time.strftime('%Y-%m-%d %H:%M:%S')
        del new_data['perf_data']
        del new_data['output']
        del new_data['latency']
        del new_data['execution_time']
        if 'plugin_monitoring_servicescatalogs_id' in b.data:
            new_data['id'] = b.data['plugin_monitoring_servicescatalogs_id']
            del new_data['plugin_monitoring_servicescatalogs_id']
            table = 'glpi_plugin_monitoring_servicescatalogs'
        else:
            new_data['id'] = b.data['plugin_monitoring_services_id']
            del new_data['plugin_monitoring_services_id']
            table = 'glpi_plugin_monitoring_services'

        where_clause = {'id' : new_data['id']}
        #print "Update service : ", new_data
        query = self.db_backend.create_update_query(table, new_data, where_clause)
        return [query]
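
The preprocess method above pushes every brok through a mapping dict that can rename a field, transform its value, or drop it when it is not mapped at all. The same transformation in isolation, with illustrative field names:

def apply_mapping(data, mapping):
    out = {}
    for prop, val in data.items():
        if prop not in mapping:
            continue                        # unmapped fields are dropped
        rule = mapping[prop]
        if rule.get('transform') is not None:
            val = rule['transform'](val)    # e.g. de_unixify on timestamps
        out[rule.get('name', prop)] = val   # rename if a 'name' is given
    return out

mapping = {'last_chk': {'name': 'last_check', 'transform': int},
           'output': {'transform': None}}
print apply_mapping({'last_chk': '1433854693', 'output': 'OK', 'junk': 1}, mapping)
# -> {'last_check': 1433854693, 'output': 'OK'}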
Ejemplo n.º 19
    def create_db(self):
        self.db = DBMysql(host='localhost',
                          user='******',
                          password='******',
                          database='merlin',
                          character_set='utf8')
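
The credentials in this snippet are masked in the source. In the broker examples above they come from the module configuration instead; a small variant building the same connection from a conf-like object (the conf.host / conf.user / ... attributes mirror their usage above, the helper name is made up):

def create_db_from_conf(conf):
    return DBMysql(host=conf.host,
                   user=conf.user,
                   password=conf.password,
                   database=conf.database,
                   character_set=conf.character_set)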
Ejemplo n.º 20
class Ndodb_Mysql_broker(BaseModule):
    def __init__(self, conf):
        BaseModule.__init__(self, conf)
        #Mapping for name of data and transform function
        self.mapping = {
            'program_status': {
                'program_start': {
                    'name': 'program_start_time',
                    'transform': de_unixify
                },
                'pid': {
                    'name': 'process_id',
                    'transform': None
                },
                'last_alive': {
                    'name': 'status_update_time',
                    'transform': de_unixify
                },
                'is_running': {
                    'name': 'is_currently_running',
                    'transform': None
                }
            },
        }

        self.host = conf.host
        self.user = conf.user
        self.password = conf.password
        self.database = conf.database
        self.character_set = conf.character_set

    #Called by Broker so we can do init stuff
    #TODO : add conf param to get pass with init
    #Conf from arbiter!
    def init(self):
        print "I connect to NDO database"
        self.db = DBMysql(self.host,
                          self.user,
                          self.password,
                          self.database,
                          self.character_set,
                          table_prefix='nagios_')
        self.connect_database()

        #Cache for hosts and services;
        #will be flushed when we get a new instance id
        self.services_cache = {}
        self.hosts_cache = {}

    #Get a brok, parse it, and put it in the database.
    #We call functions like manage_ TYPEOFBROK _brok that return queries
    def manage_brok(self, b):
        #We've got a problem with instance_id == 0, so we add 1 everywhere
        if 'instance_id' in b.data:
            b.data['instance_id'] = b.data['instance_id'] + 1
        #print "(Ndo) I search manager:", manager
        queries = BaseModule.manage_brok(self, b)
        if queries is not None:
            for q in queries:
                self.db.execute_query(q)
            return
        #print "(ndodb)I don't manage this brok type", b

    #Create the database connection
    #TODO : finish (begin :) ) error catch and conf parameters...
    def connect_database(self):
        self.db.connect_database()

    def get_host_object_id_by_name(self, host_name):
        #First look in cache.
        if host_name in self.hosts_cache:
            return self.hosts_cache[host_name]

        #Not in cache, not good
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='1'" % host_name
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            self.hosts_cache[host_name] = row[0]
            return row[0]

    def get_hostgroup_object_id_by_name(self, hostgroup_name):
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='3'" % hostgroup_name
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            return row[0]

    def get_service_object_id_by_name(self, host_name, service_description):
        #first look in cache
        if (host_name, service_description) in self.services_cache:
            return self.services_cache[(host_name, service_description)]

        #else: not in cache :(
        query = u"SELECT object_id from nagios_objects where name1='%s' and name2='%s' and objecttype_id='2'" % (
            host_name, service_description)
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            self.services_cache[(host_name, service_description)] = row[0]
            return row[0]

    def get_servicegroup_object_id_by_name(self, servicegroup_name):
        query = u"SELECT object_id from nagios_objects where name1='%s' and objecttype_id='4'" % servicegroup_name
        self.db.execute_query(query)
        row = self.db.fetchone()
        if row is None or len(row) < 1:
            return 0
        else:
            return row[0]

    #Ok, we are at launch and a scheduler asks for a clean start, OK...
    #So we create several queries to delete the rows matching our
    #instance_id in all the tables we use.
    #This brok must be sent at the beginning of a scheduler session;
    #if not, BAD THINGS MAY HAPPEN :)
    def manage_clean_all_my_instance_id_brok(self, b):
        instance_id = b.data['instance_id']
        tables = [
            'commands', 'contacts', 'contactgroups', 'hosts',
            'hostescalations', 'hostgroups', 'notifications', 'services',
            'serviceescalations', 'programstatus', 'servicegroups',
            'timeperiods', 'hostgroup_members', 'contactgroup_members',
            'objects', 'hoststatus', 'servicestatus', 'instances',
            'servicegroup_members'
        ]
        res = []
        for table in tables:
            q = "DELETE FROM %s WHERE instance_id = '%s' " % (
                'nagios_' + table, instance_id)
            res.append(q)

        #We also clean cache, because we are not sure about this data now
        print "[MySQL/NDO] Flushing caches"
        self.services_cache = {}
        self.hosts_cache = {}

        return res

    #Program status is .. the status of the program? :)
    #Like pid, daemon mode, last activity, etc.
    #We already cleaned the database, so we insert

    #TODO : fill nagios_instances
    def manage_program_status_brok(self, b):
        new_b = copy.deepcopy(b)

        #Must delete me first
        query_delete_instance = u"DELETE FROM %s WHERE instance_name = '%s' " % (
            'nagios_instances', b.data['instance_name'])

        query_instance = self.db.create_insert_query(
            'instances',
            {'instance_name': new_b.data['instance_name'],
             'instance_description': new_b.data['instance_name'],
             'instance_id': new_b.data['instance_id']})

        to_del = ['instance_name', 'command_file']
        to_add = []
        mapping = self.mapping['program_status']
        for prop in new_b.data:
            #ex : 'name' : 'program_start_time', 'transform'
            if prop in mapping:
                #print "Got a prop to change", prop
                val = new_b.data[prop]
                if mapping[prop]['transform'] is not None:
                    f = mapping[prop]['transform']
                    val = f(val)
                new_name = mapping[prop]['name']
                to_add.append((new_name, val))
                to_del.append(prop)
        for prop in to_del:
            del new_b.data[prop]
        for (name, val) in to_add:
            new_b.data[name] = val
        query = self.db.create_insert_query('programstatus', new_b.data)
        return [query_delete_instance, query_instance, query]

    #TODO : fill nagios_instances
    def manage_update_program_status_brok(self, b):
        new_b = copy.deepcopy(b)
        to_del = ['instance_name', 'command_file']
        to_add = []
        mapping = self.mapping['program_status']
        for prop in new_b.data:
            #ex : 'name' : 'program_start_time', 'transform'
            if prop in mapping:
                #print "Got a prop to change", prop
                val = new_b.data[prop]
                if mapping[prop]['transform'] is not None:
                    f = mapping[prop]['transform']
                    val = f(val)
                new_name = mapping[prop]['name']
                to_add.append((new_name, val))
                to_del.append(prop)
        for prop in to_del:
            del new_b.data[prop]
        for (name, val) in to_add:
            new_b.data[name] = val
        where_clause = {'instance_id': new_b.data['instance_id']}
        query = self.db.create_update_query('programstatus', new_b.data,
                                            where_clause)
        return [query]

    #A host has just been created, the database is clean, we INSERT it
    def manage_initial_host_status_brok(self, b):
        #new_b = copy.deepcopy(b)

        data = b.data

        #First add to nagios_objects
        objects_data = {
            'instance_id': data['instance_id'],
            'objecttype_id': 1,
            'name1': data['host_name'],
            'is_active': data['active_checks_enabled']
        }
        object_query = self.db.create_insert_query('objects', objects_data)
        self.db.execute_query(object_query)

        host_id = self.get_host_object_id_by_name(data['host_name'])

        #print "DATA:", data
        hosts_data = {
            'host_id': data['id'],
            'instance_id': data['instance_id'],
            'host_object_id': host_id,
            'alias': data['alias'],
            'display_name': data['display_name'],
            'address': data['address'],
            'failure_prediction_options': '0',
            'check_interval': data['check_interval'],
            'retry_interval': data['retry_interval'],
            'max_check_attempts': data['max_check_attempts'],
            'first_notification_delay': data['first_notification_delay'],
            'notification_interval': data['notification_interval'],
            'flap_detection_enabled': data['flap_detection_enabled'],
            'low_flap_threshold': data['low_flap_threshold'],
            'high_flap_threshold': data['high_flap_threshold'],
            'process_performance_data': data['process_perf_data'],
            'freshness_checks_enabled': data['check_freshness'],
            'freshness_threshold': data['freshness_threshold'],
            'passive_checks_enabled': data['passive_checks_enabled'],
            'event_handler_enabled': data['event_handler_enabled'],
            'active_checks_enabled': data['active_checks_enabled'],
            'notifications_enabled': data['notifications_enabled'],
            'obsess_over_host': data['obsess_over_host'],
            'notes': data['notes'],
            'notes_url': data['notes_url']
        }

        #print "HOST DATA", hosts_data
        query = self.db.create_insert_query('hosts', hosts_data)

        #Now create an hoststatus entry
        hoststatus_data = {
            'instance_id': data['instance_id'],
            'host_object_id': host_id,
            'normal_check_interval': data['check_interval'],
            'retry_check_interval': data['retry_interval'],
            'max_check_attempts': data['max_check_attempts'],
            'current_state': data['state_id'],
            'state_type': data['state_type_id'],
            'passive_checks_enabled': data['passive_checks_enabled'],
            'event_handler_enabled': data['event_handler_enabled'],
            'active_checks_enabled': data['active_checks_enabled'],
            'notifications_enabled': data['notifications_enabled'],
            'obsess_over_host': data['obsess_over_host'],
            'process_performance_data': data['process_perf_data'],
            'check_type': 0,
            'current_check_attempt': data['attempt'],
            'execution_time': data['execution_time'],
            'latency': data['latency'],
            'output': data['output'],
            'perfdata': data['perf_data'],
            'last_check': de_unixify(data['last_chk']),
            'last_hard_state_change':
                de_unixify(data['last_hard_state_change']),
            'problem_has_been_acknowledged':
                data['problem_has_been_acknowledged'],
            'acknowledgement_type': data['acknowledgement_type'],
            # Set has_been_checked to 1 so NagVis is happy
            'has_been_checked': 1,
        }
        hoststatus_query = self.db.create_insert_query('hoststatus',
                                                       hoststatus_data)

        return [query, hoststatus_query]
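
    # de_unixify is defined earlier in the module; a minimal sketch of its
    # assumed behavior, turning epoch seconds into the 'YYYY-MM-DD HH:MM:SS'
    # string that MySQL DATETIME columns expect (the sketch name and the
    # local-time choice are assumptions):
    @staticmethod
    def _de_unixify_sketch(t):
        import time
        return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t))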

    # A service has just been created and the database is clean, so we INSERT it
    def manage_initial_service_status_brok(self, b):
        data = b.data

        # First add it to nagios_objects
        objects_data = {
            'instance_id': data['instance_id'],
            'objecttype_id': 2,
            'name1': data['host_name'],
            'name2': data['service_description'],
            'is_active': data['active_checks_enabled']
        }
        object_query = self.db.create_insert_query('objects', objects_data)
        self.db.execute_query(object_query)

        host_id = self.get_host_object_id_by_name(data['host_name'])
        service_id = self.get_service_object_id_by_name(
            data['host_name'], data['service_description'])

        #print "DATA:", data
        #print "HOST ID:", host_id
        #print "SERVICE ID:", service_id
        services_data = {
            'service_id': data['id'],
            'instance_id': data['instance_id'],
            'service_object_id': service_id,
            'host_object_id': host_id,
            'display_name': data['display_name'],
            'failure_prediction_options': '0',
            'check_interval': data['check_interval'],
            'retry_interval': data['retry_interval'],
            'max_check_attempts': data['max_check_attempts'],
            'first_notification_delay': data['first_notification_delay'],
            'notification_interval': data['notification_interval'],
            'flap_detection_enabled': data['flap_detection_enabled'],
            'low_flap_threshold': data['low_flap_threshold'],
            'high_flap_threshold': data['high_flap_threshold'],
            'process_performance_data': data['process_perf_data'],
            'freshness_checks_enabled': data['check_freshness'],
            'freshness_threshold': data['freshness_threshold'],
            'passive_checks_enabled': data['passive_checks_enabled'],
            'event_handler_enabled': data['event_handler_enabled'],
            'active_checks_enabled': data['active_checks_enabled'],
            'notifications_enabled': data['notifications_enabled'],
            'obsess_over_service': data['obsess_over_service'],
            'notes': data['notes'],
            'notes_url': data['notes_url']
        }

        #print "HOST DATA", hosts_data
        query = self.db.create_insert_query('services', services_data)

        #Now create an hoststatus entry
        servicestatus_data = {
            'instance_id': data['instance_id'],
            'service_object_id': service_id,
            'normal_check_interval': data['check_interval'],
            'retry_check_interval': data['retry_interval'],
            'max_check_attempts': data['max_check_attempts'],
            'current_state': data['state_id'],
            'state_type': data['state_type_id'],
            'passive_checks_enabled': data['passive_checks_enabled'],
            'event_handler_enabled': data['event_handler_enabled'],
            'active_checks_enabled': data['active_checks_enabled'],
            'notifications_enabled': data['notifications_enabled'],
            'obsess_over_service': data['obsess_over_service'],
            'process_performance_data': data['process_perf_data'],
            'check_type': 0,
            'current_check_attempt': data['attempt'],
            'execution_time': data['execution_time'],
            'latency': data['latency'],
            'output': data['output'],
            'perfdata': data['perf_data'],
            'last_check': de_unixify(data['last_chk']),
            'last_hard_state_change':
                de_unixify(data['last_hard_state_change']),
            'problem_has_been_acknowledged':
                data['problem_has_been_acknowledged'],
            'acknowledgement_type': data['acknowledgement_type'],
            # Set has_been_checked to 1 so NagVis is happy
            'has_been_checked': 1,
        }
        servicestatus_query = self.db.create_insert_query(
            'servicestatus', servicestatus_data)

        return [query, servicestatus_query]

    # A new hostgroup? INSERT it.
    # The members prop is a list of (host.id, host_name) tuples; they feed
    # the hostgroup_members table as (hostgroup id, host object id) pairs.
    def manage_initial_hostgroup_status_brok(self, b):
        data = b.data

        # First add it to nagios_objects
        objects_data = {
            'instance_id': data['instance_id'],
            'objecttype_id': 3,
            'name1': data['hostgroup_name'],
            'is_active': 1
        }
        object_query = self.db.create_insert_query('objects', objects_data)
        self.db.execute_query(object_query)

        hostgroup_id = self.get_hostgroup_object_id_by_name(
            data['hostgroup_name'])

        hostgroups_data = {
            'hostgroup_id': data['id'],
            'instance_id': data['instance_id'],
            'config_type': 0,
            'hostgroup_object_id': hostgroup_id,
            'alias': data['alias']
        }

        query = self.db.create_insert_query('hostgroups', hostgroups_data)
        res = [query]

        # OK, the hostgroups table is up to date; now add the relations
        # between hosts and hostgroups
        for (h_id, h_name) in b.data['members']:
            host_id = self.get_host_object_id_by_name(h_name)
            hostgroup_members_data = {
                'instance_id': data['instance_id'],
                'hostgroup_id': data['id'],
                'host_object_id': host_id
            }
            q = self.db.create_insert_query('hostgroup_members',
                                            hostgroup_members_data)
            res.append(q)
        return res
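
    # All of these handlers lean on DBMysql.create_insert_query from
    # shinken.db. Its implementation is not shown here; a hypothetical
    # stand-in consistent with how it is called (a table name plus a dict of
    # column/value pairs; the real class also escapes values) could be:
    @staticmethod
    def _create_insert_query_sketch(table, data, table_prefix='nagios_'):
        columns = ', '.join(data.keys())
        values = ', '.join(u"'%s'" % val for val in data.values())
        return u"INSERT INTO %s%s (%s) VALUES (%s)" % (
            table_prefix, table, columns, values)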

    # A new servicegroup? INSERT it.
    # The members prop is a list of (service.id, service_description) tuples;
    # the id feeds the servicegroup_members table directly.
    def manage_initial_servicegroup_status_brok(self, b):
        data = b.data

        # First add it to nagios_objects
        objects_data = {
            'instance_id': data['instance_id'],
            'objecttype_id': 4,
            'name1': data['servicegroup_name'],
            'is_active': 1
        }
        object_query = self.db.create_insert_query('objects', objects_data)
        self.db.execute_query(object_query)

        servicegroup_id = self.get_servicegroup_object_id_by_name(
            data['servicegroup_name'])

        servicegroups_data = {
            'servicegroup_id': data['id'],
            'instance_id': data['instance_id'],
            'config_type': 0,
            'servicegroup_object_id': servicegroup_id,
            'alias': data['alias']
        }

        query = self.db.create_insert_query('servicegroups',
                                            servicegroups_data)
        res = [query]

        # OK, the servicegroups table is up to date; now add the relations
        # between services and servicegroups
        for (s_id, s_name) in b.data['members']:
            servicegroup_members_data = {
                'instance_id': data['instance_id'],
                'servicegroup_id': data['id'],
                'service_object_id': s_id
            }
            q = self.db.create_insert_query('servicegroup_members',
                                            servicegroup_members_data)
            res.append(q)
        return res

    # Same as the service check result, but for hosts
    def manage_host_check_result_brok(self, b):
        data = b.data
        #print "DATA", data
        host_id = self.get_host_object_id_by_name(data['host_name'])
        # Only the host is impacted
        where_clause = {'host_object_id': host_id}
        host_check_data = {
            'instance_id': data['instance_id'],
            'check_type': 0,
            'is_raw_check': 0,
            'current_check_attempt': data['attempt'],
            'state': data['state_id'],
            'state_type': data['state_type_id'],
            'start_time': data['start_time'],
            'start_time_usec': 0,
            'execution_time': data['execution_time'],
            'latency': data['latency'],
            'return_code': data['return_code'],
            'output': data['output'],
            'perfdata': data['perf_data']
        }
        query = self.db.create_update_query('hostchecks', host_check_data,
                                            where_clause)

        # Now the hoststatus entry
        hoststatus_data = {
            'instance_id': data['instance_id'],
            'check_type': 0,
            'current_check_attempt': data['attempt'],
            'current_state': data['state_id'],
            'state_type': data['state_type_id'],
            'execution_time': data['execution_time'],
            'latency': data['latency'],
            'output': data['output'],
            'perfdata': data['perf_data'],
            'last_check': de_unixify(data['last_chk'])
        }
        hoststatus_query = self.db.create_update_query('hoststatus',
                                                       hoststatus_data,
                                                       where_clause)

        return [query, hoststatus_query]
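
    # Likewise for create_update_query: the check-result handlers hand it a
    # data dict plus a where_clause dict. A hypothetical equivalent, for
    # reading purposes only (unlike the real shinken.db code, no escaping):
    @staticmethod
    def _create_update_query_sketch(table, data, where_clause,
                                    table_prefix='nagios_'):
        assignments = ', '.join(u"%s='%s'" % (k, v)
                                for k, v in data.items())
        conditions = ' AND '.join(u"%s='%s'" % (k, v)
                                  for k, v in where_clause.items())
        return u"UPDATE %s%s SET %s WHERE %s" % (
            table_prefix, table, assignments, conditions)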

    # The next schedule gets its own brok: catch it and just update
    # next_check with it
    def manage_host_next_schedule_brok(self, b):
        data = b.data
        host_id = self.get_host_object_id_by_name(data['host_name'])
        # Only the host is impacted
        where_clause = {'host_object_id': host_id}

        # Just update the host status
        hoststatus_data = {'next_check': de_unixify(data['next_chk'])}
        hoststatus_query = self.db.create_update_query('hoststatus',
                                                       hoststatus_data,
                                                       where_clause)

        return [hoststatus_query]

    # Same as the host check result, but for services
    def manage_service_check_result_brok(self, b):
        data = b.data
        #print "DATA", data
        service_id = self.get_service_object_id_by_name(
            data['host_name'], data['service_description'])

        # Only the service is impacted
        where_clause = {'service_object_id': service_id}
        service_check_data = {
            'instance_id': data['instance_id'],
            'check_type': 0,
            'current_check_attempt': data['attempt'],
            'state': data['state_id'],
            'state_type': data['state_type_id'],
            'start_time': data['start_time'],
            'start_time_usec': 0,
            'execution_time': data['execution_time'],
            'latency': data['latency'],
            'return_code': data['return_code'],
            'output': data['output'],
            'perfdata': data['perf_data']
        }
        query = self.db.create_update_query('servicechecks',
                                            service_check_data, where_clause)

        # Now the servicestatus entry
        servicestatus_data = {
            'instance_id': data['instance_id'],
            'check_type': 0,
            'current_check_attempt': data['attempt'],
            'current_state': data['state_id'],
            'state_type': data['state_type_id'],
            'execution_time': data['execution_time'],
            'latency': data['latency'],
            'output': data['output'],
            'perfdata': data['perf_data'],
            'last_check': de_unixify(data['last_chk'])
        }

        servicestatus_query = self.db.create_update_query(
            'servicestatus', servicestatus_data, where_clause)

        return [query, servicestatus_query]

    # The next schedule gets its own brok: catch it and just update
    # next_check with it
    def manage_service_next_schedule_brok(self, b):
        data = b.data
        #print "DATA", data
        service_id = self.get_service_object_id_by_name(
            data['host_name'], data['service_description'])

        # Only the service is impacted
        where_clause = {'service_object_id': service_id}

        # Just update the service status
        servicestatus_data = {'next_check': de_unixify(data['next_chk'])}
        servicestatus_query = self.db.create_update_query(
            'servicestatus', servicestatus_data, where_clause)

        return [servicestatus_query]

    # OK, the host is updated
    def manage_update_host_status_brok(self, b):
        data = b.data
        host_id = self.get_host_object_id_by_name(data['host_name'])

        hosts_data = {
            'instance_id': data['instance_id'],
            'failure_prediction_options': '0',
            'check_interval': data['check_interval'],
            'retry_interval': data['retry_interval'],
            'max_check_attempts': data['max_check_attempts'],
            'first_notification_delay': data['first_notification_delay'],
            'notification_interval': data['notification_interval'],
            'flap_detection_enabled': data['flap_detection_enabled'],
            'low_flap_threshold': data['low_flap_threshold'],
            'high_flap_threshold': data['high_flap_threshold'],
            'process_performance_data': data['process_perf_data'],
            'freshness_checks_enabled': data['check_freshness'],
            'freshness_threshold': data['freshness_threshold'],
            'passive_checks_enabled': data['passive_checks_enabled'],
            'event_handler_enabled': data['event_handler_enabled'],
            'active_checks_enabled': data['active_checks_enabled'],
            'notifications_enabled': data['notifications_enabled'],
            'obsess_over_host': data['obsess_over_host'],
            'notes': data['notes'],
            'notes_url': data['notes_url']
        }
        # Only the host is impacted
        where_clause = {'host_object_id': host_id}

        query = self.db.create_update_query('hosts', hosts_data, where_clause)

        # Now update the hoststatus entry
        hoststatus_data = {
            'instance_id': data['instance_id'],
            'host_object_id': host_id,
            'normal_check_interval': data['check_interval'],
            'retry_check_interval': data['retry_interval'],
            'max_check_attempts': data['max_check_attempts'],
            'current_state': data['state_id'],
            'state_type': data['state_type_id'],
            'passive_checks_enabled': data['passive_checks_enabled'],
            'event_handler_enabled': data['event_handler_enabled'],
            'active_checks_enabled': data['active_checks_enabled'],
            'notifications_enabled': data['notifications_enabled'],
            'obsess_over_host': data['obsess_over_host'],
            'process_performance_data': data['process_perf_data'],
            'check_type': 0,
            'current_check_attempt': data['attempt'],
            'execution_time': data['execution_time'],
            'latency': data['latency'],
            'output': data['output'],
            'perfdata': data['perf_data'],
            'last_check': de_unixify(data['last_chk']),
            'last_hard_state_change':
                de_unixify(data['last_hard_state_change']),
            'problem_has_been_acknowledged':
                data['problem_has_been_acknowledged'],
            'acknowledgement_type': data['acknowledgement_type'],
            # Set has_been_checked to 1 so NagVis is happy
            'has_been_checked': 1,
        }
        hoststatus_query = self.db.create_update_query('hoststatus',
                                                       hoststatus_data,
                                                       where_clause)

        return [query, hoststatus_query]

    # OK, the service is updated
    def manage_update_service_status_brok(self, b):
        data = b.data

        service_id = self.get_service_object_id_by_name(
            data['host_name'], data['service_description'])

        services_data = {
            'instance_id': data['instance_id'],
            'display_name': data['display_name'],
            'failure_prediction_options': '0',
            'check_interval': data['check_interval'],
            'retry_interval': data['retry_interval'],
            'max_check_attempts': data['max_check_attempts'],
            'first_notification_delay': data['first_notification_delay'],
            'notification_interval': data['notification_interval'],
            'flap_detection_enabled': data['flap_detection_enabled'],
            'low_flap_threshold': data['low_flap_threshold'],
            'high_flap_threshold': data['high_flap_threshold'],
            'process_performance_data': data['process_perf_data'],
            'freshness_checks_enabled': data['check_freshness'],
            'freshness_threshold': data['freshness_threshold'],
            'passive_checks_enabled': data['passive_checks_enabled'],
            'event_handler_enabled': data['event_handler_enabled'],
            'active_checks_enabled': data['active_checks_enabled'],
            'notifications_enabled': data['notifications_enabled'],
            'obsess_over_service': data['obsess_over_service'],
            'notes': data['notes'],
            'notes_url': data['notes_url']
        }

        # Only the service is impacted
        where_clause = {
            'service_object_id': service_id,
            'service_id': data['id']
        }
        query = self.db.create_update_query('services', services_data,
                                            where_clause)

        # Now update the servicestatus entry
        servicestatus_data = {
            'instance_id': data['instance_id'],
            'service_object_id': service_id,
            'normal_check_interval': data['check_interval'],
            'retry_check_interval': data['retry_interval'],
            'max_check_attempts': data['max_check_attempts'],
            'current_state': data['state_id'],
            'state_type': data['state_type_id'],
            'passive_checks_enabled': data['passive_checks_enabled'],
            'event_handler_enabled': data['event_handler_enabled'],
            'active_checks_enabled': data['active_checks_enabled'],
            'notifications_enabled': data['notifications_enabled'],
            'obsess_over_service': data['obsess_over_service'],
            'process_performance_data': data['process_perf_data'],
            'check_type': 0,
            'current_check_attempt': data['attempt'],
            'execution_time': data['execution_time'],
            'latency': data['latency'],
            'output': data['output'],
            'perfdata': data['perf_data'],
            'last_check': de_unixify(data['last_chk']),
            'last_hard_state_change':
                de_unixify(data['last_hard_state_change']),
            'problem_has_been_acknowledged':
                data['problem_has_been_acknowledged'],
            'acknowledgement_type': data['acknowledgement_type'],
            # Set has_been_checked to 1 so NagVis is happy
            'has_been_checked': 1,
        }

        where_clause = {'service_object_id': service_id}
        servicestatus_query = self.db.create_update_query(
            'servicestatus', servicestatus_data, where_clause)

        return [query, servicestatus_query]

    # A contact has just been created and the database is clean, so we INSERT it
    def manage_initial_contact_status_brok(self, b):
        data = b.data

        contacts_data = {
            'contact_id': data['id'],
            'instance_id': data['instance_id'],
            'contact_object_id': data['id'],
            'alias': data['alias'],
            'email_address': data['email'],
            'pager_address': data['pager'],
            'host_notifications_enabled': data['host_notifications_enabled'],
            'service_notifications_enabled':
                data['service_notifications_enabled'],
        }

        #print "HOST DATA", hosts_data
        query = self.db.create_insert_query('contacts', contacts_data)
        return [query]

    # A new contactgroup? INSERT it.
    # The members prop is a list of (contact.id, contact_name) tuples; the id
    # feeds the contactgroup_members table directly.
    def manage_initial_contactgroup_status_brok(self, b):
        data = b.data

        contactgroups_data = {
            'contactgroup_id': data['id'],
            'instance_id': data['instance_id'],
            'config_type': 0,
            'contactgroup_object_id': data['id'],
            'alias': data['alias']
        }

        query = self.db.create_insert_query('contactgroups',
                                            contactgroups_data)
        res = [query]

        # OK, the contactgroups table is up to date; now add the relations
        # between contacts and contactgroups
        for (c_id, c_name) in b.data['members']:
            contactgroup_members_data = {
                'instance_id': data['instance_id'],
                'contactgroup_id': data['id'],
                'contact_object_id': c_id
            }
            q = self.db.create_insert_query('contactgroup_members',
                                            contactgroup_members_data)
            res.append(q)
        return res
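
    # A note on how these manage_*_brok methods are presumably consumed: each
    # one returns a list of SQL strings instead of executing them itself. A
    # sketch of the dispatch loop one would expect around them; the method
    # name and the b.type attribute follow Shinken's brok conventions, but
    # treat the details as an assumption:
    def _manage_brok_sketch(self, b):
        # e.g. b.type == 'host_check_result' -> manage_host_check_result_brok
        handler = getattr(self, 'manage_%s_brok' % b.type, None)
        if handler is None:
            return
        for query in handler(b):
            self.db.execute_query(query)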