Example #1
 def import_interface(self):
     add_count = 0
     ifaces = {}
     try:
         result = self.db_handle.execute(
             """SELECT i.id,i.host,f.field,f.value
             FROM interfaces AS i, interfaces_values AS f
             WHERE i.type=4 and i.id=f.interface AND f.field IN (3,4,6)""")
         for row in result:
             ifid = row[0]
             if ifid not in ifaces:
                 ifaces[ifid] = model.Iface()
                 ifaces[ifid].host_id = self.host_id(row[1])
             if row[2] == 3:
                 ifaces[ifid].ifindex = int(row[3])
             elif row[2] == 4:
                 ifaces[ifid].display_name = unicode(row[3])
             elif row[2] == 6:
                 ifaces[ifid].speed = int(row[3])
         for iface in ifaces.values():
             DBSession.add(iface)
             DBSession.flush()
             add_count += 1
     except IntegrityError as errmsg:
         self.log.error('Error importing interfaces: %s', errmsg)
         transaction.abort()
         return None
     else:
         self.log.info('Interfaces: %d added.', add_count)
     return []
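
The importer above follows a pattern repeated throughout these examples: add each object to DBSession, flush so that constraint violations surface as IntegrityError, and abort the transaction on failure. The following is a minimal, self-contained sketch of that pattern, using plain SQLAlchemy (1.4+ style) with an in-memory SQLite database; the Iface model and session setup are illustrative stand-ins, and session.rollback() takes the place of the transaction.abort() used with the TurboGears-managed DBSession.

from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Iface(Base):
    __tablename__ = 'ifaces'
    id = Column(Integer, primary_key=True)
    host_id = Column(Integer, nullable=False)
    ifindex = Column(Integer)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

add_count = 0
try:
    for ifindex in (1, 2, 3):
        session.add(Iface(host_id=1, ifindex=ifindex))
        session.flush()  # flush per row so constraint errors surface here
        add_count += 1
    session.commit()
except IntegrityError as errmsg:
    session.rollback()   # stands in for transaction.abort()
    print('Error importing interfaces: %s' % errmsg)
else:
    print('Interfaces: %d added.' % add_count)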
Example #2
    def prepare(self):
        self.url = url
        hostid_filter = []
        if hasattr(self, 'host_id') and self.host_id is not None:
            hostid_filter = [Attribute.host_id == self.host_id]

        admin_down = DBSession.query(func.count(Attribute.id)).\
            filter(and_(*(
                hostid_filter + [Attribute.admin_state == State.DOWN]
            ))).first()
        self.att_total = int(admin_down[0])
        db_states = DBSession.query(
            EventState.internal_state, func.count(Attribute.id)).\
            join(Attribute).filter(and_(
                *(hostid_filter +
                  [Attribute.admin_state != State.DOWN]))).\
            group_by(EventState.internal_state)
        tmp_states = {}
        for att in db_states:
            tmp_states[att[0]] = att[1]
            self.att_total += att[1]

        self.att_states = []
        for state_val, label in State.NAMES.items():
            if state_val is None:
                self.att_states.append((label, admin_down[0]))
            else:
                try:
                    self.att_states.append((label, tmp_states[state_val]))
                except KeyError:
                    self.att_states.append((label, 0))
        super(AttributeSummary, self).prepare()
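
prepare() builds the optional host restriction as a list and splats it into and_(), so the same query works with or without a host_id. A small sketch of that idiom, under the same assumptions as the previous sketch (plain SQLAlchemy, in-memory SQLite); the Attribute model and the DOWN value are illustrative stand-ins for the RNMS ones.

from sqlalchemy import Column, Integer, and_, create_engine, func
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Attribute(Base):
    __tablename__ = 'attributes'
    id = Column(Integer, primary_key=True)
    host_id = Column(Integer)
    admin_state = Column(Integer)


DOWN = 2  # illustrative stand-in for State.DOWN

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Attribute(host_id=1, admin_state=DOWN),
                 Attribute(host_id=2, admin_state=0)])
session.commit()


def count_admin_down(session, host_id=None):
    # hostid_filter stays empty unless a specific host was requested
    hostid_filter = []
    if host_id is not None:
        hostid_filter = [Attribute.host_id == host_id]
    return session.query(func.count(Attribute.id)).filter(
        and_(*(hostid_filter + [Attribute.admin_state == DOWN]))).scalar()


print(count_admin_down(session))             # all hosts -> 1
print(count_admin_down(session, host_id=2))  # host 2 only -> 0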
Example #3
 def import_user(self):
     """
     Users in JFFNMS become users in RNMS
     """
     add_count = 0
     users = {}
     result = self.db_handle.execute("""SELECT id,username,name
         FROM clients
         WHERE id > 2 ORDER BY id""")
     for row in result:
         user = model.User()
         user.user_name = unicode(row[1])
         user.display_name = unicode(row[2])
         user.email_address = unicode(row[1])
         user.password = u'password'
         try:
             DBSession.add(user)
             DBSession.flush()
         except IntegrityError as errmsg:
             self.log.error('Error importing users: %s', errmsg)
             transaction.abort()
             return None
         else:
             users[row[0]] = user.user_id
             add_count += 1
     self.log.info('Users: %d added.', add_count)
     return users
Example #4
 def import_user(self):
     """
     Users in JFFNMS become users in RNMS
     """
     add_count = 0
     users = {}
     result = self.db_handle.execute(
         """SELECT id,username,name
         FROM clients
         WHERE id > 2 ORDER BY id""")
     for row in result:
         user = model.User()
         user.user_name = unicode(row[1])
         user.display_name = unicode(row[2])
         user.email_address = unicode(row[1])
         user.password = u'password'
         try:
             DBSession.add(user)
             DBSession.flush()
         except IntegrityError as errmsg:
             self.log.error('Error importing users: %s', errmsg)
             transaction.abort()
             return None
         else:
             users[row[0]] = user.user_id
             add_count += 1
     self.log.info('Users: %d added.', add_count)
     return users
Example #5
 def _discovery_lost(self, host, attribute):
     """
     Autodiscovery failed to find this attribute in the host but the
     database has this attribute.  Possibly delete or set disabled this
     Attribute.
     """
     # Host has this attribute but it wasn't discovered
     if attribute.attribute_type.ad_validate and attribute.poll_enabled:
         self.logger.debug('H:%d AT:%d Not found in host: %s',
                           host.id, attribute.attribute_type_id,
                           attribute.index)
         if host.autodiscovery_policy.can_del():
              if not self.print_only:
                  DBSession.delete(attribute)
             event_info = u' - Deleted'
         elif host.autodiscovery_policy.can_disable():
              if not self.print_only:
                  attribute.set_disabled()
             event_info = u' - Disabled'
         else:
             event_info = u''
         if host.autodiscovery_policy.alert_delete and not self.print_only:
             new_event = Event.create_admin(
                 host, attribute,
                 'Attribute not found in host{0}'.format(event_info))
             if new_event is not None:
                 DBSession.add(new_event)
                 new_event.process()
Example #6
 def _discovery_lost(self, host, attribute):
     """
     Autodiscovery failed to find this attribute in the host but the
     database has this attribute.  Possibly delete or set disabled this
     Attribute.
     """
     # Host has this attribute but it wasn't discovered
     if attribute.attribute_type.ad_validate and attribute.poll_enabled:
         self.logger.debug('H:%d AT:%d Not found in host: %s', host.id,
                           attribute.attribute_type_id, attribute.index)
         if host.autodiscovery_policy.can_del():
              if not self.print_only:
                  DBSession.delete(attribute)
             event_info = u' - Deleted'
         elif host.autodiscovery_policy.can_disable():
              if not self.print_only:
                  attribute.set_disabled()
             event_info = u' - Disabled'
         else:
             event_info = u''
         if host.autodiscovery_policy.alert_delete and not self.print_only:
             new_event = Event.create_admin(
                 host, attribute,
                 'Attribute not found in host{0}'.format(event_info))
             if new_event is not None:
                 DBSession.add(new_event)
                 new_event.process()
Example #7
    def recv_trap(self, recv_addr, recv_msg):
        # Fix the IPv4 mapped addresses
        if recv_addr[0][:7] == '::ffff:':
            host_ip = recv_addr[0][7:]
        else:
            host_ip = recv_addr[0]
        host_id = self._get_host_id(host_ip)
        if host_id is None:
            self.logger.debug('Notification message from unknown host %s',
                              host_ip)
            return
        while recv_msg:
            msg_ver = int(api.decodeMessageVersion(recv_msg))
            if msg_ver in api.protoModules:
                pmod = api.protoModules[msg_ver]
            else:
                self.logger.info('H:%d - Unsupported SNMP version %s from %s',
                                 host_id, msg_ver, host_ip)
                return

            req_msg, recv_msg = decoder.decode(
                recv_msg,
                asn1Spec=pmod.Message(),
            )

            req_pdu = pmod.apiMessage.getPDU(req_msg)
            if req_pdu.isSameTypeWith(pmod.TrapPDU()):
                trap_oid = None

                if msg_ver == api.protoVersion1:
                    trap_oid = self._get_trapv1_oid(pmod, req_pdu)
                    if trap_oid is None:
                        return
                    new_trap = SnmpTrap(host_id, trap_oid)
                    new_trap.set_uptime(
                        pmod.apiTrapPDU.getTimeStamp(req_pdu).prettyPrint())
                    var_binds = pmod.apiTrapPDU.getVarBindList(req_pdu)
                else:
                    new_trap = SnmpTrap(host_id, None)
                    var_binds = pmod.apiPDU.getVarBindList(req_pdu)

                for var_bind in var_binds:
                    oid, val = pmod.apiVarBind.getOIDVal(var_bind)
                    if oid == SNMP_TRAP_OID:
                        new_trap.trap_oid = val.prettyPrint()
                    else:
                        new_trap.set_varbind(oid.prettyPrint(),
                                             val.prettyPrint())
                if new_trap.trap_oid is None:
                    self.logger.info('H:%d Trap with no trap_oid?', host_id)
                else:
                    if self._duplicate_trap(host_id, new_trap.trap_oid):
                        self.logger.debug(
                            'H:%d Duplicate Trap,not added OID:%s', host_id,
                            new_trap.trap_oid)
                    else:
                        self.logger.debug('H:%d New Trap v%s OID:%s', host_id,
                                          msg_ver, new_trap.trap_oid)
                        DBSession.add(new_trap)
                        transaction.commit()
Example #8
 def types_option(self, a=None):
     if a is None:
         a = []
     elif type(a) is not list:
         a = [a]
     att_ids = [int(x) for x in a]
     atype = DBSession.query(GraphType.id, GraphType.display_name,
                             GraphType.attribute_type_id).\
         filter(GraphType.attribute_type_id.in_(
                DBSession.query(Attribute.attribute_type_id).
                filter(Attribute.id.in_(att_ids))
                ))
     return dict(data_name='atype', items=atype.all())
Example #9
 def _run_matches(self, logfile_id, match_rows, loglines):
     """ Go over the  loglines looking for matches """
     line_count = 0
     for line in loglines:
         line_count += 1
         for row in match_rows:
             match_data = row.try_match(line)
             if match_data:
                 new_event = Event(**match_data)
                 DBSession.add(new_event)
     self.logger.info("LOGF(%s): %d messages processed",
                      logfile_id, line_count)
Example #10
 def _run_matches(self, logfile_id, match_rows, loglines):
     """ Go over the  loglines looking for matches """
     line_count = 0
     for line in loglines:
         line_count += 1
         for row in match_rows:
             match_data = row.try_match(line)
             if match_data:
                 new_event = Event(**match_data)
                 DBSession.add(new_event)
     self.logger.info("LOGF(%s): %d messages processed", logfile_id,
                      line_count)
Example #11
 def _delete_rows(self, del_model, del_id=None):
     """ Delete all items of del_model with an ID higher than del_id
         Returns number of deleted items
     """
     deleted_items = 0
     if del_id is None:
         deleted_items = DBSession.query(del_model).delete()
     else:
         deleted_items = DBSession.query(del_model).\
             filter(del_model.id > del_id).delete()
     if deleted_items is None:
         return 0
     return deleted_items
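
Query.delete(), as used in _delete_rows(), issues a bulk DELETE and returns the number of matched rows, so the is None guard is purely defensive. A short, self-contained sketch with an illustrative model:

from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Item(Base):
    __tablename__ = 'items'
    id = Column(Integer, primary_key=True)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Item(id=i) for i in range(1, 6)])
session.commit()

# Delete every row with an id above a threshold, as _delete_rows(model, 2) does
deleted = session.query(Item).filter(Item.id > 2).delete()
session.commit()
print(deleted)  # 3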
Example #12
 def option(self):
     """ Return a list of hosts. If user has required
     permission it shows all, else just their ones """
     if permissions.host_ro:
         hosts = DBSession.query(Host.id, Host.display_name)
     else:
         hosts = DBSession.query(Host.id, Host.display_name).filter(
             Host.id.in_(
                 DBSession.query(Attribute.host_id).filter(
                     Attribute.user_id ==
                     request.identity['user'].user_id)))
     items = hosts.all()
     items.insert(0, ('', '-- Choose Host --'))
     return dict(items=items)
Example #13
 def option(self):
     """ Return a list of hosts. If user has required
     permission it shows all, else just their ones """
     if permissions.host_ro:
         hosts = DBSession.query(Host.id, Host.display_name)
     else:
         hosts = DBSession.query(Host.id, Host.display_name).filter(
             Host.id.in_(
                 DBSession.query(Attribute.host_id).filter(
                     Attribute.user_id == request.identity['user'].user_id)
             )
         )
     items = hosts.all()
     items.insert(0, ('', '-- Choose Host --'))
     return dict(items=items)
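
option() restricts the host list with Column.in_() over a subquery of Attribute.host_id. A sketch of that pattern, with Host and Attribute as illustrative stand-ins for the RNMS models:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Host(Base):
    __tablename__ = 'hosts'
    id = Column(Integer, primary_key=True)
    display_name = Column(String)


class Attribute(Base):
    __tablename__ = 'attributes'
    id = Column(Integer, primary_key=True)
    host_id = Column(Integer)
    user_id = Column(Integer)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Host(id=1, display_name='router1'),
                 Host(id=2, display_name='switch1'),
                 Attribute(host_id=1, user_id=42)])
session.commit()

# Hosts visible to user 42: only those that own at least one of the user's
# attributes. Recent SQLAlchemy versions may suggest .scalar_subquery() here.
my_hosts = session.query(Host.id, Host.display_name).filter(
    Host.id.in_(
        session.query(Attribute.host_id).filter(Attribute.user_id == 42)))
print(my_hosts.all())  # [(1, 'router1')]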
Example #14
def check_alarm_triggers(logger):
    alarms = DBSession.query(Alarm).filter(Alarm.processed == False)
    triggers = Trigger.alarm_triggers()
    logger.info('%d Alarms to process', alarms.count())
    if alarms.count() == 0:
        return
    for alarm in alarms:
        for trigger in triggers:
            rule_result = False
            for rule in trigger.rules:
                rule_result = rule.eval(rule_result, alarm)
                if rule_result == True and rule.stop == True:
                    break

            if rule_result == True:
                if trigger.email_owner == True:
                    logger.debug('A%d T%d: email to %s', alarm.attribute.id,
                                 trigger.id, alarm.attribute.user.user_name)
                    email_action(trigger, alarm.attribute.user, alarm=alarm)
            if trigger.email_users == True:
                sent_users = []
                for trigger_user in trigger.users:
                    sent_users.append(alarm.attribute.user.user_name)
                    trigger.email_action(trigger, trigger_user, alarm=alarm)
                if sent_users != []:
                    logger.debug('A%d T%d: email to %s', alarm.attribute.id,
                                 trigger.id, ','.join(sent_users))
        alarm.processed = True
    transaction.commit()
Example #15
def process_events(logger):
    """
    Scan all events that have not been previously checked and set alerts
    where required.
    Returns a set of changed attributes
    """
    changed_attributes = set()
    events = DBSession.query(Event).filter(Event.processed == False)
    logger.info('%d Events to process', events.count())
    for event in events:
        if event.event_state is None or event.attribute is None:
            event.set_processed()
            continue

        if event.event_state.is_up():
            event.acknowledged = True

        changed_attributes.add(event.attribute_id)

        if event.event_state.is_alert() == False:
            down_event = Event.find_down(event.attribute_id,
                                         event.event_type_id, event.id)
            if event.event_state.is_downtesting():
                process_event_downtesting(logger, event, down_event)
            elif event.event_state.is_up():
                process_event_up(logger, event, down_event)
        event.set_processed()
    transaction.commit()
    return changed_attributes
Example #16
def check_alarm_triggers(logger):
    alarms = DBSession.query(Alarm).filter(Alarm.processed == False)
    triggers = Trigger.alarm_triggers()
    logger.info('%d Alarms to process', alarms.count())
    if alarms.count() == 0:
        return
    for alarm in alarms:
        for trigger in triggers:
            rule_result = False
            for rule in trigger.rules:
                rule_result = rule.eval(rule_result, alarm)
                if rule_result == True and rule.stop == True:
                    break

            if rule_result == True:
                if trigger.email_owner == True:
                    logger.debug('A%d T%d: email to %s', alarm.attribute.id,
                                 trigger.id, alarm.attribute.user.user_name)
                    email_action(trigger, alarm.attribute.user, alarm=alarm)
            if trigger.email_users == True:
                sent_users = []
                for trigger_user in trigger.users:
                    sent_users.append(alarm.attribute.user.user_name)
                    trigger.email_action(trigger, trigger_user, alarm=alarm)
                if sent_users != []:
                    logger.debug('A%d T%d: email to %s', alarm.attribute.id,
                                 trigger.id, ','.join(sent_users))
        alarm.processed = True
    transaction.commit()
Example #17
    def consolidate(self):
        """ Run the consolidator for SNMP traps """
        traps = DBSession.query(SnmpTrap).\
            filter(SnmpTrap.processed == False)  # noqa
        self.logger.info('%d SNMP Traps to process', traps.count())
        if traps.count() == 0:
            return

        for trap in traps:
            trap.processed = True
            try:
                trap_matches = self.trap_matches[trap.trap_oid]
            except KeyError:
                continue
            for trap_match in trap_matches:
                (attribute, trap_value, error) = \
                    trap_match.run(trap.host, trap)
                if error is not None:
                    self.logger.warn('TrapMatch error: %s', error)
                    continue
                if attribute is not None:
                    # We have matched to this trap
                    backend_result = \
                        trap_match.backend.run(None, attribute, trap_value)
                    self.logger.debug("A:%d Trap:%s -> %s:%s", attribute.id,
                                      str(trap_value)[:100],
                                      trap_match.backend.display_name,
                                      backend_result)
                    if trap_match.stop_if_match is True:
                        break
        transaction.commit()
Example #18
 def prepare(self):
     conditions = []
     conditions.append(Host.show_host == True)  # noqa
     if self.zone_id is not None:
         conditions.append(Host.zone_id == self.zone_id)
     hosts = DBSession.query(Host).join(Zone).filter(
         and_(*conditions)).order_by(asc(Zone.display_name),
                                     asc(Host.display_name))
     if hosts.count() == 0:
         flash('No Hosts Found', 'alert')
         self.map_groups = None
     else:
         for host in hosts:
             vendor, device = host.snmp_type()
             hstate, state_desc = self.host_state(host)
             if self.alarmed_only and hstate == 'ok':
                 continue
             host_fields = [('Zone', host.zone.display_name),
                            ('Status', state_desc), ('Vendor', vendor),
                            ('Device', device),
                            ('Address', host.mgmt_address)]
             self.add_item(
                 host.zone_id, host.zone.display_name, [], {
                     'name': host.display_name,
                     'state': hstate,
                     'url': url('/attributes/map/', {'h': host.id}),
                     'fields': host_fields,
                 })
     super(HostMap, self).prepare()
Example #19
def process_events(logger):
    """
    Scan all events that have not been previously checked and set alerts
    where required.
    Returns a set of changed attributes
    """
    changed_attributes = set()
    events = DBSession.query(Event).filter(Event.processed == False)
    logger.info('%d Events to process', events.count())
    for event in events:
        if event.event_state is None or event.attribute is None:
            event.set_processed()
            continue

        if event.event_state.is_up():
            event.acknowledged = True

        changed_attributes.add(event.attribute_id)

        if event.event_state.is_alert() == False:
            down_event = Event.find_down(event.attribute_id,
                                         event.event_type_id, event.id)
            if event.event_state.is_downtesting():
                process_event_downtesting(logger, event, down_event)
            elif event.event_state.is_up():
                process_event_up(logger, event, down_event)
        event.set_processed()
    transaction.commit()
    return changed_attributes
Example #20
    def _get_tabledata(self, table, conditions=None, **kw):
        query = DBSession.query(table)
        if conditions is not None and conditions != []:
            query = query.filter(and_(*conditions))
        if 'sort' in kw:
            insp = inspect(table)
            try:
                sort_col = insp.columns[kw['sort']]
            except KeyError:
                try:
                    sort_table = insp.relationships[kw['sort']]
                    sort_col = sort_table.table.c['display_name']
                    query = query.join(sort_table.table)
                except KeyError:
                    return None
            sort_order = kw.get('order')  # default is asc
            if sort_order == 'desc':
                query = query.order_by(desc(sort_col))
            else:
                query = query.order_by(sort_col)
        total = query.count()
        if 'offset' in kw:
            query = query.offset(kw['offset'])
        if 'limit' in kw:
            query = query.limit(kw['limit'])

        return (total, query)
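
_get_tabledata() takes the total row count before applying offset and limit, so the caller receives both the full count and a single page. A sketch of that count-then-paginate pattern with an illustrative model:

from sqlalchemy import Column, Integer, String, create_engine, desc
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Zone(Base):
    __tablename__ = 'zones'
    id = Column(Integer, primary_key=True)
    display_name = Column(String)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Zone(display_name='zone%d' % i) for i in range(10)])
session.commit()

query = session.query(Zone).order_by(desc(Zone.display_name))
total = query.count()                  # size of the full result set
page = query.offset(2).limit(3).all()  # one page of it
print(total, [z.display_name for z in page])  # 10 ['zone7', 'zone6', 'zone5']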
Example #21
    def consolidate(self):
        """ Run the consolidator for SNMP traps """
        traps = DBSession.query(SnmpTrap).\
            filter(SnmpTrap.processed == False)  # noqa
        self.logger.info('%d SNMP Traps to process', traps.count())
        if traps.count() == 0:
            return

        for trap in traps:
            trap.processed = True
            try:
                trap_matches = self.trap_matches[trap.trap_oid]
            except KeyError:
                continue
            for trap_match in trap_matches:
                (attribute, trap_value, error) = \
                    trap_match.run(trap.host, trap)
                if error is not None:
                    self.logger.warn(
                        'TrapMatch error: %s',
                        error)
                    continue
                if attribute is not None:
                    # We have matched to this trap
                    backend_result = \
                        trap_match.backend.run(None, attribute, trap_value)
                    self.logger.debug(
                        "A:%d Trap:%s -> %s:%s",
                        attribute.id,
                        str(trap_value)[:100],
                        trap_match.backend.display_name,
                        backend_result)
                    if trap_match.stop_if_match is True:
                        break
        transaction.commit()
Example #22
 def mapseveritycss(self):
     severities = DBSession.query(Severity)
     return dict(
         severities=[
             (s.id, s.bgcolor,
                 '%.6x' % ((int(s.bgcolor, 16) & 0xfefefe) >> 1))
             for s in severities],)
Example #23
 def field_value(cls, attribute_id, field_tag):
     """ Return the value of the field for the given attribute that
     matches the tag
     """
     ftag = DBSession.query(AttributeTypeField).\
         join(AttributeType, Attribute).filter(
             Attribute.id == attribute_id,
             AttributeTypeField.tag == field_tag).first()
     if ftag is None:
         return None
     fval = DBSession.query(cls.value).filter(
         cls.attribute_id == attribute_id,
         cls.attribute_type_field_id == ftag.id).first()
     if fval is not None:
         return fval[0]
     return ftag.default_value
Example #24
    def prepare(self):
        conditions = []
        if self.host_id is not None:
            conditions.append(Attribute.host_id == self.host_id)
        if self.alarmed_only:
            conditions.append(EventState.internal_state != State.UP)
        attributes = DBSession.query(Attribute).join(Host, EventState).\
            filter(and_(*conditions)).\
            order_by(asc(Host.display_name), asc(Attribute.display_name))
        if attributes.count() == 0:
            flash('No Attributes Found', 'alert')
            self.map_groups = None
        else:
            for attribute in attributes:
                astate, state_desc = self.attribute_state(attribute)

                try:
                    atype = attribute.attribute_type.display_name
                except AttributeError:
                    atype = 'Unknown'
                att_fields = [('Host', attribute.host.display_name),
                              ('Type', atype),
                              ('Status', state_desc), ]
                for k, v in attribute.description_dict().items():
                    if v != '':
                        att_fields.append((k, v))
                self.add_item(attribute.host_id, attribute.host.display_name,
                              [('Address', attribute.host.mgmt_address)],
                              {'name': attribute.display_name,
                               'state': astate,
                               'url': url('/attributes/'+str(attribute.id)),
                               'fields': att_fields,
                               })
        super(AttributeMap, self).prepare()
Example #25
 def prepare(self):
     conditions = []
     conditions.append(Host.show_host == True)  # noqa
     if self.zone_id is not None:
         conditions.append(Host.zone_id == self.zone_id)
     hosts = DBSession.query(Host).join(Zone).filter(
         and_(*conditions)).order_by(asc(Zone.display_name),
                                     asc(Host.display_name))
     if hosts.count() == 0:
         flash('No Hosts Found', 'alert')
         self.map_groups = None
     else:
         for host in hosts:
             vendor, device = host.snmp_type()
             hstate, state_desc = self.host_state(host)
             if self.alarmed_only and hstate == 'ok':
                 continue
             host_fields = [('Zone', host.zone.display_name),
                            ('Status', state_desc),
                            ('Vendor', vendor),
                            ('Device', device),
                            ('Address', host.mgmt_address)]
             self.add_item(host.zone_id, host.zone.display_name,
                           [],
                           {'name': host.display_name,
                            'state': hstate,
                            'url': url('/attributes/map/', {'h': host.id}),
                            'fields': host_fields,
                            })
     super(HostMap, self).prepare()
Example #26
 def next_sla_analysis(cls):
     """
     Return the attribute that would be the next one for SLA
     Used for finding how long before we need to rescan again
     """
     return DBSession.query(cls).\
         filter(and_(cls.sla_id > 1, cls.poller_set_id > 1)).\
         order_by(asc(cls.next_sla)).first()
Example #27
 def type_option(self):
     """ Show option list of Attribute Types """
     types = DBSession.query(
         AttributeType.id,
         AttributeType.display_name)
     items = types.all()
     items.insert(0, ('', '-- Choose Type --'))
     return dict(items=items)
Example #28
 def alarmed_events(cls, conditions):
     """
     Find all alarmed Events with extra conditions
     """
     conditions.extend([
         cls.alarmed == True,  # noqa
         cls.stop_time == None])
     return DBSession.query(cls).join(EventState).filter(and_(*conditions))
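
The '== True' and '== None' comparisons seen throughout these examples (and their # noqa tags) are deliberate: on mapped columns these operators build SQL expressions rather than Python booleans, so the usual 'is True' / 'is None' advice does not apply. A tiny sketch with an illustrative Event model:

from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()


class Event(Base):
    __tablename__ = 'events'
    id = Column(Integer, primary_key=True)
    alarmed = Column(Boolean)
    stop_time = Column(DateTime)


# A comparison on a mapped column is an expression object, not True/False:
print(type(Event.alarmed == True))  # noqa: E712
# "== None" becomes an IS NULL clause when the expression is rendered:
print(Event.stop_time == None)      # noqa: E711  -> events.stop_time IS NULL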
Example #29
 def attribute_client_option(self):
     """ Show option list of all users who own attributes """
     users = DBSession.query(
         model.User.user_id,
         model.User.display_name).join(model.Attribute).distinct()
     items = users.all()
     items.insert(0, ('', '-- Choose Client --'))
     return dict(items=items)
Example #30
 def import_snmp(self, old_comm):
     if old_comm is None:
         comm = model.SnmpCommunity.by_name(u'None')
         if comm is not None:
             return comm.id
     try:
         (comm_ver, comm_data) = old_comm.split(':')
     except ValueError:
         pass
     else:
         comm_ver = int(comm_ver[1:])
         display_name = unicode(old_comm.split('|')[0])
         comm_fields = comm_data.split('|')
         comm_name = comm_fields[0]
         comm_id = DBSession.query(model.SnmpCommunity.id).\
             select_from(model.SnmpCommunity).\
             filter(and_(
                 model.SnmpCommunity.community == comm_name,
                 model.SnmpCommunity.version == comm_ver)).\
             scalar()
         if comm_id is not None:
             return comm_id
         new_comm = model.SnmpCommunity()
         new_comm.display_name = display_name
         new_comm.version = comm_ver
         if comm_ver == 3:
             if comm_fields[1] == 'noAuthNoPriv':
                 new_comm.set_v3auth_none()
             elif comm_fields[1] in ('authNoPriv', 'authPriv'):
                 if comm_fields[2] == 'md5':
                     new_comm.set_v3auth_md5(comm_name, comm_fields[3])
                 else:
                     new_comm.set_v3auth_sha(comm_name, comm_fields[3])
             if comm_fields[1] != 'authPriv' or comm_fields[5] == '':
                 new_comm.set_v3privacy_none()
             elif comm_fields[4] == 'des':
                 new_comm.set_v3privacy_des(comm_fields[5])
             else:
                 new_comm.set_v3privacy_aes(comm_fields[5])
         else:
             new_comm.community = comm_name
         DBSession.add(new_comm)
         DBSession.flush()
         return new_comm.id
     return 1
Example #31
 def consolidate(self):
     """ Run the actual consolidation for logfiles """
     logfiles = DBSession.query(Logfile)
     for logfile in logfiles:
         if logfile.id == 1:  # Magic 1 means internal database
             self._cons_syslog(logfile)
         else:
             self._cons_logfile(logfile)
     transaction.commit()
Example #32
 def discovered_exists(cls, host_id, attribute_type_id, index):
     """
     Return True if this discovered item already exists
     """
     query = DBSession.query(cls.id).filter(and_(
         cls.host_id == host_id,
         cls.attribute_type_id == attribute_type_id,
         cls.index == index)).first()
     return query is not None
Example #33
 def alarmed_events(cls, conditions):
     """
     Find all alarmed Events with extra conditions
     """
     conditions.extend([
         cls.alarmed == True,  # noqa
         cls.stop_time == None
     ])
     return DBSession.query(cls).join(EventState).filter(and_(*conditions))
Example #34
 def host_alarm(cls, host_id):
     """
     Return the highest priority alarmed Event for the given Host id
 """
     return cls.alarmed_events([
         cls.attribute_id.in_(
             DBSession.query(
                 Attribute.id).filter(Attribute.host_id == host_id))
     ]).order_by(desc(EventState.priority)).first()
Example #35
 def _get_host_attributes(self, host_id, atype_id):
     """ Return a list of Attributes for the given hosts for a given
     AttributeType """
     atts = DBSession.query(Attribute).filter(
         and_(Attribute.host_id == host_id,
              Attribute.attribute_type_id == atype_id))
     if atts is None:
         return {}
     return {att.index: att for att in atts}
Example #36
 def _discovery_found(self, host, atype_id, attribute):
     """
     Autodiscovery has found a new attribute that is not stored in
     the database.
     """
     if host.autodiscovery_policy.can_add(attribute):
         self.logger.debug('H:%d AT:%d New Interface Found: %s', host.id,
                           atype_id, attribute.index)
     if host.autodiscovery_policy.permit_add:
         if self.print_only:
             self.logger.debug('H:%d AT:%d Added %s', host.id, atype_id,
                               attribute.index)
         else:
             real_att = Attribute.from_discovered(host, attribute)
             DBSession.add(real_att)
             DBSession.flush()
             self.logger.debug('H:%d AT:%d Added %s = %d', host.id,
                               atype_id, attribute.index, real_att.id)
Example #37
 def _delete_users(self):
     """ Delete all items of del_model with an ID higher than del_id
         Returns number of deleted items
     """
     deleted_items = DBSession.query(model.User).\
         filter(model.User.user_id > 2).delete()
     if deleted_items is None:
         return 0
     return deleted_items
Example #38
 def _get_host_attributes(self):
     host_atts = {}
     atts = DBSession.query(Attribute).filter(
         Attribute.host_id == self.dhost.id)
     for att in atts:
         if att.attribute_type_id not in host_atts:
             host_atts[att.attribute_type_id] = {}
         host_atts[att.attribute_type_id][att.index] = att
     return host_atts
Example #39
 def _get_host_attributes(self, host_id, atype_id):
     """ Return a list of Attributes for the given hosts for a given
     AttributeType """
     atts = DBSession.query(Attribute).filter(and_(
         Attribute.host_id == host_id,
         Attribute.attribute_type_id == atype_id))
     if atts is None:
         return {}
     return {att.index: att for att in atts}
Example #40
 def host_alarm(cls, host_id):
     """
     Return the highest priority alarmed Event for the given Host id
 """
     return cls.alarmed_events(
         [cls.attribute_id.in_(
             DBSession.query(Attribute.id).
             filter(Attribute.host_id == host_id))]).order_by(
             desc(EventState.priority)).first()
Example #41
    def bulk_add(self, h, attribs):
        """ From a discovery phase, add the following attributes """
        if tmpl_context.form_errors:
            self.process_form_errors()
            return {}

        host = Host.by_id(h)
        if host is None:
            return dict(errors='Unknown Host ID {}'.format(h))

        old_att_id = None
        new_count = 0

        decoded_attribs = json.loads(attribs)
        for vals in decoded_attribs:
            if old_att_id != vals['atype_id']:
                attribute_type = AttributeType.by_id(vals['atype_id'])
                if attribute_type is None:
                    return dict(errors='Unknown Attribute Type ID {}'.format(
                        vals['atype_id']))
                old_att_id = vals['atype_id']

            if Attribute.discovered_exists(host.id, attribute_type.id,
                                           vals['id']):
                continue
            new_attribute = Attribute(host=host,
                                      attribute_type=attribute_type,
                                      display_name=vals['display_name'],
                                      index=vals['id'])
            try:
                admin_state = State(name=vals['admin_state'])
            except ValueError:
                new_attribute.admin_state = State.UNKNOWN
            else:
                new_attribute.admin_state = int(admin_state)
            new_attribute.state = EventState.by_name(vals['oper_state'])
            if new_attribute.state is None:
                new_attribute.state = EventState.get_up()

            for tag, value in vals['fields'].items():
                new_attribute.set_field(tag, value)
            DBSession.add(new_attribute)
            new_count += 1

        return dict(status='{} Attributes added'.format(new_count))
Example #42
 def set_limit(self, limits):
     """ Convert the limits as tags into indexes """
     from rnms.model import EventType
     fname = trigger_fields[self.field]
     if fname == 'event_type':
         self.limit = ','.join(
             [unicode(x[0]) for x in DBSession.query(EventType.id).
                 filter(EventType.tag.in_(limits.split(',')))])
         return
     raise ValueError('Dont have limits for {}'.format(fname))
Example #43
 def load_config(self):
     """ Load configuration for logfiles consolidation """
     self.match_sets = {}
     db_sets = DBSession.query(LogmatchSet)
     for db_set in db_sets:
         self.match_sets[db_set.id] = []
         for row in db_set.rows:
             self.match_sets[db_set.id].append(MatchRow(row))
     self.logger.debug(
         "Consolidator loaded %d match sets.\n", len(self.match_sets))
Example #44
 def import_hostconfig(self):
     conf_count = 0
     try:
         result = self.db_handle.execute("""SELECT date,host,config
             FROM hosts_config WHERE id > 1 ORDER BY id""")
         for row in result:
             conf = model.HostConfig()
             conf.created = datetime.datetime.fromtimestamp(row[0])
             conf.host_id = self.host_id(row[1])
             conf.config = row[2]
             DBSession.add(conf)
              DBSession.flush()
              conf_count += 1
     except IntegrityError as errmsg:
         self.log.error('Error importing host configs: %s', errmsg)
         transaction.abort()
         return None
     else:
         self.log.info('Hosts Config: %d added.', conf_count)
     return []
Example #45
 def load_config(self):
     """ Load configuration for logfiles consolidation """
     self.match_sets = {}
     db_sets = DBSession.query(LogmatchSet)
     for db_set in db_sets:
         self.match_sets[db_set.id] = []
         for row in db_set.rows:
             self.match_sets[db_set.id].append(MatchRow(row))
     self.logger.debug("Consolidator loaded %d match sets.\n",
                       len(self.match_sets))
Example #46
    def bulk_add(self, h, attribs):
        """ From a discovery phase, add the following attributes """
        if tmpl_context.form_errors:
            self.process_form_errors()
            return {}

        host = Host.by_id(h)
        if host is None:
            return dict(errors='Unknown Host ID {}'.format(h))

        old_att_id = None
        new_count = 0

        decoded_attribs = json.loads(attribs)
        for vals in decoded_attribs:
            if old_att_id != vals['atype_id']:
                attribute_type = AttributeType.by_id(vals['atype_id'])
                if attribute_type is None:
                    return dict(errors='Unknown Attribute Type ID {}'.
                                format(vals['atype_id']))
                old_att_id = vals['atype_id']

            if Attribute.discovered_exists(host.id, attribute_type.id,
                                           vals['id']):
                continue
            new_attribute = Attribute(
                host=host, attribute_type=attribute_type,
                display_name=vals['display_name'], index=vals['id'])
            try:
                admin_state = State(name=vals['admin_state'])
            except ValueError:
                new_attribute.admin_state = State.UNKNOWN
            else:
                new_attribute.admin_state = int(admin_state)
            new_attribute.state = EventState.by_name(vals['oper_state'])
            if new_attribute.state is None:
                new_attribute.state = EventState.get_up()

            for tag, value in vals['fields'].items():
                new_attribute.set_field(tag, value)
            DBSession.add(new_attribute)
            new_count += 1

        return dict(status='{} Attributes added'.format(new_count))
Example #47
def check_all_attributes_state(logger):
    """ Recalculate all Attributes Oper state
    This is done when the consolidator is first started
    """
    logger.debug('Recalculating all Attributes Oper state')
    attributes = DBSession.query(Attribute)
    if attributes is None:
        return
    for attribute in attributes:
        attribute.calculate_oper()
    transaction.commit()
Example #48
 def load_config(self):
     """ Load configuration from Database """
     self.trap_matches = defaultdict(list)
     trap_match_count = 0
     for trap_match in DBSession.query(TrapMatch).\
             order_by(TrapMatch.position):
         trap_match_count += 1
         self.trap_matches[trap_match.trap_oid].append(
             MatchTrap(trap_match))
     self.logger.debug("Trap Consolidator loaded %d trap rules.",
                       trap_match_count)