Example No. 1
    def make_alert_history(self):
        """Generates an alert history object based on the current attributes"""
        if self.state == Event.STATE_END:
            return self._resolve_alert_history()

        attrs = dict(
            start_time=self.time,
            end_time=INFINITY if self.state == Event.STATE_START else None,
        )
        if self.severity_rules:
            self.severity = self.severity_rules.evaluate(self)
        for attr in (
                'source',
                'device',
                'netbox',
                'subid',
                'event_type',
                'value',
                'severity',
        ):
            attrs[attr] = getattr(self, attr)
        alert = AlertHistory(**attrs)
        alert.alert_type = self.get_alert_type()
        self._update_history_vars(alert)
        return alert
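
A minimal usage sketch for the method above; "event" stands in for whatever event object defines make_alert_history() and is not taken from the project itself:

# Illustrative only: build the history entry for an event and persist it.
alert = event.make_alert_history()   # "event" is a placeholder object
if alert:                            # the end-state branch may hand back something else (or nothing)
    alert.save()                     # AlertHistory behaves as a Django model (see Example No. 7)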
Example No. 2
    def setUp(self):
        self.alert1 = AlertHistory(start_time=datetime(2014, 1, 1),
                                   end_time=datetime(2014, 1, 3))
        self.alert2 = AlertHistory(start_time=datetime(2014, 1, 20),
                                   end_time=datetime(2014, 1, 21))
        self.downtime1 = self.alert1.end_time - self.alert1.start_time
        self.downtime2 = self.alert2.end_time - self.alert2.start_time
        self.alerts = [self.alert1, self.alert2]
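
Both downtime attributes above are plain datetime.timedelta objects (end_time minus start_time). A small sketch of summing such deltas over several alerts; total_downtime is a hypothetical helper, not part of the test case:

from datetime import timedelta

def total_downtime(alerts):
    # Sum end_time - start_time over alerts that have both timestamps set.
    return sum((a.end_time - a.start_time for a in alerts), timedelta())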
Example No. 3
def simple_alerthist():
    hist = AlertHistory(source_id='ipdevpoll',
                        event_type_id='info',
                        start_time=datetime.now(),
                        value=0,
                        severity=0)
    yield hist
    if hist.pk:
        hist.delete()
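
The yield-then-clean-up shape above is the usual pytest fixture pattern, so the function is presumably registered with @pytest.fixture in the original source even though no decorator is shown in this excerpt. A hypothetical test consuming it could look like this:

def test_simple_alerthist_is_unsaved(simple_alerthist):
    # The fixture yields an AlertHistory that was never saved, so it has no primary key.
    assert simple_alerthist.pk is None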
Example No. 4
def was_on_maintenance(alert: AlertHistory):
    """Returns True if the subject of the alert appeared to be on maintenance at the
    time the alert was issued.

    The NAV libraries contain API calls to evaluate whether an alert subject is
    currently on maintenance. However, they don't yet provide an easy way to
    evaluate whether something was on maintenance at a particular point in
    time. This becomes important to nav-argus-glue when syncing potentially old
    alerts from the NAV alert history to Argus (NAV 5.1 at the time of this writing)
    - hence, this function exists.
    """
    on_maintenance = False
    maintenances = AlertHistory.objects.filter(
        event_type="maintenanceState",
        netbox=alert.netbox,
        start_time__lte=alert.start_time,
        end_time__gte=alert.start_time,
    )
    subject = alert.get_subject()
    if isinstance(subject, Service):
        on_maintenance = maintenances.filter(subid=str(subject.id)).count() > 0
    if not on_maintenance:
        # if the service wasn't explicitly on maintenance, check whether the netbox itself was
        on_maintenance = maintenances.filter(subid="").count() > 0

    if on_maintenance:
        _logger.debug(
            "%s was on maintenance when the alert took place: %s",
            subject,
            describe_alerthist(alert),
        )
        return True
    else:
        return False
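
A hedged sketch of how a sync job might use this check; apart from was_on_maintenance() itself, every name here (the unsynced_alerts iterable and the push_to_argus helper) is hypothetical:

for alert in unsynced_alerts:        # hypothetical iterable of AlertHistory rows to sync
    if was_on_maintenance(alert):
        # Illustrative reaction only; a real glue service might instead tag or
        # acknowledge the resulting incident rather than skip it entirely.
        continue
    push_to_argus(alert)             # hypothetical helper, not part of the code above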
Example No. 5
def build_tags_from(alert: AlertHistory) -> Generator:
    """
    Generates a series of tag tuples
    :param alert: An AlertHistory object from NAV
    :returns: A generator of (tag_name, tag_value) tuples, suitable to make a tag
              dictionary for an Argus incident.
    """
    yield "event_type", alert.event_type_id
    if alert.alert_type:
        yield "alert_type", alert.alert_type.name
    subject = alert.get_subject()
    # TODO: Find a sane convention for translating various event subjects to tags, such
    #       as power supplies, modules etc.

    if alert.netbox:
        yield "host", alert.netbox.sysname
        yield "room", alert.netbox.room.id
        yield "location", alert.netbox.room.location.id
        yield "organization", alert.netbox.organization.id
    if isinstance(subject, Netbox):
        yield "host_url", subject.get_absolute_url()
    elif isinstance(subject, Interface):
        yield "interface", subject.ifname

    for tag, value in _config.get_always_add_tags().items():
        yield tag, value
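
Because the function yields (tag_name, tag_value) pairs, the tag dictionary mentioned in the docstring can be built directly with dict(); a minimal sketch:

# Collect the generated pairs into the tag dictionary an Argus incident expects.
tags = dict(build_tags_from(alert))
# e.g. {"event_type": "boxState", "host": "sw.example.org", ...}  (values are illustrative)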
Example No. 6
def test_short_outage(localhost, db):
    plugin_registry['snmpcheck'] = snmpcheck.SnmpCheck
    job = JobHandler('snmpcheck', localhost.pk, plugins=['snmpcheck'])
    agent = Mock()
    job.agent = agent
    job._create_agentproxy = Mock()
    job._destroy_agentproxy = Mock()
    agent.walk.return_value = defer.succeed(False)
    with pytest.raises(SuggestedReschedule):
        yield job.run()
    assert agent.walk.called
    assert localhost.info_set.filter(key=snmpcheck.INFO_KEY_NAME,
                                     variable=snmpcheck.INFO_VARIABLE_NAME,
                                     value="down").exists()
    assert (EventQueue.objects.filter(
        source_id='ipdevpoll',
        target_id='eventEngine',
        event_type='snmpAgentState',
        netbox_id=localhost.pk,
        state=EventQueue.STATE_START,
    ).count() == 1)
    with pytest.raises(SuggestedReschedule):
        yield job.run()
    assert localhost.info_set.filter(key=snmpcheck.INFO_KEY_NAME,
                                     variable=snmpcheck.INFO_VARIABLE_NAME,
                                     value="down").exists()
    assert (EventQueue.objects.filter(
        source_id='ipdevpoll',
        target_id='eventEngine',
        event_type='snmpAgentState',
        netbox_id=localhost.pk,
        state=EventQueue.STATE_START,
    ).count() == 2)

    # now fake an AlertHist entry from event engine
    AlertHistory(
        source_id='ipdevpoll',
        event_type_id='snmpAgentState',
        netbox_id=localhost.pk,
        start_time=datetime.now(),
        end_time=INFINITY,
        value=100,
        severity=3,
    ).save()

    # and make sure snmpcheck tries to resolve it when the box is up
    agent.walk.return_value = defer.succeed(True)
    yield job.run()
    assert localhost.info_set.filter(key=snmpcheck.INFO_KEY_NAME,
                                     variable=snmpcheck.INFO_VARIABLE_NAME,
                                     value="up").exists()
    assert (EventQueue.objects.filter(
        source_id='ipdevpoll',
        target_id='eventEngine',
        event_type='snmpAgentState',
        netbox_id=localhost.pk,
        state=EventQueue.STATE_END,
    ).count() == 1)
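
In this test, as in Examples No. 1 and No. 7, an alert that is still open is stored with end_time=INFINITY. Assuming that convention, the still-open snmpAgentState alerts for the box could be fetched with an ordinary query; this sketch is illustrative and not part of the test:

open_alerts = AlertHistory.objects.filter(
    netbox_id=localhost.pk,
    event_type_id='snmpAgentState',
    end_time=INFINITY,   # assumed marker for an alert that has not been resolved yet
)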
Example No. 7
def alerthist_with_two_messages(localhost):
    alert = AlertHistory(
        source_id='ipdevpoll',
        netbox=localhost,
        start_time=datetime.now() - timedelta(hours=1),
        end_time=INFINITY,
        event_type_id='boxState',
        value=100,
        severity=3,
    )
    alert.save()
    msg1 = AlertHistoryMessage(
        alert_history=alert,
        state=AlertHistoryMessage.STATE_START,
        type='sms',
        language='en',
        message='Problem detected',
    )
    msg1.save()
    msg2 = AlertHistoryMessage(
        alert_history=alert,
        state=AlertHistoryMessage.STATE_END,
        type='sms',
        language='en',
        message='Problem resolved',
    )
    msg2.save()

    yield alert
    alert.delete()
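
Like Example No. 3, this reads as a pytest fixture. A hypothetical test built on it, using only the field names visible above:

def test_fixture_attaches_two_messages(alerthist_with_two_messages):
    # Query the messages back through the same alert_history foreign key the fixture used.
    msgs = AlertHistoryMessage.objects.filter(alert_history=alerthist_with_two_messages)
    assert msgs.count() == 2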
Example No. 8
    def setUp(self):
        self.alert1 = AlertHistory(start_time=datetime(2014, 1, 1),
                                   end_time=datetime(2014, 1, 3))
        self.alert2 = AlertHistory(start_time=datetime(2014, 1, 20),
                                   end_time=datetime(2014, 1, 21))
        self.maintenance0 = AlertHistory(start_time=datetime(2013, 12, 31),
                                         end_time=datetime(2014, 1, 1))
        self.maintenance1 = AlertHistory(start_time=datetime(2014, 1, 2),
                                         end_time=datetime(2014, 1, 5))
        self.maintenance2 = AlertHistory(start_time=datetime(2014, 1, 7),
                                         end_time=datetime(2014, 1, 13))
        self.maintenance3 = AlertHistory(start_time=datetime(2014, 1, 15),
                                         end_time=datetime(2014, 1, 21))
        self.maintenance5 = AlertHistory(start_time=datetime(2014, 1, 29),
                                         end_time=datetime(2014, 2, 2))
        self.downtime1 = self.alert1.end_time - self.alert1.start_time
        self.downtime2 = self.alert2.end_time - self.alert2.start_time
        self.alerts = [self.alert1, self.alert2]