示例#1
0
文件: event.py 项目: crudbug/canopsis
def event_processing(event, ctx=None, cm=None, gm=None, *args, **kwargs):
    """Run the tasks of every graph node bound to the event's entity.

    :param dict event: event to process.
    :param dict ctx: processing context handed to each vertice task.
    :param Context cm: context manager (per-scope singleton by default).
    :param GraphManager gm: graph manager (per-scope singleton by default).
    :return: the input event.
    """

    ctx = {} if ctx is None else ctx
    cm = singleton_per_scope(Context) if cm is None else cm
    gm = singleton_per_scope(GraphManager) if gm is None else gm

    entity = cm.get_entity(event)

    if entity is not None:
        # find every tasked vertice whose bound entity matches the event's
        vertices = gm.get_elts(
            info={BaseTaskedVertice.ENTITY: cm.get_entity_id(entity)},
            cls=BaseTaskedVertice
        )

        for vertice in vertices:
            vertice.process(event=event, ctx=ctx, *args, **kwargs)

    return event
示例#2
0
    def __init__(
        self,
        storage,
        _id=None,
        name=None,
        namespace='events',
        record=None,
        logging_level=None
    ):
        """Build a selector from its stored record.

        :param storage: storage backend; its ``account._id`` is used to
            derive the selector id.
        :param _id: explicit selector id. NOTE(review): when ``_id`` is
            given it is never assigned to ``self._id`` here — presumably
            the record dump carries it; confirm.
        :param str name: selector name; combined with the storage account
            to build ``self._id`` when ``_id`` is not given.
        :param str namespace: namespace of the events handled.
        :param record: record whose ``dump()`` feeds ``self.load``; must
            not be None since ``record.dump()`` is called unconditionally.
        :param logging_level: optional level applied to the logger.
        """
        self.type = 'selector'
        self.storage = storage

        if name and not _id:
            self._id = self.type + "." + storage.account._id + "." + name

        # Default vars
        self.namespace = namespace

        self.dostate = True

        self.data = {}
        self.mfilter = {}
        self.include_ids = []
        self.exclude_ids = []
        self.changed = False
        self.rk = None

        # Output template per state, for produced event output replacement
        self.template_replace = {
            0: '[OFF]',
            1: '[MINOR]',
            2: '[MAJOR]',
            3: '[CRITICAL]',
        }

        # Compute produced event state purpose
        self.states_labels = {
            'off': 0,
            'minor': 1,
            'major': 2,
            'critical': 3
        }

        self.logger = getLogger('Selector')
        self.context = singleton_per_scope(Context)
        self.pbehavior = singleton_per_scope(PBehaviorManager)
        # Canopsis filter management for mongo
        self.cfilter = Filter()

        if logging_level:
            self.logger.setLevel(logging_level)

        self.load(record.dump())
示例#3
0
def serie_processing(engine, event, manager=None, logger=None, **_):
    """Engine work processing task.

    Computes the serie points for *event* and stores them as perfdata on
    a metric entity derived from the event fields.
    """

    if manager is None:
        manager = singleton_per_scope(Serie)

    # Gather whichever metric metadata the event provides.
    metric_meta = {'type': 'GAUGE'}
    for key in ('unit', 'min', 'max', 'warn', 'crit'):
        value = event.get(key)
        if value is not None:
            metric_meta[key] = value

    # Describe the metric entity produced by this engine.
    entity = {
        'type': 'metric',
        'connector': 'canopsis',
        'connector_name': engine.name,
        'component': event['component'],
        'resource': event['resource'],
        'name': event['crecord_name']
    }

    entity_id = manager[Serie.CONTEXT_MANAGER].get_entity_id(entity)

    # Publish the computed points without caching.
    manager[Serie.PERFDATA_MANAGER].put(
        entity_id,
        points=manager.calculate(event),
        meta=metric_meta,
        cache=False
    )
示例#4
0
def beat_processing(engine, alertsmgr=None, logger=None, **kwargs):
    """Engine beat task: reload the alerts configuration, then resolve alarms."""
    if alertsmgr is None:
        alertsmgr = singleton_per_scope(Alerts)

    # refresh configuration before each resolution pass
    alertsmgr.config = alertsmgr.load_config()
    alertsmgr.resolve_alarms()
示例#5
0
    def process(self, event, publisher=None, manager=None, source=None, logger=None, **kwargs):

        """Process the event on this vertice and propagate any state change.

        When the vertice state changes, every edge targeting this vertice
        takes the new state, a new event is published (when a publisher is
        given) and the vertice is saved.

        :param dict event: event being processed.
        :param publisher: AMQP publisher used to emit the new event; may be
            None, in which case no event is published.
        :param TopologyManager manager: topology manager to use (per-scope
            singleton by default).
        :param source: source element id, forwarded to the parent call and
            to the generated event.
        :param logger: logger forwarded to the parent call.
        :return: result of the parent ``process`` call.
        """

        if manager is None:
            manager = singleton_per_scope(TopologyManager)

        # save old state
        old_state = self.state
        # process task
        result = super(TopoVertice, self).process(
            event=event, publisher=publisher, manager=manager, source=source, logger=logger, **kwargs
        )
        # compare old state and new state
        if self.state != old_state:
            # update edges
            targets_by_edge = manager.get_targets(ids=self.id, add_edges=True)
            for edge_id in targets_by_edge:
                edge, _ = targets_by_edge[edge_id]
                # update edge state
                edge.state = self.state
                edge.save(manager=manager)
            # build a new event carrying the new state
            new_event = self.get_event(state=self.state, source=source)
            # publish a new event
            if publisher is not None:
                publish(event=new_event, publisher=publisher)
            # save self
            self.save(manager=manager)

        return result
示例#6
0
def exports(ws):
    """Expose event web routes on *ws*.

    Routes: POST /api/v2/event, POST/PUT event (with optional
    cross-domain forwarding), GET eventslog/count.
    """
    el_kwargs = {'el_storage': EventsLog.provide_default_basics()}
    manager = singleton_per_scope(EventsLog, kwargs=el_kwargs)

    @ws.application.post('/api/v2/event')
    def send_event_post():
        """Forward the JSON request body to send_events."""
        try:
            events = request.json
        except ValueError as verror:
            return gen_json_error(
                {'description': 'malformed JSON : {0}'.format(verror)},
                HTTP_ERROR)

        if events is None:
            # BUGFIX: was the HTTPError class; use the HTTP_ERROR status
            # constant, consistently with the malformed-JSON branch above.
            return gen_json_error({'description': 'nothing to return'},
                                  HTTP_ERROR)

        return send_events(ws, events)

    @route(ws.application.post, name='event', payload=['event', 'url'])
    @route(ws.application.put, name='event', payload=['event', 'url'])
    def send_event(event, url=None):
        """Send an event locally, or cross-domain when *url* is given."""
        if ws.enable_crossdomain_send_events and url is not None:
            payload = {'event': json.dumps(event)}

            response = requests.post(url, data=payload)

            # NOTE(review): this branch looks inverted (data is parsed on
            # non-200 and HTTPError returned on 200) — kept as-is pending
            # confirmation of the remote API's status-code convention.
            if response.status_code != 200:
                api_response = json.loads(response.text)

                return (api_response['data'], api_response['total'])

            else:
                return HTTPError(response.status_code, response.text)

        else:
            return send_events(ws, event)

    @route(ws.application.get,
           name='eventslog/count',
           payload=['tstart', 'tstop', 'limit', 'select'])
    def get_event_count_per_day(tstart, tstop, limit=100, select=None):
        """ get eventslog log count for each days in a given period
            :param tstart: timestamp of the begin period
            :param tstop: timestamp of the end period
            :param limit: limit the count number per day
            :param select: filter for eventslog collection
            :return: list in which each item contains an interval and the
            related count
            :rtype: list
        """
        # avoid the shared mutable-default-argument pitfall
        if select is None:
            select = {}

        results = manager.get_eventlog_count_by_period(tstart,
                                                       tstop,
                                                       limit=limit,
                                                       query=select)

        return results
示例#7
0
def init_managers():
    """Build and return the per-scope PBehaviorManager singleton."""
    logger, storage = PBehaviorManager.provide_default_basics()
    return singleton_per_scope(
        PBehaviorManager,
        kwargs={'logger': logger, 'pb_storage': storage}
    )
示例#8
0
    def set_entity(self, entity_id, *args, **kwargs):
        """Bind the entity to this vertice and refresh its check state.

        :param entity_id: entity id to bind; when None, the state is left
            untouched.
        """
        super(TopoVertice, self).set_entity(entity_id=entity_id, *args, **kwargs)
        # update entity state
        if entity_id is not None:
            cm = singleton_per_scope(CheckManager)
            state = cm.state(ids=entity_id)
            # fall back on the default state when the entity has none
            if state is None:
                state = TopoVertice.DEFAULT_STATE
            self.info[TopoVertice.STATE] = state
示例#9
0
def init_managers():
    """Build and return the per-scope PBehaviorManager singleton."""
    config, logger, storage = PBehaviorManager.provide_default_basics()
    return singleton_per_scope(
        PBehaviorManager,
        kwargs={'config': config, 'logger': logger, 'pb_storage': storage}
    )
示例#10
0
def beat_processing(
    engine,
    sessionmgr=None,
    eventmgr=None,
    usermgr=None,
    alertsmgr=None,
    logger=None,
    **kwargs
):
    """Engine beat task computing session and alarm statistics.

    Session statistics always run; alarm statistics run under an engine
    lock so only one instance computes them at a time.
    """
    # fall back on per-scope singletons for every missing manager
    if sessionmgr is None:
        sessionmgr = singleton_per_scope(Session)
    if eventmgr is None:
        eventmgr = singleton_per_scope(EventMetricProducer)
    if usermgr is None:
        usermgr = singleton_per_scope(UserMetricProducer)
    if alertsmgr is None:
        alertsmgr = singleton_per_scope(Alerts)

    storage = alertsmgr[alertsmgr.ALARM_STORAGE]

    session_stats(usermgr, sessionmgr, logger)

    with engine.Lock(engine, 'alarm_stats_computation') as lock:
        if lock.own():
            opened_alarm_stats(eventmgr, alertsmgr, storage, logger)
            resolved_alarm_stats(eventmgr, usermgr, alertsmgr, storage, logger)
示例#11
0
def event_processing(event, veventmanager=None, **params):
    """Add vevent information in VEventManager from input event.

    :param dict event: event to process; must carry a
        ``VEventManager.VEVENT`` entry.
    :param VEventManager veventmanager: manager to store the vevent in;
        built lazily on the default vevent storage when None.
    """

    # initialize veventmanager
    if veventmanager is None:
        storage = Middleware.get_middleware_by_uri(
            VEventManager.VEVENT_COLL_URL)
        veventmanager = singleton_per_scope(VEventManager,
                                            kwargs={'vevent_storage': storage})
    context = singleton_per_scope(ContextGraph)
    # get source from the event
    source = context.get_id(event)
    # get vevent from the event
    vevent = event[VEventManager.VEVENT]
    # add event information into veventmanager
    veventmanager.put(source=source, vevents=[vevent], cache=True)
示例#12
0
    def set_entity(self, entity_id, *args, **kwargs):
        """Bind the topology to an entity, deriving a default one if needed.

        :param entity_id: entity id to bind; when None and no entity is
            already set, an entity is resolved from the topology's own
            event.
        """
        super(Topology, self).set_entity(entity_id=entity_id, *args, **kwargs)

        # set default entity if entity_id is None
        if entity_id is None and self.entity is None:
            # resolve the entity from this topology's event
            ctxm = singleton_per_scope(Context)
            event = self.get_event(source=0, state=0)
            entity = ctxm.get_entity(event)
            entity_id = ctxm.get_entity_id(entity)
            self.entity = entity_id
示例#13
0
def init_managers():
    """Build and return the per-scope MetaAlarmRuleManager singleton."""
    logger, collection = MetaAlarmRuleManager.provide_default_basics()
    return singleton_per_scope(
        MetaAlarmRuleManager,
        kwargs={'logger': logger, 'ma_rule_collection': collection}
    )
示例#14
0
def init_managers():
    """Build and return the per-scope PBehaviorManager singleton."""
    config, logger, collection = PBehaviorManager.provide_default_basics()
    manager_kwargs = {
        'config': config,
        'logger': logger,
        'pb_collection': collection
    }
    return singleton_per_scope(PBehaviorManager, kwargs=manager_kwargs)
示例#15
0
    def get_event(self, *args, **kwargs):
        """Build the node event, filling component and resource fields.

        The component is the id of the first graph containing this node
        (if any); the resource is the node id itself.
        """
        result = super(TopoNode, self).get_event(*args, **kwargs)

        manager = singleton_per_scope(TopologyManager)
        # take the first graph containing this node, if there is one
        for graph in manager.get_graphs(elts=self.id):
            result["component"] = graph.id
            break
        result["resource"] = self.id

        return result
示例#16
0
    def save(self, context=None, *args, **kwargs):
        """Save the topology, then register it in the context.

        :param Context context: context to register into (per-scope
            singleton by default).
        """
        super(Topology, self).save(*args, **kwargs)

        # use global context if input context is None
        if context is None:
            context = singleton_per_scope(Context)
        # resolve this topology's own entity from its event
        event = self.get_event()
        entity = context.get_entity(event)
        ctx, _id = context.get_entity_context_and_name(entity=entity)
        entity = {Context.NAME: _id}
        # put the topology in the context by default
        context.put(_type=self.type, entity=entity, context=ctx)
示例#17
0
    def __init__(self, logger, pb_storage):
        """
        :param logger: logger used by the manager and by the context graph
        :param pb_storage: PBehavior Storage object
        """
        super(PBehaviorManager, self).__init__()
        kwargs = {"logger": logger}
        self.context = singleton_per_scope(ContextGraph, kwargs=kwargs)
        self.logger = logger
        self.pb_storage = pb_storage

        # direct collection access for default pbehaviors
        self.pb_store = MongoCollection(
            MongoStore.get_default().get_collection('default_pbehavior'))

        # pbehaviors currently considered active — presumably maintained
        # by the manager's computation methods; confirm
        self.currently_active_pb = set()
示例#18
0
def beat_processing(engine, manager=None, logger=None, **_):
    """Engine beat processing task.

    Publishes every serie due at the current time to the engine queue,
    under an engine lock so a single instance does the fetching.
    """

    if manager is None:
        manager = singleton_per_scope(Serie)

    with engine.Lock(engine, 'serie_fetching') as lock:
        if lock.own():
            now = time()
            for serie_event in manager.get_series(now):
                publish(
                    publisher=engine.amqp,
                    event=serie_event,
                    rk=engine.amqp_queue,
                    exchange='amq.direct',
                    logger=logger
                )
示例#19
0
文件: event.py 项目: crudbug/canopsis
    def entity(self, value):
        """Change of entity id and update state.

        :param value: new entity (id) to use; a dict entity is resolved
            to its id through the Context manager.
        :type value: dict or str
        """

        if value is not None:
            if isinstance(value, dict):
                # resolve the dict entity to its id
                ctx = singleton_per_scope(Context)
                value = ctx.get_entity_id(value)

            # update entity
            self.info[BaseTaskedVertice.ENTITY] = value
        # call specific set entity
        self.set_entity(value)
示例#20
0
def event_processing(engine, event, alertsmgr=None, logger=None, **kwargs):
    """Archive the incoming event with utf-8 encoded keys and values.

    Encoding is best effort: keys or values that cannot be encoded
    (non-strings, bad unicode) are kept as-is.

    :param engine: consuming engine (unused here).
    :param dict event: event to archive.
    :param Alerts alertsmgr: alerts manager (per-scope singleton by
        default).
    """
    if alertsmgr is None:
        alertsmgr = singleton_per_scope(Alerts)

    encoded_event = {}

    for k, v in event.items():
        # narrowed from historical bare excepts: only encoding-related
        # failures are expected and deliberately tolerated here
        try:
            k = k.encode('utf-8')
        except (AttributeError, UnicodeError):
            pass
        try:
            v = v.encode('utf-8')
        except (AttributeError, UnicodeError):
            pass
        encoded_event[k] = v

    alertsmgr.archive(encoded_event)
示例#21
0
def exports(ws):
    """Expose session web routes: account/me, keepalive, sessionstart."""

    session_manager = singleton_per_scope(Session)

    @route(ws.application.get, name='account/me', adapt=False)
    def get_me():
        """Return the current user account, without internal ids."""
        user = get_user()
        user.pop('id', None)
        user.pop('eid', None)
        return user

    @route(ws.application.get, payload=['username'])
    def keepalive(username):
        """Keep the user's session alive."""
        session_manager.keep_alive(username)

    @route(ws.application.get, payload=['username'])
    def sessionstart(username):
        """Start a session for the user."""
        session_manager.session_start(username)
示例#22
0
 def __init__(self, config, logger, pb_storage):
     """
     :param dict config: configuration; its ``PBH_CAT`` section may
         provide ``default_timezone`` (defaults to Europe/Paris)
     :param logger: logger used by the manager and by the context graph
     :param pb_storage: PBehavior Storage object
     :raises pytz.UnknownTimeZoneError: if the configured timezone is
         invalid
     """
     super(PBehaviorManager, self).__init__()
     kwargs = {"logger": logger}
     self.context = singleton_per_scope(ContextGraph, kwargs=kwargs)
     self.logger = logger
     self.pb_storage = pb_storage
     self.config = config
     self.config_data = self.config.get(self.PBH_CAT, {})
     self.default_tz = self.config_data.get("default_timezone",
                                            "Europe/Paris")
     # this line allows us to raise pytz.UnknownTimeZoneError early,
     # if the timezone defined in the pbehavior configuration file is wrong
     pytz.timezone(self.default_tz)
     self.pb_store = MongoCollection(MongoStore.get_default().get_collection('default_pbehavior'))
     self.currently_active_pb = set()
示例#23
0
def interpret(condition, **kwargs):
    """Interpret a search filter without instantiating Interpreter directly.

    The Interpreter is obtained as a per-scope singleton; *kwargs* are
    handed to its constructor the first time it is built.

    :param str condition: search condition to interpret.
    :param dict kwargs: Interpreter constructor arguments (first call
      only).

    :return: tuple of the corresponding mongo filter and a string
      describing the scope ('all' or 'this').
    :rtype: tuple

    :raises ValueError: if the expression is incorrect or if the parser
      generation failed (bad grammar).
    """

    interpreter = singleton_per_scope(Interpreter, kwargs=kwargs)
    return interpreter.interpret(condition)
示例#24
0
def interpret(condition, **kwargs):
    """Shortcut around ``Interpreter.interpret`` for search filters.

    Uses the per-scope Interpreter singleton, built with *kwargs* on its
    first use.

    :param str condition: search condition to interpret.
    :param dict kwargs: passed to the Interpreter constructor the first
      time it is instantiated.

    :return: (mongo filter, scope string — 'all' or 'this').
    :rtype: tuple

    :raises ValueError: on an incorrect expression or failed parser
      generation (bad grammar).
    """

    singleton = singleton_per_scope(Interpreter, kwargs=kwargs)

    return singleton.interpret(condition)
示例#25
0
 def __init__(self, config, logger, pb_storage):
     """
     :param dict config: configuration; its ``PBH_CAT`` section may
         provide ``default_timezone`` (defaults to Europe/Paris)
     :param logger: logger used by the manager and by the context graph
     :param pb_storage: PBehavior Storage object
     :raises pytz.UnknownTimeZoneError: if the configured timezone is
         invalid
     """
     super(PBehaviorManager, self).__init__()
     kwargs = {"logger": logger}
     self.context = singleton_per_scope(ContextGraph, kwargs=kwargs)
     self.logger = logger
     self.pb_storage = pb_storage
     self.config = config
     self.config_data = self.config.get(self.PBH_CAT, {})
     self.default_tz = self.config_data.get("default_timezone",
                                            "Europe/Paris")
     # this line allows us to raise pytz.UnknownTimeZoneError early,
     # if the timezone defined in the pbehavior configuration file is wrong
     pytz.timezone(self.default_tz)
     self.pb_store = MongoCollection(
         MongoStore.get_default().get_collection('default_pbehavior'))
     self.currently_active_pb = set()
示例#26
0
    def load(self, serializedelts, graphmgr=None):
        """Load serialized graph elements into the graph manager.

        Serialized elements matching existing graph elements update them
        in place.

        :param serializedelts: serialized element(s) to load; a single
            dict is accepted and wrapped in a list.
        :type serializedelts: dict or list of dict
        :param GraphManager graphmgr: graph manager to use (per-scope
            singleton by default).
        :return: the loaded GraphElements.
        """

        # normalize a single serialized element into a list
        if isinstance(serializedelts, dict):
            serializedelts = [serializedelts]

        manager = graphmgr
        if manager is None:
            manager = singleton_per_scope(GraphManager)

        return manager.put_elts(serializedelts)
示例#27
0
def exports(ws):
    """
    Expose session routes: account/me, keepalive, sessionstart.
    """

    # session manager backed by the raw collection of the session storage
    kwargs = {
        'collection': Middleware.get_middleware_by_uri(
            Session.SESSION_STORAGE_URI
        )._backend
    }
    session_manager = singleton_per_scope(Session, kwargs=kwargs)

    @route(ws.application.get, name='account/me', adapt=False)
    def get_me():
        """
        Return the user account, stripped of internal ids.
        """
        user = get_user()
        user.pop('id', None)
        user.pop('eid', None)

        return user

    @route(ws.application.get, payload=['username'])
    def keepalive(username):
        """
        Maintain the current session.
        """
        session_manager.keep_alive(username)

    @ws.application.get('/sessionstart')
    def sessionstart():
        """
        Start a new session.

        The username is read from the request payload; None is passed
        through when it is missing.
        """
        username = request.get('username', None)

        session_manager.session_start(username)
        return {}
示例#28
0
def event_processing(
        engine, event, manager=None, logger=None, ctx=None, tm=None, cm=None,
        **kwargs
):
    """Process input event in getting topology nodes bound to input event
    entity.

    Once topology nodes are found, their processing is executed.

    :param dict event: event to process.
    :param Engine engine: engine which consumes the event.
    :param TopologyManager manager: topology manager to use.
    :param Logger logger: logger to use in this task.
    :param Context ctx: context manager (per-scope singleton by default).
    :param TopologyManager tm: topology manager (singleton by default).
    :param CheckManager cm: check manager (singleton by default).
    :return: the input event.
    """

    # initialize ctx
    if ctx is None:
        ctx = singleton_per_scope(Context)

    if tm is None:
        tm = singleton_per_scope(TopologyManager)

    if cm is None:
        cm = singleton_per_scope(CheckManager)

    event_type = event[Event.TYPE]

    # apply processing only in case of check event
    if event_type in cm.types:
        # get source type
        source_type = event[Event.SOURCE_TYPE]
        # in case of topology node
        if source_type in [TopoNode.TYPE, Topology.TYPE]:
            # get entity and entity id
            entity = ctx.get_entity(event)
            entity_id = ctx.get_entity_id(entity)
            elt_id = ctx.get_name(entity_id)
            logger.debug(u"elt_id {0}".format(elt_id))
            # process all targets
            elt = tm.get_elts(ids=elt_id)
            if elt is not None:
                targets = tm.get_targets(ids=elt_id)
                logger.debug(u"targets {0}".format(targets))
                # process and save all targets
                for target in targets:
                    target.process(
                        event=event, publisher=engine.amqp,
                        manager=tm, source=elt_id,
                        logger=logger,
                        **kwargs
                    )

        else:  # in case of entity event
            # get entity and entity id
            entity = ctx.get_entity(event)
            if entity is not None:
                entity_id = ctx.get_entity_id(entity)
                logger.debug(u"entity_id {0}".format(entity_id))
                elts = tm.get_elts(info={TopoNode.ENTITY: entity_id})
                logger.debug(u"elts {0}".format(elts))
                # process all elts
                for elt in elts:
                    elt.process(
                        event=event, publisher=engine.amqp,
                        manager=tm, logger=logger,
                        **kwargs
                    )

    return event
示例#29
0
    def work(self, event, *xargs, **kwargs):
        """Apply the configured filter rules to the incoming event.

        Rules whose mongo filter matches the event queue their actions; a
        'drop' action applies what is queued and drops immediately, and a
        rule flagged 'break' stops further rule evaluation. When no rule
        matched, the configured ``default_action`` ('pass' by default)
        decides whether the event is forwarded or dropped.

        :param dict event: event to filter.
        :return: the event (with refreshed 'rk'/'_id') or DROP.
        """

        rk = get_routingkey(event)
        default_action = self.configuration.get('default_action', 'pass')

        # list of supported actions

        rules = self.configuration.get('rules', [])
        to_apply = []

        self.logger.debug(u'event {}'.format(event))

        # When list configuration then check black and
        # white lists depending on json configuration
        for filterItem in rules:
            actions = filterItem.get('actions')
            name = filterItem.get('name', 'no_name')

            self.logger.debug(u'rule {}'.format(filterItem))
            self.logger.debug(u'filter is {}'.format(filterItem['mfilter']))
            # Try filter rules on current event
            if filterItem['mfilter'] and check(filterItem['mfilter'], event):
                self.logger.debug(
                    u'Event: {}, filter matches'.format(event.get('rk', event))
                )

                # pbehavior in/out lists can veto the rule for this entity
                if 'pbehaviors' in filterItem:
                    pbehaviors = filterItem.get('pbehaviors', {})
                    list_in = pbehaviors.get('in', [])
                    list_out = pbehaviors.get('out', [])

                    if list_in or list_out:
                        pbm = singleton_per_scope(PBehaviorManager)
                        cm = singleton_per_scope(ContextGraph)
                        entity = cm.get_entity(event)
                        entity_id = cm.get_entity_id(entity)

                        result = pbm.check_pbehaviors(
                            entity_id, list_in, list_out
                        )

                        if not result:
                            break

                for action in actions:
                    # a 'drop' action applies queued actions, then drops
                    if action['type'].lower() == 'drop':
                        self.apply_actions(event, to_apply)
                        return self.a_drop(event, None, name)
                    to_apply.append((name, action))

                if filterItem.get('break', 0):
                    self.logger.debug(
                        u' + Filter {} broke the next filters processing'
                        .format(
                            filterItem.get('name', 'filter')
                        )
                    )
                    break

        if len(to_apply):
            if self.apply_actions(event, to_apply):
                self.logger.debug(
                    u'Event before sent to next engine: %s' % event
                )
                event['rk'] = event['_id'] = get_routingkey(event)
                return event

        # No rules matched
        if default_action == 'drop':
            self.logger.debug("Event '%s' dropped by default action" % (rk))
            self.drop_event_count += 1
            return DROP

        self.logger.debug("Event '%s' passed by default action" % (rk))
        self.pass_event_count += 1

        self.logger.debug(u'Event before sent to next engine: %s' % event)
        event['rk'] = event['_id'] = get_routingkey(event)
        return event
示例#30
0
def exports(ws):
    """Expose perfdata web routes: count, get, put, delete, period,
    internal.

    NOTE(review): three handlers below share the name ``perfdata``; each
    ``@route`` call registers its function at decoration time, so the
    later ``def`` statements presumably only shadow the local name —
    confirm against the route decorator's behavior.
    """

    manager = singleton_per_scope(PerfData)

    @route(ws.application.post, payload=['metric_id', 'timewindow', 'meta'])
    def perfdata_count(metric_id, timewindow=None, meta=None):
        """Count the perfdata points of a metric over an optional
        timewindow."""
        if timewindow is not None:
            timewindow = TimeWindow(**timewindow)

        result = manager.count(
            metric_id=metric_id, timewindow=timewindow, meta=meta
        )

        return result

    @route(
        ws.application.post,
        payload=[
            'metric_id', 'with_meta',
            'limit', 'skip', 'period',
            'timewindow', 'period', 'timeserie', 'sliding_time'
        ]
    )
    def perfdata(
        metric_id, timewindow=None, period=None, with_meta=True,
        limit=0, skip=0, timeserie=None, meta=None, sliding_time=False
    ):
        """Fetch points (and optionally meta) for one or several metrics,
        optionally aggregated through a timeserie."""
        if timewindow is not None:
            timewindow = TimeWindow(**timewindow)

        if timeserie is not None:
            # the period may come embedded in the timeserie payload
            if period is None:
                period = timeserie.pop('period', None)

            timeserie = TimeSerie(**timeserie)

            if period is not None:
                timeserie.period = Period(**period)

        # accept a single metric id as well as a list
        if not isinstance(metric_id, list):
            metrics = [metric_id]

        else:
            metrics = metric_id

        result = []

        for metric_id in metrics:
            # meta -> _meta
            pts, _meta = manager.get(
                metric_id=metric_id, with_meta=True,
                timewindow=timewindow, limit=limit, skip=skip,
                meta=meta, sliding_time=sliding_time
            )

            _meta['data_id'] = metric_id

            if timeserie is not None:
                pts = timeserie.calculate(pts, timewindow, meta=_meta)

            if with_meta:
                result.append({
                    'points': pts,
                    'meta': _meta
                })

            else:
                result.append({
                    'points': pts
                })

        return (result, len(result))

    @route(ws.application.put, payload=['metric_id', 'points', 'meta'])
    def perfdata(metric_id, points, meta=None):
        """Store points for a metric; returns the stored points."""
        manager.put(metric_id=metric_id, points=points, meta=meta)

        result = points

        return result

    @route(ws.application.delete, payload=['metric_id', 'timewindow', 'meta'])
    def perfdata(metric_id, timewindow=None, meta=None):
        """Remove the perfdata of a metric over an optional timewindow."""
        if timewindow is not None:
            timewindow = TimeWindow(**timewindow)

        manager.remove(metric_id=metric_id, timewindow=timewindow, meta=meta)

        result = None

        return result

    @route(ws.application.get)
    def perfdata_period(metric_id):
        """Return the period of a metric."""
        result = manager.get_period(metric_id)

        return result

    @route(ws.application.get)
    def perfdata_internal(metric):
        """Tell whether a metric is internal."""
        result = manager.is_internal(metric)

        return result
示例#31
0
    def work(self, event, *xargs, **kwargs):
        """Apply the configured filter rules to the incoming event.

        Rules whose mongo filter matches the event queue their actions; a
        'drop' action applies what is queued and drops immediately, and a
        rule flagged 'break' stops further rule evaluation. When no rule
        matched, the configured ``default_action`` ('pass' by default)
        decides whether the event is forwarded or dropped.

        :param dict event: event to filter.
        :return: the event (with refreshed 'rk'/'_id') or DROP.
        """

        rk = get_routingkey(event)
        default_action = self.configuration.get('default_action', 'pass')

        # list of supported actions

        rules = self.configuration.get('rules', [])
        to_apply = []

        self.logger.debug(u'event {}'.format(event))

        # When list configuration then check black and
        # white lists depending on json configuration
        for filterItem in rules:
            actions = filterItem.get('actions')
            name = filterItem.get('name', 'no_name')

            self.logger.debug(u'rule {}'.format(filterItem))
            self.logger.debug(u'filter is {}'.format(filterItem['mfilter']))
            # Try filter rules on current event
            if filterItem['mfilter'] and check(filterItem['mfilter'], event):
                self.logger.debug(u'Event: {}, filter matches'.format(
                    event.get('rk', event)))

                # pbehavior in/out lists can veto the rule for this entity
                if 'pbehaviors' in filterItem:
                    pbehaviors = filterItem.get('pbehaviors', {})
                    list_in = pbehaviors.get('in', [])
                    list_out = pbehaviors.get('out', [])

                    if list_in or list_out:
                        pbm = singleton_per_scope(PBehaviorManager)
                        cm = singleton_per_scope(ContextGraph)
                        entity = cm.get_entity(event)
                        entity_id = cm.get_entity_id(entity)

                        result = pbm.check_pbehaviors(entity_id, list_in,
                                                      list_out)

                        if not result:
                            break

                for action in actions:
                    # a 'drop' action applies queued actions, then drops
                    if action['type'].lower() == 'drop':
                        self.apply_actions(event, to_apply)
                        return self.a_drop(event, None, name)
                    to_apply.append((name, action))

                if filterItem.get('break', 0):
                    self.logger.debug(
                        u' + Filter {} broke the next filters processing'.
                        format(filterItem.get('name', 'filter')))
                    break

        if len(to_apply):
            if self.apply_actions(event, to_apply):
                self.logger.debug(u'Event before sent to next engine: %s' %
                                  event)
                event['rk'] = event['_id'] = get_routingkey(event)
                return event

        # No rules matched
        if default_action == 'drop':
            self.logger.debug("Event '%s' dropped by default action" % (rk))
            self.drop_event_count += 1
            return DROP

        self.logger.debug("Event '%s' passed by default action" % (rk))
        self.pass_event_count += 1

        self.logger.debug(u'Event before sent to next engine: %s' % event)
        event['rk'] = event['_id'] = get_routingkey(event)
        return event
示例#32
0
def beat_processing(
    engine,
    sessionmgr=None,
    eventmgr=None,
    usermgr=None,
    alertsmgr=None,
    logger=None,
    **kwargs
):
    if sessionmgr is None:
        sessionmgr = singleton_per_scope(Session)

    if eventmgr is None:
        eventmgr = singleton_per_scope(EventMetricProducer)

    if usermgr is None:
        usermgr = singleton_per_scope(UserMetricProducer)

    if alertsmgr is None:
        alertsmgr = singleton_per_scope(Alerts)

    storage = alertsmgr[alertsmgr.ALARM_STORAGE]
    events = sessionmgr.duration()

    with engine.Lock(engine, 'alarm_stats_computation') as l:
        if l.own():
            resolved_alarms = alertsmgr.get_alarms(
                resolved=True,
                exclude_tags='stats'
            )

            for data_id in resolved_alarms:
                for docalarm in resolved_alarms[data_id]:
                    docalarm[storage.DATA_ID] = data_id
                    alarm = docalarm[storage.VALUE]
                    alarm_ts = docalarm[storage.TIMESTAMP]
                    alarm_events = alertsmgr.get_events(docalarm)

                    solved_delay = alarm['resolved'] - alarm_ts
                    events.append(eventmgr.alarm_solved_delay(solved_delay))

                    if alarm['ack'] is not None:
                        ack_ts = alarm['ack']['t']
                        ackremove = get_previous_step(
                            alarm,
                            'ackremove',
                            ts=ack_ts
                        )
                        ts = alarm_ts if ackremove is None else ackremove['t']
                        ack_delay = ack_ts - ts

                        events.append(eventmgr.alarm_ack_delay(ack_delay))
                        events.append(
                            eventmgr.alarm_ack_solved_delay(
                                solved_delay - ack_delay
                            )
                        )

                        events.append(usermgr.alarm_ack_delay(
                            alarm['ack']['a'],
                            ack_delay
                        ))

                    if len(alarm_events) > 0:
                        events.append(eventmgr.alarm(alarm_events[0]))

                    for event in alarm_events:
                        if event['event_type'] == 'ack':
                            events.append(eventmgr.alarm_ack(event))
                            events.append(
                                usermgr.alarm_ack(event, event['author'])
                            )

                        elif event['timestamp'] == alarm['resolved']:
                            events.append(eventmgr.alarm_solved(event))

                            if alarm['ack'] is not None:
                                events.append(eventmgr.alarm_ack_solved(event))

                                events.append(
                                    usermgr.alarm_ack_solved(
                                        alarm['ack']['a'],
                                        alarm['resolved'] - alarm['ack']['t']
                                    )
                                )

                                events.append(
                                    usermgr.alarm_solved(
                                        alarm['ack']['a'],
                                        alarm['resolved'] - alarm_ts
                                    )
                                )

                    alertsmgr.update_current_alarm(
                        docalarm,
                        alarm,
                        tags='stats'
                    )

    for event in events:
        publish(publisher=engine.amqp, event=event, logger=logger)
示例#33
0
def event_processing(engine, event, alertsmgr=None, logger=None, **kwargs):
    """Archive the incoming event through the alerts manager.

    :param engine: calling engine (kept for the engine task signature).
    :param dict event: event to archive.
    :param alertsmgr: Alerts manager; resolved with ``singleton_per_scope``
        when not supplied.
    :param logger: optional logger (unused here).
    """
    manager = singleton_per_scope(Alerts) if alertsmgr is None else alertsmgr

    manager.archive(event)
示例#34
0
文件: event.py 项目: crudbug/canopsis
def exports(ws):
    """Register event webservice routes on the given webserver.

    Routes:
    - POST/PUT ``event``: validate events against canopsis schemas and
      publish them on AMQP, or forward them to a remote ``url`` when
      crossdomain sending is enabled.
    - GET ``eventslog/count``: eventslog count per day over a period.
    """
    manager = singleton_per_scope(EventsLog)

    @route(ws.application.post, name="event", payload=["event", "url"])
    @route(ws.application.put, name="event", payload=["event", "url"])
    def send_event(event, url=None):
        """Publish event(s) locally, or forward them to a remote url."""
        if ws.enable_crossdomain_send_events and url is not None:
            # Forward the event to a remote instance.
            payload = {"event": json.dumps(event)}

            response = requests.post(url, data=payload)

            # BUGFIX: the success/error branches were inverted — the JSON
            # body was parsed on HTTP errors while an HTTPError was returned
            # on success (status 200).
            if response.status_code == 200:
                api_response = json.loads(response.text)

                return (api_response["data"], api_response["total"])

            else:
                return HTTPError(response.status_code, response.text)

        else:
            events = ensure_iterable(event)
            exchange = ws.amqp.exchange_name_events

            for event in events:
                # Validate against the generic event schema first, then
                # against the event-type-specific schema.
                if schema.validate(event, "cevent"):
                    sname = "cevent.{0}".format(event["event_type"])

                    if schema.validate(event, sname):
                        if event["event_type"] == "eue":
                            # eue events have one schema per type_message.
                            sname = "cevent.eue.{0}".format(event["type_message"])

                            if not schema.validate(event, sname):
                                continue

                        # Routing key built from the event identity fields.
                        rk = "{0}.{1}.{2}.{3}.{4}".format(
                            event["connector"],
                            event["connector_name"],
                            event["event_type"],
                            event["source_type"],
                            event["component"],
                        )

                        if event["source_type"] == "resource":
                            rk = "{0}.{1}".format(rk, event["resource"])

                        ws.amqp.publish(event, rk, exchange)

            return events

    @route(ws.application.get, name="eventslog/count", payload=["tstart", "tstop", "limit", "select"])
    def get_event_count_per_day(tstart, tstop, limit=100, select=None):
        """ get eventslog log count for each days in a given period
            :param tstart: timestamp of the begin period
            :param tstop: timestamp of the end period
            :param limit: limit the count number per day
            :param select: filter for eventslog collection (defaults to an
            empty filter; a mutable default argument was removed)
            :return: list in which each item contains an interval and the
            related count
            :rtype: list
        """
        # Avoid the shared mutable default argument pitfall.
        if select is None:
            select = {}

        results = manager.get_eventlog_count_by_period(tstart, tstop, limit=limit, query=select)

        return results
示例#35
0
 def setUp(self):
     """Initialize the EventsLog manager under test.

     The manager is resolved through ``singleton_per_scope``, so the same
     instance is presumably shared within the current scope — confirm
     against the helper's implementation.
     """
     self.manager = singleton_per_scope(EventsLog)
示例#36
0
def exports(ws):
    """
    Expose session routes.
    """

    backend = Middleware.get_middleware_by_uri(
        Session.SESSION_STORAGE_URI
    )._backend
    session_manager = singleton_per_scope(
        Session, kwargs={'collection': backend}
    )

    @route(ws.application.get, name='account/me', adapt=False)
    def get_me():
        """
        Return the user account, stripped of its internal identifiers.
        """
        user = get_user()
        for field in ('id', 'eid'):
            user.pop(field, None)

        return user

    @ws.application.post('/api/v2/keepalive')
    def keepalive():
        """
        Maintain the current session alive.
        """
        try:
            body = json.loads(request.body.read())
            visible = body["visible"]
            paths = body["path"]
            beaker_id, username = get_info()
            time = session_manager.keep_alive(
                beaker_id, username, visible, paths
            )
            return gen_json({
                'description': "Session keepalive",
                "time": time,
                "visible": visible,
                "paths": paths
            })

        except SessionError as sess_err:
            return gen_json_error({'description': sess_err.value}, HTTP_ERROR)

    @ws.application.get('/api/v2/sessionstart')
    def sessionstart():
        """
        Start a new session.
        """
        try:
            beaker_id, username = get_info()
            session_manager.session_start(beaker_id, username)
            return gen_json({'description': "Session Start"})
        except SessionError as sess_err:
            return gen_json_error({'description': sess_err.value}, HTTP_ERROR)

    @ws.application.post('/api/v2/session_tracepath')
    def sessiontracepath():
        """
        Record the navigation path of the current session.
        """
        try:
            body = json.loads(request.body.read())
            paths = body["path"]
            beaker_id, username = get_info()
            session_manager.session_tracepath(beaker_id, username, paths)
        except SessionError as sess_err:
            return gen_json_error({'description': sess_err.value}, HTTP_ERROR)

    @ws.application.get('/api/v2/sessions')
    def session():
        """
        List sessions matching the request query parameters.
        """
        try:
            # "usernames[]" is a multi-valued parameter; fetch all values.
            params = {
                key: (request.query.getall(key)
                      if key == "usernames[]"
                      else request.query.get(key))
                for key in request.query.keys()
            }
            beaker_id, username = get_info()
            sessions = session_manager.sessions_req(beaker_id, params)
            return gen_json({'description': "Sessions", 'sessions': sessions})

        except SessionError as sess_err:
            return gen_json_error({'description': sess_err.value}, HTTP_ERROR)
示例#37
0
def exports(ws):
    """Register event webservice routes (v2 POST, legacy send, log count).

    Routes:
    - POST ``/api/v2/event``: read events from the JSON request body and
      forward them with ``send_events``.
    - POST/PUT ``event``: legacy route; publish locally or forward to a
      remote ``url`` when crossdomain sending is enabled.
    - GET ``eventslog/count``: eventslog count per day over a period.
    """
    el_kwargs = {
        'el_storage': EventsLog.provide_default_basics()
    }
    manager = singleton_per_scope(EventsLog, kwargs=el_kwargs)

    @ws.application.post(
        '/api/v2/event'
    )
    def send_event_post():
        """Read events from the request JSON body and send them."""
        try:
            events = request.json
        except ValueError as verror:
            return gen_json_error({'description':
                                   'malformed JSON : {0}'.format(verror)},
                                  HTTP_ERROR)

        if events is None:
            # BUGFIX: the HTTPError class was passed where the HTTP_ERROR
            # status constant is expected (as done everywhere else).
            return gen_json_error(
                {'description': 'nothing to return'},
                HTTP_ERROR
            )

        return send_events(ws, events)

    @route(ws.application.post, name='event', payload=['event', 'url'])
    @route(ws.application.put, name='event', payload=['event', 'url'])
    def send_event(event, url=None):
        """Publish event(s) locally, or forward them to a remote url."""
        if ws.enable_crossdomain_send_events and url is not None:
            payload = {
                'event': json.dumps(event)
            }

            response = requests.post(url, data=payload)

            # BUGFIX: the success/error branches were inverted — the JSON
            # body was parsed on HTTP errors while an HTTPError was returned
            # on success (status 200).
            if response.status_code == 200:
                api_response = json.loads(response.text)

                return (api_response['data'], api_response['total'])

            else:
                return HTTPError(response.status_code, response.text)

        else:
            return send_events(ws, event)

    @route(ws.application.get,
           name='eventslog/count',
           payload=['tstart', 'tstop', 'limit', 'select']
           )
    def get_event_count_per_day(tstart, tstop, limit=100, select=None):
        """ get eventslog log count for each days in a given period
            :param tstart: timestamp of the begin period
            :param tstop: timestamp of the end period
            :param limit: limit the count number per day
            :param select: filter for eventslog collection (defaults to an
            empty filter; a mutable default argument was removed)
            :return: list in which each item contains an interval and the
            related count
            :rtype: list
        """
        # Avoid the shared mutable default argument pitfall.
        if select is None:
            select = {}

        results = manager.get_eventlog_count_by_period(
            tstart, tstop, limit=limit, query=select
        )

        return results
示例#38
0
def at_least(
        event, ctx, vertice, state=Check.OK, min_weight=1, rrule=None, f=None,
        manager=None, edge_types=None, edge_data=None, edge_query=None,
        **kwargs
):
    """
    Generic condition applied on sources of vertice which check if at least
    source nodes check a condition.

    :param dict event: processed event.
    :param dict ctx: rule context which must contain rule vertice.
    :param TopoNode vertice: vertice to check.
    :param int state: state to check among sources nodes.
    :param float min_weight: minimal weight (default 1) to reach in order to
        validate this condition. If None, condition results in checking all
        sources.
    :param rrule rrule: rrule to consider in order to check condition in time.
        NOTE(review): not used by this implementation — confirm intent.
    :param f: function to apply on source vertice state. If None, use equality
        between input state and source vertice state. May also be a dotted
        function name, resolved with ``lookup``.
    :param TopologyManager manager: topology manager; defaults to the
        ``singleton_per_scope`` instance.
    :param edge_ids: edge from where find target/source vertices.
    :type edge_ids: list or str
    :param edge_types: edge types from where find target/source vertices.
    :type edge_types: list or str
    :param edge_data: edge data to match while finding source vertices.
    :param dict edge_query: additional edge query.

    :return: True if condition is checked among source nodes.
    :rtype: bool
    """

    result = False

    if manager is None:
        manager = singleton_per_scope(TopologyManager)

    # ensure min_weight is exclusively a float or None
    # NOTE(review): a literal 0 is kept as int 0 (not converted to float),
    # making the condition succeed on the first matching source — confirm
    # this is intended.
    if min_weight:
        min_weight = float(min_weight)
    elif min_weight != 0:
        min_weight = None

    # get source vertices, grouped by the edge they are reached through
    sources_by_edges = manager.get_sources(
        ids=vertice.id, add_edges=True,
        edge_types=edge_types, edge_data=edge_data, edge_query=edge_query
    )

    if sources_by_edges and min_weight is None:
        # if edges & checking all nodes is required, result is True by default
        result = True

    # resolve f when given as a dotted name (Python 2 only: ``basestring``)
    if isinstance(f, basestring):
        f = lookup(f)

    # for all edges
    for edge_id in sources_by_edges:
        # get edge and sources
        edge, sources = sources_by_edges[edge_id]
        # get edge_weight which is 1 by default
        for source in sources:
            source_state = source.state
            # a source matches when its state equals `state`, or when f
            # (if given) accepts its state
            if source_state == state if f is None else f(source_state):
                if min_weight is not None:  # if min_weight is not None
                    min_weight -= edge.weight  # remove edge_weight from result
                    if min_weight <= 0:  # if min_weight is negative, ends loop
                        result = True
                        break
            elif min_weight is None:
                # stop if condition is not checked and min_weight is None
                result = False
                break

    # if result, save source_nodes in ctx in order to save read data from db
    if result:
        ctx[SOURCES_BY_EDGES] = sources_by_edges

    return result