Example #1
    def record_events(self, event_models):
        """Write the events to database.

        :param event_models: a list of models.Event objects.
        """
        error = None
        for event_model in event_models:
            traits = []
            if event_model.traits:
                for trait in event_model.traits:
                    traits.append({
                        'trait_name': trait.name,
                        'trait_type': trait.dtype,
                        'trait_value': trait.value
                    })
            try:
                self.db.event.insert_one({
                    '_id': event_model.message_id,
                    'event_type': event_model.event_type,
                    'timestamp': event_model.generated,
                    'traits': traits,
                    'raw': event_model.raw
                })
            except pymongo.errors.DuplicateKeyError as ex:
                LOG.info(_LI("Duplicate event detected, skipping it: %s") % ex)
            except Exception as ex:
                LOG.exception(_LE("Failed to record event: %s") % ex)
                error = ex
        if error:
            raise error
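
Worth noting in the example above: duplicate-key failures are only logged and skipped, while the last unexpected exception is remembered and re-raised once the whole batch has been attempted, so a single bad event does not abort the remaining writes. A minimal, database-free sketch of that pattern (the names here are illustrative, not from the project):

import logging

LOG = logging.getLogger(__name__)


class DuplicateRecord(Exception):
    """Stand-in for pymongo.errors.DuplicateKeyError."""


def record_all(records, write_one):
    """Attempt every record; skip duplicates, re-raise the last other error."""
    error = None
    for record in records:
        try:
            write_one(record)
        except DuplicateRecord as ex:
            LOG.info("Duplicate record detected, skipping it: %s", ex)
        except Exception as ex:
            LOG.exception("Failed to record %r: %s", record, ex)
            error = ex
    if error:
        raise error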
Example #2
    def create_index(self, keys, name=None, *args, **kwargs):
        try:
            self.conn.create_index(keys, name=name, *args, **kwargs)
        except pymongo.errors.OperationFailure as e:
            if e.code == ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS:
                LOG.info(_LI("Index %s will be recreated.") % name)
                self._recreate_index(keys, name, *args, **kwargs)
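
_recreate_index is referenced above but not shown. A plausible sketch of such a helper, assuming it simply drops the conflicting index by name and creates it again with the requested specification (an assumption about its behaviour, not the project's actual implementation; drop_index and create_index are standard pymongo Collection methods):

    def _recreate_index(self, keys, name, *args, **kwargs):
        # Drop the index whose spec no longer matches, then create it anew.
        self.conn.drop_index(name)
        self.conn.create_index(keys, name=name, *args, **kwargs)
        LOG.info("Index %s has been recreated.", name)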
Example #3
    def clear_expired_event_data(self, ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.

        :param ttl: Number of seconds to keep records for.
        """
        session = self._engine_facade.get_session()
        with session.begin():
            end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
            event_q = (session.query(
                models.Event.id).filter(models.Event.generated < end))

            event_subq = event_q.subquery()
            for trait_model in [
                    models.TraitText, models.TraitInt, models.TraitFloat,
                    models.TraitDatetime
            ]:
                (session.query(trait_model).filter(
                    trait_model.event_id.in_(event_subq)).delete(
                        synchronize_session="fetch"))
            event_rows = event_q.delete()

            # remove EventType rows that no longer have any
            # matching events
            (session.query(models.EventType).filter(
                ~models.EventType.events.any()).delete(
                    synchronize_session="fetch"))
            LOG.info(_LI("%d events were removed from the database"),
                     event_rows)
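
A self-contained sketch of the same expiry pattern using plain SQLAlchemy and an in-memory SQLite database (the Event and TraitText tables below are simplified stand-ins, not the project's real models): the expired-event ids are expressed once as a SELECT and reused to delete the dependent trait rows with IN (subquery), then the events themselves are removed.

import datetime

from sqlalchemy import (Column, DateTime, ForeignKey, Integer, String,
                        create_engine, select)
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Event(Base):
    __tablename__ = 'event'
    id = Column(Integer, primary_key=True)
    generated = Column(DateTime, nullable=False)


class TraitText(Base):
    __tablename__ = 'trait_text'
    id = Column(Integer, primary_key=True)
    event_id = Column(Integer, ForeignKey('event.id'), nullable=False)
    value = Column(String)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

now = datetime.datetime.utcnow()
with Session(engine) as session, session.begin():
    old = Event(generated=now - datetime.timedelta(days=2))
    session.add_all([old, Event(generated=now)])
    session.flush()
    session.add(TraitText(event_id=old.id, value='stale'))

ttl = 24 * 3600  # keep one day of events
with Session(engine) as session, session.begin():
    end = datetime.datetime.utcnow() - datetime.timedelta(seconds=ttl)
    expired_ids = select(Event.id).where(Event.generated < end)

    # delete children first so no trait row points at a removed event
    (session.query(TraitText)
     .filter(TraitText.event_id.in_(expired_ids))
     .delete(synchronize_session='fetch'))
    removed = (session.query(Event)
               .filter(Event.generated < end)
               .delete(synchronize_session='fetch'))
    print('%d expired event(s) removed' % removed)   # -> 1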
Example #4
    def connect(self, url, max_retries, retry_interval):
        connection_options = pymongo.uri_parser.parse_uri(url)
        del connection_options['database']
        del connection_options['username']
        del connection_options['password']
        del connection_options['collection']
        pool_key = tuple(connection_options)

        if pool_key in self._pool:
            client = self._pool.get(pool_key)()
            if client:
                return client
        splitted_url = netutils.urlsplit(url)
        log_data = {
            'db': splitted_url.scheme,
            'nodelist': connection_options['nodelist']
        }
        LOG.info(_LI('Connecting to %(db)s on %(nodelist)s') % log_data)
        try:
            client = MongoProxy(pymongo.MongoClient(url), max_retries,
                                retry_interval)
        except pymongo.errors.ConnectionFailure as e:
            LOG.warning(
                _('Unable to connect to the database server: '
                  '%(errmsg)s.') % {'errmsg': e})
            raise
        self._pool[pool_key] = weakref.ref(client)
        return client
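
One detail in the pooling above: the pool holds weakref.ref objects rather than the MongoProxy clients themselves, so a client nobody else references can still be garbage-collected; calling the stored reference returns either the live client or None. A minimal standalone sketch of that idiom (class names here are illustrative):

import weakref


class ClientPool(object):
    def __init__(self):
        self._pool = {}

    def get_or_create(self, key, factory):
        ref = self._pool.get(key)
        client = ref() if ref is not None else None   # None once collected
        if client is None:
            client = factory()
            self._pool[key] = weakref.ref(client)
        return client


class Client(object):
    """Stand-in for MongoProxy(pymongo.MongoClient(url), ...)."""


pool = ClientPool()
first = pool.get_or_create('mongodb://localhost:27017', Client)
assert pool.get_or_create('mongodb://localhost:27017', Client) is first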
Example #5
    def record_events(self, events):

        def _build_bulk_index(event_list):
            for ev in event_list:
                traits = {t.name: t.value for t in ev.traits}
                yield {'_op_type': 'create',
                       '_index': '%s_%s' % (self.index_name,
                                            ev.generated.date().isoformat()),
                       '_type': ev.event_type,
                       '_id': ev.message_id,
                       '_source': {'timestamp': ev.generated.isoformat(),
                                   'traits': traits,
                                   'raw': ev.raw}}

        error = None
        for ok, result in helpers.streaming_bulk(
                self.conn, _build_bulk_index(events)):
            if not ok:
                __, result = result.popitem()
                if result['status'] == 409:
                    LOG.info(_LI('Duplicate event detected, skipping it: %s')
                             % result)
                else:
                    LOG.exception(_LE('Failed to record event: %s') % result)
                    error = storage.StorageUnknownWriteError(result)

        if self._refresh_on_write:
            self.conn.indices.refresh(index='%s_*' % self.index_name)
            while self.conn.cluster.pending_tasks(local=True)['tasks']:
                pass
        if error:
            raise error
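
Each bulk action routes the document to a per-day index derived from the event's generation timestamp, so documents generated on the same day land in the same index. A small illustration of the resulting index names (the base index name below is an assumption):

import datetime

index_name = 'events'   # assumed base index name
generated = datetime.datetime(2016, 3, 14, 9, 26, 53)
print('%s_%s' % (index_name, generated.date().isoformat()))
# -> events_2016-03-14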
Example #6
    @staticmethod
    def clear_expired_data(ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.

        :param ttl: Number of seconds to keep records for.
        """
        LOG.info(_LI("Dropping event data with TTL %d"), ttl)
Example #7
def expirer():
    conf = service.prepare_service()

    if conf.database.event_time_to_live > 0:
        LOG.debug("Clearing expired event data")
        conn = storage.get_connection_from_config(conf)
        conn.clear_expired_data(conf.database.event_time_to_live)
    else:
        LOG.info(
            _LI("Nothing to clean, database event time to live "
                "is disabled"))
Example #8
    def record_events(self, event_models):
        """Write the events to SQL database via sqlalchemy.

        :param event_models: a list of model.Event objects.
        """
        session = self._engine_facade.get_session()
        error = None
        for event_model in event_models:
            event = None
            try:
                with session.begin():
                    event_type = self._get_or_create_event_type(
                        event_model.event_type, session=session)
                    event = models.Event(event_model.message_id, event_type,
                                         event_model.generated,
                                         event_model.raw)
                    session.add(event)
                    session.flush()

                    if event_model.traits:
                        trait_map = {}
                        for trait in event_model.traits:
                            if trait_map.get(trait.dtype) is None:
                                trait_map[trait.dtype] = []
                            trait_map[trait.dtype].append({
                                'event_id': event.id,
                                'key': trait.name,
                                'value': trait.value
                            })
                        for dtype in trait_map.keys():
                            model = TRAIT_ID_TO_MODEL[dtype]
                            session.execute(model.__table__.insert(),
                                            trait_map[dtype])
            except dbexc.DBDuplicateEntry as e:
                LOG.info(_LI("Duplicate event detected, skipping it: %s") % e)
            except KeyError as e:
                LOG.exception(_LE('Failed to record event: %s') % e)
            except Exception as e:
                LOG.exception(_LE('Failed to record event: %s') % e)
                error = e
        if error:
            raise error
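
Grouping traits by data type lets each group be written with a single executemany-style statement via session.execute(model.__table__.insert(), list_of_dicts), instead of one INSERT per trait. A self-contained sketch of that idiom with SQLAlchemy Core and an in-memory SQLite table (the table and columns are simplified stand-ins):

from sqlalchemy import (Column, Integer, MetaData, String, Table,
                        create_engine)

metadata = MetaData()
trait_text = Table(
    'trait_text', metadata,
    Column('id', Integer, primary_key=True),
    Column('event_id', Integer, nullable=False),
    Column('key', String(255)),
    Column('value', String(255)),
)

engine = create_engine('sqlite://')
metadata.create_all(engine)

rows = [
    {'event_id': 1, 'key': 'hostname', 'value': 'compute-1'},
    {'event_id': 1, 'key': 'state', 'value': 'active'},
]
with engine.begin() as conn:
    # one INSERT executed with multiple parameter sets (executemany)
    conn.execute(trait_text.insert(), rows)
    print(conn.execute(trait_text.select()).fetchall())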