Code Example #1
class BaseIndexAnalyzer(object):
    """Base class for analyzers.

    Attributes:
        name: Analyzer name.
        index_name: Name of the Elasticsearch index.
        datastore: Elasticsearch datastore client.
        sketch: Instance of Sketch object.
    """

    NAME = 'name'
    IS_SKETCH_ANALYZER = False

    # If this analyzer depends on another analyzer
    # it needs to be included in this frozenset by using
    # the analyzer names.
    DEPENDENCIES = frozenset()

    def __init__(self, index_name):
        """Initialize the analyzer object.

        Args:
            index_name: Elasticsearch index name.
        """
        self.name = self.NAME
        self.index_name = index_name
        self.datastore = ElasticsearchDataStore(
            host=current_app.config['ELASTIC_HOST'],
            port=current_app.config['ELASTIC_PORT'])

        if not hasattr(self, 'sketch'):
            self.sketch = None

    def event_stream(self, query_string=None, query_filter=None,
                     query_dsl=None, indices=None, return_fields=None):
        """Search ElasticSearch.

        Args:
            query_string: Query string.
            query_filter: Dictionary containing filters to apply.
            query_dsl: Dictionary containing Elasticsearch DSL query.
            indices: List of indices to query.
            return_fields: List of fields to return.

        Returns:
            Generator of Event objects.

        Raises:
            ValueError: if neither query_string nor query_dsl is provided.
        """
        if not (query_string or query_dsl):
            raise ValueError('Both query_string and query_dsl are missing')

        if not query_filter:
            query_filter = {'indices': self.index_name}

        # If not provided we default to the message field as this will always
        # be present.
        if not return_fields:
            return_fields = ['message']

        # Make sure we always return tag, human_readable and emoji attributes.
        return_fields.extend(['tag', 'human_readable', '__ts_emojis'])
        return_fields = list(set(return_fields))

        if not indices:
            indices = [self.index_name]

        # Refresh the index to make sure it is searchable.
        for index in indices:
            self.datastore.client.indices.refresh(index=index)

        event_generator = self.datastore.search_stream(
            query_string=query_string,
            query_filter=query_filter,
            query_dsl=query_dsl,
            indices=indices,
            return_fields=return_fields
        )
        for event in event_generator:
            yield Event(event, self.datastore, sketch=self.sketch)

    @_flush_datastore_decorator
    def run_wrapper(self):
        """A wrapper method to run the analyzer.

        This method is decorated to flush the bulk insert operation on the
        datastore. This makes sure that all events are indexed at exit.

        Returns:
            Return value of the run method.
        """
        result = self.run()
        return result

    @classmethod
    def get_kwargs(cls):
        """Get keyword arguments needed to instantiate the class.

        Every analyzer gets the index_name as its first argument from Celery.
        By default this is the only argument. If your analyzer needs more
        arguments you can override this method and return them as a dictionary.

        If you want more than one instance to be created for your analyzer you
        can return a list of dictionaries with kwargs and each one will be
        instantiated and registered in Celery. This is neat if you want to run
        your analyzer with different arguments in parallel.

        Returns:
            List of keyword argument dicts or None if no extra arguments are
            needed.
        """
        return None

    def run(self):
        """Entry point for the analyzer."""
        raise NotImplementedError
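
To show how this interface is consumed, here is a minimal sketch of a concrete analyzer. It assumes the Event helpers add_tags() and commit() from the surrounding analyzer module; the analyzer name, query string, and tag are illustrative, not taken from the code above.

# A minimal sketch, assuming Event.add_tags() and Event.commit();
# the name, query and tag below are illustrative.
class LoginTaggerAnalyzer(BaseIndexAnalyzer):
    """Tags events that look like login activity."""

    NAME = 'login_tagger'

    def run(self):
        """Entry point for the analyzer."""
        events = self.event_stream(
            query_string='message:"login"',
            return_fields=['message'])

        count = 0
        for event in events:
            event.add_tags(['login'])  # queue a tag update on the event
            event.commit()             # push the update to the bulk queue
            count += 1

        return '{0:d} events tagged as login'.format(count)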
Code Example #2
class BaseIndexAnalyzer(object):
    """Base class for analyzers.

    Attributes:
        name: Analyzer name.
        index_name: Name of the Elasticsearch index.
        datastore: Elasticsearch datastore client.
        sketch: Instance of Sketch object.
    """

    NAME = 'name'
    IS_SKETCH_ANALYZER = False

    # If this analyzer depends on another analyzer
    # it needs to be included in this frozenset by using
    # the analyzer names.
    DEPENDENCIES = frozenset()

    # Used as hints to the frontend UI in order to render input forms.
    FORM_FIELDS = []

    # Configure how long an analyzer should wait for the timeline
    # to become fully indexed before aborting.
    SECONDS_PER_WAIT = 10
    MAXIMUM_WAITS = 360

    def __init__(self, index_name):
        """Initialize the analyzer object.

        Args:
            index_name: Elasticsearch index name.
        """
        self.name = self.NAME
        self.index_name = index_name
        self.datastore = ElasticsearchDataStore(
            host=current_app.config['ELASTIC_HOST'],
            port=current_app.config['ELASTIC_PORT'])

        if not hasattr(self, 'sketch'):
            self.sketch = None

    def event_stream(self,
                     query_string=None,
                     query_filter=None,
                     query_dsl=None,
                     indices=None,
                     return_fields=None):
        """Search ElasticSearch.

        Args:
            query_string: Query string.
            query_filter: Dictionary containing filters to apply.
            query_dsl: Dictionary containing Elasticsearch DSL query.
            indices: List of indices to query.
            return_fields: List of fields to return.

        Returns:
            Generator of Event objects.

        Raises:
            ValueError: if neither query_string nor query_dsl is provided.
        """
        if not (query_string or query_dsl):
            raise ValueError('Both query_string and query_dsl are missing')

        if not query_filter:
            query_filter = {'indices': self.index_name}

        # If not provided we default to the message field as this will always
        # be present.
        if not return_fields:
            return_fields = ['message']

        # Make sure we always return tag, human_readable and emoji attributes.
        return_fields.extend(['tag', 'human_readable', '__ts_emojis'])
        return_fields = list(set(return_fields))

        if not indices:
            indices = [self.index_name]

        # Refresh the index to make sure it is searchable.
        for index in indices:
            self.datastore.client.indices.refresh(index=index)

        event_generator = self.datastore.search_stream(
            query_string=query_string,
            query_filter=query_filter,
            query_dsl=query_dsl,
            indices=indices,
            return_fields=return_fields)
        for event in event_generator:
            yield Event(event, self.datastore, sketch=self.sketch)

    @_flush_datastore_decorator
    def run_wrapper(self, analysis_id):
        """A wrapper method to run the analyzer.

        This method is decorated to flush the bulk insert operation on the
        datastore. This makes sure that all events are indexed at exit.

        Returns:
            Return value of the run method.
        """
        analysis = Analysis.query.get(analysis_id)
        analysis.set_status('STARTED')

        timeline = analysis.timeline
        searchindex = timeline.searchindex

        counter = 0
        while True:
            status = searchindex.get_status.status
            status = status.lower()
            if status == 'ready':
                break

            if status == 'fail':
                logging.error(
                    'Unable to run analyzer on a failed index ({0:s})'.format(
                        searchindex.index_name))
                return 'Failed'

            time.sleep(self.SECONDS_PER_WAIT)
            counter += 1
            if counter >= self.MAXIMUM_WAITS:
                logging.error(
                    'Indexing has taken too long, aborting analyzer run')
                return 'Failed'
            # Refresh the searchindex object.
            db_session.refresh(searchindex)

        # Run the analyzer. Broad Exception catch to catch any error and store
        # the error in the DB for display in the UI.
        try:
            result = self.run()
            analysis.set_status('DONE')
        except Exception:  # pylint: disable=broad-except
            analysis.set_status('ERROR')
            result = traceback.format_exc()

        # Update database analysis object with result and status
        analysis.result = '{0:s}'.format(result)
        db_session.add(analysis)
        db_session.commit()

        return result

    def run(self):
        """Entry point for the analyzer."""
        raise NotImplementedError
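
Note the wait budget implied by the new class constants: with SECONDS_PER_WAIT = 10 and MAXIMUM_WAITS = 360, run_wrapper polls the index status for at most 10 × 360 = 3600 seconds (one hour) before logging an error and returning 'Failed'.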
Code Example #3
File: api_fetcher.py Project: temach/timesketch
class ApiDataFetcher(interface.DataFetcher):
    """Data Fetcher for an API story exporter."""

    def __init__(self):
        """Initialize the data fetcher."""
        super(ApiDataFetcher, self).__init__()
        self._datastore = ElasticsearchDataStore(
            host=current_app.config['ELASTIC_HOST'],
            port=current_app.config['ELASTIC_PORT'])

    def get_aggregation(self, agg_dict):
        """Returns an aggregation object from an aggregation dict.

        Args:
            agg_dict (dict): a dictionary containing information
                about the stored aggregation.

        Returns:
            An aggregation object (instance of AggregationResult) from a
            saved aggregation or None if not found.
        """
        aggregation_id = agg_dict.get('id')
        if not aggregation_id:
            return None

        aggregation = Aggregation.query.get(aggregation_id)
        if not aggregation:
            return None

        try:
            agg_class = aggregator_manager.AggregatorManager.get_aggregator(
                aggregation.agg_type)
        except KeyError:
            return None

        if not agg_class:
            return None
        aggregator = agg_class(sketch_id=self._sketch_id)
        parameter_string = aggregation.parameters
        parameters = json.loads(parameter_string)
        return aggregator.run(**parameters)

    def get_view(self, view_dict):
        """Returns a data frame from a view dict.

        Args:
            view_dict (dict): a dictionary containing information
                about the stored view.

        Returns:
            A pandas DataFrame with the results from a view aggregation.
        """
        view_id = view_dict.get('id')
        if not view_id:
            return pd.DataFrame()

        view = View.query.get(view_id)
        if not view:
            return pd.DataFrame()

        if not view.query_string and not view.query_dsl:
            return pd.DataFrame()

        query_filter = view.query_filter
        if query_filter and isinstance(query_filter, str):
            query_filter = json.loads(query_filter)
        elif not query_filter:
            query_filter = {'indices': '_all', 'size': 100}

        if view.query_dsl:
            query_dsl = json.loads(view.query_dsl)
        else:
            query_dsl = None

        sketch = Sketch.query.get_with_acl(self._sketch_id)
        sketch_indices = [
            t.searchindex.index_name
            for t in sketch.active_timelines
        ]

        results = self._datastore.search_stream(
            sketch_id=self._sketch_id,
            query_string=view.query_string,
            query_filter=query_filter,
            query_dsl=query_dsl,
            indices=sketch_indices,
        )
        result_list = [x.get('_source') for x in results]
        return pd.DataFrame(result_list)
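
A minimal usage sketch follows, assuming a Flask application context and an existing saved view; the view ID is illustrative, and assigning _sketch_id directly stands in for whatever setup the story exporter normally performs.

# Illustrative only: the view ID and sketch ID below are made up.
fetcher = ApiDataFetcher()
fetcher._sketch_id = 1  # normally set by the story exporter, assumed here
frame = fetcher.get_view({'id': 42})
print(frame.head())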
Code Example #4
class BaseGraphPlugin:
    """Base class for a graph.

    Attributes:
        datastore (ElasticsearchDataStore): Elasticsearch datastore object.
        graph (nx.Graph): NetworkX Graph object.
    """
    # Name that the graph will be registered as.
    NAME = 'name'

    # Display name (used in the UI)
    DISPLAY_NAME = 'display_name'

    # Description of the plugin (used in the UI)
    DESCRIPTION = 'description'

    # Type of graph. There are four supported types: Undirected Graph,
    # Undirected Multi Graph, Directed Graph, Directed Multi Graph.
    # If you have multiple edges between nodes you need to use one of
    # the multi graph types.
    #
    # See NetworkX documentation for details:
    # https://networkx.org/documentation/stable/reference/classes/index.html
    GRAPH_TYPE = 'MultiDiGraph'

    def __init__(self, sketch=None):
        """Initialize the graph object.

        Args:
            sketch (Sketch): Sketch object.

        Raises:
            KeyError: if the specified graph type is not supported.
        """
        self.datastore = ElasticsearchDataStore(
            host=current_app.config['ELASTIC_HOST'],
            port=current_app.config['ELASTIC_PORT'])
        if not GRAPH_TYPES.get(self.GRAPH_TYPE):
            raise KeyError(f'Graph type {self.GRAPH_TYPE} is not supported')
        self.graph = Graph(self.GRAPH_TYPE)
        self.sketch = sketch

    def _get_all_sketch_indices(self):
        """List all indices in the Sketch.
        Returns:
            List of index names.
        """
        active_timelines = self.sketch.active_timelines
        indices = [t.searchindex.index_name for t in active_timelines]
        return indices

    # TODO: Refactor this to reuse across analyzers and graphs.
    def event_stream(
            self, query_string=None, query_filter=None, query_dsl=None,
            indices=None, return_fields=None, scroll=True):
        """Search ElasticSearch.

        Args:
            query_string: Query string.
            query_filter: Dictionary containing filters to apply.
            query_dsl: Dictionary containing Elasticsearch DSL query.
            indices: List of indices to query.
            return_fields: List of fields to return.
            scroll: Boolean determining whether we support scrolling searches
                or not. Defaults to True.

        Returns:
            Generator of event documents (dicts) from the datastore.

        Raises:
            ValueError: if neither query_string nor query_dsl is provided.
        """
        if not (query_string or query_dsl):
            raise ValueError('Both query_string and query_dsl are missing')

        # Query all sketch indices if none are specified.
        if not indices:
            indices = self._get_all_sketch_indices()

        if not query_filter:
            query_filter = {}

        # De-duplicate the requested fields; return_fields may be None, in
        # which case the datastore decides which fields to return.
        if return_fields:
            return_fields = list(set(return_fields))

        event_generator = self.datastore.search_stream(
            query_string=query_string,
            query_filter=query_filter,
            query_dsl=query_dsl,
            indices=indices,
            return_fields=return_fields,
            enable_scroll=scroll,
        )
        return event_generator

    def generate(self):
        """Entry point for the graph."""
        raise NotImplementedError
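
For context, a concrete plugin might look like the sketch below. The query, field names, and the assumption that the Graph wrapper mirrors NetworkX's add_edge() are illustrative; the real wrapper may expose a richer node/edge API. Note that this event_stream() yields raw Elasticsearch hits, so fields live under _source.

# A minimal sketch of a concrete graph plugin; the query, fields and
# the add_edge() call are assumptions, not taken from the code above.
class LogonGraph(BaseGraphPlugin):
    """Graphs which users logged on to which hosts."""

    NAME = 'logon_graph'
    DISPLAY_NAME = 'Logon activity'
    DESCRIPTION = 'Graph of users logging on to hosts'

    def generate(self):
        """Entry point for the graph."""
        events = self.event_stream(
            query_string='event_identifier:4624',
            return_fields=['username', 'hostname'])
        for event in events:
            source = event.get('_source', {})
            user = source.get('username')
            host = source.get('hostname')
            if user and host:
                self.graph.add_edge(user, host)
        return self.graph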
Code Example #5
class BaseIndexAnalyzer(object):
    """Base class for analyzers.

    Attributes:
        name: Analyzer name.
        index_name: Name of the Elasticsearch index.
        datastore: Elasticsearch datastore client.
        sketch: Instance of Sketch object.
    """

    NAME = 'name'
    IS_SKETCH_ANALYZER = False

    # If this analyzer depends on another analyzer
    # it needs to be included in this frozenset by using
    # the analyzer names.
    DEPENDENCIES = frozenset()

    def __init__(self, index_name):
        """Initialize the analyzer object.

        Args:
            index_name: Elasticsearch index name.
        """
        self.name = self.NAME
        self.index_name = index_name
        self.datastore = ElasticsearchDataStore(
            host=current_app.config['ELASTIC_HOST'],
            port=current_app.config['ELASTIC_PORT'])

        if not hasattr(self, 'sketch'):
            self.sketch = None

    def event_stream(
            self, query_string=None, query_filter=None, query_dsl=None,
            indices=None, return_fields=None):
        """Search ElasticSearch.

        Args:
            query_string: Query string.
            query_filter: Dictionary containing filters to apply.
            query_dsl: Dictionary containing Elasticsearch DSL query.
            indices: List of indices to query.
            return_fields: List of fields to return.

        Returns:
            Generator of Event objects.

        Raises:
            ValueError: if neither query_string nor query_dsl is provided.
        """
        if not (query_string or query_dsl):
            raise ValueError('Both query_string and query_dsl are missing')

        if not query_filter:
            query_filter = {'indices': self.index_name}

        # If not provided we default to the message field as this will always
        # be present.
        if not return_fields:
            return_fields = ['message']

        # Make sure we always return tag, human_readable and emoji attributes.
        return_fields.extend(['tag', 'human_readable', '__ts_emojis'])
        return_fields = list(set(return_fields))

        if not indices:
            indices = [self.index_name]

        # Refresh the index to make sure it is searchable.
        for index in indices:
            self.datastore.client.indices.refresh(index=index)

        event_generator = self.datastore.search_stream(
            query_string=query_string,
            query_filter=query_filter,
            query_dsl=query_dsl,
            indices=indices,
            return_fields=return_fields
        )
        for event in event_generator:
            yield Event(event, self.datastore, sketch=self.sketch)

    @_flush_datastore_decorator
    def run_wrapper(self, analysis_id):
        """A wrapper method to run the analyzer.

        This method is decorated to flush the bulk insert operation on the
        datastore. This makes sure that all events are indexed at exit.

        Returns:
            Return value of the run method.
        """
        analysis = Analysis.query.get(analysis_id)
        analysis.set_status('STARTED')

        # Run the analyzer
        result = self.run()

        # Update database analysis object with result and status
        analysis.result = '{0:s}'.format(result)
        analysis.set_status('DONE')
        db_session.add(analysis)
        db_session.commit()

        return result

    def run(self):
        """Entry point for the analyzer."""
        raise NotImplementedError
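
The _flush_datastore_decorator applied to run_wrapper is not shown in any of these snippets. A plausible sketch is below, assuming the datastore exposes a flush_queued_events() method; the real implementation may differ.

import functools

def _flush_datastore_decorator(func):
    """Decorator that flushes the datastore bulk queue when func exits."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        finally:
            # Flush even on error so queued events are not lost.
            # flush_queued_events() is an assumed datastore method.
            self.datastore.flush_queued_events()
    return wrapper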
Code Example #6
File: interface.py Project: Onager/timesketch
class BaseIndexAnalyzer(object):
    """Base class for analyzers.

    Attributes:
        name: Analyzer name.
        index_name: Name of the Elasticsearch index.
        datastore: Elasticsearch datastore client.
        sketch: Instance of Sketch object.
    """

    NAME = 'name'
    IS_SKETCH_ANALYZER = False

    # If this analyzer depends on another analyzer
    # it needs to be included in this frozenset by using
    # the analyzer names.
    DEPENDENCIES = frozenset()

    def __init__(self, index_name):
        """Initialize the analyzer object.

        Args:
            index_name: Elasticsearch index name.
        """
        self.name = self.NAME
        self.index_name = index_name
        self.datastore = ElasticsearchDataStore(
            host=current_app.config['ELASTIC_HOST'],
            port=current_app.config['ELASTIC_PORT'])

        if not hasattr(self, 'sketch'):
            self.sketch = None

    def event_stream(
            self, query_string=None, query_filter=None, query_dsl=None,
            indices=None, return_fields=None):
        """Search ElasticSearch.

        Args:
            query_string: Query string.
            query_filter: Dictionary containing filters to apply.
            query_dsl: Dictionary containing Elasticsearch DSL query.
            indices: List of indices to query.
            return_fields: List of fields to return.

        Returns:
            Generator of Event objects.

        Raises:
            ValueError: if neither query_string nor query_dsl is provided.
        """
        if not (query_string or query_dsl):
            raise ValueError('Both query_string and query_dsl are missing')

        if not query_filter:
            query_filter = {'indices': self.index_name}

        # If not provided we default to the message field as this will always
        # be present.
        if not return_fields:
            return_fields = ['message']

        # Make sure we always return tag, human_readable and emoji attributes.
        return_fields.extend(['tag', 'human_readable', '__ts_emojis'])
        return_fields = list(set(return_fields))

        if not indices:
            indices = [self.index_name]

        # Refresh the index to make sure it is searchable.
        for index in indices:
            self.datastore.client.indices.refresh(index=index)

        event_generator = self.datastore.search_stream(
            query_string=query_string,
            query_filter=query_filter,
            query_dsl=query_dsl,
            indices=indices,
            return_fields=return_fields
        )
        for event in event_generator:
            yield Event(event, self.datastore, sketch=self.sketch)

    @_flush_datastore_decorator
    def run_wrapper(self):
        """A wrapper method to run the analyzer.

        This method is decorated to flush the bulk insert operation on the
        datastore. This makes sure that all events are indexed at exit.

        Returns:
            Return value of the run method.
        """
        result = self.run()

        # Update the searchindex description with analyzer result.
        # TODO: Don't overload the description field.
        searchindex = SearchIndex.query.filter_by(
            index_name=self.index_name).first()

        # Some code paths set the description equal to the name. Remove that
        # here to get a clean description with only analyzer results.
        if searchindex.description == searchindex.name:
            searchindex.description = ''

        # Append the analyzer result.
        if result:
            searchindex.description = '{0:s}\n{1:s}'.format(
                searchindex.description, result)
        db_session.add(searchindex)
        db_session.commit()

        return result

    @classmethod
    def get_kwargs(cls):
        """Get keyword arguments needed to instantiate the class.

        Every analyzer gets the index_name as its first argument from Celery.
        By default this is the only argument. If your analyzer needs more
        arguments you can override this method and return them as a dictionary.

        If you want more than one instance to be created for your analyzer you
        can return a list of dictionaries with kwargs and each one will be
        instantiated and registered in Celery. This is neat if you want to run
        your analyzer with different arguments in parallel.

        Returns:
            List of keyword argument dicts or None if no extra arguments are
            needed.
        """
        return None

    def run(self):
        """Entry point for the analyzer."""
        raise NotImplementedError
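
As the get_kwargs() docstring describes, returning a list of kwargs dicts makes Celery create one analyzer instance per dict. A hedged sketch of such an override; the 'domain' keyword, its values, and the subclass itself are illustrative.

# Hypothetical subclass: one analyzer instance is created per domain.
class DomainAnalyzer(BaseIndexAnalyzer):
    """Hypothetical analyzer that runs once per domain."""

    NAME = 'domain_analyzer'

    def __init__(self, index_name, domain=None):
        super(DomainAnalyzer, self).__init__(index_name)
        self.domain = domain

    @classmethod
    def get_kwargs(cls):
        # Illustrative values; each dict becomes one Celery task.
        domains = ['example.com', 'example.org']
        return [{'domain': domain} for domain in domains]

    def run(self):
        return 'Processed domain: {0:s}'.format(self.domain)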
Code Example #7
class BaseAnalyzer:
    """Base class for analyzers.

    Attributes:
        name: Analyzer name.
        index_name: Name of the Elasticsearch index.
        datastore: Elasticsearch datastore client.
        sketch: Instance of Sketch object.
        timeline_id: The ID of the timeline the analyzer runs on.
        tagged_events: Dict with all events to add tags and those tags.
        emoji_events: Dict with all events to add emojis and those emojis.
    """

    NAME = 'name'
    DISPLAY_NAME = None
    DESCRIPTION = None

    # If this analyzer depends on another analyzer
    # it needs to be included in this frozenset by using
    # the analyzer names.
    DEPENDENCIES = frozenset()

    # Used as hints to the frontend UI in order to render input forms.
    FORM_FIELDS = []

    # Configure how long an analyzer should wait for the timeline
    # to become fully indexed before aborting.
    SECONDS_PER_WAIT = 10
    MAXIMUM_WAITS = 360

    def __init__(self, index_name, sketch_id, timeline_id=None):
        """Initialize the analyzer object.

        Args:
            index_name: Elasticsearch index name.
            sketch_id: Sketch ID.
            timeline_id: The timeline ID.
        """
        self.name = self.NAME
        self.index_name = index_name
        self.sketch = Sketch(sketch_id=sketch_id)
        self.timeline_id = timeline_id
        self.timeline_name = ''

        self.tagged_events = {}
        self.emoji_events = {}

        self.datastore = ElasticsearchDataStore(
            host=current_app.config['ELASTIC_HOST'],
            port=current_app.config['ELASTIC_PORT'])

        if not hasattr(self, 'sketch'):
            self.sketch = None

    def event_pandas(self,
                     query_string=None,
                     query_filter=None,
                     query_dsl=None,
                     indices=None,
                     return_fields=None):
        """Search ElasticSearch.

        Args:
            query_string: Query string.
            query_filter: Dictionary containing filters to apply.
            query_dsl: Dictionary containing Elasticsearch DSL query.
            indices: List of indices to query.
            return_fields: List of fields to include in the search results;
                if not provided, all fields are included in the results.

        Returns:
            A pandas DataFrame with all the events.

        Raises:
            ValueError: if neither query_string nor query_dsl is provided.
        """
        if not (query_string or query_dsl):
            raise ValueError('Both query_string and query_dsl are missing')

        if not query_filter:
            query_filter = {'indices': self.index_name, 'size': 10000}

        if not indices:
            indices = [self.index_name]

        if self.timeline_id:
            timeline_ids = [self.timeline_id]
        else:
            timeline_ids = None

        # Refresh the index to make sure it is searchable. Iterate over a
        # copy, since broken indices are removed from the list below.
        for index in list(indices):
            try:
                self.datastore.client.indices.refresh(index=index)
            except elasticsearch.NotFoundError:
                logger.error('Unable to refresh index: {0:s}, not found, '
                             'removing from list.'.format(index))
                broken_index = indices.index(index)
                _ = indices.pop(broken_index)

        if not indices:
            raise ValueError('Unable to get events, no indices to query.')

        if return_fields:
            default_fields = definitions.DEFAULT_SOURCE_FIELDS
            return_fields.extend(default_fields)
            return_fields = list(set(return_fields))
            return_fields = ','.join(return_fields)

        results = self.datastore.search_stream(
            sketch_id=self.sketch.id,
            query_string=query_string,
            query_filter=query_filter,
            query_dsl=query_dsl,
            indices=indices,
            timeline_ids=timeline_ids,
            return_fields=return_fields,
        )

        events = []
        for event in results:
            source = event.get('_source')
            source['_id'] = event.get('_id')
            source['_type'] = event.get('_type')
            source['_index'] = event.get('_index')
            events.append(source)

        return pandas.DataFrame(events)

    def event_stream(self,
                     query_string=None,
                     query_filter=None,
                     query_dsl=None,
                     indices=None,
                     return_fields=None,
                     scroll=True):
        """Search ElasticSearch.

        Args:
            query_string: Query string.
            query_filter: Dictionary containing filters to apply.
            query_dsl: Dictionary containing Elasticsearch DSL query.
            indices: List of indices to query.
            return_fields: List of fields to return.
            scroll: Boolean determining whether we support scrolling searches
                or not. Defaults to True.

        Returns:
            Generator of Event objects.

        Raises:
            ValueError: if neither query_string nor query_dsl is provided.
        """
        if not (query_string or query_dsl):
            raise ValueError('Both query_string and query_dsl are missing')

        if not query_filter:
            query_filter = {'indices': self.index_name}

        # If not provided we default to the message field as this will always
        # be present.
        if not return_fields:
            return_fields = ['message']

        # Make sure we always return tag, human_readable and emoji attributes.
        return_fields.extend(['tag', 'human_readable', '__ts_emojis'])
        return_fields = list(set(return_fields))

        if not indices:
            indices = [self.index_name]

        # Refresh the index to make sure it is searchable. Iterate over a
        # copy, since broken indices are removed from the list below.
        for index in list(indices):
            try:
                self.datastore.client.indices.refresh(index=index)
            except elasticsearch.NotFoundError:
                logger.error('Unable to find index: {0:s}, removing from '
                             'result set.'.format(index))
                broken_index = indices.index(index)
                _ = indices.pop(broken_index)
        if not indices:
            raise ValueError(
                'Unable to query for analyzers, discovered no index to query.')

        if self.timeline_id:
            timeline_ids = [self.timeline_id]
        else:
            timeline_ids = None

        # Exponential backoff for the call to Elasticsearch. Sometimes the
        # cluster can be a bit overloaded and time out on requests. We want to
        # retry a few times in order to give the cluster a chance to return
        # results.
        backoff_in_seconds = 3
        retries = 5
        for x in range(0, retries):
            try:
                event_generator = self.datastore.search_stream(
                    query_string=query_string,
                    query_filter=query_filter,
                    query_dsl=query_dsl,
                    indices=indices,
                    return_fields=return_fields,
                    enable_scroll=scroll,
                    timeline_ids=timeline_ids)
                for event in event_generator:
                    yield Event(event,
                                self.datastore,
                                sketch=self.sketch,
                                analyzer=self)
                break  # Query was successful
            except elasticsearch.TransportError as e:
                sleep_seconds = (backoff_in_seconds * 2**x +
                                 random.uniform(3, 7))
                logger.info(
                    'Attempt: {0:d}/{1:d} sleeping {2:f} for query {3:s}'.
                    format(x + 1, retries, sleep_seconds, query_string))
                time.sleep(sleep_seconds)

                if x == retries - 1:
                    logger.error(
                        'Timeout executing search for {0:s}: {1!s}'.format(
                            query_string, e),
                        exc_info=True)
                    raise

    @_flush_datastore_decorator
    def run_wrapper(self, analysis_id):
        """A wrapper method to run the analyzer.

        This method is decorated to flush the bulk insert operation on the
        datastore. This makes sure that all events are indexed at exit.

        Returns:
            Return value of the run method.
        """
        analysis = Analysis.query.get(analysis_id)
        analysis.set_status('STARTED')

        timeline = analysis.timeline
        self.timeline_name = timeline.name
        searchindex = timeline.searchindex

        counter = 0
        while True:
            status = searchindex.get_status.status
            status = status.lower()
            if status == 'ready':
                break

            if status == 'fail':
                logger.error(
                    'Unable to run analyzer on a failed index ({0:s})'.format(
                        searchindex.index_name))
                return 'Failed'

            time.sleep(self.SECONDS_PER_WAIT)
            counter += 1
            if counter >= self.MAXIMUM_WAITS:
                logger.error(
                    'Indexing has taken too long, aborting analyzer run')
                return 'Failed'
            # Refresh the searchindex object.
            db_session.refresh(searchindex)

        # Run the analyzer. Broad Exception catch to catch any error and store
        # the error in the DB for display in the UI.
        try:
            result = self.run()
            analysis.set_status('DONE')
        except Exception:  # pylint: disable=broad-except
            analysis.set_status('ERROR')
            result = traceback.format_exc()

        # Update database analysis object with result and status
        analysis.result = '{0:s}'.format(result)
        db_session.add(analysis)
        db_session.commit()

        return result

    @classmethod
    def get_kwargs(cls):
        """Get keyword arguments needed to instantiate the class.
        Every analyzer gets the index_name as its first argument from Celery.
        By default this is the only argument. If your analyzer need more
        arguments you can override this method and return as a dictionary.

        If you want more than one instance to be created for your analyzer you
        can return a list of dictionaries with kwargs and each one will be
        instantiated and registered in Celery. This is neat if you want to run
        your analyzer with different arguments in parallel.

        Returns:
            List of keyword argument dicts or empty list if no extra arguments
            are needed.
        """
        return []

    def run(self):
        """Entry point for the analyzer."""
        raise NotImplementedError
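
For reference, the retry loop above sleeps backoff_in_seconds * 2**x + uniform(3, 7) seconds after each failed attempt: roughly 6-10 s after the first failure, then 9-13, 15-19, 27-31 and 51-55 s, so a query that never succeeds is abandoned after roughly two minutes of accumulated waiting.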
Code Example #8
class SimilarityScorer(object):
    """Score events based on Jaccard distance."""
    def __init__(self, index, data_type):
        """Initializes a similarity scorer.

        Args:
            index: Elasticsearch index name.
            data_type: Name of the data_type.
        """
        self._datastore = ElasticsearchDataStore(
            host=current_app.config['ELASTIC_HOST'],
            port=current_app.config['ELASTIC_PORT'])
        self._config = SimilarityScorerConfig(index, data_type)

    def _shingles_from_text(self, text):
        """Splits string into words.

        Args:
            text: String to extract words from.

        Returns:
            List of words.
        """
        # TODO: Remove stopwords using the NLTK python package.
        # TODO: Remove configured patterns from string.
        delimiters = self._config.delimiters
        return re.split('|'.join(delimiters), text)

    def _minhash_from_text(self, text):
        """Calculate minhash of text.

        Args:
            text: String to calculate minhash of.

        Returns:
            A minhash (instance of datasketch.minhash.MinHash)
        """
        minhash = MinHash(self._config.num_perm)
        for word in self._shingles_from_text(text):
            minhash.update(word.encode('utf8'))
        return minhash

    def _new_lsh_index(self):
        """Create a new LSH from a set of Timesketch events.

        Returns:
            A tuple with an LSH (instance of datasketch.lsh.LSH) and a
            dictionary with event ID as key and minhash as value.
        """
        minhashes = {}
        lsh = MinHashLSH(self._config.threshold, self._config.num_perm)

        # Event generator for streaming Elasticsearch results.
        events = self._datastore.search_stream(
            query_string=self._config.query,
            query_filter={},
            indices=[self._config.index],
            return_fields=[self._config.field])

        with lsh.insertion_session() as lsh_session:
            for event in events:
                event_id = event['_id']
                index_name = event['_index']
                event_type = event['_type']
                event_text = event['_source'][self._config.field]

                # Insert minhash in LSH index
                key = (event_id, event_type, index_name)
                minhash = self._minhash_from_text(event_text)
                minhashes[key] = minhash
                lsh_session.insert(key, minhash)

        return lsh, minhashes

    @staticmethod
    def _calculate_score(lsh, minhash, total_num_events):
        """Calculate a score based on Jaccard distance.

        The score is calculated from how many similar events there are for
        the event being scored. These are called neighbours, and the score
        is simply the number of neighbours the event has divided by the
        total number of events in the LSH.

        Args:
            lsh: Instance of datasketch.lsh.MinHashLSH
            minhash: Instance of datasketch.minhash.MinHash
            total_num_events: Integer of how many events in the LSH

        Returns:
            A float between 0 and 1.
        """
        neighbours = lsh.query(minhash)
        return float(len(neighbours)) / float(total_num_events)

    def _update_event(self, event_id, event_type, index_name, score):
        """Add a similarity_score attribute to the event in Elasticsearch.

        Args:
            event_id: ID of the Elasticsearch document.
            event_type: The Elasticsearch type of the event.
            index_name: The name of the index in Elasticsearch.
            score: A numerical similarity score with value between 0 and 1.
        """
        update_doc = {'similarity_score': score}
        self._datastore.import_event(index_name,
                                     event_type,
                                     event_id=event_id,
                                     event=update_doc)

    def run(self):
        """Entry point for a SimilarityScorer.

        Returns:
            A dict with metadata about the processed data set.
        """
        lsh, minhashes = self._new_lsh_index()
        total_num_events = len(minhashes)
        for key, minhash in minhashes.items():
            event_id, event_type, index_name = key
            score = self._calculate_score(lsh, minhash, total_num_events)
            self._update_event(event_id, event_type, index_name, score)

        return dict(index=self._config.index,
                    data_type=self._config.data_type,
                    num_events_processed=total_num_events)
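
A minimal usage sketch, assuming a Flask application context (so current_app.config is available) and an index containing events of the named data_type; the index and data_type values are illustrative.

# Illustrative values only.
scorer = SimilarityScorer(index='my_timeline_index', data_type='syslog:line')
summary = scorer.run()
# e.g. {'index': 'my_timeline_index', 'data_type': 'syslog:line',
#       'num_events_processed': 1234}
print(summary)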
Code Example #9
File: api_fetcher.py Project: tpterovtt/timesketch
class ApiDataFetcher(interface.DataFetcher):
    """Data Fetcher for an API story exporter."""

    def __init__(self):
        """Initialize the data fetcher."""
        super(ApiDataFetcher, self).__init__()
        self._datastore = ElasticsearchDataStore(
            host=current_app.config['ELASTIC_HOST'],
            port=current_app.config['ELASTIC_PORT'])

    def get_aggregation(self, agg_dict):
        """Returns an aggregation object from an aggregation dict.

        Args:
            agg_dict (dict): a dictionary containing information
                about the stored aggregation.

        Returns:
            A dict with metadata information as well as the aggregation
            object (instance of AggregationResult) from a saved aggregation
            or an empty dict if not found.
        """
        aggregation_id = agg_dict.get('id')
        if not aggregation_id:
            return {}

        aggregation = Aggregation.query.get(aggregation_id)
        if not aggregation:
            return {}

        try:
            agg_class = aggregator_manager.AggregatorManager.get_aggregator(
                aggregation.agg_type)
        except KeyError:
            return {}

        if not agg_class:
            return {}
        aggregator = agg_class(sketch_id=self._sketch_id)
        parameter_string = aggregation.parameters
        parameters = json.loads(parameter_string)
        data = {
            'aggregation': aggregator.run(**parameters),
            'name': aggregation.name,
            'description': aggregation.description,
            'agg_type': aggregation.agg_type,
            'parameters': parameters,
            'chart_type': aggregation.chart_type,
            'user': aggregation.user,
        }
        return data

    def get_aggregation_group(self, agg_dict):
        """Returns an aggregation object from an aggregation dict.

        Args:
            agg_dict (dict): a dictionary containing information
                about the stored aggregation.

        Returns:
            A dict that contains metadata about the aggregation group
            as well as a chart object (instance of altair.Chart)
            with the combined chart object from the group.
        """
        group_id = agg_dict.get('id')
        if not group_id:
            return None

        group = AggregationGroup.query.get(group_id)
        if not group:
            return None

        orientation = group.orientation

        result_chart = None
        for aggregator in group.aggregations:
            if aggregator.parameters:
                aggregator_parameters = json.loads(aggregator.parameters)
            else:
                aggregator_parameters = {}

            agg_class = aggregator_manager.AggregatorManager.get_aggregator(
                aggregator.agg_type)
            if not agg_class:
                continue

            aggregator_obj = agg_class(sketch_id=self._sketch_id)
            chart_type = aggregator_parameters.pop('supported_charts', None)
            color = aggregator_parameters.pop('chart_color', '')
            result_obj = aggregator_obj.run(**aggregator_parameters)

            chart = result_obj.to_chart(
                chart_name=chart_type,
                chart_title=aggregator_obj.chart_title,
                as_chart=True, interactive=True, color=color)

            if result_chart is None:
                result_chart = chart
            elif orientation == 'horizontal':
                result_chart = alt.hconcat(chart, result_chart)
            elif orientation == 'vertical':
                result_chart = alt.vconcat(chart, result_chart)
            else:
                result_chart = alt.layer(chart, result_chart)

        data = {
            'name': group.name,
            'description': group.description,
            'chart': result_chart,
            'parameters': group.parameters,
            'orientation': group.orientation,
            'user': group.user,
        }
        return data

    def get_view(self, view_dict):
        """Returns a data frame from a view dict.

        Args:
            view_dict (dict): a dictionary containing information
                about the stored view.

        Returns:
            A pandas DataFrame with the results from a view aggregation.
        """
        view_id = view_dict.get('id')
        if not view_id:
            return pd.DataFrame()

        view = View.query.get(view_id)
        if not view:
            return pd.DataFrame()

        if not view.query_string and not view.query_dsl:
            return pd.DataFrame()

        query_filter = view.query_filter
        if query_filter and isinstance(query_filter, str):
            query_filter = json.loads(query_filter)
        elif not query_filter:
            query_filter = {'indices': '_all', 'size': 100}

        if view.query_dsl:
            query_dsl = json.loads(view.query_dsl)
        else:
            query_dsl = None

        sketch = Sketch.query.get_with_acl(self._sketch_id)
        sketch_indices = [
            t.searchindex.index_name
            for t in sketch.active_timelines
        ]

        results = self._datastore.search_stream(
            sketch_id=self._sketch_id,
            query_string=view.query_string,
            query_filter=query_filter,
            query_dsl=query_dsl,
            indices=sketch_indices,
        )
        result_list = [x.get('_source') for x in results]
        return pd.DataFrame(result_list)
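
To close, a short usage sketch for get_aggregation_group(), assuming an existing group ID and that the exporter has set the sketch ID; saving an Altair chart to HTML via chart.save() is standard Altair behavior.

# Illustrative only: the group ID and sketch ID below are made up.
fetcher = ApiDataFetcher()
fetcher._sketch_id = 1  # normally set by the story exporter
group_data = fetcher.get_aggregation_group({'id': 7})
if group_data and group_data.get('chart') is not None:
    group_data['chart'].save('aggregation_group.html')  # Altair chart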