Example #1
0
def search_command(proxies):
    """Run an Elasticsearch query and return results to the war room.

    Reads index/query/paging/sort parameters from ``demisto.args()``,
    executes the search, and posts metadata + hit tables as markdown
    alongside the raw response and a context entry.
    """
    args = demisto.args()
    index = args.get('index')
    query = args.get('query')
    fields = args.get('fields')  # comma-separated list of fields to display
    explain = args.get('explain') == 'true'
    base_page = int(args.get('page'))
    size = int(args.get('size'))
    sort_field = args.get('sort-field')
    sort_order = args.get('sort-order')

    es = elasticsearch_builder(proxies)

    query_string = QueryString(query=query)
    search = Search(using=es, index=index).query(query_string)[base_page:base_page + size]
    if explain:
        # 'explain' == 'true' adds a per-hit scoring explanation section to results.
        search = search.extra(explain=True)

    if fields is not None:
        search = search.source(fields.split(','))

    if sort_field is not None:
        search = search.sort({sort_field: {'order': sort_order}})

    response = search.execute().to_dict()

    total_dict, total_results = get_total_results(response)
    search_context, meta_headers, hit_tables, hit_headers = results_to_context(
        index, query, base_page, size, total_dict, response)

    metadata_md = tableToMarkdown('Search Metadata:',
                                  search_context,
                                  meta_headers,
                                  removeNull=True)
    hits_md = tableToMarkdown('Hits:',
                              hit_tables,
                              hit_headers,
                              removeNull=True)
    human_readable = metadata_md + '\n' + hits_md

    context_key = (
        'Elasticsearch.Search(val.Query == obj.Query && val.Index == obj.Index '
        '&& val.Server == obj.Server && val.Page == obj.Page && val.Size == obj.Size)'
    )
    return_outputs(human_readable, {context_key: search_context}, response)
Example #2
0
def multiple_aggregate_search(es_connection, index_pattern, search_query, aggregation_type,
                              aggregation_field_one, aggregation_field_two, sort='@timestamp',
                              limit_to_fields=None, date_start='now-1d/d', date_end='now'):
    """Run a query with two terms aggregations and pair their bucket keys.

    Args:
        es_connection: Elasticsearch connection to search with.
        index_pattern (str): Index pattern to search against.
        search_query (str): Lucene query string limiting the results.
        aggregation_type (str): Metric aggregation type (e.g. 'cardinality').
        aggregation_field_one (str): Field for the first terms aggregation.
        aggregation_field_two (str): Field for the second terms aggregation.
        sort (str, optional): Sort field. Defaults to '@timestamp'.
        limit_to_fields (list, optional): Fields to return in _source. Defaults to all.
        date_start (str, optional): Range filter start. Defaults to 'now-1d/d'.
        date_end (str, optional): Range filter end. Defaults to 'now'.

    Returns:
        dict: Keys from the first aggregation's buckets mapped positionally to
        keys from the second aggregation's buckets.
    """
    # Avoid a shared mutable default argument.
    if limit_to_fields is None:
        limit_to_fields = []
    s = Search(using=es_connection, index=index_pattern, doc_type='_doc')
    s = s.query('query_string', query=search_query)
    if limit_to_fields:
        s = s.source(limit_to_fields)
    s = s.sort(sort)
    s = s.filter('range', **{'@timestamp': {'gte': date_start, 'lt': date_end}})
    # One terms bucket per field, each carrying a 'Count' metric of the requested type.
    s.aggs.bucket(aggregation_field_one, 'terms', field=aggregation_field_one,
                  size=999999).metric('Count', aggregation_type, field=aggregation_field_one)
    s.aggs.bucket(aggregation_field_two, 'terms', field=aggregation_field_two,
                  size=999999).metric('Count', aggregation_type, field=aggregation_field_two)
    response = s.execute()
    keys_one = [bucket['key'] for bucket in response.aggregations[aggregation_field_one].buckets]
    keys_two = [bucket['key'] for bucket in response.aggregations[aggregation_field_two].buckets]
    # Pair buckets positionally; like the original, raises IndexError if the
    # second aggregation returns fewer buckets than the first.
    return {key: keys_two[i] for i, key in enumerate(keys_one)}
Example #3
0
def build_search(es_connection, index, query, sort='@timestamp', limit_to_fields=None):
	"""Build (but do not execute) an Elasticsearch search object.

	Args:
		es_connection: Elasticsearch connection to search with.
		index (str): Index pattern to search against.
		query (str): Lucene query to limit results.
		sort (str, optional): Sort field. Defaults to '@timestamp'.
		limit_to_fields (list, optional): Limit which fields to return.
			Defaults to None (all fields).

	Returns:
		elasticsearch_dsl.Search: The prepared search, ready to execute.
	"""
	# Avoid a shared mutable default argument.
	if limit_to_fields is None:
		limit_to_fields = []
	search = Search(using=es_connection, index=index, doc_type='_doc')
	search = search.query('query_string', query=query)
	if limit_to_fields:
		search = search.source(limit_to_fields)
	search = search.sort(sort)
	return search
Example #4
0
def aggregate_search(es_connection, index_pattern, search_query, aggregation_type,
                     aggregation_field, sort='@timestamp', limit_to_fields=None,
                     date_start='now-1d/d', date_end='now', result_size=100, interval='auto'):
    """Run a query with a single terms aggregation and summarize the buckets.

    Args:
        es_connection: Elasticsearch connection to search with.
        index_pattern (str): Index pattern to search against.
        search_query (str): Lucene query string limiting the results.
        aggregation_type (str): Metric aggregation type (e.g. 'terms', 'date_histogram').
        aggregation_field (str): Field to aggregate on.
        sort (str, optional): Sort field, also used for the range filter. Defaults to '@timestamp'.
        limit_to_fields (list, optional): Fields to return in _source. Defaults to all.
        date_start (str, optional): Range filter start; 'ignore' skips the filter.
        date_end (str, optional): Range filter end. Defaults to 'now'.
        result_size (int, optional): Max number of buckets. Defaults to 100.
        interval (str, optional): Histogram interval for 'date_histogram'. Defaults to 'auto'.

    Returns:
        dict: bucket key -> doc_count for terms/histogram aggregation types.
        list: raw buckets for any other aggregation type.
    """
    # Avoid a shared mutable default argument.
    if limit_to_fields is None:
        limit_to_fields = []
    s = Search(using=es_connection, index=index_pattern, doc_type='_doc')
    s = s.query('query_string', query=search_query)
    if limit_to_fields:
        s = s.source(limit_to_fields)
    s = s.sort(sort)
    if date_start != 'ignore':
        # Range filter keys off the sort field rather than a fixed timestamp field.
        s = s.filter('range', **{sort: {'gte': date_start, 'lt': date_end}})
    s.aggs.bucket(aggregation_field, 'terms', field=aggregation_field, size=result_size)
    if aggregation_type == 'date_histogram':
        # Only date_histogram accepts an interval parameter.
        s.aggs[aggregation_field].metric('Count', aggregation_type,
                                         field=aggregation_field, interval=interval)
    else:
        s.aggs[aggregation_field].metric('Count', aggregation_type, field=aggregation_field)
    response = s.execute()
    buckets = response.aggregations[aggregation_field].buckets
    if aggregation_type in ('terms', 'auto_date_histogram', 'date_histogram'):
        return {bucket['key']: bucket['doc_count'] for bucket in buckets}
    return list(buckets)