Code Example #1
File: alerts.py Project: datastark/aleph
def build_query(al):
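    # checked_at may be unset for a new alert; fall back to the configured checking interval.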
    last_checked = al.checked_at or (datetime.now() -
                                     timedelta(days=al.checking_interval))
    es_date_format = '%FT%X.000'
    newerthan = last_checked.strftime(es_date_format)
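    # Re-run the stored alert query, restricted to documents newer than the last check.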
    qry = document_query(args={'q': al.query}, newerthan=newerthan)
    return qry
Code Example #2
File: search_api.py Project: nightsh/aleph
def query():
    etag_cache_keygen()
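    # Scope the search to the lists and sources the current user is allowed to read.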
    query = document_query(request.args, lists=authz.authz_lists('read'),
                           sources=authz.authz_sources('read'))
    results = search_documents(query)
    pager = Pager(results,
                  results_converter=lambda ds: [add_urls(d) for d in ds])
    data = pager.to_dict()
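    # Expose facet counts derived from the search aggregations.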
    data['facets'] = transform_facets(results.result.get('aggregations', {}))
    return jsonify(data)
Code Example #3
File: exports_api.py Project: datastark/aleph
def export():
    attributes = request.args.getlist('attribute')
    query = document_query(request.args, lists=authz.authz_lists('read'),
                           sources=authz.authz_sources('read'))
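    # Track the _source fields as a set so each requested attribute is added only once.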
    query['_source'] = set(query['_source'])
    for attribute in attributes:
        if attribute in CORE_FIELDS:
            query['_source'].add(attribute)
        else:
            query['_source'].add('attributes')
    query['_source'] = list(query['_source'])
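    # Stream matching documents through process_row and collect the rows into an Excel workbook.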
    output = (process_row(r, attributes) for r in raw_iter(query))
    output = make_excel(output, attributes)
    return send_file(output, mimetype=XLSX_MIME, as_attachment=True,
                     attachment_filename='export.xlsx')
Code Example #4
File: search_api.py Project: OpenOil-UG/aleph
def _query():
    '''
    Everything here should be applicable to both the internal and the
    public API.
    '''
    etag_cache_keygen()
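    # Request highlighted matches along with the authz-scoped results.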
    query = document_query(request.args, lists=authz.authz_lists('read'),
                           sources=authz.authz_sources('read'),
                           highlights=True)
    results = search_documents(query)
    pager = Pager(results,
                  results_converter=lambda ds: [add_urls(d) for d in ds])
    data = pager.to_dict()
    #import ipdb; ipdb.set_trace()
    data['facets'] = transform_facets(results.result.get('aggregations', {}))
    return data
Code Example #5
File: search_api.py Project: vied12/aleph
def _query():
    '''
    Everything here should be applicable to both the internal and the
    public API.
    '''
    etag_cache_keygen()
    query = document_query(request.args,
                           lists=authz.authz_lists('read'),
                           sources=authz.authz_sources('read'),
                           highlights=True)
    results = search_documents(query)
    pager = Pager(results,
                  results_converter=lambda ds: [add_urls(d) for d in ds])
    data = pager.to_dict()
    #import ipdb; ipdb.set_trace()
    data['facets'] = transform_facets(results.result.get('aggregations', {}))
    return data
Code Example #6
File: exports_api.py Project: vied12/aleph
def export():
    attributes = request.args.getlist('attribute')
    query = document_query(request.args,
                           lists=authz.authz_lists('read'),
                           sources=authz.authz_sources('read'))
    query['_source'] = set(query['_source'])
    for attribute in attributes:
        if attribute in CORE_FIELDS:
            query['_source'].add(attribute)
        else:
            query['_source'].add('attributes')
    query['_source'] = list(query['_source'])
    output = (process_row(r, attributes) for r in raw_iter(query))
    output = make_excel(output, attributes)
    return send_file(output,
                     mimetype=XLSX_MIME,
                     as_attachment=True,
                     attachment_filename='export.xlsx')
Code Example #7
File: graph_api.py Project: datastark/aleph
def generate_graph(args):
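    # Fetch only the document and entity fields needed to build the co-occurrence graph.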
    fields = ['id', 'collection', 'entities.id', 'entities.label',
              'entities.category']
    query = document_query(args, fields=fields,
                           sources=authz.authz_sources('read'),
                           lists=authz.authz_lists('read'),
                           facets=False)
    graph = nx.MultiGraph()
    for doc in raw_iter(query):
        entities = set()
        for entity in doc.get('_source').get('entities', []):
            if not graph.has_node(entity.get('id')):
                graph.add_node(entity.get('id'),
                               label=entity.get('label'),
                               category=entity.get('category'))
            entities.add(entity.get('id'))
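        # Link every pair of entities that co-occur in this document.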
        for (src, dst) in combinations(entities, 2):
            graph.add_edge(src, dst, weight=1)
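    # Collapse the multigraph's parallel edges into single weighted edges.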
    graph = multigraph_to_weighted(graph)
    return paginate_graph(graph)
Code Example #8
def generate_graph(args):
    fields = [
        'id', 'collection', 'entities.id', 'entities.label',
        'entities.category'
    ]
    query = document_query(args,
                           fields=fields,
                           sources=authz.authz_sources('read'),
                           lists=authz.authz_lists('read'),
                           facets=False)
    graph = nx.MultiGraph()
    for doc in raw_iter(query):
        entities = set()
        for entity in doc.get('_source').get('entities', []):
            if not graph.has_node(entity.get('id')):
                graph.add_node(entity.get('id'),
                               label=entity.get('label'),
                               category=entity.get('category'))
            entities.add(entity.get('id'))
        for (src, dst) in combinations(entities, 2):
            graph.add_edge(src, dst, weight=1)
    graph = multigraph_to_weighted(graph)
    return paginate_graph(graph)
Code Example #9
File: alerts.py Project: OpenOil-UG/aleph
def build_query(al):
    last_checked = al.checked_at or (datetime.now() - timedelta(days=al.checking_interval))
    es_date_format = "%FT%X.000"
    newerthan = last_checked.strftime(es_date_format)
    qry = document_query(args={"q": al.query}, newerthan=newerthan)
    return qry