Example #1
File: alerts.py  Project: wcyn/aleph
def check_role_alerts(authz):
    alerts = Alert.by_role(authz.role).all()
    if not len(alerts):
        return
    log.info('Alerting %r, %d alerts...', authz.role, len(alerts))
    for alert in alerts:
        args = {
            'q': alert.query_text,
            'filter:entities.id': alert.entity_id,
            'limit': 50
        }
        state = QueryState(args, authz)
        results = documents_query(state, since=alert.notified_at)
        if results['total'] == 0:
            continue
        log.info('Found %d new results for: %r', results['total'], alert.label)
        alert.update()
        try:
            subject = '%s (%s new results)' % (alert.label, results['total'])
            html = render_template('email/alert.html',
                                   alert=alert,
                                   role=authz.role,
                                   total=results.get('total'),
                                   results=format_results(alert, results),
                                   app_title=app_title,
                                   app_url=app_url)
            notify_role(authz.role, subject, html)
        except Exception as ex:
            log.exception(ex)
    db.session.commit()
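The example above hands pre-formatted rows to the email/alert.html template via format_results, which is not part of this excerpt. A minimal sketch of such a helper is shown below for orientation only; the field names and URL layout are assumptions, not the aleph implementation.

def format_results(alert, results):
    # Hypothetical sketch: turn the raw search result into rows the email
    # template can render. 'results', 'title', 'file_name' and 'id' are
    # assumed keys of the search response.
    rows = []
    for doc in results.get('results', []):
        rows.append({
            'title': doc.get('title') or doc.get('file_name'),
            'url': '%sdocuments/%s' % (app_url, doc.get('id')),
        })
    return rows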
Example #2
def export():
    query = documents_query(request.args)
    query = {'query': query['query']}
    limit = min(10000, get_limit(default=50))
    output = make_excel(get_results(query, limit), FIELDS)
    return send_file(output, mimetype=XLSX_MIME, as_attachment=True,
                     attachment_filename='export.xlsx')
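get_results is not shown in this excerpt. A minimal sketch, assuming it streams matching documents from Elasticsearch up to the requested limit (the es client and es_index names are assumptions), might look like this:

from elasticsearch.helpers import scan

def get_results(query, limit):
    # Hypothetical sketch: stream up to `limit` matching documents from the
    # index and yield only their stored source.
    for i, hit in enumerate(scan(es, query=query, index=es_index)):
        if i >= limit:
            break
        yield hit.get('_source', {})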
Example #3
def statistics():
    collections = authz.collections(authz.READ)
    enable_cache(vary=collections)
    query = documents_query(MultiDict())
    query["size"] = 0
    result = execute_documents_query(MultiDict(), query)
    # collections = Collection.category_statistics(collections)
    return jsonify({"document_count": result["total"], "collection_count": len(collections)})
Example #4
def query():
    enable_cache(vary_user=True)
    state = QueryState(request.args, request.authz)
    result = documents_query(state)
    params = next_params(request.args, result)
    log_event(request)
    if params is not None:
        result['next'] = url_for('search_api.query', **params)
    return jsonify(result)
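Because the endpoint embeds a next URL when more pages exist, a caller can walk the full result set by following that link. A small client-side sketch (not part of aleph; it assumes the JSON keys results and next seen above):

import requests

def iter_documents(url, params=None):
    # Follow the paginated API until no 'next' link is returned.
    while url:
        data = requests.get(url, params=params).json()
        for doc in data.get('results', []):
            yield doc
        url, params = data.get('next'), None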
Example #5
def statistics():
    enable_cache(vary_user=True)
    documents = documents_query(QueryState({}, request.authz, limit=0))
    entities = entities_query(QueryState({}, request.authz, limit=0))
    return jsonify({
        'documents_count': documents.get('total'),
        'entities_count': entities.get('total'),
        'collections_count': len(request.authz.collections_read)
    })
Example #6
def query():
    enable_cache(vary_user=True, vary=authz.collections(authz.READ))
    query = documents_query(request.args)
    query['size'] = get_limit(default=100)
    query['from'] = get_offset()
    result = execute_documents_query(request.args, query)
    params = next_params(request.args, result)
    if params is not None:
        result['next'] = url_for('search_api.query', **params)
    return jsonify(result)
Example #7
def statistics():
    collections = authz.collections(authz.READ)
    enable_cache(vary=collections)
    query = documents_query(MultiDict())
    query['size'] = 0
    result = execute_documents_query(MultiDict(), query)
    # collections = Collection.category_statistics(collections)
    return jsonify({
        'document_count': result['total'],
        'collection_count': len(collections)
    })
Example #8
def query():
    enable_cache(vary_user=True,
                 vary=authz.collections(authz.READ))
    query = documents_query(request.args)
    query['size'] = get_limit(default=100)
    query['from'] = get_offset()
    result = execute_documents_query(request.args, query)
    params = next_params(request.args, result)
    if params is not None:
        result['next'] = url_for('search_api.query', **params)
    return jsonify(result)
Example #9
def query():
    enable_cache(vary_user=True, vary=authz.collections(authz.READ))
    query = documents_query(request.args)
    query["size"] = get_limit(default=100)
    query["from"] = get_offset()
    # import json
    # print json.dumps(query, indent=2)
    result = execute_documents_query(request.args, query)
    params = next_params(request.args, result)
    log_event(request)
    if params is not None:
        result["next"] = url_for("search_api.query", **params)
    return jsonify(result)
Example #10
File: search_api.py  Project: 01-/aleph
def query():
    creds = authz.collections(authz.READ), authz.sources(authz.READ)
    enable_cache(vary_user=True, vary=creds)
    query = documents_query(request.args)
    query['size'] = get_limit(default=100)
    query['from'] = get_offset()
    result = execute_documents_query(request.args, query)
    result['alert'] = None
    if authz.logged_in():
        result['alert'] = Alert.exists(request.args, request.auth_role)
    params = next_params(request.args, result)
    if params is not None:
        result['next'] = url_for('search_api.query', **params)
    return jsonify(result)
Example #11
def _query(escape=False):
    '''
    Everything here should be applicable both to the internal and to the
    public API.
    '''
    creds = authz.collections(authz.READ), authz.sources(authz.READ)
    enable_cache(vary_user=True, vary=creds)
    query = documents_query(request.args, escape=escape)
    query['size'] = get_limit(default=100)
    query['from'] = get_offset()
    result = execute_documents_query(request.args, query)
    params = next_params(request.args, result)
    if params is not None:
        result['next'] = url_for('search_api.query', **params)
    return result
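next_params itself is not part of the excerpt. A hedged guess at what it computes, based only on how it is used in these examples: the query parameters for the following page, or None once the current offset and limit already cover the total.

def next_params(args, result):
    # Hypothetical sketch; the real helper may differ.
    limit = get_limit(default=100)
    offset = get_offset() + limit
    if offset >= result.get('total', 0):
        return None
    params = args.to_dict(flat=False)  # request.args is a werkzeug MultiDict
    params['offset'] = offset
    params['limit'] = limit
    return params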
Example #12
def generate_graph(args):
    fields = ['id', 'collection', 'entities.entity_id', 'entities.name',
              'entities.category']
    query = documents_query(args, fields=fields, facets=False)

    graph = nx.MultiGraph()
    for doc in raw_iter(query):
        entities = set()
        for entity in doc.get('_source').get('entities', []):
            if not graph.has_node(entity.get('entity_id')):
                graph.add_node(entity.get('entity_id'),
                               label=entity.get('name'),
                               category=entity.get('category'))
            entities.add(entity.get('entity_id'))
        for (src, dst) in combinations(entities, 2):
            graph.add_edge(src, dst, weight=1)
    graph = multigraph_to_weighted(graph)

    return paginate_graph(graph)
Example #13
def generate_graph(args):
    fields = ['id', 'collection', 'entities.uuid', 'entities.name',
              'entities.$schema']
    query = documents_query(args, fields=fields, facets=False)
    query = {'query': query['query']}

    graph = nx.MultiGraph()
    for doc in scan_iter(query):
        entities = set()
        for entity in doc.get('_source').get('entities', []):
            if not graph.has_node(entity.get('uuid')):
                obj = Entity.by_id(entity.get('uuid'))
                graph.add_node(entity.get('uuid'),
                               label=obj.name,
                               schema=obj.type)
            entities.add(entity.get('uuid'))
        for (src, dst) in combinations(entities, 2):
            graph.add_edge(src, dst, weight=1)
    graph = multigraph_to_weighted(graph)
    return paginate_graph(graph)
Example #14
def generate_graph(args):
    fields = [
        'id', 'collection', 'entities.uuid', 'entities.name',
        'entities.$schema'
    ]
    query = documents_query(args, fields=fields, facets=False)
    query = {'query': query['query']}

    graph = nx.MultiGraph()
    for doc in scan_iter(query):
        entities = set()
        for entity in doc.get('_source').get('entities', []):
            if not graph.has_node(entity.get('uuid')):
                graph.add_node(entity.get('uuid'),
                               label=entity.get('name'),
                               schema=entity.get('$schema'))
            entities.add(entity.get('uuid'))
        for (src, dst) in combinations(entities, 2):
            graph.add_edge(src, dst, weight=1)
    graph = multigraph_to_weighted(graph)
    return paginate_graph(graph)
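All three generate_graph variants collapse the MultiGraph through multigraph_to_weighted before paginating. That helper is not included here; a minimal sketch of the presumable behaviour, summing parallel edges into a single weighted edge per node pair:

import networkx as nx

def multigraph_to_weighted(multigraph):
    # Hypothetical sketch: merge parallel edges, adding up their weights.
    graph = nx.Graph()
    graph.add_nodes_from(multigraph.nodes(data=True))
    for src, dst, data in multigraph.edges(data=True):
        weight = data.get('weight', 1)
        if graph.has_edge(src, dst):
            graph[src][dst]['weight'] += weight
        else:
            graph.add_edge(src, dst, weight=weight)
    return graph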
Example #15
def export():
    output = format_results(documents_query(request.args))
    output = make_excel(output)
    return send_file(output, mimetype=XLSX_MIME, as_attachment=True,
                     attachment_filename='export.xlsx')
Example #16
def query():
    etag_cache_keygen()
    query = documents_query(request.args)
    query['size'] = get_limit(default=100)
    query['from'] = get_offset()
    return jsonify(execute_documents_query(request.args, query))
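Nearly every example above relies on get_limit and get_offset to page results; their implementations are not shown on this page. A hedged sketch of helpers with the same shape, parsing the query string and falling back to safe defaults:

from flask import request

def get_limit(default=50):
    # Hypothetical sketch: read ?limit= from the query string, fall back to
    # the given default and never go below zero.
    try:
        return max(0, int(request.args.get('limit', default)))
    except (TypeError, ValueError):
        return default

def get_offset(default=0):
    # Hypothetical sketch: read ?offset= from the query string.
    try:
        return max(0, int(request.args.get('offset', default)))
    except (TypeError, ValueError):
        return default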