# Example #1
# 0
def search_articles(request, input_text):
    """Render the article-search results page for *input_text*.

    Looks up matching articles via ``Article.search`` and passes each one to
    the template together with the acronyms of its authors.

    :param request: the incoming Django HTTP request.
    :param input_text: free-text search query entered by the user.
    :return: ``HttpResponse`` with the rendered search-results page.
    """
    template = loader.get_template('search/search_articles.html')
    # List comprehensions instead of map(lambda ...): on Python 3 ``map``
    # returns a one-shot iterator, which a template cannot safely re-iterate.
    articles_info = [
        {
            'article': article,
            'authors_acronym': [author.acronym for author in article.authors.all()],
        }
        for article in Article.search(input_text)
    ]
    context = RequestContext(request, {
        'articles': articles_info,
    })
    return HttpResponse(template.render(context))
# Example #2
# 0
 def get(self, request):
     """Return title auto-complete suggestions as a JSON array.

     Reads the ``s`` query parameter and, when non-empty, asks the
     Elasticsearch completion suggester (field ``title_suggest``) for up
     to 10 fuzzy matches.  An empty parameter yields an empty JSON list.
     """
     query = request.GET.get('s', '')
     titles = []
     if query:
         search = Article.search().suggest(
             'my_suggest', query,
             completion={
                 "field": "title_suggest",
                 "fuzzy": {
                     "fuzziness": 2
                 },
                 "size": 10
             })
         result = search.execute_suggest()
         # Collect the suggested document titles from the first suggest entry.
         titles = [option._source["title"]
                   for option in result.my_suggest[0].options]
     return HttpResponse(json.dumps(titles), content_type="application/json")
def import_db(article_info):
    """Import a parsed article record into the database.

    Creates the ``Article`` itself, its references, its explicit keywords
    (with synonyms), and any keywords extracted from the abstract by
    ``SemanticExtractor``.

    :param article_info: dict with keys ``'title'`` and ``'authors'``
        (required) and optional ``'abstract'``, ``'references'`` and
        ``'keywords'``.  String values may arrive either as plain strings
        or as lists of string fragments.
    :return: the created ``Article`` instance.
    """
    def normalize_array_str(info):
        # Values may be a plain string or a list of fragments; join
        # fragments into a single string.
        return info if isinstance(info, (str, unicode)) else ''.join(info)

    def normalize_authors(authors):
        # Accept authors given as strings, fragment lists, or dicts of
        # per-field values; normalize every value to a plain string.
        normalized_authors = []
        for author in authors:
            author_info = {}
            if isinstance(author, (str, unicode, list)):
                author_info['author'] = normalize_array_str(author)
            elif isinstance(author, dict):
                for k, v in author.iteritems():
                    author_info[k] = normalize_array_str(v)
            if author_info:  # skip entries of unrecognized shape
                normalized_authors.append(author_info)
        return normalized_authors

    # 'abstract' is optional, like the other auxiliary keys below; .get
    # avoids a KeyError when it is missing.
    abstract = article_info.get('abstract')
    if abstract:
        semantic_extractor = SemanticExtractor(abstract)
        extracted_keywords_map = semantic_extractor.get_keywords()
        extracted_language_acr = semantic_extractor.get_language()
    else:
        extracted_keywords_map = {}
        extracted_language_acr = None

    article_title = normalize_array_str(article_info['title'])
    article_authors = normalize_authors(article_info['authors'])
    article = Article.add_article(article_title, article_authors,
                                  language_acr=extracted_language_acr)

    for reference in article_info.get('references', []):
        r_title = normalize_array_str(reference['title'])
        r_authors = normalize_authors(reference['authors'])
        Reference.add_reference(article, r_title, r_authors)

    # Fix: .get('keywords') without a default returned None when the key
    # was absent, so the for-loop raised TypeError.
    for ck in article_info.get('keywords', []):
        if isinstance(ck, dict):
            keyword = normalize_array_str(ck.get('keyword', []))
            Theme.add_themes(article, keyword, 'article')
            for synonym in ck.get('synonyms', []):
                Theme.add_theme_synonym(keyword, normalize_array_str(synonym))
        # Fix: include ``unicode`` so unicode keywords are not silently
        # dropped (matches the isinstance checks in the helpers above).
        elif isinstance(ck, (str, unicode, list)):
            keyword = normalize_array_str(ck)
            Theme.add_themes(article, keyword, 'article')

    for extraction_type, keyword_list in extracted_keywords_map.iteritems():
        for keyword in keyword_list:
            Theme.add_themes(article, keyword, extraction_type)
    return article