Esempio n. 1
0
def get_es_results(abstracts, top_n):
    """Run an Elasticsearch abstract search for each abstract.

    :param abstracts: iterable of abstract strings to query with
    :param top_n: maximum number of hits requested per abstract
    :return: list of per-abstract search results, in input order
    """
    es_results = []
    config = Config()
    search = Search()

    for abstract in abstracts:
        # Query once and reuse the result; the original code hit ES twice
        # per abstract (once for the print, once for the append).
        result = search.search_by_abstract(abstract, top_n, config)
        print(result)
        es_results.append(result)

    return es_results
Esempio n. 2
0
def search_tpoic(topic, n=RETURN_SIZE):
    """Search a topic and render the result page, caching hits as Records.

    Previously-saved Record rows for the topic are reused when present;
    otherwise the search backend is queried and its hits persisted so the
    next request for the same topic skips the search.
    """
    searcher = Search(topic)
    cached = Record.records(topic, n)
    if cached:
        # Rebuild lightweight Result objects from the cached rows.
        results = [Result(row.doc_id, row.score) for row in cached]
    else:
        results = searcher.search()
        Record.save_all([
            Record(pn=n, topic_id=topic, score=hit.score, doc_id=hit.doc_id)
            for hit in results
        ])
    p = searcher.p(results, n)
    res = format_results(topic, results, n, p)
    return render_template('search_result.html', n=n, p=p, results=res)
Esempio n. 3
0
def get_es_results(abstracts, top_n):
    """Query Elasticsearch once per abstract and time the whole batch.

    A failing lookup is logged and skipped so one bad abstract does not
    abort the rest of the batch.

    :param abstracts: iterable of abstract strings to query with
    :param top_n: maximum number of hits requested per abstract
    :return: list of successful per-abstract search results
    """
    started = time.time()
    collected = []
    config = Config()
    search = Search()
    for abstract in abstracts:
        try:
            hits = search.search_by_abstract(abstract, top_n, config)
            print(hits)
            print('搜索结果中包含 ' + str(len(hits)) + ' 条数据')
            collected.append(hits)
        except Exception as e:
            # Best-effort: log the failure and carry on with the next abstract.
            print('ES检索出现异常: Exception:', str(e))

    time_used = datetime.timedelta(seconds=int(round(time.time() - started)))
    print('检索耗时:' + str(time_used))
    return collected
Esempio n. 4
0
def prepare():
    """
    Elasticsearch config preparing, global arguments preparing, vocabulary preparing and model restore preparing.
    :return: tuple of (vocab, search, ir_config, args, rc_model)
    """
    ir_config = Config()
    search = Search()
    args = path_arg4test()

    # Unpickle the vocabulary built during training.
    print('Loading vocab...')
    vocab_path = os.path.join(args.vocab_dir, 'vocab.data')
    with open(vocab_path, 'rb') as vocab_file:
        vocab = pickle.load(vocab_file)

    # Rebuild the reading-comprehension model and restore its checkpoint.
    print('Restoring the model...')
    rc_model = RCModel(vocab, args)
    rc_model.restore(model_dir=args.model_dir, model_prefix=args.algo)
    return vocab, search, ir_config, args, rc_model
Esempio n. 5
0
def search_by_user(disease, gene, n, type_):
    """Run a user-driven disease/gene search and render the result page.

    Pagination metrics are disabled for this view: the formatter receives
    p=None and the template receives p=False.
    """
    results = Search().search_by_user(disease, gene, type_)
    rendered = format_results(None, results, n, p=None)
    return render_template("search_result.html", n=n, p=False, results=rendered)
# from infer import prepare, inference, infer_prob
from infer_tune import prepare, inference, infer_prob


def chinese_tokenizer(documents):
    """
    Yield each Chinese document as a list of word tokens.

    (Also needed when restoring the model — must remain importable.)
    :param documents: iterable of Chinese text strings
    :return: generator producing one token list per document
    """
    for doc in documents:
        yield list(jieba.cut(doc))


# Module-level singletons shared by all request handlers: ES config/search
# wrappers plus the restored matching model (vocab processor, model, session).
config = Config()
search = Search()
vocab_processor, model, session = prepare()
# Only the first line of the file is parsed — assumes the mapping is stored
# as single-line JSON; TODO confirm the file format.
with open('../data/primary_question_dict.json') as primary_dict_f:
    primary_question_dict = json.loads(primary_dict_f.readline())


class MatchHandler(tornado.web.RequestHandler):
    """Tornado handler for the question-matching page."""

    def data_received(self, chunk):
        # Streaming request bodies are not used; satisfy the abstract method.
        pass

    def get(self, *args, **kwargs):
        # Serve the query form.
        self.render('find_answer.html')

    def post(self, *args, **kwargs):
        # Delegates to use_write, defined elsewhere in this file/class —
        # presumably it computes the match and writes the response; verify.
        self.use_write()
Esempio n. 7
0
from ir.search import Search

# Smoke script: search topic 1 and print s.p over the top 10 results.
# NOTE(review): assumes Search.p(results, k) computes a precision-style
# metric — confirm against ir.search.
s = Search(1)
res = s.search()
print(s.p(res, 10))