def handle_index(page=1):
    """Handle a search request: query Solr, paginate, attach ads, spell-check.

    Reads `page`, `searchType` and `query` from the request query string,
    runs the query against the Solr core mapped from `searchType`, and
    renders `result.ftl` with the paginated results.

    :param page: 1-based page number used when the query string carries
                 no `page` value (backward-compatible default: 1).
    :returns: rendered template string, or a plain "no results" message.
    """
    args = request.query.decode("utf-8")
    # BUGFIX: the original did `page = int(args.page)` unconditionally,
    # which raises ValueError when the query string has no `page` key
    # (Bottle yields '' for missing keys). Fall back to the argument.
    page = int(args.page) if args.page else int(page)
    searchType = args.searchType
    query = args.query

    # search
    solr = Solr(SOLR_URL + '/' + solr_core[searchType] + '/')
    start = (page - 1) * PER_PAGE
    solr_query_params = {
        'start': start,
        # BUGFIX: was hard-coded 10; must match the page size used by
        # Pagination/results_for_page below or pagination silently breaks.
        'rows': PER_PAGE,
        'hl': 'true',            # highlight matches in the body field
        'hl.fl': 'body',
        'hl.fragsize': 100,
        'hl.snippets': 3,
        'df': 'text',
    }
    responseObj = solr.search(query, **solr_query_params)
    ResultObj = parse_response(responseObj)
    resultNum = ResultObj['hits']
    print(resultNum)
    if resultNum == 0:
        return "No match Docs find! What's a pity"
    # cap the reported hit count so the paginator never exceeds TOTAL pages
    resultNum = resultNum if resultNum < TOTAL else TOTAL
    paginator = Pagination(page, PER_PAGE, resultNum)

    # result
    results = results_for_page(ResultObj, page, PER_PAGE, resultNum)

    # ad: show a single ad when there are very few hits
    if resultNum < 2:
        ads = ads_for_page(ResultObj, 1)
    else:
        ads = ads_for_page(ResultObj, AD_NUM)

    # spellcheck: empty suggestion means the query needed no correction
    spell_flag = True
    query_right = spellcheck.spellCorret(query, wordSet, bagOfword, parameter)
    if len(query_right) == 0:
        spell_flag = False

    template_params = {
        'query': query,
        'results': results,
        'ads': ads,
        'paginator': paginator,
        'resultNum': resultNum,
        'searchType': searchType,
        'spellFlag': spell_flag,
        'query_right': query_right,
    }
    return template('result.ftl', **template_params)
def system_eval():
    """Evaluate the search system on the TREC collections with trec_eval.

    On the first call, runs every query set through Solr, writes a TREC
    run file, shells out to the evaluation program, and caches the parsed
    scores in the module-level `trec_eval_results`. Subsequent calls serve
    the cached scores. Renders `syseval_result.ftl` either way.
    """
    global trec_eval_results, baseline
    baseline = OrderedDict(baseline)

    # Serve cached evaluation results if we have already run once.
    if len(trec_eval_results) != 0:
        template_params = {
            'trec_eval_results': trec_eval_results,
            'baseline': baseline,
        }
        return template('syseval_result.ftl', **template_params)

    trec_list = ['Trec14', 'Trec09']
    # Extract the numeric part of a collection name, e.g. 'Trec14' -> 14.
    R = lambda x: int(filter(str.isdigit, x))

    for trec in trec_list:
        qrels = trec_dict[trec]['qrels.adhoc']
        query_dict = evaluate.get_query_info(trec_dict[trec]['queries'])
        print('query len %d' % len(query_dict))

        # Collections before TREC-12 live in the Trec09 core.
        core = 'Trec09' if R(trec) < 12 else 'Trec12'
        solr = Solr(SOLR_URL + '/' + solr_core[core] + '/', timeout=100)

        # Run every topic query and keep (id, parsed response) pairs.
        # NOTE(review): `solr_query_params` is not defined in this function;
        # it appears to rely on a module-level variable — confirm it exists.
        results = []
        for query_id, query in query_dict.items():
            responseObj = solr.search(query, **solr_query_params)
            ResultObj = parse_response(responseObj)
            results.append([query_id, ResultObj])

        # Generate the TREC submission file and save it.
        evaluate.gen_save_submmit(results, RUN_FILE_PATH)

        # Score the run file with the external evaluation program.
        cmd = EVAL_EXE + ' -m all_trec -M 1000 ' + qrels + ' ' + RUN_FILE_PATH + ' > ' + RESULT_PATH
        exe_cmd(cmd)

        # Parse the evaluation output and cache it for display.
        eval_results = evaluate.read_eval(RESULT_PATH)
        trec_eval_results[trec] = eval_results
        print(trec + ' is Done!')

    template_params = {
        'trec_eval_results': trec_eval_results,
        'baseline': baseline,
    }
    return template('syseval_result.ftl', **template_params)