from flask import render_template

# `search` is assumed to be a helper defined elsewhere in the application.
def query(query):
    returns = search(query, 10)
    if len(returns) > 0:
        responses = list()  # currently unused
        print(len(returns))
        return render_template('search_results.html', results=returns)
    else:
        return "No Sites found!"

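# The handler above relies on Flask routing that is not shown. A minimal sketch
# of how such a view could be registered, assuming a hypothetical
# "/query/<query>" URL rule (not part of the original snippet):
from flask import Flask

app = Flask(__name__)
app.add_url_rule("/query/<query>", view_func=query)
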
from bottle import request, template

# Bottle handler; `searching` is assumed to be imported elsewhere.
def get_search():
    query = request.query.query.lower() or ""
    page = int(request.query.page)
    sort = request.query.sort
    results, max_pages, total_results = searching.search(query, page, sort)
    return template("results.tpl", query=query, results=results, queries=None,
                    sort=sort, index='code', max_pages=max_pages, page=page,
                    total_results=total_results)

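# get_search() above uses the Bottle framework (request.query, template). A
# minimal sketch of wiring it up; the "/search" path and server settings below
# are assumptions for illustration only:
from bottle import route, run

route("/search", method="GET", callback=get_search)
run(host="localhost", port=8080)
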
from flask import render_template, request

def main():
    if request.args.get("search") is None:
        return render_template("index.html")
    if request.method == "GET":
        search_arg = request.args.get("search")
        print(search_arg)
        returns = search(search_arg, 10, "de")
        if len(returns) > 0:
            responses = list()  # currently unused
            print(len(returns))
            return render_template('search_results.html', results=returns, search=search_arg)
        else:
            return "No Sites found!"

from flask import render_template, request

def findJobsButton():
    search_for = request.form['search-for']
    avoiding = request.form['avoiding']
    location = request.form['location']
    fulltime = request.form.get("fulltime") is not None
    parttime = request.form.get("parttime") is not None
    casual = request.form.get("casual") is not None
    gumtree = request.form.get("gumtree") is not None
    indeed = request.form.get("indeed") is not None
    jora = request.form.get("seek") is not None

    # find User-Agent of current browser for Beautiful Soup
    # user_agent = request.user_agent.string
    # HEADERS = {"User-Agent": f"{user_agent}"}

    # build word lists from the comma-separated text fields
    words_to_search = format_text_field(search_for)
    words_to_avoid = format_text_field(avoiding)

    positions_to_render = searching.search(gumtree, jora, indeed, words_to_search,
                                           words_to_avoid, location, fulltime,
                                           parttime, casual)
    return render_template('results.html', positions=positions_to_render)

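# format_text_field() is not shown in the snippet above. Based on its comment
# about comma-separated words, a minimal sketch of what such a helper might
# look like (an assumption, not the original implementation):
def format_text_field(text):
    """Split a comma-separated text field into a list of lowercase words."""
    return [word.strip().lower() for word in text.split(",") if word.strip()]
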
from flask import render_template, request

def my_form_post():
    arrayInput = request.form['array']
    item = request.form['item']
    array = data[arrayInput]
    # data, year, minsAndMax, df and the sorted* lists are assumed to be
    # module-level values populated elsewhere.
    return render_template("dataPreview.html", year=year, minsAndMax=minsAndMax,
                           df=df, sortedTL=sortedTeamsList, sortedWL=sortedWinList,
                           sortedWLPCT=sortedWinLossPCT,
                           search=search("ALL", array, item))

print(len(wordkey))
print("loading complete!")
print("------------Search Start------------")
while True:
    print("Please input the query statement:")
    # read the query from standard input
    statement = input()
    # tokenize and tag parts of speech
    tokens_tag = pos_tag(nltk.word_tokenize(statement))
    # preprocess the input words (stemming / lemmatization)
    tokens = stemming.stemmer_lemma(tokens_tag)
    tokens = [word.lower() for word in tokens if word.isalpha()]
    print(tokens)
    # look up the set of documents relevant to the query
    docset = searching.search(wordindex, tokens)
    # score the documents and keep the k highest-scoring ones
    scorelist = getscore.topK(10, wordindex, fileNum, tokens, docset)
    # nothing relevant was found
    if len(scorelist) == 0:
        print("No relevant results")
    # print the results
    for doc, score in scorelist:
        doc = doc.rstrip()
        time, url, context = loading.getfile(path, doc, tokens)
        print("------------------------------------")
        print("Document name: " + doc)
        print("Score: " + "%.3f" % score)
        print("Time: " + time)
        print("URL: " + url)
        print("Relevant content:")

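# getscore.topK() above is project-specific and not shown. As a rough sketch of
# the general pattern it follows (keep only the k highest-scoring documents),
# assuming the scores have already been computed into a {doc: score} mapping:
import heapq

def top_k(k, scores):
    """Return the k (doc, score) pairs with the highest scores."""
    return heapq.nlargest(k, scores.items(), key=lambda pair: pair[1])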