def test_query5(self):
    """Order of '+'-joined terms must not affect the query results."""
    expected = [
        'http://tcp-connections.herokuapp.com/crises/7/',
        'http://tcp-connections.herokuapp.com/organizations/10/',
    ]
    # list() replaces the redundant [x for x in ...] copy; assertEqual
    # replaces the deprecated assertEquals alias.
    self.assertEqual(list(query("bp+oil")), expected)
    self.assertEqual(list(query("oil+bp")), expected)
def test_query3(self):
    """Degenerate queries (empty, separator-only, punctuation, blank) match nothing."""
    # Loop over the junk inputs instead of four copy-pasted assertions;
    # assertEqual replaces the deprecated assertEquals alias.
    for junk in ("", "+", "!@#$%^&*()", " "):
        self.assertEqual(list(query(junk)), [])
def test_query1(self):
    """Queries are case-insensitive and tolerate surrounding junk characters."""
    expected = [
        'http://tcp-connections.herokuapp.com/organizations/5/',
        'http://tcp-connections.herokuapp.com/crises/8/',
        'http://tcp-connections.herokuapp.com/crises/1/',
        'http://tcp-connections.herokuapp.com/people/4/',
        'http://tcp-connections.herokuapp.com/people/7/',
    ]
    # Loop over the equivalent spellings; assertEqual replaces the
    # deprecated assertEquals alias.
    for term in ("bill+gates", "BILL+GATES", " !#@$ BILL + GATES \n "):
        self.assertEqual(list(query(term)), expected)
def search_nodes():
    """Handle a node-search request and return rendered results as JSON.

    Responds 400 when the ``q`` parameter is missing or shorter than
    three characters.
    """
    # .get() returns None when 'q' is absent; guard with `or ''` so a
    # missing parameter answers 400 instead of crashing on .strip()
    # with an AttributeError (500).
    q = (request.args.get('q') or '').strip().lower()
    if len(q) < 3:
        abort(400)
    # Deduplicate and cap the result set before rendering.
    nodes = unique_list(search.query(q), max=40)
    nodes = map(render_summary, nodes)
    nodes = map(render_dates, nodes)
    return json_response(nodes)
def query(self, search_string, rel, pop):
    """Run a search and render the results as a complete HTML page.

    rel, pop: relevance/popularity weights, converted to int before the
    search. Returns the HTML document: a scored result list, or a
    "No results found." message when the engine reports no hits.
    """
    html = "<html><body>" + form
    # Convert once and search once: the original issued the same query
    # twice (the second time with int-converted weights).
    results = search.query(search_string, int(rel), int(pop))
    if results != ["No results found."]:
        items = []
        for result in results:
            url = result[0]
            score = str(result[1])
            # NOTE(review): url and score are interpolated unescaped --
            # if the index can contain untrusted text this is an XSS risk.
            items.append("<li>[" + score + "] <a href='" + url + "'>" + url + "</a></li>")
        # join() instead of repeated += avoids quadratic string building.
        html += "<ul>" + "".join(items) + "</ul></body></html>"
    else:
        html += "<p>No results found.</p></body></html>"
    return html
def optimized_search(query):
    """Find the best ordering for *query*, relaxing the ratio threshold.

    Tries ratios 50, 40, ..., 0 until uphold_ratio_threshold() yields a
    non-None result, then returns (best_ordering, threshold_result).
    NOTE(review): if every ratio down to 0 yields None, None is passed
    straight into get_best_ordering() -- confirm that is handled there.
    """
    hits = search.query(query)
    threshold_result = None
    # Descending ratio sweep; stop at the first ratio that succeeds.
    for ratio in range(50, -1, -10):
        threshold_result = uphold_ratio_threshold(hits, ratio)
        if threshold_result is not None:
            break
    return get_best_ordering(threshold_result), threshold_result
def main():
    """Interactive menu: list, search, add, or delete images in the repo.

    Reads one menu selection, performs the action, and returns.
    Relies on the module-level ``repo`` (with a pandas-like ``df``),
    ``query``, ``Image``, and ``filedialog`` -- all defined elsewhere.
    """
    selection = ""
    # Re-prompt until the user picks one of the four menu letters.
    # NOTE(review): the stray "\ " sequences inside the prompt are literal
    # backslash-space pairs from the original source -- kept verbatim.
    while selection.lower() not in ['a', 'b', 'c', 'd']:
        selection = input("Please select an action: \ \n [A] List all images \n [B] Search for an image \ \n [C] Add an image \n [D] Delete an image \n")
    if selection.lower() == "a":
        # [A] List: show the whole repository table (if any).
        if len(repo.df) == 0:
            print("No images here yet.")
        else:
            print(repo.df)
    elif selection.lower() == "b":
        # [B] Search: optional category plus free-text keywords.
        category = input("Enter a category to search, or press Enter if you don't want a certain category: ")
        keywords = input("Enter some keywords, like 'beach picture', or press Enter: ")
        result = query(category, keywords)
        if len(result) == 0:
            print("No images were found.")
        else:
            print("Top Results:")
            print(result)
            see_images = input("Would you like to see the images? Input Y if yes. ")
            if see_images.lower() == "y":
                # Open each matching file in the default image viewer.
                images = list(result['path'])
                for img in images:
                    Image.open(img).show()
    elif selection.lower() == "c":
        # [C] Add: pick a file, choose/enter a category, describe it.
        print("Select an image: ")
        pathname = filedialog.askopenfilename()
        possible_categories = repo.df['category'].unique()
        # Render existing categories as a numbered pick list.
        categories_string = "\n".join([f"\t[{i}] {name}" for i, name in enumerate(possible_categories)])
        print("Select a category from the following, or input a new category name: ")
        category = input(categories_string + "\n")
        # A digit within range selects an existing category; any other
        # text becomes a brand-new category name.
        if category.isdigit() and int(category) < len(possible_categories):
            category = possible_categories[int(category)]
        description = input("Enter a short phrase describing your image: ")
        if repo.add_image(pathname, category, description):
            print("Image successfully added!")
        else:
            print("Image already exists. ")
    else:
        # [D] Delete: show the table, then delete by ID list.
        print(repo.df)
        ids = input("Enter the ID number of each image you want to delete separated by spaces: ").split()
        ids = [int(id) for id in ids]
        if repo.delete_image(ids):
            print("Image successfully deleted.")
        else:
            print("This image does not exist in the repository.")
    return
def get(self):
    """AJAX search endpoint: answer the 'q' parameter with JSON results."""
    # Cap the raw query at 100 characters, then trim whitespace.
    term = self.request.get('q')[:100].strip()
    if not term:
        # Blank query: answer with an empty body.
        self.response.write('')
        return
    cinemas, films = search.query(term)
    self.response.headers['Content-Type'] = 'application/json'
    context = {
        'n_cinemas': cinemas.number_found,
        'results_cinemas': cinemas.results,
        'n_films': films.number_found,
        'results_films': films.results,
    }
    self.render('search_ajax.json', context)
def select(s, db, cursor):
    """Dispatch a space-separated command string to its handler.

    The first token names the command; the remaining tokens are its
    arguments. Returns the reply rendered by sendms().
    """
    s = s.split()
    cmd = s[0]
    # elif chain: commands are mutually exclusive, so stop at the first
    # match instead of testing every branch (original used separate ifs).
    if cmd == 'register':
        res = user.register(s[1], s[2], s[3], s[4], db, cursor)
    elif cmd == 'login':
        res = user.login(s[1], s[2], db, cursor)
    elif cmd == 'dingdan':
        # Order listing: concatenate one rendered line per order.
        res = order.check(s[1], db, cursor)
        reply = ""  # renamed from 'all', which shadowed the builtin
        for r in res:
            reply += sendms(r) + "\n"
        print(reply)
        return reply
    elif cmd == 'zhanghu':
        res = user.inform(s[1], db, cursor)
    elif cmd == 'zuche':
        res = order.rent(s[1], int(s[2]), db, cursor)
    elif cmd == 'huanche':
        res = order.rent_back(s[1], int(s[2]), db, cursor)
    elif cmd == 'chongzhi':
        res = user.charge(s[1], float(s[2]), db, cursor)
    elif cmd == 'shanchu':
        res = user.delete(s[1], db, cursor)
    elif cmd == 'jiache':
        res = bike.insert(int(s[1]), s[2:], db, cursor)
    elif cmd == 'shanche':
        res = bike.delete(int(s[1]), db, cursor)
    elif cmd == 'chache':
        res = bike.query(int(s[1]), db, cursor)
    elif cmd == 'fujin':
        res = bike.nearby(s[1], s[2], db, cursor)
    elif cmd == 'sousuo':
        res = search.query(int(s[1]), db, cursor)
    elif cmd == 'xml':
        res = xml.ret_xml(s[1], [s[2], s[3]], db, cursor)
    # NOTE(review): an unrecognized command leaves 'res' unbound and
    # raises NameError here, matching the original -- consider a default.
    print(sendms(res))
    return sendms(res)
def create_widgets(self, master):
    """Build the speech text box plus the Record and Confirm buttons."""
    # Multi-line text area that receives the recognized speech.
    self.text_sp = tk.Text(master, width=55, height=5, font=('標楷', 12))
    # Placing on a separate line: chaining .place() onto the constructor
    # would bind the attribute to None and raise AttributeError later.
    self.text_sp.place(x=240, y=160, anchor='center')
    # Record: append the speech-to-text result to the text box.
    record_btn = tk.Button(
        master, text='Record', width=15, height=2, font=('times', 11),
        command=lambda: self.text_sp.insert('end', Speech_text()))
    self.btn_record = record_btn
    self.btn_record.place(x=160, y=240, anchor='center')
    # Confirm: send the text box contents to the query handler.
    confirm_btn = tk.Button(
        master, text='Confirm', width=15, height=2, font=('times', 11),
        command=lambda: query(self.text_sp.get(0.0, 'end')))
    self.btn_confirm = confirm_btn
    self.btn_confirm.place(x=320, y=240, anchor='center')
def do_GET(self):
    """Serve a GET request with a JSON answer.

    With a query string (e.g. ``?kw=...``) the keyword is looked up via
    ``search.query`` against the module-level ``workbook``; without one
    the module-level ``data`` is dumped instead.
    """
    self.send_response(200)
    self.send_header('Content-type', 'application/json')
    self.end_headers()
    if '?' in self.path:  # request carries query-string parameters
        # Decode everything after the first '?' into an instance
        # attribute (kept as an attribute, as in the original).
        self.queryString = urllib.parse.unquote(self.path.split('?', 1)[1])
        # Fallback payload returned when parsing or the lookup fails
        # ("服务器错误" = "server error").
        respone = {"kw": '', "question": "服务器错误", "answer": []}
        try:
            params = urllib.parse.parse_qs(self.queryString)
            # KeyError here (missing 'kw') is caught below on purpose.
            respone = search.query(workbook=workbook, keyWord=params["kw"][0])
            print(respone)
        except Exception as err:
            print(err)
        finally:
            # Always answer, whether the lookup succeeded or not.
            self.wfile.write(json.dumps(respone).encode('utf-8'))
    else:
        self.wfile.write(json.dumps(data).encode('utf-8'))
    return
def search():
    """Render the search page for the 'q' request argument."""
    term = request.args.get('q')
    if term:
        return render_template('search.html', items=query(term))
    # Missing/empty parameter: reply with the original plain-text notice.
    return 'not query str'
def search(request):
    """Django view: run a search for the GET 'query' parameter.

    Renders search.html with the (de-'+'-ed) terms and the result items.
    """
    # Default to '' so a missing parameter cannot crash the .replace()
    # call below with an AttributeError on None.
    search_terms = request.GET.get('query', '')
    # print(...) with a single argument works in both Python 2 and 3.
    print("searched: %s" % search_terms)
    query_results = query(search_terms)
    print("results: %s" % query_results)
    return render(request, 'search.html',
                  {'search_terms': search_terms.replace('+', ' '),
                   'query_results': query_results.items()})
def do_query():
    """REPL loop: read sentences and print scored matches forever."""
    while True:
        sentence = raw_input(u'Start Query Here->:')
        hits = search.query(sentence)
        for hit in hits:
            # hit[0]: similarity score, hit[1]: matched text.
            print('similarity : {0} \n {1}'.format(hit[0], hit[1]))
def search(q):
    """Thin wrapper: forward *q* to query() and return its result unchanged."""
    return query(q)
def probablisticSearch(map, targetx, targety, policy):
    """Search a terrain grid for a hidden target using a belief map.

    map: square 2D terrain grid (cells compared against flat/hilly/
    forested constants defined elsewhere).
    targetx, targety: true target cell -- used only to answer queries.
    policy: 1 -> always search the cell most likely to CONTAIN the
    target; otherwise search the cell most likely to yield a FIND
    (belief weighted by a terrain-dependent detection probability).
    Returns (search_count, move_count) once the target is found.
    """
    dim = map.shape[0]
    # Uniform prior: every cell equally likely to hold the target.
    initbelief = 1.0 / (dim * dim)
    beliefmap = np.full((dim, dim), initbelief)
    count = 0
    search_count = 0
    move_count = 0
    current_i = 0
    current_j = 0
    destination_i = 0
    destination_j = 0
    while True:
        count += 1
        # Scan the whole grid for the best-scoring cell(s).
        maxbelief = 0
        maxcells = []
        if policy == 1:  # highest prob of containing the target
            for i in range(dim):
                for j in range(dim):
                    if beliefmap[i][j] > maxbelief:
                        maxbelief = beliefmap[i][j]
                        maxcells.clear()
                        maxcells.append((i, j))
                    elif beliefmap[i][j] == maxbelief:
                        maxcells.append((i, j))
        else:  # highest prob of finding the target
            for i in range(dim):
                for j in range(dim):
                    # Detection probability by terrain type (easier
                    # terrain -> better chance a search spots the target).
                    p = 0
                    if map[i][j] == flat:
                        p = 0.9
                    elif map[i][j] == hilly:
                        p = 0.7
                    elif map[i][j] == forested:
                        p = 0.3
                    else:
                        p = 0.1
                    if beliefmap[i][j] * p > maxbelief:
                        maxbelief = beliefmap[i][j] * p
                        maxcells.clear()
                        maxcells.append((i, j))
                    elif beliefmap[i][j] * p == maxbelief:
                        maxcells.append((i, j))
        # break ties arbitrarily
        # NOTE(review): int(seed * (len(maxcells) - 1)) almost never
        # selects the LAST tied cell (seed < 1); int(seed * len(maxcells))
        # would be uniform. Confirm whether the bias is intentional.
        seed = random.random()
        #print(maxcells)
        query_cell_index = int(seed * (len(maxcells) - 1))
        query_i = maxcells[query_cell_index][0]
        query_j = maxcells[query_cell_index][1]
        # NOTE(review): destination_i/destination_j are initialized to 0
        # and never reassigned in this function, so this loop can only
        # re-pin the query to cell (0, 0) when it ties for best --
        # verify that is the intended behavior.
        for cell in maxcells:
            if cell[0] == destination_i and cell[
                    1] == destination_j and count != 1:
                query_i = destination_i
                query_j = destination_j
        #print("==========")
        #print("Max belief is %s" % maxbelief)
        #print("Max belief cell is [%s,%s]" % (query_i,query_j))
        #print("Current cell is [%s,%s]" % (current_i,current_j))
        if_search = 1
        # the first search is starting point
        if count == 1 or (current_i == query_i and current_j == query_j):
            #print("No need to move")
            current_i = query_i
            current_j = query_j
        else:
            # Decide whether to move or search: discount the target
            # cell's score by 0.95 per step of travel distance.
            distance = manhattanDistance(current_i, current_j,
                                         query_i, query_j)
            #print("distance:%s" % distance)
            destination = maxbelief
            for i in range(distance):
                destination = destination * 0.95
            if policy == 1:
                # Move only if the discounted destination still beats
                # the belief of the cell we are standing on.
                if beliefmap[current_i][current_j] < destination:
                    current_i, current_j = move(current_i, current_j,
                                                query_i, query_j)
                    #print("moving to [%s,%s]" % (current_i,current_j))
                    move_count += 1
                    if_search = 0
            else:
                # Same comparison, weighted by the local detection
                # probability of the current cell's terrain.
                p = 0
                if map[current_i][current_j] == flat:
                    p = 0.9
                elif map[current_i][current_j] == hilly:
                    p = 0.7
                elif map[current_i][current_j] == forested:
                    p = 0.3
                else:
                    p = 0.1
                if beliefmap[current_i][current_j] * p < destination:
                    current_i, current_j = move(current_i, current_j,
                                                query_i, query_j)
                    move_count += 1
                    if_search = 0
        query_result = 0
        if if_search == 1:
            # Search the current cell. query() presumably returns 1 on a
            # successful detection of the target here -- TODO confirm
            # against query()'s definition.
            search_count += 1
            query_result = query(current_i, current_j, targetx, targety,
                                 map[targetx][targety])
        if query_result == 1:
            print("Target found at [%s,%s], %s." %
                  (current_i, current_j, map[current_i][current_j]))
            print("Totoal Search Steps:%d" % search_count)
            return search_count, move_count
        else:
            # print("Search [%d,%d]" % (query_i,query_j))
            # makeMove() presumably updates beliefmap after the failed
            # search -- TODO confirm its semantics.
            makeMove(map, beliefmap, current_i, current_j)
            # Sanity check: beliefs must never go negative.
            for i in range(dim):
                for j in range(dim):
                    if beliefmap[i][j] < 0:
                        raise Exception("ERROR")
def POST(self, query, page):
    """Return one page of search results as a JSON string (CORS-enabled)."""
    payload = json.dumps(search.query(query, page))
    # Allow any origin to consume this endpoint.
    cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
    return payload
#coding=utf-8 import corpus import lda import search from gensim import corpora, models """ corpus.build_corpus() c = corpus.load_corpus() print 'Hello' for i in c: print i lda.init_tfidf() lda.train_lda() post = lda.load_topic_of_post() for p in post: print p """ h = search.query(u'女歌手') for i in h: print i[0] print i[1]
def test_query2(self):
    """A two-term query returns the expected resource URLs in rank order."""
    expected = [
        'http://tcp-connections.herokuapp.com/people/7/',
        'http://tcp-connections.herokuapp.com/people/10/',
    ]
    # list() replaces the redundant [x for x in ...] copy; assertEqual
    # replaces the deprecated assertEquals alias.
    self.assertEqual(list(query("al+gore")), expected)
def test_query6(self):
    """A single-term query returns the expected resource URLs in rank order."""
    expected = [
        'http://tcp-connections.herokuapp.com/crises/7/',
        'http://tcp-connections.herokuapp.com/organizations/10/',
    ]
    # list() replaces the redundant [x for x in ...] copy; assertEqual
    # replaces the deprecated assertEquals alias.
    self.assertEqual(list(query("oil")), expected)