def search(self):
    """Prompt the user for search terms and print per-term inverted-index hits.

    Reads terms from stdin (whitespace separated), queries the inverted
    index configured under ``[DEFAULT] output``, and reports results and
    elapsed time.  Tells the user to build the index first if it is missing.
    """
    output = self._config["DEFAULT"]["output"]
    print(
        "Ingrese las palabras a buscar, separadas por blancos para búsqueda múltiple"
    )
    terms = str(input()).split()
    start = time.time()
    try:
        searcher = Search(output)
        hits = searcher.search_in_ii(terms)
        for term, docs in hits.items():
            print("---- Resultados de la búsqueda %s ----" % term)
            if docs is None:
                print("No se encontraron resultados.")
            else:
                for doc in docs:
                    print(doc)
                print("Resultados: %s" % len(docs))
        end = time.time()
        print("La búsqueda demoró %s segundos" % (end - start))
    except FileNotFoundError:
        print(
            "No se encontró en disco el índice invertido, debe generarlo con la opción 2"
        )
def test_run_all():
    """End-to-end pipeline check: build the code base, generate reward
    training data, train the classifier, and verify >80% prediction accuracy
    against the evidence file."""
    context = Context('data')
    Search.build_code_base('./code_base/basis_code.jcb', context)
    cg = CodeGen('./code_base/basis_code.jcb', None, Example)
    cg.build_reward_training_data('./code_base/basis_code.evd', context)
    cg = CodeGen(None, './code_base/basis_code.evd', Example)
    assert isinstance(cg.xgb, XGBClassifier)
    with open('./code_base/basis_code.evd', 'r') as f:
        lines = f.read().splitlines()
    # Lines starting with '.' are headers/markers, not data rows.
    rows = [[float(x) for x in line.split(', ')]
            for line in lines if line[0] != '.']
    test = np.array(rows)
    pred = cg.predict_rewards(test[:, range(1, LENGTH_CODE_EVAL + 1)])
    # Column 0 holds the true label; require better than 80% agreement.
    assert sum(1 * (pred > 0.5) == test[:, 0]) / test.shape[0] > 0.8
def OnOpenRegExSearch(self,event):
    """Prompt for a regular expression and run a regex search over the genome model."""
    dialog = wx.TextEntryDialog(None, "Please enter regular expression here:",
                                "Regular Expression Search","",
                                style=wx.OK|wx.CANCEL)
    if dialog.ShowModal() == wx.ID_OK:
        # Python 2 print statement (this snippet targets wxPython on Python 2).
        print "You have entered: %s" % dialog.GetValue()
        search=Search(self.genomemodel, self.treeview)
        search.regexsearch(dialog.GetValue())
def search():
    """Search for the query typed in entry ``e1`` and open the result.

    Opens the matching document from the dataset folder in a new browser
    tab, or an error page when nothing is found.
    """
    my_search = Search()
    document_name = my_search.searching(e1.get())
    if len(document_name) > 0:
        # FIX: the original mixed escaped and unescaped backslashes
        # ("D:\SearchEngine\\Dataset\\"); a raw string makes the Windows
        # path unambiguous and avoids invalid-escape warnings.
        url = r"D:\SearchEngine\Dataset" + "\\" + document_name
        webbrowser.open_new_tab(url)
    else:
        webbrowser.open_new_tab("Error.html")
def testComplex(self):
    """Run a multi-word search and print the titles of every hit."""
    results = Search().performSearch('WAS iSeries')
    print(len(results))
    #self.assertTrue(len(results)>0)
    #self.assertTrue(len(results)==1)
    for hit in results:
        print(hit.get_title())
def OnOpenGeneSearch(self,event):
    """Prompt for a sequence string and run a gene search over the genome model."""
    dialog = wx.TextEntryDialog(None, "Please enter sequence string here:",
                                "Gene finding","",
                                style=wx.OK|wx.CANCEL)
    if dialog.ShowModal() == wx.ID_OK:
        # Python 2 print statement (this snippet targets wxPython on Python 2).
        print "You have entered: %s" % dialog.GetValue()
        search=Search(self.genomemodel, self.treeview)
        search.genesearch(dialog.GetValue())
def update_required_items(spoiler):
    """Determine which item locations are required to beat the game and
    store them on the spoiler, grouped by world id."""
    worlds = spoiler.worlds

    # get list of all of the progressive items that can appear in hints
    # all_locations: all progressive items. have to collect from these
    # item_locations: only the ones that should appear as "required"/WotH
    all_locations = [
        location for world in worlds
        for location in world.get_filled_locations()
    ]
    # Set to test inclusion against
    item_locations = {
        location
        for location in all_locations
        if location.item.majoritem
        and not location.locked
        and location.item.name != 'Triforce Piece'
    }

    # if the playthrough was generated, filter the list of locations to the
    # locations in the playthrough. The required locations is a subset of these
    # locations. Can't use the locations directly since they are location to the
    # copied spoiler world, so must compare via name and world id
    if spoiler.playthrough:
        translate = lambda loc: worlds[loc.world.id].get_location(loc.name)
        spoiler_locations = set(
            map(translate,
                itertools.chain.from_iterable(spoiler.playthrough.values())))
        item_locations &= spoiler_locations
        # Skip even the checks
        _maybe_set_light_arrows = lambda _: None
    else:
        _maybe_set_light_arrows = maybe_set_light_arrows

    required_locations = []
    search = Search([world.state for world in worlds])
    for location in search.iter_reachable_locations(all_locations):
        # Try to remove items one at a time and see if the game is still beatable
        if location in item_locations:
            old_item = location.item
            location.item = None
            # copies state! This is very important as we're in the middle of a
            # search already, but beneficially, has search it can start from
            if not search.can_beat_game():
                required_locations.append(location)
            location.item = old_item
        _maybe_set_light_arrows(location)
        search.state_list[location.item.world.id].collect(location.item)

    # Filter the required location to only include location in the world
    required_locations_dict = {}
    for world in worlds:
        required_locations_dict[world.id] = list(
            filter(lambda location: location.world.id == world.id,
                   required_locations))
    spoiler.required_locations = required_locations_dict
def test_opencart_search(browser, base_url, value):
    """Search the OpenCart main page for *value* and check every result
    element mentions the query text."""
    page = Search(browser, base_url)
    page.go_to_site()
    page.enter_word(value)
    page.click_on_the_search_button()
    for element in page.search_results():
        assert value in element.text
def testInitial(self):
    """Check known queries return the expected number of hits and print titles."""
    search = Search()
    # (query, expected hit count) pairs exercised in order.
    for query, expected in (('terrible', 1), ('parrafa', 4)):
        results = search.performSearch(query)
        self.assertTrue(len(results) > 0)
        self.assertTrue(len(results) == expected)
        for result in results:
            print(result.get_title())
def getResult(self, component, message, device_id):
    """Dispatch a request: 'search' fills self.result['results'],
    'location' replaces self.result; anything else returns it unchanged."""
    if component == "search":
        interests = InterestManager(message, device_id)
        searcher = Search(interests.new_data)
        print(message)
        print(interests.new_data)
        self.result["results"] = searcher.searching()
    elif component == "location":
        self.result = Location_finder(message).find()
    return self.result
def __init__(self, file_name_train, file_name_test, trans=False, data_type=""):
    """Load train/test data, optionally convert to tf-idf, and prepare a
    cosine-similarity helper plus the e-commerce Search backend."""
    self.trunc_size = 20
    self.read_ali_data(file_name_train, file_name_test)
    if trans:
        self.trans_tfidf()
    # Cosine similarity rounded to 3 decimals; the epsilon term guards
    # against division by zero for zero-norm vectors.
    self.cx = lambda u, v: round(
        np.inner(u, v) / (LA.norm(u) * LA.norm(v) + 0.0000001), 3)
    self.search = Search("../../data/ecommerce")
    self.search_error = 0
    self.data_type = data_type
def fill(self, window, worlds, location_pools, item_pools):
    """Check the combined item pool can beat the game, then delegate the
    actual fill to each per-world distribution."""
    states = [world.state for world in worlds]
    pool = itertools.chain.from_iterable(item_pools)
    if not Search.max_explore(states, pool).can_beat_game(False):
        raise FillError('Item pool does not contain items required to beat game!')
    for dist in self.world_dists:
        dist.fill(window, worlds, location_pools, item_pools)
def search(self, galleries):
    """Resolve gallery ids via remote search and fetch metadata in batches.

    Galleries without a gid are searched one by one; once three resolved
    galleries accumulate (or at the end), their metadata is fetched.
    Galleries flagged ``force_metadata`` are fetched in
    ``API_MAX_ENTRIES``-sized batches.  Emits progress/end signals.
    """
    search_galleries = [g for g in galleries if g.gid is None]
    self.logger.debug("Search galleries: %s" % [g.name for g in search_galleries])
    try:
        # Progress increment: one search per gallery plus one metadata
        # request per 25 galleries.
        self.inc_val = (Decimal(100.0) /
                        Decimal(len(search_galleries) +
                                math.ceil(len(search_galleries) / 25)))
    except ZeroDivisionError:
        pass  # nothing to search; keep the previous increment
    need_metadata_galleries = []
    for gallery in search_galleries:
        search_results = Search.search_by_gallery(gallery)
        self.signals.progress.emit(self.inc_val)
        if search_results:
            gallery.id = Gallery.process_ex_url(search_results)
        if gallery.gid:
            need_metadata_galleries.append(gallery)
        if len(need_metadata_galleries) == 3:
            self.get_metadata(need_metadata_galleries)
            need_metadata_galleries = []
    if need_metadata_galleries:
        self.get_metadata(need_metadata_galleries)
    force_galleries = [g for g in galleries if g.force_metadata]
    # BUG FIX: the original ranged over len(galleries) while slicing
    # force_galleries, producing empty trailing batches (or missing none but
    # issuing pointless calls). Range over the list actually being sliced.
    force_gallery_metalist = [
        force_galleries[i:i + self.API_MAX_ENTRIES]
        for i in range(0, len(force_galleries), self.API_MAX_ENTRIES)]
    [self.get_metadata(g) for g in force_gallery_metalist]
    self.signals.end.emit()
def search_ticket():
    """Validate ticket-search parameters from the query string.

    Returns a JSON payload with the matching data, or a 400 JSON error
    response if validation raises.
    """
    vehicle_no = request.args.get('vehicle_no', '')
    timestamp = request.args.get('time', '')
    try:
        matches = Search().validate_data(vehicle_no, timestamp)
        return json.dumps({'data': matches})
    except Exception as e:
        error_payload = {'success': False, 'data': [], 'message': str(e)}
        return_obj = jsonify(error_payload)
        return_obj.status_code = 400
        return return_obj
def get(self):
    """Render the results page template to the response."""
    # NOTE(review): passes the handler *class* to Search, and the result is
    # unused — looks vestigial; confirm before removing.
    search = Search(webapp2.RequestHandler)
    url_linktext = 'test'
    template = JINJA_ENVIRONMENT.get_template('/resources/www/results.html')
    self.response.write(template.render({'url_linktext': url_linktext}))
def test_dish_search(self):
    """Dish search for 'gumbo' in Eugene, OR yields the expected dish names."""
    search_terms = {"name": "gumbo", "city": "eugene", "state": "OR"}
    dishes = Search.dish_search(search_terms)
    names = ["Seafood Gumbo", "Seafood Gumbo", "Mexican Gumbo",
             "Spicy Southern Gumbo", "Craft 2 - Mexican Gumbo"]
    # BUG FIX: the original did `dish_names += dish.name`, which extends the
    # list with the string's individual characters, and then compared the
    # return values of list.sort() (both None) — the assertion could never
    # fail. Collect whole names and compare sorted copies instead.
    dish_names = [dish.name for dish in dishes]
    self.assertEqual(sorted(names), sorted(dish_names))
def test_experiments():
    """End-to-end experiment run: build a 15-solved/15-unsolved experiment,
    run it with a mock MCTS, and validate the generated config, result
    details, and result summary files."""
    random.seed(2001)  # deterministic problem selection
    stop_rlz = {
        'broken_threshold': 0.1,
        'max_broken_walks': 50,
        'max_elapsed_sec': 2,
        'min_num_walks': 30,
        'stop_num_full_matches': 3
    }
    x_path = Search.build_experiment(15, 30, stop_rlz)
    # The experiment directory must contain a config reflecting the inputs.
    with open(x_path + '/config.json', 'r') as f:
        conf = json.load(f)
    assert(len(conf['solved']) == 15)
    assert(len(conf['not_solved']) == 15)
    assert(conf['stop_rlz']['max_elapsed_sec'] == 2)
    assert(conf['stop_rlz']['stop_num_full_matches'] == 3)
    # Re-seed so the run itself is reproducible too.
    random.seed(2001)
    Search.run_experiment(x_path, mcts=MockMCTS())
    with open(x_path + '/result_details.json', 'r') as f:
        results = json.load(f)
    name = conf['solved'][0]
    assert isinstance(results[name]['correct'][0], int)
    # The summary is a fixed-order, human-readable report.
    with open(x_path + '/result_summary.txt', 'r') as f:
        summary = f.read().splitlines()
    assert summary[0].startswith('Total number of problems')
    assert summary[1].startswith('Total number of questions')
    assert summary[2].startswith('Total number of correct solutions')
    assert summary[3].startswith('Total running time')
    assert summary[5].startswith('Correct solutions in code base')
    assert summary[6].startswith('New correct solutions')
    assert summary[7].startswith('Failed in code base')
    assert summary[8].startswith('Found and wrong')
    assert summary[9].startswith('New correct found')
    # Clean up the generated experiment directory.
    shutil.rmtree(x_path)
def _update(path, backlinks=None):
    """Record *path* as a backlink source for every markdown link it contains.

    Returns the mapping of link-target basename -> list of files linking
    to it.

    BUG FIX: the original declared ``backlinks={}`` — a mutable default
    argument shared across every call that omitted it, silently
    accumulating state between unrelated calls.  If any caller relied on
    that accumulation it must now pass the dict explicitly.
    """
    if backlinks is None:
        backlinks = {}
    links = S.markdown_links_search(path)
    if links:
        for link in links:
            name = os.path.basename(link)
            backs = set(backlinks[name]) if name in backlinks else set()
            backs.add(path)
            backlinks[name] = list(backs)
    return backlinks
class Paper:
    """Fetch paper summaries via Search, render them to JPG images, and
    return ``[Japanese title, abstract URL]`` pairs for posting."""

    def __init__(self, logger, translate):
        self.translate = translate
        self.search = Search(logger, translate)

    @staticmethod
    def _title_url_pairs(summaries):
        # Shared extraction used by every getOutputBy* method.
        return [[entry['title_JP'], entry['abs_url']] for entry in summaries]

    def getOutputByCategory(self, TwitterID, category):
        """Summaries for a category; returns ([ce, cj], title/url pairs)."""
        Summary_list, ce, cj = self.search.searchByCategory(category)
        makeJPG(makeHTML(Summary_list), TwitterID, path='register')
        return [ce, cj], self._title_url_pairs(Summary_list)

    def getOutputByKeyword(self, TwitterID, keyward):
        """Summaries for a keyword; returns (title/url pairs, used keyword)."""
        Summary_list, t_keyword = self.search.searchByKeyword(keyward)
        makeJPG(makeHTML(Summary_list), TwitterID, path='reply')
        return self._title_url_pairs(Summary_list), t_keyword

    def getOutputByRandom(self):
        """Random summaries posted under the fixed account; returns
        ([ce, cj], title/url pairs)."""
        Summary_list, ce, cj = self.search.searchByRandom()
        makeJPG(makeHTML(Summary_list), 'eigoyurusan', path='auto')
        return [ce, cj], self._title_url_pairs(Summary_list)
def main():
    """Run the gate-task state machine: SEARCH for the gate, then MOVE."""
    rospy.init_node('gate_task')
    machine = smach.StateMachine(outcomes=['done', 'fail'])
    with machine:
        machine.userdata.search_object = 'Gate'
        smach.StateMachine.add('SEARCH', Search(),
                               transitions={'Success': 'done',
                                            'Failure': 'fail'})
        machine.userdata.type = 'gateManuever'
        machine.userdata.args = {}
        smach.StateMachine.add('MOVE', Move())
    machine.execute()
def can_reach_stone(worlds, stone_location, location):
    """Return True if *stone_location* is reachable (and hints are allowed)
    when the item at *location* is temporarily removed from the world.

    A ``None`` location means there is nothing to remove, so the stone is
    trivially reachable.  The location's item is restored before searching
    completes, matching the original behavior.
    """
    if location is None:  # idiom fix: identity comparison for None
        return True
    old_item = location.item
    location.item = None
    search = Search.max_explore([world.state for world in worlds])
    location.item = old_item
    return (search.spot_access(stone_location)
            and search.state_list[location.world.id].guarantee_hint())
def runProgram(youtube_search_query, text_to_find):
    """Search YouTube for the query, then download every caption segment
    matching *text_to_find*, numbering the outputs sequentially."""
    dl = Downloader()
    celeb_videos = Search.search(youtube_search_query)
    links = [Caption.YOUTUBE_LINK + x for x in celeb_videos]
    # Python 2 print statement.
    print links
    count = 0
    for videoid in celeb_videos:
        cap = Caption(videoid)
        results = cap.find(text_to_find)
        for result in results:
            dl.download(result, text_to_find + str(count))
            # NOTE(review): count increment assumed per-download (inner loop)
            # — original formatting was ambiguous; confirm.
            count += 1
def main():
    """GUI loop: search articles by pattern or at random; open hits in a browser."""
    search = Search()
    g = window(800, 600)
    while True:
        event, value = g.window.read()
        # See if user wants to quit or window was closed
        if event == sg.WINDOW_CLOSED:
            break
        if event == 'Search':
            result = search.reg_Search(value[0])
        elif event == 'Random Search':
            result = search.search_rand()
        else:
            continue
        if result is None:
            print("Article not found")
        else:
            print("The article is", result)
            webbrowser.open(result)
def test(self):
    """Search the index, then recover and print the full text of each hit's fragment."""
    # run indexing process
    #indexer = Indexer()
    #indexer.run()
    #print("index completed")
    #perform search
    results = Search().performSearch('WAS iSeries')
    print('hists: ' + str(len(results)))
    self.assertTrue(len(results))
    #recover fragments
    recoverer = FragmentRecover()
    for result in results:
        print(result)
        fragment = recoverer.retrieve(result.get_path(), result.get_order())
        print(fragment.get_source_file())
        print(fragment.get_order())
        print(fragment.get_text())
        self.assertTrue(fragment.get_text())
def result(self,userInput):
    """Print a timed simple summary for each of up to 20 Google results
    for *userInput* (Python 2 snippet)."""
    userInput = str(userInput)
    googleRes = Search()
    cleanPage = Clean()
    summ = Summary()
    query = QueryExpansion()
    idf = self.loadSet("trainedSetV1")
    area = query.expand(userInput)
    results = googleRes.googleAPICall(userInput)
    # Only return 20 results
    length = len(results)
    if len(results) > 20:
        length = 20
    for res in results[0:length]:
        document = cleanPage.cleanHTML(res)
        print res
        # time.clock() is the Python 2 wall/CPU timer (removed in 3.8).
        start = time.clock()
        print (summ.simpleSummary(document,userInput,idf,area,1))
        elapsed = (time.clock() - start)
        print (elapsed)
        print
def result(self, userInput):
    """Return a flat list of [title, url, summary] entries for up to 20
    Google results for *userInput*."""
    userInput = str(userInput)
    googleRes = Search()
    cleanPage = Clean()
    summ = Summary()
    classifier = QueryClassifier()
    expander = QueryExpansionProcess()
    expandedQuery = expander.expand(userInput)
    idf = self.loadSet("trainedSetV1")
    area = classifier.expand(userInput)
    urls = googleRes.googleAPICall(userInput)
    # Only return 20 results
    limit = min(len(urls), 20)
    output = []
    for res in urls[0:limit]:
        document, title = cleanPage.cleanHTML(res)
        output.append(title)
        output.append(res)
        output.append(
            summ.simpleSummary(document, userInput, idf, area, expandedQuery))
    return output
def set_entrances_based_rules(worlds):
    """Forbid tunic purchases in shops only reachable as child, using a
    search state where every pool item has been collected."""
    # Use the states with all items available in the pools for this seed
    complete_itempool = [item for world in worlds
                         for item in world.get_itempool_with_dungeon_items()]
    search = Search([world.state for world in worlds])
    search.collect_all(complete_itempool)
    search.collect_locations()
    for world in worlds:
        if world.check_beatable_only:
            # Only applies when All Locations Reachable is on.
            continue
        for location in world.get_locations():
            if location.type != 'Shop':
                continue
            # Shops never reachable as adult can only be visited as child;
            # keep tunics (adult-only gear) out of them.
            if not search.can_reach(location.parent_region, age='adult'):
                forbid_item(location, 'Buy Goron Tunic')
                forbid_item(location, 'Buy Zora Tunic')
def FinalPresentation(Query):
    """Print the <h1> heading and URL of every search hit, best-ranked first.

    Downloads each page to a temp file, parses it, and prints its first
    heading.  Prints an apology when there are no results.
    """
    D = Search.Search(Query)
    # D maps url -> rank score; iterate urls from best to worst.
    Sort_based_on_ranks = sorted(D, key=D.get, reverse=True)
    for url in Sort_based_on_ranks:
        code = requests.get(url, stream=True)
        with open("temp.html", "wb") as html:
            html.write(code.content)
        # BUG FIX: the original opened the read handle without closing it,
        # leaking a file descriptor per result.
        with open("temp.html", 'r') as data:
            soup = BeautifulSoup(data, 'html.parser')
        print(soup.h1.get_text() + "\n" + url + "\n\n\n")
    if(len(Sort_based_on_ranks)==0):
        print("I'm sorry! The webpages included in the project does not have any related information")
def tune( dataFilename ):
    """Grid-search over N and K, running Search for each pair and appending
    'N K T fitness' rows to *dataFilename* (Python 2 snippet)."""
    Nvalues = [8]
    Kvalues = [4, 8]
    print "Starting"
    for n in Nvalues:
        print n
        for k in Kvalues:
            print k
            Parameters.N = n
            Parameters.K = k
            # T derived from E, N and K — presumably an evaluations budget
            # split; confirm against the Parameters module.
            t = int((Parameters.E - Parameters.N)/Parameters.K)
            Parameters.T = t
            print Parameters.T
            # Imported inside the loop so Search sees the updated Parameters.
            from Search import Search
            results = Search.search()
            f = open( dataFilename , 'a' )
            f.write( str( Parameters.N ) + " " + str( Parameters.K ) + " " + str( Parameters.T ) + " " + str( results[ 0 ].get_fitness() ) + "\n" )
            f.close()
def get_unc_listing(ip, username, password, unc_path):
    """Issue an ActiveSync Search for a UNC path and save the decoded
    response to get_unc_listing.txt; prints any error instead of raising."""
    try:
        request_doc = Search.build(unc_path, username=username, password=password)
        as_conn = ASHTTPConnector(ip)
        as_conn.set_credential(username, password)
        raw_response = as_conn.post("Search", parser.encode(request_doc))
        wapxml_res = parser.decode(raw_response)
        filename = "get_unc_listing.txt"
        print('[+] Save response file to %s' % (filename))
        with open(filename, 'w+') as file_object:
            file_object.write(str(wapxml_res))
    except Exception as e:
        # Best-effort tool: report and continue rather than crash.
        print('[!]Error:%s' % e)
def show_snippets():
    "Input format: `key1 key2, language1 language2`"
    if not varibles_checked():
        return 0

    # Get all sorted snippets
    # BUG FIX: the original tested `not ['search_all_folders']` — a non-empty
    # list literal is always truthy, so the config flag was never consulted
    # and the else branch always ran.  Mirror show_notes and read the flag.
    if not C['search_all_folders']:
        sorted_note_list = S.get_sorted_files(C["path_to_new_Snippet"])
    else:
        all_files = S.get_sorted_files(Config.FILES_PATH)
        # only search notes with code fences
        sorted_note_list = list(
            filter(lambda x: "```" in x['content'], all_files))

    # Parse input
    mode, keywords, languages = get_parsed_arg()
    if mode == "Recent":
        result = sorted_note_list
    elif mode == "Title_Search":
        result = S.title_search(keywords, sorted_note_list)
    elif mode == "And_Search" or mode == "Exact_Search":
        result = S.and_search(keywords, sorted_note_list)
    elif mode == "Or_Search":
        result = S.or_search(keywords, sorted_note_list)
    if languages:
        # BUG FIX: filter the current result set, not the full note list —
        # the original discarded the keyword search (cf. tag handling in
        # show_notes).
        result = S.metric_search("language", languages, result)

    # Generate ScriptFilter Output
    if result:
        # show matched results
        num = int(C["result_nums"])
        display_matched_result(query, result[:num])
    else:
        # show none matched info
        Display.show({
            "title": "Nothing found ..",
            "subtitle": f'Presh "⌘" to create a new Snippet with title \"{query}\"',
            "arg": '',
            "mods": {
                "cmd": {
                    "arg": f'new|[Snippet>{query}]',
                    "subtitle": "Press <Enter> to confirm creating"
                }
            }
        })
    return
def show_backlinks():
    """List all notes that link to the file currently open in Typora."""
    filename = U.get_typora_filename()
    if not filename:
        Display.show({
            "title": "Error!",
            "subtitle": "No file is opened in Typora."
        })
        return
    matched_list = [
        F.get_file_info(U.get_abspath(link, query_dict=True))
        for link in S.backlinks_search(filename)
    ]
    if matched_list:
        display_matched_result(filename, matched_list)
    else:
        Display.show({
            "title": "Not found related Backlinks",
            "subtitle": "No other notes links to current file"
        })
def show_markdown_links():
    """Show MarkDown links contained in currently opened file"""
    filename = U.get_typora_filename()
    if not filename:
        Display.show({
            "title": "Error!",
            "subtitle": "No file is opened in Typora."
        })
        return
    matched_list = [
        F.get_file_info(U.get_abspath(link, query_dict=True))
        for link in S.markdown_links_search(filename, filename=True)
    ]
    if matched_list:
        display_matched_result(filename, matched_list)
    else:
        Display.show({
            "title": "No MarkDown Link is found in the current file.",
            "subtitle": ""
        })
def tune(dataFilename):
    """Grid-search over N and K, running Search for each pair and appending
    'N K T fitness' rows to *dataFilename* (Python 2 snippet; duplicate of
    the earlier tune with different formatting)."""
    Nvalues = [8]
    Kvalues = [4, 8]
    print "Starting"
    for n in Nvalues:
        print n
        for k in Kvalues:
            print k
            Parameters.N = n
            Parameters.K = k
            # T derived from E, N and K — presumably an evaluations budget
            # split; confirm against the Parameters module.
            t = int((Parameters.E - Parameters.N) / Parameters.K)
            Parameters.T = t
            print Parameters.T
            # Imported inside the loop so Search sees the updated Parameters.
            from Search import Search
            results = Search.search()
            f = open(dataFilename, 'a')
            f.write(
                str(Parameters.N) + " " + str(Parameters.K) + " " +
                str(Parameters.T) + " " + str(results[0].get_fitness()) + "\n")
            f.close()
def show_notes():
    """List notes matching the parsed query/tags, or offer to create one."""
    if not varibles_checked():
        return 0

    # Get all sorted notes — folder choice depends on the config flag.
    folder = Config.FILES_PATH if C['search_all_folders'] else Config.NOTES_PATH
    sorted_note_list = S.get_sorted_files(folder)

    # Parse input
    mode, keywords, tags = get_parsed_arg()
    if mode == "Recent":
        result = sorted_note_list
    elif mode == "Title_Search":
        result = S.title_search(keywords, sorted_note_list)
    elif mode in ("And_Search", "Exact_Search"):
        result = S.and_search(keywords, sorted_note_list)
    elif mode == "Or_Search":
        result = S.or_search(keywords, sorted_note_list)

    # Parse tags if needed
    if tags:
        result = S.metric_search("tag", tags, result)

    # Generate ScriptFilter Output
    if result:
        # show matched results
        display_matched_result(query, result[:int(C["result_nums"])])
    else:
        # show none matched info
        Display.show({
            "title": "Nothing found ...",
            "subtitle": f'Presh "⌘" to create a new Note with title \"{query}\"',
            "arg": '',
            "mods": {
                "cmd": {
                    "arg": f'new|[Note>{query}]',
                    "subtitle": "Press 'Enter' to confirm creating"
                }
            }
        })
    return
def test_venue_search(self):
    """Venue search for 'belly' in Eugene, OR yields the expected venues."""
    search_terms = {"name": "belly", "city": "eugene", "state": "OR"}
    venues = Search.venue_search(search_terms)
    names = ["Belly", "Taqueria Belly"]
    venue_names = [venue.name for venue in venues]
    # BUG FIX: the original compared the return values of list.sort(),
    # which are both None — the assertion could never fail.  Compare
    # sorted copies instead.
    self.assertEqual(sorted(names), sorted(venue_names))
# Summary output for the preceding benchmark batch (variables defined
# earlier in the script, outside this view).
print('p:' + str(PROB))
print('Total Solutions:' + str(total_solutions))
print('Total Time:' + str(total_time))
print('Average Solved:' + str(average_solved))
print('Total Path Length:' + str(total_path_length))
print('Average Path Length:' + str(average_path_length))
# NOTE(review): the triple-quote below is unmatched within this view — it
# either closes or opens a block-commented region; confirm against the full
# script before editing.
"""
print("--------------------------------\nA* Euclidean")
for x in range(0, ITERATIONS):
    current_map = Map(DIM, PROB)
    start_time = time.time()
    current_map.results = Search(current_map).A_star("euclidean")
    current_time = round(time.time() - start_time, 20)
    if (current_map.results['Status'] == 'Found Path'):
        total_solutions += 1
        total_cells_visited += int(current_map.results['# of Visited Cells'])
    total_time += current_time
    #if(current_map.results['# of Visited Cells'] != 'n/a'):
        #total_cells_visited += int(current_map.results['# of Visited Cells'])
    print("Time: ", current_time)
average_solved = round(total_solutions / ITERATIONS, 7)
average_cells_visited = round(total_cells_visited / total_solutions, 4)
print('p:' + str(PROB))
def buildWorldGossipHints(spoiler, world, checkedLocations=None):
    """Fill this world's gossip stones with hints.

    Places always/trial hints first, then draws remaining hint types by
    weighted probability (or a fixed schedule in 'tournament' mode) until
    every stone is used.
    """
    # rebuild hint exclusion list
    hintExclusions(world, clear_cache=True)
    world.barren_dungeon = False
    world.woth_dungeon = 0

    # A stone is hintable only if it is reachable and hints are guaranteed.
    search = Search.max_explore([w.state for w in spoiler.worlds])
    for stone in gossipLocations.values():
        stone.reachable = (
            search.spot_access(world.get_location(stone.location))
            and search.state_list[world.id].guarantee_hint())

    if checkedLocations is None:
        checkedLocations = set()

    stoneIDs = list(gossipLocations.keys())
    world.distribution.configure_gossip(spoiler, stoneIDs)
    random.shuffle(stoneIDs)

    hint_dist = hint_dist_sets[world.hint_dist]
    hint_types, hint_prob = zip(*hint_dist.items())
    hint_prob, _ = zip(*hint_prob)

    # Add required location hints
    alwaysLocations = getHintGroup('always', world)
    for hint in alwaysLocations:
        location = world.get_location(hint.name)
        checkedLocations.add(hint.name)
        location_text = getHint(location.name, world.clearer_hints).text
        if '#' not in location_text:
            location_text = '#%s#' % location_text
        item_text = getHint(getItemGenericName(location.item),
                            world.clearer_hints).text
        add_hint(spoiler, world, stoneIDs,
                 GossipText('%s #%s#.' % (location_text, item_text),
                            ['Green', 'Red']),
                 hint_dist['always'][1], location, force_reachable=True)

    # Add trial hints
    if world.trials_random and world.trials == 6:
        add_hint(spoiler, world, stoneIDs,
                 GossipText("#Ganon's Tower# is protected by a powerful barrier.",
                            ['Pink']),
                 hint_dist['trial'][1], force_reachable=True)
    elif world.trials_random and world.trials == 0:
        add_hint(spoiler, world, stoneIDs,
                 GossipText("Sheik dispelled the barrier around #Ganon's Tower#.",
                            ['Yellow']),
                 hint_dist['trial'][1], force_reachable=True)
    elif world.trials < 6 and world.trials > 3:
        # Few trials skipped: hint the skipped ones.
        for trial, skipped in world.skipped_trials.items():
            if skipped:
                add_hint(spoiler, world, stoneIDs,
                         GossipText("the #%s Trial# was dispelled by Sheik."
                                    % trial, ['Yellow']),
                         hint_dist['trial'][1], force_reachable=True)
    elif world.trials <= 3 and world.trials > 0:
        # Few trials active: hint the active ones.
        for trial, skipped in world.skipped_trials.items():
            if not skipped:
                add_hint(spoiler, world, stoneIDs,
                         GossipText("the #%s Trial# protects Ganon's Tower."
                                    % trial, ['Pink']),
                         hint_dist['trial'][1], force_reachable=True)

    hint_types = list(hint_types)
    hint_prob = list(hint_prob)
    hint_counts = {}

    if world.hint_dist == "tournament":
        # Fixed schedule: each type appears exactly its configured count,
        # then remaining stones are filled from fill_hint_types in order.
        fixed_hint_types = []
        for hint_type in hint_types:
            fixed_hint_types.extend([hint_type] * int(hint_dist[hint_type][0]))
        fill_hint_types = ['sometimes', 'random']
        current_fill_type = fill_hint_types.pop(0)

    while stoneIDs:
        if world.hint_dist == "tournament":
            if fixed_hint_types:
                hint_type = fixed_hint_types.pop(0)
            else:
                hint_type = current_fill_type
        else:
            try:
                # Weight the probabilities such that hints that are over the expected proportion
                # will be drawn less, and hints that are under will be drawn more.
                # This tightens the variance quite a bit. The variance can be adjusted via the power
                weighted_hint_prob = []
                for w1_type, w1_prob in zip(hint_types, hint_prob):
                    p = w1_prob
                    if p != 0:  # If the base prob is 0, then it's 0
                        for w2_type, w2_prob in zip(hint_types, hint_prob):
                            if w2_prob != 0:  # If the other prob is 0, then it has no effect
                                # Raising this term to a power greater than 1 will decrease variance
                                # Conversely, a power less than 1 will increase variance
                                p = p * (
                                    ((hint_counts.get(w2_type, 0) / w2_prob) + 1)
                                    / ((hint_counts.get(w1_type, 0) / w1_prob) + 1))
                    weighted_hint_prob.append(p)
                hint_type = random_choices(hint_types,
                                           weights=weighted_hint_prob)[0]
            except IndexError:
                raise Exception(
                    'Not enough valid hints to fill gossip stone locations.')

        hint = hint_func[hint_type](spoiler, world, checkedLocations)

        if hint == None:
            # This hint type is exhausted; zero its probability so it is
            # never drawn again.
            index = hint_types.index(hint_type)
            hint_prob[index] = 0
            if world.hint_dist == "tournament" and hint_type == current_fill_type:
                logging.getLogger('').info(
                    'Not enough valid %s hints for tournament distribution.',
                    hint_type)
                if fill_hint_types:
                    current_fill_type = fill_hint_types.pop(0)
                    logging.getLogger('').info(
                        'Switching to %s hints to fill remaining gossip stone locations.',
                        current_fill_type)
                else:
                    raise Exception(
                        'Not enough valid hints for tournament distribution.')
        else:
            gossip_text, location = hint
            place_ok = add_hint(spoiler, world, stoneIDs, gossip_text,
                                hint_dist[hint_type][1], location)
            if place_ok:
                hint_counts[hint_type] = hint_counts.get(hint_type, 0) + 1
            if not place_ok and world.hint_dist == "tournament":
                # Retry this scheduled hint type with a different location.
                logging.getLogger('').debug('Failed to place %s hint for %s.',
                                            hint_type, location.name)
                fixed_hint_types.insert(0, hint_type)
def __init__(self):
    """Initialize the Search base class and an empty queue of pending moves."""
    Search.__init__(self)
    # Queue.Queue is the Python 2 stdlib name (queue.Queue in Python 3).
    self.moveQueue = Queue.Queue()
def showBacklog(handler, id, search = None, devEdit = False):
    """Render the sprint backlog page (Python 2 snippet): access checks,
    search/filter UI, status banners, and the task table."""
    requirePriv(handler, 'User')
    id = int(id)
    sprint = Sprint.load(id)
    if not sprint or sprint.isHidden(handler.session['user']):
        ErrorBox.die('Sprints', "No sprint with ID <b>%d</b>" % id)
    elif not sprint.canView(handler.session['user']):
        ErrorBox.die('Private', "You must be a sprint member to view this sprint")

    # Redirect to search help page if searched for empty string
    if search == '':
        redirect('/help/search')

    handler.title(sprint.safe.name)
    drawNavArrows(sprint, handler.session['user'], '')

    tasks = sprint.getTasks()
    editable = sprint.canEdit(handler.session['user']) or (devEdit and isDevMode(handler))
    search = Search(sprint, search)

    # Emit page-level JS state for the task table and search widgets.
    print "<script src=\"/settings/sprints.js\" type=\"text/javascript\"></script>"
    print "<script type=\"text/javascript\">"
    print "var sprintid = %d;" % id
    print "var currentUser = %s;" % toJS(handler.session['user'].username if handler.session['user'] else None)
    print "var totalTasks = %d;" % len(tasks)
    # True is a placeholder for the dynamic tokens (status, assigned)
    print "var searchTokens = %s;" % toJS(filter(None, [search.getBaseString() if search.hasBaseString() else None] + [True] + ["%s:%s" % (filt.getKey(), filt.value) for filt in search.getAll() if filt.getKey() not in ('status', 'assigned')]))
    print "var searchDescriptions = %s;" % toJS(filter(None, ["matching %s" % search.getBaseString() if search.hasBaseString() else None] + [True] + [filt.description() for filt in search.getAll()]))
    print "TaskTable.init({link_hours_status: %s});" % toJS(not sprint.isPlanning())
    print "$('document').ready(function() {"
    # Pre-select any filter buttons named in the search string.
    if search.has('assigned'):
        print " $('%s').addClass('selected');" % ', '.join("#filter-assigned a[assigned=\"%s\"]" % user.username for user in search.get('assigned').users + ([handler.session['user']] if search.get('assigned').currentUser else []))
    if search.has('status'):
        print " $('%s').addClass('selected');" % ', '.join("#filter-status a[status=\"%s\"]" % status.name for status in search.get('status').statuses)
    print " apply_filters();"
    print "});"
    print "</script>"

    # Mass-edit toolbar shown when tasks are selected.
    print "<div id=\"selected-task-box\">"
    print "<span></span>"
    print Button('history', id = 'selected-history').positive()
    print Button('highlight', id = 'selected-highlight').positive()
    print Button('mass edit', id = 'selected-edit').positive()
    print Button('cancel', id = 'selected-cancel') #.negative()
    print "</div>"

    print "<div class=\"backlog-tabs\">"
    print tabs(sprint, 'backlog')
    # NOTE(review): this replace is a no-op as written — the replacement was
    # probably '&quot;' and got HTML-decoded somewhere; confirm upstream.
    print "<input type=\"text\" id=\"search\" value=\"%s\">" % search.getFullString().replace('"', '"')
    print "</div>"
    undelay(handler)
    print InfoBox('Loading...', id = 'post-status', close = True)

    avail = Availability(sprint) if sprint.isActive() else None
    dayStart = Weekday.today().date()

    # Assigned-member filter buttons; members with no availability today are
    # styled as 'away' once planning is over.
    print "<div id=\"filter-assigned\">"
    print "<a class=\"fancy danger\" href=\"#\"><img src=\"/static/images/cross.png\"> None</a>"
    for member in sorted(sprint.members):
        cls = ['fancy']
        if not sprint.isPlanning() and avail and avail.get(member, dayStart) == 0:
            cls.append('away')
        print "<a class=\"%s\" assigned=\"%s\" href=\"/sprints/%d?search=assigned:%s\"><img src=\"%s\"> %s</a>" % (' '.join(cls), member.username, id, member.username, member.getAvatar(16), member.username)
    print "</div><br>"

    # Status filter buttons.
    print "<div id=\"filter-status\">"
    print "<a class=\"fancy danger\" href=\"#\"><img src=\"/static/images/cross.png\"> None</a>"
    for status in sorted(statuses.values()):
        print "<a class=\"fancy\" status=\"%s\" href=\"/sprints/%d?search=status:%s\"><img src=\"%s\">%s</a>" % (status.name, id, status.name.replace(' ', '-'), status.getIcon(), status.text)
    print "</div><br>"

    # Admin-only: offer to cancel a pending nightly deletion.
    if handler.session['user'].hasPrivilege('Admin') and 'deleted' in sprint.flags:
        print "<form method=\"post\" action=\"/admin/projects/%d/cancel-deletion/%d\">" % (sprint.project.id, sprint.id)
        print WarningBox("This sprint is flagged for deletion during nightly cleanup. %s" % Button('Cancel').mini().post())
        print "</form>"

    # Lifecycle banners: planning / review / running.
    if sprint.isPlanning():
        if sprint.isActive():
            print InfoBox("Today is <b>sprint planning</b> — tasks aren't finalized until the end of the day")
        else:
            daysTillPlanning = (tsToDate(sprint.start) - getNow()).days + 1
            print InfoBox("The sprint has <b>not begun</b> — planning is %s. All changes are considered to have been made midnight of plan day" % ('tomorrow' if daysTillPlanning == 1 else "in %d days" % daysTillPlanning))
    elif sprint.isReview():
        print InfoBox("Today is <b>sprint review</b> — this is the last day to make changes to the backlog. All open tasks will be deferred at the end of the day")
    elif not sprint.isOver():
        noHours = filter(lambda task: task.stillOpen() and task.hours == 0, tasks)
        if noHours != []:
            print WarningBox("There are <a href=\"/sprints/%d?search=status:not-started,in-progress,blocked hours:0\">open tasks with no hour estimate</a>" % sprint.id)

    tasks = search.filter(tasks)

    # Dev-mode debugging helpers.
    if isDevMode(handler):
        print Button('#all-tasks borders', "javascript:$('#all-tasks, #all-tasks tr td').css('border', '1px solid #f00').css('border-collapse', 'collapse');").negative()
        if not editable:
            print Button('make editable', "/sprints/%d?devEdit" % id).negative()
        elif devEdit:
            print Button('make uneditable', "/sprints/%d" % id).negative()
        print "<div class=\"debugtext\">"
        print "start: %d (%s)<br>" % (sprint.start, tsToDate(sprint.start))
        print "end: %d (%s)<br>" % (sprint.end, tsToDate(sprint.end))
        print "</div>"

    showing = ResponseWriter()
    print "<span id=\"task-count\"></span>"
    # save-search href set by update_task_count()
    print "<a class=\"save-search\"><img src=\"/static/images/save.png\" title=\"Save search\"></a>"
    print "<a class=\"cancel-search\" href=\"/sprints/%d\"><img src=\"/static/images/cross.png\" title=\"Clear search\"></a>" % id
    showing = showing.done()

    print TaskTable(sprint, editable = editable, tasks = tasks, tableID = 'all-tasks', dateline = showing, taskClasses = {task: ['highlight'] for task in (search.get('highlight').tasks if search.has('highlight') else [])}, debug = isDevMode(handler), groupActions = True, taskModActions = True, index = True, goal = True, status = True, name = True, assigned = True, historicalHours = True, hours = True, devEdit = devEdit)
def OnSearch( self, event ):
    """Run a search from the GUI query box and show results in the list control.

    The query may end in "/k" where k is an integer proximity parameter for
    two-term searches (e.g. "foo bar/3"); otherwise k defaults to 1.
    The distance spinner selects the mode: 0 means exact matching, any other
    value means partial matching with that edit distance. The stemmer radio
    box picks which prebuilt JSON index directory to load, and the
    title/abstract/both radio buttons pick the index file within it.
    At most two query terms are supported; returns 0 on empty query,
    too many terms, or no results.
    """
    # If the query is empty, do nothing.
    query = self.query_textCtrl.GetValue()
    if query == '':
        return 0
    # Parse an optional "/k" proximity suffix; remaining text is split into terms.
    if '/' in query:
        keywords, k = query.split('/')
        k = int(k)
        keywords = keywords.split()
    else :
        k = 1
        keywords = query.split()
    #searchtype = self.stype1_radioBtn.GetValue() #True-->exact False-->partial
    distance = self.dis_spin.GetValue() # 0 for exact matching, else partial matching
    # Stemmer choice selects both the index directory and the file name prefix.
    radioIndex = self.stemmer_radioBox.GetSelection()
    radioString = self.stemmer_radioBox.GetStringSelection()
    if radioIndex == 0:
        path ='./NoStemmer/'
    elif radioIndex == 1:
        path = './Porter/'
    elif radioIndex ==2:
        path = './Lancaster/'
    # Load the inverted index for the chosen field (title / abstract / both).
    # NOTE(review): the json files are opened without being closed afterwards.
    if self.tit_radioBtn.GetValue():
        self.seach_dic = json.load( open(path + radioString + '_tit_index.json', 'r'))
    if self.abs_radioBtn.GetValue():
        self.seach_dic = json.load( open(path + radioString + '_abs_index.json', 'r'))
    if self.both_radioBtn.GetValue():
        self.seach_dic = json.load( open(path + radioString + '_both_index.json', 'r'))
    search = Search()
    ###############
    # Exact search: distance 0 means terms must match the index entries exactly.
    data = []
    if distance == 0:
        if len(keywords) > 2:
            print "can't query more than two terms"
            return 0
        elif len(keywords) == 2:
            # Two terms: positional intersection with proximity window k.
            item = search.pos_intersect(keywords[0],keywords[1],self.seach_dic, k)
        elif len(keywords) == 1:
            item = search.termSearch(keywords[0], self.seach_dic)
        if item == None:
            wx.MessageBox("Can't find " + self.query_textCtrl.GetValue(),'Information',wx.OK | wx.ICON_INFORMATION)
            return 0
        # Flatten {docid: (times, positions)} into rows for the list control.
        # NOTE(review): this loop rebinds k (the proximity parameter) — k is no
        # longer needed at this point, but the reuse is fragile.
        for k,v in item.items():
            temp={}
            temp['docid'] = int(k)
            temp['title'] = search.get_title(k)
            temp['times'] = v[0]
            temp['pos'] = v[1]
            data.append(temp)
    # Partial search: allow matches within the given edit distance.
    else:
        if len(keywords) > 2:
            print "can't query more than two terms"
            return 0
        elif len(keywords) == 2:
            item, d_dict = search.partial_posSearch(keywords[0],keywords[1],self.seach_dic, distance, k)
        elif len(keywords) == 1:
            item, d_dict = search.partialSearch(keywords[0], self.seach_dic,distance)
        if item == None:
            wx.MessageBox("Can't find " + self.query_textCtrl.GetValue(),'Information',wx.OK | wx.ICON_INFORMATION)
            return 0
        # item maps each matched index term to {docid: (times, positions)};
        # d_dict carries the edit distance for each matched term.
        for k,v in item.items():
            for k1, v1 in v.items():
                temp = {}
                temp['docid'] = int(k1)
                temp['title'] = search.get_title(k1)
                temp['times'] = v1[0]
                temp['pos'] = v1[1]
                temp['match'] = k
                temp['distance'] = d_dict[k]
                data.append(temp)
    # Push the rows into the list control.
    self.SetListCtrl(data)
    # Summarize: file count and total occurrence count in the status text.
    times = sum( t['times'] for t in data)
    self.result_staticText.SetLabel('Result: '+str(len(data))+' files matching | Times: ' + str(times) )
def search():
    """Handle a search form POST and render the results page.

    Reads the 'query' form field, runs it through the Search engine, and
    renders 'search.html' with the query, its results, and a randomly
    chosen Clippy.js agent.
    """
    term = request.form['query']
    engine = Search()
    hits = engine.search(term)
    agent = random_clippyjs_agent()
    return render_template(
        'search.html',
        query=term,
        results=hits,
        clippyjs_agent=agent,
    )
except Exception,e: pass host='' if hostname.startswith("www") == True: parts = hostname.split(".") for part in parts: if part !='www': host+=part+'.' host = host[:-1] else: host = hostname search = Search(host) search.process() emails = search.get_emails() hosts = search.get_hostnames() full = [] print "\n\n[+] Emails:" print "------------------" if emails == []: print "No emails found" else: for email in emails: print email print "\n[+] Hosts:" print "------------------------------------"
def __init__(self):
    """Initialize the base Search state, then start with an empty move stack."""
    Search.__init__(self)
    self.moveStack = list()
def helpSearch(handler):
    """Render the HTML help page describing the task-search syntax.

    Emits (via Python 2 print redirection) a description of the free-form
    fuzzy name search plus the supported name:value filter operators, with
    the fuzzy-match threshold taken from Search.minMatchPercent().
    """
    handler.title("Search")
    print "Searching is a combination of miscellaneous filters and a free-form task name fuzzy search. The search operators are:<br>"
    # (operator name, HTML description) pairs rendered as a bullet list below.
    operators = [('assigned', "Comma-separated list of usernames the task is assigned to. Use \"me\" to refer to the current user"), ('status', "Comma-separated list of the task's current status"), ('hours', "The task's current hours. Also accepts several forms of range: <code>4-8</code>, <code>4-</code>, <code>&gt;4</code>, <code>&gt;=4</code>, etc."), ('goal', "Comma-separated list of goal colors"), ('created', "YYYYMMDD date (or range of dates) the task was created in"), ('modified', "YYYYMMDD date (or range of dates) the task was modified in")]
    print "<ul>"
    for (name, desc) in operators:
        print "<li><b>%s</b> — %s" % (name, desc)
    print "</ul>"
    print "Search operators are of the form <code>name:value</code>, e.g. <code>status:complete,deferred</code>. Fields can be quoted as necessary, e.g. <code>status:\"not started\"</code><br><br>"
    print "Any non-search operator is assumed to be part of the free-form task name search. For example, <code>foo hours:4 bar</code> shows all 4 hour tasks matching the string \"foo bar\". Task names must be at least %d%% similar to the free-form search to match<br><br>" % Search.minMatchPercent()
    print "The number of matching tasks is shown on the backlog dateline. The <img class=\"bumpdown\" src=\"/static/images/save.png\"> icon saves the search for future use, while the <img class=\"bumpdown\" src=\"/static/images/cross.png\"> icon cancels the search and shows all tasks"
def __init__(self, costFunction):
    """Set up a cost-driven search.

    Initializes the Search base class, stores the cost function used to
    rank moves, and creates the priority queue that holds pending moves.
    """
    Search.__init__(self)
    self.cost = costFunction
    self.moveQueue = Queue.PriorityQueue()
def run():
    """Interactive CLI loop for the local indexer.

    Repeatedly prompts for a search criterion, lists matching results, lets
    the user pick one to preview a fragment of, and offers follow-up actions:
    (o) view another result, (t) open the full text, (n) new search,
    (f) finish. The user-facing prompts/messages are in Spanish.
    """
    logger=Logger.getInstance()
    logger.logInfo(' --------- Initiating CLI ----------')
    print('============================Local Indexer===============================')
    print('\r\n')
    # ac drives the state machine: 'N' = new search, 'O' = show another
    # result, 'T' = open full text, 'F' = finish (falls out of both loops).
    ac = 'n'
    while(ac.upper()=='N'):
        print('\r\n')
        criteria = input("ingrese criterio de busqueda :")
        logger.logInfo('criteria: ' + criteria)
        # Perform the search for this criterion.
        search = Search()
        results = search.performSearch(criteria)
        ac='o'
        while(ac.upper()=='O'):
            print('hists: ' + str(len(results)))
            print('criterio: ' + criteria)
            logger.logInfo('hists: ' + str(len(results)))
            # Recover fragments: list every result with its index number.
            fr = FragmentRecover()
            i = 0
            for result in results:
                print(str(i) + " | " + result.get_title())
                i = i + 1
            if (i==0):
                # No hits: force a new search.
                print("No se hallaron resultados")
                ac="N"
                break
            print("-----------------")
            print("\n\r")
            print("\n\r")
            # Result selection and validation: keep prompting until the input
            # is an integer in range; non-numeric input resets fg to "" so the
            # loop condition short-circuits instead of raising.
            fg=""
            while (fg=="" or int(fg)>=i):
                fg = input("seleccione resultado: ")
                try:
                    int(fg)
                except ValueError:
                    fg=""
            print("\n\r")
            print("\n\r")
            print('========================================================================')
            # Recover and display the selected result's fragment, framed by
            # identical header and footer lines.
            fgm = fr.recover(results[int(fg)].get_path(), results[int(fg)].get_order())
            print(fg + " | " + results[int(fg)].get_title())
            print('fragmento ' + str(fgm.get_order()) + ' extension chars: ' + str(len(fgm.get_text())))
            print('------------------------------------------------------------------------')
            print(LoinxCLI.display_text(fgm.get_text(),criteria,results[int(fg)].get_path()))
            print('------------------------------------------------------------------------')
            print(fg + " | " + results[int(fg)].get_title())
            print('fragmento ' + str(fgm.get_order()) + ' extension chars: ' + str(len(fgm.get_text())))
            print('------------------------------------------------------------------------')
            print('ver (o)tro resultado (t)exto completo (n)ueva busqueda (f)inalizar ')
            print('========================================================================')
            # Prompt until a recognized action letter is entered.
            ac=""
            while(ac.upper() not in ['O','T','N','F']):
                ac = input()
            if (ac.upper()=='T'):
                # Open the full file, then return to the result menu.
                LoinxCLI.openFile(results[int(fg)].get_path(),fgm.get_order())
                ac='o'
def clean_name(self):
    """Return the title run through Search.clean_title with remove_enclosed disabled."""
    cleaned = Search.clean_title(self.title, remove_enclosed=False)
    return cleaned
"Tammi Todman", "Harley Mussell", "Iola Bordenet", "Edwardo Khela", "Myles Deanne", "Elden Dohrman", "Ira Hooghkirk", "Eileen Stigers", "Mariann Melena", "Maryrose Badura", "Ardelia Koffler", "Lacresha Kempker", "Charlyn Singley", "Lekisha Tawney", "Christena Botras", "Mike Blanchet", "Cathryn Hinkson", "Errol Shinkle", "Mavis Bhardwaj", "Sung Filipi", "Keiko Dedeke", "Lorelei Morrical", "Jimmie Lessin", "Adrianne Hercules", "Latrisha Haen", "Denny Friedeck", "Emmett Whitesell", "Sina Sauby", "Melony Engwer", "Alina Reichel", "Rosamond Shawe", "Elinore Benyard", "Sang Bouy", "Ed Aparo", "Sheri Wedding", "Sang Snellgrove", "Shaquana Sones", "Elvia Motamed", "Candice Lucey", "Sibyl Froeschle", "Ray Spratling", "Cody Mandeville", "Donita Cheatham", "Darren Later", "Johnnie Stivanson", "Enola Kohli", "Leann Muccia", "Carey Philps", "Suellen Tohonnie", "Evelynn Delucia", "Luz Kliment", "Lettie Jirjis", "Francene Klebe", "Margart Scholz", "Sarah Growden", "Glennis Gines", "Rachael Ojima", "Teofila Stample", "Narcisa Shanley", "Gene Lesnick", "Malena Applebaum", "Norma Tingey", "Marianela Mcmullen", "Rosalva Dosreis", "Dallas Heinzmann", "Sade Streitnatter", "Lea Pelzel", "Judith Zwahlen", "Hope Vacarro", "Annette Ayudan", "Irvin Cyree", "Scottie Levenstein", "Agustina Kobel", "Kira Moala", "Fawn Englar", "Jamal Gillians", "Karen Lauterborn", "Kit Karratti", "Steven Deras", "Mary Rosenberger", "Alonso Viviano" ] for n in search_names: index = Search.binary_search(name, n) print(index) index = Search.linear_search(name, n) print(index) print("---------------------________________----------------------------")
def sort_name(self):
    """Return the title cleaned with Search.clean_title's default behavior."""
    key = Search.clean_title(self.title)
    return key