def full_search(self):
    """
    Provides a common method to perform a search on the API and website.

    @warning: should be wrapped in a try/except block
    @return: a list of obituaries
    @rtype: list
    """
    name = self.rget("name", str) or None
    pob = self.rget("pob", str) or None
    pod = self.rget("pod", str) or None
    # join the string params together if they exist
    search_tokens = utils.tokenize_multi(name, pob, pod)
    logging.info(search_tokens)
    dob = self.rget("dob", self.parse_date) or None
    dod = self.rget("dod", self.parse_date) or None
    lat = self.rget("lat", float)
    lon = self.rget("lon", float)
    logging.info("Sending to search: ")
    logging.info(search_tokens)
    if lat and lon:
        # search by location
        ghash = geohash.encode(lat, lon)
        ghash_list = geohash.expand(ghash)
        precision = self.rget("precision", int) or 4
        return utils.search(search_tokens, dob, dod, ghash_list, precision)
    else:
        return utils.search(search_tokens, dob, dod)
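# A minimal sketch of what utils.tokenize_multi is assumed to do above (the
# real helper is project-specific and not shown): drop missing params, join
# the rest, and split into lowercase tokens for the search backend.
def tokenize_multi(*parts):
    joined = " ".join(p for p in parts if p)  # skips None and empty strings
    return joined.lower().split()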
def prompter(books, users, **kwargs):
    try:
        options = [
            "Search the book database for a book by Author surname",
            "Search the user database for a user by User surname",
            "Lend a book to a user",
            "Receive book borrowed from a user",
            "Exit from the system (creates new users and books database csv files)"
        ]
        length = max(len(option) for option in options)
        while True:
            print('')
            print("Press Ctrl+C to exit the system without saving")
            prompt = '> '
            print("".rjust(length + 10, '-'))
            for index, option in enumerate(options):
                print(f'{index + 1}) {option}'.rjust(16))
            print("".rjust(64, '-'))
            print(prompt, end='')
            selected = input("Select an option from the list above using the number: ")
            if selected.isdigit() and 0 < int(selected) < 6:
                if selected == "1":
                    while True:
                        author_name = input("$ Enter the Author's surname: ")
                        if not author_name.isdigit():
                            result = search(author_name, books)
                            display_result(result)
                            break
                if selected == "2":
                    while True:
                        user_name = input("$$ Enter the User's surname: ")
                        if not user_name.isdigit():
                            result = search(user_name, users)
                            display_result(result)
                            break
                if selected == "3":
                    books, users = lend_book(books, users)
                if selected == "4":
                    books, users = return_book(books, users)
                if selected == "5":
                    on_exit(books, users, kwargs["b"], kwargs["u"])
    except KeyboardInterrupt:
        print("\nExiting...")
        exit(0)
def main():
    this_txt_dir = os.path.join(txt_dir, folder[0])
    for pig in range(1, 31):
        this_pig_dir = os.path.join(image_dir, str(pig))
        for FILE in os.listdir(this_pig_dir):
            portion = os.path.splitext(FILE)
            print(portion)
            txt_name = portion[0] + txt_SUFFIX
            print(txt_name)
            u.search(txt_name)
            exit()
def _test():
    d = {
        'type': 'REST',
        'name': 'api_input_test',  # just needs to be unique
        'id': 'page',
        'location': {
            'url': lambda v: 'https://reqres.in/api/users?page={}'.format(v.get('page')),
            'errors': {
                200: "OK",
                403: "Bad request params",
                404: "File not found... ?",
                500: "Permanent fail",
            },
        },
        'data': {
            'REMAP': {
                'reqres_page': "page",
                'reqres_count': "per_page",
                'reqres_total': "total",
                'reqres_pages': "total_pages",
                'reqres_ids': ('data', 0, {
                    'id': 'id',
                    'name': lambda v: "{} {}".format(v.get('first_name'), v.get('last_name')),
                    'avatar': 'avatar',
                }),
            },
        },
    }
    urlin = Input(**d)
    rv = urlin.get(1)
    return search(('reqres_ids', 'name'), rv) == "George Bluth"
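# A minimal sketch of the nested-path lookup that search(('reqres_ids', 'name'), rv)
# is assumed to perform above (the real utils.search also handles REMAP specs and
# an `invalid` sentinel, which this sketch omits): walk a tuple of keys/indexes
# down through nested dicts and sequences.
def search(path, data):
    current = data
    for key in path:
        if isinstance(current, dict):
            current = current.get(key)
        elif isinstance(current, (list, tuple)) and isinstance(key, int):
            current = current[key]
        else:
            return None
    return current

# e.g. search(('reqres_ids', 'name'), {'reqres_ids': {'name': 'George Bluth'}})
# returns "George Bluth".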
def loan_request_local(loan_amount, loan_marr, loan_terms, probability_of_default,
                       loss_given_default, client_min_rate, client_max_rate,
                       search_samples, show: bool = False, save: str = ""):
    irr = search(loan_amount, loan_terms, search_samples, client_min_rate,
                 client_max_rate, probability_of_default, loss_given_default)
    a = Amortization(loan_amount, irr, loan_terms)
    table = a.get_enriched_table(probability_of_default, loss_given_default)
    request = ""
    if show:
        request = "Approved" if loan_marr < irr else "Rejected"
        print("Loan request for", loan_amount, "due in", loan_terms, "terms with",
              a.annuity, "fix payments and interest rate of",
              round(irr * 100, 2), "%:", request)
        print(table)
    if save.endswith(".csv"):
        table.to_csv(save)
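# A hedged sketch of the rate search used above (an assumption; the project's
# search() is not shown): sample `search_samples` candidate rates between the
# client's min and max rate and return the first one that covers the expected
# loss rate. The profitability test below is illustrative only.
import numpy as np

def search(loan_amount, loan_terms, search_samples, client_min_rate,
           client_max_rate, probability_of_default, loss_given_default):
    expected_loss_rate = probability_of_default * loss_given_default
    for rate in np.linspace(client_min_rate, client_max_rate, search_samples):
        if rate > expected_loss_rate:  # first rate that prices in expected losses
            return rate
    return client_max_rate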
def res(resource):
    # Create channel for the resource
    channel = createChannel(resource)
    # Grab the list of annotations for the resource
    annotations = search(resource)
    # Go through the annotations in the list and grab the good stuff
    for a in annotations:
        text = a['text']
        link = a['links']['incontext']
        source = a['document']['title'][0]
        # Grab the quoted text... it is sometimes in a different place,
        # hence the try/except; fall back to an error marker if not found
        quotedText = 'Error: could not find annotation'
        for i in range(0, 4):
            try:
                q = a['target'][0]['selector'][i]['exact']
            except Exception:
                continue
            if q is not None:
                quotedText = q
                break
        # Create the text that will appear in the Are.na block
        body = '>%s\n\n-- "%s" ([source](%s))\n\n%s' % (quotedText, source, link, text)
        block = createBlock(channel, body, title=None)
    # TODO: figure out what to return for real - the channel now updated w/ annotations?
    return 'See if it worked!'
def reserve(self, event, driver, msg):
    if not ut.search(event.user.username, driver, msg):
        return False
    id_profile = ut.find_css_selector_element(driver, '#id_teacher_0 a', msg)
    if id_profile is None:
        return False
    id_profile.click()
    i = 0
    id_reserve = None
    while True:
        id_event = ut.find_element_id(driver, 'id_meeting_' + str(i), msg)
        if id_event is None:
            msg.pop()
            break
        source = id_event.text
        if (event.date in source and event.begin_time in source
                and event.end_time in source and str(event.capacity) in source):
            id_reserve = ut.find_element_id(id_event, 'id_reserve_meeting', msg)
            if id_reserve is None:
                return False
            break
        i += 1
    if id_reserve is None:
        msg.append('meeting not found')
        return False
    id_reserve.click()
    return True
def findPerson(query):
    """
    Returns the name that shows up most often in the search results for the query.

    arguments: string of the question
    return: name of a person
    """
    with open("words.txt") as f:
        words = f.read()
    pages = utils.search(query)
    goodWords = []
    exp = r"[A-Z][a-z][a-z]+ [A-Z][a-z]+"  # two capitalized words, e.g. "John Smith"
    for page in pages:
        text = re.sub(r"[\t\n ]", " ", utils.get_page(page))
        for match in re.findall(exp, text):
            first, last = match.split(" ")
            # keep only candidates whose parts are not ordinary dictionary words
            if first.lower() not in words and last.lower() not in words:
                goodWords.append(match)
    wordcounts = {}
    for word in goodWords:
        wordcounts[word] = wordcounts.get(word, 0) + 1
    # pick the most frequent candidate
    return max(wordcounts, key=wordcounts.get)
def main(max_rows, cache, keywords, search_in, write_excel):
    pd.set_option("max_rows", max_rows)
    expire_after = datetime.timedelta(days=cache)
    session = init_session(expire_after)
    df = download_index(session)
    # df = add_possible_repository_url(df)
    # print(df)
    # print("Have repo like archive: %d" % df['have_repo_like_archive'].sum())
    # print("Don't have repo like archive: %d" % (len(df) - df['have_repo_like_archive'].sum()))
    # return
    # df_first = df.groupby('name').last().reset_index()
    df_last = df.groupby('name').first().reset_index()
    df_last['number_of_version'] = df.groupby('name')['version'].count()
    df_last = add_possible_repository_url(df_last)
    print(df_last[[
        'name', 'url', 'website', 'possible_url_repository',
        'have_repo_like_archive'
    ]].set_index('name'))
    print("Have repo like archive: %d" % df_last['have_repo_like_archive'].sum())
    if keywords != '':
        columns_search = search_in.split(',')
        df_search = search(keywords, columns_search, session=session, df_last=df_last)
    if write_excel:
        print("Writing Excel file")
        with pd.ExcelWriter("libraries.xlsx") as writer:
            df_last.to_excel(writer, sheet_name='Last')
            # df_first.to_excel(writer, sheet_name='First')
            df.to_excel(writer, sheet_name='All')
            if keywords != '':
                df_search.to_excel(writer, sheet_name='Search')
def vgg_search(img_url, nb_results):
    # load the index file
    index = faiss.read_index("Shiying_and_Jiwei_mini_index.index")
    # a VGG16 pre-trained model
    vgg16_model = VGG16(weights='imagenet', include_top=False)
    # vgg16_model.summary()
    # number of nearest neighbors for each descriptor
    k = 100
    # compute the query vector of descriptors
    xq = utils_vgg16.eval_vgg_with_l2_norm_compute_xq_cloud(vgg16_model, img_url)
    if xq.shape == (0,):
        return None, None, None, None, None
    # actual search:
    #   xq    - the descriptors of the query image
    #   index - the whole index of the images in the storage
    #   k     - number of nearest neighbors for each descriptor
    #   I     - the indexes of the relevant similar images
    D, I = utils.search(xq, index, k)
    # return the top N file_ids (image names) according to IC
    top_n, scores, sources = utils_evaluation.eval_retrieve_top_n(I, nb_results)
    numbers = list(range(1, 11))
    # for num in numbers:
    #     print("Result number ", num, " is: ", top_n[num - 1], " with score: ", scores[num - 1])
    # with open("search_results.txt", "w") as f:
    #     for num in numbers:
    #         idx = str(top_n[num - 1])
    #         idx1, idx2 = idx[:3], idx[3:]
    #         f.write("https://storage.cloud.google.com/evaluation_images/{}_{}.jpg\n".format(idx1, idx2))
    images = []
    for num in numbers:
        idx = str(top_n[num - 1])
        idx1 = idx[:3]
        idx2 = idx[3:]
        # e.g. https://storage.cloud.google.com/evaluation_images/111_14.jpg
        ext = "jpg" if idx1 != "444" else "png"
        images.append(
            "https://storage.cloud.google.com/evaluation_images/{}_{}.{}\n".format(
                idx1, idx2, ext))
    return images
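# A plausible reading of utils.search in the snippet above (an assumption, since
# the helper itself is not shown): a thin wrapper around FAISS's Index.search,
# which takes a float32 query matrix and returns distances D and neighbor ids I.
def search(xq, index, k):
    D, I = index.search(xq, k)  # faiss.Index.search(queries, k) -> (distances, ids)
    return D, I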
def index(request):
    if 'q' in request.GET:
        name = request.GET['q'].strip()
        results = search(name)
        return render_to_response('cazador/results.html', {'results': results})
    return render_to_response('cazador/index.html')
def get_audio_from_file(song_name):
    try:
        *_, file_name, start_on = next(utils.search(songs, _0=song_name))
    except StopIteration:
        return None
    else:
        return FFmpegPCMAudio(songs_path + file_name, options='-ss ' + start_on)
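# A minimal sketch of the record-search generator assumed above (hypothetical;
# the project's utils.search is not shown): a keyword like _0=song_name is read
# as "column 0 must equal song_name", and matching rows are yielded whole so
# the caller can unpack the trailing columns, as in
# *_, file_name, start_on = next(search(songs, _0=song_name)).
def search(records, **filters):
    for row in records:
        if all(row[int(key.lstrip('_'))] == value
               for key, value in filters.items()):
            yield row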
def search():
    sectionTemplate = "./templates/search_result.tpl"
    query = request.forms.get('q')
    # qualify the helpers with utils. -- a bare search(query) here would
    # recurse into this view instead of the search helper
    return template("./pages/index.html",
                    version=utils.getVersion(),
                    sectionData={},
                    sectionTemplate=sectionTemplate,
                    query=query,
                    results=utils.search(query))
def lookup(self, keyword):
    return_dict = utils.search(keyword)
    logging.debug(f'returned: {return_dict}')
    if return_dict:
        self.prev_dict = self.cur_word
        self.cur_word = return_dict
        self.num_shown = 0
        self.handle_print('\n' * 5 + keyword)
        self.print_cur_word()
def search_result():
    query = request.forms.get('q')
    sectionTemplate = "./templates/search_result.tpl"
    return template("./pages/index.html",
                    version=utils.getVersion(),
                    sectionTemplate=sectionTemplate,
                    query=query,
                    results=utils.search(query),
                    sectionData={})
def _find_packages_urls(release, architecture, package):
    url = f"https://packages.debian.org/{release}/{architecture}/{package}/download"
    try:
        package_url = utils.search(
            r"['\"](?P<url>https?.*?libc6.*?\.deb)['\"]", url).group("url")
    except AttributeError:
        print(utils.make_warning(f"Problems: {utils.make_bright(url)}"))
        return []
    else:
        return [package_url]
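# What utils.search is assumed to do here (an inference from the .group("url")
# call on its result, not the project's actual code): fetch the page at `url`
# and run re.search with the given pattern over the body, returning the match
# object, or None -- which is what raises the AttributeError handled above.
import re
import urllib.request

def search(pattern, url):
    with urllib.request.urlopen(url) as response:
        body = response.read().decode("utf-8", errors="replace")
    return re.search(pattern, body)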
def get_queryset(self):
    user = self.request.user
    query_param = "q"
    query_string = self.request.GET.get(query_param, "").strip()
    if not query_string:
        return BookMark.objects.filter(user=user)
    return utils.search(self.request, BookMark, ['tags', 'name'])
def get(self):
    query = urllib.parse.unquote_plus(request.args.get("query"))
    n = int(request.args.get("n"))
    response_ids = search(
        query, search_index, model, image_ids, word_to_index, index_to_wordvec, n
    )
    blank_url = (
        "https://iiif.wellcomecollection.org/image/{}.jpg/full/760,/0/default.jpg"
    )
    response_urls = [blank_url.format(id) for id in response_ids]
    return jsonify({"request": query, "response": response_urls})
def related(result_id):
    related_news = []
    related_tweets = []
    wiki_article = dict()
    news_articles = []
    form = SearchForm()
    global initial_query
    initial_search_term = initial_query
    nearby_tweets = False
    if 'near' in request.args:
        if request.args['near'] == "true":
            nearby_tweets = True
    wiki_article_solr = get_item(wiki, result_id)
    if wiki_article_solr:
        wiki_article = wiki_article_solr
        query_terms = []
        twitter_query = ""
        keywords = wiki_article.get('keywords', [])
        if not application.config.get('INDEX_KEYWORD_GENERATION'):
            keywords = extract_keywords(
                wiki_article['wiki_body'][0].encode('utf-8')).get('keywords')
        keywords.append(wiki_article["title"][0])
        print "keywords : " + str(keywords)
        query_term = ""
        # since we are favoring precision over recall
        if len(keywords) > 1:
            for t in keywords:
                query_terms += t.split()
            query_term = "+".join(query_terms)
        query_term = "title:" + wiki_article["title"][0] + "^3 news_body:" + query_term
        news_articles = search(news, query_term)
        # twitter_query = " OR ".join(query_terms)
        twitter_query = wiki_article["title"][0]
        related_tweets = search_twitter(twitter_query, nearby_tweets)
        print "tweets :" + str(related_tweets)
        if news_articles:
            news_articles = news_articles[0]
            # TODO: remove the list comprehension, it was just for design purposes
            related_news = [news_article for news_article in news_articles
                            if news_article.get('news_body')]
    return render_template('related.html', **locals())
def search():
    page_name = 'search'
    user = utils.get_user_from_cookie(request)
    search_query = request.args.get("query")
    if not search_query:
        page_content = render_template("search.html", user=user, message='')
        return render_page(page_content, page_name, user=user)
    users = utils.search(search_query)
    if not users:
        page_content = render_template("search.html",
                                       message='NO USERS FOUND :(', user=user)
        return render_page(page_content, page_name, user=user)
    page_content = render_template("search.html", message='', users=users)
    return render_page(page_content, page_name, user=user)
def search(): page_name = "search" user = utils.get_user_from_cookie(request) search_query = request.args.get('query') if not search_query: page_content = render_template('search.html', user=user, message="") return render_page(page_content, page_name, user=user) users = utils.search(search_query) if not users: page_content = render_template('search.html', message="NO USERS FOUND :(", user=user) return render_page(page_content, page_name, user=user) page_content = render_template('search.html', message="", users=users) return render_page(page_content, page_name, user=user)
def face_search():
    start = time.time()
    ensure_folder(STATIC_DIR)
    ensure_folder(UPLOAD_DIR)
    file = request.files['file']
    filename = secure_filename(file.filename)
    filename = filename.lower()
    if filename in ['jpg', 'jpeg', 'png', 'gif']:
        # secure_filename() can reduce a name like '.jpg' to the bare
        # extension; fall back to a random name in that case
        filename = str(random.randint(0, 101)) + '.' + filename
    file_upload = os.path.join(UPLOAD_DIR, filename)
    file.save(file_upload)
    resize(file_upload)
    print('file_upload: ' + file_upload)
    name, prob, file_star = search(file_upload)
    elapsed = time.time() - start
    return name, prob, file_star, file_upload, float(elapsed)
def search():
    if request.method == 'GET':
        search_key = request.args['search_text']
        if "page" in request.args:
            page = int(request.args["page"])  # query args arrive as strings
        else:
            page = 1
        books = utils.search(search_key)
        # 12 books per page
        if len(books) % 12 == 0:
            max_page = len(books) // 12
        else:
            max_page = len(books) // 12 + 1
        if page < max_page:
            display_books = books[(page - 1) * 12:page * 12]
        else:
            display_books = books[(min(page, max_page) - 1) * 12:]
        return render_template('search-result-page.html', books=display_books)
def _onkeydown(self, event):
    keycode = event.GetKeyCode()
    shiftdown = event.ShiftDown()
    keycode = cmdline.reformat(keycode, shiftdown)
    if not self._is_loading:
        name, opt = self.cmd_panel.operate(keycode, mode=self.renderer.mode)
        if name in ['booru input', 'save input']:
            if name == 'save input':
                opt = '0' + opt
            booru_ids = utils.search(self.path, opt)
            if len(booru_ids) > 10:
                booru_ids = booru_ids[:10]
            info = ''
            if booru_ids:
                info = ' Local vids:\n ' + '\n '.join(booru_ids)
            self.cmd_panel.add_info(info, mode=self.renderer.mode)
def get_results(request):
    if request.is_ajax() and request.method == "POST":
        try:
            res = json.loads(request.body)
            print res
            cords = res['cords']
            cords = cords[0:1] + cords[-1:]  # keep only start and end points
            cords = [[x['d'], x['e']] for x in cords]
            temp = [cords[0][0], cords[0][1], cords[1][0], cords[1][1]]
            distance = res['distance']
            start_place = res['start']
            end_place = res['end']
            radius = int(res['radius'])
            time = datetime.datetime.strptime(res['time'], "%m/%d/%Y %H:%M")
            matched_trips = search(cords, time, radius)
            results = []
            for trip in matched_trips:
                results.append({
                    'id': trip.id,
                    'startPlace': trip.start_place,
                    'endPlace': trip.end_place,
                    'startTime': datetime.datetime.strftime(trip.time, "%d %b, %H:%M"),
                })
            data = json.dumps({
                'cords': temp,
                'start_place': start_place,
                'end_place': end_place
            })
            request.session['data'] = data
            request.session['results'] = results
            # return render(request, 'searchResults.html', {'results': results, 'data': data})
            return HttpResponse()
        except:
            return HttpResponseBadRequest()
    else:
        return HttpResponseNotAllowed(['POST'])
def findprice(foodname, mode):
    global food
    print "Times (in ms):\n"
    # timer for debugging
    one = time.time() * 1000.0
    if mode == 0:
        results = utils.search(foodname)
    else:
        results = utils.search2(foodname)
    print "Total recipe search: " + str(time.time() * 1000.0 - one)
    # the time for the following assignments is negligible
    recipeTitle = results[0]
    ingred = results[1]
    imgURL = results[2]
    directions = results[3]
    source = results[4]
    # timer 2
    two = time.time() * 1000.0
    pricelist = []
    for ingredient in ingred:
        p, n = utils.getPrice(key, ingredient)
        if p is None:
            flash("Your search has failed. Please try another recipe.")
            return redirect('/')
        p = str(p)
        if len(p[p.find('.'):]) < 3:
            # sometimes the price shows up with one decimal place;
            # assume it lopped off a trailing 0 somehow
            p += '0'
        # getting '#food' in the ingredient names was a problem, so strip it
        pListelement = [p, n.replace("#food", "") + " (" + ingredient + ")"]
        pricelist.append(pListelement)
    timer = time.time() * 1000.0 - two
    print "Total ingredient search: " + str(timer)
    print "Average per ingredient: " + str(timer / len(pricelist))
    print "Total to find data: " + str(time.time() * 1000.0 - one)
    food = foodname
    return render_template("pricer.html", foodname=foodname, title=recipeTitle,
                           sURL=source, ingredients=ingred, imgURL=imgURL,
                           prices=pricelist, directions=directions, mode=mode)
def solve(grid, format='string'):
    if len(grid) != 81:
        print 'ERROR: Sudoku length is not proper'
        sys.exit()
    values = search(grid_values(grid))
    if '' in values.values():
        print 'INFO: Invalid Sudoku'
        sys.exit()
    format_output = ""
    for row in row_units:
        for cell in row:
            format_output += values[cell]
    if format == 'grid':
        display(values)
    else:
        print format_output
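# Hypothetical usage of solve() above, assuming grid_values() accepts an
# 81-character string read row by row with '0' marking empty cells (the
# puzzle below is just an example input):
if __name__ == '__main__':
    puzzle = ('003020600'
              '900305001'
              '001806400'
              '008102900'
              '700000008'
              '006708200'
              '002609500'
              '800203009'
              '005010300')
    solve(puzzle, format='grid')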
def results():
    qt = request.args.get('q')
    query_terms = qt.split()
    query_term = "+".join(query_terms)
    form = SearchForm()
    query_term = "title:" + query_term + "^3 wiki_body:" + query_term
    print query_term
    sear = search(wiki, query_term, hl="true")
    error_message = "No results found for your search!"
    global initial_query
    initial_search_term = initial_query
    # for the "Did you mean?" section, e.g.
    # http://localhost:8983/solr/wikiArticleCollection/spell?q=alternatie&wt=json&indent=true
    params = urllib.urlencode({'q': initial_query, 'wt': "json", 'indent': "true"})
    print params
    did_you_mean = urllib.urlopen(
        "http://localhost:8983/solr/wikiArticleCollection/spell?%s" % params)
    did_you_mean_object = json.load(did_you_mean)
    did_you_mean_words = []
    try:
        for word in did_you_mean_object["spellcheck"]["suggestions"][1]["suggestion"]:
            did_you_mean_words.append(word["word"])
    except:
        did_you_mean_words = []
    # currently returning only results that were highlighted
    if sear:
        search_results = sear[0]
        result_snippets = sear[1]
        for res in search_results:
            _id = res['id']
            if _id in result_snippets:
                try:
                    res['wiki_body'] = "...".join(result_snippets[_id].get('wiki_body', []))
                except:
                    del search_results[_id]
            else:
                del search_results[_id]
        if search_results:
            if len(search_results) > 0:
                error_message = ""
    return render_template('results.html', **locals())
def __call__(self, data):
    """
    Frankenstein an output together from data-parts. This makes Mapper
    'callable', so a Mapper is a valid field value when defining a Mapper.
    Not sure how you could, but don't make cycles.
    """
    logger.debug("mapper: {} data: {}".format(self.remap, data))
    from utils import search
    # remap the input data (effectively deleting anything not listed)
    if self.remap:
        rv = search(self.remap, data, invalid=self.invalid)
    elif self.discard and isinstance(data, dict):
        rv = deepcopy(data)
    else:
        rv = {}
    # add the static fields
    if self.fields:
        eval_field(rv, self.fields, rv or data)
    # delete any discards
    self._discards(self.discard, rv)
    return rv
def get(self, **kwargs):
    result_id = kwargs.get('wiki_id')
    related_news = []
    related_tweets = []
    wiki_article = dict()
    news_articles = []
    nearby = False
    wiki_article_solr = get_item(wiki, result_id)
    if wiki_article_solr:
        wiki_article = wiki_article_solr
        query_terms = []
        twitter_query = ""
        keywords = wiki_article.get('keywords', [])
        # TODO: For large documents, get keywords for random parts of the
        # document only, to keep the kw list short enough
        # try:
        #     keywords = get_keywords(wiki_article['wiki_body'][0])
        # except:
        #     keywords = get_keywords(wiki_article['wiki_body'][0].decode())
        # logger.info(keywords)
        # if not application.config.get('INDEX_KEYWORD_GENERATION'):
        #     keywords = extract_keywords(wiki_article['wiki_body'][0].encode('utf-8')).get('keywords')
        # logger.info(keywords)
        twitter_query = wiki_article["title"][0]
        # since we are favoring precision over recall
        query_terms = ["\"" + t + "\"" for t in keywords]
        query_terms = wiki_article['title'][0].split()
        query_term = "+".join(query_terms)
        if query_term:
            news_articles = search(news, query_term, defType="edismax",
                                   mm='2<-25%25', ps=3,
                                   qf="title^20.0+keywords^20.0+body^2.0",
                                   pf="title^20.0+keywords^20.0+body^20.0")[0]
        related_tweets = search_twitter(twitter_query, nearby)
        related_tweets = [html_parser.unescape(tweet) for tweet in related_tweets]
    return dict(related_news=news_articles[:3], wiki_article=wiki_article,
                related_tweets=related_tweets), 200
def login():
    new_user = {}
    # email
    new_user['email'] = input("Enter your email: ")
    while not utils.check(new_user['email'], 'email'):
        new_user['email'] = input("Enter your email: ")
    # password (re-prompt until valid, mirroring the email loop above)
    new_user['password'] = input("Enter your password: ")
    while not utils.check(new_user['password'], 'password'):
        new_user['password'] = input("Enter your password: ")
    found = search(dbHandler.users, new_user['email'], new_user['password'])
    if not found:
        print('Wrong email or password')
        return None
    print('Correct!')
    globals.current_user = found
    print(found)
def _find_packages_urls(release, architecture, package):
    url = f"https://launchpad.net/ubuntu/{release}/{architecture}/{package}"
    packages_versions = set(
        utils.findall(
            fr'"/ubuntu/.+?/{package}/(?P<version>.+?)(?:\.\d+)?"', url))
    if not packages_versions:
        print(utils.make_warning(f"Problems: {utils.make_bright(url)}"))
        return []
    n = 3
    most_recent_packages_versions = sorted(packages_versions, reverse=True)[:n]
    packages_urls = [
        utils.search(
            r"['\"](?P<url>https?.*?libc6.*?\.deb)['\"]",
            f"https://launchpad.net/ubuntu/{release}/{architecture}/{package}/{package_filename}",
        ).group("url")
        for package_filename in most_recent_packages_versions
    ]
    if not packages_urls:
        print(utils.make_warning(f"Problems: {utils.make_bright(url)}"))
        return []
    return packages_urls
def loan_request_cloud(sheet_id, show: bool = False):
    rate = RateValue(spreadsheet_id=sheet_id)
    request_value = RequestValue(spreadsheet_id=sheet_id)
    values = ConfigTable(spreadsheet_id=sheet_id)
    table_google = ResultTable(spreadsheet_id=sheet_id)
    irr = search(values.loan_amount, values.loan_terms, values.search_samples,
                 values.client_min_rate, values.client_max_rate,
                 values.probability_of_default, values.loss_given_default)
    a = Amortization(values.loan_amount, irr, values.loan_terms)
    table = a.get_enriched_table(values.probability_of_default,
                                 values.loss_given_default)
    rate.update(irr)
    request = "Approved" if values.loan_marr < irr else "Rejected"
    request_value.update(request)
    if show:
        print("Loan request for", values.loan_amount, "due in", values.loan_terms,
              "terms with", a.annuity, "fix payments and interest rate of",
              round(irr * 100, 2), "%:", request)
        print(table)
    table = table.fillna("-")
    table_2 = [table.columns[:, ].values.astype(str).tolist()] + table.values.tolist()
    table_google.update(table_2)
def data(request):
    """ Returns data for datatables """
    user = request.user
    secho = int(request.POST.get('sEcho', 0)) + 1
    return_dict = {"aaData": [], "iTotalRecords": 0,
                   "iTotalDisplayRecords": 0, "sEcho": secho}
    query = None
    sort = None
    sort_reverse = False
    qfilter = ''
    basic_greenstone = True
    search_config = Config.get_or_create("search")
    if 'raw_query' in request.POST:
        query, sort_raw, query_greenstone = \
            request.POST['raw_query'].split("||", 2)
        if sort_raw:
            sort = sort_raw[1:]
            sort_reverse = (sort_raw[0] == '\\')
        # make_advanced_query() doesn't handle greenstone filtering.
        # Instead, it leaves a placeholder that is replaced here.
        # This is to leave all the querying to the second request
        # (this one), and only lucene query building to the first.
        query = query.replace(
            GREENSTONE_NEWSEARCH_PLACEHOLDER,
            "(%s)" % (' OR '.join(
                "urn:%s" % x['nodeID']
                for x in greenstone_query("", "", query_greenstone))))
        basic_greenstone = False
    elif 'filter' in request.POST:
        qfilter = sanitize_lucene(request.POST['filter'])
        qfilter = request.POST['filter']
        reference = request.POST.get('reference', 0)
        filter_item_type = request.POST.get('item_type', None)
        query = make_query(search_config, qfilter, reference, filter_item_type)
    elif 'filtered' in request.POST:
        qfilter = sanitize_lucene(request.POST['filtered'])
        reference = request.POST.get('reference', 0)
        filter_item_type = request.POST.get('item_type', None)
        query = make_query(search_config, qfilter, reference, filter_item_type)
    if not query:
        print "search failed: query = %s" % query
        return HttpResponse(simplejson.dumps(return_dict),
                            mimetype='application/javascript')
    show_config = {}
    # Result Display Configuration
    for item in search_config.values.values():
        item_type = item['type']
        fields = OrderedDict()
        for field in item['fields']:
            if field['show']:
                more = field.get('more', False)
                existence = field.get('exist', False)
                fields[field['field']] = (field['name'], more, existence)
        show_config[item_type] = fields
    try:
        docs = search('search/by_field', q=query,
                      include_fields="022_a,020_a,urn,_id,existence",
                      limit=133742)
        docs = list(docs)
    except RequestFailed:
        print "search failed: request failed"
        return HttpResponse(simplejson.dumps(return_dict),
                            mimetype='application/javascript')
    db = get_db('couchflow')
    # group uniq docs by urn
    uniq_docs = {}
    if basic_greenstone:
        greenstone_urns = [x['nodeID']
                           for x in greenstone_query("", "", qfilter)]
        greenstone_docs = db.view("couchflow/by_urn", keys=greenstone_urns)
        for doc in greenstone_docs:
            urn = doc['key']
            uniq_docs.setdefault(urn, {'count': 0, 'id': None, "existences": []})
            uniq_docs[urn]['id'] = doc['id']
            #uniq_docs[urn]['count'] += 1
            uniq_docs[urn]['greenstone'] = True
    for doc in docs:
        try:
            urn = doc['fields']['urn']
        except KeyError:
            urn = None
        if urn is None or urn == 'undefined':
            print "Item should have urn", doc['id']
            continue
        # TODO: check if should be a list
        if type(urn) is list:
            urn = urn[0]
        uniq_docs.setdefault(urn, {'count': 0, 'id': None, "existences": []})
        if doc['fields']['existence'] != "false":
            uniq_docs[urn]['existences'].append(doc['id'])
            uniq_docs[urn]['count'] += 1
        else:
            uniq_docs[urn]['id'] = doc['id']
    columns = []
    start = int(request.POST['iDisplayStart'])
    length = int(request.POST['iDisplayLength'])
    #sort_col = int(request.POST['iSortCol_0'])
    #sort_dir = request.POST['sSortDir_0']
    count = len([u for u in uniq_docs.values() if u["id"]])
    sorted_uniq_docs = uniq_docs.values()
    if basic_greenstone:
        sorted_uniq_docs.sort(key=lambda x: 'greenstone' not in x)
    keys = [doc['id'] for doc in sorted_uniq_docs[start:start + length]]
    keys_exist = [doc['existences']
                  for doc in sorted_uniq_docs[start:start + length]]
    keys_exist = [item for sublist in keys_exist for item in sublist]

    def _get_field(field, doc):
        subfield = ""
        if '_' in field:
            field, subfield = field.split('_')
        #field_value = doc.fields_properties.get(field, None)
        try:
            field_value = doc["fields_properties"][field]
        except KeyError:
            field_value = ""
        if field_value and len(field_value['list']):
            field_value = field_value['list'][0]
        if subfield:
            field_value = field_value['subfields'].get(subfield, "")
        if field_value and 'exec_value' in field_value and field_value['exec_value']:
            exec_val = field_value['exec_value'][0]
            if not isinstance(exec_val, basestring):
                if exec_val is None:
                    exec_val = ""
                exec_val = str(exec_val)
            return exec_val
        return ""

    # get existences
    existences = {}
    for doc in db.view('_all_docs', keys=keys_exist, include_docs=True):
        existences[doc["doc"]["_id"]] = doc["doc"]
    for doc in db.view('_all_docs', keys=keys, include_docs=True):
        #doc = WFItem.wrap(doc['doc'])
        if not "doc" in doc:
            continue
        doc = doc["doc"]
        show_item_config = show_config.get(doc['item_type'], None)
        if not show_item_config:
            print 'Search config missing for', doc['item_type']
            continue
        try:
            img_name = doc['fields_properties']['5000']['list'][0]['exec_value'][0]
        except Exception, error:
            print 'Image not found', error
            img_name = 'none.png'
        img_path = "/couchflow/get_attach/couchflow/%s/%s" % (doc['_id'], img_name)
        row = [doc['_id'], '<img style="width:80px" src="%s"/>' % img_path]
        data = ''
        for field, (name, more, existence) in show_item_config.iteritems():
            if existence:
                continue
            field_value = _get_field(field, doc)
            if not field_value:
                continue
            row_value = '%s: %s' % (name, field_value)
            row_value = row_value.replace('/', '').replace(',', '')
            more_class = ' class="search_more"' if more else ''
            data += '<div%s>%s</div>' % (more_class, row_value)
        doc_urn = get_urn(doc)
        if not doc_urn:
            print "Invalid Item, need a urn", doc["_id"]
            continue
        if not doc['reference']:
            data += 'Disponibles: %s<br>' % uniq_docs[doc_urn]['count']
            if uniq_docs[doc_urn]["existences"]:
                data += "<br><h3 class='search_more'>Ejemplares</h3>"
            # Add Existences
            for e in uniq_docs[doc_urn]["existences"]:
                if existences.get(e, False):
                    data += "<div id='%s' class='existence search_more'>" % e
                    for field, (name, more, exist_conf) in \
                            show_item_config.iteritems():
                        if exist_conf:
                            field_value = _get_field(field, existences[e])
                            if not field_value:
                                field_value = ""
                            row_value = '%s: %s' % (name, field_value)
                            row_value = row_value.replace('/', '').replace(',', '')
                            more_class = ' class="search_more"' if more else ''
                            data += '<div%s>%s</div>' % (more_class, row_value)
                    data += "</div>"
        row.append(data)
        row.append('')
        sort_value = None
        if sort:
            sort_value = _get_field(sort, doc)
        columns.append((sort_value, row))
def get_queryset(self):
    user = self.request.user
    return utils.search(self.request, BookMark, ['tags', 'name'])
def reference_complete(request):
    """ Returns data for reference complete """
    query = request.GET.get("term")
    qfilter = sanitize_lucene(query)
    search_config = Config.get_or_create("search")
    # autoridades
    query = make_query(search_config, qfilter, True,
                       "05a721a33096563ec44d8da885fa1a30")
    show_config = {}
    result = []
    # Result Display Configuration
    for item in search_config.values.values():
        item_type = item['type']
        fields = OrderedDict()
        for field in item['fields']:
            if field['show']:
                more = field.get('more', False)
                fields[field['field']] = (field['name'], more)
        show_config[item_type] = fields
    try:
        docs = search('search/by_field', q=query,
                      include_fields="022_a,020_a,urn,_id", limit=133742)
        docs = list(docs)
    except RequestFailed:
        print "Fail!"
        print "QQ", query
    # group uniq docs by urn
    uniq_docs = {}
    for doc in docs:
        try:
            urn = doc['fields']['urn']
        except KeyError:
            print "Item should have urn"
            continue
        # TODO: check if should be a list
        if type(urn) is list:
            urn = urn[0]
        uniq_docs.setdefault(urn, {'count': 0, 'id': None})
        uniq_docs[urn]['id'] = doc['id']
        uniq_docs[urn]['count'] += 1
    db = get_db('couchflow')
    keys = [doc['id'] for doc in uniq_docs.values()]

    def _get_field(field, doc):
        subfield = ""
        if '_' in field:
            field, subfield = field.split('_')
        #field_value = doc.fields_properties.get(field, None)
        try:
            field_value = doc["fields_properties"][field]
        except KeyError:
            field_value = ""
        if field_value and len(field_value['list']):
            field_value = field_value['list'][0]
        if subfield:
            field_value = field_value['subfields'].get(subfield, "")
        if field_value and 'exec_value' in field_value and field_value['exec_value']:
            exec_val = field_value['exec_value'][0]
            if not isinstance(exec_val, basestring):
                if exec_val is None:
                    exec_val = ""
                exec_val = str(exec_val)
            return exec_val
        return ""

    for doc in db.view('_all_docs', keys=keys, include_docs=True):
        #doc = WFItem.wrap(doc['doc'])
        doc = doc["doc"]
        show_item_config = show_config.get(doc['item_type'], None)
        if not show_item_config:
            print 'Unknown', doc['item_type']
            continue
        field = _get_field('700_a', doc)
        result.append({'label': field})
        #data = ''
        #for field, (name, more) in show_item_config.iteritems():
        #    row_value = '%s: %s' % (name, _get_field(field, doc))
        #    row_value = row_value.replace('/', '').replace(',', '')
        #    data += '<div>%s</div>' % row_value
        #doc_urn = get_urn(doc)
        #if not doc_urn:
        #    print "Invalid Item, need a urn", doc["_id"]
        #    continue
        #result.append(data)
    return HttpResponse(simplejson.dumps(result),
                        mimetype='application/javascript')
def get(self, **kwargs):
    parser.add_argument('q', type=str)
    parser.add_argument('page', type=int)
    parser.add_argument('rows', type=int)
    args = parser.parse_args()
    qt = args.get('q')
    if not qt:
        qt = '*:*'
    rows = args.get('rows')
    if not rows:
        rows = 10
    page = args.get('page')
    if not page:
        page = 1
    query_terms = qt.split()
    query_term = query_terms[0]
    if len(query_terms) > 1:
        query_term = "+".join(query_terms)
    search_results = None
    search_results_ = search(wiki, query_term, page=page, rows=rows,
                             defType="edismax", mm=2, ps=3,
                             qf="title^20.0+body^10.0",
                             pf="title^20.0+body^20.0")
    num_results = 0
    error_message = "No results found for your search!"
    if search_results_:
        search_results = search_results_[0]
        result_snippets = search_results_[1]
        has_next = search_results_[2] > 1
        has_previous = (page - 1) > 0
        num_results = search_results_[3]
        for res in search_results:
            _id = res['id']
            if _id in result_snippets:
                try:
                    res['wiki_body'] = "...".join(result_snippets[_id].get('wiki_body', []))
                except:
                    del search_results[_id]
            else:
                del search_results[_id]
    params = urllib.urlencode({'q': qt, 'wt': "json", 'indent': "true"})
    did_you_mean = urllib.urlopen(
        "http://localhost:8983/solr/wikiArticleCollection/spell?%s" % params)
    did_you_mean_object = json.load(did_you_mean)
    did_you_mean_words = []
    try:
        for word in did_you_mean_object["spellcheck"]["suggestions"][1]["suggestion"]:
            did_you_mean_words.append(word["word"])
    except:
        did_you_mean_words = []
    if search_results:
        if len(search_results) > 0:
            error_message = ""
    suggested_terms = did_you_mean_words
    logger.info('returning %s results' % num_results)
    return dict(search_results=search_results, error_message=error_message,
                query_term=qt, current_page=page, num_results=num_results,
                suggested_terms=suggested_terms), 200
from utils import (
    youtube_authenticate,
    get_video_details,
    print_video_infos,
    search
)

if __name__ == "__main__":
    # authenticate to the YouTube API
    youtube = youtube_authenticate()
    # search for the query 'python' and retrieve 2 items only
    response = search(youtube, q="python", maxResults=2)
    items = response.get("items")
    for item in items:
        # get the video ID
        video_id = item["id"]["videoId"]
        # get the video details
        video_response = get_video_details(youtube, id=video_id)
        # print the video details
        print_video_infos(video_response)
        print("=" * 50)