def character_search(**args):
    info = to_dict(args)
    for field in ['modulus', 'conductor', 'order']:
        info[field] = info.get(field, '')
    query = {}
    if 'natural' in args:
        # Jump straight to the character with the given "modulus.number" label.
        label = info.get('natural', '')
        try:
            modulus = int(str(label).partition('.')[0])
            number = int(str(label).partition('.')[2])
        except ValueError:
            return "<span style='color:red;'>ERROR: bad query</span>"
        return redirect(url_for("render_webpage_label", modulus=modulus, number=number))
    else:
        for field in ['modulus', 'conductor', 'order']:
            if info.get(field):
                query[field] = parse_range(info[field])
        info["bread"] = [('Dirichlet Characters', url_for("render_Character")),
                         ('search results', ' ')]
        info['credit'] = 'Sage'
        if query:
            from sage.modular.dirichlet import DirichletGroup
            info['contents'] = charactertable(query)
            info['title'] = 'Dirichlet Characters'
            return render_template("dirichlet_characters/character_search.html", **info)
        else:
            return "<span style='color:red;'>ERROR: bad query</span>"
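# None of the search handlers in this section define parse_range themselves;
# they assume a shared helper that turns a search-box string such as "50",
# "2-100" or "2,4,16-32" into either a plain value or a MongoDB-style range
# query. The sketch below is a plausible reconstruction under those
# assumptions, not the original LMFDB implementation:
def parse_range(arg, parse_singleton=int):
    """Parse "a", "a-b", or a comma-separated mix into a value or Mongo query."""
    if isinstance(arg, parse_singleton):
        return arg
    if ',' in arg:
        # several alternatives: combine them with $or
        return {'$or': [parse_range(a, parse_singleton) for a in arg.split(',')]}
    if '-' in arg[1:]:  # start at index 1 so a leading minus sign still parses
        ix = arg.index('-', 1)
        start, end = arg[:ix], arg[ix + 1:]
        q = {}
        if start:
            q['$gte'] = parse_singleton(start)
        if end:
            q['$lte'] = parse_singleton(end)
        return q
    return parse_singleton(arg)

# e.g. parse_range('2-100') -> {'$gte': 2, '$lte': 100}
#      parse_range('7')     -> 7
#      parse_range('-5')    -> -5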
def character_search(**args):
    # import base
    info = to_dict(args)
    query = {}
    if 'natural' in args:
        # Jump straight to the character with the given "modulus.number" label.
        label = info.get('natural', '')
        modulus = int(str(label).partition('.')[0])
        number = int(str(label).partition('.')[2])
        return redirect(url_for("render_webpage_label", modulus=modulus, number=number))
    else:
        for field in ['modulus', 'conductor', 'order']:
            if info.get(field):
                query[field] = parse_range(info[field])
        info["bread"] = [('Dirichlet Characters', url_for("render_Character")),
                         ('search results', ' ')]
        info['credit'] = 'Sage'
        if query:
            count_default = 100
            if info.get('count'):
                try:
                    count = int(info['count'])
                except ValueError:
                    count = count_default
            else:
                info['count'] = count_default
                count = count_default
            start_default = 0
            if info.get('start'):
                try:
                    start = int(info['start'])
                    if start < 0:
                        # shift a negative start forward by whole pages
                        # (relies on Python 2 floor division)
                        start += (1 - (start + 1) / count) * count
                except ValueError:
                    start = start_default
            else:
                start = start_default
            from sage.modular.dirichlet import DirichletGroup
            t, texname, number, length = charactertable(query)
            info['characters'] = t
            info['texname'] = texname
            info['number'] = number
            info['len'] = length
            if start >= length:
                # pull start back by whole pages so it lands inside the results
                start -= (1 + (start - length) / count) * count
            if start < 0:
                start = 0
            info['start'] = start
            info['title'] = 'Dirichlet Characters'
            return render_template("dirichlet_characters/character_search.html", **info)
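# The count/start parsing and page-clamping above reappears almost verbatim
# in elliptic_curve_search and number_field_search below. A small helper
# capturing the shared logic (a refactoring sketch, not part of the original
# code; the arithmetic relies on Python 2 floor division):
def parse_pagination(info, nres, count_default=100, start_default=0):
    try:
        count = int(info.get('count') or count_default)
    except ValueError:
        count = count_default
    info['count'] = count
    try:
        start = int(info.get('start') or start_default)
    except ValueError:
        start = start_default
    if start < 0:
        # shift forward by whole pages until non-negative
        start += (1 - (start + 1) / count) * count
    if start >= nres:
        # pull back by whole pages until it points inside the result set
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0
    info['start'] = start
    return count, start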
def get_imgs(url, title=None, customWidget=None):
    url = clean_url(url)
    if 's=view' in url and 'page=favorites' not in url:
        raise NotImplementedError('Not Implemented')
    if 'page=dapi' not in url.lower():
        # Rewrite a gallery URL into an equivalent dapi (XML API) query.
        tags = get_tags(url).replace(' ', '+')
        url = "https://gelbooru.com/index.php?page=dapi&s=post&q=index&tags={}&pid={}&limit={}".format(tags, 0, LIMIT)

    if customWidget is not None:
        print_ = customWidget.print_
    else:
        def print_(*values):
            sys.stdout.writelines(values + ('\n',))

    # Range
    if customWidget is not None:
        range_pid = customWidget.range
    else:
        range_pid = None
    if range_pid is not None:
        max_pid = max(parse_range(range_pid, max=100000))
    else:
        max_pid = 2000

    imgs = []
    url_imgs = set()
    for p in range(100):
        url = setPage(url, p)
        #print_(url)
        html = downloader.read_html(url)
        soup = BeautifulSoup(html, 'html.parser')
        posts = soup.findAll('post')
        if not posts:
            break
        for post in posts:
            url_img = post.attrs['file_url']
            if url_img not in url_imgs:
                url_imgs.add(url_img)
                id = post.attrs['id']
                img = Image(id, url_img)
                imgs.append(img)
            if len(imgs) >= max_pid:
                break
        if len(imgs) >= max_pid:
            break
        if customWidget is not None and not customWidget.alive:
            break
        if customWidget is not None:
            # update the UI title with progress; tr_(u'읽는 중...') == "Reading..."
            customWidget.exec_queue.put(
                (customWidget, u"customWidget.setTitle(u'{} {} - {}')".format(
                    tr_(u'읽는 중...'), title, len(imgs))))
    return imgs
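# Both get_imgs variants assume a setPage helper that rewrites the pid=
# parameter of a gelbooru URL. A minimal sketch (name and behaviour inferred
# from the call sites, so treat it as an assumption):
import re

def setPage(url, pid):
    if 'pid=' in url:
        return re.sub(r'pid=[0-9]*', 'pid={}'.format(pid), url)
    # gelbooru index.php URLs always carry a query string already
    return url + '&pid={}'.format(pid)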
def get_plot_functions(self):
    # Here parse_range is expected to return a dict with 'min', 'max' and
    # 'interval' keys, unlike the query-string variant used elsewhere.
    range_t = utils.parse_range(self.data.get('range'))
    t_min = range_t.get('min')
    t_max = range_t.get('max')
    t_interval = range_t.get('interval')
    function_x = utils.parse_curve_function(self.data.get('function_x'))
    function_y = utils.parse_curve_function(self.data.get('function_y'))
    xs = []
    ys = []
    for i in np.arange(t_min, t_max, t_interval):
        # this t is needed for parametric curves: the parsed expressions
        # may reference it when eval'd below
        t = float(i)
        x = eval(function_x) + self.translation[0]
        y = eval(function_y) + self.translation[1]
        xs.append(x)
        ys.append(y)
    return xs, ys
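# A plausible sketch of the dict-returning utils.parse_range assumed above;
# the "min:max:step" input syntax is an illustration only, not the real
# utils module:
def parse_range(spec):
    """Parse e.g. "0:6.28:0.01" into {'min': ..., 'max': ..., 'interval': ...}."""
    t_min, t_max, t_interval = (float(part) for part in spec.split(':'))
    return {'min': t_min, 'max': t_max, 'interval': t_interval}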
def resize_sample_folder(sample_folder, output_folder, skip_layers, ratio=2, invert=True):
    section_folders = sorted(glob.glob(os.path.join(sample_folder, '*')))
    layer = 0
    skipped_layers = utils.parse_range(skip_layers)
    for section_folder in section_folders:
        if os.path.isdir(section_folder):
            layer += 1
            if layer in skipped_layers:
                print("Skipping layer {}".format(layer))
                continue
            print("Resizing section_folder:{}".format(section_folder))
            section_folder_name = section_folder.split(os.path.sep)[-1]
            output_section_folder = os.path.join(output_folder, section_folder_name)
            resize_section_folder(section_folder, output_section_folder,
                                  ratio=ratio, invert=invert)
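# The parse_range used by the alignment scripts here and below is a third
# variant again: it expands a layer spec into a collection that supports
# "layer in skipped_layers". A minimal sketch (the "2,5-7" syntax is an
# assumption):
def parse_range(spec):
    layers = set()
    if not spec:
        return layers
    for part in spec.split(','):
        if '-' in part:
            lo, hi = part.split('-')
            layers.update(range(int(lo), int(hi) + 1))
        else:
            layers.add(int(part))
    return layers

# e.g. parse_range("2,5-7") -> set([2, 5, 6, 7])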
#after_bbox_dir = os.path.join(args.workspace_dir, "after_bbox")
#create_dir(after_bbox_dir)
sifts_dir = os.path.join(args.workspace_dir, "sifts")
create_dir(sifts_dir)
matched_sifts_dir = os.path.join(args.workspace_dir, "matched_sifts")
create_dir(matched_sifts_dir)
after_ransac_dir = os.path.join(args.workspace_dir, "after_ransac")
create_dir(after_ransac_dir)

all_layers = []
layer_to_sifts = {}
layer_to_ts_json = {}
layer_to_json_prefix = {}
layer_meshes_dir = {}
skipped_layers = parse_range(args.skip_layers)

bbox_suffix = "_bbox"

for tiles_fname in glob.glob(os.path.join(args.input_dir, '*.json')):
    tiles_fname_prefix = os.path.splitext(os.path.basename(tiles_fname))[0]

    # read the layer from the file
    layer = read_layer_from_file(tiles_fname)

    # restrict to the requested layer range, if any
    if args.from_layer != -1:
        if layer < args.from_layer:
            continue
    if args.to_layer != -1:
        if layer > args.to_layer:
            continue
def hilbert_modular_form_search(**args):
    C = getDBConnection()
    C.hmfs.forms.ensure_index([('level_norm', pymongo.ASCENDING),
                               ('label', pymongo.ASCENDING)])
    info = to_dict(args)  # what has been entered in the search boxes
    if 'label' in info:
        args = {'label': info['label']}
        return render_hmf_webpage(**args)
    query = {}
    for field in ['field_label', 'weight', 'level_norm', 'dimension']:
        if info.get(field):
            if field == 'weight':
                # a single integer is treated as a parallel weight;
                # anything else is parsed as a list
                try:
                    parallelweight = int(info[field])
                    query['parallel_weight'] = parallelweight
                except ValueError:
                    query[field] = str(parse_list(info[field]))
            elif field == 'field_label':
                query[field] = parse_field_string(info[field])
            elif field == 'dimension':
                query[field] = parse_range(str(info[field]))
            elif field == 'level_norm':
                query[field] = parse_range(info[field])
            else:
                query[field] = info[field]
    if info.get('count'):
        try:
            count = int(info['count'])
        except ValueError:
            count = 100
    else:
        info['count'] = 100
        count = 100
    info['query'] = dict(query)
    res = C.hmfs.forms.find(query).sort([('level_norm', pymongo.ASCENDING),
                                         ('label', pymongo.ASCENDING)]).limit(count)
    nres = res.count()
    if nres > 0:
        info['field_pretty_name'] = field_pretty(res[0]['field_label'])
    else:
        info['field_pretty_name'] = ''
    info['number'] = nres
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count:
            info['report'] = 'displaying first %s of %s matches' % (count, nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    res_clean = []
    for v in res:
        v_clean = {}
        v_clean['field_label'] = v['field_label']
        v_clean['short_label'] = v['short_label']
        v_clean['label'] = v['label']
        v_clean['level_ideal'] = teXify_pol(v['level_ideal'])
        v_clean['dimension'] = v['dimension']
        res_clean.append(v_clean)
    info['forms'] = res_clean
    t = 'Hilbert Modular Form search results'
    bread = [('Hilbert Modular Forms', url_for("hilbert_modular_form_render_webpage")),
             ('Search results', ' ')]
    properties = []
    return render_template("hilbert_modular_form/hilbert_modular_form_search.html",
                           info=info, title=t, properties=properties, bread=bread)
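# parse_list, assumed above (and in number_field_search below) for list-valued
# search fields, presumably turns a bracketed string like "[2,4]" into a
# Python list. A minimal sketch under that assumption:
def parse_list(s):
    s = str(s).strip('[] ')
    if not s:
        return []
    return [int(a) for a in s.split(',')]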
def elliptic_curve_search(**args):
    info = to_dict(args)
    query = {}
    if 'jump' in args:
        label = info.get('label', '')
        m = cremona_label_regex.match(label)
        if m:
            N, iso, number = m.groups()
            if number:
                return render_curve_webpage_by_label(label=label)
            else:
                return render_isogeny_class(str(N) + iso)
        else:
            query['label'] = label
    for field in ['conductor', 'torsion', 'rank']:
        if info.get(field):
            query[field] = parse_range(info[field])
    #if info.get('iso'):
    #    query['isogeny'] = parse_range(info['isogeny'], str)
    if 'optimal' in info and info['optimal'] == 'on':
        query['number'] = 1
    info['query'] = query
    count_default = 100
    if info.get('count'):
        try:
            count = int(info['count'])
        except ValueError:
            count = count_default
    else:
        info['count'] = count_default
        count = count_default
    start_default = 0
    if info.get('start'):
        try:
            start = int(info['start'])
            if start < 0:
                start += (1 - (start + 1) / count) * count
        except ValueError:
            start = start_default
    else:
        start = start_default
    cursor = base.getDBConnection().ellcurves.curves.find(query)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0
    res = cursor.sort([('conductor', ASCENDING), ('iso', ASCENDING),
                       ('number', ASCENDING)]).skip(start).limit(count)
    info['curves'] = res
    info['format_ainvs'] = format_ainvs
    info['number'] = nres
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (
                start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    credit = 'John Cremona'
    t = 'Elliptic Curves'
    bread = [('Elliptic Curves', url_for("rational_elliptic_curves")),
             ('Search Results', '.')]
    return render_template("elliptic_curve/elliptic_curve_search.html",
                           info=info, credit=credit, bread=bread, title=t)
def number_field_search(**args):
    info = to_dict(args)
    if 'natural' in info:
        field_id = parse_field_string(info['natural'])
        return render_field_webpage({'label': field_id})
    query = {}
    for field in ['degree', 'signature', 'discriminant', 'class_number',
                  'class_group', 'galois_group']:
        if info.get(field):
            if field in ['class_group', 'signature']:
                query[field] = parse_list(info[field])
            elif field == 'galois_group':
                query[field] = complete_group_code(info[field])
            else:
                # accept both "a..b" and "a-b" as range syntax
                ran = info[field].replace('..', '-')
                query[field] = parse_range(ran)
    if info.get('ur_primes'):
        ur_primes = [int(a) for a in str(info['ur_primes']).split(',')]
    else:
        ur_primes = []
    if info.get('count'):
        try:
            count = int(info['count'])
        except ValueError:
            count = 10
    else:
        info['count'] = 10
        count = 10
    if info.get('start'):
        try:
            start = int(info['start'])
            if start < 0:
                start += (1 - (start + 1) / count) * count
        except ValueError:
            start = 0
    else:
        start = 0
    info['query'] = dict(query)
    if 'lucky' in args:
        import base
        C = base.getDBConnection()
        one = C.numberfields.fields.find_one(query)
        if one:
            label = one['label']
            return render_field_webpage({'label': label})
    if 'discriminant' in query:
        import base
        C = base.getDBConnection()
        res = C.numberfields.fields.find(query).sort(
            [('degree', pymongo.ASCENDING), ('signature', pymongo.DESCENDING),
             ('discriminant', pymongo.ASCENDING)])  # TODO: pages
        nres = res.count()
    else:
        # find matches with negative discriminant:
        neg_query = dict(query)
        neg_query['discriminant'] = {'$lt': 0}
        import base
        C = base.getDBConnection()
        res_neg = C.numberfields.fields.find(neg_query).sort(
            [('degree', pymongo.ASCENDING), ('discriminant', pymongo.DESCENDING)])
        nres_neg = res_neg.count()  # TODO: pages
        # find matches with positive discriminant:
        pos_query = dict(query)
        pos_query['discriminant'] = {'$gt': 0}
        res_pos = C.numberfields.fields.find(pos_query).sort(
            [('degree', pymongo.ASCENDING), ('discriminant', pymongo.ASCENDING)])
        nres_pos = res_pos.count()  # TODO: pages
        res = merge_sort(iter(res_neg), iter(res_pos))
        nres = nres_pos + nres_neg
    if ur_primes:
        res = filter_ur_primes(res, ur_primes)
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0
    res = iter_limit(res, count, start)
    info['fields'] = res
    info['number'] = nres
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (
                start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    info['format_coeffs'] = format_coeffs
    info['learnmore'] = [('Number Field labels', url_for("render_labels_page")),
                         ('Galois group labels', url_for("render_groups_page")),
                         ('Discriminant ranges', url_for("render_discriminants_page"))]
    t = 'Number Field search results'
    bread = [('Number Fields', url_for("number_field_render_webpage")),
             ('Search results', ' ')]
    properties = []
    return render_template("number_field/number_field_search.html", info=info,
                           title=t, properties=properties, bread=bread)
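# iter_limit, assumed above, slices the merged iterator the way skip/limit
# slices a Mongo cursor; itertools.islice does exactly this:
import itertools

def iter_limit(it, count, start):
    return itertools.islice(it, start, start + count)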
def get_imgs(url, title=None, customWidget=None):
    if 's=view' in url and 'page=favorites' not in url:
        raise NotImplementedError('Not Implemented')

    if customWidget is not None:
        print_ = customWidget.print_
    else:
        def print_(*values):
            sys.stdout.writelines(values + ('\n',))

    # Range
    if customWidget is not None:
        range_pid = customWidget.range
    else:
        range_pid = None
    if range_pid is not None:
        max_pid = max(parse_range(range_pid, max=100000))
    else:
        max_pid = 2000

    imgs = []
    pid = 0
    url_imgs = set()
    while pid < max_pid:
        url = setPage(url, pid)
        print_(url)
        html = downloader.read_html(url)
        soup = BeautifulSoup(html, 'html.parser')
        articles = soup.findAll('div', {'class': 'thumbnail-preview'}) + \
            soup.findAll('span', {'class': 'thumb'})
        if not articles:
            break
        for article in articles:
            try:
                url_img = article.span.a.attrs['href']
            except AttributeError:
                url_img = article.a.attrs['href']
            if not url_img.startswith('http'):
                url_img = urljoin('https://gelbooru.com', url_img)
            parsed_url = urlparse(url_img)
            qs = parse_qs(parsed_url.query)
            id = qs['id'][0]
            if url_img not in url_imgs:
                url_imgs.add(url_img)
                img = Image(id, url_img)
                imgs.append(img)
            pid += 1
            if pid >= max_pid:
                break
        if customWidget is not None and not customWidget.alive:
            break
        # Follow pagination: jump to the smallest pid offered that is >= the
        # current one; stop when no further pages are linked.
        pids = [int(pid_.replace('pid=', ''))
                for pid_ in re.findall('pid=[0-9]+', html)]
        pids_larger = [pid_ for pid_ in pids if pid_ >= pid]
        if pids_larger:
            pid = min(pids_larger)
        else:
            break
        if customWidget is not None:
            # update the UI title with progress; tr_(u'읽는 중...') == "Reading..."
            customWidget.exec_queue.put(
                (customWidget, u"customWidget.setTitle(u'{} {} - {}')".format(
                    tr_(u'읽는 중...'), title, pid)))
    return imgs
def database_query(db_name, coll_name):
    if not is_safe(db_name) or not is_safe(coll_name):
        return "Nope."
    C = base.getDBConnection()
    if db_name not in C.database_names():
        return "No such database."
    db = getattr(C, db_name)
    if coll_name not in db.collection_names():
        return "No such collection."
    args = to_dict(request.args)
    info = dict(args)
    collection = getattr(db, coll_name)
    collection.ensure_index('metadata', background=True)
    metadata = collection.find_one({'metadata': 'metadata'})
    if metadata:
        del metadata['_id']
        info['metadata'] = json.dumps(metadata, sort_keys=True, indent=4)
    else:
        info['metadata'] = "No metadata."
    indices = set()
    for name, index in collection.index_information().items():
        key = index['key'][0][0]
        if key == '_id':
            continue
        indices.add(key)
    try:
        indices.remove('metadata')
    except KeyError:  # set.remove raises KeyError, not ValueError
        pass
    if args.get('_fields'):
        info['default_fields'] = args['_fields'].split(',')
    else:
        # TODO: pull from metadata
        info['default_fields'] = list(indices)
    try:
        limit = int(args.pop('_limit'))
    except (TypeError, KeyError, ValueError):
        info['_limit'] = limit = 100
    if '_search' in args:
        query = {}
        for key, value in args.items():
            if key[0] == '_':
                continue
            # try increasingly permissive value types: int, float, then str
            try:
                query[key] = parse_range(value, int)
            except (TypeError, ValueError):
                try:
                    query[key] = parse_range(value, float)
                except (TypeError, ValueError):
                    query[key] = parse_range(value, str)
        res = collection.find(query).limit(limit)
    else:
        res = None
    # TODO: is there a better way to do [this url] + "&format=..."?
    non_format_args = to_dict(request.args)
    if '_format' in non_format_args:
        del non_format_args['_format']
    info['formats'] = [(format, url_for('database_query', db_name=db_name,
                                        coll_name=coll_name, _format=format,
                                        **non_format_args))
                       for format in ('text', 'csv', 'json')]
    format = args.get('_format', 'html')
    if format in ('txt', 'text'):
        info['sep'] = ' '
    elif format == 'csv':
        info['sep'] = ','
    elif format == 'json':
        res = json_iter(res)
        info['default_fields'] = ['all']
        info['sep'] = ''
    else:
        title = "%s.%s" % (db_name, coll_name)
        return render_template("raw/query.html", db=db_name, coll=coll_name,
                               info=info, indices=indices, res=res, title=title)
    # not html
    response = make_response(render_template("raw/query_download.html",
                                             db=db_name, coll=coll_name,
                                             info=info, indices=indices, res=res))
    response.headers['Content-type'] = 'text/plain'
    return response
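# With the parse_range sketched at the top of this section, the typed
# fallback above behaves like:
#   parse_range('2-5', int)    -> {'$gte': 2, '$lte': 5}
#   parse_range('0.5-', float) -> {'$gte': 0.5}
#   parse_range('11a', int)    -> raises ValueError, so the caller retries
#                                 with float and finally falls back to str:
#   parse_range('11a', str)    -> '11a'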
                         sample_dir, skip_layers, ratio=resize_ratio, invert=True)
else:
    sample_dir = sample_dir_raw

# Step 2: Import tile specifications from raw sample images.
parse_sample(sample_dir, spec_dir, 'cc_BJ')

# Step 3: Compute and match sift features, optimize the 2d montage.
# Get all input json files (one per section) into a dictionary
# {json_fname -> [filtered json fname, sift features file, etc.]}
json_files = dict((jf, {}) for jf in glob.glob(os.path.join(spec_dir, '*.json')))
skipped_layers = parse_range(skip_layers)
all_layers = []
layers_data = {}
fixed_tile = 0
render_first = True

for f in sorted(json_files.keys()):
    tiles_fname_prefix = os.path.splitext(os.path.basename(f))[0]  # cc_Sec001
    cur_tilespec = load_tile_specifications(f)  # content in cc_Sec001.json

    # read the layer from the file
    layer = None
    for tile in cur_tilespec:
        if tile['layer'] is None:
            print("Error reading layer in one of the tiles in: {0}".format(f))