def init_g2c_stats(self):
    if self._stats:
        return
    g2c_logger.debug("Computing genus 2 curve stats...")
    counts = self._counts
    total = counts["ncurves"]
    stats = {}
    dists = []
    for attr in stats_attribute_list:
        values = g2cdb().curves.distinct(attr['name'])
        values.sort()
        vcounts = []
        rows = []
        colcount = 0
        avg = 0
        for value in values:
            n = g2cdb().curves.find({attr['name']: value}).count()
            prop = format_percentage(n, total)
            if 'avg' in attr and attr['avg']:
                avg += n * value
            value_string = attr['format'](value) if 'format' in attr else value
            vcounts.append({'value': value_string, 'curves': n,
                            'query': url_for(".index_Q") + '?' + attr['name'] + '=' + str(value),
                            'proportion': prop})
            if len(vcounts) == 10:
                rows.append(vcounts)
                vcounts = []
        if len(vcounts):
            rows.append(vcounts)
        if 'avg' in attr and attr['avg']:
            # Note: when the last row was not full, vcounts is still the same list
            # object as rows[-1], so this average entry lands in the last row; if the
            # number of values is an exact multiple of 10 it is silently dropped.
            vcounts.append({'value': '\\mathrm{avg}\\ %.2f' % (float(avg) / total),
                            'curves': total,
                            'query': url_for(".index_Q") + '?' + attr['name'],
                            'proportion': format_percentage(1, 1)})
        dists.append({'attribute': attr, 'rows': rows})
    stats["distributions"] = dists
    self._stats = stats
    g2c_logger.debug("... finished computing genus 2 curve stats.")
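# The loop above only assumes that each entry of stats_attribute_list is a dict
# with a 'name' key and optional 'avg' and 'format' keys. Purely as an
# illustration of that expected shape (the real list is defined elsewhere in
# this module), an entry might look like:
#
#     {'name': 'analytic_rank', 'avg': True}                    # numeric, averaged
#     {'name': 'torsion_order', 'format': lambda v: str(v)}     # custom display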
def index_Q():
    if len(request.args) != 0:
        return genus2_curve_search(**request.args)
    info = {'counts': g2cstats().counts()}
    info["stats_url"] = url_for(".statistics")
    info["curve_url"] = lambda dbc: url_for_label(dbc['label'])
    info["browse_curves"] = [
        g2cdb().curves.find_one({"label": "169.a.169.1"}),
        g2cdb().curves.find_one({"label": "1116.a.214272.1"}),
        g2cdb().curves.find_one({"label": "1152.a.147456.1"}),
        g2cdb().curves.find_one({"label": "1369.a.50653.1"}),
        g2cdb().curves.find_one({"label": "12500.a.12500.1"}),
    ]
    info["conductor_list"] = ['1-499', '500-999', '1000-99999', '100000-1000000']
    info["discriminant_list"] = ['1-499', '500-999', '1000-99999', '100000-1000000']
    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    title = 'Genus 2 curves over $\Q$'
    bread = [('Genus 2 Curves', url_for(".index")), ('$\Q$', ' ')]
    return render_template("browse_search_g2.html", info=info,
                           credit=credit_string, title=title,
                           learnmore=learnmore_list(), bread=bread)
def init_g2c_count(self):
    if self._counts:
        return
    counts = {}
    ncurves = g2cdb().curves.count()
    counts['ncurves'] = ncurves
    counts['ncurves_c'] = comma(ncurves)
    nclasses = g2cdb().isogeny_classes.count()
    counts['nclasses'] = nclasses
    counts['nclasses_c'] = comma(nclasses)
    max_D = g2cdb().curves.find().sort('abs_disc', DESCENDING).limit(1)[0]['abs_disc']
    counts['max_D'] = max_D
    counts['max_D_c'] = comma(max_D)
    self._counts = counts
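# Hedged sketch: index_Q() above reads g2cstats().counts(), so the stats object
# presumably exposes a lazy accessor along these lines. Only the method name
# counts() is taken from index_Q; the body below is an assumption consistent
# with init_g2c_count():
def counts(self):
    self.init_g2c_count()
    return self._counts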
def download_search(info):
    lang = info["submit"]
    filename = 'genus2_curves' + download_file_suffix[lang]
    mydate = time.strftime("%d %B %Y")
    # reissue saved query here
    res = g2cdb().curves.find(ast.literal_eval(info["query"]))
    c = download_comment_prefix[lang]
    s = '\n'
    s += c + ' Genus 2 curves downloaded from the LMFDB on %s. Found %s curves.\n' % (mydate, res.count())
    s += c + ' Below is a list called data. Each entry has the form:\n'
    s += c + ' [[f coeffs],[h coeffs]]\n'
    s += c + ' defining the hyperelliptic curve y^2+h(x)y=f(x)\n'
    s += c + '\n'
    s += c + ' ' + download_make_data_comment[lang] + '\n'
    s += '\n'
    s += download_assignment_start[lang] + '\\\n'
    # loop through all search results and grab the curve equations
    for r in res:
        entry = str(r['min_eqn'])
        entry = entry.replace('u', '')
        entry = entry.replace('\'', '')
        s += entry + ',\\\n'
    s = s[:-3]
    s += download_assignment_end[lang]
    s += '\n\n'
    s += download_make_data[lang]
    strIO = StringIO.StringIO()
    strIO.write(s)
    strIO.seek(0)
    return send_file(strIO, attachment_filename=filename, as_attachment=True)
def download_search(info):
    lang = info["submit"]
    filename = 'genus2_curves' + download_file_suffix[lang]
    mydate = time.strftime("%d %B %Y")
    # reissue saved query here
    res = g2cdb().curves.find(literal_eval(info["query"]),
                              {'_id': int(0), 'min_eqn': int(1)})
    c = download_comment_prefix[lang]
    s = '\n'
    s += c + ' Genus 2 curves downloaded from the LMFDB on %s. Found %s curves.\n' % (mydate, res.count())
    s += c + ' Below is a list called data. Each entry has the form:\n'
    s += c + ' [[f coeffs],[h coeffs]]\n'
    s += c + ' defining the hyperelliptic curve y^2+h(x)y=f(x)\n'
    s += c + '\n'
    s += c + ' ' + download_make_data_comment[lang] + '\n'
    s += '\n'
    s += download_assignment_start[lang] + '\\\n'
    # loop through all search results and grab the curve equations
    for r in res:
        entry = str(r['min_eqn'])
        entry = entry.replace('u', '')
        entry = entry.replace('\'', '')
        s += entry + ',\\\n'
    s = s[:-3]
    s += download_assignment_end[lang]
    s += '\n\n'
    s += download_make_data[lang]
    strIO = StringIO.StringIO()
    strIO.write(s)
    strIO.seek(0)
    return send_file(strIO, attachment_filename=filename, as_attachment=True)
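# Hedged usage sketch for download_search(): the function only reads
# info["submit"] (a language key into the download_* dictionaries) and
# info["query"] (a string repr of a MongoDB query). The concrete key 'sage'
# and the query below are illustrative assumptions:
#
#     info = {"submit": "sage", "query": "{'cond': 169}"}
#     response = download_search(info)   # Flask file-download response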
def by_label(label):
    """
    Searches for a specific genus 2 curve isogeny class in the
    isogeny_classes collection by its label.
    """
    try:
        data = g2cdb().isogeny_classes.find_one({"label": label})
    except AttributeError:
        # caller must catch this and raise an error
        return "Invalid isogeny class label"
    if data:
        return G2Cisogeny_class(data)
    # caller must catch this and raise an error
    return "No isogeny class with this label currently in the database"
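# Hedged sketch of the calling convention noted above: by_label() returns either
# a G2Cisogeny_class instance or an error string, so a caller is expected to
# branch on the type. The caller shown here is hypothetical:
#
#     result = by_label("169.a")
#     if isinstance(result, str):
#         flash(result, "error")                 # error came back as a string
#         return redirect(url_for(".index"))
#     return render_isogeny_class_page(result)   # hypothetical renderer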
def genus2_curve_search(**args):
    info = to_dict(args)
    if 'download' in info and info['download'] == '1':
        return download_search(info)
    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    query = {}  # database callable
    bread = [('Genus 2 Curves', url_for(".index")),
             ('$\Q$', url_for(".index_Q")),
             ('Search Results', '.')]
    #if 'SearchAgain' in args:
    #    return rational_genus2_curves()
    if 'jump' in args:
        curve_label_regex = re.compile(r'\d+\.[a-z]+.\d+.\d+$')
        if curve_label_regex.match(info["jump"].strip()):
            data = render_curve_webpage_by_label(info["jump"].strip())
        else:
            class_label_regex = re.compile(r'\d+\.[a-z]+$')
            if class_label_regex.match(info["jump"].strip()):
                data = render_isogeny_class(info["jump"].strip())
            else:
                class_label_regex = re.compile(r'#\d+$')
                if class_label_regex.match(info["jump"].strip()) and ZZ(info["jump"][1:]) < 2**61:
                    c = g2cdb().isogeny_classes.find_one({'hash': int(info["jump"][1:])})
                    if c:
                        data = render_isogeny_class(c["label"])
                    else:
                        data = "Hash not found"
                else:
                    data = "Invalid label"
        if isinstance(data, str):
            flash(Markup(data + " <span style='color:black'>%s</span>" % (info["jump"])), "error")
            return redirect(url_for(".index"))
        return data
    try:
        parse_ints(info, query, 'abs_disc', 'absolute discriminant')
        parse_bool(info, query, 'is_gl2_type')
        parse_bool(info, query, 'has_square_sha')
        parse_bool(info, query, 'locally_solvable')
        parse_bracketed_posints(info, query, 'torsion', 'torsion structure',
                                maxlength=4, check_divisibility="increasing")
        parse_ints(info, query, 'cond', 'conductor')
        parse_ints(info, query, 'num_rat_wpts', 'Weierstrass points')
        parse_ints(info, query, 'torsion_order')
        parse_ints(info, query, 'two_selmer_rank', '2-Selmer rank')
        parse_ints(info, query, 'analytic_rank', 'analytic rank')
        # G2 invariants and drop-list items don't require parsing -- they are
        # all strings (supplied by us, not the user)
        if info.get('g20') and info.get('g21') and info.get('g22'):
            query['g2inv'] = [info['g20'], info['g21'], info['g22']]
        for fld in ('st_group', 'real_geom_end_alg', 'aut_grp_id', 'geom_aut_grp_id'):
            if info.get(fld):
                query[fld] = info[fld]
    except ValueError as err:
        info['err'] = str(err)
        return render_template("search_results_g2.html", info=info,
                               title='Genus 2 Curves Search Input Error',
                               bread=bread, credit=credit_string)
    info["query"] = dict(query)
    count = parse_count(info, 50)
    start = parse_start(info)
    cursor = g2cdb().curves.find(query)
    nres = cursor.count()
    if (start >= nres):
        start -= (1 + (start - nres) / count) * count
    if (start < 0):
        start = 0
    res = cursor.sort([("cond", pymongo.ASCENDING), ("class", pymongo.ASCENDING),
                       ("disc_key", pymongo.ASCENDING), ("label", pymongo.ASCENDING)
                       ]).skip(start).limit(count)
    nres = res.count()
    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    res_clean = []
    for v in res:
        v_clean = {}
        v_clean["label"] = v["label"]
        v_clean["isog_label"] = v["class"]
        isogeny_class = g2cdb().isogeny_classes.find_one({'label': isog_label(v["label"])})
        v_clean["is_gl2_type"] = isogeny_class["is_gl2_type"]
        if isogeny_class["is_gl2_type"] == True:
            v_clean["is_gl2_type_display"] = '✔'  # checkmark
        else:
            v_clean["is_gl2_type_display"] = ''
        v_clean["equation_formatted"] = list_to_min_eqn(v["min_eqn"])
        v_clean["st_group_name"] = st_group_name(isogeny_class['st_group'])
        v_clean["st_group_href"] = st_group_href(isogeny_class['st_group'])
        v_clean["analytic_rank"] = v["analytic_rank"]
        res_clean.append(v_clean)
    info["curves"] = res_clean
    info["curve_url"] = lambda dbc: url_for_label(dbc['label'])
    info["isog_url"] = lambda dbc: isog_url_for_label(dbc['label'])
    info["start"] = start
    info["count"] = count
    info["more"] = int(start + count < nres)
    credit = credit_string
    title = 'Genus 2 Curves search results'
    return render_template("search_results_g2.html", info=info, credit=credit,
                           learnmore=learnmore_list(), bread=bread, title=title)
def random_curve():
    label = random_object_from_collection(g2cdb().curves)['label']
    # This version leaves the word 'random' in the URL:
    #return render_curve_webpage_by_label(label)
    # This version uses the curve's own URL:
    return redirect(url_for(".by_g2c_label", label=label), 301)
def genus2_curve_search(**args):
    info = to_dict(args['data'])
    if 'jump' in info:
        jump = info["jump"].strip()
        curve_label_regex = re.compile(r'\d+\.[a-z]+.\d+.\d+$')
        if curve_label_regex.match(jump):
            return redirect(url_for_curve_label(jump), 301)
        else:
            class_label_regex = re.compile(r'\d+\.[a-z]+$')
            if class_label_regex.match(jump):
                return redirect(url_for_isogeny_class_label(jump), 301)
            else:
                # Handle direct Lhash input
                class_label_regex = re.compile(r'#\d+$')
                if class_label_regex.match(jump) and ZZ(jump[1:]) < 2**61:
                    c = g2cdb().isogeny_classes.find_one({'Lhash': jump[1:].strip()})
                    if c:
                        return redirect(url_for_isogeny_class_label(c["label"]), 301)
                    else:
                        errmsg = "Hash not found"
                else:
                    errmsg = "Invalid label"
        flash(Markup(errmsg + " <span style='color:black'>%s</span>" % (jump)), "error")
        return redirect(url_for(".index"))
    if 'download' in info and info['download'] == '1':
        return download_search(info)
    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    bread = info.get('bread', (('Genus 2 Curves', url_for(".index")),
                               ('$\Q$', url_for(".index_Q")),
                               ('Search Results', '.')))
    query = {}
    try:
        parse_ints(info, query, 'abs_disc', 'absolute discriminant')
        parse_bool(info, query, 'is_gl2_type')
        parse_bool(info, query, 'has_square_sha')
        parse_bool(info, query, 'locally_solvable')
        parse_bool(info, query, 'is_simple_geom')
        parse_bracketed_posints(info, query, 'torsion', 'torsion structure',
                                maxlength=4, check_divisibility="increasing")
        parse_ints(info, query, 'cond')
        parse_ints(info, query, 'num_rat_wpts', 'Weierstrass points')
        parse_ints(info, query, 'torsion_order')
        if 'torsion' in query and not 'torsion_order' in query:
            query['torsion_order'] = reduce(mul, [int(n) for n in query['torsion']], 1)
        parse_ints(info, query, 'two_selmer_rank', '2-Selmer rank')
        parse_ints(info, query, 'analytic_rank', 'analytic rank')
        # G2 invariants and drop-list items don't require parsing -- they are
        # all strings (supplied by us, not the user)
        if 'g20' in info and 'g21' in info and 'g22' in info:
            query['g2inv'] = [info['g20'], info['g21'], info['g22']]
        if 'class' in info:
            query['class'] = info['class']
        for fld in ('st_group', 'real_geom_end_alg', 'aut_grp_id', 'geom_aut_grp_id'):
            if info.get(fld):
                query[fld] = info[fld]
    except ValueError as err:
        info['err'] = str(err)
        return render_template("search_results_g2.html", info=info,
                               title='Genus 2 Curves Search Input Error',
                               bread=bread, credit=credit_string)
    info["query"] = dict(query)
    # Database query happens here
    cursor = g2cdb().curves.find(query, {'_id': int(0), 'label': int(1),
                                         'min_eqn': int(1), 'st_group': int(1),
                                         'is_gl2_type': int(1), 'analytic_rank': int(1)})
    count = parse_count(info, 50)
    start = parse_start(info)
    nres = cursor.count()
    if (start >= nres):
        start -= (1 + (start - nres) / count) * count
    if (start < 0):
        start = 0
    res = cursor.sort([("cond", ASCENDING), ("class", ASCENDING),
                       ("disc_key", ASCENDING), ("label", ASCENDING)
                       ]).skip(start).limit(count)
    nres = res.count()
    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    res_clean = []
    for v in res:
        v_clean = {}
        v_clean["label"] = v["label"]
        v_clean["class"] = class_from_curve_label(v["label"])
        v_clean["is_gl2_type"] = v["is_gl2_type"]
        # display checkmark if true, blank otherwise
        v_clean["is_gl2_type_display"] = '✔' if v["is_gl2_type"] else ''
        v_clean["equation_formatted"] = list_to_min_eqn(v["min_eqn"])
        v_clean["st_group_name"] = st_group_name(v['st_group'])
        v_clean["st_group_href"] = st_group_href(v['st_group'])
        v_clean["analytic_rank"] = v["analytic_rank"]
        res_clean.append(v_clean)
    info["curves"] = res_clean
    info["curve_url"] = lambda label: url_for_curve_label(label)
    info["class_url"] = lambda label: url_for_isogeny_class_label(label)
    info["start"] = start
    info["count"] = count
    info["more"] = int(start + count < nres)
    title = info.get('title', 'Genus 2 Curve search results')
    credit = credit_string
    return render_template("search_results_g2.html", info=info, credit=credit,
                           learnmore=learnmore_list(), bread=bread, title=title)
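# Hedged sketch: this version of genus2_curve_search() unpacks args['data'], so
# the route that dispatches to it presumably calls it roughly as follows (the
# surrounding route handler is an assumption, not shown in this module excerpt):
#
#     return genus2_curve_search(data=request.args)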
def random_curve():
    label = random_value_from_collection(g2cdb().curves, 'label')
    return redirect(url_for_curve_label(label), 301)
def make_class(self):
    from lmfdb.genus2_curves.genus2_curve import url_for_curve_label

    # Data
    curves_data = g2cdb().curves.find({"class": self.label},
                                      {'_id': int(0), 'label': int(1),
                                       'min_eqn': int(1), 'disc_key': int(1)}
                                      ).sort([("disc_key", ASCENDING), ("label", ASCENDING)])
    assert curves_data
    self.curves = [{"label": c['label'],
                    "equation_formatted": list_to_min_eqn(c['min_eqn']),
                    "url": url_for_curve_label(c['label'])} for c in curves_data]
    self.ncurves = curves_data.count()
    self.bad_lfactors = [[c[0], list_to_factored_poly_otherorder(c[1])]
                         for c in self.bad_lfactors]

    # Data derived from Sato-Tate group
    self.st_group_name = st_group_name(self.st_group)
    self.st_group_href = st_group_href(self.st_group)
    self.st0_group_name = st0_group_name(self.real_geom_end_alg)
    # Later used in Lady Gaga box:
    self.real_geom_end_alg_disp = [r'\End(J_{\overline{\Q}}) \otimes \R',
                                   end_alg_name(self.real_geom_end_alg)]
    if self.is_gl2_type:
        self.is_gl2_type_name = 'yes'
    else:
        self.is_gl2_type_name = 'no'

    # Endomorphism data
    endodata = g2cdb().endomorphisms.find_one({"label": self.curves[0]['label']})
    self.gl2_statement_base = \
        gl2_statement_base(endodata['factorsRR_base'], r'\(\Q\)')
    self.endo_statement_base = \
        """Endomorphism algebra over \(\Q\):<br>""" + \
        endo_statement_isog(endodata['factorsQQ_base'], endodata['factorsRR_base'], r'')
    endodata['fod_poly'] = intlist_to_poly(endodata['fod_coeffs'])
    self.fod_statement = fod_statement(endodata['fod_label'], endodata['fod_poly'])
    if endodata['fod_label'] != '1.1.1.1':
        self.endo_statement_geom = \
            """Endomorphism algebra over \(\overline{\Q}\):<br>""" + \
            endo_statement_isog(endodata['factorsQQ_geom'], endodata['factorsRR_geom'], r'\overline{\Q}')
    else:
        self.endo_statement_geom = ''

    # Title
    self.title = "Genus 2 Isogeny Class %s" % (self.label)

    # Lady Gaga box
    self.properties = (
        ('Label', self.label),
        ('Number of curves', str(self.ncurves)),
        ('Conductor', '%s' % self.cond),
        ('Sato-Tate group', self.st_group_href),
        ('\(%s\)' % self.real_geom_end_alg_disp[0], '\(%s\)' % self.real_geom_end_alg_disp[1]),
        ('\(\mathrm{GL}_2\)-type', '%s' % self.is_gl2_type_name)
    )
    x = self.label.split('.')[1]
    self.friends = [('L-function',
                     url_for("l_functions.l_function_genus2_page", cond=self.cond, x=x))]
    #self.downloads = [('Download Euler factors', ".")]
    #self.downloads = [
    #    ('Download Euler factors', "."),
    #        url_for(".download_g2c_eulerfactors", label=self.label)),
    #    ('Download stored data for all curves',
    #        url_for(".download_g2c_all", label=self.label))
    #    ]

    # Breadcrumbs
    self.bread = (
        ('Genus 2 Curves', url_for(".index")),
        ('$\Q$', url_for(".index_Q")),
        ('%s' % self.cond, url_for(".by_conductor", cond=self.cond)),
        ('%s' % self.label, ' ')
    )

    # More friends (NOTE: to be improved)
    self.ecproduct_wurl = []
    if hasattr(self, 'ecproduct'):
        for i in range(2):
            curve_label = self.ecproduct[i]
            crv_url = url_for("ec.by_ec_label", label=curve_label)
            if i == 1 or len(set(self.ecproduct)) != 1:
                self.friends.append(('Elliptic curve ' + curve_label, crv_url))
            self.ecproduct_wurl.append({'label': curve_label, 'url': crv_url})
    self.ecquadratic_wurl = []
    if hasattr(self, 'ecquadratic'):
        for i in range(len(self.ecquadratic)):
            curve_label = self.ecquadratic[i]
            crv_spl = curve_label.split('-')
            crv_url = url_for("ecnf.show_ecnf_isoclass", nf=crv_spl[0],
                              conductor_label=crv_spl[1], class_label=crv_spl[2])
            self.friends.append(('Elliptic curve ' + curve_label, crv_url))
            self.ecquadratic_wurl.append({'label': curve_label, 'url': crv_url, 'nf': crv_spl[0]})
    if hasattr(self, 'mfproduct'):
        for i in range(len(self.mfproduct)):
            mf_label = self.mfproduct[i]
            mf_spl = mf_label.split('.')
            mf_spl.append(mf_spl[2][-1])
            mf_spl[2] = mf_spl[2][:-1]  # Need a splitting function
            mf_url = url_for("emf.render_elliptic_modular_forms", level=mf_spl[0],
                             weight=mf_spl[1], character=mf_spl[2], label=mf_spl[3])
            self.friends.append(('Modular form ' + mf_label, mf_url))
    if hasattr(self, 'mfhilbert'):
        for i in range(len(self.mfhilbert)):
            mf_label = self.mfhilbert[i]
            mf_spl = mf_label.split('-')
            mf_url = url_for("hmf.render_hmf_webpage", field_label=mf_spl[0], label=mf_label)
            self.friends.append(('Hilbert modular form ' + mf_label, mf_url))