def lattice_search_isometric(res, info, query):
    """
    Check for isometric lattices when the user enters a valid Gram matrix
    that is not stored in the database.

    This may become slow in the future: at the moment we compare against the
    list of stored matrices with the same dimension and determinant
    (comparing by dimension alone is too slow).
    """
    if info['number'] == 0 and info.get('gram'):
        A = query['gram']
        n = len(A[0])
        d = matrix(A).determinant()
        for gram in db.lat_lattices.search({'dim': n, 'det': int(d)}, 'gram'):
            if isom(A, gram):
                query['gram'] = gram
                proj = lattice_search_projection
                count = parse_count(info)
                start = parse_start(info)
                res = db.lat_lattices.search(query, proj, limit=count, offset=start, info=info)
                break
    for v in res:
        v['min'] = v.pop('minimum')
    return res
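# Illustration (not part of the LMFDB source): the docstring above relies on the
# fact that isometric lattices share both dimension and determinant, so (dim, det)
# is a cheap pre-filter before the expensive isom() test. A minimal self-contained
# sketch with plain-Python 2x2 matrices; det2 and transform are hypothetical
# helpers written just for this demo.

def det2(M):
    # determinant of a 2x2 matrix given as nested lists
    return M[0][0] * M[1][1] - M[0][1] * M[1][0]

def transform(U, A):
    # U^T * A * U: the Gram matrix of the same lattice in a new basis
    UT = [[U[0][0], U[1][0]], [U[0][1], U[1][1]]]
    def matmul(X, Y):
        return [[sum(X[i][k] * Y[k][j] for k in range(2)) for j in range(2)] for i in range(2)]
    return matmul(matmul(UT, A), U)

A = [[2, 1], [1, 2]]   # Gram matrix of a rank-2 lattice
U = [[1, 1], [0, 1]]   # unimodular change of basis (det 1)
B = transform(U, A)    # an isometric copy: [[2, 3], [3, 6]]
assert det2(A) == det2(B) == 3  # same (dim, det), so B is among the candidates tested with isom()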
def artin_representation_search(**args):
    info = to_dict(args)
    if 'natural' in info:
        label = info['natural']
        # test if it is ok
        try:
            label = parse_artin_label(label)
        except ValueError as err:
            flash(Markup("Error: %s" % (err)), "error")
            bread = get_bread([('Search results', '')])
            return search_input_error({'err': ''}, bread)
        return render_artin_representation_webpage(label)

    title = 'Artin representation search results'
    bread = [('Artin representation', url_for(".index")), ('Search results', ' ')]
    sign_code = 0
    query = {'Hide': 0}
    try:
        parse_primes(info, query, "unramified", name="Unramified primes",
                     qfield="BadPrimes", mode="complement", to_string=True)
        parse_primes(info, query, "ramified", name="Ramified primes",
                     qfield="BadPrimes", mode="append", to_string=True)
        parse_restricted(info, query, "root_number", qfield="GaloisConjugates.Sign",
                         allowed=[1, -1], process=int)
        parse_restricted(info, query, "frobenius_schur_indicator", qfield="Indicator",
                         allowed=[1, 0, -1], process=int)
        parse_galgrp(info, query, "group", name="Group", qfield="Galois_nt", use_bson=False)
        parse_ints(info, query, 'dimension', qfield='Dim')
        parse_ints(info, query, 'conductor', qfield='Conductor_key', parse_singleton=make_cond_key)
        #parse_paired_fields(info,query,field1='conductor',qfield1='Conductor_key',parse1=parse_ints,kwds1={'parse_singleton':make_cond_key},
        #                    field2='dimension',qfield2='Dim', parse2=parse_ints)
    except ValueError:
        return search_input_error(info, bread)

    count = parse_count(info, 10)
    start = parse_start(info)

    data = ArtinRepresentation.collection().find(query).sort([("Dim", ASC), ("Conductor_key", ASC)])
    nres = data.count()
    data = data.skip(start).limit(count)

    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0
    if nres == 1:
        report = 'unique match'
    else:
        if nres > count or start != 0:
            report = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            report = 'displaying all %s matches' % nres
    if nres == 0:
        report = 'no matches'
    initfunc = ArtinRepresentation

    return render_template("artin-representation-search.html", req=info, data=data,
                           title=title, bread=bread, query=query, start=start,
                           report=report, nres=nres, initfunc=initfunc, sign_code=sign_code)
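# Illustration (not part of the LMFDB source): the start-adjustment idiom used
# above (and in most of the search functions below) snaps an out-of-range page
# offset back to the beginning of the last non-empty page. A minimal standalone
# check of the arithmetic; snap_start is a hypothetical helper, and floor
# division is spelled // (what the Python 2 originals got implicitly from /):

def snap_start(start, nres, count):
    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0
    return start

assert snap_start(30, 7, 10) == 0    # 7 results: every page past the first is empty
assert snap_start(40, 25, 10) == 20  # 25 results: the last non-empty page starts at 20
assert snap_start(10, 25, 10) == 10  # in range: left untouched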
def modlmf_search(**args):
    C = getDBConnection()
    info = to_dict(args)  # what has been entered in the search boxes
    if 'download' in info:
        return download_search(info)

    if 'label' in info and info.get('label'):
        return modlmf_by_label(info.get('label'), C)
    query = {}
    try:
        for field, name in (('characteristic', 'Field characteristic'),
                            ('deg', 'Field degree'),
                            ('level', 'Level'),
                            ('conductor', 'Conductor'),
                            ('weight_grading', 'Weight grading')):
            parse_ints(info, query, field, name)
    except ValueError as err:
        info['err'] = str(err)
        return search_input_error(info)
    # missing: search by character, search up to twists, and by gamma0/gamma1
    count = parse_count(info, 50)
    start = parse_start(info)

    info['query'] = dict(query)
    res = C.mod_l_eigenvalues.modlmf.find(query).sort([('characteristic', ASC), ('deg', ASC),
                                                       ('level', ASC), ('weight_grading', ASC)]).skip(start).limit(count)
    nres = res.count()

    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0

    info['number'] = nres
    info['start'] = int(start)
    info['more'] = int(start + count < nres)
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres == 0:
            info['report'] = 'no matches'
        else:
            if nres > count or start != 0:
                info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
            else:
                info['report'] = 'displaying all %s matches' % nres

    res_clean = []
    for v in res:
        v_clean = {}
        for m in ['label', 'characteristic', 'deg', 'level', 'weight_grading']:
            v_clean[m] = v[m]
        res_clean.append(v_clean)

    info['modlmfs'] = res_clean
    t = 'Mod ℓ Modular Forms Search Results'
    bread = [('Modular Forms', "/ModularForm"),
             ('mod ℓ', url_for(".modlmf_render_webpage")),
             ('Search Results', ' ')]
    properties = []
    return render_template("modlmf-search.html", info=info, title=t, properties=properties,
                           bread=bread, learnmore=learnmore_list())
def modlmf_search(**args):
    C = getDBConnection()
    info = to_dict(args)  # what has been entered in the search boxes
    if 'download' in info:
        return download_search(info)

    if 'label' in info and info.get('label'):
        return modlmf_by_label(info.get('label'), C)
    query = {}
    try:
        for field, name in (('characteristic', 'Field characteristic'),
                            ('deg', 'Field degree'),
                            ('level', 'Level'),
                            ('conductor', 'Conductor'),
                            ('min_weight', 'Minimal weight')):
            parse_ints(info, query, field, name)
    except ValueError as err:
        info['err'] = str(err)
        return search_input_error(info)
    # missing: search by character, search up to twists, and by gamma0/gamma1
    count = parse_count(info, 50)
    start = parse_start(info)

    info['query'] = dict(query)
    res = C.mod_l_eigenvalues.modlmf.find(query).sort([('characteristic', ASC), ('deg', ASC),
                                                       ('level', ASC), ('min_weight', ASC),
                                                       ('conductor', ASC)]).skip(start).limit(count)
    nres = res.count()

    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0

    info['number'] = nres
    info['start'] = int(start)
    info['more'] = int(start + count < nres)
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres == 0:
            info['report'] = 'no matches'
        else:
            if nres > count or start != 0:
                info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
            else:
                info['report'] = 'displaying all %s matches' % nres

    res_clean = []
    for v in res:
        v_clean = {}
        for m in ['label', 'characteristic', 'deg', 'level', 'min_weight', 'conductor']:
            v_clean[m] = v[m]
        res_clean.append(v_clean)

    info['modlmfs'] = res_clean
    t = 'Mod ℓ Modular Forms Search Results'
    bread = [('Modular Forms', "/ModularForm"),
             ('mod ℓ', url_for(".modlmf_render_webpage")),
             ('Search Results', ' ')]
    properties = []
    return render_template("modlmf-search.html", info=info, title=t, properties=properties,
                           bread=bread, learnmore=learnmore_list())
def local_field_search(**args):
    info = to_dict(args)
    bread = get_bread([("Search results", ' ')])
    C = base.getDBConnection()
    query = {}
    if 'jump_to' in info:
        return render_field_webpage({'label': info['jump_to']})

    try:
        parse_galgrp(info, query, 'gal', use_bson=False)
        parse_ints(info, query, 'p', name='Prime p')
        parse_ints(info, query, 'n', name='Degree')
        parse_ints(info, query, 'c', name='Discriminant exponent c')
        parse_ints(info, query, 'e', name='Ramification index e')
    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)

    # logger.debug(query)
    res = C.localfields.fields.find(query).sort([('p', pymongo.ASCENDING),
                                                 ('n', pymongo.ASCENDING),
                                                 ('c', pymongo.ASCENDING),
                                                 ('label', pymongo.ASCENDING)])
    nres = res.count()
    res = res.skip(start).limit(count)

    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0
    info['fields'] = res
    info['number'] = nres
    info['group_display'] = group_display_shortC(C)
    info['display_poly'] = format_coeffs
    info['slopedisp'] = show_slope_content
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    return render_template("lf-search.html", info=info,
                           title="Local Number Field Search Result",
                           bread=bread, credit=LF_credit)
def __call__(self, info):
    info = to_dict(info)  # I'm not sure why this is required...
    for key, func in self.shortcuts.items():
        if info.get(key, '').strip():
            return func(info)
    query = {}
    template_kwds = {}
    for key in self.kwds:
        template_kwds[key] = info.get(key, self.kwds[key]())
    try:
        errpage = self.f(info, query)
    except ValueError as err:
        # Errors raised in parsing
        info['err'] = str(err)
        err_title = query.pop('__err_title__', self.err_title)
        return render_template(self.template, info=info, title=err_title, **template_kwds)
    if errpage is not None:
        return errpage
    if 'result_count' in info:
        nres = self.table.count(query)
        return jsonify({"nres": str(nres)})
    sort = query.pop('__sort__', None)
    table = query.pop('__table__', self.table)
    proj = query.pop('__projection__', self.projection)
    # We want to pop __title__ even if overridden by info.
    title = query.pop('__title__', self.title)
    title = info.get('title', title)
    template = query.pop('__template__', self.template)
    count = parse_count(info, self.per_page)
    start = parse_start(info)
    try:
        res = table.search(query, proj, limit=count, offset=start, sort=sort, info=info)
    except QueryCanceledError as err:
        ctx = ctx_proc_userdata()
        flash_error('The search query took longer than expected! Please help us improve by '
                    'reporting this error <a href="%s" target=_blank>here</a>.' % ctx['feedbackpage'])
        info['err'] = str(err)
        info['query'] = dict(query)
        return render_template(self.template, info=info, title=self.err_title, **template_kwds)
    else:
        if self.cleaners:
            for v in res:
                for name, func in self.cleaners.items():
                    v[name] = func(v)
        if self.postprocess is not None:
            res = self.postprocess(res, info, query)
        for key, func in self.longcuts.items():
            if info.get(key, '').strip():
                return func(res, info, query)
        info['results'] = res
        return render_template(template, info=info, title=title, **template_kwds)
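# Illustration (not part of the LMFDB source): __call__ above lets the wrapped
# search function smuggle per-request overrides (sort order, table, projection,
# title, template) into the query dict under reserved dunder-style keys, which
# are popped back out before the dict reaches the database. A minimal sketch of
# that pattern; run_search is a hypothetical stand-in backend:

def run_search(query, default_sort=None, default_table="main"):
    # pop control keys first so the backend only ever sees real filter fields
    sort = query.pop('__sort__', default_sort)
    table = query.pop('__table__', default_table)
    return {'table': table, 'sort': sort, 'filters': query}

q = {'degree': 4, '__sort__': [('degree', 1)]}
assert run_search(q) == {'table': 'main', 'sort': [('degree', 1)], 'filters': {'degree': 4}}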
def higher_genus_w_automorphisms_search(**args):
    info = to_dict(args)
    bread = get_bread([("Search results", url_for('.search'))])
    C = base.getDBConnection()
    query = {}
    if 'jump_to' in info:
        return render_hgcwa_webpage({'label': info['jump_to']})

    try:
        parse_list(info, query, 'group', name='Group')
        parse_ints(info, query, 'genus', name='Genus')
        parse_list(info, query, 'signature', name='Signature')
        parse_ints(info, query, 'dim', name='Dimension of the family')
        if 'inc_hyper' in info:
            if info['inc_hyper'] == 'exclude':
                query['hyperelliptic'] = False
            elif info['inc_hyper'] == 'only':
                query['hyperelliptic'] = True
    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)

    res = C.curve_automorphisms.families.find(query).sort([('g', pymongo.ASCENDING),
                                                           ('dim', pymongo.ASCENDING)])
    nres = res.count()
    res = res.skip(start).limit(count)

    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0
    info['fields'] = res
    info['number'] = nres
    info['group_display'] = group_display_shortC(C)
    info['sign_display'] = sign_display
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    return render_template("hgcwa-search.html", info=info,
                           title="Higher Genus Curves with Automorphisms Search Result",
                           bread=bread, credit=HGCwA_credit)
def local_field_search(**args):
    info = to_dict(args)
    bread = get_bread([("Search results", " ")])
    C = base.getDBConnection()
    query = {}
    if info.get("jump_to"):
        return redirect(url_for(".by_label", label=info["jump_to"]), 301)

    try:
        parse_galgrp(info, query, "gal", use_bson=False)
        parse_ints(info, query, "p", name="Prime p")
        parse_ints(info, query, "n", name="Degree")
        parse_ints(info, query, "c", name="Discriminant exponent c")
        parse_ints(info, query, "e", name="Ramification index e")
    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)

    # logger.debug(query)
    res = C.localfields.fields.find(query).sort(
        [("p", pymongo.ASCENDING), ("n", pymongo.ASCENDING), ("c", pymongo.ASCENDING), ("label", pymongo.ASCENDING)]
    )
    nres = res.count()
    res = res.skip(start).limit(count)

    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0
    info["fields"] = res
    info["number"] = nres
    info["group_display"] = group_display_shortC(C)
    info["display_poly"] = format_coeffs
    info["slopedisp"] = show_slope_content
    info["start"] = start
    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info["report"] = "displaying matches %s-%s of %s" % (start + 1, min(nres, start + count), nres)
        else:
            info["report"] = "displaying all %s matches" % nres
    return render_template(
        "lf-search.html", info=info, title="Local Number Field Search Result", bread=bread, credit=LF_credit
    )
def local_field_search(**args):
    info = to_dict(args)
    bread = get_bread([("Search results", url_for('.search'))])
    C = base.getDBConnection()
    query = {}
    if 'jump_to' in info:
        return render_field_webpage({'label': info['jump_to']})

    try:
        parse_galgrp(info, query, 'gal', use_bson=False)
        parse_ints(info, query, 'p', name='Prime p')
        parse_ints(info, query, 'n', name='Degree')
        parse_ints(info, query, 'c', name='Discriminant exponent c')
        parse_ints(info, query, 'e', name='Ramification index e')
    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)

    # logger.debug(query)
    res = C.localfields.fields.find(query).sort([('p', pymongo.ASCENDING),
                                                 ('n', pymongo.ASCENDING),
                                                 ('c', pymongo.ASCENDING),
                                                 ('label', pymongo.ASCENDING)])
    nres = res.count()
    res = res.skip(start).limit(count)

    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0
    info['fields'] = res
    info['number'] = nres
    info['group_display'] = group_display_shortC(C)
    info['display_poly'] = format_coeffs
    info['slopedisp'] = show_slope_content
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    return render_template("lf-search.html", info=info,
                           title="Local Number Field Search Result",
                           bread=bread, credit=LF_credit)
def hilbert_modular_form_search(**args):
    info = to_dict(args)  # what has been entered in the search boxes
    if 'label' in info and info['label']:
        lab = info['label'].strip()
        info['label'] = lab
        try:
            split_full_label(lab)
            return hilbert_modular_form_by_label(lab)
        except ValueError:
            return redirect(url_for(".hilbert_modular_form_render_webpage"))

    query = {}
    try:
        parse_nf_string(info, query, 'field_label', name="Field")
        parse_ints(info, query, 'deg', name='Field degree')
        parse_ints(info, query, 'disc', name="Field discriminant")
        parse_ints(info, query, 'dimension')
        parse_ints(info, query, 'level_norm', name="Level norm")
        parse_hmf_weight(info, query, 'weight', qfield=('parallel_weight', 'weight'))
    except ValueError:
        return search_input_error()

    count = parse_count(info, 100)
    start = parse_start(info)

    info['query'] = dict(query)

    C = getDBConnection()
    res = C.hmfs.forms.find(query).sort([('deg', pymongo.ASCENDING),
                                         ('disc', pymongo.ASCENDING),
                                         ('level_norm', pymongo.ASCENDING),
                                         ('level_label', pymongo.ASCENDING),
                                         ('label_nsuffix', pymongo.ASCENDING)]).skip(start).limit(count)
    nres = res.count()

    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0

    info['number'] = nres
    info['start'] = start
    info['more'] = int(start + count < nres)
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres == 0:
            info['report'] = 'no matches'
        else:
            if nres > count or start != 0:
                info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
            else:
                info['report'] = 'displaying all %s matches' % nres

    res_clean = []
    for v in res:
        v_clean = {}
        v_clean['field_label'] = v['field_label']
        v_clean['short_label'] = v['short_label']
        v_clean['label'] = v['label']
        v_clean['level_ideal'] = teXify_pol(v['level_ideal'])
        v_clean['dimension'] = v['dimension']
        res_clean.append(v_clean)

    info['forms'] = res_clean

    t = 'Hilbert Modular Form search results'
    bread = [('Hilbert Modular Forms', url_for(".hilbert_modular_form_render_webpage")),
             ('Search results', ' ')]
    properties = []
    return render_template("hilbert_modular_form_search.html", info=info, title=t,
                           credit=hmf_credit, properties=properties, bread=bread,
                           learnmore=learnmore_list())
def search(**args):
    """
    Query processing for Sato-Tate groups -- returns rendered results page.
    """
    info = to_dict(args)
    if 'jump' in info:
        return redirect(url_for('.by_label', label=info['jump']), 301)
    if 'label' in info:
        return redirect(url_for('.by_label', label=info['label']), 301)
    bread = [('Sato-Tate groups', url_for('.index')), ('Search Results', '.')]
    count = parse_count(info, 25)
    start = parse_start(info)
    # if the user clicked refine search, always restart at 0
    if 'refine' in info:
        start = 0
    ratonly = True if info.get('rational_only', 'no').strip().lower() == 'yes' else False
    query = {'rational': True} if ratonly else {}
    try:
        parse_ints(info, query, 'weight', 'weight')
        if 'weight' in query:
            weight_list = parse_ints_to_list_flash(info.get('weight'), 'weight')
        parse_ints(info, query, 'degree', 'degree')
        if 'degree' in query:
            degree_list = parse_ints_to_list_flash(info.get('degree'), 'degree')
        if info.get('identity_component'):
            query['identity_component'] = info['identity_component']
        parse_ints(info, query, 'components', 'components')
        if 'components' in query:
            components_list = parse_ints_to_list_flash(info.get('components'), 'components')
        parse_rational(info, query, 'trace_zero_density', 'trace zero density')
    except ValueError as err:
        info['err'] = str(err)
        return render_template('st_results.html', info=info,
                               title='Sato-Tate groups search input error',
                               bread=bread, credit=credit_string)

    # Check mu(n) groups first (these are not stored in the database)
    results = []
    if (not 'weight' in query or 0 in weight_list) and \
       (not 'degree' in query or 1 in degree_list) and \
       (not 'identity_component' in query or query['identity_component'] == 'SO(1)') and \
       (not 'trace_zero_density' in query or query['trace_zero_density'] == '0'):
        if not 'components' in query:
            components_list = xrange(1, 3 if ratonly else start + count + 1)
        elif ratonly:
            components_list = [n for n in range(1, 3) if n in components_list]
        nres = len(components_list) if 'components' in query or ratonly else INFINITY
        for n in itertools.islice(components_list, start, start + count):
            results.append(mu_info(n))
    else:
        nres = 0

    # Now look up other (rational) ST groups in the database
    if nres != INFINITY:
        cursor = st_groups().find(query)
        start2 = start - nres if start > nres else 0
        nres += cursor.count()
        if start < nres and len(results) < count:
            res = cursor.sort([('weight', ASCENDING), ('degree', ASCENDING),
                               ('real_dimension', ASCENDING), ('identity_component', ASCENDING),
                               ('name', ASCENDING)]).skip(start2).limit(count - len(results))
            for v in res:
                v_clean = {}
                v_clean['label'] = v['label']
                v_clean['weight'] = v['weight']
                v_clean['degree'] = v['degree']
                v_clean['real_dimension'] = v['real_dimension']
                v_clean['identity_component'] = st0_pretty(v['identity_component'])
                v_clean['name'] = v['name']
                v_clean['pretty'] = v['pretty']
                v_clean['components'] = v['components']
                v_clean['component_group'] = sg_pretty(v['component_group'])
                v_clean['trace_zero_density'] = v['trace_zero_density']
                v_clean['trace_moments'] = trace_moments(v['moments'])
                results.append(v_clean)

    if nres == 0:
        info['report'] = 'no matches'
    elif nres == 1:
        info['report'] = 'unique match'
    else:
        if nres == INFINITY or nres > count or start > 0:
            info['report'] = 'displaying matches %d-%d %s' % (start + 1, start + len(results),
                                                              "of %d" % nres if nres != INFINITY else "")
        else:
            info['report'] = 'displaying all %s matches' % nres

    info['st0_list'] = st0_list
    info['st0_dict'] = st0_dict
    info['stgroups'] = results
    info['stgroup_url'] = lambda dbc: url_for('.by_label', label=dbc['label'])
    info['start'] = start
    info['count'] = count
    info['more'] = 1 if nres < 0 or nres > start + count else 0

    title = 'Sato-Tate group search results'
    return render_template('st_results.html', info=info, credit=credit_string,
                           learnmore=learnmore_list(), bread=bread, title=title)
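# Illustration (not part of the LMFDB source): the mu(n) branch above paginates
# a family that is conceptually infinite by materializing only the window
# [start, start + count) with itertools.islice, and uses an INFINITY sentinel
# for the total count. A minimal standalone sketch (mu_label is a hypothetical
# stand-in for mu_info; xrange is the Python 2 builtin used above):

import itertools

INFINITY = float('inf')

def mu_label(n):
    return 'mu(%d)' % n

start, count = 5, 3
components_list = xrange(1, start + count + 1)  # generate only as many terms as the page can need
page = [mu_label(n) for n in itertools.islice(components_list, start, start + count)]
assert page == ['mu(6)', 'mu(7)', 'mu(8)']
nres = INFINITY  # unbounded family: the report string then shows no total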
def genus2_curve_search(**args):
    info = to_dict(args)
    if 'download' in info and info['download'] == '1':
        return download_search(info)

    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    query = {}  # database callable
    bread = [('Genus 2 Curves', url_for(".index")),
             ('$\Q$', url_for(".index_Q")),
             ('Search Results', '.')]
    #if 'SearchAgain' in args:
    #    return rational_genus2_curves()

    if 'jump' in args:
        curve_label_regex = re.compile(r'\d+\.[a-z]+\.\d+\.\d+$')  # dots escaped: label components are dot-separated
        if curve_label_regex.match(info["jump"].strip()):
            data = render_curve_webpage_by_label(info["jump"].strip())
        else:
            class_label_regex = re.compile(r'\d+\.[a-z]+$')
            if class_label_regex.match(info["jump"].strip()):
                data = render_isogeny_class(info["jump"].strip())
            else:
                class_label_regex = re.compile(r'#\d+$')
                if class_label_regex.match(info["jump"].strip()) and ZZ(info["jump"][1:]) < 2**61:
                    c = g2cdb().isogeny_classes.find_one({'hash': int(info["jump"][1:])})
                    if c:
                        data = render_isogeny_class(c["label"])
                    else:
                        data = "Hash not found"
                else:
                    data = "Invalid label"
        if isinstance(data, str):
            flash(Markup(data + " <span style='color:black'>%s</span>" % (info["jump"])), "error")
            return redirect(url_for(".index"))
        return data

    try:
        parse_ints(info, query, 'abs_disc', 'absolute discriminant')
        parse_bool(info, query, 'is_gl2_type')
        parse_bool(info, query, 'has_square_sha')
        parse_bool(info, query, 'locally_solvable')
        parse_bracketed_posints(info, query, 'torsion', 'torsion structure',
                                maxlength=4, check_divisibility="increasing")
        parse_ints(info, query, 'cond', 'conductor')
        parse_ints(info, query, 'num_rat_wpts', 'Weierstrass points')
        parse_ints(info, query, 'torsion_order')
        parse_ints(info, query, 'two_selmer_rank', '2-Selmer rank')
        parse_ints(info, query, 'analytic_rank', 'analytic rank')
        # G2 invariants and drop-list items don't require parsing -- they are all strings
        # (supplied by us, not the user)
        if info.get('g20') and info.get('g21') and info.get('g22'):
            query['g2inv'] = [info['g20'], info['g21'], info['g22']]
        for fld in ('st_group', 'real_geom_end_alg', 'aut_grp_id', 'geom_aut_grp_id'):
            if info.get(fld):
                query[fld] = info[fld]
    except ValueError as err:
        info['err'] = str(err)
        return render_template("search_results_g2.html", info=info,
                               title='Genus 2 Curves Search Input Error',
                               bread=bread, credit=credit_string)

    info["query"] = dict(query)
    count = parse_count(info, 50)
    start = parse_start(info)
    cursor = g2cdb().curves.find(query)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0
    res = cursor.sort([("cond", pymongo.ASCENDING), ("class", pymongo.ASCENDING),
                       ("disc_key", pymongo.ASCENDING), ("label", pymongo.ASCENDING)]).skip(start).limit(count)
    nres = res.count()
    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    res_clean = []
    for v in res:
        v_clean = {}
        v_clean["label"] = v["label"]
        v_clean["isog_label"] = v["class"]
        isogeny_class = g2cdb().isogeny_classes.find_one({'label': isog_label(v["label"])})
        v_clean["is_gl2_type"] = isogeny_class["is_gl2_type"]
        if isogeny_class["is_gl2_type"] == True:
            v_clean["is_gl2_type_display"] = '✔'  # checkmark
        else:
            v_clean["is_gl2_type_display"] = ''
        v_clean["equation_formatted"] = list_to_min_eqn(v["min_eqn"])
        v_clean["st_group_name"] = st_group_name(isogeny_class['st_group'])
        v_clean["st_group_href"] = st_group_href(isogeny_class['st_group'])
        v_clean["analytic_rank"] = v["analytic_rank"]
        res_clean.append(v_clean)

    info["curves"] = res_clean
    info["curve_url"] = lambda dbc: url_for_label(dbc['label'])
    info["isog_url"] = lambda dbc: isog_url_for_label(dbc['label'])
    info["start"] = start
    info["count"] = count
    info["more"] = int(start + count < nres)

    credit = credit_string
    title = 'Genus 2 Curves search results'
    return render_template("search_results_g2.html", info=info, credit=credit,
                           learnmore=learnmore_list(), bread=bread, title=title)
def galois_group_search(**args):
    info = to_dict(args)
    if info.get('jump_to'):
        return redirect(url_for('.by_label', label=info['jump_to']).strip(), 301)
    bread = get_bread([("Search Results", ' ')])
    C = base.getDBConnection()
    query = {}

    def includes_composite(s):
        s = s.replace(' ', '').replace('..', '-')
        for interval in s.split(','):
            if '-' in interval[1:]:
                ix = interval.index('-', 1)
                # use ZZ (not int) so that is_prime() is available
                a, b = ZZ(interval[:ix]), ZZ(interval[ix + 1:])
                if b == a:
                    if a != 1 and not a.is_prime():
                        return True
                if b > a and b > 3:
                    return True
            else:
                a = ZZ(interval)
                if a != 1 and not a.is_prime():
                    return True

    try:
        parse_ints(info, query, 'n', 'degree')
        parse_ints(info, query, 't')
        parse_ints(info, query, 'order', qfield='orderkey', parse_singleton=make_order_key)
        parse_bracketed_posints(info, query, qfield='gapidfull', split=False,
                                exactlength=2, keepbrackets=True, name='Gap id', field='gapid')
        for param in ('cyc', 'solv', 'prim', 'parity'):
            parse_bool(info, query, param, minus_one_to_zero=(param != 'parity'))
        degree_str = prep_ranges(info.get('n'))
        info['show_subs'] = degree_str is None or (LIST_RE.match(degree_str) and includes_composite(degree_str))
    except ValueError as err:
        info['err'] = str(err)
        return search_input_error(info, bread)

    count = parse_count(info, 50)
    start = parse_start(info)

    if 'orderkey' in query and not ('n' in query):
        res = C.transitivegroups.groups.find(query).sort([('orderkey', pymongo.ASCENDING),
                                                          ('gapid', pymongo.ASCENDING),
                                                          ('n', pymongo.ASCENDING),
                                                          ('t', pymongo.ASCENDING)])
    else:
        res = C.transitivegroups.groups.find(query).sort([('n', pymongo.ASCENDING),
                                                          ('t', pymongo.ASCENDING)])
    nres = res.count()
    res = res.skip(start).limit(count)

    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0

    info['groups'] = res
    info['group_display'] = group_display_prettyC(C)
    info['report'] = "found %s groups" % nres
    info['yesno'] = yesno
    info['wgg'] = WebGaloisGroup.from_data
    info['start'] = start
    info['number'] = nres
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    return render_template("gg-search.html", info=info, title="Galois Group Search Result",
                           bread=bread, credit=GG_credit)
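# Illustration (not part of the LMFDB source): includes_composite above decides
# whether a degree range like "4..8,10" can contain a composite number; any
# interval with b > a and b > 3 must, since among consecutive integers > 3 one
# is even. A standalone re-creation with a plain-Python primality test standing
# in for ZZ(...).is_prime():

def is_prime(n):
    return n > 1 and all(n % d for d in range(2, int(n ** 0.5) + 1))

def includes_composite(s):
    s = s.replace(' ', '').replace('..', '-')
    for interval in s.split(','):
        if '-' in interval[1:]:
            ix = interval.index('-', 1)
            a, b = int(interval[:ix]), int(interval[ix + 1:])
            if (b == a and a != 1 and not is_prime(a)) or (b > a and b > 3):
                return True
        elif interval != '1' and not is_prime(int(interval)):
            return True
    return False

assert includes_composite("4..8,10")    # 4 is already composite
assert not includes_composite("2,3,5")  # all prime
assert includes_composite("3-5")        # the interval 3..5 contains 4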
def abelian_variety_search(**args):
    info = to_dict(args)
    if 'download' in info and info['download'] != 0:
        return download_search(info)

    bread = args.get('bread', get_bread(('Search Results', ' ')))
    if 'jump' in info:
        return by_label(info.get('label', ''))
    query = {}

    try:
        parse_ints(info, query, 'q', name='base field')
        parse_ints(info, query, 'g', name='dimension')
        if 'simple' in info:
            if info['simple'] == 'yes':
                query['is_simp'] = True
            elif info['simple'] == 'no':
                query['is_simp'] = False
        if 'primitive' in info:
            if info['primitive'] == 'yes':
                query['is_prim'] = True
            elif info['primitive'] == 'no':
                query['is_prim'] = False
        if 'jacobian' in info:
            jac = info['jacobian']
            if jac == 'yes':
                query['is_jac'] = 1
            elif jac == 'not_no':
                query['is_jac'] = {'$gt': -1}
            elif jac == 'not_yes':
                query['is_jac'] = {'$lt': 1}
            elif jac == 'no':
                query['is_jac'] = -1
        if 'polarizable' in info:
            pol = info['polarizable']
            if pol == 'yes':
                query['is_pp'] = 1
            elif pol == 'not_no':
                query['is_pp'] = {'$gt': -1}
            elif pol == 'not_yes':
                query['is_pp'] = {'$lt': 1}
            elif pol == 'no':
                query['is_pp'] = -1
        parse_ints(info, query, 'p_rank')
        parse_ints(info, query, 'ang_rank')
        parse_newton_polygon(info, query, 'newton_polygon', qfield='slps')  # TODO
        parse_string_start(info, query, 'initial_coefficients', qfield='poly', initial_segment=["1"])
        parse_string_start(info, query, 'abvar_point_count', qfield='A_cnts')
        parse_string_start(info, query, 'curve_point_count', qfield='C_cnts', first_field='pt_cnt')
        parse_abvar_decomp(info, query, 'decomposition', qfield='decomp', av_stats=AbvarFqStats())
        parse_nf_string(info, query, 'number_field', qfield='nf')
        parse_galgrp(info, query, qfield='gal')
    except ValueError:
        return search_input_error(info, bread)

    info['query'] = query
    count = parse_count(info, 50)
    start = parse_start(info)

    cursor = db().find(query)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0

    res = cursor.sort([('sort', ASCENDING)]).skip(start).limit(count)
    res = list(res)
    info['abvars'] = [AbvarFq_isoclass(x) for x in res]
    info['number'] = nres
    info['start'] = start
    info['count'] = count
    info['more'] = int(start + count < nres)
    if nres == 1:
        info['report'] = 'unique match'
    elif nres == 0:
        info['report'] = 'no matches'
    elif nres > count or start != 0:
        info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
    else:
        info['report'] = 'displaying all %s matches' % nres
    t = 'Abelian Variety search results'
    return render_template("abvarfq-search-results.html", info=info, credit=abvarfq_credit,
                           bread=bread, title=t)
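# Illustration (not part of the LMFDB source): the jacobian/polarizable menus
# above encode a three-valued answer as yes = 1, unknown = 0, no = -1, so
# "not_no" becomes {'$gt': -1} (i.e. 0 or 1) and "not_yes" becomes {'$lt': 1}
# (i.e. 0 or -1). A minimal check of that encoding against an in-memory filter:

TRICHOTOMY = {'yes': 1, 'unknown': 0, 'no': -1}

def matches(value, cond):
    # evaluate a scalar or a one-operator Mongo-style condition
    if isinstance(cond, dict):
        op, bound = next(iter(cond.items()))
        return value > bound if op == '$gt' else value < bound
    return value == cond

records = [TRICHOTOMY['yes'], TRICHOTOMY['unknown'], TRICHOTOMY['no']]
assert [v for v in records if matches(v, {'$gt': -1})] == [1, 0]   # not_no
assert [v for v in records if matches(v, {'$lt': 1})] == [0, -1]   # not_yes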
def search(**args):
    info = to_dict(args)
    query = {}
    if 'label' in info:
        return search_by_label(info['label'])
    info['st0_list'] = st0_list
    info['st0_dict'] = st0_dict
    bread = [('Sato-Tate groups', url_for('.index')), ('Search Results', '.')]
    if not query:
        try:
            parse_ints(info, query, 'weight')
            parse_ints(info, query, 'degree')
            if info.get('identity_component'):
                query['identity_component'] = info['identity_component']
            parse_ints(info, query, 'components')
            parse_rational(info, query, 'trace_zero_density')
        except ValueError as err:
            info['err'] = str(err)
            return render_template('results.html', info=info,
                                   title='Sato-Tate groups search input error',
                                   bread=bread, credit=credit_string)

    cursor = st_groups().find(query)
    info['query'] = dict(query)
    count = parse_count(info, 50)
    start = parse_start(info)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0
    res = cursor.sort([('degree', ASCENDING), ('real_dimension', ASCENDING),
                       ('identity_component', ASCENDING), ('name', ASCENDING)]).skip(start).limit(count)
    nres = res.count()
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    res_clean = []
    for v in res:
        v_clean = {}
        v_clean['label'] = v['label']
        v_clean['weight'] = v['weight']
        v_clean['degree'] = v['degree']
        v_clean['real_dimension'] = v['real_dimension']
        v_clean['identity_component'] = st0_pretty(v['identity_component'])
        v_clean['name'] = v['name']
        v_clean['pretty'] = v['pretty']
        v_clean['components'] = v['components']
        v_clean['component_group'] = sg_pretty(v['component_group'])
        v_clean['trace_zero_density'] = v['trace_zero_density']
        v_clean['trace_moments'] = trace_moments(v['moments'])
        res_clean.append(v_clean)

    info['stgroups'] = res_clean
    info['stgroup_url'] = lambda dbc: url_for('.by_label', label=dbc['label'])
    info['start'] = start
    info['count'] = count
    info['more'] = int(start + count < nres)

    credit = credit_string
    title = 'Sato-Tate group search results'
    return render_template('results.html', info=info, credit=credit,
                           learnmore=learnmore_list(), bread=bread, title=title)
def abelian_variety_search(**args):
    info = to_dict(args)
    if 'download' in info and info['download'] != 0:
        return download_search(info)

    bread = args.get('bread', get_bread(('Search Results', ' ')))
    if 'jump' in info:
        return by_label(info.get('label', ''))
    query = {}
    try:
        parse_ints(info, query, 'q')
        parse_ints(info, query, 'g')
        if 'simple' in info:
            if info['simple'] == 'yes':
                query['decomposition'] = {'$size': 1}
                query['decomposition.0.1'] = 1
            elif info['simple'] == 'no':
                query['$or'] = [{'decomposition': {'$not': {'$size': 1}}},
                                {'decomposition.0.1': {'$gt': 1}}]
        if 'primitive' in info:
            if info['primitive'] == 'yes':
                query['primitive_models'] = {'$size': 0}
            elif info['primitive'] == 'no':
                query['primitive_models'] = {'$not': {'$size': 0}}
        if 'jacobian' in info:
            if info['jacobian'] == 'yes':
                query['known_jacobian'] = 1
            elif info['jacobian'] == 'no':
                query['known_jacobian'] = -1
            else:
                info['jacobian'] = "any"
        if 'polarizable' in info:
            if info['polarizable'] == 'yes':
                query['principally_polarizable'] = 1
            elif info['polarizable'] == 'no':
                query['principally_polarizable'] = -1
            else:
                info['polarizable'] = "any"
        parse_ints(info, query, 'p_rank')
        parse_ints(info, query, 'angle_ranks')
        parse_newton_polygon(info, query, 'newton_polygon', qfield='slopes')
        parse_list_start(info, query, 'initial_coefficients', qfield='polynomial', index_shift=1)
        parse_list_start(info, query, 'abvar_point_count', qfield='A_counts', parse_singleton=str)
        parse_list_start(info, query, 'curve_point_count', qfield='C_counts', parse_singleton=str)
        parse_abvar_decomp(info, query, 'decomposition', av_stats=AbvarFqStats())
        parse_nf_string(info, query, 'number_field')
    except ValueError:
        return search_input_error(info, bread)

    info['query'] = query
    count = parse_count(info, 50)
    start = parse_start(info)

    cursor = db().find(query)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0

    #res = cursor.sort([]).skip(start).limit(count)
    res = cursor.skip(start).limit(count)
    res = list(res)
    info['abvars'] = [AbvarFq_isoclass(x) for x in res]
    info['number'] = nres
    info['start'] = start
    info['count'] = count
    info['more'] = int(start + count < nres)
    if nres == 1:
        info['report'] = 'unique match'
    elif nres == 0:
        info['report'] = 'no matches'
    elif nres > count or start != 0:
        info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
    else:
        info['report'] = 'displaying all %s matches' % nres
    t = 'Abelian Variety search results'
    return render_template("abvarfq-search-results.html", info=info, credit=abvarfq_credit,
                           bread=bread, title=t)
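# Illustration (not part of the LMFDB source): the older encoding above marks an
# abelian variety as primitive exactly when its list of primitive models is
# empty, so "primitive: yes" is the Mongo condition {'$size': 0} and "no" is its
# negation {'$not': {'$size': 0}}. The same test against toy in-memory records
# (labels here are made up for the demo):

records = [{'label': 'A', 'primitive_models': []},
           {'label': 'B', 'primitive_models': ['1.2.a']}]
primitive = [r['label'] for r in records if len(r['primitive_models']) == 0]
non_primitive = [r['label'] for r in records if len(r['primitive_models']) != 0]
assert primitive == ['A'] and non_primitive == ['B']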
def belyi_search(info):
    if 'jump' in info:
        jump = info["jump"].strip()
        if re.match(r'^\d+T\d+-\[\d+,\d+,\d+\]-\d+-\d+-\d+-g\d+-[a-z]+$', jump):
            return redirect(url_for_belyi_galmap_label(jump), 301)
        else:
            if re.match(r'^\d+T\d+-\[\d+,\d+,\d+\]-\d+-\d+-\d+-g\d+$', jump):
                return redirect(url_for_belyi_passport_label(jump), 301)
            else:
                errmsg = "%s is not a valid Belyi map or passport label"
                flash_error(errmsg, jump)
        return redirect(url_for(".index"))
    if info.get('download', '').strip():
        return download_search(info)

    # search options
    info['geometry_types_list'] = geometry_types_list
    info['geometry_types_dict'] = geometry_types_dict

    bread = info.get('bread', (('Belyi Maps', url_for(".index")), ('Search Results', '.')))

    query = {}
    try:
        if 'group' in query:
            info['group'] = query['group']
        parse_bracketed_posints(info, query, 'abc_list', 'a, b, c', maxlength=3)
        if query.get('abc_list'):
            if len(query['abc_list']) == 3:
                a, b, c = sorted(query['abc_list'])
                query['a_s'] = a
                query['b_s'] = b
                query['c_s'] = c
            elif len(query['abc_list']) == 2:
                a, b = sorted(query['abc_list'])
                sub_query = []
                sub_query.append({'a_s': a, 'b_s': b})
                sub_query.append({'b_s': a, 'c_s': b})
                query['$or'] = sub_query
            elif len(query['abc_list']) == 1:
                a = query['abc_list'][0]
                query['$or'] = [{'a_s': a}, {'b_s': a}, {'c_s': a}]
            query.pop('abc_list')

        # a naive hack
        if info.get('abc'):
            for elt in ['a_s', 'b_s', 'c_s']:
                info_hack = {}
                info_hack[elt] = info['abc']
                parse_ints(info_hack, query, elt)

        parse_ints(info, query, 'g', 'g')
        parse_ints(info, query, 'deg', 'deg')
        parse_ints(info, query, 'orbit_size', 'orbit_size')
        # invariants and drop-list items don't require parsing -- they are all strings
        # (supplied by us, not the user)
        for fld in ['geomtype', 'group']:
            if info.get(fld):
                query[fld] = info[fld]
    except ValueError as err:
        info['err'] = str(err)
        return render_template("belyi_search_results.html", info=info,
                               title='Belyi Maps Search Input Error',
                               bread=bread, credit=credit_string)

    # Database query happens here
    info["query"] = query  # save query for reuse in download_search
    cursor = belyi_db_galmaps().find(query, {'_id': False, 'label': True, 'group': True,
                                             'abc': True, 'g': True, 'deg': True,
                                             'geomtype': True, 'orbit_size': True})

    count = parse_count(info, 50)
    start = parse_start(info)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0

    res = cursor.sort([("deg", ASCENDING), ("group_num", ASCENDING),
                       ("g", ASCENDING), ("label", ASCENDING)]).skip(start).limit(count)
    nres = res.count()

    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    res_clean = []
    for v in res:
        v_clean = {}
        for key in ('label', 'group', 'deg', 'g', 'orbit_size'):
            v_clean[key] = v[key]
        v_clean['geomtype'] = geometry_types_dict[v['geomtype']]
        res_clean.append(v_clean)

    info["belyi_galmaps"] = res_clean
    info["belyi_galmap_url"] = lambda label: url_for_belyi_galmap_label(label)
    info["start"] = start
    info["count"] = count
    info["more"] = int(start + count < nres)

    title = info.get('title', 'Belyi map search results')
    credit = credit_string
    return render_template("belyi_search_results.html", info=info, credit=credit,
                           learnmore=learnmore_list(), bread=bread, title=title)
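# Illustration (not part of the LMFDB source): the abc_list handling above
# stores the sorted triple as a_s <= b_s <= c_s, so a full triple pins all
# three fields while a partial list expands into an $or over the possible
# slots. A minimal re-creation of just that query-building step; abc_query is
# a hypothetical helper for the demo:

def abc_query(abc_list):
    query = {}
    vals = sorted(abc_list)
    if len(vals) == 3:
        query['a_s'], query['b_s'], query['c_s'] = vals
    elif len(vals) == 2:
        a, b = vals
        query['$or'] = [{'a_s': a, 'b_s': b}, {'b_s': a, 'c_s': b}]
    elif len(vals) == 1:
        a = vals[0]
        query['$or'] = [{'a_s': a}, {'b_s': a}, {'c_s': a}]
    return query

assert abc_query([3, 2, 7]) == {'a_s': 2, 'b_s': 3, 'c_s': 7}
assert abc_query([5]) == {'$or': [{'a_s': 5}, {'b_s': 5}, {'c_s': 5}]}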
def elliptic_curve_search(info):
    if info.get('download') == '1' and info.get('Submit') and info.get('query'):
        return download_search(info)

    if not 'query' in info:
        info['query'] = {}

    bread = info.get('bread', [('Elliptic Curves', url_for(".index")), ('Search Results', '.')])

    if 'jump' in info:
        label = info.get('label', '').replace(" ", "")
        # This label should be a full isogeny class label or a full
        # curve label (including the field_label component)
        try:
            nf, cond_label, iso_label, number = split_full_label(label.strip())
        except ValueError:
            info['err'] = ''
            return search_input_error(info, bread)
        return redirect(url_for(".show_ecnf", nf=nf, conductor_label=cond_label,
                                class_label=iso_label, number=number), 301)

    query = {}

    if 'jinv' in info:
        if info.get('field', '').strip() == '2.2.5.1':
            info['jinv'] = info['jinv'].replace('phi', 'a')
        if info.get('field', '').strip() == '2.0.4.1':
            info['jinv'] = info['jinv'].replace('i', 'a')

    try:
        parse_ints(info, query, 'conductor_norm')
        parse_noop(info, query, 'conductor_label')
        parse_nf_string(info, query, 'field', name="base number field", qfield='field_label')
        parse_nf_elt(info, query, 'jinv', name='j-invariant')
        parse_ints(info, query, 'torsion', name='Torsion order', qfield='torsion_order')
        parse_bracketed_posints(info, query, 'torsion_structure', maxlength=2)
        if 'torsion_structure' in query and not 'torsion_order' in query:
            query['torsion_order'] = reduce(mul, [int(n) for n in query['torsion_structure']], 1)
        parse_ints(info, query, field='isodeg', qfield='isogeny_degrees')
    except (TypeError, ValueError):
        return search_input_error(info, bread)

    if query.get('jinv'):
        query['jinv'] = ','.join(query['jinv'])

    if query.get('field_label') == '1.1.1.1':
        return redirect(url_for("ec.rational_elliptic_curves", **request.args), 301)

    if 'include_isogenous' in info and info['include_isogenous'] == 'off':
        info['number'] = 1
        query['number'] = 1

    if 'include_base_change' in info and info['include_base_change'] == 'off':
        query['base_change'] = []
    else:
        info['include_base_change'] = "on"

    if 'include_Q_curves' in info:
        if info['include_Q_curves'] == 'exclude':
            query['q_curve'] = False
        elif info['include_Q_curves'] == 'only':
            query['q_curve'] = True

    if 'include_cm' in info:
        if info['include_cm'] == 'exclude':
            query['cm'] = 0
        elif info['include_cm'] == 'only':
            query['cm'] = {'$ne': 0}

    info['query'] = query
    count = parse_count(info, 50)
    start = parse_start(info)

    # make the query and trim results according to start/count:
    cursor = db_ecnf().find(query)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0

    res = cursor.sort([('field_label', ASC), ('conductor_norm', ASC),
                       ('conductor_label', ASC), ('iso_nlabel', ASC),
                       ('number', ASC)]).skip(start).limit(count)
    res = list(res)
    for e in res:
        e['numb'] = str(e['number'])
        e['field_knowl'] = nf_display_knowl(e['field_label'], getDBConnection(),
                                            field_pretty(e['field_label']))

    info['curves'] = res  # [ECNF(e) for e in res]
    info['number'] = nres
    info['start'] = start
    info['count'] = count
    info['more'] = int(start + count < nres)
    info['field_pretty'] = field_pretty
    info['web_ainvs'] = web_ainvs
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    t = info.get('title', 'Elliptic Curve search results')
    return render_template("ecnf-search-results.html", info=info, credit=ecnf_credit,
                           bread=bread, title=t)
def search(**args):
    """
    Query processing for Sato-Tate groups -- returns rendered results page.
    """
    info = to_dict(args)
    if 'jump' in info:
        return redirect(url_for('.by_label', label=info['jump']), 301)
    if 'label' in info:
        return redirect(url_for('.by_label', label=info['label']), 301)
    template_kwds = {'bread': [('Sato-Tate Groups', url_for('.index')), ('Search Results', '.')],
                     'credit': credit_string,
                     'learnmore': learnmore_list()}
    title = 'Sato-Tate Group Search Results'
    err_title = 'Sato-Tate Groups Search Input Error'
    count = parse_count(info, 25)
    start = parse_start(info)
    # if the user clicked refine search, always restart at 0
    if 'refine' in info:
        start = 0
    ratonly = True if info.get('rational_only', 'no').strip().lower() == 'yes' else False
    query = {'rational': True} if ratonly else {}
    try:
        parse_ints(info, query, 'weight', 'weight')
        if 'weight' in query:
            weight_list = parse_ints_to_list_flash(info.get('weight'), 'weight')
        parse_ints(info, query, 'degree', 'degree')
        if 'degree' in query:
            degree_list = parse_ints_to_list_flash(info.get('degree'), 'degree')
        if info.get('identity_component'):
            query['identity_component'] = info['identity_component']
        parse_ints(info, query, 'components', 'components')
        if 'components' in query:
            components_list = parse_ints_to_list_flash(info.get('components'), 'components')
        parse_rational(info, query, 'trace_zero_density', 'trace zero density')
    except ValueError as err:
        info['err'] = str(err)
        return render_template('st_results.html', info=info, title=err_title, **template_kwds)

    # Check mu(n) groups first (these are not stored in the database)
    results = []
    if (not 'weight' in query or 0 in weight_list) and \
       (not 'degree' in query or 1 in degree_list) and \
       (not 'identity_component' in query or query['identity_component'] == 'SO(1)') and \
       (not 'trace_zero_density' in query or query['trace_zero_density'] == '0'):
        if not 'components' in query:
            components_list = xrange(1, 3 if ratonly else start + count + 1)
        elif ratonly:
            components_list = [n for n in range(1, 3) if n in components_list]
        nres = len(components_list) if 'components' in query or ratonly else INFINITY
        for n in itertools.islice(components_list, start, start + count):
            results.append(mu_info(n))
    else:
        nres = 0

    if 'result_count' in info:
        nres += db.gps_sato_tate.count(query)
        return jsonify({"nres": str(nres)})

    # Now look up other (rational) ST groups in the database
    if nres != INFINITY:
        start2 = start - nres if start > nres else 0
        proj = ['label', 'weight', 'degree', 'real_dimension', 'identity_component',
                'name', 'pretty', 'components', 'component_group', 'trace_zero_density', 'moments']
        try:
            res = db.gps_sato_tate.search(query, proj, limit=max(count - len(results), 0),
                                          offset=start2, info=info)
        except QueryCanceledError as err:
            ctx = ctx_proc_userdata()
            flash_error('The search query took longer than expected! Please help us improve by '
                        'reporting this error <a href="%s" target=_blank>here</a>.' % ctx['feedbackpage'])
            info['err'] = str(err)
            return render_template('st_results.html', info=info, title=err_title, **template_kwds)
        info['number'] += nres
        if start < info['number'] and len(results) < count:
            for v in res:
                v['identity_component'] = st0_pretty(v['identity_component'])
                v['component_group'] = sg_pretty(v['component_group'])
                v['trace_moments'] = trace_moments(v['moments'])
                results.append(v)
    else:
        info['number'] = 'infinity'

    info['start'] = start
    info['count'] = count
    info['st0_list'] = st0_list
    info['st0_dict'] = st0_dict
    info['results'] = results
    info['stgroup_url'] = lambda dbc: url_for('.by_label', label=dbc['label'])
    return render_template('st_results.html', info=info, title=title, **template_kwds)
def search(**args):
    """
    Query processing for Sato-Tate groups -- returns rendered results page.
    """
    info = to_dict(args)
    if 'jump' in info:
        return redirect(url_for('.by_label', label=info['jump']), 301)
    if 'label' in info:
        return redirect(url_for('.by_label', label=info['label']), 301)
    bread = [('Sato-Tate Groups', url_for('.index')), ('Search Results', '.')]
    count = parse_count(info, 25)
    start = parse_start(info)
    # if the user clicked refine search, always restart at 0
    if 'refine' in info:
        start = 0
    ratonly = True if info.get('rational_only', 'no').strip().lower() == 'yes' else False
    query = {'rational': True} if ratonly else {}
    try:
        parse_ints(info, query, 'weight', 'weight')
        if 'weight' in query:
            weight_list = parse_ints_to_list_flash(info.get('weight'), 'weight')
        parse_ints(info, query, 'degree', 'degree')
        if 'degree' in query:
            degree_list = parse_ints_to_list_flash(info.get('degree'), 'degree')
        if info.get('identity_component'):
            query['identity_component'] = info['identity_component']
        parse_ints(info, query, 'components', 'components')
        if 'components' in query:
            components_list = parse_ints_to_list_flash(info.get('components'), 'components')
        parse_rational(info, query, 'trace_zero_density', 'trace zero density')
    except ValueError as err:
        info['err'] = str(err)
        return render_template('st_results.html', info=info,
                               title='Sato-Tate Groups Search Input Error',
                               bread=bread, credit=credit_string)

    # Check mu(n) groups first (these are not stored in the database)
    results = []
    if (not 'weight' in query or 0 in weight_list) and \
       (not 'degree' in query or 1 in degree_list) and \
       (not 'identity_component' in query or query['identity_component'] == 'SO(1)') and \
       (not 'trace_zero_density' in query or query['trace_zero_density'] == '0'):
        if not 'components' in query:
            components_list = xrange(1, 3 if ratonly else start + count + 1)
        elif ratonly:
            components_list = [n for n in range(1, 3) if n in components_list]
        nres = len(components_list) if 'components' in query or ratonly else INFINITY
        for n in itertools.islice(components_list, start, start + count):
            results.append(mu_info(n))
    else:
        nres = 0

    # Now look up other (rational) ST groups in the database
    if nres != INFINITY:
        cursor = st_groups().find(query)
        start2 = start - nres if start > nres else 0
        nres += cursor.count()
        if start < nres and len(results) < count:
            res = cursor.sort([('weight', ASCENDING), ('degree', ASCENDING),
                               ('real_dimension', ASCENDING), ('identity_component', ASCENDING),
                               ('name', ASCENDING)]).skip(start2).limit(count - len(results))
            for v in res:
                v_clean = {}
                v_clean['label'] = v['label']
                v_clean['weight'] = v['weight']
                v_clean['degree'] = v['degree']
                v_clean['real_dimension'] = v['real_dimension']
                v_clean['identity_component'] = st0_pretty(v['identity_component'])
                v_clean['name'] = v['name']
                v_clean['pretty'] = v['pretty']
                v_clean['components'] = v['components']
                v_clean['component_group'] = sg_pretty(v['component_group'])
                v_clean['trace_zero_density'] = v['trace_zero_density']
                v_clean['trace_moments'] = trace_moments(v['moments'])
                results.append(v_clean)

    if nres == 0:
        info['report'] = 'no matches'
    elif nres == 1:
        info['report'] = 'unique match'
    else:
        if nres == INFINITY or nres > count or start > 0:
            info['report'] = 'displaying matches %d-%d %s' % (start + 1, start + len(results),
                                                              "of %d" % nres if nres != INFINITY else "")
        else:
            info['report'] = 'displaying all %s matches' % nres

    info['st0_list'] = st0_list
    info['st0_dict'] = st0_dict
    info['stgroups'] = results
    info['stgroup_url'] = lambda dbc: url_for('.by_label', label=dbc['label'])
    info['start'] = start
    info['count'] = count
    info['more'] = 1 if nres < 0 or nres > start + count else 0

    title = 'Sato-Tate Group Search Results'
    return render_template('st_results.html', info=info, credit=credit_string,
                           learnmore=learnmore_list(), bread=bread, title=title)
def elliptic_curve_search(info):
    if info.get('download') == '1' and info.get('Submit') and info.get('query'):
        return download_search(info)

    if 'SearchAgain' in info:
        return rational_elliptic_curves()

    query = {}
    bread = info.get('bread', [('Elliptic Curves', url_for("ecnf.index")),
                               ('$\Q$', url_for(".rational_elliptic_curves")),
                               ('Search Results', '.')])

    if 'jump' in info:
        label = info.get('label', '').replace(" ", "")
        m = match_lmfdb_label(label)
        if m:
            try:
                return by_ec_label(label)
            except ValueError:
                return elliptic_curve_jump_error(label, info, wellformed_label=True)
        elif label.startswith("Cremona:"):
            label = label[8:]
            m = match_cremona_label(label)
            if m:
                try:
                    return by_ec_label(label)
                except ValueError:
                    return elliptic_curve_jump_error(label, info, wellformed_label=True)
        elif match_cremona_label(label):
            return elliptic_curve_jump_error(label, info, cremona_label=True)
        elif label:
            # Try to parse a string like [1,0,3,2,4] as valid
            # Weierstrass coefficients:
            lab = re.sub(r'\s', '', label)
            lab = re.sub(r'^\[', '', lab)
            lab = re.sub(r']$', '', lab)
            try:
                labvec = lab.split(',')
                labvec = [QQ(str(z)) for z in labvec]  # Rationals allowed
                E = EllipticCurve(labvec).minimal_model()
                # Now we do have a valid curve over Q, but it might
                # not be in the database.
                data = db_ec().find_one({'xainvs': EC_ainvs(E)})
                if data is None:
                    info['conductor'] = E.conductor()
                    return elliptic_curve_jump_error(label, info, missing_curve=True)
                return by_ec_label(data['lmfdb_label'])
            except (TypeError, ValueError, ArithmeticError):
                return elliptic_curve_jump_error(label, info)
        else:
            query['label'] = ''

    try:
        parse_rational(info, query, 'jinv', 'j-invariant')
        parse_ints(info, query, 'conductor')
        parse_ints(info, query, 'torsion', 'torsion order')
        parse_ints(info, query, 'rank')
        parse_ints(info, query, 'sha', 'analytic order of Ш')
        parse_bracketed_posints(info, query, 'torsion_structure', maxlength=2,
                                process=str, check_divisibility='increasing')
        # speed up slow torsion_structure searches by also setting torsion
        if 'torsion_structure' in query and not 'torsion' in query:
            query['torsion'] = reduce(mul, [int(n) for n in query['torsion_structure']], 1)
        if 'include_cm' in info:
            if info['include_cm'] == 'exclude':
                query['cm'] = 0
            elif info['include_cm'] == 'only':
                query['cm'] = {'$ne': 0}
        parse_ints(info, query, field='isodeg', qfield='isogeny_degrees')
        parse_primes(info, query, 'surj_primes', name='surjective primes',
                     qfield='non-maximal_primes', mode='complement')
        if info.get('surj_quantifier') == 'exactly':
            mode = 'exact'
        else:
            mode = 'append'
        parse_primes(info, query, 'nonsurj_primes', name='non-surjective primes',
                     qfield='non-maximal_primes', mode=mode)
    except ValueError as err:
        info['err'] = str(err)
        return search_input_error(info, bread)

    count = parse_count(info, 100)
    start = parse_start(info)

    if 'optimal' in info and info['optimal'] == 'on':
        # fails on 990h3
        query['number'] = 1

    info['query'] = query

    cursor = db_ec().find(query)
    cursor = cursor.sort([('conductor', ASCENDING), ('iso_nlabel', ASCENDING),
                          ('lmfdb_number', ASCENDING)])
    # equivalent to
    # cursor = res
    # nres = res.count()
    # if(start >= nres):
    #     start -= (1 + (start - nres) / count) * count
    # if(start < 0):
    #     start = 0
    # res = res.skip(start).limit(count)
    try:
        start, nres, res = search_cursor_timeout_decorator(cursor, start, count)
    except ValueError as err:
        info['err'] = err
        return search_input_error(info, bread)

    info['curves'] = res
    info['curve_url'] = lambda dbc: url_for(".by_triple_label", conductor=dbc['conductor'],
                                            iso_label=split_lmfdb_label(dbc['lmfdb_iso'])[1],
                                            number=dbc['lmfdb_number'])
    info['iso_url'] = lambda dbc: url_for(".by_double_iso_label", conductor=dbc['conductor'],
                                          iso_label=split_lmfdb_label(dbc['lmfdb_iso'])[1])
    info['number'] = nres
    info['start'] = start
    info['count'] = count
    info['more'] = int(start + count < nres)

    if nres == 1:
        info['report'] = 'unique match'
    elif nres == 2:
        info['report'] = 'displaying both matches'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    t = info.get('title', 'Elliptic Curves search results')
    return render_template("ec-search-results.html", info=info, credit=ec_credit(),
                           bread=bread, title=t)
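# Illustration (not part of the LMFDB source): the torsion_structure shortcut
# above also constrains the (indexed) torsion order, since the order of the
# group Z/m x Z/n is m*n; that lets the database discard most rows before
# comparing structure lists. The product via reduce, exactly as in the code
# above (reduce is the Python 2 builtin; functools.reduce in Python 3):

from operator import mul

torsion_structure = ['2', '4']  # the group Z/2 x Z/4
torsion = reduce(mul, [int(n) for n in torsion_structure], 1)
assert torsion == 8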
def elliptic_curve_search(**args):
    info = to_dict(args['data'])
    if 'download' in info and info['download'] != 0:
        return download_search(info)

    bread = [('Elliptic Curves', url_for(".index")), ('Search Results', '.')]
    if 'jump' in info:
        label = info.get('label', '').replace(" ", "")
        # This label should be a full isogeny class label or a full
        # curve label (including the field_label component)
        try:
            nf, cond_label, iso_label, number = split_full_label(label.strip())
        except ValueError:
            if not 'query' in info:
                info['query'] = {}
            info['err'] = ''
            return search_input_error(info, bread)
        return show_ecnf(nf, cond_label, iso_label, number)

    query = {}
    try:
        parse_ints(info, query, 'conductor_norm')
        parse_noop(info, query, 'conductor_label')
        parse_nf_string(info, query, 'field', name="base number field", qfield='field_label')
        parse_nf_elt(info, query, 'jinv', name='j-invariant')
        parse_ints(info, query, 'torsion', name='Torsion order', qfield='torsion_order')
        parse_bracketed_posints(info, query, 'torsion_structure', maxlength=2)
    except ValueError:
        return search_input_error(info, bread)

    if 'include_isogenous' in info and info['include_isogenous'] == 'off':
        info['number'] = 1
        query['number'] = 1

    if 'include_base_change' in info and info['include_base_change'] == 'off':
        query['base_change'] = []
    else:
        info['include_base_change'] = "on"

    info['query'] = query
    count = parse_count(info, 50)
    start = parse_start(info)

    # make the query and trim results according to start/count:
    cursor = db_ecnf().find(query)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) // count) * count
    if start < 0:
        start = 0
    res = cursor.sort([('field_label', ASC), ('conductor_norm', ASC),
                       ('conductor_label', ASC), ('iso_nlabel', ASC),
                       ('number', ASC)]).skip(start).limit(count)
    res = list(res)
    for e in res:
        e['numb'] = str(e['number'])
        e['field_knowl'] = nf_display_knowl(e['field_label'], getDBConnection(),
                                            field_pretty(e['field_label']))

    info['curves'] = res  # [ECNF(e) for e in res]
    info['number'] = nres
    info['start'] = start
    info['count'] = count
    info['more'] = int(start + count < nres)
    info['field_pretty'] = field_pretty
    info['web_ainvs'] = web_ainvs
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    t = 'Elliptic Curve search results'
    return render_template("ecnf-search-results.html", info=info, credit=ecnf_credit,
                           bread=bread, title=t)
def genus2_curve_search(**args):
    info = to_dict(args['data'])
    if 'jump' in info:
        jump = info["jump"].strip()
        curve_label_regex = re.compile(r'\d+\.[a-z]+\.\d+\.\d+$')  # dots escaped: label separators are literal '.'
        if curve_label_regex.match(jump):
            return redirect(url_for_curve_label(jump), 301)
        else:
            class_label_regex = re.compile(r'\d+\.[a-z]+$')
            if class_label_regex.match(jump):
                return redirect(url_for_isogeny_class_label(jump), 301)
            else:
                # Handle direct Lhash input
                lhash_regex = re.compile(r'#\d+$')
                if lhash_regex.match(jump) and ZZ(jump[1:]) < 2**61:
                    c = g2cdb().isogeny_classes.find_one({'Lhash': jump[1:].strip()})
                    if c:
                        return redirect(url_for_isogeny_class_label(c["label"]), 301)
                    else:
                        errmsg = "Hash not found"
                else:
                    errmsg = "Invalid label"
        flash(Markup(errmsg + " <span style='color:black'>%s</span>" % (jump)), "error")
        return redirect(url_for(".index"))
    if 'download' in info and info['download'] == '1':
        return download_search(info)
    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    bread = info.get('bread', (('Genus 2 Curves', url_for(".index")),
                               ('$\Q$', url_for(".index_Q")),
                               ('Search Results', '.')))
    query = {}
    try:
        parse_ints(info, query, 'abs_disc', 'absolute discriminant')
        parse_bool(info, query, 'is_gl2_type')
        parse_bool(info, query, 'has_square_sha')
        parse_bool(info, query, 'locally_solvable')
        parse_bool(info, query, 'is_simple_geom')
        parse_bracketed_posints(info, query, 'torsion', 'torsion structure',
                                maxlength=4, check_divisibility="increasing")
        parse_ints(info, query, 'cond')
        parse_ints(info, query, 'num_rat_wpts', 'Weierstrass points')
        parse_ints(info, query, 'torsion_order')
        if 'torsion' in query and not 'torsion_order' in query:
            query['torsion_order'] = reduce(mul, [int(n) for n in query['torsion']], 1)
        parse_ints(info, query, 'two_selmer_rank', '2-Selmer rank')
        parse_ints(info, query, 'analytic_rank', 'analytic rank')
        # G2 invariants and drop-list items don't require parsing -- they are all strings (supplied by us, not the user)
        if 'g20' in info and 'g21' in info and 'g22' in info:
            query['g2inv'] = [info['g20'], info['g21'], info['g22']]
        if 'class' in info:
            query['class'] = info['class']
        for fld in ('st_group', 'real_geom_end_alg', 'aut_grp_id', 'geom_aut_grp_id'):
            if info.get(fld):
                query[fld] = info[fld]
    except ValueError as err:
        info['err'] = str(err)
        return render_template("search_results_g2.html", info=info,
                               title='Genus 2 Curves Search Input Error',
                               bread=bread, credit=credit_string)
    info["query"] = dict(query)
    # Database query happens here
    cursor = g2cdb().curves.find(query, {'_id': int(0), 'label': int(1),
                                         'min_eqn': int(1), 'st_group': int(1),
                                         'is_gl2_type': int(1), 'analytic_rank': int(1)})
    count = parse_count(info, 50)
    start = parse_start(info)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0
    res = cursor.sort([("cond", ASCENDING), ("class", ASCENDING),
                       ("disc_key", ASCENDING), ("label", ASCENDING)]).skip(start).limit(count)
    nres = res.count()
    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    res_clean = []
    for v in res:
        v_clean = {}
        v_clean["label"] = v["label"]
        v_clean["class"] = class_from_curve_label(v["label"])
        v_clean["is_gl2_type"] = v["is_gl2_type"]
        v_clean["is_gl2_type_display"] = '✔' if v["is_gl2_type"] else ''  # display checkmark if true, blank otherwise
        v_clean["equation_formatted"] = list_to_min_eqn(v["min_eqn"])
        v_clean["st_group_name"] = st_group_name(v['st_group'])
        v_clean["st_group_href"] = st_group_href(v['st_group'])
        v_clean["analytic_rank"] = v["analytic_rank"]
        res_clean.append(v_clean)
    info["curves"] = res_clean
    info["curve_url"] = lambda label: url_for_curve_label(label)
    info["class_url"] = lambda label: url_for_isogeny_class_label(label)
    info["start"] = start
    info["count"] = count
    info["more"] = int(start + count < nres)
    title = info.get('title', 'Genus 2 Curve search results')
    credit = credit_string
    return render_template("search_results_g2.html", info=info, credit=credit,
                           learnmore=learnmore_list(), bread=bread, title=title)

def higher_genus_w_automorphisms_search(**args):
    info = to_dict(args)
    bread = get_bread([("Search results", '')])
    C = base.getDBConnection()
    query = {}
    if 'jump_to' in info:
        labs = info['jump_to']
        if label_is_one_passport(labs):
            return render_passport({'passport_label': labs})
        elif label_is_one_family(labs):
            return render_family({'label': labs})
        else:
            flash_error("The label %s is not a legitimate label for this data.", labs)
            return redirect(url_for(".index"))
    # allow for ; in signature
    if info.get('signature'):
        info['signature'] = info['signature'].replace(';', ',')
    try:
        parse_gap_id(info, query, 'group', 'Group')
        parse_ints(info, query, 'genus', name='Genus')
        parse_bracketed_posints(info, query, 'signature', split=False,
                                name='Signature', keepbrackets=True)
        if query.get('signature'):
            query['signature'] = info['signature'] = str(
                sort_sign(ast.literal_eval(query['signature']))).replace(' ', '')
        parse_ints(info, query, 'dim', name='Dimension of the family')
        if 'inc_hyper' in info:
            if info['inc_hyper'] == 'exclude':
                query['hyperelliptic'] = False
            elif info['inc_hyper'] == 'only':
                query['hyperelliptic'] = True
        if 'inc_cyc_trig' in info:
            if info['inc_cyc_trig'] == 'exclude':
                query['cyclic_trigonal'] = False
            elif info['inc_cyc_trig'] == 'only':
                query['cyclic_trigonal'] = True
        if 'inc_full' in info:
            if info['inc_full'] == 'exclude':
                query['full_auto'] = {'$exists': True}
            elif info['inc_full'] == 'only':
                query['full_auto'] = {'$exists': False}
        query['cc.1'] = 1
    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)
    if 'groupsize' in info and info['groupsize'] != '':
        err, result = add_group_order_range(query, info['groupsize'], C)
        if err is not None:
            flash_error('Parse error on group order field. <font face="Courier New"><br />Given: '
                        + err + '<br />-------' + result + '</font>')
    res = C.curve_automorphisms.passports.find(query).sort([
        ('genus', pymongo.ASCENDING),
        ('dim', pymongo.ASCENDING),
        ('cc'[0], pymongo.ASCENDING)  # note: 'cc'[0] is just 'c'
    ])
    nres = res.count()
    res = res.skip(start).limit(count)
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0
    L = []
    for field in res:
        field['signature'] = ast.literal_eval(field['signature'])
        L.append(field)
    if 'download_magma' in info:
        code = "// MAGMA CODE FOR SEARCH RESULTS\n\n"
        first_download_entry = True
        for field in L:
            if first_download_entry:
                code += ('\n'.join(hgcwa_code(label=field['passport_label'],
                                              download_type='magma').split('\n')[1:])).replace(
                    ", and generate data which is the same for all entries", "")
            else:
                code += hgcwa_code(label=field['passport_label'],
                                   download_type='magma').split('result_record:=[];')[1]
            first_download_entry = False
        response = make_response(code)
        response.headers['Content-type'] = 'text/plain'
        return response
    elif 'download_gap' in info:
        code = "# GAP CODE FOR SEARCH RESULTS\n\n"
        first_download_entry = True
        for field in L:
            if first_download_entry:
                code += ('\n'.join(hgcwa_code(label=field['passport_label'],
                                              download_type='gap').split('\n')[1:])).replace(
                    "# Generate data which is the same for all entries.\n", "")
            else:
                code += hgcwa_code(label=field['passport_label'],
                                   download_type='gap').split('result_record:=[];')[1]
            first_download_entry = False
        response = make_response(code)
        response.headers['Content-type'] = 'text/plain'
        return response
    info['fields'] = L
    info['number'] = nres
    info['group_display'] = sg_pretty
    info['show_downloads'] = len(L) > 0
    info['sign_display'] = sign_display
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    return render_template("hgcwa-search.html", info=info,
                           title="Families of Higher Genus Curves with Automorphisms Search Results",
                           credit=credit, bread=bread)
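
# The two download branches above share one pattern: build a text blob and
# serve it as a plain-text attachment via Flask. A minimal self-contained
# sketch of that pattern (hypothetical route and payload; make_response and
# the headers mapping are standard Flask):
from flask import Flask, make_response

app = Flask(__name__)

@app.route('/download')
def download():
    code = "# GAP CODE FOR SEARCH RESULTS\n\n"
    code += "result_record:=[];\n"
    response = make_response(code)
    response.headers['Content-type'] = 'text/plain'
    return response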
def elliptic_curve_search(**args): info = to_dict(args) if "download" in info and info["download"] != "0": return download_search(info) query = {} bread = [ ("Elliptic Curves", url_for("ecnf.index")), ("$\Q$", url_for(".rational_elliptic_curves")), ("Search Results", "."), ] if "SearchAgain" in args: return rational_elliptic_curves() if "jump" in args: label = info.get("label", "").replace(" ", "") m = match_lmfdb_label(label) if m: try: return by_ec_label(label) except ValueError: return elliptic_curve_jump_error(label, info, wellformed_label=True) elif label.startswith("Cremona:"): label = label[8:] m = match_cremona_label(label) if m: try: return by_ec_label(label) except ValueError: return elliptic_curve_jump_error(label, info, wellformed_label=True) elif match_cremona_label(label): return elliptic_curve_jump_error(label, info, cremona_label=True) elif label: # Try to parse a string like [1,0,3,2,4] as valid # Weistrass coefficients: lab = re.sub(r"\s", "", label) lab = re.sub(r"^\[", "", lab) lab = re.sub(r"]$", "", lab) try: labvec = lab.split(",") labvec = [QQ(str(z)) for z in labvec] # Rationals allowed E = EllipticCurve(labvec) # Now we do have a valid curve over Q, but it might # not be in the database. ainvs = [str(c) for c in E.minimal_model().ainvs()] data = db_ec().find_one({"ainvs": ainvs}) if data is None: info["conductor"] = E.conductor() return elliptic_curve_jump_error(label, info, missing_curve=True) return by_ec_label(data["lmfdb_label"]) except (TypeError, ValueError, ArithmeticError): return elliptic_curve_jump_error(label, info) else: query["label"] = "" try: parse_rational(info, query, "jinv", "j-invariant") parse_ints(info, query, "conductor") parse_ints(info, query, "torsion", "torsion order") parse_ints(info, query, "rank") parse_ints(info, query, "sha", "analytic order of Ш") parse_bracketed_posints( info, query, "torsion_structure", maxlength=2, process=str, check_divisibility="increasing" ) parse_primes( info, query, "surj_primes", name="surjective primes", qfield="non-surjective_primes", mode="complement" ) if info.get("surj_quantifier") == "exactly": mode = "exact" else: mode = "append" parse_primes( info, query, "nonsurj_primes", name="non-surjective primes", qfield="non-surjective_primes", mode=mode ) except ValueError as err: info["err"] = str(err) return search_input_error(info, bread) count = parse_count(info, 100) start = parse_start(info) if "optimal" in info and info["optimal"] == "on": # fails on 990h3 query["number"] = 1 info["query"] = query cursor = db_ec().find(query) nres = cursor.count() if start >= nres: start -= (1 + (start - nres) / count) * count if start < 0: start = 0 res = ( cursor.sort([("conductor", ASCENDING), ("iso_nlabel", ASCENDING), ("lmfdb_number", ASCENDING)]) .skip(start) .limit(count) ) info["curves"] = res info["format_ainvs"] = format_ainvs info["curve_url"] = lambda dbc: url_for( ".by_triple_label", conductor=dbc["conductor"], iso_label=split_lmfdb_label(dbc["lmfdb_iso"])[1], number=dbc["lmfdb_number"], ) info["iso_url"] = lambda dbc: url_for( ".by_double_iso_label", conductor=dbc["conductor"], iso_label=split_lmfdb_label(dbc["lmfdb_iso"])[1] ) info["number"] = nres info["start"] = start info["count"] = count info["more"] = int(start + count < nres) if nres == 1: info["report"] = "unique match" elif nres == 2: info["report"] = "displaying both matches" else: if nres > count or start != 0: info["report"] = "displaying matches %s-%s of %s" % (start + 1, min(nres, start + count), nres) else: info["report"] = "displaying all 
%s matches" % nres credit = "John Cremona" if "non-surjective_primes" in query: credit += "and Andrew Sutherland" t = "Elliptic Curves search results" return render_template("search_results.html", info=info, credit=credit, bread=bread, title=t)

def genus2_curve_search(info):
    if 'jump' in info:
        jump = info["jump"].strip()
        if re.match(r'^\d+\.[a-z]+\.\d+\.\d+$', jump):
            return redirect(url_for_curve_label(jump), 301)
        else:
            if re.match(r'^\d+\.[a-z]+$', jump):
                return redirect(url_for_isogeny_class_label(jump), 301)
            else:
                # Handle direct Lhash input
                if re.match(r'^\#\d+$', jump) and ZZ(jump[1:]) < 2**61:
                    c = g2c_db_curves().find_one({'Lhash': jump[1:].strip()})
                    if c:
                        return redirect(url_for_isogeny_class_label(c["class"]), 301)
                    else:
                        errmsg = "hash %s not found"
                else:
                    errmsg = "%s is not a valid genus 2 curve or isogeny class label"
        flash_error(errmsg, jump)
        return redirect(url_for(".index"))
    if info.get('download', '').strip() == '1':
        return download_search(info)
    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    bread = info.get('bread', (('Genus 2 Curves', url_for(".index")),
                               ('$\Q$', url_for(".index_Q")),
                               ('Search Results', '.')))
    query = {}
    try:
        parse_ints(info, query, 'abs_disc', 'absolute discriminant')
        parse_bool(info, query, 'is_gl2_type', 'is of GL2-type')
        parse_bool(info, query, 'has_square_sha', 'has square Sha')
        parse_bool(info, query, 'locally_solvable', 'is locally solvable')
        parse_bool(info, query, 'is_simple_geom', 'is geometrically simple')
        parse_ints(info, query, 'cond', 'conductor')
        parse_ints(info, query, 'num_rat_wpts', 'rational Weierstrass points')
        parse_bracketed_posints(info, query, 'torsion', 'torsion structure',
                                maxlength=4, check_divisibility="increasing")
        parse_ints(info, query, 'torsion_order', 'torsion order')
        if 'torsion' in query and not 'torsion_order' in query:
            query['torsion_order'] = reduce(mul, [int(n) for n in query['torsion']], 1)
        if 'torsion' in query:
            query['torsion_subgroup'] = str(query['torsion']).replace(" ", "")
            query.pop('torsion')  # search using string key, not array of ints
        parse_ints(info, query, 'two_selmer_rank', '2-Selmer rank')
        parse_ints(info, query, 'analytic_rank', 'analytic rank')
        # G2 invariants and drop-list items don't require parsing -- they are all strings (supplied by us, not the user)
        if 'g20' in info and 'g21' in info and 'g22' in info:
            query['g2_inv'] = "['%s','%s','%s']" % (info['g20'], info['g21'], info['g22'])
        if 'class' in info:
            query['class'] = info['class']
        for fld in ('st_group', 'real_geom_end_alg', 'aut_grp_id', 'geom_aut_grp_id'):
            if info.get(fld):
                query[fld] = info[fld]
    except ValueError as err:
        info['err'] = str(err)
        return render_template("g2c_search_results.html", info=info,
                               title='Genus 2 Curves Search Input Error',
                               bread=bread, credit=credit_string)
    # Database query happens here
    info["query"] = query  # save query for reuse in download_search
    cursor = g2c_db_curves().find(query, {'_id': False, 'label': True, 'eqn': True,
                                          'st_group': True, 'is_gl2_type': True,
                                          'is_simple_geom': True, 'analytic_rank': True})
    count = parse_count(info, 50)
    start = parse_start(info)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0
    res = cursor.sort([("cond", ASCENDING), ("class", ASCENDING),
                       ("disc_key", ASCENDING), ("label", ASCENDING)]).skip(start).limit(count)
    nres = res.count()
    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    res_clean = []
    for v in res:
        v_clean = {}
        v_clean["label"] = v["label"]
        v_clean["class"] = class_from_curve_label(v["label"])
        v_clean["is_gl2_type"] = v["is_gl2_type"]
        v_clean["is_simple_geom"] = v["is_simple_geom"]
        v_clean["equation_formatted"] = list_to_min_eqn(literal_eval(v["eqn"]))
        v_clean["st_group_link"] = st_link_by_name(1, 4, v['st_group'])
        v_clean["analytic_rank"] = v["analytic_rank"]
        res_clean.append(v_clean)
    info["curves"] = res_clean
    info["curve_url"] = lambda label: url_for_curve_label(label)
    info["class_url"] = lambda label: url_for_isogeny_class_label(label)
    info["start"] = start
    info["count"] = count
    info["more"] = int(start + count < nres)
    title = info.get('title', 'Genus 2 Curve search results')
    credit = credit_string
    return render_template("g2c_search_results.html", info=info, credit=credit,
                           learnmore=learnmore_list(), bread=bread, title=title)
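
# The torsion-order fallback above multiplies out the invariant factors of
# the requested torsion structure. A minimal sketch of that computation
# (reduce is a builtin in Python 2, as used throughout these handlers):
from operator import mul

torsion = ['2', '6']  # e.g. Z/2 x Z/6
torsion_order = reduce(mul, [int(n) for n in torsion], 1)
assert torsion_order == 12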

def search(**args):
    """ query processing for Sato-Tate groups -- returns rendered results page """
    info = to_dict(args)
    if 'jump' in info:
        return redirect(url_for('.by_label', label=info['jump']), 301)
    if 'label' in info:
        return redirect(url_for('.by_label', label=info['label']), 301)
    template_kwds = {'bread': [('Sato-Tate Groups', url_for('.index')), ('Search Results', '.')],
                     'credit': credit_string,
                     'learnmore': learnmore_list()}
    title = 'Sato-Tate Group Search Results'
    err_title = 'Sato-Tate Groups Search Input Error'
    count = parse_count(info, 25)
    start = parse_start(info)
    # if user clicked refine search always restart at 0
    if 'refine' in info:
        start = 0
    ratonly = True if info.get('rational_only', 'no').strip().lower() == 'yes' else False
    query = {'rational': True} if ratonly else {}
    try:
        parse_ints(info, query, 'weight', 'weight')
        if 'weight' in query:
            weight_list = parse_ints_to_list_flash(info.get('weight'), 'weight')
        parse_ints(info, query, 'degree', 'degree')
        if 'degree' in query:
            degree_list = parse_ints_to_list_flash(info.get('degree'), 'degree')
        if info.get('identity_component'):
            query['identity_component'] = info['identity_component']
        parse_ints(info, query, 'components', 'components')
        if 'components' in query:
            components_list = parse_ints_to_list_flash(info.get('components'), 'components')
        parse_rational(info, query, 'trace_zero_density', 'trace zero density')
    except ValueError as err:
        info['err'] = str(err)
        return render_template('st_results.html', info=info, title=err_title, **template_kwds)
    # Check mu(n) groups first (these are not stored in the database)
    results = []
    if (not 'weight' in query or 0 in weight_list) and \
       (not 'degree' in query or 1 in degree_list) and \
       (not 'identity_component' in query or query['identity_component'] == 'SO(1)') and \
       (not 'trace_zero_density' in query or query['trace_zero_density'] == '0'):
        if not 'components' in query:
            components_list = xrange(1, 3 if ratonly else start + count + 1)
        elif ratonly:
            components_list = [n for n in range(1, 3) if n in components_list]
        nres = len(components_list) if 'components' in query or ratonly else INFINITY
        for n in itertools.islice(components_list, start, start + count):
            results.append(mu_info(n))
    else:
        nres = 0
    if 'result_count' in info:
        nres += db.gps_sato_tate.count(query)
        return jsonify({"nres": str(nres)})
    # Now lookup other (rational) ST groups in database
    if nres != INFINITY:
        start2 = start - nres if start > nres else 0
        proj = ['label', 'weight', 'degree', 'real_dimension', 'identity_component',
                'name', 'pretty', 'components', 'component_group', 'trace_zero_density', 'moments']
        try:
            res = db.gps_sato_tate.search(query, proj, limit=max(count - len(results), 0),
                                          offset=start2, info=info)
        except QueryCanceledError as err:
            ctx = ctx_proc_userdata()
            flash_error('The search query took longer than expected! Please help us improve by reporting this error <a href="%s" target=_blank>here</a>.' % ctx['feedbackpage'])
            info['err'] = str(err)
            return render_template('st_results.html', info=info, title=err_title, **template_kwds)
        info['number'] += nres
        if start < info['number'] and len(results) < count:
            for v in res:
                v['identity_component'] = st0_pretty(v['identity_component'])
                v['component_group'] = sg_pretty(v['component_group'])
                v['trace_moments'] = trace_moments(v['moments'])
                results.append(v)
    else:
        info['number'] = 'infinity'
    info['start'] = start
    info['count'] = count
    info['st0_list'] = st0_list
    info['st0_dict'] = st0_dict
    info['results'] = results
    info['stgroup_url'] = lambda dbc: url_for('.by_label', label=dbc['label'])
    return render_template('st_results.html', info=info, title=title, **template_kwds)
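
# The mu(n) branch above pages through a family that is not stored in the
# database (and may be unbounded) using itertools.islice. A standalone
# sketch of slicing one page out of a lazy sequence (mu_stub is a
# hypothetical stand-in for mu_info):
import itertools

def mu_stub(n):
    return {'label': 'mu.%d' % n, 'components': n}

start, count = 5, 3
window = [mu_stub(n) for n in itertools.islice(itertools.count(1), start, start + count)]
assert [w['components'] for w in window] == [6, 7, 8]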

def __call__(self, info, random=False):
    # If random is True, returns a random label
    info = to_dict(info, exclude=['bread'])  # I'm not sure why this is required...
    for key, func in self.shortcuts.items():
        if info.get(key, '').strip():
            return func(info)
    query = {}
    template_kwds = {}
    for key in self.kwds:
        template_kwds[key] = info.get(key, self.kwds[key]())
    try:
        errpage = self.f(info, query)
    except ValueError as err:
        # Errors raised in parsing
        info['err'] = str(err)
        err_title = query.pop('__err_title__', self.err_title)
        return render_template(self.template, info=info, title=err_title, **template_kwds)
    else:
        err_title = query.pop('__err_title__', self.err_title)
    if errpage is not None:
        return errpage
    if 'result_count' in info:
        nres = self.table.count(query)
        return jsonify({"nres": str(nres)})
    sort = query.pop('__sort__', None)
    table = query.pop('__table__', self.table)
    # We want to pop __title__ even if overridden by info.
    title = query.pop('__title__', self.title)
    title = info.get('title', title)
    template = query.pop('__template__', self.template)
    if random:
        query.pop('__projection__', None)
    proj = query.pop('__projection__', self.projection)
    count = parse_count(info, self.per_page)
    start = parse_start(info)
    try:
        if random:
            # Ignore __projection__: it's intended for searches
            label = table.random(query, projection=0)
            if label is None:
                res = []
                # ugh; we have to set these manually
                info['query'] = dict(query)
                info['number'] = 0
                info['count'] = count
                info['start'] = start
                info['exact_count'] = True
            else:
                return redirect(self.url_for_label(label), 307)
        else:
            res = table.search(query, proj, limit=count, offset=start, sort=sort, info=info)
    except QueryCanceledError as err:
        ctx = ctx_proc_userdata()
        flash_error('The search query took longer than expected! Please help us improve by reporting this error <a href="%s" target=_blank>here</a>.' % ctx['feedbackpage'])
        info['err'] = str(err)
        info['query'] = dict(query)
        return render_template(self.template, info=info, title=self.err_title, **template_kwds)
    else:
        try:
            if self.cleaners:
                for v in res:
                    for name, func in self.cleaners.items():
                        v[name] = func(v)
            if self.postprocess is not None:
                res = self.postprocess(res, info, query)
        except ValueError as err:
            # Errors raised in postprocessing
            flash_error(str(err))
            info['err'] = str(err)
            return render_template(self.template, info=info, title=err_title, **template_kwds)
        for key, func in self.longcuts.items():
            if info.get(key, '').strip():
                return func(res, info, query)
        info['results'] = res
        return render_template(template, info=info, title=title, **template_kwds)
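
# The wrapper above lets each per-route search function override defaults by
# planting reserved '__*__' keys in the query dict, which are popped off
# before the query reaches the database. A minimal sketch of that
# convention (hypothetical search function and keys shown for illustration):
def my_search_func(info, query):
    query['genus'] = 2
    query['__sort__'] = [('cond', 1)]    # override the default sort order
    query['__title__'] = 'Custom title'  # override the page title

query = {}
my_search_func({}, query)
sort = query.pop('__sort__', None)
title = query.pop('__title__', 'Default title')
assert query == {'genus': 2} and sort == [('cond', 1)] and title == 'Custom title'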
def elliptic_curve_search(**args): info = to_dict(args["data"]) if "download" in info and info["download"] != 0: return download_search(info) if not "query" in info: info["query"] = {} bread = [("Elliptic Curves", url_for(".index")), ("Search Results", ".")] if "jump" in info: label = info.get("label", "").replace(" ", "") # This label should be a full isogeny class label or a full # curve label (including the field_label component) try: nf, cond_label, iso_label, number = split_full_label(label.strip()) except ValueError: info["err"] = "" return search_input_error(info, bread) return show_ecnf(nf, cond_label, iso_label, number) query = {} try: parse_ints(info, query, "conductor_norm") parse_noop(info, query, "conductor_label") parse_nf_string(info, query, "field", name="base number field", qfield="field_label") parse_nf_elt(info, query, "jinv", name="j-invariant") parse_ints(info, query, "torsion", name="Torsion order", qfield="torsion_order") parse_bracketed_posints(info, query, "torsion_structure", maxlength=2) if "torsion_structure" in query and not "torsion_order" in query: query["torsion_order"] = reduce(mul, [int(n) for n in query["torsion_structure"]], 1) except ValueError: return search_input_error(info, bread) if "include_isogenous" in info and info["include_isogenous"] == "off": info["number"] = 1 query["number"] = 1 if "include_base_change" in info and info["include_base_change"] == "off": query["base_change"] = [] else: info["include_base_change"] = "on" if "include_Q_curves" in info: if info["include_Q_curves"] == "exclude": query["q_curve"] = False elif info["include_Q_curves"] == "only": query["q_curve"] = True if "include_cm" in info: if info["include_cm"] == "exclude": query["cm"] = 0 elif info["include_cm"] == "only": query["cm"] = {"$ne": 0} info["query"] = query count = parse_count(info, 50) start = parse_start(info) # make the query and trim results according to start/count: cursor = db_ecnf().find(query) nres = cursor.count() if start >= nres: start -= (1 + (start - nres) / count) * count if start < 0: start = 0 res = ( cursor.sort( [ ("field_label", ASC), ("conductor_norm", ASC), ("conductor_label", ASC), ("iso_nlabel", ASC), ("number", ASC), ] ) .skip(start) .limit(count) ) res = list(res) for e in res: e["numb"] = str(e["number"]) e["field_knowl"] = nf_display_knowl(e["field_label"], getDBConnection(), field_pretty(e["field_label"])) info["curves"] = res # [ECNF(e) for e in res] info["number"] = nres info["start"] = start info["count"] = count info["more"] = int(start + count < nres) info["field_pretty"] = field_pretty info["web_ainvs"] = web_ainvs if nres == 1: info["report"] = "unique match" else: if nres > count or start != 0: info["report"] = "displaying matches %s-%s of %s" % (start + 1, min(nres, start + count), nres) else: info["report"] = "displaying all %s matches" % nres t = "Elliptic Curve search results" return render_template("ecnf-search-results.html", info=info, credit=ecnf_credit, bread=bread, title=t)

def number_field_search(info):
    info['learnmore'] = [('Global number field labels', url_for(".render_labels_page")),
                         ('Galois group labels', url_for(".render_groups_page")),
                         (Completename, url_for(".render_discriminants_page")),
                         ('Quadratic imaginary class groups', url_for(".render_class_group_data"))]
    t = 'Global Number Field search results'
    bread = [('Global Number Fields', url_for(".number_field_render_webpage")),
             ('Search Results', ' ')]
    if 'natural' in info:
        query = {'label_orig': info['natural']}
        try:
            parse_nf_string(info, query, 'natural', name="Label", qfield='label')
            return redirect(url_for(".by_label", label=query['label']))
        except ValueError:
            query['err'] = info['err']
            return search_input_error(query, bread)
    if 'algebra' in info:
        fields = info['algebra'].split('_')
        fields2 = [WebNumberField.from_coeffs(a) for a in fields]
        for j in range(len(fields)):
            if fields2[j] is None:
                fields2[j] = WebNumberField.fakenf(fields[j])
        t = 'Number field algebra'
        info = {'fields': fields2}
        return render_template("number_field_algebra.html", info=info, title=t, bread=bread)
    query = {}
    try:
        parse_galgrp(info, query, qfield='galois')
        parse_ints(info, query, 'degree')
        parse_bracketed_posints(info, query, 'signature', split=False, exactlength=2)
        parse_signed_ints(info, query, 'discriminant',
                          qfield=('disc_sign', 'disc_abs_key'), parse_one=make_disc_key)
        parse_ints(info, query, 'class_number')
        parse_bracketed_posints(info, query, 'class_group', split=False,
                                check_divisibility='increasing')
        parse_primes(info, query, 'ur_primes', name='Unramified primes',
                     qfield='ramps', mode='complement', to_string=True)
        # modes are now contained (in), exactly, include
        if 'ram_quantifier' in info and str(info['ram_quantifier']) == 'include':
            mode = 'append'
            parse_primes(info, query, 'ram_primes', 'ramified primes', 'ramps',
                         mode, to_string=True)
        elif 'ram_quantifier' in info and str(info['ram_quantifier']) == 'contained':
            parse_primes(info, query, 'ram_primes', 'ramified primes', 'ramps_all',
                         'subsets', to_string=False)
        else:
            mode = 'liststring'
            parse_primes(info, query, 'ram_primes', 'ramified primes', 'ramps_all', mode)
    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)
    # nf_logger.debug(query)
    info['query'] = dict(query)
    if 'lucky' in info:
        one = nfdb().find_one(query)
        if one:
            label = one['label']
            return redirect(url_for(".by_label", label=clean_input(label)))
    fields = nfdb()
    res = fields.find(query)
    res = res.sort([('degree', ASC), ('disc_abs_key', ASC), ('disc_sign', ASC)])
    if 'download' in info and info['download'] != '0':
        return download_search(info, res)
    # equivalent to
    # nres = res.count()
    # if(start >= nres):
    #     start -= (1 + (start - nres) / count) * count
    # if(start < 0):
    #     start = 0
    # res = res.skip(start).limit(count)
    try:
        start, nres, res = search_cursor_timeout_decorator(res, start, count)
    except ValueError as err:
        info['err'] = err
        return search_input_error(info, bread)
    info['fields'] = res
    info['number'] = nres
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    info['wnf'] = WebNumberField.from_data
    return render_template("number_field_search.html", info=info, title=t, bread=bread)

def higher_genus_w_automorphisms_search(**args):
    info = to_dict(args)
    bread = get_bread([("Search results", '')])
    C = base.getDBConnection()
    query = {}
    if 'jump_to' in info:
        labs = info['jump_to']
        if label_is_one_passport(labs):
            return render_passport({'passport_label': labs})
        elif label_is_one_family(labs):
            return render_family({'label': labs})
        else:
            flash_error("The label %s is not a legitimate label for this data.", labs)
            return redirect(url_for(".index"))
    # allow for ; in signature
    if info.get('signature'):
        info['signature'] = info['signature'].replace(';', ',')
    try:
        parse_gap_id(info, query, 'group', 'Group')
        parse_ints(info, query, 'genus', name='Genus')
        parse_bracketed_posints(info, query, 'signature', split=False,
                                name='Signature', keepbrackets=True)
        if query.get('signature'):
            query['signature'] = info['signature'] = str(
                sort_sign(ast.literal_eval(query['signature']))).replace(' ', '')
        parse_ints(info, query, 'dim', name='Dimension of the family')
        if 'inc_hyper' in info:
            if info['inc_hyper'] == 'exclude':
                query['hyperelliptic'] = False
            elif info['inc_hyper'] == 'only':
                query['hyperelliptic'] = True
        if 'inc_cyc_trig' in info:
            if info['inc_cyc_trig'] == 'exclude':
                query['cyclic_trigonal'] = False
            elif info['inc_cyc_trig'] == 'only':
                query['cyclic_trigonal'] = True
        if 'inc_full' in info:
            if info['inc_full'] == 'exclude':
                query['full_auto'] = {'$exists': True}
            elif info['inc_full'] == 'only':
                query['full_auto'] = {'$exists': False}
        query['cc.1'] = 1
    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)
    res = C.curve_automorphisms.passports.find(query).sort([
        ('genus', pymongo.ASCENDING),
        ('dim', pymongo.ASCENDING),
        ('cc'[0], pymongo.ASCENDING)  # note: 'cc'[0] is just 'c'
    ])
    nres = res.count()
    res = res.skip(start).limit(count)
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0
    L = []
    for field in res:
        field['signature'] = ast.literal_eval(field['signature'])
        L.append(field)
    info['fields'] = L
    info['number'] = nres
    info['group_display'] = sg_pretty
    info['sign_display'] = sign_display
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    return render_template("hgcwa-search.html", info=info,
                           title="Families of Higher Genus Curves with Automorphisms Search Results",
                           bread=bread)
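
# The signature normalization above round-trips a bracketed string through
# ast.literal_eval so that "0;2,3,3,4"-style input (after ';' -> ',')
# compares canonically. A standalone sketch with a stand-in for sort_sign
# (hypothetical: sort_sign_stub assumes the genus entry stays first, which
# may differ from the real sort_sign):
import ast

def sort_sign_stub(sig):
    return [sig[0]] + sorted(sig[1:])

sig_string = '[0,3,2,4,3]'
canonical = str(sort_sign_stub(ast.literal_eval(sig_string))).replace(' ', '')
assert canonical == '[0,2,3,3,4]'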

def number_field_search(**args):
    info = to_dict(args)
    info["learnmore"] = [
        ("Global number field labels", url_for(".render_labels_page")),
        ("Galois group labels", url_for(".render_groups_page")),
        (Completename, url_for(".render_discriminants_page")),
        ("Quadratic imaginary class groups", url_for(".render_class_group_data")),
    ]
    t = "Global Number Field search results"
    bread = [("Global Number Fields", url_for(".number_field_render_webpage")),
             ("Search results", " ")]
    # for k in info.keys():
    #     nf_logger.debug(str(k) + ' ---> ' + str(info[k]))
    # nf_logger.debug('******************* ' + str(info['search']))
    if "natural" in info:
        query = {"label_orig": info["natural"]}
        try:
            parse_nf_string(info, query, "natural", name="Label", qfield="label")
            return redirect(url_for(".by_label", label=clean_input(query["label"])))
        except ValueError:
            query["err"] = info["err"]
            return search_input_error(query, bread)
    query = {}
    try:
        parse_galgrp(info, query, qfield="galois")
        parse_ints(info, query, "degree")
        parse_bracketed_posints(info, query, "signature", split=False, exactlength=2)
        parse_signed_ints(info, query, "discriminant",
                          qfield=("disc_sign", "disc_abs_key"), parse_one=make_disc_key)
        parse_ints(info, query, "class_number")
        parse_bracketed_posints(info, query, "class_group", split=False,
                                check_divisibility="increasing")
        parse_primes(info, query, "ur_primes", name="Unramified primes",
                     qfield="ramps", mode="complement", to_string=True)
        if "ram_quantifier" in info and str(info["ram_quantifier"]) == "some":
            mode = "append"
        else:
            mode = "exact"
        parse_primes(info, query, "ram_primes", "ramified primes", "ramps", mode, to_string=True)
    except ValueError:
        return search_input_error(info, bread)
    count = parse_count(info)
    start = parse_start(info)
    if info.get("paging"):
        try:
            paging = int(info["paging"])
            if paging == 0:
                start = 0
        except ValueError:
            pass
    C = base.getDBConnection()
    # nf_logger.debug(query)
    info["query"] = dict(query)
    if "lucky" in args:
        one = C.numberfields.fields.find_one(query)
        if one:
            label = one["label"]
            return redirect(url_for(".by_label", label=clean_input(label)))
    fields = C.numberfields.fields
    res = fields.find(query)
    if "download" in info and info["download"] != "0":
        return download_search(info, res)
    res = res.sort([("degree", ASC), ("disc_abs_key", ASC), ("disc_sign", ASC)])
    nres = res.count()
    res = res.skip(start).limit(count)
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0
    info["fields"] = res
    info["number"] = nres
    info["start"] = start
    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info["report"] = "displaying matches %s-%s of %s" % (start + 1, min(nres, start + count), nres)
        else:
            info["report"] = "displaying all %s matches" % nres
    info["wnf"] = WebNumberField.from_data
    return render_template("number_field_search.html", info=info, title=t, bread=bread)

def elliptic_curve_search(**args):
    info = to_dict(args)
    if 'download' in info and info['download'] != '0':
        return download_search(info)
    query = {}
    bread = [('Elliptic Curves', url_for("ecnf.index")),
             ('$\Q$', url_for(".rational_elliptic_curves")),
             ('Search Results', '.')]
    if 'SearchAgain' in args:
        return rational_elliptic_curves()
    if 'jump' in args:
        label = info.get('label', '').replace(" ", "")
        m = match_lmfdb_label(label)
        if m:
            try:
                return by_ec_label(label)
            except ValueError:
                return elliptic_curve_jump_error(label, info, wellformed_label=True)
        elif label.startswith("Cremona:"):
            label = label[8:]
            m = match_cremona_label(label)
            if m:
                try:
                    return by_ec_label(label)
                except ValueError:
                    return elliptic_curve_jump_error(label, info, wellformed_label=True)
        elif match_cremona_label(label):
            return elliptic_curve_jump_error(label, info, cremona_label=True)
        elif label:
            # Try to parse a string like [1,0,3,2,4] as valid
            # Weierstrass coefficients:
            lab = re.sub(r'\s', '', label)
            lab = re.sub(r'^\[', '', lab)
            lab = re.sub(r']$', '', lab)
            try:
                labvec = lab.split(',')
                labvec = [QQ(str(z)) for z in labvec]  # Rationals allowed
                E = EllipticCurve(labvec)
                # Now we do have a valid curve over Q, but it might
                # not be in the database.
                ainvs = [str(c) for c in E.minimal_model().ainvs()]
                data = db_ec().find_one({'ainvs': ainvs})
                if data is None:
                    info['conductor'] = E.conductor()
                    return elliptic_curve_jump_error(label, info, missing_curve=True)
                return by_ec_label(data['lmfdb_label'])
            except (TypeError, ValueError, ArithmeticError):
                return elliptic_curve_jump_error(label, info)
        else:
            query['label'] = ''
    try:
        parse_rational(info, query, 'jinv', 'j-invariant')
        parse_ints(info, query, 'conductor')
        parse_ints(info, query, 'torsion', 'torsion order')
        parse_ints(info, query, 'rank')
        parse_ints(info, query, 'sha', 'analytic order of Ш')
        parse_bracketed_posints(info, query, 'torsion_structure', maxlength=2,
                                process=str, check_divisibility='increasing')
        if 'include_cm' in info:
            if info['include_cm'] == 'exclude':
                query['cm'] = 0
            elif info['include_cm'] == 'only':
                query['cm'] = {'$ne': 0}
        parse_primes(info, query, 'surj_primes', name='surjective primes',
                     qfield='non-surjective_primes', mode='complement')
        if info.get('surj_quantifier') == 'exactly':
            mode = 'exact'
        else:
            mode = 'append'
        parse_primes(info, query, 'nonsurj_primes', name='non-surjective primes',
                     qfield='non-surjective_primes', mode=mode)
    except ValueError as err:
        info['err'] = str(err)
        return search_input_error(info, bread)
    count = parse_count(info, 100)
    start = parse_start(info)
    if 'optimal' in info and info['optimal'] == 'on':
        # fails on 990h3
        query['number'] = 1
    info['query'] = query
    cursor = db_ec().find(query)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0
    res = cursor.sort([('conductor', ASCENDING),
                       ('iso_nlabel', ASCENDING),
                       ('lmfdb_number', ASCENDING)]).skip(start).limit(count)
    info['curves'] = res
    info['format_ainvs'] = format_ainvs
    info['curve_url'] = lambda dbc: url_for(".by_triple_label",
                                            conductor=dbc['conductor'],
                                            iso_label=split_lmfdb_label(dbc['lmfdb_iso'])[1],
                                            number=dbc['lmfdb_number'])
    info['iso_url'] = lambda dbc: url_for(".by_double_iso_label",
                                          conductor=dbc['conductor'],
                                          iso_label=split_lmfdb_label(dbc['lmfdb_iso'])[1])
    info['number'] = nres
    info['start'] = start
    info['count'] = count
    info['more'] = int(start + count < nres)
    if nres == 1:
        info['report'] = 'unique match'
    elif nres == 2:
        info['report'] = 'displaying both matches'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    credit = 'John Cremona'
    if 'non-surjective_primes' in query:
        credit += ' and Andrew Sutherland'
    t = 'Elliptic Curves search results'
    return render_template("search_results.html", info=info, credit=credit, bread=bread, title=t)

def genus2_curve_search(**args):
    info = to_dict(args)
    if 'download' in info and info['download'] == '1':
        return download_search(info)
    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    query = {}  # database callable
    bread = [('Genus 2 Curves', url_for(".index")),
             ('$\Q$', url_for(".index_Q")),
             ('Search Results', '.')]
    # if 'SearchAgain' in args:
    #     return rational_genus2_curves()
    if 'jump' in args:
        label_regex = re.compile(r'\d+\.[a-z]+\.\d+\.\d+')  # dots escaped: label separators are literal '.'
        if label_regex.match(info["jump"].strip()):
            data = render_curve_webpage_by_label(info["jump"].strip())
        else:
            data = "Invalid label"
        if data == "Invalid label":
            flash(Markup("The label <span style='color:black'>%s</span> is invalid." % (info["jump"])), "error")
            return redirect(url_for(".index"))
        if data == "Data for curve not found":
            flash(Markup("No genus 2 curve with label <span style='color:black'>%s</span> was found in the database." % (info["jump"])), "error")
            return redirect(url_for(".index"))
        return data
    try:
        parse_ints(info, query, 'abs_disc', 'absolute discriminant')
        parse_bool(info, query, 'is_gl2_type')
        parse_bool(info, query, 'has_square_sha')
        parse_bool(info, query, 'locally_solvable')
        for fld in ('st_group', 'real_geom_end_alg'):
            if info.get(fld):
                query[fld] = info[fld]
        for fld in ('aut_grp', 'geom_aut_grp'):
            parse_bracketed_posints(info, query, fld, exactlength=2)  # encoded as a GAP ID
        # igusa and igusa_clebsch invariants not currently searchable
        parse_bracketed_posints(info, query, 'torsion', 'torsion structure',
                                maxlength=4, check_divisibility="increasing")
        parse_ints(info, query, 'cond', 'conductor')
        parse_ints(info, query, 'num_rat_wpts', 'Weierstrass points')
        parse_ints(info, query, 'torsion_order')
        parse_ints(info, query, 'two_selmer_rank', '2-Selmer rank')
        parse_ints(info, query, 'analytic_rank', 'analytic rank')
    except ValueError as err:
        info['err'] = str(err)
        return render_template("search_results_g2.html", info=info,
                               title='Genus 2 Curves Search Input Error',
                               bread=bread, credit=credit_string)
    info["query"] = dict(query)
    count = parse_count(info, 50)
    start = parse_start(info)
    cursor = db_g2c().curves.find(query)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0
    res = cursor.sort([("cond", pymongo.ASCENDING),
                       ("class", pymongo.ASCENDING),
                       ("disc_key", pymongo.ASCENDING),
                       ("label", pymongo.ASCENDING)]).skip(start).limit(count)
    nres = res.count()
    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    res_clean = []
    for v in res:
        v_clean = {}
        v_clean["label"] = v["label"]
        v_clean["isog_label"] = v["class"]
        isogeny_class = db_g2c().isogeny_classes.find_one({'label': isog_label(v["label"])})
        v_clean["is_gl2_type"] = isogeny_class["is_gl2_type"]
        if isogeny_class["is_gl2_type"] == True:
            v_clean["is_gl2_type_display"] = '✔'  # checkmark
        else:
            v_clean["is_gl2_type_display"] = ''
        v_clean["equation_formatted"] = list_to_min_eqn(v["min_eqn"])
        v_clean["st_group_name"] = st_group_name(isogeny_class['st_group'])
        v_clean["analytic_rank"] = v["analytic_rank"]
        res_clean.append(v_clean)
    info["curves"] = res_clean
    info["curve_url"] = lambda dbc: url_for_label(dbc['label'])
    info["isog_url"] = lambda dbc: isog_url_for_label(dbc['label'])
    info["start"] = start
    info["count"] = count
    info["more"] = int(start + count < nres)
    credit = credit_string
    title = 'Genus 2 Curves search results'
    return render_template("search_results_g2.html", info=info, credit=credit,
                           learnmore=learnmore_list(), bread=bread, title=title)

def bmf_field_dim_table(**args):
    argsdict = to_dict(args)
    argsdict.update(to_dict(request.args))
    gl_or_sl = argsdict['gl_or_sl']
    field_label = argsdict['field_label']
    field_label = nf_string_to_label(field_label)
    start = parse_start(argsdict)
    info = {}
    info['gl_or_sl'] = gl_or_sl
    # level_flag controls whether to list all levels ('all'), only
    # those with positive cuspidal dimension ('cusp'), or only those
    # with positive new dimension ('new').  Default is 'cusp'.
    level_flag = argsdict.get('level_flag', 'cusp')
    info['level_flag'] = level_flag
    count = parse_count(argsdict, 50)
    pretty_field_label = field_pretty(field_label)
    bread = [('Bianchi Modular Forms', url_for(".index")), (pretty_field_label, ' ')]
    properties = []
    if gl_or_sl == 'gl2_dims':
        info['group'] = 'GL(2)'
        info['bgroup'] = '\GL(2,\mathcal{O}_K)'
    else:
        info['group'] = 'SL(2)'
        info['bgroup'] = '\SL(2,\mathcal{O}_K)'
    t = ' '.join(['Dimensions of Spaces of {} Bianchi Modular Forms over'.format(info['group']),
                  pretty_field_label])
    query = {}
    query['field_label'] = field_label
    query[gl_or_sl] = {'$exists': True}
    data = db.bmf_dims.search(query, limit=count, offset=start, info=info)
    nres = info['number']
    if nres > count or start != 0:
        info['report'] = 'Displaying items %s-%s of %s levels,' % (start + 1, min(nres, start + count), nres)
    else:
        info['report'] = 'Displaying all %s levels,' % nres

    # convert data to a list and eliminate levels where all
    # new/cuspidal dimensions are 0.  (This could be done at the
    # search stage, but that requires adding new fields to each
    # record.)
    def filter(dat, flag):
        dat1 = dat[gl_or_sl]
        return any([int(dat1[w][flag]) > 0 for w in dat1])

    flag = 'cuspidal_dim' if level_flag == 'cusp' else 'new_dim'
    data = [dat for dat in data if level_flag == 'all' or filter(dat, flag)]
    info['field'] = field_label
    info['field_pretty'] = pretty_field_label
    nf = WebNumberField(field_label)
    info['base_galois_group'] = nf.galois_string()
    info['field_degree'] = nf.degree()
    info['field_disc'] = str(nf.disc())
    info['field_poly'] = teXify_pol(str(nf.poly()))
    weights = set()
    for dat in data:
        weights = weights.union(set(dat[gl_or_sl].keys()))
    weights = list([int(w) for w in weights])
    weights.sort()
    info['weights'] = weights
    info['nweights'] = len(weights)
    data.sort(key=lambda x: [int(y) for y in x['level_label'].split(".")])
    dims = {}
    for dat in data:
        dims[dat['level_label']] = d = {}
        for w in weights:
            sw = str(w)
            if sw in dat[gl_or_sl]:
                d[w] = {'d': dat[gl_or_sl][sw]['cuspidal_dim'],
                        'n': dat[gl_or_sl][sw]['new_dim']}
            else:
                d[w] = {'d': '?', 'n': '?'}
    info['nlevels'] = len(data)
    dimtable = [{'level_label': dat['level_label'],
                 'level_norm': dat['level_norm'],
                 'level_space': url_for(".render_bmf_space_webpage",
                                        field_label=field_label,
                                        level_label=dat['level_label']) if gl_or_sl == 'gl2_dims' else "",
                 'dims': dims[dat['level_label']]} for dat in data]
    info['dimtable'] = dimtable
    return render_template("bmf-field_dim_table.html", info=info, title=t,
                           properties=properties, bread=bread)
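
# The weight columns above are assembled by taking the union of the string
# weight keys over all levels, then sorting numerically (sorting the raw
# strings would put '10' before '2'). A standalone sketch:
data = [{'2': 'x', '4': 'y'}, {'2': 'z', '10': 'w'}]
weights = set()
for dat in data:
    weights = weights.union(set(dat.keys()))
weights = sorted(int(w) for w in weights)
assert weights == [2, 4, 10]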

def hilbert_modular_form_search(**args):
    info = to_dict(args)  # what has been entered in the search boxes
    if 'label' in info and info['label']:
        lab = info['label'].strip()
        info['label'] = lab
        try:
            split_full_label(lab)
            return hilbert_modular_form_by_label(lab)
        except ValueError:
            return redirect(url_for(".hilbert_modular_form_render_webpage"))
    query = {}
    try:
        parse_nf_string(info, query, 'field_label', name="Field")
        parse_ints(info, query, 'deg', name='Field degree')
        parse_ints(info, query, 'disc', name="Field discriminant")
        parse_ints(info, query, 'dimension')
        parse_ints(info, query, 'level_norm', name="Level norm")
        parse_hmf_weight(info, query, 'weight', qfield=('parallel_weight', 'weight'))
    except ValueError:
        return search_input_error()
    count = parse_count(info, 100)
    start = parse_start(info)
    info['query'] = dict(query)
    C = getDBConnection()
    res = C.hmfs.forms.find(query).sort([('deg', pymongo.ASCENDING),
                                         ('disc', pymongo.ASCENDING),
                                         ('level_norm', pymongo.ASCENDING),
                                         ('level_label', pymongo.ASCENDING),
                                         ('label_nsuffix', pymongo.ASCENDING)]).skip(start).limit(count)
    nres = res.count()
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0
    info['number'] = nres
    info['start'] = start
    info['more'] = int(start + count < nres)
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres == 0:
            info['report'] = 'no matches'
        else:
            if nres > count or start != 0:
                info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
            else:
                info['report'] = 'displaying all %s matches' % nres
    res_clean = []
    for v in res:
        v_clean = {}
        v_clean['field_label'] = v['field_label']
        v_clean['short_label'] = v['short_label']
        v_clean['label'] = v['label']
        v_clean['level_ideal'] = teXify_pol(v['level_ideal'])
        v_clean['dimension'] = v['dimension']
        res_clean.append(v_clean)
    info['forms'] = res_clean
    t = 'Hilbert Modular Form search results'
    bread = [('Hilbert Modular Forms', url_for(".hilbert_modular_form_render_webpage")),
             ('Search results', ' ')]
    properties = []
    return render_template("hilbert_modular_form_search.html", info=info, title=t,
                           credit=hmf_credit, properties=properties, bread=bread,
                           learnmore=learnmore_list())
def rep_galois_modl_search(**args): C = getDBConnection() info = to_dict(args) # what has been entered in the search boxes if "download" in info: return download_search(info) if "label" in info and info.get("label"): return rep_galois_modl_by_label_or_name(info.get("label"), C) query = {} try: for field, name in ( ("dim", "Dimension"), ("det", "Determinant"), ("level", None), ("minimum", "Minimal vector length"), ("class_number", None), ("aut", "Group order"), ): parse_ints(info, query, field, name) # Check if length of gram is triangular gram = info.get("gram") if gram and not (9 + 8 * ZZ(gram.count(","))).is_square(): flash( Markup( "Error: <span style='color:black'>%s</span> is not a valid input for Gram matrix. It must be a list of integer vectors of triangular length, such as [1,2,3]." % (gram) ), "error", ) raise ValueError parse_list(info, query, "gram", process=vect_to_sym) except ValueError as err: info["err"] = str(err) return search_input_error(info) count = parse_count(info, 50) start = parse_start(info) info["query"] = dict(query) res = ( C.mod_l_galois.reps.find(query) .sort([("dim", ASC), ("det", ASC), ("level", ASC), ("class_number", ASC), ("label", ASC)]) .skip(start) .limit(count) ) nres = res.count() if start >= nres: start -= (1 + (start - nres) / count) * count if start < 0: start = 0 info["number"] = nres info["start"] = int(start) info["more"] = int(start + count < nres) if nres == 1: info["report"] = "unique match" else: if nres == 0: info["report"] = "no matches" else: if nres > count or start != 0: info["report"] = "displaying matches %s-%s of %s" % (start + 1, min(nres, start + count), nres) else: info["report"] = "displaying all %s matches" % nres res_clean = [] for v in res: v_clean = {} v_clean["label"] = v["label"] v_clean["dim"] = v["dim"] v_clean["det"] = v["det"] v_clean["level"] = v["level"] v_clean["gram"] = vect_to_matrix(v["gram"]) res_clean.append(v_clean) info["rep_galois_modls"] = res_clean t = "Mod ℓ Galois representations Search Results" bread = [("Representations", "/Representation"), ("mod ℓ", url_for(".index")), ("Search Results", " ")] properties = [] return render_template( "rep_galois_modl-search.html", info=info, title=t, properties=properties, bread=bread, learnmore=learnmore_list(), )
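# Why the check "(9 + 8 * ZZ(gram.count(','))).is_square()" above detects a
# triangular-length list: a symmetric n x n Gram matrix given by its lower
# triangle has m = n(n+1)/2 entries, hence k = m - 1 commas, and
# 9 + 8k = 8m + 1 = 4n(n+1) + 1 = (2n + 1)^2 is a perfect square exactly when
# m is triangular.  A plain-Python version (int(math.sqrt(...)) stands in for
# Sage's ZZ(...).is_square()):
import math

def has_triangular_length(gram_string):
    k = gram_string.count(',')
    s = 9 + 8 * k
    return int(math.sqrt(s)) ** 2 == s

assert has_triangular_length("[1,2,3]")        # 3 = 2*3/2 entries, so n = 2
assert not has_triangular_length("[1,2,3,4]")  # 4 is not triangular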
def number_field_search(info): info['learnmore'] = [ ('Global number field labels', url_for(".render_labels_page")), ('Galois group labels', url_for(".render_groups_page")), (Completename, url_for(".render_discriminants_page")), ('Quadratic imaginary class groups', url_for(".render_class_group_data")) ] t = 'Global Number Field search results' bread = [('Global Number Fields', url_for(".number_field_render_webpage")), ('Search results', ' ')] if 'natural' in info: query = {'label_orig': info['natural']} try: parse_nf_string(info, query, 'natural', name="Label", qfield='label') return redirect( url_for(".by_label", label=clean_input(query['label_orig']))) except ValueError: query['err'] = info['err'] return search_input_error(query, bread) if 'algebra' in info: fields = info['algebra'].split('_') fields2 = [WebNumberField.from_coeffs(a) for a in fields] for j in range(len(fields)): if fields2[j] is None: fields2[j] = WebNumberField.fakenf(fields[j]) t = 'Number field algebra' info = {} info = {'fields': fields2} return render_template("number_field_algebra.html", info=info, title=t, bread=bread) query = {} try: parse_galgrp(info, query, qfield='galois') parse_ints(info, query, 'degree') parse_bracketed_posints(info, query, 'signature', split=False, exactlength=2) parse_signed_ints(info, query, 'discriminant', qfield=('disc_sign', 'disc_abs_key'), parse_one=make_disc_key) parse_ints(info, query, 'class_number') parse_bracketed_posints(info, query, 'class_group', split=False, check_divisibility='increasing') parse_primes(info, query, 'ur_primes', name='Unramified primes', qfield='ramps', mode='complement', to_string=True) # modes are now contained (in), exactly, include if 'ram_quantifier' in info and str( info['ram_quantifier']) == 'include': mode = 'append' parse_primes(info, query, 'ram_primes', 'ramified primes', 'ramps', mode, to_string=True) elif 'ram_quantifier' in info and str( info['ram_quantifier']) == 'contained': parse_primes(info, query, 'ram_primes', 'ramified primes', 'ramps_all', 'subsets', to_string=False) pass # build list else: mode = 'liststring' parse_primes(info, query, 'ram_primes', 'ramified primes', 'ramps_all', mode) except ValueError: return search_input_error(info, bread) count = parse_count(info) start = parse_start(info) # nf_logger.debug(query) info['query'] = dict(query) if 'lucky' in info: one = nfdb().find_one(query) if one: label = one['label'] return redirect(url_for(".by_label", label=clean_input(label))) fields = nfdb() res = fields.find(query) res = res.sort([('degree', ASC), ('disc_abs_key', ASC), ('disc_sign', ASC)]) if 'download' in info and info['download'] != '0': return download_search(info, res) nres = res.count() res = res.skip(start).limit(count) if (start >= nres): start -= (1 + (start - nres) / count) * count if (start < 0): start = 0 info['fields'] = res info['number'] = nres info['start'] = start if nres == 1: info['report'] = 'unique match' else: if nres > count or start != 0: info['report'] = 'displaying matches %s-%s of %s' % ( start + 1, min(nres, start + count), nres) else: info['report'] = 'displaying all %s matches' % nres info['wnf'] = WebNumberField.from_data return render_template("number_field_search.html", info=info, title=t, bread=bread)
def higher_genus_w_automorphisms_search(**args):
    info = to_dict(args)
    bread = get_bread([("Search Results", '')])
    C = base.getDBConnection()
    query = {}
    if 'jump_to' in info:
        labs = info['jump_to']
        if label_is_one_passport(labs):
            return render_passport({'passport_label': labs})
        elif label_is_one_family(labs):
            return render_family({'label': labs})
        else:
            flash_error("The label %s is not a legitimate label for this data.", labs)
            return redirect(url_for(".index"))

    # allow for ; in signature
    if info.get('signature'):
        info['signature'] = info['signature'].replace(';', ',')

    try:
        parse_gap_id(info, query, 'group', 'Group')
        parse_ints(info, query, 'genus', name='Genus')
        parse_bracketed_posints(info, query, 'signature', split=False, name='Signature', keepbrackets=True)
        if query.get('signature'):
            query['signature'] = info['signature'] = str(sort_sign(ast.literal_eval(query['signature']))).replace(' ', '')
        parse_ints(info, query, 'dim', name='Dimension of the family')
        if 'inc_hyper' in info:
            if info['inc_hyper'] == 'exclude':
                query['hyperelliptic'] = False
            elif info['inc_hyper'] == 'only':
                query['hyperelliptic'] = True
        if 'inc_cyc_trig' in info:
            if info['inc_cyc_trig'] == 'exclude':
                query['cyclic_trigonal'] = False
            elif info['inc_cyc_trig'] == 'only':
                query['cyclic_trigonal'] = True
        if 'inc_full' in info:
            if info['inc_full'] == 'exclude':
                query['full_auto'] = {'$exists': True}
            elif info['inc_full'] == 'only':
                query['full_auto'] = {'$exists': False}
        query['cc.1'] = 1
    except ValueError:
        return search_input_error(info, bread)

    count = parse_count(info)
    start = parse_start(info)

    if 'groupsize' in info and info['groupsize'] != '':
        err, result = add_group_order_range(query, info['groupsize'], C)
        if err is not None:
            flash_error('Parse error on group order field. <font face="Courier New"><br />Given: ' + err + '<br />-------' + result + '</font>')
    res = C.curve_automorphisms.passports.find(query).sort([('genus', pymongo.ASCENDING),
                                                            ('dim', pymongo.ASCENDING),
                                                            ('cc.0', pymongo.ASCENDING)])
    nres = res.count()
    res = res.skip(start).limit(count)

    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0

    L = []
    for field in res:
        field['signature'] = ast.literal_eval(field['signature'])
        L.append(field)

    # The download routines work from the cleaned list L, whose signatures
    # have been parsed, rather than from the raw cursor.
    if 'download_magma' in info:
        return hgcwa_code_download_search(L, 'magma')
    elif 'download_gap' in info:
        return hgcwa_code_download_search(L, 'gap')

    info['fields'] = L
    info['number'] = nres
    info['group_display'] = sg_pretty
    info['show_downloads'] = len(L) > 0
    info['sign_display'] = sign_display
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    return render_template("hgcwa-search.html", info=info,
                           title="Families of Higher Genus Curves with Automorphisms Search Result",
                           credit=credit, bread=bread)
def galois_group_search(**args):
    info = to_dict(args)
    bread = get_bread([("Search results", url_for(".search"))])
    C = base.getDBConnection()
    query = {}
    if "jump_to" in info:
        return render_group_webpage({"label": info["jump_to"]})

    def includes_composite(s):
        # Return True if the degree specification s (e.g. "4" or "2-5,7")
        # could match a composite degree, in which case subfields are shown.
        s = s.replace(" ", "").replace("..", "-")
        for interval in s.split(","):
            if "-" in interval[1:]:
                ix = interval.index("-", 1)
                a, b = ZZ(interval[:ix]), ZZ(interval[ix + 1:])
                if b == a:
                    if a != 1 and not a.is_prime():
                        return True
                if b > a and b > 3:
                    return True
            else:
                a = ZZ(interval)
                if a != 1 and not a.is_prime():
                    return True

    try:
        parse_ints(info, query, "n", "degree")
        parse_ints(info, query, "t")
        for param in ("cyc", "solv", "prim", "parity"):
            parse_bool(info, query, param, minus_one_to_zero=(param != "parity"))
        degree_str = prep_ranges(info.get("n"))
        info["show_subs"] = degree_str is None or (LIST_RE.match(degree_str) and includes_composite(degree_str))
    except ValueError as err:
        info["err"] = str(err)
        return search_input_error(info, bread)

    count = parse_count(info, 50)
    start = parse_start(info)

    res = C.transitivegroups.groups.find(query).sort([("n", pymongo.ASCENDING), ("t", pymongo.ASCENDING)])
    nres = res.count()
    res = res.skip(start).limit(count)

    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0

    info["groups"] = res
    info["group_display"] = group_display_prettyC(C)
    info["yesno"] = yesno
    info["wgg"] = WebGaloisGroup.from_data
    info["start"] = start
    info["number"] = nres
    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info["report"] = "displaying matches %s-%s of %s" % (start + 1, min(nres, start + count), nres)
        else:
            info["report"] = "displaying all %s matches" % nres

    return render_template("gg-search.html", info=info, title="Galois Group Search Result",
                           bread=bread, credit=GG_credit)
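# What includes_composite decides, in isolation: the degree box accepts a
# comma list of values and ranges, and subfield data is only shown when some
# composite degree could match.  A standalone sketch, with a trial-division
# primality test standing in for Sage's ZZ(...).is_prime():
def _is_prime(n):
    if n < 2:
        return False
    p = 2
    while p * p <= n:
        if n % p == 0:
            return False
        p += 1
    return True

def includes_composite_sketch(s):
    s = s.replace(' ', '').replace('..', '-')
    for interval in s.split(','):
        if '-' in interval[1:]:
            ix = interval.index('-', 1)
            a, b = int(interval[:ix]), int(interval[ix + 1:])
            if b == a and a != 1 and not _is_prime(a):
                return True
            if b > a and b > 3:
                return True
        else:
            a = int(interval)
            if a != 1 and not _is_prime(a):
                return True
    return False

assert includes_composite_sketch('4')          # 4 is composite
assert not includes_composite_sketch('2,3,5')  # all prime
assert includes_composite_sketch('2-5')        # the range contains 4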
def genus2_curve_search(**args):
    info = to_dict(args)
    info["st_group_list"] = st_group_list
    info["st_group_dict"] = st_group_dict
    info["real_geom_end_alg_list"] = real_geom_end_alg_list
    info["real_geom_end_alg_to_ST0_dict"] = real_geom_end_alg_to_ST0_dict
    info["aut_grp_list"] = aut_grp_list
    info["aut_grp_dict"] = aut_grp_dict
    info["geom_aut_grp_list"] = geom_aut_grp_list
    info["geom_aut_grp_dict"] = geom_aut_grp_dict
    query = {}  # database callable
    bread = [('Genus 2 Curves', url_for(".index")),
             ('$\Q$', url_for(".index_Q")),
             ('Search Results', '.')]
    #if 'SearchAgain' in args:
    #    return rational_genus2_curves()

    if 'jump' in args:
        label_regex = re.compile(r'\d+\.[a-z]+\.\d+\.\d+')
        if label_regex.match(info["jump"].strip()):
            data = render_curve_webpage_by_label(info["jump"].strip())
        else:
            data = "Invalid label"
        if data == "Invalid label":
            flash(Markup("The label <span style='color:black'>%s</span> is invalid." % (info["jump"])), "error")
            return redirect(url_for(".index"))
        if data == "Data for curve not found":
            flash(Markup("No genus 2 curve with label <span style='color:black'>%s</span> was found in the database." % (info["jump"])), "error")
            return redirect(url_for(".index"))
        return data

    try:
        parse_ints(info, query, 'abs_disc', 'absolute discriminant')
        parse_bool(info, query, 'is_gl2_type')
        parse_bool(info, query, 'has_square_sha')
        parse_bool(info, query, 'locally_solvable')
        for fld in ('st_group', 'real_geom_end_alg'):
            if info.get(fld):
                query[fld] = info[fld]
        for fld in ('aut_grp', 'geom_aut_grp'):  # encoded as a GAP ID
            parse_bracketed_posints(info, query, fld, exactlength=2)
        # igusa and igusa_clebsch invariants not currently searchable
        parse_bracketed_posints(info, query, 'torsion', 'torsion structure', maxlength=4, check_divisibility="increasing")
        parse_ints(info, query, 'cond', 'conductor')
        parse_ints(info, query, 'num_rat_wpts', 'Weierstrass points')
        parse_ints(info, query, 'torsion_order')
        parse_ints(info, query, 'two_selmer_rank', '2-Selmer rank')
        parse_ints(info, query, 'analytic_rank', 'analytic rank')
    except ValueError as err:
        info['err'] = str(err)
        return render_template("search_results_g2.html", info=info,
                               title='Genus 2 Curves Search Input Error',
                               bread=bread, credit=credit_string)

    info["query"] = dict(query)
    count = parse_count(info, 50)
    start = parse_start(info)

    cursor = db_g2c().curves.find(query)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0
    res = cursor.sort([("cond", pymongo.ASCENDING),
                       ("class", pymongo.ASCENDING),
                       ("disc_key", pymongo.ASCENDING),
                       ("label", pymongo.ASCENDING)]).skip(start).limit(count)

    if nres == 1:
        info["report"] = "unique match"
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    res_clean = []
    for v in res:
        v_clean = {}
        v_clean["label"] = v["label"]
        v_clean["isog_label"] = v["class"]
        isogeny_class = db_g2c().isogeny_classes.find_one({'label': isog_label(v["label"])})
        v_clean["is_gl2_type"] = isogeny_class["is_gl2_type"]
        if isogeny_class["is_gl2_type"]:
            v_clean["is_gl2_type_display"] = '✔'  # checkmark
        else:
            v_clean["is_gl2_type_display"] = ''
        v_clean["equation_formatted"] = list_to_min_eqn(v["min_eqn"])
        v_clean["st_group_name"] = st_group_name(isogeny_class['st_group'])
        v_clean["analytic_rank"] = v["analytic_rank"]
        res_clean.append(v_clean)

    info["curves"] = res_clean
    info["curve_url"] = lambda dbc: url_for_label(dbc['label'])
    info["isog_url"] = lambda dbc: isog_url_for_label(dbc['label'])
    info["start"] = start
    info["count"] = count
    info["more"] = int(start + count < nres)

    credit = credit_string
    title = 'Genus 2 Curves search results'
    return render_template("search_results_g2.html", info=info, credit=credit,
                           learnmore=learnmore_list(), bread=bread, title=title)
def elliptic_curve_search(**args): info = to_dict(args['data']) if 'download' in info and info['download'] != 0: return download_search(info) if not 'query' in info: info['query'] = {} bread = [('Elliptic Curves', url_for(".index")), ('Search Results', '.')] if 'jump' in info: label = info.get('label', '').replace(" ", "") # This label should be a full isogeny class label or a full # curve label (including the field_label component) try: nf, cond_label, iso_label, number = split_full_label(label.strip()) except ValueError: info['err'] = '' return search_input_error(info, bread) return show_ecnf(nf, cond_label, iso_label, number) query = {} try: parse_ints(info,query,'conductor_norm') parse_noop(info,query,'conductor_label') parse_nf_string(info,query,'field',name="base number field",qfield='field_label') parse_nf_elt(info,query,'jinv',name='j-invariant') parse_ints(info,query,'torsion',name='Torsion order',qfield='torsion_order') parse_bracketed_posints(info,query,'torsion_structure',maxlength=2) if 'torsion_structure' in query and not 'torsion_order' in query: query['torsion_order'] = reduce(mul,[int(n) for n in query['torsion_structure']],1) except ValueError: return search_input_error(info, bread) if 'include_isogenous' in info and info['include_isogenous'] == 'off': info['number'] = 1 query['number'] = 1 if 'include_base_change' in info and info['include_base_change'] == 'off': query['base_change'] = [] else: info['include_base_change'] = "on" if 'include_Q_curves' in info: if info['include_Q_curves'] == 'exclude': query['q_curve'] = False elif info['include_Q_curves'] == 'only': query['q_curve'] = True if 'include_cm' in info: if info['include_cm'] == 'exclude': query['cm'] = 0 elif info['include_cm'] == 'only': query['cm'] = {'$ne' : 0} info['query'] = query count = parse_count(info, 50) start = parse_start(info) # make the query and trim results according to start/count: cursor = db_ecnf().find(query) nres = cursor.count() if(start >= nres): start -= (1 + (start - nres) / count) * count if(start < 0): start = 0 res = cursor.sort([('field_label', ASC), ('conductor_norm', ASC), ('conductor_label', ASC), ('iso_nlabel', ASC), ('number', ASC)]).skip(start).limit(count) res = list(res) for e in res: e['numb'] = str(e['number']) e['field_knowl'] = nf_display_knowl(e['field_label'], getDBConnection(), field_pretty(e['field_label'])) info['curves'] = res # [ECNF(e) for e in res] info['number'] = nres info['start'] = start info['count'] = count info['more'] = int(start + count < nres) info['field_pretty'] = field_pretty info['web_ainvs'] = web_ainvs if nres == 1: info['report'] = 'unique match' else: if nres > count or start != 0: info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres) else: info['report'] = 'displaying all %s matches' % nres t = 'Elliptic Curve search results' return render_template("ecnf-search-results.html", info=info, credit=ecnf_credit, bread=bread, title=t)
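# The torsion_structure -> torsion_order shortcut above multiplies out the
# invariant factors so that the indexed torsion_order field can narrow the
# search.  A standalone sketch; functools.reduce and operator.mul are the
# imports the surrounding code assumes as "reduce" and "mul":
from functools import reduce
from operator import mul

def torsion_order_from_structure(structure):
    # E.g. a [2, 4] torsion structure means Z/2 x Z/4, of order 8.
    return reduce(mul, [int(n) for n in structure], 1)

assert torsion_order_from_structure(['2', '4']) == 8
assert torsion_order_from_structure([]) == 1  # trivial torsion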
def number_field_search(**args):
    info = to_dict(args)
    info['learnmore'] = [('Global number field labels', url_for(".render_labels_page")),
                         ('Galois group labels', url_for(".render_groups_page")),
                         (Completename, url_for(".render_discriminants_page")),
                         ('Quadratic imaginary class groups', url_for(".render_class_group_data"))]
    t = 'Global Number Field search results'
    bread = [('Global Number Fields', url_for(".number_field_render_webpage")),
             ('Search results', ' ')]

    # for k in info.keys():
    #     nf_logger.debug(str(k) + ' ---> ' + str(info[k]))
    # nf_logger.debug('******************* ' + str(info['search']))

    if 'natural' in info:
        query = {'label_orig': info['natural']}
        try:
            parse_nf_string(info, query, 'natural', name="Label", qfield='label')
            return redirect(url_for(".by_label", label=clean_input(query['label'])))
        except ValueError:
            query['err'] = info['err']
            return search_input_error(query, bread)

    query = {}
    try:
        parse_galgrp(info, query, qfield='galois')
        parse_ints(info, query, 'degree')
        parse_bracketed_posints(info, query, 'signature', split=False, exactlength=2)
        parse_signed_ints(info, query, 'discriminant', qfield=('disc_sign', 'disc_abs_key'), parse_one=make_disc_key)
        parse_ints(info, query, 'class_number')
        parse_bracketed_posints(info, query, 'class_group', split=False, check_divisibility='increasing')
        parse_primes(info, query, 'ur_primes', name='Unramified primes', qfield='ramps', mode='complement', to_string=True)
        if 'ram_quantifier' in info and str(info['ram_quantifier']) == 'some':
            mode = 'append'
        else:
            mode = 'exact'
        parse_primes(info, query, 'ram_primes', 'ramified primes', 'ramps', mode, to_string=True)
    except ValueError:
        return search_input_error(info, bread)

    count = parse_count(info)
    start = parse_start(info)
    if info.get('paging'):
        try:
            paging = int(info['paging'])
            if paging == 0:
                start = 0
        except (TypeError, ValueError):
            pass

    C = base.getDBConnection()
    # nf_logger.debug(query)
    info['query'] = dict(query)
    if 'lucky' in args:
        one = C.numberfields.fields.find_one(query)
        if one:
            label = one['label']
            return redirect(url_for(".by_label", label=clean_input(label)))

    fields = C.numberfields.fields
    res = fields.find(query)
    if 'download' in info and info['download'] != '0':
        return download_search(info, res)

    res = res.sort([('degree', ASC), ('disc_abs_key', ASC), ('disc_sign', ASC)])
    nres = res.count()
    res = res.skip(start).limit(count)
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0

    info['fields'] = res
    info['number'] = nres
    info['start'] = start
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres
    info['wnf'] = WebNumberField.from_data
    return render_template("number_field_search.html", info=info, title=t, bread=bread)
def __call__(self, info): info = to_dict(info) # I'm not sure why this is required... for key, func in self.shortcuts.items(): if info.get(key, '').strip(): return func(info) query = {} template_kwds = {} for key in self.kwds: template_kwds[key] = info.get(key, self.kwds[key]()) try: errpage = self.f(info, query) except ValueError as err: # Errors raised in parsing info['err'] = str(err) err_title = query.pop('__err_title__', self.err_title) return render_template(self.template, info=info, title=err_title, **template_kwds) if errpage is not None: return errpage if 'result_count' in info: nres = self.table.count(query) return jsonify({"nres": str(nres)}) sort = query.pop('__sort__', None) table = query.pop('__table__', self.table) proj = query.pop('__projection__', self.projection) # We want to pop __title__ even if overridden by info. title = query.pop('__title__', self.title) title = info.get('title', title) template = query.pop('__template__', self.template) count = parse_count(info, self.per_page) start = parse_start(info) try: res = table.search(query, proj, limit=count, offset=start, sort=sort, info=info) except QueryCanceledError as err: ctx = ctx_proc_userdata() flash_error( 'The search query took longer than expected! Please help us improve by reporting this error <a href="%s" target=_blank>here</a>.' % ctx['feedbackpage']) info['err'] = str(err) info['query'] = dict(query) return render_template(self.template, info=info, title=self.err_title, **template_kwds) else: if self.cleaners: for v in res: for name, func in self.cleaners.items(): v[name] = func(v) if self.postprocess is not None: res = self.postprocess(res, info, query) for key, func in self.longcuts.items(): if info.get(key, '').strip(): return func(res, info, query) info['results'] = res return render_template(template, info=info, title=title, **template_kwds)
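# __call__ above is evidently the body of a search-wrapper class whose
# constructor stores f, table, title, err_title, per_page, shortcuts,
# longcuts, cleaners, postprocess and kwds.  Below is a minimal, runnable
# model of the dispatch order it implements (shortcuts first, then parsing
# into query, then the table search).  The class name and constructor
# signature here are assumptions for illustration, not the real API; the
# real wrapper also handles templates, sorting, cleaners and error pages.
class MiniSearchWrapper(object):
    def __init__(self, f, table, per_page=50, shortcuts=None):
        self.f = f              # fills query in place from the form data
        self.table = table      # anything with .search(query, limit=..., offset=...)
        self.per_page = per_page
        self.shortcuts = shortcuts or {}

    def __call__(self, info):
        info = dict(info)
        # Shortcut keys (e.g. 'download', 'label') bypass the search entirely.
        for key, func in self.shortcuts.items():
            if info.get(key, '').strip():
                return func(info)
        query = {}
        self.f(info, query)
        count = int(info.get('count', self.per_page))
        start = int(info.get('start', 0))
        return self.table.search(query, limit=count, offset=start)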
def elliptic_curve_search(info):
    if info.get('download') == '1' and info.get('Submit') and info.get('query'):
        return download_search(info)
    if 'SearchAgain' in info:
        return rational_elliptic_curves()

    query = {}
    bread = info.get('bread', [('Elliptic Curves', url_for("ecnf.index")),
                               ('$\Q$', url_for(".rational_elliptic_curves")),
                               ('Search Results', '.')])

    if 'jump' in info:
        label = info.get('label', '').replace(" ", "")
        m = match_lmfdb_label(label)
        if m:
            try:
                return by_ec_label(label)
            except ValueError:
                return elliptic_curve_jump_error(label, info, wellformed_label=True)
        elif label.startswith("Cremona:"):
            label = label[8:]
            m = match_cremona_label(label)
            if m:
                try:
                    return by_ec_label(label)
                except ValueError:
                    return elliptic_curve_jump_error(label, info, wellformed_label=True)
        elif match_cremona_label(label):
            return elliptic_curve_jump_error(label, info, cremona_label=True)
        elif label:
            # Try to parse a string like [1,0,3,2,4] as valid
            # Weierstrass coefficients:
            lab = re.sub(r'\s', '', label)
            lab = re.sub(r'^\[', '', lab)
            lab = re.sub(r']$', '', lab)
            try:
                labvec = lab.split(',')
                labvec = [QQ(str(z)) for z in labvec]  # rationals allowed
                E = EllipticCurve(labvec)
                # Now we do have a valid curve over Q, but it might
                # not be in the database.
                ainvs = [str(c) for c in E.minimal_model().ainvs()]
                xainvs = ''.join(['[', ','.join(ainvs), ']'])
                data = db_ec().find_one({'xainvs': xainvs})
                if data is None:
                    data = db_ec().find_one({'ainvs': ainvs})
                    if data is None:
                        info['conductor'] = E.conductor()
                        return elliptic_curve_jump_error(label, info, missing_curve=True)
                return by_ec_label(data['lmfdb_label'])
            except (TypeError, ValueError, ArithmeticError):
                return elliptic_curve_jump_error(label, info)
        else:
            query['label'] = ''

    try:
        parse_rational(info, query, 'jinv', 'j-invariant')
        parse_ints(info, query, 'conductor')
        parse_ints(info, query, 'torsion', 'torsion order')
        parse_ints(info, query, 'rank')
        parse_ints(info, query, 'sha', 'analytic order of Ш')
        parse_bracketed_posints(info, query, 'torsion_structure', maxlength=2, process=str, check_divisibility='increasing')
        # speed up slow torsion_structure searches by also setting torsion
        if 'torsion_structure' in query and not 'torsion' in query:
            query['torsion'] = reduce(mul, [int(n) for n in query['torsion_structure']], 1)
        if 'include_cm' in info:
            if info['include_cm'] == 'exclude':
                query['cm'] = 0
            elif info['include_cm'] == 'only':
                query['cm'] = {'$ne': 0}
        parse_ints(info, query, field='isodeg', qfield='isogeny_degrees')
        parse_primes(info, query, 'surj_primes', name='surjective primes',
                     qfield='non-maximal_primes', mode='complement')
        if info.get('surj_quantifier') == 'exactly':
            mode = 'exact'
        else:
            mode = 'append'
        parse_primes(info, query, 'nonsurj_primes', name='non-surjective primes',
                     qfield='non-maximal_primes', mode=mode)
    except ValueError as err:
        info['err'] = str(err)
        return search_input_error(info, bread)

    count = parse_count(info, 100)
    start = parse_start(info)

    if 'optimal' in info and info['optimal'] == 'on':
        # fails on 990h3
        query['number'] = 1

    info['query'] = query
    cursor = db_ec().find(query)
    nres = cursor.count()
    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0
    res = cursor.sort([('conductor', ASCENDING),
                       ('iso_nlabel', ASCENDING),
                       ('lmfdb_number', ASCENDING)]).skip(start).limit(count)

    info['curves'] = res
    info['format_ainvs'] = format_ainvs
    info['curve_url'] = lambda dbc: url_for(".by_triple_label",
                                            conductor=dbc['conductor'],
                                            iso_label=split_lmfdb_label(dbc['lmfdb_iso'])[1],
                                            number=dbc['lmfdb_number'])
    info['iso_url'] = lambda dbc: url_for(".by_double_iso_label",
                                          conductor=dbc['conductor'],
                                          iso_label=split_lmfdb_label(dbc['lmfdb_iso'])[1])
    info['number'] = nres
    info['start'] = start
    info['count'] = count
    info['more'] = int(start + count < nres)

    if nres == 1:
        info['report'] = 'unique match'
    elif nres == 2:
        info['report'] = 'displaying both matches'
    else:
        if nres > count or start != 0:
            info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
        else:
            info['report'] = 'displaying all %s matches' % nres

    #credit = 'John Cremona'
    #if 'non-surjective_primes' in query or 'non-maximal_primes' in query:
    #    credit += ' and Andrew Sutherland'

    t = info.get('title', 'Elliptic Curves search results')
    return render_template("ec-search-results.html", info=info, credit=ec_credit(), bread=bread, title=t)
def lattice_search(**args):
    info = to_dict(args)  # what has been entered in the search boxes
    if 'download' in info:
        return download_search(info)
    if 'label' in info and info.get('label'):
        return lattice_by_label_or_name(info.get('label'))

    query = {}
    try:
        for field, name in (('dim', 'Dimension'), ('det', 'Determinant'), ('level', None),
                            ('minimum', 'Minimal vector length'), ('class_number', None),
                            ('aut', 'Group order')):
            parse_ints(info, query, field, name)
        # Check if length of gram is triangular
        gram = info.get('gram')
        if gram and not (9 + 8 * ZZ(gram.count(','))).is_square():
            flash(Markup("Error: <span style='color:black'>%s</span> is not a valid input for Gram matrix. It must be a list of integer vectors of triangular length, such as [1,2,3]." % (gram)), "error")
            raise ValueError
        parse_list(info, query, 'gram', process=vect_to_sym)
    except ValueError as err:
        info['err'] = str(err)
        return search_input_error(info)

    count = parse_count(info, 50)
    start = parse_start(info)

    info['query'] = dict(query)
    res = lattice_db().find(query).sort([('dim', ASC), ('det', ASC), ('level', ASC),
                                         ('class_number', ASC), ('label', ASC),
                                         ('minimum', ASC), ('aut', ASC)]).skip(start).limit(count)
    nres = res.count()

    # If the user entered a valid Gram matrix that is not stored in the
    # database, check for an isometric stored lattice.  This may become slow
    # in the future: at the moment we compare against the stored matrices
    # with the same dimension and determinant (comparing with respect to
    # dimension alone is slow).
    if nres == 0 and info.get('gram'):
        A = query['gram']
        n = len(A[0])
        d = matrix(A).determinant()
        result = [B for B in lattice_db().find({'dim': int(n), 'det': int(d)}) if isom(A, B['gram'])]
        if len(result) > 0:
            query['gram'] = result[0]['gram']
            res = lattice_db().find(query)
            nres = res.count()

    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0

    info['number'] = nres
    info['start'] = int(start)
    info['more'] = int(start + count < nres)
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres == 0:
            info['report'] = 'no matches'
        else:
            if nres > count or start != 0:
                info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
            else:
                info['report'] = 'displaying all %s matches' % nres

    res_clean = []
    for v in res:
        v_clean = {}
        v_clean['label'] = v['label']
        v_clean['dim'] = v['dim']
        v_clean['det'] = v['det']
        v_clean['level'] = v['level']
        v_clean['class_number'] = v['class_number']
        v_clean['min'] = v['minimum']
        v_clean['aut'] = v['aut']
        res_clean.append(v_clean)

    info['lattices'] = res_clean

    t = 'Integral Lattices Search Results'
    bread = [('Lattices', url_for(".lattice_render_webpage")), ('Search Results', ' ')]
    properties = []
    return render_template("lattice-search.html", info=info, title=t, properties=properties,
                           bread=bread, learnmore=learnmore_list())
def number_field_search(**args): info = to_dict(args) info['learnmore'] = [('Global number field labels', url_for(".render_labels_page")), ('Galois group labels', url_for(".render_groups_page")), (Completename, url_for(".render_discriminants_page")), ('Quadratic imaginary class groups', url_for(".render_class_group_data"))] t = 'Global Number Field search results' bread = [('Global Number Fields', url_for(".number_field_render_webpage")), ('Search results', ' ')] # for k in info.keys(): # nf_logger.debug(str(k) + ' ---> ' + str(info[k])) # nf_logger.debug('******************* '+ str(info['search'])) if 'natural' in info: query = {'label_orig': info['natural']} try: parse_nf_string(info,query,'natural',name="Label",qfield='label') return redirect(url_for(".by_label", label= clean_input(query['label_orig']))) except ValueError: query['err'] = info['err'] return search_input_error(query, bread) query = {} try: parse_galgrp(info,query, qfield='galois') parse_ints(info,query,'degree') parse_bracketed_posints(info,query,'signature',split=False,exactlength=2) parse_signed_ints(info,query,'discriminant',qfield=('disc_sign','disc_abs_key'),parse_one=make_disc_key) parse_ints(info,query,'class_number') parse_bracketed_posints(info,query,'class_group',split=False,check_divisibility='increasing') parse_primes(info,query,'ur_primes',name='Unramified primes',qfield='ramps',mode='complement',to_string=True) # modes are now contained (in), exactly, include if 'ram_quantifier' in info and str(info['ram_quantifier']) == 'include': mode = 'append' parse_primes(info,query,'ram_primes','ramified primes','ramps',mode,to_string=True) elif 'ram_quantifier' in info and str(info['ram_quantifier']) == 'contained': parse_primes(info,query,'ram_primes','ramified primes','ramps_all','subsets',to_string=False) pass # build list else: mode = 'liststring' parse_primes(info,query,'ram_primes','ramified primes','ramps_all',mode) except ValueError: return search_input_error(info, bread) count = parse_count(info) start = parse_start(info) if info.get('paging'): try: paging = int(info['paging']) if paging == 0: start = 0 except: pass C = base.getDBConnection() # nf_logger.debug(query) info['query'] = dict(query) if 'lucky' in args: one = C.numberfields.fields.find_one(query) if one: label = one['label'] return redirect(url_for(".by_label", label=clean_input(label))) fields = C.numberfields.fields res = fields.find(query) res = res.sort([('degree', ASC), ('disc_abs_key', ASC),('disc_sign', ASC)]) if 'download' in info and info['download'] != '0': return download_search(info, res) nres = res.count() res = res.skip(start).limit(count) if(start >= nres): start -= (1 + (start - nres) / count) * count if(start < 0): start = 0 info['fields'] = res info['number'] = nres info['start'] = start if nres == 1: info['report'] = 'unique match' else: if nres > count or start != 0: info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres) else: info['report'] = 'displaying all %s matches' % nres info['wnf'] = WebNumberField.from_data return render_template("number_field_search.html", info=info, title=t, bread=bread)
def hecke_algebras_search(**args):
    info = to_dict(args)  # what has been entered in the search boxes
    if 'download' in info:
        return download_search(info)
    if 'label' in info and info.get('label'):
        return hecke_algebras_by_label(info.get('label'))

    query = {}
    try:
        for field, name in (('level', 'Level'), ('weight', 'Weight'),
                            ('num_orbits', 'Number of Hecke orbits'), ('ell', 'characteristic')):
            parse_ints(info, query, field, name)
    except ValueError as err:
        info['err'] = str(err)
        return search_input_error(info)

    if 'ell' in info and info.get('ell'):
        if int(info.get('ell')) > 13:
            flash(Markup("No data is available for primes greater than $13$"), "error")
            return redirect(url_for(".hecke_algebras_render_webpage"))
        elif int(info.get('ell')) not in [2, 3, 5, 7, 11, 13]:
            flash(Markup("No data is available for composite characteristic"), "error")
            return redirect(url_for(".hecke_algebras_render_webpage"))

    if 'orbit_label' in info and info.get('orbit_label'):
        check = [int(i) for i in info['orbit_label'].split(".")]
        if 'level' in info and info.get('level'):
            try:
                for field in ['level', 'weight']:
                    if info.get(field):
                        int(info.get(field))
            except ValueError:
                flash(Markup("Orbit label <span style='color:black'>%s</span> and input Level or Weight are not compatible" % (info.get('orbit_label'))), "error")
                return redirect(url_for(".hecke_algebras_render_webpage"))
            if int(info.get('level')) != check[0]:
                flash(Markup("Orbit label <span style='color:black'>%s</span> and Level <span style='color:black'>%s</span> are not compatible inputs" % (info.get('orbit_label'), info.get('level'))), "error")
                return redirect(url_for(".hecke_algebras_render_webpage"))
        if 'weight' in info and info.get('weight'):
            if int(info.get('weight')) != check[1]:
                flash(Markup("Orbit label <span style='color:black'>%s</span> and Weight <span style='color:black'>%s</span> are not compatible inputs" % (info.get('orbit_label'), info.get('weight'))), "error")
                return redirect(url_for(".hecke_algebras_render_webpage"))
        if 'ell' in info and info.get('ell'):
            return render_hecke_algebras_webpage_l_adic(orbit_label=info.get('orbit_label'), prime=info.get('ell'))
        else:
            return hecke_algebras_by_orbit_label(info.get('orbit_label'))

    count = parse_count(info, 50)
    start = parse_start(info)

    info['query'] = dict(query)
    if 'ell' in info and info.get('ell'):
        res = hecke_l_adic_db().find(query).sort([('level', ASC), ('weight', ASC), ('num_orbits', ASC)]).skip(start).limit(count)
    else:
        res = hecke_db().find(query).sort([('level', ASC), ('weight', ASC), ('num_orbits', ASC)]).skip(start).limit(count)
    nres = res.count()

    if start >= nres:
        start -= (1 + (start - nres) / count) * count
    if start < 0:
        start = 0

    info['number'] = nres
    info['start'] = int(start)
    info['more'] = int(start + count < nres)
    if nres == 1:
        info['report'] = 'unique match'
    else:
        if nres == 0:
            info['report'] = 'no matches'
        else:
            if nres > count or start != 0:
                info['report'] = 'displaying matches %s-%s of %s' % (start + 1, min(nres, start + count), nres)
            else:
                info['report'] = 'displaying all %s matches' % nres

    res_clean = []
    for v in res:
        v_clean = {}
        if 'ell' in info and info.get('ell'):
            v_clean['orbit_label'] = v['orbit_label']
            v_clean['index'] = v['index']
            v_clean['label'] = ".".join(v['orbit_label'].split(".")[i] for i in [0, 1, 2])
        else:
            v_clean['label'] = v['label']
        v_clean['num_orbits'] = v['num_orbits']
        v_clean['level'] = v['level']
        v_clean['weight'] = v['weight']
        res_clean.append(v_clean)

    info['hecke_algebras'] = res_clean

    t = 'Hecke Algebras Search Results'
    bread = [('Hecke Algebras', url_for(".hecke_algebras_render_webpage")), ('Search Results', ' ')]
    properties = []
    return render_template("hecke_algebras-search.html", info=info, title=t, properties=properties,
                           bread=bread, learnmore=learnmore_list())
def abelian_variety_search(**args): info = to_dict(args) if 'download' in info and info['download'] != 0: return download_search(info) bread = args.get('bread', get_bread(('Search Results', ' '))) if 'jump' in info: return by_label(info.get('label','')) query = {} try: parse_ints(info,query,'q') parse_ints(info,query,'g') if 'simple' in info: if info['simple'] == 'yes': query['decomposition'] = {'$size' : 1} query['decomposition.0.1'] = 1 elif info['simple'] == 'no': query['$or'] = [{'decomposition': {'$not' : {'$size' : 1}}}, {'decomposition.0.1' : {'$gt': 1}}] if 'primitive' in info: if info['primitive'] == 'yes': query['primitive_models'] = {'$size' : 0} elif info['primitive'] == 'no': query['primitive_models'] = {'$not' : {'$size' : 0}} if 'jacobian' in info: if info['jacobian'] == 'yes': query['known_jacobian'] = 1 elif info['jacobian'] == 'no': query['known_jacobian'] = -1 else: info['jacobian'] = "any" if 'polarizable' in info: if info['polarizable'] == 'yes': query['principally_polarizable'] = 1 elif info['polarizable'] == 'no': query['principally_polarizable'] = -1 else: info['polarizable'] = "any" parse_ints(info,query,'p_rank') parse_ints(info,query,'angle_ranks') parse_newton_polygon(info,query,'newton_polygon',qfield='slopes') parse_list_start(info,query,'initial_coefficients',qfield='polynomial',index_shift=1) parse_list_start(info,query,'abvar_point_count',qfield='A_counts',parse_singleton=str) parse_list_start(info,query,'curve_point_count',qfield='C_counts',parse_singleton=str) parse_abvar_decomp(info,query,'decomposition',av_stats=AbvarFqStats()) parse_nf_string(info,query,'number_field') except ValueError: return search_input_error(info, bread) info['query'] = query count = parse_count(info, 50) start = parse_start(info) cursor = db().find(query) nres = cursor.count() if start >= nres: start -= (1 + (start - nres) / count) * count if start < 0: start = 0 #res = cursor.sort([]).skip(start).limit(count) res = cursor.skip(start).limit(count) res = list(res) info['abvars'] = [AbvarFq_isoclass(x) for x in res] info['number'] = nres info['start'] = start info['count'] = count info['more'] = int(start + count < nres) if nres == 1: info['report'] = 'unique match' elif nres == 0: info['report'] = 'no matches' elif nres > count or start != 0: info['report'] = 'displaying matches %s-%s of %s' %(start + 1, min(nres, start+count), nres) else: info['report'] = 'displaying all %s matches' % nres t = 'Abelian Variety search results' return render_template("abvarfq-search-results.html", info=info, credit=abvarfq_credit, bread=bread, title=t)
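# The 'simple' branch above encodes simplicity as: the stored decomposition
# has exactly one factor, occurring with multiplicity 1.  A pure-Python
# predicate with the same meaning as the two Mongo clauses
# ({'$size': 1} on decomposition and 1 on decomposition.0.1); the
# decomposition entries are (label, multiplicity) pairs and the sample
# labels are made up:
def is_simple(decomposition):
    return len(decomposition) == 1 and decomposition[0][1] == 1

assert is_simple([('1.2.ab', 1)])                     # one factor, multiplicity 1
assert not is_simple([('1.2.ab', 2)])                 # a square of a smaller variety
assert not is_simple([('1.2.ab', 1), ('1.2.ac', 1)])  # a product of two factors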