def download_galmap_text(self, label, lang="text"):
    """Return the stored data for the Belyi map with the given label as a download.

    Aborts with a 404 when the label is not found in the database.
    """
    data = db.belyi_galmaps_fixed.lookup(label)
    if data is None:
        return abort(404, "Label not found: %s" % label)
    # Bug fix: forward the requested language to _wrap; previously the
    # `lang` parameter was accepted but silently ignored, so every
    # download used _wrap's default format.
    return self._wrap(Json.dumps(data), label, lang=lang,
                      title='Data for embedded Belyi map with label %s,' % label)
def download_traces(self, label, lang='text'):
    """Download the trace form for the object with the given label."""
    traces = self._get_traces(label)
    # On failure _get_traces returns an error response rather than a
    # list; hand that straight back to the caller.
    if not isinstance(traces, list):
        return traces
    return self._wrap(Json.dumps(traces),
                      label + '.traces',
                      lang=lang,
                      title='Trace form for %s,' % (label))
def download_traces(self, label, lang='text'):
    """Serve the trace form for `label` as a download."""
    result = self._get_traces(label)
    if isinstance(result, list):
        # Normal case: wrap the trace list for download.
        return self._wrap(Json.dumps(result),
                          label + '.traces',
                          lang=lang,
                          title='Trace form for %s,' % (label))
    # _get_traces signalled an error; propagate its response unchanged.
    return result
def download_G2C_all(label):
    """Serve all stored data for the genus 2 curve `label` as plain text."""
    record = db.g2c_curves.lookup(label, label_col='label')
    if record is None:
        return genus2_jump_error(label, {})
    records = [record]
    body = '\n\n'.join(Json.dumps(rec) for rec in records)
    response = make_response(body)
    response.headers['Content-type'] = 'text/plain'
    return response
def download_all(self, label, lang='text'):
    """Download every stored field for the abelian variety isogeny class `label`.

    Aborts with a 404 when the label is unknown.
    """
    record = db.av_fq_isog.lookup(label)
    if record is None:
        return abort(404, "Label not found: %s" % label)
    title = 'Stored data for abelian variety isogeny class %s,' % (label)
    return self._wrap(Json.dumps(record), label, lang=lang, title=title)
def download_coefficients(self, label, lang='text'):
    """Download the coefficient list of the Maass form `label`.

    Aborts with a 404 when no coefficient data is stored.
    """
    coeffs = db.maass_newforms.lookup(label, projection="coefficients")
    if coeffs is None:
        return abort(404, "Coefficient data for Maass form %s not found in the database" % label)
    # Idiom fix: iterate the coefficients directly instead of indexing
    # with range(len(...)).
    data = [str(c) for c in coeffs]
    # Strip the JSON string quotes so the coefficients appear as bare
    # numbers in the downloaded file.
    return self._wrap(Json.dumps(data).replace('"', ''),
                      label + '.coefficients',
                      lang=lang,
                      title='Coefficients for Maass form %s,' % (label))
def download_newspace(self, label, lang='text'):
    """Download the stored record for the newspace `label`, augmented with
    the labels of its newforms and its oldspace decomposition."""
    record = db.mf_newspaces.lookup(label)
    if record is None:
        return abort(404, "Label not found: %s" % label)
    space = WebNewformSpace(record)
    record['newforms'] = [newform['label'] for newform in space.newforms]
    record['oldspaces'] = space.oldspaces
    return self._wrap(Json.dumps(record), label, lang=lang,
                      title='Stored data for newspace %s,' % (label))
def download_newspace(self, label, lang='text'):
    """Serve all stored data for the newspace `label` as a download.

    The raw database record is enriched with the newform labels and the
    oldspace decomposition before serialization.
    """
    rec = db.mf_newspaces.lookup(label)
    if rec is None:
        return abort(404, "Label not found: %s" % label)
    space = WebNewformSpace(rec)
    rec['newforms'] = [f['label'] for f in space.newforms]
    rec['oldspaces'] = space.oldspaces
    title = 'Stored data for newspace %s,' % (label)
    return self._wrap(Json.dumps(rec), label, lang=lang, title=title)
def download_newform(self, label, lang='text'):
    """Download stored data for the newform `label`, including its exact
    q-expansion and trace form when available."""
    record = db.mf_newforms.lookup(label)
    if record is None:
        return abort(404, "Label not found: %s" % label)
    form = WebNewform(record)
    if form.has_exact_qexp:
        # Attach the exact coefficient data only when it is stored.
        record['qexp'] = form.qexp
        record['traces'] = form.texp
    return self._wrap(Json.dumps(record), label, lang=lang,
                      title='Stored data for newform %s,' % (label))
def download(self, label, lang='text'):
    """Download all stored data for the Maass form `label`.

    Numeric columns are converted to strings before JSON serialization so
    that arbitrary-precision values are not mangled. Aborts with a 404
    when the label is unknown.
    """
    data = db.maass_newforms.lookup(label)
    if data is None:
        return abort(404, "Maass form %s not found in the database" % label)
    # Idiom fix: iterate (column, type) pairs once with .items() instead
    # of re-indexing col_type for every test; a column has exactly one
    # type, so the two tests are mutually exclusive (elif).
    for col, typ in db.maass_newforms.col_type.items():
        if typ == "numeric" and data.get(col):
            data[col] = str(data[col])
        elif typ == "numeric[]" and data.get(col):
            data[col] = [str(x) for x in data[col]]
    return self._wrap(Json.dumps(data), label, lang=lang,
                      title='All stored data for Maass form %s,' % (label))
def cc_generator():
    """Stream a JSON array of Hecke eigenvalue embeddings, one object per
    embedding, so large downloads are never materialized in memory.

    Reads `code` and `col` from the enclosing scope.
    """
    yield '[\n'
    sep = ''
    for ev in db.mf_hecke_cc.search(
            {'hecke_orbit_code': code},
            ['label', 'embedding_root_real', 'embedding_root_imag', col],
            sort=['conrey_index', 'embedding_index']):
        D = {'label': ev.get('label'), col: ev.get(col)}
        root = (ev.get('embedding_root_real'), ev.get('embedding_root_imag'))
        if root != (None, None):
            D['root'] = root
        # Bug fix: emit the separator *before* every entry after the
        # first; the old code appended ',\n\n' after each entry, leaving
        # a trailing comma before ']' and producing invalid JSON.
        yield sep + Json.dumps(D)
        sep = ',\n\n'
    yield '\n]\n'
def download_newform(self, label, lang='text'):
    """Download stored data for the newform `label`, attaching exact and
    complex q-expansion data when available."""
    record = db.mf_newforms.lookup(label)
    if record is None:
        return abort(404, "Label not found: %s" % label)
    form = WebNewform(record)
    # Request every complex embedding (rows 1 through dim).
    form.setup_cc_data({'m': '1-%s' % form.dim})
    if form.has_exact_qexp:
        record['qexp'] = form.qexp
        record['traces'] = form.texp
    if form.has_complex_qexp:
        record['complex_embeddings'] = form.cc_data
    return self._wrap(Json.dumps(record), label, lang=lang,
                      title='Stored data for newform %s,' % (label))
def download_ECNF_all(nf, conductor_label, class_label, number):
    """Serve all stored data for one elliptic curve over a number field
    as plain text; falls back to a search error page on bad input."""
    conductor_label = convert_IQF_label(nf, unquote(conductor_label))
    try:
        nf_label = nf_string_to_label(nf)
    except ValueError:
        return search_input_error()
    # Full curve label: field-conductor-class with the curve number
    # appended directly (no separator before the number).
    label = "-".join([nf_label, conductor_label, class_label]) + number
    data = db.ec_nfcurves.lookup(label)
    if data is None:
        return search_input_error()
    response = make_response(Json.dumps(data))
    response.headers['Content-type'] = 'text/plain'
    return response
def download_newform(self, label, lang='text'):
    """Serve all stored data for the newform `label` as a download,
    including exact and complex coefficient data where present."""
    payload = db.mf_newforms.lookup(label)
    if payload is None:
        return abort(404, "Label not found: %s" % label)
    form = WebNewform(payload)
    # Load the complex embedding data for all rows 1..dim.
    form.setup_cc_data({'m': '1-%s' % form.dim})
    if form.has_exact_qexp:
        payload['qexp'] = form.qexp
        payload['traces'] = form.texp
    if form.has_complex_qexp:
        payload['complex_embeddings'] = form.cc_data
    title = 'Stored data for newform %s,' % (label)
    return self._wrap(Json.dumps(payload), label, lang=lang, title=title)
def download_full_space(self, label, lang='text'):
    """Download stored data for the full Gamma1 space `label`: basic
    invariants, the labels of its newspaces and newforms, and the
    dimension grid.

    Aborts with a 404 when the label does not parse to a known space.
    """
    try:
        space = WebGamma1Space.by_label(label)
    except ValueError:
        return abort(404, "Label not found: %s" % label)
    data = {attr: getattr(space, attr)
            for attr in ['level', 'weight', 'label', 'oldspaces']}
    data['newspaces'] = [spc['label'] for spc, forms in space.decomp]
    # Idiom fix: flatten with a nested comprehension instead of
    # sum([...], []), which rebuilds the accumulator list quadratically.
    data['newforms'] = [form['label']
                        for spc, forms in space.decomp
                        for form in forms]
    data['dimgrid'] = space.dim_grid._grid
    return self._wrap(Json.dumps(data), label, lang=lang,
                      title='Stored data for newspace %s,' % (label))
def download_embedding(self, label, lang='text'):
    """Download coefficient data for one embedded newform."""
    cols = ['label', 'embedding_root_real', 'embedding_root_imag',
            'an_normalized', 'angles']
    rec = db.mf_hecke_cc.lucky({'label': label}, cols)
    if rec is None:
        return abort(404, "No embedded newform found for %s" % (label))
    # Collapse the two root columns into a single (re, im) pair when at
    # least one of them is stored.
    re_part = rec.pop('embedding_root_real', None)
    im_part = rec.pop('embedding_root_imag', None)
    if (re_part, im_part) != (None, None):
        rec['root'] = (re_part, im_part)
    return self._wrap(Json.dumps(rec), label, lang=lang,
                      title='Coefficient data for embedded newform %s,' % label)
def cc_generator():
    """Stream the embeddings of a Hecke orbit as a JSON array, yielding
    one serialized object per embedding.

    Uses `code` and `col` from the enclosing scope.
    """
    yield '[\n'
    separator = ''
    rows = db.mf_hecke_cc.search(
        {'hecke_orbit_code': code},
        ['label', 'embedding_root_real', 'embedding_root_imag', col],
        sort=['conrey_index', 'embedding_index'])
    for ev in rows:
        D = {'label': ev.get('label'), col: ev.get(col)}
        root = (ev.get('embedding_root_real'), ev.get('embedding_root_imag'))
        if root != (None, None):
            D['root'] = root
        # Bug fix: separators are emitted between entries rather than
        # after each one, so the closing ']' is no longer preceded by a
        # trailing comma (which made the stream invalid JSON).
        yield separator + Json.dumps(D)
        separator = ',\n\n'
    yield '\n]\n'
def download_embedding(self, label, lang='text'):
    """Serve the stored coefficient data for a single embedded newform."""
    record = db.mf_hecke_cc.lucky(
        {'label': label},
        ['label', 'embedding_root_real', 'embedding_root_imag',
         'an_normalized', 'angles'])
    if record is None:
        return abort(404, "No embedded newform found for %s" % (label))
    root = (record.pop('embedding_root_real', None),
            record.pop('embedding_root_imag', None))
    # Keep the root only when the database stored at least one component.
    if root != (None, None):
        record['root'] = root
    title = 'Coefficient data for embedded newform %s,' % label
    return self._wrap(Json.dumps(record), label, lang=lang, title=title)
def download_EC_all(label):
    """Serve all stored data for an elliptic curve (or every curve in an
    isogeny class) as plain text, one JSON record per curve."""
    try:
        N, iso, number = split_lmfdb_label(label)
    except (ValueError, AttributeError):
        return elliptic_curve_jump_error(label, {})
    if number:
        # Full curve label: fetch the single matching curve.
        curve = db.ec_curvedata.lookup(label, label_col='lmfdb_label')
        if curve is None:
            return elliptic_curve_jump_error(label, {})
        records = [curve]
    else:
        # Isogeny class label: fetch every curve in the class.
        records = list(db.ec_curvedata.search({'lmfdb_iso': label},
                                              sort=['lmfdb_number']))
        if not records:
            return elliptic_curve_jump_error(label, {})
    response = make_response('\n\n'.join(Json.dumps(r) for r in records))
    response.headers['Content-type'] = 'text/plain'
    return response
def download_EC_all(label):
    """Serve all stored data for an elliptic curve (or every curve in an
    isogeny class) as plain text, one JSON record per curve.

    Falls back to the curve jump-error page on any bad or unknown label.
    """
    try:
        N, iso, number = split_lmfdb_label(label)
    except (ValueError, AttributeError):
        return elliptic_curve_jump_error(label, {})
    if number:
        data = db.ec_curves.lookup(label, label_col='lmfdb_label')
        if data is None:
            return elliptic_curve_jump_error(label, {})
        data_list = [data]
    else:
        data_list = list(db.ec_curves.search({'lmfdb_iso': label},
                                             projection=2, sort=['number']))
        # Idiom fix: test emptiness directly instead of len(...) == 0.
        if not data_list:
            return elliptic_curve_jump_error(label, {})
    response = make_response('\n\n'.join(Json.dumps(d) for d in data_list))
    response.headers['Content-type'] = 'text/plain'
    return response
def download_ECNF_all(nf, conductor_label, class_label, number):
    """Serve all stored data for one elliptic curve over a number field.

    Redirects to the index with a flashed error message when the field
    label is invalid, the curve label is malformed, or no curve exists.
    """
    conductor_label = unquote(conductor_label)
    try:
        nf_label = nf_string_to_label(nf)
    except ValueError:
        # Bug fix: nf_label is unbound when nf_string_to_label raises;
        # the old code referenced it here and crashed with a NameError.
        # Report the raw input instead.
        flash_error("%s is not a valid number field label", nf)
        return redirect(url_for(".index"))
    label = "".join(["-".join([nf_label, conductor_label, class_label]), number])
    if not LABEL_RE.fullmatch(label):
        flash_error("%s is not a valid elliptic curve label.", label)
        return redirect(url_for(".index"))
    data = db.ec_nfcurves.lookup(label)
    if data is None:
        flash_error("%s is not the label of an elliptic curve in the database.", label)
        return redirect(url_for(".index"))
    response = make_response(Json.dumps(data))
    response.headers['Content-type'] = 'text/plain'
    return response
def api_query(table, id=None):
    """Generic API endpoint: query `table` with typed query-string
    parameters, or fetch a single row by integer `id`.

    Meta parameters: _format (html/json/yaml), _offset, _delim, _fields,
    _sort. Other query values carry a type prefix (s/i/f/ls/li/lf/py/
    cs/ci/cf/cpy) that selects how the string is converted.

    Returns a rendered HTML page, a JSON response, a YAML response, or a
    redirect/404 on error.
    """
    #if censored_table(table):
    #    return abort(404)
    # parsing the meta parameters _format and _offset
    format = request.args.get("_format", "html")
    offset = int(request.args.get("_offset", 0))
    DELIM = request.args.get("_delim", ",")
    fields = request.args.get("_fields", None)
    sortby = request.args.get("_sort", None)

    if fields:
        fields = ['id'] + fields.split(DELIM)
    else:
        fields = 3
    if sortby:
        sortby = sortby.split(DELIM)

    if offset > 10000:
        if format != "html":
            return abort(404)
        else:
            flash_error("offset %s too large, please refine your query.", offset)
            return redirect(url_for(".api_query", table=table))

    # preparing the actual database query q
    try:
        coll = getattr(db, table)
    except AttributeError:
        if format != "html":
            return abort(404)
        else:
            flash_error("table %s does not exist", table)
            return redirect(url_for(".index"))
    q = {}

    # if id is set, just go and get it, ignore query parameters
    if id is not None:
        if offset:
            return abort(404)
        single_object = True
        api_logger.info("API query: id = '%s', fields = '%s'" % (id, fields))
        if re.match(r'^\d+$', id):
            id = int(id)
        else:
            return abort(404, "id '%s' must be an integer" % id)
        data = coll.lucky({'id': id}, projection=fields)
        data = [data] if data else []
    else:
        single_object = False
        # Hoisted out of the loop: importing per-iteration is wasteful.
        from ast import literal_eval
        for qkey, qval in request.args.items():
            try:
                if qkey.startswith("_"):
                    continue
                elif qval.startswith("s"):
                    qval = qval[1:]
                elif qval.startswith("i"):
                    qval = int(qval[1:])
                elif qval.startswith("f"):
                    qval = float(qval[1:])
                elif qval.startswith("ls"):
                    # indicator that it might be a list of strings
                    # Bug fix: qval[2] took only the third *character*;
                    # the tail qval[2:] is what must be split (cf. the
                    # "li"/"lf" branches below).
                    qval = qval[2:].split(DELIM)
                elif qval.startswith("li"):
                    qval = [int(_) for _ in qval[2:].split(DELIM)]
                elif qval.startswith("lf"):
                    qval = [float(_) for _ in qval[2:].split(DELIM)]
                elif qval.startswith("py"):
                    # literal evaluation
                    qval = literal_eval(qval[2:])
                elif qval.startswith("cs"):
                    # containing string in list
                    qval = {"$contains": [qval[2:]]}
                elif qval.startswith("ci"):
                    qval = {"$contains": [int(qval[2:])]}
                elif qval.startswith("cf"):
                    # Bug fix: the operator was written "contains"
                    # without the leading '$', unlike every other
                    # containment branch.
                    qval = {"$contains": [float(qval[2:])]}
                elif qval.startswith("cpy"):
                    qval = {"$contains": [literal_eval(qval[3:])]}
            except Exception:
                # no suitable conversion for the value, keep it as string
                # (narrowed from a bare except:, which also swallowed
                # KeyboardInterrupt/SystemExit).
                pass
            # update the query
            q[qkey] = qval

        # assure that one of the keys of the query is indexed
        # however, this doesn't assure that the query will be fast...
        #if q != {} and len(set(q.keys()).intersection(collection_indexed_keys(coll))) == 0:
        #    flash_error("no key in the query %s is indexed.", q)
        #    return redirect(url_for(".api_query", table=table))

    # sort = [('fieldname1', 1 (ascending) or -1 (descending)), ...]
    if sortby is not None:
        sort = []
        for key in sortby:
            if key.startswith("-"):
                sort.append((key[1:], -1))
            else:
                sort.append((key, 1))
    else:
        sort = None

    # Rename legacy elliptic-curve column names for backwards compatibility
    # (see test_api_usage() test).
    if table == 'ec_curvedata':
        for oldkey, newkey in zip(['label', 'iso', 'number'],
                                  ['Clabel', 'Ciso', 'Cnumber']):
            if oldkey in q:
                q[newkey] = q[oldkey]
                q.pop(oldkey)

    # executing the query "q"
    try:
        data = list(coll.search(q, projection=fields, sort=sort,
                                limit=100, offset=offset))
    except QueryCanceledError:
        flash_error("Query %s exceeded time limit.", q)
        return redirect(url_for(".api_query", table=table))
    except KeyError as err:
        flash_error("No key %s in table %s", err, table)
        return redirect(url_for(".api_query", table=table))
    except ValueError as err:
        flash_error(str(err))
        return redirect(url_for(".api_query", table=table))

    if single_object and not data:
        if format != 'html':
            return abort(404)
        else:
            flash_error("no document with id %s found in table %s.", id, table)
            return redirect(url_for(".api_query", table=table))

    # fixup data for display and json/yaml encoding
    if 'bytea' in coll.col_type.values():
        for row in data:
            for key, val in row.items():
                # NOTE(review): 'buffer' is a Python 2 builtin; under
                # Python 3 this line raises NameError if reached —
                # confirm bytea columns are handled before this path.
                if type(val) == buffer:
                    row[key] = "[binary data]"
        #data = [ dict([ (key, val if coll.col_type[key] != 'bytea' else "binary data") for key, val in row.items() ]) for row in data]
    data = Json.prep(data)

    # preparing the datastructure
    start = offset
    next_req = dict(request.args)
    next_req["_offset"] = offset
    url_args = next_req.copy()
    query = url_for(".api_query", table=table, **next_req)
    offset += len(data)
    next_req["_offset"] = offset
    next = url_for(".api_query", table=table, **next_req)

    # the collected result
    data = {
        "table": table,
        "timestamp": datetime.utcnow().isoformat(),
        "data": data,
        "start": start,
        "offset": offset,
        "query": query,
        "next": next,
        "rec_id": 'id' if coll._label_col is None else coll._label_col,
    }

    if format.lower() == "json":
        #return flask.jsonify(**data) # can't handle binary data
        if PY3:
            return current_app.response_class(
                json.dumps(data, indent=2), mimetype='application/json')
        else:
            return current_app.response_class(
                json.dumps(data, encoding='ISO-8859-1', indent=2),
                mimetype='application/json')
    elif format.lower() == "yaml":
        y = yaml.dump(data, default_flow_style=False,
                      canonical=False, allow_unicode=True)
        return Response(y, mimetype='text/plain')
    else:
        # sort displayed records by key (as jsonify and yaml_dump do)
        data["pretty"] = pretty_document
        location = table
        title = "API - " + location
        bc = [("API", url_for(".index")), (table,)]
        query_unquote = unquote(data["query"])
        return render_template("collection.html", title=title,
                               single_object=single_object,
                               query_unquote=query_unquote,
                               url_args=url_args, bread=bc, **data)
if single_object and not data: if format != 'html': flask.abort(404) else: flash_error("no document with id %s found in table %s.", id, table) return flask.redirect(url_for(".api_query", table=table)) # fixup data for display and json/yaml encoding if 'bytea' in coll.col_type.values(): for row in data: for key, val in row.iteritems(): if type(val) == buffer: row[key] = "[binary data]" #data = [ dict([ (key, val if coll.col_type[key] != 'bytea' else "binary data") for key, val in row.iteritems() ]) for row in data] data = Json.prep(data) # preparing the datastructure start = offset next_req = dict(request.args) next_req["_offset"] = offset url_args = next_req.copy() query = url_for(".api_query", table=table, **next_req) offset += len(data) next_req["_offset"] = offset next = url_for(".api_query", table=table, **next_req) # the collected result data = { "table": table, "timestamp": datetime.utcnow().isoformat(),
def download_galmap_text(self, label, lang="text"):
    """Return the stored data for the Belyi map `label` as a download.

    Bug fixes: abort with a 404 when the label is unknown (previously a
    missing label produced a download whose body was the JSON literal
    "null"), and forward the requested language to _wrap (previously the
    `lang` parameter was silently ignored).
    """
    data = db.belyi_galmaps.lookup(label)
    if data is None:
        return abort(404, "Label not found: %s" % label)
    return self._wrap(Json.dumps(data), label, lang=lang,
                      title='Data for embedded Belyi map with label %s,' % label)
def datapage(labels, tables, title, bread, label_cols=None, sorts=None):
    """
    Render (or serialize) the raw database records matching one label
    across several search tables.

    INPUT:

    - ``labels`` -- a string giving a label used in the tables (e.g. '11.a1'
      for an elliptic curve), or a list of strings (one per table)
    - ``tables`` -- a search table or list of search tables (as strings)
    - ``title`` -- title for the page
    - ``bread`` -- bread for the page
    - ``label_cols`` -- a list of column names of the same length;
      defaults to using ``label`` everywhere
    - ``sorts`` -- lists for sorting each table; defaults to None

    Honors the ``_format`` query parameter (html/json/yaml).
    """
    format = request.args.get("_format", "html")
    if not isinstance(tables, list):
        tables = [tables]
    if not isinstance(labels, list):
        labels = [labels for table in tables]
    if label_cols is None:
        label_cols = ["label" for table in tables]
    if sorts is None:
        sorts = [None for table in tables]
    assert len(labels) == len(tables) == len(label_cols)

    def apierror(msg, flash_extras=(), code=404, table=False):
        # Error helper: flash + redirect for HTML, abort otherwise.
        # Bug fix: the default for flash_extras was a mutable list
        # ([]); use an immutable tuple instead.
        if format == "html":
            flash_error(msg, *flash_extras)
            if table:
                return redirect(url_for("API.api_query", table=table))
            else:
                return redirect(url_for("API.index"))
        else:
            return abort(code, msg % tuple(flash_extras))

    data = []
    search_schema = {}
    extra_schema = {}
    for label, table, col, sort in zip(labels, tables, label_cols, sorts):
        q = {col: label}
        coll = db[table]
        try:
            data.append(list(coll.search(q, projection=3, sort=sort)))
        except QueryCanceledError:
            return apierror("Query %s exceeded time limit.", [q], code=500, table=table)
        except KeyError as err:
            return apierror("No key %s in table %s", [err, table], table=table)
        except Exception as err:
            return apierror(str(err), table=table)
        search_schema[table] = [(col, coll.col_type[col])
                                for col in sorted(coll.search_cols)]
        extra_schema[table] = [(col, coll.col_type[col])
                               for col in sorted(coll.extra_cols)]
    data = Json.prep(data)

    # the collected result
    data = {
        "labels": labels,
        "tables": tables,
        "label_cols": label_cols,
        "timestamp": datetime.utcnow().isoformat(),
        "data": data,
    }

    if format.lower() == "json":
        return current_app.response_class(json.dumps(data, indent=2),
                                          mimetype='application/json')
    elif format.lower() == "yaml":
        y = yaml.dump(data, default_flow_style=False,
                      canonical=False, allow_unicode=True)
        return Response(y, mimetype='text/plain')
    else:
        return render_template("apidata.html", title=title,
                               search_schema=search_schema,
                               extra_schema=extra_schema,
                               bread=bread, pretty=pretty_document, **data)