def covid_page():
    """Build the Pokemon height/weight scatter page.

    NOTE(review): the function name says "covid" but the content is
    Pokemon data -- confirm the route/function name against its caller.

    Returns:
        A Dash ``html.Div`` containing a back-to-home button and the
        height-vs-weight scatter plot (marker size = weight/height).
    """
    data = get_data()
    # One column per attribute; "size" scales each marker by weight/height.
    df = pd.DataFrame({
        "height": [x['height'] for x in data],
        "weight": [x['weight'] for x in data],
        "pokemon": [x['name'] for x in data],
        "image": [x['image'] for x in data],
        "size": [x['weight'] / x['height'] for x in data],
    })
    fig = px.scatter(df,
                     x="height",
                     y="weight",
                     size='size',
                     text="pokemon",
                     hover_data=["pokemon"])
    content = html.Div([
        html.Div([
            dbc.Button("Back to home",
                       color="dark",
                       className="mr-1",
                       href='/'),
            html.Div([dcc.Graph(id='Pokemon and their height', figure=fig)],
                     style={"paddingTop": "40px"})
        ],
                 className='text-center container',
                 style={'paddingTop': "50px"})
    ])
    return content
def get_data():
    """Return the requested dataset and its metadata as JSON.

    Reads the dataset id from the ``id`` query parameter; a missing or
    non-integer value propagates as KeyError/ValueError.
    """
    dataset_id = int(request.args['id'])
    payload = {
        'data': utils.get_data(dataset_id),
        'metadata': utils.read_metadata(dataset_id),
    }
    return jsonify(payload)
async def plot(
        *,
        resource_url: str = Query(...,
                                  title="Resource URL",
                                  description="URL to a NetCDF resource"),
        get: str = Query(
            ...,
            title="Query string",
            description=
            "Receive list of parameters or get the plot, specifying the variable name",
            regex='^(param|plot)$'),
        variable: str = Query(
            None,
            title="Variable name",
            description="String with the NetCDF Variable name"),
        metadata: bool = Query(
            False,
            title="metadata",
            description="If true add metadata tab to the plot widget")):
    """List plottable variables or return a time-series plot as JSON.

    ``get`` selects the mode (the regex restricts it to two values):
    'param' returns the variable listing, 'plot' builds the plot page
    for ``variable`` and serializes it for the 'tsplot' target.
    """
    if get == 'param':
        return get_variables(resource_url)
    if get == 'plot':
        series = get_data(resource_url, variable)
        page = create_page(series, metadata=metadata)
        return json_item(page, target='tsplot')
def webhook():
    """Handle download-lifecycle webhook callbacks (Flask route).

    Expects a JSON body with ``type`` and ``data`` keys and dispatches on
    ``type`` ('downloadStarted' / 'downloadCompleted' / 'downloadError').
    Any malformed payload aborts with HTTP 400.
    """
    try:
        data = request.get_json()
        # Token verification is currently disabled (see the commented-out
        # else-branch near the end of this function).
        #if 'token' in data and data['token'] == TOKEN:
        if 'type' in data:
            _type = data['type']
            if 'data' in data:
                _data = data['data']
                # 'custom' carries caller-supplied extras; default to empty.
                if 'custom' in _data:
                    custom = _data['custom']
                else:
                    custom = {}
                # Actions
                if _type == 'downloadStarted':
                    return jsonify({'status': True})
                    # NOTE(review): everything below this return is
                    # unreachable dead code -- confirm whether the uid
                    # debug-logging was meant to run BEFORE the return.
                    json_back_data = _data['uid']
                    uid = json_back_data['uid']
                    print(uid)
                    f = open(DEBUG_UID, 'w')
                    f.write(str(uid) + '\n')
                    f.close()
                elif _type == 'downloadCompleted':
                    # Completed download: notify listeners, then remove the
                    # uploaded file and refresh state.
                    _file = custom['file']
                    _video_data = get_data(_file)
                    _response = {
                        'datetime': _video_data['datetime'],
                        'uid': _data['uid'],
                        'url': _data['url'],
                        'meta': {
                            'storage': _data['meta']
                        }
                    }
                    print(_data['uid'])
                    # Debug trace of the uid.  NOTE(review): prefer a
                    # ``with open(...)`` so the handle is closed even if
                    # send() raises.
                    f = open(DEBUG_UID, 'w')
                    f.write(_data['uid'] + '\n')
                    send('newVideo', _response, False, False)
                    f.close()
                    remove_uploaded_file(_file)
                    update()
                    return jsonify({'status': True})
                elif _type == 'downloadError':
                    # Errors are acknowledged without further action.
                    return jsonify({'status': True})
                else:
                    abort(400, {'status': False, 'message': 'Type no found.'})
            else:
                abort(400, {'status': False, 'message': 'Invalid data.'})
        else:
            abort(400, {'status': False, 'message': 'Invalid type.'})
        #else:
        #    abort(400, {
        #        'status': False,
        #        'message': 'Invalid token.'
        #    })
    except BadRequest:
        # request.get_json() raises BadRequest on a non-JSON body.
        abort(400, {'status': False, 'message': 'Invalid input data.'})
def stats(num_weeks):
    """Render the stats page: training-session counts per sport over the
    last ``num_weeks`` weeks (non-positive values are clamped to 1)."""
    # Same guard as ``if num_weeks <= 0: num_weeks = 1``, as an expression.
    num_weeks = num_weeks if num_weeks > 0 else 1
    weekly_counts = make_weeklycount(get_data(num_weeks))
    return render_template('stats.html',
                           num_weeks=num_weeks,
                           data=weekly_counts,
                           title='Stats')
def init_data():
    """Import the raw CSV data into the SQL database.

    Reads ``./initdata/data.csv`` via ``utils.get_data``, keeps the first
    six comma-separated fields of each row, and bulk-writes the result
    through ``sqldb.SQLDB.write_data``.
    """
    rows = utils.get_data('./initdata/data.csv')
    sdb = sqldb.SQLDB()
    # Explicit indexing (not a slice) so a malformed short row still raises
    # IndexError instead of being silently truncated.
    results = [
        [x[0], x[1], x[2], x[3], x[4], x[5]]
        for x in (r[0].split(',') for r in rows)
    ]
    sdb.write_data(results)
def get_bill_protocol(form, payment):
    """Create a Piastix bill for ``payment`` and redirect to its pay URL.

    Builds the USD bill request from the form amount and the payment's
    shop order id, then forwards the user to the URL the API returns.
    """
    amount = float(form.amount.data)
    order_id = str(payment.shop_order_id)
    bill_request = {
        "payer_currency": USD_CURRENCY,
        "shop_amount": amount,
        "shop_currency": USD_CURRENCY,
        "shop_id": SHOP_ID,
        "shop_order_id": order_id,
    }
    bill = get_data(bill_request, REQUIRED_FIELDS_BILL, PIASTIX_BILL_URL)
    return redirect(bill["url"])
async def plot(
        *,
        resource_url: str = Query(...,
                                  title="Resource URL",
                                  description="URL to a NetCDF resource"),
        get: str = Query(
            ...,
            title="Query string",
            description=
            "Receive list of parameters or get the plot, specifying the variable name",
            regex='^(param|plot)$'),
        variable: str = Query(
            None,
            title="Variable name",
            description="String with the NetCDF Variable name"),
        axis: str = Query(None,
                          title="axis type",
                          description="String with the NetCDF Variable name",
                          regex='^(x_axis|y_axis)$'),
        metadata: bool = Query(
            False,
            title="metadata",
            description="If true add metadata tab to the plot widget")):
    """List plottable variables or return a time-series plot as JSON.

    'param' mode lists variables (422 on an unreadable resource);
    'plot' mode builds the plot for ``variable`` along ``axis``
    (defaulting to 'y_axis') and serializes it for the 'tsplot' target.
    """
    if get == 'param':
        try:
            return get_variables(resource_url)
        except IOError:
            raise HTTPException(
                status_code=422,
                detail="URL To invalid or not supported NetCDF resource")
    if get == 'plot':
        axis = axis or 'y_axis'
        series = get_data(resource_url, variable, axis)
        page = create_page(series, metadata=metadata)
        return json_item(page, target='tsplot')
def index(**options):
    """Render the roadmap page in the requested locale (default 'en')."""
    locale, strings = utils.localization(options.get('locale', 'en'))
    # NOTE(review): the original bound this result to an unused local named
    # ``html`` (shadowing any ``html`` module import).  The call is kept in
    # case utils.get_data has side effects -- confirm and drop if it does not.
    utils.get_data("roadmap.html")
    return render_template("roadmap.html", locale=locale, strings=strings)
async def download(
        *,
        resource_url: str = Query(...,
                                  title="Resource URL",
                                  description="URL to a NetCDF resource"),
        variable: List[str] = Query(
            None,
            title="Variable name",
            description="List of NetCDF Variable names"),
        axis: str = Query(None,
                          title="axis type",
                          description="String with the NetCDF Variable name",
                          regex='^(x_axis|y_axis)$'),
        output_format: str = Query(None,
                                   title="output format",
                                   description="output format",
                                   regex='^(csv|nc)$')):
    """Download the requested variables of a NetCDF resource as a file.

    Fetches each valid variable as a dataframe, merges them on the shared
    index, and streams the result either as a zipped CSV (with metadata
    sidecars) or as a NetCDF file, depending on ``output_format``
    (default 'csv').  Files are written under ``$DOWNLOAD_DIR``.
    """
    # list of variables
    plottable_variables = get_variables(resource_url)
    # axis = list(plottable_variables.keys())[0]
    if not axis:
        axis = 'y_axis'
    # When no explicit variable list is given, take everything on the axis.
    if not variable:
        variables_items = {'variables': plottable_variables[axis]}
    else:
        variables_items = {'variables': variable}
    # check if the user provided valid parameters
    valid_vars = []
    for i in variables_items['variables']:
        if i in plottable_variables[axis]:
            valid_vars.append(i)
        else:
            print('removed:', i)
    print(valid_vars)
    # create an empty list to append one dataframe for each variables
    data = []
    for i in valid_vars:
        # get_data is handling only variable selection at the moment
        # TODO: add an option in get_data to allow time-selection
        # a good candidate for this is by using pandas time-range slicing
        # it will require one extra parameters in the URL request,
        # to handle start/end time selection
        data.append(get_data(resource_url, i, axis))
    # merge the requested data in a single dataframe
    # suffixes = ['_'+i.variable_metadata['standard_name'] for i in data]
    # First try a strict merge (suffixes=(False, False) makes pandas raise
    # ValueError on overlapping column names); fall back to suffixing each
    # column with its variable's standard_name.
    try:
        df_final = reduce(
            lambda left, right: pd.merge(
                left, right, suffixes=(False, False), on=data[0].index.name),
            data)
    except ValueError:
        suffixes = ['_' + i.variable_metadata['standard_name'] for i in data]
        df_final = reduce(
            lambda left, right: pd.merge(
                left, right, suffixes=suffixes, on=data[0].index.name), data)
    # generate a uuid for the filename
    if not output_format:
        output_format = 'csv'
    if output_format == 'csv':
        # URL-safe random filename: base64 of a uuid4 with +/= mapped away.
        rv = base64.b64encode(uuid.uuid4().bytes).decode('utf-8')
        unique = re.sub(r'[\=\+\/]', lambda m: {
            '+': '-',
            '/': '_',
            '=': ''
        }[m.group(0)], rv)
        filename = str(unique) + '.' + str(output_format) + '.zip'
        # TODO: read the secret-key from a configuration file
        s = TimestampSigner('secret-key')
        download_token = s.sign(filename).decode()
        # this stores the data in the 'DOWNLOAD_DIR' which is set in the docker-compose.yml instruction
        outfile = Path(os.environ['DOWNLOAD_DIR'], str(filename))
        compression_opts = dict(method='zip', archive_name='dataset.csv')
        df_final.to_csv(outfile, compression=compression_opts)
        # NOTE(review): metadata.csv/metadata.html are written to the
        # process CWD with fixed names -- concurrent requests would race;
        # consider tempfile-based names.
        with open('metadata.csv', 'w', newline="") as csv_file:
            writer = csv.writer(csv_file)
            for key, value in data[0].dataset_metadata.items():
                writer.writerow([key, value])
        with open('metadata.html', 'w') as f:
            f.write(
                json2html.convert(json={**data[0].dataset_metadata},
                                  table_attributes="id=\"metadata\" "))
        # Append the metadata sidecars to the zip created by to_csv.
        # NOTE(review): local name ``zip`` shadows the builtin.
        zip = zipfile.ZipFile(outfile, 'a')
        zip.write('metadata.html', os.path.basename('metadata.html'))
        zip.write('metadata.csv', os.path.basename('metadata.csv'))
        zip.close()
        # the line below will return a direct download
        if os.path.isfile(outfile):
            return FileResponse(path=outfile, filename='dataset.csv.zip')
        # the line below will redirect to a download landing page
        # return RedirectResponse(url='/download/%s' % str(download_token))
    if output_format == 'nc':
        # Deterministic (uuid5) filename, unlike the random csv name above.
        filename = str(uuid.uuid5(uuid.NAMESPACE_URL,
                                  'download')) + '.' + str(output_format)
        s = TimestampSigner('secret-key')
        download_token = s.sign(filename).decode()
        # this stores the data in the 'DOWNLOAD_DIR' which is set in the docker-compose.yml instruction
        outfile = Path(os.environ['DOWNLOAD_DIR'], str(filename))
        ds = df_final.to_xarray()
        # Re-attach dataset- and per-variable metadata before writing NetCDF.
        ds.attrs = data[0].dataset_metadata
        for i in data:
            ds[i.columns.values[0]].attrs = i.variable_metadata
        ds.to_netcdf(outfile)
        # the line below will return a direct download
        if os.path.isfile(outfile):
            return FileResponse(path=outfile, filename='out.nc')
def index(**options):
    """Render the changelog page in the requested locale (default 'en')."""
    requested_locale = options.get('locale', 'en')
    locale, strings = utils.localization(requested_locale)
    changelog_versions = utils.get_data('changelog.json')
    return render_template("changelog.html",
                           locale=locale,
                           strings=strings,
                           versions=changelog_versions)