def download_results(self, id: str):
    """Export all scraped result items for a task as a CSV attachment.

    :param id: task id; used to resolve the task's spider and its
        results collection. (Name kept for interface compatibility even
        though it shadows the builtin ``id``.)
    """
    task = db_manager.get('tasks', id=id)
    spider = db_manager.get('spiders', id=task['spider_id'])
    col_name = spider.get('col')
    if not col_name:
        # No results collection configured: send an empty CSV.
        # Fix: the original interpolated the empty/None col_name into
        # the filename (e.g. "results_None_....csv").
        return send_csv([], f'results_{round(time())}.csv')
    # NOTE(review): 999999999 is an effectively-unbounded limit sentinel
    # understood by db_manager — confirm there is no saner "no limit" API.
    items = db_manager.list(col_name, {'task_id': id}, limit=999999999)
    fields = get_spider_col_fields(col_name, task_id=id, limit=999999999)
    return send_csv(
        items,
        filename=f'results_{col_name}_{round(time())}.csv',
        fields=fields,
        encoding='utf-8')
def get(self, file_type=None):
    """Return lookup rows filtered by the request args, as JSON or CSV.

    :param file_type: 'csv' for a CSV download, anything else for JSON.
    """
    parser.add_argument('id', type=int)
    parser.add_argument('gene_name_id', type=int)
    parser.add_argument('organ_id', type=int)
    parser.add_argument('gene_name', type=str)
    parser.add_argument('organ_name', type=str)
    args = parser.parse_args()
    new_args = {key: val for key, val in args.items() if val is not None}

    lookup = LookupModel.query
    # Name-based filters need relationship lookups, so they are applied
    # separately and removed before the plain filter_by() below.
    if new_args.get('gene_name'):
        lookup = lookup.filter(LookupModel.gene_name.has(
            GeneNameModel.name == new_args.pop('gene_name')))
    if new_args.get('organ_name'):
        lookup = lookup.filter(LookupModel.organ.has(
            OrganModel.name == new_args.pop('organ_name')))

    data = lookup.filter_by(**new_args).all()
    if file_type == 'csv':
        rows = [x.to_dict() for x in data]
        # Fix: rows[0] raised IndexError when the query matched nothing.
        keys = rows[0].keys() if rows else []
        return send_csv(rows, "lookupList.csv", keys)
    return jsonify({'items': [x.to_dict() for x in data]})
def get(self, site_id, interval=""):
    """Return recent AQI readings for a site as a CSV download.

    The window reaches ``interval`` back from the publish time of the
    newest log row; an unrecognized interval yields HTTP 400.
    """
    t = self.get_time_delta(site_id, interval)
    if t is None:  # fix: was `t == None`; identity test is correct for None
        return Response("Validation Error", status=400)
    # Anchor the window at the publish time of the newest row overall.
    last_pub_time = db.session.query(models.AQILogs.publish_time).filter(
        models.AQILogs.id == db.session.query(func.max(
            models.AQILogs.id))).first()[0]
    logs = db.session.query(
        models.AQILogs.publish_time, models.AQILogs.aqi).filter(
            models.AQILogs.site_id == site_id).filter(
            models.AQILogs.publish_time >= (last_pub_time - t)).all()
    rtn = []
    for log in logs:
        dic = log._asdict()
        # Rendered as e.g. 2019-11-19T04:55:00+0800 — timestamps are
        # presumably already local (+0800); confirm against the writer.
        dic['Time (UTC)'] = dic.pop('publish_time').strftime(
            "%Y-%m-%dT%H:%M:%S+0800")
        rtn.append(dic)
    # Fix: the header previously leaked the loop variable after the loop
    # and was only accidentally empty when no rows matched.
    headers = rtn[0].keys() if rtn else []
    return send_csv(rtn, "rtn.csv", headers, cache_timeout=0)
def export_public():
    '''
    Returns all counts per shelter for public shelters

    Returns:
        200: CSV attachment listing each public shelter's bed and person
             counts per day.
    '''
    # only show person count to admin
    columns = ['day', 'name', 'personcount', 'bedcount', 'shelter_id']
    count_rows = db.session.query(Count)\
        .join(Shelter, Shelter.id == Count.shelter_id)\
        .order_by(Count.day)\
        .filter(Shelter.public)\
        .values(
            Count.shelter_id,
            Count.bedcount,
            Count.personcount,
            Count.day,
            Shelter.name,
        )
    # Rows are keyed-tuple results; expose them as dicts for the writer.
    as_dicts = (row._asdict() for row in count_rows)
    return send_csv(as_dicts, "shelterCounts.csv", columns)
def ordersMonth(month):
    """Export all orders placed in the given month (1-12) as a CSV.

    Preserves the original best-effort error handling: on failure the
    exception is printed and None is returned.
    """
    try:
        # Fix: the original concatenated str(month) directly into the SQL
        # text, which permitted SQL injection. Use a bound parameter.
        # NOTE(review): assumes a DB-API cursor with the %s paramstyle
        # (e.g. pymysql/MySQLdb) — confirm against the `database` object.
        query = (
            "SELECT restaurants.name AS Restaurant, "
            "clients.fName AS ClientFirstName, "
            "clients.lName AS ClientLastName, "
            "orders.orderDate AS Date "
            "FROM restaurants "
            "JOIN orders ON restaurants.idRestaurant = orders.idRestaurant "
            "JOIN clients ON clients.idClient = orders.idClient "
            "WHERE MONTH(orders.orderDate) = %s")
        database.execute(query, (month,))
        fields = ["Restaurant", "ClientFirstName", "ClientLastName", "Date"]
        rows = [dict(zip(fields, record)) for record in database.fetchall()]
        return send_csv(rows, "test.csv", fields)
    except Exception as e:
        print(e)
def getCSV():
    """Export every rose measurement record as a timestamped CSV download."""
    attribute_names = [
        "id", "total_max", "total_mean", "total_median", "total_std",
        "up_max", "up_mean", "up_median", "up_std",
        "bottom_max", "bottom_mean", "bottom_median", "bottom_std",
        "height_max", "height_mean", "height_median", "height_std",
        "grade", "weight", "create_time",
    ]
    records = [
        {name: getattr(row, name) for name in attribute_names}
        for row in Rose().queryRoseData()
    ]
    # Default name of the returned CSV carries the export timestamp.
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    return send_csv(records, 'roselist_{}.csv'.format(stamp), CSVHead)
def use():
    """Export every user joined with their client record as a CSV.

    NOTE(review): the download includes the raw ``users.password`` column —
    verify exposing it is intentional before shipping this endpoint.
    """
    try:
        database.execute(
            "SELECT clients.fName, clients.lName, clients.address, clients.identification, users.userName, users.password "
            + "FROM users "
            + "JOIN clients ON clients.idClient = users.Client_idClient;")
        columns = [
            "ClientFirstName", "ClientLastName", "ClientAddress",
            "ClientIdentification", "UserName", "Password"
        ]
        records = [dict(zip(columns, row)) for row in database.fetchall()]
        return send_csv(records, "test.csv", columns)
    except Exception as e:
        # Best-effort: print and fall through returning None (original behavior).
        print(e)
def api_csv_transaction_list():
    """Export all transactions of the block given by ?hash=... as a CSV,
    sorted by transaction time."""
    search_hash = request.args['hash']
    transaction_data_for_csv = []
    block_result = data_structures.block_collection.find_one(search_hash)
    for tx in block_result['tx']:
        result = data_structures.transaction_collection.find_one(tx)
        # Rename raw Mongo fields to the CSV column names.
        result["number_of_inputs"] = result.pop('vin_sz')
        result["number_of_outputs"] = result.pop('vout_sz')
        result["transaction_hash"] = result.pop('_id')
        transaction_data_for_csv.append(result)
    transaction_data_for_csv.sort(key=lambda item: item.get("time"))
    # Fix: this invariant was an `assert`, which is stripped under -O;
    # raise explicitly so a missing transaction is never silently exported.
    if len(block_result['tx']) != len(transaction_data_for_csv):
        raise ValueError(
            "transaction count mismatch for block %s" % search_hash)
    fields_list = ["transaction_hash", "block_height", "number_of_inputs",
                   "number_of_outputs", "coinbase_transaction", "time",
                   "size", "value_inputs", "fee", "value_outputs"]
    return send_csv(
        transaction_data_for_csv,
        filename=f"Transactions for Block {block_result['height']}.csv",
        fields=fields_list,
        # Documents may carry extra fields; ignore anything not listed.
        writer_kwargs={"extrasaction": "ignore"})
def csv():
    """Export every registration record as a semicolon-delimited CSV."""
    fields = [
        'NIK Catin Laki-laki', 'Nama Catin Laki-laki',
        'NIK Catin Perempuan', 'Nama Catin Perempuan',
        'Tanggal Daftar', 'Jadwal Nikah', 'Jam',
        'Tempat Pelaksanaan Nikah', 'Status Pendaftaran',
    ]
    all_data_CSV = []
    for i in DataCatin.query.all():
        all_data_CSV.append({
            'NIK Catin Laki-laki': i.NIK_catin_laki_laki,
            'Nama Catin Laki-laki': i.nama_catin_laki_laki,
            'NIK Catin Perempuan': i.NIK_catin_perempuan,
            'Nama Catin Perempuan': i.nama_catin_perempuan,
            'Tanggal Daftar': i.tanggal_daftar,
            'Jadwal Nikah': i.jadwal_nikah,
            'Jam': i.jam,
            'Tempat Pelaksanaan Nikah': i.tempat_pelaksaan_nikah,
            'Status Pendaftaran': i.status_pendaftaran
        })
    # Fix: the first field was '<b>NIK Catin Laki-laki</b>', which did not
    # match the row key 'NIK Catin Laki-laki' and broke the CSV header/rows.
    return send_csv(all_data_CSV, "testing.csv", fields,
                    cache_timeout=1, delimiter=';')
def index():
    """Serve a fixed two-row demo CSV with columns id and foo."""
    demo_rows = [
        {"id": 42, "foo": "bar"},
        {"id": 91, "foo": "baz"},
    ]
    return send_csv(demo_rows, "test.csv", ["id", "foo"])
def summary_data(zone):
    """Export summary rows whose zone matches ``zone`` as a CSV download."""
    columns = ['zone', 'date', 'description', 'score_key', 'value']
    query = Summary.select(
        Summary.zone,
        Summary.date,
        Summary.description,
        Summary.score_key,
        Summary.value,
    ).where(Summary.zone.contains(zone))
    records = []
    for row in query:
        as_dict = model_to_dict(row)
        del as_dict['id']  # drop the implicit primary-key column
        records.append(as_dict)
    return send_csv(records, f"ted_summary_{zone}.csv", columns)
def download_tags_with_usage():
    """
    GET endpoint that downloads a CSV file of the list of club tags,
    including per-tag usage statistics.
    """
    columns = ['_id', 'name', 'num_clubs']
    tag_rows = mongo_aggregations.fetch_aggregated_tag_list()
    return send_csv(tag_rows, 'tags.csv', columns, cache_timeout=0)
def toycsv():
    """Export every record of the toy spreadsheet as a CSV download."""
    columns = [
        "Account Name",
        "Website",
        "Industries Trade",
        "Industries Other",
        "Products Self Reported",
        "HS Code",
        "Keywords",
        "Billing Zip/Postal Code",
    ]
    sheet = get_toycsvsheet()
    return send_csv(sheet.get_all_records(), "toy.csv", columns)
def export_data(measurement, unique_id, start_seconds, end_seconds):
    """
    Return data from start_seconds to end_seconds from influxdb.
    Used for exporting data.
    """
    current_app.config['INFLUXDB_USER'] = INFLUXDB_USER
    current_app.config['INFLUXDB_PASSWORD'] = INFLUXDB_PASSWORD
    current_app.config['INFLUXDB_DATABASE'] = INFLUXDB_DATABASE
    dbcon = influx_db.connection

    # Resolve the device display name: the unique_id may belong to an
    # output, an input, or a math. (Fix: renamed `input` -> `input_dev`;
    # the original shadowed the builtin and the sibling export_data
    # already uses `input_dev`.)
    output = Output.query.filter(Output.unique_id == unique_id).first()
    input_dev = Input.query.filter(Input.unique_id == unique_id).first()
    math = Math.query.filter(Math.unique_id == unique_id).first()
    if output:
        name = output.name
    elif input_dev:
        name = input_dev.name
    elif math:
        name = math.name
    else:
        name = None

    # Influx stores UTC; shift local epoch seconds to UTC timestamps.
    utc_offset_timedelta = datetime.datetime.utcnow() - datetime.datetime.now()
    start = datetime.datetime.fromtimestamp(float(start_seconds))
    start += utc_offset_timedelta
    start_str = start.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    end = datetime.datetime.fromtimestamp(float(end_seconds))
    end += utc_offset_timedelta
    end_str = end.strftime('%Y-%m-%dT%H:%M:%S.%fZ')

    query_str = query_string(
        measurement, unique_id, start_str=start_str, end_str=end_str)
    # query_string() signals invalid input by returning 1 rather than raising.
    if query_str == 1:
        flash('Invalid query string', 'error')
        return redirect(url_for('routes_page.page_export'))
    raw_data = dbcon.query(query_str).raw
    if not raw_data or 'series' not in raw_data:
        flash('No measurements to export in this time period', 'error')
        return redirect(url_for('routes_page.page_export'))

    # Generate column names
    col_1 = 'timestamp (UTC)'
    col_2 = '{name} {meas} ({id})'.format(
        name=name, meas=measurement, id=unique_id)
    csv_filename = '{id}_{meas}.csv'.format(id=unique_id, meas=measurement)

    # Populate list of dictionary entries for each column to convert to CSV
    # and send to the user to download
    csv_data = []
    for each_data in raw_data['series'][0]['values']:
        csv_data.append({col_1: str(each_data[0][:-4]).replace('T', ' '),
                         col_2: each_data[1]})
    return send_csv(csv_data, csv_filename, [col_1, col_2])
def make_csv(data: List[attr.s], filename: str) -> Response:
    """Serialize a list of attrs instances into a CSV response.

    Fix: the original collected headers in a ``set``, which made the CSV
    column order nondeterministic between runs. A dict is used here as an
    ordered set, so columns follow first appearance across the classes.

    This will not work too well for objects that include other attr.s
    objects (nested attrs are flattened by asdict but not reflected in
    the headers).
    """
    headers = {}  # dict-as-ordered-set of field names
    entries = []
    for d in data:
        for field_name in attr.fields_dict(d.__class__):
            headers.setdefault(field_name)
        entries.append(attr.asdict(d))
    return send_csv(entries, filename, list(headers))
def index():
    """Serve a fixed two-row demo CSV serialized through IdSchema."""
    demo_rows = [
        {"id": 42, "foo": "bar"},
        {"id": 91, "foo": "baz"},
    ]
    return send_csv(demo_rows, "test.csv", schema=IdSchema())
def download():
    """Download the current user's todo list as a CSV file."""
    params = {"username": session['username']}
    api_response = requests.post(API_GET_TODOS, json=params)
    body = api_response.json()
    todos = body["data"]["todos"]
    # Fix: user-facing filename typo "My_todo_lits.csv" -> "My_todo_list.csv".
    return send_csv(
        todos, "My_todo_list.csv",
        ["title", "description", "deadline", "priority", "status", "id"])
def provide_csv_data(nickname):
    """Build a CSV of every session, pattern and per-lamp answer for one participant.

    One row is emitted per pattern. Session-level columns remain set in the
    shared row template across that session's patterns; the per-lamp answer
    columns are reset to '-' after each pattern so answers don't carry over.
    """
    if nickname is None or nickname == "":
        return { "error": "invalid nickname" }
    participant = Participant.query.filter(Participant.nickname == nickname).first()
    if participant is None:
        return { "info": "no such subject" }
    # Reusable row template seeded with '-' for every expected column.
    current_pattern = dict.fromkeys(column_names, '-')
    rows = []
    sessions = ExperimentSession.query.filter(ExperimentSession.subject_id == participant.id).order_by(ExperimentSession.start).all()
    for session in sessions:
        # Session-level columns: written once, reused by every pattern row below.
        current_pattern["Session ID"] = session.id
        current_pattern["Session Date"] = session.session_date
        current_pattern["Time t"] = session.t_value
        current_pattern["Planned trial count"] = session.trial_count
        current_pattern["Feedback Loop mode"] = session.feedback
        current_pattern["Overall Time version"] = session.constant_time_version
        current_pattern["Session Start"] = session.start
        current_pattern["Session End"] = session.end
        current_pattern["Session Duration"] = session.duration
        patterns_in_session = Pattern.query.filter(Pattern.session_id == session.id).order_by(Pattern.moment).all()
        for pattern in patterns_in_session:
            # Pattern rendered as a 10-bit binary string alongside its raw number.
            current_pattern["Pattern"] = f'{pattern.pattern:010b}'
            current_pattern["Pattern Source Number"] = pattern.pattern
            current_pattern["Pattern Start"] = pattern.moment
            answers = Answer.query.filter(Answer.pattern_id == pattern.id).order_by(Answer.lamp_num).all()
            for answer in answers:
                index = f'Lamp {answer.lamp_num}'
                current_pattern[f'{index} Answer Time'] = answer.answer_time
                current_pattern[f'{index} Correct'] = answer.correct
            # Snapshot the template (it keeps mutating), then blank the
            # per-lamp cells so the next pattern starts clean.
            rows.append(copy(current_pattern))
            for i in range(10):
                current_pattern[f'Lamp {i} Answer Time'] = '-'
                current_pattern[f'Lamp {i} Correct'] = '-'
    return send_csv(rows,
                    f'{nickname}_{participant.age}_{participant.sex}.csv',
                    column_names)
def transform_view():
    """Accept an uploaded comfort-data CSV, compute PMV/PPD/SET/CE for each
    row, and return the augmented CSV.

    Input headers look like "Air temperature [C]"; a Fahrenheit unit in any
    header switches the whole file to IP-unit handling.
    """
    # get the uploaded file
    request_file = request.files['data_file']
    # check file
    if not request_file:
        return "No file selected"
    if not allowed_file(request_file.filename):
        return "The file format is not allowed. Please upload a csv"
    # read file
    csv_file = TextIOWrapper(request_file, encoding='utf-8')
    csv_reader = csv.DictReader(csv_file, delimiter=',')
    fields = {
        'Air temperature': 'ta',
        'MRT': 'tr',
        'Air velocity': 'vel',
        'Relative humidity': 'rh',
        'Metabolic rate': 'met',
        'Clothing level': 'clo'
    }
    # Any Fahrenheit column means the file uses IP units.
    si_unit = not any(x.split(' [')[1] == 'F]' for x in csv_reader.fieldnames)
    csv_reader.fieldnames = [
        fields[x.split(' [')[0]] for x in csv_reader.fieldnames
    ]
    out_fields = list(csv_reader.fieldnames) + ['PMV', 'PPD', 'SET', 'CE']
    results = []
    # calculated indexes and return file
    for row in csv_reader:
        for element in row.keys():
            row[element] = float(row[element])
        if si_unit:
            r = cm.comfPMVElevatedAirspeed(row['ta'], row['tr'], row['vel'],
                                           int(row['rh']), row['met'],
                                           row['clo'])
        else:
            # Convert IP inputs to SI: F -> C and fpm -> m/s (196.85 fpm = 1 m/s).
            r = cm.comfPMVElevatedAirspeed(
                cm.fahrenheit_to_celsius(row['ta']),
                cm.fahrenheit_to_celsius(row['tr']),
                row['vel'] / 196.85,
                int(row['rh']), row['met'], row['clo'])
        # Result columns (identical in both branches; deduplicated here).
        row['PMV'] = round(r['pmv'], 1)
        row['PPD'] = round(r['ppd'], 1)
        row['SET'] = round(r['set'], 2)
        row['CE'] = round(r['ce'], 2)
        results.append(row)
    # Fix: the original used `list(row.keys())` after the loop, which raised
    # NameError when the uploaded CSV contained no data rows.
    return send_csv(results, "results.csv", out_fields)
def index():
    """Serve a single-row demo CSV whose one cell holds the whole name list."""
    names = ['Lolo', 'Fadli', 'Jori', 'Andi', 'Budi', 'Dedi', 'Nori']
    copied_names = list(names)
    # Note: the entire list is written into one cell of a single row.
    return send_csv([{'Nama Catin Laki-laki': copied_names}],
                    "testing.csv",
                    ['Nama Catin Laki-laki'],
                    cache_timeout=1,
                    delimiter=';')
def home():
    """Login page: on a valid submit, scrape Littlefield with the supplied
    credentials and return the latest data as a CSV download."""
    form = LoginForm()
    if form.validate_on_submit():
        # Remove any stale output file before scraping.
        # Fix: narrowed from a bare `except: pass`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid real failures.
        try:
            os.remove(app.config["OUTPUT_PATH"])
        except OSError:
            pass
        with driver_context() as driver:
            df, ls = scrape(driver, form.username.data, form.password.data)
        columns = [
            "Day",
            "Customer Orders",
            "Queued Orders",
            "Inventory Level",
            "Machine 1 Utilisation Rate",
            "Machine 2 Utilisation Rate",
            "Machine 3 Utilisation Rate",
            "Machine 1 Queue",
            "Machine 2 Queue",
            "Machine 3 Queue",
            "Completed Jobs Tier 1",
            "Completed Jobs Tier 2",
            "Completed Jobs Tier 3",
            "Avg Lead Time Tier 1",
            "Avg Lead Time Tier 2",
            "Avg Lead Time Tier 3",
            "Avg Revenue Tier 1",
            "Avg Revenue Tier 2",
            "Avg Revenue Tier 3",
        ]
        return send_csv(ls, "latest-littlefield.csv", columns)
    return render_template("index.html", form=form)
def api_csv_currency_report():
    """Export currency rates between ?date_from and ?date_to as a CSV."""
    retrieval_date_from = request.args['date_from']
    retrieval_date_to = request.args['date_to']
    # Column order: date first, then USD, then the configured currencies.
    fields_list = ['date', 'USD', *currency_list]
    currency_json_data = currency_logic.currency_data_retriever(
        retrieval_date_from, retrieval_date_to)
    # Flatten {date: {currency: rate, ...}} into one row per date.
    currency_csv_data = [
        dict(rates, date=day) for day, rates in currency_json_data.items()
    ]
    return send_csv(
        currency_csv_data,
        filename=f"Currency Data {str(retrieval_date_from), str(retrieval_date_to)}.csv",
        fields=fields_list)
def download_rso_users():
    """
    GET endpoint that downloads a CSV file of the list of RSO emails
    scraped from CalLink.
    """
    rso_list = mongo_aggregations.fetch_aggregated_rso_list()
    # Render boolean flags as Yes/No for the spreadsheet.
    for entry in rso_list:
        entry['registered'] = 'Yes' if entry['registered'] else 'No'
        entry['confirmed'] = 'Yes' if entry['confirmed'] else 'No'
    columns = ['email', 'registered', 'confirmed']
    return send_csv(rso_list, 'rso_emails.csv', columns, cache_timeout=0)
def download_clubs():
    """
    GET endpoint that downloads a CSV file of the list of clubs with
    relevant info (abridged).
    """
    club_list = _fetch_clubs()
    # Render boolean flags as Yes/No for the spreadsheet.
    for entry in club_list:
        entry['confirmed'] = 'Yes' if entry['confirmed'] else 'No'
        entry['reactivated'] = 'Yes' if entry['reactivated'] else 'No'
    columns = ['name', 'email', 'confirmed', 'reactivated']
    return send_csv(club_list, 'clubs.csv', columns, cache_timeout=0)
def subsidies_transactions_get():  # noqa: E501
    """Download all transactions; supports filtering.

    For each known subsidy, its transactions are returned as a CSV. It
    supports ISO date filtering so that just the transactions in a given
    time interval may be returned. If no filter is provided, then all
    transactions from the very beginning are returned.  # noqa: E501

    Due to a bug in the Swagger code generation the query arguments are
    never filled in the method arguments, therefore we just use Flask's
    own facilities to retrieve them.

    :rtype: file
    """
    starting = request.args.get("from")
    ending = request.args.get("to")
    # "initiative" filtering is not supported for the CSV report and is
    # intentionally ignored. Fix: the original raised a
    # BadRequestException here only to catch and swallow it immediately
    # (dead code with no observable effect).
    # Both bounds must be given together, or neither (full dump).
    if (starting is None) != (ending is None):
        raise service.exceptions.BadRequestException(
            "Both date intervals must be specified, or none at all for full dump"
        )
    if starting is None and ending is None:
        rows, filename, schema = service.subsidies.read_all_transactions()
        return send_csv(rows, filename, schema)
    start_date = datetime.strptime(starting, '%Y-%m-%d')
    end_date = datetime.strptime(ending, '%Y-%m-%d')
    if end_date < start_date:
        raise service.exceptions.BadRequestException(
            "'to' date must be after the 'from' date")
    rows, filename, schema = service.subsidies.read_all_transactions(
        start_date, end_date)
    return send_csv(rows, filename, schema)
def item_download(item_id):
    """Download all non-deleted entries of an item owned by the current user.

    Responds 404 for an unknown item and 403 when the requester is not
    the item's owner.
    """
    item = Item.query.get_or_404(item_id)
    if item.owner != current_user:
        abort(403)
    entries = Entry.query.filter_by(
        item_id=item_id, deleted=False).order_by(Entry.timestamp)
    columns = ["timestamp", "comment", "longitude", "latitude", "entrytags"]
    return send_csv(get_csv_list(entries), item.itemname + ".csv", columns)
def download_recent_csv():
    """Build and send a CSV for the most recent session in tmp/recent.json."""
    data = get_json(f'{base_dir}/tmp/recent.json')
    if data is None:
        return "Nie odbyto żadnej sesji."
    print(data)
    rows = []
    if data['mode'] != 'pilot':
        # Stage two data: a sentinel row carrying only the GIL click count.
        stage_two_row = dict.fromkeys(column_names, 'n/a')
        stage_two_row.update({
            "External Task Number": "STAGE TWO",
            "Internal Task Number": "STAGE TWO",
            "GIL Clicks": data["stageTwoGILClicks"],
        })
        rows.append(stage_two_row)
    # Stage four tasks data
    # NOTE(review): assumed this loop also runs for pilot sessions —
    # confirm against the original indentation.
    for task_data in data["taskData"]:
        task_row = dict.fromkeys(column_names, '-')
        task_row.update({
            "External Task Number": task_data["externalTaskNum"],
            "Internal Task Number": task_data["internalTaskNum"],
            "Task Name": task_data["taskName"],
            "GIL Clicks": task_data["GILClicks"],
            "Correct": 1 if task_data["correct"] else 0,
            "Card Order": task_data["cardOrder"],
            "Task Execution Time": task_data["executionDuration"],
            "Task Solution Time": task_data["solutionDuration"],
            "P Selection Times": task_data["selectionTimes"][0],
            "nP Selection Times": task_data["selectionTimes"][1],
            "Q Selection Times": task_data["selectionTimes"][2],
            "nQ Selection Times": task_data["selectionTimes"][3],
            "P Deselection Times": task_data["deselectionTimes"][0],
            "nP Deselection Times": task_data["deselectionTimes"][1],
            "Q Deselection Times": task_data["deselectionTimes"][2],
            "nQ Deselection Times": task_data["deselectionTimes"][3],
            "Final Selection": task_data["finalSelectionOrder"],
            "Final Selection Times": task_data["finalSelectionOrderedTimes"],
        })
        rows.append(task_row)
    # Sending csv
    return send_csv(
        rows,
        f'{data["nickname"]}_{data["gender"]}_{data["age"]}_mode{data["mode"]}.csv',
        column_names)
def operator():
    """Operator dashboard; a POST downloads the full registration list as CSV.

    Unauthenticated visitors (no 'email' in the session) are redirected
    to the index page.
    """
    if 'email' not in session:
        return redirect(url_for('index'))
    name = current_user.name
    all_user_data = DataCatin.query.all()
    if request.method == 'POST':
        columns = [
            'NIK Catin Laki-laki', 'Nama Catin Laki-laki',
            'NIK Catin Perempuan', 'Nama Catin Perempuan',
            'Tanggal Daftar', 'Jadwal Nikah', 'Jam',
            'Tempat Pelaksanaan Nikah', 'Status Pendaftaran'
        ]
        records = [{
            'NIK Catin Laki-laki': rec.NIK_catin_laki_laki,
            'Nama Catin Laki-laki': rec.nama_catin_laki_laki,
            'NIK Catin Perempuan': rec.NIK_catin_perempuan,
            'Nama Catin Perempuan': rec.nama_catin_perempuan,
            'Tanggal Daftar': rec.tanggal_daftar,
            'Jadwal Nikah': rec.jadwal_nikah,
            'Jam': rec.jam,
            'Tempat Pelaksanaan Nikah': rec.tempat_pelaksaan_nikah,
            'Status Pendaftaran': rec.status_pendaftaran,
        } for rec in all_user_data]
        return send_csv(records, "laporan pendaftaran.csv", columns,
                        cache_timeout=1, delimiter=';')
    return render_template('operator_dashboard.html', WELCOME=name,
                           catin=all_user_data, DOWNLOAD_CSV='')
def get(self):
    """Return quicktest data for county 05334 as JSON or CSV.

    Query args: ``format`` = json|csv (default json), ``sort`` = 1|-1,
    optional ``limit`` = max number of records.
    """
    output = request.args.get('format', 'json').lower().strip()
    if output not in ['json', 'csv']:
        abort(400, message="wrong output format, should be 'json' or 'csv'")
    sort_order = int(request.args.get("sort", "1"))
    cursor = mongo.db.quicktests.find({
        'county': '05334'
    }).sort("date", sort_order)
    if "limit" in request.args:
        limit = int(request.args.get("limit"))
        cursor = cursor.limit(limit)
    quicktests = list(cursor)

    # get the general header information with trends etc.
    today = mongo.db.quicktests.find_one({'county': '05334'},
                                         sort=[('date', pymongo.DESCENDING)])
    date = today['date']
    # German-style decimal comma, e.g. "3,14%".
    today['rate_formatted'] = "%s%%" % str(round(today['rate_percent'], 2)).replace(".", ",")
    resp = {
        'date': date,
        'dateFormatted': date.strftime("%d. %B %Y"),
        'dates': [r['date'] for r in quicktests],
        'format': output,
        'positive': [r['positive'] for r in quicktests],
        'negative': [r['total'] - r['positive'] for r in quicktests],
        'total': [r['total'] for r in quicktests],
        'rate': [r['rate'] for r in quicktests],
        'rate_percent': [r['rate_percent'] for r in quicktests],
        'rate_permille': [r['rate_permille'] for r in quicktests],
        'today': today
    }
    if output == "json":
        return resp
    # CSV output. Fix: quicktests[-1] raised IndexError when the query
    # matched no documents; fall back to an empty header in that case.
    headers = quicktests[-1].keys() if quicktests else []
    return send_csv(
        quicktests,
        "corona_%s_corona.csv" % (date.strftime("%Y_%m_%d")),
        headers)
def upload_file():
    """Optimize the posted stock list; return the allocation as JSON or CSV."""
    if request.method == 'POST':
        stocks = request.form['stocks_list']
        alloc = optimize(stocks)
        if request.form.get('select') == 'json':
            return alloc
        rows = [{"stock": ticker, "weight": alloc[ticker]} for ticker in alloc]
        return send_csv(rows, "test.csv", ["stock", "weight"])
def export_data(unique_id, measurement_id, start_seconds, end_seconds):
    """
    Return data from start_seconds to end_seconds from influxdb.
    Used for exporting data.
    """
    current_app.config['INFLUXDB_USER'] = INFLUXDB_USER
    current_app.config['INFLUXDB_PASSWORD'] = INFLUXDB_PASSWORD
    current_app.config['INFLUXDB_DATABASE'] = INFLUXDB_DATABASE
    current_app.config['INFLUXDB_TIMEOUT'] = 5
    dbcon = influx_db.connection
    # The unique_id may identify an output, an input, or a math device;
    # resolve its display name for the CSV column header.
    output = Output.query.filter(Output.unique_id == unique_id).first()
    input_dev = Input.query.filter(Input.unique_id == unique_id).first()
    math = Math.query.filter(Math.unique_id == unique_id).first()
    if output:
        name = output.name
    elif input_dev:
        name = input_dev.name
    elif math:
        name = math.name
    else:
        name = None
    # Look up the measurement and its unit conversion (if one is configured).
    device_measurement = DeviceMeasurements.query.filter(
        DeviceMeasurements.unique_id == measurement_id).first()
    if device_measurement:
        conversion = Conversion.query.filter(
            Conversion.unique_id == device_measurement.conversion_id).first()
    else:
        conversion = None
    channel, unit, measurement = return_measurement_info(
        device_measurement, conversion)
    # Influx stores timestamps in UTC; shift local epoch seconds accordingly.
    utc_offset_timedelta = datetime.datetime.utcnow() - datetime.datetime.now()
    start = datetime.datetime.fromtimestamp(float(start_seconds))
    start += utc_offset_timedelta
    start_str = start.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    end = datetime.datetime.fromtimestamp(float(end_seconds))
    end += utc_offset_timedelta
    end_str = end.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    query_str = query_string(
        unit, unique_id,
        measure=measurement,
        channel=channel,
        start_str=start_str,
        end_str=end_str)
    # query_string() signals invalid input by returning 1 rather than raising.
    if query_str == 1:
        flash('Invalid query string', 'error')
        return redirect(url_for('routes_page.page_export'))
    raw_data = dbcon.query(query_str).raw
    if not raw_data or 'series' not in raw_data:
        flash('No measurements to export in this time period', 'error')
        return redirect(url_for('routes_page.page_export'))
    # Generate column names
    col_1 = 'timestamp (UTC)'
    col_2 = '{name} {meas} ({id})'.format(
        name=name, meas=measurement, id=unique_id)
    csv_filename = '{id}_{meas}.csv'.format(id=unique_id, meas=measurement)
    # Populate list of dictionary entries for each column to convert to CSV
    # and send to the user to download
    csv_data = []
    for each_data in raw_data['series'][0]['values']:
        # Trim the sub-second suffix and replace the ISO 'T' separator.
        csv_data.append({col_1: str(each_data[0][:-4]).replace('T', ' '),
                         col_2: each_data[1]})
    return send_csv(csv_data, csv_filename, [col_1, col_2])