def jobslist():
    """Export the selected jobs as CSV/XLSX, or render them as a page.

    Expects a POST carrying a ``ListForm`` with:
      * ``ids``        -- comma-separated job ids to export
      * ``returntype`` -- '0' = CSV, '1' = XLSX, '2' = HTML page

    Returns an empty 204 response for non-POST requests, invalid forms,
    or an unrecognised return type.
    """
    data = []
    country = ''
    form = ListForm(request.form)
    if request.method == 'POST' and form.validate_on_submit():
        ids = form.ids.data.split(',')
        # Open one ORM session for all lookups instead of a new
        # session per id as the original did.
        with orm.db_session:
            for x in ids:
                job = JobEntity.get(id=x)
                data.append(dict(job))
        # Guard the empty-selection case: the original indexed data[0]
        # unconditionally, raising IndexError when no jobs were found.
        if data and country == '':
            country = data[0]['country']
        if form.returntype.data == '0':
            return excel.make_response_from_records(
                data, 'csv', file_name='jobs_%s.csv' % country)
        elif form.returntype.data == '1':
            return excel.make_response_from_records(
                data, 'xlsx', file_name='jobs_%s.xlsx' % country)
        elif form.returntype.data == '2':
            getDescriptions(data, country)
            return render_template('parsers/selectedjobs.jade', data=data)
    return ('', 204)
def download_employee_list(item_id):
    """Return the employees of company ``item_id`` as an xlsx download."""
    employees = CompanyController.get_employees(item_id)
    serialized = [serialize_field(employee) for employee in employees]
    return excel.make_response_from_records(serialized, "xlsx")
def sheet():
    """Dump every row of the ``user`` table into an xlsx attachment."""
    # Execute the query and materialise all rows before handing
    # them to the spreadsheet builder.
    rows = conn.execute('select * from user').fetchall()
    return excel.make_response_from_records(rows, "xlsx", file_name="sheet.xlsx")
def doexport():
    """Export every document in the ``jpvs_new`` collection as xlsx."""
    rows = list(db.jpvs_new.find())
    # NOTE(review): the list argument sits where flask_excel's
    # make_response_from_records expects the file type string --
    # confirm this `excel` wrapper really accepts a column list here.
    return excel.make_response_from_records(rows, ['jobprop_label', 'label'], "xlsx")
def download_file():
    """Export the posts stored in the session as a CSV download.

    The file name combines the logged-in username with the request's
    local timestamp; the client's UTC offset arrives, in minutes, via
    the ``localTimeZoneOffset`` cookie.
    """
    local_time = time.time() - int(request.cookies.get('localTimeZoneOffset')) * 60
    file_name = "{}-{}".format(
        session['username'],
        datetime.utcfromtimestamp(local_time).strftime('%Y.%m.%d-%H.%M.%S'))
    file_data = []
    for post in session['data']:
        row = OrderedDict()
        row['Likes'] = post['likes']
        row['Comments'] = post['comments']
        # Only a *missing* view count means "N/A". The original used a
        # truthiness test, which also replaced a legitimate count of 0.
        row['Views'] = post['views'] if post['views'] is not None else "N/A"
        row['Media Type'] = post['media_type']
        row['Link'] = post['link']
        row['Publish Date'] = post['published_date']
        file_data.append(row)
    return excel.make_response_from_records(file_data, 'csv', file_name=file_name)
def get(self, slug=None):
    """Generic REST read endpoint.

    With ``slug``: return the single matching resource (or 404).
    Without: return a filtered, optionally ordered, paginated listing;
    with ``__export__`` in the query string (and export enabled on the
    resource) a CSV export is produced instead.
    """
    if slug:
        # Single-object lookup, narrowed by the resource's read permission.
        obj = self.resource.model.query.filter(self.resource.model.id == slug)
        obj = self.resource.has_read_permission(obj).first()
        if obj:
            return make_response(jsonify(self.resource.schema(exclude=tuple(self.resource.obj_exclude),
                                                              only=tuple(self.resource.obj_only)).dump(
                obj, many=False).data), 200)
        return make_response(jsonify({'error': True, 'message': 'Resource not found'}), 404)
    else:
        # Listing: apply query-string filters, then permission narrowing.
        objects = self.resource.apply_filters(queryset=self.resource.model.query, **request.args)
        objects = self.resource.has_read_permission(objects)
        if '__order_by' in request.args:
            objects = self.resource.apply_ordering(objects, request.args.getlist('__order_by'))
        if '__export__' in request.args and self.resource.export is True:
            # CSV export path: paginate up to the export limit and dump
            # that page's items; file is named after the model class.
            objects = objects.paginate(page=self.resource.page, per_page=self.resource.max_export_limit)
            return make_response_from_records(
                self.resource.schema(exclude=tuple(self.resource.obj_exclude),
                                     only=tuple(self.resource.obj_only))
                .dump(objects.items, many=True).data, 'csv', 200, self.resource.model.__name__)
        try:
            resources = objects.paginate(page=self.resource.page, per_page=self.resource.limit)
        except DataError as e:
            # Bad filter values surface as a database DataError -> 400.
            return make_response(jsonify(dict(message='invalid query params', operation='Query Resource',
                                              error=str(e))), 400)
        if resources.items:
            return make_response(jsonify({'success': True,
                                          'data': self.resource.schema(exclude=tuple(self.resource.obj_exclude),
                                                                       only=tuple(self.resource.obj_only))
                                         .dump(resources.items, many=True).data,
                                          'total': resources.total}), 200)
        return make_response(jsonify({'error': True, 'message': 'No Resource Found'}), 404)
def get(self, id):
    """Download assets under an asset category"""
    asset_category = AssetCategory.get_or_404(id)
    live_assets = asset_category.assets.filter_by(deleted=False).all()
    schema = AssetSchema(many=True, exclude=[
        'id', 'asset_category_id', 'deleted', 'deleted_at', 'created_by',
        'deleted_by', 'updated_at', 'updated_by'
    ])
    records = []
    for raw in schema.dump(live_assets).data:
        # Flatten the nested asset dict so each custom attribute
        # becomes its own column in the csv file.
        record = flatten(raw, reducer=lambda key1, key2: key2)
        record['created_at'] = dateutil.parser.parse(record['created_at']).date()
        records.append(record)
    return excel.make_response_from_records(
        records, 'csv',
        file_name=f'{asset_category.name} Assets Export - {date.today()}')
def download_observations(id):
    """Export every observation recorded for patient ``id`` as CSV.

    Each row merges the observation's patient info with its hospital
    info into a single flat record.
    """
    observations = session_project.observation_by_patient_id(id)
    full_obs = []
    for o in observations:
        # Serialize once -- the original called o.to_dict() twice
        # per observation.
        record = o.to_dict()
        full_obs.append({**record.get("info_patient"),
                         **record.get("info_hospital")})
    return excel.make_response_from_records(full_obs, "csv")
def export(source):
    """Download the email statistics for ``source`` as an xlsx file."""
    if request.method == 'GET':
        user = UserController.find_by_id(request.args.get("user_id"))
        items = EmailStatController.get_items(source, user.user_handle_industry)
        serialized = [serialize_field(source, item) for item in items]
        return excel.make_response_from_records(serialized, "xlsx")
def getplaylistbyid():
    """Export the tracks of the POSTed playlist id as a CSV attachment."""
    playlistid = request.form['playlistid']
    # NOTE: the original constructed a spotipy.Spotify client (and its
    # SpotifyOAuth manager) here but never used either;
    # playlist_to_tracklist performs its own API access.
    tracklist, playlist_name = playlist_to_tracklist(playlistid)
    output = excel.make_response_from_records(tracklist, 'csv')
    output_filename = playlist_name + " - " + playlistid + ".csv"
    output.headers["Content-Disposition"] = "attachment; filename=" + output_filename
    output.headers["Content-type"] = "text/csv"
    return output
def post(self):
    """Download air-quality events as CSV or JSON.

    Query string: ``tenant`` and ``downloadType`` ('csv' or 'json').
    JSON body: sites, startDate, endDate, frequency, pollutants, plus an
    optional ``fromBigQuery`` flag selecting the data source.
    """
    tenant = request.args.get("tenant", "").lower()
    download_type = request.args.get('downloadType')
    body = request.get_json()
    sites = body["sites"]
    start_date = body["startDate"]
    end_date = body["endDate"]
    frequency = body["frequency"]
    pollutants = body["pollutants"]

    if body.get("fromBigQuery"):
        records = EventsModel.from_bigquery(
            tenant, sites, start_date, end_date, frequency, pollutants)
        if download_type == 'csv':
            return excel.make_response_from_records(
                records, 'csv', file_name=f'airquality-{frequency}-data')
        # Any other download type falls back to a JSON payload.
        return create_response("air-quality data download successful",
                               data=records), Status.HTTP_200_OK

    records = approximate_coordinates(
        EventsModel(tenant).get_downloadable_events(
            sites, start_date, end_date, frequency, pollutants))
    if download_type == 'json':
        return create_response("air-quality data download successful",
                               data=records), Status.HTTP_200_OK
    if download_type == 'csv':
        return excel.make_response_from_records(
            records, 'csv', file_name=f'airquality-{frequency}-data')
    return create_response(f'unknown data format {download_type}',
                           success=False), Status.HTTP_400_BAD_REQUEST
def download(): """Download excel file""" if request.method == "POST": if request.form.get("download"): receipts = db.execute( "SELECT id, name, header, total, date, date_created, category, language, image_link FROM 'receipts' WHERE user_id = :user_id AND deleted = 0", user_id=session["user_id"]) print(receipts) print(type(receipts)) return excel.make_response_from_records(receipts, 'xlsx', file_name="receipts") else: print(request.form) return redirect("/") else: return redirect("/")
def scrape():
    """Run the hotel scraper and return the collected rows as an .xls file.

    ``desCity``/``checkinDate``/``checkoutDate``/``room``/``traveler``
    appear to be set elsewhere at module scope -- TODO confirm.
    """
    # Reset the shared buffer the crawler appends into.
    global output_data
    output_data = []
    try:
        scrape_with_crochet(desCity=desCity, checkinDate=checkinDate,
                            checkoutDate=checkoutDate, room=room,
                            traveler=traveler)
    except Exception:
        # Deliberately best-effort: a failed scrape still returns whatever
        # (possibly empty) data was collected before the error. The
        # original bound the exception to an unused name.
        pass
    return excel.make_response_from_records(output_data, "xls",
                                            file_name="hotelsdata")
def dump_projects_database(extension_type):
    """Export every complete project record as a spreadsheet download.

    Args:
        extension_type: output format; one of csv, ods, xls, xlsx.

    Raises:
        ValueError: for any other format.
    """
    if extension_type not in ["csv", "ods", "xls", "xlsx"]:
        raise ValueError("Unsupported format: %s" % extension_type)
    # Skip incomplete records: only projects with both a responsible
    # person and a reference are exported.
    projects = [p for p in project_get_info(every=True, usage=False)
                if p.responsible and p.ref]
    # sorted() consumes any iterable directly; the original wrapped
    # map() in a redundant list().
    output = sorted((p.pretty_dict() for p in projects), key=lambda x: x["id"])
    excel.init_excel(current_app)
    filename = "projects." + extension_type
    return excel.make_response_from_records(output, file_type=extension_type,
                                            file_name=filename)
def upload_array(struct_type): if struct_type == "array": array = request.get_array(field_name='file') return excel.make_response_from_array(array, 'xls', sheet_name='test_array') elif struct_type == "dict": adict = request.get_dict(field_name='file') return excel.make_response_from_dict(adict, 'xls', sheet_name='test_array') elif struct_type == "records": records = request.get_records(field_name='file') return excel.make_response_from_records(records, 'xls', sheet_name='test_array') elif struct_type == "book": book = request.get_book(field_name='file') return excel.make_response(book, 'xls') elif struct_type == "book_dict": book_dict = request.get_book_dict(field_name='file') return excel.make_response_from_book_dict(book_dict, 'xls')
def index(variable): if request.method == "GET": return render_template("homepage.html", variable=variable) if request.method == "POST": form = request.form report_date = form["report_date"] report_type = form["report_type"] report_language = form["report_language"] final_list = [] dictionary = OrderedDict({ "No.": "", "Driver Code": "", "Driver Name": "", "Vehicle Type": "", "Re-delivered Order": "", "Net Re-delivered Revenue": "", "Number Of Orders (#)": "", "Net revenue": "", "Total Order Weight (kg)": "", "Total Order Volume (m3)": "", "Planned Revenue(đ)": "", "Planned Cost(đ)": "", "Planned Distance(km)": "", "Planned Time(hours)": "", "Actual Revenue(đ)": "", "Actual Cost(đ)": "", "Actual Distance(km)": "", "Actual Start Time": report_date, "Actual End Time": "", "Actual Time(hours)": "", "VFR by Trips(%)": "", "Vfr By Weight (%)": "", "Fulfilled Orders (#)": "", "Net revenue delivered": "", "Partially Fulfilled Orders (#)": "", "Unfulfilled Orders (#)": "", "Not Yet Fulfilled Orders (#)": "", "Number of Off-200m Check-in (#)": "", }) final_list.append(dictionary) return excel.make_response_from_records(final_list, "xlsx", file_name="ST01")
def getallplaylists():
    """Export the tracks of every user playlist (from offset 50) as one CSV."""
    auth_manager = spotipy.oauth2.SpotifyOAuth(cache_path=session_cache_path())
    if not auth_manager.get_cached_token():
        return redirect('/')
    spotify = spotipy.Spotify(auth_manager=auth_manager)
    combined = []
    for playlist in spotify.current_user_playlists(offset=50)['items']:
        tracks, playlist_name = playlist_to_tracklist(playlist['id'])
        # Tag every track with the playlist it came from before merging.
        for track in tracks:
            track['playlist_id'] = playlist['id']
            track['Playlist Name'] = playlist_name
        combined.extend(tracks)
    response = excel.make_response_from_records(combined, 'csv')
    output_filename = "All Playlists.csv"
    response.headers["Content-Disposition"] = "attachment; filename=" + output_filename
    response.headers["Content-type"] = "text/csv"
    return response
def export():
    """Offer the student records as an xlsx download."""
    rows = students_records_for_export()
    return excel.make_response_from_records(records=rows, file_type='xlsx')
def report_from_records(dict_data, file_type="csv", file_name="export_data"): return excel.make_response_from_records(dict_data, file_type, file_name=file_name)