async def fetch_data(data_info: str):
    startTime = time.time()
    data_list = data_info.split('|')
    if len(data_list) != 2:
        print("Invalid data format:\n", data_list)
        # Reject malformed input instead of failing later on the unpack below
        raise HTTPException(status_code=400, detail="Invalid data format")
    data_name, cols = data_list[0], data_list[1].split(',')
    print("DATA NAME", data_name)
    if data_name.endswith('.csv'):
        filename = os.path.join(DATA_DIR, data_name)
    else:
        filename = os.path.join(DATA_DIR, data_name + '.csv')
    stream = io.StringIO()
    if 'all' in cols:
        dataset = pd.read_csv(filename)
    else:
        dataset = getDataFromCols(filename, cols)
    dataset.to_csv(stream, index=False)
    response = StreamingResponse(iter([stream.getvalue()]), media_type="text/csv")
    response.headers["Content-Disposition"] = f"attachment; filename={data_name}.csv"
    print(f"Loading {len(dataset.columns)} columns from {data_name} took {time.time() - startTime} secs: {filename}")
    return response

def download(filename):
    path = os.path.join(UPLOAD_FOLDER, filename)
    file_like = open(path, mode="rb")
    response = StreamingResponse(file_like, media_type="application/octet-stream")
    response.status_code = 200
    response.headers["Content-Disposition"] = "attachment; filename=" + filename
    return response

async def download_pairs(query: Query):
    models = query.models or []
    filtered_pairs = [pair for pair in pairs if pair['Model'] in models]
    print("download", query)

    # Write one tab-separated line per loop: chr1 start1 end1 chr2 start2 end2 prob model
    stream = io.StringIO()
    for pair in filtered_pairs:
        model = pair['Model']
        gr1 = pair['GenomeRange1']
        chr1, st1, ed1 = re.findall(r"(.*):(.*)-(.*)", gr1)[0]
        gr2 = pair['GenomeRange2']
        chr2, st2, ed2 = re.findall(r"(.*):(.*)-(.*)", gr2)[0]
        line = '\t'.join([chr1, st1, ed1, chr2, st2, ed2, str(pair['Prob']), model])
        stream.write(line + "\n")
    stream.flush()
    stream.seek(0)

    response = StreamingResponse(stream, media_type="text/plain")
    response.headers['Content-Disposition'] = "attachment; filename=loops.pair"
    return response

def generate_openvpn_config(current_user: User = Depends(get_current_active_user)):
    config = generate_config(current_user.username)
    response = StreamingResponse(iter([config]), media_type="text/plain")
    response.headers["Content-Disposition"] = f"attachment; filename={current_user.username}.ovpn"
    return response

async def battlefy_seed_csv(request: Request, tournament_id: str):
    """Returns a CSV of teams and players for seeding use"""
    async with aiohttp.ClientSession() as session:
        async with session.get(
                f"https://dtmwra1jsgyb0.cloudfront.net/tournaments/{tournament_id}/teams") as resp:
            data = await resp.json()
            if resp.status != 200:
                raise HTTPException(status_code=resp.status, detail=f"{data['error']}")
            # Status is 200: build the CSV in an in-memory buffer
            csv_file = io.StringIO()
            csv_writer = csv.writer(csv_file)
            csv_writer.writerow([
                "team", "player1", "player2", "player3", "player4",
                "player5", "player6", "player7", "player8"
            ])
            for team in data:
                team_row = [team['name']]
                for p in team.get('players', []):
                    name = p['inGameName']
                    if name[0] == "=":
                        # Prefix names starting with "=" so spreadsheets don't treat them as formulas
                        name = f".{name}"
                    team_row.append(name)
                csv_writer.writerow(team_row)
    # Return CSV
    response = StreamingResponse(iter([csv_file.getvalue()]), media_type="text/csv")
    response.headers["Content-Disposition"] = "attachment; filename=teams.csv"
    return response

def serve_path(
    path,
    channel: db_models.Channel = Depends(get_channel_allow_proxy),
    cache: LocalCache = Depends(LocalCache),
    session=Depends(get_remote_session),
):
    if channel.mirror_channel_url and channel.mirror_mode == "proxy":
        repository = RemoteRepository(channel.mirror_channel_url, session)
        return get_from_cache_or_download(repository, cache, path)

    if path == "" or path.endswith("/"):
        path += "index.html"
    try:
        return StreamingResponse(pkgstore.serve_path(channel.name, path))
    except FileNotFoundError:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"{channel.name}/{path} not found",
        )
    except IsADirectoryError:
        try:
            path += "/index.html"
            return StreamingResponse(pkgstore.serve_path(channel.name, path))
        except FileNotFoundError:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"{channel.name}/{path} not found",
            )

def get_openvpn_config(current_user: User = Depends(get_current_active_user)):
    config = requests.post(
        "http://openvpn:8080/api/user/config/show",
        data={"username": current_user.username},
    )
    response = StreamingResponse(iter([config.content]), media_type="text/plain")
    response.headers["Content-Disposition"] = f"attachment; filename={current_user.username}.ovpn"
    return response

def get_assignment(assignment_id: int, db: session = Depends(session)):
    assignment = db.query(models.Assignment).get(assignment_id)
    bytes_ = export.generate_results_report(assignment)
    report_name = f'{assignment.name}_report'
    response = StreamingResponse(io.BytesIO(bytes_), media_type="application/x-zip-compressed")
    # Use a .zip extension to match the zip media type of the generated report
    response.headers["Content-Disposition"] = f"attachment; filename={report_name}.zip"
    return response

async def get_csv():
    df = df4
    stream = io.StringIO()
    df.to_csv(stream, index=False)
    response = StreamingResponse(iter([stream.getvalue()]), media_type="text/csv")
    response.headers["Content-Disposition"] = "attachment; filename=each_price_change_catagory.csv"
    return response

def export_sql(base: str, table: str, dialect: str):
    cnxn = Connection(cfg.db_system, cfg.db_server, cfg.db_uid, cfg.db_pwd, base)  # TODO
    dbo = Database(cnxn, base)
    table = Table(dbo, table)
    ddl = table.export_ddl(dialect)
    response = StreamingResponse(io.StringIO(ddl), media_type="text/plain")
    response.headers["Content-Disposition"] = f"attachment; filename={table.name}.sql"
    return response

async def get_csv():
    datastuff = {'colors': ['blue', 'red'], 'volume': [22, 55]}
    df = pd.DataFrame(datastuff)
    response = StreamingResponse(io.StringIO(df.to_csv(index=False)), media_type="text/csv")
    response.headers["Content-Disposition"] = "attachment; filename=export-999.csv"
    return response

def download_file(file_id: int, user: User = Depends(token_auth), db: Session = Depends(session)):
    file: ResultFile = db.query(ResultFile).get(file_id)
    if not file or file.run.pipeline.user_id != user.id:
        # Hide files that do not exist or do not belong to the user behind a 404
        raise HTTPException(status_code=404, detail="File not found")
    file_path = path.join(config.UPLOAD_DIR, file.path)
    file_bytes = open(file_path, 'rb')
    response = StreamingResponse(file_bytes, media_type="application/octet-stream")
    response.headers["Content-Disposition"] = f"attachment; filename={file.filename}"
    return response

def convert_structure(inp: str, fmt: Format = Format.sdf, get3d: bool = False):
    try:
        out = chem.convert(structure=inp, fmt=fmt, get3d=get3d)
    except Exception:
        raise HTTPException(400, detail="Structure could not be converted")
    if fmt == Format.sdf:
        return StreamingResponse(io.BytesIO(out.encode()), media_type="chemical/x-mdl-sdfile")
    if fmt == Format.svg:
        return StreamingResponse(io.BytesIO(out.encode()), media_type="image/svg+xml")
    return

async def download_file():
    global stream
    if stream:
        response = StreamingResponse(
            BytesIO(stream),
            media_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        )
        response.headers["Content-Disposition"] = "attachment; filename=estimate.xlsx"
    else:
        response = {"message": "nothing to download"}
    return response

async def get_csv():
    df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
    stream = io.StringIO()
    df.to_csv(stream, index=False)
    response = StreamingResponse(iter([stream.getvalue()]), media_type="text/csv")
    response.headers["Content-Disposition"] = "attachment; filename=export.csv"
    return response

def trees_export(
    organization_id: int,
    format: str = "geojson",
    auth=Depends(authorization("trees:export")),
    db: Session = Depends(get_db),
) -> Any:
    sql = f"SELECT * FROM public.tree WHERE organization_id = {organization_id}"
    df = gpd.read_postgis(sql, db.bind)

    if df.empty:
        raise HTTPException(status_code=404, detail="this organization has no trees")

    if format not in ["geojson", "csv", "xlsx"]:
        raise HTTPException(status_code=404, detail="format not found")

    organization_in_db = crud.organization.get(db, id=organization_id)
    if organization_in_db is None:
        raise HTTPException(status_code=404, detail="organization not found")

    stream: Union[io.BytesIO, io.StringIO] = io.BytesIO()

    if format == "geojson":
        df.to_file(stream, driver="GeoJSON")
        media_type = "application/geo+json"

    if format in ["csv", "xlsx"]:
        df["lat"] = df.geom.y
        df["lng"] = df.geom.x
        df_properties = gpd.pd.DataFrame(df["properties"].values.tolist())
        col = df.columns.difference(["properties"])
        df = gpd.pd.concat([df[col], df_properties], axis=1)
        df = df.drop(columns=["geom"])

        if format == "csv":
            stream = io.StringIO(df.to_csv())
            media_type = "text/csv"

        if format == "xlsx":
            df.to_excel(stream, engine="xlsxwriter")
            media_type = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"

    response = StreamingResponse(iter([stream.getvalue()]), media_type=media_type)
    response.headers["Content-Disposition"] = f"attachment; filename=export.{format}"
    return response

async def get_asset_artwork(assetid: str, stream_output: bool = True):
    content_metadata = await _query_content_metadata(assetid=assetid)
    if content_metadata['results'].get(assetid, False):
        artwork_url = content_metadata['results'][assetid]['artwork']['url'].format(
            w=content_metadata['results'][assetid]['artwork']['width'],
            h=content_metadata['results'][assetid]['artwork']['height'],
            f='jpg',
        )
        async with aiohttp.ClientSession() as session:
            async with session.get(artwork_url) as response:
                content = await response.content.read()
                return StreamingResponse(io.BytesIO(content), media_type='image/jpeg') if stream_output else content
    else:
        return StreamingResponse(io.BytesIO(b'')) if stream_output else b''

def download_map_set(
    uuid: UUID,
    manager: MapSetManager = Depends(get_map_set_manager),
    backup_io: BackupIo = Depends(get_backup_io),
) -> Response:
    map_set = get_map_set(manager, uuid)
    filename = _get_safe_file_name(map_set.name)
    buffer = BytesIO()
    backup_io.export_map_set_tar_gz(uuid, buffer)
    buffer.seek(0)
    response = StreamingResponse(buffer, media_type="application/open-battle-map")
    response.headers['Content-Disposition'] = f'attachment; filename="{filename}"'
    return response

async def handle_get_operator_results(
    project_id: str,
    experiment_id: str,
    run_id: str,
    operator_id: str,
    session: Session = Depends(database.session_scope),
    kubeflow_userid: Optional[str] = Header(database.DB_TENANT),
):
    """
    Handles GET requests to /operators/<operator_id>/results.

    Parameters
    ----------
    project_id : str
    experiment_id : str
    run_id : str
    operator_id : str
    session : sqlalchemy.orm.session.Session
    kubeflow_userid : fastapi.Header

    Returns
    -------
    starlette.responses.StreamingResponse
        ZipFile of the operator results
    """
    project_controller = ProjectController(session, kubeflow_userid=kubeflow_userid)
    project_controller.raise_if_project_does_not_exist(project_id)

    experiment_controller = ExperimentController(session)
    experiment_controller.raise_if_experiment_does_not_exist(experiment_id)

    operator_controller = OperatorController(session)
    operator_controller.raise_if_operator_does_not_exist(operator_id)

    run_controller = RunController(session)
    run_controller.raise_if_run_does_not_exist(run_id, experiment_id)

    result_controller = ResultController(session)
    results = result_controller.get_results(
        experiment_id=experiment_id, run_id=run_id, operator_id=operator_id
    )

    response = StreamingResponse(results, media_type="application/x-zip-compressed")
    response.headers["Content-Disposition"] = "attachment; filename=results.zip"
    return response

def scan_data_response(
    data_stream: Iterable[TokenClassificationRecord],
    chunk_size: int = 1000,
    limit: Optional[int] = None,
) -> StreamingResponse:
    """Generate a textual stream data response for a dataset scan"""

    async def stream_generator(stream):
        """Converts the dataset scan into a text stream"""

        def grouper(n, iterable, fillvalue=None):
            args = [iter(iterable)] * n
            return itertools.zip_longest(fillvalue=fillvalue, *args)

        if limit:
            stream = takeuntil(stream, limit=limit)

        for batch in grouper(n=chunk_size, iterable=stream):
            filtered_records = filter(lambda r: r is not None, batch)
            yield "\n".join(
                map(lambda r: r.json(by_alias=True, exclude_none=True), filtered_records)
            ) + "\n"

    return StreamingResponse(stream_generator(data_stream), media_type="application/json")

async def read_item(word: str, img: Optional[bool] = None):
    public_tweets = api.search(word)
    polarity = []
    subjectivity = []
    sentiment = []
    if img:
        for tweet in public_tweets:
            polarity_temp = TextBlob(tweet.text).sentiment.polarity
            if polarity_temp > 0:
                sentiment.append("positive")
            elif polarity_temp < 0:
                sentiment.append("negative")
            else:
                sentiment.append("neutral")
            polarity.append(polarity_temp)
            subjectivity.append(TextBlob(tweet.text).sentiment.subjectivity)
        df = pd.DataFrame(
            list(zip([tweet.text for tweet in public_tweets], polarity, subjectivity, sentiment)),
            columns=['tweet', 'polarity', 'subjectivity', 'sentiment'])
        return {
            "file": StreamingResponse(io.StringIO(df.to_csv(index=False)), media_type="text/csv"),
            'text': 'prueba',
        }
    else:
        return {"id": "foo", "value": "there goes my hero"}

async def get_c_haines_model_run(
        model: ModelEnum,
        model_run_timestamp: datetime = None,
        response_format: ObjectTypeEnum = ObjectTypeEnum.GEOJSON):
    """ Return geojson/kml polygons for c-haines """
    logger.info(
        '/c-haines/%s/predictions?model_run_timestamp=%s&response_format=%s',
        model, model_run_timestamp, response_format)
    if response_format == ObjectTypeEnum.GEOJSON:
        # Not implemented for GeoJSON
        raise HTTPException(status_code=501)
    headers = {"Content-Type": kml_media_type}
    if model_run_timestamp is None:
        model_run_timestamp = await _get_most_recent_model_run(model, response_format)
    if model_run_timestamp is None:
        # Most recent model run not found
        raise HTTPException(status_code=404)
    filename = f'{model}-{model_run_timestamp}.kml'
    headers["Content-Disposition"] = f"inline;filename={filename}"
    response = StreamingResponse(
        fetch_model_run_kml_streamer(model, model_run_timestamp),
        headers=headers,
        media_type=kml_media_type)
    return response

def anonymize(image: UploadFile = File(...), configuration: UploadFile = File(...)):
    """
    Anonymize the given image

    :param image: Image file
    :param configuration: Json file
    :return: The anonymized image
    """
    try:
        result, errors = anonymizationservice.anonymize(image, configuration)
        if not errors:
            _, im_png = cv2.imencode(".png", result)
            response = StreamingResponse(io.BytesIO(im_png.tobytes()), media_type="image/png")
            return response
        else:
            return ApiResponse(
                success=False,
                error="Some data in your configuration file need to be modified. Check the /available_methods/ endpoint",
                data=errors)
    except ApplicationError as e:
        return ApiResponse(success=False, error=e)
    except Exception:
        return ApiResponse(success=False, error='unexpected server error')

def return_file():
    def file_gen(file_path):
        with open(file_path) as f:
            for line in f:
                yield line

    return StreamingResponse(file_gen(f"{__file__}"))

async def static(filename):
    if os.path.isfile(STATIC_DIR / filename):
        return FileResponse(STATIC_DIR / filename)
    return StreamingResponse(
        virtual_file(translate_size(10)),
        media_type="application/octet-stream",
    )

def list_recordings(
    req: Request,
    video: str = Query(..., regex=r'^[a-z0-9_-]+\.mp4$'),
):
    # https://github.com/tiangolo/fastapi/issues/1240#issuecomment-797618168
    # Default to the start of the file when no Range header is sent
    asked = req.headers.get("Range", "bytes=0-")
    stream, total_size = get_video_and_total_size(video)
    start_byte_requested = int(asked.split("=")[-1][:-1])
    end_byte_planned = min(start_byte_requested + BYTES_PER_RESPONSE, total_size) - 1
    chunk_generator = chunk_generator_from_stream(
        stream,
        chunk_size=10000,
        start=start_byte_requested,
        size=BYTES_PER_RESPONSE,
    )
    return StreamingResponse(
        chunk_generator,
        headers={
            "Accept-Ranges": "bytes",
            "Content-Range": f"bytes {start_byte_requested}-{end_byte_planned}/{total_size}",
            "Content-Type": "...",
        },
        status_code=206,
        media_type='video/mp4',
    )

def get_from_cache_or_download(
        repository, cache, target, exclude=["repodata.json", "current_repodata.json"]):
    """Serve from cache or download if missing."""

    _, filename = os.path.split(target)
    skip_cache = filename in exclude

    chunksize = 10000

    def data_iter(f):
        # Yield the remote file in fixed-size chunks
        chunk = f.read(chunksize)
        while chunk:
            yield chunk
            chunk = f.read(chunksize)

    if skip_cache:
        remote_file = repository.open(target)
        data_stream = remote_file.file
        return StreamingResponse(data_iter(data_stream))

    if target not in cache:
        # Copy from the remote repository into the local cache
        remote_file = repository.open(target)
        data_stream = remote_file.file
        cache.dump(target, data_stream)

    return FileResponse(cache[target])

def post_file(file: bytes = File(...)):
    """
    Serves predictions given a CSV file with no header and three columns
    specifying each penguin's features in the order culmen length, culmen
    depth, and flipper length.

    Returns a streaming response with a new CSV file that contains a column
    with the predictions.

    Inputs:
    - file: bytes from a CSV file as described above.
    """
    # Decode the bytes as text and split the lines:
    input_lines = file.decode().split()
    # Split each line as a list of the three features:
    X = [p.split(',') for p in input_lines]
    # Get predicted categories:
    pred = itemgetter(*model.predict(X))(cats)
    # Append the prediction to each input line:
    output = [p + ',' + c for p, c in zip(input_lines, pred)]
    # Join the output as a single string:
    output = '\n'.join(output)
    # Encode output as bytes:
    output = output.encode()
    return StreamingResponse(
        BytesIO(output),
        media_type='text/csv',
        headers={'Content-Disposition': 'attachment;filename="prediction.csv"'})

def stream_from_uri(uri: str, limit: Optional[int] = None) -> StreamingResponse:
    """
    Stream a data file as a streaming response

    Parameters
    ----------
    uri:
        The snapshot uri
    limit:
        The number of lines to read. Optional

    Returns
    -------
    A StreamingResponse for uri data streaming
    """
    media_type = "application/json"  # TODO: inferred from uri

    def iterate_uri_content(_uri):
        with smart_open(_uri, "rb") as f:  # TODO: check file encoding
            for line in f:
                yield line

    generator = iterate_uri_content(uri)
    if limit:
        generator = takeuntil(generator, limit=limit)
    return StreamingResponse(generator, media_type=media_type)

async def create_upload_file(text: str):
    texts = [text]
    embeds = [embed]
    # If you know what the attention layer alignments are, you can retrieve them here by
    # passing return_alignments=True
    specs = synthesizer.synthesize_spectrograms(texts, embeds)
    spec = specs[0]
    print("Created the mel spectrogram")

    # Generating the waveform
    print("Synthesizing the waveform:")
    # Synthesizing the waveform is fairly straightforward. Remember that the longer the
    # spectrogram, the more time-efficient the vocoder.
    generated_wav = vocoder.infer_waveform(spec)

    # Post-generation
    # There's a bug with sounddevice that makes the audio cut one second earlier, so we
    # pad it.
    generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant")

    # Trim excess silences to compensate for gaps in spectrograms (issue #53)
    generated_wav = encoder.preprocess_wav(generated_wav)

    # Write the waveform to an in-memory WAV buffer and rewind it before streaming
    output = BytesIO()
    wavfile.write(output, synthesizer.sample_rate, generated_wav.astype(np.float32))
    output.seek(0)
    return StreamingResponse(output, media_type="audio/x-wav")