Example 1
def serve_path(
        path,
        channel: db_models.Channel = Depends(get_channel_allow_proxy),
        cache: LocalCache = Depends(LocalCache),
        session=Depends(get_remote_session),
):
    if channel.mirror_channel_url and channel.mirror_mode == "proxy":
        repository = RemoteRepository(channel.mirror_channel_url, session)
        return get_from_cache_or_download(repository, cache, path)

    if path == "" or path.endswith("/"):
        path += "index.html"
    try:
        return StreamingResponse(pkgstore.serve_path(channel.name, path))
    except FileNotFoundError:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"{channel.name}/{path} not found",
        )
    except IsADirectoryError:
        try:
            path += "/index.html"
            return StreamingResponse(pkgstore.serve_path(channel.name, path))
        except FileNotFoundError:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"{channel.name}/{path} not found",
            )
Example 2
def convert_structure(inp: str, fmt: Format = Format.sdf, get3d: bool = False):
    try:
        out = chem.convert(structure=inp, fmt=fmt, get3d=get3d)
    except Exception:
        raise HTTPException(400, detail="Structure could not be converted")
    if fmt == Format.sdf:
        return StreamingResponse(io.BytesIO(out.encode()),
                                 media_type="chemical/x-mdl-sdfile")
    if fmt == Format.svg:
        return StreamingResponse(io.BytesIO(out.encode()),
                                 media_type="image/svg+xml")
    return
Example 3
async def get_asset_artwork(assetid: str, stream_output: bool = True):
    content_metadata = await _query_content_metadata(assetid=assetid)

    if content_metadata['results'].get(assetid, False):
        artwork_url = content_metadata['results'][assetid]['artwork']['url'].format(
            w=content_metadata['results'][assetid]['artwork']['width'],
            h=content_metadata['results'][assetid]['artwork']['height'],
            f='jpg',
        )
        async with aiohttp.ClientSession() as session:
            async with session.get(artwork_url) as response:
                content = await response.content.read()
        return StreamingResponse(io.BytesIO(content), media_type='image/jpeg') if stream_output else content
    else:
        return StreamingResponse(io.BytesIO(b'')) if stream_output else b''
Example 4
def scan_data_response(
    data_stream: Iterable[TokenClassificationRecord],
    chunk_size: int = 1000,
    limit: Optional[int] = None,
) -> StreamingResponse:
    """Generate an textual stream data response for a dataset scan"""
    async def stream_generator(stream):
        """Converts dataset scan into a text stream"""
        def grouper(n, iterable, fillvalue=None):
            args = [iter(iterable)] * n
            return itertools.zip_longest(fillvalue=fillvalue, *args)

        if limit:
            stream = takeuntil(stream, limit=limit)

        for batch in grouper(
                n=chunk_size,
                iterable=stream,
        ):
            filtered_records = filter(lambda r: r is not None, batch)
            yield "\n".join(
                map(lambda r: r.json(by_alias=True, exclude_none=True),
                    filtered_records)) + "\n"

    return StreamingResponse(stream_generator(data_stream),
                             media_type="application/json")
Example 5
async def get_c_haines_model_run(
        model: ModelEnum,
        model_run_timestamp: datetime = None,
        response_format: ObjectTypeEnum = ObjectTypeEnum.GEOJSON):
    """ Return geojson/kml polygons for c-haines """
    logger.info(
        '/c-haines/%s/predictions?model_run_timestamp=%s&response_format=%s',
        model, model_run_timestamp, response_format)
    if response_format == ObjectTypeEnum.GEOJSON:
        # Not implemented for GeoJSON
        raise HTTPException(status_code=501)
    headers = {"Content-Type": kml_media_type}
    if model_run_timestamp is None:
        model_run_timestamp = await _get_most_recent_model_run(
            model, response_format)
    if model_run_timestamp is None:
        # most recent model not found
        raise HTTPException(status_code=404)
    filename = f'{model}-{model_run_timestamp}.kml'
    headers["Content-Disposition"] = f"inline;filename={filename}"
    response = StreamingResponse(fetch_model_run_kml_streamer(
        model, model_run_timestamp),
                                 headers=headers,
                                 media_type=kml_media_type)
    return response
Example 6
async def create_upload_file(text: str):
    texts = [text]
    embeds = [embed]
    # If you know what the attention layer alignments are, you can retrieve them here by
    # passing return_alignments=True
    specs = synthesizer.synthesize_spectrograms(texts, embeds)
    spec = specs[0]
    print("Created the mel spectrogram")

    # Generating the waveform
    print("Synthesizing the waveform:")

    # Synthesizing the waveform is fairly straightforward. Remember that the longer the
    # spectrogram, the more time-efficient the vocoder.
    generated_wav = vocoder.infer_waveform(spec)

    # Post-generation
    # There's a bug with sounddevice that makes the audio cut one second earlier, so we
    # pad it.
    generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate),
                           mode="constant")

    # Trim excess silences to compensate for gaps in spectrograms (issue #53)
    generated_wav = encoder.preprocess_wav(generated_wav)

    # Save it on the disk
    output = BytesIO()
    wavfile.write(output, synthesizer.sample_rate,
                  generated_wav.astype(np.float32))
    return StreamingResponse(output, media_type="audio/x-wav")
Example 7
async def static(filename):
    if os.path.isfile(STATIC_DIR / filename):
        return FileResponse(STATIC_DIR / filename)
    return StreamingResponse(
        virtual_file(translate_size(10)),
        media_type="application/octet-stream",
    )
Example 8
def get_from_cache_or_download(
        repository,
        cache,
        target,
        exclude=["repodata.json", "current_respodata.json"]):
    """Serve from cache or download if missing."""

    _, filename = os.path.split(target)
    skip_cache = filename in exclude

    chunksize = 10000

    def data_iter(f):
        # Read the remote file in fixed-size chunks until it is exhausted.
        chunk = f.read(chunksize)
        while chunk:
            yield chunk
            chunk = f.read(chunksize)

    if skip_cache:
        remote_file = repository.open(target)
        data_stream = remote_file.file

        return StreamingResponse(data_iter(data_stream))

    if target not in cache:
        # copy from repository to cache
        remote_file = repository.open(target)
        data_stream = remote_file.file
        cache.dump(target, data_stream)

    return FileResponse(cache[target])
Example 9
def stream_from_uri(uri: str,
                    limit: Optional[int] = None) -> StreamingResponse:
    """
    Stream a data file as a streaming response

    Parameters
    ----------
    uri:
        The snapshot uri
    limit:
        The number of lines to read. Optional

    Returns
    -------
        A StreamingResponse that streams the uri data

    """

    media_type = "application/json"  # TODO: inferred from uri

    def iterate_uri_content(_uri):
        with smart_open(_uri, "rb") as f:  # TODO: check file encoding
            for line in f:
                yield line

    generator = iterate_uri_content(uri)
    if limit:
        generator = takeuntil(generator, limit=limit)
    return StreamingResponse(generator, media_type=media_type)
Example 10
def generate_openvpn_config(
        current_user: User = Depends(get_current_active_user)):
    config = generate_config(current_user.username)
    response = StreamingResponse(iter([config]), media_type="text/plain")
    response.headers[
        "Content-Disposition"] = f"attachment; filename={current_user.username}.ovpn"
    return response
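Since this handler only sets a header and streams a small in-memory string, the pattern is easy to exercise with FastAPI's TestClient. The sketch below is a self-contained, hypothetical version: the /ovpn route, the hard-coded config string, and the demo.ovpn filename are stand-ins for generate_config and the real dependency-injected user.

from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from fastapi.testclient import TestClient

app = FastAPI()


@app.get("/ovpn")  # hypothetical route, for illustration only
def generate_openvpn_config_demo():
    config = "client\ndev tun\nproto udp\n"  # stand-in for generate_config()
    response = StreamingResponse(iter([config]), media_type="text/plain")
    response.headers["Content-Disposition"] = "attachment; filename=demo.ovpn"
    return response


client = TestClient(app)
resp = client.get("/ovpn")
assert resp.status_code == 200
assert resp.headers["content-disposition"] == "attachment; filename=demo.ovpn"
assert resp.text.startswith("client")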
Example 11
async def documents_get(
        doc_id: int,
        current_user: UserSchema = Depends(get_current_user_from_cookie),
):
    if current_user.role not in (
            UserRoleEnum.ADMIN,
            UserRoleEnum.LAWYER,
            UserRoleEnum.CLIENT,
    ):
        raise HTTPException(status_code=403, detail="Access forbidden")
    sel = sa.select([Documents.c.name,
                     Documents.c.data]).where(Documents.c.id == doc_id)
    async with engine.begin() as conn:
        res = (await conn.execute(sel)).fetchone()
        if res is None:
            raise HTTPException(status_code=404, detail="Document not found")
        doc_name, data = res

    doc_mime = DocumentMimeEnum(magic.from_buffer(data, mime=True))
    doc_name = f"{doc_name.replace(' ', '_')}.{doc_mime.name}"

    try:
        doc_name.encode("ascii")
        file_expr = 'filename="{}"'.format(doc_name)
    except UnicodeEncodeError:
        # handle a non-ASCII filename
        file_expr = "filename*=utf-8''{}".format(quote(doc_name))

    headers = {"Content-Disposition": "attachment; {}".format(file_expr)}

    return StreamingResponse(io.BytesIO(data),
                             headers=headers,
                             media_type=doc_mime.value)
Example 12
def runObjectSegmentor(imageFile: UploadFile = File(...)):
    applicationLogger.info('Handling a segmentor request!')
    imageExtension = imageFile.content_type.split('/')[-1]
    predictionsImage = modelService(imageFile.file, imageExtension,
                                    'segmentor')
    return StreamingResponse(predictionsImage,
                             media_type=imageFile.content_type)
Example 13
def figure_blobs_batch_api(uri_batch: URIBatchIn):
    # Get project
    project = Project()
    if not project.exists():
        raise HTTPException(status_code=404)

    return StreamingResponse(figure_batch_result_streamer(uri_batch, project.repo))
Example 14
async def run_figures_search_api(q: Optional[str] = '',
                                 record_range: Optional[str] = '',
                                 record_density: Optional[int] = 50,
                                 calc_ranges: Optional[bool] = False):
    # Get project
    project = Project()
    if not project.exists():
        raise HTTPException(status_code=404)

    query = q.strip()
    try:
        syntax_error_check(query)
    except SyntaxError as se:
        raise HTTPException(status_code=400, detail={
            'name': 'SyntaxError',
            'statement': se.text,
            'line': se.lineno,
            'offset': se.offset
        })

    traces = project.repo.query_figure_objects(query=query)

    try:
        record_range = str_to_range(record_range)
    except ValueError:
        raise HTTPException(status_code=400, detail='Invalid range format')

    streamer = figure_search_result_streamer(traces, record_range, record_density, calc_ranges)
    return StreamingResponse(streamer)
Example 15
async def run_metric_search_api(q: Optional[str] = '',
                                p: Optional[int] = 50,
                                x_axis: Optional[str] = None):
    steps_num = p

    if x_axis:
        x_axis = x_axis.strip()

    # Get project
    project = Project()
    if not project.exists():
        raise HTTPException(status_code=404)

    query = q.strip()
    try:
        syntax_error_check(query)
    except SyntaxError as se:
        raise HTTPException(status_code=400, detail={
            'name': 'SyntaxError',
            'statement': se.text,
            'line': se.lineno,
            'offset': se.offset
        })

    traces = project.repo.query_metrics(query=query)

    streamer = metric_search_result_streamer(traces, steps_num, x_axis)
    return StreamingResponse(streamer)
Example 16
async def stream_file(filePath: str, range: Optional[str] = Header(None)):
    def get_file():
        f = open(filePath, 'rb')
        return f, os.path.getsize(filePath)

    def chunk_generator_from_stream(stream, chunk_size, start, size):
        bytes_read = 0
        stream.seek(start)
        while bytes_read < size:
            bytes_to_read = min(chunk_size, size - bytes_read)
            yield stream.read(bytes_to_read)
            bytes_read = bytes_read + bytes_to_read
        stream.close()

    asked = range or "bytes=0-"
    # print(asked)
    stream, total_size = get_file()
    start_byte = int(asked.split("=")[-1].split('-')[0])

    return StreamingResponse(
        chunk_generator_from_stream(stream,
                                    start=start_byte,
                                    chunk_size=CONTENT_CHUNK_SIZE,
                                    size=total_size),
        headers={
            "Accept-Ranges": "bytes",
            "Content-Range":
            f"bytes {start_byte}-{start_byte+CONTENT_CHUNK_SIZE}/{total_size}",
            "Content-Type": "video/mp4"
        },
        status_code=206)
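The handler above derives the starting offset by string-splitting the Range header. A stand-alone sketch of that parsing step follows; the helper name parse_range_start is invented here, and a production version would also validate suffix ranges (e.g. bytes=-500) and malformed values instead of silently falling back.

def parse_range_start(range_header, default=0):
    """Return the first byte offset from a header such as 'bytes=1000-'
    or 'bytes=1000-2000'; fall back to `default` when it is missing."""
    if not range_header:
        return default
    # 'bytes=1000-2000' -> '1000-2000' -> '1000'
    start = range_header.split("=")[-1].split("-")[0]
    return int(start) if start else default


assert parse_range_start(None) == 0
assert parse_range_start("bytes=0-") == 0
assert parse_range_start("bytes=1000-") == 1000
assert parse_range_start("bytes=1000-2000") == 1000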
Example 17
async def voice(txt: str):
    #list_voices()
    # Set the text input to be synthesized
    synthesis_input = texttospeech.SynthesisInput(text=txt)

    # Build the voice request, select the language code ("en-US") and the ssml
    # voice gender ("neutral")
    voice = texttospeech.VoiceSelectionParams(
        language_code="en-US",
        name="en-US-Wavenet-F",
        ssml_gender=texttospeech.SsmlVoiceGender.FEMALE)

    # Select the type of audio file you want returned
    audio_config = texttospeech.AudioConfig(
        audio_encoding=texttospeech.AudioEncoding.LINEAR16)

    # Perform the text-to-speech request on the text input with the selected
    # voice parameters and audio file type
    response = tts_client.synthesize_speech(input=synthesis_input,
                                            voice=voice,
                                            audio_config=audio_config)

    # The response's audio_content is binary.
    async def generate():
        with io.BytesIO(response.audio_content) as buf:
            data = buf.read(1024)
            while data:
                yield data
                data = buf.read(1024)

    return StreamingResponse(generate(), media_type="audio/wav")
Example 18
def output_data(
    df: pd.DataFrame,
    output_format: Optional[str] = "csv",
    output_delimiter: Optional[str] = ",",
) -> StreamingResponse:
    if output_format == "csv":
        sio = StringIO()
        df.to_csv(sio, sep=output_delimiter)
    elif output_format == "json":
        sio = StringIO()
        # Make sure that the index (= ids) is included
        df = df.reset_index()
        df.to_json(sio, orient="records")
    elif output_format == "parquet":
        # Parquet is a binary format, so it needs a bytes buffer
        # rather than a text buffer.
        sio = BytesIO()
        df.to_parquet(sio)
    else:
        raise HTTPException(
            400, f"Do not understand output format {output_format}")

    sio.seek(0)

    return StreamingResponse(
        sio,
        headers={
            "Content-Disposition":
            f"attachment;filename=features.{output_format}"
        },
    )
Example 19
def post_file(file: bytes = File(...)):
    """
    Serves predictions given a CSV file with no header and three columns
    specifying each penguin's features in the order culmen length, culmen
    depth, and flipper_length. Returns a streaming response with a new CSV file
    that contains a column with the predictions.

    Inputs:
        - file: bytes from a CSV file as described above.
    """

    # Decode the bytes as text and split the lines:
    input_lines = file.decode().split()
    # Split each line as a list of the three features:
    X = [p.split(',') for p in input_lines]

    # Get predicted categories:
    pred = itemgetter(*model.predict(X))(cats)

    # Append the prediction to each input line:
    output = [p + ',' + c for p, c in zip(input_lines, pred)]
    # Join the output as a single string:
    output = '\n'.join(output)
    # Encode output as bytes:
    output = output.encode()

    return StreamingResponse(BytesIO(output),
                             media_type='text/csv',
                             headers={
                                 'Content-Disposition':
                                 'attachment;filename="prediction.csv"'
                             })
Example 20
def image(board_name: str, img_fname: str):
    # for image from disk:
    #    img_file = os.path.join(tempfile.gettempdir(), tfile.name) # type: str
    return StreamingResponse(
        get_fourchan_file(IMAGE_ENDPOINT.format(board_name=board_name,
                                                img_fname=img_fname),
                          hostname=FOURCHAN_IMAGE_HOST))
Example 21
def list_recordings(
        req: Request,
        video: str = Query(..., regex=r'^[a-z0-9_-]+\.mp4$'),
):
    # https://github.com/tiangolo/fastapi/issues/1240#issuecomment-797618168
    asked = req.headers.get("Range")
    stream, total_size = get_video_and_total_size(video)
    start_byte_requested = int(asked.split("=")[-1][:-1])
    end_byte_planned = min(start_byte_requested + BYTES_PER_RESPONSE,
                           total_size) - 1
    chunk_generator = chunk_generator_from_stream(stream,
                                                  chunk_size=10000,
                                                  start=start_byte_requested,
                                                  size=BYTES_PER_RESPONSE)
    return StreamingResponse(
        chunk_generator,
        headers={
            "Accept-Ranges": "bytes",
            "Content-Range":
            f"bytes {start_byte_requested}-{end_byte_planned}/{total_size}",
            "Content-Type": "..."
        },
        status_code=206,
        media_type='video/mp4',
    )
Example 22
def get_screenshot(screenshot_id: PositiveInt):
    '''Retrieve a screenshot'''
    try:
        file = get_file(f'{screenshot_id}.png')
        return StreamingResponse(io.BytesIO(file), media_type='image/png')
    except ClientError:
        raise HTTPException(status_code=404, detail='Picture not found')
Example 23
def return_file():
    def file_gen(file_path):
        with open(file_path) as f:
            for line in f:
                yield line

    return StreamingResponse(file_gen(f"{__file__}"))
Example 24
async def load_trading_mining_data(viteAddress: str):
    try:
        api_url = get_dex_api_url(1) + '/mining/trade'
        params = {'address': viteAddress}

        response = requests.get(url=api_url, params=params)

        if response.status_code == status.HTTP_200_OK:
            resp = response.json()
            check_response(resp)
            csv_data = build_mining_data_list(resp['data']['miningList'])

            generated_message = generate_return_file_msg(csv_data)
            response = StreamingResponse(
                content=generated_message['file'],
                media_type='text/csv',
                headers={
                    'Content-Disposition':
                    f'attachment; filename=tradeMining_{viteAddress}.csv'
                })
            return response
        else:
            raise HTTPException(status_code=response.status_code,
                                detail='Error from vite network')
    except Exception as e:
        raise e
Example 25
def anonymize(image: UploadFile = File(...),
              configuration: UploadFile = File(...)):
    """
    Anonymize the given image
    :param image: Image file
    :param configuration: Json file
    :return: The anonymized image
    """
    try:
        result, errors = anonymizationservice.anonymize(image, configuration)
        if not errors:
            _, im_png = cv2.imencode(".png", result)
            response = StreamingResponse(io.BytesIO(im_png.tobytes()),
                                         media_type="image/png")
            return response
        else:
            return ApiResponse(
                success=False,
                error=
                "Some data in your configuration file need to be modified. Check the /available_methods/ endpoint",
                data=errors)
    except ApplicationError as e:
        return ApiResponse(success=False, error=e)
    except Exception:
        return ApiResponse(success=False, error='unexpected server error')
Example 26
async def fetch_data(data_info: str):
    startTime = time.time()

    data_list = data_info.split('|')
    if len(data_list) != 2:
        raise HTTPException(status_code=400,
                            detail=f"Invalid data format: {data_info}")
    data_name, cols = data_list[0], data_list[1].split(',')
    print("DATA NAME", data_name)
    if data_name[-4:] == '.csv':
        filename = os.path.join(DATA_DIR, data_name)
    else:
        filename = os.path.join(DATA_DIR, data_name + '.csv')
    stream = io.StringIO()

    if 'all' in cols:
        dataset = pd.read_csv(filename)
    else:
        dataset = getDataFromCols(filename, cols)

    dataset.to_csv(stream, index=False)
    response = StreamingResponse(iter([stream.getvalue()]),
                                 media_type="text/csv")
    response.headers["Content-Disposition"] = f"attachment; filename={data_name}.csv"

    print(f"Loading {len(dataset.columns)} columns from {data_name} took {time.time() - startTime} secs: {filename}")

    return response
Example 27
async def read_item(word: str, img: Optional[bool] = None):
    public_tweets = api.search(word)
    polarity = []
    subjectivity = []
    sentiment = []
    if img:
        for tweet in public_tweets:
            polarity_temp = TextBlob(tweet.text).sentiment.polarity
            if (polarity_temp > 0):
                sentiment.append("positive")
            if (polarity_temp < 0):
                sentiment.append("negative")
            if (polarity_temp == 0):
                sentiment.append('neutral')
            polarity.append(polarity_temp)
            subjectivity.append(TextBlob(tweet.text).sentiment.subjectivity)
        df = pd.DataFrame(
            list(
                zip([tweet.text for tweet in public_tweets], polarity,
                    subjectivity, sentiment)),
            columns=['tweet', 'polarity', 'subjectivity', 'sentiment'])
        return {
            "file":
            StreamingResponse(io.StringIO(df.to_csv(index=False)),
                              media_type="text/csv"),
            'text':
            'prueba'
        }

    else:
        return {"id": "foo", "value": "there goes my hero"}
def single(city1_df, city, statecode):
    """Used to create and style a visualization with only one city."""

    styling = dict()
    styling['city1color'] = ['lightgreen', '#4BB543', 'darkcyan',
                             '#663399', '#CC0000']
    styling['title'] = f'Rental Price Estimates for {city}, {statecode}'

    layout = go.Layout(
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(0,0,0,0)'
    )

    # Instantiate figure.
    fig = go.Figure(data=go.Bar(name=f'{city}, {statecode}',
                                x=city1_df['bedroom_size'],
                                y=city1_df['price_2020_08'],
                                marker_color=styling.get('city1color')),
                    layout=layout)
    fig.update_layout(barmode='group',
                      title_text=styling.get('title'),
                      xaxis_title='Number of Bedrooms',
                      yaxis_title='Monthly Rental Estimate',
                      font=dict(family='Open Sans, extra bold',
                                size=10),
                      legend_title='Cities')
    img = fig.to_image(format="png")

    return StreamingResponse(io.BytesIO(img), media_type="image/png")
Example 29
async def battlefy_seed_csv(request: Request, tournament_id: str):
    """Returns a CSV of teams and players for seeding use"""
    async with aiohttp.ClientSession() as session:
        async with session.get(
                f"https://dtmwra1jsgyb0.cloudfront.net/tournaments/{tournament_id}/teams"
        ) as resp:
            data = await resp.json()
            if resp.status != 200:
                raise HTTPException(status_code=resp.status,
                                    detail=f"{data['error']}")
            # If status is 200
            # Create in-memory store for csv writer
            csv_file = io.StringIO()
            csv_writer = csv.writer(csv_file)
            csv_writer.writerow([
                "team", "player1", "player2", "player3", "player4", "player5",
                "player6", "player7", "player8"
            ])
            for team in data:
                team_row = [team['name']]
                for p in team.get('players'):
                    name = p['inGameName']
                    if name[0] is "=":
                        name = f".{name}"
                    team_row.append(name)
                csv_writer.writerow(team_row)
            # Return CSV
            response = StreamingResponse(iter([csv_file.getvalue()]),
                                         media_type="text/csv")
            response.headers[
                "Content-Disposition"] = "attachment; filename=teams.csv"
            return response
Example 30
async def tweet(text: Optional[str] = None):
    try:
        img = Image.open("./blank.png")
    except Exception:
        blank = requests.get(
            "https://firebasestorage.googleapis.com/v0/b/faketrumptweets-8c438.appspot.com/o/blank-2.png?alt=media&token=79366004-9c55-4c2b-b0aa-a970bf4c2b06"
        )
        with open("blank.png", "wb") as f:
            f.write(blank.content)
            f.close()
    img = Image.open("./blank.png")
    draw = ImageDraw.Draw(img)
    try:
        with open("font.ttf", "rb") as font:
            font.close()
    except Exception:
        font = requests.get(
            "https://firebasestorage.googleapis.com/v0/b/faketrumptweets-8c438.appspot.com/o/font.ttf?alt=media&token=9b1a1497-4284-4a3d-8212-91179f4720ea"
        )
        with open('font.ttf', 'wb') as f:
            f.write(font.content)

    font = ImageFont.truetype("font.ttf", 18)
    lines = textwrap.wrap(text, width=60)
    if len(lines) > 1:
        draw.text((15, 62),
                  "Maximum of 60 characters are allowed.",
                  fill="#604af0",
                  font=font)
    else:
        draw.text((15, 57), text, fill="#14171a", font=font)
    img.save("hi.png")
    file_like = open("./hi.png", mode="rb")
    return StreamingResponse(file_like, media_type="image/png")