Example #1
async def create_upload_video(file: UploadFile = File(...),
                              username: str = Depends(get_current_username)):
    contents = await file.read()
    contents_length = len(contents)
    await file.seek(0)

    # Make a bucket with the make_bucket API call.
    try:
        minioClient.make_bucket(username)
    except BucketAlreadyOwnedByYou as err:
        pass
    except BucketAlreadyExists as err:
        pass
    except ResponseError as err:
        raise

    # Put the uploaded video into the user's bucket as 'video.mp4'.
    try:
        minioClient.put_object(username, 'video.mp4', file.file,
                               contents_length)
    except ResponseError as err:
        print(err)

    return {"message": "success"}
Example #2
async def classify_file(request: Request, image: bytes = File(...)):
    """
    Classifies an image as dog or cat. The input image is received as a stream of bytes.

    Arguments:
        request {Request} -- The user request.

    Keyword Arguments:
        image {bytes} -- Input image (as bytes) to be classified.

    Raises:
        HTTPException: When the input image could not be loaded.

    Returns:
        models.ClassificationResult -- JSON object with the classification result.
    """
    try:
        image = image_utils.bytes_to_image(image)
    except Exception:
        raise HTTPException(
            status_code=HTTP_400_BAD_REQUEST,
            detail="Error loading input image",
        )
    return classify(image, request.app.state.model)
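The image_utils.bytes_to_image helper is not shown in the example; a typical implementation, sketched here purely as an assumption, decodes the raw upload bytes with Pillow.

import io
from PIL import Image

def bytes_to_image(data: bytes) -> Image.Image:
    # Assumed helper: wrap the raw bytes and let Pillow decode them.
    return Image.open(io.BytesIO(data)).convert("RGB")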
Example #3
def app_update_tindakan(
    id: int,
    kondisi_awal: Optional[str] = Form(None),
    tindakan: Optional[str] = Form(None),
    kondisi_pasca: Optional[str] = Form(None),
    id_masalah: Optional[int] = Form(None),
    id_ruangan: Optional[int] = Form(None),
    id_sarana: Optional[int] = Form(None),
    id_kategori: Optional[int] = Form(None),
    foto: Optional[UploadFile] = File(None),
    current_user: user_schema.User = Depends(get_current_user),
    db: Session = Depends(get_db)):
    response = None
    try:
        tindakan = TindakanUpdate(id=id,
                                  kondisi_awal=kondisi_awal,
                                  tindakan=tindakan,
                                  kondisi_pasca=kondisi_pasca,
                                  id_user=current_user.id,
                                  id_masalah=id_masalah,
                                  id_sarana=id_sarana,
                                  id_kategori=id_kategori,
                                  id_ruangan=id_ruangan,
                                  foto=foto)
        response = update_tindakan(db=db, tindakan=tindakan)
        del tindakan
        return {
            "status": response[0],
            "message": response[1],
            "data": response[2]
        }
    except Exception as e:
        raise HTTPException(status_code=sts.HTTP_410_GONE,
                            detail="Error = " + str(e))
    finally:
        del response
Example #4
def ocr_func(pdfBytes: bytes = File(...), txt_folder: str = './temp/'):
    '''
    Takes an uploaded .pdf file, converts it to plain text, and returns the
    text split into paragraphs.
    The page-range slice size can be increased to keep more images in memory,
    but this provides no speed boost, as the bottleneck is
    pytesseract.image_to_string.
    '''
    fileReader = PdfFileReader(BytesIO(pdfBytes))
    maxPages = fileReader.numPages
    del fileReader
    fulltext = []
    for page in range(1, maxPages + 1):
        pil_image = convert_from_bytes(pdfBytes,
                                       dpi=300,
                                       first_page=page,
                                       last_page=page,
                                       fmt='jpg',
                                       thread_count=1,
                                       grayscale=True)
        fulltext += [
            str(pytesseract.image_to_string(image)) for image in pil_image
        ]
        pil_image.clear()
    return (''.join(fulltext).split('\n\n'))
Example #5
async def predict_file(file: bytes = File(...)):
    print("predict_file")
    print(len(file), type(file))

    # Save the image
    image_path = "test.png"
    with open(image_path, "wb") as output:
        output.write(file)

    # Load & preprocess
    test_data = await load_preprocess_image_from_path(image_path)

    # Make prediction
    prediction = prediction_model.predict(test_data)
    idx = prediction.argmax(axis=1)[0]
    prediction_label = index2label[idx]

    return {
        "input_image_shape": str(test_data.element_spec.shape),
        "prediction_shape": prediction.shape,
        "prediction_label": prediction_label,
        "prediction": prediction.tolist(),
        "accuracy": round(np.max(prediction) * 100, 2)
    }
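Writing every upload to the fixed path "test.png" will collide when requests arrive concurrently; a minimal sketch of the same save step using a unique temporary file (only the file-saving part is shown, the rest of the flow is unchanged):

import tempfile

def save_upload_to_temp(file: bytes) -> str:
    # Write the raw upload to a uniquely named .png and return its path.
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as output:
        output.write(file)
        return output.name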
Example #6
async def create_upload_file(file: UploadFile = File(...)):
    # return {"filename": file.filename}

    contents = file.file.read().decode("utf-8")
    content_html = """
            <div style="background:lightgray; padding:10px; border-radius:12px;
                font-weight: bold; font-size: 16px; word-break:break-all">
            <code>
        """
    for ch in contents:
        if ch == '\n':
            content_html += "<br>"
        elif ch == " ":
            content_html += "&nbsp;"
        elif ch == "<":
            content_html += "&lt;"
        elif ch == ">":
            content_html += "&gt;"
        elif ch == "\t":
            content_html += "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"
        else:
            content_html += ch
    content_html += "</code></div>"
    return HTMLResponse(content_html)
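The character-by-character loop above can be sketched more compactly with the standard library's html.escape, which also escapes '&'; the function name below is illustrative and the markup is unchanged.

import html
from fastapi import File, UploadFile
from fastapi.responses import HTMLResponse

async def create_upload_file_escaped(file: UploadFile = File(...)):
    contents = file.file.read().decode("utf-8")
    escaped = (html.escape(contents)            # handles &, <, > and quotes
               .replace("\t", "&nbsp;" * 8)     # tab -> eight non-breaking spaces
               .replace("\n", "<br>")
               .replace(" ", "&nbsp;"))
    return HTMLResponse(
        '<div style="background:lightgray; padding:10px; border-radius:12px;'
        ' font-weight: bold; font-size: 16px; word-break:break-all">'
        f'<code>{escaped}</code></div>')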
Example #7
def extraction(
    input_format: Optional[str] = "csv",
    data_file: UploadFile = File(default=None),
    column_id: Optional[str] = None,
    column_kind: Optional[str] = None,
    column_sort: Optional[str] = None,
    column_value: Optional[str] = None,
    settings: Optional[str] = "comprehensive",
    output_format: Optional[str] = "csv",
    output_delimiter: Optional[str] = ",",
) -> StreamingResponse:
    df = read_in_data(input_format, data_file)
    features = extract_features(
        df,
        column_id=column_id,
        column_kind=column_kind,
        column_sort=column_sort,
        column_value=column_value,
        settings=settings,
    )

    return output_data(
        features, output_format=output_format, output_delimiter=output_delimiter
    )
Example #8
async def create_upload_file(file: UploadFile = File(...),
                             db: Session = Depends(get_db)):
    try:
        print('Got a file request from client for detecting...')
        #Get type of uploaded file
        tail_of_file = file.content_type.split('/')[1]

        #write file into folder
        contents = await file.read()
        str_id = str(uuid.uuid1())
        filename = ''.join(
            ['static/images/original_images/', str_id, '.', tail_of_file])
        with open(filename, "wb") as f:
            f.write(contents)
        #Create an instance of uploaded file for database query
        upload_file = schemas.UploadedImage(id=str_id,
                                            url_image=filename,
                                            status="PROCESSING")

        #set a row for new uploaded file (through query in crud)
        res = crud.create_upload_file(db, upload_file)

        #Declare dir path for folder that storage cell images
        file_out_folder = os.path.join(
            DESTINATION_DIR_PATH,
            ''.join(os.path.basename(filename).split('.')[:-1]))

        #Pass current task to worker
        worker_task = extract_layout.apply_async(
            args=([filename, file_out_folder]), link=inference_ocr.s(filename))
        #await database.insert_into_result(res, filename)
        print('Ran worker for OCR service....')
    except ValueError as e:
        raise HTTPException(status_code=404, detail=e.args[0])
    # Returning from "finally" would swallow the HTTPException above,
    # so return only once the try block has completed.
    return {"id": str_id}
Example #9
async def search_with_image(request: Request,
                            image: UploadFile = File(...),
                            db: Session = Depends(get_db)):

    with open("out.png", "wb") as buffer:
        shutil.copyfileobj(image.file, buffer)

    image_data = Image.open("out.png").convert("RGB")
    feature = encoder.encode(image_data)

    index_ids = index.search(feature, 4)
    food_items = []
    for index_id in index_ids[0]:

        if index_id != -1:
            item = crud.get_food_by_index_id(db, index_id)
            if item is not None:

                food_items.append(schemas.Food.from_orm(item))

    return templates.TemplateResponse("result.html", {
        "request": request,
        "food_items": food_items
    })
Example #10
def recognize(file: bytes = File(...)):
    try:
        f = FaceRecogniton()
        image = Image.open(io.BytesIO(file)).convert("RGB")
        img = np.array(image)
        encodings, labels = f.loadEncodings()
        f.trainKNN(encodings, encodings, labels, labels, 2)
        model = f.loadModel(type="knn")
        boxes, predictions, scores = f.inference_image(model, img)
        print(boxes)

        response = {}
        response["faces"] = []
        for box, label, s in zip(boxes, predictions, scores):
            temp = {}
            temp["box"] = box
            temp["label"] = label
            temp["score"] = str(s)
            response["faces"].append(temp)

        response["image"] = encode(f.visualize(img, boxes, predictions))
        return response
    except Exception as e:
        # Exceptions are not JSON-serializable; return the message instead.
        return {"Error": str(e)}
Example #11
async def predict_face(image: UploadFile = File(...)):
    try:
        contents = await image.read()
        # image_bytes = Image.open(io.BytesIO(contents))

        i = np.frombuffer(contents, dtype=np.uint8)
        im = cv2.imdecode(i, cv2.IMREAD_UNCHANGED)
        # cv2.imwrite(datetime.now().strftime('%m%d_%H%M%S%f')+'.jpg',im)

        tsr_imga = cv2.cvtColor(im, cv2.COLOR_BGR2RGBA)
        cudaImage = jetson.utils.cudaFromNumpy(tsr_imga)

        detections = fdNet.Detect(cudaImage, im.shape[0], im.shape[1])
        # jetson.utils.saveImage(datetime.now().strftime('%m%d_%H%M%S%f')+'.jpg', cudaImage)

        data = {"success": False}

        if detections:

            preds = []
            for detection in detections:
                preds.append({
                    "confidence": float(detection.Confidence),
                    "label": fdNet.GetClassDesc(detection.ClassID),
                    "y_min": int(detection.Top),
                    "x_min": int(detection.Left),
                    "y_max": int(detection.Bottom),
                    "x_max": int(detection.Right),
                })
            data["predictions"] = preds
            data["success"] = True

        return data
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
Example #12
def file_upload(
        file: UploadFile = File(...),
        meta: Optional[str] = Form("null"),  # JSON serialized string
        remove_numeric_tables: Optional[bool] = Form(None),
        remove_whitespace: Optional[bool] = Form(None),
        remove_empty_lines: Optional[bool] = Form(None),
        remove_header_footer: Optional[bool] = Form(None),
        valid_languages: Optional[List[str]] = Form(None),
        split_by: Optional[str] = Form(None),
        split_length: Optional[int] = Form(None),
        split_overlap: Optional[int] = Form(None),
        split_respect_sentence_boundary: Optional[bool] = Form(None),
):
    if not INDEXING_PIPELINE:
        raise HTTPException(status_code=501,
                            detail="Indexing Pipeline is not configured.")
    try:
        file_path = Path(
            FILE_UPLOAD_PATH) / f"{uuid.uuid4().hex}_{file.filename}"
        with file_path.open("wb") as buffer:
            shutil.copyfileobj(file.file, buffer)
        INDEXING_PIPELINE.run(
            file_path=file_path,
            remove_numeric_tables=remove_numeric_tables,
            remove_whitespace=remove_whitespace,
            remove_empty_lines=remove_empty_lines,
            remove_header_footer=remove_header_footer,
            valid_languages=valid_languages,
            split_by=split_by,
            split_length=split_length,
            split_overlap=split_overlap,
            split_respect_sentence_boundary=split_respect_sentence_boundary,
            meta=json.loads(meta) or {},
        )
    finally:
        file.file.close()
Example #13
async def create_portfolio(
        username: str,
        file: UploadFile = File(...),
        file_size: int = Depends(valid_content_length),
        settings: Settings = Depends(get_settings),
):
    file.file.seek(0)
    data = file.file.read()
    reader = csv.DictReader(data.decode('utf-8').split('\n'))
    orm.clear_holdings(username)
    for row in reader:
        current_date = parsedate(row['date']).date()
        nearest = df.loc[df['date'].sub(current_date).abs().idxmin()]['date']
        current_price = float(df.loc[(df['date'] == nearest)
                                     & (df['Name'] == row['symbol'])]['low'])
        orm.change_stock(
            current_date,
            username,
            row['symbol'],
            'buy',
            row['quantity'],
            current_price,
        )
    return {}
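Splitting the decoded CSV on '\n' breaks quoted fields that contain newlines; csv.DictReader can instead read the whole upload through a text buffer and handle line splitting and quoting itself. A minimal sketch, reusing the parameter name from the example above:

import csv
import io
from fastapi import UploadFile

def read_holdings(file: UploadFile):
    # Decode the whole upload and let the csv module handle line
    # splitting and quoting, instead of splitting on '\n' by hand.
    data = file.file.read().decode('utf-8')
    return list(csv.DictReader(io.StringIO(data)))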
Example #14
def post_file(
        files: List[UploadFile] = File(...),
        package: db_models.Package = Depends(get_package_or_fail),
        dao: Dao = Depends(get_dao),
        auth: authorization.Rules = Depends(get_rules)):
    auth.assert_upload_file(package.channel.name, package.name)

    channel_dir = f'static/channels/{package.channel.name}'
    for file in files:
        with tarfile.open(fileobj=file.file._file, mode="r:bz2") as tar:
            info = json.load(tar.extractfile('info/index.json'))

        parts = file.filename.split('-')
        if parts[0] != package.name or info['name'] != package.name:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)

        dir = f'{channel_dir}/{info["subdir"]}/'
        os.makedirs(dir, exist_ok=True)

        file.file._file.seek(0)
        with open(f'{dir}/{file.filename}', 'wb') as my_file:
            shutil.copyfileobj(file.file, my_file)

        user_id = auth.assert_user()

        dao.create_version(
            package=package,
            platform=info['subdir'],
            version=info['version'],
            build_number=info['build_number'],
            build_string=info['build'],
            filename=file.filename,
            info=json.dumps(info),
            uploader_id=user_id)

    subprocess.run(['conda', 'index', channel_dir])
Example #15
async def get_index(file: UploadFile = File(...)):
    contents = await file.read()
    img_buffer = np.frombuffer(contents, dtype=np.uint8)
    img = turbojpeg.decode(img_buffer)

    # Convert to LAB and rescale the L channel from [0, 100] to [-1, 1]
    lab_img = rgb2lab(img).astype(np.float32)
    L = lab_img[:, :, 0] / 50. - 1.
    # Add batch and channel dimensions: (1, H, W, 1)
    L = np.expand_dims(L, axis=0)
    L = np.expand_dims(L, axis=-1)

    pred_ab = sess.run([output_name],
                       {input_name: np.transpose(L, (0, 3, 1, 2))})[0]

    # Post processing: undo the ab scaling and move channels last (H, W, 2)
    pred_ab = np.squeeze(pred_ab, axis=0) * 110
    pred_ab = np.transpose(pred_ab, (1, 2, 0))

    # Recombine the original L channel with the predicted ab channels
    empty_img = np.zeros((256, 256, 3))
    empty_img[:, :, 0] = lab_img[:, :, 0]
    empty_img[:, :, 1:] = pred_ab
    rgb_pred = lab2rgb(empty_img)

    return {'img': turbojpeg.encode(rgb_pred).decode('ISO-8859-1')}
Example #16
async def faceMatch(file: UploadFile = File(...)):
    contents = await file.read()
    im_pil, cv_im = init_image(contents)
    tic = time.time()
    dets = face_detection(cv_im)
    print('face detection time: ', time.time() - tic)
    if len(dets) == 0:
        return {"code": 400, "success": False, "message": "未检测出人脸,请重新上传"}
    det = dets[0]
    boxes, score = det[:4], det[4]
    im_pil = im_pil.crop([boxes[0], boxes[1], boxes[2], boxes[3]])
    tic = time.time()
    feature_in = generate_feature(im_pil)
    print('feature generate: ', time.time() - tic)
    array_in = string2array(feature_in)
    torch_in_feature = torch.from_numpy(array_in).cuda().unsqueeze(0)
    mysqldb = MySQLDB()
    session = mysqldb.session()
    faces = session.query(Face).all()
    # pairwise_distance returns a distance, so despite the name this tracks the minimum
    max_similarity = 999999.0
    name = None
    tic = time.time()
    for face in faces:
        feature_db = face.feature1
        array_db = string2array(feature_db)
        torch_db_feature = torch.from_numpy(array_db).cuda().unsqueeze(0)
        # cos_similarity = torch.cosine_similarity(torch_in_feature, torch_db_feature, dim=0)
        cos_similarity = torch.pairwise_distance(torch_in_feature,
                                                 torch_db_feature)
        if cos_similarity.cpu().detach().numpy()[0] < max_similarity:
            name = face.name
            max_similarity = cos_similarity.cpu().detach().numpy()[0]

    # print(max_similarity)
    print('match time: ', time.time() - tic)
    return {"code": 200, "success": True, "name": name}
Example #17
async def search_audio(request: Request,
                       Table: str = None,
                       audio: UploadFile = File(...)):
    # Search the uploaded audio in Milvus/MySQL
    try:
        # Save the upload data to server.
        content = await audio.read()
        audio_path = os.path.join(UPLOAD_PATH, audio.filename)
        with open(audio_path, "wb+") as f:
            f.write(content)
        host = request.headers['host']
        ids, paths, distances = do_search(host, Table, audio_path, MODEL,
                                          MILVUS_CLI, MYSQL_CLI)
        names = []
        for i in paths:
            names.append(os.path.basename(i))
        res = dict(zip(paths, zip(names, distances)))
        # Sort results by distance metric, closest distances first
        res = sorted(res.items(), key=lambda item: item[1][1])
        LOGGER.info("Successfully searched similar audio!")
        return res
    except Exception as e:
        LOGGER.error(e)
        return {'status': False, 'msg': str(e)}, 400  # str(e) so the body is JSON-serializable
Example #18
async def save_weights(minio_param: MinioParam = Depends(),
                       file: UploadFile = File(...)):
    """
    Upload finetuned weights to an minio s3 storage container. 
    Include minio params as form data along with file for uploading 

    Sample form data:

    .. highlight:: python
    .. code-block:: python
       
        {
        'minio_url': MINIO_URL
        'minio_access_key': MINIO_ACCESS_KEY
        'minio_secret_key': MINIO_SECRET_KEY
        'bucket_name': BUCKET_NAME,
        'object_name': OBJECT_NAME
        } 
    """
    try:
        message = upload_weights(minio_param, file)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"{str(e)}")
    return {'message': message}
Example #19
def lung_ct_endpoint(file: bytes = File(...)):

    nparr = np.frombuffer(file, np.uint8)  # np.fromstring is deprecated for binary data
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

    # Inference
    mask = lung_seg_model.predict(img)
    img = lung_seg_model.get_visualized_img(img, mask)

    # Generate output image
    # out_img = cv2.hconcat([img, mask])
    out_img = img

    response = {
        "success": True,
        "prepend_original_image": False,
        "results": [{
            "label": "Segmentation Map",
            "image": get_base64_png(out_img)
        }]
    }
    return response
Example #20
async def user_thumbnail(
        file: UploadFile = File(..., description='File to upload'),
        user: User = Depends(cognito_signed_in),
        db_session: AsyncSession = Depends(yield_db_session),
        boto_session: AioBaseClient = Depends(get_boto),
) -> Any:
    """
    Upload a profile thumbnail.
    This is behind a CDN, so it might take a little while for it to update
    """
    if not file:
        raise HTTPException(status_code=400, detail='You must provide a file.')
    allowed_formats = ['image/jpg', 'image/jpeg', 'image/png']
    if file.content_type not in allowed_formats:
        raise HTTPException(
            status_code=400,
            detail=f'Currently only support for {",".join(allowed_formats)}')

    # Save thumbnail
    temp_name = uuid4().hex
    output_name = f'{temp_name}.png'
    async with aiofiles.open(temp_name, 'wb') as img:
        while content := await file.read(1024):
            await img.write(content)  # type: ignore
Example #21
async def create_upload_file(file: UploadFile = File(...)):
    """Loads the passed file and performs processing.

	Args:
		file: A csv file that has an 'address' field that contains data that needs to be preprocessed.

	Returns:
		A new file with the original strings and the result of processing.

	"""
    print('/api/file/upload/')

    try:
        suffix = str(uuid.uuid4()).replace('-', '').upper()
        good_filepath = suffix + '.csv'
        bad_filepath = 'bad.' + suffix + '.csv'

        save_upload_file(file, Path(bad_filepath))
        process(bad_filepath, good_filepath)
        return {'filename': suffix}

    except Exception as e:
        print(e)
        return {"status": "bad"}
Example #22
async def data_matrix_recognition(
    model_name: str,
    image: UploadFile = File(
        ..., description="Image to perform Data Matrix decoding:"),
):
    """
        Takes an image and returns decoded data matrices.

        The image is passed to the Data-Matrix-Service for text extraction

        :param model_name: Model name or model hash

        :param image: Image file

        :return: Text fields with the decoded data matrices inside

    """
    # run data_matrix_service
    response = None
    try:
        image = Image.open(image.file)
        response = data_matrix_service(image)
    except:
        raise HTTPException(
            status_code=500,
            detail=
            'Unexpected Error during Inference (Determination of Data Matrices)'
        )

    if not response:
        raise HTTPException(
            status_code=400,
            detail='Inference (Determination of Data Matrices) is not Possible'
        )

    return response
Example #23
async def applications_upload(
        application_id: int = Query(
            ...,
            description="id of the application for which to upload a file"),
        upload_file: UploadFile = File(
            ...,
            description="The gzipped application tar-file to be uploaded"),
        content_length: int = Header(...),
):
    """
    Upload application tarball using an authenticated user token.
    """
    if content_length > settings.MAX_UPLOAD_FILE_SIZE:
        raise HTTPException(
            status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
            detail=
            f"Uploaded files cannot exceed {settings.MAX_UPLOAD_FILE_SIZE} bytes.",
        )
    s3man.put(upload_file, app_id=str(application_id))

    update_query = (applications_table.update().where(
        applications_table.c.id == application_id).values(
            dict(application_uploaded=True)))
    await database.execute(update_query)
Example #24
async def create_files(file: UploadFile = File(...)):
    '''
    Receives an uploaded image file and saves it to the model paths
    '''
    # Create uploads directory if not exists
    dir_path = make_dir(dir_path=f'{os.getenv("UPLOADS_PATH")}/model/tmp/')

    # Create outputs directory if not exists
    output_path = make_dir(dir_path=f'{os.getenv("MODEL_OUTPUTS")}')

    # Create file from request bytes
    file_path = file_from_bytes(file.file, dir_path, 'tmp.jpeg')

    # Yolo Config Dict
    config = {
        'weights': 'yolov5s.pt',
        'source': str(dir_path),
        'output': str(output_path),
        'img_size': 640,
        'conf_thres': 0.4,
        'iou_thres': 0.5,
        'device': 'cpu',  # or gpu number: 0,1,2,3
        'view_img': False,
        'save_txt': 'store_true',
        'classes': '',
        'agnostic_nms': 'store_true',
        'augment': 'store_true',
        'update': False
    }
    # Yolo Detect Objects
    detect(config)

    # Image with objects path
    output_path = make_dir(dir_path=f'{os.getenv("MODEL_OUTPUTS")}')
    # Return image with objects as response
    return FileResponse(str(output_path / 'tmp.jpeg'))
Example #25
async def change_emoji(
        module: 'GeneralModule',
        guild: discord.Guild,
        member: discord.Member,
        emoji_file: UploadFile = File(),
        emoji: str = Form(),
):
    if not is_emoji(emoji):
        raise WrongInputException(detail='emoji is wrong')

    if not module.bot.image_creator.emoji_loader.save_downloaded_emojis:
        raise MethodNotAvailableException(detail='cannot save emojis')

    emoji_id = from_char(emoji)
    module.bot.image_creator.emoji_loader.cached_images.clear()
    with open(
            os.path.join(module.bot.image_creator.emoji_loader.emoji_path,
                         emoji_id + '.png'), 'wb') as f:
        f.write(emoji_file.file.read())
    module.bot.logger.info('saved new image for emoji {}'.format(emoji_id))

    return {
        'msg': 'success',
    }
Example #26
async def sendEmail(token: str = Form(None),
                    files: List[UploadFile] = File(None),
                    recipients: str = Form(...),
                    text: str = Form(...),
                    subject: str = Form(...),
                    db: Session = Depends(get_db)):
    if not token:
        raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
                            detail='You do not have permission to send email')
    if recipients:
        try:
            # Convert the comma-separated recipients string into a list
            recipients = recipients.split(',')
            if files:
                await utils.send_email(recipients, subject, text, files)
            else:
                await utils.send_email(recipients, subject, text)
            # Add a record of the send operation
            token_decode = jwt.decode(token,
                                      Config.SECRET_KEY,
                                      algorithms=Config.ALGORITHM)
            openid = token_decode['openid']
            current_user = db.query(
                models.User).filter(models.User.Openid == openid).first()
            record = models.Record(Owner=current_user.Department_name,
                                   Operator=current_user.Nick,
                                   Operation='sent an email')
            db.add(record)
            db.commit()
            return {'code': 0}
        except:
            raise HTTPException(
                status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
                detail='Failed to send the email')
    raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
                        detail='No resume was selected')
Example #27
async def post_update(
        response: Response,
        post_id: int,
        db=Depends(get_db),
        current_user=Depends(get_current_active_user),
        source_url: t.Optional[str] = Form(None),
        title: t.Optional[str] = Form(None),
        content: t.Optional[str] = Form(None),
        image: t.Optional[UploadFile] = File(None),
):
    """
    Post update
    """
    fields_to_update = {}
    if title:
        fields_to_update['title'] = title
    if source_url:
        fields_to_update['source_url'] = source_url
    if content:
        fields_to_update['content'] = content
    if image:
        image_content = await image.read()
        image_name = save_post_image(image.filename, image_content)
        fields_to_update['image_url'] = image_name
    post, user = update_post(db,
                             fields_to_update,
                             post_id=post_id,
                             user_id=current_user.id)
    return Post(id=post.id,
                title=post.title,
                user_id=post.user_id,
                image_url=post.image_url,
                source_url=post.source_url,
                content=post.content,
                created=post.created.isoformat(),
                user=user)
Example #28
def pnuemonia_router(image_file: bytes = File(...)):
    model = Train().define_model()
    model.load_weights('classifier/models/weights.h5')

    image = Image.open(io.BytesIO(image_file))

    if image.mode != 'L':
        image = image.convert('L')

    image = image.resize((64, 64))
    image = img_to_array(image)/255.0
    image = image.reshape(1, 64, 64, 1)

    #graph = tf.get_default_graph()
    graph = tf.compat.v1.get_default_graph()
    

    with graph.as_default():
        prediction = model.predict_proba(image)

    predicted_class = 'pneumonia' if prediction[0] > 0.5 else 'normal'

    return {'predicted_class': predicted_class,
            'pneumonia_probability': str(prediction[0])}
Example #29
async def transcribe(request: Request,
                     file: UploadFile = File(...),
                     email: str = Form(...),
                     title: str = Form(...),
                     sentence_num: str = Form(...)):
    print("START")
    contents = await file.read()
    with open(vid_file, 'wb') as wfile:
        wfile.write(contents)
    # Insert Local Video File Path
    clip = mp.VideoFileClip(vid_file)
    # Insert Local Audio File Path
    clip.audio.write_audiofile(aud_file)

    sound = AudioSegment.from_file(aud_file, format='mp3')
    sound.export(wav_file, format="wav")
    text = get_large_audio_transcription(wav_file)
    print("\nFull text:", text)

    # html_content = ""
    #
    # for item in text:
    #     html_content += "<button onclick=\"jump({})\">".format(item[1]) + item[0] + "</button>"

    print("the email is: ", email)
    #email_user(email, text)
    summary = getSummarization(title, text[:500], sentence_num)
    tags = getTags(text[:500])
    return templates.TemplateResponse('transcribe.html',
                                      context={
                                          'request': request,
                                          'text': text,
                                          'email': email,
                                          'summary': summary,
                                          'tags': tags
                                      })
Example #30
async def upload_data_generation_file(background_tasks: BackgroundTasks,
                                      doc: UploadFile = File(...),
                                      current_user: User = Depends(
                                          auth.get_current_user_and_bot)):
    """
    Uploads document for training data generation and triggers event for intent creation
    """
    TrainingDataGenerationProcessor.is_in_progress(current_user.get_bot())
    TrainingDataGenerationProcessor.check_data_generation_limit(
        current_user.get_bot())
    file_path = await Utility.upload_document(doc)
    TrainingDataGenerationProcessor.set_status(
        bot=current_user.get_bot(),
        user=current_user.get_user(),
        status=EVENT_STATUS.INITIATED.value,
        document_path=file_path)
    token = auth.create_access_token(data={"sub": current_user.email})
    background_tasks.add_task(Utility.trigger_data_generation_event,
                              current_user.get_bot(), current_user.get_user(),
                              token.decode('utf8'))
    return {
        "message":
        "File uploaded successfully and training data generation has begun"
    }