def read_file(file: UploadFile = File(...)):
    """Log the name and the underlying file object of an uploaded file."""
    uploaded_name = file.filename
    uploaded_obj = file.file
    print(uploaded_name)
    print(uploaded_obj)
async def upload_user_photo(response: Response, profile_photo: bytes = File(...)):
    """Report the uploaded photo's size via a response header and the body,
    and set a demo cookie on the response."""
    print("Here")
    photo_size = len(profile_photo)
    response.headers["x-file-size"] = str(photo_size)
    response.set_cookie(key="cookie-api", value="test")
    return {"file_size": photo_size}
def enroll_user(audio: UploadFile = File(...), username: str = Form(...)):
    """Store the uploaded audio sample, enroll it under ``username``,
    then clear the cached audio embeddings."""
    stored_path = store_audio(audio, username)
    Model.enroll(stored_path, username)
    clear_audio_embeddings()
    return "user profile added"
async def upload_file(created_by: int = Form(...), parent: int = Form(...), f: UploadFile = File(...), db: Session = Depends(utils.get_db)):
    """Save an uploaded file under an existing folder and mirror it as a DB row.

    Raises 404 if the parent folder does not exist and 401 if the requester
    is not the folder's creator.
    """
    db_folder = crud.get_folder_by_id(db, parent)

    # The parent folder must exist ...
    if not db_folder:
        raise HTTPException(status_code=404, detail="Folder not found")
    # ... and belong to the requesting user.
    if db_folder.created_by != created_by:
        raise HTTPException(status_code=401, detail="Unauthorized to upload files to this folder")

    # On a name collision within the folder, rename the new upload.
    if crud.get_file_by_name_in_parent(db, f.filename, parent):
        f.filename = "new_" + f.filename

    # Persist the payload to disk inside the parent folder's path.
    abs_path = os.path.join(db_folder.abs_path, f.filename)
    with open(abs_path, "wb") as buffer:
        shutil.copyfileobj(f.file, buffer)

    # Record the stored file in the database.
    new_file = schemas.FileCreate(
        name=f.filename,
        abs_path=abs_path,
        is_folder=False,
        parent=parent,
        created_by=created_by,
        created_on=date.today(),
        size=os.path.getsize(abs_path),
        file_type=os.path.splitext(abs_path)[1][1:],
    )
    return crud.create_file(db=db, f=new_file)
async def create_upload_files(files: List[UploadFile] = File(...)):
    """Return the filenames of every uploaded file."""
    names = []
    for upload in files:
        names.append(upload.filename)
    return {"filenames": names}
async def gender(file: UploadFile = File(...)):
    """Classify the speaker's gender from an uploaded audio file."""
    samples, sample_rate = librosa.load(file.file)
    feature_vector = create_features(samples, sample_rate)
    predicted_value = model.predict(feature_vector)
    predicted_name = model.get_target_name(predicted_value)
    return {"class_value": predicted_value, "class_name": predicted_name}
async def upload_attachments(id: int, file: UploadFile = File(...), user: UserTable = Depends(any_user)):
    """Attach a file to entity ``id`` after verifying the user's access
    (403 is raised by check_access on denial)."""
    await check_access(id, user, status.HTTP_403_FORBIDDEN)
    result = await upload_attachment(id, file, user)
    return result
async def posture(file: bytes = File(...)):
    """Detect body keypoints in an uploaded image and return them by joint name.

    Runs the pose-estimation pipeline (``process``) on the decoded image and
    reports, for each named joint, the string form of its first peak's (x, y)
    coordinates — or ``None`` when the joint is missing or its coordinate
    slice is falsy (matching the original per-joint try/except behavior).

    This replaces twelve copy-pasted try/except blocks with one loop over a
    joint-name → peak-index table; per-joint semantics are unchanged.
    """
    # Peak index for every joint we report, in the response's key order.
    joint_indices = {
        'rightEar': 16,
        'leftEar': 17,
        'rightShoulder': 2,
        'leftShoulder': 5,
        'nose': 0,
        'neckJoint': 1,
        'leftKnee': 12,
        'leftFoot': 13,
        'rightKnee': 9,
        'rightFoot': 10,
        'leftHand': 7,
        'rightHand': 4,
    }

    nmpyarray = np.array(Image.open(BytesIO(file)))
    params, model_params = config_reader()
    canvas, all_peaks = process(nmpyarray, params, model_params, modelpos)

    joints = {name: None for name in joint_indices}
    for name, idx in joint_indices.items():
        # Mirrors the original per-joint try/except: any missing index,
        # empty peak list, or ambiguous/falsy coordinate pair leaves None.
        try:
            coords = all_peaks[idx][0][0:2]
            if coords:
                joints[name] = str(coords)
        except Exception:
            pass

    # Original code printed these six joints for debugging; preserved.
    for name in ('rightEar', 'leftEar', 'rightShoulder',
                 'leftShoulder', 'nose', 'neckJoint'):
        print(joints[name])

    return joints
async def upload_file(file: UploadFile = File(...)):
    """Acknowledge an upload and echo its declared MIME type."""
    return {
        "message": " Valid file uploaded",
        "filetype": file.content_type,
    }
async def upload_file(file: UploadFile = File(...)):
    """Save an uploaded picture into the picture directory.

    Returns a mapping with the stored filename.

    Fix: the original signature carried ``response_model=Dict[str, float]``
    as a *function parameter*. In FastAPI ``response_model`` is an option of
    the route decorator, not an endpoint argument, so as written it was inert
    (and its declared value type did not match the ``str`` returned here).
    The parameter has been dropped; declare ``response_model`` on the route
    decorator if a response schema is wanted.
    """
    path = Path(picture_dir, file.filename)
    await uploadPicture(file, path)
    return {"filename": file.filename}
async def add_picture_to_user(id: int, picture: UploadFile = File(...), db: Session = Depends(get_db)):
    """Delegate storing the user's picture to the user service layer."""
    result = await userService.add_picture_to_user(db, id, picture)
    return result
async def create_upload_file(file: UploadFile = File(...)):
    """Load an embedding from the uploaded file into the module-global cache."""
    global embed
    # NOTE(review): the embedding lives in module state shared by all
    # requests — confirm concurrent uploads are acceptable here.
    embed = load_embedding(file.file)
    return 200
async def predict(image: bytes = File(...)):
    """Run face detection on the raw uploaded image bytes."""
    return detect_face(image)
def create_file(db: Session = Depends(deps.get_db), updateSupport: bool = False, file: UploadFile = File(...)):
    """Bulk-import users from an uploaded Excel workbook.

    Each data row (from row 2) carries: username, nickname, department name,
    identity card, phone, sex label, status label, and comma-separated post
    labels. New users are inserted with linked department/post rows; existing
    users are updated (and relinked) only when ``updateSupport`` is true.
    Any failure is reported via HTTPException.

    Fix over the original: ``wb`` is pre-bound to ``None`` so the ``finally``
    block no longer raises NameError (masking the real error) when reading
    the upload or opening the workbook itself fails.
    """

    def check_dict_label(label, code):
        # Resolve a human-readable dict label to its Dict_Data row for the
        # given dict-type code; .one() raises if the label is unknown.
        return db.query(models.Dict_Data).outerjoin(
            models.Dict_Type,
            models.Dict_Type.id == models.Dict_Data.type_id).filter(
                models.Dict_Data.label == label,
                models.Dict_Type.code == code).one()

    wb = None  # pre-bind so `finally` is safe if load_workbook never runs
    try:
        io = BytesIO(file.file.read())
        wb = load_workbook(io, read_only=True)
        ws = wb.active  # first worksheet
        for row in ws.iter_rows(min_row=2):
            # NOTE(review): .strip("") strips no characters; .strip() was
            # probably intended — kept as-is to preserve behavior.
            sex = check_dict_label(row[5].value.strip(""), "sex").label
            status = check_dict_label(row[6].value.strip(""), "user_status").label
            user = {
                "username": row[0].value.strip(""),
                "nickname": row[1].value.strip(""),
                "identity_card": row[3].value.strip(""),
                "phone": row[4].value.strip(""),
                "sex": sex,
                "status": status,
                "hashed_password": get_password_hash(settings.INIT_PASSWORD)
            }
            department = db.query(models.Department).filter(
                models.Department.name == row[2].value.strip("")).one()
            posts = db.query(models.Dict_Data).outerjoin(
                models.Dict_Type,
                models.Dict_Type.id == models.Dict_Data.type_id).filter(
                    models.Dict_Data.label.in_(
                        row[7].value.strip("").split(",")),
                    models.Dict_Type.code == "post").all()
            exist_user = db.query(
                models.User).filter(models.User.username == user["username"])
            if not exist_user.first():
                # Brand-new user: insert plus department/post link rows.
                user = models.User(**user)
                db.add(user)
                db.flush()  # flush to obtain user.id for the link rows
                user_department = {
                    "user_id": user.id,
                    "department_id": department.id
                }
                db.add(models.User_Department(**user_department))
                user_dict = [{
                    "user_id": user.id,
                    "dict_id": post.id
                } for post in posts]
                db.bulk_insert_mappings(models.User_Dict, user_dict)
            elif updateSupport:
                # Existing user: overwrite fields and relink department/posts.
                exist_user_id = exist_user.one().id
                exist_user.update(user)
                db.flush()
                db.query(models.User_Department).filter(
                    models.User_Department.user_id == exist_user_id).delete()
                user_department = {
                    "user_id": exist_user_id,
                    "department_id": department.id
                }
                db.add(models.User_Department(**user_department))
                db.query(models.User_Dict).filter(
                    models.User_Dict.user_id == exist_user_id).delete()
                user_dict = [{
                    "user_id": exist_user_id,
                    "dict_id": post.id
                } for post in posts]
                db.bulk_insert_mappings(models.User_Dict, user_dict)
        return {"code": 20000, "message": "导入成功"}
    except Exception as exc:
        # NOTE(review): HTTP 200 for a failure is unusual — the client
        # apparently keys off the JSON body, not the status; kept as-is.
        raise HTTPException(status_code=200,
                            detail=f"导入失败,请检查数据! Error Reason: {exc}")
    finally:
        if wb is not None:
            wb.close()
async def _create(flow: UploadFile = File(...), workspace_id: Optional[uuid.UUID] = Body(None)):
    """Store an uploaded flow file under the given workspace, translating any
    failure into a Runtime400Exception."""
    try:
        stored = store.add(flow.file, workspace_id)
    except Exception as ex:
        raise Runtime400Exception from ex
    return stored
async def qrcode_image_decrypt(file: bytes = File(...)):
    """Decode QR-code content from raw uploaded image bytes."""
    image = Image.open(BytesIO(file))
    return decode(image)
async def transform_image(
    response: Response,
    preview: bool = False,
    parameters: Optional[str] = Form(None),
    transformation: Optional[int] = Form(None),
    transformation_step: Optional[int] = Form(None),
    img_url: Optional[str] = Form(None),
    preview_url: Optional[str] = Form(None),
    image: Optional[UploadFile] = File(None),
    id: Optional[str] = Cookie(None),
    step_count: Optional[str] = Cookie(None),
):
    """Apply an augmentation step to an image and save the result.

    The source image comes from exactly one of: a fresh upload (``image``),
    a previously transformed image (``img_url``), or a saved preview
    (``preview_url``). Session state is carried in the ``id`` and
    ``step_count`` cookies; transformed images live under ``images/<id>/``
    and previews under ``image_previews/``. Returns the URL of the saved
    result.
    """
    # First request of a session: an upload is mandatory and history
    # references are not allowed yet; mint an id cookie and a work folder.
    if id is None:
        if image is None:
            raise HTTPException(status_code=400,
                                detail="Image has to be uploaded")
        elif img_url is not None or preview_url is not None:
            raise HTTPException(
                status_code=400,
                detail=
                "Image has to be added to the history before refering it",
            )
        id = str(uuid.uuid4())
        response.set_cookie(key="id", value=id)
        folder_actions.mkdir_p("images/" + id)
        step_count = "0"
    # A fresh upload (or missing cookie) resets the step counter; a normal
    # (non-preview) transform advances it.
    if step_count is None or image is not None:
        step_count = "0"
        response.set_cookie(key="step_count", value=step_count)
    elif not preview:
        step_count = str(int(step_count) + 1)
        response.set_cookie(key="step_count", value=step_count)
    img_extension = ".png"
    if image is not None:
        # NOTE(review): split(".")[1] breaks on names with no/multiple dots
        # (e.g. "a.b.png") — os.path.splitext would be safer; confirm inputs.
        img_extension = "." + image.filename.split(".")[1]
        image = load_image_into_numpy_array(await image.read())
        # New upload invalidates the whole per-session history folder.
        folder_actions.delete_files(folder="images/" + str(id))
    elif img_url is not None or preview_url is not None:
        img_url_new = "images/" + str(id)
        if img_url is not None:
            # Re-reading an earlier transform step from the history folder.
            if transformation_step is None:
                raise HTTPException(
                    status_code=400,
                    detail="transformation_step field required")
            img_extension = "." + img_url.split(".")[1]
            img_url_new += ("/transformed_img_" + str(transformation_step) +
                            img_extension)
            if not preview:
                # Rewinding to an earlier step: reset the counter past it and
                # drop every later transformed image.
                step_count = str(int(transformation_step) + 1)
                response.set_cookie(key="step_count", value=step_count)
                folder_actions.delete_files(
                    folder="images/" + str(id),
                    split_string="transformed_img_",
                    low=int(transformation_step),
                )
        elif preview_url is not None:
            img_extension = "." + preview_url.split(".")[1]
            img_url_new = "image_previews/" + str(id) + img_extension
        image = np.array(Image.open(img_url_new).convert("RGB"))
    else:
        raise HTTPException(status_code=400,
                            detail="img_url or preview_url required")
    transformed_image = image
    # NOTE(review): this runs the transform for img_url references and fresh
    # uploads, but skips it when only preview_url is given (the preview is
    # re-saved as-is) — confirm that skip is intentional.
    if img_url is not None or preview_url is None:
        # parameters arrives double-JSON-encoded; hinted_tuple_hook restores
        # tuple-typed arguments.
        parameters = json.loads(json.loads(parameters),
                                object_hook=hinted_tuple_hook)
        transform = augmentations.augmentations_dict[transformation](
            **parameters)
        transformed = transform(image=image)
        transformed_image = transformed["image"]
    im = Image.fromarray(transformed_image)
    img_path = "images/" + str(id)
    if preview:
        img_path = "image_previews/" + str(id) + img_extension
    else:
        img_path += "/transformed_img_" + str(step_count) + img_extension
    im.save(img_path)
    return {"img_path": SERVER_BASE_URL + img_path}
def get_recommendation(image: UploadFile = File(...), type: str = Form(...)):
    """Generate an outfit recommendation for the uploaded garment image of
    the given garment ``type``."""
    engine, model, new_type_spaces, gpu = build_system()
    garment = Image.open(image.file)
    print(type)
    outfit = generate_outfit(garment, type, engine, model, new_type_spaces, gpu)
    return JSONResponse(status_code=200, content=outfit)
def video_item(title: str = Form(...), file: UploadFile = File(...), description: str = Form(...)):
    """Create a video item from its title, uploaded file, and description."""
    builder = CreateVideoItem.construct()
    return builder.create(title, file, description)
def upVersion(ipns: str = Form(...), title: str = Form(...), version: str = Form(...), build: str = Form(...), log: str = Form(...), apk: UploadFile = File(None)):
    """Update a published app version entry under an IPNS name.

    Looks up the current IPFS hash for ``ipns`` in redis, optionally stores a
    newly uploaded APK (locally and in IPFS), rewrites the matching ``build``
    entry in update.json, and republishes the directory under ``ipns``.
    Returns the new root hash, or the string 'no Version.' when the IPNS name
    is unknown.
    """
    red = redis.Redis(host=conf['redisCacheServer'][0]["host"],
                      port=conf['redisCacheServer'][0]["port"],
                      decode_responses=True)
    # redis maps the IPNS name to the currently published IPFS root hash.
    ipfs = red.get(ipns)
    if ipfs is None:
        return 'no Version.'
    files = api.object.links(ipfs)
    # Start from an empty unixfs dir; replaced below if the storage subdir
    # already exists in the published tree.
    dirhash = api.object.new("unixfs-dir")
    for fl in files['Links']:
        if fl['Name'] == conf['storageSubPath']:
            dirhash = fl
    if apk:
        # Persist the uploaded APK locally, then add it to IPFS.
        apkname = "%s_%s_%s.apk" % (conf['projectName'].lower(), version,
                                    build)
        apkpath = os.path.join(conf['localStorage'], conf['storageSubPath'])
        if not os.path.isdir(apkpath):
            os.mkdir(apkpath)
        with open(os.path.join(apkpath, apkname), "wb") as f:
            f.write(apk.file.read())
        apkhash = api.add(os.path.join(apkpath, apkname))
    update = getupdatejson(ipfs)
    newupdate = {
        "title": conf['projectName'],
        "data": [],
    }
    # Copy every version entry, replacing the one whose build matches.
    for item in update['data']:
        if not item['build'] == build:
            newupdate['data'].append(item)
        else:
            if apk:
                # Swap the old APK link for the new one in the storage dir.
                apk_file = os.path.join(conf['storageSubPath'], apkname)
                dirhash = api.object.patch.rm_link(
                    dirhash['Hash'], item['apk_file'].split('/')[1])
                dirhash = api.object.patch.add_link(dirhash['Hash'], apkname,
                                                    apkhash['Hash'])
            else:
                apk_file = item['apk_file']
            # NOTE(review): the replacement entry is only appended when an
            # existing item matches ``build`` — a brand-new build number is
            # never added by this loop; confirm that is intended.
            newupdate['data'].append({
                "title": title,
                "version": version,
                "build": build,
                "log": log,
                "apk_file": apk_file,
                "datetime": int(time.time())
            })
    newupdate['last'] = build
    updatehash = api.add_json(newupdate)
    # Rebuild the published root: UI template + storage dir + update.json.
    hash = conf['uiTemplate']
    hash = api.object.patch.add_link(hash, conf['storageSubPath'],
                                     dirhash['Hash'])
    hash = api.object.patch.add_link(hash['Hash'], 'update.json', updatehash)
    publish(ipns, hash['Hash'])
    return {"newhash": hash['Hash']}
async def add_package_version(response: Response, package_version: PackageVersion = Depends(), set_active: bool = False, canary_next: bool = False, canary_id: str = "", file: bytes = File(...)):
    """Uploads a package version with binary package

    Arguments:
        response (Response): Starlette response object for setting return codes
        package_version (PackageVersion): Package description
        set_active (bool): Default False, if true this version will be set active
        canary_next (bool): Default False; accepted but not used in this body
        canary_id (str): Optional node id that should receive this version as a canary
        file (bytes): File uploaded

    Returns:
        HTTP_201_CREATED if successful
        HTTP_404_NOT_FOUND if package (or the canary node) is not found
        HTTP_400_BAD_REQUEST if the version already exists or is negative
    """
    package_version_dict = package_version.__dict__
    packages = DB.table("packages")
    package_versions = DB.table("package_versions")
    query = Query()

    package = packages.get(query.name == package_version_dict["name"])
    if package is None:
        msg = "Package not found"
        logging.info(msg)
        response.status_code = status.HTTP_404_NOT_FOUND
        return {
            "error": "confrm-005",
            "message": msg,
            "detail": "While attempting to add a new package version the package name" +
                      " given was not found"
        }

    if canary_id:
        nodes = DB.table("nodes")
        node_doc = nodes.get(query.node_id == canary_id)
        if not node_doc:
            # FIX: this branch previously reported "Package not found"
            # (copy-pasted from confrm-005) although it is the canary *node*
            # that is missing; the message/detail now describe the real error.
            msg = "Node not found"
            logging.info(msg)
            response.status_code = status.HTTP_404_NOT_FOUND
            return {
                "error": "confrm-026",
                "message": msg,
                "detail": "While attempting to add a new package version the canary node id" +
                          " given was not found"
            }

    existing_version = package_versions.get(
        (query.name == package_version_dict["name"]) &
        (query.major == package_version_dict["major"]) &
        (query.minor == package_version_dict["minor"]) &
        (query.revision == package_version_dict["revision"]))
    if existing_version is not None:
        msg = "Version already exists for package"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-006",
            "message": msg,
            "detail": "While attempting to add a new package version the version given " +
                      " was found to be already used"
        }

    if (package_version_dict["major"] < 0 or
            package_version_dict["minor"] < 0 or
            package_version_dict["revision"] < 0):
        msg = "Version number elements cannot be negative"
        logging.info(msg)
        response.status_code = status.HTTP_400_BAD_REQUEST
        return {
            "error": "confrm-017",
            "message": msg,
            "detail": "While attempting to add a new package version the version given " +
                      " was found to contain negative numbers"
        }

    # Package was uploaded, create hash of binary
    _h = SHA256.new()
    _h.update(file)

    # Store the binary in the data_store as a base64 encoded file
    filename = uuid.uuid4().hex
    save_file = os.path.join(CONFIG["storage"]["data_dir"], filename)
    with open(save_file, "wb") as ptr:
        ptr.write(base64.b64encode(file))

    # Escape the strings before they enter the database
    for key in package_version_dict.keys():
        if isinstance(package_version_dict[key], str):
            package_version_dict[key] = escape(package_version_dict[key])

    # Update with blob details
    package_version_dict["date"] = round(time.time())
    package_version_dict["hash"] = _h.hexdigest()
    package_version_dict["blob_id"] = filename

    # Store in the database
    package_versions.insert(package_version_dict)

    version_str = str(package_version_dict["major"]) + "." + \
        str(package_version_dict["minor"]) + "." + \
        str(package_version_dict["revision"])

    if set_active is True:
        package["current_version"] = version_str
        packages.update(package, query.name == package["name"])

    # If this is being set active, or deployed as a canary, clear any
    # existing canary markers for the package first.
    if set_active is True or canary_id:
        unset_node_canary_for_package(package["name"])

    if canary_id:
        result = await node_package(node_id=canary_id,
                                    package=package_version_dict["name"],
                                    version=version_str,
                                    response=response)
        print(result)

    return {}
async def load_weights(image: UploadFile = File(...)):
    """Load model weights named after the uploaded file and snapshot the model.

    NOTE(review): only ``image.filename`` is used — the uploaded bytes are
    never saved, so this only works if a weights file with that exact name
    already exists where ``model.load_weights`` looks. Confirm intended.
    """
    model.load_weights(image.filename)
    # Keep a deep-copied snapshot of the freshly loaded model state.
    datasets.append(copy.deepcopy(model))
    load_data()
    # NOTE(review): returning the model object relies on the framework being
    # able to serialize it — verify.
    return {'consolidated weights': model}
def get_segmentation_map(file: bytes = File(...)):
    """Get segmentation maps from image file"""
    segmented = get_segments(model, file)
    buffer = io.BytesIO()
    segmented.save(buffer, format="PNG")
    # Return the rendered segmentation as a raw PNG response.
    return Response(buffer.getvalue(), media_type="image/png")
async def create_upload_files(files: List[UploadFile] = File(...)):
    """Persist the first uploaded file and return all uploaded filenames."""
    names = [upload.filename for upload in files]
    # Only the first file is written to disk (existing behavior).
    destination = os.path.join(UPLOAD_DIR, names[0])
    save_upload_file(upload_file=files[0], destination=destination)
    return {"filenames": names}
async def create_files(files: List[bytes] = File(...)):
    """Report the byte length of every uploaded file."""
    sizes = []
    for payload in files:
        sizes.append(len(payload))
    return {"file_sizes": sizes}
async def predict_image(file: UploadFile = File(...)):
    """Run the model on an uploaded image and return the inference result."""
    raw = await file.read()
    prepared = preprocess(read_image(raw))
    infer = predict(prepared)
    print(infer)
    return infer
async def get_fields(file: bytes = File(...)):
    """Extract form fields from the uploaded document bytes."""
    return make_fields(file)
def upgrade(self, file: UploadFile = File(...)):
    """Apply an uploaded opsi upgrade package within this program's lifespan."""
    upgrade_opsi(file, self.program.lifespan)
def verify_user_identity(audio: UploadFile = File(...), username: str = Form(...)):
    """Verify the uploaded voice sample against the enrolled profile for
    ``username`` and return the model's verdict."""
    sample_path = store_audio(audio, username)
    verdict = Model.verify(sample_path, username)
    clear_audio_embeddings()
    return verdict
async def create_upload_file(file: UploadFile = File(...)):
    """Hand the uploaded file to the add() pipeline and return its result."""
    return add(file)