async def get_video_list(url: str = Form(None)):
    """Scrape and return the playable-video data for *url* via the Spider helper."""
    return await Spider.play_html(url)
async def add_comment(location_id: int = Form(...), title: str = Form(...), comment: str = Form(...)):
    """Store a new comment for a location, then return the refreshed comment list."""
    pg_handler.add_comment(location_id, title, comment)
    refreshed = pg_handler.get_comments_by_location(location_id)
    return {'comments': refreshed}
async def post_user(request: Request, username: str = Form(...), password: str = Form(...)):
    """Render user.html with the submitted credentials.

    NOTE(review): the raw password is placed in the template context and may
    end up in rendered HTML — confirm this is intentional.
    """
    context = {"request": request, "username": username, "password": password}
    return templates.TemplateResponse("user.html", context)
async def transform_image(
    response: Response,
    preview: bool = False,
    parameters: Optional[str] = Form(None),
    transformation: Optional[int] = Form(None),
    transformation_step: Optional[int] = Form(None),
    img_url: Optional[str] = Form(None),
    preview_url: Optional[str] = Form(None),
    image: Optional[UploadFile] = File(None),
    id: Optional[str] = Cookie(None),
    step_count: Optional[str] = Cookie(None),
):
    """Apply an augmentation to an uploaded or previously stored image.

    Session state lives in two cookies: ``id`` (per-user image folder) and
    ``step_count`` (index of the next transformation step). The source image
    comes from exactly one of: a fresh upload (``image``), a prior step
    (``img_url`` + ``transformation_step``), or a saved preview
    (``preview_url``). Returns a JSON dict with the URL of the saved result.
    """
    # First visit: a fresh upload is required; history references are invalid
    # because no per-session folder exists yet.
    if id is None:
        if image is None:
            raise HTTPException(status_code=400, detail="Image has to be uploaded")
        elif img_url is not None or preview_url is not None:
            raise HTTPException(
                status_code=400,
                detail="Image has to be added to the history before refering it",
            )
        # Create the session folder and reset the step counter.
        id = str(uuid.uuid4())
        response.set_cookie(key="id", value=id)
        folder_actions.mkdir_p("images/" + id)
        step_count = "0"
    # A new upload restarts the step counter; previews do not advance it.
    if step_count is None or image is not None:
        step_count = "0"
        response.set_cookie(key="step_count", value=step_count)
    elif not preview:
        step_count = str(int(step_count) + 1)
        response.set_cookie(key="step_count", value=step_count)
    img_extension = ".png"
    if image is not None:
        # Fresh upload: keep the uploaded extension and clear any old history.
        img_extension = "." + image.filename.split(".")[-1]
        image = load_image_into_numpy_array(await image.read())
        folder_actions.delete_files(folder="images/" + str(id))
    elif img_url is not None or preview_url is not None:
        img_url_new = "images/" + str(id)
        if img_url is not None:
            # Re-running from an earlier step: rewind the counter and drop
            # every transformed image after the referenced step.
            if transformation_step is None:
                raise HTTPException(
                    status_code=400,
                    detail="transformation_step field required")
            img_extension = "." + img_url.split(".")[-1]
            img_url_new += ("/transformed_img_" + str(transformation_step) + img_extension)
            if not preview:
                step_count = str(int(transformation_step) + 1)
                response.set_cookie(key="step_count", value=step_count)
            folder_actions.delete_files(
                folder="images/" + str(id),
                split_string="transformed_img_",
                low=int(transformation_step),
            )
        elif preview_url is not None:
            img_extension = "." + preview_url.split(".")[-1]
            img_url_new = "image_previews/" + str(id) + img_extension
        # Load the referenced file from disk as an RGB array.
        image = np.array(Image.open(img_url_new).convert("RGB"))
    else:
        raise HTTPException(status_code=400, detail="img_url or preview_url required")
    transformed_image = image
    # Previews loaded from preview_url are saved as-is; every other path
    # applies the selected augmentation. `parameters` arrives double-encoded
    # (JSON string inside a JSON string), hence the nested json.loads.
    if img_url is not None or preview_url is None:
        parameters = json.loads(json.loads(parameters), object_hook=hinted_tuple_hook)
        transform = augmentations.augmentations_dict[transformation](
            **parameters)
        transformed = transform(image=image)
        transformed_image = transformed["image"]
    im = Image.fromarray(transformed_image)
    img_path = "images/" + str(id)
    if preview:
        img_path = "image_previews/" + str(id) + img_extension
    else:
        img_path += "/transformed_img_" + str(step_count) + img_extension
    im.save(img_path)
    return {"img_path": SERVER_BASE_URL + img_path}
async def document_post(text: str = Form(...)):
    """Index *text* into Elasticsearch together with its sentence embedding."""
    embedding = encoder.encode(text).tolist()
    doc = Document(text=text, vector=embedding)
    return await document_add(elasticsearch=es, document=doc)
async def add_post_data(image: UploadFile = File(...), description: str = Form(None), current_user=Depends(auth_handler.auth_wrapper)):
    """Create a new post owned by the authenticated user."""
    payload = {
        "report_counter": 0,
        "image": image,
        "description": description,
    }
    created = await add_post(current_user, payload)
    return ResponseModel(created, "Post created successfully.")
def post_token(request_data: OAuth2PasswordRequestForm = Depends()):
    """Issue a toy access token of the form ``username:password``.

    Bug fix: ``OAuth2PasswordRequestForm`` is a FastAPI dependency class and
    must be injected with ``Depends()``, not declared as a single
    ``Form(...)`` field; it also has no ``parse()`` method — the submitted
    fields live directly on the instance, so the original code raised
    ``AttributeError`` at runtime.
    """
    access_token = request_data.username + ":" + request_data.password
    return {"access_token": access_token}
async def upload_params(*, link: str = Form(...)):
    """Hand the supplied download link to kmd_lib for parameter setup."""
    result = kmd_lib.setup_params(link)
    return result
async def upload_binary(*, link: str = Form(...)):
    """Hand the supplied download link to kmd_lib for binary setup."""
    result = kmd_lib.setup_binary(link)
    return result
async def add_light(db: Session = Depends(get_db), name: str = Form(...), ip: str = Form(...)):
    """Register a new light, then redirect the client back to the index page."""
    crud.create_light(db, name, ip)
    redirect = RedirectResponse("/", status_code=302)
    return redirect
def from_file(file: UploadFile = File(...), title: str = Form(""), extra: str = Form("")):
    """Generate Full ISCC Code from Media File with optional explicit metadata.

    Pipeline: detect MIME type -> extract text metadata via Tika -> build the
    Meta-ID, Content-ID (per generic media type), Data-ID and Instance-ID
    components -> assemble and return the joined ISCC code with per-component
    bit strings.
    """
    media_type = detector.from_buffer(file.file)
    if media_type not in SUPPORTED_MIME_TYPES:
        raise HTTPException(
            HTTP_415_UNSUPPORTED_MEDIA_TYPE,
            "Unsupported media type '{}'. Please request support at "
            "https://github.com/iscc/iscc-service/issues.".format(media_type),
        )
    # Mobipocket ebooks must be unpacked to disk before Tika can parse them.
    if media_type == "application/x-mobipocket-ebook":
        file.file.seek(0)
        tempdir, filepath = mobi.extract(file.file)
        tika_result = parser.from_file(filepath)
        shutil.rmtree(tempdir)
    else:
        file.file.seek(0)
        tika_result = parser.from_buffer(file.file)
    # Fall back to a title guessed from the extracted metadata.
    if not title:
        title = get_title(tika_result, guess=True)
    mid, norm_title, norm_extra = iscc.meta_id(title, extra)
    gmt = mime_to_gmt(media_type)
    # Content-ID generation differs per generic media type (GMT).
    # NOTE(review): if gmt matches none of the four branches, `cid` is
    # unbound and the join below raises NameError — confirm mime_to_gmt
    # can only return these four values for SUPPORTED_MIME_TYPES.
    if gmt == GMT.IMAGE:
        file.file.seek(0)
        cid = iscc.content_id_image(file.file)
    elif gmt == GMT.TEXT:
        text = tika_result["content"]
        if not text:
            raise HTTPException(HTTP_422_UNPROCESSABLE_ENTITY, "Could not extract text")
        cid = iscc.content_id_text(tika_result["content"])
    elif gmt == GMT.AUDIO:
        file.file.seek(0)
        features = audio_id.get_chroma_vector(file.file)
        cid = audio_id.content_id_audio(features)
    elif gmt == GMT.VIDEO:
        # Video fingerprinting needs a real file path, so spool the upload
        # to a uniquely named temp file and remove it afterwards.
        file.file.seek(0)
        _, ext = splitext(file.filename)
        fn = "{}{}".format(uuid.uuid4(), ext)
        tmp_path = join(APP_DIR, fn)
        with open(tmp_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)
        features = video_id.get_frame_vectors(tmp_path)
        cid = video_id.content_id_video(features)
        os.remove(tmp_path)
    file.file.seek(0)
    did = iscc.data_id(file.file)
    file.file.seek(0)
    iid, tophash = iscc.instance_id(file.file)
    # The Meta-ID component is only included when a normalized title exists.
    if not norm_title:
        iscc_code = "-".join((cid, did, iid))
    else:
        iscc_code = "-".join((mid, cid, did, iid))
    components = iscc_split(iscc_code)
    result = dict(
        iscc=iscc_code,
        tophash=tophash,
        gmt=gmt,
        bits=[code_to_bits(c) for c in components],
    )
    if norm_title:
        result["title"] = title
        result["title_trimmed"] = norm_title
    if norm_extra:
        result["extra"] = extra
        result["extra_trimmed"] = norm_extra
    file.file.close()
    return result
def as_form(
    cls,
    name: str = Form(None, min_length=5, max_length=100),
    src: str = Form(None, min_length=5, max_length=300),
):
    """Construct an instance of *cls* from length-validated form fields."""
    fields = {"name": name, "src": src}
    return cls(**fields)
async def post_token(
    response: Response,
    grant_type: str = Form(
        None,
        title="Request type",
        description="authorization_code is the only supported grant type",
        example="authorization_code",
    ),
    client_id: str = Form(
        None,
        title="ID of client",
        example="oidc-client-name",
    ),
    client_secret: str = Form(
        None,
        title="Client secret",
        example="rYTfX6h9-ilGwADfgn7KRQ",
    ),
    code: str = Form(
        None,
        title="Authorization code",
        description="The code returned from the /auth/openid/login endpoint",
        example="gc-W74I5HltJZRc0fOUAapgVQ.3T1xQQgeD063KgmNinw-tA",
    ),
    redirect_uri: str = Form(
        None,
        title="URL of client",
        description="Must match the redirect_uri in the client registration",
        example="https://example.com/",
    ),
    context: RequestContext = Depends(context_dependency),
) -> Union[OIDCTokenReply, JSONResponse]:
    """Redeem an OIDC authorization code for an access/ID token.

    Implements the OAuth 2.0 token endpoint for the authorization_code
    grant: validates the request parameters, redeems the code via the OIDC
    service, and returns an OIDCTokenReply. OAuth protocol failures are
    reported as an RFC 6749 JSON error body with HTTP 400 rather than being
    raised to the framework.
    """
    oidc_service = context.factory.create_oidc_service()
    try:
        # Required-parameter and grant-type validation raise OAuthError
        # subclasses so they share the error-response path below.
        if not grant_type or not client_id or not code or not redirect_uri:
            raise InvalidRequestError("Invalid token request")
        if grant_type != "authorization_code":
            raise UnsupportedGrantTypeError(f"Invalid grant type {grant_type}")
        authorization_code = OIDCAuthorizationCode.from_str(code)
        token = await oidc_service.redeem_code(client_id, client_secret,
                                               redirect_uri,
                                               authorization_code)
    except OAuthError as e:
        # Map protocol errors to the RFC 6749 error response shape; the
        # human-readable description is hidden when the error is sensitive.
        context.logger.warning("%s", e.message, error=str(e))
        content = {
            "error": e.error,
            "error_description": e.message if e.hide_error else str(e),
        }
        return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST,
                            content=content)
    # Log the token redemption.
    context.logger.info(
        "Retrieved token for user %s via OpenID Connect",
        token.username,
        user=token.username,
        token=token.jti,
    )
    # Return the token to the caller. The headers are mandated by RFC 6749.
    response.headers["Cache-Control"] = "no-store"
    response.headers["Pragma"] = "no-cache"
    return OIDCTokenReply(
        access_token=token.encoded,
        id_token=token.encoded,
        expires_in=int(token.claims["exp"] - time.time()),
    )
async def claim_job(job_id: str = Form(...), provider_id: str = Form(...)):
    """Mark a scheduled job as claimed by the given provider."""
    claim_fields = {
        "claim_status": True,
        "claim_provider": provider_id,
    }
    scheduler.update({"_id": job_id}, claim_fields)
    LOG.info('Claiming job')
async def contact(http_request: Request, email: EmailStr = Form(...), name: str = Form(...), message: str = Form(...)):
    """Forward a contact-form submission and acknowledge receipt."""
    await submit_contact_form(http_request, email, name, message)
    # A bare True signals success to the client.
    return True
async def upload_binary(*, link: str = Form(...)):
    """Restart the API via kmd_lib using the supplied link.

    NOTE(review): the function name says "upload_binary" but it calls
    kmd_lib.restart_api — confirm the name/route is intentional.
    """
    result = kmd_lib.restart_api(link)
    return result
async def upload_file(background_tasks: BackgroundTasks, response: Response, file: UploadFile = File(...), folder: Optional[str] = Form(''), childOf: Optional[str] = Form(''), identity: str = Depends(get_jwt_identity)):
    """Store an uploaded media file for the authenticated user.

    Deduplicates filenames within the target folder, optionally queues
    video/audio/subtitle files for background processing (responding 202),
    links the new media to any parent containers listed in ``childOf``
    (comma-separated ids), and returns the serialized media document.

    Raises HTTP exceptions for schema violations, unknown users, and media
    processing failures.
    """
    try:
        # Reject empty filenames and absolute folder paths.
        if file.filename == '' or (len(folder) > 0 and folder[0] == '/'):
            raise SchemaValidationError
        User.objects.get(id=identity)  # make sure the user exists
        if allowed_file(file.filename):
            # Handle filename collisions: append _2, _3, ... before the
            # extension until the (filename, folder) pair is unused.
            filename = file.filename
            counter = 2
            while True:
                try:
                    Media.objects.get(filename=filename, folder=folder)
                    newFilename = filename
                    filenameSplit = newFilename.rsplit('.', 1)
                    filename = filenameSplit[0] + '_' + str(counter) + '.' + filenameSplit[1]
                    counter += 1
                except DoesNotExist:
                    break
            mimetype = file.content_type
            if not mimetype:
                # Bug fix: mimetypes.guess_type() returns a (type, encoding)
                # tuple, not a string. The original stored the whole tuple as
                # the content type and the prefix checks below could never
                # match. Take the type and fall back to a generic binary type
                # for unknown extensions.
                mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
            if FileSettings.ENABLE_FFMPEG and FileSettings.ENABLE_FILE_PROCESSING and (mimetype[:6] == 'video/' or mimetype[:6] == 'audio/' or mimetype == 'application/x-subrip' or mimetype == 'application/ttml+xml'):
                # Process the file asynchronously and respond 202 Accepted.
                splitFilename = filename.rsplit('.', 1)
                media = Media(owner=identity, filename=splitFilename[0], folder=folder, container=True, processing=True)
                media.save()
                background_tasks.add_task(processMedia, media, file, splitFilename[1])
                #processMedia(media, file)
                response.status_code = 202
            else:
                media = Media(owner=identity, filename=filename, folder=folder)
                media.file.put(file.file, content_type=mimetype)
                media.save()
            # Attach the new media to each requested parent container,
            # silently skipping parents that no longer exist.
            for parent in childOf.split(','):
                try:
                    if parent:
                        Media.objects.get(id=parent).update(push__associatedMedia=media)
                except DoesNotExist:
                    pass
            return media.serialize()
        raise SchemaValidationError
    except SchemaValidationError as e:
        raise SchemaValidationError().http_exception
    except DoesNotExist:
        # User.objects.get failed above: the JWT identity is stale/invalid.
        raise UnauthorizedError().http_exception
    except MediaProcessingError:
        raise MediaProcessingError().http_exception
    except Exception as e:
        raise e
def fucking_test(Taghavi: str = Form(...)):
    """Echo a rude greeting at the submitted name (throwaway test endpoint)."""
    return "F**k you " + Taghavi
async def get_timetable(request: str = Form(...)):
    """Look up and return the plain-text timetable for *request*."""
    return fetch_text_timetable(request)
def importHuaWei(request: Request, queryType: int = Form(...), subPage: int = Form(...), subSize: int = Form(...), file: UploadFile = File(...), db: Session = Depends(api.get_db)):
    """Delegate a HuaWei data-file import to the API layer."""
    result = api.importHW(queryType, subPage, subSize, file, db)
    return result
async def trusted_user_addvote_post(request: Request,
                                    user: str = Form(default=str()),
                                    type: str = Form(default=str()),
                                    agenda: str = Form(default=str())):
    """Handle submission of a new Trusted User proposal.

    Validates the submitter's credential, the target username (except for
    bylaws proposals), the proposal type and agenda text, then creates a
    TUVoteInfo record and redirects to the new proposal page. Validation
    failures re-render the addvote form with an error message.
    """
    # Only users holding the TU_ADD_VOTE credential may submit proposals.
    if not request.user.has_credential(creds.TU_ADD_VOTE):
        return RedirectResponse("/tu", status_code=HTTPStatus.SEE_OTHER)

    # Build a context.
    context = await make_variable_context(request, "Add Proposal")
    context["type"] = type
    context["user"] = user
    context["agenda"] = agenda

    def render_addvote(context, status_code):
        """ Simplify render_template a bit for this test. """
        return render_template(request, "addvote.html", context, status_code)

    # Alright, get some database records, if we can.
    # Bylaws proposals are not tied to a user, so skip the user checks.
    if type != "bylaws":
        user_record = db.query(
            models.User).filter(models.User.Username == user).first()
        if user_record is None:
            context["error"] = "Username does not exist."
            return render_addvote(context, HTTPStatus.NOT_FOUND)

        # Reject a second concurrent proposal for the same user.
        utcnow = time.utcnow()
        voteinfo = db.query(models.TUVoteInfo).filter(
            and_(models.TUVoteInfo.User == user,
                 models.TUVoteInfo.End > utcnow)).count()
        if voteinfo:
            _ = l10n.get_translator_for_request(request)
            context["error"] = _("%s already has proposal running for them."
                                 ) % (html.escape(user), )
            return render_addvote(context, HTTPStatus.BAD_REQUEST)

    if type not in ADDVOTE_SPECIFICS:
        context["error"] = "Invalid type."
        context["type"] = type = "add_tu"  # Default for rendering.
        return render_addvote(context, HTTPStatus.BAD_REQUEST)

    if not agenda:
        context["error"] = "Proposal cannot be empty."
        return render_addvote(context, HTTPStatus.BAD_REQUEST)

    # Gather some mapped constants and the current timestamp.
    duration, quorum = ADDVOTE_SPECIFICS.get(type)
    timestamp = time.utcnow()

    # Active TU types we filter for.
    types = {TRUSTED_USER_ID, TRUSTED_USER_AND_DEV_ID}

    # Create a new TUVoteInfo (proposal)!
    with db.begin():
        # The quorum is computed against the number of currently active TUs.
        active_tus = db.query(User).filter(
            and_(User.Suspended == 0,
                 User.InactivityTS.isnot(None),
                 User.AccountTypeID.in_(types))).count()
        voteinfo = db.create(models.TUVoteInfo,
                             User=user,
                             Agenda=html.escape(agenda),
                             Submitted=timestamp,
                             End=(timestamp + duration),
                             Quorum=quorum,
                             ActiveTUs=active_tus,
                             Submitter=request.user)

    # Redirect to the new proposal.
    endpoint = f"/tu/{voteinfo.ID}"
    return RedirectResponse(endpoint, status_code=HTTPStatus.SEE_OTHER)
def importXiaoMi(request: Request, subPage: int = Form(...), subSize: int = Form(...), file: UploadFile = File(...), db: Session = Depends(api.get_db)):
    """Delegate a XiaoMi data-file import to the API layer."""
    result = api.importXM(subPage, subSize, file, db)
    return result
async def flood_model(
    request: Request,
    xmin: float = Form(...),
    ymin: float = Form(...),
    xmax: float = Form(...),
    ymax: float = Form(...),
    init_start: str = Form(...),
    init_last: str = Form(...),
    flood_start: str = Form(...),
    flood_last: str = Form(...),
    action: str = Form(...),
    threshold: float = Form(...)
):
    """Run the Earth Engine flood-estimation model over a bounding box.

    Compares a baseline period against a flood period inside the rectangle
    (xmin, ymin, xmax, ymax). ``action`` selects the response: "display"
    renders a map template with tile layers; "download" exports the flood
    extent as a zipped shapefile.
    """
    # Box centre, used to centre the rendered map.
    xmean = (xmin + xmax)/2
    ymean = (ymin + ymax)/2
    # 1. Create geometry
    ee_rectangle = ee.Geometry.Rectangle(xmin, ymin, xmax, ymax)
    # 2. Create range dates
    base_period = (init_start, init_last)
    flood_period = (flood_start, flood_last)
    # 3. Run the flood model
    dict_db = db_creator(base_period, flood_period, ee_rectangle)
    flood_added = flood_estimation(dict_db, difference_threshold=threshold)
    if action == "display":
        #pop_added = population_exposed(flood_added)
        #cropland_added = cropland_exposed(pop_added)
        #urban_added = urban_exposed(cropland_added)
        # 4. Upload gee tileid
        tileids = display(flood_added)
        return templates.TemplateResponse(
            "map.html",
            {
                "request": request,
                "flood_extent": str(flood_added["flood_area_stats"]) + " hectares",
                "before_waterlog": tileids["before_flood"],
                "after_waterlog": tileids["after_flood"],
                "waterlog_results": tileids["s1_fresults_id"],
                "xmin": xmin,
                "ymin": ymin,
                "ymax": ymax,
                "xmax": xmax,
                "xmean": xmean,
                "ymean": ymean,
                "init_start": init_start,
                "init_last": init_last,
                "flood_start": flood_start,
                "flood_last": flood_last
            }
        )
    elif action == "download":
        # Remove previous zipfiles
        zip_files = searching_all_files(pattern="\.zip")
        [os.remove(zip_file) for zip_file in zip_files]
        # 1. Create a shapefile
        # NOTE(review): the timestamp is evaluated three separate times; if a
        # second boundary is crossed between calls the names diverge and the
        # zip step finds no files — consider computing it once.
        geo_file = "waterlog_area_%s.shp" % (time.strftime("%Y%m%d%H%M%S", time.gmtime()))
        geo_file_search = "waterlog_area_%s" % (time.strftime("%Y%m%d%H%M%S", time.gmtime()))
        geo_file_zip = 'waterlog_area_%s.zip' % (time.strftime("%Y%m%d%H%M%S", time.gmtime()))
        final_flood_area = raster_to_vector(flood_added["flood_results"], ee_rectangle)
        final_flood_area_gpd = gpd.GeoDataFrame.from_features(final_flood_area["features"])
        final_flood_area_gpd.to_file(geo_file)
        # 2. Create a zip file and delete shapefile
        shapefile_to_zip = searching_all_files(pattern=geo_file_search)
        with ZipFile(geo_file_zip, 'w') as zipObj2:
            for item in shapefile_to_zip:
                zipObj2.write(os.path.basename(item))
                os.remove(item)
        time.sleep(1)
        return FileResponse(geo_file_zip, media_type="application/zip", filename=geo_file_zip)
async def parseShortVideoByURL(request: Request, parse_url: str = Form(...)):
    """Resolve the lossless direct URL for a supported short-video link."""
    # Only the Weishi (h5.weishi.qq.com) host is currently supported.
    if "h5.weishi.qq.com" not in parse_url:
        return {"real_url_lossless": "解析失败!未分析过该短视频网站"}
    real_url = await handleURLWesee(parse_url)
    return {"real_url_lossless": real_url}
def create_project(projectName: str = Form(...), mtype: str = Form(...), train: UploadFile = File(...)):
    """Create a new project from an uploaded training file.

    Creates the on-disk project folder, inserts a project document, and
    appends the new project id to the owning user's ``listOfProjects``.
    Returns a JSONResponse describing how far the pipeline got: full success,
    partial success (user update failed), failure (project insert failed),
    or the folder-creation error.
    """
    inserted_projectID = 0
    Operation = generate_project_folder(projectName, train)
    if Operation["Success"]:
        try:
            inserted_projectID = create_project_id(Project21Database)
            # inserted_modelID=create_model_id(Project21Database)
            currentIDs.set_current_project_id(inserted_projectID)
            # currentIDs.set_current_model_id(inserted_modelID)
            resultsCache.set_project_folder_path(
                Operation["ProjectFolderPath"])
            # Persist the project document itself.
            Project21Database.insert_one(
                settings.DB_COLLECTION_PROJECT, {
                    "projectID": inserted_projectID,
                    "projectName": projectName,
                    "rawDataPath": Operation["RawDataPath"],
                    "projectFolderPath": Operation["ProjectFolderPath"],
                    "belongsToUserID": currentIDs.get_current_user_id(),
                    "listOfDataIDs": [],
                    "configFileLocation": None,
                    "plotsPath": None,
                    "projectType": mtype,
                    "target": None
                })
            # Project21Database.insert_one(settings.DB_COLLECTION_MODEL,{
            #     "modelID": inserted_modelID,
            #     "modelName": "Default Model",
            #     "modelType": mtype,
            #     "belongsToUserID": currentIDs.get_current_user_id(),
            #     "belongsToProjectID": inserted_projectID
            # })
            # Link the project to its owner; failure here is non-fatal and
            # reported as "Partially Successful".
            try:
                result = Project21Database.find_one(
                    settings.DB_COLLECTION_USER,
                    {"userID": currentIDs.get_current_user_id()})
                if result is not None:
                    result = serialiseDict(result)
                    if result["listOfProjects"] is not None:
                        newListOfProjects = result["listOfProjects"]
                        newListOfProjects.append(inserted_projectID)
                        Project21Database.update_one(
                            settings.DB_COLLECTION_USER,
                            {"userID": result["userID"]},
                            {"$set": {
                                "listOfProjects": newListOfProjects
                            }})
                    else:
                        # First project for this user.
                        Project21Database.update_one(
                            settings.DB_COLLECTION_USER,
                            {"userID": result["userID"]},
                            {"$set": {
                                "listOfProjects": [inserted_projectID]
                            }})
            except Exception as e:
                print("An Error occured: ", e)
                return JSONResponse({
                    "File Received": "Success",
                    "Project Folder": "Success",
                    "Database Update": "Partially Successful"
                })
        except Exception as e:
            print("An Error occured: ", e)
            return JSONResponse({
                "File Received": "Success",
                "Project Folder": "Success",
                "Database Update": "Failure"
            })
        return JSONResponse({
            "userID": currentIDs.get_current_user_id(),
            "projectID": inserted_projectID
        })
    else:
        return JSONResponse(Operation["Error"])
async def language(name: str = Form(default=None), type: str = Form(default=None)):
    """Echo back the submitted language name and type."""
    payload = {"name": name, "type": type}
    return payload
async def login(username: str = Form(...), password: str = Form(...)):
    """Accept login credentials and echo back only the username.

    The password is accepted but deliberately not returned.
    """
    return {"username": username}
async def subscribe(http_request: Request, email: EmailStr = Form(...)):
    """Record a newsletter-subscription request and acknowledge it."""
    await record_subscribe_request(http_request, email)
    return True
async def to_feature(text: str = Form(...), wave: UploadFile = File(...)):
    """Build an acoustic feature stream from Japanese text plus its recording.

    Pipeline: OpenJTalk converts *text* to phoneme labels; Julius
    force-aligns those phonemes against the uploaded audio; WORLD extracts
    F0 which is shifted toward the target speaker's mean; the aligned
    phonemes and converted F0 are merged into a single sampled feature
    array, streamed back as raw little-endian float32 bytes.
    """
    with TemporaryDirectory() as d:
        tmp_dir = Path(d)
        input_audio_path = tmp_dir.joinpath("input.wav")
        input_audio_path.write_bytes(await wave.read())

        # openjtalk
        # Text -> phoneme labels via the OpenJTalk frontend.
        phonemes = [
            p.label
            for p in openjtalk_label_getter(
                text,
                openjtalk_command="open_jtalk",
                dict_path=Path("/var/lib/mecab/dic/open-jtalk/naist-jdic"),
                htsvoice_path=Path(
                    "/usr/share/hts-voice/nitech-jp-atr503-m001/nitech_jp_atr503_m001.htsvoice"
                ),
                output_wave_path=tmp_dir.joinpath("wave.wav"),
                output_log_path=tmp_dir.joinpath("log.txt"),
                output_type=OutputType.phoneme,
                without_span=False,
            )
        ]

        # julius
        # Julius expects 16 kHz / 16-bit audio; resample with sox.
        julius_audio_path = tmp_dir.joinpath("julius.wav")
        subprocess.check_call(
            f"sox {input_audio_path} -r 16000 -b 16 {julius_audio_path}".split()
        )
        # Map JVS phoneme names to Julius equivalents and drop silence.
        julius_phonemes = [
            p if p not in _jvs_to_julius else _jvs_to_julius[p]
            for p in phonemes
            if p != "sil"
        ]
        julius_dict_path = tmp_dir.joinpath("2nd.dict")
        julius_dict = sp_inserter.gen_julius_dict_2nd(
            " ".join(julius_phonemes), model_type=sp_inserter.ModelType.gmm
        )
        julius_dict_path.write_text(julius_dict)
        julius_dfa_path = tmp_dir.joinpath("2nd.dfa")
        julius_dfa = sp_inserter.gen_julius_aliment_dfa(julius_dict.count("\n"))
        julius_dfa_path.write_text(julius_dfa)
        julius_output = sp_inserter.julius_phone_alignment(
            str(julius_audio_path),
            str(tmp_dir.joinpath("2nd")),
            _hmm_model,
            model_type=sp_inserter.ModelType.gmm,
            options=None,
        )
        time_alignment_list = sp_inserter.frame_to_second(
            sp_inserter.get_time_alimented_list(julius_output)
        )
        # Keep each "pau" only where Julius actually aligned a short pause
        # ("sp"), so phonemes and alignment entries stay in lockstep.
        i_phoneme = 0
        new_phonemes = []
        for p in phonemes:
            if p == "pau" and time_alignment_list[i_phoneme][2] != "sp":
                continue
            i_phoneme += 1
            new_phonemes.append(p)
        aligned = JvsPhoneme.convert(
            [
                JvsPhoneme(start=float(o[0]), end=float(o[1]), phoneme=p)
                for p, o in zip(new_phonemes, time_alignment_list)
            ]
        )
        for p in aligned:
            p.verify()

        # world
        # Extract F0 and shift its log-mean toward the target voice while
        # keeping this speaker's variance.
        f0 = F0.from_wave(
            Wave.load(input_audio_path, sampling_rate=24000, dtype=numpy.float64),
            frame_period=5.0,
            f0_floor=71.0,
            f0_ceil=800,
            with_vuv=False,
            f0_type=F0Type.world,
        )
        converted_f0 = f0.convert(
            input_mean=f0.valid_f0_log.mean(),
            input_var=f0.valid_f0_log.var(),
            target_mean=_voiro_mean,
            target_var=f0.valid_f0_log.var(),
        )
        converted_f0.array = converted_f0.array.astype(numpy.float32).reshape(-1, 1)

        # feature
        # One-hot phoneme features merged with F0 at the common feature rate.
        phoneme_array = LinguisticFeature(
            phonemes=aligned,
            phoneme_class=JvsPhoneme,
            rate=_feature_rate,
            feature_types=[LinguisticFeature.FeatureType.PHONEME],
        ).make_array()
        phoneme = SamplingData(array=phoneme_array, rate=_feature_rate)
        feature = SamplingData.collect(
            [converted_f0, phoneme],
            rate=_feature_rate,
            mode="min",
            error_time_length=0.015,
        )
        return StreamingResponse(BytesIO(feature.astype(numpy.float32).tobytes()))
async def lapindrome_post(request: Request, the_string: str = Form(...)):
    """Evaluate the submitted string and render the lapindrome result page."""
    verdict = palindrome(the_string)
    ctx = {'request': request, 'answer': verdict}
    return templates.TemplateResponse('lapindromes.html', context=ctx)