async def test_save_training_files(self):
    """Save a full set of uploaded Rasa training files via
    ``Utility.save_training_files`` and assert every returned path
    (nlu/config/stories/domain/rules plus the root directory) exists on disk.
    """
    # In-memory byte payloads standing in for user-uploaded training files.
    nlu_content = "## intent:greet\n- hey\n- hello".encode()
    stories_content = "## greet\n* greet\n- utter_offer_help\n- action_restart".encode()
    config_content = "language: en\npipeline:\n- name: WhitespaceTokenizer\n- name: RegexFeaturizer\n- name: LexicalSyntacticFeaturizer\n- name: CountVectorsFeaturizer\n- analyzer: char_wb\n max_ngram: 4\n min_ngram: 1\n name: CountVectorsFeaturizer\n- epochs: 5\n name: DIETClassifier\n- name: EntitySynonymMapper\n- epochs: 5\n name: ResponseSelector\npolicies:\n- name: MemoizationPolicy\n- epochs: 5\n max_history: 5\n name: TEDPolicy\n- name: RulePolicy\n- core_threshold: 0.3\n fallback_action_name: action_small_talk\n name: FallbackPolicy\n nlu_threshold: 0.75\n".encode()
    domain_content = "intents:\n- greet\nresponses:\n utter_offer_help:\n - text: 'how may i help you'\nactions:\n- utter_offer_help\n".encode()
    rules_content = "rules:\n\n- rule: Only say `hello` if the user provided a location\n condition:\n - slot_was_set:\n - location: true\n steps:\n - intent: greet\n - action: utter_greet\n".encode()
    # Wrap each payload in an UploadFile, as FastAPI would for a multipart upload.
    nlu = UploadFile(filename="nlu.yml", file=BytesIO(nlu_content))
    stories = UploadFile(filename="stories.md", file=BytesIO(stories_content))
    config = UploadFile(filename="config.yml", file=BytesIO(config_content))
    domain = UploadFile(filename="domain.yml", file=BytesIO(domain_content))
    rules = UploadFile(filename="rules.yml", file=BytesIO(rules_content))
    training_file_loc = await Utility.save_training_files(
        nlu, domain, config, stories, rules)
    # Every component file, and the containing root directory, must have been written.
    assert os.path.exists(training_file_loc['nlu'])
    assert os.path.exists(training_file_loc['config'])
    assert os.path.exists(training_file_loc['stories'])
    assert os.path.exists(training_file_loc['domain'])
    assert os.path.exists(training_file_loc['rules'])
    assert os.path.exists(training_file_loc['root'])
async def create_deep_fake_image(origin: UploadFile = File(...), target: UploadFile = File(...)):
    """Run the face-swap pipeline on two uploaded images and return a URL
    pointing at the generated result image."""
    payloads = []
    for upload in (origin, target):
        data = await upload.read()
        # Keep the original extension but replace the name with a random id.
        extension = upload.filename[upload.filename.rfind('.'):]
        upload.filename = str(uuid.uuid4()).replace('-', '') + extension
        payloads.append(data)
    origin_input = os.path.join(config.face_swap_img_path, origin.filename)
    target_input = os.path.join(config.face_swap_img_path, target.filename)
    output = os.path.join(config.face_swap_result_path,
                          str(uuid.uuid4()).replace('-', '') + ".png")
    print("origin_input:", origin_input)
    print("target_input:", target_input)
    # Stage both uploads on disk where the face-swap engine expects them.
    for path, data in zip((origin_input, target_input), payloads):
        with open(path, "wb") as sink:
            sink.write(data)
    faceswap.makedeepface(upload_origin_image_path=origin_input,
                          upload_target_image_path=target_input,
                          output=output)
    return {
        "url": url.convert_path_to_url(output, base_url="/api/v1/content/")
    }
async def create_upload_file(file: UploadFile = File(...)):
    """Persist an uploaded pcap capture locally and return, per packet, the
    Ethernet/ARP/Padding header fields formatted by scapy's ``sprintf``.

    NOTE(review): ``file.save(...)`` is not a method of starlette's UploadFile —
    this assumes a custom UploadFile (or a Flask-style file object); confirm,
    otherwise the upload is never written and ``rdpcap`` will fail.
    """
    file_name = file.filename
    file.save('./{}'.format(file_name))
    packets = rdpcap('./{}'.format(file_name))
    res = {}  # re-assigned per packet below; this initial value is never used
    data = []
    # Let's iterate through every packet
    for packet in packets:
        # sprintf renders "???" for fields whose layer is absent in the packet.
        res = {
            'Ethernet': {
                'dst': '{}'.format(packet.sprintf("%Ether.dst%")),
                'src': '{}'.format(packet.sprintf("%Ether.src%")),
                'type': '{}'.format(packet.sprintf("%Ether.type%"))
            },
            'ARP': {
                'hwtype': '{}'.format(packet.sprintf("%ARP.hwtype%")),
                'ptype': '{}'.format(packet.sprintf("%ARP.ptype%")),
                'hwlen': '{}'.format(packet.sprintf("%ARP.hwlen%")),
                'plen': '{}'.format(packet.sprintf("%ARP.plen%")),
                'op': '{}'.format(packet.sprintf("%ARP.op%")),
                'hwsrc': '{}'.format(packet.sprintf("%ARP.hwsrc%")),
                'psrc': '{}'.format(packet.sprintf("%ARP.psrc%")),
                'hwdst': '{}'.format(packet.sprintf("%ARP.hwdst%")),
                'pdst': '{}'.format(packet.sprintf("%ARP.pdst%"))
            },
            'Padding': {
                'load': '{}'.format(packet.sprintf("%Padding.load%"))
            }
        }
        data.append(res)
    return data
async def test_create_filemetadata_from_starlette_uploadfile(
        mock_filepath, expected_md5sum):
    """``File.create_from_uploaded`` must consume the upload (cursor advances)
    and compute the expected md5 checksum — for both a disk-backed and an
    in-memory (SpooledTemporaryFile) UploadFile.
    """
    # WARNING: upload is a wrapper around a file handler that can actually be in memory as well
    # in file
    with open(mock_filepath, "rb") as file:
        upload = UploadFile(mock_filepath.name, file)
        assert upload.file.tell() == 0
        file_meta = await File.create_from_uploaded(upload)
        assert upload.file.tell() > 0, "modifies current position is at the end"
        assert file_meta.checksum == expected_md5sum
    # in memory
    # UploadFile constructor: by not passing file, it enforces a tempfile.SpooledTemporaryFile
    upload_in_memory = UploadFile(mock_filepath.name)
    assert isinstance(upload_in_memory.file, tempfile.SpooledTemporaryFile)
    await upload_in_memory.write(FILE_CONTENT.encode())
    await upload_in_memory.seek(0)
    assert upload_in_memory.file.tell() == 0
    file_meta = await File.create_from_uploaded(upload_in_memory)
    assert upload_in_memory.file.tell() > 0, "modifies current position is at the end"
async def test_create_from_yaml_flow_start_exception(monkeypatch):
    """A failing store ``_create`` must surface from ``flow._create_from_yaml``
    as an HTTPException with status 404 and an explanatory detail message."""
    monkeypatch.setattr(flow.flow_store, '_create', mock_flow_start_exception)
    spec = UploadFile(filename='abc.yaml')
    uses = [UploadFile(filename='abcd.yaml')]
    modules = [UploadFile(filename='abc.py')]
    with pytest.raises(flow.HTTPException) as response:
        await flow._create_from_yaml(yamlspec=spec,
                                     uses_files=uses,
                                     pymodules_files=modules)
    raised = response.value
    assert raised.status_code == 404
    assert 'Flow couldn\'t get started' in raised.detail
async def test_create_from_yaml_success(monkeypatch):
    """With a succeeding store ``_create``, the endpoint reports the started
    flow's id, host, port and status."""
    monkeypatch.setattr(flow.flow_store, '_create', mock_create_success)
    response = await flow._create_from_yaml(
        yamlspec=UploadFile(filename='abc.yaml'),
        uses_files=[UploadFile(filename='abcd.yaml')],
        pymodules_files=[UploadFile(filename='abc.py')])
    expected = {
        'status_code': 200,
        'flow_id': _temp_id,
        'host': '0.0.0.0',
        'port': 12345,
        'status': 'started',
    }
    for field, value in expected.items():
        assert response[field] == value
def _test_workspace_files():
    """Exercise ``workspace_files`` with a pair of named temporary files.

    NOTE(review): the first UploadFile is created WITHOUT ``file=fp1`` even
    though fp1 is written/flushed/rewound above — and ``print(fp1.read())``
    then consumes fp1. This asymmetry with ``b.txt`` looks accidental;
    confirm against ``workspace_files``' contract.
    """
    with tempfile.NamedTemporaryFile() as fp1, tempfile.NamedTemporaryFile() as fp2:
        fp1.write(b'Hello world1!')
        fp2.write(b'Hello world2!')
        # Flush and rewind so the contents are visible to readers from offset 0.
        fp1.flush()
        fp2.flush()
        fp1.seek(0, 0)
        fp2.seek(0, 0)
        id = DaemonID('jworkspace')
        print(fp1.read())
        files = [UploadFile(filename='a.txt'),
                 UploadFile(filename='b.txt', file=fp2)]
        workspace_files(id, files, daemon_logger)
async def emailmessage(
        sender: str,
        password: str,
        subject: str,
        context: str,
        port: str = 587,  # NOTE(review): annotated str but defaulted to int — confirm intent
        host: str = "smtp.gmail.com",
        htmlfile: str = 'base/default.j2',
        file: UploadFile = File(...)
):
    """Send a templated email to every recipient listed in an uploaded file.

    The saved upload must parse as JSON: a list of objects carrying
    ``firstname``, ``lastname`` and ``email`` keys. Each recipient gets the
    rendered ``htmlfile`` template; a 5s pause separates sends. Returns a
    success message (or ``None`` when the upload fails ``allowed_csv``).
    """
    import asyncio  # local import: the (unseen) module header is out of this chunk

    if await allowed_csv(file):
        await save_file(file)
        filename = file.filename
        # Fix: don't shadow the ``file`` parameter with the open file handle.
        with open(filename, 'r') as handle:
            data = handle.read()
        obj = json.loads(data)
        for recipient in obj:
            name = recipient['firstname'] + ' ' + recipient['lastname']
            email = recipient['email']
            html = await render_template(htmlfile, context, name)
            await send_email(
                receiver=email,
                sender=sender,
                password=password,
                subject=subject,
                body=html,
                host=host,
                port=port
            )
            # Fix: time.sleep() blocks the event loop inside an async handler;
            # asyncio.sleep yields control while throttling sends.
            await asyncio.sleep(5)
        os.remove(filename)
        return {'message': 'sent message success !'}
async def generate_font_from_sample_image(img_id: str, file: UploadFile = File(...)):
    """Generate a font panel from an uploaded sample image, save both to the
    buffer directory, and forward the result to the storage service.

    Raises HTTPException(404) when the upstream save request fails.
    """
    # get image
    file.filename = f"{img_id}_sample.jpg"
    contents = await file.read()  # <-- Important! must be awaited in the handler
    pathlib.Path("buffer").mkdir(parents=True, exist_ok=True)
    path = pathlib.Path("buffer") / file.filename
    # Save the raw upload so OpenCV can read it back.
    with open(path, "wb") as f:
        f.write(contents)
    # Fix: cv2 expects a str path — older OpenCV builds reject pathlib.Path.
    sample_chars = cv2.imread(str(path), 0)  # 0 == grayscale
    # gen font
    all_char_panel = font_generator.generate_font_from_sample_image(sample_chars)
    output_path = pathlib.Path("buffer") / f"{img_id}_font.jpg"
    cv2.imwrite(str(output_path), all_char_panel)
    # Fix: truth-testing a numpy array raises "truth value is ambiguous";
    # test for None (imread/generation failure) explicitly instead.
    if all_char_panel is not None:
        # send as a form data (Top task)
        r = requests.post(f"dummy_url/image/result/{img_id}", files="xxxxx")
        if r.status_code == 200:
            return {
                "status": "success"
            }
        else:
            raise HTTPException(status_code=404, detail="Resource Not found")
def test_flow_store_with_files(tmpdir):
    """Creating a flow from a config plus auxiliary files must materialise the
    files on disk and register the flow; deleting must remove both again."""
    config = flow_file_str()
    # Upload wrappers whose filenames point inside the pytest tmpdir.
    file_yml = UploadFile(Path(tmpdir) / 'file1.yml')
    file_py = UploadFile(Path(tmpdir) / 'file1.py')
    files = [file_yml, file_py]
    store = InMemoryFlowStore()
    with store._session():
        flow_id, _, _ = store._create(config=config, files=files)
        # _create writes the auxiliary files and registers a live Flow object.
        assert Path(file_yml.filename).exists()
        assert Path(file_py.filename).exists()
        assert flow_id in store._store.keys()
        assert isinstance(store._store[flow_id]['flow'], Flow)
        store._delete(flow_id)
        # _delete removes both the registry entry and the on-disk files.
        assert flow_id not in store._store.keys()
        assert not Path(file_yml.filename).exists()
        assert not Path(file_py.filename).exists()
async def upload_file(request: Request, file: UploadFile = File(...)):
    """Stage an upload locally, push it to a remote host via a cached SSH
    client (selected by the ``upload-params`` header), then delete the staged
    copy.

    Returns {"filename": ...} on success, or a failure payload via RetCls.
    """
    try:
        fileSize = request.headers.get('file-size')
        # Fix: 'fileSize: ' has no %-placeholders, so the extra logging args
        # were malformed; use lazy %-style formatting correctly.
        logger.info('fileSize: %s %s', fileSize, type(fileSize))
        file.spool_max_size = int(fileSize)
        logger.info(file.spool_max_size)
        logger.info(file.content_type)
        local_path = upload_tmp_path + file.filename
        # Fix: context manager guarantees the handle closes on error paths.
        with open(local_path, 'wb') as f:
            s = await file.read(file.spool_max_size)
            f.write(s)
        await file.close()
        uploadParams = request.headers.get('upload-params')
        uploadParamsDict = json.loads(uploadParams)
        host_ip = uploadParamsDict['hostIp']
        username = uploadParamsDict['username']
        location = uploadParamsDict['location']
        key = host_ip + username
        ssh_client = client_db[key]
        remote_path = location + '/' + file.filename
        ssh_client.put(local_path, remote_path)
        os.remove(local_path)
        return {"filename": file.filename}
    except Exception as e:
        return RetCls.ret(False, str(e), [{}])
def upload_waybillimg(request: Request, waybillNo: str, file: UploadFile = File(...),):
    """Validate and store an uploaded waybill image under a unique name, then
    redirect back to the referring page.

    NOTE(review): this looks ported from Flask — ``len(file)`` raises
    TypeError on starlette's UploadFile (no __len__), and ``file.save(...)``
    is not a starlette method either. Confirm UploadFile's actual type.
    """
    referrer = request.headers.get("Referer")
    print('upload_image()-request.url: %s \t referrer: %s' % (request.url, referrer))
    # if 'file' not in file. request.files:
    if len(file) < 1 :
        flash('No file part')
        return RedirectResponse(referrer)
    # file = request.files['file']
    print("file tag", file)
    if file.filename == '':
        flash('No image selected for uploading')
        return RedirectResponse(referrer)
    default_wbNo = 'waybillNo'  # only used if the commented form lookup below is restored
    # waybillNo = request.form.get('waybillNo', default_wbNo)
    # Look up the waybill record matching the given number (fallback: NA row).
    info = ['NA', 'NA', 'NA', 'NA']
    for wb in waybillList:
        if waybillNo == wb[0]:
            info = wb
    if file and allowed_file(file.filename):
        original_filename = secure_filename(file.filename)
        file_ext = '.' + (original_filename.rsplit('.', 1)[1].lower().__str__())
        print('from referrer %s waybillno %s' % (referrer, waybillNo))
        saving_filename = waybillNo + file_ext
        unique_filename = make_unique(saving_filename)
        # file.save(os.path.join(UPLOAD_FOLDER, filename))
        file.save(os.path.join(UPLOAD_FOLDER, unique_filename))
        print(os.path.join(UPLOAD_FOLDER, unique_filename))
        # unique_thumbnail = save_thumbnail(os.path.join(UPLOAD_FOLDER, unique_filename))
        # print('upload_image filename: ' + filename)
        flash('Image successfully uploaded and displayed')
        # if 'waybill_images' in request.url:
        #     # return templates.TemplateResponse('waybillData.html', {'name':waybillNo, 'filename':unique_filename, 'info':info})
        #     return RedirectResponse(referrer)
        # else:
        #     return templates.TemplateResponse('upload.html', {'filename':unique_thumbnail})
        #     # return templates('upload.html', filename=unique_filename)
    else:
        flash('Allowed image types are -> png, jpg, jpeg, gif')
    return RedirectResponse(referrer)
def test_app_no_file():
    """Posting an empty upload to /detect_ships/ must yield HTTP 400."""
    empty_upload = UploadFile("test")
    payload = {"file": (empty_upload.filename, empty_upload.file, "")}
    with TestClient(app) as client:
        result = client.post("/detect_ships/", files=payload)
        assert result.status_code == 400
async def test_upload_document_invalid_content_type():
    """
    Tests whether method 'upload_document' returns proper response
    when sending non PDF file
    """
    upload_file = UploadFile(filename='test')
    with raises(HTTPException) as exp:
        await upload_document(upload_file)
    # Fix: pytest's ExceptionInfo exposes the raised exception via ``.value``;
    # ``exp.status_code`` raised AttributeError, so the intended check never ran.
    assert exp.value.status_code == 415
def resource_save_and_validate_training_files(self):
    """Fixture: load the yml training-file fixtures from disk and expose each
    as an UploadFile on the ``pytest`` module namespace (config, domain, nlu,
    stories, http_actions, rules, plus a deliberately mis-named ``non_nlu``).

    NOTE(review): the ``open(...)`` handles are never closed — contents are
    read eagerly into BytesIO, so only the descriptors leak.
    """
    config_path = 'tests/testing_data/yml_training_files/config.yml'
    domain_path = 'tests/testing_data/yml_training_files/domain.yml'
    nlu_path = 'tests/testing_data/yml_training_files/data/nlu.yml'
    stories_path = 'tests/testing_data/yml_training_files/data/stories.yml'
    http_action_path = 'tests/testing_data/yml_training_files/actions.yml'
    rules_path = 'tests/testing_data/yml_training_files/data/rules.yml'
    pytest.config = UploadFile(filename="config.yml", file=BytesIO(
        open(config_path, 'rb').read()))
    pytest.domain = UploadFile(filename="domain.yml", file=BytesIO(
        open(domain_path, 'rb').read()))
    pytest.nlu = UploadFile(filename="nlu.yml", file=BytesIO(open(nlu_path, 'rb').read()))
    pytest.stories = UploadFile(filename="stories.yml", file=BytesIO(
        open(stories_path, 'rb').read()))
    pytest.http_actions = UploadFile(filename="actions.yml", file=BytesIO(
        open(http_action_path, 'rb').read()))
    pytest.rules = UploadFile(filename="rules.yml", file=BytesIO(open(rules_path, 'rb').read()))
    # non_nlu reuses the rules payload under a name no loader should accept.
    pytest.non_nlu = UploadFile(filename="non_nlu.yml", file=BytesIO(
        open(rules_path, 'rb').read()))
    yield "resource_save_and_validate_training_files"
async def compare_faces(face1: UploadFile = File(...), face2: UploadFile = File(...)):
    """Read two uploaded face images concurrently, decode them with OpenCV,
    and return their similarity and distance as plain floats."""
    # Kick off both reads before awaiting either so they overlap.
    # (asyncio.ensure_future also works on Python < 3.7; create_task is 3.7+.)
    read1 = asyncio.ensure_future(face1.read())
    read2 = asyncio.ensure_future(face2.read())
    raw1 = await read1
    raw2 = await read2
    # Decode each byte buffer into a BGR image array.
    f1 = cv2.imdecode(np.frombuffer(raw1, np.uint8), cv2.IMREAD_COLOR)
    f2 = cv2.imdecode(np.frombuffer(raw2, np.uint8), cv2.IMREAD_COLOR)
    sim, dist = compare_two_faces(f1, f2)
    # Cast numpy.float32 to native float so the JSON encoder accepts them.
    return {'sim': float(sim), 'dist': float(dist)}
def resource_unzip_and_validate_exception(self):
    """Fixture: zip the yml training data, expose the archive as
    ``pytest.zip`` (an UploadFile), and remove the archive after the test."""
    data_path = 'tests/testing_data/yml_training_files/data'
    tmp_dir = tempfile.gettempdir()
    zip_file = os.path.join(tmp_dir, 'test')
    shutil.make_archive(zip_file, 'zip', data_path)
    # Fix: the bare open(...) leaked its file descriptor; read via a context
    # manager — the bytes are copied into BytesIO so the handle can close.
    with open(zip_file + '.zip', 'rb') as archive:
        pytest.zip = UploadFile(filename="test.zip", file=BytesIO(archive.read()))
    yield "resource_unzip_and_validate_exception"
    os.remove(zip_file + '.zip')
async def create_upload_file(file: UploadFile = File(...)):
    """Stream an uploaded file to disk under ``code_dir``, remembering its
    name in the module-level ``filename`` global for later handlers."""
    # when the user uploads a file, the filename will be stored in memory.
    global filename
    filename = file.filename
    # Fix: the old ``code = file.read()`` was an un-awaited coroutine — it
    # never executed and its result was unused; awaiting it would also have
    # drained the stream before the chunked copy below. Removed.
    # Fix: the destination used a mangled literal; write under the upload's name.
    # write code to code_dir, streaming 1 KiB at a time to bound memory use.
    async with aiofiles.open(f'{code_dir}/{filename}', 'wb') as upload:
        while content := await file.read(1024):
            await upload.write(content)
async def test_upload_document(mock_database_handler, mock_normalize_document):
    """
    Tests whether method 'upload_document' returns proper response
    when sending proper PDF file
    """
    # Fix: manage the fixture handle with a context manager instead of
    # leaking it; the UploadFile only needs it open during upload_document.
    with open('tests/resources/pages_num_3.pdf', "rb") as pdf:
        upload_file = UploadFile(filename='test', content_type='application/pdf',
                                 file=pdf)
        response = await upload_document(upload_file)
    # The endpoint returns exactly one field: the new document's string id.
    assert len(response) == 1
    assert type(response["id"]) == str
def upload_image(file: UploadFile):
    """Upload an image to Aliyun OSS under a random name and return its URL.

    SECURITY(review): the OSS access key pair is hard-coded in source — move
    it to configuration/environment and rotate the exposed credentials.
    """
    # Fix: the extension was appended without its dot (yielding "<uuid>jpg");
    # join uuid and extension with '.' so the object key is well-formed.
    file_name = str(uuid.uuid4()) + '.' + file.filename.split(".")[-1]
    # Fix: avoid shadowing the ``file`` parameter with its inner stream.
    stream = file.file
    accesskeyid = 'LTAIv816izYZhMkl'
    accesskey = 'Psfru0eOBJVFUELx9AsmLVeNsPnCh1'
    endpoint = 'oss-ap-southeast-1.aliyuncs.com'
    bucket_name = 'cashloan-ly'
    auth = oss2.Auth(accesskeyid, accesskey)
    bucket = oss2.Bucket(auth, endpoint=endpoint, bucket_name=bucket_name)
    bucket.put_object(file_name, stream.read())
    image_url = '%s://%s.%s/%s' % ('http', bucket_name, endpoint, file_name)
    return {'image': image_url}
async def test_save_training_files(self):
    """Save a full set of uploaded training files — including http actions —
    via ``DataUtility.save_training_files`` and assert every returned path
    (plus the root directory) exists on disk.
    """
    # In-memory byte payloads standing in for user-uploaded training files.
    nlu_content = "## intent:greet\n- hey\n- hello".encode()
    stories_content = "## greet\n* greet\n- utter_offer_help\n- action_restart".encode()
    config_content = "language: en\npipeline:\n- name: WhitespaceTokenizer\n- name: RegexFeaturizer\n- name: LexicalSyntacticFeaturizer\n- name: CountVectorsFeaturizer\n- analyzer: char_wb\n max_ngram: 4\n min_ngram: 1\n name: CountVectorsFeaturizer\n- epochs: 5\n name: DIETClassifier\n- name: EntitySynonymMapper\n- epochs: 5\n name: ResponseSelector\npolicies:\n- name: MemoizationPolicy\n- epochs: 5\n max_history: 5\n name: TEDPolicy\n- name: RulePolicy\n- core_threshold: 0.3\n fallback_action_name: action_small_talk\n name: FallbackPolicy\n nlu_threshold: 0.75\n".encode()
    domain_content = "intents:\n- greet\nresponses:\n utter_offer_help:\n - text: 'how may i help you'\nactions:\n- utter_offer_help\n".encode()
    rules_content = "rules:\n\n- rule: Only say `hello` if the user provided a location\n condition:\n - slot_was_set:\n - location: true\n steps:\n - intent: greet\n - action: utter_greet\n".encode()
    http_action_content = "http_actions:\n- action_name: [email protected]\n auth_token: bearer hjklfsdjsjkfbjsbfjsvhfjksvfjksvfjksvf\n http_url: http://www.alphabet.com\n params_list:\n - key: testParam1\n parameter_type: value\n value: testValue1\n - key: testParam2\n parameter_type: slot\n value: testValue1\n request_method: GET\n response: json\n".encode()
    # Wrap each payload in an UploadFile, as FastAPI would for a multipart upload.
    nlu = UploadFile(filename="nlu.yml", file=BytesIO(nlu_content))
    stories = UploadFile(filename="stories.md", file=BytesIO(stories_content))
    config = UploadFile(filename="config.yml", file=BytesIO(config_content))
    domain = UploadFile(filename="domain.yml", file=BytesIO(domain_content))
    rules = UploadFile(filename="rules.yml", file=BytesIO(rules_content))
    http_action = UploadFile(filename="actions.yml", file=BytesIO(http_action_content))
    training_file_loc = await DataUtility.save_training_files(
        nlu, domain, config, stories, rules, http_action)
    # Every component file, and the containing root directory, must have been written.
    assert os.path.exists(training_file_loc['nlu'])
    assert os.path.exists(training_file_loc['config'])
    assert os.path.exists(training_file_loc['stories'])
    assert os.path.exists(training_file_loc['domain'])
    assert os.path.exists(training_file_loc['rules'])
    assert os.path.exists(training_file_loc['http_action'])
    assert os.path.exists(training_file_loc['root'])
async def get_res_predict(
    input_data: UploadFile = File(...),
) -> Optional[UJSONResponse]:
    """Queue an uploaded WAV file's spectrogram for prediction via redis
    pub/sub and return the decoded prediction result.

    Flow: subscribe under a fresh uuid; once the subscription is confirmed
    (payload == 1), push the base64-encoded spectrogram onto the work queue;
    when the worker publishes bytes back, decode them as the result.
    Non-WAV uploads get an error payload instead.
    """
    if (input_data.content_type == "audio/wav"
            or input_data.content_type == "audio/x-wav"):
        ID = str(uuid.uuid4())
        sub = message_queue2.pubsub()
        sub.subscribe(ID)
        for mes in sub.listen():
            responses = mes.get("data")
            if isinstance(responses, bytes):
                # Worker replied: decode the JSON prediction and stop listening.
                res = responses.decode("utf-8")
                res = ujson.loads(res)
                break
            elif responses == 1:
                # Subscription confirmed: publish the request exactly once.
                # Fix: UploadFile.seek is a coroutine — it must be awaited,
                # otherwise the rewind silently never happens.
                await input_data.seek(0)
                input_data = await input_data.read()
                with io.BytesIO() as mem_temp_file:
                    mem_temp_file.write(input_data)
                    mem_temp_file.seek(0)
                    data = make_spec(mem_temp_file)
                    data = base64.b64encode(data).decode("utf-8")
                    Q_DATA = {"id": ID, "audio_feature": data}
                    message_queue2.rpush(settings.QUEUE_NAME_2, ujson.dumps(Q_DATA))
        message_queue2.close()
        sub.close()
        return res
    else:
        return {"Error": "Type Error"}
async def test_create_file():
    """``create_file`` must transcribe the fixture WAV and write both the raw
    and noise-reduced audio files with the expected sample counts."""
    # Fix: keep the source handle in a context manager instead of leaking it;
    # the UploadFile only needs it open while create_file runs.
    with open('./test_wav.wav', 'rb') as wav:
        upload = UploadFile(filename='file', file=wav)
        output = await create_file(upload)
    data, rate = librosa.load('audio.wav', sr=None)
    is_audio_correct = len(data) == 122880
    data, rate = librosa.load('noise_reduce.wav')
    is_noise_reduce_correct = len(data) == 56448
    is_audio_files_correct = all([is_audio_correct, is_noise_reduce_correct])
    assert output == {'message': 'transcribed done'} and is_audio_files_correct
async def create_dame_meme_video(image: UploadFile = File(...)):
    """Render a "damedame" meme video from an uploaded image and return the
    URL of the generated mp4."""
    payload = await image.read()
    # Strip spaces so the name is safe to use as a path component.
    image.filename = image.filename.replace(' ', '')
    input_path = os.path.join(config.image_path, image.filename)
    video_name = str(uuid.uuid4()).replace('-', '') + ".mp4"
    output_path = os.path.join(config.video_path, video_name)
    with open(input_path, "wb") as sink:
        sink.write(payload)
    print("input:", input_path)
    dame.make_damedame(upload_image_path=input_path, output=output_path)
    return {
        "url": url.convert_path_to_url(output_path, base_url="/api/v1/content/")
    }
async def upload_file(file: UploadFile = File(...)):
    """Save an uploaded image under a timestamped name, publish its stored
    location, and report the name plus an error indicator."""
    error = None
    try:
        # Rename to a timestamp so uploads never collide on the client's name.
        file.filename = "{}.jpg".format(time.strftime("%Y%m%d-%H%M%S"))
        destination = await save_upload_file(file)
        await publish(destination)
    except Exception:
        error = "err"
    return jsonable_encoder({
        "image_name": file.filename,
        "error": error,
    })
async def upload(file: UploadFile = File(...), dm=Form(...), lowmem=Form(...)):
    """Persist the upload to the working directory, validate it through
    ``set_global_filedetail``, then remove the temporary copy.

    Returns filename/filetype/"Validated" on success, or an error triple
    carrying the exception text on validation failure.
    """
    lowmem = lowmem == 'True'  # form fields arrive as strings
    filename = file.filename
    content = await file.read()
    # Fix: don't shadow the ``file`` parameter with the open handle.
    with open(filename, 'wb') as out:
        out.write(content)
    try:
        set_global_filedetail(filename=filename, dm=dm, lowmem=lowmem)
        try:
            os.remove(filename)
        # Fix: bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
        # only filesystem errors are expected here.
        except OSError:
            print("Can't delete, file is in current directory...")
        return {
            'filename': filedetail.filename,
            'filetype': filedetail.filetype,
            'verify': "Validated"
        }
    except Exception as e:
        return {'filename': "Error", 'filetype': "Error", 'verify': str(e)}
async def populate():
    """Drop every development collection and seed a fresh database with a
    regular user, an admin, one PDF content, a node referencing it, and a
    group containing both users and the node.

    Returns ``(admin_token, user_token)`` access JWTs for the seeded accounts.
    """
    from models import User, Content, Node, Group
    from fastapi import UploadFile
    warning(f"Setting a fresh development database.")
    # Start from a clean slate — including the GridFS file/chunk collections.
    await me.db.drop_collection("users")
    await me.db.drop_collection("groups")
    await me.db.drop_collection("nodes")
    await me.db.drop_collection("contents")
    await me.db.drop_collection("fs.files")
    await me.db.drop_collection("fs.chunks")
    user = await User.insert_one({
        "email": "*****@*****.**",
        "password_hash": get_password_hash("pass"),
    })
    admin = await User.insert_one({
        "email": "*****@*****.**",
        "password_hash": get_password_hash("pass"),
        "is_admin": True,
    })
    # Attach an example PDF to a content record via the UploadFile wrapper.
    with open("tests/example.pdf", "rb") as f:
        uf = UploadFile(filename="example.pdf", file=f,
                        content_type="application/pdf")
        content = await Content.insert_one({
            "short": "An example pdf.",
            "long": "This is an optional description that can be added to any file to help users.",
            "filetype": "pdf"
        })
        content = await content.upload(uf)
    node = await Node.insert_one({
        "short": "Argument 10",
        "long": "# Description of argument 10 \n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.",
        "contents": [content]
    })
    group = await Group.insert_one({
        "short": "First",
        "long": "Description of the first group",
        "members": [user, admin],
        "nodes": [node]
    })
    # Mint login tokens for both seeded accounts so callers can act as them.
    user_token = create_access_token(
        data={"sub": str(user.id)}
    )
    admin_token = create_access_token(
        data={"sub": str(admin.id)}
    )
    return admin_token, user_token
async def predict_scene(image: UploadFile = File(...)):
    """Classify the uploaded image's scene with the TFLite scene model and
    return the top label, its confidence, and a success flag.

    Raises HTTPException(500) wrapping any processing error.
    """
    try:
        contents = await image.read()
        image = Image.open(io.BytesIO(contents))
        # Resize to the model's expected input dimensions before inference.
        resized_image = image.resize((scene_input_width, scene_input_height),
                                     Image.ANTIALIAS)
        results = classify_image(scene_interpreter, image=resized_image)
        label_id, prob = results[0]
        data = {}
        data["label"] = scene_labels[label_id]
        data["confidence"] = prob
        data["success"] = True
        return data
    # Fix: the bare ``except:`` + sys.exc_info() also caught SystemExit /
    # KeyboardInterrupt; catch Exception and bind the error directly.
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
async def send_internal(subject: str, recipients: List[str], body: str,
                        subtype: Optional[str] = None,
                        file_attachments: Optional[List[str]] = None):
    """Build a MessageSchema for the given recipients and attachment paths,
    then hand it to ``send_internal_internal`` for delivery."""
    # None (the safe non-mutable default) means "no attachments".
    attachments = [] if file_attachments is None else file_attachments
    to_addresses = [EmailStr(address) for address in recipients]
    uploads = [UploadFile(path) for path in attachments]
    message = MessageSchema(
        subject=subject,
        recipients=to_addresses,
        body=body,
        subtype=subtype,
        attachments=uploads)
    return await send_internal_internal(message)
async def predict_face(image: UploadFile = File(...)):
    """Detect faces in the uploaded image with the TFLite face model and
    return bounding boxes (scaled to the original image's pixels) for every
    detection at or above MIN_CONFIDENCE.

    Raises HTTPException(500) wrapping any processing error.
    """
    try:
        contents = await image.read()
        image = Image.open(io.BytesIO(contents))
        image_width = image.size[0]
        image_height = image.size[1]
        # Format data and send to interpreter
        resized_image = image.resize((face_input_width, face_input_height),
                                     Image.ANTIALIAS)
        input_data = np.expand_dims(resized_image, axis=0)
        face_interpreter.set_tensor(face_input_details[0]["index"], input_data)
        # Process image and get predictions
        face_interpreter.invoke()
        boxes = face_interpreter.get_tensor(face_output_details[0]["index"])[0]
        classes = face_interpreter.get_tensor(
            face_output_details[1]["index"])[0]
        scores = face_interpreter.get_tensor(
            face_output_details[2]["index"])[0]
        data = {}
        faces = []
        for i in range(len(scores)):
            if not classes[i] == 0:  # Face
                continue
            single_face = {}
            single_face["userid"] = "unknown"
            single_face["confidence"] = float(scores[i])
            # Model boxes come back as fractions [y_min, x_min, y_max, x_max];
            # scale them to the original image's pixel dimensions.
            single_face["y_min"] = int(float(boxes[i][0]) * image_height)
            single_face["x_min"] = int(float(boxes[i][1]) * image_width)
            single_face["y_max"] = int(float(boxes[i][2]) * image_height)
            single_face["x_max"] = int(float(boxes[i][3]) * image_width)
            if single_face["confidence"] < MIN_CONFIDENCE:
                continue
            faces.append(single_face)
        data["predictions"] = faces
        data["success"] = True
        return data
    # Fix: the bare ``except:`` + sys.exc_info() also caught SystemExit /
    # KeyboardInterrupt; catch Exception and bind the error directly.
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))