def recognize_batch():
    """Batch-recognize a WAV payload and stream results as JSON.

    Query args select the model (``lang``, default ``en-GB``) and language
    model (``lm``, default ``default``); the raw request body is the audio.
    Each recognition result is streamed back as a JSON document.

    Returns a streaming 200 Response on success; 400 on a missing
    Content-Type header or corrupted input; 503 when no worker is free.
    """
    data = {
        "model": request.args.get("lang", "en-GB"),
        "lm": request.args.get("lm", "default"),
        "wav": request.data
    }

    def generate(response):
        # ensure_ascii=False keeps non-ASCII transcripts readable.
        for result in response:
            yield json.dumps(result, ensure_ascii=False)

    # Sentinel so the finally clause is safe even if worker creation raises
    # (the original closed twice and hit NameError on creation failure).
    worker = None
    try:
        worker = create_frontend_worker(os.environ['MASTER_ADDR'])
        response = worker.recognize_batch(data, request.headers)
        return Response(stream_with_context(generate(response)))
    except MissingHeaderError:
        return jsonify({
            "status": "error",
            "message": "Missing header Content-Type"
        }), 400
    except NoWorkerAvailableError:
        return jsonify({
            "status": "error",
            "message": "No worker available"
        }), 503
    except WorkerInternalError:
        return jsonify({
            "status": "error",
            "message": "Input file corrupted"
        }), 400
    finally:
        # Close exactly once, and only if creation succeeded.
        if worker is not None:
            worker.close()
def begin_online_recognition(message):
    """Attach a recognition worker to this socket session.

    Creates a frontend worker, connects it to the model named in
    ``message["model"]``, and stores it in the session. On failure emits a
    ``server_error`` event to the client.
    """
    # Sentinel lets the error path distinguish "creation failed" from
    # "connect failed"; without it a worker created before connect_to_worker
    # raised was leaked (never closed).
    worker = None
    try:
        worker = create_frontend_worker(os.environ['MASTER_ADDR'])
        worker.connect_to_worker(message["model"])
        session["worker"] = worker
    except NoWorkerAvailableError:
        if worker is not None:
            worker.close()
        emit('server_error', {"status": "error", "message": "No worker available"})
def begin_online_recognition(message):
    """Attach a recognition worker to this socket session and mark it connected.

    Creates a frontend worker, connects it to the model named in
    ``message["model"]``, and stores it (plus a ``connected`` flag) in the
    session. On failure emits a ``server_error`` event and releases the worker.
    """
    # Sentinel: if create_frontend_worker itself raised, the original
    # except clause called close() on an unbound name -> NameError that
    # masked the real failure.
    worker = None
    try:
        worker = create_frontend_worker(os.environ['MASTER_ADDR'])
        worker.connect_to_worker(message["model"])
        session["worker"] = worker
        session["connected"] = True
    except NoWorkerAvailableError:
        emit('server_error', {"status": "error", "message": "No worker available"})
        if worker is not None:
            worker.close()
def recognize_batch():
    """Batch-recognize a WAV payload and return the full result as JSON.

    The model is selected by the ``lang`` query arg (default ``en-GB``);
    the raw request body is the audio. Returns 200 with the recognition
    result, 400 on a missing Content-Type header, 503 when no worker is free.
    """
    data = {
        "model": request.args.get("lang", "en-GB"),
        "wav": request.data
    }
    # Sentinel + finally: the original never closed the worker on any path,
    # leaking its socket on every request.
    worker = None
    try:
        worker = create_frontend_worker(os.environ['MASTER_ADDR'])
        return jsonify(worker.recognize_batch(data, request.headers))
    except MissingHeaderError:
        return jsonify({"status": "error", "message": "Missing header Content-Type"}), 400
    except NoWorkerAvailableError:
        return jsonify({"status": "error", "message": "No worker available"}), 503
    finally:
        if worker is not None:
            worker.close()
def recognize_batch():
    """Batch-recognize a WAV payload and return the full result as JSON.

    The model is selected by the ``lang`` query arg (default ``en-GB``);
    the raw request body is the audio. Returns 200 with the recognition
    result, 400 on a missing Content-Type header, 503 when no worker is free.
    """
    data = {"model": request.args.get("lang", "en-GB"), "wav": request.data}
    # Sentinel + finally: the original never closed the worker on any path,
    # leaking its socket on every request.
    worker = None
    try:
        worker = create_frontend_worker(os.environ['MASTER_ADDR'])
        return jsonify(worker.recognize_batch(data, request.headers))
    except MissingHeaderError:
        return jsonify({
            "status": "error",
            "message": "Missing header Content-Type"
        }), 400
    except NoWorkerAvailableError:
        return jsonify({
            "status": "error",
            "message": "No worker available"
        }), 503
    finally:
        if worker is not None:
            worker.close()
def recognize_batch():
    """Batch-recognize a WAV payload and stream results as JSON.

    Query args select the model (``lang``, default ``en-GB``) and language
    model (``lm``, default ``default``); the raw request body is the audio.
    Each recognition result is streamed back as a JSON document.

    Returns a streaming 200 Response on success; 400 on a missing
    Content-Type header; 503 when no worker is free.
    """
    data = {
        "model": request.args.get("lang", "en-GB"),
        "lm": request.args.get("lm", "default"),
        "wav": request.data
    }

    def generate(response):
        for result in response:
            yield json.dumps(result)

    # Sentinel so the finally clause is safe even if worker creation raises
    # (the original closed twice and hit NameError on creation failure).
    worker = None
    try:
        worker = create_frontend_worker(os.environ['MASTER_ADDR'])
        response = worker.recognize_batch(data, request.headers)
        return Response(stream_with_context(generate(response)))
    except MissingHeaderError:
        return jsonify({"status": "error", "message": "Missing header Content-Type"}), 400
    except NoWorkerAvailableError:
        return jsonify({"status": "error", "message": "No worker available"}), 503
    finally:
        # Close exactly once, and only if creation succeeded.
        if worker is not None:
            worker.close()
def test_can_create_frontend_worker(self):
    """The factory should hand back a FrontendWorker for an IPC address."""
    created = create_frontend_worker("ipc:///tmp/worker")
    self.assertIsInstance(created, FrontendWorker)