def start(config_file, url_root="./translator", host="0.0.0.0", port=5000,
          debug=True, use_host="localhost", use_port=8501,
          use_path="v1/models/universal_encoder:predict",
          sentence_separator=""):
    """Start a Flask HTTP server exposing the translation service.

    Args:
        config_file: path to the TranslationServer model configuration.
        url_root: prefix prepended to every registered route.
        host: bind address for the Flask app.
        port: bind port for the Flask app.
        debug: run Flask in debug mode.
        use_host, use_port, use_path, sentence_separator: accepted for
            backward compatibility; not used by this implementation.
    """

    def prefix_route(route_function, prefix='', mask='{0}{1}'):
        # Wrap app.route so every registered rule is prefixed with url_root.
        def newroute(route, *args, **kwargs):
            return route_function(mask.format(prefix, route), *args, **kwargs)
        return newroute

    app = Flask(__name__)
    app.route = prefix_route(app.route, url_root)
    translation_server = TranslationServer()
    translation_server.start(config_file)

    @app.route('/models', methods=['GET'])
    def get_models():
        """List the models known to the translation server."""
        out = translation_server.list_models()
        return jsonify(out)

    @app.route('/clone_model/<int:model_id>', methods=['POST'])
    def clone_model(model_id):
        """Clone an existing model, optionally with overridden options."""
        out = {}
        data = request.get_json(force=True)
        timeout = -1
        if 'timeout' in data:
            timeout = data['timeout']
            del data['timeout']
        opt = data.get('opt', None)
        try:
            model_id, load_time = translation_server.clone_model(
                model_id, opt, timeout)
        except ServerModelError as e:
            out['status'] = STATUS_ERROR
            out['error'] = str(e)
        else:
            out['status'] = STATUS_OK
            out['model_id'] = model_id
            out['load_time'] = load_time
        return jsonify(out)

    @app.route('/unload_model/<int:model_id>', methods=['GET'])
    def unload_model(model_id):
        """Unload a model from memory; reports failure in the payload."""
        out = {"model_id": model_id}
        try:
            translation_server.unload_model(model_id)
            out['status'] = STATUS_OK
        except Exception as e:
            out['status'] = STATUS_ERROR
            out['error'] = str(e)
        return jsonify(out)

    @app.route('/translate', methods=['POST'])
    def translate():
        """Translate a JSON list of {'src': ..., 'id': ...} items."""
        inputs = request.get_json(force=True)
        out = {}
        try:
            translation, scores, n_best, times = translation_server.run(inputs)
            # JSON cannot carry numpy arrays; convert any ndarray sources
            # back to plain lists before echoing them in the response.
            if isinstance(inputs[0]['src'], np.ndarray):
                for i in range(len(translation)):
                    if isinstance(inputs[i]['src'], np.ndarray):
                        inputs[i]['src'] = inputs[i]['src'].tolist()
            # translation may hold more hypotheses than inputs (n_best > 1),
            # so the source index is clamped to the last input.
            out = [[{
                "src": inputs[min(len(inputs) - 1, i)]['src'],
                "tgt": translation[i],
                "n_best": n_best,
                "pred_score": scores[i]
            } for i in range(len(translation))]]
        except ServerModelError as e:
            out['error'] = str(e)
            out['status'] = STATUS_ERROR
        return jsonify(out)

    @app.route('/to_cpu/<int:model_id>', methods=['GET'])
    def to_cpu(model_id):
        """Move the given model's weights to CPU."""
        out = {'model_id': model_id}
        translation_server.models[model_id].to_cpu()
        out['status'] = STATUS_OK
        return jsonify(out)

    @app.route('/to_gpu/<int:model_id>', methods=['GET'])
    def to_gpu(model_id):
        """Move the given model's weights to GPU."""
        out = {'model_id': model_id}
        translation_server.models[model_id].to_gpu()
        out['status'] = STATUS_OK
        return jsonify(out)

    app.run(debug=debug, host=host, port=port,
            use_reloader=False, threaded=True)
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
from onmt.translate import TranslationServer, ServerModelError

STATUS_OK = "ok"
STATUS_ERROR = "error"

app = Flask(__name__)

# Server-wide configuration and the shared translation backend.
config_file = "./available_models/conf.json"
url_root = "/translator"
ip = "0.0.0.0"
port = 5000

translation_server = TranslationServer()
translation_server.start(config_file)

# Allow cross-origin requests so browser clients on other hosts can call us.
CORS(app)
app.config["CORS_HEADERS"] = "Content-Type"


@app.route("/models", methods=["GET"])
def get_models():
    """List the models known to the translation server."""
    out = translation_server.list_models()
    return jsonify(out)


@app.route("/health", methods=["GET"])
def health():
    """Liveness probe: always reports STATUS_OK."""
    out = {}
    out["status"] = STATUS_OK
    # Bug fix: the route produced no response (Flask raises TypeError
    # when a view returns None); matches the /health handlers elsewhere
    # in this file.
    return jsonify(out)
def start(config_file, url_root="./translator", host="0.0.0.0", port=5000,
          debug=True):
    """Launch the Flask REST front-end for the translation server.

    Loads the models described by ``config_file`` and serves them until
    the process is terminated.
    """

    def prefix_route(route_function, prefix="", mask="{0}{1}"):
        """Return an ``app.route`` replacement that prefixes each rule."""
        def prefixed(rule, *args, **kwargs):
            return route_function(mask.format(prefix, rule), *args, **kwargs)
        return prefixed

    app = Flask(__name__)
    app.route = prefix_route(app.route, url_root)
    translation_server = TranslationServer()
    translation_server.start(config_file)

    @app.route("/models", methods=["GET"])
    def get_models():
        """List the available models."""
        return jsonify(translation_server.list_models())

    @app.route("/clone_model/<int:model_id>", methods=["POST"])
    def clone_model(model_id):
        """Clone a loaded model, optionally overriding its options."""
        out = {}
        data = request.get_json(force=True)
        timeout = data.pop("timeout", -1)
        opt = data.get("opt", None)
        try:
            model_id, load_time = translation_server.clone_model(
                model_id, opt, timeout)
        except ServerModelError as e:
            out["status"] = STATUS_ERROR
            out["error"] = str(e)
        else:
            out["status"] = STATUS_OK
            out["model_id"] = model_id
            out["load_time"] = load_time
        return jsonify(out)

    @app.route("/unload_model/<int:model_id>", methods=["GET"])
    def unload_model(model_id):
        """Unload a model; errors are reported in the JSON payload."""
        out = {"model_id": model_id}
        try:
            translation_server.unload_model(model_id)
        except Exception as e:
            out["status"] = STATUS_ERROR
            out["error"] = str(e)
        else:
            out["status"] = STATUS_OK
        return jsonify(out)

    @app.route("/translate", methods=["POST"])
    def translate():
        """Translate a JSON batch of {'src': ..., 'id': ...} items."""
        inputs = request.get_json(force=True)
        out = {}
        try:
            translation, scores, n_best, times = translation_server.run(inputs)
            assert len(translation) == len(inputs)
            assert len(scores) == len(inputs)
            results = []
            for idx, hyp in enumerate(translation):
                results.append({
                    "src": inputs[idx]["src"],
                    "tgt": hyp,
                    "n_best": n_best,
                    "pred_score": scores[idx],
                })
            out = [results]
        except ServerModelError as e:
            out["error"] = str(e)
            out["status"] = STATUS_ERROR
        return jsonify(out)

    @app.route("/to_cpu/<int:model_id>", methods=["GET"])
    def to_cpu(model_id):
        """Move the model's weights to CPU."""
        out = {"model_id": model_id}
        translation_server.models[model_id].to_cpu()
        out["status"] = STATUS_OK
        return jsonify(out)

    @app.route("/to_gpu/<int:model_id>", methods=["GET"])
    def to_gpu(model_id):
        """Move the model's weights to GPU."""
        out = {"model_id": model_id}
        translation_server.models[model_id].to_gpu()
        out["status"] = STATUS_OK
        return jsonify(out)

    app.run(debug=debug, host=host, port=port, use_reloader=False,
            threaded=True)
class translationPipeline(object):
    """Basic translation pipeline.

    Preprocesses the input (sentence + word tokenization, model-specific
    normalization), translates sentence by sentence through a
    TranslationServer, and returns the joined result.
    """

    def __init__(self, pathToWordTokModule, pathToSentTokModule, config):
        # basicTokenizer handles both sentence and word segmentation.
        self.config = config
        self.tokenizer = basicTokenizer(pathToWordTokModule,
                                        pathToSentTokModule)
        self.translationServer = TranslationServer()
        self.translationServer.start(self.config)

    def preprocessingInput(self, inputText):
        """Tokenize the raw request payload.

        Args:
            inputText: list whose first element is a dict with 'src'
                (the text) and 'id' (the model id).

        Returns:
            Tuple ``(listOfTokenizedSentences, mtSystemID, inputError)``.
        """
        print('input Sentence ', inputText)
        paragraphToTokenizedSent = self.tokenizer.sentTokenizer(
            str(inputText[0]['src']))
        print('Sentence tokenization of input: ', paragraphToTokenizedSent)
        listOfTokenizedSentences = self.tokenizer.wordTokenizer(
            paragraphToTokenizedSent)
        print('Word Tokenization of sentence segmented input: ',
              listOfTokenizedSentences)
        mtSystemID = int(inputText[0]['id'])
        listOfTokenizedSentences = self.specialPreprocessingForModels(
            listOfTokenizedSentences, mtSystemID)
        inputError = self.tokenizer.checkForErrors(listOfTokenizedSentences)
        return listOfTokenizedSentences, mtSystemID, inputError

    def specialPreprocessingForModels(self, listOfTokenizedSentences, modelID):
        """Apply model-specific preprocessing steps.

        Some models use slightly different preprocessing; currently only
        model 100 requires lowercased input. Other model ids pass through
        unchanged.
        """
        if modelID == 100:
            listOfTokenizedSentences = [sentence.lower()
                                        for sentence in listOfTokenizedSentences]
        return listOfTokenizedSentences

    def translate(self, listOfTokenizedSentences, mtSystemID):
        """Translate each tokenized sentence and join results with newlines.

        Returns:
            Dict with 'src' and 'tgt' newline-joined strings. On a
            ServerModelError it also carries 'error' and 'status' keys.
        """
        inputToserver = [{'id': mtSystemID}]
        outputFromServer = {}
        print('after every tokenization: ', listOfTokenizedSentences)
        for sentence in listOfTokenizedSentences:
            inputToserver[0]['src'] = sentence
            try:
                translation, scores, n_best, times = \
                    self.translationServer.run(inputToserver)
                assert len(translation) == len(inputToserver)
                assert len(scores) == len(inputToserver)
                output = [{"src": inputToserver[i]['src'],
                           "tgt": translation[i],
                           "n_best": n_best,
                           "pred_score": scores[i]}
                          for i in range(len(translation))]
            except ServerModelError as e:
                # Bug fix: the original fell through after the handler and
                # indexed output[0] on a plain dict, raising KeyError;
                # record the error and skip this sentence instead.
                outputFromServer['error'] = str(e)
                outputFromServer['status'] = STATUS_ERROR
                continue
            outputFromServer.setdefault('src', []).append(output[0]['src'])
            outputFromServer.setdefault('tgt', []).append(output[0]['tgt'])
        # Robustness: default to an empty list when nothing translated,
        # instead of raising KeyError on join.
        outputFromServer['src'] = "\n".join(outputFromServer.get('src', []))
        outputFromServer['tgt'] = "\n".join(outputFromServer.get('tgt', []))
        print('Translation Output:', outputFromServer['tgt'])
        return outputFromServer
def start(config_file, url_root="./translator", host="0.0.0.0", port=5000,
          debug=True):
    """Run the REST front-end for the translation server.

    Translation hypotheses are grouped by rank: response element ``k``
    contains the ``k``-th best hypothesis for every input sentence.
    """

    def prefix_route(route_function, prefix='', mask='{0}{1}'):
        """Return an ``app.route`` replacement that prefixes every rule."""
        def prefixed(rule, *args, **kwargs):
            return route_function(mask.format(prefix, rule), *args, **kwargs)
        return prefixed

    app = Flask(__name__)
    app.route = prefix_route(app.route, url_root)
    translation_server = TranslationServer()
    translation_server.start(config_file)

    @app.route('/models', methods=['GET'])
    def get_models():
        """List the available models."""
        return jsonify(translation_server.list_models())

    @app.route('/health', methods=['GET'])
    def health():
        """Liveness probe: always reports STATUS_OK."""
        return jsonify({'status': STATUS_OK})

    @app.route('/clone_model/<int:model_id>', methods=['POST'])
    def clone_model(model_id):
        """Clone a loaded model, optionally overriding its options."""
        out = {}
        data = request.get_json(force=True)
        timeout = data.pop('timeout', -1)
        opt = data.get('opt', None)
        try:
            model_id, load_time = translation_server.clone_model(
                model_id, opt, timeout)
        except ServerModelError as e:
            out['status'] = STATUS_ERROR
            out['error'] = str(e)
        else:
            out['status'] = STATUS_OK
            out['model_id'] = model_id
            out['load_time'] = load_time
        return jsonify(out)

    @app.route('/unload_model/<int:model_id>', methods=['GET'])
    def unload_model(model_id):
        """Unload a model; errors are reported in the JSON payload."""
        out = {"model_id": model_id}
        try:
            translation_server.unload_model(model_id)
        except Exception as e:
            out['status'] = STATUS_ERROR
            out['error'] = str(e)
        else:
            out['status'] = STATUS_OK
        return jsonify(out)

    @app.route('/translate', methods=['POST'])
    def translate():
        """Translate a JSON batch; responses are grouped by n_best rank."""
        inputs = request.get_json(force=True)
        out = {}
        try:
            trans, scores, n_best, times = translation_server.run(inputs)
            assert len(trans) == len(inputs) * n_best
            assert len(scores) == len(inputs) * n_best
            # Hypothesis i belongs to input i // n_best at rank i % n_best.
            out = [[] for _ in range(n_best)]
            for idx, (hyp, score) in enumerate(zip(trans, scores)):
                out[idx % n_best].append({
                    "src": inputs[idx // n_best]['src'],
                    "tgt": hyp,
                    "n_best": n_best,
                    "pred_score": score,
                })
        except ServerModelError as e:
            out['error'] = str(e)
            out['status'] = STATUS_ERROR
        return jsonify(out)

    @app.route('/to_cpu/<int:model_id>', methods=['GET'])
    def to_cpu(model_id):
        """Move the model's weights to CPU."""
        out = {'model_id': model_id}
        translation_server.models[model_id].to_cpu()
        out['status'] = STATUS_OK
        return jsonify(out)

    @app.route('/to_gpu/<int:model_id>', methods=['GET'])
    def to_gpu(model_id):
        """Move the model's weights to GPU."""
        out = {'model_id': model_id}
        translation_server.models[model_id].to_gpu()
        out['status'] = STATUS_OK
        return jsonify(out)

    app.run(debug=debug, host=host, port=port, use_reloader=False,
            threaded=True)
def _load_model():
    """Initialise the module-level translation backend.

    Creates a fresh TranslationServer, loads the models listed in the
    default configuration file, and publishes it as the module-level
    ``translation_server``.
    """
    global translation_server
    translation_server = TranslationServer()
    translation_server.start('./available_models/conf.json')
def start(config_file, url_root="./translator", host="0.0.0.0", port=5000,
          debug=False):
    """Serve the translation REST API via waitress.

    When ``debug`` is true, requests and responses are mirrored into a
    rotating ``debug_requests.log`` file.
    """

    def prefix_route(route_function, prefix='', mask='{0}{1}'):
        """Return an ``app.route`` replacement that prefixes every rule."""
        def prefixed(rule, *args, **kwargs):
            return route_function(mask.format(prefix, rule), *args, **kwargs)
        return prefixed

    if debug:
        # Request/response logging goes to a rotating file, ~1 MB each.
        logger = logging.getLogger("main")
        log_format = logging.Formatter(
            "[%(asctime)s %(levelname)s] %(message)s")
        file_handler = RotatingFileHandler("debug_requests.log",
                                           maxBytes=1000000, backupCount=10)
        file_handler.setFormatter(log_format)
        logger.addHandler(file_handler)

    app = Flask(__name__)
    app.route = prefix_route(app.route, url_root)
    translation_server = TranslationServer()
    translation_server.start(config_file)

    @app.route('/models', methods=['GET'])
    def get_models():
        """List the available models."""
        return jsonify(translation_server.list_models())

    @app.route('/health', methods=['GET'])
    def health():
        """Liveness probe: always reports STATUS_OK."""
        return jsonify({'status': STATUS_OK})

    @app.route('/clone_model/<int:model_id>', methods=['POST'])
    def clone_model(model_id):
        """Clone a loaded model, optionally overriding its options."""
        out = {}
        data = request.get_json(force=True)
        timeout = data.pop('timeout', -1)
        opt = data.get('opt', None)
        try:
            model_id, load_time = translation_server.clone_model(
                model_id, opt, timeout)
        except ServerModelError as e:
            out['status'] = STATUS_ERROR
            out['error'] = str(e)
        else:
            out['status'] = STATUS_OK
            out['model_id'] = model_id
            out['load_time'] = load_time
        return jsonify(out)

    @app.route('/unload_model/<int:model_id>', methods=['GET'])
    def unload_model(model_id):
        """Unload a model; errors are reported in the JSON payload."""
        out = {"model_id": model_id}
        try:
            translation_server.unload_model(model_id)
        except Exception as e:
            out['status'] = STATUS_ERROR
            out['error'] = str(e)
        else:
            out['status'] = STATUS_OK
        return jsonify(out)

    @app.route('/translate', methods=['POST'])
    def translate():
        """Translate a JSON batch; responses are grouped by n_best rank,
        with word alignments attached when the model provides them."""
        inputs = request.get_json(force=True)
        if debug:
            logger.info(inputs)
        out = {}
        try:
            trans, scores, n_best, _, aligns = translation_server.run(inputs)
            assert len(trans) == len(inputs) * n_best
            assert len(scores) == len(inputs) * n_best
            assert len(aligns) == len(inputs) * n_best
            # Hypothesis i belongs to input i // n_best at rank i % n_best.
            out = [[] for _ in range(n_best)]
            for idx, hyp in enumerate(trans):
                entry = {
                    "src": inputs[idx // n_best]['src'],
                    "tgt": hyp,
                    "n_best": n_best,
                    "pred_score": scores[idx],
                }
                if len(aligns[idx]) > 0 and aligns[idx][0] is not None:
                    entry["align"] = aligns[idx]
                out[idx % n_best].append(entry)
        except ServerModelError as e:
            # Drop the failing model so the next request can reload it.
            model_id = inputs[0].get("id")
            if debug:
                logger.warning("Unload model #{} "
                               "because of an error".format(model_id))
            translation_server.models[model_id].unload()
            out['error'] = str(e)
            out['status'] = STATUS_ERROR
        if debug:
            logger.info(out)
        return jsonify(out)

    @app.route('/to_cpu/<int:model_id>', methods=['GET'])
    def to_cpu(model_id):
        """Move the model's weights to CPU."""
        out = {'model_id': model_id}
        translation_server.models[model_id].to_cpu()
        out['status'] = STATUS_OK
        return jsonify(out)

    @app.route('/to_gpu/<int:model_id>', methods=['GET'])
    def to_gpu(model_id):
        """Move the model's weights to GPU."""
        out = {'model_id': model_id}
        translation_server.models[model_id].to_gpu()
        out['status'] = STATUS_OK
        return jsonify(out)

    serve(app, host=host, port=port)
def start(config_file, url_root="/translator", host="0.0.0.0", port=3003,
          debug=True):
    """Start the anuvaad translation Flask service.

    Wires up MongoDB, CORS, an (optional, currently disabled) kafka
    consumer thread, and the OpenNMT TranslationServer behind a REST API.
    """

    def prefix_route(route_function, prefix='', mask='{0}{1}'):
        # Wrap app.route so every registered rule is prefixed with url_root.
        def newroute(route, *args, **kwargs):
            return route_function(mask.format(prefix, route), *args, **kwargs)
        return newroute

    app = Flask(__name__)
    CORS(app)
    app.config.from_pyfile(mongo_config_dir)
    db.init_app(app)
    app.route = prefix_route(app.route, url_root)
    translation_server = TranslationServer()
    translation_server.start(config_file)

    def kafka_function():
        # Consume document-translation jobs from the configured kafka topics.
        logger.info('starting kafka from nmt-server on thread-1')
        doc_translator(translation_server,
                       [kafka_topic[0]['consumer'],
                        kafka_topic[1]['consumer'],
                        kafka_topic[2]['consumer']])

    if bootstrap_server_boolean:
        t1 = threading.Thread(target=kafka_function)
        # t1.start()  # consumption intentionally disabled; thread is prepared only

    @app.route('/models', methods=['GET'])
    def get_models():
        """List the models known to the translation server."""
        out = {}
        try:
            out['status'] = statusCode["SUCCESS"]
            out['response_body'] = translation_server.list_models()
        except Exception:
            # Bug fix: was a bare ``except:``, which also swallowed
            # SystemExit and KeyboardInterrupt.
            out['status'] = statusCode["SYSTEM_ERR"]
            logger.info("Unexpected error: %s" % sys.exc_info()[0])
        return jsonify(out)

    @app.route('/clone_model/<int:model_id>', methods=['POST'])
    def clone_model(model_id):
        """Clone a loaded model, optionally overriding its options."""
        out = {}
        data = request.get_json(force=True)
        timeout = -1
        if 'timeout' in data:
            timeout = data['timeout']
            del data['timeout']
        opt = data.get('opt', None)
        try:
            model_id, load_time = translation_server.clone_model(
                model_id, opt, timeout)
        except ServerModelError as e:
            out['status'] = STATUS_ERROR
            out['error'] = str(e)
        else:
            out['status'] = STATUS_OK
            out['model_id'] = model_id
            out['load_time'] = load_time
        return jsonify(out)

    @app.route('/unload_model/<int:model_id>', methods=['GET'])
    def unload_model(model_id):
        """Unload a model; errors are reported in the JSON payload."""
        out = {"model_id": model_id}
        try:
            translation_server.unload_model(model_id)
            out['status'] = STATUS_OK
        except Exception as e:
            out['status'] = STATUS_ERROR
            out['error'] = str(e)
        return jsonify(out)

    @app.route('/translate-anuvaad', methods=['POST'])
    def translate():
        """Translate a batch through translate_util; rejects empty payloads."""
        inputs = request.get_json(force=True)
        if len(inputs) > 0:
            logger.info("Making translate-anuvaad API call")
            logger.info(entry_exit_log(LOG_TAGS["input"], inputs))
            out = translate_util.translate_func(inputs, translation_server)
            logger.info("out from translate_func-trans_util done{}".format(out))
            logger.info(entry_exit_log(LOG_TAGS["output"], out))
            return jsonify(out)
        else:
            logger.info("null inputs in request in translate-anuvaad API")
            return jsonify({'status': statusCode["INVALID_API_REQUEST"]})

    @app.route('/to_cpu/<int:model_id>', methods=['GET'])
    def to_cpu(model_id):
        """Move the model's weights to CPU."""
        out = {'model_id': model_id}
        translation_server.models[model_id].to_cpu()
        out['status'] = STATUS_OK
        return jsonify(out)

    @app.route('/to_gpu/<int:model_id>', methods=['GET'])
    def to_gpu(model_id):
        """Move the model's weights to GPU."""
        out = {'model_id': model_id}
        translation_server.models[model_id].to_gpu()
        out['status'] = STATUS_OK
        return jsonify(out)

    app.run(debug=debug, host=host, port=port, use_reloader=False,
            threaded=True)
def start(config_file, url_root="./translator", host="0.0.0.0", port=5000,
          debug=True):
    """Expose the translation server over HTTP.

    Registers the model-management and translation routes under
    ``url_root`` and blocks serving requests.
    """

    def prefix_route(route_function, prefix='', mask='{0}{1}'):
        """Return an ``app.route`` replacement that prefixes each rule."""
        def prefixed(rule, *args, **kwargs):
            return route_function(mask.format(prefix, rule), *args, **kwargs)
        return prefixed

    app = Flask(__name__)
    app.route = prefix_route(app.route, url_root)
    translation_server = TranslationServer()
    translation_server.start(config_file)

    @app.route('/models', methods=['GET'])
    def get_models():
        """List the available models."""
        return jsonify(translation_server.list_models())

    @app.route('/clone_model/<int:model_id>', methods=['POST'])
    def clone_model(model_id):
        """Clone a loaded model, optionally overriding its options."""
        out = {}
        data = request.get_json(force=True)
        timeout = data.pop('timeout', -1)
        opt = data.get('opt', None)
        try:
            model_id, load_time = translation_server.clone_model(
                model_id, opt, timeout)
        except ServerModelError as e:
            out['status'] = STATUS_ERROR
            out['error'] = str(e)
        else:
            out['status'] = STATUS_OK
            out['model_id'] = model_id
            out['load_time'] = load_time
        return jsonify(out)

    @app.route('/unload_model/<int:model_id>', methods=['GET'])
    def unload_model(model_id):
        """Unload a model; errors are reported in the JSON payload."""
        out = {"model_id": model_id}
        try:
            translation_server.unload_model(model_id)
        except Exception as e:
            out['status'] = STATUS_ERROR
            out['error'] = str(e)
        else:
            out['status'] = STATUS_OK
        return jsonify(out)

    @app.route('/translate', methods=['POST'])
    def translate():
        """Translate a JSON batch of {'src': ..., 'id': ...} items."""
        inputs = request.get_json(force=True)
        out = {}
        try:
            translation, scores, n_best, times = translation_server.run(inputs)
            assert len(translation) == len(inputs)
            assert len(scores) == len(inputs)
            hypotheses = []
            for src_item, tgt, score in zip(inputs, translation, scores):
                hypotheses.append({"src": src_item['src'],
                                   "tgt": tgt,
                                   "n_best": n_best,
                                   "pred_score": score})
            out = [hypotheses]
        except ServerModelError as e:
            out['error'] = str(e)
            out['status'] = STATUS_ERROR
        return jsonify(out)

    @app.route('/to_cpu/<int:model_id>', methods=['GET'])
    def to_cpu(model_id):
        """Move the model's weights to CPU."""
        out = {'model_id': model_id}
        translation_server.models[model_id].to_cpu()
        out['status'] = STATUS_OK
        return jsonify(out)

    @app.route('/to_gpu/<int:model_id>', methods=['GET'])
    def to_gpu(model_id):
        """Move the model's weights to GPU."""
        out = {'model_id': model_id}
        translation_server.models[model_id].to_gpu()
        out['status'] = STATUS_OK
        return jsonify(out)

    app.run(debug=debug, host=host, port=port, use_reloader=False,
            threaded=True)
import logging logging.getLogger("flask_ask").setLevel(logging.DEBUG) STATUS_OK = "ok" STATUS_ERROR = "error" app = Flask(__name__) api = Api(app) parser = reqparse.RequestParser() translation_server = TranslationServer() translation_server.start('conf.json') translation_server.models[0].to_cpu() print(translation_server.list_models()) class Service(Resource): def NLG(self,input): #insert your NLG solution here output="you said "+input inputs = [{"id": 0, "src": input}] try: translation, scores, n_best, times = translation_server.run(inputs) out = [[{"src": inputs[i]['src'], "tgt": translation[i], "n_best": n_best,