Example #1
def __get_tf_server_connection_params__():
    '''
    Returns connection parameters for the TensorFlow server

    :return: Tuple of TF server name and server port
    '''
    server_name = utils.get_env_var_setting('TF_SERVER_NAME', settings.DEFAULT_TF_SERVER_NAME)
    server_port = utils.get_env_var_setting('TF_SERVER_PORT', settings.DEFAULT_TF_SERVER_PORT)

    return server_name, server_port
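
This helper leans on utils.get_env_var_setting, which recurs in every example below but is not shown on this page. A minimal sketch of what it plausibly looks like, assuming a straightforward environment lookup with a default fallback (the implementation here is hypothetical):

import os

def get_env_var_setting(name, default):
    # Hypothetical sketch: read the environment variable if it is set,
    # otherwise fall back to the configured default
    return os.environ.get(name, default)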
Example #3
def __get_flask_server_params__():
    '''
    Returns connection parameters of the Flask application

    :return: Triple of server name, server port and debug setting
    '''
    server_name = utils.get_env_var_setting('FLASK_SERVER_NAME', settings.DEFAULT_FLASK_SERVER_NAME)
    server_port = utils.get_env_var_setting('FLASK_SERVER_PORT', settings.DEFAULT_FLASK_SERVER_PORT)

    flask_debug = utils.get_env_var_setting('FLASK_DEBUG', settings.DEFAULT_FLASK_DEBUG)
    # environment values arrive as strings, so '1' means debug on
    flask_debug = (flask_debug == '1')

    return server_name, server_port, flask_debug
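
These values would typically feed straight into Flask's app.run. A usage sketch, assuming a module-level app object (the names here are illustrative, not from this project):

from flask import Flask

app = Flask(__name__)

if __name__ == '__main__':
    server_name, server_port, flask_debug = __get_flask_server_params__()
    # server_port is still a string at this point; Example #4 below
    # pushes the int() cast into the helper itself
    app.run(host=server_name, port=int(server_port), debug=flask_debug)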
Example #4
def __get_flask_server_params__():
    '''
    Returns connection parameters of the Flask application

    :return: Triple of server name, server port and debug setting
    '''
    server_name = utils.get_env_var_setting('FLASK_SERVER_NAME', settings.DEFAULT_FLASK_SERVER_NAME)
    server_port = int(utils.get_env_var_setting('FLASK_SERVER_PORT', settings.DEFAULT_FLASK_SERVER_PORT))
    flask_debug = utils.get_env_var_setting('FLASK_DEBUG', settings.DEFAULT_FLASK_DEBUG)

    # environment values arrive as strings, so '1' means debug on
    flask_debug = (flask_debug == '1')

    return server_name, server_port, flask_debug
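
The only change from Example #3 is the int() cast on server_port, which spares callers from converting the environment string themselves.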
Example #5
def __setup_tokenizer():
    '''
    Creates the SentencePiece tokenizer for the ENDE model

    :return: pyonmttok.Tokenizer instance
    '''
    # the model file lives at e.g. ende/1539080952/assets.extra/wmtende.model
    SP_MODEL = utils.get_env_var_setting(
        'ENDE_MODEL_SENTENCE_PIECE',
        settings.DEFAULT_ENDE_MODEL_SENTENCE_PIECE)
    log.debug('setup tokenizer: ' + SP_MODEL)
    tokenizer = pyonmttok.Tokenizer("none", sp_model_path=SP_MODEL)
    return tokenizer
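
pyonmttok's tokenize returns a (tokens, features) pair, which is why Example #6 below indexes [0]. A quick usage sketch (the model path and the printed output are illustrative):

import pyonmttok

# "none" mode leaves segmentation entirely to the SentencePiece model
tokenizer = pyonmttok.Tokenizer("none", sp_model_path="wmtende.model")
tokens, features = tokenizer.tokenize("Hello world!")
print(tokens)  # e.g. ['▁Hello', '▁world', '!']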
Example #6
def __create_prediction_request__(tokenizer, input_data):
    '''
    Creates prediction request to TensorFlow server for the ENDE model

    :param tokenizer: pyonmttok.Tokenizer used to split the input text
    :param input_data: iterable of input strings to translate
    :return: PredictRequest object
    '''
    # create predict request
    request = predict_pb2.PredictRequest()

    log.debug('create prediction request: tokenizing inputs and '
              'building the tokens/length tensors')

    # model_spec.name selects which served model the request targets
    MODEL_NAME = utils.get_env_var_setting('ENDE_MODEL_NAME',
                                           settings.DEFAULT_ENDE_MODEL_NAME)
    log.debug('using model: ' + MODEL_NAME)
    request.model_spec.name = MODEL_NAME

    # TODO: confirm that the SavedModel's signature_def key is the right
    # signature_name to use here; inspect the export with:
    #   saved_model_cli show --dir yada/ende/1539080952/ --all
    SIGNATURE_NAME = utils.get_env_var_setting(
        'ENDE_MODEL_SIGNATURE_NAME',
        settings.DEFAULT_ENDE_MODEL_SIGNATURE_NAME)
    log.debug('using signature: ' + SIGNATURE_NAME)
    request.model_spec.signature_name = SIGNATURE_NAME

    log.debug('building tokens')
    input_tokens = [tokenizer.tokenize(text)[0] for text in input_data]
    log.debug(type(input_tokens))
    log.debug(input_tokens)

    batch_tokens, lengths, max_length = pad_batch(input_tokens)
    batch_size = len(lengths)
    request.inputs['tokens'].CopyFrom(
        tf.contrib.util.make_tensor_proto(batch_tokens,
                                          shape=(batch_size, max_length)))
    log.debug('building length')
    request.inputs['length'].CopyFrom(
        tf.contrib.util.make_tensor_proto(lengths, shape=(batch_size, )))
    log.debug('request ready to send over gRPC:')
    log.debug(request)

    return request
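
pad_batch is a project helper not shown on this page, and the finished request still has to be sent to TensorFlow Serving. A hedged sketch of both, with pad_batch's behavior inferred purely from how it is called above and send_prediction_request being an illustrative name:

import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc

def pad_batch(batch_tokens, padding_token=''):
    # Inferred from the call site above: right-pad each token sequence
    # to the longest one and report the original lengths
    lengths = [len(tokens) for tokens in batch_tokens]
    max_length = max(lengths)
    padded = [tokens + [padding_token] * (max_length - len(tokens))
              for tokens in batch_tokens]
    return padded, lengths, max_length

def send_prediction_request(request, server_name, server_port, timeout=30.0):
    # open an insecure channel to TensorFlow Serving and issue the Predict RPC
    channel = grpc.insecure_channel('%s:%s' % (server_name, server_port))
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    return stub.Predict(request, timeout)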