def gen_example_client(model_name):
    """Return generated example-client code for *model_name*.

    The client flavor comes from the request's "language" query
    parameter (default "bash") and is passed straight through to
    gen_client.gen_tensorflow_client.

    NOTE(review): `self` is referenced but is not a parameter — this
    looks like a method pasted outside its class; confirm the enclosing
    scope before relying on this function at module level.
    """
    language = request.args.get("language", default="bash", type=str)
    service = self.manager.model_name_service_map[model_name]
    return gen_client.gen_tensorflow_client(service, language, model_name)
    # NOTE(review): orphaned `elif` chain — the opening `if` branch
    # (presumably the "tensorflow" platform case) is missing from this
    # chunk, so this fragment cannot parse on its own. Left byte-identical
    # pending reconstruction of the missing header.
    elif args.model_platform == "mxnet":
        inference_service = MxnetInferenceService(args.model_name,
                                                  args.model_base_path,
                                                  args.verbose)
    elif args.model_platform == "onnx":
        inference_service = OnnxInferenceService(args.model_name,
                                                 args.model_base_path,
                                                 args.verbose)

    # Register the constructed service under its model name so later
    # code (client generation, reload loop) can look it up.
    model_name_service_map[args.model_name] = inference_service

# Generate example client code and exit, or fall through to normal serving
# when --gen_client was not given.
if args.gen_client != "":
    # Only the TensorFlow platform supports client-code generation here;
    # for any other platform we still exit so the server never starts.
    if args.model_platform == "tensorflow":
        inference_service = model_name_service_map[args.model_name]
        gen_client.gen_tensorflow_client(inference_service, args.gen_client,
                                         args.model_name)

    # Use sys.exit instead of the site-provided exit() builtin: exit() is
    # only guaranteed when the `site` module is loaded, and the sibling
    # copy of this logic later in this file already uses sys.exit(0).
    sys.exit(0)

# Start a background reload thread per TensorFlow service when the
# --reload_models flag was passed as the string "True"/"true".
if args.reload_models in ("True", "true"):
    # The map key is unused in the body, so iterate the values directly
    # instead of .items().
    for inference_service in model_name_service_map.values():
        if inference_service.platform == "tensorflow":
            # "dynmaically" [sic] — matches the method name the TensorFlow
            # inference service defines elsewhere in this project; do not
            # "fix" the spelling here or the call breaks.
            inference_service.dynmaically_reload_models()


# The API to render the dashboard page
# NOTE(review): this route handler is truncated — the render_template(...)
# call below is cut off mid-argument-list in this chunk (an intact copy of
# the handler appears later in the file); the fragment cannot parse as-is.
@application.route("/", methods=["GET"])
@requires_auth
def index():
    # BUG(review): unterminated call — missing its remaining keyword
    # arguments and closing ")".
    return render_template("index.html",
 def gen_example_json(model_name):
     """Return example JSON input for *model_name* as a JSON string.

     Delegates to gen_client.gen_tensorflow_client with client type
     "json" and serializes the resulting dict with json.dumps.

     NOTE(review): `self` is referenced but is not a parameter, and the
     one-space indentation suggests this method was pasted outside its
     class — confirm the enclosing scope.
     """
     inference_service = self.manager.model_name_service_map[model_name]
     data_json_dict = gen_client.gen_tensorflow_client(
         inference_service, "json", model_name)
     return json.dumps(data_json_dict)
# Example #4 (scrape artifact: stray "Beispiel #4" caption and vote count
# "0" copied from a code-example website — not executable code)
        # NOTE(review): orphaned decorator tail — the enclosing wrapper
        # (presumably the `decorated` function inside a `requires_auth`
        # decorator) begins before this fragment; only its final lines
        # survive here, so this cannot parse on its own.
        return f(*decorator_args, **decorator_kwargs)

    return decorated


# Initialize the Flask application; templates are served from ./templates.
application = Flask(__name__, template_folder='templates')

# Initialize the TensorFlow inference service that loads and serves models.
# NOTE(review): the camelCase `inferenceService` name is inconsistent with
# the snake_case `inference_service` used elsewhere in this file —
# presumably two versions of the script were concatenated; renaming here
# would break the later references that use this spelling.
inferenceService = TensorFlowInferenceService(args.model_base_path,
                                              args.custom_op_paths,
                                              args.verbose)

# Generate example client code for the requested language and exit, or
# fall through to normal serving when --gen_client was not given.
if args.gen_client != "":
    gen_client.gen_tensorflow_client(inferenceService, args.gen_client)
    # Use sys.exit instead of the site-provided exit() builtin: exit() is
    # only guaranteed when the `site` module is loaded, and the later copy
    # of this logic in this file already uses sys.exit(0).
    sys.exit(0)

# Start a thread to periodically reload models when requested.
# NOTE(review): `== True` compares against the bool singleton, but other
# copies of this logic in this file compare the same flag against the
# strings "True"/"true" — the argparse type of --reload_models is
# ambiguous from here. Confirm it before simplifying this to
# `if args.reload_models:`.
if args.reload_models == True:
    # "dynmaically" [sic] — matches the method name the service defines.
    inferenceService.dynmaically_reload_models()


# The API to render the dashboard page
@application.route("/", methods=["GET"])
@requires_auth
def index():
    """Render the dashboard with loaded model versions and the model's
    graph signature."""
    versions = inferenceService.version_session_map.keys()
    graph_signature = str(inferenceService.model_graph_signature)
    return render_template("index.html",
                           model_versions=versions,
                           model_graph_signature=graph_signature)
    # NOTE(review): orphaned fragment — the `elif` header for this
    # assignment (presumably `elif args.model_platform == "mxnet":`) and
    # the whole opening `if` branch are missing from this chunk, and the
    # indentation is mixed (4-space body under 2-space elif), so the
    # fragment cannot parse on its own. Left byte-identical.
    inference_service = MxnetInferenceService(
        args.model_name, args.model_base_path, args.verbose)
  elif args.model_platform == "h2o":
    inference_service = H2oInferenceService(args.model_name,
                                            args.model_base_path, args.verbose)
  elif args.model_platform == "onnx":
    inference_service = OnnxInferenceService(
        args.model_name, args.model_base_path, args.verbose)

  # Register the constructed service under its model name for later lookup.
  model_name_service_map[args.model_name] = inference_service

# Optionally emit example client code for the requested model, then quit
# before the serving loop ever starts.
if args.gen_client != "":
  # Client-code generation is only implemented for the TensorFlow platform;
  # other platforms fall straight through to the exit below.
  if args.model_platform == "tensorflow":
    gen_client.gen_tensorflow_client(model_name_service_map[args.model_name],
                                     args.gen_client, args.model_name)

  sys.exit(0)

# Start a background reload thread per TensorFlow service when the
# --reload_models flag was passed as the string "True"/"true".
if args.reload_models in ("True", "true"):
  # The map key is unused in the body, so iterate the values directly
  # instead of .items().
  for inference_service in model_name_service_map.values():
    if inference_service.platform == "tensorflow":
      # "dynmaically" [sic] — matches the method name the TensorFlow
      # inference service defines elsewhere in this project.
      inference_service.dynmaically_reload_models()


# The API to render the dashboard page
# NOTE(review): truncated at the end of this chunk — the
# render_template(...) call below is cut off before any of its arguments;
# the fragment cannot parse as-is.
@application.route("/", methods=["GET"])
@requires_auth
def index():
  return render_template(