def start(args): api = None try: ctx = Context(s3_path=args.context, cache_dir=args.cache_dir, workload_id=args.workload_id) api = ctx.apis_id_map[args.api] local_cache["api"] = api local_cache["ctx"] = ctx if api.get("onnx") is None: raise CortexException(api["name"], "onnx key not configured") _, prefix = ctx.storage.deconstruct_s3_path(api["onnx"]["model"]) model_path = os.path.join(args.model_dir, os.path.basename(prefix)) if api["onnx"].get("request_handler") is not None: local_cache["request_handler"] = ctx.get_request_handler_impl( api["name"], args.project_dir) request_handler = local_cache.get("request_handler") if request_handler is not None and util.has_function( request_handler, "pre_inference"): cx_logger().info( "using pre_inference request handler provided in {}".format( api["onnx"]["request_handler"])) else: cx_logger().info("pre_inference request handler not found") if request_handler is not None and util.has_function( request_handler, "post_inference"): cx_logger().info( "using post_inference request handler provided in {}".format( api["onnx"]["request_handler"])) else: cx_logger().info("post_inference request handler not found") sess = rt.InferenceSession(model_path) local_cache["sess"] = sess local_cache["input_metadata"] = sess.get_inputs() cx_logger().info("input_metadata: {}".format( truncate(extract_signature(local_cache["input_metadata"])))) local_cache["output_metadata"] = sess.get_outputs() cx_logger().info("output_metadata: {}".format( truncate(extract_signature(local_cache["output_metadata"])))) except Exception as e: cx_logger().exception("failed to start api") sys.exit(1) if api.get("tracker") is not None and api["tracker"].get( "model_type") == "classification": try: local_cache["class_set"] = api_utils.get_classes(ctx, api["name"]) except Exception as e: cx_logger().warn( "an error occurred while attempting to load classes", exc_info=True) cx_logger().info("API is ready") serve(app, listen="*:{}".format(args.port))
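# Note: extract_signature and truncate are helpers referenced above but not defined in these
# snippets. A minimal sketch of what they plausibly look like for onnxruntime metadata
# (the function bodies and the max_length default are assumptions, not the actual implementation):
def extract_signature(metadata_list):
    # onnxruntime NodeArg objects expose .name, .shape and .type
    return {node.name: {"shape": node.shape, "type": node.type} for node in metadata_list}


def truncate(item, max_length=200):
    # keep the logged signature short
    text = str(item)
    return text if len(text) <= max_length else text[:max_length] + "..."
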
def start(args): api = None try: ctx = Context(s3_path=args.context, cache_dir=args.cache_dir, workload_id=args.workload_id) api = ctx.apis_id_map[args.api] local_cache["api"] = api local_cache["ctx"] = ctx if api.get("request_handler_impl_key") is not None: package.install_packages(ctx.python_packages, ctx.storage) local_cache["request_handler"] = ctx.get_request_handler_impl( api["name"]) model_cache_path = os.path.join(args.model_dir, args.api) if not os.path.exists(model_cache_path): ctx.storage.download_file_external(api["model"], model_cache_path) sess = rt.InferenceSession(model_cache_path) local_cache["sess"] = sess local_cache["input_metadata"] = sess.get_inputs() local_cache["output_metadata"] = sess.get_outputs() except CortexException as e: e.wrap("error") logger.error(str(e)) if api is not None: logger.exception( "An error occured starting the api, see `cx logs -v api {}` for more details" .format(api["name"])) sys.exit(1) serve(app, listen="*:{}".format(args.port))
def start(args): api = None try: ctx = Context(s3_path=args.context, cache_dir=args.cache_dir, workload_id=args.workload_id) api = ctx.apis_id_map[args.api] local_cache["api"] = api local_cache["ctx"] = ctx if api.get("request_handler") is not None: local_cache["request_handler"] = ctx.get_request_handler_impl( api["name"], args.project_dir) except Exception as e: logger.exception("failed to start api") sys.exit(1) try: validate_model_dir(args.model_dir) except Exception as e: logger.exception("failed to validate model") sys.exit(1) if api.get("tracker") is not None and api["tracker"].get( "model_type") == "classification": try: local_cache["class_set"] = api_utils.get_classes(ctx, api["name"]) except Exception as e: logger.warn("an error occurred while attempting to load classes", exc_info=True) channel = grpc.insecure_channel("localhost:" + str(args.tf_serve_port)) local_cache["stub"] = prediction_service_pb2_grpc.PredictionServiceStub( channel) # wait a bit for tf serving to start before querying metadata limit = 60 for i in range(limit): try: local_cache["metadata"] = run_get_model_metadata() break except Exception as e: if i > 6: logger.warn( "unable to read model metadata - model is still loading. Retrying..." ) if i == limit - 1: logger.exception("retry limit exceeded") sys.exit(1) time.sleep(5) logger.info("model_signature: {}".format( extract_signature( local_cache["metadata"]["signatureDef"], local_cache["api"]["tf_serving"]["signature_key"], ))) serve(app, listen="*:{}".format(args.port))
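# Note: run_get_model_metadata is referenced by the TensorFlow Serving variants but not defined
# in these snippets. A rough sketch of what it presumably does, using the standard
# tensorflow_serving gRPC API (the served model name "model" and the 10 second timeout are assumptions):
from google.protobuf import json_format
from tensorflow_serving.apis import get_model_metadata_pb2


def run_get_model_metadata():
    request = get_model_metadata_pb2.GetModelMetadataRequest()
    request.model_spec.name = "model"
    request.metadata_field.append("signature_def")
    response = local_cache["stub"].GetModelMetadata(request, timeout=10.0)
    # unpack the Any-wrapped SignatureDefMap and convert it to a dict; MessageToDict
    # camel-cases field names, which is why callers index into ["signatureDef"]
    signature_def_map = get_model_metadata_pb2.SignatureDefMap()
    response.metadata["signature_def"].Unpack(signature_def_map)
    return json_format.MessageToDict(signature_def_map)
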
def start(args): api = None try: ctx = Context(s3_path=args.context, cache_dir=args.cache_dir, workload_id=args.workload_id) api = ctx.apis_id_map[args.api] local_cache["api"] = api local_cache["ctx"] = ctx _, prefix = ctx.storage.deconstruct_s3_path(api["model"]) model_path = os.path.join(args.model_dir, os.path.basename(prefix)) if api.get("request_handler") is not None: local_cache["request_handler"] = ctx.get_request_handler_impl( api["name"], args.project_dir) sess = rt.InferenceSession(model_path) local_cache["sess"] = sess local_cache["input_metadata"] = sess.get_inputs() logger.info("input_metadata: {}".format( truncate(extract_signature(local_cache["input_metadata"])))) local_cache["output_metadata"] = sess.get_outputs() logger.info("output_metadata: {}".format( truncate(extract_signature(local_cache["output_metadata"])))) except Exception as e: logger.exception("failed to start api") sys.exit(1) if api.get("tracker") is not None and api["tracker"].get( "model_type") == "classification": try: local_cache["class_set"] = api_utils.get_classes(ctx, api["name"]) except Exception as e: logger.warn("an error occurred while attempting to load classes", exc_info=True) serve(app, listen="*:{}".format(args.port))
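# Note: ctx.storage.deconstruct_s3_path is a storage helper not shown here. Judging from its
# usage above (the second return value is passed to os.path.basename), it presumably splits an
# "s3://bucket/key" path into its bucket and key; a sketch under that assumption:
def deconstruct_s3_path(s3_path):
    path = s3_path[len("s3://"):]
    bucket = path.split("/")[0]
    key = path[len(bucket) + 1:]
    return bucket, key
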
def start(args): ctx = Context(s3_path=args.context, cache_dir=args.cache_dir, workload_id=args.workload_id) api = ctx.apis_id_map[args.api] local_cache["api"] = api local_cache["ctx"] = ctx if api.get("request_handler_impl_key") is not None: package.install_packages(ctx.python_packages, ctx.storage) local_cache["request_handler"] = ctx.get_request_handler_impl( api["name"]) model_cache_path = os.path.join(args.model_dir, args.api) if not os.path.exists(model_cache_path): ctx.storage.download_file_external(api["model"], model_cache_path) sess = rt.InferenceSession(model_cache_path) local_cache["sess"] = sess local_cache["input_metadata"] = sess.get_inputs() local_cache["output_metadata"] = sess.get_outputs() logger.info("Serving model: {}".format( util.remove_resource_ref(api["model"]))) serve(app, listen="*:{}".format(args.port))
def start(args): api = None try: ctx = Context(s3_path=args.context, cache_dir=args.cache_dir, workload_id=args.workload_id) api = ctx.apis_id_map[args.api] local_cache["api"] = api local_cache["ctx"] = ctx if api.get("tensorflow") is None: raise CortexException(api["name"], "tensorflow key not configured") if api["tensorflow"].get("request_handler") is not None: cx_logger().info("loading the request handler from {}".format( api["tensorflow"]["request_handler"])) local_cache["request_handler"] = ctx.get_request_handler_impl( api["name"], args.project_dir) request_handler = local_cache.get("request_handler") if request_handler is not None and util.has_function( request_handler, "pre_inference"): cx_logger().info( "using pre_inference request handler defined in {}".format( api["tensorflow"]["request_handler"])) else: cx_logger().info("pre_inference request handler not defined") if request_handler is not None and util.has_function( request_handler, "post_inference"): cx_logger().info( "using post_inference request handler defined in {}".format( api["tensorflow"]["request_handler"])) else: cx_logger().info("post_inference request handler not defined") except Exception as e: cx_logger().exception("failed to start api") sys.exit(1) try: validate_model_dir(args.model_dir) except Exception as e: cx_logger().exception("failed to validate model") sys.exit(1) if api.get("tracker") is not None and api["tracker"].get( "model_type") == "classification": try: local_cache["class_set"] = api_utils.get_classes(ctx, api["name"]) except Exception as e: cx_logger().warn( "an error occurred while attempting to load classes", exc_info=True) channel = grpc.insecure_channel("localhost:" + str(args.tf_serve_port)) local_cache["stub"] = prediction_service_pb2_grpc.PredictionServiceStub( channel) # wait a bit for tf serving to start before querying metadata limit = 60 for i in range(limit): try: local_cache["model_metadata"] = run_get_model_metadata() break except Exception as e: if i > 6: cx_logger().warn( "unable to read model metadata - model is still loading. Retrying..." ) if i == limit - 1: cx_logger().exception("retry limit exceeded") sys.exit(1) time.sleep(5) signature_key, parsed_signature = extract_signature( local_cache["model_metadata"]["signatureDef"], api["tensorflow"]["signature_key"]) local_cache["signature_key"] = signature_key local_cache["parsed_signature"] = parsed_signature cx_logger().info("model_signature: {}".format( local_cache["parsed_signature"])) cx_logger().info("{} API is live".format(api["name"])) serve(app, listen="*:{}".format(args.port))
def start(args): ctx = Context(s3_path=args.context, cache_dir=args.cache_dir, workload_id=args.workload_id) api = ctx.apis_id_map[args.api] local_cache["api"] = api local_cache["ctx"] = ctx try: if api.get("request_handler_impl_key") is not None: local_cache["request_handler"] = ctx.get_request_handler_impl( api["name"]) if not util.is_resource_ref(api["model"]): if api.get("request_handler") is not None: package.install_packages(ctx.python_packages, ctx.storage) if not os.path.isdir(args.model_dir): ctx.storage.download_and_unzip_external( api["model"], args.model_dir) else: package.install_packages(ctx.python_packages, ctx.storage) model_name = util.get_resource_ref(api["model"]) model = ctx.models[model_name] estimator = ctx.estimators[model["estimator"]] local_cache["model"] = model local_cache["estimator"] = estimator local_cache["target_col"] = ctx.columns[util.get_resource_ref( model["target_column"])] local_cache["target_col_type"] = ctx.get_inferred_column_type( util.get_resource_ref(model["target_column"])) log_level = "DEBUG" if ctx.environment is not None and ctx.environment.get( "log_level") is not None: log_level = ctx.environment["log_level"].get( "tensorflow", "DEBUG") tf_lib.set_logging_verbosity(log_level) if not os.path.isdir(args.model_dir): ctx.storage.download_and_unzip(model["key"], args.model_dir) for column_name in ctx.extract_column_names( [model["input"], model["target_column"]]): if ctx.is_transformed_column(column_name): trans_impl, _ = ctx.get_transformer_impl(column_name) local_cache["trans_impls"][column_name] = trans_impl transformed_column = ctx.transformed_columns[column_name] # cache aggregate values for resource_name in util.extract_resource_refs( transformed_column["input"]): if resource_name in ctx.aggregates: ctx.get_obj(ctx.aggregates[resource_name]["key"]) local_cache["required_inputs"] = tf_lib.get_base_input_columns( model["name"], ctx) if util.is_dict(model["input"]) and model["input"].get( "target_vocab") is not None: local_cache["target_vocab_populated"] = ctx.populate_values( model["input"]["target_vocab"], None, False) except CortexException as e: e.wrap("error") logger.error(str(e)) logger.exception( "An error occurred, see `cortex logs -v api {}` for more details.". format(api["name"])) sys.exit(1) except Exception as e: logger.exception( "An error occurred, see `cortex logs -v api {}` for more details.". format(api["name"])) sys.exit(1) try: validate_model_dir(args.model_dir) except Exception as e: logger.exception(e) sys.exit(1) channel = grpc.insecure_channel("localhost:" + str(args.tf_serve_port)) local_cache["stub"] = prediction_service_pb2_grpc.PredictionServiceStub( channel) # wait a bit for tf serving to start before querying metadata limit = 300 for i in range(limit): try: local_cache["metadata"] = run_get_model_metadata() break except Exception as e: if i == limit - 1: logger.exception( "An error occurred, see `cortex logs -v api {}` for more details." .format(api["name"])) sys.exit(1) time.sleep(1) serve(app, listen="*:{}".format(args.port))
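# Note: in every variant above, `args` is a namespace carrying attributes such as context,
# cache_dir, workload_id, api, port, model_dir, and (in some variants) project_dir and
# tf_serve_port. One plausible way it gets populated, assuming argparse; the flag names and
# help strings below are inferred from the attributes accessed above, not taken from the source:
import argparse


def main():
    parser = argparse.ArgumentParser()
    na = parser.add_argument_group("required named arguments")
    na.add_argument("--workload-id", required=True, help="workload id")
    na.add_argument("--port", type=int, required=True, help="port to serve the API on")
    na.add_argument("--context", required=True, help="s3 path to the context")
    na.add_argument("--api", required=True, help="resource id of the api to serve")
    na.add_argument("--model-dir", required=True, help="directory to download the model to")
    na.add_argument("--cache-dir", required=True, help="local path for the context cache")
    na.add_argument("--project-dir", required=False, help="local path for the project code")
    na.add_argument("--tf-serve-port", type=int, required=False, help="port TensorFlow Serving listens on")
    start(parser.parse_args())


if __name__ == "__main__":
    main()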