Code example #1
def create_tf_model(model_path, flags):
    """Returns the appropriate Model implementation based on env vars."""
    engine = os.environ.get("prediction_engine", MODEL_SERVER_ENGINE_NAME)
    if engine == MODEL_SERVER_ENGINE_NAME:
        logging.debug("Starting model server from %s", model_path)
        try:
            _, stub = _start_model_server(model_path, flags)
        except Exception as e:  # pylint: disable=broad-except
            logging.critical("Could not load ModelServer.\n%s", str(e))
            raise mlprediction.PredictionError(
                mlprediction.PredictionError.FAILED_TO_LOAD_MODEL, str(e))
        signature_map = _get_model_signature_map(stub)
        if not signature_map:
            raise mlprediction.PredictionError(
                mlprediction.PredictionError.FAILED_TO_LOAD_MODEL,
                "Could not get signature map from the model. ")
        client = ModelServerClient(stub, signature_map)
    elif engine == mlprediction.SESSION_RUN_ENGINE_NAME:
        session, signature_map = _get_session_and_signature_map(
            model_path, flags)
        client = mlprediction.SessionClient(session, signature_map)
    else:
        logging.critical("Illegal prediction engine %s", engine)
        raise mlprediction.PredictionError(
            mlprediction.PredictionError.FAILED_TO_LOAD_MODEL,
            "Illegal prediction engine %s" % engine)

    return mlprediction.create_model(client, model_path)
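
Which engine create_tf_model uses is driven by the "prediction_engine" environment variable. A minimal usage sketch, assuming flags is the parsed flags.FLAGS object referenced in the later examples; the model path is illustrative:

import os

# Hypothetical usage: force the in-process session engine instead of ModelServer.
os.environ["prediction_engine"] = mlprediction.SESSION_RUN_ENGINE_NAME
model = create_tf_model("/tmp/model/0001", flags)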
Code example #2
def _validate_custom_user_files(package_uris, local_custom_code_path):
    """Validates that the user provided packages were successfully copied locally.

  Args:
    package_uris: The original user provided list of package uris, from the
      CreateVersionRequest.
    local_custom_code_path: The location of the copied packages in the local
      container.

  Raises:
    PredictionError: if any of the user provided packages paths are invalid or
      failed to copy to local container.
  """
    for uri in package_uris:
        # Each package_uri has already been validated to be non-empty and a valid
        # GCS path in the frontend.
        filename = os.path.basename(uri)
        if not filename:
            raise mlprediction.PredictionError(
                mlprediction.PredictionError.FAILED_TO_LOAD_MODEL,
                "User-provided package " + uri +
                " is a directory, not a package file.")
        if not os.path.exists(os.path.join(local_custom_code_path, filename)):
            raise mlprediction.PredictionError(
                mlprediction.PredictionError.FAILED_TO_LOAD_MODEL,
                "User-provided package " + uri +
                " was not successfully copied.")
Code example #3
    def predict(self,
                inputs,
                stats=None,
                signature_name=(
                    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY),
                **unused_kwargs):
        """Predicts over the input_list."""
        stats = stats or mlprediction.Stats()
        stats[mlprediction.ENGINE] = MODEL_SERVER_ENGINE_NAME
        stats[mlprediction.FRAMEWORK] = TENSORFLOW_FRAMEWORK_NAME

        with stats.time(MODEL_SERVER_CREATE_REQUEST_TIME):
            try:
                request = create_predict_request(
                    inputs, self._signature_map[signature_name].inputs,
                    signature_name)
            except Exception as e:  # pylint: disable=broad-except
                raise mlprediction.PredictionError(
                    mlprediction.PredictionError.INVALID_INPUTS,
                    "Error processing input: " + str(e))

        # TODO(b/33849399): Measure the actual session.run() time.
        with stats.time(mlprediction.SESSION_RUN_TIME):
            try:
                response = self._stub.Predict(request, PREDICT_DEADLINE_SECS)
            except Exception as e:  # pylint: disable=broad-except
                raise mlprediction.PredictionError(
                    mlprediction.PredictionError.FAILED_TO_RUN_MODEL,
                    "Error during model execution: " + e.message)
        with stats.time(MODEL_SERVER_PARSE_RESPONSE_TIME):
            return {
                name: tensor_util.MakeNdarray(val)
                for name, val in response.outputs.items()
            }
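
Rough usage sketch for predict, assuming client is the ModelServerClient built in code example #1 and that the serving signature has an input alias named "x" (hypothetical):

stats = mlprediction.Stats()
# Keys must match the signature's input tensor aliases; the result maps output
# aliases to numpy ndarrays decoded via tensor_util.MakeNdarray.
outputs = client.predict({"x": [[1.0, 2.0]]}, stats=stats)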
Code example #4
def pip_install(package_path, target_path):
    """Pip installs the python package from a specified location.

  Args:
    package_path: a string of the package or tar.gz path
    target_path: a string of the target path where the package should be
      installed. This is necessary since write access is limited in the
      containers launched on GAE. The target_path should be a path where write
      access is allowed.

  Raises:
    PredictionError: if pip is unable to install the package provided by
        package_path.
  """
    try:
        subprocess.check_call([
            "python", "-m", "pip", "install", "--upgrade", "--force-reinstall",
            "--target=" + target_path, "--no-cache-dir", "--no-deps",
            package_path
        ])
    except subprocess.CalledProcessError as e:
        # TODO(b/62225324) report a stackdriver link with the pip stderr output
        raise mlprediction.PredictionError(
            mlprediction.PredictionError.FAILED_TO_LOAD_MODEL,
            "User-provided package " + os.path.basename(package_path) +
            " failed to install: " + str(e))
Code example #5
def choose_create_model_fn(framework):
    """Returns the model-creation function registered for the given framework."""
    if framework:
        framework = framework.lower()
    else:
        framework = mlprediction.TENSORFLOW_FRAMEWORK_NAME
    if framework not in _FRAMEWORK_TO_MODEL_FN_MAP:
        raise mlprediction.PredictionError(
            mlprediction.PredictionError.FAILED_TO_LOAD_MODEL,
            "Could not load model. Unknown framework provided: %s" % framework)
    return _FRAMEWORK_TO_MODEL_FN_MAP[framework]
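
_FRAMEWORK_TO_MODEL_FN_MAP itself is not shown in these excerpts; presumably it is a module-level dict keyed by lowercase framework names. A sketch of what it might look like, with create_tf_model from code example #1 as the TensorFlow entry (everything else is an assumption):

# Hypothetical reconstruction; only the TensorFlow entry is backed by the code above.
_FRAMEWORK_TO_MODEL_FN_MAP = {
    mlprediction.TENSORFLOW_FRAMEWORK_NAME: create_tf_model,
    # Other frameworks (e.g. scikit-learn, XGBoost) would map to their own loaders.
}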
Code example #6
def prepare_model(model_path, flags, create_model_fn):
    """Prepare the model, including downloading and starting the model server.

  Args:
    model_path: The model path passed to main server. It can be a gcs location
      or a local path.
    flags: The command line flag (flags.FLAGS) passed from server main.
    create_model_fn: Function which takes in a model_path and flags and returns
      a corresponding model instantiation.

  Returns:
    model: The model when successfully loaded.

  Raises:
    ValueError: When no project is specified.
    Exception: Those exceptions raised by create_model_fn.

  """
    # The model must live under a numbered directory (e.g. "0001"), so we simply
    # copy it into the temporary model dir. Once we start using the standard
    # exporter, this will no longer be needed.
    temp_model_path = os.path.join(flags.temp_model_path, "0001")
    if model_path.startswith("gs://"):
        # Append a "/" to the end of the model path if it doesn't already end in
        # "/". This ensures that _copy_model will copy the model files directly
        # into the temp_model_path without the source parent directory.
        if not model_path.endswith("/"):
            model_path = os.path.join(model_path, "")
        _copy(model_path, temp_model_path, flags.path_to_gsutil)
        model_path = temp_model_path
    elif not model_path.startswith("@"):
        shutil.copytree(model_path, temp_model_path)
        model_path = temp_model_path

    try:
        model = create_model_fn(model_path, flags)
    except mlprediction.PredictionError as e:
        logging.error("Error when loading the model: " + str(e))
        raise
    except Exception as e:  # pylint: disable=broad-except
        logging.error("Unexpected error when loading the model: " + str(e))
        raise mlprediction.PredictionError(
            mlprediction.PredictionError.FAILED_TO_LOAD_MODEL,
            "Unexpected error when loading the model: " + str(e))
    else:
        # To reduce our memory footprint, remove the model. This is advantageous
        # in GAE because, otherwise, it counts against our memory usage.
        if os.environ.get("delete_model", "") == "True":
            logging.info("Model deleted from %s", model_path)
            shutil.rmtree(model_path)

        return model
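
Code examples #5 and #6 are meant to be used together: the framework string selects the loader, and prepare_model stages the files and invokes it. A minimal sketch, assuming flags is the parsed flags.FLAGS object; the GCS path is illustrative:

create_model_fn = choose_create_model_fn(None)  # None falls back to the TensorFlow loader
model = prepare_model("gs://my-bucket/model/", flags, create_model_fn)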
Code example #7
def create_predict_request(batched_instances, inputs_type_map, signature_name):
    """Returns the PredictRequest proto for ModelServer."""
    predict_request = predict_pb2.PredictRequest()
    predict_request.model_spec.name = "default"
    predict_request.model_spec.signature_name = signature_name
    for alias, val in batched_instances.items():
        if alias not in inputs_type_map:
            raise mlprediction.PredictionError(
                mlprediction.PredictionError.INVALID_INPUTS,
                "Unexpected tensor name: " + alias)
        predict_request.inputs[alias].CopyFrom(
            tensor_util.make_tensor_proto(val, inputs_type_map[alias].dtype))
    return predict_request
Code example #8
    def testInferenceHandlerError(self, model):
        # Arrange.
        # Model will raise an exception
        model.predict.side_effect = mlprediction.PredictionError(
            3, "exception message")

        # Setup the app.
        config = create_app_config(model=model, model_size=123)
        inference_app = webapp2.WSGIApplication(
            [("/", server_lib._InferenceHandler)], debug=True, config=config)
        test_app = webtest.app.TestApp(app=inference_app)

        # Act.
        instances = [{"x": 30, "y": 40}, {"x": 10, "y": 50}]
        body = {"instances": instances}
        response = test_app.post(url="/",
                                 params=json.dumps(body),
                                 content_type="application/json")

        # Assert.
        self.assertEqual(
            response.body,
            json.dumps({"error": "Prediction failed: exception message"}))