Example #1
    def initialize_client(self, tf_serving_host=None, tf_serving_port=None):
        signature_message = None
        if self.type == "onnx":
            from cortex.lib.client.onnx import ONNXClient

            client = ONNXClient(self.models)
            if self.models[0].name == consts.SINGLE_MODEL_NAME:
                signature_message = "ONNX model signature: {}".format(
                    client.input_signatures[consts.SINGLE_MODEL_NAME]
                )
            else:
                signature_message = "ONNX model signatures: {}".format(client.input_signatures)
            cx_logger().info(signature_message)
            return client
        elif self.type == "tensorflow":
            from cortex.lib.client.tensorflow import TensorFlowClient

            for model in self.models:
                validate_model_dir(model.base_path)

            # both host and port must be provided (as strings) for the
            # TensorFlow predictor type
            tf_serving_address = tf_serving_host + ":" + tf_serving_port
            client = TensorFlowClient(tf_serving_address, self.models)
            if self.models[0].name == consts.SINGLE_MODEL_NAME:
                signature_message = "TensorFlow model signature: {}".format(
                    client.input_signatures[consts.SINGLE_MODEL_NAME]
                )
            else:
                signature_message = "TensorFlow model signatures: {}".format(
                    client.input_signatures
                )
            cx_logger().info(signature_message)
            return client

        return None
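
For orientation, a minimal usage sketch of the method above. The `predictor` object, the host/port values, and the payload are placeholders, and the `predict()` call on the returned client is an assumption about the client API:

    # Hypothetical usage; `predictor` stands in for the object that owns
    # initialize_client, and all values below are made up for illustration.
    client = predictor.initialize_client(tf_serving_host="localhost", tf_serving_port="9000")
    if client is not None:
        # assumes the returned client exposes predict() on a dict payload
        prediction = client.predict({"input": [1.0, 2.0, 3.0]})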
Example #2
    def initialize_client(
        self,
        tf_serving_host: Optional[str] = None,
        tf_serving_port: Optional[str] = None
    ) -> Union[PythonClient, TensorFlowClient, ONNXClient]:
        """
        Initialize client that gives access to models specified in the API spec (cortex.yaml).
        Only applies when models are provided in the API spec.

        Args:
            tf_serving_host: Host of the TF Serving server. Only used with the TensorFlow predictor type.
            tf_serving_port: Port of the TF Serving server. Only used with the TensorFlow predictor type.

        Returns:
            The client for the respective predictor type.
        """

        client = None

        if _are_models_specified(self.api_spec):
            if self.type == PythonPredictorType:
                client = PythonClient(
                    self.api_spec, self.models, self.model_dir, self.models_tree
                )

            if self.type in [TensorFlowPredictorType, TensorFlowNeuronPredictorType]:
                tf_serving_address = tf_serving_host + ":" + tf_serving_port
                client = TensorFlowClient(
                    tf_serving_address,
                    self.api_spec,
                    self.models,
                    self.model_dir,
                    self.models_tree,
                )
                if not self.caching_enabled:
                    # background thread that keeps the TF Serving client in
                    # sync with the served models when caching is disabled
                    cron = TFSAPIServingThreadUpdater(interval=5.0, client=client._client)
                    cron.start()

            if self.type == ONNXPredictorType:
                client = ONNXClient(
                    self.api_spec, self.models, self.model_dir, self.models_tree
                )

        return client
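
The `_are_models_specified` guard is defined outside this example. A minimal sketch of what such a helper might look like, assuming the API spec is a dict mirroring cortex.yaml with a `predictor` section (the key names here are assumptions, not Cortex's actual schema):

    def _are_models_specified(api_spec: dict) -> bool:
        # Hypothetical: True when the predictor section declares any model,
        # either a single model_path or a list of models.
        predictor_spec = api_spec.get("predictor", {})
        return bool(predictor_spec.get("model_path") or predictor_spec.get("models"))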
Example #3
    def initialize_client(self, args):
        if self.type == "onnx":
            from cortex.lib.client.onnx import ONNXClient

            # split s3://bucket/key into (bucket, key) and reuse the key's
            # basename as the local model file name
            _, prefix = self.storage.deconstruct_s3_path(self.model)
            model_path = os.path.join(args.model_dir, os.path.basename(prefix))
            client = ONNXClient(model_path)
            cx_logger().info("ONNX model signature: {}".format(client.input_signature))
            return client
        elif self.type == "tensorflow":
            from cortex.lib.client.tensorflow import TensorFlowClient

            validate_model_dir(args.model_dir)
            client = TensorFlowClient("localhost:" + str(args.tf_serve_port), self.signature_key)
            cx_logger().info("TensorFlow model signature: {}".format(client.input_signature))
            return client

        return None
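
Example #3 splits the S3 path into bucket and key, then joins the key's basename onto the local model directory. A minimal sketch of that split, assuming the input is a full `s3://bucket/key` URL (this body is an assumption, not the actual `deconstruct_s3_path` implementation):

    def deconstruct_s3_path(s3_path):
        # Hypothetical: "s3://bucket/models/model.onnx" -> ("bucket", "models/model.onnx");
        # os.path.basename of the key then yields "model.onnx".
        path = s3_path[len("s3://"):]
        bucket, _, prefix = path.partition("/")
        return bucket, prefix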
Example #4
    def initialize_client(self, model_dir=None, tf_serving_host=None, tf_serving_port=None):
        if self.type == "onnx":
            from cortex.lib.client.onnx import ONNXClient

            model_path = os.path.join(model_dir, os.path.basename(self.model))
            client = ONNXClient(model_path)
            cx_logger().info("ONNX model signature: {}".format(
                client.input_signature))
            return client
        elif self.type == "tensorflow":
            from cortex.lib.client.tensorflow import TensorFlowClient

            tf_serving_address = tf_serving_host + ":" + tf_serving_port
            validate_model_dir(model_dir)
            client = TensorFlowClient(tf_serving_address, self.signature_key)
            cx_logger().info("TensorFlow model signature: {}".format(
                client.input_signature))
            return client

        return None
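
Both TensorFlow branches call `validate_model_dir` before constructing the client. A minimal sketch of such a check, assuming the directory must follow TensorFlow Serving's layout of numeric version subdirectories each holding a `saved_model.pb` (the body is an assumption, not Cortex's actual validation):

    import os

    def validate_model_dir(model_dir):
        # Hypothetical: accept the directory if at least one numeric version
        # subdirectory contains a saved_model.pb file; raise otherwise.
        for entry in os.listdir(model_dir):
            version_dir = os.path.join(model_dir, entry)
            if entry.isdigit() and os.path.isfile(os.path.join(version_dir, "saved_model.pb")):
                return
        raise ValueError("no valid SavedModel versions found in {}".format(model_dir))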