Example #1
import yaml

import cortex as cx


def client_from_config(config_path: str) -> cx.Client:
    # read the cluster configuration from a YAML file
    with open(config_path) as f:
        config = yaml.safe_load(f)

    cluster_name = config["cluster_name"]

    # the cluster name is used as the Cortex environment name here
    return cx.client(cluster_name)
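
A hypothetical config file and call, for context; only cluster_name is read by the helper:

# cluster.yaml (hypothetical contents):
#   cluster_name: my-cluster
client = client_from_config("cluster.yaml")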
Example #2
import json
import sys
from typing import List

import cortex
import requests


def main():
    # parse args
    if len(sys.argv) != 3:
        print("usage: python submit.py <cortex-env> <dest-s3-dir>")
        sys.exit(1)
    env_name = sys.argv[1]
    dest_s3_dir = sys.argv[2]

    # read sample file
    with open("sample.json") as f:
        sample_items: List[str] = json.load(f)

    # get batch endpoint
    cx = cortex.client(env_name)
    batch_endpoint = cx.get_api("image-classifier-alexnet")["endpoint"]

    # submit job
    job_spec = {
        "workers": 1,
        "item_list": {
            "items": sample_items,
            "batch_size": 1
        },
        "config": {
            "dest_s3_dir": dest_s3_dir
        },
    }
    response = requests.post(batch_endpoint, json=job_spec)
    print(json.dumps(response.json(), indent=2))
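
The script assumes sample.json holds a flat JSON array of strings, matching the List[str] annotation above. A minimal sketch that writes a compatible file (the URLs are placeholders):

import json

# placeholder work items; the job above splits these into batches of size 1
items = [
    "https://example.com/images/cat.jpg",
    "https://example.com/images/dog.jpg",
]
with open("sample.json", "w") as f:
    json.dump(items, f)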
Example #3
    def __init__(self,
                 db_connection_pool: ThreadedConnectionPool,
                 gc_interval_sec=30 * 60,
                 cortex_env="aws"):
        self.db_connection_pool = db_connection_pool
        self._init_garbage_api_collector(gc_interval_sec)
        self.cortex_env = cortex_env
        self.cortex_vanilla = cortex.client(self.cortex_env)
        logger.info(f'Constructing CortexClient for {CORTEX_PATH}.')
Example #4
File: conftest.py Project: gtrevg/cortex
def client(config: Dict):
    env_name = config["gcp"]["env"]
    if env_name:
        return cx.client(env_name)

    config_path = config["gcp"]["config"]
    if config_path is not None:
        return client_from_config(config_path)

    pytest.skip("--gcp-env or --gcp-config must be passed to run gcp tests")
Example #5
File: conftest.py Project: gtrevg/cortex
def client(config):
    env_name = config["aws"]["env"]
    if env_name:
        return cx.client(env_name)

    config_path = config["aws"]["config"]
    if config_path is not None:
        return client_from_config(config_path)

    pytest.skip("--aws-env or --aws-config must be passed to run aws tests")
Example #6
import json
import sys

import cortex
import requests


def main():
    # parse args
    if len(sys.argv) != 3:
        print("usage: python submit.py <cortex-env> <dest-s3-dir>")
        sys.exit(1)
    env_name = sys.argv[1]
    dest_s3_dir = sys.argv[2]

    # get task endpoint
    cx = cortex.client(env_name)
    task_endpoint = cx.get_api("iris-classifier-trainer")["endpoint"]

    # submit job
    job_spec = {"config": {"dest_s3_dir": dest_s3_dir}}
    response = requests.post(task_endpoint, json=job_spec)
    print(json.dumps(response.json(), indent=2))
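
After submission, the job can be polled for status; this sketch assumes the submit response includes a job_id field and that the endpoint accepts a jobID query parameter, which matches Cortex's documented job workflow but is not shown in these examples:

# hypothetical follow-up to the submit code above
job_id = response.json()["job_id"]          # assumed response field
status = requests.get(task_endpoint, params={"jobID": job_id})
print(json.dumps(status.json(), indent=2))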
Example #7
import cortex
import os
import sys
import requests

dir_path = os.path.dirname(os.path.realpath(__file__))

cx = cortex.client()

api_spec = {
    "name": "text-generator",
    "kind": "RealtimeAPI",
}


class PythonPredictor:
    def __init__(self, config):
        from transformers import pipeline

        self.model = pipeline(task="text-generation")

    def predict(self, payload):
        return self.model(payload["text"])[0]


api = cx.create_api(
    api_spec,
    predictor=PythonPredictor,
    requirements=["torch", "transformers"],
    wait=True,
)
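
With wait=True, the API is ready when create_api returns, so it can be queried over plain HTTP. This sketch assumes the returned metadata exposes an "endpoint" field, as get_api does in Examples #2 and #6:

import requests

response = requests.post(api["endpoint"], json={"text": "machine learning is"})
print(response.json())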
Example #8

# USAGE: python ./dev/deploy_test.py <env_name>
# e.g.: python ./dev/deploy_test.py aws

import os
import cortex
import sys
import requests

cx = cortex.client(sys.argv[1])
api_config = {
    "name": "text-generator",
    "kind": "RealtimeAPI",
}


class PythonPredictor:
    def __init__(self, config):
        from transformers import pipeline

        self.model = pipeline(task="text-generation")

    def predict(self, payload):
        return self.model(payload["text"])[0]
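
The snippet ends before the deployment call; a plausible continuation, mirroring Example #7:

# hedged completion: deploy the predictor defined above
api = cx.create_api(
    api_config,
    predictor=PythonPredictor,
    requirements=["torch", "transformers"],
    wait=True,
)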
Example #9
    def Do(self, input_dict: Dict[Text, List[types.Artifact]],
           output_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any]):
        """Overrides the tfx_pusher_executor.

        Args:
          input_dict: Input dict from input key to a list of artifacts,
            including:
            - model_export: exported model from trainer.
            - model_blessing: model blessing path from evaluator.
          output_dict: Output dict from key to a list of artifacts, including:
            - model_push: A list of 'ModelPushPath' artifact of size one. It
              will include the model in this push execution if the model was
              pushed.
          exec_properties: Mostly a passthrough input dict for
            tfx.components.Pusher.executor.custom_config
        Raises:
          ValueError: if custom config not present or not a dict.
          RuntimeError: if
        """
        self._log_startup(input_dict, output_dict, exec_properties)

        # check model blessing
        model_push = artifact_utils.get_single_instance(
            output_dict[tfx_pusher_executor.PUSHED_MODEL_KEY])
        if not self.CheckBlessing(input_dict):
            self._MarkNotPushed(model_push)
            return

        model_export = artifact_utils.get_single_instance(
            input_dict[tfx_pusher_executor.MODEL_KEY])

        custom_config = json_utils.loads(
            exec_properties.get(_CUSTOM_CONFIG_KEY, 'null'))
        # a missing custom_config loads as None; reject it here too, since
        # the .get() below requires a dict
        if not isinstance(custom_config, dict):
            raise ValueError(
                'custom_config in execution properties needs to be a '
                'dict.')

        cortex_serving_args = custom_config.get(SERVING_ARGS_KEY)
        if not cortex_serving_args:
            raise ValueError(
                '\'cortex_serving_args\' is missing in \'custom_config\'')

        # Deploy the model.
        io_utils.copy_dir(src=path_utils.serving_model_path(model_export.uri),
                          dst=model_push.uri)
        model_path = model_push.uri

        # Cortex implementation starts here
        # pop the env and initialize client
        cx = cortex.client(cortex_serving_args.pop('env'))

        # load the predictor
        predictor_path = cortex_serving_args.pop('predictor_path')
        with tempfile.TemporaryDirectory() as temp_project_dir:

            # predictor
            p_dump_path = os.path.join(temp_project_dir, 'predictor.py')
            io_utils.copy_file(predictor_path, p_dump_path)

            # requirements.txt
            reqs = cortex_serving_args.pop('requirements', [])
            if reqs:
                r_dump_path = os.path.join(temp_project_dir,
                                           'requirements.txt')
                io_utils.write_string_file(r_dump_path, '\n'.join(reqs))

            # conda-packages.txt
            c_reqs = cortex_serving_args.pop('conda_packages', [])
            if c_reqs:
                r_dump_path = os.path.join(temp_project_dir,
                                           'conda-packages.txt')
                io_utils.write_string_file(r_dump_path, '\n'.join(c_reqs))

            # edit the api_config
            api_config = cortex_serving_args.pop('api_config')
            if 'config' not in api_config['predictor']:
                api_config['predictor']['config'] = {}
            api_config['predictor']['config']['model_artifact'] = model_path

            # launch the api
            api_config['predictor']['path'] = 'predictor.py'

            # configure the model path
            if 'models' not in api_config['predictor']:
                api_config['predictor']['models'] = {}
            api_config['predictor']['models'].update({'path': model_path})
            cx.create_api(api_config,
                          project_dir=temp_project_dir,
                          **cortex_serving_args)

        self._MarkPushed(model_push, pushed_destination=model_path)
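
For reference, the keys this executor pops imply a custom_config shaped roughly like the sketch below; every value is hypothetical:

# hypothetical Pusher custom_config; key names mirror what Do() pops above
custom_config = {
    SERVING_ARGS_KEY: {
        "env": "aws",                              # cortex environment name
        "predictor_path": "path/to/predictor.py",  # copied into the project dir
        "requirements": ["torch", "transformers"],
        "conda_packages": [],
        "api_config": {
            "name": "my-model",
            "kind": "RealtimeAPI",
            # 'path', 'config', and 'models' are filled in by Do() above
            "predictor": {"type": "python"},
        },
    }
}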
Example #10
    def Do(self, input_dict: Dict[Text, List[types.Artifact]],
           output_dict: Dict[Text, List[types.Artifact]],
           exec_properties: Dict[Text, Any]):
        """Overrides the tfx_pusher_executor.

        Args:
          input_dict: Input dict from input key to a list of artifacts,
            including:
            - model_export: exported model from trainer.
            - model_blessing: model blessing path from evaluator.
          output_dict: Output dict from key to a list of artifacts, including:
            - model_push: A list of 'ModelPushPath' artifact of size one. It
              will include the model in this push execution if the model was
              pushed.
          exec_properties: Mostly a passthrough input dict for
            tfx.components.Pusher.executor.custom_config
        Raises:
          ValueError: if custom config not present or not a dict.
          RuntimeError: if
        """
        self._log_startup(input_dict, output_dict, exec_properties)

        # check model blessing
        model_push = artifact_utils.get_single_instance(
            output_dict[tfx_pusher_executor.PUSHED_MODEL_KEY])
        if not self.CheckBlessing(input_dict):
            self._MarkNotPushed(model_push)
            return

        model_export = artifact_utils.get_single_instance(
            input_dict[tfx_pusher_executor.MODEL_KEY])

        custom_config = json_utils.loads(
            exec_properties.get(_CUSTOM_CONFIG_KEY, 'null'))
        # a missing custom_config loads as None; reject it here too, since
        # the .get() below requires a dict
        if not isinstance(custom_config, dict):
            raise ValueError(
                'custom_config in execution properties needs to be a '
                'dict.')

        cortex_serving_args = custom_config.get(SERVING_ARGS_KEY)
        if not cortex_serving_args:
            raise ValueError(
                '\'cortex_serving_args\' is missing in \'custom_config\'')

        # Deploy the model.
        io_utils.copy_dir(
            src=path_utils.serving_model_path(model_export.uri),
            dst=model_push.uri)
        model_path = model_push.uri

        # Cortex implementation starts here
        # pop the env and initialize client
        cx = cortex.client(cortex_serving_args.pop('env'))

        # load the predictor
        predictor_path = cortex_serving_args.pop('predictor_path')
        predictor = load_source_path_class(predictor_path)

        # edit the api_config
        api_config = cortex_serving_args.pop('api_config')
        if 'config' not in api_config['predictor']:
            api_config['predictor']['config'] = {}
        api_config['predictor']['config']['model_artifact'] = model_path

        # launch the api
        cx.create_api(
            api_config=api_config, predictor=predictor, **cortex_serving_args)

        self._MarkPushed(
            model_push,
            pushed_destination=model_path)
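
Note the contrast with Example #9: there the predictor file, requirements, and conda packages are materialized into a temporary project directory and deployed via project_dir, while here the predictor class is loaded in-process with load_source_path_class and passed to create_api directly.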