def ffdlPipeline(
        GITHUB_TOKEN='',
        CONFIG_FILE_URL='https://raw.githubusercontent.com/user/repository/branch/creds.ini',
        model_def_file_path='gender-classification.zip',
        manifest_file_path='manifest.yml',
        model_deployment_name='gender-classifier',
        model_class_name='ThreeLayerCNN',
        model_class_file='gender_classification.py'):
    """A pipeline for an end-to-end machine learning workflow."""

    get_configuration = configuration_op(
        token=GITHUB_TOKEN,
        url=CONFIG_FILE_URL,
        name=secret_name
    )

    train = train_op(
        model_def_file_path,
        manifest_file_path
    ).apply(params.use_ai_pipeline_params(secret_name))

    serve = serve_op(
        train.output,
        model_deployment_name,
        model_class_name,
        model_class_file
    ).apply(params.use_ai_pipeline_params(secret_name))
def icpdPipeline(
        notebook_url='https://raw.githubusercontent.com/animeshsingh/notebooks/master/sklearn.ipynb',
        notebook_params='',
        api_token='',
        endpoint_url='minio-service:9000',
        bucket_name='mlpipeline',
        object_name='notebooks/sklearn-model/runs/train/sklearn-pg_out.ipynb',
        access_key='minio',
        secret_access_key='minio123',
        credentials_id=''):

    setup = setup_ops(
        secret_name='{{workflow.parameters.credentials-id}}-cred'
    ).apply(params.use_ai_pipeline_params('{{workflow.parameters.credentials-id}}'))

    trainer_notebook = notebook_ops(
        notebook_url=notebook_url,
        notebook_params=notebook_params,
        api_token=api_token,
        endpoint_url=endpoint_url,
        bucket_name=bucket_name,
        object_name=object_name,
        access_key=access_key,
        secret_access_key=secret_access_key
    ).add_env_variable(
        k8s_client.V1EnvVar(
            name='POSTGRES_URL',
            value_from=k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    name='{{workflow.parameters.credentials-id}}-cred',
                    key='POSTGRES_URL')))
    ).after(setup)

    post_model = post_model_ops().apply(
        params.use_ai_pipeline_params('{{workflow.parameters.credentials-id}}')
    ).after(trainer_notebook).set_image_pull_policy('Always')
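# The snippets in this file reference several names that are defined at module
# level in their original sources rather than inside each function. A minimal
# sketch of those shared definitions, assuming the Kubernetes Python client and
# the ai_pipeline_params helper package; the secret name below is illustrative,
# not taken from the snippets themselves:
import kfp.dsl as dsl
from kfp import components
from kubernetes import client as k8s_client  # V1EnvVar, V1EnvVarSource, V1SecretKeySelector
import ai_pipeline_params as params          # provides use_ai_pipeline_params

secret_name = 'kfp-creds'  # Kubernetes secret holding pipeline credentials (assumed value)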
def modelPipeline(model_id='max-image-completer'):
    """A pipeline for ML/DL model deployment."""

    # define workflow
    model_config = dsl.ContainerOp(
        name='model_config',
        image='tomcli/model-config:latest',
        command=['python'],
        arguments=[
            '-u', 'model-config.py',
            '--secret_name', generated_secret,
            '--model_id', model_id
        ],
        file_outputs={
            'train_code': '/tmp/train_code',
            'execution_command': '/tmp/execution_command',
            'framework': '/tmp/framework',
            'framework_version': '/tmp/framework_version',
            'runtime': '/tmp/runtime',
            'runtime_version': '/tmp/runtime_version',
            'run_definition': '/tmp/run_definition',
            'run_name': '/tmp/run_name'
        }).apply(params.use_ai_pipeline_params(secret_name))

    wml_train = train_op(
        train_code=model_config.outputs['train_code'],
        execution_command=model_config.outputs['execution_command'],
        framework=model_config.outputs['framework'],
        framework_version=model_config.outputs['framework_version'],
        runtime=model_config.outputs['runtime'],
        runtime_version=model_config.outputs['runtime_version'],
        run_definition=model_config.outputs['run_definition'],
        run_name=model_config.outputs['run_name']
    ).apply(params.use_ai_pipeline_params(generated_secret))
def kfp_wml_pipeline(
        GITHUB_TOKEN='',
        CONFIG_FILE_URL='https://raw.githubusercontent.com/user/repository/branch/creds.ini',
        train_code='tf-model.zip',
        execution_command='\'python3 convolutional_network.py '
                          '--trainImagesFile ${DATA_DIR}/train-images-idx3-ubyte.gz '
                          '--trainLabelsFile ${DATA_DIR}/train-labels-idx1-ubyte.gz '
                          '--testImagesFile ${DATA_DIR}/t10k-images-idx3-ubyte.gz '
                          '--testLabelsFile ${DATA_DIR}/t10k-labels-idx1-ubyte.gz '
                          '--learningRate 0.001 --trainingIters 20000\'',
        framework='tensorflow',
        framework_version='1.14',
        runtime='python',
        runtime_version='3.6',
        run_definition='wml-tensorflow-definition',
        run_name='wml-tensorflow-run',
        model_name='wml-tensorflow-mnist',
        scoring_payload='tf-mnist-test-payload.json',
        compute_name='k80',
        compute_nodes='1'):

    # op1 - this operation creates the credentials as secrets to be used by other operations
    get_configuration = configuration_op(
        token=GITHUB_TOKEN,
        url=CONFIG_FILE_URL,
        name=secret_name
    )

    # op2 - this operation trains the model with the model code and data saved in the cloud object store
    wml_train = train_op(
        config=get_configuration.output,
        train_code=train_code,
        execution_command=execution_command,
        framework=framework,
        framework_version=framework_version,
        runtime=runtime,
        runtime_version=runtime_version,
        run_definition=run_definition,
        run_name=run_name,
        compute_name=compute_name,
        compute_nodes=compute_nodes
    ).apply(params.use_ai_pipeline_params(secret_name)).set_image_pull_policy('Always')

    # op3 - this operation stores the model trained above
    wml_store = store_op(
        wml_train.outputs['run_uid'],
        model_name,
        framework=framework,
        framework_version=framework_version,
        runtime_version=runtime_version
    ).apply(params.use_ai_pipeline_params(secret_name)).set_image_pull_policy('Always')

    # op4 - this operation deploys the model to a web service and runs scoring with the payload in the cloud object store
    wml_deploy = deploy_op(
        wml_store.output,
        model_name,
        scoring_payload
    ).apply(params.use_ai_pipeline_params(secret_name)).set_image_pull_policy('Always')
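# A minimal sketch of compiling and submitting kfp_wml_pipeline with the KFP v1
# SDK. The pipeline name, description, client host, and argument values are
# illustrative assumptions, not taken from the snippet above:
import kfp
import kfp.dsl as dsl
import kfp.compiler as compiler

# Attach pipeline metadata, then compile to an Argo workflow archive that can
# be uploaded through the Kubeflow Pipelines UI.
pipeline_func = dsl.pipeline(
    name='kfp-wml-pipeline',
    description='Train, store, and deploy a model with Watson Machine Learning'
)(kfp_wml_pipeline)
compiler.Compiler().compile(pipeline_func, 'kfp_wml_pipeline.tar.gz')

# Or submit a run directly against a KFP endpoint.
client = kfp.Client(host='http://localhost:8888')
client.create_run_from_pipeline_func(
    pipeline_func, arguments={'GITHUB_TOKEN': '<token>'})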
def ffdlPipeline(
        GITHUB_TOKEN=dsl.PipelineParam(name='github-token', value=''),
        CONFIG_FILE_URL=dsl.PipelineParam(
            name='config-file-url',
            value='https://raw.githubusercontent.com/user/repository/branch/creds.ini'),
        model_def_file_path=dsl.PipelineParam(
            name='model-def-file-path', value='gender-classification.zip'),
        manifest_file_path=dsl.PipelineParam(
            name='manifest-file-path', value='manifest.yml'),
        model_deployment_name=dsl.PipelineParam(
            name='model-deployment-name', value='gender-classifier'),
        model_class_name=dsl.PipelineParam(
            name='model-class-name', value='ThreeLayerCNN'),
        model_class_file=dsl.PipelineParam(
            name='model-class-file', value='gender_classification.py')):
    """A pipeline for an end-to-end machine learning workflow."""

    config_op = dsl.ContainerOp(
        name="config",
        image="aipipeline/wml-config",
        command=['python3'],
        arguments=[
            '/app/config.py',
            '--token', GITHUB_TOKEN,
            '--url', CONFIG_FILE_URL,
            '--name', secret_name
        ],
        file_outputs={'secret-name': '/tmp/' + secret_name})

    train = dsl.ContainerOp(
        name='train',
        image='aipipeline/ffdl-train:0.6',
        command=['sh', '-c'],
        arguments=[
            'echo %s > /tmp/logs.txt; python -u train.py '
            '--model_def_file_path %s --manifest_file_path %s;'
            % (config_op.output, model_def_file_path, manifest_file_path)
        ],
        file_outputs={'output': '/tmp/training_id.txt'}
    ).apply(params.use_ai_pipeline_params(secret_name))

    serve = dsl.ContainerOp(
        name='serve',
        image='aipipeline/ffdl-serve:0.11',
        command=['sh', '-c'],
        arguments=[
            'python -u serve.py --model_id %s --deployment_name %s '
            '--model_class_name %s --model_class_file %s;'
            % (train.output, model_deployment_name, model_class_name, model_class_file)
        ],
        file_outputs={'output': '/tmp/deployment_result.txt'}
    ).apply(params.use_ai_pipeline_params(secret_name))
def kfp_wml_pipeline():
    # op1 - this operation creates the credentials as secrets to be used by other operations
    config_op = dsl.ContainerOp(
        name="config",
        image="aipipeline/wml-config",
        command=['python3'],
        arguments=[
            '/app/config.py',
            '--token', GITHUB_TOKEN,
            '--url', CONFIG_FILE_URL
        ],
        file_outputs={'secret-name': '/tmp/' + secret_name})

    # op2 - this operation trains the model with the model code and data saved in the cloud object store
    train_op = dsl.ContainerOp(
        name="train",
        image="aipipeline/wml-train",
        command=['python3'],
        arguments=[
            '/app/wml-train.py',
            '--config', config_op.output,
            '--train-code', 'tf-model.zip',
            '--execution-command',
            '\'python3 convolutional_network.py '
            '--trainImagesFile ${DATA_DIR}/train-images-idx3-ubyte.gz '
            '--trainLabelsFile ${DATA_DIR}/train-labels-idx1-ubyte.gz '
            '--testImagesFile ${DATA_DIR}/t10k-images-idx3-ubyte.gz '
            '--testLabelsFile ${DATA_DIR}/t10k-labels-idx1-ubyte.gz '
            '--learningRate 0.001 --trainingIters 20000\''
        ],
        file_outputs={'run-uid': '/tmp/run_uid'}
    ).apply(params.use_ai_pipeline_params(secret_name))

    # op3 - this operation stores the model trained above
    store_op = dsl.ContainerOp(
        name="store",
        image="aipipeline/wml-store",
        command=['python3'],
        arguments=[
            '/app/wml-store.py',
            '--run-uid', train_op.output,
            '--model-name', 'python-tensorflow-mnist'
        ],
        file_outputs={'model-uid': '/tmp/model_uid'}
    ).apply(params.use_ai_pipeline_params(secret_name))

    # op4 - this operation deploys the model to a web service and runs scoring with the payload in the cloud object store
    deploy_op = dsl.ContainerOp(
        name="deploy",
        image="aipipeline/wml-deploy",
        command=['python3'],
        arguments=[
            '/app/wml-deploy.py',
            '--model-uid', store_op.output,
            '--model-name', 'python-tensorflow-mnist',
            '--scoring-payload', 'tf-mnist-test-payload.json'
        ],
        file_outputs={'output': '/tmp/output'}
    ).apply(params.use_ai_pipeline_params(secret_name))
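# This variant takes no pipeline parameters, so on top of the shared imports
# above it assumes these module-level constants (values illustrative):
GITHUB_TOKEN = '<github-personal-access-token>'  # token for fetching the private creds.ini
CONFIG_FILE_URL = 'https://raw.githubusercontent.com/user/repository/branch/creds.ini'
# secret_name must match the secret that the config step creates, since the
# later steps mount it via use_ai_pipeline_params.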
def modelPipeline(model_id='max-image-caption-generator'):
    """A pipeline for ML/DL model deployment."""

    # define workflow
    model_config = dsl.ContainerOp(
        name='model_config',
        image='tomcli/model-config',
        command=['python'],
        arguments=[
            '-u', 'model-config.py',
            '--secret_name', secret_name,
            '--model_id', model_id
        ],
        file_outputs={
            'model_serving_image': '/tmp/model_serving_image',
            'primary_model_revision': '/tmp/primary_model_revision',
            'deployment_name': '/tmp/deployment_name'
        })

    model_deployment = dsl.ContainerOp(
        name='knative_model_deployment',
        image='aipipeline/knative-model-deploy',
        command=['python'],
        arguments=[
            '-u', 'knative_deployment.py',
            '--model_serving_image', model_config.outputs['model_serving_image'],
            '--primary_model_revision', model_config.outputs['primary_model_revision'],
            '--deployment_name', model_config.outputs['deployment_name']
        ],
        file_outputs={'output': '/tmp/log.txt'}
    ).apply(params.use_ai_pipeline_params(secret_name))
def etlPipeline(
        spark_master='local[*]',
        kafka_bootstrap_servers='my-cluster-kafka-bootstrap.kubeflow:9092',
        kafka_topic='reefer',
        batch_temp_loc='batch.csv',
        table_name='reefer_telemetries',
        credentials_id=''):

    setup = setup_ops(
        secret_name='{{workflow.parameters.credentials-id}}-cred'
    ).apply(params.use_ai_pipeline_params('{{workflow.parameters.credentials-id}}'))

    push = push_ops(
        kafka_bootstrap_servers=kafka_bootstrap_servers,
        kafka_topic=kafka_topic
    ).after(setup)

    etl = etl_ops(
        spark_master=spark_master,
        kafka_bootstrap_servers=kafka_bootstrap_servers,
        kafka_topic=kafka_topic,
        batch_temp_loc=batch_temp_loc,
        table_name=table_name
    ).add_env_variable(
        k8s_client.V1EnvVar(
            name='POSTGRES_URL',
            value_from=k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    name='{{workflow.parameters.credentials-id}}-cred',
                    key='POSTGRES_URL')))
    ).set_image_pull_policy('Always').after(push)

    post_template_url = 'https://raw.githubusercontent.com/Tomcli/kfp-components/master/postprocessing.yaml'
    post_model_ops = components.load_component_from_url(post_template_url)
    post_model = post_model_ops(
        notification_type='etl',
        pipeline_name='{{pod.name}}'
    ).apply(
        params.use_ai_pipeline_params('{{workflow.parameters.credentials-id}}')
    ).after(etl).set_image_pull_policy('Always')
def model_pipeline(model_id='${model_identifier}'):
    """A pipeline for ML/DL model deployment."""
    from kfp import components, dsl
    from ai_pipeline_params import use_ai_pipeline_params

    secret_name = 'wml-creds'
    generated_secret = 'test-creds'

    train_op = components.load_component_from_url(
        'https://raw.githubusercontent.com/kubeflow/pipelines/master/components/ibm-components/watson/train/component.yaml')

    model_config = dsl.ContainerOp(
        name='model_config',
        image='tomcli/model-config:latest',
        command=['python'],
        arguments=[
            '-u', 'model-config.py',
            '--secret_name', generated_secret,
            '--model_id', model_id
        ],
        file_outputs={
            'train_code': '/tmp/train_code',
            'execution_command': '/tmp/execution_command',
            'framework': '/tmp/framework',
            'framework_version': '/tmp/framework_version',
            'runtime': '/tmp/runtime',
            'runtime_version': '/tmp/runtime_version',
            'run_definition': '/tmp/run_definition',
            'run_name': '/tmp/run_name'
        }
    )
    model_config.apply(use_ai_pipeline_params(secret_name))

    model_training = train_op(
        train_code=model_config.outputs['train_code'],
        execution_command=model_config.outputs['execution_command'],
        framework=model_config.outputs['framework'],
        framework_version=model_config.outputs['framework_version'],
        runtime=model_config.outputs['runtime'],
        runtime_version=model_config.outputs['runtime_version'],
        run_definition=model_config.outputs['run_definition'],
        run_name=model_config.outputs['run_name']
    )
    model_training.apply(use_ai_pipeline_params(generated_secret))
def aiosPipeline(
        BUCKET_NAME='',
        TRAINING_DATA_LINK='https://raw.githubusercontent.com/emartensibm/german-credit/master/german_credit_data_biased_training.csv',
        POSTGRES_SCHEMA_NAME='data_mart_credit',
        LABEL_NAME='Risk',
        PROBLEM_TYPE='BINARY_CLASSIFICATION',
        THRESHOLD='0.7',
        AIOS_MANIFEST_PATH='aios.json',
        MODEL_FILE_PATH='model.py',
        SPARK_ENTRYPOINT='python model.py',
        MODEL_NAME='Spark German Risk Model - Final',
        DEPLOYMENT_NAME='Spark German Risk Deployment - Final'):
    """A pipeline for a Spark machine learning workflow with OpenScale."""

    data_preprocess_spark = preprocess_spark_op(
        bucket_name=BUCKET_NAME,
        data_url=TRAINING_DATA_LINK
    ).apply(params.use_ai_pipeline_params(secret_name))

    train_spark = train_spark_op(
        bucket_name=BUCKET_NAME,
        data_filename=data_preprocess_spark.output,
        model_filename=MODEL_FILE_PATH,
        spark_entrypoint=SPARK_ENTRYPOINT
    ).apply(params.use_ai_pipeline_params(secret_name))

    store_spark_model = store_spark_op(
        bucket_name=BUCKET_NAME,
        aios_manifest_path=AIOS_MANIFEST_PATH,
        problem_type=PROBLEM_TYPE,
        model_name=MODEL_NAME,
        deployment_name=DEPLOYMENT_NAME,
        model_filepath=train_spark.outputs['model_filepath'],
        train_data_filepath=train_spark.outputs['train_data_filepath']
    ).apply(params.use_ai_pipeline_params(secret_name))

    deploy = deploy_op(
        model_uid=store_spark_model.output,
        model_name=MODEL_NAME,
        deployment_name=DEPLOYMENT_NAME
    ).apply(params.use_ai_pipeline_params(secret_name))

    subscribe = subscribe_op(
        model_uid=deploy.outputs['model_uid'],
        model_name=MODEL_NAME,
        aios_schema=POSTGRES_SCHEMA_NAME,
        label_column=LABEL_NAME,
        aios_manifest_path=AIOS_MANIFEST_PATH,
        bucket_name=BUCKET_NAME,
        problem_type=PROBLEM_TYPE
    ).apply(params.use_ai_pipeline_params(secret_name))

    monitor_quality = quality_op(
        model_name=subscribe.output,
        quality_threshold=THRESHOLD
    ).apply(params.use_ai_pipeline_params(secret_name))

    monitor_fairness = fairness_op(
        model_name=subscribe.output,
        aios_manifest_path=AIOS_MANIFEST_PATH,
        cos_bucket_name=BUCKET_NAME,
        data_filename=data_preprocess_spark.output
    ).apply(params.use_ai_pipeline_params(secret_name))
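# The *_op factories used by aiosPipeline are not defined in the snippet; they
# are typically created from component.yaml files, as the ffdlPipeline variant
# below does explicitly. A sketch of that pattern with an illustrative URL
# (the real paths live under components/ibm-components in the
# kubeflow/pipelines repository):
from kfp import components

preprocess_spark_op = components.load_component_from_url(
    'https://raw.githubusercontent.com/kubeflow/pipelines/master/'
    'components/ibm-components/spark/data_preprocess_spark/component.yaml')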
def deletek8sPipeline(deployment_name='model-serving'):
    """A pipeline to clean up a Knative deployment."""

    delete_k8s = dsl.ContainerOp(
        name='delete_k8s_deployment',
        image='aipipeline/deployment-k8s-remote',
        command=['sh', '-c'],
        arguments=[
            'python -u kube_deployment.py --cleanup True --deployment_name %s'
            % deployment_name
        ],
        file_outputs={'output': '/tmp/log.txt'}
    ).apply(params.use_ai_pipeline_params(secret_name))
def ffdlPipeline(
        GITHUB_TOKEN=dsl.PipelineParam(name='github-token', value=''),
        CONFIG_FILE_URL=dsl.PipelineParam(
            name='config-file-url',
            value='https://raw.githubusercontent.com/user/repository/branch/creds.ini'),
        model_def_file_path=dsl.PipelineParam(
            name='model-def-file-path', value='gender-classification.zip'),
        manifest_file_path=dsl.PipelineParam(
            name='manifest-file-path', value='manifest.yml'),
        model_deployment_name=dsl.PipelineParam(
            name='model-deployment-name', value='gender-classifier'),
        model_class_name=dsl.PipelineParam(
            name='model-class-name', value='ThreeLayerCNN'),
        model_class_file=dsl.PipelineParam(
            name='model-class-file', value='gender_classification.py')):
    """A pipeline for an end-to-end machine learning workflow."""

    configuration_op = components.load_component_from_url(
        'https://raw.githubusercontent.com/kubeflow/pipelines/785d474699cffb7463986b9abc4b1fbe03796cb6/components/ibm-components/commons/config/component.yaml')
    train_op = components.load_component_from_url(
        'https://raw.githubusercontent.com/kubeflow/pipelines/785d474699cffb7463986b9abc4b1fbe03796cb6/components/ibm-components/ffdl/train/component.yaml')
    serve_op = components.load_component_from_url(
        'https://raw.githubusercontent.com/kubeflow/pipelines/785d474699cffb7463986b9abc4b1fbe03796cb6/components/ibm-components/ffdl/serve/component.yaml')

    get_configuration = configuration_op(
        token=GITHUB_TOKEN,
        url=CONFIG_FILE_URL,
        name=secret_name)

    train = train_op(
        model_def_file_path,
        manifest_file_path
    ).apply(params.use_ai_pipeline_params(secret_name))

    serve = serve_op(
        train.output,
        model_deployment_name,
        model_class_name,
        model_class_file
    ).apply(params.use_ai_pipeline_params(secret_name))
def model_pipeline(model_id='${model_identifier}'):
    """A pipeline for ML/DL model deployment."""
    from kfp import dsl
    from ai_pipeline_params import use_ai_pipeline_params

    secret_name = 'e2e-creds'

    model_config = dsl.ContainerOp(
        name='model_config',
        image='tomcli/model-config',
        command=['python'],
        arguments=[
            '-u', 'model-config.py',
            '--secret_name', secret_name,
            '--model_id', model_id
        ],
        file_outputs={
            'model_serving_image': '/tmp/model_serving_image',
            'primary_model_revision': '/tmp/primary_model_revision',
            'deployment_name': '/tmp/deployment_name'
        }
    )

    model_deployment = dsl.ContainerOp(
        name='knative_model_deployment',
        image='aipipeline/knative-model-deploy',
        command=['python'],
        arguments=[
            '-u', 'knative_deployment.py',
            '--model_serving_image', model_config.outputs['model_serving_image'],
            '--primary_model_revision', model_config.outputs['primary_model_revision'],
            '--deployment_name', model_config.outputs['deployment_name']
        ],
        file_outputs={
            'output': '/tmp/log.txt'
        }
    )
    model_deployment.apply(use_ai_pipeline_params(secret_name))
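# A rough sketch of what use_ai_pipeline_params does, inferred from how it is
# used in these pipelines rather than from the package source: it returns an
# op modifier that mounts the named Kubernetes secret into the task container
# so components can read credentials from files under the mount path.
from kubernetes import client as k8s_client

def use_ai_pipeline_params_sketch(secret_name, secret_volume_mount_path='/app/secrets'):
    def _apply(task):
        # Expose the secret as a volume, then mount it into the container.
        task.add_volume(k8s_client.V1Volume(
            name=secret_name,
            secret=k8s_client.V1SecretVolumeSource(secret_name=secret_name)))
        task.container.add_volume_mount(k8s_client.V1VolumeMount(
            mount_path=secret_volume_mount_path, name=secret_name))
        return task
    return _apply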
def icpdPipeline(
        notebook_url='https://raw.githubusercontent.com/animeshsingh/notebooks/master/sklearn.ipynb',
        notebook_params='',
        api_token='',
        endpoint_url='minio-service:9000',
        bucket_name='mlpipeline',
        object_name='notebooks/sklearn-model/runs/train/sklearn-pg_out.ipynb',
        access_key='minio',
        secret_access_key='minio123',
        kfservice_url='istio-ingressgateway.istio-system:80',
        action='update',
        model_name='maintenance-model-pg',
        model_deploy_namespace='model-deploy',
        default_custom_model_spec='{"name": "maintenance-model-pg", "image": "tomcli/webapp:v0.0.4", "port": "8080", "env": [{"name": "MODEL_PATH", "value": "model_logistic_regression_pg.pkl"}]}',
        canary_custom_model_spec='{"name": "maintenance-model-pg", "image": "tomcli/webapp:v0.0.4", "port": "8080", "env": [{"name": "MODEL_PATH", "value": "model_logistic_regression_pg.pkl"}]}',
        canary_model_traffic_percentage='10',
        autoscaling_target='10',
        kafka_brokers='my-cluster-kafka-bootstrap.kubeflow:9092',
        kafka_apikey='',
        telemetry_topic='reeferTelemetries',
        container_topic='containers',
        remote_kfserving_deployer_api='',
        remote_istio_ingress_endpoint='',
        credentials_id='',
        secret_name='icp4d-demo'):

    def kubedeploy_ops(component_name, deployment_image, deployment_name,
                       container_port, cleanup='False', namespace='kubeflow',
                       kafka_brokers=kafka_brokers, kafka_apikey=kafka_apikey,
                       telemetry_topic=telemetry_topic,
                       container_topic=container_topic,
                       kfservice_url=kfservice_url, model_serving_metadata=''):
        return dsl.ContainerOp(
            name=component_name,
            image='docker.io/aipipeline/kafka-app-deployment:v0.7',
            command=['python'],
            arguments=[
                '-u', 'kube_deployment.py',
                '--model_serving_image', deployment_image,
                '--deployment_name', deployment_name,
                '--container_port', container_port,
                '--cleanup', cleanup,
                '--namespace', namespace,
                '--env_kafka_brokers', kafka_brokers,
                '--env_kafka_apikey', kafka_apikey,
                '--env_telemetry_topic', telemetry_topic,
                '--env_container_topic', container_topic,
                '--env_kfservice_url', kfservice_url,
                '--env_model_serving_metadata', model_serving_metadata
            ],
            file_outputs={'logs': '/tmp/log.txt'})

    setup = setup_ops(
        secret_name='{{workflow.parameters.credentials-id}}-cred'
    ).apply(params.use_ai_pipeline_params('{{workflow.parameters.credentials-id}}'))

    trainer_notebook = notebook_ops(
        notebook_url=notebook_url,
        notebook_params=notebook_params,
        api_token=api_token,
        endpoint_url=endpoint_url,
        bucket_name=bucket_name,
        object_name=object_name,
        access_key=access_key,
        secret_access_key=secret_access_key
    ).add_env_variable(
        k8s_client.V1EnvVar(
            name='POSTGRES_URL',
            value_from=k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    name='{{workflow.parameters.credentials-id}}-cred',
                    key='POSTGRES_URL')))
    ).after(setup)

    postprocessing_1 = post_model_ops().apply(
        params.use_ai_pipeline_params('{{workflow.parameters.credentials-id}}')
    ).after(trainer_notebook).set_image_pull_policy('Always')

    serving = kfserving_ops(
        action=action,
        model_name=model_name,
        namespace=model_deploy_namespace,
        framework='custom',
        default_custom_model_spec=default_custom_model_spec,
        canary_custom_model_spec=canary_custom_model_spec,
        canary_model_traffic_percentage=canary_model_traffic_percentage,
        autoscaling_target=autoscaling_target,
        kfserving_deployer_api=remote_kfserving_deployer_api
    ).after(trainer_notebook)

    postprocessing_2 = post_model_ops(
        notification_type='serving',
        serving_output=serving.outputs['endpoint_uri']
    ).apply(
        params.use_ai_pipeline_params('{{workflow.parameters.credentials-id}}')
    ).set_image_pull_policy('Always').apply(
        params.use_ai_pipeline_params(
            '{{workflow.parameters.secret-name}}',
            secret_volume_mount_path='/app/ip'))

    scoring = kubedeploy_ops(
        component_name='scoring',
        deployment_image='aipipeline/predictivescoring:v0.0.3',
        deployment_name='scoring',
        container_port='8080',
        kafka_brokers=kafka_brokers,
        kafka_apikey=kafka_apikey,
        telemetry_topic=telemetry_topic,
        container_topic=container_topic,
        kfservice_url=kfservice_url,
        model_serving_metadata=serving.outputs['endpoint_uri']
    ).apply(params.use_ai_pipeline_params('{{workflow.parameters.secret-name}}'))

    monitoring = kubedeploy_ops(
        component_name='monitoring',
        deployment_image='ffdlops/consumer:v0.0.1',
        deployment_name='customer',
        container_port='8080',
        kafka_brokers=kafka_brokers,
        kafka_apikey=kafka_apikey
    ).after(scoring).apply(
        params.use_ai_pipeline_params('{{workflow.parameters.secret-name}}'))

    postprocessing_3 = post_model_ops(
        notification_type='other',
        pipeline_name='{{pod.name}}'
    ).apply(
        params.use_ai_pipeline_params('{{workflow.parameters.credentials-id}}')
    ).after(monitoring).set_image_pull_policy('Always')
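# icpdPipeline relies on op factories (setup_ops, notebook_ops, kfserving_ops,
# post_model_ops) loaded at module level. A sketch of that loading: the
# postprocessing URL appears verbatim in the other snippets here, while the
# KFServing component path is an assumption, not taken from this snippet.
from kfp import components

kfserving_ops = components.load_component_from_url(
    'https://raw.githubusercontent.com/kubeflow/pipelines/master/'
    'components/kubeflow/kfserving/component.yaml')
post_model_ops = components.load_component_from_url(
    'https://raw.githubusercontent.com/Tomcli/kfp-components/master/postprocessing.yaml')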
def icpd_scoring_monitoring(
        kfservice_url='istio-ingressgateway.istio-system:80',
        kafka_brokers='my-cluster-kafka-bootstrap.kubeflow:9092',
        kafka_apikey='',
        telemetry_topic='reeferTelemetries',
        container_topic='containers',
        model_serving_metadata='{"apiVersion":"serving.kubeflow.org/v1alpha1","kind":"KFService","metadata":{"annotations":{"autoscaling.knative.dev/target":"10"},"creationTimestamp":"2019-10-04T18:44:59Z","generation":1,"name":"maintenance-model-pg","namespace":"model-deploy","resourceVersion":"153452239","selfLink":"/apis/serving.kubeflow.org/v1alpha1/namespaces/model-deploy/kfservices/maintenance-model-pg","uid":"1130647c-e6d7-11e9-8107-06078924dd3e"},"spec":{"canary":{"custom":{"container":{"env":[{"name":"MODEL_PATH","value":"model_logistic_regression_pg.pkl"}],"image":"tomcli/webapp:v0.0.4","name":"maintenance-model-pg","ports":[{"containerPort":8080}],"resources":{"requests":{"cpu":"1","memory":"2Gi"}}}}},"canaryTrafficPercent":10,"default":{"custom":{"container":{"env":[{"name":"MODEL_PATH","value":"model_logistic_regression_pg.pkl"}],"image":"tomcli/webapp:v0.0.4","name":"maintenance-model-pg","ports":[{"containerPort":8080}],"resources":{"requests":{"cpu":"1","memory":"2Gi"}}}}}},"status":{"canary":{"name":"maintenance-model-pg-canary-qhmvr","traffic":10},"conditions":[{"lastTransitionTime":"2019-10-22T21:13:30Z","message":"Revision \\"maintenance-model-pg-canary-qhmvr\\" failed with message: 0/1 nodes are available: 1 Insufficient cpu..","reason":"RevisionFailed","severity":"Info","status":"False","type":"CanaryPredictorReady"},{"lastTransitionTime":"2019-10-22T21:13:30Z","message":"Revision \\"maintenance-model-pg-default-8pcqz\\" failed with message: 0/1 nodes are available: 1 Insufficient cpu..","reason":"RevisionFailed","status":"False","type":"DefaultPredictorReady"},{"lastTransitionTime":"2019-10-22T21:13:30Z","message":"Revision \\"maintenance-model-pg-default-8pcqz\\" failed with message: 0/1 nodes are available: 1 Insufficient cpu..","reason":"RevisionFailed","status":"False","type":"Ready"},{"lastTransitionTime":"2019-10-29T16:58:46Z","status":"True","type":"RoutesReady"}],"default":{"name":"maintenance-model-pg-default-8pcqz","traffic":90},"url":"http://maintenance-model-pg.model-deploy.example.com"}}',
        secret_name='icp4d-demo',
        credentials_id=''):

    def kubedeploy_ops(component_name, deployment_image, deployment_name,
                       container_port, cleanup='False', namespace='kubeflow',
                       kafka_brokers=kafka_brokers, kafka_apikey=kafka_apikey,
                       telemetry_topic=telemetry_topic,
                       container_topic=container_topic,
                       kfservice_url=kfservice_url, model_serving_metadata=''):
        return dsl.ContainerOp(
            name=component_name,
            image='docker.io/aipipeline/kafka-app-deployment:v0.7',
            command=['python'],
            arguments=[
                '-u', 'kube_deployment.py',
                '--model_serving_image', deployment_image,
                '--deployment_name', deployment_name,
                '--container_port', container_port,
                '--cleanup', cleanup,
                '--namespace', namespace,
                '--env_kafka_brokers', kafka_brokers,
                '--env_kafka_apikey', kafka_apikey,
                '--env_telemetry_topic', telemetry_topic,
                '--env_container_topic', container_topic,
                '--env_kfservice_url', kfservice_url,
                '--env_model_serving_metadata', model_serving_metadata
            ],
            file_outputs={'logs': '/tmp/log.txt'})

    scoring = kubedeploy_ops(
        component_name='scoring',
        deployment_image='aipipeline/predictivescoring:v0.0.3',
        deployment_name='scoring',
        container_port='8080',
        kafka_brokers=kafka_brokers,
        kafka_apikey=kafka_apikey,
        telemetry_topic=telemetry_topic,
        container_topic=container_topic,
        kfservice_url=kfservice_url,
        model_serving_metadata=model_serving_metadata
    ).apply(params.use_ai_pipeline_params('{{workflow.parameters.secret-name}}'))

    monitoring = kubedeploy_ops(
        component_name='monitoring',
        deployment_image='ffdlops/consumer:v0.0.1',
        deployment_name='customer',
        container_port='8080',
        kafka_brokers=kafka_brokers,
        kafka_apikey=kafka_apikey
    ).after(scoring).apply(
        params.use_ai_pipeline_params('{{workflow.parameters.secret-name}}'))

    post_template_url = 'https://raw.githubusercontent.com/Tomcli/kfp-components/master/postprocessing.yaml'
    post_model_ops = components.load_component_from_url(post_template_url)
    post_model = post_model_ops(
        notification_type='other',
        pipeline_name='{{pod.name}}'
    ).apply(
        params.use_ai_pipeline_params('{{workflow.parameters.credentials-id}}')
    ).after(monitoring).set_image_pull_policy('Always')