def _use_aws_secret(task):
    """Attach AWS credential env vars (from a k8s secret) to the task's container.

    Reads ``secret_name``, ``aws_access_key_id_name``,
    ``aws_secret_access_key_name`` and ``aws_region`` from the enclosing
    scope. Returns the task to allow fluent chaining.
    """
    from kubernetes import client as k8s_client

    def _secret_env(env_name, secret_key):
        # Env var whose value is resolved from the secret at pod start.
        return k8s_client.V1EnvVar(
            name=env_name,
            value_from=k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    name=secret_name, key=secret_key)))

    chained = task.container.add_env_variable(
        _secret_env('AWS_ACCESS_KEY_ID', aws_access_key_id_name))
    chained.add_env_variable(
        _secret_env('AWS_SECRET_ACCESS_KEY', aws_secret_access_key_name))
    if aws_region:
        # Region is optional and stored as a plain value, not a secret ref.
        task.container.add_env_variable(
            k8s_client.V1EnvVar(name='AWS_REGION', value=aws_region))
    return task
def add_pod_env(op: BaseOp) -> BaseOp:
    """Adds pod environment info (pod name and namespace) to a ContainerOp.

    Only ops that are ContainerOps and explicitly carry the pod label
    ``add-pod-env: "true"`` are modified; every other op is returned
    untouched.
    """
    if isinstance(op, ContainerOp) and op.pod_labels \
            and op.pod_labels.get('add-pod-env') == 'true':
        # .get() avoids a KeyError when the label dict exists but the
        # 'add-pod-env' key is absent (the original indexed it directly).
        from kubernetes import client as k8s_client

        def _downward_api_env(name, field_path):
            # Env var populated from the pod's own metadata via the
            # Kubernetes downward API.
            return k8s_client.V1EnvVar(
                name=name,
                value_from=k8s_client.V1EnvVarSource(
                    field_ref=k8s_client.V1ObjectFieldSelector(
                        field_path=field_path)))

        op.container.add_env_variable(
            _downward_api_env('KFP_POD_NAME', 'metadata.name')
        ).add_env_variable(
            _downward_api_env('KFP_NAMESPACE', 'metadata.namespace'))
    return op
def _use_azure_secret(task):
    """Inject the four Azure service-principal env vars from ``secret_name``.

    Each env var is sourced from the secret key of the same name.
    Returns the task for chaining.
    """
    from kubernetes import client as k8s_client

    azure_env_names = (
        'AZ_SUBSCRIPTION_ID',
        'AZ_TENANT_ID',
        'AZ_CLIENT_ID',
        'AZ_CLIENT_SECRET',
    )
    target = task.container
    for env_name in azure_env_names:
        # The secret key matches the env var name exactly.
        target = target.add_env_variable(
            k8s_client.V1EnvVar(
                name=env_name,
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        name=secret_name, key=env_name))))
    return task
def _use_mysql_secret(task):
    """Expose MySQL credentials from a k8s secret as env vars on the task.

    Uses ``secret_name``, ``db_username`` and ``db_password`` from the
    enclosing scope; returns the result of the fluent chain.
    """
    from kubernetes import client as k8s_client

    def _from_secret(env_name, secret_key):
        return k8s_client.V1EnvVar(
            name=env_name,
            value_from=k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    name=secret_name, key=secret_key)))

    chained = task.add_env_variable(_from_secret('MYSQL_USERNAME', db_username))
    return chained.add_env_variable(_from_secret('MYSQL_PASSWORD', db_password))
def add_kfp_pod_env(op: BaseOp) -> BaseOp:
    """Adds KFP pod environment info to the specified ContainerOp.

    Non-ContainerOp inputs are returned unchanged (with a warning), since
    they have no container to attach env vars to.
    """
    if not isinstance(op, ContainerOp):
        warnings.warn(
            'Trying to add default KFP environment variables to an Op that is '
            'not a ContainerOp. Ignoring request.')
        return op

    # (env var name, downward-API field path) pairs injected into the op.
    downward_api_fields = (
        ('KFP_POD_NAME', 'metadata.name'),
        ('KFP_NAMESPACE', 'metadata.namespace'),
        ('WORKFLOW_ID', "metadata.labels['workflows.argoproj.io/workflow']"),
    )
    target = op.container
    for env_name, field_path in downward_api_fields:
        target = target.add_env_variable(
            k8s_client.V1EnvVar(
                name=env_name,
                value_from=k8s_client.V1EnvVarSource(
                    field_ref=k8s_client.V1ObjectFieldSelector(
                        field_path=field_path))))
    return op
def add_aws_credentials(kube_manager, pod_spec, namespace):
    """Add AWS credential env vars (from the shared AWS secret) to a pod spec.

    :param kube_manager: kube manager that handles communication with the
        Kubernetes client
    :param pod_spec: pod spec whose first container receives the env vars
    :param namespace: namespace the credentials secret must live in
    :raises ValueError: if the secret does not exist in ``namespace``
    """
    if not kube_manager.secret_exists(constants.AWS_CREDS_SECRET_NAME, namespace):
        # Report the actual secret name instead of the hard-coded
        # 'aws-secret' so the message stays correct if the constant changes.
        raise ValueError(
            'Unable to mount credentials: Secret {} not found in namespace {}'
            .format(constants.AWS_CREDS_SECRET_NAME, namespace))

    # Set appropriate secrets env to enable kubeflow-user service account.
    env = [
        client.V1EnvVar(
            name=key,
            value_from=client.V1EnvVarSource(
                secret_key_ref=client.V1SecretKeySelector(
                    name=constants.AWS_CREDS_SECRET_NAME, key=key)))
        for key in ('AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY')
    ]
    if pod_spec.containers[0].env:
        pod_spec.containers[0].env.extend(env)
    else:
        pod_spec.containers[0].env = env
def build_env_list_for_pod(env_vars):
    """Translate a dict of env var specs into a list of ``client.V1EnvVar``.

    ``env_vars`` maps env var names to either:
      * a plain string value, or
      * a ref dict with keys ``type`` ('configmap' or 'secret'), ``name``
        and ``key``.
    Entries with missing data or unsupported ref types ('field',
    'resource_field') are silently skipped, as are non-str/non-dict values.
    """
    env_list = []
    for env_name, env_value in env_vars.items():
        if env_name is None or env_value is None:
            continue
        if isinstance(env_value, str):
            # Simple key/value pair. isinstance replaces the brittle
            # type(x) == str comparison (also accepts str subclasses).
            env_list.append(client.V1EnvVar(name=env_name, value=env_value))
        elif isinstance(env_value, dict):
            # Ref entry: all three fields are required. (The original's
            # `keys() is None` test was dead code, and the len < 3 check
            # was implied by requiring all three keys.)
            if not {'type', 'name', 'key'} <= env_value.keys():
                continue
            ref_type = env_value['type'].lower()
            ref_name = env_value['name']
            ref_key = env_value['key']
            env_var_source = None
            if ref_type == 'configmap':
                env_var_source = client.V1EnvVarSource(
                    config_map_key_ref=client.V1ConfigMapKeySelector(
                        key=ref_key, name=ref_name))
            elif ref_type == 'secret':
                env_var_source = client.V1EnvVarSource(
                    secret_key_ref=client.V1SecretKeySelector(
                        key=ref_key, name=ref_name))
            # 'field' and 'resource_field' refs are recognized but not
            # yet supported; they fall through with env_var_source=None.
            if env_var_source is not None:
                env_list.append(
                    client.V1EnvVar(name=env_name, value_from=env_var_source))
    return env_list
def _use_aws_envvars_from_secret(task):
    """Map AWS credential keys found in a k8s secret onto task env vars.

    Reads the secret from ``secret_namespace`` (or the current namespace)
    and only injects the env vars whose source keys actually exist in the
    secret's data. Returns the task for chaining.
    """
    api = kube_client.CoreV1Api(K8sHelper()._api_client)
    ns = secret_namespace or current_namespace()
    secret = api.read_namespaced_secret(secret_name, ns)

    # (secret data key, env var name) pairs, injected only when present.
    key_to_env = (
        ('access_key', 'AWS_ACCESS_KEY_ID'),
        ('secret_key', 'AWS_SECRET_ACCESS_KEY'),
        ('token', 'AWS_SESSION_TOKEN'),
    )
    for data_key, env_name in key_to_env:
        if data_key in secret.data:
            task.add_env_variable(
                kube_client.V1EnvVar(
                    name=env_name,
                    value_from=kube_client.V1EnvVarSource(
                        secret_key_ref=kube_client.V1SecretKeySelector(
                            name=secret_name, key=data_key))))
    return task
def create_job_object(runner_image, region, s3_path, pvc_name):
    """Build a batch/v1 Job that syncs ``s3_path`` into a PVC-backed volume.

    The job runs ``aws s3 sync`` inside ``runner_image``, with credentials
    taken from the 'aws-secret' Kubernetes secret and the PVC mounted
    under /mnt.
    """
    target_folder = get_target_folder(s3_path)

    def _aws_secret_env(key):
        # Credential env var sourced from the 'aws-secret' secret.
        return k8s_client.V1EnvVar(
            name=key,
            value_from=k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    key=key, name="aws-secret")))

    # Pod-template container that runs the sync command.
    container = k8s_client.V1Container(
        name="copy-dataset-worker",
        image=runner_image,
        command=["aws"],
        args=["s3", "sync", s3_path, "/mnt/" + target_folder],
        volume_mounts=[
            k8s_client.V1VolumeMount(name="data-storage", mount_path='/mnt')],
        env=[
            k8s_client.V1EnvVar(name="AWS_REGION", value=region),
            _aws_secret_env("AWS_ACCESS_KEY_ID"),
            _aws_secret_env("AWS_SECRET_ACCESS_KEY"),
        ],
    )
    volume = k8s_client.V1Volume(
        name='data-storage',
        persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
            claim_name=pvc_name))
    # Pod spec retries on failure until the sync succeeds.
    template = k8s_client.V1PodTemplateSpec(
        spec=k8s_client.V1PodSpec(
            containers=[container],
            volumes=[volume],
            restart_policy="OnFailure"))
    spec = k8s_client.V1JobSpec(template=template)
    # The Job is named after its single container.
    return k8s_client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=k8s_client.V1ObjectMeta(name=container.name),
        spec=spec)
def add_aws_credentials(kube_manager, pod_spec, namespace):
    """Add AWS credential env vars to the pod spec's first container.

    :param kube_manager: kube manager that handles communication with the
        Kubernetes client
    :param pod_spec: pod spec like volumes and security context
    :param namespace: namespace the credentials secret must live in
    :raises ValueError: if the secret does not exist in ``namespace``
    """
    if not kube_manager.secret_exists(constants.AWS_CREDS_SECRET_NAME, namespace):
        # Format the real secret name into the message rather than
        # hard-coding 'aws-secret', which can drift from the constant.
        raise ValueError(
            'Unable to mount credentials: Secret {} not found in namespace {}'
            .format(constants.AWS_CREDS_SECRET_NAME, namespace))

    # Set appropriate secrets env to enable kubeflow-user service account.
    env = [
        client.V1EnvVar(name='AWS_ACCESS_KEY_ID',
                        value_from=client.V1EnvVarSource(
                            secret_key_ref=client.V1SecretKeySelector(
                                name=constants.AWS_CREDS_SECRET_NAME,
                                key='AWS_ACCESS_KEY_ID'))),
        client.V1EnvVar(name='AWS_SECRET_ACCESS_KEY',
                        value_from=client.V1EnvVarSource(
                            secret_key_ref=client.V1SecretKeySelector(
                                name=constants.AWS_CREDS_SECRET_NAME,
                                key='AWS_SECRET_ACCESS_KEY'))),
    ]
    if pod_spec.containers[0].env:
        pod_spec.containers[0].env.extend(env)
    else:
        pod_spec.containers[0].env = env
def _use_minio_secret(task):
    """Configure a task to use the in-cluster Minio service via S3 env vars.

    Credentials come from ``secret_name`` (key names taken from the
    enclosing scope); the remaining vars hard-code the plain-HTTP Minio
    endpoint. Returns the result of the fluent chain.
    """
    from kubernetes import client as k8s_client

    def _secret_env(env_name, secret_key):
        return k8s_client.V1EnvVar(
            name=env_name,
            value_from=k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    name=secret_name, key=secret_key)))

    chained = task.add_env_variable(
        _secret_env('AWS_ACCESS_KEY_ID', minio_access_key_id_name))
    chained = chained.add_env_variable(
        _secret_env('AWS_SECRET_ACCESS_KEY', minio_secret_access_key_name))
    # Static Minio connection settings (plain HTTP, no TLS verification).
    for env_name, env_value in (
            ('S3_USE_HTTPS', '0'),
            ('S3_VERIFY_SSL', '0'),
            ('S3_ENDPOINT', 'minio-service.kubeflow:9000'),
            ('S3_REQUEST_TIMEOUT_MSEC', '1200000')):
        chained = chained.add_env_variable(
            k8s_client.V1EnvVar(name=env_name, value=env_value))
    return chained
def _use_aws_secret(task):
    """Attach AWS access-key env vars sourced from ``secret_name`` to a task.

    Key names come from ``aws_access_key_id_name`` /
    ``aws_secret_access_key_name`` in the enclosing scope. Returns the
    result of the fluent chain.
    """
    from kubernetes import client as k8s_client

    def _credential(env_name, secret_key):
        return k8s_client.V1EnvVar(
            name=env_name,
            value_from=k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    name=secret_name, key=secret_key)))

    chained = task.add_env_variable(
        _credential('AWS_ACCESS_KEY_ID', aws_access_key_id_name))
    return chained.add_env_variable(
        _credential('AWS_SECRET_ACCESS_KEY', aws_secret_access_key_name))
def downstream_api():
    """Run print_workflow_info with the KFP run name and id as env vars.

    Both values are read from the pod's Argo metadata via the downward API.
    """
    step = print_workflow_info()
    step = step.add_env_variable(
        k8s_client.V1EnvVar(
            name='KFP_RUN_NAME',
            value_from=k8s_client.V1EnvVarSource(
                field_ref=k8s_client.V1ObjectFieldSelector(
                    field_path="metadata.annotations['pipelines.kubeflow.org/run_name']"))))
    step.add_env_variable(
        k8s_client.V1EnvVar(
            name='KFP_RUN_ID',
            value_from=k8s_client.V1EnvVarSource(
                field_ref=k8s_client.V1ObjectFieldSelector(
                    field_path="metadata.labels['pipeline/runid']"))))
def update_index_op(app_dir: str, base_branch: str, base_git_repo: str,
                    bot_email: str, fork_git_repo: str, index_file: str,
                    lookup_file: str, workflow_id: str):
    """Build the ContainerOp that runs update_index.sh against the fork repo.

    A GitHub access token is mounted as a secret volume and also exposed
    via the GITHUB_TOKEN env var (sourced from the same secret).
    """
    op = dsl.ContainerOp(
        name='update_index',
        image='gcr.io/kubeflow-examples/code-search/ks:v20181204-ee47a49-dirty-fa8aa3',
        command=['/usr/local/src/update_index.sh'],
        arguments=[
            '--appDir=%s' % app_dir,
            '--baseBranch=%s' % base_branch,
            '--baseGitRepo=%s' % base_git_repo,
            '--botEmail=%s' % bot_email,
            '--forkGitRepo=%s' % fork_git_repo,
            '--indexFile=%s' % index_file,
            '--lookupFile=%s' % lookup_file,
            '--workflowId=%s' % workflow_id,
        ],
    )
    op = op.add_volume(
        k8s_client.V1Volume(
            name='github-access-token',
            secret=k8s_client.V1SecretVolumeSource(
                secret_name='github-access-token')))
    return op.add_env_variable(
        k8s_client.V1EnvVar(
            name='GITHUB_TOKEN',
            value_from=k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    name='github-access-token',
                    key='token',
                ))))
def _mount_v3iod(task):
    """Wire a task up to the node-local v3iod daemon (volumes + env vars).

    Mounts the shared-memory and daemon-communication host paths (both
    suffixed with ``namespace``), a scratch dir for daemon health, and the
    v3io config map; then points the container at the node IP and config
    file. Returns the task for chaining.
    """
    from kubernetes import client as k8s_client

    def _add_host_path(name, mount_path, host_path):
        # Host-path volume plus a matching mount on the task container.
        volume = k8s_client.V1Volume(
            name=name,
            host_path=k8s_client.V1HostPathVolumeSource(path=host_path,
                                                        type=''),
        )
        task.add_volume(volume).add_volume_mount(
            k8s_client.V1VolumeMount(mount_path=mount_path, name=name))

    _add_host_path(name='shm', mount_path='/dev/shm',
                   host_path='/dev/shm/' + namespace)
    _add_host_path(
        name='v3iod-comm',
        mount_path='/var/run/iguazio/dayman',
        host_path='/var/run/iguazio/dayman/' + namespace,
    )

    health_volume = k8s_client.V1Volume(
        name='daemon-health', empty_dir=k8s_client.V1EmptyDirVolumeSource())
    task.add_volume(health_volume).add_volume_mount(
        k8s_client.V1VolumeMount(
            mount_path='/var/run/iguazio/daemon_health', name='daemon-health'))

    config_volume = k8s_client.V1Volume(
        name='v3io-config',
        config_map=k8s_client.V1ConfigMapVolumeSource(
            name=v3io_config_configmap, default_mode=420),
    )
    task.add_volume(config_volume).add_volume_mount(
        k8s_client.V1VolumeMount(mount_path='/etc/config/v3io',
                                 name='v3io-config'))

    # NOTE: a v3io-auth secret mount used to be added here; it is
    # currently disabled.

    task.add_env_variable(
        k8s_client.V1EnvVar(
            name='CURRENT_NODE_IP',
            value_from=k8s_client.V1EnvVarSource(
                field_ref=k8s_client.V1ObjectFieldSelector(
                    api_version='v1', field_path='status.hostIP')),
        ))
    task.add_env_variable(
        k8s_client.V1EnvVar(
            name='IGZ_DATA_CONFIG_FILE', value='/igz/java/conf/v3io.conf'))
    return task
def add_default_env(k8s_client, cop):
    """Inject the default MLRun env vars into a pipeline container op.

    Always sets MLRUN_NAMESPACE from the pod's own namespace via the
    downward API; the remaining vars are added only when the corresponding
    config / environment values are available.
    """
    cop.container.add_env_variable(
        k8s_client.V1EnvVar(
            'MLRUN_NAMESPACE',
            value_from=k8s_client.V1EnvVarSource(
                field_ref=k8s_client.V1ObjectFieldSelector(
                    field_path='metadata.namespace')),
        ))

    if config.httpdb.api_url:
        cop.container.add_env_variable(
            k8s_client.V1EnvVar(name='MLRUN_DBPATH',
                                value=config.httpdb.api_url))
    if config.mpijob_crd_version:
        cop.container.add_env_variable(
            k8s_client.V1EnvVar(name='MLRUN_MPIJOB_CRD_VERSION',
                                value=config.mpijob_crd_version))

    if 'MLRUN_AUTH_SESSION' in os.environ or 'V3IO_ACCESS_KEY' in os.environ:
        # Prefer the explicit auth session, fall back to the v3io key.
        auth_session = (os.environ.get('MLRUN_AUTH_SESSION')
                        or os.environ.get('V3IO_ACCESS_KEY'))
        cop.container.add_env_variable(
            k8s_client.V1EnvVar(name='MLRUN_AUTH_SESSION',
                                value=auth_session))
def get_from_experiment_config_map(self, key_name):
    """Return a V1EnvVar pulling ``key_name`` from this experiment's config map.

    The config map name is derived from the experiment's UUID.
    """
    config_map_name = constants.CONFIG_MAP_NAME.format(
        experiment_uuid=self.experiment_uuid)
    selector = client.V1ConfigMapKeySelector(name=config_map_name,
                                             key=key_name)
    return client.V1EnvVar(
        name=key_name,
        value_from=client.V1EnvVarSource(config_map_key_ref=selector))
def set_env_from_secret(self, name, secret=None, secret_key=None):
    """Set a pod environment variable sourced from a Kubernetes secret.

    When ``secret_key`` is omitted, the env var name doubles as the key
    within the secret.
    """
    key = secret_key or name
    selector = client.V1SecretKeySelector(name=secret, key=key)
    return self._set_env(
        name, value_from=client.V1EnvVarSource(secret_key_ref=selector))
def get_run_pod_env_vars(run_context):
    """Build the SigOpt env var list for a run pod.

    Copies the API token/url/project from the local environment, records
    the run id, derives SIGOPT_RUN_NAME from the pod's own name via the
    downward API, and appends everything from the config's environment
    context.
    """
    config = SigOptConfig()
    config.set_context_entry(GlobalRunContext(run_context))

    # Required settings copied verbatim from the local environment
    # (KeyError here means the caller's environment is incomplete).
    env = [
        k8s_client.V1EnvVar(name=var_name, value=os.environ[var_name])
        for var_name in ("SIGOPT_API_TOKEN", "SIGOPT_API_URL",
                         "SIGOPT_PROJECT")
    ]
    env.append(
        k8s_client.V1EnvVar(name="SIGOPT_RUN_ID", value=run_context.run.id))
    # The run name mirrors the pod name, resolved at pod start.
    env.append(
        k8s_client.V1EnvVar(
            name="SIGOPT_RUN_NAME",
            value_from=k8s_client.V1EnvVarSource(
                field_ref=k8s_client.V1ObjectFieldSelector(
                    field_path="metadata.name"))))
    env.extend(
        k8s_client.V1EnvVar(name=key, value=value.decode("ascii"))
        for key, value in config.get_environment_context().items())
    return env
def modify_pod_hook(spawner, pod):
    """JupyterHub hook: expose the pod's IP to the container as MY_POD_IP.

    The value is resolved at runtime through the Kubernetes downward API
    (``status.podIP``), so no literal value is set at spawn time.
    Returns the (mutated) pod for the spawner.
    """
    pod.spec.containers[0].env.append(
        client.V1EnvVar(
            name='MY_POD_IP',
            # Keyword arguments instead of positional Nones: the generated
            # k8s client models take many optional args, and relying on
            # their positional order is fragile across client versions.
            value_from=client.V1EnvVarSource(
                field_ref=client.V1ObjectFieldSelector(
                    field_path='status.podIP'))))
    return pod
def env(self, owner, title, deployment_name, config):
    """Assemble the env var list for a model deployment.

    Plain OWNER/TITLE values come from ``config``; each model secret is
    exposed as an env var backed by the project's per-model k8s secret;
    URL_BASE_PATHNAME is derived from owner/title/deployment name.
    """
    safeowner = clean(owner)
    safetitle = clean(title)

    envs = [
        kclient.V1EnvVar("OWNER", config["owner"]),
        kclient.V1EnvVar("TITLE", config["title"]),
    ]
    secret_store = ModelSecrets(owner=owner, title=title,
                                project=self.project)
    for secret in secret_store.list():
        # Each secret value lives under its own key in the model secret.
        selector = kclient.V1SecretKeySelector(
            key=secret, name=f"{safeowner}-{safetitle}-secret")
        envs.append(
            kclient.V1EnvVar(
                name=secret,
                value_from=kclient.V1EnvVarSource(secret_key_ref=selector)))
    envs.append(
        kclient.V1EnvVar(
            name="URL_BASE_PATHNAME",
            value=f"/{owner}/{title}/{deployment_name}/"))
    return envs
def icpdPipeline(
        notebook_url='https://raw.githubusercontent.com/animeshsingh/notebooks/master/sklearn.ipynb',
        notebook_params='',
        api_token='',
        endpoint_url='minio-service:9000',
        bucket_name='mlpipeline',
        object_name='notebooks/sklearn-model/runs/train/sklearn-pg_out.ipynb',
        access_key='minio',
        secret_access_key='minio123',
        credentials_id='',
):
    """Pipeline: set up credentials, run the trainer notebook, post the model.

    The Postgres connection string is injected into the notebook step from
    the per-run credentials secret.
    """
    credentials_secret = '{{workflow.parameters.credentials-id}}-cred'

    setup = setup_ops(secret_name=credentials_secret).apply(
        params.use_ai_pipeline_params(
            '{{workflow.parameters.credentials-id}}'))

    trainer_notebook = notebook_ops(
        notebook_url=notebook_url,
        notebook_params=notebook_params,
        api_token=api_token,
        endpoint_url=endpoint_url,
        bucket_name=bucket_name,
        object_name=object_name,
        access_key=access_key,
        secret_access_key=secret_access_key)
    trainer_notebook = trainer_notebook.add_env_variable(
        k8s_client.V1EnvVar(
            name='POSTGRES_URL',
            value_from=k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    name=credentials_secret,
                    key='POSTGRES_URL')))).after(setup)

    post_model = post_model_ops().apply(
        params.use_ai_pipeline_params('{{workflow.parameters.credentials-id}}')
    ).after(trainer_notebook).set_image_pull_policy('Always')
def etlPipeline(
        spark_master='local[*]',
        kafka_bootstrap_servers='my-cluster-kafka-bootstrap.kubeflow:9092',
        kafka_topic='reefer',
        batch_temp_loc='batch.csv',
        table_name='reefer_telemetries',
        credentials_id='',
):
    """Pipeline: setup -> push telemetry to Kafka -> ETL into the DB -> notify.

    The ETL step receives the Postgres URL from the per-run credentials
    secret; a postprocessing component sends the final notification.
    """
    setup = setup_ops(
        secret_name='{{workflow.parameters.credentials-id}}-cred'
    ).apply(params.use_ai_pipeline_params(
        '{{workflow.parameters.credentials-id}}'))

    push = push_ops(kafka_bootstrap_servers=kafka_bootstrap_servers,
                    kafka_topic=kafka_topic).after(setup)

    etl = etl_ops(spark_master=spark_master,
                  kafka_bootstrap_servers=kafka_bootstrap_servers,
                  kafka_topic=kafka_topic,
                  batch_temp_loc=batch_temp_loc,
                  table_name=table_name)
    etl = etl.add_env_variable(
        k8s_client.V1EnvVar(
            name='POSTGRES_URL',
            value_from=k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    name='{{workflow.parameters.credentials-id}}-cred',
                    key='POSTGRES_URL')))
    ).set_image_pull_policy('Always').after(push)

    post_template_url = 'https://raw.githubusercontent.com/Tomcli/kfp-components/master/postprocessing.yaml'
    post_model_ops = components.load_component_from_url(post_template_url)
    post_model = post_model_ops(
        notification_type='etl',
        pipeline_name='{{pod.name}}',
    ).apply(params.use_ai_pipeline_params(
        '{{workflow.parameters.credentials-id}}')
    ).after(etl).set_image_pull_policy('Always')
def _add_aws_credentials(kube_manager, pod_spec, namespace):
    """Add AWS credentials and S3 settings to a pod spec.

    Reads the S3 endpoint/https/verify-ssl settings from the secret's
    ``serving.kubeflow.org/*`` annotations and injects them, together with
    the access-key env vars, into the pod spec's first container.

    :param kube_manager: kube manager that handles communication with the
        Kubernetes client
    :param pod_spec: pod spec like volumes and security context
    :param namespace: namespace the credentials secret must live in
    :raises ValueError: if the secret does not exist in ``namespace``
    """
    if not kube_manager.secret_exists(secret_name, namespace):
        # The original format string contained '{}}': str.format() raises
        # "Single '}' encountered in format string" instead of producing
        # the intended error message. Fixed to a plain '{}' placeholder.
        raise ValueError(
            'Unable to mount credentials: Secret {} not found in namespace {}'
            .format(secret_name, namespace))

    secret = client.CoreV1Api().read_namespaced_secret(secret_name, namespace)
    annotations = secret.metadata.annotations
    s3_endpoint = annotations['serving.kubeflow.org/s3-endpoint']
    s3_use_https = annotations['serving.kubeflow.org/s3-usehttps']
    s3_verify_ssl = annotations['serving.kubeflow.org/s3-verifyssl']

    env = [
        client.V1EnvVar(
            name='AWS_ACCESS_KEY_ID',
            value_from=client.V1EnvVarSource(
                secret_key_ref=client.V1SecretKeySelector(
                    name=secret_name, key='awsAccessKeyID'))),
        client.V1EnvVar(
            name='AWS_SECRET_ACCESS_KEY',
            value_from=client.V1EnvVarSource(
                secret_key_ref=client.V1SecretKeySelector(
                    name=secret_name, key='awsSecretAccessKey'))),
        client.V1EnvVar(name='S3_ENDPOINT', value=s3_endpoint),
        client.V1EnvVar(name='S3_USE_HTTPS', value=s3_use_https),
        client.V1EnvVar(name='S3_VERIFY_SSL', value=s3_verify_ssl),
    ]
    if pod_spec.containers[0].env:
        pod_spec.containers[0].env.extend(env)
    else:
        pod_spec.containers[0].env = env
def _use_secret_var(task):
    """Attach a single secret-backed env var to the task's container.

    The env var name, secret name and secret key are all taken from the
    enclosing scope. Returns the task for chaining.
    """
    from kubernetes import client as k8s_client
    selector = k8s_client.V1SecretKeySelector(name=secret_name,
                                              key=secret_key)
    task.container.add_env_variable(
        k8s_client.V1EnvVar(
            name=env_var_name,
            value_from=k8s_client.V1EnvVarSource(secret_key_ref=selector)))
    return task
def kube_env(self):
    """Return this object's env vars plus POD_NAME from the downward API."""
    env_list = [
        client.V1EnvVar(name=key, value=value)
        for key, value in self.environment.items()
    ]
    # POD_NAME resolves to the pod's own metadata.name at runtime.
    pod_name_source = client.V1EnvVarSource(
        field_ref=client.V1ObjectFieldSelector(field_path='metadata.name'))
    env_list.append(
        client.V1EnvVar(name='POD_NAME', value_from=pod_name_source))
    return env_list
def run_agent_deployment(agent_type, replicas,
                         deploy_name='pymada-agents-deployment',
                         template_label=None,
                         agent_port=5001,
                         container_name='pymada-single-agent',
                         auth_token=None,
                         no_agents_on_master_node=True,
                         pod_limits=None,
                         config_path=None):
    """Create a deployment of pymada agent pods.

    :param agent_type: one of 'node_puppeteer', 'python_selenium_firefox',
        'python_selenium_chrome' or 'python_agent'
    :param replicas: number of agent pods to run
    :param template_label: pod template labels; defaults to
        ``{'app': 'pymada-agent'}``
    :raises ValueError: if ``agent_type`` is not one of the known types
    """
    if template_label is None:
        # Avoid a shared mutable default dict across calls.
        template_label = {'app': 'pymada-agent'}

    env_vars = [
        client.V1EnvVar("MASTER_URL", "http://pymadamaster:8000"),
        client.V1EnvVar("AGENT_PORT", str(agent_port)),
        # The agent learns its own address from the pod IP at runtime.
        client.V1EnvVar("AGENT_ADDR",
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path="status.podIP"))),
    ]
    if auth_token is not None:
        env_vars.append(client.V1EnvVar("PYMADA_TOKEN_AUTH", auth_token))

    agent_container_ports = [client.V1ContainerPort(container_port=agent_port)]

    pod_node_selector = None
    if no_agents_on_master_node:
        pod_node_selector = {'pymada-role': 'agent'}

    if agent_type == 'node_puppeteer':
        pod_spec = create_general_pod_spec('pymada/node-puppeteer',
                                           container_name,
                                           agent_container_ports, env_vars,
                                           pod_node_selector, pod_limits)
    elif agent_type == 'python_selenium_firefox':
        pod_spec = create_selenium_pod_spec('firefox', container_name,
                                            agent_container_ports, env_vars,
                                            pod_node_selector, pod_limits)
    elif agent_type == 'python_selenium_chrome':
        pod_spec = create_selenium_pod_spec('chrome', container_name,
                                            agent_container_ports, env_vars,
                                            pod_node_selector, pod_limits)
    elif agent_type == 'python_agent':
        pod_spec = create_general_pod_spec('pymada/python-agent',
                                           container_name,
                                           agent_container_ports, env_vars,
                                           pod_node_selector, pod_limits)
    else:
        # Previously an unknown agent_type fell through and crashed later
        # with UnboundLocalError on pod_spec; fail clearly instead.
        raise ValueError('Unknown agent_type: {!r}'.format(agent_type))

    run_deployment(pod_spec, replicas, deploy_name, template_label,
                   config_path=config_path)
def _use_azstorage_secret(task):
    """Expose the Azure storage connection string from ``secret_name``.

    The env var and the secret key share the same name. Returns the task
    for chaining.
    """
    from kubernetes import client as k8s_client
    task.container.add_env_variable(
        k8s_client.V1EnvVar(
            name='AZURE_STORAGE_CONNECTION_STRING',
            value_from=k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    name=secret_name,
                    key='AZURE_STORAGE_CONNECTION_STRING'))))
    return task
def create_env_list(secret_name):
    """Build env vars mirroring every data key of the NLU secret.

    Each data key in the secret becomes an env var of the same name,
    sourced from that secret key at pod start.
    """
    secret = get_nlu_secret(secret_name, namespace="nlu")
    return [
        client.V1EnvVar(
            name=data_key,
            value_from=client.V1EnvVarSource(
                secret_key_ref=client.V1SecretKeySelector(
                    key=data_key, name=secret_name)))
        for data_key in secret.data
    ]
def _use_databricks_secret(task):
    """Inject Databricks connection env vars from ``secret_name``.

    DATABRICKS_HOST, DATABRICKS_TOKEN and CLUSTER_ID are each sourced from
    the secret key of the same name. Returns the task for chaining.
    """
    from kubernetes import client as k8s_client
    target = task.container
    for env_name in ('DATABRICKS_HOST', 'DATABRICKS_TOKEN', 'CLUSTER_ID'):
        target = target.add_env_variable(
            k8s_client.V1EnvVar(
                name=env_name,
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        name=secret_name, key=env_name))))
    return task