Example 1
 def _use_azure_secret(task):
     from kubernetes import client as k8s_client
     # secret_name is captured from the enclosing helper's closure.
     (task.container
          .add_env_variable(
              k8s_client.V1EnvVar(
                  name='AZ_SUBSCRIPTION_ID',
                  value_from=k8s_client.V1EnvVarSource(
                      secret_key_ref=k8s_client.V1SecretKeySelector(
                          name=secret_name, key='AZ_SUBSCRIPTION_ID'))))
          .add_env_variable(
              k8s_client.V1EnvVar(
                  name='AZ_TENANT_ID',
                  value_from=k8s_client.V1EnvVarSource(
                      secret_key_ref=k8s_client.V1SecretKeySelector(
                          name=secret_name, key='AZ_TENANT_ID'))))
          .add_env_variable(
              k8s_client.V1EnvVar(
                  name='AZ_CLIENT_ID',
                  value_from=k8s_client.V1EnvVarSource(
                      secret_key_ref=k8s_client.V1SecretKeySelector(
                          name=secret_name, key='AZ_CLIENT_ID'))))
          .add_env_variable(
              k8s_client.V1EnvVar(
                  name='AZ_CLIENT_SECRET',
                  value_from=k8s_client.V1EnvVarSource(
                      secret_key_ref=k8s_client.V1SecretKeySelector(
                          name=secret_name, key='AZ_CLIENT_SECRET')))))
     return task
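The function above is the inner closure of a task modifier; in Kubeflow Pipelines such helpers are usually exposed as a factory that captures secret_name and is applied to an op with .apply(). A minimal sketch of that wrapper, where the default secret name 'azcreds' and the some_op step are illustrative assumptions:

def use_azure_secret(secret_name='azcreds'):
    # Return a task modifier that injects the Azure credentials stored in `secret_name`.
    def _use_azure_secret(task):
        ...  # body as in the example above
        return task
    return _use_azure_secret

# inside a @dsl.pipeline definition (illustrative):
# step = some_op().apply(use_azure_secret())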
Example 2
    def _use_aws_secret(task):
        from kubernetes import client as k8s_client
        task.container \
            .add_env_variable(
                k8s_client.V1EnvVar(
                    name='AWS_ACCESS_KEY_ID',
                    value_from=k8s_client.V1EnvVarSource(
                        secret_key_ref=k8s_client.V1SecretKeySelector(
                            name=secret_name,
                            key=aws_access_key_id_name
                        )
                    )
                )
            ) \
            .add_env_variable(
                k8s_client.V1EnvVar(
                    name='AWS_SECRET_ACCESS_KEY',
                    value_from=k8s_client.V1EnvVarSource(
                        secret_key_ref=k8s_client.V1SecretKeySelector(
                            name=secret_name,
                            key=aws_secret_access_key_name
                        )
                    )
                )
            )

        if aws_region:
            task.container \
                .add_env_variable(
                    k8s_client.V1EnvVar(
                        name='AWS_REGION',
                        value=aws_region
                    )
                )
        return task
Example 3
def add_aws_credentials(kube_manager, pod_spec, namespace):
    if not kube_manager.secret_exists(constants.AWS_CREDS_SECRET_NAME,
                                      namespace):
        raise ValueError(
            'Unable to mount credentials: Secret aws-secret not found in namespace {}'
            .format(namespace))

    # Set appropriate secrets env to enable kubeflow-user service
    # account.
    env = [
        client.V1EnvVar(name='AWS_ACCESS_KEY_ID',
                        value_from=client.V1EnvVarSource(
                            secret_key_ref=client.V1SecretKeySelector(
                                name=constants.AWS_CREDS_SECRET_NAME,
                                key='AWS_ACCESS_KEY_ID'))),
        client.V1EnvVar(name='AWS_SECRET_ACCESS_KEY',
                        value_from=client.V1EnvVarSource(
                            secret_key_ref=client.V1SecretKeySelector(
                                name=constants.AWS_CREDS_SECRET_NAME,
                                key='AWS_SECRET_ACCESS_KEY')))
    ]

    if pod_spec.containers[0].env:
        pod_spec.containers[0].env.extend(env)
    else:
        pod_spec.containers[0].env = env
Example 4
    def _use_aws_envvars_from_secret(task):
        api = kube_client.CoreV1Api(K8sHelper()._api_client)
        ns = secret_namespace or current_namespace()
        secret = api.read_namespaced_secret(secret_name, ns)

        if 'access_key' in secret.data:
            task.add_env_variable(
                kube_client.V1EnvVar(
                    name='AWS_ACCESS_KEY_ID',
                    value_from=kube_client.V1EnvVarSource(
                        secret_key_ref=kube_client.V1SecretKeySelector(
                            name=secret_name, key='access_key'))))

        if 'secret_key' in secret.data:
            task.add_env_variable(
                kube_client.V1EnvVar(
                    name='AWS_SECRET_ACCESS_KEY',
                    value_from=kube_client.V1EnvVarSource(
                        secret_key_ref=kube_client.V1SecretKeySelector(
                            name=secret_name, key='secret_key'))))

        if 'token' in secret.data:
            task.add_env_variable(
                kube_client.V1EnvVar(
                    name='AWS_SESSION_TOKEN',
                    value_from=kube_client.V1EnvVarSource(
                        secret_key_ref=kube_client.V1SecretKeySelector(
                            name=secret_name, key='token'))))

        return task
Example 5
def add_aws_credentials(kube_manager, pod_spec, namespace):
    """add AWS credential

    :param kube_manager: kube manager for handles communication with Kubernetes' client
    :param pod_spec: pod spec like volumes and security context
    :param namespace: The custom resource

    """
    if not kube_manager.secret_exists(constants.AWS_CREDS_SECRET_NAME,
                                      namespace):
        raise ValueError(
            'Unable to mount credentials: Secret aws-secret not found in namespace {}'
            .format(namespace))

    # Set appropriate secrets env to enable kubeflow-user service
    # account.
    env = [
        client.V1EnvVar(name='AWS_ACCESS_KEY_ID',
                        value_from=client.V1EnvVarSource(
                            secret_key_ref=client.V1SecretKeySelector(
                                name=constants.AWS_CREDS_SECRET_NAME,
                                key='AWS_ACCESS_KEY_ID'))),
        client.V1EnvVar(name='AWS_SECRET_ACCESS_KEY',
                        value_from=client.V1EnvVarSource(
                            secret_key_ref=client.V1SecretKeySelector(
                                name=constants.AWS_CREDS_SECRET_NAME,
                                key='AWS_SECRET_ACCESS_KEY')))
    ]

    if pod_spec.containers[0].env:
        pod_spec.containers[0].env.extend(env)
    else:
        pod_spec.containers[0].env = env
Example 6
 def _use_mysql_secret(task):
     from kubernetes import client as k8s_client
     return (
         task
             .add_env_variable(
                 k8s_client.V1EnvVar(
                     name='MYSQL_USERNAME',
                     value_from=k8s_client.V1EnvVarSource(
                         secret_key_ref=k8s_client.V1SecretKeySelector(
                             name=secret_name,
                             key=db_username
                         )
                     )
                 )
             )
             .add_env_variable(
                 k8s_client.V1EnvVar(
                     name='MYSQL_PASSWORD',
                     value_from=k8s_client.V1EnvVarSource(
                         secret_key_ref=k8s_client.V1SecretKeySelector(
                             name=secret_name,
                             key=db_password
                         )
                     )
                 )
             )
     )
Example 7
 def _use_minio_secret(task):
     from kubernetes import client as k8s_client
     return (
         task
             .add_env_variable(
                 k8s_client.V1EnvVar(
                     name='AWS_ACCESS_KEY_ID',
                     value_from=k8s_client.V1EnvVarSource(
                         secret_key_ref=k8s_client.V1SecretKeySelector(
                             name=secret_name,
                             key=minio_access_key_id_name
                         )
                     )
                 )
             )
             .add_env_variable(
                 k8s_client.V1EnvVar(
                     name='AWS_SECRET_ACCESS_KEY',
                     value_from=k8s_client.V1EnvVarSource(
                         secret_key_ref=k8s_client.V1SecretKeySelector(
                             name=secret_name,
                             key=minio_secret_access_key_name
                         )
                     )
                 )
             )
             .add_env_variable(
                 k8s_client.V1EnvVar(
                     name='S3_USE_HTTPS',
                     value='0'
                 )
             )
             .add_env_variable(
                 k8s_client.V1EnvVar(
                     name='S3_VERIFY_SSL',
                     value='0'
                 )
             )
             .add_env_variable(
                 k8s_client.V1EnvVar(
                     name='S3_ENDPOINT',
                     value='minio-service.kubeflow:9000'
                 )
             )
             .add_env_variable(
                 k8s_client.V1EnvVar(
                     name='S3_REQUEST_TIMEOUT_MSEC',
                     value='1200000'
                 )
             )
     )
Example 8
 def _use_aws_secret(task):
     from kubernetes import client as k8s_client
     return (task.add_env_variable(
         k8s_client.V1EnvVar(
             name='AWS_ACCESS_KEY_ID',
             value_from=k8s_client.V1EnvVarSource(
                 secret_key_ref=k8s_client.V1SecretKeySelector(
                     name=secret_name, key=aws_access_key_id_name)))
     ).add_env_variable(
         k8s_client.V1EnvVar(
             name='AWS_SECRET_ACCESS_KEY',
             value_from=k8s_client.V1EnvVarSource(
                 secret_key_ref=k8s_client.V1SecretKeySelector(
                     name=secret_name, key=aws_secret_access_key_name)))))
Example 9
def create_job_object(runner_image, region, s3_path, pvc_name):
    target_folder = get_target_folder(s3_path)

    # Configure the Pod template container
    container = k8s_client.V1Container(
        name="copy-dataset-worker",
        image=runner_image,
        command=["aws"],
        args=["s3", "sync", s3_path, "/mnt/" + target_folder],
        volume_mounts=[
            k8s_client.V1VolumeMount(name="data-storage", mount_path='/mnt')
        ],
        env=[
            k8s_client.V1EnvVar(name="AWS_REGION", value=region),
            k8s_client.V1EnvVar(
                name="AWS_ACCESS_KEY_ID",
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        key="AWS_ACCESS_KEY_ID", name="aws-secret"))),
            k8s_client.V1EnvVar(
                name="AWS_SECRET_ACCESS_KEY",
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        key="AWS_SECRET_ACCESS_KEY", name="aws-secret")))
        ],
    )
    volume = k8s_client.V1Volume(
        name='data-storage',
        persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
            claim_name=pvc_name))
    # Create and configure the spec section
    template = k8s_client.V1PodTemplateSpec(
        # metadata=k8s_client.V1ObjectMeta(labels={"app":"copy-dataset-worker"}),
        spec=k8s_client.V1PodSpec(containers=[container],
                                  volumes=[volume],
                                  restart_policy="OnFailure"))
    # Create the specification of the Job
    spec = k8s_client.V1JobSpec(
        # selector=k8s_client.V1LabelSelector(match_labels={"app":"copy-dataset-worker"}),
        template=template)
    # Instantiate the Job object
    job = k8s_client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=k8s_client.V1ObjectMeta(name=container.name),
        spec=spec)

    return job
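A hedged usage sketch for the helper above: the returned V1Job can be submitted with the standard BatchV1Api client; the image, bucket path, PVC name, and namespace below are illustrative placeholders.

# assumes kubernetes.config.load_kube_config() (or load_incluster_config()) has been called
job = create_job_object(runner_image='amazon/aws-cli',
                        region='us-west-2',
                        s3_path='s3://my-bucket/datasets/train',
                        pvc_name='dataset-pvc')
k8s_client.BatchV1Api().create_namespaced_job(namespace='default', body=job)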
Example 10
def etlPipeline(
    spark_master='local[*]',
    kafka_bootstrap_servers='my-cluster-kafka-bootstrap.kubeflow:9092',
    kafka_topic='reefer',
    batch_temp_loc='batch.csv',
    table_name='reefer_telemetries',
    credentials_id=''
):

    setup = setup_ops(
        secret_name='{{workflow.parameters.credentials-id}}-cred'
    ).apply(params.use_ai_pipeline_params('{{workflow.parameters.credentials-id}}'))

    push = push_ops(kafka_bootstrap_servers=kafka_bootstrap_servers,
                    kafka_topic=kafka_topic).after(setup)

    etl = etl_ops(spark_master=spark_master,
                  kafka_bootstrap_servers=kafka_bootstrap_servers,
                  kafka_topic=kafka_topic,
                  batch_temp_loc=batch_temp_loc,
                  table_name=table_name).add_env_variable(
                      k8s_client.V1EnvVar(
                          name='POSTGRES_URL',
                          value_from=k8s_client.V1EnvVarSource(
                              secret_key_ref=k8s_client.V1SecretKeySelector(
                                  name='{{workflow.parameters.credentials-id}}-cred',
                                  key='POSTGRES_URL'
                              )
                          )
                      )
                  ).set_image_pull_policy('Always').after(push)

    post_template_url = 'https://raw.githubusercontent.com/Tomcli/kfp-components/master/postprocessing.yaml'
    post_model_ops = components.load_component_from_url(post_template_url)
    post_model = post_model_ops(
        notification_type='etl',
        pipeline_name='{{pod.name}}'
    ).apply(params.use_ai_pipeline_params(
        '{{workflow.parameters.credentials-id}}'
    )).after(etl).set_image_pull_policy('Always')
Example 11
 def set_env_from_secret(self, name, secret=None, secret_key=None):
     """set pod environment var from secret"""
     secret_key = secret_key or name
     value_from = client.V1EnvVarSource(
         secret_key_ref=client.V1SecretKeySelector(name=secret,
                                                   key=secret_key))
     return self._set_env(name, value_from=value_from)
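A brief, hedged usage note: because secret_key defaults to the variable name, a call such as the following (with an illustrative secret name) exposes the AWS_ACCESS_KEY_ID key of the aws-secret Secret as an environment variable of the same name; _set_env is assumed to attach the resulting V1EnvVar to the pod spec.

self.set_env_from_secret('AWS_ACCESS_KEY_ID', secret='aws-secret')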
Example 12
def update_index_op(app_dir: str, base_branch: str, base_git_repo: str,
                    bot_email: str, fork_git_repo: str, index_file: str,
                    lookup_file: str, workflow_id: str):
    return (dsl.ContainerOp(
        name='update_index',
        image='gcr.io/kubeflow-examples/code-search/ks:v20181204-ee47a49-dirty-fa8aa3',
        command=['/usr/local/src/update_index.sh'],
        arguments=[
            '--appDir=%s' % app_dir,
            '--baseBranch=%s' % base_branch,
            '--baseGitRepo=%s' % base_git_repo,
            '--botEmail=%s' % bot_email,
            '--forkGitRepo=%s' % fork_git_repo,
            '--indexFile=%s' % index_file,
            '--lookupFile=%s' % lookup_file,
            '--workflowId=%s' % workflow_id,
        ],
    ).add_volume(
        k8s_client.V1Volume(
            name='github-access-token',
            secret=k8s_client.V1SecretVolumeSource(
                secret_name='github-access-token'))).add_env_variable(
                    k8s_client.V1EnvVar(
                        name='GITHUB_TOKEN',
                        value_from=k8s_client.V1EnvVarSource(
                            secret_key_ref=k8s_client.V1SecretKeySelector(
                                name='github-access-token',
                                key='token',
                            )))))
Example 13
def build_env_list_for_pod(env_vars):
    env_list = []
    for env_name, env_value in env_vars.items():
        if env_name is None or env_value is None:
            continue
        if isinstance(env_value, str):  # env is a plain key/value pair
            env_list.append(client.V1EnvVar(name=env_name, value=env_value))

        elif isinstance(env_value, dict):  # env is a reference
            if len(env_value) < 3:
                continue
            if "type" not in env_value or "name" not in env_value or "key" not in env_value:
                continue

            ref_type = env_value["type"]
            ref_name = env_value["name"]
            ref_key = env_value["key"]
            env_var_source = None
            if ref_type.lower() == "configmap":
                ref_selector = client.V1ConfigMapKeySelector(key=ref_key, name=ref_name)
                env_var_source = client.V1EnvVarSource(config_map_key_ref=ref_selector)
            elif ref_type.lower() == "secret":
                ref_selector = client.V1SecretKeySelector(key=ref_key, name=ref_name)
                env_var_source = client.V1EnvVarSource(secret_key_ref=ref_selector)
            elif ref_type.lower() == "field":
                pass
            elif ref_type.lower() == "resource_field":
                pass

            if env_var_source is not None:
                env_list.append(client.V1EnvVar(name=env_name, value_from=env_var_source))
    return env_list
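For reference, a sketch of the env_vars mapping this helper expects: plain string values become literal env vars, while dict values carrying type, name and key become ConfigMap or Secret references (all names below are illustrative).

env_vars = {
    'LOG_LEVEL': 'debug',  # plain key/value pair
    'DB_PASSWORD': {'type': 'secret', 'name': 'db-credentials', 'key': 'password'},
    'APP_CONFIG': {'type': 'configmap', 'name': 'app-config', 'key': 'config.yaml'},
}
env_list = build_env_list_for_pod(env_vars)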
Example 14
def icpdPipeline(
    notebook_url='https://raw.githubusercontent.com/animeshsingh/notebooks/master/sklearn.ipynb',
    notebook_params='',
    api_token='',
    endpoint_url='minio-service:9000',
    bucket_name='mlpipeline',
    object_name='notebooks/sklearn-model/runs/train/sklearn-pg_out.ipynb',
    access_key='minio',
    secret_access_key='minio123',
    credentials_id='',
):

    setup = setup_ops(
        secret_name=('{{workflow.parameters.credentials-id}}-cred')).apply(
            params.use_ai_pipeline_params(
                '{{workflow.parameters.credentials-id}}'))

    trainer_notebook = notebook_ops(
        notebook_url=notebook_url,
        notebook_params=notebook_params,
        api_token=api_token,
        endpoint_url=endpoint_url,
        bucket_name=bucket_name,
        object_name=object_name,
        access_key=access_key,
        secret_access_key=secret_access_key).add_env_variable(
            k8s_client.V1EnvVar(
                name='POSTGRES_URL',
                value_from=k8s_client.V1EnvVarSource(
                    secret_key_ref=k8s_client.V1SecretKeySelector(
                        name='{{workflow.parameters.credentials-id}}-cred',
                        key='POSTGRES_URL')))).after(setup)
    post_model = post_model_ops().apply(
        params.use_ai_pipeline_params('{{workflow.parameters.credentials-id}}')
    ).after(trainer_notebook).set_image_pull_policy('Always')
Example 15
    def env(self, owner, title, deployment_name, config):
        safeowner = clean(owner)
        safetitle = clean(title)
        envs = [
            kclient.V1EnvVar("OWNER", config["owner"]),
            kclient.V1EnvVar("TITLE", config["title"]),
        ]

        for secret in ModelSecrets(owner=owner,
                                   title=title,
                                   project=self.project).list():
            envs.append(
                kclient.V1EnvVar(
                    name=secret,
                    value_from=kclient.V1EnvVarSource(
                        secret_key_ref=(kclient.V1SecretKeySelector(
                            key=secret, name=f"{safeowner}-{safetitle}-secret")
                                        )),
                ))

        envs.append(
            kclient.V1EnvVar(
                name="URL_BASE_PATHNAME",
                value=f"/{owner}/{title}/{deployment_name}/",
            ))

        return envs
Example 16
    def _add_aws_credentials(kube_manager, pod_spec, namespace):
        """add AWS credential

        :param kube_manager: kube manager for handles communication with Kubernetes' client
        :param pod_spec: pod spec like volumes and security context
        :param namespace: The custom resource

        """
        if not kube_manager.secret_exists(secret_name, namespace):
            raise ValueError('Unable to mount credentials: Secret {}} not found in namespace {}'
                             .format(secret_name, namespace))

        secret = client.CoreV1Api().read_namespaced_secret(secret_name, namespace)
        annotations = secret.metadata.annotations
        s3_endpoint = annotations['serving.kubeflow.org/s3-endpoint']
        s3_use_https = annotations['serving.kubeflow.org/s3-usehttps']
        s3_verify_ssl = annotations['serving.kubeflow.org/s3-verifyssl']

        env = [
            client.V1EnvVar(
                name='AWS_ACCESS_KEY_ID',
                value_from=client.V1EnvVarSource(
                    secret_key_ref=client.V1SecretKeySelector(
                        name=secret_name,
                        key='awsAccessKeyID'
                    )
                )
            ),
            client.V1EnvVar(
                name='AWS_SECRET_ACCESS_KEY',
                value_from=client.V1EnvVarSource(
                    secret_key_ref=client.V1SecretKeySelector(
                        name=secret_name,
                        key='awsSecretAccessKey'
                    )
                )
            ),
            client.V1EnvVar(name='S3_ENDPOINT', value=s3_endpoint),
            client.V1EnvVar(name='S3_USE_HTTPS', value=s3_use_https),
            client.V1EnvVar(name='S3_VERIFY_SSL', value=s3_verify_ssl),

        ]

        if pod_spec.containers[0].env:
            pod_spec.containers[0].env.extend(env)
        else:
            pod_spec.containers[0].env = env
Example 17
 def _use_secret_var(task):
     from kubernetes import client as k8s_client
     (task.container.add_env_variable(
         k8s_client.V1EnvVar(
             name=env_var_name,
             value_from=k8s_client.V1EnvVarSource(
                 secret_key_ref=k8s_client.V1SecretKeySelector(
                     name=secret_name, key=secret_key)))))
     return task
Example 18
 def _use_azstorage_secret(task):
     from kubernetes import client as k8s_client
     (task.container.add_env_variable(  # noqa: E131
         k8s_client.V1EnvVar(
             name='AZURE_STORAGE_CONNECTION_STRING',
             value_from=k8s_client.V1EnvVarSource(
                 secret_key_ref=k8s_client.V1SecretKeySelector(
                     name=secret_name,
                     key='AZURE_STORAGE_CONNECTION_STRING')))))
     return task
Example 19
def create_env_list(secret_name):
    envs_list = []
    secret = get_nlu_secret(secret_name, namespace="nlu")
    for k, v in secret.data.items():
        envs_list.append(client.V1EnvVar(
            name=k,
            value_from=client.V1EnvVarSource(
                secret_key_ref=client.V1SecretKeySelector(
                    key=k,
                    name=secret_name))))
    return envs_list
Example 20
 def _use_databricks_secret(task):
     from kubernetes import client as k8s_client
     # secret_name is captured from the enclosing helper's closure.
     (task.container
          .add_env_variable(
              k8s_client.V1EnvVar(
                  name='DATABRICKS_HOST',
                  value_from=k8s_client.V1EnvVarSource(
                      secret_key_ref=k8s_client.V1SecretKeySelector(
                          name=secret_name, key='DATABRICKS_HOST'))))
          .add_env_variable(
              k8s_client.V1EnvVar(
                  name='DATABRICKS_TOKEN',
                  value_from=k8s_client.V1EnvVarSource(
                      secret_key_ref=k8s_client.V1SecretKeySelector(
                          name=secret_name, key='DATABRICKS_TOKEN'))))
          .add_env_variable(
              k8s_client.V1EnvVar(
                  name='CLUSTER_ID',
                  value_from=k8s_client.V1EnvVarSource(
                      secret_key_ref=k8s_client.V1SecretKeySelector(
                          name=secret_name, key='CLUSTER_ID')))))
     return task
Example 21
def configure_workflow_job(namespace: str,
                           project_name: str,
                           project_repo_url: str,
                           project_repo_branch: str = 'master',
                           retries: int = 2,
                           image: str = BODYWORK_DOCKER_IMAGE) -> k8s.V1Job:
    """Configure a Bodywork workflow execution job.

    :param namespace: The namespace to deploy the job to.
    :param project_name: The name of the Bodywork project that the stage
        belongs to.
    :param project_repo_url: The URL for the Bodywork project Git
        repository.
    :param project_repo_branch: The Bodywork project Git repository
        branch to use, defaults to 'master'.
    :param retries: Number of times to retry running the stage to
        completion (if necessary), defaults to 2.
    :param image: Docker image to use for running the stage within,
        defaults to BODYWORK_DOCKER_IMAGE.
    :return: A configured k8s job object.
    """
    vcs_env_vars = [
        k8s.V1EnvVar(name=SSH_GITHUB_KEY_ENV_VAR,
                     value_from=k8s.V1EnvVarSource(
                         secret_key_ref=k8s.V1SecretKeySelector(
                             key=SSH_GITHUB_KEY_ENV_VAR,
                             name=SSH_GITHUB_SECRET_NAME,
                             optional=True)))
    ]
    container = k8s.V1Container(name='bodywork',
                                image=image,
                                image_pull_policy='Always',
                                env=vcs_env_vars,
                                command=['bodywork', 'workflow'],
                                args=[
                                    f'--namespace={namespace}',
                                    project_repo_url, project_repo_branch
                                ])
    pod_spec = k8s.V1PodSpec(
        service_account_name=BODYWORK_WORKFLOW_SERVICE_ACCOUNT,
        containers=[container],
        restart_policy='Never')
    pod_template_spec = k8s.V1PodTemplateSpec(spec=pod_spec)
    job_spec = k8s.V1JobSpec(
        template=pod_template_spec,
        completions=1,
        backoff_limit=retries,
        ttl_seconds_after_finished=BODYWORK_WORKFLOW_JOB_TIME_TO_LIVE)
    job = k8s.V1Job(metadata=k8s.V1ObjectMeta(name=project_name,
                                              namespace=namespace,
                                              labels={'app': 'bodywork'}),
                    spec=job_spec)
    return job
Example 22
    def handle_proxy_credentials(self, env):
        credentials_secret = None
        if self.credentials_secret_name:
            credentials_secret = self.run_action_and_parse_error(
                self.core_api.read_namespaced_secret,
                self.credentials_secret_name,
                self.namespace)
        if not credentials_secret:
            LOGGER.error("No secret named %s was found in the %s namespace, will use unauth access",
                         self.credentials_secret_name, self.namespace)
            return env

        encoded_user = credentials_secret.data.get("username")
        encoded_pass = credentials_secret.data.get("password")

        if not (encoded_user and encoded_pass):
            LOGGER.error("Secret %s does not contain username/password",
                         self.credentials_secret_name)
            return env
        env.append(
            client.V1EnvVar(
                name="REGISTRY_PROXY_USERNAME",
                value_from=client.V1EnvVarSource(
                    secret_key_ref=client.V1SecretKeySelector(
                        key="username",
                        name=self.credentials_secret_name))))
        env.append(
            client.V1EnvVar(
                name="REGISTRY_PROXY_PASSWORD",
                value_from=client.V1EnvVarSource(
                    secret_key_ref=client.V1SecretKeySelector(
                        key="password",
                        name=self.credentials_secret_name))))
        LOGGER.info("Secret selected + env vars set successfully")
        return env
Example 23
 def _use_k8s_secret(task):
     from kubernetes import client as k8s_client
     for secret_key, env_var in k8s_secret_key_to_env.items():
         task.container \
             .add_env_variable(
                 k8s_client.V1EnvVar(
                     name=env_var,
                     value_from=k8s_client.V1EnvVarSource(
                         secret_key_ref=k8s_client.V1SecretKeySelector(
                             name=secret_name,
                             key=secret_key))))
     return task
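Here secret_name and k8s_secret_key_to_env are captured from the enclosing helper's closure. A hedged sketch of how such a helper is typically parameterized and applied (the secret and op names are illustrative):

def use_k8s_secret(secret_name, k8s_secret_key_to_env):
    def _use_k8s_secret(task):
        ...  # body as in the example above
        return task
    return _use_k8s_secret

# expose the 'password' key of the 'db-credentials' Secret as DB_PASSWORD on an op:
# step = some_op().apply(use_k8s_secret('db-credentials', {'password': 'DB_PASSWORD'}))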
Example 24
def _convert_to_kube_env(
        env: infra_validator_pb2.EnvVar) -> k8s_client.V1EnvVar:
    """Convert infra_validator_pb2.EnvVar to kubernetes.V1EnvVar."""
    if not env.name:
        raise ValueError('EnvVar.name must be specified.')
    if env.HasField('value_from'):
        if env.value_from.HasField('secret_key_ref'):
            value_source = k8s_client.V1EnvVarSource(
                secret_key_ref=k8s_client.V1SecretKeySelector(
                    name=env.value_from.secret_key_ref.name,
                    key=env.value_from.secret_key_ref.key))
            return k8s_client.V1EnvVar(name=env.name, value_from=value_source)
        else:
            raise ValueError(f'Bad EnvVar: {env}')
    else:
        # Note that env.value can be empty.
        return k8s_client.V1EnvVar(name=env.name, value=env.value)
Example 25
def etlPipeline(
        spark_master='spark://my-spark-cluster:7077',
        kafka_bootstrap_servers='my-cluster-kafka-bootstrap.kubeflow:9092',
        kafka_topic='reefer',
        batch_temp_loc='batch.csv',
        table_name='reefer_telemetries'):

    push = push_ops(kafka_bootstrap_servers=kafka_bootstrap_servers,
                    kafka_topic=kafka_topic)

    etl = etl_ops(spark_master=spark_master,
                  kafka_bootstrap_servers=kafka_bootstrap_servers,
                  kafka_topic=kafka_topic,
                  batch_temp_loc=batch_temp_loc,
                  table_name=table_name).add_env_variable(
                      k8s_client.V1EnvVar(
                          name='POSTGRES_URL',
                          value_from=k8s_client.V1EnvVarSource(
                              secret_key_ref=k8s_client.V1SecretKeySelector(
                                  name='postgresql', key='POSTGRES_URL')))
                  ).set_image_pull_policy('Always').after(push)
Example 26
def configure_env_vars_from_secrets(
        namespace: str,
        secret_varname_pairs: List[Tuple[str, str]]) -> List[k8s.V1EnvVar]:
    """Configure container environment variables from secrets.

    Enables secret values to be mounted as environment variables in a
    Bodywork stage-runner container. For example, with a secret created
    using kubectl as follows,

        kubectl -n bodywork-dev create secret generic foobar \
            --from-literal=FOO=bar \
            --from-literal=BAR=foo

    This function can be used to configure the environment variables FOO
    and BAR for any batch job or service deployment.

    :param namespace: Kubernetes namespace in which to look for secrets.
    :param secret_varname_pairs: List of secret, variable-name pairs.
    :raises RuntimeError: if any of the secrets or their keys cannot be
        found.
    :return: A configured list of environment variables.
    """
    missing_secrets_info = [
        f"cannot find key={var_name} in secret={secret_name} in namespace={namespace}"
        for secret_name, var_name in secret_varname_pairs
        if not secret_exists(namespace, secret_name, var_name)
    ]
    if missing_secrets_info:
        msg = "; ".join(missing_secrets_info)
        raise RuntimeError(msg)

    env_vars = [
        k8s.V1EnvVar(
            name=var_name,
            value_from=k8s.V1EnvVarSource(
                secret_key_ref=k8s.V1SecretKeySelector(
                    key=var_name, name=secret_name, optional=False)),
        ) for secret_name, var_name in secret_varname_pairs
    ]
    return env_vars
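Following the kubectl example in the docstring, a hedged usage sketch; the resulting list can be passed straight to the env argument of a k8s.V1Container.

env_vars = configure_env_vars_from_secrets(
    namespace='bodywork-dev',
    secret_varname_pairs=[('foobar', 'FOO'), ('foobar', 'BAR')])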
Example 27
 def _assert_pod_env_from_secrets(pod_env, expected_variables):
     for env_variable in pod_env:
         if isinstance(env_variable, dict) and env_variable.setdefault(
                 "valueFrom", None):
             # Nuclio spec comes in as a dict, with some differences from the V1EnvVar - convert it.
             value_from = client.V1EnvVarSource(
                 secret_key_ref=client.V1SecretKeySelector(
                     name=env_variable["valueFrom"]["secretKeyRef"]["name"],
                     key=env_variable["valueFrom"]["secretKeyRef"]["key"],
                 ))
             env_variable = V1EnvVar(name=env_variable["name"],
                                     value_from=value_from)
         if (isinstance(env_variable, V1EnvVar)
                 and env_variable.value_from is not None):
             name = env_variable.name
             if name in expected_variables:
                 expected_value = expected_variables[name]
                 secret_key = env_variable.value_from.secret_key_ref.key
                 secret_name = env_variable.value_from.secret_key_ref.name
                 assert expected_value[secret_name] == secret_key
                 expected_variables.pop(name)
     assert len(expected_variables) == 0
Example 28
    def env(self, owner, title, config):
        safeowner = clean(owner)
        safetitle = clean(title)
        envs = [
            kclient.V1EnvVar("OWNER", owner),
            kclient.V1EnvVar("TITLE", title),
            kclient.V1EnvVar("EXP_TASK_TIME", str(config["exp_task_time"])),
        ]
        # for sec in [
        #     "BUCKET",
        #     "REDIS_HOST",
        #     "REDIS_PORT",
        #     "REDIS_EXECUTOR_PW",
        # ]:
        #     envs.append(
        #         kclient.V1EnvVar(
        #             sec,
        #             value_from=kclient.V1EnvVarSource(
        #                 secret_key_ref=(
        #                     kclient.V1SecretKeySelector(key=sec, name="worker-secret")
        #                 )
        #             ),
        #         )
        #     )

        for secret in ModelSecrets(owner=owner,
                                   title=title,
                                   project=self.project).list():
            envs.append(
                kclient.V1EnvVar(
                    name=secret,
                    value_from=kclient.V1EnvVarSource(
                        secret_key_ref=(kclient.V1SecretKeySelector(
                            key=secret, name=f"{safeowner}-{safetitle}-secret")
                                        )),
                ))
        return envs
Example 29
def create_pod_template_spec(data):
    ports = []

    for port in data["ports"].split(','):
        portDefinition = client.V1ContainerPort(container_port=int(port))
        ports.append(portDefinition)

    envs = []
    if "environments" in data:
        envs_array = data["environments"].splitlines()

        tmp_envs = dict(s.split('=', 1) for s in envs_array)

        for key in tmp_envs:
            envs.append(client.V1EnvVar(name=key, value=tmp_envs[key]))

    if "environments_secrets" in data:
        envs_array = data["environments_secrets"].splitlines()
        tmp_envs = dict(s.split('=', 1) for s in envs_array)

        for key in tmp_envs:

            if ":" in tmp_envs[key]:
                # value has the form "<secret-name>:<secret-key>"
                value = tmp_envs[key]
                secrets = value.split(':')
                secret_key = secrets[1]
                secret_name = secrets[0]

                envs.append(
                    client.V1EnvVar(
                        name=key,
                        value="",
                        value_from=client.V1EnvVarSource(
                            secret_key_ref=client.V1SecretKeySelector(
                                key=secret_key, name=secret_name))))

    container = client.V1Container(name=data["container_name"],
                                   image=data["image"],
                                   ports=ports,
                                   env=envs)

    if "volume_mounts" in data:
        volume_mounts_data = yaml.full_load(data["volume_mounts"])
        volume_mounts = []

        if (isinstance(volume_mounts_data, list)):
            for volume_mount_data in volume_mounts_data:
                volume_mount = create_volume_mount(volume_mount_data)

                if volume_mount:
                    volume_mounts.append(volume_mount)
        else:
            volume_mount = create_volume_mount(volume_mounts_data)

            if volume_mount:
                volume_mounts.append(volume_mount)

        container.volume_mounts = volume_mounts

    if "liveness_probe" in data:
        container.liveness_probe = load_liveness_readiness_probe(
            data["liveness_probe"])

    if "readiness_probe" in data:
        container.readiness_probe = load_liveness_readiness_probe(
            data["readiness_probe"])

    if "container_command" in data:
        container.command = data["container_command"].split(' ')

    if "container_args" in data:
        args_array = data["container_args"].splitlines()
        container.args = args_array

    if "resources_requests" in data:
        resources_array = data["resources_requests"].split(",")
        tmp_resources = dict(s.split('=', 1) for s in resources_array)
        container.resources = client.V1ResourceRequirements(
            requests=tmp_resources)

    template_spec = client.V1PodSpec(containers=[container])

    if "image_pull_secrets" in data:
        images_array = data["image_pull_secrets"].split(",")
        images = []
        for image in images_array:
            images.append(client.V1LocalObjectReference(name=image))

        template_spec.image_pull_secrets = images

    if "volumes" in data:
        volumes_data = yaml.full_load(data["volumes"])
        volumes = []

        if (isinstance(volumes_data, list)):
            for volume_data in volumes_data:
                volume = create_volume(volume_data)

                if volume:
                    volumes.append(volume)
        else:
            volume = create_volume(volumes_data)

            if volume:
                volumes.append(volume)

        template_spec.volumes = volumes

    return template_spec
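A hedged sketch of the data dictionary this function consumes; environments_secrets entries use the NAME=secret-name:secret-key form handled above, and all values are illustrative.

data = {
    'container_name': 'web',
    'image': 'nginx:1.25',
    'ports': '80,443',
    'environments': 'LOG_LEVEL=info\nAPP_MODE=production',
    'environments_secrets': 'DB_PASSWORD=db-credentials:password',
}
pod_spec = create_pod_template_spec(data)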
Example 30
 def _get_from_experiment_secret(self, key_name):
     name = constants.SECRET_NAME.format(uuid=self.experiment_uuid)
     secret_key_ref = client.V1SecretKeySelector(name=name, key=key_name)
     value = client.V1EnvVarSource(secret_key_ref=secret_key_ref)
     return client.V1EnvVar(name=key_name, value_from=value)