Example 1
def add_ecr_config(kube_manager, pod_spec, namespace):
    if not kube_manager.secret_exists('ecr-config', namespace):
        secret = client.V1Secret(
            metadata=client.V1ObjectMeta(name='ecr-config'),
            string_data={
                'config.json': '{"credsStore": "ecr-login"}'
            })
        kube_manager.create_secret(namespace, secret)

    volume_mount = client.V1VolumeMount(
            name='ecr-config', mount_path='/kaniko/.docker/', read_only=True)

    if pod_spec.containers[0].volume_mounts:
        pod_spec.containers[0].volume_mounts.append(volume_mount)
    else:
        pod_spec.containers[0].volume_mounts = [volume_mount]

    volume = client.V1Volume(
            name='ecr-config',
            secret=client.V1SecretVolumeSource(secret_name='ecr-config'))

    if pod_spec.volumes:
        pod_spec.volumes.append(volume)
    else:
        pod_spec.volumes = [volume]
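
A minimal usage sketch for the mutator above (not part of the original source): the FakeKubeManager stub is hypothetical and stands in for the real Kubeflow Fairing kube manager.

# Hypothetical usage sketch: exercise add_ecr_config against a bare pod spec.
from kubernetes import client

class FakeKubeManager:
    """Stand-in for the real kube manager; pretends the secret already exists."""
    def secret_exists(self, name, namespace):
        return True
    def create_secret(self, namespace, secret):
        pass

pod_spec = client.V1PodSpec(containers=[client.V1Container(name='kaniko')])
add_ecr_config(FakeKubeManager(), pod_spec, 'default')
print(pod_spec.volumes[0].secret.secret_name)  # -> 'ecr-config'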
Example 2
def create_job_object():
    # Configure the Pod template container

    # Volume that will hold the service-account key;
    # its name matches the secret name used below.
    volume_name = "google-cloud-json"
    google_app_credentials_path = os.environ.get(
        'GOOGLE_APPLICATION_CREDENTIALS')

    # create a volume mount
    volume_mount = client.V1VolumeMount(mount_path='/etc/stuff',
                                        name=volume_name)

    # Create environment variables for container.
    # In this case, grab the values from the execution environment
    # perhaps using something like a .env file.
    env = [
        client.V1EnvVar(
            name='GOOGLE_APPLICATION_CREDENTIALS',
            # note this is a path + the filename of the app creds in the secret
            value='/etc/stuff/key.json'),  # google_app_credentials_path),
        client.V1EnvVar(name='GCS_STAGING_URL',
                        value=os.environ.get('GCS_STAGING_URL')),
        client.V1EnvVar(name='GCS_EXPORT_URL',
                        value=os.environ.get('GCS_EXPORT_URL')),
        client.V1EnvVar(name='ES_HOST', value=os.environ.get('ES_HOST'))
    ]

    # Create a volume.
    # This will go into the spec section of the template
    # Note that this specifies a secret volume
    # The secret needs to be created separately.
    volume = client.V1Volume(
        name=volume_name,
        secret=client.V1SecretVolumeSource(secret_name='google-cloud-json'))

    # Create the container section that will
    # go into the spec section
    container = client.V1Container(
        name="omicidx-builder",
        image="seandavi/omicidx-builder",
        volume_mounts=[volume_mount],
        env=env,
        command=['/bin/bash', '/code/biosample_pipeline.sh'])

    # Create and configure the spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "omicidx-builder"}),
        spec=client.V1PodSpec(restart_policy="Never",
                              volumes=[volume],
                              containers=[container]))
    # Create the specification of deployment
    spec = client.V1JobSpec(template=template, backoff_limit=4)
    # Instantiate the job object
    job = client.V1Job(api_version="batch/v1",
                       kind="Job",
                       metadata=client.V1ObjectMeta(name=JOB_NAME),
                       spec=spec)

    return job
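
A hedged sketch of submitting the Job built above; it assumes a reachable cluster, a module-level JOB_NAME, and the target namespace "default".

# Sketch only: submit the Job returned by create_job_object to the cluster.
from kubernetes import client, config

config.load_kube_config()   # or config.load_incluster_config() when running in a pod
job = create_job_object()
client.BatchV1Api().create_namespaced_job(namespace="default", body=job)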
Example 3
def update_index_op(app_dir: str, base_branch: str, base_git_repo: str,
                    bot_email: str, fork_git_repo: str, index_file: str,
                    lookup_file: str, workflow_id: str):
    return (dsl.ContainerOp(
        name='update_index',
        image=
        'gcr.io/kubeflow-examples/code-search/ks:v20181204-ee47a49-dirty-fa8aa3',
        command=['/usr/local/src/update_index.sh'],
        arguments=[
            '--appDir=%s' % app_dir,
            '--baseBranch=%s' % base_branch,
            '--baseGitRepo=%s' % base_git_repo,
            '--botEmail=%s' % bot_email,
            '--forkGitRepo=%s' % fork_git_repo,
            '--indexFile=%s' % index_file,
            '--lookupFile=%s' % lookup_file,
            '--workflowId=%s' % workflow_id,
        ],
    ).add_volume(
        k8s_client.V1Volume(
            name='github-access-token',
            secret=k8s_client.V1SecretVolumeSource(
                secret_name='github-access-token'))).add_env_variable(
                    k8s_client.V1EnvVar(
                        name='GITHUB_TOKEN',
                        value_from=k8s_client.V1EnvVarSource(
                            secret_key_ref=k8s_client.V1SecretKeySelector(
                                name='github-access-token',
                                key='token',
                            )))))
Example 4
def resourceop_basic(username, password):
    secret_resource = k8s_client.V1Secret(
        api_version="v1",
        kind="Secret",
        metadata=k8s_client.V1ObjectMeta(generate_name="my-secret-"),
        type="Opaque",
        data={
            "username": username,
            "password": password
        })
    rop = dsl.ResourceOp(
        name="create-my-secret",
        k8s_resource=secret_resource,
        attribute_outputs={"name": "{.metadata.name}"})

    secret = k8s_client.V1Volume(
        name="my-secret",
        secret=k8s_client.V1SecretVolumeSource(secret_name=rop.output))

    cop = dsl.ContainerOp(
        name="cop",
        image="library/bash:4.4.23",
        command=["sh", "-c"],
        arguments=["ls /etc/secret-volume"],
        pvolumes={"/etc/secret-volume": secret})
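
Note that V1Secret.data must hold base64-encoded values, so `username` and `password` are expected to arrive already encoded. A small illustrative helper (not part of the original sample) for preparing such arguments:

import base64

def b64(value: str) -> str:
    # V1Secret.data values must be base64-encoded strings
    return base64.b64encode(value.encode("utf-8")).decode("utf-8")

# e.g. resourceop_basic(b64("admin"), b64("s3cr3t"))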
Example 5
def add_ecr_config(kube_manager, pod_spec, namespace):
    """add secret

    :param kube_manager: kube manager for handles communication with Kubernetes' client
    :param pod_spec: pod spec like volumes and security context
    :param namespace: The custom resource

    """
    if not kube_manager.secret_exists('ecr-config', namespace):
        secret = client.V1Secret(metadata=client.V1ObjectMeta(name='ecr-config'),
                                 string_data={
                                     'config.json': '{"credsStore": "ecr-login"}'
                                 })
        kube_manager.create_secret(namespace, secret)

    volume_mount = client.V1VolumeMount(name='ecr-config',
                                        mount_path='/kaniko/.docker/', read_only=True)

    if pod_spec.containers[0].volume_mounts:
        pod_spec.containers[0].volume_mounts.append(volume_mount)
    else:
        pod_spec.containers[0].volume_mounts = [volume_mount]

    volume = client.V1Volume(name='ecr-config',
                             secret=client.V1SecretVolumeSource(secret_name='ecr-config'))

    if pod_spec.volumes:
        pod_spec.volumes.append(volume)
    else:
        pod_spec.volumes = [volume]
Example 6
def mount_secrets(spawner, pod, role_mapping):
    groups = spawner.userdata.get("groups", [])
    _loaded = set()
    
    for group in groups:
        print(f'Checking secrets for group: {group}')
        try:
            secrets = role_mapping['groups'].get(group, {}).get('secrets')
        except KeyError:
            continue
        
        if secrets:
            for secret_name in secrets:
                secret_name = secret_name.lower()

                if secret_name and secret_name not in _loaded:
                    pod.spec.volumes.append(
                        client.V1Volume(
                            name=(secret_name + "-volume"),
                            secret=client.V1SecretVolumeSource(secret_name=secret_name),
                        )
                    )
                    pod.spec.containers[0].volume_mounts.append(
                        client.V1VolumeMount(
                            mount_path="/opt/app-root/secrets/.aws/{}".format(
                                secret_name.lower()
                            ),
                            name=(secret_name + "-volume"),
                            read_only=True
                        )
                    )
                    _loaded.add(secret_name)
    
    return spawner, pod
Example 7
def create_pod_template_with_pvc(args):
    # Mount bootstrap config secret volume
    config_mount = client.V1VolumeMount(mount_path="/tmp",
                                        name="bootstrap-config-vol")
    # PersistentVolume mount
    pv_mount = client.V1VolumeMount(
        mount_path="/home",  # better name?
        name="data")

    # Configure the Pod template container
    isTTY = args.image == "chainweb-base"  # otherwise container will always "finish" and restart.
    pull_policy = "Never" if args.local else "Always"

    container = client.V1Container(
        name="chainweb",
        image=args.image,
        image_pull_policy=pull_policy,
        tty=isTTY,
        ports=[client.V1ContainerPort(container_port=PORT_NUMBER)],
        volume_mounts=[config_mount, pv_mount])

    # Configure volume(s)
    config_volume = client.V1Volume(
        name="bootstrap-config-vol",
        secret=client.V1SecretVolumeSource(secret_name="bootstrap-config"))

    # Create and configure the spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": "chainweb"}),
        spec=client.V1PodSpec(containers=[container], volumes=[config_volume]))
    return template
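
The `data` mount above has no matching entry in `volumes`, so the template is presumably consumed by a controller that supplies it. An illustrative sketch (names and sizes are assumptions) pairing the template with a StatefulSet volumeClaimTemplate named "data":

# Illustrative only: provide the "data" volume via a StatefulSet volumeClaimTemplate.
data_claim = client.V1PersistentVolumeClaim(
    metadata=client.V1ObjectMeta(name="data"),
    spec=client.V1PersistentVolumeClaimSpec(
        access_modes=["ReadWriteOnce"],
        resources=client.V1ResourceRequirements(requests={"storage": "1Gi"})))

stateful_set_spec = client.V1StatefulSetSpec(
    service_name="chainweb",
    selector=client.V1LabelSelector(match_labels={"app": "chainweb"}),
    template=create_pod_template_with_pvc(args),  # args as passed to the function above
    volume_claim_templates=[data_claim])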
Example 8
 def create_job_object(self, job_name, container_image, args):
     volume_name = ""  # volume inside of which you put your service account
     google_app_credentials_path = os.environ.get(
         'GOOGLE_APPLICATION_CREDENTIALS')
     volume_mount = client.V1VolumeMount(mount_path='/'.join(
         google_app_credentials_path.split('/')[:-1]),
                                         name=volume_name)
     env = client.V1EnvVar(name='GOOGLE_APPLICATION_CREDENTIALS',
                           value=google_app_credentials_path)
     container = client.V1Container(name=job_name,
                                    image=container_image,
                                    args=args,
                                    volume_mounts=[volume_mount],
                                    env=[env],
                                    image_pull_policy="Always")
     volume = client.V1Volume(
         name=volume_name,
         secret=client.V1SecretVolumeSource(
             secret_name='<secret-where-you-put-the-service-account>'))
     template = client.V1PodTemplateSpec(
         metadata=client.V1ObjectMeta(labels={"app": "sample"}),
         spec=client.V1PodSpec(restart_policy="Never",
                               containers=[container],
                               volumes=[volume]))
     spec = client.V1JobSpec(template=template,
                             backoff_limit=3,
                             ttl_seconds_after_finished=60)
     job = client.V1Job(api_version="batch/v1",
                        kind="Job",
                        metadata=client.V1ObjectMeta(name=job_name),
                        spec=spec)
     return job
Example 9
    def _volume_mounts(kube_manager, pod_spec, namespace):  #pylint:disable=unused-argument
        volume_mount = client.V1VolumeMount(name=mount_name,
                                            mount_path=mount_path,
                                            sub_path=sub_path)
        if pod_spec.containers[0].volume_mounts:
            pod_spec.containers[0].volume_mounts.append(volume_mount)
        else:
            pod_spec.containers[0].volume_mounts = [volume_mount]

        if volume_type == 'pvc':
            volume = client.V1Volume(
                name=mount_name,
                persistent_volume_claim=client.
                V1PersistentVolumeClaimVolumeSource(claim_name=volume_name))
        elif volume_type == 'secret':
            volume = client.V1Volume(
                name=mount_name,
                secret=client.V1SecretVolumeSource(secret_name=volume_name))
        elif volume_type == 'config_map':
            volume = client.V1Volume(
                name=mount_name,
                config_map=client.V1ConfigMapVolumeSource(name=volume_name))
        else:
            raise RuntimeError("Unsupport type %s" % volume_type)

        if pod_spec.volumes:
            pod_spec.volumes.append(volume)
        else:
            pod_spec.volumes = [volume]
Example 10
def add_gcp_credentials(kube_manager, pod_spec, namespace):
    if not kube_manager.secret_exists(constants.GCP_CREDS_SECRET_NAME,
                                      namespace):
        raise ValueError(
            'Unable to mount credentials: ' +
            'Secret user-gcp-sa not found in namespace {}'.format(namespace))

    # Set appropriate secrets and volumes to enable kubeflow-user service
    # account.
    env_var = client.V1EnvVar(name='GOOGLE_APPLICATION_CREDENTIALS',
                              value='/etc/secrets/user-gcp-sa.json')
    if pod_spec.containers[0].env:
        pod_spec.containers[0].env.append(env_var)
    else:
        pod_spec.containers[0].env = [env_var]

    volume_mount = client.V1VolumeMount(name='user-gcp-sa',
                                        mount_path='/etc/secrets',
                                        read_only=True)
    if pod_spec.containers[0].volume_mounts:
        pod_spec.containers[0].volume_mounts.append(volume_mount)
    else:
        pod_spec.containers[0].volume_mounts = [volume_mount]

    volume = client.V1Volume(
        name='user-gcp-sa',
        secret=client.V1SecretVolumeSource(secret_name='user-gcp-sa'))
    if pod_spec.volumes:
        pod_spec.volumes.append(volume)
    else:
        pod_spec.volumes = [volume]
Example 11
def add_azure_credentials(kube_manager, pod_spec, namespace):
    if not kube_manager.secret_exists(constants.AZURE_CREDS_SECRET_NAME,
                                      namespace):
        raise ValueError(
            "Unable to mount credentials: " +
            f"Secret {constants.AZURE_CREDS_SECRET_NAME} not found in namespace {namespace}"
        )

    # Set appropriate secrets and volumes to enable kubeflow-user service
    # account.
    logging.warn("Adding azure auth location")
    env_var = client.V1EnvVar(name='AZURE_AUTH_LOCATION',
                              value='/etc/secrets/azure-credentials.json')
    if pod_spec.containers[0].env:
        pod_spec.containers[0].env.append(env_var)
    else:
        pod_spec.containers[0].env = [env_var]

    volume_mount = client.V1VolumeMount(name='azure-credentials',
                                        mount_path='/etc/secrets',
                                        read_only=True)
    if pod_spec.containers[0].volume_mounts:
        pod_spec.containers[0].volume_mounts.append(volume_mount)
    else:
        pod_spec.containers[0].volume_mounts = [volume_mount]

    volume = client.V1Volume(
        name='azure-credentials',
        secret=client.V1SecretVolumeSource(
            secret_name=constants.AZURE_CREDS_SECRET_NAME))
    if pod_spec.volumes:
        pod_spec.volumes.append(volume)
    else:
        pod_spec.volumes = [volume]
Example 12
def kaniko_op(image, context_path, secret_name='docker-secret'):
    """use kaniko to build Docker image."""

    from kubernetes import client as k8s_client
    cops = dsl.ContainerOp(
        name='kaniko',
        image='gcr.io/kaniko-project/executor:latest',
        arguments=["--dockerfile", "/context/Dockerfile",
                   "--context", "/context",
                   "--destination", image],
    )

    cops.add_volume(
        k8s_client.V1Volume(
            name='registry-creds',
            secret=k8s_client.V1SecretVolumeSource(
                secret_name=secret_name,
                items=[{'key': '.dockerconfigjson', 'path': '.docker/config.json'}],
            )
        ))
    cops.container.add_volume_mount(
        k8s_client.V1VolumeMount(
            name='registry-creds',
            mount_path='/root/',
        )
    )
    return cops.apply(mount_v3io(remote=context_path, mount_path='/context'))
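
The op mounts a pre-existing `docker-secret`. A hedged sketch of creating such a kubernetes.io/dockerconfigjson secret with the Python client; the registry URL, credentials, and the "kubeflow" namespace are placeholders:

# Illustrative only: create the docker-registry secret that kaniko_op mounts.
import base64, json
from kubernetes import client, config

config.load_kube_config()
docker_config = {"auths": {"https://index.docker.io/v1/": {
    "auth": base64.b64encode(b"user:password").decode()}}}
secret = client.V1Secret(
    metadata=client.V1ObjectMeta(name="docker-secret"),
    type="kubernetes.io/dockerconfigjson",
    string_data={".dockerconfigjson": json.dumps(docker_config)})
client.CoreV1Api().create_namespaced_secret("kubeflow", secret)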
Example 13
 def mount_secret(self, name, path='/secret', items=None):
     self.add_volume(client.V1Volume(name=name,
                                     secret=client.V1SecretVolumeSource(
                                         secret_name=name,
                                         items=items,
                                     )),
                     mount_path=path)
Example 14
    def submit(self):
        """Submit a image spec to openshift's s2i and wait for completion """
        volume_mounts = [
            client.V1VolumeMount(mount_path="/var/run/docker.sock",
                                 name="docker-socket")
        ]
        volumes = [
            client.V1Volume(name="docker-socket",
                            host_path=client.V1HostPathVolumeSource(
                                path="/var/run/docker.sock"))
        ]

        if self.push_secret:
            volume_mounts.append(
                client.V1VolumeMount(mount_path="/root/.docker",
                                     name='docker-push-secret'))
            volumes.append(
                client.V1Volume(name='docker-push-secret',
                                secret=client.V1SecretVolumeSource(
                                    secret_name=self.push_secret)))

        self.pod = client.V1Pod(metadata=client.V1ObjectMeta(
            name=self.name, labels={"name": self.name}),
                                spec=client.V1PodSpec(containers=[
                                    client.V1Container(
                                        image=self.builder_image,
                                        name="builder",
                                        args=self.get_cmd(),
                                        image_pull_policy='Always',
                                        volume_mounts=volume_mounts,
                                    )
                                ],
                                                      volumes=volumes,
                                                      restart_policy="Never"))

        try:
            ret = self.api.create_namespaced_pod(self.namespace, self.pod)
        except client.rest.ApiException as e:
            if e.status == 409:
                # Someone else created it!
                pass
            else:
                raise

        w = watch.Watch()
        try:
            for f in w.stream(self.api.list_namespaced_pod,
                              self.namespace,
                              label_selector="name={}".format(self.name)):
                if f['type'] == 'DELETED':
                    self.progress('pod.phasechange', 'Deleted')
                    return
                self.pod = f['object']
                self.progress('pod.phasechange', self.pod.status.phase)
                if self.pod.status.phase == 'Succeeded':
                    self.cleanup()
                elif self.pod.status.phase == 'Failed':
                    self.cleanup()
        finally:
            w.stop()
Example 15
 def _use_gcp_secret(task):
     from kubernetes import client as k8s_client
     return (
         task
             .add_volume(
                 k8s_client.V1Volume(
                     name=volume_name,
                     secret=k8s_client.V1SecretVolumeSource(
                         secret_name=secret_name,
                     )
                 )
             )
             .add_volume_mount(
                 k8s_client.V1VolumeMount(
                     name=volume_name,
                     mount_path=secret_volume_mount_path,
                 )
             )
             .add_env_variable(
                 k8s_client.V1EnvVar(
                     name='GOOGLE_APPLICATION_CREDENTIALS',
                     value=secret_volume_mount_path + secret_file_path_in_volume,
                 )
             )
     )
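
Closures like this one (and the variant in Example 17) are typically returned by a factory such as kfp.gcp.use_gcp_secret in the KFP 1.x SDK and applied to a task; a hedged usage sketch, with the task image and bucket as placeholders:

# Sketch, assuming the KFP 1.x SDK; image and bucket are placeholders.
from kfp import dsl, gcp

@dsl.pipeline(name="gcs-reader")
def gcs_reader_pipeline():
    op = dsl.ContainerOp(
        name="read-gcs",
        image="google/cloud-sdk:slim",
        command=["sh", "-c", "gsutil ls gs://my-bucket"])
    op.apply(gcp.use_gcp_secret("user-gcp-sa"))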
Example 16
 def _create_k8s_job(self, yaml_spec):
   """ _create_k8s_job creates a kubernetes job based on the yaml spec """
   pod = k8s_client.V1Pod(metadata=k8s_client.V1ObjectMeta(generate_name=yaml_spec['metadata']['generateName']))
   container = k8s_client.V1Container(name = yaml_spec['spec']['containers'][0]['name'],
                                      image = yaml_spec['spec']['containers'][0]['image'],
                                      args = yaml_spec['spec']['containers'][0]['args'],
                                      volume_mounts = [k8s_client.V1VolumeMount(
                                          name=yaml_spec['spec']['containers'][0]['volumeMounts'][0]['name'],
                                          mount_path=yaml_spec['spec']['containers'][0]['volumeMounts'][0]['mountPath'],
                                      )],
                                      env = [k8s_client.V1EnvVar(
                                          name=yaml_spec['spec']['containers'][0]['env'][0]['name'],
                                          value=yaml_spec['spec']['containers'][0]['env'][0]['value'],
                                      )])
   pod.spec = k8s_client.V1PodSpec(restart_policy=yaml_spec['spec']['restartPolicy'],
                                   containers = [container],
                                   service_account_name=yaml_spec['spec']['serviceAccountName'],
                                   volumes=[k8s_client.V1Volume(
                                       name=yaml_spec['spec']['volumes'][0]['name'],
                                       secret=k8s_client.V1SecretVolumeSource(
                                          secret_name=yaml_spec['spec']['volumes'][0]['secret']['secretName'],
                                       )
                                   )])
   try:
     api_response = self._corev1.create_namespaced_pod(yaml_spec['metadata']['namespace'], pod)
     return api_response.metadata.name, True
   except k8s_client.rest.ApiException as e:
     logging.exception("Exception when calling CoreV1Api->create_namespaced_pod: {}\n".format(str(e)))
     return '', False
Example 17
 def _use_gcp_secret(task):
     from kubernetes import client as k8s_client
     task = task.add_volume(
         k8s_client.V1Volume(name=volume_name,
                             secret=k8s_client.V1SecretVolumeSource(
                                 secret_name=secret_name, )))
     task.container \
         .add_volume_mount(
                 k8s_client.V1VolumeMount(
                     name=volume_name,
                     mount_path=secret_volume_mount_path,
                 )
             ) \
         .add_env_variable(
             k8s_client.V1EnvVar(
                 name='GOOGLE_APPLICATION_CREDENTIALS',
                 value=secret_volume_mount_path + secret_file_path_in_volume,
             )
         ) \
         .add_env_variable(
             k8s_client.V1EnvVar(
                 name='CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE',
                 value=secret_volume_mount_path + secret_file_path_in_volume,
             )
         ) # Set GCloud Credentials by using the env var override.
     # TODO: Is there a better way for GCloud to pick up the credential?
     return task
Example 18
def pipeline_head(git_repo,
                  branch="master",
                  rev='HEAD',
                  git_secret="git-creds"):
    src_vol_op = dsl.VolumeOp(name="Git_source_pvc",
                              resource_name="git-pvc",
                              size='60Mi',
                              modes=dsl.VOLUME_MODE_RWM)

    gitsync_step = dsl.ContainerOp(name="Git-sync",
                                   image="k8s.gcr.io/git-sync/git-sync:v3.3.0",
                                   arguments=[
                                       "--ssh", f"--repo={git_repo}",
                                       "--root=/tmp/src",
                                       "--dest=pipeline_source",
                                       f"--rev={rev}", f"--branch={branch}",
                                       "--one-time"
                                   ],
                                   pvolumes={"/tmp/src": src_vol_op.volume})

    gitsync_step.add_volume(
        k8s_client.V1Volume(name='git-cred-volume',
                            secret=k8s_client.V1SecretVolumeSource(
                                secret_name=git_secret))).add_volume_mount(
                                    k8s_client.V1VolumeMount(
                                        mount_path="/etc/git-secret",
                                        name="git-cred-volume"))
    gitsync_step.execution_options.caching_strategy.max_cache_staleness = "P0D"

    step1 = dsl.ContainerOp(
        name="step1",
        image="python:3.8",
        command=["python"],
        arguments=[
            "/tmp/src/pipeline_source/step1.py", "--arg1", "input_arg1",
            "--arg2", "input_arg2"
        ],
        pvolumes={
            "/tmp/src": src_vol_op.volume.after(gitsync_step)
        }).add_env_variable(
            k8s_client.V1EnvVar(name="PYTHONPATH",
                                value="/tmp/src/pipeline_source"))
    step1.execution_options.caching_strategy.max_cache_staleness = "P0D"

    step2 = dsl.ContainerOp(name="step2",
                            image="python:3.8",
                            command=["python"],
                            arguments=[
                                "/tmp/src/pipeline_source/step2.py", "--arg1",
                                "input_arg1", "--arg2", "input_arg2"
                            ],
                            pvolumes={
                                "/tmp/src": src_vol_op.volume.after(step1)
                            }).add_env_variable(
                                k8s_client.V1EnvVar(
                                    name="PYTHONPATH",
                                    value="/tmp/src/pipeline_source"))
    step2.execution_options.caching_strategy.max_cache_staleness = "P0D"
Example 19
def get_volume_from_secret(volume_name, mount_path, secret_name, items=None):
    secret = client.V1SecretVolumeSource(secret_name=secret_name, items=items)
    volumes = [client.V1Volume(name=volume_name, secret=secret)]
    volume_mounts = [
        client.V1VolumeMount(name=volume_name,
                             mount_path=mount_path,
                             read_only=True)
    ]
    return volumes, volume_mounts
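
A short sketch of wiring the returned lists into a pod spec; the container name, image, and secret names below are illustrative:

# Illustrative wiring of get_volume_from_secret into a pod spec.
volumes, volume_mounts = get_volume_from_secret(
    volume_name="tls-certs", mount_path="/etc/tls", secret_name="my-tls-secret")

pod_spec = client.V1PodSpec(
    containers=[client.V1Container(name="app",
                                   image="nginx:alpine",
                                   volume_mounts=volume_mounts)],
    volumes=volumes)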
Example 20
    def _mount_secret(task):
        from kubernetes import client as k8s_client

        vol = k8s_client.V1SecretVolumeSource(secret_name=secret_name,
                                              items=items)
        return task.add_volume(
            k8s_client.V1Volume(
                name=volume_name, secret=vol)).add_volume_mount(
                    k8s_client.V1VolumeMount(mount_path=mount_path,
                                             name=volume_name))
Example 21
 def _use_secret(task):
     return (task.add_volume(
         k8s_client.V1Volume(
             name=volume_name,
             secret=k8s_client.V1SecretVolumeSource(
                 secret_name=secret_name, ))).add_volume_mount(
                     k8s_client.V1VolumeMount(
                         name=volume_name,
                         mount_path=mount_path,
                     )))
Example 22
 def mount_secret(self, name, path="/secret", items=None, sub_path=None):
     self.add_volume(
         client.V1Volume(
             name=name,
             secret=client.V1SecretVolumeSource(
                 secret_name=name,
                 items=items,
             ),
         ),
         mount_path=path,
         sub_path=sub_path,
     )
Example 23
 def _use_ai_pipeline_params(task):
     from kubernetes import client as k8s_client
     task = task.add_volume(
         k8s_client.V1Volume(
             name=secret_name,  # secret_name as volume name
             secret=k8s_client.V1SecretVolumeSource(
                 secret_name=secret_name)))
     task.container.add_volume_mount(
         k8s_client.V1VolumeMount(mount_path=secret_volume_mount_path,
                                  name=secret_name))
     task.container.set_image_pull_policy(image_pull_policy)
     return task
Example 24
def default_gcp_op(name: str, image: str, command: str = None,
           arguments: str = None, file_inputs: Dict[dsl.PipelineParam, str] = None,
           file_outputs: Dict[str, str] = None, is_exit_handler=False):
  """An operator that mounts the default GCP service account to the container.

  The user-gcp-sa secret is created as part of the kubeflow deployment that
  stores the access token for kubeflow user service account.

  With this service account, the container has access to a range of GCP
  APIs. This service account is automatically created as part of the
  kubeflow deployment.

  For the list of GCP APIs this service account can access, see
  https://github.com/kubeflow/kubeflow/blob/7b0db0d92d65c0746ac52b000cbc290dac7c62b1/deployment/gke/deployment_manager_configs/iam_bindings_template.yaml#L18

  If you want to call the GCP APIs in a different project, grant the kf-user
  service account access permission.
  """

  return (
    dsl.ContainerOp(
      name,
      image,
      command,
      arguments,
      file_inputs,
      file_outputs,
      is_exit_handler,
    )
      .add_volume(
      k8s_client.V1Volume(
        name='gcp-credentials',
        secret=k8s_client.V1SecretVolumeSource(
          secret_name='user-gcp-sa'
        )
      )
    )
      .add_volume_mount(
      k8s_client.V1VolumeMount(
        mount_path='/secret/gcp-credentials',
        name='gcp-credentials',
      )
    )
      .add_env_variable(
      k8s_client.V1EnvVar(
        name='GOOGLE_APPLICATION_CREDENTIALS',
        value='/secret/gcp-credentials/user-gcp-sa.json'
      )
    )
  )
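
An illustrative pipeline using default_gcp_op; the step name, image, and command are assumptions, not from the original source:

# Illustrative only: run one step with the user-gcp-sa secret mounted.
from kfp import dsl

@dsl.pipeline(name="gcp-op-demo")
def gcp_demo_pipeline():
    default_gcp_op(
        name="list-buckets",
        image="google/cloud-sdk:slim",
        command="sh",
        arguments=["-c", "gsutil ls"])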
Example 25
def create_job_object(job_arguments, size, docker_image, docker_image_tag,
                      affinity):

    user = os.environ['USER']
    job = client.V1Job(
        metadata=client.V1ObjectMeta(
            name='kaml-remote-{}-{}'.format(user, uuid.uuid1())),
        spec=client.V1JobSpec(template=client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(name='kaml-remote-{}-{}'.format(
                user, uuid.uuid1()),
                                         labels={'type': size}),
            spec=client.V1PodSpec(containers=[
                client.V1Container(
                    name='kaml-remote',
                    args=job_arguments,
                    image='{}:{}'.format(docker_image, docker_image_tag),
                    image_pull_policy='Always',
                    env=[client.V1EnvVar(name='KAML_HOME', value='/app')],
                    volume_mounts=[
                        client.V1VolumeMount(name='kaml-cfg-volume',
                                             read_only=True,
                                             mount_path='/app/kaml.cfg',
                                             sub_path='kaml.cfg'),
                        client.V1VolumeMount(
                            name='gcp-service-account',
                            read_only=True,
                            mount_path='/app/service-key.json',
                            sub_path='service-key.json'),
                    ])
            ],
                                  affinity=affinity,
                                  volumes=[
                                      client.V1Volume(name='kaml-cfg-volume',
                                                      config_map=client.
                                                      V1ConfigMapVolumeSource(
                                                          name='kaml-cfg')),
                                      client.V1Volume(
                                          name='gcp-service-account',
                                          secret=client.V1SecretVolumeSource(
                                              secret_name='gcp-service-account',
                                              items=[
                                                  client.V1KeyToPath(
                                                      key='service-key.json',
                                                      path='service-key.json')
                                              ]))
                                  ],
                                  restart_policy='Never'))))

    return job
Example 26
    def setup_secret_file(self, secret_name, file_path, filename):
        """Configure a file as a cluster namespaced secret

        Args:
            secret_name (str): The secret name.
            file_path (str): The source file path to be saved as a secret on the cluster.
            filename (str): The filename to be used in the secret volume.

        Returns:
            V1SecretVolumeSource referencing the secret, or None if creation failed.

        """
        with open(file_path, 'rb') as f:
            secret_content = base64.b64encode(f.read()).decode()
        secret = self.get_secret(secret_name)
        if isinstance(secret, dict):
            if secret.get("data", {}).get(filename) == secret_content:
                logger.debug("%s already in namespace." % secret_name)
                return client.V1SecretVolumeSource(secret_name=secret_name)
            # Remove secret
            logger.debug("Replacing existing %s..." % secret_name)
            res = api_request(core_v1_api.delete_namespaced_secret,
                              name=secret_name,
                              namespace=self.namespace)
            logger.debug(res)

        # See https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Secret.md
        metadata = {'name': secret_name, 'namespace': self.namespace}
        secret = client.V1Secret(data={filename: secret_content},
                                 metadata=metadata)
        logger.debug("Creating %s..." % secret_name)
        res = api_request(core_v1_api.create_namespaced_secret, self.namespace,
                          secret)
        if res.get("error"):
            logger.debug("Failed to create %s" % secret_name)
            return None
        return client.V1SecretVolumeSource(secret_name=secret_name)
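
A hedged usage sketch; `manager` stands for an instance of the class this method belongs to, and the secret and file names are placeholders:

# Illustrative only: store a key file as a secret, then mount it into a pod spec.
source = manager.setup_secret_file(secret_name="gcp-sa",
                                   file_path="/tmp/key.json",
                                   filename="key.json")
if source is not None:
    volume = client.V1Volume(name="gcp-sa", secret=source)
    mount = client.V1VolumeMount(name="gcp-sa",
                                 mount_path="/etc/secrets",
                                 read_only=True)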
Example 27
def create_volume(volume_data):
    if "name" in volume_data:
        volume = client.V1Volume(
            name=volume_data["name"]
        )

        # persistent claim
        if "persistentVolumeClaim" in volume_data:
            volume_pvc = volume_data["persistentVolumeClaim"]
            if "claimName" in volume_pvc:
                pvc = client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=volume_pvc["claimName"]
                )
                volume.persistent_volume_claim = pvc

        # hostpath
        if "hostPath" in volume_data and "path" in volume_data["hostPath"]:
            host_path = client.V1HostPathVolumeSource(
                path=volume_data["hostPath"]["path"]
            )

            if "hostPath" in volume_data and "type" in volume_data["hostPath"]:
                host_path.type = volume_data["hostPath"]["type"]
                volume.host_path = host_path
        # nfs
        if ("nfs" in volume_data and
                "path" in volume_data["nfs"] and
                "server" in volume_data["nfs"]):
            volume.nfs = client.V1NFSVolumeSource(
                path=volume_data["nfs"]["path"],
                server=volume_data["nfs"]["server"]
            )

        # secret
        if "secret" in volume_data:
            volume.secret = client.V1SecretVolumeSource(
                secret_name=volume_data["secret"]["secretName"]
            )

        # configMap 
        if "configMap" in volume_data:
            volume.config_map = client.V1ConfigMapVolumeSource(
                name=volume_data["configMap"]["name"]
            )

        return volume

    return None
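
An illustrative input for create_volume, shaped like a pod-spec "volumes" entry that references a secret (names are placeholders):

volume_data = {
    "name": "tls-certs",
    "secret": {"secretName": "my-tls-secret"},
}
volume = create_volume(volume_data)  # -> V1Volume with .secret populated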
Example 28
def replicate_data_cloud_sync(
        # Define variables that the user can set in the pipelines UI; set default values
        cloud_sync_relationship_id: str,
        cloud_sync_refresh_token_k8s_secret: str = "cloud-sync-refresh-token"):
    # Pipeline Steps:

    # Trigger Cloud Sync update
    replicate = NetappCloudSyncUpdateOp(cloud_sync_relationship_id)
    # Mount k8s secret containing Cloud Sync refresh token
    replicate.add_pvolumes({
        '/mnt/secret':
        k8s_client.V1Volume(
            name='cloud-sync-refresh-token',
            secret=k8s_client.V1SecretVolumeSource(
                secret_name=cloud_sync_refresh_token_k8s_secret))
    })
Example 29
    def with_secret(self, secret_name: str, mount_path: str) -> Optional["HmlContainerOp"]:
        """
        Bind a secret given by `secret_name` to the local path defined in `mount_path`

        Args:
             secret_name (str): The name of the secret (in the same namespace)
             mount_path (str): The path to mount the secret locally

        Returns:
            A reference to the current `HmlContainerOp` (self)
        """
        volume_name = secret_name

        self.op.add_volume(
            k8s_client.V1Volume(name=volume_name, secret=k8s_client.V1SecretVolumeSource(secret_name=secret_name))
        )
        self.op.add_volume_mount(k8s_client.V1VolumeMount(name=volume_name, mount_path=mount_path))
        return self
Example 30
 def _use_secret(task):
     import os
     from kubernetes import client as k8s_client
     task = task.add_volume(
         k8s_client.V1Volume(
             name=volume_name,
             secret=k8s_client.V1SecretVolumeSource(
                 secret_name=secret_name))).add_volume_mount(
                     k8s_client.V1VolumeMount(
                         name=volume_name,
                         mount_path=secret_volume_mount_path))
     if env_variable:
         task.container.add_env_variable(
             k8s_client.V1EnvVar(
                 name=env_variable,
                 value=os.path.join(secret_volume_mount_path,
                                    secret_file_path_in_volume),
             ))
     return task