Example #1
def attach_output_volume(op):
    """Attaches emptyDir volumes to container operations.

    See https://github.com/kubeflow/pipelines/issues/1654
    """

    # Handle auto-generated pipeline metadata
    op.output_artifact_paths = {
        'mlpipeline-ui-metadata': '/output/mlpipeline-ui-metadata.json',
        'mlpipeline-metrics': '/output/mlpipeline-metrics.json',
    }

    # Add somewhere to store regular output
    op.add_volume(
        k8s_client.V1Volume(name='volume',
                            empty_dir=k8s_client.V1EmptyDirVolumeSource()))
    op.container.add_volume_mount(
        k8s_client.V1VolumeMount(name='volume', mount_path='/output'))

    # func_to_container_op wants to store outputs under /tmp/outputs
    op.add_volume(
        k8s_client.V1Volume(name='outputs',
                            empty_dir=k8s_client.V1EmptyDirVolumeSource()))
    op.container.add_volume_mount(
        k8s_client.V1VolumeMount(name='outputs', mount_path='/tmp/outputs'))

    return op
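A minimal usage sketch (not from the source): in the KFP v1 SDK a helper like this is typically registered as an op transformer so it is applied to every op in the pipeline; the single-step pipeline below is hypothetical.

import kfp.dsl as dsl

@dsl.pipeline(name='demo-pipeline')
def demo_pipeline():
    # Hypothetical step; image and command are placeholders.
    dsl.ContainerOp(name='step',
                    image='alpine:latest',
                    command=['sh', '-c', 'echo hello > /output/result.txt'])
    # Apply attach_output_volume to every op declared in this pipeline.
    dsl.get_pipeline_conf().add_op_transformer(attach_output_volume)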
Example #2
def dispatch(endpoint, access_key, secret_key, bucket_name, object_name):
    job_name = f"local-rebuild-{uuid.uuid1()}"

    downloader_env = [
        client.V1EnvVar(name="ENDPOINT", value=endpoint),
        client.V1EnvVar(name="ACCESS_KEY", value=access_key),
        client.V1EnvVar(name="SECRET_KEY", value=secret_key),
        client.V1EnvVar(name="BUCKET_NAME", value=bucket_name),
        client.V1EnvVar(name="OBJECT_NAME", value=object_name)
    ]

    downloader_container = client.V1Container(
        name="downloader",
        image=os.getenv("DOWNLOADER_IMAGE"),
        env=downloader_env,
        volume_mounts=[client.V1VolumeMount(
            name="processor-input", mount_path="/output")])

    processor_container = client.V1Container(
        name="processor",
        image=os.getenv("PROCESSOR_IMAGE"),
        volume_mounts=[client.V1VolumeMount(name="processor-input", mount_path="/input", read_only=True),
                       client.V1VolumeMount(name="processor-output", mount_path="/output")])

    pod_spec = client.V1PodSpec(
        restart_policy="Never",
        init_containers=[downloader_container],
        containers=[processor_container],
        volumes=[
            client.V1Volume(name="processor-input",
                            empty_dir=client.V1EmptyDirVolumeSource()),
            client.V1Volume(name="processor-output",
                            empty_dir=client.V1EmptyDirVolumeSource()),
        ])

    # Create and configure a spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(name=job_name, labels={
                                     "app": "local-rebuild-processor"}),
        spec=pod_spec)

    # Create the specification of the job
    spec = client.V1JobSpec(
        template=template,
        backoff_limit=0)

    # Instantiate the job object
    job = client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=client.V1ObjectMeta(name=job_name, labels={
                                     "app": "local-rebuild-processor"}),
        spec=spec)

    client.BatchV1Api().create_namespaced_job(
        body=job,
        namespace="default")
Example #3
    def _mount_v3iod(task):
        from kubernetes import client as k8s_client

        def add_vol(name, mount_path, host_path):
            vol = k8s_client.V1Volume(
                name=name,
                host_path=k8s_client.V1HostPathVolumeSource(path=host_path, type=''),
            )
            task.add_volume(vol).add_volume_mount(
                k8s_client.V1VolumeMount(mount_path=mount_path, name=name)
            )

        add_vol(name='shm', mount_path='/dev/shm', host_path='/dev/shm/' + namespace)
        add_vol(
            name='v3iod-comm',
            mount_path='/var/run/iguazio/dayman',
            host_path='/var/run/iguazio/dayman/' + namespace,
        )

        vol = k8s_client.V1Volume(
            name='daemon-health', empty_dir=k8s_client.V1EmptyDirVolumeSource()
        )
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(
                mount_path='/var/run/iguazio/daemon_health', name='daemon-health'
            )
        )

        vol = k8s_client.V1Volume(
            name='v3io-config',
            config_map=k8s_client.V1ConfigMapVolumeSource(
                name=v3io_config_configmap, default_mode=420
            ),
        )
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(mount_path='/etc/config/v3io', name='v3io-config')
        )

        # vol = k8s_client.V1Volume(name='v3io-auth',
        #                           secret=k8s_client.V1SecretVolumeSource(secret_name=v3io_auth_secret,
        #                                                                  default_mode=420))
        # task.add_volume(vol).add_volume_mount(k8s_client.V1VolumeMount(mount_path='/igz/.igz', name='v3io-auth'))

        task.add_env_variable(
            k8s_client.V1EnvVar(
                name='CURRENT_NODE_IP',
                value_from=k8s_client.V1EnvVarSource(
                    field_ref=k8s_client.V1ObjectFieldSelector(
                        api_version='v1', field_path='status.hostIP'
                    )
                ),
            )
        )
        task.add_env_variable(
            k8s_client.V1EnvVar(
                name='IGZ_DATA_CONFIG_FILE', value='/igz/java/conf/v3io.conf'
            )
        )

        return task
Example #4
    def with_empty_dir(self, name: str,
                       mount_path: str) -> Optional['HmlInferenceDeployment']:
        """
        Create an empty, writable volume with the given `name` mounted to the
        specified `mount_path`

        Args:
            name (str): The name of the volume to mount
            mount_path (str): The path to mount the empty volume


        Returns:
            A reference to the current `HmlInferenceDeployment` (self)
        """

        self.pod_volumes.append(
            client.V1Volume(name=name,
                            empty_dir=client.V1EmptyDirVolumeSource()))

        self.k8s_container.add_volume_mount(
            client.V1VolumeMount(
                name=name,
                mount_path=mount_path,
            ))
        return self
Example #5
def _add_tls_init_container(init_containers, volumes, volume_mounts, tls_info,
                            tls_config):
    #   Adds an InitContainer to the pod to set up TLS certificate information. For components that act as a
    #   server (tls_info["use_tls"] is True), the InitContainer will populate a directory with server and CA certificate
    #   materials in various formats. For other components (tls_info["use_tls"] is False, or tls_info is not specified),
    #   the InitContainer will populate a directory with CA certificate materials in PEM and JKS formats.
    #   In either case, the certificate directory is mounted onto the component container filesystem at the location
    #   specified by tls_info["cert_directory"] if present, otherwise at the configured default mount point
    #   (tls_config["component_cert_dir"]).

    cert_directory = tls_info.get("cert_directory") or tls_config.get(
        "component_cert_dir")
    env = {}
    env["TLS_SERVER"] = "true" if tls_info.get("use_tls") else "false"

    # Create the certificate volume and volume mounts
    volumes.append(
        client.V1Volume(name="tls-info",
                        empty_dir=client.V1EmptyDirVolumeSource()))
    volume_mounts.append(
        client.V1VolumeMount(name="tls-info", mount_path=cert_directory))
    init_volume_mounts = [
        client.V1VolumeMount(name="tls-info",
                             mount_path=tls_config["cert_path"])
    ]

    # Create the init container
    init_containers.append(
        _create_container_object("init-tls",
                                 tls_config["image"],
                                 False,
                                 volume_mounts=init_volume_mounts,
                                 env=env))
Example #6
def recommender_pipeline():
    # Load new data
    data = dsl.ContainerOp(
        name='updatedata',
        output_artifact_paths={
          'mlpipeline-ui-metadata': '/output/mlpipeline-ui-metadata.json',
          'mlpipeline-metrics': '/output/mlpipeline-metrics.json',
        },
        image='lightbend/kubeflow-datapublisher:0.0.1') \
      .add_env_variable(k8s_client.V1EnvVar(name='MINIO_URL',value='http://minio-service.kubeflow.svc.cluster.local:9000')) \
      .add_env_variable(k8s_client.V1EnvVar(name='MINIO_KEY', value='minio')) \
      .add_env_variable(k8s_client.V1EnvVar(name='MINIO_SECRET', value='minio123')) \
      .add_volume(k8s_client.V1Volume(name='outputs', empty_dir=k8s_client.V1EmptyDirVolumeSource())) \
      .add_volume_mount(k8s_client.V1VolumeMount(name='outputs', mount_path='/output'))
    # Train the model
    train = dsl.ContainerOp(
        name='trainmodel',
        output_artifact_paths={
          'mlpipeline-ui-metadata': '/output/mlpipeline-ui-metadata.json',
          'mlpipeline-metrics': '/output/mlpipeline-metrics.json',
        },
        image='lightbend/ml-tf-recommender:0.0.1') \
      .add_env_variable(k8s_client.V1EnvVar(name='MINIO_URL',value='minio-service.kubeflow.svc.cluster.local:9000')) \
      .add_env_variable(k8s_client.V1EnvVar(name='MINIO_KEY', value='minio')) \
      .add_env_variable(k8s_client.V1EnvVar(name='MINIO_SECRET', value='minio123')) \
      .add_volume(k8s_client.V1Volume(name='outputs', empty_dir=k8s_client.V1EmptyDirVolumeSource())) \
      .add_volume_mount(k8s_client.V1VolumeMount(name='outputs', mount_path='/output'))
    train.after(data)
    # Publish the new model
    publish = dsl.ContainerOp(
        name='publishmodel',
        output_artifact_paths={
          'mlpipeline-ui-metadata': '/output/mlpipeline-ui-metadata.json',
          'mlpipeline-metrics': '/output/mlpipeline-metrics.json',
        },
        image='lightbend/kubeflow-modelpublisher:0.0.1') \
      .add_env_variable(k8s_client.V1EnvVar(name='MINIO_URL',value='http://minio-service.kubeflow.svc.cluster.local:9000')) \
      .add_env_variable(k8s_client.V1EnvVar(name='MINIO_KEY', value='minio')) \
      .add_env_variable(k8s_client.V1EnvVar(name='MINIO_SECRET', value='minio123')) \
      .add_env_variable(k8s_client.V1EnvVar(name='KAFKA_BROKERS', value='strimzi-kafka-brokers.boris.svc.cluster.local:9092')) \
      .add_env_variable(k8s_client.V1EnvVar(name='DEFAULT_RECOMMENDER_URL', value='http://recommender.boris.svc.cluster.local:8501')) \
      .add_env_variable(k8s_client.V1EnvVar(name='ALTERNATIVE_RECOMMENDER_URL', value='http://recommender1.boris.svc.cluster.local:8501')) \
      .add_volume(k8s_client.V1Volume(name='outputs', empty_dir=k8s_client.V1EmptyDirVolumeSource())) \
      .add_volume_mount(k8s_client.V1VolumeMount(name='outputs', mount_path='/output'))
    publish.after(train)
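A minimal sketch for compiling and running the pipeline, assuming the function above is decorated with @dsl.pipeline (the decorator is not shown) and that a Kubeflow Pipelines endpoint is reachable:

import kfp

# Assumes recommender_pipeline is decorated with @dsl.pipeline.
kfp.compiler.Compiler().compile(recommender_pipeline, 'recommender_pipeline.yaml')
kfp_client = kfp.Client()  # assumes a reachable KFP endpoint
kfp_client.create_run_from_pipeline_package('recommender_pipeline.yaml', arguments={})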
Example #7
def _add_elk_logging_sidecar(containers, volumes, volume_mounts,
                             component_name, log_info, filebeat):
    if not log_info or not filebeat:
        return
    log_dir = log_info.get("log_directory")
    if not log_dir:
        return
    sidecar_volume_mounts = []

    # Create the volume for component log files and volume mounts for the component and sidecar containers
    volumes.append(
        client.V1Volume(name="component-log",
                        empty_dir=client.V1EmptyDirVolumeSource()))
    volume_mounts.append(
        client.V1VolumeMount(name="component-log", mount_path=log_dir))
    sc_path = log_info.get("alternate_fb_path") or "{0}/{1}".format(
        filebeat["log_path"], component_name)
    sidecar_volume_mounts.append(
        client.V1VolumeMount(name="component-log", mount_path=sc_path))

    # Create the volume for sidecar data and the volume mount for it
    volumes.append(
        client.V1Volume(name="filebeat-data",
                        empty_dir=client.V1EmptyDirVolumeSource()))
    sidecar_volume_mounts.append(
        client.V1VolumeMount(name="filebeat-data",
                             mount_path=filebeat["data_path"]))

    # Create the volume for the sidecar configuration data and the volume mount for it
    # The configuration data is in a k8s ConfigMap that should be created when DCAE is installed.
    volumes.append(
        client.V1Volume(name="filebeat-conf",
                        config_map=client.V1ConfigMapVolumeSource(
                            name=filebeat["config_map"])))
    sidecar_volume_mounts.append(
        client.V1VolumeMount(name="filebeat-conf",
                             mount_path=filebeat["config_path"],
                             sub_path=filebeat["config_subpath"]))

    # Finally create the container for the sidecar
    containers.append(
        _create_container_object("filebeat",
                                 filebeat["image"],
                                 False,
                                 volume_mounts=sidecar_volume_mounts))
Example #8
 def _mount_empty_dir(self, op, name: str, path: str):
     # Add a writable volume
     op.add_volume(
         k8s_client.V1Volume(name=name,
                             empty_dir=k8s_client.V1EmptyDirVolumeSource()))
     op.add_volume_mount(
         k8s_client.V1VolumeMount(
             name=name,
             mount_path=path,
         ))
Example #9
 def get_obj(self):
     """
     :description: Generate volume spec.
     """
     if self.config.get("configmap"):
         return client.V1Volume(name=self.slug,
                                config_map=client.V1ConfigMapVolumeSource(
                                    name=self.config.get("configmap")))
     return client.V1Volume(name=self.slug,
                            empty_dir=client.V1EmptyDirVolumeSource())
Example #10
def get_volume(volume, claim_name=None, volume_mount=None):
    if claim_name:
        pv_claim = client.V1PersistentVolumeClaimVolumeSource(claim_name=claim_name)
        return client.V1Volume(name=volume, persistent_volume_claim=pv_claim)
    elif volume_mount:
        return client.V1Volume(
            name=volume,
            host_path=client.V1HostPathVolumeSource(path=volume_mount))
    else:
        empty_dir = client.V1EmptyDirVolumeSource()
        return client.V1Volume(name=volume, empty_dir=empty_dir)
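Usage sketch: each call below exercises one branch of get_volume; the names and paths are placeholders.

pvc_vol = get_volume("data", claim_name="my-pvc")                  # PersistentVolumeClaim-backed
host_vol = get_volume("host-data", volume_mount="/var/lib/data")   # hostPath-backed
scratch_vol = get_volume("scratch")                                # emptyDir-backed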
Example #11
def create_deployment_object(name=None,namespace=None,image=None,port=None,image_pull_policy=None,\
    imagePullSecret=None,labels=None,replicas=None,cpu=None,memory=None,liveness_probe=None,readiness_probe=None):
    #configure pod template container
    resources = None
    volumeMounts = []
    volumes = []
    if (cpu or memory):
        resources = client.V1ResourceRequirements(requests={
            "cpu":
            str(int(cpu / 2)) + "m",
            "memory":
            str(int(memory / 2)) + "Mi"
        },
                                                  limits={
                                                      "cpu": str(cpu) + "m",
                                                      "memory":
                                                      str(memory) + "Mi"
                                                  })
    vm1 = client.V1VolumeMount(name='log',
                               mount_path="/opt/microservices/logs")
    volumeMounts.append(vm1)
    v1 = client.V1Volume(name="log", empty_dir=client.V1EmptyDirVolumeSource())
    volumes.append(v1)
    image_pull_secret = client.V1LocalObjectReference(name=imagePullSecret)
    container = client.V1Container(name=name,
                                   image=image,
                                   image_pull_policy=image_pull_policy,
                                   ports=[
                                       client.V1ContainerPort(
                                           container_port=port,
                                           name="web",
                                           protocol="TCP")
                                   ],
                                   resources=resources,
                                   readiness_probe=readiness_probe,
                                   liveness_probe=liveness_probe,
                                   volume_mounts=volumeMounts)
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels=labels),
        spec=client.V1PodSpec(containers=[container],
                              image_pull_secrets=[image_pull_secret],
                              volumes=volumes))
    spec = client.V1DeploymentSpec(replicas=replicas,
                                   template=template,
                                   selector={'matchLabels': labels}
                                   #strategy
                                   )
    deployment = client.V1Deployment(api_version="apps/v1",
                                     kind="Deployment",
                                     metadata=client.V1ObjectMeta(
                                         name=name, namespace=namespace),
                                     spec=spec)
    return deployment
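A hedged sketch for submitting the deployment built above; all values are placeholders, and the probes are omitted (they default to None).

from kubernetes import client, config

config.load_kube_config()
# Placeholder name, image, resources, and pull secret.
deployment = create_deployment_object(name="demo",
                                      namespace="default",
                                      image="nginx:1.21",
                                      port=8080,
                                      image_pull_policy="IfNotPresent",
                                      imagePullSecret="regcred",
                                      labels={"app": "demo"},
                                      replicas=1,
                                      cpu=500,
                                      memory=512)
client.AppsV1Api().create_namespaced_deployment(namespace="default", body=deployment)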
Example #12
def get_volume(volume, claim_name=None, host_path=None, read_only=None):
    if claim_name:
        pv_claim = client.V1PersistentVolumeClaimVolumeSource(
            claim_name=claim_name, read_only=read_only)
        return client.V1Volume(name=volume, persistent_volume_claim=pv_claim)

    if host_path:
        return client.V1Volume(
            name=volume,
            host_path=client.V1HostPathVolumeSource(path=host_path))

    empty_dir = client.V1EmptyDirVolumeSource()
    return client.V1Volume(name=volume, empty_dir=empty_dir)
Example #13
    def inner(*args, **kwargs):
        op = fun(*args, **kwargs)
        op.output_artifact_paths = {
            'mlpipeline-ui-metadata': '/output/mlpipeline-ui-metadata.json',
            'mlpipeline-metrics': '/output/mlpipeline-metrics.json',
        }
        op.add_volume(
            k8s_client.V1Volume(name='volume',
                                empty_dir=k8s_client.V1EmptyDirVolumeSource()))
        op.container.add_volume_mount(
            k8s_client.V1VolumeMount(name='volume', mount_path='/output'))

        return op
Example #14
    def _mount_v3iod(task):
        from kubernetes import client as k8s_client

        def add_vol(name, mount_path, host_path):
            vol = k8s_client.V1Volume(
                name=name,
                host_path=k8s_client.V1HostPathVolumeSource(path=host_path,
                                                            type=""),
            )
            task.add_volume(vol).add_volume_mount(
                k8s_client.V1VolumeMount(mount_path=mount_path, name=name))

        add_vol(name="shm",
                mount_path="/dev/shm",
                host_path="/dev/shm/" + namespace)
        add_vol(
            name="v3iod-comm",
            mount_path="/var/run/iguazio/dayman",
            host_path="/var/run/iguazio/dayman/" + namespace,
        )

        vol = k8s_client.V1Volume(
            name="daemon-health",
            empty_dir=k8s_client.V1EmptyDirVolumeSource())
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(
                mount_path="/var/run/iguazio/daemon_health",
                name="daemon-health"))

        vol = k8s_client.V1Volume(
            name="v3io-config",
            config_map=k8s_client.V1ConfigMapVolumeSource(
                name=v3io_config_configmap, default_mode=420),
        )
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(mount_path="/etc/config/v3io",
                                     name="v3io-config"))

        task.add_env_variable(
            k8s_client.V1EnvVar(
                name="CURRENT_NODE_IP",
                value_from=k8s_client.V1EnvVarSource(
                    field_ref=k8s_client.V1ObjectFieldSelector(
                        api_version="v1", field_path="status.hostIP")),
            ))
        task.add_env_variable(
            k8s_client.V1EnvVar(name="IGZ_DATA_CONFIG_FILE",
                                value="/igz/java/conf/v3io.conf"))

        return task
Example #15
    def define_empty_dir_volume(self, name, medium="", size_limit=None):
        """
        Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux
        relabeling. By default, emptyDir volumes are stored on whatever medium is backing the node - that might be disk
        or SSD or network storage, depending on your environment. However, you can set the emptyDir.medium field to
        "Memory" to tell Kubernetes to mount a tmpfs (RAM-backed filesystem) for you instead. While tmpfs is very fast,
        be aware that unlike disks, tmpfs is cleared on node reboot and any files you write will count against your
        container’s memory limit.

        @param name: str, name of the volume that is created.
        @param medium: str, type of storage medium that should back this directory. Must be an empty string (the default, meaning the node's default medium) or "Memory".
        @param size_limit: total amount of local storage required for this emptyDir volume.
        """
        empty_dir_vol = client.V1EmptyDirVolumeSource(medium=medium,
                                                      size_limit=size_limit)
        return client.V1Volume(name=name, empty_dir=empty_dir_vol)
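Usage sketch: a RAM-backed scratch volume plus a matching mount; `builder` is a hypothetical instance of the enclosing class.

from kubernetes import client

# `builder` stands in for an instance of the class that defines define_empty_dir_volume.
shm_volume = builder.define_empty_dir_volume(name="scratch", medium="Memory", size_limit="256Mi")
shm_mount = client.V1VolumeMount(name="scratch", mount_path="/scratch")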
Example #16
def get_shm_volumes():
    """
    Mount a tmpfs volume at /dev/shm.
    This sets the size of /dev/shm to half of the node's RAM.
    By default, /dev/shm is very small, only 64MB.
    Some experiments will fail due to a lack of shared memory,
    such as experiments running on PyTorch.
    """
    volumes, volume_mounts = [], []
    shm_volume = client.V1Volume(
        name=constants.SHM_VOLUME,
        empty_dir=client.V1EmptyDirVolumeSource(medium='Memory'))
    volumes.append(shm_volume)
    shm_volume_mount = client.V1VolumeMount(name=shm_volume.name,
                                            mount_path='/dev/shm')
    volume_mounts.append(shm_volume_mount)
    return volumes, volume_mounts
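Sketch: wiring the shared-memory volume into a pod spec; the container name and image are placeholders.

from kubernetes import client

volumes, volume_mounts = get_shm_volumes()
# Placeholder training container that gets the enlarged /dev/shm.
trainer = client.V1Container(name="trainer",
                             image="pytorch/pytorch:latest",
                             volume_mounts=volume_mounts)
pod_spec = client.V1PodSpec(containers=[trainer], volumes=volumes)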
Example #17
def demo_op(name: str, is_exit_handler=False) -> ContainerOp:
    op = ContainerOp(name=name,
                     image='alpine:latest',
                     command=['sh', '-c'],
                     arguments=[
                         'echo "Running step $0" && echo "$1" > $2',
                         name,
                         markdown_metadata(name),
                         METADATA_FILE_PATH,
                     ],
                     is_exit_handler=is_exit_handler,
                     output_artifact_paths=default_artifact_path())
    op.add_volume(
        k8s.V1Volume(name='volume',
                     empty_dir=k8s.V1EmptyDirVolumeSource())).add_volume_mount(
                         k8s.V1VolumeMount(name='volume', mount_path=OUT_DIR))
    return op
Example #18
    def with_empty_dir(self, name: str, mount_path: str) -> Optional["HmlContainerOp"]:
        """
        Create an empty, writable volume with the given `name` mounted to the
        specified `mount_path`

        Args:
            name (str): The name of the volume to mount
            mount_path (str): The path to mount the empty volume


        Returns:
            A reference to the current `HmlContainerOp` (self)
        """
        # Add a writable volume
        self.op.add_volume(k8s_client.V1Volume(name=name, empty_dir=k8s_client.V1EmptyDirVolumeSource()))
        self.op.add_volume_mount(k8s_client.V1VolumeMount(name=name, mount_path=mount_path))
        return self
Example #19
    def _generate_pod_template(self, *args, **kwargs):
        containers = kwargs.get("containers", [])
        initial_containers = kwargs.get("initial_containers", [])
        volumes_json = kwargs.get("volumes", [])
        deploy_name = kwargs.get("name")
        labels = kwargs.get("labels", {})
        labels.update({"app": deploy_name})
        restart_policy = kwargs.get("restart_policy", "Always")
        volumes = []
        for volume in volumes_json:
            volume_name = volume.get("name")
            host_path = volume.get("host_path", None)
            empty_dir = volume.get("empty_dir", None)
            parameters = {}
            if host_path:
                host_path = client.V1HostPathVolumeSource(path=host_path)
                parameters.update({"host_path": host_path})
            if empty_dir:
                empty_dir = client.V1EmptyDirVolumeSource(**empty_dir)
                parameters.update({"empty_dir": empty_dir})
            persistent_volume_claim = volume.get("pvc", None)
            if persistent_volume_claim:
                persistent_volume_claim = client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=persistent_volume_claim)
                parameters.update(
                    {"persistent_volume_claim": persistent_volume_claim})
            volumes.append(client.V1Volume(name=volume_name, **parameters))
        initial_container_pods = self._generate_container_pods(
            initial_containers)
        container_pods = self._generate_container_pods(containers)
        pod_spec = client.V1PodSpec(
            init_containers=initial_container_pods,
            containers=container_pods,
            volumes=volumes,
            restart_policy=restart_policy,
        )
        spec_metadata = client.V1ObjectMeta(labels=labels)
        template_spec = client.V1PodTemplateSpec(metadata=spec_metadata,
                                                 spec=pod_spec)

        LOG.info("template spec %s", template_spec)

        return template_spec
Example #20
def create_deployment_object(scale, cpu, gpu, instance_name, mem, isSSD):
    container = client.V1Container(name=instance_name,
                                   resources=client.V1ResourceRequirements(
                                       requests=dict(cpu=cpu, memory=mem)))
    if isSSD:
        volume_medium = 'SSD'
    else:
        volume_medium = 'Memory'

    volume = client.V1Volume(name=instance_name,
                             empty_dir=client.V1EmptyDirVolumeSource(medium=volume_medium))
    template = client.V1PodTemplateSpec(spec = client.V1PodSpec(containers=[container], volumes =[volume]))
    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=scale, 
                                                template=template)
    deployment = client.ExtensionsV1beta1Deployment(api_version="extensions/v1beta1",
                                                    kind="Deployment",
                                                    spec=spec)

    return deployment
Example #21
File: iguazio.py  Project: rajacsp/mlrun
    def _mount_v3iod(task):
        from kubernetes import client as k8s_client

        def add_vol(name, mount_path, host_path):
            vol = k8s_client.V1Volume(
                name=name,
                host_path=k8s_client.V1HostPathVolumeSource(path=host_path,
                                                            type=''))
            task.add_volume(vol).add_volume_mount(
                k8s_client.V1VolumeMount(mount_path=mount_path, name=name))

        add_vol(name='shm',
                mount_path='/dev/shm',
                host_path='/dev/shm/' + namespace)
        add_vol(name='v3iod-comm',
                mount_path='/var/run/iguazio/dayman',
                host_path='/var/run/iguazio/dayman/' + namespace)

        vol = k8s_client.V1Volume(
            name='daemon-health',
            empty_dir=k8s_client.V1EmptyDirVolumeSource())
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(
                mount_path='/var/run/iguazio/daemon_health',
                name='daemon-health'))

        vol = k8s_client.V1Volume(
            name='v3io-config',
            config_map=k8s_client.V1ConfigMapVolumeSource(
                name=v3io_config_configmap, default_mode=420))
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(mount_path='/etc/config/v3io',
                                     name='v3io-config'))

        vol = k8s_client.V1Volume(name='v3io-auth',
                                  secret=k8s_client.V1SecretVolumeSource(
                                      secret_name=v3io_auth_secret,
                                      default_mode=420))
        task.add_volume(vol).add_volume_mount(
            k8s_client.V1VolumeMount(mount_path='/igz/.igz', name='v3io-auth'))

        return task
Example #22
 def kube_body(self):
     """
     Build kubernetes deployment
     :return: client.V1Deployment
     """
     return client.V1Deployment(
         metadata=client.V1ObjectMeta(name=self.ref),
         spec=client.V1DeploymentSpec(
             replicas=1,
             selector=client.V1LabelSelector(
                 match_labels={'mintzone/ref': self.ref}),
             template=client.V1PodTemplateSpec(
                 metadata=client.V1ObjectMeta(
                     labels={"mintzone/ref": self.ref}),
                 spec=client.V1PodSpec(
                     containers=self.containers,
                     volumes=[
                         client.V1Volume(
                             name='data',
                             empty_dir=client.V1EmptyDirVolumeSource(),
                         )
                     ]))))
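Hedged usage: submitting the deployment returned by kube_body(); `svc` stands in for an instance of the enclosing class and is hypothetical.

from kubernetes import client, config

config.load_kube_config()
# `svc` is a hypothetical instance of the class that defines kube_body().
client.AppsV1Api().create_namespaced_deployment(namespace="default", body=svc.kube_body())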
Example #23
def create_selenium_pod_spec(selenium_type,
                             container_name,
                             agent_container_ports,
                             env_vars,
                             node_selector=None,
                             pod_limits=None):
    if selenium_type == 'firefox':
        agent_image_name = 'pymada/selenium-firefox'
    elif selenium_type == 'chrome':
        agent_image_name = 'pymada/selenium-chrome'

    pod_resource_requirements = None
    if pod_limits is not None and type(pod_limits) == dict:
        pod_resource_requirements = client.V1ResourceRequirements(
            limits=pod_limits)

    selenium_ports = [client.V1ContainerPort(container_port=4444)
                      ] + agent_container_ports
    selenium_container = client.V1Container(
        name=container_name,
        image=agent_image_name,
        ports=selenium_ports,
        env=env_vars,
        volume_mounts=[
            client.V1VolumeMount(mount_path='/dev/shm', name='dshm')
        ],
        resources=pod_resource_requirements)

    pod_spec = client.V1PodSpec(
        containers=[selenium_container],
        node_selector=node_selector,
        volumes=[
            client.V1Volume(
                name='dshm',
                empty_dir=client.V1EmptyDirVolumeSource(medium='Memory'))
        ])

    return pod_spec
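Sketch: wrapping the returned pod spec in a V1Pod and creating it; the container name, port, and namespace are placeholders.

from kubernetes import client

# Placeholder agent port and empty environment.
pod_spec = create_selenium_pod_spec(selenium_type='chrome',
                                    container_name='selenium-agent',
                                    agent_container_ports=[client.V1ContainerPort(container_port=5001)],
                                    env_vars=[])
pod = client.V1Pod(metadata=client.V1ObjectMeta(name='selenium-agent'), spec=pod_spec)
client.CoreV1Api().create_namespaced_pod(namespace='default', body=pod)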
Example #24
 def _create_kube_podspec(self,
                          container,
                          shm_size=None,
                          scheduler_name=KubernetesConfig.DEFAULT_SCHEDULER,
                          namespace=KubernetesConfig.K8S_NAMESPACE):
     logger.info("Creating pod with scheduler_name = %s, namespace = %s" %
                 (scheduler_name, namespace))
     volumes = []
     if shm_size and isinstance(shm_size, int):
         dshm_vol = client.V1EmptyDirVolumeSource(medium="Memory",
                                                  size_limit=shm_size)
         volumes.append(client.V1Volume(name='dshm', empty_dir=dshm_vol))
     if not volumes:
         volumes = None
     spec = client.V1PodSpec(containers=[container],
                             scheduler_name=scheduler_name,
                             restart_policy="OnFailure",
                             volumes=volumes,
                             image_pull_secrets=[
                                 client.V1LocalObjectReference(
                                     KubernetesConfig.IMAGE_PULL_SECRET)
                             ])
     return spec
Example #25
    def _build_container_op(self, overrides=dict()):
        artifacts_volume = k8s_client.V1Volume(
            name="artifacts", empty_dir=k8s_client.V1EmptyDirVolumeSource())

        op = dsl.ContainerOp(name=f"{self.pipeline_name}-{self.op_name}",
                             image=self.container_image_url,
                             command=self.container_command,
                             arguments=self.container_args,
                             file_outputs=self.output_files,
                             output_artifact_paths=self.output_artifact_paths)

        # Mount an empty directory for us to write output to
        self._mount_empty_dir(op, "artifacts", self.kfp_artifact_path)

        # Apply the GCP Auth secret
        if self.gcp_auth_secret is not None:
            op.apply(use_gcp_secret(self.gcp_auth_secret))

        # Apply other secrets
        for secret_name in self.secrets:
            mount_path = self.secrets[secret_name]
            op.apply(self._bind_secret(secret_name, mount_path))

        logging.info(f"Build Container {self.pipeline_name}.{self.op_name}")

        # All my parameters
        for name, value in self.data.items():
            logging.info(f"\tContainer.ENV: {name}={value}")
            op.container.add_env_variable(V1EnvVar(name=name,
                                                   value=str(value)))

        for name, value in overrides.items():
            logging.info(f"\tContainer.ENV: {name}={value} (override)")
            op.container.add_env_variable(V1EnvVar(name=name,
                                                   value=str(value)))

        return op
Example #26
def create_rc_object(scale, cpu, gpu, instance_name, mem, isSSD):
    resources = client.V1ResourceRequirements(
        requests=dict(cpu=cpu, memory=mem))
    containers = client.V1Container(name=instance_name, resources=resources)
    if isSSD:
        medium = 'Memory'
    else:
        medium = 'SSD'

    empty_dir = client.V1EmptyDirVolumeSource(medium=medium, size_limit=mem)
    volumes = client.V1Volume(name=instance_name, empty_dir=empty_dir)
    pod_spec = client.V1PodSpec(containers=[containers], volumes=[volumes])
    template = client.V1PodTemplateSpec(spec=pod_spec)
    rc_spec = client.V1ReplicationControllerSpec(selector=dict(name=instance_name),
                                                 replicas=scale,
                                                 template=template)
    metadata = client.V1ObjectMeta(labels=dict(name=instance_name),
                                   name=instance_name)
    rc = client.V1ReplicationController(api_version='v1',
                                        kind='ReplicationController',
                                        spec=rc_spec,
                                        metadata=metadata)
    return rc
Example #27
def get_mcrouter_deployment_object(cluster_object):
    name = cluster_object['metadata']['name']
    namespace = cluster_object['metadata']['namespace']

    try:
        replicas = cluster_object['spec']['mcrouter']['replicas']
    except KeyError:
        replicas = 1

    try:
        mcrouter_limit_cpu = \
            cluster_object['spec']['mcrouter']['mcrouter_limit_cpu']
    except KeyError:
        mcrouter_limit_cpu = '50m'

    try:
        mcrouter_limit_memory = \
            cluster_object['spec']['mcrouter']['mcrouter_limit_memory']
    except KeyError:
        mcrouter_limit_memory = '32Mi'

    deployment = client.AppsV1beta1Deployment()

    # Metadata
    deployment.metadata = client.V1ObjectMeta(
        name="{}-router".format(name),
        namespace=namespace,
        labels=get_default_labels(name=name))
    deployment.metadata.labels['service-type'] = 'mcrouter'

    # Spec
    deployment.spec = client.AppsV1beta1DeploymentSpec(
        replicas=replicas, template=client.V1PodTemplateSpec())

    deployment.spec.template = client.V1PodTemplateSpec()
    deployment.spec.template.metadata = client.V1ObjectMeta(
        labels=deployment.metadata.labels)
    deployment.spec.template.spec = client.V1PodSpec(containers=[])

    # Mcrouter container
    mcrouter_config_volumemount = client.V1VolumeMount(
        name='mcrouter-config', read_only=False, mount_path='/etc/mcrouter')

    mcrouter_port = client.V1ContainerPort(name='mcrouter',
                                           container_port=11211,
                                           protocol='TCP')
    mcrouter_resources = client.V1ResourceRequirements(
        limits={
            'cpu': mcrouter_limit_cpu,
            'memory': mcrouter_limit_memory
        },
        requests={
            'cpu': mcrouter_limit_cpu,
            'memory': mcrouter_limit_memory
        })
    mcrouter_container = client.V1Container(
        name='mcrouter',
        command=[
            'mcrouter', '-p', '11211', '-f', '/etc/mcrouter/mcrouter.conf'
        ],
        image='kubestack/mcrouter:v0.36.0-kbst1',
        ports=[mcrouter_port],
        volume_mounts=[mcrouter_config_volumemount],
        resources=mcrouter_resources)

    # Mcrouter config sidecar
    sidecar_resources = client.V1ResourceRequirements(limits={
        'cpu': '25m',
        'memory': '8Mi'
    },
                                                      requests={
                                                          'cpu': '25m',
                                                          'memory': '8Mi'
                                                      })
    sidecar_config_volumemount = client.V1VolumeMount(
        name='mcrouter-config', read_only=True, mount_path='/etc/mcrouter')
    sidecar_container = client.V1Container(
        name='config-sidecar',
        args=[
            "--debug", "--output=/etc/mcrouter/mcrouter.conf",
            "{}-backend.{}.svc.cluster.local".format(name, namespace)
        ],
        image='kubestack/mcrouter_sidecar:v0.1.0',
        volume_mounts=[mcrouter_config_volumemount],
        resources=sidecar_resources)

    # Config Map Volume
    mcrouter_config_volume = client.V1Volume(
        name='mcrouter-config', empty_dir=client.V1EmptyDirVolumeSource())
    deployment.spec.template.spec.volumes = [mcrouter_config_volume]

    # Metrics container
    metrics_port = client.V1ContainerPort(name='metrics',
                                          container_port=9150,
                                          protocol='TCP')
    metrics_resources = client.V1ResourceRequirements(limits={
        'cpu': '50m',
        'memory': '16Mi'
    },
                                                      requests={
                                                          'cpu': '50m',
                                                          'memory': '16Mi'
                                                      })
    metrics_container = client.V1Container(
        name='prometheus-exporter',
        image='kubestack/mcrouter_exporter:v0.0.1',
        args=[
            '-mcrouter.address', 'localhost:11211', '-web.listen-address',
            ':9150'
        ],
        ports=[metrics_port],
        resources=metrics_resources)

    deployment.spec.template.spec.containers = [
        mcrouter_container, sidecar_container, metrics_container
    ]
    return deployment
Example #28
""" Creating a Job in a specific namespace"""

from kubernetes import client, config

# Fetching and loading Kubernetes Information
config.load_kube_config()

extension = client.BatchV1Api()

# Volume
volume = client.V1Volume(name="test-volume",
                         empty_dir=client.V1EmptyDirVolumeSource(medium=""))

# Container
container = client.V1Container(
    name="jobtest",
    image="nginx:1.7.9",
    image_pull_policy="IfNotPresent",
    ports=[client.V1ContainerPort(container_port=80)],
    volume_mounts=[
        client.V1VolumeMount(name=volume.name, mount_path="/kube-example")
    ],
)

# Init-Container
init_container = client.V1Container(
    name="init-container",
    image="alpine",
    image_pull_policy="IfNotPresent",
    command=["touch kube-test.txt"],
    volume_mounts=[
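
# The original snippet is truncated above; what follows is a minimal, hedged
# sketch (not from the source) of how the Job could be assembled and submitted
# using the volume, container, and init container defined earlier.
pod_spec = client.V1PodSpec(restart_policy="Never",
                            init_containers=[init_container],
                            containers=[container],
                            volumes=[volume])
template = client.V1PodTemplateSpec(
    metadata=client.V1ObjectMeta(labels={"app": "jobtest"}),
    spec=pod_spec)
job = client.V1Job(api_version="batch/v1",
                   kind="Job",
                   metadata=client.V1ObjectMeta(name="jobtest"),
                   spec=client.V1JobSpec(template=template, backoff_limit=0))

# `extension` is the BatchV1Api client created at the top of the snippet.
extension.create_namespaced_job(namespace="default", body=job)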
Example #29
def create_job_object(name: str,
                      container_image: str,
                      env_list: dict,
                      command: List[str],
                      command_args: List[str],
                      volumes: List[Dict],
                      init_containers: List[Dict],
                      output: Output,
                      namespace: str = "stackl",
                      container_name: str = "jobcontainer",
                      api_version: str = "batch/v1",
                      image_pull_policy: str = "Always",
                      ttl_seconds_after_finished: int = 3600,
                      restart_policy: str = "Never",
                      backoff_limit: int = 0,
                      active_deadline_seconds: int = 3600,
                      service_account: str = "stackl-agent-stackl-agent",
                      image_pull_secrets: List[str] = [],
                      labels=None) -> client.V1Job:
    # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements
    """Creates a Job object using the Kubernetes client

    :param name: Job name affix
    :type name: str
    :param container_image: automation container image
    :type container_image: str
    :param env_list: Dict with key/values for the environment inside the automation container
    :type env_list: dict
    :param command: entrypoint command
    :type command: List[str]
    :param command_args: command arguments
    :type command_args: List[str]
    :param volumes: volumes and volumemounts
    :type volumes: List[Dict]
    :param image_pull_secrets: secrets to pull images
    :type image_pull_secrets: List[str]
    :param init_containers: list with init_containers
    :type init_containers: List[Dict]
    :param output: output Object
    :type output: Output
    :param namespace: Kubernetes namespace, defaults to "stackl"
    :type namespace: str, optional
    :param container_name: name of automation container, defaults to "jobcontainer"
    :type container_name: str, optional
    :param api_version: Job api version, defaults to "batch/v1"
    :type api_version: str, optional
    :param image_pull_policy: always pull latest images, defaults to "Always"
    :type image_pull_policy: str, optional
    :param ttl_seconds_after_finished: Remove jobs after execution with ttl, defaults to 3600
    :type ttl_seconds_after_finished: int, optional
    :param restart_policy: Restart the pod on the same node after failure, defaults to "Never"
    :type restart_policy: str, optional
    :param backoff_limit: Retries after failure, defaults to 0
    :type backoff_limit: int, optional
    :param active_deadline_seconds: Timeout on a job, defaults to 3600 seconds
    :type active_deadline_seconds: int, optional
    :param service_account: Kubernetes service account, defaults to "stackl-agent-stackl-agent"
    :type service_account: str, optional
    :param labels: metadata labels, defaults to None
    :type labels: dict, optional
    :return: automation Job object
    :rtype: client.V1Job
    """
    id_job = id_generator()
    name = name + "-" + id_job
    body = client.V1Job(api_version=api_version, kind="Job")
    body.metadata = client.V1ObjectMeta(namespace=namespace, name=name)
    body.status = client.V1JobStatus()
    template = client.V1PodTemplate()
    template.template = client.V1PodTemplateSpec()
    k8s_volumes = []

    cms = []

    logging.debug(f"volumes: {volumes}")
    # create a k8s volume for each element in volumes
    for vol in volumes:
        vol_name = name + "-" + vol["name"]
        k8s_volume = client.V1Volume(name=vol_name)
        if vol["type"] == "config_map":
            config_map = client.V1ConfigMapVolumeSource()
            config_map.name = vol_name
            k8s_volume.config_map = config_map
            cms.append(create_cm(vol_name, namespace, vol['data']))
            vol['name'] = vol_name
        if vol["type"] == "empty_dir":
            k8s_volume.empty_dir = client.V1EmptyDirVolumeSource(
                medium="Memory")
            vol['name'] = vol_name
        k8s_volumes.append(k8s_volume)

    logging.debug(f"Volumes created for job {name}: {k8s_volumes}")

    # create a volume mount for each element in volumes
    k8s_volume_mounts = []
    for vol in volumes:
        if vol["mount_path"]:
            volume_mount = client.V1VolumeMount(name=vol["name"],
                                                mount_path=vol["mount_path"])
            if "sub_path" in vol:
                volume_mount.sub_path = vol["sub_path"]
            k8s_volume_mounts.append(volume_mount)

    logging.debug(f"Volume mounts created for job {name}: {k8s_volume_mounts}")

    # create an environment list
    k8s_env_list = []

    if env_list:
        for key, value in env_list.items():
            if isinstance(value, dict):
                if 'config_map_key_ref' in value:
                    k8s_env_from = client.V1EnvVar(
                        name=key,
                        value_from=client.V1EnvVarSource(
                            config_map_key_ref=client.V1ConfigMapKeySelector(
                                name=value['config_map_key_ref']["name"],
                                key=value['config_map_key_ref']["key"])))
                    k8s_env_list.append(k8s_env_from)
                elif 'field_ref' in value:
                    k8s_env_from = client.V1EnvVar(
                        name=key,
                        value_from=client.V1EnvVarSource(
                            field_ref=client.V1ObjectFieldSelector(
                                field_path=value['field_ref'])))
                    k8s_env_list.append(k8s_env_from)
            else:
                k8s_env = client.V1EnvVar(name=key, value=value)
                k8s_env_list.append(k8s_env)

    k8s_env_from_list = []

    # if env_from:
    #     for env in env_from:
    #         if 'config_map_ref' in env:
    #             k8s_env_from = client.V1EnvFromSource(
    #                 config_map_ref=env['config_map_ref'])
    #             k8s_env_from_list.append(k8s_env_from)
    #         elif 'secret_ref' in env:
    #             k8s_env_from = client.V1EnvFromSource(
    #                 secret_ref=env['secret_ref'])
    #             k8s_env_from_list.append(k8s_env_from)

    logging.debug(f"Environment list created for job {name}: {k8s_env_list}")
    print(f"Environment list created for job {name}: {k8s_env_list}")

    container = client.V1Container(name=container_name,
                                   image=container_image,
                                   env=k8s_env_list,
                                   volume_mounts=k8s_volume_mounts,
                                   image_pull_policy=image_pull_policy,
                                   command=command,
                                   args=command_args,
                                   env_from=k8s_env_from_list)

    k8s_init_containers = []

    logging.debug(f"Init containers for job {name}: {init_containers}")
    for c in init_containers:
        k8s_c = client.V1Container(name=c['name'],
                                   image=c['image'],
                                   volume_mounts=k8s_volume_mounts,
                                   env=k8s_env_list)

        if 'args' in c:
            k8s_c.args = c['args']

        k8s_init_containers.append(k8s_c)

    k8s_secrets = []
    for secret in image_pull_secrets:
        k8s_secrets.append(client.V1LocalObjectReference(name=secret))

    logging.debug(f"Secret list created for job {name}: {k8s_secrets}")

    containers = [container]
    if output:
        output.volume_mounts = k8s_volume_mounts
        output.env = k8s_env_list
        output_containers = output.containers
        containers = containers + output_containers

    template.template.metadata = client.V1ObjectMeta(labels=labels)
    template.template.spec = client.V1PodSpec(
        containers=containers,
        restart_policy=restart_policy,
        image_pull_secrets=k8s_secrets,
        volumes=k8s_volumes,
        init_containers=k8s_init_containers,
        service_account_name=service_account)
    template.template = client.V1PodTemplateSpec(
        metadata=template.template.metadata, spec=template.template.spec)
    body.spec = client.V1JobSpec(
        ttl_seconds_after_finished=ttl_seconds_after_finished,
        template=template.template,
        backoff_limit=backoff_limit,
        active_deadline_seconds=active_deadline_seconds)

    return body, cms
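A hedged usage sketch for create_job_object; the image, command, and volume definitions are placeholders, and `output` is passed as None because the Output helper is not shown here.

from kubernetes import client

# Placeholder image, command, and a single emptyDir volume; output=None because
# the Output class is not part of this snippet.
job, config_maps = create_job_object(
    name="invoke-automation",
    container_image="example/automation:latest",
    env_list={"LOG_LEVEL": "info"},
    command=["/bin/sh", "-c"],
    command_args=["echo running"],
    volumes=[{"name": "workdir", "type": "empty_dir", "mount_path": "/work"}],
    init_containers=[],
    output=None)

for cm in config_maps:
    client.CoreV1Api().create_namespaced_config_map(namespace="stackl", body=cm)
client.BatchV1Api().create_namespaced_job(namespace="stackl", body=job)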
Example #30
def create_deployment_old(config_file):
    """
    Create IBM Spectrum Scale CSI Operator deployment object in operator namespace using
    deployment_operator_image_for_crd and deployment_driver_image_for_crd parameters from
    config.json file

    Args:
        param1: config_file - configuration json file

    Returns:
       None

    Raises:
        Raises an exception on kubernetes client api failure and asserts

    """

    deployment_apps_api_instance = client.AppsV1Api()

    deployment_labels = {
        "app.kubernetes.io/instance": "ibm-spectrum-scale-csi-operator",
        "app.kubernetes.io/managed-by": "ibm-spectrum-scale-csi-operator",
        "app.kubernetes.io/name": "ibm-spectrum-scale-csi-operator",
        "product": "ibm-spectrum-scale-csi",
        "release": "ibm-spectrum-scale-csi-operator"
    }

    deployment_annotations = {
        "productID": "ibm-spectrum-scale-csi-operator",
        "productName": "IBM Spectrum Scale CSI Operator",
        "productVersion": "2.0.0"
    }

    deployment_metadata = client.V1ObjectMeta(
        name="ibm-spectrum-scale-csi-operator",
        labels=deployment_labels,
        namespace=namespace_value)

    deployment_selector = client.V1LabelSelector(
        match_labels={
            "app.kubernetes.io/name": "ibm-spectrum-scale-csi-operator"
        })

    podtemplate_metadata = client.V1ObjectMeta(
        labels=deployment_labels, annotations=deployment_annotations)

    pod_affinity = client.V1Affinity(node_affinity=client.V1NodeAffinity(
        required_during_scheduling_ignored_during_execution=client.
        V1NodeSelector(node_selector_terms=[
            client.V1NodeSelectorTerm(match_expressions=[
                client.V1NodeSelectorRequirement(key="beta.kubernetes.io/arch",
                                                 operator="Exists")
            ])
        ])))
    ansible_pod_container = client.V1Container(
        image=config_file["deployment_operator_image_for_crd"],
        command=[
            "/usr/local/bin/ao-logs", "/tmp/ansible-operator/runner", "stdout"
        ],
        liveness_probe=client.V1Probe(
            _exec=client.V1ExecAction(command=["/health_check.sh"]),
            initial_delay_seconds=10,
            period_seconds=30),
        readiness_probe=client.V1Probe(
            _exec=client.V1ExecAction(command=["/health_check.sh"]),
            initial_delay_seconds=3,
            period_seconds=1),
        name="ansible",
        image_pull_policy="IfNotPresent",
        security_context=client.V1SecurityContext(
            capabilities=client.V1Capabilities(drop=["ALL"])),
        volume_mounts=[
            client.V1VolumeMount(mount_path="/tmp/ansible-operator/runner",
                                 name="runner",
                                 read_only=True)
        ],
        env=[
            client.V1EnvVar(
                name="CSI_DRIVER_IMAGE",
                value=config_file["deployment_driver_image_for_crd"])
        ])

    operator_pod_container = client.V1Container(
        image=config_file["deployment_operator_image_for_crd"],
        name="operator",
        image_pull_policy="IfNotPresent",
        liveness_probe=client.V1Probe(
            _exec=client.V1ExecAction(command=["/health_check.sh"]),
            initial_delay_seconds=10,
            period_seconds=30),
        readiness_probe=client.V1Probe(
            _exec=client.V1ExecAction(command=["/health_check.sh"]),
            initial_delay_seconds=3,
            period_seconds=1),
        security_context=client.V1SecurityContext(
            capabilities=client.V1Capabilities(drop=["ALL"])),
        env=[
            client.V1EnvVar(name="WATCH_NAMESPACE",
                            value_from=client.V1EnvVarSource(
                                field_ref=client.V1ObjectFieldSelector(
                                    field_path="metadata.namespace"))),
            client.V1EnvVar(name="POD_NAME",
                            value_from=client.V1EnvVarSource(
                                field_ref=client.V1ObjectFieldSelector(
                                    field_path="metadata.name"))),
            client.V1EnvVar(name="OPERATOR_NAME",
                            value="ibm-spectrum-scale-csi-operator"),
            client.V1EnvVar(
                name="CSI_DRIVER_IMAGE",
                value=config_file["deployment_driver_image_for_crd"])
        ],
        volume_mounts=[
            client.V1VolumeMount(mount_path="/tmp/ansible-operator/runner",
                                 name="runner")
        ])
    pod_spec = client.V1PodSpec(
        affinity=pod_affinity,
        containers=[ansible_pod_container, operator_pod_container],
        service_account_name="ibm-spectrum-scale-csi-operator",
        volumes=[
            client.V1Volume(
                empty_dir=client.V1EmptyDirVolumeSource(medium="Memory"),
                name="runner")
        ])

    podtemplate_spec = client.V1PodTemplateSpec(metadata=podtemplate_metadata,
                                                spec=pod_spec)

    deployment_spec = client.V1DeploymentSpec(replicas=1,
                                              selector=deployment_selector,
                                              template=podtemplate_spec)

    body_dep = client.V1Deployment(kind='Deployment',
                                   api_version='apps/v1',
                                   metadata=deployment_metadata,
                                   spec=deployment_spec)

    try:
        LOGGER.info("creating deployment for operator")
        deployment_apps_api_response = deployment_apps_api_instance.create_namespaced_deployment(
            namespace=namespace_value, body=body_dep)
        LOGGER.debug(str(deployment_apps_api_response))
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling RbacAuthorizationV1Api->create_namespaced_deployment: {e}"
        )
        assert False