Example #1
    def _add_volumes(self):
        # See https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Volume.md
        self.workspace_vc = VolumeClaim(self.job_name+"-wvc", self.workspace_size, self.namespace)
        wrkspc_response = self.workspace_vc.create()
        self.add_volume(
            name="workspace",
            persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                claim_name=self.job_name+"-wvc"
            )
        )

        # See https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1VolumeMount.md
        volume_mounts = [
            client.V1VolumeMount(mount_path=self.workspace_path, name="workspace"),
        ]
        # Add output volume, if outputs_path is not the same as the workspace_path.
        if self.outputs_path and self.outputs_path != self.workspace_path:
            self.outputs_vc = VolumeClaim(self.job_name+"-ovc", self.output_size, self.namespace)
            out_response = self.outputs_vc.create()
            self.add_volume(
                name="outputs",
                persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=self.job_name+"-ovc"
                )
            )
            volume_mounts.append(
                client.V1VolumeMount(mount_path=self.outputs_path, name="outputs"),
            )

        return volume_mounts
Example #2
def tf_blerssi_pipeline(model_name='blerssi',
                        pvc_name='nfs',
                        tf_model_dir='model',
                        tf_export_dir='model/export',
                        training_steps=200,
                        batch_size=100,
                        learning_rate=0.01):

    nfs_pvc = k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name='nfs')
    nfs_volume = k8s_client.V1Volume(name='nfs',
                                     persistent_volume_claim=nfs_pvc)
    nfs_volume_mount = k8s_client.V1VolumeMount(mount_path='/mnt/', name='nfs')
    tensorboard_dir = 'model'
    blerssi_training = blerssi_train_op('/mnt/%s' % tf_model_dir,
                                        '/mnt/%s' % tf_export_dir,
                                        training_steps, batch_size,
                                        learning_rate)
    blerssi_training.add_volume(nfs_volume)
    blerssi_training.add_volume_mount(nfs_volume_mount)

    serve = kubeflow_deploy_op('/mnt/%s' % tf_export_dir, model_name, pvc_name)
    serve.add_volume(nfs_volume)
    serve.add_volume_mount(nfs_volume_mount)
    serve.after(blerssi_training)

    web_ui = kubeflow_web_ui_op()
    web_ui.add_volume(nfs_volume)
    web_ui.add_volume_mount(nfs_volume_mount)
    web_ui.after(serve)

    tensorboard = kubeflow_tensorboard_op('/mnt/%s' % tensorboard_dir,
                                          model_name, pvc_name)
    tensorboard.add_volume(nfs_volume)
    tensorboard.add_volume_mount(nfs_volume_mount)
    tensorboard.after(blerssi_training)
Example #3
    def __init__(self,
                 execution_environment,
                 volume_claim_name='nfs-wf-volume',
                 version='v1'):
        self.execution_environment = execution_environment

        if self.execution_environment == "in-cluster":
            self.pipeline_components_path = "apps/networking/ble-localization/onprem/pipelines/"
        else:
            self.pipeline_components_path = ""

        component_root_train = self.pipeline_components_path + 'components/' + version + '/train/'
        component_root_serve = self.pipeline_components_path + 'components/' + version + '/serve/'
        component_root_inference_app = self.pipeline_components_path + 'components/' + version + '/inference-app/'
        component_root_tensorboard = self.pipeline_components_path + 'components/' + version + '/tensorboard/'

        self.train_op = kfp.components.load_component_from_file(
            os.path.join(component_root_train, 'component.yaml'))
        self.serve_model_op = kfp.components.load_component_from_file(
            os.path.join(component_root_serve, 'component.yaml'))
        self.inference_app_op = kfp.components.load_component_from_file(
            os.path.join(component_root_inference_app, 'component.yaml'))
        self.tensorboard_op = kfp.components.load_component_from_file(
            os.path.join(component_root_tensorboard, 'component.yaml'))

        nfs_pvc = client.V1PersistentVolumeClaimVolumeSource(
            claim_name=volume_claim_name)
        nfs_volume = client.V1Volume(name=volume_claim_name,
                                     persistent_volume_claim=nfs_pvc)
        self.nfs_volume_mount = client.V1VolumeMount(mount_path='/mnt/',
                                                     name='nfs')
Example #4
def create_job_object(runner_image, region, s3_path, pvc_name):
  target_folder = get_target_folder(s3_path)

  # Configure the Pod template container
  container = k8s_client.V1Container(
      name="copy-dataset-worker",
      image=runner_image,
      command=["aws"],
      args=["s3", "sync", s3_path, "/mnt/" + target_folder],
      volume_mounts=[k8s_client.V1VolumeMount(name="data-storage", mount_path='/mnt')],
      env=[k8s_client.V1EnvVar(name="AWS_REGION", value=region),
        k8s_client.V1EnvVar(name="AWS_ACCESS_KEY_ID", value_from=k8s_client.V1EnvVarSource(secret_key_ref=k8s_client.V1SecretKeySelector(key="AWS_ACCESS_KEY_ID", name="aws-secret"))),
        k8s_client.V1EnvVar(name="AWS_SECRET_ACCESS_KEY", value_from=k8s_client.V1EnvVarSource(secret_key_ref=k8s_client.V1SecretKeySelector(key="AWS_SECRET_ACCESS_KEY", name="aws-secret")))
        ],
    )
  volume = k8s_client.V1Volume(
    name='data-storage',
    persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name)
  )
  # Create and configure the spec section
  template = k8s_client.V1PodTemplateSpec(
      # metadata=k8s_client.V1ObjectMeta(labels={"app":"copy-dataset-worker"}),
      spec=k8s_client.V1PodSpec(containers=[container], volumes=[volume], restart_policy="OnFailure"))
  # Create the specification of the Job
  spec = k8s_client.V1JobSpec(
      # selector=k8s_client.V1LabelSelector(match_labels={"app":"copy-dataset-worker"}),
      template=template)
  # Instantiate the Job object
  job = k8s_client.V1Job(
      api_version="batch/v1",
      kind="Job",
      metadata=k8s_client.V1ObjectMeta(name=container.name),
      spec=spec)

  return job
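The function above only builds the V1Job object; nothing is submitted to the cluster. A minimal, hedged sketch of how such an object is typically submitted with the official Kubernetes Python client (the kubeconfig loading call, namespace, and argument values below are assumptions for illustration):

from kubernetes import client as k8s_client, config

# Load credentials; use config.load_incluster_config() when running inside a cluster.
config.load_kube_config()

# Assumed example arguments; replace with real values.
job = create_job_object(
    runner_image="amazon/aws-cli:latest",
    region="us-west-2",
    s3_path="s3://my-bucket/dataset",
    pvc_name="data-pvc",
)
k8s_client.BatchV1Api().create_namespaced_job(namespace="default", body=job)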
Example #5
def tf_mnist_pipeline(
        model_name='mnist',
        model_export_dir='model/export',
        training_steps=200,
        batch_size=100,
        learning_rate=0.01):

    # k8s volume resources for workflow
    nfs_pvc = k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name='nfs')
    nfs_volume = k8s_client.V1Volume(name='nfs', persistent_volume_claim=nfs_pvc)
    nfs_volume_mount = k8s_client.V1VolumeMount(mount_path='/mnt/', name='nfs')

    mnist_training = mnist_train_op(
        '/mnt/%s' % model_export_dir,
        training_steps,
        batch_size,
        learning_rate)
    mnist_training.add_volume(nfs_volume)
    mnist_training.add_volume_mount(nfs_volume_mount)

    serve = kubeflow_serve_op('/mnt/%s' % model_export_dir)
    serve.add_volume(nfs_volume)
    serve.add_volume_mount(nfs_volume_mount)
    serve.after(mnist_training)

    web_ui = kubeflow_web_ui_op()
    web_ui.add_volume(nfs_volume)
    web_ui.add_volume_mount(nfs_volume_mount)
    web_ui.after(serve)
Example #6
def default_train(
    resource_group,
    workspace,
    dataset
):
    """Pipeline steps"""

    operations = {}
    callback_url = 'kubemlopsbot-svc.kubeflow.svc.cluster.local:8080'

    exit_op = dsl.ContainerOp(
        name='Exit Handler',
        image="curlimages/curl",
        command=['curl'],
        arguments=[
            '-d', get_callback_payload(TRAIN_FINISH_EVENT),
            callback_url
        ]
    )

    with dsl.ExitHandler(exit_op):
        start_callback = \
            dsl.UserContainer('callback',
                              'curlimages/curl',
                              command=['curl'],
                              args=['-d',
                                    get_callback_payload(TRAIN_START_EVENT), callback_url])  # noqa: E501

        operations['start'] = dsl.ContainerOp(
            name='start',
            init_containers=[start_callback],
            image="busybox",
            command=['sh', '-c'],
            arguments=['echo "Pipeline starting"']
        )

        operations['end'] = dsl.ContainerOp(
            name='End',
            image="curlimages/curl",
            command=['curl'],
            arguments=[
                '-d', get_callback_payload("Model is registered"),
                callback_url
            ]
        )
        operations['end'].after(operations['start'])

    for _, op_1 in operations.items():
        op_1.container.set_image_pull_policy("Always")
        op_1.add_volume(
            k8s_client.V1Volume(
              name='azure',
              persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(  # noqa: E501
                claim_name='azure-managed-file')
            )
        ).add_volume_mount(k8s_client.V1VolumeMount(
            mount_path='/mnt/azure', name='azure'))
Example #7
    def __init__(self, name, storage_pvc):
        internal_volume_name = 'nfs-server-volume'
        mount_path = '/exports'

        ctr = client.V1Container(
            name=name,
            image=self.IMAGE,
            ports=[
                client.V1ContainerPort(name=k, container_port=v)
                for k, v in self.PORTS.items()
            ],
            security_context=client.V1SecurityContext(privileged=True),
            volume_mounts=[
                client.V1VolumeMount(mount_path=mount_path,
                                     name=internal_volume_name)
            ])
        volume_source = client.V1PersistentVolumeClaimVolumeSource(
            claim_name=storage_pvc)
        volume = client.V1Volume(name=internal_volume_name,
                                 persistent_volume_claim=volume_source)
        pod_spec = client.V1PodSpec(containers=[ctr], volumes=[volume])
        pod_metadata = client.V1ObjectMeta(labels=self.LABELS)
        pod_template = client.V1PodTemplateSpec(metadata=pod_metadata,
                                                spec=pod_spec)
        rs_spec = client.V1ReplicaSetSpec(
            replicas=1,
            selector=client.V1LabelSelector(match_labels=self.LABELS),
            template=pod_template)
        metadata = client.V1ObjectMeta(name=name, labels=self.LABELS)
        super(NFSDeployment, self).__init__(metadata=metadata, spec=rs_spec)
Example #8
def new_container(step_name, step_image, step_command=None, step_arguments=None, step_outputs=None, output_to_local=[], component_directory_location='', step_inputs=[], to_be_eval=False):
    cp_input_command = 'COMP_DIR=' + component_directory_location  + ' && echo "Starting component: ' + step_name + '..."'
    if (len(step_inputs)!=0):
        for input in step_inputs:
            cp_input_command += ' && cp ' + input + ' .'
    if step_command:
        step_command = cp_input_command + ' && ' + step_command
    else:
        step_command = cp_input_command
    num_output = len(output_to_local)
    if (num_output!=0):
        for index, output in enumerate(output_to_local):
            if (num_output - index <= 3 and to_be_eval is True):
                step_command = step_command + ' && find /mnt -iname "' + output + '" -exec cp {} $COMP_DIR \\;'
            else:
                step_command = step_command + ' && find -iname "' + output + '" -exec cp {} $COMP_DIR \\;'
            step_command = step_command + ' && echo "' + component_directory_location + '/' + output + '" >> /output' + str(index+1) + '.txt'
    container = dsl.ContainerOp(
        name=step_name,
        image=step_image,
        command=['sh', '-c', (step_command)],
        arguments=step_arguments,
        file_outputs=step_outputs
      )
    container.add_volume(
        k8s_client.V1Volume(name=pv_name, persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
            claim_name=pvc_name)))
    container.add_volume_mount(k8s_client.V1VolumeMount(mount_path='/mnt', name=pv_name))
    return container
Example #9
def mnist(cm_bucket_name: str,
          cm_path: str,
          epoch: int = 5,
          dropout: float = 0.2,
          hidden_layer_size: int = 512,
          deploy: bool = False):
    pvc = k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name='workflow-pvc')
    volume = k8s_client.V1Volume(name='workflow-nfs',
                                 persistent_volume_claim=pvc)
    volume_mount = k8s_client.V1VolumeMount(mount_path='/mnt', name='workflow-nfs')

    s3_endpoint = k8s_client.V1EnvVar('S3_ENDPOINT', f'http://{find_minio_ip()}:9000')
    s3_access_key = k8s_client.V1EnvVar('AWS_ACCESS_KEY_ID', 'minio')
    s3_secret_key = k8s_client.V1EnvVar('AWS_SECRET_ACCESS_KEY', 'minio123')

    preprocess = preprocess_op() \
        .add_volume(volume)\
        .add_volume_mount(volume_mount)

    train = train_op(preprocess.output, epoch, dropout, hidden_layer_size) \
        .add_volume_mount(volume_mount)

    predictions = prediction_op(train.output, preprocess.output, cm_bucket_name, cm_path) \
        .add_volume_mount(volume_mount) \
        .add_env_variable(s3_endpoint) \
        .add_env_variable(s3_access_key) \
        .add_env_variable(s3_secret_key)

    if deploy:
        kubeflow_deploy_op(train.output, 'mnist-pipeline-{{workflow.name}}') \
           .add_volume_mount(volume_mount)
Example #10
def create_deployment_object(name):
    # Configure the Pod template container
    container = client.V1Container(
        name="minecraft",
        image="openhack/minecraft-server:2.0",
        ports=[
            client.V1ContainerPort(container_port=25565),
            client.V1ContainerPort(container_port=25575)
        ],
        volume_mounts=[
            client.V1VolumeMount(name="volume", mount_path="/data")
        ],
        env=[client.V1EnvVar(name="EULA", value="true")])
    volumes = client.V1Volume(
        name="volume",
        persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
            claim_name="azure-managed-disk"))
    # Create and configure the spec section
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={"app": name}),
        spec=client.V1PodSpec(containers=[container], volumes=[volumes]))
    # Create the specification of deployment
    spec = client.ExtensionsV1beta1DeploymentSpec(replicas=1,
                                                  template=template)
    # Instantiate the deployment object
    deployment = client.ExtensionsV1beta1Deployment(
        api_version="extensions/v1beta1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=name),
        spec=spec)

    return deployment
Example #11
def mount_fs(spawner, pod, role_mapping):
    groups = spawner.userdata.get("groups", [])
    _loaded = set()
    
    # do this only once, or have a bad time
    if any(set(groups).intersection(set(role_mapping['groups'].keys()))):
        pod.spec.volumes.append(
            client.V1Volume(
                name="efs-volume",
                persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=os.environ.get('NFS_SHARE_PVC', "pvc-jupyterhub-nfs")
                )
            )
        )

    for group in groups:
        print(f"Checking shares for group: {group}")
        try:
            fs = role_mapping['groups'].get(group, {}).get('fileshare')
        except KeyError:
            continue
        
        if fs:
            for share in fs:
                if share and share not in _loaded:                    
                    pod.spec.containers[0].volume_mounts.append(
                        client.V1VolumeMount(
                            mount_path="/opt/app-root/share/nfs/{share}".format(share=share),
                            name="efs-volume",
                            sub_path=share
                        )
                    )
                    _loaded.add(share)

    return spawner, pod
Example #12
 def create(self):
     cmd = "while true; do echo $(date -u) >> /data/out.txt; sleep 1; done"
     if self.out_put != "":
         cmd = "while true; do echo $(date -u) >> /data/{}; sleep 1; done".format(self.out_put)
     container = client.V1Container(
         name="app",
         image="centos",
         command=["sh", "-c", cmd],
         volume_mounts=[client.V1VolumeMount(
             name="juicefs-pv",
             mount_path="/data",
             mount_propagation="HostToContainer",
         )]
     )
     pod = client.V1Pod(
         metadata=client.V1ObjectMeta(name=self.name, namespace=self.namespace),
         spec=client.V1PodSpec(
             containers=[container],
             volumes=[client.V1Volume(
                 name="juicefs-pv",
                 persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(claim_name=self.pvc)
             )]),
     )
     client.CoreV1Api().create_namespaced_pod(namespace=self.namespace, body=pod)
     PODS.append(self)
Example #13
def create_volume(volume_data):
    if "name" in volume_data:
        volume = client.V1Volume(name=volume_data["name"])

        # persistent claim
        if "persistentVolumeClaim" in volume_data.has_key:
            volume_pvc = volume_data["persistentVolumeClaim"]
            if "claimName" in volume_pvc:
                pvc = client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=volume_pvc["claimName"])
                volume.persistent_volume_claim = pvc

        # hostpath
        if "hostPath" in volume_data and "path" in volume_data["hostPath"]:
            host_path = client.V1HostPathVolumeSource(
                path=volume_data["hostPath"]["path"])

            if "hostPath" in volume_data and "type" in volume_data["hostPath"]:
                host_path.type = volume_data["hostPath"]["type"]
                volume.host_path = host_path
        # nfs
        if ("nfs" in volume_data and "path" in volume_data["nfs"]
                and "server" in volume_data["nfs"]):
            volume.nfs = client.V1NFSVolumeSource(
                path=volume_data["nfs"]["path"],
                server=volume_data["nfs"]["server"])

        return volume

    return None
Example #14
    def generate_build_template_resource(self):
        metadata = client.V1ObjectMeta(name='fairing-build',
                                       namespace=self.namespace)

        params = [
            BuildTemplateSpecParameter(
                name='IMAGE', description='The name of the image to push'),
            BuildTemplateSpecParameter(
                name='TAG',
                description='The tag of the image to push',
                default='latest'),
            BuildTemplateSpecParameter(
                name='DOCKERFILE',
                description='Path to the Dockerfile to build',
                default='/src/Dockerfile')
        ]
        volume_mount = client.V1VolumeMount(name='src', mount_path='/src')
        steps = [
            BuildTemplateSpecStep(
                name='build-and-push',
                image='gcr.io/kaniko-project/executor:v0.5.0',
                args=[
                    '--dockerfile=${DOCKERFILE}',
                    '--destination=${IMAGE}:${TAG}', '--context=${CONTEXT}'
                ],
                volume_mounts=[volume_mount])
        ]

        pvc = client.V1PersistentVolumeClaimVolumeSource(
            claim_name='fairing-build')
        volume = client.V1Volume(name='src', persistent_volume_claim=pvc)
        spec = BuildTemplateSpec(parameters=params,
                                 steps=steps,
                                 volumes=[volume])
        return BuildTemplate(metadata=metadata, spec=spec)
Example #15
 def k8s_object(self):
     depl = client.AppsV1beta1Deployment(
         metadata=client.V1ObjectMeta(
             name=self.name,
             labels=self.labels
         ),
         spec=client.AppsV1beta1DeploymentSpec(
             strategy=client.AppsV1beta1DeploymentStrategy(
                 type='RollingUpdate',
                 rolling_update=client.AppsV1beta1RollingUpdateDeployment(
                     max_surge=0
                 )
             ),
             template=client.V1PodTemplateSpec(
                 metadata=client.V1ObjectMeta(
                     labels=self.template_labels),
                 spec=client.V1PodSpec(
                     affinity=client.V1Affinity(
                         pod_anti_affinity=client.V1PodAntiAffinity(
                             required_during_scheduling_ignored_during_execution=[
                                 {"topologyKey": e2e_globals.ANTI_AFFINITY_KEY},
                             ]
                         ),
                     ),
                     volumes=[client.V1Volume(
                         name='data',
                         config_map=client.V1ConfigMapVolumeSource(
                             name=self.cfgmap_name)
                     )]
                     ,
                     containers=[client.V1Container(
                         image=e2e_globals.TEST_DEPLOYMENT_IMAGE,
                         name="testapp",
                         volume_mounts=[client.V1VolumeMount(
                             name='data',
                             mount_path='/usr/share/nginx/html')
                         ],
                         ports=[client.V1ContainerPort(
                             container_port=e2e_globals.TEST_CONTAINER_PORT)],
                         resources=client.V1ResourceRequirements(
                             requests={
                                 'cpu': '1m',
                                 'memory': '1Mi',
                             },
                         ),
                     )])),
             replicas=self.replicas)
     )
     if self.vol_claim_name is not None:
         volume = client.V1Volume(name='test-volume',
                                  persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                                      claim_name=self.vol_claim_name))
         mount = client.V1VolumeMount(
             name='test-volume',
             mount_path='/usr/blank'
         )
         depl.spec.template.spec.containers[0].volume_mounts.append(mount)
         depl.spec.template.spec.volumes.append(volume)
     return depl
Example #16
 def expand_container(num):
     for _ in range(0, num):
         pod_name = "task-storage-{}-{}".format(item.uuid,
                                                get_short_uuid())
         shared_pvc_name = "shared-{}".format(item.uuid)
         shared_pvc = client.V1VolumeMount(
             mount_path=conf['persistent_volume']['mount_path'],
             name=shared_pvc_name,
             read_only=True)
         user_storage_name = "user-{}".format(item.uuid)
         user_mount = client.V1VolumeMount(
             mount_path='/cloud_scheduler_userspace/',
             name=user_storage_name)
         container_settings = {
             'name': 'task-storage-container',
             'image': USER_WEBSHELL_DOCKER_IMAGE,
             'volume_mounts': [shared_pvc, user_mount],
         }
         container = client.V1Container(**container_settings)
         pvc = client.V1PersistentVolumeClaimVolumeSource(
             claim_name=conf['persistent_volume']['name'])
         user_volume_claim = client.V1PersistentVolumeClaimVolumeSource(
             claim_name=USERSPACE_NAME)
         volume = client.V1Volume(name=shared_pvc_name,
                                  persistent_volume_claim=pvc)
         user_volume = client.V1Volume(
             name=user_storage_name,
             persistent_volume_claim=user_volume_claim)
         metadata = client.V1ObjectMeta(name=pod_name,
                                        labels={
                                            'task': uuid,
                                            'occupied': '0'
                                        })
         spec = client.V1PodSpec(containers=[container],
                                 restart_policy='Always',
                                 volumes=[volume, user_volume])
         pod = client.V1Pod(api_version='v1',
                            kind='Pod',
                            metadata=metadata,
                            spec=spec)
         try:
             api.create_namespaced_pod(namespace=KUBERNETES_NAMESPACE,
                                       body=pod)
         except ApiException as ex:
             LOGGER.warning(ex)
Example #17
def get_volume_list():
    pvc_claim_name = "efs-claim"
    persistent_volume_claim = client.V1PersistentVolumeClaimVolumeSource(
        claim_name=pvc_claim_name)
    volume = client.V1Volume(persistent_volume_claim=persistent_volume_claim,
                             name=pvc_claim_name)
    logger.info("Volume object created for '%s' pvc" % str(pvc_claim_name))
    volume_list = [volume]
    return volume_list
Example #18
    def _mount_pvc(task):
        from kubernetes import client as k8s_client

        local_pvc = k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name)
        return task.add_volume(
            k8s_client.V1Volume(name=volume_name, persistent_volume_claim=local_pvc)
        ).add_volume_mount(
            k8s_client.V1VolumeMount(mount_path=volume_mount_path, name=volume_name)
        )
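_mount_pvc takes a task and returns it, so in the KFP v1 SDK it can be applied to an individual ContainerOp with .apply(). A minimal sketch, assuming pvc_name, volume_name, and volume_mount_path are defined in the enclosing scope as the closure implies, and using a placeholder image and command:

from kfp import dsl

@dsl.pipeline(name="mount-pvc-demo")
def demo_pipeline():
    op = dsl.ContainerOp(
        name="list-data",
        image="alpine:latest",
        command=["sh", "-c", "ls /mnt"],
    )
    # Attach the PVC-backed volume and its mount to this op.
    op.apply(_mount_pvc)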
Example #19
def get_volume(volume, claim_name=None, volume_mount=None):
    if claim_name:
        pv_claim = client.V1PersistentVolumeClaimVolumeSource(claim_name=claim_name)
        return client.V1Volume(name=volume, persistent_volume_claim=pv_claim)
    elif volume_mount:
        return client.V1Volume(
            name=volume,
            host_path=client.V1HostPathVolumeSource(path=volume_mount))
    else:
        empty_dir = client.V1EmptyDirVolumeSource()
        return client.V1Volume(name=volume, empty_dir=empty_dir)
Example #20
def storage_maker_op(folder_name='',step_name='make-storage'):
    container = dsl.ContainerOp(
        name=step_name,
        image='alpine:latest',
        command = ['sh', '-c', ('mkdir /mnt/' + folder_name + ' && mkdir /mnt/test_doo0123o/comp1 && mkdir /mnt/test_doo0123o/component2 && echo "/mnt/test_doo0123o/comp1" >> /comp1.txt && echo "/mnt/test_doo0123o/component2" >> /component2.txt')],
        file_outputs={'comp1': '/comp1.txt', 'component2': '/component2.txt'}
    )
    container.add_volume(
        k8s_client.V1Volume(name=pv_name, persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
            claim_name=pvc_name)))
    container.add_volume_mount(k8s_client.V1VolumeMount(mount_path='/mnt', name=pv_name))
    return container
Example #21
def get_volume(volume, claim_name=None, host_path=None, read_only=None):
    if claim_name:
        pv_claim = client.V1PersistentVolumeClaimVolumeSource(
            claim_name=claim_name, read_only=read_only)
        return client.V1Volume(name=volume, persistent_volume_claim=pv_claim)

    if host_path:
        return client.V1Volume(
            name=volume,
            host_path=client.V1HostPathVolumeSource(path=host_path))

    empty_dir = client.V1EmptyDirVolumeSource()
    return client.V1Volume(name=volume, empty_dir=empty_dir)
Example #22
def scheduleJobs():
    jobNames = []
    for jobParameters in request.get_json(force=True):
        if not validateJobParameters(jobParameters):
            return abort(422, 'Invalid arguments')

        body = kubeClient.V1Job(api_version="batch/v1", kind="Job")
        # Body needs Metadata
        # Attention: Each JOB must have a different name!
        jobName = "r-job-" + str(uuid.uuid4())
        body.metadata = kubeClient.V1ObjectMeta(namespace="default",
                                                name=jobName)
        # And a Status

        body.status = kubeClient.V1JobStatus()
        # Now we start with the Template...
        template = kubeClient.V1PodTemplate()
        template.template = kubeClient.V1PodTemplateSpec()
        # Passing Arguments in Env:

        env_list = createJobEnv(jobParameters, jobName)

        volume_mounts = kubeClient.V1VolumeMount(mount_path="/mydata",
                                                 name="dose-volume")
        container = kubeClient.V1Container(
            name="r-container",
            image="monikeu/r-script-1:r-image-env",
            env=env_list,
            volume_mounts=[volume_mounts],
            image_pull_policy="Always")
        per_vol_claim = kubeClient.V1PersistentVolumeClaimVolumeSource(
            claim_name="dose-volume-claim")
        volume = kubeClient.V1Volume(name="dose-volume",
                                     persistent_volume_claim=per_vol_claim)
        template.template.spec = kubeClient.V1PodSpec(containers=[container],
                                                      restart_policy='Never',
                                                      volumes=[volume])
        # And finally we can create our V1JobSpec!
        body.spec = kubeClient.V1JobSpec(ttl_seconds_after_finished=600,
                                         template=template.template)

        try:
            response = api_instance.create_namespaced_job("default",
                                                          body,
                                                          pretty=True)
            pprint(response)
            jobNames.append(jobName)
        except ApiException as e:
            return "Error occurred during an attempt to create a job", e.status

    return 'Created one or more jobs: {}'.format(",".join(jobNames)), 201
Example #23
def run_io_utility(
    test_engine_name: str,
    namespace: str,
    pvc_name: str,
    io_config: Dict[str, str],
    transfer_path: str,
):
    io_utility_name = f"{test_engine_name}-io-utility-{str(uuid4())[:8]}"

    volume = k8s_client.V1Volume(
        name=pvc_name,
        persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
            claim_name=pvc_name),
    )

    volume_mount = k8s_client.V1VolumeMount(name=pvc_name,
                                            mount_path=os.path.join(
                                                "/", pvc_name))

    pod_env = [
        k8s_client.V1EnvVar(name=key, value=value)
        for key, value in io_config.items()
    ]

    # NOTE: specify service account?
    pod_body = k8s_client.V1Pod(
        metadata=k8s_client.V1ObjectMeta(
            name=io_utility_name,
            labels={
                "run_id": test_engine_name,
                "run": io_utility_name,
                "family": "cicada",
                "type": "cicada-io-utility",
            },
        ),
        spec=k8s_client.V1PodSpec(
            restart_policy="Never",
            containers=[
                k8s_client.V1Container(
                    image="cicadatesting/cicada-operator-io-utility:latest",
                    name=io_utility_name,
                    volume_mounts=[volume_mount],
                    env=pod_env,
                    args=["transfer", transfer_path],
                )
            ],
            volumes=[volume],
        ),
    )

    return run_pod_to_completion(namespace, pod_body)
Example #24
def create_pod(value_pod, pvc_name, pod_name):
    """
    creates pod

    Args:
        param1: value_pod - values required for creation of pod
        param2: pvc_name - name of the PVC the pod is associated with
        param3: pod_name - name of pod to be created

    Returns:
        None

    Raises:
        Raises an exception on kubernetes client api failure and asserts

    """
    if value_pod["read_only"] == "True":
        value_pod["read_only"] = True
    elif value_pod["read_only"] == "False":
        value_pod["read_only"] = False
    api_instance = client.CoreV1Api()
    pod_metadata = client.V1ObjectMeta(name=pod_name, labels={"app": "nginx"})
    pod_volume_mounts = client.V1VolumeMount(
        name="mypvc", mount_path=value_pod["mount_path"])
    pod_ports = client.V1ContainerPort(container_port=80)
    pod_containers = client.V1Container(name="web-server",
                                        image="nginx:1.19.0",
                                        volume_mounts=[pod_volume_mounts],
                                        ports=[pod_ports])
    pod_persistent_volume_claim = client.V1PersistentVolumeClaimVolumeSource(
        claim_name=pvc_name, read_only=value_pod["read_only"])
    pod_volumes = client.V1Volume(
        name="mypvc", persistent_volume_claim=pod_persistent_volume_claim)
    pod_spec = client.V1PodSpec(containers=[pod_containers],
                                volumes=[pod_volumes])
    pod_body = client.V1Pod(api_version="v1",
                            kind="Pod",
                            metadata=pod_metadata,
                            spec=pod_spec)

    try:
        LOGGER.info(f'creating pod {pod_name} with {str(value_pod)}')
        api_response = api_instance.create_namespaced_pod(
            namespace=namespace_value, body=pod_body, pretty=True)
        LOGGER.debug(str(api_response))
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling CoreV1Api->create_namespaced_pod: {e}")
        assert False
Example #25
    def _mounting_pvc(kube_manager, pod_spec, namespace): #pylint:disable=unused-argument
        volume_mount = client.V1VolumeMount(
            name=mounting_name, mount_path=pvc_mount_path)
        if pod_spec.containers[0].volume_mounts:
            pod_spec.containers[0].volume_mounts.append(volume_mount)
        else:
            pod_spec.containers[0].volume_mounts = [volume_mount]

        volume = client.V1Volume(
            name=mounting_name,
            persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name))
        if pod_spec.volumes:
            pod_spec.volumes.append(volume)
        else:
            pod_spec.volumes = [volume]
Example #26
 def _mount_pvc(task):
     from kubernetes import client as k8s_client
     # there can be other ops in a pipeline (e.g. ResourceOp, VolumeOp)
     # refer to #3906
     if not hasattr(task, "add_volume") or not hasattr(
             task, "add_volume_mount"):
         return task
     local_pvc = k8s_client.V1PersistentVolumeClaimVolumeSource(
         claim_name=pvc_name)
     return (task.add_volume(
         k8s_client.V1Volume(
             name=volume_name,
             persistent_volume_claim=local_pvc)).add_volume_mount(
                 k8s_client.V1VolumeMount(
                     mount_path=volume_mount_path, name=volume_name)))
Example #27
def eval_container(to_eval, step_name, eval_inputs=None):
    config_path = eval_inputs[0]
    weights_path = eval_inputs[1]
    data_path = eval_inputs[2]
    step_command = 'echo "Evaluating component: ' + to_eval + ' using config path: ' + config_path + ', weights path: ' + weights_path + ', data path: ' + data_path + '..."'
    step_command += ' && CFGPATH=' + config_path + ' && WTPATH=' + weights_path + ' && DATAPATH=' + data_path + ' && export CFGPATH WTPATH DATAPATH && ./list_darknet.sh'
    container = dsl.ContainerOp(
        name=step_name,
        image='alim95/darknet-test:modeldb-v3',
        command=['sh', '-c', (step_command)]
      )
    container.add_volume(
        k8s_client.V1Volume(name=pv_name, persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
            claim_name=pvc_name)))
    container.add_volume_mount(k8s_client.V1VolumeMount(mount_path='/mnt', name=pv_name))
    return container
Example #28
 def _get_volumes(self, volumes):
     volume_list = []
     if volumes:
         for name, item in volumes.items():
             if item.get("type") == VOLUME_TYPE_PVC:
                 volume_list.append(client.V1Volume(
                     name=name,
                     persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                         claim_name=name
                     )
                 ))
             elif item.get("type") == VOLUME_TYPE_HOST_PATH:
                 volume_list.append(client.V1Volume(
                     name=name,
                     host_path=client.V1HostPathVolumeSource(
                         path=item.get("path")
                     )
                 ))
Example #29
    def add_pvc(kube_manager, pod_spec, namespace):
        """Add a pvc to the specified pod spec."""
        volume_mount = client.V1VolumeMount(name=pvc_name,
                                            mount_path=mount_path,
                                            read_only=False)

        if not pod_spec.containers[0].volume_mounts:
            pod_spec.containers[0].volume_mounts = []

        pod_spec.containers[0].volume_mounts.append(volume_mount)

        volume = client.V1Volume(
            name=pvc_name,
            persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                claim_name=pvc_name))
        if pod_spec.volumes:
            pod_spec.volumes.append(volume)
        else:
            pod_spec.volumes = [volume]
Example #30
def transformer(containerOp):
  containerOp.arguments = ['/scripts/pipelineWrapper.py', 'Privacy', 'python'] + containerOp.arguments
  # shouldn't hard code this experiment name
  
  containerOp.container.set_image_pull_policy("Always")
  containerOp.add_volume(
    k8s_client.V1Volume(
      name='azure',
      persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
        claim_name='azure-managed-disk')
    )
  ).add_volume_mount(k8s_client.V1VolumeMount(mount_path='/mnt/azure', name='azure'))

  containerOp.container.add_env_variable(V1EnvVar(name='AZ_NAME', value=ws.name))\
    .add_env_variable(V1EnvVar(name='AZ_SUBSCRIPTION_ID', value=ws.subscription_id))\
    .add_env_variable(V1EnvVar(name='AZ_RESOURCE_GROUP', value=ws.resource_group))
  containerOp.apply(use_azure_secret('azcreds'))

  return containerOp
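Because transformer accepts a ContainerOp and returns it, the KFP v1 SDK can apply it to every op in a pipeline through the pipeline configuration. A minimal sketch with a placeholder pipeline name and body (both assumptions for illustration):

from kfp import dsl

@dsl.pipeline(name="privacy-pipeline")
def my_pipeline():
    # ... build ContainerOps here ...
    # Apply the transformer above to every op in this pipeline.
    dsl.get_pipeline_conf().add_op_transformer(transformer)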