def with_gcp_auth(self,
                      secret_name: str) -> Optional['HmlInferenceDeployment']:
        """
        Use the secret named `secret_name` as the service account for GCP-related
        SDK API calls (i.e. mount the secret to a path, then bind the environment variable
        GOOGLE_APPLICATION_CREDENTIALS to point to that path).

        Args:
            secret_name (str): The name of the secret with the Google Service Account json file.

        Returns:
            A reference to the current `HmlInferenceDeployment` (self)
        """
        volume_name = f"{secret_name}-volume"
        secret_volume_mount_path = "/secrets/gcp-credentials"
        secret_file_path_in_volume = '/' + secret_name + '.json'

        # Tell the pod about the secret as a mountable volume
        self.pod_volumes.append(
            client.V1Volume(name=volume_name,
                            secret=client.V1SecretVolumeSource(
                                secret_name=secret_name, )))

        # Mount the volume
        self.k8s_container.add_volume_mount(
            client.V1VolumeMount(
                name=volume_name,
                mount_path=secret_volume_mount_path,
            ))
        # Create environment variables to tell it where my credentials will live
        self.k8s_container.add_env_variable(
            client.V1EnvVar(name="GOOGLE_APPLICATION_CREDENTIALS",
                            value=secret_volume_mount_path +
                            secret_file_path_in_volume))
        self.k8s_container.add_env_variable(
            client.V1EnvVar(name="CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE",
                            value=secret_volume_mount_path +
                            secret_file_path_in_volume))

        return self
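
A minimal usage sketch for context: with_gcp_auth returns self, so it can be chained on an HmlInferenceDeployment instance. The constructor arguments below are hypothetical placeholders; only with_gcp_auth itself comes from the example above.

# Hypothetical usage; the constructor signature is an assumption.
deployment = HmlInferenceDeployment(name="my-model",
                                    image="gcr.io/my-project/my-model:latest")
# Mount the "gcp-credentials" secret and point GOOGLE_APPLICATION_CREDENTIALS at it.
deployment.with_gcp_auth(secret_name="gcp-credentials")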
Example #2
def _create_container_op_from_resolved_task(name: str,
                                            container_image: str,
                                            command=None,
                                            arguments=None,
                                            input_paths=None,
                                            artifact_arguments=None,
                                            output_paths=None,
                                            env: Mapping[str, str] = None,
                                            component_spec=None):
    from .. import dsl

    #Renaming outputs to conform with ContainerOp/Argo
    from ._naming import _sanitize_python_function_name, generate_unique_name_conversion_table
    output_names = (output_paths or {}).keys()
    output_name_to_kubernetes = generate_unique_name_conversion_table(
        output_names, _sanitize_python_function_name)
    output_paths_for_container_op = {
        output_name_to_kubernetes[name]: path
        for name, path in output_paths.items()
    }

    task = dsl.ContainerOp(
        name=name,
        image=container_image,
        command=command,
        arguments=arguments,
        file_outputs=output_paths_for_container_op,
        artifact_argument_paths=[
            dsl.InputArgumentPath(argument=artifact_arguments[input_name],
                                  input=input_name,
                                  path=path)
            for input_name, path in input_paths.items()
        ],
    )

    component_meta = copy.copy(component_spec)
    component_meta.implementation = None
    task._set_metadata(component_meta)

    if env:
        from kubernetes import client as k8s_client
        for name, value in env.items():
            task.container.add_env_variable(
                k8s_client.V1EnvVar(name=name, value=value))

    if component_spec.metadata:
        for key, value in (component_spec.metadata.annotations or {}).items():
            task.add_pod_annotation(key, value)
        for key, value in (component_spec.metadata.labels or {}).items():
            task.add_pod_label(key, value)

    return task
Example #3
    def get_task_pod(self,
                     task_type,
                     task_idx,
                     volume_mounts,
                     volumes,
                     labels,
                     env_vars=None,
                     command=None,
                     args=None,
                     ports=None,
                     persistence_outputs=None,
                     persistence_data=None,
                     outputs_refs_jobs=None,
                     outputs_refs_experiments=None,
                     secret_refs=None,
                     configmap_refs=None,
                     resources=None,
                     ephemeral_token=None,
                     node_selector=None,
                     affinity=None,
                     tolerations=None,
                     restart_policy=None):
        resource_name = self.get_resource_name(task_type=task_type,
                                               task_idx=task_idx)
        env_vars = to_list(env_vars, check_none=True)
        env_vars.append(
            client.V1EnvVar(name=constants.CONFIG_MAP_TASK_INFO_KEY_NAME,
                            value=json.dumps({
                                'type': task_type,
                                'index': task_idx
                            })))

        return self.get_pod(resource_name=resource_name,
                            volume_mounts=volume_mounts,
                            volumes=volumes,
                            labels=labels,
                            env_vars=env_vars,
                            command=command,
                            args=args,
                            ports=ports,
                            persistence_outputs=persistence_outputs,
                            persistence_data=persistence_data,
                            outputs_refs_jobs=outputs_refs_jobs,
                            outputs_refs_experiments=outputs_refs_experiments,
                            secret_refs=secret_refs,
                            configmap_refs=configmap_refs,
                            resources=resources,
                            ephemeral_token=ephemeral_token,
                            node_selector=node_selector,
                            affinity=affinity,
                            tolerations=tolerations,
                            restart_policy=restart_policy)
Example #4
    def init_envs(self, container_props, name):
        config = container_props.config
        config_dict = self.pre_process_config(config)
        configmap_name = name

        list_envs = []
        for key in config_dict:
            config_map_ref = client.V1ConfigMapKeySelector(key=key,
                                                           name=configmap_name)
            env_var = client.V1EnvVarSource(config_map_key_ref=config_map_ref)
            env_object = client.V1EnvVar(name=key, value_from=env_var)
            list_envs.append(env_object)
        return list_envs
Example #5
 def get_container(self):
     return client.V1Container(
         name='django-app-migration',
         image=self.context.image,
         command=['python', 'manage.py', 'migrate'],
         env=[
             client.V1EnvVar(
                 name='DATABASE_URL',
                 value=
                 'postgres://*****:*****@postgres-service:5432/dev_db'
             )
         ],
     )
Example #6
 def set_init_container(
     self, image, command=None, args=None, env=None, image_pull_policy="IfNotPresent"
 ):
     if isinstance(env, dict):
         env = [client.V1EnvVar(name=k, value=v) for k, v in env.items()]
     self._init_container = client.V1Container(
         name="init",
         image=image,
         env=env,
         command=command,
         args=args,
         image_pull_policy=image_pull_policy,
     )
Example #7
 def get_sidecar_container(self):
     """Pod sidecar container for task logs."""
     env_vars = get_sidecar_env_vars(
         job_name=self.job_name, job_container_name=self.job_container_name)
     env_vars += get_service_env_vars(namespace=self.namespace)
     for k, v in self.sidecar_config.items():
         env_vars.append(client.V1EnvVar(name=k, value=v))
     return client.V1Container(
         name=self.sidecar_container_name,
         image=self.sidecar_docker_image,
         command=get_sidecar_command(app_label=self.app_label),
         env=env_vars,
         args=get_sidecar_args(pod_id=self.pod_name))
Example #8
 def generate_pod_spec(self):
     """return a V1PodSpec initialized with the proper container"""
     return client.V1PodSpec(containers=[
         client.V1Container(
             name='model',
             image=self.full_image_name(),
             command=self.get_command(),
             env=[client.V1EnvVar(
                 name='FAIRING_RUNTIME',
                 value='1',
             )])
     ],
                             restart_policy='Never')
Example #9
 def generate_pod_spec(self):
     """return a V1PodSpec initialized with the proper container"""
     return client.V1PodSpec(containers=[
         client.V1Container(
             name='model',
             image=self.image_tag,
             command=self.preprocessor.get_command(),
             security_context=client.V1SecurityContext(run_as_user=0, ),
             env=[client.V1EnvVar(
                 name='FAIRING_RUNTIME',
                 value='1',
             )])
     ], )
Example #10
 def default_job(cls):
     if not client:
         cls.dependency_error()
     job_name = '%(prop:buildername)s-%(prop:buildnumber)s'
     return client.V1Job(
         metadata=client.V1ObjectMeta(name=job_name),
         spec=client.V1JobSpec(
             template=client.V1PodTemplateSpec(
                 metadata=client.V1ObjectMeta(name=job_name),
                 spec=client.V1PodSpec(
                     containers=[
                         client.V1Container(
                             name=job_name,
                             image='buildbot/buildbot-worker',
                             env=[
                                 client.V1EnvVar(
                                     name='BUILDMASTER',
                                     value='%(prop:masterFQDN)s'
                                 ),
                                 client.V1EnvVar(
                                     name='BUILDMASTER_PORT',
                                     value='%(prop:masterPort)s'
                                 ),
                                 client.V1EnvVar(
                                     name='WORKERNAME',
                                     value='%(prop:workerName)s'
                                 ),
                                 client.V1EnvVar(
                                     name='WORKERPASS',
                                     value='%(prop:workerPass)s'
                                 )
                             ]
                         )
                     ],
                     restart_policy='Never'
                 )
             )
         )
     )
Example #11
def get_resources_env_vars(resources):
    env_vars = []
    if resources:
        if resources.gpu and settings.LD_LIBRARY_PATH:
            env_vars += [
                client.V1EnvVar(name='LD_LIBRARY_PATH',
                                value=settings.LD_LIBRARY_PATH)
            ]
        if resources.gpu and not settings.LD_LIBRARY_PATH:
            # TODO: logger.warning('`LD_LIBRARY_PATH` was not properly set.')  # Publish error
            pass

    # Fix https://github.com/kubernetes/kubernetes/issues/59629
    # When resources.gpu.limits is not set or set to 0, we explicitly
    # pass NVIDIA_VISIBLE_DEVICES=none into container to avoid exposing GPUs.
    condition = (not resources or not resources.gpu or not resources.gpu.limits
                 or resources.gpu.limits == '0')
    if condition:
        env_vars.append(
            client.V1EnvVar(name='NVIDIA_VISIBLE_DEVICES', value='none'))

    return env_vars
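
A short sketch of how the returned variables might be wired into a container spec; the container name and image are placeholders, not part of the example.

# Hypothetical wiring; with no GPU limits set, the list carries
# NVIDIA_VISIBLE_DEVICES=none so the container cannot see any GPUs.
env_vars = get_resources_env_vars(resources=None)
container = client.V1Container(name='experiment',
                               image='example.org/experiment:latest',
                               env=env_vars)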
Example #12
    def get_task_pod_spec(self,
                          task_type,
                          task_idx,
                          volume_mounts,
                          volumes,
                          env_vars=None,
                          command=None,
                          args=None,
                          sidecar_args=None,
                          resources=None,
                          restart_policy='OnFailure'):
        """Pod spec to be used to create pods for tasks: master, worker, ps."""
        volume_mounts = volume_mounts or []
        volumes = volumes or []

        gpu_volume_mounts, gpu_volumes = get_gpu_volumes_def(resources)
        volume_mounts += gpu_volume_mounts
        volumes += gpu_volumes

        # Add job information
        env_vars = env_vars or []
        env_vars.append(
            client.V1EnvVar(name=constants.CONFIG_MAP_TASK_INFO_KEY_NAME,
                            value=json.dumps({
                                'type': task_type,
                                'index': task_idx
                            })))

        pod_container = self.get_pod_container(volume_mounts=volume_mounts,
                                               env_vars=env_vars,
                                               command=command,
                                               args=args,
                                               resources=resources)

        containers = [pod_container]
        if self.use_sidecar:
            sidecar_container = self.get_sidecar_container(task_type=task_type,
                                                           task_idx=task_idx,
                                                           args=sidecar_args)
            containers.append(sidecar_container)

        node_selector = settings.NODE_SELECTORS_EXPERIMENTS
        node_selector = json.loads(node_selector) if node_selector else None
        service_account_name = None
        if settings.K8S_RBAC_ENABLED:
            service_account_name = settings.K8S_SERVICE_ACCOUNT_NAME
        return client.V1PodSpec(restart_policy=restart_policy,
                                service_account_name=service_account_name,
                                containers=containers,
                                volumes=volumes,
                                node_selector=node_selector)
Example #13
def run_io_utility(
    test_engine_name: str,
    namespace: str,
    pvc_name: str,
    io_config: Dict[str, str],
    transfer_path: str,
):
    io_utility_name = f"{test_engine_name}-io-utility-{str(uuid4())[:8]}"

    volume = k8s_client.V1Volume(
        name=pvc_name,
        persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
            claim_name=pvc_name),
    )

    volume_mount = k8s_client.V1VolumeMount(name=pvc_name,
                                            mount_path=os.path.join(
                                                "/", pvc_name))

    pod_env = [
        k8s_client.V1EnvVar(name=key, value=value)
        for key, value in io_config.items()
    ]

    # NOTE: specify service account?
    pod_body = k8s_client.V1Pod(
        metadata=k8s_client.V1ObjectMeta(
            name=io_utility_name,
            labels={
                "run_id": test_engine_name,
                "run": io_utility_name,
                "family": "cicada",
                "type": "cicada-io-utility",
            },
        ),
        spec=k8s_client.V1PodSpec(
            restart_policy="Never",
            containers=[
                k8s_client.V1Container(
                    image="cicadatesting/cicada-operator-io-utility:latest",
                    name=io_utility_name,
                    volume_mounts=[volume_mount],
                    env=pod_env,
                    args=["transfer", transfer_path],
                )
            ],
            volumes=[volume],
        ),
    )

    return run_pod_to_completion(namespace, pod_body)
Example #14
def add_aws_credentials(kube_manager, pod_spec, namespace):
    """add AWS credential

    :param kube_manager: kube manager for handles communication with Kubernetes' client
    :param pod_spec: pod spec like volumes and security context
    :param namespace: The custom resource

    """
    if not kube_manager.secret_exists(constants.AWS_CREDS_SECRET_NAME, namespace):
        raise ValueError('Unable to mount credentials: Secret aws-secret not found in namespace {}'
                         .format(namespace))

    # Set appropriate secrets env to enable kubeflow-user service
    # account.
    env = [
        client.V1EnvVar(
            name='AWS_ACCESS_KEY_ID',
            value_from=client.V1EnvVarSource(
                secret_key_ref=client.V1SecretKeySelector(
                    name=constants.AWS_CREDS_SECRET_NAME,
                    key='AWS_ACCESS_KEY_ID'
                )
            )
        ),
        client.V1EnvVar(
            name='AWS_SECRET_ACCESS_KEY',
            value_from=client.V1EnvVarSource(
                secret_key_ref=client.V1SecretKeySelector(
                    name=constants.AWS_CREDS_SECRET_NAME,
                    key='AWS_SECRET_ACCESS_KEY'
                )
            )
        )]

    if pod_spec.containers[0].env:
        pod_spec.containers[0].env.extend(env)
    else:
        pod_spec.containers[0].env = env
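
A hedged sketch of calling the helper above; the pod spec is a minimal placeholder, and kube_manager stands in for whatever client wrapper the surrounding project provides (only the secret_exists method used above is required).

# Minimal placeholder pod spec; a real spec would carry the full container definition.
pod_spec = client.V1PodSpec(containers=[
    client.V1Container(name='worker', image='example.org/worker:latest')
])
# kube_manager is assumed to be constructed elsewhere by the surrounding project.
add_aws_credentials(kube_manager, pod_spec, namespace='kubeflow')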
Example #15
    def handle_proxy_credentials(self, env):
        credentials_secret = None
        if self.credentials_secret_name:
            credentials_secret = self.run_action_and_parse_error(
                self.core_api.read_namespaced_secret,
                self.credentials_secret_name,
                self.namespace)
        if not credentials_secret:
            LOGGER.error("No secret named %s was found in the %s namespace, will use unauth access",
                         self.credentials_secret_name, self.namespace)
            return env

        encoded_user = credentials_secret.data.get("username")
        encoded_pass = credentials_secret.data.get("password")

        if not (encoded_user and encoded_pass):
            LOGGER.error("Secret %s does not contain username/password",
                         self.credentials_secret_name)
            return env
        env.append(client.V1EnvVar(name="REGISTRY_PROXY_USERNAME",
                                   value=None,
                                   value_from=client.V1EnvVarSource(
                                        secret_key_ref=client.V1SecretKeySelector(
                                            key="username",
                                            name=self.credentials_secret_name
                                        )
                                   ))
                  )
        env.append(client.V1EnvVar(name="REGISTRY_PROXY_PASSWORD",
                                   value=None,
                                   value_from=client.V1EnvVarSource(
                                        secret_key_ref=client.V1SecretKeySelector(
                                            key="password",
                                            name=self.credentials_secret_name
                                        )
                                   ))
                  )
        LOGGER.info("Secret selected + env vars set successfully")
        return env
Example #16
    def generate_pod_spec(self, image_name, push):  # pylint: disable=arguments-differ
        """
        :param image_name: name of image to be built
        :param push: whether to push image to given registry or not
        """
        args = [
            "--dockerfile=Dockerfile", "--destination=" + image_name,
            "--context=" + self.uploaded_context_url
        ]
        if not push:
            args.append("--no-push")

        return client.V1PodSpec(
            containers=[
                client.V1Container(
                    name='kaniko',
                    image=constants.KANIKO_IMAGE,
                    args=args,
                    env=[
                        client.V1EnvVar(name='AWS_REGION', value=self.region),
                        client.V1EnvVar(name='AWS_ACCESS_KEY_ID',
                                        value=self.aws_access_key_id),
                        client.V1EnvVar(name='AWS_SECRET_ACCESS_KEY',
                                        value=self.aws_secret_access_key),
                        client.V1EnvVar(name='S3_ENDPOINT',
                                        value=self.cos_endpoint_url),
                    ],
                    volume_mounts=[
                        client.V1VolumeMount(name="docker-config",
                                             mount_path="/kaniko/.docker/")
                    ])
            ],
            restart_policy='Never',
            volumes=[
                client.V1Volume(name="docker-config",
                                config_map=client.V1ConfigMapVolumeSource(
                                    name="docker-config"))
            ])
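
The kaniko pod above mounts a ConfigMap named docker-config into /kaniko/.docker/. A hedged sketch of creating that ConfigMap, where the registry URL and auth payload are placeholders:

# Placeholder registry credentials; config.json normally carries a base64-encoded auth token.
docker_config = client.V1ConfigMap(
    metadata=client.V1ObjectMeta(name="docker-config"),
    data={"config.json": '{"auths": {"registry.example.com": {"auth": "<base64-credentials>"}}}'})
client.CoreV1Api().create_namespaced_config_map(namespace="default", body=docker_config)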
Example #17
def create_deployment_object(url, url_id):

    # Configure Pod template container
    container = client.V1Container(
        name="{}-container".format(url_id),
        image="cloudcam_main:1.0",
        env=[
            client.V1EnvVar(name="URL", value=url),
            client.V1EnvVar(name="URL_ID", value=url_id)
        ],
        volume_mounts=[client.V1VolumeMount(name="aaaa", mount_path="/DATA")])

    volume = client.V1Volume(
        name="aaaa", host_path=client.V1HostPathVolumeSource(path="/DATA"))

    # Create and configure a spec section
    template = client.V1PodTemplateSpec(metadata=client.V1ObjectMeta(
        labels={"app": "{}-container".format(url_id)}),
                                        spec=client.V1PodSpec(
                                            containers=[container],
                                            volumes=[volume]))

    # Create the specification of deployment
    spec = client.V1DeploymentSpec(
        replicas=1,
        template=template,
        selector={'matchLabels': {
            "app": "{}-container".format(url_id)
        }})

    # Instantiate the deployment object
    deployment = client.V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=client.V1ObjectMeta(name="{}-main".format(url_id)),
        spec=spec)

    return deployment
Example #18
def add_default_env(k8s_client, cop):
    cop.container.add_env_variable(
        k8s_client.V1EnvVar(
            "MLRUN_NAMESPACE",
            value_from=k8s_client.V1EnvVarSource(
                field_ref=k8s_client.V1ObjectFieldSelector(
                    field_path="metadata.namespace")),
        ))

    if config.httpdb.api_url:
        cop.container.add_env_variable(
            k8s_client.V1EnvVar(name="MLRUN_DBPATH",
                                value=config.httpdb.api_url))

    if config.mpijob_crd_version:
        cop.container.add_env_variable(
            k8s_client.V1EnvVar(name="MLRUN_MPIJOB_CRD_VERSION",
                                value=config.mpijob_crd_version))

    if "V3IO_ACCESS_KEY" in os.environ:
        cop.container.add_env_variable(
            k8s_client.V1EnvVar(name="V3IO_ACCESS_KEY",
                                value=os.environ["V3IO_ACCESS_KEY"]))
Example #19
    def add_environment_variable(self, name, value):
        """Add an environment variable.

        :param name: Environment variable name.
        :param value: Environment variable value.
        """
        env_var = client.V1EnvVar(name, str(value))
        if isinstance(self.kubernetes_objects["deployment"].spec.template.
                      spec.containers[0].env, list):
            self.kubernetes_objects["deployment"].spec.template. \
                spec.containers[0].env.append(env_var)
        else:
            self.kubernetes_objects["deployment"].spec.template. \
                spec.containers[0].env = [env_var]
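
A short usage sketch, assuming an instance (called builder here, a hypothetical name) whose kubernetes_objects["deployment"] already holds a V1Deployment:

# Values are stringified before being appended to the first container's env list.
builder.add_environment_variable("LOG_LEVEL", "debug")
builder.add_environment_variable("WORKER_COUNT", 4)  # stored as the string "4"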
Example #20
def customModelSpec(custom_model_spec):
    env = ([client.V1EnvVar(name=i['name'], value=i['value'])
            for i in custom_model_spec['env']]
           if custom_model_spec.get('env', '') else None)
    ports = ([client.V1ContainerPort(container_port=int(custom_model_spec.get('port', '')))]
             if custom_model_spec.get('port', '') else None)
    containerSpec = client.V1Container(
        name=custom_model_spec.get('name', 'custom-container'),
        image=custom_model_spec['image'],
        env=env,
        ports=ports,
        command=custom_model_spec.get('command', None),
        args=custom_model_spec.get('args', None),
        image_pull_policy=custom_model_spec.get('image_pull_policy', None),
        working_dir=custom_model_spec.get('working_dir', None)
    )
    return V1alpha1ModelSpec(custom=V1alpha1CustomSpec(container=containerSpec))
Example #21
def default_gcp_op(name: str, image: str, command: str = None,
           arguments: str = None, file_inputs: Dict[dsl.PipelineParam, str] = None,
           file_outputs: Dict[str, str] = None, is_exit_handler=False):
  """An operator that mounts the default GCP service account to the container.

  The user-gcp-sa secret is created as part of the kubeflow deployment that
  stores the access token for kubeflow user service account.

  With this service account, the container has access to a range of GCP APIs.
  This service account is automatically created as part of the
  kubeflow deployment.

  For the list of GCP APIs this service account can access, see
  https://github.com/kubeflow/kubeflow/blob/7b0db0d92d65c0746ac52b000cbc290dac7c62b1/deployment/gke/deployment_manager_configs/iam_bindings_template.yaml#L18

  If you want to call GCP APIs in a different project, grant the kf-user
  service account the corresponding access permissions in that project.
  """

  return (
    dsl.ContainerOp(
      name,
      image,
      command,
      arguments,
      file_inputs,
      file_outputs,
      is_exit_handler,
    )
      .add_volume(
      k8s_client.V1Volume(
        name='gcp-credentials',
        secret=k8s_client.V1SecretVolumeSource(
          secret_name='user-gcp-sa'
        )
      )
    )
      .add_volume_mount(
      k8s_client.V1VolumeMount(
        mount_path='/secret/gcp-credentials',
        name='gcp-credentials',
      )
    )
      .add_env_variable(
      k8s_client.V1EnvVar(
        name='GOOGLE_APPLICATION_CREDENTIALS',
        value='/secret/gcp-credentials/user-gcp-sa.json'
      )
    )
  )
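
A hedged sketch of using the wrapper inside a pipeline; the pipeline name, image and command are illustrative placeholders, not part of the example.

@dsl.pipeline(name='gcp-auth-demo',
              description='Placeholder pipeline running one op with the mounted credentials')
def gcp_auth_demo():
    # Placeholder image/command; the op inherits the user-gcp-sa credentials mount.
    default_gcp_op(name='list-buckets',
                   image='google/cloud-sdk:slim',
                   command=['sh', '-c'],
                   arguments=['gsutil ls'])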
Example #22
def create_pod(environment):
    return client.V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=client.V1ObjectMeta(name="test-pod", ),
        spec=client.V1PodSpec(containers=[
            client.V1Container(
                name="test-container",
                image="nginx",
                env=[client.V1EnvVar(
                    name="ENV",
                    value=environment,
                )])
        ]))
Example #23
    def _use_v3io_cred(task):
        from kubernetes import client as k8s_client
        from os import environ
        web_api = api or environ.get('V3IO_API')
        _user = user or environ.get('V3IO_USERNAME')
        _access_key = access_key or environ.get('V3IO_ACCESS_KEY')

        return (
            task.add_env_variable(
                k8s_client.V1EnvVar(name='V3IO_API', value=web_api))
            .add_env_variable(
                k8s_client.V1EnvVar(name='V3IO_USERNAME', value=_user))
            .add_env_variable(
                k8s_client.V1EnvVar(name='V3IO_ACCESS_KEY', value=_access_key))
            .add_env_variable(
                k8s_client.V1EnvVar(
                    name='CURRENT_NODE_IP',
                    value_from=k8s_client.V1EnvVarSource(
                        field_ref=k8s_client.V1ObjectFieldSelector(
                            api_version='v1', field_path='status.hostIP'))))
            .add_env_variable(
                k8s_client.V1EnvVar(name='IGZ_DATA_CONFIG_FILE',
                                    value='/igz/java/conf/v3io.conf')))
Example #24
    def _create_deployment_object(self,
                                  app_info,
                                  tagged_image,
                                  env_vars_dict,
                                  alternate_api=False):
        deployment_name = app_info['app_name']

        container_port, host_port = self._get_ports(app_info)

        env_list = []
        for key, value in env_vars_dict.items():
            v1_envvar = client.V1EnvVar(name=key, value=value)
            env_list.append(v1_envvar)

        # Configure Pod template container
        container = client.V1Container(
            name=deployment_name,
            image=tagged_image,
            ports=[client.V1ContainerPort(container_port=container_port)],
            env=env_list)

        # Create and configure a spec section
        template = client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(labels={"app": deployment_name}),
            spec=client.V1PodSpec(containers=[container]))

        deployment = ''
        if not alternate_api:
            # Create the specification of deployment
            spec = client.AppsV1beta1DeploymentSpec(replicas=1,
                                                    template=template)

            # Instantiate the deployment object
            deployment = client.AppsV1beta1Deployment(
                api_version="apps/v1beta1",
                kind="Deployment",
                metadata=client.V1ObjectMeta(name=deployment_name),
                spec=spec)
        else:
            # Create the specification of deployment
            spec = client.ExtensionsV1beta1DeploymentSpec(replicas=1,
                                                          template=template)

            # Instantiate the deployment object
            deployment = client.ExtensionsV1beta1Deployment(
                api_version="extensions/v1beta1",
                kind="Deployment",
                metadata=client.V1ObjectMeta(name=deployment_name),
                spec=spec)
        return deployment
Example #25
def schedule_job_in_kubernetes(file, file_identifier):
    job_name = file_identifier

    envs = [
        client.V1EnvVar(name="API_TOKEN", value=os.getenv("API_TOKEN")),
        client.V1EnvVar(
            name="TARGET",
            value=
            f"http://file-drop-traffic-generator-backend:5000/backend/static/{file_identifier}"
        ),
        client.V1EnvVar(name="FILENAME", value=file.filename),
        client.V1EnvVar(name="API_URL", value=os.getenv("API_URL"))
    ]

    processor_container = client.V1Container(
        name="processor", image=os.getenv("PROCESSOR_IMAGE"), env=envs)

    pod_spec = client.V1PodSpec(restart_policy="Never",
                                containers=[processor_container])

    # Create and configure a spec section
    template = client.V1PodTemplateSpec(metadata=client.V1ObjectMeta(
        name=job_name, labels={"app": "file-drop-processor"}),
                                        spec=pod_spec)

    # Create the specification of the job
    spec = client.V1JobSpec(template=template, backoff_limit=0)

    # Instantiate the job object
    job = client.V1Job(api_version="batch/v1",
                       kind="Job",
                       metadata=client.V1ObjectMeta(
                           name=job_name,
                           labels={"app": "file-drop-processor"}),
                       spec=spec)

    client.BatchV1Api().create_namespaced_job(body=job, namespace="default")
Example #26
 def make_deployment(self, params):
     env=[client.V1EnvVar(
             name=k,
             value=str(v)
     ) for k, v in params.items()]
     env.extend([client.V1EnvVar(
         name="LAN",
         value="1"
     )])
     return [client.V1Container(
         env=env,
         image= "registry.npf.dk/gaas-csgo",
         name="csgo",
         resources=client.V1ResourceRequirements(
             limits={
                 "cpu": "4",
                 "memory": "32G"
             },
             requests={
                 "cpu": "2",
                 "memory": "16G"
             }
         ),
         ports=[client.V1ContainerPort(
             container_port=27015,
             protocol="UDP"
         ), client.V1ContainerPort(
             container_port=27020,
             protocol="UDP"
         ), client.V1ContainerPort(
             container_port=27015,
             protocol="TCP"
         ), client.V1ContainerPort(
             container_port=27020,
             protocol="TCP"
         )]
     )]
Example #27
    def env(self, owner, title, config):
        safeowner = clean(owner)
        safetitle = clean(title)
        envs = [
            kclient.V1EnvVar("OWNER", owner),
            kclient.V1EnvVar("TITLE", title),
            kclient.V1EnvVar("EXP_TASK_TIME", str(config["exp_task_time"])),
        ]
        # for sec in [
        #     "BUCKET",
        #     "REDIS_HOST",
        #     "REDIS_PORT",
        #     "REDIS_EXECUTOR_PW",
        # ]:
        #     envs.append(
        #         kclient.V1EnvVar(
        #             sec,
        #             value_from=kclient.V1EnvVarSource(
        #                 secret_key_ref=(
        #                     kclient.V1SecretKeySelector(key=sec, name="worker-secret")
        #                 )
        #             ),
        #         )
        #     )

        for secret in ModelSecrets(owner=owner,
                                   title=title,
                                   project=self.project).list():
            envs.append(
                kclient.V1EnvVar(
                    name=secret,
                    value_from=kclient.V1EnvVarSource(
                        secret_key_ref=(kclient.V1SecretKeySelector(
                            key=secret, name=f"{safeowner}-{safetitle}-secret")
                                        )),
                ))
        return envs
Example #28
def _create_container_object(name, image, always_pull, **kwargs):
    # Set up environment variables
    # Copy any passed in environment variables
    env = kwargs.get('env') or {}
    env_vars = [client.V1EnvVar(name=k, value=env[k]) for k in env]
    # Add POD_IP with the IP address of the pod running the container
    pod_ip = client.V1EnvVarSource(field_ref=client.V1ObjectFieldSelector(
        field_path="status.podIP"))
    env_vars.append(client.V1EnvVar(name="POD_IP", value_from=pod_ip))

    # If a health check is specified, create a readiness/liveness probe
    # (For an HTTP-based check, we assume it's at the first container port)
    readiness = kwargs.get('readiness')
    liveness = kwargs.get('liveness')
    resources = kwargs.get('resources')
    container_ports = kwargs.get('container_ports') or []

    hc_port = container_ports[0][0] if container_ports else None
    probe = _create_probe(readiness, hc_port) if readiness else None
    live_probe = _create_probe(liveness, hc_port) if liveness else None
    resources_obj = _create_resources(resources) if resources else None
    port_objs = [
        client.V1ContainerPort(container_port=port, protocol=proto)
        for port, proto in container_ports
    ]

    # Define container for pod
    return client.V1Container(
        name=name,
        image=image,
        image_pull_policy='Always' if always_pull else 'IfNotPresent',
        env=env_vars,
        ports=port_objs,
        volume_mounts=kwargs.get('volume_mounts') or [],
        resources=resources_obj,
        readiness_probe=probe,
        liveness_probe=live_probe)
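
A hedged usage sketch; the shape of the readiness payload is an assumption (since _create_probe is not shown above), and the image and ports are placeholders.

# container_ports is a list of (port, protocol) tuples; the first port also
# backs the health check when a readiness config is supplied.
container = _create_container_object(
    name="web",
    image="example.org/web:latest",
    always_pull=True,
    env={"LOG_LEVEL": "info"},
    container_ports=[(8080, "TCP")],
    readiness={"type": "http", "endpoint": "/healthz"})  # payload shape assumed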
Example #29
def create_custom_container_spec(custom_model_spec):
    """
    Given a JSON container spec, return a V1Container object
    representing the container. This is used for passing in
    custom server images. The expected format for the input is:

    { "image": "test/containerimage",
      "port":5000,
      "name": "custom-container" }
    """

    env = (
        [
            client.V1EnvVar(name=i["name"], value=i["value"])
            for i in custom_model_spec["env"]
        ]
        if custom_model_spec.get("env", "")
        else None
    )
    ports = (
        [client.V1ContainerPort(container_port=int(custom_model_spec.get("port", "")), protocol="TCP")]
        if custom_model_spec.get("port", "")
        else None
    )
    resources = (
        client.V1ResourceRequirements(
            requests=(custom_model_spec["resources"]["requests"]
                      if custom_model_spec.get('resources', {}).get('requests')
                      else None
                      ),
            limits=(custom_model_spec["resources"]["limits"]
                    if custom_model_spec.get('resources', {}).get('limits')
                    else None
                    ),
        )
        if custom_model_spec.get("resources", {})
        else None
    )
    return client.V1Container(
        name=custom_model_spec.get("name", "custom-container"),
        image=custom_model_spec["image"],
        env=env,
        ports=ports,
        command=custom_model_spec.get("command", None),
        args=custom_model_spec.get("args", None),
        image_pull_policy=custom_model_spec.get("image_pull_policy", None),
        working_dir=custom_model_spec.get("working_dir", None),
        resources=resources
    )
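
A usage sketch following the documented input format; the env and resources entries are illustrative additions based on the keys the function reads.

container = create_custom_container_spec({
    "image": "test/containerimage",
    "port": 5000,
    "name": "custom-container",
    "env": [{"name": "MODEL_NAME", "value": "demo"}],
    "resources": {"requests": {"cpu": "500m", "memory": "512Mi"}},
})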
Example #30
def create_job_object(job_arguments, size, docker_image, docker_image_tag,
                      affinity):

    user = os.environ['USER']
    job = client.V1Job(
        metadata=client.V1ObjectMeta(
            name='kaml-remote-{}-{}'.format(user, uuid.uuid1())),
        spec=client.V1JobSpec(template=client.V1PodTemplateSpec(
            metadata=client.V1ObjectMeta(name='kaml-remote-{}-{}'.format(
                user, uuid.uuid1()),
                                         labels={'type': size}),
            spec=client.V1PodSpec(containers=[
                client.V1Container(
                    name='kaml-remote',
                    args=job_arguments,
                    image='{}:{}'.format(docker_image, docker_image_tag),
                    image_pull_policy='Always',
                    env=[client.V1EnvVar(name='KAML_HOME', value='/app')],
                    volume_mounts=[
                        client.V1VolumeMount(name='kaml-cfg-volume',
                                             read_only=True,
                                             mount_path='/app/kaml.cfg',
                                             sub_path='kaml.cfg'),
                        client.V1VolumeMount(
                            name='gcp-service-account',
                            read_only=True,
                            mount_path='/app/service-key.json',
                            sub_path='service-key.json'),
                    ])
            ],
                                  affinity=affinity,
                                  volumes=[
                                      client.V1Volume(name='kaml-cfg-volume',
                                                      config_map=client.
                                                      V1ConfigMapVolumeSource(
                                                          name='kaml-cfg')),
                                      client.V1Volume(
                                          name='gcp-service-account',
                                          secret=client.V1SecretVolumeSource(
                                              secret_name='gcp-service-account',
                                              items=[
                                                  client.V1KeyToPath(
                                                      key='service-key.json',
                                                      path='service-key.json')
                                              ]))
                                  ],
                                  restart_policy='Never'))))

    return (job)