The examples below show how Airflow's Kubernetes Resources helper is used in real projects, from operator and executor internals to DAG-level resource requests and limits.

Example #1
 def __init__(self,
              namespace,
              image,
              name,
              cmds=None,
              arguments=None,
              env_vars=None,
              secrets=None,
              in_cluster=False,
              labels=None,
              startup_timeout_seconds=120,
              get_logs=True,
              image_pull_policy='IfNotPresent',
              annotations=None,
              resources=None,
              *args,
              **kwargs):
     super(KubernetesPodOperator, self).__init__(*args, **kwargs)
     self.image = image
     self.namespace = namespace
     self.cmds = cmds or []
     self.arguments = arguments or []
     self.labels = labels or {}
     self.startup_timeout_seconds = startup_timeout_seconds
     self.name = name
     self.env_vars = env_vars or {}
     self.secrets = secrets or []
     self.in_cluster = in_cluster
     self.get_logs = get_logs
     self.image_pull_policy = image_pull_policy
     self.annotations = annotations or {}
     self.resources = resources or Resources()
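A hypothetical instantiation of the operator whose constructor appears above, showing where a Resources object plugs in. Names and values are illustrative, DAG wiring is omitted, and the import paths assume the Airflow 1.10-era contrib layout:

from airflow.contrib.kubernetes.pod import Resources
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator

op = KubernetesPodOperator(
    namespace='default',
    image='python:3.6',
    name='resource-demo',
    task_id='resource-demo',   # consumed by BaseOperator via **kwargs
    cmds=['python', '-c', "print('hello')"],
    resources=Resources(request_memory='64Mi', limit_memory='128Mi'),
)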
Example #2
 def __init__(self,
              namespace,
              image,
              name,
              cmds=None,
              arguments=None,
              volume_mounts=None,
              volumes=None,
              env_vars=None,
              secrets=None,
              in_cluster=False,
              cluster_context=None,
              labels=None,
              startup_timeout_seconds=120,
              get_logs=True,
              image_pull_policy='IfNotPresent',
              annotations=None,
              resources=None,
              affinity=None,
              config_file=None,
              xcom_push=False,
              node_selectors=None,
              image_pull_secrets=None,
              service_account_name="default",
              is_delete_operator_pod=False,
              hostnetwork=False,
              tolerations=None,
              configmaps=None,
              security_context=None,
              *args,
              **kwargs):
     super(KubernetesPodOperator, self).__init__(*args, **kwargs)
     self.image = image
     self.namespace = namespace
     self.cmds = cmds or []
     self.arguments = arguments or []
     self.labels = labels or {}
     self.startup_timeout_seconds = startup_timeout_seconds
     self.name = name
     self.env_vars = env_vars or {}
     self.volume_mounts = volume_mounts or []
     self.volumes = volumes or []
     self.secrets = secrets or []
     self.in_cluster = in_cluster
     self.cluster_context = cluster_context
     self.get_logs = get_logs
     self.image_pull_policy = image_pull_policy
     self.node_selectors = node_selectors or {}
     self.annotations = annotations or {}
     self.affinity = affinity or {}
     self.xcom_push = xcom_push
     self.resources = resources or Resources()
     self.config_file = config_file
     self.image_pull_secrets = image_pull_secrets
     self.service_account_name = service_account_name
     self.is_delete_operator_pod = is_delete_operator_pod
     self.hostnetwork = hostnetwork
     self.tolerations = tolerations or []
     self.configmaps = configmaps or []
     self.security_context = security_context or {}
Example #3
 def _set_resources(self, resources):
     # Legacy path: copy each key of a plain dict onto a fresh Resources
     # object, without validating the attribute names.
     input_resource = Resources()
     if resources:
         for item in resources.keys():
             setattr(input_resource, item, resources[item])
     return input_resource
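A runnable restatement of the legacy helper above, with dict keys taken from the other examples on this page. Because the loop uses setattr, every key is copied onto the Resources object verbatim, including misspelled ones (the import path assumes the Airflow 1.10 contrib layout):

from airflow.contrib.kubernetes.pod import Resources

def set_resources(resources):
    # Module-level copy of the legacy loop so it can run outside the class.
    input_resource = Resources()
    if resources:
        for item in resources.keys():
            setattr(input_resource, item, resources[item])
    return input_resource

r = set_resources({'request_memory': '1Gi', 'limit_cpu': '500m'})
print(r.request_memory, r.limit_cpu)  # 1Gi 500m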
Example #4
def convert_resources(container: V1Container) -> Resources:
    resources: V1ResourceRequirements = container.resources
    if not resources:
        return Resources()

    limits = resources.limits or {}
    requests = resources.requests or {}
    gpu_limit = limits.get(
        "nvidia.com/gpu", limits.get("amd.com/gpu")
    )
    return Resources(
        request_memory=requests.get("memory"),
        request_cpu=requests.get("cpu"),
        limit_memory=limits.get("memory"),
        limit_cpu=limits.get("cpu"),
        limit_gpu=gpu_limit,
    )
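A minimal sketch of how convert_resources might be exercised, assuming the official kubernetes Python client is installed; the container below is hypothetical:

from kubernetes.client import V1Container, V1ResourceRequirements

container = V1Container(
    name='base',
    resources=V1ResourceRequirements(
        requests={'memory': '1Gi', 'cpu': '500m'},
        limits={'memory': '2Gi', 'cpu': '1', 'nvidia.com/gpu': '1'},
    ),
)
# Yields request_memory='1Gi', limit_memory='2Gi', limit_gpu='1', and so on.
converted = convert_resources(container)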
Example #5
    def make_pod(self, namespace, worker_uuid, pod_id, dag_id, task_id,
                 execution_date, try_number, airflow_command,
                 kube_executor_config):
        volumes_dict, volume_mounts_dict = self._get_volumes_and_mounts()
        worker_init_container_spec = self._get_init_containers()
        resources = Resources(
            request_memory=kube_executor_config.request_memory,
            request_cpu=kube_executor_config.request_cpu,
            limit_memory=kube_executor_config.limit_memory,
            limit_cpu=kube_executor_config.limit_cpu,
            limit_gpu=kube_executor_config.limit_gpu)
        gcp_sa_key = kube_executor_config.gcp_service_account_key
        annotations = dict(kube_executor_config.annotations
                           ) or self.kube_config.kube_annotations
        if gcp_sa_key:
            annotations['iam.cloud.google.com/service-account'] = gcp_sa_key

        volumes = [value for value in volumes_dict.values()
                   ] + kube_executor_config.volumes
        volume_mounts = [value for value in volume_mounts_dict.values()
                         ] + kube_executor_config.volume_mounts

        affinity = kube_executor_config.affinity or self.kube_config.kube_affinity
        tolerations = kube_executor_config.tolerations or self.kube_config.kube_tolerations

        return Pod(
            namespace=namespace,
            name=pod_id,
            image=kube_executor_config.image or self.kube_config.kube_image,
            image_pull_policy=(kube_executor_config.image_pull_policy
                               or self.kube_config.kube_image_pull_policy),
            cmds=airflow_command,
            labels=self._get_labels(
                kube_executor_config.labels, {
                    'airflow-worker': worker_uuid,
                    'dag_id': dag_id,
                    'task_id': task_id,
                    'execution_date': execution_date,
                    'try_number': str(try_number),
                    'airflow_version': airflow_version.replace('+', '-'),
                    'kubernetes_executor': 'True',
                }),
            envs=self._get_environment(),
            secrets=self._get_secrets(),
            service_account_name=self.kube_config.worker_service_account_name,
            image_pull_secrets=self.kube_config.image_pull_secrets,
            init_containers=worker_init_container_spec,
            volumes=volumes,
            volume_mounts=volume_mounts,
            resources=resources,
            annotations=annotations,
            node_selectors=(kube_executor_config.node_selectors
                            or self.kube_config.kube_node_selectors),
            affinity=affinity,
            tolerations=tolerations,
            security_context=self._get_security_context(),
            configmaps=self._get_configmaps())
Example #6
    def test_extract_limits(self):
        # Test when resources is not empty
        resources = Resources(limit_memory='1Gi', limit_cpu=1)

        pod = Pod('v3.14', {}, [], resources=resources)
        self.expected['spec']['containers'][0]['resources'] = {
            'limits': {
                'memory': '1Gi',
                'cpu': 1
            }
        }
        KubernetesRequestFactory.extract_resources(pod, self.input_req)
        self.assertEqual(self.expected, self.input_req)
Example #7
    def make_pod(self, namespace, worker_uuid, pod_id, dag_id, task_id,
                 execution_date, airflow_command, kube_executor_config):
        volumes_dict, volume_mounts_dict = self.init_volumes_and_mounts()
        worker_init_container_spec = self._get_init_containers(
            copy.deepcopy(volume_mounts_dict))
        resources = Resources(
            request_memory=kube_executor_config.request_memory,
            request_cpu=kube_executor_config.request_cpu,
            limit_memory=kube_executor_config.limit_memory,
            limit_cpu=kube_executor_config.limit_cpu)
        gcp_sa_key = kube_executor_config.gcp_service_account_key
        annotations = dict(kube_executor_config.annotations)
        if gcp_sa_key:
            annotations['iam.cloud.google.com/service-account'] = gcp_sa_key

        volumes = [value for value in volumes_dict.values()
                   ] + kube_executor_config.volumes
        volume_mounts = [value for value in volume_mounts_dict.values()
                         ] + kube_executor_config.volume_mounts

        affinity = kube_executor_config.affinity or self.kube_config.kube_affinity
        tolerations = kube_executor_config.tolerations or self.kube_config.kube_tolerations

        return Pod(
            namespace=namespace,
            name=pod_id,
            image=kube_executor_config.image or self.kube_config.kube_image,
            image_pull_policy=(kube_executor_config.image_pull_policy
                               or self.kube_config.kube_image_pull_policy),
            cmds=airflow_command,
            labels={
                'airflow-worker': worker_uuid,
                'dag_id': dag_id,
                'task_id': task_id,
                'execution_date': execution_date
            },
            envs=self._get_environment(),
            secrets=self._get_secrets(),
            service_account_name=self.kube_config.worker_service_account_name,
            image_pull_secrets=self.kube_config.image_pull_secrets,
            init_containers=worker_init_container_spec,
            volumes=volumes,
            volume_mounts=volume_mounts,
            resources=resources,
            annotations=annotations,
            node_selectors=(kube_executor_config.node_selectors
                            or self.kube_config.kube_node_selectors),
            affinity=affinity,
            tolerations=tolerations)
Example #8
    def make_pod(self, namespace, worker_uuid, pod_id, dag_id, task_id, execution_date,
                 airflow_command, kube_executor_config):
        volumes, volume_mounts = self.init_volumes_and_mounts()
        worker_init_container_spec = self._get_init_containers(
            copy.deepcopy(volume_mounts))
        resources = Resources(
            request_memory=kube_executor_config.request_memory,
            request_cpu=kube_executor_config.request_cpu,
            limit_memory=kube_executor_config.limit_memory,
            limit_cpu=kube_executor_config.limit_cpu
        )
        gcp_sa_key = kube_executor_config.gcp_service_account_key
        annotations = {
            'iam.cloud.google.com/service-account': gcp_sa_key
        } if gcp_sa_key else {}

        return Pod(
            namespace=namespace,
            name=pod_id,
            image=kube_executor_config.image or self.kube_config.kube_image,
            image_pull_policy=(kube_executor_config.image_pull_policy or
                               self.kube_config.kube_image_pull_policy),
            cmds=['bash', '-cx', '--'],
            args=[airflow_command],
            labels={
                'airflow-worker': worker_uuid,
                'dag_id': dag_id,
                'task_id': task_id,
                'execution_date': execution_date
            },
            envs=self._get_environment(),
            secrets=self._get_secrets(),
            service_account_name=self.kube_config.worker_service_account_name,
            image_pull_secrets=self.kube_config.image_pull_secrets,
            init_containers=worker_init_container_spec,
            volumes=volumes,
            volume_mounts=volume_mounts,
            resources=resources,
            annotations=annotations,
            node_selectors=(kube_executor_config.node_selectors or
                            self.kube_config.kube_node_selectors),
            affinity=kube_executor_config.affinity
        )
Example #9
    def make_pod(self, namespace, worker_uuid, pod_id, dag_id, task_id,
                 execution_date, airflow_command, kube_executor_config):
        volumes, volume_mounts = self.init_volumes_and_mounts()
        worker_init_container_spec = self._get_init_containers(
            copy.deepcopy(volume_mounts))
        resources = Resources(
            request_memory=kube_executor_config.request_memory,
            request_cpu=kube_executor_config.request_cpu,
            limit_memory=kube_executor_config.limit_memory,
            limit_cpu=kube_executor_config.limit_cpu)
        gcp_sa_key = kube_executor_config.gcp_service_account_key
        annotations = {
            'iam.cloud.google.com/service-account': gcp_sa_key
        } if gcp_sa_key else {}
        airflow_command = airflow_command.replace("-sd", "-i -sd")
        airflow_path = airflow_command.split('-sd')[-1]
        airflow_path = self.worker_airflow_home + airflow_path.split('/')[-1]
        airflow_command = airflow_command.split(
            '-sd')[0] + '-sd ' + airflow_path

        return Pod(
            namespace=namespace,
            name=pod_id,
            image=kube_executor_config.image or self.kube_config.kube_image,
            cmds=['bash', '-cx', '--'],
            args=[airflow_command],
            labels={
                'airflow-worker': worker_uuid,
                'dag_id': dag_id,
                'task_id': task_id,
                'execution_date': execution_date
            },
            envs=self._get_environment(),
            secrets=self._get_secrets(),
            service_account_name=self.kube_config.worker_service_account_name,
            image_pull_secrets=self.kube_config.image_pull_secrets,
            init_containers=worker_init_container_spec,
            volumes=volumes,
            volume_mounts=volume_mounts,
            resources=resources,
            annotations=annotations)
Example #10
    def test_extract_all_resources(self):
        # Test when resources is not empty
        resources = Resources(request_memory='1Gi',
                              request_cpu=1,
                              limit_memory='2Gi',
                              limit_cpu=2,
                              limit_gpu=3)

        pod = Pod('v3.14', {}, [], resources=resources)
        self.expected['spec']['containers'][0]['resources'] = {
            'requests': {
                'memory': '1Gi',
                'cpu': 1
            },
            'limits': {
                'memory': '2Gi',
                'cpu': 2,
                'nvidia.com/gpu': 3
            }
        }
        KubernetesRequestFactory.extract_resources(pod, self.input_req)
        self.assertEqual(self.input_req, self.expected)
Example #11
    def make_pod(self, namespace, worker_uuid, pod_id, dag_id, task_id,
                 execution_date, airflow_command, kube_executor_config):
        volumes, volume_mounts = self._get_volumes_and_mounts()
        worker_init_container_spec = self._get_init_containers(
            copy.deepcopy(volume_mounts))
        resources = Resources(
            request_memory=kube_executor_config.request_memory,
            request_cpu=kube_executor_config.request_cpu,
            limit_memory=kube_executor_config.limit_memory,
            limit_cpu=kube_executor_config.limit_cpu)
        gcp_sa_key = kube_executor_config.gcp_service_account_key
        annotations = {
            "iam.cloud.google.com/service-account": gcp_sa_key
        } if gcp_sa_key else {}

        return Pod(
            namespace=namespace,
            name=pod_id,
            image=kube_executor_config.image or self.kube_config.kube_image,
            cmds=["bash", "-cx", "--"],
            args=[airflow_command],
            labels={
                "airflow-worker": worker_uuid,
                "dag_id": dag_id,
                "task_id": task_id,
                "execution_date": execution_date
            },
            envs=self._get_environment(),
            secrets=self._get_secrets(),
            service_account_name=self.kube_config.worker_service_account_name,
            image_pull_secrets=self.kube_config.image_pull_secrets,
            init_containers=worker_init_container_spec,
            volumes=volumes,
            volume_mounts=volume_mounts,
            resources=resources,
            annotations=annotations)
Example #12
          concurrency=100)

start = KubernetesPodOperator(
    namespace='dev',
    image="python:3.6",
    cmds=["python", "-c"],
    arguments=[
        "from time import sleep; sleep(5); print('slept for 5 seconds')"
    ],
    labels={"foo": "bar"},
    name="sleeper-agent-start",
    task_id="sleeper-agent-start-task",
    get_logs=True,
    dag=dag,
    affinity=solver_affinity,
    resources=Resources(request_cpu='100m'),
    in_cluster=True)
end = KubernetesPodOperator(
    namespace='dev',
    image="python:3.6",
    cmds=["python", "-c"],
    arguments=[
        "from time import sleep; sleep(5); print('slept for 5 seconds')"
    ],
    labels={"foo": "bar"},
    name="sleeper-agent-end",
    task_id="sleeper-agent-end-task",
    get_logs=True,
    dag=dag,
    affinity=solver_affinity,
    resources=Resources(request_cpu='100m'),
    in_cluster=True)
Example #13
# USEFUL VARIABLES
gcloud_image = Variable.get("gcloud_image")
image = Variable.get("pod_image")
cluster_name = Variable.get("cluster_name")
service_account_email = Variable.get("service_account_email")


_MAIN_DAG_ID = "test-dag-spawn-pools"
pool_name = "test-dag-spawn-pool"
machine_type = "n1-standard-4"
num_nodes = 1
node_taint = "only-test-pod"

pod_resources = Resources(request_memory="10000Mi",
                          limit_memory="10000Mi",
                          limit_cpu="2",
                          request_cpu="2")

default_args = {
    "owner":               "airflow",
    "start_date":          datetime(2018, 12, 1),
    "email":               ["*****@*****.**"],
    # Define some variables to cancel email sending
    "email_on_failure":    False,
    "email_on_retry":      False,
    "retries":             3,
    "retry_delay":         timedelta(seconds=5),
    'provide_context':     True,
    'catchup':             True
}
Example #14
    gke_location = "us-central1-a"
    gke_cluster_name = "bq-load-gke-1"

    # Built from repo https://github.com/mozilla/probe-scraper
    probe_scraper_image = 'gcr.io/moz-fx-data-airflow-prod-88e0/probe-scraper:latest'
    probe_scraper_args = [
        'python3', '-m', 'probe_scraper.runner', '--out-dir',
        '/app/probe_data', '--cache-dir', '/app/probe_cache',
        '--output-bucket', 'net-mozaws-prod-us-west-2-data-pitmo',
        '--cache-bucket', 'telemetry-airflow-cache', '--env', 'prod'
    ]

    # Cluster autoscaling works on pod resource requests, instead of usage
    resources = Resources(request_memory='13312Mi',
                          request_cpu=None,
                          limit_memory='20480Mi',
                          limit_cpu=None)

    probe_scraper = GKEPodOperator(
        task_id="probe_scraper",
        gcp_conn_id=gcp_conn_id,
        project_id=connection.project_id,
        location=gke_location,
        cluster_name=gke_cluster_name,
        name='probe-scraper',
        namespace='default',
        # Needed to scale the highmem pool from 0 -> 1
        resources=resources,
        # This python job requires 13 GB of memory, thus the highmem node pool
        node_selectors={"nodepool": "highmem"},
        # Due to the nature of the container run, we set get_logs to False,
Example #15
 def test_display_resources(self):
     resources_string = str(Resources('1Gi', 1))
     self.assertEqual(
         resources_string,
         "Request: [cpu: 1, memory: 1Gi], Limit: [cpu: None, memory: None, gpu: None]"
     )
Example #16
 def _set_resources(self, resources):
     return Resources(**resources) if resources else Resources()
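For contrast with the legacy setattr loop of Example #3, a short self-contained sketch (same assumed import path as above): the keyword-splat form validates keys against the Resources constructor, so a misspelled key raises TypeError instead of landing on the object silently.

from airflow.contrib.kubernetes.pod import Resources

def set_resources(resources):
    # Same one-liner as Example #16, lifted out of the class for the demo.
    return Resources(**resources) if resources else Resources()

print(set_resources({'request_memory': '256Mi', 'limit_cpu': '500m'}))
print(set_resources(None))  # no spec given: returns a default Resources()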
Example #17
    # The environment variable the secret is specified under
    deploy_target='SQL_CONN',
    # Name of secret in Kubernetes
    secret='airflow-secrets',
    # Key of the secret within Kubernetes
    key='sql_alchemy_conn')

resources_obj = Resources(
    # Amount of memory requested; can use E, P, T, G, M, K, Ei, Pi,
    # Ti, Gi, Mi, Ki as suffixes
    request_memory='200Mi',
    # Tells the container to attempt to use the specified amount of CPU. One
    # 'cpu' is equivalent to 1 AWS vCPU, 1 GCP Core, 1 Azure vCore, or 1
    # hyperthread on a bare-metal Intel processor with Hyperthreading.
    # If the CPU request exceeds every node's capacity, the Pod will never be
    # scheduled. The m suffix stands for milli-cpus, so .5 cpu and 500m cpu
    # are equivalent.
    request_cpu='500m',
    # If the memory limit is exceeded, the Pod becomes a candidate for
    # termination; if no limit is specified, there is no upper bound on the
    # memory it can use. A default memory limit can also be set per namespace.
    # Note that each limit must be at least as large as the matching request,
    # or Kubernetes will reject the Pod.
    limit_memory='200Mi',
    limit_cpu='500m')

# Creates a volume of type emptyDir without any configs
volumes = [Volume(name='empty-vol', configs={'emptyDir': {}})]

# Used to mount pod level volumes to a running container
volume_mounts = [
    VolumeMount(name='test-vol-mount',
                # The remaining VolumeMount arguments were cut off in the
                # source; mount_path, sub_path, and read_only are assumed.
                mount_path='/root/mount_file',
                sub_path=None,
                read_only=True)
]
Example #18
    all_parts = [
        split_date_parts((from_date + timedelta(days=i)), partition)
        for i in range(delta.days + 1)
    ]

    seen = set()
    parts = []
    # loops through every day and pulls out unique set of date parts
    for p in all_parts:
        if p["part"] not in seen:
            seen.add(p["part"])
            parts.append({k: v for k, v in p.items()})
    return parts


# Set the resources for the task pods
pod_resources = Resources(request_memory="500Mi", request_cpu="500m")

# Default settings for all DAGs
pod_defaults = dict(
    get_logs=True,
    image_pull_policy="Always",
    in_cluster=True,
    is_delete_operator_pod=True,
    namespace=os.environ["NAMESPACE"],
    #    resources=pod_resources,
    cmds=["/bin/bash", "-c"],
)

# Default environment variables for worker pods
env = os.environ.copy()
pod_env_vars = {
Example #19
    def make_pod(self, namespace, worker_uuid, pod_id, dag_id, task_id,
                 execution_date, try_number, airflow_command,
                 kube_executor_config):
        volumes_dict, volume_mounts_dict = self._get_volumes_and_mounts()
        worker_init_container_spec = self._get_init_containers()
        # TODO: add resources merge
        resources = Resources(
            request_memory=kube_executor_config.request_memory,
            request_cpu=kube_executor_config.request_cpu,
            limit_memory=kube_executor_config.limit_memory,
            limit_cpu=kube_executor_config.limit_cpu,
            limit_gpu=kube_executor_config.limit_gpu)
        # hack for creating default resources
        # TODO: make it safe
        if resources.is_empty_resource_request(
        ) and self.kube_config.default_limits:
            resources = Resources(
                request_memory=deep_get(self.kube_config.default_limits,
                                        'request.memory'),
                request_cpu=deep_get(self.kube_config.default_limits,
                                     'request.cpu'),
                limit_memory=deep_get(self.kube_config.default_limits,
                                      'limit.memory'),
                limit_cpu=deep_get(self.kube_config.default_limits,
                                   'limit.cpu'))

        gcp_sa_key = kube_executor_config.gcp_service_account_key
        annotations = dict(kube_executor_config.annotations
                           ) or self.kube_config.kube_annotations
        if gcp_sa_key:
            annotations['iam.cloud.google.com/service-account'] = gcp_sa_key

        volumes = [value for value in volumes_dict.values()
                   ] + kube_executor_config.volumes
        volume_mounts = [value for value in volume_mounts_dict.values()
                         ] + kube_executor_config.volume_mounts

        affinity = kube_executor_config.affinity or self.kube_config.kube_affinity
        tolerations = kube_executor_config.tolerations or self.kube_config.kube_tolerations

        return Pod(
            namespace=namespace,
            name=pod_id,
            image=kube_executor_config.image or self.kube_config.kube_image,
            image_pull_policy=(kube_executor_config.image_pull_policy
                               or self.kube_config.kube_image_pull_policy),
            cmds=airflow_command,
            labels=self._get_labels(
                kube_executor_config.labels, {
                    'airflow-worker': worker_uuid,
                    'dag_id': dag_id,
                    'task_id': task_id,
                    'execution_date': execution_date,
                    'try_number': str(try_number),
                }),
            envs=self._get_environment(),
            secrets=self._get_secrets(),
            service_account_name=self.kube_config.worker_service_account_name,
            image_pull_secrets=self.kube_config.image_pull_secrets,
            init_containers=worker_init_container_spec,
            volumes=volumes,
            volume_mounts=volume_mounts,
            resources=resources,
            annotations=annotations,
            node_selectors=(kube_executor_config.node_selectors
                            or self.kube_config.kube_node_selectors),
            affinity=affinity,
            tolerations=tolerations,
            security_context=self._get_security_context(),
            configmaps=self._get_configmaps(),
            scheduler_name=self.kube_config.scheduler_name)
Example #20
 def setUp(self):
     self.simple_pod_request_factory = SimplePodRequestFactory()
     self.xcom_pod_request_factory = ExtractXcomPodRequestFactory()
     self.pod = Pod(
         image='busybox',
         envs={
             'ENVIRONMENT': 'prod',
             'LOG_LEVEL': 'warning'
         },
         name='myapp-pod',
         cmds=['sh', '-c', 'echo Hello Kubernetes!'],
         labels={'app': 'myapp'},
         image_pull_secrets='pull_secret_a,pull_secret_b',
         configmaps=['configmap_a', 'configmap_b'],
         ports=[{'name': 'foo', 'containerPort': 1234}],
         resources=Resources('1Gi', 1, '2Gi', 2, 1),
         secrets=[
             # This should be a secretRef
             Secret('env', None, 'secret_a'),
             # This should be a single secret mounted in volumeMounts
             Secret('volume', '/etc/foo', 'secret_b'),
             # This should produce a single secret mounted in env
             Secret('env', 'TARGET', 'secret_b', 'source_b'),
         ],
         security_context={
             'runAsUser': 1000,
             'fsGroup': 2000,
         }
     )
     self.maxDiff = None
     self.expected = {
         'apiVersion': 'v1',
         'kind': 'Pod',
         'metadata': {
             'name': 'myapp-pod',
             'labels': {'app': 'myapp'},
             'annotations': {}},
         'spec': {
             'containers': [{
                 'name': 'base',
                 'image': 'busybox',
                 'command': [
                     'sh', '-c', 'echo Hello Kubernetes!'
                 ],
                 'imagePullPolicy': 'IfNotPresent',
                 'args': [],
                 'env': [{
                     'name': 'ENVIRONMENT',
                     'value': 'prod'
                 }, {
                     'name': 'LOG_LEVEL',
                     'value': 'warning'
                 }, {
                     'name': 'TARGET',
                     'valueFrom': {
                         'secretKeyRef': {
                             'name': 'secret_b',
                             'key': 'source_b'
                         }
                     }
                 }],
                 'envFrom': [{
                     'secretRef': {
                         'name': 'secret_a'
                     }
                 }, {
                     'configMapRef': {
                         'name': 'configmap_a'
                     }
                 }, {
                     'configMapRef': {
                         'name': 'configmap_b'
                     }
                 }],
                 'resources': {
                     'requests': {
                         'memory': '1Gi',
                         'cpu': 1
                     },
                     'limits': {
                         'memory': '2Gi',
                         'cpu': 2,
                         'nvidia.com/gpu': 1
                     },
                 },
                 'ports': [{'name': 'foo', 'containerPort': 1234}],
                 'volumeMounts': [{
                     'mountPath': '/etc/foo',
                     'name': 'secretvol0',
                     'readOnly': True
                 }]
             }],
             'restartPolicy': 'Never',
             'nodeSelector': {},
             'volumes': [{
                 'name': 'secretvol0',
                 'secret': {
                     'secretName': 'secret_b'
                 }
             }],
             'imagePullSecrets': [
                 {'name': 'pull_secret_a'},
                 {'name': 'pull_secret_b'}
             ],
             'affinity': {},
             'securityContext': {
                 'runAsUser': 1000,
                 'fsGroup': 2000,
             },
         }
     }
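A hedged sketch of a test that could sit next to the fixture above. The create(pod) call mirrors how the pod request factories of this Airflow generation are driven, but treat the method name as an assumption here:

 def test_simple_pod_request(self):
     # Assumed API: create(pod) renders the Pod into a request dict that
     # should match the expected spec assembled in setUp().
     result = self.simple_pod_request_factory.create(self.pod)
     self.assertEqual(self.expected, result)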