Example #1
from kubernetes.client import (V1Affinity, V1NodeAffinity, V1NodeSelector,
                               V1NodeSelectorRequirement, V1NodeSelectorTerm)

def my_pipeline():
    # Require scheduling onto p2.xlarge nodes (beta.kubernetes.io/instance-type
    # is superseded by node.kubernetes.io/instance-type on newer clusters).
    affinity = V1Affinity(
        node_affinity=V1NodeAffinity(
            required_during_scheduling_ignored_during_execution=V1NodeSelector(
                node_selector_terms=[V1NodeSelectorTerm(
                    match_expressions=[V1NodeSelectorRequirement(
                        key='beta.kubernetes.io/instance-type',
                        operator='In',
                        values=['p2.xlarge'])])])))
    some_op().add_affinity(affinity)  # some_op: component factory defined elsewhere
Example #2
from kubernetes.client import (V1Affinity, V1NodeAffinity, V1NodeSelector,
                               V1NodeSelectorRequirement, V1NodeSelectorTerm)

def affinity_pipeline():
    """A pipeline with affinity."""
    affinity = V1Affinity(
        node_affinity=V1NodeAffinity(
            required_during_scheduling_ignored_during_execution=V1NodeSelector(
                node_selector_terms=[V1NodeSelectorTerm(
                    match_expressions=[V1NodeSelectorRequirement(
                        key='kubernetes.io/os',
                        operator='In',
                        values=['linux'])])])))
    echo_op().add_affinity(affinity)
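Both pipeline snippets above assume the Kubeflow Pipelines v1 SDK, where ContainerOp.add_affinity() copies the V1Affinity into the pod spec that the compiler emits; some_op and echo_op are component factories defined elsewhere. A minimal sketch of how such a pipeline is typically decorated and compiled (the pipeline name and output path here are illustrative):

import kfp
from kfp import dsl

@dsl.pipeline(name='affinity-pipeline', description='Pin ops to linux nodes')
def affinity_pipeline():
    ...  # body as in Example #2 above

if __name__ == '__main__':
    kfp.compiler.Compiler().compile(affinity_pipeline, 'affinity_pipeline.yaml')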
Example #3
from kubernetes.client import (V1Affinity, V1NodeAffinity, V1NodeSelector,
                               V1NodeSelectorRequirement, V1NodeSelectorTerm,
                               V1PreferredSchedulingTerm)

def _set_preemptible(task):
    # `toleration` and `hard_constraint` are closed over from the enclosing
    # scope (see the factory sketch below).
    task.add_toleration(toleration)
    node_selector_term = V1NodeSelectorTerm(match_expressions=[
        V1NodeSelectorRequirement(key='cloud.google.com/gke-preemptible',
                                  operator='In',
                                  values=['true'])
    ])
    if hard_constraint:
        node_affinity = V1NodeAffinity(
            required_during_scheduling_ignored_during_execution=V1NodeSelector(
                node_selector_terms=[node_selector_term]))
    else:
        # preferred_during_scheduling_ignored_during_execution expects a list
        # of V1PreferredSchedulingTerm, so the single term must be wrapped in one.
        node_affinity = V1NodeAffinity(
            preferred_during_scheduling_ignored_during_execution=[
                V1PreferredSchedulingTerm(preference=node_selector_term,
                                          weight=50)
            ])
    affinity = V1Affinity(node_affinity=node_affinity)
    task.add_affinity(affinity=affinity)
    return task
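The free variables toleration and hard_constraint above indicate that _set_preemptible is returned from an enclosing factory. A minimal sketch of such a wrapper, assuming the Kubeflow Pipelines v1 op-transformer pattern (the factory name and toleration defaults are illustrative, not necessarily the project's actual code):

from kubernetes.client import V1Toleration

def use_preemptible_nodepool(hard_constraint=False):
    # Tolerate the taint that GKE places on preemptible nodes.
    toleration = V1Toleration(effect='NoSchedule',
                              key='preemptible',
                              operator='Equal',
                              value='true')

    def _set_preemptible(task):
        ...  # body as in Example #3, closing over toleration and hard_constraint

    return _set_preemptible

A pipeline would then opt individual ops in with some_op().apply(use_preemptible_nodepool()).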
Example #4
def test_run(mock_get_node_affinity, k8s_executor):
    task_config = KubernetesTaskConfig(
        name="fake_task_name",
        uuid="fake_id",
        image="fake_docker_image",
        command="fake_command",
        cpus=1,
        memory=1024,
        disk=1024,
        volumes=[{
            "host_path": "/a",
            "container_path": "/b",
            "mode": "RO"
        }],
        node_selectors={"hello": "world"},
        node_affinities=[dict(key="a_label", operator="In", value=[])],
        labels={
            "some_label": "some_label_value",
        },
        annotations={
            "paasta.yelp.com/some_annotation": "some_value",
        },
        service_account_name="testsa",
    )
    expected_container = V1Container(
        image=task_config.image,
        name="main",
        command=["/bin/sh", "-c"],
        args=[task_config.command],
        security_context=V1SecurityContext(
            capabilities=V1Capabilities(drop=list(task_config.cap_drop)), ),
        resources=V1ResourceRequirements(limits={
            "cpu": 1.0,
            "memory": "1024.0Mi",
            "ephemeral-storage": "1024.0Mi",
        }),
        env=[],
        volume_mounts=[
            V1VolumeMount(
                mount_path="/b",
                name="host--slash-a",
                read_only=True,
            )
        ],
    )
    expected_pod = V1Pod(
        metadata=V1ObjectMeta(
            name=task_config.pod_name,
            namespace="task_processing_tests",
            labels={
                "some_label": "some_label_value",
            },
            annotations={
                "paasta.yelp.com/some_annotation": "some_value",
            },
        ),
        spec=V1PodSpec(
            restart_policy=task_config.restart_policy,
            containers=[expected_container],
            volumes=[
                V1Volume(
                    host_path=V1HostPathVolumeSource(path="/a"),
                    name="host--slash-a",
                )
            ],
            share_process_namespace=True,
            security_context=V1PodSecurityContext(
                fs_group=task_config.fs_group, ),
            node_selector={"hello": "world"},
            affinity=V1Affinity(
                node_affinity=mock_get_node_affinity.return_value),
            dns_policy="Default",
            service_account_name=task_config.service_account_name,
        ),
    )

    assert k8s_executor.run(task_config) == task_config.pod_name
    assert k8s_executor.kube_client.core.create_namespaced_pod.call_args_list == [
        mock.call(body=expected_pod, namespace='task_processing_tests')
    ]
    assert mock_get_node_affinity.call_args_list == [
        mock.call(pvector([dict(key="a_label", operator="In", value=[])])),
    ]
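The test above relies on two pytest fixtures that the snippet does not show: k8s_executor (an executor wired to a mocked kube client) and mock_get_node_affinity (a patch over the get_node_affinity helper used in Example #5); pvector and v come from the pyrsistent library. A rough sketch of the patching fixture, with the patch target left as an obvious placeholder since the real module layout is not shown:

from unittest import mock

import pytest

@pytest.fixture
def mock_get_node_affinity():
    # "path.to.executor_module" is a placeholder: patch get_node_affinity
    # wherever the executor module actually looks it up.
    with mock.patch(
            "path.to.executor_module.get_node_affinity",
            autospec=True,
    ) as mocked:
        yield mocked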
Example #5
    def run(self, task_config: KubernetesTaskConfig) -> Optional[str]:
        try:
            container = V1Container(
                image=task_config.image,
                # XXX: we were initially planning on using the name from KubernetesTaskConfig here,
                # but it's too easy to go over the length limit for container names (63 characters),
                # so we're just hardcoding something for now since container names aren't used for
                # anything at the moment
                name="main",
                command=["/bin/sh", "-c"],
                args=[task_config.command],
                security_context=get_security_context_for_capabilities(
                    cap_add=task_config.cap_add,
                    cap_drop=task_config.cap_drop,
                ),
                resources=V1ResourceRequirements(
                    limits={
                        "cpu": task_config.cpus,
                        "memory": f"{task_config.memory}Mi",
                        "ephemeral-storage": f"{task_config.disk}Mi",
                    }),
                env=get_kubernetes_env_vars(
                    task_config.environment,
                    task_config.secret_environment,
                ),
                volume_mounts=get_kubernetes_volume_mounts(
                    task_config.volumes))
            pod = V1Pod(
                metadata=V1ObjectMeta(
                    name=task_config.pod_name,
                    namespace=self.namespace,
                    labels=dict(task_config.labels),
                    annotations=dict(task_config.annotations),
                ),
                spec=V1PodSpec(
                    restart_policy=task_config.restart_policy,
                    containers=[container],
                    volumes=get_pod_volumes(task_config.volumes),
                    node_selector=dict(task_config.node_selectors),
                    affinity=V1Affinity(node_affinity=get_node_affinity(
                        task_config.node_affinities), ),
                    # we're hardcoding this as Default since this is what we generally
                    # use internally - until we have a use case for something that needs
                    # one of the other DNS policies, we can probably punt on plumbing
                    # all the bits for making this configurable
                    dns_policy="Default",
                    share_process_namespace=True,
                    security_context=V1PodSecurityContext(
                        fs_group=task_config.fs_group, ),
                    service_account_name=task_config.service_account_name,
                ),
            )
        except Exception:
            logger.exception(
                f"Unable to create PodSpec for {task_config.pod_name}")
            return None

        # we need to lock here since there will be other threads updating this metadata in response
        # to k8s events
        with self.task_metadata_lock:
            self.task_metadata = self.task_metadata.set(
                task_config.pod_name,
                KubernetesTaskMetadata(
                    task_config=task_config,
                    task_state=KubernetesTaskState.TASK_PENDING,
                    task_state_history=v(
                        (KubernetesTaskState.TASK_PENDING, time.time())),
                ),
            )

        if self.kube_client.create_pod(
                namespace=self.namespace,
                pod=pod,
        ):
            return task_config.pod_name

        with self.task_metadata_lock:
            # if we weren't able to create this pod, then we shouldn't keep it around in
            # task_metadata
            self.task_metadata = self.task_metadata.discard(
                task_config.pod_name)

        return None
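Example #5 delegates node-affinity construction to a get_node_affinity() helper, and Example #4 shows it being called with dicts of the form {"key": ..., "operator": ..., "value": [...]}. A hedged sketch of what such a helper could look like, derived only from that observed shape (the project's real implementation may differ):

from typing import List, Mapping, Optional

from kubernetes.client import (V1NodeAffinity, V1NodeSelector,
                               V1NodeSelectorRequirement, V1NodeSelectorTerm)

def get_node_affinity(affinities: List[Mapping]) -> Optional[V1NodeAffinity]:
    requirements = [
        V1NodeSelectorRequirement(key=a["key"],
                                  operator=a["operator"],
                                  values=list(a["value"]))
        for a in affinities
    ]
    if not requirements:
        # No affinities requested: leave node_affinity unset on the pod spec.
        return None
    return V1NodeAffinity(
        required_during_scheduling_ignored_during_execution=V1NodeSelector(
            node_selector_terms=[
                V1NodeSelectorTerm(match_expressions=requirements)
            ]))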