def test_extend_object_field(self):
    base_ports = [k8s.V1ContainerPort(container_port=1, name='base_port')]
    base_obj = k8s.V1Container(name='base_container', ports=base_ports)
    client_ports = [k8s.V1ContainerPort(container_port=1, name='client_port')]
    client_obj = k8s.V1Container(name='client_container', ports=client_ports)
    res = extend_object_field(base_obj, client_obj, 'ports')
    client_obj.ports = base_ports + client_ports
    assert client_obj == res
def test_extend_object_field_not_list(self):
    base_obj = k8s.V1Container(name='base_container', image='image')
    client_obj = k8s.V1Container(name='client_container')
    with pytest.raises(ValueError):
        extend_object_field(base_obj, client_obj, 'image')
    base_obj = k8s.V1Container(name='base_container')
    client_obj = k8s.V1Container(name='client_container', image='image')
    with pytest.raises(ValueError):
        extend_object_field(base_obj, client_obj, 'image')
def test_reconcile_specs(self):
    base_objs = [k8s.V1Container(name='base_container1', image='base_image')]
    client_objs = [k8s.V1Container(name='client_container1')]
    base_spec = k8s.V1PodSpec(priority=1, active_deadline_seconds=100, containers=base_objs)
    client_spec = k8s.V1PodSpec(priority=2, hostname='local', containers=client_objs)
    res = PodGenerator.reconcile_specs(base_spec, client_spec)
    client_spec.containers = [k8s.V1Container(name='client_container1', image='base_image')]
    client_spec.active_deadline_seconds = 100
    assert client_spec == res
def test_extend_object_field_empty(self):
    ports = [k8s.V1ContainerPort(container_port=1, name='port')]
    base_obj = k8s.V1Container(name='base_container', ports=ports)
    client_obj = k8s.V1Container(name='client_container')
    res = extend_object_field(base_obj, client_obj, 'ports')
    client_obj.ports = ports
    self.assertEqual(client_obj, res)
    base_obj = k8s.V1Container(name='base_container')
    client_obj = k8s.V1Container(name='base_container', ports=ports)
    res = extend_object_field(base_obj, client_obj, 'ports')
    self.assertEqual(client_obj, res)
def test_reconcile_containers_empty(self):
    base_objs = [k8s.V1Container(name='base_container')]
    client_objs = []
    res = PodGenerator.reconcile_containers(base_objs, client_objs)
    assert base_objs == res
    client_objs = [k8s.V1Container(name='client_container')]
    base_objs = []
    res = PodGenerator.reconcile_containers(base_objs, client_objs)
    assert client_objs == res
    res = PodGenerator.reconcile_containers([], [])
    assert res == []
def test_reconcile_containers(self):
    base_ports = [k8s.V1ContainerPort(container_port=1, name='base_port')]
    base_objs = [
        k8s.V1Container(name='base_container1', ports=base_ports),
        k8s.V1Container(name='base_container2', image='base_image'),
    ]
    client_ports = [k8s.V1ContainerPort(container_port=2, name='client_port')]
    client_objs = [
        k8s.V1Container(name='client_container1', ports=client_ports),
        k8s.V1Container(name='client_container2', image='client_image'),
    ]
    res = PodGenerator.reconcile_containers(base_objs, client_objs)
    client_objs[0].ports = base_ports + client_ports
    assert client_objs == res

    base_ports = [k8s.V1ContainerPort(container_port=1, name='base_port')]
    base_objs = [
        k8s.V1Container(name='base_container1', ports=base_ports),
        k8s.V1Container(name='base_container2', image='base_image'),
    ]
    client_ports = [k8s.V1ContainerPort(container_port=2, name='client_port')]
    client_objs = [
        k8s.V1Container(name='client_container1', ports=client_ports),
        k8s.V1Container(name='client_container2', stdin=True),
    ]
    res = PodGenerator.reconcile_containers(base_objs, client_objs)
    client_objs[0].ports = base_ports + client_ports
    client_objs[1].image = 'base_image'
    assert client_objs == res
def build_pod_spec(name, bucket, data_dir):
    metadata = k8s.V1ObjectMeta(name=make_unique_pod_name(name))
    container = k8s.V1Container(
        name=name,
        lifecycle=k8s.V1Lifecycle(
            post_start=k8s.V1Handler(_exec=k8s.V1ExecAction(command=[
                "gcsfuse",
                "--log-file", "/var/log/gcs_fuse.log",
                "--temp-dir", "/tmp",
                "--debug_gcs",
                bucket,
                data_dir,
            ])),
            pre_stop=k8s.V1Handler(_exec=k8s.V1ExecAction(command=["fusermount", "-u", data_dir])),
        ),
        security_context=k8s.V1SecurityContext(
            privileged=True,
            capabilities=k8s.V1Capabilities(add=["SYS_ADMIN"]),
        ),
    )
    pod = k8s.V1Pod(metadata=metadata, spec=k8s.V1PodSpec(containers=[container]))
    return pod
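
# Hedged usage sketch for build_pod_spec above: the helper only wires up the
# gcsfuse lifecycle hooks and the privileged security context, so the caller
# is assumed to set the container image and submit the pod itself. The bucket,
# mount path, and image below are placeholders, not values from the original code.
pod = build_pod_spec(name="gcsfuse-worker", bucket="example-bucket", data_dir="/data")
pod.spec.containers[0].image = "ubuntu:20.04"  # assumed image, set by the caller
# kubernetes.client.CoreV1Api().create_namespaced_pod(namespace="default", body=pod)  # submission left to the caller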
def test_pod_template_file_with_full_pod_spec(self):
    fixture = sys.path[0] + '/tests/kubernetes/basic_pod.yaml'
    pod_spec = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(labels={"foo": "bar", "fizz": "buzz"}),
        spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(
                name="base",
                env=[k8s.V1EnvVar(name="env_name", value="value")],
            )
        ]),
    )
    k = KubernetesPodOperator(
        task_id="task" + self.get_current_task_name(),
        in_cluster=False,
        pod_template_file=fixture,
        full_pod_spec=pod_spec,
        do_xcom_push=True,
    )
    context = create_context(k)
    result = k.execute(context)
    self.assertIsNotNone(result)
    self.assertEqual(k.pod.metadata.labels, {'fizz': 'buzz', 'foo': 'bar'})
    self.assertEqual(k.pod.spec.containers[0].env, [k8s.V1EnvVar(name="env_name", value="value")])
    self.assertDictEqual(result, {"hello": "world"})
def test_construct_pod_empty_executor_config(self, mock_uuid):
    mock_uuid.return_value = self.static_uuid
    worker_config = k8s.V1Pod(
        spec=k8s.V1PodSpec(
            containers=[
                k8s.V1Container(
                    name='',
                    resources=k8s.V1ResourceRequirements(
                        limits={'cpu': '1m', 'memory': '1G'}
                    )
                )
            ]
        )
    )
    executor_config = None
    result = PodGenerator.construct_pod(
        'dag_id',
        'task_id',
        'pod_id',
        3,
        'date',
        ['command'],
        executor_config,
        worker_config,
        'namespace',
        'uuid',
    )
    sanitized_result = self.k8s_client.sanitize_for_serialization(result)
    self.assertEqual(
        {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': self.metadata,
            'spec': {
                'containers': [{
                    'args': [],
                    'command': ['command'],
                    'env': [],
                    'envFrom': [],
                    'imagePullPolicy': 'IfNotPresent',
                    'name': 'base',
                    'ports': [],
                    'resources': {'limits': {'cpu': '1m', 'memory': '1G'}},
                    'volumeMounts': [],
                }],
                'hostNetwork': False,
                'imagePullSecrets': [],
                'restartPolicy': 'Never',
                'volumes': [],
            },
        },
        sanitized_result,
    )
def test_encode_k8s_v1pod(self):
    from kubernetes.client import models as k8s

    pod = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(
            name="foo",
            namespace="bar",
        ),
        spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(
                name="foo",
                image="bar",
            )
        ]),
    )
    self.assertEqual(
        json.loads(json.dumps(pod, cls=utils_json.AirflowJsonEncoder)),
        {
            "metadata": {"name": "foo", "namespace": "bar"},
            "spec": {"containers": [{"image": "bar", "name": "foo"}]},
        },
    )
def create_pod_request_obj(self) -> k8s.V1Pod:
    """
    Creates a V1Pod based on user parameters. Note that a `pod` or
    `pod_template_file` will supersede all other values.
    """
    self.log.debug("Creating pod for K8sPodOperator task %s", self.task_id)
    if self.pod_template_file:
        self.log.debug("Pod template file found, will parse for base pod")
        pod_template = pod_generator.PodGenerator.deserialize_model_file(self.pod_template_file)
    else:
        pod_template = k8s.V1Pod(metadata=k8s.V1ObjectMeta(name="name"))

    pod = k8s.V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=k8s.V1ObjectMeta(
            namespace=self.namespace,
            labels=self.labels,
            name=self.name,
            annotations=self.annotations,
        ),
        spec=k8s.V1PodSpec(
            node_selector=self.node_selectors,
            affinity=self.affinity,
            tolerations=self.tolerations,
            init_containers=self.init_containers,
            containers=[
                k8s.V1Container(
                    image=self.image,
                    name="base",
                    command=self.cmds,
                    ports=self.ports,
                    resources=self.k8s_resources,
                    volume_mounts=self.volume_mounts,
                    args=self.arguments,
                    env=self.env_vars,
                    env_from=self.env_from,
                )
            ],
            image_pull_secrets=self.image_pull_secrets,
            service_account_name=self.service_account_name,
            host_network=self.hostnetwork,
            security_context=self.security_context,
            dns_policy=self.dnspolicy,
            scheduler_name=self.schedulername,
            restart_policy='Never',
            priority_class_name=self.priority_class_name,
            volumes=self.volumes,
        ),
    )

    pod = PodGenerator.reconcile_pods(pod_template, pod)

    for secret in self.secrets:
        self.log.debug("Adding secret to task %s", self.task_id)
        pod = secret.attach_to_pod(pod)
    if self.do_xcom_push:
        self.log.debug("Adding xcom sidecar to task %s", self.task_id)
        pod = PodGenerator.add_xcom_sidecar(pod)
    return pod
def __init__(self, dag, version, release_stream, platform, profile, build):
    self.exec_config = {
        "pod_override": k8s.V1Pod(
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name="base",
                        image="quay.io/mukrishn/jetski:2.0",
                        image_pull_policy="Always",
                        volume_mounts=[kubeconfig.get_empty_dir_volume_mount()],
                    )
                ],
                volumes=[kubeconfig.get_empty_dir_volume_mount()],
            )
        )
    }

    # General DAG Configuration
    self.dag = dag
    self.platform = platform  # e.g. aws
    self.version = version  # e.g. 4.6/4.7, major.minor only
    self.openshift_release = release_stream  # true release stream to follow. Nightlies, CI, etc.
    self.profile = profile  # e.g. default/ovn
    self.openshift_build = build

    # Specific Task Configuration
    self.vars = var_loader.build_task_vars(
        task="install", version=version, platform=platform, profile=profile)
    self.install_secrets = Variable.get(
        "baremetal_openshift_install_config", deserialize_json=True)
def test_full_pod_spec(self):
    pod_spec = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(
            labels={"foo": "bar", "fizz": "buzz"}, namespace="default", name="test-pod"
        ),
        spec=k8s.V1PodSpec(
            containers=[
                k8s.V1Container(
                    name="base",
                    image="perl",
                    command=["/bin/bash"],
                    args=["-c", 'echo {\\"hello\\" : \\"world\\"} | cat > /airflow/xcom/return.json'],
                    env=[k8s.V1EnvVar(name="env_name", value="value")],
                )
            ],
            restart_policy="Never",
        ),
    )
    k = KubernetesPodOperator(
        task_id="task" + self.get_current_task_name(),
        in_cluster=False,
        full_pod_spec=pod_spec,
        do_xcom_push=True,
    )
    context = create_context(k)
    result = k.execute(context)

    self.assertIsNotNone(result)
    self.assertEqual(k.pod.metadata.labels, {'fizz': 'buzz', 'foo': 'bar'})
    self.assertEqual(k.pod.spec.containers[0].env, [k8s.V1EnvVar(name="env_name", value="value")])
    self.assertDictEqual(result, {"hello": "world"})
def initCronJob(self, key: str, value: str, full_path: str) -> None:
    name = self.dnsify(value)
    metadata = K.V1ObjectMeta(name=name)
    assert self.manifest_list
    clusterMeta = self.manifest_list.clusterMeta()
    if clusterMeta:
        metadata.annotations = clusterMeta.annotations
    # intentionally written this way so one can easily scan down paths
    container1 = K.V1Container(
        name="job",
        resources=K.V1ResourceRequirements(limits={}, requests={}),
    )
    self.manifest = Manifest(
        data=[
            K.V1beta1CronJob(
                metadata=metadata,
                kind="CronJob",
                api_version="batch/v1beta1",
                spec=K.V1beta1CronJobSpec(
                    schedule="* * * * *",
                    suspend=True,
                    job_template=K.V1beta1JobTemplateSpec(
                        spec=K.V1JobSpec(
                            template=K.V1PodTemplateSpec(
                                spec=K.V1PodSpec(containers=[container1])))),
                ),
            )
        ],
        pluginName="metronome",
        manifestName=name,
    )
class PodDefaults:
    """
    Static defaults for Pods
    """

    XCOM_MOUNT_PATH = '/airflow/xcom'
    SIDECAR_CONTAINER_NAME = 'airflow-xcom-sidecar'
    XCOM_CMD = 'trap "exit 0" INT; while true; do sleep 30; done;'
    VOLUME_MOUNT = k8s.V1VolumeMount(
        name='xcom',
        mount_path=XCOM_MOUNT_PATH
    )
    VOLUME = k8s.V1Volume(
        name='xcom',
        empty_dir=k8s.V1EmptyDirVolumeSource()
    )
    SIDECAR_CONTAINER = k8s.V1Container(
        name=SIDECAR_CONTAINER_NAME,
        command=['sh', '-c', XCOM_CMD],
        image='alpine',
        volume_mounts=[VOLUME_MOUNT],
        resources=k8s.V1ResourceRequirements(
            requests={
                "cpu": "1m",
            }
        ),
    )
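
# Hedged sketch of how PodDefaults is typically consumed: an
# add_xcom_sidecar-style helper appends the shared emptyDir volume, mounts it
# into the first container, and adds the sidecar so the main container can
# drop its XCom result under /airflow/xcom. This illustrates the pattern and
# is not presented as the library's exact implementation.
import copy


def add_xcom_sidecar_sketch(pod: k8s.V1Pod) -> k8s.V1Pod:
    pod_cp = copy.deepcopy(pod)
    pod_cp.spec.volumes = pod_cp.spec.volumes or []
    pod_cp.spec.volumes.insert(0, PodDefaults.VOLUME)
    main_container = pod_cp.spec.containers[0]
    main_container.volume_mounts = main_container.volume_mounts or []
    main_container.volume_mounts.insert(0, PodDefaults.VOLUME_MOUNT)
    pod_cp.spec.containers.append(PodDefaults.SIDECAR_CONTAINER)
    return pod_cp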
def test_full_pod_spec(self, mock_client, monitor_mock, start_mock):
    from airflow.utils.state import State

    pod_spec = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(name="hello", labels={"foo": "bar"}, namespace="mynamespace"),
        spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(
                name="base",
                image="ubuntu:16.04",
                command=["something"],
            )
        ]),
    )
    k = KubernetesPodOperator(
        task_id="task",
        in_cluster=False,
        do_xcom_push=False,
        cluster_context='default',
        full_pod_spec=pod_spec,
    )
    monitor_mock.return_value = (State.SUCCESS, None)
    context = self.create_context(k)
    k.execute(context=context)

    assert start_mock.call_args[0][0].metadata.name == pod_spec.metadata.name
    assert start_mock.call_args[0][0].metadata.labels == pod_spec.metadata.labels
    assert start_mock.call_args[0][0].metadata.namespace == pod_spec.metadata.namespace
    assert start_mock.call_args[0][0].spec.containers[0].image == pod_spec.spec.containers[0].image
    assert start_mock.call_args[0][0].spec.containers[0].command == pod_spec.spec.containers[0].command

    # kwargs take precedence, however
    start_mock.reset_mock()
    image = "some.custom.image:andtag"
    name_base = "world"
    k = KubernetesPodOperator(
        task_id="task",
        in_cluster=False,
        do_xcom_push=False,
        cluster_context='default',
        full_pod_spec=pod_spec,
        name=name_base,
        image=image,
    )
    context = self.create_context(k)
    k.execute(context=context)

    # make sure the kwargs take precedence (and that the name is randomized)
    assert start_mock.call_args[0][0].metadata.name.startswith(name_base)
    assert start_mock.call_args[0][0].metadata.name != name_base
    assert start_mock.call_args[0][0].spec.containers[0].image == image
def test_reconcile_pods(self, mock_uuid):
    mock_uuid.return_value = self.static_uuid
    path = sys.path[0] + '/tests/kubernetes/pod_generator_base_with_secrets.yaml'
    base_pod = PodGenerator(pod_template_file=path, extract_xcom=False).gen_pod()

    mutator_pod = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(
            name="name2",
            labels={"bar": "baz"},
        ),
        spec=k8s.V1PodSpec(
            containers=[
                k8s.V1Container(
                    image='',
                    name='name',
                    command=['/bin/command2.sh', 'arg2'],
                    volume_mounts=[
                        k8s.V1VolumeMount(mount_path="/foo/", name="example-kubernetes-test-volume2")
                    ],
                )
            ],
            volumes=[
                k8s.V1Volume(
                    host_path=k8s.V1HostPathVolumeSource(path="/tmp/"),
                    name="example-kubernetes-test-volume2",
                )
            ],
        ),
    )

    result = PodGenerator.reconcile_pods(base_pod, mutator_pod)

    expected: k8s.V1Pod = self.expected
    expected.metadata.name = "name2"
    expected.metadata.labels['bar'] = 'baz'
    expected.spec.volumes = expected.spec.volumes or []
    expected.spec.volumes.append(
        k8s.V1Volume(
            host_path=k8s.V1HostPathVolumeSource(path="/tmp/"),
            name="example-kubernetes-test-volume2"))

    base_container: k8s.V1Container = expected.spec.containers[0]
    base_container.command = ['/bin/command2.sh', 'arg2']
    base_container.volume_mounts = [
        k8s.V1VolumeMount(mount_path="/foo/", name="example-kubernetes-test-volume2")
    ]
    base_container.name = "name"
    expected.spec.containers[0] = base_container

    result_dict = self.k8s_client.sanitize_for_serialization(result)
    expected_dict = self.k8s_client.sanitize_for_serialization(expected)

    assert result_dict == expected_dict
def test_init_container(self):
    # GIVEN
    volume_mounts = [
        k8s.V1VolumeMount(mount_path='/etc/foo', name='test-volume', sub_path=None, read_only=True)
    ]
    init_environments = [
        k8s.V1EnvVar(name='key1', value='value1'),
        k8s.V1EnvVar(name='key2', value='value2'),
    ]
    init_container = k8s.V1Container(
        name="init-container",
        image="ubuntu:16.04",
        env=init_environments,
        volume_mounts=volume_mounts,
        command=["bash", "-cx"],
        args=["echo 10"],
    )
    volume = k8s.V1Volume(
        name='test-volume',
        persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(claim_name='test-volume'),
    )
    expected_init_container = {
        'name': 'init-container',
        'image': 'ubuntu:16.04',
        'command': ['bash', '-cx'],
        'args': ['echo 10'],
        'env': [{'name': 'key1', 'value': 'value1'}, {'name': 'key2', 'value': 'value2'}],
        'volumeMounts': [{'mountPath': '/etc/foo', 'name': 'test-volume', 'readOnly': True}],
    }

    k = KubernetesPodOperator(
        namespace='default',
        image="ubuntu:16.04",
        cmds=["bash", "-cx"],
        arguments=["echo 10"],
        labels={"foo": "bar"},
        name="test-" + str(random.randint(0, 1000000)),
        task_id="task" + self.get_current_task_name(),
        volumes=[volume],
        init_containers=[init_container],
        in_cluster=False,
        do_xcom_push=False,
    )
    context = create_context(k)
    k.execute(context)
    actual_pod = self.api_client.sanitize_for_serialization(k.pod)
    self.expected_pod['spec']['initContainers'] = [expected_init_container]
    self.expected_pod['spec']['volumes'] = [
        {'name': 'test-volume', 'persistentVolumeClaim': {'claimName': 'test-volume'}}
    ]
    assert self.expected_pod == actual_pod
def construct_pod(  # pylint: disable=too-many-arguments
        dag_id: str,
        task_id: str,
        pod_id: str,
        try_number: int,
        kube_image: str,
        date: datetime.datetime,
        command: List[str],
        pod_override_object: Optional[k8s.V1Pod],
        base_worker_pod: k8s.V1Pod,
        namespace: str,
        worker_uuid: str) -> k8s.V1Pod:
    """
    Construct a pod by gathering and consolidating the configuration
    from 3 places:
        - airflow.cfg
        - executor_config
        - dynamic arguments
    """
    try:
        image = pod_override_object.spec.containers[0].image  # type: ignore
        if not image:
            image = kube_image
    except Exception:  # pylint: disable=W0703
        image = kube_image

    dynamic_pod = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(
            namespace=namespace,
            annotations={
                'dag_id': dag_id,
                'task_id': task_id,
                'execution_date': date.isoformat(),
                'try_number': str(try_number),
            },
            name=PodGenerator.make_unique_pod_id(pod_id),
            labels={
                'airflow-worker': worker_uuid,
                'dag_id': dag_id,
                'task_id': task_id,
                'execution_date': datetime_to_label_safe_datestring(date),
                'try_number': str(try_number),
                'airflow_version': airflow_version.replace('+', '-'),
                'kubernetes_executor': 'True',
            },
        ),
        spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(
                name="base",
                command=command,
                image=image,
            )
        ]),
    )

    # Reconcile the pods starting with the first chronologically:
    # pod from the pod_template_file -> pod from the executor_config arg -> pod from the K8s executor
    pod_list = [base_worker_pod, pod_override_object, dynamic_pod]

    return reduce(PodGenerator.reconcile_pods, pod_list)
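
# Hedged illustration of the reduce(...) above: PodGenerator.reconcile_pods
# merges field by field with the later (client) pod winning, so the dynamic
# pod overrides the executor_config pod, which overrides the template pod.
# The two pods below are placeholders used only to show that ordering.
base = k8s.V1Pod(metadata=k8s.V1ObjectMeta(namespace="template-ns", labels={"team": "data"}))
override = k8s.V1Pod(metadata=k8s.V1ObjectMeta(namespace="override-ns"))
merged = PodGenerator.reconcile_pods(base, override)
# merged.metadata.namespace == "override-ns"; the "team" label from the base pod is preserved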
def __init__(self, dag, version, release_stream, latest_release, platform, profile, default_args):
    self.exec_config = {
        "pod_override": k8s.V1Pod(
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name="base",
                        image="quay.io/keithwhitley4/airflow-ansible:2.1.0",
                        image_pull_policy="Always",
                        env=[
                            kubeconfig.get_kubeadmin_password(version, platform, profile)
                        ],
                        volume_mounts=[kubeconfig.get_kubeconfig_volume_mount()],
                    )
                ],
                volumes=[kubeconfig.get_kubeconfig_volume(version, platform, profile)],
            )
        )
    }

    # General DAG Configuration
    self.dag = dag
    self.platform = platform  # e.g. aws
    self.version = version  # e.g. stable/.next/.future
    self.release_stream = release_stream
    self.latest_release = latest_release  # latest release from the release stream
    self.profile = profile  # e.g. default/ovn
    self.default_args = default_args

    # Airflow Variables
    self.SNAPPY_DATA_SERVER_URL = Variable.get("SNAPPY_DATA_SERVER_URL")
    self.SNAPPY_DATA_SERVER_USERNAME = Variable.get("SNAPPY_DATA_SERVER_USERNAME")
    self.SNAPPY_DATA_SERVER_PASSWORD = Variable.get("SNAPPY_DATA_SERVER_PASSWORD")

    # Specific Task Configuration
    self.vars = var_loader.build_task_vars(
        task="utils", version=version, platform=platform, profile=profile)
    self.git_name = self._git_name()
    self.env = {
        "OPENSHIFT_CLIENT_LOCATION": self.latest_release["openshift_client_location"],
        "SNAPPY_DATA_SERVER_URL": self.SNAPPY_DATA_SERVER_URL,
        "SNAPPY_DATA_SERVER_USERNAME": self.SNAPPY_DATA_SERVER_USERNAME,
        "SNAPPY_DATA_SERVER_PASSWORD": self.SNAPPY_DATA_SERVER_PASSWORD,
        "SNAPPY_USER_FOLDER": self.git_name,
    }
def to_v1_kubernetes_pod(self):
    """
    Convert to support k8s V1Pod

    :return: k8s.V1Pod
    """
    import kubernetes.client.models as k8s

    meta = k8s.V1ObjectMeta(
        labels=self.labels,
        name=self.name,
        namespace=self.namespace,
    )
    spec = k8s.V1PodSpec(
        init_containers=self.init_containers,
        containers=[
            k8s.V1Container(
                image=self.image,
                command=self.cmds,
                name="base",
                env=[k8s.V1EnvVar(name=key, value=val) for key, val in self.envs.items()],
                args=self.args,
                image_pull_policy=self.image_pull_policy,
            )
        ],
        image_pull_secrets=self.image_pull_secrets,
        service_account_name=self.service_account_name,
        dns_policy=self.dnspolicy,
        host_network=self.hostnetwork,
        tolerations=self.tolerations,
        affinity=self.affinity,
        security_context=self.security_context,
    )
    pod = k8s.V1Pod(
        spec=spec,
        metadata=meta,
    )

    for port in _extract_ports(self.ports):
        pod = port.attach_to_pod(pod)
    volumes = _extract_volumes(self.volumes)
    for volume in volumes:
        pod = volume.attach_to_pod(pod)
    for volume_mount in _extract_volume_mounts(self.volume_mounts):
        pod = volume_mount.attach_to_pod(pod)
    for secret in self.secrets:
        pod = secret.attach_to_pod(pod)
    for runtime_info in self.pod_runtime_info_envs:
        pod = runtime_info.attach_to_pod(pod)
    pod = _extract_resources(self.resources).attach_to_pod(pod)
    return pod
def test_full_pod_spec(self):
    pod_spec = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(
            labels={"foo": "bar", "fizz": "buzz"}, namespace="default", name="test-pod"),
        spec=k8s.V1PodSpec(
            containers=[
                k8s.V1Container(
                    name="base",
                    image="perl",
                    command=["/bin/bash"],
                    args=["-c", 'echo {\\"hello\\" : \\"world\\"} | cat > /airflow/xcom/return.json'],
                    env=[k8s.V1EnvVar(name="env_name", value="value")],
                )
            ],
            restart_policy="Never",
        ),
    )
    k = KubernetesPodOperator(
        task_id="task" + self.get_current_task_name(),
        in_cluster=False,
        full_pod_spec=pod_spec,
        do_xcom_push=True,
        is_delete_operator_pod=False,
    )
    context = create_context(k)
    result = k.execute(context)

    assert result is not None
    assert k.pod.metadata.labels == {
        'fizz': 'buzz',
        'foo': 'bar',
        'airflow_version': mock.ANY,
        'dag_id': 'dag',
        'run_id': 'manual__2016-01-01T0100000100-da4d1ce7b',
        'kubernetes_pod_operator': 'True',
        'task_id': mock.ANY,
        'try_number': '1',
        'already_checked': 'True',
    }
    assert k.pod.spec.containers[0].env == [k8s.V1EnvVar(name="env_name", value="value")]
    assert result == {"hello": "world"}
def test_construct_pod(self, mock_uuid):
    path = sys.path[0] + '/tests/kubernetes/pod_generator_base_with_secrets.yaml'
    worker_config = PodGenerator.deserialize_model_file(path)
    mock_uuid.return_value = self.static_uuid
    executor_config = k8s.V1Pod(
        spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(
                name='',
                resources=k8s.V1ResourceRequirements(limits={'cpu': '1m', 'memory': '1G'}))
        ]))

    result = PodGenerator.construct_pod(
        dag_id=self.dag_id,
        task_id=self.task_id,
        pod_id='pod_id',
        kube_image='airflow_image',
        try_number=self.try_number,
        date=self.execution_date,
        args=['command'],
        pod_override_object=executor_config,
        base_worker_pod=worker_config,
        namespace='test_namespace',
        scheduler_job_id='uuid',
    )

    expected = self.expected
    expected.metadata.labels = self.labels
    expected.metadata.labels['app'] = 'myapp'
    expected.metadata.annotations = self.annotations
    expected.metadata.name = 'pod_id.' + self.static_uuid.hex
    expected.metadata.namespace = 'test_namespace'
    expected.spec.containers[0].args = ['command']
    expected.spec.containers[0].image = 'airflow_image'
    expected.spec.containers[0].resources = {'limits': {'cpu': '1m', 'memory': '1G'}}
    expected.spec.containers[0].env.append(
        k8s.V1EnvVar(
            name="AIRFLOW_IS_K8S_EXECUTOR_POD",
            value='True',
        ))

    result_dict = self.k8s_client.sanitize_for_serialization(result)
    expected_dict = self.k8s_client.sanitize_for_serialization(self.expected)

    assert expected_dict == result_dict
def test_full_pod_spec(self):
    pod_spec = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(name="hello", labels={"foo": "bar"}, namespace="mynamespace"),
        spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(
                name="base",
                image="ubuntu:16.04",
                command=["something"],
            )
        ]),
    )
    k = KubernetesPodOperator(
        task_id="task",
        in_cluster=False,
        do_xcom_push=False,
        cluster_context="default",
        full_pod_spec=pod_spec,
    )
    pod = k.create_pod_request_obj()

    assert pod.metadata.name == pod_spec.metadata.name
    assert pod.metadata.labels == pod_spec.metadata.labels
    assert pod.metadata.namespace == pod_spec.metadata.namespace
    assert pod.spec.containers[0].image == pod_spec.spec.containers[0].image
    assert pod.spec.containers[0].command == pod_spec.spec.containers[0].command

    # kwargs take precedence, however
    image = "some.custom.image:andtag"
    name_base = "world"
    k = KubernetesPodOperator(
        task_id="task",
        in_cluster=False,
        do_xcom_push=False,
        cluster_context="default",
        full_pod_spec=pod_spec,
        name=name_base,
        image=image,
    )
    pod = k.create_pod_request_obj()

    # make sure the kwargs take precedence (and that the name is randomized)
    assert pod.metadata.name.startswith(name_base)
    assert pod.metadata.name != name_base
    assert pod.spec.containers[0].image == image
def test_invalid_executor_config(self, mock_get_kube_client, mock_kubernetes_job_watcher):
    executor = self.kubernetes_executor
    executor.start()
    assert executor.event_buffer == {}

    executor.execute_async(
        key=('dag', 'task', datetime.utcnow(), 1),
        queue=None,
        command=['airflow', 'tasks', 'run', 'true', 'some_parameter'],
        executor_config=k8s.V1Pod(
            spec=k8s.V1PodSpec(
                containers=[k8s.V1Container(name="base", image="myimage", image_pull_policy="Always")]
            )
        ),
    )

    assert list(executor.event_buffer.values())[0][1] == "Invalid executor_config passed"
def get_default_executor_config(dag_config: DagConfig, executor_image='airflow-ansible'):
    return {
        "pod_override": k8s.V1Pod(
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name="base",
                        image=f"{dag_config.executor_image['repository']}/{executor_image}:{dag_config.executor_image['tag']}",
                        image_pull_policy="Always",
                        volume_mounts=[get_empty_dir_volume_mount()],
                    )
                ],
                volumes=[get_empty_dir_volume_mount()],
            )
        )
    }
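
# Hedged usage sketch for get_default_executor_config above: in Airflow the
# returned dict is normally passed to a task's executor_config so the
# KubernetesExecutor applies the pod_override. `dag_config` and the task below
# are placeholders, not values from the original code.
from airflow.operators.bash import BashOperator

example_task = BashOperator(
    task_id="run_in_custom_executor_pod",
    bash_command="echo hello",
    executor_config=get_default_executor_config(dag_config),  # dag_config: DagConfig instance assumed to exist
)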
class PodDefaults:
    """
    Static defaults for the PodGenerator
    """

    XCOM_MOUNT_PATH = '/airflow/xcom'
    SIDECAR_CONTAINER_NAME = 'airflow-xcom-sidecar'
    XCOM_CMD = """import time
while True:
    try:
        time.sleep(3600)
    except KeyboardInterrupt:
        exit(0)
"""
    VOLUME_MOUNT = k8s.V1VolumeMount(name='xcom', mount_path=XCOM_MOUNT_PATH)
    VOLUME = k8s.V1Volume(name='xcom', empty_dir=k8s.V1EmptyDirVolumeSource())
    SIDECAR_CONTAINER = k8s.V1Container(
        name=SIDECAR_CONTAINER_NAME,
        command=['python', '-c', XCOM_CMD],
        image='python:3.5-alpine',
        volume_mounts=[VOLUME_MOUNT],
    )
def mock_kubernetes_read_namespaced_pod(*_args, **_kwargs):
    """
    Represents the mocked output of kubernetes.client.read_namespaced_pod
    """
    return models.V1Pod(
        metadata=models.V1ObjectMeta(
            namespace="default",
            name="gordo-test-pod-name-1234",
            labels={"app": "gordo-model-builder"},
        ),
        status=models.V1PodStatus(phase="Running"),
        spec=models.V1PodSpec(containers=[
            models.V1Container(
                name="some-generated-test-container-name",
                env=[models.V1EnvVar(name="MACHINE_NAME", value="test-machine-name")],
            )
        ]),
    )
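
# Hedged usage sketch: the fake above is typically wired in with unittest.mock
# so the code under test sees a "Running" pod without talking to a real
# cluster. The patch target below is illustrative; the correct target depends
# on where the client is imported in the code under test.
from unittest import mock

with mock.patch(
    "kubernetes.client.CoreV1Api.read_namespaced_pod",
    side_effect=mock_kubernetes_read_namespaced_pod,
):
    ...  # call the code that reads the pod status here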
def get_executor_config_with_cluster_access(dag_config: DagConfig, release: OpenshiftRelease, executor_image="airflow-ansible"):
    return {
        "pod_override": k8s.V1Pod(
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name="base",
                        image=f"{dag_config.executor_image['repository']}/{executor_image}:{dag_config.executor_image['tag']}",
                        image_pull_policy="Always",
                        env=[get_kubeadmin_password(release)],
                        volume_mounts=[get_kubeconfig_volume_mount()],
                    )
                ],
                volumes=[get_kubeconfig_volume(release)],
            )
        )
    }