def test_reconcile_specs(self):
    base_objs = [k8s.V1Container(name='base_container1', image='base_image')]
    client_objs = [k8s.V1Container(name='client_container1')]
    base_spec = k8s.V1PodSpec(priority=1, active_deadline_seconds=100, containers=base_objs)
    client_spec = k8s.V1PodSpec(priority=2, hostname='local', containers=client_objs)

    res = PodGenerator.reconcile_specs(base_spec, client_spec)
    client_spec.containers = [k8s.V1Container(name='client_container1', image='base_image')]
    client_spec.active_deadline_seconds = 100
    assert client_spec == res
def test_reconcile_specs_empty(self):
    base_spec = k8s.V1PodSpec(containers=[])
    client_spec = None
    res = PodGenerator.reconcile_specs(base_spec, client_spec)
    self.assertEqual(base_spec, res)

    base_spec = None
    client_spec = k8s.V1PodSpec(containers=[])
    res = PodGenerator.reconcile_specs(base_spec, client_spec)
    self.assertEqual(client_spec, res)
def test_reconcile_specs_empty(self):
    base_spec = k8s.V1PodSpec(containers=[])
    client_spec = None
    res = PodGenerator.reconcile_specs(base_spec, client_spec)
    assert base_spec == res

    base_spec = None
    client_spec = k8s.V1PodSpec(containers=[])
    res = PodGenerator.reconcile_specs(base_spec, client_spec)
    assert client_spec == res
def __init__(self, dag, version, release_stream, platform, profile, build):
    self.exec_config = {
        "pod_override": k8s.V1Pod(
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name="base",
                        image="quay.io/mukrishn/jetski:2.0",
                        image_pull_policy="Always",
                        volume_mounts=[kubeconfig.get_empty_dir_volume_mount()])
                ],
                volumes=[kubeconfig.get_empty_dir_volume_mount()]))
    }

    # General DAG Configuration
    self.dag = dag
    self.platform = platform  # e.g. aws
    self.version = version  # e.g. 4.6/4.7, major.minor only
    self.openshift_release = release_stream  # true release stream to follow. Nightlies, CI, etc.
    self.profile = profile  # e.g. default/ovn
    self.openshift_build = build

    # Specific Task Configuration
    self.vars = var_loader.build_task_vars(
        task="install", version=version, platform=platform, profile=profile)
    self.install_secrets = Variable.get(
        f"baremetal_openshift_install_config", deserialize_json=True)
def test_construct_pod_empty_executor_config(self, mock_uuid):
    mock_uuid.return_value = self.static_uuid
    worker_config = k8s.V1Pod(
        spec=k8s.V1PodSpec(
            containers=[
                k8s.V1Container(
                    name='',
                    resources=k8s.V1ResourceRequirements(
                        limits={'cpu': '1m', 'memory': '1G'}
                    )
                )
            ]
        )
    )
    executor_config = None

    result = PodGenerator.construct_pod(
        'dag_id',
        'task_id',
        'pod_id',
        3,
        'date',
        ['command'],
        executor_config,
        worker_config,
        'namespace',
        'uuid',
    )
    sanitized_result = self.k8s_client.sanitize_for_serialization(result)

    self.assertEqual({
        'apiVersion': 'v1',
        'kind': 'Pod',
        'metadata': self.metadata,
        'spec': {
            'containers': [{
                'args': [],
                'command': ['command'],
                'env': [],
                'envFrom': [],
                'imagePullPolicy': 'IfNotPresent',
                'name': 'base',
                'ports': [],
                'resources': {
                    'limits': {
                        'cpu': '1m',
                        'memory': '1G'
                    }
                },
                'volumeMounts': []
            }],
            'hostNetwork': False,
            'imagePullSecrets': [],
            'restartPolicy': 'Never',
            'volumes': []
        }
    }, sanitized_result)
def test_encode_k8s_v1pod(self):
    from kubernetes.client import models as k8s

    pod = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(
            name="foo",
            namespace="bar",
        ),
        spec=k8s.V1PodSpec(
            containers=[
                k8s.V1Container(
                    name="foo",
                    image="bar",
                )
            ]
        ),
    )
    self.assertEqual(
        json.loads(json.dumps(pod, cls=utils_json.AirflowJsonEncoder)),
        {
            "metadata": {"name": "foo", "namespace": "bar"},
            "spec": {"containers": [{"image": "bar", "name": "foo"}]},
        },
    )
def build_pod_spec(name, bucket, data_dir):
    metadata = k8s.V1ObjectMeta(name=make_unique_pod_name(name))
    container = k8s.V1Container(
        name=name,
        lifecycle=k8s.V1Lifecycle(
            post_start=k8s.V1Handler(_exec=k8s.V1ExecAction(command=[
                "gcsfuse",
                "--log-file",
                "/var/log/gcs_fuse.log",
                "--temp-dir",
                "/tmp",
                "--debug_gcs",
                bucket,
                data_dir,
            ])),
            pre_stop=k8s.V1Handler(_exec=k8s.V1ExecAction(
                command=["fusermount", "-u", data_dir])),
        ),
        security_context=k8s.V1SecurityContext(
            privileged=True,
            capabilities=k8s.V1Capabilities(add=["SYS_ADMIN"])),
    )
    pod = k8s.V1Pod(metadata=metadata,
                    spec=k8s.V1PodSpec(containers=[container]))
    return pod
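A minimal sketch (not from the source) of how the pod returned by build_pod_spec could be submitted with the official Kubernetes Python client; the kubeconfig loading, namespace, and argument values below are assumptions, and a real cluster would additionally require an image on the container.

# Hypothetical usage sketch for build_pod_spec(); the bucket, mount path, and
# namespace are made-up example values, and the container spec as built above
# would still need an image before a cluster accepts it.
from kubernetes import client, config

config.load_kube_config()  # or config.load_incluster_config() inside a cluster
pod = build_pod_spec(name="fuse-worker", bucket="example-bucket", data_dir="/data")
client.CoreV1Api().create_namespaced_pod(namespace="default", body=pod)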
def test_full_pod_spec(self):
    pod_spec = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(
            labels={"foo": "bar", "fizz": "buzz"}, namespace="default", name="test-pod"
        ),
        spec=k8s.V1PodSpec(
            containers=[
                k8s.V1Container(
                    name="base",
                    image="perl",
                    command=["/bin/bash"],
                    args=["-c", 'echo {\\"hello\\" : \\"world\\"} | cat > /airflow/xcom/return.json'],
                    env=[k8s.V1EnvVar(name="env_name", value="value")],
                )
            ],
            restart_policy="Never",
        ),
    )
    k = KubernetesPodOperator(
        task_id="task" + self.get_current_task_name(),
        in_cluster=False,
        full_pod_spec=pod_spec,
        do_xcom_push=True,
    )

    context = create_context(k)
    result = k.execute(context)
    self.assertIsNotNone(result)
    self.assertEqual(k.pod.metadata.labels, {'fizz': 'buzz', 'foo': 'bar'})
    self.assertEqual(k.pod.spec.containers[0].env, [k8s.V1EnvVar(name="env_name", value="value")])
    self.assertDictEqual(result, {"hello": "world"})
def initCronJob(self, key: str, value: str, full_path: str) -> None:
    name = self.dnsify(value)
    metadata = K.V1ObjectMeta(name=name)
    assert self.manifest_list
    clusterMeta = self.manifest_list.clusterMeta()
    if clusterMeta:
        metadata.annotations = clusterMeta.annotations

    # intentionally written this way so one can easily scan down paths
    container1 = K.V1Container(
        name="job",
        resources=K.V1ResourceRequirements(limits={}, requests={}),
    )
    self.manifest = Manifest(
        data=[
            K.V1beta1CronJob(
                metadata=metadata,
                kind="CronJob",
                api_version="batch/v1beta1",
                spec=K.V1beta1CronJobSpec(
                    schedule="* * * * *",
                    suspend=True,
                    job_template=K.V1beta1JobTemplateSpec(
                        spec=K.V1JobSpec(
                            template=K.V1PodTemplateSpec(
                                spec=K.V1PodSpec(containers=[container1])))),
                ),
            )
        ],
        pluginName="metronome",
        manifestName=name,
    )
def test_pod_template_file_with_full_pod_spec(self):
    fixture = sys.path[0] + '/tests/kubernetes/basic_pod.yaml'
    pod_spec = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(labels={"foo": "bar", "fizz": "buzz"}),
        spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(
                name="base",
                env=[k8s.V1EnvVar(name="env_name", value="value")],
            )
        ]),
    )
    k = KubernetesPodOperator(
        task_id="task" + self.get_current_task_name(),
        in_cluster=False,
        pod_template_file=fixture,
        full_pod_spec=pod_spec,
        do_xcom_push=True,
    )

    context = create_context(k)
    result = k.execute(context)
    self.assertIsNotNone(result)
    self.assertEqual(k.pod.metadata.labels, {'fizz': 'buzz', 'foo': 'bar'})
    self.assertEqual(k.pod.spec.containers[0].env, [k8s.V1EnvVar(name="env_name", value="value")])
    self.assertDictEqual(result, {"hello": "world"})
def create_pod_request_obj(self) -> k8s.V1Pod:
    """
    Creates a V1Pod based on user parameters. Note that a `pod` or
    `pod_template_file` will supersede all other values.
    """
    self.log.debug("Creating pod for K8sPodOperator task %s", self.task_id)
    if self.pod_template_file:
        self.log.debug("Pod template file found, will parse for base pod")
        pod_template = pod_generator.PodGenerator.deserialize_model_file(self.pod_template_file)
    else:
        pod_template = k8s.V1Pod(metadata=k8s.V1ObjectMeta(name="name"))

    pod = k8s.V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=k8s.V1ObjectMeta(
            namespace=self.namespace,
            labels=self.labels,
            name=self.name,
            annotations=self.annotations,
        ),
        spec=k8s.V1PodSpec(
            node_selector=self.node_selectors,
            affinity=self.affinity,
            tolerations=self.tolerations,
            init_containers=self.init_containers,
            containers=[
                k8s.V1Container(
                    image=self.image,
                    name="base",
                    command=self.cmds,
                    ports=self.ports,
                    resources=self.k8s_resources,
                    volume_mounts=self.volume_mounts,
                    args=self.arguments,
                    env=self.env_vars,
                    env_from=self.env_from,
                )
            ],
            image_pull_secrets=self.image_pull_secrets,
            service_account_name=self.service_account_name,
            host_network=self.hostnetwork,
            security_context=self.security_context,
            dns_policy=self.dnspolicy,
            scheduler_name=self.schedulername,
            restart_policy='Never',
            priority_class_name=self.priority_class_name,
            volumes=self.volumes,
        ))

    pod = PodGenerator.reconcile_pods(pod_template, pod)

    for secret in self.secrets:
        self.log.debug("Adding secret to task %s", self.task_id)
        pod = secret.attach_to_pod(pod)
    if self.do_xcom_push:
        self.log.debug("Adding xcom sidecar to task %s", self.task_id)
        pod = PodGenerator.add_xcom_sidecar(pod)
    return pod
def test_full_pod_spec(self, mock_client, monitor_mock, start_mock):
    from airflow.utils.state import State

    pod_spec = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(name="hello", labels={"foo": "bar"}, namespace="mynamespace"),
        spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(
                name="base",
                image="ubuntu:16.04",
                command=["something"],
            )
        ]),
    )
    k = KubernetesPodOperator(
        task_id="task",
        in_cluster=False,
        do_xcom_push=False,
        cluster_context='default',
        full_pod_spec=pod_spec,
    )
    monitor_mock.return_value = (State.SUCCESS, None)
    context = self.create_context(k)
    k.execute(context=context)

    assert start_mock.call_args[0][0].metadata.name == pod_spec.metadata.name
    assert start_mock.call_args[0][0].metadata.labels == pod_spec.metadata.labels
    assert start_mock.call_args[0][0].metadata.namespace == pod_spec.metadata.namespace
    assert start_mock.call_args[0][0].spec.containers[0].image == pod_spec.spec.containers[0].image
    assert start_mock.call_args[0][0].spec.containers[0].command == pod_spec.spec.containers[0].command

    # kwargs take precedence, however
    start_mock.reset_mock()
    image = "some.custom.image:andtag"
    name_base = "world"
    k = KubernetesPodOperator(
        task_id="task",
        in_cluster=False,
        do_xcom_push=False,
        cluster_context='default',
        full_pod_spec=pod_spec,
        name=name_base,
        image=image,
    )
    context = self.create_context(k)
    k.execute(context=context)

    # make sure the kwargs take precedence (and that the name is randomized)
    assert start_mock.call_args[0][0].metadata.name.startswith(name_base)
    assert start_mock.call_args[0][0].metadata.name != name_base
    assert start_mock.call_args[0][0].spec.containers[0].image == image
def test_reconcile_pods(self, mock_uuid):
    mock_uuid.return_value = self.static_uuid
    path = sys.path[0] + '/tests/kubernetes/pod_generator_base_with_secrets.yaml'
    base_pod = PodGenerator(pod_template_file=path, extract_xcom=False).gen_pod()

    mutator_pod = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(
            name="name2",
            labels={"bar": "baz"},
        ),
        spec=k8s.V1PodSpec(
            containers=[
                k8s.V1Container(
                    image='',
                    name='name',
                    command=['/bin/command2.sh', 'arg2'],
                    volume_mounts=[
                        k8s.V1VolumeMount(mount_path="/foo/",
                                          name="example-kubernetes-test-volume2")
                    ],
                )
            ],
            volumes=[
                k8s.V1Volume(
                    host_path=k8s.V1HostPathVolumeSource(path="/tmp/"),
                    name="example-kubernetes-test-volume2",
                )
            ],
        ),
    )

    result = PodGenerator.reconcile_pods(base_pod, mutator_pod)
    expected: k8s.V1Pod = self.expected
    expected.metadata.name = "name2"
    expected.metadata.labels['bar'] = 'baz'
    expected.spec.volumes = expected.spec.volumes or []
    expected.spec.volumes.append(
        k8s.V1Volume(host_path=k8s.V1HostPathVolumeSource(path="/tmp/"),
                     name="example-kubernetes-test-volume2"))

    base_container: k8s.V1Container = expected.spec.containers[0]
    base_container.command = ['/bin/command2.sh', 'arg2']
    base_container.volume_mounts = [
        k8s.V1VolumeMount(mount_path="/foo/", name="example-kubernetes-test-volume2")
    ]
    base_container.name = "name"
    expected.spec.containers[0] = base_container

    result_dict = self.k8s_client.sanitize_for_serialization(result)
    expected_dict = self.k8s_client.sanitize_for_serialization(expected)

    assert result_dict == expected_dict
def construct_pod(  # pylint: disable=too-many-arguments
        dag_id: str,
        task_id: str,
        pod_id: str,
        try_number: int,
        kube_image: str,
        date: datetime.datetime,
        command: List[str],
        pod_override_object: Optional[k8s.V1Pod],
        base_worker_pod: k8s.V1Pod,
        namespace: str,
        worker_uuid: str) -> k8s.V1Pod:
    """
    Construct a pod by gathering and consolidating the configuration from 3 places:
        - airflow.cfg
        - executor_config
        - dynamic arguments
    """
    try:
        image = pod_override_object.spec.containers[0].image  # type: ignore
        if not image:
            image = kube_image
    except Exception:  # pylint: disable=W0703
        image = kube_image

    dynamic_pod = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(
            namespace=namespace,
            annotations={
                'dag_id': dag_id,
                'task_id': task_id,
                'execution_date': date.isoformat(),
                'try_number': str(try_number),
            },
            name=PodGenerator.make_unique_pod_id(pod_id),
            labels={
                'airflow-worker': worker_uuid,
                'dag_id': dag_id,
                'task_id': task_id,
                'execution_date': datetime_to_label_safe_datestring(date),
                'try_number': str(try_number),
                'airflow_version': airflow_version.replace('+', '-'),
                'kubernetes_executor': 'True',
            }),
        spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(
                name="base",
                command=command,
                image=image,
            )
        ]))

    # Reconcile the pods starting with the first chronologically:
    # pod from the pod_template_file -> pod from the executor_config arg -> pod from the K8s executor
    pod_list = [base_worker_pod, pod_override_object, dynamic_pod]

    return reduce(PodGenerator.reconcile_pods, pod_list)
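For illustration, a minimal sketch (not from the source) of the reduce-based reconcile order used above: later pods in pod_list override earlier ones, so the dynamic pod wins over the executor_config override, which in turn wins over the base worker pod. The import path for PodGenerator is an assumption.

# Minimal sketch of the reconcile order; the PodGenerator import path is assumed.
from functools import reduce
from kubernetes.client import models as k8s
from airflow.kubernetes.pod_generator import PodGenerator  # assumed location

base = k8s.V1Pod(spec=k8s.V1PodSpec(
    containers=[k8s.V1Container(name="base", image="apache/airflow", command=["old"])]))
override = k8s.V1Pod(spec=k8s.V1PodSpec(
    containers=[k8s.V1Container(name="base", image="custom/image")]))

merged = reduce(PodGenerator.reconcile_pods, [base, override])
# Fields set on the later pod win, while unset fields fall back to the earlier pod:
# merged.spec.containers[0].image == "custom/image"
# merged.spec.containers[0].command == ["old"]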
def __init__(self, dag, version, release_stream, latest_release, platform, profile, default_args):
    self.exec_config = {
        "pod_override": k8s.V1Pod(
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name="base",
                        image="quay.io/keithwhitley4/airflow-ansible:2.1.0",
                        image_pull_policy="Always",
                        env=[
                            kubeconfig.get_kubeadmin_password(version, platform, profile)
                        ],
                        volume_mounts=[kubeconfig.get_kubeconfig_volume_mount()]
                    )
                ],
                volumes=[kubeconfig.get_kubeconfig_volume(version, platform, profile)]
            )
        )
    }

    # General DAG Configuration
    self.dag = dag
    self.platform = platform  # e.g. aws
    self.version = version  # e.g. stable/.next/.future
    self.release_stream = release_stream
    self.latest_release = latest_release  # latest release from the release stream
    self.profile = profile  # e.g. default/ovn
    self.default_args = default_args

    # Airflow Variables
    self.SNAPPY_DATA_SERVER_URL = Variable.get("SNAPPY_DATA_SERVER_URL")
    self.SNAPPY_DATA_SERVER_USERNAME = Variable.get("SNAPPY_DATA_SERVER_USERNAME")
    self.SNAPPY_DATA_SERVER_PASSWORD = Variable.get("SNAPPY_DATA_SERVER_PASSWORD")

    # Specific Task Configuration
    self.vars = var_loader.build_task_vars(
        task="utils", version=version, platform=platform, profile=profile)
    self.git_name = self._git_name()

    self.env = {
        "OPENSHIFT_CLIENT_LOCATION": self.latest_release["openshift_client_location"],
        "SNAPPY_DATA_SERVER_URL": self.SNAPPY_DATA_SERVER_URL,
        "SNAPPY_DATA_SERVER_USERNAME": self.SNAPPY_DATA_SERVER_USERNAME,
        "SNAPPY_DATA_SERVER_PASSWORD": self.SNAPPY_DATA_SERVER_PASSWORD,
        "SNAPPY_USER_FOLDER": self.git_name
    }
def test_full_pod_spec(self):
    pod_spec = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(
            labels={"foo": "bar", "fizz": "buzz"}, namespace="default", name="test-pod"),
        spec=k8s.V1PodSpec(
            containers=[
                k8s.V1Container(
                    name="base",
                    image="perl",
                    command=["/bin/bash"],
                    args=[
                        "-c",
                        'echo {\\"hello\\" : \\"world\\"} | cat > /airflow/xcom/return.json'
                    ],
                    env=[k8s.V1EnvVar(name="env_name", value="value")],
                )
            ],
            restart_policy="Never",
        ),
    )
    k = KubernetesPodOperator(
        task_id="task" + self.get_current_task_name(),
        in_cluster=False,
        full_pod_spec=pod_spec,
        do_xcom_push=True,
        is_delete_operator_pod=False,
    )

    context = create_context(k)
    result = k.execute(context)
    assert result is not None
    assert k.pod.metadata.labels == {
        'fizz': 'buzz',
        'foo': 'bar',
        'airflow_version': mock.ANY,
        'dag_id': 'dag',
        'run_id': 'manual__2016-01-01T0100000100-da4d1ce7b',
        'kubernetes_pod_operator': 'True',
        'task_id': mock.ANY,
        'try_number': '1',
        'already_checked': 'True',
    }
    assert k.pod.spec.containers[0].env == [k8s.V1EnvVar(name="env_name", value="value")]
    assert result == {"hello": "world"}
def test_construct_pod(self, mock_uuid):
    path = sys.path[0] + '/tests/kubernetes/pod_generator_base_with_secrets.yaml'
    worker_config = PodGenerator.deserialize_model_file(path)
    mock_uuid.return_value = self.static_uuid
    executor_config = k8s.V1Pod(
        spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(
                name='',
                resources=k8s.V1ResourceRequirements(limits={'cpu': '1m', 'memory': '1G'}))
        ]))

    result = PodGenerator.construct_pod(
        dag_id=self.dag_id,
        task_id=self.task_id,
        pod_id='pod_id',
        kube_image='airflow_image',
        try_number=self.try_number,
        date=self.execution_date,
        args=['command'],
        pod_override_object=executor_config,
        base_worker_pod=worker_config,
        namespace='test_namespace',
        scheduler_job_id='uuid',
    )
    expected = self.expected
    expected.metadata.labels = self.labels
    expected.metadata.labels['app'] = 'myapp'
    expected.metadata.annotations = self.annotations
    expected.metadata.name = 'pod_id.' + self.static_uuid.hex
    expected.metadata.namespace = 'test_namespace'
    expected.spec.containers[0].args = ['command']
    expected.spec.containers[0].image = 'airflow_image'
    expected.spec.containers[0].resources = {'limits': {'cpu': '1m', 'memory': '1G'}}
    expected.spec.containers[0].env.append(
        k8s.V1EnvVar(
            name="AIRFLOW_IS_K8S_EXECUTOR_POD",
            value='True',
        ))
    result_dict = self.k8s_client.sanitize_for_serialization(result)
    expected_dict = self.k8s_client.sanitize_for_serialization(self.expected)

    assert expected_dict == result_dict
def to_v1_kubernetes_pod(self):
    """
    Convert to support k8s V1Pod

    :return: k8s.V1Pod
    """
    import kubernetes.client.models as k8s

    meta = k8s.V1ObjectMeta(
        labels=self.labels,
        name=self.name,
        namespace=self.namespace,
    )
    spec = k8s.V1PodSpec(
        init_containers=self.init_containers,
        containers=[
            k8s.V1Container(
                image=self.image,
                command=self.cmds,
                name="base",
                env=[k8s.V1EnvVar(name=key, value=val) for key, val in self.envs.items()],
                args=self.args,
                image_pull_policy=self.image_pull_policy,
            )
        ],
        image_pull_secrets=self.image_pull_secrets,
        service_account_name=self.service_account_name,
        dns_policy=self.dnspolicy,
        host_network=self.hostnetwork,
        tolerations=self.tolerations,
        affinity=self.affinity,
        security_context=self.security_context,
    )

    pod = k8s.V1Pod(
        spec=spec,
        metadata=meta,
    )
    for port in _extract_ports(self.ports):
        pod = port.attach_to_pod(pod)
    volumes = _extract_volumes(self.volumes)
    for volume in volumes:
        pod = volume.attach_to_pod(pod)
    for volume_mount in _extract_volume_mounts(self.volume_mounts):
        pod = volume_mount.attach_to_pod(pod)
    for secret in self.secrets:
        pod = secret.attach_to_pod(pod)
    for runtime_info in self.pod_runtime_info_envs:
        pod = runtime_info.attach_to_pod(pod)
    pod = _extract_resources(self.resources).attach_to_pod(pod)
    return pod
def test_full_pod_spec(self):
    pod_spec = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(name="hello", labels={"foo": "bar"}, namespace="mynamespace"),
        spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(
                name="base",
                image="ubuntu:16.04",
                command=["something"],
            )
        ]),
    )
    k = KubernetesPodOperator(
        task_id="task",
        in_cluster=False,
        do_xcom_push=False,
        cluster_context="default",
        full_pod_spec=pod_spec,
    )
    pod = k.create_pod_request_obj()

    assert pod.metadata.name == pod_spec.metadata.name
    assert pod.metadata.labels == pod_spec.metadata.labels
    assert pod.metadata.namespace == pod_spec.metadata.namespace
    assert pod.spec.containers[0].image == pod_spec.spec.containers[0].image
    assert pod.spec.containers[0].command == pod_spec.spec.containers[0].command

    # kwargs take precedence, however
    image = "some.custom.image:andtag"
    name_base = "world"
    k = KubernetesPodOperator(
        task_id="task",
        in_cluster=False,
        do_xcom_push=False,
        cluster_context="default",
        full_pod_spec=pod_spec,
        name=name_base,
        image=image,
    )
    pod = k.create_pod_request_obj()

    # make sure the kwargs take precedence (and that the name is randomized)
    assert pod.metadata.name.startswith(name_base)
    assert pod.metadata.name != name_base
    assert pod.spec.containers[0].image == image
def test_invalid_executor_config(self, mock_get_kube_client, mock_kubernetes_job_watcher):
    executor = self.kubernetes_executor
    executor.start()

    assert executor.event_buffer == {}
    executor.execute_async(
        key=('dag', 'task', datetime.utcnow(), 1),
        queue=None,
        command=['airflow', 'tasks', 'run', 'true', 'some_parameter'],
        executor_config=k8s.V1Pod(
            spec=k8s.V1PodSpec(
                containers=[k8s.V1Container(name="base", image="myimage", image_pull_policy="Always")]
            )
        ),
    )

    assert list(executor.event_buffer.values())[0][1] == "Invalid executor_config passed"
def get_default_executor_config(dag_config: DagConfig, executor_image='airflow-ansible'):
    return {
        "pod_override": k8s.V1Pod(
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name="base",
                        image=f"{dag_config.executor_image['repository']}/{executor_image}:{dag_config.executor_image['tag']}",
                        image_pull_policy="Always",
                        volume_mounts=[get_empty_dir_volume_mount()]
                    )
                ],
                volumes=[get_empty_dir_volume_mount()]
            )
        )
    }
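A hedged usage sketch (not from the source): the dict returned above is intended to be passed as a task's executor_config so the KubernetesExecutor applies the pod_override. The task name, command, and the dag/dag_config objects below are assumptions.

# Hypothetical wiring of the executor config above into a task; `dag` and
# `dag_config` are assumed to be defined elsewhere in the DAG file.
from airflow.operators.bash import BashOperator

run_playbook = BashOperator(
    task_id="run_playbook",                    # example task name
    bash_command="ansible-playbook site.yml",  # example command
    executor_config=get_default_executor_config(dag_config),
    dag=dag,
)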
def mock_kubernetes_read_namespaced_pod(*_args, **_kwargs):
    """
    Represents the mocked output of kubernetes.client.read_namespaced_pod
    """
    return models.V1Pod(
        metadata=models.V1ObjectMeta(
            namespace="default",
            name="gordo-test-pod-name-1234",
            labels={"app": "gordo-model-builder"},
        ),
        status=models.V1PodStatus(phase="Running"),
        spec=models.V1PodSpec(containers=[
            models.V1Container(
                name="some-generated-test-container-name",
                env=[models.V1EnvVar(name="MACHINE_NAME", value="test-machine-name")],
            )
        ]),
    )
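A small sketch (assumed, not from the source) of how such a fake might be wired into a test: patch CoreV1Api.read_namespaced_pod so the code under test always sees the "Running" pod returned above.

# Hypothetical test wiring: every read_namespaced_pod() call returns the fake pod.
from unittest import mock

with mock.patch(
    "kubernetes.client.CoreV1Api.read_namespaced_pod",
    side_effect=mock_kubernetes_read_namespaced_pod,
):
    pass  # exercise the code under test that polls pod status here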
def _new_pod(self, immortalcontainer):
    """Returns the pod definition to create the pod for an ImmortalContainer"""
    labels = dict(controller=immortalcontainer['metadata']['name'])
    return models.V1Pod(
        metadata=models.V1ObjectMeta(
            name=immortalcontainer['metadata']['name'] + "-immortalpod",
            labels=labels,
            namespace=immortalcontainer['metadata']['namespace'],
            owner_references=[
                models.V1OwnerReference(
                    api_version=self.custom_group + "/" + self.custom_version,
                    controller=True,
                    kind=self.custom_kind,
                    name=immortalcontainer['metadata']['name'],
                    uid=immortalcontainer['metadata']['uid'])
            ]),
        spec=models.V1PodSpec(containers=[
            models.V1Container(name="acontainer",
                               image=immortalcontainer['spec']['image'])
        ]))
def get_executor_config_with_cluster_access(dag_config: DagConfig, release: OpenshiftRelease,
                                            executor_image="airflow-ansible"):
    return {
        "pod_override": k8s.V1Pod(
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name="base",
                        image=f"{dag_config.executor_image['repository']}/{executor_image}:{dag_config.executor_image['tag']}",
                        image_pull_policy="Always",
                        env=[get_kubeadmin_password(release)],
                        volume_mounts=[get_kubeconfig_volume_mount()]
                    )
                ],
                volumes=[get_kubeconfig_volume(release)]
            )
        )
    }
def __init__(self, dag, version, release_stream, platform, profile):
    self.exec_config = {
        "pod_override": k8s.V1Pod(
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name="base",
                        image="quay.io/keithwhitley4/airflow-ansible:2.0.0",
                        image_pull_policy="Always",
                        volume_mounts=[kubeconfig.get_kubeconfig_volume_mount()])
                ],
                volumes=[
                    kubeconfig.get_kubeconfig_volume(version, platform, profile)
                ]))
    }

    # General DAG Configuration
    self.dag = dag
    self.platform = platform  # e.g. aws
    self.version = version  # e.g. 4.6/4.7, major.minor only
    self.release_stream = release_stream  # true release stream to follow. Nightlies, CI, etc.
    self.profile = profile  # e.g. default/ovn

    # Specific Task Configuration
    self.vars = var_loader.build_task_vars(
        task="index", version=version, platform=platform, profile=profile)
    self.release_stream_base_url = Variable.get("release_stream_base_url")

    latest_release = var_loader.get_latest_release_from_stream(
        self.release_stream_base_url, self.release_stream)
    self.env = {
        "OPENSHIFT_CLIENT_LOCATION": latest_release["openshift_client_location"],
        "RELEASE_STREAM": self.release_stream
    }
def __init__(self, dag, version, release_stream, platform, profile):
    self.exec_config = {
        "pod_override": k8s.V1Pod(
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name="base",
                        image="quay.io/keithwhitley4/airflow-ansible:2.0.0",
                        image_pull_policy="Always",
                        volume_mounts=[kubeconfig.get_empty_dir_volume_mount()])
                ],
                volumes=[kubeconfig.get_empty_dir_volume_mount()]))
    }

    # General DAG Configuration
    self.dag = dag
    self.platform = platform  # e.g. aws
    self.version = version  # e.g. 4.6/4.7, major.minor only
    self.release_stream = release_stream  # true release stream to follow. Nightlies, CI, etc.
    self.profile = profile  # e.g. default/ovn

    # Specific Task Configuration
    self.vars = var_loader.build_task_vars(
        task="install", version=version, platform=platform, profile=profile)

    # Airflow Variables
    self.ansible_orchestrator = Variable.get("ansible_orchestrator", deserialize_json=True)
    self.release_stream_base_url = Variable.get("release_stream_base_url")
    self.install_secrets = Variable.get(f"openshift_install_config", deserialize_json=True)
    self.aws_creds = Variable.get("aws_creds", deserialize_json=True)
    self.gcp_creds = Variable.get("gcp_creds", deserialize_json=True)
    self.azure_creds = Variable.get("azure_creds", deserialize_json=True)
def __init__(self, dag, version, release_stream, platform, profile, default_args):
    self.exec_config = {
        "pod_override": k8s.V1Pod(
            spec=k8s.V1PodSpec(
                containers=[
                    k8s.V1Container(
                        name="base",
                        image="quay.io/keithwhitley4/airflow-ansible:2.0.0",
                        image_pull_policy="Always",
                        volume_mounts=[kubeconfig.get_kubeconfig_volume_mount()])
                ],
                volumes=[
                    kubeconfig.get_kubeconfig_volume(version, platform, profile)
                ]))
    }

    # General DAG Configuration
    self.dag = dag
    self.platform = platform  # e.g. aws
    self.version = version  # e.g. stable/.next/.future
    self.release_stream = release_stream
    self.profile = profile  # e.g. default/ovn
    self.default_args = default_args

    # Specific Task Configuration
    self.vars = var_loader.build_task_vars(
        task="benchmarks", version=version, platform=platform, profile=profile)
    self.release_stream_base_url = Variable.get("release_stream_base_url")

    latest_release = var_loader.get_latest_release_from_stream(
        self.release_stream_base_url, self.release_stream)
    self.env = {
        "OPENSHIFT_CLIENT_LOCATION": latest_release["openshift_client_location"]
    }
def test_pod_template_file_with_full_pod_spec(self):
    fixture = sys.path[0] + '/tests/kubernetes/basic_pod.yaml'
    pod_spec = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(labels={"foo": "bar", "fizz": "buzz"}),
        spec=k8s.V1PodSpec(containers=[
            k8s.V1Container(
                name="base",
                env=[k8s.V1EnvVar(name="env_name", value="value")],
            )
        ]),
    )
    k = KubernetesPodOperator(
        task_id="task" + self.get_current_task_name(),
        in_cluster=False,
        pod_template_file=fixture,
        full_pod_spec=pod_spec,
        do_xcom_push=True,
    )

    context = create_context(k)
    result = k.execute(context)
    assert result is not None
    assert k.pod.metadata.labels == {
        'fizz': 'buzz',
        'foo': 'bar',
        'airflow_version': mock.ANY,
        'dag_id': 'dag',
        'execution_date': mock.ANY,
        'kubernetes_pod_operator': 'True',
        'task_id': mock.ANY,
        'try_number': '1',
    }
    assert k.pod.spec.containers[0].env == [k8s.V1EnvVar(name="env_name", value="value")]
    assert result == {"hello": "world"}
def setUp(self):
    self.static_uuid = uuid.UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
    self.deserialize_result = {
        'apiVersion': 'v1',
        'kind': 'Pod',
        'metadata': {'name': 'memory-demo', 'namespace': 'mem-example'},
        'spec': {
            'containers': [
                {
                    'args': ['--vm', '1', '--vm-bytes', '150M', '--vm-hang', '1'],
                    'command': ['stress'],
                    'image': 'apache/airflow:stress-2020.07.10-1.0.4',
                    'name': 'memory-demo-ctr',
                    'resources': {'limits': {'memory': '200Mi'}, 'requests': {'memory': '100Mi'}},
                }
            ]
        },
    }

    self.envs = {'ENVIRONMENT': 'prod', 'LOG_LEVEL': 'warning'}
    self.secrets = [
        # This should be a secretRef
        Secret('env', None, 'secret_a'),
        # This should be a single secret mounted in volumeMounts
        Secret('volume', '/etc/foo', 'secret_b'),
        # This should produce a single secret mounted in env
        Secret('env', 'TARGET', 'secret_b', 'source_b'),
    ]

    self.execution_date = parser.parse('2020-08-24 00:00:00.000000')
    self.execution_date_label = datetime_to_label_safe_datestring(self.execution_date)
    self.dag_id = 'dag_id'
    self.task_id = 'task_id'
    self.try_number = 3
    self.labels = {
        'airflow-worker': 'uuid',
        'dag_id': self.dag_id,
        'execution_date': self.execution_date_label,
        'task_id': self.task_id,
        'try_number': str(self.try_number),
        'airflow_version': __version__.replace('+', '-'),
        'kubernetes_executor': 'True',
    }
    self.annotations = {
        'dag_id': self.dag_id,
        'task_id': self.task_id,
        'execution_date': self.execution_date.isoformat(),
        'try_number': str(self.try_number),
    }
    self.metadata = {
        'labels': self.labels,
        'name': 'pod_id-' + self.static_uuid.hex,
        'namespace': 'namespace',
        'annotations': self.annotations,
    }

    self.resources = k8s.V1ResourceRequirements(
        requests={
            "cpu": 1,
            "memory": "1Gi",
            "ephemeral-storage": "2Gi",
        },
        limits={"cpu": 2, "memory": "2Gi", "ephemeral-storage": "4Gi", 'nvidia.com/gpu': 1},
    )

    self.k8s_client = ApiClient()
    self.expected = k8s.V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=k8s.V1ObjectMeta(
            namespace="default",
            name='myapp-pod-' + self.static_uuid.hex,
            labels={'app': 'myapp'},
        ),
        spec=k8s.V1PodSpec(
            containers=[
                k8s.V1Container(
                    name='base',
                    image='busybox',
                    command=['sh', '-c', 'echo Hello Kubernetes!'],
                    env=[
                        k8s.V1EnvVar(name='ENVIRONMENT', value='prod'),
                        k8s.V1EnvVar(
                            name="LOG_LEVEL",
                            value='warning',
                        ),
                        k8s.V1EnvVar(
                            name='TARGET',
                            value_from=k8s.V1EnvVarSource(
                                secret_key_ref=k8s.V1SecretKeySelector(name='secret_b', key='source_b')
                            ),
                        ),
                    ],
                    env_from=[
                        k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name='configmap_a')),
                        k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name='configmap_b')),
                        k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(name='secret_a')),
                    ],
                    ports=[k8s.V1ContainerPort(name="foo", container_port=1234)],
                    resources=k8s.V1ResourceRequirements(
                        requests={'memory': '100Mi'},
                        limits={
                            'memory': '200Mi',
                        },
                    ),
                )
            ],
            security_context=k8s.V1PodSecurityContext(
                fs_group=2000,
                run_as_user=1000,
            ),
            host_network=True,
            image_pull_secrets=[
                k8s.V1LocalObjectReference(name="pull_secret_a"),
                k8s.V1LocalObjectReference(name="pull_secret_b"),
            ],
        ),
    )
def test_from_obj(self):
    result = PodGenerator.from_obj(
        {
            "pod_override": k8s.V1Pod(
                api_version="v1",
                kind="Pod",
                metadata=k8s.V1ObjectMeta(name="foo", annotations={"test": "annotation"}),
                spec=k8s.V1PodSpec(
                    containers=[
                        k8s.V1Container(
                            name="base",
                            volume_mounts=[
                                k8s.V1VolumeMount(
                                    mount_path="/foo/", name="example-kubernetes-test-volume"
                                )
                            ],
                        )
                    ],
                    volumes=[
                        k8s.V1Volume(
                            name="example-kubernetes-test-volume",
                            host_path=k8s.V1HostPathVolumeSource(path="/tmp/"),
                        )
                    ],
                ),
            )
        }
    )
    result = self.k8s_client.sanitize_for_serialization(result)

    assert {
        'apiVersion': 'v1',
        'kind': 'Pod',
        'metadata': {
            'name': 'foo',
            'annotations': {'test': 'annotation'},
        },
        'spec': {
            'containers': [
                {
                    'name': 'base',
                    'volumeMounts': [{'mountPath': '/foo/', 'name': 'example-kubernetes-test-volume'}],
                }
            ],
            'volumes': [{'hostPath': {'path': '/tmp/'}, 'name': 'example-kubernetes-test-volume'}],
        },
    } == result

    result = PodGenerator.from_obj(
        {
            "KubernetesExecutor": {
                "annotations": {"test": "annotation"},
                "volumes": [
                    {
                        "name": "example-kubernetes-test-volume",
                        "hostPath": {"path": "/tmp/"},
                    },
                ],
                "volume_mounts": [
                    {
                        "mountPath": "/foo/",
                        "name": "example-kubernetes-test-volume",
                    },
                ],
            }
        }
    )

    result_from_pod = PodGenerator.from_obj(
        {
            "pod_override": k8s.V1Pod(
                metadata=k8s.V1ObjectMeta(annotations={"test": "annotation"}),
                spec=k8s.V1PodSpec(
                    containers=[
                        k8s.V1Container(
                            name="base",
                            volume_mounts=[
                                k8s.V1VolumeMount(
                                    name="example-kubernetes-test-volume", mount_path="/foo/"
                                )
                            ],
                        )
                    ],
                    volumes=[k8s.V1Volume(name="example-kubernetes-test-volume", host_path="/tmp/")],
                ),
            )
        }
    )

    result = self.k8s_client.sanitize_for_serialization(result)
    result_from_pod = self.k8s_client.sanitize_for_serialization(result_from_pod)

    expected_from_pod = {
        'metadata': {'annotations': {'test': 'annotation'}},
        'spec': {
            'containers': [
                {
                    'name': 'base',
                    'volumeMounts': [{'mountPath': '/foo/', 'name': 'example-kubernetes-test-volume'}],
                }
            ],
            'volumes': [{'hostPath': '/tmp/', 'name': 'example-kubernetes-test-volume'}],
        },
    }
    assert (
        result_from_pod == expected_from_pod
    ), "There was a discrepancy between KubernetesExecutor and pod_override"

    assert {
        'apiVersion': 'v1',
        'kind': 'Pod',
        'metadata': {
            'annotations': {'test': 'annotation'},
        },
        'spec': {
            'containers': [
                {
                    'args': [],
                    'command': [],
                    'env': [],
                    'envFrom': [],
                    'name': 'base',
                    'ports': [],
                    'volumeMounts': [{'mountPath': '/foo/', 'name': 'example-kubernetes-test-volume'}],
                }
            ],
            'hostNetwork': False,
            'imagePullSecrets': [],
            'volumes': [{'hostPath': {'path': '/tmp/'}, 'name': 'example-kubernetes-test-volume'}],
        },
    } == result