Example #1
def volume(self):
     values = ValueOrchestrator()
     return Volume(
         self.volume_name,
         config_map=ConfigMapVolumeSource(
             values.airflow_worker_pod_template_config_file,
             optional=False,
             items=[
                 KeyToPath(
                     values.airflow_worker_pod_template_config_file,
                     "pod_template.yaml",
                 )
             ],
         ),
     )
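Here ValueOrchestrator acts as a shared name registry: the same airflow_worker_pod_template_config_file value names the ConfigMap and the key that gets projected to pod_template.yaml inside the mounted volume.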
Example #2
 def __init__(self, airflow_options: AirflowOptions):
     values = ValueOrchestrator()
     super().__init__(
         ObjectMeta(
             name=values.airflow_pod_service_account,
             namespace=airflow_options.namespace,
         ))
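A minimal use of the orchestrator: it supplies only the service-account name for the ObjectMeta, while the namespace comes from AirflowOptions.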
Example #3
 def __init__(
     self,
     sql_options: SqlOptions,
     redis_options: RedisOptions,
     airflow_options: AirflowOptions,
     monitoring_options: MonitoringOptions,
     cloud_options: CloudOptions,
 ):
     config_file = ValueOrchestrator().airflow_worker_pod_template_config_file
     super().__init__(
         AirflowMeta(config_file),
         data={
             config_file: dump(
                 AirflowWorkerPodTemplate(
                     sql_options,
                     redis_options,
                     airflow_options,
                     monitoring_options,
                     cloud_options,
                     "worker-pod-template",
                 ).to_dict()
             )
         },
     )
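The ConfigMap stores the serialized worker pod template under the orchestrator-provided key; dump is presumably yaml.dump, turning the template's to_dict() output into YAML.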
Example #4
 def __init__(
     self,
     sql_options: SqlOptions,
     airflow_options: AirflowOptions,
     redis_options: RedisOptions,
     namespace: str,
     service_factory: ServiceFactory,
 ):
     sql_conn_string = sql_options.get_sql_alchemy_conn_string(
         service_factory.database_service.kube_dns_name)
     data = {
         "AIRFLOW_CONN_POSTGRES_BACKEND": sql_options.get_sql_uri(
             service_factory.database_service.kube_dns_name),
         "AIRFLOW__CORE__FERNET_KEY": airflow_options.fernet_key,
         "AIRFLOW__CORE__SQL_ALCHEMY_CONN": sql_conn_string,
         "AIRFLOW__CELERY__RESULT_BACKEND": sql_conn_string,
         "AIRFLOW__CELERY__BROKER_URL": redis_options.redis_connection_string,
     }
     if airflow_options.smtp_notification_options:
         data["AIRFLOW__SMTP__SMTP_PASSWORD"] = (
             airflow_options.smtp_notification_options.smtp_password)
     if airflow_options.git_ssh_key:
         data["gitSshKey"] = airflow_options.git_ssh_key
     values = ValueOrchestrator()
     super().__init__(AirflowMeta(values.secret_name, namespace=namespace),
                      string_data=data)
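values.secret_name names the Secret that holds Airflow's connection settings. Because the values are passed as string_data, they are written in plain text and base64-encoded by the Kubernetes API server.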
Example #5
 def __init__(
     self,
     efs_id: str,
     cluster_name: str,
     elastic_search_access_role_arn: str,
     default_role_arn: str,
     alb_role_arn: str,
     external_dns_role_arn: str,
     autoscaling_role_arn: str,
     dag_sync_role_arn: str,
     domain: str,
     domain_filters: Optional[List[str]] = None,
     use_ssl: bool = False,
 ):
     self.__efs_id = efs_id
     self.__cluster_name = cluster_name
     self.__elastic_search_access_role = elastic_search_access_role_arn
     self.__default_role = default_role_arn
     self.__alb_role_arn = alb_role_arn
     self.__domain = domain
     self.__domain_filters = domain_filters
     self.__external_dns_role_arn = external_dns_role_arn
     self.__values = ValueOrchestrator()
     self.__use_ssl = use_ssl
     self.__autoscaling_role_arn = autoscaling_role_arn
     self.__dag_sync_role_arn = dag_sync_role_arn
     super().__init__(
         StorageClass(ObjectMeta(name="efs-sc"), "efs.csi.aws.com"),
         "Filesystem",
     )
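An AWS-specific options class: the role ARNs and cluster settings are stored on the instance (including the orchestrator itself as self.__values), while the superclass receives an EFS-backed StorageClass using the efs.csi.aws.com provisioner.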
Example #6
 def __init__(self, redis_options: RedisOptions):
     labels = ValueOrchestrator()
     super().__init__(
         AirflowMeta("redis-deployment"),
         DeploymentSpec(RedisPodTemplate(redis_options),
                        LabelSelector(labels.redis_labels)),
     )
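The redis Deployment selects its pods through labels.redis_labels, so the same labels must appear on the pod template for the Deployment to adopt them.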
Example #7
 def __init__(self, sql_options: SqlOptions):
     super().__init__(
         AirflowMeta(name="postgres-database-deployment"),
         DeploymentSpec(
             PostgresPodTemplate(sql_options),
             LabelSelector(ValueOrchestrator().database_labels),
         ),
     )
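The postgres Deployment follows the same pattern, selecting pods via ValueOrchestrator().database_labels.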
Example #8
 def volume(self):
     return Volume(
         self.volume_name,
         secret=SecretVolumeSource(
             False,
             ValueOrchestrator().secret_name,
             items=[KeyToPath("gitSshKey", "id_rsa", 256)],
         ),
     )
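This volume projects the gitSshKey entry of the shared Secret (written in Example #4) to a file named id_rsa. The mode 256 is decimal for octal 0o400, i.e. owner read-only, which is what SSH expects of a private key.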
Example #9
 def __init__(self, redis_options: RedisOptions):
     labels = ValueOrchestrator()
     super().__init__(
         labels.redis_service_name,
         redis_options.port,
         redis_options.port,
         30002,
         selector_labels=labels.redis_labels,
         node_ports_open=True,
     )
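The redis Service takes its name and selector labels from the orchestrator and its port from RedisOptions; only the node port 30002 is hard-coded.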
Example #10
 def __init__(self):
     values = ValueOrchestrator()
     super().__init__(
         values.es_proxy_service_name,
         values.elasticsearch_proxy_port,
         values.elasticsearch_proxy_port,
         33000,
         values.elasticsearch_proxy_labels,
         port_name="https",
         protocol="TCP",
     )
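The elasticsearch proxy Service is driven entirely by orchestrator values, apart from the hard-coded node port 33000.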
Example #11
 def __init__(
     self,
     sql_options: SqlOptions,
     cloud_options: CloudOptions,
     airflow_options: AirflowOptions,
 ):
     self._namespace = airflow_options.namespace
     self._pod_namespace = airflow_options.pods_namespace
     self._sql_options = sql_options
     self._cloud_options = cloud_options
     self._airflow_options = airflow_options
     self._values = ValueOrchestrator()
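A factory-style class that only stores its inputs; keeping ValueOrchestrator on self._values makes the shared names available to whatever the class builds later.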
Example #12
 def __init__(self, redis_options: RedisOptions):
     labels = ValueOrchestrator()
     super().__init__(
         AirflowMeta("redis-pod", labels=labels.redis_labels),
         PodSpec([
             Container(
                 "redis",
                 image="redis",
                 image_pull_policy="IfNotPresent",
                 ports=[ContainerPort(redis_options.port)],
             )
         ]),
     )
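The redis pod itself: a single stock redis container, labelled with labels.redis_labels so the Deployment selector from Example #6 matches it.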
Example #13
 def __init__(
     self,
     port: int = 6379,
     host: str = ValueOrchestrator().redis_service_name,
     proto: str = "redis://",
     password: str = "",
     db_num: int = 1,
 ):
     self.port = port
     self.host = host
     self.proto = proto
     self.password = password
     self.db_num = db_num
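The host default is evaluated once at import time, which is harmless as long as ValueOrchestrator is a static name registry. The redis_connection_string consumed in Example #4 is not shown on this page; a rough sketch of how it might be derived from these fields, where the URL format is an assumption based on the standard redis:// scheme rather than anything shown here:

def redis_connection_string(options) -> str:
    # Hypothetical: Celery-style broker URL, redis://[:password@]host:port/db_num
    auth = f":{options.password}@" if options.password else ""
    return f"{options.proto}{auth}{options.host}:{options.port}/{options.db_num}"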
Example #14
 def __init__(self, sql_options: SqlOptions):
     super().__init__(
         AirflowMeta(name="postgres-database-pod",
                     labels=ValueOrchestrator().database_labels),
         spec=PodSpec([
             Container(
                 name="postgres-database",
                 image="postgres",
                 env=sql_options.get_postgres_envioronment(),
                 ports=[ContainerPort(5432, name="postgres")],
                 image_pull_policy="IfNotPresent",
             )
         ]),
     )
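The postgres pod: a stock postgres image configured through environment variables from SqlOptions and labelled with database_labels so Example #7's selector matches.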
Example #15
 def __init__(
     self,
     enabled: bool = True,
     elastic_search_uri: str = DEFAULT_ELASTIC_SEARCH_URI,
     grafana_role: str = "Viewer",
     elastic_search_proxy_uri: str = ValueOrchestrator().es_proxy_service_name,
 ):
     if grafana_role not in self.view_modes:
         raise Exception(
             f"{grafana_role} is not a valid role, choose one of "
             f"{self.view_modes}")
     self.enabled = enabled
     self.elastic_search_uri = elastic_search_uri
     self.grafana_role = grafana_role
     self.enable_elasticsearch_dependency = False
     if elastic_search_uri == DEFAULT_ELASTIC_SEARCH_URI:
         self.enable_elasticsearch_dependency = True
     self.elastic_search_proxy_uri = f"http://{elastic_search_proxy_uri}:9200"
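An invalid grafana_role fails fast. Leaving elastic_search_uri at its default enables the built-in elasticsearch dependency, and the proxy URI defaults to the es-proxy service name from Example #10, expanded to an http URL on port 9200.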
Example #16
    def __init__(self, cloud_options: CloudOptions, elastic_search_uri: str):
        values = ValueOrchestrator()

        probe = Probe(
            http_get=HTTPGetAction(path="/_cluster/health", port="https"))

        super().__init__(
            AirflowMeta("aws-es-proxy", ),
            DeploymentSpec(
                replicas=1,
                selector=LabelSelector(values.elasticsearch_proxy_labels),
                template=PodTemplateSpec(
                    AirflowMeta(
                        "es-proxy",
                        labels=values.elasticsearch_proxy_labels,
                        annotations=cloud_options.elasticsearch_connection_annotations,
                    ),
                    spec=PodSpec([
                        Container(
                            "es-proxy",
                            image="abutaha/aws-es-proxy:latest",
                            image_pull_policy="IfNotPresent",
                            args=[
                                "-listen",
                                "0.0.0.0:9200",
                                "-endpoint",
                                elastic_search_uri,
                                "-verbose",
                            ],
                            ports=[
                                ContainerPort(9200,
                                              protocol="TCP",
                                              name="https")
                            ],
                            liveness_probe=probe,
                            readiness_probe=probe,
                        )
                    ]),
                ),
            ),
        )
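One replica of abutaha/aws-es-proxy fronts the cluster endpoint on port 9200; the same /_cluster/health probe serves as both liveness and readiness check, and cloud-specific connection annotations are attached to the pod template.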
Example #17
 def __init__(
     self,
     sql_options: SqlOptions,
     redis_options: RedisOptions,
     airflow_options: AirflowOptions,
     monitoring_options: MonitoringOptions,
     cloud_options: CloudOptions,
 ):
     values = ValueOrchestrator()
     service_account = (values.airflow_pod_service_account
                        if airflow_options.in_kube_mode else "default")
     super().__init__(
         sql_options,
         redis_options,
         airflow_options,
         monitoring_options,
         cloud_options,
         "airflow-master-pod",
         values.master_node_labels,
         StorageGroupFactory(airflow_options, cloud_options,
                             airflow_options.namespace),
         service_account,
     )
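In kube mode the master pod runs under the dedicated service account from the orchestrator; otherwise it falls back to the namespace default.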
Example #18
 def __init__(
     self,
     sql_options: SqlOptions,
     redis_options: RedisOptions,
     airflow_options: AirflowOptions,
     monitoring_options: MonitoringOptions,
     cloud_options: CloudOptions,
 ):
     super().__init__(
         AirflowMeta(name="airflow-master-deployment"),
         DeploymentSpec(
             AirflowMasterPodTemplate(
                 sql_options,
                 redis_options,
                 airflow_options,
                 monitoring_options,
                 cloud_options,
             ),
             LabelSelector(ValueOrchestrator().master_node_labels),
             strategy=DeploymentStrategy(
                 RollingUpdateDeployment(max_surge=1, max_unavailable=1)),
         ),
     )
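The master Deployment ties the pod template from Example #17 to master_node_labels, with a rolling update that allows one surge pod and one unavailable pod during rollout.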
Example #19
 def __init__(
     self,
     sql_options: SqlOptions,
     redis_options: RedisOptions,
     airflow_options: AirflowOptions,
     monitoring_options: MonitoringOptions,
     cloud_options: CloudOptions,
     name: str,
     service_account: str = "default",
 ):
     values = ValueOrchestrator()
     super().__init__(
         sql_options,
         redis_options,
         airflow_options,
         monitoring_options,
         cloud_options,
         name,
         values.worker_node_labels,
         StorageGroupFactory(airflow_options, cloud_options,
                             airflow_options.pods_namespace),
         service_account,
         "Never",
     )
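The worker template reuses the same options but takes worker_node_labels, builds its storage group in the pods namespace, and passes "Never", presumably the pod restart policy.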
Example #20
@pytest.fixture
def label():
    # Injected into test_deployments_present below; the decorator is
    # required for pytest to treat this as a fixture
    return ValueOrchestrator()


def test_services_present(airflow_options, service: AirflowService):
    service_name = service.metadata.name
    if service_name in {"flower-svc", "redis-svc"}:
        skip_if_not_celery(airflow_options)

    service_info = kubectl_name_dict("service")
    assert service_info[service_name]["TYPE"] == "NodePort"

    def get_port(service: str):
        # kubectl renders "PORT(S)" like "6379:30002/TCP"; the first four
        # characters are the service port, since every port here is four digits
        return service_info[service]["PORT(S)"][:4]

    assert get_port(service_name) == str(service.spec.ports[0].port)


@pytest.fixture(params=[
    ValueOrchestrator().master_deployment_name,
    ValueOrchestrator().redis_deployment_name,
    ValueOrchestrator().database_deployment_name,
])
def deployment(request):
    return request.param


def test_deployments_present(deployment, label, airflow_options):
    if deployment == label.redis_deployment_name:
        skip_if_not_celery(airflow_options)
    deployment_info = kubectl_name_dict("deployment")
    assert deployment in deployment_info
    assert deployment_info[deployment]["READY"] == "1/1"
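These tests query a live cluster and compare what kubectl reports against the same ValueOrchestrator names used to build the objects. The kubectl_name_dict helper is not shown on this page; a rough sketch of what it might look like, where the subprocess call and column parsing are assumptions and only the name and usage come from the tests above:

import subprocess


def kubectl_name_dict(resource: str) -> dict:
    # e.g. "kubectl get service": one dict per output row, keyed by NAME
    output = subprocess.check_output(["kubectl", "get", resource], text=True)
    header, *rows = output.strip().splitlines()
    columns = header.split()
    return {
        fields[0]: dict(zip(columns, fields))
        for fields in (row.split() for row in rows)
    }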