def _create(self, app_spec, selector, labels):
    """Create or update the Kubernetes Service for an application.

    Ports from an already-existing Service are merged into the new port list,
    so ports added outside this deployer are not lost on update.
    """
    LOG.info("Creating/updating service for %s with labels: %s", app_spec.name, labels)
    service_ports = [self._make_service_port(port_spec) for port_spec in app_spec.ports]
    try:
        existing = Service.get(app_spec.name, app_spec.namespace)
    except NotFound:
        pass
    else:
        service_ports = self._merge_ports(existing.spec.ports, service_ports)
    metadata = ObjectMeta(
        name=app_spec.name,
        namespace=app_spec.namespace,
        labels=merge_dicts(app_spec.labels.service, labels),
        annotations=merge_dicts(app_spec.annotations.service, self._make_tcp_port_annotation(app_spec)),
    )
    spec = ServiceSpec(selector=selector, ports=service_ports, type=self._service_type)
    service = Service.get_or_create(metadata=metadata, spec=spec)
    self._owner_references.apply(service, app_spec)
    service.save()
def _create(self, app_spec, labels):
    """Create or update the Ingress for an application."""
    LOG.info("Creating/updating ingress for %s", app_spec.name)
    expose_annotation = {
        u"fiaas/expose": u"true" if _has_explicitly_set_host(app_spec) else u"false"
    }
    metadata = ObjectMeta(
        name=app_spec.name,
        namespace=app_spec.namespace,
        labels=merge_dicts(app_spec.labels.ingress, labels),
        annotations=merge_dicts(app_spec.annotations.ingress, expose_annotation),
    )
    # one rule per ingress item with an explicit host, then the default-host rules
    rules = []
    for ingress_item in app_spec.ingresses:
        if ingress_item.host is None:
            continue
        rules.append(IngressRule(
            host=self._apply_host_rewrite_rules(ingress_item.host),
            http=self._make_http_ingress_rule_value(app_spec, ingress_item.pathmappings),
        ))
    rules = rules + self._create_default_host_ingress_rules(app_spec)
    ingress = Ingress.get_or_create(metadata=metadata, spec=IngressSpec(rules=rules))
    self._ingress_tls.apply(ingress, app_spec, self._get_hosts(app_spec))
    ingress.save()
def run_bootstrap(self, kubernetes, k8s_version, use_docker_for_e2e):
    """Run fiaas-deploy-daemon-bootstrap as a subprocess and return its exit code."""
    base_args = ["fiaas-deploy-daemon-bootstrap", "--debug"]
    # connection parameters all come from the kubernetes fixture dict
    for flag, key in (("--api-server", "server"),
                      ("--api-cert", "api-cert"),
                      ("--client-cert", "client-cert"),
                      ("--client-key", "client-key")):
        base_args += [flag, kubernetes[key]]
    if tpr_supported(k8s_version):
        base_args.append("--enable-tpr-support")
    if crd_supported(k8s_version):
        base_args.append("--enable-crd-support")
    cert_path = os.path.dirname(kubernetes["api-cert"])
    full_args = use_docker_for_e2e(cert_path, "bootstrap", k8s_version, get_unbound_port()) + base_args
    bootstrap = subprocess.Popen(full_args, stdout=sys.stderr,
                                 env=merge_dicts(os.environ, {"NAMESPACE": "default"}))
    return bootstrap.wait()
def apply(self, ingress, app_spec, hosts):
    """Attach TLS configuration (annotations and IngressTLS entries) to an ingress,
    when the app spec calls for TLS; otherwise leave the ingress untouched."""
    if not self._should_have_ingress_tls(app_spec):
        return
    app_issuer = app_spec.ingress_tls.certificate_issuer
    if self._cert_issuer or app_issuer:
        # cert-manager: the app-level issuer takes precedence over the configured default
        tls_annotations = {
            u"certmanager.k8s.io/cluster-issuer": app_issuer if app_issuer else self._cert_issuer
        }
    else:
        tls_annotations = {u"kubernetes.io/tls-acme": u"true"}
    current = ingress.metadata.annotations
    ingress.metadata.annotations = merge_dicts(current if current else {}, tls_annotations)
    if self.enable_deprecated_tls_entry_per_host:
        # TODO: DOCD-1846 - Once new certificates has been provisioned, remove the single host entries and
        # associated configuration flag
        ingress.spec.tls = [
            IngressTLS(hosts=[host], secretName=host) for host in hosts if len(host) < 64
        ]
    else:
        ingress.spec.tls = []
    collapsed = self._collapse_hosts(app_spec, hosts)
    ingress.spec.tls.append(
        IngressTLS(hosts=collapsed, secretName="{}-ingress-tls".format(app_spec.name)))
def apply(self, ingress, app_spec, hosts, issuer_type, use_suffixes=True):
    """Attach TLS configuration to an ingress when the app spec calls for TLS.

    issuer_type is the annotation key used for the certificate issuer;
    use_suffixes collapses hosts into a shorter generated set before the
    combined IngressTLS entry is appended.
    """
    if not self._should_have_ingress_tls(app_spec):
        return
    app_issuer = app_spec.ingress_tls.certificate_issuer
    if self._cert_issuer or app_issuer:
        tls_annotations = {issuer_type: app_issuer if app_issuer else self._cert_issuer}
    else:
        tls_annotations = {u"kubernetes.io/tls-acme": u"true"}
    current = ingress.metadata.annotations
    ingress.metadata.annotations = merge_dicts(current if current else {}, tls_annotations)
    if self.enable_deprecated_tls_entry_per_host:
        # TODO: DOCD-1846 - Once new certificates has been provisioned, remove the single host entries and
        # associated configuration flag
        ingress.spec.tls = [
            IngressTLS(hosts=[host], secretName=host) for host in hosts if len(host) < 64
        ]
    else:
        ingress.spec.tls = []
    if use_suffixes:
        # adding app-name to suffixes could result in a host too long to be the common-name of a cert, and
        # as the user doesn't control it we should generate a host we know will fit
        hosts = self._collapse_hosts(app_spec, hosts)
    ingress.spec.tls.append(
        IngressTLS(hosts=hosts, secretName="{}-ingress-tls".format(ingress.metadata.name)))
def deploy(self, app_spec, labels):
    """Reconcile the HorizontalPodAutoscaler for an application.

    Creates/updates the HPA when the app spec calls for autoscaling;
    otherwise deletes any pre-existing one (ignoring NotFound).
    """
    if not should_have_autoscaler(app_spec):
        try:
            LOG.info("Deleting any pre-existing autoscaler for %s", app_spec.name)
            HorizontalPodAutoscaler.delete(app_spec.name, app_spec.namespace)
        except NotFound:
            pass
        return
    LOG.info("Creating/updating %s for %s", self.name, app_spec.name)
    metadata = ObjectMeta(
        name=app_spec.name,
        namespace=app_spec.namespace,
        labels=merge_dicts(app_spec.labels.horizontal_pod_autoscaler, labels),
        annotations=app_spec.annotations.horizontal_pod_autoscaler,
    )
    target = CrossVersionObjectReference(kind=u"Deployment", name=app_spec.name, apiVersion="apps/v1")
    spec = HorizontalPodAutoscalerSpec(
        scaleTargetRef=target,
        minReplicas=app_spec.autoscaler.min_replicas,
        maxReplicas=app_spec.autoscaler.max_replicas,
        targetCPUUtilizationPercentage=app_spec.autoscaler.cpu_threshold_percentage,
    )
    autoscaler = HorizontalPodAutoscaler.get_or_create(metadata=metadata, spec=spec)
    self._owner_references.apply(autoscaler, app_spec)
    autoscaler.save()
def run_bootstrap(self, request, kubernetes, k8s_version, use_docker_for_e2e):
    """Run fiaas-deploy-daemon-bootstrap against the e2e cluster and return its exit code."""
    cert_path = os.path.dirname(kubernetes["api-cert"])
    docker_args = use_docker_for_e2e(request, cert_path, "bootstrap", k8s_version,
                                     get_unbound_port(),
                                     kubernetes['container-to-container-server-ip'])
    # when running in docker, the apiserver must be reached on the
    # container-to-container address instead of the host-facing one
    if docker_args:
        server = kubernetes['container-to-container-server']
    else:
        server = kubernetes["host-to-container-server"]
    bootstrap_args = [
        "fiaas-deploy-daemon-bootstrap",
        "--debug",
        "--api-server", server,
        "--api-cert", kubernetes["api-cert"],
        "--client-cert", kubernetes["client-cert"],
        "--client-key", kubernetes["client-key"],
    ]
    if crd_supported(k8s_version):
        bootstrap_args.append("--enable-crd-support")
    process = subprocess.Popen(docker_args + bootstrap_args, stdout=sys.stderr,
                               env=merge_dicts(os.environ, {"NAMESPACE": "default"}))
    return process.wait()
def apply(self, deployment, app_spec):
    """Wire Strongbox secret fetching into the deployment's pod template.

    Adds a secrets init container (configured via AWS_REGION/SECRET_GROUPS),
    mounts and volumes for the fetched secrets, and strongbox annotations on
    the pod template metadata.
    """
    deployment_spec = deployment.spec
    pod_template_spec = deployment_spec.template
    pod_spec = pod_template_spec.spec
    main_container = pod_spec.containers[0]
    if app_spec.secrets_in_environment:
        # warn only; the strongbox wiring below is still applied
        LOG.warning("%s is attempting to use 'secrets_in_environment' and strongbox at the same time,"
                    " which is not supported.", app_spec.name)
    self._apply_mounts(app_spec, main_container)
    strongbox_env = {
        "AWS_REGION": app_spec.strongbox.aws_region,
        "SECRET_GROUPS": ",".join(app_spec.strongbox.groups),
    }
    init_container = self._make_secrets_init_container(app_spec, self._strongbox_init_container_image,
                                                       env_vars=strongbox_env)
    pod_spec.initContainers.append(init_container)
    self._apply_volumes(app_spec, pod_spec)
    # presumably annotations only apply when the strongbox init container is in use — TODO confirm
    strongbox_annotations = self._make_strongbox_annotations(app_spec) if self._uses_strongbox_init_container(
        app_spec) else {}
    pod_metadata = pod_template_spec.metadata
    pod_metadata.annotations = merge_dicts(pod_metadata.annotations, strongbox_annotations)
def _make_env(self, app_spec):
    """Build the container environment variable list for an application.

    Combines static variables (legacy, global, and FIAAS-managed — later
    sources override earlier ones) with downward-API variables exposing
    resource requests/limits and pod identity. Result is sorted by name.
    """
    fiaas_managed_env = {
        'FIAAS_ARTIFACT_NAME': app_spec.name,
        'FIAAS_IMAGE': app_spec.image,
        'FIAAS_VERSION': app_spec.version,
    }
    if not self._disable_deprecated_managed_env_vars:
        fiaas_managed_env.update({
            'ARTIFACT_NAME': app_spec.name,
            'IMAGE': app_spec.image,
            'VERSION': app_spec.version,
        })
    # fiaas_managed_env overrides global_env overrides legacy_fiaas_env
    static_env = merge_dicts(self._legacy_fiaas_env, self._global_env, fiaas_managed_env)
    env = [EnvVar(name=key, value=val) for key, val in static_env.items()]
    # FIAAS managed environment variables using the downward API
    resource_fields = (
        ("FIAAS_REQUESTS_CPU", "requests.cpu"),
        ("FIAAS_REQUESTS_MEMORY", "requests.memory"),
        ("FIAAS_LIMITS_CPU", "limits.cpu"),
        ("FIAAS_LIMITS_MEMORY", "limits.memory"),
    )
    env.extend(
        EnvVar(name=var_name,
               valueFrom=EnvVarSource(resourceFieldRef=ResourceFieldSelector(
                   containerName=app_spec.name, resource=resource, divisor=1)))
        for var_name, resource in resource_fields)
    field_refs = (
        ("FIAAS_NAMESPACE", "metadata.namespace"),
        ("FIAAS_POD_NAME", "metadata.name"),
    )
    env.extend(
        EnvVar(name=var_name,
               valueFrom=EnvVarSource(fieldRef=ObjectFieldSelector(fieldPath=field_path)))
        for var_name, field_path in field_refs)
    # final sort makes the output order independent of construction order
    env.sort(key=lambda var: var.name)
    return env
def custom_resource_definition_test_case(self, fiaas_path, namespace, labels, expected):
    """Build a (name, FiaasApplication, expected-resources) test case from fixture files."""
    name = sanitize_resource_name(fiaas_path)
    config = read_yml(file_relative_path(fiaas_path))
    expected_resources = {kind: read_yml_if_exists(path) for kind, path in expected.items()}
    app_labels = merge_dicts(labels, {"fiaas/deployment_id": DEPLOYMENT_ID})
    metadata = ObjectMeta(name=name, namespace=namespace, labels=app_labels)
    application = FiaasApplication(
        metadata=metadata,
        spec=FiaasApplicationSpec(application=name, image=IMAGE, config=config))
    return name, application, expected_resources
def _create(self, app_spec, labels):
    """Create/update all ingresses for the application, then remove stale ones."""
    LOG.info("Creating/updating ingresses for %s", app_spec.name)
    ingress_labels = merge_dicts(app_spec.labels.ingress, labels)
    annotated_ingresses = self._group_ingresses(app_spec)
    LOG.info("Will create %s ingresses", len(annotated_ingresses))
    for annotated_ingress in annotated_ingresses:
        if annotated_ingress.ingress_items:
            self._create_ingress(app_spec, annotated_ingress, ingress_labels)
        else:
            LOG.info("No items, skipping: %s", annotated_ingress)
    self._delete_unused(app_spec, ingress_labels)
def _create_ingress(self, app_spec, annotated_ingress, labels):
    """Create or update a single ingress from one annotated ingress group."""
    default_annotations = {
        u"fiaas/expose": u"true" if _has_explicitly_set_host(
            annotated_ingress.ingress_items) else u"false"
    }
    metadata = ObjectMeta(
        name=annotated_ingress.name,
        namespace=app_spec.namespace,
        labels=labels,
        annotations=merge_dicts(app_spec.annotations.ingress,
                                annotated_ingress.annotations,
                                default_annotations),
    )
    rules = [
        IngressRule(host=self._apply_host_rewrite_rules(item.host),
                    http=self._make_http_ingress_rule_value(app_spec, item.pathmappings))
        for item in annotated_ingress.ingress_items
        if item.host is not None
    ]
    # only un-annotated ingresses get the default-host rules and TLS host suffixes
    use_suffixes = not annotated_ingress.annotations
    if use_suffixes:
        rules = rules + self._create_default_host_ingress_rules(app_spec)
    ingress = Ingress.get_or_create(metadata=metadata, spec=IngressSpec(rules=rules))
    hosts_for_tls = [rule.host for rule in rules]
    self._ingress_tls.apply(ingress, app_spec, hosts_for_tls, use_suffixes=use_suffixes)
    self._owner_references.apply(ingress, app_spec)
    ingress.save()
def fdd(self, request, kubernetes, service_type, k8s_version, use_docker_for_e2e):
    """Pytest fixture: start fiaas-deploy-daemon against the e2e cluster.

    Yields the daemon's /fiaas base URL once the web interface reports healthy
    (and, where supported, the CRDs are available). Always terminates the
    subprocess on teardown via the finally block.
    """
    port = get_unbound_port()
    cert_path = os.path.dirname(kubernetes["api-cert"])
    docker_args = use_docker_for_e2e(request, cert_path, service_type, k8s_version, port,
                                     kubernetes['container-to-container-server-ip'])
    # when running in docker, use the container-to-container apiserver address
    server = kubernetes['container-to-container-server'] if docker_args else kubernetes["host-to-container-server"]
    args = [
        "fiaas-deploy-daemon",
        "--port", str(port),
        "--api-server", server,
        "--api-cert", kubernetes["api-cert"],
        "--client-cert", kubernetes["client-cert"],
        "--client-key", kubernetes["client-key"],
        "--service-type", service_type,
        "--ingress-suffix", "svc.test.example.com",
        "--environment", "test",
        "--datadog-container-image", "DATADOG_IMAGE:tag",
        "--strongbox-init-container-image", "STRONGBOX_IMAGE",
        "--secret-init-containers", "parameter-store=PARAM_STORE_IMAGE",
        "--use-ingress-tls", "default_off",
    ]
    if crd_supported(k8s_version):
        args.append("--enable-crd-support")
    args = docker_args + args
    fdd = subprocess.Popen(args, stdout=sys.stderr, env=merge_dicts(os.environ, {"NAMESPACE": "default"}))
    # brief grace period so an immediate crash is detectable before we start waiting
    time.sleep(1)
    if fdd.poll() is not None:
        pytest.fail("fiaas-deploy-daemon has crashed after startup, inspect logs")

    def ready():
        # health probe; raise_for_status raises on non-2xx responses
        # (presumably wait_until retries until this stops raising — TODO confirm)
        resp = requests.get("http://localhost:{}/healthz".format(port), timeout=TIMEOUT)
        resp.raise_for_status()

    try:
        wait_until(ready, "web-interface healthy", RuntimeError, patience=PATIENCE)
        if crd_supported(k8s_version):
            wait_until(crd_available(kubernetes, timeout=TIMEOUT), "CRD available", RuntimeError,
                       patience=PATIENCE)
        yield "http://localhost:{}/fiaas".format(port)
    finally:
        self._end_popen(fdd)
def deploy(self, app_spec, selector, labels, besteffort_qos_is_required):
    """Create or update the Deployment for app_spec.

    Builds container/pod/deployment specs from the app spec, lets the datadog,
    prometheus and secrets sub-deployers mutate the deployment, then saves it.
    """
    LOG.info("Creating new deployment for %s", app_spec.name)
    deployment_labels = merge_dicts(app_spec.labels.deployment, labels)
    metadata = ObjectMeta(name=app_spec.name, namespace=app_spec.namespace, labels=deployment_labels,
                          annotations=app_spec.annotations.deployment)
    container_ports = [
        ContainerPort(name=port_spec.name, containerPort=port_spec.target_port)
        for port_spec in app_spec.ports
    ]
    env = self._make_env(app_spec)
    # pin the image only when it carries a non-":latest" tag; otherwise always pull
    pull_policy = "IfNotPresent" if (":" in app_spec.image and ":latest" not in app_spec.image) else "Always"
    env_from = [EnvFromSource(configMapRef=ConfigMapEnvSource(name=app_spec.name, optional=True))]
    containers = [
        Container(name=app_spec.name,
                  image=app_spec.image,
                  ports=container_ports,
                  env=env,
                  envFrom=env_from,
                  lifecycle=self._lifecycle,
                  livenessProbe=_make_probe(app_spec.health_checks.liveness),
                  readinessProbe=_make_probe(app_spec.health_checks.readiness),
                  imagePullPolicy=pull_policy,
                  volumeMounts=self._make_volume_mounts(app_spec),
                  resources=_make_resource_requirements(app_spec.resources))
    ]
    automount_service_account_token = app_spec.admin_access
    init_containers = []
    service_account_name = "default"
    pod_spec = PodSpec(containers=containers,
                       initContainers=init_containers,
                       volumes=self._make_volumes(app_spec),
                       serviceAccountName=service_account_name,
                       automountServiceAccountToken=automount_service_account_token,
                       terminationGracePeriodSeconds=self._grace_period)
    pod_labels = merge_dicts(app_spec.labels.pod, _add_status_label(labels))
    pod_metadata = ObjectMeta(name=app_spec.name, namespace=app_spec.namespace, labels=pod_labels,
                              annotations=app_spec.annotations.pod)
    pod_template_spec = PodTemplateSpec(metadata=pod_metadata, spec=pod_spec)
    replicas = app_spec.replicas
    # we must avoid that the deployment scales up to app_spec.replicas if autoscaler has set another value
    if should_have_autoscaler(app_spec):
        try:
            deployment = Deployment.get(app_spec.name, app_spec.namespace)
            replicas = deployment.spec.replicas
        except NotFound:
            pass
    deployment_strategy = DeploymentStrategy(
        rollingUpdate=RollingUpdateDeployment(maxUnavailable=self._max_unavailable,
                                              maxSurge=self._max_surge))
    if app_spec.replicas == 1 and app_spec.singleton:
        # singleton with one replica: no surge, so at most one pod exists during rollout
        deployment_strategy = DeploymentStrategy(
            rollingUpdate=RollingUpdateDeployment(maxUnavailable=1, maxSurge=0))
    spec = DeploymentSpec(replicas=replicas,
                          selector=LabelSelector(matchLabels=selector),
                          template=pod_template_spec,
                          revisionHistoryLimit=5,
                          strategy=deployment_strategy)
    deployment = Deployment.get_or_create(metadata=metadata, spec=spec)
    _clear_pod_init_container_annotations(deployment)
    self._datadog.apply(deployment, app_spec, besteffort_qos_is_required)
    self._prometheus.apply(deployment, app_spec)
    self._secrets.apply(deployment, app_spec)
    deployment.save()
def _get_expected_template_labels(custom_labels):
    """Expected pod-template labels: custom labels plus the active-status marker and common LABELS."""
    status_label = {"fiaas/status": "active"}
    return merge_dicts(custom_labels, status_label, LABELS)
def create_expected_deployment(config,
                               app_spec,
                               image='finntech/testimage:version',
                               version="version",
                               replicas=None,
                               add_init_container_annotations=False):
    """Build the deployment dict we expect the deployer to produce for app_spec.

    Mirrors the deployer's output structure so tests can compare against it
    directly. `replicas` overrides app_spec.replicas when given (and truthy).
    """
    expected_volumes = _get_expected_volumes(app_spec, config.use_in_memory_emptydirs)
    expected_volume_mounts = _get_expected_volume_mounts(app_spec)
    base_expected_health_check = {
        'initialDelaySeconds': 10,
        'periodSeconds': 10,
        'successThreshold': 1,
        'failureThreshold': 3,
        'timeoutSeconds': 1,
    }
    if app_spec.ports:
        expected_liveness_check = merge_dicts(base_expected_health_check, {'tcpSocket': {
            'port': 8080
        }})
        expected_readiness_check = merge_dicts(
            base_expected_health_check, {
                'httpGet': {
                    'path': '/',
                    'scheme': 'HTTP',
                    'port': 8080,
                    'httpHeaders': []
                }
            })
    else:
        # no ports: both probes share the same exec check
        exec_check = merge_dicts(base_expected_health_check, {'exec': {
            'command': ['/app/check.sh'],
        }})
        expected_liveness_check = exec_check
        expected_readiness_check = exec_check
    expected_env_from = [{
        'configMapRef': {
            'name': app_spec.name,
            'optional': True,
        }
    }]
    # only include resource keys the app spec actually sets
    resources = defaultdict(dict)
    if app_spec.resources.limits.cpu:
        resources["limits"]["cpu"] = app_spec.resources.limits.cpu
    if app_spec.resources.limits.memory:
        resources["limits"]["memory"] = app_spec.resources.limits.memory
    if app_spec.resources.requests.cpu:
        resources["requests"]["cpu"] = app_spec.resources.requests.cpu
    if app_spec.resources.requests.memory:
        resources["requests"]["memory"] = app_spec.resources.requests.memory
    resources = dict(resources)
    containers = [{
        'livenessProbe': expected_liveness_check,
        'name': app_spec.name,
        'image': image,
        'volumeMounts': expected_volume_mounts,
        'lifecycle': {
            'preStop': {
                'exec': {
                    'command': ['sleep', '1']
                }
            }
        },
        'command': [],
        'env': create_environment_variables(config.infrastructure,
                                            global_env=config.global_env,
                                            version=version,
                                            environment=config.environment),
        'envFrom': expected_env_from,
        'imagePullPolicy': 'IfNotPresent',
        'readinessProbe': expected_readiness_check,
        'ports': [{
            'protocol': 'TCP',
            'containerPort': 8080,
            'name': 'http'
        }] if app_spec.ports else [],
        'resources': resources,
    }]
    deployment_annotations = app_spec.annotations.deployment if app_spec.annotations.deployment else None
    pod_annotations = app_spec.annotations.pod if app_spec.annotations.pod else {}
    init_container_annotations = {
        "pod.alpha.kubernetes.io/init-containers": 'some data',
        "pod.beta.kubernetes.io/init-containers": 'some data'
    } if add_init_container_annotations else {}
    pod_annotations = _none_if_empty(
        merge_dicts(pod_annotations, init_container_annotations))
    max_surge = u"25%"
    max_unavailable = 0
    if app_spec.replicas == 1 and app_spec.singleton:
        # singleton: no surge / one unavailable, matching the deployer's strategy
        max_surge = 0
        max_unavailable = 1
    deployment = {
        'metadata': pytest.helpers.create_metadata(app_spec.name,
                                                   labels=merge_dicts(
                                                       app_spec.labels.deployment, LABELS),
                                                   annotations=deployment_annotations),
        'spec': {
            'selector': {
                'matchLabels': SELECTOR
            },
            'template': {
                'spec': {
                    'dnsPolicy': 'ClusterFirst',
                    'automountServiceAccountToken': app_spec.admin_access,
                    'serviceAccountName': "default",
                    'terminationGracePeriodSeconds': 31,
                    'restartPolicy': 'Always',
                    'volumes': expected_volumes,
                    'imagePullSecrets': [],
                    'containers': containers,
                    'initContainers': []
                },
                'metadata': pytest.helpers.create_metadata(
                    app_spec.name,
                    labels=_get_expected_template_labels(app_spec.labels.pod),
                    annotations=pod_annotations)
            },
            'replicas': replicas if replicas else app_spec.replicas,
            'revisionHistoryLimit': 5,
            'strategy': {
                'type': 'RollingUpdate',
                'rollingUpdate': {
                    'maxSurge': max_surge,
                    'maxUnavailable': max_unavailable
                }
            }
        }
    }
    return deployment
def test_action_on_signal(self, request, get_or_create, app_spec, test_data, signal):
    """Sending a deploy signal must persist a FiaasApplicationStatus with the
    expected metadata, result and logs, hitting the create URL for new statuses
    and the named-resource URL otherwise.
    """
    app_name = '{}-isb5oqum36ylo'.format(test_data.signal_name)
    # give the app spec distinctive status labels/annotations we can assert on
    labels = app_spec.labels._replace(status={"status/label": "true"})
    annotations = app_spec.annotations._replace(
        status={"status/annotations": "true"})
    app_spec = app_spec._replace(name=test_data.signal_name, labels=labels, annotations=annotations)
    expected_labels = merge_dicts(
        app_spec.labels.status, {
            "app": app_spec.name,
            "fiaas/deployment_id": app_spec.deployment_id
        })
    expected_annotations = merge_dicts(app_spec.annotations.status,
                                       {"fiaas/last_updated": LAST_UPDATE})
    expected_metadata = ObjectMeta(name=app_name,
                                   namespace="default",
                                   labels=expected_labels,
                                   annotations=expected_annotations)
    expected_logs = [LOG_LINE]
    get_or_create.return_value = FiaasApplicationStatus(
        new=test_data.new, metadata=expected_metadata, result=test_data.result, logs=expected_logs)
    status.connect_signals()
    expected_call = {
        'apiVersion': 'fiaas.schibsted.io/v1',
        'kind': 'ApplicationStatus',
        'result': test_data.result,
        'logs': expected_logs,
        'metadata': {
            'labels': {
                'app': app_spec.name,
                'fiaas/deployment_id': app_spec.deployment_id,
                'status/label': 'true'
            },
            'annotations': {
                'fiaas/last_updated': LAST_UPDATE,
                'status/annotations': 'true'
            },
            'namespace': 'default',
            'name': app_name,
            'ownerReferences': [],
            'finalizers': [],
        }
    }
    called_mock = request.getfixturevalue(test_data.called_mock)
    mock_response = mock.create_autospec(Response)
    mock_response.json.return_value = expected_call
    called_mock.return_value = mock_response
    # pin the timestamp so the fiaas/last_updated annotation is deterministic
    with mock.patch("fiaas_deploy_daemon.crd.status.now") as mnow:
        mnow.return_value = LAST_UPDATE
        signal(test_data.signal_name).send(
            app_name=app_spec.name,
            namespace=app_spec.namespace,
            deployment_id=app_spec.deployment_id,
            labels=app_spec.labels.status,
            annotations=app_spec.annotations.status)
    get_or_create.assert_called_once_with(metadata=expected_metadata,
                                          result=test_data.result,
                                          logs=expected_logs)
    if test_data.action == "create":
        url = '/apis/fiaas.schibsted.io/v1/namespaces/default/application-statuses/'
    else:
        url = '/apis/fiaas.schibsted.io/v1/namespaces/default/application-statuses/{}'.format(
            app_name)
    ignored_mock = request.getfixturevalue(test_data.ignored_mock)
    called_mock.assert_called_once_with(url, expected_call)
    ignored_mock.assert_not_called()
def apply(self, deployment, app_spec):
    """Merge prometheus scrape annotations into the pod template when enabled."""
    if not app_spec.prometheus.enabled:
        return
    pod_metadata = deployment.spec.template.metadata
    pod_metadata.annotations = merge_dicts(pod_metadata.annotations,
                                           _make_prometheus_annotations(app_spec))