import logging
import os
import sys
import tempfile
import time

# Project-internal helpers (ClusterRules, Helm, HelmRules, ProjectConf,
# PublishRules, Git, Kubernetes, KubernetesUtils, sync_dns, sync_ingress, and
# the certificate/Route 53 utilities) are assumed to be imported from the
# surrounding package.


def _get_certificate_arn(certificate_scope) -> str:
    cert = search_certificate_arn(ClusterRules().boto, certificate_scope)

    # Check against None to allow empty string
    if cert is None:
        cert = new_certificate_arn(ClusterRules().boto, certificate_scope)

    return cert
def stop(namespace, charts):
    pc = ProjectConf()
    helm = Helm()
    cr = ClusterRules(namespace=namespace)

    if charts is None:
        # Stop each of the project's charts
        charts = [
            os.path.basename(chart_path) for chart_path in pc.get_helm_chart_paths()
        ]

    sync_required = False
    try:
        for chart_path in pc.get_helm_chart_paths():
            chart_name = os.path.basename(chart_path)
            if chart_name in charts:
                hr = HelmRules(cr, chart_name)
                helm.stop(hr, namespace)
                sync_required = True
    finally:
        # Sync if any helm.stop() call succeeded, even if a subsequent one failed
        if sync_required:
            sync_ingress.sync_ingress(namespace)
            sync_dns.sync_dns(namespace)
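# Illustrative calls to stop() (a sketch; the namespace and chart names are
# hypothetical):
#
#   stop("dev", None)       # charts=None stops every chart the project defines
#   stop("dev", ["api"])    # stop only the "api" chart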
def sync_dns(namespace):
    cr = ClusterRules(namespace=namespace)

    if cr.is_local:
        logging.info("Not syncing DNS because you are on local")
        return

    k_utils = KubernetesUtils(Kubernetes(namespace=namespace))
    service_loadbalancers = k_utils.get_services_to_load_balancers_map(
        cr.dual_dns_prefix_annotation_name, cr.ingress_info
    )

    # Apply our dns to the service names
    service_hostnames_to_loadbalancers = {
        f"{service_name}.{cr.namespace_domain_name}": loadbalancer_hostname
        for service_name, loadbalancer_hostname in service_loadbalancers.items()
    }

    nb_updated = fill_hostedzone(
        cr.boto,
        service_hostnames_to_loadbalancers,
        cr.cluster_domain_name,
        cr.namespace_domain_name,
    )

    logging.info("%s DNS mapping updated", nb_updated or "No")
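# Shape of the mapping sync_dns() hands to fill_hostedzone(), with made-up
# hostnames for illustration: each service name is qualified with the
# namespace's domain name and pointed at its load balancer's hostname.
#
#   {"api.dev.example.com": "abc123.elb.amazonaws.com"}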
def rollback(project, num_versions, namespace, chart=None):
    helm = Helm()
    hr = HelmRules(ClusterRules(namespace=namespace), chart or project)

    helm.rollback_relative(hr, num_versions, namespace)

    sync_ingress.sync_ingress(namespace)
    sync_dns.sync_dns(namespace)
def undeploy(project, namespace, chart=None):
    helm = Helm()
    cr = ClusterRules(namespace=namespace)
    hr = HelmRules(cr, chart or project)

    helm.stop(hr, namespace)

    sync_ingress.sync_ingress(namespace)
    sync_dns.sync_dns(namespace)
def fill_hostedzone(
    boto_session,
    hostnames_to_loadbalancers,
    cluster_domain_name=None,
    namespace_domain_name=None,
):
    cluster_domain_name = cluster_domain_name or ClusterRules().cluster_domain_name
    namespace_domain_name = namespace_domain_name or ClusterRules().namespace_domain_name

    # Look up the namespace's hosted zone, creating it if it does not exist yet
    namespace_hosted_zone = get_hostedzone(
        boto_session, namespace_domain_name
    ) or create_hostedzone(boto_session, namespace_domain_name)

    # The cluster's hosted zone must already exist
    cluster_hosted_zone = get_hostedzone(boto_session, cluster_domain_name)
    if cluster_hosted_zone is None:
        raise KeyError("route 53 for [%s] not found" % cluster_domain_name)

    # Ensure the cluster zone's NS records for the namespace domain match the
    # namespace zone's name servers
    dns_nameservers = get_ns_from_hostedzone(boto_session, namespace_hosted_zone)
    check_ns_values(
        boto_session, cluster_hosted_zone, namespace_domain_name, dns_nameservers
    )

    return fill_dns_dict(boto_session, namespace_hosted_zone, hostnames_to_loadbalancers)
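# A hypothetical call to fill_hostedzone() (the domain names are made up): the
# records land in the namespace's zone, which the cluster zone delegates to.
#
#   fill_hostedzone(
#       boto_session,
#       {"api.dev.example.com": "abc123.elb.amazonaws.com"},
#       cluster_domain_name="example.com",
#       namespace_domain_name="dev.example.com",
#   )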
def get_certificate(
    namespace: str, for_cluster: bool = False, wait: int = 0, poll_interval: int = 10
):
    cr = ClusterRules(namespace=namespace)

    cert = None
    timeout_start = time.time()
    while not cert:
        if for_cluster:
            cert = get_cluster_certificate_arn()
        else:
            cert = get_service_certificate_arn()

        # wait == 0: single lookup; wait > 0: poll for up to `wait` seconds;
        # wait < 0: poll until a certificate is found
        if not wait:
            return cert
        if wait > 0 and time.time() - timeout_start > wait:
            return cert
        if not cert:
            time.sleep(poll_interval)

    return cert
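# The wait semantics of get_certificate(), shown as hypothetical calls:
#
#   get_certificate("dev")            # wait=0: single lookup, return immediately
#   get_certificate("dev", wait=60)   # poll every 10s for up to 60 seconds
#   get_certificate("dev", wait=-1)   # negative wait: poll until a cert appears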
def cluster_init(force=False):
    helm = Helm()
    cluster_init_projects = ClusterRules().cluster_init

    for project in cluster_init_projects:
        project_name = project["project"]
        ref = project["ref"]
        project_namespace = project["namespace"]
        repo = project.get("repo") or project_name

        # Skip projects whose release already exists, unless forced
        if not force and helm.release_exists(
            f"{project_name}-{project_namespace}", project_namespace
        ):
            continue

        deploy.deploy(
            project_name,
            ref,
            project_namespace,
            dry_run=False,
            force=True,
            repo=repo,
            set_override_values=[],
        )
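# The keys cluster_init() reads from each ClusterRules().cluster_init entry,
# shown as a hypothetical configuration (values are made up; "repo" is
# optional and falls back to the project name):
#
#   [{"project": "base-services", "ref": "v1.0.0", "namespace": "infra"}]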
def sync_ingress(namespace):
    cr = ClusterRules(namespace=namespace)
    ingress_info = cr.ingress_info

    if ingress_info and ingress_info["use_ingress_per_namespace"]:
        k = Kubernetes(namespace=namespace)
        ingress_list = k.get_ingresses()
        ingress = build_ingress(
            k.get_services(),
            cr.service_domain_name_suffix,
            cr.dual_dns_prefix_annotation_name,
            ingress_info,
        )

        if any(i for i in ingress_list if i.metadata.name == ingress.metadata.name):
            # Update existing ingress
            if cr.is_local:
                # On local, replace the ingress rather than updating in place
                k.delete_ingress(ingress.metadata.name)
                k.create_ingress(ingress)
            else:
                k.update_ingress(ingress.metadata.name, ingress)
            logging.info("Successfully updated ingress")
        else:
            # Create new ingress
            k.create_ingress(ingress)
            logging.info("Successfully created ingress")
def start(
    namespace,
    charts=None,
    dry_run=False,
    force_helm=False,
    set_override_values=None,
    values_files=None,
):
    if set_override_values is None:
        set_override_values = []

    pc = ProjectConf()
    helm = Helm()
    cr = ClusterRules(namespace=namespace)

    if not charts:
        # Start each of the project's charts
        charts = [
            os.path.basename(chart_path) for chart_path in pc.get_helm_chart_paths()
        ]

    # Values precedence is command < cluster rules < --set-override-values
    # Start command values
    values = {
        "deploy.imageTag": "local",
        "deploy.namespace": namespace,
        "project.name": pc.name,
        "service.certificateScope": cr.service_certificate_scope,
        "service.domainName": cr.service_domain_name_suffix,
        "service.clusterCertificateScope": cr.cluster_certificate_scope,
        "service.clusterDomainName": cr.cluster_domain_name_suffix,
        "service.clusterName": cr.cluster_domain_name,  # aka root_dns
    }
    if cr.certificate_lookup:
        values.update(
            {
                "service.certificateArn": get_service_certificate_arn(),
                "service.clusterCertificateArn": get_cluster_certificate_arn(),
            }
        )

    # Update with cluster rule values
    values.update(cr.values)

    helm_args = []

    # Update with --set-override-values; split on the first "=" only so that
    # override values may themselves contain "="
    values.update(dict(value.split("=", 1) for value in set_override_values))

    sync_required = False
    try:
        for chart_path in pc.get_helm_chart_paths():
            chart_name = os.path.basename(chart_path)
            if chart_name in charts:
                # Add user-specified values files, scoped to this chart's path
                chart_helm_args = list(helm_args)
                if values_files:
                    for file_path in values_files:
                        chart_helm_args.append(
                            f"--values={os.path.join(chart_path, 'values', file_path)}"
                        )
                hr = HelmRules(cr, chart_name)
                if dry_run:
                    helm.dry_run(
                        hr,
                        chart_path,
                        cr.cluster_name,
                        namespace,
                        helm_args=chart_helm_args,
                        **values,
                    )
                else:
                    helm.start(
                        hr,
                        chart_path,
                        cr.cluster_name,
                        namespace,
                        force=force_helm,
                        helm_args=chart_helm_args,
                        **values,
                    )
                    sync_required = True
    finally:
        # Sync if any helm.start() call succeeded, even if a subsequent one failed
        if sync_required:
            sync_ingress.sync_ingress(namespace)
            sync_dns.sync_dns(namespace)
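# Hypothetical start() invocations (a sketch): --set-override-values entries
# arrive as "key=value" strings and win over both the command defaults and the
# cluster-rule values.
#
#   start("dev", dry_run=True)                                # render charts only
#   start("dev", set_override_values=["deploy.replicas=3"])   # override a value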
def get_cluster_certificate_arn(certificate_scope: str = None) -> str:
    certificate_scope = certificate_scope or ClusterRules().cluster_certificate_scope
    return _get_certificate_arn(certificate_scope)
def deploy(
    project,
    git_ref,
    namespace,
    chart=None,
    dry_run=False,
    force=False,
    force_helm=False,
    repo=None,
    set_override_values=None,
    values_files=None,
):
    if set_override_values is None:
        set_override_values = []

    with tempfile.TemporaryDirectory() as tmpdirname:
        pr = PublishRules()
        helm = Helm()
        cr = ClusterRules(namespace=namespace)
        helm_chart_path = "{}/{}".format(tmpdirname, chart or project)
        hr = HelmRules(cr, chart or project)
        git_account = load_git_configs()["account"]
        repo = repo or project
        git_url = f"[email protected]:{git_account}/{repo}.git"
        git_ref = Git.extract_hash(git_ref, git_url)

        if (
            not force
            and cr.check_branch
            and Git.extract_hash(cr.check_branch, git_url) != git_ref
        ):
            logging.error(
                f"You are deploying hash {git_ref} which does not match branch"
                f" {cr.check_branch} on cluster {cr.cluster_name} for project"
                f" {project}... exiting"
            )
            sys.exit(1)

        helm.pull_packages(project, pr, git_ref, tmpdirname)

        # We need to use --set-string in case the git ref is all digits
        helm_args = ["--set-string", f"deploy.imageTag={git_ref}"]

        # Values precedence is command < cluster rules < --set-override-values
        # Deploy command values
        values = {
            "deploy.ecr": pr.docker_registry,
            "deploy.namespace": namespace,
            "project.name": project,
            "service.certificateScope": cr.service_certificate_scope,
            "service.domainName": cr.service_domain_name_suffix,
            "service.clusterCertificateScope": cr.cluster_certificate_scope,
            "service.clusterDomainName": cr.cluster_domain_name_suffix,
            "service.clusterName": cr.cluster_domain_name,  # aka root_dns
        }
        if cr.certificate_lookup:
            values.update(
                {
                    "service.certificateArn": get_service_certificate_arn(),
                    "service.clusterCertificateArn": get_cluster_certificate_arn(),
                }
            )

        # Update with cluster rule values
        values.update(cr.values)

        # Add user-specified values files
        if values_files:
            for file_path in values_files:
                helm_args.append(
                    f"--values={os.path.join(helm_chart_path, 'values', file_path)}"
                )

        # Update with --set-override-values; split on the first "=" only so
        # that override values may themselves contain "="
        values.update(dict(value.split("=", 1) for value in set_override_values))

        if dry_run:
            helm.dry_run(
                hr,
                helm_chart_path,
                cr.cluster_name,
                namespace,
                helm_args=helm_args,
                **values,
            )
        else:
            helm.start(
                hr,
                helm_chart_path,
                cr.cluster_name,
                namespace,
                force_helm,
                helm_args=helm_args,
                **values,
            )
            sync_ingress.sync_ingress(namespace)
            sync_dns.sync_dns(namespace)
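# Hypothetical deploy() invocations (project, ref, and namespace are made up):
#
#   deploy("api", "main", "dev", dry_run=True)    # render only, no install/sync
#   deploy("api", "abc1234", "prod", force=True)  # bypass the check_branch guard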