def deep_delete_and_create(self, kube_client: KubeClient) -> None:
    """Delete this deployment, wait for the deletion to land, then recreate it.

    Polls the cluster for up to 60 seconds waiting for the deployment to
    disappear. If it is still present after the timeout, force-deletes the
    remaining pods, then verifies the deployment is really gone before
    recreating it.

    :param kube_client: client used for all Kubernetes API calls
    :raises ApiException: on any pod-deletion API error other than 404
    :raises Exception: if the deployment still exists after force deletion
    """
    self.deep_delete(kube_client)
    timeout = 60  # seconds to wait for the API to reflect the deletion
    timer = 0
    while (
        self.kube_deployment in set(list_all_deployments(kube_client))
        and timer < timeout
    ):
        sleep(1)
        timer += 1

    if timer >= timeout and self.kube_deployment in set(
        list_all_deployments(kube_client)
    ):
        try:
            force_delete_pods(
                self.item.metadata.name,
                self.kube_deployment.service,
                self.kube_deployment.instance,
                self.item.metadata.namespace,
                kube_client,
            )
        except ApiException as e:
            if e.status == 404:
                # 404 means there was nothing left to delete, which is
                # the outcome we wanted anyway — treat as success.
                self.logging.debug(
                    "not deleting nonexistent deploy/{} from namespace/{}".format(
                        self.kube_deployment.service, self.item.metadata.namespace
                    )
                )
            else:
                raise
        # Fix: previously we recreated immediately after force-deleting the
        # pods, even if the deployment object itself still existed, which
        # can make the subsequent create() conflict. Verify deletion first.
        if self.kube_deployment in set(list_all_deployments(kube_client)):
            raise Exception(
                f"Could not delete deployment {self.item.metadata.name}"
            )
    else:
        self.logging.info(
            "deleted deploy/{} from namespace/{}".format(
                self.kube_deployment.service, self.item.metadata.namespace
            )
        )
    self.create(kube_client=kube_client)
def test_list_all_deployments():
    """list_all_deployments maps deployment items to KubeDeployment tuples."""
    # An empty item list yields an empty result.
    empty_response = mock.Mock(items=[])
    client = mock.Mock(
        deployments=mock.Mock(
            list_namespaced_deployment=mock.Mock(return_value=empty_response)
        )
    )
    assert list_all_deployments(client) == []

    # A deployment's labels and replica count are carried over verbatim.
    labels = {
        'service': 'kurupt',
        'instance': 'fm',
        'git_sha': 'a12345',
        'config_sha': 'b12345',
    }
    item = mock.Mock(metadata=mock.Mock(labels=labels))
    type(item).spec = mock.Mock(replicas=3)
    populated_response = mock.Mock(items=[item])
    client = mock.Mock(
        deployments=mock.Mock(
            list_namespaced_deployment=mock.Mock(return_value=populated_response)
        )
    )
    assert list_all_deployments(client) == [
        KubeDeployment(
            service='kurupt',
            instance='fm',
            git_sha='a12345',
            config_sha='b12345',
            replicas=3,
        )
    ]
def setup_kube_deployments(
    kube_client: KubeClient,
    service_instances: Sequence[str],
    soa_dir: str = DEFAULT_SOA_DIR,
) -> bool:
    """Reconcile the given service instances against the cluster.

    Returns True unless any job id was malformed or any reconciliation
    reported a failure.
    """
    success = True
    if not service_instances:
        return success
    deployments = list_all_deployments(kube_client)
    for service_instance in service_instances:
        try:
            service, instance, _, __ = decompose_job_id(service_instance)
        except InvalidJobNameError:
            log.error(
                "Invalid service instance specified. Format is service%sinstance."
                % SPACER
            )
            success = False
            continue
        # reconcile_kubernetes_deployment returns a tuple whose first
        # element is truthy on failure.
        if reconcile_kubernetes_deployment(
            kube_client=kube_client,
            service=service,
            instance=instance,
            kube_deployments=deployments,
            soa_dir=soa_dir,
        )[0]:
            success = False
    return success
def setup_kube_deployments(
    kube_client: KubeClient,
    service_instances: Sequence[str],
    soa_dir: str = DEFAULT_SOA_DIR,
) -> bool:
    """Create or update Kubernetes deployments for the given service instances.

    Returns True only if every job id validated and every application object
    was built successfully.
    """
    if service_instances:
        existing_kube_deployments = set(list_all_deployments(kube_client))
        existing_apps = {
            (deployment.service, deployment.instance)
            for deployment in existing_kube_deployments
        }
        service_instances_with_valid_names = [
            decompose_job_id(service_instance)
            for service_instance in service_instances
            if validate_job_name(service_instance)
        ]
        applications = [
            create_application_object(
                kube_client=kube_client,
                service=service_instance[0],
                instance=service_instance[1],
                soa_dir=soa_dir,
            )
            for service_instance in service_instances_with_valid_names
        ]

        for _, app in applications:
            # Fix: guard on `app` first so a failed application object is not
            # reported as "up to date" by the final else branch.
            if not app:
                continue
            if (
                app.kube_deployment.service,
                app.kube_deployment.instance,
            ) not in existing_apps:
                app.create(kube_client)
            elif app.kube_deployment not in existing_kube_deployments:
                app.update(kube_client)
            else:
                log.debug(f"{app} is up to date, no action taken")

        return (False, None) not in applications and len(
            service_instances_with_valid_names
        ) == len(service_instances)
def assert_kube_deployments(
    kube_client: KubeClient,
) -> HealthCheckResult:
    """Report the current number of Kubernetes deployments; always healthy."""
    deployment_count = len(list_all_deployments(kube_client))
    message = f"Kubernetes deployments: {deployment_count:>3}"
    return HealthCheckResult(message=message, healthy=True)
def setup_kube_deployments(
    kube_client: KubeClient,
    service_instances: Sequence[str],
    cluster: str,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> bool:
    """Create or update deployments for the given service instances.

    Also reconciles the autoscaler pause state and logs a JSON list of the
    instances that were actually created or updated.

    Returns True only if every job id validated and every application object
    was built successfully.
    """
    if service_instances:
        existing_kube_deployments = set(list_all_deployments(kube_client))
        existing_apps = {
            (deployment.service, deployment.instance)
            for deployment in existing_kube_deployments
        }
        service_instances_with_valid_names = [
            decompose_job_id(service_instance)
            for service_instance in service_instances
            if validate_job_name(service_instance)
        ]
        applications = [
            create_application_object(
                kube_client=kube_client,
                service=service_instance[0],
                instance=service_instance[1],
                cluster=cluster,
                soa_dir=soa_dir,
            )
            for service_instance in service_instances_with_valid_names
        ]

        apps_updated = []
        for _, app in applications:
            if not app:
                continue
            updated = True
            if (
                app.kube_deployment.service,
                app.kube_deployment.instance,
            ) not in existing_apps:
                log.info(f"Creating {app} because it does not exist yet.")
                app.create(kube_client)
            elif app.kube_deployment not in existing_kube_deployments:
                log.info(f"Updating {app} because configs have changed.")
                app.update(kube_client)
            else:
                # Fix: evaluate the (potentially expensive) pause checks once
                # per app instead of up to twice each in chained elif
                # conditions.
                paused = autoscaling_is_paused()
                marked_paused = is_deployment_marked_paused(
                    kube_client, app.soa_config
                )
                if paused and not marked_paused:
                    log.info(
                        f"Updating {app} because autoscaler needs to be paused.")
                    app.update(kube_client)
                elif not paused and marked_paused:
                    log.info(
                        f"Updating {app} because autoscaler needs to be resumed.")
                    app.update(kube_client)
                else:
                    updated = False
                    log.debug(f"{app} is up to date, no action taken")
            if updated:
                # Fix: record only when an action was taken, replacing the
                # fragile append-then-pop bookkeeping.
                apps_updated.append(str(app))

        log.info(json.dumps({"service_instance_updated": apps_updated}))
        return (False, None) not in applications and len(
            service_instances_with_valid_names
        ) == len(service_instances)
def deep_delete_and_create(self, kube_client: KubeClient) -> None:
    """Delete this deployment, wait for it to disappear, then recreate it.

    Waits up to 60 seconds for the deletion to be visible via the API. On
    timeout, retries the deletion with Background propagation, force-deletes
    any leftover pods, and verifies the deployment is gone before recreating.

    :param kube_client: client used for all Kubernetes API calls
    :raises ApiException: on any pod-deletion API error other than 404
    :raises Exception: if the deployment still exists after force deletion
    """
    self.deep_delete(kube_client)
    waited = 0
    while (
        self.kube_deployment in set(list_all_deployments(kube_client))
        and waited < 60
    ):
        sleep(1)
        waited += 1

    timed_out = waited >= 60 and self.kube_deployment in set(
        list_all_deployments(kube_client)
    )
    if timed_out:
        # When deleting then immediately creating, we need to use Background
        # deletion to ensure we can create the deployment immediately
        self.deep_delete(kube_client, propagation_policy="Background")
        try:
            force_delete_pods(
                self.item.metadata.name,
                self.kube_deployment.service,
                self.kube_deployment.instance,
                self.item.metadata.namespace,
                kube_client,
            )
        except ApiException as err:
            if err.status != 404:
                raise
            # Pod(s) may have been deleted by GC before we got to them;
            # that's the outcome we wanted, so carry on.
            self.logging.debug(
                "pods already deleted for {} from namespace/{}. Continuing."
                .format(self.kube_deployment.service, self.item.metadata.namespace)
            )
        if self.kube_deployment in set(list_all_deployments(kube_client)):
            # Deployment deletion failed; recreating now would conflict.
            raise Exception(
                f"Could not delete deployment {self.item.metadata.name}"
            )
    else:
        self.logging.info(
            "deleted deploy/{} from namespace/{}".format(
                self.kube_deployment.service, self.item.metadata.namespace
            )
        )
    self.create(kube_client=kube_client)
def setup_kube_deployments(
    kube_client: KubeClient,
    service_instances: Sequence[str],
    cluster: str,
    rate_limit: int = 0,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> bool:
    """Create or update deployments for the given service instances.

    :param rate_limit: stop after this many create/update API calls
        (0 means unlimited)

    Returns True only if every job id validated and every application object
    was built successfully.
    """
    if service_instances:
        existing_kube_deployments = set(list_all_deployments(kube_client))
        existing_apps = {
            (deployment.service, deployment.instance)
            for deployment in existing_kube_deployments
        }
        service_instances_with_valid_names = [
            decompose_job_id(service_instance)
            for service_instance in service_instances
            if validate_job_name(service_instance)
        ]
        applications = [
            create_application_object(
                kube_client=kube_client,
                service=service_instance[0],
                instance=service_instance[1],
                cluster=cluster,
                soa_dir=soa_dir,
            )
            for service_instance in service_instances_with_valid_names
        ]

        api_updates = 0
        for _, app in applications:
            if not app:
                continue
            app_id = (app.kube_deployment.service, app.kube_deployment.instance)
            if app_id not in existing_apps:
                log.info(f"Creating {app} because it does not exist yet.")
                app.create(kube_client)
                api_updates += 1
            elif app.kube_deployment not in existing_kube_deployments:
                log.info(f"Updating {app} because configs have changed.")
                app.update(kube_client)
                api_updates += 1
            else:
                # No deployment change needed, but dependent objects may
                # still have drifted.
                log.info(f"{app} is up-to-date!")
                log.info(f"Ensuring related API objects for {app} are in sync")
                app.update_related_api_objects(kube_client)
            if 0 < rate_limit <= api_updates:
                log.info(
                    f"Not doing any further updates as we reached the limit ({api_updates})"
                )
                break

        return (False, None) not in applications and len(
            service_instances_with_valid_names
        ) == len(service_instances)
def setup_kube_deployments(
    kube_client: KubeClient,
    service_instances: Sequence[str],
    cluster: str,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> bool:
    """Create or update deployments for the given service instances.

    Logs a JSON list of every processed application under the key
    "service_instance_updated".

    Returns True only if every job id validated and every application object
    was built successfully.
    """
    if service_instances:
        existing_kube_deployments = set(list_all_deployments(kube_client))
        existing_apps = {
            (deployment.service, deployment.instance)
            for deployment in existing_kube_deployments
        }
        service_instances_with_valid_names = [
            decompose_job_id(service_instance)
            for service_instance in service_instances
            if validate_job_name(service_instance)
        ]
        applications = [
            create_application_object(
                kube_client=kube_client,
                service=service_instance[0],
                instance=service_instance[1],
                cluster=cluster,
                soa_dir=soa_dir,
            )
            for service_instance in service_instances_with_valid_names
        ]

        apps_updated = []
        for _, app in applications:
            if not app:
                continue
            apps_updated.append(str(app))
            app_id = (app.kube_deployment.service, app.kube_deployment.instance)
            if app_id not in existing_apps:
                log.info(f"Creating {app} because it does not exist yet.")
                app.create(kube_client)
            elif app.kube_deployment not in existing_kube_deployments:
                log.info(f"Updating {app} because configs have changed.")
                app.update(kube_client)
            else:
                # No deployment change needed, but dependent objects may
                # still have drifted.
                log.info(f"Ensuring related API objects for {app} are in sync")
                app.update_related_api_objects(kube_client)

        log.info(json.dumps({"service_instance_updated": apps_updated}))
        return (False, None) not in applications and len(
            service_instances_with_valid_names
        ) == len(service_instances)
def setup_kube_deployments(
    kube_client: KubeClient,
    service_instances: Sequence[str],
    cluster: str,
    rate_limit: int = 0,
    soa_dir: str = DEFAULT_SOA_DIR,
    metrics_interface: metrics_lib.BaseMetrics = metrics_lib.NoMetrics(
        "paasta"),
) -> bool:
    """Create or update deployments for the given service instances.

    Emits a "deploy" metrics event for every create/update performed.

    :param rate_limit: stop after this many create/update API calls
        (0 means unlimited)
    :param metrics_interface: sink for deploy events; defaults to a no-op

    Returns True only if every job id validated and every application object
    was built successfully.
    """
    if service_instances:
        existing_kube_deployments = set(list_all_deployments(kube_client))
        existing_apps = {
            (deployment.service, deployment.instance)
            for deployment in existing_kube_deployments
        }
        service_instances_with_valid_names = [
            decompose_job_id(service_instance)
            for service_instance in service_instances
            if validate_job_name(service_instance)
        ]
        applications = [
            create_application_object(
                kube_client=kube_client,
                service=service_instance[0],
                instance=service_instance[1],
                cluster=cluster,
                soa_dir=soa_dir,
            )
            for service_instance in service_instances_with_valid_names
        ]

        api_updates = 0
        for _, app in applications:
            if not app:
                continue
            app_dimensions = {
                "paasta_service": app.kube_deployment.service,
                "paasta_instance": app.kube_deployment.instance,
                "paasta_cluster": cluster,
            }
            try:
                deploy_event = None
                if (
                    app.kube_deployment.service,
                    app.kube_deployment.instance,
                ) not in existing_apps:
                    log.info(f"Creating {app} because it does not exist yet.")
                    app.create(kube_client)
                    deploy_event = "create"
                elif app.kube_deployment not in existing_kube_deployments:
                    log.info(f"Updating {app} because configs have changed.")
                    app.update(kube_client)
                    deploy_event = "update"
                else:
                    log.info(f"{app} is up-to-date!")
                    log.info(f"Ensuring related API objects for {app} are in sync")
                    app.update_related_api_objects(kube_client)
                if deploy_event is not None:
                    app_dimensions["deploy_event"] = deploy_event
                    metrics_interface.emit_event(
                        name="deploy",
                        dimensions=app_dimensions,
                    )
                    api_updates += 1
            except Exception:
                # NOTE(review): failures are logged and skipped; they do not
                # affect the return value (best-effort semantics, as before).
                log.exception(f"Error while processing: {app}")
            if rate_limit > 0 and api_updates >= rate_limit:
                log.info(
                    f"Not doing any further updates as we reached the limit ({api_updates})"
                )
                break

        return (False, None) not in applications and len(
            service_instances_with_valid_names
        ) == len(service_instances)