def _build_smartstack_location_dict_for_backends(
    synapse_host: str,
    registration: str,
    tasks: Sequence[MarathonTask],
    location: str,
    should_return_individual_backends: bool,
) -> MutableMapping[str, Any]:
    sorted_smartstack_backends = sorted(
        smartstack_tools.get_backends(
            registration,
            synapse_host=synapse_host,
            synapse_port=settings.system_paasta_config.get_synapse_port(),
            synapse_haproxy_url_format=settings.system_paasta_config.get_synapse_haproxy_url_format(),
        ),
        key=lambda backend: backend["status"],
        reverse=True,  # put 'UP' backends above 'MAINT' backends
    )
    matched_smartstack_backends_and_tasks = smartstack_tools.match_backends_and_tasks(
        sorted_smartstack_backends, tasks
    )
    return smartstack_tools.build_smartstack_location_dict(
        location,
        matched_smartstack_backends_and_tasks,
        should_return_individual_backends,
    )

def _build_smartstack_location_dict(
    synapse_host: str,
    synapse_port: int,
    synapse_haproxy_url_format: str,
    registration: str,
    pods: Iterable[V1Pod],
    location: str,
    should_return_individual_backends: bool,
) -> MutableMapping[str, Any]:
    sorted_backends = sorted(
        smartstack_tools.get_backends(
            registration,
            synapse_host=synapse_host,
            synapse_port=synapse_port,
            synapse_haproxy_url_format=synapse_haproxy_url_format,
        ),
        key=lambda backend: backend["status"],
        reverse=True,  # put 'UP' backends above 'MAINT' backends
    )
    matched_backends_and_pods = match_backends_and_pods(sorted_backends, pods)
    location_dict = smartstack_tools.build_smartstack_location_dict(
        location, matched_backends_and_pods, should_return_individual_backends
    )
    return location_dict

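# The sketch below is a hypothetical usage example of
# _build_smartstack_location_dict and is not part of the module's API: the
# synapse host, port, HAProxy URL format, registration, and location values
# are illustrative assumptions, and an empty pod list is passed, so no
# backends would be matched to pods.
def _example_build_smartstack_location_dict() -> MutableMapping[str, Any]:
    return _build_smartstack_location_dict(
        synapse_host="169.254.255.254",  # assumed synapse/HAProxy host
        synapse_port=3212,  # assumed synapse port
        synapse_haproxy_url_format="http://{host:s}:{port:d}/;csv;norefresh",  # assumed URL format
        registration="example_service.main",  # assumed registration name
        pods=[],  # no pods in this sketch
        location="us-west-1a",  # assumed location name
        should_return_individual_backends=True,
    )
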
def smartstack_status(
    service: str,
    instance: str,
    job_config: LongRunningServiceConfig,
    service_namespace_config: ServiceNamespaceConfig,
    pods: Sequence[V1Pod],
    settings: Any,
    should_return_individual_backends: bool = False,
) -> Mapping[str, Any]:
    # Only the first registration is reported on.
    registration = job_config.get_registrations()[0]
    instance_pool = job_config.get_pool()

    smartstack_replication_checker = KubeSmartstackReplicationChecker(
        nodes=kubernetes_tools.get_all_nodes(settings.kubernetes_client),
        system_paasta_config=settings.system_paasta_config,
    )
    node_hostname_by_location = smartstack_replication_checker.get_allowed_locations_and_hosts(
        job_config
    )

    # Spread the expected instance count evenly across locations.
    expected_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace(
        service=service,
        namespace=instance,
        cluster=settings.cluster,
        instance_type_class=KubernetesDeploymentConfig,
    )
    expected_count_per_location = int(
        expected_smartstack_count / len(node_hostname_by_location)
    )

    smartstack_status: MutableMapping[str, Any] = {
        "registration": registration,
        "expected_backends_per_location": expected_count_per_location,
        "locations": [],
    }
    for location, hosts in node_hostname_by_location.items():
        # Query the first eligible host in this location's pool for backend state.
        synapse_host = smartstack_replication_checker.get_first_host_in_pool(
            hosts, instance_pool
        )
        sorted_backends = sorted(
            smartstack_tools.get_backends(
                registration,
                synapse_host=synapse_host,
                synapse_port=settings.system_paasta_config.get_synapse_port(),
                synapse_haproxy_url_format=settings.system_paasta_config.get_synapse_haproxy_url_format(),
            ),
            key=lambda backend: backend["status"],
            reverse=True,  # put 'UP' backends above 'MAINT' backends
        )
        matched_backends_and_pods = match_backends_and_pods(sorted_backends, pods)
        location_dict = smartstack_tools.build_smartstack_location_dict(
            location, matched_backends_and_pods, should_return_individual_backends
        )
        smartstack_status["locations"].append(location_dict)

    return smartstack_status

def marathon_smartstack_status(
    service: str,
    instance: str,
    job_config: marathon_tools.MarathonServiceConfig,
    service_namespace_config: ServiceNamespaceConfig,
    tasks: Sequence[MarathonTask],
    should_return_individual_backends: bool = False,
) -> Mapping[str, Any]:
    # Only the first registration is reported on.
    registration = job_config.get_registrations()[0]

    # Group Mesos slaves by the configured discovery attribute (e.g. region).
    discover_location_type = service_namespace_config.get_discover()
    grouped_slaves = get_mesos_slaves_grouped_by_attribute(
        slaves=get_slaves(), attribute=discover_location_type
    )
    # rebuild the dict, replacing the slave objects with just their hostnames
    slave_hostname_by_location = {
        attribute_value: [slave["hostname"] for slave in slaves]
        for attribute_value, slaves in grouped_slaves.items()
    }

    # Spread the expected instance count evenly across locations.
    expected_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace(
        service, instance, settings.cluster
    )
    expected_count_per_location = int(
        expected_smartstack_count / len(slave_hostname_by_location)
    )

    smartstack_status: MutableMapping[str, Any] = {
        "registration": registration,
        "expected_backends_per_location": expected_count_per_location,
        "locations": [],
    }
    for location, hosts in slave_hostname_by_location.items():
        # Any host in the location can be asked for synapse state; use the first.
        synapse_host = hosts[0]
        sorted_backends = sorted(
            get_backends(
                registration,
                synapse_host=synapse_host,
                synapse_port=settings.system_paasta_config.get_synapse_port(),
                synapse_haproxy_url_format=settings.system_paasta_config.get_synapse_haproxy_url_format(),
            ),
            key=lambda backend: backend["status"],
            reverse=True,  # put 'UP' backends above 'MAINT' backends
        )
        matched_backends_and_tasks = match_backends_and_tasks(sorted_backends, tasks)
        location_dict = smartstack_tools.build_smartstack_location_dict(
            location, matched_backends_and_tasks, should_return_individual_backends
        )
        smartstack_status["locations"].append(location_dict)

    return smartstack_status
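
# Hypothetical invocation sketch for marathon_smartstack_status, not part of
# the module's API. It assumes job_config, service_namespace_config, and the
# Marathon tasks have already been loaded by the caller (e.g. an API view);
# the literal service and instance names are placeholders.
def _example_marathon_smartstack_status(
    job_config: marathon_tools.MarathonServiceConfig,
    service_namespace_config: ServiceNamespaceConfig,
    tasks: Sequence[MarathonTask],
) -> Mapping[str, Any]:
    return marathon_smartstack_status(
        service="example_service",  # placeholder service name
        instance="main",  # placeholder instance name
        job_config=job_config,
        service_namespace_config=service_namespace_config,
        tasks=tasks,
        should_return_individual_backends=True,  # include per-backend detail
    )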