def marathon_smartstack_status(
    service: str,
    instance: str,
    job_config: marathon_tools.MarathonServiceConfig,
    service_namespace_config: ServiceNamespaceConfig,
    tasks: Sequence[MarathonTask],
    should_return_individual_backends: bool = False,
) -> Mapping[str, Any]:
    registration = job_config.get_registrations()[0]
    discover_location_type = service_namespace_config.get_discover()
    monitoring_blacklist = job_config.get_monitoring_blacklist(
        system_deploy_blacklist=settings.system_paasta_config.get_deploy_blacklist()
    )
    filtered_slaves = get_all_slaves_for_blacklist_whitelist(
        blacklist=monitoring_blacklist, whitelist=None
    )
    grouped_slaves = get_mesos_slaves_grouped_by_attribute(
        slaves=filtered_slaves, attribute=discover_location_type
    )

    # rebuild the dict, replacing the slave object with just their hostname
    slave_hostname_by_location = {
        attribute_value: [slave["hostname"] for slave in slaves]
        for attribute_value, slaves in grouped_slaves.items()
    }

    expected_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace(
        service, instance, settings.cluster
    )
    expected_count_per_location = int(
        expected_smartstack_count / len(slave_hostname_by_location)
    )
    smartstack_status: MutableMapping[str, Any] = {
        "registration": registration,
        "expected_backends_per_location": expected_count_per_location,
        "locations": [],
    }

    for location, hosts in slave_hostname_by_location.items():
        synapse_host = hosts[0]
        sorted_backends = sorted(
            get_backends(
                registration,
                synapse_host=synapse_host,
                synapse_port=settings.system_paasta_config.get_synapse_port(),
                synapse_haproxy_url_format=settings.system_paasta_config.get_synapse_haproxy_url_format(),
            ),
            key=lambda backend: backend["status"],
            reverse=True,  # put 'UP' backends above 'MAINT' backends
        )
        matched_backends_and_tasks = match_backends_and_tasks(sorted_backends, tasks)
        location_dict = build_smartstack_location_dict(
            location, matched_backends_and_tasks, should_return_individual_backends
        )
        smartstack_status["locations"].append(location_dict)

    return smartstack_status

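
# A minimal usage sketch (hypothetical helper, not part of the real API module):
# how a caller might summarize the mapping returned by marathon_smartstack_status.
# It relies only on the top-level keys built above ("registration",
# "expected_backends_per_location", "locations"); the per-location keys come from
# build_smartstack_location_dict and are deliberately not assumed here.
def _summarize_smartstack_status_example(status: Mapping[str, Any]) -> str:
    locations = status["locations"]
    return (
        f"{status['registration']}: {len(locations)} location(s), "
        f"expecting {status['expected_backends_per_location']} backend(s) per location"
    )
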
def marathon_service_mesh_status(
    service: str,
    service_mesh: pik.ServiceMesh,
    instance: str,
    job_config: marathon_tools.MarathonServiceConfig,
    service_namespace_config: ServiceNamespaceConfig,
    tasks: Sequence[MarathonTask],
    should_return_individual_backends: bool = False,
) -> Mapping[str, Any]:
    registration = job_config.get_registrations()[0]
    discover_location_type = service_namespace_config.get_discover()
    grouped_slaves = get_mesos_slaves_grouped_by_attribute(
        slaves=get_slaves(), attribute=discover_location_type
    )

    # rebuild the dict, replacing the slave object with just their hostname
    slave_hostname_by_location = {
        attribute_value: [slave["hostname"] for slave in slaves]
        for attribute_value, slaves in grouped_slaves.items()
    }

    expected_instance_count = marathon_tools.get_expected_instance_count_for_namespace(
        service, instance, settings.cluster
    )
    expected_count_per_location = int(
        expected_instance_count / len(slave_hostname_by_location)
    )
    service_mesh_status: MutableMapping[str, Any] = {
        "registration": registration,
        "expected_backends_per_location": expected_count_per_location,
        "locations": [],
    }

    for location, hosts in slave_hostname_by_location.items():
        if service_mesh == pik.ServiceMesh.SMARTSTACK:
            service_mesh_status["locations"].append(
                _build_smartstack_location_dict_for_backends(
                    synapse_host=hosts[0],
                    registration=registration,
                    tasks=tasks,
                    location=location,
                    should_return_individual_backends=should_return_individual_backends,
                )
            )
        elif service_mesh == pik.ServiceMesh.ENVOY:
            service_mesh_status["locations"].append(
                _build_envoy_location_dict_for_backends(
                    envoy_host=hosts[0],
                    registration=registration,
                    tasks=tasks,
                    location=location,
                    should_return_individual_backends=should_return_individual_backends,
                )
            )

    return service_mesh_status

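
# A minimal usage sketch (hypothetical, not part of the real API module): how a
# caller could gather status for both supported meshes via the dispatch above.
# The job_config, service_namespace_config, and tasks arguments are assumed to
# come from the surrounding request context, as with the functions in this module.
def _service_mesh_statuses_example(
    service: str,
    instance: str,
    job_config: marathon_tools.MarathonServiceConfig,
    service_namespace_config: ServiceNamespaceConfig,
    tasks: Sequence[MarathonTask],
) -> Sequence[Mapping[str, Any]]:
    return [
        marathon_service_mesh_status(
            service=service,
            service_mesh=service_mesh,
            instance=instance,
            job_config=job_config,
            service_namespace_config=service_namespace_config,
            tasks=tasks,
        )
        for service_mesh in (pik.ServiceMesh.SMARTSTACK, pik.ServiceMesh.ENVOY)
    ]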