def generate_acls_for_service(service_name, discover_type, advertise_types):
    frontend_acl_configs = []

    for advertise_type in advertise_types:
        if compare_types(discover_type, advertise_type) < 0:
            # don't create acls that downcast requests
            continue

        backend_identifier = get_backend_name(
            service_name=service_name,
            discover_type=discover_type,
            advertise_type=advertise_type,
        )

        # use connslots acl condition
        frontend_acl_configs.extend(
            [
                'acl {backend_identifier}_has_connslots connslots({backend_identifier}) gt 0'.format(
                    backend_identifier=backend_identifier,
                ),
                'use_backend {backend_identifier} if {backend_identifier}_has_connslots'.format(
                    backend_identifier=backend_identifier,
                ),
            ]
        )

    return frontend_acl_configs
def generate_acls_for_service(
        service_name, discover_type, advertise_types, proxied_through, healthcheck_uri):
    frontend_acl_configs = []

    # check for proxied_through first, use_backend ordering matters
    if proxied_through:
        frontend_acl_configs.extend([
            'acl is_status_request path {healthcheck_uri}'.format(
                healthcheck_uri=healthcheck_uri,
            ),
            'acl request_from_proxy hdr_beg(X-Smartstack-Source) -i {proxied_through}'.format(
                proxied_through=proxied_through,
            ),
            'acl proxied_through_backend_has_connslots connslots({proxied_through}) gt 0'.format(
                proxied_through=proxied_through,
            ),
            'use_backend {proxied_through} if !is_status_request !request_from_proxy proxied_through_backend_has_connslots'.format(
                proxied_through=proxied_through,
            ),
            'reqadd X-Smartstack-Destination:\ {service_name} if !is_status_request !request_from_proxy proxied_through_backend_has_connslots'.format(
                service_name=service_name,
            ),
        ])

    for advertise_type in advertise_types:
        if compare_types(discover_type, advertise_type) < 0:
            # don't create acls that downcast requests
            continue

        backend_identifier = get_backend_name(
            service_name=service_name,
            discover_type=discover_type,
            advertise_type=advertise_type,
        )

        # use connslots acl condition
        frontend_acl_configs.extend(
            [
                'acl {backend_identifier}_has_connslots connslots({backend_identifier}) gt 0'.format(
                    backend_identifier=backend_identifier,
                ),
                'use_backend {backend_identifier} if {backend_identifier}_has_connslots'.format(
                    backend_identifier=backend_identifier,
                ),
            ]
        )

    return frontend_acl_configs
def generate_acls_for_service(
    service_name: str,
    discover_type: str,
    advertise_types: Iterable[str],
    endpoint_timeouts: Dict[str, int],
) -> ServiceAcls:
    frontend_acl_configs = []

    for (advertise_type, endpoint_name) in _get_backends_for_service(
        advertise_types,
        endpoint_timeouts,
    ):
        if compare_types(discover_type, advertise_type) < 0:
            # don't create acls that downcast requests
            continue

        backend_identifier = get_backend_name(
            service_name=service_name,
            discover_type=discover_type,
            advertise_type=advertise_type,
            endpoint_name=endpoint_name,
        )

        # non-default backends have an extra ACL to match the path
        if endpoint_name != HAPROXY_DEFAULT_SECTION:
            path = endpoint_name
            # There is no reason to prefix-match on "/"
            # special case it for now to support overriding the timeout for "/"
            if path == "/":
                acl_type = "path"
            else:
                acl_type = "path_beg"

            # note: intentional " " in the beginning of this string
            path_acl_name = f' {backend_identifier}_path'
            path_acl = [f'acl{path_acl_name} {acl_type} {path}']
        else:
            path_acl_name = ''
            path_acl = []

        # use connslots acl condition
        frontend_acl_configs.extend(path_acl + [
            f'acl {backend_identifier}_has_connslots connslots({backend_identifier}) gt 0',
            f'use_backend {backend_identifier} if {backend_identifier}_has_connslots{path_acl_name}',
        ])

    return frontend_acl_configs
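# A minimal sketch of the HAProxy frontend lines the endpoint-aware version above
# would emit, assuming a hypothetical backend identifier "servicename.region.main"
# from get_backend_name() and a non-default endpoint "/v1/jobs" (both made up here):
#
#   acl servicename.region.main_path path_beg /v1/jobs
#   acl servicename.region.main_has_connslots connslots(servicename.region.main) gt 0
#   use_backend servicename.region.main if servicename.region.main_has_connslots servicename.region.main_path
#
# The leading space baked into path_acl_name is what appends the path ACL to the
# use_backend condition; for the default endpoint the use_backend line simply omits it.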
def generate_acls_for_service(service_name, discover_type, advertise_types):
    frontend_acl_configs = []

    for advertise_type in advertise_types:
        if compare_types(discover_type, advertise_type) < 0:
            # don't create acls that downcast requests
            continue

        backend_identifier = get_backend_name(
            service_name=service_name,
            discover_type=discover_type,
            advertise_type=advertise_type,
        )

        # use connslots acl condition
        frontend_acl_configs.extend([
            'acl {backend_identifier}_has_connslots connslots({backend_identifier}) gt 0'
            .format(backend_identifier=backend_identifier),
            'use_backend {backend_identifier} if {backend_identifier}_has_connslots'
            .format(backend_identifier=backend_identifier),
        ])

    return frontend_acl_configs
def test_compare_types(self, mock_data):
    assert compare_types('environment', 'az') < 0
    assert compare_types('az', 'az') == 0
    assert compare_types('az', 'region') > 0
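# A minimal sketch (not the real implementation) of a compare_types() consistent
# with the assertions above: location types are ranked from least to most specific
# and the comparison returns the difference of the two ranks. The exact type names
# and their ordering below are assumptions for illustration only.
_LOCATION_TYPES = ['environment', 'region', 'az']  # least specific -> most specific


def compare_types(type_a, type_b):
    # negative if type_a is less specific than type_b, zero if equal, positive otherwise
    return _LOCATION_TYPES.index(type_a) - _LOCATION_TYPES.index(type_b)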
def generate_subconfiguration(service_name, advertise, extra_advertise, port,
                              ip_address, healthcheck_timeout_s, hacheck_uri,
                              healthcheck_headers):
    config = {}

    # Register at the specified location types in the current superregion
    locations_to_register_in = set()
    for advertise_typ in advertise:
        locations_to_register_in.add((get_current_location(advertise_typ), advertise_typ))

    # Also register in any other locations specified in extra advertisements
    for (src, dst) in extra_advertise:
        src_typ, src_loc = src.split(':')
        dst_typ, dst_loc = dst.split(':')
        if get_current_location(src_typ) != src_loc:
            # We do not match the source
            continue

        # Convert the destination into the 'advertise' type(s)
        for advertise_typ in advertise:
            # Prevent upcasts, otherwise the service may be made available to
            # more hosts than intended.
            if compare_types(dst_typ, advertise_typ) > 0:
                continue
            for loc in convert_location_type(dst_loc, dst_typ, advertise_typ):
                locations_to_register_in.add((loc, advertise_typ))

    # Create a separate service entry for each location that we need to register in.
    for loc, typ in locations_to_register_in:
        superregions = convert_location_type(loc, typ, 'superregion')
        for superregion in superregions:
            try:
                zookeeper_topology = get_named_zookeeper_topology(
                    cluster_type='infrastructure',
                    cluster_location=superregion,
                )
            except Exception:
                continue

            key = '%s.%s.%s:%s.%d.new' % (
                service_name, superregion, typ, loc, port
            )

            config[key] = {
                'port': port,
                'host': ip_address,
                'weight': CPUS,
                'zk_hosts': zookeeper_topology,
                'zk_path': '/nerve/%s:%s/%s' % (typ, loc, service_name),
                'check_interval': healthcheck_timeout_s + 1.0,
                # Hit the localhost hacheck instance
                'checks': [
                    {
                        'type': 'http',
                        'host': '127.0.0.1',
                        'port': HACHECK_PORT,
                        'uri': hacheck_uri,
                        'timeout': healthcheck_timeout_s,
                        'open_timeout': healthcheck_timeout_s,
                        'rise': 1,
                        'fall': 2,
                        'headers': healthcheck_headers,
                    }
                ]
            }

    return config
def generate_subconfiguration(
    service_name: str,
    service_info: ServiceInfo,
    ip_address: str,
    hacheck_port: int,
    weight: int,
    zk_topology_dir: str,
    zk_location_type: str,
    zk_cluster_type: str,
    labels_dir: str,
    envoy_service_info: Optional[ServiceInfo],
) -> SubConfiguration:

    port = service_info['port']
    # if this is a k8s pod the dict will have the pod IP and we have
    # an hacheck sidecar in the pod that caches checks otherwise it is
    # a marathon/puppet etc service and we use the system hacheck
    hacheck_ip = service_info.get('hacheck_ip', '127.0.0.1')
    # ditto for the IP of the service, in k8s this is the pod IP,
    # otherwise we use the hosts IP
    ip_address = service_info.get('service_ip', ip_address)
    mode = service_info.get('mode', 'http')
    healthcheck_timeout_s = service_info.get('healthcheck_timeout_s', 1.0)
    healthcheck_port = service_info.get('healthcheck_port', port)

    # hacheck will simply ignore the healthcheck_uri for TCP mode checks
    healthcheck_uri = service_info.get('healthcheck_uri', '/status')
    healthcheck_mode = service_info.get('healthcheck_mode', mode)
    custom_labels = get_labels_by_service_and_port(service_name, port, labels_dir=labels_dir)
    hacheck_uri = '/%s/%s/%s/%s' % (
        healthcheck_mode, service_name, healthcheck_port, healthcheck_uri.lstrip('/'))
    advertise = service_info.get('advertise', ['region'])
    extra_advertise = service_info.get('extra_advertise', [])
    healthcheck_headers = service_info.get('extra_healthcheck_headers', {})
    healthcheck_body_expect = service_info.get('healthcheck_body_expect')
    deploy_group = service_info.get('deploy_group')
    paasta_instance = service_info.get('paasta_instance')

    config: SubConfiguration = {}

    if not advertise or not port:
        return config

    # Register at the specified location types in the current superregion
    locations_to_register_in = set()
    for advertise_typ in advertise:
        locations_to_register_in.add(
            (get_current_location(advertise_typ), advertise_typ))

    # Also register in any other locations specified in extra advertisements
    for (src, dst) in extra_advertise:
        src_typ, src_loc = src.split(':')
        dst_typ, dst_loc = dst.split(':')
        if get_current_location(src_typ) != src_loc:
            # We do not match the source
            continue

        valid_advertise_types = [
            advertise_typ
            for advertise_typ in advertise
            # Prevent upcasts, otherwise the service may be made available to
            # more hosts than intended.
            if compare_types(dst_typ, advertise_typ) <= 0
        ]
        # Convert the destination into the 'advertise' type(s)
        for advertise_typ in valid_advertise_types:
            for loc in convert_location_type(dst_loc, dst_typ, advertise_typ):
                locations_to_register_in.add((loc, advertise_typ))

    # Create a separate service entry for each location that we need to register in.
    for loc, typ in locations_to_register_in:
        zk_locations = convert_location_type(loc, typ, zk_location_type)
        for zk_location in zk_locations:
            try:
                zookeeper_topology = get_named_zookeeper_topology(
                    cluster_type=zk_cluster_type,
                    cluster_location=zk_location,
                    zk_topology_dir=zk_topology_dir,
                )
            except Exception:
                continue

            key = '%s.%s.%s:%s.%s.%d.new' % (
                service_name,
                zk_location,
                typ,
                loc,
                ip_address,
                port,
            )

            checks_dict: CheckDict = {
                'type': 'http',
                'host': hacheck_ip,
                'port': hacheck_port,
                'uri': hacheck_uri,
                'timeout': healthcheck_timeout_s,
                'open_timeout': healthcheck_timeout_s,
                'rise': 1,
                'fall': 2,
                'headers': healthcheck_headers,
            }
            if healthcheck_body_expect:
                checks_dict['expect'] = healthcheck_body_expect

            config[key] = {
                'port': port,
                'host': ip_address,
                'zk_hosts': zookeeper_topology,
                'zk_path': '/nerve/%s:%s/%s' % (typ, loc, service_name),
                'check_interval': healthcheck_timeout_s + 1.0,
                # Hit the localhost hacheck instance
                'checks': [
                    checks_dict,
                ],
                'weight': weight,
            }

            v2_key = '%s.%s:%s.%d.v2.new' % (
                service_name,
                zk_location,
                ip_address,
                port,
            )

            if v2_key not in config:
                config[v2_key] = {
                    'port': port,
                    'host': ip_address,
                    'zk_hosts': zookeeper_topology,
                    'zk_path': '/smartstack/global/%s' % service_name,
                    'check_interval': healthcheck_timeout_s + 1.0,
                    # Hit the localhost hacheck instance
                    'checks': [
                        checks_dict,
                    ],
                    'labels': {},
                    'weight': weight,
                }

            config[v2_key]['labels'].update(custom_labels)
            # Set a label that maps the location to an empty string. This
            # allows synapse to find all servers being advertised to it by
            # checking discover_typ:discover_loc == ''
            config[v2_key]['labels']['%s:%s' % (typ, loc)] = ''

            # Having the deploy group and paasta instance will enable Envoy
            # routing via these values for canary instance routing
            if deploy_group:
                config[v2_key]['labels']['deploy_group'] = deploy_group
            if paasta_instance:
                config[v2_key]['labels']['paasta_instance'] = paasta_instance

            if envoy_service_info:
                envoy_key = f'{service_name}.{zk_location}:{ip_address}.{port}'
                config[envoy_key] = generate_envoy_configuration(
                    envoy_service_info,
                    healthcheck_mode,
                    service_name,
                    hacheck_port,
                    ip_address,
                    zookeeper_topology,
                    custom_labels,
                    weight,
                    deploy_group,
                    paasta_instance,
                )

    return config
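# For illustration only: with hypothetical inputs service_name='myservice',
# zk_location='uswest1-prod', typ='region', loc='uswest1-prod',
# ip_address='10.0.0.5' and port=8888, the version above would register entries like
#
#   myservice.uswest1-prod.region:uswest1-prod.10.0.0.5.8888.new
#       -> zk_path /nerve/region:uswest1-prod/myservice
#   myservice.uswest1-prod:10.0.0.5.8888.v2.new
#       -> zk_path /smartstack/global/myservice
#
# plus an Envoy entry keyed myservice.uswest1-prod:10.0.0.5.8888 when
# envoy_service_info is provided. All example values here are made up.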
def generate_subconfiguration(
    service_name,
    service_info,
    ip_address,
    hacheck_port,
    weight,
    zk_topology_dir,
    zk_location_type,
    zk_cluster_type,
    labels_dir,
):

    port = service_info.get('port')
    mode = service_info.get('mode', 'http')
    healthcheck_timeout_s = service_info.get('healthcheck_timeout_s', 1.0)
    healthcheck_port = service_info.get('healthcheck_port', port)

    # hacheck will simply ignore the healthcheck_uri for TCP mode checks
    healthcheck_uri = service_info.get('healthcheck_uri', '/status')
    healthcheck_mode = service_info.get('healthcheck_mode', mode)
    custom_labels = get_labels_by_service_and_port(service_name, port, labels_dir=labels_dir)
    hacheck_uri = '/%s/%s/%s/%s' % (
        healthcheck_mode, service_name, healthcheck_port, healthcheck_uri.lstrip('/'))
    advertise = service_info.get('advertise', ['region'])
    extra_advertise = service_info.get('extra_advertise', [])
    healthcheck_headers = service_info.get('extra_healthcheck_headers', {})
    healthcheck_body_expect = service_info.get('healthcheck_body_expect')

    config = {}

    if not advertise or not port:
        return config

    # Register at the specified location types in the current superregion
    locations_to_register_in = set()
    for advertise_typ in advertise:
        locations_to_register_in.add(
            (get_current_location(advertise_typ), advertise_typ))

    # Also register in any other locations specified in extra advertisements
    for (src, dst) in extra_advertise:
        src_typ, src_loc = src.split(':')
        dst_typ, dst_loc = dst.split(':')
        if get_current_location(src_typ) != src_loc:
            # We do not match the source
            continue

        valid_advertise_types = [
            advertise_typ
            for advertise_typ in advertise
            # Prevent upcasts, otherwise the service may be made available to
            # more hosts than intended.
            if compare_types(dst_typ, advertise_typ) <= 0
        ]
        # Convert the destination into the 'advertise' type(s)
        for advertise_typ in valid_advertise_types:
            for loc in convert_location_type(dst_loc, dst_typ, advertise_typ):
                locations_to_register_in.add((loc, advertise_typ))

    # Create a separate service entry for each location that we need to register in.
    for loc, typ in locations_to_register_in:
        zk_locations = convert_location_type(loc, typ, zk_location_type)
        for zk_location in zk_locations:
            try:
                zookeeper_topology = get_named_zookeeper_topology(
                    cluster_type=zk_cluster_type,
                    cluster_location=zk_location,
                    zk_topology_dir=zk_topology_dir,
                )
            except Exception:
                continue

            key = '%s.%s.%s:%s.%d.new' % (
                service_name,
                zk_location,
                typ,
                loc,
                port,
            )

            checks_dict = {
                'type': 'http',
                'host': '127.0.0.1',
                'port': hacheck_port,
                'uri': hacheck_uri,
                'timeout': healthcheck_timeout_s,
                'open_timeout': healthcheck_timeout_s,
                'rise': 1,
                'fall': 2,
                'headers': healthcheck_headers,
            }
            if healthcheck_body_expect:
                checks_dict['expect'] = healthcheck_body_expect

            config[key] = {
                'port': port,
                'host': ip_address,
                'zk_hosts': zookeeper_topology,
                'zk_path': '/nerve/%s:%s/%s' % (typ, loc, service_name),
                'check_interval': healthcheck_timeout_s + 1.0,
                # Hit the localhost hacheck instance
                'checks': [
                    checks_dict,
                ],
                'weight': weight,
            }

            v2_key = '%s.%s:%d.v2.new' % (
                service_name,
                zk_location,
                port,
            )

            if v2_key not in config:
                config[v2_key] = {
                    'port': port,
                    'host': ip_address,
                    'zk_hosts': zookeeper_topology,
                    'zk_path': '/smartstack/global/%s' % service_name,
                    'check_interval': healthcheck_timeout_s + 1.0,
                    # Hit the localhost hacheck instance
                    'checks': [
                        checks_dict,
                    ],
                    'labels': {},
                    'weight': weight,
                }

            config[v2_key]['labels'].update(custom_labels)
            # Set a label that maps the location to an empty string. This
            # allows synapse to find all servers being advertised to it by
            # checking discover_typ:discover_loc == ''
            config[v2_key]['labels']['%s:%s' % (typ, loc)] = ''

    return config
def generate_subconfiguration(
    service_name,
    service_info,
    ip_address,
    hacheck_port,
    weight,
    zk_topology_dir,
    zk_location_type,
    zk_cluster_type,
):

    port = service_info.get('port')
    mode = service_info.get('mode', 'http')
    healthcheck_timeout_s = service_info.get('healthcheck_timeout_s', 1.0)
    healthcheck_port = service_info.get('healthcheck_port', port)

    # hacheck will simply ignore the healthcheck_uri for TCP mode checks
    healthcheck_uri = service_info.get('healthcheck_uri', '/status')
    healthcheck_mode = service_info.get('healthcheck_mode', mode)
    hacheck_uri = '/%s/%s/%s/%s' % (
        healthcheck_mode, service_name, healthcheck_port, healthcheck_uri.lstrip('/'))
    advertise = service_info.get('advertise', ['region'])
    extra_advertise = service_info.get('extra_advertise', [])
    healthcheck_headers = service_info.get('extra_healthcheck_headers', {})

    config = {}

    if not advertise or not port:
        return config

    # Register at the specified location types in the current superregion
    locations_to_register_in = set()
    for advertise_typ in advertise:
        locations_to_register_in.add((get_current_location(advertise_typ), advertise_typ))

    # Also register in any other locations specified in extra advertisements
    for (src, dst) in extra_advertise:
        src_typ, src_loc = src.split(':')
        dst_typ, dst_loc = dst.split(':')
        if get_current_location(src_typ) != src_loc:
            # We do not match the source
            continue

        valid_advertise_types = [
            advertise_typ
            for advertise_typ in advertise
            # Prevent upcasts, otherwise the service may be made available to
            # more hosts than intended.
            if compare_types(dst_typ, advertise_typ) <= 0
        ]
        # Convert the destination into the 'advertise' type(s)
        for advertise_typ in valid_advertise_types:
            for loc in convert_location_type(dst_loc, dst_typ, advertise_typ):
                locations_to_register_in.add((loc, advertise_typ))

    # Create a separate service entry for each location that we need to register in.
    for loc, typ in locations_to_register_in:
        zk_locations = convert_location_type(loc, typ, zk_location_type)
        for zk_location in zk_locations:
            try:
                zookeeper_topology = get_named_zookeeper_topology(
                    cluster_type=zk_cluster_type,
                    cluster_location=zk_location,
                    zk_topology_dir=zk_topology_dir,
                )
            except Exception:
                continue

            key = '%s.%s.%s:%s.%d.new' % (
                service_name,
                zk_location,
                typ,
                loc,
                port,
            )

            config[key] = {
                'port': port,
                'host': ip_address,
                'zk_hosts': zookeeper_topology,
                'zk_path': '/nerve/%s:%s/%s' % (typ, loc, service_name),
                'check_interval': healthcheck_timeout_s + 1.0,
                # Hit the localhost hacheck instance
                'checks': [
                    {
                        'type': 'http',
                        'host': '127.0.0.1',
                        'port': hacheck_port,
                        'uri': hacheck_uri,
                        'timeout': healthcheck_timeout_s,
                        'open_timeout': healthcheck_timeout_s,
                        'rise': 1,
                        'fall': 2,
                        'headers': healthcheck_headers,
                    },
                ],
                'weight': weight,
            }

            v2_key = '%s.%s:%d.v2.new' % (
                service_name,
                zk_location,
                port,
            )

            if v2_key not in config:
                config[v2_key] = {
                    'port': port,
                    'host': ip_address,
                    'zk_hosts': zookeeper_topology,
                    'zk_path': '/smartstack/global/%s' % service_name,
                    'check_interval': healthcheck_timeout_s + 1.0,
                    # Hit the localhost hacheck instance
                    'checks': [
                        {
                            'type': 'http',
                            'host': '127.0.0.1',
                            'port': hacheck_port,
                            'uri': hacheck_uri,
                            'timeout': healthcheck_timeout_s,
                            'open_timeout': healthcheck_timeout_s,
                            'rise': 1,
                            'fall': 2,
                            'headers': healthcheck_headers,
                        },
                    ],
                    'labels': {},
                    'weight': weight,
                }

            # Set a label that maps the location to an empty string. This
            # allows synapse to find all servers being advertised to it by
            # checking discover_typ:discover_loc == ''
            config[v2_key]['labels']['%s:%s' % (typ, loc)] = ''

    return config
def generate_subconfiguration(service_name, advertise, extra_advertise, port,
                              ip_address, healthcheck_timeout_s, hacheck_uri,
                              healthcheck_headers, hacheck_port, weight,
                              zk_topology_dir, zk_location_type, zk_cluster_type):
    config = {}

    # Register at the specified location types in the current superregion
    locations_to_register_in = set()
    for advertise_typ in advertise:
        locations_to_register_in.add(
            (get_current_location(advertise_typ), advertise_typ))

    # Also register in any other locations specified in extra advertisements
    for (src, dst) in extra_advertise:
        src_typ, src_loc = src.split(':')
        dst_typ, dst_loc = dst.split(':')
        if get_current_location(src_typ) != src_loc:
            # We do not match the source
            continue

        # Convert the destination into the 'advertise' type(s)
        for advertise_typ in advertise:
            # Prevent upcasts, otherwise the service may be made available to
            # more hosts than intended.
            if compare_types(dst_typ, advertise_typ) > 0:
                continue
            for loc in convert_location_type(dst_loc, dst_typ, advertise_typ):
                locations_to_register_in.add((loc, advertise_typ))

    # Create a separate service entry for each location that we need to register in.
    for loc, typ in locations_to_register_in:
        zk_locations = convert_location_type(loc, typ, zk_location_type)
        for zk_location in zk_locations:
            try:
                zookeeper_topology = get_named_zookeeper_topology(
                    cluster_type=zk_cluster_type,
                    cluster_location=zk_location,
                    zk_topology_dir=zk_topology_dir,
                )
            except Exception:
                continue

            key = '%s.%s.%s:%s.%d.new' % (service_name, zk_location, typ, loc, port)

            config[key] = {
                'port': port,
                'host': ip_address,
                'weight': weight,
                'zk_hosts': zookeeper_topology,
                'zk_path': '/nerve/%s:%s/%s' % (typ, loc, service_name),
                'check_interval': healthcheck_timeout_s + 1.0,
                # Hit the localhost hacheck instance
                'checks': [{
                    'type': 'http',
                    'host': '127.0.0.1',
                    'port': hacheck_port,
                    'uri': hacheck_uri,
                    'timeout': healthcheck_timeout_s,
                    'open_timeout': healthcheck_timeout_s,
                    'rise': 1,
                    'fall': 2,
                    'headers': healthcheck_headers,
                }]
            }

    return config