def test_get_current_location(self, mock_data):
    mock_open = mock.mock_open(read_data='test ')
    if six.PY2:
        open_module = '__builtin__.open'
    else:
        open_module = 'builtins.open'
    with mock.patch(open_module, mock_open):
        assert get_current_location('az') == 'test'
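# For reference, the mock.mock_open pattern above distilled into a standalone,
# Python 3-only sketch (on Python 2 the builtin lived at '__builtin__.open',
# hence the six.PY2 branch). read_facts and the fact path are hypothetical
# stand-ins, not part of this codebase.
from unittest import mock


def read_facts(path):
    # Hypothetical reader mirroring what get_current_location appears to do:
    # read a fact file and strip trailing whitespace.
    with open(path) as f:
        return f.read().strip()


def test_read_facts():
    mock_open = mock.mock_open(read_data='test ')
    with mock.patch('builtins.open', mock_open):
        assert read_facts('/path/to/fact') == 'test'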
def haproxy_cfg_for_service(service_name, service_info, zookeeper_topology):
    proxy_port = service_info['proxy_port']

    # If the service sets one timeout but not the other, set both
    # as per haproxy best practices.
    default_timeout = max(
        service_info.get('timeout_client_ms'),
        service_info.get('timeout_server_ms')
    )

    # Server options
    mode = service_info.get('mode', 'http')
    if mode == 'http':
        server_options = 'check port %d observe layer7' % HACHECK_PORT
    else:
        server_options = 'check port %d observe layer4' % HACHECK_PORT

    # Frontend options
    frontend_options = []
    timeout_client_ms = service_info.get(
        'timeout_client_ms', default_timeout
    )
    if timeout_client_ms is not None:
        frontend_options.append('timeout client %dms' % timeout_client_ms)

    if mode == 'http':
        frontend_options.append('capture request header X-B3-SpanId len 64')
        frontend_options.append('capture request header X-B3-TraceId len 64')
        frontend_options.append('capture request header X-B3-ParentSpanId len 64')
        frontend_options.append('capture request header X-B3-Flags len 10')
        frontend_options.append('capture request header X-B3-Sampled len 10')
        frontend_options.append('option httplog')
    elif mode == 'tcp':
        frontend_options.append('option tcplog')

    # Backend options
    backend_options = []
    extra_headers = service_info.get('extra_headers', {})
    for header, value in extra_headers.items():
        backend_options.append(r'reqadd %s:\ %s' % (header, value))

    # Listen options
    listen_options = []

    # hacheck healthchecking
    # Note that we use a dummy port value of '0' here because HAProxy is
    # passing in the real port using the X-Haproxy-Server-State header.
    # See SRV-1492 / SRV-1498 for more details.
    port = 0
    extra_healthcheck_headers = service_info.get('extra_healthcheck_headers', {})

    if len(extra_healthcheck_headers) > 0:
        healthcheck_base = 'HTTP/1.1'
        headers_string = healthcheck_base + ''.join(
            r'\r\n%s:\ %s' % (k, v)
            for (k, v) in extra_healthcheck_headers.items()
        )
    else:
        headers_string = ""

    healthcheck_uri = service_info.get('healthcheck_uri', '/status')
    healthcheck_string = r'option httpchk GET /%s/%s/%d/%s %s' % \
        (mode, service_name, port, healthcheck_uri.lstrip('/'), headers_string)
    healthcheck_string = healthcheck_string.strip()
    listen_options.append(healthcheck_string)

    listen_options.append('http-check send-state')

    if mode == 'tcp':
        listen_options.append('mode tcp')

    retries = service_info.get('retries')
    if retries is not None:
        listen_options.append('retries %d' % retries)

    allredisp = service_info.get('allredisp')
    if allredisp is not None and allredisp:
        listen_options.append('option allredisp')

    timeout_connect_ms = service_info.get('timeout_connect_ms')
    if timeout_connect_ms is not None:
        listen_options.append('timeout connect %dms' % timeout_connect_ms)

    timeout_server_ms = service_info.get(
        'timeout_server_ms', default_timeout
    )
    if timeout_server_ms is not None:
        listen_options.append('timeout server %dms' % timeout_server_ms)

    balance = service_info.get('balance')
    # Validations are done in config post-receive, so invalid config should
    # be ignored.
    if balance is not None and balance in ('leastconn', 'roundrobin'):
        listen_options.append('balance %s' % balance)

    discover_type = service_info.get('discover', 'region')
    location = get_current_location(discover_type)

    discovery = {
        'method': 'zookeeper',
        'path': '/nerve/%s:%s/%s' % (discover_type, location, service_name),
        'hosts': zookeeper_topology,
    }

    chaos = service_info.get('chaos')
    if chaos:
        frontend_chaos, discovery = chaos_options(chaos, discovery)
        frontend_options.extend(frontend_chaos)

    # Now write the actual synapse service entry
    service = {
        'default_servers': [],  # See SRV-1190
        'use_previous_backends': False,
        'discovery': discovery,
        'haproxy': {
            'port': '%d' % proxy_port,
            'server_options': server_options,
            'frontend': frontend_options,
            'listen': listen_options,
            'backend': backend_options
        }
    }

    return service
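# Rough sketch of the entry this function produces, for a hypothetical
# service. The values are illustrative assumptions: HACHECK_PORT is assumed
# to be 6666 and get_current_location('region') to return 'region-one';
# only the interesting fields are spelled out.
service_info = {
    'proxy_port': 20973,
    'timeout_client_ms': 300,
    'timeout_server_ms': 1000,
}
# haproxy_cfg_for_service('example_service', service_info, ['zk1:2181'])
# would then return roughly:
expected = {
    'default_servers': [],
    'use_previous_backends': False,
    'discovery': {
        'method': 'zookeeper',
        'path': '/nerve/region:region-one/example_service',
        'hosts': ['zk1:2181'],
    },
    'haproxy': {
        'port': '20973',
        'server_options': 'check port 6666 observe layer7',
        'frontend': [
            'timeout client 300ms',
            'capture request header X-B3-SpanId len 64',
            # ... the remaining X-B3 capture lines, then 'option httplog'
        ],
        'listen': [
            'option httpchk GET /http/example_service/0/status',
            'http-check send-state',
            'timeout server 1000ms',
        ],
        'backend': [],
    },
}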
def haproxy_cfg_for_service(service_name, service_info, zookeeper_topology):
    proxy_port = service_info['proxy_port']

    # If the service sets one timeout but not the other, set both
    # as per haproxy best practices.
    default_timeout = max(service_info.get('timeout_client_ms'),
                          service_info.get('timeout_server_ms'))

    # Server options
    mode = service_info.get('mode', 'http')
    if mode == 'http':
        server_options = 'check port %d observe layer7' % HACHECK_PORT
    else:
        server_options = 'check port %d observe layer4' % HACHECK_PORT

    # Frontend options
    frontend_options = []
    timeout_client_ms = service_info.get('timeout_client_ms', default_timeout)
    if timeout_client_ms is not None:
        frontend_options.append('timeout client %dms' % timeout_client_ms)

    if mode == 'http':
        frontend_options.append('capture request header X-B3-SpanId len 64')
        frontend_options.append('capture request header X-B3-TraceId len 64')
        frontend_options.append(
            'capture request header X-B3-ParentSpanId len 64')
        frontend_options.append('capture request header X-B3-Flags len 10')
        frontend_options.append('capture request header X-B3-Sampled len 10')
        frontend_options.append('option httplog')
    elif mode == 'tcp':
        frontend_options.append('option tcplog')

    # Backend options
    backend_options = []
    extra_headers = service_info.get('extra_headers', {})
    for header, value in extra_headers.items():
        backend_options.append(r'reqadd %s:\ %s' % (header, value))

    # Listen options
    listen_options = []

    # hacheck healthchecking
    # Note that we use a dummy port value of '0' here because HAProxy is
    # passing in the real port using the X-Haproxy-Server-State header.
    # See SRV-1492 / SRV-1498 for more details.
    port = 0
    extra_healthcheck_headers = service_info.get('extra_healthcheck_headers', {})

    if len(extra_healthcheck_headers) > 0:
        healthcheck_base = 'HTTP/1.1'
        headers_string = healthcheck_base + ''.join(
            r'\r\n%s:\ %s' % (k, v)
            for (k, v) in extra_healthcheck_headers.items())
    else:
        headers_string = ""

    healthcheck_uri = service_info.get('healthcheck_uri', '/status')
    healthcheck_string = r'option httpchk GET /%s/%s/%d/%s %s' % \
        (mode, service_name, port, healthcheck_uri.lstrip('/'), headers_string)
    healthcheck_string = healthcheck_string.strip()
    listen_options.append(healthcheck_string)

    listen_options.append('http-check send-state')

    if mode == 'tcp':
        listen_options.append('mode tcp')

    retries = service_info.get('retries')
    if retries is not None:
        listen_options.append('retries %d' % retries)

    allredisp = service_info.get('allredisp')
    if allredisp is not None and allredisp:
        listen_options.append('option allredisp')

    timeout_connect_ms = service_info.get('timeout_connect_ms')
    if timeout_connect_ms is not None:
        listen_options.append('timeout connect %dms' % timeout_connect_ms)

    timeout_server_ms = service_info.get('timeout_server_ms', default_timeout)
    if timeout_server_ms is not None:
        listen_options.append('timeout server %dms' % timeout_server_ms)

    discover_type = service_info.get('discover', 'region')
    location = get_current_location(discover_type)

    discovery = {
        'method': 'zookeeper',
        'path': '/nerve/%s:%s/%s' % (discover_type, location, service_name),
        'hosts': zookeeper_topology,
    }

    chaos = service_info.get('chaos')
    if chaos:
        frontend_chaos, discovery = chaos_options(chaos, discovery)
        frontend_options.extend(frontend_chaos)

    # Now write the actual synapse service entry
    service = {
        'default_servers': [],  # See SRV-1190
        'use_previous_backends': False,
        'discovery': discovery,
        'haproxy': {
            'port': '%d' % proxy_port,
            'server_options': server_options,
            'frontend': frontend_options,
            'listen': listen_options,
            'backend': backend_options
        }
    }

    return service
def generate_configuration(
    synapse_tools_config: SynapseToolsConfig,
    zookeeper_topology: Iterable[str],
    services: Iterable[Tuple[str, ServiceNamespaceConfig]],
) -> BaseConfig:
    synapse_config = generate_base_config(synapse_tools_config)
    available_locations = available_location_types()
    location_depth_mapping = {
        loc: depth
        for depth, loc in enumerate(available_locations)
    }
    available_locations = set(available_locations)

    for (service_name, service_info) in services:
        proxy_port = service_info.get('proxy_port', -1)
        # If we end up with the default value or a negative number in general,
        # then we know that the service does not want to be in SmartStack.
        if proxy_port is not None and proxy_port < 0:
            continue
        # Note that at this point proxy_port can be:
        # * valid number: wants load balancing (HAProxy/Nginx)
        # * None: wants discovery, but no load balancing (files)

        discover_type = service_info.get('discover', 'region')
        advertise_types = sorted(
            [
                advertise_typ
                for advertise_typ in service_info.get('advertise', ['region'])
                # don't consider invalid advertise types
                if advertise_typ in available_locations
            ],
            key=lambda typ: location_depth_mapping[typ],
            reverse=True,  # consider the most specific types first
        )
        if discover_type not in advertise_types:
            return {}

        base_watcher_cfg = base_watcher_cfg_for_service(
            service_name=service_name,
            service_info=cast(ServiceInfo, service_info),
            zookeeper_topology=zookeeper_topology,
            synapse_tools_config=synapse_tools_config,
        )

        socket_path = _get_socket_path(synapse_tools_config, service_name)
        socket_proxy_path = _get_socket_path(
            synapse_tools_config, service_name, proxy_proto=True,
        )

        endpoint_timeouts = service_info.get('endpoint_timeouts', {})

        for (advertise_type, endpoint_name) in _get_backends_for_service(
            advertise_types,
            endpoint_timeouts,
        ):
            backend_identifier = get_backend_name(
                service_name, discover_type, advertise_type, endpoint_name,
            )
            config = copy.deepcopy(base_watcher_cfg)

            config['discovery']['label_filters'] = [
                {
                    'label': '%s:%s' % (advertise_type, get_current_location(advertise_type)),
                    'value': '',
                    'condition': 'equals',
                },
            ]

            if endpoint_name != HAPROXY_DEFAULT_SECTION:
                endpoint_timeout = endpoint_timeouts[endpoint_name]
                # Override the 'timeout server' value
                timeout_index_list = [
                    i for i, v in enumerate(config['haproxy']['backend'])
                    if v.startswith("timeout server ")
                ]
                if len(timeout_index_list) > 0:
                    timeout_index = timeout_index_list[0]
                    config['haproxy']['backend'][timeout_index] = \
                        'timeout server %dms' % endpoint_timeout
                else:
                    config['haproxy']['backend'].append(
                        'timeout server %dms' % endpoint_timeout,
                    )

            if proxy_port is None:
                config['haproxy'] = {'disabled': True}
                if synapse_tools_config['listen_with_nginx']:
                    config['nginx'] = {'disabled': True}
            else:
                if advertise_type == discover_type and endpoint_name == HAPROXY_DEFAULT_SECTION:
                    # Specify a proxy port to create a frontend for this service.
                    if synapse_tools_config['listen_with_haproxy']:
                        config['haproxy']['port'] = str(proxy_port)
                        config['haproxy']['frontend'].extend([
                            'bind {0}'.format(socket_path),
                            'bind {0} accept-proxy'.format(socket_proxy_path),
                        ])
                    # If listen_with_haproxy is False, then have
                    # HAProxy bind only to the socket. Nginx may or may not
                    # be listening on ports based on listen_with_nginx values
                    # at this stage.
                    else:
                        config['haproxy']['port'] = None
                        config['haproxy']['bind_address'] = socket_path
                        config['haproxy']['frontend'].append(
                            'bind {0} accept-proxy'.format(socket_proxy_path),
                        )
                else:
                    # The backend-only watchers don't need a frontend
                    # because they have no listen port, so Synapse doesn't
                    # generate a frontend section for them at all.
                    del config['haproxy']['frontend']  # type: ignore

            config['haproxy']['backend_name'] = backend_identifier
            synapse_config['services'][backend_identifier] = config

        if proxy_port is not None:
            # If nginx is supported, include a single additional static
            # service watcher per service that listens on the right port and
            # proxies back to the unix socket exposed by HAProxy.
            if synapse_tools_config['listen_with_nginx']:
                listener_name = '{0}.nginx_listener'.format(service_name)
                synapse_config['services'][listener_name] = (
                    _generate_nginx_for_watcher(
                        service_name=service_name,
                        service_info=cast(ServiceInfo, service_info),
                        synapse_tools_config=synapse_tools_config,
                    )
                )

            # Add HAProxy options for plugins
            for plugin_name in PLUGIN_REGISTRY:
                plugin_instance = PLUGIN_REGISTRY[plugin_name](
                    service_name=service_name,
                    service_info=cast(ServiceInfo, service_info),
                    synapse_tools_config=synapse_tools_config,
                )
                config_to_opts = [
                    (
                        synapse_config['services'][service_name]['haproxy']['frontend'],
                        plugin_instance.frontend_options(),
                        plugin_instance.prepend_options('frontend'),
                    ),
                    (
                        synapse_config['services'][service_name]['haproxy']['backend'],
                        plugin_instance.backend_options(),
                        plugin_instance.prepend_options('backend'),
                    ),
                    (
                        synapse_config['haproxy']['global'],
                        plugin_instance.global_options(),
                        plugin_instance.prepend_options('global'),
                    ),
                    (
                        synapse_config['haproxy']['defaults'],
                        plugin_instance.defaults_options(),
                        plugin_instance.prepend_options('defaults'),
                    ),
                ]
                for (cfg, opts, prepend_options) in config_to_opts:
                    options = [x for x in opts if x not in cfg]
                    if prepend_options:
                        cfg[0:0] += options
                    else:
                        cfg.extend(options)

            # TODO(jlynch|2017-08-15): move this to a plugin!
            # Populate the ACLs to route to the service backends; this must
            # happen last because the ordering of use_backend ACLs matters.
            synapse_config['services'][service_name]['haproxy']['frontend'].extend(
                generate_acls_for_service(
                    service_name=service_name,
                    discover_type=discover_type,
                    advertise_types=advertise_types,
                    endpoint_timeouts=endpoint_timeouts,
                )
            )

    return synapse_config
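# The 'timeout server' override above is easy to miss in the flattened flow;
# here is the same replace-or-append pattern distilled into a standalone,
# runnable sketch. The function name and the sample options are illustrative,
# not part of the module.
def override_timeout_server(backend_options, timeout_ms):
    """Replace an existing 'timeout server' option in place, or append one."""
    indices = [
        i for i, opt in enumerate(backend_options)
        if opt.startswith('timeout server ')
    ]
    new_opt = 'timeout server %dms' % timeout_ms
    if indices:
        backend_options[indices[0]] = new_opt
    else:
        backend_options.append(new_opt)
    return backend_options


# Illustrative usage:
opts = ['retries 1', 'timeout server 1000ms']
assert override_timeout_server(opts, 250) == ['retries 1', 'timeout server 250ms']
assert override_timeout_server(['retries 1'], 250) == ['retries 1', 'timeout server 250ms']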
def generate_subconfiguration(
    service_name: str,
    service_info: ServiceInfo,
    ip_address: str,
    hacheck_port: int,
    weight: int,
    zk_topology_dir: str,
    zk_location_type: str,
    zk_cluster_type: str,
    labels_dir: str,
    envoy_service_info: Optional[ServiceInfo],
) -> SubConfiguration:
    port = service_info['port']
    # If this is a k8s pod, the dict will have the pod IP and we have
    # an hacheck sidecar in the pod that caches checks; otherwise it is
    # a marathon/puppet etc. service and we use the system hacheck.
    hacheck_ip = service_info.get('hacheck_ip', '127.0.0.1')
    # Ditto for the IP of the service: in k8s this is the pod IP,
    # otherwise we use the host's IP.
    ip_address = service_info.get('service_ip', ip_address)
    mode = service_info.get('mode', 'http')
    healthcheck_timeout_s = service_info.get('healthcheck_timeout_s', 1.0)
    healthcheck_port = service_info.get('healthcheck_port', port)

    # hacheck will simply ignore the healthcheck_uri for TCP mode checks
    healthcheck_uri = service_info.get('healthcheck_uri', '/status')
    healthcheck_mode = service_info.get('healthcheck_mode', mode)
    custom_labels = get_labels_by_service_and_port(
        service_name, port, labels_dir=labels_dir,
    )
    hacheck_uri = '/%s/%s/%s/%s' % (
        healthcheck_mode, service_name, healthcheck_port,
        healthcheck_uri.lstrip('/'),
    )
    advertise = service_info.get('advertise', ['region'])
    extra_advertise = service_info.get('extra_advertise', [])
    healthcheck_headers = service_info.get('extra_healthcheck_headers', {})
    healthcheck_body_expect = service_info.get('healthcheck_body_expect')
    deploy_group = service_info.get('deploy_group')
    paasta_instance = service_info.get('paasta_instance')

    config: SubConfiguration = {}

    if not advertise or not port:
        return config

    # Register at the specified location types in the current superregion
    locations_to_register_in = set()
    for advertise_typ in advertise:
        locations_to_register_in.add(
            (get_current_location(advertise_typ), advertise_typ),
        )

    # Also register in any other locations specified in extra advertisements
    for (src, dst) in extra_advertise:
        src_typ, src_loc = src.split(':')
        dst_typ, dst_loc = dst.split(':')
        if get_current_location(src_typ) != src_loc:
            # We do not match the source
            continue

        valid_advertise_types = [
            advertise_typ
            for advertise_typ in advertise
            # Prevent upcasts, otherwise the service may be made available to
            # more hosts than intended.
            if compare_types(dst_typ, advertise_typ) <= 0
        ]
        # Convert the destination into the 'advertise' type(s)
        for advertise_typ in valid_advertise_types:
            for loc in convert_location_type(dst_loc, dst_typ, advertise_typ):
                locations_to_register_in.add((loc, advertise_typ))

    # Create a separate service entry for each location that we need to
    # register in.
    for loc, typ in locations_to_register_in:
        zk_locations = convert_location_type(loc, typ, zk_location_type)
        for zk_location in zk_locations:
            try:
                zookeeper_topology = get_named_zookeeper_topology(
                    cluster_type=zk_cluster_type,
                    cluster_location=zk_location,
                    zk_topology_dir=zk_topology_dir,
                )
            except Exception:
                continue

            key = '%s.%s.%s:%s.%s.%d.new' % (
                service_name, zk_location, typ, loc, ip_address, port,
            )

            checks_dict: CheckDict = {
                'type': 'http',
                'host': hacheck_ip,
                'port': hacheck_port,
                'uri': hacheck_uri,
                'timeout': healthcheck_timeout_s,
                'open_timeout': healthcheck_timeout_s,
                'rise': 1,
                'fall': 2,
                'headers': healthcheck_headers,
            }
            if healthcheck_body_expect:
                checks_dict['expect'] = healthcheck_body_expect

            config[key] = {
                'port': port,
                'host': ip_address,
                'zk_hosts': zookeeper_topology,
                'zk_path': '/nerve/%s:%s/%s' % (typ, loc, service_name),
                'check_interval': healthcheck_timeout_s + 1.0,
                # Hit the localhost hacheck instance
                'checks': [
                    checks_dict,
                ],
                'weight': weight,
            }

            v2_key = '%s.%s:%s.%d.v2.new' % (
                service_name, zk_location, ip_address, port,
            )
            if v2_key not in config:
                config[v2_key] = {
                    'port': port,
                    'host': ip_address,
                    'zk_hosts': zookeeper_topology,
                    'zk_path': '/smartstack/global/%s' % service_name,
                    'check_interval': healthcheck_timeout_s + 1.0,
                    # Hit the localhost hacheck instance
                    'checks': [
                        checks_dict,
                    ],
                    'labels': {},
                    'weight': weight,
                }

            config[v2_key]['labels'].update(custom_labels)
            # Set a label that maps the location to an empty string. This
            # allows synapse to find all servers being advertised to it by
            # checking discover_typ:discover_loc == ''
            config[v2_key]['labels']['%s:%s' % (typ, loc)] = ''

            # Having the deploy group and paasta instance will enable Envoy
            # routing via these values for canary instance routing.
            if deploy_group:
                config[v2_key]['labels']['deploy_group'] = deploy_group
            if paasta_instance:
                config[v2_key]['labels']['paasta_instance'] = paasta_instance

            if envoy_service_info:
                envoy_key = f'{service_name}.{zk_location}:{ip_address}.{port}'
                config[envoy_key] = generate_envoy_configuration(
                    envoy_service_info,
                    healthcheck_mode,
                    service_name,
                    hacheck_port,
                    ip_address,
                    zookeeper_topology,
                    custom_labels,
                    weight,
                    deploy_group,
                    paasta_instance,
                )

    return config
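# To make the three key formats above easier to compare, a small runnable
# sketch with illustrative values (the service and location names are made
# up, not real deployments):
service_name, zk_location = 'example_service', 'uswest1-prod'
typ, loc = 'region', 'uswest1-prod'
ip_address, port = '10.0.0.5', 31337

# v1 nerve watcher key, one per (location, type) registration:
key = '%s.%s.%s:%s.%s.%d.new' % (service_name, zk_location, typ, loc, ip_address, port)
assert key == 'example_service.uswest1-prod.region:uswest1-prod.10.0.0.5.31337.new'

# v2 key, shared across registrations for the same host/port:
v2_key = '%s.%s:%s.%d.v2.new' % (service_name, zk_location, ip_address, port)
assert v2_key == 'example_service.uswest1-prod:10.0.0.5.31337.v2.new'

# Envoy key, emitted only when envoy_service_info is set:
envoy_key = f'{service_name}.{zk_location}:{ip_address}.{port}'
assert envoy_key == 'example_service.uswest1-prod:10.0.0.5.31337'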
def generate_subconfiguration(service_name, advertise, extra_advertise,
                              port, ip_address, healthcheck_timeout_s,
                              hacheck_uri, healthcheck_headers):
    config = {}

    # Register at the specified location types in the current superregion
    locations_to_register_in = set()
    for advertise_typ in advertise:
        locations_to_register_in.add(
            (get_current_location(advertise_typ), advertise_typ)
        )

    # Also register in any other locations specified in extra advertisements
    for (src, dst) in extra_advertise:
        src_typ, src_loc = src.split(':')
        dst_typ, dst_loc = dst.split(':')
        if get_current_location(src_typ) != src_loc:
            # We do not match the source
            continue

        # Convert the destination into the 'advertise' type(s)
        for advertise_typ in advertise:
            # Prevent upcasts, otherwise the service may be made available to
            # more hosts than intended.
            if compare_types(dst_typ, advertise_typ) > 0:
                continue
            for loc in convert_location_type(dst_loc, dst_typ, advertise_typ):
                locations_to_register_in.add((loc, advertise_typ))

    # Create a separate service entry for each location that we need to
    # register in.
    for loc, typ in locations_to_register_in:
        superregions = convert_location_type(loc, typ, 'superregion')
        for superregion in superregions:
            try:
                zookeeper_topology = get_named_zookeeper_topology(
                    cluster_type='infrastructure',
                    cluster_location=superregion
                )
            except Exception:
                continue

            key = '%s.%s.%s:%s.%d.new' % (
                service_name, superregion, typ, loc, port
            )

            config[key] = {
                'port': port,
                'host': ip_address,
                'weight': CPUS,
                'zk_hosts': zookeeper_topology,
                'zk_path': '/nerve/%s:%s/%s' % (typ, loc, service_name),
                'check_interval': healthcheck_timeout_s + 1.0,
                # Hit the localhost hacheck instance
                'checks': [
                    {
                        'type': 'http',
                        'host': '127.0.0.1',
                        'port': HACHECK_PORT,
                        'uri': hacheck_uri,
                        'timeout': healthcheck_timeout_s,
                        'open_timeout': healthcheck_timeout_s,
                        'rise': 1,
                        'fall': 2,
                        'headers': healthcheck_headers,
                    }
                ]
            }

    return config
def generate_subconfiguration(
    service_name,
    service_info,
    ip_address,
    hacheck_port,
    weight,
    zk_topology_dir,
    zk_location_type,
    zk_cluster_type,
    labels_dir,
):
    port = service_info.get('port')
    mode = service_info.get('mode', 'http')
    healthcheck_timeout_s = service_info.get('healthcheck_timeout_s', 1.0)
    healthcheck_port = service_info.get('healthcheck_port', port)

    # hacheck will simply ignore the healthcheck_uri for TCP mode checks
    healthcheck_uri = service_info.get('healthcheck_uri', '/status')
    healthcheck_mode = service_info.get('healthcheck_mode', mode)
    custom_labels = get_labels_by_service_and_port(
        service_name, port, labels_dir=labels_dir,
    )
    hacheck_uri = '/%s/%s/%s/%s' % (
        healthcheck_mode, service_name, healthcheck_port,
        healthcheck_uri.lstrip('/'),
    )
    advertise = service_info.get('advertise', ['region'])
    extra_advertise = service_info.get('extra_advertise', [])
    healthcheck_headers = service_info.get('extra_healthcheck_headers', {})
    healthcheck_body_expect = service_info.get('healthcheck_body_expect')

    config = {}

    if not advertise or not port:
        return config

    # Register at the specified location types in the current superregion
    locations_to_register_in = set()
    for advertise_typ in advertise:
        locations_to_register_in.add(
            (get_current_location(advertise_typ), advertise_typ),
        )

    # Also register in any other locations specified in extra advertisements
    for (src, dst) in extra_advertise:
        src_typ, src_loc = src.split(':')
        dst_typ, dst_loc = dst.split(':')
        if get_current_location(src_typ) != src_loc:
            # We do not match the source
            continue

        valid_advertise_types = [
            advertise_typ
            for advertise_typ in advertise
            # Prevent upcasts, otherwise the service may be made available to
            # more hosts than intended.
            if compare_types(dst_typ, advertise_typ) <= 0
        ]
        # Convert the destination into the 'advertise' type(s)
        for advertise_typ in valid_advertise_types:
            for loc in convert_location_type(dst_loc, dst_typ, advertise_typ):
                locations_to_register_in.add((loc, advertise_typ))

    # Create a separate service entry for each location that we need to
    # register in.
    for loc, typ in locations_to_register_in:
        zk_locations = convert_location_type(loc, typ, zk_location_type)
        for zk_location in zk_locations:
            try:
                zookeeper_topology = get_named_zookeeper_topology(
                    cluster_type=zk_cluster_type,
                    cluster_location=zk_location,
                    zk_topology_dir=zk_topology_dir,
                )
            except Exception:
                continue

            key = '%s.%s.%s:%s.%d.new' % (
                service_name, zk_location, typ, loc, port,
            )

            checks_dict = {
                'type': 'http',
                'host': '127.0.0.1',
                'port': hacheck_port,
                'uri': hacheck_uri,
                'timeout': healthcheck_timeout_s,
                'open_timeout': healthcheck_timeout_s,
                'rise': 1,
                'fall': 2,
                'headers': healthcheck_headers,
            }
            if healthcheck_body_expect:
                checks_dict['expect'] = healthcheck_body_expect

            config[key] = {
                'port': port,
                'host': ip_address,
                'zk_hosts': zookeeper_topology,
                'zk_path': '/nerve/%s:%s/%s' % (typ, loc, service_name),
                'check_interval': healthcheck_timeout_s + 1.0,
                # Hit the localhost hacheck instance
                'checks': [
                    checks_dict,
                ],
                'weight': weight,
            }

            v2_key = '%s.%s:%d.v2.new' % (
                service_name, zk_location, port,
            )
            if v2_key not in config:
                config[v2_key] = {
                    'port': port,
                    'host': ip_address,
                    'zk_hosts': zookeeper_topology,
                    'zk_path': '/smartstack/global/%s' % service_name,
                    'check_interval': healthcheck_timeout_s + 1.0,
                    # Hit the localhost hacheck instance
                    'checks': [
                        checks_dict,
                    ],
                    'labels': {},
                    'weight': weight,
                }

            config[v2_key]['labels'].update(custom_labels)
            # Set a label that maps the location to an empty string. This
            # allows synapse to find all servers being advertised to it by
            # checking discover_typ:discover_loc == ''
            config[v2_key]['labels']['%s:%s' % (typ, loc)] = ''

    return config
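# How the empty-string location label pairs up with discovery: the nerve
# registration above advertises the label, and the synapse watcher (see the
# label_filters built in generate_configuration) selects on it. A minimal
# runnable sketch; the location name is illustrative.
labels = {'region:uswest1-prod': ''}  # written by the registration side

label_filter = {
    'label': 'region:uswest1-prod',   # '%s:%s' % (advertise_type, current_location)
    'value': '',
    'condition': 'equals',
}

# A server matches when it carries the filtered label with value '':
assert labels.get(label_filter['label']) == label_filter['value']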
def generate_subconfiguration(
    service_name,
    service_info,
    ip_address,
    hacheck_port,
    weight,
    zk_topology_dir,
    zk_location_type,
    zk_cluster_type,
):
    port = service_info.get('port')
    mode = service_info.get('mode', 'http')
    healthcheck_timeout_s = service_info.get('healthcheck_timeout_s', 1.0)
    healthcheck_port = service_info.get('healthcheck_port', port)

    # hacheck will simply ignore the healthcheck_uri for TCP mode checks
    healthcheck_uri = service_info.get('healthcheck_uri', '/status')
    healthcheck_mode = service_info.get('healthcheck_mode', mode)
    hacheck_uri = '/%s/%s/%s/%s' % (
        healthcheck_mode, service_name, healthcheck_port,
        healthcheck_uri.lstrip('/'))
    advertise = service_info.get('advertise', ['region'])
    extra_advertise = service_info.get('extra_advertise', [])
    healthcheck_headers = service_info.get('extra_healthcheck_headers', {})

    config = {}

    if not advertise or not port:
        return config

    # Register at the specified location types in the current superregion
    locations_to_register_in = set()
    for advertise_typ in advertise:
        locations_to_register_in.add(
            (get_current_location(advertise_typ), advertise_typ)
        )

    # Also register in any other locations specified in extra advertisements
    for (src, dst) in extra_advertise:
        src_typ, src_loc = src.split(':')
        dst_typ, dst_loc = dst.split(':')
        if get_current_location(src_typ) != src_loc:
            # We do not match the source
            continue

        valid_advertise_types = [
            advertise_typ
            for advertise_typ in advertise
            # Prevent upcasts, otherwise the service may be made available to
            # more hosts than intended.
            if compare_types(dst_typ, advertise_typ) <= 0
        ]
        # Convert the destination into the 'advertise' type(s)
        for advertise_typ in valid_advertise_types:
            for loc in convert_location_type(dst_loc, dst_typ, advertise_typ):
                locations_to_register_in.add((loc, advertise_typ))

    # Create a separate service entry for each location that we need to
    # register in.
    for loc, typ in locations_to_register_in:
        zk_locations = convert_location_type(loc, typ, zk_location_type)
        for zk_location in zk_locations:
            try:
                zookeeper_topology = get_named_zookeeper_topology(
                    cluster_type=zk_cluster_type,
                    cluster_location=zk_location,
                    zk_topology_dir=zk_topology_dir,
                )
            except Exception:
                continue

            key = '%s.%s.%s:%s.%d.new' % (
                service_name, zk_location, typ, loc, port,
            )

            config[key] = {
                'port': port,
                'host': ip_address,
                'zk_hosts': zookeeper_topology,
                'zk_path': '/nerve/%s:%s/%s' % (typ, loc, service_name),
                'check_interval': healthcheck_timeout_s + 1.0,
                # Hit the localhost hacheck instance
                'checks': [
                    {
                        'type': 'http',
                        'host': '127.0.0.1',
                        'port': hacheck_port,
                        'uri': hacheck_uri,
                        'timeout': healthcheck_timeout_s,
                        'open_timeout': healthcheck_timeout_s,
                        'rise': 1,
                        'fall': 2,
                        'headers': healthcheck_headers,
                    },
                ],
                'weight': weight,
            }

            v2_key = '%s.%s:%d.v2.new' % (
                service_name, zk_location, port,
            )
            if v2_key not in config:
                config[v2_key] = {
                    'port': port,
                    'host': ip_address,
                    'zk_hosts': zookeeper_topology,
                    'zk_path': '/smartstack/global/%s' % service_name,
                    'check_interval': healthcheck_timeout_s + 1.0,
                    # Hit the localhost hacheck instance
                    'checks': [
                        {
                            'type': 'http',
                            'host': '127.0.0.1',
                            'port': hacheck_port,
                            'uri': hacheck_uri,
                            'timeout': healthcheck_timeout_s,
                            'open_timeout': healthcheck_timeout_s,
                            'rise': 1,
                            'fall': 2,
                            'headers': healthcheck_headers,
                        },
                    ],
                    'labels': {},
                    'weight': weight,
                }

            # Set a label that maps the location to an empty string. This
            # allows synapse to find all servers being advertised to it by
            # checking discover_typ:discover_loc == ''
            config[v2_key]['labels']['%s:%s' % (typ, loc)] = ''

    return config
def generate_configuration(synapse_tools_config, zookeeper_topology, services):
    synapse_config = generate_base_config(synapse_tools_config)
    available_locations = available_location_types()
    location_depth_mapping = {
        loc: depth
        for depth, loc in enumerate(available_locations)
    }
    available_locations = set(available_locations)

    for (service_name, service_info) in services:
        proxy_port = service_info.get('proxy_port')
        if proxy_port is None:
            continue

        discover_type = service_info.get('discover', 'region')
        advertise_types = sorted(
            [
                advertise_typ
                for advertise_typ in service_info.get('advertise', ['region'])
                # don't consider invalid advertise types
                if advertise_typ in available_locations
            ],
            key=lambda typ: location_depth_mapping[typ],
            reverse=True,  # consider the most specific types first
        )
        if discover_type not in advertise_types:
            return {}

        base_haproxy_cfg = base_haproxy_cfg_for_service(
            service_name=service_name,
            service_info=service_info,
            zookeeper_topology=zookeeper_topology,
            synapse_tools_config=synapse_tools_config,
        )

        for advertise_type in advertise_types:
            config = copy.deepcopy(base_haproxy_cfg)
            backend_identifier = get_backend_name(
                service_name, discover_type, advertise_type
            )

            if advertise_type == discover_type:
                # Specify a proxy port to create a frontend for this service
                config['haproxy']['port'] = str(proxy_port)

            config['discovery']['label_filters'] = [
                {
                    'label': '%s:%s' % (advertise_type, get_current_location(advertise_type)),
                    'value': '',
                    'condition': 'equals',
                },
            ]

            config['haproxy']['backend_name'] = backend_identifier
            synapse_config['services'][backend_identifier] = config

        # populate the ACLs to route to the service backends
        synapse_config['services'][service_name]['haproxy']['frontend'].extend(
            generate_acls_for_service(
                service_name=service_name,
                discover_type=discover_type,
                advertise_types=advertise_types,
            )
        )

    return synapse_config
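# Illustrative sketch of the advertise-type sort above. It assumes
# available_location_types() returns types ordered broadest first (the
# specific names here are made up), so a larger depth means a more specific
# type and reverse=True puts the most specific types first.
available_locations = ['superregion', 'region', 'az']
location_depth_mapping = {loc: depth for depth, loc in enumerate(available_locations)}
# -> {'superregion': 0, 'region': 1, 'az': 2}

advertise = ['region', 'az', 'bogus']  # 'bogus' is filtered out as invalid
advertise_types = sorted(
    [typ for typ in advertise if typ in set(available_locations)],
    key=lambda typ: location_depth_mapping[typ],
    reverse=True,  # most specific first
)
assert advertise_types == ['az', 'region']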
def generate_subconfiguration(service_name, advertise, extra_advertise,
                              port, ip_address, healthcheck_timeout_s,
                              hacheck_uri, healthcheck_headers, hacheck_port,
                              weight, zk_topology_dir, zk_location_type,
                              zk_cluster_type):
    config = {}

    # Register at the specified location types in the current superregion
    locations_to_register_in = set()
    for advertise_typ in advertise:
        locations_to_register_in.add(
            (get_current_location(advertise_typ), advertise_typ))

    # Also register in any other locations specified in extra advertisements
    for (src, dst) in extra_advertise:
        src_typ, src_loc = src.split(':')
        dst_typ, dst_loc = dst.split(':')
        if get_current_location(src_typ) != src_loc:
            # We do not match the source
            continue

        # Convert the destination into the 'advertise' type(s)
        for advertise_typ in advertise:
            # Prevent upcasts, otherwise the service may be made available to
            # more hosts than intended.
            if compare_types(dst_typ, advertise_typ) > 0:
                continue
            for loc in convert_location_type(dst_loc, dst_typ, advertise_typ):
                locations_to_register_in.add((loc, advertise_typ))

    # Create a separate service entry for each location that we need to
    # register in.
    for loc, typ in locations_to_register_in:
        zk_locations = convert_location_type(loc, typ, zk_location_type)
        for zk_location in zk_locations:
            try:
                zookeeper_topology = get_named_zookeeper_topology(
                    cluster_type=zk_cluster_type,
                    cluster_location=zk_location,
                    zk_topology_dir=zk_topology_dir,
                )
            except Exception:
                continue

            key = '%s.%s.%s:%s.%d.new' % (
                service_name, zk_location, typ, loc, port)

            config[key] = {
                'port': port,
                'host': ip_address,
                'weight': weight,
                'zk_hosts': zookeeper_topology,
                'zk_path': '/nerve/%s:%s/%s' % (typ, loc, service_name),
                'check_interval': healthcheck_timeout_s + 1.0,
                # Hit the localhost hacheck instance
                'checks': [{
                    'type': 'http',
                    'host': '127.0.0.1',
                    'port': hacheck_port,
                    'uri': hacheck_uri,
                    'timeout': healthcheck_timeout_s,
                    'open_timeout': healthcheck_timeout_s,
                    'rise': 1,
                    'fall': 2,
                    'headers': healthcheck_headers,
                }]
            }

    return config
def generate_configuration(synapse_tools_config, zookeeper_topology, services):
    synapse_config = generate_base_config(synapse_tools_config)
    available_locations = available_location_types()
    location_depth_mapping = {
        loc: depth
        for depth, loc in enumerate(available_locations)
    }
    available_locations = set(available_locations)

    proxies = [
        service_info['proxied_through']
        for _, service_info in services
        if service_info.get('proxied_through') is not None
    ]

    for (service_name, service_info) in services:
        proxy_port = service_info.get('proxy_port', -1)
        # If we end up with the default value or a negative number in general,
        # then we know that the service does not want to be in SmartStack.
        if proxy_port is not None and proxy_port < 0:
            continue
        # Note that at this point proxy_port can be:
        # * valid number: wants load balancing (HAProxy/Nginx)
        # * None: wants discovery, but no load balancing (files)

        discover_type = service_info.get('discover', 'region')
        advertise_types = sorted(
            [
                advertise_typ
                for advertise_typ in service_info.get('advertise', ['region'])
                # don't consider invalid advertise types
                if advertise_typ in available_locations
            ],
            key=lambda typ: location_depth_mapping[typ],
            reverse=True,  # consider the most specific types first
        )
        if discover_type not in advertise_types:
            return {}

        base_watcher_cfg = base_watcher_cfg_for_service(
            service_name=service_name,
            service_info=service_info,
            zookeeper_topology=zookeeper_topology,
            synapse_tools_config=synapse_tools_config,
            is_proxy=service_name in proxies,
        )

        socket_path = _get_socket_path(
            synapse_tools_config, service_name
        )

        for advertise_type in advertise_types:
            backend_identifier = get_backend_name(
                service_name, discover_type, advertise_type
            )
            config = copy.deepcopy(base_watcher_cfg)

            config['discovery']['label_filters'] = [
                {
                    'label': '%s:%s' % (advertise_type, get_current_location(advertise_type)),
                    'value': '',
                    'condition': 'equals',
                },
            ]

            if proxy_port is None:
                config['haproxy'] = {'disabled': True}
                if synapse_tools_config['listen_with_nginx']:
                    config['nginx'] = {'disabled': True}
            else:
                if advertise_type == discover_type:
                    # Specify a proxy port to create a frontend for this service.
                    if synapse_tools_config['listen_with_haproxy']:
                        config['haproxy']['port'] = str(proxy_port)
                        config['haproxy']['frontend'].append(
                            'bind {0}'.format(socket_path)
                        )
                    # If listen_with_haproxy is False, then have
                    # HAProxy bind only to the socket. Nginx may or may not
                    # be listening on ports based on listen_with_nginx values
                    # at this stage.
                    else:
                        config['haproxy']['port'] = None
                        config['haproxy']['bind_address'] = _get_socket_path(
                            synapse_tools_config, service_name
                        )
                else:
                    # The backend-only watchers don't need a frontend
                    # because they have no listen port, so Synapse doesn't
                    # generate a frontend section for them at all.
                    del config['haproxy']['frontend']

            config['haproxy']['backend_name'] = backend_identifier
            synapse_config['services'][backend_identifier] = config

        if proxy_port is not None:
            proxied_through = service_info.get('proxied_through')
            healthcheck_uri = service_info.get('healthcheck_uri', '/status')

            # populate the ACLs to route to the service backends
            synapse_config['services'][service_name]['haproxy']['frontend'].extend(
                generate_acls_for_service(
                    service_name=service_name,
                    discover_type=discover_type,
                    advertise_types=advertise_types,
                    proxied_through=proxied_through,
                    healthcheck_uri=healthcheck_uri,
                )
            )

            # If nginx is supported, include a single additional static
            # service watcher per service that listens on the right port and
            # proxies back to the unix socket exposed by HAProxy.
            if synapse_tools_config['listen_with_nginx']:
                listener_name = '{0}.nginx_listener'.format(service_name)
                synapse_config['services'][listener_name] = (
                    _generate_nginx_for_watcher(
                        service_name, service_info, synapse_tools_config
                    )
                )

    return synapse_config
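# Illustrative summary of the two HAProxy watcher shapes the branch above
# produces for proxy_port 20973. The socket path is a made-up example value,
# not what _get_socket_path actually returns.
SOCK = '/var/run/synapse/example_service.sock'

# listen_with_haproxy=True: HAProxy owns the port and also binds the socket.
with_port = {'port': '20973', 'frontend': ['bind %s' % SOCK]}

# listen_with_haproxy=False: HAProxy gets no port and binds only the socket;
# nginx (when listen_with_nginx is set) fronts the externally visible port.
socket_only = {'port': None, 'bind_address': SOCK}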