def __get_monitoring_config_value(key, overrides, service, soa_dir=DEFAULT_SOA_DIR):
    """Resolve one monitoring setting for a service.

    Precedence, lowest to highest: monitoring defaults, the service.yaml
    top level, the service.yaml 'monitoring' sub-dict, monitoring.yaml,
    and finally the caller-supplied ``overrides``.
    """
    service_yaml = service_configuration_lib.read_service_configuration(service, soa_dir=soa_dir)
    monitoring_yaml = read_monitoring_config(service, soa_dir=soa_dir)
    value = service_yaml.get(key, monitoring_defaults(key))
    value = service_yaml.get('monitoring', {key: value}).get(key, value)
    value = monitoring_yaml.get(key, value)
    return overrides.get(key, value)
def check_local_healthcheck(service_name):
    """Makes a local HTTP healthcheck call to the service and returns True if
    it gets a 2XX response, else returns False.

    :param service_name: a string like 'service_one.main'
    :return: Whether healthcheck call was successful for a http service.
             Returns false for a tcp service.
    :rtype: boolean
    """
    srv_name, namespace = service_name.split('.')
    srv_config = read_service_configuration(srv_name)
    smartstack_config = srv_config.get('smartstack', {})
    namespace_config = smartstack_config.get(namespace, {})
    healthcheck_uri = namespace_config.get('healthcheck_uri', '/status')
    # Fall back to the service's main port when no dedicated healthcheck port is set.
    healthcheck_port = namespace_config.get('healthcheck_port', srv_config.get('port'))
    healthcheck_mode = namespace_config.get('mode', 'http')
    # TODO: Add support for TCP healthcheck using hacheck - Ref. RB: 109478
    if healthcheck_mode == 'http' and healthcheck_port:
        try:
            url = "http://{host}:{port}{uri}".format(host="127.0.0.1", port=healthcheck_port, uri=healthcheck_uri)
            requests.get(url).raise_for_status()
            return True
        except RequestException as e:
            # Fixed: Python 2 `print >> sys.stderr` is a SyntaxError on
            # Python 3; use the print() function with file= instead.
            print("Calling {0}, got - {1}".format(url, str(e)), file=sys.stderr)
    return False
def get_service_info(service):
    """Build a multi-line, human-readable summary of a service's metadata."""
    srv_conf = read_service_configuration(service)
    description = srv_conf.get('description', NO_DESCRIPTION_MESSAGE)
    external_link = srv_conf.get('external_link', NO_EXTERNAL_LINK_MESSAGE)
    pipeline_url = get_pipeline_url(service)
    smartstack_endpoints = get_smartstack_endpoints(service)
    git_url = get_git_url(service)
    lines = [
        'Service Name: %s' % service,
        'Description: %s' % description,
        'External Link: %s' % PaastaColors.cyan(external_link),
        'Monitored By: team %s' % get_team(service=service, overrides={}),
        'Runbook: %s' % PaastaColors.cyan(get_runbook(service=service, overrides={})),
        'Git Repo: %s' % git_url,
        'Jenkins Pipeline: %s' % pipeline_url,
        'Deployed to the following clusters:',
    ]
    lines.extend(get_deployments_strings(service))
    if smartstack_endpoints:
        lines.append('Smartstack endpoint(s):')
        lines.extend(' - %s' % endpoint for endpoint in smartstack_endpoints)
    lines.append('Dashboard(s):')
    lines.extend(get_dashboard_urls(service))
    return '\n'.join(lines)
def load_adhoc_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Load the AdhocJobConfig for one instance from adhoc-<cluster>.yaml,
    merged over the service-wide defaults in service.yaml."""
    service_defaults = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir
    )
    conf_file = "adhoc-%s" % cluster
    log.info("Reading adhoc configuration file: %s.yaml", conf_file)
    instance_configs = service_configuration_lib.read_extra_service_information(
        service_name=service, extra_info=conf_file, soa_dir=soa_dir
    )
    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            "%s not found in config file %s/%s/%s.yaml." % (instance, soa_dir, service, conf_file)
        )
    merged_config = deep_merge_dictionaries(overrides=instance_configs[instance], defaults=service_defaults)
    branch_dict = {}
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # branch defaults to paasta-<cluster>.<instance>; deploy_group defaults to the branch.
        branch = merged_config.get('branch', get_paasta_branch(cluster, instance))
        deploy_group = merged_config.get('deploy_group', branch)
        branch_dict = deployments_json.get_branch_dict_v2(service, branch, deploy_group)
    return AdhocJobConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged_config,
        branch_dict=branch_dict,
    )
def get_service_info(service, soa_dir):
    """Build a multi-line, human-readable summary of a service's metadata,
    reading configuration from ``soa_dir``."""
    srv_conf = read_service_configuration(service, soa_dir)
    description = srv_conf.get("description", NO_DESCRIPTION_MESSAGE)
    external_link = srv_conf.get("external_link", NO_EXTERNAL_LINK_MESSAGE)
    smartstack_endpoints = get_smartstack_endpoints(service, soa_dir)
    git_url = get_git_url(service, soa_dir)
    lines = [
        "Service Name: %s" % service,
        "Description: %s" % description,
        "External Link: %s" % PaastaColors.cyan(external_link),
        "Monitored By: team %s" % get_team(service=service, overrides={}, soa_dir=soa_dir),
        "Runbook: %s" % PaastaColors.cyan(
            get_runbook(service=service, overrides={}, soa_dir=soa_dir)),
        "Git Repo: %s" % git_url,
        "Deployed to the following clusters:",
    ]
    lines.extend(get_deployments_strings(service, soa_dir))
    if smartstack_endpoints:
        lines.append("Smartstack endpoint(s):")
        lines.extend(" - %s" % endpoint for endpoint in smartstack_endpoints)
    lines.append("Dashboard(s):")
    lines.extend(get_dashboard_urls(service))
    return "\n".join(lines)
def get_service_info(service, soa_dir):
    """Build a multi-line, human-readable summary of a service's metadata.

    NOTE(review): get_team/get_runbook are called without soa_dir here,
    unlike the other lookups — presumably they use their own default;
    confirm against their signatures.
    """
    srv_conf = read_service_configuration(service, soa_dir)
    description = srv_conf.get('description', NO_DESCRIPTION_MESSAGE)
    external_link = srv_conf.get('external_link', NO_EXTERNAL_LINK_MESSAGE)
    smartstack_endpoints = get_smartstack_endpoints(service, soa_dir)
    git_url = get_git_url(service, soa_dir)
    lines = [
        'Service Name: %s' % service,
        'Description: %s' % description,
        'External Link: %s' % PaastaColors.cyan(external_link),
        'Monitored By: team %s' % get_team(service=service, overrides={}),
        'Runbook: %s' % PaastaColors.cyan(get_runbook(service=service, overrides={})),
        'Git Repo: %s' % git_url,
        'Deployed to the following clusters:',
    ]
    lines.extend(get_deployments_strings(service, soa_dir))
    if smartstack_endpoints:
        lines.append('Smartstack endpoint(s):')
        lines.extend(' - %s' % endpoint for endpoint in smartstack_endpoints)
    lines.append('Dashboard(s):')
    lines.extend(get_dashboard_urls(service))
    return '\n'.join(lines)
def test_read_service_configuration(self, abs_patch, read_patch):
    # Verifies read_service_configuration() resolves soa_dir through the
    # patched abspath and delegates the actual read to the patched reader,
    # returning its value unchanged.
    # NOTE(review): 'cafe' is presumably abs_patch's configured return value
    # and 'bye' read_patch's — confirm against the mock decorators, which
    # are not visible in this chunk.
    expected = 'bye'
    actual = service_configuration_lib.read_service_configuration(
        'boba', soa_dir='tea')
    abs_patch.assert_called_once_with('tea')
    read_patch.assert_called_once_with('cafe', 'boba')
    T.assert_equal(expected, actual)
def load_chronos_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Load the ChronosJobConfig for one job from chronos-<cluster>.yaml,
    merged over the service-wide defaults in service.yaml."""
    service_defaults = service_configuration_lib.read_service_configuration(
        service,
        soa_dir=soa_dir,
    )
    service_chronos_jobs = read_chronos_jobs_for_service(service, cluster, soa_dir=soa_dir)
    if instance not in service_chronos_jobs:
        raise NoConfigurationForServiceError(
            'No job named "%s" in config file chronos-%s.yaml' % (instance, cluster))
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        branch = get_paasta_branch(cluster=cluster, instance=instance)
        branch_dict = deployments_json.get_branch_dict(service, branch)
    merged_config = deep_merge_dictionaries(
        overrides=service_chronos_jobs[instance], defaults=service_defaults)
    return ChronosJobConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def check_local_healthcheck(service_name):
    """Makes a local HTTP healthcheck call to the service and returns True if
    it gets a 2XX response, else returns False.

    :param service_name: a string like 'service_one.main'
    :return: Whether healthcheck call was successful for a http service.
             Returns false for a tcp service.
    :rtype: boolean
    """
    srv_name, namespace = service_name.split('.')
    srv_config = read_service_configuration(srv_name)
    smartstack_config = srv_config.get('smartstack', {})
    namespace_config = smartstack_config.get(namespace, {})
    healthcheck_uri = namespace_config.get('healthcheck_uri', '/status')
    # Fall back to the service's main port when no dedicated healthcheck port is set.
    healthcheck_port = namespace_config.get('healthcheck_port', srv_config.get('port'))
    healthcheck_mode = namespace_config.get('mode', 'http')
    # TODO: Add support for TCP healthcheck using hacheck - Ref. RB: 109478
    if healthcheck_mode == 'http' and healthcheck_port:
        try:
            url = "http://{host}:{port}{uri}".format(
                host="127.0.0.1", port=healthcheck_port, uri=healthcheck_uri)
            requests.get(url).raise_for_status()
            return True
        except RequestException as e:
            # Fixed: Python 2 `print >>sys.stderr` is a SyntaxError on
            # Python 3; use the print() function with file= instead.
            print("Calling {0}, got - {1}".format(url, str(e)), file=sys.stderr)
    return False
def _get_merged_config(
        self, config: utils.InstanceConfigDict) -> utils.InstanceConfigDict:
    """Overlay ``config`` onto the service-level defaults from service.yaml.

    The service.yaml contents are read lazily on first call and cached on
    the instance.
    """
    if self._general_config is None:
        self._general_config = read_service_configuration(
            service_name=self._service,
            soa_dir=self._soa_dir,
        )
    return deep_merge_dictionaries(
        overrides=config,
        defaults=self._general_config,
    )
def _should_manage_service(service_name):
    """Return whether updown should manage this 'service.namespace'.

    True only when the namespace declares a proxy_port and the service has
    not opted out via 'no_updown_service' in service.yaml.
    """
    srv_name, namespace = service_name.split('.')
    namespace_conf = load_service_namespace_config(srv_name, namespace)
    classic_conf = read_service_configuration(srv_name)
    has_proxy_port = namespace_conf.get('proxy_port') is not None
    opted_out = classic_conf.get('no_updown_service')
    return has_proxy_port and not opted_out
def load_marathon_service_config_no_cache(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Read a service instance's configuration for marathon.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param name: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding deployments.json for this service
                             should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A dictionary of whatever was in the config for the service instance"""
    log.info("Reading service configuration files from dir %s/ in %s" % (service, soa_dir))
    log.info("Reading general configuration file: service.yaml")
    general_config = service_configuration_lib.read_service_configuration(
        service,
        soa_dir=soa_dir,
    )
    marathon_conf_file = "marathon-%s" % cluster
    log.info("Reading marathon configuration file: %s.yaml", marathon_conf_file)
    instance_configs = service_configuration_lib.read_extra_service_information(
        service,
        marathon_conf_file,
        soa_dir=soa_dir,
    )
    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            "%s not found in config file %s/%s/%s.yaml." % (instance, soa_dir, service, marathon_conf_file))
    # Instance-level settings win over the service-wide defaults from service.yaml.
    general_config = deep_merge_dictionaries(
        overrides=instance_configs[instance],
        defaults=general_config)
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        # 'branch' from the merged config wins; otherwise fall back to
        # paasta-<cluster>.<instance>.
        branch = general_config.get('branch', get_paasta_branch(cluster, instance))
        branch_dict = deployments_json.get_branch_dict(service, branch)
    return MarathonServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def _should_manage_service(service_name):
    """Return whether updown should manage this 'service.namespace'.

    Manage whenever 'proxy_port' is present in the namespace config (even as
    None) and the service has not opted out via 'no_updown_service'.
    """
    srv_name, namespace = service_name.split('.')
    namespace_conf = load_service_namespace_config(srv_name, namespace)
    classic_conf = read_service_configuration(srv_name)
    # None is a valid value of proxy_port indicating a discovery only service,
    # so use -1 as the "key absent" sentinel rather than None.
    has_proxy_key = namespace_conf.get('proxy_port', -1) != -1
    opted_out = classic_conf.get('no_updown_service')
    return has_proxy_key and not opted_out
def _get_merged_config(self, config):
    """Overlay ``config`` onto the lazily-cached service.yaml defaults."""
    if self._general_config is None:
        # Read service.yaml only once per instance.
        self._general_config = read_service_configuration(
            service_name=self._service,
            soa_dir=self._soa_dir,
        )
    return deep_merge_dictionaries(overrides=config, defaults=self._general_config)
def get_git_url(service, soa_dir=DEFAULT_SOA_DIR):
    """Get the git url for a service.

    Assumes that the service's repo matches its name, and that it lives in
    services- i.e. if this is called with the string 'test', the returned
    url will be [email protected]:services/test.git.

    :param service: The service name to get a URL for
    :returns: A git url to the service's repository"""
    config = service_configuration_lib.read_service_configuration(service, soa_dir=soa_dir)
    fallback = "[email protected]:services/%s.git" % service
    return config.get("git_url", fallback)
def get_service_lines_for_service(service):
    """List 'name (port/tcp)' entries for a service's own port and for each
    smartstack namespace's proxy port."""
    lines = []
    main_config = service_configuration_lib.read_service_configuration(service)
    main_port = main_config.get('port', None)
    if main_port is not None:
        lines.append("%s (%d/tcp)" % (service, main_port))
    for namespace, ns_config in get_all_namespaces_for_service(service, full_name=False):
        proxy_port = ns_config.get('proxy_port', None)
        if proxy_port is not None:
            lines.append("%s (%d/tcp)" % (compose_job_id(service, namespace), proxy_port))
    return lines
def load_monkrelaycluster_instance_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> MonkRelayClusterDeploymentConfig:
    """Read a service instance's configuration for MonkRelayCluster.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding deployments.json for this service
                             should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A dictionary of whatever was in the config for the service instance"""
    general_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir)
    instance_config = load_service_instance_config(service, instance, "monkrelays", cluster, soa_dir=soa_dir)
    # Instance-level settings win over the service-wide defaults from service.yaml.
    general_config = deep_merge_dictionaries(overrides=instance_config, defaults=general_config)
    branch_dict: Optional[BranchDictV2] = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Throwaway config object (branch_dict=None) used only so that
        # branch/deploy_group resolution can see the merged config.
        temp_instance_config = MonkRelayClusterDeploymentConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=general_config,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch = temp_instance_config.get_branch()
        deploy_group = temp_instance_config.get_deploy_group()
        branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
    return MonkRelayClusterDeploymentConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def __init__(
    self,
    soa_dir: str,
    service_name: str,
    cluster_names: List[str],
    **kwargs: Any,
) -> None:
    """Record the SOA layout for one service and load its encryption key.

    Secrets live under <soa_dir>/<service_name>/secrets; the key name comes
    from 'encryption_key' in service.yaml, defaulting to 'paasta'.
    """
    self.soa_dir = soa_dir
    self.service_name = service_name
    self.cluster_names = cluster_names
    self.secret_dir = os.path.join(self.soa_dir, self.service_name, "secrets")
    config = read_service_configuration(self.service_name, self.soa_dir)
    self.encryption_key = config.get('encryption_key', 'paasta')
def get_service_lines_for_service(service):
    """Build '/etc/services'-style entries ("name\\tport/tcp\\t# description")
    for the service's own port and each smartstack proxy port, returned as
    utf-8 encoded bytes."""
    entries = []
    top_config = service_configuration_lib.read_service_configuration(service)
    main_port = top_config.get('port', None)
    description = top_config.get('description', "No description")
    if main_port is not None:
        entries.append("%s\t%d/tcp\t# %s" % (service, main_port, description))
    for namespace, ns_config in get_all_namespaces_for_service(service, full_name=False):
        proxy_port = ns_config.get('proxy_port', None)
        if proxy_port is not None:
            # The service-level description is reused for every namespace.
            entries.append("%s\t%d/tcp\t# %s" % (compose_job_id(service, namespace), proxy_port, description))
    return [entry.encode('utf-8') for entry in entries]
def load_chronos_job_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> 'ChronosJobConfig':
    """Read one chronos job's configuration from chronos-<cluster>.yaml,
    merged over the service-wide defaults in service.yaml.

    :param service: The service name
    :param instance: The job (instance) name to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: Whether to also resolve branch info from deployments.json (v2)
    :param soa_dir: The SOA configuration directory to read from
    :raises InvalidJobNameError: if the instance name starts with '_'
    :raises NoConfigurationForServiceError: if the job is not in the config file
    """
    general_config = service_configuration_lib.read_service_configuration(
        service,
        soa_dir=soa_dir,
    )
    # Leading-underscore names are reserved / not loadable as jobs.
    if instance.startswith('_'):
        raise InvalidJobNameError(
            "Unable to load chronos job config for %s.%s as instance name starts with '_'" % (service, instance),
        )
    service_chronos_jobs = read_chronos_jobs_for_service(service, cluster, soa_dir=soa_dir)
    if instance not in service_chronos_jobs:
        raise NoConfigurationForServiceError(
            'No job named "%s" in config file chronos-%s.yaml' % (instance, cluster))
    branch_dict = None
    # Job-level settings win over the service-wide defaults.
    general_config = deep_merge_dictionaries(
        overrides=service_chronos_jobs[instance],
        defaults=general_config)
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Throwaway config object (branch_dict=None) used only so that
        # branch/deploy_group resolution can see the merged config.
        temp_instance_config = ChronosJobConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=general_config,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch = temp_instance_config.get_branch()
        deploy_group = temp_instance_config.get_deploy_group()
        branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
    return ChronosJobConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def get_git_url(service, soa_dir=DEFAULT_SOA_DIR):
    """Get the git url for a service.

    Assumes that the service's repo matches its name, and that it lives in
    services- i.e. if this is called with the string 'test', the returned
    url will be [email protected]:services/test.git.

    :param service: The service name to get a URL for
    :returns: A git url to the service's repository"""
    config = service_configuration_lib.read_service_configuration(
        service,
        soa_dir=soa_dir,
    )
    return config.get('git_url', '[email protected]:services/%s.git' % service)
def load_adhoc_job_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Read one adhoc instance's configuration from adhoc-<cluster>.yaml,
    merged over the service-wide defaults in service.yaml.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: Whether to also resolve branch info from deployments.json (v2)
    :param soa_dir: The SOA configuration directory to read from
    :raises NoConfigurationForServiceError: if the instance is not in the config file
    """
    general_config = service_configuration_lib.read_service_configuration(
        service,
        soa_dir=soa_dir,
    )
    adhoc_conf_file = "adhoc-%s" % cluster
    instance_configs = service_configuration_lib.read_extra_service_information(
        service_name=service,
        extra_info=adhoc_conf_file,
        soa_dir=soa_dir,
    )
    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            "%s not found in config file %s/%s/%s.yaml." % (instance, soa_dir, service, adhoc_conf_file),
        )
    # Instance-level settings win over the service-wide defaults.
    general_config = deep_merge_dictionaries(
        overrides=instance_configs[instance],
        defaults=general_config)
    branch_dict = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Throwaway config object (branch_dict=None) used only so that
        # branch/deploy_group resolution can see the merged config.
        temp_instance_config = AdhocJobConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=general_config,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch = temp_instance_config.get_branch()
        deploy_group = temp_instance_config.get_deploy_group()
        branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
    return AdhocJobConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def load_marathon_service_config(service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR):
    """Read a service instance's configuration for marathon.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param name: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding deployments.json for this service
                             should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A dictionary of whatever was in the config for the service instance"""
    log.info("Reading service configuration files from dir %s/ in %s" % (service, soa_dir))
    log.info("Reading general configuration file: service.yaml")
    general_config = service_configuration_lib.read_service_configuration(
        service,
        soa_dir=soa_dir
    )
    marathon_conf_file = "marathon-%s" % cluster
    log.info("Reading marathon configuration file: %s.yaml", marathon_conf_file)
    instance_configs = service_configuration_lib.read_extra_service_information(
        service,
        marathon_conf_file,
        soa_dir=soa_dir
    )
    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            "%s not found in config file %s/%s/%s.yaml." % (instance, soa_dir, service, marathon_conf_file)
        )
    # Instance-level settings win over the service-wide defaults from service.yaml.
    general_config = deep_merge_dictionaries(overrides=instance_configs[instance], defaults=general_config)
    branch_dict = {}
    if load_deployments:
        deployments_json = load_deployments_json(service, soa_dir=soa_dir)
        # 'branch' from the merged config wins; otherwise fall back to
        # paasta-<cluster>.<instance>.
        branch = general_config.get('branch', get_paasta_branch(cluster, instance))
        branch_dict = deployments_json.get_branch_dict(service, branch)
    return MarathonServiceConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
    )
def get_all_namespaces_for_service(service, soa_dir=DEFAULT_SOA_DIR, full_name=True):
    """Get all the smartstack namespaces listed for a given service name.

    :param service: The service name
    :param soa_dir: The SOA config directory to read from
    :param full_name: A boolean indicating if the service name should be prepended to the namespace in the
                      returned tuples as described below (Default: True)
    :returns: A list of tuples of the form (service<SPACER>namespace, namespace_config) if full_name is true,
              otherwise of the form (namespace, namespace_config)
    """
    service_config = service_configuration_lib.read_service_configuration(service, soa_dir)
    smartstack = service_config.get('smartstack', {})
    return [
        (compose_job_id(service, namespace) if full_name else namespace, smartstack[namespace])
        for namespace in smartstack
    ]
def load_vitess_instance_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> VitessDeploymentConfig:
    """Read one vitess instance's configuration, merged over service.yaml.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: Whether to also resolve branch info from deployments.json (v2)
    :param soa_dir: The SOA configuration directory to read from
    """
    general_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir)
    # Bug fix: the instance type was passed as " vitesscluster" (with a
    # leading space), which could never match the real "vitesscluster"
    # config file type (compare "monkrelays"/"adhoc" in sibling loaders).
    instance_config = load_service_instance_config(service, instance, "vitesscluster", cluster, soa_dir=soa_dir)
    # Instance-level settings win over the service-wide defaults.
    general_config = deep_merge_dictionaries(overrides=instance_config, defaults=general_config)
    branch_dict: Optional[BranchDictV2] = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Throwaway config object (branch_dict=None) used only so that
        # branch/deploy_group resolution can see the merged config.
        temp_instance_config = VitessDeploymentConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=general_config,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch = temp_instance_config.get_branch()
        deploy_group = temp_instance_config.get_deploy_group()
        branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
    return VitessDeploymentConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def load_adhoc_job_config(
    service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR
):
    """Load an AdhocJobConfig: the instance config merged over service.yaml,
    optionally with branch info resolved from deployments.json (v2)."""
    service_defaults = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir
    )
    instance_config = load_service_instance_config(
        service=service,
        instance=instance,
        instance_type="adhoc",
        cluster=cluster,
        soa_dir=soa_dir,
    )
    merged_config = deep_merge_dictionaries(
        overrides=instance_config, defaults=service_defaults
    )
    branch_dict = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Throwaway config object (branch_dict=None) used only so that
        # branch/deploy_group resolution can see the merged config.
        bootstrap_config = AdhocJobConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=merged_config,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch = bootstrap_config.get_branch()
        deploy_group = bootstrap_config.get_deploy_group()
        branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
    return AdhocJobConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=merged_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def load_service_namespace_config(
        service: str, namespace: str, soa_dir: str = DEFAULT_SOA_DIR) -> ServiceNamespaceConfig:
    """Attempt to read the configuration for a service's namespace in a more strict fashion.

    Retrieves the following keys:

    - proxy_port: the proxy port defined for the given namespace
    - healthcheck_mode: the mode for the healthcheck (http or tcp)
    - healthcheck_port: An alternate port to use for health checking
    - healthcheck_uri: URI target for healthchecking
    - healthcheck_timeout_s: healthcheck timeout in seconds
    - healthcheck_body_expect: an expected string in healthcheck response body
    - updown_timeout_s: updown_service timeout in seconds
    - timeout_connect_ms: proxy frontend timeout in milliseconds
    - timeout_server_ms: proxy server backend timeout in milliseconds
    - timeout_client_ms: proxy server client timeout in milliseconds
    - retries: the number of retries on a proxy backend
    - mode: the mode the service is run in (http or tcp)
    - routes: a list of tuples of (source, destination)
    - discover: the scope at which to discover services e.g. 'habitat'
    - advertise: a list of scopes to advertise services at e.g. ['habitat', 'region']
    - extra_advertise: a list of tuples of (source, destination)
      e.g. [('region:dc6-prod', 'region:useast1-prod')]
    - extra_healthcheck_headers: a dict of HTTP headers that must
      be supplied when health checking. E.g. { 'Host': 'example.com' }

    :param service: The service name
    :param namespace: The namespace to read
    :param soa_dir: The SOA config directory to read from
    :returns: A dict of the above keys, if they were defined
    """
    service_config = service_configuration_lib.read_service_configuration(
        service_name=service,
        soa_dir=soa_dir,
    )
    smartstack_config = service_config.get('smartstack', {})
    namespace_config_from_file = smartstack_config.get(namespace, {})

    service_namespace_config = ServiceNamespaceConfig()
    # We can't really use .get, as we don't want the key to be in the returned
    # dict at all if it doesn't exist in the config file.
    # We also can't just copy the whole dict, as we only care about some keys
    # and there's other things that appear in the smartstack section in
    # several cases.
    key_whitelist = {
        'healthcheck_mode',
        'healthcheck_uri',
        'healthcheck_port',
        'healthcheck_timeout_s',
        'healthcheck_body_expect',
        'updown_timeout_s',
        'proxy_port',
        'timeout_connect_ms',
        'timeout_server_ms',
        'timeout_client_ms',
        'retries',
        'mode',
        'discover',
        'advertise',
        'extra_healthcheck_headers',
    }

    for key, value in namespace_config_from_file.items():
        if key in key_whitelist:
            service_namespace_config[key] = value

    # Other code in paasta_tools checks 'mode' after the config file
    # is loaded, so this ensures that it is set to the appropriate default
    # if not otherwise specified, even if appropriate default is None.
    service_namespace_config['mode'] = service_namespace_config.get_mode()

    if 'routes' in namespace_config_from_file:
        # Expand each route's destination list into (source, destination) pairs.
        service_namespace_config['routes'] = [
            (route['source'], dest)
            for route in namespace_config_from_file['routes']
            for dest in route['destinations']
        ]

    if 'extra_advertise' in namespace_config_from_file:
        # Flatten the {source: [destinations]} mapping into (src, dst) pairs.
        service_namespace_config['extra_advertise'] = [
            (src, dst)
            for src in namespace_config_from_file['extra_advertise']
            for dst in namespace_config_from_file['extra_advertise'][src]
        ]

    return service_namespace_config
def get_pipeline_config(service, soa_dir):
    """Return the 'deploy.pipeline' list from service.yaml, or [] if unset."""
    config = read_service_configuration(service, soa_dir)
    return config.get('deploy', {}).get('pipeline', [])
def load_kubernetes_service_config_no_cache(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> "KubernetesDeploymentConfig":
    """Read a service instance's configuration for kubernetes.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param name: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding deployments.json for this service
                             should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A dictionary of whatever was in the config for the service instance"""
    general_config = service_configuration_lib.read_service_configuration(
        service,
        soa_dir=soa_dir,
    )
    kubernetes_conf_file = "kubernetes-%s" % cluster
    instance_configs = service_configuration_lib.read_extra_service_information(
        service,
        kubernetes_conf_file,
        soa_dir=soa_dir,
    )
    # Leading-underscore names are reserved / not loadable as instances.
    if instance.startswith('_'):
        raise InvalidJobNameError(
            f"Unable to load kubernetes job config for {service}.{instance} as instance name starts with '_'",
        )
    if instance not in instance_configs:
        raise NoConfigurationForServiceError(
            f"{instance} not found in config file {soa_dir}/{service}/{kubernetes_conf_file}.yaml.",
        )
    # Instance-level settings win over the service-wide defaults from service.yaml.
    general_config = deep_merge_dictionaries(
        overrides=instance_configs[instance],
        defaults=general_config)
    branch_dict: Optional[BranchDictV2] = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        # Throwaway config object (branch_dict=None) used only so that
        # branch/deploy_group resolution can see the merged config.
        temp_instance_config = KubernetesDeploymentConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=general_config,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch = temp_instance_config.get_branch()
        deploy_group = temp_instance_config.get_deploy_group()
        branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
    return KubernetesDeploymentConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )
def load_service_namespace_config(service, namespace, soa_dir=DEFAULT_SOA_DIR):
    """Attempt to read the configuration for a service's namespace in a more strict fashion.

    Retrieves the following keys:

    - proxy_port: the proxy port defined for the given namespace
    - healthcheck_mode: the mode for the healthcheck (http or tcp)
    - healthcheck_port: An alternate port to use for health checking
    - healthcheck_uri: URI target for healthchecking
    - healthcheck_timeout_s: healthcheck timeout in seconds
    - updown_timeout_s: updown_service timeout in seconds
    - timeout_connect_ms: proxy frontend timeout in milliseconds
    - timeout_server_ms: proxy server backend timeout in milliseconds
    - timeout_client_ms: proxy server client timeout in milliseconds
    - retries: the number of retries on a proxy backend
    - mode: the mode the service is run in (http or tcp)
    - routes: a list of tuples of (source, destination)
    - discover: the scope at which to discover services e.g. 'habitat'
    - advertise: a list of scopes to advertise services at e.g. ['habitat', 'region']
    - extra_advertise: a list of tuples of (source, destination)
      e.g. [('region:dc6-prod', 'region:useast1-prod')]
    - extra_healthcheck_headers: a dict of HTTP headers that must
      be supplied when health checking. E.g. { 'Host': 'example.com' }

    :param service: The service name
    :param namespace: The namespace to read
    :param soa_dir: The SOA config directory to read from
    :returns: A dict of the above keys, if they were defined
    """
    service_config = service_configuration_lib.read_service_configuration(service, soa_dir)
    smartstack_config = service_config.get('smartstack', {})
    namespace_config_from_file = smartstack_config.get(namespace, {})

    service_namespace_config = ServiceNamespaceConfig()
    # We can't really use .get, as we don't want the key to be in the returned
    # dict at all if it doesn't exist in the config file.
    # We also can't just copy the whole dict, as we only care about some keys
    # and there's other things that appear in the smartstack section in
    # several cases.
    key_whitelist = set([
        'healthcheck_mode',
        'healthcheck_uri',
        'healthcheck_port',
        'healthcheck_timeout_s',
        'updown_timeout_s',
        'proxy_port',
        'timeout_connect_ms',
        'timeout_server_ms',
        'timeout_client_ms',
        'retries',
        'mode',
        'discover',
        'advertise',
        'extra_healthcheck_headers'
    ])

    for key, value in namespace_config_from_file.items():
        if key in key_whitelist:
            service_namespace_config[key] = value

    # Other code in paasta_tools checks 'mode' after the config file
    # is loaded, so this ensures that it is set to the appropriate default
    # if not otherwise specified, even if appropriate default is None.
    service_namespace_config['mode'] = service_namespace_config.get_mode()

    if 'routes' in namespace_config_from_file:
        # Expand each route's destination list into (source, destination) pairs.
        service_namespace_config['routes'] = [(route['source'], dest)
                                              for route in namespace_config_from_file['routes']
                                              for dest in route['destinations']]

    if 'extra_advertise' in namespace_config_from_file:
        # Flatten the {source: [destinations]} mapping into (src, dst) pairs.
        service_namespace_config['extra_advertise'] = [
            (src, dst)
            for src in namespace_config_from_file['extra_advertise']
            for dst in namespace_config_from_file['extra_advertise'][src]
        ]

    return service_namespace_config
def get_pipeline_config(service, soa_dir):
    """Return the 'deploy.pipeline' list from service.yaml, or [] if unset."""
    config = read_service_configuration(service, soa_dir)
    return config.get("deploy", {}).get("pipeline", [])