def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS.

    returns: boolean
    '''
    use_https = config_get('use-https')
    if use_https and bool_from_string(use_https):
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    for r_id in relation_ids('certificates'):
        for unit in relation_list(r_id):
            ca = relation_get('ca', rid=r_id, unit=unit)
            if ca:
                return True
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
            rel_state = [
                relation_get('https_keystone', rid=r_id, unit=unit),
                relation_get('ca_cert', rid=r_id, unit=unit),
            ]
            # NOTE: works around (LP: #1203241)
            if (None not in rel_state) and ('' not in rel_state):
                return True
    return False

def distributed_wait(modulo=None, wait=None, operation_name='operation'):
    ''' Distribute operations by waiting based on modulo_distribution

    If modulo and/or wait are not set, check config_get for those values.
    If config values are not set, default to modulo=3 and wait=30.

    :param modulo: int The modulo number creates the group distribution
    :param wait: int The constant time wait value
    :param operation_name: string Operation name for status message
                           i.e. 'restart'
    :side effect: Calls config_get()
    :side effect: Calls log()
    :side effect: Calls status_set()
    :side effect: Calls time.sleep()
    '''
    if modulo is None:
        modulo = config_get('modulo-nodes') or 3
    if wait is None:
        wait = config_get('known-wait') or 30
    if juju_is_leader():
        # The leader should never wait
        calculated_wait = 0
    else:
        # non_zero_wait=True guarantees the non-leader who gets modulo 0
        # will still wait
        calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
                                              non_zero_wait=True)
    msg = "Waiting {} seconds for {} ...".format(calculated_wait,
                                                 operation_name)
    log(msg, DEBUG)
    status_set('maintenance', msg)
    time.sleep(calculated_wait)

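# A minimal usage sketch (not part of the source): stagger a service restart
# across peer units so they do not all bounce at once. The service_restart()
# helper and the 'haproxy' service name are assumed here for illustration.
def staggered_restart():
    # Leader restarts immediately; non-leaders sleep for a wait derived from
    # their unit number via modulo_distribution() before restarting.
    distributed_wait(operation_name='restart')
    service_restart('haproxy')
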
def config_proxy():
    ports = config_get("proxy_port").split(",")
    port_ints = convert_ports_config()
    file_loader = FileSystemLoader("templates")
    env = Environment(loader=file_loader)
    fe_part = env.get_template("fe_part.tmpl")
    with open("/etc/haproxy/haproxy.cfg", "w") as f:
        cfg = []
        pcount = 0
        if not port_ints:
            log("ports not defined, stopping...")
            block_service("proxy_port undefined, please define this config")
            return
        for b in config_get("backend_list").split(","):
            if pcount >= len(port_ints):
                log("WARNING: not enough ports supplied")
                block_service("Not enough ports supplied")
                return
            cfg.append(
                fe_part.render(bind_port=port_ints[pcount],
                               service_name="service_{}".format(pcount),
                               backend=b))
            pcount += 1
        f.write("\n\n\n".join(cfg))
    service_restart("haproxy")

def config_changed():
    status_set("maintenance", "Resetting configs...")
    if config_get("mode") == "balancer":
        config_balancer()
    else:
        config_proxy()
    for r_id in relation_ids('ha'):
        ha_joined(relation_id=r_id)
    if config_get("mode") == "balancer":
        status_set("active", "Now balancing...")
    else:
        status_set("active", "Now proxying")

def get_cert():
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if not (cert and key):
        log("Inspecting identity-service relations for SSL certificate.",
            level=INFO)
        cert = key = None
        for r_id in relation_ids('identity-service'):
            for unit in relation_list(r_id):
                if not cert:
                    cert = relation_get('ssl_cert',
                                        rid=r_id, unit=unit)
                if not key:
                    key = relation_get('ssl_key',
                                       rid=r_id, unit=unit)
    return (cert, key)

def config_changed():
    config_data = config_get()

    ensure_package_status(service_affecting_packages,
                          config_data['package_status'])

    old_service_ports = get_service_ports()
    old_stanzas = get_listen_stanzas()
    haproxy_globals = create_haproxy_globals()
    haproxy_defaults = create_haproxy_defaults()
    if config_data['enable_monitoring'] is True:
        haproxy_monitoring = create_monitoring_stanza()
    else:
        haproxy_monitoring = None
    remove_services()
    if not create_services():
        sys.exit()
    haproxy_services = load_services()
    update_sysctl(config_data)
    construct_haproxy_config(haproxy_globals,
                             haproxy_defaults,
                             haproxy_monitoring,
                             haproxy_services)

    if service_haproxy("check"):
        update_service_ports(old_service_ports, get_service_ports())
        service_haproxy("reload")
        if not (get_listen_stanzas() == old_stanzas):
            notify_website()
            notify_peer()
    else:
        # XXX Ideally the config should be restored to a working state if the
        # check fails, otherwise an inadvertent reload will cause the service
        # to be broken.
        log("HAProxy configuration check failed, exiting.")
        sys.exit(1)

def create_monitoring_stanza(service_name="haproxy_monitoring"):
    config_data = config_get()
    if config_data['enable_monitoring'] is False:
        return None
    monitoring_password = get_monitoring_password()
    if config_data['monitoring_password'] != "changeme":
        monitoring_password = config_data['monitoring_password']
    elif (monitoring_password is None and
          config_data['monitoring_password'] == "changeme"):
        monitoring_password = pwgen(length=20)
    monitoring_config = []
    monitoring_config.append("mode http")
    monitoring_config.append("acl allowed_cidr src %s" %
                             config_data['monitoring_allowed_cidr'])
    monitoring_config.append("block unless allowed_cidr")
    monitoring_config.append("stats enable")
    monitoring_config.append("stats uri /")
    monitoring_config.append("stats realm Haproxy\\ Statistics")
    monitoring_config.append("stats auth %s:%s" %
                             (config_data['monitoring_username'],
                              monitoring_password))
    monitoring_config.append("stats refresh %d" %
                             config_data['monitoring_stats_refresh'])
    return create_listen_stanza(service_name,
                                "0.0.0.0",
                                config_data['monitoring_port'],
                                monitoring_config)

def get_innodb_flush_log_at_trx_commit(self):
    """Get value for innodb_flush_log_at_trx_commit.

    Use the innodb-flush-log-at-trx-commit or the tuning-level setting
    translated by INNODB_FLUSH_CONFIG_VALUES to get the
    innodb_flush_log_at_trx_commit value.

    :returns: Numeric value for innodb_flush_log_at_trx_commit
    :rtype: Union[None, int]
    """
    _iflatc = config_get('innodb-flush-log-at-trx-commit')
    _tuning_level = config_get('tuning-level')
    if _iflatc:
        return _iflatc
    elif _tuning_level:
        return self.INNODB_FLUSH_CONFIG_VALUES.get(_tuning_level, 1)

def create_monitoring_stanza(service_name="haproxy_monitoring"):
    config_data = config_get()
    if config_data['enable_monitoring'] is False:
        return None
    monitoring_password = get_monitoring_password()
    if config_data['monitoring_password'] != "changeme":
        monitoring_password = config_data['monitoring_password']
    elif (monitoring_password is None and
          config_data['monitoring_password'] == "changeme"):
        monitoring_password = pwgen(length=20)
    monitoring_config = []
    monitoring_config.append("mode http")
    monitoring_config.append("acl allowed_cidr src %s" %
                             config_data['monitoring_allowed_cidr'])
    monitoring_config.append("block unless allowed_cidr")
    monitoring_config.append("stats enable")
    monitoring_config.append("stats uri /")
    monitoring_config.append("stats realm Haproxy\\ Statistics")
    monitoring_config.append(
        "stats auth %s:%s" % (config_data['monitoring_username'],
                              monitoring_password))
    monitoring_config.append("stats refresh %d" %
                             config_data['monitoring_stats_refresh'])
    return create_listen_stanza(service_name,
                                "0.0.0.0",
                                config_data['monitoring_port'],
                                monitoring_config)

def parse_config(self):
    """Parse charm configuration and calculate values for config files."""
    config = config_get()
    mysql_config = {}
    if 'max-connections' in config:
        mysql_config['max_connections'] = config['max-connections']

    # Set a sane default key_buffer size
    mysql_config['key_buffer'] = self.human_to_bytes('32M')
    total_memory = self.human_to_bytes(self.get_mem_total())

    log("Option 'dataset-size' has been deprecated, instead by default %d%% "
        "of system available RAM will be used for innodb_buffer_pool_size "
        "allocation" % (self.DEFAULT_INNODB_BUFFER_FACTOR * 100),
        level="WARN")

    innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None)

    if innodb_buffer_pool_size:
        innodb_buffer_pool_size = self.human_to_bytes(
            innodb_buffer_pool_size)

        if innodb_buffer_pool_size > total_memory:
            log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format(
                innodb_buffer_pool_size,
                total_memory), level='WARN')
    else:
        innodb_buffer_pool_size = int(
            total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR)

    mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size

    return mysql_config

def parse_config(self):
    """Parse charm configuration and calculate values for config files."""
    config = config_get()
    mysql_config = {}
    if 'max-connections' in config:
        mysql_config['max_connections'] = config['max-connections']

    if 'wait-timeout' in config:
        mysql_config['wait_timeout'] = config['wait-timeout']

    # Set a sane default key_buffer size
    mysql_config['key_buffer'] = self.human_to_bytes('32M')
    total_memory = self.human_to_bytes(self.get_mem_total())

    log("Option 'dataset-size' has been deprecated, instead by default %d%% "
        "of system available RAM will be used for innodb_buffer_pool_size "
        "allocation" % (self.DEFAULT_INNODB_BUFFER_FACTOR * 100),
        level="WARN")

    innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None)

    if innodb_buffer_pool_size:
        innodb_buffer_pool_size = self.human_to_bytes(
            innodb_buffer_pool_size)

        if innodb_buffer_pool_size > total_memory:
            log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format(
                innodb_buffer_pool_size,
                total_memory), level='WARN')
    else:
        innodb_buffer_pool_size = int(
            total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR)

    mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size

    return mysql_config

def get_hacluster_config(exclude_keys=None):
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip, os-internal-hostname,
        os-admin-hostname, os-public-hostname, os-access-hostname

    param: exclude_keys: list of setting key(s) to be excluded.
    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig or HAIncorrectConfig if settings are missing
            or incorrect.
    '''
    settings = [
        'ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
        'os-admin-hostname', 'os-public-hostname', 'os-access-hostname',
    ]
    conf = {}
    for setting in settings:
        if exclude_keys and setting in exclude_keys:
            continue

        conf[setting] = config_get(setting)

    if not valid_hacluster_config():
        raise HAIncorrectConfig('Insufficient or incorrect config data to '
                                'configure hacluster.')
    return conf

def parse_config(self):
    """Parse charm configuration and calculate values for config files."""
    config = config_get()
    mysql_config = {}
    if 'max-connections' in config:
        mysql_config['max_connections'] = config['max-connections']

    if 'wait-timeout' in config:
        mysql_config['wait_timeout'] = config['wait-timeout']

    if self.get_innodb_flush_log_at_trx_commit() is not None:
        mysql_config['innodb_flush_log_at_trx_commit'] = \
            self.get_innodb_flush_log_at_trx_commit()

    if self.get_innodb_change_buffering() is not None:
        mysql_config['innodb_change_buffering'] = config[
            'innodb-change-buffering']

    if 'innodb-io-capacity' in config:
        mysql_config['innodb_io_capacity'] = config['innodb-io-capacity']

    # Set a sane default key_buffer size
    mysql_config['key_buffer'] = self.human_to_bytes('32M')
    mysql_config[
        'innodb_buffer_pool_size'] = self.get_innodb_buffer_pool_size()
    return mysql_config

def get_hacluster_config(exclude_keys=None):
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip, os-internal-hostname,
        os-admin-hostname, os-public-hostname, os-access-hostname

    param: exclude_keys: list of setting key(s) to be excluded.
    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig or HAIncorrectConfig if settings are missing
            or incorrect.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
                'os-admin-hostname', 'os-public-hostname',
                'os-access-hostname']
    conf = {}
    for setting in settings:
        if exclude_keys and setting in exclude_keys:
            continue

        conf[setting] = config_get(setting)

    if not valid_hacluster_config():
        raise HAIncorrectConfig('Insufficient or incorrect config data to '
                                'configure hacluster.')
    return conf

def install_hook():
    if not os.path.exists(default_haproxy_service_config_dir):
        os.mkdir(default_haproxy_service_config_dir, 0600)
    apt_install('haproxy', fatal=True)
    ensure_package_status(service_affecting_packages,
                          config_get('package_status'))
    enable_haproxy()

def parse_config(self):
    """Parse charm configuration and calculate values for config files."""
    config = config_get()
    mysql_config = {}
    if 'max-connections' in config:
        mysql_config['max_connections'] = config['max-connections']

    if 'wait-timeout' in config:
        mysql_config['wait_timeout'] = config['wait-timeout']

    if 'innodb-flush-log-at-trx-commit' in config:
        mysql_config['innodb_flush_log_at_trx_commit'] = \
            config['innodb-flush-log-at-trx-commit']
    elif 'tuning-level' in config:
        mysql_config['innodb_flush_log_at_trx_commit'] = \
            self.INNODB_FLUSH_CONFIG_VALUES.get(config['tuning-level'], 1)

    if ('innodb-change-buffering' in config and
            config['innodb-change-buffering'] in
            self.INNODB_VALID_BUFFERING_VALUES):
        mysql_config['innodb_change_buffering'] = config[
            'innodb-change-buffering']

    if 'innodb-io-capacity' in config:
        mysql_config['innodb_io_capacity'] = config['innodb-io-capacity']

    # Set a sane default key_buffer size
    mysql_config['key_buffer'] = self.human_to_bytes('32M')
    total_memory = self.human_to_bytes(self.get_mem_total())

    dataset_bytes = config.get('dataset-size', None)
    innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None)

    if innodb_buffer_pool_size:
        innodb_buffer_pool_size = self.human_to_bytes(
            innodb_buffer_pool_size)
    elif dataset_bytes:
        log("Option 'dataset-size' has been deprecated, please use "
            "innodb_buffer_pool_size option instead", level="WARN")
        innodb_buffer_pool_size = self.human_to_bytes(dataset_bytes)
    else:
        # NOTE(jamespage): pick the smallest of 50% of RAM or 512MB
        #                  to ensure that deployments in containers
        #                  without constraints don't try to consume
        #                  silly amounts of memory.
        innodb_buffer_pool_size = min(
            int(total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR),
            self.DEFAULT_INNODB_BUFFER_SIZE_MAX)

    if innodb_buffer_pool_size > total_memory:
        log("innodb_buffer_pool_size; {} is greater than system available memory:{}"
            .format(innodb_buffer_pool_size, total_memory), level='WARN')

    mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size
    return mysql_config

def install_hook():
    # Run both during initial install and during upgrade-charm.
    if not os.path.exists(default_haproxy_service_config_dir):
        os.mkdir(default_haproxy_service_config_dir, 0600)
    apt_install(['haproxy', 'python-jinja2'], fatal=True)
    ensure_package_status(service_affecting_packages,
                          config_get('package_status'))
    enable_haproxy()

def get_ca_cert():
    ca_cert = config_get("ssl_ca")
    if ca_cert is None:
        log("Inspecting identity-service relations for CA SSL certificate.",
            level=INFO)
        for r_id in relation_ids("identity-service"):
            for unit in relation_list(r_id):
                if ca_cert is None:
                    ca_cert = relation_get("ca_cert",
                                           rid=r_id, unit=unit)
    return ca_cert

def parse_config(self):
    """Parse charm configuration and calculate values for config files."""
    config = config_get()
    mysql_config = {}
    if 'max-connections' in config:
        mysql_config['max_connections'] = config['max-connections']

    if 'wait-timeout' in config:
        mysql_config['wait_timeout'] = config['wait-timeout']

    if 'innodb-flush-log-at-trx-commit' in config:
        mysql_config['innodb_flush_log_at_trx_commit'] = \
            config['innodb-flush-log-at-trx-commit']
    elif 'tuning-level' in config:
        mysql_config['innodb_flush_log_at_trx_commit'] = \
            self.INNODB_FLUSH_CONFIG_VALUES.get(config['tuning-level'], 1)

    if ('innodb-change-buffering' in config and
            config['innodb-change-buffering'] in
            self.INNODB_VALID_BUFFERING_VALUES):
        mysql_config['innodb_change_buffering'] = config[
            'innodb-change-buffering']

    if 'innodb-io-capacity' in config:
        mysql_config['innodb_io_capacity'] = config['innodb-io-capacity']

    # Set a sane default key_buffer size
    mysql_config['key_buffer'] = self.human_to_bytes('32M')
    total_memory = self.human_to_bytes(self.get_mem_total())

    dataset_bytes = config.get('dataset-size', None)
    innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None)

    if innodb_buffer_pool_size:
        innodb_buffer_pool_size = self.human_to_bytes(
            innodb_buffer_pool_size)
    elif dataset_bytes:
        log("Option 'dataset-size' has been deprecated, please use "
            "innodb_buffer_pool_size option instead", level="WARN")
        innodb_buffer_pool_size = self.human_to_bytes(
            dataset_bytes)
    else:
        # NOTE(jamespage): pick the smallest of 50% of RAM or 512MB
        #                  to ensure that deployments in containers
        #                  without constraints don't try to consume
        #                  silly amounts of memory.
        innodb_buffer_pool_size = min(
            int(total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR),
            self.DEFAULT_INNODB_BUFFER_SIZE_MAX
        )

    if innodb_buffer_pool_size > total_memory:
        log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format(
            innodb_buffer_pool_size,
            total_memory), level='WARN')

    mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size
    return mysql_config

def get_ca_cert():
    ca_cert = config_get('ssl_ca')
    if ca_cert is None:
        log("Inspecting identity-service relations for CA SSL certificate.",
            level=INFO)
        for r_id in relation_ids('identity-service'):
            for unit in relation_list(r_id):
                if ca_cert is None:
                    ca_cert = relation_get('ca_cert',
                                           rid=r_id, unit=unit)
    return ca_cert

def valid_hacluster_config():
    '''
    Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
    must be set.

    Note: ha-bindiface and ha-mcastport both have defaults and will always
    be set. We only care that either vip or dns-ha is set.

    :returns: boolean: valid config returns true.
    raises: HAIncorrectConfig if settings conflict.
    raises: HAIncompleteConfig if settings are missing.
    '''
    vip = config_get('vip')
    dns = config_get('dns-ha')
    if not (bool(vip) ^ bool(dns)):
        msg = ('HA: Either vip or dns-ha must be set but not both in order to '
               'use high availability')
        status_set('blocked', msg)
        raise HAIncorrectConfig(msg)

    # If dns-ha then one of os-*-hostname must be set
    if dns:
        dns_settings = [
            'os-internal-hostname',
            'os-admin-hostname',
            'os-public-hostname',
            'os-access-hostname',
        ]
        # At this point it is unknown if one or all of the possible
        # network spaces are in HA. Validate at least one is set which is
        # the minimum required.
        for setting in dns_settings:
            if config_get(setting):
                log('DNS HA: At least one hostname is set {}: {}'
                    ''.format(setting, config_get(setting)),
                    level=DEBUG)
                return True

        msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
               'DNS HA')
        status_set('blocked', msg)
        raise HAIncompleteConfig(msg)

    log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
    return True

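# Behaviour sketch for valid_hacluster_config(), using hypothetical values:
#   vip='10.0.0.10', dns-ha unset                    -> returns True (VIP HA)
#   vip unset, dns-ha=True, os-public-hostname set   -> returns True (DNS HA)
#   vip unset, dns-ha=True, no os-*-hostname set     -> raises HAIncompleteConfig
#   both vip and dns-ha set, or neither set          -> raises HAIncorrectConfig
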
def normalize_address(self, hostname):
    """Ensure that address returned is an IP address (i.e. not fqdn)"""
    if config_get('prefer-ipv6'):
        # TODO: add support for ipv6 dns
        return hostname

    if hostname != unit_get('private-address'):
        return get_host_ip(hostname, fallback=hostname)

    # Otherwise assume localhost
    return '127.0.0.1'

def get_cert(cn=None):
    # TODO: deal with multiple https endpoints via charm config
    cert = config_get("ssl_cert")
    key = config_get("ssl_key")
    if not (cert and key):
        log("Inspecting identity-service relations for SSL certificate.",
            level=INFO)
        cert = key = None
        if cn:
            ssl_cert_attr = "ssl_cert_{}".format(cn)
            ssl_key_attr = "ssl_key_{}".format(cn)
        else:
            ssl_cert_attr = "ssl_cert"
            ssl_key_attr = "ssl_key"
        for r_id in relation_ids("identity-service"):
            for unit in relation_list(r_id):
                if not cert:
                    cert = relation_get(ssl_cert_attr,
                                        rid=r_id, unit=unit)
                if not key:
                    key = relation_get(ssl_key_attr,
                                       rid=r_id, unit=unit)
    return (cert, key)

def valid_hacluster_config():
    '''
    Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
    must be set.

    Note: ha-bindiface and ha-mcastport both have defaults and will always
    be set. We only care that either vip or dns-ha is set.

    :returns: boolean: valid config returns true.
    raises: HAIncorrectConfig if settings conflict.
    raises: HAIncompleteConfig if settings are missing.
    '''
    vip = config_get('vip')
    dns = config_get('dns-ha')
    if not (bool(vip) ^ bool(dns)):
        msg = ('HA: Either vip or dns-ha must be set but not both in order to '
               'use high availability')
        status_set('blocked', msg)
        raise HAIncorrectConfig(msg)

    # If dns-ha then one of os-*-hostname must be set
    if dns:
        dns_settings = ['os-internal-hostname', 'os-admin-hostname',
                        'os-public-hostname', 'os-access-hostname']
        # At this point it is unknown if one or all of the possible
        # network spaces are in HA. Validate at least one is set which is
        # the minimum required.
        for setting in dns_settings:
            if config_get(setting):
                log('DNS HA: At least one hostname is set {}: {}'
                    ''.format(setting, config_get(setting)),
                    level=DEBUG)
                return True

        msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
               'DNS HA')
        status_set('blocked', msg)
        raise HAIncompleteConfig(msg)

    log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
    return True

def convert_ports_config():
    ports = config_get("proxy_port").split(",")
    port_ints = []
    for p in ports:
        if ":" in p:
            port_ints.extend(
                range(int(p.split(":")[0]), int(p.split(":")[-1]) + 1))
        else:
            port_ints.append(int(p))
    port_ints.sort()
    return port_ints

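# Worked example (hypothetical config value): with proxy_port set to
# "8080,9000:9002", the colon range expands inclusively, so
# convert_ports_config() returns [8080, 9000, 9001, 9002].
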
def get_innodb_change_buffering(self):
    """Get value for innodb_change_buffering.

    Use the innodb-change-buffering setting validated against
    INNODB_VALID_BUFFERING_VALUES to get the innodb_change_buffering value.

    :returns: String value for innodb_change_buffering.
    :rtype: Union[None, str]
    """
    _icb = config_get('innodb-change-buffering')
    if _icb and _icb in self.INNODB_VALID_BUFFERING_VALUES:
        return _icb

def get_ca_cert():
    ca_cert = config_get('ssl_ca')
    if ca_cert is None:
        log("Inspecting identity-service relations for CA SSL certificate.",
            level=INFO)
        for r_id in (relation_ids('identity-service') +
                     relation_ids('identity-credentials')):
            for unit in relation_list(r_id):
                if ca_cert is None:
                    ca_cert = relation_get('ca_cert',
                                           rid=r_id, unit=unit)
    return ca_cert

def get_cert(cn=None):
    # TODO: deal with multiple https endpoints via charm config
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if not (cert and key):
        log("Inspecting identity-service relations for SSL certificate.",
            level=INFO)
        cert = key = None
        if cn:
            ssl_cert_attr = 'ssl_cert_{}'.format(cn)
            ssl_key_attr = 'ssl_key_{}'.format(cn)
        else:
            ssl_cert_attr = 'ssl_cert'
            ssl_key_attr = 'ssl_key'
        for r_id in relation_ids('identity-service'):
            for unit in relation_list(r_id):
                if not cert:
                    cert = relation_get(ssl_cert_attr,
                                        rid=r_id, unit=unit)
                if not key:
                    key = relation_get(ssl_key_attr,
                                       rid=r_id, unit=unit)
    return (cert, key)

def get_innodb_buffer_pool_size(self):
    """Get value for innodb_buffer_pool_size.

    Return the numeric value of innodb-buffer-pool-size or dataset-size. If
    neither is set, calculate a sane default based on total memory.

    :returns: Numeric value for innodb_buffer_pool_size.
    :rtype: int
    """
    total_memory = self.human_to_bytes(self.get_mem_total())

    dataset_bytes = config_get('dataset-size')
    innodb_buffer_pool_size = config_get('innodb-buffer-pool-size')

    if innodb_buffer_pool_size:
        innodb_buffer_pool_size = self.human_to_bytes(
            innodb_buffer_pool_size)
    elif dataset_bytes:
        log("Option 'dataset-size' has been deprecated, please use "
            "innodb_buffer_pool_size option instead", level="WARN")
        innodb_buffer_pool_size = self.human_to_bytes(
            dataset_bytes)
    else:
        # NOTE(jamespage): pick the smallest of 50% of RAM or 512MB
        #                  to ensure that deployments in containers
        #                  without constraints don't try to consume
        #                  silly amounts of memory.
        innodb_buffer_pool_size = min(
            int(total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR),
            self.DEFAULT_INNODB_BUFFER_SIZE_MAX
        )

    if innodb_buffer_pool_size > total_memory:
        log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format(
            innodb_buffer_pool_size,
            total_memory), level='WARN')

    return innodb_buffer_pool_size

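# Worked example (illustrative values): on a host with 4 GiB of RAM and
# neither innodb-buffer-pool-size nor dataset-size configured, the fallback
# branch picks min(50% of RAM, 512 MB) = min(2 GiB, 512 MB) = 512 MB, assuming
# DEFAULT_INNODB_BUFFER_FACTOR is 0.5 and DEFAULT_INNODB_BUFFER_SIZE_MAX is
# 512 MB, as the NOTE in the fallback branch describes.
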
def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS.

    returns: boolean
    '''
    if config_get('use-https') == "yes":
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            rel_state = [
                relation_get('https_keystone', rid=r_id, unit=unit),
                relation_get('ssl_cert', rid=r_id, unit=unit),
                relation_get('ssl_key', rid=r_id, unit=unit),
                relation_get('ca_cert', rid=r_id, unit=unit),
            ]
            # NOTE: works around (LP: #1203241)
            if (None not in rel_state) and ('' not in rel_state):
                return True
    return False

def create_haproxy_defaults():
    config_data = config_get()
    default_options = comma_split(config_data['default_options'])
    default_timeouts = comma_split(config_data['default_timeouts'])
    haproxy_defaults = []
    haproxy_defaults.append("defaults")
    haproxy_defaults.append("    log %s" % config_data['default_log'])
    haproxy_defaults.append("    mode %s" % config_data['default_mode'])
    for option_item in default_options:
        haproxy_defaults.append("    option %s" % option_item.strip())
    haproxy_defaults.append("    retries %d" %
                            config_data['default_retries'])
    for timeout_item in default_timeouts:
        haproxy_defaults.append("    timeout %s" % timeout_item.strip())
    return '\n'.join(haproxy_defaults)

def distributed_wait(modulo=None, wait=None, operation_name='operation'):
    ''' Distribute operations by waiting based on modulo_distribution

    If modulo and/or wait are not set, check config_get for those values.

    :param modulo: int The modulo number creates the group distribution
    :param wait: int The constant time wait value
    :param operation_name: string Operation name for status message
                           i.e. 'restart'
    :side effect: Calls config_get()
    :side effect: Calls log()
    :side effect: Calls status_set()
    :side effect: Calls time.sleep()
    '''
    if modulo is None:
        modulo = config_get('modulo-nodes')
    if wait is None:
        wait = config_get('known-wait')
    calculated_wait = modulo_distribution(modulo=modulo, wait=wait)
    msg = "Waiting {} seconds for {} ...".format(calculated_wait,
                                                 operation_name)
    log(msg, DEBUG)
    status_set('maintenance', msg)
    time.sleep(calculated_wait)

def create_haproxy_globals():
    config_data = config_get()
    global_log = comma_split(config_data['global_log'])
    haproxy_globals = []
    haproxy_globals.append('global')
    for global_log_item in global_log:
        haproxy_globals.append("    log %s" % global_log_item.strip())
    haproxy_globals.append("    maxconn %d" % config_data['global_maxconn'])
    haproxy_globals.append("    user %s" % config_data['global_user'])
    haproxy_globals.append("    group %s" % config_data['global_group'])
    if config_data['global_debug'] is True:
        haproxy_globals.append("    debug")
    if config_data['global_quiet'] is True:
        haproxy_globals.append("    quiet")
    haproxy_globals.append("    spread-checks %d" %
                           config_data['global_spread_checks'])
    return '\n'.join(haproxy_globals)

def write_metrics_cronjob(script_path, cron_path):
    config_data = config_get()
    if config_data['enable_monitoring'] is False:
        log("enable_monitoring must be set to true for metrics")
        delete_metrics_cronjob(cron_path)
        return

    # need the following two configs to be valid
    metrics_target = config_data['metrics_target'].strip()
    metrics_sample_interval = config_data['metrics_sample_interval']
    if (not metrics_target or
            ':' not in metrics_target or
            not metrics_sample_interval):
        log("Required config not found or invalid "
            "(metrics_target, metrics_sample_interval), "
            "disabling metrics")
        delete_metrics_cronjob(cron_path)
        return

    charm_dir = os.environ['CHARM_DIR']
    statsd_host, statsd_port = metrics_target.split(':', 1)
    metrics_prefix = config_data['metrics_prefix'].strip()
    metrics_prefix = metrics_prefix.replace(
        "$UNIT", local_unit().replace('.', '-').replace('/', '-'))
    haproxy_hostport = ":".join(['localhost',
                                 str(config_data['monitoring_port'])])
    haproxy_httpauth = ":".join([config_data['monitoring_username'].strip(),
                                 get_monitoring_password()])

    # ensure script installed
    shutil.copy2('%s/files/metrics/haproxy_to_statsd.sh' % charm_dir,
                 metrics_script_path)

    # write the crontab
    with open(cron_path, 'w') as cronjob:
        cronjob.write(render_template("metrics_cronjob.template", {
            'interval': config_data['metrics_sample_interval'],
            'script': script_path,
            'metrics_prefix': metrics_prefix,
            'metrics_sample_interval': metrics_sample_interval,
            'haproxy_hostport': haproxy_hostport,
            'haproxy_httpauth': haproxy_httpauth,
            'statsd_host': statsd_host,
            'statsd_port': statsd_port,
        }))

def canonical_url(configs, vip_setting='vip'):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs    : OSTemplateRenderer: A config templating object to inspect
                  for a complete https context.

    :vip_setting: str: Setting in charm config that specifies
                  VIP address.
    '''
    scheme = 'http'
    if 'https' in configs.complete_contexts():
        scheme = 'https'
    if is_clustered():
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)

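# Usage sketch (hypothetical values): with a complete https context and a
# clustered unit whose 'vip' option is '10.0.0.100', canonical_url(configs)
# returns 'https://10.0.0.100'; an unclustered unit without HTTPS would
# instead yield 'http://<private-address>'.
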
def get_hacluster_config():
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr

    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
    conf = {}
    for setting in settings:
        conf[setting] = config_get(setting)
    missing = [s for s, v in conf.iteritems() if v is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        raise HAIncompleteConfig
    return conf

def parse_config(self):
    """Parse charm configuration and calculate values for config files."""
    config = config_get()
    mysql_config = {}
    if 'max-connections' in config:
        mysql_config['max_connections'] = config['max-connections']

    if 'wait-timeout' in config:
        mysql_config['wait_timeout'] = config['wait-timeout']

    if 'innodb-flush-log-at-trx-commit' in config:
        mysql_config['innodb_flush_log_at_trx_commit'] = config[
            'innodb-flush-log-at-trx-commit']

    # Set a sane default key_buffer size
    mysql_config['key_buffer'] = self.human_to_bytes('32M')
    total_memory = self.human_to_bytes(self.get_mem_total())

    dataset_bytes = config.get('dataset-size', None)
    innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None)

    if innodb_buffer_pool_size:
        innodb_buffer_pool_size = self.human_to_bytes(
            innodb_buffer_pool_size)
    elif dataset_bytes:
        log("Option 'dataset-size' has been deprecated, please use "
            "innodb_buffer_pool_size option instead", level="WARN")
        innodb_buffer_pool_size = self.human_to_bytes(dataset_bytes)
    else:
        innodb_buffer_pool_size = int(total_memory *
                                      self.DEFAULT_INNODB_BUFFER_FACTOR)

    if innodb_buffer_pool_size > total_memory:
        log("innodb_buffer_pool_size; {} is greater than system available memory:{}"
            .format(innodb_buffer_pool_size, total_memory), level='WARN')

    mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size
    return mysql_config

def ensure_service_host_port(services):
    config_data = config_get()
    seen = []
    missing = []
    for service, options in sorted(services.iteritems()):
        if "service_host" not in options:
            missing.append(options)
            continue
        if "service_port" not in options:
            missing.append(options)
            continue
        seen.append((options["service_host"],
                     int(options["service_port"])))

    seen.sort()
    last_port = seen and seen[-1][1] or int(config_data["monitoring_port"])
    for options in missing:
        last_port += 2
        options["service_host"] = "0.0.0.0"
        options["service_port"] = last_port

    return services

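# Worked example (hypothetical input): if service 'web' already defines
# service_host='0.0.0.0' and service_port=80 while service 'api' defines
# neither, 'api' is filled in with service_host '0.0.0.0' and service_port 82
# (the highest port already seen, 80, plus 2); if no service defines a port at
# all, the first missing service instead gets monitoring_port + 2.
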
def config_changed():
    config_data = config_get()

    ensure_package_status(service_affecting_packages,
                          config_data['package_status'])

    old_service_ports = get_service_ports()
    old_stanzas = get_listen_stanzas()
    haproxy_globals = create_haproxy_globals()
    haproxy_defaults = create_haproxy_defaults()
    if config_data['enable_monitoring'] is True:
        haproxy_monitoring = create_monitoring_stanza()
    else:
        haproxy_monitoring = None
    remove_services()
    if not create_services():
        sys.exit()
    haproxy_services = load_services()
    update_sysctl(config_data)
    construct_haproxy_config(haproxy_globals,
                             haproxy_defaults,
                             haproxy_monitoring,
                             haproxy_services)

    write_metrics_cronjob(metrics_script_path,
                          metrics_cronjob_path)

    if service_haproxy("check"):
        update_service_ports(old_service_ports, get_service_ports())
        service_haproxy("reload")
        if not (get_listen_stanzas() == old_stanzas):
            notify_website()
            notify_peer()
    else:
        # XXX Ideally the config should be restored to a working state if the
        # check fails, otherwise an inadvertent reload will cause the service
        # to be broken.
        log("HAProxy configuration check failed, exiting.")
        sys.exit(1)

def parse_config(self):
    """Parse charm configuration and calculate values for config files."""
    config = config_get()
    mysql_config = {}
    if 'max-connections' in config:
        mysql_config['max_connections'] = config['max-connections']

    if 'wait-timeout' in config:
        mysql_config['wait_timeout'] = config['wait-timeout']

    if 'innodb-flush-log-at-trx-commit' in config:
        mysql_config['innodb_flush_log_at_trx_commit'] = \
            config['innodb-flush-log-at-trx-commit']

    # Set a sane default key_buffer size
    mysql_config['key_buffer'] = self.human_to_bytes('32M')
    total_memory = self.human_to_bytes(self.get_mem_total())

    dataset_bytes = config.get('dataset-size', None)
    innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None)

    if innodb_buffer_pool_size:
        innodb_buffer_pool_size = self.human_to_bytes(
            innodb_buffer_pool_size)
    elif dataset_bytes:
        log("Option 'dataset-size' has been deprecated, please use "
            "innodb_buffer_pool_size option instead", level="WARN")
        innodb_buffer_pool_size = self.human_to_bytes(
            dataset_bytes)
    else:
        innodb_buffer_pool_size = int(
            total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR)

    if innodb_buffer_pool_size > total_memory:
        log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format(
            innodb_buffer_pool_size,
            total_memory), level='WARN')

    mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size
    return mysql_config

def get_hacluster_config(exclude_keys=None):
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip

    param: exclude_keys: list of setting key(s) to be excluded.
    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
    conf = {}
    for setting in settings:
        if exclude_keys and setting in exclude_keys:
            continue

        conf[setting] = config_get(setting)
    missing = [s for s, v in six.iteritems(conf) if v is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        raise HAIncompleteConfig
    return conf

def get_config_services():
    config_data = config_get()
    services = {}
    return parse_services_yaml(services, config_data['services'])