def get_mysql_password(self, username=None, password=None):
    """Retrieve, generate or store a mysql password for the provided
    username using peer relation cluster."""
    excludes = []

    # First check peer relation.
    try:
        for key in self.passwd_keys(username):
            _password = peer_retrieve(key)
            if _password:
                break

        # If root password available don't update peer relation from local
        if _password and not username:
            excludes.append(self.root_passwd_file_template)

    except ValueError:
        # cluster relation is not yet started; use on-disk
        _password = None

    # If none available, generate new one
    if not _password:
        _password = self.get_mysql_password_on_disk(username, password)

    # Put on wire if required
    if self.migrate_passwd_to_peer_relation:
        self.migrate_passwords_to_peer_relation(excludes=excludes)

    return _password
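
# A minimal sketch (not one of the charm snippets in this collection) of the
# peer_retrieve() helper the hooks below call, approximated from
# charmhelpers.contrib.peerstorage: it reads the key from the first peer
# relation and raises ValueError when no peer relation exists yet, which is
# why several callers fall back to on-disk values. Treat the exact body as an
# approximation rather than the canonical implementation.
from charmhelpers.core.hookenv import local_unit, relation_get, relation_ids


def peer_retrieve(key, relation_name='cluster'):
    """Retrieve a named key from the peer relation `relation_name`."""
    cluster_rels = relation_ids(relation_name)
    if cluster_rels:
        cluster_rid = cluster_rels[0]
        return relation_get(attribute=key, rid=cluster_rid,
                            unit=local_unit())
    else:
        raise ValueError('Unable to detect peer relation '
                         '{}'.format(relation_name))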
def cluster_changed(relation_id=None, remote_unit=None):
    # Future travelers beware ordering is significant
    rdata = relation_get(rid=relation_id, unit=remote_unit)

    # sync passwords
    blacklist = ['hostname', 'private-address', 'public-address']
    whitelist = [a for a in rdata.keys() if a not in blacklist]
    peer_echo(includes=whitelist)

    cookie = peer_retrieve('cookie')
    if not cookie:
        log('cluster_changed: cookie not yet set.', level=INFO)
        return

    if rdata:
        hostname = rdata.get('hostname', None)
        private_address = rdata.get('private-address', None)

        if hostname and private_address:
            rabbit.update_hosts_file({private_address: hostname})

    # sync the cookie with peers if necessary
    update_cookie()

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.', level=INFO)
        return

    # NOTE(freyes): all the nodes need to be marked as 'clustered'
    # (LP: #1691510)
    rabbit.cluster_with()

    if not is_leader():
        update_nrpe_checks()
def leader_node():
    ''' Provide the leader node for clustering '''
    # Each rabbitmq node should join_cluster with the leader
    # to avoid split-brain clusters.
    leader_node_ip = peer_retrieve('leader_node_ip')
    if leader_node_ip:
        return "rabbit@" + get_node_hostname(leader_node_ip)
def cluster_changed():
    CONFIGS.write_all()
    if hookenv.relation_ids('cluster'):
        ch_peerstorage.peer_echo(includes=['dbsync_state'])
        dbsync_state = ch_peerstorage.peer_retrieve('dbsync_state')
        if dbsync_state == 'complete':
            if not ch_utils.is_unit_paused_set():
                for svc in ncc_utils.services():
                    ch_host.service_resume(svc)
            else:
                hookenv.log('Unit is in paused state, not issuing '
                            'start/resume to all services')
        else:
            if not ch_utils.is_unit_paused_set():
                hookenv.log('Database sync not ready. Shutting down services')
                for svc in ncc_utils.services():
                    ch_host.service_pause(svc)
            else:
                hookenv.log(
                    'Database sync not ready. Would shut down services but '
                    'unit is in paused state, not issuing stop/pause to all '
                    'services')
    # The shared metadata secret is stored in the leader-db and if it's
    # changed the gateway needs to know.
    for rid in hookenv.relation_ids('quantum-network-service'):
        quantum_joined(rid=rid, remote_restart=False)
def cluster_changed():
    shared_secret = peer_retrieve('shared_secret')
    if shared_secret is None or shared_secret.strip() == '':
        log('waiting for shared secret to be provided by leader')
    elif shared_secret != get_shared_secret():
        set_shared_secret(shared_secret)
    CONFIGS.write_all()
def is_db_initialised():
    if relation_ids('cluster'):
        dbsync_state = peer_retrieve('dbsync_state')
        if dbsync_state == 'complete':
            log("Database is initialised", level=DEBUG)
            return True

    log("Database is NOT initialised", level=DEBUG)
    return False
def leader_node():
    '''
    Provide the leader node for clustering

    @returns leader node's hostname or None
    '''
    # Each rabbitmq node should join_cluster with the leader
    # to avoid split-brain clusters.
    leader_node_hostname = peer_retrieve('leader_node_hostname')
    if leader_node_hostname:
        return "rabbit@" + leader_node_hostname
    else:
        return None
def cluster_changed():
    CONFIGS.write_all()
    if relation_ids('cluster'):
        peer_echo(includes=['dbsync_state'])
        dbsync_state = peer_retrieve('dbsync_state')
        if dbsync_state == 'complete':
            enable_services()
            cmd_all_services('start')
        else:
            log('Database sync not ready. Shutting down services')
            disable_services()
            cmd_all_services('stop')
def cluster_changed(relation_id=None, remote_unit=None):
    # Future travelers beware ordering is significant
    rdata = relation_get(rid=relation_id, unit=remote_unit)

    # sync passwords
    blacklist = ['hostname', 'private-address', 'public-address']
    whitelist = [a for a in rdata.keys() if a not in blacklist]
    peer_echo(includes=whitelist)

    cookie = peer_retrieve('cookie')
    if not cookie:
        log('cluster_changed: cookie not yet set.', level=INFO)
        return

    if rdata:
        hostname = rdata.get('hostname', None)
        private_address = rdata.get('private-address', None)

        if hostname and private_address:
            rabbit.update_hosts_file({private_address: hostname})

    # sync the cookie with peers if necessary
    update_cookie()

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.', level=INFO)
        return

    if rabbit.is_sufficient_peers():
        # NOTE(freyes): all the nodes need to be marked as 'clustered'
        # (LP: #1691510)
        rabbit.cluster_with()

        # Local rabbit maybe clustered now so check and inform clients if
        # needed.
        update_clients()

        if is_leader():
            if (leader_get(rabbit.CLUSTER_MODE_KEY) !=
                    config(rabbit.CLUSTER_MODE_KEY)):
                log("Informing peers via leaderdb to change {} to {}".format(
                    rabbit.CLUSTER_MODE_KEY,
                    config(rabbit.CLUSTER_MODE_KEY)))
                leader_set({rabbit.CLUSTER_MODE_KEY:
                            config(rabbit.CLUSTER_MODE_KEY)})
                rabbit.ConfigRenderer(rabbit.CONFIG_FILES).write_all()

    if not is_leader() and is_relation_made('nrpe-external-master'):
        update_nrpe_checks()
def cluster_changed():
    # Future travelers beware ordering is significant
    rdata = relation_get()

    # sync passwords
    blacklist = ['hostname', 'private-address', 'public-address']
    whitelist = [a for a in rdata.keys() if a not in blacklist]
    peer_echo(includes=whitelist)

    cookie = peer_retrieve('cookie')
    if not cookie:
        log('cluster_changed: cookie not yet set.', level=INFO)
        return

    rdata = relation_get()
    if rdata:
        hostname = rdata.get('hostname', None)
        private_address = rdata.get('private-address', None)

        if hostname and private_address:
            rabbit.update_hosts_file({private_address: hostname})

    if not is_sufficient_peers():
        log('Not enough peers, waiting until leader is configured',
            level=INFO)
        return

    # sync the cookie with peers if necessary
    update_cookie()

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.', level=INFO)
        return

    # cluster with node?
    try:
        if not is_leader():
            rabbit.cluster_with()
            update_nrpe_checks()
    except NotImplementedError:
        if is_newer():
            rabbit.cluster_with()
            update_nrpe_checks()

    # If cluster has changed peer db may have changed so run amqp_changed
    # to sync any changes
    for rid in relation_ids('amqp'):
        for unit in related_units(rid):
            amqp_changed(relation_id=rid, remote_unit=unit)
def update_cookie():
    # sync cookie
    cookie = peer_retrieve('cookie')

    cookie_local = None
    with open(rabbit.COOKIE_PATH, 'r') as f:
        cookie_local = f.read().strip()

    if cookie_local == cookie:
        log('Cookie already synchronized with peer.')
        return

    log('Synchronizing erlang cookie from peer.', level=INFO)
    service_stop('rabbitmq-server')
    with open(rabbit.COOKIE_PATH, 'wb') as out:
        out.write(cookie)
    service_restart('rabbitmq-server')
def leader_node():
    '''
    Provide the leader node for clustering

    @returns leader node's hostname or None
    '''
    # Each rabbitmq node should join_cluster with the leader
    # to avoid split-brain clusters.
    try:
        leader_node_hostname = peer_retrieve('leader_node_hostname')
    except ValueError:
        # This is a single unit
        return None

    if leader_node_hostname:
        return "rabbit@" + leader_node_hostname
    else:
        return None
def cluster_changed():
    cookie = peer_retrieve('cookie')
    if not cookie:
        log('cluster_changed: cookie not yet set.', level=INFO)
        return

    rdata = relation_get()
    if config('prefer-ipv6') and rdata.get('hostname'):
        private_address = rdata['private-address']
        hostname = rdata['hostname']
        if hostname:
            rabbit.update_hosts_file({private_address: hostname})

    # sync passwords
    blacklist = ['hostname', 'private-address', 'public-address']
    whitelist = [a for a in rdata.keys() if a not in blacklist]
    peer_echo(includes=whitelist)

    if not is_sufficient_peers():
        # Stop rabbit until leader has finished configuring
        service_stop('rabbitmq-server')
        return

    # sync the cookie with peers if necessary
    update_cookie()

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.', level=INFO)
        return

    # cluster with node?
    try:
        if not is_leader():
            rabbit.cluster_with()
            update_nrpe_checks()
    except NotImplementedError:
        if is_newer():
            rabbit.cluster_with()
            update_nrpe_checks()

    # If cluster has changed peer db may have changed so run amqp_changed
    # to sync any changes
    for rid in relation_ids('amqp'):
        for unit in related_units(rid):
            amqp_changed(relation_id=rid, remote_unit=unit)
def get_rabbit_password(username, password=None, local=False):
    ''' Retrieve, generate or store a rabbit password for
    the provided username using peer relation cluster'''
    if local:
        return get_rabbit_password_on_disk(username, password, local)
    else:
        migrate_passwords_to_peer_relation()
        _key = '{}.passwd'.format(username)
        try:
            _password = peer_retrieve(_key)
            if _password is None:
                _password = password or pwgen(length=64)
                peer_store(_key, _password)
        except ValueError:
            # cluster relation is not yet started, use on-disk
            _password = get_rabbit_password_on_disk(username, password)
        return _password
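
# A companion sketch of the peer_store() helper that get_rabbit_password()
# above uses to publish generated passwords, approximated from
# charmhelpers.contrib.peerstorage: it writes the key/value pair onto the
# first peer relation and, like peer_retrieve(), raises ValueError while the
# peer relation is not yet established. The body is an approximation, not the
# canonical implementation.
from charmhelpers.core.hookenv import relation_ids, relation_set


def peer_store(key, value, relation_name='cluster'):
    """Store a key/value pair on the peer relation `relation_name`."""
    cluster_rels = relation_ids(relation_name)
    if cluster_rels:
        relation_set(relation_id=cluster_rels[0],
                     relation_settings={key: value})
    else:
        raise ValueError('Unable to detect peer relation '
                         '{}'.format(relation_name))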
def update_cookie(leaders_cookie=None):
    # sync cookie
    if leaders_cookie:
        cookie = leaders_cookie
    else:
        cookie = peer_retrieve('cookie')

    cookie_local = None
    with open(rabbit.COOKIE_PATH, 'r') as f:
        cookie_local = f.read().strip()

    if cookie_local == cookie:
        log('Cookie already synchronized with peer.')
        return

    service_stop('rabbitmq-server')
    with open(rabbit.COOKIE_PATH, 'wb') as out:
        out.write(cookie)
    service_restart('rabbitmq-server')
    rabbit.wait_app()
def update_cookie(leaders_cookie=None):
    # sync cookie
    if leaders_cookie:
        cookie = leaders_cookie
    else:
        cookie = peer_retrieve('cookie')

    cookie_local = None
    with open(rabbit.COOKIE_PATH, 'r') as f:
        cookie_local = f.read().strip()

    if cookie_local == cookie:
        log('Cookie already synchronized with peer.')
        return

    service_stop('rabbitmq-server')
    with open(rabbit.COOKIE_PATH, 'wb') as out:
        out.write(cookie)
    if not is_unit_paused_set():
        service_restart('rabbitmq-server')
        rabbit.wait_app()
def update_cookie(leaders_cookie=None):
    # sync cookie
    if leaders_cookie:
        cookie = leaders_cookie
    else:
        cookie = peer_retrieve('cookie')

    cookie_local = None
    with open(rabbit.COOKIE_PATH, 'r') as f:
        cookie_local = f.read().strip()

    if cookie_local == cookie:
        log('Cookie already synchronized with peer.')
        return
    elif not is_restart_permitted():
        raise Exception("rabbitmq-server must be restarted, but restarts "
                        "are not permitted")

    service_stop('rabbitmq-server')
    with open(rabbit.COOKIE_PATH, 'wb') as out:
        out.write(cookie.encode('ascii'))
    if not is_unit_paused_set():
        service_restart('rabbitmq-server')
        rabbit.wait_app()
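
# A hypothetical caller sketch (not taken from the charm source): the
# leaders_cookie parameter above suggests the leader can push its cookie via
# Juju leader storage, e.g. from a leader-settings-changed hook. The 'cookie'
# leader-storage key used here is an assumption for illustration only.
from charmhelpers.core.hookenv import leader_get


def leader_settings_changed():
    cookie = leader_get('cookie')  # assumed leader-storage key
    if cookie:
        update_cookie(leaders_cookie=cookie)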
def cluster_changed(relation_id=None, remote_unit=None):
    # Future travelers beware ordering is significant
    rdata = relation_get(rid=relation_id, unit=remote_unit)

    # sync passwords
    blacklist = ['hostname', 'private-address', 'public-address']
    whitelist = [a for a in rdata.keys() if a not in blacklist]
    peer_echo(includes=whitelist)

    cookie = peer_retrieve('cookie')
    if not cookie:
        log('cluster_changed: cookie not yet set.', level=INFO)
        return

    if rdata:
        hostname = rdata.get('hostname', None)
        private_address = rdata.get('private-address', None)

        if hostname and private_address:
            rabbit.update_hosts_file({private_address: hostname})

    # sync the cookie with peers if necessary
    update_cookie()

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.', level=INFO)
        return

    if rabbit.is_sufficient_peers():
        # NOTE(freyes): all the nodes need to be marked as 'clustered'
        # (LP: #1691510)
        rabbit.cluster_with()

    if not is_leader() and is_relation_made('nrpe-external-master'):
        update_nrpe_checks()
def test_peer_retrieve_with_relation(self):
    self.relation_ids.return_value = FAKE_RELATION_IDS
    peerstorage.peer_retrieve('key', self.fake_relation_name)
    self.relation_get.assert_called_with(attribute='key',
                                         rid=FAKE_RELATION_IDS[0],
                                         unit=FAKE_LOCAL_UNIT)
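
# A hypothetical companion test mirroring the peer_retrieve test above for
# peer_store(). It assumes the same FAKE_* fixtures and mocked relation
# helpers are provided by the surrounding test class, and that peer_store()
# writes via relation_set() with a relation_settings dict.
def test_peer_store_with_relation(self):
    self.relation_ids.return_value = FAKE_RELATION_IDS
    peerstorage.peer_store('key', 'value', self.fake_relation_name)
    self.relation_set.assert_called_with(
        relation_id=FAKE_RELATION_IDS[0],
        relation_settings={'key': 'value'})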