def upgrade():
    """Handle the upgrade-charm hook.

    On the leader (unless paused), read the wsrep state from the running
    cluster and broadcast the bootstrap UUID to peers.  On non-leaders,
    mirror the leader's bootstrap-uuid onto the peer relation, going into
    a waiting state until the leader has published it.  Finally re-run
    config_changed() to re-render configuration under the new charm.
    """
    if is_leader():
        if is_unit_paused_set():
            # Typo fix: was "skiping".
            log('Unit is paused, skipping upgrade', level=INFO)
            return

        # Broadcast the bootstrap-uuid so peers can mark themselves
        # bootstrapped against the running cluster.
        wsrep_ready = get_wsrep_value('wsrep_ready') or ""
        if wsrep_ready.lower() in ['on', 'ready']:
            cluster_state_uuid = get_wsrep_value('wsrep_cluster_state_uuid')
            if cluster_state_uuid:
                mark_seeded()
                notify_bootstrapped(cluster_uuid=cluster_state_uuid)
    else:
        # Ensure all the peers have the bootstrap-uuid attribute set.
        # As this is all happening during the upgrade-charm hook, it is
        # reasonable to expect the cluster is running.
        # Wait until the leader has set the bootstrap-uuid.
        try:
            update_bootstrap_uuid()
        except LeaderNoBootstrapUUIDError:
            status_set('waiting', "Waiting for bootstrap-uuid set by leader")
    config_changed()
def bootstrap_pxc(args):
    """Force a bootstrap on this node.

    This action will run bootstrap-pxc on this node bootstrapping the
    cluster.  This action should only be run after a cold start requiring
    a bootstrap.  This action should only be run on the node with the
    highest sequence number as displayed in workgroup status and found in
    grastate.dat.  If this unit has the highest sequence number and is not
    the juju leader node, a subsequent action run of notify-bootstrapped
    is required.

    :param args: action arguments (unused)
    """
    try:
        # Force safe to bootstrap
        percona_utils.set_grastate_safe_to_bootstrap()
        # Bootstrap this node
        percona_utils.bootstrap_pxc()
        percona_utils.notify_bootstrapped()
    except (percona_utils.GRAStateFileNotFound, OSError) as e:
        # Bug fix: OSError has no .output/.returncode attributes; use
        # getattr with fallbacks so the handler itself cannot raise.
        action_set({'output': getattr(e, 'output', str(e)),
                    'return-code': getattr(e, 'returncode', 1)})
        action_fail("The GRAState file does not exist or cannot "
                    "be written to.")
        # Bug fix: return so we do not fall through to the success
        # message below after a failure.
        return
    except Exception as e:
        # Bug fix: was `except (subprocess.CalledProcessError, Exception)`,
        # which is redundant (Exception subsumes CalledProcessError) and
        # crashed on exceptions lacking .output/.returncode.
        action_set({
            'output': getattr(e, 'output', str(e)),
            'return-code': getattr(e, 'returncode', 1),
            'traceback': traceback.format_exc()})
        action_fail("The bootstrap-pxc failed. "
                    "See traceback in show-action-output")
        return
    action_set({
        'output': "Bootstrap succeeded. "
                  "Wait for the other units to run update-status"})
    percona_utils.assess_status(percona_utils.register_configs())
def cluster_joined():
    """Advertise this unit's addressing details on the cluster relation.

    Publishes the IPv6 private-address/hostname when prefer-ipv6 is set,
    and a cluster-address taken from the configured cluster-network or,
    failing that, from Juju's network-get primary address.  Also
    re-broadcasts the bootstrap UUID so newly joined peers become aware
    of the bootstrapped cluster.
    """
    settings = {}

    if config('prefer-ipv6'):
        ipv6_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        settings['private-address'] = ipv6_addr
        settings['hostname'] = socket.gethostname()

    cluster_net = config('cluster-network')
    if cluster_net:
        settings['cluster-address'] = get_address_in_network(cluster_net,
                                                            fatal=True)
    else:
        try:
            settings['cluster-address'] = \
                network_get_primary_address('cluster')
        except NotImplementedError:
            # NOTE(jamespage): skip - fallback to previous behaviour
            pass

    log("Setting cluster relation: '%s'" % (settings), level=INFO)
    relation_set(relation_settings=settings)

    # Ensure all new peers are aware
    bootstrap_uuid = relation_get('bootstrap-uuid', unit=local_unit())
    if bootstrap_uuid:
        notify_bootstrapped(cluster_rid=relation_id(),
                            cluster_uuid=bootstrap_uuid)
def notify_bootstrapped(args):
    """Broadcast the new bootstrap cluster UUID to the peers.

    Because of timing, this action typically needs to run after the
    bootstrap-pxc action has completed, and on a different unit than
    the one bootstrap-pxc was executed on.

    :param args: action arguments (unused)
    """
    percona_utils.notify_bootstrapped()
def render_config_restart_on_changed(hosts, bootstrap=False):
    """Render mysql config and restart mysql service if file changes as a
    result.

    If bootstrap is True we do a bootstrap-pxc in order to bootstrap the
    percona cluster. This should only be performed once at cluster creation
    time.

    If percona is already bootstrapped we can get away with just ensuring
    that it is started so long as the new node to be added is guaranteed to
    have been restarted so as to apply the new config.

    :param hosts: peer host addresses used to render the cluster config
    :param bootstrap: when True, bootstrap the cluster instead of a
                      plain restart
    """
    config_file = resolve_cnf_file()
    # Hash the config before rendering so we can detect whether the
    # render actually changed anything on disk.
    pre_hash = file_hash(config_file)
    render_config(hosts)
    create_binlogs_directory()
    update_db_rels = False
    if file_hash(config_file) != pre_hash or bootstrap:
        if bootstrap:
            bootstrap_pxc()
            # NOTE(dosaboy): this will not actually do anything if no cluster
            # relation id exists yet.
            notify_bootstrapped()
            update_db_rels = True
        else:
            # NOTE(jamespage):
            # if mysql@bootstrap is running, then the native
            # bootstrap systemd service was used to start this
            # instance, and it was the initial seed unit
            # stop the bootstap version before restarting normal mysqld
            if service_running('mysql@bootstrap'):
                service_stop('mysql@bootstrap')
            attempts = 0
            max_retries = 5
            # Distributed wait to avoid all peers restarting at once.
            cluster_wait()
            while not service_restart('mysql'):
                if attempts == max_retries:
                    raise Exception("Failed to start mysql (max retries "
                                    "reached)")

                log("Failed to start mysql - retrying per distributed wait",
                    WARNING)
                attempts += 1
                cluster_wait()

        # If we get here we assume prior actions have succeeded to always
        # this unit is marked as seeded so that subsequent calls don't result
        # in a restart.
        mark_seeded()

        if update_db_rels:
            update_client_db_relations()
    else:
        log("Config file '{}' unchanged".format(config_file), level=DEBUG)
def render_config_restart_on_changed(clustered, hosts, bootstrap=False):
    """Render mysql config and restart mysql service if file changes as a
    result.

    If bootstrap is True we do a bootstrap-pxc in order to bootstrap the
    percona cluster. This should only be performed once at cluster creation
    time.

    If percona is already bootstrapped we can get away with just ensuring
    that it is started so long as the new node to be added is guaranteed to
    have been restarted so as to apply the new config.

    :param clustered: whether the unit is part of a formed cluster
    :param hosts: peer host addresses used to render the cluster config
    :param bootstrap: when True, bootstrap the cluster instead of a
                      plain restart
    """
    config_file = resolve_cnf_file()
    # Hash the config before rendering so we can detect whether the
    # render actually changed anything on disk.
    pre_hash = file_hash(config_file)
    render_config(clustered, hosts)
    create_binlogs_directory()
    update_db_rels = False
    if file_hash(config_file) != pre_hash or bootstrap:
        if bootstrap:
            bootstrap_pxc()
            # NOTE(dosaboy): this will not actually do anything if no cluster
            # relation id exists yet.
            notify_bootstrapped()
            update_db_rels = True
        else:
            # Linear backoff: start at 1s, add 2s per failed attempt.
            delay = 1
            attempts = 0
            max_retries = 5
            # NOTE(dosaboy): avoid unnecessary restarts. Once mysql is started
            # it needn't be restarted when new units join the cluster since the
            # new units will join and apply their own config.
            if not seeded():
                action = service_restart
            else:
                action = service_start

            while not action('mysql'):
                if attempts == max_retries:
                    raise Exception("Failed to start mysql (max retries "
                                    "reached)")

                log("Failed to start mysql - retrying in %ss" % (delay),
                    WARNING)
                time.sleep(delay)
                delay += 2
                attempts += 1

        # If we get here we assume prior actions have succeeded to always
        # this unit is marked as seeded so that subsequent calls don't result
        # in a restart.
        mark_seeded()

        if update_db_rels:
            update_shared_db_rels()
    else:
        log("Config file '{}' unchanged".format(config_file), level=DEBUG)
def render_config_restart_on_changed(clustered, hosts, bootstrap=False):
    """Render mysql config and restart mysql service if file changes as a
    result.

    If bootstrap is True we do a bootstrap-pxc in order to bootstrap the
    percona cluster. This should only be performed once at cluster creation
    time.

    If percona is already bootstrapped we can get away with just ensuring
    that it is started so long as the new node to be added is guaranteed to
    have been restarted so as to apply the new config.

    :param clustered: whether the unit is part of a formed cluster
    :param hosts: peer host addresses used to render the cluster config
    :param bootstrap: when True, bootstrap the cluster instead of a
                      plain restart
    """
    # Hoist the config path so it is resolved once and can be used in
    # the unchanged-file log message below.
    config_file = resolve_cnf_file()
    pre_hash = file_hash(config_file)
    render_config(clustered, hosts)
    create_binlogs_directory()
    update_db_rels = False
    if file_hash(config_file) != pre_hash or bootstrap:
        if bootstrap:
            service('stop', 'mysql')
            service('bootstrap-pxc', 'mysql')
            # NOTE(dosaboy): this will not actually do anything if no cluster
            # relation id exists yet.
            notify_bootstrapped()
            update_db_rels = True
        else:
            # Linear backoff: start at 1s, add 2s per failed attempt.
            delay = 1
            attempts = 0
            max_retries = 5
            # NOTE(dosaboy): avoid unnecessary restarts. Once mysql is started
            # it needn't be restarted when new units join the cluster since the
            # new units will join and apply their own config.
            if not seeded():
                action = service_restart
            else:
                action = service_start

            while not action('mysql'):
                if attempts == max_retries:
                    raise Exception("Failed to start mysql (max retries "
                                    "reached)")

                log("Failed to start mysql - retrying in %ss" % (delay),
                    WARNING)
                time.sleep(delay)
                delay += 2
                attempts += 1

        # If we get here we assume prior actions have succeeded to always
        # this unit is marked as seeded so that subsequent calls don't result
        # in a restart.
        mark_seeded()

        if update_db_rels:
            update_shared_db_rels()
    else:
        # Bug fix: the original passed the bare format string
        # "Config file '%s' unchanged" with no argument, so the literal
        # placeholder was logged instead of the file name.
        log("Config file '%s' unchanged" % (config_file), level=DEBUG)
def cluster_joined():
    """Advertise this unit's addressing details on the cluster relation.

    When prefer-ipv6 is set, publishes the unit's IPv6 address and
    hostname to peers, then re-broadcasts the bootstrap UUID so newly
    joined peers become aware of the bootstrapped cluster.
    """
    # Bug fix: relation_settings was previously only assigned inside the
    # prefer-ipv6 branch but used unconditionally below, raising a
    # NameError whenever prefer-ipv6 was not set.
    relation_settings = {}
    if config('prefer-ipv6'):
        addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        relation_settings = {'private-address': addr,
                             'hostname': socket.gethostname()}

    log("Setting cluster relation: '%s'" % (relation_settings), level=INFO)
    relation_set(relation_settings=relation_settings)

    # Ensure all new peers are aware
    cluster_state_uuid = relation_get('bootstrap-uuid', unit=local_unit())
    if cluster_state_uuid:
        notify_bootstrapped(cluster_rid=relation_id(),
                            cluster_uuid=cluster_state_uuid)
def upgrade():
    """Handle the upgrade-charm hook.

    Decide whether this unit is responsible for broadcasting the cluster
    bootstrap UUID (the leader, or the oldest peer on Juju versions
    without leadership), broadcast it if the cluster is ready, then
    re-run config_changed().
    """
    check_bootstrap = False
    try:
        if is_leader():
            check_bootstrap = True
    except Exception:
        # Bug fix: was a bare `except:`, which also swallows SystemExit
        # and KeyboardInterrupt.  is_leader() can be unavailable on older
        # Juju; fall back to the oldest-peer heuristic.
        if oldest_peer(peer_units()):
            check_bootstrap = True

    if check_bootstrap and not is_bootstrapped() and is_sufficient_peers():
        # If this is the leader but we have not yet broadcast the cluster uuid
        # then do so now.
        wsrep_ready = get_wsrep_value('wsrep_ready') or ""
        if wsrep_ready.lower() in ['on', 'ready']:
            cluster_state_uuid = get_wsrep_value('wsrep_cluster_state_uuid')
            if cluster_state_uuid:
                mark_seeded()
                notify_bootstrapped(cluster_uuid=cluster_state_uuid)

    config_changed()
def upgrade():
    """Handle the upgrade-charm hook.

    On the leader (unless paused): refresh leader-ip, reconfigure the SST
    user, migrate the legacy mysql.passwd into root-password, record that
    the cluster was complete at some point, and broadcast the bootstrap
    UUID.  On non-leaders: wait for the leader to publish the
    bootstrap-uuid and mirror it onto the peer relation.
    """
    if is_leader():
        if is_unit_paused_set():
            # Typo fix: was "skiping".
            log('Unit is paused, skipping upgrade', level=INFO)
            return

        # Leader sets on upgrade
        leader_set(**{'leader-ip': get_relation_ip('cluster')})
        configure_sstuser(sst_password())
        if not leader_get('root-password') and leader_get('mysql.passwd'):
            leader_set(**{'root-password': leader_get('mysql.passwd')})

        # On upgrade-charm we assume the cluster was complete at some point
        kvstore = kv()
        initial_clustered = kvstore.get(INITIAL_CLUSTERED_KEY, False)
        if not initial_clustered:
            kvstore.set(key=INITIAL_CLUSTERED_KEY, value=True)
            kvstore.flush()

        # Broadcast the bootstrap-uuid so peers can mark themselves
        # bootstrapped against the running cluster.
        wsrep_ready = get_wsrep_value('wsrep_ready') or ""
        if wsrep_ready.lower() in ['on', 'ready']:
            cluster_state_uuid = get_wsrep_value('wsrep_cluster_state_uuid')
            if cluster_state_uuid:
                mark_seeded()
                notify_bootstrapped(cluster_uuid=cluster_state_uuid)
    else:
        # Ensure all the peers have the bootstrap-uuid attribute set.
        # As this is all happening during the upgrade-charm hook, it is
        # reasonable to expect the cluster is running.
        # Wait until the leader has set the bootstrap-uuid.
        try:
            update_bootstrap_uuid()
        except LeaderNoBootstrapUUIDError:
            status_set('waiting', "Waiting for bootstrap-uuid set by leader")
def upgrade():
    """Handle the upgrade-charm hook.

    On the leader (unless paused or mid series-upgrade): refresh
    leader-ip, reconfigure the SST user, migrate the legacy mysql.passwd
    into root-password, record that the cluster was complete at some
    point, and broadcast the bootstrap UUID.  On non-leaders: wait for
    the leader to publish the bootstrap-uuid and mirror it onto the peer
    relation.
    """
    if is_leader():
        if is_unit_paused_set() or is_unit_upgrading_set():
            # Typo fix: was "skiping".
            log('Unit is paused, skipping upgrade', level=INFO)
            return

        # Leader sets on upgrade
        leader_set(**{'leader-ip': get_relation_ip('cluster')})
        configure_sstuser(sst_password())
        if not leader_get('root-password') and leader_get('mysql.passwd'):
            leader_set(**{'root-password': leader_get('mysql.passwd')})

        # On upgrade-charm we assume the cluster was complete at some point
        kvstore = kv()
        initial_clustered = kvstore.get(INITIAL_CLUSTERED_KEY, False)
        if not initial_clustered:
            kvstore.set(key=INITIAL_CLUSTERED_KEY, value=True)
            kvstore.flush()

        # Broadcast the bootstrap-uuid so peers can mark themselves
        # bootstrapped against the running cluster.
        wsrep_ready = get_wsrep_value('wsrep_ready') or ""
        if wsrep_ready.lower() in ['on', 'ready']:
            cluster_state_uuid = get_wsrep_value('wsrep_cluster_state_uuid')
            if cluster_state_uuid:
                mark_seeded()
                notify_bootstrapped(cluster_uuid=cluster_state_uuid)
    else:
        # Ensure all the peers have the bootstrap-uuid attribute set.
        # As this is all happening during the upgrade-charm hook, it is
        # reasonable to expect the cluster is running.
        # Wait until the leader has set the bootstrap-uuid.
        try:
            update_bootstrap_uuid()
        except LeaderNoBootstrapUUIDError:
            status_set('waiting', "Waiting for bootstrap-uuid set by leader")