def series_upgrade_complete():
    """Finish a series upgrade: re-render configuration and resume the unit."""
    log("Running complete series upgrade hook", "INFO")

    # NOTE(jamespage): If a newer RMQ version is installed and the old
    # style configuration file is still on disk, remove it before
    # re-rendering any new configuration.
    legacy_config_present = os.path.exists(rabbit.RABBITMQ_CONFIG)
    if legacy_config_present and cmp_pkgrevno('rabbitmq-server', '3.7') >= 0:
        os.remove(rabbit.RABBITMQ_CONFIG)

    rabbit.ConfigRenderer(rabbit.CONFIG_FILES()).write_all()

    clear_unit_paused()
    clear_unit_upgrading()
    rabbit.resume_unit_helper(rabbit.ConfigRenderer(rabbit.CONFIG_FILES()))
def ha_joined():
    """Publish resource definitions to the hacluster relation.

    Exits with an error when VIP-only mode is requested but the VIP /
    corosync configuration is incomplete.
    """
    corosync_bindiface = config('ha-bindiface')
    corosync_mcastport = config('ha-mcastport')
    vip = config('vip')
    vip_iface = config('vip_iface')
    vip_cidr = config('vip_cidr')
    vip_only = config('ha-vip-only')

    required = [corosync_bindiface, corosync_mcastport, vip, vip_iface,
                vip_cidr]
    if None in required and vip_only is True:
        log('Insufficient configuration data to configure VIP-only hacluster.',
            level=ERROR)
        sys.exit(1)

    # Re-render only the environment file for the HA setup.
    ctxt = {rabbit.ENV_CONF: rabbit.CONFIG_FILES()[rabbit.ENV_CONF]}
    rabbit.ConfigRenderer(ctxt).write(rabbit.ENV_CONF)

    vip_resource_params = ('params ip="%s" cidr_netmask="%s" nic="%s"' %
                           (vip, vip_cidr, vip_iface))

    relation_settings = {
        'corosync_bindiface': corosync_bindiface,
        'corosync_mcastport': corosync_mcastport,
    }
    if vip_only is True:
        # VIP-only: pacemaker manages just the virtual IP.
        relation_settings['resources'] = {
            'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
        }
        relation_settings['resource_params'] = {
            'res_rabbitmq_vip': vip_resource_params,
        }
    else:
        # Full HA: pacemaker also manages the rabbitmq-server service.
        relation_settings['resources'] = {
            'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
            'res_rabbitmq-server': 'lsb:rabbitmq-server',
        }
        relation_settings['resource_params'] = {
            'res_rabbitmq_vip': vip_resource_params,
            'res_rabbitmq-server': 'op start start-delay="5s" '
                                   'op monitor interval="5s"',
        }

    for rel_id in relation_ids('ha'):
        relation_set(relation_id=rel_id,
                     relation_settings=relation_settings)

    # Record the ports for use by external scripts.
    env_vars = {
        'OPENSTACK_PORT_EPMD': 4369,
        'OPENSTACK_PORT_MCASTPORT': config('ha-mcastport'),
    }
    save_script_rc(**env_vars)
def series_upgrade_prepare():
    """Prepare this unit for a series upgrade.

    Marks the unit as upgrading, pauses services if not already paused,
    and (on the leader) flags the whole cluster as series-upgrading.
    """
    set_unit_upgrading()
    if not is_unit_paused_set():
        log("Pausing unit for series upgrade.")
        rabbit.pause_unit_helper(rabbit.ConfigRenderer(rabbit.CONFIG_FILES()))
    if not is_leader():
        return
    if not leader_get('cluster_series_upgrading'):
        # Inform the entire cluster a series upgrade is occurring.
        # Run the complete-cluster-series-upgrade action on the leader to
        # clear this setting when the full cluster has completed its
        # upgrade.
        leader_set(cluster_series_upgrading=True)
def cluster_changed(relation_id=None, remote_unit=None):
    """Handle a change on the peer (cluster) relation.

    Future travelers beware: ordering is significant here.

    :param relation_id: Relation id to read peer data from (default: current).
    :param remote_unit: Remote unit to read peer data from (default: current).
    """
    rdata = relation_get(rid=relation_id, unit=remote_unit)

    # Sync passwords via peer-echo, excluding per-unit identity keys.
    blacklist = ['hostname', 'private-address', 'public-address']
    whitelist = [a for a in rdata.keys() if a not in blacklist]
    peer_echo(includes=whitelist)

    cookie = peer_retrieve('cookie')
    if not cookie:
        # Nothing to cluster with until the leader shares the cookie.
        log('cluster_changed: cookie not yet set.', level=INFO)
        return

    if rdata:
        hostname = rdata.get('hostname', None)
        private_address = rdata.get('private-address', None)
        if hostname and private_address:
            rabbit.update_hosts_file({private_address: hostname})

    # Sync the erlang cookie with peers if necessary.
    update_cookie()

    if is_relation_made('ha') and config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.', level=INFO)
        return

    if rabbit.is_sufficient_peers():
        # NOTE(freyes): all the nodes need to marked as 'clustered'
        # (LP: #1691510)
        rabbit.cluster_with()
        # Local rabbit maybe clustered now so check and inform clients if
        # needed.
        update_clients()
        if is_leader():
            if (leader_get(rabbit.CLUSTER_MODE_KEY) !=
                    config(rabbit.CLUSTER_MODE_KEY)):
                log("Informing peers via leaderdb to change {} to {}".format(
                    rabbit.CLUSTER_MODE_KEY,
                    config(rabbit.CLUSTER_MODE_KEY)))
                leader_set({rabbit.CLUSTER_MODE_KEY:
                            config(rabbit.CLUSTER_MODE_KEY)})
                # BUG FIX: CONFIG_FILES is a callable in this version of the
                # charm; it was previously passed uncalled, handing the
                # renderer the function object instead of the config map.
                rabbit.ConfigRenderer(rabbit.CONFIG_FILES()).write_all()

    if not is_leader() and is_relation_made('nrpe-external-master'):
        update_nrpe_checks()
def test_render_rabbitmq_env(self, mock_config, mock_relation_ids):
    """ENV_CONF renders the erlang args and IPv6 proto_dist settings."""
    mock_relation_ids.return_value = []
    mock_config.return_value = 3
    with mock.patch('rabbit_utils.render') as mock_render:
        ctxt = {
            rabbit_utils.ENV_CONF:
                rabbit_utils.CONFIG_FILES[rabbit_utils.ENV_CONF]
        }
        rabbit_utils.ConfigRenderer(ctxt).write(rabbit_utils.ENV_CONF)

        expected_ctxt = {
            'settings': {
                'RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS': "'+A 6'",
                'RABBITMQ_SERVER_START_ARGS': "'-proto_dist inet6_tcp'"
            }
        }
        mock_render.assert_called_with('rabbitmq-env.conf',
                                       '/etc/rabbitmq/rabbitmq-env.conf',
                                       expected_ctxt,
                                       perms=420)
def leader_settings_changed():
    """React to new leader data: sync the cookie and nudge the peers."""
    if is_unit_paused_set():
        log("Do not run config_changed while unit is paused", "WARNING")
        return

    if not os.path.exists(rabbit.RABBITMQ_CTL):
        log('Deferring cookie configuration, RabbitMQ not yet installed')
        return

    # Get cookie from leader, update cookie locally and
    # force cluster-relation-changed hooks to run on peers
    cookie = leader_get(attribute='cookie')
    if cookie:
        update_cookie(leaders_cookie=cookie)
        # Force cluster-relation-changed hooks to run on peers
        # This will precipitate peer clustering
        # Without this a chicken and egg scenario prevails when
        # using LE and peerstorage
        for rid in relation_ids('cluster'):
            relation_set(relation_id=rid,
                         relation_settings={'cookie': cookie})

    update_clients()
    rabbit.ConfigRenderer(rabbit.CONFIG_FILES()).write_all()
def render_and_restart():
    """Re-render every RabbitMQ configuration file."""
    renderer = rabbit.ConfigRenderer(rabbit.CONFIG_FILES())
    renderer.write_all()
def config_changed(check_deferred_restarts=True):
    """Run the config-changed hook.

    :param check_deferred_restarts: Whether to check if restarts are
                                    permitted before running the hook.
    :type check_deferred_restarts: bool
    """
    configure_deferred_restarts(rabbit.services())
    allowed, reason = is_hook_allowed(
        'config-changed',
        check_deferred_restarts=check_deferred_restarts)
    if not allowed:
        log(reason, "WARN")
        return

    # Update hosts with this unit's information.
    cluster_ip = ch_ip.get_relation_ip(
        rabbit_net_utils.CLUSTER_INTERFACE,
        cidr_network=config(rabbit_net_utils.CLUSTER_OVERRIDE_CONFIG))
    rabbit.update_hosts_file({cluster_ip: rabbit.get_unit_hostname()})

    # Add archive source if provided and not in the upgrade process.
    if not leader_get("cluster_series_upgrading"):
        add_source(config('source'), config('key'))

    # Copy in defaults file for updated ulimits.
    shutil.copyfile('templates/rabbitmq-server',
                    '/etc/default/rabbitmq-server')

    # Install packages so that changes to the 'source' config option
    # result in an upgrade where applicable.
    if rabbit.archive_upgrade_available():
        # Avoid package upgrade collisions: stopping and attempting to
        # start rabbitmqs at the same time leads to failed restarts.
        rabbit.cluster_wait()
        rabbit.install_or_upgrade_packages()

    ssl_mode = config('ssl')
    if ssl_mode == 'off':
        open_port(5672)
        close_port(int(config('ssl_port')))
    elif ssl_mode == 'on':
        open_port(5672)
        open_port(int(config('ssl_port')))
    elif ssl_mode == 'only':
        close_port(5672)
        open_port(int(config('ssl_port')))
    else:
        log("Unknown ssl config value: '%s'" % config('ssl'), level=ERROR)

    chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
    chmod(RABBIT_DIR, 0o775)

    if rabbit.management_plugin_enabled():
        rabbit.enable_plugin(MAN_PLUGIN)
        open_port(rabbit.get_managment_port())
    else:
        rabbit.disable_plugin(MAN_PLUGIN)
        close_port(rabbit.get_managment_port())
    # LY: Close the old management port since it may have been opened in a
    # previous version of the charm.  close_port is a noop if the port is
    # not open.
    close_port(55672)

    # NOTE(jamespage): If a newer RMQ version is installed and the old
    # style configuration file is still on disk, remove it before
    # re-rendering any new configuration.
    if (os.path.exists(rabbit.RABBITMQ_CONFIG) and
            cmp_pkgrevno('rabbitmq-server', '3.7') >= 0):
        os.remove(rabbit.RABBITMQ_CONFIG)

    rabbit.ConfigRenderer(rabbit.CONFIG_FILES()).write_all()

    if is_relation_made("ha"):
        ha_is_active_active = config("ha-vip-only")
        if ha_is_active_active:
            update_nrpe_checks()
        elif is_elected_leader('res_rabbitmq_vip'):
            update_nrpe_checks()
        else:
            log("hacluster relation is present but this node is not active"
                " skipping update nrpe checks")
    elif is_relation_made('nrpe-external-master'):
        update_nrpe_checks()

    # Only set values if this is the leader.
    if not is_leader():
        return

    rabbit.set_all_mirroring_queues(config('mirroring-queues'))

    # Update cluster in case min-cluster-size has changed.
    for rid in relation_ids('cluster'):
        for unit in related_units(rid):
            cluster_changed(relation_id=rid, remote_unit=unit)

    # NOTE(jamespage): Workaround until we have a good way of generally
    # disabling notifications based on which services are deployed.
    if 'openstack' in rabbit.list_vhosts():
        rabbit.configure_notification_ttl('openstack',
                                          config('notification-ttl'))
def render_and_restart():
    # Re-render every RabbitMQ configuration file.  Despite the name, no
    # restart happens here — presumably a surrounding restart-on-change
    # mechanism handles it; confirm against the full file.
    rabbit.ConfigRenderer(rabbit.CONFIG_FILES()).write_all()


# NOTE(review): these calls sit at the same scope as the def above —
# looks like the tail of a hook body; verify against the full file.
render_and_restart()
update_clients()


@hooks.hook('update-status')
@harden()
def update_status():
    # Periodic status hook; the actual assessment happens via
    # rabbit.assess_status() in the __main__ block below.
    log('Updating status.')


if __name__ == '__main__':
    try:
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        log('Unknown hook {} - skipping.'.format(e))
    # This solves one off problems waiting for the cluster to complete
    # It will get executed only once as soon as leader_node_is_ready()
    # or client_node_is_ready() returns True
    # Subsequent client requests will be handled by normal
    # amqp-relation-changed hooks
    kvstore = kv()
    if not kvstore.get(INITIAL_CLIENT_UPDATE_KEY, False):
        log("Rerunning update_clients as initial update not yet performed",
            level=DEBUG)
        update_clients()
    rabbit.assess_status(rabbit.ConfigRenderer(rabbit.CONFIG_FILES()))
def config_changed():
    """config-changed hook: install packages, render config, open ports."""
    # Update hosts with this unit's information.
    unit_ip = rabbit.get_unit_ip(
        config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
        interface=rabbit.CLUSTER_INTERFACE)
    rabbit.update_hosts_file({unit_ip: rabbit.get_unit_hostname()})

    # Add archive source if provided.
    add_source(config('source'), config('key'))
    apt_update(fatal=True)

    # Copy in defaults file for updated ulimits.
    shutil.copyfile('templates/rabbitmq-server',
                    '/etc/default/rabbitmq-server')

    # Install packages to ensure any changes to source result in an
    # upgrade if applicable.
    status_set('maintenance', 'Installing/upgrading RabbitMQ packages')
    apt_install(rabbit.PACKAGES, fatal=True)

    open_port(5672)

    chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
    chmod(RABBIT_DIR, 0o775)

    if config('management_plugin') is True:
        rabbit.enable_plugin(MAN_PLUGIN)
        open_port(rabbit.get_managment_port())
    else:
        rabbit.disable_plugin(MAN_PLUGIN)
        close_port(rabbit.get_managment_port())
    # LY: Close the old management port since it may have been opened in a
    # previous version of the charm.  close_port is a noop if the port is
    # not open.
    close_port(55672)

    rabbit.ConfigRenderer(rabbit.CONFIG_FILES).write_all()

    # Only set values if this is the leader.
    if not is_leader():
        return

    rabbit.set_all_mirroring_queues(config('mirroring-queues'))

    if is_relation_made("ha"):
        ha_is_active_active = config("ha-vip-only")
        if ha_is_active_active:
            update_nrpe_checks()
        elif is_elected_leader('res_rabbitmq_vip'):
            update_nrpe_checks()
        else:
            log("hacluster relation is present but this node is not active"
                " skipping update nrpe checks")
    else:
        update_nrpe_checks()

    # Update cluster in case min-cluster-size has changed.
    for rid in relation_ids('cluster'):
        for unit in related_units(rid):
            cluster_changed(relation_id=rid, remote_unit=unit)
def setUp(self):
    # Patch everything listed in TO_PATCH on rabbit_utils, then build the
    # ConfigRenderer under test from the shared config_map fixture.
    super(ConfigRendererTests, self).setUp(rabbit_utils, TO_PATCH)
    self.renderer = rabbit_utils.ConfigRenderer(self.config_map)
def config_changed():
    """config-changed hook: upgrade if needed, render config, manage ports."""
    # Update hosts with this unit's information.
    unit_ip = rabbit.get_unit_ip(
        config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
        interface=rabbit.CLUSTER_INTERFACE)
    rabbit.update_hosts_file({unit_ip: rabbit.get_unit_hostname()})

    # Add archive source if provided.
    add_source(config('source'), config('key'))

    # Copy in defaults file for updated ulimits.
    shutil.copyfile('templates/rabbitmq-server',
                    '/etc/default/rabbitmq-server')

    # Install packages so that changes to the 'source' config option
    # result in an upgrade where applicable.
    if rabbit.archive_upgrade_available():
        # Avoid package upgrade collisions: stopping and attempting to
        # start rabbitmqs at the same time leads to failed restarts.
        rabbit.cluster_wait()
        rabbit.install_or_upgrade_packages()

    ssl_mode = config('ssl')
    if ssl_mode == 'off':
        open_port(5672)
        close_port(int(config('ssl_port')))
    elif ssl_mode == 'on':
        open_port(5672)
        open_port(int(config('ssl_port')))
    elif ssl_mode == 'only':
        close_port(5672)
        open_port(int(config('ssl_port')))
    else:
        log("Unknown ssl config value: '%s'" % config('ssl'), level=ERROR)

    chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
    chmod(RABBIT_DIR, 0o775)

    if config('management_plugin') is True:
        rabbit.enable_plugin(MAN_PLUGIN)
        open_port(rabbit.get_managment_port())
    else:
        rabbit.disable_plugin(MAN_PLUGIN)
        close_port(rabbit.get_managment_port())
    # LY: Close the old management port since it may have been opened in a
    # previous version of the charm.  close_port is a noop if the port is
    # not open.
    close_port(55672)

    rabbit.ConfigRenderer(rabbit.CONFIG_FILES).write_all()

    # Only set values if this is the leader.
    if not is_leader():
        return

    rabbit.set_all_mirroring_queues(config('mirroring-queues'))

    if is_relation_made("ha"):
        ha_is_active_active = config("ha-vip-only")
        if ha_is_active_active:
            update_nrpe_checks()
        elif is_elected_leader('res_rabbitmq_vip'):
            update_nrpe_checks()
        else:
            log("hacluster relation is present but this node is not active"
                " skipping update nrpe checks")
    else:
        update_nrpe_checks()

    # Update cluster in case min-cluster-size has changed.
    for rid in relation_ids('cluster'):
        for unit in related_units(rid):
            cluster_changed(relation_id=rid, remote_unit=unit)
def ha_joined():
    """Publish resources, params and groups to the hacluster relation.

    Supports two modes: VIP-only (pacemaker manages just the virtual IP)
    and full HA (pacemaker also manages the ceph rbd device, filesystem
    and the rabbitmq-server service).  Exits with an error on incomplete
    configuration; defers until the ceph relation exists in full-HA mode.
    """
    corosync_bindiface = config('ha-bindiface')
    corosync_mcastport = config('ha-mcastport')
    vip = config('vip')
    vip_iface = config('vip_iface')
    vip_cidr = config('vip_cidr')
    rbd_name = config('rbd-name')
    vip_only = config('ha-vip-only')

    if None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
                vip_cidr, rbd_name] and vip_only is False:
        log('Insufficient configuration data to configure hacluster.',
            level=ERROR)
        sys.exit(1)
    elif None in [corosync_bindiface, corosync_mcastport, vip,
                  vip_iface, vip_cidr] and vip_only is True:
        log('Insufficient configuration data to configure VIP-only hacluster.',
            level=ERROR)
        sys.exit(1)

    if not is_relation_made('ceph', 'auth') and vip_only is False:
        log('ha_joined: No ceph relation yet, deferring.')
        return

    # Re-render only the environment file for the HA setup.
    ctxt = {rabbit.ENV_CONF: rabbit.CONFIG_FILES[rabbit.ENV_CONF]}
    rabbit.ConfigRenderer(ctxt).write(rabbit.ENV_CONF)

    relation_settings = {}
    relation_settings['corosync_bindiface'] = corosync_bindiface
    relation_settings['corosync_mcastport'] = corosync_mcastport

    if vip_only is True:
        relation_settings['resources'] = {
            'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
        }
        relation_settings['resource_params'] = {
            'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
                                (vip, vip_cidr, vip_iface),
        }
    else:
        relation_settings['resources'] = {
            'res_rabbitmq_rbd': 'ocf:ceph:rbd',
            'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
            'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
            'res_rabbitmq-server': 'lsb:rabbitmq-server',
        }
        relation_settings['resource_params'] = {
            # BUG FIX: the 'user' placeholder was a literal mask; with only
            # three %s placeholders for four arguments the % formatting
            # raised TypeError.  SERVICE_NAME is the ceph user.
            'res_rabbitmq_rbd': 'params name="%s" pool="%s" user="%s" '
                                'secret="%s"' %
                                (rbd_name, POOL_NAME, SERVICE_NAME,
                                 ceph._keyfile_path(SERVICE_NAME)),
            'res_rabbitmq_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
                               'fstype="ext4" op start start-delay="10s"' %
                               (POOL_NAME, rbd_name, RABBIT_DIR),
            'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
                                (vip, vip_cidr, vip_iface),
            'res_rabbitmq-server': 'op start start-delay="5s" '
                                   'op monitor interval="5s"',
        }
        relation_settings['groups'] = {
            'grp_rabbitmq':
                'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
                'res_rabbitmq-server',
        }

    for rel_id in relation_ids('ha'):
        relation_set(relation_id=rel_id,
                     relation_settings=relation_settings)

    # Record the ports for use by external scripts.
    env_vars = {
        'OPENSTACK_PORT_EPMD': 4369,
        'OPENSTACK_PORT_MCASTPORT': config('ha-mcastport'),
    }
    save_script_rc(**env_vars)
def series_upgrade_complete():
    """Resume the unit once the series upgrade has finished."""
    log("Running complete series upgrade hook", "INFO")
    clear_unit_paused()
    clear_unit_upgrading()
    rabbit.resume_unit_helper(rabbit.ConfigRenderer(rabbit.CONFIG_FILES))