def test_assess_status_installed(self, mock_get_upstream_version,
                                 mock_application_version_set):
    """Verify assess_status() wires the status func and sets the version.

    Checks that rabbit_utils.assess_status():
      * builds the status-check callable via assess_status_func() with the
        configs object it was given ('test-config' here),
      * invokes that callable exactly once with no arguments,
      * publishes the mocked upstream version ('3.5.7') through
        application_version_set.

    The two mock_* parameters are injected by @patch.object decorators that
    are outside this chunk's view.
    """
    # NOTE(review): indentation reconstructed from a whitespace-collapsed
    # original line; the with-block grouping below is inferred — confirm
    # against VCS history.
    mock_get_upstream_version.return_value = '3.5.7'
    with mock.patch.object(rabbit_utils, 'assess_status_func') as asf:
        callee = mock.MagicMock()
        asf.return_value = callee

        rabbit_utils.assess_status('test-config')

        asf.assert_called_once_with('test-config')
        callee.assert_called_once_with()
        mock_application_version_set.assert_called_with('3.5.7')
# NOTE(review): this line is a whitespace-mangled paste of a charm hooks
# script tail; all indentation (and therefore nesting) has been lost. It
# contains, in order: a nested helper `render_and_restart` plus immediate
# calls to it and to update_clients() (apparently the tail of an enclosing
# hook function whose `def` is not visible in this chunk), the
# @hooks.hook('update-status') handler, and the `if __name__ == '__main__'`
# dispatch block that re-runs update_clients() when the
# INITIAL_CLIENT_UPDATE_KEY kv flag is unset and then calls
# rabbit.assess_status(rabbit.ConfigRenderer(rabbit.CONFIG_FILES())).
# Because the enclosing scope of the leading fragment cannot be determined
# from this view, do not re-indent by eye — restore from version control.
def render_and_restart(): rabbit.ConfigRenderer(rabbit.CONFIG_FILES()).write_all() render_and_restart() update_clients() @hooks.hook('update-status') @harden() def update_status(): log('Updating status.') if __name__ == '__main__': try: hooks.execute(sys.argv) except UnregisteredHookError as e: log('Unknown hook {} - skipping.'.format(e)) # This solves one off problems waiting for the cluster to complete # It will get executed only once as soon as leader_node_is_ready() # or client_node_is_ready() returns True # Subsequent client requests will be handled by normal # amqp-relation-changed hooks kvstore = kv() if not kvstore.get(INITIAL_CLIENT_UPDATE_KEY, False): log("Rerunning update_clients as initial update not yet performed", level=DEBUG) update_clients() rabbit.assess_status(rabbit.ConfigRenderer(rabbit.CONFIG_FILES()))
# NOTE(review): whitespace-mangled paste of another hooks-script tail; all
# indentation lost. Contents, in order: loops pushing the erlang `cookie`
# over the 'cluster' relation and re-driving amqp_changed() for every unit
# on each 'amqp' relation (this fragment references `cookie` and is clearly
# the tail of an enclosing function whose `def` is outside this view);
# pre_install_hooks(), which runs any executable exec.d/*/charm-pre-install
# scripts via `sh -c`; the @hooks.hook('update-status') handler; and the
# `__main__` dispatch block ending in rabbit.assess_status() — called with
# NO argument here, unlike the variants on the neighbouring lines; these
# chunks likely come from different charm revisions, so do not "unify" them.
# Restore the real structure from version control rather than re-indenting.
# Without this a chicken and egg scenario prevails when # using LE and peerstorage for rid in relation_ids('cluster'): relation_set(relation_id=rid, relation_settings={'cookie': cookie}) # If leader has changed and access credentials, ripple these # out from all units for rid in relation_ids('amqp'): for unit in related_units(rid): amqp_changed(relation_id=rid, remote_unit=unit) def pre_install_hooks(): for f in glob.glob('exec.d/*/charm-pre-install'): if os.path.isfile(f) and os.access(f, os.X_OK): subprocess.check_call(['sh', '-c', f]) @hooks.hook('update-status') @harden() def update_status(): log('Updating status.') if __name__ == '__main__': try: hooks.execute(sys.argv) except UnregisteredHookError as e: log('Unknown hook {} - skipping.'.format(e)) rabbit.assess_status()
# NOTE(review): whitespace-mangled paste of a third hooks-script tail; all
# indentation lost. Contents, in order: an explanatory comment block about
# cleaning up departed cluster members, a leader-only guarded call to
# rabbit.check_cluster_memberships() (tail of an enclosing function whose
# `def` is outside this view), and the `__main__` dispatch block with the
# INITIAL_CLIENT_UPDATE_KEY kv re-run of update_clients().
# NOTE(review): the final call passes `rabbit.CONFIG_FILES` UNcalled,
# whereas the equivalent line in the L2 chunk passes
# `rabbit.CONFIG_FILES()` — confirm against the charm revision each chunk
# came from whether CONFIG_FILES is a constant here or a function that
# should be invoked; if the latter, this is a live bug.
# Restore the real structure from version control rather than re-indenting.
# # This must be done here and not in the cluster-relation-departed hook. At # the point in time the cluster-relation-departed hook is called we know # that a unit is departing. We also know that RabbitMQ will not have # noticed its departure yet. We cannot remove a node pre-emptively. # # In the normal case the departing node should remove itself from the # cluster in its stop hook. We clean up the ones that for whatever reason # are unable to clean up after themselves successfully here. # # Have a look at the docstring of the stop() function for detailed # explanation. if is_leader() and not is_unit_paused_set(): rabbit.check_cluster_memberships() if __name__ == '__main__': try: hooks.execute(sys.argv) except UnregisteredHookError as e: log('Unknown hook {} - skipping.'.format(e)) # This solves one off problems waiting for the cluster to complete # It will get executed only once as soon as leader_node_is_ready() # or client_node_is_ready() returns True # Subsequent client requests will be handled by normal # amqp-relation-changed hooks kvstore = kv() if not kvstore.get(INITIAL_CLIENT_UPDATE_KEY, False): update_clients() rabbit.assess_status(rabbit.ConfigRenderer(rabbit.CONFIG_FILES))