def cluster_changed():
    """Handle a change on the 'cluster' peer relation.

    Re-renders all config files, echoes the peer 'dbsync_state' key and
    resumes or pauses local services depending on whether the database
    sync has completed, honouring the unit paused flag.  Finally notifies
    quantum gateways so they pick up any change to the shared metadata
    secret.
    """
    CONFIGS.write_all()
    if hookenv.relation_ids('cluster'):
        # Propagate the dbsync state between peers before reading it back.
        ch_peerstorage.peer_echo(includes=['dbsync_state'])
        sync_done = ch_peerstorage.peer_retrieve('dbsync_state') == 'complete'
        paused = ch_utils.is_unit_paused_set()
        if sync_done:
            if paused:
                hookenv.log('Unit is in paused state, not issuing '
                            'start/resume to all services')
            else:
                for service in ncc_utils.services():
                    ch_host.service_resume(service)
        else:
            if paused:
                hookenv.log(
                    'Database sync not ready. Would shut down services but '
                    'unit is in paused state, not issuing stop/pause to all '
                    'services')
            else:
                hookenv.log('Database sync not ready. Shutting down services')
                for service in ncc_utils.services():
                    ch_host.service_pause(service)
    # The shared metadata secret is stored in the leader-db and if it's
    # changed the gateway needs to know.
    for rid in hookenv.relation_ids('quantum-network-service'):
        quantum_joined(rid=rid, remote_restart=False)
Example #2
0
def cluster_changed():
    """Handle a keystone cluster peer change.

    Re-authorises peer SSH access, re-echoes shared secrets across the
    peer relation, and forces an SSL sync when new peers still need
    certificates (or when this unit wrongly remains SSL cert master).
    """
    unison.ssh_authorized_peers(user=SSH_USER,
                                group="juju_keystone",
                                peer_interface="cluster",
                                ensure_local_user=True)
    # NOTE(jamespage) re-echo passwords for peer storage
    echo_whitelist = ["_passwd",
                      "identity-service:",
                      "ssl-cert-master",
                      "db-initialised",
                      "ssl-cert-available-updates"]
    log("Peer echo whitelist: %s" % (echo_whitelist), level=DEBUG)
    peer_echo(includes=echo_whitelist, force=True)

    check_peer_actions()
    initialise_pki()

    # Figure out if we need to mandate a sync
    pending = get_ssl_sync_request_units()
    already_synced = relation_get(attribute="ssl-synced-units",
                                  unit=local_unit())
    delta = None
    if already_synced:
        already_synced = json.loads(already_synced)
        delta = set(pending).symmetric_difference(set(already_synced))

    if pending and (not already_synced or delta):
        log("New peers joined and need syncing - %s" % (", ".join(pending)),
            level=DEBUG)
        update_all_identity_relation_units_force_sync()
    else:
        update_all_identity_relation_units()

    if not is_elected_leader(CLUSTER_RES) and is_ssl_cert_master():
        # Force and sync and trigger a sync master re-election since we are
        # not leader anymore.
        force_ssl_sync()
    else:
        CONFIGS.write_all()
def cluster_changed():
    """Handle a change on the 'cluster' peer relation.

    Re-renders all config files, then echoes the peer 'dbsync_state' key
    and resumes or pauses the local services depending on whether the
    database sync has completed, unless the unit is flagged as paused.
    Finally notifies quantum gateways so they pick up any change to the
    shared metadata secret.
    """
    CONFIGS.write_all()
    if hookenv.relation_ids('cluster'):
        # Propagate the dbsync state between peers before reading it back.
        ch_peerstorage.peer_echo(includes=['dbsync_state'])
        dbsync_state = ch_peerstorage.peer_retrieve('dbsync_state')
        if dbsync_state == 'complete':
            if not ch_utils.is_unit_paused_set():
                for svc in ncc_utils.services():
                    ch_host.service_resume(svc)
            else:
                hookenv.log('Unit is in paused state, not issuing '
                            'start/resume to all services')
        else:
            # Services must stay down until the database sync completes.
            if not ch_utils.is_unit_paused_set():
                hookenv.log('Database sync not ready. Shutting down services')
                for svc in ncc_utils.services():
                    ch_host.service_pause(svc)
            else:
                hookenv.log(
                    'Database sync not ready. Would shut down services but '
                    'unit is in paused state, not issuing stop/pause to all '
                    'services')
    # The shared metadata secret is stored in the leader-db and if it's
    # changed the gateway needs to know.
    for rid in hookenv.relation_ids('quantum-network-service'):
        quantum_joined(rid=rid, remote_restart=False)
Example #4
0
def cluster_changed(relation_id=None, remote_unit=None):
    """Handle a rabbitmq 'cluster' peer relation change.

    :param relation_id: relation id to read peer settings from
                        (defaults to the current hook context)
    :param remote_unit: remote unit to read peer settings from
                        (defaults to the current hook context)
    """
    # Future travelers beware ordering is significant
    rdata = relation_get(rid=relation_id, unit=remote_unit)

    # sync passwords
    blacklist = ['hostname', 'private-address', 'public-address']
    whitelist = [a for a in rdata.keys() if a not in blacklist]
    peer_echo(includes=whitelist)

    # The erlang cookie must be seeded before any clustering can happen.
    cookie = peer_retrieve('cookie')
    if not cookie:
        log('cluster_changed: cookie not yet set.', level=INFO)
        return

    if rdata:
        hostname = rdata.get('hostname', None)
        private_address = rdata.get('private-address', None)

        # Keep the hosts file in sync so peers resolve by hostname.
        if hostname and private_address:
            rabbit.update_hosts_file({private_address: hostname})

    # sync the cookie with peers if necessary
    update_cookie()

    # With hacluster managing clustering (and not vip-only), skip native
    # rabbitmq clustering entirely.
    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.', level=INFO)
        return

    # NOTE(freyes): all the nodes need to marked as 'clustered' (LP: #1691510)
    rabbit.cluster_with()

    if not is_leader():
        update_nrpe_checks()
def cluster_changed():
    """Handle a percona cluster peer change.

    Echoes all non-address peer settings back onto the relation, notifies
    bootstrap state, and re-runs the join/config logic; marks the unit
    seeded once the cluster is bootstrapped.
    """
    # Need to make sure hostname is excluded to build inclusion list (paying
    # attention to those excluded by default in peer_echo().
    # TODO(dosaboy): extend peer_echo() to support providing exclusion list as
    #                well as inclusion list.
    # NOTE(jamespage): deprecated - leader-election
    excluded = ('hostname', 'private-address', 'cluster-address',
                'public-address', 'ready')
    peer_data = relation_get()
    inc_list = [key for key in peer_data.keys() if key not in excluded]

    peer_echo(includes=inc_list)
    # NOTE(jamespage): deprecated - leader-election

    maybe_notify_bootstrapped()

    cluster_joined()
    config_changed()

    if is_bootstrapped() and not seeded():
        mark_seeded()
 def test_peer_echo_includes(self):
     """peer_echo() with an include list re-sets only the listed keys,
     even when is_leader() is unavailable (pre-leader-election Juju)."""
     # Simulate an older Juju without leadership support.
     peerstorage.is_leader.side_effect = NotImplementedError
     settings = {'key1': 'value1'}
     self._relation_get.copy.return_value = settings
     self._relation_get.return_value = settings
     peerstorage.peer_echo(['key1'])
     self._relation_set.assert_called_with(relation_settings=settings)
Example #7
0
def cluster_changed():
    """Re-echo password keys to peers, refresh identity relations and
    re-render config files."""
    # NOTE(jamespage) re-echo passwords for peer storage
    passwd_keys = ['_passwd', 'identity-service:']
    log("Peer echo whitelist: %s" % (passwd_keys), level=DEBUG)
    peer_echo(includes=passwd_keys, force=True)

    update_all_identity_relation_units()
    CONFIGS.write_all()
def cluster_changed():
    """Re-echo password keys to peers, refresh identity relations and
    re-render config files."""
    # NOTE(jamespage) re-echo passwords for peer storage
    echo_whitelist = ['_passwd', 'identity-service:']

    log("Peer echo whitelist: {}".format(echo_whitelist), level=DEBUG)
    peer_echo(includes=echo_whitelist, force=True)

    update_all_identity_relation_units()

    CONFIGS.write_all()
def cluster_changed():
    """Re-render configs, then start or stop services based on the
    cluster-wide database sync state."""
    CONFIGS.write_all()
    if not relation_ids('cluster'):
        return
    # Share our dbsync state with peers, then read the cluster view.
    peer_echo(includes=['dbsync_state'])
    if peer_retrieve('dbsync_state') == 'complete':
        enable_services()
        cmd_all_services('start')
    else:
        log('Database sync not ready. Shutting down services')
        disable_services()
        cmd_all_services('stop')
Example #10
0
def cluster_changed():
    """Re-render configs and gate local services on the peer dbsync state."""
    CONFIGS.write_all()
    if relation_ids('cluster'):
        # Share our dbsync state with peers, then read the cluster view.
        peer_echo(includes=['dbsync_state'])
        dbsync_state = peer_retrieve('dbsync_state')
        if dbsync_state == 'complete':
            enable_services()
            cmd_all_services('start')
        else:
            # Services must stay down until the leader finishes the db sync.
            log('Database sync not ready. Shutting down services')
            disable_services()
            cmd_all_services('stop')
def cluster_changed():
    """Handle a rabbitmq 'cluster' peer relation change.

    Echoes peer password settings, records peer hostnames, syncs the
    erlang cookie, clusters with peers where appropriate, and finally
    re-runs amqp_changed for every client so they pick up peer-db changes.
    """
    # Future travelers beware ordering is significant
    rdata = relation_get()
    # sync passwords
    blacklist = ['hostname', 'private-address', 'public-address']
    whitelist = [a for a in rdata.keys() if a not in blacklist]
    peer_echo(includes=whitelist)

    # The erlang cookie must be seeded before clustering can proceed.
    cookie = peer_retrieve('cookie')
    if not cookie:
        log('cluster_joined: cookie not yet set.', level=INFO)
        return

    rdata = relation_get()
    if rdata:
        hostname = rdata.get('hostname', None)
        private_address = rdata.get('private-address', None)

        # Keep the hosts file in sync so peers resolve by hostname.
        if hostname and private_address:
            rabbit.update_hosts_file({private_address: hostname})

    if not is_sufficient_peers():
        log('Not enough peers, waiting until leader is configured',
            level=INFO)
        return

    # sync the cookie with peers if necessary
    update_cookie()

    # With hacluster managing clustering (and not vip-only), skip native
    # rabbitmq clustering.
    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.', level=INFO)
        return

    # cluster with node?
    try:
        if not is_leader():
            rabbit.cluster_with()
            update_nrpe_checks()
    except NotImplementedError:
        # Older Juju without leadership support: fall back to unit age.
        if is_newer():
            rabbit.cluster_with()
            update_nrpe_checks()

    # If cluster has changed peer db may have changed so run amqp_changed
    # to sync any changes
    for rid in relation_ids('amqp'):
        for unit in related_units(rid):
            amqp_changed(relation_id=rid, remote_unit=unit)
def cluster_changed(relation_id=None, remote_unit=None):
    """Handle a rabbitmq 'cluster' peer relation change.

    Echoes peer password settings, records peer hostnames, syncs the
    erlang cookie and — once enough peers are present — clusters with
    peers, informs clients, and (on the leader) publishes the configured
    cluster mode via leader storage.

    :param relation_id: relation id to read peer settings from
                        (defaults to the current hook context)
    :param remote_unit: remote unit to read peer settings from
                        (defaults to the current hook context)
    """
    # Future travelers beware ordering is significant
    rdata = relation_get(rid=relation_id, unit=remote_unit)

    # sync passwords
    blacklist = ['hostname', 'private-address', 'public-address']
    whitelist = [a for a in rdata.keys() if a not in blacklist]
    peer_echo(includes=whitelist)

    # The erlang cookie must be seeded before clustering can proceed.
    cookie = peer_retrieve('cookie')
    if not cookie:
        log('cluster_changed: cookie not yet set.', level=INFO)
        return

    if rdata:
        hostname = rdata.get('hostname', None)
        private_address = rdata.get('private-address', None)

        # Keep the hosts file in sync so peers resolve by hostname.
        if hostname and private_address:
            rabbit.update_hosts_file({private_address: hostname})

    # sync the cookie with peers if necessary
    update_cookie()

    # With hacluster managing clustering (and not vip-only), skip native
    # rabbitmq clustering.
    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log(
            'hacluster relation is present, skipping native '
            'rabbitmq cluster config.',
            level=INFO)
        return

    if rabbit.is_sufficient_peers():
        # NOTE(freyes): all the nodes need to marked as 'clustered'
        # (LP: #1691510)
        rabbit.cluster_with()
        # Local rabbit maybe clustered now so check and inform clients if
        # needed.
        update_clients()
        if is_leader():
            # Publish any change to the configured cluster mode so peers
            # re-render their configs from leader storage.
            if (leader_get(rabbit.CLUSTER_MODE_KEY) != config(
                    rabbit.CLUSTER_MODE_KEY)):
                log("Informing peers via leaderdb to change {} to {}".format(
                    rabbit.CLUSTER_MODE_KEY, config(rabbit.CLUSTER_MODE_KEY)))
                leader_set(
                    {rabbit.CLUSTER_MODE_KEY: config(rabbit.CLUSTER_MODE_KEY)})
                rabbit.ConfigRenderer(rabbit.CONFIG_FILES).write_all()

    if not is_leader() and is_relation_made('nrpe-external-master'):
        update_nrpe_checks()
def cluster_changed():
    """Handle a cluster peer relation change.

    Echoes all peer settings except addressing attributes back onto the
    peer relation, then re-runs config_changed().
    """
    # Need to make sure hostname is excluded to build inclusion list (paying
    # attention to those excluded by default in peer_echo().
    # TODO(dosaboy): extend peer_echo() to support providing exclusion list as
    #                well as inclusion list.
    # NOTE(jamespage): deprecated - leader-election
    rdata = relation_get()
    # dict.iterkeys() was removed in Python 3; keys() works on both
    # Python 2 and 3 and matches the sibling hooks in this file.
    inc_list = [attr for attr in rdata.keys()
                if attr not in ['hostname', 'private-address',
                                'public-address']]
    peer_echo(includes=inc_list)
    # NOTE(jamespage): deprecated - leader-election

    config_changed()
def cluster_changed():
    """Handle a rabbitmq 'cluster' peer relation change (ipv6-aware
    variant).

    Waits for the erlang cookie to be seeded, records peer hostnames when
    prefer-ipv6 is set, echoes password settings, then syncs the cookie
    and clusters with peers; stops rabbitmq while there are too few peers.
    """
    # The erlang cookie must be seeded before clustering can proceed.
    cookie = peer_retrieve('cookie')
    if not cookie:
        log('cluster_joined: cookie not yet set.', level=INFO)
        return

    rdata = relation_get()
    # Under ipv6, peers must be resolvable by hostname via the hosts file.
    if config('prefer-ipv6') and rdata.get('hostname'):
        private_address = rdata['private-address']
        hostname = rdata['hostname']
        if hostname:
            rabbit.update_hosts_file({private_address: hostname})

    # sync passwords
    blacklist = ['hostname', 'private-address', 'public-address']
    whitelist = [a for a in rdata.keys() if a not in blacklist]
    peer_echo(includes=whitelist)

    if not is_sufficient_peers():
        # Stop rabbit until leader has finished configuring
        service_stop('rabbitmq-server')
        return

    # sync the cookie with peers if necessary
    update_cookie()

    # With hacluster managing clustering (and not vip-only), skip native
    # rabbitmq clustering.
    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.', level=INFO)
        return

    # cluster with node?
    try:
        if not is_leader():
            rabbit.cluster_with()
            update_nrpe_checks()
    except NotImplementedError:
        # Older Juju without leadership support: fall back to unit age.
        if is_newer():
            rabbit.cluster_with()
            update_nrpe_checks()

    # If cluster has changed peer db may have changed so run amqp_changed
    # to sync any changes
    for rid in relation_ids('amqp'):
        for unit in related_units(rid):
            amqp_changed(relation_id=rid, remote_unit=unit)
Example #15
0
def cluster_changed():
    """Handle a keystone cluster peer change.

    Re-authorises peer SSH access, re-echoes shared secrets across the
    peer relation (withholding ssl-cert-master on the leader to avoid
    interfering with a re-election), and forces an SSL sync when new
    peers still need certificates or when this non-leader unit wrongly
    remains SSL cert master.
    """
    unison.ssh_authorized_peers(user=SSH_USER,
                                group=SSH_USER,
                                peer_interface='cluster',
                                ensure_local_user=True)
    # NOTE(jamespage) re-echo passwords for peer storage
    echo_whitelist = [
        '_passwd', 'identity-service:', 'db-initialised',
        'ssl-cert-available-updates'
    ]
    # Don't echo if leader since a re-election may be in progress.
    if not is_leader():
        echo_whitelist.append('ssl-cert-master')

    log("Peer echo whitelist: %s" % (echo_whitelist), level=DEBUG)
    peer_echo(includes=echo_whitelist, force=True)

    check_peer_actions()

    initialise_pki()

    if is_leader():
        # Figure out if we need to mandate a sync
        units = get_ssl_sync_request_units()
        synced_units = relation_get_and_migrate(attribute='ssl-synced-units',
                                                unit=local_unit())
        diff = None
        if synced_units:
            synced_units = json.loads(synced_units)
            diff = set(units).symmetric_difference(set(synced_units))
    else:
        # NOTE(review): synced_units and diff are left unbound on this
        # path; the condition below avoids a NameError only because
        # `units` is None and short-circuits the `and`.
        units = None

    if units and (not synced_units or diff):
        log("New peers joined and need syncing - %s" % (', '.join(units)),
            level=DEBUG)
        update_all_identity_relation_units_force_sync()
    else:
        update_all_identity_relation_units()

    if not is_leader() and is_ssl_cert_master():
        # Force and sync and trigger a sync master re-election since we are not
        # leader anymore.
        force_ssl_sync()
    else:
        CONFIGS.write_all()
def cluster_changed(relation_id=None, remote_unit=None):
    """Handle a rabbitmq 'cluster' peer relation change.

    :param relation_id: relation id to read peer settings from
                        (defaults to the current hook context)
    :param remote_unit: remote unit to read peer settings from
                        (defaults to the current hook context)
    """
    # Future travelers beware ordering is significant
    rdata = relation_get(rid=relation_id, unit=remote_unit)

    # sync passwords
    blacklist = ['hostname', 'private-address', 'public-address']
    whitelist = [a for a in rdata.keys() if a not in blacklist]
    peer_echo(includes=whitelist)

    # The erlang cookie must be seeded before clustering can proceed.
    cookie = peer_retrieve('cookie')
    if not cookie:
        log('cluster_changed: cookie not yet set.', level=INFO)
        return

    if rdata:
        hostname = rdata.get('hostname', None)
        private_address = rdata.get('private-address', None)

        # Keep the hosts file in sync so peers resolve by hostname.
        if hostname and private_address:
            rabbit.update_hosts_file({private_address: hostname})

    # sync the cookie with peers if necessary
    update_cookie()

    # With hacluster managing clustering (and not vip-only), skip native
    # rabbitmq clustering.
    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.', level=INFO)
        return

    if rabbit.is_sufficient_peers():
        # NOTE(freyes): all the nodes need to marked as 'clustered' (LP: #1691510)
        rabbit.cluster_with()

    if not is_leader() and is_relation_made('nrpe-external-master'):
        update_nrpe_checks()
def cluster_changed():
    """Re-echo password entries stored on the peer relation."""
    secret_keys = ['.passwd']
    peer_echo(includes=secret_keys)
Example #18
0
 def test_peer_echo_includes(self):
     """peer_echo() with an include list re-sets only the listed keys."""
     peerstorage.peer_echo(['key1'])
     self.relation_set.assert_called_with(
         relation_settings={'key1': 'value1'})
Example #19
0
def cluster_changed():
    """Re-echo password entries stored on the peer relation."""
    # Echo any passwords placed on peer relation
    peer_echo(includes=['.passwd'])