Example #1
def finalize_migrate_nova_databases():
    if relation_ids('cluster'):
        log('Informing peers that dbsync is complete', level=INFO)
        peer_store('dbsync_state', 'complete')
    log('Enabling services', level=INFO)
    enable_services()
    cmd_all_services('start')
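The 'dbsync_state' flag written above is read back on peer units with peer_retrieve from the same charmhelpers.contrib.peerstorage module. A minimal sketch of that consumer side, assuming the import shown; the check_dbsync_complete helper is hypothetical and not part of the charm code in these examples:

from charmhelpers.contrib.peerstorage import peer_retrieve


def check_dbsync_complete():
    # Hypothetical helper: read the flag the leader published via
    # peer_store() above.
    try:
        return peer_retrieve('dbsync_state') == 'complete'
    except ValueError:
        # Cluster peer relation not yet ready.
        return False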
Example #2
def cluster_joined(relation_id=None):
    if config('prefer-ipv6'):
        relation_settings = {'hostname': socket.gethostname(),
                             'private-address': get_ipv6_addr()[0]}
        relation_set(relation_id=relation_id,
                     relation_settings=relation_settings)

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return

    configure_nodename()

    try:
        if not is_leader():
            log('Not the leader, deferring cookie propagation to leader')
            return
    except NotImplementedError:
        if is_newer():
            log('cluster_joined: Relation greater.')
            return

    if not os.path.isfile(rabbit.COOKIE_PATH):
        log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
            level=ERROR)
        return

    if not is_sufficient_peers():
        return

    if is_elected_leader('res_rabbitmq_vip'):
        cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
        peer_store('cookie', cookie)
Example #3
def cluster_joined():
    install_ceilometer_ocf()

    # If this node is the elected leader then share our secret with other nodes
    if is_elected_leader('grp_ceilometer_vips'):
        peer_store('shared_secret', get_shared_secret())

    CONFIGS.write_all()
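Non-leader units would pick the secret up from peer storage, typically in the cluster-changed hook. A minimal sketch, assuming peer_retrieve as used elsewhere in these examples; the cluster_changed body and the set_shared_secret helper are illustrative assumptions, not taken from the charm:

from charmhelpers.contrib.peerstorage import peer_retrieve


def cluster_changed():
    # Illustrative counterpart: read the secret that the elected leader
    # shared via peer_store() in cluster_joined() above.
    shared_secret = peer_retrieve('shared_secret')
    if shared_secret:
        set_shared_secret(shared_secret)  # assumed local helper (see lead-in)
    CONFIGS.write_all()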
Example #4
def _do_openstack_upgrade(new_src):
    enable_policy_rcd()
    cur_os_rel = os_release('nova-common')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]

    # NOTE(jamespage) pre-stamp neutron database before upgrade from grizzly
    if cur_os_rel == 'grizzly':
        neutron_db_manage(['stamp', 'grizzly'])

    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    apt_install(determine_packages(), fatal=True)

    disable_policy_rcd()

    if cur_os_rel == 'grizzly':
        # NOTE(jamespage) when upgrading from grizzly->havana, config
        # files need to be generated prior to performing the db upgrade
        reset_os_release()
        configs = register_configs(release=new_os_rel)
        configs.write_all()
        neutron_db_manage(['upgrade', 'head'])
    else:
        if new_os_rel < 'kilo':
            neutron_db_manage(['stamp', cur_os_rel])
            migrate_neutron_database()
        # NOTE(jamespage) upgrade with existing config files as the
        # havana->icehouse migration enables new service_plugins which
        # create issues with db upgrades
        reset_os_release()
        configs = register_configs(release=new_os_rel)
        configs.write_all()

    if new_os_rel >= 'mitaka' and not database_setup(prefix='novaapi'):
        # NOTE: Defer service restarts and database migrations for now
        #       as nova_api database is not yet created
        if (relation_ids('cluster') and
                is_elected_leader(CLUSTER_RES)):
            # NOTE: reset dbsync state so that migration will complete
            #       when the nova_api database is setup.
            peer_store('dbsync_state', None)
        return configs

    if is_elected_leader(CLUSTER_RES):
        status_set('maintenance', 'Running nova db migration')
        migrate_nova_database()
    if not is_unit_paused_set():
        [service_start(s) for s in services()]

    return configs
Example #5
def migrate_nova_database():
    '''Runs nova-manage to initialize a new database or migrate existing'''
    log('Migrating the nova database.', level=INFO)
    cmd = ['nova-manage', 'db', 'sync']
    subprocess.check_output(cmd)
    if relation_ids('cluster'):
        log('Informing peers that dbsync is complete', level=INFO)
        peer_store('dbsync_state', 'complete')
    log('Enabling services', level=INFO)
    enable_services()
    cmd_all_services('start')
Example #6
def migrate_nova_database():
    '''Runs nova-manage to initialize a new database or migrate existing'''
    log('Migrating the nova database.', level=INFO)
    cmd = ['nova-manage', 'db', 'sync']
    subprocess.check_output(cmd)
    if relation_ids('cluster'):
        log('Informing peers that dbsync is complete', level=INFO)
        peer_store('dbsync_state', 'complete')
    log('Enabling services', level=INFO)
    enable_services()
    cmd_all_services('start')
Example #7
def migrate_passwords_to_peer_relation():
    '''Migrate any passwords stored on disk to the cluster peer relation'''
    for f in glob.glob('/var/lib/charm/{}/*.passwd'.format(service_name())):
        _key = os.path.basename(f)
        with open(f, 'r') as passwd:
            _value = passwd.read().strip()
        try:
            peer_store(_key, _value)
            os.unlink(f)
        except ValueError:
            # NOTE cluster relation not yet ready - skip for now
            pass
Example #8
def _do_openstack_upgrade(new_src):
    enable_policy_rcd()
    # All upgrades to Liberty are forced to step through Kilo. Liberty does
    # not have the migrate_flavor_data option (Bug #1511466) available so it
    # must be done pre-upgrade
    if os_release('nova-common') == 'kilo' and is_elected_leader(CLUSTER_RES):
        migrate_nova_flavors()
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option',
        'Dpkg::Options::=--force-confnew',
        '--option',
        'Dpkg::Options::=--force-confdef',
    ]

    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    apt_install(determine_packages(), fatal=True)

    disable_policy_rcd()

    # NOTE(jamespage) upgrade with existing config files as the
    # havana->icehouse migration enables new service_plugins which
    # create issues with db upgrades
    reset_os_release()
    configs = register_configs(release=new_os_rel)
    configs.write_all()

    if new_os_rel >= 'mitaka' and not database_setup(prefix='novaapi'):
        # NOTE: Defer service restarts and database migrations for now
        #       as nova_api database is not yet created
        if (relation_ids('cluster') and is_elected_leader(CLUSTER_RES)):
            # NOTE: reset dbsync state so that migration will complete
            #       when the nova_api database is setup.
            peer_store('dbsync_state', None)
        return configs

    if is_elected_leader(CLUSTER_RES):
        status_set('maintenance', 'Running nova db migration')
        migrate_nova_database()
    if not is_unit_paused_set():
        [service_start(s) for s in services()]

    return configs
Example #9
def get_rabbit_password(username, password=None, local=False):
    ''' Retrieve, generate or store a rabbit password for
    the provided username via the cluster peer relation'''
    if local:
        return get_rabbit_password_on_disk(username, password, local)
    else:
        migrate_passwords_to_peer_relation()
        _key = '{}.passwd'.format(username)
        try:
            _password = peer_retrieve(_key)
            if _password is None:
                _password = password or pwgen(length=64)
                peer_store(_key, _password)
        except ValueError:
            # cluster relation is not yet started, use on-disk
            _password = get_rabbit_password_on_disk(username, password)
        return _password
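As a usage sketch of the helper above (the 'nova' username is purely illustrative): calling it returns the stored password for that AMQP user, generating and peer-storing one if none exists yet, or falling back to the on-disk copy while the cluster relation is not ready.

# Illustrative call, not taken from the charm:
nova_password = get_rabbit_password('nova')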
Example #10
def cluster_joined(relation_id=None):
    # If this node is the elected leader then share our secret with other nodes
    if is_elected_leader('grp_ceilometer_vips'):
        peer_store('shared_secret', get_shared_secret())

    CONFIGS.write_all()

    settings = {}

    for addr_type in ADDRESS_TYPES:
        address = get_relation_ip(addr_type,
                                  cidr_network=config(
                                      'os-{}-network'.format(addr_type)))
        if address:
            settings['{}-address'.format(addr_type)] = address

    settings['private-address'] = get_relation_ip('cluster')

    relation_set(relation_id=relation_id, relation_settings=settings)
Example #11
    def migrate_passwords_to_peer_relation(self, excludes=None):
        """Migrate any passwords storage on disk to cluster peer relation."""
        dirname = os.path.dirname(self.root_passwd_file_template)
        path = os.path.join(dirname, '*.passwd')
        for f in glob.glob(path):
            if excludes and f in excludes:
                log("Excluding %s from peer migration" % (f), level=DEBUG)
                continue

            _key = os.path.basename(f)
            with open(f, 'r') as passwd:
                _value = passwd.read().strip()

            try:
                peer_store(_key, _value)
                if self.delete_ondisk_passwd_file:
                    os.unlink(f)
            except ValueError:
                # NOTE cluster relation not yet ready - skip for now
                pass
Example #12
    def migrate_passwords_to_peer_relation(self, excludes=None):
        """Migrate any passwords storage on disk to cluster peer relation."""
        dirname = os.path.dirname(self.root_passwd_file_template)
        path = os.path.join(dirname, '*.passwd')
        for f in glob.glob(path):
            if excludes and f in excludes:
                log("Excluding %s from peer migration" % (f), level=DEBUG)
                continue

            _key = os.path.basename(f)
            with open(f, 'r') as passwd:
                _value = passwd.read().strip()

            try:
                peer_store(_key, _value)
                if self.delete_ondisk_passwd_file:
                    os.unlink(f)
            except ValueError:
                # NOTE cluster relation not yet ready - skip for now
                pass
Example #13
def cluster_joined(relation_id=None):
    relation_settings = {
        'hostname': rabbit.get_unit_hostname(),
        'private-address':
            ch_ip.get_relation_ip(
                rabbit_net_utils.CLUSTER_INTERFACE,
                cidr_network=config(rabbit_net_utils.CLUSTER_OVERRIDE_CONFIG)),
    }

    relation_set(relation_id=relation_id,
                 relation_settings=relation_settings)

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return

    try:
        if not is_leader():
            log('Not the leader, deferring cookie propagation to leader')
            return
    except NotImplementedError:
        if is_newer():
            log('cluster_joined: Relation greater.')
            return

    if not os.path.isfile(rabbit.COOKIE_PATH):
        log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
            level=ERROR)
        return

    if is_leader():
        log('Leader peer_storing cookie', level=INFO)
        cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
        peer_store('cookie', cookie)
        peer_store('leader_node_ip', unit_private_ip())
        peer_store('leader_node_hostname', rabbit.get_unit_hostname())
Example #14
def cluster_joined(relation_id=None):
    relation_settings = {
        'hostname':
        rabbit.get_unit_hostname(),
        'private-address':
        ch_ip.get_relation_ip(rabbit_net_utils.CLUSTER_INTERFACE,
                              cidr_network=config(
                                  rabbit_net_utils.CLUSTER_OVERRIDE_CONFIG)),
    }

    relation_set(relation_id=relation_id, relation_settings=relation_settings)

    if is_relation_made('ha') and \
            config('ha-vip-only') is False:
        log('hacluster relation is present, skipping native '
            'rabbitmq cluster config.')
        return

    try:
        if not is_leader():
            log('Not the leader, deferring cookie propagation to leader')
            return
    except NotImplementedError:
        if is_newer():
            log('cluster_joined: Relation greater.')
            return

    if not os.path.isfile(rabbit.COOKIE_PATH):
        log('erlang cookie missing from %s' % rabbit.COOKIE_PATH, level=ERROR)
        return

    if is_leader():
        log('Leader peer_storing cookie', level=INFO)
        cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
        peer_store('cookie', cookie)
        peer_store('leader_node_ip', unit_private_ip())
        peer_store('leader_node_hostname', rabbit.get_unit_hostname())
Example #15
def _do_openstack_upgrade(new_src):
    enable_policy_rcd()
    # All upgrades to Liberty are forced to step through Kilo. Liberty does
    # not have the migrate_flavor_data option (Bug #1511466) available so it
    # must be done pre-upgrade
    if (CompareOpenStackReleases(os_release('nova-common')) == 'kilo'
            and is_leader()):
        migrate_nova_flavors()

    # 'nova-manage db online_data_migrations' needs to be run before moving to
    # the next release for environments upgraded using old charms where this
    # step was not being executed (LP: #1711209).
    online_data_migrations_if_needed()

    new_os_rel = get_os_codename_install_source(new_src)
    cmp_new_os_rel = CompareOpenStackReleases(new_os_rel)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    dpkg_opts = [
        '--option',
        'Dpkg::Options::=--force-confnew',
        '--option',
        'Dpkg::Options::=--force-confdef',
    ]

    apt_update(fatal=True)
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    reset_os_release()
    apt_install(determine_packages(), fatal=True)

    disable_policy_rcd()

    # NOTE(jamespage) upgrade with existing config files as the
    # havana->icehouse migration enables new service_plugins which
    # create issues with db upgrades
    configs = register_configs(release=new_os_rel)
    configs.write_all()

    if cmp_new_os_rel >= 'mitaka' and not database_setup(prefix='novaapi'):
        # NOTE: Defer service restarts and database migrations for now
        #       as nova_api database is not yet created
        if (relation_ids('cluster') and is_leader()):
            # NOTE: reset dbsync state so that migration will complete
            #       when the nova_api database is setup.
            peer_store('dbsync_state', None)
        return configs

    if cmp_new_os_rel >= 'ocata' and not database_setup(prefix='novacell0'):
        # NOTE: Defer service restarts and database migrations for now
        #       as nova_cell0 database is not yet created
        if (relation_ids('cluster') and is_leader()):
            # NOTE: reset dbsync state so that migration will complete
            #       when the novacell0 database is setup.
            peer_store('dbsync_state', None)
        return configs

    if is_leader():
        status_set('maintenance', 'Running nova db migration')
        migrate_nova_databases()

    if not is_unit_paused_set():
        [service_start(s) for s in services()]

    return configs
Example #16
    def test_peer_store_with_relation(self):
        self.relation_ids.return_value = FAKE_RELATION_IDS
        peerstorage.peer_store('key', 'value', self.fake_relation_name)
        self.relation_set.assert_called_with(
            relation_id=FAKE_RELATION_IDS[0],
            relation_settings={'key': 'value'})
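A companion check, sketched under the assumption that peer_store raises ValueError when no peer relation exists yet (the condition the charm code above handles wherever it catches ValueError); test_peer_store_no_relation is a hypothetical name, not part of the original suite:

    def test_peer_store_no_relation(self):
        # Hypothetical companion test: with no peer relation ids available,
        # peer_store has nowhere to write and is expected to raise ValueError,
        # which the charm examples above catch and treat as "cluster relation
        # not yet ready".
        self.relation_ids.return_value = []
        self.assertRaises(ValueError, peerstorage.peer_store,
                          'key', 'value', self.fake_relation_name)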