Example #1
    def __call__(self):
        ctxt = super(IdentityServiceContext, self).__call__()
        if not ctxt:
            return

        if cmp_pkgrevno('radosgw', "10.2.0") >= 0:
            ctxt['auth_keystone_v3_supported'] = True

        if (not ctxt.get('admin_domain_id')
                and float(ctxt.get('api_version', '2.0')) < 3):
            ctxt.pop('admin_domain_id')

        ctxt['auth_type'] = 'keystone'
        if cmp_pkgrevno('radosgw', "11.0.0") >= 0:
            ctxt['user_roles'] = config('operator-roles')
            ctxt['admin_roles'] = config('admin-roles')
        else:
            ctxt['user_roles'] = config('operator-roles')
            if config('admin-roles'):
                ctxt['user_roles'] += (',' + config('admin-roles'))
        ctxt['cache_size'] = config('cache-size')
        ctxt['namespace_tenants'] = leader_get('namespace_tenants') == 'True'
        if self.context_complete(ctxt):
            return ctxt
        return {}
Example #2
    def enable_ssl(self,
                   ssl_key,
                   ssl_cert,
                   ssl_port,
                   ssl_ca=None,
                   ssl_only=False,
                   ssl_client=None):

        if not os.path.exists(RABBITMQ_CTL):
            log('Deferring SSL configuration, RabbitMQ not yet installed')
            return {}

        uid = pwd.getpwnam("root").pw_uid
        gid = grp.getgrnam("rabbitmq").gr_gid

        for contents, path in ((ssl_key, SSL_KEY_FILE),
                               (ssl_cert, SSL_CERT_FILE), (ssl_ca,
                                                           SSL_CA_FILE)):

            if not contents:
                continue

            with open(path, 'w') as fh:
                fh.write(contents)

            if path == SSL_CA_FILE:
                # the CA can be world readable and it will allow clients to
                # verify the certificate offered by rabbit.
                os.chmod(path, 0o644)
            else:
                os.chmod(path, 0o640)

            os.chown(path, uid, gid)

        data = {
            "ssl_port": ssl_port,
            "ssl_cert_file": SSL_CERT_FILE,
            "ssl_key_file": SSL_KEY_FILE,
            "ssl_client": ssl_client,
            "ssl_ca_file": "",
            "ssl_only": ssl_only,
            "tls13": (cmp_pkgrevno('erlang-base', '23.0') >= 0
                      and cmp_pkgrevno('rabbitmq-server', '3.8.11') >= 0),
        }

        if ssl_ca:
            data["ssl_ca_file"] = SSL_CA_FILE

        return data
Example #3
def osdize_dev(dev,
               osd_format,
               osd_journal,
               reformat_osd=False,
               ignore_errors=False,
               encrypt=False):
    if not os.path.exists(dev):
        log('Path {} does not exist - bailing'.format(dev))
        return

    if not is_block_device(dev):
        log('Path {} is not a block device - bailing'.format(dev))
        return

    if (is_osd_disk(dev) and not reformat_osd):
        log('Looks like {} is already an'
            ' OSD data or journal, skipping.'.format(dev))
        return

    if is_device_mounted(dev):
        log('Looks like {} is in use, skipping.'.format(dev))
        return

    status_set('maintenance', 'Initializing device {}'.format(dev))
    cmd = ['ceph-disk', 'prepare']
    # Later versions of ceph support more options
    if cmp_pkgrevno('ceph', '0.60') >= 0:
        if encrypt:
            cmd.append('--dmcrypt')
    if cmp_pkgrevno('ceph', '0.48.3') >= 0:
        if osd_format:
            cmd.append('--fs-type')
            cmd.append(osd_format)
        if reformat_osd:
            cmd.append('--zap-disk')
        cmd.append(dev)
        if osd_journal:
            least_used = find_least_used_journal(osd_journal)
            cmd.append(least_used)
    else:
        # Just provide the device - no other options
        # for older versions of ceph
        cmd.append(dev)
        if reformat_osd:
            zap_disk(dev)

    try:
        log("osdize cmd: {}".format(cmd))
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError as e:
        if ignore_errors:
            log('Unable to initialize device: {}'.format(dev), WARNING)
        else:
            log('Unable to initialize device: {}'.format(dev), ERROR)
            raise e
Example #4
def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False,
               ignore_errors=False, encrypt=False):
    if not os.path.exists(dev):
        log('Path {} does not exist - bailing'.format(dev))
        return

    if not is_block_device(dev):
        log('Path {} is not a block device - bailing'.format(dev))
        return

    if (is_osd_disk(dev) and not reformat_osd):
        log('Looks like {} is already an'
            ' OSD data or journal, skipping.'.format(dev))
        return

    if is_device_mounted(dev):
        log('Looks like {} is in use, skipping.'.format(dev))
        return

    status_set('maintenance', 'Initializing device {}'.format(dev))
    cmd = ['ceph-disk', 'prepare']
    # Later versions of ceph support more options
    if cmp_pkgrevno('ceph', '0.60') >= 0:
        if encrypt:
            cmd.append('--dmcrypt')
    if cmp_pkgrevno('ceph', '0.48.3') >= 0:
        if osd_format:
            cmd.append('--fs-type')
            cmd.append(osd_format)
        if reformat_osd:
            cmd.append('--zap-disk')
        cmd.append(dev)
        if osd_journal:
            least_used = find_least_used_journal(osd_journal)
            cmd.append(least_used)
    else:
        # Just provide the device - no other options
        # for older versions of ceph
        cmd.append(dev)
        if reformat_osd:
            zap_disk(dev)

    try:
        log("osdize cmd: {}".format(cmd))
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError as e:
        if ignore_errors:
            log('Unable to initialize device: {}'.format(dev), WARNING)
        else:
            log('Unable to initialize device: {}'.format(dev), ERROR)
            raise e
Example #5
    def test_cmp_pkgrevno_revnos(self, pkg_cache):
        class MockPackage:
            class MockPackageRevno:
                def __init__(self, ver_str):
                    self.ver_str = ver_str

            def __init__(self, current_ver):
                self.current_ver = self.MockPackageRevno(current_ver)

        pkg_dict = {'python': MockPackage('2.4')}
        pkg_cache.return_value = pkg_dict
        self.assertEqual(host.cmp_pkgrevno('python', '2.3'), 1)
        self.assertEqual(host.cmp_pkgrevno('python', '2.4'), 0)
        self.assertEqual(host.cmp_pkgrevno('python', '2.5'), -1)
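The test above pins down the return convention for cmp_pkgrevno: 1 when the installed package is newer than the candidate version string, 0 when equal, and -1 when older. A minimal usage sketch built on that convention (assuming charmhelpers is importable on the unit and the queried package is installed; supports_bluestore is an illustrative name, not part of any charm):

from charmhelpers.core.host import cmp_pkgrevno


def supports_bluestore():
    # Installed ceph is at least 12.2.0 when the comparison returns 0 or 1,
    # per the 1/0/-1 convention asserted in the test above.
    return cmp_pkgrevno('ceph', '12.2.0') >= 0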
Example #6
def emit_cephconf():
    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)

    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)

    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address

    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(),
          group=ceph.ceph_user())
    render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)
Example #7
def prometheus_relation(relid=None,
                        unit=None,
                        prometheus_permitted=None,
                        module_enabled=None):
    if not ceph.is_bootstrapped():
        return
    if prometheus_permitted is None:
        prometheus_permitted = cmp_pkgrevno('ceph', '12.2.0') >= 0
    if module_enabled is None:
        module_enabled = (is_mgr_module_enabled('prometheus')
                          or mgr_enable_module('prometheus'))
    log("checking if prometheus module is enabled")
    if prometheus_permitted and module_enabled:
        log("Updating prometheus")
        data = {
            'hostname': get_relation_ip('prometheus'),
            'port': 9283,
        }
        relation_set(relation_id=relid, relation_settings=data)
    else:
        log("Couldn't enable prometheus, but are related. "
            "Prometheus is available in Ceph version: {} ; "
            "Prometheus Module is enabled: {}".format(prometheus_permitted,
                                                      module_enabled),
            level=WARNING)
Example #8
def upgrade_charm():
    emit_cephconf()
    apt_install(packages=filter_installed_packages(ceph.determine_packages()),
                fatal=True)
    try:
        # we defer and explicitly run `ceph-create-keys` from
        # add_keyring_to_ceph() as part of bootstrap process
        # LP: #1719436.
        service_pause('ceph-create-keys')
    except ValueError:
        pass
    ceph.update_monfs()
    mon_relation_joined()
    if is_relation_made("nrpe-external-master"):
        update_nrpe_config()
    if not ceph.monitor_key_exists('admin', 'autotune'):
        autotune = config('pg-autotune')
        if (cmp_pkgrevno('ceph', '14.2.0') >= 0
                and (autotune == 'true' or autotune == 'auto')):
            ceph.monitor_key_set('admin', 'autotune', 'true')
        else:
            ceph.monitor_key_set('admin', 'autotune', 'false')

    # NOTE(jamespage):
    # Reprocess broker requests to ensure that any cephx
    # key permission changes are applied
    notify_client()
    notify_radosgws()
    notify_rbd_mirrors()
    notify_prometheus()
Example #9
def emit_cephconf():
    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)

    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)

    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address

    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf),
          owner=ceph.ceph_user(),
          group=ceph.ceph_user())
    render('ceph.conf', charm_ceph_conf, cephcontext, perms=0o644)
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf', charm_ceph_conf,
                        100)
Example #10
    def __call__(self):
        ctxt = super(IdentityServiceContext, self).__call__()
        if not ctxt:
            return

        ctxt['admin_token'] = None
        for relid in relation_ids('identity-service'):
            for unit in related_units(relid):
                if not ctxt.get('admin_token'):
                    ctxt['admin_token'] = \
                        relation_get('admin_token', unit, relid)

        if cmp_pkgrevno('radosgw', "10.2.0") >= 0:
            ctxt['auth_keystone_v3_supported'] = True

        if (not ctxt.get('admin_domain_id') and
                float(ctxt.get('api_version', '2.0')) < 3):
            ctxt.pop('admin_domain_id')

        ctxt['auth_type'] = 'keystone'
        ctxt['user_roles'] = config('operator-roles')
        ctxt['cache_size'] = config('cache-size')
        if self.context_complete(ctxt):
            return ctxt
        return {}
Example #11
def get_ceph_context():
    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)

    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)

    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
        'dio': str(config('use-direct-io')).lower(),
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address
    else:
        cephcontext['public_addr'] = get_public_addr()
        cephcontext['cluster_addr'] = get_cluster_addr()

    # NOTE(dosaboy): these sections must correspond to what is supported in the
    #                config template.
    sections = ['global', 'mds', 'mon']
    cephcontext.update(CephConfContext(permitted_sections=sections)())
    return cephcontext
Example #12
def emit_cephconf():
    cephcontext = {
        'auth_supported': config('auth-supported'),
        'mon_hosts': ' '.join(get_mon_hosts()),
        'fsid': leader_get('fsid'),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': config('ceph-public-network'),
        'ceph_cluster_network': config('ceph-cluster-network'),
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not config('ceph-public-network'):
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not config('ceph-cluster-network'):
            cephcontext['cluster_addr'] = dynamic_ipv6_address

    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf))
    with open(charm_ceph_conf, 'w') as cephconf:
        cephconf.write(render_template('ceph.conf', cephcontext))
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 100)
Example #13
def config_changed():
    # Get the cfg object so we can see if the no-bootstrap value has changed
    # and triggered this hook invocation
    cfg = config()
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    check_for_upgrade()

    log('Monitor hosts are ' + repr(get_mon_hosts()))

    sysctl_dict = config('sysctl')
    if sysctl_dict:
        create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')
    if relations_of_type('nrpe-external-master'):
        update_nrpe_config()

    if is_leader():
        if not config('no-bootstrap'):
            if not leader_get('fsid') or not leader_get('monitor-secret'):
                if config('fsid'):
                    fsid = config('fsid')
                else:
                    fsid = "{}".format(uuid.uuid1())
                if config('monitor-secret'):
                    mon_secret = config('monitor-secret')
                else:
                    mon_secret = "{}".format(ceph.generate_monitor_secret())
                status_set('maintenance', 'Creating FSID and Monitor Secret')
                opts = {
                    'fsid': fsid,
                    'monitor-secret': mon_secret,
                }
                log("Settings for the cluster are: {}".format(opts))
                leader_set(opts)
        elif cfg.changed('no-bootstrap') and \
                is_relation_made('bootstrap-source'):
            # User changed the no-bootstrap config option, we're the leader,
            # and the bootstrap-source relation has been made. The charm should
            # be in a blocked state indicating that the no-bootstrap option
            # must be set. This block is invoked when the user is trying to
            # get out of that scenario by enabling no-bootstrap.
            bootstrap_source_relation_changed()
    elif leader_get('fsid') is None or leader_get('monitor-secret') is None:
        log('still waiting for leader to setup keys')
        status_set('waiting', 'Waiting for leader to setup keys')
        sys.exit(0)

    emit_cephconf()

    # Support use of single node ceph
    if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1
            and is_leader()):
        status_set('maintenance', 'Bootstrapping single Ceph MON')
        ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
        ceph.wait_for_bootstrap()
        if cmp_pkgrevno('ceph', '12.0.0') >= 0:
            status_set('maintenance', 'Bootstrapping single Ceph MGR')
            ceph.bootstrap_manager()
Example #14
def get_ceph_context(upgrading=False):
    """Returns the current context dictionary for generating ceph.conf

    :param upgrading: bool - determines if the context is invoked as
                      part of an upgrade procedure. Setting this to true
                      causes settings useful during an upgrade to be
                      defined in the ceph.conf file
    """
    mon_hosts = get_mon_hosts()
    log('Monitor hosts are ' + repr(mon_hosts))

    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)

    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)

    cephcontext = {
        'auth_supported': get_auth(),
        'mon_hosts': ' '.join(mon_hosts),
        'fsid': get_fsid(),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
        'dio': str(config('use-direct-io')).lower(),
        'short_object_len': use_short_objects(),
        'upgrade_in_progress': upgrading,
        'bluestore': config('bluestore'),
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address
    else:
        cephcontext['public_addr'] = get_public_addr()
        cephcontext['cluster_addr'] = get_cluster_addr()

    if config('customize-failure-domain'):
        az = az_info()
        if az:
            cephcontext['crush_location'] = "root=default {} host={}" \
                .format(az, socket.gethostname())
        else:
            log(
                "Your Juju environment doesn't"
                "have support for Availability Zones"
            )

    # NOTE(dosaboy): these sections must correspond to what is supported in the
    #                config template.
    sections = ['global', 'osd']
    cephcontext.update(CephConfContext(permitted_sections=sections)())
    return cephcontext
Example #15
def start_osds(devices):
    # Scan for ceph block devices
    rescan_osd_devices()
    if cmp_pkgrevno('ceph', "0.56.6") >= 0:
        # Use ceph-disk activate for directory based OSD's
        for dev_or_path in devices:
            if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path):
                subprocess.check_call(['ceph-disk', 'activate', dev_or_path])
Example #16
def db_migration():
    if cmp_pkgrevno('python-django', '1.9') >= 0:
        # syncdb was removed in django 1.9
        subcommand = 'migrate'
    else:
        subcommand = 'syncdb'
    cmd = ['/usr/share/openstack-dashboard/manage.py', subcommand, '--noinput']
    subprocess.check_call(cmd)
Example #17
def use_bluestore():
    """Determine whether bluestore should be used for OSD's

    :returns: whether bluestore disk format should be used
    :rtype: bool"""
    if cmp_pkgrevno('ceph', '12.2.0') < 0:
        return False
    return config('bluestore')
Example #18
def use_bluestore():
    """Determine whether bluestore should be used for OSD's

    :returns: whether bluestore disk format should be used
    :rtype: bool"""
    if cmp_pkgrevno('ceph', '12.2.0') < 0:
        return False
    return config('bluestore')
Example #19
def start_osds(devices):
    # Scan for ceph block devices
    rescan_osd_devices()
    if cmp_pkgrevno('ceph', "0.56.6") >= 0:
        # Use ceph-disk activate for directory based OSD's
        for dev_or_path in devices:
            if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path):
                subprocess.check_call(['ceph-disk', 'activate', dev_or_path])
Example #20
def start():
    # In case we're being redeployed to the same machines, try
    # to make sure everything is running as soon as possible.
    if ceph.systemd():
        service_restart('ceph-mon')
    else:
        service_restart('ceph-mon-all')
    if cmp_pkgrevno('ceph', '12.0.0') >= 0:
        service_restart('ceph-mgr@{}'.format(socket.gethostname()))
Example #21
def notify_prometheus():
    if relation_ids('prometheus') and ceph.is_bootstrapped():
        prometheus_permitted = cmp_pkgrevno('ceph', '12.2.0') >= 0
        module_enabled = (is_mgr_module_enabled('prometheus')
                          or mgr_enable_module('prometheus'))
    for relid in relation_ids('prometheus'):
        for unit in related_units(relid):
            prometheus_relation(relid=relid,
                                unit=unit,
                                prometheus_permitted=prometheus_permitted,
                                module_enabled=module_enabled)
Example #22
def get_optional_interfaces():
    """Return the optional interfaces that should be checked if the relavent
    relations have appeared.
    :returns: {general_interface: [specific_int1, specific_int2, ...], ...}
    """
    optional_interfaces = {}
    if relation_ids('ha'):
        optional_interfaces['ha'] = ['cluster']
    if (cmp_pkgrevno('radosgw', '0.55') >= 0
            and relation_ids('identity-service')):
        optional_interfaces['identity'] = ['identity-service']
    return optional_interfaces
Example #23
def osdize_dir(path, encrypt=False):
    if os.path.exists(os.path.join(path, 'upstart')):
        log('Path {} is already configured as an OSD - bailing'.format(path))
        return

    if cmp_pkgrevno('ceph', "0.56.6") < 0:
        log('Unable to use directories for OSDs with ceph < 0.56.6',
            level=ERROR)
        raise

    mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755)
    chownr('/var/lib/ceph', ceph_user(), ceph_user())
    cmd = [
        'sudo', '-u',
        ceph_user(), 'ceph-disk', 'prepare', '--data-dir', path
    ]
    if cmp_pkgrevno('ceph', '0.60') >= 0:
        if encrypt:
            cmd.append('--dmcrypt')
    log("osdize dir cmd: {}".format(cmd))
    subprocess.check_call(cmd)
Example #24
def mon_relation():
    if leader_get('monitor-secret') is None:
        log('still waiting for leader to setup keys')
        status_set('waiting', 'Waiting for leader to setup keys')
        return
    emit_cephconf()

    moncount = int(config('monitor-count'))
    if len(get_mon_hosts()) >= moncount:
        if not ceph.is_bootstrapped():
            status_set('maintenance', 'Bootstrapping MON cluster')
            # the following call raises an exception
            # if it can't add the keyring
            try:
                ceph.bootstrap_monitor_cluster(leader_get('monitor-secret'))
            except FileNotFoundError as e:  # NOQA -- PEP8 is still PY2
                log("Couldn't bootstrap the monitor yet: {}".format(str(e)))
                exit(0)
            ceph.wait_for_bootstrap()
            ceph.wait_for_quorum()
            if cmp_pkgrevno('ceph', '12.0.0') >= 0:
                status_set('maintenance', 'Bootstrapping Ceph MGR')
                ceph.bootstrap_manager()
            # If we can and want to
            if is_leader() and config('customize-failure-domain'):
                # But only if the environment supports it
                if os.environ.get('JUJU_AVAILABILITY_ZONE'):
                    cmds = [
                        "ceph osd getcrushmap -o /tmp/crush.map",
                        "crushtool -d /tmp/crush.map| "
                        "sed 's/step chooseleaf firstn 0 type host/step "
                        "chooseleaf firstn 0 type rack/' > "
                        "/tmp/crush.decompiled",
                        "crushtool -c /tmp/crush.decompiled -o /tmp/crush.map",
                        "crushtool -i /tmp/crush.map --test",
                        "ceph osd setcrushmap -i /tmp/crush.map"
                    ]
                    for cmd in cmds:
                        try:
                            subprocess.check_call(cmd, shell=True)
                        except subprocess.CalledProcessError as e:
                            log("Failed to modify crush map:", level='error')
                            log("Cmd: {}".format(cmd), level='error')
                            log("Error: {}".format(e.output), level='error')
                            break
                else:
                    log("Your Juju environment doesn't"
                        "have support for Availability Zones")
            notify_osds()
            notify_radosgws()
            notify_client()
    else:
        log('Not enough mons ({}), punting.'.format(len(get_mon_hosts())))
Example #25
def get_optional_interfaces():
    """Return the optional interfaces that should be checked if the relavent
    relations have appeared.
    :returns: {general_interface: [specific_int1, specific_int2, ...], ...}
    """
    optional_interfaces = {}
    if relation_ids('ha'):
        optional_interfaces['ha'] = ['cluster']
    if (cmp_pkgrevno('radosgw', '0.55') >= 0 and
            relation_ids('identity-service')):
        optional_interfaces['identity'] = ['identity-service']
    return optional_interfaces
Example #26
def register_configs(release='icehouse'):
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    CONFIGS = resource_map()
    pkg = 'radosgw'
    if not filter_installed_packages([pkg]) and cmp_pkgrevno(pkg, '0.55') >= 0:
        # Add keystone configuration if found
        CONFIGS[CEPH_CONF]['contexts'].append(
            ceph_radosgw_context.IdentityServiceContext())
    for cfg, rscs in CONFIGS.items():
        configs.register(cfg, rscs['contexts'])
    return configs
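The guard in the example above pairs filter_installed_packages() with cmp_pkgrevno() so the version comparison only runs once the package is actually installed. A sketch of that pattern under the same assumption that charmhelpers is available (radosgw_supports_keystone is an illustrative name, not part of the charm):

from charmhelpers.core.host import cmp_pkgrevno
from charmhelpers.fetch import filter_installed_packages


def radosgw_supports_keystone():
    pkg = 'radosgw'
    # filter_installed_packages() returns the packages still missing, so an
    # empty result means radosgw is already installed and has a version.
    if filter_installed_packages([pkg]):
        return False
    return cmp_pkgrevno(pkg, '0.55') >= 0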
Example #27
def prometheus_scrape_joined(relation_id=None, remote_unit=None):
    """
    scrape relation joined
    enable prometheus plugin and open port
    """
    err_msg = "rabbitmq-server needs to be >= 3.8 to support Prometheus plugin"
    if cmp_pkgrevno('rabbitmq-server', '3.8.0') < 0:
        log(err_msg, level=WARNING)
        status_set("blocked", err_msg)
        raise Exception(err_msg)
    rabbit.enable_plugin(PROM_PLUGIN)
    open_port(RMQ_MON_PORT)
    relation_set(relation_id, relation_settings={"port": RMQ_MON_PORT})
Example #28
def series_upgrade_complete():
    log("Running complete series upgrade hook", "INFO")
    # NOTE(jamespage): If a newer RMQ version is
    # installed and the old style configuration file
    # is still on disk, remove before re-rendering
    # any new configuration.
    if (os.path.exists(rabbit.RABBITMQ_CONFIG)
            and cmp_pkgrevno('rabbitmq-server', '3.7') >= 0):
        os.remove(rabbit.RABBITMQ_CONFIG)
        rabbit.ConfigRenderer(rabbit.CONFIG_FILES()).write_all()
    clear_unit_paused()
    clear_unit_upgrading()
    rabbit.resume_unit_helper(rabbit.ConfigRenderer(rabbit.CONFIG_FILES()))
Example #29
def register_configs(release='icehouse'):
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    CONFIGS = resource_map()
    pkg = 'radosgw'
    if not filter_installed_packages([pkg]) and cmp_pkgrevno(pkg, '0.55') >= 0:
        # Add keystone configuration if found
        CONFIGS[CEPH_CONF]['contexts'].append(
            ceph_radosgw_context.IdentityServiceContext()
        )
    for cfg, rscs in CONFIGS.iteritems():
        configs.register(cfg, rscs['contexts'])
    return configs
Example #30
def identity_joined(relid=None):
    if cmp_pkgrevno('radosgw', '0.55') < 0:
        log('Integration with keystone requires ceph >= 0.55')
        sys.exit(1)

    port = listen_port()
    admin_url = '%s:%i/swift' % (canonical_url(CONFIGS, ADMIN), port)
    if leader_get('namespace_tenants') == 'True':
        internal_url = '%s:%s/swift/v1/AUTH_$(project_id)s' % \
            (canonical_url(CONFIGS, INTERNAL), port)
        public_url = '%s:%s/swift/v1/AUTH_$(project_id)s' % \
            (canonical_url(CONFIGS, PUBLIC), port)
    else:
        internal_url = '%s:%s/swift/v1' % \
            (canonical_url(CONFIGS, INTERNAL), port)
        public_url = '%s:%s/swift/v1' % \
            (canonical_url(CONFIGS, PUBLIC), port)
    roles = [x for x in [config('operator-roles'), config('admin-roles')] if x]
    requested_roles = ''
    if roles:
        requested_roles = ','.join(roles) if len(roles) > 1 else roles[0]
    relation_set(swift_service='swift',
                 swift_region=config('region'),
                 swift_public_url=public_url,
                 swift_internal_url=internal_url,
                 swift_admin_url=admin_url,
                 requested_roles=requested_roles,
                 relation_id=relid)
    if cmp_pkgrevno('radosgw', '12.2') >= 0:
        relation_set(
            s3_service='s3',
            s3_region=config('region'),
            s3_public_url='{}:{}/'.format(canonical_url(CONFIGS, PUBLIC),
                                          port),
            s3_internal_url='{}:{}/'.format(canonical_url(CONFIGS, INTERNAL),
                                            port),
            s3_admin_url='{}:{}/'.format(canonical_url(CONFIGS, ADMIN), port),
            relation_id=relid)
Example #31
def use_vaultlocker():
    """Determine whether vaultlocker should be used for OSD encryption

    :returns: whether vaultlocker should be used for key management
    :rtype: bool
    :raises: ValueError if vaultlocker is enabled but ceph < 12.2.4"""
    if (config('osd-encrypt') and
            config('osd-encrypt-keymanager') == ceph.VAULT_KEY_MANAGER):
        if cmp_pkgrevno('ceph', '12.2.4') < 0:
            msg = ('vault usage only supported with ceph >= 12.2.4')
            status_set('blocked', msg)
            raise ValueError(msg)
        else:
            return True
    return False
Example #32
def osdize_dir(path, encrypt=False):
    if os.path.exists(os.path.join(path, 'upstart')):
        log('Path {} is already configured as an OSD - bailing'.format(path))
        return

    if cmp_pkgrevno('ceph', "0.56.6") < 0:
        log('Unable to use directories for OSDs with ceph < 0.56.6',
            level=ERROR)
        raise

    mkdir(path, owner=ceph_user(), group=ceph_user(), perms=0o755)
    chownr('/var/lib/ceph', ceph_user(), ceph_user())
    cmd = [
        'sudo', '-u', ceph_user(),
        'ceph-disk',
        'prepare',
        '--data-dir',
        path
    ]
    if cmp_pkgrevno('ceph', '0.60') >= 0:
        if encrypt:
            cmd.append('--dmcrypt')
    log("osdize dir cmd: {}".format(cmd))
    subprocess.check_call(cmd)
Example #33
def join_cluster(node):
    ''' Join cluster with node '''
    if cmp_pkgrevno('rabbitmq-server', '3.0.1') >= 0:
        cluster_cmd = 'join_cluster'
    else:
        cluster_cmd = 'cluster'
    status_set('maintenance',
               'Clustering with remote rabbit host (%s).' % node)
    rabbitmqctl('stop_app')
    # Intentionally using check_output so we can see rabbitmqctl error
    # message if it fails
    cmd = [RABBITMQ_CTL, cluster_cmd, node]
    subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    start_app()
    log('Host clustered with %s.' % node, 'INFO')
Example #34
def use_vaultlocker():
    """Determine whether vaultlocker should be used for OSD encryption

    :returns: whether vaultlocker should be used for key management
    :rtype: bool
    :raises: ValueError if vaultlocker is enabled but ceph < 12.2.4"""
    if (config('osd-encrypt')
            and config('osd-encrypt-keymanager') == ceph.VAULT_KEY_MANAGER):
        if cmp_pkgrevno('ceph', '12.2.4') < 0:
            msg = ('vault usage only supported with ceph >= 12.2.4')
            status_set('blocked', msg)
            raise ValueError(msg)
        else:
            return True
    return False
Example #35
def db_migration():
    release = CompareOpenStackReleases(os_release('openstack-dashboard'))
    if release >= 'rocky':
        python = 'python3'
        python_django = 'python3-django'
    else:
        python = 'python2'
        python_django = 'python-django'
    if cmp_pkgrevno(python_django, '1.9') >= 0:
        # syncdb was removed in django 1.9
        subcommand = 'migrate'
    else:
        subcommand = 'syncdb'
    cmd = [python, '/usr/share/openstack-dashboard/manage.py', subcommand,
           '--noinput']
    subprocess.check_call(cmd)
Example #36
def db_migration():
    release = CompareOpenStackReleases(os_release('openstack-dashboard'))
    if release >= 'rocky':
        python = 'python3'
        python_django = 'python3-django'
    else:
        python = 'python2'
        python_django = 'python-django'
    if cmp_pkgrevno(python_django, '1.9') >= 0:
        # syncdb was removed in django 1.9
        subcommand = 'migrate'
    else:
        subcommand = 'syncdb'
    cmd = [python, '/usr/share/openstack-dashboard/manage.py', subcommand,
           '--noinput']
    subprocess.check_call(cmd)
Example #37
def use_short_objects():
    '''
    Determine whether OSD's should be configured with
    limited object name lengths.

    @return: boolean indicating whether OSD's should be limited
    '''
    if cmp_pkgrevno('ceph', "10.2.0") >= 0:
        if config('osd-format') in ('ext4'):
            return True
        for device in config('osd-devices'):
            if device and not device.startswith('/dev'):
                # TODO: determine format of directory based
                #       OSD location
                return True
    return False
Example #38
def forget_cluster_node(node):
    ''' Remove previously departed node from cluster '''
    if cmp_pkgrevno('rabbitmq-server', '3.0.0') < 0:
        log('rabbitmq-server version < 3.0.0, '
            'forget_cluster_node not supported.', level=DEBUG)
        return
    try:
        rabbitmqctl('forget_cluster_node', node)
    except subprocess.CalledProcessError as e:
        if e.returncode == 2:
            log("Unable to remove node '{}' from cluster. It is either still "
                "running or already removed. (Output: '{}')"
                "".format(node, e.output), level=ERROR)
            return
        else:
            raise
Example #39
def set_all_mirroring_queues(enable):
    """
    :param enable: if True then enable mirroring queue for all the vhosts,
                   otherwise the HA policy is removed
    """
    if cmp_pkgrevno('rabbitmq-server', '3.0.0') < 0:
        log(("Mirroring queues not supported "
             "in rabbitmq-server >= 3.0"), level='WARN')
        log(("More information at http://www.rabbitmq.com/blog/"
             "2012/11/19/breaking-things-with-rabbitmq-3-0"), level='INFO')
        return

    for vhost in list_vhosts():
        if enable:
            set_ha_mode(vhost, 'all')
        else:
            clear_ha_mode(vhost, force=True)
Example #40
def register_configs():
    ''' Register config files with their respective contexts. '''
    release = os_release('openstack-dashboard')
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)

    confs = [LOCAL_SETTINGS,
             HAPROXY_CONF,
             PORTS_CONF]

    if CompareOpenStackReleases(release) >= 'mitaka':
        configs.register(KEYSTONEV3_POLICY,
                         CONFIG_FILES[KEYSTONEV3_POLICY]['hook_contexts'])
        CONFIG_FILES[LOCAL_SETTINGS]['hook_contexts'].append(
            context.SharedDBContext(
                user=config('database-user'),
                database=config('database'),
                ssl_dir=DASHBOARD_CONF_DIR))

    for conf in confs:
        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])

    if os.path.isdir(APACHE_CONF_DIR) and cmp_pkgrevno('apache2', '2.4') >= 0:
        for conf in [APACHE_CONF, APACHE_SSL, APACHE_DEFAULT]:
            if os.path.isfile(conf):
                log('Removing old config %s' % (conf))
                os.remove(conf)
        configs.register(APACHE_24_DEFAULT,
                         CONFIG_FILES[APACHE_24_DEFAULT]['hook_contexts'])
        configs.register(APACHE_24_CONF,
                         CONFIG_FILES[APACHE_24_CONF]['hook_contexts'])
        configs.register(APACHE_24_SSL,
                         CONFIG_FILES[APACHE_24_SSL]['hook_contexts'])
    else:
        configs.register(APACHE_DEFAULT,
                         CONFIG_FILES[APACHE_DEFAULT]['hook_contexts'])
        configs.register(APACHE_CONF,
                         CONFIG_FILES[APACHE_CONF]['hook_contexts'])
        configs.register(APACHE_SSL,
                         CONFIG_FILES[APACHE_SSL]['hook_contexts'])

    if os.path.exists(os.path.dirname(ROUTER_SETTING)):
        configs.register(ROUTER_SETTING,
                         CONFIG_FILES[ROUTER_SETTING]['hook_contexts'])

    return configs
Example #41
def osdize_dir(path):
    if os.path.exists(os.path.join(path, 'upstart')):
        log('Path {} is already configured as an OSD - bailing'.format(path))
        return

    if cmp_pkgrevno('ceph', "0.56.6") < 0:
        log('Unable to use directories for OSDs with ceph < 0.56.6',
            level=ERROR)
        raise

    mkdir(path)
    cmd = [
        'ceph-disk-prepare',
        '--data-dir',
        path
    ]
    subprocess.check_call(cmd)
Example #42
def register_configs():
    ''' Register config files with their respective contexts. '''
    release = os_release('openstack-dashboard')
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)

    confs = [LOCAL_SETTINGS,
             HAPROXY_CONF,
             PORTS_CONF]

    if CompareOpenStackReleases(release) >= 'mitaka':
        configs.register(KEYSTONEV3_POLICY,
                         CONFIG_FILES[KEYSTONEV3_POLICY]['hook_contexts'])
        CONFIG_FILES[LOCAL_SETTINGS]['hook_contexts'].append(
            context.SharedDBContext(
                user=config('database-user'),
                database=config('database'),
                ssl_dir=DASHBOARD_CONF_DIR))

    for conf in confs:
        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])

    if os.path.isdir(APACHE_CONF_DIR) and cmp_pkgrevno('apache2', '2.4') >= 0:
        for conf in [APACHE_CONF, APACHE_SSL, APACHE_DEFAULT]:
            if os.path.isfile(conf):
                log('Removing old config %s' % (conf))
                os.remove(conf)
        configs.register(APACHE_24_DEFAULT,
                         CONFIG_FILES[APACHE_24_DEFAULT]['hook_contexts'])
        configs.register(APACHE_24_CONF,
                         CONFIG_FILES[APACHE_24_CONF]['hook_contexts'])
        configs.register(APACHE_24_SSL,
                         CONFIG_FILES[APACHE_24_SSL]['hook_contexts'])
    else:
        configs.register(APACHE_DEFAULT,
                         CONFIG_FILES[APACHE_DEFAULT]['hook_contexts'])
        configs.register(APACHE_CONF,
                         CONFIG_FILES[APACHE_CONF]['hook_contexts'])
        configs.register(APACHE_SSL,
                         CONFIG_FILES[APACHE_SSL]['hook_contexts'])

    if os.path.exists(os.path.dirname(ROUTER_SETTING)):
        configs.register(ROUTER_SETTING,
                         CONFIG_FILES[ROUTER_SETTING]['hook_contexts'])

    return configs
Example #43
def identity_joined(relid=None):
    if cmp_pkgrevno('radosgw', '0.55') < 0:
        log('Integration with keystone requires ceph >= 0.55')
        sys.exit(1)

    port = config('port')
    admin_url = '%s:%i/swift' % (canonical_url(CONFIGS, ADMIN), port)
    internal_url = '%s:%s/swift/v1' % \
        (canonical_url(CONFIGS, INTERNAL), port)
    public_url = '%s:%s/swift/v1' % \
        (canonical_url(CONFIGS, PUBLIC), port)
    relation_set(service='swift',
                 region=config('region'),
                 public_url=public_url, internal_url=internal_url,
                 admin_url=admin_url,
                 requested_roles=config('operator-roles'),
                 relation_id=relid)
Example #44
def clear_ha_mode(vhost, name='HA', force=False):
    """
    Clear policy from the `vhost` by `name`
    """
    if cmp_pkgrevno('rabbitmq-server', '3.0.0') < 0:
        log(("Mirroring queues not supported "
             "in rabbitmq-server >= 3.0"), level=WARNING)
        log(("More information at http://www.rabbitmq.com/blog/"
             "2012/11/19/breaking-things-with-rabbitmq-3-0"), level='INFO')
        return

    log("Clearing '%s' policy from vhost '%s'" % (name, vhost), level='INFO')
    try:
        rabbitmqctl('clear_policy', '-p', vhost, name)
    except subprocess.CalledProcessError as ex:
        if not force:
            raise ex
Example #45
def forget_cluster_node(node):
    ''' Remove previously departed node from cluster '''
    if cmp_pkgrevno('rabbitmq-server', '3.0.0') < 0:
        log('rabbitmq-server version < 3.0.0, '
            'forget_cluster_node not supported.', level=DEBUG)
        return
    try:
        rabbitmqctl('forget_cluster_node', node)
    except subprocess.CalledProcessError as e:
        if e.returncode == 2:
            log("Unable to remove node '{}' from cluster. It is either still "
                "running or already removed. (Output: '{}')"
                "".format(node, e.output), level=ERROR)
            return
        else:
            raise
    log("Removed previously departed node from cluster: '{}'."
        "".format(node), level=INFO)
Example #46
def identity_joined(relid=None):
    if cmp_pkgrevno('radosgw', '0.55') < 0:
        log('Integration with keystone requires ceph >= 0.55')
        sys.exit(1)

    port = config('port')
    admin_url = '%s:%i/swift' % (canonical_url(CONFIGS, ADMIN), port)
    internal_url = '%s:%s/swift/v1' % \
        (canonical_url(CONFIGS, INTERNAL), port)
    public_url = '%s:%s/swift/v1' % \
        (canonical_url(CONFIGS, PUBLIC), port)
    relation_set(service='swift',
                 region=config('region'),
                 public_url=public_url,
                 internal_url=internal_url,
                 admin_url=admin_url,
                 requested_roles=config('operator-roles'),
                 relation_id=relid)
Example #47
    def __call__(self):
        ctxt = {
            'cluster_partition_handling':
            (leader_get(rabbit_utils.CLUSTER_MODE_KEY)
             or rabbit_utils.CLUSTER_MODE_FOR_INSTALL),
            'mnesia_table_loading_retry_timeout':
            config('mnesia-table-loading-retry-timeout'),
            'mnesia_table_loading_retry_limit':
            config('mnesia-table-loading-retry-limit')
        }

        if config('connection-backlog'):
            ctxt['connection_backlog'] = config('connection-backlog')

        if cmp_pkgrevno('rabbitmq-server', '3.6') >= 0:
            ctxt['queue_master_locator'] = config('queue-master-locator')

        return ctxt
Example #48
def check_optional_relations(configs):
    required_interfaces = {}
    if relation_ids('ha'):
        required_interfaces['ha'] = ['cluster']
        try:
            get_hacluster_config()
        except:
            return ('blocked',
                    'hacluster missing configuration: '
                    'vip, vip_iface, vip_cidr')
    if cmp_pkgrevno('radosgw', '0.55') >= 0 and \
            relation_ids('identity-service'):
        required_interfaces['identity'] = ['identity-service']
    if required_interfaces:
        set_os_workload_status(configs, required_interfaces)
        return status_get()
    else:
        return 'unknown', 'No optional relations'
Example #49
def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False,
               ignore_errors=False):
    if not os.path.exists(dev):
        log('Path {} does not exist - bailing'.format(dev))
        return

    if not is_block_device(dev):
        log('Path {} is not a block device - bailing'.format(dev))
        return

    if (is_osd_disk(dev) and not reformat_osd):
        log('Looks like {} is already an OSD, skipping.'.format(dev))
        return

    if is_device_mounted(dev):
        log('Looks like {} is in use, skipping.'.format(dev))
        return

    cmd = ['ceph-disk-prepare']
    # Later versions of ceph support more options
    if cmp_pkgrevno('ceph', '0.48.3') >= 0:
        if osd_format:
            cmd.append('--fs-type')
            cmd.append(osd_format)
        if reformat_osd:
            cmd.append('--zap-disk')
        cmd.append(dev)
        if osd_journal and os.path.exists(osd_journal):
            cmd.append(osd_journal)
    else:
        # Just provide the device - no other options
        # for older versions of ceph
        cmd.append(dev)
        if reformat_osd:
            zap_disk(dev)

    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError as e:
        if ignore_errors:
            log('Unable to initialize device: {}'.format(dev), WARNING)
        else:
            log('Unable to initialize device: {}'.format(dev), ERROR)
            raise e
Example #50
def use_short_objects():
    '''
    Determine whether OSD's should be configured with
    limited object name lengths.

    @return: boolean indicating whether OSD's should be limited
    '''
    if cmp_pkgrevno('ceph', "10.2.0") >= 0:
        if config('osd-format') in ('ext4'):
            return True
        devices = config('osd-devices')
        if not devices:
            return False

        for device in devices.split():
            if device and not device.startswith('/dev'):
                # TODO: determine format of directory based
                #       OSD location
                return True
    return False
Example #51
def set_ha_mode(vhost, mode, params=None, sync_mode='automatic'):
    """Valid mode values:

      * 'all': Queue is mirrored across all nodes in the cluster. When a new
         node is added to the cluster, the queue will be mirrored to that node.
      * 'exactly': Queue is mirrored to count nodes in the cluster.
      * 'nodes': Queue is mirrored to the nodes listed in node names

    More details at http://www.rabbitmq.com./ha.html

    :param vhost: virtual host name
    :param mode: ha mode
    :param params: values to pass to the policy, possible values depend on the
                   mode chosen.
    :param sync_mode: when `mode` is 'exactly' this used to indicate how the
                      sync has to be done
                      http://www.rabbitmq.com./ha.html#eager-synchronisation
    """

    if cmp_pkgrevno('rabbitmq-server', '3.0.0') < 0:
        log(("Mirroring queues cannot be enabled, only supported "
             "in rabbitmq-server >= 3.0"), level='WARN')
        log(("More information at http://www.rabbitmq.com/blog/"
             "2012/11/19/breaking-things-with-rabbitmq-3-0"), level='INFO')
        return

    if mode == 'all':
        value = '{"ha-mode": "all"}'
    elif mode == 'exactly':
        value = '{"ha-mode":"exactly","ha-params":%s,"ha-sync-mode":"%s"}' \
                % (params, sync_mode)
    elif mode == 'nodes':
        value = '{"ha-mode":"nodes","ha-params":[%s]}' % ",".join(params)
    else:
        raise RabbitmqError("Unknown mode '%s', known modes: "
                            "all, exactly, nodes" % mode)

    log("Setting HA policy to vhost '%s'" % vhost, level='INFO')
    set_policy(vhost, 'HA', '^(?!amq\.).*', value)
Example #52
def emit_cephconf():
    mon_hosts = get_mon_hosts()
    log('Monitor hosts are ' + repr(mon_hosts))

    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)

    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)

    cephcontext = {
        'auth_supported': get_auth(),
        'mon_hosts': ' '.join(mon_hosts),
        'fsid': get_fsid(),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'osd_journal_size': config('osd-journal-size'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
        'dio': str(config('use-direct-io')).lower(),
    }

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address

    # Install ceph.conf as an alternative to support
    # co-existence with other charms that write this file
    charm_ceph_conf = "/var/lib/charm/{}/ceph.conf".format(service_name())
    mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(),
          group=ceph.ceph_user())
    with open(charm_ceph_conf, 'w') as cephconf:
        cephconf.write(render_template('ceph.conf', cephcontext))
    install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
                        charm_ceph_conf, 90)
Example #53
def register_configs():
    ''' Register config files with their respective contexts. '''
    release = os_release('openstack-dashboard')
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)

    confs = [LOCAL_SETTINGS,
             HAPROXY_CONF,
             PORTS_CONF]

    for conf in confs:
        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])

    if os.path.isdir(APACHE_CONF_DIR) and cmp_pkgrevno('apache2', '2.4') >= 0:
        for conf in [APACHE_CONF, APACHE_SSL, APACHE_DEFAULT]:
            if os.path.isfile(conf):
                log('Removing old config %s' % (conf))
                os.remove(conf)
        configs.register(APACHE_24_DEFAULT,
                         CONFIG_FILES[APACHE_24_DEFAULT]['hook_contexts'])
        configs.register(APACHE_24_CONF,
                         CONFIG_FILES[APACHE_24_CONF]['hook_contexts'])
        configs.register(APACHE_24_SSL,
                         CONFIG_FILES[APACHE_24_SSL]['hook_contexts'])
    else:
        configs.register(APACHE_DEFAULT,
                         CONFIG_FILES[APACHE_DEFAULT]['hook_contexts'])
        configs.register(APACHE_CONF,
                         CONFIG_FILES[APACHE_CONF]['hook_contexts'])
        configs.register(APACHE_SSL,
                         CONFIG_FILES[APACHE_SSL]['hook_contexts'])

    if os.path.exists(os.path.dirname(ROUTER_SETTING)):
        configs.register(ROUTER_SETTING,
                         CONFIG_FILES[ROUTER_SETTING]['hook_contexts'])
    return configs
Example #54
def caching_cmp_pkgrevno(package, revno, pkgcache=None):
    return cmp_pkgrevno(package, revno, pkgcache)
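The two-line wrapper above passes the optional third argument straight through, which hints at the main reason to supply pkgcache explicitly: several comparisons can share one package cache instead of re-opening it per call. A hedged sketch of that reuse (it assumes apt_cache() from charmhelpers.fetch is the usual way to obtain the cache; ceph_feature_flags is an illustrative name):

from charmhelpers.core.host import cmp_pkgrevno
from charmhelpers.fetch import apt_cache


def ceph_feature_flags():
    cache = apt_cache()  # assumed helper; opened once and reused below
    return {
        'luminous_or_later': cmp_pkgrevno('ceph', '12.2.0', cache) >= 0,
        'nautilus_or_later': cmp_pkgrevno('ceph', '14.2.0', cache) >= 0,
    }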
Example #55
def install_upstart_scripts():
    # Only install upstart configurations for older versions
    if cmp_pkgrevno('ceph', "0.55.1") < 0:
        for x in glob.glob('files/upstart/*.conf'):
            shutil.copy(x, '/etc/init/')
Example #56
def amqp_changed(relation_id=None, remote_unit=None):
    if config('prefer-ipv6'):
        host_addr = get_ipv6_addr()[0]
    else:
        host_addr = unit_get('private-address')

    if not is_elected_leader('res_rabbitmq_vip'):
        # NOTE(jamespage) clear relation to deal with data being
        #                 removed from peer storage
        relation_clear(relation_id)
        # Each unit needs to set the db information otherwise if the unit
        # with the info dies the settings die with it Bug# 1355848
        exc_list = ['hostname', 'private-address']
        for rel_id in relation_ids('amqp'):
            peerdb_settings = peer_retrieve_by_prefix(rel_id,
                                                      exc_list=exc_list)
            peerdb_settings['hostname'] = host_addr
            peerdb_settings['private-address'] = host_addr
            if 'password' in peerdb_settings:
                relation_set(relation_id=rel_id, **peerdb_settings)

        log('amqp_changed(): Deferring amqp_changed'
            ' to is_elected_leader.')

        # NOTE: active/active case
        if config('prefer-ipv6'):
            relation_settings = {'private-address': host_addr}
            relation_set(relation_id=relation_id,
                         relation_settings=relation_settings)

        return

    relation_settings = {}
    settings = relation_get(rid=relation_id, unit=remote_unit)

    singleset = set(['username', 'vhost'])

    if singleset.issubset(settings):
        if None in [settings['username'], settings['vhost']]:
            log('amqp_changed(): Relation not ready.')
            return

        relation_settings['password'] = configure_amqp(
            username=settings['username'],
            vhost=settings['vhost'],
            admin=settings.get('admin', False))
    else:
        queues = {}
        for k, v in settings.iteritems():
            amqp = k.split('_')[0]
            x = '_'.join(k.split('_')[1:])
            if amqp not in queues:
                queues[amqp] = {}
            queues[amqp][x] = v
        for amqp in queues:
            if singleset.issubset(queues[amqp]):
                relation_settings[
                    '_'.join([amqp, 'password'])] = configure_amqp(
                    queues[amqp]['username'],
                    queues[amqp]['vhost'])

    if config('prefer-ipv6'):
        relation_settings['private-address'] = host_addr
    else:
        # NOTE(jamespage)
        # override private-address settings if access-network is
        # configured and an appropriate network interface is configured.
        relation_settings['hostname'] = \
            relation_settings['private-address'] = \
            get_address_in_network(config('access-network'),
                                   unit_get('private-address'))

    ssl_utils.configure_client_ssl(relation_settings)

    if is_clustered():
        relation_settings['clustered'] = 'true'
        if is_relation_made('ha'):
            # active/passive settings
            relation_settings['vip'] = config('vip')
            # or ha-vip-only to support active/active, but
            # accessed via a VIP for older clients.
            if config('ha-vip-only') is True:
                relation_settings['ha-vip-only'] = 'true'

    # set if need HA queues or not
    if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
        relation_settings['ha_queues'] = True
    peer_store_and_set(relation_id=relation_id,
                       relation_settings=relation_settings)
Example #57
def get_ceph_context(upgrading=False):
    """Returns the current context dictionary for generating ceph.conf

    :param upgrading: bool - determines if the context is invoked as
                      part of an upgrade procedure. Setting this to true
                      causes settings useful during an upgrade to be
                      defined in the ceph.conf file
    """
    mon_hosts = get_mon_hosts()
    log('Monitor hosts are ' + repr(mon_hosts))

    networks = get_networks('ceph-public-network')
    public_network = ', '.join(networks)

    networks = get_networks('ceph-cluster-network')
    cluster_network = ', '.join(networks)

    cephcontext = {
        'auth_supported': get_auth(),
        'mon_hosts': ' '.join(mon_hosts),
        'fsid': get_fsid(),
        'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
        'crush_initial_weight': config('crush-initial-weight'),
        'osd_journal_size': config('osd-journal-size'),
        'osd_max_backfills': config('osd-max-backfills'),
        'osd_recovery_max_active': config('osd-recovery-max-active'),
        'use_syslog': str(config('use-syslog')).lower(),
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
        'dio': str(config('use-direct-io')).lower(),
        'short_object_len': use_short_objects(),
        'upgrade_in_progress': upgrading,
        'bluestore': use_bluestore(),
        'bluestore_experimental': cmp_pkgrevno('ceph', '12.1.0') < 0,
        'bluestore_block_wal_size': config('bluestore-block-wal-size'),
        'bluestore_block_db_size': config('bluestore-block-db-size'),
    }

    if config('bdev-enable-discard').lower() == 'enabled':
        cephcontext['bdev_discard'] = True
    elif config('bdev-enable-discard').lower() == 'auto':
        cephcontext['bdev_discard'] = should_enable_discard(get_devices())
    else:
        cephcontext['bdev_discard'] = False

    if config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        if not public_network:
            cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address
    else:
        cephcontext['public_addr'] = get_public_addr()
        cephcontext['cluster_addr'] = get_cluster_addr()

    if config('customize-failure-domain'):
        az = az_info()
        if az:
            cephcontext['crush_location'] = "root=default {} host={}" \
                .format(az, socket.gethostname())
        else:
            log(
                "Your Juju environment doesn't"
                "have support for Availability Zones"
            )

    # NOTE(dosaboy): these sections must correspond to what is supported in the
    #                config template.
    sections = ['global', 'osd']
    cephcontext.update(CephConfContext(permitted_sections=sections)())
    return cephcontext