Example #1
 def test_os_codename_from_bad_package_version(self, mocked_error):
     '''Test deriving OpenStack codename for a poorly versioned package'''
     with patch('apt_pkg.Cache') as cache:
         cache.return_value = self._apt_cache()
         openstack.get_os_codename_package('bad-version')
         _e = ('Could not determine OpenStack codename for version 2016.1')
         mocked_error.assert_called_with(_e)
Example #2
 def test_os_codename_from_uninstalled_package(self, mock_error):
     '''Test OpenStack codename from an available but uninstalled pkg'''
     with patch('apt_pkg.Cache') as cache:
         cache.return_value = self._apt_cache()
         try:
             openstack.get_os_codename_package('cinder-common', fatal=True)
         except:
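             # ignore exceptions that raise when error_out is mocked
             # and doesn't sys.exit(1)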
             pass
         e = ('Could not determine version of uninstalled package: '
              'cinder-common')
         mock_error.assert_called_with(e)
Example #3
 def test_os_codename_from_bad_package(self, mocked_error):
     '''Test deriving OpenStack codename from an unavailable package'''
     with patch('apt_pkg.Cache') as cache:
         cache.return_value = self._apt_cache()
         try:
             openstack.get_os_codename_package('foo')
         except:
             # ignore exceptions that raise when error_out is mocked
             # and doesn't sys.exit(1)
             pass
         e = 'Could not determine version of package with no installation '\
             'candidate: foo'
         mocked_error.assert_called_with(e)
Example #4
def register_configs():
    """Register config files with their respective contexts.

    Registration of some configs may not be required depending on
    the existence of certain relations.
    """
    # if called without anything installed (e.g. during install hook)
    # just default to earliest supported release. configs don't get touched
    # till post-install, anyway.
    release = get_os_codename_package('swift-proxy', fatal=False) or 'essex'
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)

    confs = [SWIFT_CONF,
             SWIFT_PROXY_CONF,
             HAPROXY_CONF,
             MEMCACHED_CONF]

    for conf in confs:
        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])

    if os.path.exists('/etc/apache2/conf-available'):
        configs.register(SWIFT_RINGS_24_CONF,
                         CONFIG_FILES[SWIFT_RINGS_24_CONF]['hook_contexts'])
        configs.register(APACHE_SITE_24_CONF,
                         CONFIG_FILES[APACHE_SITE_24_CONF]['hook_contexts'])
    else:
        configs.register(SWIFT_RINGS_CONF,
                         CONFIG_FILES[SWIFT_RINGS_CONF]['hook_contexts'])
        configs.register(APACHE_SITE_CONF,
                         CONFIG_FILES[APACHE_SITE_CONF]['hook_contexts'])
    return configs
Example #5
def register_configs():
    release = get_os_codename_package('swift', fatal=False) or 'essex'
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    configs.register('/etc/swift/swift.conf',
                     [SwiftStorageContext()])
    configs.register('/etc/rsync-juju.d/050-swift-storage.conf',
                     [RsyncContext(), SwiftStorageServerContext()])
    # NOTE: add VaultKVContext so interface status can be assessed
    for server in ['account', 'object', 'container']:
        contexts = [SwiftStorageServerContext(),
                    context.BindHostContext(),
                    context.WorkerConfigContext()]

        # if vault deps are not installed it is not yet possible to check the
        # vault context status since it requires the hvac dependency.
        if vaultlocker_installed():
            contexts.append(vaultlocker.VaultKVContext(
                            vaultlocker.VAULTLOCKER_BACKEND))

        configs.register('/etc/swift/%s-server.conf' % server, contexts)

        if enable_replication():
            configs.register(
                '/etc/swift/{svc}-server/{svc}-server-replicator.conf'.format(
                    svc=server),
                contexts)

    return configs
Example #6
    def __call__(self):
        """Used to generate template context to be added to cinder.conf in the
        presence of a ceph relation.
        """
        if not is_relation_made('ceph', 'key'):
            return {}

        release = get_os_codename_package('cinder-common')
        if CompareOpenStackReleases(release) < "icehouse":
            raise Exception("Unsupported version of Openstack")

        service = service_name()
        backup_driver = 'cinder.backup.drivers.ceph'
        return {
            "cinder": {
                "/etc/cinder/cinder.conf": {
                    "sections": {
                        'DEFAULT': [
                            ('backup_driver', backup_driver),
                            ('backup_ceph_conf',
                                os.path.join('/var/lib/charm',
                                             service, 'ceph.conf')),
                            ('backup_ceph_pool', service),
                            ('backup_ceph_user', service),
                        ]
                    }
                }
            }
        }
Example #7
 def __call__(self):
     """
     Used to generate template context to be added to cinder.conf in the
     presence of a ceph relation.
     """
     if not is_relation_made('ceph', 'key'):
         return {}
     service = service_name()
     os_codename = get_os_codename_package('cinder-common')
     if CompareOpenStackReleases(os_codename) >= "icehouse":
         volume_driver = 'cinder.volume.drivers.rbd.RBDDriver'
     else:
         volume_driver = 'cinder.volume.driver.RBDDriver'
     return {
         "cinder": {
             "/etc/cinder/cinder.conf": {
                 "sections": {
                     service: [
                         ('volume_backend_name', service),
                         ('volume_driver', volume_driver),
                         ('rbd_pool', service),
                         ('rbd_user', service),
                         ('rbd_secret_uuid', leader_get('secret-uuid')),
                         ('rbd_ceph_conf', ceph_config_file()),
                     ]
                 }
             }
         }
     }
Example #8
 def test_os_codename_from_uninstalled_package_nonfatal(self):
     '''Test OpenStack codename from avail uninstalled pkg is non-fatal'''
     with patch('apt_pkg.Cache') as cache:
         cache.return_value = self._apt_cache()
         self.assertEquals(
             None,
             openstack.get_os_codename_package('cinder-common',
                                               fatal=False))
Example #9
def _get_services():
    """Return a list of services that need to be (un)paused."""
    services = SWIFT_SVCS[:]
    # Before Icehouse there was no swift-container-sync
    _os_release = get_os_codename_package("swift-container")
    if CompareOpenStackReleases(_os_release) < "icehouse":
        services.remove("swift-container-sync")
    return services
Example #10
def register_configs():
    """
    Register config files with their respective contexts.
    Registration of some configs may not be required depending on
    the existence of certain relations.
    """
    # if called without anything installed (e.g. during install hook)
    # just default to earliest supported release. configs don't get touched
    # till post-install, anyway.

    release = get_os_codename_package('ceilometer-common', fatal=False)
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    if not release:
        log("Not installed yet, no way to determine the OS release. "
            "Skipping register configs", DEBUG)
        return configs

    if CompareOpenStackReleases(release) >= 'queens':
        for conf in QUEENS_CONFIG_FILES:
            configs.register(conf, QUEENS_CONFIG_FILES[conf]['hook_contexts'])
        configs.register(PIPELINE_CONF, [RemoteSinksContext()])
    else:
        for conf in (CEILOMETER_CONF, HAPROXY_CONF):
            configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])

        if init_is_systemd():
            configs.register(
                CEILOMETER_API_SYSTEMD_CONF,
                CONFIG_FILES[CEILOMETER_API_SYSTEMD_CONF]['hook_contexts']
            )

        if os.path.exists('/etc/apache2/conf-available'):
            configs.register(
                HTTPS_APACHE_24_CONF,
                CONFIG_FILES[HTTPS_APACHE_24_CONF]['hook_contexts']
            )
        else:
            configs.register(
                HTTPS_APACHE_CONF,
                CONFIG_FILES[HTTPS_APACHE_CONF]['hook_contexts']
            )
        if enable_memcache(release=release):
            configs.register(MEMCACHED_CONF, [context.MemcacheContext()])

        if run_in_apache():
            wsgi_script = "/usr/share/ceilometer/app.wsgi"
            configs.register(
                WSGI_CEILOMETER_API_CONF,
                [context.WSGIWorkerConfigContext(name="ceilometer",
                                                 script=wsgi_script),
                 CeilometerContext(),
                 HAProxyContext()]
            )
        if CompareOpenStackReleases(release) >= 'mitaka':
            configs.register(PIPELINE_CONF, [RemoteSinksContext()])
    return configs
Example #11
 def test_os_codename_from_package(self):
     '''Test deriving OpenStack codename from an installed package'''
     with patch('apt_pkg.Cache') as cache:
         cache.return_value = self._apt_cache()
         for pkg, vers in six.iteritems(FAKE_REPO):
             # test fake repo for all "installed" packages
             if pkg.startswith('bad-'):
                 continue
             if 'pkg_vers' not in vers:
                 continue
             self.assertEquals(openstack.get_os_codename_package(pkg),
                               vers['os_release'])
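Note: the FAKE_REPO fixture these tests iterate over is not shown on this page. A hypothetical sketch of its shape, inferred only from the keys the tests read ('pkg_vers', 'os_release') and the 'bad-' prefix convention, might look like this:

# Hypothetical sketch of the FAKE_REPO fixture used by the tests above
# (names and versions are illustrative, not the real fixture data).
FAKE_REPO = {
    # an "installed" package whose version maps to a known codename
    'neutron-common': {'pkg_vers': '2:12.0.0-0ubuntu1', 'os_release': 'queens'},
    # available but not installed: no 'pkg_vers', so the loop above skips it
    'cinder-common': {'os_release': None},
    # installed, but versioned such that no codename can be derived
    'bad-version': {'pkg_vers': '2016.1-0ubuntu1', 'os_release': None},
}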
Example #12
    def __call__(self):
        """Used to generate template context to be added to cinder.conf.
        """

        release = get_os_codename_package('cinder-common')
        if CompareOpenStackReleases(release) < "queens":
            raise Exception("Unsupported version of Openstack")

        backup_driver = 'cinder.backup.drivers.swift.SwiftBackupDriver'
        backup_auth_method = 'single_user'
        if config('auth-version') == 2:
            ctxt = [
                ('backup_driver', backup_driver),
                ('backup_swift_auth', backup_auth_method),
                ('backup_swift_auth_version', config('auth-version')),
                ('backup_swift_url', config('endpoint-url')),
                ('backup_swift_auth_url', config('auth-url')),
                ('backup_swift_user', config('swift-user')),
                ('backup_swift_key', config('swift-key')),
                ('backup_swift_container', config('container-name')),
                ('backup_swift_object_size', config('object-size')),
                ('backup_swift_block_size', config('block-size')),
                ('backup_swift_tenant', config('tenant-name'))
            ]
        elif config('auth-version') == 3:
            ctxt = [
                ('backup_driver', backup_driver),
                ('backup_swift_auth', backup_auth_method),
                ('backup_swift_auth_version', config('auth-version')),
                ('backup_swift_url', config('endpoint-url')),
                ('backup_swift_auth_url', config('auth-url')),
                ('backup_swift_user', config('swift-user')),
                ('backup_swift_key', config('swift-key')),
                ('backup_swift_container', config('container-name')),
                ('backup_swift_object_size', config('object-size')),
                ('backup_swift_block_size', config('block-size')),
                ('backup_swift_user_domain', config('user-domain')),
                ('backup_swift_project_domain', config('project-domain')),
                ('backup_swift_project', config('project-name'))
            ]
        else:
            raise Exception("Unsupported swift auth version")
        return {
            "cinder": {
                "/etc/cinder/cinder.conf": {
                    "sections": {
                        'DEFAULT': ctxt
                    }
                }
            }
        }
Example #13
def register_configs():
    release = get_os_codename_package('python-swift', fatal=False) or 'essex'
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    configs.register('/etc/swift/swift.conf',
                     [SwiftStorageContext()])
    configs.register('/etc/rsync-juju.d/050-swift-storage.conf',
                     [RsyncContext(), SwiftStorageServerContext()])
    for server in ['account', 'object', 'container']:
        configs.register('/etc/swift/%s-server.conf' % server,
                         [SwiftStorageServerContext(),
                          context.BindHostContext(),
                          context.WorkerConfigContext()])
    return configs
Example #14
    def __call__(self):
        """
        Used to generate template context to be added to cinder.conf in the
        presence of a ceph relation.
        """
        if not is_relation_made('ceph', 'key'):
            return {}
        service = service_name()
        os_codename = get_os_codename_package('cinder-common')
        if CompareOpenStackReleases(os_codename) >= "icehouse":
            volume_driver = 'cinder.volume.drivers.rbd.RBDDriver'
        else:
            volume_driver = 'cinder.volume.driver.RBDDriver'

        if config('pool-type') == 'erasure-coded':
            pool_name = (config('ec-rbd-metadata-pool')
                         or "{}-metadata".format(
                             config('rbd-pool-name') or service))
        else:
            pool_name = config('rbd-pool-name') or service

        section = {
            service: [('volume_backend_name', service),
                      ('volume_driver', volume_driver),
                      ('rbd_pool', pool_name), ('rbd_user', service),
                      ('rbd_secret_uuid', leader_get('secret-uuid')),
                      ('rbd_ceph_conf', ceph_config_file())]
        }

        if CompareOpenStackReleases(os_codename) >= "mitaka":
            section[service].append(('report_discard_supported', True))

        if CompareOpenStackReleases(os_codename) >= "ocata":
            section[service].append(('rbd_exclusive_cinder_pool', True))

        if CompareOpenStackReleases(os_codename) >= "pike" \
                and config('backend-availability-zone'):
            section[service].append(('backend_availability_zone',
                                     config('backend-availability-zone')))

        if CompareOpenStackReleases(os_codename) >= "queens":
            section[service].append(
                ('rbd_flatten_volume_from_snapshot',
                 config('rbd-flatten-volume-from-snapshot')))

        return {'cinder': {'/etc/cinder/cinder.conf': {'sections': section}}}
Example #15
def restart_map():
    '''
    Determine the correct resource map to be passed to
    charmhelpers.core.restart_on_change() based on the services configured.

    :returns: dict: A dictionary mapping config file to lists of services
                    that should be restarted when file changes.
    '''
    release = (get_os_codename_package('ceilometer-common', fatal=False)
               or 'icehouse')
    _map = {}
    for f, ctxt in CONFIG_FILES.iteritems():
        svcs = []
        for svc in ctxt['services']:
            svcs.append(svc)
        if svcs:
            _map[f] = svcs
    if enable_memcache(release=release):
        _map[MEMCACHED_CONF] = ['memcached']
    return _map
Example #16
def register_configs():
    """
    Register config files with their respective contexts.
    Registration of some configs may not be required depending on
    the existence of certain relations.
    """
    # if called without anything installed (e.g. during install hook)
    # just default to earliest supported release. configs don't get touched
    # till post-install, anyway.
    release = get_os_codename_package('ceilometer-common', fatal=False) \
        or 'icehouse'
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)

    for conf in CONFIG_FILES:
        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])

    if enable_memcache(release=release):
        configs.register(MEMCACHED_CONF, [context.MemcacheContext()])

    return configs
Example #17
def register_configs():
    """
    Register config files with their respective contexts.
    Registration of some configs may not be required depending on
    the existence of certain relations.
    """
    # if called without anything installed (e.g. during install hook)
    # just default to earliest supported release. configs don't get touched
    # till post-install, anyway.
    release = get_os_codename_package('cinder-common', fatal=False) or 'folsom'
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)

    confs = []

    if relation_ids('ceph') and hook_name() != 'ceph-relation-broken':
        # Add charm ceph configuration to resources and
        # ensure directory actually exists
        mkdir(os.path.dirname(ceph_config_file()))
        mkdir(os.path.dirname(CEPH_CONF))
        # Install ceph config as an alternative for co-location with
        # ceph and ceph-osd charms - nova-compute ceph.conf will be
        # lower priority than both of these but that's OK
        if not os.path.exists(ceph_config_file()):
            # touch file for pre-templated generation
            open(ceph_config_file(), 'wt').close()
        install_alternative(os.path.basename(CEPH_CONF), CEPH_CONF,
                            ceph_config_file())
        CONFIG_FILES[ceph_config_file()] = {
            'hook_contexts':
            [context.CephContext(),
             cinder_contexts.CephAccessContext()],
            'services': ['cinder-volume'],
        }
        confs.append(ceph_config_file())

    for conf in confs:
        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])

    return configs
Example #18
    def __call__(self):
        '''Generates context for remote sinks for Panko and other compatible
        remote consumers of Ceilometer event data.
        '''
        ctxt = {}
        if config('remote-sink'):
            ctxt['remote_sinks'] = config('remote-sink').split(' ')
        for relid in relation_ids('event-service'):
            for unit in related_units(relid):
                publisher = relation_get('publisher', unit=unit, rid=relid)
                if publisher:
                    if not ctxt.get('internal_sinks'):
                        ctxt['internal_sinks'] = {}
                    ctxt['internal_sinks'][unit.split('/')[0]] = publisher

        release = get_os_codename_package('ceilometer-common', fatal=False)
        ctxt['event_sink_publisher'] = None
        if CompareOpenStackReleases(release) >= 'queens':
            # NOTE: see bug LP 1676586
            if config('events-publisher') == "aodh":
                ctxt['event_sink_publisher'] = 'notifier://?topic=alarm.all'
            elif config('events-publisher') == "gnocchi":
                if relation_ids('metric-service'):
                    ctxt['event_sink_publisher'] = 'gnocchi://'
                else:
                    log("Unable to configure event publisher '{}' since "
                        "no gnocchi relation found".format(
                            config('events-publisher')),
                        level=INFO)
            elif config('events-publisher') == "":
                log("Not configuring any event publishers", level=INFO)
            else:
                log("Invalid event publisher config provided '{}'. Not "
                    "configuring any event publishers".format(
                        config('events-publisher')),
                    level=WARNING)

        return ctxt
Example #19
def get_release():
    return get_os_codename_package('glance-common', fatal=False) or 'icehouse'
Example #20
def get_packages():
    release = (get_os_codename_package('ceilometer-common', fatal=False)
               or 'icehouse')
    packages = deepcopy(CEILOMETER_AGENT_PACKAGES)
    packages.extend(token_cache_pkgs(release=release))
    return packages
Example #21
def get_common_package():
    if get_os_codename_package('quantum-common', fatal=False) is not None:
        return 'quantum-common'
    else:
        return 'neutron-common'
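Most of the snippets above follow the same probe-and-fallback pattern: with fatal=False the helper returns None when the package is not installed, so callers can substitute a default release. A minimal sketch, assuming the import path used by the charms quoted above:

# Minimal sketch, assuming get_os_codename_package and
# CompareOpenStackReleases come from charmhelpers.contrib.openstack.utils
# (as in the charms quoted above).
from charmhelpers.contrib.openstack.utils import (
    CompareOpenStackReleases,
    get_os_codename_package,
)

# Probe an installed package; fall back to a default codename if it is absent.
release = get_os_codename_package('cinder-common', fatal=False) or 'folsom'

# Codenames compare in release order, not alphabetically.
if CompareOpenStackReleases(release) >= 'icehouse':
    print('icehouse or newer: {}'.format(release))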
Example #22
 def test_os_codename_from_bad_package_nonfatal(self):
     '''Test OpenStack codename from an unavailable package is non-fatal'''
     with patch('apt_pkg.Cache') as cache:
         cache.return_value = self._apt_cache()
         self.assertEquals(
             None, openstack.get_os_codename_package('foo', fatal=False))
Example #23
 def _since_openstack_release(audit_options=None):
     _release = openstack_utils.get_os_codename_package(pkg)
     return openstack_utils.CompareOpenStackReleases(_release) >= release
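Example #23 is a fragment of a closure: pkg and release are free variables bound by an enclosing factory that is not shown. A hypothetical wrapper illustrating how such a predicate might be built (not the actual charm-helpers audit code):

# Hypothetical enclosing factory for the predicate shown in Example #23;
# pkg and release are captured by the inner function.
# Assumed import: the fragment references utils via the openstack_utils alias.
from charmhelpers.contrib.openstack import utils as openstack_utils


def since_openstack_release(pkg, release):
    def _since_openstack_release(audit_options=None):
        _release = openstack_utils.get_os_codename_package(pkg)
        return openstack_utils.CompareOpenStackReleases(_release) >= release
    return _since_openstack_release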
Example #24
        # default no-op so that None still means "missing" for config
        # validation (see elsewhere)
        if len(modify_hook_scripts) == 0:
            modify_hook_scripts.append('/bin/true')

        return dict(mirror_list=config['mirror_list'],
                    modify_hook_scripts=', '.join(modify_hook_scripts),
                    name_prefix=config['name_prefix'],
                    content_id_template=config['content_id_template'],
                    use_swift=config['use_swift'],
                    region=config['region'],
                    cloud_name=config['cloud_name'])


release = get_os_codename_package('glance-common', fatal=False) or 'icehouse'
configs = OSConfigRenderer(templates_dir='templates/',
                           openstack_release=release)

configs.register(MIRRORS_CONF_FILE_NAME, [MirrorsConfigServiceContext()])
configs.register(ID_CONF_FILE_NAME, [IdentityServiceContext(), AMQPContext()])


def install_cron_script():
    """Installs cron job in /etc/cron.$frequency/ for repeating sync

    Script is not a template but we always overwrite, to ensure it is
    up-to-date.

    """
    sync_script_source = os.path.join("scripts", SCRIPT_NAME)