Code Example #1
    def test_listen_port(self):
        # Without an explicit 'port' option: 80 for HTTP, 443 for HTTPS.
        self.https.return_value = False
        self.assertEqual(80, utils.listen_port())
        self.https.return_value = True
        self.assertEqual(443, utils.listen_port())
        # An explicit 'port' option overrides the protocol-based default.
        self.test_config.set('port', 42)
        self.assertEqual(42, utils.listen_port())
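The assertions above pin down the contract of utils.listen_port() used throughout the remaining examples. A minimal sketch of a helper satisfying that contract is shown below; it is a hypothetical reconstruction for illustration, not the charm's actual implementation, and assumes the charm's config() and https() helpers are importable in utils.py.

def listen_port():
    # Hypothetical sketch inferred from the test above; the real helper in
    # the charm may differ. An explicitly configured 'port' wins, otherwise
    # fall back to 443 when HTTPS is enabled and 80 when it is not.
    if config('port'):
        return config('port')
    return 443 if https() else 80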
Code Example #2
    def _config_changed():
        # If the unit is paused, skip any config-changed processing;
        # it will be forced to run on resume.
        if is_unit_paused_set():
            log("Unit is paused or upgrading. Skipping config_changed", "WARN")
            return

        install_packages()

        if config('prefer-ipv6'):
            status_set('maintenance', 'configuring ipv6')
            setup_ipv6()

        for r_id in relation_ids('identity-service'):
            identity_changed(relid=r_id)

        for r_id in relation_ids('cluster'):
            cluster_joined(rid=r_id)

        # NOTE(jamespage): Re-exec mon relation for any changes to
        #                  enable ceph pool permissions restrictions
        for r_id in relation_ids('mon'):
            for unit in related_units(r_id):
                mon_relation(r_id, unit)

        # Re-trigger hacluster relations to switch to ifaceless
        # vip configuration
        for r_id in relation_ids('ha'):
            ha_relation_joined(r_id)

        # Refire certificates relations for VIP changes
        for r_id in relation_ids('certificates'):
            certs_joined(r_id)

        # Refire object-store relations for VIP/port changes
        for r_id in relation_ids('object-store'):
            object_store_joined(r_id)

        process_multisite_relations()

        CONFIGS.write_all()
        configure_https()

        update_nrpe_config()

        open_port(port=listen_port())
Code Example #3
    def __call__(self):
        ctxt = super(HAProxyContext, self).__call__()
        port = utils.listen_port()

        # Apache ports
        a_cephradosgw_api = determine_apache_port(port, singlenode_mode=True)

        port_mapping = {'cephradosgw-server': [port, a_cephradosgw_api]}

        ctxt['cephradosgw_bind_port'] = determine_api_port(
            port,
            singlenode_mode=True,
        )

        # for haproxy.conf
        ctxt['service_ports'] = port_mapping
        return ctxt
Code Example #4
def identity_joined(relid=None):
    if cmp_pkgrevno('radosgw', '0.55') < 0:
        log('Integration with keystone requires ceph >= 0.55')
        sys.exit(1)

    port = listen_port()
    admin_url = '%s:%i/swift' % (canonical_url(CONFIGS, ADMIN), port)
    if leader_get('namespace_tenants') == 'True':
        internal_url = '%s:%s/swift/v1/AUTH_$(project_id)s' % \
            (canonical_url(CONFIGS, INTERNAL), port)
        public_url = '%s:%s/swift/v1/AUTH_$(project_id)s' % \
            (canonical_url(CONFIGS, PUBLIC), port)
    else:
        internal_url = '%s:%s/swift/v1' % \
            (canonical_url(CONFIGS, INTERNAL), port)
        public_url = '%s:%s/swift/v1' % \
            (canonical_url(CONFIGS, PUBLIC), port)
    roles = [x for x in [config('operator-roles'), config('admin-roles')] if x]
    requested_roles = ''
    if roles:
        requested_roles = ','.join(roles) if len(roles) > 1 else roles[0]
    relation_set(swift_service='swift',
                 swift_region=config('region'),
                 swift_public_url=public_url,
                 swift_internal_url=internal_url,
                 swift_admin_url=admin_url,
                 requested_roles=requested_roles,
                 relation_id=relid)
    if cmp_pkgrevno('radosgw', '12.2') >= 0:
        relation_set(
            s3_service='s3',
            s3_region=config('region'),
            s3_public_url='{}:{}/'.format(canonical_url(CONFIGS, PUBLIC),
                                          port),
            s3_internal_url='{}:{}/'.format(canonical_url(CONFIGS, INTERNAL),
                                            port),
            s3_admin_url='{}:{}/'.format(canonical_url(CONFIGS, ADMIN), port),
            relation_id=relid)
Code Example #5
def slave_relation_changed(relation_id=None, unit=None):
    if not is_leader():
        return
    if not ready_for_service(legacy=False):
        log('unit not ready, deferring multisite configuration')
        return

    master_data = relation_get(rid=relation_id, unit=unit)
    if not all((master_data.get('realm'), master_data.get('zonegroup'),
                master_data.get('access_key'), master_data.get('secret'),
                master_data.get('url'))):
        log("Defer processing until master RGW has provided required data")
        return

    internal_url = '{}:{}'.format(
        canonical_url(CONFIGS, INTERNAL),
        listen_port(),
    )
    endpoints = [internal_url]

    realm = config('realm')
    zonegroup = config('zonegroup')
    zone = config('zone')

    if (realm, zonegroup) != (master_data['realm'], master_data['zonegroup']):
        log("Mismatched configuration so stop multi-site configuration now")
        return

    if not leader_get('restart_nonce'):
        # NOTE(jamespage):
        # This is an ugly kludge to force creation of the required data
        # items in the .rgw.root pool prior to the radosgw process being
        # started; radosgw-admin does not currently have a way of doing
        # this operation but a period update will force it to be created.
        multisite.update_period(fatal=False)

    mutation = False

    if realm not in multisite.list_realms():
        multisite.pull_realm(url=master_data['url'],
                             access_key=master_data['access_key'],
                             secret=master_data['secret'])
        multisite.pull_period(url=master_data['url'],
                              access_key=master_data['access_key'],
                              secret=master_data['secret'])
        multisite.set_default_realm(realm)
        mutation = True

    if zone not in multisite.list_zones():
        multisite.create_zone(zone,
                              endpoints=endpoints,
                              default=False,
                              master=False,
                              zonegroup=zonegroup,
                              access_key=master_data['access_key'],
                              secret=master_data['secret'])
        mutation = True

    if mutation:
        multisite.update_period()
        service_restart(service_name())
        leader_set(restart_nonce=str(uuid.uuid4()))
Code Example #6
def master_relation_joined(relation_id=None):
    if not ready_for_service(legacy=False):
        log('unit not ready, deferring multisite configuration')
        return

    internal_url = '{}:{}'.format(
        canonical_url(CONFIGS, INTERNAL),
        listen_port(),
    )
    endpoints = [internal_url]
    realm = config('realm')
    zonegroup = config('zonegroup')
    zone = config('zone')
    access_key = leader_get('access_key')
    secret = leader_get('secret')

    if not all((realm, zonegroup, zone)):
        return

    relation_set(relation_id=relation_id,
                 realm=realm,
                 zonegroup=zonegroup,
                 url=endpoints[0],
                 access_key=access_key,
                 secret=secret)

    if not is_leader():
        return

    if not leader_get('restart_nonce'):
        # NOTE(jamespage):
        # This is an ugly kludge to force creation of the required data
        # items in the .rgw.root pool prior to the radosgw process being
        # started; radosgw-admin does not currently have a way of doing
        # this operation but a period update will force it to be created.
        multisite.update_period(fatal=False)

    mutation = False

    if realm not in multisite.list_realms():
        multisite.create_realm(realm, default=True)
        mutation = True

    if zonegroup not in multisite.list_zonegroups():
        multisite.create_zonegroup(zonegroup,
                                   endpoints=endpoints,
                                   default=True,
                                   master=True,
                                   realm=realm)
        mutation = True

    if zone not in multisite.list_zones():
        multisite.create_zone(zone,
                              endpoints=endpoints,
                              default=True,
                              master=True,
                              zonegroup=zonegroup)
        mutation = True

    if MULTISITE_SYSTEM_USER not in multisite.list_users():
        access_key, secret = multisite.create_system_user(
            MULTISITE_SYSTEM_USER)
        multisite.modify_zone(zone, access_key=access_key, secret=secret)
        leader_set(access_key=access_key, secret=secret)
        mutation = True

    if mutation:
        multisite.update_period()
        service_restart(service_name())
        leader_set(restart_nonce=str(uuid.uuid4()))

    relation_set(relation_id=relation_id, access_key=access_key, secret=secret)
Code Example #7
def gateway_relation():
    relation_set(hostname=get_relation_ip('gateway-relation'),
                 port=listen_port())
Code Example #8
def object_store_joined(relation_id=None):
    relation_data = {
        'swift-url':
        "{}:{}".format(canonical_url(CONFIGS, INTERNAL), listen_port())
    }
    relation_set(relation_id=relation_id, relation_settings=relation_data)
Code Example #9
    def __call__(self):
        # Override the external port(s) with the charm's configured listen
        # port before delegating to the base ApacheSSLContext.
        self.external_ports = [utils.listen_port()]
        return super(ApacheSSLContext, self).__call__()
Code Example #10
    def __call__(self):
        if not relation_ids(self.interfaces[0]):
            return {}

        host = socket.gethostname()
        systemd_rgw = False

        mon_hosts = []
        auths = []
        fsid = None

        for rid in relation_ids(self.interfaces[0]):
            for unit in related_units(rid):
                fsid = relation_get('fsid', rid=rid, unit=unit)
                _auth = relation_get('auth', rid=rid, unit=unit)
                if _auth:
                    auths.append(_auth)

                ceph_pub_addr = relation_get('ceph-public-address',
                                             rid=rid,
                                             unit=unit)
                unit_priv_addr = relation_get('private-address',
                                              rid=rid,
                                              unit=unit)
                ceph_addr = ceph_pub_addr or unit_priv_addr
                ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                if ceph_addr:
                    mon_hosts.append(ceph_addr)
                if relation_get('rgw.{}_key'.format(host), rid=rid, unit=unit):
                    systemd_rgw = True

        if len(set(auths)) != 1:
            e = ("Inconsistent or absent auth returned by mon units. Setting "
                 "auth_supported to 'none'")
            log(e, level=WARNING)
            auth = 'none'
        else:
            auth = auths[0]

        # /etc/init.d/radosgw mandates that a DNS name is used for this
        # parameter, so ensure that the address is resolvable.
        if config('prefer-ipv6'):
            ensure_host_resolvable_v6(host)

        port = determine_api_port(utils.listen_port(), singlenode_mode=True)
        if config('prefer-ipv6'):
            port = "[::]:%s" % (port)

        mon_hosts.sort()
        ctxt = {
            'auth_supported': auth,
            'mon_hosts': ' '.join(mon_hosts),
            'hostname': host,
            'old_auth': cmp_pkgrevno('radosgw', "0.51") < 0,
            'systemd_rgw': systemd_rgw,
            'use_syslog': str(config('use-syslog')).lower(),
            'loglevel': config('loglevel'),
            'port': port,
            'ipv6': config('prefer-ipv6'),
            # The public unit IP is only used in case the authentication is
            # *Not* keystone - in which case it is used to make sure the
            # storage endpoint returned by the built-in auth is the HAproxy
            # (since it defaults to the port the service runs on, and that is
            # not available externally). ~tribaal
            'unit_public_ip': unit_public_ip(),
            'fsid': fsid,
        }

        # NOTE(dosaboy): these sections must correspond to what is supported in
        #                the config template.
        sections = ['global', 'client.radosgw.gateway']
        user_provided = CephConfContext(permitted_sections=sections)()
        user_provided = {
            k.replace('.', '_'): user_provided[k]
            for k in user_provided
        }
        ctxt.update(user_provided)

        if self.context_complete(ctxt):
            # Multi-site Zone configuration is optional,
            # so add after assessment
            ctxt['rgw_zone'] = config('zone')
            return ctxt

        return {}
Code Example #11
    def _mon_relation():
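        # NOTE: 'rid' and 'unit' are not defined in this excerpt; they are
        # presumably closed over from the enclosing mon_relation(rid, unit)
        # hook (see Code Example #2, which invokes mon_relation per unit).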
        key_name = 'rgw.{}'.format(socket.gethostname())
        legacy = True
        if request_per_unit_key():
            legacy = False
            relation_set(relation_id=rid, key_name=key_name)
        try:
            rq = ceph.get_create_rgw_pools_rq(
                prefix=config('zone') or config('pool-prefix'))
        except ValueError as e:
            # The end user has most likely provided an invalid value for
            # a configuration option. Just log the traceback here; the
            # end user will be notified by assess_status(), called at
            # the end of the hook execution.
            log('Caught ValueError, invalid value provided for '
                'configuration?: "{}"'.format(str(e)),
                level=DEBUG)
            return

        if is_request_complete(rq, relation='mon'):
            log('Broker request complete', level=DEBUG)
            CONFIGS.write_all()
            # New style per unit keys
            key = relation_get(attribute='{}_key'.format(key_name),
                               rid=rid,
                               unit=unit)
            if not key:
                # Fallback to old style global key
                key = relation_get(attribute='radosgw_key', rid=rid, unit=unit)
                key_name = None

            if key:
                new_keyring = ceph.import_radosgw_key(key, name=key_name)
                # NOTE(jamespage):
                # Deal with switch from radosgw init script to
                # systemd named units for radosgw instances by
                # stopping and disabling the radosgw unit
                if systemd_based_radosgw():
                    service_stop('radosgw')
                    service('disable', 'radosgw')
                    # Update the nrpe config. If we wait for the below
                    # to be called elsewhere, there exists a period
                    # where nagios will report the radosgw service as
                    # down, and also not be monitoring the per
                    # host services.
                    update_nrpe_config(checks_to_remove=['radosgw'])

                # NOTE(jamespage):
                # Multi-site deployments need to defer restart as the
                # zone is not created until the master relation is
                # joined; restarting here will cause a restart burst
                # in systemd and stop the process restarting once
                # zone configuration is complete.
                if (not is_unit_paused_set() and new_keyring
                        and not multisite_deployment()):
                    log('Resume service "{}" as we now have keys for it.'.
                        format(service_name()),
                        level=DEBUG)
                    service_resume(service_name())

            if multisite_deployment():
                process_multisite_relations()
            elif (ready_for_service(legacy=legacy) and is_leader()
                  and 'mon' in CONFIGS.complete_contexts()):
                # In a non multi-site deployment create the
                # zone using the default zonegroup and restart the service
                internal_url = '{}:{}'.format(
                    canonical_url(CONFIGS, INTERNAL),
                    listen_port(),
                )
                endpoints = [internal_url]
                zonegroup = 'default'
                zone = config('zone')
                existing_zones = multisite.list_zones()
                log('Existing zones {}'.format(existing_zones), level=DEBUG)
                if zone not in existing_zones:
                    log("Zone '{}' doesn't exist, creating".format(zone))
                    try:
                        multisite.create_zone(zone,
                                              endpoints=endpoints,
                                              default=True,
                                              master=True,
                                              zonegroup=zonegroup)
                    except subprocess.CalledProcessError as e:
                        if 'File exists' in e.stderr.decode('UTF-8'):
                            # NOTE(lourot): may have been created in the
                            # background by the Rados Gateway daemon, see
                            # lp:1856106
                            log("Zone '{}' existed already after all".format(
                                zone))
                        else:
                            raise

                    existing_zones = multisite.list_zones(retry_on_empty=True)
                    log('Existing zones {}'.format(existing_zones),
                        level=DEBUG)
                    if zone not in existing_zones:
                        raise RuntimeError(
                            "Could not create zone '{}'".format(zone))

                    service_restart(service_name())
        else:
            send_request_if_needed(rq, relation='mon')