def test_service_name(self, mock_systemd_based_radosgw):
     mock_systemd_based_radosgw.return_value = True
     self.assertEqual(utils.service_name(),
                      '*****@*****.**')
     mock_systemd_based_radosgw.return_value = False
     self.assertEqual(utils.service_name(),
                      'radosgw')
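
The mock_systemd_based_radosgw argument implies the test method is decorated with a patch of utils.systemd_based_radosgw (the decorator is not shown in this excerpt, and the expected systemd unit name in the first assertion is masked). Below is a minimal, self-contained sketch of the same patching pattern; the helper implementations are assumptions for illustration only, not the charm's code.

import socket
import unittest
from unittest.mock import patch


def systemd_based_radosgw():
    """Stand-in for the charm helper that detects systemd-managed radosgw."""
    return True


def service_name():
    """Return a per-host systemd unit name, or the legacy init script name."""
    if systemd_based_radosgw():
        # Assumed unit-name format, for illustration only.
        return 'ceph-radosgw@rgw.{}'.format(socket.gethostname())
    return 'radosgw'


class ServiceNameTest(unittest.TestCase):

    @patch(__name__ + '.systemd_based_radosgw')
    def test_service_name(self, mock_systemd_based_radosgw):
        mock_systemd_based_radosgw.return_value = True
        self.assertTrue(service_name().startswith('ceph-radosgw@'))
        mock_systemd_based_radosgw.return_value = False
        self.assertEqual(service_name(), 'radosgw')


if __name__ == '__main__':
    unittest.main()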
Example #3
    def _mon_relation():
        key_name = 'rgw.{}'.format(socket.gethostname())
        if request_per_unit_key():
            relation_set(relation_id=rid,
                         key_name=key_name)
        # NOTE: prefer zone name if in use over pool-prefix.
        rq = ceph.get_create_rgw_pools_rq(
            prefix=config('zone') or config('pool-prefix'))
        if is_request_complete(rq, relation='mon'):
            log('Broker request complete', level=DEBUG)
            CONFIGS.write_all()
            # New style per unit keys
            key = relation_get(attribute='{}_key'.format(key_name),
                               rid=rid, unit=unit)
            if not key:
                # Fallback to old style global key
                key = relation_get(attribute='radosgw_key',
                                   rid=rid, unit=unit)
                key_name = None

            if key:
                new_keyring = ceph.import_radosgw_key(key,
                                                      name=key_name)
                # NOTE(jamespage):
                # Deal with switch from radosgw init script to
                # systemd named units for radosgw instances by
                # stopping and disabling the radosgw unit
                if systemd_based_radosgw():
                    service_stop('radosgw')
                    service('disable', 'radosgw')
                    # Update the nrpe config. If we wait for the below
                    # to be called elsewhere, there exists a period
                    # where nagios will report the radosgw service as
                    # down, and also not be monitoring the per
                    # host services.
                    update_nrpe_config(checks_to_remove=['radosgw'])

                service('enable', service_name())
                # NOTE(jamespage):
                # Multi-site deployments need to defer restart as the
                # zone is not created until the master relation is
                # joined; restarting here will cause a restart burst
                # in systemd and stop the process restarting once
                # zone configuration is complete.
                if (not is_unit_paused_set() and
                        new_keyring and
                        not multisite_deployment()):
                    service_restart(service_name())

            process_multisite_relations()
        else:
            send_request_if_needed(rq, relation='mon')
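
rid and unit are free variables in _mon_relation(); they are presumably bound by an enclosing relation hook, roughly along these lines (a sketch using the charmhelpers Hooks dispatcher, not necessarily the charm's exact wrapper):

from charmhelpers.core.hookenv import Hooks

hooks = Hooks()


@hooks.hook('mon-relation-changed', 'mon-relation-departed')
def mon_relation(rid=None, unit=None):
    def _mon_relation():
        # Body as in the excerpt above; rid and unit resolve to the
        # enclosing hook's arguments through the closure.
        pass

    _mon_relation()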
Example #4
def install():
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    install_packages()
    # hold the service down until we have keys from ceph
    log('Disable service "{}" until we have keys for it.'.format(
        service_name()),
        level=DEBUG)
    service_pause(service_name())
    if not os.path.exists('/etc/ceph'):
        os.makedirs('/etc/ceph')
    if is_leader():
        leader_set(namespace_tenants=config('namespace-tenants'))
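
service_pause() and the later service_resume() come from charmhelpers.core.host: pausing stops the service and prevents it from starting at boot, and resuming re-enables and starts it once keys are available. The directory handling could also be written as a single call, a trivial sketch:

import os

# Equivalent to the exists()/makedirs() pair above; a no-op if /etc/ceph exists.
os.makedirs('/etc/ceph', exist_ok=True)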
Example #5
    def _mon_relation():
        key_name = 'rgw.{}'.format(socket.gethostname())
        if request_per_unit_key():
            relation_set(relation_id=rid, key_name=key_name)
        # NOTE: prefer zone name if in use over pool-prefix.
        rq = ceph.get_create_rgw_pools_rq(
            prefix=config('zone') or config('pool-prefix'))
        if is_request_complete(rq, relation='mon'):
            log('Broker request complete', level=DEBUG)
            CONFIGS.write_all()
            # New style per unit keys
            key = relation_get(attribute='{}_key'.format(key_name),
                               rid=rid,
                               unit=unit)
            if not key:
                # Fallback to old style global key
                key = relation_get(attribute='radosgw_key', rid=rid, unit=unit)
                key_name = None

            if key:
                new_keyring = ceph.import_radosgw_key(key, name=key_name)
                # NOTE(jamespage):
                # Deal with switch from radosgw init script to
                # systemd named units for radosgw instances by
                # stopping and disabling the radosgw unit
                if systemd_based_radosgw():
                    service_stop('radosgw')
                    service('disable', 'radosgw')
                    # Update the nrpe config. If we wait for the below
                    # to be called elsewhere, there exists a period
                    # where nagios will report the radosgw service as
                    # down, and also not be monitoring the per
                    # host services.
                    update_nrpe_config(checks_to_remove=['radosgw'])

                # NOTE(jamespage):
                # Multi-site deployments need to defer restart as the
                # zone is not created until the master relation is
                # joined; restarting here will cause a restart burst
                # in systemd and stop the process restarting once
                # zone configuration is complete.
                if (not is_unit_paused_set() and new_keyring
                        and not multisite_deployment()):
                    log('Resume service "{}" as we now have keys for it.'.
                        format(service_name()),
                        level=DEBUG)
                    service_resume(service_name())

            process_multisite_relations()
        else:
            send_request_if_needed(rq, relation='mon')
Example #6
def leader_settings_changed():
    # NOTE: leader unit will only ever set leader storage
    #       data when multi-site realm, zonegroup, zone or user
    #       data has been created/changed - trigger restarts
    #       of rgw services.
    if restart_nonce_changed(leader_get('restart_nonce')):
        service_restart(service_name())
    if not is_leader():
        for r_id in relation_ids('master'):
            master_relation_joined(r_id)
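
restart_nonce_changed() is not shown in these excerpts; a plausible implementation records the last nonce this unit acted on and reports whether the leader has published a new one. A sketch, assuming a simple JSON state file rather than whatever storage the charm actually uses:

import json
import os

# Hypothetical location for the per-unit record of the last handled nonce.
_NONCE_FILE = '/var/lib/charm-state/radosgw-restart-nonce.json'


def restart_nonce_changed(nonce):
    """Return True if ``nonce`` differs from the last recorded value.

    A falsy nonce (leader data not yet set) never triggers a restart; a new
    value is recorded so repeat calls return False until the leader publishes
    a fresh nonce.
    """
    if not nonce:
        return False
    previous = None
    if os.path.exists(_NONCE_FILE):
        with open(_NONCE_FILE) as f:
            previous = json.load(f).get('nonce')
    if previous == nonce:
        return False
    os.makedirs(os.path.dirname(_NONCE_FILE), exist_ok=True)
    with open(_NONCE_FILE, 'w') as f:
        json.dump({'nonce': nonce}, f)
    return True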
Example #7
def slave_relation_changed(relation_id=None, unit=None):
    if not is_leader():
        return
    if not ready_for_service(legacy=False):
        log('unit not ready, deferring multisite configuration')
        return

    master_data = relation_get(rid=relation_id, unit=unit)
    if not all((master_data.get('realm'), master_data.get('zonegroup'),
                master_data.get('access_key'), master_data.get('secret'),
                master_data.get('url'))):
        log("Defer processing until master RGW has provided required data")
        return

    internal_url = '{}:{}'.format(
        canonical_url(CONFIGS, INTERNAL),
        listen_port(),
    )
    endpoints = [internal_url]

    realm = config('realm')
    zonegroup = config('zonegroup')
    zone = config('zone')

    if (realm, zonegroup) != (master_data['realm'], master_data['zonegroup']):
        log("Mismatched configuration so stop multi-site configuration now")
        return

    if not leader_get('restart_nonce'):
        # NOTE(jamespage):
        # This is an ugly kludge to force creation of the required data
        # items in the .rgw.root pool prior to the radosgw process being
        # started; radosgw-admin does not currently have a way of doing
        # this operation but a period update will force it to be created.
        multisite.update_period(fatal=False)

    mutation = False

    if realm not in multisite.list_realms():
        multisite.pull_realm(url=master_data['url'],
                             access_key=master_data['access_key'],
                             secret=master_data['secret'])
        multisite.pull_period(url=master_data['url'],
                              access_key=master_data['access_key'],
                              secret=master_data['secret'])
        multisite.set_default_realm(realm)
        mutation = True

    if zone not in multisite.list_zones():
        multisite.create_zone(zone,
                              endpoints=endpoints,
                              default=False,
                              master=False,
                              zonegroup=zonegroup,
                              access_key=master_data['access_key'],
                              secret=master_data['secret'])
        mutation = True

    if mutation:
        multisite.update_period()
        service_restart(service_name())
        leader_set(restart_nonce=str(uuid.uuid4()))
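
The multisite helpers used here are thin wrappers around the radosgw-admin CLI. A hedged sketch of what pull_realm() might look like; the function name and keyword arguments are taken from the call above, while the implementation itself is an assumption (the charm's own module adds error handling and output parsing):

import subprocess


def pull_realm(url, access_key, secret):
    """Pull the realm definition from the master zone's endpoint.

    Mirrors ``radosgw-admin realm pull``; stderr is captured so callers can
    inspect CalledProcessError.stderr on failure.
    """
    cmd = [
        'radosgw-admin', 'realm', 'pull',
        '--url={}'.format(url),
        '--access-key={}'.format(access_key),
        '--secret={}'.format(secret),
    ]
    return subprocess.check_output(cmd, stderr=subprocess.PIPE)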
Example #8
def master_relation_joined(relation_id=None):
    if not ready_for_service(legacy=False):
        log('unit not ready, deferring multisite configuration')
        return

    internal_url = '{}:{}'.format(
        canonical_url(CONFIGS, INTERNAL),
        listen_port(),
    )
    endpoints = [internal_url]
    realm = config('realm')
    zonegroup = config('zonegroup')
    zone = config('zone')
    access_key = leader_get('access_key')
    secret = leader_get('secret')

    if not all((realm, zonegroup, zone)):
        return

    relation_set(relation_id=relation_id,
                 realm=realm,
                 zonegroup=zonegroup,
                 url=endpoints[0],
                 access_key=access_key,
                 secret=secret)

    if not is_leader():
        return

    if not leader_get('restart_nonce'):
        # NOTE(jamespage):
        # This is an ugly kludge to force creation of the required data
        # items in the .rgw.root pool prior to the radosgw process being
        # started; radosgw-admin does not currently have a way of doing
        # this operation but a period update will force it to be created.
        multisite.update_period(fatal=False)

    mutation = False

    if realm not in multisite.list_realms():
        multisite.create_realm(realm, default=True)
        mutation = True

    if zonegroup not in multisite.list_zonegroups():
        multisite.create_zonegroup(zonegroup,
                                   endpoints=endpoints,
                                   default=True,
                                   master=True,
                                   realm=realm)
        mutation = True

    if zone not in multisite.list_zones():
        multisite.create_zone(zone,
                              endpoints=endpoints,
                              default=True,
                              master=True,
                              zonegroup=zonegroup)
        mutation = True

    if MULTISITE_SYSTEM_USER not in multisite.list_users():
        access_key, secret = multisite.create_system_user(
            MULTISITE_SYSTEM_USER)
        multisite.modify_zone(zone, access_key=access_key, secret=secret)
        leader_set(access_key=access_key, secret=secret)
        mutation = True

    if mutation:
        multisite.update_period()
        service_restart(service_name())
        leader_set(restart_nonce=str(uuid.uuid4()))

    relation_set(relation_id=relation_id, access_key=access_key, secret=secret)
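
Creation on the master side follows the same check-then-create pattern so the hook stays idempotent across re-runs. As a further sketch, create_realm() presumably wraps radosgw-admin realm create (again an assumption about the helper, not the charm's verbatim code):

import subprocess


def create_realm(name, default=False):
    """Create an RGW realm, optionally marking it as the default realm."""
    cmd = ['radosgw-admin', 'realm', 'create', '--rgw-realm={}'.format(name)]
    if default:
        cmd.append('--default')
    return subprocess.check_output(cmd, stderr=subprocess.PIPE)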
Example #9
def slave_relation_changed(relation_id=None, unit=None):
    if not is_leader():
        return
    if not ready_for_service(legacy=False):
        log('unit not ready, deferring multisite configuration')
        return

    master_data = relation_get(rid=relation_id, unit=unit)
    if not all((master_data.get('realm'),
                master_data.get('zonegroup'),
                master_data.get('access_key'),
                master_data.get('secret'),
                master_data.get('url'))):
        log("Defer processing until master RGW has provided required data")
        return

    internal_url = '{}:{}'.format(
        canonical_url(CONFIGS, INTERNAL),
        config('port')
    )
    endpoints = [internal_url]

    realm = config('realm')
    zonegroup = config('zonegroup')
    zone = config('zone')

    if (realm, zonegroup) != (master_data['realm'],
                              master_data['zonegroup']):
        log("Mismatched configuration so stop multi-site configuration now")
        return

    if not leader_get('restart_nonce'):
        # NOTE(jamespage):
        # This is an ugly kludge to force creation of the required data
        # items in the .rgw.root pool prior to the radosgw process being
        # started; radosgw-admin does not currently have a way of doing
        # this operation but a period update will force it to be created.
        multisite.update_period(fatal=False)

    mutation = False

    if realm not in multisite.list_realms():
        multisite.pull_realm(url=master_data['url'],
                             access_key=master_data['access_key'],
                             secret=master_data['secret'])
        multisite.pull_period(url=master_data['url'],
                              access_key=master_data['access_key'],
                              secret=master_data['secret'])
        multisite.set_default_realm(realm)
        mutation = True

    if zone not in multisite.list_zones():
        multisite.create_zone(zone,
                              endpoints=endpoints,
                              default=False, master=False,
                              zonegroup=zonegroup,
                              access_key=master_data['access_key'],
                              secret=master_data['secret'])
        mutation = True

    if mutation:
        multisite.update_period()
        service_restart(service_name())
        leader_set(restart_nonce=str(uuid.uuid4()))
Example #10
def master_relation_joined(relation_id=None):
    if not ready_for_service(legacy=False):
        log('unit not ready, deferring multisite configuration')
        return

    internal_url = '{}:{}'.format(
        canonical_url(CONFIGS, INTERNAL),
        config('port')
    )
    endpoints = [internal_url]
    realm = config('realm')
    zonegroup = config('zonegroup')
    zone = config('zone')
    access_key = leader_get('access_key')
    secret = leader_get('secret')

    if not all((realm, zonegroup, zone)):
        return

    relation_set(relation_id=relation_id,
                 realm=realm,
                 zonegroup=zonegroup,
                 url=endpoints[0],
                 access_key=access_key,
                 secret=secret)

    if not is_leader():
        return

    if not leader_get('restart_nonce'):
        # NOTE(jamespage):
        # This is an ugly kludge to force creation of the required data
        # items in the .rgw.root pool prior to the radosgw process being
        # started; radosgw-admin does not currently have a way of doing
        # this operation but a period update will force it to be created.
        multisite.update_period(fatal=False)

    mutation = False

    if realm not in multisite.list_realms():
        multisite.create_realm(realm, default=True)
        mutation = True

    if zonegroup not in multisite.list_zonegroups():
        multisite.create_zonegroup(zonegroup,
                                   endpoints=endpoints,
                                   default=True, master=True,
                                   realm=realm)
        mutation = True

    if zone not in multisite.list_zones():
        multisite.create_zone(zone,
                              endpoints=endpoints,
                              default=True, master=True,
                              zonegroup=zonegroup)
        mutation = True

    if MULTISITE_SYSTEM_USER not in multisite.list_users():
        access_key, secret = multisite.create_system_user(
            MULTISITE_SYSTEM_USER
        )
        multisite.modify_zone(zone,
                              access_key=access_key,
                              secret=secret)
        leader_set(access_key=access_key,
                   secret=secret)
        mutation = True

    if mutation:
        multisite.update_period()
        service_restart(service_name())
        leader_set(restart_nonce=str(uuid.uuid4()))

    relation_set(relation_id=relation_id,
                 access_key=access_key,
                 secret=secret)
Example #11
    def _mon_relation():
        key_name = 'rgw.{}'.format(socket.gethostname())
        legacy = True
        if request_per_unit_key():
            legacy = False
            relation_set(relation_id=rid, key_name=key_name)
        try:
            rq = ceph.get_create_rgw_pools_rq(
                prefix=config('zone') or config('pool-prefix'))
        except ValueError as e:
            # The end user has most likely provided an invalid value for
            # a configuration option. Just log the traceback here, the
            # end user will be notified by assess_status() called at
            # the end of the hook execution.
            log('Caught ValueError, invalid value provided for '
                'configuration?: "{}"'.format(str(e)),
                level=DEBUG)
            return

        if is_request_complete(rq, relation='mon'):
            log('Broker request complete', level=DEBUG)
            CONFIGS.write_all()
            # New style per unit keys
            key = relation_get(attribute='{}_key'.format(key_name),
                               rid=rid,
                               unit=unit)
            if not key:
                # Fallback to old style global key
                key = relation_get(attribute='radosgw_key', rid=rid, unit=unit)
                key_name = None

            if key:
                new_keyring = ceph.import_radosgw_key(key, name=key_name)
                # NOTE(jamespage):
                # Deal with switch from radosgw init script to
                # systemd named units for radosgw instances by
                # stopping and disabling the radosgw unit
                if systemd_based_radosgw():
                    service_stop('radosgw')
                    service('disable', 'radosgw')
                    # Update the nrpe config. If we wait for the below
                    # to be called elsewhere, there exists a period
                    # where nagios will report the radosgw service as
                    # down, and also not be monitoring the per
                    # host services.
                    update_nrpe_config(checks_to_remove=['radosgw'])

                # NOTE(jamespage):
                # Multi-site deployments need to defer restart as the
                # zone is not created until the master relation is
                # joined; restarting here will cause a restart burst
                # in systemd and stop the process restarting once
                # zone configuration is complete.
                if (not is_unit_paused_set() and new_keyring
                        and not multisite_deployment()):
                    log('Resume service "{}" as we now have keys for it.'.
                        format(service_name()),
                        level=DEBUG)
                    service_resume(service_name())

            if multisite_deployment():
                process_multisite_relations()
            elif (ready_for_service(legacy=legacy) and is_leader()
                  and 'mon' in CONFIGS.complete_contexts()):
                # In a non multi-site deployment create the
                # zone using the default zonegroup and restart the service
                internal_url = '{}:{}'.format(
                    canonical_url(CONFIGS, INTERNAL),
                    listen_port(),
                )
                endpoints = [internal_url]
                zonegroup = 'default'
                zone = config('zone')
                existing_zones = multisite.list_zones()
                log('Existing zones {}'.format(existing_zones), level=DEBUG)
                if zone not in existing_zones:
                    log("Zone '{}' doesn't exist, creating".format(zone))
                    try:
                        multisite.create_zone(zone,
                                              endpoints=endpoints,
                                              default=True,
                                              master=True,
                                              zonegroup=zonegroup)
                    except subprocess.CalledProcessError as e:
                        if 'File exists' in e.stderr.decode('UTF-8'):
                            # NOTE(lourot): may have been created in the
                            # background by the Rados Gateway daemon, see
                            # lp:1856106
                            log("Zone '{}' existed already after all".format(
                                zone))
                        else:
                            raise

                    existing_zones = multisite.list_zones(retry_on_empty=True)
                    log('Existing zones {}'.format(existing_zones),
                        level=DEBUG)
                    if zone not in existing_zones:
                        raise RuntimeError(
                            "Could not create zone '{}'".format(zone))

                    service_restart(service_name())
        else:
            send_request_if_needed(rq, relation='mon')
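
The 'File exists' branch above reads e.stderr, which CalledProcessError only carries when the failing command was run with stderr captured. The multisite helpers therefore presumably invoke radosgw-admin along these lines (a hypothetical wrapper, shown only to illustrate the stderr handling):

import subprocess


def _radosgw_admin(*args):
    # Capturing stderr is what makes e.stderr available to callers that
    # want to distinguish 'File exists' from genuine failures.
    return subprocess.check_output(('radosgw-admin',) + args,
                                   stderr=subprocess.PIPE)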