Example #1
    def start(self, args=None):
        """ (re)start the daemon """
        self.daemon.restart()
        # wait until startup completes
        wait_for_radosgw(self.endpoint(), self.remote)
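
The wait_for_radosgw() helper used above (and called with an explicit URL and remote in Examples #2 and #3) blocks until the gateway starts accepting connections. As a rough illustration of what such a readiness poll involves, here is a minimal sketch; the function name, retry count, delay, and use of the requests library are illustrative assumptions, not the helper's actual implementation:

import time

import requests


def wait_for_radosgw_sketch(url, retries=30, delay=2):
    # hypothetical stand-in for teuthology's wait_for_radosgw():
    # poll the endpoint until radosgw answers HTTP requests
    for _ in range(retries):
        try:
            # any HTTP response (even an error status) means the
            # frontend is up and radosgw finished starting
            requests.get(url, timeout=5, verify=False)
            return
        except requests.exceptions.ConnectionError:
            time.sleep(delay)
    raise RuntimeError('radosgw did not come up at {}'.format(url))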
Example #2
def start_rgw(ctx, config, clients):
    """
    Start rgw on remote sites.
    """
    log.info('Starting rgw...')
    testdir = teuthology.get_testdir(ctx)
    for client in clients:
        (remote, ) = ctx.cluster.only(client).remotes.keys()
        cluster_name, daemon_type, client_id = teuthology.split_role(client)
        client_with_id = daemon_type + '.' + client_id
        client_with_cluster = cluster_name + '.' + client_with_id

        client_config = config.get(client)
        if client_config is None:
            client_config = {}
        log.info("rgw %s config is %s", client, client_config)
        cmd_prefix = [
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            'term',
        ]

        rgw_cmd = ['radosgw']

        log.info("Using %s as radosgw frontend", ctx.rgw.frontend)

        endpoint = ctx.rgw.role_endpoints[client]
        frontends = ctx.rgw.frontend
        frontend_prefix = client_config.get('frontend_prefix', None)
        if frontend_prefix:
            frontends += ' prefix={pfx}'.format(pfx=frontend_prefix)

        if endpoint.cert:
            # add the ssl certificate path
            frontends += ' ssl_certificate={}'.format(
                endpoint.cert.certificate)
            if ctx.rgw.frontend == 'civetweb':
                frontends += ' port={}s'.format(endpoint.port)
            else:
                frontends += ' ssl_port={}'.format(endpoint.port)
        else:
            frontends += ' port={}'.format(endpoint.port)

        rgw_cmd.extend([
            '--rgw-frontends',
            frontends,
            '-n',
            client_with_id,
            '--cluster',
            cluster_name,
            '-k',
            '/etc/ceph/{client_with_cluster}.keyring'.format(
                client_with_cluster=client_with_cluster),
            '--log-file',
            '/var/log/ceph/rgw.{client_with_cluster}.log'.format(
                client_with_cluster=client_with_cluster),
            '--rgw_ops_log_socket_path',
            '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(
                tdir=testdir, client_with_cluster=client_with_cluster),
        ])

        keystone_role = client_config.get('use-keystone-role', None)
        if keystone_role is not None:
            if not ctx.keystone:
                raise ConfigError('rgw must run after the keystone task')
            url = 'http://{host}:{port}/v1/KEY_$(tenant_id)s'.format(
                host=endpoint.hostname, port=endpoint.port)
            ctx.keystone.create_endpoint(ctx, keystone_role, 'swift', url)

            keystone_host, keystone_port = \
                ctx.keystone.public_endpoints[keystone_role]
            rgw_cmd.extend([
                '--rgw_keystone_url',
                'http://{khost}:{kport}'.format(khost=keystone_host,
                                                kport=keystone_port),
            ])

        if client_config.get('dns-name') is not None:
            rgw_cmd.extend(['--rgw-dns-name', endpoint.dns_name])
        if client_config.get('dns-s3website-name') is not None:
            rgw_cmd.extend(
                ['--rgw-dns-s3website-name', endpoint.website_dns_name])

        vault_role = client_config.get('use-vault-role', None)
        barbican_role = client_config.get('use-barbican-role', None)
        pykmip_role = client_config.get('use-pykmip-role', None)

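        # file where the vault root token will be written for radosgw to read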
        token_path = teuthology.get_testdir(ctx) + '/vault-token'
        if barbican_role is not None:
            if not hasattr(ctx, 'barbican'):
                raise ConfigError('rgw must run after the barbican task')

            barbican_host, barbican_port = \
                ctx.barbican.endpoints[barbican_role]
            log.info("Use barbican url=%s:%s", barbican_host, barbican_port)

            rgw_cmd.extend([
                '--rgw_barbican_url',
                'http://{bhost}:{bport}'.format(bhost=barbican_host,
                                                bport=barbican_port),
            ])
        elif vault_role is not None:
            if not ctx.vault.root_token:
                raise ConfigError('vault: no "root_token" specified')
            # create token on file
            ctx.rgw.vault_role = vault_role
            ctx.cluster.only(client).run(args=[
                'echo', '-n', ctx.vault.root_token,
                run.Raw('>'), token_path
            ])
            log.info("Token file content")
            ctx.cluster.only(client).run(args=['cat', token_path])
            log.info("Restrict access to token file")
            ctx.cluster.only(client).run(args=['chmod', '600', token_path])
            ctx.cluster.only(client).run(
                args=['sudo', 'chown', 'ceph', token_path])

            rgw_cmd.extend([
                '--rgw_crypt_vault_addr',
                "{}:{}".format(*ctx.vault.endpoints[vault_role]),
                '--rgw_crypt_vault_token_file', token_path
            ])
        elif pykmip_role is not None:
            if not hasattr(ctx, 'pykmip'):
                raise ConfigError('rgw must run after the pykmip task')
            ctx.rgw.pykmip_role = pykmip_role
            rgw_cmd.extend([
                '--rgw_crypt_kmip_addr',
                "{}:{}".format(*ctx.pykmip.endpoints[pykmip_role]),
            ])

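        # run in the foreground, teeing output to a per-daemon stdout log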
        rgw_cmd.extend([
            '--foreground',
            run.Raw('|'),
            'sudo',
            'tee',
            '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(
                client_with_cluster=client_with_cluster),
            run.Raw('2>&1'),
        ])

        if client_config.get('valgrind'):
            cmd_prefix = get_valgrind_args(
                testdir,
                client_with_cluster,
                cmd_prefix,
                client_config.get('valgrind'),
                # see https://github.com/ceph/teuthology/pull/1600
                exit_on_first_error=False)

        run_cmd = list(cmd_prefix)
        run_cmd.extend(rgw_cmd)

        ctx.daemons.add_daemon(
            remote,
            'rgw',
            client_with_id,
            cluster=cluster_name,
            fsid=ctx.ceph[cluster_name].fsid,
            args=run_cmd,
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
        )

    # XXX: add_daemon() doesn't let us wait until radosgw finishes startup
    for client in clients:
        endpoint = ctx.rgw.role_endpoints[client]
        url = endpoint.url()
        log.info(
            'Polling {client} until it starts accepting connections on {url}'.
            format(client=client, url=url))
        (remote, ) = ctx.cluster.only(client).remotes.keys()
        wait_for_radosgw(url, remote)

    try:
        yield
    finally:
        for client in clients:
            cluster_name, daemon_type, client_id = teuthology.split_role(
                client)
            client_with_id = daemon_type + '.' + client_id
            client_with_cluster = cluster_name + '.' + client_with_id
            ctx.daemons.get_daemon('rgw', client_with_id, cluster_name).stop()
            ctx.cluster.only(client).run(args=[
                'rm',
                '-f',
                '{tdir}/rgw.opslog.{client}.sock'.format(
                    tdir=testdir, client=client_with_cluster),
            ], )
            ctx.cluster.only(client).run(args=['rm', '-f', token_path])
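
For reference, the per-client options that start_rgw() reads from config correspond to the client_config.get() calls above. Below is an illustrative config dict; the keys come from the code, but the values are made up:

example_config = {
    'client.0': {
        'frontend_prefix': '/swift',      # appended as prefix=... to --rgw-frontends
        'dns-name': True,                 # pass --rgw-dns-name from the endpoint
        'dns-s3website-name': True,       # pass --rgw-dns-s3website-name
        'use-keystone-role': 'client.0',  # requires the keystone task to run first
        'use-vault-role': 'client.0',     # barbican/vault/pykmip checked in that order
        'valgrind': ['--tool=memcheck'],  # wrap the daemon invocation in valgrind
    },
}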
Example #3
def start_rgw(ctx, config, clients):
    """
    Start rgw on remote sites.
    """
    log.info('Starting rgw...')
    testdir = teuthology.get_testdir(ctx)
    for client in clients:
        (remote,) = ctx.cluster.only(client).remotes.keys()
        cluster_name, daemon_type, client_id = teuthology.split_role(client)
        client_with_id = daemon_type + '.' + client_id
        client_with_cluster = cluster_name + '.' + client_with_id

        client_config = config.get(client)
        if client_config is None:
            client_config = {}
        log.info("rgw %s config is %s", client, client_config)
        cmd_prefix = [
            'sudo',
            'adjust-ulimits',
            'ceph-coverage',
            '{tdir}/archive/coverage'.format(tdir=testdir),
            'daemon-helper',
            'term',
            ]

        rgw_cmd = ['radosgw']

        log.info("Using %s as radosgw frontend", ctx.rgw.frontend)

        endpoint = ctx.rgw.role_endpoints[client]
        frontends = ctx.rgw.frontend
        frontend_prefix = client_config.get('frontend_prefix', None)
        if frontend_prefix:
            frontends += ' prefix={pfx}'.format(pfx=frontend_prefix)

        if endpoint.cert:
            # add the ssl certificate path
            frontends += ' ssl_certificate={}'.format(endpoint.cert.certificate)
            if ctx.rgw.frontend == 'civetweb':
                frontends += ' port={}s'.format(endpoint.port)
            else:
                frontends += ' ssl_port={}'.format(endpoint.port)
        else:
            frontends += ' port={}'.format(endpoint.port)

        rgw_cmd.extend([
            '--rgw-frontends', frontends,
            '-n', client_with_id,
            '--cluster', cluster_name,
            '-k', '/etc/ceph/{client_with_cluster}.keyring'.format(client_with_cluster=client_with_cluster),
            '--log-file',
            '/var/log/ceph/rgw.{client_with_cluster}.log'.format(client_with_cluster=client_with_cluster),
            '--rgw_ops_log_socket_path',
            '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(
                tdir=testdir, client_with_cluster=client_with_cluster),
            ])

        keystone_role = client_config.get('use-keystone-role', None)
        if keystone_role is not None:
            if not ctx.keystone:
                raise ConfigError('rgw must run after the keystone task')
            url = 'http://{host}:{port}/v1/KEY_$(tenant_id)s'.format(host=endpoint.hostname,
                                                                     port=endpoint.port)
            ctx.keystone.create_endpoint(ctx, keystone_role, 'swift', url)

            keystone_host, keystone_port = \
                ctx.keystone.public_endpoints[keystone_role]
            rgw_cmd.extend([
                '--rgw_keystone_url',
                'http://{khost}:{kport}'.format(khost=keystone_host,
                                                kport=keystone_port),
                ])

        if client_config.get('dns-name'):
            rgw_cmd.extend(['--rgw-dns-name', endpoint.dns_name])
        if client_config.get('dns-s3website-name'):
            rgw_cmd.extend(['--rgw-dns-s3website-name', endpoint.website_dns_name])

        rgw_cmd.extend([
            '--foreground',
            run.Raw('|'),
            'sudo',
            'tee',
            '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(
                client_with_cluster=client_with_cluster),
            run.Raw('2>&1'),
            ])

        if client_config.get('valgrind'):
            cmd_prefix = teuthology.get_valgrind_args(
                testdir,
                client_with_cluster,
                cmd_prefix,
                client_config.get('valgrind')
                )

        run_cmd = list(cmd_prefix)
        run_cmd.extend(rgw_cmd)

        ctx.daemons.add_daemon(
            remote, 'rgw', client_with_id,
            cluster=cluster_name,
            args=run_cmd,
            logger=log.getChild(client),
            stdin=run.PIPE,
            wait=False,
            )

    # XXX: add_daemon() doesn't let us wait until radosgw finishes startup
    for client in clients:
        endpoint = ctx.rgw.role_endpoints[client]
        url = endpoint.url()
        log.info('Polling {client} until it starts accepting connections on {url}'.format(client=client, url=url))
        (remote,) = ctx.cluster.only(client).remotes.keys()
        wait_for_radosgw(url, remote)

    try:
        yield
    finally:
        for client in clients:
            cluster_name, daemon_type, client_id = teuthology.split_role(client)
            client_with_id = daemon_type + '.' + client_id
            client_with_cluster = cluster_name + '.' + client_with_id
            ctx.daemons.get_daemon('rgw', client_with_id, cluster_name).stop()
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-f',
                    '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
                                                             client=client_with_cluster),
                    ],
                )
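
Both versions of start_rgw() encode the frontend-specific SSL syntax: civetweb marks an SSL listener by appending 's' to the port number, while other frontends (e.g. beast) take a separate ssl_port option. The following is an illustrative restatement of that string-building logic with made-up paths and ports:

def build_frontends(frontend, port, cert_path=None):
    # mirrors the frontends logic in start_rgw() above
    frontends = frontend
    if cert_path:
        frontends += ' ssl_certificate={}'.format(cert_path)
        if frontend == 'civetweb':
            frontends += ' port={}s'.format(port)  # trailing 's' marks SSL
        else:
            frontends += ' ssl_port={}'.format(port)
    else:
        frontends += ' port={}'.format(port)
    return frontends

# build_frontends('civetweb', 443, '/etc/ssl/rgw.pem')
#   -> 'civetweb ssl_certificate=/etc/ssl/rgw.pem port=443s'
# build_frontends('beast', 443, '/etc/ssl/rgw.pem')
#   -> 'beast ssl_certificate=/etc/ssl/rgw.pem ssl_port=443'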
Example #4
    def setup(self):
        super(RGWCloudTier, self).setup()

        overrides = self.ctx.config.get('overrides', {})
        teuthology.deep_merge(self.config, overrides.get('rgw-cloudtier', {}))

        if not self.ctx.rgw:
            raise ConfigError('rgw-cloudtier must run after the rgw task')

        self.ctx.rgw_cloudtier = argparse.Namespace()
        self.ctx.rgw_cloudtier.config = self.config

        log.info('Configuring rgw cloudtier ...')
        clients = self.config.keys()  # http://tracker.ceph.com/issues/20417
        for client in clients:
            client_config = self.config.get(client)
            if client_config is None:
                client_config = {}

            # client_config is always a dict after the default above;
            # only configure cloud tiering when options were provided
            if client_config:
                log.info(
                    'client %s - cloudtier config is %s',
                    client, client_config)
                # configure the cloud-s3 tier for this client

                cloud_client = client_config.get('cloud_client')
                cloud_storage_class = client_config.get('cloud_storage_class')
                cloud_target_path = client_config.get('cloud_target_path')
                cloud_target_storage_class = client_config.get(
                    'cloud_target_storage_class')
                cloud_retain_head_object = client_config.get(
                    'cloud_retain_head_object')

                cloudtier_user = client_config.get('cloudtier_user')
                cloud_access_key = cloudtier_user.get('cloud_access_key')
                cloud_secret = cloudtier_user.get('cloud_secret')

                # XXX: the 'default' zone and zonegroup aren't created until
                # we run RGWRados::init_complete(); issue a 'radosgw-admin
                # user list' command to trigger this
                rgwadmin(self.ctx,
                         client,
                         cmd=['user', 'list'],
                         check_status=True)

                endpoint = self.ctx.rgw.role_endpoints[cloud_client]

                # create cloudtier storage class
                tier_config_params = (
                    "endpoint=" + endpoint.url() +
                    ",access_key=" + cloud_access_key +
                    ",secret=" + cloud_secret +
                    ",retain_head_object=" + cloud_retain_head_object)

                if cloud_target_path is not None:
                    tier_config_params += ",target_path=" + cloud_target_path
                if cloud_target_storage_class is not None:
                    tier_config_params += (
                        ",target_storage_class=" + cloud_target_storage_class)

                log.info('Configuring cloud-s3 tier storage class type = %s',
                         cloud_storage_class)

                rgwadmin(self.ctx,
                         client,
                         cmd=[
                             'zonegroup', 'placement', 'add', '--rgw-zone',
                             'default', '--placement-id', 'default-placement',
                             '--storage-class', cloud_storage_class,
                             '--tier-type', 'cloud-s3', '--tier-config',
                             tier_config_params
                         ],
                         check_status=True)

                # create the cloudtier user with the access keys given for
                # the cloud client
                cloud_tier_user_id = "cloud-tier-user-" + cloud_client
                cloud_tier_user_name = "CLOUD TIER USER - " + cloud_client
                rgwadmin(self.ctx,
                         cloud_client,
                         cmd=[
                             'user', 'create', '--uid', cloud_tier_user_id,
                             '--display-name', cloud_tier_user_name,
                             '--access-key', cloud_access_key, '--secret',
                             cloud_secret, '--caps', 'user-policy=*'
                         ],
                         check_status=True)

                log.info('Finished configuring rgw cloudtier ...')

                cluster_name, daemon_type, client_id = teuthology.split_role(
                    client)
                client_with_id = daemon_type + '.' + client_id
                self.ctx.daemons.get_daemon('rgw', client_with_id,
                                            cluster_name).restart()
                log.info('Restarted rgw daemon ...')

                (remote, ) = self.ctx.cluster.only(client).remotes.keys()
                wait_for_radosgw(endpoint.url(), remote)
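
With sample values, the --tier-config string assembled in setup() is a single comma-separated list of key=value pairs that feeds the 'zonegroup placement add' command. A sketch with made-up endpoint, credentials, and storage-class name:

# made-up inputs for illustration
endpoint_url = 'http://cloud.example.com:7280'
tier_config_params = (
    'endpoint=' + endpoint_url +
    ',access_key=EXAMPLEKEY' +
    ',secret=EXAMPLESECRET' +
    ',retain_head_object=true' +
    ',target_path=tiered-bucket' +
    ',target_storage_class=STANDARD')

# resulting radosgw-admin invocation (abridged):
#   radosgw-admin zonegroup placement add --rgw-zone default \
#       --placement-id default-placement --storage-class CLOUDTIER \
#       --tier-type cloud-s3 --tier-config <tier_config_params>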