Example #1
def _setup_mon(ctx, manager, remote, mon, name, data_path, conf_path):
    # co-locate a new monitor on remote where an existing monitor is hosted
    cluster = manager.cluster
    remote.run(args=['sudo', 'mkdir', '-p', data_path])
    keyring_path = '/etc/ceph/{cluster}.keyring'.format(
        cluster=manager.cluster)
    testdir = teuthology.get_testdir(ctx)
    monmap_path = '{tdir}/{cluster}.monmap'.format(tdir=testdir,
                                                   cluster=cluster)
    manager.raw_cluster_cmd('mon', 'getmap', '-o', monmap_path)
    if manager.controller != remote:
        monmap = teuthology.get_file(manager.controller, monmap_path)
        teuthology.write_file(remote, monmap_path, StringIO(monmap))
    remote.run(args=[
        'sudo', 'ceph-mon', '--cluster', cluster, '--mkfs', '-i', mon,
        '--monmap', monmap_path, '--keyring', keyring_path
    ])
    if manager.controller != remote:
        teuthology.delete_file(remote, monmap_path)
    # raw_cluster_cmd() is performed using sudo, so sudo here also.
    teuthology.delete_file(manager.controller, monmap_path, sudo=True)
    # update ceph.conf so that the ceph CLI is able to connect to the cluster
    if conf_path:
        ip = remote.ip_address
        port = _get_next_port(ctx, ip, cluster)
        mon_addr = '{ip}:{port}'.format(ip=ip, port=port)
        ctx.ceph[cluster].conf[name] = {'mon addr': mon_addr}
        write_conf(ctx, conf_path, cluster)
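A note on the monmap hand-off above: get_file() pulls the monmap off the controller and write_file() re-materializes it on the new mon's host, but only when they are different machines. A minimal local sketch of that shape, using temp directories as stand-ins for the two remotes (the helper names mirror teuthology's; the bodies are assumptions):

import io
import os
import tempfile

def get_file(host_dir, path):
    # stand-in for teuthology.get_file(): return the file's bytes
    with open(os.path.join(host_dir, path), 'rb') as f:
        return f.read()

def write_file(host_dir, path, data):
    # stand-in for teuthology.write_file(): accept bytes or a file-like object
    payload = data.read() if hasattr(data, 'read') else data
    with open(os.path.join(host_dir, path), 'wb') as f:
        f.write(payload)

controller = tempfile.mkdtemp()
remote = tempfile.mkdtemp()
with open(os.path.join(controller, 'ceph.monmap'), 'wb') as f:
    f.write(b'monmap-bytes')

if controller != remote:  # copy only when the new mon lives elsewhere
    monmap = get_file(controller, 'ceph.monmap')
    write_file(remote, 'ceph.monmap', io.BytesIO(monmap))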
Example #2
def configure(ctx, config):
    assert isinstance(config, dict)
    log.info("Configuring testswift...")
    testdir = teuthology.get_testdir(ctx)
    for client, properties in config["clients"].iteritems():
        log.info("client={c}".format(c=client))
        log.info("config={c}".format(c=config))
        testswift_conf = config["testswift_conf"][client]
        if properties is not None and "rgw_server" in properties:
            host = None
            for target, roles in zip(ctx.config["targets"].iterkeys(), ctx.config["roles"]):
                log.info("roles: " + str(roles))
                log.info("target: " + str(target))
                if properties["rgw_server"] in roles:
                    _, host = split_user(target)
            assert host is not None, "Invalid client specified as the rgw_server"
            testswift_conf["func_test"]["auth_host"] = host
        else:
            testswift_conf["func_test"]["auth_host"] = "localhost"

        log.info(client)
        (remote,) = ctx.cluster.only(client).remotes.keys()
        remote.run(args=["cd", "{tdir}/swift".format(tdir=testdir), run.Raw("&&"), "./bootstrap"])
        conf_fp = StringIO()
        testswift_conf.write(conf_fp)
        teuthology.write_file(
            remote=remote,
            path="{tdir}/archive/testswift.{client}.conf".format(tdir=testdir, client=client),
            data=conf_fp.getvalue(),
        )
    yield
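The serialize-then-ship step above is a recurring idiom across these examples: render the in-memory config into a StringIO, then pass getvalue() to write_file(). A self-contained Python 3 sketch (section and option names are illustrative):

import configparser
import io

testswift_conf = configparser.ConfigParser()
testswift_conf['func_test'] = {'auth_host': 'localhost', 'auth_port': '7280'}

conf_fp = io.StringIO()
testswift_conf.write(conf_fp)   # render the INI text into the buffer
print(conf_fp.getvalue())       # the exact payload write_file() would ship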
Example #3
def write_core_site(ctx, config):
    coreSiteFile = "/tmp/cephtest/hadoop/conf/core-site.xml" 

    hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote, roles_for_host in hadoopNodes.remotes.iteritems():

        # check the config to see if we should use hdfs or ceph
        default_fs_string = ""
        if config.get('hdfs'):
            default_fs_string = 'hdfs://{master_ip}:54310'.format(master_ip=get_hadoop_master_ip(ctx))
        else:
            default_fs_string = 'ceph:///'

        teuthology.write_file(remote, coreSiteFile, 
'''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file.  -->
<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/tmp/hadoop/tmp</value>
    </property>
    <property>
        <name>fs.default.name</name>
        <value>{default_fs}</value>
    </property>
    <property>
        <name>ceph.conf.file</name>
        <value>/tmp/cephtest/ceph.conf</value>
    </property>
</configuration>
'''.format(default_fs=default_fs_string))

        log.info("wrote file: " + coreSiteFile + " to host: " + str(remote))
Example #4
def write_mapred_site(ctx):
    mapredSiteFile = "{tdir}/apache_hadoop/conf/mapred-site.xml".format(tdir=teuthology.get_testdir(ctx))

    master_ip = get_hadoop_master_ip(ctx)
    log.info("adding host {remote} as jobtracker".format(remote=master_ip))

    hadoopNodes = ctx.cluster.only(teuthology.is_type("hadoop"))
    for remote, roles_for_host in hadoopNodes.remotes.iteritems():
        teuthology.write_file(
            remote,
            mapredSiteFile,
            """<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
    <property>
        <name>mapred.job.tracker</name>
        <value>{remote}:54311</value>
    </property>
</configuration>  
""".format(
                remote=master_ip
            ),
        )

        log.info("wrote file: " + mapredSiteFile + " to host: " + str(remote))
Example #5
def configure(ctx, config):
    assert isinstance(config, dict)
    log.info('Configuring s3-tests...')
    testdir = teuthology.get_testdir(ctx)
    for client, properties in config['clients'].iteritems():
        s3tests_conf = config['s3tests_conf'][client]
        if properties is not None and 'rgw_server' in properties:
            host = None
            for target, roles in zip(ctx.config['targets'].iterkeys(),
                                     ctx.config['roles']):
                log.info('roles: ' + str(roles))
                log.info('target: ' + str(target))
                if properties['rgw_server'] in roles:
                    _, host = split_user(target)
            assert host is not None, "Invalid client specified as the rgw_server"
            s3tests_conf['DEFAULT']['host'] = host
        else:
            s3tests_conf['DEFAULT']['host'] = 'localhost'

        (remote, ) = ctx.cluster.only(client).remotes.keys()
        remote.run(args=[
            'cd',
            '{tdir}/s3-tests'.format(tdir=testdir),
            run.Raw('&&'),
            './bootstrap',
        ], )
        conf_fp = StringIO()
        s3tests_conf.write(conf_fp)
        teuthology.write_file(
            remote=remote,
            path='{tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir,
                                                                client=client),
            data=conf_fp.getvalue(),
        )
    yield
Example #6
    def write_rotate_conf(ctx, daemons):
        testdir = teuthology.get_testdir(ctx)
        rotate_conf_path = os.path.join(os.path.dirname(__file__), "logrotate.conf")
        with file(rotate_conf_path, "rb") as f:
            conf = ""
            for daemon, size in daemons.iteritems():
                log.info("writing logrotate stanza for {daemon}".format(daemon=daemon))
                conf += f.read().format(daemon_type=daemon, max_size=size)
                f.seek(0, 0)

            for remote in ctx.cluster.remotes.iterkeys():
                teuthology.write_file(
                    remote=remote, path="{tdir}/logrotate.ceph-test.conf".format(tdir=testdir), data=StringIO(conf)
                )
                remote.run(
                    args=[
                        "sudo",
                        "mv",
                        "{tdir}/logrotate.ceph-test.conf".format(tdir=testdir),
                        "/etc/logrotate.d/ceph-test.conf",
                        run.Raw("&&"),
                        "sudo",
                        "chmod",
                        "0644",
                        "/etc/logrotate.d/ceph-test.conf",
                        run.Raw("&&"),
                        "sudo",
                        "chown",
                        "root.root",
                        "/etc/logrotate.d/ceph-test.conf",
                    ]
                )
                remote.chcon("/etc/logrotate.d/ceph-test.conf", "system_u:object_r:etc_t:s0")
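write_rotate_conf re-reads one template per daemon, so it rewinds the handle with f.seek(0, 0) after every read. A sketch of that loop with an in-memory template (the stanza text is an assumption, not the real logrotate.conf):

import io

template = io.StringIO(
    "/var/log/ceph/{daemon_type}*.log {{\n    maxsize {max_size}\n}}\n")
daemons = {'osd': '100M', 'mon': '10M'}  # daemon type -> rotation size

conf = ""
for daemon, size in daemons.items():
    conf += template.read().format(daemon_type=daemon, max_size=size)
    template.seek(0, 0)  # rewind so the next daemon re-reads the template
print(conf)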
Example #7
def write_mapred_site(ctx):
    """
    Add required entries to conf/mapred-site.xml
    """
    mapred_site_file = "{tdir}/apache_hadoop/conf/mapred-site.xml".format(
            tdir=teuthology.get_testdir(ctx))

    master_ip = get_hadoop_master_ip(ctx)
    log.info('adding host {remote} as jobtracker'.format(remote=master_ip))

    hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote in hadoop_nodes.remotes:
        teuthology.write_file(remote, mapred_site_file,
'''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
    <property>
        <name>mapred.job.tracker</name>
        <value>{remote}:54311</value>
    </property>
</configuration>
'''.format(remote=master_ip))

        log.info("wrote file: " + mapred_site_file + " to host: " + str(remote))
Example #8
def _setup_mon(ctx, manager, remote, mon, name, data_path, conf_path):
    # co-locate a new monitor on remote where an existing monitor is hosted
    cluster = manager.cluster
    remote.run(args=['sudo', 'mkdir', '-p', data_path])
    keyring_path = '/etc/ceph/{cluster}.keyring'.format(
        cluster=manager.cluster)
    testdir = teuthology.get_testdir(ctx)
    monmap_path = '{tdir}/{cluster}.monmap'.format(tdir=testdir,
                                                   cluster=cluster)
    manager.raw_cluster_cmd('mon', 'getmap', '-o', monmap_path)
    if manager.controller != remote:
        monmap = teuthology.get_file(manager.controller, monmap_path)
        teuthology.write_file(remote, monmap_path, StringIO(monmap))
    remote.run(
        args=[
            'sudo',
            'ceph-mon',
            '--cluster', cluster,
            '--mkfs',
            '-i', mon,
            '--monmap', monmap_path,
            '--keyring', keyring_path])
    if manager.controller != remote:
        teuthology.delete_file(remote, monmap_path)
    # raw_cluster_cmd() is performed using sudo, so sudo here also.
    teuthology.delete_file(manager.controller, monmap_path, sudo=True)
    # update ceph.conf so that the ceph CLI is able to connect to the cluster
    if conf_path:
        ip = remote.ip_address
        port = _get_next_port(ctx, ip, cluster)
        mon_addr = '{ip}:{port}'.format(ip=ip, port=port)
        ctx.ceph[cluster].conf[name] = {'mon addr': mon_addr}
        write_conf(ctx, conf_path, cluster)
Example #9
def configure(ctx, config):
    """
    Configure rgw and Swift
    """
    assert isinstance(config, dict)
    log.info('Configuring testswift...')
    testdir = teuthology.get_testdir(ctx)
    for client, testswift_conf in config.iteritems():
        (remote,) = ctx.cluster.only(client).remotes.keys()
        remote.run(
            args=[
                'cd',
                '{tdir}/swift'.format(tdir=testdir),
                run.Raw('&&'),
                './bootstrap',
                ],
            )
        conf_fp = StringIO()
        testswift_conf.write(conf_fp)
        teuthology.write_file(
            remote=remote,
            path='{tdir}/archive/testswift.{client}.conf'.format(tdir=testdir, client=client),
            data=conf_fp.getvalue(),
            )
    yield
Example #10
    def write_rotate_conf(ctx, daemons):
        testdir = teuthology.get_testdir(ctx)
        rotate_conf_path = os.path.join(os.path.dirname(__file__),
                                        'logrotate.conf')
        with file(rotate_conf_path, 'rb') as f:
            conf = ""
            for daemon, size in daemons.iteritems():
                log.info('writing logrotate stanza for {daemon}'.format(
                    daemon=daemon))
                conf += f.read().format(daemon_type=daemon, max_size=size)
                f.seek(0, 0)

            for remote in ctx.cluster.remotes.iterkeys():
                teuthology.write_file(
                    remote=remote,
                    path='{tdir}/logrotate.ceph-test.conf'.format(
                        tdir=testdir),
                    data=StringIO(conf))
                remote.run(args=[
                    'sudo', 'mv', '{tdir}/logrotate.ceph-test.conf'.format(
                        tdir=testdir), '/etc/logrotate.d/ceph-test.conf',
                    run.Raw('&&'), 'sudo', 'chmod', '0644',
                    '/etc/logrotate.d/ceph-test.conf',
                    run.Raw('&&'), 'sudo', 'chown', 'root.root',
                    '/etc/logrotate.d/ceph-test.conf'
                ])
                remote.chcon('/etc/logrotate.d/ceph-test.conf',
                             'system_u:object_r:etc_t:s0')
Example #11
def configure(ctx, config):
    assert isinstance(config, dict)
    log.info('Configuring s3-tests...')
    for client, properties in config['clients'].iteritems():
        s3tests_conf = config['s3tests_conf'][client]
        if properties is not None and 'rgw_server' in properties:
            host = None
            for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
                log.info('roles: ' + str(roles))
                log.info('target: ' + str(target))
                if properties['rgw_server'] in roles:
                    _, host = split_user(target)
            assert host is not None, "Invalid client specified as the rgw_server"
            s3tests_conf['DEFAULT']['host'] = host
        else:
            s3tests_conf['DEFAULT']['host'] = 'localhost'

        (remote,) = ctx.cluster.only(client).remotes.keys()
        remote.run(
            args=[
                'cd',
                '/tmp/cephtest/s3-tests',
                run.Raw('&&'),
                './bootstrap',
                ],
            )
        conf_fp = StringIO()
        s3tests_conf.write(conf_fp)
        teuthology.write_file(
            remote=remote,
            path='/tmp/cephtest/archive/s3-tests.{client}.conf'.format(client=client),
            data=conf_fp.getvalue(),
            )
    yield
Example #12
def setup_dnsmasq(remote, testdir, cnames):
    """ configure dnsmasq on the given remote, adding each cname given """
    log.info('Configuring dnsmasq on remote %s..', remote.name)

    # add address entries for each cname
    dnsmasq = "server=8.8.8.8\nserver=8.8.4.4\n"
    address_template = "address=/{cname}/{ip_address}\n"
    for cname, ip_address in cnames.items():
        dnsmasq += address_template.format(cname=cname, ip_address=ip_address)

    # write to temporary dnsmasq file
    dnsmasq_tmp = '/'.join((testdir, 'ceph.tmp'))
    misc.write_file(remote, dnsmasq_tmp, dnsmasq)

    # move into /etc/dnsmasq.d/
    dnsmasq_path = '/etc/dnsmasq.d/ceph'
    remote.run(args=['sudo', 'mv', dnsmasq_tmp, dnsmasq_path])
    # restore selinux context if necessary
    remote.run(args=['sudo', 'restorecon', dnsmasq_path], check_status=False)

    # restart dnsmasq
    remote.run(args=['sudo', 'systemctl', 'restart', 'dnsmasq'])
    # verify dns name is set
    remote.run(args=['ping', '-c', '4', cnames.keys()[0]])

    try:
        yield
    finally:
        log.info('Removing dnsmasq configuration from remote %s..',
                 remote.name)
        # remove /etc/dnsmasq.d/ceph
        remote.run(args=['sudo', 'rm', dnsmasq_path])
        # restart dnsmasq
        remote.run(args=['sudo', 'systemctl', 'restart', 'dnsmasq'])
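The dnsmasq config assembly above is plain string concatenation over the cname map; a minimal sketch (the names and addresses are made up):

cnames = {'bucket.example.test': '10.0.0.5', 'rgw.example.test': '10.0.0.6'}

dnsmasq = "server=8.8.8.8\nserver=8.8.4.4\n"
for cname, ip_address in cnames.items():
    dnsmasq += "address=/{cname}/{ip_address}\n".format(
        cname=cname, ip_address=ip_address)
print(dnsmasq)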
Example #13
    def write_rotate_conf(ctx, daemons):
        testdir = teuthology.get_testdir(ctx)
        rotate_conf_path = os.path.join(os.path.dirname(__file__), 'logrotate.conf')
        with file(rotate_conf_path, 'rb') as f:
            conf = ""
            for daemon, size in daemons.iteritems():
                log.info('writing logrotate stanza for {daemon}'.format(daemon=daemon))
                conf += f.read().format(daemon_type=daemon, max_size=size)
                f.seek(0, 0)

            for remote in ctx.cluster.remotes.iterkeys():
                teuthology.write_file(remote=remote,
                                      path='{tdir}/logrotate.ceph-test.conf'.format(tdir=testdir),
                                      data=StringIO(conf)
                                      )
                remote.run(
                    args=[
                        'sudo',
                        'mv',
                        '{tdir}/logrotate.ceph-test.conf'.format(tdir=testdir),
                        '/etc/logrotate.d/ceph-test.conf',
                        run.Raw('&&'),
                        'sudo',
                        'chmod',
                        '0644',
                        '/etc/logrotate.d/ceph-test.conf',
                        run.Raw('&&'),
                        'sudo',
                        'chown',
                        'root.root',
                        '/etc/logrotate.d/ceph-test.conf'
                    ]
                )
                remote.chcon('/etc/logrotate.d/ceph-test.conf',
                             'system_u:object_r:etc_t:s0')
Example #14
def write_mapred_site(ctx):
    """
    Add required entries to conf/mapred-site.xml
    """
    mapred_site_file = "{tdir}/apache_hadoop/conf/mapred-site.xml".format(
        tdir=teuthology.get_testdir(ctx))

    master_ip = get_hadoop_master_ip(ctx)
    log.info('adding host {remote} as jobtracker'.format(remote=master_ip))

    hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote in hadoop_nodes.remotes:
        teuthology.write_file(
            remote, mapred_site_file, '''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
    <property>
        <name>mapred.job.tracker</name>
        <value>{remote}:54311</value>
    </property>
</configuration>
'''.format(remote=master_ip))

        log.info("wrote file: " + mapred_site_file + " to host: " +
                 str(remote))
Example #15
def configure(ctx, config, run_stages):
    """
    Configure the ragweed.  This includes the running of the
    bootstrap code and the updating of local conf files.
    """
    assert isinstance(config, dict)
    log.info('Configuring ragweed...')
    testdir = teuthology.get_testdir(ctx)
    for client, properties in config['clients'].items():
        (remote, ) = ctx.cluster.only(client).remotes.keys()
        remote.run(args=[
            'cd',
            '{tdir}/ragweed'.format(tdir=testdir),
            run.Raw('&&'),
            './bootstrap',
        ], )

        preparing = 'prepare' in run_stages[client]
        if not preparing:
            # should have been prepared in a previous run
            continue

        ragweed_conf = config['ragweed_conf'][client]
        if properties is not None and 'slow_backend' in properties:
            ragweed_conf['fixtures']['slow backend'] = properties[
                'slow_backend']

        conf_fp = BytesIO()
        ragweed_conf.write(conf_fp)
        teuthology.write_file(
            remote=remote,
            path='{tdir}/archive/ragweed.{client}.conf'.format(tdir=testdir,
                                                               client=client),
            data=conf_fp.getvalue(),
        )

    log.info('Configuring boto...')
    boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
    for client, properties in config['clients'].items():
        with open(boto_src, 'rb') as f:
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            conf = f.read().format(idle_timeout=config.get('idle_timeout', 30))
            teuthology.write_file(
                remote=remote,
                path='{tdir}/boto.cfg'.format(tdir=testdir),
                data=conf,
            )

    try:
        yield

    finally:
        log.info('Cleaning up boto...')
        for client, properties in config['clients'].items():
            (remote, ) = ctx.cluster.only(client).remotes.keys()
            remote.run(args=[
                'rm',
                '{tdir}/boto.cfg'.format(tdir=testdir),
            ], )
Example #16
def write_master(ctx):
    mastersFile = "/tmp/cephtest/hadoop/conf/masters"
    master = _get_master(ctx)
    remote, _ = master


    hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote, roles_for_host in hadoopNodes.remotes.iteritems():
        teuthology.write_file(remote, mastersFile, '{remote}\n'.format(remote=remote.ssh.get_transport().getpeername()[0]))
        log.info("wrote file: " + mastersFile + " to host: " + str(remote))
Example #17
def replace_resolv(remote, path):
    """
    Update resolv.conf to point the nameserver at localhost.
    """
    misc.write_file(remote, path, "nameserver 127.0.0.1\n")
    try:
        # install it
        remote.run(args=['sudo', 'cp', path, '/etc/resolv.conf'])
        yield
    finally:
        remote.run(args=['rm', path])
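Note the bare yield inside try/finally: replace_resolv is a teuthology task body meant to run as a generator-based context manager, so everything after the yield is teardown. A local sketch of the same shape, assuming plain files instead of a remote:

import contextlib
import os
import tempfile

@contextlib.contextmanager
def replace_resolv(path):
    with open(path, 'w') as f:           # stand-in for misc.write_file()
        f.write("nameserver 127.0.0.1\n")
    try:
        yield                            # the wrapped test runs here
    finally:
        os.remove(path)                  # stand-in for the remote 'rm'

path = os.path.join(tempfile.mkdtemp(), 'resolv.conf')
with replace_resolv(path):
    with open(path) as f:
        print(f.read().strip())          # -> nameserver 127.0.0.1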
Example #18
 def setup(self):
     super(CBT, self).setup()
     self.first_mon = self.ctx.cluster.only(misc.get_first_mon(self.ctx, self.config)).remotes.keys()[0]
     self.cbt_config = self.generate_cbt_config()
     self.log.info('cbt configuration is %s', self.cbt_config)
     self.cbt_dir = os.path.join(misc.get_archive_dir(self.ctx), 'cbt')
     self.ctx.cluster.run(args=['mkdir', '-p', '-m0755', '--', self.cbt_dir])
     misc.write_file(self.first_mon, os.path.join(self.cbt_dir, 'cbt_config.yaml'),
                     yaml.safe_dump(self.cbt_config, default_flow_style=False))
     self.checkout_cbt()
     self.install_dependencies()
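The cbt_config.yaml payload above is produced with yaml.safe_dump before being written. A standalone sketch (requires PyYAML; the keys are illustrative, not cbt's actual schema):

import yaml

cbt_config = {
    'cluster': {'user': 'ubuntu', 'osds_per_node': 3},
    'benchmarks': {'radosbench': {'op_size': [4096]}},
}
print(yaml.safe_dump(cbt_config, default_flow_style=False))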
Example #20
    def write_file(self, path, data, perms=None):
        """
        Write the given data at the given path and set the given perms to the
        file on the path.
        """
        if path.find(self.hostfs_mntpt) == -1:
            path = os.path.join(self.hostfs_mntpt, path)

        write_file(self.client_remote, path, data)

        if perms:
            self.run_shell(args=f'chmod {perms} {path}')
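The only subtlety above is the path re-anchoring: a relative path is joined under the mount point, while a path that already mentions it is left alone. A sketch of that check (the mount point value is an assumption):

import os

hostfs_mntpt = '/mnt/cephfs'

def anchor(path):
    # mirrors the path.find(mntpt) == -1 check above
    if path.find(hostfs_mntpt) == -1:
        path = os.path.join(hostfs_mntpt, path)
    return path

print(anchor('dir/file.txt'))    # -> /mnt/cephfs/dir/file.txt
print(anchor('/mnt/cephfs/x'))   # already anchored, left alone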
Example #21
def write_master(ctx):
    mastersFile = "{tdir}/apache_hadoop/conf/masters".format(tdir=teuthology.get_testdir(ctx))
    master = _get_master(ctx)
    master_remote, _ = master

    hadoopNodes = ctx.cluster.only(teuthology.is_type("hadoop"))
    for remote, roles_for_host in hadoopNodes.remotes.iteritems():
        teuthology.write_file(
            remote,
            mastersFile,
            "{master_host}\n".format(master_host=master_remote.ssh.get_transport().getpeername()[0]),
        )
        log.info("wrote file: " + mastersFile + " to host: " + str(remote))
Example #22
def write_master(ctx):
    mastersFile = "{tdir}/apache_hadoop/conf/masters".format(
        tdir=teuthology.get_testdir(ctx))
    master = _get_master(ctx)
    master_remote, _ = master

    hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote, roles_for_host in hadoopNodes.remotes.iteritems():
        teuthology.write_file(
            remote, mastersFile,
            '{master_host}\n'.format(master_host=master_remote.ssh.
                                     get_transport().getpeername()[0]))
        log.info("wrote file: " + mastersFile + " to host: " + str(remote))
Example #23
def replace_resolv(remote, path):
    """
    Update resolv.conf to point the nameserver at localhost.
    """
    misc.write_file(remote, path, "nameserver 127.0.0.1\n")
    try:
        # install it
        if remote.os.package_type == "rpm":
            # for centos ovh resolv.conf has immutable attribute set
            remote.run(args=['sudo', 'chattr', '-i', '/etc/resolv.conf'],
                       check_status=False)
        remote.run(args=['sudo', 'cp', path, '/etc/resolv.conf'])
        yield
    finally:
        remote.run(args=['rm', path])
Example #24
def write_master(ctx):
    """
    Add required entries to conf/masters
    These nodes host JobTrackers and Namenodes
    """
    masters_file = "{tdir}/apache_hadoop/conf/masters".format(
            tdir=teuthology.get_testdir(ctx))
    master = _get_master(ctx)
    master_remote, _ = master

    hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote in hadoop_nodes.remotes:
        teuthology.write_file(remote, masters_file, '{master_host}\n'.format(
                master_host=master_remote.ssh.get_transport().getpeername()[0]))
        log.info("wrote file: " + masters_file + " to host: " + str(remote))
Example #25
def _setup_calamari_cluster(remote, restapi_remote):
    """
    Add restapi db entry to the server.
    """
    restapi_hostname = str(restapi_remote).split('@')[1]
    sqlcmd = 'insert into ceph_cluster (name, api_base_url) ' \
             'values ("{host}", "http://{host}:5000/api/v0.1/");'. \
             format(host=restapi_hostname)
    teuthology.write_file(remote, '/tmp/create.cluster.sql', sqlcmd)
    return remote.run(args=[
        'cat', '/tmp/create.cluster.sql',
        run.Raw('|'), 'sudo', 'sqlite3',
        '/opt/calamari/webapp/calamari/db.sqlite3'
    ],
                      stdout=StringIO())
Example #26
def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, REP_NAME, DATALINECOUNT):

    objects = range(1, NUM_OBJECTS + 1)
    for i in objects:
        NAME = REP_NAME + "{num}".format(num=i)
        DDNAME = os.path.join(DATADIR, NAME)

        remote.run(args=['rm', '-f', DDNAME ])

        dataline = range(DATALINECOUNT)
        data = "This is the replicated data for " + NAME + "\n"
        DATA = ""
        for _ in dataline:
            DATA += data
        teuthology.write_file(remote, DDNAME, DATA)
Example #27
def write_hadoop_env(ctx, config):
    hadoopEnvFile = "/tmp/cephtest/hadoop/conf/hadoop-env.sh"

    hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote, roles_for_host in hadoopNodes.remotes.iteritems():
        teuthology.write_file(remote, hadoopEnvFile, 
'''export JAVA_HOME=/usr/lib/jvm/default-java
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp/cephtest/binary/usr/local/lib:/usr/lib
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/tmp/cephtest/binary/usr/local/lib/libcephfs.jar:/tmp/cephtest/hadoop/build/hadoop-core*.jar
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
'''     )
        log.info("wrote file: " + hadoopEnvFile + " to host: " + str(remote))
Example #28
def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, BASE_NAME,
                          DATALINECOUNT):

    objects = range(1, NUM_OBJECTS + 1)
    for i in objects:
        NAME = BASE_NAME + "{num}".format(num=i)
        DDNAME = os.path.join(DATADIR, NAME)

        remote.run(args=['rm', '-f', DDNAME])

        dataline = range(DATALINECOUNT)
        data = "This is the data for " + NAME + "\n"
        DATA = ""
        for _ in dataline:
            DATA += data
        teuthology.write_file(remote, DDNAME, DATA)
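The accumulation loop above just repeats one line DATALINECOUNT times per object; string multiplication gives the same payload, shown here for a single object name:

DATALINECOUNT = 3
NAME = 'BASEobject1'
data = "This is the data for " + NAME + "\n"
DATA = data * DATALINECOUNT  # same result as the for-loop concatenation
print(DATA, end='')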
Example #29
def _setup_calamari_cluster(remote, restapi_remote):
    """
    Add restapi db entry to the server.
    """
    restapi_hostname = str(restapi_remote).split('@')[1]
    sqlcmd = 'insert into ceph_cluster (name, api_base_url) ' \
             'values ("{host}", "http://{host}:5000/api/v0.1/");'. \
             format(host=restapi_hostname)
    teuthology.write_file(remote, '/tmp/create.cluster.sql', sqlcmd)
    return remote.run(args=['cat',
                            '/tmp/create.cluster.sql',
                            run.Raw('|'),
                            'sudo',
                            'sqlite3',
                            '/opt/calamari/webapp/calamari/db.sqlite3'],
                      stdout=StringIO())
Example #30
def configure(ctx, config):
    """
    Configure the s3-tests.  This includes the running of the
    bootstrap code and the updating of local conf files.
    """
    assert isinstance(config, dict)
    log.info('Configuring s3-roundtrip-tests...')
    testdir = teuthology.get_testdir(ctx)
    for client, properties in config['clients'].iteritems():
        s3tests_conf = config['s3tests_conf'][client]
        if properties is not None and 'rgw_server' in properties:
            host = None
            for target, roles in zip(ctx.config['targets'].keys(),
                                     ctx.config['roles']):
                log.info('roles: ' + str(roles))
                log.info('target: ' + str(target))
                if properties['rgw_server'] in roles:
                    _, host = split_user(target)
            assert host is not None, "Invalid client specified as the rgw_server"
            s3tests_conf['s3']['host'] = host
        else:
            s3tests_conf['s3']['host'] = 'localhost'

        def_conf = s3tests_conf['DEFAULT']
        s3tests_conf['s3'].setdefault('port', def_conf['port'])
        s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure'])

        (remote, ) = ctx.cluster.only(client).remotes.keys()
        remote.run(args=[
            'cd',
            '{tdir}/s3-tests'.format(tdir=testdir),
            run.Raw('&&'),
            './bootstrap',
        ], )
        conf_fp = StringIO()
        conf = dict(
            s3=s3tests_conf['s3'],
            roundtrip=s3tests_conf['roundtrip'],
        )
        yaml.safe_dump(conf, conf_fp, default_flow_style=False)
        teuthology.write_file(
            remote=remote,
            path='{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(
                tdir=testdir, client=client),
            data=conf_fp.getvalue(),
        )
    yield
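The setdefault() calls above let the [s3] section inherit port and is_secure from [DEFAULT] without clobbering values it already has; a dict-based sketch:

s3tests_conf = {
    'DEFAULT': {'port': 7280, 'is_secure': 'no'},
    's3': {'host': 'localhost'},
}
def_conf = s3tests_conf['DEFAULT']
s3tests_conf['s3'].setdefault('port', def_conf['port'])
s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure'])
print(s3tests_conf['s3'])  # {'host': 'localhost', 'port': 7280, 'is_secure': 'no'}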
Example #31
def configure(ctx, config):
    """
    Configure the s3-tests.  This includes the running of the
    bootstrap code and the updating of local conf files.
    """
    assert isinstance(config, dict)
    log.info('Configuring s3-roundtrip-tests...')
    testdir = teuthology.get_testdir(ctx)
    for client, properties in config['clients'].iteritems():
        s3tests_conf = config['s3tests_conf'][client]
        if properties is not None and 'rgw_server' in properties:
            host = None
            for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
                log.info('roles: ' + str(roles))
                log.info('target: ' + str(target))
                if properties['rgw_server'] in roles:
                    _, host = split_user(target)
            assert host is not None, "Invalid client specified as the rgw_server"
            s3tests_conf['s3']['host'] = host
        else:
            s3tests_conf['s3']['host'] = 'localhost'

        def_conf = s3tests_conf['DEFAULT']
        s3tests_conf['s3'].setdefault('port', def_conf['port'])
        s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure'])

        (remote,) = ctx.cluster.only(client).remotes.keys()
        remote.run(
            args=[
                'cd',
                '{tdir}/s3-tests'.format(tdir=testdir),
                run.Raw('&&'),
                './bootstrap',
                ],
            )
        conf_fp = StringIO()
        conf = dict(
                        s3=s3tests_conf['s3'],
                        roundtrip=s3tests_conf['roundtrip'],
                    )
        yaml.safe_dump(conf, conf_fp, default_flow_style=False)
        teuthology.write_file(
            remote=remote,
            path='{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client),
            data=conf_fp.getvalue(),
            )
    yield
Example #32
def write_slaves(ctx):
    log.info("Setting up slave nodes...")

    slavesFile = "{tdir}/apache_hadoop/conf/slaves".format(tdir=teuthology.get_testdir(ctx))
    tmpFile = StringIO()

    slaves = ctx.cluster.only(teuthology.is_type("hadoop.slave"))
    for remote, roles_for_host in slaves.remotes.iteritems():
        tmpFile.write("{remote}\n".format(remote=remote.ssh.get_transport().getpeername()[0]))

    tmpFile.seek(0)

    hadoopNodes = ctx.cluster.only(teuthology.is_type("hadoop"))
    for remote, roles_for_host in hadoopNodes.remotes.iteritems():
        teuthology.write_file(remote=remote, path=slavesFile, data=tmpFile)
        tmpFile.seek(0)
        log.info("wrote file: " + slavesFile + " to host: " + str(remote))
Example #33
def write_master(ctx):
    """
    Add required entries to conf/masters
    These nodes host JobTrackers and Namenodes
    """
    masters_file = "{tdir}/apache_hadoop/conf/masters".format(
        tdir=teuthology.get_testdir(ctx))
    master = _get_master(ctx)
    master_remote, _ = master

    hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote in hadoop_nodes.remotes:
        teuthology.write_file(
            remote, masters_file,
            '{master_host}\n'.format(master_host=master_remote.ssh.
                                     get_transport().getpeername()[0]))
        log.info("wrote file: " + masters_file + " to host: " + str(remote))
Example #34
def write_slaves(ctx):
    log.info('Setting up slave nodes...')

    slavesFile = "/tmp/cephtest/hadoop/conf/slaves"
    tmpFile = StringIO()

    slaves = ctx.cluster.only(teuthology.is_type('hadoop.slave'))
    for remote, roles_for_host in slaves.remotes.iteritems():
        tmpFile.write('{remote}\n'.format(remote=remote.ssh.get_transport().getpeername()[0]))

    tmpFile.seek(0)

    hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote, roles_for_host in hadoopNodes.remotes.iteritems():
        teuthology.write_file(remote=remote, path=slavesFile, data=tmpFile)
        tmpFile.seek(0)
        log.info("wrote file: " + slavesFile + " to host: " + str(remote))
Example #35
def write_hadoop_env(ctx, config):
    hadoopEnvFile = "{tdir}/apache_hadoop/conf/hadoop-env.sh".format(
        tdir=teuthology.get_testdir(ctx))

    hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote, roles_for_host in hadoopNodes.remotes.iteritems():
        teuthology.write_file(
            remote, hadoopEnvFile,
            '''export JAVA_HOME=/usr/lib/jvm/default-java
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/share/java/libcephfs.jar:{tdir}/apache_hadoop/build/hadoop-core*.jar:{tdir}/inktank_hadoop/build/hadoop-cephfs.jar
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
'''.format(tdir=teuthology.get_testdir(ctx)))
        log.info("wrote file: " + hadoopEnvFile + " to host: " + str(remote))
Example #36
def write_hdfs_site(ctx):
    hdfsSiteFile = "/tmp/cephtest/hadoop/conf/hdfs-site.xml"

    hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote, roles_for_host in hadoopNodes.remotes.iteritems():
        teuthology.write_file(remote, hdfsSiteFile, 
'''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
</configuration>
'''     )
        log.info("wrote file: " + hdfsSiteFile + " to host: " + str(remote))
Example #37
def write_hdfs_site(ctx):
    hdfsSiteFile = "{tdir}/apache_hadoop/conf/hdfs-site.xml".format(
        tdir=teuthology.get_testdir(ctx))

    hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote, roles_for_host in hadoopNodes.remotes.iteritems():
        teuthology.write_file(
            remote, hdfsSiteFile, '''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
</configuration>
''')
        log.info("wrote file: " + hdfsSiteFile + " to host: " + str(remote))
Example #38
def write_core_site(ctx, config):
    """
    Add required entries to conf/core-site.xml
    """
    testdir = teuthology.get_testdir(ctx)
    core_site_file = "{tdir}/apache_hadoop/conf/core-site.xml".format(
        tdir=testdir)

    hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote in hadoop_nodes.remotes:

        # check the config to see if we should use hdfs or ceph
        default_fs_string = ""
        if config.get('hdfs'):
            default_fs_string = 'hdfs://{master_ip}:54310'.format(
                master_ip=get_hadoop_master_ip(ctx))
        else:
            default_fs_string = 'ceph:///'

        teuthology.write_file(
            remote, core_site_file, '''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file.  -->
<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/tmp/hadoop/tmp</value>
    </property>
    <property>
        <name>fs.default.name</name>
        <value>{default_fs}</value>
    </property>
    <property>
      <name>ceph.conf.file</name>
      <value>/etc/ceph/ceph.conf</value>
    </property>
    <property>
      <name>fs.ceph.impl</name>
      <value>org.apache.hadoop.fs.ceph.CephFileSystem</value>
    </property>
</configuration>
'''.format(tdir=teuthology.get_testdir(ctx), default_fs=default_fs_string))

        log.info("wrote file: " + core_site_file + " to host: " + str(remote))
Example #39
def write_core_site(ctx, config):
    """
    Add required entries to conf/core-site.xml
    """
    testdir = teuthology.get_testdir(ctx)
    core_site_file = "{tdir}/apache_hadoop/conf/core-site.xml".format(
            tdir=testdir)

    hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote in hadoop_nodes.remotes:

        # check the config to see if we should use hdfs or ceph
        default_fs_string = ""
        if config.get('hdfs'):
            default_fs_string = 'hdfs://{master_ip}:54310'.format(
                    master_ip=get_hadoop_master_ip(ctx))
        else:
            default_fs_string = 'ceph:///'

        teuthology.write_file(remote, core_site_file,
'''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file.  -->
<configuration>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/tmp/hadoop/tmp</value>
    </property>
    <property>
        <name>fs.default.name</name>
        <value>{default_fs}</value>
    </property>
    <property>
      <name>ceph.conf.file</name>
      <value>/etc/ceph/ceph.conf</value>
    </property>
    <property>
      <name>fs.ceph.impl</name>
      <value>org.apache.hadoop.fs.ceph.CephFileSystem</value>
    </property>
</configuration>
'''.format(tdir=teuthology.get_testdir(ctx), default_fs=default_fs_string))

        log.info("wrote file: " + core_site_file + " to host: " + str(remote))
Example #40
def ship_utilities(ctx, config):
    assert config is None
    FILES = ['daemon-helper', 'adjust-ulimits', 'chdir-coredump',
             'valgrind.supp', 'kcon_most']
    testdir = teuthology.get_testdir(ctx)
    for filename in FILES:
        log.info('Shipping %r...', filename)
        src = os.path.join(os.path.dirname(__file__), filename)
        dst = os.path.join(testdir, filename)
        with file(src, 'rb') as f:
            for rem in ctx.cluster.remotes.iterkeys():
                teuthology.write_file(
                    remote=rem,
                    path=dst,
                    data=f,
                    )
                f.seek(0)
                rem.run(
                    args=[
                        'chmod',
                        'a=rx',
                        '--',
                        dst,
                        ],
                    )

    try:
        yield
    finally:
        log.info('Removing shipped files: %s...', ' '.join(FILES))
        filenames = (
            os.path.join(testdir, filename)
            for filename in FILES
            )
        run.wait(
            ctx.cluster.run(
                args=[
                    'rm',
                    '-rf',
                    '--',
                    ] + list(filenames),
                wait=False,
                ),
            )
Example #41
def ship_config(ctx, config):
    assert isinstance(config, list)
    log.info('Shipping apache config and rgw.fcgi...')
    src = os.path.join(os.path.dirname(__file__), 'apache.conf')
    for client in config:
        (remote,) = ctx.cluster.only(client).remotes.keys()
        with file(src, 'rb') as f:
            teuthology.write_file(
                remote=remote,
                path='/tmp/cephtest/apache/apache.conf',
                data=f,
                )
        teuthology.write_file(
            remote=remote,
            path='/tmp/cephtest/apache/htdocs/rgw.fcgi',
            data="""#!/bin/sh
ulimit -c unlimited
export LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib
exec /tmp/cephtest/binary/usr/local/bin/radosgw -f -c /tmp/cephtest/ceph.conf
"""
            )
        remote.run(
            args=[
                'chmod',
                'a=rx',
                '/tmp/cephtest/apache/htdocs/rgw.fcgi',
                ],
            )
    try:
        yield
    finally:
        log.info('Removing apache config...')
        for client in config:
            ctx.cluster.only(client).run(
                args=[
                    'rm',
                    '-f',
                    '/tmp/cephtest/apache/apache.conf',
                    run.Raw('&&'),
                    'rm',
                    '-f',
                    '/tmp/cephtest/apache/htdocs/rgw.fcgi',
                    ],
                )
Example #42
def write_hadoop_env(ctx):
    """
    Add required entries to conf/hadoop-env.sh
    """
    hadoop_envfile = "{tdir}/apache_hadoop/conf/hadoop-env.sh".format(
            tdir=teuthology.get_testdir(ctx))

    hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote in hadoop_nodes.remotes:
        teuthology.write_file(remote, hadoop_envfile,
'''export JAVA_HOME=/usr/lib/jvm/default-java
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/share/java/libcephfs.jar:{tdir}/apache_hadoop/build/hadoop-core*.jar:{tdir}/inktank_hadoop/build/hadoop-cephfs.jar
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
'''.format(tdir=teuthology.get_testdir(ctx)))
        log.info("wrote file: " + hadoop_envfile + " to host: " + str(remote))
Example #43
def write_slaves(ctx):
    log.info('Setting up slave nodes...')

    slavesFile = "{tdir}/apache_hadoop/conf/slaves".format(
        tdir=teuthology.get_testdir(ctx))
    tmpFile = StringIO()

    slaves = ctx.cluster.only(teuthology.is_type('hadoop.slave'))
    for remote, roles_for_host in slaves.remotes.iteritems():
        tmpFile.write('{remote}\n'.format(
            remote=remote.ssh.get_transport().getpeername()[0]))

    tmpFile.seek(0)

    hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote, roles_for_host in hadoopNodes.remotes.iteritems():
        teuthology.write_file(remote=remote, path=slavesFile, data=tmpFile)
        tmpFile.seek(0)
        log.info("wrote file: " + slavesFile + " to host: " + str(remote))
Example #44
def ship_utilities(ctx, config):
    assert config is None
    FILES = ['daemon-helper', 'enable-coredump']
    for filename in FILES:
        log.info('Shipping %r...', filename)
        src = os.path.join(os.path.dirname(__file__), filename)
        dst = os.path.join('/tmp/cephtest', filename)
        with file(src, 'rb') as f:
            for rem in ctx.cluster.remotes.iterkeys():
                teuthology.write_file(
                    remote=rem,
                    path=dst,
                    data=f,
                    )
                f.seek(0)
                rem.run(
                    args=[
                        'chmod',
                        'a=rx',
                        '--',
                        dst,
                        ],
                    )

    try:
        yield
    finally:
        log.info('Removing shipped files: %s...', ' '.join(FILES))
        filenames = (
            os.path.join('/tmp/cephtest', filename)
            for filename in FILES
            )
        run.wait(
            ctx.cluster.run(
                args=[
                    'rm',
                    '-rf',
                    '--',
                    ] + list(filenames),
                wait=False,
                ),
            )
Example #45
def write_hadoop_env(ctx, config):
    hadoopEnvFile = "{tdir}/apache_hadoop/conf/hadoop-env.sh".format(tdir=teuthology.get_testdir(ctx))

    hadoopNodes = ctx.cluster.only(teuthology.is_type("hadoop"))
    for remote, roles_for_host in hadoopNodes.remotes.iteritems():
        teuthology.write_file(
            remote,
            hadoopEnvFile,
            """export JAVA_HOME=/usr/lib/jvm/default-java
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/share/java/libcephfs.jar:{tdir}/apache_hadoop/build/hadoop-core*.jar:{tdir}/inktank_hadoop/build/hadoop-cephfs.jar
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
""".format(
                tdir=teuthology.get_testdir(ctx)
            ),
        )
        log.info("wrote file: " + hadoopEnvFile + " to host: " + str(remote))
Example #46
def configure(ctx, config):
    """
    Configure rgw and Swift
    """
    assert isinstance(config, dict)
    log.info('Configuring testswift...')
    testdir = teuthology.get_testdir(ctx)
    for client, properties in config['clients'].iteritems():
        log.info('client={c}'.format(c=client))
        log.info('config={c}'.format(c=config))
        testswift_conf = config['testswift_conf'][client]
        if properties is not None and 'rgw_server' in properties:
            host = None
            for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
                log.info('roles: ' + str(roles))
                log.info('target: ' + str(target))
                if properties['rgw_server'] in roles:
                    _, host = split_user(target)
            assert host is not None, "Invalid client specified as the rgw_server"
            testswift_conf['func_test']['auth_host'] = host
        else:
            testswift_conf['func_test']['auth_host'] = 'localhost'

        log.info(client)
        (remote,) = ctx.cluster.only(client).remotes.keys()
        remote.run(
            args=[
                'cd',
                '{tdir}/swift'.format(tdir=testdir),
                run.Raw('&&'),
                './bootstrap',
                ],
            )
        conf_fp = StringIO()
        testswift_conf.write(conf_fp)
        teuthology.write_file(
            remote=remote,
            path='{tdir}/archive/testswift.{client}.conf'.format(tdir=testdir, client=client),
            data=conf_fp.getvalue(),
            )
    yield
Example #47
def write_hdfs_site(ctx):
    """
    Add required entries to conf/hdfs-site.xml
    """
    hdfs_site_file = "{tdir}/apache_hadoop/conf/hdfs-site.xml".format(
            tdir=teuthology.get_testdir(ctx))

    hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote in hadoop_nodes.remotes:
        teuthology.write_file(remote, hdfs_site_file,
'''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
</configuration>
''')
        log.info("wrote file: " + hdfs_site_file + " to host: " + str(remote))
Example #48
    def _write_cfg_file(self, cfg_dict, client):
        """
        To write the final s3 tests config file on the remote
        a temporary one is created on the local machine
        """
        testdir = teuthology.get_testdir(self.ctx)
        (remote, ) = self.ctx.cluster.only(client).remotes.keys()
        with open('s3_tests_tmp.yaml', 'w') as outfile:
            yaml.dump(cfg_dict, outfile, default_flow_style=False)

        conf_fp = StringIO()
        with open('s3_tests_tmp.yaml', 'r') as infile:
            for line in infile:
                conf_fp.write(line)

        teuthology.write_file(
            remote=remote,
            path='{tdir}/archive/s3-tests-java.{client}.conf'.format(
                tdir=testdir, client=client),
            data=conf_fp.getvalue(),
        )
        os.remove('s3_tests_tmp.yaml')
Example #49
def ship_config(ctx, config):
    assert isinstance(config, list)
    log.info('Shipping apache config and rgw.fcgi...')
    src = os.path.join(os.path.dirname(__file__), 'apache.conf')
    for client in config:
        (remote, ) = ctx.cluster.only(client).remotes.keys()
        with file(src, 'rb') as f:
            teuthology.write_file(
                remote=remote,
                path='/tmp/cephtest/apache/apache.conf',
                data=f,
            )
        teuthology.write_file(remote=remote,
                              path='/tmp/cephtest/apache/htdocs/rgw.fcgi',
                              data="""#!/bin/sh
ulimit -c unlimited
export LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib
exec /tmp/cephtest/binary/usr/local/bin/radosgw -f -c /tmp/cephtest/ceph.conf
""")
        remote.run(args=[
            'chmod',
            'a=rx',
            '/tmp/cephtest/apache/htdocs/rgw.fcgi',
        ], )
    try:
        yield
    finally:
        log.info('Removing apache config...')
        for client in config:
            ctx.cluster.only(client).run(args=[
                'rm',
                '-f',
                '/tmp/cephtest/apache/apache.conf',
                run.Raw('&&'),
                'rm',
                '-f',
                '/tmp/cephtest/apache/htdocs/rgw.fcgi',
            ], )
Example #50
def write_slaves(ctx):
    """
    Add required entries to conf/slaves
    These nodes host TaskTrackers and DataNodes
    """
    log.info('Setting up slave nodes...')

    slaves_file = "{tdir}/apache_hadoop/conf/slaves".format(
            tdir=teuthology.get_testdir(ctx))
    tmp_file = StringIO()

    slaves = ctx.cluster.only(teuthology.is_type('hadoop.slave'))
    for remote in slaves.remotes:
        tmp_file.write('{remote}\n'.format(
                remote=remote.ssh.get_transport().getpeername()[0]))

    tmp_file.seek(0)

    hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote in hadoop_nodes.remotes:
        teuthology.write_file(remote=remote, path=slaves_file, data=tmp_file)
        tmp_file.seek(0)
        log.info("wrote file: " + slaves_file + " to host: " + str(remote))
Example #51
def write_slaves(ctx):
    """
    Add required entries to conf/slaves
    These nodes host TaskTrackers and DataNodes
    """
    log.info('Setting up slave nodes...')

    slaves_file = "{tdir}/apache_hadoop/conf/slaves".format(
        tdir=teuthology.get_testdir(ctx))
    tmp_file = StringIO()

    slaves = ctx.cluster.only(teuthology.is_type('hadoop.slave'))
    for remote in slaves.remotes:
        tmp_file.write('{remote}\n'.format(
            remote=remote.ssh.get_transport().getpeername()[0]))

    tmp_file.seek(0)

    hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
    for remote in hadoop_nodes.remotes:
        teuthology.write_file(remote=remote, path=slaves_file, data=tmp_file)
        tmp_file.seek(0)
        log.info("wrote file: " + slaves_file + " to host: " + str(remote))
Example #52
def configure(ctx, config):
    assert isinstance(config, dict)
    log.info('Configuring testswift...')
    for client, properties in config['clients'].iteritems():
        print 'client={c}'.format(c=client)
        print 'config={c}'.format(c=config)
        testswift_conf = config['testswift_conf'][client]
        if properties is not None and 'rgw_server' in properties:
            host = None
            for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
                log.info('roles: ' + str(roles))
                log.info('target: ' + str(target))
                if properties['rgw_server'] in roles:
                    _, host = split_user(target)
            assert host is not None, "Invalid client specified as the rgw_server"
            testswift_conf['func_test']['auth_host'] = host
        else:
            testswift_conf['func_test']['auth_host'] = 'localhost'

        print client
        (remote,) = ctx.cluster.only(client).remotes.keys()
        remote.run(
            args=[
                'cd',
                '/tmp/cephtest/swift',
                run.Raw('&&'),
                './bootstrap',
                ],
            )
        conf_fp = StringIO()
        testswift_conf.write(conf_fp)
        teuthology.write_file(
            remote=remote,
            path='/tmp/cephtest/archive/testswift.{client}.conf'.format(client=client),
            data=conf_fp.getvalue(),
            )
    yield
Example #53
def configure(ctx, config):
    assert isinstance(config, dict)
    log.info("Configuring s3-readwrite-tests...")
    for client, properties in config["clients"].iteritems():
        s3tests_conf = config["s3tests_conf"][client]
        if properties is not None and "rgw_server" in properties:
            host = None
            for target, roles in zip(ctx.config["targets"].iterkeys(), ctx.config["roles"]):
                log.info("roles: " + str(roles))
                log.info("target: " + str(target))
                if properties["rgw_server"] in roles:
                    _, host = split_user(target)
            assert host is not None, "Invalid client specified as the rgw_server"
            s3tests_conf["s3"]["host"] = host
        else:
            s3tests_conf["s3"]["host"] = "localhost"

        def_conf = s3tests_conf["DEFAULT"]
        s3tests_conf["s3"].setdefault("port", def_conf["port"])
        s3tests_conf["s3"].setdefault("is_secure", def_conf["is_secure"])

        (remote,) = ctx.cluster.only(client).remotes.keys()
        remote.run(
            args=["cd", "{tdir}/s3-tests".format(tdir=teuthology.get_testdir(ctx)), run.Raw("&&"), "./bootstrap"]
        )
        conf_fp = StringIO()
        conf = dict(s3=s3tests_conf["s3"], readwrite=s3tests_conf["readwrite"])
        yaml.safe_dump(conf, conf_fp, default_flow_style=False)
        teuthology.write_file(
            remote=remote,
            path="{tdir}/archive/s3readwrite.{client}.config.yaml".format(
                tdir=teuthology.get_testdir(ctx), client=client
            ),
            data=conf_fp.getvalue(),
        )
    yield
Example #54
def ceph_bootstrap(ctx, config):
    cluster_name = config['cluster']
    testdir = teuthology.get_testdir(ctx)
    fsid = ctx.ceph[cluster_name].fsid

    bootstrap_remote = ctx.ceph[cluster_name].bootstrap_remote
    first_mon = ctx.ceph[cluster_name].first_mon
    first_mon_role = ctx.ceph[cluster_name].first_mon_role
    mons = ctx.ceph[cluster_name].mons
    
    ctx.cluster.run(args=[
        'sudo', 'mkdir', '-p', '/etc/ceph',
        ])
    ctx.cluster.run(args=[
        'sudo', 'chmod', '777', '/etc/ceph',
        ])
    add_mirror_to_cluster(ctx, config.get('docker_registry_mirror', 'vossi04.front.sepia.ceph.com:5000'))
    try:
        # write seed config
        log.info('Writing seed config...')
        conf_fp = BytesIO()
        seed_config = build_initial_config(ctx, config)
        seed_config.write(conf_fp)
        teuthology.write_file(
            remote=bootstrap_remote,
            path='{}/seed.{}.conf'.format(testdir, cluster_name),
            data=conf_fp.getvalue())
        log.debug('Final config:\n' + conf_fp.getvalue().decode())
        ctx.ceph[cluster_name].conf = seed_config

        # register initial daemons
        ctx.daemons.register_daemon(
            bootstrap_remote, 'mon', first_mon,
            cluster=cluster_name,
            fsid=fsid,
            logger=log.getChild('mon.' + first_mon),
            wait=False,
            started=True,
        )
        if not ctx.ceph[cluster_name].roleless:
            first_mgr = ctx.ceph[cluster_name].first_mgr
            ctx.daemons.register_daemon(
                bootstrap_remote, 'mgr', first_mgr,
                cluster=cluster_name,
                fsid=fsid,
                logger=log.getChild('mgr.' + first_mgr),
                wait=False,
                started=True,
            )

        # bootstrap
        log.info('Bootstrapping...')
        cmd = [
            'sudo',
            ctx.cephadm,
            '--image', ctx.ceph[cluster_name].image,
            '-v',
            'bootstrap',
            '--fsid', fsid,
            '--config', '{}/seed.{}.conf'.format(testdir, cluster_name),
            '--output-config', '/etc/ceph/{}.conf'.format(cluster_name),
            '--output-keyring',
            '/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
            '--output-pub-ssh-key', '{}/{}.pub'.format(testdir, cluster_name),
        ]
        if not ctx.ceph[cluster_name].roleless:
            cmd += [
                '--mon-id', first_mon,
                '--mgr-id', first_mgr,
                '--orphan-initial-daemons',   # we will do it explicitly!
                '--skip-monitoring-stack',    # we'll provision these explicitly
            ]
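        # a bracketed mons value is assumed to be a mon address vector
        # (e.g. '[v2:<ip>:3300,v1:<ip>:6789]'); a bare value is a plain IP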
        if mons[first_mon_role].startswith('['):
            cmd += ['--mon-addrv', mons[first_mon_role]]
        else:
            cmd += ['--mon-ip', mons[first_mon_role]]
        if config.get('skip_dashboard'):
            cmd += ['--skip-dashboard']
        # bootstrap makes the keyring root 0600, so +r it for our purposes
        cmd += [
            run.Raw('&&'),
            'sudo', 'chmod', '+r',
            '/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
        ]
        bootstrap_remote.run(args=cmd)

        # fetch keys and configs
        log.info('Fetching config...')
        ctx.ceph[cluster_name].config_file = teuthology.get_file(
            remote=bootstrap_remote,
            path='/etc/ceph/{}.conf'.format(cluster_name))
        log.info('Fetching client.admin keyring...')
        ctx.ceph[cluster_name].admin_keyring = teuthology.get_file(
            remote=bootstrap_remote,
            path='/etc/ceph/{}.client.admin.keyring'.format(cluster_name))
        log.info('Fetching mon keyring...')
        ctx.ceph[cluster_name].mon_keyring = teuthology.get_file(
            remote=bootstrap_remote,
            path='/var/lib/ceph/%s/mon.%s/keyring' % (fsid, first_mon),
            sudo=True)

        # fetch ssh key, distribute to additional nodes
        log.info('Fetching pub ssh key...')
        ssh_pub_key = teuthology.get_file(
            remote=bootstrap_remote,
            path='{}/{}.pub'.format(testdir, cluster_name)
        ).decode('ascii').strip()

        log.info('Installing pub ssh key for root users...')
        ctx.cluster.run(args=[
            'sudo', 'install', '-d', '-m', '0700', '/root/.ssh',
            run.Raw('&&'),
            'echo', ssh_pub_key,
            run.Raw('|'),
            'sudo', 'tee', '-a', '/root/.ssh/authorized_keys',
            run.Raw('&&'),
            'sudo', 'chmod', '0600', '/root/.ssh/authorized_keys',
        ])

        # set options
        _shell(ctx, cluster_name, bootstrap_remote,
               ['ceph', 'config', 'set', 'mgr', 'mgr/cephadm/allow_ptrace', 'true'])

        # add other hosts
        for remote in ctx.cluster.remotes.keys():
            if remote == bootstrap_remote:
                continue
            log.info('Writing (initial) conf and keyring to %s' % remote.shortname)
            teuthology.write_file(
                remote=remote,
                path='/etc/ceph/{}.conf'.format(cluster_name),
                data=ctx.ceph[cluster_name].config_file)
            teuthology.write_file(
                remote=remote,
                path='/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
                data=ctx.ceph[cluster_name].admin_keyring)

            log.info('Adding host %s to orchestrator...' % remote.shortname)
            _shell(ctx, cluster_name, remote, [
                'ceph', 'orch', 'host', 'add',
                remote.shortname
            ])
            r = _shell(ctx, cluster_name, remote,
                       ['ceph', 'orch', 'host', 'ls', '--format=json'],
                       stdout=StringIO())
            hosts = [node['hostname'] for node in json.loads(r.stdout.getvalue())]
            assert remote.shortname in hosts

        yield

    finally:
        log.info('Cleaning up testdir ceph.* files...')
        ctx.cluster.run(args=[
            'rm', '-f',
            '{}/seed.{}.conf'.format(testdir, cluster_name),
            '{}/{}.pub'.format(testdir, cluster_name),
        ])

        log.info('Stopping all daemons...')

        # this doesn't block until they are all stopped...
        #ctx.cluster.run(args=['sudo', 'systemctl', 'stop', 'ceph.target'])

        # so, stop them individually
        for role in ctx.daemons.resolve_role_list(None, CEPH_ROLE_TYPES):
            cluster, type_, id_ = teuthology.split_role(role)
            ctx.daemons.get_daemon(type_, id_, cluster).stop()

        # clean up /etc/ceph
        ctx.cluster.run(args=[
            'sudo', 'rm', '-f',
            '/etc/ceph/{}.conf'.format(cluster_name),
            '/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
        ])
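
Flattened out, the cmd list assembled above amounts to a single cephadm bootstrap invocation. An illustrative rendering only (image, fsid, paths and IP are placeholders, not from a real run):

# Illustrative: roughly what bootstrap_remote.run(args=cmd) executes
# for a cluster named 'ceph'.
cmd = [
    'sudo', 'cephadm', '--image', 'quay.io/ceph-ci/ceph:latest', '-v',
    'bootstrap',
    '--fsid', '00000000-0000-0000-0000-000000000000',
    '--config', '/home/ubuntu/cephtest/seed.ceph.conf',
    '--output-config', '/etc/ceph/ceph.conf',
    '--output-keyring', '/etc/ceph/ceph.client.admin.keyring',
    '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub',
    '--mon-ip', '10.0.0.1',
]
print(' '.join(cmd))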
Example #55
def configure(ctx, config, hadoops):
    tempdir = teuthology.get_testdir(ctx)

    log.info("Writing Hadoop slaves file...")
    for remote in hadoops.remotes:
        path, data = get_slaves_data(ctx)
        teuthology.write_file(remote, path, StringIO(data))

    log.info("Writing Hadoop masters file...")
    for remote in hadoops.remotes:
        path, data = get_masters_data(ctx)
        teuthology.write_file(remote, path, StringIO(data))

    log.info("Writing Hadoop core-site.xml file...")
    for remote in hadoops.remotes:
        path, data = get_core_site_data(ctx, config)
        teuthology.write_file(remote, path, StringIO(data))

    log.info("Writing Hadoop yarn-site.xml file...")
    for remote in hadoops.remotes:
        path, data = get_yarn_site_data(ctx)
        teuthology.write_file(remote, path, StringIO(data))

    log.info("Writing Hadoop hdfs-site.xml file...")
    for remote in hadoops.remotes:
        path, data = get_hdfs_site_data(ctx)
        teuthology.write_file(remote, path, StringIO(data))

    log.info("Writing Hadoop mapred-site.xml file...")
    for remote in hadoops.remotes:
        path, data = get_mapred_site_data(ctx)
        teuthology.write_file(remote, path, StringIO(data))

    log.info("Setting JAVA_HOME in hadoop-env.sh...")
    for remote in hadoops.remotes:
        path = "{tdir}/hadoop/etc/hadoop/hadoop-env.sh".format(tdir=tempdir)
        if remote.os.package_type == 'rpm':
            data = "JAVA_HOME=/usr/lib/jvm/java\n"
        elif remote.os.package_type == 'deb':
            data = "JAVA_HOME=/usr/lib/jvm/default-java\n"
        else:
            raise UnsupportedPackageTypeError(remote)
        teuthology.prepend_lines_to_file(remote, path, data)

    if config.get('hdfs', False):
        log.info("Formatting HDFS...")
        testdir = teuthology.get_testdir(ctx)
        hadoop_dir = "{tdir}/hadoop/".format(tdir=testdir)
        masters = ctx.cluster.only(teuthology.is_type('hadoop.master'))
        assert len(masters.remotes) == 1
        master = masters.remotes.keys()[0]
        master.run(
            args=[hadoop_dir + "bin/hadoop", "namenode", "-format"],
            wait=True,
        )
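
The get_*_data() helpers are not part of this snippet; each is assumed to return a (path, data) pair that write_file() then ships to every Hadoop node. A hypothetical, simplified version for the slaves file (the real helpers take ctx and derive the host list from the cluster):

def get_slaves_data(testdir, slave_hostnames):
    # returns (path, data) in the shape configure() expects above
    path = '{tdir}/hadoop/etc/hadoop/slaves'.format(tdir=testdir)
    data = '\n'.join(slave_hostnames) + '\n'
    return path, data

path, data = get_slaves_data('/home/ubuntu/cephtest', ['node1', 'node2'])
print(path)
print(data)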
Example #56
def configure_s3a(client, dns_name, access_key, secret_key, bucket_name, testdir):
    """
    Use the template to configure s3a test, Fill in access_key, secret_key
    and other details required for test.
    """
    config_template = """<configuration>
<property>
<name>fs.s3a.endpoint</name>
<value>{name}</value>
</property>

<property>
<name>fs.s3a.connection.ssl.enabled</name>
<value>false</value>
</property>

<property>
<name>test.fs.s3n.name</name>
<value>s3n://{bucket_name}/</value>
</property>

<property>
<name>test.fs.s3a.name</name>
<value>s3a://{bucket_name}/</value>
</property>

<property>
<name>test.fs.s3.name</name>
<value>s3://{bucket_name}/</value>
</property>

<property>
<name>fs.s3.awsAccessKeyId</name>
<value>{access_key}</value>
</property>

<property>
<name>fs.s3.awsSecretAccessKey</name>
<value>{secret_key}</value>
</property>

<property>
<name>fs.s3n.awsAccessKeyId</name>
<value>{access_key}</value>
</property>

<property>
<name>fs.s3n.awsSecretAccessKey</name>
<value>{secret_key}</value>
</property>

<property>
<name>fs.s3a.access.key</name>
<description>AWS access key ID. Omit for Role-based authentication.</description>
<value>{access_key}</value>
</property>

<property>
<name>fs.s3a.secret.key</name>
<description>AWS secret key. Omit for Role-based authentication.</description>
<value>{secret_key}</value>
</property>
</configuration>
""".format(name=dns_name, bucket_name=bucket_name, access_key=access_key, secret_key=secret_key)
    config_path = testdir + '/hadoop/hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml'
    misc.write_file(
        remote=client,
        path=config_path,
        data=config_template,
    )
    # output for debug
    client.run(args=['cat', config_path])
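
Since configure_s3a() is just string templating plus a file write, the rendering is easy to check locally. A trimmed, runnable sketch with a hypothetical endpoint that also sanity-parses the XML:

# Only a fragment of the template above is used here; the endpoint is made up.
import xml.dom.minidom as minidom

xml_text = """<configuration>
<property>
<name>fs.s3a.endpoint</name>
<value>{name}</value>
</property>
</configuration>
""".format(name='s3.example.com:8000')

print(minidom.parseString(xml_text).toprettyxml(indent='  '))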
Example #57
def task(ctx, config):
    """
    Setup MPI and execute commands

    Example that starts an MPI process on specific clients::

        tasks:
        - ceph:
        - ceph-fuse: [client.0, client.1]
        - ssh_keys:
        - mpi: 
            nodes: [client.0, client.1]
            exec: ior ...

    Example that starts MPI processes on all clients::

        tasks:
        - ceph:
        - ceph-fuse:
        - ssh_keys:
        - mpi:
            exec: ior ...

    Example that starts MPI processes on all roles::

        tasks:
        - ceph:
        - ssh_keys:
        - mpi:
            nodes: all
            exec: ...

    Example that specifies a working directory for MPI processes::

        tasks:
        - ceph:
        - ceph-fuse:
        - pexec:
            clients:
              - ln -s {testdir}/mnt.* {testdir}/gmnt
        - ssh_keys:
        - mpi:
            exec: fsx-mpi
            workdir: {testdir}/gmnt
        - pexec:
            clients:
              - rm -f {testdir}/gmnt

    :param ctx: Context
    :param config: Configuration
    """
    assert isinstance(config, dict), 'task mpi got invalid config'
    assert 'exec' in config, 'task mpi got invalid config, missing exec'

    testdir = teuthology.get_testdir(ctx)

    mpiexec = config['exec'].replace('$TESTDIR', testdir)
    hosts = []
    remotes = []
    master_remote = None
    if 'nodes' in config:
        if isinstance(config['nodes'], basestring) and config['nodes'] == 'all':
            for role in teuthology.all_roles(ctx.cluster):
                (remote,) = ctx.cluster.only(role).remotes.iterkeys()
                ip,port = remote.ssh.get_transport().getpeername()
                hosts.append(ip)
                remotes.append(remote)
            # config['nodes'] is the string 'all' here, so indexing it would
            # yield 'a'; use the first remote collected above as rank 0
            master_remote = remotes[0]
        elif isinstance(config['nodes'], list):
            for role in config['nodes']:
                (remote,) = ctx.cluster.only(role).remotes.iterkeys()
                ip,port = remote.ssh.get_transport().getpeername()
                hosts.append(ip)
                remotes.append(remote)
            (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.iterkeys()
    else:
        roles = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
        (master_remote,) = ctx.cluster.only(roles[0]).remotes.iterkeys()
        for role in roles:
            (remote,) = ctx.cluster.only(role).remotes.iterkeys()
            ip,port = remote.ssh.get_transport().getpeername()
            hosts.append(ip)
            remotes.append(remote)

    workdir = []
    if 'workdir' in config:
        workdir = ['-wdir', config['workdir'].replace('$TESTDIR', testdir) ]

    log.info('mpi rank 0 is: {name}'.format(name=master_remote.name))

    # write out the mpi hosts file
    log.info('mpi nodes: [%s]' % (', '.join(hosts)))
    teuthology.write_file(remote=master_remote,
                          path='{tdir}/mpi-hosts'.format(tdir=testdir),
                          data='\n'.join(hosts))
    log.info('mpiexec on {name}: {cmd}'.format(name=master_remote.name, cmd=mpiexec))
    args=['mpiexec', '-f', '{tdir}/mpi-hosts'.format(tdir=testdir)]
    args.extend(workdir)
    args.extend(mpiexec.split(' '))
    master_remote.run(args=args, )
    log.info('mpi task completed')
    master_remote.run(args=['rm', '{tdir}/mpi-hosts'.format(tdir=testdir)])
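
Concretely, the task writes one IP per line into mpi-hosts and launches mpiexec with that file as the host list. A local sketch of the file contents and command line (IPs, testdir and executable are placeholders):

hosts = ['10.0.0.1', '10.0.0.2']
testdir = '/home/ubuntu/cephtest'

hosts_file = '{tdir}/mpi-hosts'.format(tdir=testdir)
data = '\n'.join(hosts)                      # contents written to hosts_file

args = ['mpiexec', '-f', hosts_file, '-wdir', testdir + '/gmnt', 'fsx-mpi']
print(data)
print(' '.join(args))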
Example #58
def ceph_bootstrap(ctx, config):
    cluster_name = config['cluster']
    testdir = teuthology.get_testdir(ctx)
    fsid = ctx.ceph[cluster_name].fsid

    mons = ctx.ceph[cluster_name].mons
    first_mon_role = sorted(mons.keys())[0]
    _, _, first_mon = teuthology.split_role(first_mon_role)
    (bootstrap_remote, ) = ctx.cluster.only(first_mon_role).remotes.keys()
    log.info('First mon is mon.%s on %s' %
             (first_mon, bootstrap_remote.shortname))
    ctx.ceph[cluster_name].bootstrap_remote = bootstrap_remote
    ctx.ceph[cluster_name].first_mon = first_mon

    others = ctx.cluster.remotes[bootstrap_remote]
    log.info('others %s' % others)
    mgrs = sorted(
        [r for r in others if teuthology.is_type('mgr', cluster_name)(r)])
    if not mgrs:
        raise RuntimeError('no mgrs on the same host as first mon %s' %
                           first_mon)
    _, _, first_mgr = teuthology.split_role(mgrs[0])
    log.info('First mgr is %s' % (first_mgr))
    ctx.ceph[cluster_name].first_mgr = first_mgr

    try:
        # write seed config
        log.info('Writing seed config...')
        conf_fp = StringIO()
        seed_config = build_initial_config(ctx, config)
        seed_config.write(conf_fp)
        teuthology.write_file(remote=bootstrap_remote,
                              path='{}/seed.{}.conf'.format(
                                  testdir, cluster_name),
                              data=conf_fp.getvalue())
        log.debug('Final config:\n' + conf_fp.getvalue())
        ctx.ceph[cluster_name].conf = seed_config

        # register initial daemons
        ctx.daemons.register_daemon(
            bootstrap_remote,
            'mon',
            first_mon,
            cluster=cluster_name,
            fsid=fsid,
            logger=log.getChild('mon.' + first_mon),
            wait=False,
            started=True,
        )
        ctx.daemons.register_daemon(
            bootstrap_remote,
            'mgr',
            first_mgr,
            cluster=cluster_name,
            fsid=fsid,
            logger=log.getChild('mgr.' + first_mgr),
            wait=False,
            started=True,
        )

        # bootstrap
        log.info('Bootstrapping...')
        cmd = [
            'sudo',
            ctx.cephadm,
            '--image',
            ctx.ceph[cluster_name].image,
            'bootstrap',
            '--fsid',
            fsid,
            '--mon-id',
            first_mon,
            '--mgr-id',
            first_mgr,
            '--config',
            '{}/seed.{}.conf'.format(testdir, cluster_name),
            '--output-config',
            '{}/{}.conf'.format(testdir, cluster_name),
            '--output-keyring',
            '{}/{}.keyring'.format(testdir, cluster_name),
            '--output-pub-ssh-key',
            '{}/{}.pub'.format(testdir, cluster_name),
        ]
        if mons[first_mon_role].startswith('['):
            cmd += ['--mon-addrv', mons[first_mon_role]]
        else:
            cmd += ['--mon-ip', mons[first_mon_role]]
        if config.get('skip_dashboard'):
            cmd += ['--skip-dashboard']
        # bootstrap makes the keyring root 0600, so +r it for our purposes
        cmd += [
            run.Raw('&&'),
            'sudo',
            'chmod',
            '+r',
            '{}/{}.keyring'.format(testdir, cluster_name),
        ]
        bootstrap_remote.run(args=cmd)

        # fetch keys and configs
        log.info('Fetching config...')
        ctx.ceph[cluster_name].config_file = teuthology.get_file(
            remote=bootstrap_remote,
            path='{}/{}.conf'.format(testdir, cluster_name))
        log.info('Fetching client.admin keyring...')
        ctx.ceph[cluster_name].admin_keyring = teuthology.get_file(
            remote=bootstrap_remote,
            path='{}/{}.keyring'.format(testdir, cluster_name))
        log.info('Fetching mon keyring...')
        ctx.ceph[cluster_name].mon_keyring = teuthology.get_file(
            remote=bootstrap_remote,
            path='/var/lib/ceph/%s/mon.%s/keyring' % (fsid, first_mon),
            sudo=True)

        # fetch ssh key, distribute to additional nodes
        log.info('Fetching pub ssh key...')
        ssh_pub_key = teuthology.get_file(remote=bootstrap_remote,
                                          path='{}/{}.pub'.format(
                                              testdir, cluster_name)).strip()

        log.info('Installing pub ssh key for root users...')
        ctx.cluster.run(args=[
            'sudo',
            'install',
            '-d',
            '-m',
            '0700',
            '/root/.ssh',
            run.Raw('&&'),
            'echo',
            ssh_pub_key,
            run.Raw('|'),
            'sudo',
            'tee',
            '-a',
            '/root/.ssh/authorized_keys',
            run.Raw('&&'),
            'sudo',
            'chmod',
            '0600',
            '/root/.ssh/authorized_keys',
        ])

        # add other hosts
        for remote in ctx.cluster.remotes.keys():
            if remote == bootstrap_remote:
                continue
            log.info('Writing conf and keyring to %s' % remote.shortname)
            teuthology.write_file(remote=remote,
                                  path='{}/{}.conf'.format(
                                      testdir, cluster_name),
                                  data=ctx.ceph[cluster_name].config_file)
            teuthology.write_file(remote=remote,
                                  path='{}/{}.keyring'.format(
                                      testdir, cluster_name),
                                  data=ctx.ceph[cluster_name].admin_keyring)

            log.info('Adding host %s to orchestrator...' % remote.shortname)
            _shell(ctx, cluster_name, remote,
                   ['ceph', 'orch', 'host', 'add', remote.shortname])
            r = _shell(ctx,
                       cluster_name,
                       remote, ['ceph', 'orch', 'host', 'ls', '--format=json'],
                       stdout=StringIO())
            hosts = [node['host'] for node in json.loads(r.stdout.getvalue())]
            assert remote.shortname in hosts

        yield

    finally:
        log.info('Cleaning up testdir ceph.* files...')
        ctx.cluster.run(args=[
            'rm',
            '-f',
            '{}/seed.{}.conf'.format(testdir, cluster_name),
            '{}/{}.pub'.format(testdir, cluster_name),
            '{}/{}.conf'.format(testdir, cluster_name),
            '{}/{}.keyring'.format(testdir, cluster_name),
        ])

        log.info('Stopping all daemons...')

        # this doesn't block until they are all stopped...
        #ctx.cluster.run(args=['sudo', 'systemctl', 'stop', 'ceph.target'])

        # so, stop them individually
        for role in ctx.daemons.resolve_role_list(None, CEPH_ROLE_TYPES):
            cluster, type_, id_ = teuthology.split_role(role)
            ctx.daemons.get_daemon(type_, id_, cluster).stop()
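
Note that this variant reads node['host'] from the ceph orch host ls JSON, while Example #54 reads node['hostname']; the field name presumably changed between cephadm versions. A defensive parse that tolerates either key:

# Sketch with a fabricated JSON sample; accepts both field spellings.
import json

raw = '[{"hostname": "smithi001"}, {"host": "smithi002"}]'
hosts = [node.get('hostname') or node.get('host')
         for node in json.loads(raw)]
print(hosts)   # ['smithi001', 'smithi002']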
Example #59
def configure(ctx, config):
    """
    Configure the s3-tests.  This includes the running of the
    bootstrap code and the updating of local conf files.
    """
    assert isinstance(config, dict)
    log.info('Configuring s3-tests...')
    testdir = teuthology.get_testdir(ctx)
    for client, properties in config['clients'].iteritems():
        s3tests_conf = config['s3tests_conf'][client]
        if properties is not None and 'rgw_server' in properties:
            host = None
            for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
                log.info('roles: ' + str(roles))
                log.info('target: ' + str(target))
                if properties['rgw_server'] in roles:
                    _, host = split_user(target)
            assert host is not None, "Invalid client specified as the rgw_server"
            s3tests_conf['DEFAULT']['host'] = host
        else:
            s3tests_conf['DEFAULT']['host'] = 'localhost'

        if properties is not None and 'slow_backend' in properties:
            s3tests_conf['fixtures']['slow backend'] = properties['slow_backend']

        (remote,) = ctx.cluster.only(client).remotes.keys()
        remote.run(
            args=[
                'cd',
                '{tdir}/s3-tests'.format(tdir=testdir),
                run.Raw('&&'),
                './bootstrap',
                ],
            )
        conf_fp = StringIO()
        s3tests_conf.write(conf_fp)
        teuthology.write_file(
            remote=remote,
            path='{tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
            data=conf_fp.getvalue(),
            )

    log.info('Configuring boto...')
    boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
    for client, properties in config['clients'].iteritems():
        with open(boto_src, 'rb') as f:
            (remote,) = ctx.cluster.only(client).remotes.keys()
            conf = f.read().format(
                idle_timeout=config.get('idle_timeout', 30)
                )
            teuthology.write_file(
                remote=remote,
                path='{tdir}/boto.cfg'.format(tdir=testdir),
                data=conf,
                )

    try:
        yield

    finally:
        log.info('Cleaning up boto...')
        for client, properties in config['clients'].iteritems():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
                    'rm',
                    '{tdir}/boto.cfg'.format(tdir=testdir),
                    ],
                )
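
boto.cfg.template itself is not shown; given the single idle_timeout placeholder, it is presumably a small INI file along these lines (contents assumed, not taken from the source tree):

# Assumed template shape; only the {idle_timeout} placeholder is known
# from the snippet above.
template = """[Boto]
http_socket_timeout = {idle_timeout}
"""
print(template.format(idle_timeout=30))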
Example #60
def ship_apache_configs(ctx, config, role_endpoints):
    """
    Ship apache config and rgw.fcgi to all clients.  Clean up on termination
    """
    assert isinstance(config, dict)
    assert isinstance(role_endpoints, dict)
    testdir = teuthology.get_testdir(ctx)
    log.info('Shipping apache config and rgw.fcgi...')
    src = os.path.join(os.path.dirname(__file__), 'apache.conf.template')
    for client, conf in config.iteritems():
        (remote, ) = ctx.cluster.only(client).remotes.keys()
        system_type = teuthology.get_system_type(remote)
        if not conf:
            conf = {}
        idle_timeout = conf.get('idle_timeout', 30)
        if system_type == 'deb':
            mod_path = '/usr/lib/apache2/modules'
            print_continue = 'on'
        else:
            mod_path = '/usr/lib64/httpd/modules'
            print_continue = 'off'
        host, port = role_endpoints[client]
        with open(src, 'rb') as f:
            conf = f.read().format(
                testdir=testdir,
                mod_path=mod_path,
                print_continue=print_continue,
                host=host,
                port=port,
                client=client,
                idle_timeout=idle_timeout,
            )
            teuthology.write_file(
                remote=remote,
                path='{tdir}/apache/apache.{client}.conf'.format(
                    tdir=testdir, client=client),
                data=conf,
            )
        teuthology.write_file(
            remote=remote,
            path='{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
                tdir=testdir, client=client),
            data="""#!/bin/sh
ulimit -c unlimited
exec radosgw -f -n {client} -k /etc/ceph/ceph.{client}.keyring --rgw-socket-path {tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock

""".format(tdir=testdir, client=client))
        remote.run(args=[
            'chmod',
            'a=rx',
            '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(tdir=testdir,
                                                            client=client),
        ], )
    try:
        yield
    finally:
        log.info('Removing apache config...')
        for client in config.iterkeys():
            ctx.cluster.only(client).run(args=[
                'rm',
                '-f',
                '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
                                                            client=client),
                run.Raw('&&'),
                'rm',
                '-f',
                '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(tdir=testdir,
                                                                client=client),
            ], )