def install_epel(remote):
    ''' install a disabled-by-default epel repo config file '''
    remove = False
    try:
        if remote.os.package_type == 'deb':
            yield
        else:
            remove = True
            distromajor = remote.os.version.split('.')[0]

            repofiledata = textwrap.dedent('''
                [epel]
                name=epel{version}
                metalink=http://mirrors.fedoraproject.org/metalink?repo=epel-{version}&arch=$basearch
                enabled=0
                gpgcheck=0
            ''').format(version=distromajor)

            misc.create_file(remote, '/etc/yum.repos.d/epel.repo',
                             data=repofiledata, sudo=True)
            remote.run(args='sudo yum clean all')
            yield
    finally:
        if remove:
            misc.delete_file(remote, '/etc/yum.repos.d/epel.repo', sudo=True)
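# Hedged usage sketch, not part of the original module: install_epel yields and
# cleans up in a finally block, so it is presumably used as a context manager
# (i.e. decorated with @contextlib.contextmanager where it is defined). The
# caller, remote, and package name below are illustrative assumptions only.
def _example_install_with_epel(remote):
    with install_epel(remote):
        # EPEL is written with enabled=0, so enable it just for this install.
        remote.run(args='sudo yum --enablerepo=epel install -y some-epel-package')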
def push_keys_to_host(ctx, config, public_key, private_key):
    """
    Push keys to all hosts
    """
    log.info('generated public key {pub_key}'.format(pub_key=public_key))

    # add an entry for all hosts in ctx to auth_keys_data
    auth_keys_data = ''

    for inner_host in ctx.cluster.remotes.keys():
        inner_username, inner_hostname = str(inner_host).split('@')
        # create a 'user@hostname' string using our fake hostname
        fake_hostname = '{user}@{host}'.format(user=ssh_keys_user,
                                               host=str(inner_hostname))
        auth_keys_data += '\nssh-rsa {pub_key} {user_host}\n'.format(
            pub_key=public_key, user_host=fake_hostname)

    key_backup_files = dict()
    # for each host in ctx, add keys for all other hosts
    for remote in ctx.cluster.remotes:
        username, hostname = str(remote).split('@')
        if "" == username or "" == hostname:
            continue
        else:
            log.info('pushing keys to {host} for {user}'.format(
                host=hostname, user=username))

            # add the private key
            priv_key_file = '/home/{user}/.ssh/id_rsa'.format(user=username)
            priv_key_data = '{priv_key}'.format(priv_key=private_key)
            misc.delete_file(remote, priv_key_file, force=True)
            # Hadoop requires that .ssh/id_rsa have permissions of '500'
            misc.create_file(remote, priv_key_file, priv_key_data, str(500))

            # then the public key
            pub_key_file = '/home/{user}/.ssh/id_rsa.pub'.format(user=username)
            pub_key_data = 'ssh-rsa {pub_key} {user_host}'.format(
                pub_key=public_key, user_host=str(remote))
            misc.delete_file(remote, pub_key_file, force=True)
            misc.create_file(remote, pub_key_file, pub_key_data)

            # add appropriate entries to the authorized_keys file for this host
            auth_keys_file = '/home/{user}/.ssh/authorized_keys'.format(
                user=username)
            key_backup_files[remote] = backup_file(remote, auth_keys_file)
            misc.append_lines_to_file(remote, auth_keys_file, auth_keys_data)

    try:
        yield
    finally:
        # cleanup the keys
        log.info("Cleaning up SSH keys")
        cleanup_added_key(ctx, key_backup_files, auth_keys_file)
def write_info_yaml(cluster, client):
    ''' write info.yaml to client for nosetests '''
    try:
        info = {
            'cluster': {
                rem.name: {'roles': roles}
                for rem, roles in cluster.remotes.iteritems()
            }
        }
        misc.create_file(client, 'calamari/info.yaml',
                         data=yaml.safe_dump(info, default_flow_style=False))
        yield
    finally:
        misc.delete_file(client, 'calamari/info.yaml')
def push_keys_to_host(ctx, config, public_key, private_key):
    """
    Push keys to all hosts
    """
    log.info('generated public key {pub_key}'.format(pub_key=public_key))

    # add an entry for all hosts in ctx to auth_keys_data
    auth_keys_data = ''

    for inner_host in ctx.cluster.remotes.iterkeys():
        inner_username, inner_hostname = str(inner_host).split('@')
        # create a 'user@hostname' string using our fake hostname
        fake_hostname = '{user}@{host}'.format(user=ssh_keys_user,
                                               host=str(inner_hostname))
        auth_keys_data += '\nssh-rsa {pub_key} {user_host}\n'.format(
            pub_key=public_key, user_host=fake_hostname)

    # for each host in ctx, add keys for all other hosts
    for remote in ctx.cluster.remotes:
        username, hostname = str(remote).split('@')
        if "" == username or "" == hostname:
            continue
        else:
            log.info('pushing keys to {host} for {user}'.format(
                host=hostname, user=username))

            # add the private key
            priv_key_file = '/home/{user}/.ssh/id_rsa'.format(user=username)
            priv_key_data = '{priv_key}'.format(priv_key=private_key)
            misc.delete_file(remote, priv_key_file, force=True)
            # Hadoop requires that .ssh/id_rsa have permissions of '500'
            misc.create_file(remote, priv_key_file, priv_key_data, str(500))

            # then the public key
            pub_key_file = '/home/{user}/.ssh/id_rsa.pub'.format(user=username)
            pub_key_data = 'ssh-rsa {pub_key} {user_host}'.format(
                pub_key=public_key, user_host=str(remote))
            misc.delete_file(remote, pub_key_file, force=True)
            misc.create_file(remote, pub_key_file, pub_key_data)

            # add appropriate entries to the authorized_keys file for this host
            auth_keys_file = '/home/{user}/.ssh/authorized_keys'.format(
                user=username)
            backup_file(remote, auth_keys_file)
            lines = '#TEUTHOLOGY_START\n' + auth_keys_data + '\n#TEUTHOLOGY_END\n'
            misc.append_lines_to_file(remote, auth_keys_file, lines)

    try:
        yield
    finally:
        # cleanup the keys
        log.info("Cleaning up SSH keys")
        cleanup_added_key(ctx)
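# Both push_keys_to_host variants above call a backup_file() helper that is not
# shown in this section. A minimal sketch of what such a helper might look like,
# assuming it only needs to copy authorized_keys aside and return the backup
# path so the finally-block cleanup can restore it; the '.teuthology-bak'
# suffix is an assumption, not the real implementation:
def backup_file(remote, path):
    # keep a copy of the original file next to it on the remote host
    backup_path = path + '.teuthology-bak'
    remote.run(args=['cp', '-f', path, backup_path])
    return backup_path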
def write_test_conf(client):
    ''' write calamari/tests/test.conf to client for nosetests '''
    try:
        testconf = textwrap.dedent('''
            [testing]
            calamari_control = external
            ceph_control = external
            bootstrap = False
            api_username = admin
            api_password = admin
            embedded_timeout_factor = 1
            external_timeout_factor = 3
            external_cluster_path = info.yaml
        ''')
        misc.create_file(client, 'calamari/tests/test.conf', data=testconf)
        yield
    finally:
        misc.delete_file(client, 'calamari/tests/test.conf')
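# Hedged composition sketch, not from the original module: write_info_yaml and
# write_test_conf both yield inside try/finally, so (like install_epel above)
# they are presumably @contextlib.contextmanager functions that a calamari test
# task nests before running nosetests on the client. The helper name and the
# nose invocation below are illustrative assumptions only.
def _example_run_calamari_tests(cluster, client):
    with write_info_yaml(cluster, client):
        with write_test_conf(client):
            # info.yaml and test.conf exist only inside this nested scope
            client.run(args=['nosetests', 'calamari/tests'])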
def task(ctx, config):
    log.info('starting nfs_ganesha_rgw tests')
    # RGW and NFS should be on the same machine
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task set-repo only supports a dictionary for configuration"
    test_name = config['test-name'] + ".yaml"
    script_name = tests_mapper.get(config['test-name'], None) + ".py"
    nfs_version = config['nfs-version']
    mount_dir = config['mount-dir']
    branch = config.get("branch", "master")
    nfs_test_config = {'config': config.get('config')}
    log.info('got test_name: %s' % test_name)
    log.info('got nfs version: %s' % nfs_version)
    log.info('got mount dir: %s' % mount_dir)

    remotes = ctx.cluster.only(teuthology.is_type('mon'))
    mon = [remote for remote, roles_for_host in remotes.remotes.items()]
    rgw_remote = ctx.cluster.only(teuthology.is_type('rgw'))
    rgw = [remote for remote, roles_for_host in rgw_remote.remotes.items()]

    # installing nfs-ganesha-selinux package
    if rgw[0].os.version.startswith('7'):
        rgw[0].run(args=['sudo', 'yum', 'install', '-y', 'nfs-ganesha-selinux'])

    # clone the repo
    rgw[0].run(args=['sudo', 'rm', '-rf', 'nfs_ganesha_rgw'],
               check_status=False)
    rgw[0].run(args=['sudo', 'rm', '-rf',
                     run.Raw('/tmp/nfs-ganesh-rgw_log*')],
               check_status=False)
    rgw[0].run(args=['mkdir', '-p', 'nfs_ganesha_rgw'])

    # stop native nfs_ganesha service.
    rgw[0].run(args=['sudo', 'systemctl', 'stop',
                     'nfs-server.service'])  # systemctl stop nfs-server.service
    rgw[0].run(args=['sudo', 'systemctl', 'disable',
                     'nfs-server.service'])  # systemctl disable nfs-server.service

    # copy the admin keyring from the mon to the rgw node
    out = io.StringIO()
    mon[0].run(args=['sudo', 'cat', '/etc/ceph/ceph.client.admin.keyring'],
               stdout=out)
    v_as_out = out.read()
    teuthology.create_file(rgw[0], '/etc/ceph/ceph.client.admin.keyring',
                           data=v_as_out, sudo=True)

    # parsing nfs_ganesha conf file
    out = io.StringIO()
    rgw[0].run(args=['sudo', 'cat', '/etc/ganesha/ganesha.conf'], stdout=out)
    v_as_out = out.readlines()

    clean = lambda x: re.sub('[^A-Za-z0-9]+', '', x)

    for content in v_as_out:
        if 'Access_Key_Id' in content:
            access_key = clean(content.split('=')[1])
        if 'Secret_Access_Key' in content:
            secret_key = clean(content.split('=')[1])
        if 'User_Id' in content:
            rgw_user_id = clean(content.split('=')[1])
        if 'Pseudo' in content:
            pseudo = content.split('=')[1].strip(' ').strip('\n').strip(
                ' ').strip(';').strip('/')

    rgw[0].run(args=['sudo', 'setenforce', '1'])

    log.info('restarting nfs-ganesha service')
    rgw[0].run(args=['sudo', 'systemctl', 'restart', 'nfs-ganesha.service'])
    time.sleep(60)

    # fetch the test scripts and set up a virtualenv for them
    rgw[0].run(args=[
        'cd', 'nfs_ganesha_rgw', run.Raw(';'), 'git', 'clone',
        'https://github.com/red-hat-storage/ceph-qe-scripts.git'
    ])
    rgw[0].run(args=[
        'cd', 'nfs_ganesha_rgw/ceph-qe-scripts', run.Raw(';'), 'git',
        'checkout', '%s' % branch
    ])
    rgw[0].run(args=['python3', '-m', 'venv', 'venv'])
    rgw[0].run(args=[
        'source', 'venv/bin/activate', run.Raw(';'),
        run.Raw('pip3 install --upgrade setuptools'), run.Raw(';'),
        'deactivate'
    ])
    rgw[0].run(args=[
        'source', 'venv/bin/activate', run.Raw(';'),
        run.Raw(
            'pip3 install boto boto3 names PyYaml psutil ConfigParser python-swiftclient '
            'swiftly simplejson rgwadmin'), run.Raw(';'), 'deactivate'
    ])

    # copy rgw user details (yaml format) to nfs node or rgw node
    rgw_user_config = dict(user_id=rgw_user_id,
                           access_key=access_key,
                           secret_key=secret_key,
                           rgw_hostname=rgw[0].shortname,
                           ganesha_config_exists=True,
                           already_mounted=False,
                           nfs_version=nfs_version,
                           nfs_mnt_point=mount_dir,
                           Pseudo=pseudo)

    client_config_destination = 'nfs_ganesha_rgw/ceph-qe-scripts/rgw/v2/tests/nfs_ganesha/config/'

    rgw_user_config_fname = 'rgw_user.yaml'
    temp_yaml_file = rgw_user_config_fname + "_" + str(
        os.getpid()) + pwd.getpwuid(os.getuid()).pw_name
    log.info('creating rgw_user_config_fname: %s' % rgw_user_config)
    local_file = '/tmp/' + temp_yaml_file
    with open(local_file, 'w') as outfile:
        outfile.write(yaml.dump(rgw_user_config, default_flow_style=False))

    log.info('copying rgw_user_config_fname to the client node')
    rgw_user_config_destination = client_config_destination + rgw_user_config_fname
    rgw[0].put_file(local_file, rgw_user_config_destination)
    rgw[0].run(args=[run.Raw('sudo rm -rf %s' % local_file)],
               check_status=False)

    log.info('creating nfs test config file')
    log.info('creating configuration from data: %s' % nfs_test_config)
    local_file = os.path.join(
        '/tmp/',
        config.get('test-name') + "_" + str(os.getpid()) +
        pwd.getpwuid(os.getuid()).pw_name)
    with open(local_file, 'w') as outfile:
        outfile.write(yaml.dump(nfs_test_config, default_flow_style=False))
    log.info('local_file: %s' % local_file)
    log.info('copying nfs test config temp yaml to the client node')
    nfs_test_config_file = client_config_destination + test_name
    rgw[0].put_file(local_file, nfs_test_config_file)

    # run the test
    rgw[0].run(args=[
        'source', 'venv/bin/activate', run.Raw(';'),
        run.Raw(
            'python3 nfs_ganesha_rgw/ceph-qe-scripts/rgw/v2/tests/nfs_ganesha/%s '
            '-r nfs_ganesha_rgw/ceph-qe-scripts/rgw/v2/tests/nfs_ganesha/config/rgw_user.yaml '
            '-c %s ' % (script_name, nfs_test_config_file)), run.Raw(';'),
        'deactivate'
    ])

    try:
        yield
    finally:
        log.info("Deleting the test soot")
        rgw[0].run(args=['sudo', 'umount', run.Raw('%s' % mount_dir)])
        cleanup = lambda x: rgw[0].run(args=[run.Raw('sudo rm -rf %s' % x)])
        soot = [
            'venv', 'rgw-tests', 'test_data', '*.json', 'Download.*',
            'Download', '*.mpFile', 'x*', 'key.*', 'Mp.*', '*.key.*'
        ]
        list(map(cleanup, soot))
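# For reference, the rgw_user.yaml that task() generates above would look
# roughly like the following (keys come straight from the rgw_user_config dict;
# the values shown are illustrative only, since the real ones are parsed out of
# /etc/ganesha/ganesha.conf and the teuthology job config at run time):
#
#   Pseudo: cephobject
#   access_key: <access key from ganesha.conf>
#   already_mounted: false
#   ganesha_config_exists: true
#   nfs_mnt_point: <config['mount-dir']>
#   nfs_version: <config['nfs-version']>
#   rgw_hostname: <rgw[0].shortname>
#   secret_key: <secret key from ganesha.conf>
#   user_id: <User_Id from ganesha.conf>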