def run(ceph_cluster, **kw):
    """
    CephFS stress test for CEPH-11219 / CEPH-11224.

    Mounts the filesystem on two fuse and two kernel clients, activates
    multiple MDS daemons, drives mixed IO (read/write, crefi, fio, dd,
    touch, smallfile create/delete) through the shared mount, and checks
    that cluster health is unchanged across the IO burst.

    :param ceph_cluster: ceph cluster object supplied by the framework
    :param kw: expects 'config' with 'num_of_osds' and 'build'/'rhbuild'
    :return: 0 on success, 1 on failure
    """
    try:
        start = timeit.default_timer()
        tc = '11219,11224'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get('config')
        num_of_osds = config.get('num_of_osds')
        fs_util = FsUtils(ceph_cluster)
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            # Raise instead of returning 1 so the CommandFailed handler
            # below runs client cleanup — consistent with the other tests
            # in this module.
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        # Baseline cluster health before any IO is generated.
        cluster_health_beforeIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), build, None, 300)
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        # Initial mixed IO from all four clients in parallel; the last
        # (return_counts, rc) pair wins, matching the sibling tests.
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(fs_util.stress_io, client3, client_info['mounting_dir'],
                    dir_name, 0, 2, iotype='crefi')
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify('', return_counts)
        if result == 'Data validation success':
            print("Data validation success")
            dirs, rc = fs_util.mkdir(client1, 0, 3,
                                     client_info['mounting_dir'], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split('\n')
            with parallel() as p:
                p.spawn(fs_util.stress_io, client1,
                        client_info['mounting_dir'], dirs[0], 0, 1,
                        iotype='fio')
                p.spawn(fs_util.stress_io, client1,
                        client_info['mounting_dir'], dirs[0], 0, 100,
                        iotype='touch')
                p.spawn(fs_util.stress_io, client2,
                        client_info['mounting_dir'], dirs[1], 0, 1,
                        iotype='dd')
                p.spawn(fs_util.stress_io, client2,
                        client_info['mounting_dir'], dirs[2], 0, 1,
                        iotype='crefi')
                for op in p:
                    return_counts, rc = op
            # smallfile create round from the kernel client.
            with parallel() as p:
                p.spawn(fs_util.stress_io, client3,
                        client_info['mounting_dir'], dirs[0], 0, 1,
                        iotype='smallfile_create', fnum=10, fsize=1024)
                p.spawn(fs_util.stress_io, client3,
                        client_info['mounting_dir'], dirs[1], 0, 1,
                        iotype='smallfile_create', fnum=10, fsize=1024)
                p.spawn(fs_util.stress_io, client3,
                        client_info['mounting_dir'], dirs[2], 0, 1,
                        iotype='smallfile_create', fnum=10, fsize=1024)
                for op in p:
                    return_counts, rc = op
            # smallfile delete round mirrors the create round.
            with parallel() as p:
                p.spawn(fs_util.stress_io, client3,
                        client_info['mounting_dir'], dirs[0], 0, 1,
                        iotype='smallfile_delete', fnum=10, fsize=1024)
                p.spawn(fs_util.stress_io, client3,
                        client_info['mounting_dir'], dirs[1], 0, 1,
                        iotype='smallfile_delete')
                p.spawn(fs_util.stress_io, client3,
                        client_info['mounting_dir'], dirs[2], 0, 1,
                        iotype='smallfile_delete', fnum=10, fsize=1024)
                for op in p:
                    return_counts, rc = op
            cluster_health_afterIO = check_ceph_healthly(
                client_info['mon_node'][0], num_of_osds,
                len(client_info['mon_node']), build, None, 300)
            log.info("Execution of Test case CEPH-%s ended" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            # Only report the IO verdict when cluster health is unchanged.
            if cluster_health_beforeIO == cluster_health_afterIO:
                print(result)
            print('-----------------------------------------')
            with parallel() as p:
                p.spawn(fs_util.stress_io, client1,
                        client_info['mounting_dir'], dirs[0], 0, 1,
                        iotype='smallfile_create', fnum=1000, fsize=10)
                p.spawn(fs_util.stress_io, client2,
                        client_info['mounting_dir'], dirs[0], 0, 5,
                        iotype='fio')
                p.spawn(fs_util.stress_io, client3,
                        client_info['mounting_dir'], dirs[0], 0, 10,
                        iotype='dd')
                p.spawn(fs_util.stress_io, client4,
                        client_info['mounting_dir'], dirs[0], 0, 1,
                        iotype='crefi')
            print('-------------------------------------------------------')
            with parallel() as p:
                p.spawn(fs_util.read_write_IO, client1,
                        client_info['mounting_dir'], 'g', 'read',
                        dir_name=dirs[0])
                p.spawn(fs_util.read_write_IO, client2,
                        client_info['mounting_dir'], 'g', 'read',
                        dir_name=dirs[0])
            print('-------------------------------------------------------')
            # Second, identical stress/read cycle.
            with parallel() as p:
                p.spawn(fs_util.stress_io, client1,
                        client_info['mounting_dir'], dirs[0], 0, 1,
                        iotype='smallfile_create', fnum=1000, fsize=10)
                p.spawn(fs_util.stress_io, client2,
                        client_info['mounting_dir'], dirs[0], 0, 5,
                        iotype='fio')
                p.spawn(fs_util.stress_io, client3,
                        client_info['mounting_dir'], dirs[0], 0, 10,
                        iotype='dd')
                p.spawn(fs_util.stress_io, client4,
                        client_info['mounting_dir'], dirs[0], 0, 1,
                        iotype='crefi')
            print('-------------------------------------------------------')
            with parallel() as p:
                p.spawn(fs_util.read_write_IO, client1,
                        client_info['mounting_dir'], 'g', 'read',
                        dir_name=dirs[0])
                p.spawn(fs_util.read_write_IO, client2,
                        client_info['mounting_dir'], 'g', 'read',
                        dir_name=dirs[0])
            print('-------------------------------------------------------')
            log.info('Cleaning up!-----')
            # deb (Ubuntu) kernel clients are not cleaned up the same way.
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                             client_info['kernel_clients'],
                                             client_info['mounting_dir'],
                                             'umount')
                if rc == 0:
                    log.info('Cleaning up successfull')
                else:
                    return 1
            else:
                rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                             client_info['mounting_dir'],
                                             'umount')
                if rc == 0:
                    log.info('Cleaning up successfull')
                else:
                    return 1
            log.info("Execution of Test case CEPH-%s ended" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            print(result)
            print('Script execution time:------')
            stop = timeit.default_timer()
            total_time = stop - start
            mins, secs = divmod(total_time, 60)
            hours, mins = divmod(mins, 60)
            print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'],
                                         'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'],
                                         'umount')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    # CEPH-11298: rsync data between a local (non-CephFS) directory and a
    # CephFS mount in both directions from fuse and kernel clients, then
    # verify that every IO helper reported the same return count.
    # Returns 0 on success, 1 on failure.
    try:
        start = timeit.default_timer()
        tc = '11298'
        source_dir = '/mnt/source'
        target_dir = 'target'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        # NOTE(review): the sibling tests in this module call
        # get_clients(build); confirm this no-argument form is still valid.
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        # Recreate the local rsync source directory on every client.
        for client in client_info['clients']:
            client.exec_command(cmd='sudo rm -rf %s' % source_dir)
            client.exec_command(cmd='sudo mkdir %s' % source_dir)
        # The target lives on the shared CephFS mount, so creating it from
        # a single client is enough (hence the immediate break).
        for client in client_info['clients']:
            client.exec_command(cmd='sudo mkdir %s%s' %
                                (client_info['mounting_dir'], target_dir))
            break
        # Populate the source directory with mixed IO from all clients.
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, source_dir, '', 0, 100,
                    iotype='touch')
            p.spawn(fs_util.read_write_IO, client1, source_dir, 'g', 'write')
            p.spawn(fs_util.stress_io, client2, source_dir, '', 0, 10,
                    iotype='dd')
            p.spawn(fs_util.stress_io, client3, source_dir, '', 0, 10,
                    iotype='crefi')
            p.spawn(fs_util.stress_io, client4, source_dir, '', 0, 1,
                    iotype='fio')
            for op in p:
                return_counts1, rc = op
        # rsync local source -> CephFS target from all four clients at once.
        with parallel() as p:
            p.spawn(fs_util.rsync, client1, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            p.spawn(fs_util.rsync, client2, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            p.spawn(fs_util.rsync, client3, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            p.spawn(fs_util.rsync, client4, source_dir,
                    '%s%s' % (client_info['mounting_dir'], target_dir))
            for op in p:
                return_counts2, rc = op
        # More IO directly on the CephFS target directory.
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, client_info['mounting_dir'],
                    target_dir, 0, 100, iotype='touch')
            p.spawn(fs_util.stress_io, client2, client_info['mounting_dir'],
                    target_dir, 0, 11, iotype='dd')
            p.spawn(fs_util.stress_io, client3, client_info['mounting_dir'],
                    target_dir, 0, 3, iotype='fio')
            p.spawn(fs_util.stress_io, client4, client_info['mounting_dir'],
                    target_dir, 0, 1, iotype='fio')
            for op in p:
                return_counts3, rc = op
        # rsync back: CephFS target contents -> local source.
        with parallel() as p:
            p.spawn(fs_util.rsync, client1,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            p.spawn(fs_util.rsync, client2,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            p.spawn(fs_util.rsync, client3,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            p.spawn(fs_util.rsync, client4,
                    '%s%s/*' % (client_info['mounting_dir'], target_dir),
                    source_dir)
            for op in p:
                return_counts4, rc = op
        # The test passes when every recorded return value across all four
        # rounds is identical (the set collapses to a single element).
        rc = list(return_counts1.values()) + list(return_counts2.values()) + \
            list(return_counts3.values()) + list(return_counts4.values())
        rc_set = set(rc)
        if len(rc_set) == 1:
            print("Test case CEPH-%s passed" % (tc))
        else:
            print(("Test case CEPH-%s failed" % (tc)))
        log.info("Test completed for CEPH-%s" % (tc))
        log.info('Cleaning up!-----')
        # deb (Ubuntu) kernel clients take the fuse-only cleanup path.
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'],
                                         'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'],
                                         'umount')
        if rc == 0:
            log.info('Cleaning up successfull')
        else:
            return 1
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            fs_util.client_clean_up(client_info['fuse_clients'],
                                    client_info['kernel_clients'],
                                    client_info['mounting_dir'], 'umount')
        else:
            fs_util.client_clean_up(client_info['fuse_clients'], '',
                                    client_info['mounting_dir'], 'umount')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    CephFS MDS-pinning test for CEPH-11228.

    Creates directories in bulk under testdir/, pins subsets of them to
    MDS ranks from different clients, then drives IO into pinned
    directories while failing over the MDS.

    :param ceph_cluster: ceph cluster object supplied by the framework
    :param kw: expects 'config' with 'num_of_dirs' and 'build'/'rhbuild'
    :return: 0 on success, 1 on failure
    """
    try:
        start = timeit.default_timer()
        config = kw.get("config")
        num_of_dirs = config.get("num_of_dirs")
        # Floor-divide so the value stays an int: it is multiplied and used
        # as range bounds by mkdir_bulk/pinning below.  True division (/)
        # would make it a float in Python 3 (the sibling 11227 test uses
        # int(num_of_dirs / 5) for the same reason).
        num_of_dirs = num_of_dirs // 5
        tc = "11228"
        dir_name = "dir"
        test_dir = "testdir/"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        # Warm-up mixed IO from all clients before the pinning workload.
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="crefi",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                1,
                iotype="crefi",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info["mds_nodes"])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            for client in client1:
                client.exec_command(
                    cmd="sudo mkdir %s%s"
                    % (client_info["mounting_dir"], test_dir))
            # Bulk-create num_of_dirs*10 directories split across clients.
            with parallel() as p:
                p.spawn(
                    fs_util.mkdir_bulk,
                    client1,
                    0,
                    num_of_dirs * 2,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 2 + 1,
                    num_of_dirs * 4,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 4 + 1,
                    num_of_dirs * 6,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 6 + 1,
                    num_of_dirs * 8,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                p.spawn(
                    fs_util.mkdir_bulk,
                    client2,
                    num_of_dirs * 8 + 1,
                    num_of_dirs * 10,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                )
                for op in p:
                    rc = op
            if rc == 0:
                log.info("Directories created successfully")
            else:
                raise CommandFailed("Directory creation failed")
            # Pin disjoint directory ranges to MDS ranks 1 and 0.
            with parallel() as p:
                p.spawn(
                    fs_util.pinning,
                    client2,
                    0,
                    num_of_dirs * 1,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client3,
                    num_of_dirs * 1,
                    num_of_dirs * 2,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client4,
                    num_of_dirs * 2,
                    num_of_dirs * 3,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    1,
                )
                p.spawn(
                    fs_util.pinning,
                    client1,
                    num_of_dirs * 3,
                    num_of_dirs * 4,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    0,
                )
                p.spawn(
                    fs_util.pinning,
                    client3,
                    num_of_dirs * 4,
                    num_of_dirs * 5,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    0,
                )
            # IO to pinned directories while the MDS is failed over.
            with parallel() as p:
                p.spawn(
                    fs_util.pinned_dir_io_mdsfailover,
                    client1,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    num_of_dirs * 1,
                    num_of_dirs * 5,
                    10,
                    fs_util.mds_fail_over,
                    client_info["mds_nodes"],
                )
                for op in p:
                    return_counts, rc = op
            with parallel() as p:
                p.spawn(
                    fs_util.pinned_dir_io_mdsfailover,
                    client3,
                    client_info["mounting_dir"] + test_dir,
                    dir_name,
                    num_of_dirs * 7,
                    num_of_dirs * 8,
                    20,
                    fs_util.mds_fail_over,
                    client_info["mds_nodes"],
                )
                for op in p:
                    return_counts, rc = op
            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            print(result)
            log.info("Cleaning up!-----")
            # deb (Ubuntu) kernel clients take the fuse-only cleanup path.
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
                rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)
            else:
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
                rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)
            if rc_client == 0 and rc_mds == 0:
                log.info("Cleaning up successfull")
            else:
                return 1
            print("Script execution time:------")
            stop = timeit.default_timer()
            total_time = stop - start
            mins, secs = divmod(total_time, 60)
            hours, mins = divmod(mins, 60)
            print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc_client = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
            rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)
        else:
            rc_client = fs_util.client_clean_up(client_info["fuse_clients"],
                                                "",
                                                client_info["mounting_dir"],
                                                "umount")
            rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)
        if rc_client == 0 and rc_mds == 0:
            log.info("Cleaning up successfull")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """
    NFS-Ganesha over CephFS test.

    Installs and configures nfs-ganesha on one kernel client, exports the
    CephFS filesystem, mounts it over NFS on another client, drives mixed
    IO through the NFS mount, and cleans everything up.

    :param ceph_cluster: ceph cluster object supplied by the framework
    :param kw: expects 'config' with 'build'/'rhbuild'
    :return: 0 on success, 1 on failure
    """
    try:
        tc = 'nfs-ganesha'
        nfs_mounting_dir = '/mnt/nfs_mount/'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        # One kernel client serves NFS, the other consumes it.
        nfs_server = client_info['kernel_clients'][0]
        nfs_client = [client_info['kernel_clients'][1]]
        client1 = [client_info['fuse_clients'][0]]
        client2 = [client_info['fuse_clients'][1]]
        client3 = [client_info['kernel_clients'][0]]
        client4 = [client_info['kernel_clients'][1]]
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(
            client3, client_info['mounting_dir'],
            client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4, client_info['mounting_dir'],
            client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        dirs, rc = fs_util.mkdir(
            client1, 0, 4, client_info['mounting_dir'], 'dir')
        if rc == 0:
            log.info("Directories created")
        else:
            raise CommandFailed("Directory creation failed")
        dirs = dirs.split('\n')
        rc = fs_util.nfs_ganesha_install(nfs_server)
        if rc == 0:
            log.info('NFS ganesha installed successfully')
        else:
            raise CommandFailed('NFS ganesha installation failed')
        rc = fs_util.nfs_ganesha_conf(nfs_server, 'admin')
        if rc == 0:
            log.info('NFS ganesha config added successfully')
        else:
            raise CommandFailed('NFS ganesha config adding failed')
        rc = fs_util.nfs_ganesha_mount(
            nfs_client[0], nfs_mounting_dir, nfs_server.node.hostname)
        if rc == 0:
            log.info('NFS-ganesha mount passed')
        else:
            raise CommandFailed('NFS ganesha mount failed')
        # Mixed IO through the NFS mount (the CephFS export appears under
        # the 'ceph/' pseudo-path of the mount).
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[0], 0, 5, iotype='fio')
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[2], 0, 5, iotype='dd')
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[1], 0, 1, iotype='crefi')
            p.spawn(
                fs_util.stress_io,
                nfs_client,
                nfs_mounting_dir + 'ceph/',
                dirs[3], 0, 1, iotype='smallfile_create', fnum=1000,
                fsize=1024)
        for client in nfs_client:
            log.info('Unmounting nfs-ganesha mount on client:')
            client.exec_command(cmd='sudo umount %s -l' % (nfs_mounting_dir))
            log.info('Removing nfs-ganesha mount dir on client:')
            client.exec_command(cmd='sudo rm -rf %s' % (nfs_mounting_dir))
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                client_info['kernel_clients'],
                client_info['mounting_dir'],
                'umount')
        else:
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'],
                '',
                client_info['mounting_dir'],
                'umount')
        if rc == 0:
            log.info('Cleaning up successfull')
        else:
            # Previously this fell through and returned 0 even when the
            # client cleanup failed; propagate the failure instead.
            return 1
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    # CEPH-11227: MDS pinning with failover.  Bulk-creates directories
    # under testdir/, pins ranges of them to MDS ranks from different
    # clients, and repeatedly drives IO into pinned directories while
    # failing over the MDS.  Returns 0 on success, 1 on failure.
    try:
        start = timeit.default_timer()
        config = kw.get('config')
        num_of_dirs = config.get('num_of_dirs')
        tc = '11227'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        # Warm-up mixed IO from all four clients.
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                '',
                0,
                2,
                iotype='crefi',
            )
            p.spawn(fs_util.stress_io, client3, client_info['mounting_dir'],
                    '', 0, 2, iotype='crefi')
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify('', return_counts)
        client1[0].exec_command(cmd='sudo mkdir %s%s'
                                % (client_info['mounting_dir'], 'testdir'))
        if result == 'Data validation success':
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info['mds_nodes'])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            # Work in fifths of the configured directory count.
            num_of_dirs = int(num_of_dirs / 5)
            with parallel() as p:
                p.spawn(fs_util.mkdir_bulk, client1, 0, num_of_dirs * 1,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client2, num_of_dirs * 1 + 1,
                        num_of_dirs * 2,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client1, num_of_dirs * 2 + 1,
                        num_of_dirs * 3,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client2, num_of_dirs * 3 + 1,
                        num_of_dirs * 4,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                p.spawn(fs_util.mkdir_bulk, client1, num_of_dirs * 4 + 1,
                        num_of_dirs * 5,
                        client_info['mounting_dir'] + 'testdir/', dir_name)
                for op in p:
                    rc = op
            if rc == 0:
                log.info('Directories created successfully')
            else:
                raise CommandFailed('Directory creation failed')
            # Max-out IO on directory ranges; while it runs, pin the first
            # ten directories to rank 0 if the mount is still present.
            # NOTE(review): the '/testdir/' paths here produce a double
            # slash when mounting_dir ends with '/'; harmless on POSIX,
            # but inconsistent with the 'testdir/' form used above.
            with parallel() as p:
                p.spawn(fs_util.max_dir_io, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        0, num_of_dirs * 1, 10)
                p.spawn(fs_util.max_dir_io, client2,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 1, num_of_dirs * 2, 10)
                rc = fs_util.check_mount_exists(client1[0])
                if rc == 0:
                    fs_util.pinning(client1, 0, 10,
                                    client_info['mounting_dir'] + 'testdir/',
                                    dir_name, 0)
                p.spawn(fs_util.max_dir_io, client3,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 3, num_of_dirs * 4, 10)
                p.spawn(fs_util.max_dir_io, client4,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        num_of_dirs * 4, num_of_dirs * 5, 10)
            # IO into the pinned directories while the MDS fails over.
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        0, 10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            # Pin the remaining directory ranges to rank 1.
            with parallel() as p:
                p.spawn(fs_util.pinning, client2, 10, num_of_dirs * 1,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        1)
                p.spawn(fs_util.pinning, client3, num_of_dirs * 1,
                        num_of_dirs * 2,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        1)
                p.spawn(fs_util.pinning, client4, num_of_dirs * 2,
                        num_of_dirs * 3,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        1)
                p.spawn(fs_util.pinning, client1, num_of_dirs * 3,
                        num_of_dirs * 4,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        1)
                p.spawn(fs_util.pinning, client3, num_of_dirs * 4,
                        num_of_dirs * 5,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        1)
            # Repeat the failover IO from each client in turn.
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client1,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        0, 10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client2,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        0, 10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client3,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        0, 10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client4,
                        client_info['mounting_dir'] + '/testdir/', dir_name,
                        0, 10, 100, fs_util.mds_fail_over,
                        client_info['mds_nodes'])
            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            log.info('Cleaning up!-----')
            # deb (Ubuntu) kernel clients take the fuse-only cleanup path.
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc_client = fs_util.client_clean_up(
                    client_info['fuse_clients'],
                    client_info['kernel_clients'],
                    client_info['mounting_dir'], 'umount')
                rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)
            else:
                rc_client = fs_util.client_clean_up(
                    client_info['fuse_clients'],
                    '',
                    client_info['mounting_dir'], 'umount')
                rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)
            if rc_client == 0 and rc_mds == 0:
                log.info('Cleaning up successfull')
            else:
                return 1
            print('Script execution time:------')
            stop = timeit.default_timer()
            total_time = stop - start
            mins, secs = divmod(total_time, 60)
            hours, mins = divmod(mins, 60)
            print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
            return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc_client = fs_util.client_clean_up(
                client_info['fuse_clients'],
                client_info['kernel_clients'],
                client_info['mounting_dir'], 'umount')
            rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)
        else:
            rc_client = fs_util.client_clean_up(
                client_info['fuse_clients'], '',
                client_info['mounting_dir'], 'umount')
            rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)
        if rc_client == 0 and rc_mds == 0:
            log.info('Cleaning up successfull')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def _spawn_stress_trio(p, fs_util, client1, client2, client3, mounting_dir, dir_name):
    """Queue the fio/dd/touch stress trio that runs alongside each failure injection."""
    p.spawn(fs_util.stress_io, client1, mounting_dir, dir_name, 0, 10, iotype="fio")
    p.spawn(fs_util.stress_io, client2, mounting_dir, dir_name, 0, 10, iotype="dd")
    p.spawn(fs_util.stress_io, client3, mounting_dir, dir_name, 0, 500, iotype="touch")


def run(ceph_cluster, **kw):
    """CEPH-11231: run cephfs client IO while mon and osd daemons are disrupted.

    Mounts the fs on two fuse and two kernel clients, seeds mixed IO, pins 50
    directories across MDS ranks, then repeatedly runs fio/dd/touch stress
    while injecting failures (reboot, network cut, daemon kill/restart) on the
    mon and osd nodes.

    :param ceph_cluster: ceph cluster object supplied by the framework
    :param kw: test kwargs; reads config['build'] (or 'rhbuild')
    :return: 0 on success, 1 on failure
    """
    try:
        start = timeit.default_timer()
        tc = "11231"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = [client_info["fuse_clients"][0]]
        client2 = [client_info["fuse_clients"][1]]
        client3 = [client_info["kernel_clients"][0]]
        client4 = [client_info["kernel_clients"][1]]
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(
            client3, client_info["mounting_dir"], client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(
            client4, client_info["mounting_dir"], client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        client1[0].exec_command(
            sudo=True, cmd="mkdir %s%s" % (client_info["mounting_dir"], dir_name))
        # Seed mixed IO from all four clients before any failure injection.
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info["mounting_dir"], "g", "write")
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(fs_util.stress_io, client2, client_info["mounting_dir"],
                    "", 0, 1, iotype="smallfile")
            p.spawn(fs_util.stress_io, client3, client_info["mounting_dir"],
                    dir_name, 0, 1, iotype="smallfile")
            p.spawn(fs_util.read_write_IO, client4,
                    client_info["mounting_dir"], "g", "readwrite")
            p.spawn(fs_util.read_write_IO, client3, client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info["mds_nodes"])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            for client in client1:
                client.exec_command(
                    cmd="sudo mkdir %s%s_{1..50}"
                    % (client_info["mounting_dir"], dir_name))
                if client.node.exit_status == 0:
                    log.info("directories created succcessfully")
                else:
                    raise CommandFailed("directories creation failed")
            # Pin half of the 50 directories to each MDS rank.
            with parallel() as p:
                p.spawn(fs_util.pinning, client1, 1, 25,
                        client_info["mounting_dir"], dir_name, 0)
                p.spawn(fs_util.pinning, client3, 26, 50,
                        client_info["mounting_dir"], dir_name, 1)
            with parallel() as p:
                p.spawn(fs_util.max_dir_io, client1,
                        client_info["mounting_dir"], dir_name, 1, 25, 1000)
                p.spawn(fs_util.max_dir_io, client3,
                        client_info["mounting_dir"], dir_name, 26, 50, 1000)
            # mon failures under live IO: reboot, network cut, daemon kill.
            with parallel() as p:
                _spawn_stress_trio(p, fs_util, client1, client2, client3,
                                   client_info["mounting_dir"], dir_name)
                for node in client_info["mon_node"]:
                    p.spawn(fs_util.reboot, node)
            with parallel() as p:
                _spawn_stress_trio(p, fs_util, client1, client2, client3,
                                   client_info["mounting_dir"], dir_name)
                for node in client_info["mon_node"]:
                    fs_util.network_disconnect(node)
            with parallel() as p:
                _spawn_stress_trio(p, fs_util, client1, client2, client3,
                                   client_info["mounting_dir"], dir_name)
                for node in client_info["mon_node"]:
                    fs_util.pid_kill(node, "mon")
            # osd failures under live IO: reboot, network cut, daemon kill.
            with parallel() as p:
                _spawn_stress_trio(p, fs_util, client1, client2, client3,
                                   client_info["mounting_dir"], dir_name)
                for node in client_info["osd_nodes"]:
                    p.spawn(fs_util.reboot, node)
            with parallel() as p:
                _spawn_stress_trio(p, fs_util, client1, client2, client3,
                                   client_info["mounting_dir"], dir_name)
                for node in client_info["osd_nodes"]:
                    fs_util.network_disconnect(node)
            with parallel() as p:
                _spawn_stress_trio(p, fs_util, client1, client2, client3,
                                   client_info["mounting_dir"], dir_name)
                for node in client_info["osd_nodes"]:
                    fs_util.pid_kill(node, "osd")
            # NOTE(review): the osd network_disconnect pass is repeated here,
            # as in the original sequence — confirm the duplication is intended.
            with parallel() as p:
                _spawn_stress_trio(p, fs_util, client1, client2, client3,
                                   client_info["mounting_dir"], dir_name)
                for node in client_info["osd_nodes"]:
                    fs_util.network_disconnect(node)
            with parallel() as p:
                _spawn_stress_trio(p, fs_util, client1, client2, client3,
                                   client_info["mounting_dir"], dir_name)
                for node in client_info["mon_node"]:
                    p.spawn(fs_util.daemon_systemctl, node, "mon", "restart")
            log.info("Execution of Test case CEPH-%s ended:" % (tc))
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc_client = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            # deb (Ubuntu) kernel clients are skipped during unmount.
            rc_client = fs_util.client_clean_up(
                client_info["fuse_clients"], "",
                client_info["mounting_dir"], "umount",
            )
        rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)
        if rc_client == 0 and rc_mds == 0:
            log.info("Cleaning up successfull")
        else:
            return 1
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"], "",
                client_info["mounting_dir"], "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-11230: directory pinning with MDS failover under IO.

    Pins 50 directories across two MDS ranks, drives IO into the pinned
    trees while failing over the active MDS, and verifies the IO counts.

    :param ceph_cluster: ceph cluster object supplied by the framework
    :param kw: test kwargs; reads config['build'] (or 'rhbuild')
    :return: 0 on success, 1 on failure
    """
    try:
        start = timeit.default_timer()
        tc = '11230'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        # Fix: get_clients() was called without the build argument here,
        # while every sibling test passes it; fetch build the same way.
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(
            client3, client_info['mounting_dir'], client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4, client_info['mounting_dir'], client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        # Seed mixed IO from all clients and validate read/write counts.
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(fs_util.stress_io, client2, client_info['mounting_dir'],
                    '', 0, 1, iotype='crefi')
            p.spawn(fs_util.stress_io, client3, client_info['mounting_dir'],
                    dir_name, 0, 1, iotype='crefi')
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3, client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify('', return_counts)
        if result == 'Data validation success':
            print("Data validation success")
            fs_util.activate_multiple_mdss(client_info['mds_nodes'])
            log.info("Execution of Test case CEPH-%s started:" % (tc))
            for client in client1:
                client.exec_command(
                    cmd='sudo mkdir %s%s_{1..50}'
                    % (client_info['mounting_dir'], dir_name))
                if client.node.exit_status == 0:
                    log.info("directories created succcessfully")
                else:
                    raise CommandFailed("directories creation failed")
            # Pin dirs 1-25 to rank 0 and dirs 26-50 to rank 1.
            with parallel() as p:
                p.spawn(fs_util.pinning, client1, 1, 25,
                        client_info['mounting_dir'], dir_name, 0)
                p.spawn(fs_util.pinning, client3, 26, 50,
                        client_info['mounting_dir'], dir_name, 1)
            with parallel() as p:
                p.spawn(fs_util.max_dir_io, client1,
                        client_info['mounting_dir'], dir_name, 1, 25, 1000)
                p.spawn(fs_util.max_dir_io, client3,
                        client_info['mounting_dir'], dir_name, 26, 50, 1000)
            # IO into the rank-0 pinned dirs while the active MDS fails over.
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client1,
                        client_info['mounting_dir'], dir_name, 1, 25, 10,
                        fs_util.mds_fail_over, client_info['mds_nodes'])
                for op in p:
                    return_counts, rc = op
            # Same for the rank-1 pinned dirs, driven from a kernel client.
            with parallel() as p:
                p.spawn(fs_util.pinned_dir_io_mdsfailover, client4,
                        client_info['mounting_dir'], dir_name, 26, 50, 20,
                        fs_util.mds_fail_over, client_info['mds_nodes'])
                for op in p:
                    return_counts, rc = op
            log.info("Execution of Test case CEPH-%s ended:" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            log.info(result)
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc_client = fs_util.client_clean_up(
                client_info['fuse_clients'], client_info['kernel_clients'],
                client_info['mounting_dir'], 'umount')
            rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)
        else:
            rc_client = fs_util.client_clean_up(
                client_info['fuse_clients'], '',
                client_info['mounting_dir'], 'umount')
            rc_mds = fs_util.mds_cleanup(client_info['mds_nodes'], None)
        if rc_client == 0 and rc_mds == 0:
            log.info('Cleaning up successfull')
        else:
            return 1
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'],
                                         'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'],
                                         'umount')
        if rc == 0:
            log.info('Cleaning up successfull')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """BZ 1798719: kernel-mount cephfs with a client name longer than 37 chars.

    Creates a cephx user with a 39-character name, kernel-mounts the fs with
    that user's secret file, and asserts the mount shows up in `mount` output.

    :param ceph_cluster: ceph cluster object supplied by the framework
    :param kw: test kwargs; reads config['build'] (or 'rhbuild')
    :return: 0 on success, 1 on failure
    """
    try:
        start = timeit.default_timer()
        bz = '1798719'
        log.info('Running cephfs test for bug %s' % bz)
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info('Got client info')
        else:
            raise CommandFailed('fetching client info failed')
        client1 = []
        client1.append(client_info['kernel_clients'][0])
        mon_node_ip = client_info['mon_node_ip']
        mounting_dir = client_info['mounting_dir']
        # 39 characters — longer than the 37 the bug was about.
        user_name = 'qwertyuiopasdfghjklzxcvbnm1234567890123'
        p_flag = 'rw'
        log.info("Creating user with more than 37 letters")
        for client in client1:
            client.exec_command(
                cmd="sudo ceph auth get-or-create client.%s "
                    "mon 'allow r' mds "
                    "'allow %s' osd 'allow rw' "
                    "-o /etc/ceph/ceph.client.%s.keyring"
                    % (user_name, p_flag, user_name))
            log.info("Creating mounting dir:")
            client.exec_command(cmd='sudo mkdir %s' % (mounting_dir))
            out, rc = client.exec_command(
                cmd='sudo ceph auth get-key client.%s' % (user_name))
            secret_key = out.read().decode().rstrip('\n')
            # Stash the key in a secret file for the mount -o secretfile option.
            key_file = client.write_file(
                sudo=True,
                file_name='/etc/ceph/%s.secret' % (user_name),
                file_mode='w')
            key_file.write(secret_key)
            key_file.flush()
            op, rc = client.exec_command(
                cmd='sudo mount -t ceph %s,%s,%s:/ '
                    '%s -o name=%s,secretfile=/etc/ceph/%s.secret'
                    % (mon_node_ip[0], mon_node_ip[1], mon_node_ip[2],
                       mounting_dir, user_name, user_name))
            out, rc = client.exec_command(cmd='mount')
            mount_output = out.read().decode()
            mount_output = mount_output.split()
            log.info("Checking if kernel mount is passed or failed:")
            assert mounting_dir.rstrip('/') in mount_output
            log.info("mount is passed")
        log.info("Execution of Test for bug %s ended:" % (bz))
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client1[0].pkg_type != 'deb':
            # Fix: client_clean_up was called with the kernel-client list in
            # the fuse-clients slot (3 positional args); sibling kernel-only
            # call sites pass ('', kernel_clients, dir, 'umount').
            rc = fs_util.client_clean_up('',
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'],
                                         'umount')
            if rc == 0:
                log.info('Cleaning up successfull')
        # Fix: the handler previously fell through (returned None) when
        # cleanup succeeded; a CommandFailed must always report failure.
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-11338: verify path-restricted cephx capabilities on cephfs clients.

    Creates clients whose cephx caps restrict them to sub-directories with
    rw / r / * MDS permissions, then with restricted OSD permissions, and
    finally with and without the layout 'p' flag, checking that allowed IO
    succeeds and disallowed IO raises CommandFailed.

    :param ceph_cluster: ceph cluster object supplied by the framework
    :param kw: test kwargs; reads config['build'] (or 'rhbuild')
    :return: 0 on success, 1 on failure
    """
    try:
        start = timeit.default_timer()
        dir_name = "dir"
        log.info("Running cephfs 11338 test case")
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(
            client3, client_info["mounting_dir"], client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(
            client4, client_info["mounting_dir"], client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        dirs, rc = fs_util.mkdir(
            client1, 1, 3, client_info["mounting_dir"], dir_name)
        if rc == 0:
            log.info("Directories created")
        dirs = dirs.split("\n")
        # New clients restricted to sub-directories via cephx path caps.
        new_client1_name = client_info["fuse_clients"][
            0].node.hostname + "_%s" % (dirs[0])
        new_client2_name = client_info["fuse_clients"][
            1].node.hostname + "_%s" % (dirs[0])
        new_client3_name = client_info["kernel_clients"][
            0].node.hostname + "_%s" % (dirs[1])
        new_client3_mouting_dir = "/mnt/%s_%s/" % (
            client_info["kernel_clients"][0].node.hostname,
            dirs[1],
        )
        new_client2_mouting_dir = "/mnt/%s_%s/" % (
            client_info["fuse_clients"][1].node.hostname,
            dirs[0],
        )
        new_client1_mouting_dir = "/mnt/%s_%s/" % (
            client_info["fuse_clients"][0].node.hostname,
            dirs[0],
        )
        rc1 = fs_util.auth_list(client1, path=dirs[0], permission="rw", mds=True)
        rc2 = fs_util.auth_list(client2, path=dirs[0], permission="r", mds=True)
        rc3 = fs_util.auth_list(client3, path=dirs[1], permission="*", mds=True)
        # Fix: only rc1-rc3 are produced above; the old check also tested a
        # stale rc4 left over from the unrestricted auth_list calls.
        if rc1 == 0 and rc2 == 0 and rc3 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(
            client1,
            new_client1_mouting_dir,
            new_client=new_client1_name,
            sub_dir=dirs[0],
        )
        rc2 = fs_util.fuse_mount(
            client2,
            new_client2_mouting_dir,
            new_client=new_client2_name,
            sub_dir=dirs[0],
        )
        rc3 = fs_util.kernel_mount(
            client3,
            new_client3_mouting_dir,
            client_info["mon_node_ip"],
            new_client=new_client3_name,
            sub_dir=dirs[1],
        )
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        if rc3 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        # The rw-restricted client must be able to create and delete files.
        _, rc = fs_util.stress_io(
            client1, new_client1_mouting_dir, "", 0, 1,
            iotype="smallfile_create", fnum=1000, fsize=10)
        if rc == 0:
            log.info("Permissions set for client %s is working " % new_client1_name)
        else:
            log.error("Permissions set for client %s is failed" % new_client1_name)
            return 1
        _, rc = fs_util.stress_io(
            client1, new_client1_mouting_dir, "", 0, 1,
            iotype="smallfile_delete", fnum=1000, fsize=10)
        if rc == 0:
            log.info("Permissions set for client %s is working properly"
                     % new_client1_name)
        else:
            log.error("Permissions set for client %s is failed" % new_client1_name)
            return 1
        # The read-only client: a write attempt must raise CommandFailed.
        try:
            _, rc = fs_util.stress_io(
                client2, new_client2_mouting_dir, "", 0, 1, iotype="touch")
        except CommandFailed:
            log.info("Permissions set for client %s is working properly"
                     % new_client2_name)
        _, rc = fs_util.stress_io(
            client3, new_client3_mouting_dir, "", 0, 1,
            iotype="smallfile_create", fnum=1000, fsize=10)
        if rc == 0:
            log.info("Permissions set for client %s is working properly"
                     % new_client3_name)
        else:
            log.error("Permissions set for client %s is failed" % new_client3_name)
            return 1
        _, rc = fs_util.stress_io(
            client3, new_client3_mouting_dir, "", 0, 1,
            iotype="smallfile_delete", fnum=1000, fsize=10)
        # Fix: these two messages carried a %s placeholder with no argument,
        # so the literal "%s" was logged instead of the client name.
        if rc == 0:
            log.info("Permissions set for client %s is working properly"
                     % new_client3_name)
        else:
            log.error("Permissions set for client %s is failed" % new_client3_name)
            return 1
        fs_util.client_clean_up(client1, "", new_client1_mouting_dir, "umount",
                                client_name=new_client1_name)
        fs_util.client_clean_up(client2, "", new_client2_mouting_dir, "umount",
                                client_name=new_client2_name)
        fs_util.client_clean_up("", client3, new_client3_mouting_dir, "umount",
                                client_name=new_client3_name)
        # Re-create the restricted clients with OSD-level permissions.
        fs_util.auth_list(client1, path=dirs[0], permission="rw", osd=True)
        fs_util.auth_list(client3, path=dirs[1], permission="r", osd=True)
        fs_util.fuse_mount(client1, new_client1_mouting_dir,
                           new_client=new_client1_name)
        fs_util.kernel_mount(
            client3,
            new_client3_mouting_dir,
            client_info["mon_node_ip"],
            new_client=new_client3_name,
        )
        fs_util.stress_io(
            client1, new_client1_mouting_dir, "", 0, 1,
            iotype="smallfile_delete", fnum=1000, fsize=10)
        # osd 'r' client: writing file data must be denied.
        try:
            if client_info["kernel_clients"][0].pkg_type == "rpm":
                client_info["kernel_clients"][0].exec_command(
                    cmd="sudo dd if=/dev/zero of=%s/file bs=10M count=10"
                    % new_client3_mouting_dir)
        except CommandFailed as e:
            log.info(e)
            log.info("Permissions set for client %s is working properly"
                     % (client_info["kernel_clients"][0].node.hostname
                        + "_" + (dirs[1])))
        fs_util.client_clean_up(
            client1,
            "",
            new_client1_mouting_dir,
            "umount",
            client_name=client_info["fuse_clients"][0].node.hostname
            + "_%s" % (dirs[0]),
        )
        fs_util.client_clean_up("", client3, new_client3_mouting_dir, "umount",
                                client_name=new_client3_name)
        # Layout/quota 'p' flag: client1 may set layout attrs, client3 may not.
        fs_util.auth_list(client1, path=dirs[0], layout_quota="p_flag")
        fs_util.auth_list(client3, path=dirs[1], layout_quota="!p_flag")
        fs_util.fuse_mount(client1, new_client1_mouting_dir,
                           new_client=new_client1_name)
        fs_util.kernel_mount(
            client3,
            new_client3_mouting_dir,
            client_info["mon_node_ip"],
            new_client=new_client3_name,
        )
        file_name = "file1"
        client_info["fuse_clients"][0].exec_command(
            cmd="sudo touch %s/%s" % (new_client1_mouting_dir, file_name))
        client_info["fuse_clients"][0].exec_command(
            cmd="sudo mkdir %s/%s" % (new_client1_mouting_dir, dirs[0]))
        try:
            fs_util.setfattr(client3, "stripe_unit", "1048576",
                             new_client3_mouting_dir, file_name)
            fs_util.setfattr(client3, "max_bytes", "100000000",
                             new_client3_mouting_dir, dirs[1])
        except CommandFailed:
            log.info("Permission denied for setting attrs,success")
        fs_util.setfattr(client1, "stripe_unit", "1048576",
                         new_client1_mouting_dir, file_name)
        fs_util.setfattr(client1, "max_bytes", "100000000",
                         new_client1_mouting_dir, dirs[0])
        fs_util.client_clean_up(client1, "", new_client1_mouting_dir, "umount",
                                client_name=new_client1_name)
        fs_util.client_clean_up("", client3, new_client3_mouting_dir, "umount",
                                client_name=new_client3_name)
        fs_util.client_clean_up(
            client_info["fuse_clients"],
            client_info["kernel_clients"],
            client_info["mounting_dir"],
            "umount",
        )
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-11256 (fuse): cephfs client IO while MDS nodes are disrupted.

    Activates multiple MDSs, adds standby ranks, then runs fio/crefi/
    smallfile/dd IO while rebooting MDS nodes, restarting the active MDS,
    cutting their network and killing the daemons, checking cluster health
    before and after.

    :param ceph_cluster: ceph cluster object supplied by the framework
    :param kw: test kwargs; reads num_of_osds and build/rhbuild from config
    :return: 0 on success, 1 on failure
    """
    try:
        start = timeit.default_timer()
        tc = '11256-fuse'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        num_of_osds = config.get('num_of_osds')
        # Fix: sibling tests pass build to get_clients() and
        # check_ceph_healthly(); this test omitted it at all three call sites.
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(
            client3, client_info['mounting_dir'], client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4, client_info['mounting_dir'], client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise Exception("kernel mount failed")
        cluster_health_beforeIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), build, None, 300)
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        rc = fs_util.standby_rank(client_info['mds_nodes'],
                                  client_info['mon_node'],
                                  todo='add_rank')
        if rc == 0:
            log.info("Added standby ranks")
        else:
            raise Exception("Adding standby ranks failed")
        dirs, rc = fs_util.mkdir(client1, 0, 4,
                                 client_info['mounting_dir'], dir_name)
        if rc == 0:
            log.info("Directories created")
        else:
            raise CommandFailed("Directory creation failed")
        dirs = dirs.split('\n')
        # IO while every MDS node reboots.
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, client_info['mounting_dir'],
                    dirs[1], 0, 1, iotype='fio')
            p.spawn(fs_util.stress_io, client2, client_info['mounting_dir'],
                    dirs[0], 0, 1, iotype='crefi')
            p.spawn(fs_util.stress_io, client3, client_info['mounting_dir'],
                    dirs[2], '', '', iotype='smallfile_create', fnum=1000,
                    fsize=1024)
            p.spawn(fs_util.stress_io, client4, client_info['mounting_dir'],
                    dirs[3], 0, 10, iotype='dd')
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.reboot, node)
        # IO while the active MDS is restarted through systemctl.
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, client_info['mounting_dir'],
                    dirs[2], 0, 1, iotype='fio')
            p.spawn(fs_util.stress_io, client2, client_info['mounting_dir'],
                    dirs[0], 0, 1, iotype='crefi')
            p.spawn(fs_util.stress_io, client3, client_info['mounting_dir'],
                    dirs[1], '', '', iotype='smallfile_create', fnum=1000,
                    fsize=1024)
            p.spawn(fs_util.stress_io, client4, client_info['mounting_dir'],
                    dirs[3], 0, 10, iotype='dd')
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.daemon_systemctl, node, 'mds',
                        'active_mds_restart')
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)
        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), build, None, 300)
        if cluster_health_afterIO == cluster_health_beforeIO:
            log.info('cluster is healthy')
        else:
            log.error("cluster is not healty")
            return 1
        # IO while the MDS nodes lose network connectivity.
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, client_info['mounting_dir'],
                    dirs[3], 0, 1, iotype='fio')
            p.spawn(fs_util.stress_io, client2, client_info['mounting_dir'],
                    dirs[0], 0, 1, iotype='crefi')
            p.spawn(fs_util.stress_io, client3, client_info['mounting_dir'],
                    dirs[2], 0, 1, iotype='smallfile_create', fnum=10,
                    fsize=1024)
            p.spawn(fs_util.stress_io, client4, client_info['mounting_dir'],
                    dirs[1], 0, 1, iotype='dd')
            for node in client_info['mds_nodes']:
                fs_util.network_disconnect(node)
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)
        # IO while the MDS daemons are killed.
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, client_info['mounting_dir'],
                    dirs[0], 0, 1, iotype='fio')
            p.spawn(fs_util.stress_io, client2, client_info['mounting_dir'],
                    dirs[1], 0, 1, iotype='crefi')
            p.spawn(fs_util.stress_io, client3, client_info['mounting_dir'],
                    dirs[3], 0, 1, iotype='smallfile_create', fnum=10,
                    fsize=1024)
            p.spawn(fs_util.stress_io, client4, client_info['mounting_dir'],
                    dirs[2], 0, 1, iotype='dd')
            for node in client_info['mds_nodes']:
                fs_util.pid_kill(node, 'mds')
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)
        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds,
            len(client_info['mon_node']), build, None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            log.info("Cluster is healthy")
        else:
            return 1
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            # Fix: the cleanup return code was discarded in this branch, so
            # the 'rc == 0' check below tested the stale standby_rank rc.
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'],
                                         'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'],
                                         'umount')
        if rc == 0:
            log.info('Cleaning up successfull')
        fs_util.standby_rank(client_info['mds_nodes'],
                             client_info['mon_node'],
                             todo='add_rank_revert')
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'],
                                         'umount')
            rc = fs_util.standby_rank(client_info['mds_nodes'],
                                      client_info['mon_node'],
                                      todo='add_rank_revert')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'],
                                         'umount')
            rc = fs_util.standby_rank(client_info['mds_nodes'],
                                      client_info['mon_node'],
                                      todo='add_rank_revert')
        if rc == 0:
            log.info('Cleaning up successfull')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-11254 (fuse clients): cephfs IO resilience with standby MDS ranks.

    Mounts two fuse and two kernel clients, adds standby MDS ranks and fstab
    entries, then drives mixed IO while rebooting clients and mon nodes,
    disconnecting mon networking and killing mon daemons.  The cluster health
    captured before IO must match the health captured after IO.

    Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        tc = "11254-fuse_clients"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        fs_util = FsUtils(ceph_cluster)
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        # FsUtils helpers expect each client wrapped in a one-element list.
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(
            client3, client_info["mounting_dir"], client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(
            client4, client_info["mounting_dir"], client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        # Baseline health; compared against the post-IO state at the end.
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0], num_of_osds,
            len(client_info["mon_node"]), build, None, 300)
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        rc = fs_util.standby_rank(
            client_info["mds_nodes"], client_info["mon_node"], todo="add_rank")
        if rc == 0:
            log.info("Added standby ranks")
        else:
            # FIX: was a bare Exception; CommandFailed routes through the
            # handler below, which unmounts the clients before returning.
            raise CommandFailed("Adding standby ranks failed")
        client1[0].exec_command(
            cmd="sudo mkdir %s%s" % (client_info["mounting_dir"], dir_name))
        if client1[0].node.exit_status == 0:
            log.info("Dir created")
        else:
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
            )
            # FIX: previously fell through after cleanup and kept running IO
            # against unmounted clients; abort the test run instead.
            raise CommandFailed("Dir creation failed")
        rc1 = fs_util.fstab_entry(
            client1, client_info["mounting_dir"], action="doEntry")
        rc2 = fs_util.fstab_entry(
            client2, client_info["mounting_dir"], action="doEntry")
        if rc1 == 0 and rc2 == 0:
            log.info("FSentry for clients are done")
        else:
            raise CommandFailed("FsEntry failed")
        # Mixed IO while client1 reboots.
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info["mounting_dir"], "g", "write")
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(fs_util.stress_io, client2, client_info["mounting_dir"],
                    dir_name, 0, 50, iotype="fio")
            p.spawn(fs_util.stress_io, client3, client_info["mounting_dir"],
                    dir_name, 0, 1, iotype="smallfile_create", fnum=1000,
                    fsize=100)
            p.spawn(fs_util.stress_io, client4, client_info["mounting_dir"],
                    dir_name, 0, 1, iotype="smallfile")
            p.spawn(fs_util.reboot, client1[0])
        # Mixed IO while client2 reboots.
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info["mounting_dir"], "g", "write")
            p.spawn(fs_util.read_write_IO, client4,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(fs_util.stress_io, client1, client_info["mounting_dir"],
                    dir_name, 0, 1, iotype="fio")
            p.spawn(fs_util.stress_io, client3, client_info["mounting_dir"],
                    dir_name, 0, 10, iotype="dd")
            p.spawn(fs_util.stress_io, client4, client_info["mounting_dir"],
                    dir_name, 0, 500, iotype="touch")
            p.spawn(fs_util.reboot, client2[0])
        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0], num_of_osds,
            len(client_info["mon_node"]), build, None, 300)
        if cluster_health_afterIO == cluster_health_beforeIO:
            log.info("cluster is healthy")
        else:
            log.error("cluster is not healty")
            return 1
        # IO while every mon node reboots.
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, client_info["mounting_dir"],
                    dir_name, 0, 10, iotype="fio")
            p.spawn(fs_util.stress_io, client2, client_info["mounting_dir"],
                    dir_name, 0, 10, iotype="dd")
            p.spawn(fs_util.stress_io, client3, client_info["mounting_dir"],
                    dir_name, 0, 500, iotype="touch")
            for node in client_info["mon_node"]:
                p.spawn(fs_util.reboot, node)
        # IO while every mon node loses networking.
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, client_info["mounting_dir"],
                    dir_name, 0, 10, iotype="fio")
            p.spawn(fs_util.stress_io, client2, client_info["mounting_dir"],
                    dir_name, 0, 10, iotype="dd")
            p.spawn(fs_util.stress_io, client3, client_info["mounting_dir"],
                    dir_name, 0, 500, iotype="touch")
            for node in client_info["mon_node"]:
                fs_util.network_disconnect(node)
        # IO while the mon daemon on every mon node is killed.
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, client_info["mounting_dir"],
                    dir_name, 0, 10, iotype="fio")
            p.spawn(fs_util.stress_io, client2, client_info["mounting_dir"],
                    dir_name, 0, 10, iotype="dd")
            p.spawn(fs_util.stress_io, client3, client_info["mounting_dir"],
                    dir_name, 0, 500, iotype="touch")
            for node in client_info["mon_node"]:
                fs_util.pid_kill(node, "mon")
        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0], num_of_osds,
            len(client_info["mon_node"]), build, None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            log.info("Cluster is healthy")
        else:
            return 1
        log.info("Cleaning up!-----")
        # Kernel clients on deb platforms are excluded from umount cleanup.
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(client_info["fuse_clients"],
                                         client_info["kernel_clients"],
                                         client_info["mounting_dir"],
                                         "umount")
            rc = fs_util.standby_rank(client_info["mds_nodes"],
                                      client_info["mon_node"],
                                      todo="add_rank_revert")
            if rc == 0:
                log.info("removed standby ranks")
            rc1 = fs_util.fstab_entry(client1, client_info["mounting_dir"],
                                      action="revertEntry")
            rc2 = fs_util.fstab_entry(client2, client_info["mounting_dir"],
                                      action="revertEntry")
            if rc1 == 0 and rc2 == 0:
                log.info("FSentry for clients are done")
            else:
                return 1
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"],
                                         "umount")
            rc = fs_util.standby_rank(client_info["mds_nodes"],
                                      client_info["mon_node"],
                                      todo="add_rank_revert")
            if rc == 0:
                log.info("removed standby ranks")
            else:
                return 1
            rc1 = fs_util.fstab_entry(client1, client_info["mounting_dir"],
                                      action="revertEntry")
            rc2 = fs_util.fstab_entry(client2, client_info["mounting_dir"],
                                      action="revertEntry")
            if rc1 == 0 and rc2 == 0:
                log.info("FSentry for clients are done")
            else:
                return 1
        if rc == 0:
            log.info("Cleaning up successfull")
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(client_info["fuse_clients"],
                                         client_info["kernel_clients"],
                                         client_info["mounting_dir"],
                                         "umount")
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"],
                                         "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-10528/10529: parallel client IO validation and file locking.

    10528: runs read/write/stress IO from two fuse and two kernel clients in
    parallel and verifies the collected return counts.
    10529: runs file_locking from two clients concurrently and checks that
    locking kept the data consistent (two matching md5 results).

    Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        tc = "10528"
        log.info("Running cephfs %s test case" % tc)
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        # FsUtils helpers expect each client wrapped in a one-element list.
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(
            client3, client_info["mounting_dir"], client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(
            client4, client_info["mounting_dir"], client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        # CEPH-10528: concurrent IO from all four clients.
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info["mounting_dir"], "g", "write")
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(fs_util.stress_io, client3, client_info["mounting_dir"],
                    "", 0, 2, iotype="smallfile")
            p.spawn(fs_util.stress_io, client2, client_info["mounting_dir"],
                    "", 0, 2, iotype="smallfile")
            p.spawn(fs_util.read_write_IO, client4,
                    client_info["mounting_dir"], "g", "readwrite")
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                (return_counts, rc) = op
        log.info("Test completed for CEPH-%s" % tc)
        print("Results:")
        result = fs_util.rc_verify(tc, return_counts)
        print(result)
        tc = "10529"
        log.info("Test for CEPH-%s will start:" % tc)
        # CEPH-10529: two clients lock/write the same file concurrently.
        md5sum_file_lock = []
        with parallel() as p:
            p.spawn(fs_util.file_locking, client1, client_info["mounting_dir"])
            p.spawn(fs_util.file_locking, client2, client_info["mounting_dir"])
            for output in p:
                md5sum_file_lock = output
        if 0 in md5sum_file_lock:
            log.info("file locking success")
        else:
            raise CommandFailed("file locking failed")
        if len(md5sum_file_lock) == 2:
            log.info(
                "File Locking mechanism is working,data is not corrupted,"
                "test case CEPH-%s passed" % tc)
        else:
            log.error(
                "File Locking mechanism is failed,data is corrupted,"
                "test case CEPH-%s failed" % (tc))
        log.info("Test completed for CEPH-%s" % (tc))
        log.info("Cleaning up!-----")
        # Kernel clients on deb platforms are excluded from umount cleanup.
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(client_info["fuse_clients"],
                                         client_info["kernel_clients"],
                                         client_info["mounting_dir"],
                                         "umount")
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"],
                                         "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        else:
            raise CommandFailed("Cleanup failed")
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        (mins, secs) = divmod(total_time, 60)
        (hours, mins) = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(client_info["fuse_clients"],
                                         client_info["kernel_clients"],
                                         client_info["mounting_dir"],
                                         "umount")
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"],
                                         "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        # FIX: handler previously fell off the end and returned None;
        # report failure like every sibling test.
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-11262: cephfs IO continuity across MDS heartbeat checks and OSD
    node power failures.

    Mounts two fuse and two kernel clients, then runs mixed IO while polling
    the MDS heartbeat map, and again while power-failing each OSD node via
    node_power_failure().  Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        log.info("Running test 11262")
        config = kw.get('config')
        # OSP credentials are required by node_power_failure() below.
        osp_cred = config.get('osp_cred')
        fs_util = FsUtils(ceph_cluster)
        # NOTE(review): unlike the sibling tests, get_clients() is called
        # without a build argument here -- confirm against the FsUtils API.
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info('Got client info')
        else:
            raise CommandFailed('fetching client info failed')
        # FsUtils helpers expect each client wrapped in a one-element list.
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info('got auth keys')
        else:
            raise CommandFailed('auth list failed')
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info('Fuse mount passed')
        else:
            raise CommandFailed('Fuse mount failed')
        rc3 = fs_util.kernel_mount(
            client3, client_info['mounting_dir'], client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4, client_info['mounting_dir'], client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info('kernel mount passed')
        else:
            raise CommandFailed('kernel mount failed')
        dir_name = 'dir'
        client1[0].exec_command(
            cmd='sudo mkdir %s%s' % (client_info['mounting_dir'], dir_name))
        # Mixed IO from all clients while each MDS heartbeat map is polled.
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(fs_util.stress_io, client2, client_info['mounting_dir'],
                    dir_name, 0, 50, iotype='fio')
            p.spawn(fs_util.stress_io, client3, client_info['mounting_dir'],
                    dir_name, 0, 1, iotype='smallfile_create', fnum=10000,
                    fsize=100)
            p.spawn(fs_util.stress_io, client4, client_info['mounting_dir'],
                    dir_name, 0, 1, iotype='crefi')
            # heartbeat_map() == 0 means no stale heartbeat entry was found.
            for node in client_info['mds_nodes']:
                rc = fs_util.heartbeat_map(node)
                if rc == 0:
                    log.info('heartbeat_map entry not found')
                else:
                    return 1
        # Same IO mix again, this time power-failing every OSD node in turn
        # while the spawned IO is in flight.
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(fs_util.stress_io, client2, client_info['mounting_dir'],
                    dir_name, 0, 50, iotype='fio')
            for osd in client_info['osd_nodes']:
                node_power_failure(osp_cred, name=osd.hostname)
            p.spawn(fs_util.stress_io, client3, client_info['mounting_dir'],
                    dir_name, 0, 1, iotype='smallfile_create', fnum=10000,
                    fsize=100)
            p.spawn(fs_util.stress_io, client4, client_info['mounting_dir'],
                    dir_name, 0, 1, iotype='crefi')
        log.info('Test completed for CEPH-11262')
        log.info('Cleaning up!-----')
        # Kernel clients on deb platforms are excluded from umount cleanup.
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'],
                                         'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'],
                                         'umount')
        if rc == 0:
            log.info('Cleaning up successfull')
        else:
            raise CommandFailed('Cleanup failed')
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-11232/11233: verify MDS subtree changes under dir fragmentation.

    Captures the active MDS "get subtrees" output three times -- before bulk
    file creation, after creating 5000 files, and after deleting them -- and
    passes when all three snapshots differ.  Returns 0 on success, 1 on
    failure.
    """
    try:
        start = timeit.default_timer()
        tc = "11232 and 11233"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        # FsUtils helpers expect each client wrapped in a one-element list.
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(
            client3, client_info["mounting_dir"], client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(
            client4, client_info["mounting_dir"], client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        # Baseline IO from all clients; results collected for validation.
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info["mounting_dir"], "g", "write")
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(fs_util.stress_io, client3, client_info["mounting_dir"],
                    "", 0, 2, iotype="crefi")
            p.spawn(fs_util.read_write_IO, client4,
                    client_info["mounting_dir"], "g", "readwrite")
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("", return_counts)
        # NOTE(review): the whole subtree test only runs when the baseline IO
        # validates; otherwise the function falls through and returns None --
        # confirm whether that is intended.
        if "Data validation success" in result:
            print("Data validation success")
            tc = "11232 and 11233"
            log.info("Execution of Test cases %s started:" % (tc))
            fs_util.allow_dir_fragmentation(client_info["mds_nodes"])
            log.info("Creating directory:")
            # Only the first fuse client creates the directory.
            for node in client_info["fuse_clients"]:
                out, rc = node.exec_command(
                    cmd="sudo mkdir %s%s" % (client_info["mounting_dir"],
                                             dir_name))
                print(out.read().decode())
                break
            active_mds_node_1, active_mds_node_2, rc = fs_util.get_active_mdss(
                client_info["mds_nodes"])
            if rc == 0:
                log.info("Got active mdss")
            else:
                raise CommandFailed("getting active-mdss failed")
            # Snapshot 1: subtree state before the bulk file creation.
            node1_before_io, _, rc = fs_util.get_mds_info(
                active_mds_node_1, active_mds_node_2, info="get subtrees")
            if rc == 0:
                log.info("Got mds subtree info")
            else:
                raise CommandFailed("Mds info command failed")
            # 5000 files created in parallel across all four clients.
            with parallel() as p:
                p.spawn(fs_util.stress_io, client1,
                        client_info["mounting_dir"], dir_name, 0, 1000,
                        iotype="touch")
                p.spawn(fs_util.stress_io, client3,
                        client_info["mounting_dir"], dir_name, 1000, 2000,
                        iotype="touch")
                p.spawn(fs_util.stress_io, client2,
                        client_info["mounting_dir"], dir_name, 2000, 3000,
                        iotype="touch")
                p.spawn(fs_util.stress_io, client4,
                        client_info["mounting_dir"], dir_name, 3000, 4000,
                        iotype="touch")
                p.spawn(fs_util.stress_io, client3,
                        client_info["mounting_dir"], dir_name, 4000, 5000,
                        iotype="touch")
            # Snapshot 2: subtree state after file creation.
            node1_after_io, _, rc = fs_util.get_mds_info(
                active_mds_node_1, active_mds_node_2, info="get subtrees")
            if rc == 0:
                log.info("Got mds subtree info")
            else:
                raise CommandFailed("Mds info command failed")
            # No "umount" action here: this cleanup deletes the created data
            # while keeping the mounts in place.
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
            )
            if rc == 0:
                log.info("Cleaning mount success")
            else:
                raise CommandFailed("Cleaning mount failed")
            # Snapshot 3: subtree state after deletion.
            node1_after_del, _, rc = fs_util.get_mds_info(
                active_mds_node_1, active_mds_node_2, info="get subtrees")
            if rc == 0:
                log.info("Got mds subtree info")
            else:
                raise CommandFailed("Mds info command failed")
            log.info("Execution of Test case 11232 and 11233 ended:")
            print("Results:")
            # Pass only if the subtree map changed at each stage.
            if node1_before_io != node1_after_io and \
                    node1_after_io != node1_after_del:
                log.info("Test case %s Passed" % (tc))
            else:
                return 1
            # Kernel clients on deb platforms are excluded from umount.
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
                rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)
            else:
                rc_client = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
                rc_mds = fs_util.mds_cleanup(client_info["mds_nodes"], None)
            if rc_client == 0 and rc_mds == 0:
                log.info("Cleaning up successfull")
            else:
                return 1
            print("Script execution time:------")
            stop = timeit.default_timer()
            total_time = stop - start
            mins, secs = divmod(total_time, 60)
            hours, mins = divmod(mins, 60)
            print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
            return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(client_info["fuse_clients"],
                                         client_info["kernel_clients"],
                                         client_info["mounting_dir"],
                                         "umount")
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"],
                                         "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-11293/11296/11297/11295: deep/wide directory trees and metadata.

    11293: create a 1000-breadth x 1000-depth tree with crefi.
    11296: copy the tree and diff the copy against the original.
    11297: move the tree into a third directory.
    11295: overwrite a file twice and confirm `ls -c -ltd` metadata changes.

    Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        # FsUtils helpers expect each client wrapped in a one-element list.
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(
            client3, client_info['mounting_dir'], client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4, client_info['mounting_dir'], client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        tc1 = '11293'
        tc2 = '11296'
        tc3 = '11297'
        tc4 = '11295'
        # Three random 10-char directory names under the mount point.
        dir1 = ''.join(
            random.choice(
                string.ascii_lowercase + string.digits) for _ in range(10))
        dir2 = ''.join(
            random.choice(
                string.ascii_lowercase + string.digits) for _ in range(10))
        dir3 = ''.join(
            random.choice(
                string.ascii_lowercase + string.digits) for _ in range(10))
        results = []
        return_counts = []
        log.info("Create files and directories of 1000 depth and 1000 breadth")
        # Only the first fuse client runs the scenario (note the trailing
        # `break` at the end of this loop body).
        for client in client_info['fuse_clients']:
            client.exec_command(
                cmd='sudo mkdir %s%s' % (client_info['mounting_dir'], dir1))
            client.exec_command(
                cmd='sudo mkdir %s%s' % (client_info['mounting_dir'], dir2))
            client.exec_command(
                cmd='sudo mkdir %s%s' % (client_info['mounting_dir'], dir3))
            log.info('Execution of testcase %s started' % tc1)
            out, rc = client.exec_command(
                cmd='sudo crefi %s%s --fop create --multi -b 1000 -d 1000 '
                    '-n 1 -T 5 --random --min=1K --max=10K'
                    % (client_info['mounting_dir'], dir1),
                long_running=True)
            log.info('Execution of testcase %s ended' % tc1)
            if client.node.exit_status == 0:
                results.append("TC %s passed" % tc1)
            log.info('Execution of testcase %s started' % tc2)
            # Copy the tree, then verify the copy matches byte-for-byte.
            client.exec_command(
                cmd='sudo cp -r %s%s/* %s%s/'
                    % (client_info['mounting_dir'], dir1,
                       client_info['mounting_dir'], dir2))
            client.exec_command(
                cmd="diff -qr %s%s %s%s/"
                    % (client_info['mounting_dir'], dir1,
                       client_info['mounting_dir'], dir2))
            log.info('Execution of testcase %s ended' % tc2)
            if client.node.exit_status == 0:
                results.append("TC %s passed" % tc2)
            log.info('Execution of testcase %s started' % tc3)
            out, rc = client.exec_command(
                cmd='sudo mv %s%s/* %s%s/'
                    % (client_info['mounting_dir'], dir1,
                       client_info['mounting_dir'], dir3))
            log.info('Execution of testcase %s ended' % tc3)
            if client.node.exit_status == 0:
                results.append("TC %s passed" % tc3)
            log.info('Execution of testcase %s started' % tc4)
            # NOTE(review): this inner loop reuses (shadows) the outer loop
            # variable `client`; it runs tc4 on the first non-deb client only.
            for client in client_info['clients']:
                if client.pkg_type != 'deb':
                    client.exec_command(
                        cmd='sudo dd if=/dev/zero of=%s%s.txt bs=100M '
                            'count=5' % (client_info['mounting_dir'],
                                         client.node.hostname))
                    out1, rc1 = client.exec_command(
                        cmd='sudo ls -c -ltd -- %s%s.*'
                            % (client_info['mounting_dir'],
                               client.node.hostname))
                    client.exec_command(
                        cmd='sudo dd if=/dev/zero of=%s%s.txt bs=200M '
                            'count=5' % (client_info['mounting_dir'],
                                         client.node.hostname))
                    out2, rc2 = client.exec_command(
                        cmd='sudo ls -c -ltd -- %s%s.*'
                            % (client_info['mounting_dir'],
                               client.node.hostname))
                    # The ctime listing must differ after the rewrite.
                    a = out1.read().decode()
                    print("------------")
                    b = out2.read().decode()
                    if a != b:
                        return_counts.append(out1.channel.recv_exit_status())
                        return_counts.append(out2.channel.recv_exit_status())
                    else:
                        raise CommandFailed("Metadata info command failed")
                    break
            log.info('Execution of testcase %s ended' % tc4)
            print(return_counts)
            # All collected exit statuses must be identical (all zero).
            rc_set = set(return_counts)
            if len(rc_set) == 1:
                results.append("TC %s passed" % tc4)
            print("Testcase Results:")
            for res in results:
                print(res)
            break
        log.info('Cleaning up!-----')
        # Kernel clients on deb platforms are excluded from umount cleanup.
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'],
                                         'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'],
                                         'umount')
        if rc == 0:
            log.info('Cleaning up successfull')
        else:
            return 1
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'],
                                         'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'],
                                         'umount')
        if rc == 0:
            log.info('Cleaning up successfull')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-11222: cluster health across bulk create/rename of nested dirs.

    Creates a 10x10 crefi directory tree, stress-IOs it, deletes it,
    recreates and renames it under IO, and requires the cluster health after
    all IO to match the health captured before it.

    Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        tc = "11222"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        # FIX: osd count was hard-coded to 12; take it from config like the
        # sibling tests do, keeping 12 as the fallback for old configs.
        num_of_osds = config.get("num_of_osds", 12)
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        # FsUtils helpers expect each client wrapped in a one-element list.
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(
            client3, client_info["mounting_dir"], client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(
            client4, client_info["mounting_dir"], client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        # FIX: pass a mon node and the real mon count, as the sibling tests
        # do -- previously the whole mon list and a hard-coded 1 were passed.
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0], num_of_osds,
            len(client_info["mon_node"]), build, None, 300)
        dir1 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        # Only the first client builds the tree (note the `break`).
        for client in client_info["clients"]:
            log.info("Creating directory:")
            client.exec_command(
                cmd="sudo mkdir %s%s" % (client_info["mounting_dir"], dir1))
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd="sudo crefi %s%s --fop create --multi -b 10 -d 10 "
                    "--random --min=1K --max=10K"
                    % (client_info["mounting_dir"], dir1))
            print(out.read().decode())
            break
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, client_info["mounting_dir"],
                    dir1, 0, 5, iotype="fio")
            p.spawn(fs_util.stress_io, client1, client_info["mounting_dir"],
                    dir1, 0, 100, iotype="touch")
            p.spawn(fs_util.stress_io, client2, client_info["mounting_dir"],
                    dir1, 0, 5, iotype="dd")
            p.spawn(fs_util.stress_io, client2, client_info["mounting_dir"],
                    dir1, 0, 5, iotype="crefi")
            for op in p:
                return_counts, rc = op
        result1 = fs_util.rc_verify("", return_counts)
        print(result1)
        for client in client_info["clients"]:
            client.exec_command(
                cmd="sudo rm -rf %s%s" % (client_info["mounting_dir"], dir1))
            break
        # Recreate the tree and rename every entry while IO runs again.
        for client in client_info["clients"]:
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd="sudo crefi %s%s --fop create --multi -b 10 -d 10 "
                    "--random --min=1K --max=10K"
                    % (client_info["mounting_dir"], dir1))
            print(out.read().decode())
            log.info("Renaming the dirs:")
            out, rc = client.exec_command(
                cmd="sudo crefi %s%s --fop rename --multi -b 10 -d 10 "
                    "--random --min=1K --max=10K"
                    % (client_info["mounting_dir"], dir1))
            print(out.read().decode())
            break
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, client_info["mounting_dir"],
                    dir1, 0, 5, iotype="fio")
            p.spawn(fs_util.stress_io, client1, client_info["mounting_dir"],
                    dir1, 0, 100, iotype="touch")
            p.spawn(fs_util.stress_io, client2, client_info["mounting_dir"],
                    dir1, 0, 5, iotype="dd")
            p.spawn(fs_util.stress_io, client2, client_info["mounting_dir"],
                    dir1, 0, 5, iotype="crefi")
            for op in p:
                return_counts, rc = op
        result2 = fs_util.rc_verify("", return_counts)
        print(result2)
        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0], num_of_osds,
            len(client_info["mon_node"]), build, None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            print("Testcase %s passed" % (tc))
            log.info("Cleaning up!-----")
            # Kernel clients on deb platforms are excluded from umount.
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(client_info["fuse_clients"],
                                             client_info["kernel_clients"],
                                             client_info["mounting_dir"],
                                             "umount")
            else:
                rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                             client_info["mounting_dir"],
                                             "umount")
            if rc == 0:
                log.info("Cleaning up successfull")
            print("Script execution time:------")
            stop = timeit.default_timer()
            total_time = stop - start
            mins, secs = divmod(total_time, 60)
            hours, mins = divmod(mins, 60)
            print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
            return 0
        else:
            # FIX: previously fell off the end and returned None when the
            # health check mismatched; report failure explicitly.
            log.error("Cluster health changed after IO")
            return 1
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(client_info["fuse_clients"],
                                    client_info["kernel_clients"],
                                    client_info["mounting_dir"],
                                    "umount")
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"],
                                    "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CephFS test CEPH-10625/11225.

    Mounts two fuse and two kernel clients, activates multiple MDSs, runs
    mixed IO, then exercises smallfile create/rename/delete workloads and
    crefi runs against a directory whose name is made of special characters.
    Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        tc = '10625,11225'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        # Single-element lists: FsUtils helpers expect client lists, not nodes.
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            log.error("Activate multiple mdss failed")
            return 1
        # Mixed read/write/crefi IO on all four clients in parallel;
        # return_counts/rc keep only the last spawned result.
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client2,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                '',
                0,
                1,
                iotype='crefi',
            )
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'readwrite')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify('', return_counts)
        if result == 'Data validation success':
            dirs, rc = fs_util.mkdir(client1, 0, 6,
                                     client_info['mounting_dir'], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split('\n')
            # smallfile create: many small files per directory.
            with parallel() as p:
                p.spawn(fs_util.stress_io, client1,
                        client_info['mounting_dir'], dirs[0], 0, 1,
                        iotype='smallfile_create', fnum=1000, fsize=10)
                p.spawn(fs_util.stress_io, client2,
                        client_info['mounting_dir'], dirs[1], 0, 1,
                        iotype='smallfile_create', fnum=1000, fsize=10)
                p.spawn(fs_util.stress_io, client3,
                        client_info['mounting_dir'], dirs[2], 0, 1,
                        iotype='smallfile_create', fnum=1000, fsize=10)
                p.spawn(fs_util.stress_io, client4,
                        client_info['mounting_dir'], dirs[2], 0, 1,
                        iotype='smallfile_create', fnum=1000, fsize=10)
            with parallel() as p:
                p.spawn(fs_util.stress_io, client1,
                        client_info['mounting_dir'], dirs[0], 0, 1,
                        iotype='smallfile_rename', fnum=1000, fsize=10)
            with parallel() as p:
                p.spawn(fs_util.stress_io, client1,
                        client_info['mounting_dir'], dirs[0], 0, 1,
                        iotype='smallfile_delete-renamed', fnum=1000,
                        fsize=10)
                p.spawn(fs_util.stress_io, client4,
                        client_info['mounting_dir'], dirs[2], 0, 1,
                        iotype='smallfile_delete', fnum=1000, fsize=10)
            # Same cycle with one large (~1 GB nominal) file per directory.
            # NOTE(review): dirs[6] assumes mkdir(client1, 0, 6, ...) yielded
            # at least 7 entries after split('\n') — verify FsUtils.mkdir
            # range semantics.
            with parallel() as p:
                p.spawn(fs_util.stress_io, client1,
                        client_info['mounting_dir'], dirs[3], 0, 1,
                        iotype='smallfile_create', fnum=1, fsize=1000000)
                p.spawn(fs_util.stress_io, client2,
                        client_info['mounting_dir'], dirs[4], 0, 1,
                        iotype='smallfile_create', fnum=1, fsize=1000000)
                p.spawn(fs_util.stress_io, client3,
                        client_info['mounting_dir'], dirs[5], 0, 1,
                        iotype='smallfile_create', fnum=1, fsize=1000000)
                p.spawn(fs_util.stress_io, client4,
                        client_info['mounting_dir'], dirs[6], 0, 1,
                        iotype='smallfile_create', fnum=1, fsize=1000000)
            with parallel() as p:
                p.spawn(fs_util.stress_io, client1,
                        client_info['mounting_dir'], dirs[3], 0, 1,
                        iotype='smallfile_rename', fnum=1, fsize=1000000)
            with parallel() as p:
                p.spawn(fs_util.stress_io, client1,
                        client_info['mounting_dir'], dirs[3], 0, 1,
                        iotype='smallfile_delete-renamed', fnum=1,
                        fsize=1000000)
                p.spawn(fs_util.stress_io, client4,
                        client_info['mounting_dir'], dirs[4], 0, 1,
                        iotype='smallfile_delete', fnum=1, fsize=1000000)
            # Directory whose name is entirely special characters; quoted in
            # every shell command below.
            dir_name = '!@#$%^&*()-_=+[]{};:,.<>?'
            out, rc = client1[0].exec_command(
                cmd="sudo mkdir '%s%s'" %
                (client_info['mounting_dir'], dir_name))
            if client1[0].node.exit_status == 0:
                log.info("Directory created")
            else:
                raise CommandFailed("Directory creation failed")
            # 255-char random file names: maximum filename length.
            for client in client_info['fuse_clients']:
                file_name = ''.join(
                    random.choice(string.ascii_lowercase + string.digits)
                    for _ in range(255))
                client.exec_command(
                    cmd="sudo touch '%s%s/%s'" %
                    (client_info['mounting_dir'], dir_name, file_name))
            for client in client_info['kernel_clients']:
                if client.pkg_type == 'rpm':
                    file_name = ''.join(
                        random.choice(string.ascii_lowercase + string.digits)
                        for _ in range(255))
                    client.exec_command(
                        cmd="sudo touch '%s%s/%s'" %
                        (client_info['mounting_dir'], dir_name, file_name))
            # Five rounds of crefi: one fixed create run, then six random
            # filesystem operations per fuse client.
            for num in range(0, 5):
                for client in client_info['fuse_clients']:
                    client.exec_command(
                        cmd="sudo crefi %s'%s' --fop create -t %s "
                        "--multi -b 10 -d 10 -n 10 -T 10 "
                        "--random --min=1K --max=%dK" %
                        (client_info['mounting_dir'], dir_name, 'text', 5),
                        long_running=True)
                    for i in range(0, 6):
                        ops = [
                            'create', 'rename', 'chmod', 'chown', 'chgrp',
                            'setxattr'
                        ]
                        rand_ops = random.choice(ops)
                        ftypes = ['text', 'sparse', 'binary', 'tar']
                        rand_filetype = random.choice(ftypes)
                        rand_count = random.randint(2, 10)
                        client.exec_command(
                            cmd='sudo crefi %s%s --fop %s -t %s '
                            '--multi -b 10 -d 10 -n 10 -T 10 '
                            '--random --min=1K --max=%dK' %
                            (client_info['mounting_dir'], dir_name, rand_ops,
                             rand_filetype, rand_count),
                            long_running=True)
        log.info('Cleaning up!-----')
        # Kernel clients on deb-based systems are skipped during cleanup.
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'],
                                         'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'],
                                         'umount')
        if rc == 0:
            log.info('Cleaning up successfull')
        else:
            return 1
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        # Test-step failure: best-effort unmount, then report failure.
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'],
                                         'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'],
                                         'umount')
        if rc == 0:
            log.info('Cleaning up successfull')
        return 1
    except Exception as e:
        # Unexpected failure: no cleanup attempted here.
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CephFS tests CEPH-11293/11296/11297/11295.

    11293: deep/wide file tree creation via smallfile; 11296: cp -r and
    diff between directories; 11297: mv of the tree; 11295: metadata
    (ls -c -ltd) output must change after the file is rewritten.
    Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        # Single-element lists: FsUtils helpers expect client lists.
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        tc1 = "11293"
        tc2 = "11296"
        tc3 = "11297"
        tc4 = "11295"
        # Three random 10-char directory names under the mount point.
        dir1 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        dir2 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        dir3 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        results = []
        return_counts = []
        log.info("Create files and directories of 1000 depth and 1000 breadth")
        # Only the first fuse client is used — loop breaks at the end.
        for client in client_info["fuse_clients"]:
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir1))
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir2))
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], dir3))
            log.info("Execution of testcase %s started" % tc1)
            out, rc = client.exec_command(
                sudo=True,
                cmd=
                f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
                f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                f"{client_info['mounting_dir']}{dir1}",
                long_running=True,
            )
            log.info("Execution of testcase %s ended" % tc1)
            if client.node.exit_status == 0:
                results.append("TC %s passed" % tc1)
            log.info("Execution of testcase %s started" % tc2)
            # Copy the tree then verify copy == original via diff -qr.
            client.exec_command(cmd="sudo cp -r %s%s/* %s%s/" %
                                (client_info["mounting_dir"], dir1,
                                 client_info["mounting_dir"], dir2))
            client.exec_command(cmd="diff -qr %s%s %s%s/" %
                                (client_info["mounting_dir"], dir1,
                                 client_info["mounting_dir"], dir2))
            log.info("Execution of testcase %s ended" % tc2)
            if client.node.exit_status == 0:
                results.append("TC %s passed" % tc2)
            log.info("Execution of testcase %s started" % tc3)
            out, rc = client.exec_command(cmd="sudo mv %s%s/* %s%s/" %
                                          (client_info["mounting_dir"], dir1,
                                           client_info["mounting_dir"],
                                           dir3))
            log.info("Execution of testcase %s ended" % tc3)
            if client.node.exit_status == 0:
                results.append("TC %s passed" % tc3)
            log.info("Execution of testcase %s started" % tc4)
            # NOTE(review): this inner loop variable shadows the outer
            # `client`; only the first non-deb client is used (break below).
            for client in client_info["clients"]:
                if client.pkg_type != "deb":
                    # Write the file, capture its metadata listing, rewrite
                    # it larger, and capture the listing again.
                    client.exec_command(
                        cmd="sudo dd if=/dev/zero of=%s%s.txt bs=100M "
                        "count=5" %
                        (client_info["mounting_dir"], client.node.hostname))
                    out1, rc1 = client.exec_command(
                        cmd="sudo ls -c -ltd -- %s%s.*" %
                        (client_info["mounting_dir"], client.node.hostname))
                    client.exec_command(
                        cmd="sudo dd if=/dev/zero of=%s%s.txt bs=200M "
                        "count=5" %
                        (client_info["mounting_dir"], client.node.hostname))
                    out2, rc2 = client.exec_command(
                        cmd="sudo ls -c -ltd -- %s%s.*" %
                        (client_info["mounting_dir"], client.node.hostname))
                    a = out1.read().decode()
                    print("------------")
                    b = out2.read().decode()
                    # Listings must differ after the rewrite; on success
                    # record both ls exit statuses.
                    if a != b:
                        return_counts.append(out1.channel.recv_exit_status())
                        return_counts.append(out2.channel.recv_exit_status())
                    else:
                        raise CommandFailed("Metadata info command failed")
                    break
            log.info("Execution of testcase %s ended" % tc4)
            print(return_counts)
            # One distinct exit status across both listings ⇒ both were 0.
            rc_set = set(return_counts)
            if len(rc_set) == 1:
                results.append("TC %s passed" % tc4)
            print("Testcase Results:")
            for res in results:
                print(res)
            break
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"],
                                         "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        else:
            return 1
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        # Test-step failure: best-effort unmount, then report failure.
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"],
                                         "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CephFS test CEPH-11221: fill the cluster with client IO.

    Mounts two fuse clients and one kernel client, then loops mixed
    read/write/crefi IO in parallel until ``ceph_df`` reports the
    fill condition is reached (returns a falsy value), checking cluster
    health after each round.

    Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        tc = '11221'
        log.info('Running cephfs %s test case' % tc)
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info('Got client info')
        else:
            raise CommandFailed('fetching client info failed')
        # Loop guard: IO rounds continue while ceph_df() stays truthy.
        c1 = 1
        # Single-element lists: FsUtils helpers expect client lists.
        client1 = []
        client2 = []
        client3 = []
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        print(rc1, rc2, rc3)
        if rc1 == 0 and rc2 == 0 and rc3 == 0:
            log.info('got auth keys')
        else:
            raise CommandFailed('auth list failed')
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info('Fuse mount passed')
        else:
            raise CommandFailed('Fuse mount failed')
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0:
            log.info('kernel mount passed')
        else:
            raise CommandFailed('kernel mount failed')
        # Keep pushing IO until ceph_df() reports the target utilisation.
        while c1:
            with parallel() as p:
                p.spawn(fs_util.read_write_IO, client1,
                        client_info['mounting_dir'], 'g', 'write')
                p.spawn(fs_util.read_write_IO, client2,
                        client_info['mounting_dir'], 'g', 'read')
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info['mounting_dir'],
                    '',
                    0,
                    2,
                    iotype='crefi',
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info['mounting_dir'],
                    '',
                    0,
                    2,
                    iotype='crefi',
                )
                p.spawn(fs_util.read_write_IO, client3,
                        client_info['mounting_dir'])
                # Only the last spawned result is kept.
                for op in p:
                    (return_counts, rc) = op
            c1 = ceph_df(ceph_cluster)
            check_health(ceph_cluster)
        log.info('Test completed for CEPH-%s' % tc)
        print('Results:')
        result = fs_util.rc_verify(tc, return_counts)
        print(result)
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        (mins, secs) = divmod(total_time, 60)
        (hours, mins) = divmod(mins, 60)
        print('Hours:%d Minutes:%d Seconds:%f' % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        # Test-step failure: best-effort unmount, then report failure.
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                         client_info['kernel_clients'],
                                         client_info['mounting_dir'],
                                         'umount')
        else:
            rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                         client_info['mounting_dir'],
                                         'umount')
        if rc == 0:
            log.info('Cleaning up successfull')
        # BUGFIX: previously this handler fell through and returned None,
        # so a CommandFailed run was not reported as a failure (every
        # sibling test in this file returns 1 here).
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CephFS test CEPH-11255/11336 (fuse client failure injection).

    Adds MDS standby ranks and fstab entries, then runs parallel IO while
    injecting failures: client reboots, MDS heartbeat checks, OSD node
    reboots, network disconnects, and OSD process kills. Cluster health is
    compared before and after IO. Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        tc = '11255_11336-fuse client'
        dir_name = 'dir'
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get('config')
        num_of_osds = config.get('num_of_osds')
        fs_util = FsUtils(ceph_cluster)
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        # Single-element lists: FsUtils helpers expect client lists.
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("creating auth failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(
            client3, client_info['mounting_dir'], client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(
            client4, client_info['mounting_dir'], client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        # Baseline health snapshot to compare against after IO.
        cluster_health_beforeIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds, len(
                client_info['mon_node']), build, None, 300)
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        rc = fs_util.standby_rank(
            client_info['mds_nodes'], client_info['mon_node'],
            todo='add_rank')
        if rc == 0:
            log.info("Added standby ranks")
        else:
            log.error("Failed to add standby ranks")
            return 1
        client1[0].exec_command(
            cmd='sudo mkdir %s%s' % (client_info['mounting_dir'], dir_name))
        if client1[0].node.exit_status == 0:
            log.info("Dir created")
        else:
            raise CommandFailed('Dir creation failed')
        # fstab entries so mounts survive the reboots injected below.
        rc1 = fs_util.fstab_entry(
            client1, client_info['mounting_dir'], action='doEntry')
        rc2 = fs_util.fstab_entry(
            client2, client_info['mounting_dir'], action='doEntry')
        if rc1 == 0 and rc2 == 0:
            log.info("FSentry for clients are done")
        else:
            raise CommandFailed("FsEntry failed")
        rc1 = fs_util.fstab_entry(
            client3,
            client_info['mounting_dir'],
            action='doEntry',
            mon_node_ip=client_info['mon_node_ip'])
        rc2 = fs_util.fstab_entry(
            client4,
            client_info['mounting_dir'],
            action='doEntry',
            mon_node_ip=client_info['mon_node_ip'])
        if rc1 == 0 and rc2 == 0:
            log.info("FSentry for clients are done")
        else:
            raise CommandFailed("FsEntry failed")
        # IO in parallel with a reboot of fuse client1.
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client3,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                50,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                1,
                iotype='smallfile_create',
                fnum=1000,
                fsize=100)
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info['mounting_dir'],
                dir_name,
                0,
                1,
                iotype='crefi')
            p.spawn(fs_util.reboot, client1[0])
        # Collect MDS heartbeat-map results from all MDS nodes.
        res = []
        with parallel() as p:
            for node in client_info['mds_nodes']:
                p.spawn(fs_util.heartbeat_map, node)
            for op in p:
                res.append(op)
        print(res)
        # IO in parallel with a reboot of fuse client2.
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info['mounting_dir'], 'g', 'write')
            p.spawn(fs_util.read_write_IO, client4,
                    client_info['mounting_dir'], 'g', 'read')
            p.spawn(fs_util.stress_io, client1, client_info['mounting_dir'],
                    dir_name, 0, 1, iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch'),  # NOTE(review): stray trailing comma — harmless tuple expression
            p.spawn(fs_util.reboot, client2[0])
        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds, len(
                client_info['mon_node']), build, None, 300)
        if cluster_health_afterIO == cluster_health_beforeIO:
            log.info('cluster is healthy')
        else:
            log.error("cluster is not healty")
            return 1
        # IO while rebooting every OSD node.
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            for node in client_info['osd_nodes']:
                p.spawn(fs_util.reboot, node)
        # IO while disconnecting the network on every OSD node.
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='fio'),  # NOTE(review): stray trailing comma — harmless tuple expression
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            for node in client_info['osd_nodes']:
                fs_util.network_disconnect(node)
        # IO while killing the OSD process on every OSD node.
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='fio')
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info['mounting_dir'],
                dir_name,
                0,
                10,
                iotype='dd')
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info['mounting_dir'],
                dir_name,
                0,
                500,
                iotype='touch')
            for node in client_info['osd_nodes']:
                fs_util.pid_kill(node, 'osd')
        # Give the cluster time to recover before the final health check.
        time.sleep(100)
        cluster_health_afterIO = check_ceph_healthly(
            client_info['mon_node'][0], num_of_osds, len(
                client_info['mon_node']), build, None, 300)
        if cluster_health_beforeIO == cluster_health_afterIO:
            log.info("Cluster is healthy")
        else:
            return 1
        log.info('Cleaning up!-----')
        # Unmount, revert the extra MDS ranks, and remove fstab entries.
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'], client_info['kernel_clients'],
                client_info['mounting_dir'], 'umount')
            rc = fs_util.standby_rank(
                client_info['mds_nodes'], client_info['mon_node'],
                todo='add_rank_revert')
            if rc == 0:
                log.info("removed standby ranks")
                rc1 = fs_util.fstab_entry(
                    client1, client_info['mounting_dir'],
                    action='revertEntry')
                rc2 = fs_util.fstab_entry(
                    client2, client_info['mounting_dir'],
                    action='revertEntry')
                if rc1 == 0 and rc2 == 0:
                    log.info("FSentry for clients are done")
                else:
                    return 1
        else:
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'], '',
                client_info['mounting_dir'], 'umount')
            rc = fs_util.standby_rank(
                client_info['mds_nodes'], client_info['mon_node'],
                todo='add_rank_revert')
            if rc == 0:
                log.info("removed standby ranks")
                rc1 = fs_util.fstab_entry(
                    client1, client_info['mounting_dir'],
                    action='revertEntry')
                rc2 = fs_util.fstab_entry(
                    client2, client_info['mounting_dir'],
                    action='revertEntry')
                if rc1 == 0 and rc2 == 0:
                    log.info("FSentry for clients are done")
        if rc == 0:
            log.info('Cleaning up successfull')
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        # Test-step failure: best-effort unmount, then report failure.
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'], client_info['kernel_clients'],
                client_info['mounting_dir'], 'umount')
        else:
            rc = fs_util.client_clean_up(
                client_info['fuse_clients'], '',
                client_info['mounting_dir'], 'umount')
        if rc == 0:
            log.info('Cleaning up successfull')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CephFS test CEPH-11219/11224 (smallfile variant).

    Mounts two fuse and two kernel clients, activates multiple MDSs, runs
    mixed IO plus smallfile create/delete cycles, and compares cluster
    health before and after IO. Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        tc = "11219,11224"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        fs_util = FsUtils(ceph_cluster)
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        # Single-element lists: FsUtils helpers expect client lists.
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        # Baseline health snapshot to compare against after IO.
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            log.error("Activate multiple mdss failed")
            return 1
        client1[0].exec_command(
            sudo=True,
            cmd=f"mkdir {client_info['mounting_dir']}{dir_name}",
        )
        # Mixed read/write/smallfile IO on all four clients in parallel;
        # return_counts/rc keep only the last spawned result.
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                dir_name,
                0,
                2,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("", return_counts)
        if result == "Data validation success":
            print("Data validation success")
            dirs, rc = fs_util.mkdir(client1, 0, 3,
                                     client_info["mounting_dir"], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split("\n")
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    100,
                    iotype="touch",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile",
                )
                for op in p:
                    return_counts, rc = op
            # smallfile create: 10 files of 1024 bytes per directory.
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=10,
                    fsize=1024,
                )
                for op in p:
                    return_counts, rc = op
            # smallfile delete of the files created above.
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=10,
                    fsize=1024,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_delete",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=10,
                    fsize=1024,
                )
                for op in p:
                    return_counts, rc = op
        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        log.info("Execution of Test case CEPH-%s ended" % (tc))
        print("Results:")
        result = fs_util.rc_verify(tc, return_counts)
        # Second IO phase plus cleanup runs only if health is unchanged.
        # NOTE(review): when the health check fails there is no explicit
        # return here — execution falls through to `return 0` below.
        if cluster_health_beforeIO == cluster_health_afterIO:
            print(result)
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    5,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile",
                )
            with parallel() as p:
                p.spawn(
                    fs_util.read_write_IO,
                    client1,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[2],
                )
                p.spawn(
                    fs_util.read_write_IO,
                    client2,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    5,
                    iotype="fio",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    10,
                    iotype="dd",
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile",
                )
            with parallel() as p:
                p.spawn(
                    fs_util.read_write_IO,
                    client1,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )
                p.spawn(
                    fs_util.read_write_IO,
                    client2,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                    dir_name=dirs[0],
                )
            log.info("Cleaning up!-----")
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
                if rc == 0:
                    log.info("Cleaning up successfull")
                else:
                    return 1
            else:
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
                if rc == 0:
                    log.info("Cleaning up successfull")
                else:
                    return 1
            log.info("Execution of Test case CEPH-%s ended" % (tc))
            print("Results:")
            result = fs_util.rc_verify(tc, return_counts)
            print(result)
            print("Script execution time:------")
            stop = timeit.default_timer()
            total_time = stop - start
            mins, secs = divmod(total_time, 60)
            hours, mins = divmod(mins, 60)
            print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        # Test-step failure: best-effort unmount, then report failure.
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"],
                                         "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CephFS test CEPH-11220.

    Mounts two fuse and two kernel clients, creates one directory, and
    runs sequential smallfile, fio, and read/write IO against it from
    different clients. Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        tc = "11220"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        # Single-element lists: FsUtils helpers expect client lists.
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        log.info("Creating directory:")
        # Only the first (and only) node in client1 is used.
        for node in client1:
            out, rc = node.exec_command(
                cmd="sudo mkdir %s%s" %
                (client_info["mounting_dir"], dir_name))
            print(out)
            break
        # Sequential IO from three different clients on the same directory.
        return_counts1, rc1 = fs_util.stress_io(client1,
                                                client_info["mounting_dir"],
                                                dir_name, 0, 1,
                                                iotype="smallfile")
        return_counts2, rc2 = fs_util.stress_io(client2,
                                                client_info["mounting_dir"],
                                                dir_name, 0, 1, iotype="fio")
        return_counts3, rc3 = fs_util.read_write_IO(
            client3, client_info["mounting_dir"], dir_name=dir_name)
        if rc1 == 0 and rc2 == 0 and rc3 == 0:
            log.info("IOs on clients successfull")
            log.info("Testcase %s passed" % (tc))
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                "",
                client_info["mounting_dir"],
                "umount",
            )
        if rc == 0:
            log.info("Cleaning up successfull")
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        # Test-step failure: best-effort unmount, then report failure.
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-11222 (smallfile variant): multi-MDS IO with breadth/depth trees.

    Activates multiple MDSs, creates deep/wide directory trees with
    smallfile, renames them, runs parallel mixed IO from four clients,
    and verifies cluster health is unchanged before vs. after the IO.

    Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        tc = "11222"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        num_of_osds = config.get("num_of_osds")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        # One-element lists: the FsUtils helpers expect client lists.
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        # Snapshot cluster health before IO; compared against the post-IO
        # snapshot at the end of the test.
        cluster_health_beforeIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        # Random top-level directory name for this run.
        dir1 = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        # Only the first client creates the tree (break after one pass);
        # the mount is shared, so one writer is enough.
        for client in client_info["clients"]:
            log.info("Creating directory:")
            client.exec_command(
                cmd="sudo mkdir %s%s" % (client_info["mounting_dir"], dir1))
            log.info("Creating directories with breadth and depth:")
            client.exec_command(
                sudo=True,
                cmd=
                f"python3 smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 --files 1000 "
                f"--files-per-dir 10 --dirs-per-dir 2 --top {client_info['mounting_dir']}{dir1}",
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)
            break
        # Parallel mixed IO from both FUSE clients into the new tree.
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, client_info["mounting_dir"],
                    dir1, 0, 5, iotype="fio")
            p.spawn(fs_util.stress_io, client1, client_info["mounting_dir"],
                    dir1, 0, 100, iotype="touch")
            p.spawn(fs_util.stress_io, client2, client_info["mounting_dir"],
                    dir1, 0, 5, iotype="dd")
            p.spawn(fs_util.stress_io, client2, client_info["mounting_dir"],
                    dir1, 0, 5, iotype="smallfile")
            for op in p:
                return_counts, rc = op
        result1 = fs_util.rc_verify("", return_counts)
        print(result1)
        # Remove the tree (one client is enough on a shared mount).
        for client in client_info["clients"]:
            client.exec_command(
                cmd="sudo rm -rf %s%s" % (client_info["mounting_dir"], dir1))
            break
        # Recreate the tree, then rename everything in it.
        for client in client_info["clients"]:
            log.info("Creating directories with breadth and depth:")
            client.exec_command(
                sudo=True,
                cmd="python3 smallfile/smallfile_cli.py "
                "--operation create --threads 10 "
                " --file-size 4 --files 1000 "
                "--files-per-dir 10 --dirs-per-dir 2"
                " --top %s%s" % (client_info["mounting_dir"], dir1),
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)
            log.info("Renaming the dirs:")
            client.exec_command(
                sudo=True,
                cmd="python3 smallfile/smallfile_cli.py "
                "--operation rename --threads 10 --file-size 4"
                " --file-size 4 --files 1000 "
                "--files-per-dir 10 --dirs-per-dir 2"
                " --top %s%s" % (client_info["mounting_dir"], dir1),
                long_running=True,
                timeout=300,
            )
            return_counts = fs_util.io_verify(client)
            result = fs_util.rc_verify("", return_counts)
            print(result)
            break
        # Same parallel mixed IO again after the rename pass.
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, client_info["mounting_dir"],
                    dir1, 0, 5, iotype="fio")
            p.spawn(fs_util.stress_io, client1, client_info["mounting_dir"],
                    dir1, 0, 100, iotype="touch")
            p.spawn(fs_util.stress_io, client2, client_info["mounting_dir"],
                    dir1, 0, 5, iotype="dd")
            p.spawn(fs_util.stress_io, client2, client_info["mounting_dir"],
                    dir1, 0, 5, iotype="smallfile")
            for op in p:
                return_counts, rc = op
        result2 = fs_util.rc_verify("", return_counts)
        print(result2)
        cluster_health_afterIO = check_ceph_healthly(
            client_info["mon_node"][0],
            num_of_osds,
            len(client_info["mon_node"]),
            build,
            None,
            300,
        )
        client1[0].exec_command(
            cmd="sudo mkdir %s%s" % (client_info["mounting_dir"], dir_name))
        # Parallel metadata-size ('m') read/write IO from all four clients
        # plus a smallfile stress run, all on dir_name.
        with parallel() as p:
            p.spawn(fs_util.read_write_IO, client1,
                    client_info["mounting_dir"], "read", "m",
                    dir_name=dir_name)
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"], "write", "m",
                    dir_name=dir_name)
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "read", "m",
                    dir_name=dir_name)
            p.spawn(fs_util.read_write_IO, client4,
                    client_info["mounting_dir"], "write", "m",
                    dir_name=dir_name)
            p.spawn(fs_util.read_write_IO, client1,
                    client_info["mounting_dir"], dir_name=dir_name)
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"], dir_name=dir_name)
            p.spawn(fs_util.stress_io, client4, client_info["mounting_dir"],
                    dir_name, 0, 5, iotype="smallfile")
            for op in p:
                return_counts, rc = op
        # NOTE(review): verifies against tc "11223" although this test is
        # tc "11222" — confirm whether that id is intentional.
        result = fs_util.rc_verify("11223", return_counts)
        print(result)
        # Pass and clean up only if cluster health is unchanged by the IO.
        if cluster_health_beforeIO == cluster_health_afterIO:
            print("Testcase %s passed" % (tc))
            log.info("Cleaning up!-----")
            # Debian kernel clients cannot be unmounted this way; skip them.
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
            else:
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    "",
                    client_info["mounting_dir"],
                    "umount",
                )
            if rc == 0:
                log.info("Cleaning up successfull")
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-11222 (legacy crefi variant): multi-MDS IO with crefi trees.

    Same scenario as the smallfile variant but uses crefi to create and
    rename breadth/depth directory trees, and compares cluster health
    before vs. after the IO.

    Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        tc = '11222'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        # NOTE(review): called without the build argument used by the
        # sibling tests — confirm this matches the FsUtils.get_clients
        # signature this file is built against.
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        # One-element lists: the FsUtils helpers expect client lists.
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        rc = fs_util.activate_multiple_mdss(client_info['mds_nodes'])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            raise CommandFailed("Activate multiple mdss failed")
        # NOTE(review): passes the whole mon_node list (not [0]) and
        # hard-codes 12 OSDs / 1 mon, with fewer arguments than the
        # other check_ceph_healthly call sites in this file
        # ((mon_node[0], num_of_osds, len(mon_node), build, None, 300)).
        # Verify against the helper's actual signature.
        cluster_health_beforeIO = check_ceph_healthly(client_info['mon_node'],
                                                      12, 1, None, 300)
        # Random top-level directory name for this run.
        dir1 = ''.join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(10))
        # Only the first client creates the tree (break after one pass);
        # the mount is shared, so one writer is enough.
        for client in client_info['clients']:
            log.info("Creating directory:")
            client.exec_command(cmd='sudo mkdir %s%s' %
                                (client_info['mounting_dir'], dir1))
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd='sudo crefi %s%s --fop create --multi -b 10 -d 10 '
                '--random --min=1K --max=10K' %
                (client_info['mounting_dir'], dir1))
            print(out.read().decode())
            break
        # Parallel mixed IO from both FUSE clients into the new tree.
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, client_info['mounting_dir'],
                    dir1, 0, 5, iotype='fio')
            p.spawn(fs_util.stress_io, client1, client_info['mounting_dir'],
                    dir1, 0, 100, iotype='touch')
            p.spawn(fs_util.stress_io, client2, client_info['mounting_dir'],
                    dir1, 0, 5, iotype='dd')
            p.spawn(fs_util.stress_io, client2, client_info['mounting_dir'],
                    dir1, 0, 5, iotype='crefi')
            for op in p:
                return_counts, rc = op
        result1 = fs_util.rc_verify('', return_counts)
        print(result1)
        # Remove the tree (one client is enough on a shared mount).
        for client in client_info['clients']:
            client.exec_command(cmd='sudo rm -rf %s%s' %
                                (client_info['mounting_dir'], dir1))
            break
        # Recreate the tree, then rename everything in it.
        for client in client_info['clients']:
            log.info("Creating directories with breadth and depth:")
            out, rc = client.exec_command(
                cmd='sudo crefi %s%s --fop create --multi -b 10 -d 10 '
                '--random --min=1K --max=10K' %
                (client_info['mounting_dir'], dir1))
            print(out.read().decode())
            log.info("Renaming the dirs:")
            out, rc = client.exec_command(
                cmd='sudo crefi '
                '%s%s --fop rename --multi -b 10 -d 10 --random '
                '--min=1K --max=10K' % (client_info['mounting_dir'], dir1))
            print(out.read().decode())
            break
        # Same parallel mixed IO again after the rename pass.
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, client_info['mounting_dir'],
                    dir1, 0, 5, iotype='fio')
            p.spawn(fs_util.stress_io, client1, client_info['mounting_dir'],
                    dir1, 0, 100, iotype='touch')
            p.spawn(fs_util.stress_io, client2, client_info['mounting_dir'],
                    dir1, 0, 5, iotype='dd')
            p.spawn(fs_util.stress_io, client2, client_info['mounting_dir'],
                    dir1, 0, 5, iotype='crefi')
            for op in p:
                return_counts, rc = op
        result2 = fs_util.rc_verify('', return_counts)
        print(result2)
        cluster_health_afterIO = check_ceph_healthly(client_info['mon_node'],
                                                     12, 1, None, 300)
        # Pass and clean up only if cluster health is unchanged by the IO.
        if cluster_health_beforeIO == cluster_health_afterIO:
            print("Testcase %s passed" % (tc))
            log.info('Cleaning up!-----')
            # Debian kernel clients cannot be unmounted this way; skip them.
            if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
                rc = fs_util.client_clean_up(client_info['fuse_clients'],
                                             client_info['kernel_clients'],
                                             client_info['mounting_dir'],
                                             'umount')
            else:
                rc = fs_util.client_clean_up(client_info['fuse_clients'], '',
                                             client_info['mounting_dir'],
                                             'umount')
            if rc == 0:
                log.info('Cleaning up successfull')
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            fs_util.client_clean_up(client_info['fuse_clients'],
                                    client_info['kernel_clients'],
                                    client_info['mounting_dir'], 'umount')
        else:
            fs_util.client_clean_up(client_info['fuse_clients'], '',
                                    client_info['mounting_dir'], 'umount')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-11338: verify path-restricted client authorization on CephFS.

    Creates sub-directories, then re-authorizes clients with MDS path
    caps (rw / r / *), OSD caps, and layout/quota ('p') caps, mounting
    each restricted client on its own mount point and checking that
    permitted IO succeeds while forbidden IO raises CommandFailed.

    Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        dir_name = 'dir'
        log.info("Running cephfs 11338 test case")
        fs_util = FsUtils(ceph_cluster)
        client_info, rc = fs_util.get_clients()
        if rc == 0:
            log.info("Got client info")
        else:
            log.error("fetching client info failed")
            return 1
        # One-element lists: the FsUtils helpers expect client lists.
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        dirs, rc = fs_util.mkdir(client1, 1, 3, client_info['mounting_dir'],
                                 dir_name)
        if rc == 0:
            log.info("Directories created")
        else:
            # FIX: mkdir failure was previously ignored and the test
            # proceeded with an unusable 'dirs' value.
            raise CommandFailed("Directory creation failed")
        dirs = dirs.split('\n')
        # New restricted clients, one per path restriction.
        new_client1_name = client_info['fuse_clients'][
            0].node.hostname + '_%s' % (dirs[0])
        new_client2_name = client_info['fuse_clients'][
            1].node.hostname + '_%s' % (dirs[0])
        new_client3_name = client_info['kernel_clients'][
            0].node.hostname + '_%s' % (dirs[1])
        new_client3_mouting_dir = '/mnt/%s_%s/' % (
            client_info['kernel_clients'][0].node.hostname, dirs[1])
        new_client2_mouting_dir = '/mnt/%s_%s/' % (
            client_info['fuse_clients'][1].node.hostname, dirs[0])
        new_client1_mouting_dir = '/mnt/%s_%s/' % (
            client_info['fuse_clients'][0].node.hostname, dirs[0])
        # MDS path caps: client1 rw on dirs[0], client2 r on dirs[0],
        # client3 * on dirs[1].
        rc1 = fs_util.auth_list(client1, path=dirs[0], permission='rw',
                                mds=True)
        rc2 = fs_util.auth_list(client2, path=dirs[0], permission='r',
                                mds=True)
        rc3 = fs_util.auth_list(client3, path=dirs[1], permission='*',
                                mds=True)
        # FIX: the original condition also tested rc4, which still held the
        # stale kernel-mount result — only rc1..rc3 were refreshed above.
        if rc1 == 0 and rc2 == 0 and rc3 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, new_client1_mouting_dir,
                                 new_client=new_client1_name,
                                 sub_dir=dirs[0])
        rc2 = fs_util.fuse_mount(client2, new_client2_mouting_dir,
                                 new_client=new_client2_name,
                                 sub_dir=dirs[0])
        rc3 = fs_util.kernel_mount(client3, new_client3_mouting_dir,
                                   client_info['mon_node_ip'],
                                   new_client=new_client3_name,
                                   sub_dir=dirs[1])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        if rc3 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        # rw client must be able to create and delete.
        _, rc = fs_util.stress_io(client1, new_client1_mouting_dir, '', 0, 1,
                                  iotype='smallfile_create', fnum=1000,
                                  fsize=10)
        if rc == 0:
            log.info('Permissions set for client %s is working ' %
                     new_client1_name)
        else:
            log.error('Permissions set for client %s is failed' %
                      new_client1_name)
            return 1
        _, rc = fs_util.stress_io(client1, new_client1_mouting_dir, '', 0, 1,
                                  iotype='smallfile_delete', fnum=1000,
                                  fsize=10)
        if rc == 0:
            log.info('Permissions set for client %s is working properly' %
                     new_client1_name)
        else:
            log.error('Permissions set for client %s is failed' %
                      new_client1_name)
            return 1
        # Read-only client: write must be rejected.
        try:
            _, rc = fs_util.stress_io(client2, new_client2_mouting_dir, '', 0,
                                      1, iotype='touch')
        except CommandFailed:
            log.info('Permissions set for client %s is working properly' %
                     new_client2_name)
        # '*' client must be able to create and delete.
        _, rc = fs_util.stress_io(client3, new_client3_mouting_dir, '', 0, 1,
                                  iotype='smallfile_create', fnum=1000,
                                  fsize=10)
        if rc == 0:
            log.info('Permissions set for client %s is working properly' %
                     new_client3_name)
        else:
            log.error('Permissions set for client %s is failed' %
                      new_client3_name)
            return 1
        _, rc = fs_util.stress_io(client3, new_client3_mouting_dir, '', 0, 1,
                                  iotype='smallfile_delete', fnum=1000,
                                  fsize=10)
        if rc == 0:
            # FIX: both messages below previously lacked the '%' argument,
            # so "%s" was printed literally instead of the client name.
            log.info('Permissions set for client %s is working properly' %
                     new_client3_name)
        else:
            log.error('Permissions set for client %s is failed' %
                      new_client3_name)
            return 1
        fs_util.client_clean_up(client1, '', new_client1_mouting_dir,
                                'umount', client_name=new_client1_name)
        fs_util.client_clean_up(client2, '', new_client2_mouting_dir,
                                'umount', client_name=new_client2_name)
        fs_util.client_clean_up('', client3, new_client3_mouting_dir,
                                'umount', client_name=new_client3_name)
        # OSD caps phase: client1 rw, client3 read-only on data.
        fs_util.auth_list(client1, path=dirs[0], permission='rw', osd=True)
        fs_util.auth_list(client3, path=dirs[1], permission='r', osd=True)
        fs_util.fuse_mount(client1, new_client1_mouting_dir,
                           new_client=new_client1_name)
        fs_util.kernel_mount(client3, new_client3_mouting_dir,
                             client_info['mon_node_ip'],
                             new_client=new_client3_name)
        fs_util.stress_io(client1, new_client1_mouting_dir, '', 0, 1,
                          iotype='smallfile_delete', fnum=1000, fsize=10)
        # Read-only OSD caps: a data write from the kernel client must fail.
        try:
            if client_info['kernel_clients'][0].pkg_type == 'rpm':
                client_info['kernel_clients'][0].exec_command(
                    cmd='sudo dd if=/dev/zero of=%s/file bs=10M count=10' %
                    new_client3_mouting_dir)
        except CommandFailed as e:
            log.info(e)
            log.info('Permissions set for client %s is working properly' %
                     (client_info['kernel_clients'][0].node.hostname + '_' +
                      (dirs[1])))
        fs_util.client_clean_up(
            client1, '', new_client1_mouting_dir, 'umount',
            client_name=client_info['fuse_clients'][0].node.hostname +
            '_%s' % (dirs[0]))
        fs_util.client_clean_up('', client3, new_client3_mouting_dir,
                                'umount', client_name=new_client3_name)
        # Layout/quota ('p' flag) phase: client1 may set layout attrs,
        # client3 may not.
        fs_util.auth_list(client1, path=dirs[0], layout_quota='p_flag')
        fs_util.auth_list(client3, path=dirs[1], layout_quota='!p_flag')
        fs_util.fuse_mount(client1, new_client1_mouting_dir,
                           new_client=new_client1_name)
        fs_util.kernel_mount(client3, new_client3_mouting_dir,
                             client_info['mon_node_ip'],
                             new_client=new_client3_name)
        file_name = 'file1'
        client_info['fuse_clients'][0].exec_command(
            cmd='sudo touch %s/%s' % (new_client1_mouting_dir, file_name))
        client_info['fuse_clients'][0].exec_command(
            cmd='sudo mkdir %s/%s' % (new_client1_mouting_dir, dirs[0]))
        try:
            fs_util.setfattr(client3, 'stripe_unit', '1048576',
                             new_client3_mouting_dir, file_name)
            fs_util.setfattr(client3, 'max_bytes', '100000000',
                             new_client3_mouting_dir, dirs[1])
        except CommandFailed:
            log.info('Permission denied for setting attrs,success')
        fs_util.setfattr(client1, 'stripe_unit', '1048576',
                         new_client1_mouting_dir, file_name)
        fs_util.setfattr(client1, 'max_bytes', '100000000',
                         new_client1_mouting_dir, dirs[0])
        fs_util.client_clean_up(client1, '', new_client1_mouting_dir,
                                'umount', client_name=new_client1_name)
        fs_util.client_clean_up('', client3, new_client3_mouting_dir,
                                'umount', client_name=new_client3_name)
        fs_util.client_clean_up(client_info['fuse_clients'],
                                client_info['kernel_clients'],
                                client_info['mounting_dir'], 'umount')
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-11334: set and read CephFS file layout attributes.

    Sets stripe_unit, stripe_count, object_size and pool via setfattr,
    adds/removes a data pool on the filesystem, and verifies each layout
    field is readable back through getfattr.

    Returns 0 on success, 1 on failure; cleanup is attempted either way.
    """
    try:
        start = timeit.default_timer()
        tc = '11334'
        file_name = 'file'
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get('config')
        build = config.get('build', config.get('rhbuild'))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        # One-element lists: the FsUtils helpers expect client lists.
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info['fuse_clients'][0])
        client2.append(client_info['fuse_clients'][1])
        client3.append(client_info['kernel_clients'][0])
        client4.append(client_info['kernel_clients'][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info['mounting_dir'])
        rc2 = fs_util.fuse_mount(client2, client_info['mounting_dir'])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(client3, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        rc4 = fs_util.kernel_mount(client4, client_info['mounting_dir'],
                                   client_info['mon_node_ip'])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        vals, rc = fs_util.getfattr(client1, client_info['mounting_dir'],
                                    file_name)
        rc = fs_util.setfattr(client1, 'stripe_unit', '1048576',
                              client_info['mounting_dir'], file_name)
        if rc == 0:
            log.info("Setfattr stripe_unit for file %s success" % file_name)
        else:
            # FIX: the failure message previously said "success".
            raise CommandFailed("Setfattr stripe_unit for file %s failed" %
                                file_name)
        rc = fs_util.setfattr(client1, 'stripe_count', '8',
                              client_info['mounting_dir'], file_name)
        if rc == 0:
            log.info("Setfattr stripe_count for file %s success" % file_name)
        else:
            # FIX: the failure message previously said "success".
            raise CommandFailed("Setfattr stripe_count for file %s failed" %
                                file_name)
        rc = fs_util.setfattr(client1, 'object_size', '10485760',
                              client_info['mounting_dir'], file_name)
        if rc == 0:
            log.info("Setfattr object_size for file %s success" % file_name)
        else:
            # FIX: the failure message previously said "success".
            raise CommandFailed("Setfattr object_size for file %s failed" %
                                file_name)
        fs_info = fs_util.get_fs_info(client_info['mon_node'][0])
        fs_util.create_pool(client_info['mon_node'][0], 'new_data_pool', 64,
                            64)
        # add_pool_to_fs returns a collection of per-step return codes.
        rc = fs_util.add_pool_to_fs(client_info['mon_node'][0],
                                    fs_info.get('fs_name'), 'new_data_pool')
        if 0 in rc:
            log.info("Adding new pool to cephfs success")
        else:
            raise CommandFailed("Adding new pool to cephfs failed")
        rc = fs_util.setfattr(client1, 'pool', 'new_data_pool',
                              client_info['mounting_dir'], file_name)
        if rc == 0:
            log.info("Setfattr pool for file %s success" % file_name)
        else:
            # FIX: the failure message previously said "success".
            raise CommandFailed("Setfattr pool for file %s failed" %
                                file_name)
        vals, rc = fs_util.getfattr(client1, client_info['mounting_dir'],
                                    file_name)
        log.info("Read individual layout fields by using getfattr:")
        # One client is enough on a shared mount (break after one pass).
        for client in client1:
            out, rc = client.exec_command(
                cmd="sudo getfattr -n ceph.file.layout.pool %s%s" %
                (client_info['mounting_dir'], file_name))
            if vals['pool'] in out.read().decode():
                log.info("reading pool by getfattr successfull")
            out, rc = client.exec_command(
                cmd="sudo getfattr -n ceph.file.layout.stripe_unit %s%s" %
                (client_info['mounting_dir'], file_name))
            if vals['stripe_unit'] in out.read().decode():
                log.info("reading stripe_unit by getfattr successfull")
            out, rc = client.exec_command(
                cmd="sudo getfattr -n ceph.file.layout.stripe_count %s%s" %
                (client_info['mounting_dir'], file_name))
            if vals['stripe_count'] in out.read().decode():
                log.info("reading stripe_count by getfattr successfull")
            out, rc = client.exec_command(
                cmd="sudo getfattr -n ceph.file.layout.object_size %s%s" %
                (client_info['mounting_dir'], file_name))
            if vals['object_size'] in out.read().decode():
                log.info("reading object_size by getfattr successfull")
            break
        rc = fs_util.remove_pool_from_fs(client_info['mon_node'][0],
                                         fs_info.get('fs_name'),
                                         'new_data_pool')
        if 0 in rc:
            log.info("Pool removing success")
        else:
            raise CommandFailed("Pool removing failed")
        log.info('Cleaning up!-----')
        # Debian kernel clients cannot be unmounted this way; skip them.
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                client_info['kernel_clients'],
                                                client_info['mounting_dir'],
                                                'umount')
        else:
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                '',
                                                client_info['mounting_dir'],
                                                'umount')
        if rc_client == 0:
            log.info('Cleaning up successfull')
        else:
            return 1
        print('Script execution time:------')
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info('Cleaning up!-----')
        if client3[0].pkg_type != 'deb' and client4[0].pkg_type != 'deb':
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                client_info['kernel_clients'],
                                                client_info['mounting_dir'],
                                                'umount')
        else:
            rc_client = fs_util.client_clean_up(client_info['fuse_clients'],
                                                '',
                                                client_info['mounting_dir'],
                                                'umount')
        if rc_client == 0:
            log.info('Cleaning up successfull')
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-10625 / CEPH-11225: smallfile lifecycle and special-name stress.

    Runs parallel mixed IO, then smallfile create/rename/delete cycles on
    several directories, creates a directory whose name is entirely
    special characters plus 255-character file names, and finally loops
    smallfile create/setxattr/getxattr/chmod/rename operations.

    Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        tc = "10625,11225"
        dir_name = "dir"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        # One-element lists: the FsUtils helpers expect client lists.
        client1, client2, client3, client4 = ([] for _ in range(4))
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            log.error("auth list failed")
            return 1
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            log.error("Fuse mount failed")
            return 1
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            log.error("kernel mount failed")
            return 1
        rc = fs_util.activate_multiple_mdss(client_info["mds_nodes"])
        if rc == 0:
            log.info("Activate multiple mdss successfully")
        else:
            log.error("Activate multiple mdss failed")
            return 1
        # Parallel warm-up IO from all four clients.
        with parallel() as p:
            p.spawn(
                fs_util.read_write_IO,
                client1,
                client_info["mounting_dir"],
                "g",
                "write",
            )
            p.spawn(fs_util.read_write_IO, client2,
                    client_info["mounting_dir"], "g", "read")
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                "",
                0,
                1,
                iotype="smallfile",
            )
            p.spawn(
                fs_util.read_write_IO,
                client4,
                client_info["mounting_dir"],
                "g",
                "readwrite",
            )
            p.spawn(fs_util.read_write_IO, client3,
                    client_info["mounting_dir"])
            for op in p:
                return_counts, rc = op
        result = fs_util.rc_verify("", return_counts)
        # NOTE(review): if validation fails, everything below (including
        # cleanup) is skipped and the function falls through to the
        # timing code — confirm that is intended.
        if result == "Data validation success":
            dirs, rc = fs_util.mkdir(client1, 0, 6,
                                     client_info["mounting_dir"], dir_name)
            if rc == 0:
                log.info("Directories created")
            else:
                raise CommandFailed("Directory creation failed")
            dirs = dirs.split("\n")
            # NOTE(review): dirs[6] is used below although mkdir was asked
            # for the range 0..6 — whether a 7th entry exists (or is an
            # empty string from a trailing newline) depends on
            # fs_util.mkdir; verify.
            # Many small files (fnum=1000, fsize=10) created in parallel.
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[1],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1000,
                    fsize=10,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_rename",
                    fnum=1000,
                    fsize=10,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[0],
                    0,
                    1,
                    iotype="smallfile_delete-renamed",
                    fnum=1000,
                    fsize=10,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[2],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=1000,
                    fsize=10,
                )
            # One large file each (fnum=1, fsize=1000000).
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    dirs[4],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    dirs[5],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[6],
                    0,
                    1,
                    iotype="smallfile_create",
                    fnum=1,
                    fsize=1000000,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_rename",
                    fnum=1,
                    fsize=1000000,
                )
            with parallel() as p:
                p.spawn(
                    fs_util.stress_io,
                    client1,
                    client_info["mounting_dir"],
                    dirs[3],
                    0,
                    1,
                    iotype="smallfile_delete-renamed",
                    fnum=1,
                    fsize=1000000,
                )
                p.spawn(
                    fs_util.stress_io,
                    client4,
                    client_info["mounting_dir"],
                    dirs[4],
                    0,
                    1,
                    iotype="smallfile_delete",
                    fnum=1,
                    fsize=1000000,
                )
            # Directory name made entirely of shell special characters;
            # mkdir/touch quote it with single quotes below.
            dir_name = "!@#$%^&*()-_=+[]{};:,.<>?"
            out, rc = client1[0].exec_command(
                cmd="sudo mkdir '%s%s'" %
                (client_info["mounting_dir"], dir_name))
            if client1[0].node.exit_status == 0:
                log.info("Directory created")
            else:
                raise CommandFailed("Directory creation failed")
            # 255 characters is the usual filesystem name-length limit.
            for client in client_info["fuse_clients"]:
                file_name = "".join(
                    random.choice(string.ascii_lowercase + string.digits)
                    for _ in range(255))
                client.exec_command(
                    cmd="sudo touch '%s%s/%s'" %
                    (client_info["mounting_dir"], dir_name, file_name))
            for client in client_info["kernel_clients"]:
                if client.pkg_type == "rpm":
                    file_name = "".join(
                        random.choice(string.ascii_lowercase + string.digits)
                        for _ in range(255))
                    client.exec_command(
                        cmd="sudo touch '%s%s/%s'" %
                        (client_info["mounting_dir"], dir_name, file_name))
            # Repeat the smallfile operation cycle five times.
            for num in range(0, 5):
                for client in client_info["fuse_clients"]:
                    ops = [
                        "create", "setxattr", "getxattr", "chmod", "rename"
                    ]
                    for op in ops:
                        # NOTE(review): the --top path embeds the
                        # special-character dir_name unquoted in the shell
                        # command — verify it is not word-split/globbed by
                        # the remote shell.
                        client.exec_command(
                            sudo=True,
                            cmd=
                            f"python3 smallfile/smallfile_cli.py --operation {op} --threads 10 --file-size 4 "
                            f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
                            f"{client_info['mounting_dir']}{dir_name}",
                            long_running=True,
                            timeout=300,
                        )
            log.info("Cleaning up!-----")
            # Debian kernel clients cannot be unmounted this way; skip them.
            if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
                rc = fs_util.client_clean_up(
                    client_info["fuse_clients"],
                    client_info["kernel_clients"],
                    client_info["mounting_dir"],
                    "umount",
                )
            else:
                rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                             client_info["mounting_dir"],
                                             "umount")
            if rc == 0:
                log.info("Cleaning up successfull")
            else:
                return 1
        log.info("Execution of Test cases CEPH-%s ended:" % (tc))
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"],
                                         "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """Verify kernel mount works with a CephX user name longer than 37 chars (BZ 1798719).

    Creates a 39-character client user, writes its secret to a keyring file,
    kernel-mounts CephFS with that user and asserts the mount point shows up
    in ``mount`` output.

    Args:
        ceph_cluster: ceph cluster object the FsUtils helpers operate on.
        **kw: test-suite kwargs; ``config`` must carry ``build``/``rhbuild``.

    Returns:
        int: 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        bz = "1798719"
        log.info("Running cephfs test for bug %s" % bz)
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        client1 = []
        client1.append(client_info["kernel_clients"][0])
        mon_node_ip = client_info["mon_node_ip"]
        mounting_dir = client_info["mounting_dir"]
        # 39-character user name: the bug was a length limit around 37 chars.
        user_name = "qwertyuiopasdfghjklzxcvbnm1234567890123"
        p_flag = "rw"
        log.info("Creating user with more than 37 letters")
        for client in client1:
            client.exec_command(
                cmd="sudo ceph auth get-or-create client.%s "
                "mon 'allow r' mds "
                "'allow %s' osd 'allow rw' "
                "-o /etc/ceph/ceph.client.%s.keyring"
                % (user_name, p_flag, user_name))
            log.info("Creating mounting dir:")
            client.exec_command(cmd="sudo mkdir %s" % (mounting_dir))
            out, rc = client.exec_command(
                cmd="sudo ceph auth get-key client.%s" % (user_name))
            secret_key = out.rstrip("\n")
            # Stage the secret on the client so mount.ceph can read it.
            key_file = client.remote_file(
                sudo=True,
                file_name="/etc/ceph/%s.secret" % (user_name),
                file_mode="w")
            key_file.write(secret_key)
            key_file.flush()
            op, rc = client.exec_command(
                cmd="sudo mount -t ceph %s,%s,%s:/ "
                "%s -o name=%s,secretfile=/etc/ceph/%s.secret"
                % (
                    mon_node_ip[0],
                    mon_node_ip[1],
                    mon_node_ip[2],
                    mounting_dir,
                    user_name,
                    user_name,
                ))
            out, rc = client.exec_command(cmd="mount")
            mount_output = out.split()
            log.info("Checking if kernel mount is passed or failed:")
            # Explicit raise instead of a bare assert: asserts are stripped
            # under ``python -O`` and AssertionError would bypass the
            # CommandFailed cleanup path below.
            if mounting_dir.rstrip("/") not in mount_output:
                raise CommandFailed(
                    "kernel mount of %s with user %s failed"
                    % (mounting_dir, user_name))
            log.info("mount is passed")
        log.info("Execution of Test for bug %s ended:" % (bz))
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client1[0].pkg_type != "deb":
            # NOTE(review): this call passes 3 args where the sibling tests
            # pass 4 (fuse_clients, kernel_clients, dir, action) — confirm
            # client_clean_up supports this arity.
            rc = fs_util.client_clean_up(client_info["kernel_clients"],
                                         client_info["mounting_dir"],
                                         "umount")
            if rc == 0:
                log.info("Cleaning up successfull")
        # Bug fix: the handler previously fell through and returned None on a
        # successful cleanup, hiding the test failure from the framework.
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-11298: rsync data between a local directory and CephFS mounts.

    Mounts CephFS on two FUSE and two kernel clients, generates IO in a local
    source dir, rsyncs it into the mounted target dir from all four clients in
    parallel, adds more IO on the target, rsyncs back, and verifies all
    phases returned the same status.

    Returns 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        tc = "11298"
        source_dir = "/mnt/source"
        target_dir = "target"
        log.info("Running cephfs %s test case" % (tc))
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        # One single-element list per client so the FsUtils helpers (which
        # iterate over client lists) can target each client individually.
        client1 = []
        client2 = []
        client3 = []
        client4 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        client4.append(client_info["kernel_clients"][1])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        rc4 = fs_util.auth_list(client4)
        print(rc1, rc2, rc3, rc4)
        if rc1 == 0 and rc2 == 0 and rc3 == 0 and rc4 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        rc4 = fs_util.kernel_mount(client4, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0 and rc4 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        # Recreate the local (non-CephFS) source dir on every client.
        for client in client_info["clients"]:
            client.exec_command(cmd="sudo rm -rf %s" % source_dir)
            client.exec_command(cmd="sudo mkdir %s" % source_dir)
        # The target lives on the shared mount, so create it once only.
        for client in client_info["clients"]:
            client.exec_command(cmd="sudo mkdir %s%s" %
                                (client_info["mounting_dir"], target_dir))
            break
        # Phase 1: seed the local source dir with mixed IO from all clients.
        with parallel() as p:
            p.spawn(fs_util.stress_io, client1, source_dir, "", 0, 100,
                    iotype="touch")
            p.spawn(fs_util.read_write_IO, client1, source_dir, "g", "write")
            p.spawn(fs_util.stress_io, client2, source_dir, "", 0, 10,
                    iotype="dd")
            p.spawn(fs_util.stress_io, client3, source_dir, "", 0, 10,
                    iotype="smallfile")
            p.spawn(fs_util.stress_io, client4, source_dir, "", 0, 1,
                    iotype="fio")
            for op in p:
                return_counts1, rc = op
        # Phase 2: rsync local source -> CephFS target from all four clients.
        with parallel() as p:
            p.spawn(
                fs_util.rsync,
                client1,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )
            p.spawn(
                fs_util.rsync,
                client2,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )
            p.spawn(
                fs_util.rsync,
                client3,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )
            p.spawn(
                fs_util.rsync,
                client4,
                source_dir,
                "%s%s" % (client_info["mounting_dir"], target_dir),
            )
            for op in p:
                return_counts2, rc = op
        # Phase 3: additional IO directly on the CephFS target dir.
        with parallel() as p:
            p.spawn(
                fs_util.stress_io,
                client1,
                client_info["mounting_dir"],
                target_dir,
                0,
                100,
                iotype="touch",
            )
            p.spawn(
                fs_util.stress_io,
                client2,
                client_info["mounting_dir"],
                target_dir,
                0,
                11,
                iotype="dd",
            )
            p.spawn(
                fs_util.stress_io,
                client3,
                client_info["mounting_dir"],
                target_dir,
                0,
                3,
                iotype="fio",
            )
            p.spawn(
                fs_util.stress_io,
                client4,
                client_info["mounting_dir"],
                target_dir,
                0,
                1,
                iotype="fio",
            )
            for op in p:
                return_counts3, rc = op
        # Phase 4: rsync back CephFS target -> local source.
        with parallel() as p:
            p.spawn(
                fs_util.rsync,
                client1,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            p.spawn(
                fs_util.rsync,
                client2,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            p.spawn(
                fs_util.rsync,
                client3,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            p.spawn(
                fs_util.rsync,
                client4,
                "%s%s/*" % (client_info["mounting_dir"], target_dir),
                source_dir,
            )
            for op in p:
                return_counts4, rc = op
        # All phases pass when every recorded return value is identical
        # (a single-element set). NOTE(review): this also "passes" if every
        # phase failed with the same nonzero code — confirm intended.
        rc = (list(return_counts1.values()) + list(return_counts2.values()) +
              list(return_counts3.values()) + list(return_counts4.values()))
        rc_set = set(rc)
        if len(rc_set) == 1:
            print("Test case CEPH-%s passed" % (tc))
        else:
            print(("Test case CEPH-%s failed" % (tc)))
        log.info("Test completed for CEPH-%s" % (tc))
        log.info("Cleaning up!-----")
        # deb (Ubuntu) kernel clients are skipped during cleanup.
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"],
                                         "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        else:
            return 1
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        mins, secs = divmod(total_time, 60)
        hours, mins = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        # Expected-failure path: log, attempt cleanup, report failure.
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
            fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            fs_util.client_clean_up(client_info["fuse_clients"], "",
                                    client_info["mounting_dir"], "umount")
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1
def run(ceph_cluster, **kw):
    """CEPH-11221: run mixed client IO until the cluster reports full.

    Mounts CephFS on two FUSE clients and one kernel client, then loops
    read/write and crefi stress IO in parallel while ``ceph_df`` stays
    truthy, checking cluster health after every iteration.

    Args:
        ceph_cluster: ceph cluster object the FsUtils helpers operate on.
        **kw: test-suite kwargs; ``config`` must carry ``build``/``rhbuild``.

    Returns:
        int: 0 on success, 1 on failure.
    """
    try:
        start = timeit.default_timer()
        tc = "11221"
        log.info("Running cephfs %s test case" % tc)
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        build = config.get("build", config.get("rhbuild"))
        client_info, rc = fs_util.get_clients(build)
        if rc == 0:
            log.info("Got client info")
        else:
            raise CommandFailed("fetching client info failed")
        c1 = 1
        client1 = []
        client2 = []
        client3 = []
        client1.append(client_info["fuse_clients"][0])
        client2.append(client_info["fuse_clients"][1])
        client3.append(client_info["kernel_clients"][0])
        rc1 = fs_util.auth_list(client1)
        rc2 = fs_util.auth_list(client2)
        rc3 = fs_util.auth_list(client3)
        print(rc1, rc2, rc3)
        if rc1 == 0 and rc2 == 0 and rc3 == 0:
            log.info("got auth keys")
        else:
            raise CommandFailed("auth list failed")
        rc1 = fs_util.fuse_mount(client1, client_info["mounting_dir"])
        rc2 = fs_util.fuse_mount(client2, client_info["mounting_dir"])
        if rc1 == 0 and rc2 == 0:
            log.info("Fuse mount passed")
        else:
            raise CommandFailed("Fuse mount failed")
        rc3 = fs_util.kernel_mount(client3, client_info["mounting_dir"],
                                   client_info["mon_node_ip"])
        if rc3 == 0:
            log.info("kernel mount passed")
        else:
            raise CommandFailed("kernel mount failed")
        # Keep generating IO until ceph_df reports the stop condition
        # (falsy c1); health is re-checked after every round.
        while c1:
            with parallel() as p:
                p.spawn(
                    fs_util.read_write_IO,
                    client1,
                    client_info["mounting_dir"],
                    "g",
                    "write",
                )
                p.spawn(
                    fs_util.read_write_IO,
                    client2,
                    client_info["mounting_dir"],
                    "g",
                    "read",
                )
                p.spawn(
                    fs_util.stress_io,
                    client3,
                    client_info["mounting_dir"],
                    "",
                    0,
                    2,
                    iotype="crefi",
                )
                p.spawn(
                    fs_util.stress_io,
                    client2,
                    client_info["mounting_dir"],
                    "",
                    0,
                    2,
                    iotype="crefi",
                )
                p.spawn(fs_util.read_write_IO, client3,
                        client_info["mounting_dir"])
                for op in p:
                    (return_counts, rc) = op
            c1 = ceph_df(ceph_cluster)
            check_health(ceph_cluster)
        log.info("Test completed for CEPH-%s" % tc)
        print("Results:")
        result = fs_util.rc_verify(tc, return_counts)
        print(result)
        print("Script execution time:------")
        stop = timeit.default_timer()
        total_time = stop - start
        (mins, secs) = divmod(total_time, 60)
        (hours, mins) = divmod(mins, 60)
        print("Hours:%d Minutes:%d Seconds:%f" % (hours, mins, secs))
        return 0
    except CommandFailed as e:
        log.info(e)
        log.info(traceback.format_exc())
        log.info("Cleaning up!-----")
        if client3[0].pkg_type != "deb":
            rc = fs_util.client_clean_up(
                client_info["fuse_clients"],
                client_info["kernel_clients"],
                client_info["mounting_dir"],
                "umount",
            )
        else:
            rc = fs_util.client_clean_up(client_info["fuse_clients"], "",
                                         client_info["mounting_dir"],
                                         "umount")
        if rc == 0:
            log.info("Cleaning up successfull")
        # Bug fix: the handler previously fell off the end and returned None
        # on CommandFailed, so the framework could not see the failure;
        # sibling tests in this file all return 1 here.
        return 1
    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        return 1