def main(options):
    """Run the fio benchmark across the deployed OpenvCloud VMs.

    Discovers VMs through cloudspace port-forwardings, prepares each VM,
    runs fio on them concurrently (via gevent), assembles the results under
    a timestamped directory inside ``options.results_dir`` and pushes them
    to the environment repository.

    Expected ``options`` attributes (argparse namespace): results_dir,
    application_id, secret, environment, username, cloudspaces,
    required_vms, testsuite, type.
    """
    # Check dependencies
    if not os.path.exists(options.results_dir):
        print(
            "Not all dependencies are met. Make sure the result directory exists."
        )
        return
    if not check_package('sshpass') or not check_package(
            'python3-prettytable'):
        return

    # Prepare test run: results go to
    # <results_dir>/<YYYY-MM-DD>_<host>_fio_testresults_<n>
    hostname = run_cmd_via_gevent('hostname').replace("\n", "")
    test_num = len(os.listdir('{}'.format(options.results_dir))) + 1
    test_dir = "/" + datetime.datetime.today().strftime('%Y-%m-%d')
    test_dir += "_" + hostname + "_fio_testresults_{}".format(test_num)
    results_dir = options.results_dir + test_dir
    run_cmd_via_gevent('mkdir -p {}'.format(results_dir))

    # list virtual and deployed cloudspaces
    vms = []
    vms_index = set()
    j.clients.itsyouonline.get(data={
        'application_id_': options.application_id,
        'secret_': options.secret
    })
    ovc = j.clients.openvcloud.get(data={
        'address': options.environment,
        'account': options.username
    })
    cloudspaces_per_user = ovc.api.cloudapi.cloudspaces.list()
    # Optionally restrict the run to an explicit comma-separated id list.
    cloudspaces_ids = options.cloudspaces
    if cloudspaces_ids:
        cloudspaces_ids = [int(_id) for _id in cloudspaces_ids.split(',')]
        cloudspaces_per_user = [
            cs for cs in cloudspaces_per_user if cs['id'] in cloudspaces_ids
        ]
    for cs in cloudspaces_per_user:
        if cs['name'] == 'template_space':
            continue
        portforwards = ovc.api.cloudapi.portforwarding.list(
            cloudspaceId=cs['id'])
        for pi in portforwards:
            # One entry per machine; skip forwards without a machine or dupes.
            if 'machineId' not in pi or pi['machineId'] in vms_index:
                continue
            vms.append([pi['machineId'], pi['publicIp'], pi['publicPort']])
            vms_index.add(pi['machineId'])
    if len(vms) < options.required_vms:
        # FIX: this message was a string literal broken across physical lines
        # (a syntax error); rebuilt as a single-line format string.
        print("Not enough vms available to run this test. {} < {}".format(
            len(vms), options.required_vms))
        return
    vms = vms[:options.required_vms]

    # prepare test: push the fio driver script and the disk-mount helper
    files = [
        '{}/1_fio_vms/Machine_script.py'.format(options.testsuite),
        '{}/1_fio_vms/mount_disks.sh'.format(options.testsuite)
    ]
    pjobs = [
        gevent.spawn(prepare_test, ovc, concurrency, options, files, *vm)
        for vm in vms
    ]
    gevent.joinall(pjobs)

    # mount disks if the filesystem will be used
    # NOTE(review): "filesytem" looks like a typo for "filesystem", but the
    # value must match what the caller passes in options.type — confirm and
    # fix both sides together before changing it here.
    if options.type == "filesytem":
        gevent.joinall(
            [gevent.spawn(mount_disks, ovc, options, *vm) for vm in vms])

    # run fio tests, only on machines whose preparation succeeded
    rjobs = [
        gevent.spawn(fio_test, options, *job.value) for job in pjobs
        if job.value is not None
    ]
    gevent.joinall(rjobs)

    # collect results from machines
    rjobs = [
        gevent.spawn(assemble_fio_test_results, results_dir, *job.value)
        for job in rjobs if job.value is not None
    ]
    gevent.joinall(rjobs)

    # record the run parameters next to the results
    with open(os.path.join(results_dir, 'parameters.md'), 'w') as params:
        params.write("# Parameters\n\n")
        for key, value in vars(options).items():
            params.write("- **{}**: {}\n".format(key, value))

    # collecting results in csv file (collect_results.py expects to run
    # from inside the results directory, hence the chdir dance)
    cwd = os.getcwd()
    j.sal.fs.copyFile(
        '{}/1_fio_vms/collect_results.py'.format(options.testsuite),
        results_dir)
    os.chdir(results_dir)
    j.sal.process.execute(
        'python3 collect_results.py -dir {} -env {} -u {} -appid {} -secret {}'
        .format(results_dir, options.environment, options.username,
                options.application_id, options.secret))

    # pushing results to env_repo
    os.chdir(cwd)
    location = options.environment.split('.')[0]
    push_results_to_repo(results_dir, location)
def main(options):
    """Run the UnixBench suite on the deployed OpenvCloud VMs.

    Discovers VMs through cloudspace port-forwardings, runs UnixBench on
    each concurrently (via gevent), then writes average and per-iteration
    score tables plus the run parameters into a timestamped directory
    under ``options.results_dir`` and pushes it to the environment repo.

    Expected ``options`` attributes (argparse namespace): results_dir,
    environment, username, password, required_vms.
    """
    from JumpScale import j

    # Check dependencies
    if not os.path.exists(options.results_dir):
        print(
            "Not all dependencies are met. Make sure the result directory exists."
        )
        return
    if not check_package('sshpass') or not check_package(
            'python3-prettytable'):
        return

    # Prepare test run: results go to
    # <results_dir>/<YYYY-MM-DD>_<host>_testresults_<n>
    hostname = run_cmd_via_gevent('hostname').replace("\n", "")
    test_num = len(os.listdir('{}'.format(options.results_dir))) + 1
    test_dir = "/" + datetime.datetime.today().strftime('%Y-%m-%d')
    test_dir += "_" + hostname + "_testresults_{}".format(test_num)
    results_dir = options.results_dir + test_dir
    run_cmd_via_gevent('mkdir -p {}'.format(results_dir))

    # list virtual and deployed cloudspaces
    vms = []
    vms_index = set()
    ovc = j.clients.openvcloud.get(options.environment, options.username,
                                   options.password)
    cloudspaces_per_user = ovc.api.cloudapi.cloudspaces.list()
    for cs in cloudspaces_per_user:
        portforwards = ovc.api.cloudapi.portforwarding.list(
            cloudspaceId=cs['id'])
        for pi in portforwards:
            # One entry per machine; skip forwards without a machine or dupes.
            if 'machineId' in pi and not pi['machineId'] in vms_index:
                vms.append([pi['machineId'], pi['publicIp'], pi['publicPort']])
                vms_index.add(pi['machineId'])
    if len(vms) < options.required_vms:
        print("Not enough vms available to run this test.")
        return
    vms = vms[:options.required_vms]

    # getting bootdisk size, cpu and memory used during vms creation (for any vm)
    # NOTE(review): this deliberately reuses `pi` and `cs` leaked from the
    # loops above (i.e. the last machine/cloudspace seen), assuming all VMs
    # were created with the same size — confirm against the deployment script.
    machine = safe_get_vm(ovc, concurrency, pi['machineId'])
    bootdisk = machine['disks'][0]['sizeMax']
    size_id = machine['sizeid']
    sizes = ovc.api.cloudapi.sizes.list(cloudspaceId=cs['id'])
    # FIX: look up the size entry once instead of running two identical
    # next() scans (the old code also subscripted `False` on a miss, which
    # produced an opaque TypeError).
    size = next((i for i in sizes if i['id'] == size_id), None)
    memory = size['memory']
    cpu = size['vcpus']

    # prepare unixbench tests
    prepare_jobs = [
        gevent.spawn(prepare_unixbench_test, options, ovc, cpu, *vm)
        for vm in vms
    ]
    gevent.joinall(prepare_jobs)

    # run unixbench tests, only on machines whose preparation succeeded.
    # FIX: replaced zip(*[prepare_jobs, range(len(prepare_jobs))]) with the
    # idiomatic enumerate(), and rejoined the `is not None` test that had
    # been broken across physical lines.
    run_jobs = [
        gevent.spawn(unixbench_test, options, idx, *job.value)
        for idx, job in enumerate(prepare_jobs) if job.value is not None
    ]
    gevent.joinall(run_jobs)

    # Each result is (machine_id, avg_score, [(timestamp, score), ...]);
    # sort by average score before tabulating.
    raw_results = [job.value for job in run_jobs if job.value]
    raw_results.sort(key=lambda x: x[1])
    results = []
    for index, s in enumerate(raw_results, 1):
        results.append([
            index, '{} (id={})'.format(machines.get(s[0], s[0]), s[0]), cpu,
            memory, bootdisk, s[1]
        ])
    titles = [
        'Index', 'VM', 'CPU\'s', 'Memory(MB)', 'HDD(GB)',
        'Avg. Unixbench Score'
    ]
    collect_results(titles, results, results_dir, 'average-results')

    # Per-iteration scores, one row per (vm, timestamp).
    titles = ['VM', 'Timestamp (epoch)', 'Score']
    results = []
    for result in raw_results:
        machine_id, avg_score, scores = result
        for timestamp, score in scores:
            results.append([
                '{} (id={})'.format(machines.get(machine_id, machine_id),
                                    machine_id), timestamp, score
            ])
    collect_results(titles, results, results_dir, 'all-results')

    # report results: record the run parameters next to the tables
    with open(os.path.join(results_dir, 'parameters.md'), 'w') as params:
        params.write("# Parameters\n\n")
        for key, value in vars(options).items():
            params.write("- **{}**: {}\n".format(key, value))

    # pushing results to env_repo
    location = options.environment.split('.')[0]
    push_results_to_repo(results_dir, location)
def main(options):
    """Run the pgbench benchmark across the deployed OpenvCloud VMs.

    Discovers VMs through cloudspace port-forwardings, runs pgbench on
    each concurrently (via gevent), writes a per-machine iops CSV plus the
    run parameters into a timestamped directory under
    ``options.results_dir`` and pushes it to the environment repository.

    Expected ``options`` attributes (argparse namespace): results_dir,
    application_id, secret, environment, username, required_vms, testsuite.
    """
    from js9 import j

    # Check dependencies
    if not os.path.exists(options.results_dir):
        print(
            "Not all dependencies are met. Make sure the result directory exists."
        )
        return
    if not check_package('sshpass') or not check_package(
            'python3-prettytable'):
        return

    # Prepare test run: results go to
    # <results_dir>/<YYYY-MM-DD>_<host>_pgbench_testresults_<n>
    hostname = run_cmd_via_gevent('hostname').replace("\n", "")
    test_num = len(os.listdir('{}'.format(options.results_dir))) + 1
    test_dir = "/" + datetime.datetime.today().strftime('%Y-%m-%d')
    test_dir += "_" + hostname + "_pgbench_testresults_{}".format(test_num)
    results_dir = options.results_dir + test_dir
    run_cmd_via_gevent('mkdir -p {}'.format(results_dir))

    # list virtual and deployed cloudspaces
    vms = []
    vms_index = set()
    j.clients.itsyouonline.get(data={
        'application_id_': options.application_id,
        'secret_': options.secret
    })
    ovc = j.clients.openvcloud.get(data={
        'address': options.environment,
        'account': options.username
    })
    cloudspaces_per_user = ovc.api.cloudapi.cloudspaces.list()
    for cs in cloudspaces_per_user:
        if cs['name'] == 'template_space':
            continue
        portforwards = ovc.api.cloudapi.portforwarding.list(
            cloudspaceId=cs['id'])
        for pi in portforwards:
            # One entry per machine; skip forwards without a machine or dupes.
            if 'machineId' not in pi or pi['machineId'] in vms_index:
                continue
            vms.append([pi['machineId'], pi['publicIp'], pi['publicPort']])
            vms_index.add(pi['machineId'])
    if len(vms) < options.required_vms:
        # FIX: this message was a string literal broken across physical lines
        # (a syntax error); rebuilt as a single-line format string.
        print("Not enough vms available to run this test. {} < {}".format(
            len(vms), options.required_vms))
        return
    vms = vms[:options.required_vms]

    # prepare test: push the pgbench driver script to each VM
    files = ['{}/4_pgbench/run_pgbench.sh'.format(options.testsuite)]
    pjobs = [
        gevent.spawn(prepare_test, ovc, concurrency, options, files, *vm)
        for vm in vms
    ]
    gevent.joinall(pjobs)

    # run pgbench tests, only on machines whose preparation succeeded
    rjobs = [
        gevent.spawn(pgbench, options, *job.value) for job in pjobs
        if job.value is not None
    ]
    gevent.joinall(rjobs)

    # report results: record the run parameters next to the CSV
    with open(os.path.join(results_dir, 'parameters.md'), 'w') as params:
        params.write("# Parameters\n\n")
        for key, value in vars(options).items():
            params.write("- **{}**: {}\n".format(key, value))

    total_iops = 0
    with open(os.path.join(results_dir, 'results.csv'), 'w') as csvfile:
        fieldnames = ['machine_id', 'iops']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for job in rjobs:
            # FIX: guard against a worker that produced no result before
            # unpacking (mirrors the `job.value is not None` filter used
            # by the sibling benchmark runners).
            if job.value is None:
                continue
            machine_id, iops = job.value
            if iops == 0:
                continue  # skip machines with errors
            writer.writerow({'machine_id': machine_id, 'iops': iops})
            total_iops += iops
        writer.writerow({'machine_id': 'total iops', 'iops': total_iops})
    print("==========================")
    print("Total iops: {}".format(total_iops))
    print("==========================")

    # pushing results to env_repo
    location = options.environment.split('.')[0]
    push_results_to_repo(results_dir, location)