Example #1
def assemble_fio_test_results(results_dir, account, publicport,
                              cloudspace_publicip, machine_id):
    print('Collecting results from machine: {}'.format(machine_id))
    templ = 'sshpass -p{} scp -r -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null '
    templ += '-P {}  {}@{}:machine{}_iter{}_{}_results {}/'
    cmd = templ.format(account['password'], publicport, account['login'],
                       cloudspace_publicip, machine_id, 1, options.write_type,
                       results_dir)
    run_cmd_via_gevent(cmd)
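
Every example below shells out through a shared run_cmd_via_gevent helper that the excerpts do not define. A minimal sketch, assuming gevent's cooperative subprocess module is used so other greenlets keep running while the shell command executes:

from gevent import subprocess


def run_cmd_via_gevent(cmd):
    # Hypothetical helper: run a full shell command string cooperatively and
    # return its stdout as text; the real project may add logging or retries.
    return subprocess.check_output(cmd, shell=True).decode()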
Example #2
def unixbench_test(options, count, machine_id, publicip, publicport, account,
                   cpu_cores):
    gevent.sleep(options.time_interval * count)
    print(
        'unixbench testing has been started on machine: {}'.format(machine_id))
    templ = 'sshpass -p "{}" ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p {} {}@{} '
    templ += ' "cd /home/cloudscalers/UnixBench; echo {} | sudo -S ./Run -c {} -i 1"'
    cmd = templ.format(account['password'], publicport, account['login'],
                       publicip, account['password'], cpu_cores)
    results = list()
    start = time.time()
    while start + options.test_runtime > time.time():
        output = run_cmd_via_gevent(cmd)
        match = None
        for line in output.splitlines():
            m = matcher.match(line)
            if m:
                match = m
                break
        if match:
            result = float(match.group(1))
            results.append((time.time(), result))
            print("Machine {} reports score of {}".format(machine_id, result))
        else:
            logger.error(
                "Unixbench did not return result:\n\n{}".format(output))

    if not results:
        logger.error("Machine {} produced no UnixBench results".format(machine_id))
        return machine_id, 0, results
    return machine_id, sum(r[1] for r in results) / len(results), results
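
Example #2 also relies on a module-level matcher (and logger) that the excerpt omits. A plausible sketch, assuming the score is read from UnixBench's final index line:

import logging
import re

logger = logging.getLogger(__name__)
# Assumed pattern: capture the score from a line such as
# "System Benchmarks Index Score                        1234.5"
matcher = re.compile(r'System Benchmarks Index Score\s+(\d+(?:\.\d+)?)')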
Example #3
    def install_req(self, machine, cloudspace):
        account = machine['accounts'][0]

        # Wait until vm accepts connections
        wait_until_remote_is_listening(cloudspace['publicipaddress'],
                                       machine['public_port'])

        # Copy install_deps.sh to vm
        templ = 'sshpass -p "{0}" scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
        templ += ' -P {1} install_deps.sh {2}@{3}:/home/{2}'
        cmd = templ.format(account['password'], machine['public_port'],
                           account['login'], cloudspace['externalnetworkip'])
        run_cmd_via_gevent(cmd)

        # Run bash script on vm
        templ = 'sshpass -p "{0}" ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p {1} {2}@{3} '
        templ += '\'echo "{0}" | sudo -S bash /home/{2}/install_deps.sh\''
        cmd = templ.format(account['password'], machine['public_port'],
                           account['login'], cloudspace['externalnetworkip'])
        run_cmd_via_gevent(cmd)
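
The wait_until_remote_is_listening helper is not part of the excerpt. A minimal sketch, under the assumption that it simply polls the forwarded SSH port until a TCP connection succeeds:

import socket
import time

import gevent


def wait_until_remote_is_listening(ip, port, timeout=600):
    # Hypothetical helper: poll ip:port until it accepts TCP connections
    # or the timeout expires, yielding to other greenlets between attempts.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            socket.create_connection((ip, port), timeout=5).close()
            return
        except OSError:
            gevent.sleep(2)
    raise TimeoutError('{}:{} did not start listening in time'.format(ip, port))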
Example #4
def fio_test(options, machine_id, publicip, publicport, account):
    machines_running.add(machine_id)
    # only one data disk for this test
    disks_num = 1
    templ = 'sshpass -p "{}" ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p {} {}@{} '
    templ += ' python Machine_script.py {} {} {} {} {} {} {} {} {} {} {} {} {} {}'
    cmd = templ.format(account['password'], publicport, account['login'],
                       publicip, options.testrun_time, machine_id,
                       account['password'], 1, disks_num, options.data_size,
                       options.write_type, options.block_size, options.iodepth,
                       options.direct_io, options.rwmixwrite,
                       options.rate_iops, options.numjobs, options.type)
    print('FIO testing has been started on machine: {}'.format(machine_id))
    run_cmd_via_gevent(cmd)
    machines_complete.add(machine_id)
    running = machines_running.difference(machines_complete)
    complete = (len(machines_running) -
                len(running)) / len(machines_running) * 100.0
    print('Testing is {:.2f}% complete'.format(complete))
    if complete >= 90.0:
        print('Waiting for machines {} to complete their test ...'.format(
            ' '.join(str(x) for x in running)))

    return account, publicport, publicip, machine_id
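
fio_test reports overall progress through the module-level machines_running and machines_complete sets, which the excerpt does not define. A minimal sketch of the assumed module-level state:

# Module-level progress tracking assumed by fio_test and pgbench
# (not shown in the excerpts); every greenlet updates the same sets.
machines_running = set()
machines_complete = set()

Because gevent greenlets run cooperatively in a single process, plain sets are sufficient here and no explicit locking is needed.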
Example #5
def pgbench(options, machine_id, publicip, publicport, account):
    machines_running.add(machine_id)
    # only one data disk for this test
    templ = 'sshpass -p "{}" ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p {} {}@{} '
    templ += ' bash run_pgbench.sh {} {} {} {} {} {}'
    cmd = templ.format(account['password'], publicport, account['login'], publicip,
                       account['password'], 'vdb', options.scalefactor, options.testrun_time,
                       options.threadcount, options.clientcount)
    print('Postgres benchmarking has been started on machine: {}'.format(machine_id))
    iops = int(run_cmd_via_gevent(cmd).splitlines()[-1])
    machines_complete.add(machine_id)
    running = machines_running.difference(machines_complete)
    complete = (len(machines_running) - len(running)) / len(machines_running) * 100.0
    print('Machine {} reports {} iops. Testing is {:.2f}% complete'.format(
        machine_id, iops, complete))

    return machine_id, iops
Example #6
def main(options):
    from JumpScale import j

    # Check dependencies
    if not os.path.exists(options.results_dir):
        print(
            "Not all dependencies are met. Make sure the result directory exists."
        )
        return

    if not check_package('sshpass') or not check_package(
            'python3-prettytable'):
        return

    # Prepare test run
    hostname = run_cmd_via_gevent('hostname').replace("\n", "")
    test_num = len(os.listdir('{}'.format(options.results_dir))) + 1
    test_dir = "/" + datetime.datetime.today().strftime('%Y-%m-%d')
    test_dir += "_" + hostname + "_testresults_{}".format(test_num)
    results_dir = options.results_dir + test_dir
    run_cmd_via_gevent('mkdir -p {}'.format(results_dir))

    # list virtual and deployed cloudspaces
    vms = []
    vms_index = set()
    ovc = j.clients.openvcloud.get(options.environment, options.username,
                                   options.password)
    cloudspaces_per_user = ovc.api.cloudapi.cloudspaces.list()
    for cs in cloudspaces_per_user:
        portforwards = ovc.api.cloudapi.portforwarding.list(
            cloudspaceId=cs['id'])
        for pi in portforwards:
            if 'machineId' in pi and not pi['machineId'] in vms_index:
                vms.append([pi['machineId'], pi['publicIp'], pi['publicPort']])
                vms_index.add(pi['machineId'])

    if len(vms) < options.required_vms:
        print("Not enough vms available to run this test.")
        return
    vms = vms[:options.required_vms]

    # get boot disk size, cpu and memory used during VM creation (same for any VM)
    machine = safe_get_vm(ovc, concurrency, pi['machineId'])
    bootdisk = machine['disks'][0]['sizeMax']
    size_id = machine['sizeid']
    sizes = ovc.api.cloudapi.sizes.list(cloudspaceId=cs['id'])
    size = next((i for i in sizes if i['id'] == size_id), None)
    memory = size['memory']
    cpu = size['vcpus']

    # prepare unixbench tests
    prepare_jobs = [
        gevent.spawn(prepare_unixbench_test, options, ovc, cpu, *vm)
        for vm in vms
    ]
    gevent.joinall(prepare_jobs)

    # run unixbench tests
    run_jobs = [
        gevent.spawn(unixbench_test, options, c, *job.value)
        for c, job in enumerate(prepare_jobs) if job.value is not None
    ]
    gevent.joinall(run_jobs)

    raw_results = [job.value for job in run_jobs if job.value]
    raw_results.sort(key=lambda x: x[1])
    results = list()
    index = 0
    for s in raw_results:
        index += 1
        results.append([
            index, '{} (id={})'.format(machines.get(s[0], s[0]), s[0]), cpu,
            memory, bootdisk, s[1]
        ])
    titles = [
        'Index', 'VM', 'CPUs', 'Memory (MB)', 'HDD (GB)',
        'Avg. Unixbench Score'
    ]
    collect_results(titles, results, results_dir, 'average-results')
    titles = ['VM', 'Timestamp (epoch)', 'Score']
    results = list()
    for result in raw_results:
        machine_id, avg_score, scores = result
        for timestamp, score in scores:
            results.append([
                '{} (id={})'.format(machines.get(machine_id, machine_id),
                                    machine_id), timestamp, score
            ])
    collect_results(titles, results, results_dir, 'all-results')

    # report results
    with open(os.path.join(results_dir, 'parameters.md'), 'w') as params:
        params.write("# Parameters\n\n")
        for key, value in vars(options).items():
            params.write("- **{}**: {}\n".format(key, value))

    # pushing results to env_repo
    location = options.environment.split('.')[0]
    push_results_to_repo(results_dir, location)
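
Example #6 also calls a safe_get_vm helper with a module-level concurrency object, neither of which appears above. A plausible sketch, assuming the helper only throttles concurrent OpenVCloud API calls with a gevent semaphore:

import gevent.lock

# Assumed module-level throttle for API calls made from many greenlets.
concurrency = gevent.lock.BoundedSemaphore(10)


def safe_get_vm(ovc, semaphore, machine_id):
    # Hypothetical helper: fetch the machine description while holding the
    # semaphore so the API is not hit by all greenlets at once.
    with semaphore:
        return ovc.api.cloudapi.machines.get(machineId=machine_id)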
Example #7
def main(options):
    # Check dependencies
    if not os.path.exists(options.results_dir):
        print(
            "Not all dependencies are met. Make sure the result directory exists."
        )
        return

    if not check_package('sshpass') or not check_package(
            'python3-prettytable'):
        return

    # Prepare test run
    hostname = run_cmd_via_gevent('hostname').replace("\n", "")
    test_num = len(os.listdir('{}'.format(options.results_dir))) + 1
    test_dir = "/" + datetime.datetime.today().strftime('%Y-%m-%d')
    test_dir += "_" + hostname + "_fio_testresults_{}".format(test_num)
    results_dir = options.results_dir + test_dir
    run_cmd_via_gevent('mkdir -p {}'.format(results_dir))

    # list virtual and deployed cloudspaces
    vms = []
    vms_index = set()
    j.clients.itsyouonline.get(data={
        'application_id_': options.application_id,
        'secret_': options.secret
    })
    ovc = j.clients.openvcloud.get(data={
        'address': options.environment,
        'account': options.username
    })
    cloudspaces_per_user = ovc.api.cloudapi.cloudspaces.list()
    cloudspaces_ids = options.cloudspaces
    if cloudspaces_ids:
        cloudspaces_ids = [int(_id) for _id in cloudspaces_ids.split(',')]
        cloudspaces_per_user = [
            cs for cs in cloudspaces_per_user if cs['id'] in cloudspaces_ids
        ]

    for cs in cloudspaces_per_user:
        if cs['name'] == 'template_space':
            continue
        portforwards = ovc.api.cloudapi.portforwarding.list(
            cloudspaceId=cs['id'])
        for pi in portforwards:
            if 'machineId' not in pi or pi['machineId'] in vms_index:
                continue
            vms.append([pi['machineId'], pi['publicIp'], pi['publicPort']])
            vms_index.add(pi['machineId'])

    if len(vms) < options.required_vms:
        print("Not enough vms available to run this test. {} < {}".format(
            len(vms), options.required_vms))
        return
    vms = vms[:options.required_vms]

    # prepare test
    files = [
        '{}/1_fio_vms/Machine_script.py'.format(options.testsuite),
        '{}/1_fio_vms/mount_disks.sh'.format(options.testsuite)
    ]
    pjobs = [
        gevent.spawn(prepare_test, ovc, concurrency, options, files, *vm)
        for vm in vms
    ]
    gevent.joinall(pjobs)

    # mount disks if the filesystem will be used
    if options.type == "filesytem":
        gevent.joinall(
            [gevent.spawn(mount_disks, ovc, options, *vm) for vm in vms])

    # run fio tests
    rjobs = [
        gevent.spawn(fio_test, options, *job.value) for job in pjobs
        if job.value is not None
    ]
    gevent.joinall(rjobs)

    # collect results from machines
    rjobs = [
        gevent.spawn(assemble_fio_test_results, results_dir, *job.value)
        for job in rjobs if job.value is not None
    ]
    gevent.joinall(rjobs)

    # collecting results in csv file
    with open(os.path.join(results_dir, 'parameters.md'), 'w') as params:
        params.write("# Parameters\n\n")
        for key, value in vars(options).items():
            params.write("- **{}**: {}\n".format(key, value))
    cwd = os.getcwd()
    j.sal.fs.copyFile(
        '{}/1_fio_vms/collect_results.py'.format(options.testsuite),
        results_dir)
    os.chdir(results_dir)
    j.sal.process.execute(
        'python3 collect_results.py -dir {} -env {} -u {} -appid {} -secret {}'
        .format(results_dir, options.environment, options.username,
                options.application_id, options.secret))

    # pushing results to env_repo
    os.chdir(cwd)
    location = options.environment.split('.')[0]
    push_results_to_repo(results_dir, location)
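
Each main() variant gates on a check_package helper that is not included above. A minimal sketch, assuming a Debian-based host where dpkg can report installed packages:

import subprocess


def check_package(name):
    # Hypothetical helper: return True if the package is installed,
    # otherwise print a hint and return False so main() can bail out.
    installed = subprocess.call(
        ['dpkg', '-s', name],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL) == 0
    if not installed:
        print('Not all dependencies are met. Please install {}.'.format(name))
    return installed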
Example #8
def main(options):
    from js9 import j

    # Check dependencies
    if not os.path.exists(options.results_dir):
        print(
            "Not all dependencies are met. Make sure the result directory exists."
        )
        return

    if not check_package('sshpass') or not check_package(
            'python3-prettytable'):
        return

    # Prepare test run
    hostname = run_cmd_via_gevent('hostname').replace("\n", "")
    test_num = len(os.listdir('{}'.format(options.results_dir))) + 1
    test_dir = "/" + datetime.datetime.today().strftime('%Y-%m-%d')
    test_dir += "_" + hostname + "_pgbench_testresults_{}".format(test_num)
    results_dir = options.results_dir + test_dir
    run_cmd_via_gevent('mkdir -p {}'.format(results_dir))

    # list virtual and deployed cloudspaces
    vms = []
    vms_index = set()
    j.clients.itsyouonline.get(data={
        'application_id_': options.application_id,
        'secret_': options.secret
    })
    ovc = j.clients.openvcloud.get(data={
        'address': options.environment,
        'account': options.username
    })
    cloudspaces_per_user = ovc.api.cloudapi.cloudspaces.list()
    for cs in cloudspaces_per_user:
        if cs['name'] == 'template_space':
            continue
        portforwards = ovc.api.cloudapi.portforwarding.list(
            cloudspaceId=cs['id'])
        for pi in portforwards:
            if 'machineId' not in pi or pi['machineId'] in vms_index:
                continue
            vms.append([pi['machineId'], pi['publicIp'], pi['publicPort']])
            vms_index.add(pi['machineId'])

    if len(vms) < options.required_vms:
        print("Not enough vms available to run this test. {} < {}".format(
            len(vms), options.required_vms))
        return
    vms = vms[:options.required_vms]

    # prepare test
    files = ['{}/4_pgbench/run_pgbench.sh'.format(options.testsuite)]
    pjobs = [
        gevent.spawn(prepare_test, ovc, concurrency, options, files, *vm)
        for vm in vms
    ]
    gevent.joinall(pjobs)

    # run pgbench tests
    rjobs = [
        gevent.spawn(pgbench, options, *job.value) for job in pjobs
        if job.value is not None
    ]
    gevent.joinall(rjobs)

    # report results
    with open(os.path.join(results_dir, 'parameters.md'), 'w') as params:
        params.write("# Parameters\n\n")
        for key, value in vars(options).items():
            params.write("- **{}**: {}\n".format(key, value))
    total_iops = 0
    with open(os.path.join(results_dir, 'results.csv'), 'w') as csvfile:
        fieldnames = ['machine_id', 'iops']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for job in rjobs:
            machine_id, iops = job.value
            if iops == 0:
                continue  # skip machines with errors
            writer.writerow({'machine_id': machine_id, 'iops': iops})
            total_iops += iops
        writer.writerow({'machine_id': 'total iops', 'iops': total_iops})
    print("==========================")
    print("Total iops: {}".format(total_iops))
    print("==========================")

    # pushing results to env_repo
    location = options.environment.split('.')[0]
    push_results_to_repo(results_dir, location)
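
Every main() ends by handing the results directory to push_results_to_repo, which is also not shown. A rough, heavily assumption-laden sketch of what it presumably does (copy the results into a per-location folder of an environment repository and push it); repo_path and the directory layout are guesses:

import subprocess


def push_results_to_repo(results_dir, location, repo_path='/tmp/env_repo'):
    # Hypothetical sketch only: the real helper may clone, update and
    # structure the environment repository differently.
    target = '{}/{}'.format(repo_path, location)
    subprocess.check_call(['mkdir', '-p', target])
    subprocess.check_call(['cp', '-r', results_dir, target])
    subprocess.check_call(['git', '-C', repo_path, 'add', '-A'])
    subprocess.check_call(['git', '-C', repo_path, 'commit', '-m',
                           'add benchmark results for {}'.format(location)])
    subprocess.check_call(['git', '-C', repo_path, 'push'])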