Example #1
def output_results(invoke_list, prm_host_set, prm_thread_count, pct_files_min):
    if len(invoke_list) < 1:
        raise SMFResultException('no pickled invokes read, so no results')
    my_host_invoke = invoke_list[0]  # pick a representative one
    total_files = 0
    total_records = 0
    max_elapsed_time = 0.0
    worst_status = 'ok'  # latch any per-thread error seen across the loop
    for invk in invoke_list:  # for each parallel SmallfileWorkload

        # add up work that it did
        # and determine time interval over which test ran

        assert isinstance(invk, smallfile.SmallfileWorkload)
        status = 'ok'
        if invk.status:
            status = 'ERR: ' + os.strerror(invk.status)
            worst_status = status  # remember that at least one thread failed
        fmt = 'host = %s,thr = %s,elapsed = %f'
        fmt += ',files = %d,records = %d,status = %s'
        print(fmt % (invk.onhost, invk.tid, invk.elapsed_time,
                     invk.filenum_final, invk.rq_final, status))
        total_files += invk.filenum_final
        total_records += invk.rq_final
        max_elapsed_time = max(max_elapsed_time, invk.elapsed_time)

    print('total threads = %d' % len(invoke_list))
    print('total files = %d' % total_files)
    # determine the record size actually used for transfers: fall back to
    # the total file size if no record size was given, then cap it at the
    # largest I/O buffer the workload will allocate
    rszkb = my_host_invoke.record_sz_kb
    if rszkb == 0:
        rszkb = my_host_invoke.total_sz_kb
    if rszkb * my_host_invoke.BYTES_PER_KB > my_host_invoke.biggest_buf_size:
        rszkb = my_host_invoke.biggest_buf_size / my_host_invoke.BYTES_PER_KB
    if total_records > 0:
        total_data_gb = total_records * rszkb * 1.0 / KB_PER_GB
        print('total data = %9.3f GB' % total_data_gb)
    if not prm_host_set:
        prm_host_set = ['localhost']  # local run, no remote hosts given
    if len(invoke_list) < len(prm_host_set) * prm_thread_count:
        print('WARNING: failed to get some responses from workload generators')
    max_files = my_host_invoke.iterations * len(invoke_list)
    pct_files = 100.0 * total_files / max_files
    print('%6.2f%% of requested files processed, minimum is %6.2f' %
          (pct_files, pct_files_min))
    if worst_status != 'ok':
        raise SMFResultException(
            'at least one thread encountered error, test may be incomplete')
    if max_elapsed_time > 0.001:  # can't compute rates if it ended too quickly
        print('%f sec elapsed time' % max_elapsed_time)
        files_per_sec = total_files / max_elapsed_time
        print('%f files/sec' % files_per_sec)
        if total_records > 0:
            iops = total_records / max_elapsed_time
            print('%f IOPS' % iops)
            mb_per_sec = iops * rszkb / 1024.0
            print('%f MB/sec' % mb_per_sec)
    if worst_status == 'ok' and pct_files < pct_files_min:
        raise SMFResultException(
            'not enough total files processed, change test parameters')
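
A minimal driver sketch for Example #1 follows. It assumes the per-thread results were pickled to files, one per thread, as the exception message suggests; the file names, host list, thread count, and threshold below are hypothetical, for illustration only.

import pickle

# Hypothetical driver: load one pickled SmallfileWorkload per thread,
# then summarize them (paths and parameters are illustrative only).
invoke_list = []
for path in ['host1_thr_00.pickle', 'host1_thr_01.pickle']:
    with open(path, 'rb') as f:
        invoke_list.append(pickle.load(f))

# prm_host_set: hosts that ran workers; prm_thread_count: threads per host;
# pct_files_min: lowest acceptable percentage of requested files processed
output_results(invoke_list, ['host1'], 2, 70.0)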
Example #2
def output_results(invoke_list, test_params):
    if len(invoke_list) < 1:
        raise SMFResultException('no pickled invokes read, so no results')
    my_host_invoke = invoke_list[0]  # pick a representative one
    rszkb = my_host_invoke.record_sz_kb
    if rszkb == 0:
        rszkb = my_host_invoke.total_sz_kb
    if rszkb * my_host_invoke.BYTES_PER_KB > my_host_invoke.biggest_buf_size:
        rszkb = my_host_invoke.biggest_buf_size / my_host_invoke.BYTES_PER_KB

    rslt = {}
    rslt['in-host'] = {}
    per_host_stats = {}  # result_stats object for each host, keyed by hostname
    cluster = result_stats()

    for invk in invoke_list:  # for each parallel SmallfileWorkload

        # add up work that it did
        # and determine time interval over which test ran

        assert isinstance(invk, smallfile.SmallfileWorkload)
        if invk.status:
            status = 'ERR: ' + os.strerror(invk.status)
        else:
            status = 'ok'
        fmt = 'host = %s,thr = %s,elapsed = %f'
        fmt += ',files = %d,records = %d,status = %s'
        print(fmt %
              (invk.onhost, invk.tid, invk.elapsed_time,
               invk.filenum_final, invk.rq_final, status))

        per_thread = result_stats()
        per_thread.get_from_invoke(invk, rszkb)

        # for JSON, show nesting of threads within hosts

        try:
            per_host_json = rslt['in-host'][invk.onhost]
            per_host = per_host_stats[invk.onhost]
        except KeyError:
            # first thread seen on this host: create its JSON node and stats
            per_host_json = {'in-thread': {}}
            rslt['in-host'][invk.onhost] = per_host_json
            per_host = result_stats()
            per_host_stats[invk.onhost] = per_host

        # update per-host stats in JSON

        per_host.add_to(per_thread)
        per_host.add_to_dict(per_host_json)

        # insert per-thread stats into JSON

        per_thread_json = {}
        per_host_json['in-thread'][invk.tid] = per_thread_json
        per_thread.add_to_dict(per_thread_json)
        
        # aggregate to get stats for entire cluster

        cluster.add_to(per_thread)
        cluster.add_to_dict(rslt)

    # if there is only 1 host in the results and no host was specified,
    # remove that level from the result hierarchy; it is not needed

    if len(rslt['in-host']) == 1 and test_params.host_set is None:
        hostkey = list(rslt['in-host'].keys())[0]
        threads_in_host = rslt['in-host'][hostkey]['in-thread']
        rslt['in-thread'] = threads_in_host
        del rslt['in-host']

    print('total threads = %d' % len(invoke_list))
    rslt['total-threads'] = len(invoke_list)

    print('total files = %d' % cluster.files)

    if cluster.records > 0:
        print('total IOPS = %d' % cluster.IOPS)
        total_data_gb = cluster.records * rszkb * 1.0 / KB_PER_GB
        print('total data = %9.3f GiB' % total_data_gb)
        rslt['total-data-GB'] = total_data_gb

    if not test_params.host_set:
        test_params.host_set = ['localhost']
    json_test_params = deepcopy(test_params)
    json_test_params.host_set = ','.join(test_params.host_set)

    if len(invoke_list) < len(test_params.host_set) * test_params.thread_count:
        print('WARNING: failed to get some responses from workload generators')
    max_files = my_host_invoke.iterations * len(invoke_list)
    pct_files = 100.0 * cluster.files / max_files
    print('%6.2f%% of requested files processed, warning threshold is %6.2f' %
          (pct_files, smallfile.pct_files_min))
    rslt['pct-files-done'] = pct_files

    print('elapsed time = %9.3f' % cluster.elapsed)
    rslt['start-time'] = test_params.test_start_time
    rslt['status'] = os.strerror(cluster.status)

    # output start time in elasticsearch-friendly format

    rslt['date'] = time.strftime('%Y-%m-%dT%H:%M:%S.000Z',
                                 time.gmtime(test_params.test_start_time))

    # don't output meaningless fields

    if cluster.elapsed < 0.001:  # can't compute rates if it ended too quickly
        print('WARNING: test must run longer than a millisecond')
    else:
        print('files/sec = %f' % cluster.files_per_sec)
        if cluster.records > 0:
            print('IOPS = %f' % cluster.IOPS)
            print('MiB/sec = %f' % cluster.MiBps)

    # if JSON output requested, generate it here

    if test_params.output_json:
        json_obj = json_test_params.to_json()
        json_obj['results'] = rslt
        with open(test_params.output_json, 'w') as jsonf:
            json.dump(json_obj, jsonf, indent=4)

    # finally, throw exceptions if something bad happened
    # wait until here to do it so we can see test results

    if cluster.status != OK:
        print('WARNING: at least one thread encountered error, '
              'test may be incomplete')
    elif pct_files < smallfile.pct_files_min:
        print('WARNING: not enough total files processed '
              'before 1st thread finished, change test parameters')
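
Example #2 depends on a result_stats helper that is not shown here. Below is a guessed sketch of that class, reconstructed purely from how the example calls it; the real definition in the smallfile project may differ, and OK == 0 is an assumption.

OK = 0  # assumed "no error" status value

class result_stats:
    def __init__(self):
        self.files = 0        # files processed
        self.records = 0      # I/O requests completed
        self.elapsed = 0.0    # elapsed time of slowest thread seen so far
        self.status = OK      # worst per-thread errno seen so far
        self.rszkb = 0        # record size (KB) used for throughput math

    def get_from_invoke(self, invk, rszkb):
        # capture one thread's counters from its SmallfileWorkload
        self.files = invk.filenum_final
        self.records = invk.rq_final
        self.elapsed = invk.elapsed_time
        self.status = invk.status if invk.status else OK
        self.rszkb = rszkb

    def add_to(self, other):
        # aggregate another stats object into this one
        self.files += other.files
        self.records += other.records
        self.elapsed = max(self.elapsed, other.elapsed)
        self.rszkb = max(self.rszkb, other.rszkb)
        if other.status != OK:
            self.status = other.status

    @property
    def files_per_sec(self):
        return self.files / self.elapsed if self.elapsed > 0.001 else 0.0

    @property
    def IOPS(self):
        return self.records / self.elapsed if self.elapsed > 0.001 else 0.0

    @property
    def MiBps(self):
        return self.IOPS * self.rszkb / 1024.0

    def add_to_dict(self, d):
        # mirror the aggregated counters into a JSON-serializable dict
        d['files'] = self.files
        d['records'] = self.records
        d['elapsed'] = self.elapsed
        d['files-per-sec'] = self.files_per_sec
        d['IOPS'] = self.IOPS
        d['MiB-per-sec'] = self.MiBps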
Example #3
def output_results(invoke_list, test_params):
    if len(invoke_list) < 1:
        raise SMFResultException('no pickled invokes read, so no results')
    my_host_invoke = invoke_list[0]  # pick a representative one
    total_files = 0
    total_records = 0
    max_elapsed_time = 0.0
    worst_status = 'ok'  # latch any per-thread error seen across the loop
    rslt = {}
    rslt['hosts'] = {}

    for invk in invoke_list:  # for each parallel SmallfileWorkload

        # add up work that it did
        # and determine time interval over which test ran

        assert isinstance(invk, smallfile.SmallfileWorkload)
        if invk.status:
            status = 'ERR: ' + os.strerror(invk.status)
            worst_status = status  # remember that at least one thread failed
        else:
            status = 'ok'  # compute fresh so later threads don't show stale errors
        fmt = 'host = %s,thr = %s,elapsed = %f'
        fmt += ',files = %d,records = %d,status = %s'
        print(fmt % (invk.onhost, invk.tid, invk.elapsed_time,
                     invk.filenum_final, invk.rq_final, status))
        per_thread_obj = {}
        per_thread_obj['elapsed'] = invk.elapsed_time
        per_thread_obj['filenum-final'] = invk.filenum_final
        per_thread_obj['records'] = invk.rq_final
        per_thread_obj['status'] = status

        # for JSON, show nesting of threads within hosts

        try:
            per_host_results = rslt['hosts'][invk.onhost]
        except KeyError:
            per_host_results = {'threads': {}}
            rslt['hosts'][invk.onhost] = per_host_results
        per_host_results['threads'][invk.tid] = per_thread_obj

        # aggregate to get stats for whole run

        total_files += invk.filenum_final
        total_records += invk.rq_final
        max_elapsed_time = max(max_elapsed_time, invk.elapsed_time)

    print('total threads = %d' % len(invoke_list))
    rslt['total-threads'] = len(invoke_list)

    print('total files = %d' % total_files)
    rslt['total-files'] = total_files

    print('total I/O requests = %d' % total_records)
    rslt['total-io-requests'] = total_records

    rszkb = my_host_invoke.record_sz_kb
    if rszkb == 0:
        rszkb = my_host_invoke.total_sz_kb
    if rszkb * my_host_invoke.BYTES_PER_KB > my_host_invoke.biggest_buf_size:
        rszkb = my_host_invoke.biggest_buf_size / my_host_invoke.BYTES_PER_KB
    if total_records > 0:
        total_data_gb = total_records * rszkb * 1.0 / KB_PER_GB
        print('total data = %9.3f GiB' % total_data_gb)
        rslt['total-data-GB'] = total_data_gb
    if not test_params.host_set:
        test_params.host_set = ['localhost']
    if len(invoke_list) < len(test_params.host_set) * test_params.thread_count:
        print('WARNING: failed to get some responses from workload generators')
    max_files = my_host_invoke.iterations * len(invoke_list)
    pct_files = 100.0 * total_files / max_files
    print('%6.2f%% of requested files processed, minimum is %6.2f' %
          (pct_files, smallfile.pct_files_min))
    rslt['pct-files-done'] = pct_files
    print('elapsed time = %9.3f' % max_elapsed_time)
    rslt['elapsed-time'] = max_elapsed_time
    if max_elapsed_time > 0.001:  # can't compute rates if it ended too quickly
        files_per_sec = total_files / max_elapsed_time
        print('files/sec = %f' % files_per_sec)
        rslt['files-per-sec'] = files_per_sec
        if total_records > 0:
            iops = total_records / max_elapsed_time
            print('IOPS = %f' % iops)
            rslt['total-IOPS'] = iops
            mb_per_sec = iops * rszkb / 1024.0
            print('MiB/sec = %f' % mb_per_sec)
            rslt['total-MiBps'] = mb_per_sec

    # if JSON output requested, generate it here

    if test_params.output_json:
        json_obj = test_params.to_json()
        json_obj['results'] = rslt
        with open(test_params.output_json, 'w') as jsonf:
            json.dump(json_obj, jsonf, indent=4)

    # finally, throw exceptions if something bad happened
    # wait until here to do it so we can see test results

    if worst_status != 'ok':
        raise SMFResultException(
            'at least one thread encountered error, test may be incomplete')
    if worst_status == 'ok' and pct_files < smallfile.pct_files_min:
        raise SMFResultException(
            'not enough total files processed before 1st thread finished, '
            'change test parameters')
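
For orientation, the JSON file written by Example #3 nests per-thread results under each host alongside the run-wide totals. A sketch of its shape, with purely illustrative values (the top level also carries whatever test_params.to_json() returns):

# Hypothetical shape of the JSON written by Example #3 (values illustrative);
# the keys come from the rslt dict built above.
example_output = {
    'results': {
        'total-threads': 2,
        'total-files': 2000,
        'total-io-requests': 2000,
        'total-data-GB': 0.031,
        'pct-files-done': 100.0,
        'elapsed-time': 4.2,
        'files-per-sec': 476.19,
        'total-IOPS': 476.19,
        'total-MiBps': 7.44,
        'hosts': {
            'localhost': {
                'threads': {
                    '00': {'elapsed': 4.2, 'filenum-final': 1000,
                           'records': 1000, 'status': 'ok'},
                    '01': {'elapsed': 4.1, 'filenum-final': 1000,
                           'records': 1000, 'status': 'ok'},
                },
            },
        },
    },
}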