Example #1
def test_report_invalid_file(invalid_filepath):
    # verify that DarshanReport raises an appropriate
    # error when given an invalid file path

    with pytest.raises(RuntimeError, match='Failed to open file'):
        darshan.DarshanReport(invalid_filepath)
Example #2
def test_metadata():
    """Sample for an expected property in counters."""

    report = darshan.DarshanReport("tests/input/sample.darshan")

    # check a metadata field
    assert 4478544 == report.metadata['job']['jobid']
Example #3
def test_load_records():
    """Test if loaded records match."""

    report = darshan.DarshanReport("tests/input/sample.darshan")

    report.mod_read_all_records("POSIX")

    assert 1 == len(report.data['records']['POSIX'])
Example #4
def test_modules():
    """Sample for an expected number of modules."""

    report = darshan.DarshanReport("tests/input/sample.darshan")

    # check if number of modules matches
    assert 4 == len(report.modules)
    assert 154 == report.modules['MPI-IO']['len']
Example #5
def test_unsupported_record_load(caplog, unsupported_record):
    # check for an appropriate logger warning when
    # attempting to load an unsupported record
    report = darshan.DarshanReport("tests/input/sample.darshan")
    report.mod_read_all_records(mod=unsupported_record)
    for record in caplog.records:
        assert 'Currently unsupported' in record.message
        assert unsupported_record in record.message
Example #6
def test_internal_references():
    """
    Test if the reference ids match. This test mainly serves to make
    regressions verbose when the behavior is changed.
    """

    report = darshan.DarshanReport()

    # check that the convenience refs are working fine
    check = id(report.records) == id(report.data['records'])
    assert check is True
Example #7
def main(args=None):

    if args is None:
        parser = argparse.ArgumentParser(description='')
        setup_parser(parser)
        args = parser.parse_args()

    if args.debug:
        print(args)

    report = darshan.DarshanReport(args.input,
                                   read_all=True)  # Default behavior
    print(report.to_json())
Example #8
def main(args=None):
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('input',
                        help='darshan log file',
                        nargs='?',
                        default='ior_hdf5_example.darshan')
    parser.add_argument('--debug', help='', action='store_true')
    args = parser.parse_args()

    if args.debug:
        print(args)

    report = darshan.DarshanReport(args.input, read_all=True, dtype="numpy")
    print(report.to_json())
Example #9
def main(args=None):

    if args is None:
        parser = argparse.ArgumentParser(description='')
        setup_parser(parser)
        args = parser.parse_args()


    if args.debug:
        print(args)

    report = darshan.DarshanReport(args.input, read_all=True)  # Default behavior

    for nrec, path in report.name_records.items():
        print("{:<20} => {}".format(nrec, path))
Example #10
def test_deepcopy_fidelity_darshan_report(key, subkey):
    # regression guard for the __deepcopy__() method
    # of DarshanReport class
    # note that to_numpy() also performs a deepcopy
    report = darshan.DarshanReport("tests/input/sample.darshan")
    report_deepcopy = copy.deepcopy(report)
    # the deepcopied records should be identical
    # within floating point tolerance
    assert_allclose(report_deepcopy.data['records'][key].to_numpy()[0][subkey],
                    report.data['records'][key].to_numpy()[0][subkey])
    # a deepcopy should not share memory bounds
    # with the original object (or deepcopies thereof)
    assert not np.may_share_memory(
        report_deepcopy.data['records'][key].to_numpy()[0][subkey],
        report.data['records'][key].to_numpy()[0][subkey])
Example #11
def test_info_contents(capsys):
    # regression guard for the output from the info()
    # method of DarshanReport
    report = darshan.DarshanReport("tests/input/sample.darshan")
    report.info()
    captured = capsys.readouterr()
    expected_keys = [
        'Times', 'Executable', 'Processes', 'JobID', 'UID', 'Modules in Log',
        'Loaded Records', 'Name Records', 'Darshan/Hints', 'DarshanReport'
    ]

    expected_values = ['2048', '4478544', '69615']
    expected_strings = expected_keys + expected_values

    for expected_string in expected_strings:
        assert expected_string in captured.out
Example #12
def test_json_fidelity():
    # regression test for provision of appropriate
    # data by to_json() method of DarshanReport class
    report = darshan.DarshanReport("tests/input/sample.darshan")
    actual_json = report.to_json()

    for expected_key in [
            "version", "metadata", "job", "uid", "start_time", "end_time",
            "nprocs"
    ]:
        assert expected_key in actual_json

    for expected_value in [
            '69615', '1490000867', '1490000983', '2048', 'lustre', 'dvs',
            'rootfs'
    ]:
        assert expected_value in actual_json
Example #13
#!/usr/bin/env python3

import darshan

r = darshan.DarshanReport("ior_hdf5_example.darshan")
r.info()
Example #14
#!/usr/bin/env python3

import darshan
from darshan.experimental.plots.matplotlib import *

darshan.enable_experimental()

r = darshan.DarshanReport("ior_hdf5_example.darshan", dtype="numpy")
plot_opcounts(r).show()