Example #1
def compare_test_run(test, idx, iss, output_dir, report):
    '''Compare results for a single run of a single test

    Here, test is a dictionary describing the test (read from the testlist YAML
    file). idx is the iteration index. iss is the chosen instruction set
    simulator (currently supported: spike and ovpsim). output_dir is the base
    output directory (which should contain logs from both the ISS and the test
    run itself). report is the path to the regression report file we're
    writing.

    Returns True if the test run passed and False otherwise.

    '''
    # TODO [Haroon]: Support directed asm and directed_c tests post-sim comparison
    test_name = test['test']
    elf = os.path.join(output_dir,
                       'instr_gen/asm_tests/{}.{}.o'.format(test_name, idx))

    logging.info("Comparing %s/DUT sim result: %s", iss, elf)

    with open(report, 'a') as report_fd:
        report_fd.write('Test binary: {}\n'.format(elf))

    rtl_dir = os.path.join(output_dir, 'rtl_sim',
                           '{}.{}'.format(test_name, idx))

    nb_log = os.path.join(rtl_dir, 'trace_core_nb_load.log')
    rtl_log = os.path.join(rtl_dir, 'trace_core_00000000.log')
    rtl_log_f = os.path.join(rtl_dir, 'trace_core.log')
    rtl_csv = os.path.join(rtl_dir, 'trace_core_00000000.csv')
    uvm_log = os.path.join(rtl_dir, 'sim.log')

    if not os.path.exists(rtl_log):
        with open(report, 'a') as report_fd:
            report_fd.write('{} does not exist\n'.format(rtl_log))
        return False

    try:
        # Fix up the RTL log file with non-blocking load values
        nb_post_fix(rtl_log_f, rtl_log, nb_log)
    except RuntimeError as e:
        with open(report, 'a') as report_fd:
            report_fd.write('Post-fixing of log failed: {}\n'.format(e))

        return False

    try:
        # Convert the RTL log file to a trace CSV.
        process_core_sim_log(rtl_log_f, rtl_csv, 1)
    except RuntimeError as e:
        with open(report, 'a') as report_fd:
            report_fd.write('Log processing failed: {}\n'.format(e))

        return False

    if not os.path.exists(rtl_csv):
        with open(report, 'a') as report_fd:
            report_fd.write('{} does not exist\n'.format(rtl_csv))
        return False

    # Have a look at the UVM log. We should write out a message on failure or
    # if we are stopping at this point.
    no_post_compare = test.get('no_post_compare')
    # if not check_core_uvm_log(uvm_log, "core", test_name, report,
    #                          write=(True if no_post_compare else 'onfail')):
    #    return False

    if no_post_compare:
        return True

    # There were no UVM errors. Process the log file from the ISS.
    iss_dir = os.path.join(output_dir, 'instr_gen', '{}_sim'.format(iss))

    iss_log = os.path.join(iss_dir, '{}.{}.log'.format(test_name, idx))
    iss_csv = os.path.join(iss_dir, '{}.{}.csv'.format(test_name, idx))

    if not os.path.exists(iss_log):
        with open(report, 'a') as report_fd:
            report_fd.write('{} does not exist\n'.format(iss_log))
        return False

    if iss == "spike":
        process_spike_sim_log(iss_log, iss_csv)
    else:
        assert iss == 'ovpsim'  # (should be checked by argparse)
        process_ovpsim_sim_log(iss_log, iss_csv)

    if not os.path.exists(iss_csv):
        with open(report, 'a') as report_fd:
            report_fd.write('{} does not exist\n'.format(iss_csv))
        return False

    compare_result = \
        compare_trace_csv(iss_csv, rtl_csv, iss, "core", report,
                          **test.get('compare_opts', {}))

    # Rather oddly, compare_result is a string. The comparison passed if it
    # starts with '[PASSED]'.
    return compare_result.startswith('[PASSED]')
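A minimal usage sketch for this variant, assuming a test_list parsed from the testlist YAML and an argparse-style args object (both hypothetical, not part of the example above):

failures = 0
for test in test_list:  # entries read from the testlist YAML file
    for idx in range(test.get('iterations', 1)):
        # compare_test_run returns True on pass, False on failure
        if not compare_test_run(test, idx, args.iss,
                                args.output_dir, args.report):
            failures += 1
logging.info('Regression finished with %d failure(s)', failures)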
Example #2
File: sim.py Project: danghai/ibex
def compare_test_run(test, idx, iss, output_dir, report):
    '''Compare results for a single run of a single test

    Here, test is a dictionary describing the test (read from the testlist YAML
    file). idx is the iteration index. iss is the chosen instruction set
    simulator (currently supported: spike and ovpsim). output_dir is the base
    output directory (which should contain logs from both the ISS and the test
    run itself). report is the path to the regression report file we're
    writing.

    Returns True if the test run passed and False otherwise.

    '''
    test_name = test['test']
    elf = os.path.join(output_dir,
                       'instr_gen/asm_tests/{}.{}.o'.format(test_name, idx))

    logging.info("Comparing %s/DUT sim result: %s", iss, elf)

    with open(report, 'a') as report_fd:
        report_fd.write('Test binary: {}\n'.format(elf))

    rtl_dir = os.path.join(output_dir, 'rtl_sim',
                           '{}.{}'.format(test_name, idx))

    rtl_log = os.path.join(rtl_dir, 'trace_core_00000000.log')
    rtl_csv = os.path.join(rtl_dir, 'trace_core_00000000.csv')
    uvm_log = os.path.join(rtl_dir, 'sim.log')

    # Convert the RTL log file to a trace CSV. This raises an exception on
    # failure, so if we get past this call the conversion succeeded.
    process_ibex_sim_log(rtl_log, rtl_csv, 1)

    # Have a look at the UVM log. We should write out a message on failure or
    # if we are stopping at this point.
    no_post_compare = test.get('no_post_compare')
    if not check_ibex_uvm_log(uvm_log,
                              "ibex",
                              test_name,
                              report,
                              write=(True if no_post_compare else 'onfail')):
        return False

    if no_post_compare:
        return True

    # There were no UVM errors. Process the log file from the ISS.
    iss_dir = os.path.join(output_dir, 'instr_gen', '{}_sim'.format(iss))

    iss_log = os.path.join(iss_dir, '{}.{}.log'.format(test_name, idx))
    iss_csv = os.path.join(iss_dir, '{}.{}.csv'.format(test_name, idx))

    if iss == "spike":
        process_spike_sim_log(iss_log, iss_csv)
    else:
        assert iss == 'ovpsim'  # (should be checked by argparse)
        process_ovpsim_sim_log(iss_log, iss_csv)

    compare_result = \
        compare_trace_csv(rtl_csv, iss_csv, "ibex", iss, report,
                          **test.get('compare_opts', {}))

    # Rather oddly, compare_result is a string. The comparison passed if it
    # starts with '[PASSED]'.
    return compare_result.startswith('[PASSED]')
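This variant calls check_ibex_uvm_log itself and treats its result as a boolean. The helper is outside the snippet; here is a hedged stub of the contract implied by the call site (the name is real, but the body and the exact semantics of write are assumptions):

def check_ibex_uvm_log(uvm_log, core_name, test_name, report, write):
    '''Hypothetical stub: scan the UVM log for error markers.

    Returns True when the log looks clean. write appears to control
    report output: True writes a pass/fail line unconditionally,
    'onfail' writes one only when the check fails.
    '''
    passed = True
    with open(uvm_log) as log_fd:
        for line in log_fd:
            if 'UVM_ERROR' in line or 'UVM_FATAL' in line:
                passed = False
                break
    if write is True or (write == 'onfail' and not passed):
        with open(report, 'a') as report_fd:
            report_fd.write('{}: {}\n'.format(
                test_name, 'PASSED' if passed else 'FAILED'))
    return passed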
Example #3
File: sim.py Project: sriyerg/ibex
def compare_test_run(test, idx, iss, output_dir, report):
    '''Compare results for a single run of a single test

    Here, test is a dictionary describing the test (read from the testlist YAML
    file). idx is the iteration index. iss is the chosen instruction set
    simulator (currently supported: spike and ovpsim). output_dir is the base
    output directory (which should contain logs from both the ISS and the test
    run itself). report is the path to the regression report file we're
    writing.

    Returns True if the test run passed and False otherwise.

    '''
    with open(report, 'a') as report_fd:
        test_name = test['test']
        elf = os.path.join(output_dir,
                           'instr_gen/asm_test/{}.{}.o'.format(test_name, idx))

        logging.info("Comparing %s/DUT sim result: %s", iss, elf)

        test_name_idx = '{}.{}'.format(test_name, idx)
        test_underline = '-' * len(test_name_idx)
        report_fd.write('\n{}\n{}\n'.format(test_name_idx, test_underline))
        report_fd.write('Test binary: {}\n'.format(elf))

        rtl_dir = os.path.join(output_dir, 'rtl_sim',
                               '{}.{}'.format(test_name, idx))

        uvm_log = os.path.join(rtl_dir, 'sim.log')

        # Have a look at the UVM log. Report a failure if an issue is seen in the
        # log.
        uvm_pass, uvm_log_lines = check_ibex_uvm_log(uvm_log)

        report_fd.write('sim log: {}\n'.format(uvm_log))

        if not uvm_pass:
            for line in uvm_log_lines:
                report_fd.write(line)
            report_fd.write('[FAILED]: sim error seen\n')

            return False

        rtl_log = os.path.join(rtl_dir, 'trace_core_00000000.log')
        rtl_csv = os.path.join(rtl_dir, 'trace_core_00000000.csv')

        try:
            # Convert the RTL log file to a trace CSV.
            process_ibex_sim_log(rtl_log, rtl_csv, 1)
        except (OSError, RuntimeError) as e:
            report_fd.write('[FAILED]: Log processing failed: {}\n'.format(e))

            return False

        no_post_compare = test.get('no_post_compare', False)
        assert isinstance(no_post_compare, bool)

        # no_post_compare skips the final ISS v RTL log check, so if we've reached
        # here we're done when no_post_compare is set.
        if no_post_compare:
            report_fd.write('[PASSED]\n')
            return True

        # There were no UVM errors. Process the log file from the ISS.
        iss_dir = os.path.join(output_dir, 'instr_gen', '{}_sim'.format(iss))

        iss_log = os.path.join(iss_dir, '{}.{}.log'.format(test_name, idx))
        iss_csv = os.path.join(iss_dir, '{}.{}.csv'.format(test_name, idx))

        try:
            if iss == "spike":
                process_spike_sim_log(iss_log, iss_csv)
            else:
                assert iss == 'ovpsim'  # (should be checked by argparse)
                process_ovpsim_sim_log(iss_log, iss_csv)
        except (OSError, RuntimeError) as e:
            report_fd.write('[FAILED]: Log processing failed: {}\n'.format(e))

            return False

    compare_result = \
        compare_trace_csv(rtl_csv, iss_csv, "ibex", iss, report,
                          **test.get('compare_opts', {}))

    # Rather oddly, compare_result is a string. The comparison passed if it
    # starts with '[PASSED]'.
    return compare_result.startswith('[PASSED]')
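In this version check_ibex_uvm_log no longer writes to the report itself; it returns a (passed, log_lines) pair and leaves all report output to the caller. A hedged stub of that newer contract, inferred from the call sites rather than taken from the ibex sources:

from typing import List, Tuple

def check_ibex_uvm_log(uvm_log: str) -> Tuple[bool, List[str]]:
    '''Hypothetical stub: return (passed, interesting_lines) for a UVM log.'''
    bad_lines = []
    with open(uvm_log) as log_fd:
        for line in log_fd:
            if 'UVM_ERROR' in line or 'UVM_FATAL' in line:
                bad_lines.append(line)
    return (not bad_lines, bad_lines)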
Example #4
def compare_test_run(test: TestEntry,
                     idx: int,
                     seed: int,
                     rtl_log_dir: str,
                     iss: str,
                     iss_log_dir: str,
                     instr_gen_bin_dir: str) -> TestRunResult:
    '''Compare results for a single run of a single test

    Here, test is a dictionary describing the test (read from the testlist YAML
    file). idx is the iteration index and seed is the corresponding seed. iss
    is the chosen instruction set simulator (currently supported: spike and
    ovpsim).

    rtl_log_dir is the directory containing log output from the RTL simulation.
    iss_log_dir is the directory that contains logs for ISS runs.

    Returns a TestRunResult with a pass/fail flag, together with some
    information about the run (to be written to the log file).

    '''
    test_name = test['test']
    assert isinstance(test_name, str)
    uvm_log = os.path.join(rtl_log_dir, 'sim.log')
    elf = os.path.join(instr_gen_bin_dir, '{}_{}.o'.format(test_name, idx))

    rtl_trace = os.path.join(rtl_log_dir, 'trace_core_00000000.log')

    kv_data = {
        'name': test_name,
        'idx': idx,
        'seed': seed,
        'binary': elf,
        'uvm_log': uvm_log,
        'rtl_trace': rtl_trace,
        'rtl_trace_csv': None,
        'iss_trace': None,
        'iss_trace_csv': None,
        'comparison_log': None,
        'passed': False,
        'failure_message': None
    }

    # Have a look at the UVM log. Report a failure if an issue is seen in the
    # log.
    try:
        uvm_pass, uvm_log_lines = check_ibex_uvm_log(uvm_log)
    except IOError as e:
        kv_data['failure_message'] = str(e)
        kv_data['failure_message'] += \
            '\n[FAILED] Could not open simulation log'
        return TestRunResult(**kv_data)

    if not uvm_pass:
        kv_data['failure_message'] = '\n'.join(uvm_log_lines)
        kv_data['failure_message'] += '\n[FAILED]: sim error seen'
        return TestRunResult(**kv_data)

    rtl_trace_csv = os.path.join(rtl_log_dir, 'trace_core_00000000.csv')

    kv_data['rtl_trace_csv'] = rtl_trace_csv
    try:
        # Convert the RTL log file to a trace CSV.
        process_ibex_sim_log(rtl_trace, rtl_trace_csv, 1)
    except (OSError, RuntimeError) as e:
        kv_data['failure_message'] = \
            '[FAILED]: Log processing failed: {}'.format(e)
        return TestRunResult(**kv_data)

    no_post_compare = test.get('no_post_compare', False)
    assert isinstance(no_post_compare, bool)

    # no_post_compare skips the final ISS v RTL log check, so if we've reached
    # here we're done when no_post_compare is set.
    if no_post_compare:
        kv_data['passed'] = True
        return TestRunResult(**kv_data)

    # There were no UVM errors. Process the log file from the ISS.
    iss_log = os.path.join(iss_log_dir, '{}.{}.log'.format(test_name, idx))
    iss_csv = os.path.join(iss_log_dir, '{}.{}.csv'.format(test_name, idx))

    kv_data['iss_trace'] = iss_log
    kv_data['iss_trace_csv'] = iss_csv
    try:
        if iss == "spike":
            process_spike_sim_log(iss_log, iss_csv)
        else:
            assert iss == 'ovpsim'  # (should be checked by argparse)
            process_ovpsim_sim_log(iss_log, iss_csv)
    except (OSError, RuntimeError) as e:
        kv_data['failure_message'] = \
            '[FAILED]: Log processing failed: {}'.format(e)
        return TestRunResult(**kv_data)

    compare_log = os.path.join(rtl_log_dir, 'compare.log')
    kv_data['comparison_log'] = compare_log

    # Delete any existing file at compare_log (the compare_trace_csv function
    # would append to it, which is rather confusing).
    try:
        os.remove(compare_log)
    except FileNotFoundError:
        pass

    compare_result = \
        compare_trace_csv(rtl_trace_csv, iss_csv, "ibex", iss, compare_log,
                          **test.get('compare_opts', {}))

    try:
        with open(compare_log) as compare_log_file:
            compare_log_contents = compare_log_file.read()
    except IOError as e:
        kv_data['failure_message'] = \
            '[FAILED]: Could not read compare log: {}'.format(e)
        return TestRunResult(**kv_data)

    # Rather oddly, compare_result is a string. The comparison passed if it
    # starts with '[PASSED]' and failed otherwise.
    compare_passed = compare_result.startswith('[PASSED]: ')
    if not compare_passed:
        assert compare_result.startswith('[FAILED]: ')
        kv_data['failure_message'] = ('RTL / ISS trace comparison failed\n' +
                                      compare_log_contents)
        return TestRunResult(**kv_data)

    kv_data['passed'] = True
    return TestRunResult(**kv_data)
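The TestRunResult type that this example builds with TestRunResult(**kv_data) sits outside the snippet. Assuming it is a plain named record over exactly the keys populated above, a minimal sketch (the real definition in sim.py may differ):

import collections

TestRunResult = collections.namedtuple('TestRunResult', [
    'name',             # name of the test
    'idx',              # iteration index
    'seed',             # seed for this iteration
    'binary',           # path to the test binary (ELF)
    'uvm_log',          # path to the UVM simulation log
    'rtl_trace',        # path to the RTL core trace
    'rtl_trace_csv',    # CSV conversion of the RTL trace, or None
    'iss_trace',        # path to the ISS log, or None
    'iss_trace_csv',    # CSV conversion of the ISS log, or None
    'comparison_log',   # path to the trace comparison log, or None
    'passed',           # True if the run passed
    'failure_message',  # human-readable failure reason, or None
])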
Example #5
def compare_test_run(test: TestEntry,
                     idx: int,
                     seed: int,
                     rtl_log_dir: str,
                     iss: str,
                     iss_log_dir: str) -> _CompareResult:
    '''Compare results for a single run of a single test

    Here, test is a dictionary describing the test (read from the testlist YAML
    file). idx is the iteration index and seed is the corresponding seed. iss
    is the chosen instruction set simulator (currently supported: spike and
    ovpsim).

    rtl_log_dir is the directory containing log output from the RTL simulation.
    iss_log_dir is the directory that contains logs for ISS runs.

    Returns a _CompareResult with a pass/fail flag, together with some
    information about the run (to be written to the log file).

    '''
    test_name = test['test']
    assert isinstance(test_name, str)
    uvm_log = os.path.join(rtl_log_dir, 'sim.log')

    kv_data = {
        'test name': test_name,
        'iteration': str(idx),
        'seed': str(seed),
        'UVM log': uvm_log
    }

    # Have a look at the UVM log. Report a failure if an issue is seen in the
    # log.
    uvm_pass, uvm_log_lines = check_ibex_uvm_log(uvm_log)
    if not uvm_pass:
        return (False, 'simulation error', kv_data)

    rtl_log = os.path.join(rtl_log_dir, 'trace_core_00000000.log')
    rtl_csv = os.path.join(rtl_log_dir, 'trace_core_00000000.csv')

    kv_data['rtl log'] = rtl_log
    kv_data['rtl csv'] = rtl_csv
    try:
        # Convert the RTL log file to a trace CSV.
        process_ibex_sim_log(rtl_log, rtl_csv, 1)
    except (OSError, RuntimeError) as e:
        return (False, f'RTL log processing failed ({e})', kv_data)

    no_post_compare = test.get('no_post_compare', False)
    assert isinstance(no_post_compare, bool)

    # no_post_compare skips the final ISS v RTL log check, so if we've reached
    # here we're done when no_post_compare is set.
    if no_post_compare:
        return (True, None, kv_data)

    # There were no UVM errors. Process the log file from the ISS.
    iss_log = os.path.join(iss_log_dir, '{}.{}.log'.format(test_name, idx))
    iss_csv = os.path.join(iss_log_dir, '{}.{}.csv'.format(test_name, idx))

    kv_data['ISS log'] = iss_log
    kv_data['ISS csv'] = iss_csv
    try:
        if iss == "spike":
            process_spike_sim_log(iss_log, iss_csv)
        else:
            assert iss == 'ovpsim'  # (should be checked by argparse)
            process_ovpsim_sim_log(iss_log, iss_csv)
    except (OSError, RuntimeError) as e:
        return (False, f'ISS log processing failed ({e})', kv_data)

    compare_log = os.path.join(rtl_log_dir, 'compare.log')
    kv_data['comparison log'] = compare_log

    # Delete any existing file at compare_log (the compare_trace_csv function
    # would append to it, which is rather confusing).
    try:
        os.remove(compare_log)
    except FileNotFoundError:
        pass

    compare_result = \
        compare_trace_csv(rtl_csv, iss_csv, "ibex", iss, compare_log,
                          **test.get('compare_opts', {}))

    # Rather oddly, compare_result is a string. The comparison passed if it
    # starts with '[PASSED]' and failed otherwise.
    compare_passed = compare_result.startswith('[PASSED]: ')
    if not compare_passed:
        assert compare_result.startswith('[FAILED]: ')
        # compare_result[10:] will look like "123 matched, 321 mismatch",
        # meaning that 123 instructions matched and 321 instructions didn't.
        kv_data['compared instructions'] = compare_result[10:]
        return (False, 'mismatch between ISS and RTL', kv_data)

    # compare_result[10:] will look like "123 matched", meaning that 123
    # instructions matched.
    kv_data['compared instructions'] = compare_result[10:]
    return (True, None, kv_data)
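Finally, this variant returns a (passed, failure_reason, kv_data) triple. A hedged sketch of the _CompareResult alias and of how a caller might fold one result into the report (the helper name is hypothetical):

from typing import Dict, Optional, Tuple

_CompareResult = Tuple[bool, Optional[str], Dict[str, str]]

def write_compare_result(report_fd, result: _CompareResult) -> bool:
    '''Hypothetical helper: render one comparison result into the report.'''
    passed, reason, kv_data = result
    for key, value in kv_data.items():
        report_fd.write('{}: {}\n'.format(key, value))
    report_fd.write('[PASSED]\n' if passed
                    else '[FAILED]: {}\n'.format(reason))
    return passed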