Example 1
def run_example_and_compare(example_path):
    testlog.info("Example '%s'" % example_path)

    reference_path = reference_output_path(example_path)
    ref_str = ''
    try:
        with open(reference_path) as ref_f:
            ref_str = ref_f.read()
    except (IOError, OSError) as e:
        testlog.info('.......ERROR - reference output cannot be read! - %s' % e)
        return False

    rc, example_out = run_exe(example_path, ['--test',
                                             './examples/sample_exe64.elf'])
    if rc != 0:
        testlog.info('.......ERROR - example returned error code %s' % rc)
        return False

    # Comparison is done on whitespace-separated tokens, so differences in
    # line endings and spacing between the outputs don't matter
    if example_out.split() == ref_str.split():
        return True
    else:
        testlog.info('.......FAIL comparison')
        dump_output_to_temp_files(testlog, example_out)
        return False
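
The example above relies on a handful of helpers defined elsewhere in the test module (testlog, run_exe, reference_output_path, dump_output_to_temp_files). As a rough guide to how it fits together, here is a minimal sketch of run_exe and reference_output_path, assuming run_exe is a thin subprocess wrapper and that reference outputs sit in a reference_output/ directory next to the examples; the directory name and the .out extension are illustrative assumptions, not the project's actual layout.

import os
import subprocess
import sys

def run_exe(exe_path, args):
    # Sketch: run a Python script (or native binary) and capture its output.
    # Returns (returncode, stdout_text); the real helper may choose the
    # interpreter or decode the output differently.
    if exe_path.endswith('.py'):
        cmd = [sys.executable, exe_path] + args
    else:
        cmd = [exe_path] + args
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    return proc.returncode, out.decode('utf-8', 'replace')

def reference_output_path(example_path):
    # Sketch: map examples/foo.py -> examples/reference_output/foo.out
    # (assumed layout).
    base = os.path.splitext(os.path.basename(example_path))[0]
    return os.path.join(os.path.dirname(example_path),
                        'reference_output', base + '.out')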
Example 2
def run_test_on_file(filename, verbose=False):
    """ Runs a test on the given input filename. Return True if all test
        runs succeeded.
    """
    success = True
    testlog.info("Test file '%s'" % filename)
    for option in [
            '-e', '-d', '-s', '-n', '-r', '-x.text', '-p.shstrtab', '-V',
            '--debug-dump=info', '--debug-dump=decodedline',
            '--debug-dump=frames', '--debug-dump=frames-interp']:
        if verbose: testlog.info("..option='%s'" % option)
        # stdouts will be a 2-element list: output of readelf and output
        # of scripts/readelf.py
        stdouts = []
        for exe_path in [READELF_PATH, 'scripts/readelf.py']:
            args = [option, filename]
            if verbose: testlog.info("....executing: '%s %s'" % (
                exe_path, ' '.join(args)))
            rc, stdout = run_exe(exe_path, args)
            if rc != 0:
                testlog.error("@@ aborting - '%s' returned '%s'" % (exe_path, rc))
                return False
            stdouts.append(stdout)
        if verbose: testlog.info('....comparing output...')
        rc, errmsg = compare_output(*stdouts)
        if rc:
            if verbose: testlog.info('.......................SUCCESS')
        else:
            success = False
            testlog.info('.......................FAIL')
            testlog.info('....for option "%s"' % option)
            testlog.info('....Output #1 is readelf, Output #2 is pyelftools')
            testlog.info('@@ ' + errmsg)
            dump_output_to_temp_files(testlog, *stdouts)
    return success
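
run_test_on_file() depends on compare_output(out1, out2), which is expected to return a (success, error_message) pair. The following is a simplified sketch of such a comparator, normalizing each line to whitespace-separated tokens and reporting the first mismatch; the real comparison in the test suite applies additional readelf-specific normalizations, so treat this only as an illustration of the contract.

def compare_output(s1, s2):
    # Sketch of the (success, errmsg) contract used above: compare line by
    # line after collapsing whitespace, and describe the first mismatch.
    lines1 = [line.split() for line in s1.splitlines() if line.strip()]
    lines2 = [line.split() for line in s2.splitlines() if line.strip()]
    if len(lines1) != len(lines2):
        return False, 'line count differs: %d vs %d' % (
            len(lines1), len(lines2))
    for num, (left, right) in enumerate(zip(lines1, lines2)):
        if left != right:
            return False, 'mismatch at non-empty line %d: %r != %r' % (
                num, left, right)
    return True, ''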
Example 3
def run_test_on_file(filename, verbose=False):
    """ Runs a test on the given input filename. Return True if all test
        runs succeeded.
    """
    success = True
    testlog.info("Test file '%s'" % filename)
    for option in [
            '-e', '-d', '-s', '-n', '-r', '-x.text', '-p.shstrtab', '-V',
            '--debug-dump=info', '--debug-dump=decodedline',
            '--debug-dump=frames', '--debug-dump=frames-interp',
            '--debug-dump=aranges', '--debug-dump=pubtypes',
            '--debug-dump=pubnames'
    ]:
        if verbose: testlog.info("..option='%s'" % option)

        # TODO(zlobober): this is a dirty hack to make tests work for ELF core
        # dump notes. Making it work properly requires a pretty deep
        # investigation of how original readelf formats the output.
        if "core" in filename and option == "-n":
            if verbose:
                testlog.warning(
                    "....will fail because corresponding part of readelf.py is not implemented yet"
                )
                testlog.info('.......................SKIPPED')
            continue

        # stdouts will be a 2-element list: output of readelf and output
        # of scripts/readelf.py
        stdouts = []
        for exe_path in [READELF_PATH, 'scripts/readelf.py']:
            args = [option, filename]
            if verbose:
                testlog.info("....executing: '%s %s'" %
                             (exe_path, ' '.join(args)))
            t1 = time.time()
            rc, stdout = run_exe(exe_path, args)
            if verbose: testlog.info("....elapsed: %s" % (time.time() - t1, ))
            if rc != 0:
                testlog.error("@@ aborting - '%s' returned '%s'" %
                              (exe_path, rc))
                return False
            stdouts.append(stdout)
        if verbose: testlog.info('....comparing output...')
        t1 = time.time()
        rc, errmsg = compare_output(*stdouts)
        if verbose: testlog.info("....elapsed: %s" % (time.time() - t1, ))
        if rc:
            if verbose: testlog.info('.......................SUCCESS')
        else:
            success = False
            testlog.info('.......................FAIL')
            testlog.info('....for option "%s"' % option)
            testlog.info('....Output #1 is readelf, Output #2 is pyelftools')
            testlog.info('@@ ' + errmsg)
            dump_output_to_temp_files(testlog, *stdouts)
    return success
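
Compared to the shorter variant in Example 2, this version adds more readelf options, a skip for the not-yet-implemented core-dump notes, and per-step timing (it assumes `import time` at module level). A small driver is enough to run it over a directory of input files; the sketch below reuses run_test_on_file and testlog from the snippet above and assumes a hypothetical test/testfiles_for_readelf directory, simply aggregating the per-file results.

import os
import sys

def main():
    # Hypothetical location of the ELF inputs; adjust to the real layout.
    testfiles_dir = os.path.join('test', 'testfiles_for_readelf')
    verbose = '-v' in sys.argv
    failures = []
    for name in sorted(os.listdir(testfiles_dir)):
        if not run_test_on_file(os.path.join(testfiles_dir, name), verbose):
            failures.append(name)
    if failures:
        testlog.error('%d file(s) failed: %s' % (
            len(failures), ', '.join(failures)))
        return 1
    testlog.info('All files passed')
    return 0

if __name__ == '__main__':
    sys.exit(main())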
Example 5
def run_test_on_file(filename, verbose=False):
    """ Runs a test on the given input filename. Return True if all test
        runs succeeded.
    """
    success = True
    testlog.info("Test file '%s'" % filename)
    for option in [
            '-e', '-d', '-s', '-n', '-r', '-x.text', '-p.shstrtab', '-V',
            '--debug-dump=info', '--debug-dump=decodedline',
            '--debug-dump=frames', '--debug-dump=frames-interp',
            '--debug-dump=aranges', '--debug-dump=pubtypes',
            '--debug-dump=pubnames'
            ]:
        if verbose: testlog.info("..option='%s'" % option)

        # TODO(zlobober): this is a dirty hack to make tests work for ELF core
        # dump notes. Making it work properly requires a pretty deep
        # investigation of how original readelf formats the output.
        if "core" in filename and option == "-n":
            if verbose:
                testlog.warning("....will fail because corresponding part of readelf.py is not implemented yet")
                testlog.info('.......................SKIPPED')
            continue

        # stdouts will be a 2-element list: output of readelf and output
        # of scripts/readelf.py
        stdouts = []
        for exe_path in [READELF_PATH, 'scripts/readelf.py']:
            args = [option, filename]
            if verbose: testlog.info("....executing: '%s %s'" % (
                exe_path, ' '.join(args)))
            t1 = time.time()
            rc, stdout = run_exe(exe_path, args)
            if verbose: testlog.info("....elapsed: %s" % (time.time() - t1,))
            if rc != 0:
                testlog.error("@@ aborting - '%s' returned '%s'" % (exe_path, rc))
                return False
            stdouts.append(stdout)
        if verbose: testlog.info('....comparing output...')
        t1 = time.time()
        rc, errmsg = compare_output(*stdouts)
        if verbose: testlog.info("....elapsed: %s" % (time.time() - t1,))
        if rc:
            if verbose: testlog.info('.......................SUCCESS')
        else:
            success = False
            testlog.info('.......................FAIL')
            testlog.info('....for option "%s"' % option)
            testlog.info('....Output #1 is readelf, Output #2 is pyelftools')
            testlog.info('@@ ' + errmsg)
            dump_output_to_temp_files(testlog, *stdouts)
    return success
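
On a failed comparison, both captured outputs are handed to dump_output_to_temp_files() so they can be inspected or diffed by hand. A plausible sketch of that helper, assuming it writes each output string to its own temporary file and logs the path; the file naming and location are assumptions, not the project's actual behavior.

import os
import tempfile

def dump_output_to_temp_files(testlog, *stdouts):
    # Sketch: persist each captured stdout to a temp file and log its path
    # so a failing run leaves something to diff.
    for num, text in enumerate(stdouts, start=1):
        fd, path = tempfile.mkstemp(prefix='out%d_' % num, suffix='.stdout')
        with os.fdopen(fd, 'w') as stream:
            stream.write(text)
        testlog.info('@@ Output #%d dumped to file: %s' % (num, path))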