Example #1
def generate_junit_report(args, reports, start_time, end_time, total,
                          junit_file):

    from junit_xml import TestSuite, TestCase
    import sys
    junit_log = []

    junit_prop = {}
    junit_prop['Command Line'] = ' '.join(args)
    junit_prop['Python'] = sys.version.replace('\n', '')
    junit_prop['test_groups'] = []
    junit_prop['Host'] = host.label(mode='all')
    junit_prop['passed_count'] = reports.passed
    junit_prop['failed_count'] = reports.failed
    junit_prop['user-input_count'] = reports.user_input
    junit_prop['expected-fail_count'] = reports.expected_fail
    junit_prop['indeterminate_count'] = reports.indeterminate
    junit_prop['benchmark_count'] = reports.benchmark
    junit_prop['timeout_count'] = reports.timeouts
    junit_prop['test-too-long_count'] = reports.test_too_long
    junit_prop['invalid_count'] = reports.invalids
    junit_prop['wrong-version_count'] = reports.wrong_version
    junit_prop['wrong-build_count'] = reports.wrong_build
    junit_prop['wrong-tools_count'] = reports.wrong_tools
    junit_prop['total_count'] = reports.total
    time_delta = end_time - start_time
    junit_prop['average_test_time'] = str(time_delta / total)
    junit_prop['testing_time'] = str(time_delta)

    for name in reports.results:
        result_type = reports.results[name]['result']
        test_parts = name.split('/')
        test_category = test_parts[-2]
        test_name = test_parts[-1]

        junit_result = TestCase(test_name.split('.')[0])
        junit_result.category = test_category
        if result_type == 'failed' or result_type == 'timeout':
            junit_result.add_failure_info(None,
                                          reports.results[name]['output'],
                                          result_type)

        junit_log.append(junit_result)

    ts = TestSuite('RTEMS Test Suite', junit_log)
    ts.properties = junit_prop
    ts.hostname = host.label(mode='all')

    # write out junit log
    with open(junit_file, 'w') as f:
        TestSuite.to_file(f, [ts], prettyprint=True)
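
The JUnit writer above leans entirely on the junit_xml package. Below is a minimal, self-contained sketch of the same calls it uses (TestCase, add_failure_info, TestSuite.to_file); the test names, output text and file name are invented for illustration, and it assumes only that junit-xml is installed.

from junit_xml import TestSuite, TestCase

# two fake test cases, one of which records a failure the way the example does
cases = [TestCase('hello'), TestCase('ticker')]
cases[1].add_failure_info(None, 'timed out waiting for the clock tick', 'timeout')
suite = TestSuite('RTEMS Test Suite', cases)
suite.properties = {'Host': 'example-host', 'total_count': 2}   # free-form properties
with open('results.junit', 'w') as f:
    TestSuite.to_file(f, [suite], prettyprint=True)
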
Example #2
def run(args, command_path=None):
    import sys
    tests = []
    stdtty = console.save()
    opts = None
    default_exefilter = '*.exe'
    try:
        optargs = {
            '--rtems-tools':
            'The path to the RTEMS tools',
            '--rtems-bsp':
            'The RTEMS BSP to run the test on',
            '--user-config':
            'Path to your local user configuration INI file',
            '--report-path':
            'Report output base path (file extension will be added)',
            '--report-format':
            'Formats in which to report test results in addition to txt: json',
            '--log-mode':
            'Reporting modes, failures (default),all,none',
            '--list-bsps':
            'List the supported BSPs',
            '--debug-trace':
            'Debug trace based on specific flags (console,gdb,output,cov)',
            '--filter':
            'Glob that executables must match to run (default: ' +
            default_exefilter + ')',
            '--stacktrace':
            'Dump a stack trace on a user termination (^C)',
            '--coverage':
            'Perform coverage analysis of test executables.'
        }
        mailer.append_options(optargs)
        opts = options.load(args, optargs=optargs, command_path=command_path)
        mail = None
        output = None
        if opts.find_arg('--mail'):
            mail = mailer.mail(opts)
            # Request these now to generate any errors.
            from_addr = mail.from_address()
            smtp_host = mail.smtp_host()
            to_addr = opts.find_arg('--mail-to')
            if to_addr:
                to_addr = to_addr[1]
            else:
                to_addr = '*****@*****.**'
            output = log_capture()
        report_location = opts.find_arg('--report-path')
        if report_location is not None:
            report_location = report_location[1]

        report_formats = opts.find_arg('--report-format')
        if report_formats is not None:
            if len(report_formats) != 2:
                raise error.general('invalid RTEMS report formats option')
            report_formats = report_formats[1].split(',')
            check_report_formats(report_formats, report_location)
        else:
            report_formats = []
        log.notice('RTEMS Testing - Tester, %s' % (version.string()))
        if opts.find_arg('--list-bsps'):
            bsps.list(opts)
        exe_filter = opts.find_arg('--filter')
        if exe_filter:
            exe_filter = exe_filter[1]
        else:
            exe_filter = default_exefilter
        opts.log_info()
        log.output('Host: ' + host.label(mode='all'))
        executables = find_executables(opts.params(), exe_filter)
        debug_trace = opts.find_arg('--debug-trace')
        if debug_trace:
            if len(debug_trace) != 1:
                debug_trace = debug_trace[1]
            else:
                raise error.general(
                    'no debug flags, can be: console,gdb,output,cov')
        else:
            debug_trace = ''
        opts.defaults['exe_trace'] = debug_trace
        job_trace = 'jobs' in debug_trace.split(',')
        rtems_tools = opts.find_arg('--rtems-tools')
        if rtems_tools is not None:
            if len(rtems_tools) != 2:
                raise error.general('invalid RTEMS tools option')
            rtems_tools = rtems_tools[1]
        bsp = opts.find_arg('--rtems-bsp')
        if bsp is None or len(bsp) != 2:
            raise error.general('RTEMS BSP not provided or an invalid option')
        bsp = config.load(bsp[1], opts)
        bsp_config = opts.defaults.expand(opts.defaults['tester'])
        coverage_enabled = opts.find_arg('--coverage')
        if coverage_enabled:
            cov_trace = 'cov' in debug_trace.split(',')
            if len(coverage_enabled) == 2:
                coverage_runner = coverage.coverage_run(
                    opts.defaults,
                    executables,
                    rtems_tools,
                    symbol_set=coverage_enabled[1],
                    trace=cov_trace)
            else:
                coverage_runner = coverage.coverage_run(opts.defaults,
                                                        executables,
                                                        rtems_tools,
                                                        trace=cov_trace)
        log_mode = opts.find_arg('--log-mode')
        if log_mode:
            if log_mode[1] not in ('failures', 'all', 'none'):
                raise error.general('invalid report mode')
            log_mode = log_mode[1]
        else:
            log_mode = 'failures'
        if len(executables) == 0:
            raise error.general('no executables supplied')
        start_time = datetime.datetime.now()
        total = len(executables)
        reports = report.report(total)
        reporting = 1
        jobs = int(opts.jobs(opts.defaults['_ncpus']))
        exe = 0
        finished = []
        if jobs > len(executables):
            jobs = len(executables)
        while exe < total or len(tests) > 0:
            if exe < total and len(tests) < jobs:
                tst = test_run(exe + 1, total, reports, executables[exe],
                               rtems_tools, bsp, bsp_config, opts)
                exe += 1
                tests += [tst]
                if job_trace:
                    _job_trace(tst, 'create', total, exe, tests, reporting)
                tst.run()
            else:
                dead = [t for t in tests if not t.is_alive()]
                tests[:] = [t for t in tests if t not in dead]
                for tst in dead:
                    if job_trace:
                        _job_trace(tst, 'dead', total, exe, tests, reporting)
                    finished += [tst]
                    tst.reraise()
                del dead
                if len(tests) >= jobs or exe >= total:
                    time.sleep(0.250)
                if len(finished):
                    reporting = report_finished(reports, log_mode, reporting,
                                                finished, job_trace)
        finished_time = datetime.datetime.now()
        reporting = report_finished(reports, log_mode, reporting, finished,
                                    job_trace)
        if reporting < total:
            log.warning('finished jobs do not match total: %d' % (reporting))
            report_finished(reports, log_mode, -1, finished, job_trace)
        reports.summary()
        end_time = datetime.datetime.now()
        average_time = 'Average test time: %s' % (str(
            (end_time - start_time) / total))
        total_time = 'Testing time     : %s' % (str(end_time - start_time))
        log.notice(average_time)
        log.notice(total_time)
        for report_format in report_formats:
            report_formatters[report_format](
                args, reports, start_time, end_time, total,
                '.'.join([report_location, report_format]))

        if mail is not None and output is not None:
            m_arch = opts.defaults.expand('%{arch}')
            m_bsp = opts.defaults.expand('%{bsp}')
            build = ' %s:' % (reports.get_config('build', not_found=''))
            subject = '[rtems-test] %s/%s:%s %s' % (
                m_arch, m_bsp, build, reports.score_card('short'))
            np = 'Not present in test'
            ver = reports.get_config('version', not_found=np)
            build = reports.get_config('build', not_found=np)
            tools = reports.get_config('tools', not_found=np)
            body = [
                total_time, average_time, '', 'Host', '====',
                host.label(mode='all'), '', 'Configuration', '=============',
                'Version: %s' % (ver),
                'Build  : %s' % (build),
                'Tools  : %s' % (tools), '', 'Summary', '=======', '',
                reports.score_card(), '',
                reports.failures(), 'Log', '===', ''
            ] + output.get()
            mail.send(to_addr, subject, os.linesep.join(body))
        if coverage_enabled:
            coverage_runner.run()

    except error.general as gerr:
        print(gerr)
        sys.exit(1)
    except error.internal as ierr:
        print(ierr)
        sys.exit(1)
    except error.exit:
        sys.exit(2)
    except KeyboardInterrupt:
        if opts is not None and opts.find_arg('--stacktrace'):
            print('}} dumping:', threading.active_count())
            for t in threading.enumerate():
                print('}} ', t.name)
            print(stacktraces.trace())
        log.notice('abort: user terminated')
        killall(tests)
        sys.exit(1)
    finally:
        console.restore(stdtty)
    sys.exit(0)
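
The core of run() is the scheduling loop that keeps at most `jobs` test threads alive, reaps the ones that have finished and feeds them to the reporter. Here is a stripped-down sketch of that pattern using plain threading.Thread; the worker, the counts and the sleep interval are invented for illustration, whereas the real code wraps each executable in a test_run object.

import threading
import time

def fake_test(n):
    # stand-in for running one test executable
    time.sleep(0.1 * (n % 3))

total = 8        # hypothetical number of executables
jobs = 3         # hypothetical job limit
started = 0
running = []
finished = []
while started < total or len(running) > 0:
    if started < total and len(running) < jobs:
        # room for another job: start the next test thread
        t = threading.Thread(target=fake_test, args=(started,))
        started += 1
        running.append(t)
        t.start()
    else:
        # reap finished threads, then wait a little before polling again
        dead = [t for t in running if not t.is_alive()]
        running[:] = [t for t in running if t not in dead]
        finished += dead
        if len(running) >= jobs or started >= total:
            time.sleep(0.250)
print('finished: %d of %d' % (len(finished), total))
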
Example #3
def generate_json_report(args, reports, start_time, end_time, total,
                         json_file):
    import json
    import sys
    json_log = {}
    json_log['Command Line'] = " ".join(args)
    json_log['Python'] = sys.version.replace('\n', '')
    json_log['test_groups'] = []
    json_log['Host'] = host.label(mode='all')
    json_log['summary'] = {}
    json_log['summary']['passed_count'] = reports.passed
    json_log['summary']['failed_count'] = reports.failed
    json_log['summary']['user-input_count'] = reports.user_input
    json_log['summary']['expected-fail_count'] = reports.expected_fail
    json_log['summary']['indeterminate_count'] = reports.indeterminate
    json_log['summary']['benchmark_count'] = reports.benchmark
    json_log['summary']['timeout_count'] = reports.timeouts
    json_log['summary']['invalid_count'] = reports.invalids
    json_log['summary']['wrong-version_count'] = reports.wrong_version
    json_log['summary']['wrong-build_count'] = reports.wrong_build
    json_log['summary']['wrong-tools_count'] = reports.wrong_tools
    json_log['summary']['total_count'] = reports.total
    time_delta = end_time - start_time
    json_log['summary']['average_test_time'] = str(time_delta / total)
    json_log['summary']['testing_time'] = str(time_delta)

    result_types = [
        'failed', 'user-input', 'expected-fail', 'indeterminate', 'benchmark',
        'timeout', 'invalid', 'wrong-version', 'wrong-build', 'wrong-tools'
    ]
    json_results = {}
    for result_type in result_types:
        json_log['summary'][result_type] = []

    # collate results for JSON log
    for name in reports.results:
        result_type = reports.results[name]['result']
        test_parts = name.split("/")
        test_category = test_parts[-2]
        test_name = test_parts[-1]
        if result_type != 'passed':
            json_log['summary'][result_type].append(test_name)
        if test_category not in json_results:
            json_results[test_category] = []
        json_result = {}
        # remove the file extension
        json_result["name"] = test_name.split('.')[0]
        json_result["result"] = result_type
        if result_type == "failed" or result_type == "timeout":
            json_result["output"] = reports.results[name]["output"]
        json_results[test_category].append(json_result)

    # convert results to a better format for report generation
    for index, category in enumerate(sorted(json_results.keys()), start=1):
        results_log = {}
        results_log["index"] = index
        results_log["name"] = category
        results_log["results"] = json_results[category]
        json_log["test_groups"].append(results_log)

    # write out JSON log
    with open(json_file, 'w') as outfile:
        json.dump(json_log, outfile, sort_keys=True, indent=4)
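
A hypothetical consumer of the file this function writes, assuming it was saved as report.json (the file name is illustrative); the keys match the ones built above.

import json

with open('report.json') as f:
    data = json.load(f)
summary = data['summary']
print('%s of %s tests passed' % (summary['passed_count'], summary['total_count']))
for group in data['test_groups']:
    # each group carries an index, the test category name and its results
    print('%2d %-20s %d tests' % (group['index'], group['name'], len(group['results'])))
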
Example #4
def generate_yaml_report(args, reports, start_time, end_time, total,
                         yaml_file):
    """ Generates a YAML file containing information about the test run,
    including all test outputs """

    import sys
    import yaml

    def format_output(output_list):
        return "\n".join(output_list).replace("] ", '').replace('=>  ', '')

    yaml_log = {}
    yaml_log['command-line'] = args
    yaml_log['host'] = host.label(mode='all')
    yaml_log['python'] = sys.version.replace('\n', '')
    yaml_log['summary'] = {}
    yaml_log['summary']['passed-count'] = reports.passed
    yaml_log['summary']['failed-count'] = reports.failed
    yaml_log['summary']['user-input-count'] = reports.user_input
    yaml_log['summary']['expected-fail-count'] = reports.expected_fail
    yaml_log['summary']['indeterminate-count'] = reports.indeterminate
    yaml_log['summary']['benchmark-count'] = reports.benchmark
    yaml_log['summary']['timeout-count'] = reports.timeouts
    yaml_log['summary']['test-too-long-count'] = reports.test_too_long
    yaml_log['summary']['invalid-count'] = reports.invalids
    yaml_log['summary']['wrong-version-count'] = reports.wrong_version
    yaml_log['summary']['wrong-build-count'] = reports.wrong_build
    yaml_log['summary']['wrong-tools-count'] = reports.wrong_tools
    yaml_log['summary']['total-count'] = reports.total
    time_delta = end_time - start_time
    yaml_log['summary']['average-test-time'] = str(time_delta / total)
    yaml_log['summary']['testing-time'] = str(time_delta)

    result_types = [
        'failed', 'user-input', 'expected-fail', 'indeterminate', 'benchmark',
        'timeout', 'test-too-long', 'invalid', 'wrong-version', 'wrong-build',
        'wrong-tools'
    ]
    for result_type in result_types:
        yaml_log['summary'][result_type] = []

    result_element = {}
    yaml_log['outputs'] = []

    # process output of each test
    for exe_name in reports.results:
        test_parts = exe_name.split("/")
        test_name = test_parts[-1]
        result_element['executable-name'] = test_name
        result_element['executable-sha512'] = get_hash512(exe_name)
        result_element['execution-start'] = reports.results[exe_name][
            'start'].isoformat()
        result_element['execution-end'] = reports.results[exe_name][
            'end'].isoformat()
        date_diff = reports.results[exe_name]['end'] - reports.results[
            exe_name]['start']
        result_element['execution-duration'] = str(date_diff)
        result_element['execution-result'] = reports.results[exe_name][
            'result']
        result_element['bsp'] = reports.results[exe_name]['bsp']
        result_element['bsp-arch'] = reports.results[exe_name]['bsp_arch']
        result_output = reports.results[exe_name]['output']

        dbg_output = []
        test_output = []
        idxs_output = []  # store indices of given substrings
        for idx, elem in enumerate(result_output):
            if '=> ' in elem:
                idxs_output.append(idx)
            if '*** END' in elem:
                idxs_output.append(idx)

        if len(idxs_output) == 3:  # test executed and has result
            dbg_output = result_output[idxs_output[0]:idxs_output[1]]
            dbg_output.append("=== Executed Test ===")
            dbg_output = dbg_output + result_output[idxs_output[2] +
                                                    1:len(result_output)]
            test_output = result_output[idxs_output[1]:idxs_output[2] + 1]
        else:
            dbg_output = result_output

        result_element['debugger-output'] = format_output(dbg_output)
        result_element['console-output'] = format_output(test_output)
        yaml_log['outputs'].append(result_element)

        result_type = reports.results[exe_name]['result']
        # map "fatal-error" on to "failed"
        if result_type == "fatal-error":
            result_type = "failed"

        if result_type != 'passed':
            yaml_log['summary'][result_type].append(test_name)

        result_element = {}

    with open(yaml_file, 'w') as outfile:
        yaml.dump(yaml_log,
                  outfile,
                  default_flow_style=False,
                  allow_unicode=True)
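
And a matching consumer sketch for the YAML report, assuming PyYAML and a file named report.yaml (both the loader choice and the file name are illustrative); the keys are the ones written above.

import yaml

with open('report.yaml') as f:
    data = yaml.safe_load(f)
print('%s of %s tests passed' % (data['summary']['passed-count'],
                                 data['summary']['total-count']))
for entry in data['outputs']:
    # one entry per executable, including timings and the captured console output
    print(entry['executable-name'], entry['execution-result'],
          entry['execution-duration'])
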
Example #5
def run(command_path = None):
    import sys
    tests = []
    stdtty = console.save()
    opts = None
    default_exefilter = '*.exe'
    try:
        optargs = { '--rtems-tools': 'The path to the RTEMS tools',
                    '--rtems-bsp':   'The RTEMS BSP to run the test on',
                    '--user-config': 'Path to your local user configuration INI file',
                    '--list-bsps':   'List the supported BSPs',
                    '--debug-trace': 'Debug trace based on specific flags',
                    '--stacktrace':  'Dump a stack trace on a user termination (^C)' }
        opts = options.load(sys.argv,
                            optargs = optargs,
                            command_path = command_path)
        log.notice('RTEMS Testing - Run, %s' % (version.str()))
        if opts.find_arg('--list-bsps'):
            bsps.list(opts)
        opts.log_info()
        log.output('Host: ' + host.label(mode = 'all'))
        debug_trace = opts.find_arg('--debug-trace')
        if debug_trace:
            if len(debug_trace) != 1:
                debug_trace = 'output,' + debug_trace[1]
            else:
                raise error.general('no debug flags, can be: console,gdb,output')
        else:
            debug_trace = 'output'
        opts.defaults['debug_trace'] = debug_trace
        rtems_tools = opts.find_arg('--rtems-tools')
        if rtems_tools:
            if len(rtems_tools) != 2:
                raise error.general('invalid RTEMS tools option')
            rtems_tools = rtems_tools[1]
        else:
            rtems_tools = '%{_prefix}'
        bsp = opts.find_arg('--rtems-bsp')
        if bsp is None or len(bsp) != 2:
            raise error.general('RTEMS BSP not provided or an invalid option')
        bsp = config.load(bsp[1], opts)
        bsp_config = opts.defaults.expand(opts.defaults['tester'])
        executables = find_executables(opts.params())
        if len(executables) != 1:
            raise error.general('one executable required, found %d' % (len(executables)))
        opts.defaults['test_disable_header'] = '1'
        reports = report.report(1)
        start_time = datetime.datetime.now()
        opts.defaults['exe_trace'] = debug_trace
        tst = test(1, 1, reports, executables[0], rtems_tools, bsp, bsp_config, opts)
        tst.run()
        end_time = datetime.datetime.now()
        total_time = 'Run time     : %s' % (str(end_time - start_time))
        log.notice(total_time)

    except error.general as gerr:
        print(gerr)
        sys.exit(1)
    except error.internal as ierr:
        print(ierr)
        sys.exit(1)
    except error.exit:
        sys.exit(2)
    except KeyboardInterrupt:
        if opts is not None and opts.find_arg('--stacktrace'):
            print('}} dumping:', threading.active_count())
            for t in threading.enumerate():
                print('}} ', t.name)
            print(stacktraces.trace())
        log.notice('abort: user terminated')
        sys.exit(1)
    finally:
        console.restore(stdtty)
    sys.exit(0)
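
Both run() variants interpret the result of opts.find_arg() the same way: None when the option is absent, a one-element list when it is given without a value, and ['--option', 'value'] when it carries one, which is what the len(...) != 2 checks guard against. The following is a tiny stand-in that illustrates that convention only; it is not the real options.find_arg, and the argument values are invented.

def find_arg(argv, flag):
    # return None, ['--flag'] or ['--flag', 'value'], as the checks above expect
    for i, arg in enumerate(argv):
        if arg == flag:
            if i + 1 < len(argv) and not argv[i + 1].startswith('--'):
                return [flag, argv[i + 1]]
            return [flag]
    return None

bsp = find_arg(['--rtems-bsp', 'xilinx_zynq_a9_qemu'], '--rtems-bsp')
if bsp is None or len(bsp) != 2:
    raise SystemExit('RTEMS BSP not provided or an invalid option')
print('BSP:', bsp[1])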