Example #1
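pkgtest_cmp compares a saved GnuR test output file with the corresponding FastR output via fuzzy_compare, optionally applying package-specific output filters.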
def pkgtest_cmp(args):
    gnur_filename = args[0]
    fastr_filename = args[1]
    # Optional positional arguments: filter definition file, package name,
    # and a flag for dumping the preprocessed outputs.
    test_output_filters = args[2] if len(args) >= 3 else None
    pkg_name = args[3] if len(args) >= 4 else None
    dump_preprocessed = args[4] if len(args) >= 5 else None

    filters = select_filters_for_package(
        test_output_filters, pkg_name) if test_output_filters else None

    with open(gnur_filename) as f:
        gnur_content = f.readlines()
    with open(fastr_filename) as f:
        fastr_content = f.readlines()
    from fuzzy_compare import fuzzy_compare
    return fuzzy_compare(gnur_content, fastr_content, gnur_filename,
                         fastr_filename, filters, dump_preprocessed)
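A minimal sketch of how pkgtest_cmp might be invoked; the file paths and the package name below are hypothetical:

result = pkgtest_cmp([
    'test_output/gnur/digest/tests/simple.Rout',   # hypothetical GnuR output
    'test_output/fastr/digest/tests/simple.Rout',  # hypothetical FastR output
    'test.output.filter',                          # filter definition file
    'digest',                                      # package name
    None,                                          # dump_preprocessed
])

The return value is whatever fuzzy_compare returns; Example #2 unpacks it as (result, n_tests_passed, n_tests_failed).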
Example #2
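_set_test_status walks the test outputs of every package, using the GnuR results as the reference, assigns a status ("OK", "FAILED", or "INDETERMINATE") to each FastR output file and to each package as a whole, and writes a per-package testfile_status summary file.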
def _set_test_status(fastr_test_info):
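    '''
    Compare the GnuR test outputs (the reference) with the FastR outputs,
    package by package, set the status of each output file and of each
    package, and write a per-package 'testfile_status' summary file.
    '''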
    def _failed_outputs(outputs):
        '''
        Return a single-element list with the path of the first output whose
        status is "FAILED", or False if there is none.
        '''
        for _, testfile_status in outputs.items():
            if testfile_status.status == "FAILED":
                return [testfile_status.abspath]
        return False

    # Collect the GnuR test outputs for every package FastR was tested with.
    gnur_test_info = dict()
    for pkg in fastr_test_info:
        _get_test_outputs('gnur', pkg, gnur_test_info)

    # GnuR is definitive, so drive the comparison off its outputs.
    for pkg in gnur_test_info:
        logging.info('BEGIN checking ' + pkg)
        gnur_test_status = gnur_test_info[pkg]
        fastr_test_status = fastr_test_info[pkg]
        gnur_outputs = gnur_test_status.testfile_outputs
        fastr_outputs = fastr_test_status.testfile_outputs

        gnur_failed_outputs = _failed_outputs(gnur_outputs)
        if gnur_failed_outputs:
            # What this likely means is that some native package is not
            # installed on the system so GNUR can't run the tests.
            # Ideally this never happens.
            logging.info("{0}: GnuR test had .fail outputs: {1}".format(
                pkg, str(gnur_failed_outputs)))

        fastr_failed_outputs = _failed_outputs(fastr_outputs)
        if fastr_failed_outputs:
            # In addition to the similar comment for GNU R, this can happen
            # if, say, the JVM crashes (possible with native code packages)
            logging.info("{0}: FastR test had .fail outputs: {1}".format(
                pkg, str(fastr_failed_outputs)))
            fastr_test_status.set_status_code("FAILED")

        # For each successful GnuR output, compare content (assuming FastR didn't fail).
        for gnur_test_output_relpath, gnur_testfile_status in gnur_outputs.items():

            # If FastR does not have a corresponding test output file ...
            if gnur_test_output_relpath not in fastr_outputs:
                # FastR crashed on this test
                fastr_test_status.set_status_code("FAILED")
                logging.info("{0}: FastR is missing output file: {1}".format(
                    pkg, gnur_test_output_relpath))
                continue

            # Get corresponding FastR test output file
            fastr_testfile_status = fastr_outputs[gnur_test_output_relpath]

            # Can't compare if either GNUR or FastR failed
            if gnur_testfile_status.status == "FAILED":
                fastr_testfile_status.set_status_code("INDETERMINATE")
                continue

            # If the test output file's status is "FAILED" at this point, we know that there was a ".fail" output
            # file, so skip the fuzzy compare.
            if fastr_testfile_status.status == "FAILED":
                # Only the fuzzy compare is affected: a test framework never
                # produces ".fail" output files, so a framework-handled file
                # could not have status "FAILED" here.
                continue

            with open(gnur_testfile_status.abspath) as f:
                gnur_content = f.readlines()
            with open(fastr_testfile_status.abspath) as f:
                fastr_content = f.readlines()

            # parse custom filters from file
            filters = select_filters_for_package(
                os.path.join(_packages_test_project_dir(),
                             "test.output.filter"), pkg)

            # first, parse file and see if a known test framework has been used
            detected, ok, skipped, failed = handle_output_file(
                fastr_testfile_status.abspath, fastr_content)
            if detected:
                # If a test framework is used, also parse the summary generated by GnuR to compare numbers.
                detected, gnur_ok, gnur_skipped, gnur_failed = handle_output_file(
                    gnur_testfile_status.abspath, gnur_content)
                # A total of -1 marks a summary whose numbers could not be parsed.
                fastr_invalid_numbers = ok is None or skipped is None or failed is None
                gnur_invalid_numbers = gnur_ok is None or gnur_skipped is None or gnur_failed is None
                total_fastr = ok + skipped + failed if not fastr_invalid_numbers else -1
                total_gnur = gnur_ok + gnur_skipped + gnur_failed if not gnur_invalid_numbers else -1

                if not fastr_invalid_numbers and total_fastr != total_gnur:
                    logging.info(
                        "Different number of tests executed. FastR = {} vs. GnuR = {}"
                        .format(total_fastr, total_gnur))
                elif fastr_invalid_numbers:
                    logging.info(
                        "FastR reported invalid numbers of executed tests.")

                if fastr_invalid_numbers or total_fastr > total_gnur:
                    # If FastR's numbers are invalid or GnuR ran fewer tests than FastR, we cannot trust the FastR numbers
                    fastr_testfile_status.set_report(0, gnur_skipped,
                                                     gnur_ok + gnur_failed)
                    fastr_test_status.set_status_code("FAILED")
                    fastr_testfile_status.status = "FAILED"
                elif total_fastr < total_gnur:
                    # If FastR ran fewer tests than GnuR, count the missing ones as failed.
                    fastr_testfile_status.set_report(
                        ok, skipped, failed + (total_gnur - total_fastr))
                    fastr_test_status.set_status_code("FAILED")
                    fastr_testfile_status.status = "FAILED"
                else:
                    # The total numbers are equal, so we are fine.
                    fastr_testfile_status.status = "OK"
                    fastr_testfile_status.set_report(ok, skipped, failed)
            else:
                result, n_tests_passed, n_tests_failed = fuzzy_compare(
                    gnur_content,
                    fastr_content,
                    gnur_testfile_status.abspath,
                    fastr_testfile_status.abspath,
                    custom_filters=filters,
                    dump_preprocessed=get_opts().dump_preprocessed)
                if result == -1:
                    logging.info("{0}: content malformed: {1}".format(
                        pkg, gnur_test_output_relpath))
                    fastr_test_status.set_status_code("INDETERMINATE")
                    # we don't know how many tests are in there, so consider the whole file to be one big skipped test
                    fastr_testfile_status.set_report(0, 1, 0)
                elif result != 0:
                    fastr_test_status.set_status_code("FAILED")
                    fastr_testfile_status.status = "FAILED"
                    fastr_testfile_status.set_report(n_tests_passed, 0,
                                                     n_tests_failed)
                    logging.info("{0}: FastR output mismatch: {1}".format(
                        pkg, gnur_test_output_relpath))
                    logging.info("    output mismatch file: {0}".format(
                        join(_pkg_testdir('fastr', pkg),
                             gnur_test_output_relpath)))
                    logging.info("    output mismatch file: {0}".format(
                        join(_pkg_testdir('gnur', pkg),
                             gnur_test_output_relpath)))
                else:
                    fastr_testfile_status.status = "OK"
                    fastr_testfile_status.set_report(n_tests_passed, 0,
                                                     n_tests_failed)

        # We started out as UNKNOWN; anything not FAILED or INDETERMINATE is OK.
        if fastr_test_status.status not in ("INDETERMINATE", "FAILED"):
            fastr_test_status.set_status_code("OK")

        # Write out a file with the test status for each output (that exists).
        with open(join(_pkg_testdir('fastr', pkg), 'testfile_status'), 'w') as f:
            f.write('# <file path> <tests passed> <tests skipped> <tests failed> <test time>\n')
            for fastr_relpath, fastr_testfile_status in fastr_outputs.items():
                logging.info(
                    "generating testfile_status for {0}".format(fastr_relpath))
                test_output_file = join(_pkg_testdir('fastr', pkg), fastr_relpath)

                if os.path.exists(test_output_file):
                    ok, skipped, failed = fastr_testfile_status.get_report()
                    f.write("{0} {1} {2} {3} {4}\n".format(
                        fastr_relpath, ok, skipped, failed,
                        fastr_testfile_status.test_time))
                elif fastr_testfile_status.status == "FAILED":
                    # For status == "FAILED", also try the ".fail" suffix: we do not
                    # know whether the test failed and finished or never finished.
                    relpath_fail = fastr_relpath + ".fail"
                    test_output_file_fail = join(_pkg_testdir('fastr', pkg),
                                                 relpath_fail)
                    if os.path.exists(test_output_file_fail):
                        ok, skipped, failed = fastr_testfile_status.get_report()
                        f.write("{0} {1} {2} {3} {4}\n".format(
                            relpath_fail, ok, skipped, failed,
                            fastr_testfile_status.test_time))
                    else:
                        logging.info("File {0} or {1} does not exist".format(
                            test_output_file, test_output_file_fail))
                else:
                    logging.info(
                        "File {0} does not exist".format(test_output_file))

        with open(join(_pkg_testdir('fastr', pkg), 'test_time'), 'w') as f:
            f.write(str(fastr_test_status.test_time))

        logging.info('END checking ' + pkg)
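For reference, a testfile_status file written by the loop above looks like this; the paths, counts, and timings below are illustrative:

# <file path> <tests passed> <tests skipped> <tests failed> <test time>
tests/testthat.Rout 12 1 0 153.2
tests/examples.Rout.fail 0 0 3 7.5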