Example #1
def main():
    """
    Setuptools entry point for annotate-sections; see setup.py
    """
    import sys
    from optparse import OptionParser
    
    #### OPTIONS

    parser = OptionParser()

    parser.add_option('-c', '--coverage', nargs=1, action="store",
                      dest="coverage_file",
                      help='load coverage info from this file',
                      default='.figleaf_sections')

    ####

    (options, args) = parser.parse_args(sys.argv[1:])
    coverage_file = options.coverage_file
    
    figleaf.load_pickled_coverage(open(coverage_file, 'rb'))
    data = internals.CoverageData(figleaf._t)
    full_cov = data.gather_files()

    for filename in args:
        annotate_file_with_sections(filename, data, full_cov)
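The snippet above is an excerpt: figleaf, internals and annotate_file_with_sections are expected to be available at module level in the enclosing file. A minimal self-contained sketch of the same load-and-gather step, restricted to the calls already shown in these examples and assuming the default '.figleaf_sections' path, could look like this:

import figleaf
import figleaf.internals

def print_coverage_summary(coverage_file='.figleaf_sections'):
    # Load the pickled coverage data; this populates figleaf._t.
    with open(coverage_file, 'rb') as fp:
        figleaf.load_pickled_coverage(fp)

    # Group the recorded lines by file: filename -> set of line numbers.
    data = figleaf.internals.CoverageData(figleaf._t)
    full_cov = data.gather_files()

    for filename in sorted(full_cov):
        print '%s: %d lines covered' % (filename, len(full_cov[filename]))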
Example #2
def list_sections(options, match=""):
    """
    List the filenames in the coverage file, optionally limiting the list to
    those files that match the regexp 'match'.
    """
    if options.verbose:
        print >> sys.stderr, '** Reading sections info from sections file %s' % \
                             (options.sections_file,)
        if match:
            print >> sys.stderr, '** Filtering against regexp "%s"' % (match, )

    fp = open(options.sections_file)
    figleaf.load_pickled_coverage(fp)  # @CTB

    data = figleaf.internals.CoverageData(figleaf._t)
    coverage = data.gather_files()
    coverage = filter_coverage(coverage, match)

    for filename in coverage.keys():
        print filename
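filter_coverage is a helper from the same module that is not shown in this excerpt. Judging only from the call site (it takes the coverage dict and a regexp string and returns a coverage dict), a plausible hypothetical implementation would be:

import re

def filter_coverage(coverage, match=""):
    # Hypothetical sketch inferred from the call site above: keep only the
    # files whose name matches the 'match' regexp; an empty pattern keeps all.
    if not match:
        return coverage
    regexp = re.compile(match)
    filtered = {}
    for filename, lines in coverage.items():
        if regexp.search(filename):
            filtered[filename] = lines
    return filtered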
Example #3
def list_sections(options, match=""):
    """
    List the filenames in the coverage file, optionally limiting the list to
    those files that match the regexp 'match'.
    """
    if options.verbose:
        print >> sys.stderr, '** Reading sections info from sections file %s' % \
                             (options.sections_file,)
        if match:
            print >> sys.stderr, '** Filtering against regexp "%s"' % (match,)

    fp = open(options.sections_file)
    figleaf.load_pickled_coverage(fp) # @CTB

    data = figleaf.internals.CoverageData(figleaf._t)
    coverage = data.gather_files()
    coverage = filter_coverage(coverage, match)

    for filename in coverage.keys():
        print filename
def main():
    #### OPTIONS

    parser = OptionParser()

    parser.add_option('-c', '--coverage', nargs=1, action="store",
                      dest="coverage_file",
                      help='load coverage info from this file',
                      default='.figleaf_sections')

    ####

    (options, args) = parser.parse_args(sys.argv[1:])
    coverage_file = options.coverage_file
    
    figleaf.load_pickled_coverage(open(coverage_file, 'rb'))
    data = internals.CoverageData(figleaf._t)
    full_cov = data.gather_files()

    for filename in args:
        annotate_file_with_sections(filename, data, full_cov)
Example #5
def coro(dirpath_src, build_monitor):
    """
    Send errors to build_monitor if the unit tests for supplied modules fail.

    """
    dirpath_internal = da.lwc.discover.path(
                                'internal', dirpath_lwc_root = dirpath_src)
    filepath_ini     = os.path.join(
                                dirpath_internal, 'da', 'check', 'pytest.ini')

    while True:

        build_element = (yield)

        filepath_module = build_element['filepath']

        # Ignore non-python design documents.
        if not da.lwc.file.is_python_file(filepath_module):
            continue

        # Ignore experimental design documents.
        if da.lwc.file.is_experimental(filepath_module):
            continue

        # Check to ensure that the test files, classes and methods are present.
        _check_static_coverage(build_element, build_monitor)

        filepath_test = da.lwc.file.specification_filepath_for(filepath_module)
        if not os.path.isfile(filepath_test):
            continue

        # Ensure the test results dir exists.
        dirpath_log = build_element['dirpath_log']
        da.util.ensure_dir_exists(dirpath_log)

        # Define test result files.
        filepath_pytest_log   = os.path.join(dirpath_log, 'pytest.log')
        filepath_pytest_out   = os.path.join(dirpath_log, 'pytest_out.log')
        filepath_pytest_err   = os.path.join(dirpath_log, 'pytest_err.log')
        filepath_junit_xml    = os.path.join(dirpath_log, 'pytest.junit.xml')
        filepath_pytest_json  = os.path.join(dirpath_log, 'pytest.json')
        filepath_cover_pickle = os.path.join(dirpath_log, 'test_cover.pickle')
        filepath_cover_json   = os.path.join(dirpath_log, 'test_cover.json')

        # Run a py.test session for the current module's test cases.
        with _pytest_context(dirpath_cwd     = dirpath_src,
                             filepath_stdout = filepath_pytest_out,
                             filepath_stderr = filepath_pytest_err):
            exit_code = pytest.main(
                            [filepath_test,
                             '-p', 'da.check.pytest_da',
                             '--capture=no',
                             '-c='             + filepath_ini,
                             '--result-log='   + filepath_pytest_log,
                             '--junit-xml='    + filepath_junit_xml,
                             '--json='         + filepath_pytest_json,
                             '--coverage-log=' + filepath_cover_pickle])

        # Communicate any test case failures.
        if exit_code != 0:
            _report_unit_test_failure(filepath_test,
                                      filepath_pytest_json,
                                      dirpath_src,
                                      build_monitor)

            # The coverage metric is not valid if
            # the test aborted early due to a test
            # criterion failing or some sort of
            # error. We therefore only collect
            # coverage metrics when the test
            # passes.
            #
            continue

        # Get coverage data grouped by file.
        with open(filepath_cover_pickle, 'rb') as figleaf_pickle:
            figleaf.load_pickled_coverage(figleaf_pickle)
        coverage    = figleaf.get_data()
        cov_by_file = coverage.gather_files()

        cov_log = dict()
        for key, value in cov_by_file.items():
            cov_log[key] = list(value)

        with open(filepath_cover_json, 'wt') as file_cover_json:
            file_cover_json.write(json.dumps(cov_log,
                                             indent    = 4,
                                             sort_keys = True))

        # Get the design elements in the current document that require
        # test coverage.
        #
        ast_module      = build_element['ast']
        module_name     = da.python_source.get_module_name(filepath_module)
        design_elements = list(da.python_source.gen_ast_paths_depth_first(
                                                    ast_module, module_name))

        # Work out if the coverage provided by the
        # unit tests is sufficient.
        #
        # Initially, we just check to see that
        # *some* coverage is given for each
        # document. (Module-level coverage)
        #
        # As we mature this system, we will
        # (conditionally) extend checks to
        # ensure minimum standards for function-
        # level coverage and line-level coverage.
        #
        if len(design_elements) > 1 and filepath_module not in cov_by_file:
            relpath_module = build_element['relpath']
            build_monitor.report_nonconformity(
                tool    = 'pytest',
                msg_id  = 'E0204',
                msg     = 'No test coverage for module: ' + relpath_module,
                file    = filepath_test)
            continue
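_pytest_context is a helper from the surrounding project, not part of pytest or figleaf, and its body is not included in the excerpt. Going only by its keyword arguments, it appears to switch the working directory and capture stdout/stderr into log files for the duration of the test run. A rough sketch under those assumptions (the real implementation may differ):

import contextlib
import os
import sys

@contextlib.contextmanager
def _pytest_context(dirpath_cwd, filepath_stdout, filepath_stderr):
    # Hypothetical sketch: run the enclosed block in dirpath_cwd with
    # stdout/stderr redirected to the given log files, then restore both.
    prev_cwd = os.getcwd()
    prev_stdout, prev_stderr = sys.stdout, sys.stderr
    file_stdout = open(filepath_stdout, 'wt')
    file_stderr = open(filepath_stderr, 'wt')
    try:
        os.chdir(dirpath_cwd)
        sys.stdout, sys.stderr = file_stdout, file_stderr
        yield
    finally:
        sys.stdout, sys.stderr = prev_stdout, prev_stderr
        os.chdir(prev_cwd)
        file_stdout.close()
        file_stderr.close()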
Example #6
def coro(dirpath_src, build_monitor):
    """
    Send errors to build_monitor if the unit tests for supplied modules fail.

    """
    dirpath_internal = da.lwc.discover.path('internal',
                                            dirpath_lwc_root=dirpath_src)
    filepath_ini = os.path.join(dirpath_internal, 'da', 'check', 'pytest.ini')

    while True:

        build_unit = (yield)

        filepath_module = build_unit['filepath']

        # Ignore non-python design documents.
        if not da.lwc.file.is_python_file(filepath_module):
            continue

        # Ignore experimental design documents.
        if da.lwc.file.is_experimental(filepath_module):
            continue

        # Ignore documents that failed to parse.
        if 'ast' not in build_unit:
            continue

        # Check to ensure that the test files, classes and methods are present.
        _check_static_coverage(build_unit, build_monitor)

        filepath_test = da.lwc.file.specification_filepath_for(filepath_module)
        if not os.path.isfile(filepath_test):
            continue

        # Ensure the test results dir exists.
        dirpath_log = build_unit['dirpath_log']
        da.util.ensure_dir_exists(dirpath_log)

        # Define test result files.
        filepath_pytest_log = os.path.join(dirpath_log, 'pytest.log')
        filepath_pytest_out = os.path.join(dirpath_log, 'pytest_out.log')
        filepath_pytest_err = os.path.join(dirpath_log, 'pytest_err.log')
        filepath_junit_xml = os.path.join(dirpath_log, 'pytest.junit.xml')
        filepath_pytest_json = os.path.join(dirpath_log, 'pytest.json')
        filepath_cover_pickle = os.path.join(dirpath_log, 'test_cover.pickle')
        filepath_cover_json = os.path.join(dirpath_log, 'test_cover.json')

        # Run a py.test session for the current module's test cases.
        with _pytest_context(dirpath_cwd=dirpath_src,
                             filepath_stdout=filepath_pytest_out,
                             filepath_stderr=filepath_pytest_err):
            exit_code = pytest.main([
                filepath_test,
                '-p',
                'da.check.pytest_da',
                # '-m', 'ci'
                '--capture=no',
                '-c=' + filepath_ini,
                '--result-log=' + filepath_pytest_log,
                '--junit-xml=' + filepath_junit_xml,
                '--json=' + filepath_pytest_json,
                '--coverage-log=' + filepath_cover_pickle
            ])

        # Communicate any test case failures.
        if exit_code != 0:
            _report_unit_test_failure(filepath_test, filepath_pytest_json,
                                      dirpath_src, build_monitor)

            # The coverage metric is not valid if
            # the test aborted early due to a test
            # criterion failing or some sort of
            # error. We therefore only collect
            # coverage metrics when the test
            # passes.
            #
            continue

        # Get coverage data grouped by file.
        with open(filepath_cover_pickle, 'rb') as figleaf_pickle:
            figleaf.load_pickled_coverage(figleaf_pickle)
        coverage = figleaf.get_data()
        cov_by_file = coverage.gather_files()

        cov_log = dict()
        for key, value in cov_by_file.items():
            cov_log[key] = list(value)

        with open(filepath_cover_json, 'wt') as file_cover_json:
            file_cover_json.write(json.dumps(cov_log, indent=4,
                                             sort_keys=True))

        # Get the design elements in the current document that require
        # test coverage.
        #
        ast_module = build_unit['ast']
        module_name = da.python_source.get_module_name(filepath_module)
        design_elements = list(
            da.python_source.gen_ast_paths_depth_first(ast_module,
                                                       module_name))

        # Work out if the coverage provided by the
        # unit tests is sufficient.
        #
        # Initially, we just check to see that
        # *some* coverage is given for each
        # document. (Module-level coverage)
        #
        # As we mature this system, we will
        # (conditionally) extend checks to
        # ensure minimum standards for function-
        # level coverage and line-level coverage.
        #
        if len(design_elements) > 1 and filepath_module not in cov_by_file:
            relpath_module = build_unit['relpath']
            build_monitor.report_nonconformity(
                tool='pytest',
                msg_id=da.check.constants.PYTEST_NO_COVERAGE,
                msg='No test coverage for module: ' + relpath_module,
                path=filepath_test)
            continue
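_report_unit_test_failure is likewise a project helper whose body is not shown. Given the arguments it receives, it presumably reads the pytest JSON report and forwards each failing test case to build_monitor.report_nonconformity. The sketch below rests on assumptions: the report layout (a top-level 'report' object with a 'tests' list of records carrying 'name' and 'outcome') and the message id are illustrative placeholders, not the project's actual values:

import json
import os

def _report_unit_test_failure(filepath_test, filepath_pytest_json,
                              dirpath_src, build_monitor):
    # Hypothetical sketch: report each non-passing test case to the monitor.
    # The JSON structure accessed below is assumed, not taken from the project.
    with open(filepath_pytest_json, 'rt') as file_json:
        report = json.load(file_json)

    relpath_test = os.path.relpath(filepath_test, dirpath_src)
    for test in report.get('report', {}).get('tests', []):
        if test.get('outcome') == 'passed':
            continue
        build_monitor.report_nonconformity(
            tool='pytest',
            msg_id='E0000',            # placeholder, not a real constant
            msg='Test failed: ' + test.get('name', relpath_test),
            path=filepath_test)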