Example #1
    def preprocess_coverage_data(coverage):
        '''
        Preprocess the given coverage data.

        Preprocessing includes structuring the coverage data by directory
        for better performance as well as computing coverage summaries per directory.

        @type coverage: dict
        @param coverage: Coverage Data

        @rtype: dict
        @return: Preprocessed coverage data
        '''

        ret = {"children": {}}

        if "source_files" in coverage:
            # Coveralls format
            source_files = coverage["source_files"]

            # Process every source file and store the coverage data in our tree structure
            for source_file in source_files:

                # Split the filename into path parts and file part
                name = source_file["name"]
                name_parts = name.split(os.sep)
                path_parts = name_parts[:-1]
                file_part = name_parts[-1]

                # Start at the top of the tree for the path walking
                ptr = ret["children"]

                # Walk the tree down, one path part at a time and create parts
                # on the fly if they don't exist yet in our tree.
                for path_part in path_parts:
                    if path_part not in ptr:
                        ptr[path_part] = {"children": {}}

                    ptr = ptr[path_part]["children"]

                ptr[file_part] = {
                    "coverage":
                    [-1 if x is None else x for x in source_file["coverage"]]
                }

        else:
            raise RuntimeError("Unknown coverage format")

        # Now we need to calculate the coverage summaries (lines total and covered)
        # for each subtree in the tree. We can do this easily by using a recursive
        # definition.
        CoverageHelper.calculate_summary_fields(ret)

        return ret
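
A minimal usage sketch for the method above, assuming it is the CovReporter.preprocess_coverage_data staticmethod that Example #13 calls (the import path is an assumption and may differ per checkout):

# Hypothetical usage sketch; the import path is an assumption.
from CovReporter import CovReporter

coverage = {
    "source_files": [
        # Coveralls format: per-line hit counts, None for non-coverable lines
        {"name": "topdir/file1.c", "coverage": [None, 1, 0, None]},
    ]
}

tree = CovReporter.preprocess_coverage_data(coverage)

# Each path component becomes one "children" level and None becomes -1.
# (Names are split on os.sep, so use your platform's separator.)
assert tree["children"]["topdir"]["children"]["file1.c"]["coverage"] == [-1, 1, 0, -1]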
Example #2
    def preprocess_coverage_data(coverage):
        '''
        Preprocess the given coverage data.

        Preprocessing includes structuring the coverage data by directory
        for better performance as well as computing coverage summaries per directory.

        @type coverage: dict
        @param coverage: Coverage Data

        @rtype: dict
        @return: Preprocessed coverage data
        '''

        ret = {"children": {}}

        if "source_files" in coverage:
            # Coveralls format
            source_files = coverage["source_files"]

            # Process every source file and store the coverage data in our tree structure
            for source_file in source_files:

                # Split the filename into path parts and file part
                name = source_file["name"]
                name_parts = name.split(os.sep)
                path_parts = name_parts[:-1]
                file_part = name_parts[-1]

                # Start at the top of the tree for the path walking
                ptr = ret["children"]

                # Walk the tree down, one path part at a time and create parts
                # on the fly if they don't exist yet in our tree.
                for path_part in path_parts:
                    if path_part not in ptr:
                        ptr[path_part] = {"children": {}}

                    ptr = ptr[path_part]["children"]

                ptr[file_part] = {
                    "coverage": [-1 if x is None else x for x in source_file["coverage"]]
                }

        else:
            raise RuntimeError("Unknown coverage format")

        # Now we need to calculate the coverage summaries (lines total and covered)
        # for each subtree in the tree. We can do this easily by using a recursive
        # definition.
        CoverageHelper.calculate_summary_fields(ret)

        return ret
Example #3
def test_CoverageHelperApplyDirectivesMakeEmpty():
    node = json.loads(covdata)

    # Check that making the set entirely empty doesn't crash things (tsmith mode)
    directives = ["-:**"]

    CoverageHelper.apply_include_exclude_directives(node, directives)

    result = CoverageHelper.get_flattened_names(node, prefix="")

    expected_names = []

    assert result == set(expected_names)
Example #4
    def runTest(self):
        node = json.loads(covdata)

        # Check that making the set entirely empty doesn't crash things (tsmith mode)
        directives = ["-:**"]

        CoverageHelper.apply_include_exclude_directives(node, directives)

        result = CoverageHelper.get_flattened_names(node, prefix="")

        expected_names = []

        self.assertEqual(result, set(expected_names))
Example #5
    def runTest(self):
        node = json.loads(covdata)

        # Check that excluding all paths works (specialized case)
        directives = ["-:**", "+:topdir2/subdir1/**"]

        CoverageHelper.apply_include_exclude_directives(node, directives)

        result = CoverageHelper.get_flattened_names(node, prefix="")

        expected_names = [
            'topdir2', 'topdir2/subdir1', 'topdir2/subdir1/file1.c'
        ]

        self.assertEqual(result, set(expected_names))
Example #6
    def runTest(self):
        node = json.loads(covdata)

        # Check that any empty children are pruned (empty children are not useful)
        directives = ["-:topdir1/subdir1/**", "-:topdir1/subdir2/**"]

        CoverageHelper.apply_include_exclude_directives(node, directives)

        result = CoverageHelper.get_flattened_names(node, prefix="")

        expected_names = [
            'topdir2', 'topdir2/subdir1', 'topdir2/subdir1/file1.c'
        ]

        self.assertEqual(result, set(expected_names))
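
The directives in these tests are "+:"/"-:" prefixes followed by glob-style path patterns, applied in order against the preprocessed tree. A hedged sketch on a hand-built tree (the covdata fixture the tests load is not shown in these excerpts):

from FTB import CoverageHelper

# Hand-built stand-in for the covdata fixture (illustrative only).
node = {
    "children": {
        "topdir1": {"children": {
            "file1.c": {"coverage": [1, 0, -1]},
            "file2.c": {"coverage": [0, 0, 0]},
        }},
    }
}

# Exclude everything, then include one file back; per the tests above,
# directories left empty are pruned from the result.
CoverageHelper.apply_include_exclude_directives(node, ["-:**", "+:topdir1/file1.c"])

assert CoverageHelper.get_flattened_names(node, prefix="") == {"topdir1", "topdir1/file1.c"}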
Example #7
def test_CoverageHelperApplyDirectivesExcludeAll():
    node = json.loads(covdata)

    # Check that excluding all paths works (specialized case)
    directives = ["-:**", "+:topdir2/subdir1/**"]

    CoverageHelper.apply_include_exclude_directives(node, directives)

    result = CoverageHelper.get_flattened_names(node, prefix="")

    expected_names = [
        'topdir2',
        'topdir2/subdir1',
        'topdir2/subdir1/file1.c'
    ]

    assert result == set(expected_names)
Example #8
    def runTest(self):
        node = json.loads(covdata)

        # Check that mixed directives work properly (exclude multiple paths, include some back)
        directives = [
            "-:topdir1/subdir1/**", "+:topdir1/subdir?/file1.c",
            "+:topdir1/subdir?/file3.c", "-:topdir1/subdir2/**"
        ]

        CoverageHelper.apply_include_exclude_directives(node, directives)

        result = CoverageHelper.get_flattened_names(node, prefix="")

        expected_names = [
            'topdir1', 'topdir1/subdir1/file1.c', 'topdir1/subdir1', 'topdir2',
            'topdir2/subdir1', 'topdir2/subdir1/file1.c'
        ]

        self.assertEqual(result, set(expected_names))
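
Note that the mixed-directive test above depends on evaluation order: "+:topdir1/subdir?/file3.c" re-includes subdir2/file3.c, but the trailing "-:topdir1/subdir2/**" removes it again, which is why file3.c is absent from the expected names. A hedged sketch of that ordering effect:

from FTB import CoverageHelper

def make_tree():
    # Minimal stand-in for the covdata fixture (illustrative only).
    return {"children": {"topdir1": {"children": {
        "subdir2": {"children": {"file3.c": {"coverage": [1]}}},
    }}}}

# Include before exclude: the trailing exclude wins and everything is pruned.
node = make_tree()
CoverageHelper.apply_include_exclude_directives(
    node, ["+:topdir1/subdir?/file3.c", "-:topdir1/subdir2/**"])
assert CoverageHelper.get_flattened_names(node, prefix="") == set()

# Exclude before include: file3.c survives, as in the test above.
node = make_tree()
CoverageHelper.apply_include_exclude_directives(
    node, ["-:topdir1/subdir2/**", "+:topdir1/subdir?/file3.c"])
assert CoverageHelper.get_flattened_names(node, prefix="") == {
    "topdir1", "topdir1/subdir2", "topdir1/subdir2/file3.c"}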
Example #9
    def runTest(self):
        node = json.loads(covdata)
        result = CoverageHelper.get_flattened_names(node, prefix="")

        expected_names = [
            'topdir1', 'topdir1/subdir2', 'topdir1/subdir2/file3.c',
            'topdir1/subdir1/file2.c', 'topdir1/subdir1',
            'topdir1/subdir1/file1.c', 'topdir2', 'topdir2/subdir1',
            'topdir2/subdir1/file1.c'
        ]

        self.assertEqual(result, set(expected_names))
Example #10
import hashlib
import json

from django.core.files.base import ContentFile


def aggregate_coverage_data(pk, pks):
    from covmanager.models import Collection, CollectionFile  # noqa
    from FTB import CoverageHelper  # noqa

    # Fetch our existing, but incomplete destination collection
    mergedCollection = Collection.objects.get(pk=pk)

    # Fetch all source collections
    collections = Collection.objects.filter(pk__in=pks)

    # Merge the coverage of all other collections into the first one
    first_collection = collections[0]
    first_collection.loadCoverage()
    newCoverage = first_collection.content
    total_stats = None

    for collection in collections[1:]:
        # Load coverage, perform the merge, then release reference to the JSON blob again
        collection.loadCoverage()
        stats = CoverageHelper.merge_coverage_data(newCoverage,
                                                   collection.content)
        collection.content = None

        # Merge stats appropriately
        if total_stats is None:
            total_stats = stats
        else:
            for x in total_stats:
                total_stats[x] += stats[x]

    # Save the new coverage blob to disk and database
    newCoverage = json.dumps(newCoverage, separators=(',', ':'))
    h = hashlib.new('sha1')
    h.update(newCoverage.encode('utf-8'))
    dbobj = CollectionFile()
    dbobj.file.save("%s.coverage" % h.hexdigest(), ContentFile(newCoverage))
    dbobj.save()

    # Use the aggregated stats here; "stats" only holds the last merge result
    # and is undefined when there was nothing to merge.
    if total_stats is not None:
        mergedCollection.description += " (NC %s, LM %s, CM %s)" % (
            total_stats['null_coverable_count'], total_stats['length_mismatch_count'],
            total_stats['coverable_mismatch_count'])

    # Save the collection
    mergedCollection.coverage = dbobj
    mergedCollection.save()

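
merge_coverage_data merges the second tree into the first in place and returns a dict of counters; the NC/LM/CM values appended to the description are null_coverable_count, length_mismatch_count and coverable_mismatch_count. A simplified, hypothetical sketch of the per-file merge idea (the real helper also walks the directory tree and fills those counters):

def merge_line_counts(a, b):
    # Hypothetical simplification: -1 marks a non-coverable line, any other
    # entry is an execution count.
    merged = []
    for x, y in zip(a, b):
        if x < 0 and y < 0:
            merged.append(-1)         # non-coverable in both reports
        elif x < 0 or y < 0:
            merged.append(max(x, y))  # coverable/non-coverable mismatch
        else:
            merged.append(x + y)      # sum the execution counts
    return merged

print(merge_line_counts([-1, 1, 0], [-1, 0, 2]))  # [-1, 1, 2]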
Example #11
def test_CoverageHelperFlattenNames():
    node = json.loads(covdata)
    result = CoverageHelper.get_flattened_names(node, prefix="")

    expected_names = [
        'topdir1',
        'topdir1/subdir2',
        'topdir1/subdir2/file3.c',
        'topdir1/subdir1/file2.c',
        'topdir1/subdir1',
        'topdir1/subdir1/file1.c',
        'topdir2',
        'topdir2/subdir1',
        'topdir2/subdir1/file1.c'
    ]

    assert result == set(expected_names)
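
get_flattened_names evidently returns every directory and file path in the tree as a single set. An illustrative reimplementation (not the FTB code) that reproduces the expected names above:

def flatten_names(node, prefix=""):
    # Illustrative only: collect "dir/subdir/file" paths for every node.
    names = set()
    for name, child in node.get("children", {}).items():
        path = "%s/%s" % (prefix, name) if prefix else name
        names.add(path)
        names.update(flatten_names(child, path))
    return names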
Example #12
import hashlib
import json

from django.core.files.base import ContentFile


def aggregate_coverage_data(pk, pks):
    from covmanager.models import Collection, CollectionFile  # noqa
    from FTB import CoverageHelper  # noqa

    # Fetch our existing, but incomplete destination collection
    mergedCollection = Collection.objects.get(pk=pk)

    # Fetch all source collections
    collections = Collection.objects.filter(pk__in=pks)

    # Merge the coverage of all other collections into the first one
    first_collection = collections[0]
    first_collection.loadCoverage()
    newCoverage = first_collection.content
    total_stats = None

    for collection in collections[1:]:
        # Load coverage, perform the merge, then release reference to the JSON blob again
        collection.loadCoverage()
        stats = CoverageHelper.merge_coverage_data(newCoverage, collection.content)
        collection.content = None

        # Merge stats appropriately
        if total_stats is None:
            total_stats = stats
        else:
            for x in total_stats:
                total_stats[x] += stats[x]

    # Save the new coverage blob to disk and database
    newCoverage = json.dumps(newCoverage, separators=(',', ':'))
    h = hashlib.new('sha1')
    h.update(newCoverage.encode('utf-8'))
    dbobj = CollectionFile()
    dbobj.file.save("%s.coverage" % h.hexdigest(), ContentFile(newCoverage))
    dbobj.save()

    # Use the aggregated stats here; "stats" only holds the last merge result
    # and is undefined when there was nothing to merge.
    if total_stats is not None:
        mergedCollection.description += " (NC %s, LM %s, CM %s)" % (total_stats['null_coverable_count'],
                                                                    total_stats['length_mismatch_count'],
                                                                    total_stats['coverable_mismatch_count'])

    # Save the collection
    mergedCollection.coverage = dbobj
    mergedCollection.save()

Example #13
    def create_combined_coverage(coverage_files):
        '''
        Read coverage data from multiple files and return a single dictionary
        containing the merged data (already preprocessed).

        @type coverage_files: list
        @param coverage_files: List of filenames containing coverage data

        @rtype: tuple(dict,dict,dict)
        @return: Tuple of the combined coverage data, version information and
                 debug statistics from merging
        '''
        ret = None
        version = None
        stats = None

        for coverage_file in coverage_files:
            with open(coverage_file) as f:
                coverage = json.load(f)

                if version is None:
                    version = CovReporter.version_info_from_coverage_data(
                        coverage)

                coverage = CovReporter.preprocess_coverage_data(coverage)

                if ret is None:
                    ret = coverage
                else:
                    merge_stats = CoverageHelper.merge_coverage_data(
                        ret, coverage)
                    if stats is None:
                        stats = merge_stats
                    else:
                        for k in merge_stats:
                            if k in stats:
                                stats[k] += merge_stats[k]

        return (ret, version, stats)
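
A hypothetical call combining two files on disk (file names invented for illustration, assuming the CovReporter import from the sketch under Example #1):

combined, version, stats = CovReporter.create_combined_coverage(
    ["run1.coverage.json", "run2.coverage.json"])
print(version, stats)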
Example #14
    def create_combined_coverage(coverage_files):
        '''
        Read coverage data from multiple files and return a single dictionary
        containing the merged data (already preprocessed).

        @type coverage_files: list
        @param coverage_files: List of filenames containing coverage data

        @rtype: tuple(dict,dict,dict)
        @return: Tuple of the combined coverage data, version information and
                 debug statistics from merging
        '''
        ret = None
        version = None
        stats = None

        for coverage_file in coverage_files:
            with open(coverage_file) as f:
                coverage = json.load(f)

                if version is None:
                    version = CovReporter.version_info_from_coverage_data(coverage)

                coverage = CovReporter.preprocess_coverage_data(coverage)

                if ret is None:
                    ret = coverage
                else:
                    merge_stats = CoverageHelper.merge_coverage_data(ret, coverage)
                    if stats is None:
                        stats = merge_stats
                    else:
                        for k in merge_stats:
                            if k in stats:
                                stats[k] += merge_stats[k]

        return (ret, version, stats)
Example #15
    def apply(self, collection):
        CoverageHelper.apply_include_exclude_directives(
            collection, self.directives.splitlines())
        CoverageHelper.calculate_summary_fields(collection)
Example #16
    def apply(self, collection):
        CoverageHelper.apply_include_exclude_directives(collection, self.directives.splitlines())
        CoverageHelper.calculate_summary_fields(collection)
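
self.directives is evidently newline-separated text, one "+:"/"-:" pattern per line. A hedged sketch with a hypothetical stand-in for the model that owns apply():

from FTB import CoverageHelper

class DemoConfiguration:
    # Hypothetical stand-in; the real owning class is not shown in these
    # excerpts.
    directives = "-:**\n+:topdir2/subdir1/**"

    def apply(self, collection):
        CoverageHelper.apply_include_exclude_directives(collection, self.directives.splitlines())
        CoverageHelper.calculate_summary_fields(collection)

collection = {"children": {}}  # a preprocessed tree as built in Example #1
DemoConfiguration().apply(collection)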