Example #1
from pathlib import Path

from pydriller.metrics.process.code_churn import CodeChurn


def test_with_flag():
    metric = CodeChurn(path_to_repo='test-repos/pydriller',
                       from_commit='ab36bf45859a210b0eae14e17683f31d19eea041',
                       to_commit='fdf671856b260aca058e6595a96a7a0fba05454b',
                       ignore_added_files=True)

    code_churns = metric.count()

    # With files added inside the commit range ignored, the churn dictionary
    # covers 7 files and domain/__init__.py does not appear in it.
    assert len(code_churns) == 7
    assert str(Path('domain/__init__.py')) not in code_churns
    assert code_churns[str(Path('domain/commit.py'))] == 0
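
The ignore_added_files=True flag is the point of this test: files introduced
inside the analysed commit range are dropped from the result, which is
presumably why domain/__init__.py is absent. Below is a minimal sketch (not
part of the original test) of the same call without the flag, so that added
files are counted as well; no expected values are shown because the original
does not assert any for this variant.

from pydriller.metrics.process.code_churn import CodeChurn

metric = CodeChurn(path_to_repo='test-repos/pydriller',
                   from_commit='ab36bf45859a210b0eae14e17683f31d19eea041',
                   to_commit='fdf671856b260aca058e6595a96a7a0fba05454b')

# count() returns a dict mapping each modified file path to its code churn
# over the commit range; without ignore_added_files, newly added files are
# included in this dictionary.
print(metric.count())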
Example #2
from pathlib import Path

from pydriller.metrics.process.code_churn import CodeChurn


# The arguments are expected to be injected by pytest, typically through a
# @pytest.mark.parametrize decorator (see the sketch after Example #3).
def test_with_dates(path_to_repo, filepath, since, to, expected_count, expected_max, expected_avg):
    metric = CodeChurn(path_to_repo=path_to_repo, since=since, to=to)

    actual_count = metric.count()
    actual_max = metric.max()
    actual_avg = metric.avg()

    filepath = str(Path(filepath))

    assert actual_count[filepath] == expected_count
    assert actual_max[filepath] == expected_max
    assert actual_avg[filepath] == expected_avg
Example #3
from pathlib import Path

from pydriller.metrics.process.code_churn import CodeChurn


# Same structure as Example #2, but the analysis window is bounded by two
# commit hashes instead of dates; the arguments are again supplied by pytest.
def test_with_commits(path_to_repo, filepath, from_commit, to_commit, expected_count, expected_max, expected_avg):
    metric = CodeChurn(path_to_repo=path_to_repo,
                       from_commit=from_commit,
                       to_commit=to_commit)

    actual_count = metric.count()
    actual_max = metric.max()
    actual_avg = metric.avg()

    filepath = str(Path(filepath))

    assert actual_count[filepath] == expected_count
    assert actual_max[filepath] == expected_max
    assert actual_avg[filepath] == expected_avg
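
Examples #2 and #3 do not show where their arguments come from; as pytest
functions they would normally be driven by a @pytest.mark.parametrize
decorator. The sketch below shows that wiring for the date-based variant;
every concrete value in it is a placeholder, not data taken from the
original test suite.

import datetime

import pytest

# Placeholder case only; real repositories, windows, and expected values are
# not reproduced here.
DATE_CASES = [
    ('path/to/some-repo', 'some/file.py',
     datetime.datetime(2020, 1, 1), datetime.datetime(2020, 6, 1),
     0, 0, 0.0),
]


@pytest.mark.parametrize(
    'path_to_repo, filepath, since, to, '
    'expected_count, expected_max, expected_avg',
    DATE_CASES)
def test_with_dates(path_to_repo, filepath, since, to,
                    expected_count, expected_max, expected_avg):
    ...  # body as in Example #2; Example #3 is parametrized the same way,
         # with from_commit/to_commit hashes in place of since/to.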
Example #4
# Assumes module-level imports of datetime and the PyDriller process metric
# classes (ChangeSet, CodeChurn, CommitsCount, ContributorsCount,
# ContributorsExperience, HunksCount, LinesCount); self.url is the repository
# path or URL handed to each metric.
def fetch_metrics(self):
    change_set = ChangeSet(self.url,
                           since=datetime.datetime.fromtimestamp(0),
                           to=datetime.datetime.now())
    code_churn = CodeChurn(self.url,
                           since=datetime.datetime.fromtimestamp(0),
                           to=datetime.datetime.now())
    commits_count = CommitsCount(self.url,
                                 since=datetime.datetime.fromtimestamp(0),
                                 to=datetime.datetime.now())
    contributors_count = ContributorsCount(
        self.url,
        since=datetime.datetime.fromtimestamp(0),
        to=datetime.datetime.now())
    contributors_experience = ContributorsExperience(
        self.url,
        since=datetime.datetime.fromtimestamp(0),
        to=datetime.datetime.now())
    hunks_count = HunksCount(self.url,
                             since=datetime.datetime.fromtimestamp(0),
                             to=datetime.datetime.now())
    line_count = LinesCount(self.url,
                            since=datetime.datetime.fromtimestamp(0),
                            to=datetime.datetime.now())
    return {
        'changeSet': change_set,
        'codeChurn': code_churn,
        'commitsCount': commits_count,
        'contributorsCount': contributors_count,
        'contributorsExperience': contributors_experience,
        'hunksCount': hunks_count,
        'lineCount': line_count,
    }
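
Note that fetch_metrics returns the metric objects themselves rather than
computed values, so the caller still has to invoke count(), max(), avg() and
the other accessors. A hypothetical call site follows; the analyzer name
stands for an instance of whatever class defines fetch_metrics and is not
part of the original code.

metrics = analyzer.fetch_metrics()

churn_per_file = metrics['codeChurn'].count()    # dict: file path -> churn
largest_change_set = metrics['changeSet'].max()  # most files changed in a single commit
added_per_file = metrics['lineCount'].count_added()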
Example #5
    # Assumes the PyDriller process metric classes used below are imported at
    # module level and that self.path_to_repo points to the repository.
    def get_process_metrics(self, from_commit: str, to_commit: str) -> dict:
        """ Extract process metrics for an evolution period.

        Parameters
        ----------
        from_commit : str
            Hash of release start
        to_commit : str
            Hash of release end

        """
        change_set = ChangeSet(self.path_to_repo, from_commit=from_commit, to_commit=to_commit)
        code_churn = CodeChurn(self.path_to_repo, from_commit=from_commit, to_commit=to_commit, ignore_added_files=True)
        commits_count = CommitsCount(self.path_to_repo, from_commit=from_commit, to_commit=to_commit)
        contributors_count = ContributorsCount(self.path_to_repo, from_commit=from_commit, to_commit=to_commit)
        highest_contributors_experience = ContributorsExperience(self.path_to_repo, from_commit=from_commit,
                                                                 to_commit=to_commit)
        median_hunks_count = HunksCount(self.path_to_repo, from_commit=from_commit, to_commit=to_commit)
        lines_count = LinesCount(self.path_to_repo, from_commit=from_commit, to_commit=to_commit)

        return {
            'dict_change_set_max': change_set.max(),
            'dict_change_set_avg': change_set.avg(),
            'dict_code_churn_count': code_churn.count(),
            'dict_code_churn_max': code_churn.max(),
            'dict_code_churn_avg': code_churn.avg(),
            'dict_commits_count': commits_count.count(),
            'dict_contributors_count': contributors_count.count(),
            'dict_minor_contributors_count': contributors_count.count_minor(),
            'dict_highest_contributor_experience': highest_contributors_experience.count(),
            'dict_hunks_median': median_hunks_count.count(),
            'dict_additions': lines_count.count_added(),
            'dict_additions_max': lines_count.max_added(),
            'dict_additions_avg': lines_count.avg_added(),
            'dict_deletions': lines_count.count_removed(),
            'dict_deletions_max': lines_count.max_removed(),
            'dict_deletions_avg': lines_count.avg_removed()}
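
Unlike Example #4, this method evaluates every metric before returning, so
the dictionary holds computed results (mostly per-file mappings) rather than
metric objects. A hypothetical call site is sketched below; the miner
instance and the commit hashes are placeholders.

release_metrics = miner.get_process_metrics(
    from_commit='<release_start_sha>',
    to_commit='<release_end_sha>')

# e.g. total code churn per file over the release window
churn_per_file = release_metrics['dict_code_churn_count']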
Example #6
        # Fragment: the tail of a custom per-file summing method; the
        # enclosing class and the initialization of sum_count are not shown.
        for path, nloc in self.files.items():
            sum_count[path] = sum(nloc)
        return sum_count


import datetime as dt

from pydriller.metrics.process.code_churn import CodeChurn
# Complexity and NLOC are assumed to be custom metric classes defined
# elsewhere; their import paths are not part of the original snippet.

dt1 = dt.datetime(2020, 1, 1, 0, 0)
dt2 = dt.datetime(2020, 5, 23, 0, 0)

measure = Complexity(
    path_to_repo='https://github.com/eclipse/eclipse.jdt.core',
    since=dt1,
    to=dt2).count()
measure2 = CodeChurn(
    path_to_repo='https://github.com/eclipse/eclipse.jdt.core',
    since=dt1,
    to=dt2).count()
measure3 = NLOC(path_to_repo='https://github.com/eclipse/eclipse.jdt.core',
                since=dt1,
                to=dt2).count()
measure4 = NLOC(path_to_repo='https://github.com/eclipse/eclipse.jdt.core',
                since=dt1,
                to=dt2).sumC()

fileData = open("/FileMetrics.txt", "w")
for file in measure:
    values = str(measure[file]) + '/\\' + str(measure2[file]) + '/\\' +\
             str(measure3[file]) + '/\\' + str(measure4[file])
    fileData.write(str(file) + '/\\' + values + '\n')
fileData.close()
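
Complexity, NLOC, and the sumC() call do not appear in PyDriller's documented
set of process metrics, so they look like user-defined extensions written in
the same style as CodeChurn. The export loop itself can be written a little
more defensively; below is a sketch with a context manager and a relative
output path, assuming the four metric dictionaries are keyed by the same
file paths.

SEP = '/\\'

with open("FileMetrics.txt", "w") as out:
    for file in measure:
        values = SEP.join(str(m[file])
                          for m in (measure, measure2, measure3, measure4))
        out.write(f"{file}{SEP}{values}\n")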