Example no. 1
0
def repo_table(scope):  # repos, start, end):

    # Drives the list of all repos within an organization, showing the
    # statistics for each within the selected time range, along with
    # navigation links.

    (repos, authors) = scope.standardize_repos_and_authors()
    interval = 'DY'
    # FIXME: explain — the standardized repo list above is immediately
    # replaced by the pks of every available repo; confirm this is intended.
    repos = [repo_obj.pk for repo_obj in scope.available_repos.all()]
    queryset = Statistic.queryset_for_range(
        repos=repos,
        authors=authors,
        start=scope.start,
        end=scope.end,
        interval=interval,
    )
    queryset = Statistic.annotate(queryset.values('repo__name')).order_by('repo__name')
    data = _annotations_to_table(queryset, 'repo', 'repo__name')

    # FIXME: insert in author count, which is ... complicated ... this can be
    # optimized later. We should be able to grab every repo and annotate it
    # with the author count in one extra query tops, but it might require
    # manually writing it.
    for row in data:
        row['author_count'] = Author.author_count(row['repo'],
                                                  start=scope.start,
                                                  end=scope.end)

    # Some repos won't have been scanned yet; a second query fills them into
    # the table (with no statistics columns).
    never_scanned = Repository.objects.filter(last_scanned=None,
                                              organization=scope.org)
    for missing in never_scanned:
        data.append(dict(repo=missing.name))
    return data
Example no. 2
0
    def compute_daily_rollup(cls, repo=None, author=None, start_day=None, total_instances=None):

        """
        Generate rollup stats for everything the team did on a given day.
        """

        end_date = cls.get_end_day(start_day, DAY)

        # Guard: a day with zero file changes looks like a merge commit and
        # produces no rollup row.
        # FIXME: it would be a good idea to validate that this is 100% true.
        changes = FileChange.change_count(repo, author=author, start=start_day, end=end_date)
        if changes == 0:
            # print("-- skipping potential merge commit --")
            return

        # A per-author rollup covers exactly one author; a repo-wide rollup
        # counts everyone active on the repo that day.
        authors_count = 1 if author else Author.author_count(repo, start=start_day, end=end_date)

        # Aggregate values from the query set for the rollup.
        data = FileChange.aggregate_stats(repo, author=author, start=start_day, end=end_date)

        # FIXME: if start_day is today, we need to UPDATE the current stat? - verify if the bulk_update code deals with this?
        # FIXME: model code method below is rather inefficient, does this matter?

        # Create the total rollup row for the day.
        stat = Statistic(
            start_date=start_day,
            interval=DAY,
            repo=repo,
            author=author,
            days_active=1,
            author_total=authors_count,
            lines_added=data['lines_added'],
            lines_removed=data['lines_removed'],
            lines_changed=data['lines_changed'],
            commit_total=data['commit_total'],
            files_changed=data['files_changed'],
        )
        stat.compute_derived_values()

        cls.smart_bulk_update(repo=repo, start_day=start_day, author=author,
                              interval=DAY, stat=stat,
                              total_instances=total_instances)