Example #1
0
def snippet_info(request):
    """Return JSON listing the contributors to a single snippet.

    Expects a ``starting_point`` GET parameter (the snippet's offset,
    presumably in milliseconds -- used to derive the snippet filename).
    Responses are cached per (commit, starting_point); pass ``nocache``
    to force regeneration.
    """
    repo, commit = repos.repo_from_request(request)
    starting_point = int(request.GET.getone('starting_point'))
    # Return cached if found.
    cache_key = 'snippet_info-{0}-{1}'.format(commit.hexsha, starting_point)
    content, mtime = cache.get_cached_content(cache_key)
    # NOTE: `in` replaces deprecated has_key(); same semantics.
    if content is None or 'nocache' in request.GET:
        filename = '{0:016d}.txt'.format(starting_point)
        contributor_list = []
        mtime = None
        # Walk the snippet file's history, newest commit first.
        # (Loop variable renamed from `commit` to avoid shadowing the
        # request's commit.)
        for c in repo.iter_commits(commit, paths=filename):
            if mtime is None:
                # Use the most recent modification time.
                mtime = c.authored_date
            contributor = dict(author_name=c.author.name)
            if contributor not in contributor_list:
                contributor_list.append(contributor)
        # Report contributors oldest-first.
        contributor_list.reverse()
        info = dict(
            contributor_list=contributor_list,
        )
        content = json.dumps(info)
        cache.cache_content(cache_key, content, mtime=mtime)
    return Response(body=content, content_type='application/json', date=mtime)
Example #2
0
def rss_completion(request, percentage_gap=5):
    """Render an RSS feed of transcription/review completion milestones.

    Walks the history of the remaining-work files and emits one feed item
    each time transcription or review progress first crosses a multiple of
    ``percentage_gap`` percent (100% is always included).  Responses are
    cached per (commit, percentage_gap); pass ``nocache`` to regenerate.
    """
    repo, commit = repos.repo_from_request(request)
    # Return cached if found.
    cache_key = 'rss_completion commit={0} percentage_gap={1}'.format(commit.hexsha, percentage_gap)
    content, mtime = cache.get_cached_content(cache_key)
    # NOTE: `in` replaces deprecated has_key(); same semantics.
    if content is None or 'nocache' in request.GET:
        mtime = commit.authored_date
        # Milestone percentages not yet reported; removed as each is hit.
        percent_reviewed_to_report = set(xrange(percentage_gap, 100, percentage_gap))
        percent_reviewed_to_report.add(100)
        percent_transcribed_to_report = set(xrange(percentage_gap, 100, percentage_gap))
        percent_transcribed_to_report.add(100)
        transcription_info, _ = repos.json_file_at_commit(
            repo, 'transcription.json', commit, required=True)
        duration = transcription_info['duration']
        snippet_ms = _snippet_ms()
        snippets_total = duration / snippet_ms
        if duration % snippet_ms:
            # A partial trailing snippet counts as one more.
            snippets_total += 1
        # One review exists between each pair of adjacent snippets.
        reviews_total = snippets_total - 1
        completions = []
        now_url = 'http://{host}/?src=rc'.format(**dict(
            host=request.host,
        ))
        # Iterate oldest-first over commits touching the remaining-work files.
        for c in reversed(list(repo.iter_commits(commit, ['remaining_reviews.json', 'remaining_snippets.json']))):
            this_url = 'http://{host}/?rev={rev}&src=rc'.format(**dict(
                host=request.host,
                rev=c.hexsha,
            ))
            snippets_remaining = len(repos.get_remaining_snippets(c.tree))
            snippets_completed = snippets_total - snippets_remaining
            # NOTE(review): relies on integer (floor) division for whole
            # percents -- assumes duration/counts are ints (Python 2).
            snippets_percent = snippets_completed * 100 / snippets_total
            reviews_remaining = len(repos.get_remaining_reviews(c.tree))
            reviews_completed = reviews_total - reviews_remaining
            reviews_percent = reviews_completed * 100 / reviews_total
            if snippets_percent in percent_transcribed_to_report:
                completions.append((
                    snippets_percent, None, this_url, c.authored_date,
                ))
                percent_transcribed_to_report.remove(snippets_percent)
            elif reviews_percent in percent_reviewed_to_report:
                completions.append((
                    None, reviews_percent, this_url, c.authored_date,
                ))
                percent_reviewed_to_report.remove(reviews_percent)
        # Report completion in chrono order.
        completions.reverse()
        pub_date = commit.authored_date
        data = dict(
            _standard_response(repo, commit),
            completions=completions,
            now_url=now_url,
            pub_date=pub_date,
            request=request,
            rfc822_from_time=rfc822_from_time,
        )
        content = render('fanscribed:templates/rss_completion.xml.mako', data, request=request)
        cache.cache_content(cache_key, content, mtime)
    return Response(content, content_type='application/rss+xml', date=mtime)
Example #3
0
def rss_basic(request, max_actions=50):
    """Render an RSS feed of the most recent snippet-editing actions.

    :param max_actions: maximum number of feed items to include.

    Responses are cached per (commit, max_actions); pass ``nocache``
    to force regeneration.
    """
    repo, commit = repos.repo_from_request(request)
    # Return cached if found.
    cache_key = 'rss_basic commit={0} max_actions={1}'.format(commit.hexsha, max_actions)
    content, mtime = cache.get_cached_content(cache_key)
    # NOTE: `in` replaces deprecated has_key(); same semantics.
    if content is None or 'nocache' in request.GET:
        mtime = commit.authored_date
        actions = [
            # dict(author=AUTHOR, date=DATE, position=POSITION, this_url=URL, now_url=URL),
        ]
        # Starting from the request's commit, iterate backwards.
        for c in repo.iter_commits(commit):
            snippets_affected = set()
            for filename in c.stats.files:
                ms = _ms_from_snippet_filename(filename)
                if ms is not None:
                    snippets_affected.add(ms)
            if snippets_affected:
                # Link the feed item to the earliest snippet this commit touched.
                earliest_ms = min(snippets_affected)
                anchor = _anchor_from_ms(earliest_ms)
                position = _label_from_ms(earliest_ms)
                author = c.author
                date = c.authored_date
                kwargs = dict(
                    host=request.host,
                    rev=c.hexsha,
                    anchor=anchor,
                )
                now_url = 'http://{host}/?src=rb#{anchor}'.format(**kwargs)
                this_url = 'http://{host}/?rev={rev}&src=rb#{anchor}'.format(**kwargs)
                actions.append(dict(
                    author=author,
                    date=date,
                    position=position,
                    this_url=this_url,
                    now_url=now_url,
                ))
            if len(actions) >= max_actions:
                break
        # Report actions in chrono order.
        actions.reverse()
        # Publication date: most recent action, or "now" when there are none.
        if not actions:
            pub_date = time.time()
        else:
            pub_date = actions[-1]['date']
        data = dict(
            _standard_response(repo, commit),
            actions=actions,
            pub_date=pub_date,
            request=request,
            rfc822_from_time=rfc822_from_time,
        )
        content = render('fanscribed:templates/rss_basic.xml.mako', data, request=request)
        cache.cache_content(cache_key, content, mtime)
    return Response(content, content_type='application/rss+xml', date=mtime)
Example #4
0
def progress(request):
    """Return transcription progress statistics as JSON.

    Delegates the actual computation to ``_progress_dicts``.  Responses
    are cached per commit; pass ``nocache`` to force regeneration.
    """
    repo, commit = repos.repo_from_request(request)
    # Return cached if found.
    cache_key = 'progress-{0}'.format(commit.hexsha)
    content, mtime = cache.get_cached_content(cache_key)
    # NOTE: `in` replaces deprecated has_key(); same semantics.
    if content is None or 'nocache' in request.GET:
        tree = commit.tree
        info, _ = repos.json_file_at_commit(
            repo, 'transcription.json', commit, required=True)
        content = json.dumps(_progress_dicts(tree, info))
        mtime = commit.authored_date
        cache.cache_content(cache_key, content, mtime=mtime)
    return Response(body=content, content_type='application/json', date=mtime)
Example #5
0
def read(request):
    """Render the full transcription reading view for the requested revision.

    Gathers every snippet (transcribed or not) across the transcription's
    duration and renders them with the preamble fragments.  Responses are
    cached per commit; pass ``nocache`` to force regeneration.
    """
    repo, commit = repos.repo_from_request(request)
    # Return cached if found.
    cache_key = 'view-{0}'.format(commit.hexsha)
    content, mtime = cache.get_cached_content(cache_key)
    # NOTE: `in` replaces deprecated has_key(); same semantics.
    if content is None or 'nocache' in request.GET:
        mtime = commit.authored_date
        tree = commit.tree
        transcription_info, _ = repos.json_file_at_commit(
            repo, 'transcription.json', commit, required=True)
        # Collect raw snippet text keyed by starting point, from blobs
        # whose names are all digits with a '.txt' extension.
        raw_snippets = {}
        for obj in tree:
            if isinstance(obj, git.Blob):
                name, ext = os.path.splitext(obj.name)
                if ext == '.txt':
                    try:
                        starting_point = int(name)
                    except ValueError:
                        # Not a snippet file; skip it.
                        pass
                    else:
                        raw_snippets[starting_point] = obj.data_stream.read().decode('utf8')
        # Go through all snippets, whether they've been transcribed or not.
        snippets = []
        speakers_map = repos.speakers_map(repo, commit)
        for starting_point in range(0, transcription_info['duration'], _snippet_ms()):
            text = raw_snippets.get(starting_point, '').strip()
            lines = _split_lines_and_expand_abbreviations(text, speakers_map)
            snippets.append((starting_point, lines))
        data = dict(
            _standard_response(repo, commit),
            snippets=sorted(snippets),
            preamble_incomplete=repos.file_at_commit(
                repo, 'preamble_incomplete.html', commit,
            )[0],
            preamble_completed=repos.file_at_commit(
                repo, 'preamble_completed.html', commit,
            )[0],
        )
        content = render('fanscribed:templates/view.mako', data, request=request)
        cache.cache_content(cache_key, content, mtime)
    return Response(content, date=mtime)
Example #6
0
def snippets_updated(request):
    """Return formatted snippets that have been updated since the given revision.

    Expects a ``since`` GET parameter naming the earlier revision; walks
    commits back from the request's commit until that revision is reached,
    collecting every snippet file that changed.  Responses are cached per
    (commit, since); pass ``nocache`` to force regeneration.
    """
    repo, request_commit = repos.repo_from_request(request)
    since_rev = request.GET.getone('since')
    # Return cached if found.
    cache_key = 'updated-{0}-{1}'.format(request_commit.hexsha, since_rev)
    content, mtime = cache.get_cached_content(cache_key)
    # NOTE: `in` replaces deprecated has_key(); same semantics.
    if content is None or 'nocache' in request.GET:
        since_commit = repo.commit(since_rev)
        files_updated = set()
        for commit in repo.iter_commits(request_commit):
            # Have we reached the end?
            if commit == since_commit:
                break
            # Look for snippet changes: snippet filenames are 16 digits
            # plus '.txt', i.e. exactly 20 characters.
            for filename in commit.stats.files:
                if len(filename) == 20 and filename.endswith('.txt'):
                    files_updated.add(filename)
        tree = request_commit.tree
        speakers_map = repos.speakers_map(repo, request_commit)
        snippets = []
        for filename in files_updated:
            # The leading 16 digits encode the snippet's starting point.
            starting_point = int(filename[:16])
            snippet = dict(
                starting_point=starting_point,
            )
            text = tree[filename].data_stream.read().strip()
            snippet['lines'] = _split_lines_and_expand_abbreviations(text, speakers_map)
            snippets.append(snippet)
        data = dict(
            latest_revision=repos.latest_revision(repo),
            snippets=snippets,
        )
        content = json.dumps(data)
        mtime = request_commit.authored_date
        cache.cache_content(cache_key, content, mtime)
    return Response(content, content_type='application/json', date=mtime)
Example #7
0
def rss_kudos(request, max_hours=24, default_minutes=60):
    """Render an RSS feed giving "kudos" to recent contributors.

    Groups snippet-editing commits into time buckets of ``minutes``
    minutes (GET param, default ``default_minutes``) over the last
    ``max_hours`` hours ending at ``end`` (GET param, default now), then
    renders one deterministic pseudo-random kudos line per author per
    bucket.  Responses are cached per (commit, minutes, max_hours, end);
    pass ``nocache`` to force regeneration.
    """
    repo, commit = repos.repo_from_request(request)
    # Get grouping parameters.
    end_timestamp = int(request.GET.get('end', time.time()))
    minutes_per_item = int(request.GET.get('minutes', default_minutes))
    # Return cached if found.
    cache_key = 'rss_kudos commit={0} minutes={1} max_hours={2} start={3}'.format(
        commit.hexsha,
        minutes_per_item,
        max_hours,
        end_timestamp,
    )
    content, mtime = cache.get_cached_content(cache_key)
    if content is None or request.GET.has_key('nocache'):
        # Get the list of kudos to give, or use the default.
        # (The mtime unpacked here is intentionally discarded below in
        # favor of the commit's authored date.)
        kudos_txt, mtime = repos.file_at_commit(repo, 'kudos.txt', commit)
        kudos_txt = kudos_txt or DEFAULT_KUDOS
        kudos_lines = kudos_txt.strip().splitlines()
        # Kudos templates might want transcription info.
        transcription_info, _ = repos.json_file_at_commit(
            repo, 'transcription.json', commit, required=True)
        # Process the range of time needed for this RSS feed.
        mtime = commit.authored_date
        tree = commit.tree
        timegroup_author_actions = {
            # timegroup: {
            #     AUTHOR_NAME: dict(actions=[ACTION, ...], kudos=KUDOS),
            #         ACTION = dict(author=AUTHOR, date=DATE, position=POSITION, this_url=URL, now_url=URL)
            # }
        }
        # Find the ending timestamp for the period of time that comes
        # just before the current "partial" period of time.
        max_timestamp = end_timestamp - (end_timestamp % (minutes_per_item * 60))
        min_timestamp = max_timestamp - (max_hours * 60 * 60)
        # Starting from the request's commit, iterate backwards.
        for c in repo.iter_commits(commit):
            # Stop once commits predate the window of interest.
            if c.authored_date < min_timestamp:
                break
            snippets_affected = set()
            for filename in c.stats.files:
                ms = _ms_from_snippet_filename(filename)
                if ms is not None:
                    snippets_affected.add(ms)
            if snippets_affected:
                earliest_ms = min(snippets_affected)
                date = c.authored_date
                # Bucket the commit by flooring its date to the group size.
                timegroup = date - (date % (minutes_per_item * 60))
                timegroup_authors = timegroup_author_actions.setdefault(timegroup, {})
                author = c.author
                author_actions = timegroup_authors.setdefault(author.name, dict(actions=[]))['actions']
                anchor = _anchor_from_ms(earliest_ms)
                position = _label_from_ms(earliest_ms)
                kwargs = dict(
                    host=request.host,
                    rev=c.hexsha,
                    anchor=anchor,
                )
                now_url = 'http://{host}/?src=rk#{anchor}'.format(**kwargs)
                this_url = 'http://{host}/?rev={rev}&src=rk#{anchor}'.format(**kwargs)
                action = dict(
                    author=author,
                    author_name=author.name,
                    date=date,
                    position=position,
                    this_url=this_url,
                    now_url=now_url,
                )
                author_actions.append(action)
        # Now create the kudos for each author.
        for timegroup, authors in timegroup_author_actions.iteritems():
            for author_name, author_info in authors.iteritems():
                # Deterministically choose a pseudo-random kudos based on what we
                # know about this author's contributions.
                actions = author_info['actions']
                # Commits were visited newest-first, so index 0 is the
                # author's most recent action in this bucket.
                latest_action = actions[0]
                # Seeding with the action date keeps the chosen kudos
                # stable across renders of the same feed.
                random.seed(latest_action['date'])
                kudos_line = random.choice(kudos_lines).strip()
                kudos_template = Template(text=kudos_line)
                # Render it.
                kudos = kudos_template.render(
                    author_name=author_name,
                    contributions=len(actions),
                    latest_action=latest_action,
                    transcription_info=transcription_info,
                    request=request,
                )
                # Keep it with the author info.
                author_info['kudos'] = kudos
                author_info['latest_action'] = latest_action
        pub_date = commit.authored_date
        data = dict(
            _standard_response(repo, commit),
            timegroup_author_actions=timegroup_author_actions,
            pub_date=pub_date,
            request=request,
            rfc822_from_time=rfc822_from_time,
            end_timestamp=end_timestamp,
            minutes_per_item=minutes_per_item,
        )
        content = render('fanscribed:templates/rss_kudos.xml.mako', data, request=request)
        cache.cache_content(cache_key, content, mtime)
    return Response(content, content_type='application/rss+xml', date=mtime)