def snippet_mp3(request):
    """Redirect the client to a cached MP3 snippet of the transcription audio.

    Reads ``starting_point``, ``length`` and ``padding`` from the query
    string, ensures the corresponding snippet file exists in the snippet
    cache, then issues an HTTP redirect to the snippet's public URL.
    """
    repo, commit = repos.repo_from_request(request)
    # Get information needed from settings and repository.
    settings = app_settings()
    source_mp3 = os.path.join(
        settings['fanscribed.audio'],
        '{0}.mp3'.format(request.host),
    )
    transcription_info, _ = repos.json_file_at_commit(
        repo, 'transcription.json', commit, required=True)
    audio_duration = transcription_info['duration']
    cache_dir = settings['fanscribed.snippet_cache']
    url_prefix = settings['fanscribed.snippet_url_prefix']
    # Snippet boundaries come from the GET params; ensure the snippet
    # file exists (mp3.snippet_path creates it in the cache as needed).
    snippet_file = mp3.snippet_path(
        full_mp3=source_mp3,
        duration=audio_duration,
        output_path=cache_dir,
        starting_point=int(request.GET.getone('starting_point')),
        length=int(request.GET.getone('length')),
        padding=int(request.GET.getone('padding')),
    )
    # Translate the cache-local path into its public URL and redirect.
    relative_path = os.path.relpath(snippet_file, cache_dir)
    raise HTTPFound(location=urlparse.urljoin(url_prefix, relative_path))
def rss_completion(request, percentage_gap=5):
    """Serve an RSS feed of transcription/review completion milestones.

    Walks the commit history oldest-first and emits one feed item each
    time the percentage of snippets transcribed (or reviewed) first
    reaches a multiple of ``percentage_gap`` (plus 100%).  The rendered
    feed is cached per commit; pass ``nocache`` in the query string to
    force regeneration.
    """
    repo, commit = repos.repo_from_request(request)
    # Return cached if found.
    cache_key = 'rss_completion commit={0} percentage_gap={1}'.format(
        commit.hexsha, percentage_gap)
    content, mtime = cache.get_cached_content(cache_key)
    # `in` replaces the deprecated dict.has_key() (removed in Python 3).
    if content is None or 'nocache' in request.GET:
        mtime = commit.authored_date
        # Milestone percentages still waiting to be reported; each is
        # removed once the first commit reaching it has been recorded.
        percent_reviewed_to_report = set(range(percentage_gap, 100, percentage_gap))
        percent_reviewed_to_report.add(100)
        percent_transcribed_to_report = set(range(percentage_gap, 100, percentage_gap))
        percent_transcribed_to_report.add(100)
        transcription_info, _ = repos.json_file_at_commit(
            repo, 'transcription.json', commit, required=True)
        duration = transcription_info['duration']
        snippet_ms = _snippet_ms()
        # Total snippet count, rounding up for a trailing partial snippet.
        # (// keeps the original Python 2 integer-division behavior.)
        snippets_total = duration // snippet_ms
        if duration % snippet_ms:
            snippets_total += 1
        reviews_total = snippets_total - 1
        completions = []
        now_url = 'http://{host}/?src=rc'.format(host=request.host)
        # iter_commits yields newest-first; reverse so each milestone is
        # attributed to the earliest commit that reached it.
        for c in reversed(list(repo.iter_commits(
                commit, ['remaining_reviews.json', 'remaining_snippets.json']))):
            this_url = 'http://{host}/?rev={rev}&src=rc'.format(
                host=request.host, rev=c.hexsha)
            snippets_remaining = len(repos.get_remaining_snippets(c.tree))
            snippets_completed = snippets_total - snippets_remaining
            snippets_percent = snippets_completed * 100 // snippets_total
            reviews_remaining = len(repos.get_remaining_reviews(c.tree))
            reviews_completed = reviews_total - reviews_remaining
            reviews_percent = reviews_completed * 100 // reviews_total
            # Each tuple: (snippets_percent, reviews_percent, url, date);
            # a transcription milestone takes priority over a review one.
            if snippets_percent in percent_transcribed_to_report:
                completions.append((
                    snippets_percent,
                    None,
                    this_url,
                    c.authored_date,
                ))
                percent_transcribed_to_report.remove(snippets_percent)
            elif reviews_percent in percent_reviewed_to_report:
                completions.append((
                    None,
                    reviews_percent,
                    this_url,
                    c.authored_date,
                ))
                percent_reviewed_to_report.remove(reviews_percent)
        # Reverse so the most recently reached milestones come first.
        completions.reverse()
        pub_date = commit.authored_date
        data = dict(
            _standard_response(repo, commit),
            completions=completions,
            now_url=now_url,
            pub_date=pub_date,
            request=request,
            rfc822_from_time=rfc822_from_time,
        )
        content = render('fanscribed:templates/rss_completion.xml.mako',
                         data, request=request)
        cache.cache_content(cache_key, content, mtime)
    return Response(content, content_type='application/rss+xml', date=mtime)
def transcription_json(request):
    """Serve the transcription info JSON for the requested revision."""
    # No rendering or processing, no need to cache.
    repo, commit = repos.repo_from_request(request)
    info, mtime = repos.json_file_at_commit(
        repo, 'transcription.json', commit, required=True)
    # Augment the stored info with snippet timing derived from settings,
    # converted from seconds to milliseconds.
    settings = app_settings()
    snippet_seconds = int(settings['fanscribed.snippet_seconds'])
    padding_seconds = float(settings['fanscribed.snippet_padding_seconds'])
    info['snippet_ms'] = snippet_seconds * 1000
    info['snippet_padding_ms'] = int(padding_seconds * 1000)
    return Response(body=json.dumps(info), content_type='application/json')
def progress(request):
    """Serve a JSON summary of transcription/review progress.

    The rendered JSON is cached per commit; pass ``nocache`` in the
    query string to force regeneration.
    """
    repo, commit = repos.repo_from_request(request)
    # Return cached if found.
    cache_key = 'progress-{0}'.format(commit.hexsha)
    content, mtime = cache.get_cached_content(cache_key)
    # `in` replaces the deprecated dict.has_key() (removed in Python 3).
    if content is None or 'nocache' in request.GET:
        tree = commit.tree
        info, _ = repos.json_file_at_commit(
            repo, 'transcription.json', commit, required=True)
        content = json.dumps(_progress_dicts(tree, info))
        mtime = commit.authored_date
        cache.cache_content(cache_key, content, mtime=mtime)
    return Response(body=content, content_type='application/json', date=mtime)
def _standard_response(repo, commit):
    """Build the common template context shared by every rendered view.

    Combines progress statistics with revision markers, speaker text,
    tracking HTML, and the transcription info (both raw and as JSON).
    """
    tree = commit.tree
    transcription_info, _ = repos.json_file_at_commit(
        repo, 'transcription.json', commit, required=True)
    # Start from the progress stats, then layer on the shared extras.
    context = dict(_progress_dicts(tree, transcription_info))
    context['latest_revision'] = repos.latest_revision(repo)
    context['custom_css_revision'] = repos.most_recent_revision(repo, 'custom.css')
    context['custom_js_revision'] = repos.most_recent_revision(repo, 'custom.js')
    context['speakers'] = repos.file_at_commit(repo, 'speakers.txt', commit)[0]
    context['tracking_html'] = repos.file_at_commit(repo, 'tracking.html', commit)[0]
    context['transcription_info'] = transcription_info
    context['transcription_info_json'] = json.dumps(transcription_info)
    return context
def read(request):
    """Render the read-only transcript view for the requested revision.

    The rendered page is cached per commit; pass ``nocache`` in the
    query string to force regeneration.
    """
    repo, commit = repos.repo_from_request(request)
    # Return cached if found.
    cache_key = 'view-{0}'.format(commit.hexsha)
    content, mtime = cache.get_cached_content(cache_key)
    # `in` replaces the deprecated dict.has_key() (removed in Python 3).
    if content is None or 'nocache' in request.GET:
        mtime = commit.authored_date
        tree = commit.tree
        transcription_info, _ = repos.json_file_at_commit(
            repo, 'transcription.json', commit, required=True)
        # Collect transcribed snippet text, keyed by starting point (ms),
        # from the <ms>.txt blobs at the top of the tree.
        raw_snippets = {}
        for obj in tree:
            if isinstance(obj, git.Blob):
                name, ext = os.path.splitext(obj.name)
                if ext == '.txt':
                    try:
                        starting_point = int(name)
                    except ValueError:
                        # Not a snippet file (e.g. speakers.txt); skip it.
                        pass
                    else:
                        raw_snippets[starting_point] = \
                            obj.data_stream.read().decode('utf8')
        # Go through all snippets, whether they've been transcribed or not.
        snippets = []
        speakers_map = repos.speakers_map(repo, commit)
        for starting_point in range(0, transcription_info['duration'],
                                    _snippet_ms()):
            text = raw_snippets.get(starting_point, '').strip()
            lines = _split_lines_and_expand_abbreviations(text, speakers_map)
            snippets.append((starting_point, lines))
        data = dict(
            _standard_response(repo, commit),
            snippets=sorted(snippets),
            preamble_incomplete=repos.file_at_commit(
                repo, 'preamble_incomplete.html', commit,
            )[0],
            preamble_completed=repos.file_at_commit(
                repo, 'preamble_completed.html', commit,
            )[0],
        )
        content = render('fanscribed:templates/view.mako', data,
                         request=request)
        cache.cache_content(cache_key, content, mtime)
    return Response(content, date=mtime)
def rss_kudos(request, max_hours=24, default_minutes=60):
    """Serve an RSS feed giving kudos to recent transcription contributors.

    Commits touching snippet files are grouped into ``minutes``-sized
    time buckets per author, and each author in each bucket gets a
    pseudo-random (but deterministic) kudos line rendered via Mako.
    The feed covers at most ``max_hours`` of history ending at the
    ``end`` timestamp (default: now).  The rendered feed is cached;
    pass ``nocache`` in the query string to force regeneration.
    """
    repo, commit = repos.repo_from_request(request)
    # Get grouping parameters.
    end_timestamp = int(request.GET.get('end', time.time()))
    minutes_per_item = int(request.GET.get('minutes', default_minutes))
    # Return cached if found.
    cache_key = 'rss_kudos commit={0} minutes={1} max_hours={2} start={3}'.format(
        commit.hexsha,
        minutes_per_item,
        max_hours,
        end_timestamp,
    )
    content, mtime = cache.get_cached_content(cache_key)
    if content is None or request.GET.has_key('nocache'):
        # Get the list of kudos to give, or use the default.
        kudos_txt, mtime = repos.file_at_commit(repo, 'kudos.txt', commit)
        kudos_txt = kudos_txt or DEFAULT_KUDOS
        kudos_lines = kudos_txt.strip().splitlines()
        # Kudos templates might want transcription info.
        transcription_info, _ = repos.json_file_at_commit(
            repo, 'transcription.json', commit, required=True)
        # Process the range of time needed for this RSS feed.
        # NOTE(review): this overwrites the mtime read from kudos.txt above.
        mtime = commit.authored_date
        # NOTE(review): `tree` appears unused in this function.
        tree = commit.tree
        timegroup_author_actions = {
            # timegroup: {
            #     AUTHOR_NAME: dict(actions=[ACTION, ...], kudos=KUDOS),
            #     ACTION = dict(author=AUTHOR, date=DATE, position=POSITION,
            #                   this_url=URL, now_url=URL)
            # }
        }
        # Find the ending timestamp for the period of time that comes
        # just before the current "partial" period of time.
        max_timestamp = end_timestamp - (end_timestamp % (minutes_per_item * 60))
        min_timestamp = max_timestamp - (max_hours * 60 * 60)
        # Starting from the request's commit, iterate backwards (newest
        # first), stopping at the first commit older than the window.
        for c in repo.iter_commits(commit):
            if c.authored_date < min_timestamp:
                break
            # Collect the starting points (ms) of snippet files this
            # commit touched; non-snippet files yield None and are skipped.
            snippets_affected = set()
            for filename in c.stats.files:
                ms = _ms_from_snippet_filename(filename)
                if ms is not None:
                    snippets_affected.add(ms)
            if snippets_affected:
                # Represent the commit by its earliest affected snippet.
                earliest_ms = min(snippets_affected)
                date = c.authored_date
                # Bucket the commit into its time group.
                timegroup = date - (date % (minutes_per_item * 60))
                timegroup_authors = timegroup_author_actions.setdefault(timegroup, {})
                author = c.author
                # Per-author action list inside this time group.
                author_actions = timegroup_authors.setdefault(
                    author.name, dict(actions=[]))['actions']
                anchor = _anchor_from_ms(earliest_ms)
                position = _label_from_ms(earliest_ms)
                kwargs = dict(
                    host=request.host,
                    rev=c.hexsha,
                    anchor=anchor,
                )
                # Link to the snippet both at the latest revision and
                # pinned at this specific commit.
                now_url = 'http://{host}/?src=rk#{anchor}'.format(**kwargs)
                this_url = 'http://{host}/?rev={rev}&src=rk#{anchor}'.format(**kwargs)
                action = dict(
                    author=author,
                    author_name=author.name,
                    date=date,
                    position=position,
                    this_url=this_url,
                    now_url=now_url,
                )
                author_actions.append(action)
        # Now create the kudos for each author.
        for timegroup, authors in timegroup_author_actions.iteritems():
            for author_name, author_info in authors.iteritems():
                # Deterministically choose a pseudo-random kudos based on what we
                # know about this author's contributions.
                actions = author_info['actions']
                # Commits were visited newest-first, so actions[0] is the
                # author's most recent action in this time group.
                latest_action = actions[0]
                # Seeding with the action date keeps the choice stable
                # across repeated renders of the same feed.
                random.seed(latest_action['date'])
                kudos_line = random.choice(kudos_lines).strip()
                kudos_template = Template(text=kudos_line)
                # Render it.
                kudos = kudos_template.render(
                    author_name=author_name,
                    contributions=len(actions),
                    latest_action=latest_action,
                    transcription_info=transcription_info,
                    request=request,
                )
                # Keep it with the author info.
                author_info['kudos'] = kudos
                author_info['latest_action'] = latest_action
        pub_date = commit.authored_date
        data = dict(
            _standard_response(repo, commit),
            timegroup_author_actions=timegroup_author_actions,
            pub_date=pub_date,
            request=request,
            rfc822_from_time=rfc822_from_time,
            end_timestamp=end_timestamp,
            minutes_per_item=minutes_per_item,
        )
        content = render('fanscribed:templates/rss_kudos.xml.mako', data, request=request)
        cache.cache_content(cache_key, content, mtime)
    return Response(content, content_type='application/rss+xml', date=mtime)