def _standard_response(repo, commit):
    """Build the response values common to every transcription view.

    Starts from the progress dictionaries for ``commit`` and layers on
    revision markers, optional site-customization files, and the parsed
    transcription info (both as a dict and as a JSON string for templates).
    """
    tree = commit.tree
    transcription_info, _ = repos.json_file_at_commit(
        repo, 'transcription.json', commit, required=True)
    # Copy the progress dicts, then merge in the standard keys.
    response = dict(_progress_dicts(tree, transcription_info))
    response.update({
        'latest_revision': repos.latest_revision(repo),
        'custom_css_revision': repos.most_recent_revision(repo, 'custom.css'),
        'custom_js_revision': repos.most_recent_revision(repo, 'custom.js'),
        'speakers': repos.file_at_commit(repo, 'speakers.txt', commit)[0],
        'tracking_html': repos.file_at_commit(repo, 'tracking.html', commit)[0],
        'transcription_info': transcription_info,
        'transcription_info_json': json.dumps(transcription_info),
    })
    return response
def read(request):
    """Render the full transcription view for the commit named in the request.

    Serves a cached rendering keyed on the commit hash when available;
    otherwise collects every ``<ms>.txt`` snippet blob from the commit tree,
    expands speaker abbreviations, renders the ``view.mako`` template, and
    caches the result.  Pass ``?nocache`` to force a re-render.
    """
    repo, commit = repos.repo_from_request(request)
    # Return cached if found.
    cache_key = 'view-{0}'.format(commit.hexsha)
    content, mtime = cache.get_cached_content(cache_key)
    # Fix: use `in` rather than the Py2-only, removed-in-Py3 dict.has_key().
    if content is None or 'nocache' in request.GET:
        mtime = commit.authored_date
        tree = commit.tree
        transcription_info, _ = repos.json_file_at_commit(
            repo, 'transcription.json', commit, required=True)
        # Map starting position (ms) -> raw snippet text for every blob
        # whose filename is "<integer>.txt".
        raw_snippets = {}
        for obj in tree:
            if isinstance(obj, git.Blob):
                name, ext = os.path.splitext(obj.name)
                if ext == '.txt':
                    try:
                        starting_point = int(name)
                    except ValueError:
                        # Non-numeric .txt files (e.g. speakers.txt) are not snippets.
                        pass
                    else:
                        raw_snippets[starting_point] = \
                            obj.data_stream.read().decode('utf8')
        # Go through all snippets, whether they've been transcribed or not.
        snippets = []
        speakers_map = repos.speakers_map(repo, commit)
        for starting_point in range(0, transcription_info['duration'],
                                    _snippet_ms()):
            text = raw_snippets.get(starting_point, '').strip()
            lines = _split_lines_and_expand_abbreviations(text, speakers_map)
            snippets.append((starting_point, lines))
        data = dict(
            _standard_response(repo, commit),
            snippets=sorted(snippets),
            preamble_incomplete=repos.file_at_commit(
                repo, 'preamble_incomplete.html', commit,
            )[0],
            preamble_completed=repos.file_at_commit(
                repo, 'preamble_completed.html', commit,
            )[0],
        )
        content = render('fanscribed:templates/view.mako', data,
                         request=request)
        cache.cache_content(cache_key, content, mtime)
    return Response(content, date=mtime)
def post_speakers_txt(request):
    """Persist a POSTed speakers.txt to the master branch and echo it back.

    Expects POST fields ``text``, ``identity_name``, ``identity_email``.
    Writes the file into the repo working dir, commits it to master as the
    submitting identity, then re-reads the committed copy for the response.
    """
    text = request.POST.getone('text')
    identity_name = request.POST.getone('identity_name')
    identity_email = request.POST.getone('identity_email')
    # Save transcription info.
    repo, commit = repos.repo_from_request(request, rev='master')
    # Checkout/write/commit must be serialized: the working dir is shared
    # process state, so the whole sequence runs under the commit lock.
    with repos.commit_lock:
        repo.heads['master'].checkout()
        index = repo.index
        filename = os.path.join(repo.working_dir, 'speakers.txt')
        # Binary mode: the text is explicitly encoded to UTF-8 bytes.
        with open(filename, 'wb') as f:
            f.write(text.encode('utf8'))
        index.add(['speakers.txt'])
        # Git picks up author identity from the environment at commit time.
        # NOTE(review): these env vars are left set after the request —
        # presumably harmless since every commit path sets its own; confirm.
        os.environ['GIT_AUTHOR_NAME'] = identity_name
        os.environ['GIT_AUTHOR_EMAIL'] = identity_email
        index.commit('speakers: save')
    # Reload from repo and serve it up.
    commit = repo.commit('master')  # Refresh commit to match latest master.
    text, mtime = repos.file_at_commit(repo, 'speakers.txt', commit)
    return Response(text, content_type='text/plain', date=mtime)
def rss_kudos(request, max_hours=24, default_minutes=60):
    """Render an RSS feed thanking recent transcription contributors.

    Walks commit history backwards from the request's commit, groups
    snippet-editing commits into ``minutes_per_item``-sized time buckets per
    author, and renders one pseudo-randomly chosen (but deterministic, seeded
    on the latest contribution date) kudos line per author per bucket.

    :param max_hours: how far back in history to scan.
    :param default_minutes: bucket size when ``?minutes=`` is absent.
    """
    repo, commit = repos.repo_from_request(request)
    # Get grouping parameters.
    end_timestamp = int(request.GET.get('end', time.time()))
    minutes_per_item = int(request.GET.get('minutes', default_minutes))
    # Return cached if found.
    cache_key = 'rss_kudos commit={0} minutes={1} max_hours={2} start={3}'.format(
        commit.hexsha,
        minutes_per_item,
        max_hours,
        end_timestamp,
    )
    content, mtime = cache.get_cached_content(cache_key)
    # Fix: use `in` rather than the Py2-only, removed-in-Py3 dict.has_key().
    if content is None or 'nocache' in request.GET:
        # Get the list of kudos to give, or use the default.
        kudos_txt, mtime = repos.file_at_commit(repo, 'kudos.txt', commit)
        kudos_txt = kudos_txt or DEFAULT_KUDOS
        kudos_lines = kudos_txt.strip().splitlines()
        # Kudos templates might want transcription info.
        transcription_info, _ = repos.json_file_at_commit(
            repo, 'transcription.json', commit, required=True)
        # Process the range of time needed for this RSS feed.
        mtime = commit.authored_date
        tree = commit.tree
        timegroup_author_actions = {
            # timegroup: {
            #   AUTHOR_NAME: dict(actions=[ACTION, ...], kudos=KUDOS),
            #   ACTION = dict(author=AUTHOR, date=DATE, position=POSITION,
            #                 this_url=URL, now_url=URL)
            # }
        }
        # Find the ending timestamp for the period of time that comes
        # just before the current "partial" period of time.
        max_timestamp = end_timestamp - (end_timestamp % (minutes_per_item * 60))
        min_timestamp = max_timestamp - (max_hours * 60 * 60)
        # Starting from the request's commit, iterate backwards.
        for c in repo.iter_commits(commit):
            if c.authored_date < min_timestamp:
                # History is ordered newest-first; everything further back
                # is outside the feed window.
                break
            # Which snippet files did this commit touch?
            snippets_affected = set()
            for filename in c.stats.files:
                ms = _ms_from_snippet_filename(filename)
                if ms is not None:
                    snippets_affected.add(ms)
            if snippets_affected:
                earliest_ms = min(snippets_affected)
                date = c.authored_date
                # Bucket the commit by time group.
                timegroup = date - (date % (minutes_per_item * 60))
                timegroup_authors = timegroup_author_actions.setdefault(
                    timegroup, {})
                author = c.author
                author_actions = timegroup_authors.setdefault(
                    author.name, dict(actions=[]))['actions']
                anchor = _anchor_from_ms(earliest_ms)
                position = _label_from_ms(earliest_ms)
                kwargs = dict(
                    host=request.host,
                    rev=c.hexsha,
                    anchor=anchor,
                )
                now_url = 'http://{host}/?src=rk#{anchor}'.format(**kwargs)
                this_url = 'http://{host}/?rev={rev}&src=rk#{anchor}'.format(**kwargs)
                action = dict(
                    author=author,
                    author_name=author.name,
                    date=date,
                    position=position,
                    this_url=this_url,
                    now_url=now_url,
                )
                author_actions.append(action)
        # Now create the kudos for each author.
        # Fix: .items() instead of the Py2-only dict.iteritems().
        for timegroup, authors in timegroup_author_actions.items():
            for author_name, author_info in authors.items():
                # Deterministically choose a pseudo-random kudos based on what
                # we know about this author's contributions.
                actions = author_info['actions']
                # Commits were visited newest-first, so actions[0] is latest.
                latest_action = actions[0]
                random.seed(latest_action['date'])
                kudos_line = random.choice(kudos_lines).strip()
                kudos_template = Template(text=kudos_line)
                # Render it.
                kudos = kudos_template.render(
                    author_name=author_name,
                    contributions=len(actions),
                    latest_action=latest_action,
                    transcription_info=transcription_info,
                    request=request,
                )
                # Keep it with the author info.
                author_info['kudos'] = kudos
                author_info['latest_action'] = latest_action
        pub_date = commit.authored_date
        data = dict(
            _standard_response(repo, commit),
            timegroup_author_actions=timegroup_author_actions,
            pub_date=pub_date,
            request=request,
            rfc822_from_time=rfc822_from_time,
            end_timestamp=end_timestamp,
            minutes_per_item=minutes_per_item,
        )
        content = render('fanscribed:templates/rss_kudos.xml.mako', data,
                         request=request)
        cache.cache_content(cache_key, content, mtime)
    return Response(content, content_type='application/rss+xml', date=mtime)
def speakers_txt(request):
    """Serve the raw speakers.txt for the requested commit as text/plain."""
    # No rendering or processing, no need to cache.
    repo, commit = repos.repo_from_request(request)
    body, modified = repos.file_at_commit(repo, 'speakers.txt', commit)
    return Response(body, content_type='text/plain', date=modified)