def process_request(self, req):
    """Render the blog index.

    Collects every wiki page whose name starts with 'Blog', builds one
    event record per post, and fills the HDF for the ClearSilver
    template.  Returns the (template, content_type) pair Trac expects;
    RSS output is selected via the ``format=rss`` request argument.
    """
    req.hdf['trac.href.blog'] = req.href.blog()
    entries = []
    for page_name in WikiSystem(self.env).get_pages(prefix='Blog'):
        page = WikiPage(self.env, page_name)
        title = page_name
        text = page.text
        # A post may embed its title in the text; split it off if present.
        match = title_split_match(page.text)
        if match:
            title = match.group(1)
            text = match.group(2)
        # Count comment macros, then truncate the body at the first one so
        # reader comments don't show up in the index/feed.
        comments = text.count('[[SimpleBlogComment(')
        cutoff = text.find('[[SimpleBlogComment(')
        if cutoff >= 0:
            text = text[:cutoff].rstrip()
        description = wiki_to_html(text, self.env, req)
        original = self._get_original_post_info(page_name)
        event = {
            'href': self.env.href.wiki(page_name),
            'title': title,
            'description': description,
            'escaped': Markup.escape(unicode(description)),
            'date': format_datetime(original['time']),
            'rfcdate': http_date(original['time']),
            'author': original['author'],
            'comment': original['comment'],
            'comments': comments,
        }
        # page.version > 1 means the post was edited after publication;
        # surface the latest-revision metadata separately.
        if page.version > 1:
            event['updated.version'] = page.version
            event['updated.date'] = format_datetime(page.time)
            event['updated.rfcdate'] = http_date(page.time)
            event['updated.author'] = page.author
            event['updated.comment'] = page.comment
        entries.append((original['time'], event))
    # Newest first, capped at 20 posts.
    entries.sort()
    entries.reverse()
    max_count = 20
    if len(entries) > max_count:
        entries = entries[:max_count]
    events = []
    for date, event in entries:
        events.append(event)
    req.hdf['blog.events'] = events
    format = req.args.get('format')
    if format == 'rss':
        return 'blog_rss.cs', 'application/rss+xml'
    add_link(req, 'alternate', self.env.href.blog(format='rss'), 'RSS Feed',
             'application/rss+xml', 'rss')
    return 'blog.cs', None
def send_file(self, path, mimetype=None):
    """Send a local file through mod_python's zero-copy `sendfile`.

    Emits Last-Modified and honours If-Modified-Since with a 304.
    `mimetype` overrides guessing from the file extension; when no type
    can be determined the Content-Type header is simply left unset.
    Always terminates the request by raising RequestDone.
    """
    file_info = os.stat(path)
    mtime_str = http_date(file_info.st_mtime)
    # Conditional GET: an exact match on the client's cached timestamp
    # short-circuits with 304 Not Modified.
    if mtime_str == self.req.headers_in.get('If-Modified-Since'):
        self.send_response(304)
        raise RequestDone
    self.req.status = 200
    content_type = mimetype or mimetypes.guess_type(path)[0]
    if content_type:
        self.req.content_type = content_type
    self.req.set_content_length(file_info.st_size)
    self.req.headers_out.add('Last-Modified', http_date(file_info.st_mtime))
    self.req.sendfile(path)
    raise RequestDone
def populate_hdf(hdf, env, req=None):
    """Populate the HDF data set with various information, such as common URLs,
    project information and request-related information.
    """
    # Imported lazily so merely loading this module doesn't pull in the
    # whole trac package.
    from trac import __version__
    hdf['trac'] = {
        'version': __version__,
        'time': format_datetime(),
        'time.gmt': http_date()
    }
    # Common navigation URLs used by every template.
    hdf['trac.href'] = {
        'wiki': env.href.wiki(),
        'browser': env.href.browser('/'),
        'timeline': env.href.timeline(),
        'roadmap': env.href.roadmap(),
        'milestone': env.href.milestone(None),
        'report': env.href.report(),
        'query': env.href.query(),
        'newticket': env.href.newticket(),
        'search': env.href.search(),
        'about': env.href.about(),
        'about_config': env.href.about('config'),
        'login': env.href.login(),
        'logout': env.href.logout(),
        'settings': env.href.settings(),
        'homepage': 'http://trac.edgewall.com/'
    }
    hdf['project'] = {
        'name': env.config.get('project', 'name'),
        'name_encoded': escape(env.config.get('project', 'name')),
        'descr': env.config.get('project', 'descr'),
        'footer': env.config.get(
            'project', 'footer',
            'Visit the Trac open source project at<br />'
            '<a href="http://trac.edgewall.com/">'
            'http://trac.edgewall.com/</a>'),
        'url': env.config.get('project', 'url')
    }
    # Request-specific values are only available when a request object
    # is passed in (e.g. not during offline rendering).
    if req:
        hdf['base_url'] = req.base_url
        # Host part = base URL minus the trailing CGI location.
        hdf['base_host'] = req.base_url[:req.base_url.rfind(req.cgi_location)]
        hdf['cgi_location'] = req.cgi_location
        hdf['trac.authname'] = escape(req.authname)
        for action in req.perm.permissions():
            req.hdf['trac.acl.' + action] = True
        # Mirror the request arguments into the HDF; list-valued args
        # (multi-selects) are expanded element by element.
        for arg in [k for k in req.args.keys() if k]:
            if isinstance(req.args[arg], (list, tuple)):
                hdf['args.%s' % arg] = [v.value for v in req.args[arg]]
            else:
                hdf['args.%s' % arg] = req.args[arg].value
def process_request(self, req):
    """Apply an XSLT stylesheet to a source document and send the result.

    The stylesheet and the document are each addressed by a
    (module, id, filename) triple taken from the request arguments;
    ``xp_``-prefixed arguments become XSLT parameters.  Responds with
    304 when the client cache is current, otherwise streams the
    transformed page.  Always ends the request via RequestDone.
    """
    stylespec = (req.args.get('ss_mod'), req.args.get('ss_id'),
                 req.args.get('ss_fil'))
    docspec = (req.args.get('doc_mod'), req.args.get('doc_id'),
               req.args.get('doc_fil'))
    if None in stylespec or None in docspec:
        self.env.log.error("Missing request parameters: %s", req.args)
        raise TracError('Bad request')
    style_obj = _get_src(self.env, req.hdf, *stylespec)
    doc_obj = _get_src(self.env, req.hdf, *docspec)
    params = dict(_get_opts(req.args, 'xp_'))
    # The response is stale if either the stylesheet or the document
    # changed; take the newer of the two timestamps.
    lastmod = max(style_obj.get_last_modified(), doc_obj.get_last_modified())
    req.check_modified(lastmod)
    # Fall back to If-Modified-Since only when no ETag-based
    # If-None-Match check applies.
    if not req.get_header('If-None-Match'):
        if http_date(lastmod) == req.get_header('If-Modified-Since'):
            req.send_response(304)
            req.end_headers()
            raise RequestDone
    if hasattr(req, '_headers'):  # 0.9 compatibility
        req._headers.append(('Last-Modified', http_date(lastmod)))
    else:
        req.send_header('Last-Modified', http_date(lastmod))
    page, content_type = _transform(style_obj, doc_obj, params, self.env,
                                    req.hdf)
    req.send_response(200)
    req.send_header('Content-Type', content_type + ';charset=utf-8')
    req.send_header('Content-Length', len(page))
    if hasattr(req, '_headers'):  # 0.9 compatibility
        # Under 0.9 the queued headers (incl. Last-Modified above) are
        # flushed here, together with any cookies.
        for name, value in req._headers:
            req.send_header(name, value)
        req._send_cookie_headers()
    req.end_headers()
    if req.method != 'HEAD':
        req.write(page)
    raise RequestDone
def display_rss(self, req, query):
    """Fill the HDF with ticket-query results shaped for the RSS template.

    Forces verbose mode, resolves absolute ticket links, renders
    descriptions to escaped HTML, and rewrites timestamps to RFC-1123
    dates.  Reporter values that don't look like email addresses are
    blanked, since RSS expects an email in the author field.
    """
    query.verbose = True
    db = self.env.get_db_cnx()
    rows = query.execute(db)
    for row in rows:
        row['href'] = self.env.abs_href.ticket(row['id'])
        if '@' not in row['reporter']:
            row['reporter'] = ''
        if row['description']:
            # absurls=1 so links inside the feed work outside the site.
            rendered = wiki_to_html(row['description'] or '', self.env,
                                    req, db, absurls=1)
            row['description'] = escape(rendered)
        if row['time']:
            row['time'] = http_date(row['time'])
    req.hdf['query.results'] = rows
def send_file(self, path, mimetype=None):
    """Send a local file to the browser.

    This method includes the "Last-Modified", "Content-Type" and
    "Content-Length" headers in the response, corresponding to the file
    attributes. It also checks the last modification time of the local
    file against the "If-Modified-Since" provided by the user agent, and
    sends a "304 Not Modified" response if it matches.

    Raises TracError if `path` is not an existing regular file, and
    always terminates the request by raising RequestDone.
    """
    if not os.path.isfile(path):
        raise TracError("File %s not found" % path)
    stat = os.stat(path)
    last_modified = http_date(stat.st_mtime)
    if last_modified == self.get_header('If-Modified-Since'):
        self.send_response(304)
        self.end_headers()
        raise RequestDone
    self.send_response(200)
    if not mimetype:
        # guess_type can return None; never emit "Content-Type: None".
        mimetype = mimetypes.guess_type(path)[0] or 'application/octet-stream'
    self.send_header('Content-Type', mimetype)
    self.send_header('Content-Length', stat.st_size)
    self.send_header('Last-Modified', last_modified)
    self._send_cookie_headers()
    self.end_headers()
    if self.method != 'HEAD':
        # Open OUTSIDE the try: if open() itself fails, the old code hit
        # a NameError on `fd` in the finally clause.
        fd = open(path, 'rb')
        try:
            # Stream in 4 KiB chunks to bound memory use.
            while True:
                data = fd.read(4096)
                if not data:
                    break
                self.write(data)
        finally:
            fd.close()
    raise RequestDone
def _generate_blog(self, req, *args, **kwargs):
    """Extract the blog pages and fill the HDF.

    *args is a list of tags to use to limit the blog scope
    **kwargs are any aditional keyword arguments that are needed
    """
    tallies = {}
    tags = TagEngine(self.env).tagspace.wiki
    try:
        union = kwargs['union']
    except KeyError:
        union = False
    # Formatting
    read_post = "[wiki:%s Read Post]"
    entries = {}
    # No explicit tags: fall back to the configured default tag.
    if not len(args):
        tlist = [self.env.config.get('blog', 'default_tag', 'blog')]
    else:
        tlist = args
    if union:
        blog = tags.get_tagged_names(tlist, operation='union')
    else:
        blog = tags.get_tagged_names(tlist, operation='intersection')
    # The page hosting the macro itself must not appear as a post.
    macropage = req.args.get('page', None)
    poststart, postend, default_times = self._get_time_range(req, **kwargs)
    mark_updated = self._choose_value('mark_updated', req, kwargs,
                                      convert=bool_val)
    if not mark_updated and (not isinstance(mark_updated, bool)):
        mark_updated = bool_val(self.env.config.get('blog', 'mark_updated',
                                                    True))
    macro_bl = self.env.config.get('blog', 'macro_blacklist', '').split(',')
    macro_bl = [name.strip() for name in macro_bl if name.strip()]
    macro_bl.append('BlogShow')
    # Get the email addresses of all known users and validate the "poster"
    # BlogShow optional argument at the same time (avoids looping the user
    # list twice).
    is_poster = None
    limit_poster = self._choose_value('poster', req, kwargs, convert=None)
    email_map = {}
    for username, name, email in self.env.get_known_users():
        if email:
            email_map[username] = email
        if limit_poster != None:
            if username == limit_poster:
                is_poster = username
    num_posts = self._choose_value('num_posts', req, kwargs, convert=int)
    # When only a post-count limit is given, widen the time window to
    # everything so the count alone restricts output.
    if num_posts and default_times:
        poststart = sys.maxint
        postend = 0
    for blog_entry in blog:
        if blog_entry == macropage:
            continue
        try:
            # version=1 gives the original posting info (time/author)...
            page = WikiPage(self.env, version=1, name=blog_entry)
            version, post_time, author, comment, ipnr = page.get_history(
                ).next()
            # if we're limiting by poster, do so now so that the calendar
            # only shows the number of entries the specific poster made.
            if is_poster != None:
                if is_poster != author:
                    continue
            self._add_to_tallies(tallies, post_time, blog_entry)
            # ...while the unversioned load gives the latest revision.
            page = WikiPage(self.env, name=blog_entry)
            version, modified, author, comment, ipnr = page.get_history(
                ).next()
        except:
            # NOTE(review): bare except also hides unrelated errors, not
            # just missing pages — consider narrowing.
            self.log.debug("Error loading wiki page %s" % blog_entry,
                           exc_info=True)
            continue
        if poststart >= post_time >= postend:
            time_format = self.env.config.get('blog', 'date_format') \
                          or '%x %X'
            timeStr = format_datetime(post_time, format=time_format)
            fulltext = page.text
            # remove comments in blog view:
            del_comments = re.compile('==== Comment.*\Z', re.DOTALL)
            fulltext = del_comments.sub('', fulltext)
            # remove the [[AddComment...]] tag, otherwise it would appeare
            # more than one and crew up the blog view:
            del_addcomment = re.compile('\[\[AddComment.*\Z', re.DOTALL)
            fulltext = del_addcomment.sub('', fulltext)
            # limit length of preview:
            post_size = self._choose_value('post_size', req, kwargs, int)
            if not post_size and (not isinstance(post_size, int)):
                post_size = int(self.env.config.get('blog', 'post_size',
                                                    1024))
            text = self._trim_page(fulltext, blog_entry, post_size)
            # Tags shown per post, excluding the ones used to scope the
            # blog itself; only the first three are rendered.
            pagetags = [x for x in tags.get_name_tags(blog_entry)
                        if x not in tlist]
            tagtags = []
            for i, t in enumerate(pagetags[:3]):
                d = {
                    'link': t,
                    'name': t,
                    'last': i == (len(pagetags[:3]) - 1),
                }
                tagtags.append(d)
                continue  # no-op: marks end of loop body
            # extract title from text:
            match = _title_split_match(fulltext)
            if match:
                title = match.group(1)
                fulltext = match.group(2)
            else:
                title = blog_entry
            html_text = wiki_to_html(fulltext, self.env, req)
            rss_text = Markup.escape(to_unicode(html_text))
            data = {
                'name': blog_entry,
                'title': title,
                'href': self.env.href.wiki(blog_entry),
                'wiki_link': wiki_to_oneliner(read_post % blog_entry,
                                              self.env),
                'time': timeStr,
                'date': http_date(post_time),
                # 'date' : http_date(page.time),
                'author': author,
                'wiki_text': wiki_to_nofloat_html(text, self.env, req,
                                                  macro_blacklist=macro_bl),
                'rss_text': rss_text,
                'comment': wiki_to_oneliner(comment, self.env),
                'tags': {
                    'present': len(pagetags),
                    'tags': tagtags,
                    'more': len(pagetags) > 3 or 0,
                },
            }
            if author:
                # For RSS, author must be an email address
                if author.find('@') != -1:
                    data['author.email'] = author
                elif email_map.has_key(author):
                    data['author.email'] = email_map[author]
            if (modified != post_time) and mark_updated:
                data['modified'] = 1
                mod_str = format_datetime(modified, format=time_format)
                data['mod_time'] = mod_str
            # Keyed by post time so the index can be sorted by date.
            entries[post_time] = data
        continue  # no-op: marks end of loop body
    tlist = entries.keys()
    tlist.sort()
    tlist.reverse()
    if num_posts and (num_posts <= len(tlist)):
        tlist = tlist[:num_posts]
    if tlist:
        entries[tlist[-1]]['last'] = 1
    req.hdf['blog.entries'] = [entries[x] for x in tlist]
    bloglink = self.env.config.get('blog', 'new_blog_link', 'New Blog Post')
    req.hdf['blog.newblog'] = bloglink
    hidecal = self._choose_value('hidecal', req, kwargs)
    if not hidecal:
        self._generate_calendar(req, tallies)
    req.hdf['blog.hidecal'] = hidecal
    # Insert /wiki/BlogHeader into /blog. If the page does not exist,
    # this'll be a no-op
    blog_header = WikiPage(self.env, name='BlogHeader').text
    req.hdf['blog.header'] = Mimeview(self.env).render(req,
                                                       'text/x-trac-wiki',
                                                       blog_header)
def process_request(self, req):
    """Render the timeline: gather events from all providers within the
    selected date window and filter set, then fill the HDF.

    Supports an RSS variant via the ``format=rss`` request argument.
    Returns the (template, content_type) pair Trac expects.
    """
    req.perm.assert_permission('TIMELINE_VIEW')
    format = req.args.get('format')
    maxrows = int(req.args.get('max', 0))
    # Parse the from date and adjust the timestamp to the last second of
    # the day
    t = time.localtime()
    if req.args.has_key('from'):
        try:
            t = time.strptime(req.args.get('from'), '%x')
        except:
            # Unparseable date: silently keep "today".
            pass
    fromdate = time.mktime(
        (t[0], t[1], t[2], 23, 59, 59, t[6], t[7], t[8]))
    try:
        daysback = max(0, int(req.args.get('daysback', '')))
    except ValueError:
        daysback = int(self.config.get('timeline', 'default_daysback'))
    req.hdf['timeline.from'] = format_date(fromdate)
    req.hdf['timeline.daysback'] = daysback
    available_filters = []
    for event_provider in self.event_providers:
        available_filters += event_provider.get_timeline_filters(req)
    filters = []
    # check the request or session for enabled filters, or use default
    # (three predicates tried in priority order; the first one that
    # selects anything wins).
    for test in (lambda f: req.args.has_key(f[0]),
                 lambda f: req.session.get('timeline.filter.%s' % f[0], '')\
                           == '1',
                 lambda f: len(f) == 2 or f[2]):
        if filters:
            break
        filters = [f[0] for f in available_filters if test(f)]
    # save the results of submitting the timeline form to the session
    if req.args.has_key('update'):
        for filter in available_filters:
            key = 'timeline.filter.%s' % filter[0]
            if req.args.has_key(filter[0]):
                req.session[key] = '1'
            elif req.session.has_key(key):
                del req.session[key]
    stop = fromdate
    start = stop - (daysback + 1) * 86400
    events = []
    for event_provider in self.event_providers:
        events += event_provider.get_timeline_events(
            req, start, stop, filters)
    # Sort by timestamp (tuple index 3), newest first.
    events.sort(lambda x, y: cmp(y[3], x[3]))
    if maxrows and len(events) > maxrows:
        del events[maxrows:]
    req.hdf['title'] = 'Timeline'
    # Get the email addresses of all known users
    email_map = {}
    for username, name, email in self.env.get_known_users():
        if email:
            email_map[username] = email
    idx = 0
    for kind, href, title, date, author, message in events:
        event = {'kind': kind, 'title': title, 'href': escape(href),
                 'author': escape(author or 'anonymous'),
                 'date': format_date(date),
                 'time': format_time(date, '%H:%M'),
                 'message': message}
        if format == 'rss':
            # Strip/escape HTML markup
            event['title'] = re.sub(r'</?\w+(?: .*?)?>', '', title)
            event['message'] = escape(message)
            if author:
                # For RSS, author must be an email address
                if author.find('@') != -1:
                    event['author.email'] = escape(author)
                elif email_map.has_key(author):
                    event['author.email'] = escape(email_map[author])
            event['date'] = http_date(date)
        req.hdf['timeline.events.%s' % idx] = event
        idx += 1
    if format == 'rss':
        return 'timeline_rss.cs', 'application/rss+xml'
    add_stylesheet(req, 'common/css/timeline.css')
    rss_href = self.env.href.timeline([(f, 'on') for f in filters],
                                      daysback=90, max=50, format='rss')
    add_link(req, 'alternate', rss_href, 'RSS Feed',
             'application/rss+xml', 'rss')
    for idx, fltr in enum(available_filters):
        req.hdf['timeline.filters.%d' % idx] = {
            'name': fltr[0], 'label': fltr[1],
            'enabled': int(fltr[0] in filters)}
    return 'timeline.cs', None
id_cols = [idx for idx, col in util.enum(cols) if col[0] in ('ticket', 'id')] if id_cols: id_val = row[id_cols[0]] value['ticket_href'] = self.env.href.autotrac("ticket/" + str(id_val)) elif column == 'description': value['parsed'] = wiki_to_html(cell, self.env, req, db) elif column == 'reporter' and cell.find('@') != -1: value['rss'] = cell elif column == 'report': value['report_href'] = self.env.href.report(cell) elif column in ['time', 'date','changetime', 'created', 'modified']: value['date'] = util.format_date(cell) value['time'] = util.format_time(cell) value['datetime'] = util.format_datetime(cell) value['gmt'] = util.http_date(cell) prefix = 'report.items.%d.%s' % (row_idx, str(column)) req.hdf[prefix] = str(cell) for key in value.keys(): req.hdf[prefix + '.' + key] = value[key] col_idx += 1 row_idx += 1 req.hdf['report.numrows'] = row_idx format = req.args.get('format') if format == 'rss': self._render_rss(req) return 'report_rss.cs', 'application/rss+xml' elif format == 'csv': self._render_csv(req, cols, rows)
def _render_file(self, req, repos, node, rev=None):
    """Render a repository file for the peer-review browser.

    ``format=raw``/``format=txt`` streams the raw bytes and terminates
    the request with RequestDone; otherwise an HTML preview is placed
    in the HDF for the template.
    """
    req.perm.assert_permission('FILE_VIEW')
    changeset = repos.get_changeset(node.rev)
    req.hdf['file'] = {
        'rev': node.rev,
        'changeset_href': util.escape(self.env.href.changeset(node.rev)),
        'date': util.format_datetime(changeset.date),
        'age': util.pretty_timedelta(changeset.date),
        'author': changeset.author or 'anonymous',
        'message': wiki_to_html(changeset.message or '--', self.env, req,
                                escape_newlines=True)
    }
    mime_type = node.content_type
    # 'application/octet-stream' from svn is treated as "unknown" and
    # re-guessed from the file name.
    if not mime_type or mime_type == 'application/octet-stream':
        mime_type = get_mimetype(node.name) or mime_type or 'text/plain'
    # We don't have to guess if the charset is specified in the
    # svn:mime-type property
    ctpos = mime_type.find('charset=')
    if ctpos >= 0:
        charset = mime_type[ctpos + 8:]
    else:
        charset = None
    format = req.args.get('format')
    if format in ['raw', 'txt']:
        req.send_response(200)
        req.send_header('Content-Type',
                        format == 'txt' and 'text/plain' or mime_type)
        req.send_header('Content-Length', node.content_length)
        req.send_header('Last-Modified',
                        util.http_date(node.last_modified))
        req.end_headers()
        content = node.get_content()
        # Stream in chunks; RequestDone is raised when exhausted.
        while 1:
            chunk = content.read(CHUNK_SIZE)
            if not chunk:
                raise RequestDone
            req.write(chunk)
    else:
        # Generate HTML preview
        mimeview = Mimeview(self.env)
        content = node.get_content().read(mimeview.max_preview_size)
        if not is_binary(content):
            if mime_type != 'text/plain':
                plain_href = self.env.href.peerReviewBrowser(
                    node.path, rev=rev and node.rev, format='txt')
                add_link(req, 'alternate', plain_href, 'Plain Text',
                         'text/plain')
        raw_href = self.env.href.peerReviewBrowser(
            node.path, rev=rev and node.rev, format='raw')
        req.hdf['file'] = mimeview.preview_to_hdf(
            req, content, len(content), mime_type, node.created_path,
            raw_href, annotations=['addFileNums'])
        add_link(req, 'alternate', raw_href, 'Original Format', mime_type)
        add_stylesheet(req, 'common/css/code.css')
def _generate_blog(self, req, *args, **kwargs):
    """Extract the blog pages and fill the HDF.

    *args is a list of tags to use to limit the blog scope
    **kwargs are any aditional keyword arguments that are needed
    """
    tallies = {}
    tags = TagEngine(self.env).tagspace.wiki
    try:
        union = kwargs['union']
    except KeyError:
        union = False
    # Formatting
    read_post = "[wiki:%s Read Post]"
    entries = {}
    # No explicit tags: fall back to the configured default tag.
    if not len(args):
        tlist = [self.env.config.get('blog', 'default_tag', 'blog')]
    else:
        tlist = args
    if union:
        blog = tags.get_tagged_names(tlist, operation='union')
    else:
        blog = tags.get_tagged_names(tlist, operation='intersection')
    # The page hosting the macro itself must not appear as a post.
    macropage = req.args.get('page', None)
    poststart, postend, default_times = self._get_time_range(req, **kwargs)
    mark_updated = self._choose_value('mark_updated', req, kwargs,
                                      convert=bool_val)
    if not mark_updated and (not isinstance(mark_updated, bool)):
        mark_updated = bool_val(
            self.env.config.get('blog', 'mark_updated', True))
    macro_bl = self.env.config.get('blog', 'macro_blacklist', '').split(',')
    macro_bl = [name.strip() for name in macro_bl if name.strip()]
    macro_bl.append('BlogShow')
    # Get the email addresses of all known users and validate the "poster"
    # BlogShow optional argument at the same time (avoids looping the user
    # list twice).
    is_poster = None
    limit_poster = self._choose_value('poster', req, kwargs, convert=None)
    email_map = {}
    for username, name, email in self.env.get_known_users():
        if email:
            email_map[username] = email
        if limit_poster != None:
            if username == limit_poster:
                is_poster = username
    num_posts = self._choose_value('num_posts', req, kwargs, convert=int)
    # When only a post-count limit is given, widen the time window to
    # everything so the count alone restricts output.
    if num_posts and default_times:
        poststart = sys.maxint
        postend = 0
    for blog_entry in blog:
        if blog_entry == macropage:
            continue
        try:
            # version=1 gives the original posting info (time/author)...
            page = WikiPage(self.env, version=1, name=blog_entry)
            version, post_time, author, comment, ipnr = page.get_history(
                ).next()
            # if we're limiting by poster, do so now so that the calendar
            # only shows the number of entries the specific poster made.
            if is_poster != None:
                if is_poster != author:
                    continue
            self._add_to_tallies(tallies, post_time, blog_entry)
            # ...while the unversioned load gives the latest revision.
            page = WikiPage(self.env, name=blog_entry)
            version, modified, author, comment, ipnr = page.get_history(
                ).next()
        except:
            # NOTE(review): bare except also hides unrelated errors, not
            # just missing pages — consider narrowing.
            self.log.debug("Error loading wiki page %s" % blog_entry,
                           exc_info=True)
            continue
        if poststart >= post_time >= postend:
            time_format = self.env.config.get('blog', 'date_format') \
                          or '%x %X'
            timeStr = format_datetime(post_time, format=time_format)
            fulltext = page.text
            # remove comments in blog view:
            del_comments = re.compile('==== Comment.*\Z', re.DOTALL)
            fulltext = del_comments.sub('', fulltext)
            # remove the [[AddComment...]] tag, otherwise it would appeare
            # more than one and crew up the blog view:
            del_addcomment = re.compile('\[\[AddComment.*\Z', re.DOTALL)
            fulltext = del_addcomment.sub('', fulltext)
            # limit length of preview:
            post_size = self._choose_value('post_size', req, kwargs, int)
            if not post_size and (not isinstance(post_size, int)):
                post_size = int(
                    self.env.config.get('blog', 'post_size', 1024))
            text = self._trim_page(fulltext, blog_entry, post_size)
            # Tags shown per post, excluding the ones used to scope the
            # blog itself; only the first three are rendered.
            pagetags = [
                x for x in tags.get_name_tags(blog_entry) if x not in tlist
            ]
            tagtags = []
            for i, t in enumerate(pagetags[:3]):
                d = {
                    'link': t,
                    'name': t,
                    'last': i == (len(pagetags[:3]) - 1),
                }
                tagtags.append(d)
                continue  # no-op: marks end of loop body
            # extract title from text:
            match = _title_split_match(fulltext)
            if match:
                title = match.group(1)
                fulltext = match.group(2)
            else:
                title = blog_entry
            html_text = wiki_to_html(fulltext, self.env, req)
            rss_text = Markup.escape(to_unicode(html_text))
            data = {
                'name': blog_entry,
                'title': title,
                'href': self.env.href.wiki(blog_entry),
                'wiki_link': wiki_to_oneliner(read_post % blog_entry,
                                              self.env),
                'time': timeStr,
                'date': http_date(post_time),
                # 'date' : http_date(page.time),
                'author': author,
                'wiki_text': wiki_to_nofloat_html(text, self.env, req,
                                                  macro_blacklist=macro_bl),
                'rss_text': rss_text,
                'comment': wiki_to_oneliner(comment, self.env),
                'tags': {
                    'present': len(pagetags),
                    'tags': tagtags,
                    'more': len(pagetags) > 3 or 0,
                },
            }
            if author:
                # For RSS, author must be an email address
                if author.find('@') != -1:
                    data['author.email'] = author
                elif email_map.has_key(author):
                    data['author.email'] = email_map[author]
            if (modified != post_time) and mark_updated:
                data['modified'] = 1
                mod_str = format_datetime(modified, format=time_format)
                data['mod_time'] = mod_str
            # Keyed by post time so the index can be sorted by date.
            entries[post_time] = data
        continue  # no-op: marks end of loop body
    tlist = entries.keys()
    tlist.sort()
    tlist.reverse()
    if num_posts and (num_posts <= len(tlist)):
        tlist = tlist[:num_posts]
    if tlist:
        entries[tlist[-1]]['last'] = 1
    req.hdf['blog.entries'] = [entries[x] for x in tlist]
    bloglink = self.env.config.get('blog', 'new_blog_link', 'New Blog Post')
    req.hdf['blog.newblog'] = bloglink
    hidecal = self._choose_value('hidecal', req, kwargs)
    if not hidecal:
        self._generate_calendar(req, tallies)
    req.hdf['blog.hidecal'] = hidecal
    # Insert /wiki/BlogHeader into /blog. If the page does not exist,
    # this'll be a no-op
    blog_header = WikiPage(self.env, name='BlogHeader').text
    req.hdf['blog.header'] = Mimeview(self.env).render(
        req, 'text/x-trac-wiki', blog_header)
def _render_file(self, req, repos, node, rev=None):
    """Render a repository file for the peer-review browser (0.9-style
    `preview_to_hdf` signature).

    ``format=raw``/``format=txt`` streams the raw bytes and terminates
    the request with RequestDone; otherwise an HTML preview is placed
    in the HDF for the template.
    """
    req.perm.assert_permission('FILE_VIEW')
    changeset = repos.get_changeset(node.rev)
    req.hdf['file'] = {
        'rev': node.rev,
        'changeset_href': util.escape(self.env.href.changeset(node.rev)),
        'date': util.format_datetime(changeset.date),
        'age': util.pretty_timedelta(changeset.date),
        'author': changeset.author or 'anonymous',
        'message': wiki_to_html(changeset.message or '--', self.env, req,
                                escape_newlines=True)
    }
    mime_type = node.content_type
    # 'application/octet-stream' from svn is treated as "unknown" and
    # re-guessed from the file name.
    if not mime_type or mime_type == 'application/octet-stream':
        mime_type = get_mimetype(node.name) or mime_type or 'text/plain'
    # We don't have to guess if the charset is specified in the
    # svn:mime-type property
    ctpos = mime_type.find('charset=')
    if ctpos >= 0:
        charset = mime_type[ctpos + 8:]
    else:
        charset = None
    format = req.args.get('format')
    if format in ['raw', 'txt']:
        req.send_response(200)
        req.send_header('Content-Type',
                        format == 'txt' and 'text/plain' or mime_type)
        req.send_header('Content-Length', node.content_length)
        req.send_header('Last-Modified',
                        util.http_date(node.last_modified))
        req.end_headers()
        content = node.get_content()
        # Stream in chunks; RequestDone is raised when exhausted.
        while 1:
            chunk = content.read(CHUNK_SIZE)
            if not chunk:
                raise RequestDone
            req.write(chunk)
    else:
        # Generate HTML preview
        mimeview = Mimeview(self.env)
        content = node.get_content().read(mimeview.max_preview_size())
        if not is_binary(content):
            if mime_type != 'text/plain':
                plain_href = self.env.href.peerReviewBrowser(
                    node.path, rev=rev and node.rev, format='txt')
                add_link(req, 'alternate', plain_href, 'Plain Text',
                         'text/plain')
        req.hdf['file'] = mimeview.preview_to_hdf(
            req, mime_type, charset, content, node.name, node.rev,
            annotations=['addFileNums'])
        raw_href = self.env.href.peerReviewBrowser(
            node.path, rev=rev and node.rev, format='raw')
        req.hdf['file.raw_href'] = util.escape(raw_href)
        add_link(req, 'alternate', raw_href, 'Original Format', mime_type)
        add_stylesheet(req, 'common/css/code.css')
def process_request(self, req):
    """Render the revision log for a repository path.

    Supports 'stop_on_copy'/'path_history' modes, pagination via
    `limit`+1 fetching, and alternate RSS / ChangeLog output formats.
    Returns the (template, content_type) pair Trac expects.
    """
    req.perm.assert_permission('LOG_VIEW')
    mode = req.args.get('mode', 'stop_on_copy')
    path = req.args.get('path', '/')
    rev = req.args.get('rev')
    format = req.args.get('format')
    stop_rev = req.args.get('stop_rev')
    verbose = req.args.get('verbose')
    limit = LOG_LIMIT
    req.hdf['title'] = path + ' (log)'
    req.hdf['log'] = {
        'mode': mode, 'path': path, 'rev': rev, 'verbose': verbose,
        'stop_rev': stop_rev,
        'browser_href': self.env.href.browser(path),
        'log_href': self.env.href.log(path, rev=rev)
    }
    path_links = get_path_links(self.env.href, path, rev)
    req.hdf['log.path'] = path_links
    if path_links:
        add_link(req, 'up', path_links[-1]['href'], 'Parent directory')
    repos = self.env.get_repository(req.authname)
    normpath = repos.normalize_path(path)
    rev = str(repos.normalize_rev(rev))
    # ''Node history'' uses `Node.history()`,
    # ''Path history'' uses `Repository.get_path_history()`
    if mode == 'path_history':
        def history(limit):
            for h in repos.get_path_history(path, rev, limit):
                yield h
    else:
        history = get_existing_node(self.env, repos, path, rev).get_history
    # -- retrieve history, asking for limit+1 results
    info = []
    previous_path = repos.normalize_path(path)
    for old_path, old_rev, old_chg in history(limit + 1):
        if stop_rev and repos.rev_older_than(old_rev, stop_rev):
            break
        old_path = repos.normalize_path(old_path)
        item = {
            'rev': str(old_rev),
            'path': str(old_path),
            'log_href': self.env.href.log(old_path, rev=old_rev),
            'browser_href': self.env.href.browser(old_path, rev=old_rev),
            'changeset_href': self.env.href.changeset(old_rev),
            'change': old_chg
        }
        # In path_history mode plain edits are skipped: only
        # copy/rename/delete events matter.
        if not (mode == 'path_history' and old_chg == Changeset.EDIT):
            info.append(item)
        # A path change means this revision is a copy source.
        if old_path and old_path != previous_path \
                and not (mode == 'path_history' and old_path == normpath):
            item['copyfrom_path'] = old_path
            if mode == 'stop_on_copy':
                break
        if len(info) > limit:  # we want limit+1 entries
            break
        previous_path = old_path
    if info == []:
        # FIXME: we should send a 404 error here
        raise TracError("The file or directory '%s' doesn't exist "
                        "at revision %s or at any previous revision."
                        % (path, rev), 'Nonexistent path')

    def make_log_href(path, **args):
        # Omit the rev parameter when pointing at the youngest revision
        # so the link stays stable.
        link_rev = rev
        if rev == str(repos.youngest_rev):
            link_rev = None
        params = {'rev': link_rev, 'mode': mode, 'limit': limit}
        params.update(args)
        if verbose:
            params['verbose'] = verbose
        return self.env.href.log(path, **params)

    if len(info) == limit + 1:  # limit+1 reached, there _might_ be some more
        next_rev = info[-1]['rev']
        next_path = info[-1]['path']
        add_link(req, 'next', make_log_href(next_path, rev=next_rev),
                 'Revision Log (restarting at %s, rev. %s)'
                 % (next_path, next_rev))
        # now, only show 'limit' results
        del info[-1]
    req.hdf['log.items'] = info
    changes = get_changes(self.env, repos, [i['rev'] for i in info],
                          verbose, req, format)
    if format == 'rss':
        # Get the email addresses of all known users
        email_map = {}
        for username, name, email in self.env.get_known_users():
            if email:
                email_map[username] = email
        for cs in changes.values():
            cs['message'] = util.escape(cs['message'])
            cs['shortlog'] = util.escape(cs['shortlog'].replace('\n', ' '))
            # For RSS, author must be an email address
            author = cs['author']
            author_email = ''
            if '@' in author:
                author_email = author
            elif email_map.has_key(author):
                author_email = email_map[author]
            cs['author'] = author_email
            cs['date'] = util.http_date(cs['date_seconds'])
    elif format == 'changelog':
        # GNU ChangeLog style: tab-indent every message line.
        for cs in changes.values():
            cs['message'] = '\n'.join(
                ['\t' + m for m in cs['message'].split('\n')])
    req.hdf['log.changes'] = changes
    if req.args.get('format') == 'changelog':
        return 'log_changelog.cs', 'text/plain'
    elif req.args.get('format') == 'rss':
        return 'log_rss.cs', 'application/rss+xml'
    add_stylesheet(req, 'common/css/browser.css')
    add_stylesheet(req, 'common/css/diff.css')
    rss_href = make_log_href(path, format='rss', stop_rev=stop_rev)
    add_link(req, 'alternate', rss_href, 'RSS Feed',
             'application/rss+xml', 'rss')
    changelog_href = make_log_href(path, format='changelog',
                                   stop_rev=stop_rev)
    add_link(req, 'alternate', changelog_href, 'ChangeLog', 'text/plain')
    return 'log.cs', None
id_cols = [idx for idx, col in util.enum(cols) if col[0] in ('ticket', 'id')] if id_cols: id_val = row[id_cols[0]] value['ticket_href'] = self.env.href.ticket(id_val) elif column == 'description': value['parsed'] = wiki_to_html(cell, self.env, req, db) elif column == 'reporter' and cell.find('@') != -1: value['rss'] = util.escape(cell) elif column == 'report': value['report_href'] = self.env.href.report(cell) elif column in ['time', 'date','changetime', 'created', 'modified']: value['date'] = util.format_date(cell) value['time'] = util.format_time(cell) value['datetime'] = util.format_datetime(cell) value['gmt'] = util.http_date(cell) prefix = 'report.items.%d.%s' % (row_idx, str(column)) req.hdf[prefix] = util.escape(str(cell)) for key in value.keys(): req.hdf[prefix + '.' + key] = value[key] col_idx += 1 row_idx += 1 req.hdf['report.numrows'] = row_idx format = req.args.get('format') if format == 'rss': self._render_rss(req) return 'report_rss.cs', 'application/rss+xml' elif format == 'csv': self._render_csv(req, cols, rows)
def _render_file(self, req, repos, node, rev=None):
    """Render a repository file for the source browser (charset-aware
    variant with a configurable preview size limit).

    ``format=raw``/``format=txt`` streams the raw bytes and terminates
    the request with RequestDone; otherwise an HTML preview is placed
    in the HDF for the template.
    """
    req.perm.assert_permission('FILE_VIEW')
    changeset = repos.get_changeset(node.rev)
    req.hdf['file'] = {
        'rev': node.rev,
        'changeset_href': util.escape(self.env.href.changeset(node.rev)),
        'date': time.strftime('%x %X', time.localtime(changeset.date)),
        'age': util.pretty_timedelta(changeset.date),
        'author': changeset.author or 'anonymous',
        'message': wiki_to_html(changeset.message or '--', self.env, req,
                                escape_newlines=True)
    }
    mime_type = node.content_type
    # 'application/octet-stream' from svn is treated as "unknown" and
    # re-guessed from the file name.
    if not mime_type or mime_type == 'application/octet-stream':
        mime_type = get_mimetype(node.name) or mime_type or 'text/plain'
    # We don't have to guess if the charset is specified in the
    # svn:mime-type property
    ctpos = mime_type.find('charset=')
    if ctpos >= 0:
        charset = mime_type[ctpos + 8:]
    else:
        charset = None
    format = req.args.get('format')
    if format in ['raw', 'txt']:
        req.send_response(200)
        req.send_header('Content-Type',
                        format == 'txt' and 'text/plain' or mime_type)
        req.send_header('Content-Length', node.content_length)
        req.send_header('Last-Modified',
                        util.http_date(node.last_modified))
        req.end_headers()
        content = node.get_content()
        # Stream in chunks; RequestDone is raised when exhausted.
        while 1:
            chunk = content.read(CHUNK_SIZE)
            if not chunk:
                raise RequestDone
            req.write(chunk)
    else:
        # Generate HTML preview
        max_preview_size = int(
            self.config.get('mimeviewer', 'max_preview_size', '262144'))
        content = node.get_content().read(max_preview_size)
        # Reading exactly the cap implies the file was (probably)
        # truncated.
        max_size_reached = len(content) == max_preview_size
        if not charset:
            charset = detect_unicode(content) or \
                      self.config.get('trac', 'default_charset')
        if not is_binary(content):
            content = util.to_utf8(content, charset)
            if mime_type != 'text/plain':
                plain_href = self.env.href.browser(node.path,
                                                   rev=rev and node.rev,
                                                   format='txt')
                add_link(req, 'alternate', plain_href, 'Plain Text',
                         'text/plain')
            if max_size_reached:
                req.hdf['file.max_file_size_reached'] = 1
                req.hdf['file.max_file_size'] = max_preview_size
                preview = ' '
            else:
                preview = Mimeview(self.env).render(
                    req, mime_type, content, node.name, node.rev,
                    annotations=['lineno'])
            req.hdf['file.preview'] = preview
        raw_href = self.env.href.browser(node.path, rev=rev and node.rev,
                                         format='raw')
        req.hdf['file.raw_href'] = util.escape(raw_href)
        add_link(req, 'alternate', raw_href, 'Original Format', mime_type)
        add_stylesheet(req, 'common/css/code.css')
def _render_file(self, req, context, repos, node, rev=None):
    """Render a repository file (context/permission-policy variant).

    ``format=raw``/``format=txt`` streams the raw bytes and terminates
    the request with RequestDone; otherwise returns a data dict (preview
    plus changeset metadata) for the template.
    """
    req.perm(context.resource).require('FILE_VIEW')
    changeset = repos.get_changeset(node.rev)
    mime_type = node.content_type
    # 'application/octet-stream' from svn is treated as "unknown" and
    # re-guessed from the file name.
    if not mime_type or mime_type == 'application/octet-stream':
        mime_type = get_mimetype(node.name) or mime_type or 'text/plain'
    # We don't have to guess if the charset is specified in the
    # svn:mime-type property
    ctpos = mime_type.find('charset=')
    if ctpos >= 0:
        charset = mime_type[ctpos + 8:]
    else:
        charset = None
    content = node.get_content()
    # First chunk doubles as a binary-sniffing sample for the preview
    # branch below.
    chunk = content.read(CHUNK_SIZE)
    format = req.args.get('format')
    if format in ('raw', 'txt'):
        req.send_response(200)
        req.send_header('Content-Type',
                        format == 'txt' and 'text/plain' or mime_type)
        req.send_header('Content-Length', node.content_length)
        req.send_header('Last-Modified',
                        util.http_date(node.last_modified))
        req.end_headers()
        while 1:
            if not chunk:
                raise RequestDone
            req.write(chunk)
            chunk = content.read(CHUNK_SIZE)
    else:
        # Generate HTML preview
        mimeview = Mimeview(self.env)
        # The changeset corresponding to the last change on `node`
        # is more interesting than the `rev` changeset.
        changeset = repos.get_changeset(node.rev)
        # add ''Plain Text'' alternate link if needed
        if not is_binary(chunk) and mime_type != 'text/plain':
            plain_href = req.href.browser(node.path, rev=rev, format='txt')
            add_link(req, 'alternate', plain_href, 'Plain Text',
                     'text/plain')
        add_stylesheet(req, 'common/css/code.css')
        raw_href = self.env.href.peerReviewBrowser(
            node.path, rev=rev and node.rev, format='raw')
        # Note: a fresh content stream is passed here; `chunk` above was
        # only used for binary detection.
        preview_data = mimeview.preview_data(context, node.get_content(),
                                             node.get_content_length(),
                                             mime_type, node.created_path,
                                             raw_href,
                                             annotations=['lineno'])
        add_link(req, 'alternate', raw_href, 'Original Format', mime_type)
        return {
            'changeset': changeset,
            'size': node.content_length,
            'preview': preview_data['rendered'],
            'annotate': False,
            'rev': node.rev,
            'changeset_href': util.escape(self.env.href.changeset(node.rev)),
            'date': util.format_datetime(changeset.date),
            'age': util.pretty_timedelta(changeset.date),
            'author': changeset.author or 'anonymous',
            'message': wiki_to_html(changeset.message or '--', self.env,
                                    req, escape_newlines=True)
        }