def send_converted(self, req, in_type, content, selector, filename='file'):
    """Helper method for converting `content` and sending it directly.

    `selector` can be either a key or a MIME Type.

    The converted payload is streamed chunk-by-chunk when the front-end
    supports chunked encoding, otherwise it is sent whole with an explicit
    Content-Length.  Always terminates the request by raising `RequestDone`.
    """
    from trac.web.chrome import Chrome
    from trac.web.api import RequestDone
    # Ask the converter for an iterable result only when we can actually
    # stream it (chunked transfer encoding available).
    iterable = Chrome(self.env).use_chunked_encoding
    content, output_type, ext = self.convert_content(req, in_type, content,
                                                     selector,
                                                     iterable=iterable)
    if iterable:
        def encoder(content):
            # Encode each unicode chunk lazily so nothing is buffered.
            for chunk in content:
                if isinstance(chunk, unicode):
                    chunk = chunk.encode('utf-8')
                yield chunk
        content = encoder(content)
        length = None  # unknown up-front -> no Content-Length header
    else:
        if isinstance(content, unicode):
            content = content.encode('utf-8')
        length = len(content)
    req.send_response(200)
    req.send_header('Content-Type', output_type)
    if length is not None:
        req.send_header('Content-Length', length)
    if filename:
        req.send_header('Content-Disposition',
                        content_disposition('attachment',
                                            '%s.%s' % (filename, ext)))
    req.end_headers()
    req.write(content)
    raise RequestDone
def process_request(self, req):
    """Send a ZIP snapshot of `path` at revision `rev` of a repository.

    Query arguments: `rev` (default ``HEAD``), `path` (default ``/``),
    `filename` (default ``snapshot-<rev>``), `with_rev` (truthy flag that
    appends the abbreviated revision to a caller-supplied filename) and
    `repo` (repository name).  Terminates the request via `RequestDone`.
    """
    rev = req.args.get('rev')
    path = req.args.get('path')
    filename = req.args.get('filename')
    with_rev = req.args.get('with_rev')
    #TODO: load default repo if repo == None
    repo = req.args.get('repo')
    if not rev:
        rev = 'HEAD'
    if not path:
        path = '/'
    path = path[1:]
    # Accept both numeric ("1"/"0") and textual ("true") flag values.
    # The previous code compared `lower()` against 'True' (never matches)
    # and raised ValueError from int() on non-numeric values.
    if with_rev:
        try:
            with_rev = int(with_rev) != 0
        except ValueError:
            with_rev = str(with_rev).lower() == 'true'
    else:
        with_rev = False
    if not filename:
        filename = 'snapshot-%s' % str(rev)[:7]
    elif with_rev:
        filename += '-%s' % str(rev)[:7]
    # Build the archive before emitting any header so a snapshot failure
    # does not leave a half-written response.
    content = self.env.get_repository(repo).get_snapshot(rev, path, 'zip',
                                                         filename)
    req.send_response(200)
    req.send_header('Content-Type', 'application/zip')
    req.send_header('Content-Disposition',
                    content_disposition('inline', filename + '.zip'))
    req.send_header("Content-Length", len(content))
    # Was missing: headers must be terminated before writing the body.
    req.end_headers()
    req.write(content)
    raise RequestDone
def _render_zip(self, req, filename, repos, data):
    """ZIP archive with all the added and/or modified files.

    Builds the archive in memory from the changeset described by `data`
    (old/new path and revision) and sends it, then raises `RequestDone`.
    """
    new_rev = data['new_rev']
    req.send_response(200)
    req.send_header('Content-Type', 'application/zip')
    req.send_header('Content-Disposition',
                    content_disposition('inline', filename + '.zip'))

    from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED
    buf = StringIO()
    zipfile = ZipFile(buf, 'w', ZIP_DEFLATED)
    for old_node, new_node, kind, change in repos.get_changes(
            new_path=data['new_path'], new_rev=data['new_rev'],
            old_path=data['old_path'], old_rev=data['old_rev']):
        # Only files that still exist after the change get archived;
        # directories and deletions are skipped.
        if kind == Node.FILE and change != Changeset.DELETE:
            assert new_node
            zipinfo = ZipInfo()
            zipinfo.filename = new_node.path.strip('/').encode('utf-8')
            # Note: unicode filenames are not supported by zipfile.
            # UTF-8 is not supported by all Zip tools either,
            # but as some do, I think UTF-8 is the best option here.
            zipinfo.date_time = new_node.last_modified.utctimetuple()[:6]
            # rw-r--r-- permissions in the upper 16 bits of external_attr
            zipinfo.external_attr = 0644 << 16L # needed since Python 2.5
            zipinfo.compress_type = ZIP_DEFLATED
            zipfile.writestr(zipinfo, new_node.get_content().read())
    zipfile.close()

    zip_str = buf.getvalue()
    req.send_header("Content-Length", len(zip_str))
    req.end_headers()
    req.write(zip_str)
    raise RequestDone
def _download_as_zip(self, req, parent, attachments=None):
    """Send all viewable attachments of `parent` as a single ZIP file.

    Refuses (with `TracError`) when the combined attachment size exceeds
    `max_zip_size`.  Terminates the request by raising `RequestDone`.
    """
    if attachments is None:
        attachments = self.viewable_attachments(web_context(req, parent))
    total_size = sum(attachment.size for attachment in attachments)
    if total_size > self.max_zip_size:
        raise TracError(_("Maximum total attachment size: %(num)s",
                          num=pretty_size(self.max_zip_size)),
                        _("Download failed"))
    req.send_response(200)
    req.send_header('Content-Type', 'application/zip')
    # Sanitize the parent id so it is safe inside a filename.
    filename = 'attachments-%s-%s.zip' % \
               (parent.realm, re.sub(r'[/\\:]', '-', unicode(parent.id)))
    req.send_header('Content-Disposition',
                    content_disposition('inline', filename))

    buf = io.BytesIO()
    with ZipFile(buf, 'w', ZIP_DEFLATED) as zipfile:
        for attachment in attachments:
            zipinfo = create_zipinfo(attachment.filename,
                                     mtime=attachment.date,
                                     comment=attachment.description)
            try:
                with attachment.open() as fd:
                    zipfile.writestr(zipinfo, fd.read())
            except ResourceNotFound:
                pass  # skip missing files
    zip_str = buf.getvalue()
    req.send_header("Content-Length", len(zip_str))
    req.end_headers()
    req.write(zip_str)
    raise RequestDone()
def _send_csv(self, req, cols, rows, sep=',', mimetype='text/plain',
              filename=None):
    """Send query results as UTF-8 CSV, streaming rows when possible.

    Columns listed in `self._html_cols` are excluded from the output.
    Terminates the request by raising `RequestDone`.
    """
    def iso_time(t):
        return format_time(from_utimestamp(t), 'iso8601')

    def iso_datetime(dt):
        return format_datetime(from_utimestamp(dt), 'iso8601')

    # Columns whose raw values are timestamps get rendered as ISO-8601.
    col_conversions = {
        'time': iso_time,
        'datetime': iso_datetime,
        'changetime': iso_datetime,
        'date': iso_datetime,
        'created': iso_datetime,
        'modified': iso_datetime,
    }

    def iterate():
        out = io.BytesIO()
        writer = csv.writer(out, delimiter=sep, quoting=csv.QUOTE_MINIMAL)

        def writerow(values):
            # Write one row into the scratch buffer, grab the encoded
            # bytes, then reset the buffer for the next row.
            writer.writerow([value.encode('utf-8') for value in values])
            rv = out.getvalue()
            out.truncate(0)
            out.seek(0)
            return rv

        converters = [col_conversions.get(c.strip('_'), cell_value)
                      for c in cols]
        yield '\xef\xbb\xbf'  # BOM so spreadsheet apps detect UTF-8
        yield writerow(c for c in cols if c not in self._html_cols)
        for row in rows:
            yield writerow(converters[i](cell)
                           for i, cell in enumerate(row)
                           if cols[i] not in self._html_cols)

    data = iterate()
    if Chrome(self.env).use_chunked_encoding:
        length = None  # stream lazily, no Content-Length
    else:
        data = ''.join(data)
        length = len(data)
    req.send_response(200)
    req.send_header('Content-Type', mimetype + ';charset=utf-8')
    if length is not None:
        req.send_header('Content-Length', length)
    if filename:
        req.send_header('Content-Disposition',
                        content_disposition('attachment', filename))
    req.end_headers()
    req.write(data)
    raise RequestDone
def render_zip(req, filename, repos, root_node, iter_nodes):
    """Send a ZIP file containing the data corresponding to the `nodes`
    iterable.

    :type root_node: `~trac.versioncontrol.api.Node`
    :param root_node: optional ancestor for all the *nodes*

    :param iter_nodes: callable taking the optional *root_node* as input
                       and generating the `~trac.versioncontrol.api.Node`
                       for which the content should be added into the zip.
    """
    req.send_response(200)
    req.send_header('Content-Type', 'application/zip')
    req.send_header('Content-Disposition',
                    content_disposition('inline', filename))
    if root_node:
        req.send_header('Last-Modified', http_date(root_node.last_modified))
        root_path = root_node.path.rstrip('/')
    else:
        root_path = ''
    if root_path:
        root_path += '/'
        root_name = root_node.name + '/'
    else:
        root_name = ''
    # Length of the ancestor prefix stripped from each node path below.
    root_len = len(root_path)

    buf = StringIO()
    zipfile = ZipFile(buf, 'w', ZIP_DEFLATED)
    for node in iter_nodes(root_node):
        if node is root_node:
            continue
        path = node.path.strip('/')
        assert path.startswith(root_path)
        # Re-root the entry under the root node's own name.
        path = root_name + path[root_len:]
        kwargs = {'mtime': node.last_modified}
        data = None
        if node.isfile:
            data = node.get_processed_content(eol_hint='CRLF').read()
            properties = node.get_properties()
            # Subversion specific
            if 'svn:special' in properties and data.startswith('link '):
                # svn symlinks store their target as "link <target>"
                data = data[5:]
                kwargs['symlink'] = True
            if 'svn:executable' in properties:
                kwargs['executable'] = True
        elif node.isdir and path:
            kwargs['dir'] = True
            data = ''
        if data is not None:
            zipfile.writestr(create_zipinfo(path, **kwargs), data)
    zipfile.close()

    zip_str = buf.getvalue()
    req.send_header("Content-Length", len(zip_str))
    req.end_headers()
    req.write(zip_str)
    raise RequestDone
def _send_csv(self, req, cols, rows, sep=',', mimetype='text/plain',
              filename=None):
    """Send query results as UTF-8 CSV, streaming rows when possible.

    Columns listed in `self._html_cols` are excluded.  Terminates the
    request by raising `RequestDone`.
    """
    def iso_time(t):
        return format_time(from_utimestamp(t), 'iso8601')

    def iso_datetime(dt):
        return format_datetime(from_utimestamp(dt), 'iso8601')

    # Timestamp-like columns get rendered as ISO-8601 strings.
    col_conversions = {
        'time': iso_time,
        'datetime': iso_datetime,
        'changetime': iso_datetime,
        'date': iso_datetime,
        'created': iso_datetime,
        'modified': iso_datetime,
    }

    def iterate():
        from cStringIO import StringIO
        out = StringIO()
        writer = csv.writer(out, delimiter=sep, quoting=csv.QUOTE_MINIMAL)

        def writerow(values):
            # Serialize one row, harvest the bytes and reset the buffer.
            # (cStringIO.truncate(0) also rewinds the position.)
            writer.writerow([value.encode('utf-8') for value in values])
            rv = out.getvalue()
            out.truncate(0)
            return rv

        converters = [col_conversions.get(c.strip('_'), cell_value)
                      for c in cols]
        yield '\xef\xbb\xbf'  # BOM so spreadsheet apps detect UTF-8
        yield writerow(c for c in cols if c not in self._html_cols)
        for row in rows:
            yield writerow(converters[i](cell)
                           for i, cell in enumerate(row)
                           if cols[i] not in self._html_cols)

    data = iterate()
    if Chrome(self.env).use_chunked_encoding:
        length = None  # stream lazily, no Content-Length
    else:
        data = ''.join(data)
        length = len(data)
    req.send_response(200)
    req.send_header('Content-Type', mimetype + ';charset=utf-8')
    if length is not None:
        req.send_header('Content-Length', length)
    if filename:
        req.send_header('Content-Disposition',
                        content_disposition('attachment', filename))
    req.end_headers()
    req.write(data)
    raise RequestDone
def _download_as_zip(self, req, parent, attachments=None):
    """Stream all viewable attachments of `parent` as one ZIP file.

    The archive is assembled in a temporary file and flushed to the
    client incrementally after each entry, so arbitrarily large archives
    never live fully in memory.  Raises `TracError` when the combined
    size exceeds `max_zip_size`; terminates via `RequestDone`.
    """
    if attachments is None:
        attachments = self.viewable_attachments(web_context(req, parent))
    total_size = sum(attachment.size for attachment in attachments)
    if total_size > self.max_zip_size:
        raise TracError(_("Maximum total attachment size: %(num)s",
                          num=pretty_size(self.max_zip_size)),
                        _("Download failed"))
    req.send_response(200)
    req.send_header('Content-Type', 'application/zip')
    # Sanitize the parent id so it is safe inside a filename.
    filename = 'attachments-%s-%s.zip' % \
               (parent.realm, re.sub(r'[/\\:]', '-', unicode(parent.id)))
    req.send_header('Content-Disposition',
                    content_disposition('inline', filename))
    req.end_headers()

    def write_partial(fileobj, start):
        # Send everything written to `fileobj` since offset `start`,
        # then restore the file position for further zip writes.
        end = fileobj.tell()
        fileobj.seek(start, 0)
        remaining = end - start
        while remaining > 0:
            chunk = fileobj.read(min(remaining, 4096))
            req.write(chunk)
            remaining -= len(chunk)
        fileobj.seek(end, 0)
        return end

    pos = 0
    fileobj = TemporaryFile(prefix='trac-', suffix='.zip')
    try:
        zipfile = ZipFile(fileobj, 'w', ZIP_DEFLATED)
        for attachment in attachments:
            zipinfo = create_zipinfo(attachment.filename,
                                     mtime=attachment.date,
                                     comment=attachment.description)
            try:
                with attachment.open() as fd:
                    zipfile.writestr(zipinfo, fd.read())
            except ResourceNotFound:
                pass  # skip missing files
            else:
                # Flush the entry just written to the client.
                pos = write_partial(fileobj, pos)
    finally:
        try:
            # Closing the archive writes the central directory; send it.
            zipfile.close()
            write_partial(fileobj, pos)
        finally:
            fileobj.close()
    raise RequestDone
def _send_csv(self, req, cols, rows, sep=',', mimetype='text/plain',
              filename=None):
    """Serialize `rows` as UTF-8 CSV and send them as the response body.

    Columns present in `self._html_cols` are omitted.  The request is
    terminated by raising `RequestDone`.
    """
    def iso_time(t):
        return format_time(from_utimestamp(t), 'iso8601')

    def iso_datetime(dt):
        return format_datetime(from_utimestamp(dt), 'iso8601')

    # Timestamp-like columns are converted to ISO-8601; everything else
    # goes through the generic `cell_value` conversion.
    col_conversions = {
        'time': iso_time,
        'datetime': iso_datetime,
        'changetime': iso_datetime,
        'date': iso_datetime,
        'created': iso_datetime,
        'modified': iso_datetime,
    }
    converters = [col_conversions.get(c.strip('_'), cell_value)
                  for c in cols]

    buffer = StringIO()
    buffer.write('\xef\xbb\xbf')  # UTF-8 BOM for spreadsheet apps
    writer = csv.writer(buffer, delimiter=sep)
    header = [unicode(c).encode('utf-8')
              for c in cols if c not in self._html_cols]
    writer.writerow(header)
    for row in rows:
        fields = [conv(cell).encode('utf-8')
                  for cell, conv, name in zip(row, converters, cols)
                  if name not in self._html_cols]
        writer.writerow(fields)

    payload = buffer.getvalue()
    req.send_response(200)
    req.send_header('Content-Type', mimetype + ';charset=utf-8')
    req.send_header('Content-Length', len(payload))
    if filename:
        req.send_header('Content-Disposition',
                        content_disposition('attachment', filename))
    req.end_headers()
    req.write(payload)
    raise RequestDone
def send_converted(self, req, in_type, content, selector, filename='file'):
    """Convert `content` and send the result straight to the client.

    `selector` can be either a key or a MIME Type.  Terminates the
    request by raising `RequestDone`.
    """
    from trac.web.api import RequestDone
    converted, mimetype, ext = self.convert_content(req, in_type, content,
                                                    selector)
    # The request layer deals in bytes; encode unicode results first.
    if isinstance(converted, unicode):
        body = converted.encode('utf-8')
    else:
        body = converted
    req.send_response(200)
    req.send_header('Content-Type', mimetype)
    req.send_header('Content-Length', len(body))
    if filename:
        disposition = content_disposition('attachment',
                                          '%s.%s' % (filename, ext))
        req.send_header('Content-Disposition', disposition)
    req.end_headers()
    req.write(body)
    raise RequestDone
def _send_sql(self, req, id, title, description, sql):
    """Send the report's SQL as a plain-text download.

    Requires the REPORT_SQL_VIEW permission; terminates the request by
    raising `RequestDone`.
    """
    req.perm.require("REPORT_SQL_VIEW")

    buffer = StringIO()
    buffer.write("-- ## %s: %s ## --\n\n" % (id, title.encode("utf-8")))
    if description:
        # Render the description as a block of SQL line comments.
        commented = "\n-- ".join(description.encode("utf-8").splitlines())
        buffer.write("-- %s\n\n" % commented)
    buffer.write(sql.encode("utf-8"))
    body = buffer.getvalue()

    req.send_response(200)
    req.send_header("Content-Type", "text/plain;charset=utf-8")
    req.send_header("Content-Length", len(body))
    if id:
        req.send_header("Content-Disposition",
                        content_disposition("attachment",
                                            "report_%s.sql" % id))
    req.end_headers()
    req.write(body)
    raise RequestDone
def send_converted(self, req, in_type, content, selector, filename='file'):
    """Convert `content` and deliver the result to the client directly.

    `selector` can be either a key or a MIME Type.  Always ends the
    request by raising `RequestDone`.
    """
    from trac.web.api import RequestDone
    result = self.convert_content(req, in_type, content, selector)
    payload, output_type, ext = result
    if isinstance(payload, unicode):
        # Response bodies must be byte strings.
        payload = payload.encode('utf-8')
    req.send_response(200)
    req.send_header('Content-Type', output_type)
    req.send_header('Content-Length', len(payload))
    if filename:
        req.send_header('Content-Disposition',
                        content_disposition('attachment',
                                            '%s.%s' % (filename, ext)))
    req.end_headers()
    req.write(payload)
    raise RequestDone
def _send_sql(self, req, id, title, description, sql):
    """Send the report's SQL statement as a plain-text download.

    Requires REPORT_SQL_VIEW; terminates via `RequestDone`.
    """
    req.perm.require('REPORT_SQL_VIEW')

    # Assemble the file: a header line, the description as SQL comments,
    # then the statement itself.
    parts = StringIO()
    parts.write('-- ## %s: %s ## --\n\n' % (id, title.encode('utf-8')))
    if description:
        lines = description.encode('utf-8').splitlines()
        parts.write('-- %s\n\n' % '\n-- '.join(lines))
    parts.write(sql.encode('utf-8'))
    body = parts.getvalue()

    req.send_response(200)
    req.send_header('Content-Type', 'text/plain;charset=utf-8')
    req.send_header('Content-Length', len(body))
    if id:
        req.send_header('Content-Disposition',
                        content_disposition(filename='report_%s.sql' % id))
    req.end_headers()
    req.write(body)
    raise RequestDone
def _send_csv(self, req, cols, rows, sep=',', mimetype='text/plain',
              filename=None):
    """Send query results as UTF-8 CSV in one buffered response.

    Columns listed in `self._html_cols` are excluded.  Terminates the
    request by raising `RequestDone`.
    """
    def iso_time(t):
        return format_time(from_utimestamp(t), 'iso8601')

    def iso_datetime(dt):
        return format_datetime(from_utimestamp(dt), 'iso8601')

    # Timestamp-like columns get rendered as ISO-8601 strings.
    col_conversions = {
        'time': iso_time,
        'datetime': iso_datetime,
        'changetime': iso_datetime,
        'date': iso_datetime,
        'created': iso_datetime,
        'modified': iso_datetime,
    }
    converters = [col_conversions.get(c.strip('_'), cell_value)
                  for c in cols]

    out = StringIO()
    out.write('\xef\xbb\xbf')  # BOM so spreadsheet apps detect UTF-8
    writer = csv.writer(out, delimiter=sep)
    writer.writerow([unicode(c).encode('utf-8')
                     for c in cols if c not in self._html_cols])
    for row in rows:
        writer.writerow([converters[i](cell).encode('utf-8')
                         for i, cell in enumerate(row)
                         if cols[i] not in self._html_cols])
    data = out.getvalue()

    req.send_response(200)
    req.send_header('Content-Type', mimetype + ';charset=utf-8')
    req.send_header('Content-Length', len(data))
    if filename:
        req.send_header('Content-Disposition',
                        content_disposition('attachment', filename))
    req.end_headers()
    req.write(data)
    raise RequestDone
def _send_csv(self, req, cols, rows, sep=",", mimetype="text/plain",
              filename=None):
    """Send query results as UTF-8 CSV in one buffered response.

    Columns listed in `self._html_cols` are excluded.  Terminates the
    request by raising `RequestDone`.
    """
    def iso_time(t):
        return format_time(from_utimestamp(t), "iso8601")

    def iso_datetime(dt):
        return format_datetime(from_utimestamp(dt), "iso8601")

    # Timestamp-like columns get rendered as ISO-8601 strings.
    col_conversions = {
        "time": iso_time,
        "datetime": iso_datetime,
        "changetime": iso_datetime,
        "date": iso_datetime,
        "created": iso_datetime,
        "modified": iso_datetime,
    }
    converters = [col_conversions.get(c.strip("_"), cell_value)
                  for c in cols]

    out = StringIO()
    out.write("\xef\xbb\xbf")  # BOM so spreadsheet apps detect UTF-8
    writer = csv.writer(out, delimiter=sep)
    writer.writerow([unicode(c).encode("utf-8")
                     for c in cols if c not in self._html_cols])
    for row in rows:
        writer.writerow(
            [converters[i](cell).encode("utf-8")
             for i, cell in enumerate(row)
             if cols[i] not in self._html_cols]
        )
    data = out.getvalue()

    req.send_response(200)
    req.send_header("Content-Type", mimetype + ";charset=utf-8")
    req.send_header("Content-Length", len(data))
    if filename:
        req.send_header("Content-Disposition",
                        content_disposition("attachment", filename))
    req.end_headers()
    req.write(data)
    raise RequestDone
def test_no_filename(self):
    # Without a filename, only the bare disposition type comes back.
    for disposition in ('inline', 'attachment'):
        self.assertEqual(disposition,
                         util.content_disposition(disposition))
def test_filename(self):
    # The filename is appended and percent-quoted when necessary.
    cases = [
        ('myfile.txt', 'attachment; filename=myfile.txt'),
        ('a file.txt', 'attachment; filename=a%20file.txt'),
    ]
    for name, expected in cases:
        self.assertEqual(expected,
                         util.content_disposition('attachment', name))
def test_no_type(self):
    # With no disposition type, only the filename part is produced.
    cases = [
        ('myfile.txt', 'filename=myfile.txt'),
        ('a file.txt', 'filename=a%20file.txt'),
    ]
    for name, expected in cases:
        self.assertEqual(expected,
                         util.content_disposition(filename=name))
def process_request(self, req):
    """Dispatch transformation actions or render the transformation list.

    With an ``action`` request argument, executes/starts/queries a
    transformation (requires BUSINESSINTELLIGENCE_TRANSFORMATION_EXECUTE);
    otherwise renders the list page (requires
    BUSINESSINTELLIGENCE_TRANSFORMATION_LIST).
    """
    if 'action' in req.args:
        transform_id = self._generate_running_transformation_id()
        # Collect "parameter:<name>" request arguments into a dict.
        parameters = {}
        for k in req.args:
            if k.startswith("parameter:"):
                parameter_name = k.split(":", 2)[1]
                parameter_value = req.args[k]
                parameters[parameter_name] = parameter_value
        req.perm.require("BUSINESSINTELLIGENCE_TRANSFORMATION_EXECUTE")
        if req.args['action'] == "execute_async":
            # execute the transformation
            thread.start_new_thread(self._do_execute_transformation,
                                    (req.args['transform'],),
                                    {'transformation_id': transform_id,
                                     'parameters': parameters})
            # send transform_id generated via uuid back to JS via JSON
            # we have to do this after we invoke a new thread as req.send()
            # returns from this function and stops further flow control
            req.send(to_json({'transform_id': transform_id}), 'text/json')
        elif req.args['action'] == "execute_download":
            # Run synchronously and stream the produced file back.
            filename, stat, filestream = self._do_execute_transformation(
                req.args['transform'],
                transformation_id=transform_id,
                store=False,
                return_bytes_handle=True,
                parameters=parameters)
            req.send_response(200)
            req.send_header('Content-Type',
                            mimetypes.guess_type(filename)[0]
                            or 'application/octet-stream')
            req.send_header('Content-Length', stat.st_size)
            req.send_header('Content-Disposition',
                            content_disposition('attachment', filename))
            req.end_headers()
            # NOTE(review): `bytes` shadows the builtin; rename if this
            # code is ever touched for behavior.
            while True:
                bytes = filestream.read(4096)
                if not bytes:
                    break
                req.write(bytes)
            filestream.close()
            raise RequestDone
        elif req.args['action'] == "execute":
            # Synchronous execution with stored output; falls through to
            # the redirect/XHR handling below.
            self._do_execute_transformation(req.args['transform'],
                                            transformation_id=transform_id,
                                            parameters=parameters)
        elif req.args['action'] == 'check_status':
            if 'uuid' not in req.args:
                raise KeyError
            running_transformations = json.loads(req.args['uuid'])
            req.send(to_json(self._generate_status_response(
                         running_transformations)), 'text/json')
        else:
            add_warning(req, "No valid action found")
            req.redirect(req.href.businessintelligence())

        # Only reached by the plain "execute" action (the other branches
        # send or redirect).  XHR callers get an empty 200.
        if req.get_header('X-Requested-With') == 'XMLHttpRequest':
            req.send_response(200)
            req.send_header('Content-Length', 0)
            req.end_headers()
            return
        else:
            if 'returnto' in req.args:
                req.redirect(req.args['returnto'])
            else:
                req.redirect(req.href.businessintelligence())
    else:
        # No action: render the transformation list page.
        req.perm.require("BUSINESSINTELLIGENCE_TRANSFORMATION_LIST")
        data = {'transformations':
                self._list_transformation_files(listall=False)}
        add_script(req, 'contextmenu/contextmenu.js')
        add_script(req,
                   'businessintelligenceplugin/js/business-intelligence.js')
        add_stylesheet(req, 'common/css/browser.css')
        add_ctxtnav(req, tag.a(tag.i(class_="fa fa-upload"),
                               ' Upload Transformations',
                               id="uploadbutton"))
        add_ctxtnav(req, tag.a(tag.i(class_="fa fa-calendar"),
                               ' Schedule Transformations',
                               id="schedulebutton"))
        add_ctxtnav(req, tag.a(tag.i(class_="fa fa-cog"),
                               ' Running Transformations',
                               id="runningbutton"))
        return "listtransformations.html", data, None
def test_filename(self):
    # Plain and space-containing names; spaces must be percent-quoted.
    expectations = {
        "myfile.txt": "attachment; filename=myfile.txt",
        "a file.txt": "attachment; filename=a%20file.txt",
    }
    for name in ("myfile.txt", "a file.txt"):
        self.assertEqual(expectations[name],
                         util.content_disposition("attachment", name))
def test_no_filename(self):
    # Omitting the filename yields just the disposition type itself.
    for kind in ("inline", "attachment"):
        self.assertEqual(kind, util.content_disposition(kind))
def test_no_type(self):
    # Only the filename component is emitted when no type is given.
    for name, expected in (("myfile.txt", "filename=myfile.txt"),
                           ("a file.txt", "filename=a%20file.txt")):
        self.assertEqual(expected,
                         util.content_disposition(filename=name))
def _render_diff(self, req, filename, repos, data):
    """Raw Unified Diff version

    Builds a unified diff over every non-binary file change between the
    old and new path/revision described by `data`, then sends it as a
    ``text/x-patch`` download and raises `RequestDone`.
    """
    req.send_response(200)
    req.send_header('Content-Type', 'text/x-patch;charset=utf-8')
    req.send_header('Content-Disposition',
                    content_disposition('inline', filename + '.diff'))
    buf = StringIO()
    mimeview = Mimeview(self.env)
    for old_node, new_node, kind, change in repos.get_changes(
            new_path=data['new_path'], new_rev=data['new_rev'],
            old_path=data['old_path'], old_rev=data['old_rev']):
        # TODO: Property changes

        # Content changes
        if kind == Node.DIRECTORY:
            continue

        new_content = old_content = ''
        new_node_info = old_node_info = ('','')
        # NOTE(review): re-creating Mimeview here shadows the instance
        # built before the loop; harmless but redundant.
        mimeview = Mimeview(self.env)

        if old_node:
            # Binary files (by MIME type or by content sniffing) are
            # skipped entirely.
            if mimeview.is_binary(old_node.content_type, old_node.path):
                continue
            old_content = old_node.get_content().read()
            if mimeview.is_binary(content=old_content):
                continue
            old_node_info = (old_node.path, old_node.rev)
            old_content = mimeview.to_unicode(old_content,
                                              old_node.content_type)
        if new_node:
            if mimeview.is_binary(new_node.content_type, new_node.path):
                continue
            new_content = new_node.get_content().read()
            if mimeview.is_binary(content=new_content):
                continue
            new_node_info = (new_node.path, new_node.rev)
            new_path = new_node.path
            new_content = mimeview.to_unicode(new_content,
                                              new_node.content_type)
        else:
            # Deleted file: derive the would-be new path from the old one.
            old_node_path = repos.normalize_path(old_node.path)
            diff_old_path = repos.normalize_path(data['old_path'])
            new_path = posixpath.join(data['new_path'],
                                      old_node_path[len(diff_old_path)+1:])

        if old_content != new_content:
            options = data['diff']['options']
            context = options.get('contextlines', 3)
            if context < 0:
                context = 3  # FIXME: unified_diff bugs with context=None
            ignore_blank_lines = options.get('ignoreblanklines')
            ignore_case = options.get('ignorecase')
            ignore_space = options.get('ignorewhitespace')
            if not old_node_info[0]:
                old_node_info = new_node_info  # support for 'A'dd changes
            buf.write('Index: ' + new_path + CRLF)
            buf.write('=' * 67 + CRLF)
            buf.write('--- %s\t(revision %s)' % old_node_info + CRLF)
            buf.write('+++ %s\t(revision %s)' % new_node_info + CRLF)
            for line in unified_diff(old_content.splitlines(),
                                     new_content.splitlines(), context,
                                     ignore_blank_lines=ignore_blank_lines,
                                     ignore_case=ignore_case,
                                     ignore_space_changes=ignore_space):
                buf.write(line + CRLF)
    diff_str = buf.getvalue().encode('utf-8')
    req.send_header('Content-Length', len(diff_str))
    req.end_headers()
    req.write(diff_str)
    raise RequestDone
except Exception, err: self.env.log.exception('Repository dump failed: %s' % err) raise TracError('Repository archive failed - please try again later') finally: # Ensure the temp file gets removed even on errors if os.path.exists(tempfd.name): os.remove(tempfd.name) # Create HTTP response by reading the archive into it try: req.send_response(200) req.send_header('Content-Type', self.formats[format]['mime']) inline = '%s-%s.%s' % (project.env_name, revision, self.formats[format]['ext']) req.send_header('Content-Disposition', content_disposition('inline', inline)) content = tempfd.read() req.send_header("Content-Length", len(content)) req.end_headers() req.write(content) tempfd.close() # Ensure the temp file gets removed finally: if os.path.exists(tempfd.name): os.remove(tempfd.name) raise RequestDone def _archive_git(self, srcdir, revision, format, archpath, prefix=None): """
def process_request(self, req):
    """Process the request.

    Return a (template_name, data) pair, where data is a dictionary of
    substitutions for the Jinja2 template (the template context, in
    Jinja2 terms).

    Optionally, the return value can also be a (template_name, data,
    metadata) triple, where metadata is a dict with hints for the
    template engine or the web front-end.

    On POST, resolves the submitted wiki link to a `WikiPage`, converts
    it to a .docx document and sends that as an attachment (raising
    `RequestDone`); on failure the form is re-rendered with `errorlog`.
    """
    # Fixes vs previous version: debug print() calls removed;
    # `page.exists == True` replaced with a truth test; `content` is now
    # initialized so a non-matching wiki link no longer raises NameError.
    self.errorlog = []
    req_keys = set_req_keys(req)
    if req.method == 'POST':
        errorlog = []
        content = None
        page_path = req.args.get('get_wiki_link')
        match_path = re.match(r"(http://|e:)(.*|/)wiki/(.*)", page_path)
        if match_path:
            # Extract the page name: first whitespace-separated token,
            # stripped of any "|label" suffix, then URL-unquoted.
            spec_name = re.split(r'\s+', match_path.group(3))
            spec_name = spec_name[0]
            spec_name = spec_name.split("|")
            spec_name = spec_name[0]
            spec_name = urllib.unquote(spec_name)
            page = WikiPage(self.env, spec_name)
            if page.exists:
                errorlog, content = self.process_document(page, req)
            else:
                errorlog.append(
                    ("Page {} does not exist.".format(page.name),
                     page.name))
        self.data['errorlog'] = errorlog
        if not errorlog and content is not None:
            self.data['form'] = {
                'create_report': to_unicode(req_keys[0]),
                'form_token': to_unicode(req_keys[1]),
                'get_wiki_link': to_unicode(req_keys[2]),
            }
            req.send_response(200)
            req.send_header(
                'Content-Type',
                'application/' + \
                'vnd.' + \
                'openxmlformats-officedocument.' +
                'wordprocessingml.'
                + 'document')
            req.send_header('Content-Length', len(content))
            req.send_header(
                'Content-Disposition',
                content_disposition('attachment', 'out.docx'))
            req.end_headers()
            req.write(content)
            raise RequestDone
    add_stylesheet(req, 'hw/css/wiki2doc.css')
    # This tuple is for Genshi (template_name, data, content_type)
    # Without data the trac layout will not appear.
    if hasattr(Chrome, 'add_jquery_ui'):
        Chrome(self.env).add_jquery_ui(req) # pylint: disable=no-member
    return 'wiki2doc.html', self.data, None
def render_zip(req, filename, repos, root_node, iter_nodes):
    """Send a ZIP file containing the data corresponding to the `nodes`
    iterable.

    :type root_node: `~trac.versioncontrol.api.Node`
    :param root_node: optional ancestor for all the *nodes*

    :param iter_nodes: callable taking the optional *root_node* as input
                       and generating the `~trac.versioncontrol.api.Node`
                       for which the content should be added into the zip.
    """
    req.send_response(200)
    req.send_header('Content-Type', 'application/zip')
    req.send_header('Content-Disposition',
                    content_disposition('inline', filename))
    if root_node:
        req.send_header('Last-Modified', http_date(root_node.last_modified))
        root_path = root_node.path.rstrip('/')
    else:
        root_path = ''
    if root_path:
        root_path += '/'
        root_name = root_node.name + '/'
    else:
        root_name = ''
    # Length of the ancestor prefix stripped from each node path below.
    root_len = len(root_path)
    req.end_headers()

    def write_partial(fileobj, start):
        # Send everything appended to `fileobj` since offset `start`,
        # then restore the position so the archive can keep growing.
        end = fileobj.tell()
        fileobj.seek(start, 0)
        remaining = end - start
        while remaining > 0:
            chunk = fileobj.read(min(remaining, 4096))
            req.write(chunk)
            remaining -= len(chunk)
        fileobj.seek(end, 0)
        return end

    pos = 0
    with TemporaryFile(prefix='trac-', suffix='.zip') as fileobj:
        with ZipFile(fileobj, 'w', ZIP_DEFLATED) as zipfile:
            for node in iter_nodes(root_node):
                if node is root_node:
                    continue
                path = node.path.strip('/')
                assert path.startswith(root_path)
                # Re-root the entry under the root node's own name.
                path = root_name + path[root_len:]
                kwargs = {'mtime': node.last_modified}
                data = None
                if node.isfile:
                    with content_closing(
                            node.get_processed_content(eol_hint='CRLF')) \
                            as content:
                        data = content.read()
                    props = node.get_properties()
                    # Subversion specific
                    if 'svn:special' in props and \
                            data.startswith('link '):
                        # svn symlinks store "link <target>" as content
                        data = data[5:]
                        kwargs['symlink'] = True
                    if 'svn:executable' in props:
                        kwargs['executable'] = True
                elif node.isdir and path:
                    kwargs['dir'] = True
                    data = ''
                if data is not None:
                    zipfile.writestr(create_zipinfo(path, **kwargs), data)
                    # Flush each finished entry to the client right away.
                    pos = write_partial(fileobj, pos)
        # Closing the ZipFile wrote the central directory; send it too.
        write_partial(fileobj, pos)
    raise RequestDone
raise TracError( 'Repository archive failed - please try again later') finally: # Ensure the temp file gets removed even on errors if os.path.exists(tempfd.name): os.remove(tempfd.name) # Create HTTP response by reading the archive into it try: req.send_response(200) req.send_header('Content-Type', self.formats[format]['mime']) inline = '%s-%s.%s' % (project.env_name, revision, self.formats[format]['ext']) req.send_header('Content-Disposition', content_disposition('inline', inline)) content = tempfd.read() req.send_header("Content-Length", len(content)) req.end_headers() req.write(content) tempfd.close() # Ensure the temp file gets removed finally: if os.path.exists(tempfd.name): os.remove(tempfd.name) raise RequestDone def _archive_git(self, srcdir, revision, format, archpath, prefix=None): """