def index_html(self, REQUEST, RESPONSE):
    """Default document"""
    # HTTP If-Modified-Since header handling. This is duplicated
    # from OFS.Image.Image - it really should be consolidated
    # somewhere...
    RESPONSE.setHeader('Content-Type', self.content_type)
    RESPONSE.setHeader('Last-Modified', self.lmh)
    RESPONSE.setHeader('Cache-Control', self.cch)
    if self.set_expiry_header:
        RESPONSE.setHeader('Expires', self._expires())

    header = REQUEST.get_header('If-Modified-Since', None)
    if header is not None:
        header = header.split(';')[0]
        # Some proxies seem to send invalid date strings for this
        # header. If the date string is not valid, we ignore it
        # rather than raise an error to be generally consistent
        # with common servers such as Apache (which can usually
        # understand the screwy date string as a lucky side effect
        # of the way they parse it).
        try:
            mod_since = long(DateTime(header).timeTime())
        except Exception:
            mod_since = None
        if mod_since is not None:
            if getattr(self, 'lmt', None):
                last_mod = long(self.lmt)
            else:
                last_mod = long(0)
            if last_mod > 0 and last_mod <= mod_since:
                RESPONSE.setStatus(304)
                return ''

    if not os.path.isfile(self.path):
        self._prepareGZippedContent()

    if REQUEST['HTTP_ACCEPT_ENCODING'].find('gzip') > -1:
        # HTTP/1.1 'gzip', HTTP/1.0 'x-gzip'
        RESPONSE.setHeader("Content-encoding", "gzip")
        RESPONSE.setHeader("Content-Length", self.content_size)
        if self.content_size > FILESTREAM_ITERATOR_THRESHOLD:
            return filestream_iterator(self.path, 'rb')
        else:
            return open(self.path, 'rb').read()
    else:
        RESPONSE.setHeader("Content-Length", self.original_content_size)
        if self.original_content_size > FILESTREAM_ITERATOR_THRESHOLD:
            return filestream_iterator(self.original_path, 'rb')
        else:
            return open(self.original_path, 'rb').read()
def generate_zip(self):
    response = self.request.response
    with ZipGenerator() as generator:
        # Protocol
        generator.add_file(*self.get_protocol())

        # Agenda items
        self.add_agenda_items_attachments(generator)
        if is_word_meeting_implementation_enabled():
            self.add_agenda_item_proposal_documents(generator)

        # Agenda items list
        generator.add_file(*self.get_agendaitem_list())

        # Return zip
        zip_file = generator.generate()
        filename = '{}.zip'.format(normalize_path(self.model.title))
        response.setHeader(
            "Content-Disposition",
            'inline; filename="{0}"'.format(
                safe_unicode(filename).encode('utf-8')))
        response.setHeader("Content-type", "application/zip")
        response.setHeader("Content-Length", os.stat(zip_file.name).st_size)

        return filestream_iterator(zip_file.name, 'rb')
def __call__(self):
    # Guard: without this the later safe_unicode(filename) call would raise
    # NameError when the request carries no "name".
    filename = None
    if getattr(self.request, "name", None) is not None:
        filename = self.request['name']

    tmpdir = gettempdir()
    filepath = os.path.join(tmpdir, self.filename)
    try:
        file_ = open(filepath)
    except IOError:
        return

    file_.seek(0, 2)  # end of file
    tmpsize = file_.tell()
    file_.seek(0)

    contenttype = 'application/octet-stream'
    filename = safe_unicode(filename)
    if filename:
        extension = os.path.splitext(filename)[1].lower()
        contenttype = mimetypes.types_map.get(
            extension, 'application/octet-stream')

    self.request.response.setHeader("Content-Type", contenttype)
    self.request.response.setHeader("Content-Length", tmpsize)
    if filename is not None:
        self.request.response.setHeader(
            "Content-Disposition",
            "attachment; filename=\"%s\"" % filename)
    return filestream_iterator(filepath, 'rb')
def decorator(*args, **kwargs):
    instance = args[0]
    request = getattr(instance, 'request', None)
    request.response.setHeader('Content-Type', 'application/zip')
    zip_out = func(*args, **kwargs)
    request.response.setHeader(
        'Content-Length', str(os.path.getsize(zip_out)))
    return filestream_iterator(zip_out)
def __call__(self):
    """Download the file"""
    self.setCacheHeaders()
    RESPONSE = self.REQUEST['RESPONSE']
    status = ""
    if WITH_LNK_PARSER:
        # get the target file into a MS-Link
        status, lnkpath = lnkparse(self.getRelativePath(),
                                   self.getFilesystemPath())
    if status == 'OK':
        RESPONSE.redirect('%s/%s' % (self.REQUEST.URL2, lnkpath))
        return
    elif status == 'ERROR_RELPATH':
        # File without a valid target file
        RESPONSE.redirect(self.absolute_url() + '/symbolic_link_notfound')
        return
    elif status == 'ERROR_OUTREFLECTPATH':
        # File without a valid target file
        RESPONSE.redirect(self.absolute_url() + '/targetfile_out_basepath')
        return

    # - Return file content
    iterator = filestream_iterator(self.getFilesystemPath(), 'rb')
    RESPONSE.setHeader('Last-Modified',
                       rfc1123_date(self.getStatus()[ST_MTIME]))
    RESPONSE.setHeader('Content-Type', self.Format())
    RESPONSE.setHeader('Content-Length', len(iterator))
    return iterator
def zip_selected(self, objects):
    if not HAS_ZIPEXPORT:
        return None

    response = self.request.response
    with ZipGenerator() as generator:
        for obj in objects:
            repre = getMultiAdapter((obj, self.request),
                                    interface=IZipRepresentation)
            for path, pointer in repre.get_files():
                generator.add_file(path, pointer)

        # check if zip has files
        if generator.is_empty:
            message = _(
                u'Zip export is not supported on the selected content.')
            api.portal.show_message(message, self.request, type=u'error')
            self.request.response.redirect(self.context.absolute_url())
            return

        zip_file = generator.generate()
        response.setHeader('Content-Disposition',
                           'inline; filename="{0}"'.format(self.filename))
        response.setHeader('Content-type', 'application/zip')
        response.setHeader('Content-Length', os.stat(zip_file.name).st_size)
        return filestream_iterator(zip_file.name, 'rb')
def index_html(self, REQUEST, RESPONSE):
    """Default document"""
    # HTTP If-Modified-Since header handling. This is duplicated
    # from OFS.Image.Image - it really should be consolidated
    # somewhere...
    RESPONSE.setHeader('Content-Type', self.content_type)
    RESPONSE.setHeader('Last-Modified', self.lmh)
    RESPONSE.setHeader('Cache-Control', self.cch)

    header = REQUEST.get_header('If-Modified-Since', None)
    if header is not None:
        header = header.split(';')[0]
        # Some proxies seem to send invalid date strings for this
        # header. If the date string is not valid, we ignore it
        # rather than raise an error to be generally consistent
        # with common servers such as Apache (which can usually
        # understand the screwy date string as a lucky side effect
        # of the way they parse it).
        try:
            mod_since = int(DateTime(header).timeTime())
        except Exception:
            mod_since = None
        if mod_since is not None:
            if getattr(self, 'lmt', None):
                last_mod = int(self.lmt)
            else:
                last_mod = int(0)
            if last_mod > 0 and last_mod <= mod_since:
                RESPONSE.setHeader('Content-Length', '0')
                RESPONSE.setStatus(304)
                return ''

    RESPONSE.setHeader('Content-Length', str(self.size).replace('L', ''))
    return filestream_iterator(self.path, mode='rb')
def retrieveFileStreamIterator(self, filename, REQUEST=None):
    threshold = 2 << 16  # 128 kb
    local_filename = self.targetFile(filename)
    try:
        fsize = os.path.getsize(local_filename)
    except OSError:
        fsize = 0
        raise NotFound
    if fsize < threshold or REQUEST.RESPONSE is None:
        try:
            f = open(local_filename, 'rb')
            data = f.read()
        finally:
            f.close()
    else:
        data = filestream_iterator(local_filename, 'rb')
    try:
        mt, enc = standard.guess_content_type(local_filename, data)
    except Exception:
        mt, enc = 'content/unknown', ''
    # Remove timestamp from filename.
    filename = filename[:filename.rfind('_')] + filename[filename.rfind('.'):]
    standard.set_response_headers(filename, mt, fsize, REQUEST)
    return data
def next(self):
    """ return data by file iterator """
    if self.__data__ is None:
        self.__data__ = filestream_iterator(self.path, mode='rb')
    return self.__data__.next()
def zip_selected(self, objects):
    if not HAS_ZIPEXPORT:
        return None

    response = self.request.response
    with ZipGenerator() as generator:
        for obj in objects:
            repre = getMultiAdapter(
                (obj, self.request), interface=IZipRepresentation)
            for path, pointer in repre.get_files():
                generator.add_file(path, pointer)

        # check if zip has files
        if generator.is_empty:
            message = _(
                u'Zip export is not supported on the selected content.')
            api.portal.show_message(message, self.request, type=u'error')
            self.request.response.redirect(self.context.absolute_url())
            return

        zip_file = generator.generate()
        response.setHeader(
            'Content-Disposition',
            'inline; filename="{0}"'.format(self.filename))
        response.setHeader('Content-type', 'application/zip')
        response.setHeader('Content-Length', os.stat(zip_file.name).st_size)
        return filestream_iterator(zip_file.name, 'rb')
def __call__(self):
    """Download the file"""
    self.setCacheHeaders()
    RESPONSE = self.REQUEST['RESPONSE']
    status = ""
    if WITH_LNK_PARSER:
        # get the target file into a MS-Link
        status, lnkpath = lnkparse(self.getRelativePath(),
                                   self.getFilesystemPath())
    if status == 'OK':
        RESPONSE.redirect('%s/%s' % (self.REQUEST.URL2, lnkpath))
        return
    elif status == 'ERROR_RELPATH':
        # File without a valid target file
        RESPONSE.redirect(self.absolute_url() + '/symbolic_link_notfound')
        return
    elif status == 'ERROR_OUTREFLECTPATH':
        # File without a valid target file
        RESPONSE.redirect(self.absolute_url() + '/targetfile_out_basepath')
        return

    # - Return file content
    iterator = filestream_iterator(self.getFilesystemPath(), 'rb')
    RESPONSE.setHeader('Last-Modified',
                       rfc1123_date(self.getStatus()[ST_MTIME]))
    RESPONSE.setHeader('Content-Type', self.Format())
    RESPONSE.setHeader('Content-Length', len(iterator))
    return iterator
def generate_zip(self):
    response = self.request.response
    with ZipGenerator() as generator:
        # Protocol
        generator.add_file(*self.get_protocol())

        # Agenda items
        self.add_agenda_items_attachments(generator)
        self.add_agenda_item_proposal_documents(generator)

        # Agenda items list
        try:
            generator.add_file(*self.get_agendaitem_list())
        except AgendaItemListMissingTemplate:
            pass

        generator.add_file(*self.get_meeting_json())

        # Return zip
        zip_file = generator.generate()
        filename = '{}.zip'.format(normalize_path(self.model.title))
        response.setHeader(
            "Content-Disposition",
            'inline; filename="{0}"'.format(
                safe_unicode(filename).encode('utf-8')))
        response.setHeader("Content-type", "application/zip")
        response.setHeader(
            "Content-Length", os.stat(zip_file.name).st_size)

        return filestream_iterator(zip_file.name, 'rb')
def index_html(self, REQUEST, RESPONSE):
    """Default document"""
    # HTTP If-Modified-Since header handling. This is duplicated
    # from OFS.Image.Image - it really should be consolidated
    # somewhere...
    RESPONSE.setHeader('Content-Type', self.content_type)
    RESPONSE.setHeader('Last-Modified', self.lmh)
    RESPONSE.setHeader('Cache-Control', self.cch)
    RESPONSE.setHeader('Content-Length', str(self.size).replace('L', ''))

    header = REQUEST.get_header('If-Modified-Since', None)
    if header is not None:
        header = header.split(';')[0]
        # Some proxies seem to send invalid date strings for this
        # header. If the date string is not valid, we ignore it
        # rather than raise an error to be generally consistent
        # with common servers such as Apache (which can usually
        # understand the screwy date string as a lucky side effect
        # of the way they parse it).
        try:
            mod_since = int(DateTime(header).timeTime())
        except Exception:
            mod_since = None
        if mod_since is not None:
            if getattr(self, 'lmt', None):
                last_mod = int(self.lmt)
            else:
                last_mod = int(0)
            if last_mod > 0 and last_mod <= mod_since:
                RESPONSE.setStatus(304)
                return ''

    return filestream_iterator(self.path, mode='rb')
def __call__(self):
    f = os.path.join(
        os.path.dirname(__file__),
        'test_resources',
        'streamed.js',
    )
    return filestream_iterator(f)
def __call__(self):
    path = get_distribution('plone.testing').location
    path = os.path.join(path, 'plone', 'testing', 'z2.txt')
    request = self.REQUEST
    response = request.response
    response.setHeader('Content-Type', 'text/plain')
    response.setHeader('Content-Length', os.path.getsize(path))
    return filestream_iterator(path)
def deliver(self, pdf_filename):
    """Stream generated PDF file back to client.
    """
    log.info("Deliver %s" % pdf_filename)
    # self.request.RESPONSE is the response object, so name it accordingly
    response = self.request.RESPONSE
    response.setHeader("content-type", "application/pdf")
    response.setHeader("content-length", os.stat(pdf_filename).st_size)
    response.setHeader("content-disposition",
                       "attachment; filename=%s" % pdf_filename)
    return filestream_iterator(pdf_filename, "rb")
def index_html(self, REQUEST, RESPONSE=None):
    """Default view for VirtualBinary file"""
    ranges = None
    if RESPONSE is None:
        RESPONSE = REQUEST.RESPONSE

    if self.__content_class__ is None:
        # Build your own headers
        RESPONSE.setHeader('Content-Type', self.content_type)
        RESPONSE.setHeader('Content-Length', self.size)
    else:
        # Call index_html method of content class with a fake
        # self.data attribute which will return empty value
        # Use this artifice to make sure content class is not loading
        # all data in memory since it is better to return a stream iterator
        # There is an exception with multiple range
        if REQUEST.environ.has_key('HTTP_RANGE'):
            ranges = parseRange(REQUEST.environ.get('HTTP_RANGE'))

        ## in case of multiple ranges we don't know what to do with an iterator
        if ranges is not None and len(ranges) > 1:
            ## call OFS.Image normally with data
            return self.__content_class__.index_html(self, REQUEST, RESPONSE)
        else:
            ### now we deal correctly with the 304 header
            if self._if_modified_since_request_handler(REQUEST, RESPONSE):
                self.ZCacheable_set(None)
                return ''

            ### set headers correctly
            RESPONSE.setHeader('Last-Modified', rfc1123_date(self._p_mtime))
            RESPONSE.setHeader('Content-Type', self.content_type)
            RESPONSE.setHeader('Content-Length', self.size)
            RESPONSE.setHeader('Accept-Ranges', 'bytes')
            self.ZCacheable_set(None)

            # This is a default header that can be bypassed by other products
            # such as attachment field.
            if RESPONSE.getHeader('content-disposition') is None:
                # headers are in lower case in HTTPResponse
                RESPONSE.setHeader(
                    'Content-Disposition',
                    'inline; filename="%s"' % self.filename
                )

    if ranges and len(ranges) == 1:
        ## this is a range request with one range,
        ## return an iterator for that range [(start, end)]
        [(start, end)] = ranges
        if end is None:
            end = self.size
        iterator = range_filestream_iterator(self.path, start, end,
                                             mode='rb')
        return iterator
    else:
        return filestream_iterator(self.path, mode='rb')
def pdf_download(self):
    blob = self.storage.retrieve()
    filename = blob._p_blob_uncommitted or blob.committed()
    response = self.context.REQUEST.RESPONSE
    response.setHeader("Content-Type", 'application/pdf')
    response.setHeader("Content-Length", self.storage.size)
    response.setHeader("Content-disposition",
                       'attachment; filename=%s' % self.pdf_filename())
    return filestream_iterator(filename, 'rb')
def stream_data(file):
    """Return the given file as a stream if possible.
    """
    if IBlobby.providedBy(file):
        if file._blob._p_blob_uncommitted:
            return file.data
        if filestream_iterator is not None:
            return filestream_iterator(file._blob.committed(), 'rb')
    return file.data
def __call__(self):
    """Download the file"""
    self.setCacheHeaders()
    RESPONSE = self.REQUEST['RESPONSE']
    iterator = filestream_iterator(self.getFilesystemPath(), 'rb')
    RESPONSE.setHeader('Last-Modified',
                       rfc1123_date(self.getStatus()[ST_MTIME]))
    RESPONSE.setHeader('Content-Type', self.Format())
    RESPONSE.setHeader('Content-Length', len(iterator))
    return iterator
def __call__(self, *args, **kw):
    output_file = super(PDFDownloadView, self).__call__(*args, **kw)
    # splitext() keeps the leading dot, so strip it before building headers
    extension = os.path.splitext(os.path.basename(output_file))[1].lstrip('.')
    R = self.request.response
    R.setHeader('content-type', 'application/%s' % extension)
    R.setHeader('content-disposition',
                'attachment; filename="%s.%s"' % (self.context.getId(),
                                                  extension))
    R.setHeader('pragma', 'no-cache')
    R.setHeader('cache-control', 'no-cache')
    R.setHeader('Expires', 'Fri, 30 Oct 1998 14:19:41 GMT')
    R.setHeader('content-length', os.path.getsize(output_file))
    return filestream_iterator(output_file, 'rb').read()
def __call__(self):
    data = {
        "sort_on": "Date",
        "sort_order": "reverse",
        "path": "/".join(self.context.getPhysicalPath()),
    }
    for k in self.request.form:
        v = self.request.form.get(k, None)
        if v and v != "None":
            data[k] = v
    if data:
        query = self.get_query(data=data)
        brains = self.conflict_manager.unrestricted_prenotazioni(**query)
    else:
        brains = []

    data = {
        "Sheet 1": [[
            "Nome completo",
            "Stato",
            "Postazione",
            "Tipologia prenotazione",
            "Email",
            "Data prenotazione",
            "Codice prenotazione",
        ]]
    }
    for brain in brains:
        obj = brain.getObject()
        data["Sheet 1"].append([
            brain.Title,
            self.get_prenotazione_state(obj),
            getattr(obj, "gate", "") or "",
            getattr(obj, "tipologia_prenotazione", "") or "",
            getattr(obj, "email", "") or "",
            self.prenotazioni_week_view.localized_time(brain["Date"])
            + " - "
            + self.prenotazioni_week_view.localized_time(
                brain["Date"], time_only=True),
            obj.getBookingCode(),
        ])

    now = DateTime()
    filename = "prenotazioni_{}.ods".format(now.strftime("%Y%m%d%H%M%S"))
    filepath = "{0}/{1}".format(tempfile.mkdtemp(), filename)
    save_data(filepath, data)

    streamed = filestream_iterator(filepath)
    mime = "application/vnd.oasis.opendocument.spreadsheet"
    self.request.RESPONSE.setHeader(
        "Content-type", "{0};charset={1}".format(mime, "utf-8"))
    self.request.RESPONSE.setHeader("Content-Length", str(len(streamed)))
    self.request.RESPONSE.setHeader(
        "Content-Disposition", 'attachment; filename="{}"'.format(filename))
    return streamed
def stream_data(file):
    """Return the given file as a stream if possible.
    """
    if IBlobby.providedBy(file) and filestream_iterator is not None:
        # XXX: we may want to use this instead, which would raise an error
        # in case of uncommitted changes
        # filename = file._blob.committed()
        filename = file._blob._p_blob_uncommitted or file._blob.committed()
        return filestream_iterator(filename, 'rb')
    return file.data
def render(self):
    import os
    from ZPublisher.Iterators import filestream_iterator

    self.request.response.setHeader(
        'content-type', 'application/octet-stream'
    )
    self.request.response.setHeader(
        'content-length', os.path.getsize(__file__)
    )
    return filestream_iterator(os.path.join(
        os.path.dirname(__file__), 'demo.py'))
def zip_selected(self, objects):
    response = self.request.response

    # check if zipexport is allowed on this context
    enabled_view = getMultiAdapter((self.context, self.request),
                                   name=u'zipexport-enabled')
    if not enabled_view.zipexport_enabled():
        raise NotFound()

    with ZipGenerator() as generator:
        for obj in objects:
            repre = getMultiAdapter((obj, self.request),
                                    interface=IZipRepresentation)
            for path, pointer in repre.get_files():
                try:
                    generator.add_file(path, pointer)
                except LargeZipFile:
                    messages = IStatusMessage(self.request)
                    messages.add(_("statmsg_zip_file_too_big",
                                   default=u"Content is too big "
                                           "to export"),
                                 type=u"error")
                    return self.request.response.redirect(
                        self.context.absolute_url())

        # check if zip has files
        if generator.is_empty:
            raise NoExportableContent()

        zip_file = generator.generate()

        # Trigger the per container event
        notify(ContainerZippedEvent(self.context))

        # Generate response file
        filename = '%s.zip' % self.context.title
        response.setHeader(
            "Content-Disposition",
            'inline; filename="{0}"'.format(
                safe_unicode(filename).encode('utf-8')))
        response.setHeader("Content-type", "application/zip")
        response.setHeader(
            "Content-Length", os.stat(zip_file.name).st_size)

        return filestream_iterator(zip_file.name, 'rb')
def __call__(self, *args, **kw):
    output_file = super(GenericDownloadView, self).__call__(*args, **kw)
    # splitext() keeps the leading dot, so strip it before building headers
    extension = os.path.splitext(os.path.basename(output_file))[1].lstrip('.')

    # return output file over HTTP
    R = self.request.response
    R.setHeader('content-type', 'application/%s' % extension)
    R.setHeader(
        'content-disposition',
        'attachment; filename="%s.%s"' % (self.context.getId(), extension))
    R.setHeader('content-length', os.path.getsize(output_file))
    R.setHeader('pragma', 'no-cache')
    R.setHeader('cache-control', 'no-cache')
    R.setHeader('Expires', 'Fri, 30 Oct 1998 14:19:41 GMT')
    return filestream_iterator(output_file, 'rb')
def retrieveFileStreamIterator(self, filename, REQUEST=None):
    threshold = 2 << 16  # 128 kb
    local_filename = _fileutil.getOSPath('%s/%s' % (self.location, filename))
    fsize = os.path.getsize(local_filename)
    REQUEST.RESPONSE.setHeader('content-length', fsize)
    if fsize < threshold or REQUEST.RESPONSE is None:
        try:
            f = open(local_filename, 'rb')
            data = f.read()
        finally:
            f.close()
    else:
        data = filestream_iterator(local_filename, 'rb')
    return data
def debug_excerpt_docxcompose(self):
    if not self.is_manager():
        raise Forbidden

    if self.agenda_item.is_paragraph:
        raise NotFound

    excerpt_protocol_data = ExcerptProtocolData(
        self.meeting, [self.agenda_item])

    header_template = self.agenda_item.get_excerpt_header_template()
    suffix_template = self.agenda_item.get_excerpt_suffix_template()

    with ZipGenerator() as generator:
        if header_template:
            sablon = Sablon(header_template).process(
                excerpt_protocol_data.as_json())
            generator.add_file(
                u'000_excerpt_header_template.docx',
                StringIO(sablon.file_data))

        document = self.agenda_item.resolve_document()
        filename = u'001_agenda_item_{}.docx'.format(
            safe_unicode(document.Title()))
        generator.add_file(filename, document.file.open())

        if suffix_template:
            sablon = Sablon(suffix_template).process(
                excerpt_protocol_data.as_json())
            generator.add_file(
                u'002_excerpt_suffix_template.docx',
                StringIO(sablon.file_data))

        # Return zip
        response = self.request.response
        zip_file = generator.generate()
        filename = '{}.zip'.format(normalize_path(self.meeting.title))
        response.setHeader(
            "Content-Disposition",
            'inline; filename="{0}"'.format(
                safe_unicode(filename).encode('utf-8')))
        response.setHeader("Content-type", "application/zip")
        response.setHeader(
            "Content-Length", os.stat(zip_file.name).st_size)

        return filestream_iterator(zip_file.name, 'rb')
def debug_excerpt_docxcompose(self):
    if not api.user.has_permission('cmf.ManagePortal'):
        raise Forbidden

    if self.agenda_item.is_paragraph:
        raise NotFound

    excerpt_protocol_data = ExcerptProtocolData(
        self.meeting, [self.agenda_item])

    header_template = self.agenda_item.get_excerpt_header_template()
    suffix_template = self.agenda_item.get_excerpt_suffix_template()

    with ZipGenerator() as generator:
        if header_template:
            sablon = Sablon(header_template).process(
                excerpt_protocol_data.as_json())
            generator.add_file(
                u'000_excerpt_header_template.docx',
                StringIO(sablon.file_data))

        document = self.agenda_item.resolve_document()
        filename = u'001_agenda_item_{}.docx'.format(
            safe_unicode(document.Title()))
        generator.add_file(filename, document.file.open())

        if suffix_template:
            sablon = Sablon(suffix_template).process(
                excerpt_protocol_data.as_json())
            generator.add_file(
                u'002_excerpt_suffix_template.docx',
                StringIO(sablon.file_data))

        # Return zip
        response = self.request.response
        zip_file = generator.generate()
        filename = '{}.zip'.format(normalize_path(self.meeting.title))
        response.setHeader(
            "Content-Disposition",
            'inline; filename="{0}"'.format(
                safe_unicode(filename).encode('utf-8')))
        response.setHeader("Content-type", "application/zip")
        response.setHeader(
            "Content-Length", os.stat(zip_file.name).st_size)

        return filestream_iterator(zip_file.name, 'rb')
def zip_selected(self, objects):
    response = self.request.response
    settings = getUtility(IRegistry).forInterface(IZipExportSettings)

    with ZipGenerator() as generator:
        for obj in objects:
            repre = getMultiAdapter((obj, self.request),
                                    interface=IZipRepresentation)
            for path, pointer in repre.get_files():
                if not pointer:
                    if settings.include_empty_folders:
                        generator.add_folder(path)
                    continue
                try:
                    generator.add_file(path, pointer)
                except LargeZipFile:
                    messages = IStatusMessage(self.request)
                    messages.add(_("statmsg_zip_file_too_big",
                                   default=u"Content is too big "
                                           "to export"),
                                 type=u"error")
                    return self.request.response.redirect(
                        self.context.absolute_url())

        # check if zip has files
        if generator.is_empty:
            raise NoExportableContent()

        zip_file = generator.generate()

        # Trigger the per container event
        notify(ContainerZippedEvent(self.context))

        # Generate response file
        filename = u'%s.zip' % self.context.title
        response.setHeader(
            "Content-Disposition",
            build_header(filename, disposition='attachment'))
        response.setHeader("Content-type", "application/zip")
        response.setHeader("Content-Length", os.stat(zip_file.name).st_size)

        return filestream_iterator(zip_file.name, 'rb')
def download(self):
    """Download all redirects as CSV.

    We save to a temporary file and try to stream it as a blob:
    with one million redirects you easily get 30 MB,
    which is slow as non-blob.
    """
    portal = getSite()
    portal_path = "/".join(portal.getPhysicalPath())
    len_portal_path = len(portal_path)
    file_descriptor, file_path = tempfile.mkstemp(suffix='.csv',
                                                  prefix='redirects_')
    with open(file_path, 'w') as stream:
        csv_writer = writer(stream)
        csv_writer.writerow(('old path', 'new path', 'datetime', 'manual'))
        storage = getUtility(IRedirectionStorage)
        paths = storage._paths
        # Note that the old and new paths start with /plone-site-id.
        # We strip this, as it is superfluous, and we would get errors
        # when using this download as an upload.
        for old_path, new_info in paths.items():
            if old_path.startswith(portal_path):
                old_path = old_path[len_portal_path:]
            row = [old_path]
            if not isinstance(new_info, tuple):
                # Old data: only a single path, no date and manual boolean.
                new_info = (new_info, )
            row.extend(new_info)
            new_path = row[1]
            if new_path.startswith(portal_path):
                row[1] = new_path[len_portal_path:]
            csv_writer.writerow(row)

    with open(file_path) as stream:
        contents = stream.read()
        length = len(contents)

    response = self.request.response
    response.setHeader('Content-Type', 'text/csv')
    response.setHeader('Content-Length', length)
    response.setHeader('Content-Disposition',
                       'attachment; filename=redirects.csv')
    if filestream_iterator is None:
        return contents
    # TODO: this is not enough to really stream the file.
    # I think we would need to handle Request-Range, like in the old
    # plone.app.blob.download.handleRequestRange
    return filestream_iterator(file_path, 'rb')
def __call__(self):
    response = self.request.response
    with ZipGenerator() as generator:
        zipper = MeetingDocumentZipper(self.model, generator)
        zip_file = zipper.get_zip_file()
        filename = '{}.zip'.format(normalize_path(self.model.title))
        set_attachment_content_disposition(self.request, filename)
        # the following headers must be set manually as
        # set_attachment_content_disposition expects a Named(Blob)File
        response.setHeader(
            'Content-type', 'application/zip')
        response.setHeader(
            'Content-Length', os.stat(zip_file.name).st_size)
        return filestream_iterator(zip_file.name, 'rb')
def __call__(self, *args, **kw):
    if 'resource' not in kw:
        kw['resource'] = 'pp-default'
    if 'template' not in kw:
        kw['template'] = 'pdf_template_standalone'
    kw['no-split'] = True
    output_file = super(PDFDownloadView, self).__call__(*args, **kw)
    # splitext() keeps the leading dot, so strip it before building headers
    extension = os.path.splitext(os.path.basename(output_file))[1].lstrip('.')
    R = self.request.response
    R.setHeader('content-type', 'application/%s' % extension)
    R.setHeader('content-disposition',
                'attachment; filename="%s.%s"' % (self.context.getId(),
                                                  extension))
    R.setHeader('pragma', 'no-cache')
    R.setHeader('cache-control', 'no-cache')
    R.setHeader('Expires', 'Fri, 30 Oct 1998 14:19:41 GMT')
    R.setHeader('content-length', os.path.getsize(output_file))
    return filestream_iterator(output_file, 'rb').read()
def _render(self):
    handle = self.context.get_handle(create_if_not_existing=True)
    zip_out = temp_zip(suffix='.zip')
    with fs.zipfs.ZipFS(zip_out, 'w') as zip_handle:
        for name in handle.walkfiles():
            with handle.open(name, 'rb') as fp_in:
                # zipfs seems to strip off the leading /
                with zip_handle.open(name, 'wb') as fp_out:
                    fp_out.write(fp_in.read())

    with delete_after(zip_out):
        self.request.response.setHeader('content-length',
                                        str(os.path.getsize(zip_out)))
        self.request.response.setHeader('content-type', 'application/zip')
        self.request.response.setHeader(
            'content-disposition',
            'attachment; filename={}.zip'.format(self.context.getId()))
        return filestream_iterator(zip_out)
def zip_selected(self, objects):
    response = self.request.response

    # check if zipexport is allowed on this context
    enabled_view = getMultiAdapter((self.context, self.request),
                                   name=u'zipexport-enabled')
    if not enabled_view.zipexport_enabled():
        raise NotFound()

    with ZipGenerator() as generator:
        for obj in objects:
            repre = getMultiAdapter((obj, self.request),
                                    interface=IZipRepresentation)
            for path, pointer in repre.get_files():
                try:
                    generator.add_file(path, pointer)
                except LargeZipFile:
                    messages = IStatusMessage(self.request)
                    messages.add(_("statmsg_zip_file_too_big",
                                   default=u"Content is too big "
                                           "to export"),
                                 type=u"error")
                    return self.request.response.redirect(
                        self.context.absolute_url())

        # check if zip has files
        if generator.is_empty:
            raise NoExportableContent()

        zip_file = generator.generate()
        filename = '%s.zip' % self.context.title
        response.setHeader(
            "Content-Disposition",
            'inline; filename="%s"' % safe_unicode(filename).encode('utf-8'))
        response.setHeader("Content-type", "application/zip")
        response.setHeader("Content-Length", os.stat(zip_file.name).st_size)

        return filestream_iterator(zip_file.name, 'rb')
def __call__(self, *args, **kw):
    if 'resource' not in kw:
        kw['resource'] = 'pp-default'
    if 'template' not in kw:
        kw['template'] = 'pdf_template_standalone'
    kw['no-split'] = True
    output_file = super(PDFDownloadView, self).__call__(*args, **kw)
    # splitext() keeps the leading dot, so strip it before building headers
    extension = os.path.splitext(os.path.basename(output_file))[1].lstrip('.')
    R = self.request.response
    R.setHeader('content-type', 'application/%s' % extension)
    R.setHeader(
        'content-disposition',
        'attachment; filename="%s.%s"' % (self.context.getId(), extension))
    R.setHeader('pragma', 'no-cache')
    R.setHeader('cache-control', 'no-cache')
    R.setHeader('Expires', 'Fri, 30 Oct 1998 14:19:41 GMT')
    R.setHeader('content-length', os.path.getsize(output_file))
    return filestream_iterator(output_file, 'rb').read()
def _render2(self):
    IPersistentLogger(self.context).log('convert')
    payload = decode_json_payload(self.request)

    if 'mapping' not in payload:
        raise ValueError('No "mapping" found in JSON payload')

    rules = payload['mapping']
    conversion_id = payload.get('converter', 'docx2ditatopic')
    conversion_endpoint_url = ENDPOINTS[conversion_id]['url']

    rewriter = RuleRewriter(rules)
    handle = self.context.webdav_handle()
    zip_tmp = temp_zip(suffix='.zip')
    with fs.zipfs.ZipFS(zip_tmp, 'w') as zip_fp:
        for name in handle.walkfiles():
            if name.endswith('.sha256'):
                continue
            name_in_zip = rewriter.rewrite(name)
            if name_in_zip:
                with handle.open(name, 'rb') as fp_in, \
                        zip_fp.open(name_in_zip, 'wb') as fp:
                    fp.write(fp_in.read())

    with delete_after(zip_tmp):
        zip_out = convert_crex(zip_tmp, crex_url=conversion_endpoint_url)
        store_zip(self.context, zip_out, 'current')

        conversion_info = self.get_crex_info()
        conversion_info['status'] = CREX_STATUS_SUCCESS
        self.set_crex_info(conversion_info)

        with delete_after(zip_out):
            self.request.response.setHeader(
                'content-length', str(os.path.getsize(zip_out)))
            self.request.response.setHeader('content-type',
                                            'application/zip')
            self.request.response.setHeader(
                'content-disposition',
                'attachment; filename={}.zip'.format(self.context.getId()))
            return filestream_iterator(zip_out)
def __call__(self):
    with ZipGenerator() as generator:
        self.add_header_sablon(generator)

        for index, agenda_item in enumerate(self.meeting.agenda_items, 1):
            self.add_agenda_item(index, agenda_item, generator)

        self.add_suffix_sablon(index, generator)

        # Return zip
        response = self.request.response
        zip_file = generator.generate()
        filename = '{}.zip'.format(normalize_path(self.meeting.title))
        response.setHeader(
            "Content-Disposition",
            'inline; filename="{0}"'.format(
                safe_unicode(filename).encode('utf-8')))
        response.setHeader("Content-type", "application/zip")
        response.setHeader(
            "Content-Length", os.stat(zip_file.name).st_size)

        return filestream_iterator(zip_file.name, 'rb')
def __call__(self):
    sm = getSecurityManager()
    if not sm.checkPermission(permissions.View, self.context.context):
        raise Unauthorized

    settings = self.context.settings
    filepath = self.context.filepath
    blob = settings.blob_files[filepath]
    filename = blob._p_blob_uncommitted or blob.committed()
    length = os.path.getsize(filename)

    ext = os.path.splitext(os.path.normcase(filepath))[1][1:]
    if ext == 'txt':
        ct = 'text/plain'
    else:
        ct = 'image/%s' % ext

    self.request.response.setHeader('Last-Modified',
                                    rfc1123_date(self.context._p_mtime))
    self.request.response.setHeader("Content-Length", length)
    self.request.response.setHeader('Content-Type', ct)
    return filestream_iterator(filename, 'rb')
def set(self, name, instance, value, **kwargs):
    """Set value of a field"""
    # Ignore initialize process
    initializing = kwargs.get('_initializing_', False)
    if initializing:
        return

    # Remove acquisition wrappers
    value = aq_base(value)

    # Create File System Storage Info
    info = self.setFSSInfo(name, instance, value, **kwargs)

    # Wrap value
    if objectImplements(IObjectField, value):
        value = value.getRaw(self.instance)
    if objectImplements(IBaseUnit, value):
        value = value.getRaw()
    elif isinstance(value, File):
        value = value.data
    else:
        value = str(value)

    # Copy file on filesystem
    strategy = self.getStorageStrategy(name, instance)
    props = self.getStorageStrategyProperties(name, instance, info)

    if isinstance(value, FSSPdata):
        ## put all in a temporary file
        fd, pathtemp = tempfile.mkstemp(prefix="tempfss")
        copy_file(value, pathtemp)
        value = filestream_iterator(pathtemp, mode='rb')
        strategy.setValueFile(value, **props)
        value.close()
        os.close(fd)
        rm_file(pathtemp)
    elif isinstance(value, Pdata):
        fd, pathtemp = tempfile.mkstemp(prefix="tempfss")
        f = open(pathtemp, 'wb')
        data = value
        while data is not None:
            f.write(data.data)
            data = data.next
        f.seek(0, 0)
        f.close()
        f = open(pathtemp, 'rb')
        strategy.setValueFile(f, **props)
        f.close()
        os.close(fd)
        rm_file(pathtemp)
    else:
        strategy.setValueFile(value, **props)

    # Create RDF file
    conf = self.getConf()
    is_rdf_enabled = conf.isRDFEnabled()
    rdf_script = conf.getRDFScript()

    if is_rdf_enabled:
        # Replace rdf file
        rdf_value = info.getRDFValue(name, instance, rdf_script)
        strategy.setRDFFile(rdf_value, **props)
def __call__(self):
    from ZPublisher.Iterators import filestream_iterator
    from pkg_resources import resource_filename
    filename = resource_filename('plone.subrequest', 'testfile.txt')
    return filestream_iterator(filename)
def __call__(self, request, result, encoding):
    return filestream_iterator(tmp)
def open_iterator(self):
    return filestream_iterator(self._blob.committed(), 'rb')
def getData(self):
    if os.path.exists(self.path):
        return FSSPdata(filestream_iterator(self.path, mode='rb'))
    else:
        ## simulate an empty iterator
        return FSSPdata(FileUploadIterator(cStringIO.StringIO('')))
def getIterator(self):
    fn = self.blob._current_filename()
    return filestream_iterator(fn)