def get_file(request):
    contract_id = request.validated['contract_id']
    document = request.validated['document']
    key = request.params.get('download')
    conn = getattr(request.registry, 's3_connection', None)
    filename = "{}_{}".format(document.id, key)
    if conn and filename not in request.validated['contract']['_attachments']:
        filename = "{}/{}/{}".format(contract_id, document.id, key)
        url = conn.generate_url(method='GET', bucket=request.registry.bucket_name, key=filename, expires_in=300)
        request.response.content_type = document.format.encode('utf-8')
        request.response.content_disposition = build_header(
            document.title, filename_compat=quote(document.title.encode('utf-8')))
        request.response.status = '302 Moved Temporarily'
        request.response.location = url
        return url
    else:
        filename = "{}_{}".format(document.id, key)
        data = request.registry.db.get_attachment(contract_id, filename)
        if data:
            request.response.content_type = document.format.encode('utf-8')
            request.response.content_disposition = build_header(
                document.title, filename_compat=quote(document.title.encode('utf-8')))
            request.response.body_file = data
            return request.response
        request.errors.add('url', 'download', 'Not Found')
        request.errors.status = 404
def get_file(request):
    plan_id = request.validated["plan_id"]
    document = request.validated["document"]
    key = request.params.get("download")
    conn = getattr(request.registry, "s3_connection", None)
    filename = "{}_{}".format(document.id, key)
    if conn and filename not in request.validated["plan"]["_attachments"]:
        filename = "{}/{}/{}".format(plan_id, document.id, key)
        url = conn.generate_url(method="GET", bucket=request.registry.bucket_name, key=filename, expires_in=300)
        request.response.content_type = document.format.encode("utf-8")
        request.response.content_disposition = build_header(
            document.title, filename_compat=quote(document.title.encode("utf-8"))
        )
        request.response.status = "302 Moved Temporarily"
        request.response.location = url
        return url
    else:
        filename = "{}_{}".format(document.id, key)
        data = request.registry.db.get_attachment(plan_id, filename)
        if data:
            request.response.content_type = document.format.encode("utf-8")
            request.response.content_disposition = build_header(
                document.title, filename_compat=quote(document.title.encode("utf-8"))
            )
            request.response.body_file = data
            return request.response
        request.errors.add("url", "download", "Not Found")
        request.errors.status = 404
def get_file(request):
    db_doc_id = request.validated['db_doc'].id
    document = request.validated['document']
    key = request.params.get('download')
    if not any([key in i.url for i in request.validated['documents']]):
        request.errors.add('url', 'download', 'Not Found')
        request.errors.status = 404
        return
    filename = "{}_{}".format(document.id, key)
    if request.registry.docservice_url and filename not in request.validated['db_doc']['_attachments']:
        document = [i for i in request.validated['documents'] if key in i.url][-1]
        if 'Signature=' in document.url and 'KeyID' in document.url:
            url = document.url
        else:
            if 'download=' not in document.url:
                key = urlparse(document.url).path.replace('/get/', '')
            if not document.hash:
                url = generate_docservice_url(request, key, prefix='{}/{}'.format(db_doc_id, document.id))
            else:
                url = generate_docservice_url(request, key)
        request.response.content_type = document.format.encode('utf-8')
        request.response.content_disposition = build_header(
            document.title, filename_compat=quote(document.title.encode('utf-8')))
        request.response.status = '302 Moved Temporarily'
        request.response.location = url
        return url
    else:
        data = request.registry.db.get_attachment(db_doc_id, filename)
        if data:
            request.response.content_type = document.format.encode('utf-8')
            request.response.content_disposition = build_header(
                document.title, filename_compat=quote(document.title.encode('utf-8')))
            request.response.body_file = data
            return request.response
        request.errors.add('url', 'download', 'Not Found')
        request.errors.status = 404
def upload(self, post_file, uuid=None):
    filename = get_filename(post_file.filename)
    content_type = post_file.type
    in_file = post_file.file
    bucket = self.connection.get_bucket(self.bucket)
    if uuid is None:
        uuid = uuid4().hex
        path = '/'.join([format(i, 'x') for i in UUID(uuid).fields])
        key = bucket.new_key(path)
    else:
        try:
            path = '/'.join([format(i, 'x') for i in UUID(uuid).fields])
        except ValueError:
            raise KeyNotFound(uuid)
        if path not in bucket:
            raise KeyNotFound(uuid)
        key = bucket.get_key(path)
        if key.size != 0:
            raise ContentUploaded(uuid)
        md5 = key.get_metadata('hash')
        if key.compute_md5(in_file)[0] != md5[4:]:
            raise HashInvalid(md5)
    key.set_metadata('Content-Type', content_type)
    key.set_metadata("Content-Disposition",
                     build_header(filename, filename_compat=quote(filename.encode('utf-8'))))
    key.set_contents_from_file(in_file)
    key.set_acl('private')
    return uuid, 'md5:' + key.etag[1:-1], content_type, filename
def create_mail_attachment(attdef, payload=None):
    """Create the MIME part corresponding to the given attachment.

    Mandatory keys: 'fname', 'tmpname', 'content-type'

    :param attdef: a dictionary containing the attachment definition
    :return: a MIMEBase object
    """
    from email import Encoders
    from email.mime.base import MIMEBase

    if "content-type" in attdef:
        maintype, subtype = attdef["content-type"].split("/")
    elif "Content-Type" in attdef:
        maintype, subtype = attdef["Content-Type"].split("/")
    else:
        return None
    res = MIMEBase(maintype, subtype)
    if payload is None:
        with open(os.path.join(
                settings.MEDIA_ROOT, "webmail", attdef["tmpname"]), "rb") as fp:
            res.set_payload(fp.read())
    else:
        res.set_payload(payload)
    Encoders.encode_base64(res)
    if isinstance(attdef['fname'], str):
        attdef['fname'] = attdef['fname'].decode('utf-8')
    res['Content-Disposition'] = build_header(attdef['fname'])
    return res
def getattachment(request):
    """Fetch a message attachment.

    FIXME: due to the lack of caching, the message's bodystructure is
    requested again in order to access this attachment's headers.

    :param request: a ``Request`` object
    """
    mbox = request.GET.get("mbox", None)
    mailid = request.GET.get("mailid", None)
    pnum = request.GET.get("partnumber", None)
    if not mbox or not mailid or not pnum:
        raise WebmailError(_("Invalid request"))
    imapc = get_imapconnector(request)
    partdef, payload = imapc.fetchpart(mailid, mbox, pnum)
    resp = HttpResponse(decode_payload(partdef["encoding"], payload))
    resp["Content-Type"] = partdef["Content-Type"]
    resp["Content-Transfer-Encoding"] = partdef["encoding"]
    if partdef["disposition"] != 'NIL':
        disp = partdef["disposition"]
        # FIXME: ugly hack, see fetch_parser.py for more explanation :p
        if type(disp[1][0]) != dict:
            cd = '%s; %s=%s' % (disp[0], disp[1][0], disp[1][1])
        else:
            cd = '%s; %s=%s' % (disp[0], disp[1][0]['struct'][0],
                                disp[1][0]['struct'][1])
    else:
        cd = build_header(request.GET["fname"])
    resp["Content-Disposition"] = cd
    resp["Content-Length"] = partdef["size"]
    return resp
def image(self, subpath=None):
    """Return the image in a specific scale, either inline (default)
    or as attachment.

    :param subpath: [<image_scale>]/download] (optional).
                    When 'download' is the last element in subpath,
                    the image is served with a
                    'Content-Disposition: attachment' header.
                    <image_scale> has to be one of the predefined
                    image_scales - either from the defaults in this module
                    or one set with a kotti.image_scales.<scale_name> in
                    your app config ini file.
    :type subpath: str

    :result: complete response object
    :rtype: pyramid.response.Response
    """
    if subpath is None:
        subpath = self.request.subpath

    width, height = (None, None)
    subpath = list(subpath)

    if (len(subpath) > 0) and (subpath[-1] == "download"):
        disposition = "attachment"
        subpath.pop()
    else:
        disposition = "inline"

    if len(subpath) == 1:
        scale = subpath[0]
        if scale in image_scales:
            # /path/to/image/scale/thumb
            width, height = image_scales[scale]

    if not (width and height):
        return self.request.uploaded_file_response(
            self.context.data, disposition)

    image, format, size = scaleImage(self.context.data.file.read(),
                                     width=width, height=height,
                                     direction="thumb")
    res = Response(
        headerlist=[
            ('Content-Disposition', '{0};filename="{1}"'.format(
                disposition, self.context.filename.encode('ascii', 'ignore'))),
            ('Content-Length', str(len(image))),
            ('Content-Type', str(self.context.mimetype)),
        ],
        body=image,
    )
    res.content_disposition = rfc6266.build_header(
        self.context.filename, disposition=disposition,
        filename_compat=unidecode(self.context.filename))
    return res
def __init__(self, f, request, disposition='attachment',
             cache_max_age=604800, content_type=None, content_encoding=None):
    """
    :param f: the ``UploadedFile`` file field value.
    :type f: :class:`depot.io.interfaces.StoredFile`

    :param request: Current request.
    :type request: :class:`pyramid.request.Request`

    :param disposition:
    :type disposition:

    :param cache_max_age: The number of seconds that should be used to HTTP
                          cache this response.

    :param content_type: The content_type of the response.

    :param content_encoding: The content_encoding of the response.
                             It's generally safe to leave this set to ``None``
                             if you're serving a binary file.  This argument
                             will be ignored if you also leave
                             ``content-type`` as ``None``.
    """
    if f.public_url:
        raise HTTPMovedPermanently(f.public_url)

    content_encoding, content_type = self._get_type_and_encoding(
        content_encoding, content_type, f)

    super(StoredFileResponse, self).__init__(
        conditional_response=True,
        content_type=content_type,
        content_encoding=content_encoding)

    app_iter = None
    if request is not None and \
            not get_settings()['kotti.depot_replace_wsgi_file_wrapper']:
        environ = request.environ
        if 'wsgi.file_wrapper' in environ:
            app_iter = environ['wsgi.file_wrapper'](f, _BLOCK_SIZE)
    if app_iter is None:
        app_iter = FileIter(f)
    self.app_iter = app_iter

    # assignment of content_length must come after assignment of app_iter
    self.content_length = f.content_length
    self.last_modified = f.last_modified

    if cache_max_age is not None:
        self.cache_expires = cache_max_age
        self.cache_control.public = True

    self.etag = self.generate_etag(f)
    self.content_disposition = rfc6266.build_header(
        f.filename, disposition=disposition,
        filename_compat=unidecode(f.filename))
def upload_file(request, blacklisted_fields=DOCUMENT_BLACKLISTED_FIELDS):
    first_document = (
        request.validated["documents"][0]
        if "documents" in request.validated and request.validated["documents"]
        else None
    )
    if request.content_type == "multipart/form-data":
        data = request.validated["file"]
        filename = get_filename(data)
        content_type = data.type
        in_file = data.file
    else:
        filename = first_document.title
        content_type = request.content_type
        in_file = request.body_file

    if hasattr(request.context, "documents"):
        # upload new document
        model = type(request.context).documents.model_class
    else:
        # update document
        model = type(request.context)
    document = model({"title": filename, "format": content_type})
    document.__parent__ = request.context
    if "document_id" in request.validated:
        document.id = request.validated["document_id"]
    if first_document:
        for attr_name in type(first_document)._fields:
            if attr_name not in blacklisted_fields:
                setattr(document, attr_name, getattr(first_document, attr_name))
    key = generate_id()
    document_route = request.matched_route.name.replace("collection_", "")
    document_path = request.current_route_path(
        _route_name=document_route, document_id=document.id, _query={"download": key}
    )
    document.url = "/" + "/".join(document_path.split("/")[3:])
    conn = getattr(request.registry, "s3_connection", None)
    if conn:
        bucket = conn.get_bucket(request.registry.bucket_name)
        filename = "{}/{}/{}".format(request.validated["plan_id"], document.id, key)
        key = bucket.new_key(filename)
        key.set_metadata("Content-Type", document.format)
        key.set_metadata(
            "Content-Disposition",
            build_header(document.title, filename_compat=quote(document.title.encode("utf-8")))
        )
        key.set_contents_from_file(in_file)
        key.set_acl("private")
    else:
        filename = "{}_{}".format(document.id, key)
        request.validated["plan"]["_attachments"][filename] = {
            "content_type": document.format,
            "data": b64encode(in_file.read()),
        }
    update_logging_context(request, {"file_size": in_file.tell()})
    return document
def _export(content, filename):
    """Export a csv file's content.

    :param content: the content to export (string)
    :param filename: the name that will appear into the response
    :return: an ``HttpResponse`` object
    """
    resp = HttpResponse(content)
    resp["Content-Type"] = "text/csv"
    resp["Content-Length"] = len(content)
    resp["Content-Disposition"] = build_header(filename)
    return resp
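# Usage sketch for the helper above. The calling view and the CSV payload
# are invented for illustration; only _export() itself comes from the source.
# build_header() handles quoting and encoding of the filename, so callers
# can pass user-visible names without worrying about header formatting.
def export_domains_csv(request):
    content = "domain,mailboxes\nexample.com,42\n"  # made-up CSV content
    return _export(content, "domains.csv")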
def download_filters_set(request, name):
    sc = SieveClient(user=request.user.username,
                     password=request.session["password"])
    try:
        script = sc.getscript(name)
    except SieveClientError as e:
        return ajax_response(request, "ko", respmsg=str(e))
    resp = HttpResponse(script)
    resp["Content-Type"] = "text/plain; charset=utf-8"
    resp["Content-Length"] = len(script)
    resp["Content-Disposition"] = build_header('%s.txt' % name)
    return resp
def upload_file(request, blacklisted_fields=DOCUMENT_BLACKLISTED_FIELDS):
    first_document = request.validated['documents'][0] if 'documents' in request.validated and request.validated['documents'] else None
    if request.content_type == 'multipart/form-data':
        data = request.validated['file']
        filename = get_filename(data)
        content_type = data.type
        in_file = data.file
    else:
        filename = first_document.title
        content_type = request.content_type
        in_file = request.body_file
    if hasattr(request.context, "documents"):
        # upload new document
        model = type(request.context).documents.model_class
    else:
        # update document
        model = type(request.context)
    document = model({'title': filename, 'format': content_type})
    document.__parent__ = request.context
    if 'document_id' in request.validated:
        document.id = request.validated['document_id']
    if first_document:
        for attr_name in type(first_document)._fields:
            if attr_name not in blacklisted_fields:
                setattr(document, attr_name, getattr(first_document, attr_name))
    key = generate_id()
    document_route = request.matched_route.name.replace("collection_", "")
    document_path = request.current_route_path(_route_name=document_route,
                                               document_id=document.id,
                                               _query={'download': key})
    document.url = '/' + '/'.join(document_path.split('/')[3:])
    conn = getattr(request.registry, 's3_connection', None)
    if conn:
        bucket = conn.get_bucket(request.registry.bucket_name)
        filename = "{}/{}/{}".format(request.validated['contract_id'], document.id, key)
        key = bucket.new_key(filename)
        key.set_metadata('Content-Type', document.format)
        key.set_metadata("Content-Disposition",
                         build_header(document.title, filename_compat=quote(document.title.encode('utf-8'))))
        key.set_contents_from_file(in_file)
        key.set_acl('private')
    else:
        filename = "{}_{}".format(document.id, key)
        request.validated['contract']['_attachments'][filename] = {
            "content_type": document.format,
            "data": b64encode(in_file.read())
        }
    update_logging_context(request, {'file_size': in_file.tell()})
    return document
def get_account_credentials(request, accountid):
    """View to download a document."""
    account = User.objects.get(pk=accountid)
    if not request.user.can_access(account):
        raise PermDeniedException()
    fname = get_creds_filename(account)
    if not os.path.exists(fname):
        raise ModoboaException(_("No document available for this user"))
    content = decrypt_file(fname)
    if param_tools.get_global_parameter("delete_first_dl"):
        os.remove(fname)
    resp = HttpResponse(content)
    resp["Content-Type"] = "application/pdf"
    resp["Content-Length"] = len(content)
    resp["Content-Disposition"] = build_header(os.path.basename(fname))
    return resp
def zip_selected(self, objects):
    response = self.request.response
    settings = getUtility(IRegistry).forInterface(IZipExportSettings)

    with ZipGenerator() as generator:
        for obj in objects:
            repre = getMultiAdapter((obj, self.request),
                                    interface=IZipRepresentation)
            for path, pointer in repre.get_files():
                if not pointer:
                    if settings.include_empty_folders:
                        generator.add_folder(path)
                    continue
                try:
                    generator.add_file(path, pointer)
                except LargeZipFile:
                    messages = IStatusMessage(self.request)
                    messages.add(_("statmsg_zip_file_too_big",
                                   default=u"Content is too big to export"),
                                 type=u"error")
                    return self.request.response.redirect(
                        self.context.absolute_url())

        # check if zip has files
        if generator.is_empty:
            raise NoExportableContent()

        zip_file = generator.generate()

        # Trigger the per container event
        notify(ContainerZippedEvent(self.context))

        # Generate response file
        filename = u'%s.zip' % self.context.title
        response.setHeader(
            "Content-Disposition",
            build_header(filename, disposition='attachment'))
        response.setHeader("Content-type", "application/zip")
        response.setHeader("Content-Length", os.stat(zip_file.name).st_size)

        return filestream_iterator(zip_file.name, 'rb')
def create_mail_attachment(attdef):
    """Create the MIME part corresponding to the given attachment.

    Mandatory keys: 'fname', 'tmpname', 'content-type'

    :param attdef: a dictionary containing the attachment definition
    :return: a MIMEBase object
    """
    from email import Encoders
    from email.mime.base import MIMEBase

    maintype, subtype = attdef["content-type"].split("/")
    res = MIMEBase(maintype, subtype)
    fp = open(os.path.join(settings.MEDIA_ROOT, "webmail",
                           attdef["tmpname"]), "rb")
    res.set_payload(fp.read())
    fp.close()
    Encoders.encode_base64(res)
    res['Content-Disposition'] = build_header(attdef['fname'].decode('utf-8'))
    return res
def upload_file(request):
    first_document = request.validated['documents'][0] if 'documents' in request.validated and request.validated['documents'] else None
    if request.content_type == 'multipart/form-data':
        data = request.validated['file']
        filename = get_filename(data)
        content_type = data.type
        in_file = data.file
    else:
        filename = first_document.title
        content_type = request.content_type
        in_file = request.body_file
    document = Document({
        'title': filename,
        'format': content_type
    })
    document.__parent__ = request.context
    if 'document_id' in request.validated:
        document.id = request.validated['document_id']
    if first_document:
        document.datePublished = first_document.datePublished
    key = generate_id()
    document_route = request.matched_route.name.replace("collection_", "")
    document_path = request.current_route_path(_route_name=document_route,
                                               document_id=document.id,
                                               _query={'download': key})
    document.url = '/tenders' + document_path.split('/tenders', 1)[1]
    conn = getattr(request.registry, 's3_connection', None)
    if conn:
        bucket = conn.get_bucket(request.registry.bucket_name)
        filename = "{}/{}/{}".format(request.validated['tender_id'], document.id, key)
        key = bucket.new_key(filename)
        key.set_metadata('Content-Type', document.format)
        key.set_metadata("Content-Disposition",
                         build_header(document.title, filename_compat=quote(document.title.encode('utf-8'))))
        key.set_contents_from_file(in_file)
        key.set_acl('private')
    else:
        filename = "{}_{}".format(document.id, key)
        request.validated['tender']['_attachments'][filename] = {
            "content_type": document.format,
            "data": b64encode(in_file.read())
        }
    return document
def upload(self, post_file, uuid=None):
    filename = get_filename(post_file.filename)
    content_type = post_file.type
    in_file = post_file.file
    if uuid is not None:
        if uuid not in self.storage:
            raise KeyNotFound(uuid)
        if self.storage[uuid]['Content']:
            raise ContentUploaded(uuid)
        key = self.storage[uuid]
    else:
        uuid = uuid4().hex
        key = self.storage[uuid] = {}
    content = in_file.read()
    key_md5 = key.get('hash')
    md5hash = 'md5:' + md5(content).hexdigest()
    if key_md5 and md5hash != key_md5:
        raise HashInvalid(key_md5)
    key['hash'] = md5hash
    key['Content-Type'] = content_type
    key["Content-Disposition"] = build_header(
        filename, filename_compat=quote(filename.encode('utf-8')))
    key['Content'] = content
    return uuid, md5hash, content_type, filename
def getattachment(request):
    """Fetch a message attachment.

    FIXME: due to the lack of caching, the message's bodystructure is
    requested again in order to access this attachment's headers.

    :param request: a ``Request`` object
    """
    mbox = request.GET.get("mbox", None)
    mailid = request.GET.get("mailid", None)
    pnum = request.GET.get("partnumber", None)
    fname = request.GET.get("fname", None)
    if not mbox or not mailid or not pnum or not fname:
        raise BadRequest(_("Invalid request"))
    imapc = get_imapconnector(request)
    partdef, payload = imapc.fetchpart(mailid, mbox, pnum)
    resp = HttpResponse(decode_payload(partdef["encoding"], payload))
    resp["Content-Type"] = partdef["Content-Type"]
    resp["Content-Transfer-Encoding"] = partdef["encoding"]
    resp["Content-Disposition"] = build_header(fname)
    if int(partdef["size"]) < 200:
        resp["Content-Length"] = partdef["size"]
    return resp
def encode_hdr(self, boundary):
    """Returns the header of the encoding of this parameter"""
    if not self._encoded_hdr or self._encoded_bdr != boundary:
        boundary = self.quote(boundary)
        self._encoded_bdr = boundary
        headers = ["--%s" % boundary]
        if self.fname:
            disposition = build_header(self.fname, disposition='form-data',
                                       name=self.name).encode()
        else:
            disposition = 'form-data; name="%s"' % self.name
        headers.append("Content-Disposition: %s" % disposition)
        if self.filetype:
            filetype = self.filetype
        else:
            filetype = "text/plain; charset=utf-8"
        headers.append("Content-Type: %s" % filetype)
        headers.append("Content-Length: %i" % self.size)
        headers.append("")
        headers.append("")
        self._encoded_hdr = CRLF.join(headers)
    return self._encoded_hdr
def make_content_disposition_header(fn):
    return build_header(os.path.basename(fn)).decode('ascii')
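# Illustrative call of the helper above; the printed value is a sketch of
# what the rfc6266 library typically emits for a plain ASCII name (compare
# the exact outputs asserted in the roundtrip test later in this section).
# Here build_header() returns bytes, which is why the helper decodes to
# ASCII; RFC 5987 percent-encoding keeps the value ASCII-safe even for
# non-ASCII filenames.
print(make_content_disposition_header('/tmp/report.txt'))
# attachment; filename=report.txt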
def video_encodings_download(request, course_key_string):
    """
    Returns a CSV report containing the encoded video URLs for video uploads
    in the following format:

    Video ID,Name,Status,Profile1 URL,Profile2 URL
    aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa,video.mp4,Complete,http://example.com/prof1.mp4,http://example.com/prof2.mp4
    """
    course = _get_and_validate_course(course_key_string, request.user)
    if not course:
        return HttpResponseNotFound()

    def get_profile_header(profile):
        """Returns the column header string for the given profile's URLs"""
        # Translators: This is the header for a CSV file column
        # containing URLs for video encodings for the named profile
        # (e.g. desktop, mobile high quality, mobile low quality)
        return _("{profile_name} URL").format(profile_name=profile)

    profile_whitelist = VideoUploadConfig.get_profile_whitelist()

    videos = list(_get_videos(course))
    name_col = _("Name")
    duration_col = _("Duration")
    added_col = _("Date Added")
    video_id_col = _("Video ID")
    status_col = _("Status")
    profile_cols = [get_profile_header(profile) for profile in profile_whitelist]

    def make_csv_dict(video):
        """
        Makes a dictionary suitable for writing CSV output. This involves
        extracting the required items from the original video dict and
        converting all keys and values to UTF-8 encoded string objects,
        because the CSV module doesn't play well with unicode objects.
        """
        # Translators: This is listed as the duration for a video that has not
        # yet reached the point in its processing by the servers where its
        # duration is determined.
        duration_val = str(video["duration"]) if video["duration"] > 0 else _("Pending")
        ret = dict(
            [
                (name_col, video["client_video_id"]),
                (duration_col, duration_val),
                (added_col, video["created"].isoformat()),
                (video_id_col, video["edx_video_id"]),
                (status_col, video["status"]),
            ] +
            [
                (get_profile_header(encoded_video["profile"]), encoded_video["url"])
                for encoded_video in video["encoded_videos"]
                if encoded_video["profile"] in profile_whitelist
            ]
        )
        return {
            key.encode("utf-8"): value.encode("utf-8")
            for key, value in ret.items()
        }

    response = HttpResponse(content_type="text/csv")
    # Translators: This is the suggested filename when downloading the URL
    # listing for videos uploaded through Studio
    filename = _("{course}_video_urls").format(course=course.id.course)
    # See https://tools.ietf.org/html/rfc6266#appendix-D
    response["Content-Disposition"] = rfc6266.build_header(
        filename + ".csv",
        filename_compat="video_urls.csv"
    )
    writer = csv.DictWriter(
        response,
        [
            col_name.encode("utf-8")
            for col_name in
            [name_col, duration_col, added_col, video_id_col, status_col] + profile_cols
        ],
        dialect=csv.excel
    )
    writer.writeheader()
    for video in videos:
        writer.writerow(make_csv_dict(video))
    return response
def serve_raw_file(self, file, content_type=None, disposition=None, name=None):
    # Adapted from CherryPy's serve_file(), modified to work with file-like
    # objects
    response = cherrypy.response
    st = None

    if isinstance(file, str):
        path = file
        if not os.path.isabs(path):
            raise ValueError("'%s' is not an absolute path." % path)
        try:
            st = os.stat(path)
        except OSError:
            raise cherrypy.NotFound()
        if stat.S_ISDIR(st.st_mode):
            raise cherrypy.NotFound()
        response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
        cptools.validate_since()
        file = open(path, "rb")
    else:
        path = getattr(file, "name", None)
        if path:
            try:
                st = os.stat(path)
            except OSError:
                pass
        else:
            if not hasattr(file, "read"):
                raise ValueError(
                    "Expected a file-like object, got %r instead "
                    "(object has no read() method)" % file)
            if not hasattr(file, "seek"):
                raise ValueError("Can't serve file-like object %r "
                                 "(object has no seek() method)" % file)
            if not hasattr(file, "tell"):
                raise ValueError("Can't serve file-like object %r "
                                 "(object has no tell() method)" % file)

    # Set the content type
    if content_type is None:
        if path:
            content_type = mimetypes.guess_type(path)[0]
        if not content_type:
            content_type = "text/plain"
    response.headers["Content-Type"] = content_type

    # Set the content disposition
    if disposition is not None:
        cd = disposition
        if not name and path:
            name = os.path.basename(path)
        if name:
            cd = rfc6266.build_header(name, cd)
        response.headers["Content-Disposition"] = cd

    if self.use_xsendfile and path:
        response.headers["X-Sendfile"] = path
        return ""

    # Find the size of the file
    if st is None:
        start = file.tell()
        file.seek(0, 2)  # Move to the end of the file
        c_len = file.tell() - start
        file.seek(start)
    else:
        c_len = st.st_size

    # HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
    if cherrypy.request.protocol >= (1, 1):
        response.headers["Accept-Ranges"] = "bytes"
        r = httputil.get_ranges(cherrypy.request.headers.get('Range'), c_len)
        if r == []:
            response.headers['Content-Range'] = "bytes */%s" % c_len
            message = "Invalid Range (first-byte-pos greater than Content-Length)"
            raise cherrypy.HTTPError(416, message)
        if r:
            if len(r) == 1:
                # Return a single-part response.
                start, stop = r[0]
                if stop > c_len:
                    stop = c_len
                r_len = stop - start
                response.status = "206 Partial Content"
                response.headers['Content-Range'] = (
                    "bytes %s-%s/%s" % (start, stop - 1, c_len))
                response.headers['Content-Length'] = r_len
                file.seek(start)
                response.body = file_generator_limited(file, r_len)
            else:
                # Return a multipart/byteranges response.
                response.status = "206 Partial Content"
                import mimetools
                boundary = mimetools.choose_boundary()
                ct = "multipart/byteranges; boundary=%s" % boundary
                response.headers['Content-Type'] = ct
                if "Content-Length" in response.headers:
                    # Delete Content-Length header so finalize() recalcs it.
                    del response.headers["Content-Length"]

                def file_ranges():
                    # Apache compatibility:
                    yield "\r\n"
                    for start, stop in r:
                        yield "--" + boundary
                        yield "\r\nContent-type: %s" % content_type
                        yield ("\r\nContent-range: bytes %s-%s/%s\r\n\r\n"
                               % (start, stop - 1, c_len))
                        file.seek(start)
                        for chunk in file_generator_limited(file, stop - start):
                            yield chunk
                        yield "\r\n"
                    # Final boundary
                    yield "--" + boundary + "--"
                    # Apache compatibility:
                    yield "\r\n"

                response.body = file_ranges()
        else:
            response.headers['Content-Length'] = c_len
            response.body = file
    else:
        response.headers['Content-Length'] = c_len
        response.body = file
    return response.body
def send_file_if_conditions_met(self, in_file, filename, n_bytes=None,
                                skip_unhandled_extension=True,
                                skip_duplicate=True, metadata=None, sha1=None):
    """Upload a file to the Overview server.

    If ``n_bytes is None or (skip_duplicate == True and sha1 is None)``,
    then ``in_file`` will be cached in memory. Otherwise, it will be
    streamed to the server, saving memory.

    :param io.BytesIO in_file: BytesIO containing the document.
    :param str filename: Filename to set in Overview.
    :param int n_bytes: Exact file size (or `None` to auto-calculate).
        Supply this and ``sha1`` (if applicable) to stream ``in_file`` to
        the server instead of caching it in memory.
    :param bool skip_unhandled_extension: if ``True`` (the default), do not
        upload this file if Overview doesn't support its filename extension
        (for instance, ``".dbf"``).
    :param bool skip_duplicate: if ``True`` (the default), do not upload
        this file if your api_token points to a document set that already
        contains a file whose sha1 hash is identical to this file's. Files
        that have been sent without a call to ``finish()`` will not be
        included in the check.
    :param dict metadata: Metadata to set on the document, or ``None``. The
        document set should have a metadata schema that corresponds to this
        document's metadata (or you can set the schema later).
    :param str sha1: SHA1 hash to use in the ``skip_duplicate`` check, or
        ``None`` to calculate it on the fly. If you set this and
        ``n_bytes``, this method will stream the file contents instead of
        caching them in memory.
    """
    if skip_unhandled_extension:
        # We go by filename, with a blacklist we know Overview doesn't handle (yet)
        path, ext = os.path.splitext(filename)
        if ext.lower() in ['.zip', '.msg', '.gif', '.jpg', '.png', '.tiff', '.tif', '.dbf']:
            self.logger.info('Skipping %s, Overview does not handle this format', filename)
            return

    if skip_duplicate:
        if sha1 is None:
            # Cache in_file bytes in memory so we can read it twice: once in
            # is_file_already_in_document_set(), and once below.
            in_file = io.BytesIO(in_file.read())
            sha1 = _calculate_sha1(in_file)
            in_file.seek(0)
        if self.is_file_already_in_document_set(in_file, sha1):
            self.logger.info('Skipping %s, already on server', filename)
            return

    if n_bytes is None:
        # Cache in_file bytes in memory so we can read it twice: once here,
        # once below
        in_file = io.BytesIO(in_file.read())
        n_bytes = in_file.getbuffer().nbytes

    server_path = '/api/v1/files/{}'.format(uuid.uuid4())
    headers = {
        'Content-Disposition': rfc6266.build_header(filename),
        'Content-Length': str(n_bytes),
    }
    if metadata:
        headers['Overview-Document-Metadata-JSON'] = json.dumps(metadata, ensure_ascii=True)

    self.logger.info('Uploading %s…', filename)
    r = self._request('POST', server_path, headers=headers, data=in_file)
    r.raise_for_status()
    self.n_uploaded += 1
def upload(self, post_file, uuid=None):
    now_iso = get_now().isoformat()
    filename = get_filename(post_file.filename)
    content_type = post_file.type
    in_file = post_file.file
    md5hash = self.compute_md5(in_file)
    if md5hash in self.forbidden_hash:
        LOGGER.warning("Forbidden file by hash {}".format(md5hash))
        raise StorageUploadError('forbidden_file ' + md5hash)
    if uuid is None:
        uuid = self.hash_to_uuid(md5hash)
        meta = dict(uuid=uuid, hash=md5hash, created=now_iso)
    else:
        meta = self.read_meta(uuid)
        if not compare_digest(meta['hash'], md5hash):
            raise HashInvalid(meta['hash'] + "/" + md5hash)
    key = self.uuid_to_file(uuid)
    path, name = self.file_path(key)
    if os.path.exists(name):
        meta = self.read_meta(uuid)
        if meta['filename'] != filename:
            if 'alternatives' not in meta:
                meta['alternatives'] = list()
            meta['alternatives'].append({
                'created': now_iso,
                'filename': filename
            })
            self.save_meta(uuid, meta, overwrite=True)
        return uuid, md5hash, content_type, filename
    if self.check_forbidden(filename, content_type, in_file):
        LOGGER.warning("Forbidden file {} {} {} {}".format(
            filename, content_type, uuid, md5hash))
        raise StorageUploadError('forbidden_file ' + md5hash)
    meta['filename'] = filename
    meta['Content-Type'] = content_type
    meta['Content-Disposition'] = build_header(
        filename, disposition=self.disposition,
        filename_compat=quote(filename.encode('utf-8')))
    self.save_meta(uuid, meta, overwrite=True)
    in_file.seek(0)
    with open(name + '~', 'wb') as out_file:
        flock(out_file, LOCK_EX | LOCK_NB)
        copyfileobj(in_file, out_file)
    os.rename(name + '~', name)
    os.chmod(name, self.file_mode)
    try:
        if self.replica_apis:
            self.upload_to_replicas(post_file, uuid)
    except Exception as e:  # pragma: no cover
        LOGGER.error("Replica failed {}, remove file {} {}".format(
            e, uuid, md5hash))
        if self.require_replica_upload:
            os.rename(name, name + '~')
            raise StorageUploadError('replica_failed')
    return uuid, md5hash, content_type, filename
def roundtrip(filename):
    return parse_headers(build_header(filename)).filename_unsafe
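# Sanity-check sketch for the roundtrip helper above; the expected values
# follow from the build_header outputs asserted in the test further below.
assert roundtrip('test.txt') == 'test.txt'    # ASCII name passes through as a plain token
assert roundtrip(u'тест.txt') == u'тест.txt'  # encoded as filename*=utf-8''... and decoded back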
    return ajax_response(request, respmsg=_("Filters set activated"))


@login_required
@needs_mailbox()
def download_filters_set(request, name):
    sc = SieveClient(user=request.user.username,
                     password=request.session["password"])
    try:
        script = sc.getscript(name)
    except SieveClientError, e:
        return ajax_response(request, "ko", respmsg=str(e))
    resp = HttpResponse(script)
    resp["Content-Type"] = "text/plain; charset=utf-8"
    resp["Content-Length"] = len(script)
    resp["Content-Disposition"] = build_header("%s.txt" % name)
    return resp


@login_required
@needs_mailbox()
def toggle_filter_state(request, setname, fname):
    sc = SieveClient(user=request.user.username,
                     password=request.session["password"])
    if type(fname) is unicode:
        fname = fname.encode("utf-8")
    fset = sc.getscript(setname, format="fset")
    if fset.is_filter_disabled(fname):
        ret = fset.enablefilter(fname)
        newstate = _("yes")
        color = "green"
    else:
@login_required
@needs_mailbox()
def download_filters_set(request, name):
    sc = SieveClient(user=request.user.username,
                     password=request.session["password"])
    try:
        script = sc.getscript(name)
    except SieveClientError, e:
        return ajax_response(request, "ko", respmsg=str(e))
    resp = HttpResponse(script)
    resp["Content-Type"] = "text/plain; charset=utf-8"
    resp["Content-Length"] = len(script)
    resp["Content-Disposition"] = build_header('%s.txt' % name)
    return resp


@login_required
@needs_mailbox()
def toggle_filter_state(request, setname, fname):
    sc = SieveClient(user=request.user.username,
                     password=request.session["password"])
    if type(fname) is unicode:
        fname = fname.encode("utf-8")
    try:
        fset = sc.getscript(setname, format="fset")
        if fset.is_filter_disabled(fname):
            ret = fset.enablefilter(fname)
            newstate = _("yes")
def test_test_roundtrip():
    assert (build_header("test.txt", disposition='form-data', name="test1") ==
            'form-data; name=test1; filename=test.txt')
    assert (build_header(u"тест.txt", disposition='form-data', name="test2") ==
            "form-data; name=test2; filename*=utf-8''%D1%82%D0%B5%D1%81%D1%82.txt")
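# Complementary sketch: parsing a freshly built non-ASCII header recovers the
# original name (filename_unsafe is rfc6266's decoded, unsanitized view of it).
header = build_header(u"тест.txt", disposition='form-data', name="test2")
assert parse_headers(header).filename_unsafe == u"тест.txt"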
async def render(request: web.Request) -> web.Response:
    """HTTP POST route at /render for web nbconvert API.

    Return response of conversion with appropriate CONTENT-DISPOSITION
    """
    post_data = await request.post()
    try:
        notebook_field = post_data['notebook']
    except KeyError:
        return make_web_error_response(
            request, "Missing field",
            "Missing notebook [multipart file] field")
    if not notebook_field:
        return make_web_error_response(request, "Invalid field",
                                       "Notebook file multipart field empty")
    try:
        exporter_type = post_data['exporter']
    except KeyError:
        return make_web_error_response(request, "Missing field",
                                       "Missing exporter field")
    exporter_names = get_exporter_names()
    if exporter_type not in exporter_names:
        return make_web_error_response(
            request, "Invalid field",
            f"Invalid exporter {exporter_type!r}, must be one of "
            f"{exporter_names}")
    try:
        disposition = post_data['disposition']
    except KeyError:
        return make_web_error_response(request, "Missing field",
                                       "Missing disposition field")
    if disposition not in DISPOSITION_FIELDS:
        return make_web_error_response(
            request, "Invalid field",
            f"Invalid disposition {disposition!r}, must be one of "
            f"{DISPOSITION_FIELDS}")

    notebook_string = notebook_field.file.read()
    notebook_data = nbformat.reads(notebook_string, TO_VERSION)

    config = {
        'Exporter': {
            'preprocessors': ['nbconvert_http.preprocessors.TagExtractPreprocessor']
        },
        'TagExtractPreprocessor': {
            'extract_cell_tags': ['bibliography']
        }
    }

    # Only need to intercept template for the HTML API
    async with render_execution_context(exporter_type, config):
        loop = asyncio.get_event_loop()
        result = await loop.run_in_executor(pool, convert_notebook_sync,
                                            notebook_data, exporter_type, config)

    filename = f"result{result['resources']['output_extension']}"
    response = web.Response(body=result['body'], headers=CIMultiDict({
        'CONTENT-DISPOSITION': rfc6266.build_header(filename, disposition)
    }))
    response.content_type = result['mime-type']
    return response