def handle_filedrop_upload(request):
    """
    Squeeze out an UploadedFile from a request sent through FileDrop.js.

    FileDrop.js's AJAX mode passes the actual file data as an unembellished
    binary stream as the POST payload so we need to do some magic that normal
    (multipart/form-data) uploads would not require. Here's that magic.

    :param request: HTTP request.
    :type request: django.http.HttpRequest
    :return: Uploaded file, rewound to offset 0 and ready to be read.
    :rtype: django.core.files.uploadedfile.UploadedFile
    """
    content_type = request.META.get("HTTP_X_FILE_TYPE", "")
    filename = request.META["HTTP_X_FILE_NAME"]
    size = int(request.META["HTTP_X_FILE_SIZE"])
    if size >= settings.FILE_UPLOAD_MAX_MEMORY_SIZE:
        # Large payloads get spooled to disk instead of held in memory.
        upload_file = TemporaryUploadedFile(
            name=filename, content_type=content_type, size=size,
            charset="binary")
    else:
        upload_file = InMemoryUploadedFile(
            name=filename, content_type=content_type, size=size,
            charset="binary", field_name="none", file=BytesIO())
    upload_file.write(request.read())
    # Bug fix: rewind before handing the file out -- otherwise the first
    # consumer that read()s it gets an empty stream (position was at EOF).
    upload_file.seek(0)
    return upload_file
class TemporaryFileUploadHandler(FileUploadHandler):
    """
    Upload handler that streams data into a temporary file.
    """
    # NOTE: the previous no-op __init__ that only forwarded to super() was
    # removed -- Python inherits the parent constructor automatically.

    def new_file(self, file_name, *args, **kwargs):
        """
        Create the file object to append to as data is coming in.
        """
        super(TemporaryFileUploadHandler, self).new_file(
            file_name, *args, **kwargs)
        self.file = TemporaryUploadedFile(self.file_name, self.content_type,
                                          0, self.charset,
                                          self.content_type_extra)

    def receive_data_chunk(self, raw_data, start):
        # Append each incoming chunk to the spooled temporary file.
        self.file.write(raw_data)

    def file_complete(self, file_size):
        # Rewind and record the final size so the file is ready for reading.
        self.file.seek(0)
        self.file.size = file_size
        return self.file
class TemporaryFileUploadHandler(FileUploadHandler):
    """Upload handler that spools incoming data into a temporary file."""

    def new_file(self, *args, **kwargs):
        """Open the temporary file that incoming chunks will be appended to."""
        super().new_file(*args, **kwargs)
        self.file = TemporaryUploadedFile(
            self.file_name,
            self.content_type,
            0,
            self.charset,
            self.content_type_extra,
        )

    def receive_data_chunk(self, raw_data, start):
        """Append one chunk of upload data to the temporary file."""
        self.file.write(raw_data)

    def file_complete(self, file_size):
        """Rewind the file, stamp its final size and hand it back."""
        self.file.seek(0)
        self.file.size = file_size
        return self.file

    def upload_interrupted(self):
        """Dispose of the partially-written temporary file, if any."""
        if not hasattr(self, 'file'):
            return
        temp_location = self.file.temporary_file_path()
        try:
            self.file.close()
            os.remove(temp_location)
        except FileNotFoundError:
            pass
def temp_uploaded_file(file):
    """
    Copy *file* into a TemporaryUploadedFile and return it, rewound to 0.

    :param file: open file object backed by a real path (``file.name``).
    :return: TemporaryUploadedFile holding the same content.
    """
    file_size = os.path.getsize(file.name)
    # Bug fix: rewind the source first -- if the caller (or a previous
    # consumer) already read from it, read() would return only the tail
    # and the copy would be truncated.
    file.seek(0)
    file_content = file.read()
    temp = TemporaryUploadedFile(file.name, 'text', file_size, None)
    temp.write(file_content)
    temp.seek(0)
    return temp
def validate_file_link(self, value):
    """
    Download the file behind *value* (a URL) into a temporary uploaded file
    and stash it on ``self._file``. Does nothing for empty values.

    :param value: URL of the file to fetch, or a falsy value to skip.
    """
    if not value:
        return
    r = requests.head(value, allow_redirects=True)
    headers = r.headers
    mime_type = headers.get('content-type')
    file_size = headers.get('content-length')
    # Bug fix: Content-Length is a header *string* (or None);
    # TemporaryUploadedFile expects an integer size.
    file_size = int(file_size) if file_size else 0
    file_name = None
    if "Content-Disposition" in r.headers.keys():
        file_name_list = re.findall("filename=(.+)",
                                    r.headers["Content-Disposition"])
        if len(file_name_list) > 0:
            # Bug fix: strip the surrounding quotes RFC 6266 allows around
            # the filename value.
            file_name = file_name_list[0].strip('"\'')
    if not file_name:
        # Fall back to the last path segment of the URL.
        file_name = value.split("/")[-1]
    tf = TemporaryUploadedFile(
        file_name,
        mime_type,
        file_size,
        'utf-8'
    )
    # Stream the body in chunks so large files never sit fully in memory.
    r = requests.get(value, stream=True)
    for chunk in r.iter_content(chunk_size=4096):
        tf.write(chunk)
    tf.seek(0)
    self._file = tf
def decorate(request, *args, **kwargs):
    """
    View-decorator body: when the POST carries a raw X-File-Name upload,
    spool the request body into a TemporaryUploadedFile and expose it as
    ``request.FILES['file']`` before delegating to the wrapped view.
    """
    if request.method == 'POST' and 'HTTP_X_FILE_NAME' in request.META:
        tf = TemporaryUploadedFile('rawdata',
                                   request.META['HTTP_X_FILE_TYPE'],
                                   int(request.META['CONTENT_LENGTH']),
                                   None)
        # Bug fix: the old loop primed `chunk` with the str sentinel ' ',
        # which mixes str/bytes on Python 3 where request.read() yields
        # bytes. Loop until the stream reports EOF instead.
        while True:
            chunk = request.read(1024)
            if not chunk:
                break
            tf.write(chunk)
        tf.seek(0)
        request.FILES['file'] = tf
    return func(request, *args, **kwargs)
def _make_tempfile(self, filename, content):
    """
    Write *content* into a fresh ``<filename>.tempfile`` temporary upload
    and return it, flushed to disk and rewound.

    :param filename: base name; ``.tempfile`` is appended.
    :param content: payload to write.
    :return: TemporaryUploadedFile ready to be read.
    """
    fileobj = TemporaryUploadedFile(
        name=filename + ".tempfile",
        content_type='text/plain',
        # Bug fix: size was hard-coded to 0 even though content is written;
        # report the actual payload length.
        size=len(content),
        charset='utf8',
    )
    fileobj.write(content)
    fileobj.flush()
    # Rewind so callers can read() the content immediately.
    fileobj.seek(0)
    return fileobj
def parse_distutils_request(request):
    """
    Due to a bug in the Python distutils library, the request post is sent
    using \n as a separator instead of the \r\n that the HTTP spec demands.
    This breaks the Django form parser and therefore we have to write a
    custom parser.

    This bug was fixed in the Python 2.7.4 and 3.4:
    http://bugs.python.org/issue10510
    """
    # latin-1 decodes any byte sequence losslessly.
    body = request.body.decode('latin-1')
    if not body.endswith('\r\n'):
        # Broken (pre-fix distutils) body: take the multipart boundary from
        # the second line and parse the parts by hand.
        sep = body.splitlines()[1]
        request.POST = QueryDict('', mutable=True)
        try:
            request._files = MultiValueDict()
        except Exception:
            # Some request classes do not allow replacing _files; keep going.
            pass
        for part in filter(lambda e: e.strip(), body.split(sep)):
            try:
                header, content = part.lstrip().split('\n', 1)
            except Exception:
                # Part without a header/content separator -- skip it.
                continue
            # Strip the single leading/trailing newline around the payload.
            if content.startswith('\n'):
                content = content[1:]
            if content.endswith('\n'):
                content = content[:-1]
            headers = parse_header(header)
            if "name" not in headers:
                continue
            if "filename" in headers and headers['name'] == 'content':
                # The uploaded sdist itself: wrap it in a temporary file.
                dist = TemporaryUploadedFile(name=headers["filename"],
                                             size=len(content),
                                             content_type="application/gzip",
                                             charset='utf-8')
                dist.write(content.encode('utf-8'))
                dist.seek(0)
                request.FILES.appendlist('distribution', dist)
            else:
                request.POST.appendlist(headers["name"], content)
    else:
        # Well-formed body: Django already parsed it; just alias the file.
        request.FILES['distribution'] = request.FILES['content']
    # Distutils sends UNKNOWN for empty fields (e.g platform)
    for key, value in request.POST.items():
        if value == 'UNKNOWN':
            request.POST[key] = None
class FeedUploadHandler(TemporaryFileUploadHandler):
    """ This handler specifically handles feed uploads """

    QUOTA = 42 * 2**20  # 42 MB
    # doesn't seem to be a good way to identify zip files
    MIME_TYPES = (
        'application/zip',
        'application/x-zip',
        'application/x-gzip',
    )

    def __init__(self, *args, **kwargs):
        super(FeedUploadHandler, self).__init__(*args, **kwargs)
        self.total_upload = 0  # running byte count, checked against QUOTA
        self.file_name = ""

    def _validate_file(self):
        """Check mime type, declared length and file-name presence,
        raising a StopUpload subclass on the first violation."""
        filename_re = re.compile(r'filename="(?P<name>[^"]+)"')
        content_type = str(self.request.META.get('CONTENT_TYPE', ""))
        content_length = int(self.request.META.get('CONTENT_LENGTH', 0))
        m = filename_re.search(
            self.request.META.get("HTTP_CONTENT_DISPOSITION", ""))
        if content_type not in self.MIME_TYPES:
            raise IncorrectMimeTypeError("Incorrect mime type",
                                         connection_reset=True)
        if content_length > self.QUOTA:
            raise StopUpload(connection_reset=True)
        if not m:
            raise FileNameUnspecified("File name not specified",
                                      connection_reset=True)
        # Bug fix: was the duplicated `self.file_name = self.file_name = ...`;
        # also dropped the dead local `charset = 'binary'` that was never used.
        self.file_name = m.group('name')
        self.content_type = content_type
        self.content_length = content_length

    def new_file(self, file_name, *args, **kwargs):
        """
        Create the file object to append to as data is coming in.
        Ignores and overwrites most of the arguments and relies on the
        existing request.
        """
        super(FeedUploadHandler, self).new_file(file_name, *args, **kwargs)
        self._validate_file()
        self.file = TemporaryUploadedFile(self.file_name, self.content_type,
                                          0, self.charset)

    def receive_data_chunk(self, raw_data, start):
        self.total_upload += len(raw_data)
        # Enforce the quota against what actually arrived, not just the
        # declared Content-Length.
        if self.total_upload >= self.QUOTA:
            raise StopUpload(connection_reset=True)
        self.file.write(raw_data)
class FeedUploadHandler(TemporaryFileUploadHandler):
    """ This handler specifically handles feed uploads """

    QUOTA = 42 * 2 ** 20  # 42 MB
    # doesn't seem to be a good way to identify zip files
    MIME_TYPES = ("application/zip", "application/x-zip", "application/x-gzip")

    def __init__(self, *args, **kwargs):
        super(FeedUploadHandler, self).__init__(*args, **kwargs)
        self.total_upload = 0  # running byte count, checked against QUOTA
        self.file_name = ""

    def _validate_file(self):
        """Check mime type, declared length and file-name presence,
        raising a StopUpload subclass on the first violation."""
        filename_re = re.compile(r'filename="(?P<name>[^"]+)"')
        content_type = str(self.request.META.get("CONTENT_TYPE", ""))
        content_length = int(self.request.META.get("CONTENT_LENGTH", 0))
        m = filename_re.search(
            self.request.META.get("HTTP_CONTENT_DISPOSITION", ""))
        if content_type not in self.MIME_TYPES:
            raise IncorrectMimeTypeError("Incorrect mime type",
                                         connection_reset=True)
        if content_length > self.QUOTA:
            raise StopUpload(connection_reset=True)
        if not m:
            raise FileNameUnspecified("File name not specified",
                                      connection_reset=True)
        # Bug fix: was the duplicated `self.file_name = self.file_name = ...`;
        # also dropped the dead local `charset = "binary"` that was never used.
        self.file_name = m.group("name")
        self.content_type = content_type
        self.content_length = content_length

    def new_file(self, file_name, *args, **kwargs):
        """
        Create the file object to append to as data is coming in.
        Ignores and overwrites most of the arguments and relies on the
        existing request.
        """
        super(FeedUploadHandler, self).new_file(file_name, *args, **kwargs)
        self._validate_file()
        self.file = TemporaryUploadedFile(self.file_name, self.content_type,
                                          0, self.charset)

    def receive_data_chunk(self, raw_data, start):
        self.total_upload += len(raw_data)
        # Enforce the quota against what actually arrived, not just the
        # declared Content-Length.
        if self.total_upload >= self.QUOTA:
            raise StopUpload(connection_reset=True)
        self.file.write(raw_data)
def parse_distutils_request(request):
    """
    Due to a bug in the Python distutils library, the request post is sent
    using \n as a separator instead of the \r\n that the HTTP spec demands.
    This breaks the Django form parser and therefore we have to write a
    custom parser.

    This bug was fixed in the Python 2.7.4 and 3.4:
    http://bugs.python.org/issue10510
    """
    # Bug fix: request.body is bytes on Python 3, so the str comparisons and
    # splits below would raise TypeError. Decode once up front (latin-1 is
    # lossless for arbitrary bytes) and re-encode when writing file content.
    body = request.body.decode('latin-1')
    if not body.endswith('\r\n'):
        sep = body.splitlines()[1]
        request.POST = QueryDict('', mutable=True)
        try:
            request._files = MultiValueDict()
        except Exception:
            pass
        for part in filter(lambda e: e.strip(), body.split(sep)):
            try:
                header, content = part.lstrip().split('\n', 1)
            except Exception:
                continue
            if content.startswith('\n'):
                content = content[1:]
            if content.endswith('\n'):
                content = content[:-1]
            headers = parse_header(header)
            if "name" not in headers:
                continue
            if "filename" in headers and headers['name'] == 'content':
                dist = TemporaryUploadedFile(name=headers["filename"],
                                             size=len(content),
                                             content_type="application/gzip",
                                             charset='utf-8')
                # Bug fix: write bytes, not str -- the temporary file is
                # opened in binary mode.
                dist.write(content.encode('utf-8'))
                dist.seek(0)
                request.FILES.appendlist('distribution', dist)
            else:
                request.POST.appendlist(headers["name"], content)
    else:
        request.FILES['distribution'] = request.FILES['content']
    # Distutils sends UNKNOWN for empty fields (e.g platform)
    for key, value in request.POST.items():
        if value == 'UNKNOWN':
            request.POST[key] = None
def handle(f, g):
    """
    Spool uploaded file *f* into a TemporaryUploadedFile and dispatch it to
    the saver matching the extension of file name *g*.

    :param f: uploaded file object (provides chunks(), content_type, ...).
    :param g: original file name used to pick the saver.
    """
    nama, ext = os.path.splitext(g)
    j = TemporaryUploadedFile(g, f.content_type, f.size, f.charset)
    for chunk in f.chunks():
        j.write(chunk)
    # Rewind so the savers read from the beginning.
    j.seek(0)
    ext = ext.lower()
    if ext == ".json":
        savejson(j)
    elif ext == ".csv":
        savecsv(j)
    elif ext in (".xls", ".xlsx"):
        # Bug fix: the old test `ext.lower() == ".xls" or ".xlsx"` was always
        # truthy, so EVERY unrecognised extension fell into the Excel branch.
        savexls(j)
def test_save_tempfile(self):
    """Storage should persist a TemporaryUploadedFile under the given name
    with its content intact."""
    with media_root():
        storage = SafeFileSystemStorage()
        content = 'Hello world!'
        f = TemporaryUploadedFile(name='filename',
                                  content_type='text/plain',
                                  size=len(content),
                                  charset='utf-8')
        f.write(content)
        f.seek(0)
        name = storage.save('hello.txt', f)
        self.assertEqual(name, 'hello.txt')
        # Bug fix: close the read-back handle instead of leaking it.
        with open(storage.path(name)) as saved:
            self.assertEqual(saved.read(), content)
class CustomFileUploadHandler(FileUploadHandler): """ Custom file upload handler which handles the file upload as job and can be stopped midway if the job is revoked. Acts mainly as a TemporaryFileUpload with additional logic. """ # TODO:Logging file progress def new_file(self, *args, **kwargs): super().new_file(*args, **kwargs) self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset, self.content_type_extra) job = JobModel.objects.get(job_id=self.request.GET["job_id"]) job.job_status = JobModel.STARTED job.save() def receive_data_chunk(self, raw_data, start): # Depending on the job id the task handling will stop # midway if the status is 'REVOKED' time.sleep(0.5) try: job = JobModel.objects.get(job_id=self.request.GET["job_id"]) print(job) except JobModel.DoesNotExist: raise StopUpload(connection_reset=True) if job.job_status == JobModel.PAUSED: pulse_try = 0 while (job.job_status == JobModel.PAUSED and pulse_try <= settings.PULSE_MAX_TRIES): pulse_try += 1 time.sleep(settings.PAUSE_PULSE) job = JobModel.objects.get(job_id=self.request.GET["job_id"]) if pulse_try == settings.PULSE_MAX_TRIES: job.delete() job.save() raise StopUpload(connection_reset=True) elif job.job_status == JobModel.REVOKED: job.delete() raise StopUpload(connection_reset=True) self.file.write(raw_data) def file_complete(self, file_size): # delete the job after completion job = JobModel.objects.get(job_id=self.request.GET["job_id"]) job.delete() self.file.seek(0) self.file.size = file_size return self.file
def upload_file(url, timeout=5):
    """
    Download the file at *url* into a temporary uploaded file.

    Example:
        from libs.upload import *
        ...
        try:
            uploaded_file = upload_file('http://host.ru/image.jpg')
        except URLError as e:
            return JsonResponse({
                'message': str(e.msg),
            }, status=e.code)
        request.user.avatar.save(uploaded_file.name, uploaded_file,
                                 save=False)
        uploaded_file.close()

    :param url: URL to fetch.
    :param timeout: socket timeout in seconds.
    :return: TemporaryUploadedFile, flushed and rewound to offset 0.
    """
    logger.debug('Uploading %s...', url)
    with contextlib.closing(urlopen(url, timeout=timeout)) as fp:
        headers = fp.info()
        file_name = url.split('/')[-1]
        content_type = headers.get('content-type')
        file_size = headers.get('content-length')
        # Bug fix: Content-Length arrives as a string (or None);
        # TemporaryUploadedFile expects an integer size.
        file_size = int(file_size) if file_size else 0
        charset = 'utf-8'
        tmp = TemporaryUploadedFile(file_name, content_type, file_size,
                                    charset, {})
        while True:
            block = fp.read(8 * 1024)
            if not block:
                break
            tmp.write(block)
        logger.debug('Uploaded %s to file %s', url, tmp.file.name)
        # Bug fix: flush buffered data to disk *before* rewinding, not after.
        tmp.flush()
        tmp.seek(0)
        return tmp
def write(self):
    """Persist the buffered image bytes into a TemporaryUploadedFile.

    Returns the temporary file; the in-memory buffer is released
    afterwards to free memory.
    """
    self.name = uuid.uuid4().hex
    tmp = TemporaryUploadedFile(self.name, self.mimetype, self.size, None)
    tmp.write(self._data)
    tmp.flush()
    # use temporary file size if there is no content-length in response header
    # The file size is validated at converter
    if self.size == 0:
        tmp.size = os.path.getsize(tmp.temporary_file_path())
    self._data = None
    return tmp
def parse_distutils_request(request):
    """Parse the `request.raw_post_data` and return a `MultiValueDict` for
    the POST data and the FILES data.

    This method is taken from the chishop source.
    """
    try:
        # The multipart boundary is the second line of the payload.
        sep = request.raw_post_data.splitlines()[1]
    except:
        raise ValueError('Invalid post data')
    request.POST = QueryDict('', mutable=True)
    try:
        request._files = MultiValueDict()
    except Exception:
        # Some request classes do not allow replacing _files; keep going.
        pass
    for part in filter(lambda e: e.strip(), request.raw_post_data.split(sep)):
        try:
            header, content = part.lstrip().split('\n', 1)
        except Exception:
            # Malformed part without a header separator -- skip it.
            continue
        # Trim the single newline delimiting the payload.
        if content.startswith('\n'):
            content = content[1:]
        if content.endswith('\n'):
            content = content[:-1]
        headers = parse_header(header)
        if "name" not in headers:
            continue
        if "filename" in headers and headers['name'] == 'content':
            # The uploaded distribution file itself.
            dist = TemporaryUploadedFile(name=headers["filename"],
                                         size=len(content),
                                         content_type="application/gzip",
                                         charset='utf-8')
            dist.write(content)
            dist.seek(0)
            request.FILES.appendlist('distribution', dist)
        else:
            # Distutils sends UNKNOWN for empty fields (e.g platform)
            # [[email protected]]
            if content == 'UNKNOWN':
                content = None
            request.POST.appendlist(headers["name"], content)
def parse_distutils_request(request):
    """
    This is being used because the built in request parser that Django uses,
    django.http.multipartparser.MultiPartParser is interperting the POST
    data incorrectly and/or the post data coming from distutils is invalid.

    One portion of this is the end marker: \r\n\r\n (what Django expects)
    versus \n\n (what distutils is sending).
    """
    # The multipart boundary is the second line of the payload.
    sep = request.body.splitlines()[1]
    request.POST = QueryDict('', mutable=True)
    try:
        request._files = MultiValueDict()
    except Exception:
        pass
    for part in filter(lambda e: e.strip(), request.body.split(sep)):
        try:
            header, content = part.lstrip().split('\n', 1)
        except Exception:
            continue
        try:
            # Trim the single newline delimiting the payload.
            if content.startswith('\n'):
                content = content[1:]
            if content.endswith('\n'):
                content = content[:-1]
            headers = parse_header(header)
            if "name" not in headers:
                continue
            if "filename" in headers:
                dist = TemporaryUploadedFile(name=headers["filename"],
                                             size=len(content),
                                             content_type="application/gzip",
                                             charset='utf-8')
                dist.write(content)
                dist.seek(0)
                request.FILES.appendlist(headers['name'], dist)
            else:
                request.POST.appendlist(headers["name"], content)
        except Exception as e:
            # Bug fix: `print e` is Python 2-only syntax; the function form
            # is valid on both Python 2.7 and 3.
            print(e)
    return
def parse_distutils_request(request):
    """Parse the `request.raw_post_data` and update the request POST and
    FILES attributes .
    """
    lines = request.raw_post_data.splitlines()
    # The multipart boundary is the first line starting with dashes.
    seperator = next(line for line in lines if line.startswith('----'))
    request.POST = QueryDict('', mutable=True)
    raw_post = request.raw_post_data.split(seperator)
    raw_lines = [line.lstrip() for line in raw_post if line.lstrip()]
    try:
        request._files = MultiValueDict()
    except Exception:
        pass
    for line in raw_lines:
        line_content = line.lstrip().split('\n', 1)
        # Bug fix: a malformed part without a newline after its header used
        # to raise an uncaught IndexError; skip it instead (matching the
        # sibling parsers, which swallow such parts).
        if len(line_content) < 2:
            continue
        header, content = line_content
        # Trim the single newline delimiting the payload.
        if content.startswith('\n'):
            content = content[1:]
        if content.endswith('\n'):
            content = content[:-1]
        headers = parse_header(header)
        if "name" not in headers:
            continue
        if "filename" in headers and headers['name'] == 'content':
            dist = TemporaryUploadedFile(name=headers["filename"],
                                         size=len(content),
                                         content_type="application/gzip",
                                         charset='utf-8')
            dist.write(content)
            dist.seek(0)
            request.FILES.appendlist('distribution', dist)
        else:
            # Distutils sends UNKNOWN for empty fields (e.g platform)
            # [[email protected]]
            if content == 'UNKNOWN':
                content = None
            request.POST.appendlist(headers["name"], content)
def parse_distutils_request(request):
    """Parse the `request.raw_post_data` and update the request POST and
    FILES attributes .
    """
    try:
        # The multipart boundary is the second line of the payload.
        sep = request.raw_post_data.splitlines()[1]
    except:
        raise ValueError('Invalid post data')
    request.POST = QueryDict('', mutable=True)
    try:
        request._files = MultiValueDict()
    except Exception:
        # Some request classes do not allow replacing _files; keep going.
        pass
    for part in filter(lambda e: e.strip(), request.raw_post_data.split(sep)):
        try:
            header, content = part.lstrip().split('\n', 1)
        except Exception:
            # Malformed part without a header separator -- skip it.
            continue
        # Trim the single newline delimiting the payload.
        if content.startswith('\n'):
            content = content[1:]
        if content.endswith('\n'):
            content = content[:-1]
        headers = parse_header(header)
        if "name" not in headers:
            continue
        if "filename" in headers and headers['name'] == 'content':
            # The uploaded distribution file itself.
            dist = TemporaryUploadedFile(name=headers["filename"],
                                         size=len(content),
                                         content_type="application/gzip",
                                         charset='utf-8')
            dist.write(content)
            dist.seek(0)
            request.FILES.appendlist('distribution', dist)
        else:
            # Distutils sends UNKNOWN for empty fields (e.g platform)
            # [[email protected]]
            if content == 'UNKNOWN':
                content = None
            request.POST.appendlist(headers["name"], content)
def clean(self, value):
    """
    Validate *value* as an image URL, download it, and return it as a
    temporary uploaded file (or pass the cleaned value through unchanged
    when empty).

    :raises forms.ValidationError: when the URL is not a valid image.
    """
    url = super(ImageFromURLField, self).clean(value)
    if url:
        wf = urllib.urlopen(url)
        try:
            if wf.headers.getmaintype() != "image":
                raise forms.ValidationError(u"Enter a URL for a valid image.")
            importedFile = TemporaryUploadedFile(
                url.split("/")[-1],
                wf.headers.gettype(),
                int(wf.headers.get("Content-Length")),
                None
            )
            importedFile.write(wf.read())
        finally:
            # Bug fix: the connection used to leak when validation raised
            # before wf.close() was reached.
            wf.close()
        importedFile.seek(0)
        if not is_valid_image(importedFile):
            raise forms.ValidationError(u"Enter a URL for a valid image.")
        return importedFile
    return url
def clean_picture(self):
    """
    Ensure that picture is big enough (width & height >=
    PORTRAIT_PICTURE_MIN_DIMENSION

    1) Clears the picture field in the field isn't an uploaded file
    2) Detects the size of the uploaded image, and create ValidationError
       if neccessary
    3) Resize the image so it conforms with the minimum size - minimize
       the space used
    """
    min_size = settings.PORTRAIT_PICTURE_MIN_DIMENSION
    # If user has ticked to delete his portrait - then the picture attribute can be False
    if not self.cleaned_data['picture']:
        self.cleaned_data['picture'] = ''
        return ''
    # Grab content of the Uploaded file and validate size - use StringIO to keep
    image_file = self.cleaned_data['picture']
    image_data = StringIO(image_file.read())
    image = Image.open(image_data)
    w, h = image.size
    if min(w, h) < min_size:
        raise forms.ValidationError(
            'Picture is too small : must be a minimum of %(min)s x %(min)s pixels',
            code='invalid',
            params={'min': min_size})
    if min(w, h) == min_size:
        # Already exactly at the minimum: keep the original upload untouched.
        return self.cleaned_data['picture']
    # Resize image to ensure that the smallest size conforms to PORTRAIT_PICTURE_MIN_DIMENSION
    ratio = max(min_size / float(w), min_size / float(h))
    pic = image.resize((int(w * ratio), int(h * ratio)), Image.ANTIALIAS)
    new_image = StringIO()
    pic.save(new_image, 'JPEG', quality=90)
    # Create a new File for the resized image - can't simply overwrite contents of old file.
    # NOTE(review): StringIO's `.len` attribute is Python 2-only -- confirm
    # this module still targets Python 2.
    new_Temp = TemporaryUploadedFile(
        name=image_file.name,
        content_type=image_file.content_type,
        content_type_extra=image_file.content_type_extra,
        charset=image_file.charset,
        size=new_image.len)
    new_Temp.write(new_image.getvalue())
    self.cleaned_data['picture'] = new_Temp
    return self.cleaned_data['picture']
def clean(self, value):
    """
    Validate *value* as an image URL, download it, and return it as a
    temporary uploaded file (or pass the cleaned value through unchanged
    when empty).

    :raises forms.ValidationError: when the URL is not a valid image.
    """
    url = super(ImageFromURLField, self).clean(value)
    if url:
        wf = urllib.urlopen(url)
        try:
            if wf.headers.getmaintype() != 'image':
                raise forms.ValidationError(u'Enter a URL for a valid image.')
            importedFile = TemporaryUploadedFile(
                url.split('/')[-1],
                wf.headers.gettype(),
                int(wf.headers.get('Content-Length')),
                None)
            importedFile.write(wf.read())
        finally:
            # Bug fix: the connection used to leak when validation raised
            # before wf.close() was reached.
            wf.close()
        importedFile.seek(0)
        if not is_valid_image(importedFile):
            raise forms.ValidationError(u'Enter a URL for a valid image.')
        return importedFile
    return url
def test_file_context(self):
    """Round-trip a file.content SearchContext through save/load and check
    the restored bytes match the original upload."""
    response = None
    with open("threats/test_data/boss.gif", "rb") as boss_reader:
        file_data = boss_reader.read()
        # Rewind so the POST below uploads the full file.
        boss_reader.seek(0)
        response = self.client.post('/', {
            'artifact': '{"type": "file.content"}',
            'file': boss_reader
        }, format="multipart")
    upload_file_args = {
        "name": "boss.gif",
        "content_type": "application/octet-stream",
        "size": 29927,  # known size of the boss.gif fixture
        "charset": None,
    }
    self.assertEqual(response.status_code, 200)
    temp_file = TemporaryUploadedFile(**upload_file_args)
    temp_file.write(file_data)
    temp_file.flush()
    context = SearchContext({"type": "file.content", "value": temp_file})
    context.save()
    self.assertEqual(context.file_data_len, len(file_data))
    loaded_context = SearchContext.load(context.id)
    self.assertEqual(loaded_context.base64_file_data_len,
                     context.base64_file_data_len)
    self.assertEqual(loaded_context.file_data_len, context.file_data_len)
    with open(loaded_context.value.temporary_file_path(), "rb") as temp_file:
        loaded_file_data = temp_file.read()
    # Compare in 100-byte windows to make a mismatch easier to localise.
    for counter in range(0, len(loaded_file_data) // 100):
        begin = counter * 100
        end = begin + 100
        self.assertEqual(file_data[begin:end], loaded_file_data[begin:end])
    self.assertEqual(len(file_data), len(loaded_file_data))
def upload_report(cls, report):
    """Download the PDF and XML lab reports for *report*, store them as
    LabReportFile rows, and trigger sending the PDF to the patient.

    Errors are logged rather than raised; a failure in one format does not
    stop the loop's enclosing try from logging it.
    """
    formats = ['pdf', 'xml']
    try:
        for format in formats:
            if format == 'pdf':
                report_url = report.pdf_url
            else:
                report_url = report.xml_url
            request = requests.get(report_url, stream=True)
            filename = "appointment_%s_report.%s" % (
                report.integrator_response.object_id, format)
            lf = TemporaryUploadedFile(filename, 'byte', 1000, 'utf-8')
            for block in request.iter_content(1024 * 8):
                # If no more file then stop
                if not block:
                    break
                # Write image block to temporary file
                lf.write(block)
            lf.seek(0)
            lf.content_type = "application/%s" % format
            in_memory_file = InMemoryUploadedFile(lf, None, filename,
                                                  lf.content_type,
                                                  lf.tell(), None)
            lab_report, created = LabReport.objects.update_or_create(
                appointment_id=report.integrator_response.object_id)
            if lab_report:
                LabReportFile.objects.create(report_id=lab_report.id,
                                             name=in_memory_file)
            # Send Reports to Patient
            from ondoc.notification.tasks import send_lab_reports
            if format == 'pdf':
                try:
                    send_lab_reports.apply_async(
                        (report.integrator_response.object_id, ),
                        countdown=1)
                except Exception as e:
                    logger.error(str(e))
    except Exception as e:
        logger.error(str(e))
def __setstate__(self, pickle_dictionary):
    """Restore pickled state, rebuilding a temporary file for file.content
    contexts whose value was serialised as a plain dict."""
    context = pickle_dictionary["_context"]
    if context["type"] == "file.content" and isinstance(context["value"], dict):
        arguments = context["value"]
        # File data info, especially useful for test pickling
        self.base64_file_data_len = len(arguments["content"])
        file_content = base64.b64decode(arguments.pop("content"))
        # File data info, especially useful for test pickling
        self.file_data_len = len(file_content)
        restored_file = TemporaryUploadedFile(**arguments)
        restored_file.write(file_content)
        restored_file.flush()
        context["value"] = restored_file
    self.__dict__.update(pickle_dictionary)
def test_image_is_resized_for_setting_size_and_big_image(self):
    """A big source image should come back from the field's clean() as a
    new file resized to the configured 300x150 dimensions."""
    image_path = get_full_file_path(os.path.join('data', 'big_image.jpg'))
    with open(image_path, 'rb') as f:
        upload_file = TemporaryUploadedFile(
            name='medium_image.jpg',
            content_type='image/jpeg',
            size=os.path.getsize(image_path),
            charset=None
        )
        upload_file.write(f.read())
        image_field = ResizedImageField()
        new_image = image_field.clean(upload_file)
        # The cleaned image must be a different file object...
        tools.assert_not_equals(f, new_image.file)
        # ...with the expected downscaled dimensions.
        with Image.open(new_image) as im:
            tools.assert_equals(im.size, (300, 150))
class TemporaryFileUploadHandler(FileUploadHandler):
    """Streams uploaded data straight into an on-disk temporary file."""

    def new_file(self, *args, **kwargs):
        """Prepare the temporary file that will receive the upload."""
        super().new_file(*args, **kwargs)
        file_args = (self.file_name, self.content_type, 0,
                     self.charset, self.content_type_extra)
        self.file = TemporaryUploadedFile(*file_args)

    def receive_data_chunk(self, raw_data, start):
        """Write one incoming chunk to the temporary file."""
        self.file.write(raw_data)

    def file_complete(self, file_size):
        """Finalize: rewind, record the size, and return the file object."""
        completed = self.file
        completed.seek(0)
        completed.size = file_size
        return completed
def clean_picture(self):
    """
    Ensure that picture is big enough (width & height >=
    PORTRAIT_PICTURE_MIN_DIMENSION

    1) Clears the picture field in the field isn't an uploaded file
    2) Detects the size of the uploaded image, and create ValidationError
       if neccessary
    3) Resize the image so it conforms with the minimum size - minimize
       the space used
    """
    min_size = settings.PORTRAIT_PICTURE_MIN_DIMENSION
    # If user has ticked to delete his portrait - then the picture attribute can be False
    if not self.cleaned_data['picture']:
        self.cleaned_data['picture'] = ''
        return ''
    # Grab content of the Uploaded file and validate size - use StringIO to keep
    image_file = self.cleaned_data['picture']
    image_data = StringIO(image_file.read())
    image = Image.open(image_data)
    w, h = image.size
    if min(w,h) < min_size:
        raise forms.ValidationError('Picture is too small : must be a minimum of %(min)s x %(min)s pixels',
                                    code='invalid',
                                    params={'min':min_size} )
    if min(w,h) == min_size:
        # Already exactly at the minimum: keep the original upload as-is.
        return self.cleaned_data['picture']
    # Resize image to ensure that the smallest size conforms to PORTRAIT_PICTURE_MIN_DIMENSION
    ratio = max(min_size/float(w), min_size/float(h))
    pic = image.resize((int(w*ratio), int(h*ratio)), Image.ANTIALIAS)
    new_image = StringIO()
    pic.save(new_image, 'JPEG', quality=90)
    # Create a new File for the resized image - can't simply overwrite contents of old file.
    # NOTE(review): `new_image.len` is a Python 2 StringIO attribute --
    # confirm this module still targets Python 2.
    new_Temp = TemporaryUploadedFile(
        name=image_file.name,
        content_type= image_file.content_type,
        content_type_extra=image_file.content_type_extra,
        charset=image_file.charset,
        size = new_image.len)
    new_Temp.write(new_image.getvalue())
    self.cleaned_data['picture'] = new_Temp
    return self.cleaned_data['picture']
def handle(self, *args, **options):
    """
    Re-download every UserInsurance COI document and re-save it under a
    new unique file name (original base name + random hex + extension).
    """
    queryset = UserInsurance.objects.all()
    for data in queryset:
        if not data.coi:
            continue
        file_path = data.coi.url
        request = requests.get(file_path, stream=True)
        if request.status_code != requests.codes.ok:
            continue
        file_name = file_path.split('/')[-1]
        base_name, extension = os.path.splitext(file_name)
        # Bug fix: the old code rebuilt the name from `splited[-1]`, which
        # by then was the LAST CHARACTER of the base name (splited had been
        # reassigned to a string), silently dropping the real extension.
        file_name = '%s-%s%s' % (base_name, uuid.uuid4().hex, extension)
        temp_file = TemporaryUploadedFile(file_name, 'byte', 1000, 'utf-8')
        for block in request.iter_content(1024 * 8):
            if not block:
                break
            temp_file.write(block)
        data.coi = InMemoryUploadedFile(temp_file, None, file_name,
                                        'application/pdf',
                                        temp_file.tell(), None)
        data.save()
class AjaxFileUploadSessionMiddleware(object):
    """
    Middleware that turns a raw XHR octet-stream POST into a normal
    uploaded file exposed as ``request.FILES['file']``.

    NOTE(review): stashing the upload on ``self`` assumes one middleware
    instance per request -- confirm this is not shared across threads.
    """

    chunk_size = 64 * 2 ** 10  # The default chunk size is 64 KB.

    def process_request(self, request):
        file_name = request.META.get('HTTP_X_FILE_NAME')
        self.uploaded_file = None
        # Bug fix: CONTENT_TYPE may be absent; default to '' so the `in`
        # test cannot raise TypeError on None.
        if ('application/octet-stream' in request.META.get('CONTENT_TYPE', '')
                and request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
                and request.method == 'POST'
                and file_name):
            initial_size = request._stream.remaining
            self.uploaded_file = TemporaryUploadedFile(
                name=unquote(file_name),
                content_type='application/octet-stream',
                size=initial_size,
                charset=None)
            size = 0
            while True:
                chunk = request._stream.read(self.chunk_size)
                if not chunk:
                    break
                size += len(chunk)
                self.uploaded_file.write(chunk)
            if size != initial_size:
                # Bug fix: HttpResponseBadRequest is not an exception class,
                # so `raise HttpResponseBadRequest` died with a TypeError.
                # Returning a response from process_request short-circuits
                # the request, which is the intended behaviour.
                return HttpResponseBadRequest()
            self.uploaded_file.seek(0)
            self.uploaded_file.size = size
            request.FILES['file'] = self.uploaded_file
            request.POST = request.GET

    def process_response(self, request, response):
        # Clean up the spooled temporary file once the response is done.
        if hasattr(self, 'uploaded_file') and self.uploaded_file is not None:
            tmp_file_name = self.uploaded_file.file.name
            if os.path.exists(tmp_file_name):
                os.remove(tmp_file_name)
        return response
def test_rack_form_clean_photo(self):
    """RackForm should accept a real JPEG upload and strip/normalise any
    EXIF rotation info."""
    from fixcity.exif_utils import get_exif_info
    from PIL import Image
    import os.path
    data = self.data.copy()
    # Jump through a few hoops to simulate a real upload.
    HERE = os.path.abspath(os.path.dirname(__file__))
    path = os.path.join(HERE, 'files', 'test_exif.jpg')
    # Bug fix: read the JPEG in binary mode and close the handle instead of
    # leaking it (text mode corrupts the bytes on Python 3 / Windows).
    with open(path, 'rb') as fp:
        content = fp.read()
    photofile = TemporaryUploadedFile('test_exif.jpg', 'image/jpeg',
                                      len(content), None)
    photofile.write(content)
    photofile.seek(0)
    # Okay, now we have something like a file upload.
    data['photo'] = photofile
    form = RackForm(data, {'photo': photofile})
    self.assert_(form.is_valid())
    # Make sure it doesn't have a bad rotation.
    self.assertEqual({}, get_exif_info(
        Image.open(photofile.temporary_file_path())))
def ensure_saved(self, file):
    """This may create a temporary file, which will be deleted when it's
    closed, so always close() it but only when you've finished!"""
    # NOTE(review): uses Python 2 print statements -- this module targets
    # Python 2 only.
    if isinstance(file, InMemoryUploadedFile):
        # In-memory uploads have no path on disk; copy them out to a
        # temporary file so callers get a real filesystem path.
        print "Writing %s to disk (%d bytes)" % (file, file.size)
        tmp = TemporaryUploadedFile(name=file.name,
                                    content_type=file.content_type,
                                    size=file.size,
                                    charset=file.charset)
        file.seek(0)
        buf = file.read()
        tmp.write(buf)
        print "Wrote %d bytes" % len(buf)
        tmp.flush()
    else:
        tmp = file
    # Resolve the on-disk path for whichever file object we ended up with.
    if isinstance(tmp, TemporaryUploadedFile):
        path = tmp.temporary_file_path()
    else:
        path = tmp.name
    return (tmp, path)
def handle_filedrop_upload(request):
    """
    Squeeze out an UploadedFile from a request sent through FileDrop.js.

    FileDrop.js's AJAX mode passes the actual file data as an unembellished
    binary stream as the POST payload so we need to do some magic that normal
    (multipart/form-data) uploads would not require. Here's that magic.

    :param request: HTTP request.
    :type request: django.http.HttpRequest
    :return: Uploaded file, rewound to offset 0 and ready to be read.
    :rtype: django.core.files.uploadedfile.UploadedFile
    """
    content_type = request.META.get("HTTP_X_FILE_TYPE", "")
    filename = request.META["HTTP_X_FILE_NAME"]
    size = int(request.META["HTTP_X_FILE_SIZE"])
    if size >= settings.FILE_UPLOAD_MAX_MEMORY_SIZE:
        # Large payloads get spooled to disk instead of held in memory.
        upload_file = TemporaryUploadedFile(name=filename,
                                            content_type=content_type,
                                            size=size,
                                            charset="binary")
    else:
        upload_file = InMemoryUploadedFile(name=filename,
                                           content_type=content_type,
                                           size=size,
                                           charset="binary",
                                           field_name="none",
                                           file=BytesIO())
    upload_file.write(request.read())
    # Bug fix: rewind before handing the file out -- otherwise the first
    # consumer that read()s it gets an empty stream (position was at EOF).
    upload_file.seek(0)
    return upload_file
pass for part in filter(lambda e: e.strip(), request.body.split(sep)): try: header, content = part.lstrip().split('\n', 1) except Exception, e: continue if content.startswith('\n'): content = content[1:] if content.endswith('\n'): content = content[:-1] headers = _parse_header(header) if "name" not in headers: continue if "filename" in headers: dist = TemporaryUploadedFile(name=headers["filename"], size=len(content), content_type="application/gzip", charset='utf-8') dist.write(content) dist.seek(0) request.FILES.appendlist(headers['name'], dist) else: request.POST.appendlist(headers["name"], content) return
def _get_documents_from_api(self) -> List[Tuple[str, BytesIO]]:
    """Retrieve the documents and their content from the Documenten API.

    Small documents are kept in memory (BytesIO); documents exceeding the
    per-document or cumulative size limits are spooled to temporary files.
    Returns a list of (title, file-like) tuples.
    """
    logger.debug("Retrieving documents from Documenten API")
    variables = self.task.get_variables()
    document_urls = check_variable(variables, "documents")
    client_pool = DRCClientPool(variables)
    client_pool.populate_clients(self.task, document_urls)
    documents = []
    current_total_documents_size = 0
    for document_url in document_urls:
        # Getting the appropriate client
        document_client = client_pool.get_client_for(document_url)
        # Retrieving the document metadata
        document_data = document_client.retrieve(
            resource="enkelvoudiginformatieobject",
            url=document_url,
        )
        # Retrieving the content of the document
        # Need use requests directly instead of `document_client.request()`
        # since the response is not in JSON format
        response = requests.get(
            document_data["inhoud"],
            headers=document_client.auth_header,
            stream=True,
        )
        # Get the document size in bytes
        document_size = document_data["bestandsomvang"]
        # If the size of the document is above the max size or if all the
        # documents together have already reached the maximum size, write
        # the file content to a temporary file instead of keeping it in
        # memory
        if (document_size > settings.MAX_DOCUMENT_SIZE
                or (current_total_documents_size + document_size)
                > settings.MAX_TOTAL_DOCUMENT_SIZE):
            # The file is created with rb+ mode by default
            tmp_file_object = TemporaryUploadedFile(
                name=f"{document_data['titel']}-{get_random_string(length=5)}.tempfile",
                content_type="application/octet-stream",
                size=document_size,
                charset=None,  # Required argument in TemporaryUploadedFile, but not in parent class UploadedFile
            )
            for chunk in response.iter_content(chunk_size=settings.CHUNK_SIZE):
                tmp_file_object.write(chunk)
            tmp_file_object.flush()
            doc_tuple = (document_data["titel"], tmp_file_object)
        else:
            doc_tuple = (document_data["titel"], BytesIO(response.content))
        current_total_documents_size += document_size
        response.close()
        documents.append(doc_tuple)
    return documents
def new(request):
    """
    Schedule a new message to be sent.

    GET renders the compose form. POST validates the submitted fields,
    builds a MIME message for consistency, saves the message plus any
    attachments, and queues it for delivery at the requested time.

    :param request: HTTP request.
    :return: the compose page on GET; a redirect on POST.
    """
    if request.method == "GET":
        return render(request, "schedule/send.html", {"new_message": True})

    # Get fields from request
    from_name = request.POST.get("name")
    from_email = request.POST.get("email")
    to = request.POST.get("to")
    subject = request.POST.get("subject")
    send_at = request.POST.get("send_at")
    html = request.POST.get("body")
    plaintext = request.POST.get("plaintext")

    # Ensure all the fields are present
    if not mail.validate_fields(request, from_name, from_email, to, subject,
                                html, plaintext):
        return redirect("schedule:new")
    if not send_at:
        # BUG FIX: execution previously fell through to
        # datetime.strptime(None, ...) and crashed with a TypeError; report
        # the error and send the user back to the form instead. `not send_at`
        # also catches an empty string, which strptime would reject.
        messages.error(request,
                       "Your message must have a date and time to be sent at")
        return redirect("schedule:new")

    # Build the mime message to ensure consistency
    mime_message = mail.build_message(request, from_name, from_email, to,
                                      subject, html, plaintext)

    # Parse the timestamp and make it timezone aware
    parsed_send_at = datetime.strptime(send_at, DATETIME_FORMAT)
    parsed_send_at = timezone.make_aware(parsed_send_at)

    # Save the scheduled message to the database
    [(parsed_from_name, parsed_from_email)] = getaddresses(
        [mime_message.from_email])
    message = Message(
        from_name=parsed_from_name,
        from_email=parsed_from_email,
        to=to,
        subject=mime_message.subject,
        send_at=parsed_send_at,
        text=plaintext,
        html=html,
    )
    message.save()

    # Extract any attachments
    for attachment in mime_message.attachments:
        # Extract the mime details
        name, content, mime = attachment
        # Keep only the file name, stripping any directory components
        sanitized_path = Path(name).name
        # Create a tempfile to be uploaded
        temp = TemporaryUploadedFile(name, mime, len(content), "utf-8")
        # isinstance() instead of type() ==; text payloads must be encoded
        if isinstance(content, str):
            temp.write(content.encode())
        else:
            temp.write(content)
        message.attachment_set.create(name=sanitized_path,
                                      content_type=mime,
                                      inline=False,
                                      content=temp)

    # Queue the message for sending later
    queue_message.apply_async((message.id,), eta=message.send_at)
    return redirect("schedule:queued")
def post_send_handler(
    message: EmailMessage, status: AnymailStatus, esp_name: str, **_unused
):
    """
    Add sent messages to their corresponding thread.

    Persists an outgoing ``Message`` row for every e-mail reported as sent
    through Mailgun, skips password-reset style mail sent from the default
    address, associates the row with an existing thread via the
    In-Reply-To header, and stores any attachments.

    :param message: the e-mail that was sent.
    :param status: Anymail delivery status; supplies the ESP message id.
    :param esp_name: name of the sending ESP; must be "Mailgun".
    """
    assert esp_name == "Mailgun"

    # Parse the emails
    [(from_name, from_email)] = getaddresses([message.from_email])
    [(_, recipient_email), *_] = getaddresses(message.to)

    # Prevent reset emails from being stored
    if from_email == settings.DEFAULT_FROM_EMAIL:
        return

    # Get the HTML message if it exists
    html = None
    if isinstance(message, EmailMultiAlternatives):
        # Get the html content (the last text/html alternative wins)
        for data, content_type in message.alternatives:
            if content_type == "text/html":
                html = data
    # Fall back to the plain-text body wrapped in <pre> when no HTML exists
    if html is None:
        html = f"<pre>{message.body}</pre>"

    # Extract data from the message
    sent = Message(
        type=MessageType.OUTGOING,
        sender_email=from_email,
        recipient_email=recipient_email,
        from_name=from_name,
        from_email=from_email,
        to=", ".join(message.to),
        cc=", ".join(message.cc),
        subject=message.subject,
        timestamp=timezone.now(),
        text=message.body,
        html=html,
        message_id=status.message_id,
        status=MessageStatus.PENDING,
    )
    # Save message to database
    sent.save()

    # Attempt to associate with existing thread; header case varies by sender
    associate_with_thread(
        sent,
        message.extra_headers.get("in-reply-to")
        or message.extra_headers.get("In-Reply-To"),
    )

    # Extract attachments
    for attachment in message.attachments:
        # Extract the attachment details.
        # The MIME type can be trusted since it was already sniffed by the handler
        name, content, mime = attachment
        # Get only the filename, not the path
        sanitized_name = Path(name).name
        # Create a temporary file to be uploaded
        temp = TemporaryUploadedFile(name, mime, len(content), "utf-8")
        # isinstance() instead of type() ==; text payloads must be encoded
        if isinstance(content, str):
            temp.write(content.encode())
        else:
            temp.write(content)
        # Add it to the message
        sent.attachment_set.create(
            name=sanitized_name,
            content_type=mime,
            inline=False,
            content=temp,
        )
def upload_chunked_file(request, param_name, allow_memory=True):
    """
    Loader for files submitted from forms. Supports chunked uploads.

    Returns a wrapper around the file (possibly held in memory) that is
    deleted once closed. If allow_memory = False, small files that Django
    keeps in memory are forcibly written out to temporary files on disk.

    Example:
        from libs.upload import upload_chunked_file, NotLastChunk, TemporaryFileNotFoundError
        ...
        try:
            uploaded_file = upload_chunked_file(request, 'image')
        except TemporaryFileNotFoundError as e:
            return JsonResponse({
                'message': str(e),
            }, status=400)
        except NotLastChunk:
            return HttpResponse()

        request.user.avatar.save(uploaded_file.name, uploaded_file, save=False)
        uploaded_file.close()

        try:
            request.user.avatar.field.clean(request.user.avatar, request.user)
        except ValidationError as e:
            request.user.avatar.delete(save=False)
            return JsonResponse({
                'message': ', '.join(e.messages),
            }, status=400)

        request.user.avatar.clean()
        request.user.avatar.save()
    """
    file = request.FILES[param_name]
    # Chunk bookkeeping supplied by the uploader (plupload-style protocol).
    chunk_num = int(request.POST.get('chunk', 0))
    chunk_count = int(request.POST.get('chunks', 1))
    if chunk_count == 1:
        # The file arrived in a single piece.
        if not isinstance(file, InMemoryUploadedFile):
            return file
        elif allow_memory:
            return file
        else:
            # Force the in-memory upload out to a temporary file on disk.
            tmp = TemporaryUploadedFile(file.name, file.content_type,
                                        file.size, file.charset,
                                        file.content_type_extra)
            for chunk in file.chunks():
                tmp.write(chunk)
            tmp.seek(0)
            tmp.flush()
            return tmp
    else:
        # pluploader sends the name "blob" for chunk parts.
        file.name = os.path.basename(request.POST.get('name', file.name))
        # Generate a name that can be reconstructed when the following
        # chunks arrive (derived from the client address + file name).
        name, ext = os.path.splitext(file.name)
        hashname = '%s.%s' % (request.META.get('REMOTE_ADDR'), name)
        hashname = hashlib.md5(hashname.encode()).hexdigest()
        tempfile_name = '%s.upload%s' % (hashname, ext)
        tempfile_path = os.path.join(tempfile.gettempdir(), tempfile_name)
        # NOTE(review): the temp path is predictable (shared tmp dir, md5 of
        # IP + name) -- confirm this is acceptable for this deployment.
        if chunk_num > 0:
            if not os.path.exists(tempfile_path):
                raise TemporaryFileNotFoundError(_('Temporary file lost'))
        tmp = open(tempfile_path, 'ab+')
        if chunk_num == 0:
            # First chunk: discard stale data from any previous upload.
            tmp.seek(0)
            tmp.truncate()
        for chunk in file.chunks():
            tmp.write(chunk)
        if chunk_num < chunk_count - 1:
            # More chunks to come; signal the caller to acknowledge and wait.
            tmp.close()
            raise NotLastChunk(chunk_num + 1, chunk_count)
        tmp.seek(0)
        tmp.flush()
        file_info = os.stat(tempfile_path)
        return TempUploadedFile(tmp, name=file.name,
                                content_type=file.content_type,
                                size=file_info.st_size,
                                charset=file.charset,
                                content_type_extra=file.content_type_extra)
"""
def share(self, msgfile):
    """
    Turn an e-mail message into a selfpost.

    Parses the message from *msgfile*, optionally filters it with the
    EMAIL2POST_CHECK header rules, collects body text and attachments,
    interprets the @class, "!draft" and "!friends-only" subject
    directives, and hands everything to the selfposts API.

    :param msgfile: file-like object containing the raw message.
    :return: 0 (EX_OK) on success, 77 (EX_NOPERM) if a header check fails.
    """
    msg = email.message_from_file(msgfile)
    args = {}
    files = []
    # Reject messages whose headers do not contain the configured values.
    check = getattr(settings, 'EMAIL2POST_CHECK', {})
    for lhs in check:
        v = six.text_type(make_header(decode_header(msg.get(lhs, ''))))
        if check[lhs] not in v:  # idiomatic "not in" instead of "not x in y"
            return 77  # EX_NOPERM
    if msg.is_multipart():
        for part in msg.walk():
            attach = False
            t = part.get_content_type()
            if t == 'text/plain':
                if part.get_filename(None):
                    attach = True
                else:
                    args['content'] = part.get_payload(decode=True)
            # startswith() accepts a tuple: one call replaces the or-chain.
            if attach or t.startswith(
                    ('image/', 'audio/', 'video/', 'application/')):
                payload = part.get_payload(decode=True)
                os.umask(0)
                tmp = TemporaryUploadedFile(
                    name=part.get_filename('attachment'),
                    content_type=t,
                    size=len(payload),
                    charset=None)
                tmp.write(payload)
                tmp.seek(0)
                # Make the stored attachment world-readable.
                os.chmod(tmp.file.name, 0o644)
                files.append(tmp)
    else:
        args['content'] = msg.get_payload(decode=True)
    subject = msg.get('Subject', None)
    if subject:
        hdr = make_header(decode_header(subject))
        args['title'] = six.text_type(hdr)
        # Mail subject may contain @foo, a selfposts' class name for which
        # this message is post to.
        m = re.search(r'(\A|\s)@(\w[\w\-]+)', args['title'])
        if m:
            cls = m.groups()[1]
            args['title'] = re.sub(r'(\A|\s)@(\w[\w\-]+)', '', args['title'])
            s = Service.objects.filter(cls=cls, api='selfposts').values('id')
            if s:  # truthiness instead of len()
                args['id'] = s[0]['id']
        # Mail subject may contain "!draft" literal.
        if '!draft' in args['title']:
            args['title'] = args['title'].replace('!draft', '').strip()
            args['draft'] = True
        # Mail subject may contain "!friends-only" literal.
        if '!friends-only' in args['title']:
            args['title'] = args['title'].replace('!friends-only', '').strip()
            args['friends_only'] = True
    if files:
        args['files'] = MultiValueDict()
        args['files'].setlist('docs', files)
    selfposts.API(None).share(args)
    return 0  # EX_OK
class UploadProgressCachedHandler(FileUploadHandler):
    """
    Tracks progress for file uploads while streaming them to a temporary
    file.

    The HTTP POST request must contain a header or query parameter
    'X-Progress-ID' with a unique string identifying the upload to be
    tracked. Progress is kept in the cache under
    "<REMOTE_ADDR>_<progress id>" as a dict with 'length' and 'uploaded'
    keys.

    Based on: http://djangosnippets.org/snippets/678/
    See views.py for upload_progress function...
    """

    def __init__(self, request=None):
        super(UploadProgressCachedHandler, self).__init__(request)
        self.progress_id = None
        self.cache_key = None

    def handle_raw_input(self, input_data, META, content_length, boundary,
                         encoding=None):
        """Record the total length and initialise the cache entry."""
        self.content_length = content_length
        if 'X-Progress-ID' in self.request.GET:
            self.progress_id = self.request.GET['X-Progress-ID']
        elif 'X-Progress-ID' in self.request.META:
            self.progress_id = self.request.META['X-Progress-ID']
        if self.progress_id:
            self.cache_key = "%s_%s" % (
                self.request.META['REMOTE_ADDR'], self.progress_id
            )
            cache.set(self.cache_key, {
                'length': self.content_length,
                'uploaded': 0
            })

    def new_file(self, field_name, file_name, content_type, content_length,
                 charset=None, *args, **kwargs):
        """Open a fresh temporary file for the incoming upload."""
        # *args/**kwargs absorb content_type_extra passed by newer Django
        # versions without changing the existing signature.
        self.field_name = field_name
        self.file_name = file_name
        self.content_type = content_type
        self.content_length = content_length
        self.charset = charset
        self.file = TemporaryUploadedFile(
            self.file_name, self.content_type, 0, self.charset
        )

    def receive_data_chunk(self, raw_data, start):
        """Write a chunk to the file and update the progress counter."""
        if self.cache_key:
            data = cache.get(self.cache_key)
            # Guard against the cache entry being evicted mid-upload.
            if data is not None:
                # BUG FIX: count the actual chunk size. The final chunk is
                # usually smaller than self.chunk_size, so adding the fixed
                # chunk size made 'uploaded' overshoot 'length'.
                data['uploaded'] += len(raw_data)
                cache.set(self.cache_key, data)
        self.file.write(raw_data)
        # NOTE(review): returning raw_data also passes the chunk to any
        # subsequent handlers -- confirm this handler is installed alone.
        return raw_data

    def file_complete(self, file_size):
        """Rewind and return the completed upload."""
        self.file.seek(0)
        self.file.size = file_size
        # BUG FIX: do not close the file here. Closing a
        # TemporaryUploadedFile deletes its backing NamedTemporaryFile, so
        # the object returned to Django was already unusable.
        return self.file

    def upload_complete(self):
        """Drop the progress entry once the whole request is processed."""
        if self.cache_key:
            cache.delete(self.cache_key)
def share(self, msgfile):
    """
    Publish an incoming e-mail message through the selfposts API.

    Reads the message from *msgfile*, applies the optional
    EMAIL2POST_CHECK header filter, gathers body text and attachments,
    and honours the @class, "!draft" and "!friends-only" subject
    directives.

    :param msgfile: file-like object with the raw message.
    :return: 0 (EX_OK) when shared, 77 (EX_NOPERM) when filtered out.
    """
    message = email.message_from_file(msgfile)
    post_args = {}
    attachments = []

    # Every configured header must contain its expected substring.
    rules = getattr(settings, 'EMAIL2POST_CHECK', {})
    for header_name in rules:
        decoded = six.text_type(
            make_header(decode_header(message.get(header_name, ''))))
        if rules[header_name] not in decoded:
            return 77  # EX_NOPERM

    if not message.is_multipart():
        post_args['content'] = message.get_payload(decode=True)
    else:
        for part in message.walk():
            ctype = part.get_content_type()
            is_attachment = False
            if ctype == 'text/plain':
                if part.get_filename(None):
                    is_attachment = True
                else:
                    post_args['content'] = part.get_payload(decode=True)
            media_prefixes = ('image/', 'audio/', 'video/', 'application/')
            if is_attachment or ctype.startswith(media_prefixes):
                payload = part.get_payload(decode=True)
                os.umask(0)
                stored = TemporaryUploadedFile(
                    name=part.get_filename('attachment'),
                    content_type=ctype,
                    size=len(payload),
                    charset=None)
                stored.write(payload)
                stored.seek(0)
                os.chmod(stored.file.name, 0o644)
                attachments.append(stored)

    subject = message.get('Subject', None)
    if subject:
        post_args['title'] = six.text_type(
            make_header(decode_header(subject)))
        # "@foo" in the subject selects the selfposts class to post to.
        found_cls = re.search(r'(\A|\s)@(\w[\w\-]+)', post_args['title'])
        if found_cls:
            wanted = found_cls.groups()[1]
            post_args['title'] = re.sub(
                r'(\A|\s)@(\w[\w\-]+)', '', post_args['title'])
            matches = Service.objects.filter(
                cls=wanted, api='selfposts').values('id')
            if len(matches):
                post_args['id'] = matches[0]['id']
        # "!draft" marks the post as a draft.
        if '!draft' in post_args['title']:
            post_args['title'] = post_args['title'].replace(
                '!draft', '').strip()
            post_args['draft'] = True
        # "!friends-only" restricts the post's visibility.
        if '!friends-only' in post_args['title']:
            post_args['title'] = post_args['title'].replace(
                '!friends-only', '').strip()
            post_args['friends_only'] = True

    if len(attachments):
        post_args['files'] = MultiValueDict()
        post_args['files'].setlist('docs', attachments)

    selfposts.API(None).share(post_args)
    return 0  # EX_OK
# newer distutils can submit \r\n end-of-line marks. if header.endswith('\r'): header = header[:-1] if content.startswith('\r'): content = content[1:] if content.startswith('\n'): content = content[1:] if content.endswith('\n'): content = content[:-1] if content.endswith('\r'): content = content[:-1] headers = _parse_header(header) if "name" not in headers: continue if "filename" in headers: dist = TemporaryUploadedFile(name=headers["filename"], size=len(content), content_type="application/gzip", charset='utf-8') dist.write(content) dist.seek(0) request.FILES.appendlist(headers['name'], dist) else: request.POST.appendlist(headers["name"],content) return