def test_create_mor(self):
    """Saving a MorMooringSetup with an image writes the file under MEDIA_DIR and deleting the model removes it."""
    img_file_name = "MooringSetupTest.png"
    img_file_path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + "data" + os.path.sep + img_file_name
    # Re-encode the fixture image into an in-memory buffer so a ContentFile
    # can be attached to the model without touching the fixture on disk.
    data = BytesIO()
    Image.open(img_file_path).save(data, "PNG")
    data.seek(0)
    file = ContentFile(data.read(), img_file_name)
    self.mooring_dic = {}  # NOTE(review): set but never read in this test — confirm it is needed
    mor_1 = models.MorMooringSetup(
        mor_name="MOR001",
        mor_max_depth=100,
        mor_link_setup_image="https://somelink.com",
        mor_setup_image=file)
    mor_1.save()
    # Check that the file was saved
    expected_path = os.path.join(settings.MEDIA_DIR, "whalesdb", "mooring_setup", img_file_name)
    self.assertTrue(os.path.exists(expected_path))
    self.assertTrue(os.path.isfile(expected_path))
    # Delete the image
    mor_1.delete()
    # The stored file must be gone once the model instance is deleted.
    self.assertFalse(os.path.exists(expected_path))
def test_comment_file_upload_tmp_file(self):
    """
    Check (tmp) upload files are checked
    """
    utils.login(self)
    # Minimal-but-valid PDF payload so the upload passes server-side type validation.
    file = BytesIO(
        b'%PDF-1.0\n1 0 obj<</Type/Catalog/Pages 2 0 R>>endobj 2 0 obj<</Type/Pages/Kids[3 0 R]/Count 1'
        b'>>endobj 3 0 obj<</Type/Page/MediaBox[0 0 3 3]>>endobj\nxref\n0 4\n0000000000 65535 f\n000000'
        b'0010 00000 n\n0000000053 00000 n\n0000000102 00000 n\ntrailer<</Size 4/Root 1 0 R>>\nstartxre'
        b'f\n149\n%EOF\n')
    files = {
        'file': SimpleUploadedFile('file_large.pdf', file.read(), content_type='application/pdf'),
    }
    response = self.client.post(
        reverse('spirit:comment:file-upload-ajax'),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        data=files)
    res = json.loads(response.content.decode('utf-8'))
    # The served URL embeds the uploader's pk and a hash suffix in the name.
    # NOTE(review): suffix is presumably the file's content hash — confirm against the upload view.
    file_url = os.path.join(
        settings.MEDIA_URL, 'spirit', 'files', str(self.user.pk),
        "file_large_fadcb2389bb2b69b46bc54185de0ae91.pdf").replace(
        "\\", "/")
    self.assertEqual(res['url'], file_url)
    file_path = os.path.join(
        settings.MEDIA_ROOT, 'spirit', 'files', str(self.user.pk),
        "file_large_fadcb2389bb2b69b46bc54185de0ae91.pdf")
    # The stored file must be byte-identical to the uploaded payload.
    with open(file_path, 'rb') as fh:
        file.seek(0)
        self.assertEqual(fh.read(), file.read())
    shutil.rmtree(settings.MEDIA_ROOT)  # cleanup
def test_write_dump(self):
    """Every statement emitted by the SQLite dump ends with a semicolon."""
    buffer = BytesIO()
    SqliteConnector()._write_dump(buffer)
    buffer.seek(0)
    for statement in buffer:
        self.assertTrue(statement.strip().endswith(b';'))
def create_dump(self):
    """Copy the SQLite database file into an in-memory buffer and return it rewound."""
    buffer = BytesIO()
    db_path = self.connection.settings_dict['NAME']
    with open(db_path, 'rb') as source:
        copyfileobj(source, buffer)
    buffer.seek(0)
    return buffer
def create_image_file(size=(100, 100), image_mode='RGB', image_format='PNG'):
    """Generate an in-memory test image wrapped in a SimpleUploadedFile with a random name."""
    buffer = BytesIO()
    blank = Image.new(image_mode, size)
    blank.save(buffer, image_format)
    buffer.seek(0)
    name = f'{str(uuid.uuid4())}.png'
    return SimpleUploadedFile(name, buffer.getvalue(), content_type='image/png')
def test_comment_file_upload_tmp_file(self):
    """
    Check (tmp) upload files are checked
    """
    utils.login(self)
    # Minimal-but-valid PDF payload so the upload passes server-side type validation.
    file = BytesIO(
        b'%PDF-1.0\n1 0 obj<</Type/Catalog/Pages 2 0 R>>endobj 2 0 obj<</Type/Pages/Kids[3 0 R]/Count 1'
        b'>>endobj 3 0 obj<</Type/Page/MediaBox[0 0 3 3]>>endobj\nxref\n0 4\n0000000000 65535 f\n000000'
        b'0010 00000 n\n0000000053 00000 n\n0000000102 00000 n\ntrailer<</Size 4/Root 1 0 R>>\nstartxre'
        b'f\n149\n%EOF\n')
    files = {
        'file': SimpleUploadedFile(
            'file_large.pdf', file.read(), content_type='application/pdf'),}
    response = self.client.post(
        reverse('spirit:comment:file-upload-ajax'),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest',
        data=files)
    res = json.loads(response.content.decode('utf-8'))
    # The served URL embeds the uploader's pk and a hash-based filename.
    # NOTE(review): name is presumably the file's content hash — confirm against the upload view.
    file_url = os.path.join(
        settings.MEDIA_URL, 'spirit', 'files', str(self.user.pk),
        "fadcb2389bb2b69b46bc54185de0ae91.pdf"
    ).replace("\\", "/")
    self.assertEqual(res['url'], file_url)
    file_path = os.path.join(
        settings.MEDIA_ROOT, 'spirit', 'files', str(self.user.pk),
        "fadcb2389bb2b69b46bc54185de0ae91.pdf"
    )
    # The stored file must be byte-identical to the uploaded payload.
    with open(file_path, 'rb') as fh:
        file.seek(0)
        self.assertEqual(fh.read(), file.read())
    shutil.rmtree(settings.MEDIA_ROOT)  # cleanup
def setUp(self):
    """Prepare form data, URLs, expected view/form classes and fixture image for the MorCreate tests."""
    super().setUp()
    self.data = Factory.MorFactory.get_valid_data()
    self.test_url = reverse_lazy('whalesdb:create_mor')
    # Since this is intended to be used as a pop-out form, the html file should start with an underscore
    self.test_expected_template = 'shared_models/shared_entry_form.html'
    self.expected_success_url = reverse_lazy('whalesdb:list_mor')
    self.expected_view = views.MorCreate
    self.expected_form = forms.MorForm
    self.img_file_name = "MooringSetupTest.png"
    self.img_file_path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + "data" + os.path.sep + \
                         self.img_file_name
    # Re-encode the fixture image into an in-memory ContentFile.
    data = BytesIO()
    Image.open(self.img_file_path).save(data, "PNG")
    data.seek(0)
    file = ContentFile(data.read(), self.img_file_name)
    # NOTE(review): `file` is built but the raw *path* is what gets placed in
    # the form data below — confirm whether the ContentFile was meant instead.
    # add the image to the data array
    self.data['mor_setup_image'] = self.img_file_path
def save(self, commit=True):
    """Scale/crop the uploaded image, write it under MEDIA_ROOT and return its public URL.

    The interface matches Django's Form.save(); `commit` is accepted for
    compatibility but not consulted here, as in the original.
    """
    img = scale_and_crop(self.files['image'], **MARKDOWNX_IMAGE_MAX_SIZE)
    thumb_io = BytesIO()
    # PIL format name is derived from the MIME subtype (e.g. image/png -> PNG).
    img.save(thumb_io, self.files['image'].content_type.split('/')[-1].upper())
    file_name = str(self.files['image'])
    # Seek to the end so tell() reports the buffer size for the upload wrapper.
    thumb_io.seek(0, os.SEEK_END)
    img = InMemoryUploadedFile(thumb_io, "image", file_name,
                               self.files['image'].content_type,
                               thumb_io.tell(), None)
    unique_file_name = self.get_unique_file_name(file_name)
    full_path = os.path.join(settings.MEDIA_ROOT, MARKDOWNX_MEDIA_PATH, unique_file_name)
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(os.path.dirname(full_path), exist_ok=True)
    # BUG FIX: the original opened the destination without a context manager,
    # leaking the file handle if any chunk write raised.
    with open(full_path, 'wb+') as destination:
        for chunk in img.chunks():
            destination.write(chunk)
    return os.path.join(settings.MEDIA_URL, MARKDOWNX_MEDIA_PATH, unique_file_name)
def test_logslice_api(test_repository, webapp, activate_responses, logname, line_range, gzipped, num_loads):
    """The logslice endpoint returns the requested line range of a (possibly gzipped) remote log."""
    job = Job.objects.create(repository=test_repository, guid="12345", project_specific_id=1)
    fake_log_url = 'http://www.fakelog.com/log.gz'
    JobLog.objects.create(job=job, name=logname, url=fake_log_url, status=JobLog.PARSED)
    lines = ['cheezburger %s' % i for i in range(10)]
    # set up a file response
    text = "\n".join(lines) + '\n'
    content = BytesIO()
    if gzipped:
        with gzip.GzipFile('none', 'w', fileobj=content) as gz:
            # BUG FIX: GzipFile and BytesIO are binary streams; the original
            # wrote a str, which raises TypeError on Python 3.
            gz.write(text.encode('utf-8'))
    else:
        content.write(text.encode('utf-8'))
    content.seek(0)
    responses.add(responses.GET, fake_log_url,
                  body=content.read(),
                  content_type="text/plain;charset=utf-8",
                  status=200)
    # now test it
    for i in range(num_loads):
        resp = webapp.get(
            reverse('logslice-list', kwargs={"project": test_repository.name}) +
            '?start_line={}&end_line={}&job_id=1'.format(line_range[0], line_range[1]))
        assert resp.json == [{'index': i + line_range[0], 'text': l + '\n'}
                             for (i, l) in enumerate(lines[line_range[0]:line_range[1]])]
def create_image(size=(100, 100), image_mode='RGB', image_format='PNG'):
    """
    Generate a test image in memory and return the rewound BytesIO buffer.
    """
    buffer = BytesIO()
    blank = Image.new(image_mode, size)
    blank.save(buffer, image_format)
    buffer.seek(0)
    return buffer
def open(self, mode=None):
    """Fetch the remote image at ``self.path`` and yield it as a rewound in-memory buffer."""
    try:
        buffer = BytesIO(urlopen(self.path).read())
        yield buffer
    except URLError as e:
        logger.exception('Error image urlopen')
        raise HttpError(e)
def create_image(filename, size=(100, 100), image_mode='RGB', image_format='png'):
    """Return a rewound BytesIO holding a generated image, tagged with *filename* via ``.name``."""
    buffer = BytesIO()
    blank = Image.new(image_mode, size)
    blank.save(buffer, image_format)
    buffer.name = filename
    buffer.seek(0)
    return buffer
def export_xlsx(self, request, queryset):
    """Admin action: export the selected queryset to an .xlsx attachment."""
    # xlsxwriter writes into an in-memory buffer instead of a file on disk.
    output = BytesIO()
    workbook = xlsxwriter.Workbook(output)
    worksheet = workbook.add_worksheet()
    # Header row (German column titles).
    worksheet.write_row(
        0,
        0,
        [
            "Titel",
            "Autor_in",
            "Hochladedatum",
            "KMK - Kompetenz",
            "mind. Unterrichtsstufe",
            "max. Unterrichtsstufe ",
            "Zeitaufwand",
            "Schulform",
            "Bildungsplanbezug",
            "Verlinkte Tools",
            "Verlinkte Trends",
        ],
    )
    counter = 1
    for tm in queryset:
        # One row per object; optional relations fall back to empty strings.
        # NOTE(review): school_class.lower/.upper look like range bounds, not
        # str methods — confirm the field type.
        worksheet.write_row(
            counter,
            0,
            [
                tm.name,
                tm.author.full_name if tm.author else "",
                tm.created.strftime("%d.%m.%Y") if tm.created else "",
                ", ".join([c.name for c in tm.competences.all()]),
                tm.school_class.lower if tm.school_class else "",
                tm.school_class.upper if tm.school_class else "",
                tm.estimated_time,
                ", ".join([t.name for t in tm.school_types.all()]),
                tm.educational_plan_reference,
                ", ".join([t.name for t in tm.related_tools.all()]),
                ", ".join([t.name for t in tm.related_trends.all()]),
            ],
        )
        counter += 1
    workbook.close()
    output.seek(0)
    response = HttpResponse(
        output,
        content_type=
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    )
    response[
        "Content-Disposition"] = "attachment; filename=unterrichtsbausteine.xlsx"
    return response
def _read(self, name):
    """Download *name* from the FTP server into memory, restoring the server CWD afterwards."""
    buffer = BytesIO()
    try:
        original_dir = self._connection.pwd()
        self._connection.cwd(os.path.dirname(name))
        self._connection.retrbinary("RETR " + os.path.basename(name), buffer.write)
        self._connection.cwd(original_dir)
        buffer.seek(0)
        return buffer
    except ftplib.all_errors:
        raise FTPStorageException("Error reading file %s" % name)
def get(self, request):
    """Export student activity records as an .xls attachment and log the download."""
    import xlwt
    workbook = xlwt.Workbook(encoding="utf-8")  # create the workbook instance
    sheet = workbook.add_sheet("sheet1")  # create the first worksheet
    # Header row: student no., name, academy, grade, class, activity name,
    # participation category, credit hours, recorded date.
    sheet.write(0, 0, '学号')
    sheet.write(0, 1, '姓名')
    sheet.write(0, 2, '学院')
    sheet.write(0, 3, '年级')
    sheet.write(0, 4, '班级')
    sheet.write(0, 5, '活动名称')
    sheet.write(0, 6, '参加类别')
    sheet.write(0, 7, '获得学时')
    sheet.write(0, 8, '记录时间')
    student_activities = StudentActivity.objects.all()
    # Academy-level users only see their own academy and grade; org-level
    # users see their whole academy.
    if self.request.user.role == RoleEnum.ACADEMY.value:
        student_activities = student_activities.filter(
            academy=self.request.user.academy, grade=self.request.user.grade)
    if self.request.user.role == RoleEnum.ORG.value:
        student_activities = student_activities.filter(
            academy=self.request.user.academy)
    # Data rows, one per activity record.
    row = 1
    for obj in student_activities:
        sheet.write(row, 0, obj.student_id)
        sheet.write(row, 1, obj.student_name)
        sheet.write(row, 2, obj.academy)
        sheet.write(row, 3, obj.grade)
        sheet.write(row, 4, obj.clazz)
        sheet.write(row, 5, obj.activity_name)
        sheet.write(row, 6, obj.credit_type)
        sheet.write(row, 7, obj.credit)
        sheet.write(row, 8, obj.create_time.strftime("%Y-%m-%d"))
        row += 1
    sio = BytesIO()  # xlwt needs a binary buffer (StringIO raises)
    workbook.save(sio)
    sio.seek(0)  # rewind to the start
    # BUG FIX: the original passed sio.getvalue() to HttpResponse AND then
    # called response.write(sio.getvalue()), appending the workbook a second
    # time and producing a corrupt download. Write the payload exactly once.
    response = HttpResponse(sio.getvalue(), content_type='application/vnd.ms-excel')
    response['Content-Disposition'] = 'attachment;filename=activity.xls'
    # Audit log entry.
    Log.objects.create(user=self.request.user, content=f"下载了{row - 1}条学生活动记录")
    return response
def _process_raster(image, extension):
    """
    Scale/crop *image* per MARKDOWNX_IMAGE_MAX_SIZE and return it in a
    buffer whose pointer is positioned at the end.
    """
    processed = scale_and_crop(image, **MARKDOWNX_IMAGE_MAX_SIZE)
    buffer = BytesIO()
    processed.save(buffer, extension)
    buffer.seek(0, SEEK_END)
    return buffer
def create_image(filename, storage=None, size=(100, 100), image_mode='RGB', image_format='PNG'):
    """
    Generate a test image. With a storage backend the image is saved there
    and the storage name is returned; otherwise the rewound buffer itself.
    """
    buffer = BytesIO()
    Image.new(image_mode, size).save(buffer, image_format)
    buffer.seek(0)
    if storage:
        return storage.save(filename, ContentFile(buffer.read()))
    return buffer
def test_strips_underscore_headers(self):
    """WSGIRequestHandler ignores headers containing underscores.

    This follows the lead of nginx and Apache 2.4, and is to avoid
    ambiguity between dashes and underscores in mapping to WSGI environ,
    which can have security implications.
    """
    def test_app(environ, start_response):
        """A WSGI app that just reflects its HTTP environ."""
        start_response('200 OK', [])
        http_environ_items = sorted(
            '%s:%s' % (k, v) for k, v in environ.items()
            if k.startswith('HTTP_')
        )
        yield (','.join(http_environ_items)).encode('utf-8')

    # BUG FIX: the request/response streams are byte streams; the original
    # wrote str literals into the BytesIO and compared the body against a
    # str, which fails on Python 3. Use bytes throughout.
    rfile = BytesIO()
    rfile.write(b"GET / HTTP/1.0\r\n")
    rfile.write(b"Some-Header: good\r\n")
    rfile.write(b"Some_Header: bad\r\n")
    rfile.write(b"Other_Header: bad\r\n")
    rfile.seek(0)

    # WSGIRequestHandler closes the output file; we need to make this a
    # no-op so we can still read its contents.
    class UnclosableBytesIO(BytesIO):
        def close(self):
            pass

    wfile = UnclosableBytesIO()

    def makefile(mode, *a, **kw):
        if mode == 'rb':
            return rfile
        elif mode == 'wb':
            return wfile

    request = Stub(makefile=makefile)
    server = Stub(base_environ={}, get_app=lambda: test_app)

    # We don't need to check stderr, but we don't want it in test output
    old_stderr = sys.stderr
    sys.stderr = StringIO()
    try:
        # instantiating a handler runs the request as side effect
        WSGIRequestHandler(request, '192.168.0.2', server)
    finally:
        sys.stderr = old_stderr

    wfile.seek(0)
    body = list(wfile.readlines())[-1]
    self.assertEqual(body, b'HTTP_SOME_HEADER:good')
def test_strips_underscore_headers(self):
    """WSGIRequestHandler ignores headers containing underscores.

    This follows the lead of nginx and Apache 2.4, and is to avoid
    ambiguity between dashes and underscores in mapping to WSGI environ,
    which can have security implications.
    """
    def test_app(environ, start_response):
        """A WSGI app that just reflects its HTTP environ."""
        start_response('200 OK', [])
        http_environ_items = sorted(
            '%s:%s' % (k, v) for k, v in environ.items()
            if k.startswith('HTTP_')
        )
        yield (','.join(http_environ_items)).encode('utf-8')

    # Canned raw request: one dash header (kept) and two underscore headers (stripped).
    rfile = BytesIO()
    rfile.write(b"GET / HTTP/1.0\r\n")
    rfile.write(b"Some-Header: good\r\n")
    rfile.write(b"Some_Header: bad\r\n")
    rfile.write(b"Other_Header: bad\r\n")
    rfile.seek(0)

    # WSGIRequestHandler closes the output file; we need to make this a
    # no-op so we can still read its contents.
    class UnclosableBytesIO(BytesIO):
        def close(self):
            pass

    wfile = UnclosableBytesIO()

    def makefile(mode, *a, **kw):
        # Serve the canned request for reads; capture the response on writes.
        if mode == 'rb':
            return rfile
        elif mode == 'wb':
            return wfile

    request = Stub(makefile=makefile)
    server = Stub(base_environ={}, get_app=lambda: test_app)

    # We don't need to check stderr, but we don't want it in test output
    old_stderr = sys.stderr
    sys.stderr = StringIO()
    try:
        # instantiating a handler runs the request as side effect
        WSGIRequestHandler(request, '192.168.0.2', server)
    finally:
        sys.stderr = old_stderr

    wfile.seek(0)
    # The response body (last line) must reflect only the dash header.
    body = list(wfile.readlines())[-1]
    self.assertEqual(body, b'HTTP_SOME_HEADER:good')
def _compress_content(self, content):
    """Gzip *content* in place: swap its underlying file for a compressed buffer and return it."""
    compressed = BytesIO()
    writer = GzipFile(mode='wb', compresslevel=6, fileobj=compressed)
    try:
        writer.write(force_bytes(content.read()))
    finally:
        writer.close()
    compressed.seek(0)
    content.file = compressed
    content.seek(0)
    return content
def _read(self, name):
    """Download *name* from the FTP server into memory, restoring the server CWD afterwards."""
    buffer = BytesIO()
    try:
        original_dir = self._connection.pwd()
        self._connection.cwd(os.path.dirname(name))
        self._connection.retrbinary('RETR ' + os.path.basename(name), buffer.write)
        self._connection.cwd(original_dir)
        buffer.seek(0)
        return buffer
    except ftplib.all_errors:
        raise FTPStorageException('Error reading file %s' % name)
def _compress_content(self, content):
    """Gzip *content* and return the compressed bytes as a rewound BytesIO."""
    compressed = BytesIO()
    writer = GzipFile(mode='wb', compresslevel=6, fileobj=compressed)
    try:
        writer.write(force_bytes(content.read()))
    finally:
        writer.close()
    compressed.seek(0)
    # Boto 2 returned the InMemoryUploadedFile with the file pointer replaced,
    # but Boto 3 seems to have issues with that. No need for fp.name in Boto3
    # so just returning the BytesIO directly
    return compressed
def test_invalid_file(self):
    """Uploading a file of an invalid type is rejected with HTTP 400 and a detail message."""
    payload = BytesIO()
    Image.new('RGB', (100, 100)).save(payload, 'PNG')
    payload.seek(0)
    response = self.client.put(reverse('validators-view'), data={'file': payload})
    self.assertEqual(response.status_code, 400)
    self.assertEqual('Invalid file type', response.json()['detail'])
def render(data, width, height, force=True, padding=None, overlays=(), overlay_sources=(), overlay_tints=(), overlay_sizes=None, overlay_positions=None, mask=None, mask_source=None, center=".5,.5", format=IMAGE_DEFAULT_FORMAT, quality=IMAGE_DEFAULT_QUALITY, fill=None, background=None, tint=None, pre_rotation=None, post_rotation=None, crop=True, grayscale=False):
    """
    Rescale the given image, optionally cropping it to make sure the result
    image has the specified width and height.

    :param data: Raw image data (bytes, or str on py2 via six).
    :param width: Target width; defaults to the source width when None.
    :param height: Target height; defaults to the source height when None.
    :return: The encoded image bytes in the requested format.
    """
    # Bytes input is wrapped in BytesIO; str input goes through StringIO.
    if not isinstance(data, six.string_types):
        input_file = BytesIO(data)
    else:
        input_file = StringIO(data)
    img = pil.open(input_file)
    # Work in RGBA so the mask/overlay/fill steps below get an alpha channel.
    if img.mode != "RGBA":
        img = img.convert("RGBA")
    # Missing dimensions default to the source image's size.
    if width is None:
        width = img.size[0]
    if height is None:
        height = img.size[1]
    img = do_rotate(img, pre_rotation)
    # Either crop to exactly width x height around `center`, or scale only.
    if crop:
        img = resizeCrop(img, width, height, center, force)
    else:
        img = resizeScale(img, width, height, force)
    if grayscale:
        img = do_grayscale(img)
    # NOTE(review): do_tint/do_mask appear to mutate `img` in place (no
    # reassignment), unlike the other do_* helpers — confirm.
    do_tint(img, tint)
    img = do_fill(img, fill, width, height)
    img = do_background(img, background)
    do_mask(img, mask, mask_source)
    img = do_overlays(img, overlays, overlay_tints, overlay_sources, overlay_sizes, overlay_positions)
    img = do_padding(img, padding)
    img = do_rotate(img, post_rotation)
    tmp = BytesIO()
    # Formats without alpha support are flattened to RGB before encoding.
    if not format.upper() in ALPHA_FORMATS:
        img = img.convert("RGB")
    img.save(tmp, format, quality=quality)
    tmp.seek(0)
    output_data = tmp.getvalue()
    input_file.close()
    tmp.close()
    return output_data
def save(self, commit=True):
    """Scale/crop the uploaded image, persist it through default_storage and return its URL."""
    upload = self.files['image']
    processed = scale_and_crop(upload, **MARKDOWNX_IMAGE_MAX_SIZE)
    buffer = BytesIO()
    # PIL format name comes from the MIME subtype (image/png -> PNG).
    processed.save(buffer, upload.content_type.split('/')[-1].upper())
    file_name = str(upload)
    # Position at the end so tell() reports the buffer size below.
    buffer.seek(0, os.SEEK_END)
    wrapped = InMemoryUploadedFile(buffer, None, file_name,
                                   upload.content_type, buffer.tell(), None)
    full_path = os.path.join(MARKDOWNX_MEDIA_PATH, self.get_unique_file_name(file_name))
    default_storage.save(full_path, wrapped)
    return default_storage.url(full_path)
def yeter(request, type):
    """Rotate/crop an uploaded avatar per client-side crop data, compress it,
    and store it as the user's profile or cover photo.
    """
    form = PhotoForm(data=request.POST, files=request.FILES)
    if form.is_valid():
        img_data = dict(request.POST.items())
        x = None  # Coordinate x
        y = None  # Coordinate y
        w = None  # Width
        h = None  # Height
        rotate = None  # Rotate
        # The cropper widget posts its geometry as JSON under "avatar_data".
        for key, value in img_data.items():
            if key == "avatar_data":
                str_value = json.loads(value)
                print(str_value)
                x = str_value.get('x')
                y = str_value.get('y')
                w = str_value.get('width')
                h = str_value.get('height')
                rotate = str_value.get('rotate')
        print('x: {}, y: {}, w: {}, h: {}, rotate: {}'.format(
            x, y, w, h, rotate))
        # Apply the client's rotation, then crop the selected rectangle.
        im = Image.open(request.FILES['file']).convert('RGBA')
        tempfile = im.rotate(-rotate, expand=True)
        tempfile = tempfile.crop((int(x), int(y), int(w + x), int(h + y)))
        tempfile_io = BytesIO()
        # NOTE(review): seeking to the end of an empty buffer is a no-op here;
        # tell() after save() yields the PNG size for the upload wrapper.
        tempfile_io.seek(0, os.SEEK_END)
        tempfile.save(tempfile_io, format='PNG')
        image_file = InMemoryUploadedFile(tempfile_io, None, 'rotate.png',
                                          'image/png', tempfile_io.tell(), None)
        image_file = compressImage(image_file)
        # Replace the existing photo of the requested kind with the new one.
        if type == "profile":
            Photo.objects.filter(user=request.user).delete()
            Photo.objects.create(user=request.user, file=image_file)
        elif type == "cover":
            CoverPhoto.objects.filter(user=request.user).delete()
            CoverPhoto.objects.create(user=request.user, file=image_file)
        data = {
            'result': True,
            'state': 200,
            'message': 'Yükleme Başarılı',
        }
        return JsonResponse({'data': data})
    else:
        print('Uncut image!')
        print(form.errors)
    return redirect('user:profile', request.user)
def export_xlsx(self, request, queryset):
    """Admin action: export the selected queryset to an .xlsx attachment."""
    output = BytesIO()
    workbook = xlsxwriter.Workbook(output)
    worksheet = workbook.add_worksheet()
    # Header row (German column titles).
    worksheet.write_row(0, 0, [
        'Titel', 'Autor_in', 'Hochladedatum', 'KMK - Kompetenz',
        'mind. Unterrichtsstufe', 'max. Unterrichtsstufe ', 'Zeitaufwand',
        'Schulform', 'Bildungsplanbezug', 'Bundesland', 'Unterrichtsfach',
        'Verlinkte Tools', 'Verlinkte Trends'
    ])
    counter = 1
    for tm in queryset:
        worksheet.write_row(counter, 0, [
            tm.name,
            # BUG FIX: the original accessed tm.author.full_name without a
            # guard, raising AttributeError for rows without an author.
            tm.author.full_name if tm.author else '',
            tm.created.strftime('%d.%m.%Y') if tm.created else '',
            ', '.join([c.name for c in tm.competences.all()]),
            tm.school_class.lower if tm.school_class else '',
            tm.school_class.upper if tm.school_class else '',
            tm.estimated_time,
            ', '.join([t.name for t in tm.school_types.all()]),
            tm.educational_plan_reference,
            tm.state,
            ', '.join([s.name for s in tm.subjects.all()]),
            ', '.join([t.name for t in tm.related_tools.all()]),
            ', '.join([t.name for t in tm.related_trends.all()]),
        ])
        counter += 1
    workbook.close()
    output.seek(0)
    response = HttpResponse(
        output,
        content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    )
    response['Content-Disposition'] = 'attachment; filename=unterrichtsbausteine.xlsx'
    return response
def create_image(storage, filename, size=(100, 100), image_mode='RGB', image_format='PNG'):
    """
    Generate a test image.

    With a storage backend the image is saved there and the storage name is
    returned; with ``storage=None`` the rewound BytesIO buffer is returned
    instead.
    """
    buffer = BytesIO()
    Image.new(image_mode, size).save(buffer, image_format)
    buffer.seek(0)
    if storage:
        return storage.save(filename, ContentFile(buffer.read()))
    return buffer
def _compress_content(self, content):
    """Gzip *content* with a fixed header mtime and return the rewound compressed buffer."""
    compressed = BytesIO()
    # The GZIP header embeds a modification time (http://www.zlib.org/rfc-gzip.html),
    # so identical input would otherwise compress to different bytes each run
    # and defeat MD5-based change detection on S3. Pinning mtime=0.0 avoids that.
    writer = GzipFile(mode='wb', compresslevel=6, fileobj=compressed, mtime=0.0)
    try:
        writer.write(force_bytes(content.read()))
    finally:
        writer.close()
    compressed.seek(0)
    content.seek(0)
    return compressed
def _process_raster(image, extension, image_max_size=None, close_image=True):
    """
    Scale/crop a raster image and return it in a buffer positioned at its end.

    Falls back to MARKDOWNX_IMAGE_MAX_SIZE when no explicit size options are
    supplied.
    """
    size_options = image_max_size if image_max_size else MARKDOWNX_IMAGE_MAX_SIZE
    processed = scale_and_crop(image, **size_options, close_image=close_image)
    buffer = BytesIO()
    processed.save(buffer, extension)
    buffer.seek(0, SEEK_END)
    return buffer
def add_user(name, bio):
    """Create (or fetch) a User and profile, attach the matching populate image, return the profile."""
    user, _ = User.objects.get_or_create(username=name)
    user.save()
    profile, _ = UserProfile.objects.get_or_create(user=user)
    profile.bio = bio
    # Re-encode through a bytes buffer so Django receives file-like bytes rather
    # than a PIL image object.
    # https://stackoverflow.com/questions/54891829/typeerror-memoryview-a-bytes-like-object-is-required-not-jpegimagefile
    source = Image.open('populate_images/' + name + '.jpg')
    buffer = BytesIO()
    source.save(buffer, 'jpeg')
    buffer.seek(0)
    profile.profile_image.save(name + '.jpeg', buffer, True)
    buffer.close()
    source.close()
    return profile
def get_nametag_file(name):
    """Render *name* centered on a 256x256 blue canvas and return it as a rewound PNG buffer."""
    with Image.new('RGB', (256, 256), 'blue') as canvas:
        font = ImageFont.truetype(
            os.path.join(settings.BASE_DIR, "D2Coding.ttf"), 24)
        text_width, text_height = font.getsize(name)
        origin = ((canvas.width - text_width) // 2,
                  (canvas.height - text_height) // 2)
        ImageDraw.Draw(canvas).text(origin, name, font=font)
        buffer = BytesIO()
        canvas.save(buffer, format='PNG')
        buffer.seek(0)
        return buffer
def compressImage(uploadedImage):
    """Shrink the uploaded image by a factor of 1.2 and re-encode it as a
    50%-quality JPEG wrapped in an InMemoryUploadedFile.
    """
    image = Image.open(uploadedImage)
    # JPEG cannot store alpha or palette data, so convert those modes to RGB.
    if image.mode in ("RGBA", "P"):
        image = image.convert("RGB")
    outputIoStream = BytesIO()
    width, height = image.size
    # Reduce both dimensions by a factor of 1.2.
    resized = image.resize((int(width / 1.2), int(height / 1.2)))
    # Re-encode at 50% JPEG quality.
    resized.save(outputIoStream, format='JPEG', quality=50)
    outputIoStream.seek(0)
    # BUG FIX: the original reported sys.getsizeof(outputIoStream) as the file
    # size — that is the Python object's memory footprint, not the JPEG byte
    # length. Use the buffer's actual byte count instead.
    size = outputIoStream.getbuffer().nbytes
    return InMemoryUploadedFile(
        outputIoStream, 'ImageField',
        "%s.jpg" % uploadedImage.name.split('.')[0], 'image/jpeg',
        size, None)
def _compress_content(self, content):
    """Rewind *content*, gzip it with a fixed header mtime, and return the compressed buffer."""
    content.seek(0)
    compressed = BytesIO()
    # The GZIP header embeds a modification time (http://www.zlib.org/rfc-gzip.html),
    # so identical input would otherwise compress to different bytes each run
    # and defeat MD5-based change detection on S3. Pinning mtime=0.0 avoids that.
    writer = GzipFile(mode='wb', compresslevel=6, fileobj=compressed, mtime=0.0)
    try:
        writer.write(force_bytes(content.read()))
    finally:
        writer.close()
    compressed.seek(0)
    # Boto 2 accepted an InMemoryUploadedFile with a swapped file pointer, but
    # Boto 3 has issues with that, so return the BytesIO directly.
    return compressed
def downloadSheetPage(request, pk):
    """View for downloading a sheet: a single file is served as-is, multiple files as a zip."""
    student = getStudent(request.user)
    sheet = getSheetInstance(pk)
    # Get the file
    # Check if the user can have access to it (if he is part of the lesson)
    if sheet.lesson not in student.classroom.lessons.all():
        raise PermissionDenied()
    files = sheet.fileSet
    if len(files) == 1:
        # Single attachment: serve the raw file under the sheet's name.
        uploadedFile = files[0]
        contentType = uploadedFile.contentType
        fileName = sheet.name + uploadedFile.extension
        data = uploadedFile.file
        response = HttpResponse(data.read(), content_type=contentType)
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(fileName)
        data.close()
    else:
        # Multiple attachments: bundle them into an in-memory zip archive.
        contentType = 'application/zip'
        fileName = sheet.name + '.zip'
        zipStream = BytesIO()
        zipFile = ZipFile(zipStream, 'w')
        for uploadedFile in files:
            filePath = join(settings.MEDIA_ROOT, uploadedFile.file.name)
            # Strip the storage prefix and undo the '-point-' dot mangling for
            # the name inside the archive.
            archiveName = uploadedFile.file.name.replace('sheets/', '').replace('-point-', '.')
            zipFile.write(filePath, archiveName)
        zipFile.close()
        zipStream.seek(0)
        response = HttpResponse(zipStream, content_type=contentType)
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(fileName)
        zipStream.close()
    return response
def _process_raster(image, extension):
    """
    Process a raster graphic to the specifications defined in ``settings.py``.

    The image is held temporarily in memory so PIL can operate on it before
    it is written to disk by the caller.

    :param image: Non-SVG image as processed by Django.
    :type image: django.forms.BaseForm.file
    :param extension: Image extension (e.g.: png, jpg, gif)
    :type extension: str
    :return: Buffer holding the processed image, positioned at its end.
    :rtype: BytesIO
    """
    processed = scale_and_crop(image, **MARKDOWNX_IMAGE_MAX_SIZE)
    buffer = BytesIO()
    processed.save(buffer, extension)
    buffer.seek(0, SEEK_END)
    return buffer
def extract_images(data, plugin):
    """
    Extracts base64-encoded images (from browser drag-and-drop actions) out of
    the given HTML and saves each one as a CMS image plugin, replacing the
    inline data URI with the rendered plugin tag.
    """
    if not settings.TEXT_SAVE_IMAGE_FUNCTION:
        return data
    tree_builder = html5lib.treebuilders.getTreeBuilder('dom')
    parser = html5lib.html5parser.HTMLParser(tree=tree_builder)
    dom = parser.parse(data)
    found = False
    for img in dom.getElementsByTagName('img'):
        src = img.getAttribute('src')
        if not src.startswith('data:'):
            # nothing to do
            continue
        width = img.getAttribute('width')
        height = img.getAttribute('height')
        # extract the image data
        data_re = re.compile(r'data:(?P<mime_type>[^"]*);(?P<encoding>[^"]*),(?P<data>[^"]*)')
        m = data_re.search(src)
        dr = m.groupdict()
        mime_type = dr['mime_type']
        image_data = dr['data']
        # NOTE(review): find(';') returns -1 (truthy) when absent and 0 (falsy)
        # when leading — the split below is a no-op either way, so behavior is
        # unaffected, but `!= -1` was probably intended.
        if mime_type.find(';'):
            mime_type = mime_type.split(';')[0]
        try:
            image_data = base64.b64decode(image_data)
        except Exception:
            # Fall back to the URL-safe alphabet if standard decoding fails.
            image_data = base64.urlsafe_b64decode(image_data)
        try:
            image_type = mime_type.split('/')[1]
        except IndexError:
            # No image type specified -- will convert to jpg below if it's valid image data
            image_type = ''
        image = BytesIO(image_data)
        # generate filename and normalize image format
        if image_type == 'jpg' or image_type == 'jpeg':
            file_ending = 'jpg'
        elif image_type == 'png':
            file_ending = 'png'
        elif image_type == 'gif':
            file_ending = 'gif'
        else:
            # any not "web-safe" image format we try to convert to jpg
            im = Image.open(image)
            new_image = BytesIO()
            file_ending = 'jpg'
            im.save(new_image, 'JPEG')
            new_image.seek(0)
            image = new_image
        filename = u'%s.%s' % (uuid.uuid4(), file_ending)
        # transform image into a cms plugin
        image_plugin = img_data_to_plugin(
            filename, image, parent_plugin=plugin, width=width, height=height
        )
        # render the new html for the plugin
        new_img_html = plugin_to_tag(image_plugin)
        # replace the original image node with the newly created cms plugin html
        img.parentNode.replaceChild(parser.parseFragment(new_img_html).childNodes[0], img)
        found = True
    if found:
        # Serialize only the body's children so no <html>/<body> wrapper leaks in.
        return u''.join([y.toxml()
                         for y in dom.getElementsByTagName('body')[0].childNodes])
    else:
        return data
class TestFileUploadParser(TestCase):
    """Tests for FileUploadParser: raw body parsing and filename extraction from Content-Disposition."""

    def setUp(self):
        # Bare stand-in for a DRF/Django request; only the attributes the
        # parser reads are attached below.
        class MockRequest(object):
            pass
        # 14-byte payload; HTTP_CONTENT_LENGTH below must match.
        self.stream = BytesIO(
            "Test text file".encode('utf-8')
        )
        request = MockRequest()
        request.upload_handlers = (MemoryFileUploadHandler(),)
        request.META = {
            'HTTP_CONTENT_DISPOSITION': 'Content-Disposition: inline; filename=file.txt',
            'HTTP_CONTENT_LENGTH': 14,
        }
        self.parser_context = {'request': request, 'kwargs': {}}

    def test_parse(self):
        """ Parse raw file upload. """
        parser = FileUploadParser()
        self.stream.seek(0)
        data_and_files = parser.parse(self.stream, None, self.parser_context)
        file_obj = data_and_files.files['file']
        assert file_obj._size == 14

    def test_parse_missing_filename(self):
        """ Parse raw file upload when filename is missing. """
        parser = FileUploadParser()
        self.stream.seek(0)
        # Blank the header so the parser cannot determine a filename.
        self.parser_context['request'].META['HTTP_CONTENT_DISPOSITION'] = ''
        with pytest.raises(ParseError) as excinfo:
            parser.parse(self.stream, None, self.parser_context)
        assert str(excinfo.value) == 'Missing filename. Request should include a Content-Disposition header with a filename parameter.'

    def test_parse_missing_filename_multiple_upload_handlers(self):
        """
        Parse raw file upload with multiple handlers
        when filename is missing. Regression test for #2109.
        """
        parser = FileUploadParser()
        self.stream.seek(0)
        self.parser_context['request'].upload_handlers = (
            MemoryFileUploadHandler(),
            MemoryFileUploadHandler()
        )
        self.parser_context['request'].META['HTTP_CONTENT_DISPOSITION'] = ''
        with pytest.raises(ParseError) as excinfo:
            parser.parse(self.stream, None, self.parser_context)
        assert str(excinfo.value) == 'Missing filename. Request should include a Content-Disposition header with a filename parameter.'

    def test_parse_missing_filename_large_file(self):
        """
        Parse raw file upload when filename is missing
        with TemporaryFileUploadHandler.
        """
        parser = FileUploadParser()
        self.stream.seek(0)
        self.parser_context['request'].upload_handlers = (
            TemporaryFileUploadHandler(),
        )
        self.parser_context['request'].META['HTTP_CONTENT_DISPOSITION'] = ''
        with pytest.raises(ParseError) as excinfo:
            parser.parse(self.stream, None, self.parser_context)
        assert str(excinfo.value) == 'Missing filename. Request should include a Content-Disposition header with a filename parameter.'

    def test_get_filename(self):
        """The plain filename parameter is extracted from Content-Disposition."""
        parser = FileUploadParser()
        filename = parser.get_filename(self.stream, None, self.parser_context)
        assert filename == 'file.txt'

    def test_get_encoded_filename(self):
        """RFC 5987 filename* (with or without a fallback filename=) takes precedence."""
        parser = FileUploadParser()
        self.__replace_content_disposition('inline; filename*=utf-8\'\'ÀĥƦ.txt')
        filename = parser.get_filename(self.stream, None, self.parser_context)
        assert filename == 'ÀĥƦ.txt'
        self.__replace_content_disposition('inline; filename=fallback.txt; filename*=utf-8\'\'ÀĥƦ.txt')
        filename = parser.get_filename(self.stream, None, self.parser_context)
        assert filename == 'ÀĥƦ.txt'
        self.__replace_content_disposition('inline; filename=fallback.txt; filename*=utf-8\'en-us\'ÀĥƦ.txt')
        filename = parser.get_filename(self.stream, None, self.parser_context)
        assert filename == 'ÀĥƦ.txt'

    def __replace_content_disposition(self, disposition):
        # Helper: swap the Content-Disposition header the parser will read.
        self.parser_context['request'].META['HTTP_CONTENT_DISPOSITION'] = disposition