def test_default(self):
    """Minify CSS containing unicode, then verify plain and gzip uploads."""
    # Test with some unicode characters
    css_body = 'body { background: url("\u1234"); } '
    css_output_file = filters.cssmin(ContentFile(css_body))
    css_body_min = css_output_file.read()
    # Minification should have removed all spaces.
    self.assertFalse(b' ' in css_body_min)
    storage = DefaultStorage()
    storage.save('folder/a.css', ContentFile(css_body))
    # Check both uncompressed and gzip-compressed upload paths.
    self._check_upload(is_gzip=False, body_min=css_body_min)
    self._check_upload(is_gzip=True, body_min=css_body_min)
def serve_fullpath(*, fullpath):
    """Serve the storage file at an absolute, existing path; otherwise 404."""
    storage = DefaultStorage()
    # Only accept already-normalized absolute paths that exist in storage.
    is_canonical = os.path.abspath(fullpath) == fullpath
    if not is_canonical or not storage.exists(fullpath):
        raise Http404("File not found.")
    try:
        handle = storage.open(fullpath, "rb")
        wrapped = File(handle)
        return serve_file(wrapped, save_as=True)
    except IOError:
        # Storage raced or backend failed to open the file.
        raise Http404("File not found.")
def image_view(request, path=None, options=None):
    """Proxy an image from default storage through the rsz.io resizing service.

    Args:
        request: The incoming HttpRequest.
        path: Storage-relative path of the source image.
        options: Raw options string, parsed by ``process_options``.

    Returns:
        HttpResponse with the (possibly resized) image bytes and the
        upstream content type.

    Raises:
        Http404: If no path is given, the image is missing (when
            RSZIO_CHECK_EXISTS is enabled), or the options are invalid.
    """
    if not path:
        raise Http404('No path provided')
    # Grab the default storage, to build the URL
    storage = DefaultStorage()
    # Optionally check if the file exists in the storage.
    # Depending on your storage class, this might not be implemented or might
    # perform something outrageous like loading the entire file into memory.
    if getattr(settings, 'RSZIO_CHECK_EXISTS', False) and not storage.exists(path):
        raise Http404('Image not found in storage')
    # Get the full URL for the image
    original_url = storage.url(path)
    # Use urlparse to pull out the host and path
    parsed_url = urlparse(original_url)
    # Build the rsz.io URL
    url = 'https://rsz.io/{host}{path}'.format(
        host=parsed_url.hostname,
        path=parsed_url.path,
    )
    # Build the rsz.io parameters.  Was a bare `except:`, which also swallows
    # SystemExit/KeyboardInterrupt; Exception is the widest safe net here.
    try:
        parameters = process_options(options)
    except Exception:
        # KISS: if invalid parameters are passed, raise a 404
        raise Http404('Invalid rsz.io options')
    # Grab the image
    rszio_response = requests.get(url, parameters)
    # Return the fetched bytes, preserving the upstream content type.
    buffer_image = BytesIO(rszio_response.content)
    buffer_image.seek(0)
    response = HttpResponse(
        buffer_image,
        content_type=rszio_response.headers['content-type'])
    # Set cache headers
    if hasattr(settings, 'RSZIO_CACHE_CONTROL'):
        try:
            response['Cache-Control'] = 'max-age={}'.format(
                int(settings.RSZIO_CACHE_CONTROL))
        except (TypeError, ValueError):
            # Setting is not an integer: pass it through verbatim.
            response['Cache-Control'] = settings.RSZIO_CACHE_CONTROL
    return response
def diagnostics(request):
    """Admin diagnostics page: websocket URLs, test-email form, storage check."""
    from django.conf import settings
    from post_office import mail
    # Derive ws:// / wss:// URLs from the absolute http(s) URL of the index.
    ping_socket_url = (request.build_absolute_uri(
        f'{reverse("tracker:index_all")}ws/ping/').replace(
        'https:', 'wss:').replace('http:', 'ws:'))
    celery_socket_url = (request.build_absolute_uri(
        f'{reverse("tracker:index_all")}ws/celery/').replace(
        'https:', 'wss:').replace('http:', 'ws:'))
    if request.method == 'POST':
        test_email_form = TestEmailForm(data=request.POST)
        if test_email_form.is_valid():
            # Queue a test mail from webmaster@<host-without-port>.
            mail.send(
                [test_email_form.cleaned_data['email']],
                f'webmaster@{request.get_host().split(":")[0]}',
                subject='Test Email',
                message='If you got this, email is set up correctly.',
            )
            messages.info(
                request,
                'Test email queued. Check Post Office models for status.')
    else:
        test_email_form = TestEmailForm()
    # Round-trip a small file through storage: save, read, exists, delete.
    # On failure the exception itself is handed to the template.
    try:
        storage = DefaultStorage()
        output = storage.save(f'testfile_{int(time.time())}',
                              BytesIO(b'test file'))
        storage.open(output).read()
        assert storage.exists(output)
        storage.delete(output)
        storage_works = True
    except Exception as e:
        storage_works = e
    return render(
        request,
        'admin/tracker/diagnostics.html',
        {
            'is_secure': request.is_secure(),
            'test_email_form': test_email_form,
            'ping_socket_url': ping_socket_url,
            'celery_socket_url': celery_socket_url,
            'storage_works': storage_works,
            'HAS_CELERY': getattr(settings, 'HAS_CELERY', False),
        },
    )
def test_serve_file_request_signal(self):
    """serve_file_request signal callbacks can deny (403) or allow (200) access."""
    # Mock the token source so generated paths are deterministic.
    with mock.patch('secrets.token_urlsafe', return_value='ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
        super_user = baker.make(get_user_model(), is_superuser=True, email='*****@*****.**')
        self.client.force_login(super_user)
    with mock.patch('secrets.token_urlsafe', return_value='ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
        media_path = generate_media_path(user=super_user, filename='foobar.ext')
    assert media_path == 'abcdefghijkl/abcdefghijklmnopqrstuvwx/foobar.ext'
    storage = DefaultStorage()
    content = io.BytesIO('Test äöüß !'.encode())
    storage.save(media_path, content)
    assert settings.MEDIA_URL == '/media/'
    url = reverse('serve_media_app:serve-media', kwargs={
        'user_token': 'abcdefghijkl',
        'path': 'abcdefghijklmnopqrstuvwx/foobar.ext'
    })
    assert url == '/media/abcdefghijkl/abcdefghijklmnopqrstuvwx/foobar.ext'

    # Test with a signal callback that will not allow the access:
    def deny_access_callback(user, path, media_path, **kwargs):
        assert user.email == '*****@*****.**'
        assert path == 'abcdefghijklmnopqrstuvwx/foobar.ext'
        assert media_path == 'abcdefghijkl/abcdefghijklmnopqrstuvwx/foobar.ext'
        raise PermissionDenied

    with SignalsContextManager(serve_file_request, deny_access_callback):
        response = self.client.get(url)
        assert response.status_code == 403

    # Test with a signal callback that allows the access:
    def allow_access_callback(user, path, media_path, **kwargs):
        assert user.email == '*****@*****.**'
        assert path == 'abcdefghijklmnopqrstuvwx/foobar.ext'
        assert media_path == 'abcdefghijkl/abcdefghijklmnopqrstuvwx/foobar.ext'

    with SignalsContextManager(serve_file_request, allow_access_callback):
        response = self.client.get(url)
        assert response.status_code == 200
        assert isinstance(response, FileResponse)
        assert response.getvalue().decode('UTF-8') == 'Test äöüß !'
def get(self, request, **kwargs):
    """Redirect to an image URL, converting to PNG on demand.

    Query param ``type`` selects 'original' (default) or 'png'. Converted
    results are cached in storage under a versioned 'converted<N>/' folder
    so the conversion runs only once per source file.
    """
    entity_id = kwargs.get('entity_id')
    current_object = self.get_object(entity_id)
    if current_object is None and self.slugToEntityIdRedirect and getattr(request, 'version', 'v1') == 'v2':
        return self.get_slug_to_entity_id_redirect(kwargs.get('entity_id', None))
    elif current_object is None:
        return Response(status=status.HTTP_404_NOT_FOUND)
    image_prop = getattr(current_object, self.prop)
    if not bool(image_prop):
        return Response(status=status.HTTP_404_NOT_FOUND)
    image_type = request.query_params.get('type', 'original')
    if image_type not in ['original', 'png']:
        raise ValidationError(u"invalid image type: {}".format(image_type))
    image_url = image_prop.url
    filename, ext = os.path.splitext(image_prop.name)
    basename = os.path.basename(filename)
    dirname = os.path.dirname(filename)
    # Version suffix lets a library upgrade invalidate previously cached PNGs.
    version_suffix = getattr(settings, 'CAIROSVG_VERSION_SUFFIX', '1')
    new_name = '{dirname}/converted{version}/{basename}.png'.format(dirname=dirname, basename=basename, version=version_suffix)
    storage = DefaultStorage()
    if image_type == 'original':
        image_url = image_prop.url
    elif image_type == 'png' and ext == '.svg':
        # SVG -> PNG via cairosvg, then thumbnail with PIL.
        if not storage.exists(new_name):
            with storage.open(image_prop.name, 'rb') as input_svg:
                # NOTE(review): StringIO.StringIO is Python 2 style; on
                # Python 3 PNG bytes would need io.BytesIO — confirm runtime.
                svg_buf = StringIO.StringIO()
                out_buf = StringIO.StringIO()
                cairosvg.svg2png(file_obj=input_svg, write_to=svg_buf)
                # NOTE(review): svg_buf is read from its current position;
                # presumably a seek(0) is needed before Image.open — verify.
                img = Image.open(svg_buf)
                img.thumbnail((400, 400))
                img.save(out_buf, format=image_type)
                storage.save(new_name, out_buf)
        image_url = storage.url(new_name)
    elif ext != '.png':
        # attempt to use PIL to do desired image conversion
        if not storage.exists(new_name):
            with storage.open(image_prop.name, 'rb') as input_svg:
                out_buf = StringIO.StringIO()
                img = Image.open(input_svg)
                img.save(out_buf, format=image_type)
                storage.save(new_name, out_buf)
        image_url = storage.url(new_name)
    return redirect(image_url)
def test_basic(self):
    """Media files are only served to their owner; deleted tokens give 400."""
    assert settings.MEDIA_URL == '/media/'
    assert get_user_model() == User
    url = reverse('serve_media_app:serve-media', kwargs={
        'user_token': 'foo',
        'path': 'bar'
    })
    assert url == '/media/foo/bar'
    with tempfile.TemporaryDirectory() as temp:
        with override_settings(MEDIA_ROOT=temp):
            user = baker.make(User, username='******')
            file_path = generate_media_path(user, filename='foobar.txt')
            storage = DefaultStorage()
            content = io.BytesIO('Test äöüß !'.encode())
            final_file_path = storage.save(file_path, content)
            # Storage should not have had to rename the file.
            assert final_file_path == file_path
            url = f'/media/{file_path}'

            # Anonymous has no access:
            response = self.client.get(url)
            assert response.status_code == 403

            # Can't access with wrong user:
            other_user = baker.make(User, username='******')
            self.client.force_login(other_user)
            response = self.client.get(url)
            assert response.status_code == 403

            # Can access with the right user:
            self.client.force_login(user)
            response = self.client.get(url)
            assert response.status_code == 200
            assert isinstance(response, FileResponse)
            assert response.getvalue().decode('UTF-8') == 'Test äöüß !'

            # Test what happens if the token was deleted
            UserMediaTokenModel.objects.all().delete()
            with self.assertLogs(logger='django_tools', level=logging.ERROR) as log:
                response = self.client.get(url)
                # SuspiciousOperation -> HttpResponseBadRequest
                assert response.status_code == 400
                assert log.output == [
                    'ERROR:django_tools.serve_media_app.exceptions:Current user "owner" has no token!'
                ]
def get_filenames(self, context, path):
    """Get all filenames in path

    Raises OSError if directory can not be found
    """
    project_dir = "{}/{}".format(context.page.challenge.short_name, path)
    # listdir() returns (directories, files); only files are of interest.
    _, names = DefaultStorage().listdir(project_dir)
    names.sort()
    # if extensionsFilter is given, show only filenames with those extensions
    if "extensionFilter" in self.args:
        wanted = self.args["extensionFilter"].split(",")
        names = filter_by_extension(names, wanted)
    return names
def process_signed_certs():
    """Persist each signed cert as JSON, mail it out, and mark it issued.

    NOTE(review): relies on ``signed_certs_batch`` and ``id_map`` from the
    enclosing scope; presumably id_map maps batch cert ids to cert uids —
    confirm against the caller.
    """
    default_storage = DefaultStorage()
    for signed_cert in signed_certs_batch:
        uid = id_map[signed_cert['id']]
        filename = f'{uid}.json'
        person_issuance = PersonIssuances.objects.get(cert_uid=uid)
        # Store the raw signed cert JSON under <uid>.json in default storage.
        default_storage.save(filename, ContentFile(json.dumps(signed_cert)))
        send_issued_cert(person_issuance.person,
                         person_issuance.issuance.credential,
                         filename)
        person_issuance.is_issued = True
        # Issue date only (no time component).
        person_issuance.issued_at = datetime.now().strftime('%Y-%m-%d')
        person_issuance.save()
def post(self, request):
    """Handle a file upload: store it under a random name and return its URL.

    Returns:
        Response with the stored file's URL on success, or
        HttpResponseBadRequest when the form does not validate.
    """
    # Debug `print(request.FILES)` removed — it leaked upload metadata to
    # stdout on every request.
    form = UploadFileForm(request.POST, request.FILES)
    if not form.is_valid():
        return HttpResponseBadRequest()
    storage = DefaultStorage()
    # Keep the original extension but randomize the basename so stored
    # files cannot collide or be guessed.
    _, file_extension = os.path.splitext(request.FILES['file'].name)
    filename = get_random_string(length=32) + file_extension
    filename = storage.save(name=filename, content=request.FILES['file'])
    url = storage.url(filename)
    return Response({
        'url': url,
    })
def cleanup(self, user):
    """Clean up the uploaded file.

    This will delete the uploaded file from the storage.

    Args:
        user (django.contrib.auth.models.User): The user.
    """
    manager = self._settings_manager_class(user)
    config = manager.configuration_for(self.avatar_service_id)
    # Remove the stored avatar file referenced by the user's configuration.
    DefaultStorage().delete(config['file_path'])
def parse_php_arrays(filename):
    """
    Parse a php page containing only php arrays like $x=(1,2,3).

    Created to parse anode09 eval results.

    Returns: dict{"varname1",array1,....}, array1 is a float array
    """
    verbose = False
    output = {}
    storage = DefaultStorage()
    with storage.open(filename, "r") as f:
        content = f.read()
    content = content.replace("\n", "")
    # Isolate the single <?php ... ?> section of the page.
    php = re.compile(r"\<\?php(.*?)\?\>", re.DOTALL)
    s = php.search(content)
    assert s is not None, (
        "trying to parse a php array, but could not find anything like <? php /?> in '%s'"
        % filename)
    phpcontent = s.group(1)
    # PHP variables all start with '$'; split on it to isolate each one.
    phpvars = phpcontent.split("$")
    phpvars = [x for x in phpvars if x != ""]  # remove empty
    if verbose:
        print("found %d php variables in %s. " % (len(phpvars), filename))
        print("parsing %s into int arrays.. " % filename)
    # check whether this looks like a php var
    phpvar = re.compile(r"([a-zA-Z]+[a-zA-Z0-9]*?)=array\((.*?)\);", re.DOTALL)
    for var in phpvars:
        result = phpvar.search(var)
        # TODO Log these messages as info
        if result is None:
            # NOTE(review): msg is built but never logged or used.
            msg = ("Could not match regex pattern '%s' to '%s'\ "
                   % (phpvar.pattern, var))
            continue
        if len(result.groups()) != 2:
            # NOTE(review): msg is built but never logged or used.
            msg = ("Expected to find varname and content,\ but regex '%s' found %d items:%s " % (
                phpvar.pattern,
                len(result.groups()),
                "[" + ",".join(result.groups()) + "]",
            ))
            continue
        (varname, varcontent) = result.groups()
        # Comma-separated array body -> list of floats.
        output[varname] = [float(x) for x in varcontent.split(",")]
    return output
def render_variation(cls, file_name, variation, replace=False, storage=None):
    """Render an image variation and save it to the storage.

    Args:
        file_name: Name of the source image in storage.
        variation: Dict with 'name', 'width', 'height', 'crop', 'resample'.
        replace: If True, delete and re-render an existing variation.
        storage: Storage backend; defaults to a fresh DefaultStorage().

    Returns:
        The storage name of the rendered variation.
    """
    # Default resolved at call time: a `storage=DefaultStorage()` default
    # argument would be evaluated once at import and shared by all calls.
    if storage is None:
        storage = DefaultStorage()
    variation_name = cls.get_variation_name(file_name, variation['name'])
    if storage.exists(variation_name):
        if replace:
            storage.delete(variation_name)
            # Fixed: original log messages left the "{}" placeholder unfilled.
            logger.info('File "%s" already exists and has been replaced.',
                        variation_name)
        else:
            logger.info('File "%s" already exists.', variation_name)
            return variation_name
    resample = variation['resample']
    with storage.open(file_name) as f:
        with Image.open(f) as img:
            file_format = img.format
            if cls.is_smaller(img, variation):
                # Pre-shrink by powers of two while still more than twice the
                # target size; thumbnail() on a smaller image is faster.
                factor = 1
                while (img.size[0] / factor > 2 * variation['width']
                       and img.size[1] * 2 / factor > 2 * variation['height']):
                    factor *= 2
                if factor > 1:
                    img.thumbnail(
                        (int(img.size[0] / factor), int(img.size[1] / factor)),
                        resample=resample)
                if variation['crop']:
                    img = ImageOps.fit(
                        img,
                        (variation['width'], variation['height']),
                        method=resample)
                else:
                    img.thumbnail(
                        (variation['width'], variation['height']),
                        resample=resample)
            # Re-encode in the source format and persist the variation.
            with BytesIO() as file_buffer:
                img.save(file_buffer, file_format)
                f = ContentFile(file_buffer.getvalue())
                storage.save(variation_name, f)
    return variation_name
def render(self, context):
    """Render a project file's contents into the page, rewriting links.

    Returns the file contents (as HTML), or an inline error-message string
    when the path cannot be resolved, escapes the project folder, or fails
    to open.
    """
    # text typed in the tag
    token = self.args["file"]
    try:
        filename = resolve_path(token, self.parser, context)
    except PathResolutionException as e:
        return self.make_error_msg(f"Path Resolution failed: {e}")
    challenge_short_name = context["site"].short_name
    filepath = os.path.join(settings.MEDIA_ROOT, challenge_short_name, filename)
    filepath = os.path.abspath(filepath)
    filepath = self.make_canonical_path(filepath)
    # when all rendering is done, check if the final path is still not getting
    # into places it should not go.
    if not self.is_inside_project_data_folder(filepath, context["site"]):
        error_msg = "'{}' cannot be opened because it is outside the current project.".format(
            filepath)
        return self.make_error_msg(error_msg)
    storage = DefaultStorage()
    try:
        with storage.open(filepath, "r") as f:
            contents = f.read()
    except Exception as e:
        return self.make_error_msg("error opening file:" + str(e))
    # TODO check content safety
    # For some special pages like login and signup, there is no current page
    # In that case just don't try any link rewriting
    if "currentpage" in context:
        currentpage = context["currentpage"]
    else:
        currentpage = None
    # CSS is served verbatim; everything else gets relative links rewritten
    # against the current page.
    if currentpage and os.path.splitext(filename)[1] != ".css":
        html_out = self.replace_links(filename, contents,
                                      currentpage).decode()  # rewrite relative links
    else:
        html_out = contents
    return html_out
def fetch_remote_file_to_storage(remote_url, upload_to=''):
    """
    Fetches a remote url, and stores it in DefaultStorage

    The file is cached under an MD5 of the source URL (keeping the original
    extension), so repeated fetches of the same URL reuse the stored copy.

    :return: (status_code, new_storage_name); new_storage_name is None when
        the fetch did not return HTTP 200.
    """
    store = DefaultStorage()
    r = requests.get(remote_url, stream=True)
    if r.status_code == 200:
        _, ext = os.path.splitext(urllib.parse.urlparse(r.url).path)
        # Fixed: the template contained a literal "(unknown)" instead of the
        # {filename} placeholder, so the md5 digest was never used and every
        # cached file collided on the same storage name.
        storage_name = '{upload_to}/cached/{filename}{ext}'.format(
            upload_to=upload_to,
            filename=hashlib.md5(remote_url.encode()).hexdigest(),
            ext=ext)
        if not store.exists(storage_name):
            buf = io.BytesIO(r.content)
            store.save(storage_name, buf)
        return r.status_code, storage_name
    return r.status_code, None
def handle(self, *args, **options):
    """Remove ImageFile rows (and dependent images) whose photo is missing from storage."""
    dry_run = options.pop('dry_run', False)
    storage = DefaultStorage()
    for record in ImageFile.objects.all():
        # Skip records whose backing file is still present.
        if storage.exists(record.photo.name):
            continue
        print(f'{record.photo.name} not found, remove imagefile {record.id}')
        # Delete every image linked to the orphaned file first.
        for link in ImageToFile.objects.filter(file=record):
            try:
                print(f'delete image {link.image.id}')
                if not dry_run:
                    link.image.delete()
            except Image.DoesNotExist:
                # Image row already gone; nothing to delete.
                pass
        if not dry_run:
            record.delete()
def save(self):
    """Save the file and return the configuration.

    Returns:
        dict: The avatar service configuration.
    """
    storage = DefaultStorage()
    file_path = self.cleaned_data['avatar_upload'].name
    # get_valid_name() sanitizes the filename but does not guarantee
    # uniqueness. NOTE(review): an existing file at this path would be
    # overwritten — confirm that is intended.
    file_path = storage.get_valid_name(file_path)
    # NOTE(review): not every storage backend supports open() in 'wb' mode;
    # storage.save() is the portable write API — confirm the backend in use.
    with storage.open(file_path, 'wb') as f:
        f.write(self.cleaned_data['avatar_upload'].read())
    return {
        'absolute_url': storage.url(file_path),
        'file_path': file_path,
    }
def safely_write(output_filename, people, group_by_post):
    """
    Use Django's storage backend to write the CSV file to the MEDIA_ROOT.

    If using S3 (via Django Storages) the file is atomically written when
    the file is closed (when the context manager closes). That is, the file
    can be opened and written to but nothing changes at the public S3 URL
    until the object is closed. Meaning it's not possible to have a half
    written file.

    If not using S3, there will be a short time where the file is empty
    during write.
    """
    csv = list_to_csv(people, group_by_post)
    file_store = DefaultStorage()
    # Open in binary mode: we write UTF-8 encoded bytes, and a text-mode
    # ("w") file object rejects bytes on filesystem storage backends.
    with file_store.open(output_filename, "wb") as out_file:
        out_file.write(csv.encode("utf-8"))
def image_upload(request):
    """Store an uploaded image under a dated posts/ path and return its URL.

    Returns:
        JsonResponse in the editor image-upload format:
        {'success': 1, 'file': {'url': ...}} on success, {'success': 0}
        for non-POST requests.
    """
    if request.method == 'POST':
        image = request.FILES['image']
        fs = DefaultStorage()
        now = timezone.now()
        # Path is relative to the storage root (MEDIA_ROOT locally, or the
        # bucket when using a remote backend such as S3).
        image_path = f'/posts/{now.year}/{now.month}/{now.day}/{image.name}'
        filename = fs.save(image_path, image)
        uploaded_file_url = fs.url(filename)
        # Debug print of the URL removed; dead commented-out variants dropped.
        return JsonResponse({
            'success': 1,
            'file': {
                'url': uploaded_file_url,
            }
        })
    return JsonResponse({
        'success': 0,
    })
def handle(self, *args, **options):
    """Crawl every API endpoint and dump paginated JSON results to storage."""
    self.client = Client()
    self.directory_path = "cached-api"
    self.storage = DefaultStorage()
    # --http flag turns off TLS in the generated URLs.
    self.secure = not options.get("http", False)
    self.hostname = options["hostname"]
    self.url_prefix = self.get_url_prefix(options["url_prefix"])
    # Each run writes into its own timestamped subdirectory.
    self.timestamp = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
    json_directory = join(self.directory_path, self.timestamp)
    page_size = options["page_size"]
    if not page_size:
        page_size = 200
    for endpoint in self.endpoints:
        self.get_api_results_to_directory(endpoint, json_directory, page_size)
        # Point the "latest" alias at this run's output.
        self.update_latest_page(self.directory_path, endpoint)
    if options["prune"]:
        self.prune()
def applyImport(self, request, queryset):
    """Restore academic-year and semester fixtures from each selected datafile.

    Args:
        request: The admin request (unused).
        queryset: Selected datafile model instances.
    """
    from django.core.files.storage import DefaultStorage
    storage = DefaultStorage()
    for datafile in queryset:
        # restore academic year data; a context manager closes the handle —
        # the original `del f1` only dropped the reference without closing.
        with storage.open(datafile.data_file_academic_year.name, mode='rb') as f:
            for deserialized_object in serializers.deserialize(
                    "json", f.read(), ignorenonexistent=True):
                print(str(deserialized_object))
                deserialized_object.save()
        # restore semesters data
        with storage.open(datafile.data_file_semester.name, mode='rb') as f:
            for deserialized_object in serializers.deserialize(
                    "json", f.read(), ignorenonexistent=True):
                print(str(deserialized_object))
                deserialized_object.save()
def process_signed_certs():
    """Split a batch signature block into per-uid certs, store, mail, mark issued.

    NOTE(review): relies on ``signed_certs_batch`` from the enclosing scope;
    each entry appears to map cert uids to cert bodies plus one shared
    'signature' key — confirm against the signer's output format.
    """
    default_storage = DefaultStorage()
    for signed_cert in signed_certs_batch:
        for uid in signed_cert.keys():
            # 'signature' is metadata for the whole batch, not a cert uid.
            if uid != 'signature':
                person_issuance = PersonIssuances.objects.get(
                    cert_uid=uid)
                full_cert = signed_cert[uid]
                # Attach the batch-wide signature to the individual cert.
                full_cert['signature'] = signed_cert['signature']
                default_storage.save(
                    uid + '.json', ContentFile(json.dumps(full_cert)))
                send_issued_cert(
                    person_issuance.person,
                    person_issuance.issuance.credential,
                    uid + '.json')
                person_issuance.is_issued = True
                # Issue date only (no time component).
                person_issuance.issued_at = datetime.now().strftime('%Y-%m-%d')
                person_issuance.save()
def applyImport(self, request, queryset):
    """Restore academic-program and course fixtures from each selected datafile.

    Args:
        request: The admin request (unused).
        queryset: Selected datafile model instances.
    """
    from django.core.files.storage import DefaultStorage
    storage = DefaultStorage()
    for datafile in queryset:
        # restore Academic Program data; a context manager closes the handle —
        # the original `del f1` only dropped the reference without closing.
        with storage.open(datafile.data_file_programs.name, mode='rb') as f:
            for deserialized_object in serializers.deserialize(
                    "json", f.read(), ignorenonexistent=True):
                print(str(deserialized_object))
                deserialized_object.save()
        # restore Course data
        with storage.open(datafile.data_file_courses.name, mode='rb') as f:
            for deserialized_object in serializers.deserialize(
                    "json", f.read(), ignorenonexistent=True):
                print(str(deserialized_object))
                deserialized_object.save()
def _try_create_challenge(self, user, short_name, description="test project"):
    """POST the challenge-creation form as `user` and return the response.

    Args:
        user: The user to log in before posting.
        short_name: Challenge short name for the form.
        description: Challenge description for the form.

    Returns:
        The test client's response to the POST.
    """
    url = reverse("challenges:create")
    storage = DefaultStorage()
    # Use the public open() API; _open() is the backend's internal hook and
    # skips name validation.
    banner = storage.open("fake_test_dir/fakefile2.jpg")
    data = {
        "short_name": short_name,
        "description": description,
        "logo": "fakelogo.jpg",
        "banner": banner,
        "prefix": "form",
        "page_set-TOTAL_FORMS": "0",
        "page_set-INITIAL_FORMS": "0",
        "page_set-MAX_NUM_FORMS": "",
    }
    self._login(user)
    response = self.client.post(url, data)
    return response
def image_upload(request):
    """Store an uploaded image for the current publication and return its URL.

    Raises:
        Http404: When the requesting user is not a member of the publication
            matched by the request's host.

    Returns:
        JsonResponse: {'success': 1, 'file': {'url': ...}} on success,
        {'success': 0} for non-POST requests.
    """
    publication = Publication.objects.filter(
        site__domain=request.get_host()).first()
    # Idiom fix: `not in` instead of `not request.user in ...`.
    if publication and request.user not in publication.users.all():
        raise Http404
    if request.method == 'POST':
        image = request.FILES['image']
        fs = DefaultStorage()
        now = timezone.now()
        # NOTE(review): publication may be None here (no matching host),
        # which would raise AttributeError on .slug — confirm upstream
        # routing guarantees a publication exists.
        image_path = f'publications/{publication.slug}/{now.year}/{now.month}/{now.day}/{image.name}'
        filename = fs.save(image_path, image)
        uploaded_file_url = fs.url(filename)
        return JsonResponse({
            'success': 1,
            'file': {
                'url': uploaded_file_url,
            }
        })
    return JsonResponse({
        'success': 0,
    })
class StorageDownloadView(PathDownloadView): """Serve a file using storage and filename.""" #: Storage the file to serve belongs to. storage = DefaultStorage() #: Path to the file to serve relative to storage. path = None # Override docstring. def get_path(self): """Return path of the file to serve, relative to storage. Default implementation simply returns view's :py:attr:`path`. Override this method if you want custom implementation. """ return super(StorageDownloadView, self).get_path() def get_file(self): """Use path and storage to return wrapper around file to serve.""" return files.StorageFile(self.storage, self.get_path())
def upload_attachment(request):
    """Save an uploaded attachment (< 20 MB) and return its URL and id.

    Returns:
        JSON with url, attachment_name and attachment_id on success;
        HTTP 413 when the file is too large; a bare HTTP 201 when the
        Attachment row cannot be created.
    """
    # 限制上传附件小于 20M (reject attachments of 20 MB or more)
    if request.FILES['file'].size >= 20971520:
        return HttpResponse(status=413, content=u'请求文件过大')
    storage = DefaultStorage()
    real_name = request.FILES['file'].name
    # Store under /<user id>/<original name>; storage may uniquify the name.
    name = storage.save(
        '/%s/%s' % (request.user.id, request.FILES['file'].name),
        request.FILES['file'])
    try:
        attachment = Attachment.objects.create(real_name=real_name, path=name)
    except Exception:
        # Was `except BaseException`, which also swallows KeyboardInterrupt
        # and SystemExit; Exception is the widest net that should be caught.
        return HttpResponse(status=201)
    url = storage.url(name)
    ret = {
        'url': url,
        'attachment_name': real_name,
        'attachment_id': attachment.id
    }
    return render_json(ret)
def parse_team_list(team_obj): """ Parse a team CSV list into a dict to be written to PDF """ # No-op if no team list available if team_obj.team_list.name == '': return {} # Open up a CSV reader storage = DefaultStorage() with storage.open(team_obj.team_list.name, mode='r') as team_list: reader = csv.DictReader(team_list, fieldnames=( 'role', 'number', 'name', 'birthday', )) rows = [row for row in reader] data_dict = {} # Build and write back a player list and staff list players = [row for row in rows if not any([_ in row['role'] for _ in STAFF_ROLE_KEY])][:20] players.sort(key=lambda x: try_int_or_zero(x['number'])) for i, row in enumerate(players): no = '{:02d}'.format(i+1) data_dict['PlayerGC{}'.format(no)] = ''.join([ _ for _ in row['role'] if _ in STAFF_ROLE_KEY+CAPTAIN_ROLE_KEY+GOALIE_ROLE_KEY ]) data_dict['PlayerNo{}'.format(no)] = row['number'] data_dict['PlayerName{}'.format(no)] = row['name'] data_dict['PlayerDOB{}'.format(no)] = row['birthday'] staff = [row for row in rows if any([_ in row['role'] for _ in STAFF_ROLE_KEY])][:5] staff.sort(key=lambda x: x['number']) for i, row in enumerate(staff): no = '{:1d}'.format(i+1) data_dict['Off{}'.format(no)] = row['name'] return data_dict
def get_previous_result_data(self):
    """Load the previous run's result JSON from an env var or a storage file.

    The result may be supplied either as raw JSON (input_json_env) or as a
    file path/name (input_filename_env); the filename takes precedence.

    Returns:
        dict: The parsed result data.

    Raises:
        ImproperlyConfigured: If neither env var is set, or the named file
            cannot be found at any candidate path.
    """
    storage = DefaultStorage()
    result_json_filename = os.getenv(self.settings.input_filename_env, None)
    # Only parse the raw-JSON env var when no filename was provided.
    result_json_value = (
        {}
        if result_json_filename
        else json.loads(os.getenv(self.settings.input_json_env, "{}"))
    )
    if not result_json_filename and not result_json_value:
        raise ImproperlyConfigured(
            "Either {} (result file path/name) or {} (raw JSON value) need to be set".format(
                self.settings.input_filename_env, self.settings.input_json_env
            )
        )
    # If the update JSON is specified in an env var instead of a file, return that right away
    if result_json_value and not result_json_filename:
        # If raw result output was provided, just return the "results" portion
        if "results" in result_json_value and "updated" in result_json_value["results"]:
            return result_json_value["results"]
        return result_json_value
    # Try to find the result at one of several paths based on the value of the filename env var
    possible_xpro_result_file_paths = [
        result_json_filename,
        join_path(RESULT_JSON_DIR_PATH, result_json_filename),
        join_path(
            RESULT_JSON_DIR_PATH,
            "{}.json".format(result_json_filename),
        ),
    ]
    for path in possible_xpro_result_file_paths:
        if storage.exists(path):
            with storage.open(path) as f:
                # If the file is found, parse the JSON and return
                return json.loads(f.read())
    raise ImproperlyConfigured(
        "Could not find an xPro result JSON file at any of these paths: {}\n(env var {}={})".format(
            str(possible_xpro_result_file_paths),
            self.settings.input_filename_env,
            result_json_filename,
        )
    )
def get_or_create_image_file(stream) -> ImageFile:
    """Return the ImageFile for a stream, deduplicated by SHA-1 of its bytes.

    If a matching row exists, re-uploads the bytes only when the backing file
    is missing from storage. Otherwise validates the stream with PIL, stores
    it under a sha1-sharded path, and creates a new row.

    Raises:
        InvalidImageFile: If PIL cannot identify the stream as an image.
    """
    logger.debug('begin save_image_file')
    stream.seek(0)
    # Hash the full content; uploads expose chunks(), plain streams read().
    s = sha1()
    if hasattr(stream, 'chunks'):
        for chunk in stream.chunks():
            s.update(chunk)
    else:
        s.update(stream.read())
    sha1_hash = s.hexdigest()
    storage = DefaultStorage()
    try:
        image_file = ImageFile.objects.get(sha1=sha1_hash)
        # Row exists but the file may have been removed; restore it.
        if not storage.exists(image_file.photo.name):
            stream.seek(0)
            storage.save(image_file.photo.name, stream)
    except ImageFile.DoesNotExist:
        stream.seek(0)
        try:
            image = PImage.open(stream)
        except (UnidentifiedImageError, OSError):
            raise InvalidImageFile()
        image_file = ImageFile()
        image_file.sha1 = sha1_hash
        image_file.width = image.width
        image_file.height = image.height
        image_file_ext = '.' + FORMAT_EXT[image.format] if image.format else ''
        # Shard by hash prefix: ab/cd/<rest>.<ext> keeps directories small.
        image_file.photo.name = '%s/%s/%s%s' % (
            sha1_hash[0:2], sha1_hash[2:4], sha1_hash[4:], image_file_ext)
        image_file.format = image.format
        # seek(0, 2) moves to EOF so tell() yields the total size in bytes.
        stream.seek(0, 2)
        image_file.file_size = stream.tell()
        stream.seek(0)
        storage.save(image_file.photo.name, stream)
        if hasattr(stream, 'name'):
            image_file.origin_filename = stream.name
        image_file.save()
    return image_file