def content_streamer():
    z = zipstream.ZipFile(mode='w',
                          compression=zipstream.ZIP_DEFLATED,
                          allowZip64=True)
    for document in documents:
        doc_id = document['_id']
        content = document['fields']['content'][0]

        def _it():
            yield content.encode('utf-8')

        # We need to make use of the private functions of zipstream,
        # because we need to feed the zipfile documents from ES while we
        # send them to the user. Since we want every document in a separate
        # file, we have no alternative but to call the `__write` method
        # manually.
        file_name = "{}.txt".format(doc_id)
        for data in z._ZipFile__write(iterable=_it(), arcname=file_name):
            yield data

    # Yield the rest of the data the zipfile might have (corresponding to
    # the `__close` method).
    for chunk in z:
        yield chunk
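Newer zipstream releases expose `write_iter`, which avoids reaching into the name-mangled `__write` internals. A minimal sketch under that assumption (`content_streamer_public_api` is a hypothetical rename; the ES result set is still walked once up front to register the entries, but each payload is only produced while the archive streams):

def content_streamer_public_api():
    z = zipstream.ZipFile(mode='w',
                          compression=zipstream.ZIP_DEFLATED,
                          allowZip64=True)

    def _content(document):
        # Deferred: runs only when the archive itself is iterated.
        yield document['fields']['content'][0].encode('utf-8')

    for document in documents:
        z.write_iter("{}.txt".format(document['_id']), _content(document))
    for chunk in z:
        yield chunk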
def export_user_manual(self, request, *args, **kwargs):
    """Exports the user manual in the form of a ZIP file."""
    # verify the current user is staff
    if not request.user or not request.user.is_staff:
        raise PermissionDenied

    zf = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)

    serialized_categories = UserManualHelper.get_serialized_categories()
    zf.writestr("categories.xml", serialized_categories.encode())

    serialized_placeholders = UserManualHelper.get_serialized_placeholders()
    zf.writestr("placeholders.xml", serialized_placeholders.encode())

    serialized_help_texts = UserManualHelper.get_serialized_user_manual_texts()
    zf.writestr("help_texts.xml", serialized_help_texts.encode())

    collected_files = UserManualHelper.collect_all_media_files_from_html(
        serialized_placeholders + serialized_help_texts)

    for file in collected_files:
        actual_file = file.replace("${MEDIA_URL}", settings.MEDIA_ROOT)
        zf.write(actual_file, arcname=file)

    response = StreamingHttpResponse(zf, content_type='application/zip')
    # set filename in header
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(
        "user_manual.zip")
    return response
def upload_file():
    form = ReportForm(request.form)
    is_valid = form.validate()
    if request.method == 'POST' and is_valid:
        z = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
        x = 1
        for f in request.files:
            file = request.files[f]
            if allowed_file(file.filename):
                filename = secure_filename(file.filename)
                filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
                file.save(filepath)
                ext = os.path.splitext(filepath)[1]
                if ext == ".xlsx":
                    new_file_name = generate_new_expense_report_filename(form)
                else:
                    new_file_name = generate_new_receipt_filename(form, x, ext)
                new_file_path = os.path.join(app.config['UPLOAD_FOLDER'],
                                             new_file_name)
                os.rename(filepath, new_file_path)
                z.write(new_file_path, new_file_name)
                x += 1
        response = Response(z, mimetype='application/zip')
        response.headers['Content-Disposition'] = \
            'attachment; filename={}'.format('files.zip')
        return response
    elif not is_valid:
        return app.response_class(response='Bad Request',
                                  status=400,
                                  mimetype='application/json')
def _handle_download_request(self) -> None:
    query_body = self.rfile.read(int(self.headers['Content-Length']))
    query = parse_qs(query_body.decode('utf-8'))

    zipfile = zipstream.ZipFile(allowZip64=True)
    prefix = query.get('prefix', ['/'])[0]
    os_prefix = os.path.join(self.configuration.path, prefix[1:])

    for filename in query.get('file_selection', []):
        file_path = os.path.join(self.configuration.path, filename[1:])
        # if it is a directory, walk the directory tree
        if os.path.isdir(file_path):
            for entry in os.walk(file_path):
                os_path = entry[0]
                zip_path = os_path.removeprefix(os_prefix)
                for child in entry[2]:
                    child_os_path = os.path.join(os_path, child)
                    child_zip_path = os.path.join(zip_path, child)
                    zipfile.write(child_os_path, child_zip_path)
        else:
            zipfile.write(file_path, filename.removeprefix(prefix))

    archive_name = os.path.basename(prefix)
    if not archive_name:
        archive_name = "tinyserv"

    self.send_response(200)
    self.send_header('Content-Type', 'application/zip')
    self.send_header(
        'Content-Disposition',
        f'attachment; filename="{archive_name}.zip"'
    )
    self.end_headers()

    for data in zipfile:
        self.wfile.write(data)
def generate_zipped_pdfs(exam_id, start, end):
    """Generates a zip file with all the copies joined together.

    Inside the zip, the copies are named by their copy number.

    Parameters
    ----------
    exam_id : int
        The exam id to generate the pdfs for
    start : int
        The start copy number
    end : int
        The final copy number, inclusive
    """
    exam = Exam.query.get(exam_id)
    exam_dir, _, barcode_widget, exam_path, _ = _exam_generate_data(exam)

    zf = zipstream.ZipFile(mode='w')

    for copy_num, pdf in generate_pdfs(exam_pdf_file=exam_path,
                                       copy_nums=list(range(start, end + 1)),
                                       exam_token=exam.token,
                                       datamatrix_x=barcode_widget.x,
                                       datamatrix_y=barcode_widget.y):
        zf.writestr(
            current_app.config['OUTPUT_PDF_FILENAME_FORMAT'].format(copy_num),
            pdf.getvalue())
        yield from zf.flush()

    yield from zf
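The `flush()` calls rely on the zipstream-new fork, which emits whatever compressed data is ready, so each copy streams out as soon as it is written. A hypothetical consumer (exam id and copy range are made up):

# Hypothetical usage: write copies 1..25 of exam 7 to disk as they stream.
with open('copies.zip', 'wb') as f:
    for chunk in generate_zipped_pdfs(exam_id=7, start=1, end=25):
        f.write(chunk)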
def download_multiple_files(request):
    if request.method == "POST":
        import json
        json_data = json.loads(request.body)
        query = ""
        for i in json_data:
            query += f"files={i}&"
        url = request.build_absolute_uri(
            f"{reverse('download_multiple_files')}?{query[:-1]}")
        return JsonResponse(data={"download_url": url}, status=201)

    # getlist() handles both a single and repeated ?files= parameters
    files_index = request.GET.getlist("files")
    files = [File.objects.get(id=i) for i in files_index]

    z = zipstream.ZipFile(mode='w', allowZip64=True)
    for file in files:
        z.write(file.file.path, file.filename())

    response = StreamingHttpResponse(z, content_type='application/zip')
    zipfile_name = "grouped_files.zip"
    # return as zipfile
    response['Content-Disposition'] = f'attachment; filename={zipfile_name}'
    return response
def export_entities(request, result, format):
    assert format in (FORMAT_CSV, FORMAT_EXCEL)
    entities = []
    for entity in result.results:
        resolver.queue(result, Collection, entity.get('collection_id'))
        entities.append(model.get_proxy(entity))
    resolver.resolve(result)
    zip_archive = zipstream.ZipFile()

    if format == FORMAT_EXCEL:
        workbook = get_workbook()
        for entity in entities:
            collection_id = entity.context.get('collection_id')
            collection = resolver.get(result, Collection, collection_id)
            export_entity_excel(workbook, collection, entity)
            write_document(zip_archive, collection, entity)
        content = io.BytesIO(get_workbook_content(workbook))
        zip_archive.write_iter('export.xlsx', content)
    elif format == FORMAT_CSV:
        handlers = {}
        for entity in entities:
            collection_id = entity.context.get('collection_id')
            collection = resolver.get(result, Collection, collection_id)
            export_entity_csv(handlers, collection, entity)
            write_document(zip_archive, collection, entity)
        for key, content in handlers.items():
            content.seek(0)
            content = io.BytesIO(content.read().encode())
            zip_archive.write_iter(key + '.csv', content)

    for chunk in zip_archive:
        yield chunk
def zip_file(ch, method, properties, body):
    time.sleep(5)
    try:
        msg = json.loads(body.decode("utf-8"))
        fname = msg['filename']
        location = 'uas/templates/uas/cache/' + fname
        size = msg['size']
        token = msg['token']
        print('filename:', fname, 'location:', location, 'size:', size)
    except Exception as e:
        print("[E] Error:", e)
        return

    try:
        response = authorize(token)
        response = json.loads(response.text)
        print(response)

        progress = 0
        z = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
        z.write(location)
        with open(location + '.zip', 'wb') as f:
            for data in z:
                f.write(data)
                # rough estimate: compressed bytes emitted vs. original size
                progress += (len(data) / size) * 100
                print("[X] compressing", progress, "%")
                time.sleep(0.01)
                channel.basic_publish(exchange='ZIP_QUEUE',
                                      routing_key=fname,
                                      body=str(progress))
        print("[X] compress done")
    except Exception as e:
        print(e)
def download_image(self, request, queryset):
    if len(queryset) >= 2:
        z = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
        for data in queryset:
            file_path = str(data.image.url).strip('/')
            file_name = str(data.name) + '-' + file_path.split('/')[-1]
            z.write(file_path, file_name)
        export_name = escape_uri_path(str(len(queryset)) + 'images')
        response = StreamingHttpResponse(z)
        response['Content-Type'] = 'application/zip'
        response['Content-Disposition'] = 'attachment; filename={}.zip'.format(
            export_name)
        return response
    elif len(queryset) >= 1:
        for data in queryset:
            file_path = str(data.image.url).strip('/')
            file_name = str(data.name) + '-' + file_path.split('/')[-1]
            file = open(file_path, 'rb')
            export_name = escape_uri_path(file_name)
            response = FileResponse(file)
            response['Content-Type'] = 'application/octet-stream'
            response['Content-Disposition'] = 'attachment;filename={}'.format(
                export_name)
            return response
    else:
        return None
def download_classimage(self, request, *args, **kwargs):
    id = kwargs['object_id']
    try:
        category = CategoryModel.objects.get(pk=id)
    except CategoryModel.DoesNotExist:
        queryset = []
    else:
        queryset = category.postmodel_set.all().select_related('category')
    if queryset:
        category_name = category.name
        z = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
        for data in queryset:
            file_path = str(data.image.url).strip('/')
            file_name = str(data.name) + '-' + file_path.split('/')[-1]
            z.write(file_path, file_name)
        export_name = escape_uri_path(category_name)
        response = StreamingHttpResponse(z)
        response['Content-Type'] = 'application/zip'
        response['Content-Disposition'] = 'attachment; filename={}.zip'.format(
            export_name)
        return response
    else:
        return None
def home(request):
    if request.method == 'POST' and request.FILES['myfile']:
        myfile = request.FILES['myfile']
        fs = FileSystemStorage()
        filename = fs.save(myfile.name, myfile)
        uploaded_file_url = fs.path(filename)
        tiles = image_slicer.slice(uploaded_file_url, 9, save=False)
        image_slicer.save_tiles(tiles, directory='media/',
                                prefix='slice', format='png')
        os.remove(uploaded_file_url)

        z = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
        # add all nine tiles of the 3x3 grid
        for row in range(1, 4):
            for col in range(1, 4):
                name = 'slice_{:02d}_{:02d}.png'.format(row, col)
                z.write('media/' + name, name)

        response = StreamingHttpResponse(z, content_type='application/zip')
        response['Content-Disposition'] = 'attachment; filename={}'.format(
            'files.zip')
        return response
    return render(request, 'core/simple_upload.html')
def download_tabular_data_format(self, package_id):
    '''Return the given package as a Tabular Data Format ZIP file.'''
    context = {
        'model': model,
        'session': model.Session,
        'user': toolkit.c.user or toolkit.c.author,
    }

    r = toolkit.response
    r.content_disposition = 'attachment; filename={0}.zip'.format(package_id)
    r.content_type = 'application/octet-stream'

    # Make a zipstream and put it in the context. This means the
    # package_to_tabular_data_format action will add files into
    # the zipstream for us.
    pkg_zipstream = zipstream.ZipFile(mode='w',
                                      compression=zipstream.ZIP_DEFLATED)
    context['pkg_zipstream'] = pkg_zipstream

    toolkit.get_action('package_to_tabular_data_format')(context, {
        'id': package_id})

    return pkg_zipstream
def create_bagit_stream(dir_name, payload_info_list):
    """Create a stream containing a BagIt zip archive.

    Args:
        dir_name : str
            The name of the root directory in the zip file, under which all
            the files are placed (avoids "zip bombs").
        payload_info_list: list
            List of payload_info_dict, each dict describing a file.
            - keys: pid, filename, iter, checksum, checksum_algorithm
            - If the filename is None, the pid is used for the filename.
    """
    zip_file = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
    _add_path(dir_name, payload_info_list)
    payload_byte_count, payload_file_count = _add_payload_files(
        zip_file, payload_info_list
    )
    tag_info_list = _add_tag_files(
        zip_file, dir_name, payload_info_list, payload_byte_count,
        payload_file_count
    )
    _add_manifest_files(zip_file, dir_name, payload_info_list, tag_info_list)
    _add_tag_manifest_file(zip_file, dir_name, tag_info_list)
    return zip_file
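The function returns the zipstream.ZipFile without iterating it, so the caller drives the actual streaming. A sketch of a one-file bag with a hypothetical payload_info_dict (every value shown is made up; `iter` carries the payload bytes):

# Hypothetical usage: stream a one-file bag to disk chunk by chunk.
payload_info_list = [{
    'pid': 'example-pid-1',            # made-up identifier
    'filename': 'example.xml',
    'iter': iter([b'<example/>']),     # byte chunks of the payload
    'checksum': '0' * 40,              # placeholder SHA-1 hex digest
    'checksum_algorithm': 'SHA-1',
}]
with open('bag.zip', 'wb') as f:
    for chunk in create_bagit_stream('my_bag', payload_info_list):
        f.write(chunk)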
def create_archive_stream(self, items, subdir=None):
    import zipstream
    from django.http.response import StreamingHttpResponse
    from settings.settings import ZIPFILE_SIZE_LIMIT_BYTES
    from utils import zipdir, get_total_size
    from os.path import isfile, isdir
    from datetime import datetime

    path = self.get_path() if subdir is None else os.path.join(
        self.get_path(), subdir)
    if not os.path.exists(path):
        raise Exception('Invalid subdirectory provided')
    share_path = self.get_path()
    z = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
    # total_size = get_total_size([os.path.join(path, item) for item in items])
    # if total_size > ZIPFILE_SIZE_LIMIT_BYTES:
    #     raise Exception("%d bytes is above bioshare's limit for creating "
    #                     "zipfiles, please use rsync or wget instead" % (total_size))
    for item in items:
        item_path = os.path.join(path, item)
        if not os.path.exists(item_path):
            raise Exception("File or folder: '%s' does not exist" % (item))
        if isfile(item_path):
            item_name = item  # os.path.join(self.id, item)
            z.write(item_path, arcname=item_name)
        elif isdir(item_path):
            zipdir(share_path, item_path, z)

    zip_name = 'archive_' + datetime.now().strftime('%Y_%m_%d__%H_%M_%S') + '.zip'
    response = StreamingHttpResponse(z, content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename={}'.format(zip_name)
    return response
def zip_to_zipstream(zip_file, only=None, exclude=None):
    """Convert a zipfile.ZipFile into a zipstream.ZipFile.

    Args:
        zip_file (ZipFile): the original zipfile.ZipFile
        only (list): the file names of the files to be included in the stream
        exclude (list): the file names of the files to be excluded from the stream

    .. note:: `only` and `exclude` cannot be used at the same time
    """
    only = only or []
    exclude = exclude or []

    if only and exclude:
        raise AttributeError(
            '`only` and `exclude` cannot be used at the same time')

    file_names = zip_file.namelist()
    if only:
        file_names = [name for name in file_names if name in only]
    elif exclude:
        file_names = [name for name in file_names if name not in exclude]

    zip_stream = zipstream.ZipFile(mode='w',
                                   compression=zipstream.ZIP_DEFLATED)
    for file_name in file_names:
        zip_stream.write_iter(
            arcname=file_name,
            iterable=iter([zip_file.read(file_name)]),
            compress_type=zipstream.ZIP_DEFLATED,
        )

    return zip_stream
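A hypothetical round trip: re-stream an existing archive while dropping one member. Since `zip_file.read()` runs while the stream is being built, each included member is read into memory up front, and the source archive can be closed before streaming:

import zipfile

with zipfile.ZipFile('build.zip') as src:  # hypothetical input archive
    stream = zip_to_zipstream(src, exclude=['build.log'])

with open('build_without_log.zip', 'wb') as out:
    for chunk in stream:
        out.write(chunk)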
def generator():
    z = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)

    # find all analysis files
    folder = self.analyses_path + '/' + self.name
    for root, dirnames, filenames in os.walk(folder):
        invisible_dirs = [d for d in dirnames if d[0] == '.']
        for d in invisible_dirs:
            dirnames.remove(d)

        for filename in filenames:
            if filename[0] == '.':
                continue
            if filename[-4:] == '.pyc':
                continue

            # add the file to zipstream
            fullname = os.path.join(root, filename)
            arcname = fullname.replace(self.analyses_path + '/', '')
            z.write(fullname, arcname=arcname)

    # add requirements.txt if present
    if os.path.isfile(self.analyses_path + '/requirements.txt'):
        z.write(self.analyses_path + '/requirements.txt')

    for chunk in z:
        yield chunk
def data_archive_generator(
        self,
        file_names: Union[List[str], Set[str]]) -> Generator[bytes, None, None]:
    """
    Get the CARD:Live JSON files as a zipstream generator
    (code derived from https://pypi.org/project/zipstream-new/).
    :param file_names: The file names to load into the archive.
    :return: A generator which allows streaming of the zip file contents.
    """
    zf = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
    for file in file_names:
        file_path = path.join(self._directory, file)
        valid_file = False
        with open(file_path) as f:
            try:
                json_obj = json.load(f)
                valid_file = 'rgi_main' in json_obj
            except Exception:
                valid_file = False
        if valid_file:
            zf.write(file_path, arcname=f'card_live/{file}')
            yield from zf.flush()
        else:
            logger.warning(
                f'File [{file_path}] is not a proper CARD:Live JSON file, '
                'skipping file in download request.')
    yield from zf
def otu_export(request):
    """
    this view takes:
     - contextual filters
     - taxonomic filters
    produces a Zip file containing:
     - a CSV of all the contextual data for samples matching the query
     - a CSV of all the OTUs matching the query, with counts against sample IDs
    """
    def val_or_empty(obj):
        if obj is None:
            return ''
        return obj.value

    zf = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
    params, errors = param_to_filters(request.GET['q'])
    with SampleQuery(params) as query:
        def sample_otu_csv_rows(kingdom_id):
            fd = io.StringIO()
            w = csv.writer(fd)
            w.writerow([
                'BPA ID', 'OTU', 'OTU Count', 'Amplicon', 'Kingdom',
                'Phylum', 'Class', 'Order', 'Family', 'Genus', 'Species'])
            yield fd.getvalue().encode('utf8')
            fd.seek(0)
            fd.truncate(0)
            q = query.matching_sample_otus(kingdom_id)
            for i, (otu, sample_otu, sample_context) in enumerate(q.yield_per(50)):
                w.writerow([
                    format_bpa_id(sample_otu.sample_id),
                    otu.code,
                    sample_otu.count,
                    val_or_empty(otu.amplicon),
                    val_or_empty(otu.kingdom),
                    val_or_empty(otu.phylum),
                    val_or_empty(otu.klass),
                    val_or_empty(otu.order),
                    val_or_empty(otu.family),
                    val_or_empty(otu.genus),
                    val_or_empty(otu.species)])
                yield fd.getvalue().encode('utf8')
                fd.seek(0)
                fd.truncate(0)

        zf.writestr('contextual.csv',
                    contextual_csv(query.matching_samples()).encode('utf8'))
        with OntologyInfo() as info:
            for kingdom_id, kingdom_label in info.get_values(OTUKingdom):
                if not query.has_matching_sample_otus(kingdom_id):
                    continue
                zf.write_iter('%s.csv' % (kingdom_label),
                              sample_otu_csv_rows(kingdom_id))

    response = StreamingHttpResponse(zf, content_type='application/zip')
    filename = "BPASearchResultsExport.zip"
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    return response
def test_write_iterable_with_date_time(self):
    file_name_in_zip = "data_datetime"
    file_date_time_in_zip = time.strptime("2011-04-19 22:30:21",
                                          "%Y-%m-%d %H:%M:%S")

    z = zipstream.ZipFile(mode='w')

    def string_generator():
        for _ in range(10):
            yield b'zipstream\x01\n'

    z.write_iter(iterable=string_generator(),
                 arcname=file_name_in_zip,
                 date_time=file_date_time_in_zip)

    f = tempfile.NamedTemporaryFile(suffix='zip', delete=False)
    for chunk in z:
        f.write(chunk)
    f.close()

    z2 = zipfile.ZipFile(f.name, 'r')
    self.assertFalse(z2.testzip())
    self.assertEqual(file_date_time_in_zip[0:5],
                     z2.getinfo(file_name_in_zip).date_time[0:5])
    os.remove(f.name)
def build_zip(client, project_id):
    z = zipstream.ZipFile()
    paths = get_dds_paths(client, project_id)
    for (filename, dds_file) in paths.items():
        print('write_iter {}'.format(filename))
        z.write_iter(filename, fetch(client, dds_file))
    return z
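write_iter only registers the entries, so build_zip returns immediately and the `fetch` downloads run while the archive is iterated. A hypothetical consumer that spools the project to disk:

# Hypothetical usage: the DDS downloads happen during this loop, not in build_zip.
z = build_zip(client, project_id)
with open('project.zip', 'wb') as f:
    for chunk in z:
        f.write(chunk)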
def biom_zip_file_generator(params, timestamp):
    zf = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
    with SampleQuery(params) as query:
        zf.write_iter(
            params.filename(timestamp, '.biom'),
            (s.encode('utf8')
             for s in generate_biom_file(query, params.describe())))
    return zf
def get_context_data(self, **kwargs):
    context = super().get_context_data(**kwargs)
    z = zipstream.ZipFile()
    for attachment in self.get_queryset():
        z.write(attachment.attachment.path,
                basename(attachment.attachment.path))
    context["archive"] = z
    return context
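Putting the archive in the template context only helps if something later iterates it; a sketch of how a companion method on the same view might stream it instead (Django assumed, filename hypothetical):

# Hypothetical companion method: stream the queued archive as the response.
from django.http import StreamingHttpResponse

def render_to_response(self, context, **response_kwargs):
    response = StreamingHttpResponse(context["archive"],
                                     content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename="attachments.zip"'
    return response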
def download_assignment_uploads(assignment_id):
    if current_user.is_authenticated and app.models.is_admin(
            current_user.username):
        z = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)

        # Get list of uploads for this assignment filtered by class
        uploads_and_users = db.session.query(Upload, User).join(
            User, Upload.user_id == User.id).filter(
            Upload.assignment_id == assignment_id).all()
        if len(uploads_and_users) < 1:
            flash('No files have been uploaded for this assignment yet.',
                  'warning')
            return redirect(url_for('assignments.view_assignment_details',
                                    assignment_id=assignment_id))
        else:
            upload_folder = Path(current_app.config['UPLOAD_FOLDER'])
            for upload, user in uploads_and_users:
                filepath = os.path.join(upload_folder, upload.filename)
                filename = (user.student_number + ' - ' + user.username + '.' +
                            app.files.models.get_file_extension(
                                upload.original_filename))
                z.write(filepath, arcname=filename)

            response = Response(z, mimetype='application/zip')
            # Name the zip file with class and assignment names
            assignment = Assignment.query.get(assignment_id)
            class_label = Turma.query.get(
                assignment.target_turma_id).turma_label
            filename = class_label + ' - ' + assignment.title + '.zip'
            response.headers['Content-Disposition'] = \
                'attachment; filename={}'.format(filename)
            return response
    abort(403)
def test_duplicate_with_zipfile_encoding_issues(project, async_run, controller):
    zf = zipstream.ZipFile()
    zf.writestr('test\udcc3', "data")
    with asyncio_patch('gns3server.controller.project.export_project',
                       return_value=zf):
        with pytest.raises(aiohttp.web.HTTPConflict):
            async_run(project.duplicate(name="Hello"))
def archive():
    which = request.json
    if not isinstance(which, list) or \
            any(not isinstance(x, str) for x in which):
        abort(400)

    with session_scope() as session:
        z = zipstream.ZipFile()

        def do_request(cur_perms=None):
            return Response(z, mimetype=ZIP_MIMETYPE)

        for x in which:
            photo = session.query(Photo).get(x)
            if photo is None:
                abort(404)
            _ensure_photo_attrs(photo)
            filename = photo.id
            if photo.mime_type in CONTENT_TYPE_TO_EXTENSION:
                filename += "." + CONTENT_TYPE_TO_EXTENSION[photo.mime_type]
            z.write_iter(filename, read_file_iter(get_raw_photo_path(photo)))
            do_request = perms.require(ViewPerm(photo_id=x))(do_request)

        return do_request()
def make_zipstream(self, compression='gz'):
    import zipstream
    # Map the compression argument before overwriting it: 'gz' means
    # DEFLATE, anything else falls back to stored (uncompressed) entries.
    if compression == 'gz':
        compression = zipstream.ZIP_DEFLATED
    else:
        compression = zipstream.ZIP_STORED
    zstream = zipstream.ZipFile(mode='w', compression=compression)
    self._write_bag_to_zipfile(zstream)
    return zstream
def zip_gen():
    z = zipstream.ZipFile(mode="w")
    for doc in valid_docs():
        z.write(doc.pdf_url())
    if errors:
        z.writestr("errors.txt", "\n".join(errors).encode())
    for chunk in z:
        yield chunk
def generate_zipstream(path):
    z = zipstream.ZipFile(mode='w', allowZip64=True,
                          compression=zipstream.ZIP_DEFLATED)
    for root, dirs, files in os.walk(path):
        for filename in files:
            file_path = os.path.join(root, filename)
            arcpath = os.path.join(path, os.path.relpath(file_path, path))
            z.write(file_path, arcpath)
    for chunk in z:
        yield chunk
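Because generate_zipstream is a plain generator of byte chunks, it plugs directly into a framework's streaming response. A minimal Flask sketch; the route and directory are hypothetical:

from flask import Flask, Response

app = Flask(__name__)

@app.route('/download')
def download():
    # '/srv/data' is a made-up directory; the generator walks it lazily.
    return Response(
        generate_zipstream('/srv/data'),
        mimetype='application/zip',
        headers={'Content-Disposition': 'attachment; filename=data.zip'})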
async def download_directory_as_archive(
    request: web.Request,
    file_path: Path,
    zip_filename: str = None,
) -> web.StreamResponse:
    """Serve a directory as a zip archive on the fly."""

    def _iter2aiter(iter):
        """Iterable to async iterable"""

        def _consume(loop, iter, q):
            for item in iter:
                q.put(item)
            q.put(SENTINEL)

        async def _aiter():
            loop = asyncio.get_running_loop()
            q = janus.Queue(maxsize=DEFAULT_INFLIGHT_CHUNKS)
            try:
                fut = loop.run_in_executor(
                    None, lambda: _consume(loop, iter, q.sync_q))
                while True:
                    item = await q.async_q.get()
                    if item is SENTINEL:
                        break
                    yield item
                    q.async_q.task_done()
                await fut
            finally:
                q.close()
                await q.wait_closed()

        return _aiter()

    if zip_filename is None:
        zip_filename = file_path.name + '.zip'
    zf = zipstream.ZipFile(compression=zipstream.ZIP_DEFLATED)
    async for root, dirs, files in _iter2aiter(os.walk(file_path)):
        for file in files:
            zf.write(Path(root) / file,
                     Path(root).relative_to(file_path) / file)
        if len(dirs) == 0 and len(files) == 0:
            # Include an empty directory in the archive as well.
            zf.write(root, Path(root).relative_to(file_path))

    ascii_filename = (zip_filename.encode('ascii', errors='ignore')
                      .decode('ascii').replace('"', r'\"'))
    encoded_filename = urllib.parse.quote(zip_filename, encoding='utf-8')
    response = web.StreamResponse(headers={
        hdrs.CONTENT_TYPE: 'application/zip',
        hdrs.CONTENT_DISPOSITION: " ".join([
            "attachment;"
            f"filename=\"{ascii_filename}\";",        # RFC-2616 sec2.2
            f"filename*=UTF-8''{encoded_filename}",   # RFC-5987
        ])
    })
    await response.prepare(request)
    async for chunk in _iter2aiter(zf):
        await response.write(chunk)
    return response
def __init__(self, torrent_handler, files, name, progress_callback, log,
             should_split=True):
    self.buf = bytes()
    self.progress_callback = progress_callback
    self.log = log
    self.processed_size = 0
    self.torrent_handler = torrent_handler
    self.should_split = should_split
    # self.progress_text = None
    self.files = files
    self.files_size_sum = 0
    file_names_sum = 0
    self.zipstream = zipstream.ZipFile(mode='w',
                                       compression=zipstream.ZIP_STORED,
                                       allowZip64=True)
    for f in files:
        self.zipstream.write_iter(f.info.fullpath, f)
        self.files_size_sum += f.info.size
        file_names_sum += len(f.info.fullpath.encode('utf-8'))

    # Estimate of the final archive size: per-file local header (30), data
    # descriptor (16) and central directory entry (46), each file name
    # appearing twice, the payload itself, the end-of-central-directory
    # record (22), plus some slack (2048).
    # self.real_size = 21438417 + 205 + 6  # old hard-coded estimate (512 slack)
    self.real_size = (len(files) * (30 + 16 + 46) + 2 * file_names_sum +
                      self.files_size_sum + 22 + 2048)
    self.max_size = const.TG_MAX_FILE_SIZE if should_split else self.real_size
    self.big = self.real_size > self.max_size
    self.size = self.max_size if self.big else self.real_size

    # Sanitize the name: collapse runs of non-alphanumeric characters
    # (except '.') into a single underscore.
    last_repl = False
    f_name = ''
    for i in name:
        if not i.isalnum() and i != '.':
            if not last_repl:
                f_name += '_'
            last_repl = True
        else:
            f_name += i
            last_repl = False
    self._name = f_name

    self.zip_num = 1
    self.must_next_file = False
    self.zip_parts = m.ceil(self.real_size / const.TG_MAX_FILE_SIZE)
    self.downloaded_bytes_count = 0
    self.last_percent = 0
    self.should_close = False
    self.zipiter = iter(self.zipstream)
    self.is_finished = False
    self.last_progress_update = time.time()
    self.log.debug(
        "ZipTorrentContentFile.real_size {} ZipTorrentContentFile.size {}"
        .format(self.real_size, self.size))