def __download_range(self, k, dst):
    try:
        _, ext = os.path.splitext(dst)
        ds = []
        parts = []
        logging.info("Download %s start", k.name)
        for startByte in range(0, k.size, self.splitMB):
            output_part = self.new_temp_file(suffix=ext)
            parts.append(output_part)
            endByte = min(startByte + self.splitMB - 1, k.size)
            logging.debug(
                "deferToThreadPool %s start=%d end=%d size=%d cnt=%d",
                k.name, startByte, endByte, endByte - startByte, len(ds),
            )
            d = twisted.internet.threads.deferToThreadPool(
                reactor,
                reactor.getThreadPool(),  # @UndefinedVariable
                self.__downloadOne,
                k, startByte, endByte, output_part, len(ds),
            )
            ds.append(d)
        if os.path.exists(dst):
            os.remove(dst)
        fout = file(dst, "wb")
        start = timeit.default_timer()
        for cnt, p in enumerate(parts):
            yield ds[cnt]
            shutil.copyfileobj(file(p, "rb"), fout)
            size = min(k.size, (cnt + 1) * self.splitMB)
            elapsed = timeit.default_timer() - start
            speedstr = formatFileSize(size / elapsed)
            sizestr = formatFileSize(size)
            percent = (float(cnt) / len(parts)) * 100.0
            logging.info(
                "%03d/%03d (%.2f%%) speed=%s/s, elapsed=%.2f, size=%s",
                cnt, len(parts), percent, speedstr, elapsed, sizestr,
            )
    except Exception:
        logging.error("download error", exc_info=True)
        raise
def getFilesAndDirectories(self, directory):
    files = []
    dirs = []
    for path in directory:
        url = urllib.quote(path.encode('utf8'), "/")
        escapedPath = cgi.escape(path)
        if os.path.isdir(os.path.join(self.path, path)):
            url = url + '/'
            dirs.append({
                'text': escapedPath + "/",
                'href': url,
                'size': '',
                'type': '[Directory]',
                'encoding': ''
            })
        else:
            mimetype, encoding = static.getTypeAndEncoding(
                path, self.contentTypes, self.contentEncodings,
                self.defaultType)
            try:
                size = os.stat(os.path.join(self.path, path)).st_size
            except OSError:
                continue
            files.append({
                'text': escapedPath,
                "href": url,
                'type': '[%s]' % mimetype,
                'encoding': (encoding and '[%s]' % encoding or ''),
                'size': static.formatFileSize(size)
            })
    return dirs, files
def download(self, key, dst):
    start = timeit.default_timer()
    if key.size < self.splitMB:
        key.get_contents_to_filename(dst)
    else:
        try:
            yield self.__download_range(key, dst)
        finally:
            for f in self.tempfiles:
                try:
                    os.unlink(f)
                except Exception:
                    pass
    elapsed = timeit.default_timer() - start
    speedstr = formatFileSize(key.size / elapsed)
    sizestr = formatFileSize(key.size)
    logging.info("Done=%s dst=%s speed=%s/s, elapsed=%.2f, size=%s",
                 key.name, dst, speedstr, elapsed, sizestr)
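# Note: download() and __download_range() above yield Deferreds from inside
# ordinary-looking functions; that pattern only works when the generators are
# wrapped with twisted.internet.defer.inlineCallbacks in the original class
# (the decorators fall outside these excerpts, so this is an assumption).
# A minimal, hypothetical sketch of driving such a method:
from twisted.internet import defer

@defer.inlineCallbacks
def fetch(downloader, key, dst):
    # 'downloader', 'key', and 'dst' are placeholder names: an instance of the
    # class owning download(), a boto-style key object, and a local file path.
    yield downloader.download(key, dst)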
def __uploadOne(self, mp, src, dst, seek, cnt):
    size = os.path.getsize(src)
    sizestr = formatFileSize(size)
    logging.debug("upload start src=%s, dst=%s, size=%s, seek=%d, cnt=%d",
                  src, dst, sizestr, seek, cnt)
    start = timeit.default_timer()
    fd = file(src, "rb")
    sd = cStringIO.StringIO()
    fd.seek(seek)
    content = fd.read(self.splitMB)
    size = len(content)
    sd.write(content)
    sd.seek(0)
    mp.upload_part_from_file(sd, cnt + 1, policy=self.grant)
    elapsed = timeit.default_timer() - start
    speedstr = formatFileSize(size / elapsed)
    logging.info(
        "upload end src=%s, dst=%s, size=%s, speed=%s/s, seek=%d, cnt=%d",
        src, dst, sizestr, speedstr, seek, cnt
    )
def _getFilesAndDirectories(self, directory):
    """
    Helper returning files and directories in given directory listing, with
    attributes to be used to build a table content with
    C{self.linePattern}.

    @return: tuple of (directories, files)
    @rtype: C{tuple} of C{list}
    """
    files = []
    dirs = []
    for path in directory:
        if _PY3:
            if isinstance(path, bytes):
                path = path.decode("utf8")
        url = quote(path, "/")
        escapedPath = escape(path)
        childPath = filepath.FilePath(self.path).child(path)
        if childPath.isdir():
            dirs.append({
                'text': escapedPath + "/",
                'href': url + "/",
                'size_int': -1,
                'size': '',
                'type': '[Directory]',
                'encoding': '',
                'ctime': ''
            })
        else:
            mimetype, encoding = getTypeAndEncoding(
                path, self.contentTypes, self.contentEncodings,
                self.defaultType)
            try:
                size = childPath.getsize()
            except OSError:
                continue
            files.append({
                'text': escapedPath,
                "href": url,
                'type': '[%s]' % mimetype,
                'encoding': (encoding and '[%s]' % encoding or ''),
                'size_int': size,
                'size': formatFileSize(size),
                'ctime': self.getCreateTime(childPath.path)
            })
    return dirs, files
def test_formatFileSize(self):
    """
    L{static.formatFileSize} format an amount of bytes into a more readable
    format.
    """
    self.assertEquals(static.formatFileSize(0), "0B")
    self.assertEquals(static.formatFileSize(123), "123B")
    self.assertEquals(static.formatFileSize(4567), "4K")
    self.assertEquals(static.formatFileSize(8900000), "8M")
    self.assertEquals(static.formatFileSize(1234000000), "1G")
    self.assertEquals(static.formatFileSize(1234567890000), "1149G")
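# Note: the assertions above pin down formatFileSize's behaviour; a minimal
# sketch of an implementation consistent with them, assuming 1024-based units
# and integer truncation (Twisted ships the real helper in twisted.web.static,
# so this is illustrative only):
def formatFileSize(size):
    if size < 1024:
        return '%iB' % size
    elif size < (1024 ** 2):
        return '%iK' % (size / 1024)
    elif size < (1024 ** 3):
        return '%iM' % (size / (1024 ** 2))
    else:
        return '%iG' % (size / (1024 ** 3))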
def targzip_package(tarpath, basedir, src_paths, src_excludes, checksum=False):
    from twisted.web.static import formatFileSize
    start = timeit.default_timer()
    files = []
    for src_path in src_paths:
        try:
            src_path.index("/")
        except ValueError:
            files.append(src_path)
            continue
        exp = re.compile(src_path)
        # look for the deeper possible sub_path to search (e.g. faster)
        exp_subpath = re.compile(r'^([\w\/]+)')
        sub_path = exp_subpath.match(src_path).group(1)
        sub_path = sub_path[:sub_path.rfind('/')]
        logging.info("searching %s for '%s'", sub_path, src_path)
        for dirpath, dirnames, filenames in os.walk(
                os.path.join(basedir, sub_path)):
            for filename in filenames:
                path = os.path.join(dirpath, filename)
                path = path[len(basedir) + 1:]
                path = path.replace('\\', "/")
                if exp.match(path):
                    files.append(path)
            for dirname in dirnames:
                path = os.path.join(dirpath, dirname)
                if not os.path.islink(path):
                    continue
                path = path[len(basedir) + 1:]
                path = path.replace('\\', "/")
                if exp.match(path):
                    files.append(path)
    newfiles = []
    src_excludes_exp = map(lambda x: re.compile(x), src_excludes)
    for f in files:
        add = True
        for cnt, e in enumerate(src_excludes_exp):
            if e.match(f):
                add = False
                logging.debug("excluding %s (based on %s)", f, src_excludes[cnt])
                break
        if add:
            newfiles.append(f)
    files = newfiles
    files.sort()
    if not files:
        logging.warning("skipping targzip_package, no files")
        return None
    logging.info("targzip_package: search elapsed=%.2f",
                 timeit.default_timer() - start)
    # checksum
    image_size = 0
    if checksum:
        start = timeit.default_timer()
        sha_image = sha()
        for f in files:
            logging.debug("checksum %s", f)
            path = os.path.join(basedir, f)
            if os.path.islink(path):
                sha_image.update(os.readlink(path))  # @UndefinedVariable
                continue
            fd = open(path, "rb")
            while 1:
                buf = fd.read(IMAGE_IO_CHUNK)
                if not buf:
                    break
                sha_image.update(buf)
            image_size += os.path.getsize(path)
        image_digest = hexlify(sha_image.digest())
        logging.info("targzip_package: sha=%s, size=%d, elapsed=%.2f",
                     image_digest, image_size, timeit.default_timer() - start)
    else:
        image_digest = None
        for f in files:
            path = os.path.join(basedir, f)
            if not os.path.islink(path):
                image_size += os.path.getsize(path)
    # tar/gzip
    start = timeit.default_timer()
    tar_file = targzip_files(tarpath, files, basedir)
    tar_size = os.path.getsize(tar_file)
    logging.info(
        "targzip_package: size=%s, compressed=%s, ratio=%.2f%%, elapsed=%.2f",
        formatFileSize(image_size), formatFileSize(tar_size),
        (float(tar_size) / image_size) * 100,
        timeit.default_timer() - start)
    return tar_file, image_digest
def _getFilesAndDirectories(self, directory):
    """
    Helper returning files and directories in given directory listing, with
    attributes to be used to build a table content with
    C{self.linePattern}.

    @return: tuple of (directories, files)
    @rtype: C{tuple} of C{list}
    """
    files = []
    dirs = []
    dirs.append({'text': "<i>Parent Directory</i>",
                 'href': "..",
                 'size': '',
                 'type': '',
                 # 'encoding': '',
                 'mtime': "",
                 'commstr': ""
                 })
    for path in directory:
        if path[0] == ".":  # ignore filenames that begin with "."
            continue
        mtime = time.asctime(time.localtime(
            os.path.getmtime(os.path.join(self.path, path))))
        url = urllib.quote(path, "/")
        escapedPath = cgi.escape(path)
        # print "path %s url %s escapedPath %s" % (path, url, escapedPath)
        if os.path.isdir(os.path.join(self.path, path)):
            url = url + '/'
            dirs.append({'text': escapedPath + "/",
                         'href': url,
                         'size': '',
                         'type': '[Directory]',
                         # 'encoding': '',
                         'mtime': mtime,
                         'commstr': ''
                         })
        else:
            mimetype, encoding = getTypeAndEncoding(
                path, self.contentTypes, self.contentEncodings,
                self.defaultType)
            try:
                size = os.stat(os.path.join(self.path, path)).st_size
            except OSError:
                continue
            extension = os.path.splitext(path)[1]
            if extension == ".dbm":  # ignore .dbm files
                continue
            if extension == ".db":  # if it's ".db"
                str = path[:-3]
                extension = os.path.splitext(str)[1]
                if extension == ".dbm":  # and if there's also a ".dbm"
                    continue  # just ignore it - MacOSX (esp. Mountain Lion) likes to
                              # add .dbm.db (see comment in snmpsimd.py) - so don't show it
            theCommStr = ""
            if (extension == ".snmprec") or (extension == ".sapwalk") or (extension == ".snmpwalk"):
                bdLen = len(myBaseDir) + len("/data/")
                theCommStr = self.path[bdLen:]  # get the path up to .../data
                if theCommStr != "":  # if it's non-empty
                    theCommStr += "/"  # append the "/"
                theCommStr += path[:-len(extension)]  # append the file name, less the extension
            ## ** Comment **
            ## Add attributes to the "elements" displayed in each line
            files.append({
                'text': escapedPath,
                "href": url,
                'type': '[%s]' % mimetype,
                # 'encoding': (encoding and '[%s]' % encoding or ''),
                'size': formatFileSize(size),
                'mtime': mtime,
                'commstr': theCommStr,
            })
    return dirs, files