Code example #1
File: _ncd_lzma.py Project: chrislit/abydos
    def dist(self, src, tar):
        """Return the NCD between two strings using LZMA compression.

        Parameters
        ----------
        src : str
            Source string for comparison
        tar : str
            Target string for comparison

        Returns
        -------
        float
            Compression distance

        Raises
        ------
        ValueError
            Install the PylibLZMA module in order to use LZMA

        Examples
        --------
        >>> cmp = NCDlzma()
        >>> cmp.dist('cat', 'hat')
        0.08695652173913043
        >>> cmp.dist('Niall', 'Neil')
        0.16
        >>> cmp.dist('aluminum', 'Catalan')
        0.16
        >>> cmp.dist('ATCG', 'TAGC')
        0.08695652173913043


        .. versionadded:: 0.3.5
        .. versionchanged:: 0.3.6
            Encapsulated in class

        """
        if src == tar:
            return 0.0

        src = src.encode('utf-8')
        tar = tar.encode('utf-8')

        if lzma is not None:
            src_comp = lzma.compress(src)[14:]
            tar_comp = lzma.compress(tar)[14:]
            concat_comp = lzma.compress(src + tar)[14:]
            concat_comp2 = lzma.compress(tar + src)[14:]
        else:  # pragma: no cover
            raise ValueError(
                'Install the PylibLZMA module in order to use LZMA'
            )

        return (
            min(len(concat_comp), len(concat_comp2))
            - min(len(src_comp), len(tar_comp))
        ) / max(len(src_comp), len(tar_comp))
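For reference, the distance computed above follows the standard normalized compression distance (NCD) formula, NCD(x, y) = (min(C(xy), C(yx)) - min(C(x), C(y))) / max(C(x), C(y)), where C is the compressed length. A minimal standalone sketch using only the standard-library lzma module (assuming Python 3; it skips the 14-byte header stripping done above, so exact values differ slightly):

import lzma

def ncd_lzma(src: str, tar: str) -> float:
    # NCD(x, y) = (min(C(xy), C(yx)) - min(C(x), C(y))) / max(C(x), C(y))
    if src == tar:
        return 0.0
    x, y = src.encode('utf-8'), tar.encode('utf-8')
    c_x, c_y = len(lzma.compress(x)), len(lzma.compress(y))
    c_xy = len(lzma.compress(x + y))
    c_yx = len(lzma.compress(y + x))
    return (min(c_xy, c_yx) - min(c_x, c_y)) / max(c_x, c_y)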
Code example #2
File: dist_utils.py Project: pjpan/Practice
def _compression_dist(x, y, l_x=None, l_y=None):
    if x == y:
        return 0
    x_b = x.encode("utf-8")
    y_b = y.encode("utf-8")
    if l_x is None:
        l_x = len(lzma.compress(x_b))
        l_y = len(lzma.compress(y_b))
    l_xy = len(lzma.compress(x_b + y_b))
    l_yx = len(lzma.compress(y_b + x_b))
    dist = np_utils._try_divide(min(l_xy, l_yx) - min(l_x, l_y), max(l_x, l_y))
    return dist
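The _try_divide helper comes from the project's np_utils module and is not shown here; presumably it guards against a zero denominator. A plausible stand-in (an assumption, not the project's actual code):

def _try_divide(numerator, denominator, default=0.0):
    # Hypothetical stand-in for np_utils._try_divide: avoid ZeroDivisionError.
    return numerator / denominator if denominator != 0 else default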
Code example #3
 def compression_dist(x, y, l_x=None, l_y=None):
     if x == y:
         return 0
     x_b = x.encode('utf-8')
     y_b = y.encode('utf-8')
     if l_x is None:
         l_x = len(lzma.compress(x_b))
         l_y = len(lzma.compress(y_b))
     l_xy = len(lzma.compress(x_b + y_b))
     l_yx = len(lzma.compress(y_b + x_b))
     dist = MathUtil.try_divide(min(l_xy, l_yx) - min(l_x, l_y), max(l_x, l_y))
     return dist
Code example #4
File: feature_generator.py Project: amsqr/hd
 def _compression_distance(self, x, y, l_x=None, l_y=None):
     x, y = str(x), str(y)
     if x == y:
         return 0
     x_b = x.encode('utf-8')
     y_b = y.encode('utf-8')
     if l_x is None:
         l_x = len(lzma.compress(x_b))
         l_y = len(lzma.compress(y_b))
     l_xy = len(lzma.compress(x_b + y_b))
     l_yx = len(lzma.compress(y_b + x_b))
     dist = (min(l_xy, l_yx) - min(l_x, l_y)) / max(l_x, l_y)
     return dist
Code example #5
    def compression_trial(self, text):
        self.len_uc = len(text)
        self._t('zd', lambda: zlib.compress(text))
        self._t('z9', lambda: zlib.compress(text, 9))

        self._t('xd',
                lambda: lzma.compress(text,
                                      format=lzma.FORMAT_RAW,
                                      filters=[{"id": lzma.FILTER_LZMA2}]))
        self._t('xf',
                lambda: lzma.compress(text,
                                      format=lzma.FORMAT_RAW,
                                      filters=[{"id": lzma.FILTER_DELTA},
                                               {"id": lzma.FILTER_LZMA2}]))
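Note that lzma.FORMAT_RAW produces a headerless stream, so the benchmark above measures pure filter output. A raw stream records no metadata, which means decompression must be handed the identical filter chain; a small round-trip sketch:

import lzma

filters = [{"id": lzma.FILTER_DELTA}, {"id": lzma.FILTER_LZMA2}]
packed = lzma.compress(b"example payload", format=lzma.FORMAT_RAW, filters=filters)
# The same chain is required to decode a raw stream:
assert lzma.decompress(packed, format=lzma.FORMAT_RAW, filters=filters) == b"example payload"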
Code example #6
File: web_cache.py Project: EzhikTT/GoogleSpeech
  def __setitem__(self, url_data, data):
    """ Store an item in cache. """
    if isinstance(url_data, tuple):
      url, post_data = url_data
    else:
      url = url_data
      post_data = None

    if self.__compression is Compression.DEFLATE:
      buffer = memoryview(data)
      data = zlib.compress(buffer, self.__compression_level)
    elif self.__compression is Compression.BZIP2:
      buffer = memoryview(data)
      data = bz2.compress(buffer, compresslevel=self.__compression_level)
    elif self.__compression is Compression.LZMA:
      buffer = memoryview(data)
      data = lzma.compress(buffer, format=lzma.FORMAT_ALONE, preset=self.__compression_level)

    with self.__connexion:
      if post_data is not None:
        post_bin_data = sqlite3.Binary(pickle.dumps(post_data, protocol=3))
        self.__connexion.execute("INSERT OR REPLACE INTO " +
                                 self.__table_name +
                                 "_post (url, post_data, added_timestamp, last_accessed_timestamp,data) VALUES (?, ?, strftime('%s','now'), strftime('%s','now'), ?);",
                                 (url, post_bin_data, sqlite3.Binary(data)))
      else:
        self.__connexion.execute("INSERT OR REPLACE INTO " +
                                 self.__table_name +
                                 " (url, added_timestamp, last_accessed_timestamp,data) VALUES (?, strftime('%s','now'), strftime('%s','now'), ?);",
                                 (url, sqlite3.Binary(data)))
Code example #7
File: Database.py Project: sjsafranek/SeCupid
	def saveProfile(self, username, profile_source):
		""" Save profile to database. 
			Profile linked to user in users table.
			Args:
				username (str): okcupid username
				profile_source (str): html source of profile page
		"""
		data = lzma.compress(profile_source.encode())
		encoded = base64.b64encode(data).decode('utf-8')
		profile = self.getProfile(username)
		if not profile:
			self.logger.info("Creating profile: %s" % username)
			profile = Models.Profile(username)
			self.session.add(profile)
			self.session.commit()
			profile.source = encoded
			self.session.commit()
			user = self.getUser(username)
			if not user:
				user = User(username)
				self.session.add(user)
				self.session.commit()
			user.profile.append(profile)
			self.session.commit()
		# elif self.update:
		else:
			self.logger.info("Updating profile: %s" % username)
			profile.source = encoded
			self.session.commit()
Code example #8
File: compress.py Project: dngferreira/hrfanalyse
def lzma_compress(inputfile, level, decompress):
    """
    (str, int, bool) -> CompressionData

    Compresses one file using the Python implementation of lzma.

    NOTE: The lzma module was created for Python 3; the version backported to
    Python 2.7 does not have a level parameter. To keep this code backwards
    compatible, the level parameter is never used, so the default level of 6
    applies.
    """

    original_size = int(os.stat(inputfile).st_size)
    with open(inputfile, "rU") as fdorig:
        origlines = fdorig.read()
    origtext = memoryview(bytearray(origlines, "utf8"))
    compressedtext = memoryview(lzma.compress(origtext.tobytes()))
    compressed_size = len(compressedtext)

    decompress_time = None
    if decompress:
        decompress_time = min(timeit.repeat(lambda: lzma.decompress(compressedtext.tobytes()),
                                            number=10,
                                            repeat=3, timer=time.clock))

    cd = CompressionData(original_size, compressed_size, decompress_time)

    return cd
Code example #9
File: compression.py Project: Neo23x0/pyminifier
def lzma_pack(source):
    """
    Returns 'source' as a lzma-compressed, self-extracting python script.

    .. note::

        This method uses up more space than the zip_pack method but it has the
        advantage in that the resulting .py file can still be imported into a
        python program.
    """
    import lzma, base64
    out = ""
    # Preserve shebangs (don't care about encodings for this)
    first_line = source.split('\n')[0]
    if analyze.shebang.match(first_line):
        if py3:
            if first_line.rstrip().endswith('python'): # Make it python3
                first_line = first_line.rstrip()
                first_line += '3' #!/usr/bin/env python3
        out = first_line + '\n'
    compressed_source = lzma.compress(source.encode('utf-8'))
    out += 'import lzma, base64\n'
    out += "exec(lzma.decompress(base64.b64decode('"
    out += base64.b64encode(compressed_source).decode('utf-8')
    out += "')))\n"
    return out
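Hypothetical usage of lzma_pack (file names are placeholders): the packed output is an ordinary Python file whose only work at import time is to base64-decode, decompress, and exec the original source.

with open('script.py') as f:            # placeholder input file
    packed = lzma_pack(f.read())
with open('script.packed.py', 'w') as f:
    f.write(packed)                     # still runnable and importable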
Code example #10
def findBestLzma(rawData):
	result = rawData
	for i in range(10):
		tempData = lzma.compress(rawData, format=lzma.FORMAT_ALONE, check=-1, preset=i)
		if len(tempData) < len(result):
			result = tempData
	return result
Code example #11
File: sunspot_server.py Project: withgemini/cft2014
        def compress(self, bytesToCompress):
            '''@param bytesToCompress - the bytes to compress, duh
            @return the compressed bytes'''

            self.logger.error("CompressorLzmaXz: beginning compression")
            zeBytes =  lzma.compress(bytesToCompress, format=lzma.FORMAT_XZ)
            self.logger.error("CompressorLzmaXz: finished compression")
            return zeBytes
Code example #12
    def run_lzma(self):
        start = time()
        self.lzma_compressed = lzma.compress(self.raw_data)
        self.lzma_compress_time = time() - start

        start = time()
        lzma.decompress(self.lzma_compressed)
        self.lzma_decompress_time = time() - start
Code example #13
File: test_liblzma.py Project: pretaweb/pyliblzma
    def test_dict_size(self):
        dict = lzma.options.dict_size[0]
        # Since using very large dictionaries requires a very large amount of
        # memory, don't go beyond 64MB (lzma.options.dict_size[1]) for testing.
        while dict <= 1 << 26:
            result = lzma.compress(self.data, options={'dict_size': dict})
            self.assertEqual(self.data, lzma.decompress(result))
            dict = dict * 2
        self.failUnlessRaises(ValueError, lzma.compress, self.data, options={'dict_size': lzma.options.dict_size[1] + 1})
        self.failUnlessRaises(ValueError, lzma.compress, self.data, options={'dict_size': lzma.options.dict_size[0] - 1})
Code example #14
File: test_liblzma.py Project: pretaweb/pyliblzma
    def test_lclp(self):
        for lcb in xrange(lzma.options.lc[0], lzma.options.lc[1] + 1):
            for lpb in xrange(lzma.options.lc[1] - lcb):
                result = lzma.compress(self.data, options={'lc': lcb, 'lp': lpb})
                self.assertEqual(self.data, lzma.decompress(result))
        self.failUnlessRaises(ValueError, lzma.compress, self.data, options={'lc': lzma.options.lc[0] - 1})
        self.failUnlessRaises(ValueError, lzma.compress, self.data, options={'lc': lzma.options.lc[1] + 1})
        self.failUnlessRaises(ValueError, lzma.compress, self.data, options={'lp': lzma.options.lp[0] - 1})
        self.failUnlessRaises(ValueError, lzma.compress, self.data, options={'lp': lzma.options.lp[1] + 1})
Code example #15
File: codecs.py Project: alimanfoo/zarr
        def encode(self, buf):

            # if numpy array, can only handle C contiguous directly
            if isinstance(buf, np.ndarray) and not buf.flags.c_contiguous:
                buf = buf.tobytes(order='A')

            # do compression
            return lzma.compress(buf, format=self.format, check=self.check,
                                 preset=self.preset, filters=self.filters)
Code example #16
 def _dump2stor(self, data, key=""):
     if len(data) == 0:
         return ""
     if key == "":
         key = j.tools.hash.md5_string(data)
     data2 = lzma.compress(data) if self.compress else data
     if not self.client.exists(key=key, repoId=self.repoId):
         self.client.set(key=key, data=data2, repoId=self.repoId)

     return key
Code example #17
File: base.py Project: awesome-python/p2p-project
def compress(msg, method):  # takes bytes, returns bytes
    """Shortcut method for compression"""
    if method == flags.gzip:
        return zlib.compress(msg)
    elif method == flags.bz2:
        return bz2.compress(msg)
    elif method == flags.lzma:
        return lzma.compress(msg)
    else:  # pragma: no cover
        raise Exception('Unknown compression method')
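The matching decompression shortcut is presumably the mirror image; a sketch under that assumption:

def decompress(msg, method):  # takes bytes, returns bytes
    """Hypothetical mirror of compress() above."""
    if method == flags.gzip:
        return zlib.decompress(msg)
    elif method == flags.bz2:
        return bz2.decompress(msg)
    elif method == flags.lzma:
        return lzma.decompress(msg)
    else:  # pragma: no cover
        raise Exception('Unknown compression method')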
Code example #18
File: p2p.py Project: echelon60/p2p-project
def compress(msg, method):
    """Shortcut method for compression"""
    if method == flags.gzip:
        return zlib.compress(msg)
    elif method == flags.bz2:
        return bz2.compress(msg)
    elif method == flags.lzma:
        return lzma.compress(msg)
    else:
        raise Exception('Unknown compression method')
Code example #19
File: test_liblzma.py Project: pretaweb/pyliblzma
    def test_decompress_large_stream(self):
        # decompress large block of repeating data, stream version
        decompress = lzma.LZMADecompressor()
        infile = StringIO(lzma.compress(self.data_large, options={'format': 'alone'}))
        outfile = StringIO()
        while 1:
            tmp = infile.read(1)
            if not tmp: break
            outfile.write(decompress.decompress(tmp))
        outfile.write(decompress.flush())
        self.failUnless(self.data_large == outfile.getvalue())
        decompress.reset()
        infile = StringIO(lzma.compress(self.data_large, options={'format': 'xz'}))
        outfile = StringIO()
        while 1:
            tmp = infile.read(1)
            if not tmp: break
            outfile.write(decompress.decompress(tmp))
        outfile.write(decompress.flush())
        self.failUnless(self.data_large == outfile.getvalue())
Code example #20
File: sync.py Project: MegaMark16/freeciv-android
def comment_upload(install_time):
    with ui.execute_later_lock:
        ui.execute_later.append(lambda: ui.message('Compressing log...'))

    content = lzma.compress(open(save.get_save_dir() + '/more.log').read())

    with ui.execute_later_lock:
        ui.execute_later.append(lambda: ui.back())

    request(lambda result: comment_next(install_time), 'upload_log', content, install_time,
            banner="Uploading log (%dkB)" % (len(content)/1024))
Code example #21
File: test_liblzma.py Project: pretaweb/pyliblzma
 def test_decompress_large_stream_bigchunks(self):
     # decompress large block of repeating data, stream version with big chunks
     decompress = lzma.LZMADecompressor()
     infile = StringIO(lzma.compress(self.data_large))
     outfile = StringIO()
     while 1:
         tmp = infile.read(1024)
         if not tmp: break
         outfile.write(decompress.decompress(tmp))
     outfile.write(decompress.flush())
     self.failUnless(self.data_large == outfile.getvalue())
Code example #22
File: common.py Project: njsmith/zs
 def lzma_compress_dsize20(payload, compress_level=0, extreme=True):
     if compress_level > 1:
         raise ValueError("lzma compress level must be 0 or 1")
     if extreme:
         compress_level |= lzma.PRESET_EXTREME
     return lzma.compress(payload,
                          format=lzma.FORMAT_RAW,
                          filters=[{
                              "id": lzma.FILTER_LZMA2,
                              "preset": compress_level,
                          }])
Code example #23
File: apt.py Project: victorlin/depot
 def commit_sources_metadata(self):
     # Update the Sources file
     sources_path = 'dists/{0}/{1}/source/Sources'.format(self.codename, self.component)
     if sources_path in self.storage:
         return
     sources_content = ''
     self.storage.upload(sources_path, sources_content)
     self.storage.upload(sources_path+'.gz', gzip_compress(sources_content))
     self.storage.upload(sources_path+'.bz2', bz2.compress(sources_content))
     if lzma:
         self.storage.upload(sources_path+'.lzma', lzma.compress(sources_content))
     self.dirty_sources = True
Code example #24
File: apt.py Project: victorlin/depot
 def commit_package_metadata(self, arch, pkgs):
     # Update the Packages file
     packages_path = 'dists/{0}/{1}/binary-{2}/Packages'.format(self.codename, self.component, arch)
     packages = AptPackages(self.storage, self.storage.download(packages_path, skip_hash=True) or '')
     for pkg in pkgs:
         packages.add(pkg)
     packages_raw = str(packages)
     self.storage.upload(packages_path, packages_raw)
     self.storage.upload(packages_path+'.gz', gzip_compress(packages_raw))
     self.storage.upload(packages_path+'.bz2', bz2.compress(packages_raw))
     if lzma:
         self.storage.upload(packages_path+'.lzma', lzma.compress(packages_raw))
Code example #25
File: pab_sieve.py Project: Miaou/ProjectEuler
def stats(n=8*1048576*10):
    t0 = time.time()
    bufb = sieveBit(n//8)
    print('bufb done ({:.2f}s)'.format(time.time()-t0))
    t0 = time.time()
    bufB = sieveByte(n)
    print('bufB done ({:.2f}s)'.format(time.time()-t0))
    t0 = time.time()
    print('deflate: b {: 6} B {: 6} ({:.2f}s)'.format(len(zlib.compress(bufb,9)), len(zlib.compress(bufB,9)), time.time()-t0))
    t0 = time.time()
    print('bz2    : b {: 6} B {: 6} ({:.2f}s)'.format(len(bz2.compress(bufb,9)), len(bz2.compress(bufB,9)), time.time()-t0))
    t0 = time.time()
    print('lzma   : b {: 6} B {: 6} ({:.2f}s)'.format(len(lzma.compress(bufb)), len(lzma.compress(bufB)), time.time()-t0))
Code example #26
File: sync.py Project: renatolouro/freeciv-android
def upload_save(path, name):
    data = save.open_save(path).read()
    compressed = lzma.compress(data)
    if name.endswith(".gz"):
        name = name[:-3]
    request(
        uploaded_save,
        "upload_content",
        "android",
        name,
        compressed,
        banner="Uploading save... (%dkB)" % (len(compressed) / 1024),
    )
Code example #27
 def compress_chunk(self, chunk, i, lock):
     compressed_chunk = lzma.compress(chunk)
     lock.acquire()
     try:
         with open("tmp", "rb") as f:
             d = pickle.load(f)
     except (IOError, EOFError):
         d = {}
     d[i] = compressed_chunk
     with open("tmp", "wb") as f:  # pickle needs a binary-mode file
         pickle.dump(d, f)
     lock.release()
     print("chunk compressed:", i, [x for x in d])
Code example #28
File: imagegenerator.py Project: frapposelli/photon
def generateCompressedFile(inputfile, outputfile, formatstring):
    if formatstring == "w:xz":
        in_file = open(inputfile, 'rb')
        in_data = in_file.read()

        out_file = open(inputfile+".xz", 'wb')
        out_file.write(xz.compress(in_data))
        in_file.close()
        out_file.close()
    else:
        tarout = tarfile.open(outputfile, formatstring)
        tarout.add(inputfile, arcname=os.path.basename(inputfile))
        tarout.close()
Code example #29
File: test_liblzma.py Project: pretaweb/pyliblzma
    def test_compression_decompression(self, dict_size=1 << 23):
        # call compression and decompression on random data of various sizes
        for i in xrange(18):
            size = 1 << i
            original = generate_random(size)
            # FIXME:
            """
            result = lzma.decompress(lzma.compress(original, options={'dict_size': dict_size, 'format': 'alone'}))
            self.assertEqual(len(result), size)
            self.assertEqual(md5(original).hexdigest(), md5(result).hexdigest())
            """
            result = lzma.decompress(lzma.compress(original, options={'dict_size': dict_size, 'format': 'xz'}))
            self.assertEqual(len(result), size)
            self.assertEqual(md5(original).hexdigest(), md5(result).hexdigest())
Code example #30
 def do_result_from_properties(self):
     for po in self.Project.objects.all():
         tests = self.get_tests(po)
         for obj in self.get_objects_for_project(po):
             pending_status = api.models.Result.RUNNING \
                     if self.get_property(obj, "testing.started") \
                     else api.models.Result.PENDING
             done_tests = set()
             results = []
             for prop in self.get_properties(obj, name__startswith='testing.report.'):
                 tn = prop.name[len('testing.report.'):]
                 done_tests.add(tn)
                 r = self.create_result(obj)
                 report = load_property(prop)
                 passed = report["passed"]
                 del report["passed"]
                 log = self.get_property(obj, "testing.log." + tn)
                 r.name = 'testing.' + tn
                 r.status = api.models.Result.SUCCESS if passed else api.models.Result.FAILURE
                 r.last_update = datetime.datetime.utcnow()
                 r.data = report
                 if log:
                     log_xz = lzma.compress(log.encode("utf-8"))
                     log_entry = self.LogEntry(data_xz=log_xz)
                     log_entry.save()
                     r.log_entry = log_entry
                     self.delete_property(obj, "testing.log." + tn)
                 r.save()
                 results.append(r)
                 self.delete_property(obj, "testing.report." + tn)
             if self.get_property(obj, "testing.ready"):
                 for tn, test in tests.items():
                     if tn in done_tests:
                         continue
                     r = self.create_result(obj)
                     r.name = 'testing.' + tn
                     r.status = pending_status
                     r.last_update = datetime.datetime.utcnow()
                     r.save()
                     results.append(r)
                 self.delete_property(obj, "testing.ready")
             #print(obj, len(done_tests), len(tests) - len(done_tests))
             obj.results.add(*results)
             try:
                 self.delete_property(obj, "testing.started")
                 self.delete_property(obj, "testing.failed")
                 self.delete_property(obj, "testing.start-time")
             except:
                 pass
Code example #31
File: dill.py Project: dill-format/dill-python
elif command == "compress-bz2":
    with open(filename, "r") as f:
        cdata = bz2.compress(f.read().encode("utf-8"))
    sys.stdout.buffer.write(cdata)
elif command == "decompress-bz2":
    with open(filename, "rb") as f:
        udata = bz2.decompress(f.read()).decode("utf-8")
    sys.stdout.write(udata)

# XZ
elif command == "read-xz":
    with open(filename, "rb") as f:
        card = toml.loads(lzma.decompress(f.read()).decode("utf-8"))
elif command == "compress-xz":
    with open(filename, "r") as f:
        cdata = lzma.compress(f.read().encode("utf-8"))
    sys.stdout.buffer.write(cdata)
elif command == "decompress-xz":
    with open(filename, "rb") as f:
        udata = lzma.decompress(f.read()).decode("utf-8")
    sys.stdout.write(udata)

# Brotli
elif command == "read-br":
    with open(filename, "rb") as f:
        card = toml.loads(brotli.decompress(f.read()).decode("utf-8"))
elif command == "compress-br":
    with open(filename, "r") as f:
        cdata = brotli.compress(f.read().encode("utf-8"))
    sys.stdout.buffer.write(cdata)
elif command == "decompress-br":
Code example #32
 def compress(data):
     return _lzma.compress(data, lzma._format_, filters=lzma._filters_)
Code example #33
def compress_message(message):
    compressed_message = lzma.compress(message)
    return compressed_message
Code example #34
 def save_as(self, filename: str) -> None:
     """Save this Engine instance as a compressed file."""
     save_data = lzma.compress(pickle.dumps(self))
     with open(filename, 'wb') as f:
         f.write(save_data)
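The corresponding loader is not shown in the snippet; a minimal sketch, assuming the file was produced by save_as above:

import lzma
import pickle

def load_engine(filename: str):
    # Inverse of save_as: read, decompress, then unpickle.
    with open(filename, 'rb') as f:
        return pickle.loads(lzma.decompress(f.read()))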
Code example #35
File: compress.py Project: fzls/djc_helper
def compress_in_memory_with_lzma(src_bytes: bytes) -> bytes:
    return lzma.compress(src_bytes)
Code example #36
File: app.py Project: lgeiger/black-playground
def compress_state(data):
    compressed = lzma.compress(json.dumps(data).encode("utf-8"))
    return base64.urlsafe_b64encode(compressed).decode("utf-8")
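Decoding reverses the three steps (URL-safe base64, LZMA, JSON); a minimal sketch assuming the same pipeline:

import base64
import json
import lzma

def decompress_state(state: str):
    # urlsafe base64 -> xz stream -> JSON
    return json.loads(lzma.decompress(base64.urlsafe_b64decode(state)))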
Code example #37
File: test_compressor.py Project: skytreader/pghoard
 def compress(self, data):
     return lzma.compress(data)
Code example #38
def test_ddo_on_chain():
    """Tests chain operations on a DDO."""
    config = ConfigProvider.get_config()
    ddo_address = get_contracts_addresses(
        "ganache", config)[MetadataContract.CONTRACT_NAME]
    dtfactory_address = get_contracts_addresses(
        "ganache", config)[DTFactory.CONTRACT_NAME]
    ddo_registry = MetadataContract(ddo_address)
    wallet = get_publisher_wallet()
    web3 = Web3Provider.get_web3()

    dtfactory = DTFactory(dtfactory_address)
    tx_id = dtfactory.createToken("", "dt1", "dt1", 1000, wallet)
    dt = DataToken(dtfactory.get_token_address(tx_id))

    # test create ddo
    asset = get_ddo_sample(dt.address)
    old_name = asset.metadata["main"]["name"]
    txid = ddo_registry.create(
        asset.asset_id, b"", lzma.compress(web3.toBytes(text=asset.as_text())),
        wallet)
    assert ddo_registry.verify_tx(txid), f"create ddo failed: txid={txid}"
    logs = ddo_registry.event_MetadataCreated.processReceipt(
        ddo_registry.get_tx_receipt(txid))
    assert logs, f"no logs found for create ddo tx {txid}"
    log = logs[0]
    assert add_0x_prefix(log.args.dataToken) == asset.asset_id
    # read back the asset ddo from the event log
    ddo_text = web3.toText(lzma.decompress(log.args.data))
    assert ddo_text == asset.as_text(), "ddo text does not match original."

    _asset = Asset(json_text=ddo_text)
    assert _asset.did == asset.did, "did does not match."
    name = _asset.metadata["main"]["name"]
    assert name == old_name, f"name does not match: {name} != {old_name}"

    # test_update ddo
    asset.metadata["main"]["name"] = "updated name for test"
    txid = ddo_registry.update(
        asset.asset_id, b"", lzma.compress(web3.toBytes(text=asset.as_text())),
        wallet)
    assert ddo_registry.verify_tx(txid), f"update ddo failed: txid={txid}"
    logs = ddo_registry.event_MetadataUpdated.processReceipt(
        ddo_registry.get_tx_receipt(txid))
    assert logs, f"no logs found for update ddo tx {txid}"
    log = logs[0]
    assert add_0x_prefix(log.args.dataToken) == asset.asset_id
    # read back the asset ddo from the event log
    ddo_text = web3.toText(lzma.decompress(log.args.data))
    assert ddo_text == asset.as_text(), "ddo text does not match original."
    _asset = Asset(json_text=ddo_text)
    assert (_asset.metadata["main"]["name"] == "updated name for test"
            ), "name does not seem to be updated."
    assert DataToken(asset.asset_id).contract_concise.isMinter(wallet.address)

    # test update fails from wallet other than the original publisher
    bob = get_consumer_wallet()
    try:
        txid = ddo_registry.update(
            asset.asset_id, b"",
            lzma.compress(web3.toBytes(text=asset.as_text())), bob)
        assert ddo_registry.verify_tx(
            txid) is False, f"update ddo failed: txid={txid}"
        logs = ddo_registry.event_MetadataUpdated.processReceipt(
            ddo_registry.get_tx_receipt(txid))
        assert (
            not logs
        ), f"should be no logs for MetadataUpdated, but seems there are some logs: tx {txid}, logs {logs}"
    except ValueError:
        print("as expected, only owner can update a published ddo.")

    # test ddoOwner
    assert DataToken(asset.asset_id).contract_concise.isMinter(
        wallet.address
    ), (f"ddo owner does not match the expected publisher address {wallet.address}, "
        f"owner is {DataToken(asset.asset_id).contract_concise.minter(wallet.address)}"
        )
Code example #39
    def _do_save_quotas(self, run_number, quotas):  #override
        # Will save quotas and statuses

        if len(self._site_id_map) == 0:
            self._make_site_id_map()

        if type(run_number) is int:
            db_file_name = '%s/snapshot_%09d.db' % (
                self.config.snapshots_spool_dir, run_number)
        else:
            # run_number is actually the partition name
            db_file_name = '%s/snapshot_%s.db' % (
                self.config.snapshots_spool_dir, run_number)

        # DB file should exist already - this function is called after save_deletion_decisions

        snapshot_db = sqlite3.connect(db_file_name)
        snapshot_cursor = snapshot_db.cursor()

        sql = 'CREATE TABLE `statuses` ('
        sql += '`id` TINYINT PRIMARY KEY NOT NULL,'
        sql += '`value` TEXT NOT NULL'
        sql += ')'
        snapshot_db.execute(sql)
        snapshot_db.execute('INSERT INTO `statuses` VALUES (%d, \'ready\')' %
                            Site.STAT_READY)
        snapshot_db.execute(
            'INSERT INTO `statuses` VALUES (%d, \'waitroom\')' %
            Site.STAT_WAITROOM)
        snapshot_db.execute('INSERT INTO `statuses` VALUES (%d, \'morgue\')' %
                            Site.STAT_MORGUE)
        snapshot_db.execute('INSERT INTO `statuses` VALUES (%d, \'unknown\')' %
                            Site.STAT_UNKNOWN)

        sql = 'CREATE TABLE `sites` ('
        sql += '`site_id` SMALLINT PRIMARY KEY NOT NULL,'
        sql += '`status_id` TINYINT NOT NULL REFERENCES `statuses`(`id`),'
        sql += '`quota` INT NOT NULL'
        sql += ')'
        snapshot_db.execute(sql)

        sql = 'INSERT INTO `sites` VALUES (?, ?, ?)'

        for site, quota in quotas.iteritems():
            snapshot_cursor.execute(
                sql, (self._site_id_map[site.name], site.status, quota))

        snapshot_db.commit()

        snapshot_cursor.close()
        snapshot_db.close()

        self._fill_snapshot_cache('sites', run_number, overwrite=True)

        if type(run_number) is int:
            # This was a numbered cycle
            # Archive the sqlite3 file
            # Relying on the fact save_quotas is called after save_deletion_decisions

            srun = '%09d' % run_number
            archive_dir_name = '%s/%s/%s' % (self.config.snapshots_archive_dir,
                                             srun[:3], srun[3:6])
            xz_file_name = '%s/snapshot_%09d.db.xz' % (archive_dir_name,
                                                       run_number)

            try:
                os.makedirs(archive_dir_name)
            except OSError:
                pass

            with open(db_file_name, 'rb') as db_file:
                with open(xz_file_name, 'wb') as xz_file:
                    xz_file.write(lzma.compress(db_file.read()))
Code example #40
    if len(result) < 1:
        result = [0]

    print('Finished converting.')
    return result


n = len(codebook)

raw_file = open(input_file, 'rb').read()

if compress_data:
    import lzma

    print("Compressing file with LZMA...")
    bytes_to_encode = lzma.compress(data=raw_file, format=lzma.FORMAT_XZ)
else:
    bytes_to_encode = raw_file

open(encoded_bytes_path,
     'w').write('\n'.join([str(b) for b in bytes_to_encode]))

converted_to_new_base = bytes_to_base_n(bytes_to_encode, n)

# Encode each digit into DNA
encoded = []
for d in converted_to_new_base:
    w = codebook[d]
    encoded += [w]

# Write DNA sequence to files
Code example #41
File: compress_lzma.py Project: LFenske/LFbackup
 def compress(self, s):
     return lzma.compress(s)
Code example #42
def xz(data):
    return lzma.compress(data, preset=9, check=lzma.CHECK_NONE)
Code example #43
def write(context, cursor, givenbytes, compression, key, keycursor):
    if compression is None:
        algorithm, level = 0, 0
    else:
        algorithm, level = compression.pair

    if algorithm == 0 or level == 0:
        key.fObjlen = len(givenbytes)
        key.fNbytes = key.fObjlen + key.fKeylen
        key.write(keycursor, context._sink)
        cursor.write_data(context._sink, givenbytes)
        return

    _header = struct.Struct("2sBBBBBBB")
    uncompressedbytes = len(givenbytes)
    u1 = (uncompressedbytes >> 0) & 0xff
    u2 = (uncompressedbytes >> 8) & 0xff
    u3 = (uncompressedbytes >> 16) & 0xff

    if algorithm == uproot.const.kZLIB:
        algo = b"ZL"
        import zlib
        after_compressed = zlib.compress(givenbytes, level)
        compressedbytes = len(after_compressed)
        if compressedbytes < uncompressedbytes:
            c1 = (compressedbytes >> 0) & 0xff
            c2 = (compressedbytes >> 8) & 0xff
            c3 = (compressedbytes >> 16) & 0xff
            method = 8
            cursor.write_fields(context._sink, _header, algo, method, c1, c2,
                                c3, u1, u2, u3)
            cursor.write_data(context._sink, after_compressed)
            key.fObjlen = uncompressedbytes
            key.fNbytes = compressedbytes + key.fKeylen + 9
            key.write(keycursor, context._sink)
        else:
            cursor.write_data(context._sink, givenbytes)

    elif algorithm == uproot.const.kLZ4:
        algo = b"L4"
        try:
            import xxhash
        except ImportError:
            raise ImportError(
                "Install xxhash package with:\n    pip install xxhash\nor\n    conda install -c conda-forge python-xxhash"
            )
        try:
            import lz4.block
        except ImportError:
            raise ImportError(
                "Install lz4 package with:\n    pip install lz4\nor\n    conda install -c anaconda lz4"
            )
        if level >= 4:
            after_compressed = lz4.block.compress(givenbytes,
                                                  compression=level,
                                                  mode="high_compression",
                                                  store_size=False)
        else:
            after_compressed = lz4.block.compress(givenbytes, store_size=False)
        compressedbytes = len(after_compressed) + 8
        checksum = xxhash.xxh64(after_compressed).digest()
        if compressedbytes < uncompressedbytes:
            c1 = (compressedbytes >> 0) & 0xff
            c2 = (compressedbytes >> 8) & 0xff
            c3 = (compressedbytes >> 16) & 0xff
            method = lz4.library_version_number() // (100 * 100)
            cursor.write_fields(context._sink, _header, algo, method, c1, c2,
                                c3, u1, u2, u3)
            cursor.write_data(context._sink, checksum)
            cursor.write_data(context._sink, after_compressed)
            key.fObjlen = uncompressedbytes
            key.fNbytes = compressedbytes + key.fKeylen + 9
            key.write(keycursor, context._sink)
        else:
            cursor.write_data(context._sink, givenbytes)

    elif algorithm == uproot.const.kLZMA:
        algo = b"XZ"
        try:
            import lzma
        except ImportError:
            try:
                from backports import lzma
            except ImportError:
                raise ImportError(
                    "Install lzma package with:\n    pip install backports.lzma\nor\n    conda install -c conda-forge backports.lzma\n(or just use Python >= 3.3)."
                )
        after_compressed = lzma.compress(givenbytes, preset=level)
        compressedbytes = len(after_compressed)
        if compressedbytes < uncompressedbytes:
            c1 = (compressedbytes >> 0) & 0xff
            c2 = (compressedbytes >> 8) & 0xff
            c3 = (compressedbytes >> 16) & 0xff
            method = 0
            cursor.write_fields(context._sink, _header, algo, method, c1, c2,
                                c3, u1, u2, u3)
            cursor.write_data(context._sink, after_compressed)
            key.fObjlen = uncompressedbytes
            key.fNbytes = compressedbytes + key.fKeylen + 9
            key.write(keycursor, context._sink)
        else:
            cursor.write_data(context._sink, givenbytes)

    elif algorithm == uproot.const.kOldCompressionAlgo:
        raise ValueError(
            "unsupported compression algorithm: 'old' (according to ROOT comments, hasn't been used in 20+ years!)"
        )
    else:
        raise ValueError(
            "Unrecognized compression algorithm: {0}".format(algorithm))
Code example #44
 def compress(cls, data):
     return lzma.compress(data)
Code example #45
File: compress.py Project: SiegeEngineers/aoc-mgz-db
def compress_tiles(tiles):
    """Compress map tiles."""
    data = b''
    for tile in tiles:
        data += struct.pack('<bb', tile['terrain_id'], tile['elevation'])
    return lzma.compress(data, filters=LZMA_FILTERS)
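LZMA_FILTERS is a project-level constant not shown here. Since compress is called without a format argument, the output is a standard XZ container, which records the filter chain itself; under that assumption the inverse can be sketched as:

import struct
import lzma

def decompress_tiles(data: bytes):
    # The XZ container stores the filter chain, so none is needed to decode.
    raw = lzma.decompress(data)
    return [{'terrain_id': t, 'elevation': e} for t, e in struct.iter_unpack('<bb', raw)]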
Code example #46
 def update_event(self, inp=-1):
     self.set_output_val(0, lzma.compress(self.input(0), self.input(1), self.input(2), self.input(3), self.input(4)))
Code example #47
File: lzma.py Project: jackton1/django-redis
 def compress(self, value: bytes) -> bytes:
     if len(value) > self.min_length:
         return lzma.compress(value, preset=self.preset)
     return value
Code example #48
File: release.py Project: simplybusiness/Kiln
def main(version, github_personal_access_token,
         kiln_automation_docker_access_token):
    no_verify = True

    kiln_repo = Repo.discover()
    working_copy_clean = check_for_expected_working_copy_changes(kiln_repo)
    if not working_copy_clean:
        raise click.ClickException(
            "Working copy contains uncomitted changes except for CHANGELOG.md")
    dulwich.porcelain.branch_create(kiln_repo.path, f"release/{version}")
    release_branch_ref = f"refs/heads/release/{version}".encode()
    kiln_repo.refs.set_symbolic_ref(b'HEAD', release_branch_ref)

    kiln_repo.stage(['CHANGELOG.md'])
    changelog_commit_hash = kiln_repo.do_commit(
        message=f"Docs: Update CHANGELOG.md for {version} release.".encode(),
        no_verify=no_verify)
    changelog_commit = kiln_repo.get_object(changelog_commit_hash)

    buf = io.BytesIO()
    dulwich.porcelain.diff_tree(
        kiln_repo,
        kiln_repo.get_object(changelog_commit.parents[0]).tree,
        changelog_commit.tree, buf)

    diffs = whatthepatch.parse_patch(buf.getvalue().decode("utf-8"))
    changelog_lines = []
    for diff in diffs:
        for change in diff.changes:
            if change.old is None and change.new is not None and change.line != "":
                changelog_lines.append(change.line)

    set_cargo_toml_version(kiln_repo, "kiln_lib", version)
    sh.cargo.check("--manifest-path",
                   os.path.join(kiln_repo.path, "kiln_lib", "Cargo.toml"),
                   "--all-features",
                   _err=sys.stderr)
    kiln_repo.stage(['kiln_lib/Cargo.toml', 'kiln_lib/Cargo.lock'])
    kiln_lib_version_commit = kiln_repo.do_commit(
        message=f"Kiln_lib: Update component version to {version}".encode(),
        no_verify=no_verify)
    origin = kiln_repo.get_config().get(('remote', 'origin'), 'url')
    dulwich.porcelain.push(kiln_repo,
                           remote_location=origin,
                           refspecs=release_branch_ref)

    for component in [
            "data-collector", "data-forwarder", "report-parser",
            "slack-connector"
    ]:
        set_kiln_lib_dependency(kiln_repo,
                                component,
                                sha=kiln_lib_version_commit)
        sh.cargo.check("--manifest-path",
                       os.path.join(kiln_repo.path, component, "Cargo.toml"),
                       "--all-features",
                       _err=sys.stderr)
        kiln_repo.stage([f'{component}/Cargo.toml', f'{component}/Cargo.lock'])
        kiln_repo.do_commit(
            message=
            f"{component.capitalize()}: Update kiln_lib dependency to {version}"
            .encode(),
            no_verify=no_verify)
        set_cargo_toml_version(kiln_repo, component, version)
        sh.cargo.check("--manifest-path",
                       os.path.join(kiln_repo.path, component, "Cargo.toml"),
                       "--all-features",
                       _err=sys.stderr)
        kiln_repo.stage([f'{component}/Cargo.toml', f'{component}/Cargo.lock'])
        kiln_repo.do_commit(
            message=
            f"{component.capitalize()}: Update component version to {version}".
            encode(),
            no_verify=no_verify)

    set_cargo_toml_version(kiln_repo, "cli", version)
    sh.cargo.check("--manifest-path",
                   os.path.join(kiln_repo.path, 'cli', "Cargo.toml"),
                   "--all-features",
                   _err=sys.stderr)
    kiln_repo.stage(['cli/Cargo.toml', 'cli/Cargo.lock'])
    kiln_repo.do_commit(
        message=f"CLI: Update component version to {version}".encode(),
        no_verify=no_verify)

    signing_key_id = kiln_repo.get_config()[(
        b'user', )][b'signingkey'].decode('utf-8')
    dulwich.porcelain.tag_create(kiln_repo,
                                 f"v{version}".encode(),
                                 message=f"v{version}".encode(),
                                 annotated=True,
                                 sign=signing_key_id)
    dulwich.porcelain.push(kiln_repo,
                           remote_location=origin,
                           refspecs=[release_branch_ref])
    dulwich.porcelain.push(kiln_repo,
                           remote_location=origin,
                           refspecs=[f"refs/tags/v{version}".encode()])

    sh.cargo.make("build-data-forwarder-musl",
                  _cwd=os.path.join(kiln_repo.path, "data-forwarder"),
                  _err=sys.stderr)
    shutil.copy2(
        os.path.join(kiln_repo.path, "bin", "data-forwarder"),
        os.path.join(kiln_repo.path, "tool-images", "ruby", "bundler-audit"))
    shutil.copy2(
        os.path.join(kiln_repo.path, "bin", "data-forwarder"),
        os.path.join(kiln_repo.path, "tool-images", "python", "safety"))
    docker_client = docker.from_env()
    docker_client.login(username="******",
                        password=kiln_automation_docker_access_token)

    image_tags = docker_image_tags(version)
    (bundler_audit_image, build_logs) = docker_client.images.build(
        path=os.path.join(kiln_repo.path, "tool-images", "ruby",
                          "bundler-audit"),
        tag=f"kiln/bundler-audit:{image_tags[0]}",
        rm=True)
    for line in build_logs:
        try:
            print(line['stream'], end='')
        except KeyError:
            pass

    push_logs = docker_client.images.push("kiln/bundler-audit",
                                          tag=image_tags[0])
    print(push_logs)
    for tag in image_tags[1:]:
        bundler_audit_image.tag("kiln/bundler-audit", tag=tag)
        push_logs = bundler_audit_image.push("kiln/bundler-audit", tag=tag)
        print(push_logs)

    (safety_image, build_logs) = docker_client.images.build(
        path=os.path.join(kiln_repo.path, "tool-images", "python", "safety"),
        tag=f"kiln/safety:{image_tags[0]}",
        rm=True)
    for line in build_logs:
        try:
            print(line['stream'], end='')
        except KeyError:
            pass
    push_logs = docker_client.images.push("kiln/safety", tag=image_tags[0])
    print(push_logs)

    for tag in image_tags[1:]:
        safety_image.tag("kiln/safety", tag=tag)
        push_logs = docker_client.images.push("kiln/safety", tag=tag)
        print(push_logs)

    for component in ["data-collector", "report-parser", "slack-connector"]:
        sh.cargo.make("musl-build",
                      _cwd=os.path.join(kiln_repo.path, component),
                      _err=sys.stderr)
        (docker_image, build_logs) = docker_client.images.build(
            path=os.path.join(kiln_repo.path, component),
            tag=f"kiln/{component}:{image_tags[0]}",
            rm=True)
        for line in build_logs:
            try:
                print(line['stream'], end='')
            except KeyError:
                pass
        push_logs = docker_image.push(f"kiln/{component}", tag=image_tags[0])
        print(push_logs)
        for tag in image_tags[1:]:
            docker_image.tag(f"kiln/{component}", tag=tag)
            push_logs = docker_client.images.push(f"kiln/{component}", tag=tag)
            print(push_logs)

    sh.cargo.make("musl-build",
                  _cwd=os.path.join(kiln_repo.path, "cli"),
                  _err=sys.stderr)
    base_path = os.path.join(kiln_repo.path, "cli", "target",
                             "x86_64-unknown-linux-musl", "release")
    src_path = os.path.join(base_path, "kiln-cli")
    base_name = f"kiln-cli-{version}.x86_64"
    dst_path = os.path.join(base_path, base_name)
    tarball_name = f"{base_name}.tar.xz"
    tarball_path = os.path.join(base_path, tarball_name)
    hashfile_name = f"{tarball_name}.sha256"
    hashfile_path = os.path.join(base_path, hashfile_name)
    sig_name = f"{hashfile_name}.sig"
    sig_path = os.path.join(base_path, sig_name)

    os.rename(src_path, dst_path)
    with tarfile.open(name=tarball_path, mode='w:xz') as tarball:
        tarball.add(dst_path, arcname=base_name)

    sha256sum = hashlib.sha256()
    b = bytearray(128 * 1024)
    mv = memoryview(b)
    with open(tarball_path, 'rb', buffering=0) as f:
        for n in iter(lambda: f.readinto(mv), 0):
            sha256sum.update(mv[:n])
    tarball_hash = sha256sum.hexdigest()
    with open(hashfile_path, 'w') as f:
        f.write(f"{tarball_hash} {tarball_name}")

    with gpg.Context() as default_ctx:
        signing_key = default_ctx.get_key(signing_key_id)
        with gpg.Context(signers=[signing_key], armor=True) as ctx:
            with open(hashfile_path, 'rb') as hashfile:
                with open(sig_path, 'wb') as sigfile:
                    hashdata = hashfile.read()
                    sig, metadata = ctx.sign(
                        hashdata, mode=gpg.constants.sig.mode.DETACH)
                    sigfile.write(sig)

    source_tarball_name = f"Kiln-{version}.tar.xz"
    source_tarball_path = os.path.join(kiln_repo.path, source_tarball_name)
    source_hashfile_name = f"{source_tarball_name}.sha256"
    source_hashfile_path = os.path.join(kiln_repo.path, source_hashfile_name)
    source_sig_name = f"{source_hashfile_name}.sig"
    source_sig_path = os.path.join(kiln_repo.path, source_sig_name)

    with io.BytesIO() as f:
        dulwich.porcelain.archive(kiln_repo, outstream=f)
        f.flush()
        compressed_bytes = lzma.compress(f.getvalue())
    with open(source_tarball_path, 'wb') as f:
        f.write(compressed_bytes)
    sha256sum = hashlib.sha256()
    sha256sum.update(compressed_bytes)
    tarball_hash = sha256sum.hexdigest()
    with open(source_hashfile_path, 'w') as f:
        f.write(f"{tarball_hash} {source_tarball_name}")

    with gpg.Context() as default_ctx:
        signing_key = default_ctx.get_key(signing_key_id)
        with gpg.Context(signers=[signing_key], armor=True) as ctx:
            with open(source_hashfile_path, 'rb') as hashfile:
                with open(source_sig_path, 'wb') as sigfile:
                    hashdata = hashfile.read()
                    sig, metadata = ctx.sign(
                        hashdata, mode=gpg.constants.sig.mode.DETACH)
                    sigfile.write(sig)

    g = Github(github_personal_access_token)
    repo = g.get_repo("simplybusiness/Kiln")
    release = repo.create_git_release(f"v{version}",
                                      f"Version {version}",
                                      '\n'.join(changelog_lines),
                                      draft=True)
    release.upload_asset(tarball_path)
    release.upload_asset(hashfile_path)
    release.upload_asset(sig_path)
    release.upload_asset(source_tarball_path)
    release.upload_asset(source_hashfile_path)
    release.upload_asset(source_sig_path)

    dulwich.porcelain.push(kiln_repo,
                           remote_location=origin,
                           refspecs=release_branch_ref)
    main_branch_ref = "refs/heads/main".encode()
    kiln_repo.refs.set_symbolic_ref(b'HEAD', main_branch_ref)
    kiln_repo.reset_index()
    sh.git.merge("--no-edit", "--no-ff", f"release/{version}")
    dulwich.porcelain.push(kiln_repo,
                           remote_location=origin,
                           refspecs=main_branch_ref)

    for component in [
            "data-collector", "data-forwarder", "report-parser",
            "slack-connector"
    ]:
        set_kiln_lib_dependency(kiln_repo, component, branch="main")
        sh.cargo.check("--manifest-path",
                       os.path.join(kiln_repo.path, component, "Cargo.toml"),
                       "--all-features",
                       _err=sys.stderr)
        kiln_repo.stage([f'{component}/Cargo.toml', f'{component}/Cargo.lock'])
        kiln_repo.do_commit(
            message=
            f"{component.capitalize()}: Revert kiln_lib dependency to main branch"
            .encode(),
            no_verify=no_verify)

    dulwich.porcelain.push(kiln_repo,
                           remote_location=origin,
                           refspecs=main_branch_ref)

    print(
        "Release is complete, but requires that Github release is published manually"
    )
Code example #49
 def test_lzma_formatter(self):
     value = lzma.compress(self.expected_value)
     self.check_formatting(value)
Code example #50
File: engine.py Project: voynix/7drl
 def save_as(self, filename: str) -> None:
     save_data = lzma.compress(pickle.dumps(self))
     with open(filename, "wb") as f:
         f.write(save_data)
Code example #51
 def compress(data: bytes, level: int = 1) -> bytes:
     return lzma.compress(data, format=lzma.FORMAT_ALONE, preset=level)
Code example #52
    def _compress(self, data: bytes) -> bytes:
        # no further compression or post processing is required
        if isinstance(self.request, Request) and not self.request.is_used:
            return data

        # otherwise there are two cases
        # 1. it is a lazy request that is being used, so `self.request.SerializeToString()` is a new uncompressed string
        # 2. it is a regular request, so `self.request.SerializeToString()` is an uncompressed string
        # either way it needs compression
        if not self.envelope.compression.algorithm:
            return data

        ctag = CompressAlgo.from_string(self.envelope.compression.algorithm)

        if ctag == CompressAlgo.NONE:
            return data

        _size_before = sys.getsizeof(data)

        # lower than hwm, pass compression
        if (_size_before < self.envelope.compression.min_bytes
                or self.envelope.compression.min_bytes < 0):
            self.envelope.compression.algorithm = 'NONE'
            return data

        try:
            if ctag == CompressAlgo.LZ4:
                import lz4.frame

                c_data = lz4.frame.compress(data)
            elif ctag == CompressAlgo.BZ2:
                import bz2

                c_data = bz2.compress(data)
            elif ctag == CompressAlgo.LZMA:
                import lzma

                c_data = lzma.compress(data)
            elif ctag == CompressAlgo.ZLIB:
                import zlib

                c_data = zlib.compress(data)
            elif ctag == CompressAlgo.GZIP:
                import gzip

                c_data = gzip.compress(data)

            _size_after = sys.getsizeof(c_data)
            _c_ratio = _size_before / _size_after

            if _c_ratio > self.envelope.compression.min_ratio:
                data = c_data
            else:
                # compression ratio is too poor; don't bother,
                # and save time on decompression
                default_logger.debug(
                    f'compression rate {(_size_before / _size_after):.2f}% '
                    f'is lower than min_ratio '
                    f'{self.envelope.compression.min_ratio}')
                self.envelope.compression.algorithm = 'NONE'
        except Exception as ex:
            default_logger.error(
                f'compression={str(ctag)} failed, fallback to compression="NONE". reason: {ex!r}'
            )
            self.envelope.compression.algorithm = 'NONE'

        return data
Code example #53
File: routes-to-bin.py Project: Ravenslofty/mistral
    idx2 = ls[1].find('_')
    pat = ls[1][:idx1]
    if '.' in pat:
        patx = pat.split('.')
        assert (patx[0] == '6')
        pat = 70 + int(patx[1])
    else:
        pat = int(pat)
    fwpos = int(ls[1][idx1 + 1:idx2]) + w * int(ls[1][idx2 + 1:])
    span = {}
    for n in ls[2:]:
        idx3 = n.find(':')
        span[int(n[:idx3])] = node_to_id(n[idx3 + 1:])
    muxes.append((dest, pat, fwpos, span))

muxes.sort()

bmux = bytearray(len(muxes) * 188)
off = 0
for i in range(0, len(muxes)):
    struct.pack_into("III", bmux, off, muxes[i][0], muxes[i][1], muxes[i][2])
    for s, v in muxes[i][3].items():
        struct.pack_into("I", bmux, off + 12 + 4 * s, v)
    off += 188

cbmux = lzma.compress(bmux)

print("%s muxes %d" % (chip, len(muxes)))

open(sys.argv[4] + '/' + chip + "-r.bin", "wb").write(cbmux)
Code example #54
File: run_test.py Project: xmnlab/anaconda-recipes
import itertools
import lzma
import math
import mmap
import operator
import parser
import pyexpat
import select
import ssl
import time
import unicodedata
import zlib
from os import urandom

t = 100 * b'Foo '
assert lzma.decompress(lzma.compress(t)) == t

if sys.platform != 'win32':
    if not (ppc64le or armv7l):
        import _curses
        import _curses_panel
    import crypt
    import fcntl
    import grp
    import nis
    import readline
    import resource
    import syslog
    import termios

    from distutils import sysconfig
Code example #55
File: lzma.py Project: ct-clmsn/pysparkling
    def compress(self, stream):
        if lzma is None:
            return Codec.compress(self, stream)

        return BytesIO(lzma.compress(stream.read()))
Code example #56
File: serialize.py Project: feihoo87/QuLab
def packz(obj: Any) -> bytes:
    """
    Serialize and compress.
    """
    return lzma.compress(pack(obj), format=lzma.FORMAT_XZ)
Code example #57
File: models.py Project: samarnath/patchew
 def data(self, value):
     self._data = value
     self.data_xz = lzma.compress(value.encode("utf-8"))
Code example #58
    default=None,
    help=
    'LZMA compression format mode: ALONE = 2, AUTO = 0, RAW = 3, XZ = 1 (default: %(default)s)'
)
parser.add_argument(
    "-check",
    type=int,
    default=0,
    help=
    'LZMA integrity check type NONE = 0, CRC32 = 1, CRC64 = 4, ID_MAX = 15, SHA256 = 10, UNKNOWN = 16  (default: %(default)s)'
)
parser.add_argument(
    "-preset",
    type=int,
    default=None,
    help=
    'LZMA compression level preset, an integer between 0 and 9. Also can be OR-ed with the constant preset EXTREME Constant 2147483648 (default: %(default)s)'
)
args = parser.parse_args()

#parser.add_argument("-filter", type=int, default=None, help='LZMA filter chain OR-ed together: ARM = 7, ARMTHUMB = 8, DELTA = 3, IA64 = 6, LZMA1 = 4611686018427387905, LZMA2 = 33, POWERPC = 5, SPARC = 9, X86 = 4 (default: %(default)s)')

str_object1 = open(args.input, 'rb').read()
if args.format is None:
    str_object2 = lzma.compress(str_object1, preset=args.preset)
else:
    str_object2 = lzma.compress(str_object1,
                                format=args.format,
                                preset=args.preset)
with open(args.output, 'wb') as f:
    f.write(str_object2)
Code example #59
    def compress(data):
        compressed_object = lzma.compress(data)

        return compressed_object
Code example #60
File: make_release.py Project: newfyle/deluge
release_dir = 'dist/release-%s' % version
print('Creating release archive for ' + version)
call(
    'python setup.py --quiet egg_info --egg-base /tmp sdist --formats=tar --dist-dir=%s'
    % release_dir,
    shell=True,
)

# Compress release archive with xz
tar_path = os.path.join(release_dir, 'deluge-%s.tar' % version)
tarxz_path = tar_path + '.xz'
print('Compressing tar (%s) with xz' % tar_path)
if lzma:
    with open(tar_path, 'rb') as tar_file, open(tarxz_path, 'wb') as xz_file:
        xz_file.write(
            lzma.compress(bytes(tar_file.read()),
                          preset=9 | lzma.PRESET_EXTREME))
else:
    call(['xz', '-e9zkf', tar_path])

# Calculate shasum and add to sha256sums.txt
with open(tarxz_path, 'rb') as _file:
    sha256sum = '%s %s' % (
        sha256(_file.read()).hexdigest(),
        os.path.basename(tarxz_path),
    )
with open(os.path.join(release_dir, 'sha256sums.txt'), 'w') as _file:
    _file.write(sha256sum + '\n')

print('Complete: %s' % release_dir)