def _restoreBlobToDest(self, dest, blob, chmod=0, chownuid=0, chowngid=0, serialization=""):
        """
        Write blob to destination
        """
        check="##HASHLIST##"
        j.system.fs.createDir(j.system.fs.getDirName(dest))
        if blob.find(check) == 0:
            # found hashlist
            # print "FOUND HASHLIST %s" % blob
            hashlist = blob[len(check) + 1:]            
            j.system.fs.writeFile(dest,"")
            for hashitem in hashlist.split("\n"):
                if hashitem.strip() != "":
                    key,serialization,blob_block = self.get(hashitem)
                    if serialization=="L":
                        blob_block = lzma.decompress(blob_block)
                    j.system.fs.writeFile(dest, blob_block, append=True)                        
        else:
            # content is there
            if serialization=="L":
                blob = lzma.decompress(blob)
            j.system.fs.writeFile(dest, blob)

        # chmod/chown
        if chmod != 0:
            os.chmod(dest, chmod)
        if chownuid != 0:
            os.chown(dest, chownuid, chowngid)
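A minimal sketch of the two blob layouts this method distinguishes, inferred from the code above (the key values are hypothetical):

import lzma

# Hashlist form: the marker, a newline, then one store key per line.
hashlist_blob = "##HASHLIST##\n" + "\n".join(["key-aaa", "key-bbb"])

# Plain form with serialization "L": the file content, LZMA-compressed.
plain_blob = lzma.compress(b"file contents")
assert lzma.decompress(plain_blob) == b"file contents"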
Example 2
    def decompress(self, data):
        result = None
        description = None
        i = 0

        for header in self.headers:
            i += 1
            # The only acceptable exceptions are those indicating that the input data was truncated.
            try:
                final_data = binwalk.core.compat.str2bytes(header + data)
                lzma.decompress(final_data)
                result = self.parse_header(header)
                break
            except IOError as e:
                # The Python2 module gives this error on truncated input data.
                if str(e) == "unknown BUF error":
                    result = self.parse_header(header)
                    break
            except Exception as e:
                # The Python3 module gives this error on truncated input data.
                # The inconsistency between modules is a bit worrisome.
                if str(e) == "Compressed data ended before the end-of-stream marker was reached":
                    result = self.parse_header(header)
                    break

        if result is not None:
            self.properties = self.build_property(result.pb, result.lp, result.lc)
            description = "%s, properties: 0x%.2X [pb: %d, lp: %d, lc: %d], dictionary size: %d" % (self.DESCRIPTION,
                                                                                                   self.properties,
                                                                                                   result.pb,
                                                                                                   result.lp,
                                                                                                   result.lc,
                                                                                                   result.dictionary)

        return description
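A quick way to reproduce the truncated-stream behavior the comments above rely on, using Python 3's lzma module:

import lzma

blob = lzma.compress(b"x" * 4096)
try:
    lzma.decompress(blob[:-8])  # drop the tail so the stream is truncated
except lzma.LZMAError as exc:
    # CPython 3 reports: "Compressed data ended before the
    # end-of-stream marker was reached"
    print(exc)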
Example 3
    def run_lzma(self):
        start = time()
        self.lzma_compressed = lzma.compress(self.raw_data)
        self.lzma_compress_time = time() - start

        start = time()
        lzma.decompress(self.lzma_compressed)
        self.lzma_decompress_time = time() - start
Example 4
    def fetch_file(self, time, symbol):
        if time.hour % 24 == 0: self.logger.info("Downloading... " + str(time))

        tick_path = self.tick_name.format(
                symbol = symbol,
                year = str(time.year).rjust(4, '0'),
                month = str(time.month).rjust(2, '0'),
                day = str(time.day).rjust(2, '0'),
                hour = str(time.hour).rjust(2, '0')
            )

        tick = self.fetch_tick(Constants().dukascopy_base_url + tick_path)

        if Constants().dukascopy_write_temp_tick_disk:
            out_path = Constants().temp_pythalesians_folder + "/dkticks/" + tick_path

            if not os.path.exists(out_path):
                if not os.path.exists(os.path.dirname(out_path)):
                    os.makedirs(os.path.dirname(out_path))

            self.write_tick(tick, out_path)

        try:
            return self.retrieve_df(lzma.decompress(tick), symbol, time)
        except:
            return None
Example 5
 def extract_cache_file(self, f):
     with open(f, 'rb') as ff:
         contents = ff.read()
         # First four bytes are garbage
         decompressed = lzma.decompress(contents[4:])
         # Last byte is \0
         return decompressed[:-1]
Example 6
    def make_index(self, package_list):
        doc = piksemel.newDocument("PISI")

        # since new PiSi (pisi 2) needs component info in the index file, we need to copy it from the original index that the user specified
        indexpath = fetch_uri(self.base_uri, self.cache_dir, self.index_name, None, False)
        if indexpath.endswith(".bz2"):
            import bz2
            data = open(indexpath).read()
            data = bz2.decompress(data)
            doc_index = piksemel.parseString(data)
        elif indexpath.endswith(".xz"):
            try:
                import lzma
            except ImportError:
                print "Install python-pyliblzma package, or try a different index format."
                return

            data = open(indexpath).read()
            data = lzma.decompress(data)
            doc_index = piksemel.parseString(data)
        else:
            doc_index = piksemel.parse(indexpath)

        # old PiSi needs obsoletes list, so we need to copy it too.
        for comp_node in doc_index.tags("Distribution"):
            doc.insertNode(comp_node)

        for name in package_list:
            doc.insertNode(self.packages[name].node)

        for comp_node in doc_index.tags("Component"):
            doc.insertNode(comp_node)

        return doc.toPrettyString()
Example 7
def result_to_properties(apps, schema_editor):
    # We can't import the models directly as they may be a newer
    # version than this migration expects. We use the historical version.
    Message = apps.get_model('api', 'Message')
    MessageProperty = apps.get_model('api', 'MessageProperty')
    MessageResult = apps.get_model('api', 'MessageResult')
    LogEntry = apps.get_model('api', 'LogEntry')
    messages = Message.objects.filter(results__name='git')
    for m in messages:
        r = MessageResult.objects.get(name='git', message=m)
        if not r:
            continue
        if r.status == api.models.Result.PENDING:
            set_property(MessageProperty, 'git.need-apply', True, message=m)
        else:
            log = lzma.decompress(r.log_entry.data_xz).decode("utf-8")
            set_property(MessageProperty, 'git.need-apply', False, message=m)
            set_property(MessageProperty, 'git.apply-log', log, message=m)
            if r.status == api.models.Result.FAILURE:
                set_property(MessageProperty, "git.apply-failed", True, message=m)
            else:
                set_property(MessageProperty, "git.apply-failed", False, message=m)
                if 'repo' in r.data:
                    set_property(MessageProperty, "git.repo", r.data['repo'], message=m)
                if 'tag' in r.data:
                    set_property(MessageProperty, "git.tag", r.data['repo'][len('refs/tags/'):], message=m)
                if 'url' in r.data:
                    set_property(MessageProperty, "git.url", r.data['url'], message=m)
                if 'base' in r.data:
                    set_property(MessageProperty, "git.base", r.data['base'], message=m)
        MessageResult.objects.filter(message=m, name='git').delete()
Example 8
    def update_element(self, element):
        """
        Extract some information from the element to prepare the repository
        :param element: Element to add to the repository
        :return: Unicode string containing meta-data

        ar -x control.tar.gz
        tar -xf control.tar.gz control
        """
        archive_file = storage(settings.STORAGE_ARCHIVE).get_file(element.archive_key)
        ar_file = ArFile(element.filename, mode='r', fileobj=archive_file)
        control_file, control_file_name = self.get_subfile(ar_file, 'control.tar.')
        if control_file is None:
            raise InvalidRepositoryException('No control file found in .deb package')
        mode = 'r:*'
        if control_file_name.endswith('.xz') or control_file_name.endswith('.lzma'):
            control_file_content = control_file.read()
            control_file_content_uncompressed = lzma.decompress(control_file_content)
            control_file.close()
            control_file = io.BytesIO(control_file_content_uncompressed)
            mode = 'r'
        tar_file = tarfile.open(name='control', mode=mode, fileobj=control_file)
        control_data = tar_file.extractfile('./control')
        # populate various fields of the element
        control_data_value = control_data.read().decode('utf-8')
        control_data.close()
        tar_file.close()
        ar_file.close()
        archive_file.close()
        element.extra_data = control_data_value
        parsed_data = parse_control_data(control_data_value)
        element.archive = parsed_data['Package']
        element.version = parsed_data['Version']
        element.official_link = parsed_data.get('Homepage', '')
        element.long_description = parsed_data.get('Description', '')
Example 9
 def file_list(self, element, uid):
     cache_filename = 'filelist_%s' % element.sha256
     key = storage(settings.STORAGE_CACHE).uid_to_key(uid)
     fileobj = storage(settings.STORAGE_CACHE).get_file(key, cache_filename)
     if fileobj is None:
         tmpfile = tempfile.NamedTemporaryFile(dir=settings.FILE_UPLOAD_TEMP_DIR)
         archive_file = storage(settings.STORAGE_ARCHIVE).get_file(element.archive_key, sub_path='')
         ar_file = ArFile(element.filename, mode='r', fileobj=archive_file)
         data_file, data_file_name = self.get_subfile(ar_file, 'data.tar.')
         mode = 'r:*'
         if data_file_name.endswith('.xz') or data_file_name.endswith('.lzma'):
             data_file_content = data_file.read()
             data_file_content_uncompressed = lzma.decompress(data_file_content)
             data_file.close()
             data_file = io.BytesIO(data_file_content_uncompressed)
             mode = 'r'
         tar_file = tarfile.open(name='data', mode=mode, fileobj=data_file)
         members = tar_file.getmembers()
         members = filter(lambda x: x.isfile(), members)
         names = [x.path[2:] for x in members]
         tar_file.close()
         ar_file.close()
         archive_file.close()
         for name in names:
             tmpfile.write(('%s\n' % name).encode('utf-8'))
         tmpfile.flush()
         tmpfile.seek(0)
         storage(settings.STORAGE_CACHE).store_descriptor(uid, cache_filename, tmpfile)
         tmpfile.close()
     else:
         names = [line.strip().decode() for line in fileobj]
         fileobj.close()
     return names
Example 10
def lzma_compress(inputfile, level, decompress):
    """
    (str, int, bool) -> CompressionData

    Compresses one file using the Python implementation of lzma.

    NOTE: The lzma module was created for Python 3; the version backported to
    Python 2.7 does not have a level parameter. To keep this code backwards
    compatible, the level parameter is never used and the default level of 6
    applies.
    """

    original_size = int(os.stat(inputfile).st_size)
    with open(inputfile, "rU") as fdorig:
        origlines = fdorig.read()
    origtext = memoryview(bytearray(origlines, "utf8"))
    compressedtext = memoryview(lzma.compress(origtext.tobytes()))
    compressed_size = len(compressedtext)

    decompress_time = None
    if decompress:
        decompress_time = min(timeit.repeat(lambda: lzma.decompress(compressedtext.tobytes()),
                                            number=10,
                                            repeat=3, timer=time.clock))

    cd = CompressionData(original_size, compressed_size, decompress_time)

    return cd
Example 11
 def read(self, all_tags=False, self_provides=True, *extra_tags):
     arfile = ar.Ar(fh = self.__file)
     arfile.read()
     debbin = arfile.get_file('debian-binary')
     if debbin is None:
         raise DebError(self.__path, 'no debian binary')
     if debbin.read() != '2.0\n':
         raise DebError(self.__path, 'invalid debian binary format')
     control = arfile.get_file('control.tar.gz')
     if control is not None:
         # XXX: python2.4 relies on a name
         tar = tarfile.open(name='control.tar.gz', fileobj=control)
     else:
         control = arfile.get_file('control.tar.xz')
         if control is None:
             raise DebError(self.__path, 'missing control.tar')
         if not HAVE_LZMA:
             raise DebError(self.__path, 'can\'t open control.tar.xz without python-lzma')
         decompressed = lzma.decompress(control.read())
         tar = tarfile.open(name="control.tar.xz",
                            fileobj=StringIO.StringIO(decompressed))
     try:
         name = './control'
         # workaround for python2.4's tarfile module
         if 'control' in tar.getnames():
             name = 'control'
         control = tar.extractfile(name)
     except KeyError:
         raise DebError(self.__path,
                        'missing \'control\' file in control.tar')
     self.__parse_control(control, all_tags, self_provides, *extra_tags)
     return self
Example 12
def setupRepoIndex():
    target = os.path.join(ctx.consts.target_dir, "var/lib/pisi/index/%s" % ctx.consts.pardus_repo_name)

    if os.path.exists(ctx.consts.pisi_index_file):
        # Copy package index
        shutil.copy(ctx.consts.pisi_index_file, target)
        shutil.copy(ctx.consts.pisi_index_file_sum, target)

        # Extract the index
        pureIndex = file(os.path.join(target,"pisi-index.xml"),"w")
        if ctx.consts.pisi_index_file.endswith("bz2"):
            import bz2
            pureIndex.write(bz2.decompress(open(ctx.consts.pisi_index_file).read()))
        else:
            import lzma
            pureIndex.write(lzma.decompress(open(ctx.consts.pisi_index_file).read()))
        pureIndex.close()

        ctx.logger.debug("pisi index files copied.")
    else:
        ctx.logger.debug("pisi index file not found!")

    ctx.logger.debug("Regenerating pisi caches.. ")
    yali.pisiiface.regenerateCaches()
    return True
Example 13
 def do_result_to_properties(self):
     for po in self.Project.objects.all():
         for obj in self.get_objects_for_project(po):
             by_status = defaultdict(lambda: 0)
             started = False
             start_time = datetime.datetime.utcnow()
             for r in obj.results.filter(name__startswith='testing.'):
                 by_status[r.status] += 1
                 if r.status in (api.models.Result.SUCCESS, api.models.Result.FAILURE):
                     tn = r.name[len('testing.'):]
                     report = copy(r.data)
                     report['passed'] = (r.status == api.models.Result.SUCCESS)
                     self.set_property(obj, "testing.report." + tn, report)
                     if r.log_entry:
                         log = lzma.decompress(r.log_entry.data_xz).decode("utf-8")
                         self.set_property(obj, "testing.log." + tn, log)
                 else:
                     started = started or r.status == api.models.Result.RUNNING
                     if r.last_update < start_time:
                         start_time = r.last_update
             #print(obj, dict(by_status))
             if by_status[api.models.Result.FAILURE]:
                 self.set_property(obj, "testing.failed", True)
             if by_status[api.models.Result.RUNNING]:
                 self.set_property(obj, "testing.started", True)
                 self.set_property(obj, "testing.start-time", d.timestamp())
             if by_status[api.models.Result.RUNNING] + by_status[api.models.Result.PENDING]:
                 self.set_property(obj, "testing.ready", 1)
             else:
                 self.set_property(obj, "testing.done", True)
             obj.results.filter(name__startswith='testing.').delete()
Example 14
def ReadCompressedClusters(volume):
    status = True
    error = ''
    data = b''

    try:
        if (debug >= 1):
            print('Entering ReadCompressedClusters:')
        if (debug >= 2):
            print('\tVolume passed in: ' + str(volume))

        with open(volume, "rb") as r:
            if debug >= 1:
                print('\tOpening Volume: ' + str(volume))
                print('\tSeeking to : ' + str(FSInfoSector * BytesPerSector + 4))
            temp = FSInfoSector * BytesPerSector + 4
            temp1 = 1536
            #if (debug >= 2):

            r.seek(temp1)
            length = struct.unpack("I", r.read(4))[0]  #Get length of LZMA compressed string
            if (debug >= 2):
                print('\tLength of compressed data: ' + str(length))
            r.seek(temp1 + 4)
            lzmadata = r.read(length)
            if (debug >= 3):
                print('\tCompressed data: ' + str(lzmadata))
            data = lzma.decompress(bytes(lzmadata))
            if debug >= 2:
                print('\tUncompressed Data: ' + str(data))
    except:
        error = 'Error: Cannot Read Compressed Cluster List.'
        status = False
    finally:
        return status, error, data
Example 15
def get_control_data(filename):
    """ Extract control data from a `.deb` file
    A `.deb` is an `.ar` file that contains `control.tar.XXX`, that contains a `control` file.

    :param filename: complete filepath of a `.deb` file
    :type filename: :class:`str`
    :return:
    :rtype: :class:`dict`
    """
    # open the four files (.deb, .ar, control.tar.XXX, control)
    deb_file = open(filename, mode="rb")
    ar_file = ArFile(filename, mode="r", fileobj=deb_file)
    control_file, control_file_name = get_subfile(ar_file, r"^control\.tar\..*$")
    mode = "r:*"
    if control_file_name.endswith(".xz") or control_file_name.endswith(".lzma"):
        # special case when lzma is used from backport (Python 3.2, 2.7)
        control_file_content = control_file.read()
        control_file_content_uncompressed = lzma.decompress(control_file_content)
        control_file.close()
        control_file = io.BytesIO(control_file_content_uncompressed)
        mode = "r"
    tar_file = tarfile.open(name="control", mode=mode, fileobj=control_file)
    control_data = tar_file.extractfile("./control")
    # we got the data!
    control_data_value = control_data.read().decode("utf-8")
    # ok, we close the previous file descriptors
    control_data.close()
    tar_file.close()
    ar_file.close()
    deb_file.close()
    # here we are
    parsed_data = parse_control_data(control_data_value)
    return parsed_data
Example 16
def xz_decompress(data):
    if HAS_LZMA_MODULE:
        return lzma.decompress(data)
    unxz = subprocess.Popen(['unxz'],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    data = unxz.communicate(input=data)[0]
    return data
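A round-trip sketch for the module path, assuming HAS_LZMA_MODULE is true and the xz_decompress above is in scope:

import lzma

original = b"hello world"
assert xz_decompress(lzma.compress(original, format=lzma.FORMAT_XZ)) == original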
Example 17
def unxz(contents):
    """ unxz contents in memory and return the data
    """
    try:
        xzdata = lzma.decompress(contents)
        return xzdata
    except lzma.LZMAError as e:
        error_message.send(sender=None, text='lzma: ' + str(e))
Example 18
    def is_valid_lzma(self, data):
        valid = True

        # The only acceptable exceptions are those indicating that the input data was truncated.
        try:
            lzma.decompress(binwalk.core.compat.str2bytes(data))
        except IOError as e:
            # The Python2 module gives this error on truncated input data.
            if str(e) != "unknown BUF error":
                valid = False
        except Exception as e:
            # The Python3 module gives this error on truncated input data.
            # The inconsistency between modules is a bit worrisome.
            if str(e) != "Compressed data ended before the end-of-stream marker was reached":
                valid = False

        return valid
Example 19
    def _handle_end(self, hostname, action, msg):  # pylint: disable=unused-argument
        try:
            job_id = int(msg[2])
            error_msg = msg[3]
            compressed_description = msg[4]
        except (IndexError, ValueError):
            self.logger.error("Invalid message from <%s> '%s'", hostname, msg)
            return

        try:
            job = TestJob.objects.get(id=job_id)
        except TestJob.DoesNotExist:
            self.logger.error("[%d] Unknown job", job_id)
            # ACK even if the job is unknown to let the dispatcher
            # forget about it
            send_multipart_u(self.controler, [hostname, 'END_OK', str(job_id)])
            return

        filename = os.path.join(job.output_dir, 'description.yaml')
        # If description.yaml already exists: a END was already received
        if os.path.exists(filename):
            self.logger.info("[%d] %s => END (duplicated), skipping", job_id, hostname)
        else:
            if compressed_description:
                self.logger.info("[%d] %s => END", job_id, hostname)
            else:
                self.logger.info("[%d] %s => END (lava-run crashed, mark job as INCOMPLETE)",
                                 job_id, hostname)
                with transaction.atomic():
                    # TODO: find a way to lock actual_device
                    job = TestJob.objects.select_for_update() \
                                         .get(id=job_id)

                    job.go_state_finished(TestJob.HEALTH_INCOMPLETE)
                    if error_msg:
                        self.logger.error("[%d] Error: %s", job_id, error_msg)
                        job.failure_comment = error_msg
                    job.save()

            # Create description.yaml even if it's empty
            # Allows to know when END messages are duplicated
            try:
                # Create the directory if it was not already created
                mkdir(os.path.dirname(filename))
                # TODO: check that compressed_description is not ""
                description = lzma.decompress(compressed_description)
                with open(filename, 'w') as f_description:
                    f_description.write(description.decode("utf-8"))
                if description:
                    parse_job_description(job)
            except (IOError, lzma.LZMAError) as exc:
                self.logger.error("[%d] Unable to dump 'description.yaml'",
                                  job_id)
                self.logger.exception("[%d] %s", job_id, exc)

        # ACK the job and mark the dispatcher as alive
        send_multipart_u(self.controler, [hostname, 'END_OK', str(job_id)])
        self.dispatcher_alive(hostname)
Example 20
def decodeBody(fileData):
	sign, version, dataSize = decodeHead(fileData)
	if sign == b"CWS":
		return zlib.decompress(fileData[8:])
	if sign == b"FWS":
		return fileData[8:]
	if sign == b"ZWS":
		rawData = fileData[12:17] + struct.pack("2I",0xFFFFFFFF,0xFFFFFFFF) + fileData[17:]
		return lzma.decompress(rawData, lzma.FORMAT_ALONE)
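The ZWS branch works because an LZMA "alone" header is 5 bytes of properties followed by an 8-byte uncompressed size; splicing in 0xFFFFFFFFFFFFFFFF marks the size as unknown, so decoding runs until the end-of-stream marker. A minimal sketch of the same header surgery, assuming a stream produced by Python's own encoder:

import lzma

alone = lzma.compress(b"swf body", format=lzma.FORMAT_ALONE)
# Overwrite the size field (bytes 5..13) with "unknown" and decode anyway.
patched = alone[:5] + b"\xff" * 8 + alone[13:]
assert lzma.decompress(patched, lzma.FORMAT_ALONE) == b"swf body"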
Example 21
    def test_lclp(self):
        for lcb in xrange(lzma.options.lc[0], lzma.options.lc[1]+1):
            for lpb in xrange(lzma.options.lc[1]-lcb):
                result = lzma.compress(self.data, options={'lc':lcb, 'lp':lpb})
                self.assertEqual(self.data, lzma.decompress(result))
        self.failUnlessRaises(ValueError, lzma.compress, self.data, options={'lc':lzma.options.lc[0]-1})
        self.failUnlessRaises(ValueError, lzma.compress, self.data, options={'lc':lzma.options.lc[1]+1})
        self.failUnlessRaises(ValueError, lzma.compress, self.data, options={'lp':lzma.options.lp[0]-1})
        self.failUnlessRaises(ValueError, lzma.compress, self.data, options={'lp':lzma.options.lp[1]+1})
Example 22
 def testWrite(self):
     # "Test lzma.LZMAFile.write()"
     lzmaf = lzma.LZMAFile(self.filename, "w")
     self.assertRaises(TypeError, lzmaf.write)
     lzmaf.write(self.TEXT)
     lzmaf.close()
     f = open(self.filename, 'rb')
     self.assertEqual(lzma.decompress(f.read()), self.TEXT)
     f.close()
Example 23
def ExtractLZMA( compressed_data, destination ):
  uncompressed_data = BytesIO( lzma.decompress( compressed_data ) )

  with tarfile.TarFile( fileobj=uncompressed_data, mode='r' ) as tar_file:
    a_member = tar_file.getmembers()[ 0 ]
    tar_file.extractall( destination )

  # Determine the directory name
  return os.path.join( destination, a_member.name.split( '/' )[ 0 ] )
Example 24
    def test_dict_size(self):
        dict = lzma.options.dict_size[0]
        while dict <= 1<<26:
            # lzma.options.dict_size[1]: using very large dictionaries requires
            # very large amounts of memory, so don't go beyond 64 MB for testing.
            result = lzma.compress(self.data, options={'dict_size':dict})
            self.assertEqual(self.data, lzma.decompress(result))
            dict = dict * 2
        self.failUnlessRaises(ValueError, lzma.compress, self.data, options={'dict_size':lzma.options.dict_size[1]+1})
        self.failUnlessRaises(ValueError, lzma.compress, self.data, options={'dict_size':lzma.options.dict_size[0]-1})
Example 25
 def parse_play_data(self, replay_data):
     offset_end = self.offset+self.__replay_length
     if self.game_mode != GameMode.Standard:
         self.play_data = None
     else:
         datastring = lzma.decompress(replay_data[self.offset:offset_end], format=lzma.FORMAT_AUTO).decode('ascii')[:-1]
         events = [eventstring.split('|') for eventstring in datastring.split(',')]
         self.play_data = [ReplayEvent(int(event[0]), float(event[1]), float(event[2]), int(event[3])) for event in events]
     self.offset = offset_end
Example 26
    def next(self):
        """ read and decompress next chunk """
        chunkhdr = self.fh.read(16)
        if len(chunkhdr) == 0:
            return
        fullsize, compsize = struct.unpack(">QQ", chunkhdr)

        xzdata = self.fh.read(compsize)
        return lzma.decompress(xzdata)
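The chunk framing read here is two big-endian 64-bit sizes (uncompressed, then compressed) followed by the xz payload. A sketch of the matching writer side, with illustrative data:

import io
import lzma
import struct

payload = b"chunk contents"
comp = lzma.compress(payload)
fh = io.BytesIO(struct.pack(">QQ", len(payload), len(comp)) + comp)

fullsize, compsize = struct.unpack(">QQ", fh.read(16))
assert lzma.decompress(fh.read(compsize)) == payload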
Example 27
def piksemelize(xml_path):
    """
        Uncompress and parse the given index file.
        return Piksemel Object.
    """

    if xml_path.endswith("bz2"):
        return piksemel.parseString(bz2.decompress(file(xml_path).read()))
    return piksemel.parseString(lzma.decompress(file(xml_path).read()))
Example 28
def decompress(msg, method):
    """Shortcut method for decompression"""
    if method == flags.gzip:
        return zlib.decompress(msg, zlib.MAX_WBITS | 32)
    elif method == flags.bz2:
        return bz2.decompress(msg)
    elif method == flags.lzma:
        return lzma.decompress(msg)
    else:
        raise Exception('Unknown decompression method')
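Usage sketch, assuming the decompress above and its flags constants are in scope (flags.lzma is whatever value the surrounding protocol defines):

import lzma

msg = lzma.compress(b"payload")
assert decompress(msg, flags.lzma) == b"payload"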
Example 29
 def _lzma(self):
     '''LZMA processor'''
     try:
         archive = lzma.decompress(self.cur_attachment.file_obj.read())
         new_fn, ext = os.path.splitext(self.cur_attachment.orig_filename)
         cur_file = File(archive, new_fn)
         self.process_payload(cur_file)
     except:
         self.cur_attachment.make_dangerous()
     return self.cur_attachment
Example 30
def file_write(file, data):
    try:
        fd = open(file, "wb")
        aux_data = lzma.decompress(data)
        fd.write(aux_data)
        fd.close()
        return True
    except Exception:
        return False
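Round-trip sketch: the data argument must already be LZMA-compressed, since file_write decompresses before writing (the path here is illustrative):

import lzma

assert file_write("/tmp/out.bin", lzma.compress(b"payload"))
with open("/tmp/out.bin", "rb") as fd:
    assert fd.read() == b"payload"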
Example 31
 def _read_block_header(self):
     self._block_count = self.raw_decoder.read_long()
     if self.codec == NULL_CODEC:
         # Skip a long; we don't need to use the length.
         self.raw_decoder.skip_long()
         self._datum_decoder = self._raw_decoder
     elif self.codec == DEFLATE_CODEC:
         # Compressed data is stored as (length, data), which
         # corresponds to how the "bytes" type is encoded.
         data = self.raw_decoder.read_bytes()
         # -15 is the log of the window size; negative indicates
         # "raw" (no zlib headers) decompression.  See zlib.h.
         uncompressed = zlib.decompress(data, -15)
         self._datum_decoder = avro_io.BinaryDecoder(
             io.BytesIO(uncompressed))
     elif self.codec == BZIP2_CODEC:
         length = self.raw_decoder.read_long()
         data = self.raw_decoder.read(length)
         uncompressed = bz2.decompress(data)
         self._datum_decoder = avro_io.BinaryDecoder(
             io.BytesIO(uncompressed))
     elif self.codec == SNAPPY_CODEC:
         # Compressed data includes a 4-byte CRC32 checksum
         length = self.raw_decoder.read_long()
         data = self.raw_decoder.read(length - 4)
         uncompressed = snappy.decompress(data)
         self._datum_decoder = avro_io.BinaryDecoder(
             io.BytesIO(uncompressed))
         self.raw_decoder.check_crc32(uncompressed)
     elif self.codec == XZ_CODEC:
         length = self.raw_decoder.read_long()
         data = self.raw_decoder.read(length)
         uncompressed = lzma.decompress(data)
         self._datum_decoder = avro_io.BinaryDecoder(
             io.BytesIO(uncompressed))
     elif self.codec == ZSTANDARD_CODEC:
         length = self.raw_decoder.read_long()
         data = self.raw_decoder.read(length)
         uncompressed = bytearray()
         dctx = zstd.ZstdDecompressor()
         with dctx.stream_reader(io.BytesIO(data)) as reader:
             while True:
                 chunk = reader.read(16384)
                 if not chunk:
                     break
                 uncompressed.extend(chunk)
         self._datum_decoder = avro_io.BinaryDecoder(
             io.BytesIO(uncompressed))
     else:
         raise DataFileException("Unknown codec: %r" % self.codec)
Example 32
    def get_data(self, uid):
        start = self.entries[uid].offset
        end = start + self.entries[uid].size

        data = self.data[start:end]

        try:
            data = lzma.decompress(data)
        except KeyboardInterrupt as e:
            raise e
        except Exception as e:
            pass

        return data
Example 33
async def analyze_score(score: 'Score') -> None:
    """Analyze a single score."""
    player = score.player

    # open & parse replay files frames
    replay_file = REPLAYS_PATH / f'{score.id}.osr'
    data = lzma.decompress(replay_file.read_bytes())

    frames: list[ReplayFrame] = []

    # ignore seed & blank line at end
    for action in data.decode().split(',')[:-2]:
        if frame := ReplayFrame.from_str(action):
            frames.append(frame)
Example 34
def download_list(url='http://verteiler5.mediathekview.de/Filmliste-akt.xz',
                  path='filme.json',
                  tmp_path='filme.xz'):

    print('download {}'.format(url))

    with urllib.request.urlopen(url) as response, \
        open(tmp_path, 'wb') as tmp_fp, \
        open(path, 'wb') as fp:

        data = response.read()
        tmp_fp.write(data)
        data = lzma.decompress(data)
        fp.write(data)
Example 35
def decode_qr_code_data(encoded: bytes) -> str:
    try:
        compressed_bytes = b32decode(encoded)
        try:
            decompressed_bytes = decompress(compressed_bytes)
            try:
                data = decompressed_bytes.decode('utf-8')
                return data
            except UnicodeError:
                raise InvalidSignatureRequest("Not valid UTF-8")
        except LZMAError:
            raise InvalidSignatureRequest("Not LZMA compressed")
    except (TypeError, Base32DecodeError):
        raise InvalidSignatureRequest("Not Base32")
Example 37
def separate(lzma_stream):
    """
    Separates the lzma stream of frames into separate lists of x, y, z and w.

    Args:
        String lzma_stream: The lzma to separate.

    Returns:
        The lists of x, y, z, w.
    """
    text = lzma.decompress(lzma_stream).decode('UTF-8')

    xs = []
    ys = []
    zs = []
    ws = []

    for frame in text.split(','):
        if not frame:
            continue
        w, x, y, z = frame.split('|')
        w = int(w)
        x = float(x)
        y = float(y)
        z = int(z)

        #Everything we need from Z is in the first byte
        z = z & 0xFF

        #To fit x and y into shorts, they can be scaled to retain more precision.
        x = int(round(x * 16))
        y = int(round(y * 16))

        #Prevent the coordinates from being too large for a short. If this happens, the cursor is way offscreen anyway.
        if x <= -0x8000: x = -0x8000
        elif x >= 0x7FFF: x = 0x7FFF
        if y <= -0x8000: y = -0x8000
        elif y >= 0x7FFF: y = 0x7FFF

        #w: signed 24bit integer
        #x: signed short
        #y: signed short
        #z: unsigned char
        xs.append(x)
        ys.append(y)
        zs.append(z)
        ws.append(w)

    return xs, ys, zs, ws
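A tiny self-contained check of the frame format separate() expects, with made-up frame values:

import lzma

stream = lzma.compress(b"1|2.5|-3.5|255,2|0.1|0.2|0,")
xs, ys, zs, ws = separate(stream)
assert ws == [1, 2] and zs == [255, 0]
assert xs == [40, 2] and ys == [-56, 3]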
Example 39
def create_rom_bytes(
        patch_file: str,
        ignore_version: bool = False) -> Tuple[dict, str, bytearray]:
    data = Utils.parse_yaml(
        lzma.decompress(load_bytes(patch_file)).decode("utf-8-sig"))
    if not ignore_version and data[
            "compatible_version"] > current_patch_version:
        raise RuntimeError(
            "Patch file is incompatible with this patcher, likely an update is required."
        )
    patched_data = bsdiff4.patch(get_base_rom_bytes(), data["patch"])
    rom_hash = patched_data[int(0x7FC0):int(0x7FD5)]
    data["meta"]["hash"] = "".join(chr(x) for x in rom_hash)
    target = os.path.splitext(patch_file)[0] + ".sfc"
    return data["meta"], target, patched_data
Example 40
def decompress_rev_1(data, version=None):
    """Decompress from file."""
    start = time.time()

    if version == Version.AOK:
        prefix_size = 4
        header_len, = struct.unpack('<I', data.read(prefix_size))
    else:
        prefix_size = 8
        header_len, next_header = struct.unpack('<II', data.read(prefix_size))

    lzma_header = data.read(header_len - prefix_size)
    header = lzma.decompress(lzma_header)
    zlib_header = zlib.compress(header)[2:]

    body = lzma.decompress(data.read())

    LOGGER.info("decompressed in %.2f seconds", time.time() - start)
    if version == Version.AOK:
        prefix = struct.pack('<I', len(zlib_header) + prefix_size)
    else:
        prefix = struct.pack('<II',
                             len(zlib_header) + prefix_size, next_header)
    return prefix + zlib_header + body
Example 41
    def _writeBlob(self, dest, blob, item, namespace):
        """
        Write blob to destination
        """

        check = "##HASHLIST##"
        if blob.find(check) == 0:
            # found hashlist
            print "FOUND HASHLIST %s" % blob
            hashlist = blob[len(check) + 1:]
            j.system.fs.writeFile(dest, "")
            for hashitem in hashlist.split("\n"):
                if hashitem.strip() != "":
                    blob_block = self.client.get(namespace, hashitem)
                    data = lzma.decompress(blob_block)
                    j.system.fs.writeFile(dest, data, append=True)
        else:
            # content is there
            data = lzma.decompress(blob)
            j.system.fs.writeFile(dest, data)

        # chmod/chown
        os.chmod(dest, int(item.mode))
        os.chown(dest, int(item.uid), int(item.gid))
Example 42
def extract_update_bundle(bundle):
    past_update_dir = paths.data_path("update")
    if past_update_dir.exists():
        log.info("Found previous update data. Removing...")
        shutil.rmtree(past_update_dir, ignore_errors=True)
    log.debug("Extracting update bundle")
    bundle.seek(0)
    extraction_dir = paths.data_path("update", "extracted")
    if extraction_dir.exists():
        shutil.rmtree(extraction_dir)
    extraction_dir.mkdir(parents=True, exist_ok=True)
    archive_file = BytesIO(decompress(bundle.read()))
    with zipfile.ZipFile(archive_file) as archive:
        archive.extractall(extraction_dir)
    return extraction_dir
Example 43
def main():
    word_dict = {}
    with open('compressed.xz', 'rb') as file:
        decompressed = lzma.decompress(file.read()).decode('utf-8')
        for line in decompressed.splitlines():
            count, words = line.split(':')
            count = int(count, 16)
            words = words.split(',')
            for i in words:
                word_dict[i] = count
    while True:
        string = input('String>>>')
        not_in = input('Letters not in>>>>')
        print('\n'.join('%s %s' % i
                        for i in suggest_next(word_dict, string, not_in)))
Example 44
	def parse_play_data(self, replay_data: bytes):
		frames = []
		lzma_len = self.read_int() # aka self.__replay_length
		lzma_raw = lzma.decompress(self.read_byte(lzma_len)).decode('ascii')[:-1]
		events = [event_raw.split('|') for event_raw in lzma_raw.split(',')]

		self.play_data = [
						ReplayEvent(
							int(event[0]),
							float(event[1]),
							float(event[2]),
							int(event[3])
							)
						for event in events
						]
Example 45
    def decompress(self, data):
        """
        Decompress a datablock.

        Not all algorithms are supported yet.
        """
        if self.compression == XZ_COMPRESSION:
            return lzma.decompress(data)
        elif self.compression == ZLIB_COMPRESSION:
            return zlib.decompress(data)
        elif self.compression == LZMA_COMPRESSION:
            if data[:1] == b'\x5d':
                return lzma.decompress(data[:5] + b'\xff' * 8 + data[13:])
            else:  # \x6d
                return lzma.decompress(data[:5] + b'\xff' * 8 + data[5:])
        elif self.compression == LZO_COMPRESSION and lzo:
            return lzo.decompress(data, False, self.block_size)
        elif self.compression == LZ4_COMPRESSION and lz4:
            return lz4.block.decompress(data,
                                        uncompressed_size=self.block_size)
        elif self.compression == ZSTD_COMPRESSION and zstd:
            return zstd.decompress(data)

        raise Exception("Compression type %d not supported" % self.compression)
Example 46
 def test_nice_len(self):
     for nl in xrange(lzma.options.nice_len[0],
                      lzma.options.nice_len[1] + 1):
         result = lzma.compress(self.data, options={'nice_len': nl})
         self.assertEqual(self.data, lzma.decompress(result))
     self.failUnlessRaises(
         ValueError,
         lzma.compress,
         self.data,
         options={'nice_len': lzma.options.nice_len[1] + 1})
     self.failUnlessRaises(
         ValueError,
         lzma.compress,
         self.data,
         options={'nice_len': lzma.options.nice_len[0] - 1})
Example 47
        def decode(self, buf, out=None):

            # setup filters
            if self.format == lzma.FORMAT_RAW:
                # filters needed
                filters = self.filters
            else:
                # filters should not be specified
                filters = None

            # do decompression
            dec = lzma.decompress(buf, format=self.format, filters=filters)

            # handle destination
            return _buffer_copy(dec, out)
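With lzma.FORMAT_RAW the stream carries no header, so the exact filter chain must be supplied on both sides; a short sketch of why the filters branch above is required:

import lzma

filters = [{"id": lzma.FILTER_LZMA2, "preset": 6}]
raw = lzma.compress(b"data", format=lzma.FORMAT_RAW, filters=filters)
assert lzma.decompress(raw, format=lzma.FORMAT_RAW, filters=filters) == b"data"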
Example 48
def main():
    f = open("2012/11/03/01h_ticks.bi5", "r")
    data = lzma.decompress(f.read())

    Tick = namedtuple('Tick', 'time ask bid askv bidv')
    epoch = datetime(2012, 11, 3, 1)

    def row_data(row):
        row_data = Tick._asdict(Tick._make(struct.unpack(">LLLff", row)))
        row_data['time'] = (epoch + timedelta(0, 0, 0, row_data['time']))
        return row_data

    mapped_data = map(lambda row: row_data(row), chunks(data, 20))

    print mapped_data[-1]
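Each decompressed .bi5 row is 20 bytes: three big-endian unsigned ints (millisecond offset from the hour, ask, bid) and two floats (volumes). A sketch decoding one synthetic row:

import struct
from collections import namedtuple
from datetime import datetime, timedelta

Tick = namedtuple('Tick', 'time ask bid askv bidv')
row = struct.pack(">LLLff", 1500, 131250, 131240, 1.2, 3.4)
tick = Tick._make(struct.unpack(">LLLff", row))
epoch = datetime(2012, 11, 3, 1)
print(epoch + timedelta(milliseconds=tick.time))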
Example 49
 def get_string(self):
     """
     Returns the string representing the output
     """
     if self.compression is CompressionEnum.none:
         return self.data.decode()
     elif self.compression is CompressionEnum.gzip:
         return gzip.decompress(self.data).decode()
     elif self.compression is CompressionEnum.bzip2:
         return bz2.decompress(self.data).decode()
     elif self.compression is CompressionEnum.lzma:
         return lzma.decompress(self.data).decode()
     else:
         # Shouldn't ever happen, unless we change CompressionEnum but not the rest of this function
         raise TypeError("Unknown compression type??")
Example 50
def load_ffmpeg_bin():

    os_name = system().lower()

    # Load OS specific ffmpeg executable
    cwd = os.path.dirname(__file__)
    if os_name == 'windows':
        path_to_ffmpeg = os.path.join(cwd, '.', 'static', 'bin', 'win32')
        ffmpeg_file = os.path.join(path_to_ffmpeg, 'ffmpeg.exe')
    elif os_name == 'linux':
        path_to_ffmpeg = os.path.join(cwd, './static/bin/linux')
        ffmpeg_file = path_to_ffmpeg + '/ffmpeg'
    elif os_name == 'darwin':
        path_to_ffmpeg = os.path.join(cwd, './static/bin/darwin')
        ffmpeg_file = path_to_ffmpeg + '/ffmpeg'

    if not os.path.exists(ffmpeg_file):

        # load os specific ffmpeg bin data
        if os_name == 'windows':
            from pyffmpeg.static.bin.win32 import win32
            b64 = win32.contents
        elif os_name == 'linux':
            from pyffmpeg.static.bin.linux import linux
            b64 = linux.contents
        else:
            from pyffmpeg.static.bin.darwin import darwin
            b64 = darwin.contents

        raw = b64decode(b64)
        decompressed = decompress(raw)
        # Create the folders
        if not os.path.exists(path_to_ffmpeg):
            os.makedirs(path_to_ffmpeg)
        # Finally create the ffmpeg file
        with open(ffmpeg_file, 'wb') as f_file:
            f_file.write(decompressed)

        # Write path to file
        with open('FFMBIN.PYF', 'w') as pyf:
            conts = str(b64encode(bytes(ffmpeg_file, 'utf-8')))[2:-1]
            pyf.write(conts)

    return ffmpeg_file
Example 51
    def parse_index(self, console=None, update_repo=False):
        path = fetch_uri(self.base_uri, self.cache_dir, self.index_name,
                         console, update_repo)
        if path.endswith(".bz2"):
            import bz2
            data = open(path).read()
            data = bz2.decompress(data)
            doc = piksemel.parseString(data)
        elif path.endswith(".xz"):
            try:
                import lzma
            except ImportError:
                print "Install python-pyliblzma package, or try a different index format."
                return

            data = open(path).read()
            data = lzma.decompress(data)
            doc = piksemel.parseString(data)
        else:
            doc = piksemel.parse(path)
        for tag in doc.tags('Package'):
            p = Package(tag)
            self.packages[p.name] = p
            self.size += p.size
            self.inst_size += p.inst_size
            if p.component not in self.components:
                self.components[p.component] = []
        for name in self.packages:
            p = self.packages[name]
            for name2 in p.depends:
                if self.packages.has_key(name2):
                    self.packages[name2].revdeps.append(p.name)
                else:
                    raise ExPackageMissing, (p.name, name2)
            if p.component in self.components:
                self.components[p.component].append(p.name)
            else:
                self.components[p.component] = []
        from pisi.graph import Digraph, CycleException
        dep_graph = Digraph()
        for name in self.packages:
            p = self.packages[name]
            for dep in p.depends:
                dep_graph.add_edge(name, dep)
        try:
            dep_graph.dfs()
        except CycleException, c:
            raise ExPackageCycle, (c.cycle)
Example 52
    def getfile(self, hash):
        # print("getfile: hash: ", hash)
        uri = self.swarmserver + "File"
        payload = {"AgentName": socket.gethostname(), "Hash": hash}
        try:
            r = requests.post(uri, json=payload)
            # print("getfile: resp: ", r.status_code, r.text)
            if (r.status_code != requests.codes.ok):
                self.isconnected = False

            jsonresp = {}
            # self.scriptlist
            jsonresp = json.loads(r.text)
            # print("getfile: jsonresp:", jsonresp)

            # print('scriptdir', self.scriptdir)
            localfile = os.path.abspath(
                os.path.join(self.scriptdir, jsonresp['File']))
            print('getfile: localfile', localfile)
            self.scriptlist[hash]['localfile'] = localfile
            self.scriptlist[hash]['file'] = jsonresp['File']

            # self.scriptlist[hash][]

            filedata = jsonresp['FileData']
            # print("filedata:", filedata)
            # print("getfile: filedata:")

            decoded = base64.b64decode(filedata)
            # print("b64decode: decoded:", decoded)
            # print("getfile: b64decode:")

            uncompressed = lzma.decompress(decoded)
            # print("uncompressed:", uncompressed)
            # print("getfile: uncompressed:")

            localfiledir = os.path.dirname(localfile)
            # print("getfile: localfiledir:", localfiledir)
            self.ensuredir(localfiledir)
            # print("getfile: ensuredir:")

            with open(localfile, 'wb') as afile:
                # print("getfile: afile:")
                afile.write(uncompressed)
                # print("getfile: write:")

        except Exception as e:
            print("getfile: Exception:", e)
Example 53
    def loadFrom(self, db, ignoreReplayData=False):
        self.mode = db.readByte()
        self.version = db.readInt()
        self.mapHash = db.readOsuString()
        self.username = db.readOsuString()
        self.hash = db.readOsuString()
        self.cnt300 = db.readShort()
        self.cnt100 = db.readShort()
        self.cnt50 = db.readShort()
        self.cntGeki = db.readShort()
        self.cntKatu = db.readShort()
        self.cntMiss = db.readShort()
        self.score = db.readInt()
        self.combo = db.readShort()
        self.perfectCombo = db.readByte()
        self.mods = Mods(db.readInt())
        hpBarStr = db.readOsuString()
        self.hpGraph = []
        if hpBarStr is not None:
            for uv in hpBarStr.split(','):
                if len(uv) == 0:
                    continue
                t, val = uv.split('|')
                t = int(t)
                val = float(val)
                self.hpGraph.append((t, val))
        self.timestamp = db.readOsuTimestamp()
        rawReplayData = db.readBytes(len32=True)
        self.scoreID = db.readLL()

        if not ignoreReplayData and rawReplayData is not None and len(
                rawReplayData) > 0:
            replayData = [
                s for s in lzma.decompress(
                    data=rawReplayData).decode('utf-8').split(',')
                if len(s) > 0
            ]
            self.replayData = []
            for wxyz in (replayData[:-1] if self.version >= 20130319 else replayData):
                t, x, y, keyFlags = wxyz.split('|')
                t = int(t)
                x = float(x)
                y = float(y)
                keyFlags = int(keyFlags)
                self.replayData.append((t, x, y, keyFlags))
            if self.version >= 20130319:
                self.randomSeed = int(replayData[-1].split('|')[-1])
Example 54
 def read(self, all_tags=False, self_provides=True, *extra_tags):
     arfile = ar.Ar(fh=self.__file)
     arfile.read()
     debbin = arfile.get_file(b'debian-binary')
     if debbin is None:
         raise DebError(self.__path, 'no debian binary')
     if debbin.read() != b'2.0\n':
         raise DebError(self.__path, 'invalid debian binary format')
     control = arfile.get_file(b'control.tar.gz')
     if control is not None:
         # XXX: python2.4 relies on a name
         tar = tarfile.open(name='control.tar.gz', fileobj=control)
     else:
         control = arfile.get_file(b'control.tar.xz')
         if control:
             if not HAVE_LZMA:
                 raise DebError(
                     self.__path,
                     'can\'t open control.tar.xz without python-lzma')
             decompressed = lzma.decompress(control.read())
             tar = tarfile.open(name="control.tar.xz",
                                fileobj=BytesIO(decompressed))
         else:
             control = arfile.get_file(b'control.tar.zst')
             if control:
                 if not HAVE_ZSTD:
                     raise DebError(
                         self.__path,
                         'can\'t open control.tar.zst without python-zstandard'
                     )
                 with zstandard.ZstdDecompressor().stream_reader(
                         BytesIO(control.read())) as reader:
                     decompressed = reader.read()
                 tar = tarfile.open(name="control.tar.zst",
                                    fileobj=BytesIO(decompressed))
         if control is None:
             raise DebError(self.__path, 'missing control.tar')
     try:
         name = './control'
         # workaround for python2.4's tarfile module
         if 'control' in tar.getnames():
             name = 'control'
         control = tar.extractfile(name)
     except KeyError:
         raise DebError(self.__path,
                        'missing \'control\' file in control.tar')
     self.__parse_control(control, all_tags, self_provides, *extra_tags)
     return self
Example 55
    def get(self, statute_id):
        _id = get_title_from_id(statute_id)
        cache_key = get_cache_key()
        if cache_key_exists(cache_key):
            app.logger.info('getting data from Redis')
            compressed_data = redis_store.get(cache_key)
            text_data = lzma.decompress(compressed_data).decode('utf-8')
            return json.loads(text_data)

        law = codifier.laws[_id]
        articles = law.sentences
        ordered_articles = {}
        for key in sorted(articles, key=lambda x: int(x)):
            ordered_articles[key] = articles[key]
        cache_store(cache_key, ordered_articles, compress=True)
        return ordered_articles
Example 56
def write_patch(source_path, target_path=None):
    # This patches rounding differences that started to occur around Feb 2021.
    from lzma import decompress
    from base64 import b85decode
    from diff_match_patch import diff_match_patch

    dmp = diff_match_patch()
    diff = decompress(b85decode(BLOB)).decode("utf-8")

    with open(source_path, "r") as fp:
        new_json = fp.read()

    patches = dmp.patch_fromText(diff)
    patched, _ = dmp.patch_apply(patches, new_json)
    with open(target_path, "w") as fp:
        fp.write(patched)
Example 57
 def _decrypt(self, ciphertext: str) -> str:
     """
     Decrypt payload
     :params ciphertext: encrypted string to decrypt
     :return: decrypted payload
     """
     self.salt, iv, ciphertext = map(unhexlify, ciphertext.split("-"))
     self.key = self._set_key()
     aes = AESGCM(self.key)
     plaintext = ""
     try:
         plaintext = aes.decrypt(iv, ciphertext, None)
         plaintext = lzma.decompress(plaintext)
     except InvalidTag:
         logger.error("Wrong passphrase used.")
     return plaintext.decode("utf8")
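A sketch of a matching encrypt side using the cryptography library's AESGCM; the key handling here is a stand-in for the _set_key() above, which is not shown:

import lzma
from binascii import hexlify
from os import urandom
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

key = AESGCM.generate_key(bit_length=256)  # assumption: placeholder for _set_key()
iv = urandom(12)
salt = urandom(16)
ciphertext = AESGCM(key).encrypt(iv, lzma.compress(b"payload"), None)
token = b"-".join(hexlify(part) for part in (salt, iv, ciphertext)).decode()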
Example 58
    def decompress(self, data):
        """
        decompress data

        @param      data        binary data
        @return                 binary data
        """
        if self._compress == "zip":
            return zlib.decompress(data)
        elif self._compress == "lzma":
            return lzma.decompress(data)
        elif self._compress is None:
            return data
        else:
            raise ValueError("unexpected compression algorithm {0}".format(
                self._compress))
Example 59
 def parse_play_data(self, replay_data):
     offset_end = self.offset + self.__replay_length
     if self.game_mode == GameMode.Standard or self.game_mode == GameMode.Taiko:
         datastring = lzma.decompress(
             replay_data[self.offset:offset_end],
             format=lzma.FORMAT_AUTO).decode('ascii')[:-1]
         events = [
             eventstring.split('|') for eventstring in datastring.split(',')
         ]
         self.play_data = [
             ReplayEvent(int(event[0]), float(event[1]), float(event[2]),
                         int(event[3])) for event in events
         ]
     else:
         self.play_data = None
     self.offset = offset_end
Example 60
def download_old_all_printings() -> None:
    """
    Download the hosted version of AllPrintings from MTGJSON
    for future consumption
    """
    file_bytes = b""
    file_data = requests.get(
        "https://mtgjson.com/api/v5/AllPrintings.json.xz", stream=True
    )
    for chunk in file_data.iter_content(chunk_size=1024 * 36):
        if chunk:
            file_bytes += chunk

    OUTPUT_PATH.mkdir(parents=True, exist_ok=True)
    with OUTPUT_PATH.joinpath("AllPrintings.json").open("w", encoding="utf8") as f:
        f.write(lzma.decompress(file_bytes).decode())