    def compress(self, sourcefile_name, savefile, dry_run=False, debuggy=False):
        """ Run the compression algorithm for each chunk, in parallel. Note that
        the caller is responsible for seeking the file to the right position
        before calling this. """
        totalcomp = 0
        threads = Broker(len(self.chunks), debugmode=debuggy, greed=1)

        def spawnAll(chunklist):
            for chunk in chunklist:
                threads.appendNfire(spawn, (chunk, sourcefile_name, debuggy))

        self.writeHeader(savefile, dry_run=dry_run)
        for chunk in self.chunks:
            chunk.writeSubHeader(savefile, dry_run=dry_run)
        metadata_offset = savefile.tell()

        # this thread will feed the broker with tasks
        Thread(target=spawnAll, args=(self.chunks,)).start()

        # gather all the results, write them in sequence
        collected = 0
        while collected < len(self.chunks):
            for partial in threads.collect():
                totalcomp += partial.bitstream.buffer_info()[1]  # buffer length of the compressed bitstream
                partial.writeToFile(savefile, metadata_offset, dry_run=dry_run)
                collected += 1
        threads.stop()
        if self.aftercompress_callback_obj is not None:
            self.aftercompress_callback_obj.compressed(totalcomp + self.header_size, dry_run=dry_run)
        return totalcomp + 4
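The Broker these examples rely on is not shown on this page. As a rough stand-in, assuming appendNfire(func, args) queues and dispatches a task, collect() drains whatever results are ready, stop() shuts the pool down, and greed sets the worker count (all inferred from the call sites above), a minimal sketch could look like this:

import queue
import threading

class Broker:
    """Minimal sketch of the task broker used above. The real Broker is not
    shown in these examples; 'greed' is assumed to be the worker count and
    'debugmode' is accepted but unused here."""

    def __init__(self, expected_tasks, debugmode=False, greed=4):
        self._tasks = queue.Queue()
        self._results = queue.Queue()
        self._workers = [threading.Thread(target=self._work, daemon=True)
                         for _ in range(max(1, greed))]
        for worker in self._workers:
            worker.start()

    def _work(self):
        while True:
            item = self._tasks.get()
            if item is None:  # sentinel: shut this worker down
                break
            func, args = item
            self._results.put(func(*args))

    def appendNfire(self, func, args):
        """Queue a task for the next free worker; True means it was accepted."""
        self._tasks.put((func, args))
        return True

    def collect(self):
        """Yield every result that has finished so far, without blocking."""
        while not self._results.empty():
            yield self._results.get()

    def stop(self):
        """Send one sentinel per worker, then wait for the pool to drain."""
        for _ in self._workers:
            self._tasks.put(None)
        for worker in self._workers:
            worker.join()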
def nonStaging(pac, cmdline, filename):
    if cmdline.list:
        pac.printInfo()
    elif cmdline.list_harder:
        pac.printDetailInfo()
    elif cmdline.file_info:
        file = pac.getFileById(cmdline.file_info)
        if file is not None:
            print("          id    offset       size  compress  size  filename")
            file.printDetailInfo()
    elif cmdline.extract_id:
        with open(filename, "rb") as binfile:
            idlist = map(int, cmdline.extract_id.split(","))
            if cmdline.raw:
                location = datastruct.adjustSeparatorForFS("raw-extract/")
                if cmdline.extract:
                    location = cmdline.extract
                for fid in idlist:
                    pac.dumpFileId(fid, location, binfile)
            else:
                location = datastruct.adjustSeparatorForFS("extract/")
                if cmdline.extract:
                    location = cmdline.extract
                for fid in idlist:
                    pac.extractFileId(fid, location, binfile, debuggy=cmdline.debug)

    elif cmdline.extract:
        threads = Broker(len(pac.files))
        for fid in pac.listFileIDs():
            threads.appendNfire(extractJob, (pac, fid, cmdline, filename))
        threads.stop()
        print("Extraction job completed")
    def capsule(thread):
        threads = Broker(len(self.app.staging.package.files))
        thread.abort_interface = threads
        fids = self.app.staging.package.searchFile(internalname, exact_match=False)
        for fid in fids:
            if thread.handbrake:
                break
            if not threads.appendNfire(extractJob,
                                       (self.app.staging.package,
                                        fid, saveto, self.app.staging.target)):
                break
            thread.progressCallback(self.app.staging.package.getFileById(fid))
        threads.stop()
        self.after_run(thread, "done %s %d files" % (self.opname[1], self.doneunits))
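capsule runs inside a worker-thread object that exposes a handbrake flag (checked between tasks for cooperative cancellation) and an abort_interface slot (so the canceller can also stop the Broker). A minimal sketch of such a wrapper, with every name here assumed rather than taken from the real codebase:

import threading

class CancellableThread(threading.Thread):
    """Sketch of the 'thread' object capsule() receives. Setting handbrake
    asks the job to stop at its next loop iteration; abort_interface lets
    the canceller shut down the Broker feeding the workers as well."""

    def __init__(self, job, progress=None):
        super().__init__()
        self.handbrake = False
        self.abort_interface = None          # the job points this at its Broker
        self._job = job
        self._progress = progress or (lambda item: None)

    def progressCallback(self, item):
        self._progress(item)

    def run(self):
        self._job(self)

    def cancel(self):
        self.handbrake = True                # job checks this between tasks
        if self.abort_interface is not None:
            self.abort_interface.stop()      # drain the broker too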