Example #1
def parse_content(zipped):
    # read in the streamingbody response as bytes and close
    sourceBodyStream = zipped['Body']
    sourceData = sourceBodyStream.read()
    sourceBodyStream.close()
    # read the streamingbody data and create a file-like object
    sourceBytes = io.BytesIO(sourceData)
    # open the zipped file in read mode
    zippedData = ZipFile(sourceBytes)
    # get all file bytes not directories from the file archive
    unzipped = []
    for name in zippedData.namelist():
        if not zippedData.getinfo(name).is_dir():
            fileName = zippedData.getinfo(name).filename
            body = zippedData.read(name)
            # tuple, mime is 1st position
            mime = mimetypes.guess_type(fileName)[0]
            contentType = 'binary/octet-stream' if not mime else mime
            objectParams = {
                'Name': fileName,
                'Body': body,
                'Type': contentType
            }
            unzipped.append(objectParams)
    # close archive
    zippedData.close()
    # return the unzipped bytes stream back to s3 for put object
    return unzipped
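The list returned by parse_content() is shaped for S3 uploads, as the closing comment notes. Below is a minimal usage sketch, assuming boto3 is available and the snippet's own imports (io, mimetypes, ZipFile) are in place; the bucket and key names are purely illustrative.

import boto3

s3 = boto3.client('s3')
# hypothetical source bucket/key holding the ZIP archive
zipped = s3.get_object(Bucket='example-bucket', Key='incoming/archive.zip')
for obj in parse_content(zipped):
    # write each extracted member back to S3 as its own object
    s3.put_object(
        Bucket='example-bucket',   # hypothetical target bucket
        Key=obj['Name'],           # member path inside the archive
        Body=obj['Body'],          # raw bytes of the member
        ContentType=obj['Type'],   # guessed MIME type, or binary/octet-stream
    )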
def check_sap_file(sap_file_path):
    """
    Checks if the file at the given path is a valid Sapelli project file.

    Parameters
    ----------
    sap_file_path : str
        Path to (suspected) Sapelli project file.

    Raises
    ------
    SapelliSAPException:
        When the given file does not exist, is not a ZIP archive, or does not contain PROJECT.xml.
    """
    try:
        if not os.path.isfile(sap_file_path):
            raise SapelliSAPException('The file does not exist.')
        # Check if it is a ZIP file:
        zip = ZipFile(sap_file_path)  # throws BadZipfile
        # Check if it contains PROJECT.xml:
        zip.getinfo('PROJECT.xml')  # throws KeyError
    except BadZipfile:
        raise SapelliSAPException('The file is not a valid Sapelli project file (*.sap, *.excites or *.zip).')
    except KeyError:
        raise SapelliSAPException('The file is not a valid Sapelli project file (ZIP archive does not contain PROJECT.xml file).')
    finally:
        try:
            zip.close()
        except BaseException:
            pass
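check_sap_file() relies on a SapelliSAPException class defined elsewhere in its project, plus os and zipfile imports. A minimal stand-in and call, only to make the example self-contained (the path is illustrative):

class SapelliSAPException(Exception):
    """Stand-in for the project's own exception type."""

try:
    check_sap_file('/tmp/MyProject.sap')  # hypothetical path
except SapelliSAPException as error:
    print('Not a valid Sapelli project file:', error)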
Example #4
def updateitemversion():
    response = urlopen(
        "https://github.com/NetroScript/Graveyard-Keeper-Savefile-Editor/archive/master.zip"
    )
    # Load the downloaded in memory file as a zip file
    zipfile = ZipFile(BytesIO(response.read()))
    print("Deleting Old /rsc folder")
    shutil.rmtree('./data/html/rsc', ignore_errors=True)
    os.mkdir("./data/html/rsc")
    print("Copying new rsc files")
    # We iterate all files in the zip to be able to extract 1 whole specific folder
    for zip_info in zipfile.infolist():
        # We only want the rsc folder
        if zip_info.filename.startswith(
                "Graveyard-Keeper-Savefile-Editor-master/data/html/rsc/"):
            # To prevent an error when there is no base name
            if zip_info.filename[-1] == '/':
                continue
            # So we don't extract the whole folder structure we change the path in the zip info object
            zip_info.filename = os.path.basename(zip_info.filename)
            zipfile.extract(zip_info, "./data/html/rsc/")

    # Same as above but for individual files
    print("Deleting old items.json")
    os.remove("./data/html/items.json")
    print("Copying new items.json")
    info = zipfile.getinfo(
        "Graveyard-Keeper-Savefile-Editor-master/data/html/items.json")
    info.filename = os.path.basename(info.filename)
    zipfile.extract(info, "./data/html/")
    print("Deleting old locals.json")
    os.remove("./data/locals.json")
    print("Copying new locals.json")
    info = zipfile.getinfo(
        "Graveyard-Keeper-Savefile-Editor-master/data/locals.json")
    info.filename = os.path.basename(info.filename)
    zipfile.extract(info, "./data/")
    print("Deleting old itemversion")
    os.remove("./data/itemversion")
    print("Copying new itemversion")
    info = zipfile.getinfo(
        "Graveyard-Keeper-Savefile-Editor-master/data/itemversion")
    info.filename = os.path.basename(info.filename)
    zipfile.extract(info, "./data/")

    print("Finished updating - now closing")

    # Using just exit() doesn't close the browser (GUI) window
    # The JavaScript window.close() doesn't work on newer Chrome versions, if so a splash screen is shown
    eel.closeWindow()()

    # Because this process spawns the browser windows they should be sub processes
    # Then it closes those (in case the window.close() didn't work)
    app = psutil.Process(os.getpid())
    for GUI in app.children(recursive=True):
        GUI.kill()
    exit()
Example #5
def main():
    czip = ZipFile('channel.zip', 'r')
    for name in czip.namelist():
        if (name == 'readme.txt'): print czip.read(name)

    nothing = '90052'
    while True:
        fileContent = czip.read(nothing + '.txt')
        print czip.getinfo(nothing + '.txt').comment,
        match = re.findall('\d{2,5}', fileContent)
        if len(match) == 0: break
        else: nothing = match[0]
Example #6
    def _open_local(self, filename, mode):
        """
        This is an internal function to handle opening the temporary file 
        that the URL has been downloaded to, including handling compression
        if appropriate
        """
        open_f = None

        if filename.endswith('.gz') or filename.endswith('.gzip'):
            open_f = functools.partial(gzip.open, mode='rb')

        elif filename.endswith('.zip'):
            zipped_data = ZipFile(filename)

            if len(zipped_data.filelist) > 1:
                raise IOError("more than one file in " + file)
            if len(zipped_data.filelist) == 0:
                raise IOError("no file in " + file)

            #get the filename of the single file inside the zip
            info = zipped_data.getinfo(zipped_data.filelist[0].orig_filename)
            filename = info

            def open_internal(filename, zipped_data):
                return TextIOWrapper(zipped_data.open(filename))

            open_f = functools.partial(open_internal, zipped_data=zipped_data)

        else:
            open_f = functools.partial(open, mode='r')

        with open_f(filename) as fd:
            yield fd
    def get_smses(self, owner_id=None):
        """
        Returns all smses in form of SMS objects
        The optional owner_id if given sets the owner_id field of the SMS object
        """
        
        ret = dict(smses=[], errors=0, successful=0, error_messages=[])
        
        owner = db.query(User).filter_by(user_id=owner_id).first()
        if not owner:
            raise RuntimeError("Specified owner does not exist in the DB")
        
        owner_cell_number = UserCellNumber.get_default_number(owner_id)

        zip_file = ZipFile(self.filename, 'r')
        for subfilename in zip_file.namelist():
            try:
                log.info("Processing {} from the zip file".format(subfilename))
                file_info = zip_file.getinfo(subfilename)
                subfile = zip_file.open(file_info)
                file_data = subfile.read().decode('utf-16')
                log.debug(file_data)
                msg_obj = self.parse_msg_string(file_data, owner_cell_number, owner)
                msg_obj.owner_id = owner_id
                
                ret['smses'].append(msg_obj)
                ret['successful'] += 1
            
            except Exception as exp:
                ret['errors'] += 1
                ret['error_messages'].append(str(exp))
                
        return ret
Example #8
def decrypt_zip_file_map(encrypted_zip, encrypted_file_password):
    """Return decrypted zip file map bytes object."""
    encrypted_zip_file = ZipFile(encrypted_zip)
    for file in encrypted_zip_file.namelist():
        if encrypted_zip_file.getinfo(file).file_size < 1024 * 1024:
            file_map = encrypted_zip_file.read(file, pwd=encrypted_file_password.encode('utf-8'))
            return file_map
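A short usage sketch for decrypt_zip_file_map() (archive name and password are illustrative): it returns the bytes of the first member smaller than 1 MiB, decrypted with the legacy ZipCrypto password, or None if no such member exists.

file_map = decrypt_zip_file_map('maps.zip', 's3cret')
if file_map is not None:
    print('decrypted %d bytes' % len(file_map))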
 def save_data(self, zip_file:zipfile.ZipFile):
     entry = self.folder + "/" + self.label + ".txt"
     try:
         if self.attr.data_format == tango._tango.AttrDataFormat.SCALAR:
             buf = str(self.attr.value)
         elif self.attr.data_format == tango._tango.AttrDataFormat.SPECTRUM:
             avg = self.get_prop_as_int("save_avg")
             if avg < 1:
                 avg = 1
             buf = self.convert_to_buf(avg)
         else:
             LOGGER.log(logging.WARNING, "Unsupported attribute format for %s" % self.get_name())
             return
         try:
             info = zip_file.getinfo(entry)
             self.folder += ("_" + self.dev + '_' + str(time.time()))
             self.folder = self.folder.replace('/', '_')
             self.folder = self.folder.replace('.', '_')
             LOGGER.log(logging.WARNING, "Duplicate entry %s in zip file. Folder is changed to %s" % (entry, self.folder))
             entry = self.folder + "/" + self.label + ".txt"
         except:
             pass
         zip_file.writestr(entry, buf)
     except:
         LOGGER.log(logging.WARNING, "Attribute data save error for %s" % self.get_name())
 def publishFromDir(self, path, dirPath):
     '''
     @see ICDM.publishFromDir
     '''
     assert isinstance(path, str) and len(path) > 0, 'Invalid content path %s' % path
     assert isinstance(dirPath, str), 'Invalid directory path value %s' % dirPath
     path, fullPath = self._validatePath(path)
     if not isdir(dirPath):
         # not a directory, see if it's a entry in a zip file
         zipFilePath, inDirPath = getZipFilePath(dirPath, self.delivery.getRepositoryPath())
         zipFile = ZipFile(zipFilePath)
         if not inDirPath.endswith(ZIPSEP):
             inDirPath = inDirPath + ZIPSEP
         fileInfo = zipFile.getinfo(inDirPath)
         if not fileInfo.filename.endswith(ZIPSEP):
             raise IOError('Trying to publish a file from a ZIP directory path: %s' % fileInfo.filename)
         self._copyZipDir(zipFilePath, inDirPath, fullPath)
         assert log.debug('Success publishing ZIP dir %s (%s) to path %s', inDirPath, zipFilePath, path) or True
         return
     dirPath = normpath(dirPath)
     assert os.access(dirPath, os.R_OK), 'Unable to read the directory path %s' % dirPath
     for root, _dirs, files in os.walk(dirPath):
         relPath = relpath(root, dirPath)
         for file in files:
             publishPath = join(path, relPath.lstrip(os.sep), file)
             filePath = join(root, file)
             self.publishFromFile(publishPath, filePath)
         assert log.debug('Success publishing directory %s to path %s', dirPath, path) or True
 def publishFromFile(self, path, filePath):
     '''
     @see ICDM.publishFromFile
     '''
     assert isinstance(path, str) and len(path) > 0, 'Invalid content path %s' % path
     if not isinstance(filePath, str) and hasattr(filePath, 'read'):
         return self._publishFromFileObj(path, filePath)
     assert isinstance(filePath, str), 'Invalid file path value %s' % filePath
     path, dstFilePath = self._validatePath(path)
     dstDir = dirname(dstFilePath)
     if not isdir(dstDir):
         os.makedirs(dstDir)
     if not isfile(filePath):
         # not a file, see if it's a entry in a zip file
         zipFilePath, inFilePath = getZipFilePath(filePath, self.delivery.getRepositoryPath())
         zipFile = ZipFile(zipFilePath)
         fileInfo = zipFile.getinfo(inFilePath)
         if fileInfo.filename.endswith(ZIPSEP):
             raise IOError('Trying to publish a file from a ZIP directory path: %s' % fileInfo.filename)
         if not self._isSyncFile(zipFilePath, dstFilePath):
             copyfileobj(zipFile.open(inFilePath), open(dstFilePath, 'w+b'))
             assert log.debug('Success publishing ZIP file %s (%s) to path %s', inFilePath, zipFilePath, path) or True
         return
     assert os.access(filePath, os.R_OK), 'Unable to read the file path %s' % filePath
     if not self._isSyncFile(filePath, dstFilePath):
         copyfile(filePath, dstFilePath)
         assert log.debug('Success publishing file %s to path %s', filePath, path) or True
Example #12
def unpack(dirname, callback=None):
	"""unpack the files in ``packed.py`` and decompress under ``dirname``

	* callback is a function of prototype function(path).
	It is called before attempting to compress the file.
	"""
	from biz.default.packed import packed_default

	fakefile = StringIO(b64decode(packed_default))
	zipf = ZipFile(fakefile, "r")

	for name in zipf.namelist():
		try:
			os.makedirs(os.path.join(dirname, os.path.dirname(name)), 0700)
		except OSError:
			pass

		data = zipf.read(name)
		filename = os.path.join(dirname, name)

		if callback:
			callback(filename)
			
		mode = zipf.getinfo(name).external_attr >> 16

		f = file(filename, "wb")
		f.write(data)
		f.close()
		os.chmod(filename, mode)

	zipf.close()
	fakefile.close()
Example #13
class FileSeekerZip(FileSeekerBase):
    def __init__(self, zip_file_path, temp_folder):
        FileSeekerBase.__init__(self)
        self.zip_file = ZipFile(zip_file_path)
        self.name_list = self.zip_file.namelist()
        self.temp_folder = temp_folder
        self.directory = temp_folder

    def search(self, filepattern, return_on_first_hit=False):
        pathlist = []
        for member in self.name_list:
            if fnmatch.fnmatch('root/' + member, filepattern):
                try:
                    extracted_path = self.zip_file.extract(member, path=self.temp_folder) # already replaces illegal chars with _ when exporting
                    f = self.zip_file.getinfo(member)
                    date_time = f.date_time
                    date_time = time.mktime(date_time + (0, 0, -1))
                    os.utime(extracted_path, (date_time, date_time))
                    pathlist.append(extracted_path)
                except Exception as ex:
                    member = member.lstrip("/")
                    logfunc(f'Could not write file to filesystem, path was {member} ' + str(ex))
        return pathlist

    def cleanup(self):
        self.zip_file.close()
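Usage sketch for FileSeekerZip (archive and folder paths are illustrative, and FileSeekerBase plus logfunc come from the snippet's project). Note that search() matches fnmatch patterns against member names prefixed with 'root/', and '*' in fnmatch also crosses path separators.

seeker = FileSeekerZip('backup.zip', '/tmp/extracted')
for extracted_path in seeker.search('*.plist'):
    print(extracted_path)
seeker.cleanup()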
        
Example #14
    def _add_path(self, zip_file: zipfile.ZipFile, parent: NodeDirectory, path: zipfile.Path):
        info: zipfile.ZipInfo = zip_file.getinfo(path.at)

        common_kwargs = dict(
            fs=parent.fs,
            # specifically the file portion
            name=path.name.encode("utf8"),
            # Not writeable!
            mode=propagate_owner_perms((info.external_attr >> 16) & (~0o222 if self.read_only else ~0)),
            # size=info.file_size,
            blocks=math.ceil(info.file_size / self.block_size),
            mtime=dt.datetime(*info.date_time),
            ctime=dt.datetime(*info.date_time),
        )

        if info.is_dir():
            entry = SimpleDirectory(
                **common_kwargs,
            )
        else:
            entry = SimpleFile(
                contents=bytearray(path.read_bytes()),
                **common_kwargs
            )

        parent.link_child(entry)

        if info.is_dir():
            for child_path in path.iterdir():
                self._add_path(zip_file, parent=entry, path=child_path)
Example #15
    def preloadFont(cls, font, directory=DEFAULT_DIR):
        """
        Load font file into memory. This can be overriden with
        a superclass to create different font sources.
        """

        fontPath = os.path.join(directory, font + '.flf')
        if not os.path.exists(fontPath):
            fontPath = os.path.join(directory, font + '.tlf')
            if not os.path.exists(fontPath):
                raise pyfiglet.FontNotFound("%s doesn't exist" % font)

        if is_zipfile(fontPath):
            z = None
            try:
                z = ZipFile(fontPath, 'r')
                data = z.read(z.getinfo(z.infolist()[0].filename))
                z.close()
                return data.decode('utf-8', 'replace') if ST3 else data
            except Exception as e:
                if z is not None:
                    z.close()
                raise pyfiglet.FontError("couldn't read %s: %s" % (fontPath, e))
        else:
            try:
                with open(fontPath, 'rb') as f:
                    data = f.read()
                return data.decode('utf-8', 'replace') if ST3 else data
            except Exception as e:
                raise pyfiglet.FontError("couldn't open %s: %s" % (fontPath, e))

        raise pyfiglet.FontNotFound(font)
Example #16
def unzip(context, blob, pathname, compact=False, base=None, excludeExt=None, excludeList=None, compatText=False):
    if excludeList is None:
        excludeList = []

    zipFile = ZipFile(StringIO(blob))
    for member in zipFile.namelist():
        filename = zipFile.getinfo(member).filename
        if (excludeExt is not None and filename.lower().endswith(excludeExt)) or filename in excludeList:
            continue
        elif filename.find('/') != -1:
            if base is not None and filename[:filename.index('/')].lower().startswith(base):
                filename = filename[filename.index('/')+1:]
                if not filename or (compact and '/' in filename):
                    continue
            elif compact:
                continue
            elif filename.endswith('/'):
                os.makedirs(os.path.join(pathname, os.path.normpath(filename)))
                continue
            else:
                try:
                    os.makedirs(os.path.join(pathname, os.path.normpath(filename[:filename.rindex('/')])))
                except WindowsError:
                    pass
        if compact and compatText and (filename.endswith('.txt') or filename == 'COPYING'):
            filename = '[%s] %s' % (context, filename)
        with open(os.path.join(pathname, os.path.normpath(filename)), 'wb') as fp:
            fp.write(zipFile.open(member).read())
    zipFile.close()
Example #17
 def __init__(
     self,
     archive: zipfile.ZipFile,
     filename: str,
     restrictions: CSV_Restrictions,
 ) -> None:
     self.filename: str = filename
     self._restrictions: CSV_Restrictions = restrictions
     self.info: zipfile.ZipInfo = archive.getinfo(self.filename)
     display_loading_message: bool = (self.info.file_size >
                                      self.FILE_SIZE_THRESHOLD)
     if display_loading_message:
         sys.stdout.write('Loading {nam} (about {siz}) ... '.format(
             nam=self.filename, siz=self._format_file_size()))
         sys.stdout.flush()
     with archive.open(self.filename) as f:
         #See <http://stackoverflow.com/q/5627954/2899277> for explanation
         #of `io.TextIOWrapper` use.
         #Defining temporary variable `rows` here so we can be more precise
         #about the type of `self.rows`. In particular, later on we will
         #want to call `len` on it, so it needs to belong to `typing.Sized`.
         rows: typing.Iterable[CSV_Row] = (csv.DictReader(
             io.TextIOWrapper(f), dialect='excel'))
         self._apply_restrictions(rows=rows)
     if display_loading_message:
         sys.stdout.write('done.\n')
Example #18
    def acquire_all_resources(self, format_dict):
        from zipfile import ZipFile

        # Download archive.
        url = self.url(format_dict)
        shapefile_online = self._urlopen(url)
        zfh = ZipFile(six.BytesIO(shapefile_online.read()), "r")
        shapefile_online.close()

        # Iterate through all scales and levels and extract relevant files.
        modified_format_dict = dict(format_dict)
        scales = ("c", "l", "i", "h", "f")
        levels = (1, 2, 3, 4)
        for scale, level in itertools.product(scales, levels):
            modified_format_dict.update({"scale": scale, "level": level})
            target_path = self.target_path(modified_format_dict)
            target_dir = os.path.dirname(target_path)
            if not os.path.isdir(target_dir):
                os.makedirs(target_dir)

            for member_path in self.zip_file_contents(modified_format_dict):
                ext = os.path.splitext(member_path)[1]
                target = os.path.splitext(target_path)[0] + ext
                member = zfh.getinfo(member_path)
                with open(target, "wb") as fh:
                    fh.write(zfh.open(member).read())

        zfh.close()
    def install(self, target_dir, rename_like_self=True):
        """Unzip into `target_dir`.

        Keyword arguments:
        rename_like_self -- Ignored - We always unzip to a subdirectory named
                            `self.name`
        """
        zip = ZipFile(io.BytesIO(requests.get(self.url).content))

        progress("Unzipping {}".format(self.url))
        for member in zip.namelist():
            zipinfo = zip.getinfo(member)
            if zipinfo.filename[-1] == '/':
                continue

            targetpathelts = os.path.normpath(zipinfo.filename).split('/')
            targetpathelts[0] = self.name

            targetpath = os.path.join(target_dir, *targetpathelts)

            upperdirs = os.path.dirname(targetpath)
            if upperdirs and not os.path.exists(upperdirs):
                os.makedirs(upperdirs)

            with zip.open(zipinfo) as source, open(targetpath, "wb") as target:
                shutil.copyfileobj(source, target)
Example #20
    def aqcuire_all_resources(self, format_dict):
        import cStringIO as StringIO
        from zipfile import ZipFile

        # Download archive.
        url = self.url(format_dict)
        shapefile_online = self._urlopen(url)
        zfh = ZipFile(StringIO.StringIO(shapefile_online.read()), 'r')
        shapefile_online.close()

        # Iterate through all scales and levels and extract relevant files.
        modified_format_dict = dict(format_dict)
        scales = ('c', 'l', 'i', 'h', 'f')
        levels = (1, 2, 3, 4)
        for scale, level in itertools.product(scales, levels):
            modified_format_dict.update({'scale': scale, 'level': level})
            target_path = self.target_path(modified_format_dict)
            target_dir = os.path.dirname(target_path)
            if not os.path.isdir(target_dir):
                os.makedirs(target_dir)

            for member_path in self.zip_file_contents(modified_format_dict):
                ext = os.path.splitext(member_path)[1]
                target = os.path.splitext(target_path)[0] + ext
                member = zfh.getinfo(member_path)
                with open(target, 'wb') as fh:
                    fh.write(zfh.open(member).read())

        zfh.close()
Example #21
    def preloadFont(cls, font, directory=DEFAULT_DIR):
        """
        Load font file into memory. This can be overriden with
        a superclass to create different font sources.
        """

        fontPath = os.path.join(directory, font + ".flf")
        if not os.path.exists(fontPath):
            fontPath = os.path.join(directory, font + ".tlf")
            if not os.path.exists(fontPath):
                raise pyfiglet.FontNotFound("%s doesn't exist" % font)

        if is_zipfile(fontPath):
            z = None
            try:
                z = ZipFile(fontPath, "r")
                data = z.read(z.getinfo(z.infolist()[0].filename))
                z.close()
                return data.decode("utf-8", "replace") if ST3 else data
            except Exception as e:
                if z is not None:
                    z.close()
                raise pyfiglet.FontError("couldn't read %s: %s" % (fontPath, e))
        else:
            try:
                with open(fontPath, "rb") as f:
                    data = f.read()
                return data.decode("utf-8", "replace") if ST3 else data
            except Exception as e:
                raise pyfiglet.FontError("couldn't open %s: %s" % (fontPath, e))

        raise pyfiglet.FontNotFound(font)
Example #22
def main():
    """
    <-- zip
    See linked list
    """

    f = 'channel.zip'
    f = ZipFile(f, 'r')

    nothing = '90052'
    pattern = re.compile('Next nothing is (\d+)')

    result = []
    while True:
        nothing += '.txt'
        try:
            comment = f.getinfo(nothing).comment
        except KeyError:
            break

        if not comment in result:
            result.append(comment)

        nothing = f.open(nothing).read()
        try:
            nothing = pattern.findall(nothing)[0]
        except IndexError:
            break

    result = [c.lower() for c in result if c.isalpha()]
    print ''.join(result)
Example #23
def extractall(zip: zipfile.ZipFile, path: str) -> None:
    for name in zip.namelist():
        member = zip.getinfo(name)
        extracted_path = zip.extract(member, path)
        attr = member.external_attr >> 16
        if attr != 0:
            os.chmod(extracted_path, attr)
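ZipFile.extract() does not restore Unix permission bits, which is why the helper above re-applies them from external_attr. A minimal usage sketch (archive and destination paths are illustrative, and the os import the helper needs is assumed to be in place):

import zipfile

with zipfile.ZipFile('release.zip') as archive:
    extractall(archive, '/tmp/release')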
Example #24
    def acquire_all_resources(self, format_dict):
        from zipfile import ZipFile

        # Download archive.
        url = self.url(format_dict)
        shapefile_online = self._urlopen(url)
        zfh = ZipFile(six.BytesIO(shapefile_online.read()), 'r')
        shapefile_online.close()

        # Iterate through all scales and levels and extract relevant files.
        modified_format_dict = dict(format_dict)
        scales = ('c', 'l', 'i', 'h', 'f')
        levels = (1, 2, 3, 4)
        for scale, level in itertools.product(scales, levels):
            modified_format_dict.update({'scale': scale, 'level': level})
            target_path = self.target_path(modified_format_dict)
            target_dir = os.path.dirname(target_path)
            if not os.path.isdir(target_dir):
                os.makedirs(target_dir)

            for member_path in self.zip_file_contents(modified_format_dict):
                ext = os.path.splitext(member_path)[1]
                target = os.path.splitext(target_path)[0] + ext
                member = zfh.getinfo(member_path)
                with open(target, 'wb') as fh:
                    fh.write(zfh.open(member).read())

        zfh.close()
Example #25
    def acquire_resource(self, target_path, format_dict):
        """
        Download the zip file and extracts the files listed in
        :meth:`zip_file_contents` to the target path.

        """
        from zipfile import ZipFile

        target_dir = os.path.dirname(target_path)
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)

        url = self.url(format_dict)

        shapefile_online = self._urlopen(url)

        zfh = ZipFile(io.BytesIO(shapefile_online.read()), 'r')

        for member_path in self.zip_file_contents(format_dict):
            ext = os.path.splitext(member_path)[1]
            target = os.path.splitext(target_path)[0] + ext
            member = zfh.getinfo(member_path.replace(os.sep, '/'))
            with open(target, 'wb') as fh:
                fh.write(zfh.open(member).read())

        shapefile_online.close()
        zfh.close()

        return target_path
Example #26
def unzip(obj):
    counter = 0
    dir_path = path.join(
        'series', obj.series.slug, str(obj.volume), '%g' % obj.number
    )
    full_path = path.join(settings.MEDIA_ROOT, dir_path)
    if path.exists(full_path):
        rmtree(full_path)
    makedirs(full_path)
    zip_file = ZipFile(obj.file)
    name_list = zip_file.namelist()
    for name in sort.natural_sort(name_list):
        if _is_dir(zip_file.getinfo(name)):
            continue
        counter += 1
        data = zip_file.read(name)
        filename = '%03d%s' % (counter, path.splitext(name)[-1])
        file_path = path.join(dir_path, filename)
        image = Image.open(BytesIO(data))
        image.save(path.join(full_path, filename), quality=100)
        obj.pages.create(number=counter, image=file_path)
    zip_file.close()
    obj.file.close()
    # TODO: option to keep zip file
    remove(obj.file.path)
    obj.file.delete(save=True)
Example #27
def unzip(filename):
    z = ZipFile(filename)
    names = z.namelist()
    for path in names:
        if path.startswith('__MACOSX/'):
            continue

        base, name = os.path.split(path)

        if name.startswith('._') and\
            '%s/' % name.replace('._', '', 1) in names:
            continue

        double = os.path.join('__MACOSX', base, '._' + name)
        if double in names:
            print '=> %s.bin' % path

            info = z.getinfo(path)

            bin = MacBinary(name)
            bin.data = z.open(path, 'r').read()
            bin.res = z.open(double, 'r').read()

            modified = datetime.datetime(*info.date_time)
            bin.modified = time.mktime(modified.timetuple())
            bin.created = time.time()

            if not os.path.exists(base):
                os.makedirs(base)

            with open('%s.bin' % path.rstrip('\r'), 'wb') as f:
                f.write(bin.encode())
        else:
            print '-> %s' % path
            z.extract(path)
Example #28
    def acquire_resource(self, target_path, format_dict):
        """
        Downloads the zip file and extracts the files listed in
        :meth:`zip_file_contents` to the target path.

        """
        import cStringIO as StringIO
        from zipfile import ZipFile

        target_dir = os.path.dirname(target_path)
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)

        url = self.url(format_dict)

        shapefile_online = self._urlopen(url)

        zfh = ZipFile(StringIO.StringIO(shapefile_online.read()), 'r')

        for member_path in self.zip_file_contents(format_dict):
            ext = os.path.splitext(member_path)[1]
            target = os.path.splitext(target_path)[0] + ext
            member = zfh.getinfo(member_path)
            with open(target, 'wb') as fh:
                fh.write(zfh.open(member).read())

        shapefile_online.close()
        zfh.close()

        return target_path
Example #29
    def getFile(self, getfile, source, unpack=True):
        try:
            response = req.urlopen(getfile)
        except URLError as e:
            sys.exit(e)

        filepath = source + '/' + getfile.split('/')[-1]

        data = response
        save_file = open(filepath, 'wb')
        save_file.write(response.read())
        save_file.close()

        file = open(filepath, 'rb')
        size = 0
        # TODO: if data == text/plain; charset=utf-8, read and decode
        if unpack:
            # if 'gzip' in response.info().get('Content-Type'):
            #     buf = BytesIO(file.read())
            #     data = GzipFile(fileobj=buf)
            # elif 'bzip2' in response.info().get('Content-Type'):
            #     data = BytesIO(bz2.decompress(response.read()))
            if 'zip' in response.info().get('Content-Type'):
                fzip = ZipFile(BytesIO(file.read()), 'r')
                filename = fzip.namelist()[0]
                size = fzip.getinfo(filename).file_size
                if len(fzip.namelist()) > 0:
                    data = BytesIO(fzip.read(filename))
        file.close()

        return data, size
Example #31
def main():
	global parser
	parser = ArgumentParser()
	parser.add_argument('-f', '--file', dest='zipFile', metavar='<filename>', required=True, type=str, help='path to zip file')
	parser.add_argument('-d', '--dictionary', dest='dictionary', metavar='<filename>', type=str, help='path to password dictionary file')
	parser.add_argument('-s', '--start-length', dest='minLength', metavar='N', default=1, type=int, help='minimum length for brute-force - defaults to 1 (only available in no dictionary file)')
	parser.add_argument('-e', '--end-length', dest='maxLength', metavar='N', default=6, type=int, help='maximum length for brute-force - defaults to 6 (only available in no dictionary file)')
	args = parser.parse_args()

	print('')

	_timeStart()

	zFile = ZipFile(args.zipFile)
	namelist = zFile.namelist()
	dictionary = args.dictionary
	minLength = args.minLength
	maxLength = args.maxLength

	zFileName = ''
	for name in namelist:
		if name[-1] != '/':
			zFileName = name
			break
	if zFileName == '':
		_exit('No valid file in zip ')
	info = zFile.getinfo(zFileName)
	if info.flag_bits & 0x8:
		checkByte = (info._raw_time >> 8) & 0xff
	else:
		checkByte = (info.CRC >> 24) & 0xff
	zFile.fp.seek(41)  # sizeFileHeader + fheader[_FH_FILENAME_LENGTH]  30 + 11
	bytesContent = zFile.fp.read(12)
	zef_file = SharedFile(zFile.fp)
		
	count = 0
	if dictionary is not None:
		f = open(dictionary, 'r')
		content = f.readlines()
		f.close()
		print('%s passwords in dictionary file \n' % len(content))
		for passwd in content:
			count += 1
			if _zFile(zFile, zFileName, passwd.strip('\n\r'), info, checkByte, bytesContent, zef_file):
				_resultExit(count, passwd)
	else:
		#characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
		characters = "abcdefghijklmnopqrstuvwxyz"

		for length in range(minLength, maxLength + 1):
			print('Length of password : %s' % length)
			content = itertools.product(characters, repeat=length)
			for pw in content:
				passwd = ''.join(pw)
				count += 1
				if _zFile(zFile, zFileName, passwd, info, checkByte, bytesContent, zef_file):
					_resultExit(count, passwd)
	print('Tried %d passwords but no password found ...\n' % count)
	_timeEnd()
Example #32
def buildstamp(zipname):
    """Gets the build timestamp for a zipapp archive.
    __main__.py must exist.
    """
    archive = ZipFile(zipname)
    info = archive.getinfo('__main__.py')
    archive.close()
    yy, m, dd, hh, mm = info.date_time[0:5]
    return '{0:04}-{1:02}-{2:02} {3:02}:{4:02}'.format(yy, m, dd, hh, mm)
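Usage sketch for buildstamp() (archive name is illustrative). ZipInfo.date_time is a (year, month, day, hour, minute, second) tuple, so the formatted result looks like '2019-06-01 12:30'.

print(buildstamp('app.pyz'))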
Example #33
    def acquire_all_resources(self, format_dict):
        from zipfile import ZipFile

        # Download archive.
        url = self.url(format_dict)
        try:
            shapefile_online = self._urlopen(url)
        # error handling:
        except HTTPError:
            try:
                """
                case if GSHHS has had an update
                without changing the naming convention
                """
                url = (
                    f'https://www.ngdc.noaa.gov/mgg/shorelines/data/'
                    f'gshhs/oldversions/version{self.gshhs_version}/'
                    f'gshhg-shp-{self.gshhs_version}.zip'
                )
                shapefile_online = self._urlopen(url)
            except HTTPError:
                """
                case if GSHHS has had an update
                with changing the naming convention
                """
                url = (
                    'https://www.ngdc.noaa.gov/mgg/shorelines/data/'
                    'gshhs/oldversions/version2.3.6/'
                    'gshhg-shp-2.3.6.zip'
                )
                shapefile_online = self._urlopen(url)
        zfh = ZipFile(io.BytesIO(shapefile_online.read()), 'r')
        shapefile_online.close()

        # Iterate through all scales and levels and extract relevant files.
        modified_format_dict = dict(format_dict)
        scales = ('c', 'l', 'i', 'h', 'f')
        levels = (1, 2, 3, 4, 5, 6)
        for scale, level in itertools.product(scales, levels):
            # the combination c4 does not occur for some reason
            if scale == "c" and level == 4:
                continue
            modified_format_dict.update({'scale': scale, 'level': level})
            target_path = self.target_path(modified_format_dict)
            target_dir = os.path.dirname(target_path)
            if not os.path.isdir(target_dir):
                os.makedirs(target_dir)

            for member_path in self.zip_file_contents(modified_format_dict):
                ext = os.path.splitext(member_path)[1]
                target = os.path.splitext(target_path)[0] + ext
                member = zfh.getinfo(member_path.replace(os.sep, '/'))
                with open(target, 'wb') as fh:
                    fh.write(zfh.open(member).read())

        zfh.close()
Example #34
 def _check_test_zip(self):
     zip = ZipFile(self.zipFilename, 'r')
     try:
         info = zip.getinfo('image')
         expect(info).to_be_truthy()
         expect(info.flag_bits).to_equal(8)
         expect(info.filename).to_equal('image')
         expect(info.file_size).to_equal(stat(self.tiffFilename).st_size)
     finally:
         zip.close()
Example #36
def read_xml_inside_zip(xml_path):
    """
    Reads an XML with untangle which is inside a ZipFile.
    """
    zip_filepath = list(xml_path.values())[0]
    filename = list(xml_path.keys())[0]
    z = ZipFile(zip_filepath)
    file_size = z.getinfo(filename).file_size
    parsed_xml = untangle.parse(io.TextIOWrapper(io.BytesIO(z.read(filename))))
    return parsed_xml, file_size, filename
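Usage sketch for read_xml_inside_zip() (paths are illustrative): the xml_path argument is a one-entry dict mapping the member filename to the path of the ZIP archive that contains it.

parsed, size, name = read_xml_inside_zip({'report.xml': '/data/reports.zip'})
print(name, size)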
Example #37
def unzip(filename, destination=None, report=False):
    from zipfile import ZipFile
    base_dir = ""
    if destination is None:
        destination = os.path.dirname(
            filename)  #=> extraction in current directory
    try:
        zip = ZipFile(filename, "r")
        namelist = zip.namelist()
        total_items = len(namelist) or 1
        diff = 100.0 / total_items
        percent = 0
        # name of the root directory
        root_dir = namelist[0]
        is_root_dir = True
        # if root_dir is not a directory or is not the root, fall back to the archive name
        #print root_dir
        if not root_dir.endswith("/") and (zip.getinfo(root_dir).file_size >
                                           0):
            is_root_dir = False
        else:
            for i in namelist:
                #print root_dir in i, i
                if not root_dir in i:
                    is_root_dir = False
                    break
        base_dir = os.path.join(destination, root_dir.rstrip("/"))
        if not is_root_dir:  #root_dir.endswith( "/" ) and ( zip.getinfo( root_dir ).file_size > 0 ):
            root_dir = os.path.basename(os.path.splitext(filename)[0])
            #destination = os.path.join( destination, root_dir )
            base_dir = destination
        #if os.path.isdir( base_dir ):
        #    shutil2.rmtree( base_dir )
        #os.makedirs( base_dir )
        for count, item in enumerate(namelist):
            percent += diff
            if report:
                if DIALOG_PROGRESS.iscanceled():
                    break
                DIALOG_PROGRESS.update(
                    int(percent),
                    Language(188) % (count + 1, total_items), item,
                    Language(110))
                #print round( percent, 2 ), item
            if not item.endswith("/"):
                root, name = os.path.split(item)
                directory = os.path.normpath(os.path.join(destination, root))
                if not os.path.isdir(directory): os.makedirs(directory)
                file(os.path.join(directory, name), "wb").write(zip.read(item))
        zip.close()
        del zip
        return base_dir, True
    except:
        print_exc()
    return "", False
Example #38
def unzip( filename, destination=None, report=False ):
    from zipfile import ZipFile
    base_dir = ""
    if destination is None:
        destination = os.path.dirname( filename ) #=> extraction in current directory
    try:
        zip = ZipFile( filename, "r" )
        namelist = zip.namelist()
        total_items = len( namelist ) or 1
        diff = 100.0 / total_items
        percent = 0
        # name of the root directory
        is_root_dir = True
        if "/" in namelist[ 0 ]:
            if namelist[ 0 ].endswith( "/" ):
                root_dir = namelist[ 0 ]
                if not root_dir.endswith( "/" ) and ( zip.getinfo( root_dir ).file_size > 0 ):
                    is_root_dir = False
                else:
                    for i in namelist:
                        if not root_dir in i:
                            is_root_dir = False
                            break
            else:
                root_dir = namelist[ 0 ].split("/")[0]
        else:
            is_root_dir = False

        # if root_dir is not a directory or is not the root, fall back to the archive name
        base_dir = os.path.join( destination, root_dir.rstrip( "/" )[:42] ) # xbox filename limitation
        if not is_root_dir:#root_dir.endswith( "/" ) and ( zip.getinfo( root_dir ).file_size > 0 ):
            root_dir = os.path.basename( os.path.splitext( filename )[ 0 ] )
            destination = os.path.join( destination, root_dir[:42] )
            base_dir = destination
        if os.path.isdir( base_dir ):
            shutil2.rmtree( base_dir )
        os.makedirs( base_dir )
        for count, item in enumerate( namelist ):
            percent += diff
            if report:
                if DIALOG_PROGRESS.iscanceled():
                    break
                DIALOG_PROGRESS.update( int( percent ), _( 30188 ) % ( count + 1, total_items ), item, _( 30110 ) )
            if not item.endswith( "/" ):
                root, name = os.path.split( item )
                directory = os.path.normpath( os.path.join( destination, root.replace(root_dir.rstrip( "/" ),root_dir.rstrip( "/" )[:42]) ) )
                if not os.path.isdir( directory ): os.makedirs( directory )
                filename = makeLegalFilename( os.path.join( directory, name ), True )
                file( filename, "wb" ).write( zip.read( item ) )
        zip.close()
        del zip
        return base_dir, True
    except:
        print_exc()
    return "", False
Example #39
class ZipFileImportContext(BaseContext):
    """ GS Import context for a ZipFile """

    def __init__(self, tool, archive_bits, encoding=None, should_purge=False):
        super(ZipFileImportContext, self).__init__(tool, encoding)
        self._archive = ZipFile(archive_bits, 'r')
        self._should_purge = bool(should_purge)
        self.name_list = self._archive.namelist()

    def readDataFile(self, filename, subdir=None):

        if subdir is not None:
            filename = '/'.join((subdir, filename))

        try:
            file = self._archive.open(filename, 'rU')
        except KeyError:
            return None

        return file.read()

    def getLastModified(self, path):
        try:
            zip_info = self._archive.getinfo(path)
        except KeyError:
            return None
        return DateTime(*zip_info.date_time)

    def isDirectory(self, path):
        """ See IImportContext """

        # namelist only includes full filenames, not directories
        return path not in self.name_list

    def listDirectory(self, path, skip=[]):
        """ See IImportContext """

        # namelist contains only full path/filenames, not
        # directories. But we need to include directories.

        if path is None:
            path = ''
        path_parts = path.split('/')
        res = set()
        for pn in self.name_list:
            dn, bn = os.path.split(pn)
            dn_parts = dn.split('/')
            if dn == path:
                if bn not in skip:
                    res.add(bn)
                continue
            if dn.startswith(path) \
               and (path == '' or len(dn_parts) == len(path_parts) + 1):
                res.add(dn_parts[-1])
        return list(res)
Example #41
class ZipHandler:

    def __init__(self, path):
        self.path = path
        self.zip = ZipFile(self.path, "r")

    def list_files(self, sub_path):
        if sub_path:
            return
        for name in self.zip.namelist():
            if name.endswith(str("/")):
                continue
            yield self.decode_name(name)

    def open(self, name):
        name = self.encode_name(name)
        return self.zip.open(name)

    def exists(self, name):
        name = self.encode_name(name)
        try:
            self.zip.getinfo(name)
        except KeyError:
            return False
        else:
            return True

    def encode_name(self, name):
        name = name.replace("\\", "/")
        name = name.replace("%5f", "\\")
        name = name.replace("%25", "%")
        #name = name.encode("CP437")
        name = name.encode("ISO-8859-1")
        return name

    def decode_name(self, name):
        #name = name.decode("CP437")
        name = name.decode("ISO-8859-1")
        name = name.replace("%", "%25")
        name = name.replace("\\", "%5f")
        name = name.replace("/", os.sep)
        return name
Example #42
def loop_zip(zp: ZipFile, current):
    comments = []
    while True:
        filename = f"{current}.txt"
        content = zp.open(filename).read().decode("utf-8")

        comments.append(zp.getinfo(filename).comment)
        if (m := PATTERN.match(content)) is not None:
            current = m.group(1)
        else:
            return comments
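loop_zip() assumes a module-level PATTERN regex. Judging from the other channel.zip examples in this listing (for instance Example #22), it presumably looks roughly like the sketch below; the starting value '90052' is taken from those examples as well.

import re
from zipfile import ZipFile

PATTERN = re.compile(r"Next nothing is (\d+)")

with ZipFile("channel.zip") as zp:
    comments = loop_zip(zp, "90052")
# ZipInfo.comment is a bytes value, so join accordingly
print(b"".join(comments).decode())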
Example #43
 def load_from_local(self, source):
     try:
         filename = settings.CPE_SOURCE.split('/')[-1]
         z = ZipFile(source + '/' + filename, 'r')
         fname = z.namelist()[0]
         size = z.getinfo(fname).file_size
         f = z.open(fname)
         return f, size
     except FileNotFoundError as e:
         print(FileNotFoundError, e)
         print("Downloading from online source...")
         return self.load_from_remote(source)
Example #44
def _stream_convert(stream, outpath: str, mode: str, date: str):

    assert mode in ("zip", "gz")

    if mode == "zip":
        zip_file = ZipFile(stream, allowZip64=True)

        # Hack to workaround the broken file size in the header
        file_size = zip_file.getinfo(zip_file.namelist()[0]).file_size
        if file_size < 2**33:
            zip_file.getinfo(zip_file.namelist()[0]).file_size = 2**64 - 1
            file_size = 0

        # Open the first compressed file
        # (Only expecting one file inside the ZIP)
        with zip_file.open(zip_file.namelist()[0]) as z:
            _convert_and_store(z, outpath, file_size, date)

    elif mode == "gz":
        with gzip.open(stream) as z:
            _convert_and_store(z, outpath, 0, date)
Example #45
def main(argv=None):
    """Parse passed in cooked single HTML."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('collated_html',
                        type=argparse.FileType('r'),
                        help='Path to the collated html'
                        ' file (use - for stdin)')
    parser.add_argument('-d',
                        '--dump-tree',
                        action='store_true',
                        help='Print out parsed model tree.')

    parser.add_argument('-o',
                        '--output',
                        type=argparse.FileType('w+'),
                        help='Write out epub of parsed tree.')

    parser.add_argument('-i',
                        '--input',
                        type=argparse.FileType('r'),
                        help='Read and copy resources/ for output epub.')

    args = parser.parse_args(argv)

    if args.input and args.output == sys.stdout:
        raise ValueError('Cannot output to stdout if reading resources')

    from cnxepub.collation import reconstitute
    binder = reconstitute(args.collated_html)

    if args.dump_tree:
        print(pformat(cnxepub.model_to_tree(binder)), file=sys.stdout)
    if args.output:
        cnxepub.adapters.make_epub(binder, args.output)

    if args.input:
        args.output.seek(0)
        zout = ZipFile(args.output, 'a', ZIP_DEFLATED)
        zin = ZipFile(args.input, 'r')
        for res in zin.namelist():
            if res.startswith('resources'):
                zres = zin.open(res)
                zi = zin.getinfo(res)
                zout.writestr(zi, zres.read(), ZIP_DEFLATED)
        zout.close()

    # TODO Check for documents that have no identifier.
    #      These should likely be composite-documents
    #      or the the metadata got wiped out.
    # docs = [x for x in cnxepub.flatten_to(binder, only_documents_filter)
    #         if x.ident_hash is None]

    return 0
Example #46
def find_file_path(file_path):
    zip = ZipFile(file_path, 'r')
    if len(zip.read('bibliography.json')) > 0:
        # bibliography is not broken, use this file
        return file_path
    path_parts = file_path.split('/')
    if len(path_parts) < 5 or path_parts[-5] != 'revision':
        # We could not find a working version
        return None
    file_datetime = zip.getinfo('mimetype').date_time
    path_parts.pop()
    path_parts.pop()
    path_parts.pop()
    dir_path = '/'.join(path_parts)
    for file in os.listdir(dir_path):
        file_path = os.path.join(dir_path, file)
        if not os.path.isfile(file_path):
            continue
        zip = ZipFile(file_path, 'r')
        if zip.getinfo('mimetype').date_time == file_datetime:
            return find_file_path(file_path)
    return None
def get_file_in_zipfile(filepath, filename, targetdir):
    try:
        tmpfile = ZipFile(filepath, "r")
    except BZ:
        raise BadZipfile
    try:
        fileinfo = tmpfile.getinfo(filename)
    except KeyError:
        return False
    if fileinfo.file_size == 0:
        return 0
    targetfile = tmpfile.extract(filename, targetdir)
    tmpfile.close()
    return targetfile
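Usage sketch for get_file_in_zipfile() (paths are illustrative): it returns the extracted path on success, 0 for an empty member, and False when the member is missing from the archive.

target = get_file_in_zipfile('bundle.zip', 'config.xml', '/tmp/out')
if target:
    print('extracted to', target)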
Example #48
def main():
    pattern = re.compile(r'\d{2,}')
    zf = ZipFile('channel.zip')
    fp = zf.open('readme.txt')
    chain = open('chain.txt', 'w')
    text = fp.read()
    number = pattern.search(text).group(0)
    while True:
        finfo = zf.getinfo(number + '.txt')
        print finfo.comment
        print number
        text = zf.open(finfo).read()
        chain.write(finfo.comment)
        number = pattern.search(text).group(0)
 def _processZiplink(self, subPath, zipFilePath, inFilePath):
     '''
     Reads a link description file and returns a file handler to
     the linked file inside the ZIP archive.
     '''
     # make sure the ZIP file path uses the OS separator
     zipFilePath = normOSPath(zipFilePath)
     # convert the internal ZIP path to OS format in order to use standard path functions
     inFilePath = normOSPath(inFilePath)
     zipFile = ZipFile(zipFilePath)
     # resource internal ZIP path should be in ZIP format
     resPath = normZipPath(join(inFilePath, subPath))
     if resPath in zipFile.NameToInfo:
         return zipFile.open(resPath, 'r'), zipFile.getinfo(resPath).file_size
Example #50
class ZipFileSystem(object):
    def __init__(self, zip_file):
        self.zip = ZipFile(zip_file, "r")

    def read(self, rel_path):
        try:
            return self.zip.read(rel_path)
        except KeyError:
            return None

    def modified_at(self, rel_path):
        try:
            zip_info = self.zip.getinfo(rel_path)
            date_time = datetime(*zip_info.date_time)
            return time.mktime(date_time.timetuple())
        except KeyError:
            return None

    def file_size(self, rel_path):
        try:
            zip_info = self.zip.getinfo(rel_path)
            return zip_info.file_size
        except KeyError:
            return None

    @classmethod
    def make_zip_file_with_folder(cls, folder_path, zip_file_path):
        zip_file = ZipFile(zip_file_path, "w", ZIP_DEFLATED)

        for root, dirs, files in os.walk(folder_path):
            for file_name in files:
                abs_path = os.path.join(root, file_name)
                rel_path = unicode(get_rel_path(abs_path, folder_path), sys.getfilesystemencoding())
                zip_file.write(abs_path, rel_path)
                print "add...", rel_path
        zip_file.close()
        print "done!"
Example #51
    def unpackFont(cls, data, font):
        # data may be raw bytes or a file-like object; unpack it when it is a ZIP archive.
        is_file_obj = hasattr(data, 'read')
        if (is_file_obj and is_zipfile(data)) or (not is_file_obj and data.startswith(b"PK")):
            z = None
            try:
                z = ZipFile(BytesIO(data) if not is_file_obj else data, 'r')
                # Read the first member of the archive, assumed to be the font itself.
                data = z.read(z.infolist()[0])
                z.close()
                return BytesIO(data) if is_file_obj else data
            except Exception as e:
                if z is not None:
                    z.close()
                raise FontError("couldn't unpack %s: %s" % (font, e))
        else:
            return data
def read_file_in_zipfile(filepath, filename):
    # ZipFile itself raises BadZipfile if filepath is not a valid archive.
    with ZipFile(filepath, "r") as tmpfile:
        try:
            fileinfo = tmpfile.getinfo(filename)
        except KeyError:
            # The requested member is not in the archive.
            return False
        if fileinfo.file_size == 0:
            # The member exists but is empty.
            return 0
        # Return the member's contents as bytes.
        return tmpfile.read(filename)
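Because read_file_in_zipfile signals a missing member with False and an empty member with 0, callers should check identity before equality; a small sketch with a hypothetical archive.zip and notes.txt:

contents = read_file_in_zipfile('archive.zip', 'notes.txt')
if contents is False:
    print('notes.txt is not in the archive')
elif contents == 0:
    print('notes.txt is present but empty')
else:
    print('read %d bytes' % len(contents))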
Example #53
0
def main(argv=None):
    """Parse passed in cooked single HTML."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('collated_html', type=argparse.FileType('r'),
                        help='Path to the collated html'
                             ' file (use - for stdin)')
    parser.add_argument('-d', '--dump-tree', action='store_true',
                        help='Print out parsed model tree.')

    parser.add_argument('-o', '--output', type=argparse.FileType('w+'),
                        help='Write out epub of parsed tree.')

    parser.add_argument('-i', '--input', type=argparse.FileType('r'),
                        help='Read and copy resources/ for output epub.')

    args = parser.parse_args(argv)

    if args.input and args.output == sys.stdout:
        raise ValueError('Cannot output to stdout if reading resources')

    from cnxepub.collation import reconstitute
    binder = reconstitute(args.collated_html)

    if args.dump_tree:
        print(pformat(cnxepub.model_to_tree(binder)),
              file=sys.stdout)
    if args.output:
        cnxepub.adapters.make_epub(binder, args.output)

    if args.input:
        args.output.seek(0)
        zout = ZipFile(args.output, 'a', ZIP_DEFLATED)
        zin = ZipFile(args.input, 'r')
        for res in zin.namelist():
            if res.startswith('resources'):
                zres = zin.open(res)
                zi = zin.getinfo(res)
                zout.writestr(zi, zres.read(), ZIP_DEFLATED)
        zout.close()

    # TODO Check for documents that have no identifier.
    #      These should likely be composite-documents
    #      or the metadata got wiped out.
    # docs = [x for x in cnxepub.flatten_to(binder, only_documents_filter)
    #         if x.ident_hash is None]

    return 0
Example #54
0
    def test_zip_integrity(self):
        for num_files, min_size, max_size in [(1, 0, MB(5)), (2, 0, MB(2.5)), (10, 0, MB(0.5)), (100, 0, MB(0.25))]:
            # (1000, 0, MB(0.001))]:
            zipstream, consumer, producers = yield self.create_zipstream_from_tempfiles(
                num_files, min_size=min_size, max_size=max_size
            )
            z = ZipFile(consumer)
            namelist = z.namelist()
            self.assertEqual(len(namelist), num_files)

            for filename, producer in producers.items():
                self.assertTrue(producer.key() in namelist)
                info = z.getinfo(producer.key())
                self.assertEqual(producer.size(), info.file_size)

                # Compare the CRC recorded in the archive with the CRC of the source file.
                with open(filename, "rb") as f:
                    self.assertEqual(binascii.crc32(f.read()) & 0xFFFFFFFF, info.CRC)
Example #55
0
    def replace(self, filename, stream):
        """
        Replaces the content of filename by stream in the archive.
        Creates a new file if filename was not already there.
        """
        try:
            zf = ZipFile(self._document, mode='a', compression=ZIP_DEFLATED)
        except RuntimeError:
            # zlib is unavailable; fall back to an uncompressed archive.
            zf = ZipFile(self._document, mode='a')
        try:
            # Remove the existing entry from the archive's index first, if any.
            # Note: this relies on ZipFile internals and does not reclaim the
            # space used by the old member's data.
            fi = zf.getinfo(filename)
            zf.filelist.remove(fi)
        except KeyError:
            # This is a new file
            pass
        zf.writestr(filename, stream)
        zf.close()
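Removing the ZipInfo from zf.filelist only drops the entry from the central directory written on close; the replaced member's compressed data still occupies space in the archive. A hedged alternative, not taken from the original snippet, is to rewrite the archive into a fresh file and copy every member except the one being replaced (assuming ZipFile and ZIP_DEFLATED are imported from zipfile as above):

def rewrite_with_replacement(src_path, dst_path, filename, stream):
    # Copy src_path to dst_path, substituting `stream` for the member `filename`.
    with ZipFile(src_path, 'r') as src, ZipFile(dst_path, 'w', ZIP_DEFLATED) as dst:
        for info in src.infolist():
            if info.filename != filename:
                dst.writestr(info, src.read(info))
        dst.writestr(filename, stream)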
def write_zip_dir_node(file: zipfile.ZipFile, path: str):
    """
    Attempt to write a dummy directory node into the zip file.
    Does nothing if the node already exists.
    """

    if not path.endswith("/"):
        path += "/"

    try:
        # Probe for an existing entry; getinfo raises KeyError if it is absent.
        file.getinfo(path)

    except KeyError:
        info = zipfile.ZipInfo()
        info.date_time = (2001, 9, 11, 8, 46, 0)
        info.filename = path

        file.writestr(info, "")
        print(Fore.CYAN + Style.DIM + "Wrote " + path + " zip directory." + Style.RESET_ALL)
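A short usage sketch for write_zip_dir_node, assuming a hypothetical archive mod.zip that should gain an explicit assets/textures/ directory entry:

with zipfile.ZipFile("mod.zip", "a") as zf:
    write_zip_dir_node(zf, "assets/textures")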
Example #57
0
    def zip_dir_munge(self, old, new):
        "add some directory/ entries that vertx's unzipper prefers"
        o = ZipFile(old, 'r')
        orig_contents = o.namelist()
        dirs = {}
        for entry in orig_contents:
            components = entry.split("/")[:-1]
            for i in range(0, len(components)):
                path = "/".join(components[0:i+1]) + "/"
                dirs[path] = path

        n = ZipFile(new, 'w')
        # order them, even
        for d in sorted(dirs.keys()):
            n.writestr(d, '', ZIP_STORED)
        for entry in orig_contents:
            n.writestr(o.getinfo(entry), o.read(entry))  # sluurp

        o.close()
        n.close()
Example #58
0
    def getZipFile(self, zfile, filename):
        """ Gets a file from the Zip archive."""
        mt = self.mimetypes_registry
        f = ZipFile(zfile)
        finfo = f.getinfo(filename)
        fn = split(finfo.filename)[1]  # Get the file name
        path = fn.replace('\\', '/')
        fp = path.split('/')  # Split the file path into a list

        if '' == fn:
            return 'dir', fn, fp, None, None, 0, None
        ftype = mt.lookupExtension(finfo.filename)
        if not ftype:
            major = 'application'
            mimetype = 'application/octet-stream'
        else:
            major = ftype.major()
            mimetype = ftype.normalized()
        fdata = f.read(filename)
        return 'file', fn, fp, major, mimetype, finfo.file_size, fdata
Example #59
0
    def acquire_resource(self, target_path, format_dict):
        from zipfile import ZipFile

        target_dir = os.path.dirname(target_path)
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)

        url = self.url(format_dict)

        srtm_online = self._urlopen(url)
        zfh = ZipFile(six.BytesIO(srtm_online.read()), 'r')

        zip_member_path = u'{y}{x}.hgt'.format(**format_dict)
        member = zfh.getinfo(zip_member_path)
        with open(target_path, 'wb') as fh:
            fh.write(zfh.open(member).read())

        srtm_online.close()
        zfh.close()

        return target_path
Example #60
0
def extract_psd(filename):
	zipf = ZipFile(os.path.join(psd_directory, filename), 'r')
	psd_file = {}

	for member in zipf.namelist():
		is_psd = member.endswith('.psd')
		file_size = zipf.getinfo(member).file_size
		if is_psd and (("size" not in psd_file) or (psd_file["size"] < file_size)):
			psd_file["size"] = file_size
			psd_file["name"] = member

	if not psd_file:
		# No .psd member was found in the archive.
		zipf.close()
		return False

	m_source = zipf.open(psd_file['name'])
	m_target = open(os.path.join(psd_directory, change_ext(filename, 'psd')), 'wb')
	shutil.copyfileobj(m_source, m_target)
	m_source.close()
	m_target.close()
	zipf.close()
	os.unlink(os.path.join(psd_directory, filename))
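The size comparison above can also be written with max over infolist(); a sketch of that equivalent selection, with an illustrative helper name:

def largest_psd_member(zipf):
    # Return the ZipInfo of the largest .psd member, or None if there is none.
    psd_members = [info for info in zipf.infolist() if info.filename.endswith('.psd')]
    if not psd_members:
        return None
    return max(psd_members, key=lambda info: info.file_size)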