Example #1
def export():
    name = request.form.get('name')
    settings = loads(open(PATH_SETTINGS + name + '.json', 'rb').read())

    # Build list of needed resources
    resources = []
    for listener in settings.get('listeners', []):
        for effect in listener.get('effects', []):
            resource = effect.get('resource', {}).get('source')
            if isinstance(resource, list):
                resources.extend(resource)
            else:
                resources.append(resource)

    # Create ZIP with all files
    memory_file = BytesIO()
    with ZipFile(memory_file, 'w') as zf:
        # Resources
        for resource in resources:
            path = PATH_FILES + resource
            data = ZipInfo('files/' + resource)
            data.compress_type = ZIP_DEFLATED
            zf.writestr(data, open(path, 'rb').read())

        # Config
        data = ZipInfo('settings/' + name + '.json')
        data.compress_type = ZIP_DEFLATED
        zf.writestr(data, open(PATH_SETTINGS + name + '.json', 'rb').read())
    memory_file.seek(0)
    
    return send_file(memory_file, attachment_filename=name + '.zip', as_attachment=True)
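Example #1 assumes a Flask view (request, send_file) plus the module-level constants PATH_SETTINGS and PATH_FILES. A minimal self-contained sketch of the same pattern, using hypothetical names (app, the payload dict, demo.zip), could look like this:

from io import BytesIO
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED
from flask import Flask, send_file

app = Flask(__name__)

@app.route('/export-demo')
def export_demo():
    payload = {'files/a.txt': b'first', 'settings/demo.json': b'{}'}

    memory_file = BytesIO()
    with ZipFile(memory_file, 'w') as zf:
        for arcname, data in payload.items():
            info = ZipInfo(arcname)            # archive path of the entry
            info.compress_type = ZIP_DEFLATED  # per-entry compression
            zf.writestr(info, data)
    memory_file.seek(0)                        # rewind before sending

    # Flask < 2.0 calls this parameter attachment_filename instead of download_name
    return send_file(memory_file, mimetype='application/zip',
                     as_attachment=True, download_name='demo.zip')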
Example #2
def create_zipinfo(filename, mtime=None, dir=False, executable=False, symlink=False,
                   comment=None):
    """Create a instance of `ZipInfo`.

    :param filename: file name of the entry
    :param mtime: modified time of the entry
    :param dir: if `True`, the entry is a directory
    :param executable: if `True`, the entry is an executable file
    :param symlink: if `True`, the entry is a symbolic link
    :param comment: comment of the entry
    """
    from zipfile import ZipInfo, ZIP_DEFLATED, ZIP_STORED
    zipinfo = ZipInfo()

    # The general purpose bit flag 11 is used to denote
    # UTF-8 encoding for path and comment. Only set it for
    # non-ascii files for increased portability.
    # See http://www.pkware.com/documents/casestudies/APPNOTE.TXT
    if any(ord(c) >= 128 for c in filename):
        zipinfo.flag_bits |= 0x0800
    zipinfo.filename = filename.encode('utf-8')

    if mtime is not None:
        mtime = to_datetime(mtime, utc)
        zipinfo.date_time = mtime.utctimetuple()[:6]
        # The "extended-timestamp" extra field is used for the
        # modified time of the entry in unix time. It avoids
        # extracting wrong modified time if non-GMT timezone.
        # See http://www.opensource.apple.com/source/zip/zip-6/unzip/unzip
        #     /proginfo/extra.fld
        zipinfo.extra += struct.pack(
            '<hhBl',
            0x5455,                 # extended-timestamp extra block type
            1 + 4,                  # size of this block
            1,                      # modification time is present
            to_timestamp(mtime))    # time of last modification

    # external_attr is 4 bytes in size. The high order two
    # bytes represent UNIX permission and file type bits,
    # while the low order two contain MS-DOS FAT file
    # attributes, most notably bit 4 marking directories.
    if dir:
        if not zipinfo.filename.endswith('/'):
            zipinfo.filename += '/'
        zipinfo.compress_type = ZIP_STORED
        zipinfo.external_attr = 040755 << 16L       # permissions drwxr-xr-x
        zipinfo.external_attr |= 0x10               # MS-DOS directory flag
    else:
        zipinfo.compress_type = ZIP_DEFLATED
        zipinfo.external_attr = 0644 << 16L         # permissions -rw-r--r--
        if executable:
            zipinfo.external_attr |= 0755 << 16L    # -rwxr-xr-x
        if symlink:
            zipinfo.compress_type = ZIP_STORED
            zipinfo.external_attr |= 0120000 << 16L # symlink file type

    if comment:
        zipinfo.comment = comment.encode('utf-8')

    return zipinfo
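The helpers to_datetime, to_timestamp and utc above are Trac utilities that are not shown here. A standalone Python 3 sketch of the two tricks the comments describe, the UTF-8 flag bit and the 0x5455 extended-timestamp extra field, might look like this:

import struct
import time
from io import BytesIO
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED

mtime = time.time()
info = ZipInfo('résumé.txt', date_time=time.localtime(mtime)[:6])
info.compress_type = ZIP_DEFLATED
info.flag_bits |= 0x0800                     # declare UTF-8 name/comment
# header id 0x5455 ('UT'), data size 5, flags (mtime present), unix mtime
info.extra = struct.pack('<HHBl', 0x5455, 5, 1, int(mtime))

buf = BytesIO()
with ZipFile(buf, 'w') as zf:
    zf.writestr(info, b'content')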
Example #3
    def _render_zip(self, req, filename, repos, data):
        """ZIP archive with all the added and/or modified files."""
        new_rev = data['new_rev']
        req.send_response(200)
        req.send_header('Content-Type', 'application/zip')
        req.send_header('Content-Disposition',
                        content_disposition('inline', filename + '.zip'))

        from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED

        buf = StringIO()
        zipfile = ZipFile(buf, 'w', ZIP_DEFLATED)
        for old_node, new_node, kind, change in repos.get_changes(
            new_path=data['new_path'], new_rev=data['new_rev'],
            old_path=data['old_path'], old_rev=data['old_rev']):
            if kind == Node.FILE and change != Changeset.DELETE:
                assert new_node
                zipinfo = ZipInfo()
                zipinfo.filename = new_node.path.strip('/').encode('utf-8')
                # Note: unicode filenames are not supported by zipfile.
                # UTF-8 is not supported by all Zip tools either,
                # but as some do, I think UTF-8 is the best option here.
                zipinfo.date_time = new_node.last_modified.utctimetuple()[:6]
                zipinfo.external_attr = 0644 << 16L # needed since Python 2.5
                zipinfo.compress_type = ZIP_DEFLATED
                zipfile.writestr(zipinfo, new_node.get_content().read())
        zipfile.close()

        zip_str = buf.getvalue()
        req.send_header("Content-Length", len(zip_str))
        req.end_headers()
        req.write(zip_str)
        raise RequestDone
Example #4
    def doTest(self, expected_ext, files, *modules, **kw):
        z = ZipFile(TEMP_ZIP, "w")
        try:
            for name, (mtime, data) in files.items():
                zinfo = ZipInfo(name, time.localtime(mtime))
                zinfo.compress_type = self.compression
                z.writestr(zinfo, data)
            z.close()

            stuff = kw.get("stuff", None)
            if stuff is not None:
                # Prepend 'stuff' to the start of the zipfile
                f = open(TEMP_ZIP, "rb")
                data = f.read()
                f.close()

                f = open(TEMP_ZIP, "wb")
                f.write(stuff)
                f.write(data)
                f.close()

            sys.path.insert(0, TEMP_ZIP)

            mod = __import__(".".join(modules), globals(), locals(),
                             ["__dummy__"])
            if expected_ext:
                file = mod.get_file()
                self.assertEquals(file, os.path.join(TEMP_ZIP,
                                  *modules) + expected_ext)
        finally:
            z.close()
            os.remove(TEMP_ZIP)
Example #5
    def _render_zip(self, req, repos, chgset):
        """ZIP archive with all the added and/or modified files."""
        req.send_response(200)
        req.send_header('Content-Type', 'application/zip')
        req.send_header('Content-Disposition', 'attachment;'
                        'filename=Changeset%s.zip' % chgset.rev)
        req.end_headers()

        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED

        buf = StringIO()
        zipfile = ZipFile(buf, 'w', ZIP_DEFLATED)
        for path, kind, change, base_path, base_rev in chgset.get_changes():
            if kind == Node.FILE and change != Changeset.DELETE:
                node = repos.get_node(path, chgset.rev)
                zipinfo = ZipInfo()
                zipinfo.filename = node.path
                zipinfo.date_time = time.gmtime(node.last_modified)[:6]
                zipinfo.compress_type = ZIP_DEFLATED
                zipfile.writestr(zipinfo, node.get_content().read())
        zipfile.close()
        req.write(buf.getvalue())
Example #6
    def dump(self, fp):
        """Dump the plugin as package into the filepointer or file."""
        from zipfile import ZipFile, ZipInfo
        f = ZipFile(fp, 'w')

        # write all files into a "pdata/" folder
        offset = len(self.path) + 1
        for dirpath, dirnames, filenames in walk(self.path):
            # don't recurse into hidden dirs
            for i in range(len(dirnames)-1, -1, -1):
                if dirnames[i].startswith('.'):
                    del dirnames[i]
            for filename in filenames:
                if filename.endswith('.pyc') or \
                   filename.endswith('.pyo'):
                    continue
                f.write(path.join(dirpath, filename),
                        path.join('pdata', dirpath[offset:], filename))

        # add the package information files
        for name, data in [('ZINE_PLUGIN', self.name),
                           ('ZINE_PACKAGE', PACKAGE_VERSION)]:
            zinfo = ZipInfo(name, localtime(time()))
            zinfo.compress_type = f.compression
            zinfo.external_attr = (33188 & 0xFFFF) << 16L
            f.writestr(zinfo, str(data))

        f.close()
Example #7
 def _write (self, zname, str) :
     now  = datetime.utcnow ().timetuple ()
     info = ZipInfo (zname, date_time = now)
     info.create_system = 0 # pretend the entry was created on FAT (MS-DOS)
     info.compress_type = ZIP_DEFLATED
     self.ozip.writestr (info, str)
     self.written [zname] = 1
Example #8
    def testZipImporterMethodsInSubDirectory(self):
        packdir = TESTPACK + os.sep
        packdir2 = packdir + TESTPACK2 + os.sep
        files = {packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
                 packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}

        z = ZipFile(TEMP_ZIP, "w")
        try:
            for name, (mtime, data) in files.items():
                zinfo = ZipInfo(name, time.localtime(mtime))
                zinfo.compress_type = self.compression
                z.writestr(zinfo, data)
            z.close()

            zi = zipimport.zipimporter(TEMP_ZIP + os.sep + packdir)
            self.assertEquals(zi.archive, TEMP_ZIP)
            self.assertEquals(zi.prefix, packdir)
            self.assertEquals(zi.is_package(TESTPACK2), True)
            zi.load_module(TESTPACK2)

            self.assertEquals(zi.is_package(TESTPACK2 + os.sep + '__init__'), False)
            self.assertEquals(zi.is_package(TESTPACK2 + os.sep + TESTMOD), False)

            mod_name = TESTPACK2 + os.sep + TESTMOD
            mod = __import__(module_path_to_dotted_name(mod_name))
            self.assertEquals(zi.get_source(TESTPACK2), None)
            self.assertEquals(zi.get_source(mod_name), None)
        finally:
            z.close()
            os.remove(TEMP_ZIP)
Example #9
 def write_blob(self, path, blob, compression=ZIP_DEFLATED, mode=0644):
     """Add something to the zip without adding to manifest"""
     zinfo = ZipInfo(path)
     zinfo.external_attr = mode << 16L # set permissions
     zinfo.compress_type = compression
     zinfo.date_time = self.now
     self.zipfile.writestr(zinfo, blob)
Example #10
    def _render_zip(self, req, filename, repos, diff):
        """ZIP archive with all the added and/or modified files."""
        new_rev = diff.new_rev
        req.send_response(200)
        req.send_header('Content-Type', 'application/zip')
        req.send_header('Content-Disposition', 'attachment;'
                        'filename=%s.zip' % filename)

        from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED

        buf = StringIO()
        zipfile = ZipFile(buf, 'w', ZIP_DEFLATED)
        for old_node, new_node, kind, change in repos.get_changes(**diff):
            if kind == Node.FILE and change != Changeset.DELETE:
                assert new_node
                zipinfo = ZipInfo()
                zipinfo.filename = new_node.path.encode('utf-8')
                # Note: unicode filenames are not supported by zipfile.
                # UTF-8 is not supported by all Zip tools either,
                # but as some do, I think UTF-8 is the best option here.
                zipinfo.date_time = time.gmtime(new_node.last_modified)[:6]
                zipinfo.compress_type = ZIP_DEFLATED
                zipfile.writestr(zipinfo, new_node.get_content().read())
        zipfile.close()

        buf.seek(0, 2) # be sure to be at the end
        req.send_header("Content-Length", buf.tell())
        req.end_headers()

        req.write(buf.getvalue())
Example #11
    def testZipImporterMethods(self):
        packdir = TESTPACK + os.sep
        packdir2 = packdir + TESTPACK2 + os.sep
        files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc),
                 packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
                 packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc),
                 "spam" + pyc_ext: (NOW, test_pyc)}

        z = ZipFile(TEMP_ZIP, "w")
        try:
            for name, (mtime, data) in files.items():
                zinfo = ZipInfo(name, time.localtime(mtime))
                zinfo.compress_type = self.compression
                zinfo.comment = b"spam"
                z.writestr(zinfo, data)
            z.close()

            zi = zipimport.zipimporter(TEMP_ZIP)
            self.assertEqual(zi.archive, TEMP_ZIP)
            self.assertEqual(zi.is_package(TESTPACK), True)

            find_mod = zi.find_module('spam')
            self.assertIsNotNone(find_mod)
            self.assertIsInstance(find_mod, zipimport.zipimporter)
            self.assertFalse(find_mod.is_package('spam'))
            load_mod = find_mod.load_module('spam')
            self.assertEqual(find_mod.get_filename('spam'), load_mod.__file__)

            mod = zi.load_module(TESTPACK)
            self.assertEqual(zi.get_filename(TESTPACK), mod.__file__)

            existing_pack_path = importlib.import_module(TESTPACK).__path__[0]
            expected_path_path = os.path.join(TEMP_ZIP, TESTPACK)
            self.assertEqual(existing_pack_path, expected_path_path)

            self.assertEqual(zi.is_package(packdir + '__init__'), False)
            self.assertEqual(zi.is_package(packdir + TESTPACK2), True)
            self.assertEqual(zi.is_package(packdir2 + TESTMOD), False)

            mod_path = packdir2 + TESTMOD
            mod_name = module_path_to_dotted_name(mod_path)
            mod = importlib.import_module(mod_name)
            self.assertTrue(mod_name in sys.modules)
            self.assertEqual(zi.get_source(TESTPACK), None)
            self.assertEqual(zi.get_source(mod_path), None)
            self.assertEqual(zi.get_filename(mod_path), mod.__file__)
            # To pass in the module name instead of the path, we must use the
            # right importer
            loader = mod.__loader__
            self.assertEqual(loader.get_source(mod_name), None)
            self.assertEqual(loader.get_filename(mod_name), mod.__file__)

            # test prefix and archivepath members
            zi2 = zipimport.zipimporter(TEMP_ZIP + os.sep + TESTPACK)
            self.assertEqual(zi2.archive, TEMP_ZIP)
            self.assertEqual(zi2.prefix, TESTPACK + os.sep)
        finally:
            z.close()
            os.remove(TEMP_ZIP)
Example #12
 def testUnencodable(self):
     filename = support.TESTFN_UNENCODABLE + ".zip"
     self.addCleanup(support.unlink, filename)
     with ZipFile(filename, "w") as z:
         zinfo = ZipInfo(TESTMOD + ".py", time.localtime(NOW))
         zinfo.compress_type = self.compression
         z.writestr(zinfo, test_src)
     zipimport.zipimporter(filename).load_module(TESTMOD)
Example #13
    def write(self, filename, arcname=None, compress_type=None):
        with open(filename, 'rb') as f:
            st = os.fstat(f.fileno())
            data = f.read()

        zinfo = ZipInfo(arcname or filename, date_time=get_zipinfo_datetime(st.st_mtime))
        zinfo.external_attr = st.st_mode << 16
        zinfo.compress_type = ZIP_DEFLATED
        self.writestr(zinfo, data, compress_type)
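As in Example #13, the high 16 bits of external_attr carry the POSIX mode. The following round-trip sketch is purely illustrative and not taken from any of the projects above:

from io import BytesIO
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED

buf = BytesIO()
with ZipFile(buf, 'w') as zf:
    info = ZipInfo('bin/tool.sh')
    info.compress_type = ZIP_DEFLATED
    info.external_attr = 0o755 << 16          # store -rwxr-xr-x in the high bits
    zf.writestr(info, b'#!/bin/sh\necho hi\n')

with ZipFile(buf) as zf:
    # the mode can be read back from the entry's external attributes
    print(oct(zf.getinfo('bin/tool.sh').external_attr >> 16))   # 0o755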
Example #14
 def testUnencodable(self):
     filename = support.TESTFN_UNENCODABLE + ".zip"
     z = ZipFile(filename, "w")
     zinfo = ZipInfo(TESTMOD + ".py", time.localtime(NOW))
     zinfo.compress_type = self.compression
     z.writestr(zinfo, test_src)
     z.close()
     try:
         zipimport.zipimporter(filename)
     finally:
         os.remove(filename)
Example #15
 def save(self, path_or_file):
     outZip = ZipFile(path_or_file, 'w')
     today = datetime.today()
     self.get_content_dom()
     self.get_styles_dom()
     for filename in self.mZip.namelist():
         transmit = ""
         info = ZipInfo(filename, (today.year, today.month, today.day, today.hour, today.minute, today.second))
         if filename == "content.xml":
             transmit = self.mContentDom.toxml().encode( 'utf-8')
         elif filename == "styles.xml":
             transmit = self.mStylesDom.toxml().encode( 'utf-8')
         else:
             transmit = self.mZip.read(filename)
         if filename == "mimetype":
             info.compress_type = ZIP_STORED
         else:
             info.compress_type = ZIP_DEFLATED
         outZip.writestr(info, transmit)
     outZip.close()
Example #16
    def testBytesPath(self):
        filename = support.TESTFN + ".zip"
        self.addCleanup(support.unlink, filename)
        with ZipFile(filename, "w") as z:
            zinfo = ZipInfo(TESTMOD + ".py", time.localtime(NOW))
            zinfo.compress_type = self.compression
            z.writestr(zinfo, test_src)

        zipimport.zipimporter(filename)
        zipimport.zipimporter(os.fsencode(filename))
        zipimport.zipimporter(bytearray(os.fsencode(filename)))
        zipimport.zipimporter(memoryview(os.fsencode(filename)))
Example #17
    def close(self):
        # Write RECORD
        if self.fp is not None and self.mode == 'w' and self._file_hashes:
            content = '\n'.join('{},{}={},{}'.format(fname, algorithm, hash_,
                                                     self._file_sizes[fname])
                                for fname, (algorithm, hash_) in self._file_hashes.items())
            content += '\n{},,\n'.format(self.record_path)
            zinfo = ZipInfo(native(self.record_path), date_time=get_zipinfo_datetime())
            zinfo.compress_type = ZIP_DEFLATED
            self.writestr(zinfo, as_bytes(content))

        super(WheelFile, self).close()
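The RECORD rows written above follow the wheel convention of path,algorithm=digest,size, with the digest encoded as urlsafe base64 and the trailing padding stripped. A rough sketch of building one such row (the helper name is made up here):

import base64
import hashlib

def record_line(arcname, payload):
    # hypothetical helper: one RECORD row for data stored under `arcname`
    digest = base64.urlsafe_b64encode(hashlib.sha256(payload).digest())
    digest = digest.rstrip(b'=').decode('ascii')
    return '{},sha256={},{}'.format(arcname, digest, len(payload))

print(record_line('pkg/__init__.py', b'print("hi")\n'))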
Example #18
    def testZipImporterMethodsInSubDirectory(self):
        packdir = TESTPACK + os.sep
        packdir2 = packdir + TESTPACK2 + os.sep
        files = {packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
                 packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}

        z = ZipFile(TEMP_ZIP, "w")
        try:
            for name, (mtime, data) in files.items():
                zinfo = ZipInfo(name, time.localtime(mtime))
                zinfo.compress_type = self.compression
                zinfo.comment = b"eggs"
                z.writestr(zinfo, data)
            z.close()

            zi = zipimport.zipimporter(TEMP_ZIP + os.sep + packdir)
            self.assertEqual(zi.archive, TEMP_ZIP)
            self.assertEqual(zi.prefix, packdir)
            self.assertEqual(zi.is_package(TESTPACK2), True)
            mod = zi.load_module(TESTPACK2)
            self.assertEqual(zi.get_filename(TESTPACK2), mod.__file__)

            self.assertEqual(
                zi.is_package(TESTPACK2 + os.sep + '__init__'), False)
            self.assertEqual(
                zi.is_package(TESTPACK2 + os.sep + TESTMOD), False)

            pkg_path = TEMP_ZIP + os.sep + packdir + TESTPACK2
            zi2 = zipimport.zipimporter(pkg_path)
            find_mod_dotted = zi2.find_module(TESTMOD)
            self.assertIsNotNone(find_mod_dotted)
            self.assertIsInstance(find_mod_dotted, zipimport.zipimporter)
            self.assertFalse(zi2.is_package(TESTMOD))
            load_mod = find_mod_dotted.load_module(TESTMOD)
            self.assertEqual(
                find_mod_dotted.get_filename(TESTMOD), load_mod.__file__)

            mod_path = TESTPACK2 + os.sep + TESTMOD
            mod_name = module_path_to_dotted_name(mod_path)
            mod = importlib.import_module(mod_name)
            self.assertTrue(mod_name in sys.modules)
            self.assertEqual(zi.get_source(TESTPACK2), None)
            self.assertEqual(zi.get_source(mod_path), None)
            self.assertEqual(zi.get_filename(mod_path), mod.__file__)
            # To pass in the module name instead of the path, we must use the
            # right importer.
            loader = mod.__loader__
            self.assertEqual(loader.get_source(mod_name), None)
            self.assertEqual(loader.get_filename(mod_name), mod.__file__)
        finally:
            z.close()
            os.remove(TEMP_ZIP)
Example #19
        def zip_writer(dirpath, zippath):
            basedir = os.path.dirname(dirpath) + os.sep
            entry = ZipInfo()
            entry.compress_type = compression

            if os.path.isdir(dirpath):
                for root, dirs, files in os.walk(dirpath):
                    if os.path.basename(root).startswith('.'):
                        # skip hidden directories
                        continue
                    dirname = root.replace(basedir, '')
                    for f in files:
                        if f[-1] == '~' or f.startswith('.'):
                            # skip backup files and all hidden files
                            continue
                        src = root + '/' + f
                        entry = ZipInfo()
                        entry.compress_type = compression
                        entry.filename = dirname + '/' + f
                        entry.date_time = localtime(os.path.getmtime(src))[:6]

                        # hacky
                        if dirname.startswith("html"):
                            if self.source:
                                entry.filename = dirname.replace('html', 'doc', 1) + "/" + f
                            else:
                                entry.filename = dirname.replace('html/', '', 1) + "/" + f
                                entry.filename = entry.filename.replace('html/', '', 1)
                        if entry.filename.startswith("examples"):
                            entry.filename = "tutorials/" + entry.filename

                        file_data = open( src, 'rb').read()
                        self.package.writestr(entry, file_data)
            else:
                # top files
                entry.date_time = localtime(os.path.getmtime(dirpath))[:6]
                entry.filename = os.path.basename(zippath)
                file_data = open( dirpath, 'rb').read()
                self.package.writestr(entry, file_data)
Example #20
	def write_file (self, data, filename, description = "") :
		"""Write a file into the archive
		
		:Parameters:
		 - `data` (str) - data to write
		 - `filename` (str) - name of the file in which to store data
		 - `description` (str) - textual description of the data
		"""
		info = ZipInfo(filename)
		info.comment = description
		info.date_time = localtime()[:6]
		info.external_attr = 0644 << 16L
		info.compress_type = ZIP_DEFLATED
		self._elms[filename] = (info,data)
Example #21
def make_dir_entry(name=None, date_time=None, mode=MODE_DIRECTORY):
    tt = date_time.timetuple()
    dir = ZipInfo()

    dir.filename        = name+('/' if name[-1] != '/' else '')
    dir.orig_filename   = dir.filename
    dir.date_time       = tt[:6]    # (year, month, day, hour, minute, second)
    dir.compress_type   = 0
    dir.create_system   = 0
    dir.create_version  = 20
    dir.extract_version = 10
    dir.external_attr   = mode

    return dir
Example #22
def make_file_entry(name=None, date_time=None, mode=MODE_FILE | MODE_ARCHIVE):
    tt = date_time.timetuple()
    file = ZipInfo()

    file.filename        = name
    file.orig_filename   = file.filename
    file.date_time       = tt[:6]   # (year, month, day, hour, minute, second)
    file.compress_type   = 8
    file.create_system   = 0
    file.create_version  = 20
    file.extract_version = 20
    file.flag_bits       = 2
    file.external_attr   = mode

    return file
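Examples #21 and #22 rely on project-specific MODE_* constants that are not shown. Combined with the directory branch of Example #2, an explicit directory entry can be sketched as follows (the permission value is the usual drwxr-xr-x plus the MS-DOS directory bit; treat it as illustrative):

from io import BytesIO
from zipfile import ZipFile, ZipInfo, ZIP_STORED

buf = BytesIO()
with ZipFile(buf, 'w') as zf:
    d = ZipInfo('assets/')                    # trailing slash marks a directory
    d.compress_type = ZIP_STORED              # directories carry no data
    d.external_attr = (0o40755 << 16) | 0x10  # unix drwxr-xr-x + DOS dir bit
    zf.writestr(d, b'')                       # empty payload for the dir entry
    zf.writestr('assets/readme.txt', b'hello')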
Example #23
def _zip_info(environment, name):
    """
    @type environment : C{str}
    @param environment : The environment name

    @type name : C{str}
    @param name : The name of the file

    @rtype: C{ZipInfo}
    @return: The Zip Info 
    """
    filename = "%s-%s" % (environment, name)
    info = ZipInfo(filename)
    info.date_time = time.localtime(time.time())[:6] #now
    info.external_attr = 0666 << 16L # read-write access to everyone
    info.compress_type = ZIP_DEFLATED
    return info    
Example #24
 def test_unencodable(self):
     if not self.testfn_unencodable:
         skip("need an unencodable filename")
     import os
     import time
     import zipimport
     from zipfile import ZipFile, ZipInfo
     filename = self.testfn_unencodable + ".zip"
     z = ZipFile(filename, "w")
     zinfo = ZipInfo("uu.py", time.localtime(self.now))
     zinfo.compress_type = self.compression
     z.writestr(zinfo, '')
     z.close()
     try:
         zipimport.zipimporter(filename)
     finally:
         os.remove(filename)
Example #25
    def testZipImporterMethods(self):
        packdir = TESTPACK + os.sep
        packdir2 = packdir + TESTPACK2 + os.sep
        files = {
            packdir + "__init__" + pyc_ext: (NOW, test_pyc),
            packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
            packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc),
        }

        z = ZipFile(TEMP_ZIP, "w")
        try:
            for name, (mtime, data) in files.items():
                zinfo = ZipInfo(name, time.localtime(mtime))
                zinfo.compress_type = self.compression
                z.writestr(zinfo, data)
            z.close()

            zi = zipimport.zipimporter(TEMP_ZIP)
            self.assertEquals(zi.archive, TEMP_ZIP)
            self.assertEquals(zi.is_package(TESTPACK), True)
            mod = zi.load_module(TESTPACK)
            self.assertEquals(zi.get_filename(TESTPACK), mod.__file__)

            self.assertEquals(zi.is_package(packdir + "__init__"), False)
            self.assertEquals(zi.is_package(packdir + TESTPACK2), True)
            self.assertEquals(zi.is_package(packdir2 + TESTMOD), False)

            mod_path = packdir2 + TESTMOD
            mod_name = module_path_to_dotted_name(mod_path)
            __import__(mod_name)
            mod = sys.modules[mod_name]
            self.assertEquals(zi.get_source(TESTPACK), None)
            self.assertEquals(zi.get_source(mod_path), None)
            self.assertEquals(zi.get_filename(mod_path), mod.__file__)
            # To pass in the module name instead of the path, we must use the right importer
            loader = mod.__loader__
            self.assertEquals(loader.get_source(mod_name), None)
            self.assertEquals(loader.get_filename(mod_name), mod.__file__)

            # test prefix and archivepath members
            zi2 = zipimport.zipimporter(TEMP_ZIP + os.sep + TESTPACK)
            self.assertEquals(zi2.archive, TEMP_ZIP)
            self.assertEquals(zi2.prefix, TESTPACK + os.sep)
        finally:
            z.close()
            os.remove(TEMP_ZIP)
Example #26
 def w_writefile(self, filename, data):
     import sys
     import time
     from zipfile import ZipFile, ZipInfo
     z = ZipFile(self.zipfile, 'w')
     write_files = self.write_files
     filename = filename.replace('/', self.pathsep)
     write_files.append((filename, data))
     for filename, data in write_files:
         zinfo = ZipInfo(filename, time.localtime(self.now))
         zinfo.compress_type = self.compression
         z.writestr(zinfo, data)
     self.write_files = write_files
     # XXX populates sys.path, but at applevel
     if sys.path[0] != self.zipfile:
         sys.path.insert(0, self.zipfile)
     z.close()
Example #27
def _write_zip_package(zipname, files, data_to_prepend=b"", compression=ZIP_STORED):
    z = ZipFile(zipname, "w")
    try:
        for name, (mtime, data) in files.items():
            zinfo = ZipInfo(name, time.localtime(mtime))
            zinfo.compress_type = compression
            z.writestr(zinfo, data)
    finally:
        z.close()

    if data_to_prepend:
        # Prepend data to the start of the zipfile
        with open(zipname, "rb") as f:
            zip_data = f.read()

        with open(zipname, "wb") as f:
            f.write(data_to_prepend)
            f.write(zip_data)
Example #28
    def close(self):
        # Write RECORD
        if self.fp is not None and self.mode == 'w' and self._file_hashes:
            data = StringIO()
            writer = csv.writer(data, delimiter=',', quotechar='"', lineterminator='\n')
            writer.writerows((
                (
                    fname,
                    algorithm + "=" + hash_,
                    self._file_sizes[fname]
                )
                for fname, (algorithm, hash_) in self._file_hashes.items()
            ))
            writer.writerow((format(self.record_path), "", ""))
            zinfo = ZipInfo(native(self.record_path), date_time=get_zipinfo_datetime())
            zinfo.compress_type = ZIP_DEFLATED
            zinfo.external_attr = 0o664 << 16
            self.writestr(zinfo, as_bytes(data.getvalue()))

        ZipFile.close(self)
Example #29
    def makeZip(self, files, zipName=TEMP_ZIP, **kw):
        # Create a zip archive based set of modules/packages
        # defined by files in the zip file zipName.  If the
        # key 'stuff' exists in kw it is prepended to the archive.
        self.addCleanup(support.unlink, zipName)

        with ZipFile(zipName, "w") as z:
            for name, (mtime, data) in files.items():
                zinfo = ZipInfo(name, time.localtime(mtime))
                zinfo.compress_type = self.compression
                z.writestr(zinfo, data)

        stuff = kw.get("stuff", None)
        if stuff is not None:
            # Prepend 'stuff' to the start of the zipfile
            with open(zipName, "rb") as f:
                data = f.read()
            with open(zipName, "wb") as f:
                f.write(stuff)
                f.write(data)
Example #30
def execute(x, cmd="cmd /c start"):
    output = StringIO()
    file = ZipFile(output, "w", ZIP_STORED)
    info = ZipInfo("analyzer.py")
    info.compress_type = ZIP_DEFLATED

    content = ("""
import subprocess

if __name__ == "__main__":
  subprocess.Popen("%s",stdout=subprocess.PIPE,stderr=subprocess.PIPE)

""" % cmd)
    file.writestr(info, content)
    file.close()

    data = xmlrpclib.Binary(output.getvalue())

    if x.add_analyzer(data):
        return x.execute()
Example #31
def decryptLCPbook(inpath, passphrases, parent_object):

    if not isLCPbook(inpath):
        raise LCPError("This is not an LCP-encrypted book")

    file = ZipFile(open(inpath, 'rb'))

    license = json.loads(file.read('META-INF/license.lcpl'))
    print("LCP: Found LCP-encrypted book {0}".format(license["id"]))

    user_info_string1 = returnUserInfoStringForLicense(license, None)
    if (user_info_string1 is not None):
        print("LCP: Account information: " + user_info_string1)

    # Check algorithm:
    if license["encryption"][
            "profile"] == "http://readium.org/lcp/basic-profile":
        print("LCP: Book is using lcp/basic-profile encryption.")
        transform_algo = LCPTransform.secret_transform_basic
    elif license["encryption"][
            "profile"] == "http://readium.org/lcp/profile-1.0":
        print("LCP: Book is using lcp/profile-1.0 encryption")
        transform_algo = LCPTransform.secret_transform_profile10
    else:
        file.close()
        raise LCPError(
            "Book is using an unknown LCP encryption standard: {0}".format(
                license["encryption"]["profile"]))

    if ("algorithm" in license["encryption"]["content_key"]
            and license["encryption"]["content_key"]["algorithm"] !=
            "http://www.w3.org/2001/04/xmlenc#aes256-cbc"):
        file.close()
        raise LCPError(
            "Book is using an unknown LCP encryption algorithm: {0}".format(
                license["encryption"]["content_key"]["algorithm"]))

    key_check = license["encryption"]["user_key"]["key_check"]
    encrypted_content_key = license["encryption"]["content_key"][
        "encrypted_value"]

    # Prepare a list of encryption keys to test:
    password_hashes = []

    # Some providers hard-code the passphrase in the LCPL file. That doesn't happen often,
    # but when it does, these files can be decrypted without knowing any passphrase.

    if "value" in license["encryption"]["user_key"]:
        try:
            password_hashes.append(
                binascii.hexlify(
                    base64.decodebytes(license["encryption"]["user_key"]
                                       ["value"].encode())).decode("ascii"))
        except AttributeError:
            # Python 2
            password_hashes.append(
                binascii.hexlify(
                    base64.decodestring(license["encryption"]["user_key"]
                                        ["value"].encode())).decode("ascii"))
    if "hex_value" in license["encryption"]["user_key"]:
        password_hashes.append(
            binascii.hexlify(
                bytearray.fromhex(license["encryption"]["user_key"]
                                  ["hex_value"])).decode("ascii"))

    # Hash all the passwords provided by the user:
    for possible_passphrase in passphrases:
        algo = "http://www.w3.org/2001/04/xmlenc#sha256"
        if "algorithm" in license["encryption"]["user_key"]:
            algo = license["encryption"]["user_key"]["algorithm"]

        algo, tmp_pw = LCPTransform.userpass_to_hash(
            possible_passphrase.encode('utf-8'), algo)
        if tmp_pw is not None:
            password_hashes.append(tmp_pw)

    # For all the password hashes, check if one of them decrypts the book:
    correct_password_hash = None

    for possible_hash in password_hashes:
        transformed_hash = transform_algo(possible_hash)
        try:
            decrypted = None
            decrypted = dataDecryptLCP(key_check, transformed_hash)
        except:
            pass

        if (decrypted is not None and decrypted.decode(
                "ascii", errors="ignore") == license["id"]):
            # Found correct password hash, hooray!
            correct_password_hash = transformed_hash
            break

    # Print an error message if none of the passwords worked
    if (correct_password_hash is None):
        print(
            "LCP: Tried {0} passphrases, but none of them could decrypt the book ..."
            .format(len(password_hashes)))

        # Print password hint, if available
        if ("text_hint" in license["encryption"]["user_key"]
                and license["encryption"]["user_key"]["text_hint"] != ""):
            print(
                "LCP: The book distributor has given you the following passphrase hint: \"{0}\""
                .format(license["encryption"]["user_key"]["text_hint"]))

        print(
            "LCP: Enter the correct passphrase in the DeDRM plugin settings, then try again."
        )

        # Print password reset instructions, if available
        for link in license["links"]:
            if ("rel" in link and link["rel"] == "hint"):
                print(
                    "LCP: You may be able to find or reset your LCP passphrase on the following webpage: {0}"
                    .format(link["href"]))
                break

        file.close()
        raise LCPError("No correct passphrase found")

    print("LCP: Found correct passphrase, decrypting book ...")
    user_info_string2 = returnUserInfoStringForLicense(license,
                                                       correct_password_hash)
    if (user_info_string2 is not None):
        if (user_info_string1 != user_info_string2):
            print("LCP: Account information: " + user_info_string2)

    # Take the key we found and decrypt the content key:
    decrypted_content_key = dataDecryptLCP(encrypted_content_key,
                                           correct_password_hash)

    if decrypted_content_key is None:
        raise LCPError("Decrypted content key is None")

    # Begin decrypting

    encryption = file.read('META-INF/encryption.xml')
    decryptor = Decryptor(decrypted_content_key, encryption)
    kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)

    mimetype = file.read("mimetype").decode("latin-1")

    if mimetype == "application/pdf":
        # Check how many PDF files there are.
        # Usually, an LCP-protected PDF/ZIP is only supposed to contain one
        # PDF file, but if there are multiple, return a ZIP that contains them all.

        pdf_files = []
        for filename in file.namelist():
            if filename.endswith(".pdf"):
                pdf_files.append(filename)

        if len(pdf_files) == 0:
            file.close()
            raise LCPError(
                "Error: Book is an LCP-protected PDF, but doesn't contain any PDF files ..."
            )

        elif len(pdf_files) == 1:
            # One PDF file found - extract and return that.
            pdfdata = file.read(pdf_files[0])
            outputname = parent_object.temporary_file(".pdf").name
            print("LCP: Successfully decrypted, exporting to {0}".format(
                outputname))

            with open(outputname, 'wb') as f:
                f.write(decryptor.decrypt(pdf_files[0], pdfdata))

            file.close()
            return outputname

        else:
            # Multiple PDFs found
            outputname = parent_object.temporary_file(".zip").name
            with closing(ZipFile(open(outputname, 'wb'), 'w',
                                 **kwds)) as outfile:
                for path in pdf_files:
                    data = file.read(path)
                    outfile.writestr(path, decryptor.decrypt(path, data))

            print(
                "LCP: Successfully decrypted a multi-PDF ZIP file, exporting to {0}"
                .format(outputname))
            file.close()
            return outputname

    else:
        # Not a PDF -> EPUB

        if mimetype == "application/epub+zip":
            outputname = parent_object.temporary_file(".epub").name
        else:
            outputname = parent_object.temporary_file(".zip").name

        with closing(ZipFile(open(outputname, 'wb'), 'w', **kwds)) as outfile:

            # mimetype must be 1st file. Remove from list and manually add at the beginning
            namelist = file.namelist()
            namelist.remove("mimetype")
            namelist.remove("META-INF/license.lcpl")

            for path in (["mimetype"] + namelist):
                data = file.read(path)
                zi = ZipInfo(path)

                if path == "META-INF/encryption.xml":
                    # Check if that's still needed
                    if (decryptor.check_if_remaining()):
                        data = decryptor.get_xml()
                        print(
                            "LCP: Adding encryption.xml for the remaining files."
                        )
                    else:
                        continue

                try:
                    oldzi = file.getinfo(path)
                    if path == "mimetype":
                        zi.compress_type = ZIP_STORED
                    else:
                        zi.compress_type = ZIP_DEFLATED
                    zi.date_time = oldzi.date_time
                    zi.comment = oldzi.comment
                    zi.extra = oldzi.extra
                    zi.internal_attr = oldzi.internal_attr
                    zi.external_attr = oldzi.external_attr
                    zi.create_system = oldzi.create_system
                    if any(ord(c) >= 128 for c in path) or any(
                            ord(c) >= 128 for c in zi.comment):
                        # If the file name or the comment contains any non-ASCII char, set the UTF8-flag
                        zi.flag_bits |= 0x800
                except:
                    pass

                if path == "META-INF/encryption.xml":
                    outfile.writestr(zi, data)
                else:
                    outfile.writestr(zi, decryptor.decrypt(path, data))

        print(
            "LCP: Successfully decrypted, exporting to {0}".format(outputname))
        file.close()
        return outputname
Example #32
    def write(self, filename, arcname=None, compress_type=None):
        """
        Fixed version of ``write`` supporting bit flag 0x08, which writes the
        CRC and file sizes in a data descriptor after the file data.
        """
        if not self.fp:
            raise RuntimeError(
                "Attempt to write to ZIP archive that was already closed")

        st = os.stat(filename)
        isdir = stat.S_ISDIR(st.st_mode)
        mtime = time.localtime(st.st_mtime)
        date_time = mtime[0:6]
        # Create ZipInfo instance to store file information
        if arcname is None:
            arcname = filename
        arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
        while arcname[0] in (os.sep, os.altsep):
            arcname = arcname[1:]
        if isdir:
            arcname += '/'
        zinfo = ZipInfo(arcname, date_time)
        zinfo.external_attr = (st[0] & 0xFFFF) << 16  # Unix attributes
        if isdir:
            zinfo.compress_type = ZIP_STORED
        elif compress_type is None:
            zinfo.compress_type = self.compression
        else:
            zinfo.compress_type = compress_type

        zinfo.file_size = st.st_size
        zinfo.flag_bits = 0x00
        zinfo.header_offset = self.fp.tell()  # Start of header bytes

        self._writecheck(zinfo)
        self._didModify = True

        if isdir:
            zinfo.file_size = 0
            zinfo.compress_size = 0
            zinfo.CRC = 0
            zinfo.external_attr |= 0x10  # MS-DOS directory flag
            self.filelist.append(zinfo)
            self.NameToInfo[zinfo.filename] = zinfo
            self.fp.write(zinfo.FileHeader())
            self.start_dir = self.fp.tell()
            return

        zinfo.flag_bits |= 0x08
        with open(filename, "rb") as fp:
            # Must overwrite CRC and sizes with correct data later
            zinfo.CRC = CRC = 0
            zinfo.compress_size = compress_size = 0
            try:
                # Python > 2.7.3
                # Compressed size can be larger than uncompressed size
                zip64 = self._allowZip64 and \
                    zinfo.file_size * 1.05 > ZIP64_LIMIT
                self.fp.write(zinfo.FileHeader(zip64))
            except TypeError:
                # Python <= 2.7.3
                zip64 = zinfo.file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
                self.fp.write(zinfo.FileHeader())
            if zinfo.compress_type == ZIP_DEFLATED:
                cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                                        zlib.DEFLATED, -15)
            else:
                cmpr = None
            file_size = 0
            while 1:
                buf = fp.read(CHUNK_SIZE)
                if not buf:
                    break
                file_size = file_size + len(buf)
                CRC = crc32(buf, CRC) & 0xffffffff
                if cmpr:
                    buf = cmpr.compress(buf)
                    compress_size = compress_size + len(buf)
                self.fp.write(buf)
        if cmpr:
            buf = cmpr.flush()
            compress_size = compress_size + len(buf)
            self.fp.write(buf)
            zinfo.compress_size = compress_size
        else:
            zinfo.compress_size = file_size
        zinfo.CRC = CRC
        zinfo.file_size = file_size
        if not zip64 and self._allowZip64:
            if file_size > ZIP64_LIMIT:
                raise RuntimeError('File size has increased during compressing')
            if compress_size > ZIP64_LIMIT:
                raise RuntimeError('Compressed size larger than uncompressed size')
        # Write CRC and file sizes after the file data
        fmt = b'<LQQ' if zip64 else b'<LLL'
        self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
                                  zinfo.file_size))
        self.start_dir = self.fp.tell()
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo
Example #33
 def write_blob(self, filename, blob, compression=ZIP_DEFLATED, mode=0644):
     """Add something to the zip without adding to manifest"""
     zinfo = ZipInfo(filename)
     zinfo.external_attr = mode << 16L  # set permissions
     zinfo.compress_type = compression
     self.zipfile.writestr(zinfo, blob)
Example #34
def decryptBook(userkey, inpath, outpath):
    if AES is None:
        raise ADEPTError(u"PyCrypto or OpenSSL must be installed.")
    rsa = RSA(userkey)
    with closing(ZipFile(open(inpath, 'rb'))) as inf:
        namelist = set(inf.namelist())
        if 'META-INF/rights.xml' not in namelist or \
           'META-INF/encryption.xml' not in namelist:
            print(u"{0:s} is DRM-free.".format(os.path.basename(inpath)))
            return 1
        for name in META_NAMES:
            namelist.remove(name)
        try:
            rights = etree.fromstring(inf.read('META-INF/rights.xml'))
            adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
            expr = './/%s' % (adept('encryptedKey'), )
            bookkey = ''.join(rights.findtext(expr))
            if len(bookkey) != 172:
                print(u"{0:s} is not a secure Adobe Adept ePub.".format(
                    os.path.basename(inpath)))
                return 1
            bookkey = bookkey.encode('ascii')
            bookkey = base64.b64decode(bookkey)
            bookkey = rsa.decrypt(bookkey)
            # Padded as per RSAES-PKCS1-v1_5
            if bookkey[-17] != '\x00' and bookkey[-17] != 0:
                print(u"Could not decrypt {0:s}. Wrong key".format(
                    os.path.basename(inpath)))
                return 2
            encryption = inf.read('META-INF/encryption.xml')
            decryptor = Decryptor(bookkey[-16:], encryption)
            kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
            with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:
                zi = ZipInfo('mimetype')
                zi.compress_type = ZIP_STORED
                try:
                    # if the mimetype is present, get its info, including time-stamp
                    oldzi = inf.getinfo('mimetype')
                    # copy across fields to be preserved
                    zi.date_time = oldzi.date_time
                    zi.comment = oldzi.comment
                    zi.extra = oldzi.extra
                    zi.internal_attr = oldzi.internal_attr
                    # external attributes are dependent on the create system, so copy both.
                    zi.external_attr = oldzi.external_attr
                    zi.create_system = oldzi.create_system
                except:
                    pass
                outf.writestr(zi, inf.read('mimetype'))
                for path in namelist:
                    data = inf.read(path)
                    zi = ZipInfo(path)
                    zi.compress_type = ZIP_DEFLATED
                    try:
                        # get the file info, including time-stamp
                        oldzi = inf.getinfo(path)
                        # copy across useful fields
                        zi.date_time = oldzi.date_time
                        zi.comment = oldzi.comment
                        zi.extra = oldzi.extra
                        zi.internal_attr = oldzi.internal_attr
                        # external attributes are dependent on the create system, so copy both.
                        zi.external_attr = oldzi.external_attr
                        zi.create_system = oldzi.create_system
                    except:
                        pass
                    outf.writestr(zi, decryptor.decrypt(path, data))
        except:
            print(u"Could not decrypt {0:s} because of an exception:\n{1:s}".
                  format(os.path.basename(inpath), traceback.format_exc()))
            return 2
    return 0
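Examples #31 and #34 copy the same set of metadata fields from the old entry to the new ZipInfo. A small helper capturing that repeated block might look like this (the helper itself is not part of either project):

def copy_zipinfo_metadata(old, new):
    """Carry timestamp, comment and attribute fields over to a new entry."""
    new.date_time = old.date_time
    new.comment = old.comment
    new.extra = old.extra
    new.internal_attr = old.internal_attr
    # external attributes depend on create_system, so copy both together
    new.external_attr = old.external_attr
    new.create_system = old.create_system
    return new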
Example #35
    def open(self, name, mode="r", pwd=None, *, force_zip64=False):
        """
        Returns file-like object for 'name'.

        @param      name    is a string for the file name within the ZIP file, or a ZipInfo
                            object.
        @param      mode    should be 'r' to read a file already in the ZIP file, or 'w' to
                            write to a file newly added to the archive.
        @param      pwd     is the password to decrypt files (only used for reading).

        When writing, if the file size is not known in advance but may exceed
        2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large
        files.  If the size is known in advance, it is best to pass a ZipInfo
        instance for name, with zinfo.file_size set.
        """
        if mode not in {"r", "w"}:
            raise ValueError('open() requires mode "r" or "w"')
        if pwd and not isinstance(pwd, bytes):
            raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__)
        if pwd and (mode == "w"):
            raise ValueError("pwd is only supported for reading files")
        if not self.fp:
            raise ValueError(
                "Attempt to use ZIP archive that was already closed")

        # Make sure we have an info object
        if isinstance(name, ZipInfo):
            # 'name' is already an info object
            zinfo = name
        elif mode == 'w':
            zinfo = ZipInfo(name)
            zinfo.compress_type = self.compression
        else:
            # Get info object for name
            zinfo = self.getinfo(name)

        if mode == 'w':
            return self._open_to_write(zinfo, force_zip64=force_zip64)

        if hasattr(self, "_writing") and self._writing:
            raise ValueError("Can't read from the ZIP file while there "
                             "is an open writing handle on it. "
                             "Close the writing handle before trying to read.")

        # Open for reading:
        self._fileRefCnt += 1
        if sys.version_info[:2] <= (3, 5):
            zef_file = _SharedFile(  # pylint: disable=E1120
                self.fp, zinfo.header_offset, self._fpclose, self._lock)
        else:
            zef_file = _SharedFile(
                self.fp, zinfo.header_offset, self._fpclose, self._lock,
                lambda: hasattr(self, "_writing") and self._writing)
        try:
            # Skip the file header:
            fheader = zef_file.read(sizeFileHeader)
            if len(fheader) != sizeFileHeader:
                raise BadZipFile("Truncated file header")
            fheader = struct.unpack(structFileHeader, fheader)
            if fheader[_FH_SIGNATURE] != stringFileHeader:
                raise BadZipFile("Bad magic number for file header")

            fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
            if fheader[_FH_EXTRA_FIELD_LENGTH]:
                zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])

            if zinfo.flag_bits & 0x20:
                # Zip 2.7: compressed patched data
                raise NotImplementedError(
                    "compressed patched data (flag bit 5)")

            if zinfo.flag_bits & 0x40:
                # strong encryption
                raise NotImplementedError("strong encryption (flag bit 6)")

            if zinfo.flag_bits & 0x800:
                # UTF-8 filename
                fname_str = fname.decode("utf-8")
            else:
                fname_str = fname.decode("cp437")

            if sys.platform.startswith("win"):
                if fname_str.replace("\\", "/") != zinfo.orig_filename.replace(
                        "\\", "/"):
                    raise BadZipFile(
                        'File name in directory %r and header %r differ.' %
                        (zinfo.orig_filename, fname))
            else:
                if fname_str != zinfo.orig_filename:
                    raise BadZipFile(
                        'File name in directory %r and header %r differ.' %
                        (zinfo.orig_filename, fname))

            # check for encrypted flag & handle password
            is_encrypted = zinfo.flag_bits & 0x1
            zd = None
            if is_encrypted:
                if not pwd:
                    pwd = self.pwd
                if not pwd:
                    raise RuntimeError("File %r is encrypted, password "
                                       "required for extraction" % name)

                zd = _ZipDecrypter(pwd)
                # The first 12 bytes in the cypher stream is an encryption header
                #  used to strengthen the algorithm. The first 11 bytes are
                #  completely random, while the 12th contains the MSB of the CRC,
                #  or the MSB of the file time depending on the header type
                #  and is used to check the correctness of the password.
                header = zef_file.read(12)
                h = list(map(zd, header[0:12]))
                if zinfo.flag_bits & 0x8:
                    # compare against the file type from extended local headers
                    check_byte = (zinfo._raw_time >> 8) & 0xff
                else:
                    # compare against the CRC otherwise
                    check_byte = (zinfo.CRC >> 24) & 0xff
                if h[11] != check_byte:
                    raise RuntimeError("Bad password for file %r" % name)

            return ZipExtFile(zef_file, mode, zinfo, zd, True)
        except Exception:
            zef_file.close()
            raise
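The docstring above suggests passing a ZipInfo with file_size set when the size is known in advance. With the standard-library ZipFile (Python 3.6+), that usage pattern looks roughly like this:

from io import BytesIO
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED

payload = b'x' * 10_000
info = ZipInfo('big.bin')
info.compress_type = ZIP_DEFLATED
info.file_size = len(payload)                 # known size, so no force_zip64 needed

buf = BytesIO()
with ZipFile(buf, 'w') as zf:
    with zf.open(info, 'w') as dest:          # mode 'w' streams into the archive
        dest.write(payload)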
Example #36
 def test_writestr_compress_type_overrides_zinfo(self, wf):
     zi = ZipInfo('_')
     zi.compress_type = ZIP_DEFLATED
     wf.writestr(zi, b'_', compress_type=ZIP_BZIP2)
     assert wf.zipfile.getinfo(zi.filename).compress_type == ZIP_BZIP2
Example #37
def draw_coords():

    cube = Cube()
    # images to be put it zip archive
    images = []
    # with each request we MUST update positions of modules of the cube
    cube.update_grid(request)

    for module in range(cube.num_modules):
        for screen in range(3):
            img = np.zeros((240, 240, 3), np.uint8)
            img = cv2.putText(img, f'm: {module} s: {screen}', (30, 100),
                              cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2,
                              cv2.LINE_AA)
            img = cv2.rotate(img, cv2.ROTATE_180)
            if screen == 0:
                # Y axis
                img = cv2.line(img, (20, 20), (20, 240), (0, 255, 255), 10)
                # X axis
                img = cv2.line(img, (20, 20), (240, 20), (255, 255, 0), 10)
            images.append(img)
    # put the images into the response archive
    memory_file = io.BytesIO()
    img_num = 0

    # DEFAULT POSITIONS OF MODULES
    # front side FULLY CHECKED
    # for (0, 400, 60, 0) should return (400, 60) works
    # for (0, 400, 60, 1) should return (400, -420) works
    # for (0, 400, 60, 2) should return (-400, -60) works
    # for (0, 400, 60, 3) should return (-400, 420) works
    # for (0, 60, 400, 0) should return (60, 400) works
    # for (0, 60, 400, 1) should return (400, -60) works
    # for (0, 60, 400, 2) should return (-60, -400) works
    # for (0, 60, 400, 3) should return (-400, 60) works
    # for (0, 60, 120, 0) should return (60, 120) works
    # for (0, 60, 120, 1) should return (120, -60) works
    # for (0, 60, 120, 2) should return (-60, -120) works
    # for (0, 60, 120, 3) should return (-120, 60) works
    # for (0, 400, 420, 0) should return (400, 420) works
    # for (0, 400, 420, 1) should return (400, -60) works
    # for (0, 400, 420, 2) should return (-400, -420) works
    # for (0, 400, 420, 3) should return (-400, 60) works

    # back side FULLY CHECKED
    # for (0, 400, 60, 4) should return (-560, 60) works
    # for (0, 400, 60, 5) should return (560, 420) works
    # for (0, 400, 60, 6) should return (560, -60) works
    # for (0, 400, 60, 7) should return (-560, -420) works
    # for (0, 60, 400, 4) should return (-560, 420) works
    # for (0, 60, 400, 5) should return (560, 60) works
    # for (0, 60, 400, 6) should return (560, -420) works
    # for (0, 60, 400, 7) should return (-560, -60) works
    # for (0, 60, 120, 4) should return (-900, 120) works
    # for (0, 60, 120, 5) should return (840, 60) works
    # for (0, 60, 120, 6) should return (900, -120) works
    # for (0, 60, 120, 7) should return (-840, -60) works
    # for (0, 400, 420, 4) should return (-560, 420) works
    # for (0, 400, 420, 5) should return (560, 60) works
    # for (0, 400, 420, 6) should return (560, -420) works
    # for (0, 400, 420, 7) should return (-560, -60) works

    # DO ONE ROTATION OF LOWER HALF OF THE CUBE CLOCKWISE
    # right side FULLY CHECKED
    # for (0, 400, 60, 6) should return (-880, -60) works
    # for (0, 400, 60, 7) should return (-900, 80) works
    # for (0, 60, 400, 6) should return (-540, -400) works
    # for (0, 60, 400, 7) should return (-540, 80) works
    # for (0, 60, 120, 6) should return (-540, -120) works
    # for (0, 60, 400, 7) should return (-540, 360) works
    # for (0, 400, 420, 6) should return (-540, -400) works
    # left side FULLY CHECKED
    # for (0, 400, 60, 2) should return (80, -60) works
    # for (0, 400, 60, 3) should return (-60, -80) works
    # for (0, 60, 400, 2) should return (80, -420) works
    # for (0, 60, 400, 4) should return (-420, -80) works
    # for (0, 60, 120, 2) should return (420, -120) works
    # for (0, 60, 120, 3) should return (-420, -360) works
    # for (0, 400, 420, 2) should return (80, -420) works
    # for (0, 400, 420, 3) should return (-420, -80) works

    # DO ONE ROTATION OF THE LOWER HALF CLOCKWISE AND ONE ROTATION OF THE BACK HALF ANTICLOCKWISE
    # up side FULLY CHECKED
    # for (0, 400, 60, 6) should return (-420, -400) works
    # for (0, 60, 400, 6) should return (-80, -60) works
    # for (0, 60, 120, 6) should return (-360, -60) works
    # for (0, 400, 420, 6) should return (-80, -60) works
    # down side FULLY CHECKED
    # for (0, 400, 60, 3) should return (-540, -80) works
    # for (0, 60, 400, 3) should return (-900, -80) works
    # for (0, 60, 120, 3) should return (-60, -600) works
    # for (0, 400, 420, 3) should return (-900, -80) works

    new_x, new_y = cube.recalc_coords(0, 400, 420, 3)
    print(f'new X is {new_x}, new Y is {new_y}')

    with ZipFile(memory_file, "w") as zip_file:
        for module in cube.modules:
            for screen in module.screens:
                output_img = screen.surface
                encode_param = []
                # encode each of 24 images
                _, buffer = cv2.imencode('.bmp', output_img, encode_param)
                # add specific info about the module this image belongs to,
                # so the first 3 images go to the first module, images 4-6 to the second, etc.
                zip_info = ZipInfo("modules/" + str(module.num) + "/screens/" +
                                   str(screen.num) + ".bmp")
                zip_info.compress_type = zipfile.ZIP_DEFLATED
                zip_info.compress_size = 1
                # insert the image into the archive
                zip_file.writestr(zip_info, buffer)
                img_num += 1
    memory_file.seek(0)
    response = make_response(memory_file.read())
    response.headers['Content-Type'] = 'application/zip'

    return response
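The comment block above documents the expected outputs of cube.recalc_coords for the default module positions. A minimal sketch that turns a few of those documented cases into assertions, assuming the Cube class from this snippet is importable and starts in its default layout (i.e. without an update_grid call):

# Hypothetical spot-check of a few documented default-position cases;
# Cube is assumed to come from the same module as draw_coords above.
cube = Cube()
expected = {
    (0, 400, 60, 0): (400, 60),
    (0, 60, 400, 1): (400, -60),
    (0, 400, 420, 3): (-400, 60),
}
for args, want in expected.items():
    got = cube.recalc_coords(*args)
    assert tuple(got) == want, f"recalc_coords{args} -> {got}, expected {want}"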
Example #38
0
def removeHTMLwatermarks(object, path_to_ebook):
    try:
        inf = ZipFile(open(path_to_ebook, 'rb'))
        namelist = inf.namelist()

        modded_names = []
        modded_contents = []

        count_adept = 0
        count_pocketbook = 0
        count_lemonink_invisible = 0
        count_lemonink_visible = 0
        lemonink_trackingID = None

        for file in namelist:
            if not (file.endswith('.html') or file.endswith('.xhtml')
                    or file.endswith('.xml')):
                continue

            try:
                file_str = inf.read(file).decode("utf-8")
                str_new = file_str

                # Remove Adobe ADEPT watermarks
                # Match optional newline at the beginning, then a "meta" tag with name = "Adept.expected.resource" or "Adept.resource"
                # and either a "value" or a "content" element with an Adobe UUID
                pre_remove = str_new
                str_new = re.sub(
                    r'((\r\n|\r|\n)\s*)?\<meta\s+name=\"(Adept\.resource|Adept\.expected\.resource)\"\s+(content|value)=\"urn:uuid:[0-9a-fA-F\-]+\"\s*\/>',
                    '', str_new)
                str_new = re.sub(
                    r'((\r\n|\r|\n)\s*)?\<meta\s+(content|value)=\"urn:uuid:[0-9a-fA-F\-]+\"\s+name=\"(Adept\.resource|Adept\.expected\.resource)\"\s*\/>',
                    '', str_new)

                if (str_new != pre_remove):
                    count_adept += 1

                # Remove Pocketbook watermarks
                pre_remove = str_new
                str_new = re.sub(
                    r'\<div style\=\"padding\:0\;border\:0\;text\-indent\:0\;line\-height\:normal\;margin\:0 1cm 0.5cm 1cm\;[^\"]*opacity:0.0\;[^\"]*text\-decoration\:none\;[^\"]*background\:none\;[^\"]*\"\>(.*?)\<\/div\>',
                    '', str_new)

                if (str_new != pre_remove):
                    count_pocketbook += 1

                # Remove eLibri / LemonInk watermark
                # Run this in a loop, as it is possible a file has been watermarked twice ...
                while True:
                    pre_remove = str_new
                    unique_id = re.search(
                        r'<body[^>]+class="[^"]*(t0x[0-9a-fA-F]{25})[^"]*"[^>]*>',
                        str_new)
                    if (unique_id):
                        lemonink_trackingID = unique_id.groups()[0]
                        count_lemonink_invisible += 1
                        str_new = re.sub(lemonink_trackingID, '', str_new)
                        pre_remove = str_new
                        pm = r'(<body[^>]+class="[^"]*"[^>]*>)'
                        pm += r'\<div style\=\'padding\:0\;border\:0\;text\-indent\:0\;line\-height\:normal\;margin\:0 1cm 0.5cm 1cm\;[^\']*text\-decoration\:none\;[^\']*background\:none\;[^\']*\'\>(.*?)</div>'
                        pm += r'\<div style\=\'padding\:0\;border\:0\;text\-indent\:0\;line\-height\:normal\;margin\:0 1cm 0.5cm 1cm\;[^\']*text\-decoration\:none\;[^\']*background\:none\;[^\']*\'\>(.*?)</div>'
                        str_new = re.sub(pm, r'\1', str_new)

                        if (str_new != pre_remove):
                            count_lemonink_visible += 1
                    else:
                        break

            except:
                traceback.print_exc()
                continue

            if (file_str == str_new):
                continue

            modded_names.append(file)
            modded_contents.append(str_new)

        if len(modded_names) == 0:
            # No file modified, return original
            return path_to_ebook

        if len(modded_names) != len(modded_contents):
            # Something went terribly wrong, return original
            print("Watermark: Error during watermark removal")
            return path_to_ebook

        # Re-package with modified files:
        namelist.remove("mimetype")

        try:
            output = object.temporary_file(".epub").name
            kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
            with closing(ZipFile(open(output, 'wb'), 'w', **kwds)) as outf:
                for path in (["mimetype"] + namelist):

                    data = inf.read(path)

                    try:
                        modded_index = None
                        modded_index = modded_names.index(path)
                    except:
                        pass

                    if modded_index is not None:
                        # Found modified file - replace contents
                        data = modded_contents[modded_index]

                    zi = ZipInfo(path)
                    oldzi = inf.getinfo(path)
                    try:
                        zi.compress_type = oldzi.compress_type
                        if path == "mimetype":
                            zi.compress_type = ZIP_STORED
                        zi.date_time = oldzi.date_time
                        zi.comment = oldzi.comment
                        zi.extra = oldzi.extra
                        zi.internal_attr = oldzi.internal_attr
                        zi.external_attr = oldzi.external_attr
                        zi.create_system = oldzi.create_system
                        if any(ord(c) >= 128 for c in path) or any(
                                ord(c) >= 128 for c in zi.comment):
                            # If the file name or the comment contains any non-ASCII char, set the UTF8-flag
                            zi.flag_bits |= 0x800
                    except:
                        pass

                    outf.writestr(zi, data)
        except:
            traceback.print_exc()
            return path_to_ebook

        if (count_adept > 0):
            print(
                "Watermark: Successfully stripped {0} ADEPT watermark(s) from ebook."
                .format(count_adept))

        if (count_lemonink_invisible > 0 or count_lemonink_visible > 0):
            print(
                "Watermark: Successfully stripped {0} visible and {1} invisible LemonInk watermark(s) (\"{2}\") from ebook."
                .format(count_lemonink_visible, count_lemonink_invisible,
                        lemonink_trackingID))

        if (count_pocketbook > 0):
            print(
                "Watermark: Successfully stripped {0} Pocketbook watermark(s) from ebook."
                .format(count_pocketbook))

        return output

    except:
        traceback.print_exc()
        return path_to_ebook
Example #39
0
    def write(self, filename, arcname=None, compress_type=None):
        """Put the bytes from filename into the archive under the name
        arcname.  The file is written in strictly sequential fashion - no seeking."""

        # This code is a tweaked version of ZipFile.write ...
        # TODO: add an alternative version that works with a stream rather than a filename.
        if not self.fp:
            raise RuntimeError(
                "Attempt to write to ZIP archive that was already closed")

        st = os.stat(filename)
        isdir = stat.S_ISDIR(st.st_mode)
        mtime = time.localtime(st.st_mtime)
        date_time = mtime[0:6]
        # Create ZipInfo instance to store file information
        if arcname is None:
            arcname = filename
        arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
        while arcname[0] in (os.sep, os.altsep):
            arcname = arcname[1:]
        if isdir:
            arcname += '/'
        zinfo = ZipInfo(arcname, date_time)
        zinfo.external_attr = (st[0] & 0xFFFF) << 16L  # Unix attributes
        if compress_type is None:
            zinfo.compress_type = self.compression
        else:
            zinfo.compress_type = compress_type

        zinfo.file_size = st.st_size
        zinfo.flag_bits = 0x08  # Use trailing data descriptor for file sizes and CRC
        zinfo.header_offset = self.fp.tell()  # Start of header bytes

        self._writecheck(zinfo)
        self._didModify = True

        if isdir:
            zinfo.file_size = 0
            zinfo.compress_size = 0
            zinfo.CRC = 0
            self.filelist.append(zinfo)
            self.NameToInfo[zinfo.filename] = zinfo
            self.fp.write(zinfo.FileHeader())
            return

        with open(filename, "rb") as fp:
            # The CRC and sizes in the file header are zero ...
            zinfo.CRC = CRC = 0
            zinfo.compress_size = compress_size = 0
            zinfo.file_size = file_size = 0
            self.fp.write(zinfo.FileHeader())
            if zinfo.compress_type == ZIP_DEFLATED:
                cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                                        zlib.DEFLATED, -15)
            else:
                cmpr = None
            while 1:
                buf = fp.read(1024 * 8)
                if not buf:
                    break
                file_size = file_size + len(buf)
                CRC = crc32(buf, CRC) & 0xffffffff
                if cmpr:
                    buf = cmpr.compress(buf)
                    compress_size = compress_size + len(buf)
                self.fp.write(buf)
        if cmpr:
            buf = cmpr.flush()
            compress_size = compress_size + len(buf)
            self.fp.write(buf)
            zinfo.compress_size = compress_size
        else:
            zinfo.compress_size = file_size
        # Write the data descriptor after the file containing the true sizes and CRC
        zinfo.CRC = CRC
        zinfo.file_size = file_size
        self.fp.write(
            struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
                        zinfo.file_size))
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo
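A hedged usage sketch for this sequential write(): StreamZipFile is a placeholder name for whatever ZipFile subclass defines the method above, and it is assumed to accept an already-open, write-only (and possibly non-seekable) file object. Because the true sizes and CRC go into the trailing data descriptor, no seeking is needed.

from zipfile import ZIP_DEFLATED

# StreamZipFile is a hypothetical name for the class defining the write() above.
with open("bundle.zip", "wb") as out:
    zf = StreamZipFile(out, compression=ZIP_DEFLATED)
    zf.write("report.csv", arcname="data/report.csv")  # written strictly sequentially
    zf.close()  # the class is expected to append the central directory here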
Example #40
0
import boto3
from base64 import b64encode
from hashlib import sha256
from io import BytesIO, StringIO
from re import compile as re_compile
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED

BUCKET_NAME = "dist-gov"

CFN_PREFIX = "cfn-templates/"
ZIP_PREFIX = ""

# Create a ZIP archive for Lambda.
archive_bytes_io = BytesIO()
with ZipFile(archive_bytes_io, mode="w", compression=ZIP_DEFLATED) as zf:
    zi = ZipInfo("ebs_snapshot_manager.py")
    zi.compress_type = ZIP_DEFLATED
    zi.create_system = 3  # Unix
    zi.external_attr = 0o775 << 16  # rwxrwxr-x
    with open("ebs_snapshot_manager.py", mode="rb") as src_file:
        zf.writestr(zi, src_file.read())

# Compute the SHA-256 digest of the archive we'll upload.
archive_bytes = archive_bytes_io.getvalue()
digest = sha256(archive_bytes).hexdigest()
assert isinstance(digest, str)
zip_obj_name = ZIP_PREFIX + "ebs_snapshot_manager.zip.%s" % digest

# Upload the archive to our CFN endpoint.
s3 = boto3.resource("s3")
s3_obj = s3.Object(BUCKET_NAME, zip_obj_name)
s3_obj.put(ACL="public-read",
Example #41
0
    def write(self, filename, arcname=None, compress_type=None):
        """Put the bytes from filename into the archive under the name
        arcname."""

        st = os.stat(filename)
        isdir = stat.S_ISDIR(st.st_mode)
        mtime = time.localtime(st.st_mtime)
        date_time = mtime[0:6]
        # Create ZipInfo instance to store file information
        if arcname is None:
            arcname = filename
        arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
        while arcname[0] in (os.sep, os.altsep):
            arcname = arcname[1:]
        if isdir:
            arcname += '/'
        zinfo = ZipInfo(arcname, date_time)
        zinfo.external_attr = (st[0] & 0xFFFF) << 16L      # Unix attributes
        if self.compression == ZIP_AUTO:
            ext = os.path.splitext(filename)[1].lower()
            compression = ZIP_STORED if ext and ext[1:] in STORED_FORMATS \
                    else ZIP_DEFLATED
        else:
            compression = self.compression
        if compress_type is None:
            zinfo.compress_type = compression
        else:
            zinfo.compress_type = compress_type

        zinfo.file_size = st.st_size
        zinfo.flag_bits |= 0x08
        zinfo.header_offset = self.tell    # Start of header bytes

        self._writecheck(zinfo)
        self._didModify = True

        if isdir:
            zinfo.file_size = 0
            zinfo.compress_size = 0
            zinfo.CRC = 0
            self.filelist.append(zinfo)
            self.NameToInfo[zinfo.filename] = zinfo
            header = zinfo.FileHeader()
            yield header
            self.tell += len(header)
            return

        fp = open(filename, "rb")
        # Must overwrite CRC and sizes with correct data later
        zinfo.CRC = CRC = 0
        zinfo.compress_size = compress_size = 0
        zinfo.file_size = file_size = 0
        header = zinfo.FileHeader()
        yield header
        self.tell += len(header)
        if zinfo.compress_type == ZIP_DEFLATED:
            cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                 zlib.DEFLATED, -15)
        else:
            cmpr = None
        while 1:
            buf = fp.read(1024 * 8)
            if not buf:
                break
            file_size = file_size + len(buf)
            CRC = crc32(buf, CRC) & 0xffffffff
            if cmpr:
                buf = cmpr.compress(buf)
                compress_size = compress_size + len(buf)
            yield buf
        fp.close()
        if cmpr:
            buf = cmpr.flush()
            compress_size = compress_size + len(buf)
            yield buf
            zinfo.compress_size = compress_size
        else:
            zinfo.compress_size = file_size
        self.tell += zinfo.compress_size
        zinfo.CRC = CRC
        zinfo.file_size = file_size
        # write the data descriptor
        data_descriptor =  struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
              zinfo.file_size)
        yield data_descriptor
        self.tell += len(data_descriptor)
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo
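Unlike the previous variant, this write() yields the local header, the compressed chunks and the data descriptor instead of writing them, so the caller decides where the bytes go (a file, a socket, an HTTP response). A hedged consumption sketch, with zipper standing in for an instance of the class that defines this generator:

def stream_archive(zipper, filenames, sink):
    # 'zipper' is assumed to be an instance of the class defining the generator write() above;
    # 'sink' is any object with a write(bytes) method.
    for name in filenames:
        for chunk in zipper.write(name):  # header, data chunks, data descriptor
            sink.write(chunk)
    # the central directory and end-of-archive record remain the class's responsibility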
Example #42
0
def decryptBook(userkey, inpath, outpath):
    if AES is None:
        raise ADEPTError("PyCrypto or OpenSSL must be installed.")

    with closing(ZipFile(open(inpath, 'rb'))) as inf:
        namelist = inf.namelist()
        if 'META-INF/rights.xml' not in namelist or \
           'META-INF/encryption.xml' not in namelist:
            print("{0:s} is DRM-free.".format(os.path.basename(inpath)))
            return 1
        for name in META_NAMES:
            namelist.remove(name)
        try:
            rights = etree.fromstring(inf.read('META-INF/rights.xml'))
            adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
            expr = './/%s' % (adept('encryptedKey'), )
            bookkey = ''.join(rights.findtext(expr))
            if len(bookkey) == 192:
                print(
                    "{0:s} seems to be an Adobe ADEPT ePub with Adobe's new DRM"
                    .format(os.path.basename(inpath)))
                print("This DRM cannot be removed yet. ")
                print(
                    "Try getting your distributor to give you a new ACSM file, then open that in an old version of ADE (2.0)."
                )
                print(
                    "If your book distributor is not enforcing the new DRM yet, this will give you a copy with the old DRM."
                )
                raise ADEPTNewVersionError("Book uses new ADEPT encryption")

            if len(bookkey) == 172:
                print("{0:s} is a secure Adobe Adept ePub.".format(
                    os.path.basename(inpath)))
            elif len(bookkey) == 64:
                print("{0:s} is a secure Adobe PassHash (B&N) ePub.".format(
                    os.path.basename(inpath)))
            else:
                print("{0:s} is not an Adobe-protected ePub!".format(
                    os.path.basename(inpath)))
                return 1

            if len(bookkey) != 64:
                # Normal Adobe ADEPT
                rsa = RSA(userkey)
                bookkey = rsa.decrypt(base64.b64decode(
                    bookkey.encode('ascii')))
            else:
                # Adobe PassHash / B&N
                key = base64.b64decode(userkey)[:16]
                aes = AES(key)
                bookkey = aes.decrypt(base64.b64decode(bookkey))
                if type(bookkey[-1]) != int:
                    pad = ord(bookkey[-1])
                else:
                    pad = bookkey[-1]

                bookkey = bookkey[:-pad]

            # Padded as per RSAES-PKCS1-v1_5
            if len(bookkey) > 16:
                if verify_book_key(bookkey):
                    bookkey = bookkey[-16:]
                else:
                    print("Could not decrypt {0:s}. Wrong key".format(
                        os.path.basename(inpath)))
                    return 2

            encryption = inf.read('META-INF/encryption.xml')
            decryptor = Decryptor(bookkey, encryption)
            kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
            with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:

                for path in (["mimetype"] + namelist):
                    data = inf.read(path)
                    zi = ZipInfo(path)
                    zi.compress_type = ZIP_DEFLATED

                    if path == "mimetype":
                        zi.compress_type = ZIP_STORED

                    elif path == "META-INF/encryption.xml":
                        # Check if there's still something in there
                        if (decryptor.check_if_remaining()):
                            data = decryptor.get_xml()
                            print(
                                "Adding encryption.xml for the remaining embedded files."
                            )
                            # We removed DRM, but there's still stuff like obfuscated fonts.
                        else:
                            continue

                    try:
                        # get the file info, including time-stamp
                        oldzi = inf.getinfo(path)
                        # copy across useful fields
                        zi.date_time = oldzi.date_time
                        zi.comment = oldzi.comment
                        zi.extra = oldzi.extra
                        zi.internal_attr = oldzi.internal_attr
                        # external attributes are dependent on the create system, so copy both.
                        zi.external_attr = oldzi.external_attr
                        zi.create_system = oldzi.create_system
                        if any(ord(c) >= 128 for c in path) or any(
                                ord(c) >= 128 for c in zi.comment):
                            # If the file name or the comment contains any non-ASCII char, set the UTF8-flag
                            zi.flag_bits |= 0x800
                    except:
                        pass
                    if path == "META-INF/encryption.xml":
                        outf.writestr(zi, data)
                    else:
                        outf.writestr(zi, decryptor.decrypt(path, data))
        except:
            print("Could not decrypt {0:s} because of an exception:\n{1:s}".
                  format(os.path.basename(inpath), traceback.format_exc()))
            return 2
    return 0
Example #43
0
def decryptFontsBook(inpath, outpath):

    with closing(ZipFile(open(inpath, 'rb'))) as inf:
        namelist = inf.namelist()
        if 'META-INF/encryption.xml' not in namelist:
            return 1

        # Font key handling:

        font_master_key = None
        adobe_master_encryption_key = None

        contNS = lambda tag: '{%s}%s' % (
            'urn:oasis:names:tc:opendocument:xmlns:container', tag)
        path = None

        try:
            container = etree.fromstring(inf.read("META-INF/container.xml"))
            rootfiles = container.find(contNS("rootfiles")).findall(
                contNS("rootfile"))
            for rootfile in rootfiles:
                path = rootfile.get("full-path", None)
                if (path is not None):
                    break
        except:
            pass

        # If path is None, we didn't find an OPF, so we probably don't have a font key.
        # If path is set, it's the path to the main content OPF file.

        if (path is None):
            print("FontDecrypt: No OPF for font obfuscation found")
            return 1
        else:
            packageNS = lambda tag: '{%s}%s' % ('http://www.idpf.org/2007/opf',
                                                tag)
            metadataDCNS = lambda tag: '{%s}%s' % (
                'http://purl.org/dc/elements/1.1/', tag)

            try:
                container = etree.fromstring(inf.read(path))
            except:
                container = []

            ## IETF font key algorithm:
            print(
                "FontDecrypt: Checking {0} for IETF font obfuscation keys ... "
                .format(path),
                end='')
            secret_key_name = None
            try:
                secret_key_name = container.get("unique-identifier")
            except:
                pass

            try:
                identify_element = container.find(packageNS("metadata")).find(
                    metadataDCNS("identifier"))
                if (secret_key_name is None
                        or secret_key_name == identify_element.get("id")):
                    font_master_key = identify_element.text
            except:
                pass

            if (font_master_key is not None):
                if (secret_key_name is None):
                    print("found '%s'" % (font_master_key))
                else:
                    print("found '%s' (%s)" %
                          (font_master_key, secret_key_name))

                # Trim / remove forbidden characters from the key, then hash it:
                font_master_key = font_master_key.replace(' ', '')
                font_master_key = font_master_key.replace('\t', '')
                font_master_key = font_master_key.replace('\r', '')
                font_master_key = font_master_key.replace('\n', '')
                font_master_key = font_master_key.encode('utf-8')
                font_master_key = hashlib.sha1(font_master_key).digest()
            else:
                print("not found")

            ## Adobe font key algorithm
            print(
                "FontDecrypt: Checking {0} for Adobe font obfuscation keys ... "
                .format(path),
                end='')

            try:
                metadata = container.find(packageNS("metadata"))
                identifiers = metadata.findall(metadataDCNS("identifier"))

                uid = None
                uidMalformed = False

                for identifier in identifiers:
                    if identifier.get(packageNS("scheme")) == "UUID":
                        if identifier.text[:9] == "urn:uuid:":
                            uid = identifier.text[9:]
                        else:
                            uid = identifier.text
                        break
                    if identifier.text[:9] == "urn:uuid:":
                        uid = identifier.text[9:]
                        break

                if uid is not None:
                    uid = uid.replace(chr(0x20), '').replace(chr(0x09), '')
                    uid = uid.replace(chr(0x0D),
                                      '').replace(chr(0x0A),
                                                  '').replace('-', '')

                    if len(uid) < 16:
                        uidMalformed = True
                    if not all(c in "0123456789abcdefABCDEF" for c in uid):
                        uidMalformed = True

                    if not uidMalformed:
                        print("found '{0}'".format(uid))
                        uid = uid + uid
                        adobe_master_encryption_key = binascii.unhexlify(
                            uid[:32])

                if adobe_master_encryption_key is None:
                    print("not found")

            except:
                print("exception")
                pass

        # Begin decrypting.

        try:
            encryption = inf.read('META-INF/encryption.xml')
            decryptor = Decryptor(font_master_key, adobe_master_encryption_key,
                                  encryption)
            kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
            with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:

                # Mimetype needs to be the first entry, so remove it from the list
                # wherever it is, then add it at the beginning.
                namelist.remove("mimetype")

                for path in (["mimetype"] + namelist):
                    data = inf.read(path)
                    zi = ZipInfo(path)
                    zi.compress_type = ZIP_DEFLATED

                    if path == "mimetype":
                        # mimetype must not be compressed
                        zi.compress_type = ZIP_STORED

                    elif path == "META-INF/encryption.xml":
                        # Check if there's still other entries not related to fonts
                        if (decryptor.check_if_remaining()):
                            data = decryptor.get_xml()
                            print(
                                "FontDecrypt: There's remaining entries in encryption.xml, adding file ..."
                            )
                        else:
                            # No remaining entries, no need for that file.
                            continue

                    try:
                        # get the file info, including time-stamp
                        oldzi = inf.getinfo(path)
                        # copy across useful fields
                        zi.date_time = oldzi.date_time
                        zi.comment = oldzi.comment
                        zi.extra = oldzi.extra
                        zi.internal_attr = oldzi.internal_attr
                        # external attributes are dependent on the create system, so copy both.
                        zi.external_attr = oldzi.external_attr
                        zi.create_system = oldzi.create_system
                        if any(ord(c) >= 128 for c in path) or any(
                                ord(c) >= 128 for c in zi.comment):
                            # If the file name or the comment contains any non-ASCII char, set the UTF8-flag
                            zi.flag_bits |= 0x800
                    except:
                        pass

                    if path == "mimetype":
                        outf.writestr(zi, inf.read('mimetype'))
                    elif path == "META-INF/encryption.xml":
                        outf.writestr(zi, data)
                    else:
                        outf.writestr(zi, decryptor.decrypt(path, data))
        except:
            print(
                "FontDecrypt: Could not decrypt fonts in {0:s} because of an exception:\n{1:s}"
                .format(os.path.basename(inpath), traceback.format_exc()))
            traceback.print_exc()
            return 2
    return 0
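The mimetype handling above follows the EPUB/OCF packaging rule used throughout these snippets: the mimetype entry must be the first member of the archive and must be stored uncompressed. A standalone illustration with placeholder file contents:

from zipfile import ZipFile, ZipInfo, ZIP_STORED, ZIP_DEFLATED

with ZipFile("book.epub", "w") as outf:
    # mimetype goes first and is stored uncompressed so readers can sniff it
    mt = ZipInfo("mimetype")
    mt.compress_type = ZIP_STORED
    outf.writestr(mt, "application/epub+zip")

    # every other entry may be deflated as usual
    meta = ZipInfo("META-INF/container.xml")
    meta.compress_type = ZIP_DEFLATED
    outf.writestr(meta, "<container/>")  # placeholder content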
Example #44
0
    def writecompressed(self,
                        zinfo_or_arcname,
                        compressed_input,
                        crc,
                        uncompressed_size,
                        compressed_size,
                        compress_type=ZIP_DEFLATED):
        """Write pre compressed data into the archive.

        This method could be useful in a case when data needs to be stored in
        a different order from what it is being produced. For example when
        writing a GML-file, the bbox element needs to be written at the top of
        the document. Naturally this can not be produced until all members of
        the final document has been visited. The 'compressed_input' can be an
        itarator of file like objects or strings. In the GML case the head of
        the file would probably come from a StringIO object, bulk from temp file
        and tail from StringIO object. Care has to be taken to properly compose
        the different pre-compressed parts so that the concatenated value
        becomes a valid deflate-stream. Also note that the combined crc32 has to
        be calculated in correct order. This can be achieved using method
        crc32_combine from zlib.

        zinfo_or_arcname:  Either a ZipInfo instance or the name of the file in
                           the archive.
        compressed_input:  The pre compressed content. This can be either a
                           string, a file object or an iterator. If input is an
                           iterator, each item will be checked if it's a string
                           or file object.
        crc:               The CRC32 checksum of the (combined) input.
        uncompressed_size:
        compressed_size:
        compress_type:
         """
        if not compress_type == self.compression:
            raise RuntimeError(
                "Pre-compressed data has to use the same compression as this "
                "archive: got {}, expected {}".format(compress_type,
                                                      self.compression))
        if not isinstance(zinfo_or_arcname, ZipInfo):
            zinfo = ZipInfo(filename=zinfo_or_arcname,
                            date_time=time.localtime(time.time())[:6])

            zinfo.compress_type = self.compression
            if zinfo.filename[-1] == '/':
                zinfo.external_attr = 0o40775 << 16  # drwxrwxr-x
                zinfo.external_attr |= 0x10  # MS-DOS directory flag
            else:
                zinfo.external_attr = 0o600 << 16  # ?rw-------
        else:
            zinfo = zinfo_or_arcname

        if not self.fp:
            raise RuntimeError(
                "Attempt to write to ZIP archive that was already closed")

        if compress_type is not None:
            zinfo.compress_type = compress_type

        zinfo.file_size = uncompressed_size  # Uncompressed size
        zinfo.header_offset = self.fp.tell()  # Start of header bytes
        self._writecheck(zinfo)
        self._didModify = True
        zinfo.CRC = crc & 0xffffffff  # CRC-32 checksum

        zinfo.compress_size = compressed_size

        zip64 = zinfo.file_size > ZIP64_LIMIT or \
                zinfo.compress_size > ZIP64_LIMIT
        if zip64 and not self._allowZip64:
            raise LargeZipFile("Filesize would require ZIP64 extensions")
        self.fp.write(zinfo.FileHeader(zip64))

        if isinstance(compressed_input, basestring):
            self.fp.write(compressed_input)
        elif hasattr(compressed_input, '__iter__'):
            for o in compressed_input:
                if isinstance(o, basestring):
                    self.fp.write(o)
                else:
                    shutil.copyfileobj(o, self.fp)
        else:
            shutil.copyfileobj(compressed_input, self.fp)

        if zinfo.flag_bits & 0x08:
            # Write CRC and file sizes after the file data
            fmt = '<LQQ' if zip64 else '<LLL'
            self.fp.write(
                struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
                            zinfo.file_size))
        self.fp.flush()
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo
        return
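A minimal usage sketch for writecompressed, assuming archive is an instance of the class defining it and that the archive uses ZIP_DEFLATED: the payload is pre-compressed into a raw deflate stream (negative wbits, as ZIP stores it) and its CRC and sizes are computed up front.

import zlib
from zipfile import ZIP_DEFLATED

def precompress(data):
    """Return (raw deflate stream, crc32, uncompressed size, compressed size)."""
    cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
    blob = cmpr.compress(data) + cmpr.flush()
    return blob, zlib.crc32(data) & 0xffffffff, len(data), len(blob)

payload = b"<gml>...</gml>"
blob, crc, usize, csize = precompress(payload)
# 'archive' is assumed to be an instance of the class that defines writecompressed:
# archive.writecompressed("doc.gml", blob, crc, usize, csize, ZIP_DEFLATED)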
Example #45
0
def root():
    if request.method == 'GET':
        return app.send_static_file('index.html')
    elif request.method == 'POST':
        file_list = request.files.getlist('files[]')
        # Return single file
        if len(file_list) < 1:
            return 'No files uploaded'
        elif len(file_list) == 1:
            file = file_list[0]
            log.info('Converting file {}'.format(file.filename))
            # Get file name and type
            filename = path.splitext(file.filename)[0]
            file_type = path.splitext(file.filename)[1].strip('.')

            try:  # Convert to markdown
                markdown = convert_text(file.stream.read(), 'md', file_type)
            except RuntimeError as e:
                if e.args[0].startswith('Invalid input format!'):
                    log.error('Invalid input format!')
                    return 'Unsupported file type: {}'.format(
                        file_type.upper())
                else:
                    log.error(e.args[0])
                    return 'Unknown Error'

            # Send markdown file as attachment
            return Response(markdown,
                            mimetype='text/markdown',
                            headers={
                                'Content-Disposition':
                                'attachment;filename={}.md'.format(filename)
                            })

        else:  # Return zip file
            memory_file = BytesIO()
            with ZipFile(memory_file, 'w') as zf:
                for file in file_list:
                    log.info('Converting file {}'.format(file.filename))

                    # Get file name and file type
                    filename = path.splitext(file.filename)[0]
                    file_type = path.splitext(file.filename)[1].strip('.')

                    try:  # Convert to markdown
                        markdown = convert_text(file.stream.read(), 'md',
                                                file_type)
                    except RuntimeError as e:

                        if e.args[0].startswith('Invalid input format!'):
                            log.error('Invalid input format!')
                            return 'Unsupported file type: {}'.format(
                                file_type.upper())
                        else:
                            log.error(e.args[0])
                            return 'Unknown Error'

                    # Write File to Zipfile
                    data = ZipInfo('{}.md'.format(filename))
                    data.date_time = localtime(time())[:6]
                    data.compress_type = ZIP_DEFLATED
                    zf.writestr(data, markdown)

            # Send zip file as attachment
            memory_file.seek(0)
            return send_file(memory_file,
                             attachment_filename='Markdown.zip',
                             as_attachment=True)
Example #46
0
def removeOPFwatermarks(object, path_to_ebook):
    contNS = lambda tag: '{%s}%s' % (
        'urn:oasis:names:tc:opendocument:xmlns:container', tag)
    opf_path = None

    try:
        inf = ZipFile(open(path_to_ebook, 'rb'))
        container = etree.fromstring(inf.read("META-INF/container.xml"))
        rootfiles = container.find(contNS("rootfiles")).findall(
            contNS("rootfile"))
        for rootfile in rootfiles:
            opf_path = rootfile.get("full-path", None)
            if (opf_path is not None):
                break
    except:
        traceback.print_exc()
        return path_to_ebook

    # If path is None, we didn't find an OPF, so we probably don't have a font key.
    # If path is set, it's the path to the main content OPF file.

    if (opf_path is None):
        # No OPF found - no watermark
        return path_to_ebook
    else:
        try:
            container_str = inf.read(opf_path).decode("utf-8")
            container_str_new = container_str

            had_amazon = False
            had_elibri = False

            # Remove Amazon hex watermarks
            # Match optional newline at the beginning, then spaces, then a "meta" tag with name = "Watermark" or "Watermark_(hex)" and a "content" element.
            # This regex also matches DuMont watermarks with meta name="watermark", with the case-insensitive match on the "w" in watermark.
            pre_remove = container_str_new
            container_str_new = re.sub(
                r'((\r\n|\r|\n)\s*)?\<meta\s+name=\"[Ww]atermark(_\(hex\))?\"\s+content=\"[0-9a-fA-F]+\"\s*\/>',
                '', container_str_new)
            container_str_new = re.sub(
                r'((\r\n|\r|\n)\s*)?\<meta\s+content=\"[0-9a-fA-F]+\"\s+name=\"[Ww]atermark(_\(hex\))?\"\s*\/>',
                '', container_str_new)
            if pre_remove != container_str_new:
                had_amazon = True

            # Remove elibri / lemonink watermark
            # Lemonink replaces all "id" fields in the opf with "idX_Y", with X being the watermark and Y being a number for that particular ID.
            # This regex replaces all "idX_Y" IDs with "id_Y", removing the watermark IDs.
            pre_remove = container_str_new
            container_str_new = re.sub(
                r'((\r\n|\r|\n)\s*)?\<\!\-\-\s*Wygenerowane przez elibri dla zamówienia numer [0-9a-fA-F]+\s*\-\-\>',
                '', container_str_new)
            if pre_remove != container_str_new:
                # To prevent this Regex from applying to books without that watermark, only do that if the watermark above was found.
                container_str_new = re.sub(r'\=\"id[0-9]+_([0-9]+)\"',
                                           r'="id_\1"', container_str_new)
            if pre_remove != container_str_new:
                had_elibri = True

        except:
            traceback.print_exc()
            return path_to_ebook

        if (container_str == container_str_new):
            # container didn't change - no watermark
            return path_to_ebook

        # Re-package without watermark
        namelist = inf.namelist()
        namelist.remove("mimetype")

        try:
            output = object.temporary_file(".epub").name
            kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
            with closing(ZipFile(open(output, 'wb'), 'w', **kwds)) as outf:
                for path in (["mimetype"] + namelist):

                    data = inf.read(path)
                    if path == opf_path:
                        # Found OPF, replacing ...
                        data = container_str_new

                    zi = ZipInfo(path)
                    oldzi = inf.getinfo(path)
                    try:
                        zi.compress_type = oldzi.compress_type
                        if path == "mimetype":
                            zi.compress_type = ZIP_STORED
                        zi.date_time = oldzi.date_time
                        zi.comment = oldzi.comment
                        zi.extra = oldzi.extra
                        zi.internal_attr = oldzi.internal_attr
                        zi.external_attr = oldzi.external_attr
                        zi.create_system = oldzi.create_system
                        if any(ord(c) >= 128 for c in path) or any(
                                ord(c) >= 128 for c in zi.comment):
                            # If the file name or the comment contains any non-ASCII char, set the UTF8-flag
                            zi.flag_bits |= 0x800
                    except:
                        pass

                    outf.writestr(zi, data)
        except:
            traceback.print_exc()
            return path_to_ebook

        if had_elibri:
            print(
                "Watermark: Successfully stripped eLibri watermark from OPF file."
            )
        if had_amazon:
            print(
                "Watermark: Successfully stripped Amazon watermark from OPF file."
            )

        return output
Example #47
0
    def fix_zip(self):
        if not self.broken:
            return False
        self.fp.seek(0, 2)
        file_len = self.fp.tell()
        mm = mmap.mmap(self.fp.fileno(), 0, access=mmap.ACCESS_READ)
        offset = 0
        file_list = {}
        cd_list = {}

        try:

            # pass one, parse the zip file
            while offset + 4 < file_len:
                hdr_off = mm.find(b"PK", offset)
                if hdr_off == -1:
                    break
                hdr_type = mm[hdr_off:hdr_off + 4]
                if hdr_type == stringFileHeader:
                    # local file header
                    if hdr_off + sizeFileHeader > file_len:
                        break
                    fheader = mm[hdr_off:hdr_off + sizeFileHeader]
                    fheader = struct.unpack(structFileHeader, fheader)
                    start = hdr_off
                    size = sizeFileHeader + fheader[_FH_COMPRESSED_SIZE] + fheader[_FH_FILENAME_LENGTH] + \
                        fheader[_FH_EXTRA_FIELD_LENGTH]
                    name = mm[hdr_off + sizeFileHeader:hdr_off +
                              sizeFileHeader + fheader[_FH_FILENAME_LENGTH]]
                    file_list[name] = [start, size, fheader]
                    offset = hdr_off + size
                elif hdr_type == stringCentralDir:
                    if hdr_off + sizeCentralDir > file_len:
                        break
                    centdir = mm[hdr_off:hdr_off + sizeCentralDir]
                    centdir = struct.unpack(structCentralDir, centdir)
                    start = hdr_off
                    size = sizeCentralDir + centdir[_CD_FILENAME_LENGTH] + centdir[_CD_EXTRA_FIELD_LENGTH] + \
                        centdir[_CD_COMMENT_LENGTH]
                    name = mm[hdr_off + sizeCentralDir:hdr_off +
                              sizeCentralDir + centdir[_CD_FILENAME_LENGTH]]
                    cd_list[name] = [start, size, centdir]
                    offset = hdr_off + size
                elif hdr_type == stringEndArchive:
                    offset = hdr_off + sizeEndCentDir
                else:
                    offset = hdr_off + 1

            # Guesses
            last_cv = 20
            last_ea = 0
            last_cs = 0
            last_dt = (0, 0)

            # Pass two, repair
            for filename, (start, end, centdir) in cd_list.items():
                if filename not in file_list:
                    continue

                if isinstance(filename, bytes):
                    x = ZipInfo(filename.decode('utf-8', 'backslashreplace'))
                else:
                    x = ZipInfo(filename)
                extra_off = start + sizeCentralDir
                x.extra = mm[extra_off:extra_off +
                             centdir[_CD_EXTRA_FIELD_LENGTH]]
                extra_off += centdir[_CD_EXTRA_FIELD_LENGTH]
                x.comment = mm[extra_off:extra_off +
                               centdir[_CD_COMMENT_LENGTH]]

                x.header_offset = file_list[filename][0]

                (x.create_version, x.create_system, x.extract_version,
                 x.reserved, x.flag_bits, x.compress_type, t, d, x.CRC,
                 x.compress_size, x.file_size) = centdir[1:12]
                x.volume, x.internal_attr, x.external_attr = centdir[15:18]
                # Convert date/time code to (year, month, day, hour, min, sec)
                x._raw_time = t
                x.date_time = ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F,
                               t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2)

                last_ea = x.external_attr
                last_cs = x.create_system
                last_cv = x.create_version
                last_dt = (d, t)

                # noinspection PyProtectedMember
                x._decodeExtra()
                # x.filename = x._decodeFilename()
                self.filelist.append(x)
                self.NameToInfo[x.filename] = x

            for filename, (start, end, fheader) in file_list.items():
                if filename in cd_list:
                    continue

                x = ZipInfo(filename.decode('utf-8', 'backslashreplace'))
                x.extra = ""
                x.comment = ""

                x.header_offset = file_list[filename][0]

                x.create_version = last_cv
                x.create_system = last_cs
                x.extract_version = fheader[_FH_EXTRACT_VERSION]
                x.reserved = 0
                x.flag_bits = fheader[_FH_GENERAL_PURPOSE_FLAG_BITS]
                x.compress_type = fheader[_FH_COMPRESSION_METHOD]
                d, t = last_dt
                x.CRC = fheader[_FH_CRC]
                x.compress_size = fheader[_FH_COMPRESSED_SIZE]
                x.file_size = fheader[_FH_UNCOMPRESSED_SIZE]

                x.volume = 0
                x.internal_attr = 0
                x.external_attr = last_ea

                # Convert date/time code to (year, month, day, hour, min, sec)
                x._raw_time = t
                x.date_time = ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F,
                               t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2)

                # noinspection PyProtectedMember
                x._decodeExtra()
                # x.filename = x._decodeFilename()
                self.filelist.append(x)
                self.NameToInfo[x.filename] = x
        finally:
            mm.close()
Example #48
0
             df['Lambda'][index] + '.py',
             'w',
             encoding='UTF-8'
     ) as outfile:  # Storing the initial config in the 'Input' folder.
         outfile.write(python_code)
 shutil.copy(
     path_prefix + config['Git_script_location_prefix'] +
     df['Lambda'][index] + ".py", config['Python_file_name']
 )  # Moves and renames the python code file as 'lambda_function.py'
 file = open(config['Python_file_name'], 'rb')
 zip_code = file.read()
 zip_file_name = path_prefix + config['Zip_Code_prefix'] + df['Lambda'][
     index] + '.zip'  # Creates the zip file and adds the permissions.
 zip_file = ZipFile(zip_file_name, 'w', compression=ZIP_DEFLATED)
 zip_info = ZipInfo(config['Python_file_name'])
 zip_info.compress_type = ZIP_DEFLATED
 zip_info.create_system = 3
 zip_info.external_attr = 0o777 << 16
 zip_file.writestr(zip_info, zip_code)  # Adds the code to the zip file.
 os.remove(config['Python_file_name'])
 if 'Environment' in func_config:
     env = func_config['Environment']
     variables = env['Variables']
     if 'LAMBDA_ENV' in variables:
         variables['LAMBDA_ENV'] = 'NPROD'
     if 'envprefix' in variables:
         variables['envprefix'] = 'us-nprod-odp'
     if 'BUCKET_NAME' in variables:
         variables['BUCKET_NAME'] = 'odp-us-nprod-servicesuite'
     env['Variables'] = variables
     func_config['Environment'] = env
Example #49
0
def decryptBook(userkey, inpath, outpath):
    with closing(ZipFile(open(inpath, 'rb'))) as inf:
        namelist = inf.namelist()
        if 'META-INF/rights.xml' not in namelist or \
           'META-INF/encryption.xml' not in namelist:
            print("{0:s} is DRM-free.".format(os.path.basename(inpath)))
            return 1
        for name in META_NAMES:
            namelist.remove(name)
        try:
            rights = etree.fromstring(inf.read('META-INF/rights.xml'))
            adept = lambda tag: '{%s}%s' % (NSMAP['adept'], tag)
            expr = './/%s' % (adept('encryptedKey'), )
            bookkeyelem = rights.find(expr)
            bookkey = bookkeyelem.text
            keytype = bookkeyelem.attrib.get('keyType', '0')
            if len(bookkey) >= 172 and int(keytype, 10) > 2:
                print("{0:s} is a secure Adobe Adept ePub with hardening.".
                      format(os.path.basename(inpath)))
            elif len(bookkey) == 172:
                print("{0:s} is a secure Adobe Adept ePub.".format(
                    os.path.basename(inpath)))
            elif len(bookkey) == 64:
                print("{0:s} is a secure Adobe PassHash (B&N) ePub.".format(
                    os.path.basename(inpath)))
            else:
                print("{0:s} is not an Adobe-protected ePub!".format(
                    os.path.basename(inpath)))
                return 1

            if len(bookkey) != 64:
                # Normal or "hardened" Adobe ADEPT
                rsakey = RSA.import_key(userkey)  # parses the ASN1 structure
                bookkey = base64.b64decode(bookkey)
                if int(keytype, 10) > 2:
                    bookkey = removeHardening(rights, keytype, bookkey)
                try:
                    bookkey = PKCS1_v1_5.new(rsakey).decrypt(
                        bookkey, None)  # automatically unpads
                except ValueError:
                    bookkey = None

                if bookkey is None:
                    print("Could not decrypt {0:s}. Wrong key".format(
                        os.path.basename(inpath)))
                    return 2
            else:
                # Adobe PassHash / B&N
                key = base64.b64decode(userkey)[:16]
                bookkey = base64.b64decode(bookkey)
                bookkey = unpad(
                    AES.new(key, AES.MODE_CBC, b'\x00' * 16).decrypt(bookkey),
                    16)  # PKCS#7

                if len(bookkey) > 16:
                    bookkey = bookkey[-16:]

            encryption = inf.read('META-INF/encryption.xml')
            decryptor = Decryptor(bookkey, encryption)
            kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
            with closing(ZipFile(open(outpath, 'wb'), 'w', **kwds)) as outf:

                for path in (["mimetype"] + namelist):
                    data = inf.read(path)
                    zi = ZipInfo(path)
                    zi.compress_type = ZIP_DEFLATED

                    if path == "mimetype":
                        zi.compress_type = ZIP_STORED

                    elif path == "META-INF/encryption.xml":
                        # Check if there's still something in there
                        if (decryptor.check_if_remaining()):
                            data = decryptor.get_xml()
                            print(
                                "Adding encryption.xml for the remaining embedded files."
                            )
                            # We removed DRM, but there's still stuff like obfuscated fonts.
                        else:
                            continue

                    try:
                        # get the file info, including time-stamp
                        oldzi = inf.getinfo(path)
                        # copy across useful fields
                        zi.date_time = oldzi.date_time
                        zi.comment = oldzi.comment
                        zi.extra = oldzi.extra
                        zi.internal_attr = oldzi.internal_attr
                        # external attributes are dependent on the create system, so copy both.
                        zi.external_attr = oldzi.external_attr
                        zi.create_system = oldzi.create_system
                        if any(ord(c) >= 128 for c in path) or any(
                                ord(c) >= 128 for c in zi.comment):
                            # If the file name or the comment contains any non-ASCII char, set the UTF8-flag
                            zi.flag_bits |= 0x800
                    except:
                        pass
                    if path == "META-INF/encryption.xml":
                        outf.writestr(zi, data)
                    else:
                        outf.writestr(zi, decryptor.decrypt(path, data))
        except:
            print("Could not decrypt {0:s} because of an exception:\n{1:s}".
                  format(os.path.basename(inpath), traceback.format_exc()))
            return 2
    return 0
Example #50
0
    def add_to_zip(file_obj, z_file):
        """Add the file to the zip and set the attributes."""
        zinfo = ZipInfo(file_obj['name'], file_obj['time'])
        zinfo.compress_type = z_file.compression
        zinfo.external_attr = 0o644 << 16
        z_file.writestr(zinfo, file_obj['data'])
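A small usage sketch for the helper above, assuming add_to_zip is reachable at module level; the dict keys match the ones the helper reads, and the file name and payload are illustrative:

import time
from zipfile import ZipFile, ZIP_DEFLATED

with ZipFile("out.zip", "w", compression=ZIP_DEFLATED) as z_file:
    add_to_zip({"name": "notes.txt",
                "time": time.localtime()[:6],  # (year, month, day, hour, min, sec)
                "data": b"hello"},
               z_file)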
Example #51
0
    def testZipImporterMethodsInSubDirectory(self):
        packdir = TESTPACK + os.sep
        packdir2 = packdir + TESTPACK2 + os.sep
        files = {
            packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
            packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)
        }

        self.addCleanup(os_helper.unlink, TEMP_ZIP)
        with ZipFile(TEMP_ZIP, "w") as z:
            for name, (mtime, data) in files.items():
                zinfo = ZipInfo(name, time.localtime(mtime))
                zinfo.compress_type = self.compression
                zinfo.comment = b"eggs"
                z.writestr(zinfo, data)

        zi = zipimport.zipimporter(TEMP_ZIP + os.sep + packdir)
        self.assertEqual(zi.archive, TEMP_ZIP)
        self.assertEqual(zi.prefix, packdir)
        self.assertTrue(zi.is_package(TESTPACK2))
        # PEP 302
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            mod = zi.load_module(TESTPACK2)
            self.assertEqual(zi.get_filename(TESTPACK2), mod.__file__)
        # PEP 451
        spec = zi.find_spec(TESTPACK2)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        self.assertEqual(spec.loader.get_filename(TESTPACK2), mod.__file__)

        self.assertFalse(zi.is_package(TESTPACK2 + os.sep + '__init__'))
        self.assertFalse(zi.is_package(TESTPACK2 + os.sep + TESTMOD))

        pkg_path = TEMP_ZIP + os.sep + packdir + TESTPACK2
        zi2 = zipimport.zipimporter(pkg_path)
        # PEP 302
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            find_mod_dotted = zi2.find_module(TESTMOD)
            self.assertIsNotNone(find_mod_dotted)
            self.assertIsInstance(find_mod_dotted, zipimport.zipimporter)
            self.assertFalse(zi2.is_package(TESTMOD))
            load_mod = find_mod_dotted.load_module(TESTMOD)
            self.assertEqual(find_mod_dotted.get_filename(TESTMOD),
                             load_mod.__file__)

        # PEP 451
        spec = zi2.find_spec(TESTMOD)
        self.assertIsNotNone(spec)
        self.assertIsInstance(spec.loader, zipimport.zipimporter)
        self.assertFalse(spec.loader.is_package(TESTMOD))
        load_mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(load_mod)
        self.assertEqual(spec.loader.get_filename(TESTMOD), load_mod.__file__)

        mod_path = TESTPACK2 + os.sep + TESTMOD
        mod_name = module_path_to_dotted_name(mod_path)
        mod = importlib.import_module(mod_name)
        self.assertTrue(mod_name in sys.modules)
        self.assertIsNone(zi.get_source(TESTPACK2))
        self.assertIsNone(zi.get_source(mod_path))
        self.assertEqual(zi.get_filename(mod_path), mod.__file__)
        # To pass in the module name instead of the path, we must use the
        # right importer.
        loader = mod.__loader__
        self.assertIsNone(loader.get_source(mod_name))
        self.assertEqual(loader.get_filename(mod_name), mod.__file__)
Example #52
0
def create_zipinfo(filename,
                   mtime=None,
                   dir=False,
                   executable=False,
                   symlink=False,
                   comment=None):
    """Create a instance of `ZipInfo`.

    :param filename: file name of the entry
    :param mtime: modified time of the entry
    :param dir: if `True`, the entry is a directory
    :param executable: if `True`, the entry is an executable file
    :param symlink: if `True`, the entry is a symbolic link
    :param comment: comment of the entry
    """
    from zipfile import ZipInfo, ZIP_DEFLATED, ZIP_STORED
    zipinfo = ZipInfo()

    # The general purpose bit flag 11 is used to denote
    # UTF-8 encoding for path and comment. Only set it for
    # non-ascii files for increased portability.
    # See http://www.pkware.com/documents/casestudies/APPNOTE.TXT
    if any(ord(c) >= 128 for c in filename):
        zipinfo.flag_bits |= 0x0800
    zipinfo.filename = filename.encode('utf-8')

    if mtime is not None:
        mtime = to_datetime(mtime, utc)
        zipinfo.date_time = mtime.utctimetuple()[:6]
        # The "extended-timestamp" extra field is used for the
        # modified time of the entry in unix time. It avoids
        # extracting wrong modified time if non-GMT timezone.
        # See http://www.opensource.apple.com/source/zip/zip-6/unzip/unzip
        #     /proginfo/extra.fld
        zipinfo.extra += struct.pack(
            '<hhBl',
            0x5455,  # extended-timestamp extra block type
            1 + 4,  # size of this block
            1,  # modification time is present
            to_timestamp(mtime))  # time of last modification

    # external_attr is 4 bytes in size. The high order two
    # bytes represent UNIX permission and file type bits,
    # while the low order two contain MS-DOS FAT file
    # attributes, most notably bit 4 marking directories.
    if dir:
        if not zipinfo.filename.endswith('/'):
            zipinfo.filename += '/'
        zipinfo.compress_type = ZIP_STORED
        zipinfo.external_attr = 040755 << 16L  # permissions drwxr-xr-x
        zipinfo.external_attr |= 0x10  # MS-DOS directory flag
    else:
        zipinfo.compress_type = ZIP_DEFLATED
        zipinfo.external_attr = 0644 << 16L  # permissions -rw-r--r--
        if executable:
            zipinfo.external_attr |= 0755 << 16L  # -rwxr-xr-x
        if symlink:
            zipinfo.compress_type = ZIP_STORED
            zipinfo.external_attr |= 0120000 << 16L  # symlink file type

    if comment:
        zipinfo.comment = comment.encode('utf-8')

    return zipinfo
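A hedged usage sketch for create_zipinfo above, assuming Python 2 to match the octal/L literals in the function; the archive path, entry names, and comment are illustrative only.

from zipfile import ZipFile

with ZipFile('export.zip', 'w') as zf:
    # Regular file entry; omitting mtime skips the extended-timestamp extra field.
    zf.writestr(create_zipinfo(u'docs/readme.txt', comment=u'exported file'),
                'file contents')
    # Directory entry: no payload, but the drwxr-xr-x bits and the MS-DOS
    # directory flag are still recorded.
    zf.writestr(create_zipinfo(u'docs/', dir=True), '')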
Example #53
0
def DecryptBook(bookId):
    global gCurBook

    print("[I] Decrypt  book: " + bookId + " [" + gBookData[bookId][0] + "]")

    encFile = os.path.join(gOutDir, ENC_BOOKS_DIR, bookId + EXT_EPUB)
    decFile = os.path.join(gOutDir, DEC_BOOKS_DIR, bookId + EXT_EPUB)

    if not os.path.isfile(encFile):
        print("[E]   File not found: " + encFile)
        return EResult.NO_GOOD

    if not CheckEpubIntegrity(bookId):
        print("[E]   Corrupted ePub file! (Re-download)")
        return EResult.NO_GOOD

    with closing(ZipFile(open(encFile, "rb"))) as inf:
        namelist = set(inf.namelist())

        if ENCRYPTION_XML not in namelist:
            print("[W]   Can't find " + ENCRYPTION_XML +
                  ". Assume it's DRM-free book")
            if os.path.isfile(decFile):
                os.remove(decFile)
            shutil.copyfile(encFile, decFile)
            return EResult.OKAY

        for name in META_NAMES:
            namelist.remove(name)

        try:
            # get book AES key from META-INF/encryption.xml
            encryption = etree.fromstring(inf.read(ENCRYPTION_XML))
            aesKeyB64 = encryption.findtext('.//enc:CipherValue', None, NSMAP)
            if aesKeyB64 is None:
                print("[E]   Can't find encrypted AES key!")
                return EResult.NO_GOOD

            for k in gRsaKeys:
                bookkey = k.decrypt(base64.b64decode(aesKeyB64))
                if bookkey is not None:
                    break

            if bookkey is None:
                print("[E]   Can't decrypt AES key!")
                return EResult.NO_GOOD

            gCurBook._id = bookId
            gCurBook._aeskey = ''.join(hex(x)[2:].zfill(2)
                                       for x in bookkey).upper()

            print("      AES KEY = {0}".format(gCurBook._aeskey))

            decryptor = Decryptor(bookkey, encryption)
            if len(decryptor._encFontIdpf) > 0:
                decryptor.SetBookUid(GetBookUid(decryptor, inf))
            opfs = GetOpfNamesFromEpub(inf)
            if len(opfs) > 1:
                print("[W]   Num of rootfile = " + str(len(opfs)))
            kwds = dict(compression=ZIP_DEFLATED, allowZip64=False)
            with closing(ZipFile(open(decFile, 'wb'), 'w', **kwds)) as outf:
                zi = ZipInfo(MIMETYPE)
                zi.compress_type = ZIP_STORED
                try:
                    # if the mimetype is present, get its info, including time-stamp
                    oldzi = inf.getinfo(MIMETYPE)
                    # copy across fields to be preserved
                    zi.date_time = oldzi.date_time
                    zi.comment = oldzi.comment
                    zi.extra = oldzi.extra
                    zi.internal_attr = oldzi.internal_attr
                    # external attributes are dependent on the create system, so copy both.
                    zi.external_attr = oldzi.external_attr
                    zi.create_system = oldzi.create_system
                except:
                    pass
                outf.writestr(zi, inf.read(MIMETYPE))

                # process files in ePub
                for path in namelist:
                    data = inf.read(path)
                    zi = ZipInfo(path)
                    zi.compress_type = ZIP_DEFLATED
                    try:
                        # get the file info, including time-stamp
                        oldzi = inf.getinfo(path)
                        # copy across useful fields
                        zi.date_time = oldzi.date_time
                        zi.comment = oldzi.comment
                        zi.extra = oldzi.extra
                        zi.internal_attr = oldzi.internal_attr
                        # external attributes are dependent on the create system, so copy both.
                        zi.external_attr = oldzi.external_attr
                        zi.create_system = oldzi.create_system
                    except:
                        pass
                    data = decryptor.decrypt(path, data)
                    if path in opfs:
                        if bookId in gTitleMap:
                            data = ChangeTitle(data, gTitleMap[bookId])
                        if bookId in gAuthorMap:
                            data = ChangeAuthor(data, gAuthorMap[bookId])
                        ShowBookInfo(data)
                    outf.writestr(zi, data)
        except Exception as e:
            print("[E]   Can't decrypt book! (" + str(e) + ")")
            if os.path.isfile(decFile):
                os.remove(decFile)
            return EResult.NO_GOOD

    RenameBook(bookId)
    SaveBookInfo()

    return EResult.OKAY
Example #54
0
def zip():
    global check
    if request.method == 'POST' and not check:
        data = request.json['modules'][0]['accel']
        global button_direction
        x = float(data[0].replace(',', '.'))
        y = float(data[1].replace(',', '.'))
        z = float(data[2].replace(',', '.'))
        if x > -5 and x < 5:
            if y > -5 and y < 5 and z > 5:
                if fabs(x) > fabs(y):
                    if x > 0:
                        button_direction = 3
                    else:
                        button_direction = 2
                else:
                    if y > 0:
                        button_direction = 0
                    else:
                        button_direction = 1
            elif y > -5 and y < 5 and z < -5:
                if fabs(x) > fabs(y):
                    if x > 0:
                        button_direction = 2
                    else:
                        button_direction = 3
                else:
                    if y > 0:
                        button_direction = 0
                    else:
                        button_direction = 1
            elif y > 5 and z > -5 and z < 5:
                if fabs(x) > fabs(y - 10):
                    if x > 0:
                        button_direction = 0
                    else:
                        button_direction = 1
                else:
                    if z > 0:
                        button_direction = 3
                    else:
                        button_direction = 2
            elif y < -5 and z > -5 and z < 5:
                if fabs(x) > fabs(y + 10):
                    if x > 0:
                        button_direction = 1
                    else:
                        button_direction = 0
                else:
                    if z > 0:
                        button_direction = 3
                    else:
                        button_direction = 2
        else:
            if x >= 5:
                if fabs(x - 10) > fabs(y):
                    if z > 0:
                        button_direction = 2
                    else:
                        button_direction = 3
                else:
                    if y > 0:
                        button_direction = 0
                    else:
                        button_direction = 1
            else:
                if fabs(x+10) > fabs(y):
                    if z > 0:
                        button_direction = 3
                    else:
                        button_direction = 2
                else:
                    if y > 0:
                        button_direction = 0
                    else:
                        button_direction = 1

        check = True
        print("in snake return json.dumps")
        return json.dumps(request.json)
    global img
    output_img = cv.rotate(img, cv.ROTATE_90_COUNTERCLOCKWISE)

    encode_param = []
    retval, buffer = cv.imencode('.bmp', output_img, encode_param)

    img_io = io.BytesIO()
    with ZipFile(img_io, "w") as zip_file:
        zip_info = ZipInfo("cubenet.bmp")
        zip_info.compress_type = zipfile.ZIP_DEFLATED
        zip_info.compress_size = 1  # note: writestr() recomputes the compressed size, so this value is overwritten
        zip_file.writestr(zip_info, buffer)
    img_io.seek(0)
    response = make_response(img_io.read())
    response.headers['Content-Type'] = 'application/zip'
    check = False
    print("in snake return response")
    return response
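This example and #55 below both finish with the same pattern: write the encoded image(s) into an in-memory ZIP and return it as a Flask response. A minimal sketch of that pattern as a helper is shown here, assuming Flask's make_response and an already-encoded image buffer; the function name is hypothetical.

import io
import zipfile
from zipfile import ZipFile, ZipInfo
from flask import make_response

def zip_image_response(arcname, image_bytes):
    """Pack one encoded image into an in-memory ZIP and wrap it in a Flask response."""
    buf = io.BytesIO()
    with ZipFile(buf, "w") as zf:
        zi = ZipInfo(arcname)
        zi.compress_type = zipfile.ZIP_DEFLATED
        zf.writestr(zi, image_bytes)
    buf.seek(0)
    response = make_response(buf.read())
    response.headers['Content-Type'] = 'application/zip'
    return response

# e.g. return zip_image_response("cubenet.bmp", buffer.tobytes()) after cv.imencode()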
Example #55
0
def module_to_module():
    global initial_module_num, initial_module, prev_module
    cube = Cube()

    # FIXME debugging step()
    # m5 = cube.modules[5]
    # m5.update_screens(465, 145)
    # cur_screen, screen_x, screen_y = m5.get_attributes()
    # print(f'\n\non m5: {cur_screen, screen_x, screen_y}')

    if initial_module and (initial_module.num == cube.modules[5].num):
        if initial_module is not cube.modules[5]:
            print(
                "WARNING initial module has DIFFERENT address than the cube.modules[5]"
            )
    # with each request we MUST update positions of modules of the cube
    cube.update_grid(request)
    move_circle(dir='up')
    if cube.grid is not None:
        # if there is no previous module yet, use the very first one as the initial module
        if prev_module is None:
            initial_module = cube.modules[initial_module_num]
        # all other modules of the cube
        compared_modules = []
        for module in cube.modules:
            if module.num != initial_module.num:
                compared_modules.append(module)

        # FIXME NOTE that if we keep increasing Y after moving to the new module, the circle won't keep moving
        # FIXME in a straight line, because the axes are angled differently!!!

        # coords of all objects
        global objects_coords
        # update all screens of initial module
        # FIXME if there are two for-loops here we will get more than 24 images - that's wrong!
        # FIXME the step() function in cube works only for ONE object per module,
        # FIXME but there can be several!

        # recalculate coordinates for each of objects for each of the rest of modules
        for obj, [x, y] in objects_coords.items():
            # FIXME for some reason, after moving from m0 to m5 it draws the circle on the wrong side of m5
            # FIXME the bug is that get_attributes() returns wrong coords and step() misbehaves???
            print(
                f'\n\nfor initial module {initial_module.num} coords are {x, y}'
            )
            initial_module.update_screens(x, y)
            for compared_module in compared_modules:
                new_x, new_y = cube.recalc_coords(initial_module.num, x, y,
                                                  compared_module.num)
                print(
                    f'-- for compared module {compared_module.num} new coords are {new_x, new_y}'
                )
                # draw the object only if the new coordinates were successfully calculated
                if (new_x is not None) and (new_y is not None):
                    compared_module.update_screens(new_x, new_y)
                    # if the circle moved onto the other module (the new one) then we change the current module
                    if changed_module(new_x, new_y):
                        print("MOVED TO THE NEW MODULE!")
                        prev_module = initial_module
                        # FIXME bug: "cube.modules[5] is initial_module" should be True, but these are 2 different objects
                        initial_module = compared_module
                        cube.modules[initial_module.num] = initial_module
                        # and change coordinates of the object (now they are calculated relative to the new origin)
                        objects_coords[obj] = [new_x, new_y]
                        print(f'now initial module is {initial_module.num}')
                        break

    # put the images into the response archive
    memory_file = io.BytesIO()
    img_num = 0
    with ZipFile(memory_file, "w") as zip_file:
        for module in cube.modules:
            for screen in module.screens:
                output_img = screen.surface
                encode_param = []
                # encode each of 24 images
                _, buffer = cv2.imencode('.bmp', output_img, encode_param)
                # encode which module this image belongs to in the archive path,
                # so the first 3 images go to the first module, images 4, 5, 6 to the second, etc.
                zip_info = ZipInfo("modules/" + str(module.num) + "/screens/" +
                                   str(screen.num) + ".bmp")
                zip_info.compress_type = zipfile.ZIP_DEFLATED
                zip_info.compress_size = 1  # note: writestr() recomputes the compressed size, so this value is overwritten
                # insert the image into the archive
                zip_file.writestr(zip_info, buffer)
                img_num += 1
    memory_file.seek(0)
    response = make_response(memory_file.read())
    response.headers['Content-Type'] = 'application/zip'

    cube.clear_screens()

    return response
Example #56
0
    def testZipImporterMethods(self):
        packdir = TESTPACK + os.sep
        packdir2 = packdir + TESTPACK2 + os.sep
        files = {
            packdir + "__init__" + pyc_ext: (NOW, test_pyc),
            packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
            packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc),
            "spam" + pyc_ext: (NOW, test_pyc)
        }

        z = ZipFile(TEMP_ZIP, "w")
        try:
            for name, (mtime, data) in files.items():
                zinfo = ZipInfo(name, time.localtime(mtime))
                zinfo.compress_type = self.compression
                zinfo.comment = b"spam"
                z.writestr(zinfo, data)
            z.close()

            zi = zipimport.zipimporter(TEMP_ZIP)
            self.assertEqual(zi.archive, TEMP_ZIP)
            self.assertEqual(zi.is_package(TESTPACK), True)

            find_mod = zi.find_module('spam')
            self.assertIsNotNone(find_mod)
            self.assertIsInstance(find_mod, zipimport.zipimporter)
            self.assertFalse(find_mod.is_package('spam'))
            load_mod = find_mod.load_module('spam')
            self.assertEqual(find_mod.get_filename('spam'), load_mod.__file__)

            mod = zi.load_module(TESTPACK)
            self.assertEqual(zi.get_filename(TESTPACK), mod.__file__)

            existing_pack_path = importlib.import_module(TESTPACK).__path__[0]
            expected_path_path = os.path.join(TEMP_ZIP, TESTPACK)
            self.assertEqual(existing_pack_path, expected_path_path)

            self.assertEqual(zi.is_package(packdir + '__init__'), False)
            self.assertEqual(zi.is_package(packdir + TESTPACK2), True)
            self.assertEqual(zi.is_package(packdir2 + TESTMOD), False)

            mod_path = packdir2 + TESTMOD
            mod_name = module_path_to_dotted_name(mod_path)
            mod = importlib.import_module(mod_name)
            self.assertTrue(mod_name in sys.modules)
            self.assertEqual(zi.get_source(TESTPACK), None)
            self.assertEqual(zi.get_source(mod_path), None)
            self.assertEqual(zi.get_filename(mod_path), mod.__file__)
            # To pass in the module name instead of the path, we must use the
            # right importer
            loader = mod.__loader__
            self.assertEqual(loader.get_source(mod_name), None)
            self.assertEqual(loader.get_filename(mod_name), mod.__file__)

            # test prefix and archivepath members
            zi2 = zipimport.zipimporter(TEMP_ZIP + os.sep + TESTPACK)
            self.assertEqual(zi2.archive, TEMP_ZIP)
            self.assertEqual(zi2.prefix, TESTPACK + os.sep)
        finally:
            z.close()
            os.remove(TEMP_ZIP)
Example #57
0
def zipwrite(archive, filename, arcname=None):
    with open(filename, 'rb') as f:
        zi = ZipInfo(arcname or filename)
        zi.compress_type = ZIP_DEFLATED
        archive.writestr(zi, f.read())
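A hedged usage sketch for zipwrite above; the archive name and file paths are purely illustrative.

from zipfile import ZipFile

with ZipFile('backup.zip', 'w') as archive:
    # Store the file under a different path inside the archive.
    zipwrite(archive, 'settings/config.json', arcname='config/config.json')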
Example #58
0
    def test_writestr_distinfo_compress_type_overrides_zinfo(self, wf):
        zi = ZipInfo('_')
        zi.compress_type = ZIP_DEFLATED
        wf.writestr_distinfo(zi, b'_', compress_type=ZIP_BZIP2)
        arcpath = wf.distinfo_dirname + '/' + zi.filename
        assert wf.zipfile.getinfo(arcpath).compress_type == ZIP_BZIP2
Example #59
0
    def testZipImporterMethods(self):
        packdir = TESTPACK + os.sep
        packdir2 = packdir + TESTPACK2 + os.sep
        files = {
            packdir + "__init__" + pyc_ext: (NOW, test_pyc),
            packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
            packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc),
            "spam" + pyc_ext: (NOW, test_pyc)
        }

        self.addCleanup(os_helper.unlink, TEMP_ZIP)
        with ZipFile(TEMP_ZIP, "w") as z:
            for name, (mtime, data) in files.items():
                zinfo = ZipInfo(name, time.localtime(mtime))
                zinfo.compress_type = self.compression
                zinfo.comment = b"spam"
                z.writestr(zinfo, data)

        zi = zipimport.zipimporter(TEMP_ZIP)
        self.assertEqual(zi.archive, TEMP_ZIP)
        self.assertTrue(zi.is_package(TESTPACK))

        # PEP 302
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)

            mod = zi.load_module(TESTPACK)
            self.assertEqual(zi.get_filename(TESTPACK), mod.__file__)

        # PEP 451
        spec = zi.find_spec('spam')
        self.assertIsNotNone(spec)
        self.assertIsInstance(spec.loader, zipimport.zipimporter)
        self.assertFalse(spec.loader.is_package('spam'))
        exec_mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(exec_mod)
        self.assertEqual(spec.loader.get_filename('spam'), exec_mod.__file__)

        spec = zi.find_spec(TESTPACK)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        self.assertEqual(zi.get_filename(TESTPACK), mod.__file__)

        existing_pack_path = importlib.import_module(TESTPACK).__path__[0]
        expected_path_path = os.path.join(TEMP_ZIP, TESTPACK)
        self.assertEqual(existing_pack_path, expected_path_path)

        self.assertFalse(zi.is_package(packdir + '__init__'))
        self.assertTrue(zi.is_package(packdir + TESTPACK2))
        self.assertFalse(zi.is_package(packdir2 + TESTMOD))

        mod_path = packdir2 + TESTMOD
        mod_name = module_path_to_dotted_name(mod_path)
        mod = importlib.import_module(mod_name)
        self.assertTrue(mod_name in sys.modules)
        self.assertIsNone(zi.get_source(TESTPACK))
        self.assertIsNone(zi.get_source(mod_path))
        self.assertEqual(zi.get_filename(mod_path), mod.__file__)
        # To pass in the module name instead of the path, we must use the
        # right importer
        loader = mod.__spec__.loader
        self.assertIsNone(loader.get_source(mod_name))
        self.assertEqual(loader.get_filename(mod_name), mod.__file__)

        # test prefix and archivepath members
        zi2 = zipimport.zipimporter(TEMP_ZIP + os.sep + TESTPACK)
        self.assertEqual(zi2.archive, TEMP_ZIP)
        self.assertEqual(zi2.prefix, TESTPACK + os.sep)
Example #60
0
with ZipFile(archive, "w", ZIP_DEFLATED) as zf:
    handled_subdirs = set()

    for directory in directories:
        for root, dirs, files in os.walk(directory):
            for fn in files:
                if fn == 'meson.build':
                    continue

                abspath = os.path.join(root, fn)
                rel = os.path.join(os.path.basename(directory),
                                   os.path.relpath(abspath, directory))

                reldir, _, _ = rel.rpartition('/')

                if reldir not in handled_subdirs:
                    handled_subdirs.add(reldir)
                    zi = ZipInfo(
                        reldir + "/",
                        datetime.fromtimestamp(
                            os.path.getmtime(directory)).timetuple())
                    zi.compress_type = ZIP_STORED
                    zi.external_attr = 0o40755 << 16  # drwxr-xr-x
                    zf.writestr(zi, "")

                zf.write(abspath, rel)

    write_depfile(depfile, archive,
                  [os.path.join(sourcedir, x)
                   for x in zf.namelist()] + [__file__])