Example #1
def save_uploadfile_in_zipfile(
    upload_file, upload_filename,
    dest_zipfile_name, dest_filename_in_zip=None):

    nzf = ZipFile(dest_zipfile_name, mode='w', **ZIP_WRITE_OPTS)

    if upload_filename[-3:].lower() == u'zip':
        # uploaded file is itself a zip: check and rename its contents
        zf = ZipFile(StringIO(upload_file.read()), 'r')
        file_names = zf.namelist()
        for filename in file_names:
            # write each member into the new archive under its new name
            new_name = get_new_filename(filename, dest_filename_in_zip)
            nzf.writestr(new_name, zf.read(filename))
        zf.close()
    else:
        # plain (non-zip) upload
        nzf.writestr(
            get_new_filename(upload_filename, dest_filename_in_zip),
            upload_file.read())

    nzf.close()

    # Remove comment line from .asc and .inc files after uploading
    remove_comments_from_asc_files(os.path.dirname(dest_zipfile_name))
Example #2
def MyUnzipFile(in_file_path, target_root_path):
    """Unzip file.

    >>> MyUnzipFile("d:\\zout.zip", "d:\\")
    """
    zin = ZipFile(in_file_path, "r")
    zin.extractall(target_root_path)
    zin.close()
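
A minimal equivalent sketch using ZipFile as a context manager, so the archive is closed even if extraction raises; the helper name is illustrative only:

from zipfile import ZipFile

def unzip_file_safely(in_file_path, target_root_path):
    # the with-block closes the archive automatically, even on error
    with ZipFile(in_file_path, "r") as zin:
        zin.extractall(target_root_path)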
Example #3
class UnZip():
    """Unzip the given file into a temporary directory."""
    def __init__(self, input_file):
        self.input_file = input_file

    def unzip(self):
        self.zip_file = ZipFile(self.input_file, 'r')
        self.tempdir = tempfile.mkdtemp()
        self.zip_file.extractall(self.tempdir)
        return self.tempdir

    def cleanup(self):
        self.zip_file.close()
        for root, dirs, files in os.walk(self.tempdir, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                os.rmdir(os.path.join(root, name))
        os.rmdir(self.tempdir)

    def __enter__(self):
        return self.unzip()

    def __exit__(self, *args):
        self.cleanup()
        return False
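
Because __enter__ and __exit__ are defined, the class is meant to be driven by a with statement; a hedged usage sketch (the archive name is a placeholder):

import os

with UnZip('bundle.zip') as tmpdir:  # placeholder archive name
    # the extracted tree lives in tmpdir until the with-block exits
    for name in os.listdir(tmpdir):
        print(name)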
Example #4
    def export_gtfs(self, gtfs_file):
        """Export a GTFS file as feed

        Keyword arguments:
        gtfs_file - A path or file-like object for the GTFS feed

        This function will close the file in order to finalize it.
        """
        z = ZipFile(gtfs_file, 'w')

        gtfs_order = (
            ('agency.txt', Agency),
            ('calendar.txt', Service),
            ('calendar_dates.txt', ServiceDate),
            ('fare_attributes.txt', Fare),
            ('fare_rules.txt', FareRule),
            ('feed_info.txt', FeedInfo),
            ('frequencies.txt', Frequency),
            ('routes.txt', Route),
            ('shapes.txt', ShapePoint),
            ('stop_times.txt', StopTime),
            ('stops.txt', Stop),
            ('transfers.txt', Transfer),
            ('trips.txt', Trip),
        )

        for filename, exporter in gtfs_order:
            content = exporter.objects.in_feed(self).export_txt()
            if content:
                z.writestr(filename, content)
        z.close()
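
A hedged usage sketch; it assumes export_gtfs is called on a feed model instance (as the self-based queries suggest), with a target path as permitted by the docstring:

feed.export_gtfs('feed.zip')  # 'feed' and 'feed.zip' are placeholders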
Example #5
def main():
    # test if PyXML is installed
    try:
        import _xmlplus.parsers.expat
        pyxml = '--includes _xmlplus.parsers.expat'
    except ImportError:
        pyxml = ''
    # create exe
    status = os.system('python setup.py py2exe %s >& build.log' % pyxml)
    if status != 0:
        raise RuntimeError, 'Error creating EXE'

    # create distribution
    import pyste
    version = pyste.__VERSION__
    zip = ZipFile('pyste-%s.zip' % version, 'w', ZIP_DEFLATED)    
    # include the base files
    dist_dir = 'dist/pyste'
    for basefile in os.listdir(dist_dir):
        zip.write(os.path.join(dist_dir, basefile), os.path.join('pyste', basefile))
    # include documentation
    for doc_file in findfiles('../doc', '*.*'):
        dest_name = os.path.join('pyste/doc', doc_file[3:])
        zip.write(doc_file, dest_name)
    zip.write('../index.html', 'pyste/doc/index.html')
    zip.close()
    # cleanup
    os.remove('build.log')
    shutil.rmtree('build')
    shutil.rmtree('dist')
Example #6
    def __save_zip(self, file):
        """Save a Zip ODF from the available parts."""
        # Parts were loaded by "save"
        parts = self.__parts
        compression = ZIP_DEFLATED
        try:
            filezip = ZipFile(file, 'w', compression=compression)
        except RuntimeError:
            # No zlib module
            compression = ZIP_STORED
            filezip = ZipFile(file, 'w', compression=compression)
        # Parts to save, except the manifest, which goes last
        part_names = list(parts.keys())
        part_names.remove(ODF_MANIFEST)
        # "Pretty-save" parts in a fixed order:
        # the mimetype must come first and be stored uncompressed
        filezip.compression = ZIP_STORED
        filezip.writestr('mimetype', parts['mimetype'])
        filezip.compression = compression
        part_names.remove('mimetype')
        # XML parts
        for path in ODF_CONTENT, ODF_META, ODF_SETTINGS, ODF_STYLES:
            filezip.writestr(path, parts[path])
            part_names.remove(path)
        # Everything else
        for path in part_names:
            data = parts[path]
            if data is None:
                # Deleted
                continue
            filezip.writestr(path, data)
        # Manifest
        filezip.writestr(ODF_MANIFEST, parts[ODF_MANIFEST])
        filezip.close()
Example #7
def build(worker_dir, sha, repo_url, destination, concurrency):
  """Download and build sources in a temporary directory then move exe to destination"""
  tmp_dir = tempfile.mkdtemp()
  os.chdir(tmp_dir)

  with open('sf.gz', 'wb+') as f:
    f.write(requests.get(github_api(repo_url) + '/zipball/' + sha).content)
  zip_file = ZipFile('sf.gz')
  zip_file.extractall()
  zip_file.close()

  for name in zip_file.namelist():
    if name.endswith('/src/'):
      src_dir = name
  os.chdir(src_dir)

  custom_make = os.path.join(worker_dir, 'custom_make.txt')
  if os.path.exists(custom_make):
    with open(custom_make, 'r') as m:
      make_cmd = m.read().strip()
    subprocess.check_call(make_cmd, shell=True)
  else:
    subprocess.check_call(MAKE_CMD + ' -j %s' % (concurrency), shell=True)

  shutil.move('stockfish'+ EXE_SUFFIX, destination)
  os.chdir(worker_dir)
  shutil.rmtree(tmp_dir)
Example #8
    def doTest(self, expected_ext, files, *modules, **kw):
        z = ZipFile(TEMP_ZIP, "w")
        try:
            for name, (mtime, data) in files.items():
                zinfo = ZipInfo(name, time.localtime(mtime))
                zinfo.compress_type = self.compression
                z.writestr(zinfo, data)
            z.close()

            stuff = kw.get("stuff", None)
            if stuff is not None:
                # Prepend 'stuff' to the start of the zipfile
                f = open(TEMP_ZIP, "rb")
                data = f.read()
                f.close()

                f = open(TEMP_ZIP, "wb")
                f.write(stuff)
                f.write(data)
                f.close()

            sys.path.insert(0, TEMP_ZIP)

            mod = __import__(".".join(modules), globals(), locals(),
                             ["__dummy__"])
            if expected_ext:
                file = mod.get_file()
                self.assertEquals(file, os.path.join(TEMP_ZIP,
                                  *modules) + expected_ext)
        finally:
            z.close()
            os.remove(TEMP_ZIP)
Example #9
    def _render_zip(self, req, filename, repos, diff):
        """ZIP archive with all the added and/or modified files."""
        new_rev = diff.new_rev
        req.send_response(200)
        req.send_header('Content-Type', 'application/zip')
        req.send_header('Content-Disposition', 'attachment;'
                        'filename=%s.zip' % filename)

        from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED

        buf = StringIO()
        zipfile = ZipFile(buf, 'w', ZIP_DEFLATED)
        for old_node, new_node, kind, change in repos.get_changes(**diff):
            if kind == Node.FILE and change != Changeset.DELETE:
                assert new_node
                zipinfo = ZipInfo()
                zipinfo.filename = new_node.path.encode('utf-8')
                # Note: unicode filenames are not supported by zipfile.
                # UTF-8 is not supported by all Zip tools either,
                # but since some do, UTF-8 seems the best option here.
                zipinfo.date_time = time.gmtime(new_node.last_modified)[:6]
                zipinfo.compress_type = ZIP_DEFLATED
                zipfile.writestr(zipinfo, new_node.get_content().read())
        zipfile.close()

        buf.seek(0, 2) # be sure to be at the end
        req.send_header("Content-Length", buf.tell())
        req.end_headers()

        req.write(buf.getvalue())
Example #10
    def acquire_resource(self, target_path, format_dict):
        """
        Downloads the zip file and extracts the files listed in
        :meth:`zip_file_contents` to the target path.

        """
        import cStringIO as StringIO
        from zipfile import ZipFile

        target_dir = os.path.dirname(target_path)
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)

        url = self.url(format_dict)

        shapefile_online = self._urlopen(url)

        zfh = ZipFile(StringIO.StringIO(shapefile_online.read()), 'r')

        for member_path in self.zip_file_contents(format_dict):
            ext = os.path.splitext(member_path)[1]
            target = os.path.splitext(target_path)[0] + ext
            member = zfh.getinfo(member_path)
            with open(target, 'wb') as fh:
                fh.write(zfh.open(member).read())

        shapefile_online.close()
        zfh.close()

        return target_path
Example #11
    def aqcuire_all_resources(self, format_dict):
        import cStringIO as StringIO
        from zipfile import ZipFile

        # Download archive.
        url = self.url(format_dict)
        shapefile_online = self._urlopen(url)
        zfh = ZipFile(StringIO.StringIO(shapefile_online.read()), 'r')
        shapefile_online.close()

        # Iterate through all scales and levels and extract relevant files.
        modified_format_dict = dict(format_dict)
        scales = ('c', 'l', 'i', 'h', 'f')
        levels = (1, 2, 3, 4)
        for scale, level in itertools.product(scales, levels):
            modified_format_dict.update({'scale': scale, 'level': level})
            target_path = self.target_path(modified_format_dict)
            target_dir = os.path.dirname(target_path)
            if not os.path.isdir(target_dir):
                os.makedirs(target_dir)

            for member_path in self.zip_file_contents(modified_format_dict):
                ext = os.path.splitext(member_path)[1]
                target = os.path.splitext(target_path)[0] + ext
                member = zfh.getinfo(member_path)
                with open(target, 'wb') as fh:
                    fh.write(zfh.open(member).read())

        zfh.close()
Example #12
    def do_export(self, REQUEST=None):
        """ """
        if REQUEST and not self.getParentNode().checkPermissionView():
            raise Unauthorized

        errors = []

        my_container = self.getParentNode()

        objects_to_archive = self.gather_objects(my_container)

        file_like_object = StringIO()
        zip_file = ZipFile(file_like_object, 'w')
        archive_files = []
        try:
            for obj in objects_to_archive:
                added_path = None
                if self.is_exportable(obj):
                    added_path = self.add_object_to_zip(obj, zip_file)
                if added_path:
                    archive_files.append((obj.title_or_id(),
                                          getattr(obj, 'meta_label',
                                                  obj.meta_type),
                                          added_path))

            self.add_index(zip_file, archive_files)
            zip_file.close()
        except Exception, e:
            errors.append(e)
Example #13
    def save_pickle_in_cfile(self, local_fname, networkref):
        """ Creates a pickled version of the graph and stores it in the
        cfile
        
        Parameters
        ----------
        local_fname: string
            The filename used in the Pickle folder to store
        networkref: NetworkX Graph instance
            The NetworkX graph to pickle
        
        """

        logger.info('Write a generated graph pickle to the connectome file.')
        picklefilepath = os.path.join(tempfile.gettempdir(),local_fname)
        from networkx import write_gpickle
        # add nodekeys, edgekeys, graphid to helpernode 'n0' before storage
        helperdict = {'nodekeys': networkref.nodekeys.copy(), \
                      'edgekeys': networkref.edgekeys.copy(), \
                      'graphid' : networkref.networkid }
        networkref.graph.add_node('n0')
        networkref.graph.node['n0'] = helperdict
        write_gpickle(networkref.graph, picklefilepath)
        networkref.graph.remove_node('n0')
        
        from zipfile import ZipFile, ZIP_DEFLATED
        tmpzipfile = ZipFile(self.data.fullpathtofile, 'a', ZIP_DEFLATED)
        # store it in the zip file
        tmpzipfile.write(picklefilepath, 'Pickle/' + local_fname)
        tmpzipfile.close()
        
        # remove pickle file from system
        logger.debug('Unlink: %s' % picklefilepath)
        os.unlink(picklefilepath)
Example #14
def layer_type(filename):
    """Determine whether a filename refers to a vector or a raster layer,
       and return the matching gsconfig resource_type string,
       either 'featureType' or 'coverage'.
    """
    base_name, extension = os.path.splitext(filename)

    shp_exts = ['.shp',]
    cov_exts = ['.tif', '.tiff', '.geotiff', '.geotif']
    csv_exts = ['.csv']
    kml_exts = ['.kml']

    if extension.lower() == '.zip':
        zf = ZipFile(filename)
        # ZipFile doesn't support with statement in 2.6, so don't do it
        try:
            for n in zf.namelist():
                b, e = os.path.splitext(n.lower())
                if e in shp_exts or e in cov_exts or e in csv_exts:
                    base_name, extension = b,e
        finally:
            zf.close()

    if extension.lower() in shp_exts + csv_exts + kml_exts:
        return FeatureType.resource_type
    elif extension.lower() in cov_exts:
        return Coverage.resource_type
    else:
        msg = ('Saving of extension [%s] is not implemented' % extension)
        raise GeoNodeException(msg)
Example #15
    def pack(self, output_dir, devel=False, force=False, keep_temp=False):
        self.output_dir = os.path.realpath(output_dir)
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        self.force = force
        self.keep_temp = keep_temp

        paths = []
        self.merge_modules = {}

        # create runtime package
        p = self._create_msi_installer(PackageType.RUNTIME)
        paths.append(p)

        # create devel package
        if devel and not isinstance(self.package, App):
            p = self._create_msi_installer(PackageType.DEVEL)
            paths.append(p)

        # create zip with merge modules
        self.package.set_mode(PackageType.RUNTIME)
        zipf = ZipFile(os.path.join(self.output_dir, '%s-merge-modules.zip' %
                                    self._package_name()), 'w')
        for p in self.merge_modules[PackageType.RUNTIME]:
            zipf.write(p)
        zipf.close()

        if not keep_temp:
            for msms in self.merge_modules.values():
                for p in msms:
                    os.remove(p)

        return paths
Example #16
    def __exportStyle(self, dirstylename, filename, cfgxml):
        """
        Export a style to a ZIP archive.
        """
        if not filename.lower().endswith('.zip'):
            filename += '.zip' 
        sfile=os.path.basename(filename)
        log.debug("Export style %s" % dirstylename)
        try:
            zippedFile = ZipFile(filename, "w")
            try:
                if cfgxml!='':
                    for contFile in dirstylename.files():
                        if contFile.basename()!= 'config.xml':
                            zippedFile.write(unicode(contFile.normpath()), contFile.basename(), ZIP_DEFLATED)
                else:
                    for contFile in dirstylename.files():
                        zippedFile.write(unicode(contFile.normpath()), contFile.basename(), ZIP_DEFLATED)
            finally:
                if cfgxml!='':
                    zippedFile.writestr('config.xml', cfgxml)
                zippedFile.close()
                self.alert(_(u'Correct'), _(u'Style exported correctly: %s') % sfile)

        except IOError:
            self.alert(_(u'Error'), _(u'Could not export style : %s') % filename.basename())
        self.action = ""
Example #17
def zip_layer_folder(dir_path, layer_name):
    """
    Create a zip archive with the content of the folder located at `dir_path`
    and name it with `layer_name`.

    Parameters
    ----------
    dir_path: str
        The path to the temporary folder in which are located the files to
        be zipped.

    layer_name: str
        The name of the concerned layer (will be used as file name for the
        zip archive).

    Returns
    -------
    raw_content: str
        The zip archive
    archive_name: str
        The name of the archive (used later in the header of the response).
    """
    filenames = os.listdir(dir_path)
    zip_stream = BytesIO()
    myZip = ZipFile(zip_stream, "w", compression=ZIP_DEFLATED)
    for filename in filenames:
        if not filename.endswith('.geojson'):
            f_name = path_join(dir_path, filename)
            myZip.write(f_name, filename, ZIP_DEFLATED)
    myZip.close()
    zip_stream.seek(0)
    return zip_stream.read(), ''.join([layer_name, ".zip"])
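
A hedged usage sketch of the two return values; writing the archive to disk here is purely illustrative (per the docstring they are normally used to build an HTTP response):

raw_content, archive_name = zip_layer_folder('/tmp/layer_export', 'rivers')  # placeholder paths
with open(archive_name, 'wb') as out:
    out.write(raw_content)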
Example #18
    def upload_analyzer(self):
        """Upload analyzer to guest.
        @return: operation status.
        """
        zip_data = StringIO()
        zip_file = ZipFile(zip_data, "w", ZIP_DEFLATED)

        root = os.path.join("analyzer", self.platform)
        root_len = len(os.path.abspath(root))

        if not os.path.exists(root):
            log.error("No valid analyzer found at path: %s" % root)
            return False

        for root, dirs, files in os.walk(root):
            archive_root = os.path.abspath(root)[root_len:]
            for name in files:
                path = os.path.join(root, name)
                archive_name = os.path.join(archive_root, name)
                zip_file.write(path, archive_name, ZIP_DEFLATED)

        zip_file.close()
        data = xmlrpclib.Binary(zip_data.getvalue())
        zip_data.close()

        log.debug("Uploading analyzer to guest (ip=%s)" % self.ip)
        self.server.add_analyzer(data)
Example #19
    def get_results(self):
        """Get analysis results.
        @return: data.
        """
        root = self._get_root(container="cuckoo", create=False)

        if not os.path.exists(root):
            return False

        zip_data = StringIO()
        zip_file = ZipFile(zip_data, "w", ZIP_DEFLATED)

        root_len = len(os.path.abspath(root))
        
        for root, dirs, files in os.walk(root):
            archive_root = os.path.abspath(root)[root_len:]
            for name in files:
                path = os.path.join(root, name)
                archive_name = os.path.join(archive_root, name)
                zip_file.write(path, archive_name, ZIP_DEFLATED)
        
        zip_file.close()
        data = xmlrpclib.Binary(zip_data.getvalue())
        zip_data.close()

        return data
Example #20
def parse_template(template_name):
    """Resolve template name into absolute path to the template
    and boolean if absolute path is temporary directory.
    """
    if template_name.startswith('http'):
        if '#' in template_name:
            url, subpath = template_name.rsplit('#', 1)
        else:
            url = template_name
            subpath = ''
        with tempfile.NamedTemporaryFile() as tmpfile:
            urlretrieve(url, tmpfile.name)
            if not is_zipfile(tmpfile.name):
                raise ConfigurationError("Not a zip file: %s" % tmpfile.name)
            zf = ZipFile(tmpfile)
            try:
                path = tempfile.mkdtemp()
                zf.extractall(path)
                return os.path.join(path, subpath), True
            finally:
                zf.close()

    registry = TemplatesRegistry()
    if registry.has_template(template_name):
        path = registry.path_of_template(template_name)
    elif ':' in template_name:
        path = resolve_dotted_path(template_name)
    else:
        path = os.path.realpath(template_name)

    if not os.path.isdir(path):
        raise ConfigurationError('Template directory does not exist: %s' % path)
    return path, False
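
A hedged usage sketch of the (path, is_temporary) contract; the URL is a placeholder, and cleaning up the temporary directory afterwards is an assumption about how the boolean is meant to be used:

import shutil

path, is_temporary = parse_template('https://example.com/templates.zip#skeleton')
try:
    print('template lives at', path)
finally:
    if is_temporary:
        shutil.rmtree(path)  # remove the temp dir the template was extracted into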
Example #21
class WikiArchiver(BaseWikiManager):
    def _serialize_page(self, page):
        pdict = dict(id=page.id, name=page.name, type=page.type,
                     created=page.created, modified=page.modified)
        return pdict
    
    def create_new_zipfile(self):
        fields = ['id', 'name', 'type', 'created', 'modified']
        self.zipfileobj = BytesIO()
        self.csvfileobj = StringIO()
        self.zipfile = ZipFile(self.zipfileobj, 'w')
        self.csvfile = csv.DictWriter(self.csvfileobj, fields)
        
    def archive_pages(self):
        for page in self.query().all():
            pdict = self._serialize_page(page)
            self.csvfile.writerow(pdict)
            filename = 'tutwiki-page-%04d.txt' % page.id
            self.zipfile.writestr(filename, bytes(page.content))
        csvfilename = 'tutwiki-dbinfo.csv'
        self.zipfile.writestr(csvfilename, self.csvfileobj.getvalue())
        self.zipfile.close()
        #self.zipfileobj.seek(0)
        #return self.zipfileobj.read()
        return self.zipfileobj.getvalue()
Example #22
def createDevEnv(baseDir, type):
  fileBuffer = StringIO()
  createBuild(baseDir, type=type, outFile=fileBuffer, devenv=True, releaseBuild=True)

  from zipfile import ZipFile
  zip = ZipFile(StringIO(fileBuffer.getvalue()), 'r')
  zip.extractall(os.path.join(baseDir, 'devenv'))
  zip.close()

  print 'Development environment created, waiting for connections from active extensions...'
  metadata = readMetadata(baseDir, type)
  connections = [0]

  import SocketServer, time, thread

  class ConnectionHandler(SocketServer.BaseRequestHandler):
    def handle(self):
      connections[0] += 1
      self.request.sendall('HTTP/1.0 OK\nConnection: close\n\n%s' % metadata.get('general', 'basename'))

  server = SocketServer.TCPServer(('localhost', 43816), ConnectionHandler)

  def shutdown_server(server):
    time.sleep(10)
    server.shutdown()
  thread.start_new_thread(shutdown_server, (server,))
  server.serve_forever()

  if connections[0] == 0:
    print 'Warning: No incoming connections, extension probably not active in the browser yet'
  else:
    print 'Handled %i connection(s)' % connections[0]
Example #23
    def preloadFont(cls, font, directory=DEFAULT_DIR):
        """
        Load font file into memory. This can be overridden in
        a subclass to create different font sources.
        """

        fontPath = os.path.join(directory, font + ".flf")
        if not os.path.exists(fontPath):
            fontPath = os.path.join(directory, font + ".tlf")
            if not os.path.exists(fontPath):
                raise pyfiglet.FontNotFound("%s doesn't exist" % font)

        if is_zipfile(fontPath):
            z = None
            try:
                z = ZipFile(fontPath, "r")
                data = z.read(z.getinfo(z.infolist()[0].filename))
                z.close()
                return data.decode("utf-8", "replace") if ST3 else data
            except Exception as e:
                if z is not None:
                    z.close()
                raise pyfiglet.FontError("couldn't read %s: %s" % (fontPath, e))
        else:
            try:
                with open(fontPath, "rb") as f:
                    data = f.read()
                return data.decode("utf-8", "replace") if ST3 else data
            except Exception as e:
                raise pyfiglet.FontError("couldn't open %s: %s" % (fontPath, e))

        raise pyfiglet.FontNotFound(font)
Example #24
    def compileToZip(self):
        """ 
            Compile the exam as a .zip file
        """
        def cleanpath(path):
            if path=='': 
                return ''
            dirname, basename = os.path.split(path)
            dirname=cleanpath(dirname)
            if basename!='.':
                dirname = os.path.join(dirname,basename)
            return dirname

        f = ZipFile(self.options.output,'w')

        for (dst,src) in self.files.items():
            dst = ZipInfo(cleanpath(dst))
            dst.external_attr = 0o644<<16
            dst.date_time = datetime.datetime.today().timetuple()
            if isinstance(src,basestring):
                f.writestr(dst,open(src,'rb').read())
            else:
                f.writestr(dst,src.read())

        print("Exam created in %s" % os.path.relpath(self.options.output))

        f.close()
Example #25
def get_info(in_stream):
    """ Return the version and submitter strings from zipfile byte stream. """
    arch = ZipFile(in_stream, 'r')
    try:
        return unpack_info(arch.read('__INFO__'))
    finally:
        arch.close()
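
A hedged usage sketch; the archive path is a placeholder, and the tuple unpacking assumes unpack_info returns the (version, submitter) pair the docstring describes:

with open('submission.zip', 'rb') as fh:   # placeholder path
    version, submitter = get_info(fh)      # shape assumed from the docstring
print(version, submitter)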
Example #26
    def getZipForLanguages(self, languages, path=None):
        self.verifyPaths()

        # strip out any duplicates
        languages = list(set(languages))

        write_to = StringIO() if path is None else path
        zip_file = ZipFile(write_to, 'w')
        zip_file.write(self.getRainbowPath(), 'rainbow.js', zipfile.ZIP_DEFLATED)

        # include minimized version even when downloading the dev version
        zip_file.write(self.getRainbowPath().replace('.js', '.min.js'), 'rainbow.min.js', zipfile.ZIP_DEFLATED)

        # include themes as well
        if self.theme_path:
            files = glob.glob(self.theme_path + '/*.css')
            for file_name in files:
                zip_file.write(file_name, os.path.join('themes', os.path.basename(file_name)), zipfile.ZIP_DEFLATED)

        for language in languages:
            zip_file.write(self.getPathForLanguage(language), os.path.join('language', language + '.js'), zipfile.ZIP_DEFLATED)

        zip_file.close()

        return write_to
Example #27
    def testZipImporterMethodsInSubDirectory(self):
        packdir = TESTPACK + os.sep
        packdir2 = packdir + TESTPACK2 + os.sep
        files = {packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
                 packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}

        z = ZipFile(TEMP_ZIP, "w")
        try:
            for name, (mtime, data) in files.items():
                zinfo = ZipInfo(name, time.localtime(mtime))
                zinfo.compress_type = self.compression
                z.writestr(zinfo, data)
            z.close()

            zi = zipimport.zipimporter(TEMP_ZIP + os.sep + packdir)
            self.assertEquals(zi.archive, TEMP_ZIP)
            self.assertEquals(zi.prefix, packdir)
            self.assertEquals(zi.is_package(TESTPACK2), True)
            zi.load_module(TESTPACK2)

            self.assertEquals(zi.is_package(TESTPACK2 + os.sep + '__init__'), False)
            self.assertEquals(zi.is_package(TESTPACK2 + os.sep + TESTMOD), False)

            mod_name = TESTPACK2 + os.sep + TESTMOD
            mod = __import__(module_path_to_dotted_name(mod_name))
            self.assertEquals(zi.get_source(TESTPACK2), None)
            self.assertEquals(zi.get_source(mod_name), None)
        finally:
            z.close()
            os.remove(TEMP_ZIP)
Example #28
    def test_replace_metadata(self):
        old_zip_file = get_test_zipfile('LotsOfFiles')

        metadata = models.parse_zipfile_metadata(old_zip_file)
        old_zip_file.seek(0)

        extension = models.Extension.objects.create_from_metadata(metadata, creator=self.user)
        version = models.ExtensionVersion(extension=extension,
                                          source=File(old_zip_file))

        version.parse_metadata_json(metadata)

        new_zip = version.get_zipfile('r')

        old_zip = ZipFile(File(old_zip_file), 'r')
        self.assertEqual(len(old_zip.infolist()), len(new_zip.infolist()))
        self.assertEqual(new_zip.read("metadata.json"),
                         version.make_metadata_json_string())

        for old_info in old_zip.infolist():
            if old_info.filename == "metadata.json":
                continue

            new_info = new_zip.getinfo(old_info.filename)
            self.assertEqual(old_zip.read(old_info), new_zip.read(new_info))
            self.assertEqual(old_info.date_time, new_info.date_time)

        old_zip.close()
        new_zip.close()
Example #29
def logs(request):
    db = sqlite3.connect(r'C:\Users\Administrator\Desktop\Raspi\mysite\newdb.db')
    cursor = db.cursor()
    cursor.execute('SELECT * FROM connhistory')
    data = cursor.fetchall()

    with open("data.csv", 'wb') as csvfile:
        writer = csv.writer(csvfile)
        for row in data:
            writer.writerow(list(row))

    with open("data.csv", 'rb') as csvfile:
        data = csvfile.read()
                    
    in_memory = StringIO()
    zip = ZipFile(in_memory, "a")
    zip.writestr("data.csv",data)
    zip.close()
                    
    response = HttpResponse(content_type="application/zip;")
    response["Content-Disposition"] = "attachment; filename=zipfiles.zip"

    in_memory.seek(0)
    response.write(in_memory.read())
    return response
Example #30
    def export_zip(self, paths):
        stringio = StringIO()
        archive = ZipFile(stringio, mode='w')

        def _add_resource(resource):
            for filename in resource.get_files_to_archive(True):
                if filename.endswith('.metadata'):
                    continue
                path = Path(self.handler.key).get_pathto(filename)
                archive.writestr(str(path), resource.handler.to_str())

        for path in paths:
            child = self.get_resource(path, soft=True)
            if child is None:
                continue
            # A Folder => we add its content
            if isinstance(child, Folder):
                for subchild in child.traverse_resources():
                    if subchild is None or isinstance(subchild, Folder):
                        continue
                    _add_resource(subchild)
            else:
                _add_resource(child)

        archive.close()
        return stringio.getvalue()
Example #31
        exzip = exname + '.zip'
        try:
            os.mkdir(exname)
            print(f" - Temporary dir created: {exname}")
            shutil.copy(expdforig, os.path.join(exname, expdf))
            print(f" - Copied exercise pdf file: {expdforig}")
            shutil.copytree("code", os.path.join(exname, "code"))
            print(" - Copied the code folder")
            print(" - Removing unnecessary files:")
            pdfpyfiles = set(
                glob.glob(exname + '/**/*.pdf', recursive=True) +
                glob.glob(exname + '/**/*.py', recursive=True))
            allfiles = set(glob.glob(exname + '/**/*.*', recursive=True))
            for f in allfiles.difference(pdfpyfiles):
                removeFileOrFolder(f)
            hiddenfilesorPaths = glob.glob(exname +
                                           '/**/.*') + glob.glob(exname +
                                                                 '/**/__*__')
            for f in hiddenfilesorPaths:
                removeFileOrFolder(f)
            zipf = ZipFile(exzip, 'w', ZIP_DEFLATED)
            zipdir(exname, zipf)
            zipf.close()
            print(f" - .zip file for upload created: {exzip}")
            shutil.rmtree(exname)
            print(" - Temporary dir removed")
        except Exception:
            # raising a bare string is invalid; wrap the message in an exception type
            raise RuntimeError(
                f"Error occurred while creating submission .zip: {sys.exc_info()[0]}"
            )
Example #32
                with tempfile.NamedTemporaryFile(suffix='.zip') as tmp:
                    # the file transfer can take a long time; by default
                    # cherrypy limits responses to 300s; we increase it to 1h
                    cherrypy.response.timeout = 3600
                    try:
                        # create the zipfile
                        archive = ZipFile(tmp.name, 'w')
                        # add all the recording files
                        for recording in selected_recordings:
                            # ignore missing files
                            if recording.fileExists():
                                archive.write(recording.abspath,
                                              recording.path)
                    finally:
                        # safer this way
                        archive.close()

                    # create a suitable name for the export
                    if selected_date:
                        served_filename = ('Recordings_%s.zip' %
                                           formatDateIso(selected_date))
                    elif filter_start == filter_finish:
                        served_filename = ('Recordings_%s.zip' %
                                           formatDateIso(filter_start))
                    else:
                        served_filename = ('Recordings_%s_to_%s.zip' %
                                           (formatDateIso(filter_start),
                                            formatDateIso(filter_finish)))

                    # use cherrypy utility to push the file for download. This
                    # also means that we don't have to move the config file into
Example #33
for root, dirs, files in os.walk(PRJ_PATH):
    is_exclude_dir = root.startswith(tuple(exclude_dirs))
    if (isNameMatch(os.path.basename(root), exclude)
            or is_exclude_dir) and root != PRJ_PATH:
        if not is_exclude_dir:
            exclude_dirs.append(root)
        continue
    else:
        for name in files:
            if not isNameMatch(name, exclude):
                fullname = os.path.join(root, name)
                arcname = fullname[len(PRJ_PAR_DIR):]
                if len(minify['dirs']) > 0 and root.startswith(
                        tuple(minify['dirs'])) and fnmatch.fnmatch(
                            name, '*.js') and not isNameMatch(
                                name, minify['exclude']):
                    fullname = minifyJs(fullname, arcname)
                z.write(fullname, arcname)
                print(arcname[1:], 'was added to the package\n')
z.close()
print('Package was created')

if os.path.exists(TMP_DIR):
    print('\nDeleting temporary files...')
    shutil.rmtree(TMP_DIR, ignore_errors=False, onerror=None)
    print('Ok')

writeToChangelogFile()

print('Done!')
input('\nPress "Enter" to exit')
Example #34
    End year: 2018
'''

# download the data from the source
logger.info('Downloading raw data')
download = glob.glob(os.path.join(os.path.expanduser("~"), 'Downloads', 'historical_emissions.zip'))[0]

# move this file into your data directory
raw_data_file = os.path.join(data_dir, os.path.basename(download))
shutil.move(download, raw_data_file)

# unzip historical emissions data
raw_data_file_unzipped = raw_data_file.split('.')[0]
zip_ref = ZipFile(raw_data_file, 'r')
zip_ref.extractall(raw_data_file_unzipped)
zip_ref.close()

'''
Process data
'''
# read in historical emissions csv data to pandas dataframe
filename = os.path.join(raw_data_file_unzipped, 'historical_emissions.csv')
# filename = 'data/historical_emissions/historical_emissions.csv'
df = pd.read_csv(filename)

# add iso_a3 to the dataframe
df_edit = pd.merge(df, df_carto[['iso_a3','name']], left_on = 'Country',right_on = 'name', how = 'left')

# deal with exceptions
df_edit.loc[df_edit['Country'] == 'United States','iso_a3'] = 'USA'
df_edit.loc[df_edit['Country'] == 'Tanzania','iso_a3'] = 'TZA'
Example #35
    with open(DATA_FILE, "wb") as file:
        shutil.copyfileobj(data.raw, file)

    del data

train_file = os.path.join(DATA_DIR, LING10_TRAIN_LARGE, "train_set.txt")
test_file = os.path.join(DATA_DIR, LING10_TRAIN_LARGE, "test_set.txt")
chars_map_file = os.path.join(DATA_DIR, LING10_TRAIN_LARGE, "chars.json")

#If any of the content doesn't exist, extract the zip file
if not os.path.exists(train_file) or not os.path.exists(
        test_file) or not os.path.exists(chars_map_file):
    extractor = ZipFile(DATA_FILE)
    extractor.extractall(DATA_DIR)
    extractor.close()

#READ THE DATA FILES

train_data = open(train_file, encoding="utf-8").read()
test_data = open(test_file, encoding="utf-8").read()
char_map_data = open(chars_map_file)

#Load the char map
char_map = json.load(char_map_data)
char_to_idx = char_map["char_to_idx"]

#Split the train and test into lines
train_data = train_data.splitlines()
test_data = test_data.splitlines()
Example #36
"""
Created on 2019-01-18
@author: Irony
@site: https://pyqt5.com https://github.com/892768447
@email: [email protected]
@file: Test.buildzip
@description: 
"""

import os
from pathlib import Path
import shutil
from zipfile import ZipFile, ZIP_DEFLATED


__Author__ = "Irony"
__Copyright__ = "Copyright (c) 2019"

os.chdir('../')

os.makedirs('dist', exist_ok=True)

# Compress the compiled .pyc files into a zip archive
zipfp = ZipFile(os.path.abspath('release/Library.zip'), 'w', ZIP_DEFLATED)
for file in Path('Library').rglob('*.pyc'):
    print('add file: %s' % file)
    zipfp.write(os.path.abspath(str(file)), str(file)[len('Library'):])
zipfp.close()

shutil.rmtree('Library', ignore_errors=True)
Example #37
def auto(quality, email, self):
    internet_date = urlopen('http://just-the-time.appspot.com/')
    result = internet_date.read().strip()
    result_str = result.decode('utf-8')
    result_str = result_str.replace(' ', '-')
    result_set = result_str.split('-')
    final_date = result_set[2] + result_set[1] + result_set[0]
    cwd = os.getcwd()

    DayList = [
        'MON',
        'TUE',
        'WED',
        'THU',
        'FRI',
        'SAT',
        'SUN',
    ]
    day_is = DayList[datetime.date(int(result_set[0]), int(result_set[1]),
                                   int(result_set[2])).weekday()]

    i = 0
    j = 1
    c = 0
    download_Counter = 0
    extension = ""
    percentage_Download = 0


    static_url =  'http://epaper.livehindustan.com/epaperimages/' + final_date \
            + '/' + final_date

    static_url = static_url + '-ng1r-del-'
    extension = ".jpg"

    while c != 2:
        QApplication.processEvents()
        url = static_url + str(i) + str(j) + extension
        h = requests.head(url, allow_redirects=True)
        header = h.headers
        content_type = header.get('content-type')
        if (quality):

            if 'image/jpeg' in content_type.lower():

                urllib.request.urlretrieve(
                    url, 'local-filename' + str(i * 10 + j) + '.jpeg')
                j = j + 1
                c = 0
                download_Counter = download_Counter + 1
                percentage_Download = (download_Counter / 26) * 100
                self.pbar.setValue(percentage_Download)
                print(str(int(percentage_Download)) + "% Downloaded.")

            else:
                c = c + 1
                i = i + 1
                j = 0

    print('Downloads Completed.')
    self.pbar.setValue(100)

    file_name = day_is.upper() + result_set[0] + result_set[1] + result_set[2]
    with open(file_name + ".pdf", "wb") as f:
        f.write(
            img2pdf.convert(
                [i for i in os.listdir(cwd) if i.endswith(".jpeg")]))

    for i in os.listdir(cwd):
        if i.endswith('.jpeg'):
            os.remove(i)

    comp_file = file_name + '.pdf'

    while not os.path.exists(comp_file):
        time.sleep(1)

    if os.path.isfile(comp_file):
        # write the pdf into a zip archive and close it properly
        zf = ZipFile(file_name + '.zip', 'w')
        zf.write(comp_file)
        zf.close()
    else:
        raise ValueError("%s isn't a file!" % comp_file)

    fromaddr = 'Enter an email address to send from.'
    toaddr = email
    cwd = os.getcwd()

    msg = MIMEMultipart()

    msg['From'] = fromaddr
    msg['To'] = toaddr
    msg['Subject'] = 'NEWSPAPER for ' + day_is

    body = 'Download the file and uncompress it. Read and Enjoy.'

    msg.attach(MIMEText(body, 'plain'))

    outfilename = file_name + '.zip'

    file_to_open = os.path.join(cwd, outfilename)

    attachment = open(file_to_open, 'rb')

    part = MIMEBase('application', 'octet-stream')
    part.set_payload(attachment.read())
    encoders.encode_base64(part)
    part.add_header('Content-Disposition',
                    'attachment; filename= %s' % outfilename)

    msg.attach(part)

    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()
    server.login(fromaddr, 'enter_password')
    text = msg.as_string()
    server.sendmail(fromaddr, toaddr, text)
    server.quit()
Example #38
    input()
    exit()

if result[0] == True:
    newName = result[1][:-4] + lib.getDate()
    oldFDBPath = result[1]
    newFDBPath = newName + ".fdb"
    zipName = newName + ".zip"
    lib.copy(oldFDBPath, newFDBPath)
    zipObject = ZipFile(zipName,
                        'w',
                        compression=zipfile.ZIP_BZIP2,
                        compresslevel=9)
    print("compressing file...")
    zipObject.write(newFDBPath)
    zipObject.close()

    lib.restoreLowDash(oldFDBPath)
    print("local database is ready for work")
    print("creating auxiliary copy...")
    lib.deleteCopy(newFDBPath)
    print("auxiliary copy deleted")
    print("sending file...")
    response = (str(lib.sendFile(zipName, user, password)))
    print(response)
    if response != "<Response [200]>":
        print(
            "something went wrong, but now a database backup exists in the current dir :("
        )
    else:
        print("file sent")
Example #39
def keyholemarkup2x(file,output='df', out_filename = None):
    """
    Takes Keyhole Markup Language Zipped (KMZ) or KML file as input. The  
    output is a pandas dataframe, geopandas geodataframe, csv, geojson, or
    shapefile.
    
    All core functionality from:
    http://programmingadvent.blogspot.com/2013/06/kmzkml-file-parsing-with-python.html
    
    Parameters
        ----------
        file : {string}
            The string path to your KMZ or KML file.
        output : {string}
            Defines the type of output. Valid selections include:
                - dataframe - 'df' or 'dataframe' (the default)
                - csv - 'csv'
                - geodataframe - 'gpd', 'gdf', 'geoframe', or 'geodataframe'
                - geojson - 'geojson' or 'json'
                - shapefile - 'shp', 'shapefile', or 'ESRI Shapefile'

        Returns
        -------
        self : object
    """
    r = re.compile(r'(?<=\.)km+[lz]?',re.I)
    try:
        extension = r.search(file).group(0) #(re.findall(r'(?<=\.)[\w]+',file))[-1]
        
    
    except IOError as e:
        logging.error("I/O error {0}".format(e))
    if extension.lower() == 'kml':
        buffer = file
    elif extension.lower() == 'kmz':
        kmz = ZipFile(file, 'r')
        
        vmatch = np.vectorize(lambda x:bool(r.search(x)))
        A = np.array(kmz.namelist())
        sel = vmatch(A)
        buffer = kmz.open(A[sel][0],'r')
    
    else:
        raise ValueError('Incorrect file format entered.  Please provide the '
                         'path to a valid KML or KMZ file.')    
     
    
    parser = xml.sax.make_parser()
    handler = PlacemarkHandler()
    parser.setContentHandler(handler)
    parser.parse(buffer)
    
    try:
        kmz.close()
    except:
        pass
    
    df = pd.DataFrame(handler.mapping).T
    names = list(map(lambda x: x.lower(),df.columns))
    if 'description' in names:
        extradata = df.apply(PlacemarkHandler.htmlizer,axis=1)
        df = df.join(extradata)
    
    
    output = output.lower()
    
    if output=='df' or output=='dataframe' or output == None:
        result = df
        
    elif output=='csv':
        out_filename = (out_filename or file[:-4]) + ".csv"
        df.to_csv(out_filename,encoding='utf-8',sep="\t")
        result = ("Successfully converted {0} to CSV and output to"
                   " disk at {1}".format(file,out_filename))
        
    elif output=='gpd' or output == 'gdf' or output=='geoframe' or output == 'geodataframe':
        try:
            import shapely
            from shapely.geometry import Polygon,LineString,Point
        except ImportError as e:
            raise ImportError('This operation requires shapely. {0}'.format(e))
        try:
            import fiona
        except ImportError as e:
            raise ImportError('This operation requires fiona. {0}'.format(e))
        try:
            import geopandas as gpd
        except ImportError as e:
            raise ImportError('This operation requires geopandas. {0}'.format(e))
            
        geos = gpd.GeoDataFrame(df.apply(PlacemarkHandler.spatializer,axis=1))
        result = gpd.GeoDataFrame(pd.concat([df,geos],axis=1))
        
        
    elif output=='geojson' or output=='json':
        try:
            import shapely
            from shapely.geometry import Polygon,LineString,Point
        except ImportError as e:
            raise ImportError('This operation requires shapely. {0}'.format(e))
        try:
            import fiona
        except ImportError as e:
            raise ImportError('This operation requires fiona. {0}'.format(e))
        try:
            import geopandas as gpd
        except ImportError as e:
            raise ImportError('This operation requires geopandas. {0}'.format(e))
        try:
            import geojson
        except ImportError as e:
            raise ImportError('This operation requires geojson. {0}'.format(e))
            
        geos = gpd.GeoDataFrame(df.apply(PlacemarkHandler.spatializer,axis=1))
        gdf = gpd.GeoDataFrame(pd.concat([df,geos],axis=1))
        out_filename = file[:-3] + "geojson"
        gdf.to_file(out_filename,driver='GeoJSON')
        validation = geojson.is_valid(geojson.load(open(out_filename)))['valid']
        if validation == 'yes':
            
            result = ("Successfully converted {0} to GeoJSON and output to"
                      " disk at {1}".format(file,out_filename))
        else:
            raise ValueError('The geojson conversion did not create a '
                            'valid geojson object. Try to clean your '
                            'data or try another file.')
            
    elif output=='shapefile' or output=='shp' or output =='esri shapefile':
        try:
            import shapely
            from shapely.geometry import Polygon,LineString,Point
        except ImportError as e:
            raise ImportError('This operation requires shapely. {0}'.format(e))
        try:
            import fiona
        except ImportError as e:
            raise ImportError('This operation requires fiona. {0}'.format(e))
            
        try:
            import geopandas as gpd
        except ImportError as e:
            raise ImportError('This operation requires geopandas. {0}'.format(e))
            
        try:
            import shapefile
        except ImportError as e:
            raise ImportError('This operation requires pyshp. {0}'.format(e))
        
            
        geos = gpd.GeoDataFrame(df.apply(PlacemarkHandler.spatializer,axis=1))
        gdf = gpd.GeoDataFrame(pd.concat([df,geos],axis=1))
        out_filename = (out_filename or file[:-4]) + ".shp"
        gdf.to_file(out_filename,driver='ESRI Shapefile')
        sf = shapefile.Reader(out_filename)
        if len(sf.shapes())>0:
            validation = "yes"
        else:
            validation = "no"
        if validation == 'yes':
            
            result = ("Successfully converted {0} to Shapefile and output to"
                      " disk at {1}".format(file,out_filename))
        else:
            raise ValueError('The Shapefile conversion did not create a '
                            'valid shapefile object. Try to clean your '
                            'data or try another file.') 
    else:
        raise ValueError('The conversion returned no data; check if'
                        ' you entered a correct output file type. '
                        'Valid output types are geojson, shapefile,'
                        ' csv, geodataframe, and/or pandas dataframe.')
        
    return result
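
A hedged usage sketch; the input file name is a placeholder, and only the pandas-based outputs are shown since they need no optional GIS dependencies:

df = keyholemarkup2x('places.kmz', output='df')    # pandas DataFrame of the parsed placemarks
msg = keyholemarkup2x('places.kmz', output='csv')  # writes places.csv and returns a status string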
Example #40
 def do_downloads(self,checkfolder=True,output=None):
     for x in list.children[2:]:
         if x.checked:
             if not hasattr(self,"progress"):
                 self.progress = progress()
                 root.add_child(self.progress)
             self.progress.height = 20
             self.progress.width = 400
             self.progress.rpos[1] = list.rpos[1]+list.height+20
             self.progress.progress = 0
             print self.dl_url+"/"+x.file
             serv = urllib2.urlopen(self.dl_url+x.file)
             size = int(serv.info()["Content-Length"])
             read = 0
             bytes = 0
             cli = open(x.filename,"wb")
             s = time.time()
             bps = 0
             while not Engine.quit_threads:
                 r = serv.read(1024)
                 if not r: break
                 cli.write(r)
                 read += len(r)
                 bytes += len(r)
                 self.progress.progress = read/float(size)
                 if time.time()-s>1:
                     bps = bytes/(time.time()-s)
                     s = time.time()
                     bytes = 0
                 self.progress.text = "%sKB/%sKB - %s KB/s"%(read/1000.0,size/1000.0,bps/1000.0)
                 if output:
                     self.progress.rpos = [0,0]
                     self.progress.width = 256
                     self.progress.draw(output[0])
                     output[1]()
                     for evt in pygame.event.get():
                         if evt.type == pygame.QUIT: raise SystemExit
             serv.close()
             cli.close()
             if os.path.exists(self.path+"/"+x.text):
                 if not os.path.isdir(self.path+"/"+x.text):
                     os.remove(self.path+"/"+x.text)
                 else:
                     removeall(self.path+"/"+x.text)
             if not os.path.exists(self.path+"/"+x.text):
                 os.mkdir(self.path+"/"+x.text)
             try:
                 z = ZipFile(x.filename,"r")
             except:
                 print "File corrupt"
                 return
             for name in z.namelist():
                 txt = z.read(name)
                 if "/" in name:
                     try:
                         os.makedirs(self.path+"/"+x.text+"/"+name.rsplit("/",1)[0])
                     except:
                         pass
                 if not name.endswith("/"):
                     f = open(self.path+"/"+x.text+"/"+name,"wb")
                     f.write(txt)
                     f.close()
             z.close()
             os.remove(x.filename)
             root.children.remove(self.progress)
             list.children.remove(x)
             if len(list.children)<=2:
                 list.status_box.text = "No more new downloads."
             del self.progress
Example #41
def table_data_export(request: HttpRequest) -> HttpResponse:
    """
    View that handles generating and returning zip files containing csv files full of records.

    :param request: The incoming HTTP request.
    :return: The HTTP response to return to the user.
    """
    table_data_file_name = "/tmp/tabledata.zip"

    if request.method == 'POST':
        map_data = request.POST.getlist('ids[]')
        # sort based on record type
        map_data.sort(key=lambda x: x[0])

        # Lists to hold core ids of each record
        # I'm not proud of this
        who_items = []
        sota_items = []
        profile_items = []
        org_items = []
        cap_items = []
        guide_items = []
        case_items = []
        expe_items = []
        bluejustice_items = []

        # Loop through each item, get the type, and add the core id to the relevant list
        for item in map_data:
            record = item.split('&')
            type = record[0]
            issf_core_id = record[1]

            if type == 'SSF Governance':
                cap_items.append(issf_core_id)
            elif type == 'SSF Guidelines':
                guide_items.append(issf_core_id)
            elif type == 'SSF Organization':
                org_items.append(issf_core_id)
            elif type == 'SSF Profile':
                profile_items.append(issf_core_id)
            elif type == 'State-of-the-Art in SSF Research':
                sota_items.append(issf_core_id)
            elif type == 'Who\'s Who in SSF':
                who_items.append(issf_core_id)
            elif type == 'Case Study':
                case_items.append(issf_core_id)
            elif type == 'SSF Experiences':
                expe_items.append(issf_core_id)
            elif type == 'SSF Blue Justice':
                bluejustice_items.append(issf_core_id)

        # Generate the zipfile containing all the records
        zipfile = ZipFile(table_data_file_name, 'w')

        cap_records = SSFCapacityNeed.objects.filter(issf_core_id__in=cap_items).values(
            'issf_core_id',
            'contributor_id__first_name',
            'contributor_id__last_name',
            'contribution_date',
            'geographic_scope_type',
            'capacity_need_title',
            'capacity_need_description',
            'capacity_need_category',
            'capacity_need_type'
        )
        guide_records = SSFGuidelines.objects.filter(issf_core_id__in=guide_items).values(
            'issf_core_id',
            'contributor_id__first_name',
            'contributor_id__last_name',
            'contribution_date',
            'geographic_scope_type',
            'title',
            'location', 'start_day',
            'start_month', 'start_year',
            'end_day', 'end_month',
            'end_year', 'organizer',
            'purpose', 'link',
            'activity_type',
            'activity_coverage',
            'ongoing'
        )
        org_records = SSFOrganization.objects.filter(issf_core_id__in=org_items).values(
            'issf_core_id',
            'contributor_id__first_name',
            'contributor_id__last_name',
            'contribution_date',
            'geographic_scope_type',
            'organization_name',
            'mission',
            'address1',
            'address2',
            'prov_state',
            'country__short_name',
            'postal_code',
            'city_town',
            'year_established',
            'ssf_defined',
            'ssf_definition',
            'organization_type_union',
            'organization_type_support',
            'organization_type_coop',
            'organization_type_flag',
            'organization_type_other',
            'organization_type_other_text',
            'motivation_voice',
            'motivation_market',
            'motivation_sustainability',
            'motivation_economics',
            'motivation_rights',
            'motivation_collaboration',
            'motivation_other',
            'motivation_other_text',
            'activities_capacity',
            'activities_sustainability',
            'activities_networking',
            'activities_marketing',
            'activities_collaboration',
            'activities_other',
            'activities_other_text',
            'network_types_state',
            'network_types_ssfos',
            'network_types_community',
            'network_types_society',
            'network_types_ngos',
            'network_types_other',
            'network_types_other_text',
            'achievements',
            'success_factors', 'obstacles',
            'organization_point'
        )
        pro_records = SSFProfile.objects.filter(issf_core_id__in=profile_items).values(
            'issf_core_id',
            'contributor_id__first_name',
            'contributor_id__last_name',
            'contribution_date',
            'geographic_scope_type',
            'ssf_name',
            'ssf_defined',
            'ssf_definition',
            'data_day',
            'data_month',
            'data_year',
            'data_end_day',
            'data_end_month',
            'data_end_year',
            'comments',
            'sources',
            'percent'
        )

        sota_records = SSFKnowledge.objects.filter(issf_core_id__in=sota_items).values(
            'issf_core_id',
            'contribution_date',
            'contributor__first_name',
            'contributor__last_name',
            'geographic_scope_type',
            'publication_type__publication_type',
            'other_publication_type',
            'level1_title',
            'level2_title',
            'year',
            'nonenglish_language__language_name',
            'nonenglish_title',
            'ssf_defined',
            'ssf_definition',
            'lsf_considered',
            'fishery_type_details',
            'gear_type_details',
            'ecosystem_type_details',
            'demographics_na',
            'demographics_age',
            'demographics_education',
            'demographics_ethnicity',
            'demographics_gender',
            'demographics_health',
            'demographics_income',
            'demographics_religion',
            'demographics_unspecified',
            'demographics_other',
            'demographics_other_text',
            'demographic_details',
            'employment_na',
            'employment_full_time',
            'employment_part_time',
            'employment_seasonal',
            'employment_unspecified',
            'employment_details',
            'stage_na',
            'stage_pre_harvest',
            'stage_harvest',
            'stage_post_harvest',
            'stage_unspecified',
            'market_details',
            'governance_details',
            'management_details',
            'research_method',
            'method_specify_qualitative',
            'method_specify_quantitative',
            'method_specify_mixed',
            'aim_purpose_question',
            'theme_issue_details',
            'solutions_offered',
            'solution_details',
            'explicit_implications_recommendations',
            'implication_details',
            'comments'
        )
        case_records = SSFCaseStudies.objects.filter(issf_core_id__in=case_items).values(
            'issf_core_id',
            'contributor_id__first_name',
            'contributor_id__last_name',
            'contribution_date',
            'geographic_scope_type',
            'name',
            'role',
            'description_area',
            'description_fishery',
            'description_issues',
            'issues_challenges',
            'stakeholders',
            'transdisciplinary',
            'background_context',
            'activities_innovation'
        )
        expe_records = SSFExperiences.objects.filter(issf_core_id__in=expe_items).values(
            'issf_core_id',
            'contributor_id__first_name',
            'contributor_id__last_name',
            'contribution_date',
            'geographic_scope_type',
            'title',
            'name',
            'description'
        )
        bluejustice_records = SSFBlueJustice.objects.filter(issf_core_id__in=bluejustice_items).values(
            'issf_core_id',
            'contributor_id__first_name',
            'contributor_id__last_name',
            'contribution_date',
            'geographic_scope_type',
            'name', 'email', 'affiliation',
            'country', 'role', 'img_url', 'photo_location', 'date_of_photo', 'photographer',
            'title', 'video_url', 'vimeo_video_url', 'description', 'contributor', 'ssf_name',
            'ssf_location', 'ssf_country', 'ssf_main_species', 'ssf_type_aquaculture',
            'ssf_type_recreational', 'ssf_type_commercial', 'ssf_type_subsistence', 'ssf_type_indigenous',
            'ssf_type_other', 'ecosystem_type_marine', 'ecosystem_type_freshwater', 'ecosystem_type_brackish',
            'ecosystem_detailed_archipelago', 'ecosystem_detailed_beach', 'ecosystem_detailed_coastal',
            'ecosystem_detailed_coral_reef', 'ecosystem_detailed_deep_sea', 'ecosystem_detailed_estuary',
            'ecosystem_detailed_fjord', 'ecosystem_detailed_intertidal', 'ecosystem_detailed_lagoon',
            'ecosystem_detailed_lake', 'ecosystem_detailed_mangrove', 'ecosystem_detailed_open_ocean',
            'ecosystem_detailed_river', 'ecosystem_detailed_salt_marsh', 'ecosystem_detailed_other',
            'ssf_terms_artisanal', 'ssf_terms_coastal', 'ssf_terms_indigenous', 'ssf_terms_inland', 'ssf_terms_inshore',
            'ssf_terms_small_boat', 'ssf_terms_small_scale', 'ssf_terms_subsistence', 'ssf_terms_traditional',
            'ssf_terms_others', 'ssf_terms_fisheries', 'ssf_terms_fisheries_definiton', 'main_gears_dredge', 'main_gears_lift_net',
            'main_gears_cast_net', 'main_gears_poison', 'main_gears_gillnet', 'main_gears_recreational_fishing_gears',
            'main_gears_gleaning', 'main_gears_seine_net', 'main_gears_harpoon', 'main_gears_surrounding_net',
            'main_gears_harvesting_machines', 'main_gears_traps', 'main_gears_hook_line', 'main_gears_trawls', 'main_gears_others',
            'main_vessel_type', 'main_vessel_number', 'main_vessel_engine', 'ss_fishers_numbers', 'ss_fishers_full_time',
            'ss_fishers_women', 'total_number_households', 'households_participation_percentage', 'background_about_ssf',
            'justice_in_context', 'types_of_justice_distributive', 'types_of_justice_social', 'types_of_justice_economic',
            'types_of_justice_market', 'types_of_justice_infrastructure', 'types_of_justice_regulatory', 'types_of_justice_procedural',
            'types_of_justice_environmental', 'types_of_justice_others', 'dealing_with_justice', 'covid_19_related'
        )
        who_records = SSFPerson.objects.filter(issf_core_id__in=who_items).values(
            'issf_core_id',
            'contributor_id__first_name',
            'contributor_id__last_name',
            'contribution_date',
            'geographic_scope_type',
            'number_publications',
            'education_level',
            'research_method',
            'issues_addressed',
            'url',
            'other_education_level',
            'affiliation',
            'address1',
            'address2',
            'city_town',
            'prov_state',
            'country__short_name',
            'postal_code',
            'is_researcher',
            'person_point'
        )

        write_file_csv('capacity.csv', cap_records, zipfile)
        write_file_csv('guidelines.csv', guide_records, zipfile)
        write_file_csv('organization.csv', org_records, zipfile)
        write_file_csv('profile.csv', pro_records, zipfile)
        write_file_csv('state_of_the_art.csv', sota_records, zipfile)
        write_file_csv('case_studies.csv', case_records, zipfile)
        write_file_csv('experiences.csv', expe_records, zipfile)
        write_file_csv('bluejustice.csv', bluejustice_records, zipfile)
        write_file_csv('whos_who.csv', who_records, zipfile)

        main_attrs = MainAttributeView.objects.filter(issf_core_id__in=profile_items).values(
            'issf_core_id',
            'attribute__question_number',
            'attribute__attribute_label',
            'value',
            'attribute__units_label',
            'attribute_value__value_label',
            'other_value',
            'additional',
            'additional_value__value_label'
        )

        write_file_csv('main_attributes.csv', main_attrs, zipfile)
        author_records = KnowledgeAuthorSimple.objects.filter(knowledge_core__in=sota_items).values()
        write_file_csv('authors.csv', author_records, zipfile)
        all_ids = cap_items + guide_items + org_items + profile_items + sota_items + who_items + expe_items + case_items
        theme_issue_records = CommonThemeIssueView.objects.filter(issf_core_id__in=all_ids).values(
            'issf_core_id',
            'selected_theme_issue_id',
            'theme_issue_value__theme_issue_label',
            'theme_issue_value__theme_issue__theme_issue_category',
            'other_theme_issue'
        )

        characteristic_records = CommonAttributeView.objects.filter(issf_core_id__in=all_ids).values(
            'issf_core_id',
            'selected_attribute_id',
            'attribute__attribute_category',
            'attribute__attribute_label',
            'attribute__units_label',
            'attribute__additional_field',
            'attribute_value__value_label',
            'other_value'
        )

        write_file_csv('themes_issues.csv', theme_issue_records, zipfile)
        write_file_csv('characteristics.csv', characteristic_records, zipfile)

        geog_scope_local_records = GeographicScopeLocalArea.objects.filter(issf_core_id__in=all_ids).values(
            'geographic_scope_local_area_id',
            'issf_core_id',
            'local_area_name',
            'local_area_alternate_name',
            'country__short_name',
            'local_area_setting',
            'local_area_setting_other',
            'local_area_point'
        )
        geog_scope_regional_records = Geographic_Scope_Region.objects.filter(issf_core_id__in=all_ids).values(
            'geographic_scope_region_id',
            'issf_core_id',
            'region__region_name',
            'region_name_other'
        )
        geog_scope_subnational_records = GeographicScopeSubnation.objects.filter(issf_core_id__in=all_ids).values(
            'geographic_scope_subnation_id',
            'issf_core_id',
            'subnation_name',
            'country__short_name',
            'subnation_type',
            'subnation_type_other',
            'subnation_point'
        )
        geog_scope_national_records = GeographicScopeNation.objects.filter(issf_core_id__in=all_ids).values(
            'geographic_scope_nation_id',
            'issf_core_id',
            'country__short_name'
        )

        species_records = Species.objects.filter(issf_core_id__in=all_ids).defer('species_id').values()

        write_file_csv('geog_scope_local.csv', geog_scope_local_records, zipfile)
        write_file_csv('geog_scope_regional.csv', geog_scope_regional_records, zipfile)
        write_file_csv('geog_scope_subnational.csv', geog_scope_subnational_records, zipfile)
        write_file_csv('geog_scope_national.csv', geog_scope_national_records, zipfile)

        write_file_csv('species.csv', species_records, zipfile)

        zipfile.write('/issf/export/README within exported zip files.txt', 'README.txt')

        zipfile.close()

        return HttpResponse("Created tabledata.zip")

    else:
        # Return the already generated zipfile
        if os.path.isfile(table_data_file_name):
            zipfile = open(table_data_file_name, 'rb')

            response = HttpResponse(zipfile, content_type='application/x-zip-compressed')
            response['Content-Disposition'] = 'attachment; filename="tabledata.zip"'

        else:
            response = HttpResponse("No tabledata.zip")

        return response
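# write_file_csv() is used throughout the view above but is not shown in this
# listing. A minimal sketch of such a helper, assuming each record set is an
# iterable of dicts (as returned by Django's .values()) and that the CSV is
# written into the already-open ZipFile under the given name; this is an
# illustration, not the original implementation.
import csv
from io import StringIO


def write_file_csv(filename, records, zipfile):
    records = list(records)
    if not records:
        # Keep the archive layout predictable even when a record set is empty.
        zipfile.writestr(filename, '')
        return
    buffer = StringIO()
    writer = csv.DictWriter(buffer, fieldnames=list(records[0].keys()))
    writer.writeheader()
    for record in records:
        writer.writerow(record)
    zipfile.writestr(filename, buffer.getvalue())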
Beispiel #42
0
def create_offline_file(job, offline_job):

    offline_job.status = 'Running'
    offline_job.save()

    try:

        # Temp dir name (a per-job random ID)
        temp_dir = job.random_id

        # Create the directory
        os.mkdir(temp_dir)

        # Temp dir string
        temp_dir_string = temp_dir + '/'

        # Create a dict keyed by component ID holding the component data
        component_data = {}

        # Collect the diff HTML for each component
        for component in job.sorted_component_list():
            if component.diff_html:
                component_data['diff-' +
                               str(component.id)] = component.diff_html

        # Collect the content of every component belonging to either org
        for component in Component.objects.filter(component_type__org__in=[
                job.sorted_orgs()[0],
                job.sorted_orgs()[1]
        ]):
            component_data['component-' +
                           str(component.id)] = component.content

        # Create JSON file
        component_json = open(temp_dir_string + 'components.json', 'w+')
        component_json.write('var component_data = ' +
                             json.dumps(component_data) + ';')
        component_json.close()

        # Create html file
        compare_result = open(temp_dir_string + 'compare_results_offline.html',
                              'w+')

        # Build the html using the template context
        t = loader.get_template('compare_results_offline.html')
        c = Context({
            'org_left_username':
            job.sorted_orgs()[0].username,
            'org_right_username':
            job.sorted_orgs()[1].username,
            'html_rows':
            ''.join(
                list(job.sorted_component_list().values_list('row_html',
                                                             flat=True)))
        })

        # Write template contents to file
        compare_result.write(t.render(c))
        compare_result.close()

        # Create zip file for all content
        zip_file = ZipFile(temp_dir_string + 'compare_results.zip', 'w')

        # Add JSON files
        zip_file.write(temp_dir_string + 'components.json',
                       'data/components.json')

        # Add html file
        zip_file.write(temp_dir_string + 'compare_results_offline.html',
                       'compare_results_offline.html')

        # Add all static files
        for root, dirs, files in os.walk('staticfiles'):
            for file in files:
                zip_file.write(os.path.join(root, file))

        # Close the file
        zip_file.close()

        # Re-open the file
        zip_file = open(temp_dir_string + 'compare_results.zip', 'rb')

        # Save file to model
        job.zip_file.save(temp_dir + '.zip', File(zip_file))
        job.save()

        # Close the file again
        zip_file.close()

        # Remove the files and directories
        for f in glob.glob(temp_dir_string + '*'):
            os.remove(f)
        os.rmdir(temp_dir)

        # Update status to finished
        offline_job.status = 'Finished'

    except Exception as error:

        offline_job.status = 'Error'
        offline_job.error = error
        offline_job.error_stacktrace = traceback.format_exc()

    offline_job.save()
Beispiel #43
0
    def run(self):
        f = ZipFile(self.outfile, 'w', ZIP_DEFLATED)
        f.write(self.infile)
        f.close()
        print('Finished child_process zip of:', self.infile)
Beispiel #44
0
def profile_csv(request: HttpRequest) -> HttpResponse:
    """
    URL that generates and returns a zipfile of csv files for GCPC.
    Now unused.

    :param request: The incoming HTTP request.
    :return: The HTTP response to return to the user.
    """
    profile_records = SSFProfile.objects.all().values(
        'issf_core_id',
        'contributor_id__first_name',
        'contributor_id__last_name',
        'contribution_date',
        'geographic_scope_type',
        'ssf_name',
        'ssf_defined',
        'ssf_definition',
        'data_day',
        'data_month',
        'data_year',
        'data_end_day',
        'data_end_month',
        'data_end_year',
        'comments',
        'sources',
        'percent'
    )
    zipfile = ZipFile('profile_data.zip', 'w')

    main_attrs = MainAttributeView.objects.all().values(
        'issf_core_id',
        'attribute__question_number',
        'attribute__attribute_label',
        'value',
        'attribute__units_label',
        'attribute_value__value_label',
        'other_value',
        'additional',
        'additional_value__value_label'
    )

    write_file_csv('profile.csv', profile_records, zipfile)
    write_file_csv('main_attributes.csv', main_attrs, zipfile)

    geog_scope_local_records = GeographicScopeLocalArea.objects.filter(issf_core__core_record_type='SSF Profile').values(
        'geographic_scope_local_area_id',
        'issf_core_id',
        'local_area_name',
        'local_area_alternate_name',
        'country__short_name',
        'local_area_setting',
        'local_area_setting_other',
        'local_area_point'
    )
    geog_scope_regional_records = Geographic_Scope_Region.objects.filter(issf_core__core_record_type='SSF Profile').values(
        'geographic_scope_region_id',
        'issf_core_id',
        'region__region_name',
        'region_name_other'
    )
    geog_scope_subnational_records = GeographicScopeSubnation.objects.filter(issf_core__core_record_type='SSF Profile').values(
        'geographic_scope_subnation_id',
        'issf_core_id',
        'subnation_name',
        'country__short_name',
        'subnation_type',
        'subnation_type_other',
        'subnation_point'
    )
    geog_scope_national_records = GeographicScopeNation.objects.filter(issf_core__core_record_type='SSF Profile').values(
        'geographic_scope_nation_id',
        'issf_core_id',
        'country__short_name'
    )

    write_file_csv('geog_scope_local.csv', geog_scope_local_records, zipfile)
    write_file_csv('geog_scope_regional.csv', geog_scope_regional_records, zipfile)
    write_file_csv('geog_scope_subnational.csv', geog_scope_subnational_records, zipfile)
    write_file_csv('geog_scope_national.csv', geog_scope_national_records, zipfile)

    zipfile.close()
    zipfile = open('profile_data.zip', 'rb')

    response = HttpResponse(zipfile, content_type='application/x-zip-compressed')
    response['Content-Disposition'] = 'attachment; filename="profile_data.zip"'

    return response
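# A variation on the view above (not the original code): building the archive
# in memory with BytesIO avoids leaving profile_data.zip in the working
# directory after the response has been returned. zip_response() and its
# parameters are illustrative names.
from io import BytesIO
from zipfile import ZipFile

from django.http import HttpResponse


def zip_response(named_contents, download_name='profile_data.zip'):
    """named_contents: iterable of (arcname, text) pairs to place in the zip."""
    buffer = BytesIO()
    with ZipFile(buffer, 'w') as archive:
        for arcname, text in named_contents:
            archive.writestr(arcname, text)
    response = HttpResponse(buffer.getvalue(),
                            content_type='application/x-zip-compressed')
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(download_name)
    return response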
Beispiel #45
0
    def download_image(self):
        ARCHIVE_TYPES = ['zip']
        suffix = re.compile(r'^.*?[.](?P<ext>tar\.gz|tar\.bz2|\w+)$').match(
            self.image_name).group('ext')
        # Check if file exists already:
        if path.isfile(self.image_name):
            if self.checksum_verification():
                logger.info('Local image found, skipping download: %s',
                            self.local_file_path)
                if suffix not in ARCHIVE_TYPES:
                    return True
            else:
                os.remove(self.local_file_path)

        if not path.isfile(self.image_name):
            # Download file to cli-tool-client
            try:
                request.urlretrieve(self.raw_image_url, self.local_file_path)
            except URLError:
                logger.exception('Failed download of image using urllib')
                return False

        self.checksum_verification()

        # Unzip the image when the suffix is zip or tar.gz, then switch the image name to the extracted one.
        # EC2 and SCVMM images use zip; GCE images use tar.gz.

        archive_path = self.image_name
        if suffix not in ARCHIVE_TYPES:
            return True
        else:
            if suffix == 'zip':
                try:
                    archive = ZipFile(archive_path)
                    zipinfo = archive.infolist()
                    self._unzipped_file = zipinfo[0].filename
                except Exception:
                    logger.exception(
                        "Getting information of {} archive failed.".format(
                            self.image_name))
                    return False

                if path.isfile(self.image_name):
                    try:
                        os.remove(self.image_name)
                    except Exception:
                        logger.exception(
                            "Deleting previously unpacked file {} failed.".
                            format(self.image_name))
                        return False
                logger.info(
                    f"Image archived - unpacking as : {self._unzipped_file}")
                try:
                    archive.extractall()
                    archive.close()
                    # remove the archive
                    os.remove(archive_path)
                    return True
                except Exception:
                    logger.exception(f"{suffix} archive unpacking failed.")
                    return False
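# The comments above mention tar.gz archives (used for GCE images), but only
# the zip branch is shown. A standalone sketch of the analogous tar.gz
# handling; the function name and logging are assumptions, not part of the
# original class.
import logging
import os
import tarfile

logger = logging.getLogger(__name__)


def extract_targz(archive_path):
    """Extract a .tar.gz archive into the current directory and remove it."""
    try:
        with tarfile.open(archive_path, 'r:gz') as archive:
            extracted_names = archive.getnames()
            archive.extractall()
        os.remove(archive_path)
        return extracted_names
    except Exception:
        logger.exception("Unpacking of %s failed.", archive_path)
        return None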
Beispiel #46
0
import os
from zipfile import ZipFile

directory = '/Users/Albaba/Library/Mobile Documents/com~apple~CloudDocs/Coding Workplaces/Repos/Web Programming/midterm'

# create a ZipFile object
zipObj = ZipFile('text_files.zip', 'w')
new_dir = directory + '/text_files'


for filename in os.listdir(directory):
    if filename.endswith(".php") or filename.endswith(".css"):
        # Add matching files to the archive
        zipObj.write(os.path.join(directory, filename))
    else:
        continue
    
#close zip
zipObj.close()
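# A variation on the loop above (not the original script): using a context
# manager and an explicit arcname stores bare filenames in the archive instead
# of full paths.
import os
from zipfile import ZipFile

with ZipFile('text_files.zip', 'w') as zip_obj:
    for filename in os.listdir(directory):
        if filename.endswith(".php") or filename.endswith(".css"):
            zip_obj.write(os.path.join(directory, filename), arcname=filename)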
Beispiel #47
0
    def compile_templates(self, target, extensions=None, filter_func=None,
                          zip='deflated', log_function=None,
                          ignore_errors=True, py_compile=False):
        """Finds all the templates the loader can find, compiles them
        and stores them in `target`.  If `zip` is `None`, instead of in a
        zipfile, the templates will be stored in a directory.
        By default a deflate zip algorithm is used. To switch to
        the stored algorithm, `zip` can be set to ``'stored'``.

        `extensions` and `filter_func` are passed to :meth:`list_templates`.
        Each template returned will be compiled to the target folder or
        zipfile.

        By default template compilation errors are ignored.  In case a
        log function is provided, errors are logged.  If you want template
        syntax errors to abort the compilation you can set `ignore_errors`
        to `False` and you will get an exception on syntax errors.

        If `py_compile` is set to `True` .pyc files will be written to the
        target instead of standard .py files.  This flag does not do anything
        on pypy and Python 3 where pyc files are not picked up by itself and
        don't give much benefit.

        .. versionadded:: 2.4
        """
        from jinja2.loaders import ModuleLoader

        if log_function is None:
            log_function = lambda x: None

        if py_compile:
            if not PY2 or PYPY:
                from warnings import warn
                warn(Warning('py_compile has no effect on pypy or Python 3'))
                py_compile = False
            else:
                import imp
                import marshal
                py_header = imp.get_magic() + \
                    '\xff\xff\xff\xff'.encode('iso-8859-15')

                # Python 3.3 added a source filesize to the header
                if sys.version_info >= (3, 3):
                    py_header += '\x00\x00\x00\x00'.encode('iso-8859-15')

        def write_file(filename, data, mode):
            if zip:
                info = ZipInfo(filename)
                info.external_attr = 0o755 << 16
                zip_file.writestr(info, data)
            else:
                f = open(os.path.join(target, filename), mode)
                try:
                    f.write(data)
                finally:
                    f.close()

        if zip is not None:
            from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
            zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
                                                 stored=ZIP_STORED)[zip])
            log_function('Compiling into Zip archive "%s"' % target)
        else:
            if not os.path.isdir(target):
                os.makedirs(target)
            log_function('Compiling into folder "%s"' % target)

        try:
            for name in self.list_templates(extensions, filter_func):
                source, filename, _ = self.loader.get_source(self, name)
                try:
                    code = self.compile(source, name, filename, True, True)
                except TemplateSyntaxError as e:
                    if not ignore_errors:
                        raise
                    log_function('Could not compile "%s": %s' % (name, e))
                    continue

                filename = ModuleLoader.get_module_filename(name)

                if py_compile:
                    c = self._compile(code, encode_filename(filename))
                    write_file(filename + 'c', py_header +
                               marshal.dumps(c), 'wb')
                    log_function('Byte-compiled "%s" as %s' %
                                 (name, filename + 'c'))
                else:
                    write_file(filename, code, 'w')
                    log_function('Compiled "%s" as %s' % (name, filename))
        finally:
            if zip:
                zip_file.close()

        log_function('Finished compiling templates')
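# A hedged usage sketch for compile_templates() above: compile every template
# found by a FileSystemLoader into a single deflate-compressed zip archive
# (the 'templates' directory and target filename are illustrative).
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader('templates'))
env.compile_templates('compiled_templates.zip', zip='deflated',
                      log_function=print, ignore_errors=False)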
Beispiel #48
0
    'X': '#636363',
    'Y': '#00ecff',
    'Z': '#d9d9d9',
}

if not os.path.exists('dist'):
    os.mkdir('dist')

with open('manifest.json') as f:
    manifest_template = f.read()
with open('addon.js') as f:
    addon_template = f.read()
with Image.open('icon.png') as icon:
    for letter, color in letters.items():
        addon = addon_template.replace('{color}',
                                       color).replace('{letter}', letter)
        manifest = manifest_template.replace('{letter}', letter)

        filename = 'dist/%s-in-status-bar.zip' % letter.lower()
        zip_file = ZipFile(filename, mode='w')
        zip_file.writestr('manifest.json', manifest)
        zip_file.writestr('addon.js', addon)

        rgb = [ord(c) for c in color[1:].decode('hex')]
        icon.putpalette([0, 0, 0, rgb[0], rgb[1], rgb[2]])
        icon_contents = StringIO()
        icon.save(icon_contents, format='PNG')
        icon_contents.seek(0)
        zip_file.writestr('icon.png', icon_contents.read())
        zip_file.close()
Beispiel #49
0
def pack_content(path, is_peti):
    """Pack any custom content into the map.

    Filelist format: "[control char]filename[\t packname]"
    Filename is the name of the actual file. If given packname is the
    name to save it into the packfile as. If the first character of the
    filename is '#', the file will be added to the soundscript manifest too.
    """
    files = set()  # Files to pack.
    soundscripts = set()  # Soundscripts need to be added to the manifest too.
    rem_soundscripts = set()  # Soundscripts to exclude, so we can override the sounds.
    particles = set()
    additional_files = set()  # .vvd files etc which also are needed.
    preload_files = set()  # Files we want to force preloading

    try:
        pack_list = open(path[:-4] + '.filelist.txt')
    except (IOError, FileNotFoundError):
        pass  # Assume no files if missing..
        # There might still be things to inject.
    else:
        with pack_list:
            for line in pack_list:
                line = line.strip().lower()
                if not line or line.startswith('//'):
                    continue  # Skip blanks or comments

                if line[:8] == 'precache':
                    preload_files.add(line)
                    continue

                if line[:2] == '-#':
                    rem_soundscripts.add(line[2:])
                    continue

                if line[:1] == '#':
                    line = line[1:]
                    soundscripts.add(line)

                # We need to add particle systems to a manifest.
                if line.startswith('particles/'):
                    particles.add(line)

                if line[-4:] == '.mdl':
                    additional_files.update(
                        {line[:-4] + ext
                         for ext in MDL_ADDITIONAL_EXT})

                files.add(line)

    # Remove guessed files not in the original list.
    additional_files -= files

    # Only generate a soundscript for PeTI maps..
    if is_peti:
        music_data = CONF.find_key('MusicScript', [])
        if music_data.value:
            generate_music_script(music_data, files)
            # Add the new script to the manifest file..
            soundscripts.add('scripts/BEE2_generated_music.txt')

    # We still generate these in hammer-mode - it's still useful there.
    # If no files are packed, no manifest will be added either.
    gen_sound_manifest(soundscripts, rem_soundscripts)
    gen_part_manifest(particles)
    gen_auto_script(preload_files, is_peti)

    inject_names = list(inject_files())

    # Abort packing if no packfiles exist, and no injected files exist either.
    if not files and not inject_names:
        LOGGER.info('No files to pack!')
        return

    LOGGER.info('Files to pack:')
    for file in sorted(files):
        # \t separates the original and in-pack name if used.
        LOGGER.info(' # "' + file.replace('\t', '" as "') + '"')

    if additional_files and LOGGER.isEnabledFor(logging.DEBUG):
        LOGGER.info('Potential additional files:')
        for file in sorted(additional_files):
            LOGGER.debug(' # "' + file + '"')

    LOGGER.info('Injected files:')
    for _, file in inject_names:
        LOGGER.info(' # "' + file + '"')

    LOGGER.info("Packing Files!")
    bsp_file = BSP(path)
    LOGGER.debug(' - Header read')
    bsp_file.read_header()

    # Manipulate the zip entirely in memory
    zip_data = BytesIO()
    zip_data.write(bsp_file.get_lump(BSP_LUMPS.PAKFILE))
    zipfile = ZipFile(zip_data, mode='a')
    LOGGER.debug(' - Existing zip read')

    zip_write = get_zip_writer(zipfile)

    for file in files:
        pack_file(zip_write, file)

    for file in additional_files:
        pack_file(zip_write, file, suppress_error=True)

    for filename, arcname in inject_names:
        LOGGER.info('Injecting "{}" into packfile.', arcname)
        zip_write(filename, arcname)

    LOGGER.debug(' - Added files')

    zipfile.close()  # Finalise the zip modification

    # Copy the zipfile into the BSP file, and adjust the headers
    bsp_file.replace_lump(
        path,
        BSP_LUMPS.PAKFILE,
        zip_data.getvalue(),  # Get the binary data we need
    )
    LOGGER.debug(' - BSP written!')

    LOGGER.info("Packing complete!")
Beispiel #50
0
class AudioPositions(Positions):

    def __init__(self, zip_filename, position_file_name=None, is_time=True, fps=48000,audio_metadata_extractor= None):
        import re
        self.zip_filename = zip_filename
        self.fps = fps
        self.audio_metadata_extractor = self.audio_metadata_extractor if audio_metadata_extractor is None else audio_metadata_extractor
        self.dir = os.path.dirname(os.path.abspath(self.zip_filename))
        file_type_matcher = re.compile(
            '.*\.(' + '|'.join([ft[1][ft[1].rfind('.') + 1:] for ft in audiofiletypes]) + ')')
        self.myzip = ZipFile(zip_filename, 'r')
        self.names = [name for name in self.myzip.namelist() if len(file_type_matcher.findall(name.lower())) > 0 and \
                      os.path.basename(name) == name]
        self.file_meta = {}
        if position_file_name is not None:
            Positions.__init__(self,Positions.read(position_file_name, is_time=is_time, fps=48000).positions,fps=48000)
            for position in self.positions:
                if position[2] not in self.names:
                    raise ValueError('Missing {} from audio zip file'.format(position[2]))
        else:
            meta = self.audio_metadata_extractor(self.names[0])
            self.fps = int(meta['sample_rate'])
            positions = []
            positions.append(
                Positions.to_segment(0, is_time=True, fps=self.fps) + (self.names[0],))
            last = 0
            for name_pos in range(len(self.names)-1):
                # get duration of last for start of next
                if name_pos > 0:
                    meta = self.audio_metadata_extractor(self.names[name_pos])
                last = float(meta['duration'])*1000.0 + last
                if int(meta['sample_rate']) != self.fps:
                    raise ValueError('Mismatched sample rate {} from audio zip file {}'.format(self.fps,int(meta['sample_rate']) ))
                positions.append(Positions.to_segment(last, is_time=True, fps=int(meta['sample_rate'])) + (self.names[name_pos+1],))
            Positions.__init__(self,positions, fps=fps)

    def audio_metadata_extractor(self, filename):
        place = self.get_file(filename)
        meta = get_meta_from_video(place, show_streams=True, media_types=['audio'])[0]
        return  [x for x in meta if len(x) > 0][0]

    def get_file(self, name):
        path  = os.path.join(self.dir,name)
        if os.path.exists(path):
            return path
        if os.path.exists(name):
            return name
        return self.myzip.extract(name, self.dir)

    def _get_duration(self, position_file):
        if os.path.basename(position_file) not in self.file_meta:
            meta = self.audio_metadata_extractor(position_file)
            self.file_meta[os.path.basename(position_file)] = meta
        else:
            meta = self.file_meta[os.path.basename(position_file)]
        return float(meta['duration'])*1000.0

    def isOpened(self):
        #TODO: check names, what else
        return True

    def release(self):
        import shutil
        shutil.rmtree(self.dir)
        self.myzip.close()
Beispiel #51
0
def unzip(filepath, dest):
    zipfile = ZipFile(filepath)
    zipfile.extractall(dest)
    zipfile.close()
Beispiel #52
0
class StaticEventCreator(object):
    """Define process which generates a static (offline) version of an Indico event."""

    def __init__(self, rh, event):
        self._rh = rh
        self.event = event
        self._display_tz = self.event.display_tzinfo.zone
        self._zip_file = None
        self._content_dir = _normalize_path(u'OfflineWebsite-{}'.format(event.title))
        self._web_dir = os.path.join(get_root_path('indico'), 'web')
        self._static_dir = os.path.join(self._web_dir, 'static')

    def create(self):
        """Trigger the creation of a ZIP file containing the site."""
        temp_file = NamedTemporaryFile(suffix='indico.tmp', dir=config.TEMP_DIR)
        self._zip_file = ZipFile(temp_file.name, 'w', allowZip64=True)

        with collect_static_files() as used_assets:
            # create the home page html
            html = self._create_home().encode('utf-8')

            # Mathjax plugins can only be known in runtime
            self._copy_folder(os.path.join(self._content_dir, 'static', 'dist', 'js', 'mathjax'),
                              os.path.join(self._static_dir, 'dist', 'js', 'mathjax'))

            # Materials and additional pages
            self._copy_all_material()
            self._create_other_pages()

            # Create index.html file (main page for the event)
            index_path = os.path.join(self._content_dir, 'index.html')
            self._zip_file.writestr(index_path, html)

            self._write_generated_js()

        # Copy static assets to ZIP file
        self._copy_static_files(used_assets)
        self._copy_plugin_files(used_assets)
        if config.CUSTOMIZATION_DIR:
            self._copy_customization_files(used_assets)

        temp_file.delete = False
        chmod_umask(temp_file.name)
        self._zip_file.close()
        return temp_file.name

    def _write_generated_js(self):
        global_js = generate_global_file().encode('utf-8')
        user_js = generate_user_file().encode('utf-8')
        i18n_js = u"window.TRANSLATIONS = {};".format(generate_i18n_file(session.lang)).encode('utf-8')
        gen_path = os.path.join(self._content_dir, 'assets')
        self._zip_file.writestr(os.path.join(gen_path, 'js-vars', 'global.js'), global_js)
        self._zip_file.writestr(os.path.join(gen_path, 'js-vars', 'user.js'), user_js)
        self._zip_file.writestr(os.path.join(gen_path, 'i18n', session.lang + '.js'), i18n_js)

    def _copy_static_files(self, used_assets):
        # add favicon
        used_assets.add('static/images/indico.ico')
        # assets
        css_files = {url for url in used_assets if re.match('static/dist/.*\.css$', url)}
        for file_path in css_files:
            with open(os.path.join(self._web_dir, file_path)) as f:
                rewritten_css, used_urls, __ = rewrite_css_urls(self.event, f.read())
                used_assets |= used_urls
                self._zip_file.writestr(os.path.join(self._content_dir, file_path), rewritten_css)
        for file_path in used_assets - css_files:
            if not re.match('^static/(images|fonts|dist)/(?!js/ckeditor/)', file_path):
                continue
            self._copy_file(os.path.join(self._content_dir, file_path),
                            os.path.join(self._web_dir, file_path))

    def _copy_plugin_files(self, used_assets):
        css_files = {url for url in used_assets if re.match('static/plugins/.*\.css$', url)}
        for file_path in css_files:
            plugin_name, path = re.match(r'static/plugins/([^/]+)/(.+.css)', file_path).groups()
            plugin = plugin_engine.get_plugin(plugin_name)
            with open(os.path.join(plugin.root_path, 'static', path)) as f:
                rewritten_css, used_urls, __ = rewrite_css_urls(self.event, f.read())
                used_assets |= used_urls
                self._zip_file.writestr(os.path.join(self._content_dir, file_path), rewritten_css)
        for file_path in used_assets - css_files:
            match = re.match(r'static/plugins/([^/]+)/(.+)', file_path)
            if not match:
                continue
            plugin_name, path = match.groups()
            plugin = plugin_engine.get_plugin(plugin_name)
            self._copy_file(os.path.join(self._content_dir, file_path),
                            os.path.join(plugin.root_path, 'static', path))

    def _strip_custom_prefix(self, url):
        # strip the 'static/custom/' prefix from the given url/path
        return '/'.join(url.split('/')[2:])

    def _copy_customization_files(self, used_assets):
        css_files = {url for url in used_assets if re.match('static/custom/.*\.css$', url)}
        for file_path in css_files:
            with open(os.path.join(config.CUSTOMIZATION_DIR, self._strip_custom_prefix(file_path))) as f:
                rewritten_css, used_urls, __ = rewrite_css_urls(self.event, f.read())
                used_assets |= used_urls
                self._zip_file.writestr(os.path.join(self._content_dir, file_path), rewritten_css)
        for file_path in used_assets - css_files:
            if not file_path.startswith('static/custom/'):
                continue
            self._copy_file(os.path.join(self._content_dir, file_path),
                            os.path.join(config.CUSTOMIZATION_DIR, self._strip_custom_prefix(file_path)))

    def _create_home(self):
        return WPStaticSimpleEventDisplay(self._rh, self.event, self.event.theme).display()

    def _create_other_pages(self):
        pass

    def _copy_all_material(self):
        self._add_material(self.event, '')
        for contrib in self.event.contributions:
            if not contrib.can_access(None):
                continue
            self._add_material(contrib, "%s-contribution" % contrib.friendly_id)
            for sc in contrib.subcontributions:
                self._add_material(sc, "%s-subcontribution" % sc.friendly_id)
        for session_ in self.event.sessions:
            if not session_.can_access(None):
                continue
            self._add_material(session_, "%s-session" % session_.friendly_id)

    def _add_material(self, target, type_):
        for folder in AttachmentFolder.get_for_linked_object(target, preload_event=True):
            for attachment in folder.attachments:
                if not attachment.can_access(None):
                    continue
                if attachment.type == AttachmentType.file:
                    dst_path = posixpath.join(self._content_dir, "material", type_,
                                              "{}-{}".format(attachment.id, attachment.file.filename))
                    with attachment.file.get_local_path() as file_path:
                        self._copy_file(dst_path, file_path)

    def _copy_file(self, dest, src):
        """Copy a file from a source path to a destination inside the ZIP."""
        self._zip_file.write(src, dest)

    def _copy_folder(self, dest, src):
        for root, subfolders, files in os.walk(src):
            dst_dirpath = os.path.join(dest, os.path.relpath(root, src))
            for filename in files:
                src_filepath = os.path.join(src, root, filename)
                self._zip_file.write(src_filepath, os.path.join(dst_dirpath, filename))
Beispiel #53
0
def create_workflow(drops, cwl_filename, buffer):
    """
    Create a CWL workflow from a given Physical Graph Template

    A CWL workflow consists of multiple files: a single file describing the
    workflow, and one file per workflow step. All the files are combined into
    one zip file so that the user can download a single file.

    NOTE: CWL only supports workflow steps that are bash shell applications.
          Non-BashShellApp nodes cannot be represented in CWL.
    """

    # search the drops for non-BashShellApp drops,
    # if found, the graph cannot be translated into CWL
    for index, node in enumerate(drops):
        dataType = node.get('dt', '')
        if dataType not in SUPPORTED_CATEGORIES:
            raise Exception('Node {0} has an unsupported category: {1}'.format(
                index, dataType))

    # create list for command line tool description files
    step_files = []

    # create the workflow
    cwl_workflow = cwlgen.Workflow('', label='', doc='', cwl_version='v1.0')

    # create files dictionary
    files = {}

    # look for input and output files in the pg_spec
    for index, node in enumerate(drops):
        command = node.get('command', None)
        dataType = node.get('dt', None)
        outputId = node.get('oid', None)
        outputs = node.get('outputs', [])

        if len(outputs) > 0:
            files[outputs[0]] = "step" + str(index) + "/output_file_0"

    # add steps to the workflow
    for index, node in enumerate(drops):
        dataType = node.get('dt', '')

        if dataType == 'BashShellApp':
            name = node.get('nm', '')
            inputs = node.get('inputs', [])
            outputs = node.get('outputs', [])

            # create command line tool description
            filename = "step" + str(index) + ".cwl"
            contents = create_command_line_tool(node)

            # add contents of command line tool description to list of step files
            step_files.append({"filename": filename, "contents": contents})

            # create step
            step = cwlgen.WorkflowStep("step" + str(index), run=filename)

            # add input to step
            for index, input in enumerate(inputs):
                step.inputs.append(
                    cwlgen.WorkflowStepInput('input_file_' + str(index),
                                             source=files[input]))

            # add output to step
            for index, output in enumerate(outputs):
                step.out.append(
                    cwlgen.WorkflowStepOutput('output_file_' + str(index)))

            # add step to workflow
            cwl_workflow.steps.append(step)

    # put workflow and command line tool description files all together in a zip
    zipObj = ZipFile(buffer, 'w')
    for step_file in step_files:
        zipObj.writestr(step_file["filename"], six.b(step_file["contents"]))
    zipObj.writestr(cwl_filename, six.b(cwl_workflow.export_string()))
    zipObj.close()
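# A hedged usage sketch for create_workflow() above. The drop dictionaries are
# illustrative and only use the keys the function reads ('dt', 'nm',
# 'command', 'oid', 'inputs', 'outputs'); it assumes 'BashShellApp' is listed
# in SUPPORTED_CATEGORIES.
from io import BytesIO

drops = [
    {'dt': 'BashShellApp', 'nm': 'make_data', 'command': 'generate.sh',
     'oid': 'A', 'inputs': [], 'outputs': ['data1']},
    {'dt': 'BashShellApp', 'nm': 'process_data', 'command': 'process.sh',
     'oid': 'B', 'inputs': ['data1'], 'outputs': ['data2']},
]

buffer = BytesIO()
create_workflow(drops, 'workflow.cwl', buffer)
with open('workflow.zip', 'wb') as f:
    f.write(buffer.getvalue())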
Beispiel #54
0
import os
import thread
import time
import urllib
import webbrowser
from zipfile import ZipFile

tagname = "1.0"
zipname = "markdown-presenter-" + tagname + ".zip"
zippath = "./" + zipname
url = "https://github.com/jsakamoto/MarkdownPresenter/releases/download/v." + tagname + "/" + zipname

# download zip archive of Markdown Presenter.
urllib.urlretrieve(url, zippath)

# ectract the zip.
zfile = ZipFile(zippath)
zfile.extractall(".")
zfile.close()

# clean up zip.
os.remove(zippath)


# launch default web browser to open Markdown Presenter
# after one shot timer to wait for warming up HTTP daemon.
def launch():
    time.sleep(1)
    webbrowser.open("http://localhost:8000/Presenter.html")


thread.start_new_thread(launch, ())

# start mini HTTP daemon.
Beispiel #55
0
    def save(self, delete_zip_import=True, *args, **kwargs):
        """
        If a zip file is uploaded, extract any images from it and add
        them to the gallery, before removing the zip file.
        """
        super().save(*args, **kwargs)
        if self.zip_import:
            zip_file = ZipFile(self.zip_import)
            for name in zip_file.namelist():
                data = zip_file.read(name)
                try:
                    from PIL import Image

                    image = Image.open(BytesIO(data))
                    image.load()
                    image = Image.open(BytesIO(data))
                    image.verify()
                except ImportError:
                    pass
                except:  # noqa
                    continue
                name = os.path.split(name)[1]

                # In python3, name is a string. Convert it to bytes.
                if not isinstance(name, bytes):
                    try:
                        name = name.encode("cp437")
                    except UnicodeEncodeError:
                        # File name includes characters that aren't in cp437,
                        # which isn't supported by most zip tooling. They will
                        # not appear correctly.
                        tempname = name

                # Decode byte-name.
                if isinstance(name, bytes):
                    encoding = charsetdetect(name)["encoding"]
                    tempname = name.decode(encoding)

                # A gallery with a slug of "/" tries to extract files
                # to / on disk; see os.path.join docs.
                slug = self.slug if self.slug != "/" else ""
                path = os.path.join(GALLERIES_UPLOAD_DIR, slug, tempname)
                try:
                    saved_path = default_storage.save(path, ContentFile(data))
                except UnicodeEncodeError:
                    from warnings import warn

                    warn("A file was saved that contains unicode "
                         "characters in its path, but somehow the current "
                         "locale does not support utf-8. You may need to set "
                         "'LC_ALL' to a correct value, eg: 'en_US.UTF-8'.")
                    # The native() call is needed here around str because
                    # os.path.join() in Python 2.x (in posixpath.py)
                    # mixes byte-strings with unicode strings without
                    # explicit conversion, which raises a TypeError as it
                    # would on Python 3.
                    path = os.path.join(GALLERIES_UPLOAD_DIR, slug,
                                        str(name, errors="ignore"))
                    saved_path = default_storage.save(path, ContentFile(data))
                self.images.create(file=saved_path)
            if delete_zip_import:
                zip_file.close()
                self.zip_import.delete(save=True)
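# Background for the cp437 handling above: zipfile decodes entry names that do
# not carry the UTF-8 flag as cp437, so re-encoding to cp437 and detecting the
# real charset is an attempt to recover the original name. A minimal
# illustration with an assumed filename:
raw = 'café.jpg'.encode('utf-8').decode('cp437')   # how zipfile may present the name
recovered = raw.encode('cp437').decode('utf-8')    # back to 'café.jpg'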
Beispiel #56
0
def provision_group_purchase_branding(sln_group_purchase_settings,
                                      main_branding, language):
    if not sln_group_purchase_settings.branding_hash:
        logging.info("Storing GROUP PURCHASE branding")
        stream = ZipFile(StringIO(main_branding.blob))
        try:
            new_zip_stream = StringIO()
            zip_ = ZipFile(new_zip_stream, 'w', compression=ZIP_DEFLATED)
            try:
                path = os.path.join(os.path.dirname(solutions.__file__),
                                    'common', 'templates',
                                    'brandings/app_jquery.tmpl.js')
                zip_.writestr("jquery.tmpl.min.js", file_get_contents(path))
                path = os.path.join(os.path.dirname(solutions.__file__),
                                    'common', 'templates',
                                    'brandings/moment-with-locales.min.js')
                zip_.writestr("moment-with-locales.min.js",
                              file_get_contents(path))
                zip_.writestr(
                    "app-translations.js",
                    JINJA_ENVIRONMENT.get_template(
                        "brandings/app_group_purchases_translations.js").
                    render({
                        'language': language
                    }).encode("utf-8"))
                path = os.path.join(os.path.dirname(solutions.__file__),
                                    'common', 'templates',
                                    'brandings/app_group_purchases.js')
                zip_.writestr("app.js",
                              file_get_contents(path).encode("utf-8"))

                for file_name in set(stream.namelist()):
                    str_ = stream.read(file_name)
                    if file_name == 'branding.html':
                        html = str_
                        # Remove previously added dimensions:
                        html = re.sub(
                            "<meta\\s+property=\\\"rt:dimensions\\\"\\s+content=\\\"\\[\\d+,\\d+,\\d+,\\d+\\]\\\"\\s*/>",
                            "", html)
                        html = re.sub(
                            '<head>', """<head>
<link href="jquery/jquery.mobile.inline-png-1.4.2.min.css" rel="stylesheet" media="screen">
<style type="text/css">
#group_purchases-empty{text-align: center;}
div.groupPurchase{padding: 10px 10px 25px 10px;}
img.groupPurchasePicture { width: 100%; margin-top: 10px; }
div.backgoundLight{background: url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH3gEYDyEEzIMX+AAAACZpVFh0Q29tbWVudAAAAAAAQ3JlYXRlZCB3aXRoIEdJTVAgb24gYSBNYWOV5F9bAAAADUlEQVQI12NgYGCQBAAAHgAaOwrXiAAAAABJRU5ErkJggg==");}
div.backgoundDark{background: url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH3gEYDyIY868YdAAAACZpVFh0Q29tbWVudAAAAAAAQ3JlYXRlZCB3aXRoIEdJTVAgb24gYSBNYWOV5F9bAAAADUlEQVQI12P4//+/JAAJFQMXEGL3cQAAAABJRU5ErkJggg==");}
h2.title { margin: 0;}
.subscribed { font-weight: bold; }
</style>
<script src="jquery/jquery-1.11.0.min.js"></script>
<script src="jquery/jquery.mobile-1.4.2.min.js"></script>
<script src="jquery.tmpl.min.js"></script>
<script src="moment-with-locales.min.js"></script>
<script src="rogerthat/rogerthat-1.0.js" type="text/javascript"></script>
<script src="rogerthat/rogerthat.api-1.0.js" type="text/javascript"></script>

<script src="app-translations.js" type="text/javascript"></script>
<script src="app.js" type="text/javascript"></script>""", html)
                        html = re.sub(
                            '<nuntiuz_message/>',
                            """<div data-role="popup" id="gp-popup" class="ui-content">
    <a href="#" data-rel="back" data-role="button" data-theme="a" data-icon="delete" data-iconpos="notext" class="closePopupOverlay ui-btn-right">Close</a>
    <div id="gp-popup-content"></div>
</div>
<div id="menu"></div>""", html)
                        zip_.writestr('app.html', html)

                    else:
                        zip_.writestr(file_name, str_)
            finally:
                zip_.close()

            branding_content = new_zip_stream.getvalue()
            new_zip_stream.close()

            sln_group_purchase_settings.branding_hash = put_branding(
                u"Group Purchase App", base64.b64encode(branding_content)).id
            sln_group_purchase_settings.put()
        except:
            logging.error("Failure while parsing group purchase app branding",
                          exc_info=1)
            raise
        finally:
            stream.close()
Beispiel #57
0
def get_or_create_model_shapefile_def(model_loc, output_dir, generate, srid,
                                      check_source_file_last_modified=None):
    '''
    input:
        model_loc: name of the model directory or zipfile (a '.zip' suffix is recognized)
        output_dir: output directory for the shapefiles
        generate: dict with sources to generate
        srid

    return:
        generated files
        source date
    '''
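    # Illustrative value for `generate`, based on the keys used further below
    # (an assumption, not taken from the original call sites):
    # generate = {'nodes': True, 'branches': True, 'structures': False}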
    log.debug('!!!get_or_create_model_shapefile_def')

    #check source
    if not os.path.isfile(model_loc) and not os.path.isdir(
        model_loc.encode('utf8')):
        log.warning('source file is missing. ' + model_loc)
        return (False, None, None, )

    if model_loc[-3:].lower() == 'zip':
        log.debug('read model from zipfile: {0}'.format(model_loc))
        zip_file = ZipFile(model_loc, "r")
        sobek_gr = Stream(zip_file.read('network.gr').replace('\r\n', '\n'))
        sobek_struc = Stream(zip_file.read('network.st'))
        source_file_last_modified = datetime.datetime.fromtimestamp(
            os.stat(model_loc.encode('utf8'))[8])
        zip_file.close()
    else:
        log.debug('read model from directory: {0}'.format(model_loc))
        sobek_gr = open(os.path.join(model_loc, 'network.gr'))
        sobek_struc = open(os.path.join(model_loc, 'network.st'))
        source_file_last_modified = datetime.datetime.fromtimestamp(
            os.stat(os.path.join(model_loc.encode('utf8'), 'network.gr'))[8])

    source_up_to_date = False
    log.debug(
        'in db origin date is {0}'.format(check_source_file_last_modified))
    log.debug('file date is {0}'.format(source_file_last_modified))
    if (check_source_file_last_modified and (
            check_source_file_last_modified >= source_file_last_modified)):
        log.info('source file is still up to date')
        source_up_to_date = True

    #check output
    log.debug('output dir for shapefile is ' + output_dir)
    if not os.path.isdir(output_dir):
        log.info('create output dir ' + output_dir)
        os.makedirs(output_dir)

    shapefile_name = {}
    for type in ['nodes', 'branches', 'structures']:
        shapefile_name[type] = os.path.join(output_dir, type + '.shp')
        if generate[type] and os.path.isfile(shapefile_name[type]):
            if source_up_to_date:
                log.info('output for ' + type + ' exists and is up to date.')
                generate[type] = False
            else:
                log.info('output for ' + type +
                         ' exists and is not up to date, '
                         'or recreation is needed. Removing previous output.')
                os.remove(shapefile_name[type])
                os.remove(shapefile_name[type].replace('.shp', '.shx'))
                os.remove(shapefile_name[type].replace('.shp', '.dbf'))
                os.remove(shapefile_name[type].replace('.shp', '.prj'))

    node_array = []
    branch_array = []
    node = {}
    node_dict = {}

    #start with reading source files
    if generate['nodes'] or generate['branches'] or generate['structures']:
        log.debug('read locations of nodes and branches')
        pool = sobek.File(sobek_gr)

        #original SRS
        oSRS = ogr.osr.SpatialReference()

        # warning oSRS.ImportFromEPSG(28992) gives wrong string
        # '+proj=sterea +lat_0=52.15616055555555 +lon_0=5.387638888888889 +
        # k=0.9999079 +x_0=155000 +y_0=463000 +ellps=bessel +units=m +no_defs '
        oSRS.ImportFromProj4(
            "+proj=sterea +lat_0=52.15616055555555 +" \
            "lon_0=5.38763888888889 +k=0.999908 +x_0=155000 +" \
            "y_0=463000 +ellps=bessel +towgs84=565.237,50.0087," \
            "465.658,-0.406857,0.350733,-1.87035,4.0812 +units=m +no_defs")
        #target SRS
        tSRS = ogr.osr.SpatialReference()
        tSRS.ImportFromEPSG(900913)
        poCT = ogr.osr.CoordinateTransformation(oSRS, tSRS)

        for candidate_grid in pool['GRID']:
            branch_id = candidate_grid['id'][0]
            table = candidate_grid['gr gr'][-1:][0]
            for row_no in range(table.rows()):
                node_id = table[row_no, 3]
                #check if node is not already in dict
                if not node_id in node.keys():
                    node[node_id] = True
                    pnt = poCT.TransformPoint(
                        table[row_no, 5], table[row_no, 6], 0.)
                    node_array.append({'id': node_id, 'type': 0,
                                       'x': int(pnt[0]), 'y': int(pnt[1])})
                    node_dict[node_id] = {'id': node_id, 'type': 0,
                                          'x': int(pnt[0]), 'y': int(pnt[1])}

                sub_branch_id = table[row_no, 4]
                # Skip the last row, which has no following point to pair with.
                if (row_no != table.rows() - 1 and sub_branch_id != ''):
                    branch_array.append({'id': sub_branch_id,
                                         'branch_id': branch_id,
                                         'dist_from': table[row_no, 0],
                                         'dist_to': table[row_no + 1, 0],
                                         'type': 0,
                                         'to': table[row_no + 1, 3],
                                         'from': table[row_no, 3]})
        del pool
        #start generating shapefiles
        if generate['nodes']:
            log.info('generate shapefile for nodes')
            #make shapefile for nodes
            # Create the file object, a layer, and an attribute
            t_srs = osr.SpatialReference()
            t_srs.SetFromUserInput('epsg:900913')
            drv = ogr.GetDriverByName('ESRI Shapefile')
            ds = drv.CreateDataSource(str(shapefile_name['nodes']))

            layer = ds.CreateLayer(
                ds.GetName(), geom_type=ogr.wkbPoint, srs=t_srs)
            layer.CreateField(ogr.FieldDefn('id', ogr.OFTString))
            layer.CreateField(ogr.FieldDefn('type', ogr.OFTInteger))

            # Could loop the following over some number of features
            geom = ogr.Geometry(type=ogr.wkbPoint)
            fid = 0

            for node in node_array:
                geom.SetPoint(0, node['x'], node['y'])
                feat = ogr.Feature(feature_def=layer.GetLayerDefn())
                feat.SetGeometry(geom)
                feat.SetFID(fid)
                feat.SetField('id', node['id'])
                feat.SetField('type', node['type'])
                layer.CreateFeature(feat)

                fid = fid + 1

            log.debug('nr of nodes {0}'.format(fid))
            layer.SyncToDisk()
            # Clean up
            ds.Destroy()

        if generate['branches']:
            log.info('generate shapefile for branches')
            #make shapefile for branches
            # Create the file object, a layer, and an attribute
            t_srs = osr.SpatialReference()
            t_srs.SetFromUserInput('epsg:900913')
            drv = ogr.GetDriverByName('ESRI Shapefile')
            dsb = drv.CreateDataSource(str(shapefile_name['branches']))

            layerb = dsb.CreateLayer(
                dsb.GetName(), geom_type=ogr.wkbLineString, srs=t_srs)
            layerb.CreateField(ogr.FieldDefn('id', ogr.OFTString))
            layerb.CreateField(ogr.FieldDefn('type', ogr.OFTInteger))
            layerb.CreateField(ogr.FieldDefn('from', ogr.OFTString))
            layerb.CreateField(ogr.FieldDefn('to', ogr.OFTString))  # node ids are strings, like 'from'

            fid = 0

            for branch in branch_array:
                line = ogr.Geometry(type=ogr.wkbLineString)
                line.AddPoint(node_dict[branch['from']]['x'],
                              node_dict[branch['from']]['y'])
                line.AddPoint(node_dict[branch['to']]['x'],
                              node_dict[branch['to']]['y'])
                feat = ogr.Feature(feature_def=layerb.GetLayerDefn())
                feat.SetGeometry(line)
                feat.SetFID(fid)
                feat.SetField('id', branch['id'])
                feat.SetField('type', branch['type'])
                feat.SetField('from', branch['from'])
                feat.SetField('to', branch['to'])
                layerb.CreateFeature(feat)
                fid = fid + 1

            log.debug('nr of branches {0}'.format(fid))
            layerb.SyncToDisk()
            # Clean up
            dsb.Destroy()

    if generate['structures']:
        log.debug('read locations of structures')
        #read extra sources for structures
        pool = sobek.File(sobek_struc)
        struc_array = []
        for candidate in pool['STRU']:
            struc = {}
            struc['id'] = candidate['id'][0]
            if 'nm' in candidate.keys():
                struc['name'] = candidate['nm'][0]
            else:
                struc['name'] = '-'
            struc['branch_id'] = candidate['ci'][0]
            struc['dist'] = candidate['lc'][0]
            struc['type'] = -1

            location_found = False
            for branch in branch_array:
                if ((branch['branch_id'] == struc['branch_id']) and (
                        struc['dist'] >= branch['dist_from']) and (
                        struc['dist'] < branch['dist_to'])):
                    struc['x'] = (node_dict[branch['from']]['x'] +
                                  node_dict[branch['to']]['x']) / 2
                    struc['y'] = (node_dict[branch['from']]['y'] +
                                  node_dict[branch['to']]['y']) / 2
                    location_found = True
                    break

            if not location_found:
                log.warning('structure location not found for {0}'.format(struc))
                struc['x'] = 0
                struc['y'] = 0

            if ((struc['id'][:4] == 'lkb_') or (
                    struc['id'][:6] == 'c_lkb_') or (
                    struc['name'][:4] == 'lkb_')):
                struc['origin'] = 1
            else:
                struc['origin'] = 0

            struc_array.append(struc)

        #make shapefile for structures
        t_srs = osr.SpatialReference()
        t_srs.SetFromUserInput('epsg:900913')
        drv = ogr.GetDriverByName('ESRI Shapefile')
        log.info('create shapefile ' + shapefile_name['structures'])
        ds = drv.CreateDataSource(str(shapefile_name['structures']))

        layer = ds.CreateLayer(ds.GetName(), geom_type=ogr.wkbPoint, srs=t_srs)
        layer.CreateField(ogr.FieldDefn('id', ogr.OFTString))
        layer.CreateField(ogr.FieldDefn('type', ogr.OFTInteger))
        layer.CreateField(ogr.FieldDefn('origin', ogr.OFTInteger))

        # Could loop the following over some number of features
        geom = ogr.Geometry(type=ogr.wkbPoint)
        fid = 0

        for node in struc_array:
            geom.SetPoint(0, node['x'], node['y'])
            feat = ogr.Feature(feature_def=layer.GetLayerDefn())
            feat.SetGeometry(geom)
            feat.SetFID(fid)
            feat.SetField('id', node['id'])
            feat.SetField('type', node['type'])
            feat.SetField('origin', node['origin'])
            layer.CreateFeature(feat)
            feat.Destroy()
            fid = fid + 1

        log.debug('nr of structures {0}'.format(fid))
        layer.SyncToDisk()
        # Clean up
        ds.Destroy()
        del struc_array
        del pool

    del node_array
    del branch_array
    del node
    del node_dict

    return (True, shapefile_name, source_file_last_modified, )
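A minimal read-back sketch (not part of the example above): assuming the
shapefile_name dict returned by the function, the generated layers can be
opened again with OGR to sanity-check the feature counts logged during
generation.

from osgeo import ogr  # older GDAL bindings use plain 'import ogr', as above


def count_shapefile_features(shapefile_name):
    """Print the feature count of each generated shapefile."""
    for kind, path in shapefile_name.items():
        ds = ogr.Open(path)
        if ds is None:
            print('%s: missing or unreadable' % kind)
            continue
        layer = ds.GetLayer(0)
        print('%s: %d features' % (kind, layer.GetFeatureCount()))
        ds = None  # releases the datasource handle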
Beispiel #58
0
    def scrape(self, chamber, session):
        # Unfortunately, you now have to request access to FTP.
        # This method of retrieving votes needs to be changed, or we
        # need to fall back to traditional web scraping.
        if session == '2009':
            # 2009 files have a different delimiter and naming scheme.
            vote_data_url = 'ftp://www.ncleg.net/Bill_Status/Vote Data 2009.zip'
            naming_scheme = '{session}{file_label}.txt'
            delimiter = ";"
        else:
            vote_data_url = 'ftp://www.ncleg.net/Bill_Status/Votes%s.zip' % session
            naming_scheme = '{file_label}_{session}.txt'
            delimiter = "\t"
        fname, resp = self.urlretrieve(vote_data_url)
        # fname = "/Users/brian/Downloads/Vote Data 2009.zip"
        zf = ZipFile(fname)

        chamber_code = 'H' if chamber == 'lower' else 'S'

        # Members_YYYY.txt: tab separated
        # 0: id (unique only in chamber)
        # 1: H or S
        # 2: member name
        # 3-5: county, district, party
        # 6: mmUserId
        member_file = zf.open(naming_scheme.format(file_label='Members', session=session))
        members = {}
        for line in member_file.readlines():
            data = line.split(delimiter)
            if data[1] == chamber_code:
                members[data[0]] = data[2]

        # Votes_YYYY.txt
        # 0: sequence number
        # 1: chamber (S/H)
        # 2: date
        # 3: prefix
        # 4: bill_id
        # 5: yes votes
        # 6: no votes
        # 7: excused absences
        # 8: excused votes
        # 9: didn't vote
        # 10: total yes+no
        # 11: sponsor
        # 12: reading info
        # 13: info
        # 20: PASSED/FAILED
        # 21: legislative day
        vote_file = zf.open(naming_scheme.format(file_label='Votes', session=session))
        bill_chambers = {'H':'lower', 'S':'upper'}
        votes = {}
        for line in vote_file.readlines():
            data = line.split(delimiter)
            if len(data) < 24:
                self.warning('line too short %s', data)
                continue
            if data[1] == chamber_code:
                date = datetime.datetime.strptime(data[2][:16],
                                                  '%Y-%m-%d %H:%M')
                if data[3][0] not in bill_chambers:
                    # skip votes that aren't on bills
                    self.log('skipping vote %s' % data[0])
                    continue

                votes[data[0]] = Vote(chamber, date, data[13],
                                      'PASS' in data[20],
                                      int(data[5]),
                                      int(data[6]),
                                      int(data[7])+int(data[8])+int(data[9]),
                                      bill_chamber=bill_chambers[data[3][0]],
                                      bill_id=data[3]+data[4], session=session)

        member_vote_file = zf.open(naming_scheme.format(file_label='MemberVotes', session=session))
        # 0: member id
        # 1: chamber (S/H)
        # 2: vote id
        # 3: vote chamber (always same as 1)
        # 4: vote (Y,N,E,X)
        # 5: pair ID (member)
        # 6: pair order
        # If a vote is paired then it should be counted as an 'other'
        for line in member_vote_file.readlines():
            data = line.split(delimiter)
            if data[1] == chamber_code:
                try:
                    member_voting = members[data[0]]
                except KeyError:
                    self.debug('Member %s not found.' % data[0])
                    continue
                try:
                    vote = votes[data[2]]
                except KeyError:
                    self.debug('Vote %s not found.' % data[2])
                    continue

                # -1 votes are Lt. Gov, not included in count, so we add them
                if data[4] == 'Y' and not data[5]:
                    if data[0] == '-1':
                        vote['yes_count'] += 1
                    vote.yes(member_voting)
                elif data[4] == 'N' and not data[5]:
                    if data[0] == '-1':
                        vote['no_count'] += 1
                    vote.no(member_voting)
                else:
                    # for some reason other_count is high for paired votes
                    if data[5]:
                        vote['other_count'] -= 1
                    # is either E: excused, X: no vote, or paired (doesn't count)
                    vote.other(member_voting)

        for vote in votes.itervalues():
            #vote.validate()
            vote.add_source(vote_data_url)
            self.save_vote(vote)

        # remove file
        zf.close()
        os.remove(fname)
Beispiel #59
0
def get_or_create_value_presentation_source(
    scenario, pt, get_animation_info,
    check_timestamp_original_sourcefile):
    '''
        Create PresentationSource objects for the given Flooding.Scenario
        and PresentationType.

        input:
            scenario: Flooding.Scenario object
            pt: PresentationType object
            get_animation_info: when True, (re)read animation info from
                the zipped HIS file
            check_timestamp_original_sourcefile: when True, compare the
                timestamp of the original source file with the stored one
        output:
            (success, source, animation, new) tuple

        If there are no Results to fill the presentation layer, or any other
        error occurs, the exception is logged and (False, None, None, None)
        is returned.
    '''
    log.debug('!!!get_or_create_value_presentation_source')
    dest_dir = settings.EXTERNAL_RESULT_MOUNTED_DIR
    presentation_dir = settings.EXTERNAL_PRESENTATION_MOUNTED_DIR
    try:
        animation = {}

        result = scenario.result_set.get(resulttype__presentationtype=pt)

        link_type, _ = SourceLinkType.objects.get_or_create(
            name='flooding_scenario_value')
        source_link, new = SourceLink.objects.get_or_create(
            link_id=str(scenario.id), sourcelinktype=link_type,
            type='fl_rlt_' + str(result.id))
        source, new = source_link.presentationsource.get_or_create(
            type=PresentationSource.SOURCE_TYPE_ZIPPED_HISFILE)

        if new or source.file_location is None:
            new = True
        elif not os.path.isfile(
            os.path.join(presentation_dir, rel_path(source.file_location))):
            new = True

        #source
        resultloc = result.resultloc.replace('\\', '/')
        source_file_name = os.path.join(dest_dir, resultloc)
        if check_timestamp_original_sourcefile and not new:
            log.debug('check timestamps')
            if os.path.isfile(source_file_name):
                source_file_last_modified = datetime.datetime.fromtimestamp(
                    os.stat(source_file_name.encode('utf8'))[8])
                #adding timestamps where this was not filled before
                if source.t_origin is None or source.t_source is None:
                    source.t_origin = source_file_last_modified
                    source.t_source = datetime.datetime.now()
                    source.save()
                elif (source_file_last_modified > source.t_origin):
                    log.info('source file has changed, recreate')
                    new = True

        output_dir_name = os.path.join(
            'flooding', 'scenario', str(scenario.id))
        filename = source_file_name.replace('\\', '/').split('/')[-1]
        output_file_name = os.path.join(output_dir_name, filename)

        if new:
            log.debug('copy file')
            #output
            output_path = os.path.join(presentation_dir, output_dir_name)
            if not os.path.isdir(output_path):
                os.makedirs(output_path)

            destination_file_name = os.path.join(
                presentation_dir, output_file_name)
            log.debug('source file is {0}'.format(source_file_name))
            log.debug('destination is {0}'.format(destination_file_name))

            try:
                if filename[-3:].lower() == 'zip':
                    copyfile(source_file_name, destination_file_name)
                else:
                    dest = ZipFile(destination_file_name.encode('utf8')[:-3] + '.zip',
                                   mode="w", compression=ZIP_DEFLATED)
                    with open(source_file_name, 'rb') as source_fh:
                        dest.writestr(filename, source_fh.read())
                    dest.close()
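                # Note: non-zip results (e.g. a bare HIS file) are wrapped in
                # a fresh zip here, so downstream code can always treat the
                # source as SOURCE_TYPE_ZIPPED_HISFILE and read it back with
                # ZipFile (see the get_animation_info block below).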

                source.file_location = output_file_name
                source.t_origin = datetime.datetime.fromtimestamp(
                    os.stat(source_file_name.encode('utf8'))[8])
                source.t_source = datetime.datetime.now()
                source.save()
                get_animation_info = True

            except IOError as e:
                source.delete()
                raise IOError(e)

        if get_animation_info:
            log.debug('get animation information')
            zip_name = os.path.join(presentation_dir, output_file_name)
            input_file = ZipFile(zip_name, "r")
            his = HISFile(
                Stream(input_file.read(input_file.filelist[0].filename)))
            input_file.close()

            delta = (his.get_datetime_of_timestep(1) -
                     his.get_datetime_of_timestep(0))
            animation['delta_timestep'] = (
                delta.days + float(delta.seconds) / (24 * 60 * 60))
            animation['firstnr'] = 0
            animation['lastnr'] = his.size() - 1
            animation['startnr'] = 0
    except Exception as e:
        log.error('error generating value source')
        log.error(','.join(map(str, e.args)))
        return False, None, None, None

    return True, source, animation, new
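A minimal usage sketch (caller names are hypothetical): the function returns
a (success, source, animation, new) tuple, so a task runner could call it
roughly like this.

# scenario and pt are assumed to be existing Flooding model instances
ok, source, animation, new = get_or_create_value_presentation_source(
    scenario, pt,
    get_animation_info=True,
    check_timestamp_original_sourcefile=True)
if ok and animation:
    log.info('his file spans frames %s-%s, delta %s days',
             animation['firstnr'], animation['lastnr'],
             animation['delta_timestep'])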
Beispiel #60
0
    def writeStoryImpl(self, out):

        ## Python 2.5 ZipFile is rather more primitive than later
        ## versions.  It can operate on a file, or on a StringIO, but
        ## not on an open stream.  OTOH, I suspect we would have had
        ## problems with closing and opening again to change the
        ## compression type anyway.
        zipio = StringIO.StringIO()

        ## mimetype must be first file and uncompressed.  Python 2.5
        ## ZipFile can't change compression type file-by-file, so we
        ## have to close and re-open
        outputepub = ZipFile(zipio, 'w', compression=ZIP_STORED)
        outputepub.debug = 3
        outputepub.writestr('mimetype', 'application/epub+zip')
        outputepub.close()

        ## Re-open file for content.
        outputepub = ZipFile(zipio, 'a', compression=ZIP_DEFLATED)
        outputepub.debug = 3
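        ## Note: on Python 2.7+ / 3.x, ZipFile.writestr() accepts a per-entry
        ## compress_type, so the close/re-open dance above could be avoided,
        ## e.g. (sketch only, not used here):
        ##   outputepub = ZipFile(zipio, 'w', compression=ZIP_DEFLATED)
        ##   outputepub.writestr('mimetype', 'application/epub+zip',
        ##                       compress_type=ZIP_STORED)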

        ## Create META-INF/container.xml file.  The only thing it does is
        ## point to content.opf
        containerdom = getDOMImplementation().createDocument(
            None, "container", None)
        containertop = containerdom.documentElement
        containertop.setAttribute("version", "1.0")
        containertop.setAttribute(
            "xmlns", "urn:oasis:names:tc:opendocument:xmlns:container")
        rootfiles = containerdom.createElement("rootfiles")
        containertop.appendChild(rootfiles)
        rootfiles.appendChild(
            newTag(
                containerdom, "rootfile", {
                    "full-path": "content.opf",
                    "media-type": "application/oebps-package+xml"
                }))
        outputepub.writestr("META-INF/container.xml",
                            containerdom.toxml(encoding='utf-8'))
        containerdom.unlink()
        del containerdom
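        ## For reference, the container.xml written above is (roughly):
        ##   <?xml version="1.0" encoding="utf-8"?>
        ##   <container version="1.0"
        ##              xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
        ##     <rootfiles>
        ##       <rootfile full-path="content.opf"
        ##                 media-type="application/oebps-package+xml"/>
        ##     </rootfiles>
        ##   </container>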

        ## Epub has two metadata files with real data.  We're putting
        ## them in content.opf (pointed to by META-INF/container.xml)
        ## and toc.ncx (pointed to by content.opf)

        ## content.opf contains metadata, a 'manifest' list of all
        ## other included files, and another 'spine' list of the items in the
        ## file

        uniqueid = 'fanficfare-uid:%s-u%s-s%s' % (
            self.getMetadata('site'), self.story.getList('authorId')[0],
            self.getMetadata('storyId'))
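        ## e.g. 'fanficfare-uid:examplesite-u1234-s5678' (site/author/story
        ## ids hypothetical); the same id is reused below for dc:identifier
        ## and the toc.ncx dtb:uid.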

        contentdom = getDOMImplementation().createDocument(
            None, "package", None)
        package = contentdom.documentElement
        package.setAttribute("version", "2.0")
        package.setAttribute("xmlns", "http://www.idpf.org/2007/opf")
        package.setAttribute("unique-identifier", "fanficfare-uid")
        metadata = newTag(contentdom,
                          "metadata",
                          attrs={
                              "xmlns:dc": "http://purl.org/dc/elements/1.1/",
                              "xmlns:opf": "http://www.idpf.org/2007/opf"
                          })
        package.appendChild(metadata)

        metadata.appendChild(
            newTag(contentdom,
                   "dc:identifier",
                   text=uniqueid,
                   attrs={"id": "fanficfare-uid"}))

        if self.getMetadata('title'):
            metadata.appendChild(
                newTag(contentdom, "dc:title", text=self.getMetadata('title')))

        if self.getMetadata('author'):
            if self.story.isList('author'):
                for auth in self.story.getList('author'):
                    metadata.appendChild(
                        newTag(contentdom,
                               "dc:creator",
                               attrs={"opf:role": "aut"},
                               text=auth))
            else:
                metadata.appendChild(
                    newTag(contentdom,
                           "dc:creator",
                           attrs={"opf:role": "aut"},
                           text=self.getMetadata('author')))

        metadata.appendChild(
            newTag(contentdom,
                   "dc:contributor",
                   text="FanFicFare [https://github.com/JimmXinu/FanFicFare]",
                   attrs={"opf:role": "bkp"}))
        metadata.appendChild(newTag(contentdom, "dc:rights", text=""))
        if self.story.getMetadata('langcode'):
            metadata.appendChild(
                newTag(contentdom,
                       "dc:language",
                       text=self.story.getMetadata('langcode')))
        else:
            metadata.appendChild(newTag(contentdom, "dc:language", text='en'))

        #  published, created, updated, calibre
        #  Leave calling self.story.getMetadataRaw directly in case date format changes.
        if self.story.getMetadataRaw('datePublished'):
            metadata.appendChild(
                newTag(contentdom,
                       "dc:date",
                       attrs={"opf:event": "publication"},
                       text=self.story.getMetadataRaw(
                           'datePublished').strftime("%Y-%m-%d")))

        if self.story.getMetadataRaw('dateCreated'):
            metadata.appendChild(
                newTag(contentdom,
                       "dc:date",
                       attrs={"opf:event": "creation"},
                       text=self.story.getMetadataRaw('dateCreated').strftime(
                           "%Y-%m-%d")))

        if self.story.getMetadataRaw('dateUpdated'):
            metadata.appendChild(
                newTag(contentdom,
                       "dc:date",
                       attrs={"opf:event": "modification"},
                       text=self.story.getMetadataRaw('dateUpdated').strftime(
                           "%Y-%m-%d")))
            metadata.appendChild(
                newTag(contentdom,
                       "meta",
                       attrs={
                           "name":
                           "calibre:timestamp",
                           "content":
                           self.story.getMetadataRaw('dateUpdated').strftime(
                               "%Y-%m-%dT%H:%M:%S")
                       }))

        series = self.story.getMetadataRaw('series')
        if series and self.getConfig('calibre_series_meta'):
            series_index = "0.0"
            if '[' in series:
                logger.debug(series)
                ## assumed "series [series_index]"
                series_index = series[series.index(' [') + 2:-1]
                series = series[:series.index(' [')]

                ## calibre always outputs a series_index and it's
                ## always a float with 1 or 2 decimals.  FFF usually
                ## has either an integer or no index. (injected
                ## calibre series is the only float at this time)
                series_index = "%.2f" % float(series_index)

            metadata.appendChild(
                newTag(contentdom,
                       "meta",
                       attrs={
                           "name": "calibre:series",
                           "content": series
                       }))
            metadata.appendChild(
                newTag(contentdom,
                       "meta",
                       attrs={
                           "name": "calibre:series_index",
                           "content": series_index
                       }))

        if self.getMetadata('description'):
            metadata.appendChild(
                newTag(contentdom,
                       "dc:description",
                       text=self.getMetadata('description')))

        for subject in self.story.getSubjectTags():
            metadata.appendChild(newTag(contentdom, "dc:subject",
                                        text=subject))

        if self.getMetadata('site'):
            metadata.appendChild(
                newTag(contentdom,
                       "dc:publisher",
                       text=self.getMetadata('site')))

        if self.getMetadata('storyUrl'):
            metadata.appendChild(
                newTag(contentdom,
                       "dc:identifier",
                       attrs={"opf:scheme": "URL"},
                       text=self.getMetadata('storyUrl')))
            metadata.appendChild(
                newTag(contentdom,
                       "dc:source",
                       text=self.getMetadata('storyUrl')))

        ## end of metadata, create manifest.
        items = []  # list of (id, href, type, title) tuples(all strings)
        itemrefs = []  # list of strings -- idrefs from .opfs' spines
        items.append(("ncx", "toc.ncx", "application/x-dtbncx+xml",
                      None))  ## we'll generate the toc.ncx file,
        ## but it needs to be in the items manifest.

        guide = None
        coverIO = None

        coverimgid = "image0000"
        if not self.story.cover and self.story.oldcover:
            logger.debug(
                "writer_epub: no new cover, has old cover, write image.")
            (oldcoverhtmlhref, oldcoverhtmltype, oldcoverhtmldata,
             oldcoverimghref, oldcoverimgtype,
             oldcoverimgdata) = self.story.oldcover
            outputepub.writestr(oldcoverhtmlhref, oldcoverhtmldata)
            outputepub.writestr(oldcoverimghref, oldcoverimgdata)

            coverimgid = "image0"
            items.append((coverimgid, oldcoverimghref, oldcoverimgtype, None))
            items.append(("cover", oldcoverhtmlhref, oldcoverhtmltype, None))
            itemrefs.append("cover")
            metadata.appendChild(
                newTag(contentdom, "meta", {
                    "content": "image0",
                    "name": "cover"
                }))
            guide = newTag(contentdom, "guide")
            guide.appendChild(
                newTag(contentdom,
                       "reference",
                       attrs={
                           "type": "cover",
                           "title": "Cover",
                           "href": oldcoverhtmlhref
                       }))

        if self.getConfig('include_images'):
            imgcount = 0
            for imgmap in self.story.getImgUrls():
                imgfile = "OEBPS/" + imgmap['newsrc']
                outputepub.writestr(imgfile, imgmap['data'])
                items.append(
                    ("image%04d" % imgcount, imgfile, imgmap['mime'], None))
                imgcount += 1
                if 'cover' in imgfile:
                    # make sure coverimgid is set to the cover, not
                    # just the first image.
                    coverimgid = items[-1][0]

        items.append(("style", "OEBPS/stylesheet.css", "text/css", None))

        if self.story.cover:
            # Note that the id of the cover xhtml *must* be 'cover'
            # for it to work on Nook.
            items.append(
                ("cover", "OEBPS/cover.xhtml", "application/xhtml+xml", None))
            itemrefs.append("cover")
            #
            # <meta name="cover" content="cover.jpg"/>
            metadata.appendChild(
                newTag(contentdom, "meta", {
                    "content": coverimgid,
                    "name": "cover"
                }))
            # cover stuff for later:
            # at end of <package>:
            # <guide>
            # <reference type="cover" title="Cover" href="Text/cover.xhtml"/>
            # </guide>
            guide = newTag(contentdom, "guide")
            guide.appendChild(
                newTag(contentdom,
                       "reference",
                       attrs={
                           "type": "cover",
                           "title": "Cover",
                           "href": "OEBPS/cover.xhtml"
                       }))

            if self.hasConfig("cover_content"):
                COVER = string.Template(self.getConfig("cover_content"))
            else:
                COVER = self.EPUB_COVER
            coverIO = StringIO.StringIO()
            coverIO.write(
                COVER.substitute(
                    dict(self.story.getAllMetadata().items() +
                         {'coverimg': self.story.cover}.items())))

        if self.getConfig("include_titlepage"):
            items.append(("title_page", "OEBPS/title_page.xhtml",
                          "application/xhtml+xml", "Title Page"))
            itemrefs.append("title_page")
        if len(self.story.getChapters()) > 1 and self.getConfig(
                "include_tocpage") and not self.metaonly:
            items.append(("toc_page", "OEBPS/toc_page.xhtml",
                          "application/xhtml+xml", "Table of Contents"))
            itemrefs.append("toc_page")

        ## save where to insert logpage.
        logpage_indices = (len(items), len(itemrefs))

        dologpage = ( self.getConfig("include_logpage") == "smart" and \
                          (self.story.logfile or self.story.getMetadataRaw("status") == "In-Progress") )  \
                     or self.getConfig("include_logpage") == "true"

        ## collect chapter urls and file names for internalize_text_links option.
        chapurlmap = {}
        for index, chap in enumerate(self.story.getChapters(fortoc=True)):
            if chap['html']:
                i = index + 1
                items.append(("file%s" % chap['index04'],
                              "OEBPS/file%s.xhtml" % chap['index04'],
                              "application/xhtml+xml", chap['title']))
                itemrefs.append("file%s" % chap['index04'])
                chapurlmap[chap['url']] = "file%s.xhtml" % chap[
                    'index04']  # url -> relative epub file name.

        if dologpage:
            if self.getConfig("logpage_at_end") == "true":
                ## insert logpage after chapters.
                logpage_indices = (len(items), len(itemrefs))
            items.insert(logpage_indices[0],
                         ("log_page", "OEBPS/log_page.xhtml",
                          "application/xhtml+xml", "Update Log"))
            itemrefs.insert(logpage_indices[1], "log_page")

        manifest = contentdom.createElement("manifest")
        package.appendChild(manifest)
        for item in items:
            (id, href, type, title) = item
            manifest.appendChild(
                newTag(contentdom,
                       "item",
                       attrs={
                           'id': id,
                           'href': href,
                           'media-type': type
                       }))

        spine = newTag(contentdom, "spine", attrs={"toc": "ncx"})
        package.appendChild(spine)
        for itemref in itemrefs:
            spine.appendChild(
                newTag(contentdom,
                       "itemref",
                       attrs={
                           "idref": itemref,
                           "linear": "yes"
                       }))
        # guide only exists if there's a cover.
        if guide:
            package.appendChild(guide)

        # write content.opf to zip.
        contentxml = contentdom.toxml(encoding='utf-8')

        # tweak for brain damaged Nook STR.  Nook insists on name before content.
        contentxml = contentxml.replace(
            '<meta content="%s" name="cover"/>' % coverimgid,
            '<meta name="cover" content="%s"/>' % coverimgid)
        outputepub.writestr("content.opf", contentxml)

        contentdom.unlink()
        del contentdom

        ## create toc.ncx file
        tocncxdom = getDOMImplementation().createDocument(None, "ncx", None)
        ncx = tocncxdom.documentElement
        ncx.setAttribute("version", "2005-1")
        ncx.setAttribute("xmlns", "http://www.daisy.org/z3986/2005/ncx/")
        head = tocncxdom.createElement("head")
        ncx.appendChild(head)
        head.appendChild(
            newTag(tocncxdom,
                   "meta",
                   attrs={
                       "name": "dtb:uid",
                       "content": uniqueid
                   }))
        head.appendChild(
            newTag(tocncxdom,
                   "meta",
                   attrs={
                       "name": "dtb:depth",
                       "content": "1"
                   }))
        head.appendChild(
            newTag(tocncxdom,
                   "meta",
                   attrs={
                       "name": "dtb:totalPageCount",
                       "content": "0"
                   }))
        head.appendChild(
            newTag(tocncxdom,
                   "meta",
                   attrs={
                       "name": "dtb:maxPageNumber",
                       "content": "0"
                   }))

        docTitle = tocncxdom.createElement("docTitle")
        docTitle.appendChild(
            newTag(tocncxdom, "text", text=self.getMetadata('title')))
        ncx.appendChild(docTitle)

        tocnavMap = tocncxdom.createElement("navMap")
        ncx.appendChild(tocnavMap)

        # <navPoint id="<id>" playOrder="<risingnumberfrom0>">
        #   <navLabel>
        #     <text><chapter title></text>
        #   </navLabel>
        #   <content src="<chapterfile>"/>
        # </navPoint>
        index = 0
        for item in items:
            (id, href, type, title) = item
            # only items meant to be skipped (cover.xhtml, images, toc.ncx, stylesheet.css) should have no title.
            if title:
                navPoint = newTag(tocncxdom,
                                  "navPoint",
                                  attrs={
                                      'id': id,
                                      'playOrder': unicode(index)
                                  })
                tocnavMap.appendChild(navPoint)
                navLabel = newTag(tocncxdom, "navLabel")
                navPoint.appendChild(navLabel)
                ## the xml library will re-escape as needed.
                navLabel.appendChild(
                    newTag(tocncxdom, "text", text=stripHTML(title)))
                navPoint.appendChild(
                    newTag(tocncxdom, "content", attrs={"src": href}))
                index = index + 1

        # write toc.ncx to zip file
        outputepub.writestr("toc.ncx", tocncxdom.toxml(encoding='utf-8'))
        tocncxdom.unlink()
        del tocncxdom

        # write stylesheet.css file.
        outputepub.writestr(
            "OEBPS/stylesheet.css",
            self.EPUB_CSS.substitute(self.story.getAllMetadata()))

        # write title page.
        if self.getConfig("titlepage_use_table"):
            TITLE_PAGE_START = self.EPUB_TABLE_TITLE_PAGE_START
            TITLE_ENTRY = self.EPUB_TABLE_TITLE_ENTRY
            WIDE_TITLE_ENTRY = self.EPUB_TABLE_TITLE_WIDE_ENTRY
            NO_TITLE_ENTRY = self.EPUB_TABLE_NO_TITLE_ENTRY
            TITLE_PAGE_END = self.EPUB_TABLE_TITLE_PAGE_END
        else:
            TITLE_PAGE_START = self.EPUB_TITLE_PAGE_START
            TITLE_ENTRY = self.EPUB_TITLE_ENTRY
            WIDE_TITLE_ENTRY = self.EPUB_TITLE_ENTRY  # same, only wide in tables.
            NO_TITLE_ENTRY = self.EPUB_NO_TITLE_ENTRY
            TITLE_PAGE_END = self.EPUB_TITLE_PAGE_END

        if coverIO:
            outputepub.writestr("OEBPS/cover.xhtml", coverIO.getvalue())
            coverIO.close()

        titlepageIO = StringIO.StringIO()
        self.writeTitlePage(out=titlepageIO,
                            START=TITLE_PAGE_START,
                            ENTRY=TITLE_ENTRY,
                            WIDE_ENTRY=WIDE_TITLE_ENTRY,
                            END=TITLE_PAGE_END,
                            NO_TITLE_ENTRY=NO_TITLE_ENTRY)
        if titlepageIO.getvalue():  # will be false if no title page.
            outputepub.writestr("OEBPS/title_page.xhtml",
                                titlepageIO.getvalue())
        titlepageIO.close()

        # write toc page.
        tocpageIO = StringIO.StringIO()
        self.writeTOCPage(tocpageIO, self.EPUB_TOC_PAGE_START,
                          self.EPUB_TOC_ENTRY, self.EPUB_TOC_PAGE_END)
        if tocpageIO.getvalue():  # will be false if no toc page.
            outputepub.writestr("OEBPS/toc_page.xhtml", tocpageIO.getvalue())
        tocpageIO.close()

        if dologpage:
            # write log page.
            logpageIO = StringIO.StringIO()
            self.writeLogPage(logpageIO)
            outputepub.writestr("OEBPS/log_page.xhtml", logpageIO.getvalue())
            logpageIO.close()

        if self.hasConfig('chapter_start'):
            CHAPTER_START = string.Template(self.getConfig("chapter_start"))
        else:
            CHAPTER_START = self.EPUB_CHAPTER_START

        if self.hasConfig('chapter_end'):
            CHAPTER_END = string.Template(self.getConfig("chapter_end"))
        else:
            CHAPTER_END = self.EPUB_CHAPTER_END

        for index, chap in enumerate(
                self.story.getChapters()):  # (url,title,html)
            if chap['html']:
                chap_data = chap['html']
                if self.getConfig('internalize_text_links'):
                    soup = bs4.BeautifulSoup(chap['html'], 'html5lib')
                    changed = False
                    for alink in soup.find_all('a'):
                        if alink.has_attr(
                                'href') and alink['href'] in chapurlmap:
                            alink['href'] = chapurlmap[alink['href']]
                            changed = True
                    if changed:
                        chap_data = unicode(soup)
                        # Don't want html, head or body tags in
                        # chapter html--bs4 insists on adding them.
                        chap_data = re.sub(r"</?(html|head|body)[^>]*>\r?\n?",
                                           "", chap_data)

                #logger.debug('Writing chapter text for: %s' % chap.title)
                chap['url'] = removeEntities(chap['url'])
                chap['chapter'] = removeEntities(chap['chapter'])
                chap['title'] = removeEntities(chap['title'])
                chap['origchapter'] = removeEntities(chap['origtitle'])
                chap['tocchapter'] = removeEntities(chap['toctitle'])
                # escape double quotes in all vals.
                for k, v in chap.items():
                    if isinstance(v, basestring):
                        chap[k] = v.replace('"', '&quot;')
                fullhtml = CHAPTER_START.substitute(chap) + \
                    chap_data.strip() + \
                    CHAPTER_END.substitute(chap)
                # strip to avoid ever-growing numbers of newlines.
                # ffnet (& maybe others) gives the whole chapter text
                # as one line, which causes problems for nook (at
                # least) when the chapter size starts getting big
                # (200k+).
                fullhtml = re.sub(r'(</p>|<br ?/>)\n*', r'\1\n', fullhtml)

                outputepub.writestr("OEBPS/file%s.xhtml" % chap['index04'],
                                    fullhtml.encode('utf-8'))
                del fullhtml

        if self.story.calibrebookmark:
            outputepub.writestr("META-INF/calibre_bookmarks.txt",
                                self.story.calibrebookmark)

        # declare all the files as created on Windows; otherwise, when the
        # epub is built on appengine, Windows unzips the files with 000 perms.
        for zf in outputepub.filelist:
            zf.create_system = 0
        outputepub.close()
        out.write(zipio.getvalue())
        zipio.close()