def test_fix_interface_for_file(self):
    """After running the fix_base_classes view, a 'File' object provides IFile."""
    self.obj.portal_type = 'File'
    self.catalog.reindexObject(self.obj)
    fixer = self.portal.restrictedTraverse('fix_base_classes')
    fixer()
    self.assertTrue(IFile.providedBy(self.obj))
def friendly_type_name(obj):
    """Index for the friendly name of any content type.

    :param obj: The Plone content object to index
    :type obj: IContentish
    :return: Friendly content type name
    :rtype: str
    """
    default_name = obj.Type()
    # If the object is a file get the friendly name of the mime type
    if IFile.providedBy(obj):
        mtr = api.portal.get_tool(name='mimetypes_registry')
        primary_field_info = IPrimaryFieldInfo(obj)
        if not primary_field_info.value:
            return default_name
        if hasattr(primary_field_info.value, 'contentType'):
            contenttype = primary_field_info.value.contentType
            try:
                mimetypeitem = mtr.lookup(contenttype)
            except MimeTypeException as msg:
                # Unknown/broken mimetype: log and fall back to the
                # portal type's default name.
                # FIX: logger.warn is a deprecated alias of logger.warning.
                logger.warning('mimetype lookup failed for %s. Error: %s',
                               obj.absolute_url(), str(msg))
                return default_name
            mimetype_name = mimetypeitem[0].name()
            # Only use the registry name when it is actually friendlier
            # than the raw content type string.
            if mimetype_name != contenttype:
                return mimetype_name
    elif IUserProfile.providedBy(obj):
        return 'Person'
    return default_name
def is_right_type(self, context=None):
    """Return True when *context* is a File whose MIME type is convertible.

    Only applies while the feature is not enabled; otherwise returns False.

    :param context: content object to check; defaults to ``self.context``
    :rtype: bool
    """
    if context is None:
        context = self.context
    # Keep the original strict ``== False`` comparison: it treats a
    # non-False falsy return (e.g. None) from enabled() differently
    # than ``not self.enabled()`` would.
    if self.enabled() == False and IFile.providedBy(context):
        return context.file.contentType in (
            'application/pdf',
            'application/x-pdf',
            'image/pdf',
            'application/vnd.oasis.opendocument.text-master',
            'application/vnd.oasis.opendocument.text',
            'application/vnd.wordperfect',
            'application/x-wordperfect',
            'application/vnd.sun.xml.writer',
            'application/wordperfect',
            'application/vnd.sun.xml.impress',
            'application/vnd.oasis.opendocument.presentation',
            'application/vnd.ms-powerpoint',
            # BUG FIX: these two entries were fused into the single string
            # 'application/powerpoint, application/mspowerpoint', which
            # could never match any real content type.
            'application/powerpoint',
            'application/mspowerpoint',
            'application/x-mspowerpoint',
            'application/rtf',
            'application/msword',
        )
    else:
        return False
def is_printable(self):
    """Whether the current context can be printed."""
    context = self.context
    if ILink.providedBy(context):
        # Links have nothing to print.
        return False
    if IFile.providedBy(context):
        # Files are printable only when a preview conversion is possible.
        return IIconifiedPreview(context).is_convertible()
    # Everything else (images included) is printable.
    return True
def Format(self):
    """Return the MIME type of the wrapped object.

    Falls back to the object's ``format`` attribute, or
    application/octet-stream when absent.
    """
    # TODO: protect against acquisition here?
    obj = self.getObject()
    if not IFile.providedBy(obj):
        return getattr(obj, 'format', 'application/octet-stream')
    return obj.file.contentType
def test_factory(self):
    """The FTI factory for emc.bokeh.codefile creates IFile objects."""
    fti = queryUtility(IDexterityFTI, name='emc.bokeh.codefile')
    created = createObject(fti.factory)
    self.assertTrue(IFile.providedBy(created))
def test_factory(self):
    """The FTI factory for 'File' creates objects providing IFile."""
    fti = queryUtility(
        IDexterityFTI,
        name='File'
    )
    factory = fti.factory
    new_object = createObject(factory)
    # FIX: failUnless is a deprecated unittest alias (removed in
    # Python 3.12); assertTrue is the supported spelling.
    self.assertTrue(IFile.providedBy(new_object))
def _download_url(self):
    """Return the download url (None by default) for the current object"""
    portal_url = api.portal.get_tool('portal_url')
    template = u'{url}/@@download'
    # NOTE(review): membership is tested on self.obj but the URL is built
    # from self.context — looks inconsistent; confirm both refer to the
    # same object in practice.
    if IFile.providedBy(self.obj) or IImage.providedBy(self.obj):
        return template.format(url=portal_url.getRelativeUrl(self.context))
def attachable_objs(self):
    """ We can only attach files.  When implemented, we will also
        attach the PDF version of Documents
    """
    def _is_attachable(candidate):
        return IFile.providedBy(candidate) or IImage.providedBy(candidate)

    return [candidate for candidate in self.items if _is_attachable(candidate)]
def test_file_is_migrated(self):
    """FileMigrator turns an ATFile into a new Dexterity object providing IFile."""
    from Products.ATContentTypes.content.file import ATFile
    from plone.app.contenttypes.interfaces import IFile
    from plone.app.contenttypes.migration.migration import FileMigrator

    at_file = self.createATCTobject(ATFile, 'file')
    self.get_migrator(at_file, FileMigrator).migrate()
    migrated = self.portal['file']
    self.assertTrue(IFile.providedBy(migrated))
    self.assertFalse(at_file is migrated)
def test_blob_file_is_migrated(self):
    """BlobFileMigrator replaces an AT blob file with a Dexterity IFile object."""
    from plone.app.contenttypes.interfaces import IFile
    from plone.app.contenttypes.migration.migration import BlobFileMigrator

    self.portal.invokeFactory('File', 'file')
    at_file = self.portal['file']
    applyProfile(self.portal, 'plone.app.contenttypes:default')
    self.get_migrator(at_file, BlobFileMigrator).migrate()
    migrated = self.portal['file']
    self.assertTrue(IFile.providedBy(migrated))
    self.assertFalse(at_file is migrated)
def setUp(self):
    """Create one Dexterity File per fixture PDF, then validate and log out."""
    self.portal = self.layer['portal']
    setRoles(self.portal, TEST_USER_ID, ['Manager'])
    login(self.portal, TEST_USER_NAME)
    # Create files
    for uri in self.layer['pdf_files']:
        content_id = uri.split('/').pop()
        # FIX: open PDFs in binary mode and close the handle
        # deterministically; the original leaked an open file object
        # and read binary data in text mode.
        with open(uri, 'rb') as pdf:
            data = pdf.read()
        blobfile = NamedBlobFile(filename=unicode(content_id), data=data)
        dxfile = createContentInContainer(
            self.portal, 'File', id=content_id, file=blobfile)
        self.assertTrue(IFile.providedBy(dxfile))
    self._validate_created_files()
    logout()
def setUp(self):
    """Create one 'File' object per fixture PDF via invokeFactory, validate, log out."""
    self.portal = self.layer['portal']
    setRoles(self.portal, TEST_USER_ID, ['Manager'])
    login(self.portal, TEST_USER_NAME)
    # Create files
    for uri in self.layer['pdf_files']:
        content_id = uri.split('/').pop()
        new_id = self.portal.invokeFactory('File', content_id)
        dxfile = self.portal[new_id]
        # FIX: open PDFs in binary mode and close the handle
        # deterministically; the original leaked an open file object
        # and read binary data in text mode.
        with open(uri, 'rb') as pdf:
            data = pdf.read()
        dxfile.file = NamedBlobFile(filename=unicode(content_id), data=data)
        self.assertTrue(IFile.providedBy(dxfile))
    self._validate_created_files()
    logout()
def __call__(self):
    """Return True when the event object's filename ends with the
    configured file extension.

    Supports Dexterity files (IFile) and, when available, Archetypes
    file content (IFileContent).
    """
    obj = self.event.object
    if IFile.providedBy(obj):
        # Dexterity: filename lives on the named blob 'file' field.
        base_unit = getattr(obj, 'file', None)
        name = getattr(base_unit, 'filename', None)
    elif IFileContent is None:
        # Archetypes support is an optional import that failed.
        return False
    elif not IFileContent.providedBy(obj):
        return False
    else:
        base_unit = obj.getFile()
        get_filename = getattr(base_unit, 'getFilename', None)
        if not get_filename:
            return False
        name = get_filename()
    if not name:
        # FIX: a Dexterity File without a blob (or without a filename)
        # previously crashed on name.rfind below.
        return False
    extension = name[name.rfind('.') + 1:]
    return extension == self.element.file_extension
def myViewSource(self, vtype):
    """Return the source text of the view template for this document.

    Candidate template names are built from the current application,
    the document-type id and *vtype*, then looked up (with acquisition)
    on the document-type object; the first match that is a File or a
    page template provides the returned source.

    :param vtype: view-type suffix used to build candidate names
    :return: template source text, or '' when no candidate matches
    """
    doc = self.context
    dto = doc.docTypeObj()
    app = self.currentApplication()
    # Default doc-type id: lowercased portal type, overridable by an
    # explicit typeName() or by the doc-type object's settings.
    dtid = doc.getPortalTypeName().lower()
    if shasattr(doc, "typeName"):
        dtid = doc.typeName()
    if dto:
        dtid = dto.customViewTemplate
        if not dtid:
            dtid = dto.getId()
    else:
        dto = doc  # so that we can acquire stuff below
    data = ""
    # Candidate names, most specific first.
    if app:
        names = ["%s_%s_%s" % (app, dtid, vtype),
                 "%s_%s" % (app, vtype),
                 "%s_%s" % (dtid, vtype),
                 "doc_%s" % vtype]
    else:
        names = ["%s_%s" % (dtid, vtype),
                 "doc_%s" % vtype]
    for n in names:
        if shasattr(dto, n, acquire=True):
            o = aq_base(getattr(dto, n))
            if IFile.providedBy(o):
                # NOTE(review): the blob handle opened here is never
                # closed explicitly — confirm whether that matters.
                f = o.file.open()
                data = f.read()
            elif IPageTemplateSubclassing.providedBy(o):
                data = o.read()
            # First attribute hit wins, even if it yielded no data.
            return data
    return data
def friendly_type_name(obj):
    """
    Index for the friendly name of any content type

    :param obj: The Plone content object to index
    :type obj: IContentish
    :return: Friendly content type name
    :rtype: str
    """
    default_name = obj.Type()
    # If the object is a file get the friendly name of the mime type
    if IFile.providedBy(obj):
        mtr = api.portal.get_tool(name='mimetypes_registry')
        primary_field_info = IPrimaryFieldInfo(obj)
        if not primary_field_info.value:
            return default_name
        if hasattr(primary_field_info.value, "contentType"):
            contenttype = primary_field_info.value.contentType
            try:
                mimetypeitem = mtr.lookup(contenttype)
            except MimeTypeException as msg:
                # Unknown/broken mimetype: log and fall back to the
                # portal type's default name.
                # FIX: logger.warn is a deprecated alias of logger.warning.
                logger.warning(
                    'mimetype lookup failed for %s. Error: %s',
                    obj.absolute_url(),
                    str(msg)
                )
                return default_name
            mimetype_name = mimetypeitem[0].name()
            # Only use the registry name when it is actually friendlier
            # than the raw content type string.
            if mimetype_name != contenttype:
                return mimetype_name
    elif IUserProfile.providedBy(obj):
        return 'Person'
    return default_name
def _createProvenance(self, result):
    """Build a W3C PROV provenance graph for *result* and store it,
    serialized as Turtle, on the result's IProvenanceData adapter.

    The graph records the current user and the job script as agents,
    the job run as an activity with its parameters, and every input
    dataset as an entity used by the activity.
    """
    provdata = IProvenanceData(result)
    from rdflib import URIRef, Literal, Namespace, Graph
    from rdflib.namespace import RDF, RDFS, FOAF, DCTERMS, XSD
    from rdflib.resource import Resource
    PROV = Namespace(u"http://www.w3.org/ns/prov#")
    BCCVL = Namespace(u"http://ns.bccvl.org.au/")
    LOCAL = Namespace(u"urn:bccvl:")
    graph = Graph()
    # the user is our agent
    member = api.user.get_current()
    username = member.getProperty('fullname') or member.getId()
    user = Resource(graph, LOCAL['user'])
    user.add(RDF['type'], PROV['Agent'])
    user.add(RDF['type'], FOAF['Person'])
    user.add(FOAF['name'], Literal(username))
    user.add(FOAF['mbox'],
             URIRef('mailto:{}'.format(member.getProperty('email'))))
    # add software as agent
    software = Resource(graph, LOCAL['software'])
    software.add(RDF['type'], PROV['Agent'])
    software.add(RDF['type'], PROV['SoftwareAgent'])
    software.add(FOAF['name'], Literal('BCCVL Job Script'))
    # script content is stored somewhere on result and will be exported
    # with zip? ... or store along with pstats.json ? hidden from user
    # -> execenvironment after import -> log output?
    # -> source code ... maybe some link expression? stored on result?
    #    separate entity?
    activity = Resource(graph, LOCAL['activity'])
    activity.add(RDF['type'], PROV['Activity'])
    # TODO: this is rather queued or created time for this activity ...
    #       could capture real start time on running status update
    #       (or start transfer)
    now = datetime.now().replace(microsecond=0)
    activity.add(PROV['startedAtTime'],
                 Literal(now.isoformat(), datatype=XSD['dateTime']))
    activity.add(PROV['hasAssociationWith'], user)
    activity.add(PROV['hasAssociationWith'], software)
    # add job parameters to activity
    for idx, (key, value) in enumerate(result.job_params.items()):
        param = Resource(graph, LOCAL[u'param_{}'.format(idx)])
        activity.add(BCCVL['algoparam'], param)
        param.add(BCCVL['name'], Literal(key))
        # We have only dataset references as parameters
        if key in ('future_climate_datasets',):
            # dict of dataset uuid -> layers
            for dsuuid in value.keys():
                param.add(BCCVL['value'], LOCAL[dsuuid])
        elif key in ('species_distribution_models',):
            # single dataset uuid
            param.add(BCCVL['value'], LOCAL[value])
        else:
            param.add(BCCVL['value'], Literal(value))
    # iterate over all input datasets and add them as entities
    for key in ('species_distribution_models',):
        dsbrain = uuidToCatalogBrain(result.job_params[key])
        if not dsbrain:
            continue
        ds = dsbrain.getObject()
        dsprov = Resource(graph, LOCAL[result.job_params[key]])
        dsprov.add(RDF['type'], PROV['Entity'])
        # dsprov.add(PROV['..'], Literal(''))
        dsprov.add(DCTERMS['creator'], Literal(ds.Creator()))
        dsprov.add(DCTERMS['title'], Literal(ds.title))
        dsprov.add(DCTERMS['description'], Literal(ds.description))
        dsprov.add(DCTERMS['rights'], Literal(ds.rights))  # ds.rightsstatement
        dsprov.add(DCTERMS['format'], Literal(ds.file.contentType))
        # location / source
        # graph.add(uri, DCTERMS['source'], Literal(''))
        # TODO: genre ...
        # TODO: resolution
        # species metadata
        md = IBCCVLMetadata(ds)
        dsprov.add(BCCVL['scientificName'],
                   Literal(md['species']['scientificName']))
        dsprov.add(BCCVL['taxonID'], URIRef(md['species']['taxonID']))
        # ... species data, ... species id
        for layer in md.get('layers_used', ()):
            dsprov.add(BCCVL['layer'], LOCAL[layer])
        # link with activity
        activity.add(PROV['used'], dsprov)
    # future climate datasets: one entity per referenced dataset uuid
    for uuid, layers in result.job_params['future_climate_datasets'].items():
        key = 'future_climate_datasets'
        ds = uuidToObject(uuid)
        dsprov = Resource(graph, LOCAL[uuid])
        dsprov.add(RDF['type'], PROV['Entity'])
        dsprov.add(DCTERMS['creator'], Literal(ds.Creator()))
        dsprov.add(DCTERMS['title'], Literal(ds.title))
        dsprov.add(DCTERMS['description'], Literal(ds.description))
        dsprov.add(DCTERMS['rights'], Literal(ds.rights))  # ds.rightsstatement
        if IFile.providedBy(ds):
            dsprov.add(DCTERMS['format'], Literal(ds.file.contentType))
        # TODO: genre, resolution, emsc, gcm, year(s) ...
        for layer in layers:
            dsprov.add(BCCVL['layer'], LOCAL[layer])
        # location / source
        # layers selected + layer metadata
        # ... raster data, .. layers
        # link with activity
        activity.add(PROV['used'], dsprov)
    provdata.data = graph.serialize(format="turtle")
def printDocument(self):
    """Generate a DOCX (optionally also a PDF) from a print model.

    URL shape:
    <docurl>/@@printdoc?app=praticaweb&model=relazione_asseverata_scia&grp=relazioni&form=scia-completa&field=campo&pdf=1&jsondump=1

    Request parameters:
        model: print model to use
        grp: subfolder of the print model
        app: application
        form: (recommended) form used to serialize the document;
              falls back to the Plomino document's own form
        field: (optional) when passed, sets the item on the Plomino document
        pdf: (optional) when passed, also produce the PDF file
        jsondump: (debug) return the serialized document as JSON and stop

    The model file is located under
    <portal>/<models_folder>/<app>/<grp>/<model>, the document is
    serialized to JSON and both are POSTed to the external
    ws_createdocx service, whose response is rendered back.
    """
    request = self.request
    if not self.print_form:
        self.print_form = self.doc.Form
    app = request.get('app')  # e.g. praticaweb, dehors, trasporti..
    grp = request.get('grp')  # e.g. autorizzazione....
    model = request.get('model')  # model name
    redirect_url = ''
    fieldsubset = ''
    fieldsremove = ''
    # Guard clauses: configuration, authorization, service settings.
    if not self.printConfig:
        self.result[
            'msg'] = "MANCA IL FILE DI CONFIGURAZIONE resources/config_printdocuments"
        return self.render()
    if not self.doc.isCurrentUserAuthor(self.doc):
        self.result['msg'] = "ACCESSO NON CONSENTITO"
        return self.render()
    if not "ws_createdocx" in self.printConfig:
        self.result['msg'] = "ws_createdocx non assegnato"
        return self.render()
    serviceUrl = self.printConfig["ws_createdocx"]
    if not "models_folder" in self.printConfig:
        self.result['msg'] = "models_folder non assegnato"
        return self.render()
    models = self.printConfig['models_folder']
    # Walk down <portal>/<models>/<app>/<grp>, failing with a message
    # at the first missing folder.
    portal = self.doc.portal_url.getPortalObject()
    if not models in portal.keys():
        self.result['msg'] = "Non esiste la cartella %s" % (models)
        return self.render()
    modelsFolder = portal[models]
    if not app in modelsFolder.keys():
        self.result['msg'] = "Non esiste la cartella %s/%s" % (models, app)
        return self.render()
    modelsFolder = portal[models][app]
    if not grp in modelsFolder.keys():
        self.result['msg'] = "Non esiste la cartella %s/%s/%s" % (models,
                                                                  app, grp)
        return self.render()
    modelsFolder = portal[models][app][grp]
    if not model in modelsFolder.keys():
        self.result['msg'] = "Non esiste il file %s/%s/%s/%s" % (
            models, app, grp, model)
        return self.render()
    # Serialize the document via the selected form.
    serializedDoc = serialDoc(self.doc, self.print_form)
    if not serializedDoc:
        self.result['msg'] = "Errore nella serializzazione del documento"
        return self.render()
    """ SERIALIZZO I DATI DI UNA RIGA DI DATAGRID AGGIUNGO/SOSTITUISCO NEL DOCUMENTO SERIALIZZATO I CAMPI PRESENTI IN UNA RIGA DI UN DATAGRID Caso d'uso: Stampo 1 documento per ogni soggetto presente in un datagrid. grid: nome del campo con il datagrid grid_index: riga da da accodare serializzata """
    # Datagrid row support: when 'grid' and 'grid_index' are passed,
    # merge the fields of that datagrid row into the serialized document
    # (use case: print one document per subject in a datagrid).
    grid = request.get('grid')
    grid_index = request.get('grid_index')
    if grid and grid_index:
        rows = serializedDoc[grid]
        grid_index = int(grid_index)
        if rows and len(rows) > grid_index:
            row = rows[grid_index]
            serializedDoc.update(row)
    # Debug mode: return the serialized document as JSON and stop.
    if request.get('jsondump') == '1':
        self.request.response.setHeader("Content-type", "application/json")
        return json.dumps(serializedDoc)
    # Load the print model: either a Dexterity File or an AT-style file.
    modelFile = modelsFolder[model]
    if IFile.providedBy(modelFile):
        modelContent = base64.b64encode(modelFile.file.data)
        modelName = modelFile.file.filename
        modelMimeType = modelFile.file.contentType
    else:
        modelContent = base64.b64encode(modelFile.get_data())
        # file extension for prints
        modelName = modelFile.getFilename()
        modelMimeType = modelFile.getContentType()
        modelIcon = modelFile.getIcon()
    # Parameters for the document-creation webservice call.
    data = dict(model=modelContent,
                dataType='JSON',
                data=json.dumps(serializedDoc))
    if self.toPdf:
        data["pdf"] = 1
    # Create the document via the external webservice.
    try:
        # TODO: error checking
        wsres = requests.post(serviceUrl, data)
        res = wsres.json()
    except Exception as error:
        msg = ('%s: %s' % (type(error), error), 'error')
        self.result['msg'] = msg
        # NOTE(review): if requests.post itself raised, wsres is
        # unbound here and this line raises NameError — confirm.
        self.result['response'] = wsres
        return self.render()
    content = ''
    pdfContent = ''
    if res['success'] == 1:
        if "content" in res:
            content = base64.b64decode(res['content'])
        if "pdfContent" in res:
            pdfContent = base64.b64decode(res['pdfContent'])
        return self.renderContent(modelName, content, modelMimeType,
                                  pdfContent)
    else:
        self.result['msg'] = "Errore nella creazione del documento"
        return self.render()
def Format(self):
    """MIME type of the object: the file's content type for Files,
    otherwise application/octet-stream."""
    obj = self.getObject()
    if IFile.providedBy(obj):
        return obj.file.contentType
    return 'application/octet-stream'
def _createProvenance(self, result):
    """Build a W3C PROV provenance graph for *result* and store it,
    serialized as Turtle, on the result's IProvenanceData adapter.

    The graph records the current user and the job script as agents,
    the job run as an activity with its parameters, and every input
    dataset as an entity used by the activity.
    """
    provdata = IProvenanceData(result)
    from rdflib import URIRef, Literal, Namespace, Graph
    from rdflib.namespace import RDF, RDFS, FOAF, DCTERMS, XSD
    from rdflib.resource import Resource
    PROV = Namespace(u"http://www.w3.org/ns/prov#")
    BCCVL = Namespace(u"http://ns.bccvl.org.au/")
    LOCAL = Namespace(u"urn:bccvl:")
    graph = Graph()
    # the user is our agent
    member = api.user.get_current()
    username = member.getProperty('fullname') or member.getId()
    user = Resource(graph, LOCAL['user'])
    user.add(RDF['type'], PROV['Agent'])
    user.add(RDF['type'], FOAF['Person'])
    user.add(FOAF['name'], Literal(username))
    user.add(FOAF['mbox'],
             URIRef('mailto:{}'.format(member.getProperty('email'))))
    # add software as agent
    software = Resource(graph, LOCAL['software'])
    software.add(RDF['type'], PROV['Agent'])
    software.add(RDF['type'], PROV['SoftwareAgent'])
    software.add(FOAF['name'], Literal('BCCVL Job Script'))
    # script content is stored somewhere on result and will be exported
    # with zip? ... or store along with pstats.json ? hidden from user
    # -> execenvironment after import -> log output?
    # -> source code ... maybe some link expression? stored on result?
    #    separate entity?
    activity = Resource(graph, LOCAL['activity'])
    activity.add(RDF['type'], PROV['Activity'])
    # TODO: this is rather queued or created time for this activity ...
    #       could capture real start time on running status update
    #       (or start transfer)
    now = datetime.now().replace(microsecond=0)
    activity.add(PROV['startedAtTime'],
                 Literal(now.isoformat(), datatype=XSD['dateTime']))
    activity.add(PROV['hasAssociationWith'], user)
    activity.add(PROV['hasAssociationWith'], software)
    # add job parameters to activity
    for idx, (key, value) in enumerate(result.job_params.items()):
        param = Resource(graph, LOCAL[u'param_{}'.format(idx)])
        activity.add(BCCVL['algoparam'], param)
        param.add(BCCVL['name'], Literal(key))
        # We have only dataset references as parameters
        if key in ('future_climate_datasets', ):
            # dict of dataset uuid -> layers
            for dsuuid in value.keys():
                param.add(BCCVL['value'], LOCAL[dsuuid])
        elif key in ('species_distribution_models', ):
            # single dataset uuid
            param.add(BCCVL['value'], LOCAL[value])
        else:
            param.add(BCCVL['value'], Literal(value))
    # iterate over all input datasets and add them as entities
    for key in ('species_distribution_models', ):
        dsbrain = uuidToCatalogBrain(result.job_params[key])
        if not dsbrain:
            continue
        ds = dsbrain.getObject()
        dsprov = Resource(graph, LOCAL[result.job_params[key]])
        dsprov.add(RDF['type'], PROV['Entity'])
        # dsprov.add(PROV['..'], Literal(''))
        dsprov.add(DCTERMS['creator'], Literal(ds.Creator()))
        dsprov.add(DCTERMS['title'], Literal(ds.title))
        dsprov.add(DCTERMS['description'], Literal(ds.description))
        dsprov.add(DCTERMS['rights'], Literal(ds.rights))  # ds.rightsstatement
        dsprov.add(DCTERMS['format'], Literal(ds.file.contentType))
        # location / source
        # graph.add(uri, DCTERMS['source'], Literal(''))
        # TODO: genre ...
        # TODO: resolution
        # species metadata
        md = IBCCVLMetadata(ds)
        dsprov.add(BCCVL['scientificName'],
                   Literal(md['species']['scientificName']))
        dsprov.add(BCCVL['taxonID'], URIRef(md['species']['taxonID']))
        # ... species data, ... species id
        for layer in md.get('layers_used', ()):
            dsprov.add(BCCVL['layer'], LOCAL[layer])
        # link with activity
        activity.add(PROV['used'], dsprov)
    # future climate datasets: one entity per referenced dataset uuid
    for uuid, layers in result.job_params['future_climate_datasets'].items():
        key = 'future_climate_datasets'
        ds = uuidToObject(uuid)
        dsprov = Resource(graph, LOCAL[uuid])
        dsprov.add(RDF['type'], PROV['Entity'])
        dsprov.add(DCTERMS['creator'], Literal(ds.Creator()))
        dsprov.add(DCTERMS['title'], Literal(ds.title))
        dsprov.add(DCTERMS['description'], Literal(ds.description))
        dsprov.add(DCTERMS['rights'], Literal(ds.rights))  # ds.rightsstatement
        if IFile.providedBy(ds):
            dsprov.add(DCTERMS['format'], Literal(ds.file.contentType))
        # TODO: genre, resolution, emsc, gcm, year(s) ...
        for layer in layers:
            dsprov.add(BCCVL['layer'], LOCAL[layer])
        # location / source
        # layers selected + layer metadata
        # ... raster data, .. layers
        # link with activity
        activity.add(PROV['used'], dsprov)
    provdata.data = graph.serialize(format="turtle")
def test_adding(self):
    """invokeFactory('File', ...) produces an object providing IFile."""
    new_id = self.portal.invokeFactory('File', 'doc1')
    self.assertTrue(IFile.providedBy(self.portal[new_id]))
def _filesize(self):
    """Return the filesize if the contenttype is a File or an Image"""
    obj = self.obj
    for iface, field_name in ((IFile, 'file'), (IImage, 'image')):
        if iface.providedBy(obj):
            return getattr(obj, field_name).size
def test_adding(self):
    """A freshly added 'File' provides the IFile interface."""
    self.portal.invokeFactory('File', 'doc1')
    created = self.portal['doc1']
    self.assertTrue(IFile.providedBy(created))
def test_adding(self):
    """Adding a File content object yields an IFile provider."""
    doc_id = "doc1"
    self.portal.invokeFactory("File", doc_id)
    self.assertTrue(IFile.providedBy(self.portal[doc_id]))
def _getAllObjectsData(self, context, objects_listing, tfile, tmp=False):
    """ Returns the data in all files with a content object to be placed in a zipfile

    :param context: container relative to which archive paths are built
    :param objects_listing: content objects to pack into the zip
    :param tfile: target file (path or file-like) for the ZipFile;
                  replaced by a temp file when *tmp* is true
    :param tmp: when true, write to a fresh temporary file instead
    :return: the temp-file path when *tmp* is true, else ''
    """
    props = getToolByName(context, 'portal_properties')
    nameByTitle = props.zipfile_properties.name_by_title
    allow_zip64 = props.zipfile_properties.allow_zip64
    # Use temporary IO object instead of writing to filesystem.
    if tmp:
        fd, path = tempfile.mkstemp('.zipfiletransport')
        tfile = path
        # NOTE(review): presumably `close` is `os.close` imported at
        # module level; otherwise this is a NameError — confirm.
        close(fd)
    zipFile = ZipFile(tfile, 'w', ZIP_DEFLATED, allowZip64=allow_zip64)
    context_path = str(context.virtual_url_path())
    for obj in objects_listing:
        object_extension = ''
        object_path = str(obj.virtual_url_path())
        file_data = None
        # plone.app.contenttypes (Dexterity) objects first, when available.
        if HAS_PAC:
            if IImage.providedBy(obj):
                file_data = str(obj.image.data)
                object_path = object_path.replace(context_path + '/', '')
            elif IFile.providedBy(obj):
                file_data = str(obj.file.data)
                object_path = object_path.replace(context_path + '/', '')
        if file_data is not None and object_path is not None:
            # early escape coming from plone.app.contenttypes
            pass
        elif self._objImplementsInterface(obj, interfaces.IATFile) or \
                self._objImplementsInterface(obj, interfaces.IATImage):
            file_data = str(obj.data)
            object_path = object_path.replace(context_path + '/', '')
        elif self._objImplementsInterface(obj, interfaces.IATDocument):
            # Pick the extension matching the document's text format.
            if "text/html" == obj.Format():
                file_data = obj.getText()
                object_extension = ".html"
            elif "text/x-rst" == obj.Format():
                file_data = obj.getRawText()
                object_extension = ".rst"
            elif "text/structured" == obj.Format():
                file_data = obj.getRawText()
                object_extension = ".stx"
            elif "text/plain" == obj.Format():
                file_data = obj.getRawText()
                object_extension = ".txt"
            else:
                file_data = obj.getRawText()
            object_path = object_path.replace(context_path + '/', '')
        elif self._objImplementsInterface(obj, interfaces.IATFolder):
            # Folders with text content are exported as HTML pages.
            if hasattr(obj, 'getRawText'):
                file_data = obj.getRawText()
                if object_path == context_path:
                    object_path = object_path.split("/")[-1]
                else:
                    object_path = object_path.replace(context_path + '/', '')
                if object_path[-5:] != ".html" and object_path[-4:] != ".htm":
                    object_extension = ".html"
        else:
            # Unsupported content type: skip it.
            continue
        # start point for object path, adding 1 removes the initial '/'
        object_path = self.generateSafeFileName(object_path)
        if object_path:
            # Reconstruct path with filename, use ID unless
            # name_by_title has been set. Name by ID is the
            # default behavior so that links to other documents
            # in documents will be preserved when the same file
            # is imported back into your Plone site. If you use
            # name_by_title, you will be able to save non-ascii
            # chars in the filename but you will not be able to
            # round trip the ZIP archive and have links in your
            # documents continue to work. ID is the preferred
            # solution, as it is much work to go through lots of
            # documents by hand, find the internal links and
            # correct them manually.
            filename_path = []
            # Walk up the acquisition chain, one path segment per level.
            for i in range(0, len(object_path.split('/'))):
                if nameByTitle:
                    # Use title for filename in ZIP export
                    filename_path.append(obj.Title())
                else:
                    filename_path.append(obj.getId())
                obj = obj.aq_inner.aq_parent
            if len(filename_path) > 1:
                filename_path.reverse()
                filename_path = '/'.join(filename_path)
            else:
                filename_path = filename_path[0]
            # Add the correct file extension
            if filename_path[-len(object_extension):] != object_extension:
                filename_path += object_extension
            # Windows unzip tools expect cp437-encoded member names.
            if 'Windows' in context.REQUEST['HTTP_USER_AGENT']:
                filename_path = filename_path.decode('utf-8').encode('cp437')
            zipFile.writestr(filename_path, file_data)
    zipFile.close()
    if not tmp:
        tfile = ''
    return tfile