def build_transferzip(node): nid = node.id zipfilepath = join_paths(config.get("paths.tempdir"), nid + "_transfer.zip") if os.path.exists(zipfilepath): zipfilepath = join_paths(config.get("paths.tempdir"), nid + "_" + str(random.random()) + "_transfer.zip") zip = zipfile.ZipFile(zipfilepath, "w", zipfile.ZIP_DEFLATED) files_written = 0 for n in node.getAllChildren(): if n.isActiveVersion(): for fn in n.getFiles(): if fn.getType() in ['doc', 'document', 'zip', 'attachment', 'other']: fullpath = fn.retrieveFile() if os.path.isfile(fullpath) and os.path.exists(fullpath): dirname, filename = os.path.split(fullpath) print "adding to zip: ", fullpath, "as", filename zip.write(fullpath, filename) files_written += 1 if os.path.isdir(fullpath): for f in get_all_file_paths(fullpath): newpath = f.replace(fullpath, "") print "adding from ", fullpath, "to zip: ", f, "as", newpath zip.write(f, newpath) files_written += 1 zip.close() return zipfilepath, files_written
def build_transferzip(node): nid = node.id zipfilepath = join_paths(config.get("paths.tempdir"), nid + "_transfer.zip") if os.path.exists(zipfilepath): zipfilepath = join_paths( config.get("paths.tempdir"), nid + "_" + str(random.random()) + "_transfer.zip") zip = zipfile.ZipFile(zipfilepath, "w", zipfile.ZIP_DEFLATED) files_written = 0 for n in node.getAllChildren(): if n.isActiveVersion(): for fn in n.getFiles(): if fn.getType() in [ 'doc', 'document', 'zip', 'attachment', 'other' ]: fullpath = fn.retrieveFile() if os.path.isfile(fullpath) and os.path.exists(fullpath): dirname, filename = os.path.split(fullpath) print "adding to zip: ", fullpath, "as", filename zip.write(fullpath, filename) files_written += 1 if os.path.isdir(fullpath): for f in get_all_file_paths(fullpath): newpath = f.replace(fullpath, "") print "adding from ", fullpath, "to zip: ", f, "as", newpath zip.write(f, newpath) files_written += 1 zip.close() return zipfilepath, files_written
def r(p):
    # Recursively add the file tree rooted at ``path``/``p`` to the archive.
    # NOTE(review): ``path`` and ``zip`` are closure variables from an enclosing
    # scope (cf. sendZipFile) that is not part of this fragment — confirm.
    if os.path.isdir(join_paths(path, p)):
        for file in os.listdir(join_paths(path, p)):
            r(join_paths(p, file))
    else:
        # strip leading slashes so the archive member name is relative
        while len(p) > 0 and p[0] == "/":
            p = p[1:]
        try:
            zip.write(join_paths(path, p), p)
        except:
            # best-effort: silently skip files that cannot be read/added
            pass
def upload_ziphandler(req):
    """Unpack a zip file attached to a node into individual node files.

    Finds the node file whose path ends with the ``file`` request parameter,
    extracts every member (skipping Mac OS X "._" junk), imports each member
    as a new file of the node, collects matching metadata schemes per mime
    type, then removes the original zip file from the node.

    Returns a dict ``{'files': [...], 'schemes': {...}}`` for the template.
    """
    schemes = get_permitted_schemas()
    files = []
    scheme_type = {}
    basenode = q(Node).get(req.params.get('id'))
    # iterate over a snapshot: the loop body removes entries from
    # basenode.files, and mutating the live collection while iterating
    # over it silently skips elements
    for nodefile in list(basenode.files):
        if not nodefile.abspath.endswith(req.params.get('file')):
            continue
        z = zipfile.ZipFile(nodefile.abspath)
        for member in z.namelist():
            # strip unwanted garbage from string
            name = mybasename(member).decode('utf8', 'ignore').encode('utf8')
            random_str = ustr(random.random())[2:]
            if name.startswith("._"):  # ignore Mac OS X junk
                continue
            if name.split('.')[0] == '':
                # dot-file without a stem: prefix to get a usable name
                name = random_str + name
            files.append(name.replace(" ", "_"))
            _m = getMimeType(name)
            if random_str in name:
                newfilename = join_paths(config.get("paths.tempdir"),
                                         name.replace(" ", "_"))
            else:
                newfilename = join_paths(config.get("paths.tempdir"),
                                         random_str + name.replace(" ", "_"))
            # plain binary open(): codecs.open() is for text and must not be
            # used to write raw zip payload bytes
            with open(newfilename, "wb") as fi:
                fi.write(z.read(member))
            fn = importFileToRealname(mybasename(name.replace(" ", "_")), newfilename)
            basenode.files.append(fn)
            if os.path.exists(newfilename):
                os.unlink(newfilename)  # temp copy no longer needed after import
            if _m[1] not in scheme_type:
                scheme_type[_m[1]] = []
                for scheme in schemes:
                    if _m[1] in scheme.getDatatypes():
                        scheme_type[_m[1]].append(scheme)
        try:
            z.close()
            os.remove(nodefile.abspath)
        except Exception:
            # best-effort cleanup; was a bare except, narrowed so
            # SystemExit/KeyboardInterrupt are no longer swallowed
            pass
        basenode.files.remove(nodefile)
    db.session.commit()
    return {'files': files, 'schemes': scheme_type}
def sendZipFile(req, path):
    # Zip the directory tree under ``path`` into a temp archive and stream it
    # to the client as "shoppingbag.zip"; the temp file is unlinked afterwards
    # on Unix only.
    tempfile = join_paths(config.get("paths.tempdir"), str(random.random())) + ".zip"
    zip = zipfile.ZipFile(tempfile, "w")
    zip.debug = 3

    def r(p):
        # recursive walk; ``p`` is the archive-relative path under ``path``
        if os.path.isdir(join_paths(path, p)):
            for file in os.listdir(join_paths(path, p)):
                r(join_paths(p, file))
        else:
            # strip leading slashes so member names are relative
            while len(p) > 0 and p[0] == "/":
                p = p[1:]
            try:
                zip.write(join_paths(path, p), p)
            except:
                # best-effort: skip unreadable/vanished files
                pass
    r("/")
    zip.close()
    req.reply_headers['Content-Disposition'] = "attachment; filename=shoppingbag.zip"
    req.sendFile(tempfile, "application/zip")
    if os.sep == '/':  # Unix?
        os.unlink(tempfile)  # unlinking files while still reading them only works on Unix/Linux
def export(req):
    """
    export definition: url contains /[type]/[id]

    Writes the module's export (XML) to a temp file and streams it to the
    client; admin only. The temp file is unlinked afterwards on Unix.
    """
    if not current_user.is_admin:
        return httpstatus.HTTP_FORBIDDEN
    path = req.path[1:].split("/")
    try:
        module = findmodule(path[1])
        tempfile = join_paths(config.get("paths.tempdir"), str(random.random()))
        with codecs.open(tempfile, "w", encoding='utf8') as f:
            try:
                f.write(module.export(req, path[2]))
            except UnicodeDecodeError:
                # export returned a byte string; decode before writing utf8 text
                f.write(module.export(req, path[2]).decode('utf-8'))
        _sendFile(req, tempfile, u"application/xml",
                  nginx_x_accel_redirect_enabled=False)
        if os.sep == '/':  # Unix?
            os.unlink(tempfile)  # unlinking files while still reading them only works on Unix/Linux
    except Exception:
        # Was a bare ``except:`` that swallowed everything (incl. SystemExit)
        # and always claimed "module has no export method" — log the real
        # failure with traceback instead.
        logg.exception("export failed (missing module or export method raised)")
def upload_ziphandler(req):
    # Unpack a zip file attached to the node given by req.params['id']:
    # each member is extracted to a temp file, imported as a node file, and
    # matching metadata schemes are collected per mime type; the original
    # zip file is removed from the node afterwards.
    schemes = getSchemes(req)
    files = []
    scheme_type = {}
    basenode = tree.getNode(req.params.get('id'))
    for file in basenode.getFiles():
        if file.retrieveFile().endswith(req.params.get('file')):
            z = zipfile.ZipFile(file.retrieveFile())
            for f in z.namelist():
                #strip unwanted garbage from string
                name = mybasename(f).decode('utf8', 'ignore').encode('utf8')
                random_str = str(random.random())[2:]
                if name.startswith("._"):  # ignore Mac OS X junk
                    continue
                if name.split('.')[0] == '':
                    # dot-file without a stem: prefix with random digits
                    name = random_str + name
                files.append(name.replace(" ", "_"))
                _m = getMimeType(name)
                if random_str in name:
                    newfilename = join_paths(config.get("paths.tempdir"), name.replace(" ", "_"))
                else:
                    newfilename = join_paths(config.get("paths.tempdir"), random_str + name.replace(" ", "_"))
                fi = open(newfilename, "wb")
                fi.write(z.read(f))
                fi.close()
                fn = importFileToRealname(mybasename(name.replace(" ", "_")), newfilename)
                basenode.addFile(fn)
                if os.path.exists(newfilename):
                    # temp copy no longer needed after import
                    os.unlink(newfilename)
                if _m[1] not in scheme_type:
                    scheme_type[_m[1]] = []
                    for scheme in schemes:
                        if _m[1] in scheme.getDatatypes():
                            scheme_type[_m[1]].append(scheme)
            try:
                # best-effort: close archive and delete the zip from disk
                z.close()
                os.remove(file.retrieveFile())
            except:
                pass
            # NOTE(review): removing while iterating basenode.getFiles() —
            # verify getFiles() returns a fresh list, otherwise entries are skipped
            basenode.removeFile(file)
    return {'files': files, 'schemes': scheme_type}
def upload_ziphandler(req):
    """Unpack a node's uploaded zip into individual node files.

    Extracts every member of the zip whose path ends with the ``file``
    request parameter (skipping Mac OS X "._" junk), imports each member as
    a node file, records applicable metadata schemes per mime type, then
    removes the zip from the node. Returns ``{'files': ..., 'schemes': ...}``.
    """
    schemes = get_permitted_schemas()
    files = []
    scheme_type = {}
    basenode = q(Node).get(req.params.get('id'))
    # snapshot the collection: the body removes entries from basenode.files,
    # and removing from a list while iterating it skips elements
    for nodefile in list(basenode.files):
        if not nodefile.abspath.endswith(req.params.get('file')):
            continue
        z = zipfile.ZipFile(nodefile.abspath)
        for member in z.namelist():
            # strip unwanted garbage from string
            name = mybasename(member).decode('utf8', 'ignore').encode('utf8')
            random_str = ustr(random.random())[2:]
            if name.startswith("._"):  # ignore Mac OS X junk
                continue
            if name.split('.')[0] == '':
                name = random_str + name  # dot-file without stem: prefix it
            files.append(name.replace(" ", "_"))
            _m = getMimeType(name)
            if random_str in name:
                newfilename = join_paths(config.get("paths.tempdir"),
                                         name.replace(" ", "_"))
            else:
                newfilename = join_paths(config.get("paths.tempdir"),
                                         random_str + name.replace(" ", "_"))
            # binary payload: plain open(), not codecs.open() (text codecs API)
            with open(newfilename, "wb") as fi:
                fi.write(z.read(member))
            fn = importFileToRealname(mybasename(name.replace(" ", "_")), newfilename)
            basenode.files.append(fn)
            if os.path.exists(newfilename):
                os.unlink(newfilename)  # temp copy no longer needed
            if _m[1] not in scheme_type:
                scheme_type[_m[1]] = []
                for scheme in schemes:
                    if _m[1] in scheme.getDatatypes():
                        scheme_type[_m[1]].append(scheme)
        # best-effort cleanup of the consumed zip
        with suppress(Exception, warn=False):
            z.close()
            os.remove(nodefile.abspath)
        basenode.files.remove(nodefile)
    db.session.commit()
    return {'files': files, 'schemes': scheme_type}
def send_attfile(req):
    """Stream a node attachment file to the client.

    The URL tail (after the 9-char prefix) is ``<node_id>/<file path>``.
    Returns 404 for unknown nodes, 403 when access is denied or the path is
    not one of the node's files; otherwise delegates to req.sendFile().
    Files larger than 16 MiB are sent as a forced download.
    """
    access = AccessData(req)
    parts = req.path[9:].split('/')
    try:
        node = getNode(parts[0])
    except tree.NoSuchNodeError:
        return 404
    if not access.hasAccess(node, "data") and node.type != "directory":
        return 403
    # check filepath: the requested path (or its parent) must belong to the node
    if len([nf for nf in node.getFiles()
            if nf._path in ["/".join(parts[1:]), "/".join(parts[1:-1])]]) == 0:
        return 403
    filename = clean_path("/".join(parts[1:]))
    path = join_paths(config.get("paths.datadir"), filename)
    # renamed from 'mime, type' — don't shadow the builtin ``type``
    mime, ftype = getMimeType(filename)
    if get_filesize(filename) > 16 * 1048576:
        # force download for files > 16 MiB
        req.reply_headers["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
    return req.sendFile(path, mime)
def export(req): """ export definition: url contains /[type]/[id] """ user = users.getUserFromRequest(req) if not user.isAdmin(): return httpstatus.HTTP_FORBIDDEN path = req.path[1:].split("/") try: module = findmodule(path[1]) tempfile = join_paths(config.get("paths.tempdir"), str(random.random())) file = open(tempfile, "w") file.write(module.export(req, path[2])) file.close() req.sendFile(tempfile, "application/xml") if os.sep == '/': # Unix? os.unlink(tempfile) # unlinking files while still reading them only works on Unix/Linux except: print "module has no export method"
def sendZipFile(req, path):
    """Zip the directory tree under ``path`` and stream it as "shoppingbag.zip".

    The archive is created in paths.tempdir and unlinked after sending
    (Unix only, where unlink-while-open is safe).
    """
    tempfile = join_paths(config.get("paths.tempdir"), str(random.random())) + ".zip"
    # renamed from 'zip' — don't shadow the builtin
    archive = zipfile.ZipFile(tempfile, "w")
    archive.debug = 3

    def r(p):
        # depth-first walk; ``p`` is the archive-relative path under ``path``
        if os.path.isdir(join_paths(path, p)):
            # renamed loop var from 'file' — don't shadow the builtin
            for entry in os.listdir(join_paths(path, p)):
                r(join_paths(p, entry))
        else:
            # strip leading slashes so member names are relative
            while len(p) > 0 and p[0] == "/":
                p = p[1:]
            try:
                archive.write(join_paths(path, p), p)
            except Exception:
                # best-effort skip; was a bare except that also ate
                # KeyboardInterrupt/SystemExit
                pass
    try:
        r("/")
    finally:
        # always close so a failed walk doesn't leak the handle
        archive.close()
    req.reply_headers['Content-Disposition'] = "attachment; filename=shoppingbag.zip"
    req.sendFile(tempfile, "application/zip")
    if os.sep == '/':  # Unix?
        os.unlink(tempfile)  # unlinking files while still reading them only works on Unix/Linux
def export(req):
    """
    export definition: url contains /[type]/[id]

    Admin-only XML export of one object via the type's module.
    """
    if not current_user.is_admin:
        return httpstatus.HTTP_FORBIDDEN
    segments = req.path[1:].split("/")
    try:
        mod = findmodule(segments[1])
        tmp_path = join_paths(config.get("paths.tempdir"), str(random.random()))
        with codecs.open(tmp_path, "w", encoding='utf8') as out:
            try:
                out.write(mod.export(req, segments[2]))
            except UnicodeDecodeError:
                # byte-string result: decode, then write as utf8 text
                out.write(mod.export(req, segments[2]).decode('utf-8'))
        req.sendFile(tmp_path, u"application/xml", nginx_x_accel_redirect_enabled=False)
        if os.sep == '/':  # Unix?
            os.unlink(tmp_path)  # unlinking files while still reading them only works on Unix/Linux
    except:
        logg.info("module has no export method")
def export(req): """ export definition: url contains /[type]/[id] """ user = users.getUserFromRequest(req) if not user.isAdmin(): return httpstatus.HTTP_FORBIDDEN path = req.path[1:].split("/") try: module = findmodule(path[1]) tempfile = join_paths(config.get("paths.tempdir"), str(random.random())) file = open(tempfile, "w") file.write(module.export(req, path[2])) file.close() req.sendFile(tempfile, "application/xml") if os.sep == '/': # Unix? os.unlink( tempfile ) # unlinking files while still reading them only works on Unix/Linux except: print "module has no export method"
def send_attfile(req):
    """Deliver one attachment file of a node.

    URL tail is ``<node_id>/<file path>``; returns 404/403 on unknown node or
    denied access, otherwise streams the file (forced download above 16 MiB).
    """
    access = AccessData(req)
    segments = req.path[9:].split('/')
    try:
        node = getNode(segments[0])
    except tree.NoSuchNodeError:
        return 404
    if not access.hasAccess(node, "data") and node.type != "directory":
        return 403
    requested = "/".join(segments[1:])
    requested_parent = "/".join(segments[1:-1])
    # check filepath
    if not any(nf._path in [requested, requested_parent] for nf in node.getFiles()):
        return 403
    filename = clean_path(requested)
    full_path = join_paths(config.get("paths.datadir"), filename)
    mime, _kind = getMimeType(filename)
    if get_filesize(filename) > 16 * 1048576:
        req.reply_headers["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
    return req.sendFile(full_path, mime)
def runAction(self, node, op=""):
    # Fill a PDF form (uploaded to this workflow step via the
    # 'upload_pdfform' field) with values derived from the node's metadata,
    # then either merge the filled pages into the node's document PDF or
    # store them as a separate 'pdf_form' file, depending on step settings.
    # Finally forwards the node along the workflow (False branch on PDF errors).

    # find the node's document file (first file with filetype "document")
    fnode = None
    for fnode in node.files:
        if fnode.filetype == "document":
            break

    def reformatAuthors(s):
        # "A;B;C" -> "A, B and C"
        authors = s.strip().split(";")
        if len(authors) > 1:
            authors = ", ".join(authors[:-1]) + " and " + authors[-1]
        else:
            authors = authors[0]
        return authors

    # get pdf form appended to this workflow step through upload field 'upload_pdfform'
    current_workflow = getNodeWorkflow(node)
    current_workflow_step = getNodeWorkflowStep(node)
    formfilelist, formfilelist2 = getFilelist(current_workflow_step, 'upload_pdfform')

    # step settings (stored as strings; "1"/"true" mean enabled)
    pdf_fields_editable = current_workflow_step.get("pdf_fields_editable")
    pdf_form_separate = current_workflow_step.get("pdf_form_separate")
    pdf_form_overwrite = current_workflow_step.get("pdf_form_overwrite")

    if pdf_fields_editable.lower() in ["1", "true"]:
        pdf_fields_editable = True
    else:
        pdf_fields_editable = False

    if pdf_form_separate.lower() in ["1", "true"]:
        pdf_form_separate = True
    else:
        pdf_form_separate = False

    fields = []
    f_retrieve_path = None
    schema = getMetaType(node.schema)

    if formfilelist:
        # take newest (mtime)
        f_mtime, f_name, f_mimetype, f_size, f_type, f_retrieve_path, f = formfilelist[-1]

        # map every form field name to a value from the node
        for field_dict in parse_pdftk_fields_dump(get_pdftk_fields_dump(f_retrieve_path)):
            fieldname = field_dict.get('FieldName', None)
            if fieldname:
                value = ''
                if fieldname in dict(node.attrs.items()):
                    # direct node attribute: use the schema-formatted value
                    schemafield = schema.children.filter_by(name=fieldname).first()
                    value = schemafield.getFormattedValue(node)[1]
                    if fieldname.find('author') >= 0:
                        value = reformatAuthors(value)
                elif fieldname.lower() == 'node.schema':
                    value = getMetaType(node.schema).getLongName()
                elif fieldname.lower() == 'node.id':
                    value = unicode(node.id)
                elif fieldname.lower() == 'node.type':
                    value = node.type
                elif fieldname.lower() == 'date()':
                    value = format_date(now(), format='%d.%m.%Y')
                elif fieldname.lower() == 'time()':
                    value = format_date(now(), format='%H:%M:%S')
                elif fieldname.find("+") > 0:
                    # fallback chain "attr1+attr2+...": first non-empty wins
                    for _fn in fieldname.split('+'):
                        value = node.get(_fn)
                        if value:
                            break
                elif '[att:' in fieldname:
                    # template syntax: replace each [att:name] placeholder
                    value = fieldname
                    while '[att:' in value:
                        m = re.search('(?<=\[att:)([^&\]]+)', value)
                        if m:
                            if m.group(0) == 'id':
                                v = unicode(node.id)
                            elif m.group(0) == 'type':
                                v = node.type
                            elif m.group(0) == 'schema':
                                v = getMetaType(node.schema).getLongName()
                            else:
                                schemafield = schema.children.filter_by(name=m.group(0)).first()
                                v = schemafield.getFormattedValue(node)[0]
                            value = value.replace('[att:%s]' % (m.group(0)), v)
                else:
                    logg.warning("workflowstep %s (%s): could not find attribute for pdf form field '%s' - node: '%s' (%s)",
                                 current_workflow_step.name, current_workflow_step.id, fieldname, node.name, node.id)
                fields.append((fieldname, remove_tags(desc(value))))

    if not pdf_form_separate and fnode and f_retrieve_path and os.path.isfile(f_retrieve_path):
        # merge mode: append the filled form pages to the node's document PDF
        pages = fillPDFForm(f_retrieve_path, fields, input_is_fullpath=True, editable=pdf_fields_editable)
        if pages == "":  # error in pdf creation -> forward to false operation
            logg.error("workflowstep %s (%s): could not create pdf file - node: '%s' (%s)" %
                       (current_workflow_step.name, current_workflow_step.id, node.name, node.id))
            self.forward(node, False)
            return
        origname = fnode.abspath
        outfile = addPagesToPDF(pages, origname)
        # NOTE(review): removing from node.files while iterating it skips
        # elements — confirm whether clearing all files is really intended
        for f in node.files:
            node.files.remove(f)
        fnode.path = outfile.replace(config.get("paths.datadir"), "")
        node.files.append(fnode)
        node.files.append(File(origname, 'upload', 'application/pdf'))  # store original filename
        node.event_files_changed()
        db.session.commit()
        logg.info("workflow '%s' (%s), workflowstep '%s' (%s): added pdf form to pdf (node '%s' (%s)) fields: %s",
                  current_workflow.name, current_workflow.id, current_workflow_step.name,
                  current_workflow_step.id, node.name, node.id, fields)
    elif pdf_form_separate and f_retrieve_path and os.path.isfile(f_retrieve_path):
        # separate mode: store the filled form as its own 'pdf_form' file
        pages = fillPDFForm(f_retrieve_path, fields, input_is_fullpath=True, editable=pdf_fields_editable)
        if pages == "":  # error in pdf creation -> forward to false operation
            logg.error("workflowstep %s (%s): could not create pdf file - node: '%s' (%s)" %
                       (current_workflow_step.name, current_workflow_step.id, node.name, node.id))
            self.forward(node, False)
            return
        importdir = getImportDir()
        try:
            new_form_path = join_paths(importdir, "%s_%s" % (node.id, f_name))
            counter = 0
            if not pdf_form_overwrite:  # build correct filename
                while os.path.isfile(new_form_path):
                    counter += 1
                    new_form_path = join_paths(importdir, "%s_%s_%s" % (node.id, counter, f_name))
            # copy new file and remove tmp
            shutil.copy(pages, new_form_path)
            if os.path.exists(pages):
                os.remove(pages)
        except Exception:
            logg.exception("workflowstep %s (%s): could not copy pdf form to import directory - node: '%s' (%s), import directory: '%s'",
                           current_workflow_step.name, current_workflow_step.id, node.name, node.id, importdir)
        # attach the new file unless an identical path exists and overwrite is on
        found = 0
        for fn in node.files:
            if fn.abspath == new_form_path:
                found = 1
                break
        if found == 0 or (found == 1 and not pdf_form_overwrite):
            node.files.append(File(new_form_path, 'pdf_form', 'application/pdf'))
            db.session.commit()
        logg.info(
            "workflow '%s' (%s), workflowstep '%s' (%s): added separate pdf form to node (node '%s' (%s)) fields: %s, path: '%s'",
            current_workflow.name, current_workflow.id, current_workflow_step.name,
            current_workflow_step.id, node.name, node.id, fields, new_form_path)
    else:
        logg.warning("workflowstep %s (%s): could not process pdf form - node: '%s' (%s)",
                     current_workflow_step.name, current_workflow_step.id, node.name, node.id)
    self.forward(node, True)
def runAction(self, node, op=""):
    # Fill the step's uploaded PDF form ('upload_pdfform') with node metadata
    # and either merge the result into the node's document PDF or save it as a
    # separate 'pdf_form' file, per step settings; forwards the node afterwards
    # (False branch when PDF creation fails).

    # locate the node's document file (first with filetype "document")
    fnode = None
    for fnode in node.files:
        if fnode.filetype == "document":
            break

    def reformatAuthors(s):
        # "A;B;C" -> "A, B and C"
        authors = s.strip().split(";")
        if len(authors) > 1:
            authors = ", ".join(authors[:-1]) + " and " + authors[-1]
        else:
            authors = authors[0]
        return authors

    # get pdf form appended to this workflow step through upload field 'upload_pdfform'
    current_workflow = getNodeWorkflow(node)
    current_workflow_step = getNodeWorkflowStep(node)
    formfilelist, formfilelist2 = getFilelist(current_workflow_step, 'upload_pdfform')

    # step settings are stored as strings; "1"/"true" enable the flag
    pdf_fields_editable = current_workflow_step.get("pdf_fields_editable")
    pdf_form_separate = current_workflow_step.get("pdf_form_separate")
    pdf_form_overwrite = current_workflow_step.get("pdf_form_overwrite")

    if pdf_fields_editable.lower() in ["1", "true"]:
        pdf_fields_editable = True
    else:
        pdf_fields_editable = False

    if pdf_form_separate.lower() in ["1", "true"]:
        pdf_form_separate = True
    else:
        pdf_form_separate = False

    fields = []
    f_retrieve_path = None
    schema = getMetaType(node.schema)

    if formfilelist:
        # take newest (mtime)
        f_mtime, f_name, f_mimetype, f_size, f_type, f_retrieve_path, f = formfilelist[-1]

        # resolve each PDF form field name to a node-derived value
        for field_dict in parse_pdftk_fields_dump(get_pdftk_fields_dump(f_retrieve_path)):
            fieldname = field_dict.get('FieldName', None)
            if fieldname:
                value = ''
                if fieldname in dict(node.attrs.items()):
                    # plain attribute: schema-formatted value
                    schemafield = schema.children.filter_by(name=fieldname).first()
                    value = schemafield.getFormattedValue(node)[1]
                    if fieldname.find('author') >= 0:
                        value = reformatAuthors(value)
                elif fieldname.lower() == 'node.schema':
                    value = getMetaType(node.schema).getLongName()
                elif fieldname.lower() == 'node.id':
                    value = unicode(node.id)
                elif fieldname.lower() == 'node.type':
                    value = node.type
                elif fieldname.lower() == 'date()':
                    value = format_date(now(), format='%d.%m.%Y')
                elif fieldname.lower() == 'time()':
                    value = format_date(now(), format='%H:%M:%S')
                elif fieldname.find("+") > 0:
                    # "attr1+attr2+..." fallback chain: first non-empty wins
                    for _fn in fieldname.split('+'):
                        value = node.get(_fn)
                        if value:
                            break
                elif '[att:' in fieldname:
                    # template syntax: substitute each [att:name] placeholder
                    value = fieldname
                    while '[att:' in value:
                        m = re.search('(?<=\[att:)([^&\]]+)', value)
                        if m:
                            if m.group(0) == 'id':
                                v = unicode(node.id)
                            elif m.group(0) == 'type':
                                v = node.type
                            elif m.group(0) == 'schema':
                                v = getMetaType(node.schema).getLongName()
                            else:
                                schemafield = schema.children.filter_by(name=m.group(0)).first()
                                v = schemafield.getFormattedValue(node)[0]
                            value = value.replace('[att:%s]' % (m.group(0)), v)
                else:
                    logg.warning("workflowstep %s (%s): could not find attribute for pdf form field '%s' - node: '%s' (%s)",
                                 current_workflow_step.name, current_workflow_step.id, fieldname, node.name, node.id)
                fields.append((fieldname, remove_tags(desc(value))))

    if not pdf_form_separate and fnode and f_retrieve_path and os.path.isfile(f_retrieve_path):
        # merge mode: append filled pages to the node's document PDF
        pages = fillPDFForm(f_retrieve_path, fields, input_is_fullpath=True, editable=pdf_fields_editable)
        if pages == "":  # error in pdf creation -> forward to false operation
            logg.error("workflowstep %s (%s): could not create pdf file - node: '%s' (%s)" %
                       (current_workflow_step.name, current_workflow_step.id, node.name, node.id))
            self.forward(node, False)
            return
        origname = fnode.abspath
        outfile = addPagesToPDF(pages, origname)
        # NOTE(review): removing from node.files while iterating it skips
        # elements — confirm clearing all files is really intended here
        for f in node.files:
            node.files.remove(f)
        fnode.path = outfile.replace(config.get("paths.datadir"), "")
        node.files.append(fnode)
        node.files.append(File(origname, 'upload', 'application/pdf'))  # store original filename
        node.event_files_changed()
        db.session.commit()
        logg.info("workflow '%s' (%s), workflowstep '%s' (%s): added pdf form to pdf (node '%s' (%s)) fields: %s",
                  current_workflow.name, current_workflow.id, current_workflow_step.name,
                  current_workflow_step.id, node.name, node.id, fields)
    elif pdf_form_separate and f_retrieve_path and os.path.isfile(f_retrieve_path):
        # separate mode: keep the filled form as its own 'pdf_form' file
        pages = fillPDFForm(f_retrieve_path, fields, input_is_fullpath=True, editable=pdf_fields_editable)
        if pages == "":  # error in pdf creation -> forward to false operation
            logg.error("workflowstep %s (%s): could not create pdf file - node: '%s' (%s)" %
                       (current_workflow_step.name, current_workflow_step.id, node.name, node.id))
            self.forward(node, False)
            return
        importdir = getImportDir()
        try:
            new_form_path = join_paths(importdir, "%s_%s" % (node.id, f_name))
            counter = 0
            if not pdf_form_overwrite:  # build correct filename
                while os.path.isfile(new_form_path):
                    counter += 1
                    new_form_path = join_paths(importdir, "%s_%s_%s" % (node.id, counter, f_name))
            # copy new file and remove tmp
            shutil.copy(pages, new_form_path)
            if os.path.exists(pages):
                os.remove(pages)
        except Exception:
            logg.exception("workflowstep %s (%s): could not copy pdf form to import directory - node: '%s' (%s), import directory: '%s'",
                           current_workflow_step.name, current_workflow_step.id, node.name, node.id, importdir)
        # attach the file unless an identical path exists with overwrite enabled
        found = 0
        for fn in node.files:
            if fn.abspath == new_form_path:
                found = 1
                break
        if found == 0 or (found == 1 and not pdf_form_overwrite):
            node.files.append(File(new_form_path, 'pdf_form', 'application/pdf'))
            db.session.commit()
        logg.info(
            "workflow '%s' (%s), workflowstep '%s' (%s): added separate pdf form to node (node '%s' (%s)) fields: %s, path: '%s'",
            current_workflow.name, current_workflow.id, current_workflow_step.name,
            current_workflow_step.id, node.name, node.id, fields, new_form_path)
    else:
        logg.warning("workflowstep %s (%s): could not process pdf form - node: '%s' (%s)",
                     current_workflow_step.name, current_workflow_step.id, node.name, node.id)
    self.forward(node, True)
def export_shoppingbag_zip(req):
    # Export the user's shopping-bag selection as a zip download.
    # Collects the selected (readable) node ids from "select_*" params,
    # processes each node into a temp directory according to the requested
    # type (image/document/media) and the "metadata" mode, optionally writes
    # a tab-separated metadata .txt per node, streams the zip, then removes
    # the temp directory tree.
    from web.frontend.streams import sendZipFile
    from utils.utils import join_paths
    import core.config as config
    import random
    import os
    access = AccessData(req)
    items = []
    for key in req.params.keys():
        if key.startswith("select_"):
            _nid = key[7:]  # id follows the "select_" prefix
            _n = tree.getNode(_nid)
            if access.hasAccess(_n, 'read'):
                items.append(_nid)
    # per-request temp working directory
    dest = join_paths(config.get("paths.tempdir"), str(random.random())) + "/"
    # images
    if req.params.get("type") == "image":
        if req.params.get("metadata") in ["no", "yes"]:
            format_type = req.params.get("format_type")
            processtype = ""
            processvalue = ""
            if format_type == "perc":
                # scale by percentage; free-text value wins over preset
                processtype = "percentage"
                _perc = req.params.get("img_perc", ";").split(";")
                if _perc[0] != "":
                    processvalue = _perc[0]
                else:
                    processvalue = int(_perc[1])
            elif format_type == "pix":
                # scale to pixel size; free-text value wins over preset
                processtype = "pixels"
                _pix = req.params.get("img_pix", ";;").split(";")
                if _pix[0] != "":
                    processvalue = _pix[0]
                else:
                    processvalue = int(_pix[1])
            elif format_type == "std":
                processtype = "standard"
                processvalue = req.params.get("img_pix", ";;").split(";")[2]
            for item in items:
                node = tree.getNode(item)
                if not access.hasAccess(node, 'data'):
                    continue
                if node.processImage(processtype, processvalue, dest) == 0:
                    print "image not found"
    # documenttypes
    if req.params.get("type") == "document":
        if req.params.get("metadata") in ["no", "yes"]:
            if not os.path.isdir(dest):
                os.mkdir(dest)
            for item in items:
                node = tree.getNode(item)
                if not access.hasAccess(node, 'data'):
                    continue
                if node.processDocument(dest) == 0:
                    print "document not found"
    # documenttypes
    if req.params.get("type") == "media":
        if req.params.get("metadata") in ["no", "yes"]:
            if not os.path.isdir(dest):
                os.mkdir(dest)
            for item in items:
                node = tree.getNode(item)
                if not access.hasAccess(node, 'data'):
                    continue
                if node.processMediaFile(dest) == 0:
                    print "file not found"

    # metadata
    def flatten(arr):
        # recursively flatten nested view rows, dropping '' and [] entries
        return sum(map(lambda a: flatten(a) if (a and isinstance(a[0], list) and a != "") else [a],
                       [a for a in arr if a not in ['', []]]), [])

    if req.params.get("metadata") in ["yes", "meta"]:
        for item in items:
            node = tree.getNode(item)
            if not access.hasAccess(node, 'read'):
                continue
            if not os.path.isdir(dest):
                os.mkdir(dest)
            # one tab-separated header/content line pair per node
            content = {"header": [], "content": []}
            for c in flatten(node.getFullView(lang(req)).getViewHTML([node], VIEW_DATA_ONLY)):
                content["header"].append(c[0])
                content["content"].append(c[1])
            f = open(dest + item + ".txt", "w")
            f.write("\t".join(content["header"]) + "\n")
            f.write("\t".join(content["content"]) + "\n")
            f.close()
    if len(items) > 0:
        sendZipFile(req, dest)
        # remove the temp tree bottom-up after sending
        for root, dirs, files in os.walk(dest, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                os.rmdir(os.path.join(root, name))
        if os.path.isdir(dest):
            os.rmdir(dest)