Example 1
def runTALSnippet(s, context, mask=None):
    if s.find('tal:') < 0:
        return s

    header = '''<?xml version="1.0" encoding="UTF-8" ?>'''
    xmlns = '''<talnamespaces xmlns:tal="http://xml.zope.org/namespaces/tal" xmlns:metal="http://xml.zope.org/namespaces/metal">'''
    footer = '''</talnamespaces>'''
    cutter = "----cut-TAL-result-here----\n"

    if mask:
        exportheader = mask.get('exportheader')
        if exportheader.startswith("<?xml"):
            header = exportheader
        else:
            header += exportheader
        footer += mask.get('exportfooter')

    to_be_processed = header + xmlns + cutter + s + cutter + footer
    try:  # normally only encoding errors
        wr_result = tal.getTALstr(to_be_processed, context, mode='xml')
    except:  # try with u2 method
        try:
            wr_result = tal.getTALstr(u2(to_be_processed), context, mode='xml')
        except:
            wr_result = tal.getTALstr(u2(to_be_processed), context)
        #wr_result = tal.getTALstr(u2(to_be_processed), context, mode='xml')

    return wr_result[wr_result.find(cutter) + len(cutter):wr_result.rfind(cutter)]
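
A minimal usage sketch for runTALSnippet follows, assuming the project's tal renderer and u2 unicode helper are importable in the calling module (both are taken as given, not shown here); the snippet and context values are made up for illustration.

snippet = '<span tal:content="node/name">placeholder</span>'
context = {'node': {'name': 'Example node'}}
# Returns only the text rendered between the two cutter markers;
# plain strings without 'tal:' are returned unchanged.
rendered = runTALSnippet(snippet, context)
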
Example 2
def runTALSnippet(s, context, mask=None):
    if s.find('tal:') < 0:
        return s

    header = '''<?xml version="1.0" encoding="UTF-8" ?>'''
    xmlns = '''<talnamespaces xmlns:tal="http://xml.zope.org/namespaces/tal" xmlns:metal="http://xml.zope.org/namespaces/metal">'''
    footer = '''</talnamespaces>'''
    cutter = "----cut-TAL-result-here----\n"

    if mask:
        exportheader = mask.get('exportheader')
        if exportheader.startswith("<?xml"):
            header = exportheader
        else:
            header += exportheader
        footer += mask.get('exportfooter')

    to_be_processed = header + xmlns + cutter + s + cutter + footer
    try:  # normally only encoding errors
        wr_result = tal.getTALstr(to_be_processed, context, mode='xml')
    except:  # try with u2 method
        try:
            wr_result = tal.getTALstr(u2(to_be_processed), context, mode='xml')
        except:
            wr_result = tal.getTALstr(u2(to_be_processed), context)
        #wr_result = tal.getTALstr(u2(to_be_processed), context, mode='xml')

    return wr_result[wr_result.find(cutter) +
                     len(cutter):wr_result.rfind(cutter)]
Example 3
def buildNodeDescriptor(req,
                        node,
                        indent=None,
                        written=None,
                        children=True,
                        children_access=None,
                        parents=False):
    nd = []
    d = {}
    if written is None:
        written = {}

    nodedict = {}
    nodedict['id'] = node.id

    mask = req.params.get('mask', 'none').lower()

    attrlist = req.params.get('attrlist', [])
    if attrlist:
        attrlist = attrlist.split(',')

    attrspec = req.params.get('attrspec', 'default_mask')
    # 'all': no restriction, send all attributes
    # 'none': to not send any attribute at all
    # 'default_mask' (default): only send attributes that correspond to the default mask fields
    #
    # remark: attributes specified comma separated in 'attrlist' are added to those specified by 'attrspec'
    #

    if mask == 'default':
        maskcachetype = req.params.get('maskcache',
                                       'deep')  # 'deep', 'shallow', 'none'
        nodedict['defaultexport'] = node.show_node_text(
            labels=1,
            language=req.params.get('lang', ''),
            cachetype=maskcachetype)
        # except:
        #    logging.getLogger('services').error('Error: web.services.jsonnode: could not get default mask content')
        #    nodedict['defaultexport'] = []

    elif mask not in ["", "none"]:  # deliver every mask
        try:
            mask_obj = getMetaType(node.getSchema()).getMask(mask)
            if mask_obj:
                nodedict['defaultexport'] = mask_obj.getViewHTML([node],
                                                                 flags=8)
            else:
                nodedict['defaultexport'] = "mask not found"
        except:
            nodedict['defaultexport'] = "error"

    if children:
        nodedict['children'] = []
        for c in node.getChildren().sort_by_orderpos():
            if (not children_access) or (children_access and
                                         children_access.hasAccess(c, 'read')):
                nodedict['children'].append({
                    'id': str(c.id),
                    'type': c.type,
                    'name': esc(c.name)
                })

    if parents:
        nodedict['parents'] = []
        for c in node.getParents().sort_by_orderpos():
            if (not children_access) or (children_access and
                                         children_access.hasAccess(c, 'read')):
                nodedict['parents'].append({
                    'id': str(c.id),
                    'type': c.type,
                    'name': esc(c.name)
                })

    nd.append(nodedict)

    if children:
        for c in node.getChildren().sort_by_orderpos():
            if (not children_access) or (children_access and
                                         children_access.hasAccess(c, 'read')):
                if c.id not in written:
                    written[c.id] = None
                    childnodedict = buildNodeDescriptor(
                        req, c, indent, children_access=children_access)
                    nd.append(childnodedict)

    # if node.read_access:
    #    nodedict['read'] = esc(node.read_access)
    # if node.write_access:
    #    nodedict['write'] = esc(node.write_access)
    # if node.data_access:
    #    nodedict['data'] = esc(node.data_access)

    nodeattributes_dict = {}

    if attrspec == 'none':
        # no attributes should be sent
        pass
    elif attrspec == 'default_mask' or attrspec not in ['none', 'all']:
        from contenttypes.default import make_lookup_key, get_maskcache_entry, maskcache
        language = req.params.get('lang', '')
        lookup_key = make_lookup_key(node, language=language, labels=False)
        if lookup_key not in maskcache:
            # fill cache
            node.show_node_text(labels=False,
                                language=language,
                                cachetype='deep')

        field_descriptors = get_maskcache_entry(lookup_key)

        try:
            mask = field_descriptors[0]
            for field_descriptor in field_descriptors[1:]:
                field_attribute = field_descriptor[0]
                #fd = field_descriptor[1]
                if field_attribute not in attrlist:
                    # attrlist may be an empty list or filled from the request parameters
                    attrlist.append(field_attribute)
        except:
            # no mask for this metadata type
            msg = "no 'nodesmall' or 'shortview' for node %s" % str(node.id)
            logging.getLogger("services").warning(msg)

    elif attrspec == 'all':
        nodeattributes_dict_all_attributes = node.attributes.copy()
        if nodeattributes_dict_all_attributes:
            for k in filter(attribute_name_filter,
                            nodeattributes_dict_all_attributes.keys()):
                nodeattributes_dict[k] = u2(
                    (nodeattributes_dict_all_attributes[k]))

    if attrlist:
        for attr in filter(attribute_name_filter, attrlist):
            nodeattributes_dict[attr] = u2(node.get(attr))

    if nodeattributes_dict:
        nodedict['attributes'] = nodeattributes_dict

    if 'files' in req.params:

        nodedict['files'] = []

        for file in node.getFiles():
            if file.type == "metadata" or file.type == "statistic":
                continue
            mimetype = file.mimetype
            if mimetype is None:
                mimetype = "application/x-download"
            nodedict['files'].append({
                'filename': esc(file.getName()),
                'mime-type': mimetype,
                'type': file.type
            })

    if 'nodename' in req.params:
        nodedict['name'] = node.name

    if 'nodetype' in req.params:
        if node.type is None:
            nodedict['type'] = 'node'
        else:
            nodedict['type'] = node.type
    return nd
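
A hypothetical call, assuming req is a request object whose params attribute behaves like a dict of query parameters and node is a backend tree node; the parameter values shown are illustrative only.

# e.g. req.params = {'mask': 'default', 'attrlist': 'title,author', 'files': '1'}
descriptors = buildNodeDescriptor(req, node, children=True, parents=True)
root = descriptors[0]   # descriptor of node itself: 'id', and possibly
                        # 'defaultexport', 'children', 'parents', 'attributes', 'files', ...
# descriptors[1:] hold the descriptors of readable child nodes,
# each wrapped in its own list by the recursive call.
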
Example 4
def writexml(
    node,
    fi,
    indent=None,
    written=None,
    children=True,
    children_access=None,
    exclude_filetypes=[],
    exclude_children_types=[],
    attribute_name_filter=None,
):
    if written is None:
        written = {}
    if indent is None:
        indent = 0
    # there are a lot of nodes without name ...
    nodename_copy = node.name
    if nodename_copy is None:
        nodename_copy = ""
    # fi.write('%s<node name="%s" id="%s" ' % ((" " * indent), esc(nodename_copy), str(node.id)))
    # non-utf8 encoded umlauts etc. may cause invalid xml
    fi.write('%s<node name="%s" id="%s" ' % ((" " * indent), u2(esc(nodename_copy)), str(node.id)))
    if node.type is None:
        node.type = "node"
    fi.write('type="%s" ' % node.type)
    if node.read_access:
        fi.write('read="%s" ' % esc(node.read_access))
    if node.write_access:
        fi.write('write="%s" ' % esc(node.write_access))
    if node.data_access:
        fi.write('data="%s" ' % esc(node.data_access))
    fi.write(">\n")

    indent += 4

    for name, value in node.items():
        u_esc_name = u(esc(name))
        if attribute_name_filter and not attribute_name_filter(u_esc_name):
            continue
        fi.write('%s<attribute name="%s"><![CDATA[%s]]></attribute>\n' % ((" " * indent), u_esc_name, u2(value)))

    for file in node.getFiles():
        if file.type == "metadata" or file.type in exclude_filetypes:
            continue
        mimetype = file.mimetype
        if mimetype is None:
            mimetype = "application/x-download"
        fi.write(
            '%s<file filename="%s" mime-type="%s" type="%s"/>\n'
            % ((" " * indent), esc(file.getName()), mimetype, (file.type is not None and file.type or "image"))
        )
    if children:
        for c in node.getChildren().sort_by_orderpos():
            if (not children_access) or (children_access and children_access.hasAccess(c, "read")):
                if c.type not in exclude_children_types:
                    fi.write('%s<child id="%s" type="%s"/>\n' % ((" " * indent), str(c.id), c.type))

    indent -= 4
    fi.write("%s</node>\n" % (" " * indent))
    if children:
        for c in node.getChildren().sort_by_orderpos():
            if (not children_access) or (children_access and children_access.hasAccess(c, "read")):
                if c.type not in exclude_children_types:
                    if c.id not in written:
                        written[c.id] = None
                        c.writexml(
                            fi,
                            indent=indent,
                            written=written,
                            children=children,
                            children_access=children_access,
                            exclude_filetypes=exclude_filetypes,
                            exclude_children_types=exclude_children_types,
                            attribute_name_filter=attribute_name_filter,
                        )

    if node.type in ["mask"]:
        try:
            exportmapping_id = node.get("exportmapping").strip()
            if exportmapping_id and exportmapping_id not in written:
                try:
                    exportmapping = tree.getNode(exportmapping_id)
                    written[exportmapping_id] = None
                    exportmapping.writexml(
                        fi,
                        indent=indent,
                        written=written,
                        children=children,
                        children_access=children_access,
                        exclude_filetypes=exclude_filetypes,
                        exclude_children_types=exclude_children_types,
                        attribute_name_filter=attribute_name_filter,
                    )
                except:
                    msg = (
                        "ERROR: node xml export error node.id='%s', node.name='%s', node.type='%s', exportmapping:'%s'"
                        % (str(node.id), node.name, node.type, str(exportmapping_id))
                    )
                    logging.getLogger("backend").error(msg)
            else:
                pass
        except:
            msg = "ERROR: node xml export error node.id='%s', node.name='%s', node.type='%s', exportmapping:'%s'" % (
                str(node.id),
                node.name,
                node.type,
                str(exportmapping_id),
            )
            logging.getLogger("backend").error(msg)
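
A minimal sketch of exporting one node to a file, assuming node is a backend tree node and that the helpers esc, u, u2 and the tree module used above are available; the output path, the wrapper element and the filter on system attributes are assumptions for illustration.

with open('node_export.xml', 'w') as fi:
    fi.write('<?xml version="1.0" encoding="UTF-8"?>\n<nodelist>\n')
    # keep only attributes whose (escaped) name does not start with 'system.'
    writexml(node, fi, attribute_name_filter=lambda name: not name.startswith('system.'))
    fi.write('</nodelist>\n')
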
Example 5
def getentries(filename):
    save_import_file(filename)

    fi = codecs.open(filename, "r", "utf-8")

    try:
        data = fi.read()
    except UnicodeDecodeError:
        fi.close()
        msg = "bibtex import: getentries(filename): encoding error when trying codec 'utf-8', filename was " + filename
        logger.error(msg)
        msg = "bibtex import: getentries(filename): going to try without codec 'utf-8', filename was " + filename
        logger.info(msg)

        try:
            fi = codecs.open(filename, "r")
            try:
                data = fi.read()
                data = u2(data)
            except Exception as e:
                fi.close()
                msg = "bibtex import: getentries(filename): error at second attempt: " + str(
                    e)
                logger.info(msg)

                raise MissingMapping("wrong encoding")
        except Exception as e:
            msg = "bibtex import: getentries(filename): error at second attempt: " + str(
                e)
            logger.error(msg)

            raise MissingMapping("wrong encoding")
    try:
        fi.close()
    except:
        pass

    data = data.replace("\r", "\n")
    # throw out BOM
    try:
        data = u2(data).replace('\xef\xbb\xbf', "")
    except:
        pass
    data = comment.sub('\n', data)
    recordnr = 1

    size = len(data)
    pos = 0
    records = []
    fields = {}
    doctype = None
    placeholder = {}
    while True:
        m = token.search(data, pos)
        if not m:
            break
        start = m.start()
        end = m.end()
        if data[start] == '@':
            doctype = data[start + 1:end - 1].replace("{", "").strip().lower()
            m = delim2.search(data[end:])
            if m:  # and m.start()>end:
                key = data[end:end + m.end()].strip()
                pos = end + m.end()
                if key[-1] == ",":
                    key = key[0:-1]
            else:
                key = "record%05d" % recordnr
                recordnr = recordnr + 1
                #pos = m.end()
                pos = end

            if ESCAPE_BIBTEX_KEY:
                key = escape_bibtexkey(key)

            fields = {}
            key = u2(key)
            fields["key"] = key
            records += [(doctype, key, fields)]

            if doctype == "string":
                # found placeholder
                t2 = re.compile(r'[^}]*')
                x = t2.search(data, end)
                x_start = x.start()
                x_end = x.end()
                s = data[x_start:x_end + 1]
                key, value = s.split("=")

                placeholder[key.strip()] = value.strip()[1:-1]
                pos = x_end

                if VERBOSE:
                    try:
                        msg = "bibtex import: placeholder: key='%s', value='%s'" % (
                            key.strip(), value.strip()[1:-1])
                        logger.info(msg)
                    except Exception as e:
                        try:
                            msg = "bibtex import: placeholder: key='%s', value='%s'" % (
                                key.strip(), value.strip()[1:-1].encode(
                                    "utf8", "replace"))
                            logger.info(msg)
                        except Exception as e:
                            msg = "bibtex import: placeholder: 'not printable key-value pair'"
                            logger.info(msg)

        elif doctype:
            # new record
            s = data[start:end]

            if end and data[end - 1].isalnum():
                # for the \w+\s*=\s+[0-9a-zA-Z_] case
                end = end - 1

            field = s[:s.index("=")].strip().lower()
            pos = end
            next_token = token.search(data, pos)
            if next_token:
                content = data[pos:next_token.start()]
            else:
                content = data[pos:]

            content = content.replace("{", "")
            content = content.replace("~", " ")
            content = content.replace("}", "")

            for key in placeholder:
                content = content.replace(key, placeholder[key])

            # some people use html entities in their bibtex...
            content = content.replace("&quot;", "'")
            content = xspace.sub(
                " ", backgarbage.sub("", frontgarbage.sub("", content)))

            content = u(content)
            content = content.replace("\\\"u", "\xc3\xbc").replace("\\\"a", "\xc3\xa4").replace("\\\"o", "\xc3\xb6") \
                .replace("\\\"U", "\xc3\x9c").replace("\\\"A", "\xc3\x84").replace("\\\"O", "\xc3\x96")
            content = content.replace("\\\'a", "\xc3\xa0").replace("\\\'A", "\xc3\x80").replace("\\vc", "\xc4\x8d") \
                .replace("\\vC", "\xc4\x8c")
            content = content.replace("\\", "")
            content = content.replace("{\"u}", "\xc3\xbc").replace("{\"a}", "\xc3\xa4").replace("{\"o}", "\xc3\xb6") \
                .replace("{\"U}", "\xc3\x9c").replace("{\"A}", "\xc3\x84").replace("{\"O}", "\xc3\x96")

            content = content.strip()

            if field in ["author", "editor"] and content:
                authors = []
                for author in content.split(" and "):
                    author = author.strip()
                    if "," not in author and " " in author:
                        i = author.rindex(' ')
                        if i > 0:
                            forename, lastname = author[0:i].strip(
                            ), author[i + 1:].strip()
                        author = "%s, %s" % (lastname, forename)
                    authors += [author]
                content = ";".join(authors)

            fields[field] = content
        else:
            pos = end
    return records
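
A hypothetical use of getentries, assuming a BibTeX file on disk and the module-level regexes (token, delim2, comment, ...) and helpers (u, u2, save_import_file, escape_bibtexkey) defined elsewhere in the importing module; the path is illustrative.

records = getentries('/tmp/import.bib')   # list of (doctype, key, fields) tuples
for doctype, key, fields in records:
    # author/editor values are normalised to 'Last, First' names joined by ';'
    print("%s %s: %s" % (doctype, key, fields.get('title', '')))
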
Example 6
def writexml(node, fi, indent=None, written=None, children=True, children_access=None,
             exclude_filetypes=[], exclude_children_types=[], attribute_name_filter=None):
    if written is None:
        written = {}
    if indent is None:
        indent = 0
    # there are a lot of nodes without name ...
    nodename_copy = node.name
    if nodename_copy is None:
        nodename_copy = ""
    #fi.write('%s<node name="%s" id="%s" ' % ((" " * indent), esc(nodename_copy), str(node.id)))
    # non-utf8 encoded umlauts etc. may cause invalid xml
    fi.write('%s<node name="%s" id="%s" ' % ((" " * indent), u2(esc(nodename_copy)), str(node.id)))
    if node.type is None:
        node.type = "node"
    fi.write('type="%s" ' % node.type)
    if node.read_access:
        fi.write('read="%s" ' % esc(node.read_access))
    if node.write_access:
        fi.write('write="%s" ' % esc(node.write_access))
    if node.data_access:
        fi.write('data="%s" ' % esc(node.data_access))
    fi.write(">\n")

    indent += 4

    for name, value in node.items():
        u_esc_name = u(esc(name))
        if attribute_name_filter and not attribute_name_filter(u_esc_name):
            continue
        fi.write('%s<attribute name="%s"><![CDATA[%s]]></attribute>\n' % ((" " * indent), u_esc_name, u2(value)))

    for file in node.getFiles():
        if file.type == "metadata" or file.type in exclude_filetypes:
            continue
        mimetype = file.mimetype
        if mimetype is None:
            mimetype = "application/x-download"
        fi.write('%s<file filename="%s" mime-type="%s" type="%s"/>\n' %
                 ((" " * indent), esc(file.getName()), mimetype, (file.type is not None and file.type or "image")))
    if children:
        for c in node.getChildren().sort_by_orderpos():
            if (not children_access) or (children_access and children_access.hasAccess(c, 'read')):
                if c.type not in exclude_children_types:
                    fi.write('%s<child id="%s" type="%s"/>\n' % ((" " * indent), str(c.id), c.type))

    indent -= 4
    fi.write("%s</node>\n" % (" " * indent))
    if children:
        for c in node.getChildren().sort_by_orderpos():
            if (not children_access) or (children_access and children_access.hasAccess(c, 'read')):
                if c.type not in exclude_children_types:
                    if c.id not in written:
                        written[c.id] = None
                        c.writexml(fi, indent=indent,
                                   written=written,
                                   children=children,
                                   children_access=children_access,
                                   exclude_filetypes=exclude_filetypes,
                                   exclude_children_types=exclude_children_types,
                                   attribute_name_filter=attribute_name_filter
                                   )

    if node.type in ["mask"]:
        try:
            exportmapping_id = node.get("exportmapping").strip()
            if exportmapping_id and exportmapping_id not in written:
                try:
                    exportmapping = tree.getNode(exportmapping_id)
                    written[exportmapping_id] = None
                    exportmapping.writexml(fi, indent=indent,
                                           written=written,
                                           children=children,
                                           children_access=children_access,
                                           exclude_filetypes=exclude_filetypes,
                                           exclude_children_types=exclude_children_types,
                                           attribute_name_filter=attribute_name_filter
                                           )
                except:
                    msg = "ERROR: node xml export error node.id='%s', node.name='%s', node.type='%s', exportmapping:'%s'" % (
                        str(node.id), node.name, node.type, str(exportmapping_id))
                    logging.getLogger("backend").error(msg)
            else:
                pass
        except:
            msg = "ERROR: node xml export error node.id='%s', node.name='%s', node.type='%s', exportmapping:'%s'" % (
                str(node.id), node.name, node.type, str(exportmapping_id))
            logging.getLogger("backend").error(msg)
Example 7
def getentries(filename):
    save_import_file(filename)

    fi = codecs.open(filename, "r", "utf-8")

    try:
        data = fi.read()
    except UnicodeDecodeError:
        fi.close()
        msg = "bibtex import: getentries(filename): encoding error when trying codec 'utf-8', filename was " + filename
        logger.error(msg)
        msg = "bibtex import: getentries(filename): going to try without codec 'utf-8', filename was " + filename
        logger.info(msg)

        try:
            fi = codecs.open(filename, "r")
            try:
                data = fi.read()
                data = u2(data)
            except Exception as e:
                fi.close()
                msg = "bibtex import: getentries(filename): error at second attempt: " + str(e)
                logger.info(msg)

                raise MissingMapping("wrong encoding")
        except Exception as e:
            msg = "bibtex import: getentries(filename): error at second attempt: " + str(e)
            logger.error(msg)

            raise MissingMapping("wrong encoding")
    try:
        fi.close()
    except:
        pass

    data = data.replace("\r", "\n")
    # throw out BOM
    try:
        data = u2(data).replace('\xef\xbb\xbf', "")
    except:
        pass
    data = comment.sub('\n', data)
    recordnr = 1

    size = len(data)
    pos = 0
    records = []
    fields = {}
    doctype = None
    placeholder = {}
    while True:
        m = token.search(data, pos)
        if not m:
            break
        start = m.start()
        end = m.end()
        if data[start] == '@':
            doctype = data[start + 1:end - 1].replace("{", "").strip().lower()
            m = delim2.search(data[end:])
            if m:  # and m.start()>end:
                key = data[end:end + m.end()].strip()
                pos = end + m.end()
                if key[-1] == ",":
                    key = key[0:-1]
            else:
                key = "record%05d" % recordnr
                recordnr = recordnr + 1
                #pos = m.end()
                pos = end

            if ESCAPE_BIBTEX_KEY:
                key = escape_bibtexkey(key)

            fields = {}
            key = u2(key)
            fields["key"] = key
            records += [(doctype, key, fields)]

            if doctype == "string":
                # found placeholder
                t2 = re.compile(r'[^}]*')
                x = t2.search(data, end)
                x_start = x.start()
                x_end = x.end()
                s = data[x_start:x_end + 1]
                key, value = s.split("=")

                placeholder[key.strip()] = value.strip()[1:-1]
                pos = x_end

                if VERBOSE:
                    try:
                        msg = "bibtex import: placeholder: key='%s', value='%s'" % (key.strip(), value.strip()[1:-1])
                        logger.info(msg)
                    except Exception as e:
                        try:
                            msg = "bibtex import: placeholder: key='%s', value='%s'" % (
                                key.strip(), value.strip()[1:-1].encode("utf8", "replace"))
                            logger.info(msg)
                        except Exception as e:
                            msg = "bibtex import: placeholder: 'not printable key-value pair'"
                            logger.info(msg)

        elif doctype:
            # new record
            s = data[start:end]

            if end and data[end - 1].isalnum():
                # for the \w+\s*=\s+[0-9a-zA-Z_] case
                end = end - 1

            field = s[:s.index("=")].strip().lower()
            pos = end
            next_token = token.search(data, pos)
            if next_token:
                content = data[pos:next_token.start()]
            else:
                content = data[pos:]

            content = content.replace("{", "")
            content = content.replace("~", " ")
            content = content.replace("}", "")

            for key in placeholder:
                content = content.replace(key, placeholder[key])

            # some people use html entities in their bibtex...
            content = content.replace("&quot;", "'")
            content = xspace.sub(" ", backgarbage.sub("", frontgarbage.sub("", content)))

            content = u(content)
            content = content.replace("\\\"u", "\xc3\xbc").replace("\\\"a", "\xc3\xa4").replace("\\\"o", "\xc3\xb6") \
                .replace("\\\"U", "\xc3\x9c").replace("\\\"A", "\xc3\x84").replace("\\\"O", "\xc3\x96")
            content = content.replace("\\\'a", "\xc3\xa0").replace("\\\'A", "\xc3\x80").replace("\\vc", "\xc4\x8d") \
                .replace("\\vC", "\xc4\x8c")
            content = content.replace("\\", "")
            content = content.replace("{\"u}", "\xc3\xbc").replace("{\"a}", "\xc3\xa4").replace("{\"o}", "\xc3\xb6") \
                .replace("{\"U}", "\xc3\x9c").replace("{\"A}", "\xc3\x84").replace("{\"O}", "\xc3\x96")

            content = content.strip()

            if field in ["author", "editor"] and content:
                authors = []
                for author in content.split(" and "):
                    author = author.strip()
                    if "," not in author and " " in author:
                        i = author.rindex(' ')
                        if i > 0:
                            forename, lastname = author[0:i].strip(), author[i + 1:].strip()
                        author = "%s, %s" % (lastname, forename)
                    authors += [author]
                content = ";".join(authors)

            fields[field] = content
        else:
            pos = end
    return records
Example 8
def buildNodeDescriptor(req, node, indent=None, written=None, children=True, children_access=None, parents=False):
    nd = []
    d = {}
    if written is None:
        written = {}

    nodedict = {}
    nodedict['id'] = node.id

    mask = req.params.get('mask', 'none').lower()

    attrlist = req.params.get('attrlist', [])
    if attrlist:
        attrlist = attrlist.split(',')

    attrspec = req.params.get('attrspec', 'default_mask')
    # 'all': no restriction, send all attributes
    # 'none': to not send any attribute at all
    # 'default_mask' (default): only send attributes that correspond to the default mask fields
    #
    # remark: attributes specified comma separated in 'attrlist' are added to those specified by 'attrspec'
    #

    if mask == 'default':
        maskcachetype = req.params.get('maskcache', 'deep')  # 'deep', 'shallow', 'none'
        nodedict['defaultexport'] = node.show_node_text(labels=1, language=req.params.get('lang', ''), cachetype=maskcachetype)
        # except:
        #    logging.getLogger('services').error('Error: web.services.jsonnode: could not get default mask content')
        #    nodedict['defaultexport'] = []

    elif mask not in ["", "none"]:  # deliver every mask
        try:
            mask_obj = getMetaType(node.getSchema()).getMask(mask)
            if mask_obj:
                nodedict['defaultexport'] = mask_obj.getViewHTML([node], flags=8)
            else:
                nodedict['defaultexport'] = "mask not found"
        except:
            nodedict['defaultexport'] = "error"

    if children:
        nodedict['children'] = []
        for c in node.getChildren().sort_by_orderpos():
            if (not children_access) or (children_access and children_access.hasAccess(c, 'read')):
                nodedict['children'].append({'id': str(c.id), 'type': c.type, 'name': esc(c.name)})

    if parents:
        nodedict['parents'] = []
        for c in node.getParents().sort_by_orderpos():
            if (not children_access) or (children_access and children_access.hasAccess(c, 'read')):
                nodedict['parents'].append({'id': str(c.id), 'type': c.type, 'name': esc(c.name)})

    nd.append(nodedict)

    if children:
        for c in node.getChildren().sort_by_orderpos():
            if (not children_access) or (children_access and children_access.hasAccess(c, 'read')):
                if c.id not in written:
                    written[c.id] = None
                    childnodedict = buildNodeDescriptor(req, c, indent, children_access=children_access)
                    nd.append(childnodedict)

    # if node.read_access:
    #    nodedict['read'] = esc(node.read_access)
    # if node.write_access:
    #    nodedict['write'] = esc(node.write_access)
    # if node.data_access:
    #    nodedict['data'] = esc(node.data_access)

    nodeattributes_dict = {}

    if attrspec == 'none':
        # no attributes should be sent
        pass
    elif attrspec == 'default_mask' or attrspec not in ['none', 'all']:
        from contenttypes.default import make_lookup_key, get_maskcache_entry, maskcache
        language = req.params.get('lang', '')
        lookup_key = make_lookup_key(node, language=language, labels=False)
        if lookup_key not in maskcache:
            # fill cache
            node.show_node_text(labels=False, language=language, cachetype='deep')

        field_descriptors = get_maskcache_entry(lookup_key)

        try:
            mask = field_descriptors[0]
            for field_descriptor in field_descriptors[1:]:
                field_attribute = field_descriptor[0]
                #fd = field_descriptor[1]
                if field_attribute not in attrlist:
                    # attrlist may be an empty list or filled from the request parameters
                    attrlist.append(field_attribute)
        except:
            # no mask for this metadata type
            msg = "no 'nodesmall' or 'shortview' for node %s" % str(node.id)
            logging.getLogger("services").warning(msg)

    elif attrspec == 'all':
        nodeattributes_dict_all_attributes = node.attributes.copy()
        if nodeattributes_dict_all_attributes:
            for k in filter(attribute_name_filter, nodeattributes_dict_all_attributes.keys()):
                nodeattributes_dict[k] = u2((nodeattributes_dict_all_attributes[k]))

    if attrlist:
        for attr in filter(attribute_name_filter, attrlist):
            nodeattributes_dict[attr] = u2(node.get(attr))

    if nodeattributes_dict:
        nodedict['attributes'] = nodeattributes_dict

    if 'files' in req.params:

        nodedict['files'] = []

        for file in node.getFiles():
            if file.type == "metadata" or file.type == "statistic":
                continue
            mimetype = file.mimetype
            if mimetype is None:
                mimetype = "application/x-download"
            nodedict['files'].append({'filename': esc(file.getName()), 'mime-type': mimetype, 'type': file.type})

    if 'nodename' in req.params:
        nodedict['name'] = node.name

    if 'nodetype' in req.params:
        if node.type is None:
            nodedict['type'] = 'node'
        else:
            nodedict['type'] = node.type
    return nd