def nodeToFulltextSearch(self, node, schema): # build fulltext index from node if not hasattr(node, "getCategoryName") or not node.getCategoryName() == "document": # only build fulltext of document nodes return True r = re.compile("[a-zA-Z0-9]+") if self.execute('SELECT id from textsearchmeta where id=\'{}\''.format(node.id), schema, 'text'): # FIXME: we should not delete the old textdata from this node, and insert # the new files. Only problem is, DELETE from a FTS3 table is prohibitively # slow. return for file in node.getFiles(): w = '' if file.getType() == "fulltext" and os.path.exists(file.retrieveFile()): data = {} content = '' f = open(file.retrieveFile()) try: for line in f: if FULLTEXT_INDEX_MODE == 0: content += u(line) else: for w in re.findall(r, line): if w not in data.keys(): data[w] = 1 try: data[w] += 1 except KeyError: data[w] = 1 finally: f.close() if FULLTEXT_INDEX_MODE == 1: for key in data.keys(): content += key + " " elif FULLTEXT_INDEX_MODE == 2: for key in data.keys(): content += key + " [" + str(data[key]) + "] " content = u(content.replace("'", "").replace('"', "")) if len(content) > 0: content_len = len(content) p = 0 while p in range(0, int(ceil(content_len / 500000.0))): sql = 'INSERT INTO textsearchmeta (id, type, schema, value) VALUES("{}", "{}", "{}", "{}")'.format(node.id, node.getContentType(), schema, normalize_utf8((content[p * 500000:(p + 1) * 500000 - 1]))) try: self.execute(sql, schema, 'text') except: print "\nerror in fulltext of node", node.id return False p += 1 return True return True
def update_node(req, path, params, data, id):
    """Service handler: update name and metadata of node *id*.

    Resolves the requesting user (falling back to the guest user 'Gast'),
    verifies the request signature, checks write access, applies the
    JSON-encoded metadata from the request and stamps updateuser/updatetime.
    Returns a (http-code, body-length, info-dict) triple.
    """
    # resolve the requesting user and verify the signature
    if params.get('user'):
        user = users.getUser(params.get('user'))
        userAccess = AccessData(user=user)
        if userAccess.user:
            if not userAccess.verify_request_signature(req.fullpath, params):
                userAccess = None
        else:
            userAccess = None
    else:
        user = users.getUser('Gast')
        userAccess = AccessData(user=user)

    node = tree.getNode(id)

    # reject the request unless the user has write access to the node
    if not (userAccess and userAccess.hasAccess(node, "write")):
        s = "No Access"
        req.write(s)
        d = {'status': 'fail',
             'html_response_code': '403',
             'errormessage': 'no access'}
        return d['html_response_code'], len(s), d

    node.name = params.get('name')
    metadata = json.loads(params.get('metadata'))

    # apply the provided metadata values
    for key, value in metadata.iteritems():
        node.set(u(key), u(value))

    # service flags
    node.set("updateuser", user.getName())
    node.set("updatetime", format_date())
    node.setDirty()

    d = {'status': 'OK',
         'html_response_code': '200',
         'build_response_end': time.time()}
    s = "OK"
    # we need to write in case of POST request, send as buffer will not work
    req.write(s)
    req.reply_headers['updatetime'] = node.get('updatetime')
    return d['html_response_code'], len(s), d
def update_node(req, path, params, data, id):
    """Service handler: update name and metadata of node *id*.

    Resolves the requesting user (falling back to guest user 'Gast'),
    verifies the request signature, checks write access, applies the
    JSON-encoded metadata from the request and stamps updateuser/updatetime.
    Returns a (http-code, body-length, info-dict) triple.
    """
    # get the user and verify the signature
    if params.get('user'):
        user = users.getUser(params.get('user'))
        userAccess = AccessData(user=user)
        if userAccess.user:
            valid = userAccess.verify_request_signature(req.fullpath, params)
            if not valid:
                userAccess = None
        else:
            userAccess = None
    else:
        # no user given -> guest access
        user = users.getUser('Gast')
        userAccess = AccessData(user=user)
    node = tree.getNode(id)
    # check user access; anything but verified write access yields a 403
    if userAccess and userAccess.hasAccess(node, "write"):
        pass
    else:
        s = "No Access"
        req.write(s)
        d = {
            'status': 'fail',
            'html_response_code': '403',
            'errormessage': 'no access'}
        return d['html_response_code'], len(s), d
    # NOTE(review): params 'name'/'metadata' are assumed present; a missing
    # 'metadata' would make json.loads raise -- confirm against callers
    node.name = params.get('name')
    metadata = json.loads(params.get('metadata'))
    # set provided metadata
    for key, value in metadata.iteritems():
        node.set(u(key), u(value))
    # service flags
    node.set("updateuser", user.getName())
    node.set("updatetime", format_date())
    node.setDirty()
    d = {
        'status': 'OK',
        'html_response_code': '200',
        'build_response_end': time.time()}
    s = "OK"
    # we need to write in case of POST request, send as buffer will not work
    req.write(s)
    req.reply_headers['updatetime'] = node.get('updatetime')
    return d['html_response_code'], len(s), d
def getSQL(type, value, spc=None):  # deliver sql for given type
    """Build the raw SQL statement for the given search *type*.

    *value* is normalized and escaped via protect()/normalize_utf8();
    *spc* carries extra parameters for the types that need them
    ('op' comparison operator, 'pos' = (field position, schema)).
    Returns None for unknown types.

    Fixed: the mutable default argument ``spc={}`` is replaced by the
    None-sentinel idiom (behavior-compatible; spc was only read, but a
    shared default dict is an accident waiting to happen).

    NOTE(review): statements are built by string concatenation; quoting
    relies on protect()/normalize_utf8() stripping quote characters --
    verify before exposing new query types.
    """
    if spc is None:
        spc = {}
    value = normalize_utf8(protect(u(value)))
    if type == "full":  # all metadata
        return 'select distinct(id) from fullsearchmeta where fullsearchmeta match \'value:' + value + '\' and type <>\'directory\''
    elif type == "fulltext":  # fulltext
        return 'select distinct(id) from textsearchmeta where textsearchmeta match \'value:' + value + '\' and type<>\'directory\''
    elif type == "schema":  # schemadef
        return 'select distinct(id) from fullsearchmeta where schema="' + value.replace("'", "") + '"'
    elif type == "objtype":  # object type
        return 'select distinct(id) from fullsearchmeta where type="' + value.replace("'", "") + '"'
    elif type == "updatetime":  # update time with operator <|>|=
        if len(value) == 10:
            # date without time -> extend to a full timestamp
            value += "T00:00:00"
        return 'select distinct(id) from searchmeta where updatetime ' + spc['op'] + ' "' + value.replace("t", "T") + '"'
    elif type == "field":
        return 'select position, name from searchmeta_def where attrname=\'' + value + '\''
    elif type == "spcompare":
        return 'select distinct(id) from searchmeta where schema="' + \
            str(spc['pos'][1]) + '" and field' + str(spc['pos'][0]) + ' ' + spc['op'] + ' "' + value + '"'
    elif type == "spfield":
        return 'select distinct(id) from searchmeta where field' + str(spc['pos'][0]) + '=""'
    elif type == "spmatch":
        return 'select distinct(id) from searchmeta where schema=\'' + \
            str(spc['pos'][1]) + '\' and field' + str(spc['pos'][0]) + ' match \'' + value + '\''
    elif type == "content_full":
        return 'select * from fullsearchmeta where id=\'' + value + '\''
    elif type == "content_text":
        return 'select * from textsearchmeta where id=\'' + value + '\''
    elif type == "content_ext":
        return 'select * from searchmeta where id=\'' + value + '\''
def getSQL(type, value, spc={}):  # deliver sql for given type
    """Build the raw SQL statement for the given search *type*.

    *value* is normalized and escaped via protect()/normalize_utf8();
    *spc* carries extra parameters for the types that need them
    ('op' comparison operator, 'pos' = (field position, schema)).
    Returns None for unknown types.

    NOTE(review): the default ``spc={}`` is a mutable default argument
    (harmless here since spc is only read, but fragile); statements are
    built by string concatenation and rely on protect() for quoting.
    """
    value = normalize_utf8(protect(u(value)))
    if type == "full":  # all metadata
        return 'select distinct(id) from fullsearchmeta where fullsearchmeta match \'value:' + value + '\' and type <>\'directory\''
    elif type == "fulltext":  # fulltext
        return 'select distinct(id) from textsearchmeta where textsearchmeta match \'value:' + value + '\' and type<>\'directory\''
    elif type == "schema":  # schemadef
        return 'select distinct(id) from fullsearchmeta where schema="' + value.replace(
            "'", "") + '"'
    elif type == "objtype":  # object type
        return 'select distinct(id) from fullsearchmeta where type="' + value.replace(
            "'", "") + '"'
    elif type == "updatetime":  # update time with operator <|>|=
        if len(value) == 10:
            # date without time -> extend to a full timestamp
            value += "T00:00:00"
        return 'select distinct(id) from searchmeta where updatetime ' + spc[
            'op'] + ' "' + value.replace("t", "T") + '"'
    elif type == "field":
        return 'select position, name from searchmeta_def where attrname=\'' + value + '\''
    elif type == "spcompare":
        return 'select distinct(id) from searchmeta where schema="' + \
            str(spc['pos'][1]) + '" and field' + str(spc['pos'][0]) + ' ' + spc['op'] + ' "' + value + '"'
    elif type == "spfield":
        return 'select distinct(id) from searchmeta where field' + str(
            spc['pos'][0]) + '=""'
    elif type == "spmatch":
        return 'select distinct(id) from searchmeta where schema=\'' + \
            str(spc['pos'][1]) + '\' and field' + str(spc['pos'][0]) + ' match \'' + value + '\''
    elif type == "content_full":
        return 'select * from fullsearchmeta where id=\'' + value + '\''
    elif type == "content_text":
        return 'select * from textsearchmeta where id=\'' + value + '\''
    elif type == "content_ext":
        return 'select * from searchmeta where id=\'' + value + '\''
def addChildren(self, children):
    """Append the print view of all child entries to the document data.

    *children* is a list of per-child item lists; an item whose first
    entry has kind "header" (item[3]) starts a new section.  Entries are
    numbered "[n/total]" where total excludes header rows.
    """
    self.addData(Paragraph('%s:' % t(self.language, "print_view_children"), self.bp))
    _head = 0
    # count headers so they are excluded from the entry total
    for c in children:
        if len(c) > 0 and c[0][3] == "header":
            _head += 1
    items = []
    _c = 1
    for c in children:
        if len(c) > 0 and c[0][3] == "header":
            # flush the entries collected for the previous section
            for item in items:
                self.addData(Paragraph("[%s/%s]: %s" % (_c, len(children) - _head, "; ".join(item)), self.bv))
                _c += 1
            # NOTE(review): replace('&', '&') is a no-op -- presumably it was
            # meant to decode '&amp;' (or escape '&'); confirm original intent
            self.addData(Paragraph(u(c[0][1]).replace('&', '&'), self.bf))
            items = []
            continue
        # collect the non-empty values of this child
        values = []
        for item in c:
            if item[1].strip() != "":
                values.append(item[1])
        items.append(values)
    # emit the remaining (or only) batch of entries
    for item in items:
        try:
            self.addData(Paragraph("[%s/%s]: %s" % (_c, len(children) - _head, ", ".join(item)), self.bv))
            _c += 1
        except:
            # fall back to an escaped variant when Paragraph rejects the raw
            # text; NOTE(review): _c is not incremented on this path -- confirm
            self.addData(Paragraph("[%s/%s]: %s" % (_c, len(children) - _head, esc(", ".join(item))), self.bv))
def nodeToExtSearch(self, node):
    """Insert the node's searchfield values into the searchmeta table.

    Returns True on success (or when the schema defines no searchfields),
    False when the insert failed.
    """
    fields = node.getSearchFields()
    if len(fields) == 0:
        # stop if schema has no searchfields
        return True

    # collect field values keyed "1", "2", ... in definition order
    v_list = {}
    for idx, field in enumerate(fields, start=1):
        v_list[str(idx)] = node.get(field.getName())

    # save definition
    self.nodeToSchemaDef(node)

    columns = 'INSERT INTO searchmeta (id, type, schema, '
    values = ''
    try:
        if len(v_list) > 0:
            for key in v_list:
                columns += 'field' + str(key) + ', '
                values += '"' + normalize_utf8(u(v_list[key])) + '", '
            sql = '{}) VALUES("{}", "{}", "{}", {})'.format(
                columns[:-2], node.id, node.getContentType(), node.getSchema(), values[:-2])
        else:
            sql = '{}) VALUES("{}", "{}", "{}")'.format(
                columns[:-2], node.id, node.getContentType(), node.getSchema())
        self.db.execute(sql)
        return True
    except:
        return False
def nodeToSimpleSearch(self, node, schema, type=""):
    """Create or refresh the fullsearchmeta row for *node* (simple search).

    Builds one searchable text blob from the node's non-system attributes
    and its file names/types/mimetypes, then UPDATEs the existing row or
    INSERTs a new one.  Returns True on success, False on failure.
    The *type* parameter is unused; kept for interface compatibility.
    """
    # build simple search index from node
    sql_upd = "UPDATE fullsearchmeta SET type='{}', schema='{}', value='{}| ".format(
        node.getContentType(), node.getSchema(), node.name)
    sql_ins = "INSERT INTO fullsearchmeta (id, type, schema, value) VALUES('{}', '{}', '{}', '{}| ".format(
        node.id, node.getContentType(), node.getSchema(), node.name)
    # attributes
    val = ''
    for key, value in node.items():
        if key not in SYSTEMATTRS:  # ignore system attributes
            val += protect(u(value)) + '| '
    # additionally index the normalized form of every word that differs
    # from its plain lowercase form (umlauts etc.); the split list is a
    # snapshot, so appending to val during the loop is safe
    for v in val.split(" "):
        v = u(v)
        if normalize_utf8(v) != v.lower():
            val += ' ' + normalize_utf8(v)
    val = val.replace(chr(0), "") + ' '
    # remove tex markup
    val = modify_tex(val, 'strip')
    # files
    for file in node.getFiles():
        val += protect(
            u(file.getName() + '| ' + file.getType() + '| ' + file.getMimeType()) + '| ')
    sql_upd += val + '\' WHERE id=\'{}\''.format(node.id)
    sql_ins += val + '\')'
    sql = ""
    try:
        sql = 'SELECT id from fullsearchmeta WHERE id=\'{}\''.format(
            node.id)
        if self.execute(sql, schema, 'full'):  # check existance
            sql = sql_upd  # do update
        else:
            sql = sql_ins  # do insert
        self.execute(sql, schema, 'full')
        return True
    except:
        logException('error in sqlite insert/update: ' + sql)
        return False
def getAllAttributeValues(self, attribute, access, schema=""):
    """Return {attribute value -> occurrence count} over all values of
    *attribute*, optionally restricted to *schema*.

    Multi-valued attributes are split at ';'; each part is stripped and
    normalized via u() before counting.  The *access* parameter is kept
    for interface compatibility (it was only used by the removed legacy
    implementation).

    Cleaned up: the original function contained a large unreachable block
    of legacy code after ``return values`` (the old attrlist/NodeList
    implementation that was "reverted back" in 2008); it has been removed.
    """
    values = {}
    try:
        if schema != "":
            fields = db.get_all_attribute_values(attribute, schema, distinct=True)
        else:
            fields = db.getMetaFields(attribute)
    except:
        # fall back to the plain metafield query on any DB error
        log.exception("")
        fields = db.getMetaFields(attribute)
    # REVERT BACK TO SIMPLE SQL QUERY BECAUSE BELOW CODE TOO *SLOW*
    # MK/2008/10/27
    for f in fields:
        for s in f[0].split(";"):
            s = u(s.strip())
            values[s] = values.get(s, 0) + 1
    return values
def simple_search(req):
    """Run a simple fulltext search over the selected collections.

    Reads the query from req.params["query"], restricts the search to the
    collections selected via "c_<id>" parameters (or all collections if
    none are selected), and returns either a single ContentList or a
    SearchResult wrapping all per-collection result lists.
    """
    from web.frontend.content import ContentList
    res = []
    words = []
    collections = []
    collection_ids = {}
    access = AccessData(req)
    q = u(req.params.get("query", ""))
    # test whether this query is restricted to a number of collections
    for key, value in req.params.items():
        if key.startswith("c_"):
            collection_ids[key[2:]] = 1
    # no collection means: all collections
    if len(collection_ids) == 0 or 1 in collection_ids.keys():
        for collection in access.filter(tree.getRoot("collections").getChildren()):
            collection_ids[collection.id] = 1
    # now retrieve all results in all collections
    for collection in getAllCollections():
        if collection.id in collection_ids:
            collections.append(collection)
    num = 0
    # NOTE(review): num is always 0 at this point, so the usertracing log
    # always reports "0 results"; presumably the log line was meant to run
    # after the searches below -- confirm before moving it
    logging.getLogger('usertracing').info(access.user.name + " search for '" + q + "', " + str(num) + " results")
    try:
        if req.params.get("act_node", None) and tree.getNode(req.params.get("act_node")).getContentType() != "collections":
            # actual node is a collection or directory
            result = tree.getNode(req.params.get("act_node")).search('full=' + q)
            result = access.filter(result)
            num += len(result)
            if len(result) > 0:
                # NOTE(review): 'collection' here is the leftover loop variable
                # from the loop above (the last matching collection) -- confirm
                cl = ContentList(result, collection, words)
                cl.feedback(req)
                cl.linkname = "Suchergebnis"
                cl.linktarget = ""
                res.append(cl)
        else:
            # actual node is collections-node: search every selected collection
            for collection in collections:
                result = collection.search('full=' + q)
                result = access.filter(result)
                num += len(result)
                if len(result) > 0:
                    cl = ContentList(result, collection, words)
                    cl.feedback(req)
                    cl.linkname = "Suchergebnis"
                    cl.linktarget = ""
                    res.append(cl)
        if len(res) == 1:
            return res[0]
        else:
            return SearchResult(res, q, collections)
    except:
        # any failure yields an empty search result page
        return SearchResult(None, q, collections)
def nodeToSimpleSearch(self, node, schema, type=""):
    """Create or refresh the fullsearchmeta row for *node* (simple search).

    Builds one searchable text blob from the node's non-system attributes
    and its file names/types/mimetypes, then UPDATEs the existing row or
    INSERTs a new one.  Returns True on success, False on failure.
    The *type* parameter is unused; kept for interface compatibility.
    """
    update_stmt = "UPDATE fullsearchmeta SET type='{}', schema='{}', value='{}| ".format(
        node.getContentType(), node.getSchema(), node.name)
    insert_stmt = "INSERT INTO fullsearchmeta (id, type, schema, value) VALUES('{}', '{}', '{}', '{}| ".format(
        node.id, node.getContentType(), node.getSchema(), node.name)

    # concatenate all non-system attribute values into one blob
    val = ''
    for key, value in node.items():
        if key in SYSTEMATTRS:  # ignore system attributes
            continue
        val += protect(u(value)) + '| '

    # additionally index the normalized form of every word that differs
    # from its plain lowercase form (umlauts etc.); the split list is a
    # snapshot, so appending to val inside the loop is safe
    for word in val.split(" "):
        word = u(word)
        if normalize_utf8(word) != word.lower():
            val += ' ' + normalize_utf8(word)

    val = val.replace(chr(0), "") + ' '
    # remove tex markup
    val = modify_tex(val, 'strip')

    # index file names, types and mimetypes as well
    for file in node.getFiles():
        val += protect(u(file.getName() + '| ' + file.getType() + '| ' + file.getMimeType()) + '| ')

    update_stmt += val + '\' WHERE id=\'{}\''.format(node.id)
    insert_stmt += val + '\')'

    sql = ""
    try:
        sql = 'SELECT id from fullsearchmeta WHERE id=\'{}\''.format(node.id)
        exists = self.execute(sql, schema, 'full')  # check existance
        sql = update_stmt if exists else insert_stmt
        self.execute(sql, schema, 'full')
        return True
    except:
        logException('error in sqlite insert/update: ' + sql)
        return False
def nodeToExtSearch(self, node, schema):
    """Write the node's searchfield values into searchmeta (extended search).

    Updates the existing row when the node is already indexed, otherwise
    inserts a new one.  Union fields concatenate the values of all listed
    attributes, '|'-separated.  Returns True on success, False on failure.
    """
    # build extended search index from node
    if len(node.getSearchFields()
           ) == 0:  # stop if schema has no searchfields
        return True
    self.nodeToSchemaDef(node, schema)  # save definition
    keyvalue = []
    i = 1
    for field in node.getSearchFields():
        # columns are named field1, field2, ... in definition order
        key = "field%d" % i
        i += 1
        value = ""
        if field.getFieldtype() == "union":
            for item in field.get("valuelist").split(";"):
                value += node.get(item) + '|'
        else:
            value = node.get(field.getName())
        keyvalue += [(key, modify_tex(u(protect(value)), 'strip'))]
    sql0 = 'SELECT id FROM searchmeta where id=\'{}\''.format(node.id)
    sql1 = 'UPDATE searchmeta SET '
    sql2 = 'INSERT INTO searchmeta (id, type, schema, updatetime'
    for key, value in keyvalue:
        sql1 += key + "='" + normalize_utf8(value) + "', "
        sql2 += ", "
        sql2 += key
    sql1 += "type='" + node.getContentType(
    ) + "', schema='" + schema + "', updatetime='" + node.get(
        "updatetime") + "'"
    sql2 += ") VALUES("
    sql2 += '\'{}\', "{}", "{}", "{}"'.format(node.id,
                                              node.getContentType(),
                                              schema,
                                              node.get("updatetime"))
    for key, value in keyvalue:
        sql2 += ", '" + normalize_utf8(value) + "'"
    sql1 += " WHERE id='{}'".format(node.id)
    sql2 += ")"
    sql = ""
    try:
        sql = sql0
        if self.execute(sql0, schema, 'ext'):  # select
            sql = sql1
            self.execute(sql1, schema, 'ext')  # do update
        else:
            sql = sql2
            self.execute(sql2, schema, 'ext')  # do insert
        return True
    except:
        logException('error in sqlite insert/update: ' + sql)
        return False
def getAllAttributeValues(attribute, schema):
    """Return {attribute value -> [node ids]} for nodes of *schema*.

    Multi-valued attributes are split at ';'; each part is stripped and
    normalized via u() before being used as a key.
    """
    values = {}
    for nid, value in tree.db.get_all_nids_attribute_values_for_schema(attribute, schema):
        for part in value.split(";"):
            part = u(part.strip())
            values.setdefault(part, []).append(nid)
    return values
def getAllAttributeValues(attribute, schema):
    """Return {attribute value -> [node ids]} for nodes of *schema*.

    Multi-valued attributes are split at ';'; each part is stripped and
    normalized via u() before being used as a key.
    """
    values = {}
    nids_values = tree.db.get_all_attribute_values(attribute, schema)
    for nid, value in nids_values:
        for s in value.split(";"):
            s = u(s.strip())
            if s not in values:
                values[s] = []
            values[s].append(nid)
    return values
def getAllAttributeValues(self, attribute, access, schema=""):
    """Return {attribute value -> occurrence count} over all values of
    *attribute*, optionally restricted to *schema*.

    Multi-valued attributes are split at ';'; each part is stripped and
    normalized via u() before counting.  NOTE(review): everything after
    the first ``return values`` below is unreachable legacy code (the old
    attrlist/NodeList implementation) kept in the file but never executed.
    """
    values = {}
    try:
        if schema != "":
            fields = db.get_all_attribute_values(attribute, schema, distinct=True)
        else:
            fields = db.getMetaFields(attribute)
    except:
        # fall back to the plain metafield query on any DB error
        log.exception("")
        fields = db.getMetaFields(attribute)
    # REVERT BACK TO SIMPLE SQL QUERY BECAUSE BELOW CODE TOO *SLOW*
    # MK/2008/10/27
    #fields = db.getMetaFields(attribute)
    for f in fields:
        for s in f[0].split(";"):
            s = u(s.strip())
            values[s] = values.get(s, 0) + 1
    return values
    # ---- unreachable legacy implementation below this line ----
    ALL = -1
    self.lock.acquire()
    # FIXME: this lock is aquired way too long
    try:
        if not hasattr(self, 'attrlist') or attribute not in self.attrlist.keys():
            self.attrlist = {}
            self.attrlist[attribute] = {}
        # current attribute not listed -> create id list
        if ALL not in self.attrlist[attribute].keys():
            self.attrlist[attribute][ALL] = {}
            ret = {}
            # TODO: optimize this
            for node in self.getAllChildren():
                v = node.get(attribute)
                if v not in ret.keys():
                    ret[v] = []
                ret[v].append(node.id)
            for key in ret.keys():
                self.attrlist[attribute][ALL][key] = NodeList(ret[key], key)
    finally:
        self.lock.release()
    level = access.getPrivilegeLevel()
    if level not in self.attrlist[attribute].keys():
        self.attrlist[attribute][level] = {}
        for item in self.attrlist[attribute][ALL].keys():
            if level == 0:
                l = self.attrlist[attribute][ALL][item]
            else:
                l = self.attrlist[attribute][ALL][item].filter(access)
            self.attrlist[attribute][level][item] = len(l)
    return self.attrlist[attribute][level]
def getAllAttributeValues(attribute, schema):
    """Return {attribute value -> [node ids]} for nodes of *schema*.

    Multi-valued attributes are split at ';'; each part is stripped and
    normalized via u() before being used as a key.

    Fixed: the original combined the two conditions with the Python
    ``and`` operator (``Node.a[...] != None and Node.a[...] != ''``),
    which does not build a SQL AND -- it truth-tests the first clause
    object and effectively passes only the second condition to filter().
    Both conditions are now applied as separate filter() calls.
    """
    values = {}
    nids_values = q(Node.id, Node.a[attribute]).filter(Node.schema == schema).filter(
        Node.a[attribute] != None).filter(Node.a[attribute] != '').distinct(Node.a[attribute]).all()
    for nid, value in nids_values:
        for s in value.split(";"):
            s = u(s.strip())
            if s not in values:
                values[s] = []
            values[s].append(nid)
    return values
def nodeToExtSearch(self, node, schema):
    """Write the node's searchfield values into searchmeta (extended search).

    Updates the existing row when the node is already indexed, otherwise
    inserts a new one.  Union fields concatenate the values of all listed
    attributes, '|'-separated.  Returns True on success, False on failure.
    """
    search_fields = node.getSearchFields()
    if len(search_fields) == 0:
        # schema has no searchfields -> nothing to index
        return True
    # save definition
    self.nodeToSchemaDef(node, schema)

    # (column name, cleaned value) pairs: columns are field1, field2, ...
    pairs = []
    for idx, field in enumerate(search_fields, start=1):
        if field.getFieldtype() == "union":
            raw = ""
            for item in field.get("valuelist").split(";"):
                raw += node.get(item) + '|'
        else:
            raw = node.get(field.getName())
        pairs.append(("field%d" % idx, modify_tex(u(protect(raw)), 'strip')))

    select_sql = 'SELECT id FROM searchmeta where id=\'{}\''.format(node.id)

    update_sql = 'UPDATE searchmeta SET '
    insert_sql = 'INSERT INTO searchmeta (id, type, schema, updatetime'
    for col, cleaned in pairs:
        update_sql += col + "='" + normalize_utf8(cleaned) + "', "
        insert_sql += ", "
        insert_sql += col
    update_sql += "type='" + node.getContentType() + "', schema='" + schema + "', updatetime='" + node.get("updatetime") + "'"
    update_sql += " WHERE id='{}'".format(node.id)

    insert_sql += ") VALUES("
    insert_sql += '\'{}\', "{}", "{}", "{}"'.format(node.id, node.getContentType(), schema, node.get("updatetime"))
    for col, cleaned in pairs:
        insert_sql += ", '" + normalize_utf8(cleaned) + "'"
    insert_sql += ")"

    sql = ""
    try:
        sql = select_sql
        if self.execute(select_sql, schema, 'ext'):
            # row exists -> update
            sql = update_sql
            self.execute(update_sql, schema, 'ext')
        else:
            # no row yet -> insert
            sql = insert_sql
            self.execute(insert_sql, schema, 'ext')
        return True
    except:
        logException('error in sqlite insert/update: ' + sql)
        return False
def getAllAttributeValues(attribute, schema):
    """Return {attribute value -> [node ids]} for nodes of *schema*.

    Multi-valued attributes are split at ';'; each part is stripped and
    normalized via u() before being used as a key.

    Fixed: the original combined the two conditions with the Python
    ``and`` operator, which truth-tests the first SQLAlchemy clause
    object instead of producing a SQL AND, so only the second condition
    reached filter().  Both conditions are now separate filter() calls.
    """
    values = {}
    nids_values = q(
        Node.id, Node.a[attribute]).filter(Node.schema == schema).filter(
        Node.a[attribute] != None).filter(
        Node.a[attribute] != '').distinct(Node.a[attribute]).all()
    for nid, value in nids_values:
        for s in value.split(";"):
            s = u(s.strip())
            if s not in values:
                values[s] = []
            values[s].append(nid)
    return values
def nodeToFulltextSearch(self, node): # build fulltext index from node if not node.getContentType() in ("document", "dissertation"): # only build fulltext of document nodes # print "object is no document" return True r = re.compile("[a-zA-Z0-9]+") for file in node.getFiles(): w = '' if file.getType() == "fulltext" and os.path.exists(file.retrieveFile()): data = {} content = '' f = open(file.retrieveFile()) try: for line in f: if FULLTEXT_INDEX_MODE == 0: content += u(line) else: for w in re.findall(r, line): if w not in data.keys(): data[w] = 1 try: data[w] += 1 except KeyError: data[w] = 1 finally: f.close() if FULLTEXT_INDEX_MODE == 1: for key in data.keys(): content += key + " " elif FULLTEXT_INDEX_MODE == 2: for key in data.keys(): content += key + " [" + str(data[key]) + "] " sql = "" if len(content) > 0: try: sql = 'INSERT INTO textsearchmeta (id, type, schema, value) VALUES("{}", "{}", "{}", "{}")'.format(node.id, node.getContentType(), node.getSchema(), iso2utf8(esc(content))) self.db.execute(sql) except: print "error", node.id, "\n" return False else: print "no Content" return True
def nodeToFulltextSearch(self, node):
    """Build the fulltext index entry for *node*.

    Reads every attached "fulltext" file, optionally reduces the text to a
    word list / word-frequency list (depending on FULLTEXT_INDEX_MODE) and
    inserts the result into the ``textsearchmeta`` table.  Returns True on
    success or when nothing had to be indexed, False when an insert failed.
    """
    # build fulltext index from node
    if not node.getContentType() in ("document", "dissertation"):
        # only build fulltext of document nodes
        # print "object is no document"
        return True
    r = re.compile("[a-zA-Z0-9]+")
    for file in node.getFiles():
        w = ''
        if file.getType() == "fulltext" and os.path.exists(
                file.retrieveFile()):
            data = {}
            content = ''
            f = open(file.retrieveFile())
            try:
                for line in f:
                    if FULLTEXT_INDEX_MODE == 0:
                        content += u(line)
                    else:
                        for w in re.findall(r, line):
                            # NOTE(review): pre-seeding data[w] = 1 and then
                            # incrementing counts the first occurrence of every
                            # word twice -- presumably unintended; confirm
                            if w not in data.keys():
                                data[w] = 1
                            try:
                                data[w] += 1
                            except KeyError:
                                data[w] = 1
            finally:
                f.close()
            if FULLTEXT_INDEX_MODE == 1:
                for key in data.keys():
                    content += key + " "
            elif FULLTEXT_INDEX_MODE == 2:
                for key in data.keys():
                    content += key + " [" + str(data[key]) + "] "
            sql = ""
            if len(content) > 0:
                try:
                    sql = 'INSERT INTO textsearchmeta (id, type, schema, value) VALUES("{}", "{}", "{}", "{}")'.format(
                        node.id, node.getContentType(), node.getSchema(),
                        iso2utf8(esc(content)))
                    self.db.execute(sql)
                except:
                    print "error", node.id, "\n"
                    return False
            else:
                print "no Content"
    return True
def nodeToSimpleSearch(self, node):
    """Insert a fullsearchmeta row for *node* (simple search index).

    Concatenates all non-system attribute values and the node's file
    names/types/mimetypes into one searchable blob.  Returns True on
    success, False on any error (the exception is swallowed).
    """
    # build simple search index from node
    try:
        sql = 'INSERT INTO fullsearchmeta (id, type, schema, value) VALUES(\'{}\', \'{}\', \'{}\', \'{}| '.format(
            node.id, node.getContentType(), node.getSchema(), node.name)
        # attributes
        a = ''
        for key, value in node.items():
            if key not in SYSTEMATTRS:
                a += protect(u(value)) + '| '
        a = normalize_utf8(a)
        sql += a
        # files
        for file in node.getFiles():
            sql += protect(
                u(file.getName() + '| ' + file.getType() + '| ' + file.getMimeType()) + '| ')
        sql += '\')'
        self.db.execute(sql)
        return True
    except:
        return False
def nodeToSimpleSearch(self, node):
    """Insert a fullsearchmeta row for *node* (simple search index).

    Concatenates all non-system attribute values and the node's file
    names/types/mimetypes into one searchable blob.  Returns True on
    success, False on any error (the exception is swallowed).
    """
    try:
        stmt = 'INSERT INTO fullsearchmeta (id, type, schema, value) VALUES(\'{}\', \'{}\', \'{}\', \'{}| '.format(
            node.id, node.getContentType(), node.getSchema(), node.name)
        # collect all non-system attribute values
        blob = ''
        for key, value in node.items():
            if key in SYSTEMATTRS:
                continue
            blob += protect(u(value)) + '| '
        stmt += normalize_utf8(blob)
        # index file names, types and mimetypes too
        for file in node.getFiles():
            stmt += protect(u(file.getName() + '| ' + file.getType() + '| ' + file.getMimeType()) + '| ')
        stmt += '\')'
        self.db.execute(stmt)
        return True
    except:
        return False
def get_extended_field_ratio(schema, node, db_content):
    """
    Compares the values in the ext search db and the values in the node instance and returns
    a ratio of likeness between the two values.
    @param schema: String, name of the schema
    @param node: Node, an core.tree node instance
    @param db_content: indexable row of field values from the ext search db
    @return: Float in [0.0, 1.0]
    """
    ratios = []
    field_names = get_zero_index_schema_fields(schema)
    for field in field_names:
        # normalize the node value the same way the indexer does before comparing
        node_value = normalize_utf8(modify_tex(u(protect(node.get(field.name))), 'strip'))
        db_value = str(db_content[field.position])
        equality_ratio = difflib.SequenceMatcher(None, db_value, node_value).ratio()
        ratios.append(equality_ratio)
    if not ratios:
        # schema without comparable fields: the original raised
        # ZeroDivisionError here; report zero likeness instead
        return 0.0
    return sum(ratios) / len(ratios)
def get_extended_field_ratio(schema, node, db_content):
    """
    Compares the values in the ext search db and the values in the node instance and returns
    a ratio of likeness between the two values.
    @param schema: String, name of the schema
    @param node: Node, an core.tree node instance
    @param db_content: indexable row of field values from the ext search db
    @return: Float in [0.0, 1.0]

    NOTE(review): raises ZeroDivisionError when the schema defines no
    fields (ratios stays empty) -- confirm callers guarantee fields exist.
    """
    ratios = []
    field_names = get_zero_index_schema_fields(schema)
    for field in field_names:
        # normalize the node value the same way the indexer does before comparing
        node_value = normalize_utf8(
            modify_tex(u(protect(node.get(field.name))), 'strip'))
        db_value = str(db_content[field.position])
        equality_ratio = difflib.SequenceMatcher(None, db_value, node_value).ratio()
        ratios.append(equality_ratio)
    return sum(ratios) / len(ratios)
def nodeToExtSearch(self, node):
    """Insert the node's searchfield values into the searchmeta table.

    Returns True on success (or when the schema defines no searchfields),
    False when building or executing the insert failed.
    """
    # build extended search index from node
    if len(node.getSearchFields()) == 0:
        # stop if schema has no searchfields
        return True
    v_list = {}
    i = 1
    for field in node.getSearchFields():
        # values keyed "1", "2", ... in definition order -> columns field1..N
        v_list[str(i)] = node.get(field.getName())
        i += 1
    # save definition
    self.nodeToSchemaDef(node)
    sql = 'INSERT INTO searchmeta (id, type, schema, '
    values = ''
    try:
        if len(v_list) > 0:
            # column names and values are appended in the same dict
            # iteration, so the pairing stays consistent
            for key in v_list:
                sql += 'field' + str(key) + ', '
                #values += '"'+u(v_list[key])+ '", '
                values += '"' + normalize_utf8(u(v_list[key])) + '", '
            sql = sql[:-2]
            values = values[:-2]
            sql = '{}) VALUES("{}", "{}", "{}", {})'.format(sql, node.id, node.getContentType(), node.getSchema(), values)
        else:
            sql = sql[:-2]
            sql = '{}) VALUES("{}", "{}", "{}")'.format(sql, node.id, node.getContentType(), node.getSchema())
        self.db.execute(sql)
        return True
    except:
        return False
def getContent(req, ids):
    """Edit-module handler for the manageindex view.

    Dispatches on req.params: "do_action" performs a bulk replace of an
    index value on all matching nodes; "style" with an "action" parameter
    renders the schemes/fields/values dropdowns or the child lookup; the
    fallback renders the manage form.
    """
    def getSchemes(req):
        # only schemes the user may see and that are active
        schemes = AccessData(req).filter(loadTypesFromDB())
        return filter(lambda x: x.isActive(), schemes)
    ret = ""
    v = {"message": ""}
    # NOTE(review): len(ids) >= 0 is always true, and ids[0] raises
    # IndexError on an empty list -- presumably '> 0' was intended; confirm
    if len(ids) >= 0:
        ids = ids[0]
    v["id"] = ids
    if "do_action" in req.params.keys():  # process nodes
        fieldname = req.params.get("fields")
        old_values = u(req.params.get("old_values", "")).split(";")
        new_value = u(req.params.get("new_value"))
        basenode = tree.getNode(ids)
        entries = getAllAttributeValues(fieldname, req.params.get("schema"))
        c = 0
        # replace each selected old value by the new value on every
        # accessible node carrying it
        for old_val in old_values:
            for n in AccessData(req).filter(tree.NodeList(entries[old_val])):
                try:
                    n.set(fieldname, replaceValue(n.get(fieldname), u(old_val), u(new_value)))
                    n.setDirty()
                    c += 1
                except:
                    # best-effort: skip nodes that fail to update
                    pass
        v["message"] = req.getTAL("web/edit/modules/manageindex.html", {"number": c}, macro="operationinfo")
    if "style" in req.params.keys():  # load schemes
        if req.params.get("action", "") == "schemes":
            v["schemes"] = getSchemes(req)
            req.writeTAL("web/edit/modules/manageindex.html", v, macro="schemes_dropdown")
            return ""
        elif req.params.get("action", "").startswith("indexfields__"):
            # load index fields: action is "indexfields__<schemaname>"
            schema = getMetaType(req.params.get("action", "")[13:])
            fields = []
            for field in schema.getMetaFields():
                if field.getFieldtype() == "ilist":
                    fields.append(field)
            v["fields"] = fields
            v["schemaname"] = schema.getName()
            req.writeTAL("web/edit/modules/manageindex.html", v, macro="fields_dropdown")
            return ""
        elif req.params.get("action", "").startswith("indexvalues__"):
            # load values of selected indexfield:
            # action is "...__<fieldname>__<schema>"
            node = tree.getNode(ids)
            fieldname = req.params.get("action").split("__")[-2]
            schema = req.params.get("action").split("__")[-1]
            v["entries"] = []
            if node:
                v["entries"] = getAllAttributeValues(fieldname, schema)
                # case-insensitive sort of the value keys
                v["keys"] = v["entries"].keys()
                v["keys"].sort(lambda x, y: cmp(x.lower(), y.lower()))
            req.writeTAL("web/edit/modules/manageindex.html", v, macro="fieldvalues")
            return ""
        elif req.params.get("action", "").startswith("children__"):
            # search for children of current collection:
            # action is "children__<scheme>__<fieldname>__<v1;v2;...;>"
            scheme = req.params.get("action", "").split("__")[1]
            fieldname = req.params.get("action", "").split("__")[2]
            values = req.params.get("action", "").split("__")[3].split(";")[:-1]
            all_values = getAllAttributeValues(fieldname, scheme)

            def isChildOf(access, node, basenodeid):
                # 1 if basenodeid occurs on any visible path to node
                for ls in getPaths(node, access):
                    if str(basenodeid) in tree.NodeList(ls).getIDs():
                        return 1
                return 0
            subitems = {}
            for value in values:
                value = u(value)
                if value in all_values:
                    subitems[value] = []
                    for l in all_values[value]:
                        if isChildOf(AccessData(req), tree.getNode(l), ids):
                            subitems[value].append(l)
            v["items"] = subitems
            v["keys"] = subitems.keys()
            v["keys"].sort()
            req.writeTAL("web/edit/modules/manageindex.html", v, macro="valueinfo")
            return ""
        else:
            return req.getTAL("web/edit/modules/manageindex.html", v, macro="manageform")
def extended_search(req):
    """Run an extended (field-based) search built from the request params.

    Reads up to 3 (or 10 in "extendedsuper" mode) field/query pairs from
    req.params, builds both the internal query string (q_str) and a
    human-readable version (q_user), searches the selected collection (or
    the "act_node") and returns a ContentList or SearchResult.
    """
    from web.frontend.content import ContentList
    # NOTE(review): 'max' shadows the builtin; kept as-is
    max = 3
    if req.params.get("searchmode") == "extendedsuper":
        max = 10
    sfields = []
    access = AccessData(req)
    metatype = None
    collectionid = req.params.get("collection", tree.getRoot().id)
    try:
        collection = tree.getNode(collectionid)
    except:
        # fall back to the first collection; NOTE(review): if there are no
        # children, 'collection' stays unbound and the code below raises
        for coll in tree.getRoot("collections").getChildren():
            collection = tree.getNode(coll.id)
            break
    q_str = ''
    q_user = ''
    first2 = 1
    for i in range(1, max + 1):
        f = u(req.params.get("field" + str(i), "").strip())
        q = u(req.params.get("query" + str(i), "").strip())
        if not q and "query" + str(i) + "-from" not in req.params:
            # neither a query value nor a date range for this row
            continue
        if not first2:
            q_str += " and "
            q_user += " %s " % (translate("search_and", request=req))
        first2 = 0
        if not f.isdigit():
            # free field name: simple "field=value" condition
            q = u(req.params.get("query" + str(i), "").strip())
            q_str += f + '=' + protect(q)
            q_user += f + '=' + protect(q)
        else:
            # numeric field id -> searchmaskitem node; OR over its children
            masknode = tree.getNode(f)
            assert masknode.type == "searchmaskitem"
            first = 1
            q_str += "("
            for metatype in masknode.getChildren():
                if not first:
                    q_str += " or "
                    q_user += " %s " % (translate("search_or", request=req))
                first = 0
                if "query" + str(
                        i) + "-from" in req.params and metatype.getFieldtype(
                ) == "date":
                    # date range search; "0000-00-00T00:00:00" marks an
                    # unset bound
                    date_from = "0000-00-00T00:00:00"
                    date_to = "0000-00-00T00:00:00"
                    fld = metatype
                    if str(req.params["query" + str(i) + "-from"]) != "":
                        date_from = date.format_date(
                            date.parse_date(
                                str(req.params["query" + str(i) + "-from"]),
                                fld.getValues()), "%Y-%m-%dT%H:%M:%S")
                    if str(req.params["query" + str(i) + "-to"]) != "":
                        date_to = date.format_date(
                            date.parse_date(
                                str(req.params["query" + str(i) + "-to"]),
                                fld.getValues()), "%Y-%m-%dT%H:%M:%S")
                    if date_from == "0000-00-00T00:00:00" and date_to != date_from:  # from value
                        q_str += metatype.getName() + ' <= ' + date_to
                        q_user += "%s ≤ \"%s\"" % (
                            metatype.getName(),
                            str(req.params["query" + str(i) + "-to"]))
                    elif date_to == "0000-00-00T00:00:00" and date_to != date_from:  # to value
                        q_str += metatype.getName() + ' >= ' + date_from
                        q_user += "%s ≥ \"%s\"" % (
                            metatype.getName(),
                            str(req.params["query" + str(i) + "-from"]))
                    else:
                        # both bounds set: closed interval
                        q_str += '({} >= {} and {} <= {})'.format(
                            metatype.getName(), date_from, metatype.getName(), date_to)
                        q_user += "(%s %s \"%s\" %s \"%s\")" % (
                            metatype.getName(),
                            translate("search_between", request=req),
                            str(req.params["query" + str(i) + "-from"]),
                            translate("search_and", request=req),
                            str(req.params["query" + str(i) + "-to"]))
                else:
                    q = u(req.params.get("query" + str(i), "").strip())
                    q_str += metatype.getName() + '=' + protect(q)
                    if metatype.getLabel() != "":
                        q_user += "%s = %s" % (metatype.getLabel(), protect(q))
                    else:
                        q_user += "%s = %s" % (metatype.getName(), protect(q))
            q_str += ")"
    try:
        if req.params.get(
                "act_node", "") and req.params.get("act_node") != str(collection.id):
            # search below the currently active node instead of the collection
            result = tree.getNode(req.params.get("act_node")).search(q_str)
        else:
            result = collection.search(q_str)
        result = access.filter(result)
        logging.getLogger('usertracing').info(
            access.user.name + " xsearch for '" + q_user + "', " + str(len(result)) + " results")
        if len(result) > 0:
            cl = ContentList(result, collection, q_user.strip())
            cl.feedback(req)
            cl.linkname = ""
            cl.linktarget = ""
            return cl
        return SearchResult([], q_user.strip())
    except:
        # any failure yields an empty search result page
        return SearchResult(None, q_user.strip())
def prss(s):
    '''protect rss item elements'''
    # Equivalent to the nested chain
    # esc(no_html(esc(no_html(esc(no_html(esc(u(s)))))))):
    # one initial esc(u(s)), then three rounds of esc(no_html(...)).
    protected = esc(u(s))
    for _ in range(3):
        protected = esc(no_html(protected))
    return protected
def getentries(filename):
    """Parse a bibtex file into a list of (doctype, key, fields) records.

    The file is first read as UTF-8; on decode errors a second attempt
    without an explicit codec is made, and a MissingMapping("wrong
    encoding") is raised if that fails too.  @string entries are collected
    as placeholders and substituted into later field values.  ``fields``
    is a dict of lower-cased field name -> cleaned-up value; author/editor
    values are normalised to "Lastname, Forename;..." form.
    """
    save_import_file(filename)
    fi = codecs.open(filename, "r", "utf-8")
    try:
        data = fi.read()
    except UnicodeDecodeError:
        fi.close()
        msg = "bibtex import: getentries(filename): encoding error when trying codec 'utf-8', filename was " + filename
        logger.error(msg)
        msg = "bibtex import: getentries(filename): going to try without codec 'utf-8', filename was " + filename
        logger.info(msg)
        try:
            fi = codecs.open(filename, "r")
            try:
                data = fi.read()
                data = u2(data)
            except Exception as e:
                fi.close()
                msg = "bibtex import: getentries(filename): error at second attempt: " + str(e)
                logger.info(msg)
                raise MissingMapping("wrong encoding")
        except Exception as e:
            msg = "bibtex import: getentries(filename): error at second attempt: " + str(e)
            logger.error(msg)
            raise MissingMapping("wrong encoding")
    try:
        fi.close()
    except:
        pass
    data = data.replace("\r", "\n")
    # throw out BOM
    try:
        data = u2(data).replace('\xef\xbb\xbf', "")
    except:
        pass
    # Strip bibtex comments (module-level 'comment' regex).
    data = comment.sub('\n', data)
    recordnr = 1
    size = len(data)
    pos = 0
    records = []
    fields = {}
    doctype = None
    placeholder = {}
    while True:
        # 'token' (module-level regex) matches either an "@type{" record
        # opener or a "field =" assignment.
        m = token.search(data, pos)
        if not m:
            break
        start = m.start()
        end = m.end()
        if data[start] == '@':
            # New record: "@doctype{key,".
            doctype = data[start + 1:end - 1].replace("{", "").strip().lower()
            m = delim2.search(data[end:])
            if m:  # and m.start()>end:
                key = data[end:end + m.end()].strip()
                pos = end + m.end()
                if key[-1] == ",":
                    key = key[0:-1]
            else:
                # No key found: synthesise one.
                key = "record%05d" % recordnr
                recordnr = recordnr + 1
                #pos = m.end()
                pos = end
            if ESCAPE_BIBTEX_KEY:
                key = escape_bibtexkey(key)
            fields = {}
            key = u2(key)
            fields["key"] = key
            records += [(doctype, key, fields)]
            if doctype == "string":
                # found placeholder: "@string{name = "value"}"
                t2 = re.compile(r'[^}]*')
                x = t2.search(data, end)
                x_start = x.start()
                x_end = x.end()
                s = data[x_start:x_end + 1]
                key, value = s.split("=")
                # value.strip()[1:-1] drops the surrounding quotes/braces.
                placeholder[key.strip()] = value.strip()[1:-1]
                pos = x_end
                if VERBOSE:
                    try:
                        msg = "bibtex import: placeholder: key='%s', value='%s'" % (key.strip(), value.strip()[1:-1])
                        logger.info(msg)
                    except Exception as e:
                        try:
                            msg = "bibtex import: placeholder: key='%s', value='%s'" % (
                                key.strip(), value.strip()[1:-1].encode("utf8", "replace"))
                            logger.info(msg)
                        except Exception as e:
                            msg = "bibtex import: placeholder: 'not printable key-value pair'"
                            logger.info(msg)
        elif doctype:
            # Field assignment inside the current record.
            s = data[start:end]
            if end and data[end - 1].isalnum():
                # for the \w+\s*=\s+[0-9a-zA-Z_] case
                end = end - 1
            field = s[:s.index("=")].strip().lower()
            pos = end
            # Field content runs up to the next token (or end of data).
            next_token = token.search(data, pos)
            if next_token:
                content = data[pos:next_token.start()]
            else:
                content = data[pos:]
            content = content.replace("{", "")
            content = content.replace("~", " ")
            content = content.replace("}", "")
            for key in placeholder:
                content = content.replace(key, placeholder[key])
            # some people use html entities in their bibtex...
            content = content.replace("&quot;", "'")
            content = xspace.sub(" ", backgarbage.sub("", frontgarbage.sub("", content)))
            content = u(content)
            # Translate common TeX umlaut/accent escapes to UTF-8 bytes.
            content = content.replace("\\\"u", "\xc3\xbc").replace("\\\"a", "\xc3\xa4").replace("\\\"o", "\xc3\xb6") \
                .replace("\\\"U", "\xc3\x9c").replace("\\\"A", "\xc3\x84").replace("\\\"O", "\xc3\x96")
            content = content.replace("\\\'a", "\xc3\xa0").replace("\\\'A", "\xc3\x80").replace("\\vc", "\xc4\x8d") \
                .replace("\\vC", "\xc4\x8c")
            content = content.replace("\\", "")
            content = content.replace("{\"u}", "\xc3\xbc").replace("{\"a}", "\xc3\xa4").replace("{\"o}", "\xc3\xb6") \
                .replace("{\"U}", "\xc3\x9c").replace("{\"A}", "\xc3\x84").replace("{\"O}", "\xc3\x96")
            content = content.strip()
            if field in ["author", "editor"] and content:
                # Normalise "Forename Lastname" entries (joined by " and ")
                # to "Lastname, Forename" joined by ";".
                authors = []
                for author in content.split(" and "):
                    author = author.strip()
                    if "," not in author and " " in author:
                        i = author.rindex(' ')
                        if i > 0:
                            forename, lastname = author[0:i].strip(), author[i + 1:].strip()
                            author = "%s, %s" % (lastname, forename)
                    authors += [author]
                content = ";".join(authors)
            fields[field] = content
        else:
            # Token before any record opener: skip it.
            pos = end
    return records
def getentries(filename):
    """Parse a bibtex file into a list of (doctype, key, fields) records.

    Reads the file as UTF-8, retrying without an explicit codec on decode
    errors and raising MissingMapping("wrong encoding") if both attempts
    fail.  @string entries become placeholders substituted into later
    field values; author/editor fields are normalised to
    "Lastname, Forename;..." form.

    NOTE(review): this appears to be a duplicate of another getentries
    definition in this file — consider removing one copy.
    """
    save_import_file(filename)
    fi = codecs.open(filename, "r", "utf-8")
    try:
        data = fi.read()
    except UnicodeDecodeError:
        fi.close()
        msg = "bibtex import: getentries(filename): encoding error when trying codec 'utf-8', filename was " + filename
        logger.error(msg)
        msg = "bibtex import: getentries(filename): going to try without codec 'utf-8', filename was " + filename
        logger.info(msg)
        try:
            fi = codecs.open(filename, "r")
            try:
                data = fi.read()
                data = u2(data)
            except Exception as e:
                fi.close()
                msg = "bibtex import: getentries(filename): error at second attempt: " + str(e)
                logger.info(msg)
                raise MissingMapping("wrong encoding")
        except Exception as e:
            msg = "bibtex import: getentries(filename): error at second attempt: " + str(e)
            logger.error(msg)
            raise MissingMapping("wrong encoding")
    try:
        fi.close()
    except:
        pass
    data = data.replace("\r", "\n")
    # throw out BOM
    try:
        data = u2(data).replace('\xef\xbb\xbf', "")
    except:
        pass
    # Strip bibtex comments (module-level 'comment' regex).
    data = comment.sub('\n', data)
    recordnr = 1
    size = len(data)
    pos = 0
    records = []
    fields = {}
    doctype = None
    placeholder = {}
    while True:
        # 'token' matches "@type{" openers and "field =" assignments.
        m = token.search(data, pos)
        if not m:
            break
        start = m.start()
        end = m.end()
        if data[start] == '@':
            # New record: "@doctype{key,".
            doctype = data[start + 1:end - 1].replace("{", "").strip().lower()
            m = delim2.search(data[end:])
            if m:  # and m.start()>end:
                key = data[end:end + m.end()].strip()
                pos = end + m.end()
                if key[-1] == ",":
                    key = key[0:-1]
            else:
                # No key found: synthesise one.
                key = "record%05d" % recordnr
                recordnr = recordnr + 1
                #pos = m.end()
                pos = end
            if ESCAPE_BIBTEX_KEY:
                key = escape_bibtexkey(key)
            fields = {}
            key = u2(key)
            fields["key"] = key
            records += [(doctype, key, fields)]
            if doctype == "string":
                # found placeholder: "@string{name = "value"}"
                t2 = re.compile(r'[^}]*')
                x = t2.search(data, end)
                x_start = x.start()
                x_end = x.end()
                s = data[x_start:x_end + 1]
                key, value = s.split("=")
                # value.strip()[1:-1] drops the surrounding quotes/braces.
                placeholder[key.strip()] = value.strip()[1:-1]
                pos = x_end
                if VERBOSE:
                    try:
                        msg = "bibtex import: placeholder: key='%s', value='%s'" % (
                            key.strip(), value.strip()[1:-1])
                        logger.info(msg)
                    except Exception as e:
                        try:
                            msg = "bibtex import: placeholder: key='%s', value='%s'" % (
                                key.strip(), value.strip()[1:-1].encode("utf8", "replace"))
                            logger.info(msg)
                        except Exception as e:
                            msg = "bibtex import: placeholder: 'not printable key-value pair'"
                            logger.info(msg)
        elif doctype:
            # Field assignment inside the current record.
            s = data[start:end]
            if end and data[end - 1].isalnum():
                # for the \w+\s*=\s+[0-9a-zA-Z_] case
                end = end - 1
            field = s[:s.index("=")].strip().lower()
            pos = end
            # Field content runs up to the next token (or end of data).
            next_token = token.search(data, pos)
            if next_token:
                content = data[pos:next_token.start()]
            else:
                content = data[pos:]
            content = content.replace("{", "")
            content = content.replace("~", " ")
            content = content.replace("}", "")
            for key in placeholder:
                content = content.replace(key, placeholder[key])
            # some people use html entities in their bibtex...
            content = content.replace("&quot;", "'")
            content = xspace.sub(" ", backgarbage.sub("", frontgarbage.sub("", content)))
            content = u(content)
            # Translate common TeX umlaut/accent escapes to UTF-8 bytes.
            content = content.replace("\\\"u", "\xc3\xbc").replace("\\\"a", "\xc3\xa4").replace("\\\"o", "\xc3\xb6") \
                .replace("\\\"U", "\xc3\x9c").replace("\\\"A", "\xc3\x84").replace("\\\"O", "\xc3\x96")
            content = content.replace("\\\'a", "\xc3\xa0").replace("\\\'A", "\xc3\x80").replace("\\vc", "\xc4\x8d") \
                .replace("\\vC", "\xc4\x8c")
            content = content.replace("\\", "")
            content = content.replace("{\"u}", "\xc3\xbc").replace("{\"a}", "\xc3\xa4").replace("{\"o}", "\xc3\xb6") \
                .replace("{\"U}", "\xc3\x9c").replace("{\"A}", "\xc3\x84").replace("{\"O}", "\xc3\x96")
            content = content.strip()
            if field in ["author", "editor"] and content:
                # Normalise "Forename Lastname" entries (joined by " and ")
                # to "Lastname, Forename" joined by ";".
                authors = []
                for author in content.split(" and "):
                    author = author.strip()
                    if "," not in author and " " in author:
                        i = author.rindex(' ')
                        if i > 0:
                            forename, lastname = author[0:i].strip(), author[i + 1:].strip()
                            author = "%s, %s" % (lastname, forename)
                    authors += [author]
                content = ";".join(authors)
            fields[field] = content
        else:
            # Token before any record opener: skip it.
            pos = end
    return records
def simple_search(req):
    """Run a simple fulltext search ('full=<query>') for the request.

    The search runs either in the node given by ``act_node`` (when it is
    not the collections root) or across all collections selected via
    ``c_<id>`` parameters (all readable collections when none are given).
    Returns a single ContentList when exactly one collection had hits,
    otherwise a SearchResult.
    """
    from web.frontend.content import ContentList
    res = []
    words = []
    collections = []
    collection_ids = {}
    access = AccessData(req)
    q = u(req.params.get("query", ""))
    # test whether this query is restricted to a number of collections
    for key, value in req.params.items():
        if key.startswith("c_"):
            collection_ids[key[2:]] = 1
    # no collection means: all collections
    if len(collection_ids) == 0 or 1 in collection_ids.keys():
        for collection in access.filter(tree.getRoot("collections").getChildren()):
            collection_ids[collection.id] = 1
    # now retrieve all results in all collections
    for collection in getAllCollections():
        if collection.id in collection_ids:
            collections.append(collection)
    num = 0
    # NOTE(review): this log line runs before the search, so it always
    # reports 0 results.
    logging.getLogger('usertracing').info(
        access.user.name + " search for '" + q + "', " + str(num) + " results")
    try:
        if req.params.get("act_node", None) and tree.getNode(req.params.get("act_node")).getContentType() != "collections":
            # actual node is a collection or directory
            result = tree.getNode(req.params.get("act_node")).search('full=' + q)
            result = access.filter(result)
            num += len(result)
            if len(result) > 0:
                cl = ContentList(result, collection, words)
                cl.feedback(req)
                cl.linkname = "Suchergebnis"
                cl.linktarget = ""
                res.append(cl)
        else:
            # actual node is collections-node
            for collection in collections:
                result = collection.search('full=' + q)
                result = access.filter(result)
                num += len(result)
                if len(result) > 0:
                    cl = ContentList(result, collection, words)
                    cl.feedback(req)
                    cl.linkname = "Suchergebnis"
                    cl.linktarget = ""
                    res.append(cl)
        if len(res) == 1:
            return res[0]
        else:
            return SearchResult(res, q, collections)
    except:
        # SearchResult(None, ...) signals a failed search.
        return SearchResult(None, q, collections)
def extended_search(req):
    """Run the extended metadata search ("xsearch") for the request.

    Builds a query from up to ``max`` field/query parameter slots
    (``field<i>``/``query<i>``, with ``query<i>-from``/``-to`` for date
    fields), searches the chosen collection or ``act_node``, filters by
    access rights, and returns a ContentList (hits) or SearchResult.

    NOTE(review): this appears to be a duplicate of another
    extended_search definition in this file — consider removing one copy.
    """
    from web.frontend.content import ContentList
    # "extendedsuper" mode offers 10 field/query slots instead of 3.
    max = 3
    if req.params.get("searchmode") == "extendedsuper":
        max = 10
    sfields = []
    access = AccessData(req)
    metatype = None
    collectionid = req.params.get("collection", tree.getRoot().id)
    try:
        collection = tree.getNode(collectionid)
    except:
        # Fall back to the first child of the collections root.
        for coll in tree.getRoot("collections").getChildren():
            collection = tree.getNode(coll.id)
            break
    # q_str is the machine query; q_user is the human-readable echo of it.
    q_str = ''
    q_user = ''
    first2 = 1
    for i in range(1, max + 1):
        f = u(req.params.get("field" + str(i), "").strip())
        q = u(req.params.get("query" + str(i), "").strip())
        # Skip empty slots unless a date range ("-from") was supplied.
        if not q and "query" + str(i) + "-from" not in req.params:
            continue
        if not first2:
            q_str += " and "
            q_user += " %s " % (translate("search_and", request=req))
        first2 = 0
        if not f.isdigit():
            # Free-form field name: match it directly.
            q = u(req.params.get("query" + str(i), "").strip())
            q_str += f + '=' + protect(q)
            q_user += f + '=' + protect(q)
        else:
            # Numeric field value is a searchmaskitem node id; OR together
            # all metatypes attached to that mask item.
            masknode = tree.getNode(f)
            assert masknode.type == "searchmaskitem"
            first = 1
            q_str += "("
            for metatype in masknode.getChildren():
                if not first:
                    q_str += " or "
                    q_user += " %s " % (translate("search_or", request=req))
                first = 0
                if "query" + str(i) + "-from" in req.params and metatype.getFieldtype() == "date":
                    # Date range; "0000-00-00T00:00:00" marks an unset bound.
                    date_from = "0000-00-00T00:00:00"
                    date_to = "0000-00-00T00:00:00"
                    fld = metatype
                    if str(req.params["query" + str(i) + "-from"]) != "":
                        date_from = date.format_date(
                            date.parse_date(str(req.params["query" + str(i) + "-from"]), fld.getValues()),
                            "%Y-%m-%dT%H:%M:%S")
                    if str(req.params["query" + str(i) + "-to"]) != "":
                        date_to = date.format_date(
                            date.parse_date(str(req.params["query" + str(i) + "-to"]), fld.getValues()),
                            "%Y-%m-%dT%H:%M:%S")
                    if date_from == "0000-00-00T00:00:00" and date_to != date_from:  # only "to" bound set
                        q_str += metatype.getName() + ' <= ' + date_to
                        q_user += "%s ≤ \"%s\"" % (metatype.getName(), str(req.params["query" + str(i) + "-to"]))
                    elif date_to == "0000-00-00T00:00:00" and date_to != date_from:  # only "from" bound set
                        q_str += metatype.getName() + ' >= ' + date_from
                        q_user += "%s ≥ \"%s\"" % (metatype.getName(), str(req.params["query" + str(i) + "-from"]))
                    else:
                        # Both bounds set (or both unset): closed interval.
                        q_str += '({} >= {} and {} <= {})'.format(
                            metatype.getName(), date_from, metatype.getName(), date_to)
                        q_user += "(%s %s \"%s\" %s \"%s\")" % (
                            metatype.getName(),
                            translate("search_between", request=req),
                            str(req.params["query" + str(i) + "-from"]),
                            translate("search_and", request=req),
                            str(req.params["query" + str(i) + "-to"]))
                else:
                    # Plain (non-date) metafield comparison.
                    q = u(req.params.get("query" + str(i), "").strip())
                    q_str += metatype.getName() + '=' + protect(q)
                    if metatype.getLabel() != "":
                        q_user += "%s = %s" % (metatype.getLabel(), protect(q))
                    else:
                        q_user += "%s = %s" % (metatype.getName(), protect(q))
            q_str += ")"
    try:
        # Search in act_node when it is given and differs from the
        # collection, otherwise in the collection itself.
        if req.params.get("act_node", "") and req.params.get("act_node") != str(collection.id):
            result = tree.getNode(req.params.get("act_node")).search(q_str)
        else:
            result = collection.search(q_str)
        result = access.filter(result)
        logging.getLogger('usertracing').info(
            access.user.name + " xsearch for '" + q_user + "', " + str(len(result)) + " results")
        if len(result) > 0:
            cl = ContentList(result, collection, q_user.strip())
            cl.feedback(req)
            cl.linkname = ""
            cl.linktarget = ""
            return cl
        return SearchResult([], q_user.strip())
    except:
        # SearchResult(None, ...) signals a failed search.
        return SearchResult(None, q_user.strip())
def show_printview(req):
    """ create a pdf preview of given node (id in path e.g. /print/[id]/[area])

    Node id 0 renders the nodes currently held in the session content
    area; any other id renders that node's metadata (via its print/full
    mask), its presentation image, and — for containers — a sorted list
    of its printable children.  The PDF bytes are written directly to the
    response with Content-Type application/pdf.
    """
    p = req.path[1:].split("/")
    try:
        nodeid = int(p[1])
    except ValueError:
        raise ValueError("Invalid Printview URL: " + req.path)
    if len(p) == 3:
        if p[2] == "edit":
            # /print/<id>/edit: delegate to the edit-area print method.
            req.reply_headers['Content-Type'] = "application/pdf"
            editprint = printmethod(req)
            if editprint:
                req.write(editprint)
            else:
                req.write("")
            return
    # use objects from session
    if str(nodeid) == "0":
        children = []
        if "contentarea" in req.session:
            try:
                nodes = req.session["contentarea"].content.files
            except:
                # Content area holds a result list instead of files.
                c = req.session["contentarea"].content
                nodes = c.resultlist[c.active].files
            for n in nodes:
                c_mtype = getMetaType(n.getSchema())
                c_mask = c_mtype.getMask("printlist")
                if not c_mask:
                    c_mask = c_mtype.getMask("nodesmall")
                _c = c_mask.getViewHTML([n], VIEW_DATA_ONLY + VIEW_HIDE_EMPTY)
                if len(_c) > 0:
                    children.append(_c)
        req.reply_headers['Content-Type'] = "application/pdf"
        req.write(printview.getPrintView(lang(req), None, [["", "", t(lang(req), "")]], [], 3, children))
    else:
        node = getNode(nodeid)
        # Printing can be disabled per node via the system.print flag.
        if node.get("system.print") == "0":
            return 404
        access = AccessData(req)
        if not access.hasAccess(node, "read"):
            req.write(t(req, "permission_denied"))
            return
        style = int(req.params.get("style", 2))
        # nodetype: prefer a "printview" mask, else "fullview", else "nodebig".
        mtype = getMetaType(node.getSchema())
        mask = None
        metadata = None
        if mtype:
            for m in mtype.getMasks():
                if m.getMasktype() == "fullview":
                    mask = m
                if m.getMasktype() == "printview":
                    mask = m
                    break
            if not mask:
                mask = mtype.getMask("nodebig")
            if mask:
                metadata = mask.getViewHTML([node], VIEW_DATA_ONLY + VIEW_HIDE_EMPTY)
        if not metadata:
            metadata = [['nodename', node.getName(), 'Name', 'text']]
        files = node.getFiles()
        imagepath = None
        for file in files:
            # "presentati..." file types carry the preview image.
            if file.getType().startswith("presentati"):
                imagepath = file.retrieveFile()
        # children
        children = []
        if node.isContainer():
            ret = []
            getPrintChildren(req, node, ret)
            for c in ret:
                if not c.isContainer():
                    # items
                    c_mtype = getMetaType(c.getSchema())
                    c_mask = c_mtype.getMask("printlist")
                    if not c_mask:
                        c_mask = c_mtype.getMask("nodesmall")
                    _c = c_mask.getViewHTML([c], VIEW_DATA_ONLY)
                    if len(_c) > 0:
                        children.append(_c)
                else:
                    # header: sub-container shown as a breadcrumb header row
                    items = getPaths(c, AccessData(req))
                    p = []
                    for item in items[0]:
                        p.append(u(item.getName()))
                    p.append(u(c.getName()))
                    children.append([(c.id, " > ".join(p[1:]), u(c.getName()), "header")])
            if len(children) > 1:
                # Determine up to two sort columns from the collection's
                # "sortfield" attribute and sortfield0/sortfield1 params;
                # a leading "-" inverts the order.
                col = []
                order = []
                try:
                    sort = getCollection(node).get("sortfield")
                except:
                    sort = ""
                for i in range(0, 2):
                    col.append((0, ""))
                    order.append(1)
                    if req.params.get("sortfield" + str(i)) != "":
                        sort = req.params.get("sortfield" + str(i), sort)
                    # NOTE(review): nesting below reconstructed from
                    # collapsed source — confirm against history.
                    if sort != "":
                        if sort.startswith("-"):
                            sort = sort[1:]
                            order[i] = -1
                        _i = 0
                        for c in children[0]:
                            if c[0] == sort:
                                col[i] = (_i, sort)
                            _i += 1
                    if col[i][1] == "":
                        col[i] = (0, children[0][0][0])

                # sort method for items
                def myCmp(x, y, col, order):
                    cx = ""
                    cy = ""
                    for item in x:
                        if item[0] == col[0][1]:
                            cx = item[1]
                            break
                    for item in y:
                        if item[0] == col[0][1]:
                            cy = item[1]
                            break
                    if cx.lower() > cy.lower():
                        return 1 * order[0]
                    return -1 * order[0]

                # Sort item rows per header section, keeping header rows
                # in place.
                sorted_children = []
                tmp = []
                for item in children:
                    if item[0][3] == "header":
                        if len(tmp) > 0:
                            tmp.sort(lambda x, y: myCmp(x, y, col, order))
                            sorted_children.extend(tmp)
                            tmp = []
                        sorted_children.append(item)
                    else:
                        tmp.append(item)
                tmp.sort(lambda x, y: myCmp(x, y, col, order))
                sorted_children.extend(tmp)
                children = sorted_children
        req.reply_headers['Content-Type'] = "application/pdf"
        req.write(printview.getPrintView(lang(req), imagepath, metadata,
                                         getPaths(node, AccessData(req)), style, children,
                                         getCollection(node)))
def upload_new_node(req, path, params, data):
    """Web-service handler: create a new node under params['parent'].

    Verifies the request signature for the given user (falling back to a
    dynamic dummy user or the guest user), imports the uploaded file
    (file object or base64 string), applies the JSON metadata, and
    returns an (http_status, body_length, info_dict) tuple.
    """
    try:
        uploadfile = params['data']
        del params['data']
    except KeyError:
        uploadfile = None
    # get the user and verify the signature
    if params.get('user'):
        # user=users.getUser(params.get('user'))
        #userAccess = AccessData(user=user)
        _user = users.getUser(params.get('user'))
        if not _user:  # user of dynamic class
            class dummyuser:  # dummy user class
                # return all groups with given dynamic user
                def getGroups(self):
                    return [g.name for g in tree.getRoot('usergroups').getChildren()
                            if g.get('allow_dynamic') == '1' and params.get('user') in g.get('dynamic_users')]

                def getName(self):
                    return params.get('user')

                def getDirID(self):  # unique identifier
                    return params.get('user')

                def isAdmin(self):
                    return 0
            _user = dummyuser()
        userAccess = AccessData(user=_user)
        if userAccess.user:
            user = userAccess.user
            # Signature covers fullpath + '?'; a mismatch voids access.
            if not userAccess.verify_request_signature(req.fullpath + '?', params):
                userAccess = None
        else:
            userAccess = None
    else:
        user = users.getUser(config.get('user.guestuser'))
        userAccess = AccessData(user=user)
    parent = tree.getNode(params.get('parent'))
    # check user access
    if userAccess and userAccess.hasAccess(parent, "write"):
        pass
    else:
        s = "No Access"
        req.write(s)
        d = {'status': 'fail', 'html_response_code': '403', 'errormessage': 'no access'}
        logger.error("user has no edit permission for node %s" % parent)
        return d['html_response_code'], len(s), d
    datatype = params.get('type')
    uploaddir = users.getUploadDir(user)
    n = tree.Node(name=params.get('name'), type=datatype)
    if isinstance(uploadfile, types.InstanceType):  # file object used
        nfile = importFile(uploadfile.filename, uploadfile.tempname)
    else:  # string used
        nfile = importFileFromData('uploadTest.jpg', base64.b64decode(uploadfile))
    if nfile:
        n.addFile(nfile)
    else:
        logger.error("error in file uploadservice")
    try:  # test metadata
        metadata = json.loads(params.get('metadata'))
    except ValueError:
        metadata = dict()
    # set provided metadata
    for key, value in metadata.iteritems():
        n.set(u(key), u(value))
    # service flags
    n.set("creator", user.getName())
    n.set("creationtime", format_date())
    parent.addChild(n)
    # process the file, we've added to the new node
    if hasattr(n, "event_files_changed"):
        try:
            n.event_files_changed()
        except OperationException as e:
            # Roll back the stored files before re-raising.
            for file in n.getFiles():
                if os.path.exists(file.retrieveFile()):
                    os.remove(file.retrieveFile())
            raise OperationException(e.value)
    # make sure the new node is visible immediately from the web service and
    # the search index gets updated
    n.setDirty()
    tree.remove_from_nodecaches(parent)
    d = {'status': 'Created', 'html_response_code': '201', 'build_response_end': time.time()}
    s = "Created"
    # provide the uploader with the new node ID
    req.reply_headers['NodeID'] = n.id
    # we need to write in case of POST request, send as buffer will not work
    req.write(s)
    return d['html_response_code'], len(s), d
def validate(req, op):
    """standard validator

    Admin handler for the usergroup module: answers the "titleinfo"
    ajax action, dispatches new/edit/delete buttons, and persists
    "save_new"/"save_edit" form submissions before re-rendering the
    group list view.
    """
    user = users.getUserFromRequest(req)
    try:
        if "action" in req.params.keys():
            if req.params.get("action") == "titleinfo":
                # Ajax: return the group's schemas, pipe-separated.
                group = getGroup(u(req.params.get("group")))
                schema = group.getSchemas()
                req.write('|'.join(schema))
                return ""
        for key in req.params.keys():
            if key.startswith("new"):
                # create new group
                return editGroup_mask(req, "")
            elif key.startswith("edit_"):
                # edit usergroup
                return editGroup_mask(req, str(key[5:-2]))
            elif key.startswith("delete_"):
                # delete group
                log.debug("user %r going to delete group %r" % (user.getName(), key[7:-2]))
                deleteGroup(key[7:-2])
                break
        if "form_op" in req.params.keys():
            # Collect option flags from "option_<char>" checkbox names.
            _option = ""
            for key in req.params.keys():
                if key.startswith("option_"):
                    _option += key[7]
            if req.params.get("form_op", "") == "save_new":
                # save new group values
                if req.params.get("groupname", "") == "":
                    return editGroup_mask(req, "", 1)  # no groupname selected
                elif existGroup(req.params.get("groupname", "")):
                    return editGroup_mask(req, "", 2)  # group still existing
                else:
                    log.debug("user %r going to save new group %r" %
                              (user.getName(), req.params.get("groupname", "")))
                    if req.params.get("create_rule", "") == "True":
                        updateAclRule(req.params.get("groupname", ""), req.params.get("groupname", ""))
                    if req.params.get("checkbox_allow_dynamic", "") in ["on", "1"]:
                        allow_dynamic = "1"
                    else:
                        allow_dynamic = ""
                    dynamic_users = req.params.get("dynamic_users", "")
                    group = create_group(req.params.get("groupname", ""),
                                         description=req.params.get("description", ""),
                                         option=str(_option),
                                         allow_dynamic=allow_dynamic,
                                         dynamic_users=dynamic_users,
                                         )
                    group.setHideEdit(req.params.get("leftmodule", "").strip())
                    # NOTE(review): save_new passes .strip() here while
                    # save_edit passes .split(";") — confirm which form
                    # saveGroupMetadata expects.
                    saveGroupMetadata(group.name, req.params.get("leftmodulemeta", "").strip())
            elif req.params.get("form_op") == "save_edit":
                # save changed values
                groupname = req.params.get("groupname", "")
                oldgroupname = req.params.get("oldgroupname", "")
                group = getGroup(oldgroupname)
                if oldgroupname != groupname:
                    # Renamed: move the ACL rule along with the group.
                    updateAclRule(oldgroupname, groupname)
                    group.setName(groupname)
                group.setDescription(req.params.get("description", ""))
                group.setOption(str(_option))
                group.setHideEdit(req.params.get("leftmodule", "").strip())
                saveGroupMetadata(groupname, req.params.get("leftmodulemeta", "").split(";"))
                if ALLOW_DYNAMIC_USERS:
                    allow_dynamic = req.params.get("checkbox_allow_dynamic", "")
                    dynamic_users = req.params.get("dynamic_users", "")
                    if allow_dynamic.lower() in ['on', 'true', '1']:
                        group.set("allow_dynamic", "1")
                    else:
                        group.set("allow_dynamic", "")
                    group.set("dynamic_users", dynamic_users)
                if groupname == oldgroupname:
                    log.debug("user %r edited group %r" % (user.getName(), groupname))
                else:
                    log.debug("user %r edited group %r, new groupname: %r" %
                              (user.getName(), oldgroupname, groupname))
            sortUserGroups()
        return view(req)
    except:
        # NOTE(review): message looks copy-pasted from another module and
        # prints the *builtin* `type` — probably meant to report the
        # failing operation instead.
        print "Warning: couldn't load module for type", type
        print sys.exc_info()[0], sys.exc_info()[1]
        traceback.print_tb(sys.exc_info()[2])
def writexml(node, fi, indent=None, written=None, children=True, children_access=None,
             exclude_filetypes=[], exclude_children_types=[], attribute_name_filter=None):
    """Serialise ``node`` (and optionally its subtree) as XML into ``fi``.

    Writes a <node> element with access attributes, <attribute> CDATA
    entries (filtered by ``attribute_name_filter``), <file> entries
    (minus ``exclude_filetypes``), and <child> references; then recurses
    into readable children not yet in ``written`` (id -> None map used as
    a visited set).  Mask nodes additionally export their exportmapping.

    NOTE(review): mutable default args (exclude_filetypes,
    exclude_children_types) are shared across calls — safe only as long
    as callers never mutate them.
    """
    if written is None:
        written = {}
    if indent is None:
        indent = 0
    # there are a lot of nodes without name ...
    nodename_copy = node.name
    if nodename_copy is None:
        nodename_copy = ""
    #fi.write('%s<node name="%s" id="%s" ' % ((" " * indent), esc(nodename_copy), str(node.id)))
    # non-utf8 encoded umlauts etc. may cause invalid xml
    fi.write('%s<node name="%s" id="%s" ' % ((" " * indent), u2(esc(nodename_copy)), str(node.id)))
    if node.type is None:
        node.type = "node"
    fi.write('type="%s" ' % node.type)
    if node.read_access:
        fi.write('read="%s" ' % esc(node.read_access))
    if node.write_access:
        fi.write('write="%s" ' % esc(node.write_access))
    if node.data_access:
        fi.write('data="%s" ' % esc(node.data_access))
    fi.write(">\n")
    indent += 4
    for name, value in node.items():
        u_esc_name = u(esc(name))
        if attribute_name_filter and not attribute_name_filter(u_esc_name):
            continue
        fi.write('%s<attribute name="%s"><![CDATA[%s]]></attribute>\n' % ((" " * indent), u_esc_name, u2(value)))
    for file in node.getFiles():
        if file.type == "metadata" or file.type in exclude_filetypes:
            continue
        mimetype = file.mimetype
        if mimetype is None:
            mimetype = "application/x-download"
        fi.write('%s<file filename="%s" mime-type="%s" type="%s"/>\n' %
                 ((" " * indent), esc(file.getName()), mimetype,
                  (file.type is not None and file.type or "image")))
    if children:
        # First pass: emit <child> reference elements inside this node.
        for c in node.getChildren().sort_by_orderpos():
            if (not children_access) or (children_access and children_access.hasAccess(c, 'read')):
                if c.type not in exclude_children_types:
                    fi.write('%s<child id="%s" type="%s"/>\n' % ((" " * indent), str(c.id), c.type))
    indent -= 4
    fi.write("%s</node>\n" % (" " * indent))
    if(children):
        # Second pass: recurse into each unseen child after closing the tag.
        for c in node.getChildren().sort_by_orderpos():
            if (not children_access) or (children_access and children_access.hasAccess(c, 'read')):
                if c.type not in exclude_children_types:
                    if c.id not in written:
                        written[c.id] = None
                        c.writexml(fi,
                                   indent=indent,
                                   written=written,
                                   children=children,
                                   children_access=children_access,
                                   exclude_filetypes=exclude_filetypes,
                                   exclude_children_types=exclude_children_types,
                                   attribute_name_filter=attribute_name_filter
                                   )
    if node.type in ["mask"]:
        # Masks drag their exportmapping node into the export as well.
        try:
            exportmapping_id = node.get("exportmapping").strip()
            if exportmapping_id and exportmapping_id not in written:
                try:
                    exportmapping = tree.getNode(exportmapping_id)
                    written[exportmapping_id] = None
                    exportmapping.writexml(fi,
                                           indent=indent,
                                           written=written,
                                           children=children,
                                           children_access=children_access,
                                           exclude_filetypes=exclude_filetypes,
                                           exclude_children_types=exclude_children_types,
                                           attribute_name_filter=attribute_name_filter
                                           )
                except:
                    msg = "ERROR: node xml export error node.id='%s', node.name='%s', node.type='%s', exportmapping:'%s'" % (
                        str(node.id), node.name, node.type, str(exportmapping_id))
                    logging.getLogger("backend").error(msg)
            else:
                pass
        except:
            msg = "ERROR: node xml export error node.id='%s', node.name='%s', node.type='%s', exportmapping:'%s'" % (
                str(node.id), node.name, node.type, str(exportmapping_id))
            logging.getLogger("backend").error(msg)
def writexml(
    node,
    fi,
    indent=None,
    written=None,
    children=True,
    children_access=None,
    exclude_filetypes=[],
    exclude_children_types=[],
    attribute_name_filter=None,
):
    """Serialise ``node`` (and optionally its subtree) as XML into ``fi``.

    Writes a <node> element with access attributes, <attribute> CDATA
    entries, <file> entries, and <child> references; then recurses into
    readable, not-yet-written children (``written`` is an id -> None
    visited map).  Mask nodes also export their exportmapping node.

    NOTE(review): this appears to be a duplicate of another writexml
    definition in this file — consider removing one copy.
    """
    if written is None:
        written = {}
    if indent is None:
        indent = 0
    # there are a lot of nodes without name ...
    nodename_copy = node.name
    if nodename_copy is None:
        nodename_copy = ""
    # fi.write('%s<node name="%s" id="%s" ' % ((" " * indent), esc(nodename_copy), str(node.id)))
    # non-utf8 encoded umlauts etc. may cause invalid xml
    fi.write('%s<node name="%s" id="%s" ' % ((" " * indent), u2(esc(nodename_copy)), str(node.id)))
    if node.type is None:
        node.type = "node"
    fi.write('type="%s" ' % node.type)
    if node.read_access:
        fi.write('read="%s" ' % esc(node.read_access))
    if node.write_access:
        fi.write('write="%s" ' % esc(node.write_access))
    if node.data_access:
        fi.write('data="%s" ' % esc(node.data_access))
    fi.write(">\n")
    indent += 4
    for name, value in node.items():
        u_esc_name = u(esc(name))
        if attribute_name_filter and not attribute_name_filter(u_esc_name):
            continue
        fi.write('%s<attribute name="%s"><![CDATA[%s]]></attribute>\n' % ((" " * indent), u_esc_name, u2(value)))
    for file in node.getFiles():
        if file.type == "metadata" or file.type in exclude_filetypes:
            continue
        mimetype = file.mimetype
        if mimetype is None:
            mimetype = "application/x-download"
        fi.write(
            '%s<file filename="%s" mime-type="%s" type="%s"/>\n'
            % ((" " * indent), esc(file.getName()), mimetype, (file.type is not None and file.type or "image"))
        )
    if children:
        # First pass: emit <child> reference elements inside this node.
        for c in node.getChildren().sort_by_orderpos():
            if (not children_access) or (children_access and children_access.hasAccess(c, "read")):
                if c.type not in exclude_children_types:
                    fi.write('%s<child id="%s" type="%s"/>\n' % ((" " * indent), str(c.id), c.type))
    indent -= 4
    fi.write("%s</node>\n" % (" " * indent))
    if children:
        # Second pass: recurse into each unseen child after closing the tag.
        for c in node.getChildren().sort_by_orderpos():
            if (not children_access) or (children_access and children_access.hasAccess(c, "read")):
                if c.type not in exclude_children_types:
                    if c.id not in written:
                        written[c.id] = None
                        c.writexml(
                            fi,
                            indent=indent,
                            written=written,
                            children=children,
                            children_access=children_access,
                            exclude_filetypes=exclude_filetypes,
                            exclude_children_types=exclude_children_types,
                            attribute_name_filter=attribute_name_filter,
                        )
    if node.type in ["mask"]:
        # Masks drag their exportmapping node into the export as well.
        try:
            exportmapping_id = node.get("exportmapping").strip()
            if exportmapping_id and exportmapping_id not in written:
                try:
                    exportmapping = tree.getNode(exportmapping_id)
                    written[exportmapping_id] = None
                    exportmapping.writexml(
                        fi,
                        indent=indent,
                        written=written,
                        children=children,
                        children_access=children_access,
                        exclude_filetypes=exclude_filetypes,
                        exclude_children_types=exclude_children_types,
                        attribute_name_filter=attribute_name_filter,
                    )
                except:
                    msg = (
                        "ERROR: node xml export error node.id='%s', node.name='%s', node.type='%s', exportmapping:'%s'"
                        % (str(node.id), node.name, node.type, str(exportmapping_id))
                    )
                    logging.getLogger("backend").error(msg)
            else:
                pass
        except:
            msg = "ERROR: node xml export error node.id='%s', node.name='%s', node.type='%s', exportmapping:'%s'" % (
                str(node.id),
                node.name,
                node.type,
                str(exportmapping_id),
            )
            logging.getLogger("backend").error(msg)
def validate(req, op):
    """standard validator

    Admin handler for the usergroup module: answers the "titleinfo" ajax
    action, dispatches new/edit/delete buttons, and persists
    "save_new"/"save_edit" submissions before re-rendering the view.

    NOTE(review): this appears to be a duplicate of another validate
    definition in this file — consider removing one copy.
    """
    user = users.getUserFromRequest(req)
    try:
        if "action" in req.params.keys():
            if req.params.get("action") == "titleinfo":
                # Ajax: return the group's schemas, pipe-separated.
                group = getGroup(u(req.params.get("group")))
                schema = group.getSchemas()
                req.write('|'.join(schema))
                return ""
        for key in req.params.keys():
            if key.startswith("new"):
                # create new group
                return editGroup_mask(req, "")
            elif key.startswith("edit_"):
                # edit usergroup
                return editGroup_mask(req, str(key[5:-2]))
            elif key.startswith("delete_"):
                # delete group
                log.debug("user %r going to delete group %r" % (user.getName(), key[7:-2]))
                deleteGroup(key[7:-2])
                break
        if "form_op" in req.params.keys():
            # Collect option flags from "option_<char>" checkbox names.
            _option = ""
            for key in req.params.keys():
                if key.startswith("option_"):
                    _option += key[7]
            if req.params.get("form_op", "") == "save_new":
                # save new group values
                if req.params.get("groupname", "") == "":
                    return editGroup_mask(req, "", 1)  # no groupname selected
                elif existGroup(req.params.get("groupname", "")):
                    return editGroup_mask(req, "", 2)  # group still existing
                else:
                    log.debug("user %r going to save new group %r" %
                              (user.getName(), req.params.get("groupname", "")))
                    if req.params.get("create_rule", "") == "True":
                        updateAclRule(req.params.get("groupname", ""), req.params.get("groupname", ""))
                    if req.params.get("checkbox_allow_dynamic", "") in ["on", "1"]:
                        allow_dynamic = "1"
                    else:
                        allow_dynamic = ""
                    dynamic_users = req.params.get("dynamic_users", "")
                    group = create_group(req.params.get("groupname", ""),
                                         description=req.params.get("description", ""),
                                         option=str(_option),
                                         allow_dynamic=allow_dynamic,
                                         dynamic_users=dynamic_users,
                                         )
                    group.setHideEdit(req.params.get("leftmodule", "").strip())
                    # NOTE(review): save_new passes .strip() here while
                    # save_edit passes .split(";") — confirm which form
                    # saveGroupMetadata expects.
                    saveGroupMetadata(group.name, req.params.get("leftmodulemeta", "").strip())
            elif req.params.get("form_op") == "save_edit":
                # save changed values
                groupname = req.params.get("groupname", "")
                oldgroupname = req.params.get("oldgroupname", "")
                group = getGroup(oldgroupname)
                if oldgroupname != groupname:
                    # Renamed: move the ACL rule along with the group.
                    updateAclRule(oldgroupname, groupname)
                    group.setName(groupname)
                group.setDescription(req.params.get("description", ""))
                group.setOption(str(_option))
                group.setHideEdit(req.params.get("leftmodule", "").strip())
                saveGroupMetadata(groupname, req.params.get("leftmodulemeta", "").split(";"))
                if ALLOW_DYNAMIC_USERS:
                    allow_dynamic = req.params.get("checkbox_allow_dynamic", "")
                    dynamic_users = req.params.get("dynamic_users", "")
                    if allow_dynamic.lower() in ['on', 'true', '1']:
                        group.set("allow_dynamic", "1")
                    else:
                        group.set("allow_dynamic", "")
                    group.set("dynamic_users", dynamic_users)
                if groupname == oldgroupname:
                    log.debug("user %r edited group %r" % (user.getName(), groupname))
                else:
                    log.debug("user %r edited group %r, new groupname: %r" %
                              (user.getName(), oldgroupname, groupname))
            sortUserGroups()
        return view(req)
    except:
        # NOTE(review): message looks copy-pasted from another module and
        # prints the *builtin* `type`.
        print "Warning: couldn't load module for type", type
        print sys.exc_info()[0], sys.exc_info()[1]
        traceback.print_tb(sys.exc_info()[2])
def xml_start_element(self, name, attrs):
    """Expat start-element handler for the XML node-tree import.

    Builds tree.Node objects for <node> elements, records attributes,
    child references and attached files, and keeps import bookkeeping in
    self.nodes (parse stack), self.id2node (old id -> node), self.root
    and self.node_already_seen (skip flag for re-imported nodes).
    """
    try:
        # current parent candidate: the most recently opened node
        node = self.nodes[-1]
    except:
        node = None  # no node opened yet (document start)
    if name == "nodelist":
        if "exportversion" in attrs:
            logging.getLogger("backend").info("starting xml import: %s" % str(attrs))
    elif name == "node":
        self.node_already_seen = False
        parent = node
        try:
            # NOTE(review): shadows the builtin `type` (kept as-is)
            type = attrs["type"].encode("utf-8")
        except:
            type = "directory"  # default node type when none is given
        if "id" not in attrs:
            # synthesize an id so the id2node bookkeeping below still works
            attrs["id"] = str(random.random())
        old_id = attrs["id"]
        if old_id in self.id2node:
            # node imported before: reuse it and ignore its nested elements
            node = self.id2node[old_id]
            self.node_already_seen = True
            return
        elif type in ["mapping"]:
            # mapping nodes get a unique name to avoid clashing with existing ones
            node = tree.Node(name=(attrs["name"] + "_imported_" + old_id).encode("utf-8"), type=type)
        else:
            node = tree.Node(name=attrs["name"].encode("utf-8"), type=type)
        # optional access rules on the node element
        if "read" in attrs:
            node.setAccess("read", attrs["read"].encode("utf-8"))
        if "write" in attrs:
            node.setAccess("write", attrs["write"].encode("utf-8"))
        if "data" in attrs:
            node.setAccess("data", attrs["data"].encode("utf-8"))
        if self.verbose:
            msg = "created node '%s', '%s', '%s', old_id from attr='%s'" % (
                node.name,
                node.type,
                str(node.id),
                str(attrs["id"]),
            )
            logging.getLogger("backend").info(msg)
        self.id2node[attrs["id"].encode("utf-8")] = node
        node.tmpchilds = []  # child ids, resolved after parsing completes
        self.nodes += [node]
        if self.root is None:
            self.root = node  # the first node becomes the import root
        return
    elif name == "attribute" and not self.node_already_seen:
        attr_name = attrs["name"].encode("utf-8")
        if "value" in attrs:
            if attr_name in ["valuelist"]:
                # normalize newline-separated value lists to ';'-separated
                node.setAttribute(
                    attr_name,
                    attrs["value"].encode("utf-8").replace("\n\n", "\n").replace("\n", ";").replace(";;", ";"),
                )
            else:
                node.setAttribute(attr_name, attrs["value"].encode("utf-8"))
        else:
            # value arrives as character data; remember the attribute name
            self.attributename = attr_name
    elif name == "child" and not self.node_already_seen:
        id = u(attrs["id"])
        node.tmpchilds += [id]
    elif name == "file" and not self.node_already_seen:
        try:
            type = attrs["type"].encode("utf-8")
        except:
            type = None
        try:
            mimetype = attrs["mime-type"].encode("utf-8")
        except:
            mimetype = None
        filename = attrs["filename"].encode("utf-8")
        node.addFile(tree.FileNode(name=filename, type=type, mimetype=mimetype))
def xml_start_element(self, name, attrs):
    """SAX/expat start-element callback used while importing a node tree.

    Creates a tree.Node per <node>, applies access rules and attributes,
    collects <child> references into node.tmpchilds and attaches <file>
    entries; state lives in self.nodes, self.id2node and self.root.

    NOTE(review): this function appears twice in this file in near-identical
    form — candidates for consolidation.
    """
    try:
        # innermost open node, used as context for nested elements
        node = self.nodes[-1]
    except:
        node = None  # nothing parsed yet
    if name == "nodelist":
        if "exportversion" in attrs:
            logging.getLogger("backend").info("starting xml import: %s" % str(attrs))
    elif name == "node":
        self.node_already_seen = False
        parent = node
        try:
            # NOTE(review): shadows the builtin `type` (kept unchanged)
            type = attrs["type"].encode("utf-8")
        except:
            type = "directory"  # fallback type
        if "id" not in attrs:
            # invent an id so the lookup table can still be filled
            attrs["id"] = str(random.random())
        old_id = attrs["id"]
        if old_id in self.id2node:
            # duplicate id: reuse the existing node, skip its sub-elements
            node = self.id2node[old_id]
            self.node_already_seen = True
            return
        elif type in ["mapping"]:
            # disambiguate mapping names against pre-existing mappings
            node = tree.Node(name=(attrs["name"] + "_imported_" + old_id).encode("utf-8"), type=type)
        else:
            node = tree.Node(name=attrs["name"].encode("utf-8"), type=type)
        # copy any access rules given on the element
        if "read" in attrs:
            node.setAccess("read", attrs["read"].encode("utf-8"))
        if "write" in attrs:
            node.setAccess("write", attrs["write"].encode("utf-8"))
        if "data" in attrs:
            node.setAccess("data", attrs["data"].encode("utf-8"))
        if self.verbose:
            msg = "created node '%s', '%s', '%s', old_id from attr='%s'" % (node.name, node.type, str(node.id), str(attrs["id"]))
            logging.getLogger("backend").info(msg)
        self.id2node[attrs["id"].encode("utf-8")] = node
        node.tmpchilds = []  # filled by <child> elements, linked later
        self.nodes += [node]
        if self.root is None:
            self.root = node  # first created node is the root of the import
        return
    elif name == "attribute" and not self.node_already_seen:
        attr_name = attrs["name"].encode("utf-8")
        if "value" in attrs:
            if attr_name in ["valuelist"]:
                # collapse newlines in value lists to single ';' separators
                node.setAttribute(attr_name, attrs["value"].encode("utf-8").replace("\n\n", "\n").replace("\n", ";").replace(";;", ";"))
            else:
                node.setAttribute(attr_name, attrs["value"].encode("utf-8"))
        else:
            # the value follows as character data; stash the name for it
            self.attributename = attr_name
    elif name == "child" and not self.node_already_seen:
        id = u(attrs["id"])
        node.tmpchilds += [id]
    elif name == "file" and not self.node_already_seen:
        try:
            type = attrs["type"].encode("utf-8")
        except:
            type = None
        try:
            mimetype = attrs["mime-type"].encode("utf-8")
        except:
            mimetype = None
        filename = attrs["filename"].encode("utf-8")
        node.addFile(tree.FileNode(name=filename, type=type,
                                   mimetype=mimetype))
def getViewHTML(self, field, nodes, flags, language=None, template_from_caller=None, mask=None):
    """Render one mask field for the first node in `nodes`.

    The return form depends on `flags`:
    - VIEW_DATA_ONLY: list [name, value, label, type]
    - VIEW_SUB_ELEMENT: the bare value string
    - VIEW_HIDE_EMPTY with empty value: ''
    - VIEW_DATA_EXPORT: plain (html=0) formatted value
    - otherwise: an HTML div row with label and value
    Returns [] when the field has no underlying metadata element.
    """
    element = field.getField()
    if not element:
        return []
    fieldtype = element.get("type")
    t = getMetadataType(element.get("type"))
    unit = ''
    if field.getUnit() != "":
        unit = ' ' + field.getUnit()
    if flags & VIEW_DATA_ONLY:
        # raw value only; 'text' fields additionally receive the caller template
        if fieldtype in ['text']:
            value = u(t.getFormatedValue(element, nodes[0], language, template_from_caller=template_from_caller, mask=mask)[1])
        else:
            value = u(t.getFormatedValue(element, nodes[0], language)[1])
    else:
        if field.getFormat() != "":
            # a custom format string substitutes the value into "<value>"
            if fieldtype in ['text']:
                value = t.getFormatedValue(element, nodes[0], language, template_from_caller=template_from_caller, mask=mask)[1]
            else:
                value = t.getFormatedValue(element, nodes[0], language)[1]
            value = field.getFormat().replace("<value>", value)
        else:
            if fieldtype in ['text']:
                if template_from_caller and template_from_caller[0]:
                    # checking template on test nodes: show full length
                    fieldvalue = nodes[0].get(element.name)
                    if fieldvalue.strip():
                        # field is filled for this node
                        value = str(t.getFormatedValue(element, nodes[0], language, template_from_caller=fieldvalue, mask=mask)[1])
                    else:
                        # use default
                        value = str(t.getFormatedValue(element, nodes[0], language, template_from_caller=template_from_caller, mask=mask)[1])
                else:
                    # cut long values
                    value = str(formatLongText(t.getFormatedValue(element, nodes[0], language, template_from_caller=template_from_caller, mask=mask)[1], element))
            elif fieldtype in ['upload']:
                # passing mask necessary for fieldtype='upload'
                value = str(formatLongText(t.getFormatedValue(element, nodes[0], language, mask=mask)[1], element))
            else:
                value = str(formatLongText(t.getFormatedValue(element, nodes[0], language)[1], element))
    if len(value.strip()) > 0:
        value += str(unit)
    label = ' '
    if field.getLabel() != "":
        label = field.getLabel() + ': '
    if flags & VIEW_DATA_ONLY:
        # return a valuelist
        return [element.getName(), value, element.getLabel(), element.get("type")]
    elif flags & VIEW_SUB_ELEMENT:
        # element in hgroup
        # only using value omitting label, delimiter like ' ' may be inserted in hgroup.getViewHTML
        return value
    elif flags & VIEW_HIDE_EMPTY and value.strip() == "":
        # hide empty elements
        return ''
    elif flags & VIEW_DATA_EXPORT:
        # plain-text export (html=0)
        if fieldtype in ['text']:
            return str(t.getFormatedValue(element, nodes[0], language, html=0, template_from_caller=template_from_caller, mask=mask)[1])
        else:
            return str(t.getFormatedValue(element, nodes[0], language, html=0)[1])
        # return element.get("type")
    else:
        # standard view
        ret = '<div class="mask_row field-' + element.getName() + '"><div>'
        ret += '<div class="mask_label">' + label + '</div>\n<div class="mask_value">' + value + ' </div>\n'
        ret += '</div></div>'
        return ret
def nodeToFulltextSearch(self, node, schema): # build fulltext index from node if not hasattr(node, "getCategoryName") or not node.getCategoryName( ) == "document": # only build fulltext of document nodes return True r = re.compile("[a-zA-Z0-9]+") if self.execute( 'SELECT id from textsearchmeta where id=\'{}\''.format( node.id), schema, 'text'): # FIXME: we should not delete the old textdata from this node, and insert # the new files. Only problem is, DELETE from a FTS3 table is prohibitively # slow. return for file in node.getFiles(): w = '' if file.getType() == "fulltext" and os.path.exists( file.retrieveFile()): data = {} content = '' f = open(file.retrieveFile()) try: for line in f: if FULLTEXT_INDEX_MODE == 0: content += u(line) else: for w in re.findall(r, line): if w not in data.keys(): data[w] = 1 try: data[w] += 1 except KeyError: data[w] = 1 finally: f.close() if FULLTEXT_INDEX_MODE == 1: for key in data.keys(): content += key + " " elif FULLTEXT_INDEX_MODE == 2: for key in data.keys(): content += key + " [" + str(data[key]) + "] " content = u(content.replace("'", "").replace('"', "")) if len(content) > 0: content_len = len(content) p = 0 while p in range(0, int(ceil(content_len / 500000.0))): sql = 'INSERT INTO textsearchmeta (id, type, schema, value) VALUES("{}", "{}", "{}", "{}")'.format( node.id, node.getContentType(), schema, normalize_utf8( (content[p * 500000:(p + 1) * 500000 - 1]))) try: self.execute(sql, schema, 'text') except: print "\nerror in fulltext of node", node.id return False p += 1 return True return True
def getContent(req, ids):
    """Edit module "manageindex": manage values of index ("ilist") fields.

    Renders the management form and serves its AJAX sub-requests
    (scheme list, index fields of a scheme, values of a field, children
    lookup); on "do_action" it replaces old attribute values with a new
    one on every accessible node carrying them.

    NOTE(review): nesting reconstructed from a collapsed source line —
    confirm the exact indentation against version control.
    """
    def getSchemes(req):
        # schemes visible to this user, restricted to active ones
        schemes = AccessData(req).filter(loadTypesFromDB())
        return filter(lambda x: x.isActive(), schemes)

    ret = ""
    v = {"message": ""}
    if len(ids) >= 0:
        # NOTE(review): condition is always true; presumably `> 0` was meant
        ids = ids[0]
    v["id"] = ids
    if "do_action" in req.params.keys():
        # process nodes: replace old_values with new_value on all matches
        fieldname = req.params.get("fields")
        old_values = u(req.params.get("old_values", "")).split(";")
        new_value = u(req.params.get("new_value"))
        basenode = q(Node).get(ids)  # NOTE(review): appears unused here
        entries = getAllAttributeValues(fieldname, req.params.get("schema"))
        c = 0
        for old_val in old_values:
            for n in AccessData(req).filter(q(Node).filter(Node.id.in_(entries[old_val])).all()):
                # best-effort update; failures are suppressed intentionally
                with suppress(Exception, warn=False):
                    n.set(fieldname, replaceValue(n.get(fieldname), u(old_val), u(new_value)))
                    c += 1
        v["message"] = req.getTAL("web/edit/modules/manageindex.html", {"number": c}, macro="operationinfo")
    if "style" in req.params.keys():
        # AJAX sub-requests for the form widgets
        if req.params.get("action", "") == "schemes":
            # load schemes
            v["schemes"] = getSchemes(req)
            req.writeTAL("web/edit/modules/manageindex.html", v, macro="schemes_dropdown")
            return ""
        elif req.params.get("action", "").startswith("indexfields__"):
            # load index fields of the selected schema
            schema = getMetaType(req.params.get("action", "")[13:])
            fields = []
            for field in schema.getMetaFields():
                if field.getFieldtype() == "ilist":
                    fields.append(field)
            v["fields"] = fields
            v["schemaname"] = schema.getName()
            req.writeTAL("web/edit/modules/manageindex.html", v, macro="fields_dropdown")
            return ""
        elif req.params.get("action", "").startswith("indexvalues__"):
            # load values of selected indexfield
            node = q(Node).get(ids)
            fieldname = req.params.get("action").split("__")[-2]
            schema = req.params.get("action").split("__")[-1]
            v["entries"] = []
            if node:
                v["entries"] = getAllAttributeValues(fieldname, schema)
                v["keys"] = v["entries"].keys()
                # case-insensitive ordering of the value list
                v["keys"].sort(lambda x, y: cmp(x.lower(), y.lower()))
            req.writeTAL("web/edit/modules/manageindex.html", v, macro="fieldvalues")
            return ""
        elif req.params.get("action", "").startswith("children__"):
            # search for children of current collection
            scheme = req.params.get("action", "").split("__")[1]
            fieldname = req.params.get("action", "").split("__")[2]
            values = req.params.get("action", "").split("__")[3].split(";")[:-1]
            all_values = getAllAttributeValues(fieldname, scheme)

            def isChildOf(access, node, basenodeid):
                # 1 when basenodeid appears on any path to `node`, else 0
                for ls in getPaths(node):
                    if basenodeid in [unicode(n.id) for n in ls]:
                        return 1
                return 0

            subitems = {}
            for value in values:
                value = u(value)
                if value in all_values:
                    subitems[value] = []
                    for l in all_values[value]:
                        if isChildOf(AccessData(req), q(Node).get(l), ids):
                            subitems[value].append(l)
            v["items"] = subitems
            v["keys"] = subitems.keys()
            v["keys"].sort()
            req.writeTAL("web/edit/modules/manageindex.html", v, macro="valueinfo")
            return ""
    else:
        # full page: render the management form
        v["csrf"] = req.csrf_token.current_token
        return req.getTAL("web/edit/modules/manageindex.html", v, macro="manageform")
def show_printview(req):
    """ create a pdf preview of given node (id in path e.g. /print/[id]/[area])

    Writes a generated PDF to the response. Special cases: the "edit"
    area delegates to printmethod(req); node id "0" prints the current
    session content area. Otherwise the node's print mask is rendered,
    its container children are collected and optionally sorted by the
    requested sort fields before printview.getPrintView produces the PDF.

    NOTE(review): nesting reconstructed from a collapsed source line —
    confirm the exact indentation (especially the sort-column loop)
    against version control.
    """
    p = req.path[1:].split("/")
    try:
        nodeid = int(p[1])
    except ValueError:
        raise ValueError("Invalid Printview URL: " + req.path)
    if len(p) == 3:
        if p[2] == "edit":
            # edit-area preview is produced by the edit module itself
            req.reply_headers['Content-Type'] = "application/pdf"
            editprint = printmethod(req)
            if editprint:
                req.write(editprint)
            else:
                req.write("")
            return
    # use objects from session
    if str(nodeid) == "0":
        children = []
        if "contentarea" in req.session:
            try:
                nodes = req.session["contentarea"].content.files
            except:
                # content holds result lists instead of files
                c = req.session["contentarea"].content
                nodes = c.resultlist[c.active].files
            for n in nodes:
                c_mtype = getMetaType(n.getSchema())
                c_mask = c_mtype.getMask("printlist")
                if not c_mask:
                    c_mask = c_mtype.getMask("nodesmall")  # fallback mask
                _c = c_mask.getViewHTML([n], VIEW_DATA_ONLY + VIEW_HIDE_EMPTY)
                if len(_c) > 0:
                    children.append(_c)
        req.reply_headers['Content-Type'] = "application/pdf"
        req.write(printview.getPrintView(lang(req), None, [["", "", t(lang(req), "")]], [], 3, children))
    else:
        node = getNode(nodeid)
        if node.get("system.print") == "0":
            # printing disabled for this node
            return 404
        access = AccessData(req)
        if not access.hasAccess(node, "read"):
            req.write(t(req, "permission_denied"))
            return
        style = int(req.params.get("style", 2))
        # nodetype: pick a print mask ("printview" wins over "fullview")
        mtype = getMetaType(node.getSchema())
        mask = None
        metadata = None
        if mtype:
            for m in mtype.getMasks():
                if m.getMasktype() == "fullview":
                    mask = m
                if m.getMasktype() == "printview":
                    mask = m
                    break
            if not mask:
                mask = mtype.getMask("nodebig")
            if mask:
                metadata = mask.getViewHTML([node], VIEW_DATA_ONLY + VIEW_HIDE_EMPTY)
        if not metadata:
            # minimal fallback metadata: just the node name
            metadata = [['nodename', node.getName(), 'Name', 'text']]
        files = node.getFiles()
        imagepath = None
        for file in files:
            if file.getType().startswith("presentati"):
                imagepath = file.retrieveFile()
        # children
        children = []
        if node.isContainer():
            ret = []
            getPrintChildren(req, node, ret)
            for c in ret:
                if not c.isContainer():
                    # items
                    c_mtype = getMetaType(c.getSchema())
                    c_mask = c_mtype.getMask("printlist")
                    if not c_mask:
                        c_mask = c_mtype.getMask("nodesmall")
                    _c = c_mask.getViewHTML([c], VIEW_DATA_ONLY)
                    if len(_c) > 0:
                        children.append(_c)
                else:
                    # header: containers become breadcrumb-style header rows
                    items = getPaths(c, AccessData(req))
                    p = []
                    for item in items[0]:
                        p.append(u(item.getName()))
                    p.append(u(c.getName()))
                    children.append([(c.id, " > ".join(p[1:]), u(c.getName()), "header")])
            if len(children) > 1:
                # determine sort columns from request params / collection default
                col = []
                order = []
                try:
                    sort = getCollection(node).get("sortfield")
                except:
                    sort = ""
                for i in range(0, 2):
                    col.append((0, ""))
                    order.append(1)
                    if req.params.get("sortfield" + str(i)) != "":
                        sort = req.params.get("sortfield" + str(i), sort)
                    if sort != "":
                        if sort.startswith("-"):
                            # leading '-' means descending order
                            sort = sort[1:]
                            order[i] = -1
                        _i = 0
                        for c in children[0]:
                            if c[0] == sort:
                                col[i] = (_i, sort)
                            _i += 1
                    if col[i][1] == "":
                        # fall back to the first column of the first row
                        col[i] = (0, children[0][0][0])

                # sort method for items
                def myCmp(x, y, col, order):
                    # compare two item rows case-insensitively on the sort column
                    cx = ""
                    cy = ""
                    for item in x:
                        if item[0] == col[0][1]:
                            cx = item[1]
                            break
                    for item in y:
                        if item[0] == col[0][1]:
                            cy = item[1]
                            break
                    if cx.lower() > cy.lower():
                        return 1 * order[0]
                    return -1 * order[0]

                # sort item runs between header rows, keeping headers in place
                sorted_children = []
                tmp = []
                for item in children:
                    if item[0][3] == "header":
                        if len(tmp) > 0:
                            tmp.sort(lambda x, y: myCmp(x, y, col, order))
                            sorted_children.extend(tmp)
                            tmp = []
                        sorted_children.append(item)
                    else:
                        tmp.append(item)
                tmp.sort(lambda x, y: myCmp(x, y, col, order))
                sorted_children.extend(tmp)
                children = sorted_children
        req.reply_headers['Content-Type'] = "application/pdf"
        req.write(printview.getPrintView(lang(req), imagepath, metadata, getPaths(node, AccessData(req)), style, children, getCollection(node)))
def upload_new_node(req, path, params, data):
    """Web-service handler: create a new node with an uploaded file.

    Verifies the request signature for the given user (building a dummy
    user object for dynamic users), checks write access on the parent,
    imports the uploaded file (file object or base64 string), applies
    JSON metadata, links the node under the parent and triggers
    postprocessing. Returns (http status, response length, info dict).
    """
    try:
        uploadfile = params['data']
        # remove the payload so it is not part of the signed parameter set
        del params['data']
    except KeyError:
        uploadfile = None
    # get the user and verify the signature
    if params.get('user'):
        # user=users.getUser(params.get('user'))
        # userAccess = AccessData(user=user)
        _user = users.getUser(params.get('user'))
        if not _user:  # user of dynamic
            class dummyuser:  # dummy user class
                # return all groups with given dynamic user
                def getGroups(self):
                    return [g.name for g in tree.getRoot('usergroups').getChildren() if g.get(
                        'allow_dynamic') == '1' and params.get('user') in g.get('dynamic_users')]

                def getName(self):
                    return params.get('user')

                def getDirID(self):  # unique identifier
                    return params.get('user')

                def isAdmin(self):
                    return 0

            _user = dummyuser()
        userAccess = AccessData(user=_user)
        if userAccess.user:
            user = userAccess.user
            # signature is computed over the full path including '?'
            if not userAccess.verify_request_signature(req.fullpath + '?', params):
                userAccess = None
        else:
            userAccess = None
    else:
        # anonymous request: fall back to the configured guest user
        user = users.getUser(config.get('user.guestuser'))
        userAccess = AccessData(user=user)
    parent = tree.getNode(params.get('parent'))
    # check user access
    if userAccess and userAccess.hasAccess(parent, "write"):
        pass
    else:
        s = "No Access"
        req.write(s)
        d = {
            'status': 'fail',
            'html_response_code': '403',
            'errormessage': 'no access'}
        logger.error("user has no edit permission for node %s" % parent)
        return d['html_response_code'], len(s), d
    datatype = params.get('type')
    uploaddir = users.getUploadDir(user)  # NOTE(review): appears unused here
    n = tree.Node(name=params.get('name'), type=datatype)
    # NOTE(review): InstanceType is Python-2-only (old-style class check)
    if isinstance(uploadfile, types.InstanceType):  # file object used
        nfile = importFile(uploadfile.filename, uploadfile.tempname)
    else:  # string used
        nfile = importFileFromData('uploadTest.jpg', base64.b64decode(uploadfile))
    if nfile:
        n.addFile(nfile)
    else:
        logger.error("error in file uploadservice")
    try:  # test metadata
        metadata = json.loads(params.get('metadata'))
    except ValueError:
        metadata = dict()
    # set provided metadata
    for key, value in metadata.iteritems():
        n.set(u(key), u(value))
    # service flags
    n.set("creator", user.getName())
    n.set("creationtime", format_date())
    parent.addChild(n)
    # process the file, we've added to the new node
    if hasattr(n, "event_files_changed"):
        try:
            n.event_files_changed()
        except OperationException as e:
            # postprocessing failed: remove the stored files and re-raise
            for file in n.getFiles():
                if os.path.exists(file.retrieveFile()):
                    os.remove(file.retrieveFile())
            raise OperationException(e.value)
    # make sure the new node is visible immediately from the web service and
    # the search index gets updated
    n.setDirty()
    tree.remove_from_nodecaches(parent)
    d = {
        'status': 'Created',
        'html_response_code': '201',
        'build_response_end': time.time()}
    s = "Created"
    # provide the uploader with the new node ID
    req.reply_headers['NodeID'] = n.id
    # we need to write in case of POST request, send as buffer will not work
    req.write(s)
    return d['html_response_code'], len(s), d
def getViewHTML(self, field, nodes, flags, language=None, template_from_caller=None, mask=None):
    """Produce the view representation of a mask field for nodes[0].

    The `flags` bitmask selects the output: VIEW_DATA_ONLY yields the
    list [name, value, label, type]; VIEW_SUB_ELEMENT yields only the
    value; VIEW_HIDE_EMPTY yields '' for blank values; VIEW_DATA_EXPORT
    yields the plain (html=0) value; the default yields an HTML row.
    An empty list is returned when no metadata element backs the field.

    NOTE(review): this function appears twice in this file in identical
    form — candidates for consolidation.
    """
    element = field.getField()
    if not element:
        return []
    fieldtype = element.get("type")
    t = getMetadataType(element.get("type"))
    unit = ''
    if field.getUnit() != "":
        unit = ' ' + field.getUnit()
    if flags & VIEW_DATA_ONLY:
        # plain value; 'text' fields also get the caller-supplied template
        if fieldtype in ['text']:
            value = u(t.getFormatedValue(element, nodes[0], language, template_from_caller=template_from_caller, mask=mask)[1])
        else:
            value = u(t.getFormatedValue(element, nodes[0], language)[1])
    else:
        if field.getFormat() != "":
            # custom format string: substitute into the "<value>" placeholder
            if fieldtype in ['text']:
                value = t.getFormatedValue(element, nodes[0], language, template_from_caller=template_from_caller, mask=mask)[1]
            else:
                value = t.getFormatedValue(element, nodes[0], language)[1]
            value = field.getFormat().replace("<value>", value)
        else:
            if fieldtype in ['text']:
                if template_from_caller and template_from_caller[0]:
                    # checking template on test nodes: show full length
                    fieldvalue = nodes[0].get(element.name)
                    if fieldvalue.strip():
                        # field is filled for this node
                        value = str(t.getFormatedValue(element, nodes[0], language, template_from_caller=fieldvalue, mask=mask)[1])
                    else:
                        # use default
                        value = str(
                            t.getFormatedValue(element, nodes[0], language, template_from_caller=template_from_caller, mask=mask)[1])
                else:
                    # cut long values
                    value = str(
                        formatLongText(
                            t.getFormatedValue(
                                element, nodes[0], language,
                                template_from_caller=template_from_caller, mask=mask)[1], element))
            elif fieldtype in ['upload']:
                # passing mask necessary for fieldtype='upload'
                value = str(formatLongText(t.getFormatedValue(element, nodes[0], language, mask=mask)[1], element))
            else:
                value = str(formatLongText(t.getFormatedValue(element, nodes[0], language)[1], element))
    if len(value.strip()) > 0:
        value += str(unit)
    label = ' '
    if field.getLabel() != "":
        label = field.getLabel() + ': '
    if flags & VIEW_DATA_ONLY:
        # return a valuelist
        return [element.getName(), value, element.getLabel(), element.get("type")]
    elif flags & VIEW_SUB_ELEMENT:
        # element in hgroup
        # only using value omitting label, delimiter like ' ' may be inserted in hgroup.getViewHTML
        return value
    elif flags & VIEW_HIDE_EMPTY and value.strip() == "":
        # hide empty elements
        return ''
    elif flags & VIEW_DATA_EXPORT:
        # plain-text export (html=0)
        if fieldtype in ['text']:
            return str(t.getFormatedValue(element, nodes[0], language, html=0, template_from_caller=template_from_caller, mask=mask)[1])
        else:
            return str(t.getFormatedValue(element, nodes[0], language, html=0)[1])
        # return element.get("type")
    else:
        # standard view
        ret = '<div class="mask_row field-' + element.getName() + '"><div>'
        ret += '<div class="mask_label">' + label + '</div>\n<div class="mask_value">' + value + ' </div>\n'
        ret += '</div></div>'
        return ret