def _run(self, args):
    for x in session.query(Attribute).all():
        if x.description:
            # If there are no descriptions, there is no point in
            # dumping attributes as there is no additional
            # information as the short names are part of node path
            raise RuntimeError("Description in an attribute")
    total = self.dfs(Node.root().lower(), lambda x: None)
    dumped = self.dfs(Node.root().lower(), self.dump)
    if total != dumped:
        raise RuntimeError(
            "Dumped {} leaves but {} found in total".format(dumped, total))
def _run(self, args):
    if self.args.attribute:
        for i in session.query(Attribute).all():
            print(i.name)
        return
    rs = [(i.attr.name, i.value) for i in self.children()]
    if not rs:
        return
    # header row followed by an empty separator row
    rs.insert(0, ("Type", "Value"))
    rs.insert(1, ("", ""))
    # width of the longest entry in the first column, used for alignment
    just = max(len(name) for name, _ in rs)
    for x, y in rs:
        print("%s %s" % (x.rjust(just, " "), y))
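# Illustrative output of the table branch in _run above, assuming two
# hypothetical attribute/value pairs returned by self.children():
#
#   Type Value
#
#   host web01
#   port 8080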
def _run(self,args): print("db:\t{0}".format(self.app._get_db())) print("config:") for s in self.conf.sections(): print("\t%s" % s) for i in self.conf.items(s): print("\t\t%s = %s" % i) countable = [Node, Attribute, Edge] for i in countable: cnt = session.query(i).count() print("%s count: %d" % (i.__name__, cnt))
def tokenize_nodes(nodes):
    """
    Tokenize a list of nodes in the format attribute=value into a list of
    (attribute, value) tuples.
    """
    import re
    from logging import getLogger

    from cpk.model import Attribute, session

    attrs = [i.name for i in session.query(Attribute).all()]
    sre_parse_nodes = '^((?P<node_type>%s)=)?(?P<node_name>.+)?$' % "|".join(attrs)
    getLogger(__name__).debug(sre_parse_nodes)
    sre_parse_nodes = re.compile(sre_parse_nodes)
    getLogger(__name__).debug("tokenizer input: %s" % nodes)
    tokens = [sre_parse_nodes.match(i).groupdict() for i in nodes]
    tokens = [(t["node_type"], t["node_name"]) for t in tokens]
    getLogger(__name__).debug("tokens: %s" % tokens)
    return tokens
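# Illustrative behaviour of tokenize_nodes, assuming the database contains
# attributes named "host" and "service" (hypothetical names and values):
#
#   tokenize_nodes(["host=web01", "db"])
#   -> [("host", "web01"), (None, "db")]
#
# A node without an attribute= prefix is returned with None as its type.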