def feature_types(self, print_tree=True):
    '''
    Returns a summary of the feature types represented in the RefLoci database

    Parameters
    ----------
    print_tree : bool (default: True)
        If True, prints the result before returning.

    Returns
    -------
    An anytree Node object containing the root node.
    '''
    raise NotImplementedError('This method is BUGGY')
    from anytree import Node, RenderTree
    cur = self._db.cursor()
    primary_ftypes = [x[0] for x in cur.execute('''
        SELECT DISTINCT feature_type
        FROM primary_loci p
        JOIN loci l ON p.LID = l.LID;
    ''').fetchall()]
    ndict = dict()
    root = Node(self.name)
    for n in primary_ftypes:
        ndict[n] = Node(n, parent=root)
    ftypes = cur.execute('''
        SELECT DISTINCT p.feature_type, c.feature_type
        FROM relationships r
        JOIN loci p ON r.parent = p.LID
        JOIN loci c ON r.child = c.LID;
    ''').fetchall()
    # Create Nodes
    for p, c in ftypes:
        ndict[c] = Node(c)
    for p, c in ftypes:
        if p in ndict:
            ndict[c].parent = ndict[p]
        else:
            ndict[c].parent = root
    if print_tree is True:
        print(RenderTree(root))
    return root
from anytree import Node, RenderTree, findall, LevelOrderIter, PostOrderIter
import matplotlib.pyplot as plt
from tkinter import *
from tkinter import filedialog

q = 0
ct = 0
root = []
tdict = {}  # the dictionary keeps track of all attributes of a document
keys = {}   # contains the parent of each attribute. This is used to retrieve data at any particular level
c = 1
r = []
nc = -1
fptr = {}
lvl = Node('level')
y = 0
root.append(Node("root" + str(ct), parent=lvl))
ct = ct + 1
threshold = 0.2
clusters = [[[]] for row in range(8)]  # holds all the cluster details
cluster_avg_len = [[0] for row in range(8)]
cluster_count = [0 for row in range(8)]
# cluster_avg_len[0].append(0)  # set the avg len of first cluster to be zero
similarity_count = [[[]] for row in range(8)]  # to store attribute_count and match_count
sim_matrix = [[[]] for row in range(8)]
MyTreeRoot = Node('Root')
OptimalTree = Node('Optimal_Root')
cluster_tree = Node('clusterRoot')
highest_fscore = 0
from anytree import Node, RenderTree, find, Resolver
import os

file1 = {"id": 0, "datanodes": [], "size": 32, "metadata": "grrrrrrrrr"}
file2 = {"id": 1, "datanodes": [], "size": 12, "metadata": "dhdjhsgdhjas"}
file3 = {"id": 2, "datanodes": [], "size": 12, "metadata": "dhdjhsgdhjas"}

root = Node("root")
pics = Node('pics', parent=root)
docs = Node('docs', parent=root)
sum14 = Node('summer 2014', parent=pics)
sum14_1 = Node('pic1', parent=sum14, file=file1)
sum14_2 = Node('pic2', parent=sum14, file=file2)
report = Node('report', parent=docs, file=file3)

print(os.path.basename('/dsf/sdf/zhopa'))

# path = 'summer 2014/pic1'
# r = Resolver('name')
# print(r.get(pics, path))
#
# path = '/root/pics/summer 2014'
# r = Resolver('name')
# resp = r.get(root, path)
# new_node = Node('new file', parent=resp, file={"id": 3})
# new_node = Node('new file2', parent=resp, file={"id": 4})
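
# A minimal sketch (added, not part of the original snippet) showing how the
# commented-out Resolver calls above behave on the tree built in this snippet.
resolver = Resolver('name')  # resolve children by their 'name' attribute
pic1 = resolver.get(root, '/root/pics/summer 2014/pic1')
print(pic1.file["size"])     # -> 32
for pre, fill, node in RenderTree(root):
    print("%s%s" % (pre, node.name))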
import re
import sys
from anytree import Node, RenderTree, PreOrderIter

# topLevelTuple is built earlier in this script from the puzzle input
# as (name-string, weight/children-string) pairs.
namesWeightsRelations = {}
for prog in topLevelTuple:
    if len(re.findall('[a-z]', prog[1])) > 1:
        namesWeightsRelations[re.findall('[a-z]+', prog[0])[0]] = (
            int(re.findall(r'\d+', prog[1])[0]), re.findall('[a-z]+', prog[1]))
    else:
        namesWeightsRelations[re.findall('[a-z]+', prog[0])[0]] = (
            int(re.findall(r'\d+', prog[1])[0]), '')

thisModule = sys.modules[__name__]
for name, values in zip(namesWeightsRelations.keys(), namesWeightsRelations.values()):
    setattr(thisModule, name, Node(name))
for name, values in zip(namesWeightsRelations.keys(), namesWeightsRelations.values()):
    if values[1] != '':
        for key in values[1]:
            globals()[key].parent = globals()[name]

for pre, fill, node in RenderTree(lfkqyf.root):
    print("%s%s" % (pre, node.name))

for i in range(4):
    myLegs = [node.name for node in PreOrderIter(lfkqyf.root.children[i])]
    total = 0
def index_file(filename):
    stack = list()
    tree = list()
    is_root = True
    line_num = 0
    # Check file type
    if not (filename.endswith('.lib') or filename.endswith('.lib.gz')):
        return None
    print("Indexing " + filename)
    # Pick open function
    open_func = gzip.open if filename.endswith(".gz") else open
    f = open_func(filename, 'rt')  # text mode works for both gzip and plain files
    for line in f:
        line_num += 1
        if '{' in line:
            # Do we need to check for attributes?
            if not line.split('(')[0].strip() in NO_ATTRIBUTES:
                group = line.split('(')[0].strip()
                true_line = line_num
                attributes = set()
                while True:
                    line = next(f)
                    line_num += 1
                    # Grab attributes (they have : in them) or grab indexes for vectors
                    if ':' in line or ('index' in line and 'vector' in group):
                        attributes.add(line.strip().replace('\t', ''))
                    elif line.strip() == '':
                        continue
                    else:
                        break
                # Build up group name with attributes
                name = group.split('(')[0].strip() + '{'
                for attribute in attributes:
                    if ':' in attribute:
                        if not attribute.split(':')[0].strip() in IGNORE_ATTRIBUTES:
                            name += attribute.replace(' ', '').replace(':', '=').replace(';', '').replace('"', '') + ', '
                    else:
                        name += attribute.replace(' ', '').replace(';', '').replace('"', '') + ', '
                name = name.strip().strip(',') + '}'
                # Add group node to tree
                tree.append(Node(name, parent=stack[-1], start_line=true_line))
                stack.append(tree[-1])
                # Need to check if current line is a new group (it probably is if we just got group attributes)
                if '{' in line:
                    name = line.strip().split('{')[0].strip()
                    tree.append(Node(name, parent=stack[-1], start_line=line_num))
                    stack.append(tree[-1])
                elif '}' in line:
                    index = tree.index(stack[-1])
                    tree[index].end_line = line_num
                    stack.pop()
            else:
                # Add a group that doesn't need attributes
                name = line.strip().split('{')[0].strip()
                # Check if we need (or can) use the cell name parser
                if 'cell' in name:
                    cell = name.split('(')[1].strip(')').strip()
                    if all_parsers is not None:
                        try:
                            name = 'cell(' + all_parsers.parse_cell_name(cell) + ')'
                            name = name.replace('%(', '[[').replace(')s', ']]')
                        except AttributeError:
                            name = line.strip().split('{')[0].strip()
                    elif cell_name_parser is not None:
                        try:
                            name = 'cell(' + cell_name_parser.parse_cell_name(cell) + ')'
                            name = name.replace('%(', '[[').replace(')s', ']]')
                        except AttributeError:
                            name = line.strip().split('{')[0].strip()
                if is_root:
                    name = name.split('(')[0]
                    tree.append(Node(name, start_line=line_num))
                    stack.append(tree[0])
                    is_root = False
                else:
                    tree.append(Node(name, parent=stack[-1], start_line=line_num))
                    stack.append(tree[-1])
        elif '}' in line:
            # Reached end of group, record endline and remove from stack
            index = tree.index(stack[-1])
            tree[index].end_line = line_num
            stack.pop()
    f.close()
    print("Indexing Complete!")
    return tree
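
# A short hedged usage sketch (not from the original module), assuming the
# module-level NO_ATTRIBUTES/IGNORE_ATTRIBUTES sets and optional cell-name
# parsers are configured as above. 'example.lib' is a placeholder path.
from anytree import RenderTree

indexed = index_file('example.lib')
if indexed:
    for pre, fill, node in RenderTree(indexed[0]):
        print("%s%s  (lines %s-%s)" % (pre, node.name, node.start_line,
                                       getattr(node, 'end_line', '?')))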
def generate_tree(tree_root, depth, no_of_children, participants=None):
    # todo -- check how to pass depth and no of children automatically and by arguments
    # height and depth not fitting number of participants
    if participants is not None:
        if len(participants) > no_of_children ** (depth - 1):
            return "error message"  # todo -- customize the error message

    # optimization to scale tree
    if depth is None and no_of_children is None and participants is None:
        depth = 1
        no_of_children = 2
    elif depth is None and no_of_children is None and participants is not None:
        # or set up tree size
        no_of_children = 2
        depth = math.ceil(math.log2(len(participants)))
    else:
        pass

    current_parents = [tree_root]
    node_count = 1
    temp_parent = []
    for d in range(depth):
        for parent in current_parents:
            for k in range(no_of_children):
                children_node = TreeNode(node_count)
                children_tree_node = Node(str(node_count), parent=parent,
                                          tree_node=children_node)
                children_tree_node.is_root
                temp_parent.append(children_tree_node)
                node_count += 1
        current_parents.clear()
        current_parents = list(temp_parent)
        temp_parent.clear()

    if participants is None:
        for leaf in current_parents:
            Node("empty", parent=leaf.parent,
                 leaf_node=LeafNode(leaf.tree_node.node_id))
            leaf.parent = None
            leaf.tree_node = None
        return tree_root, current_parents, node_count

    ################################################################
    leaf_nodes = current_parents
    participant_count = 0
    node_id = node_count
    for leaf in leaf_nodes:
        if participants is not None:
            if participant_count < len(participants):
                node_id += 1
                # adding topic to the participant
                # does it make sense to do this at key manager level?
                # participants[participant_count].add_topic(topic)  # topic commented
                leaf_node = LeafNode(node_id, participants[participant_count])
                Node(participants[participant_count].participant_id,
                     parent=leaf.parent, leaf_node=leaf_node)
                leaf.parent = None
                leaf.tree_node = None
                participant_count += 1
            else:
                break
    for p in range(participant_count, len(leaf_nodes)):
        Node("empty", parent=leaf_nodes[p].parent,
             leaf_node=LeafNode(leaf_nodes[p].tree_node.node_id))
        leaf_nodes[p].parent = None
        leaf_nodes[p].tree_node = None
    # leaf_node = leaf[0].tree_node
    return tree_root
def testExpand(self, state, has_children, child_states):
    leaf_node = Node(id={1: state}, numVisited=1, sumValue=1,
                     actionPrior=0.5, isExpanded=False)
def make_node(self, name, parent, url):
    node = Node(name=name, parent=parent, url=url)
    node.root_path = self.root_path(node)
    return node
def tree_example():
    from anytree import Node, RenderTree
    udo = Node(name='nsubj')
    marc = Node(name='dobj', parent=udo)
    lian = Node(parent=marc, name='amod')
    dan = Node(parent=udo, name='nmod:for')
    jet = Node(parent=dan, name='nsubj')
    jan = Node(parent=dan, name='compound')
    joe = Node(parent=dan, name='det')

    print(udo)  # Node('/nsubj')
    print(joe)  # Node('/nsubj/nmod:for/det')

    for pre, fill, node in RenderTree(udo):
        print("%s%s%s" % (pre, node.name, fill))

    print(dan.children)
    # (Node('/nsubj/nmod:for/nsubj'), Node('/nsubj/nmod:for/compound'), Node('/nsubj/nmod:for/det'))
def sort_contours_by_level(contours):
    """Sort contours into parts.

    Returns a sorted list of lists, where inner lists represent contours at the
    same depth, and the outer list organizes inner lists by decreasing depth.
    """
    # TODO: handle pre-closed contours. (Circles, ellipses, etc.)
    parts = []
    height_interval_to_contours = {}  # items are contour lists, since multiple contours can have the same height interval.
    contour_tree = IntervalTree()
    heights = set()
    contours_by_name = {}
    nested_contour_tree_items = {}  # dict of contour nodes

    # Find min/max heights of all contours.
    layout_y_min = math.inf
    layout_y_max = -math.inf
    # Also find the left/right extremes to find global corners.
    layout_x_min = math.inf
    layout_x_max = -math.inf

    for contour in contours:
        # Store contours by name.
        contours_by_name[contour.name()] = contour
        # Store contour in a dict by height interval. Some contours can have the same height, so use lists.
        # This data structure is the input to build the interval tree.
        if (contour.y_min, contour.y_max) in height_interval_to_contours:
            height_interval_to_contours[(contour.y_min, contour.y_max)].append(contour)
        else:
            height_interval_to_contours[(contour.y_min, contour.y_max)] = [contour]
        # Update the extremes of the layout.
        if contour.y_min < layout_y_min:
            layout_y_min = contour.y_min
        if contour.y_max > layout_y_max:
            layout_y_max = contour.y_max
        if contour.x_min < layout_x_min:
            layout_x_min = contour.x_min
        if contour.x_max > layout_x_max:
            layout_x_max = contour.x_max
        # Add the contour's midpoint to the height intervals.
        heights.add((contour.y_max - contour.y_min) / 2 + contour.y_min)

    # Create interval tree.
    print("Packing Contours into Interval Tree for sorting speedup.")
    contour_tree.build(layout_y_min, layout_y_max, height_interval_to_contours)

    # Construct all contour in-out relationships.
    print("Constructing in-out contour relationships.")
    for height in heights:
        # Extract all the contours that exist at this height.
        contour_subset_lists = contour_tree.query(height)
        contour_subset_lists = [item[1] for item in contour_subset_lists]  # remove the keys.
        contour_subset_lists = [item for sublist in contour_subset_lists for item in sublist]  # flatten remaining lists.

        # Build the In-Out relationship tree.
        for a_index, contour_a in enumerate(contour_subset_lists):
            contour_a_node = nested_contour_tree_items.get(contour_a.name(), Node(contour_a.name()))
            for b_index, contour_b in enumerate(contour_subset_lists[a_index + 1:]):
                point_a = (contour_a.start_x, contour_a.start_y)
                point_b = (contour_b.start_x, contour_b.start_y)
                # Check if a is in b. If so, insert pair relationship into tree.
                if point_in_contour(point_a, contour_b):
                    # contour_b is contour_a's parent. Add back to the dict
                    contour_b_node = nested_contour_tree_items.get(contour_b.name(), Node(contour_b.name()))
                    contour_a_node.parent = contour_b_node
                    nested_contour_tree_items[contour_b.name()] = contour_b_node
                # Check if b is in a. If so, insert pair relationship into tree.
                elif point_in_contour(point_b, contour_a):
                    # contour_a is contour_b's parent. Add back to the dict
                    contour_b_node = nested_contour_tree_items.get(contour_b.name(), Node(contour_b.name()))
                    contour_b_node.parent = contour_a_node
                    nested_contour_tree_items[contour_b.name()] = contour_b_node
            nested_contour_tree_items[contour_a.name()] = contour_a_node

    print("Organizing contours by depth")
    # A dict, keyed by level (int) of contours that live at that level.
    depth_lists = OrderedDict()
    # Contours may be sorted in multiple separate trees.
    # Pull contours out of the dict representation and put into lists sorted by depths
    while len(nested_contour_tree_items):
        # Find the root(s) and print out the tree from there.
        node = None
        # Pull an arbitrary item out from the nesting.
        node_key = list(nested_contour_tree_items.keys())[0]
        # Get the root of this tree.
        node = nested_contour_tree_items[node_key]
        while node.parent is not None:
            node = node.parent
        # https://anytree.readthedocs.io/en/latest/api/anytree.iterators.html#anytree.iterators.levelordergroupiter.LevelOrderGroupIter
        list_o_lists = [[node.name for node in children]
                        for children in LevelOrderGroupIter(node)]
        for index, depth_list in enumerate(list_o_lists):
            old_depth_list = depth_lists.get(index, [])
            for contour_name in depth_list:
                old_depth_list.append(contours_by_name[contour_name])
                del nested_contour_tree_items[contour_name]
            depth_lists[index] = old_depth_list

    # Return serialized tree and a starting point.
    return [v for k, v in depth_lists.items()], (layout_x_max, layout_y_max)
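
# A small self-contained illustration (added, not from the original module) of
# the grouping step above: anytree's LevelOrderGroupIter yields a tree's nodes
# one depth level at a time, which is what produces the per-depth lists.
from anytree import Node, LevelOrderGroupIter

outer = Node("outer")
hole = Node("hole", parent=outer)
island = Node("island", parent=hole)

levels = [[n.name for n in level] for level in LevelOrderGroupIter(outer)]
print(levels)  # [['outer'], ['hole'], ['island']]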
from anytree import Node, RenderTree
import random
import sys

udo = Node(4)
marc = Node("Marc", parent=udo)
lian = Node("Lian", parent=marc)
dan = Node("Dan", parent=udo)
jet = Node("Jet", parent=dan)


# Converts board so it is easier to navigate when evaluating board positions
def board_convert(board):
    new_board = {}
    x_start = 143
    y_start = 280
    for i in range(1, 7):
        for j in range(1, 8):
            new_board[j, i] = board[x_start, y_start]
            x_start += 36
        x_start = 143
        y_start -= 33
    return new_board


# Adds a game piece to a column
def add_game_piece(board, column, color):
    for i in range(0, 7):
        if board[column, i] == 'None':
# K)YOU
# I)SAN""".split("\n")

orbit_pairs = []
nodes = {}
for orbit_pair in input:
    if not orbit_pair:
        break
    pairs = orbit_pair.split(')')
    parent = pairs[0]
    child = pairs[1]
    orbit_pairs.append((parent, child))
    if parent not in nodes:
        nodes[parent] = Node(parent)
    if child not in nodes:
        nodes[child] = Node(child)

for parent, child in orbit_pairs:
    nodes[child].parent = nodes[parent]

for node in nodes.values():
    if node.is_root:
        print(node)

# print(RenderTree(nodes['COM'], style=AsciiStyle()))

##
# Part 1
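
# A hedged sketch (added, not the author's code) of the computation the
# "Part 1" comment above introduces: the total orbit count is the sum of every
# node's depth, which anytree exposes directly as node.depth.
total_orbits = sum(node.depth for node in nodes.values())
print(total_orbits)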
def add_children(nodes, child_id, parent_node):
    node = Node(child_id, parent=parent_node)
    if child_id in nodes:
        for child in nodes[child_id]:
            add_children(nodes, child, node)
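
# A minimal usage sketch (added, not part of the original) showing how
# add_children can expand an adjacency mapping of ids into an anytree
# structure; children_by_id is a hypothetical input mapping.
from anytree import Node, RenderTree

children_by_id = {"a": ["b", "c"], "b": ["d"]}
root = Node("root")
add_children(children_by_id, "a", root)
print(RenderTree(root).by_attr("name"))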
def update_stack(self, statement, key):
    self.stack.extend(
        [Node(g, parent=statement) for g in self.p_table[key]][::-1])
f = Node("f") b = Node("b", parent=f) a = Node("a", parent=b) d = Node("d", parent=b) c = Node("c", parent=d) e = Node("e", parent=d) g = Node("g", parent=f) i = Node("i", parent=g) h = Node("h", parent=i) print(RenderTree(f, style=AsciiStyle()).by_attr()) ''' #pip3 install --user anytree from anytree import Node, RenderTree files, folders = [], [] root = Node(input('Enter root file name: ')) users = list(map(str, input('Enter the users\' name: ').split())) [ files.append( list(map(str, input('Enter the files for user ' + i + ' : ').split()))) for i in users ] print() [ folders.append( list(map(str, input('Enter the folders for user ' + i + ' : ').split()))) for i in users ] print()
def __init__(self):
    self.root = Node('DTL')
def add_participant(tree_root, participant, changed_root_keys=None):
    empty_nodes = findall_by_attr(tree_root, "empty")
    if len(empty_nodes) == 0:
        # tree structure change --
        # get all the leaf nodes and maximum node id
        leaf_nodes = tree_root.leaves
        last_node_id = int(leaf_nodes[len(leaf_nodes) - 1].leaf_node.node_id)
        participant_counter = 0
        added_participant = None
        message_details_dict_list = []
        # change tree structure
        # children_node = TreeNode(node_count)
        # children_tree_node = Node(str(node_count), parent=parent, tree_node=children_node)
        for leaf in leaf_nodes:
            new_parent_tree_node = TreeNode(leaf.leaf_node.node_id)
            new_parent = Node(leaf.leaf_node.node_id, parent=leaf.parent,
                              tree_node=new_parent_tree_node)
            # two children for this node -- or todo no of children of this root_tree --
            # 1st child has the same participant and the second child is empty
            last_node_id += 1
            leaf.leaf_node.name = str(last_node_id)
            leaf.parent = new_parent
            # send the changed structure message only to the participants affected
            # newly added participant messages will be handled by registration protocol not here
            message_detail = {
                "message_name": "change_tree_structure" + "/" + leaf.name,
                "encryption_key": leaf.leaf_node.participant.pairwise_key,
                "new_ancestor_key": new_parent.tree_node.node_key
            }
            message_details_dict_list.append(message_detail)
            # first check if the participant is already added
            if participant_counter == 0:
                # add new participant here
                last_node_id += 1
                new_leaf_node = LeafNode(str(last_node_id), participant)
                added_participant = Node(participant.participant_id,
                                         parent=new_parent,
                                         leaf_node=new_leaf_node)
                participant_counter = 1
            else:
                # if participant is already added, the 2nd child is empty
                # Node("empty", parent=leaf_nodes[p].parent, leaf_node=LeafNode(leaf_nodes[p].tree_node.node_id))
                last_node_id += 1
                Node("empty", parent=new_parent, leaf_node=LeafNode(str(last_node_id)))
        return tree_root, added_participant, message_details_dict_list
    else:
        empty_node = empty_nodes[0]
        # participant.add_topic(topic)  # include code for user-permissions
        # or better move this to top
        new_leaf_node = LeafNode(empty_node.leaf_node.node_id, participant)
        added_participant = Node(participant.participant_id,
                                 parent=empty_node.parent,
                                 leaf_node=new_leaf_node)
        # dis-allocate the old empty node after attaching the new one to the tree
        empty_node.parent = None
        empty_node.leaf_node = None
        # find ancestors of the added participant and change their keys
        ancestor_list = added_participant.ancestors
        for ancestor in ancestor_list:
            if ancestor.is_root and changed_root_keys is not None:
                ancestor.tree_node.root_node_keys = changed_root_keys.copy()
            else:
                ancestor.tree_node.reset_key()
        # change the keys of root node here
        # code to add details about the messages to be sent
        # first construct messages for participant and its siblings
        message_details_dict_list = []
        # adding message for the newly added participant to be decided based on other implementations
        # todo
        """message_detail = {"message_name": str(added_participant.parent.tree_node.node_id) + "/" +
                                             str(added_participant.leaf_node.node_id),
                             "encryption_key": added_participant.leaf_node.participant.pairwise_key,
                             "changed_parent_key": added_participant.parent.tree_node.node_key}
        message_details_dict_list.append(message_detail)"""
        siblings = added_participant.siblings
        for sibling in siblings:
            if sibling.leaf_node.participant is not None:
                message_detail = {
                    # "message_name": str(sibling.parent.tree_node.node_id) + "/" + str(sibling.leaf_node.node_id),
                    "message_name":
                        str(sibling.parent.tree_node.node_id) + "/" +
                        str(sibling.leaf_node.participant.participant_id) +
                        "__changeParent__" + str(sibling.parent.tree_node.node_id),
                    "encryption_key": sibling.leaf_node.participant.pairwise_key,
                    "changed_parent_key": sibling.parent.tree_node.node_key
                }
                message_details_dict_list.append(message_detail)
        # construct messages for ancestors and their siblings
        for ancestor in range(len(ancestor_list) - 2, -1, -1):
            children = ancestor_list[ancestor].children
            for child in children:
                message_detail = {
                    "message_name":
                        str(child.parent.tree_node.node_id) + "/" +
                        str(child.tree_node.node_id) + "__changeParent__" +
                        str(child.parent.tree_node.node_id),
                    "encryption_key": child.tree_node.node_key
                }
                if child.parent.is_root and changed_root_keys is not None:
                    message_detail["changed_parent_key"] = child.parent.tree_node.root_node_keys
                else:
                    message_detail["changed_parent_key"] = child.parent.tree_node.node_key
                # if last i.e. root node then encryption keys is the list of changed pub-sub keys
                # add that condition for the last one, when ancestor = 0 basically.
                message_details_dict_list.append(message_detail)
        return tree_root, added_participant, message_details_dict_list
from anytree import Node, RenderTree  # import the anytree library, which we will use to build trees

udo = Node("Udo")                 # create the root of our tree (ROOT)
marc = Node("Marc", parent=udo)   # child node of udo
lian = Node("Lian", parent=marc)  # child node of marc
dan = Node("Dan", parent=udo)     # child node of udo
jet = Node("Jet", parent=dan)     # child node of dan
jan = Node("Jan", parent=dan)     # child node of dan
joe = Node("Joe", parent=dan)     # child node of dan

# ask the library to lay out and order the nodes starting from the root udo
for pre, fill, node in RenderTree(udo):
    # print the tree generated from the root udo and its child nodes
    print("%s%s" % (pre, node.name))
def delete_participant(tree_root, participant, changed_root_keys=None):
    # find the node
    participant_to_be_removed = findall_by_attr(tree_root, participant.participant_id)[0]
    # find all ancestors of this participant and change keys
    ancestor_list = participant_to_be_removed.ancestors
    for ancestor in ancestor_list:
        if ancestor.is_root and changed_root_keys is not None:
            ancestor.tree_node.root_node_keys = changed_root_keys.copy()
        else:
            ancestor.tree_node.reset_key()

    # code to add details about the messages to be sent
    # first construct messages for to-be-deleted participant's siblings
    message_details_dict_list = []
    siblings = participant_to_be_removed.siblings
    for sibling in siblings:
        if sibling.leaf_node.participant is not None:
            message_detail = {
                # "message_name": str(sibling.parent.tree_node.node_id) + "/" + str(sibling.leaf_node.node_id),
                "message_name":
                    str(sibling.parent.tree_node.node_id) + "/" +
                    str(sibling.leaf_node.participant.participant_id) +
                    "__changeParent__" + str(sibling.parent.tree_node.node_id),
                "encryption_key": sibling.leaf_node.participant.pairwise_key,
                "changed_parent_key": sibling.parent.tree_node.node_key
            }
            message_details_dict_list.append(message_detail)

    # construct messages for ancestors and their siblings
    for ancestor in range(len(ancestor_list) - 2, -1, -1):
        children = ancestor_list[ancestor].children
        for child in children:
            message_detail = {
                "message_name":
                    str(child.parent.tree_node.node_id) + "/" +
                    str(child.tree_node.node_id) + "__changeParent__" +
                    str(child.parent.tree_node.node_id),
                "encryption_key": child.tree_node.node_key
            }
            if child.parent.is_root and changed_root_keys is not None:
                message_detail["changed_parent_key"] = child.parent.tree_node.root_node_keys
            else:
                message_detail["changed_parent_key"] = child.parent.tree_node.node_key
            message_details_dict_list.append(message_detail)

    # delete the participant and add an empty node there
    # moved this to manager class
    # participant.delete_topic(topic)
    new_leaf_node = LeafNode(participant_to_be_removed.leaf_node.node_id)
    new_leaf_node.participant = None
    new_empty_node = Node("empty", parent=participant_to_be_removed.parent,
                          leaf_node=new_leaf_node)
    # dis-allocate the participant node after attaching the new empty node to the tree
    participant_to_be_removed.parent = None
    participant_to_be_removed.leaf_node = None
    return tree_root, new_empty_node, message_details_dict_list
def adopt_orphan_nodes(self):
    self.root = Node('jinjas')
    for node in self.orphan_nodes:
        node.parent = self.root
class ID3:
    # Step 1: create the root node
    T = Node("Root")

    def __init__(self, S, A):
        self.algorithm(S, A, self.T)

    def algorithm(self, S, A, T):
        # Step 2: if all the examples in S are of the same class c, returns the tree T labeled with class c
        c = self.areAllElementOfSetEqual(S)
        if c != "":
            return Node(c, parent=T)
        # Step 3: if A is empty, returns the tree T labeled with the majority class c in S
        if not A:
            c = self.majorityClassOfSet(S, A)
            return Node(c, parent=T)
        # Step 4: let a belong to A such that a is optimal in A
        a = self.optimalAttribute(S, A)
        # Get all the values that the optimal attribute a can assume in S
        values = self.valuesByAttribute(S, a)
        # Update the tree T
        T_prime = Node(a, parent=T, value=values)
        # Remove the current optimal attribute a from A
        A.remove(a)
        # Make a recursive call for each value that the optimal attribute a can assume in S
        for i in range(len(values)):
            # Step 5: partition the set S according to the possible values that the optimal attribute a can assume
            S_prime = self.partition(S, a, values[i])
            # Step 6: recursive call of ID3
            self.algorithm(S_prime, A, T_prime)

    # Check if all the elements of a set are of the same class c
    def areAllElementOfSetEqual(self, S):
        c = S[0]["Sport"]
        for i in range(1, self.cardinality(S)):
            if S[i]["Sport"] != c:
                return ""
        return c

    # Determine the majority class in S
    def majorityClassOfSet(self, S, A):
        classes = {}
        for s in S:
            if s["Sport"] not in classes:
                classes[s["Sport"]] = 1
            else:
                classes[s["Sport"]] += 1
        return A[int(np.argmax(classes))]

    # Determine the optimal attribute in A
    def optimalAttribute(self, S, A):
        # If |A| = 1, consider the only attribute in A as optimal
        if self.cardinality(A) == 1:
            return A[0]
        information_gains = []
        for a in A:
            information_gains.append(self.informationGain(S, a))
        # Determine which attribute has the highest Information Gain
        index = np.argmax(information_gains)
        return A[int(index)]

    # Get the cardinality of a set
    def cardinality(self, S):
        return len(S)

    # Calculate the Information Gain
    def informationGain(self, S, x):
        values = {}
        summation = 0
        for s in S:
            if s[x] not in values:
                values[s[x]] = 1
            else:
                values[s[x]] += 1
        for v in values:
            # Get the examples from S by the value v of the attribute x
            s_x = self.examplesByAttribute(S, x, v)
            summation += (values[v] / self.cardinality(S)) * self.entropy(s_x, method="cross-entropy")
        return self.entropy(S, method="cross-entropy") - summation

    # Get the examples from the set S with attribute x and value v
    def examplesByAttribute(self, S, x, v):
        s_x = []
        for s in S:
            if s[x] == v:
                s_x.append(s)
        return s_x

    # Calculate the entropy
    def entropy(self, S, method):
        if method == "cross-entropy":
            return self.crossEntropy(S)
        if method == "gini-impurity":
            return self.giniImpurity(S)

    # Calculate the Cross-Entropy
    def crossEntropy(self, S):
        classes = {}
        for s in S:
            if s["Sport"] not in classes:
                classes[s["Sport"]] = 1
            else:
                classes[s["Sport"]] += 1
        E = 0
        for c in classes:
            p_c = classes[c] / self.cardinality(S)
            E += p_c * log(p_c, 2)
        return -E

    # Calculate the Gini Impurity
    def giniImpurity(self, S):
        classes = {}
        for s in S:
            if s["Sport"] not in classes:
                classes[s["Sport"]] = 1
            else:
                classes[s["Sport"]] += 1
        GI = 0
        for c in classes:
            p_c = classes[c] / self.cardinality(S)
            GI += p_c * p_c
        return 1 - GI

    # Get the values that an attribute x can assume in S
    def valuesByAttribute(self, S, x):
        values = []
        for i in range(self.cardinality(S)):
            if S[i][x] not in values:
                values.append(S[i][x])
        return values

    # Partition the set S by the value v that an attribute x can assume in S
    def partition(self, S, x, v):
        partitions = []
        for i in range(self.cardinality(S)):
            if S[i][x] == v:
                partitions.append(S[i])
        return partitions
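
# A hedged usage sketch (added, not from the original file) showing how this
# ID3 class appears to be driven: S is a list of dict examples labelled under
# the "Sport" key and A is the list of attribute names. It assumes the module
# already imports numpy as np, math.log and anytree.Node, as the class code
# requires; the data below is invented purely for illustration.
from anytree import RenderTree

S = [
    {"Weather": "sunny", "Wind": "weak",   "Sport": "tennis"},
    {"Weather": "sunny", "Wind": "strong", "Sport": "sailing"},
    {"Weather": "rainy", "Wind": "weak",   "Sport": "chess"},
    {"Weather": "rainy", "Wind": "strong", "Sport": "chess"},
]
A = ["Weather", "Wind"]

tree = ID3(S, A)
for pre, fill, node in RenderTree(tree.T):
    print("%s%s" % (pre, node.name))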
def references_algorithm(start_msg):
    # type: (Message) -> t.List[Message]
    from anytree import Node, LoopError, PreOrderIter

    # find references
    # first try message ids in the references header line
    # if that fails use the first valid messageid in the in-reply-to header line as the only valid parent
    # if the reply to doesn't work then there are no references
    references = start_msg.references or start_msg.in_reply_to[:1]

    # determine if a message is a reply or a forward
    # A message is considered to be a reply or forward if the base
    # subject extraction rules, applied to the original subject,
    # remove any of the following: a subj-refwd, a "(fwd)" subj-
    # trailer, or a subj-fwd-hdr and subj-fwd-trl
    # see https://tools.ietf.org/html/rfc5256#section-2.1 for base subject extraction
    # see https://tools.ietf.org/html/rfc5256#section-5 for def of abnf

    # PART 1 A from https://tools.ietf.org/html/rfc5256 REFERENCES
    # using the message ids in the messages references link corresponding messages
    # first is parent of second, second is parent of third, etc...
    # make sure there are no loops
    # if a message already has a parent don't change the existing link
    # if no message exists with the reference then create a dummy message
    # TODO not sure how to check valid message ids

    # nodes which don't have parents
    orphan_nodes = set()  # type: t.Set[Node]
    current = None
    # Map of msg ids to Nodes
    node_map = {}  # type: t.Dict[str, Node]
    for msg_id in references:
        node = node_map.get(msg_id, Node(msg_id))
        node_map[msg_id] = node
        # if we are in a child and the child does not already have a parent
        # try to add the node
        if current is not None and node.parent is None:
            try:
                node.parent = current
            except LoopError:
                current = None
        # otherwise the node is a new orphan
        if current is None:
            current = node
            orphan_nodes.add(current)

    # nodes which are not in our database
    msg_map = {
        node_map[msg_id]: message_from_message_id(msg_id, start_msg._imap_account,
                                                  start_msg.folder, start_msg._imap_client)
        for msg_id in references
    }  # t.Dict[Node, t.Optional[Message]]
    dummy_nodes = {node for node, msg in msg_map.items() if msg is None}  # t.Set[Node]

    # PART 1 B
    # create a parent child link between the last reference and the current message.
    # if the current message already has a parent break the current parent child link
    # unless this would create a loop
    node = node_map.get(start_msg._message_id, Node(start_msg._message_id))  # type: Node
    node_map[start_msg._message_id] = node
    try:
        node.parent = current
    except LoopError:
        pass

    # PART 2
    # make any messages without parents children of a dummy root
    root = Node('root')  # type: Node
    for orphan in orphan_nodes:
        orphan.parent = root

    # PART 3
    # prune dummy messages from the tree
    # If it is a dummy message with NO children, delete it.
    #
    # If it is a dummy message with children, delete it, but
    # promote its children to the current level. In other
    # words, splice them in with the dummy's siblings.
    #
    # Do not promote the children if doing so would make them
    # children of the root, unless there is only one child.
    for node in list(PreOrderIter(root)):
        if node not in dummy_nodes:
            continue
        dummy_node = node
        # if there are no children
        if not dummy_node.children:
            dummy_node.parent = None
        # promote children but only promote at most one child to the root
        elif dummy_node.parent != root or len(dummy_node.children) == 1:
            for child in dummy_node.children:
                child.parent = dummy_node.parent

    # PART 4
    # Sort the messages under the root (top-level siblings only)
    # by sent date as described in section 2.2. In the case of a
    # dummy message, sort its children by sent date and then use
    # the first child for the top-level sort.
    def sortkey(node):
        if node not in dummy_nodes:
            return msg_map[node].date
        node.children = sorted(node.children, key=sortkey)
        # assumes we have no dummies in the middle of the tree
        return min(msg_map[n].date for n in node.children)

    root.children = sorted(root.children, key=sortkey)
    assert isinstance(root.children, list)
def __init__(self, paragraph_list, symbol_width, symbol_height, WORD_EMBEDDINGS):
    """
    Creates a tree structure that outlines the nested structure of the document

    Args:
        paragraph_list (list): list of paragraphs with { 'text':.. , 'bounding_box': ...}
        symbol_width (float): avg pixel width of symbol
        symbol_height (float): avg pixel height of symbol

    Attributes:
        root_node (Node): the root node of the tree structure
        annotation_list (list): the extra paragraphs that don't fit within the tree structure
        symbol_width (float): avg pixel width of symbol
        symbol_height (float): avg pixel height of symbol

    TODO:
        * Rotate image so that the text can be aligned before sending it to the vision api
        * Deal with different columns on the same page
        * Deal with multiple pages and combining pages together
    """
    self.root_node = Node('root')
    self.annotation_list = []
    self.symbol_width = symbol_width
    self.symbol_height = symbol_height
    self.WORD_EMBEDDINGS = WORD_EMBEDDINGS

    # Removes paragraphs that do not contain letters or numbers
    paragraph_list = [
        paragraph for paragraph in paragraph_list
        if re.search(r'\w', paragraph['text'])
    ]

    layer_num = 1
    parent_nodes = [self.root_node]
    prev_layer_list = []
    prev_top_left_x_val = 0

    # loops through layers until there are no more
    while paragraph_list:
        top_left_idx = Document.find_top_left(paragraph_list, prev_top_left_x_val)
        top_left_x_val = (paragraph_list[top_left_idx]['bounding_box']['top_left']['x']
                          if top_left_idx is not None else 0)

        # If next top left value is extremely far away from the previous top left value,
        # break loop and set remaining values as annotations
        if top_left_idx is None or (prev_top_left_x_val != 0 and
                                    top_left_x_val > prev_top_left_x_val + (20 * self.symbol_width)):
            for paragraph in paragraph_list:
                sentences = Sentence.get_sentences_from_paragraph(
                    paragraph['word_list'], paragraph['entity_list'],
                    paragraph['syntax_list'])
                self.annotation_list.append({
                    'sentences': sentences,
                    'paragraph': paragraph,
                    'text': paragraph['text']
                })
            break

        # Add child nodes to the previous layer
        if parent_nodes != []:
            layer_list = self.find_nodes_in_same_layer(paragraph_list, top_left_x_val)
            parent_node_idx_list = self.determine_parent_node(layer_list, prev_layer_list)
            new_parent_nodes = []
            for i, paragraph in enumerate(layer_list):
                sentences = Sentence.get_sentences_from_paragraph(
                    paragraph['word_list'], paragraph['entity_list'],
                    paragraph['syntax_list'])
                child_node = Node(
                    "layer: %s, child_num: %s" % (layer_num, i),
                    parent=parent_nodes[parent_node_idx_list[i]],
                    sentences=sentences,
                    paragraph=paragraph,
                    text=paragraph['text'])
                new_parent_nodes.append(child_node)
            # Update parent nodes list:
            parent_nodes = new_parent_nodes
            prev_layer_list = layer_list
            prev_top_left_x_val = top_left_x_val
            layer_num += 1
        else:
            for paragraph in paragraph_list:
                sentences = Sentence.get_sentences_from_paragraph(
                    paragraph['word_list'], paragraph['entity_list'],
                    paragraph['syntax_list'])
                self.annotation_list.append({
                    'sentences': sentences,
                    'paragraph': paragraph,
                    'text': paragraph['text']
                })
            break
def __init__(self, urdf_object, progressbar=None):
    """
    Description
    -----------

    Robot Constructor. You can construct a robot from an URDF Object.

    Parameters
    ----------

    urdf_object : URDF.URDF
        URDF Object from the URDF library
    progressbar : PyQt5.QtWidgets.QProgressBar or None, optional
        default is None
        Progressbar to update during the robot creation (used in GUI).
        If it is None, no progressbar is updated.

    Examples
    --------

    You can create a robot from an URDF file using the parser :

    >>> from URDF import URDF
    >>> urdf_obj = URDF("./Examples/example_0.urdf")
    >>> robot_obj = RobotURDF(urdf_obj)

    """
    # 1 - Robot Name .....................................................
    if 'name' in urdf_object.robot[0].keys():
        self.name = urdf_object.robot[0]['name']
    else:
        self.name = "no_name"

    # 2 - Robot Links ....................................................
    self.links = []
    for i in range(urdf_object.nlinks()):
        self.links.append(LinkURDF(urdf_object, i))

    # 3 - Robot Joints ...................................................
    self.joints = []
    for i in range(urdf_object.njoints()):
        if progressbar is not None:
            progressbar.setProperty("value",
                                    100 * (i + 1) / urdf_object.njoints())
        self.joints.append(JointURDF(urdf_object, i))

    # 4 - Tree Representation ............................................
    # Creating a Node per Link . . . . . . . . . . . . . . . . . . . . . .
    all_link_nodes = []
    for i, _ in enumerate(self.links):
        all_link_nodes.append(Node('link_' + str(i)))

    # Creating a Node per Joint . . . . . . . . . . . . . . . . . . . . .
    all_joint_nodes = []
    for i, joint in enumerate(self.joints):
        all_joint_nodes.append(
            Node('joint_' + str(i), parent=all_link_nodes[joint.parent]))

    # Setting parents for Link Nodes . . . . . . . . . . . . . . . . . . .
    root_link_id = 0
    for i, _ in enumerate(all_link_nodes):
        if self.links[i].is_root:
            root_link_id = i
            continue
        all_link_nodes[i].parent = (
            all_joint_nodes[self.links[i].child_joints[0]])

    # Setting Global Tree
    self.tree = RenderTree(all_link_nodes[root_link_id])

    self.mass = 0
    for link in self.links:
        self.mass += link.mass

    super().__init__()
def p_inicializacao_variaveis(p):
    '''
    inicializacao_variaveis : atribuicao
    '''
    p[0] = Node('inicializacao_variaveis', value='inicializacao_variaveis',
                children=[p[1]])
from anytree import Node, RenderTree udo = Node("Udo") marc = Node("Marc", parent=udo) lian = Node("Lian", parent=marc) dan = Node("Dan", parent=udo) jet = Node("Jet", parent=dan) jan = Node("Jan", parent=dan) joe = Node("Joe", parent=dan) for pre, fill, node in RenderTree(udo): print("%s%s" % (pre, node.name))
def setTrees(MyTreeRoot, cluster_tree, t_range, t_val):
    for i in range(t_range):
        Node(t_val, parent=MyTreeRoot)
        Node(t_val, parent=cluster_tree)
        t_val = (t_val * 10 + 1) / 10
def assemble_import_tree(path: str) -> Node:
    '''
    Assemble a bookmark tree structure from `Bookmarks` file to be able to
    either display or correctly import/merge the structure into internal
    bookmarks database.
    '''
    with open(path, 'rb') as fbookmark:
        raw = json.loads(fbookmark.read().decode('utf-8'))

    trees = []
    if 'bookmark_bar' in raw['roots']:
        folder_items = OperaImporter.walk_folders(raw['roots']['bookmark_bar'], 0)
        trees.append(
            assemble_folder_tree(items=folder_items,
                                 key='parent_folder_id',
                                 node_type=Folder))
    if 'custom_root' in raw['roots']:
        raw_custom_sorted = sorted(raw['roots']['custom_root'].items(),
                                   key=lambda item: item[0])
        for _, value in raw_custom_sorted:
            folder_items = OperaImporter.walk_folders(value, 0)
            trees.append(
                assemble_folder_tree(items=folder_items,
                                     key='parent_folder_id',
                                     node_type=Folder))
    if 'other' in raw['roots']:
        folder_items = OperaImporter.walk_folders(raw['roots']['other'], 0)
        trees.append(
            assemble_folder_tree(items=folder_items,
                                 key='parent_folder_id',
                                 node_type=Folder))
    if 'synced' in raw['roots']:
        folder_items = OperaImporter.walk_folders(raw['roots']['synced'], 0)
        trees.append(
            assemble_folder_tree(items=folder_items,
                                 key='parent_folder_id',
                                 node_type=Folder))

    # printable folder tree
    folder_tree = Node(name=0,
                       node_type=Folder,
                       id=0,
                       folder_name='<no title>',
                       parent_folder_id=None,
                       item={})
    for tree in trees:
        tree.parent = folder_tree
        tree.parent_folder_id = folder_tree.id  # pylint: disable=no-member

    bookmarks = []
    for folder in traverse(folder_tree):
        bookmarks += OperaImporter.walk_bookmarks(folder.item, folder.id)
        delattr(folder, 'item')

    # printable folder+bookmark tree
    bookmark_tree = assemble_bookmark_tree(items=bookmarks,
                                           key='folder_id',
                                           folder_tree_root=folder_tree,
                                           node_type=Bookmark)
    return bookmark_tree
# these nodes need to be declared as variables, so I stack them in a list to avoid naming each one
listeNode = []

# create a list and fill it with the data needed for Nodes (word, position, position of its parent)
listeDependency = []
for i in range(len(annotatedText["tokens"])):
    annotatedWord = annotatedText["tokens"][i]
    Dependency = annotatedWord["dependencyEdge"]["headTokenIndex"]
    Text = annotatedWord["text"]["content"]
    listeDependency.append((Text, i, Dependency))

# find the root
for i in range(len(listeDependency)):
    if listeDependency[i][1] == listeDependency[i][2]:  # if a word is its own parent, it's the root
        text = listeDependency[i][0]
        indexRoot = listeDependency[i][2]
        root = Node(text)  # Node of the root (one arg only, no parent)
        listeNode.append(root)
        break

print(listeDependency)

# WiP: I should create each node; just some scratch right now
# for i in range(len(listeDependency)):
#     if listeDependency[i][2]:
#         listeNode.append(Node(listeDependency[i][0], parent=listeNode[0]))

# how you're supposed to create a Node:
chat = Node("chat", parent=root)

# print the Tree
for pre, fill, node in RenderTree(root):
    print("%s%s" % (pre, node.name))
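
# A hedged sketch (added, not the author's code) of what the WiP comment above
# seems to aim for: build one Node per token, then attach each node to the node
# of its head token, keeping the root detected earlier as the only parentless
# node. It reuses listeDependency, root, Node and RenderTree from above.
nodesByIndex = {}
for text, index, head in listeDependency:
    nodesByIndex[index] = root if index == head else Node(text)

for text, index, head in listeDependency:
    if index != head:  # the root keeps no parent
        nodesByIndex[index].parent = nodesByIndex[head]

for pre, fill, node in RenderTree(root):
    print("%s%s" % (pre, node.name))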
def condFPtree(CPBlist, transRecord, minSupp):
    CFPTlist = []
    print('\n################### Conditional FP Tree ###################')
    for eachRow in CPBlist:
        print('\n------------------- ' + eachRow[0] + ' -------------------')
        nodes = []
        nodePath = ''
        pathList = []
        count = 0
        root = Node('Null', count=None)
        for eachPath in eachRow[1]:
            node = root
            if eachPath[0] != 'null':
                pathCount = eachPath[1]
                nodes = eachPath[0].split(',')
                for eachNode in nodes:
                    if node.is_leaf:
                        child_node = Node(eachNode, parent=node, count=pathCount)
                        node = child_node
                    else:
                        foundFlag = False
                        for child in node.children:
                            if child.name == eachNode:
                                child.count += pathCount
                                node = child
                                foundFlag = True
                        if foundFlag == False:
                            child_node = Node(eachNode, parent=node, count=pathCount)
                            node = child_node
        showTree(root)
        print()
        for eachNode in root.descendants:
            if eachNode.is_leaf:
                if eachNode.name != 'Null':
                    check = False
                    while check == False:
                        if eachNode.count >= int(minSupp):
                            tempList = []
                            parentList = []
                            try:
                                nodePath = re.search(
                                    '(.+?/Null/' + eachNode.name + ')\', count',
                                    str(eachNode.path[-1])).group(1)
                            except Exception as e:
                                nodePath = re.search(
                                    '(.+?/Null/.+?' + eachNode.name + ')\', count',
                                    str(eachNode.path[-1])).group(1)
                            nodePath = nodePath.replace('Node(\'/Null/', '').replace('/', ',')
                            tempNodePath = re.search(
                                '(.+?/' + eachNode.parent.name + ')',
                                str(eachNode.path[-1])).group(1)
                            tempNodePath = tempNodePath.replace('Node(\'/Null/', '').replace('/', ',')
                            # if eachNode.parent.count != eachNode.count and eachNode.parent.count >= int(minSupp):
                            #     parentCheck = False
                            #     currentNode = eachNode
                            #     while parentCheck == False:
                            #         nodePathParent = re.search('(.+?/' + currentNode.parent.name + ')\', count', str(currentNode.parent.path[-1])).group(1)
                            #         nodePathParent = nodePath.replace('Node(\'/Null/', '').replace('/', ',')
                            #         tempList.append(nodePathParent)
                            #         tempList.append(currentNode.parent.count)
                            #         if currentNode.parent.parent.count != currentNode.count and currentNode.parent.parent.count >= int(minSupp) and currentNode.parent.parent.name != 'Null':
                            #             currentNode = currentNode.parent
                            #         else:
                            #             parentCheck = True
                            # tempList.append(nodePath)
                            # tempList.append(eachNode.count)
                            # pathList.append(tempList)
                            parentList = tempNodePath.split(',')
                            parentCheck = False
                            for element in parentList:
                                for tempNode in root.descendants:
                                    if element == tempNode.name:
                                        if tempNode.count >= int(minSupp) and tempNode.count != eachNode.count:
                                            parentCheck = True
                                            tempList.append(tempNode.name)
                                            tempList.append(tempNode.count)
                            if parentCheck == True:
                                tempList.append(eachNode.name)
                            else:
                                tempList.append(nodePath)
                            tempList.append(eachNode.count)
                            pathList.append(tempList)
                            check = True
                        else:
                            if eachNode.parent.name == 'Null':
                                check = True
                            else:
                                eachNode = eachNode.parent
        for everyRow in pathList:
            if len(everyRow) == 2:
                print(everyRow[0] + ': ' + str(everyRow[1]))
            else:
                count = 1
                for each in everyRow:
                    if count % 2 != 0:
                        print(each, end=': ')
                    else:
                        if count == len(everyRow):
                            print(each, end='')
                        else:
                            print(each, end=', ')
                    count += 1
                print()
        CFPTlist.append([eachRow[0], pathList])
    freqPattern(CFPTlist, transRecord)
def create_ontology_graph():
    # Construct ISA trees from triples
    graph = rdflib.Graph()
    graph.parse(os.path.join(ontology_dir, 'inferred_vrd'))
    ontology_labels_nodes = {}
    ontology_labels_equivalent_tmp = Set([])
    ontology_labels_equivalent = Set([])

    for s, p, o in graph.triples((None, URIRef("http://www.w3.org/2002/07/owl#equivalentProperty"), None)):
        # print s, " -> ", p, " -> ", o
        if "http://" in s and "http://" in o:
            subj_label = str(s.split("#")[1])
            obj_label = str(o.split("#")[1])
            ontology_labels_equivalent.add(subj_label)
            ontology_labels_equivalent.add(obj_label)
            if ontology_labels_nodes:
                new_node = True
                # iterate over a copy of the keys since entries are renamed/deleted below
                for node_label in list(ontology_labels_nodes.keys()):
                    if subj_label in node_label.split(","):
                        ontology_labels_equivalent_tmp.remove(node_label)
                        ontology_labels_nodes[node_label].name = ontology_labels_nodes[node_label].name + "," + obj_label
                        ontology_labels_equivalent_tmp.add(ontology_labels_nodes[node_label].name)
                        ontology_labels_nodes[ontology_labels_nodes[node_label].name] = ontology_labels_nodes[node_label]
                        del ontology_labels_nodes[node_label]
                        new_node = False
                    elif obj_label in node_label.split(","):
                        ontology_labels_equivalent_tmp.remove(node_label)
                        ontology_labels_nodes[node_label].name = ontology_labels_nodes[node_label].name + "," + subj_label
                        ontology_labels_equivalent_tmp.add(ontology_labels_nodes[node_label].name)
                        ontology_labels_nodes[ontology_labels_nodes[node_label].name] = ontology_labels_nodes[node_label]
                        del ontology_labels_nodes[node_label]
                        new_node = False
                if new_node:
                    ontology_labels_nodes[subj_label + "," + obj_label] = Node(subj_label + "," + obj_label)
                    ontology_labels_equivalent_tmp.add(subj_label + "," + obj_label)
            else:
                ontology_labels_nodes[subj_label + "," + obj_label] = Node(subj_label + "," + obj_label)
                ontology_labels_equivalent_tmp.add(subj_label + "," + obj_label)

    for s, p, o in graph.triples((None, URIRef("http://www.w3.org/2000/01/rdf-schema#subPropertyOf"), None)):
        # print s, " -> ", p, " -> ", o
        if "http://" in s and "http://" in o:
            subj_label = str(s.split("#")[1])
            obj_label = str(o.split("#")[1])
            subj_node_name = ""
            obj_node_name = ""
            for node_label in ontology_labels_equivalent_tmp:
                if subj_label in node_label.split(","):
                    subj_node_name = node_label
                    continue
                if obj_label in node_label.split(","):
                    obj_node_name = node_label
                    continue
            if subj_node_name and obj_node_name:
                ontology_labels_nodes[subj_node_name].parent = ontology_labels_nodes[obj_node_name]
            if subj_label not in ontology_labels_equivalent and obj_label not in ontology_labels_equivalent:
                if subj_label not in ontology_labels_nodes:
                    ontology_labels_nodes[subj_label] = Node(subj_label)
                if obj_label not in ontology_labels_nodes:
                    ontology_labels_nodes[obj_label] = Node(obj_label)
                ontology_labels_nodes[subj_label].parent = ontology_labels_nodes[obj_label]
            if subj_label in ontology_labels_equivalent and obj_label not in ontology_labels_equivalent:
                if obj_label not in ontology_labels_nodes:
                    ontology_labels_nodes[obj_label] = Node(obj_label)
                # retrieve subj node
                for node_label in ontology_labels_nodes.keys():
                    if subj_label in node_label.split(","):
                        ontology_labels_nodes[node_label].parent = ontology_labels_nodes[obj_label]
            if subj_label not in ontology_labels_equivalent and obj_label in ontology_labels_equivalent:
                if subj_label not in ontology_labels_nodes:
                    ontology_labels_nodes[subj_label] = Node(subj_label)
                # retrieve obj node
                for node_label in ontology_labels_nodes.keys():
                    if obj_label in node_label.split(","):
                        ontology_labels_nodes[subj_label].parent = ontology_labels_nodes[node_label]

    tree_list = []
    for node_label in ontology_labels_nodes:
        if ontology_labels_nodes[node_label].is_root:
            tree_list.append(ontology_labels_nodes[node_label])
    return tree_list, ontology_labels_equivalent_tmp
def __init__(self, formula):
    formula = formula.replace(" ", "")
    formula = self.clean_mess_in_formula(formula)
    self.node_list = [Node(formula)]
    self.generate_tree(formula, self.node_list[0])