def subtree_divide(root, trarray, extra_names):
    """Collect every direct child of a labelled duplication node.

    Walks the gene tree rooted at `root` and, for each internal node
    labelled "D" (a duplication), appends deep copies of both children
    to `trarray`.  The outgroup taxon names gathered above the
    duplication are appended to `extra_names` once per child so the two
    lists stay index-aligned.

    Every child of a duplication gets added to trarray, leading to
    overlap if there are repeated (nested) duplications.
    """
    if root.label == "D" and not root.istip:
        outgroups = []
        name_array = []
        # Get the outgroup taxa sitting just above this duplication.
        get_first_before_dup(root, outgroups)
        for taxon in outgroups:
            name_array.extend(taxon.lvsnms_uniq())
        # Deep-copy both children so later edits to the subtrees cannot
        # disturb the original tree.  (The previous code built throwaway
        # Node() instances that were immediately overwritten.)
        trarray.append(copy.deepcopy(root.children[0]))
        trarray.append(copy.deepcopy(root.children[1]))
        # The same outgroup-name list applies to both child subtrees.
        extra_names.append(name_array)
        extra_names.append(name_array)
    # Recursion to repeat these steps on every node in the tree.
    for child in root.children:
        subtree_divide(child, trarray, extra_names)
def get_known_devices():
    """Fetch the known devices from the PWA backend and refresh Node state.

    On a 200 response the device table is rebuilt from the JSON payload;
    any other outcome logs the problem and leaves the previous table
    untouched.  Afterwards the module-level reconnect interval is
    rescaled from the number of configured devices.
    """
    # BUG FIX: without this declaration the assignment below created a
    # function-local variable, so the module-level TBR read elsewhere
    # (e.g. by the websocket handler) never picked up the new value.
    global timeBeforeReconnect

    # GET known devices from pwa
    print("[CONFIG] Awaiting known_devices")
    api = API()
    response = api.get()
    if response is None:
        print(f"[API] An error occurred, could not load known devices")
    elif response.status_code == 200:
        # clear known devices
        Node.knownDevices = {}
        # set known devices
        known_devices = json.loads(response.text)
        Node.known_devices_from_json(known_devices)
    else:
        print(
            f"[API] Response code {response.status_code}, could not load known devices")
    numberOfNodes = len(Node.knownDevices)
    # increase the time before reconnect by half of the base TBR for each
    # 100 additional nodes configured
    timeBeforeReconnect = baseTimeBeforeReconnect + \
        int((round(numberOfNodes, -2) / 100) * (baseTimeBeforeReconnect / 2))
    print(
        f"[CONFIG] {numberOfNodes} device(s) were initialised, TBR set to {timeBeforeReconnect}")
def add_nodes(self, events, timexes):
    """Adds the events/instances and timexes to the nodes table. Also
    initializes the edges table now that all nodes are known."""
    # Register one node per timex, then one per event.
    for tx in timexes:
        tx_node = Node(timex=tx)
        self.nodes[tx_node.id] = tx_node
    for ev in events:
        ev_node = Node(event=ev)
        self.nodes[ev_node.id] = ev_node
    # With the node set fixed, build a fully-connected edge table:
    # one Edge object for every ordered pair of node ids.
    for n1 in self.nodes.keys():
        self.edges[n1] = {n2: Edge(n1, n2, self) for n2 in self.nodes.keys()}
def __init__(self, width, height, imageName, name):
    """Set up the window, the background image, and a seed triangulation."""
    window.Window.__init__(self, width, height, name)
    self.showEdge = False
    self.width = width
    self.height = height
    self.voronoiImage = VoronoiImage.VoronoiImage(imageName)

    # Manually add three nodes far outside the visible area so every real
    # point falls inside this "super triangle", which keeps the Delaunay
    # and Voronoi calculations simple.
    farLeft = Node.Node(-9999999, -9999999)
    farRight = Node.Node(9999999, -9999999)
    farTop = Node.Node(0, 9999999)
    outerFace = Face.Face(farLeft, farRight, farTop)
    edgeA = HalfEdge.HalfEdge(farLeft, outerFace)
    edgeB = HalfEdge.HalfEdge(farRight, outerFace)
    edgeC = HalfEdge.HalfEdge(farTop, outerFace)
    # Outer edges have no adjacent (twin) edge, so only the next-edge
    # cycle around the face is linked here.
    edgeA.setNextEdge(edgeB)
    edgeB.setNextEdge(edgeC)
    edgeC.setNextEdge(edgeA)
    outerFace.setEdge(edgeA)

    nodes = [farLeft, farRight, farTop]
    faces = [outerFace]
    halfEdges = [edgeA, edgeB, edgeC]

    # Select a somewhat arbitrary edge to highlight in the debug view.
    self.theEdgeToShow = halfEdges[2]
    self.showFace = False
    self.showAllFaces = False
    self.showVoronoiFaces = False
    self.showNextEdge = False
    self.getAdjacentEdge = False
    self.flipEdge = False
    self.amountOfNodes = 200
    self.graph = Graph.Graph(nodes, halfEdges, faces)
def parseNodes(filename):
    """Parse a CSV of node definitions into a dict keyed by node id.

    Expects a header row followed by rows of:
        node_id, diameter, items, adjacency, inspection_flag
    Returns {node_id: Node} for every data row.
    """
    nodeList = {}
    with open(filename) as f:
        csv_reader = csv.reader(f)
        next(csv_reader)  # skip the header row
        for line in csv_reader:
            node = int(line[0])
            dia = line[1]
            # NOTE(review): the parsed item list is never passed to the
            # Node constructor — confirm whether Node should receive it
            # or whether this parse step can be dropped.
            itemList = parseNodeItems(line[2])
            adList = parseAdjacency(line[3])
            inspec = line[4]
            nodeList[node] = Node(node, dia, inspec, adList)
    # The 'with' block closes the file; the explicit f.close() that used
    # to sit here was redundant.
    return nodeList
def get_first_before_dup(root, array):
    """Climb from `root` toward the tree root, collecting sibling clades.

    At each ancestor that is not itself labelled "D", a deep copy of the
    sibling subtree (the child of the parent that is not on our path) is
    appended to `array`; "D"-labelled parents are skipped.  These
    siblings are the outgroup taxa sitting outside the duplication.

    NOTE(review): in the original collapsed source it is ambiguous
    whether the upward recursion ran in both sibling branches or only
    one; it is applied symmetrically here (both branches continue the
    climb), which matches the multi-element use of `array` by callers —
    confirm against the original file.
    """
    # Make sure this isn't the overall root (which has no parent).
    if root.parent:
        if root.parent.label == "D":
            # Still inside the duplication run; need the base of it.
            get_first_before_dup(root.parent, array)
        else:
            # Append whichever child of the parent is NOT us.  (The old
            # code built a throwaway Node() that was always overwritten.)
            siblings = root.parent.children
            if str(siblings[0].get_newick_repr()) == str(root.get_newick_repr()):
                array.append(copy.deepcopy(siblings[1]))
            else:
                array.append(copy.deepcopy(siblings[0]))
            get_first_before_dup(root.parent, array)
def subtree_divide_at_base_of_dup(root, trarray, extra_names):
    """Collect a deep copy of each duplication node's whole clade.

    Like subtree_divide(), but appends the duplication node itself —
    the clade as a polytomy at its base — instead of the two child
    subtrees separately; the tip at the base wouldn't be analyzed as
    part of a subtree otherwise.  `extra_names` receives the outgroup
    taxon names for each appended clade, keeping the lists
    index-aligned.
    """
    if root.label == "D" and not root.istip:
        outgroups = []
        name_array = []
        # get the outgroup taxa sitting above this duplication
        get_first_before_dup(root, outgroups)
        for taxon in outgroups:
            name_array.extend(taxon.lvsnms_uniq())
        # Deep-copy the whole clade rooted at the duplication.  (The
        # previous Node() pre-assignment was dead code.)
        trarray.append(copy.deepcopy(root))
        extra_names.append(name_array)
    # Recurse so nested duplications are handled too.
    for child in root.children:
        subtree_divide_at_base_of_dup(child, trarray, extra_names)
def add_nodes(self, sources, source_type):
    """Creates Nodes for each source and add them to the nodes table.
    Also initializes the edges table now that all nodes are known. A
    source is either an event or timex tag or simply an identifier.

    Raises:
        ValueError: if source_type is not 'IDENTIFIER', TIMEX or EVENT.
            (Previously an unknown type fell through and crashed with a
            NameError on the undefined `identifier`.)
    """
    for source in sources:
        if source_type == 'IDENTIFIER':
            identifier = source
            text = ''
        elif source_type == TIMEX:
            identifier = source.attrs[TID]
            text = source.attrs[VALUE]
        elif source_type == EVENT:
            identifier = source.attrs[EIID]
            text = source.attrs[FORM]
        else:
            raise ValueError("unknown source_type: %s" % source_type)
        node = Node(source, identifier, source_type, text)
        self.nodes[node.id] = node
    # All nodes are known: build a fully-connected edge table with one
    # Edge per ordered pair of node ids.
    for n1 in self.nodes.keys():
        self.edges[n1] = {}
        for n2 in self.nodes.keys():
            self.edges[n1][n2] = Edge(n1, n2, self)
def search(self): print "Search strategy: ", self.quingFunction nodes = Queue.Queue() initial_node = Node.Node(self.problem.initialState, 0, 0) nodes.put(initial_node) while True: # print ("RA7") if nodes.empty(): print "Failure" node = nodes.get() print(node) # print "It didn't Fail" if self.problem.goal_test(node.state): result = [] result.append(node) print "path from root" print self.actions(node) print "depth: ", node.depth return node # print "Adjust queue method" nodes = self.adjust_queue(nodes, node, self.quingFunction, self.heuristic)
from objects import Node, Tree
from collections import deque
from decorators import profile_me, delay_me
import gc

# Garbage collection disabled so GC pauses don't skew the profiling numbers.
gc.disable()

# Build a small binary tree by hand:
#         5
#       /   \
#      3     8
#     / \   / \
#    1   4 6   9
node1 = Node(1)
node4 = Node(4)
node3 = Node(3)
node3.set_left(node1)
node3.set_right(node4)
node6 = Node(6)
node9 = Node(9)
node8 = Node(8)
node8.set_left(node6)
node8.set_right(node9)
node5 = Node(5)
node5.set_left(node3)
node5.set_right(node8)


@profile_me
def bft_exec(root):
    # Thin profiled wrapper so the traversal itself can be timed.
    bft(root)


def bft(root):
    """Breadth first traversal of a binary tree"""
    # NOTE(review): the body appears truncated in this chunk — only the
    # level initialisation is visible; the queue-based traversal
    # presumably follows in the full file.
    clevel = 1
    root.set_level(1)
def build(instr):
    """This takes in a tree as a string in newick format, then puts it in
    a data structure using Nodes that can be easily traversed.

    Returns a (root, name_array) tuple: the root Node of the parsed tree
    and the list of tip labels in the order they were encountered.

    The parser is a single pass over the string, dispatching on the
    current character; `name`, `branch` and `locus` accumulators are
    reset at the bottom of every loop iteration.
    """
    root = None
    name_array = []
    index = 0
    nextchar = instr[index]
    beginning = True
    keepgoing = True
    current_node = None
    counter = 0
    # The first open bracket in a newick tree format denotes the root node.
    while keepgoing:
        if nextchar == "(" and beginning:
            root = Node()
            current_node = root
            beginning = False
        # In newick format, each new bracket denotes a new node which is
        # the child of the previous node.
        elif nextchar == "(" and not beginning:
            newnode = Node()
            current_node.add_child(newnode)
            current_node = newnode
        # Commas separate taxa in a clade (tips of the tree) so where we
        # see one we should move to a parent node to make the next tip.
        elif nextchar == ',':
            current_node = current_node.parent
        # Closing brackets close clades, so we need to move up a node to
        # open the next one.
        elif nextchar == ")":
            current_node = current_node.parent
            index += 1
            # After closing brackets, there may be labels for that
            # node (e.g. bootstrap values are often found here).
            nextchar = instr[index]
            while True:
                if nextchar == ',' or nextchar == ')' \
                        or nextchar == ':' \
                        or nextchar == ';' \
                        or nextchar == '[':
                    break
                name += nextchar
                index += 1
                nextchar = instr[index]
            # This is to handle floats: e.g. "95.0" is normalised to "95".
            try:
                name = float(name)
                name = int(name)
                name = str(name)
            except ValueError:
                name = name
            current_node.label = name
            current_node.sup = name
            # We give each node a unique id from the counter, this
            # allows all internal nodes to be easily referenced.
            current_node.unique_id = str(counter)
            counter += 1
            # When we've finished recording the label, we need to go
            # back one to be in the right place for the next step.
            index -= 1
        # In newick format, a semicolon denotes the end of the tree.
        elif nextchar == ';':
            keepgoing = False
            break
        # A colon (usually after a label) denotes a branch length,
        # which is information we want to keep.
        elif nextchar == ":":
            index += 1
            nextchar = instr[index]
            while True:
                if nextchar == ',' or nextchar == ')' \
                        or nextchar == ':' \
                        or nextchar == ';' \
                        or nextchar == '[':
                    break
                branch += nextchar
                index += 1
                nextchar = instr[index]
            current_node.length = float(branch)
            # As before, we need to go back a place.
            index -= 1
        # Whitespace means nothing in newick trees.
        elif nextchar == ' ':
            pass
        # Ortholog trees have locus labels preceded by '@'. This locus
        # label shouldn't be part of the taxon label as it refers to a
        # genomic location, not a taxon, but we will still need it.
        elif nextchar == '@':
            while True:
                if nextchar == ',' or nextchar == ')' \
                        or nextchar == ':' \
                        or nextchar == ';' \
                        or nextchar == '[':
                    break
                name += nextchar
                index += 1
                nextchar = instr[index]
            # NOTE(review): the leading '@' is included in the stored
            # locus string (the loop appends it before advancing) —
            # confirm downstream code expects the prefix.
            current_node.locus = name
            index -= 1
        # If it's anything else, it's a taxon, so make an external node.
        else:
            newnode = Node()
            current_node.add_child(newnode)
            current_node = newnode
            current_node.istip = True
            while True:
                if nextchar == ',' or nextchar == ')' \
                        or nextchar == ':' \
                        or nextchar == ';' \
                        or nextchar == '[' \
                        or nextchar == '@':
                    break
                name += nextchar
                index += 1
                nextchar = instr[index]
            current_node.label = name
            current_node.sup = name
            name_array.append(name)
            index -= 1
        # Each time, we move on to the next character in the newick.
        if index < len(instr) - 1:
            index += 1
            nextchar = instr[index]
        # Re-initialise these each time.
        # NOTE(review): because the reset happens at the bottom of the
        # loop, `name`/`branch` are undefined on the very first
        # iteration; this only works because a well-formed newick starts
        # with '(' — a malformed first character would raise NameError.
        name = ""
        branch = ""
        locus = ""
    return root, name_array
import asyncio from objects import Node from time import sleep # None of what's in this file is really readable at this point - it should just show ten nodes being added, and the debugger should let you # inspect their successors/predecessors FINGER_TABLE_SIZE = 3 root = Node("ROOT", FINGER_TABLE_SIZE) root.cohere() node_table = [root] for i in range(10): node = Node("{i}".format(i=i), FINGER_TABLE_SIZE) node_table.append(node) node.join(root) print("Added node {i}".format(i=i)) for r in range(100): for x in node_table[::-1]: x.cohere() for x in node_table[::-1]: x.cohere() if i == 3: print("DEBUG WATCHDOG3") assert root.print_ring(i) from random import randint rvs = [] for i in range(10000):
def main_loop(self):
    """Main render loop: handle scripted events, then redraw the graph.

    Runs at a capped 30 FPS until the window is closed.  Each frame:
    consume one-shot UI flags (show next edge, take adjacent edge, flip
    edge), draw the scene, optionally overlay faces / Voronoi markers,
    draw the half-edge structure, and flip the buffer.
    """
    clock.set_fps_limit(30)
    nodeSize = 5  # half-size of the square drawn per node, in pixels
    timer = 0     # frame counter used to script the two node insertions below
    while not self.has_exit:
        self.dispatch_events()
        self.clear()
        timer += 1
        # Scripted demo: insert two hard-coded nodes at frames 20 and 50.
        if timer == 20:
            nodeX = 737
            nodeY = 702
            nodeNew = Node.Node(nodeX, nodeY)
            self.graph.addNode(nodeNew)
        if timer == 50:
            nodeX = 190
            nodeY = 210
            nodeNew = Node.Node(nodeX, nodeY)
            self.graph.addNode(nodeNew)
        # One-shot flag: advance the highlighted edge around its face.
        if self.showNextEdge:
            self.showNextEdge = False
            self.theEdgeToShow = self.theEdgeToShow.getNextEdge()
        # One-shot flag: jump to the twin (adjacent) edge if one exists.
        if self.getAdjacentEdge:
            self.getAdjacentEdge = False
            if self.theEdgeToShow.getAdjacentEdge() is not None:
                self.theEdgeToShow = self.theEdgeToShow.getAdjacentEdge()
        # One-shot flag: flip the highlighted edge in the triangulation.
        if self.flipEdge:
            temp = self.graph.manuallyFlipEdge(self.theEdgeToShow)
            if temp != None:
                self.theEdgeToShow = temp
            self.flipEdge = False
            print("flipping edge :)")
        # White, so reset the colour
        glColor4f(1, 1, 1, 1)
        gl.glLineWidth(1)
        self.draw()
        gl.glEnable(gl.GL_BLEND)
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
        # Draw the nodes with how you can give it a colour
        if self.showAllFaces:
            for f in self.graph.getFaces():
                colour = f.getColour()
                glColor4f(colour[0] / 256, colour[1] / 256,
                          colour[2] / 256, colour[3])
                n1 = f.getNode1()
                n2 = f.getNode2()
                n3 = f.getNode3()
                pyglet.graphics.draw(3, GL_POLYGON, ('v2f', [
                    n1.getX(), n1.getY(),
                    n2.getX(), n2.getY(),
                    n3.getX(), n3.getY()
                ]))
        # Mark every node with a small red square.
        if self.showVoronoiFaces:
            print("show Voronoi faces")
            glColor4f(1, 0, 0, 1.0)
            for n in self.graph.getNodes():
                nodeX = n.getX()
                nodeY = n.getY()
                pyglet.graphics.draw(4, GL_QUADS, ('v2f', [
                    nodeX - nodeSize, nodeY - nodeSize,
                    nodeX - nodeSize, nodeY + nodeSize,
                    nodeX + nodeSize, nodeY + nodeSize,
                    nodeX + nodeSize, nodeY - nodeSize
                ]))
        # draw the edges using the half edge data structure
        glColor4f(0, 1, 0, 1.0)
        for e in self.graph.getEdges():
            adjacentEdge = e.getAdjacentEdge()
            # It is possible that there is no adjacent edge, this is the
            # case for the outer edges, we don't need to draw them.
            if adjacentEdge != None:
                nodeFrom = e.getAdjacentEdge().getNode()
                nodeTo = e.getNode()
                # NOTE(review): 4 vertices under GL_LINES draws TWO
                # segments — the first, (0,0)-(0,height), looks like a
                # leftover debug line; confirm it is intentional.
                pyglet.graphics.draw(
                    4, GL_LINES,
                    ('v2f', (0, 0, 0, height,
                             nodeFrom.getX(), nodeFrom.getY(),
                             nodeTo.getX(), nodeTo.getY())))
        if self.showEdge:
            # Some visual debugging, show the edge as thicker and blue
            # and draw the face.
            gl.glLineWidth(5)
            glColor4f(0, 0, 1, 1.0)
            adjacentEdge = self.theEdgeToShow.getAdjacentEdge()
            if adjacentEdge != None:
                nodeFrom = self.theEdgeToShow.getAdjacentEdge().getNode()
                nodeTo = self.theEdgeToShow.getNode()
                # print("edge name is " + self.showEdge)
                pyglet.graphics.draw(
                    4, GL_LINES,
                    ('v2f', (0, 0, 0, height,
                             nodeFrom.getX(), nodeFrom.getY(),
                             nodeTo.getX(), nodeTo.getY())))
        # Fill the face belonging to the highlighted edge.
        if self.showFace:
            theFace = self.theEdgeToShow.getFace()
            n1 = theFace.getNode1()
            n2 = theFace.getNode2()
            n3 = theFace.getNode3()
            pyglet.graphics.draw(3, GL_POLYGON, ('v2f', [
                n1.getX(), n1.getY(),
                n2.getX(), n2.getY(),
                n3.getX(), n3.getY()
            ]))
        # Draw the voronoi polygons (numberOfPoints, GL_POLYGON, ('v2f', [all x,y coordinates]))
        # pyglet.graphics.draw(8, GL_POLYGON, ('v2f', [300,300, 300,400, 400,500, 500,500, 600,400, 600,300, 500,200, 400,200]))
        glColor4f(0, 0, 0, 1.0)
        clock.tick()
        self.flip()
def on_mouse_release(self, x, y, button, modifiers):
    """Insert a new node into the graph at the cursor position.

    The mouse button and modifier keys are ignored — every release
    adds a node.
    """
    print("node added at x:", x, "y:", y)
    insertedNode = Node.Node(x, y)
    self.graph.addNode(insertedNode)
def parse(self, tokens, debug):
    """Parse a token stream for a `class Program { ... }` compilation unit.

    Validates the outer class skeleton, splits the interior tokens into
    a field-declaration run and a method-declaration run (slots 3 and 4
    of the program's node_list), then hands both to ParseDFA for the
    detailed grammar, finally parsing each method body.

    Returns (main_program, error_list); error_list accumulates every
    parse error message encountered.
    """
    # node_list layout: [class, Program, '{', field_decls, method_decls, '}']
    main_program = Program.Program(['', '', '', '', '', ''])
    error_list = []
    len_tokens = len(tokens)
    first_step = False  # set True only when the outer skeleton is valid
    if (len_tokens < 4):
        print("Not enough tokens to parse")
    else:
        # Check the four fixed skeleton tokens, reporting each miss.
        if (tokens[0].symbol_type.name != 'class'):
            error_list.append("Parse error, missing class token")
        if (tokens[1].symbol_type.name != 'Program'):
            error_list.append("Parse error, missing Name of class token")
        if (tokens[2].symbol_type.name != '{'):
            error_list.append("Parse error, missing first { token")
        if (tokens[len_tokens - 1].symbol_type.name != '}'):
            error_list.append("Parse error, missing last closing } token")
        if (tokens[0].symbol_type.name == 'class'
                and tokens[1].symbol_type.name == 'Program'
                and tokens[2].symbol_type.name == '{'
                and tokens[len_tokens - 1].symbol_type.name == '}'):
            # Skeleton is valid: wrap the four fixed tokens in Nodes.
            main_program.node_list[0] = Node.Node(
                tokens[0], tokens[0].symbol_type.name, [])
            main_program.node_list[1] = Node.Node(
                tokens[1], tokens[1].symbol_type.name, [])
            main_program.node_list[2] = Node.Node(
                tokens[2], tokens[2].symbol_type.name, [])
            main_program.node_list[5] = Node.Node(
                tokens[len_tokens - 1],
                tokens[len_tokens - 1].symbol_type.name, [])
            # The first interior token must start a declaration.
            if (tokens[3].symbol_type.name != 'type'
                    and tokens[3].symbol_type.name != 'void'
                    and len_tokens >= 5):
                error_list.append("Parse error: Unexpected token " +
                                  tokens[3].symbol_type.name + " at line " +
                                  str(tokens[3].line))
            else:
                # Scan forward over the field-declaration run; the last
                # ';' seen marks where field decls end and methods begin.
                counter_field_decl = 3
                counter_last_index = 3
                while ((tokens[counter_field_decl].symbol_type.name == 'id'
                        or tokens[counter_field_decl].symbol_type.name == ','
                        or tokens[counter_field_decl].symbol_type.name == ';'
                        or tokens[counter_field_decl].symbol_type.name == 'type')
                       and counter_field_decl < len_tokens - 1):
                    if (tokens[counter_field_decl].symbol_type.name == ';'):
                        counter_last_index = counter_field_decl
                    counter_field_decl += 1
                field_decl_list = []
                if (3 != counter_last_index):
                    field_decl_list = tokens[3:counter_last_index + 1]
                # Wrap each field token in a Node under one list node.
                field_decl_node_list = []
                for token in field_decl_list:
                    field_decl_node_list.append(
                        Node.Node(token, token.symbol_type.name, []))
                field_decl_node = Node.Node("<field_decl_list>",
                                            "field_decl_list",
                                            field_decl_node_list)
                main_program.node_list[3] = field_decl_node
                # Everything after the field run (up to the final '}')
                # belongs to the method declarations.
                if (len(field_decl_node_list) == 0):
                    method_decl_list = tokens[
                        counter_last_index:len_tokens - 1]
                else:
                    method_decl_list = tokens[counter_last_index +
                                              1:len_tokens - 1]
                method_decl_node_list = []
                for token in method_decl_list:
                    method_decl_node_list.append(
                        Node.Node(token, token.symbol_type.name, []))
                method_decl_node = Node.Node("<method_decl_list>",
                                             "method_decl_list",
                                             method_decl_node_list)
                main_program.node_list[4] = method_decl_node
                first_step = True
    if (first_step):
        # Second phase: run the DFA over the field and method runs,
        # then parse every method body in place.
        dfa = ParseDFA.ParseDFA()
        dfa.parse_field(main_program, main_program.node_list[3],
                        'field_decl_list', debug, error_list)
        dfa.parse_method(main_program, main_program.node_list[4],
                         'method_decl_list', debug, error_list)
        for method in main_program.getMethodDeclList():
            method.node_list[5] = dfa.parse_block(main_program,
                                                  method.node_list[5],
                                                  debug, error_list)
    if (debug):
        print(error_list)
    return main_program, error_list
def get_node_by_role(cls, instance, role_name):
    """Return the first node holding `role_name` in any cluster of this release."""
    from objects import Node

    # Sub-select the ids of every cluster that belongs to the release.
    release_clusters = db().query(models.Cluster.id).filter_by(
        release_id=instance.id).subquery()
    return Node.get_nodes_by_role(release_clusters, role_name).first()
async def event_handler(websocket, path): global current_clients, timeBeforeReconnect, threads # update active clients current_clients += 1 client = websocket.remote_address current_time = datetime.now().strftime("%Y.%m.%d - %H:%M:%S") print(f'\n[WSS] incoming connection: {client} @ {current_time }') print(f'[WSS] currently connected clients: {current_clients}') # receive the json containing all required information from the node msg_in = await websocket.recv() parsed = json.loads(msg_in) # show sensor data in console print(f"{client} > JSON data: ") print(json.dumps(parsed, indent=4, sort_keys=False)) # get/create node object node = Node.from_json(parsed) if node.isNew: http_new_device(node) node.sensor_data_from_json(parsed) print(f"[WSS] Node {node.chipId} has successfully transmitted its data") # print attributes of the node # node.print_attributes() # send time before reconnect with pseudorandom variation of tbrSpread to spread load msg_out = f"tbr:{timeBeforeReconnect + randint(0, tbrSpread)}" await websocket.send(msg_out) print(f"{client} < {msg_out}") # added to prove the server can handle multiple clients at once provided no blocking actions take place # see https://websockets.readthedocs.io/en/stable/faq.html # await asyncio.sleep(5) config = node.get_config() config_string = json.dumps(config) config_hash = hashlib.md5(config_string.encode()).hexdigest()[0:8] if config_hash != node.config_version: print(f"[WSS] New config available") dict_out = { "config-version": config_hash, "config": config } msg_out = f"config:{dict_out}" await websocket.send(msg_out) # check if update successful msg_in = await websocket.recv() print(f"[WSS] msg_in = {msg_in}") else: print(f"[WSS] Node config up to date!") # send exit message msg_out = f"bye" await websocket.send(msg_out) # print(f"{client} < {msg_out}") # update active clients current_clients -= 1 print('[WSS] Connection closed!') print(f'[WSS] Currently connected clients: {current_clients}')
def get_node_by_role(cls, instance, role_name):
    """Find one node with the given role across the release's clusters."""
    from objects import Node

    # Build the cluster-id subquery for this release, then delegate.
    subq = (
        db().query(models.Cluster.id)
        .filter_by(release_id=instance.id)
        .subquery()
    )
    return Node.get_nodes_by_role(subq, role_name).first()
subtree_biparts, species_biparts, subtree_taxa, species_name_array, "some_log_name", 0, "r", "") # For the mapping, we want to divide the relationships into # conflicts and concordances. conflicts = [] concordances = [] for rel in rel_list: if rel.relation == 'conflict': conflicts.append(rel) elif rel.relation == 'concordant': concordances.append(rel) # Locate this subtree in the main tree. mrca = Node() g.find_mrca(pre_col_taxa, mrca) # Labelling the relevant nodes with conflicts and concordances. for x in concordances: mrca.label_node(x.ortholog_bipart, "*", cutoff) for x in conflicts: mrca.label_node(x.ortholog_bipart, "X", cutoff) # Writing out the subtrees if applicable. if outfile_subtrees: out_mrca = copy.deepcopy(tree) out_mrca.clr_label() make_trees.add_loci(out_mrca) if len(extra_names[count]) != 0: out_subs.write("((" + ",".join(extra_names[count]) +
def generic_visit(self, node: Node):
    """ Called if no explicit visitor function exists for a node.
        Implements preorder visiting of the node.
    """
    # children() yields (index, child) pairs; the index is unused here,
    # so bind it to `_` instead of a dead local.
    for _, child in node.children():
        self.visit(child)