def insert_after(self, current_node_item, new_node_item):
    tmp = self._first
    while tmp:
        if tmp.val == current_node_item:
            old_next_node = tmp.next_node
            new_node = Node(new_node_item)
            tmp.next_node = new_node
            new_node.next_node = old_next_node
            self._size += 1
            break
        tmp = tmp.next_node
def idg(filename):
    dg = DependencyGraph()
    with open(filename) as fp:
        for line in fp:
            trimmed = line.strip()
            if len(trimmed) == 0:
                # Blank line ends the current graph; start a fresh one
                yield dg
                dg = DependencyGraph()
            else:
                dg.addNode(Node.byline(trimmed))
    if dg.length() > 0:
        yield dg
def test_no_distance(self):
    n = Node("test")
    self.assertEqual(n.distance(), 0)
# (fragment) tail of swapnode -- the snippet begins inside the function,
# in the branch that swaps two adjacent nodes; the header and node lookup
# are missing from the snippet
        secondnode.next = firstnode
        firstnode.next = nextsecondnode
    else:
        # General case: the two nodes are not adjacent
        previousfirstnode.next = secondnode
        nextsecondnode = secondnode.next
        secondnode.next = firstnode.next
        previoussecondnode.next = firstnode
        firstnode.next = nextsecondnode


firstnode = Node(4)
secondnode = Node(2)
thirdnode = Node(6)
fourthnode = Node(5)
fifthnode = Node(9)
sixthnode = Node(3)

link = Linklist()
link.insert(firstnode)
link.insert(secondnode)
link.insert(thirdnode)
link.insert(fourthnode)
link.insert(fifthnode)
link.insert(sixthnode)

swapnode(link, 6, 3)
def __init__(self, title, parent, token):
    Node.__init__(self, title, parent)
    self.token = token
    self.series_nodes = {}
def parse_graph(self, file_path, model_name, category, sub_category):
    """Method to parse a file and create a corresponding Graph object.

    Reads a tflite file into a tflite/Model object and then extracts
    operators, tensors, graph structure and metadata and stores them in
    Graph, Node and Edge objects. Nodes are operations and edges are tensors.

    Args:
        file_path (str): Path of the file to parse.
        model_name (str): Unique name of the model being parsed.
        category (str): Problem category of the model.
        sub_category (str): Problem sub-category of the model.

    Returns:
        The Graph object created for the file.
    """
    model = self.parse(file_path)

    nodes = list()
    edges = list()
    adj_list = dict()
    start_node_indices = list()

    # Global list of opcodes in the model, referenced by Operators
    opcodes = list()
    for opcode_index in range(model.OperatorCodesLength()):
        opcodes.append(model.OperatorCodes(opcode_index))

    # Only considering the main model
    subgraph = model.Subgraphs(0)

    # Dictionaries to store origin and destination nodes for each edge
    to_nodes = dict()
    from_nodes = dict()

    for tensor_index in range(subgraph.TensorsLength()):
        tensor = subgraph.Tensors(tensor_index)
        # Converting tensor to an Edge object
        new_edge = self._TENSOR_TO_EDGE.convert(tensor)
        edges.append(new_edge)

    # Populating to_nodes, from_nodes:
    # add proxy nodes for Input and Output of the model
    for input_index in range(subgraph.InputsLength()):
        new_node = Node.Node(label="Input_Placeholder",
                             operator_type="Input_Placeholder")
        nodes.append(new_node)
        node_index = len(nodes) - 1
        start_node_indices.append(node_index)

        edge_index = subgraph.Inputs(input_index)
        if edge_index not in from_nodes:
            from_nodes.update({edge_index: []})
        from_nodes[edge_index].append(node_index)

    for operator_index in range(subgraph.OperatorsLength()):
        operator = subgraph.Operators(operator_index)
        builtin_opcode = opcodes[operator.OpcodeIndex()].BuiltinCode()
        opname = self._builtin_optype[builtin_opcode]
        new_node = self._OP_TO_NODE.convert(operator, opname)

        # Condition to extract Conv 2D filter sizes and input and output
        # channels, as they are stored in tensors and not in operators
        if new_node.label == "CONV_2D":
            weight_tensor = subgraph.Tensors(operator.Inputs(1))
            new_node.filter_height = weight_tensor.Shape(1)
            new_node.filter_width = weight_tensor.Shape(2)

        nodes.append(new_node)
        node_index = len(nodes) - 1

        for input_index in range(operator.InputsLength()):
            edge_index = operator.Inputs(input_index)
            if edge_index not in to_nodes:
                to_nodes.update({edge_index: list()})
            to_nodes[edge_index].append(node_index)

        for output_index in range(operator.OutputsLength()):
            edge_index = operator.Outputs(output_index)
            if edge_index not in from_nodes:
                from_nodes.update({edge_index: list()})
            from_nodes[edge_index].append(node_index)

    for output_index in range(subgraph.OutputsLength()):
        new_node = Node.Node(label="Output_Placeholder",
                             operator_type="Output_Placeholder")
        nodes.append(new_node)
        node_index = len(nodes) - 1

        edge_index = subgraph.Outputs(output_index)
        if edge_index not in to_nodes:
            to_nodes.update({edge_index: []})
        to_nodes[edge_index].append(node_index)

    # Constructing adjacency list from to_nodes, from_nodes
    for edge_index in range(len(edges)):
        if edge_index not in from_nodes or edge_index not in to_nodes:
            continue
        for node1_index in from_nodes[edge_index]:
            for node2_index in to_nodes[edge_index]:
                if node1_index not in adj_list:
                    adj_list.update({node1_index: list()})
                adj_list[node1_index].append([edge_index, node2_index])

    graph = Graph.Graph(nodes, start_node_indices, edges, adj_list,
                        model_name, category, sub_category)

    # Removing nodes which are not reachable from input
    graph.process_nodes()
    graph.source = "TFLite"
    return graph
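# A hypothetical usage sketch for parse_graph; the parser class name and the
# file path are assumptions, only the signature and the returned Graph's
# source attribute come from the snippet above:
#
#   parser = TFLiteParser()
#   graph = parser.parse_graph("model.tflite", "my_model",
#                              "vision", "classification")
#   print(graph.source)  # "TFLite"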
from common import Node, parse, run

cups = [Node(i) for i in parse()]
starting = cups[0]
for i, cup in enumerate(cups):
    cup.next = cups[i + 1] if i != len(cups) - 1 else cups[0]
cups = sorted(cups, key=lambda cup: cup.value)
run(cups, starting, 100)

current = cups[0].next
order = []
while current.value != 1:
    order.append(current.value)
    current = current.next
print(''.join(str(i) for i in order))
def __init__(self, parent): Node.__init__(self, "SBS", parent)
def __init__(self, title, parent, url):
    Node.__init__(self, title, parent)
    self.url = url
    self.series_map = {}
def insert(self, value):
    if self.root is None:
        self.root = Node(value)
        self.cts[self.root] = 1
    else:
        self.__findleaf__(value, self.root)
def __init__(self, title, parent, url):
    Node.__init__(self, title, parent)
    self.video_id = url.split("/")[-1]
    self.can_download = True
def __init__(self, parent):
    Node.__init__(self, "Yahoo Plus7 (broken!)", parent)
def __init__(self, title, parent, url):
    Node.__init__(self, title, parent)
    self.url = url
    self.can_download = True
class Solution:
    # The snippet begins inside connect(); the class and method header here
    # are reconstructed from the call site below. Node (with .val, .left,
    # .right and .next) is assumed to be defined elsewhere.
    def connect(self, root):
        if not root:
            return root
        q = [root]
        # BFS over each level, linking nodes left-to-right via .next
        while q:
            size = len(q)
            prev = None
            for _ in range(size):
                tmp = q.pop(0)
                if prev is not None:
                    prev.next = tmp
                if tmp.left:
                    q.append(tmp.left)
                if tmp.right:
                    q.append(tmp.right)
                prev = tmp
        return root


if __name__ == "__main__":
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    root.left.left = Node(4)
    root.left.right = Node(5)
    root.right.left = Node(6)
    root.right.right = Node(7)

    res = Solution().connect(root)
    while res:
        curr = res
        while curr:
            print(curr.val)
            curr = curr.next
        res = res.left
def __init__(self, title, parent, url):
    Node.__init__(self, title, parent)
    self.url = url
    self.unique_series = set()
def parse_models(self, parse_stateful=False):
    """Method to query and read data from the database.

    Queries the database and reads models into Graph objects.

    Args:
        parse_stateful (bool): Whether graphs with a stateful partitioned
            call should be parsed; these graphs do not contain a graph
            structure or tensors. Defaults to False.

    Returns:
        List of Graph objects parsed from the models in the Spanner database.
    """
    model_graphs = list()

    # Query to get all models from the Models table
    with self.database.snapshot() as snapshot:
        qresult_models = snapshot.execute_sql(
            "SELECT model_name, category, sub_category, source, num_inputs"
            " FROM Models"
        )

    for row in qresult_models:
        # Checking num_inputs for presence of graph structure
        if row[4] == 0 and not parse_stateful:
            continue

        # Extracting model attributes
        model_name = row[0]
        category = row[1]
        sub_category = row[2]
        source = row[3]

        nodes = list()
        edges = list()
        start_node_indices = list()
        adj_list = dict()

        # Querying Operators of model_name
        with self.database.snapshot() as snapshot:
            qresult_operators = snapshot.execute_sql(
                "SELECT * from Models JOIN Operators"
                " ON Models.model_name = Operators.model_name"
                " WHERE Models.model_name = '" + model_name + "'"
                " ORDER BY operator_id"
            )

        # Dictionary to hold which field is in which index of query results
        field_to_index = dict()
        # Boolean to check if field_to_index needs to be populated
        populate_dicts = True

        # Extracting Node attributes
        for row in qresult_operators:
            if populate_dicts:
                for index in range(len(qresult_operators.metadata.row_type.fields)):
                    field_name = qresult_operators.metadata.row_type.fields[index].name
                    field_to_index[field_name] = index
                populate_dicts = False

            new_node = Node.Node(None, None)
            for attr in vars(new_node).keys():
                if attr in field_to_index:
                    setattr(new_node, attr, row[field_to_index[attr]])
            nodes.append(new_node)

            # Populating start_node_indices using the is_input field
            if row[field_to_index['is_input']]:
                start_node_indices.append(len(nodes) - 1)

        # Querying Tensors of model_name
        with self.database.snapshot() as snapshot:
            qresult_tensors = snapshot.execute_sql(
                "SELECT * from Models JOIN Tensors"
                " ON Models.model_name = Tensors.model_name"
                " WHERE Models.model_name = '" + model_name + "'"
                " ORDER BY tensor_id"
            )

        field_to_index.clear()
        populate_dicts = True

        # Extracting Edge attributes
        for row in qresult_tensors:
            if populate_dicts:
                for index in range(len(qresult_tensors.metadata.row_type.fields)):
                    field_name = qresult_tensors.metadata.row_type.fields[index].name
                    field_to_index[field_name] = index
                populate_dicts = False

            new_edge = Edge.Edge(None, None)
            for attr in vars(new_edge).keys():
                if attr in field_to_index:
                    setattr(new_edge, attr, row[field_to_index[attr]])
            edges.append(new_edge)

            to_operator_ids = row[field_to_index['to_operator_ids']]
            from_operator_ids = row[field_to_index['from_operator_ids']]
            edge_index = len(edges) - 1

            # Operator ids are 1-indexed; the adjacency list is 0-indexed
            for src_node_index in from_operator_ids:
                src_node_index -= 1
                for dest_node_index in to_operator_ids:
                    dest_node_index -= 1
                    if src_node_index not in adj_list:
                        adj_list.update({src_node_index: []})
                    adj_list[src_node_index].append([edge_index, dest_node_index])

        new_graph = Graph.Graph(nodes, start_node_indices, edges, adj_list,
                                model_name, category, sub_category)
        new_graph.source = source
        model_graphs.append(new_graph)

    return model_graphs
def test_simple_distance(self):
    com = Node("COM")
    b = Node("B", com)
    c = Node("C", b)
    self.assertEqual(c.distance(), 2)
def __init__(self, title, parent, token, video_id):
    Node.__init__(self, title, parent)
    self.can_download = True
    self.token = token
    self.video_id = video_id
def __init__(self, title, parent, video_key):
    Node.__init__(self, title, parent)
    self.video_key = video_key
    self.filename = title + ".ts"
    self.can_download = True
def __init__(self, title, parent, url):
    Node.__init__(self, title, parent)
    self.url = url
from common import Node


class Solution(object):
    def levelOrder(self, root):
        if not root:
            return []
        nodes = [root]
        traversal = []
        while nodes:
            level = [node.val for node in nodes]
            traversal.append(level)
            new = []
            for node in nodes:
                new.extend(node.children)
            nodes = new
        return traversal


children = [Node(1, []), Node(2, []), Node(3, [])]
root = Node(0, children)
sol = Solution()
print(sol.levelOrder(root))
from typing import List


class LinkedList(object):
    """Linked list."""

    head = None
    tail = None
    size = 0
    cursor = Node()

    def __init__(self):
        pass

    def __str__(self):
        return f"<LinkedList-{id(self)}: element size={self.size}, head={self.head}, tail={self.tail}>"

    def data_init(self, element_list: List[object]):
        """Initialize the list's elements.

        :param element_list:
        :return:
        """
        raise NotImplementedError("Subclasses must implement element initialization")

    def data_iter(self):
        """Iterate over the list's elements.

        :return:
        """
        pass

    def add_element(self, element, index=-1):
        """Insert an element into the list.

        :param element: the element to insert
        :param index: position to insert at; -1 (default) appends at the tail
        :return:
        """
        raise NotImplementedError("Subclasses must implement element insertion")

    def remove_element(self, element):
        """Remove an element from the list.

        :param element: the element to remove
        :return:
        """
        raise NotImplementedError("Subclasses must implement element removal")

    def get_element(self, element):
        """Look up an element in the list.

        :param element: the element to look up
        :return:
        """
        raise NotImplementedError("Subclasses must implement element lookup")

    def has_loop(self):
        """Check whether the list contains a cycle.

        :return:
        """
        pass

    def is_empty(self):
        """Check whether the list is empty.

        :return:
        """
        pass
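# A minimal sketch of a concrete subclass of the LinkedList base above,
# implementing just enough to append and iterate. It assumes Node(data=None)
# exposing .data and .next -- an assumption, since Node is not defined in the
# snippet -- and only sketches the default tail-append path.
class SinglyLinkedList(LinkedList):
    def data_init(self, element_list):
        for element in element_list:
            self.add_element(element)

    def add_element(self, element, index=-1):
        # Only the default tail-append (index=-1) is sketched here
        node = Node(element)
        if self.head is None:
            self.head = node
        else:
            self.tail.next = node
        self.tail = node
        self.size += 1

    def data_iter(self):
        node = self.head
        while node is not None:
            yield node.data
            node = node.next

    def is_empty(self):
        return self.size == 0


lst = SinglyLinkedList()
lst.data_init([1, 2, 3])
print(list(lst.data_iter()))  # [1, 2, 3]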
def push(self, val):
    node = Node(val)
    old = self._first
    self._first = node
    self._first.next_node = old
    self._size += 1
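# A minimal runnable sketch exercising push and insert_after as methods of a
# hypothetical singly linked list class (LinkedStack is an assumed name); the
# Node type and class skeleton are assumptions, only the two method bodies
# mirror the snippets above.
class Node:
    def __init__(self, val):
        self.val = val
        self.next_node = None


class LinkedStack:
    def __init__(self):
        self._first = None
        self._size = 0

    def push(self, val):
        node = Node(val)
        old = self._first
        self._first = node
        self._first.next_node = old
        self._size += 1

    def insert_after(self, current_node_item, new_node_item):
        tmp = self._first
        while tmp:
            if tmp.val == current_node_item:
                new_node = Node(new_node_item)
                new_node.next_node = tmp.next_node
                tmp.next_node = new_node
                self._size += 1
                break
            tmp = tmp.next_node


s = LinkedStack()
s.push(3)
s.push(1)             # list: 1 -> 3
s.insert_after(1, 2)  # list: 1 -> 2 -> 3
node = s._first
while node:
    print(node.val)   # prints 1, 2, 3
    node = node.next_node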
from common import Node, parse, run

cups = [Node(i) for i in parse()]
starting = cups[0]
rest = [Node(i) for i in range(10, 1000000 + 1)]
for i, cup in enumerate(cups):
    cup.next = cups[i + 1] if i != len(cups) - 1 else rest[0]
for i, cup in enumerate(rest):
    cup.next = rest[i + 1] if i != len(rest) - 1 else cups[0]
cups = sorted(cups, key=lambda cup: cup.value) + rest
run(cups, starting, 10000000)
print(cups[0].next.value * cups[0].next.next.value)
def attr_level_score(self) -> Node:  # {{{1
    ret = Node("attribute", dict(
        NAME="markdown-level",
        VALUE=Text(self.n_level),
    ))
    return ret
def indexed_child_nodes():
    for entry in node.metadata_entries:
        if entry == 0 or entry - 1 >= len(node.child_nodes):
            yield Node.empty()
        else:
            yield node.child_nodes[entry - 1]
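# A runnable sketch of the 1-indexed lookup above: entry 0 and out-of-range
# entries map to an empty placeholder. The SimpleNamespace stub and the use
# of None in place of Node.empty() are assumptions for illustration.
from types import SimpleNamespace

node = SimpleNamespace(metadata_entries=[2, 0, 5, 1],
                       child_nodes=["a", "b", "c"])

def indexed_child_nodes():
    for entry in node.metadata_entries:
        if entry == 0 or entry - 1 >= len(node.child_nodes):
            yield None  # stands in for Node.empty()
        else:
            yield node.child_nodes[entry - 1]

print(list(indexed_child_nodes()))  # ['b', None, None, 'a']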
def test_distance_distance(self):
    com = Node("COM")
    b = Node("B", com)
    c = Node("C", b)
    d = Node("D", c)
    e = Node("E", d)
    f = Node("F", e)
    g = Node("G", b)
    h = Node("H", g)
    i = Node("I", d)
    san = Node("SAN", i)
    j = Node("J", e)
    k = Node("K", j)
    l = Node("L", k)
    you = Node("YOU", k)
    self.assertEqual(you.distance_with(san), 4)
# (fragment) body of sort_linklist -- the function header and the
# initialisation of `execution` (the remaining pass count) are missing
# from the snippet; each pass swaps the first out-of-order adjacent pair
    while execution != 0:
        currentnode = link.head
        previousnode = None
        iteration = execution
        while iteration != 0:
            if currentnode.data > currentnode.next.data:
                swap_nodes(link, previousnode, currentnode, currentnode.next)
                break
            else:
                previousnode = currentnode
                currentnode = currentnode.next
            iteration -= 1
        execution -= 1


firstnode = Node(4)
secondnode = Node(2)
thirdnode = Node(6)
fourthnode = Node(5)
# fifthnode = Node(9)
# sixthnode = Node(3)

link = Linklist()
link.insert(firstnode)
link.insert(secondnode)
link.insert(thirdnode)
link.insert(fourthnode)
# link.insert(fifthnode)
# link.insert(sixthnode)

sort_linklist(link)
def __init__(self, title, parent, query, expected_tv_show):
    Node.__init__(self, title, parent)
    self.title = title
    self.query = query
    self.expected_tv_show = expected_tv_show
    self.video_ids = set()
def __init__(self, title, parent, video_url):
    Node.__init__(self, title, parent)
    self.can_download = True
    self.video_url = video_url
def append_script(nod: Nod1) -> None:
    # Presumably a nested helper: self, cmn, and mode come from the
    # enclosing scope
    if self.f_disable_script:
        return
    cmds = cmn.compose_script(mode.t())
    nod.attr_replace("script1", cmds)