def best_first_graph_search(problem, f=lambda node: node.depth):
    """Best-first graph search ordered by the evaluation function ``f``.

    Returns the goal Node when found, otherwise a sentinel ``Node('fail')``.

    NOTE(review): this code reaches into ``utils.PriorityQueue`` internals
    (the ``.A`` list) and indexes popped items with ``[0]``/``[1]`` — it
    assumes entries are (score, item)-style pairs. Verify against the
    actual PriorityQueue implementation.

    :param problem: search problem exposing ``initial``, ``goal_test`` and
        node expansion via ``Node.expand``.
    :param f: node evaluation function; defaults to node depth
        (i.e. breadth-first ordering).
    """
    frontier = utils.PriorityQueue(min, f)
    explored = utils.PriorityQueue(min, f)
    frontier.append(Node(problem.initial))
    while frontier:
        # Fresh queue each iteration to hold the expanded children.
        children = utils.PriorityQueue(min, f)
        node = frontier.pop()
        if problem.goal_test(node[0].state):
            return node[0]
        explored.append(node[0])
        children.extend(node[0].expand(problem))
        for x in children.A:
            # existing == 1 once the child is known (explored or frontier).
            existing = 0
            for y in explored.A:
                if x[1] == y[1]:
                    existing = 1
                    break
            if existing == 0:
                for z in frontier.A:
                    if x[1] == z[1]:
                        existing = 1
                        if x[0] < z[0]:
                            # Found a cheaper path to a frontier state:
                            # replace the queued entry with the new one.
                            frontier.A.__delitem__(frontier.A.index(z))
                            frontier.append(x[1])
                        break
                if existing == 0:
                    frontier.append(x[1])
    # Frontier exhausted without reaching a goal.
    return Node('fail')
def partition_swap_node(start_pre_node: Node, start_node: Node, end_node: Node) -> Node:
    """Partition a sub-list around ``start_node`` by relinking nodes.

    The covered range is [start_node, end_node) — closed on the left,
    open on the right. ``start_node`` acts as the pivot; nodes comparing
    less than it are moved before it, the rest after it.

    :param start_pre_node: the node immediately before ``start_node``
    :param start_node: first node of the range; used as the pivot
    :param end_node: node one past the end of the range (may be None)
    :return: the pivot node (``start_node``) in its final position
    """
    # Dummy heads for the "< pivot" and ">= pivot" chains.
    little_head_node, big_head_node = Node(0), Node(0)
    little_node, big_node = little_head_node, big_head_node
    current_node = start_node.next_node
    while current_node and current_node != end_node:
        # Assumes Node implements ordering on its element — TODO confirm.
        if current_node < start_node:
            little_node.next_node = current_node
            little_node = current_node
        else:
            big_node.next_node = current_node
            big_node = current_node
        current_node = current_node.next_node
    # Stitch together: little chain -> pivot -> big chain -> end_node.
    little_node.next_node = start_node
    big_node.next_node = end_node
    start_node.next_node = big_head_node.next_node
    start_pre_node.next_node = little_head_node.next_node
    return start_node
def match_entity(sentence):
    """Extract typed entity Nodes (wh / Rule / Name / OntologyType) from a sentence.

    Relies on module-level resources: ``wh_phrases``, ``se_keywords``,
    ``entity_dic``, ``class_dic`` (Aho-Corasick-style ``.iter`` matchers —
    presumably yielding (end_index, match) pairs; verify against their
    implementation) and the ``sparql`` search backend.

    :param sentence: raw question text
    :return: list of Node(word, type, candidates)
    """
    result = []
    for wh_phrase in wh_phrases:
        if sentence.startswith(wh_phrase):
            result.append(Node(wh_phrase, 'wh', []))
            # Strip the wh-phrase plus one following character (the space).
            sentence = sentence[len(wh_phrase) + 1:]
    keywords = ''
    is_start = False
    start_index = -1
    # Remember where the first keyword match begins; everything from there
    # to the end of the sentence is treated as the rule phrase below.
    for end_index, w in se_keywords.iter(sentence):
        keywords = keywords + ' ' + w
        if not is_start:
            start_index = end_index - len(w) + 1
            is_start = True
    if start_index != -1:
        word = sentence[start_index:len(sentence)]
        rule_candidates = sparql.full_text_search(word)
        if rule_candidates:
            result.append(Node(word, 'Rule', rule_candidates))
    # Named-entity matches.
    for i, w in entity_dic.iter(sentence):
        if not is_repeat(result, w.word):
            result.append(Node(w.word, 'Name', w.entity))
    # Ontology-class matches.
    for i, w in class_dic.iter(sentence):
        if not is_repeat(result, w.word):
            result.append(Node(w.word, 'OntologyType', [w.entity]))
    return result
def initialize_matrix(x, y):
    """Seed the DP matrix: first column for `y`, first row for `x`.

    Column 0 costs are the plain row indices; row 0 carries a running
    cost that increments only for elements of `x` whose type is 'tag'.
    """
    # One row per element of y, plus the origin row.
    mat = [[Node(row, False)] for row in range(len(y) + 1)]
    # Fill the top row with the cumulative tag count.
    running = 0
    for elem in x:
        if get_type(elem) == 'tag':
            running += 1
        mat[0].append(Node(running, True))
    return mat
def selection_sort_swap_value(link: Link) -> Link:
    """Selection sort on a linked list.

    Only element values are exchanged; the node links themselves are
    never rearranged.
    """
    dummy = Node(0, link.head_node)
    anchor = dummy.next_node
    while anchor:
        # Find the smallest node after `anchor` that compares less than it.
        smallest = None
        probe = anchor.next_node
        while probe:
            if probe < anchor:
                if not smallest or smallest > probe:
                    smallest = probe
            probe = probe.next_node
        # Exchange values when a smaller node exists.
        if smallest:
            smallest.element, anchor.element = anchor.element, smallest.element
        anchor = anchor.next_node
    link.head_node = dummy.next_node
    return link
def getNumLeafs(n):
    """Count leaf nodes in the subtree rooted at ``n``.

    Side effect: when ``n.children`` is a list (un-merged log groups), the
    list is collapsed into a single wrapped template string and replaced
    in place by a one-entry dict keyed by that template.
    """
    numLeafs = 0
    if isinstance(n.children, list):
        newNode = Node()
        # Join every child's template, one per line ...
        template = '\n'.join(
            [' '.join(child.logTemplate) for child in n.children])
        # ... then hard-wrap to 15 columns for display.
        template = "\n".join(
            textwrap.wrap(template, width=15, expand_tabs=True,
                          replace_whitespace=False, break_long_words=True))
        newNode.token = template
        n.children = {template: newNode}
    for c in n.children:
        # Defensive: a list entry counts as a single leaf.
        if isinstance(c, list):
            numLeafs += 1
            continue
        current = n.children.get(c)
        if current.children != {}:
            numLeafs += getNumLeafs(current)
        else:
            numLeafs += 1
    return numLeafs
def build_query_graph():
    """Build query-graph edges between extracted entity nodes.

    Uses module-level state: ``question``, ``tokens``, ``nodes``,
    ``node_indexes``, ``tree_node_dic``, ``index_node_dic`` and rebinds the
    global ``dependency_tree``. Token indices appear to be 1-based
    (``tokens[value - 1]``) — confirm against the parser output.

    :return: list of Edge(relation_words, node_a, node_b)
    """
    global dependency_tree
    dependency_tree = nlp.dependency_parse(question)
    convert_tree()
    edges = []
    # 2-2 find path
    if len(nodes) == 1:
        # Single entity: pair it with an empty wh-node; the relation text is
        # everything before the entity's token position.
        n = nodes[0]
        pre_attribute = ' '.join(tokens[:n.index - 1])
        edges.append(Edge(pre_attribute, Node('', 'wh', []), n))
        return edges
    for n in nodes:
        node_indexes.append(n.index)
    # Every unordered pair of entity nodes.
    for i in range(len(node_indexes) - 1):
        n1 = node_indexes[i]
        for j in range(i + 1, len(node_indexes)):
            n2 = node_indexes[j]
            path = find_path(tree_node_dic[n1], tree_node_dic[n2])
            if path:
                # Concatenate the words of the interior path nodes to form
                # the edge label (path[0] is skipped — the start node itself).
                word = ""
                for k in range(1, len(path)):
                    for value in tree_node_dic[path[k]].values:
                        word = word + tokens[value - 1] + ' '
                edges.append(Edge(word.strip(), index_node_dic[n1], index_node_dic[n2]))
    return edges
def optimize_insertion_sort(link: Link) -> Link:
    """Optimized insertion sort for a linked list.

    Benchmarks:
        list length 1000  --- Total execution time: 235 ms
        list length 10000 --- Total execution time: 23785 ms

    leetcode:
        link: https://leetcode-cn.com/problems/insertion-sort-list/
        Runtime: 220 ms, beats 74.71% of Python3 submissions
        Memory: 15.3 MB, beats 12.50% of Python3 submissions
    """
    # Dummy head so insertion before the first node needs no special case.
    head_node = Node(0, link.head_node)
    current_node = link.head_node
    while current_node and current_node.next_node:
        # Already in order: nothing to move, just advance.
        if current_node <= current_node.next_node:
            current_node = current_node.next_node
            continue
        # Scan from the dummy head for the insertion point of the
        # out-of-order node (current_node.next_node).
        start_node = head_node
        while start_node.next_node < current_node.next_node:
            start_node = start_node.next_node
        # Unlink the node and splice it in after start_node.
        temp_node = current_node.next_node
        current_node.next_node = temp_node.next_node
        temp_node.next_node = start_node.next_node
        start_node.next_node = temp_node
    link.head_node = head_node.next_node
    return link
def add_node(self, text: str, pos: tp.Tuple[int, int], colour: str = "green", multibox: bool = False) -> int:
    """Create a Node, register it under its object id, and return that id."""
    node = Node(text, pos, colour, multibox)
    # id() is stable for the object's lifetime, so it doubles as the key
    # and the return value.
    self._nodes[id(node)] = node
    return id(node)
def combineLeaves(self, nodeList):
    """Fold every leaf's logTemplate into one Node; return it as [[node]]."""
    merged = nodeList[0].logTemplate
    for leaf in nodeList[1:]:
        merged = getTemplate(merged, leaf.logTemplate)
    combined = Node()
    combined.token = merged
    return [[combined]]
def get_available_nodes(self):
    """Parse the configured node list and wrap it in a NodeCollection."""
    raw_entries = ast.literal_eval(config.node_list)
    nodes = [Node(entry['id'], entry['name']) for entry in raw_entries]
    if log.isEnabledFor(logging.DEBUG):
        log.debug("get_available_nodes(): nodes - " + str(nodes))
    return NodeCollection(nodes)
def edit_workflow(workflow_id):
    """Flask view: show (GET) or apply (POST) edits to an existing workflow.

    Only the owning user may edit; 404 for unknown ids, 403 for other users.
    On POST, tags are upserted, and if a new .knwf archive was uploaded its
    workflow.svg is rendered to PNG and node names are (re)extracted.
    """
    wf = Workflow.query.filter_by(id=workflow_id).first()
    if wf is None:
        abort(404)
    if wf.user_id != current_user.id:
        abort(403)
    if request.method == "GET":
        tag_str = " ".join([t.name for t in wf.tags])
        return render_template("edit.html", workflow=wf, tag_str=tag_str)
    elif request.method == "POST":
        form = NewWorkflow()
        # Upsert each whitespace-separated tag name.
        tags = []
        for tag in form.tag.data.split(" "):
            tag_name = tag.strip()
            t = Tag.query.filter_by(name=tag_name).first()
            if t is None:
                t = Tag(tag_name)
                db.session.add(t)
                db.session.commit()
            tags.append(t)
        subdir = os.path.join("static", "workflows", str(wf.id))
        # A replacement archive is optional — only reprocess when uploaded.
        if form.knwf.data.filename:
            filename = secure_filename(form.knwf.data.filename)
            form.knwf.data.save(os.path.join(subdir, filename))
            zfile = ZipFile(form.knwf.data)
            node_list = []
            for name in zfile.namelist():
                if name.endswith("workflow.svg"):
                    # Render the workflow preview image.
                    png_file = os.path.join(subdir, "workflow.png")
                    s = zfile.read(name)
                    svg2png(bytestring=s, write_to=png_file)
                # Archive entries look like "<wf>/<NodeName (id)>/..." —
                # strip the "(...)" suffix to get the node name.
                tname = name.split("/")[1]
                node_name = tname.split("(")[0]
                if not node_name.startswith(
                        "workflow") and not node_name.startswith("."):
                    node_name = node_name.rstrip()
                    node_list.append(node_name)
            # Deduplicate and upsert Node rows.
            nl = list(set(node_list))
            nodes = []
            for node_name in nl:
                n = Node.query.filter_by(name=node_name).first()
                if n is None:
                    n = Node(node_name)
                    db.session.add(n)
                    db.session.commit()
                nodes.append(n)
            wf.workflow = filename
            wf.nodes = nodes
        # Metadata updates apply regardless of a new archive upload.
        wf.tags = tags
        wf.name = form.name.data
        wf.content = form.content.data
        wf.rendered_content = md.convert(wf.content)
        db.session.commit()
        return redirect(url_for("display_workflow", workflow_id=workflow_id))
def addSeq(self, wordList):
    """Insert a token sequence into the trie, capped at ``self.maxDepth`` levels.

    Each token is hard-wrapped (width 10) before being used as a child key;
    the full sequence, wrapped at width 25, is then attached as a leaf
    under the deepest token node.

    :param wordList: list of token strings for one log line
    """
    # get root of trie
    current = self.root
    # Only the first maxDepth tokens become internal trie levels.
    subWordList = wordList[:self.maxDepth]
    for w in subWordList:
        # Wrap long tokens so downstream rendering stays narrow.
        w = "\n".join(textwrap.wrap(w, width=10, expand_tabs=True,
                                    replace_whitespace=False,
                                    break_long_words=True))
        # create a child, count + 1
        tokenNode = current.children.get(w)
        if tokenNode is None:  # fixed: was `== None`; identity test per PEP 8
            current.children[w] = Node()
        current = current.children[w]
        current.token = w
    # Attach the full (wrapped) template as a leaf below the last token node.
    template = " ".join(wordList)
    template = "\n".join(textwrap.wrap(template, width=25, expand_tabs=True,
                                       replace_whitespace=False,
                                       break_long_words=True))
    current.children[template] = Node()
    current.children[template].token = template
def ping():
    """Handle a node heartbeat POST: upsert the node document in MongoDB.

    Reads the `node-id` and `node-floor` request headers and responds with
    a plain '200' or '500' body string. NOTE(review): non-POST requests
    fall through and return None — presumably the route only allows POST;
    confirm the route decorator.
    """
    if request.method == 'POST':
        node_id = request.headers.get('node-id')
        node_floor = request.headers.get('node-floor')
        # Fixed: was a Python-2-only `print` statement, a syntax error on
        # Python 3; the call form works on both interpreters.
        print('Incoming ping from node ' + node_id)
        incoming_node = Node(node_id, node_floor)
        success = mongodb.add_or_update(incoming_node)
        if success:
            return '200'
        return '500'
def extract_nodes():
    """Tokenise the question and collect entity and wh-word nodes.

    Side effects: rebinds the module-level ``tokens`` list and extends the
    module-level ``nodes`` list in place.
    """
    # split words
    global tokens
    tokens = nlp.word_tokenize(question)
    nodes.extend(ner.match_entity(question))
    # use pos to filter wh-words
    pos = nlp.pos_tag(question)
    # Iterate the (word, tag) pairs directly — the old index loop
    # (`for i in range(len(pos))`) never used the index.
    for p in pos:
        if p[1] in wh_tags:
            if not is_repeat(p[0]):
                nodes.append(Node(p[0], 'wh', []))
def quicksort_main_swap_node(link: Link) -> Link:
    """Quicksort entry point (recursive, relinks nodes).

    Benchmarks:
        list length 1000  --- Total execution time: 19 ms
        list length 10000 --- Total execution time: 239 ms
    """
    # Dummy pre-head node lets the recursion relink the first element
    # without a special case.
    dummy = Node(0, link.head_node)
    quicksort_swap_node(dummy, link.head_node, None)
    link.head_node = dummy.next_node
    return link
def new_workflow(): form = NewWorkflow() # Don't pass request.form if form.validate_on_submit(): tags = [] for tag in form.tag.data.split(" "): tag_name = tag.strip() t = Tag.query.filter_by(name=tag_name).first() if t is None: t = Tag(tag_name) db.session.add(t) db.session.commit() tags.append(t) wf = Workflow(name=form.name.data, user_id=current_user.id, content=form.content.data) db.session.add(wf) db.session.commit() filename = secure_filename(form.knwf.data.filename) subdir = os.path.join("static", "workflows", str(wf.id)) if not os.path.exists(subdir): os.mkdir(subdir) form.knwf.data.save(os.path.join(subdir, filename)) zfile = ZipFile(form.knwf.data) node_list = [] for name in zfile.namelist(): if name.endswith("workflow.svg"): png_file = os.path.join(subdir, "workflow.png") s = zfile.read(name) svg2png(bytestring=s, write_to=png_file) tname = name.split("/")[1] node_name = tname.split("(")[0] if not node_name.startswith( "workflow") and not node_name.startswith("."): node_name = node_name.rstrip() node_list.append(node_name) nl = list(set(node_list)) nodes = [] for node_name in nl: n = Node.query.filter_by(name=node_name).first() if n is None: n = Node(node_name) db.session.add(n) db.session.commit() nodes.append(n) wf.workflow = filename wf.tags = tags wf.nodes = nodes db.session.commit() return redirect(url_for("index")) flash_errors(form) return render_template("new.html", form=form)
def get(self):
    """Render the add/edit category form (admin only, flag == 99).

    With an ``id`` query parameter the matching Node is loaded for editing;
    otherwise a blank Node is shown with the "add" title.
    """
    if self.cur_user and self.cur_user.flag == 99:
        n_id = self.request.get('id')
        # Fixed: the old code assigned a fresh Node() when no id was given;
        # that object is truthy, so the "add" branch below was unreachable
        # and the edit title always showed. Use None (as the sibling post()
        # handler does) so the add/edit distinction actually works.
        if n_id:
            n_obj = Node.get_by_id(int(n_id))
        else:
            n_obj = None
        if n_obj:
            title = "修改分类"
        else:
            n_obj = Node()
            title = "添加分类"
        self.echo('addnode.html', {
            'title': title,
            'n_obj': n_obj,
            'newest_node': Node.get_newest(),
        }, layout='_layout.html')
    else:
        self.error(403)
        self.write('403:forbidden')
def start(self, tag, attrib):
    """Expat/SAX-style callback: the parser entered an opening tag."""
    from model import Node
    if tag == "node":
        # A new OSM node begins; keep it so nested <tag> elements attach.
        lat = float(attrib["lat"])
        lon = float(attrib["lon"])
        self._current_node = Node(lat, lon, id=int(attrib["id"]))
    elif tag == "tag" and self._current_node:
        # Key/value pair nested inside the current node.
        self._current_node.add_tag(attrib["k"], attrib["v"])
def create_decision_tree(table):
    """Recursively build a decision tree for ``table``.

    Splits on the attribute with the highest gain ratio; when no attribute
    yields a positive ratio, returns a leaf holding the decision class.
    """
    entropy_info = Function.calculate_info(table)
    # Track the best (highest gain-ratio) attribute seen so far.
    best_index, best_ratio = 0, 0
    for idx in range(table.get_attribute_size()):
        ratio = Function.calculate_attribute_gain_ration(idx, table, entropy_info)
        if ratio > best_ratio:
            best_ratio = ratio
            best_index = idx
    if best_ratio <= 0:
        # Nothing informative left to split on: emit a leaf.
        return Node(table.get_decision_class())
    # Internal node named after the 1-based attribute index.
    branch = Node("a" + str(best_index + 1))
    for value in table.calculate_parameter_usage(best_index):
        subset = table.divide_decision_table(best_index, value)
        branch.child[value] = Function.create_decision_tree(subset)
    return branch
def selection_sort(link: Link) -> Link:
    """Selection sort on a linked list by relinking nodes (not values).

    Notes:
        1. Swapping two arbitrary nodes requires locating their parents.
        2. Adjacent and non-adjacent pairs need different relinking.

    Benchmarks:
        list length 1000  --- Total execution time: 656 ms
        list length 10000 --- Total execution time: 67671 ms
    """
    head_node = Node(0, link.head_node)
    start_node = head_node
    while start_node.next_node:
        # Find the parent of the smallest node after start_node.next_node
        # that compares less than it; stays None when start_node.next_node
        # is already the minimum of the remaining list.
        parent_node = None
        current_node = start_node.next_node
        while current_node.next_node:
            if current_node.next_node < start_node.next_node:
                if not parent_node or parent_node.next_node > current_node.next_node:
                    parent_node = current_node
            current_node = current_node.next_node
        if parent_node:
            if parent_node.next_node == start_node.next_node.next_node:
                # Adjacent: the minimum directly follows the node being placed.
                swap_node = parent_node.next_node
                start_node.next_node.next_node = swap_node.next_node
                swap_node.next_node = start_node.next_node
                start_node.next_node = swap_node
            else:
                # Non-adjacent: exchange successor links, then repoint
                # both parents at the other node.
                swap_node_a = start_node.next_node
                swap_node_b = parent_node.next_node
                swap_node_b.next_node, swap_node_a.next_node = swap_node_a.next_node, swap_node_b.next_node
                start_node.next_node = swap_node_b
                parent_node.next_node = swap_node_a
        start_node = start_node.next_node
    link.head_node = head_node.next_node
    return link
def getNumLeafs(n):
    """Count leaf nodes in the subtree rooted at ``n``.

    Side effect: when ``n.children`` is a list (un-merged log groups), the
    children's templates are folded together via ``getTemplate`` and the
    list is replaced in place by a one-entry dict keyed by the merged token.
    """
    numLeafs = 0
    if isinstance(n.children, list):
        newNode = Node()
        template = n.children[0].logTemplate
        for node in n.children[1:]:
            template = getTemplate(template, node.logTemplate)
        token = ' '.join(template)
        newNode.token = token
        n.children = {token: newNode}
    for c in n.children:
        # Defensive: a list entry counts as a single leaf.
        if isinstance(c, list):
            numLeafs += 1
            continue
        current = n.children.get(c)
        if current.children != {}:
            numLeafs += getNumLeafs(current)
        else:
            numLeafs += 1
    return numLeafs
def post(self):
    """Create or update a category Node (admin only, flag == 99).

    With an ``id`` parameter the existing Node is updated in place;
    otherwise a new Node is allocated from the ``node_auto_increment``
    Counter entity, and the counter is bumped only after a confirmed save.
    Redirects back to the add-node form either way.
    """
    if self.cur_user and self.cur_user.flag == 99:
        n_id = self.request.get('id')
        name = self.POST['name']
        imgurl = self.POST['imgurl']
        about = self.POST['about']
        if name:
            if n_id:
                n_obj = Node.get_by_id(int(n_id))
            else:
                n_obj = None
            if n_obj:
                # Existing node: overwrite its fields in place.
                n_obj.name = name
                n_obj.imgurl = imgurl
                n_obj.about = about
                n_obj.put()
            else:
                # get node id from the auto-increment counter entity
                nid_obj = Counter.get_or_insert('node_auto_increment', name='node_auto_increment', value=1)
                n_obj = Node(key=db.Key.from_path('Node', nid_obj.value))
                n_obj.name = name
                n_obj.imgurl = imgurl
                n_obj.about = about
                n_obj.put()
                if n_obj.is_saved():
                    # Advance the counter and refresh caches only after
                    # the datastore write succeeded.
                    n_id = nid_obj.value
                    nid_obj.value += 1
                    db.run_in_transaction(obj_runput, nid_obj, ['newest_add_node'])
        self.redirect('/add-node?id=%s' % str(n_id))
    else:
        self.error(403)
        self.write('403:forbidden')
def generator_vehicle_data(route_file='C:/Users/26271/map/vehroutes11.xml'):
    """Parse a SUMO vehicle-route XML dump into per-timestep Node lists.

    Only vehicles with non-zero speed are recorded. Stops after the
    module-level ``T`` timesteps and persists the result via
    ``store_data(..., filename_data)``.

    :param route_file: path to the <timestep>/<vehicle> XML file. The
        default keeps the previously hard-coded path for compatibility.
    :return: dict mapping timestep index t -> list of Node objects
    """
    count = {}        # vehicle id -> number of timesteps it was moving
    all_vehicle = {}
    t = 0
    with open(route_file) as f:
        text = f.read()
    timesteps = re.findall(r'<timestep time="(.*?)">(.*?)</timestep>', text, re.S)
    for timeslot, v_info in timesteps:
        print("timeslot", timeslot)
        all_vehicle[t] = []
        vehicles = re.findall(
            r'<vehicle id="(.*?)".*?speed="(.*?)".*?x="(.*?)".*?y="(.*?)"/>',
            v_info.strip())
        for v1 in vehicles:
            vid = v1[0]  # renamed from `id` to avoid shadowing the builtin
            speed = float(v1[1])
            x = float(v1[2])
            y = float(v1[3])
            grid_id = get_region_id(x, y)
            if speed != 0.0:
                # Count moving vehicles and record their positions.
                count[vid] = count.get(vid, 0) + 1
                all_vehicle[t].append(Node(vid, x, y, grid_id, speed))
        t += 1
        if t >= T:
            break
    print(t)
    store_data(all_vehicle, filename_data)
    return all_vehicle
def merge_sort_no_recursion(link: Link) -> Link:
    """Merge sort (bottom-up, non-recursive).

    Merges runs of 1 node, then 2, 4, ... until a single sorted run remains.

    Benchmarks:
        list length 1000  --- Total execution time: 25 ms
        list length 10000 --- Total execution time: 595 ms

    Arguments:
        link {Link} -- linked list

    Returns:
        Link -- the same list, sorted
    """
    head_node = Node(0, link.head_node)
    # Count the nodes so the number of doubling passes can be computed.
    length = 0
    current = head_node.next_node
    while current:
        length += 1
        current = current.next_node
    # Fixed: exactly ceil(log2(length)) passes are needed. The previous
    # code used sqrt(length) — correct only because sqrt over-estimates
    # log2, which wasted many no-op passes on long lists.
    passes = (length - 1).bit_length() if length > 1 else 0
    for i in range(passes):
        run = 2 ** i  # run length being merged during this pass
        new_head = head_node
        current = head_node.next_node
        while current:
            right = current
            left = cut_link(current, run)
            current = cut_link(left, run)
            new_head.next_node = merge_link(right, left)
            # Walk to the tail of the merged run before splicing the next.
            while new_head.next_node:
                new_head = new_head.next_node
    link.head_node = head_node.next_node
    return link
def __init__(self, root=None, maxDepth=4):
    """Initialise the trie.

    :param root: existing root Node to reuse; a fresh Node is created
        when omitted.
    :param maxDepth: maximum number of token levels stored per sequence.
    """
    # Fixed: `is None` (identity) instead of `== None` (equality), per PEP 8.
    self.root = Node() if root is None else root
    self.maxDepth = maxDepth
def _generate_new_node(mat, i, j, x, y):
    """Compute DP cell (i, j) of the tag-aware edit-distance matrix.

    Evaluates the three moves — diagonal (match/substitute), from the left
    (delete from x), from above (delete from y) — with TAG-specific costs,
    and records the bookkeeping flags (`is_from_diag_tag`,
    `deleted_tag_value`, debug `direction`) that later cells consult.

    NOTE(review): "right"/"down" in the score names refer to the *move
    direction*, so new_right_score derives from the LEFT neighbour and
    new_down_score from the cell ABOVE. Also note the final node.i = j /
    node.j = i swap — presumably intentional (x-index vs y-index); confirm.
    """
    old_diag_node = mat[i - 1][j - 1]
    old_left_node = mat[i][j - 1]
    old_up_node = mat[i - 1][j]
    old_diag_score = old_diag_node.value
    old_right_score = old_left_node.value
    old_down_score = mat[i - 1][j].value
    xtype = get_type(x[j - 1])
    ytype = get_type(y[i - 1])
    # Calculate new_diag_score
    if x[j - 1] == y[i - 1]:
        # Equal elements: free only for TAGs or inside a diag-tag run —
        # presumably by design; confirm against the scoring spec.
        if (xtype == TAG) or (old_diag_node.is_from_diag_tag):
            new_diag_score = old_diag_score
        else:
            new_diag_score = old_diag_score + 1
    elif (xtype == TAG and ytype == TAG):
        new_diag_score = old_diag_score + 1
    else:
        # This is an illegal move because you can only 'change' tag
        new_diag_score = sys.maxsize
    # Calculate new_down_score (delete from y always costs 1)
    new_down_score = old_down_score + 1
    # Calculate new_right_score
    if (_is_last_elem_in_tag_deletion(old_left_node, x, j)):
        # Finishing a whole-tag deletion: reuse the pre-deletion score.
        new_right_score = old_left_node.deleted_tag_value
    else:
        new_right_score = old_right_score + 1
    # Pick the minimum value
    min_val = min(new_diag_score, new_right_score, new_down_score)
    # Create a new node and set is_from_deleted_tag to True if delete action is selected
    # and the element is part of tag deletion or a new tag.
    if min_val == new_right_score and (
            xtype == TAG or _is_elem_in_tag_deletion(old_left_node, x, j)):
        node = Node(min_val, True)
        node.deleted_tag_value = min_val if xtype == TAG else old_left_node.deleted_tag_value
    else:
        node = Node(min_val, False)
    # This handles the case when the classes/ID is split
    if min_val == new_right_score:
        node.is_from_diag_tag = xtype != TAG and old_left_node.is_from_diag_tag
    elif min_val == new_diag_score:
        node.is_from_diag_tag = xtype == TAG or old_diag_node.is_from_diag_tag
    elif min_val == new_down_score:
        node.is_from_diag_tag = ytype != TAG and old_up_node.is_from_diag_tag
    # For debugging purpose.
    if min_val == new_right_score:
        node.direction = 'left'
    elif min_val == new_diag_score:
        node.direction = 'diag'
    elif min_val == new_down_score:
        node.direction = 'up'
    node.x = x[j - 1]
    node.y = y[i - 1]
    node.i = j
    node.j = i
    return node
def get_node(self, java_class):
    """Build a package-level dependency Node for ``java_class``."""
    deps = java_class.package_dependencies()
    size = self._get_size(java_class)
    return Node(java_class.package, deps, size)
def get_node(self, java_class):
    """Build a class-level dependency Node for ``java_class``."""
    size = self._get_size(java_class)
    deps = java_class.class_dependencies()
    return Node(java_class.name, deps, size)
def match_subject_entity(sentence):
    """Collect entity-dictionary hits in `sentence` as 'Name' nodes, no duplicates."""
    matched = []
    for _, hit in entity_dic.iter(sentence):
        if is_repeat(matched, hit.word):
            continue
        matched.append(Node(hit.word, 'Name', hit.entity))
    return matched