def main():
    colorama.init()
    args = parse_args()
    with open(args.file1, "r") as f:
        data1 = json.load(f)
    with open(args.file2, "r") as f:
        data2 = json.load(f)
    for idx, ch in enumerate(data1['symbols']['children']):
        root1 = importer.import_(ch)
        root2 = importer.import_(data2['symbols']['children'][idx])
        print(f"{root1.name}\n+++++++++++++++++++++")
        for node in PreOrderIter(root1):  # pylint: disable=undefined-loop-variable
            n = find(root2, lambda node2: node2.identifier == node.identifier)
            if n:
                if n.size != node.size:
                    diff = n.size - node.size
                    if diff == 0:
                        continue
                    if not n.children or not n.parent:
                        if diff < 0:
                            print(f"{n.identifier} -> {Fore.GREEN}{diff}{Fore.RESET}")
                        else:
                            print(f"{n.identifier} -> {Fore.RED}+{diff}{Fore.RESET}")
            else:
                if not node.children:
                    print(f"{node.identifier} ({Fore.GREEN}-{node.size}{Fore.RESET}) disappeared.")
        for node in PreOrderIter(root2):
            n = find(root1, lambda node2: node2.identifier == node.identifier)
            if not n:
                if not node.children and node.size != 0:
                    print(f"{node.identifier} ({Fore.RED}+{node.size}{Fore.RESET}) is new.")
def fill_rankmap():
    children = set(node.name for node in PreOrderIter(taxtree))
    new_taxa = children.difference(rankmap.keys())
    if not new_taxa:
        return
    print('Querying Entrez for {} new taxa...'.format(len(new_taxa)))
    ids = []
    for t in new_taxa:
        hdl = Entrez.esearch(db="taxonomy", term=t, field='Scientific Name',
                             usehistory="y")
        entry = Entrez.read(hdl)
        id_list = entry.get('IdList')
        if len(id_list) == 0:
            print('{} not found'.format(t))
        else:
            ids.append(id_list[0])
            if len(id_list) > 1:
                print('found {} ids for {}'.format(len(id_list), t))
        disp_status(len(ids), len(new_taxa))
    search_results = Entrez.read(Entrez.epost("taxonomy", id=",".join(ids)))
    webenv = search_results["WebEnv"]
    query_key = search_results["QueryKey"]
    count = len(ids)
    batch_size = 100
    for start in range(0, count, batch_size):
        end = min(count, start + batch_size)
        print("Going to download record %i to %i" % (start + 1, end))
        attempt = 0
        while attempt < 3:
            attempt += 1
            try:
                fetch_handle = Entrez.esummary(db="taxonomy", retstart=start,
                                               retmax=batch_size, webenv=webenv,
                                               query_key=query_key)
                break  # success, stop retrying
            except HTTPError as err:
                if 500 <= err.code <= 599:
                    print("Received error from server %s" % err)
                    print("Attempt %i of 3" % attempt)
                    time.sleep(15)
                else:
                    raise
        data = ET.fromstring(fetch_handle.read())
        fetch_handle.close()
        for entry in data.findall('./DocSum'):
            name = rank = None  # reset per record so stale values don't leak
            for item in entry.findall('./Item'):
                if item.get('Name') == 'ScientificName':
                    name = item.text
                if item.get('Name') == 'Rank':
                    rank = item.text
            if rank is None or len(rank) == 0:
                rank = 'unknown'
            rankmap[name] = rank
    with open('taxa_rank_db.txt', 'w') as hdl:
        for k, v in rankmap.items():
            hdl.write('{} {}\n'.format(k, v))
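# The epost/WebEnv calls above follow NCBI's Entrez history pattern: post the
# full ID list to the server once, then page through esummary results
# batch_size records at a time, so no single request carries thousands of IDs.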
def get_html_elements(self):
    elements = []
    for entity in PreOrderIter(self._root):
        if entity.id == "root":
            continue
        if entity.visible:
            elements.append(entity.to_html())
    return elements
def print_csv_content(file, tree):
    tree_node: VSSNode
    for tree_node in PreOrderIter(tree):
        data_type_str = tree_node.data_type.value if tree_node.has_data_type() else ""
        unit_str = tree_node.unit.value if tree_node.has_unit() else ""
        file.write(format_csv_line(
            tree_node.qualified_name('.'), tree_node.type.value, data_type_str,
            tree_node.deprecation, unit_str, tree_node.min, tree_node.max,
            tree_node.description, tree_node.comment, tree_node.allowed,
            tree_node.uuid))
def _node_by_name(self, name, start_node=None) -> (BizDataNode, Error):
    if start_node is None:
        start_node = self.root
    node: BizDataNode
    for node in PreOrderIter(start_node):
        if node.name == name:
            return node, Error(None)
    return None, Error("No such node: {0}".format(name))
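# Hypothetical usage sketch for the (node, Error) return convention above;
# callers can test the node slot instead of inspecting Error internals:
#
#     node, err = self._node_by_name("revenue")
#     if node is None:
#         ...  # handle err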
def generate_csv_report(args):
    args['language'] = "en"
    root_node, parent_faq_map, parent_tags_map = analyzer.run_diagnostics(args)
    tree_traversal = [node for node in PreOrderIter(root_node)]
    myonology = af()
    response = myonology.run_diagnostics(root_node, parent_faq_map, parent_tags_map)
    analyzer.generate_csv_report(response, "csvreport.csv")
def return_tokens(task):
    r = 'AnsibleTask'
    root = build_ast(r, task)
    all_nodes = [node.name for node in PreOrderIter(root)]
    nodes = check_for_nested(all_nodes)
    return nodes
def filtre(self, tree, line):
    return any(
        PreOrderIter(tree[0],
                     filter_=lambda n: hasattr(n, 'relation')
                     and n.relation == u'pobj'
                     and n.parent.lemma == u'to'
                     and n.parent.parent.lemma == u'give'))
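# A minimal sketch of the dependency shape this filter matches ("give ... to
# <object>"): a node whose relation is 'pobj', whose parent lemma is 'to',
# and whose grandparent lemma is 'give'. The names below are illustrative
# only, built with anytree's AnyNode:
#
#     from anytree import AnyNode
#     give = AnyNode(lemma=u'give')
#     to = AnyNode(lemma=u'to', parent=give)
#     him = AnyNode(lemma=u'him', relation=u'pobj', parent=to)
#
# With tree[0] set to the `give` node, filtre returns True for this tree.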
def get_leaf_branches(self):
    branches = []
    for node in PreOrderIter(self.root):
        if node.is_leaf and node.num is not None and node.type is not None:
            branches.append((node.num, node.type))
    return branches
def delete_overlap_node(swc_tree):
    node_list = swc_tree.get_node_list()
    for node in node_list:
        if node._type == 3:
            # copy the children tuple before iterating: reassigning
            # son.parent mutates node.children mid-loop otherwise
            for son in list(node.children):
                son.parent = swc_tree.root()
            node.parent.remove_child(node)
    swc_tree.node_list = [node for node in PreOrderIter(swc_tree.root())]
def _cmp_local2remote(self, tree_loc, tree_rem):
    """Compare the changes in the local and remote trees."""
    # loop through all remote nodes
    for node_rem in PreOrderIter(tree_rem.tree):
        node_loc = tree_loc.get_node_by_path(
            self._fix_path4local(node_rem.entry_path))
        if node_loc is not None:
            # Only relevant for documents
            if node_rem.entry_type == "document":
                if node_rem.file_size != node_loc.file_size:
                    # local and remote are different
                    self._handle_changes(node_loc, node_rem)
        else:
            # download
            targetpath = osp.join(
                osp.dirname(self._local_root),
                self._fix_path4local(node_rem.entry_path),
            )
            print("Local node not found. Attempting download of {}".format(
                targetpath))
            if node_rem.entry_type == "folder":
                os.mkdir(targetpath)
                # TODO: must add the node in the tree!
            else:
                self._downloader.download_file(node_rem.entry_path,
                                               targetpath, "remote_wins")
    # loop through all local nodes, upload those not on the remote
    for node_loc in PreOrderIter(tree_loc.tree):
        node_rem = tree_rem.get_node_by_path(
            self._fix_path4remote(node_loc.relpath))
        if node_rem is None:
            # upload
            targetpath = self._fix_path4remote(node_loc.relpath)
            print("Remote node not found. Attempting upload of {}".format(
                targetpath))
            if node_loc.entry_type == "folder":
                self._dp_mgr.mkdir(targetpath)
                conts = self._dp_mgr.dp.list_all()
                for nn in conts:
                    if nn["entry_path"] == targetpath:
                        tree_rem.insert_folder_node(nn)
            else:
                self._uploader.upload_file(node_loc.abspath, targetpath,
                                           "local_wins")
def treeiter(self, key):
    nodes = []
    item = self.get_path(key)
    for node in PreOrderIter(item):
        if not node.loaded:
            self.fetch(node)
        nodes.append(node)
    return nodes
def reset(self):
    """Resets tree back to historical frequencies."""
    assert self._is_fit, "Fit model before resetting tree"
    for index, node in enumerate(PreOrderIter(self.root)):
        node.freq = self.hist_freqs[index]
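# PreOrderIter traverses the tree in a fixed, deterministic order, which is
# why the index-aligned hist_freqs snapshot restores cleanly. A sketch of the
# matching capture step (hypothetical, mirroring reset above):
#
#     self.hist_freqs = [node.freq for node in PreOrderIter(self.root)]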
def part1(raw_orbits, print_tree=False):
    orbit_map = generate_orbit_map(raw_orbits, print_tree=print_tree)
    total_orbits = 0
    for node in PreOrderIter(orbit_map['COM']):
        ca = commonancestors(node)
        total_orbits += len(ca)
    return total_orbits
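# Worked example: for the orbit map COM)B plus B)C, PreOrderIter visits COM
# (0 ancestors), B (1 ancestor: COM) and C (2 ancestors: COM and B), so part1
# returns 3. Each direct and indirect orbit is counted exactly once.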
def _locate_paragraph_node_by_line_idx(root, idx):
    nodes = [x for x in PreOrderIter(root) if not hasattr(x, 'sidx')]
    node_lines = [x.line if hasattr(x, 'line') else -1 for x in nodes]
    # searching the reversed list picks the *last* node whose line equals idx
    node_idx = len(node_lines) - 1 - node_lines[::-1].index(idx)
    ans_node_idx = (node_idx if is_paragraph(nodes[node_idx])
                    else nodes.index(nodes[node_idx].parent))
    return nodes[ans_node_idx]
def addParameterSymbolTable(self, node=Node):
    parameters = []
    for n in PreOrderIter(node):
        if n.name == "PARAMETER_STATEMENT_STMT":
            for p in PreOrderIter(n):
                if p.name == "TYPE":
                    data_type = p.lexeme
                elif p.name == "ID":
                    p.data_type = data_type
                    scope = p.scope
                    lexeme = p.lexeme
                    line = p.line
                    parameters.append(
                        ["parameter_func", data_type, lexeme, scope, line])
    for p in parameters:
        if p not in self.SymbolTable:
            self.SymbolTable.append(p)
def tree_pruning(self, tree):
    gn = tree
    i = 0
    for pn in PreOrderIter(gn):
        # gn is the parent ("grandparent") of the children inspected below
        if pn.name != "root":
            gn = pn.parent
        j = 0
        while j < len(pn.children):
            cn = pn.children[j]
            if cn.has_suffix_link():
                sn = cn.get_suffix_link()
                if gn != sn:
                    if self.is_related(sn, pn):
                        # pn is related to sn: replace cn with sn under pn
                        cn.parent = None
                        sn.parent = pn
                    else:
                        # not related: move sn's documents into cn and adopt
                        # sn's children whose suffix links point back here
                        for doc in sn.doc:
                            cn.insert_doc(doc)
                        sn.doc = []
                        cn.suffix_link = None
                        for csn in sn.children:
                            if (csn.get_suffix_link() == pn
                                    or csn.get_suffix_link() == cn):
                                csn.parent = cn
                elif gn == sn:
                    # suffix link points at the grandparent: fold cn's and
                    # its children's documents into pn, then detach cn
                    for doc in cn.doc:
                        pn.insert_doc(doc)
                    for ccn in cn.children:
                        for doc2 in ccn.doc:
                            pn.insert_doc(doc2)
                    cn.parent = None
            j = j + 1
        i = i + 1
    return tree
def __iter_edges(self, indent, nodenamefunc, edgeattrfunc):
    for node in PreOrderIter(self.node):
        nodename = nodenamefunc(node)
        for child in node.children:
            childname = nodenamefunc(child)
            edgeattr = edgeattrfunc(node, child)
            edgeattr = " [%s]" % edgeattr if edgeattr is not None else ""
            yield '%s"%s" -- "%s"%s;' % (indent, nodename, childname, edgeattr)
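# For a parent a with children b and c, and a name-based nodenamefunc, the
# generator above yields undirected Graphviz edge lines:
#
#     "a" -- "b";
#     "a" -- "c";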
def build_filename(current_node):
    filename = ''
    for node in PreOrderIter(current_node.root):
        if node == current_node:
            filename = filename + str(node.obj[0]) + str(node.obj[1])
            break
        if not node.is_leaf:
            filename = filename + str(node.obj[0]) + str(node.obj[1])
    return filename
def __ast_line_map__(self):
    nodes = [node for node in PreOrderIter(self.ast.anytree)]
    for n in nodes:
        if str(n.start_line) not in self.line_map:
            self.line_map[str(n.start_line)] = []
        self.line_map[str(n.start_line)].append(n.internal_type)
def depddp(X, return_tree=False, plot=False, verbose=0):
    if verbose:
        verbose_print = print
    else:
        verbose_print = lambda x: None
    indices = np.arange(X.shape[0])
    tree_root = Node('r', idx=indices)
    cur_nodes = [tree_root]
    while len(cur_nodes) > 0:
        verbose_print('Current nodes: %s' % [n.name for n in cur_nodes])
        splits = []
        for node in cur_nodes:
            verbose_print('  Node %s' % node.name)
            split = split_data(X[node.idx], plot=plot)
            if split is not None:
                d, left_idx, right_idx = split
                left_idx = node.idx[left_idx]
                right_idx = node.idx[right_idx]
                split = (d, left_idx, right_idx)
            splits.append(split)
        no_split_idx = [i for i, j in enumerate(splits) if j is None]
        cur_nodes = [j for i, j in enumerate(cur_nodes) if i not in no_split_idx]
        splits = [j for i, j in enumerate(splits) if i not in no_split_idx]
        if len(cur_nodes) > 0:
            min_dens_node_idx = min(enumerate(splits), key=lambda x: x[1][0])[0]
            splitted_node = cur_nodes.pop(min_dens_node_idx)
            name = splitted_node.name
            left_leaf = Node(name + '/l', parent=splitted_node,
                             idx=splits[min_dens_node_idx][1])
            right_leaf = Node(name + '/r', parent=splitted_node,
                              idx=splits[min_dens_node_idx][2])
            cur_nodes.append(left_leaf)
            cur_nodes.append(right_leaf)
    if not return_tree:
        i = 0
        for n in PreOrderIter(tree_root, filter_=lambda n: n.is_leaf):
            indices[n.idx] = i
            i += 1
        return indices
    else:
        return tree_root
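# Usage sketch for depddp, assuming split_data (defined elsewhere in this
# module, not a library call) finds a density valley to split on; the data
# below is illustrative:
#
#     X = np.vstack([np.random.randn(50, 2), np.random.randn(50, 2) + 5])
#     labels = depddp(X)                    # one integer label per row
#     root = depddp(X, return_tree=True)    # or the full split hierarchy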
def _clean_before_decoding(self):
    """Reset intermediate BETA values.

    Run this before calling the `__call__` method.
    """
    for node in PreOrderIter(self._decoding_tree):
        if not (node.is_zero or node.is_one):
            node.beta *= 0
def run_diagnostics(self, args):
    self.file_path = args['input_file_path']
    self.language = args['language']
    self.read_file()
    if not self.file_data:
        oa_logger.error("Ontology not present in input file")
        return
    self.threshold = conf.get("PATH_COVERAGE")
    self.stopwords = StopWords.get_stop_words(self.file_data, self.language)
    try:
        self.lemmatizer.set_language(self.language)
        oa_logger.info('Ontology analyzer started')
        root_node, parent_faq_map, parent_tags_map = self.fetch_ontology()
        return root_node, parent_faq_map, parent_tags_map
        # NOTE: everything below is unreachable because of the early return above.
        self.tree_traversal = [node for node in PreOrderIter(root_node)]
        response = dict()
        timestamp = datetime.datetime.utcnow().isoformat() + 'Z'
        response['timestamp'] = timestamp
        response['language'] = self.language
        suggestions = 0
        errors = 0
        warnings = 0
        result, present_or_not = self.check_unreachable_questions(
            root_node, parent_faq_map, parent_tags_map)
        response['unreachable_questions'] = {'result': result, 'type': 'error'}
        errors += int(present_or_not)
        oa_logger.debug('Ontology analyzer: Check 1 (unreachable_questions) done for bot:'
                        + root_node.name[NODE_NAME] + ' and issue present: ' + str(present_or_not))
        result, present_or_not = self.check_questions_at_root(
            root_node, parent_faq_map, parent_tags_map)
        response['questions_at_root'] = {'result': result, 'type': 'suggestion'}
        suggestions += int(present_or_not)
        oa_logger.debug('Ontology analyzer: Check 2 (questions_at_root) done for bot:'
                        + root_node.name[NODE_NAME] + ' and issue present: ' + str(present_or_not))
        response['no_of_suggestions'] = suggestions
        response['no_of_errors'] = errors
        response['no_of_warnings'] = warnings
        response['total_no_of_issues'] = suggestions + errors + warnings
        oa_logger.info('Ontology analyzer ran for bot:' + root_node.name[NODE_NAME])
        self.generate_csv_report(response, 'analyzer_report.csv')
        print('Report generated and saved in analyzer_report.csv file ...')
    except Exception as e:
        oa_logger.debug(e)
        traceback.print_exc()
def __iter__(self):
    # prepare
    indent = " " * self.indent
    nodenamefunc = self.nodenamefunc
    if not nodenamefunc:
        def nodenamefunc(node):
            return node.name
    nodeattrfunc = self.nodeattrfunc
    if not nodeattrfunc:
        def nodeattrfunc(node):
            return None
    edgeattrfunc = self.edgeattrfunc
    if not edgeattrfunc:
        def edgeattrfunc(node, child):
            return None
    # intro
    yield "{self.graph} {self.name} {{".format(self=self)
    options = self.options
    if options:
        for option in options:
            yield "%s%s" % (indent, option)
    # nodes
    for node in PreOrderIter(self.node):
        nodename = nodenamefunc(node)
        nodeattr = nodeattrfunc(node)
        nodeattr = " [%s]" % nodeattr if nodeattr is not None else ""
        yield '%s"%s"%s;' % (indent, nodename, nodeattr)
    # edges
    for node in PreOrderIter(self.node):
        nodename = nodenamefunc(node)
        for child in node.children:
            childname = nodenamefunc(child)
            edgeattr = edgeattrfunc(node, child)
            edgeattr = " [%s]" % edgeattr if edgeattr is not None else ""
            yield '%s"%s" -> "%s"%s;' % (indent, nodename, childname, edgeattr)
    # outro
    yield "}"
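# For a two-node tree r -> c, the iterator above yields DOT source roughly
# like the following (assuming graph="digraph", name="tree" and a 4-space
# indent on the exporter):
#
#     digraph tree {
#         "r";
#         "c";
#         "r" -> "c";
#     }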
def get_spatial_well_control(field, attrs, date_range=None, fill_shut=0.,
                             fill_outside=0.):
    """Get the model's control as a spatial array.

    Also returns control dates relative to the model start date.

    Parameters
    ----------
    field : Field
        Geological model.
    attrs : tuple or list
        Control attributes to get data from.
    date_range : tuple
        Minimal and maximal dates for control events.
    fill_shut : float
        Value to fill closed perforations.
    fill_outside : float
        Value to fill non-perforated cells.

    Returns
    -------
    dict
        'control': np.array of control values, 't': dates in days
        relative to the model start date.
    """
    spatial = field.state.spatial
    well_mask = field.well_mask
    attrs = [k.upper() for k in attrs]
    prehistory_dates, dates = get_control_interval_dates(field, date_range)
    spatial_dims = (tuple(field.grid.dimens) if spatial
                    else (np.sum(field.grid.actnum),))
    control = np.full(
        (len(prehistory_dates) + len(dates), len(attrs)) + spatial_dims,
        fill_outside)
    for node in field.wells:
        if node.is_main_branch and 'EVENTS' in node:
            df = pd.DataFrame(fill_shut, index=dates, columns=attrs)
            df.loc[node.events['DATE'], attrs] = node.events[attrs].values
            df = df.fillna(fill_shut)
            if fill_shut:
                df = df.replace(0, fill_shut)
            for branch in PreOrderIter(node):
                control[len(prehistory_dates):, ...,
                        well_mask == branch.name] = np.expand_dims(df.values, -1)
                control[:len(prehistory_dates), ...,
                        well_mask == branch.name] = fill_shut
    sec_in_day = 86400
    dates = prehistory_dates.union(dates)
    rel_dates = (pd.to_datetime(dates) - field.start).total_seconds().values / sec_in_day
    return {'control': control, 't': rel_dates}
def decorator(self, *args, **kwargs):
    """Returned decorator."""
    # `method` and `include_groups` are captured from the enclosing factory
    res = []
    for segment in PreOrderIter(self.root):
        if not include_groups and segment.is_group:
            continue
        res.append(method(self, segment, *args, **kwargs))
    if isinstance(res[0], self.__class__):
        return self
    return np.array(res)
def update_tree_tab_positions(self, caller_info):
    """Update tab positions according to the tree structure."""
    for idx, node in enumerate(PreOrderIter(self.tree_root)):
        if idx > 0:
            cur_idx = self.indexOf(node.name)
            self.tabBar().moveTab(cur_idx, idx - 1)
    # debugging
    self.print_tree_tab_structure("---- Tree Tab Update ---- " + caller_info + "\n")
def get(self, request):
    """Save changes."""
    exporter = DictExporter()
    db_tree = DBTree()
    cached_tree = CachedTree()
    for cache_node in PreOrderIter(cached_tree.tree):
        self.save(cache_node, db_tree)
    for cache_node in PreOrderIter(cached_tree.tree):
        db_node = db_tree.get_node_by_id(cache_node.id)
        cache_node.is_deleted = db_node.is_deleted
    return Response(
        data={
            "db_tree": exporter.export(db_tree.tree),
            "cached_tree": exporter.export(cached_tree.tree)
        })
def _clean_up_tree(self):
    """Remove non-terminals and unmet terminals from leaf nodes."""
    remove_nodes = []
    for node in PreOrderIter(self.parse_tree):
        if (not node.children and not hasattr(node, "token")
                and node.name != "EPSILON"):
            remove_nodes.append(node)
    for node in remove_nodes:
        self._remove_node(node)
def copy(self):
    """Returns a deepcopy. Cached properties are not copied."""
    copy = self.__class__(self.root.copy())
    copy._state = deepcopy(self.state)  # pylint: disable=protected-access
    for node in PreOrderIter(self.root):
        if node.is_root:
            continue
        node_copy = node.copy()
        node_copy.parent = copy[node.parent.name]
    return copy
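# Note: the reattachment above relies on pre-order guarantees. PreOrderIter
# always yields a parent before its children, so copy[node.parent.name]
# already exists by the time each child copy is attached.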