def set_atribution_asto(self):
    assignment_list = findall_by_attr(self.root, "atribuicao")
    func_header_name = lambda leaves: "".join(str(l.name) for l in leaves)
    for a in assignment_list:
        indexes = findall_by_attr(a, "indice")
        if indexes:
            for index in indexes:
                index.parent = None
        func_calls = findall_by_attr(a, "chamada_funcao")
        if func_calls:
            for func in func_calls:
                name = func_header_name(func.leaves)
                func.name = name
                for child in func.children:
                    child.parent = None
        leaves = [x for x in a.leaves if x.name not in ['(', ')']]
        eq_asto = self.eq_atribution_asto_subtree(leaves[2:])
        leaves[1].children = [leaves[0], eq_asto]
        for child in a.children:
            child.parent = None
        a.children = [leaves[1]]
def __parse_tree(self, packet):
    """
    Processes a packet from a new device that has not been counted
    """
    info = extract_int_data(packet[Ether])
    logger.info('Processing packet with info [%s]', info)
    macs = search.findall_by_attr(self.tree, info.get('srcMac'),
                                  name='name', maxlevel=2, maxcount=1)
    mac = None
    src_ip = None
    dst_ip = None
    dst_port = None
    packet_size = None
    if len(macs) > 0:
        mac = macs[0]
        src_ips = search.findall_by_attr(mac, info.get('srcIP'),
                                         name='name', maxlevel=2, maxcount=1)
        if len(src_ips) != 0:
            src_ip = src_ips[0]
            dst_ips = search.findall_by_attr(src_ip, info.get('dstIP'),
                                             name='name', maxlevel=2,
                                             maxcount=1)
            if len(dst_ips) != 0:
                dst_ip = dst_ips[0]
                logger.info('Processing source IPs - %s', src_ips)
                dst_ports = search.findall_by_attr(dst_ip,
                                                   info.get('dstPort'),
                                                   name='name', maxlevel=2,
                                                   maxcount=1)
                if len(dst_ports) != 0:
                    dst_port = dst_ports[0]
                    packet_sizes = search.findall_by_attr(
                        dst_port, info.get('packet_size'),
                        name='name', maxlevel=2, maxcount=1)
                    if len(packet_sizes) != 0:
                        packet_size = packet_sizes[0]
    return mac, src_ip, dst_ip, dst_port, packet_size
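# Hedged side example of the anytree lookup pattern used above, with a made-up
# two-level tree; maxlevel=2 restricts the search to the node and its direct
# children, and maxcount=1 returns at most one match.
from anytree import Node
from anytree import search

demo_root = Node("root")
demo_mac = Node("aa:bb:cc:dd:ee:ff", parent=demo_root)
Node("10.0.0.1", parent=demo_mac)

hits = search.findall_by_attr(demo_root, "aa:bb:cc:dd:ee:ff",
                              name="name", maxlevel=2, maxcount=1)
print(hits[0].name if hits else None)  # -> "aa:bb:cc:dd:ee:ff"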
def generate_var_table(self):
    var_obj_rows = []
    declarations = findall_by_attr(self.root, "declaracao_variaveis")
    for list_var in declarations:
        _type = find_by_attr(list_var, "tipo").child().name
        var_list = findall_by_attr(list_var, "var")
        for var in var_list:
            if var.parent.name == "fator":
                break
            row = Row()
            try:
                one_plus_d = var.child()
                row.token = one_plus_d.name
                row.lexeme = one_plus_d.child().name
                row.type = _type
                factors = findall_by_attr(var.children[1], "fator")
                row.dim = len(factors)
                row.tam_dim1 = factors[0].child().child().child().name
                row.tam_dim2 = 0
                if row.dim == 2:
                    row.tam_dim2 = factors[1].child().child().child().name
                row.scope = one_plus_d.scope
                row.line = one_plus_d.child().line
            except IndexError:
                one_d = var.child()
                row.token = one_d.name
                row.lexeme = one_d.child().name
                row.type = _type
                row.dim = 0
                row.tam_dim1 = 1
                row.tam_dim2 = 0
                row.scope = one_d.scope
                row.line = one_d.child().line
            var_obj_rows.append(row)
    var_obj_rows = var_obj_rows + self.param_vars()
    var_obj_rows = [list(o.__dict__.values()) for o in var_obj_rows]
    var_table = pd.DataFrame(data=var_obj_rows, columns=[
        "Token", "Lexema", "Tipo", "dim", "tam_dim1", "tam_dim2",
        "escopo", "linha"
    ])
    print(var_table.to_markdown())
    return var_table
def get_tense(tokens, verbose=False):
    starting_node = TreeModel.root
    mark = 0
    for i in range(len(tokens)):
        token = tokens[i]
        if verbose:
            print("tokens: ", tokens)
            print("token: ", token)
        found = findall_by_attr(starting_node, token[1].lower(), maxlevel=2)
        if len(found) != 0:
            starting_node = found[0]
            if i == len(tokens) - 1:
                for j in range(mark, i + 1):
                    tense = starting_node.tense
                    depth = starting_node.depth
                    tokens[j] += (tense, depth)  # ',' to make tuple
        else:
            if starting_node != TreeModel.root:
                for j in range(mark, i):
                    tense = starting_node.tense
                    depth = starting_node.depth
                    tokens[j] += (tense, depth)  # ',' to make tuple
                starting_node = TreeModel.root
                # Note: reassigning the loop variable does not rewind a
                # range-based for loop; the next iteration still advances i.
                i -= 1
            mark = i
    return tokens
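# Runnable sketch of get_tense with a stub TreeModel (an assumption; the real
# model is not shown): one child node matching the lower-cased POS tag "vbd"
# and carrying a hypothetical tense label.
from anytree import Node

class TreeModel:
    root = Node("root", tense=None)

Node("vbd", parent=TreeModel.root, tense="past")

print(get_tense([("walked", "VBD")]))  # -> [('walked', 'VBD', 'past', 1)]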
def expantion(game_tree, state, available_play):
    node_val = {}
    for i in available_play:
        state_tmp = state.copy()
        path = search.findall_by_attr(game_tree, i)
        state_tmp.append(i)
        if path == ():
            node_val[i] = {"parent": {"nb_win": 0, "nb_visit": 0},
                           "children": {"nb_win": 0, "nb_visit": 0}}
        else:
            for z in range(len(path)):
                if [node.name for node in path[z].path] == state_tmp:
                    node_val[i] = {"parent": {"nb_win": 0, "nb_visit": 0},
                                   "children": {"nb_win": 0, "nb_visit": 0}}
                    node_val[i]["parent"] = {
                        "nb_win": path[z].parent.nb_win,
                        "nb_visit": path[z].parent.nb_visit}
                    node_val[i]["children"] = {
                        "nb_win": path[z].nb_win,
                        "nb_visit": path[z].nb_visit}
                elif i not in node_val:
                    node_val[i] = {"parent": {"nb_win": 0, "nb_visit": 0},
                                   "children": {"nb_win": 0, "nb_visit": 0}}
    return node_val
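# Usage sketch for expantion with a hand-built game tree (node names and
# nb_win/nb_visit counters are made up); the unexplored move "B" falls back
# to zeroed statistics.
from anytree import Node

demo_tree = Node("root", nb_win=0, nb_visit=2)
Node("A", parent=demo_tree, nb_win=1, nb_visit=2)

stats = expantion(demo_tree, state=["root"], available_play=["A", "B"])
print(stats["A"]["children"])  # {'nb_win': 1, 'nb_visit': 2}
print(stats["B"]["children"])  # {'nb_win': 0, 'nb_visit': 0}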
def generate_func_table(self):
    func_obj_rows = []
    declarations = findall_by_attr(self.root, "declaracao_funcao")
    for func in declarations:
        row = Row()
        _type = func.child(name="tipo")
        if _type:
            row.type = _type.child().name
        else:
            row.type = "VOID"
        header = func.child(name="cabecalho")
        row.name = header.child(name="ID").child().name
        types_nodes = findall_by_attr(
            header.child(name="lista_parametros"), "tipo")
        param_list = []
        for param in types_nodes:
            param_type = param.child().name
            param_name = param.parent.child(name="id").child().name
            param_list.append((param_type, param_name))
        row.params = param_list
        row.n_param = len(param_list)
        row.returns = ("VOID", None)
        returns = findall_by_attr(header, "retorna")
        if returns:
            exp = returns[0].child(name="expressao")
            return_item = exp.leaves[0]
            return_type = return_item.parent.type.split("_").pop()
            row.returns = (return_type, return_item.name)
        row.scope = header.child(name="ID").child().scope
        row.line = header.child(name="ID").child().line
        func_obj_rows.append(row)
    func_obj_rows = [list(o.__dict__.values()) for o in func_obj_rows]
    func_table = pd.DataFrame(data=func_obj_rows, columns=[
        "Tipo", "Nome", "Parametros", "N-params", "Retorna", "Escopo", "Linha"
    ])
    print(func_table.to_markdown())
    return func_table
def makeOrUpdate(self, conceptState):
    optionList = findall_by_attr(self.MAXQ.actions, value='option',
                                 name='type')
    matches = findall_by_attr(self.MAXQ.actions, value=conceptState,
                              name='conceptState')
    if not matches:
        # No option exists yet for this concept state: compute I, beta and pi,
        # create the option with the Option() class constructor, then add it
        # to the MAXQ instance via self.MAXQ.addOption(option)
        self.makeOption(conceptState)
    else:
        option = matches[0]
        self.updateOption(option, option.conceptState)
def identify_parents(node_id):
    res = search.findall_by_attr(root, node_id, name="id")
    result_ids = []
    result_labels = []
    for node in res:
        result_ids.append(node.parent.id)
        result_labels.append(node.parent.name)
    return result_ids, result_labels
def get_atributions():
    functions = findall_by_attr(root, 'cabecalho')
    all_function_atribs = []
    for func in functions:
        func_name = func.child().child().name
        atribs = findall_by_attr(func, 'atribuicao')
        all_function_atribs = all_function_atribs + list(atribs)
        atributions_dict[func_name] = [[leaf.name for leaf in atrib.leaves]
                                       for atrib in atribs]
    global_atribs = list(findall_by_attr(root, 'atribuicao'))
    global_atribs = list(set(global_atribs) - set(all_function_atribs))
    atributions_dict['global'] = [[leaf.name for leaf in atrib.leaves]
                                  for atrib in global_atribs]
def verbs(self, sents, lang='en', engine='stanza'):
    """
    $ python -m sagas.nlu.anal verbs 'Nosotros estamos en la escuela.' es stanza

    :param sents:
    :param lang:
    :param engine:
    :return:
    """
    tree_root = build_anal_tree(sents, lang, engine)
    words = findall_by_attr(tree_root, name='upos', value='VERB')
    return words
def set_scope(self):
    functions = findall_by_attr(self.root, "cabecalho")
    for function in functions:
        scope = function.child(name="ID").child().name
        var_declarations = findall(
            function, filter_=lambda node: node.name in ("var", "id"))
        for var in var_declarations:
            var.scope = scope
            for child in var.children:
                child.scope = scope
def proc_verb_subs(node):
    toks = findall_by_attr(node, name='dependency_relation', value='nsubj')
    if not toks:
        toks = findall_by_attr(node, name='dependency_relation',
                               value='dislocated')
    if toks:
        tok = toks[0]
        # Note: ("obj",) must be a one-element tuple; `x in ("obj")` would
        # test substring membership in the string "obj".
        objs = findall(node,
                       filter_=lambda n: n.dependency_relation in ("obj",))
        if objs:
            print(f'✔[verb-{tok.dependency_relation}-obj]',
                  node.text, tok.text, objs[0].text)
            return True
        objs = findall_by_attr(node, name='upos', value='NOUN')
        # objs = findall(f, filter_=lambda node: node.upos in ("NOUN", "ADJ"))
        if objs:
            print(f'✔[verb-{tok.dependency_relation}-noun]',
                  node.text, tok.text,
                  objs[0].dependency_relation, objs[0].text)
            return True
    return False
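# Runnable sketch for proc_verb_subs with a hand-built anytree fragment
# standing in for a parsed sentence; the attribute names mirror what the
# function expects and the sentence is made up.
from anytree import Node

demo_verb = Node("v", text="eats", dependency_relation="root", upos="VERB")
Node("s", parent=demo_verb, text="cat", dependency_relation="nsubj",
     upos="NOUN")
Node("o", parent=demo_verb, text="fish", dependency_relation="obj",
     upos="NOUN")

print(proc_verb_subs(demo_verb))  # True, after printing the verb-nsubj-obj triple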
def find_container_subnodes(container_node_names):
    node_ids = []
    for element in container_node_names:
        container_node = search.findall_by_attr(root, element, name='name')
        if len(container_node) == 0:
            pdb.set_trace()
        for node in container_node:
            node_ids.append(node.id)
            descendants = node.descendants
            for descendant in descendants:
                node_ids.append(descendant.id)
    node_ids = list(set(node_ids))
    return node_ids
def param_vars(self):
    var_obj_rows = []
    declarations = list(findall_by_attr(self.root, "parametro"))
    var_params = list(
        filter(lambda x: x.child(name="id") is not None, declarations))
    for var in var_params:
        var = var.child(name="id")
        row = Row()
        row.token = "ID"
        row.lexeme = var.child().name
        row.type = var.parent.child(name="tipo").child().name
        row.dim, row.tam_dim1, row.tam_dim2 = 0, 1, 0
        row.scope, row.line = var.child().scope, var.child().line
        var_obj_rows.append(row)
    return var_obj_rows
def set_functions():
    functions = findall_by_attr(root, 'cabecalho')
    for func in functions:
        func_name = func.child().child().name
        row_index = func_table.index[func_table['Nome'] ==
                                     func_name].tolist()[0]
        func_type = get_type(func_table['Tipo'][row_index])
        func_params_type_list = [
            get_type(item[0]) for item in func_table['Parametros'][row_index]
        ]
        func_params_name_list = [
            item[1] for item in func_table['Parametros'][row_index]
        ]
        func_declaration = ir.FunctionType(func_type, func_params_type_list)
        if func_name == "principal":
            func = ir.Function(module, func_declaration, name='main')
        else:
            func = ir.Function(module, func_declaration, name=func_name)
        for i, arg in enumerate(func.args):
            arg.name = func_params_name_list[i]
        entry_block = func.append_basic_block('entry')
        exit_block = func.append_basic_block('exit')
        global builder
        builder = ir.IRBuilder(entry_block)
        var_indexes = var_table.index[var_table['escopo'] ==
                                      func_name].tolist()
        if var_indexes:
            set_local_variables(var_indexes)
        set_atributions(func_name)
        builder.branch(exit_block)
        builder.position_at_end(exit_block)
        func_return = func_table['Retorna'][row_index][1]
        if isinstance(func_return, str):
            builder.ret(vars_dict[func_return]['alloca'])
        else:
            builder.ret(load_value(func_return))
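# Free-standing llvmlite sketch of the codegen pattern above: declare a
# function, open an entry block, and emit a trivial return. The module and
# function names here are placeholders.
from llvmlite import ir

demo_module = ir.Module(name="demo")
fnty = ir.FunctionType(ir.IntType(32), [])
fn = ir.Function(demo_module, fnty, name="main")
entry = fn.append_basic_block("entry")
demo_builder = ir.IRBuilder(entry)
demo_builder.ret(ir.Constant(ir.IntType(32), 0))
print(demo_module)  # textual LLVM IR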
def identify_data(self, branch=0):
    t_plot = self.get_t()
    x_plot = self.get_x()
    y_plot = self.get_y()
    z_plot = self.get_z()
    if branch == 0:
        x = x_plot
        y = y_plot
        z = z_plot
        t = t_plot
    else:
        node = findall_by_attr(self.tree, 'n' + str(branch))
        x = [x_plot[ind] for ind in node[0]]
        y = [y_plot[ind] for ind in node[0]]
        z = [z_plot[ind] for ind in node[0]]
        t = [t_plot[ind] for ind in node[0]]
    cmap = cm.plasma
    norm = mcolors.Normalize(vmin=t[0], vmax=t[-1])
    fig, ax = plt.subplots(2, 2, figsize=(10, 10))
    ax[0, 0].scatter(t, z, marker='o', c=t, cmap=cmap, norm=norm)
    ax[0, 0].set_xlabel('Time [s]')
    ax[0, 0].set_ylabel('Height [m]')
    ax[0, 1].scatter(x, y, marker='o', c=t, cmap=cmap, norm=norm)
    ax[0, 1].set_xlabel('Easting [m]')
    ax[0, 1].set_ylabel('Northing [m]')
    ax[1, 0].scatter(t, x, marker='o', c=t, cmap=cmap, norm=norm)
    ax[1, 0].set_xlabel('Time [s]')
    ax[1, 0].set_ylabel('Easting [m]')
    ax[1, 1].scatter(t, y, marker='o', c=t, cmap=cmap, norm=norm)
    ax[1, 1].set_xlabel('Time [s]')
    ax[1, 1].set_ylabel('Northing [m]')
    fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax)
    plt.show()
def _recurse_dependencies(source, decision_packages, dependencies, tree_root,
                          tree_parent):
    packages = OrderedDict()
    for dep in dependencies:
        name = dep.name
        extras = dep.package.req.extras
        resolved_version = decision_packages.get(dep.package) or _find_version(
            source, dep, extras)

        tree_node = Node(
            name,
            version=str(resolved_version),
            parent=tree_parent,
            # pip_string in metadata might be the wrong one (populated
            # differently beforehand, higher up in the tree); left here in
            # case e.g. versions_available is needed in the rendered tree
            # metadata=source._packages_metadata[name][str(resolved_version)],
            pip_string=dep.pip_string,
            extras_name=dep.package.req.extras_name,
        )

        # detect cyclic dependencies
        matches = findall_by_attr(tree_root, name)
        if matches and matches[0] in tree_node.ancestors:
            logger.warning(
                "Cyclic dependency found: %s depends on %s and vice versa.",
                tree_node.name,
                tree_parent.name,
            )
            setattr(tree_node, "cyclic", True)
            packages[(name, str(resolved_version))] = {}
            continue

        packages[(name, str(resolved_version))] = _recurse_dependencies(
            source, decision_packages,
            source.dependencies_for(dep.package, resolved_version),
            tree_root, tree_node,
        )
    return packages
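# Side sketch of the cycle check above: a dependency is flagged cyclic when a
# node with the same name already sits among the new node's ancestors. The
# package names are made up.
from anytree import Node
from anytree.search import findall_by_attr

dep_root = Node("a")
dep_b = Node("b", parent=dep_root)
dep_a_again = Node("a", parent=dep_b)  # a -> b -> a

matches = findall_by_attr(dep_root, "a")
print(bool(matches and matches[0] in dep_a_again.ancestors))  # True -> cyclic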
def digest_verb(sents, lang, engine='stanza'):
    f = build_anal_tree(sents, lang, engine)
    words = findall_by_attr(f, name='upos', value='VERB')
    if words:
        print(sents, len(words))
        rs = []
        for w in words:
            rs.append(proc_verb_subs(w))

        def proc_text(t):
            # Note: ('ko',) must be a one-element tuple; `lang in ('ko')`
            # would test substring membership in the string 'ko'.
            if lang in ('ko',):
                return get_contrast(t, lang)
            return t

        succ = any(rs)
        cl = 'blue' if succ else 'white'
        tc.emp(
            cl,
            RenderTree(f, style=AsciiStyle()).by_attr(
                lambda n: f"{n.dependency_relation}: {proc_text(n.text)} {n.upos}"))
        return succ
    return False
def addOption(self, option):
    # NB: right now options can only be added at init. To be generalized.
    optionName = option.name
    if option.name == 'Unknown Option':
        self.unkOptCount += 1
        optionName = 'Unknown Option ' + str(self.unkOptCount)
    primActions = findall_by_attr(self.actions, value='primitive',
                                  name='type', maxlevel=2)
    option = Node(optionName, parent=self.actions,
                  actionID=self.maxActionID, type='option', option=option)
    for action in primActions:
        Node(action.name, parent=option,
             actionID=action.actionID, type='primitive')
    self.maxActionID += 1
    self.actions.n_opt += 1
def run(self, task, state, debug=False, history=False):
    if history:
        self.log.append([self.time, state, task.actionID])
    if debug:
        print("Run with {} at coords [{}, {}]".format(
            self.treeLog + task.name,
            self.GridWorld.state2coord[state][0],
            self.GridWorld.state2coord[state][1]))
    self.counter[state, task.actionID] += 1
    if task.type == 'primitive':
        if debug:
            print('Primitive!')
        [next_state, reward, absorb] = self.GridWorld.step(state,
                                                           task.actionID)
        alpha = self.learningRate(state, task.actionID)
        # self.V[task.actionID, state] = (1-alpha)*self.V[task.actionID, state] + alpha*reward
        tempDiff = reward + self.GridWorld.gamma * max(
            self.V[:, next_state]) - self.V[task.actionID, state]
        self.V[task.actionID, state] = self.V[task.actionID,
                                              state] + alpha * tempDiff
        self.time += 1
        self.lastTraj.append([state, reward, absorb])
        return [1, next_state, absorb]
    elif task.type == 'option':
        if task.actionID > 0:
            self.treeLog = self.treeLog + str(task.actionID) + '--'
        if debug:
            print('Option!')
            pdb.set_trace()
        count = 0
        absorb = False
        while task.option.log == 'active':
            if debug:
                print("Time {}".format(self.time))
            subtaskID = task.option.policy(self.GridWorld.state2coord[state])
            if debug:
                print("Subtask chosen {}".format(subtaskID))
            subtask = findall_by_attr(self.actions, value=subtaskID,
                                      name='actionID')[0]
            [N, next_state, absorb] = self.run(subtask, state, debug, history)
            if absorb:
                task.option.log = 'quit'
            [greedyValue, greedyAction] = self.evaluate(task, next_state)
            alpha = self.learningRate(state, subtask.actionID)
            self.C[task.actionID + 1, state, subtask.actionID] = \
                (1 - alpha) * self.C[task.actionID + 1, state,
                                     subtask.actionID] \
                + alpha * np.power(self.GridWorld.gamma, N) * greedyValue
            count = count + N
            state = next_state
            task.option.escape(self.GridWorld.state2coord[state])
        if task.actionID > 0:
            self.treeLog = self.treeLog[:-3]
        task.option.log = 'active'
        return [count, state, absorb]
    else:
        raise ValueError(
            "Action type should be either 'primitive' or 'option'")
def generateExcel(self):
    writer = pd.ExcelWriter(self.outputFile)
    samples = search.findall_by_attr(self.configTree, True,
                                     name="selected", maxlevel=2)
    samples = samples[1:]  # Skip root
    for s in samples:
        if s.sampleType == "SNP":
            # Sub-headers: Parameter -> Pair -> Real/Imag
            col = pd.MultiIndex.from_arrays([[], [], []])
            df = pd.DataFrame(columns=col, index=s.sample.freq)
            freq_title = f"Freq({s.sample.freq_unit})"
            new_freq = df.index.set_names(freq_title)
            df.index = new_freq
            # df['', '', freq_title] = s.sample.freq
            # df.set_index([('', '', freq_title)], inplace=True)
            params = search.findall_by_attr(s, True, name="selected",
                                            maxlevel=2)
            params = params[1:]  # Skip root
            for p in params:
                param_name = p.name
                series = search.findall_by_attr(p, True, name="selected",
                                                maxlevel=2)
                series = series[1:]  # Skip root
                for line in series:
                    line_name = line.name  # pair
                    if p.complex:
                        if line_name == "Limit":
                            pass
                        else:
                            df[param_name, line_name, "Re"] = np.real(
                                s.sample.getParam(param_name,
                                                  z=True)[line_name])
                            df[param_name, line_name, "Im"] = np.imag(
                                s.sample.getParam(param_name,
                                                  z=True)[line_name])
                    else:
                        if param_name == "Propagation Delay":
                            if line_name == "Limit":
                                lim = s.sample.standard.limits[
                                    'PropagationDelay'].getArray()
                                idx, val = zip(*lim)
                                df[param_name, line_name, "ns"] = val
                            else:
                                df[param_name, line_name, "ns"] = \
                                    s.sample.getParam(param_name,
                                                      z=False)[line_name]
                        else:
                            if line_name == "Limit":
                                try:
                                    lim = s.sample.standard.limits[
                                        param_name].getArray()
                                    idx, val = zip(*lim)
                                    df[param_name, line_name, "db"] = val
                                except Exception as e:
                                    print("FAIL on LIMIT : ", param_name, e)
                            else:
                                df[param_name, line_name, "db"] = \
                                    s.sample.getParam(param_name,
                                                      z=False)[line_name]
            try:
                print("sample name : ", s.sample.name)
                df.to_excel(writer, sheet_name=s.sample.name,
                            engine='openpyxl')
                writer.book[s.sample.name].delete_rows(3)
                writer.save()
            except Exception as e:
                print(e)
        if s.sampleType == "Alien":
            params = search.findall_by_attr(s, True, name="selected",
                                            maxlevel=2)
            params = params[1:]  # Skip root
            for p in params:
                # Sub-headers: Parameter -> Pair -> Real/Imag
                col = pd.MultiIndex.from_arrays([[], [], []])
                df = pd.DataFrame(columns=col, index=s.sample.freq)
                freq_title = f"Freq({s.sample.freq_unit})"
                new_freq = df.index.set_names(freq_title)
                df.index = new_freq
                param_name = p.name
                series = search.findall_by_attr(p, True, name="selected",
                                                maxlevel=2)
                series = series[1:]  # Skip root
                for line in series:
                    line_name = line.name  # pair
                    if line_name == "Limit" or line_name == "Avg Limit":
                        try:
                            lim = line.series.getArray()
                            idx, val = zip(*lim)
                            df[param_name, line_name, "db"] = val
                        except Exception as e:
                            print("FAIL on LIMIT : ", param_name, e)
                    else:
                        df[param_name, line_name, "db"] = line.series
                try:
                    print("sample name : ", s.sample.name)
                    df.to_excel(writer,
                                sheet_name=s.sample.name + " - " + param_name)
                except Exception as e:
                    print(e)
    writer.save()
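# Side sketch of the MultiIndex-column pattern used above: assigning with a
# 3-tuple key grows the (Parameter, Pair, Component) column hierarchy. The
# parameter names are made up.
import numpy as np
import pandas as pd

col = pd.MultiIndex.from_arrays([[], [], []])
demo_df = pd.DataFrame(columns=col, index=[1.0, 2.0, 3.0])
demo_df["S21", "1-2", "Re"] = np.array([0.1, 0.2, 0.3])
print(demo_df.columns.tolist())  # [('S21', '1-2', 'Re')]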
from collections import defaultdict

from anytree.search import findall_by_attr

with open('input.txt', 'r') as file:
    data = (line for line in file.read().splitlines())
data = (d.split('contain') for d in data)
data = ((bag.strip().replace(' bags', '').replace(' bag', ''),
         content.strip().replace('.', '').split(', '))
        for bag, content in data)

rules = defaultdict(list)
for rule in data:
    bag, contents = rule
    for c in contents:
        if c != 'no other bags':
            rule = c.split(' ', 1)
            rule[0] = int(rule[0])
            rule[1] = rule[1].replace(' bags', '').replace(' bag', '')
            rules[bag].append(tuple(rule))

# Part 1
shiny_bag_count = 0
for root in find_roots(rules):
    rules_tree = generate_rules_tree(rules, root)
    nodes = findall_by_attr(rules_tree, 'shiny gold')
    if root != 'shiny gold' and nodes:
        shiny_bag_count += 1
print(shiny_bag_count)

# Part 2
rules_tree = generate_rules_tree(rules, 'shiny gold')
# display_tree(rules_tree)
print(recursive_sum(rules_tree) - 1)
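# Hedged sketch of the generate_rules_tree helper relied on above (its real
# definition is not shown): each inner bag is expanded once per containment
# count, so recursive_sum(tree) - 1 can count every bag inside the root bag.
from anytree import Node

def generate_rules_tree(rules, bag, parent=None):
    node = Node(bag, parent=parent)
    for count, inner in rules.get(bag, []):
        for _ in range(count):
            generate_rules_tree(rules, inner, parent=node)
    return node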
def error_handling(self):
    calls = findall_by_attr(self.root, "chamada_funcao")
    call_names = [call.child(name="ID").child().name for call in calls]
    not_called = [
        func for func in list(self.func_table.Nome) if func not in call_names
    ]

    # Check errors related to the "principal" (main) function
    principal = findall_by_attr(self.root, "cabecalho")
    principal = [
        head.child(name="ID").child() for head in principal
        if head.child(name="ID").child().name == "principal"
    ]
    if principal:
        princip_calls = findall_by_attr(principal[0].parent.parent,
                                        "chamada_funcao")
        princip_call_names = [
            call.child(name="ID").child().name for call in princip_calls
        ]
        if not principal[0].child(name="retorna"):
            self.raise_error(
                "Função principal deveria retornar inteiro, mas retorna vazio")
        if "principal" in princip_call_names:
            self.raise_warning("Chamada recursiva para principal")
            not_called.append("principal")
    else:
        self.raise_error("Função principal não declarada")
    try:
        not_called.remove("principal")
    except ValueError:
        self.raise_error("Chamada para a função principal não permitida")

    # Check functions declared but never used
    not_declared = [
        func for func in call_names if func not in list(self.func_table.Nome)
    ]
    for nd in not_called:
        self.raise_warning(
            f"Função '{nd}' declarada, mas não utilizada",
            self.func_table.loc[self.func_table.Nome == nd,
                                'Linha'].values[0])
    for call in calls:
        func_id = call.child(name="ID").child()
        # Check calls to functions that were not declared
        if func_id.name in not_declared:
            self.raise_error(
                f"Chamada a função '{func_id.name}' que não foi declarada",
                func_id.line)
        # Check that the number of arguments matches the declaration
        list_arg = call.child(name="lista_argumentos")
        n_params = len(
            findall(list_arg,
                    filter_=lambda node: node.name in ("var", "numero")))
        try:
            # Check that the return value matches the function's type
            table_index = self.func_table[self.func_table['Nome'] ==
                                          func_id.name].index.item()
            return_type = self.func_table['Retorna'][table_index]
            func_type = self.func_table['Tipo'][table_index]
            if func_type != return_type[0]:
                if return_type[0] == "ID":
                    if return_type[1] in self.var_table.Lexema.values:
                        var_type = self.var_table.loc[
                            self.var_table.Lexema == return_type[1],
                            'Tipo'].values[0]
                        if var_type != func_type:
                            self.raise_error(
                                f"Função '{func_id.name}' deveria retornar {func_type.lower()}, mas retorna {var_type.lower()}"
                            )
                    else:
                        self.raise_error(
                            f"Função '{func_id.name}' retorna um parâmetro inexistente"
                        )
                elif return_type[0] != func_type:
                    self.raise_error(
                        f"Função '{func_id.name}' deveria retornar {func_type.lower()}, mas retorna {return_type[0].lower()}"
                    )
            if self.func_table['N-params'][table_index] != n_params:
                self.raise_error(
                    f"Chamada à função '{func_id.name}' com número de parâmetros diferente que o declarado",
                    func_id.line)
        except Exception:
            pass

    # Check variables declared but never used
    var_uti = findall(
        self.root,
        filter_=lambda node: node.name in ("fator", "atribuicao"))
    var_uti_list = list(
        filter(lambda x: x.child(name="var") is not None, var_uti))
    var_uti_names = [
        var.child(name="var").child(name="ID").child().name
        for var in var_uti_list
    ]
    for declared in self.var_table.Lexema.values:
        if declared not in var_uti_names:
            self.raise_warning(
                f"Variável '{declared}' declarada e não utilizada")
    for uti in var_uti_names:
        if uti not in self.var_table.Lexema.values:
            self.raise_error(f"Variável '{uti}' não declarada")

    # Check variables declared but never initialized
    var_atrib = [
        atrib.child().child().child().name for atrib in list(
            findall(self.root,
                    filter_=lambda node: (node.name == "atribuicao") and
                    (node.child(name="var") is not None)))
    ]
    for declared in self.var_table.Lexema.values:
        if declared not in var_atrib:
            self.raise_warning(
                f"Variável '{declared}' declarada e não inicializada")

    # Check duplicate variable declarations
    duplicate_layer1 = self.var_table[self.var_table.duplicated(
        ['Lexema'], keep=False)]
    duplicate_layer2 = duplicate_layer1[duplicate_layer1.duplicated(
        ['escopo'])]
    for duplicate in duplicate_layer2.Lexema.values:
        self.raise_warning(
            f"Variável '{duplicate}' já declarada anteriormente")

    assignment_list = list(findall_by_attr(self.root, "atribuicao"))
    vars_list = self.var_table[["Lexema", "Tipo"]].values
    func_list = self.func_table[["Nome", "Tipo"]].values
    for assign in assignment_list:
        operators = list(
            filter(
                lambda n: n.parent.name in
                ("ID", "NUM_INTEIRO", "NUM_PONTO_FLUTUANTE"),
                list(assign.leaves)))
        operators = [op.name for op in operators]
        assigned = operators.pop(0)
        assigned_type = next(
            (var for var in vars_list if var[0] == assigned), [None, None])
        for value in operators:
            if type(value) is str:
                for i in range(len(vars_list)):
                    if value in vars_list[i]:
                        if vars_list[i][1] != assigned_type[1]:
                            self.raise_warning(
                                f"Atribuição de tipos distintos, '{assigned}' {assigned_type[1].lower()} e '{vars_list[i][0]}' {vars_list[i][1].lower()}"
                            )
                for i in range(len(func_list)):
                    if value in func_list[i]:
                        if func_list[i][1] != assigned_type[1]:
                            self.raise_warning(
                                f"Atribuição de tipos distintos, '{assigned}' recebe {assigned_type[1].lower()} e '{func_list[i][0]}' retorna {func_list[i][1].lower()}"
                            )
            else:
                if type(value) is int and assigned_type[1] == "FLUTUANTE":
                    self.raise_warning(
                        f"Coerção implícita do valor atribuído para '{assigned}', variável flutuante recebendo um inteiro"
                    )
                elif type(value) is float and assigned_type[1] == "INTEIRO":
                    self.raise_warning(
                        f"Coerção implícita do valor atribuído para '{assigned}', variável inteira recebendo um flutuante"
                    )

    for index, row in self.var_table.iterrows():
        if isinstance(row['tam_dim1'], str):
            pass
        if not float(row['tam_dim1']).is_integer() or not float(
                row['tam_dim2']).is_integer():
            self.raise_error(
                f"índice de array '{row['Lexema']}' não inteiro")
def get_name(self, name):
    n = findall_by_attr(self.root, name)
    return n[0]
df_keyword_counts = df.groupby(['KeywordID'])['KeywordID'].agg('count')
count = df_keyword_counts.to_frame(name="TotalNumberUsed").reset_index()
df_keywords = df_keywords.merge(count, how='left', on="KeywordID")

# Calculate how many interviewees use a keyword
number_of_interviewee_using = df.groupby(
    ['KeywordID'])['IntCode'].unique().map(lambda x: len(x))
number_of_interviewee_using = number_of_interviewee_using.to_frame(
    name="TotalNumberIntervieweeUsing").reset_index()
df_keywords = df_keywords.merge(number_of_interviewee_using,
                                how='left', on="KeywordID")

# Find those keywords that describe times and places
time_and_place_nodes = search.findall_by_attr(root, 78882,
                                              name="id")[0].descendants
time_and_place_node_ids = [element.id for element in time_and_place_nodes]

# Find those keywords that describe events of world history
world_history_nodes = search.findall_by_attr(root, 43485,
                                             name="id")[0].descendants
world_history_node_ids = [element.id for element in world_history_nodes]

# Make a joint list of them
generic_ids = time_and_place_node_ids + world_history_node_ids

# Get the node ids of all nodes
all_nodes = search.findall_by_attr(root, -1, name="id")[0].descendants
all_node_ids = [element.id for element in all_nodes]

# Find all leaf nodes
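# Small sketch of the id-attribute lookup plus .descendants walk used above,
# with a made-up taxonomy.
from anytree import Node
from anytree import search

taxonomy = Node("taxonomy", id=-1)
times = Node("time and place", parent=taxonomy, id=78882)
Node("1940s", parent=times, id=101)

node = search.findall_by_attr(taxonomy, 78882, name="id")[0]
print([d.id for d in node.descendants])  # [101]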
def component(self, k):
    return findall_by_attr(self.component_tree, k)
def give_branch_ind(self, branch):
    node = findall_by_attr(self.tree, 'n' + str(branch))
    return node[0]
def generatePDF(self):
    class SampleData:
        pass

    samples = search.findall_by_attr(self.configTree, True,
                                     name="selected", maxlevel=2)
    samples = samples[1:]  # Skip root
    sample_list = []
    for s in samples:
        class ParamData:
            pass

        if s.sampleType == "SNP":
            freq_title = f"Freq({s.sample.freq_unit})"
            sd = SampleData()
            sd.type = "SNP"
            sd.name = s.sample.name
            sd.freq = s.sample.freq
            sd.freq_unit = s.sample.freq_unit
            sd.testFreq = np.ceil(
                np.geomspace(np.ceil(sd.freq[0]), np.ceil(sd.freq[-1]),
                             13, endpoint=True)).astype(int)
            params = search.findall_by_attr(s, True, name="selected",
                                            maxlevel=2)
            params = params[1:]  # Skip root
            param_list = []
            for p in params:
                # Note: this local `pd` shadows the pandas alias within
                # this function.
                pd = ParamData()
                param_name = p.name
                pd.name = param_name
                series = search.findall_by_attr(p, True, name="selected",
                                                maxlevel=2)
                series = series[1:]  # Skip root
                series_list = []

                class LineData:
                    pass

                pd.testPoints = {'freq': sd.testFreq}
                pd.worst_margin = {}
                pd.standard = None
                for line in series:
                    ld = LineData()
                    line_name = line.name  # pair
                    ld.name = line_name
                    if line_name == "Limit":
                        try:
                            lim = s.sample.standard.limits[
                                param_name.replace(' ', '')].getArray()
                            worst, pd.pass_fail = s.sample.getWorstMargin(
                                param_name.replace(' ', ''))
                            # print(worst)
                            for key, (value, freq, limit,
                                      worstMargin) in worst.items():
                                pd.worst_margin[key] = (
                                    np.around(float(freq), decimals=3),
                                    np.around(float(worstMargin),
                                              decimals=3))
                            pd.standard = s.sample.standard.name
                            f, points = zip(*lim)
                            # print(points)
                            points = np.array(points, dtype=np.float16)
                        except Exception as e:
                            print("FAIL on LIMIT : ", param_name, e)
                    else:
                        points = s.sample.getParam(param_name)[line_name]
                    pd.testPoints[line_name] = np.around(
                        self.getTestPoints(sd.freq, points,
                                           desiredFreqs=sd.testFreq),
                        decimals=3)
                    series_list.append(ld)
                pd.lines = series_list
                param_list.append(pd)
            sd.params = param_list
            sample_list.append(sd)
        try:
            print("sample name : ", s.sample.name)
        except Exception as e:
            print(e)
    template = "graphs_report.prep"
    RML_DIR = "templates"
    DATA_DIR = os.path.join(RML_DIR, "data")
    output = self.outputFile
    templateName = os.path.join(RML_DIR, template)
    template = preppy.getModule(templateName)
    namespace = {
        'samples': sample_list,
        'filename': ntpath.basename(output),
        'RML_DIR': RML_DIR,
        'IMG_DIR': 'img'
    }
    rml = template.getOutput(namespace)
    open(os.path.join(DATA_DIR, 'latest.rml'), 'w').write(rml)
    # print(rml)
    buf = getBytesIO()
    rml2pdf.go(asBytes(rml), outputFileName=output)
    # buf = trml2pdf.parseString(asBytes(rml))
    print("PDF exported - ", ntpath.basename(output))
def by_pos(self, pos: Text) -> Tuple['AnalNode', ...]:
    return findall_by_attr(self, name='upos', value=pos)
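# Generic pattern behind by_pos: findall_by_attr can match any node attribute,
# not just the name. Plain anytree Nodes with a hypothetical upos attribute
# stand in for AnalNode here.
from anytree import Node
from anytree.search import findall_by_attr

sent = Node("sent", upos="ROOT")
Node("runs", parent=sent, upos="VERB")
Node("dog", parent=sent, upos="NOUN")

print([n.name for n in findall_by_attr(sent, name="upos", value="VERB")])
# -> ['runs']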