def find_delay(self, output_node: anytree.Node, rising_edge: bool = True,
               debug: bool = False) -> list:
    """Computes the delay along the path that exits through the given node."""
    w = anytree.Walker()
    # Path from the tree root down to the output node's first child.
    upward, common, downwards = w.walk(self.tree.root, output_node.children[0])
    last_slew = self.tree.root.device.get_output_slew()
    total_delay = 0
    total_simulated_delay = 0
    for idx, node in enumerate(downwards[:-2]):
        # Each device drives the next node in the chain.
        node.device.set_output_device(downwards[idx + 1])
        delay = node.device.get_delay(last_slew, rising_edge)
        simulated_delay = node.device.get_simulated_delay()
        last_slew = node.device.get_output_slew(last_slew, rising_edge)
        last_simulated_slew = node.device.get_simulated_slew()
        # An inverter flips the polarity of the propagating edge.
        if isinstance(node.device, Inverter):
            rising_edge = not rising_edge
        if debug:
            print(f"{node.name}:\n"
                  f"Estimated delay: {delay:.2e}\n"
                  f"Simulated delay: {simulated_delay:.2e}\n"
                  f"Estimated slew: {last_slew:.2e}\n"
                  f"Simulated slew: {last_simulated_slew:.2e}\n")
        total_delay += delay
        total_simulated_delay += simulated_delay
    return [total_delay, total_simulated_delay]
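# All of the snippets in this section rely on anytree.Walker.walk(start, end),
# which returns an (upwards, common, downwards) triple: the climb from start
# (inclusive) to just below the common ancestor, the common ancestor itself,
# and the descent from just below it down to end (inclusive). A minimal,
# standalone sketch on a toy tree (node names are illustrative only):
import anytree

f = anytree.Node("f")
b = anytree.Node("b", parent=f)
d = anytree.Node("d", parent=b)
e = anytree.Node("e", parent=d)
g = anytree.Node("g", parent=f)
i = anytree.Node("i", parent=g)
h = anytree.Node("h", parent=i)

w = anytree.Walker()
upwards, common, downwards = w.walk(h, e)
print([n.name for n in upwards], common.name, [n.name for n in downwards])
# ['h', 'i', 'g'] f ['b', 'd', 'e']
print(len(upwards) + len(downwards))  # 6: the number of edges between h and e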
def plot_dendrogram(
    ax,
    root,
    index_key="sorted_adjacency_index",
    orientation="h",
    linewidth=0.7,
    cut=None,
    lowest_level=None,
):
    if lowest_level is None:
        lowest_level = root.height

    # Position every node: depth on one axis, hierarchical mean of the
    # index on the other.
    for node in (root.descendants) + (root,):
        y = node._hierarchical_mean(index_key)
        x = node.depth
        node.y = y
        node.x = x

    # Draw each leaf-to-root path, skipping edges that were already drawn.
    walker = anytree.Walker()
    walked = []
    for node in root.leaves:
        upwards, common, downwards = walker.walk(node, root)
        curr_node = node
        for up_node in (upwards) + (root,):
            edge = (curr_node, up_node)
            if edge not in walked:
                xs = [curr_node.x, up_node.x]
                ys = [curr_node.y, up_node.y]
                xs, ys = get_x_y(xs, ys, orientation)
                ax.plot(
                    xs,
                    ys,
                    linewidth=linewidth,
                    color="black",
                    alpha=1,
                )
                walked.append(edge)
            curr_node = up_node

        # Fill a wedge from each leaf out to the span of its data.
        y_max = node.node_data[index_key].max()
        y_min = node.node_data[index_key].min()
        xs = [node.x, node.x, node.x + 1, node.x + 1]
        ys = [node.y - 3, node.y + 3, y_max, y_min]
        xs, ys = get_x_y(xs, ys, orientation)
        ax.fill(xs, ys, facecolor="black")

    if orientation == "h":
        ax.set(xlim=(-1, lowest_level + 1))
        if cut is not None:
            ax.axvline(cut - 1, linewidth=1, color="grey", linestyle=":")
    elif orientation == "v":
        ax.set(ylim=(lowest_level + 1, -1))
        if cut is not None:
            ax.axhline(cut - 1, linewidth=1, color="grey", linestyle=":")
    ax.axis("off")
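# plot_dendrogram calls an external get_x_y helper that is not shown here.
# The following is a hypothetical minimal version, assuming its only job is
# to swap the coordinate lists for the vertical orientation; the original
# implementation may differ.
def get_x_y(xs, ys, orientation):
    # Assumed behavior: horizontal plots use (xs, ys) as-is, vertical
    # plots swap the axes.
    if orientation == "v":
        return ys, xs
    return xs, ys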
def _compute_distances(self):
    """
    Computes the distance from the Entity to each of its Categories and
    stores them in a list ordered from root to immediate parent.
    The similarity score is based on the category tree (length of paths);
    'distance' is a bit misleading, as these are similarity scores rather
    than distances. See Eq. 3 and 6.
    """
    w = at.Walker()
    paths = [w.walk(self, ancestor) for ancestor in self.ancestors]
    # len(path[0]) + len(path[2]) is the number of edges between the
    # entity and the ancestor (upward leg plus downward leg).
    self._distances = [np.exp(-self.tree.similarity_tree_gamma
                              * (len(path[0]) + len(path[2])))
                       for path in paths]
    self._distances = normalized(np.asarray(self._distances))
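# For intuition, the score exp(-gamma * path_length) decays exponentially
# with the number of edges separating two nodes. A minimal numeric sketch,
# assuming gamma = 0.7 (the value passed to CategoryTree in
# _initialize_learner below):
import numpy as np

gamma = 0.7
for path_length in (1, 2, 4, 6):
    print(path_length, np.exp(-gamma * path_length))
# 1 -> ~0.497, 2 -> ~0.247, 4 -> ~0.061, 6 -> ~0.015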
def select_closest_query(self, query_list, previous_query, standalone=True):
    """
    Gets the available queries (for now, only the still-available target
    list) and selects the one closest to the PREVIOUS one.
    query_list contains the leaves_name of the available targets.
    previous_query contains the leaves_name of the previous target.
    Returns the AwA index of the selected query. See Learner M.
    """
    if standalone:
        random.shuffle(query_list)
    if previous_query is None:
        # No previous query: ask the first (shuffled) entry and assign a
        # uniform similarity score (path length 2) to every candidate.
        index = 0
        distances = [np.exp(-self.similarity_tree_gamma * 2)
                     for entity in query_list]
    else:
        # Compute the similarities using a walker on the tree structure.
        # path[0] is the path from entity to the common ancestor;
        # path[2] is the path from previous_entity to the common ancestor.
        w = at.Walker()
        paths = [w.walk(self.node_dictionary[self.leaves_to_wn[entity]],
                        self.node_dictionary[self.leaves_to_wn[
                            self.leaves[previous_query]]])
                 for entity in query_list]
        distances = [np.exp(-self.similarity_tree_gamma
                            * (len(path[0]) + len(path[2])))
                     for path in paths]
        # Despite the name, these are similarity scores: higher is closer.
        index, max_distance = max(enumerate(distances), key=lambda p: p[1])
    # Find the index in the original entities vector.
    awa_index = self.leaves.index(query_list[index])
    # Remove the question from the list.
    if standalone:
        query_list.remove(query_list[index])
    return awa_index, distances
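# A standalone sketch of the selection rule above: pick the candidate leaf
# with the highest exp(-gamma * path_length) similarity to a previous leaf.
# The tree and names here are illustrative, not the AwA category tree.
import numpy as np
import anytree

root = anytree.Node("mammal")
feline = anytree.Node("feline", parent=root)
cat = anytree.Node("cat", parent=feline)
tiger = anytree.Node("tiger", parent=feline)
whale = anytree.Node("whale", parent=root)

gamma = 0.7
w = anytree.Walker()
previous = cat
candidates = [tiger, whale]
scores = [np.exp(-gamma * (len(pth[0]) + len(pth[2])))
          for pth in (w.walk(c, previous) for c in candidates)]
index, best = max(enumerate(scores), key=lambda pair: pair[1])
print(candidates[index].name, best)  # tiger: 2 edges beats whale's 3 edges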
def nearest_common_ancestor(source, target):
    walker = anytree.Walker()
    _, common, _ = walker.walk(source, target)
    return common
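# Quick usage sketch of nearest_common_ancestor on a toy tree (names are
# illustrative):
import anytree

root = anytree.Node("root")
left = anytree.Node("left", parent=root)
a = anytree.Node("a", parent=left)
b = anytree.Node("b", parent=left)
c = anytree.Node("c", parent=root)

print(nearest_common_ancestor(a, b).name)  # left
print(nearest_common_ancestor(a, c).name)  # root
print(nearest_common_ancestor(a, a).name)  # a: walk(n, n) returns n as common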
def _initialize_learner(self):
    # List of names following the wn nomenclature.
    self.entities = list()
    # Lists of names and ids following the awa nomenclature.
    self.entities_awa = list()
    self.entities_id = list()
    # List of names in Finnish.
    self.entities_fin = list()
    # Lists of names and ids of the attributes.
    self.attributes = list()
    self.attributes_id = list()

    # Path of the dataset.
    if rospy.has_param('/dataset_path'):
        dataset_path = rospy.get_param('/dataset_path')
    else:
        rospy.logerr('Parameters needed are not available')
        exit(5)

    # Collect the entities of AwA2: ids and names.
    with open(dataset_path + '/classes_wn.txt', 'r') as f:
        for line in f:
            entity = line.split()[1].replace('+', '_')
            entity_id = int(line.split()[0]) - 1
            self.entities.append(entity)
            self.entities_id.append(entity_id)
    with open(dataset_path + '/classes.txt', 'r') as f:
        for line in f:
            entity = line.split()[1].replace('+', ' ')
            self.entities_awa.append(entity)
    with open(dataset_path + '/classes_finnish.txt', 'r') as f:
        for line in f:
            entity = line.split()[1].replace('+', ' ')
            self.entities_fin.append(entity)

    # Build the category tree.
    self.ct = al.CategoryTree('mammal.n.01', similarity_tree_gamma=0.7)
    self.ct.add_leaves(self.entities)
    self.ct.simplify_tree()

    # Collect the attributes of AwA2.
    with open(dataset_path + '/predicates.txt', 'r') as f:
        for line in f:
            attribute = line.split()[1]
            attribute_id = int(line.split()[0]) - 1
            self.attributes.append(attribute)
            self.attributes_id.append(attribute_id)
    full_table = np.loadtxt(dataset_path + '/predicate-matrix-binary.txt')
    binary_table = full_table[np.asarray(self.entities_id, dtype=int), :]
    self.binary_table = binary_table[:, np.asarray(self.attributes_id,
                                                   dtype=int)]

    # Compute the range of pairwise tree distances for the hybrid learner.
    w = at.Walker()
    paths = list()
    for entity in self.entities:
        for other in self.entities:
            if entity != other:
                paths.append(w.walk(
                    self.ct.node_dictionary[self.ct.leaves_to_wn[entity]],
                    self.ct.node_dictionary[self.ct.leaves_to_wn[other]]))
    distances = [(len(path[0]) + len(path[2])) for path in paths]
    self.min_distance = min(distances)
    self.max_distance = max(distances)

    # Experiment parameters from rosparam.
    if rospy.has_param('/time_bag'):
        self.time_bag = rospy.get_param('/time_bag')
    else:
        rospy.logerr('Parameters needed are not available')
        exit(5)

    self.selected_attributes = np.array([[22, 51], [30, 52], [31, 54]])
    self.verbose_attributes = np.array(
        [['Do these animals have PAWS?', 'Do these animals EAT FISH?'],
         ['Do these animals have HORNS?', 'Do these animals EAT MEAT?'],
         ['Do these animals have CLAWS?', 'Are these animals HERBIVORE?']])
    self.verbose_attributes_fi = np.array(
        [['Onko näillä eläimillä TASSUT?', 'Syövätkö nämä eläimet KALAA?'],
         ['Onko näillä eläimillä SARVET?', 'Syövätkö nämä eläimet LIHAA?'],
         ['Onko näillä eläimillä KYNNET?', 'Ovatko nämä eläimet KASVISSYÖJIÄ?']])
    self.verbose_attributes_nao = np.array(
        [['Do these animals have\\pau=200\\ paws?',
          'Do these animals\\pau=200\\ eat fish?'],
         ['Do these animals have\\pau=200\\ horns?',
          'Do these animals\\pau=200\\ eat meat?'],
         ['Do these animals have\\pau=200\\ claws?',
          'Are these animals\\pau=200\\ herbivore?']])

    # Initialize data for logging.
    self.experiment_data['attributes'] = self.selected_attributes
    self.experiment_data['time_budget'] = self.time_bag
def entities_distance(self, ent1, ent2):
    w = at.Walker()
    path = w.walk(self.node_dictionary[self.leaves_to_wn[self.leaves[ent1]]],
                  self.node_dictionary[self.leaves_to_wn[self.leaves[ent2]]])
    # Number of edges between the two leaves: upward leg plus downward leg.
    distance = len(path[0]) + len(path[2])
    return distance