def test_edge_addition(self):
    """An explicit edge should give the target node a second parent."""
    root = Node('rootNode')
    g = Graph(root)
    g.add_node('node1', ['rootNode'])
    g.add_node('node2', ['rootNode'])
    # link node2 -> node1 on top of the existing rootNode -> node1 link
    g.add_edge('node2', 'node1')
    found = dfs(root, 'node1').searchNode
    self.assertEqual(found.name, 'node1')
    self.assertEqual(len(found.parents), 2)
def test_search_node(self):
    """dfs should locate both children added under the root by name."""
    root = Node('rootNode')
    g = Graph(root)
    g.add_node('node1', ['rootNode'])
    g.add_node('node2', ['rootNode'])
    first = dfs(root, 'node1').searchNode
    second = dfs(root, 'node2').searchNode
    self.assertEqual(first.name, 'node1')
    self.assertEqual(second.name, 'node2')
def generate_graph(self, graph_info, dataset_path, dataloader,
                   edge_threshold=0.5, save_path=None, verbose=True,
                   max_samples=10):
    r"""
    Constructs the entire concept graph based on the information provided.

    graph_info    : list of json: [{'layer_name', 'filter_idxs',
                    'concept_name', 'description'}]
    dataset_path  : <str> root directory of dataset
    dataloader    : custom loader which takes an image path and returns both
                    the image and the corresponding path simultaneously
    edge_threshold: <float> threshold value to form an edge (must be truthy;
                    an explicit 0 is rejected by the validation below)
    save_path     : <str> path to folder to save the generated graph in
                    .pickle format; when None, saving is skipped
    verbose       : <bool> provide log statements
    max_samples   : maximum number of samples required for expectation;
                    if -1, considers all images in provided root dir
    """
    # --- separate per-field lists from the json records ---------------
    layers, filter_idxs, concept_names, descp = [], [], [], []
    for cinfo in graph_info:
        layers.append(cinfo['layer_name'])
        filter_idxs.append(cinfo['filter_idxs'])
        concept_names.append(cinfo['concept_name'])
        descp.append(cinfo['description'])

    layers = np.array(layers)
    filter_idxs = np.array(filter_idxs)
    concept_names = np.array(concept_names)
    descp = np.array(descp)

    if not edge_threshold:
        raise ValueError("Assign proper edge threshold")

    # order layers by the numeric suffix of their name (e.g. 'conv_3')
    layer_names = np.unique(layers)
    layer_names = layer_names[np.argsort(
        [int(idx.split('_')[-1]) for idx in layer_names])]

    # flatten concepts into a single ordering, remembering each concept's
    # layer index so only later layers are considered as children below
    node_ordering = []
    node_indexing = []
    for i in range(len(layer_names)):
        node_ordering.extend(concept_names[layers == layer_names[i]])
        node_indexing.extend([i] * sum(layers == layer_names[i]))
    node_indexing = np.array(node_indexing)
    node_ordering = np.array(node_ordering)

    rootNode = Node('Input')
    rootNode.info = {'concept_name': 'Input Image',
                     'layer_name': 'Placeholder',
                     'filter_idxs': [0],
                     'description': 'Input Image to a network'}
    self.causal_BN = Graph(rootNode)

    def _concept_info(name):
        # metadata dict for a concept, looked up by concept name
        return {'concept_name': name,
                'layer_name': layers[concept_names == name][0],
                'filter_idxs': filter_idxs[concept_names == name][0],
                'description': descp[concept_names == name][0]}

    for idxi, nodei in zip(node_indexing, node_ordering):
        nodei_info = _concept_info(nodei)
        try:
            self.causal_BN.get_node(nodei)
            self.causal_BN.current_node.info = nodei_info
            Aexists = True
        except Exception:  # was a bare except; node simply not present yet
            Aexists = False
            if nodei_info['layer_name'] == layer_names[0]:
                # first-layer concepts hang directly off the input node
                self.causal_BN.add_node(nodei, parentNodes=['Input'])
                self.causal_BN.get_node(nodei)
                self.causal_BN.current_node.info = nodei_info
                Aexists = True

        # candidate children are concepts from strictly later layers
        later = node_indexing > idxi
        for idxj, nodej in zip(node_indexing[later], node_ordering[later]):
            nodej_info = _concept_info(nodej)
            link_info = self.get_link(nodei_info, nodej_info,
                                      dataset_path=dataset_path,
                                      loader=dataloader,
                                      max_samples=max_samples)
            try:
                self.causal_BN.get_node(nodej)
                Bexists = True
                self.causal_BN.current_node.info = nodej_info
            except Exception:
                Bexists = False

            if verbose:
                print(
                    "[INFO: BioExp Graphs] Causal Relation between: {}, {}; edge weights: {}"
                    .format(nodei, nodej, link_info))

            # connect only when the link is strong enough and the source
            # node is already part of the graph
            if link_info > edge_threshold and Aexists:
                if not Bexists:
                    self.causal_BN.add_node(nodej, parentNodes=[nodei])
                    self.causal_BN.get_node(nodej)
                    self.causal_BN.current_node.info = nodej_info
                else:
                    self.causal_BN.add_edge(nodei, nodej)

    self.causal_BN.print(rootNode)

    # BUGFIX: original called os.makedirs(save_path) unconditionally, which
    # raises TypeError with the default save_path=None; also close the
    # pickle file handle (original leaked it via a bare open())
    if save_path is not None:
        os.makedirs(save_path, exist_ok=True)
        with open(os.path.join(save_path, 'causal_graph.pickle'), 'wb') as f:
            pickle.dump({'graph': self.causal_BN, 'rootNode': rootNode}, f)

    print("[INFO: BioExp Graphs] Causal Graph Generated")
import sys
sys.path.append('..')
from pgm.helpers.common import Node
from pgm.representation.LinkedListMN import Graph

# Build a small multi-parent demo graph and print it from the root.
root = Node('rootNode')
demo_graph = Graph(root)
demo_graph.add_node('node1', ['rootNode'])
demo_graph.add_node('node2', ['rootNode', 'node1'])
demo_graph.add_node('node3', ['node1'])
# extra cross link: node2 -> node3
demo_graph.add_edge('node2', 'node3')
demo_graph.print(root)
import sys
sys.path.append('..')
from pgm.helpers.common import Node
from pgm.representation.LinkedListMN import Graph

# Demo: attach value sets and distributions to nodes in a two-node graph.
root = Node('rootNode')
root.values = [0, 1, 2]
root.set_distribution()

demo_graph = Graph(root)
demo_graph.add_node('node1', ['rootNode'])
demo_graph.print(root)

child = demo_graph.get_node('node1')
child.values = [1, 2, 3]
child.set_distribution()
def test_node_addition(self):
    """Two nodes added under the root should both appear as its children."""
    root = Node('rootNode')
    g = Graph(root)
    for child in ('node1', 'node2'):
        g.add_node(child, ['rootNode'])
    self.assertEqual(len(root.children), 2)
def generate_graph(self, graph_info, dataset_path, dataloader,
                   nclasses=2, type='segmentation', edge_threshold=0.5,
                   save_path=None, verbose=True, max_samples=10):
    r"""
    Constructs the entire concept graph based on the information provided.

    graph_info    : <list[json]>; [{'layer_name', 'filter_idxs',
                    'concept_name', 'description', 'node_order'}]
    dataset_path  : <str>; root directory of dataset
    dataloader    : <function>; custom loader which takes an image path and
                    returns both the image and the corresponding path
                    simultaneously
    nclasses      : <int>; number of classes, pixel wise or data wise
    type          : <str>; one among ['segmentation', 'classification']
                    (parameter name kept for backward compatibility even
                    though it shadows the builtin)
    edge_threshold: <float>; threshold value to form an edge (must be
                    truthy; an explicit 0 is rejected)
    save_path     : <str>; folder to save the generated graph in .pickle
                    format; when None, saving is skipped
    verbose       : <bool>; provide log statements
    max_samples   : <int>; maximum number of samples for expectation;
                    if -1, considers all images in provided root dir
    """
    if type.lower() not in ['segmentation', 'classification']:
        raise NotImplementedError(
            "[INFO: BioExp Graphs] allowed types are ['segmentation', 'classification']")

    # --- separate per-field lists from the json records ---------------
    layers, filter_idxs, concept_names = [], [], []
    descp, node_indexing = [], []
    for cinfo in graph_info:
        layers.append(cinfo['layer_name'])
        filter_idxs.append(cinfo['filter_idxs'])
        concept_names.append(cinfo['concept_name'])
        descp.append(cinfo['description'])
        node_indexing.append(cinfo['node_order'])

    layers = np.array(layers)
    filter_idxs = np.array(filter_idxs)
    concept_names = np.array(concept_names)
    descp = np.array(descp)
    node_indexing = np.array(node_indexing)

    if not edge_threshold:
        raise ValueError("Assign proper edge threshold")

    # concepts sorted by their explicit node_order index
    node_ordering = []
    for order in np.sort(np.unique(node_indexing)):
        node_ordering.extend(concept_names[node_indexing == order])
    node_ordering = np.array(node_ordering)

    # ------------------------------------------------------------------
    rootNode = Node('Input')
    rootNode.info = {'concept_name': 'Input Image',
                     'layer_name': 'Placeholder',
                     'filter_idxs': [0],
                     'description': 'Input Image to a network'}
    self.causal_BN = Graph(rootNode)

    def _concept_info(name):
        # metadata dict for a concept, looked up by concept name
        return {'concept_name': name,
                'layer_name': layers[concept_names == name][0],
                'filter_idxs': filter_idxs[concept_names == name][0],
                'description': descp[concept_names == name][0]}

    for idxi, nodei in zip(node_indexing, node_ordering):
        nodei_info = _concept_info(nodei)
        try:
            self.causal_BN.get_node(nodei)
            self.causal_BN.current_node.info = nodei_info
            Aexists = True
        except Exception:  # was a bare except; node simply not present yet
            Aexists = False
            if idxi == 0:
                # order-0 concepts hang directly off the input node
                self.causal_BN.add_node(nodei, parentNodes=['Input'])
                self.causal_BN.get_node(nodei)
                self.causal_BN.current_node.info = nodei_info
                Aexists = True

        # candidate children are concepts with a strictly later node_order
        later = node_indexing > idxi
        for idxj, nodej in zip(node_indexing[later], node_ordering[later]):
            nodej_info = _concept_info(nodej)
            link_info = self.get_link(nodei_info, nodej_info,
                                      dataset_path=dataset_path,
                                      loader=dataloader,
                                      max_samples=max_samples)
            try:
                self.causal_BN.get_node(nodej)
                Bexists = True
                self.causal_BN.current_node.info = nodej_info
            except Exception:
                Bexists = False

            if verbose:
                print("[INFO: BioExp Graphs] Causal Relation between: {}, {}; edge weights: {}".format(nodei, nodej, link_info))

            # connect only when the link is strong enough and the source
            # node is already part of the graph
            if link_info > edge_threshold and Aexists:
                if not Bexists:
                    self.causal_BN.add_node(nodej, parentNodes=[nodei])
                    self.causal_BN.get_node(nodej)
                    self.causal_BN.current_node.info = nodej_info
                else:
                    self.causal_BN.add_edge(nodei, nodej)

    # ------------------------------------------------------------------
    # final class nodes: attach output classes to every leaf concept
    print("[INFO: BioExp Graphs] leaf node addition]")
    for nodei in self.causal_BN.get_leafnodes():
        link_info = self.get_classlink(nodei.info,
                                       dataset_path=dataset_path,
                                       loader=dataloader,
                                       max_samples=max_samples)
        if type.lower() == 'segmentation':
            class_ = np.argmax(link_info[1:]) + 1  # removing background class
        else:
            # 'classification' — the only other value; validated at entry,
            # so the original unreachable NotImplementedError was dropped
            class_ = np.argmax(link_info)

        # connect every class tied at the maximal link weight
        for cls_ in np.arange(nclasses)[link_info == link_info[class_]]:
            nodej = 'class' + str(cls_)
            nodej_info = {'concept_name': nodej,
                          'layer_name': 'output',
                          'filter_idxs': [],
                          'description': 'Output node Class_{}'.format(cls_)}
            # BUGFIX: the original rebound `nodej` to the Node object
            # returned by get_node and then passed Node instances to
            # add_edge; add_edge takes node *names* everywhere else in
            # this codebase, so keep nodej as the name string
            try:
                self.causal_BN.get_node(nodej)
                Bexists = True
                self.causal_BN.current_node.info = nodej_info
            except Exception:
                Bexists = False

            if not Bexists:
                self.causal_BN.add_node(nodej, parentNodes=[nodei.name])
                self.causal_BN.get_node(nodej)
                self.causal_BN.current_node.info = nodej_info
            else:
                self.causal_BN.add_edge(nodei.name, nodej)

    if verbose:
        self.causal_BN.print(rootNode)

    # BUGFIX: original called os.makedirs(save_path) unconditionally, which
    # raises TypeError with the default save_path=None; also close the
    # pickle file handle (original leaked it via a bare open())
    if save_path is not None:
        os.makedirs(save_path, exist_ok=True)
        with open(os.path.join(save_path, 'causal_graph.pickle'), 'wb') as f:
            pickle.dump({'graph': self.causal_BN, 'rootNode': rootNode}, f)

    print("[INFO: BioExp Graphs] Causal Graph Generated")