def __init__(self, output=None):
    """Create an empty trust network; all matrix caches start unset."""
    # `output` is accepted for interface compatibility but not stored.
    self.nodes = OrderedSet()
    # Lazily-built matrices: all start as None until construction.
    self._node_matrix = None
    self._node_conjunctions = None
    self._fast_matrix_up = None
    self._transition_matrix = None
    self._fast_conjunctions = None
def __init__(self, ground_weight=1.0, output=None):
    """Create an empty belief network backed by an undirected graph.

    ground_weight -- conductance of the implicit edge from each node
                     to ground.
    output        -- optional stream that edge additions are logged to.
    """
    self.graph = nx.Graph()
    self.conjunctions = set()
    self.ground_weight = ground_weight
    self.nodes = OrderedSet()
    self.edges = OrderedSet()
    # Cached derived matrices; rebuilt on demand.
    self._edge_matrix = None
    self._conjunction_matrix = None
    self._node_matrix = None
    self._node_conjunctions = None
    self._edge_conductance = None
    self.output = output
def test_identity():
    """
    Identity sets are just ranges of numbers.
    """
    def check(s):
        # Index i maps to value i in both directions.
        eq_(s[5], 5)
        eq_(s.index(2), 2)
        eq_(len(s), 10)
        assert s == OrderedSet(range(10))

    iset = IdentitySet(10)
    check(iset)
    # Round-tripping through pickle preserves the same behavior.
    check(pickle.loads(pickle.dumps(iset)))
def __init__(self, output=None):
    """Create an empty trust network; matrices are built lazily."""
    # `output` is accepted for interface compatibility but not stored.
    self.nodes = OrderedSet()
    # All derived matrices start unset and are computed on demand.
    self._node_matrix = None
    self._node_conjunctions = None
    self._fast_matrix_up = None
    self._fast_matrix_down = None
    self._fast_conjunctions = None
def load(node_file, matrix_up, matrix_down, conjunction_file):
    """Reconstruct a TrustNetwork from four divisi2-saved files."""
    net = TrustNetwork()
    net.nodes = OrderedSet(divisi2.load(node_file))
    net._fast_matrix_up = divisi2.load(matrix_up)
    net._transition_matrix = divisi2.load(matrix_down)
    net._fast_conjunctions = divisi2.load(conjunction_file)
    return net
def test_delete_and_pickle():
    """
    Deleting an element doesn't affect the remaining elements' indices.
    """
    s = OrderedSet(['dog', 'cat', 'banana'])
    del s[1]
    # The deleted slot reads back as None, and later indices are unchanged.
    eq_(s[1], None)
    eq_(s.index('banana'), 2)
    # Pickling doesn't change things.
    s2 = pickle.loads(pickle.dumps(s))
    eq_(s, s2)
    eq_(s2[1], None)
    eq_(s2.index('banana'), 2)
    # The hole is not a stored None value, so None is not a member.
    # (The original repeated this assertion twice; once is enough.)
    assert None not in s2
def test_delete_and_pickle():
    """
    Deleting an element doesn't affect the remaining elements' indices.
    """
    s = OrderedSet(['dog', 'cat', 'banana'])
    del s[1]
    # The deleted slot reads back as None, and later indices are unchanged.
    eq_(s[1], None)
    eq_(s.index('banana'), 2)
    # Pickling doesn't change things.
    s2 = pickle.loads(pickle.dumps(s))
    eq_(s, s2)
    eq_(s2[1], None)
    eq_(s2.index('banana'), 2)
    # The hole is not a stored None value, so None is not a member.
    # (The original repeated this assertion twice; once is enough.)
    assert None not in s2
def test_pickle():
    """
    Test that OrderedSets can be pickled.
    """
    s = OrderedSet(['dog', 'cat', 'banana'])
    # cPickle no longer exists on Python 3; fall back to the stdlib
    # `pickle` module so this test runs on both interpreters.
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    s2 = pickle.loads(pickle.dumps(s))
    eq_(s, s2)
    eq_(s2[0], 'dog')
    eq_(s2.index('cat'), 1)
class TrustNetwork(object):
    """A trust-propagation network over a directed, weighted node graph.

    Edges are stored transposed (row = destination, column = source) in a
    sparse node matrix; "conjunctions" mark sets of edges whose sources must
    jointly support a destination.  Normalized CSR copies (`up` and `down`
    directions) are built lazily and driven through an iterative SVD-based
    hub/authority computation in `corona()`.
    """

    def __init__(self, output=None):
        # `output` is accepted but unused in this variant.
        self.nodes = OrderedSet()
        # Lazily-built matrices; None until constructed.
        self._node_matrix = None
        self._node_conjunctions = None
        self._fast_matrix_up = None
        self._fast_matrix_down = None
        self._fast_conjunctions = None

    @staticmethod
    def load(node_file, matrix_up, matrix_down, conjunction_file):
        """Rebuild a TrustNetwork from four divisi2-saved files."""
        trust = TrustNetwork()
        nodes = divisi2.load(node_file)
        trust.nodes = OrderedSet(nodes)
        trust._fast_matrix_up = divisi2.load(matrix_up)
        trust._fast_matrix_down = divisi2.load(matrix_down)
        trust._fast_conjunctions = divisi2.load(conjunction_file)
        return trust

    def add_nodes(self, nodes):
        """Register every node in `nodes`."""
        for node in nodes:
            self.nodes.add(node)

    def scan_edge(self, source, dest):
        """First pass: record both endpoints so node indices are fixed."""
        self.nodes.add(source)
        self.nodes.add(dest)

    def make_matrices(self):
        """Allocate empty (n x n) DOK matrices once all nodes are known."""
        self._node_matrix = sparse.dok_matrix(
            (len(self.nodes), len(self.nodes)))
        self._node_conjunctions = sparse.dok_matrix(
            (len(self.nodes), len(self.nodes)))

    def add_edge(self, source, dest, weight):
        """Record a weighted edge, stored transposed (row = dest)."""
        source_idx = self.nodes.index(source)
        dest_idx = self.nodes.index(dest)
        #self._node_matrix[source_idx, dest_idx] = weight
        self._node_matrix[dest_idx, source_idx] = weight

    def add_conjunction(self, sources, dest, weight):
        """Add an edge from each source to `dest`, marked as one conjunction."""
        dest_idx = self.nodes.index(dest)
        for i, source in enumerate(sources):
            self.add_edge(source, dest, weight)
            source_idx = self.nodes.index(source)
            self._node_conjunctions[dest_idx, source_idx] = 1.0

    def add_conjunction_piece(self, source, dest, weight):
        """Add a single, already-expanded conjunction edge."""
        source_idx = self.nodes.index(source)
        dest_idx = self.nodes.index(dest)
        self._node_matrix[dest_idx, source_idx] = weight
        self._node_conjunctions[dest_idx, source_idx] = 1.0

    def make_fast_matrix(self):
        """Build row-normalized (down) and column-normalized (up) CSR copies."""
        self._fast_matrix = self._node_matrix.tocsr()
        # NOTE(review): _fast_matrix is snapshotted BEFORE the EPS smoothing
        # below, so these in-place updates to _node_matrix never reach the
        # normalized copies -- confirm this ordering is intended.
        for i in xrange(self._node_matrix.shape[0]):
            self._node_matrix[0, i] += EPS
            self._node_matrix[i, 0] += EPS
        abs_matrix = np.abs(self._fast_matrix)
        # Scale each row by 1 / (absolute row sum); EPS guards empty rows.
        rowsums = 1.0 / (EPS + (abs_matrix).sum(axis=1))
        rowsums = np.asarray(rowsums)[:, 0]
        rowdiag = make_diag(rowsums)
        self._fast_matrix_down = (rowdiag * self._fast_matrix).tocsr()
        #self._fast_matrix_up = self._fast_matrix_down.T.tocsr()
        # The "up" direction: transpose scaled by 1 / (absolute column sum).
        colsums = 1.0 / (EPS + (abs_matrix).sum(axis=0))
        colsums = np.asarray(colsums)[0, :]
        coldiag = make_diag(colsums)
        self._fast_matrix_up = (coldiag * self._fast_matrix.T).tocsr()

    def make_fast_conjunctions(self):
        """Scale each conjunction row by 1/nnz so a row averages its inputs."""
        csr_conjunctions = self._node_conjunctions.tocsr()
        n = csr_conjunctions.shape[0]
        scale_vec = np.zeros((n, ))
        for row in xrange(n):
            nnz = csr_conjunctions[row].nnz
            if nnz > 0:
                scale_vec[row] = 1.0 / nnz
        self._fast_conjunctions = make_diag(scale_vec) * csr_conjunctions

    def get_matrices(self):
        """Lazily build and return the (up, down) propagation matrices."""
        if self._fast_matrix_up is None:
            self.make_fast_matrix()
        return self._fast_matrix_up, self._fast_matrix_down

    def get_conjunctions(self):
        """Lazily build and return the normalized conjunction matrix."""
        if self._fast_conjunctions is None:
            self.make_fast_conjunctions()
        return self._fast_conjunctions

    def corona(self):
        """Iterate an SVD-based activation spread; return (node, hub,
        authority) triples for every node."""
        cmat = self.get_conjunctions()
        mat_up, mat_down = self.get_matrices()
        hub = np.ones((mat_up.shape[0], ))
        authority = np.ones((mat_up.shape[0], ))
        for iter in xrange(100):
            vec = authority + hub
            vec /= np.max(vec)
            root = vec * 0
            root[0] = 1.0
            # Dampen conjunction destinations: compare the parallel
            # (harmonic-style) combination of inputs against their plain sum.
            conj_sums = cmat * vec
            conj_par = 1.0 / (np.maximum(EPS, cmat *
                                         (1.0 / np.maximum(EPS, vec))))
            conj_factor = np.minimum(1.0, conj_par / (conj_sums + EPS))
            conj_diag = make_diag(conj_factor)
            combined = conj_diag * (mat_up + mat_down) + make_diag(
                np.ones(len(vec)))
            #combined = (mat_up + mat_down) * 0.5
            u, sigma, v = sparse_svd(combined, k=4)
            activation = np.dot(u, u[0])
            #w, v = eigen(combined, k=3, v0=root)
            #activation = v[:, np.argmax(w)]
            #activation = (activation / (activation[0]+EPS)).real
            print activation
            print sigma
            print conj_factor
            print
            # Accumulate hub/authority scores and renormalize each pass.
            hub += self._fast_matrix.T * conj_diag * activation
            authority += conj_diag * self._fast_matrix * activation
            hub /= np.max(hub)
            authority /= np.max(authority)
        return zip(self.nodes, hub, authority)
class TrustNetwork(object):
    """A trust-propagation network over a directed, weighted node graph.

    Edges live transposed (row = destination, column = source) in a sparse
    node matrix; "conjunctions" mark edge groups whose sources must jointly
    support a destination.  Row/column-normalized CSR copies are built
    lazily and iterated by the SVD-based `corona()` computation.
    """

    def __init__(self, output=None):
        # `output` is accepted but unused in this variant.
        self.nodes = OrderedSet()
        # Lazily-built matrices; None until constructed.
        self._node_matrix = None
        self._node_conjunctions = None
        self._fast_matrix_up = None
        self._fast_matrix_down = None
        self._fast_conjunctions = None

    @staticmethod
    def load(node_file, matrix_up, matrix_down, conjunction_file):
        """Rebuild a TrustNetwork from four divisi2-saved files."""
        trust = TrustNetwork()
        nodes = divisi2.load(node_file)
        trust.nodes = OrderedSet(nodes)
        trust._fast_matrix_up = divisi2.load(matrix_up)
        trust._fast_matrix_down = divisi2.load(matrix_down)
        trust._fast_conjunctions = divisi2.load(conjunction_file)
        return trust

    def add_nodes(self, nodes):
        """Register every node in `nodes`."""
        for node in nodes:
            self.nodes.add(node)

    def scan_edge(self, source, dest):
        """First pass: record both endpoints so node indices are fixed."""
        self.nodes.add(source)
        self.nodes.add(dest)

    def make_matrices(self):
        """Allocate empty (n x n) DOK matrices once all nodes are known."""
        self._node_matrix = sparse.dok_matrix((len(self.nodes), len(self.nodes)))
        self._node_conjunctions = sparse.dok_matrix((len(self.nodes), len(self.nodes)))

    def add_edge(self, source, dest, weight):
        """Record a weighted edge, stored transposed (row = dest)."""
        source_idx = self.nodes.index(source)
        dest_idx = self.nodes.index(dest)
        #self._node_matrix[source_idx, dest_idx] = weight
        self._node_matrix[dest_idx, source_idx] = weight

    def add_conjunction(self, sources, dest, weight):
        """Add an edge from each source to `dest`, marked as one conjunction."""
        dest_idx = self.nodes.index(dest)
        for i, source in enumerate(sources):
            self.add_edge(source, dest, weight)
            source_idx = self.nodes.index(source)
            self._node_conjunctions[dest_idx, source_idx] = 1.0

    def add_conjunction_piece(self, source, dest, weight):
        """Add a single, already-expanded conjunction edge."""
        source_idx = self.nodes.index(source)
        dest_idx = self.nodes.index(dest)
        self._node_matrix[dest_idx, source_idx] = weight
        self._node_conjunctions[dest_idx, source_idx] = 1.0

    def make_fast_matrix(self):
        """Build row-normalized (down) and column-normalized (up) CSR copies."""
        self._fast_matrix = self._node_matrix.tocsr()
        # NOTE(review): _fast_matrix is snapshotted BEFORE the EPS smoothing
        # below, so the in-place updates to _node_matrix never reach the
        # normalized copies -- confirm this ordering is intended.
        for i in xrange(self._node_matrix.shape[0]):
            self._node_matrix[0, i] += EPS
            self._node_matrix[i, 0] += EPS
        abs_matrix = np.abs(self._fast_matrix)
        # Scale each row by 1 / (absolute row sum); EPS guards empty rows.
        rowsums = 1.0 / (EPS + (abs_matrix).sum(axis=1))
        rowsums = np.asarray(rowsums)[:,0]
        rowdiag = make_diag(rowsums)
        self._fast_matrix_down = (rowdiag * self._fast_matrix).tocsr()
        #self._fast_matrix_up = self._fast_matrix_down.T.tocsr()
        # The "up" direction: transpose scaled by 1 / (absolute column sum).
        colsums = 1.0 / (EPS + (abs_matrix).sum(axis=0))
        colsums = np.asarray(colsums)[0,:]
        coldiag = make_diag(colsums)
        self._fast_matrix_up = (coldiag * self._fast_matrix.T).tocsr()

    def make_fast_conjunctions(self):
        """Scale each conjunction row by 1/nnz so a row averages its inputs."""
        csr_conjunctions = self._node_conjunctions.tocsr()
        n = csr_conjunctions.shape[0]
        scale_vec = np.zeros((n,))
        for row in xrange(n):
            nnz = csr_conjunctions[row].nnz
            if nnz > 0:
                scale_vec[row] = 1.0/nnz
        self._fast_conjunctions = make_diag(scale_vec) * csr_conjunctions

    def get_matrices(self):
        """Lazily build and return the (up, down) propagation matrices."""
        if self._fast_matrix_up is None:
            self.make_fast_matrix()
        return self._fast_matrix_up, self._fast_matrix_down

    def get_conjunctions(self):
        """Lazily build and return the normalized conjunction matrix."""
        if self._fast_conjunctions is None:
            self.make_fast_conjunctions()
        return self._fast_conjunctions

    def corona(self):
        """Iterate an SVD-based activation spread; return (node, hub,
        authority) triples for every node."""
        cmat = self.get_conjunctions()
        mat_up, mat_down = self.get_matrices()
        hub = np.ones((mat_up.shape[0],))
        authority = np.ones((mat_up.shape[0],))
        for iter in xrange(100):
            vec = authority + hub
            vec /= np.max(vec)
            root = vec * 0
            root[0] = 1.0
            # Dampen conjunction destinations: compare the parallel
            # (harmonic-style) combination of inputs against their plain sum.
            conj_sums = cmat * vec
            conj_par = 1.0/(np.maximum(EPS, cmat * (1.0 / np.maximum(EPS, vec))))
            conj_factor = np.minimum(1.0, conj_par / (conj_sums+EPS))
            conj_diag = make_diag(conj_factor)
            combined = conj_diag * (mat_up + mat_down) + make_diag(np.ones(len(vec)))
            #combined = (mat_up + mat_down) * 0.5
            u, sigma, v = sparse_svd(combined, k=4)
            activation = np.dot(u, u[0])
            #w, v = eigen(combined, k=3, v0=root)
            #activation = v[:, np.argmax(w)]
            #activation = (activation / (activation[0]+EPS)).real
            print activation
            print sigma
            print conj_factor
            print
            # Accumulate hub/authority scores and renormalize each pass.
            hub += self._fast_matrix.T * conj_diag * activation
            authority += conj_diag * self._fast_matrix * activation
            hub /= np.max(hub)
            authority /= np.max(authority)
        return zip(self.nodes, hub, authority)
def __init__(self, output=None):
    """Create an empty belief network; caches start unpopulated."""
    # `output` is accepted for interface compatibility but not stored.
    self.nodes = OrderedSet()
    # Maps node name -> Node object.
    self.node_objs = {}
    # Derived matrices, built lazily.
    self._node_conjunctions = None
    self._fast_matrix = None
    self._fast_conjunctions = None
class BeliefNetwork(object):
    """A belief network over Node objects, trained by random walks.

    Each node keeps its own outgoing/incoming edge lists; `iterate()` runs
    repeated random walks from every node and strengthens a low-rank
    `justification` matrix via Hebbian updates along each walk.
    """

    def __init__(self, output=None):
        # `output` is accepted but unused in this variant.
        self.nodes = OrderedSet()
        self.node_objs = {} #dict mapping node name to Node object
        # Lazily-built matrices; None until constructed.
        self._node_conjunctions = None
        self._fast_matrix = None
        self._fast_conjunctions = None

    @staticmethod
    def load(node_file, matrix_file, conjunction_file):
        """Rebuild a BeliefNetwork from three divisi2-saved files."""
        trust = BeliefNetwork()
        nodes = divisi2.load(node_file)
        for node in nodes:
            trust.nodes.add(node)
            trust.node_objs[node] = Node(node)
        trust._fast_matrix = divisi2.load(matrix_file)
        trust._fast_conjunctions = divisi2.load(conjunction_file)
        return trust

    def add_nodes(self, nodes):
        """Register nodes and create their Node wrappers."""
        for node in nodes:
            self.nodes.add(node)
            self.node_objs[node] = Node(node)

    def scan_edge(self, source, dest):
        """First pass: record both endpoints and (re)create their Nodes."""
        self.nodes.add(source)
        self.nodes.add(dest)
        self.node_objs[source] = Node(source)
        self.node_objs[dest] = Node(dest)

    def initialize_matrices(self):
        """Initialize the rank-3 symmetric justification matrix near zero."""
        self.justification = ReconstructedMatrix.make_random(self.nodes,
                                                             self.nodes, 3)
        # Tie right to left.T so the matrix stays symmetric, then flatten
        # both factors to a small constant starting point.
        self.justification.right = self.justification.left.T
        self.justification.left.fill(0.01)
        self.justification.right.fill(0.01)

    def add_edge(self, source, dest, weight, conjunction = False):
        """Add a directed edge; conjunction edges also record the reverse
        (incoming) link on the destination node."""
        self.node_objs[source].add_outgoing_edge(dest, weight)
        if conjunction:
            self.node_objs[dest].add_incoming_edge(source, weight) #weight here?

    def add_conjunction(self, sources, dest, weight):
        """Add each source->dest edge as part of a conjunction."""
        for i, source in enumerate(sources):
            self.add_edge(source, dest, weight, conjunction = True)

    def add_conjunction_piece(self, source, dest, weight):
        """Add a single conjunction edge."""
        self.add_edge(source, dest, weight, conjunction = True)

    def iterate(self, n=1000):
        """Run `n` rounds of random walks from every node; return the
        learned justification matrix."""
        for iter in xrange(n):
            for node in self.nodes:
                print node, '=>',
                self.activate_node(node)
                print
        return self.justification

    def activate_node(self, node):
        """Random-walk up to 20 steps from `node`, combining edge weights in
        parallel and applying Hebbian updates to `justification` at each hop."""
        index = self.nodes.index(node)
        activation = None
        # Self-support: the start node justifies itself fully.
        self.learn_justification(index, index, 1.0)
        curr = node
        for iter in xrange(20):
            #get the nodes that this node has outgoing edges to
            nz = self.node_objs[curr].outgoing_edges
            if len(nz) == 0:
                break
            choice = random.choice(xrange(len(nz)))
            next, weight = nz[choice]
            # Combine the new edge weight with the accumulated activation
            # as parallel resistances (first edge starts the chain).
            if activation is None:
                activation = weight
            else:
                activation = parallel(activation, weight)
            assert next != curr
            assert activation is not None
            # Conjunction inputs other than the current node weaken the
            # activation by their current justification strength.
            conj = self.node_objs[next].incoming_edges
            for con in conj:
                conj_index = self.nodes.index(con[0])
                if conj_index != curr:
                    #FIXME: unsure of how to calculate this value
                    factor = self.justification[index, conj_index] #* conj[0, conj_index]
                    activation = parallel(activation, factor)
            print self.nodes[self.nodes.index(next)], ('(%4.4f) =>' % activation),
            self.learn_justification(index, self.nodes.index(next), activation)
            if activation <= 0:
                break
            curr = next

    def learn_justification(self, source, target, activation=1.0):
        """Hebbian update: strengthen justification[source, target]."""
        self.justification.hebbian_step(source, target, activation)
class TrustNetwork(object):
    """A trust-propagation network using sqrt-normalized transition matrices.

    Edges are stored transposed (row = destination, column = source);
    conjunction edges are marked in a parallel matrix.  `corona()` runs an
    iterative rank-1 SVD computation with convergence detection and returns
    hub/authority scores per node.
    """

    def __init__(self, output=None):
        # `output` is accepted but unused in this variant.
        self.nodes = OrderedSet()
        # Lazily-built matrices; None until constructed.
        # NOTE(review): get_matrices() tests self._final_matrix, which is
        # only ever assigned inside make_fast_matrix() -- a fresh instance
        # would raise AttributeError there.  Confirm whether _final_matrix
        # should be initialized to None here.
        self._node_matrix = None
        self._node_conjunctions = None
        self._fast_matrix_up = None
        self._transition_matrix = None
        self._fast_conjunctions = None

    @staticmethod
    def load(node_file, matrix_up, matrix_down, conjunction_file):
        """Rebuild a TrustNetwork from four divisi2-saved files."""
        trust = TrustNetwork()
        nodes = divisi2.load(node_file)
        trust.nodes = OrderedSet(nodes)
        trust._fast_matrix_up = divisi2.load(matrix_up)
        trust._transition_matrix = divisi2.load(matrix_down)
        trust._fast_conjunctions = divisi2.load(conjunction_file)
        return trust

    def add_nodes(self, nodes):
        """Register every node in `nodes`."""
        for node in nodes:
            self.nodes.add(node)

    def scan_edge(self, source, dest):
        """First pass: record both endpoints so node indices are fixed."""
        self.nodes.add(source)
        self.nodes.add(dest)

    def make_matrices(self):
        """Allocate empty (n x n) DOK matrices once all nodes are known."""
        self._node_matrix = sparse.dok_matrix((len(self.nodes), len(self.nodes)))
        self._node_conjunctions = sparse.dok_matrix((len(self.nodes), len(self.nodes)))

    def add_edge(self, source, dest, weight):
        """Record a weighted edge, stored transposed (row = dest)."""
        source_idx = self.nodes.index(source)
        dest_idx = self.nodes.index(dest)
        #self._node_matrix[source_idx, dest_idx] = weight
        self._node_matrix[dest_idx, source_idx] = weight

    def add_conjunction(self, sources, dest, weight):
        """Add an edge from each source to `dest`, marked as one conjunction."""
        dest_idx = self.nodes.index(dest)
        for i, source in enumerate(sources):
            self.add_edge(source, dest, weight)
            source_idx = self.nodes.index(source)
            self._node_conjunctions[dest_idx, source_idx] = 1.0

    def add_conjunction_piece(self, source, dest, weight):
        """Add a single, already-expanded conjunction edge."""
        source_idx = self.nodes.index(source)
        dest_idx = self.nodes.index(dest)
        self._node_matrix[dest_idx, source_idx] = weight
        self._node_conjunctions[dest_idx, source_idx] = 1.0

    def make_fast_matrix(self):
        """Build transition matrices scaled by 1/sqrt of squared row/column
        sums (an L2-style normalization), plus their transposes."""
        self._fast_matrix = self._node_matrix.tocsr()
        # Smooth row/column 0 (the root) toward every node.
        # NOTE(review): the _fast_matrix snapshot above is taken BEFORE this
        # smoothing, so only csr_matrix below sees it -- confirm intended.
        for i in xrange(self._node_matrix.shape[0]):
            self._node_matrix[0, i] += 1.0/self._node_matrix.shape[0]
            self._node_matrix[i, 0] += 1.0/self._node_matrix.shape[0]
        csr_matrix = self._node_matrix.tocsr()
        # Element-wise square: normalization uses sums of squared weights.
        abs_matrix = csr_matrix.multiply(csr_matrix)
        rowsums = np.sqrt(1.0 / (EPS + (abs_matrix).sum(axis=1)))
        rowsums = np.asarray(rowsums)[:,0]
        rowdiag = make_diag(rowsums)
        colsums = np.sqrt(1.0 / (EPS + (abs_matrix).sum(axis=0)))
        colsums = np.asarray(colsums)[0,:]
        coldiag = make_diag(colsums)
        self._transition_matrix = (csr_matrix * coldiag).tocsr()
        self._transition_matrix_T = self._transition_matrix.T.tocsr()
        # "Final" matrices use the unsmoothed snapshot with column scaling.
        self._final_matrix = (self._fast_matrix * coldiag).tocsr()
        self._final_matrix_T = (coldiag * self._fast_matrix.T).tocsr()

    def make_fast_conjunctions(self):
        """Scale each conjunction row by 1/nnz so a row averages its inputs."""
        csr_conjunctions = self._node_conjunctions.tocsr()
        n = csr_conjunctions.shape[0]
        scale_vec = np.zeros((n,))
        for row in xrange(n):
            nnz = csr_conjunctions[row].nnz
            if nnz > 0:
                scale_vec[row] = 1.0/nnz
        self._fast_conjunctions = make_diag(scale_vec) * csr_conjunctions

    def get_matrices(self):
        """Lazily build and return (up, down) = (transition.T, transition)."""
        if self._final_matrix is None:
            self.make_fast_matrix()
        return self._transition_matrix_T, self._transition_matrix

    def get_conjunctions(self):
        """Lazily build and return the normalized conjunction matrix."""
        if self._fast_conjunctions is None:
            self.make_fast_conjunctions()
        return self._fast_conjunctions

    def corona(self):
        """Iterate a rank-1 SVD activation spread until convergence; return
        (node, hub, authority) triples for every node."""
        cmat = self.get_conjunctions()
        mat_up, mat_down = self.get_matrices()
        # Small uniform starting scores.
        hub = np.ones((mat_up.shape[0],)) / mat_up.shape[0] / 100
        authority = np.ones((mat_up.shape[0],)) / mat_up.shape[0] / 100
        prev_activation = np.zeros((mat_up.shape[0],))
        prev_err = 1.0
        for iter in xrange(100):
            vec = authority + hub
            vec /= np.max(vec)
            root = np.zeros(len(vec), 'f')
            root[0] = 1.0
            # Dampen conjunction destinations: compare the parallel
            # (harmonic-style) combination of inputs against their plain sum.
            conj_sums = cmat * vec
            conj_par = 1.0/(np.maximum(EPS, cmat * (1.0 / np.maximum(EPS, vec))))
            conj_factor = np.minimum(1.0, conj_par / (conj_sums+EPS))
            conj_diag = make_diag(conj_factor)
            # Blend the damped propagation with a 0.5 identity component.
            combined = conj_diag * (mat_up + mat_down) * 0.25 + make_diag(np.ones(len(vec))*0.5)
            #combined = (mat_up + mat_down) * 0.5
            u, sigma, v = sparse_svd(combined, k=1)
            activation = u[:, 0]
            #activation = np.dot(u, u[0])
            #w, v = eigen(combined.T, k=1, v0=root, which='LR')
            #activation = v[:, np.argmax(w)].real
            # Fix sign and L1-normalize the leading singular vector.
            activation *= np.sign(np.sum(activation))
            activation /= (np.sum(np.abs(activation)) + EPS)
            hub += (hub + self._final_matrix_T * conj_diag * activation) / 2
            authority += (authority + conj_diag * self._final_matrix * activation) / 2
            print activation
            err = np.max(np.abs(activation - prev_activation))\
                / np.max(np.abs(activation))
            print err
            if iter >= 3 and err + prev_err < 1e-9:
                print "converged on iteration %d" % iter
                break
            prev_err = err
            prev_activation = activation.copy()
        print sigma
        print conj_factor
        print
        # Final scores come from the last iteration's activation alone.
        hub = self._final_matrix_T * conj_diag * activation
        authority = conj_diag * self._final_matrix * activation
        return zip(self.nodes, hub, authority)
class BeliefNetwork(object):
    """A belief network solved by an electrical-circuit analogy.

    Edges are conductances in an undirected graph; every node also gets an
    implicit edge to ground.  Conjunctions put their inputs in series.
    `run_analog()` repeatedly solves the resulting linear system with
    conjugate gradients until node conductances converge.
    """

    def __init__(self, ground_weight=1.0, output=None):
        self.graph = nx.Graph()
        self.conjunctions = set()
        # Conductance of each node's implicit edge to ground.
        self.ground_weight = ground_weight
        self.nodes = OrderedSet()
        self.edges = OrderedSet()
        # Cached derived matrices; rebuilt on demand by update_arrays().
        self._edge_matrix = None
        self._conjunction_matrix = None
        self._node_matrix = None
        self._node_conjunctions = None
        self._edge_conductance = None
        # Optional stream that edge additions are logged to.
        self.output = output

    def add_edge(self, source, dest, weight, dependencies=None):
        """Add a weighted edge; `dependencies` records sibling conjunction
        sources, and the edge is logged to `output` when one is set."""
        self.nodes.add(source)
        self.nodes.add(dest)
        self.edges.add((source, dest))
        props = {"weight": weight}
        if dependencies is not None:
            props["dependencies"] = dependencies
        self.graph.add_edge(source, dest, **props)
        if self.output:
            self.output.write("%s\t%s\t%r\n" % (source, dest, props))

    def add_conjunction(self, sources, dest, weight):
        """Add edges from every source to `dest`, all tagged with the full
        source tuple as dependencies."""
        sources = tuple(sources)
        self.conjunctions.add((sources, dest, weight))
        for i, source in enumerate(sources):
            self.add_edge(source, dest, weight, dependencies=sources)

    def ordered_nodes(self):
        """Return the graph's nodes in sorted order."""
        return sorted(self.graph.nodes())

    def update_arrays(self):
        """Build the (edges x nodes) incidence matrix, the per-edge
        conductance vector, and the conjunction matrix; cache all three."""
        # One extra row per node for its edge to ground.
        n_edges = len(self.edges) + len(self.nodes)
        n_nodes = len(self.nodes)
        offset = len(self.edges)
        mat = sparse.dok_matrix((n_edges, n_nodes))
        conjunction_mat = sparse.dok_matrix((n_edges, n_nodes))
        vec = np.zeros((n_edges,))
        for source, dest in self.graph.edges():
            # networkx may return the pair in either order; canonicalize
            # to the orientation stored in self.edges.
            if (source, dest) not in self.edges:
                (source, dest) = (dest, source)
            edge_num = self.edges.index((source, dest))
            weight = self.graph.get_edge_data(source, dest)["weight"]
            source_idx = self.nodes.index(source)
            mat[edge_num, source_idx] = -1.0
            dest_idx = self.nodes.index(dest)
            mat[edge_num, dest_idx] = 1.0
            vec[edge_num] = weight
        # Add edges to ground
        for node in self.graph.nodes():
            node_num = self.nodes.index(node)
            adjusted_node_num = node_num + offset
            mat[adjusted_node_num, node_num] = -1.0
            vec[adjusted_node_num] = self.ground_weight
        # Make matrix of conjunctions, edges -> input nodes
        for sources, dest, weight in self.conjunctions:
            edge_indices = []
            node_indices = []
            for source in sources:
                if (source, dest) in self.edges:
                    edge_num = self.edges.index((source, dest))
                else:
                    edge_num = self.edges.index((dest, source))
                edge_indices.append(edge_num)
                node_indices.append(self.nodes.index(source))
            conjunction_mat[edge_indices, node_indices] = 1
            # for node_index in node_indices:
            #     unconnected_edges = [e for e in edge_indices if mat[e, node_index] == 0.0]
        self._edge_matrix = mat.tocsr()
        self._edge_matrix_transpose = mat.T.tocsr()
        self._edge_conductance = vec
        self._conjunction_matrix = conjunction_mat.tocsr()
        return mat, vec

    def make_node_matrix(self):
        """Cache the graph's (nodes x nodes) adjacency matrix."""
        n_nodes = len(self.nodes)
        self._node_matrix = nx.to_scipy_sparse_matrix(self.graph, self.nodes)

    def make_node_conjunctions(self):
        """Cache a (dest x source) 0/1 matrix of conjunction memberships."""
        n_nodes = len(self.nodes)
        node_conjunctions = sparse.dok_matrix((n_nodes, n_nodes))
        for sources, dest, weight in self.conjunctions:
            node_indices = [self.nodes.index(source) for source in sources]
            node_conjunctions[self.nodes.index(dest), node_indices] = 1.0
        self._node_conjunctions = node_conjunctions.tocsr()

    def get_node_matrix(self):
        """Lazily build and return the node adjacency matrix."""
        if self._node_matrix is None:
            self.make_node_matrix()
        return self._node_matrix

    def get_node_conjunctions(self):
        """Lazily build and return the node conjunction matrix."""
        if self._node_conjunctions is None:
            self.make_node_conjunctions()
        return self._node_conjunctions

    def spreading_activation(self, vec=None, root=None):
        """Spread activation for 100 iterations, damping conjunction
        destinations; normalize each pass and return the final vector."""
        if vec is None:
            vec = np.ones((len(self.nodes),))
        cmat = self.get_node_conjunctions()
        nmat = self.get_node_matrix()
        for iter in xrange(100):
            # Compare the parallel combination of a node's conjunction
            # inputs with their plain sum; never amplify above 1.
            conj_sums = cmat * vec
            conj_par = 1.0 / (cmat * (1.0 / np.maximum(0, vec)))
            conj_factor = np.minimum(1.0, conj_par / conj_sums)
            newvec = nmat.dot(vec) * conj_factor + vec
            # Keep the root's sign positive.
            if root is not None and newvec[self.nodes.index(root)] < 0.0:
                newvec = -newvec
            newvec /= np.max(newvec)
            print newvec
            vec = newvec
        return vec

    def get_edge_matrix(self):
        """Lazily build and return the incidence matrix."""
        if self._edge_matrix is None:
            self.update_arrays()
        return self._edge_matrix

    def get_edge_matrix_transpose(self):
        """Lazily build and return the transposed incidence matrix."""
        if self._edge_matrix is None:
            self.update_arrays()
        return self._edge_matrix_transpose

    def get_conductance(self):
        """Lazily build and return the per-edge conductance vector."""
        if self._edge_conductance is None:
            self.update_arrays()
        return self._edge_conductance

    def adjusted_conductances(self, equiv_conductances):
        """Combine each edge's own resistance in series with the equivalent
        resistances of its conjunction inputs; return new conductances."""
        assert len(equiv_conductances) == len(self.nodes)
        equiv_resistances = 1.0 / np.maximum(0, equiv_conductances)
        edge_resistances = 1.0 / self._edge_conductance
        combined_resistances = self._conjunction_matrix.dot(equiv_resistances)
        # Series combination: resistances add.
        new_resistances = edge_resistances + combined_resistances
        adjusted_conductances = 1.0 / new_resistances
        print adjusted_conductances
        return adjusted_conductances

    def get_system_matrix(self, conductances):
        """Return the Laplacian-style operator A.T * diag(G) * A."""
        A_T = self.get_edge_matrix_transpose()
        A = self.get_edge_matrix()
        G = self.adjusted_conductances(conductances)
        return make_product_operator(A_T, make_diag(G), A)

    def solve_system(self, known_conductances, current_source):
        """
        Get the conductance from root to each node by solving the
        electrical system.
        """
        # Inject one unit of current at the source node.
        current = np.zeros((len(self.nodes),))
        current[current_source] = 1.0
        system = self.get_system_matrix(known_conductances)
        A = self.get_edge_matrix()
        # Solve the sparse system of linear equations using cg
        new_potentials = sparse_linalg.cg(system, current)[0]
        # A = edges by nodes
        currents = -A.dot(new_potentials)
        potential_differences = new_potentials[current_source] - new_potentials
        current_magnitude = np.abs(self.get_edge_matrix_transpose()).dot(currents)
        # Unit current / potential difference = equivalent conductance.
        conductance = 1.0 / potential_differences
        return conductance

    def run_analog(self, root, epsilon=1e-6):
        """Iterate solve_system from `root` until the conductances stop
        changing (clamped comparison); return (node, conductance) pairs."""
        conductance = np.ones((len(self.nodes),))
        converged = False
        root_index = self.nodes.index(root)
        for i in xrange(100):
            new_conductance = self.solve_system(conductance, root_index)
            # Clamp before differencing so near-infinite conductances
            # (the root itself) don't dominate the convergence test.
            diff = np.minimum(conductance, 1000000.0) - np.minimum(new_conductance, 1000000.0)
            conductance = new_conductance
            print conductance
            if np.linalg.norm(diff) < epsilon:
                converged = True
                break
        if not converged:
            print "Warning: failed to converge"
        return zip(self.nodes, conductance)
class BeliefNetwork(object):
    """A belief network solved by an electrical-circuit analogy.

    Edges are conductances in an undirected graph; every node also gets an
    implicit edge to ground.  Conjunctions put their inputs in series.
    `run_analog()` repeatedly solves the resulting linear system with
    conjugate gradients until node conductances converge.
    """

    def __init__(self, ground_weight=1.0, output=None):
        self.graph = nx.Graph()
        self.conjunctions = set()
        # Conductance of each node's implicit edge to ground.
        self.ground_weight = ground_weight
        self.nodes = OrderedSet()
        self.edges = OrderedSet()
        # Cached derived matrices; rebuilt on demand by update_arrays().
        self._edge_matrix = None
        self._conjunction_matrix = None
        self._node_matrix = None
        self._node_conjunctions = None
        self._edge_conductance = None
        # Optional stream that edge additions are logged to.
        self.output = output

    def add_edge(self, source, dest, weight, dependencies=None):
        """Add a weighted edge; `dependencies` records sibling conjunction
        sources, and the edge is logged to `output` when one is set."""
        self.nodes.add(source)
        self.nodes.add(dest)
        self.edges.add((source, dest))
        props = {'weight': weight}
        if dependencies is not None:
            props['dependencies'] = dependencies
        self.graph.add_edge(source, dest, **props)
        if self.output:
            self.output.write("%s\t%s\t%r\n" % (source, dest, props))

    def add_conjunction(self, sources, dest, weight):
        """Add edges from every source to `dest`, all tagged with the full
        source tuple as dependencies."""
        sources = tuple(sources)
        self.conjunctions.add((sources, dest, weight))
        for i, source in enumerate(sources):
            self.add_edge(source, dest, weight, dependencies=sources)

    def ordered_nodes(self):
        """Return the graph's nodes in sorted order."""
        return sorted(self.graph.nodes())

    def update_arrays(self):
        """Build the (edges x nodes) incidence matrix, the per-edge
        conductance vector, and the conjunction matrix; cache all three."""
        # One extra row per node for its edge to ground.
        n_edges = len(self.edges) + len(self.nodes)
        n_nodes = len(self.nodes)
        offset = len(self.edges)
        mat = sparse.dok_matrix((n_edges, n_nodes))
        conjunction_mat = sparse.dok_matrix((n_edges, n_nodes))
        vec = np.zeros((n_edges, ))
        for source, dest in self.graph.edges():
            # networkx may return the pair in either order; canonicalize
            # to the orientation stored in self.edges.
            if (source, dest) not in self.edges:
                (source, dest) = (dest, source)
            edge_num = self.edges.index((source, dest))
            weight = self.graph.get_edge_data(source, dest)['weight']
            source_idx = self.nodes.index(source)
            mat[edge_num, source_idx] = -1.0
            dest_idx = self.nodes.index(dest)
            mat[edge_num, dest_idx] = 1.0
            vec[edge_num] = weight
        # Add edges to ground
        for node in self.graph.nodes():
            node_num = self.nodes.index(node)
            adjusted_node_num = node_num + offset
            mat[adjusted_node_num, node_num] = -1.0
            vec[adjusted_node_num] = self.ground_weight
        # Make matrix of conjunctions, edges -> input nodes
        for sources, dest, weight in self.conjunctions:
            edge_indices = []
            node_indices = []
            for source in sources:
                if (source, dest) in self.edges:
                    edge_num = self.edges.index((source, dest))
                else:
                    edge_num = self.edges.index((dest, source))
                edge_indices.append(edge_num)
                node_indices.append(self.nodes.index(source))
            conjunction_mat[edge_indices, node_indices] = 1
            #for node_index in node_indices:
            #    unconnected_edges = [e for e in edge_indices if mat[e, node_index] == 0.0]
        self._edge_matrix = mat.tocsr()
        self._edge_matrix_transpose = mat.T.tocsr()
        self._edge_conductance = vec
        self._conjunction_matrix = conjunction_mat.tocsr()
        return mat, vec

    def make_node_matrix(self):
        """Cache the graph's (nodes x nodes) adjacency matrix."""
        n_nodes = len(self.nodes)
        self._node_matrix = nx.to_scipy_sparse_matrix(self.graph, self.nodes)

    def make_node_conjunctions(self):
        """Cache a (dest x source) 0/1 matrix of conjunction memberships."""
        n_nodes = len(self.nodes)
        node_conjunctions = sparse.dok_matrix((n_nodes, n_nodes))
        for sources, dest, weight in self.conjunctions:
            node_indices = [self.nodes.index(source) for source in sources]
            node_conjunctions[self.nodes.index(dest), node_indices] = 1.0
        self._node_conjunctions = node_conjunctions.tocsr()

    def get_node_matrix(self):
        """Lazily build and return the node adjacency matrix."""
        if self._node_matrix is None:
            self.make_node_matrix()
        return self._node_matrix

    def get_node_conjunctions(self):
        """Lazily build and return the node conjunction matrix."""
        if self._node_conjunctions is None:
            self.make_node_conjunctions()
        return self._node_conjunctions

    def spreading_activation(self, vec=None, root=None):
        """Spread activation for 100 iterations, damping conjunction
        destinations; normalize each pass and return the final vector."""
        if vec is None:
            vec = np.ones((len(self.nodes), ))
        cmat = self.get_node_conjunctions()
        nmat = self.get_node_matrix()
        for iter in xrange(100):
            # Compare the parallel combination of a node's conjunction
            # inputs with their plain sum; never amplify above 1.
            conj_sums = cmat * vec
            conj_par = 1.0 / (cmat * (1.0 / np.maximum(0, vec)))
            conj_factor = np.minimum(1.0, conj_par / conj_sums)
            newvec = nmat.dot(vec) * conj_factor + vec
            # Keep the root's sign positive.
            if root is not None and newvec[self.nodes.index(root)] < 0.0:
                newvec = -newvec
            newvec /= np.max(newvec)
            print newvec
            vec = newvec
        return vec

    def get_edge_matrix(self):
        """Lazily build and return the incidence matrix."""
        if self._edge_matrix is None:
            self.update_arrays()
        return self._edge_matrix

    def get_edge_matrix_transpose(self):
        """Lazily build and return the transposed incidence matrix."""
        if self._edge_matrix is None:
            self.update_arrays()
        return self._edge_matrix_transpose

    def get_conductance(self):
        """Lazily build and return the per-edge conductance vector."""
        if self._edge_conductance is None:
            self.update_arrays()
        return self._edge_conductance

    def adjusted_conductances(self, equiv_conductances):
        """Combine each edge's own resistance in series with the equivalent
        resistances of its conjunction inputs; return new conductances."""
        assert len(equiv_conductances) == len(self.nodes)
        equiv_resistances = 1.0 / np.maximum(0, equiv_conductances)
        edge_resistances = 1.0 / self._edge_conductance
        combined_resistances = self._conjunction_matrix.dot(equiv_resistances)
        # Series combination: resistances add.
        new_resistances = (edge_resistances + combined_resistances)
        adjusted_conductances = 1.0 / new_resistances
        print adjusted_conductances
        return adjusted_conductances

    def get_system_matrix(self, conductances):
        """Return the Laplacian-style operator A.T * diag(G) * A."""
        A_T = self.get_edge_matrix_transpose()
        A = self.get_edge_matrix()
        G = self.adjusted_conductances(conductances)
        return make_product_operator(A_T, make_diag(G), A)

    def solve_system(self, known_conductances, current_source):
        """
        Get the conductance from root to each node by solving the
        electrical system.
        """
        # Inject one unit of current at the source node.
        current = np.zeros((len(self.nodes), ))
        current[current_source] = 1.0
        system = self.get_system_matrix(known_conductances)
        A = self.get_edge_matrix()
        # Solve the sparse system of linear equations using cg
        new_potentials = sparse_linalg.cg(system, current)[0]
        # A = edges by nodes
        currents = -A.dot(new_potentials)
        potential_differences = new_potentials[current_source] - new_potentials
        current_magnitude = np.abs(
            self.get_edge_matrix_transpose()).dot(currents)
        # Unit current / potential difference = equivalent conductance.
        conductance = 1.0 / potential_differences
        return conductance

    def run_analog(self, root, epsilon=1e-6):
        """Iterate solve_system from `root` until the conductances stop
        changing (clamped comparison); return (node, conductance) pairs."""
        conductance = np.ones((len(self.nodes), ))
        converged = False
        root_index = self.nodes.index(root)
        for i in xrange(100):
            new_conductance = self.solve_system(conductance, root_index)
            # Clamp before differencing so near-infinite conductances
            # (the root itself) don't dominate the convergence test.
            diff = (np.minimum(conductance, 1000000.0) -
                    np.minimum(new_conductance, 1000000.0))
            conductance = new_conductance
            print conductance
            if np.linalg.norm(diff) < epsilon:
                converged = True
                break
        if not converged:
            print "Warning: failed to converge"
        return zip(self.nodes, conductance)
def test_reprOfEmpty():
    """
    Calling repr() on an empty OrderedSet must not raise.
    """
    empty = OrderedSet()
    repr(empty)
class TrustNetwork(object):
    """A trust-propagation network using sqrt-normalized transition matrices.

    Edges are stored transposed (row = destination, column = source);
    conjunction edges are marked in a parallel matrix.  `corona()` runs an
    iterative rank-1 SVD computation with convergence detection and returns
    hub/authority scores per node.
    """

    def __init__(self, output=None):
        # `output` is accepted but unused in this variant.
        self.nodes = OrderedSet()
        # Lazily-built matrices; None until constructed.
        # NOTE(review): get_matrices() tests self._final_matrix, which is
        # only ever assigned inside make_fast_matrix() -- a fresh instance
        # would raise AttributeError there.  Confirm whether _final_matrix
        # should be initialized to None here.
        self._node_matrix = None
        self._node_conjunctions = None
        self._fast_matrix_up = None
        self._transition_matrix = None
        self._fast_conjunctions = None

    @staticmethod
    def load(node_file, matrix_up, matrix_down, conjunction_file):
        """Rebuild a TrustNetwork from four divisi2-saved files."""
        trust = TrustNetwork()
        nodes = divisi2.load(node_file)
        trust.nodes = OrderedSet(nodes)
        trust._fast_matrix_up = divisi2.load(matrix_up)
        trust._transition_matrix = divisi2.load(matrix_down)
        trust._fast_conjunctions = divisi2.load(conjunction_file)
        return trust

    def add_nodes(self, nodes):
        """Register every node in `nodes`."""
        for node in nodes:
            self.nodes.add(node)

    def scan_edge(self, source, dest):
        """First pass: record both endpoints so node indices are fixed."""
        self.nodes.add(source)
        self.nodes.add(dest)

    def make_matrices(self):
        """Allocate empty (n x n) DOK matrices once all nodes are known."""
        self._node_matrix = sparse.dok_matrix(
            (len(self.nodes), len(self.nodes)))
        self._node_conjunctions = sparse.dok_matrix(
            (len(self.nodes), len(self.nodes)))

    def add_edge(self, source, dest, weight):
        """Record a weighted edge, stored transposed (row = dest)."""
        source_idx = self.nodes.index(source)
        dest_idx = self.nodes.index(dest)
        #self._node_matrix[source_idx, dest_idx] = weight
        self._node_matrix[dest_idx, source_idx] = weight

    def add_conjunction(self, sources, dest, weight):
        """Add an edge from each source to `dest`, marked as one conjunction."""
        dest_idx = self.nodes.index(dest)
        for i, source in enumerate(sources):
            self.add_edge(source, dest, weight)
            source_idx = self.nodes.index(source)
            self._node_conjunctions[dest_idx, source_idx] = 1.0

    def add_conjunction_piece(self, source, dest, weight):
        """Add a single, already-expanded conjunction edge."""
        source_idx = self.nodes.index(source)
        dest_idx = self.nodes.index(dest)
        self._node_matrix[dest_idx, source_idx] = weight
        self._node_conjunctions[dest_idx, source_idx] = 1.0

    def make_fast_matrix(self):
        """Build transition matrices scaled by 1/sqrt of squared row/column
        sums (an L2-style normalization), plus their transposes."""
        self._fast_matrix = self._node_matrix.tocsr()
        # Smooth row/column 0 (the root) toward every node.
        # NOTE(review): the _fast_matrix snapshot above is taken BEFORE this
        # smoothing, so only csr_matrix below sees it -- confirm intended.
        for i in xrange(self._node_matrix.shape[0]):
            self._node_matrix[0, i] += 1.0 / self._node_matrix.shape[0]
            self._node_matrix[i, 0] += 1.0 / self._node_matrix.shape[0]
        csr_matrix = self._node_matrix.tocsr()
        # Element-wise square: normalization uses sums of squared weights.
        abs_matrix = csr_matrix.multiply(csr_matrix)
        rowsums = np.sqrt(1.0 / (EPS + (abs_matrix).sum(axis=1)))
        rowsums = np.asarray(rowsums)[:, 0]
        rowdiag = make_diag(rowsums)
        colsums = np.sqrt(1.0 / (EPS + (abs_matrix).sum(axis=0)))
        colsums = np.asarray(colsums)[0, :]
        coldiag = make_diag(colsums)
        self._transition_matrix = (csr_matrix * coldiag).tocsr()
        self._transition_matrix_T = self._transition_matrix.T.tocsr()
        # "Final" matrices use the unsmoothed snapshot with column scaling.
        self._final_matrix = (self._fast_matrix * coldiag).tocsr()
        self._final_matrix_T = (coldiag * self._fast_matrix.T).tocsr()

    def make_fast_conjunctions(self):
        """Scale each conjunction row by 1/nnz so a row averages its inputs."""
        csr_conjunctions = self._node_conjunctions.tocsr()
        n = csr_conjunctions.shape[0]
        scale_vec = np.zeros((n, ))
        for row in xrange(n):
            nnz = csr_conjunctions[row].nnz
            if nnz > 0:
                scale_vec[row] = 1.0 / nnz
        self._fast_conjunctions = make_diag(scale_vec) * csr_conjunctions

    def get_matrices(self):
        """Lazily build and return (up, down) = (transition.T, transition)."""
        if self._final_matrix is None:
            self.make_fast_matrix()
        return self._transition_matrix_T, self._transition_matrix

    def get_conjunctions(self):
        """Lazily build and return the normalized conjunction matrix."""
        if self._fast_conjunctions is None:
            self.make_fast_conjunctions()
        return self._fast_conjunctions

    def corona(self):
        """Iterate a rank-1 SVD activation spread until convergence; return
        (node, hub, authority) triples for every node."""
        cmat = self.get_conjunctions()
        mat_up, mat_down = self.get_matrices()
        # Small uniform starting scores.
        hub = np.ones((mat_up.shape[0], )) / mat_up.shape[0] / 100
        authority = np.ones((mat_up.shape[0], )) / mat_up.shape[0] / 100
        prev_activation = np.zeros((mat_up.shape[0], ))
        prev_err = 1.0
        for iter in xrange(100):
            vec = authority + hub
            vec /= np.max(vec)
            root = np.zeros(len(vec), 'f')
            root[0] = 1.0
            # Dampen conjunction destinations: compare the parallel
            # (harmonic-style) combination of inputs against their plain sum.
            conj_sums = cmat * vec
            conj_par = 1.0 / (np.maximum(EPS,
                                         cmat * (1.0 / np.maximum(EPS, vec))))
            conj_factor = np.minimum(1.0, conj_par / (conj_sums + EPS))
            conj_diag = make_diag(conj_factor)
            # Blend the damped propagation with a 0.5 identity component.
            combined = conj_diag * (mat_up + mat_down) * 0.25 + make_diag(
                np.ones(len(vec)) * 0.5)
            #combined = (mat_up + mat_down) * 0.5
            u, sigma, v = sparse_svd(combined, k=1)
            activation = u[:, 0]
            #activation = np.dot(u, u[0])
            #w, v = eigen(combined.T, k=1, v0=root, which='LR')
            #activation = v[:, np.argmax(w)].real
            # Fix sign and L1-normalize the leading singular vector.
            activation *= np.sign(np.sum(activation))
            activation /= (np.sum(np.abs(activation)) + EPS)
            hub += (hub + self._final_matrix_T * conj_diag * activation) / 2
            authority += (authority + conj_diag * self._final_matrix * activation) / 2
            print activation
            err = np.max(np.abs(activation - prev_activation))\
                / np.max(np.abs(activation))
            print err
            if iter >= 3 and err + prev_err < 1e-9:
                print "converged on iteration %d" % iter
                break
            prev_err = err
            prev_activation = activation.copy()
        print sigma
        print conj_factor
        print
        # Final scores come from the last iteration's activation alone.
        hub = self._final_matrix_T * conj_diag * activation
        authority = conj_diag * self._final_matrix * activation
        return zip(self.nodes, hub, authority)