def trace(self, b1, b2):
    '''
    Links external buckets b1 and b2 and eliminates any loops which result.
    '''
    assert b1 in self.externalBuckets
    assert b2 in self.externalBuckets
    assert b1 != b2

    n1 = b1.node
    n2 = b2.node

    if n1 == n2:
        # So we're just tracing an arrayTensor.
        n1.tensor = n1.tensor.trace([b1.index], [b2.index])
        n1.buckets.remove(b1)
        n1.buckets.remove(b2)
        self.externalBuckets.remove(b1)
        self.externalBuckets.remove(b2)
    else:
        # We may be introducing a loop
        loop = self.pathBetween(n1, n2)
        if len(loop) > 0:
            if len(loop) == 2:
                # This special case is not possible when contracting in a new node.
                # The easy way to handle it is just to merge the two nodes and then
                # split them if the resulting rank is too high.
                _ = Link(b1, b2)
                n = self.mergeNodes(n1, n2)
                self.splitNode(n)
            else:
                _ = Link(b1, b2)
                self.eliminateLoop(loop)
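
# Hedged usage sketch for trace(), not part of the library. It assumes the
# same names the tests below rely on (TreeNetwork, Node, ArrayTensor, np and
# the epsilon tolerance) are importable in this scope.
def _example_trace_same_node():
    tn = TreeNetwork(accuracy=epsilon)
    n = Node(ArrayTensor(np.random.randn(3, 3, 3)))
    tn.addNode(n)
    # Tracing two external buckets of the same node contracts that index pair
    # in place, leaving a rank-1 node behind.
    tn.trace(n.buckets[0], n.buckets[1])
    assert n.tensor.rank == 1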
def test_mergeLinks_TreeTensor_Compress():
    for i in range(5):
        net = Network()

        x = np.random.randn(2, 2, 2, 2, 3, 3)
        xt = TreeTensor(accuracy=epsilon)
        xt.addTensor(ArrayTensor(x))
        n1 = Node(xt)
        x = np.random.randn(2, 2, 2, 2, 3, 3)
        xt = TreeTensor(accuracy=epsilon)
        xt.addTensor(ArrayTensor(x))
        n2 = Node(xt)

        net.addNode(n1)
        Link(n1.buckets[0], n2.buckets[0])
        Link(n1.buckets[1], n2.buckets[1])
        net.addNode(n2)

        arr1, bdict1 = net.array

        net.mergeLinks(n1, compress=True, accuracy=epsilon)

        arr2, bdict2 = net.array

        assert np.sum((arr1 - arr2)**2) < epsilon
def test_pathing():
    tn = TreeNetwork(accuracy=epsilon)

    n1 = Node(ArrayTensor(np.random.randn(3, 3, 3)))
    tn.addNode(n1)
    assert len(tn.pathBetween(n1, n1)) == 1

    n2 = Node(ArrayTensor(np.random.randn(3, 3, 3)))
    Link(n1.buckets[0], n2.buckets[0])
    tn.addNode(n2)
    assert len(tn.pathBetween(n1, n2)) == 2
    assert len(tn.pathBetween(n2, n1)) == 2
    assert tn.pathBetween(n1, n2) == [n1, n2]
    assert tn.pathBetween(n2, n1) == [n2, n1]

    n3 = Node(ArrayTensor(np.random.randn(3, 3, 3)))
    Link(n3.buckets[0], n2.buckets[2])
    tn.addNode(n3)
    assert len(tn.pathBetween(n1, n3)) == 3
    assert len(tn.pathBetween(n3, n1)) == 3
    assert tn.pathBetween(n1, n3) == [n1, n2, n3]
    assert tn.pathBetween(n3, n1) == [n3, n2, n1]
    assert tn.pathBetween(n2, n3) == [n2, n3]
    assert tn.pathBetween(n3, n2) == [n3, n2]
def test_links():
    x = np.random.randn(2, 3, 3)
    xt = ArrayTensor(x)
    n1 = Node(xt)
    x = np.random.randn(2, 3, 3)
    xt = ArrayTensor(x)
    n2 = Node(xt)

    l1 = Link(n1.buckets[0], n2.buckets[0])
    assert n1.linkedBuckets[0].otherBucket == n2.buckets[0]

    l2 = Link(n2.buckets[1], n1.buckets[1])
    assert n1.linkedBuckets[1].otherBucket == n2.buckets[1]

    assert l1.bucket1.node == n1 or l1.bucket2.node == n1
    assert l1.bucket1.node == n2 or l1.bucket2.node == n2

    assert l1 in n1.findLinks(n2)
    assert l2 in n1.findLinks(n2)
    assert l1 in n2.findLinks(n1)
    assert l2 in n2.findLinks(n1)

    assert n1.findLink(n2) == l1 or n1.findLink(n2) == l2

    assert n1.indexConnecting(n2) == 0 or n1.indexConnecting(n2) == 1
    assert n2.indexConnecting(n1) == 0 or n2.indexConnecting(n1) == 1

    assert 0 in n1.indicesConnecting(n2)[0]
    assert 1 in n1.indicesConnecting(n2)[0]
    assert 2 not in n1.indicesConnecting(n2)[0]
    assert 0 in n2.indicesConnecting(n1)[0]
    assert 1 in n2.indicesConnecting(n1)[0]
    assert 2 not in n2.indicesConnecting(n1)[0]
    assert 0 in n1.indicesConnecting(n2)[1]
    assert 1 in n1.indicesConnecting(n2)[1]
    assert 2 not in n1.indicesConnecting(n2)[1]
    assert 0 in n2.indicesConnecting(n1)[1]
    assert 1 in n2.indicesConnecting(n1)[1]
    assert 2 not in n2.indicesConnecting(n1)[1]

    assert len(n1.connectedNodes) == 1
    assert n2 in n1.connectedNodes
def test_mergeNode_ArrayTensor():
    for i in range(5):
        net = Network()

        x = np.random.randn(2, 3, 3)
        xt = ArrayTensor(x)
        n1 = Node(xt)
        xt = ArrayTensor(x)
        n2 = Node(xt)

        net.addNode(n1)
        Link(n1.buckets[0], n2.buckets[0])
        net.addNode(n2)

        net.mergeNodes(n1, n2)

        assert len(net.nodes) == 1
        assert len(net.buckets) == 4
        assert len(net.internalBuckets) == 0
        assert len(net.externalBuckets) == 4
        assert len(net.optimizedLinks) == 0

        arr, bdict = net.array
        assert arr.shape == (3, 3, 3, 3)
        for b in net.buckets:
            assert b.id in bdict
        for b1 in net.buckets:
            for b2 in net.buckets:
                if b1.id < b2.id:
                    assert bdict[b1.id] < bdict[b2.id]
        assert np.sum((arr - np.einsum('ijk,ilm->jklm', x, x))**2) < epsilon
def splitNode(self, node, ignore=None):
    '''
    Takes as input a Node and ensures that it is at most rank 3 by factoring
    rank 3 tensors out of it until what remains has rank 3 or less. The
    factoring is done via a greedy algorithm, where the pair of indices with
    the least correlation with the rest is factored out. This is determined
    by explicitly tracing out all but those indices from the density matrix
    and computing the entropy.

    ignore may be None or a pair of indices.
    In the latter case, the pair of indices will be required to stay together.
    This is enforced by having the pair be the first one factored.
    '''
    nodes = []

    while node.tensor.rank > 3:
        self.removeNode(node)

        array = node.tensor.scaledArray

        if ignore is not None:
            p = ignore
            ignore = None
        else:
            p = entropy(array)

        u, v, indices1, indices2 = splitArray(
            array, p, accuracy=self.accuracy)

        if u.shape[-1] > 1:
            b1 = Bucket()
            b2 = Bucket()
            n1 = Node(
                ArrayTensor(u, logScalar=node.tensor.logScalar / 2),
                Buckets=[node.buckets[i] for i in indices1] + [b1])
            n2 = Node(
                ArrayTensor(v, logScalar=node.tensor.logScalar / 2),
                Buckets=[b2] + [node.buckets[i] for i in indices2])
            # This line has to happen before addNode to prevent b1 and b2
            # from becoming externalBuckets.
            _ = Link(b1, b2)
        else:
            # Cut the link entirely: the bond dimension is 1, so the tensor
            # factorises exactly.
            u = u[..., 0]
            v = v[0]
            n1 = Node(
                ArrayTensor(u, logScalar=node.tensor.logScalar / 2),
                Buckets=[node.buckets[i] for i in indices1])
            n2 = Node(
                ArrayTensor(v, logScalar=node.tensor.logScalar / 2),
                Buckets=[node.buckets[i] for i in indices2])

        self.addNode(n1)
        self.addNode(n2)
        nodes.append(n1)

        node = n2

    nodes.append(node)

    return nodes
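
# Hedged sketch of splitNode() on a rank-5 node, not part of the library.
# It assumes the same names used by the tests in this section (TreeNetwork,
# Node, ArrayTensor, np, epsilon) and that a high-rank node may be added and
# split manually, mirroring what TreeTensor.addTensor does internally.
def _example_splitNode():
    tn = TreeNetwork(accuracy=epsilon)
    n = Node(ArrayTensor(np.random.randn(2, 2, 2, 2, 2)))
    tn.addNode(n)
    # The rank-5 node is replaced by a chain of nodes, each of rank at most 3,
    # connected along newly introduced internal bonds.
    pieces = tn.splitNode(n)
    assert all(p.tensor.rank <= 3 for p in pieces)
    assert len(tn.externalBuckets) == 5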
def test_mergeLinks_ArrayTensor():
    for i in range(5):
        net = Network()

        x = np.random.randn(2, 2, 3, 3)
        xt = ArrayTensor(x)
        n1 = Node(xt)
        xt = ArrayTensor(x)
        n2 = Node(xt)

        net.addNode(n1)
        Link(n1.buckets[0], n2.buckets[0])
        Link(n1.buckets[1], n2.buckets[1])
        net.addNode(n2)

        arr1, bdict1 = net.array

        net.mergeLinks(n1)

        arr2, bdict2 = net.array

        assert np.sum((arr1 - arr2)**2) < epsilon
def test_init():
    net = Network()

    assert len(net.nodes) == 0
    assert len(net.buckets) == 0
    assert len(net.internalBuckets) == 0
    assert len(net.externalBuckets) == 0
    assert len(net.optimizedLinks) == 0

    x = np.random.randn(2, 3, 3)
    xt = ArrayTensor(x)
    n1 = Node(xt)
    net.addNode(n1)

    assert len(net.nodes) == 1
    assert len(net.buckets) == 3
    assert len(net.internalBuckets) == 0
    assert len(net.externalBuckets) == 3
    assert len(net.optimizedLinks) == 0

    x = np.random.randn(2, 3, 3)
    xt = ArrayTensor(x)
    n2 = Node(xt)
    Link(n1.buckets[0], n2.buckets[0])
    net.addNode(n2)

    assert len(net.nodes) == 2
    assert len(net.buckets) == 6
    assert len(net.internalBuckets) == 2
    assert len(net.externalBuckets) == 4
    assert len(net.optimizedLinks) == 0

    net.removeNode(n1)

    assert len(net.nodes) == 1
    assert len(net.buckets) == 3
    assert len(net.internalBuckets) == 0
    assert len(net.externalBuckets) == 3
    assert len(net.optimizedLinks) == 0
def mergeClosestLinks(self, n1, compress=False, accuracy=1e-4):
    # Among the neighbours of n1 that carry more than one link to it (and
    # whose tensors are tree tensors, detected via compressedSize), find the
    # pair of parallel links whose endpoints are closest together in both
    # trees, weighted by the log of the bond sizes. Merge that pair into a
    # single link and optionally compress it.
    best = [1e100, None, None, None, None, None]

    for n2 in n1.connectedNodes:
        if hasattr(n2.tensor, 'compressedSize'):
            links = n1.linksConnecting(n2)
            buckets1 = []
            buckets2 = []
            if len(links) > 1:
                for l in links:
                    if l.bucket1.node is n1:
                        buckets1.append(l.bucket1)
                        buckets2.append(l.bucket2)
                    else:
                        buckets1.append(l.bucket2)
                        buckets2.append(l.bucket1)
                for b11 in buckets1:
                    for b12 in buckets1:
                        if b11 != b12:
                            dist = n1.tensor.distBetweenBuckets(b11, b12)
                            dist += n2.tensor.distBetweenBuckets(
                                b11.otherBucket, b12.otherBucket)
                            dist *= np.log(b11.size * b12.size)
                            if dist < best[0]:
                                best = [
                                    dist, n2, b11, b12,
                                    b11.otherBucket, b12.otherBucket
                                ]

    if best[0] < 1e100:
        dist, n2, b11, b12, b21, b22 = best

        b = n1.mergeBuckets([b11, b12])
        b1 = n2.mergeBuckets([b21, b22])
        l = Link(b, b1)
        if compress:
            compressLink(l, accuracy)

        return n2

    return None
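
# Hedged sketch for mergeClosestLinks(), not part of the library. It mirrors
# the TreeTensor test setup above and assumes TreeTensor exposes the
# distBetweenBuckets and compressedSize attributes the method itself requires.
def _example_mergeClosestLinks():
    net = Network()
    xt = TreeTensor(accuracy=epsilon)
    xt.addTensor(ArrayTensor(np.random.randn(2, 2, 3, 3)))
    n1 = Node(xt)
    yt = TreeTensor(accuracy=epsilon)
    yt.addTensor(ArrayTensor(np.random.randn(2, 2, 3, 3)))
    n2 = Node(yt)
    net.addNode(n1)
    Link(n1.buckets[0], n2.buckets[0])
    Link(n1.buckets[1], n2.buckets[1])
    net.addNode(n2)
    # The two parallel links between n1 and n2 are the only candidate pair,
    # so they are merged into a single compressed link and n2 is returned.
    assert net.mergeClosestLinks(n1, compress=True, accuracy=epsilon) is n2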
def __init__(self, dimension, rank, accuracy=0.0):
    super().__init__(accuracy)

    numLayers = layer(dimension)

    if rank == 0:
        self.addTensor(ArrayTensor(np.array(1.)))
    elif rank == 1:
        self.addTensor(ArrayTensor(np.ones(dimension)))
    elif rank == 2:
        self.addTensor(ArrayTensor(np.identity(dimension)))
    else:
        numTensors = rank - 2

        buckets = []

        # Create the rank-3 identity (Kronecker delta) array
        iden = np.zeros((dimension, dimension, dimension))
        for i in range(dimension):
            iden[i, i, i] = 1.0

        for i in range(numTensors):
            n = super().addTensor(ArrayTensor(iden))
            buckets = buckets + n.buckets

        while len(self.network.externalBuckets) > rank:
            b = buckets.pop(0)
            i = 0
            while buckets[i].node is b.node or len(
                    buckets[i].node.connectedNodes) > 0:
                i += 1
            Link(b, buckets[i])

            self.externalBuckets.remove(b)
            self.externalBuckets.remove(buckets[i])
            self.network.externalBuckets.remove(b)
            self.network.externalBuckets.remove(buckets[i])
            self.network.internalBuckets.add(b)
            self.network.internalBuckets.add(buckets[i])

            buckets.remove(buckets[i])
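
# Hedged sketch, not part of the library: a rank-4 IdentityTensor over
# dimension-2 indices is a Kronecker delta built from linked rank-3 deltas.
# Assumes epsilon is the accuracy constant used by the tests in this section.
def _example_identityTensor():
    t = IdentityTensor(2, 4, accuracy=epsilon)
    assert t.rank == 4
    assert len(t.externalBuckets) == 4
    # Internally the tree holds rank - 2 = 2 delta nodes.
    assert len(t.network.nodes) == 2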
def mergeLinks(self, n, compress=False, accuracy=1e-4):
    # For each neighbour connected to n by more than one link, merge those
    # parallel links into a single link, optionally compressing it.
    merged = []

    for n1 in n.connectedNodes:
        links = n1.linksConnecting(n)
        buckets1 = []
        buckets2 = []
        if len(links) > 1:
            for l in links:
                if l.bucket1.node is n:
                    buckets1.append(l.bucket1)
                    buckets2.append(l.bucket2)
                else:
                    buckets1.append(l.bucket2)
                    buckets2.append(l.bucket1)
            b = n.mergeBuckets(buckets1)
            b1 = n1.mergeBuckets(buckets2)
            l = Link(b, b1)
            if compress:
                compressLink(l, accuracy)
            merged.append(n1)

    return merged
def test_mergeNode_TreeTensor():
    for i in range(5):
        net = Network()

        x = np.random.randn(2, 2, 2, 2, 2)
        xt = TreeTensor(accuracy=epsilon)
        xt.addTensor(ArrayTensor(x))
        n1 = Node(xt)
        y = np.random.randn(2, 2, 2, 2, 2)
        xt = TreeTensor(accuracy=epsilon)
        xt.addTensor(ArrayTensor(y))
        n2 = Node(xt)

        net.addNode(n1)
        Link(n1.buckets[0], n2.buckets[0])
        net.addNode(n2)

        net.mergeNodes(n1, n2)

        assert len(net.nodes) == 1
        assert len(net.buckets) == 8
        assert len(net.internalBuckets) == 0
        assert len(net.externalBuckets) == 8
        assert len(net.optimizedLinks) == 0

        arr, bdict = net.array
        assert arr.shape == (2, 2, 2, 2, 2, 2, 2, 2)
        for b in net.buckets:
            assert b.id in bdict
        for b1 in net.buckets:
            for b2 in net.buckets:
                if b1.id < b2.id:
                    assert bdict[b1.id] < bdict[b2.id]
        assert np.sum(
            (arr - np.einsum('ijklm,iqwer->jklmqwer', x, y))**2) < epsilon
def PA3D(nX, nY, nZ, h, J, q, accuracy):
    network = Network()

    # Place to store the tensors
    lattice = [[[] for j in range(nY)] for i in range(nX)]
    bondL = [[[] for j in range(nY)] for i in range(nX)]

    # Each lattice site has seven indices of width two, and returns one if
    # they are all equal and zero otherwise.
    for i in range(nX):
        for j in range(nY):
            for k in range(nZ):
                lattice[i][j].append(
                    Node(IdentityTensor(2, 7, accuracy=accuracy)))

    arr = np.zeros((2, 2))

    # 2-point
    arr[0][0] = np.exp(-J)
    arr[1][1] = np.exp(-J)
    arr[0][1] = np.exp(J)
    arr[1][0] = np.exp(J)

    # 1-point
    arr[0] *= np.exp(h / 6)
    arr[1] *= np.exp(-h / 6)

    # Expand
    arr = np.einsum('ij,ik,il,ia,ib,ic->ijklabc', arr, arr, arr, arr, arr, arr)

    # 3-point
    arr[1, 1, :, 1, :, :, :] = 0
    arr[1, 1, :, :, 1, :, :] = 0
    arr[1, 1, :, :, :, 1, :] = 0
    arr[1, 1, :, :, :, :, 1] = 0
    arr[1, :, 1, 1, :, :, :] = 0
    arr[1, :, 1, :, 1, :, :] = 0
    arr[1, :, 1, :, :, 1, :] = 0
    arr[1, :, 1, :, :, :, 1] = 0
    arr[1, 1, :, 1, :, :, :] = 0
    arr[1, :, 1, 1, :, :, :] = 0
    arr[1, :, :, 1, :, 1, :] = 0
    arr[1, :, :, 1, :, :, 1] = 0
    arr[1, 1, :, :, 1, :, :] = 0
    arr[1, :, 1, :, 1, :, :] = 0
    arr[1, :, :, :, 1, 1, :] = 0
    arr[1, :, :, :, 1, :, 1] = 0
    arr[1, 1, :, :, :, 1, :] = 0
    arr[1, :, 1, :, :, 1, :] = 0
    arr[1, :, :, 1, :, 1, :] = 0
    arr[1, :, :, :, 1, 1, :] = 0
    arr[1, 1, :, :, :, :, 1] = 0
    arr[1, :, 1, :, :, :, 1] = 0
    arr[1, :, :, 1, :, :, 1] = 0
    arr[1, :, :, :, 1, :, 1] = 0

    # 4-point
    arr[1, 1, 1, 1, :, :, :] = np.exp(q)
    arr[1, 1, 1, :, 1, :, :] = np.exp(q)
    arr[1, 1, 1, :, :, 1, :] = np.exp(q)
    arr[1, 1, 1, :, :, :, 1] = np.exp(q)
    arr[1, 1, :, 1, 1, :, :] = np.exp(q)
    arr[1, :, 1, 1, 1, :, :] = np.exp(q)
    arr[1, :, :, 1, 1, 1, :] = np.exp(q)
    arr[1, :, :, 1, 1, :, 1] = np.exp(q)
    arr[1, 1, :, :, :, 1, 1] = np.exp(q)
    arr[1, :, 1, :, :, 1, 1] = np.exp(q)
    arr[1, :, :, 1, :, 1, 1] = np.exp(q)
    arr[1, :, :, :, 1, 1, 1] = np.exp(q)

    # 5-point
    for j in range(2):
        for k in range(2):
            for l in range(2):
                for m in range(2):
                    for n in range(2):
                        for p in range(2):
                            if j + k + l + m + n + p >= 4:
                                arr[1, j, k, l, m, n, p] = 0

    t = ArrayTensor(arr)
    tt = TreeTensor(accuracy)
    tt.addTensor(t)

    # Make L-bonds
    for i in range(nX):
        for j in range(nY):
            for k in range(nZ):
                bondL[i][j].append(Node(deepcopy(tt)))

    # Attach links
    for i in range(nX):
        for j in range(nY):
            for k in range(nZ):
                Link(lattice[i][j][k].buckets[0], bondL[i][j][k].buckets[0])
                Link(lattice[i][j][k].buckets[1],
                     bondL[(i + 1) % nX][j][k].buckets[1])
                Link(lattice[i][j][k].buckets[2],
                     bondL[i - 1][j][k].buckets[2])
                Link(lattice[i][j][k].buckets[3],
                     bondL[i][(j + 1) % nY][k].buckets[3])
                Link(lattice[i][j][k].buckets[4],
                     bondL[i][j - 1][k].buckets[4])
                Link(lattice[i][j][k].buckets[5],
                     bondL[i][j][(k + 1) % nZ].buckets[5])
                Link(lattice[i][j][k].buckets[6],
                     bondL[i][j][k - 1].buckets[6])

    # Add to Network
    for i in range(nX):
        for j in range(nY):
            for k in range(nZ):
                network.addNode(lattice[i][j][k])
                network.addNode(bondL[i][j][k])

    return network
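
# Hedged sketch, not part of the library: building a small 3D lattice with
# PA3D. The parameter values are arbitrary illustrations, not recommended
# settings.
def _example_PA3D():
    net = PA3D(nX=2, nY=2, nZ=2, h=0.1, J=0.5, q=0.2, accuracy=1e-4)
    # One identity site node plus one bond node per lattice point.
    assert len(net.nodes) == 2 * (2 * 2 * 2)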
def PA2D(nX, nY, h, J, q, accuracy):
    network = Network()

    # Place to store the tensors
    lattice = [[] for i in range(nX)]
    bondL = [[] for i in range(nX)]

    # Each lattice site has five indices of width two, and returns one if
    # they are all equal and zero otherwise.
    for i in range(nX):
        for j in range(nY):
            lattice[i].append(Node(IdentityTensor(2, 5, accuracy=accuracy)))

    arr = np.zeros((2, 2))

    # 2-point
    arr[0][0] = np.exp(-J)
    arr[1][1] = np.exp(-J)
    arr[0][1] = np.exp(J)
    arr[1][0] = np.exp(J)

    # 1-point
    arr[0] *= np.exp(h / 4)
    arr[1] *= np.exp(-h / 4)

    # Expand
    arr = np.einsum('ij,ik,il,ia->ijkla', arr, arr, arr, arr)

    # 3-point
    arr[1, 1, :, 1, :] = 0
    arr[1, 1, :, :, 1] = 0
    arr[1, :, 1, 1, :] = 0
    arr[1, :, 1, :, 1] = 0

    # 4-point
    arr[1, 1, 1, 1, :] = np.exp(q)
    arr[1, 1, 1, :, 1] = np.exp(q)
    arr[1, 1, :, 1, 1] = np.exp(q)
    arr[1, :, 1, 1, 1] = np.exp(q)

    # 5-point
    for j in range(2):
        for k in range(2):
            for l in range(2):
                for m in range(2):
                    if j + k + l + m >= 4:
                        arr[1, j, k, l, m] = 0

    # Make L-bonds
    for i in range(nX):
        for j in range(nY):
            bondL[i].append(Node(ArrayTensor(arr)))

    # Attach links
    for i in range(nX):
        for j in range(nY):
            Link(lattice[i][j].buckets[0], bondL[i][j].buckets[0])
            Link(lattice[i][j].buckets[1], bondL[(i + 1) % nX][j].buckets[1])
            Link(lattice[i][j].buckets[2], bondL[i - 1][j].buckets[2])
            Link(lattice[i][j].buckets[3], bondL[i][(j + 1) % nY].buckets[3])
            Link(lattice[i][j].buckets[4], bondL[i][j - 1].buckets[4])

    # Add to Network
    for i in range(nX):
        for j in range(nY):
            network.addNode(lattice[i][j])
            network.addNode(bondL[i][j])

    return network
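
# Hedged sketch, not part of the library: building a small 2D lattice with
# PA2D, analogous to the 3D example above. Parameter values are arbitrary
# illustrations.
def _example_PA2D():
    net = PA2D(nX=3, nY=3, h=0.1, J=0.5, q=0.2, accuracy=1e-4)
    # One identity site node plus one bond node per lattice site.
    assert len(net.nodes) == 2 * (3 * 3)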
def contract(self, ind, other, otherInd, front=True):
    # This method could be vastly simplified by defining a cycle basis class.

    # We copy the two networks first. If the other is an ArrayTensor we
    # cast it to a TreeTensor first.
    t1 = deepcopy(self)
    if hasattr(other, 'network'):
        t2 = deepcopy(other)
    else:
        t2 = TreeTensor(self.accuracy)
        t2.addTensor(other)

    # If front == True then we contract t2 into t1, otherwise we contract
    # t1 into t2. This is so we get the index order correct.
    if not front:
        t1, t2 = t2, t1
        otherInd, ind = ind, otherInd

    # Link the networks
    links = []
    for i, j in zip(ind, otherInd):
        b1, b2 = t1.externalBuckets[i], t2.externalBuckets[j]
        assert b1 in t1.network.buckets and b1 not in t2.network.buckets
        assert b2 in t2.network.buckets and b2 not in t1.network.buckets
        links.append(Link(b1, b2))

    # Determine new external buckets list
    for l in links:
        t1.externalBuckets.remove(l.bucket1)
        t2.externalBuckets.remove(l.bucket2)
    extB = t1.externalBuckets + t2.externalBuckets

    # Merge the networks
    toRemove = set(t2.network.nodes)
    for n in toRemove:
        t2.network.removeNode(n)
    for n in toRemove:
        t1.network.addNode(n)

    # Merge any rank-1 or rank-2 objects
    done = set()
    while len(done.intersection(t1.network.nodes)) < len(t1.network.nodes):
        n = next(iter(t1.network.nodes.difference(done)))
        if n.tensor.rank <= 2:
            nodes = t1.network.internalConnected(n)
            if len(nodes) > 0:
                t1.network.mergeNodes(n, nodes.pop())
            else:
                done.add(n)
        else:
            done.add(n)

    t1.externalBuckets = extB
    assert t1.network.externalBuckets == set(t1.externalBuckets)

    for n in t1.network.nodes:
        assert n.tensor.rank <= 3

    assert t1.rank == self.rank + other.rank - 2 * len(ind)

    return t1
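
# Hedged sketch for TreeTensor.contract(), not part of the library: it
# contracts one index pair between two small trees and checks the resulting
# rank. Assumes the usual test-scope names (TreeTensor, ArrayTensor, np,
# epsilon).
def _example_contract():
    t1 = TreeTensor(accuracy=epsilon)
    t1.addTensor(ArrayTensor(np.random.randn(2, 2, 2, 2)))
    t2 = TreeTensor(accuracy=epsilon)
    t2.addTensor(ArrayTensor(np.random.randn(2, 2, 2)))
    # Contract index 0 of t1 against index 0 of t2.
    t3 = t1.contract([0], t2, [0])
    assert t3.rank == t1.rank + t2.rank - 2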
def BayesTest2(observations, discreteG, discreteQ, discreteW, discreteH,
               accuracy):
    '''
    observations is a list of (k, M) pairs, where k is the number of heads
    and M - k is the number of tails in a repeated Bernoulli coin toss.
    This model represents the likelihood

        L = M! p^k (1-p)^(M-k) / (k! (M-k)!)

    summed over all coins that were observed.

    Here we model p_i = min(g*h_i + q^w, 1), where each of g, q, w and h_i
    lies in [0,1] and has a uniform prior. g, w and q are global parameters.

    discreteG, discreteW and discreteQ specify the g, w and q values to
    sample. discreteH is the same for h_i.
    '''
    network = Network()

    # Local tensors
    hs = []
    for i, obs in enumerate(observations):
        arr = np.zeros(
            (len(discreteG), len(discreteQ), len(discreteW), len(discreteH)))
        for j, gg in enumerate(discreteG):
            for k, qq in enumerate(discreteQ):
                for e, ww in enumerate(discreteW):
                    for l, h in enumerate(discreteH):
                        p = min(gg * h + qq**ww, 1)
                        arr[j, k, e, l] = (
                            factorial(obs[1]) * p**obs[0] *
                            (1 - p)**(obs[1] - obs[0]) /
                            (factorial(obs[0]) * factorial(obs[1] - obs[0])))

        # Marginalize over the individual h_i distributions
        arr = np.sum(arr, axis=-1)

        h = Node(ArrayTensor(arr))
        hs.append(h)

    # After marginalizing over h_i the remaining axes are ordered (g, q, w),
    # so bucket 0 is g, bucket 1 is q and bucket 2 is w.
    extG = [h.buckets[0] for h in hs]
    extW = [h.buckets[2] for h in hs]
    extQ = [h.buckets[1] for h in hs]

    # Tie the copies of each global parameter together with a binary tree of
    # rank-3 identity tensors rather than one high-rank identity tensor.
    nodes = []

    dimension = len(discreteG)
    while len(extG) > 1:
        n = Node(IdentityTensor(dimension, 3, accuracy=accuracy))
        nodes.append(n)
        Link(n.buckets[0], extG[0])
        Link(n.buckets[1], extG[1])
        extG.append(n.buckets[2])
        extG = extG[2:]

    dimension = len(discreteW)
    while len(extW) > 1:
        n = Node(IdentityTensor(dimension, 3, accuracy=accuracy))
        nodes.append(n)
        Link(n.buckets[0], extW[0])
        Link(n.buckets[1], extW[1])
        extW.append(n.buckets[2])
        extW = extW[2:]

    dimension = len(discreteQ)
    while len(extQ) > 1:
        n = Node(IdentityTensor(dimension, 3, accuracy=accuracy))
        nodes.append(n)
        Link(n.buckets[0], extQ[0])
        Link(n.buckets[1], extQ[1])
        extQ.append(n.buckets[2])
        extQ = extQ[2:]

    for h in hs:
        network.addNode(h)
    for n in nodes:
        network.addNode(n)

    return network
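
# Hedged sketch for BayesTest2, not part of the library: the global
# parameters are tied together by binary trees of rank-3 identity tensors,
# so the node count is the number of observations plus 3 * (n - 1) identity
# nodes. The observation list and grid below are illustrative only.
def _example_BayesTest2():
    observations = [(3, 10), (7, 10)]      # (heads, tosses) pairs
    grid = np.linspace(0.05, 0.95, 5)
    net = BayesTest2(observations, grid, grid, grid, grid, accuracy=1e-4)
    assert len(net.nodes) == len(observations) + 3 * (len(observations) - 1)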
def BayesTest1(observations, discreteG, discreteQ, discreteW, discreteH,
               accuracy):
    '''
    observations is a list of (k, M) pairs, where k is the number of heads
    and M - k is the number of tails in a repeated Bernoulli coin toss.
    This model represents the likelihood

        L = M! p^k (1-p)^(M-k) / (k! (M-k)!)

    summed over all coins that were observed.

    Here we model p_i = min(g*h_i + q^w, 1), where each of g, q, w and h_i
    lies in [0,1] and has a uniform prior. g, w and q are global parameters.

    discreteG, discreteW and discreteQ specify the g, w and q values to
    sample. discreteH is the same for h_i.
    '''
    network = Network()

    # Global tensors
    n = len(observations)
    g = Node(IdentityTensor(len(discreteG), n + 1, accuracy=accuracy))
    q = Node(IdentityTensor(len(discreteQ), n + 1, accuracy=accuracy))
    w = Node(IdentityTensor(len(discreteW), n + 1, accuracy=accuracy))

    # Local tensors
    hs = []
    for i, obs in enumerate(observations):
        arr = np.zeros(
            (len(discreteG), len(discreteQ), len(discreteW), len(discreteH)))
        for j, gg in enumerate(discreteG):
            for k, qq in enumerate(discreteQ):
                for e, ww in enumerate(discreteW):
                    for l, h in enumerate(discreteH):
                        p = min(gg * h + qq**ww, 1)
                        arr[j, k, e, l] = (
                            factorial(obs[1]) * p**obs[0] *
                            (1 - p)**(obs[1] - obs[0]) /
                            (factorial(obs[0]) * factorial(obs[1] - obs[0])))

        # Marginalize over the individual h_i distributions
        arr = np.sum(arr, axis=-1)

        h = Node(ArrayTensor(arr))
        hs.append(h)

        Link(h.buckets[0], g.buckets[i])
        Link(h.buckets[1], q.buckets[i])
        Link(h.buckets[2], w.buckets[i])

    # Assemble the network
    network.addNode(g)
    network.addNode(q)
    network.addNode(w)
    for h in hs:
        network.addNode(h)

    return network
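
# Hedged sketch for BayesTest1, not part of the library: one high-rank
# identity node per global parameter (g, q, w) plus one local node per
# observation. The observation list and grid are illustrative only.
def _example_BayesTest1():
    observations = [(3, 10), (7, 10)]      # (heads, tosses) pairs
    grid = np.linspace(0.05, 0.95, 5)
    net = BayesTest1(observations, grid, grid, grid, grid, accuracy=1e-4)
    assert len(net.nodes) == 3 + len(observations)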