def MCupperBoundRedIntrinInfDet_(P, dimU, dimBZU, noIterOuter, noIterInner, verbose=False):
    minVal = np.finfo(float).max
    for i in range(noIterOuter):
        # set up a random channel XYZ -> U and compute P_UXYZ
        PC_UXYZ = randChannelMP((dimU, ), P.shape)
        shpc = PC_UXYZ.shape
        P_XYZU = np.zeros((shpc[1:] + (shpc[0], )))
        for u in range(PC_UXYZ.shape[-1]):
            P_XYZU[:, :, :, u] = np.multiply(PC_UXYZ[u, :, :, :], P)
        # entropy H(U) of the channel output marginal
        Hu = entropy_(pr.marginal(P_XYZU, tuple(range(len(P_XYZU.shape)))[:-1]))
        # get the bound on intrInf of X;Y|ZU with detChannel
        # (note: noIterInner is currently not forwarded to the inner bound)
        val = MCupperBoundIntrinInfMPDet(P_XYZU, dimBZU)
        # candidate value I = intrInf bound + H(U)
        I = val + Hu
        # keep the minimum over the sampled channels
        if I < minVal:
            minVal = I
        if verbose:
            print("[MCupperBoundRedIntrinInfDet_] intrInf = %f, E_U = %f" % (val, Hu))
    return minVal
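# A minimal usage sketch for the bound above (an illustration, not a prescribed
# workflow): the input behaviour, the choice dimU = dimBZU = 2 and the iteration
# counts are assumptions made for this example only.
def _exampleRedIntrinInfDetBound():
    """Hedged sketch: evaluate the bound on a tripartite marginal of FourPDstrb."""
    import bhvs as bv  # assumed to be importable alongside this module
    P_XYZ = pr.marginal(bv.FourPDstrb(), 3)  # trace out the fourth party
    bound = MCupperBoundRedIntrinInfDet_(P_XYZ, dimU=2, dimBZU=2,
                                         noIterOuter=10, noIterInner=10,
                                         verbose=True)
    print("reduced intrinsic information bound:", bound)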
def allTests(P, inIter=10, outIter=10):
    """ performs all tests on a tripartite behaviour """
    results = []
    # get marginal for X,Y
    l = len(P.shape)
    t = tuple(np.arange(2, l))
    m = pr.marginal(P, t)
    # calculate mutual information of the marginal X,Y
    mutInf = inf.mutInf(m)
    results.append(mutInf)
    # print('#Done mutInf')
    intrInf = inf.MCupperBoundIntrinInf(P, inIter)
    results.append(intrInf)
    # print('#Done intrInf')
    redIntrInf = inf.MCupperBoundRedIntrinInf_(P, inIter, outIter)
    results.append(redIntrInf)
    # print('#Done redIntrInf')
    # redIntrInf = 0.0
    # quantum part. very slow.
    rho = qm.PrToRho(m)
    dims = tuple(int(np.sqrt(d)) for d in np.shape(rho))
    ppt = qm.ppt(rho, dims) == 1
    # ppt = False
    results.append(ppt)
    # trace with witness
    # trace = 0.0
    trace = np.trace(np.matmul(rho, qm.wtns44()))
    results.append(trace)
    return results
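# A minimal usage sketch for allTests (illustrative input and iteration counts;
# the order of the returned entries follows the appends above:
# [mutInf, intrInf, redIntrInf, ppt, trace]).
def _example_allTests():
    """Hedged sketch: run all tests on one tripartite behaviour."""
    import bhvs as bv  # assumed to be importable alongside this module
    P = pr.marginal(bv.FourPDstrb(), 3)  # trace out the fourth party
    mutInf, intrInf, redIntrInf, ppt, trace = allTests(P, inIter=5, outIter=5)
    print("I(X;Y) =", mutInf, " intrInf <=", intrInf, " redIntrInf <=", redIntrInf)
    print("PPT:", ppt, " witness trace:", trace)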
def processInput(dim):
    # build the behaviour for alphabet size `dim` and trace out the fourth party
    P = bv.FourPDistribN(dim)
    P = pr.marginal(P, 3)
    # uniform distribution of the same shape, used as the endpoint of the path
    uniform = np.ones_like(P)
    uniform = pr.normalize(uniform)
    # print("# X,Y range: {0}\t\tBhv shape: {1}".format(dim, P.shape))
    return alys.PtestInfoAlongPath(P, uniform, iter=iter)
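# A usage sketch for processInput (an assumption about the intended call
# pattern): the module-level `iter` used as the number of path steps inside
# processInput must be defined before calling, e.g. iter = 25. The helper is
# also a natural fit for parallel mapping, e.g. with joblib:
#   from joblib import Parallel, delayed
#   Parallel(n_jobs=-1)(delayed(processInput)(dim) for dim in range(4, endDim))
def _example_processInput_sweep(endDim=6):
    """Hedged sketch: run processInput sequentially for a few dimensions."""
    return [processInput(dim) for dim in range(4, endDim)]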
def condMutInf_(P, dimX, dimY, dimZ):
    """
    Evaluates I(X;Y|Z) for the joint probability P.

    Uses the definition of the conditional mutual information
        I(X;Y|Z) = H(X,Z) + H(Y,Z) - H(X,Y,Z) - H(Z)
    Works for any dimensions of P, where the first two dimensions are
    interpreted as X,Y and all the remaining ones are grouped as Z.

    Input:
        P    : array-like probability distribution
        dimX : index of X in the array
        dimY : index of Y in the array
        dimZ : index (or indices) of the remaining component(s) forming Z
               (can be a tuple)
    """
    # Z is implicitly everything except dimX and dimY; dimZ is kept for documentation
    Pxz = pr.marginal(P, dimY)
    Pyz = pr.marginal(P, dimX)
    Pz = pr.marginal(P, (dimX, dimY))
    res = entropy_(Pxz) + entropy_(Pyz) - entropy_(P) - entropy_(Pz)
    return res
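# A small self-contained check for condMutInf_ (a sketch; it assumes entropy_
# works in bits, i.e. log base 2). With X = Y a uniform bit and Z an
# independent uniform bit, I(X;Y|Z) = H(X) = 1 bit.
def _example_condMutInf():
    """Hedged sketch: conditional mutual information of a toy distribution."""
    P = np.zeros((2, 2, 2))
    for x in range(2):
        for z in range(2):
            P[x, x, z] = 0.25  # X = Y uniform, Z independent and uniform
    print("I(X;Y|Z) =", condMutInf_(P, 0, 1, 2))  # expected: 1.0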
def allInfoTests(P, inIter=10, outIter=10):
    """ performs information theoretic tests on a tripartite behaviour """
    results = []
    # get marginal for X,Y
    l = len(P.shape)
    t = tuple(np.arange(2, l))
    m = pr.marginal(P, t)
    # calculate mutual information of the marginal X,Y
    mutInf = inf.mutInf(m)
    results.append(mutInf)
    # print('#Done mutInf')
    intrInf = inf.MCupperBoundIntrinInf(P, inIter)
    results.append(intrInf)
    # print('#Done intrInf')
    # redIntrInf = inf.MCupperBoundRedIntrinInf_(P, inIter, outIter)
    # note: outIter is not used with the XDD variant below
    redIntrInf = inf.MCupperBoundRedIntrinInfXDD(P, 2, 4)
    results.append(redIntrInf)
    return results
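# A minimal usage sketch for allInfoTests (illustrative input; unlike allTests,
# this variant returns only the three information quantities
# [mutInf, intrInf, redIntrInf]).
def _example_allInfoTests():
    """Hedged sketch: information-theoretic tests on one tripartite behaviour."""
    import bhvs as bv  # assumed to be importable alongside this module
    P = pr.marginal(bv.FourPDstrb(), 3)  # trace out the fourth party
    mutInf, intrInf, redIntrInf = allInfoTests(P, inIter=5)
    print("I(X;Y) =", mutInf, " intrInf <=", intrInf, " redIntrInf <=", redIntrInf)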
import bhvs as bv
import info as inf
import numpy as np
import prob as pr
import time
import matplotlib.pyplot as plt

# Test conditional mutual information
if False:
    P = bv.FourPDstrb()
    print("Test CondMutInfo")
    start = time.time()
    print(inf.condMutInf(pr.marginal(P, 3)))
    end = time.time()
    print("time Arne: %.8f" % (end - start))
    start = time.time()
    print(inf.condMutInf_(pr.marginal(P, 3), 0, 1, 2))
    end = time.time()
    print("time Mio: %.8f" % (end - start))
    print("---")

# Test channels
if False:
    PC = inf.randChannel(3, 2)
    print(PC.shape)
    print(PC)
    P = bv.FourPDstrb()
    print(P.shape)
    print(inf.applyChannel(P, PC, (2)).shape)
    print(inf.applyChannel(P, PC, (3)).shape)
import numpy as np
import prob as pr
import bhvs as bv
import analysis as alys

# compute `iter` steps towards the uniform distribution
# test for different ranges up to endDim
iter = 25
endDim = 10

for dim in range(4, endDim):
    P = bv.FourPDistribN(dim)
    P = pr.marginal(P, 3)
    uniform = np.ones_like(P)
    uniform = pr.normalize(uniform)
    print("# X,Y range: {0}\t\tBhv shape: {1}".format(dim, P.shape))
    alys.testInfoAlongPath(P, uniform, iter=iter)
    print()  # empty line
import prob as pr
import bhvs as bv
import info as inf
import numpy as np

P = bv.FourPDstrb()
P1 = bv.FourPDstrb2()

print('# Compare different methods')
print(np.subtract(P, P1).max())
print()  # new line

P_XY = pr.marginal(P, (2, 3))
P_XYU = pr.marginal(P, 2)

print('# P_XY_ZU:')
pr.PrintFourPDstrb(P)
# pr.PrintFourPDstrb(P1)

print('# P_(X,Y):')
print(np.sum(bv.ThreePDstrb(), axis=2))
print()  # new line
pr.PrintThreePDstrb(bv.ThreePDstrb())

print('# Noise functions:')
pr.PrintThreePDstrb(bv.ThreePNoise1())
pr.PrintThreePDstrb(bv.ThreePNoise2())
pr.PrintThreePDstrb(bv.ThreePNoise3())
pr.PrintThreePDstrb(bv.ThreePUniformNoise())
# pr.PrintThreePDstrb( pr.mixBhvs( bv.ThreePDstrb(), bv.ThreePNoise2(), .1))
import numpy as np
import prob as pr
import quantum as qm
import bhvs as bv

# Get some entangled vector
psi = np.multiply(1. / np.sqrt(2), np.identity(4)[:, 0] + np.identity(4)[:, 3])
rho = qm.proj(psi)
print(rho)
print(qm.pt(rho, (2, 2)))
print(qm.ppt(rho, (2, 2)))

# Transfer the probability distribution to a state and check the witness
rho1 = qm.PrToRho(pr.marginal(bv.ThreePDstrb(), 2))

print("Witness from paper")
print(qm.wtns44())
print("QmState from paper")
print(qm.rho_a())
print(rho1)

alpha = .5
threshold = -2. * (np.sqrt(2.) - 1.) / (2. + alpha)
print(np.trace(np.matmul(qm.rho_a(alpha), qm.wtns44())))
print("Val from paper: " + str(threshold))

alpha = .05
threshold = -2. * (np.sqrt(2.) - 1.) / (2. + alpha)
print(np.trace(np.matmul(qm.rho_a(alpha), qm.wtns44())))
print("Val from paper: " + str(threshold))

print(np.trace(np.matmul(rho1, qm.wtns44())))

# TO BE TESTED: that rho_a coincides with the transferred state...