def elementwise_multiply(a: MatrixVector2D, b: matrix) -> MatrixVector2D:
    """Scale each component of the 2-D vector field *a* element-wise by *b*.

    Raises ValueError when the shapes of *a* and *b* differ.
    """
    if a.shape != b.shape:
        raise ValueError('MatrixVector2D and matrix shapes not the same.')
    return MatrixVector2D(multiply(a.x, b), multiply(a.y, b))
def _svm_error_rate(datMat, labelArr, sVs, labelSV, alphas, svInd, b, kTrup):
    """Return the fraction of rows of datMat misclassified by the trained RBF-SVM."""
    m = shape(datMat)[0]
    errorCount = 0
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], kTrup)
        predict = kernelEval.T * multiply(labelSV, alphas[svInd]) + b
        if sign(predict) != sign(labelArr[i]):
            errorCount += 1
    return float(errorCount) / m


def testDigits(kTrup=('rbf', 10)):
    """Train an RBF-kernel SVM on handwritten-digit images and report error rates.

    Parameters:
        kTrup: kernel description tuple (kernel name, kernel parameter) passed
               through to smoP/kernelTrans.

    Side effects: prints the support-vector count, the training error and the
    test error.
    """
    # NOTE(review): training images come from an absolute Windows path while the
    # test images use the relative path 'testDigits' — confirm both are reachable.
    trainingDigits = 'F:\\panrui\\我的桌面\\learning file\\machinelearninginaction\\Ch06\\trainingDigits'
    dataArr, labelArr = loadImages(trainingDigits)
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, kTrup)
    datMat = mat(dataArr)
    labelMat = mat(labelArr).transpose()
    # only the support vectors (alpha > 0) contribute to predictions
    svInd = nonzero(alphas.A > 0)[0]
    sVs = datMat[svInd]
    labelSV = labelMat[svInd]
    print("there are %d Support Vectors " % shape(sVs)[0])
    trainErr = _svm_error_rate(datMat, labelArr, sVs, labelSV, alphas, svInd, b, kTrup)
    print("the training error is: %f" % trainErr)
    dataArr, labelArr = loadImages('testDigits')
    datMat = mat(dataArr)
    testErr = _svm_error_rate(datMat, labelArr, sVs, labelSV, alphas, svInd, b, kTrup)
    # BUG FIX: this figure is measured on the held-out test images, but was
    # previously printed as "the training error".
    print("the test error is: %f" % testErr)
def elementwise_multiply(a: MatrixVector, b: matrix) -> MatrixVector:
    """Scale each component of the vector field *a* element-wise by *b*.

    Raises ValueError when the shapes of *a* and *b* differ.
    """
    if a.shape == b.shape:
        x = multiply(a.x, b)
        y = multiply(a.y, b)
        z = multiply(a.z, b)
        return MatrixVector(x, y, z)
    else:
        # CONSISTENCY FIX: raise with the same message the sibling element-wise
        # helpers use, instead of a bare ValueError().
        raise ValueError('The shape of a and b need to be the same.')
def elementwise_multiply(a: MatrixVector, b: matrix) -> MatrixVector:
    """Multiply every component of *a* element-wise by the matrix *b*.

    Raises ValueError on shape mismatch.
    """
    if a.shape != b.shape:
        raise ValueError('The shape of a and b need to be the same.')
    return MatrixVector(multiply(a.x, b), multiply(a.y, b), multiply(a.z, b))
def velocity_matrix(ra: MatrixVector, rb: MatrixVector, rc: MatrixVector,
                    betm: float = 1.0, tol: float = 1e-12):
    """Influence matrices of a horseshoe vortex (bound leg A-B plus two
    trailing legs) evaluated at control points rc.

    Parameters:
        ra, rb: vortex end points (same shape, one point per column).
        rc: control points (one point per row).
        betm: compressibility scale applied to the x components.
        tol: entries with |a x b| <= tol are treated as singular and zeroed.

    Returns (veli, vela, velb): bound-vortex, trailing-A and trailing-B
    induced-velocity MatrixVectors.
    """
    if ra.shape != rb.shape:
        # BUG FIX: the ValueError was previously *returned* instead of raised,
        # so mismatched inputs silently produced an exception object.
        raise ValueError('ra and rb must have the same shape.')
    numi = rc.shape[0]
    numj = ra.shape[1]
    # broadcast end points against control points: rows = points, cols = panels
    ra = ra.repeat(numi, axis=0)
    rb = rb.repeat(numi, axis=0)
    rc = rc.repeat(numj, axis=1)
    a = rc - ra
    b = rc - rb
    a.x = a.x / betm
    b.x = b.x / betm
    am = a.return_magnitude()
    bm = b.return_magnitude()
    # Velocity from Bound Vortex
    adb = elementwise_dot_product(a, b)
    abm = multiply(am, bm)
    dm = multiply(abm, abm + adb)
    axb = elementwise_cross_product(a, b)
    axbm = axb.return_magnitude()
    # collinear geometry (|a x b| within tol of zero) is singular; removed the
    # dead exact-zero mask that this tolerance test immediately replaced
    chki = logical_and(axbm >= -tol, axbm <= tol)
    veli = elementwise_multiply(axb, divide(am + bm, dm))
    veli.x[chki] = 0.0
    veli.y[chki] = 0.0
    veli.z[chki] = 0.0
    # Velocity from Trailing Vortex A
    axx = MatrixVector(zeros(a.shape, dtype=float), a.z, -a.y)
    axxm = axx.return_magnitude()
    chka = (axxm == 0.0)
    vela = elementwise_divide(axx, multiply(am, am - a.x))
    vela.x[chka] = 0.0
    vela.y[chka] = 0.0
    vela.z[chka] = 0.0
    # Velocity from Trailing Vortex B
    bxx = MatrixVector(zeros(b.shape, dtype=float), b.z, -b.y)
    bxxm = bxx.return_magnitude()
    chkb = (bxxm == 0.0)
    velb = elementwise_divide(bxx, multiply(bm, bm - b.x))
    velb.x[chkb] = 0.0
    velb.y[chkb] = 0.0
    velb.z[chkb] = 0.0
    return veli, vela, velb
def phi_source_matrix(am, bm, dab, rl, phid):
    """Source-panel velocity potential in panel-local coordinates.

    Parameters:
        am, bm: distances from the evaluation points to the panel edge ends.
        dab: edge length.
        rl: local position object with .y and .z component matrices.
        phid: doublet potential term combined into the source potential.

    Returns (phis, Qab): the source potential and the edge log term
    Qab = log((am+bm+dab)/(am+bm-dab)), with Qab = 0 where the
    denominator vanishes (point on the edge extension).
    """
    numrab = am + bm + dab
    denrab = am + bm - dab
    # BUG FIX (robustness): divide only where the denominator is non-zero, so
    # no divide-by-zero warnings/inf are produced; degenerate entries are then
    # pinned to 1 exactly as before, making log(Pab) = 0 there.
    chkd = denrab == 0.0
    Pab = divide(numrab, denrab, where=~chkd)
    Pab[chkd] = 1.0
    Qab = log(Pab)
    tmps = multiply(rl.y, Qab)
    phis = -multiply(rl.z, phid) - tmps
    return phis, Qab
def phi_doublet_matrix(vecs: MatrixVector, rls: MatrixVector, sgnz: matrix):
    """Doublet-panel potential terms in panel-local coordinates.

    Returns (phids, mags): the doublet potential contribution and the
    magnitudes of *vecs*. Entries with rls.y == 0 fall back to piby2.
    """
    mags = vecs.return_magnitude()
    slope = divide(vecs.x, rls.y)
    on_axis = rls.y == 0.0
    ths = arctan(slope)
    ths[on_axis] = piby2
    Js = arctan(multiply(slope, divide(rls.z, mags)))
    Js[on_axis] = piby2
    return Js - multiply(sgnz, ths), mags
def phi_source_matrix(am, bm, dab, rl, phid):
    """Source-panel velocity potential in panel-local coordinates.

    Returns (phis, Qab) where Qab = log((am+bm+dab)/(am+bm-dab)); entries
    whose denominator is within tol of zero get Qab = 0.
    """
    numerator = am + bm + dab
    denominator = am + bm - dab
    ratio = divide(numerator, denominator)
    ratio[absolute(denominator) < tol] = 1.0
    Qab = log(ratio)
    phis = -multiply(rl.z, phid) - multiply(rl.y, Qab)
    return phis, Qab
def vel_doublet_matrix(av, am, bv, bm):
    """Bound-vortex induced velocity from edge vectors av, bv with
    magnitudes am, bm.

    Returns the induced-velocity MatrixVector, zeroed where the evaluation
    point is collinear with the edge (|av x bv| within tol of zero).
    """
    adb = elementwise_dot_product(av, bv)
    abm = multiply(am, bm)
    dm = multiply(abm, abm + adb)
    axb = elementwise_cross_product(av, bv)
    axbm = axb.return_magnitude()
    # CLEANUP: removed the dead exact-zero mask (`axbm == 0.0`) that was
    # immediately overwritten by this tolerance-based test.
    chki = logical_and(axbm >= -tol, axbm <= tol)
    velvl = elementwise_multiply(axb, divide(am + bm, dm))
    velvl.x[chki] = 0.0
    velvl.y[chki] = 0.0
    velvl.z[chki] = 0.0
    return velvl
def backPropagate_(self):
    '''Back-Propagate errors (and node responsibilities).'''
    # output-layer responsibility: error times tanh'(a3) = 1 - a3**2
    delta_out = multiply(-(self.xs__ - self.a3__), 1 - power(self.a3__, 2))
    resp_out = multiply(delta_out, self.xs__)
    # hidden-layer responsibility propagated through weights2_
    delta_hid = multiply(resp_out * self.weights2_, 1 - power(self.a2__, 2))
    resp_hid = multiply(delta_hid, self.a2__)
    # average the per-example rows, then store column vectors
    n_out = resp_out.shape[0]
    n_hid = resp_hid.shape[0]
    self.d3__ = (np.sum(resp_out, 0) / n_out).T
    self.d2__ = (np.sum(resp_hid, 0) / n_hid).T
def vec_2_posterior(self, vec=None):
    """Return the posterior responsibilities of each Gaussian mixture
    component for a single input row vector.

    Parameters:
        vec: a 1 x dim numpy matrix.

    Returns a 1 x numMix matrix of normalized posterior probabilities.

    Raises:
        IOError: if the model parameters have not been loaded.
        TypeError: if vec is not a numpy matrix.
        IndexError: if vec is not a single row of the expected dimension.
    """
    # BUG FIX: `x == None` on numpy matrices broadcasts element-wise, which
    # makes the `or` chain raise "truth value is ambiguous"; identity tests
    # are required here.
    if self.mixPriors is None or self.gaussCoeffs is None or \
            self.mus is None or self.sig_inv is None:
        raise IOError("Model parameters not loaded properly")
    # IDIOM FIX: isinstance instead of comparing type(vec).__module__ strings.
    if not isinstance(vec, np.matrix):
        raise TypeError(
            "Input vector not of type numpy.matrixlib.defmatrix")
    numVec, vecLen = vec.shape
    if numVec != 1:
        raise IndexError("Only one vector allowed at a time")
    if vecLen != self.dim:
        raise IndexError(
            "nMer vector dimension doesn't agree with gmm dimension")
    gaussP = matlib.zeros(shape=[1, self.numMix])
    for bI in range(self.numMix):
        x = vec - self.mus[bI, :]
        # Mahalanobis quadratic form; extract the scalar explicitly rather
        # than assigning a 1x1 matrix into an element (deprecated in numpy).
        quad = (x * self.sig_inv[bI] * x.transpose())[0, 0]
        gaussP[0, bI] = self.gaussCoeffs[0, bI] * np.exp(-0.5 * quad)
    # normalize the prior-weighted likelihoods into a posterior
    postNorm = 1 / (gaussP * self.mixPriors.transpose())
    postVec = postNorm[0, 0] * matlib.multiply(gaussP, self.mixPriors)
    return postVec
def inf(self, x, meanonly=False):
    """Gaussian-process posterior inference at the test inputs *x*.

    Parameters:
        x: test inputs; coerced to a matrix and transposed if supplied as
           (d, n) instead of (n, d).
        meanonly: when True, return only the posterior mean.

    Returns (mean, variance) column matrices, or just the mean when
    meanonly is True.

    Raises:
        Exception: if neither axis of x matches the input dimension self.d.
    """
    x = np.asmatrix(x)
    if x.shape[1] != self.d:
        # accept a transposed test set when it is unambiguous
        if x.shape[0] == self.d:
            x = x.T
        else:
            raise Exception('Invalid test-set dimension -- '
                            'expected d = ' + str(self.d) + '.')
    n = x.shape[0]
    # Handle empty test set
    if n == 0:
        return (np.zeros((0, 1)), np.zeros((0, 1)))
    # prior mean and prior (diagonal) covariance at the test points
    ms = self.kernel.mean*np.ones((n, 1))
    Kbb = self.kernel(x, diag=True)
    # Handle empty training set: posterior equals the prior
    if len(self) == 0:
        return (ms, np.asmatrix(Kbb))
    Kba = self.kernel(x, self.x)
    m = self.kernel.mean*np.ones((len(self), 1))
    # posterior mean via the Cholesky factor self.L of the training Gram matrix
    fm = ms + Kba*scipy.linalg.cho_solve((self.L, True), self.y - m,
                                         overwrite_b=True)
    if meanonly:
        return fm
    else:
        # posterior variance: prior diag minus the explained part Kba K^-1 Kab
        W = scipy.linalg.cho_solve((self.L, True), Kba.T)
        fv = np.asmatrix(Kbb - np.sum(np.multiply(Kba.T, W), axis=0).T)
        # W = np.asmatrix(scipy.linalg.solve(self.L, Kba.T, lower=True))
        # fv = np.asmatrix(Kbb - np.sum(np.power(W, 2), axis=0).T)
        return (fm, fv)
def vel_source_matrix(Qab, rl, phid):
    """Source-panel induced velocity in panel-local coordinates.

    The y component is -Qab; the z component is +/-phid with the sign
    flipped wherever the local z offset rl.z is non-zero.
    """
    velsl = zero_matrix_vector(Qab.shape, dtype=float)
    velsl.y = -Qab
    sign_z = ones(Qab.shape, dtype=float)
    sign_z[rl.z != 0.0] = -1.0
    velsl.z = multiply(sign_z, phid)
    return velsl
def vel_doublet_matrix(av, am, bv, bm):
    """Bound-vortex induced velocity from edge vectors av, bv with
    magnitudes am, bm, with tolerance guards on both singular cases.

    Returns the induced-velocity MatrixVector; entries where either
    |av x bv| or the denominator is within tol of zero are set to zero.
    """
    adb = elementwise_dot_product(av, bv)
    abm = multiply(am, bm)
    dm = multiply(abm, abm+adb)
    axb = elementwise_cross_product(av, bv)
    axbm = axb.return_magnitude()
    # CLEANUP: removed the dead exact-zero mask (`axbm == 0.0`) that was
    # immediately overwritten by this tolerance-based test.
    chki = logical_and(axbm >= -tol, axbm <= tol)
    chkd = absolute(dm) < tol
    # guarded division: fac stays 0 wherever the denominator is ~0
    fac = zeros(axbm.shape, dtype=float)
    divide(am+bm, dm, where=logical_not(chkd), out=fac)
    velvl = elementwise_multiply(axb, fac)
    velvl.x[chki] = 0.0
    velvl.y[chki] = 0.0
    velvl.z[chki] = 0.0
    return velvl
def run(self, epochs):
    '''Train the network for *epochs* passes over the training set.

    Each epoch runs NUM_BATCHES mini-batch iterations, then evaluates the
    RMS reconstruction error over the whole training set, printing progress
    and error after every epoch. Python 2 code (uses xrange).
    '''
    start_time = time.time()
    printProgress(0.0, start_time)
    num_examples = len(self.trainingSet_)
    batch_size = int(round(num_examples / self.NUM_BATCHES))
    for i in xrange(0, epochs):
        self.errors = 0.0
        for j in range(self.NUM_BATCHES):
            # slice out mini-batch j; the [ ... ][0] wrapping is a no-op
            batch = [self.trainingSet_[j * batch_size:j * batch_size + batch_size]][0]
            self.iteration_(matlib.distance_matrix(batch))
        # full-set evaluation: feed forward and compute per-example RMSE
        xs = matlib.distance_matrix(self.trainingSet_)
        output = self.feedForward_(xs)
        err = xs - output
        meanSq = np.sum(multiply(err, err),1) / err.shape[1]
        rmse = np.asarray(meanSq.T[0])[0] ** 0.5
        rmse = np.sum(rmse) / len(rmse)
        self.errors = rmse
        printProgress(float(i) / epochs, start_time)
        printErrors(self.errors)
    self.output()
    printProgress(1, start_time)
def phi_doublet_matrix(vecs: MatrixVector, sgnz: matrix):
    """Doublet-panel potential terms with tolerance-guarded divisions.

    Parameters:
        vecs: local position vectors from the panel edge.
        sgnz: sign matrix for the side of the panel plane.

    Returns (phids, mags): the doublet potential contribution and the
    magnitudes of *vecs*.
    """
    mags = vecs.return_magnitude()
    chkm = mags < tol
    chky = absolute(vecs.y) < tol
    # NOTE(review): this zeroes small y components of the *caller's* vecs in
    # place — confirm the mutation of the input is intended.
    vecs.y[chky] = 0.0
    # guarded divisions: masked entries keep the 0.0 initial value
    ms = zeros(mags.shape, dtype=float)
    divide(vecs.x, vecs.y, where=logical_not(chky), out=ms)
    ths = arctan(ms)
    ths[chky] = piby2
    ts = zeros(mags.shape, dtype=float)
    divide(vecs.z, mags, where=logical_not(chkm), out=ts)
    gs = multiply(ms, ts)
    Js = arctan(gs)
    Js[chky] = piby2
    phids = Js - multiply(sgnz, ths)
    return phids, mags
def vel_doublet_matrix(ov, om, faco):
    """Trailing-vortex induced velocity for the edge vector *ov* with
    magnitude *om*; *faco* flips the trailing direction.

    Entries where the rotated direction vector vanishes are zeroed.
    """
    ov = faco * ov
    oxx = MatrixVector(zeros(ov.shape), -ov.z, ov.y)
    singular = (oxx.return_magnitude() == 0.0)
    velol = elementwise_divide(oxx, multiply(om, om - ov.x))
    for comp in (velol.x, velol.y, velol.z):
        comp[singular] = 0.0
    return velol
def _backprop_hidden(nnet, evaluated):
    """Per-output-unit gradient contributions for the hidden-layer weights.

    Returns a list with one gradient matrix per output unit.
    """
    common = np.asmatrix(evaluated.inp * sigma_p(evaluated.sh).T)
    grads = []
    for k in xrange(nnet.n_out):
        tiled_w = np.asmatrix(m.repmat(nnet.W2[:nnet.n_hidden, k].T,
                                       common.shape[0], 1))
        scale = np.asmatrix(sigma_p(evaluated.so))[k, 0]
        grads.append(m.multiply(common, tiled_w) * scale)
    return grads
def phi_trailing_doublet_matrix(rls: MatrixVector, sgnz: matrix, faco: float):
    """Trailing-doublet potential terms; *faco* selects the branch used for
    points exactly on the y axis."""
    positive = rls.y > 0.0
    on_axis = rls.y == 0.0
    negative = rls.y < 0.0
    ths = zeros(rls.shape, dtype=float)
    ths[positive] = piby2
    ths[on_axis] = -piby2 * faco
    ths[negative] = -piby2
    Js = arctan(divide(rls.z, rls.y))
    Js[on_axis] = -piby2 * faco
    return Js - multiply(sgnz, ths)
def phi_trailing_doublet_matrix(rls: MatrixVector, sgnz: matrix):
    """Trailing-doublet potential terms with a tolerance guard on the y
    component.

    Returns the potential matrix phids; entries with |rls.y| < tol use the
    on-axis limit -piby2.
    """
    ths = zeros(rls.shape, dtype=float)
    chky = absolute(rls.y) < tol
    ths[rls.y > 0.0] = piby2
    ths[rls.y < 0.0] = -piby2
    ths[chky] = -piby2
    # guarded division: gs stays 0 where |y| < tol
    gs = zeros(rls.shape, dtype=float)
    divide(rls.z, rls.y, where=logical_not(chky), out=gs)
    Js = arctan(gs)
    # BUG FIX: use the same tolerance mask as the division guard. The exact
    # `rls.y == 0.0` test missed entries with 0 < |y| < tol, which kept
    # Js = arctan(0) = 0 there even though ths was already forced to -piby2.
    Js[chky] = -piby2
    phids = Js - multiply(sgnz, ths)
    return phids
def _backprop_hidden(nnet, evaluated):
    """Hidden-layer gradient contribution for each output unit.

    Returns a list of gradient matrices, one per output unit.
    """
    base = np.asmatrix(evaluated.inp * sigma_p(evaluated.sh).T)
    rows = base.shape[0]
    result = []
    for j in xrange(nnet.n_out):
        weights = np.asmatrix(m.repmat(nnet.W2[:nnet.n_hidden, j].T, rows, 1))
        deriv = np.asmatrix(sigma_p(evaluated.so))[j, 0]
        result.append(m.multiply(base, weights) * deriv)
    return result
def vel_trailing_doublet_matrix(ov, om, faco):
    """Trailing-doublet induced velocity with tolerance guards on both the
    direction vector and the denominator."""
    ov = faco * ov
    oxx = MatrixVector(zeros(ov.shape), -ov.z, ov.y)
    near_zero_dir = absolute(oxx.return_magnitude()) < tol
    den = multiply(om, om - ov.x)
    # guarded reciprocal: entries with a tiny denominator stay 0
    inv_den = zeros(ov.shape, dtype=float)
    reciprocal(den, where=logical_not(absolute(den) < tol), out=inv_den)
    velol = elementwise_multiply(oxx, inv_den)
    velol.x[near_zero_dir] = 0.0
    velol.y[near_zero_dir] = 0.0
    velol.z[near_zero_dir] = 0.0
    return velol
def elementwise_cross_product(a: MatrixVector, b: MatrixVector) -> MatrixVector:
    """Element-wise cross product of two vector fields of equal shape.

    Raises ValueError on shape mismatch.
    """
    if a.shape != b.shape:
        raise ValueError('The shape of a and b need to be the same.')
    cx = multiply(a.y, b.z) - multiply(a.z, b.y)
    cy = multiply(a.z, b.x) - multiply(a.x, b.z)
    cz = multiply(a.x, b.y) - multiply(a.y, b.x)
    return MatrixVector(cx, cy, cz)
def inf(self, x, meanonly=False):
    """Gaussian-process posterior inference at the test inputs *x*.

    Parameters:
        x: (n, d) test inputs; coerced to a matrix.
        meanonly: when True, return only the posterior mean.

    Returns (mean, variance) column matrices, or just the mean when
    meanonly is True.
    """
    x = np.asmatrix(x)
    # NOTE(review): input validation via assert is stripped under `python -O`;
    # consider raising instead.
    assert x.shape[1] == self.d
    n = x.shape[0]
    # Handle empty test set
    if n == 0:
        return (np.zeros((0, 1)), np.zeros((0, 1)))
    # prior mean and prior (diagonal) covariance at the test points
    ms = self.kernel.mean*np.ones((n, 1))
    Kbb = self.kernel(x, diag=True)
    # Handle empty training set: posterior equals the prior
    if len(self) == 0:
        return (ms, np.asmatrix(np.diag(Kbb)).T)
    Kba = self.kernel(x, self.x)
    m = self.kernel.mean*np.ones((len(self), 1))
    # posterior mean via the Cholesky factor self.L of the training Gram matrix
    fm = ms + Kba*scipy.linalg.cho_solve((self.L, True), self.y - m,
                                         overwrite_b=True)
    if meanonly:
        return fm
    else:
        # posterior variance: prior diag minus the explained part Kba K^-1 Kab
        W = scipy.linalg.cho_solve((self.L, True), Kba.T)
        fv = np.asmatrix(Kbb - np.sum(np.multiply(Kba.T, W), axis=0).T)
        # W = np.asmatrix(scipy.linalg.solve(self.L, Kba.T, lower=True))
        # fv = np.asmatrix(Kbb - np.sum(np.power(W, 2), axis=0).T)
        return (fm, fv)
def elementwise_dot_product(a: MatrixVector, b: MatrixVector) -> matrix:
    """Element-wise dot product of two vector fields of equal shape.

    Returns the matrix a.x*b.x + a.y*b.y + a.z*b.z.

    Raises ValueError on shape mismatch.
    """
    if a.shape == b.shape:
        return multiply(a.x, b.x) + multiply(a.y, b.y) + multiply(a.z, b.z)
    else:
        # CONSISTENCY FIX: raise with the same message the sibling element-wise
        # helpers use, instead of a bare ValueError().
        raise ValueError('The shape of a and b need to be the same.')
def backPropagate_(self):
    '''Back-Propagate errors (and node responsibilities).'''
    # output-layer responsibility: error times tanh'(a3) = 1 - a3**2
    out_error = -(self.xs__ - self.a3__)
    self.d3__ = multiply(out_error, 1 - power(self.a3__, 2))
    # hidden-layer responsibility propagated back through weights2_
    back = self.weights2_.T * self.d3__
    self.d2__ = multiply(back, 1 - power(self.a2__, 2))
def elementwise_dot_product(a: MatrixVector2D, b: MatrixVector2D) -> matrix:
    """Element-wise dot product of two 2-D vector fields of equal shape.

    Raises ValueError on shape mismatch.
    """
    if a.shape != b.shape:
        raise ValueError('MatrixVector2D shapes not the same.')
    return multiply(a.x, b.x) + multiply(a.y, b.y)
def proctomo(data, NumberOfIterations = 100, use_bell_basis=False):
    """Process tomography for an arbitrary number of ions.

    Params
        data: either a datafile string or a cprb matrix
        NumberOfIterations: Number of iterations for the maximum likelihood algorithm
        use_bell_basis: unused here — presumably consumed elsewhere; verify.

    Returns the reconstructed process (chi) matrix in the Pauli basis.
    """
    Paulis = getopbase()
    # NOTE(review): np.int is removed in numpy >= 1.24; this needs plain int
    # when numpy is upgraded.
    NumberOfIons = np.int(np.log2(data.shape[1]-1))
    RhoIn, Obs, ObsVal, NRho, NObs = LoadProctomData(data)
    # tic = time.time()
    # precompute rho^T (x) observable Kronecker products for all input/observable pairs
    RhoTObs = np.zeros((NRho*NObs,4**NumberOfIons,4**NumberOfIons),dtype=complex)
    TransposedRhoTObs = np.zeros((NRho*NObs,4**NumberOfIons,4**NumberOfIons),dtype=complex)
    for m in range(NRho):
        for n in range(NObs):
            tmp = npml.kron(RhoIn[m,:,:].transpose(),Obs[n,:,:])
            RhoTObs[m*NObs+n,:,:] = tmp
            TransposedRhoTObs[m*NObs+n,:,:] = tmp.transpose()
    del RhoIn, Obs, tmp
    # TransposedRhoTObs[m*NObs+n,:,:] = transpose(RhoTObs[m*NObs+n,:,:])
    # print(time.time()-tic,'seconds')
    # reserving some more memory space
    QOps = np.zeros((2**NumberOfIons,2**NumberOfIons,(2**NumberOfIons)**2))
    QOps2 = np.zeros((2**NumberOfIons,2**NumberOfIons,(2**NumberOfIons)**2), dtype = complex)
    AB = np.zeros((4**NumberOfIons,4**NumberOfIons), dtype = complex)
    AA = np.zeros((4**NumberOfIons,4**NumberOfIons), dtype = complex)
    v = np.eye(2**NumberOfIons);
    # Quantenoperatoren (matrix-unit operator basis |m><n|)
    for m in range(2**NumberOfIons):
        for n in range(2**NumberOfIons):
            QOps[:,:,n+2**NumberOfIons*m] = np.outer(v[:,m],v[:,n])
    # tensor products of Pauli operators indexed by base-4 digits of k
    for k in range(4**NumberOfIons):
        Op_tmp = 1
        for l in range(NumberOfIons):
            Op_tmp = npml.kron(Paulis[np.mod(k//4**l,4)],Op_tmp)
        QOps2[:,:,k]=Op_tmp
    # basis-change matrix C from the matrix-unit basis to the Pauli basis
    for m in range(4**NumberOfIons):
        for n in range(4**NumberOfIons):
            AB[m,n] = np.sum(QOps[:,:,m]*np.transpose(QOps2[:,:,n]))
            AA[m,n] = np.sum(QOps[:,:,m]*np.transpose(QOps[:,:,n]))
    C = np.dot(lnlg.inv(AB), AA)
    del AB, AA, QOps, QOps2
    # --------------------------
    dimH = 2**NumberOfIons
    dimK = dimH
    idH = np.eye(dimH)
    idK = np.eye(dimK)
    # maximally mixed starting point for the iteration
    S0 = 1.*npml.kron(idH,idK)/dimK
    Kstart = np.zeros((4**NumberOfIons,4**NumberOfIons))
    # --------------------------
    #print(RhoTObs.imag.max())
    ObsValCalc = np.zeros(NRho*NObs)
    #S = np.zeros(((2**NumberOfIons)**2,(2**NumberOfIons)**2))
    # tic = time.time()
    # maximum-likelihood fixed-point iteration on the Choi-like matrix S0
    for k in range(NumberOfIterations):
        # expected observable values under the current estimate
        ObsValCalc2 = np.real(np.sum(np.sum(npml.multiply(S0, TransposedRhoTObs), axis = 1), axis = 1))
        # for mn in range(NRho*NObs):
        #     ObsValCalc[mn] = np.sum(S0*np.transpose(RhoTObs[mn,:,:]))
        # alternative: this seems to be a factor of 2 faster for 2 ions, but
        # becomes a factor of two slower for 3 ions ...
        # S0_long = tile(S0,(NRho*NObs,1,1))
        # ObsValCalc = sum(sum(S0_long*TransposedRhoTObs,axis=2),axis=1)
        # tensordot(a,b,(0,0)) does something like sum_i a[i]*b[i,:,:]
        K = np.tensordot(ObsVal/ObsValCalc2,RhoTObs,(0,0))
        # lagrange multiplication
        lamquad = __ptrace(np.dot(K,np.dot(S0,K)),dimH,dimK,2)
        # here is some complain about real/imag definitions ...
        laminv = lnlg.inv(lnlg.sqrtm(lamquad))
        Laminv = npml.kron(laminv,idK)
        # new s-matrix
        S = np.dot(Laminv,np.dot(K,np.dot(S0,np.dot(K,Laminv))))
        S0 = S
    # print(time.time()-tic,'seconds')
    # calculate corresponding chi matrix
    # all the info is in the S matrix
    V = np.zeros(((2**NumberOfIons)**2,(2**NumberOfIons)**2))
    for q in range(2**NumberOfIons):
        for m in range(2**NumberOfIons):
            V[:,q+2**NumberOfIons*m] = npml.kron(v[:,q],v[:,m])
    Chi = np.zeros(((2**NumberOfIons)**2,(2**NumberOfIons)**2),dtype = complex)
    for p in range(4**NumberOfIons):
        for q in range(4**NumberOfIons):
            Chi[p,q] = np.dot(np.conjugate(np.transpose(V[:,p])),np.dot(S,V[:,q]))
    # rotate chi into the Pauli operator basis
    Chi_final = np.dot(C,np.dot(Chi,np.transpose(np.conjugate(C))))
    return Chi_final
def elementwise_cross_product(a: MatrixVector2D, b: MatrixVector2D) -> matrix:
    """Scalar (z-component) cross product of two 2-D vector fields.

    Raises ValueError on shape mismatch.
    """
    if a.shape != b.shape:
        raise ValueError('MatrixVector2D shapes not the same.')
    return multiply(a.x, b.y) - multiply(a.y, b.x)
def iterfun(data, NumberOfIterations, path=None):
    """Iterative maximum-likelihood quantum state tomography.

    Params
        data: a cprb matrix, a data object with a data_dict['cprb'] entry,
              or a time string loadable via rd.ReadData.
        NumberOfIterations: number of fixed-point iterations.
        path: optional data path forwarded to rd.ReadData.

    Returns the reconstructed, trace-normalized density matrix rho.
    """
    if type(data) == str:
        data = rd.ReadData(data, path=path)
    try:
        data = data.data_dict['cprb']
    except AttributeError:
        pass
    # tic=time.time()
    NumberRows = len(data[:, 1])
    NumberCols = len(data[1, :])
    # check if number of rows and columns fit
    # numberofcols= 2^NumberOfIons+1
    # numberrows=3^numberofions
    if not 3**np.log2(NumberCols - 1) == NumberRows:
        print("dataset not complete")
    # NOTE(review): np.int is removed in numpy >= 1.24; needs plain int on upgrade.
    NumberOfIons = np.int(np.log2(NumberCols - 1))
    probs = data[:, 1:]
    pulsecode = data[:, 0]
    NoExp = len(pulsecode)
    # pulses = double(dec2base(pulsecode,3,NoI))-double('0');
    # base-3 digits of each pulse code, one analysis setting per ion
    pulses = np.zeros((3**NumberOfIons, NumberOfIons), dtype=np.int)
    # pulses.shape=
    for i in range(3**NumberOfIons):
        pulses[i, :] = np.array(__dec2base(i, 3, NumberOfIons))
    #print(pulses)
    # pulsetable = reshape(kron(ones(1,2^NoI),pulses)',NoI,NoExp*2^NoI)';
    # first part kron(ones(1,2^NoI),pulses)' =
    # = mtlb.transpose(mtlb.kron(ones(2**NumberOfIons),pulses))
    # reshape = reshape(<whatever>,(NumberOfIons,3**NumberOfIons*2**NumberOfIons),order='F')
    a = npml.kron(np.ones(2**NumberOfIons, dtype=np.int), pulses).transpose()
    pulsetable = np.reshape(a, (NumberOfIons, 3**NumberOfIons * 2**NumberOfIons),
                            order='F').transpose()
    #print(pulsetable)
    # Now the experimental data are stored in the same way:
    # probs = reshape(probs',1,NoExp*2^NoI)';
    probs = np.reshape(probs.transpose(), (1, 3**NumberOfIons * 2**NumberOfIons),
                       order='F').transpose()
    probs = probs[:, 0]
    # For each experimental data point, a measurement of the D state has to be
    # labeled with +1, a measurement of the S-state with 0;
    # meas = (double(dec2bin(2^NoI-1:-1:0)')-double('0'))';
    # meastable = kron(ones(NoExp,1),meas);
    meas = np.zeros((2**NumberOfIons, NumberOfIons), dtype=np.int)
    k = 0
    for i in range(2**NumberOfIons - 1, -1, -1):
        meas[k, :] = np.array(__dec2base(i, 2, NumberOfIons))
        k += 1
    a = np.ones((3**NumberOfIons, 1), dtype=np.int)
    meastable = npml.kron(a, meas)
    #meastable = meastable + 2 * pulsetable + 1;
    #Ntable=length(meastable);
    # combine analysis setting and outcome into an index into the projector table P
    meastable += 2 * pulsetable
    Ntable = len(meastable)
    #Here are the corresponding projectors:
    #P(:,:,1) = [0;1]*[0;1]'; % -
    #P(:,:,2) = [1;0]*[1;0]'; % +
    #P(:,:,4) = [-1;1]*[-1;1]'/2; % -
    #P(:,:,3) = [1;1]*[1;1]'/2; % +
    #P(:,:,6) = [i;1]*[i;1]'/2; % -
    #P(:,:,5) = [-i;1]*[-i;1]'/2; % +
    P = np.zeros((6, 2, 2), dtype=complex)
    P[0, :, :] = np.outer(np.array([0, 1]), np.array([0, 1]))
    P[1, :, :] = np.outer(np.array([1, 0]), np.array([1, 0]))
    P[2, :, :] = np.outer(0.5 * np.array([-1, 1]), np.array([-1, 1]))
    P[3, :, :] = np.outer(0.5 * np.array([1, 1]), np.array([1, 1]))
    P[4, :, :] = np.outer(0.5 * np.array([1j, 1]), np.conjugate(np.array([1j, 1])))
    P[5, :, :] = np.outer(0.5 * np.array([-1j, 1]), np.conjugate(np.array([-1j, 1])))
    # about to start iterations ...
    rho = np.identity(2**NumberOfIons) / 2**NumberOfIons
    # AllOp=zeros(2^NoI,2^NoI,Ntable);
    # AllOp=zeros((Ntable,2**NumberOfIons,2**NumberOfIons),dtype=complex)
    AllOp = []
    # toc2=time.time()
    # print(toc2-tic,'seconds for initialisation have elapsed')
    # build the full measurement operators as tensor products of single-ion projectors
    AllOp2 = np.zeros((Ntable, 2**NumberOfIons, 2**NumberOfIons), dtype=complex)
    AllOpTransposed = np.zeros((Ntable, 2**NumberOfIons, 2**NumberOfIons), dtype=complex)
    for k in range(Ntable):
        ind = meastable[k, :].copy()
        Op = P[ind[0], :, :]
        for m in range(1, NumberOfIons):
            Op = npml.kron(Op, P[ind[m], :, :])
        AllOp.append(Op)
        AllOp2[k, :, :] = Op
        AllOpTransposed[k, :, :] = Op.transpose()
    # really starting with the iterations now
    # toc3=time.time()
    # print(toc3-toc2,'seconds for operator initalisation')
    ROp_start = np.zeros((2**NumberOfIons, 2**NumberOfIons), dtype=complex)
    list_probOp = np.zeros(Ntable)
    # R rho R fixed-point iteration of iterative ML tomography
    for i in range(NumberOfIterations):
        ROp = ROp_start.copy()
        # tr(rho * Op) for every operator at once via element-wise products
        list_probOp2 = np.sum(np.sum(npml.multiply(rho, AllOpTransposed),
                                     axis=1), axis=1)
        # for k in range(Ntable):
        #     Op=AllOp[k]
        #     # the following is tons faster because it relies on element wise
        #     # multiplication only
        #     probOp=(rho*Op.transpose()).sum()
        #     list_probOp[k] = probOp
        # okay. if probOp would be zero, it would be a problem. but i
        # never got a zero here, so i'll just skip the if
        #     if probOp > 0:
        #         ROp += probs[k]/probOp * Op
        # tensordot results in a factor of 2 faster evaluation of
        # the w4 data compared to the old loop approach
        # print((list_probOp2[0] -list_probOp[0]))
        ROp2 = np.tensordot(probs / list_probOp2, AllOp2, (0, 0))
        rho = np.dot(np.dot(ROp2, rho), ROp2)
        # rho=np.dot(np.dot(ROp,rho),ROp)
        rho /= rho.trace()
    # toc=time.time()
    # print(time.time()-toc3,' seconds for iterations')
    return rho
def iterfun(data,NumberOfIterations, path=None):
    """Iterative maximum-likelihood quantum state tomography (Python 2 version).

    Params
        data: a cprb matrix, a data object with a data_dict['cprb'] entry,
              or a time string loadable via rd.ReadData.
        NumberOfIterations: number of fixed-point iterations.
        path: optional data path forwarded to rd.ReadData.

    Returns the reconstructed, trace-normalized density matrix rho.
    """
    if type(data)== str:
        data = rd.ReadData(data, path=path)
    try:
        data = data.data_dict['cprb']
    except AttributeError:
        pass
    # tic=time.time()
    NumberRows=len(data[:,1])
    NumberCols=len(data[1,:])
    # check if number of rows and columns fit
    # numberofcols= 2^NumberOfIons+1
    # numberrows=3^numberofions
    if not 3**np.log2(NumberCols-1)==NumberRows:
        print("dataset not complete")
    # NOTE(review): np.int is removed in numpy >= 1.24; needs plain int on upgrade.
    NumberOfIons=np.int(np.log2(NumberCols-1))
    probs=data[:,1:]
    pulsecode=data[:,0]
    NoExp=len(pulsecode)
    # pulses = double(dec2base(pulsecode,3,NoI))-double('0');
    # base-3 digits of each pulse code, one analysis setting per ion
    pulses=np.zeros((3**NumberOfIons,NumberOfIons))
    # pulses.shape=
    for i in xrange(3**NumberOfIons):
        pulses[i,:]=np.array(__dec2base(i,3,NumberOfIons))
    #print(pulses)
    # pulsetable = reshape(kron(ones(1,2^NoI),pulses)',NoI,NoExp*2^NoI)';
    # first part kron(ones(1,2^NoI),pulses)' =
    # = mtlb.transpose(mtlb.kron(ones(2**NumberOfIons),pulses))
    # reshape = reshape(<whatever>,(NumberOfIons,3**NumberOfIons*2**NumberOfIons),'FORTRAN')
    a=npml.kron(np.ones(2**NumberOfIons),pulses).transpose()
    pulsetable = np.reshape(a,(NumberOfIons,3**NumberOfIons*2**NumberOfIons),'FORTRAN').transpose()
    #print(pulsetable)
    # Now the experimental data are stored in the same way:
    # probs = reshape(probs',1,NoExp*2^NoI)';
    probs=np.reshape(probs.transpose(),(1,3**NumberOfIons*2**NumberOfIons),'FORTRAN').transpose()
    probs=probs[:,0]
    # For each experimental data point, a measurement of the D state has to be
    # labeled with +1, a measurement of the S-state with 0;
    # meas = (double(dec2bin(2^NoI-1:-1:0)')-double('0'))';
    # meastable = kron(ones(NoExp,1),meas);
    meas=np.zeros((2**NumberOfIons,NumberOfIons))
    k=0
    for i in xrange(2**NumberOfIons-1,-1,-1):
        meas[k,:]=np.array(__dec2base(i,2,NumberOfIons),dtype=int)
        k+=1
    a=np.ones((3**NumberOfIons,1))
    meastable=npml.kron(a,meas)
    #meastable = meastable + 2 * pulsetable + 1;
    #Ntable=length(meastable);
    # combine analysis setting and outcome into an index into the projector table P
    meastable+= 2*pulsetable
    Ntable=len(meastable)
    #Here are the corresponding projectors:
    #P(:,:,1) = [0;1]*[0;1]'; % -
    #P(:,:,2) = [1;0]*[1;0]'; % +
    #P(:,:,4) = [-1;1]*[-1;1]'/2; % -
    #P(:,:,3) = [1;1]*[1;1]'/2; % +
    #P(:,:,6) = [i;1]*[i;1]'/2; % -
    #P(:,:,5) = [-i;1]*[-i;1]'/2; % +
    P=np.zeros((6,2,2),dtype=complex)
    P[0,:,:]=np.outer(np.array([0,1]),np.array([0,1]))
    P[1,:,:]=np.outer(np.array([1,0]),np.array([1,0]))
    P[2,:,:]=np.outer(0.5*np.array([-1,1]),np.array([-1,1]))
    P[3,:,:]=np.outer(0.5*np.array([1,1]),np.array([1,1]))
    P[4,:,:]=np.outer(0.5*np.array([1j,1]),np.conjugate(np.array([1j,1])))
    P[5,:,:]=np.outer(0.5*np.array([-1j,1]),np.conjugate(np.array([-1j,1])))
    # about to start iterations ...
    rho = np.identity(2**NumberOfIons)/2**NumberOfIons
    # AllOp=zeros(2^NoI,2^NoI,Ntable);
    # AllOp=zeros((Ntable,2**NumberOfIons,2**NumberOfIons),dtype=complex)
    AllOp=[]
    # toc2=time.time()
    # print toc2-tic,'seconds for initialisation have elapsed'
    # build the full measurement operators as tensor products of single-ion projectors
    AllOp2 = np.zeros((Ntable,2**NumberOfIons,2**NumberOfIons),dtype=complex)
    AllOpTransposed = np.zeros((Ntable,2**NumberOfIons,2**NumberOfIons),dtype=complex)
    for k in xrange(Ntable):
        ind=meastable[k,:].copy()
        Op=P[ind[0],:,:]
        for m in xrange(1,NumberOfIons):
            Op=npml.kron(Op,P[ind[m],:,:])
        AllOp.append(Op)
        AllOp2[k,:,:] = Op
        AllOpTransposed[k,:,:] = Op.transpose()
    # really starting with the iterations now
    # toc3=time.time()
    # print toc3-toc2,'seconds for operator initalisation'
    ROp_start=np.zeros((2**NumberOfIons,2**NumberOfIons),dtype=complex)
    list_probOp = np.zeros(Ntable)
    # R rho R fixed-point iteration of iterative ML tomography
    for i in xrange(NumberOfIterations):
        ROp=ROp_start.copy()
        # tr(rho * Op) for every operator at once via element-wise products
        list_probOp2 = np.sum(np.sum(npml.multiply(rho, AllOpTransposed), axis = 1), axis = 1)
        # for k in xrange(Ntable):
        #     Op=AllOp[k]
        #     # the following is tons faster because it relies on element wise
        #     # multiplication only
        #     probOp=(rho*Op.transpose()).sum()
        #     list_probOp[k] = probOp
        # okay. if probOp would be zero, it would be a problem. but i
        # never got a zero here, so i'll just skip the if
        #     if probOp > 0:
        #         ROp += probs[k]/probOp * Op
        # tensordot results in a factor of 2 faster evaluation of
        # the w4 data compared to the old loop approach
        # print (list_probOp2[0] -list_probOp[0])
        ROp2 = np.tensordot(probs/list_probOp2,AllOp2,(0,0))
        rho=np.dot(np.dot(ROp2,rho),ROp2)
        # rho=np.dot(np.dot(ROp,rho),ROp)
        rho/=rho.trace()
    # toc=time.time()
    # print time.time()-toc3,' seconds for iterations'
    return rho
def proctomo(data, NumberOfIterations = 100, use_bell_basis=False):
    """Process tomography for an arbitrary number of ions (Python 2 version).

    Params
        data: either a datafile string or a cprb matrix
        NumberOfIterations: Number of iterations for the maximum likelihood algorithm
        use_bell_basis: unused here — presumably consumed elsewhere; verify.

    Returns the reconstructed process (chi) matrix in the Pauli basis.
    """
    Paulis = getopbase()
    # NOTE(review): np.int is removed in numpy >= 1.24; needs plain int on upgrade.
    NumberOfIons = np.int(np.log2(data.shape[1]-1))
    RhoIn, Obs, ObsVal, NRho, NObs = LoadProctomData(data)
    # tic = time.time()
    # precompute rho^T (x) observable Kronecker products for all input/observable pairs
    RhoTObs = np.zeros((NRho*NObs,4**NumberOfIons,4**NumberOfIons),dtype=complex)
    TransposedRhoTObs = np.zeros((NRho*NObs,4**NumberOfIons,4**NumberOfIons),dtype=complex)
    for m in xrange(NRho):
        for n in xrange(NObs):
            tmp = npml.kron(RhoIn[m,:,:].transpose(),Obs[n,:,:])
            RhoTObs[m*NObs+n,:,:] = tmp
            TransposedRhoTObs[m*NObs+n,:,:] = tmp.transpose()
    del RhoIn, Obs, tmp
    # TransposedRhoTObs[m*NObs+n,:,:] = transpose(RhoTObs[m*NObs+n,:,:])
    # print time.time()-tic,'seconds'
    # reserving some more memory space
    QOps = np.zeros((2**NumberOfIons,2**NumberOfIons,(2**NumberOfIons)**2))
    QOps2 = np.zeros((2**NumberOfIons,2**NumberOfIons,(2**NumberOfIons)**2), dtype = complex)
    AB = np.zeros((4**NumberOfIons,4**NumberOfIons), dtype = complex)
    AA = np.zeros((4**NumberOfIons,4**NumberOfIons), dtype = complex)
    v = np.eye(2**NumberOfIons);
    # Quantenoperatoren (matrix-unit operator basis |m><n|)
    for m in xrange(2**NumberOfIons):
        for n in xrange(2**NumberOfIons):
            QOps[:,:,n+2**NumberOfIons*m] = np.outer(v[:,m],v[:,n])
    # tensor products of Pauli operators indexed by base-4 digits of k
    # (k/4**l relies on Python 2 integer division)
    for k in xrange(4**NumberOfIons):
        Op_tmp = 1
        for l in xrange(NumberOfIons):
            Op_tmp = npml.kron(Paulis[np.mod(k/4**l,4)],Op_tmp)
        QOps2[:,:,k]=Op_tmp
    # basis-change matrix C from the matrix-unit basis to the Pauli basis
    for m in xrange(4**NumberOfIons):
        for n in xrange(4**NumberOfIons):
            AB[m,n] = np.sum(QOps[:,:,m]*np.transpose(QOps2[:,:,n]))
            AA[m,n] = np.sum(QOps[:,:,m]*np.transpose(QOps[:,:,n]))
    C = np.dot(lnlg.inv(AB), AA)
    del AB, AA, QOps, QOps2
    # --------------------------
    dimH = 2**NumberOfIons
    dimK = dimH
    idH = np.eye(dimH)
    idK = np.eye(dimK)
    # maximally mixed starting point for the iteration
    S0 = 1.*npml.kron(idH,idK)/dimK
    Kstart = np.zeros((4**NumberOfIons,4**NumberOfIons))
    # --------------------------
    #print RhoTObs.imag.max()
    ObsValCalc = np.zeros(NRho*NObs)
    #S = np.zeros(((2**NumberOfIons)**2,(2**NumberOfIons)**2))
    # tic = time.time()
    # maximum-likelihood fixed-point iteration on the Choi-like matrix S0
    for k in xrange(NumberOfIterations):
        # expected observable values under the current estimate
        ObsValCalc2 = np.real(np.sum(np.sum(npml.multiply(S0, TransposedRhoTObs), axis = 1), axis = 1))
        # for mn in xrange(NRho*NObs):
        #     ObsValCalc[mn] = np.sum(S0*np.transpose(RhoTObs[mn,:,:]))
        # alternative: this seems to be a factor of 2 faster for 2 ions, but
        # becomes a factor of two slower for 3 ions ...
        # S0_long = tile(S0,(NRho*NObs,1,1))
        # ObsValCalc = sum(sum(S0_long*TransposedRhoTObs,axis=2),axis=1)
        # tensordot(a,b,(0,0)) does something like sum_i a[i]*b[i,:,:]
        K = np.tensordot(ObsVal/ObsValCalc2,RhoTObs,(0,0))
        # lagrange multiplication
        lamquad = __ptrace(np.dot(K,np.dot(S0,K)),dimH,dimK,2)
        # here is some complain about real/imag definitions ...
        laminv = lnlg.inv(lnlg.sqrtm(lamquad))
        Laminv = npml.kron(laminv,idK)
        # new s-matrix
        S = np.dot(Laminv,np.dot(K,np.dot(S0,np.dot(K,Laminv))))
        S0 = S
    # print time.time()-tic,'seconds'
    # calculate corresponding chi matrix
    # all the info is in the S matrix
    V = np.zeros(((2**NumberOfIons)**2,(2**NumberOfIons)**2))
    for q in xrange(2**NumberOfIons):
        for m in xrange(2**NumberOfIons):
            V[:,q+2**NumberOfIons*m] = npml.kron(v[:,q],v[:,m])
    Chi = np.zeros(((2**NumberOfIons)**2,(2**NumberOfIons)**2),dtype = complex)
    for p in xrange(4**NumberOfIons):
        for q in xrange(4**NumberOfIons):
            Chi[p,q] = np.dot(np.conjugate(np.transpose(V[:,p])),np.dot(S,V[:,q]))
    # rotate chi into the Pauli operator basis
    Chi_final = np.dot(C,np.dot(Chi,np.transpose(np.conjugate(C))))
    return Chi_final