def __init__(self, id, coverage_r, origin_x=0, origin_y=0, low_tx=0.5):
    tx_d = sp.add(sp.multiply(sp.subtract(coverage_r, 200), sp.random.uniform(low=low_tx)), 50)
    tx_angle = sp.multiply(2, sp.multiply(sp.pi, sp.random.rand(1, 1)))
    self.tx_x = sp.add(origin_x, sp.multiply(tx_d, sp.cos(tx_angle)))
    self.tx_y = sp.add(origin_y, sp.multiply(tx_d, sp.sin(tx_angle)))
    plt.scatter(self.tx_x, self.tx_y, s=20, c='blue', marker='o')
    plt.annotate(id, (self.tx_x + 10, self.tx_y + 10))
def testGauss(k1=1.3):
    dataArr, labelArr = loadData('testSetRBF.txt')
    # train and obtain the parameters
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, ('Gauss', k1))
    datMat = sp.mat(dataArr)
    labelMat = sp.mat(labelArr).transpose()
    svInd = sp.nonzero(alphas.A > 0)[0]
    # matrix of support vectors
    sVs = datMat[svInd]
    labelSV = labelMat[svInd]
    print("there are %d Support Vectors" % np.shape(sVs)[0])
    m, n = np.shape(datMat)
    errorCount = 0
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], ('Gauss', k1))
        predict = kernelEval.T * (sp.multiply(labelSV, alphas[svInd])) + b
        if np.sign(predict) != np.sign(labelArr[i]):
            errorCount += 1
    print("the training error rate is: %f" % (float(errorCount) / m))
    # see how the trained parameters do on new data
    dataArr, labelArr = loadData('testSetRBF2.txt')
    errorCount = 0
    datMat = sp.mat(dataArr)
    labelMat = sp.mat(labelArr).transpose()
    m, n = np.shape(datMat)
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], ('Gauss', k1))
        predict = kernelEval.T * (sp.multiply(labelSV, alphas[svInd])) + b
        if np.sign(predict) != np.sign(labelArr[i]):
            errorCount += 1
    print("the test error rate is: %f" % (float(errorCount) / m))
def generateGaborMotherWavelet(self):
    pitch = 440.0
    sigma = 6.
    NL = 48
    NU = 39
    print('sampling rate:', self.fs, 'Hz')
    fs = float(self.fs)
    self.sample_duration = 10.
    #asigma = 0.3
    limit_t = 0.1
    #zurashi = 1.
    #NS = NL + NU + 1
    f = sp.array([2**(i/12.) for i in range(NL+NU+1)]) * pitch*2**(-NL/12.)
    f = f[:, sp.newaxis]
    sigmao = sigma*10**(-3)*sp.sqrt(fs/f)
    t = sp.arange(-limit_t, limit_t+1/fs, 1/fs)
    inv_sigmao = sp.power(sigmao, -1)
    inv_sigmao_t = inv_sigmao * t
    t_inv_sigmao2 = sp.multiply(inv_sigmao_t, inv_sigmao_t)
    omega_t = 2*sp.pi*f*t
    gabor = (1/sp.sqrt(2*sp.pi))
    gabor = sp.multiply(gabor, sp.diag(inv_sigmao))
    # 1j is the imaginary unit (equivalent to the original sp.sqrt(-1))
    exps = -0.5*t_inv_sigmao2 + 1j*omega_t
    self.gabor = gabor*sp.exp(exps)
def energy(m1, m2, v1, v2):
    # total kinetic energy of two masses
    v1_norm_squared = sp.array([sp.power(v1[trial][0], 2) + sp.power(v1[trial][1], 2) for trial in range(len(v1))])
    v2_norm_squared = sp.array([sp.power(v2[trial][0], 2) + sp.power(v2[trial][1], 2) for trial in range(len(v2))])
    E1 = sp.multiply(0.5, sp.multiply(m1, v1_norm_squared))
    E2 = sp.multiply(0.5, sp.multiply(m2, v2_norm_squared))
    return sp.real(E1 + E2)
def __init__(self, fc, c_vel, alp_g, mu_los, mu_nlos, a, b, noise_var, hUAV, xUAV, yUAV, xUE, yUE):
    dist = sp.sqrt(sp.add(sp.square(sp.subtract(yUAV, yUE)),
                          sp.square(sp.subtract(xUAV, xUE))))
    R_dist = sp.sqrt(sp.add(sp.square(dist), sp.square(hUAV)))
    temp1 = sp.multiply(10, sp.log10(sp.power(fc*4*sp.pi*R_dist/c_vel, alp_g)))
    temp2 = sp.multiply(sp.subtract(mu_los, mu_nlos),
                        sp.divide(1, (1 + a*sp.exp(-b*sp.arctan(hUAV/dist) - a))))
    temp3 = sp.add(sp.add(temp1, temp2), mu_nlos)
    self.pathloss = sp.divide(sp.real(sp.power(10, -sp.divide(temp3, 10))), noise_var)
def __init__(self, id, coverage_r, origin_x=0, origin_y=0, low_tx=0.1):
    tx_d = sp.multiply(coverage_r, sp.random.uniform(low=low_tx))
    tx_angle = sp.multiply(0.5, sp.multiply(sp.pi, (sp.random.rand())))
    self.tx_x = sp.add(origin_x, sp.multiply(tx_d, sp.sin(tx_angle)))
    self.tx_y = sp.add(origin_y, sp.multiply(tx_d, sp.cos(tx_angle)))
    plt.scatter(self.tx_x, self.tx_y, s=20, c='red')
    plt.annotate(id, (self.tx_x + 10, self.tx_y + 10))
def update_image(original_im, ci_red, ci_green, ci_blue):
    # diagnostics = dict()
    original_im = scipy.transpose(original_im)
    # diagnostics['original_im'] = original_im
    # diagnostics['ci_red'] = ci_red
    # diagnostics['ci_green'] = ci_green
    # diagnostics['ci_blue'] = ci_blue
    new_r = scipy.multiply(original_im[0], original_im[0] > ci_red)
    new_g = scipy.multiply(original_im[1], original_im[1] > ci_green)
    new_b = scipy.multiply(original_im[2], original_im[2] > ci_blue)
    new_im = (new_r, new_g, new_b)
    new_im = scipy.transpose(new_im)
    # diagnostics['new_im'] = new_im
    # with open('/Users/lages/Documents/sauceda/pictures_processed/diagnostics'
    #           '.p', 'wb') as f:
    #     pickle.dump(diagnostics, f)
    return new_im
def update_image(original_im, ci_red, ci_green, ci_blue):
    ci_vec = sp.array((ci_red, ci_green, ci_blue))
    ci_matrix = sp.multiply(sp.ones(original_im.shape), ci_vec)
    new_im = sp.multiply(original_im, original_im > ci_matrix)
    return new_im
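# A minimal usage sketch (made-up image and thresholds) for the vectorized
# update_image above, assuming the legacy scipy namespace (sp.array, sp.ones)
# used throughout these snippets: channel values at or below their threshold
# are zeroed, the rest pass through unchanged.
import scipy as sp

image = sp.array([[[0.2, 0.8, 0.5],
                   [0.9, 0.1, 0.7]],
                  [[0.4, 0.6, 0.3],
                   [0.5, 0.5, 0.9]]])
print(update_image(image, ci_red=0.3, ci_green=0.5, ci_blue=0.6))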
def norm2(x):
    """compute |x|^2 = x*conjugate(x)"""
    if iscomplexobj(x):
        # t1=time.time()
        # mat1=x.real**2 + x.imag**2
        # t2=time.time()
        mat2 = multiply(x.real, x.real) + multiply(x.imag, x.imag)
        # t3=time.time()
        # print('---------------------------')
        # print('pow time='+str(t2-t1))
        # print('multiply time='+str(t3-t2))
        # print('pow time/multiply time='+str((t2-t1)/(t3-t2)))
        # print('shape(x)='+str(shape(x)))
        # print('x.typecode='+str(x.typecode()))
        # print('mat1.typecode='+str(mat1.typecode()))
        # print('mat2.typecode='+str(mat2.typecode()))
        # if len(shape(x))==1:
        #     print('type(x[0])='+str(type(x[0])))
        #     print('type(mat1[0])='+str(type(mat1[0])))
        #     print('type(mat2[0])='+str(type(mat2[0])))
        # else:
        #     print('type(x[0,0])='+str(type(x[0,0])))
        #     print('type(mat1[0,0])='+str(type(mat1[0,0])))
        #     print('type(mat2[0,0])='+str(type(mat2[0,0])))
        return mat2
    else:
        return multiply(x, x)
def testDigits(kTup=('normal', 10)):
    # essentially the same as testGauss
    dataArr, labelArr = loadImage('trainingDigits')
    b, alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, kTup)
    datMat = sp.mat(dataArr)
    labelMat = sp.mat(labelArr).transpose()
    svInd = sp.nonzero(alphas.A > 0)[0]
    # matrix of support vectors
    sVs = datMat[svInd]
    labelSV = labelMat[svInd]
    print("there are %d Support Vectors" % np.shape(sVs)[0])
    m, n = np.shape(datMat)
    errorCount = 0
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], kTup)
        predict = kernelEval.T * (sp.multiply(labelSV, alphas[svInd])) + b
        if np.sign(predict) != np.sign(labelArr[i]):
            errorCount += 1
    print("the training error rate is: %f" % (float(errorCount) / m))
    # see how the trained parameters do on new data
    dataArr, labelArr = loadImage('testDigits')
    errorCount = 0
    datMat = sp.mat(dataArr)
    labelMat = sp.mat(labelArr).transpose()
    m, n = np.shape(datMat)
    for i in range(m):
        kernelEval = kernelTrans(sVs, datMat[i, :], kTup)
        predict = kernelEval.T * (sp.multiply(labelSV, alphas[svInd])) + b
        if np.sign(predict) != np.sign(labelArr[i]):
            errorCount += 1
    print("the test error rate is: %f" % (float(errorCount) / m))
def __init__(self, id, coverage_r, origin_x=0, origin_y=0, low_tx=0.2):
    # uav_d = sp.add(sp.multiply(sp.subtract(coverage_r, 100), sp.random.rand(1, 1)), 50)
    uav_d = sp.add(sp.multiply(sp.subtract(coverage_r, 100), sp.random.uniform(low=low_tx)), 50)
    uav_angle = sp.multiply(2, sp.multiply(sp.pi, sp.random.rand(1, 1)))
    self.uav_x = sp.add(origin_x, sp.multiply(uav_d, sp.cos(uav_angle)))
    self.uav_y = sp.add(origin_y, sp.multiply(uav_d, sp.sin(uav_angle)))
    plt.scatter(self.uav_x, self.uav_y, s=40, c='red', marker='D')
    plt.annotate(id, (self.uav_x + 10, self.uav_y + 10))
def __init__(self, id, hon_size, ver_size, dis_ref):
    x_ref = sp.multiply(id, dis_ref) + sp.multiply(dis_ref, sp.random.rand(1))
    y_ref = sp.multiply(ver_size, sp.random.rand(1))
    self.tx_x = x_ref
    self.tx_y = y_ref
    plt.scatter(self.tx_x, self.tx_y, s=20, c='red')
    plt.annotate(id, (self.tx_x + 10, self.tx_y + 10))
def update_reservoir(self, u, n, Y):
    # u is the input at a specific time
    # u has shape (N_u,) (3 for L63)
    # See page 16, eqn 18 of Lukosevicius PracticalESN for feedback info.
    x_n_tilde = sp.tanh(sp.matmul(self.W, self.x[n])
                        + sp.matmul(self.W_in, sp.hstack((sp.array([1]), u)))
                        + sp.matmul(self.W_fb, Y))
    # TODO: Add derivative term?
    self.x[n+1] = sp.multiply((1 - self.alpha_matrix), self.x[n]) \
        + sp.multiply(self.alpha_matrix, x_n_tilde)
def y_momentum(m1, m2, v1, v2):
    # Total y momentum for two particles
    if (v1.shape)[-1] == 2:
        # if the last axis of v1 has size 2 (one component for x, one for y)
        v1_y = sp.array([v1[trial][1] for trial in range(len(v1))])
        v2_y = sp.array([v2[trial][1] for trial in range(len(v2))])
    else:
        # if the given v1 is already just v1_y
        v1_y = v1
        v2_y = v2
    p1 = sp.multiply(m1, v1_y)
    p2 = sp.multiply(m2, v2_y)
    return sp.real(p1 + p2)
def nandot(x1, x2):
    if len(x1.shape) == 1 and len(x2.shape) == 2:
        x1T = SP.tile(x1, [x2.shape[1], 1]).transpose()
        return SP.nansum(SP.multiply(x1T, x2), axis=0)
    elif len(x2.shape) == 1 and len(x1.shape) == 2:
        x2T = SP.tile(x2, [x1.shape[0], 1])
        return SP.nansum(SP.multiply(x1, x2T), axis=1)
    elif len(x1.shape) == 1 and len(x2.shape) == 1:
        return SP.nansum(SP.multiply(x1, x2))
    return None
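# Quick check (made-up data) for nandot above, assuming the legacy SP = scipy
# namespace used in the snippet: NaN entries are treated as missing and
# dropped from the sum, where a plain dot product would propagate them.
import scipy as SP

x = SP.array([1.0, SP.nan, 2.0])
M = SP.array([[1.0, 0.0],
              [1.0, 1.0],
              [0.0, 3.0]])
print(nandot(x, M))   # -> [1. 6.]  (the NaN row is ignored)
print(SP.dot(x, M))   # -> [nan nan]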
def Au(U, GF, EpsArr, NX, NY, NZ):
    """Returns the result of matrix-vector multiplication
    by the system matrix A=I-GX
    """
    # reshaping input vector into 4-D array
    Uarr = sci.reshape(U, (NX, NY, NZ, 3))
    # extended zero-padded arrays
    Uext = sci.zeros((2*NX, 2*NY, 2*NZ, 3), complex)
    Vext = sci.zeros((2*NX, 2*NY, 2*NZ, 3), complex)
    Jext = sci.zeros((2*NX, 2*NY, 2*NZ, 3), complex)
    JFext = sci.zeros((2*NX, 2*NY, 2*NZ, 3), complex)
    Uext[0:NX, 0:NY, 0:NZ, :] = Uarr
    # contrast current array
    s = 0
    while s <= 2:
        Jext[0:NX, 0:NY, 0:NZ, s] = Uext[0:NX, 0:NY, 0:NZ, s]*(EpsArr[0:NX, 0:NY, 0:NZ]-1.0)
        JFext[:, :, :, s] = fft.fftn(sci.squeeze(Jext[:, :, :, s]))
        s = s+1
    Vext[:, :, :, 0] = Uext[:, :, :, 0] - \
        fft.ifftn(sci.squeeze(sci.multiply(GF[:, :, :, 0, 0], JFext[:, :, :, 0]) +
                              sci.multiply(GF[:, :, :, 0, 1], JFext[:, :, :, 1]) +
                              sci.multiply(GF[:, :, :, 0, 2], JFext[:, :, :, 2])))
    Vext[:, :, :, 1] = Uext[:, :, :, 1] - \
        fft.ifftn(sci.squeeze(sci.multiply(GF[:, :, :, 1, 0], JFext[:, :, :, 0]) +
                              sci.multiply(GF[:, :, :, 1, 1], JFext[:, :, :, 1]) +
                              sci.multiply(GF[:, :, :, 1, 2], JFext[:, :, :, 2])))
    Vext[:, :, :, 2] = Uext[:, :, :, 2] - \
        fft.ifftn(sci.squeeze(sci.multiply(GF[:, :, :, 2, 0], JFext[:, :, :, 0]) +
                              sci.multiply(GF[:, :, :, 2, 1], JFext[:, :, :, 1]) +
                              sci.multiply(GF[:, :, :, 2, 2], JFext[:, :, :, 2])))
    # reshaping output into column vector
    V = sci.reshape(Vext[0:NX, 0:NY, 0:NZ, :], (NX*NY*NZ*3, 1))
    return V
def Au(U, GF, EpsArr, NX, NY, NZ):
    """Returns the result of matrix-vector multiplication
    by the system matrix A=I-GX
    """
    # reshaping input vector into 4-D array
    Uarr = sci.reshape(U, (NX, NY, NZ, 3))
    # extended zero-padded arrays
    Uext = sci.zeros((2 * NX, 2 * NY, 2 * NZ, 3), complex)
    Vext = sci.zeros((2 * NX, 2 * NY, 2 * NZ, 3), complex)
    Jext = sci.zeros((2 * NX, 2 * NY, 2 * NZ, 3), complex)
    JFext = sci.zeros((2 * NX, 2 * NY, 2 * NZ, 3), complex)
    Uext[0:NX, 0:NY, 0:NZ, :] = Uarr
    # contrast current array
    for s in range(3):
        Jext[0:NX, 0:NY, 0:NZ, s] = Uext[0:NX, 0:NY, 0:NZ, s] * (EpsArr[0:NX, 0:NY, 0:NZ] - 1.0)
        JFext[:, :, :, s] = fft.fftn(sci.squeeze(Jext[:, :, :, s]))
    Vext[:, :, :, 0] = Uext[:, :, :, 0] - \
        fft.ifftn(sci.squeeze(sci.multiply(GF[:, :, :, 0, 0], JFext[:, :, :, 0]) +
                              sci.multiply(GF[:, :, :, 0, 1], JFext[:, :, :, 1]) +
                              sci.multiply(GF[:, :, :, 0, 2], JFext[:, :, :, 2])))
    Vext[:, :, :, 1] = Uext[:, :, :, 1] - \
        fft.ifftn(sci.squeeze(sci.multiply(GF[:, :, :, 1, 0], JFext[:, :, :, 0]) +
                              sci.multiply(GF[:, :, :, 1, 1], JFext[:, :, :, 1]) +
                              sci.multiply(GF[:, :, :, 1, 2], JFext[:, :, :, 2])))
    Vext[:, :, :, 2] = Uext[:, :, :, 2] - \
        fft.ifftn(sci.squeeze(sci.multiply(GF[:, :, :, 2, 0], JFext[:, :, :, 0]) +
                              sci.multiply(GF[:, :, :, 2, 1], JFext[:, :, :, 1]) +
                              sci.multiply(GF[:, :, :, 2, 2], JFext[:, :, :, 2])))
    # reshaping output into column vector
    V = sci.reshape(Vext[0:NX, 0:NY, 0:NZ, :], (NX * NY * NZ * 3, 1))
    return V
def _calcVanillaOnlineGradient(self, sample, shapedfitnesses):
    invSigma = inv(self.sigma)
    phi = zeros(self.numDistrParams)
    phi[:self.numParameters] = self._logDerivX(sample, self.x, invSigma)
    logDerivSigma = self._logDerivFactorSigma(sample, self.x, invSigma, self.factorSigma)
    phi[self.numParameters:] = logDerivSigma.flatten()
    index = len(self.allSamples) % self.batchSize
    self.phiSquareWindow[index] = multiply(phi, phi)
    baseline = self._calcBaseline(shapedfitnesses)
    gradient = multiply((ones(self.numDistrParams) * shapedfitnesses[-1] - baseline), phi)
    return gradient
def european_option_rho(self):
    """Rho of the European call and put options.
    The vectorized method can compute rho for multiple options in an array."""
    numerator = sp.add(
        sp.log(sp.divide(self.spot_price, self.strike_price)),
        sp.multiply(
            (self.interest_rate - self.dividend_yield + 0.5*sp.power(self.sigma, 2)),
            self.time_to_maturity))
    d1 = sp.divide(
        numerator,
        sp.prod([self.sigma, sp.sqrt(self.time_to_maturity)], axis=0))
    d2 = sp.add(d1, -sp.multiply(self.sigma, sp.sqrt(self.time_to_maturity)))
    j = sp.product(
        [self.spot_price,
         self.time_to_maturity,
         sp.exp(sp.multiply(-self.interest_rate, self.time_to_maturity))],
        axis=0)
    c_rho = j * self.bls_erf_value(d2)
    p_rho = -j * self.bls_erf_value(-d2)
    return c_rho, p_rho
def sgrad(self, X, ndata=None):
    """Return a stochastic gradient at x.
    Projects the gradient in a uniformly random direction."""
    Nrandom = scipy.random.standard_normal(X.shape)
    norms = numpy.linalg.norm(Nrandom, axis=X.ndim - 1)
    if X.ndim > 1:
        Nrandom = Nrandom / norms.reshape(X.shape[0], 1)
    else:
        Nrandom = Nrandom / norms
    grad = self.boundgrad(self.grad(X), 1)
    ans = scipy.multiply(scipy.multiply(Nrandom, grad), Nrandom)
    return ans
def loss_to_pair(self, pair, gain=1e-3, exp_factor=sp.random.exponential(1), pl_exp=3):
    # Note: the default exp_factor is drawn once, when the function is defined,
    # not on every call; pass exp_factor explicitly for a fresh fading sample.
    dist = sp.sqrt(sp.add(sp.square(sp.subtract(self.tx_x, pair.rx_x)),
                          sp.square(sp.subtract(self.tx_y, pair.rx_y))))
    loss = sp.multiply(gain, sp.multiply(sp.square(exp_factor), sp.power(dist, -pl_exp)))
    return loss
def fit(self, train_pairs, verbose=False):
    n = len(train_pairs)
    if n == 0:
        raise NameError('Error: Train set is empty')
    else:
        if verbose:
            print('fit: Fitting a multiplicative model on %d pairs' % n)
        bases = [w for w, _ in train_pairs]
        derivs = [w for _, w in train_pairs]
        B = self.space.get_rows(bases).mat
        D = self.space.get_rows(derivs).mat
        DB = sp.multiply(B, D)
        BB = sp.multiply(B, B)
        self.mul_vector = DB.sum(axis=0) / (n * BB.sum(axis=0))
def hitting_times(eq_pi, Z, n=None):
    """
    Calculates the hitting times (mean first passage times) of each node
    from the equilibrium distribution of states.

    Three quantities are determined and returned:

    Ei(Ti) : Expected number of steps to return to the state 'i' if the
             Markov chain is started in the state 'i'.
    Ei(Tj) : Expected number of steps to reach state 'j' if the Markov
             chain is started in state 'i'.
    Eπ(Ti) : The hitting time of state 'i' if we start with the
             equilibrium state distribution π.

    These three quantities are given in terms of the equilibrium
    distribution of states and the fundamental matrix as follows:

    Ei(Ti) = 1 / (πi)
    Ei(Tj) = Ej(Tj).(Zjj - Zij)
    Eπ(Ti) = Ei(Ti).Zii
    """
    if n is None:
        n = eq_pi.size
    Ei_Ti = 1 / eq_pi
    # First, we calculate Zj, which is a matrix each of whose columns is
    # replaced by the diagonal element of that column
    Zj = sp.multiply(sp.ones((n, 1)), Z.diagonal().reshape((1, n)))
    # Next, we calculate Ej, a matrix each of whose rows is Ei_Ti, so that
    # Ej[i, j] = Ei_Ti[j].  (The original used sp.diag(Ei_Ti), which zeroes
    # the off-diagonal entries and fails the element-wise check below.)
    Ej = sp.multiply(sp.ones((n, 1)), Ei_Ti.reshape((1, n)))
    # Finally, Ei_Tj is simply the element-wise product of the two matrices:
    Ei_Tj = sp.multiply(Ej, Zj - Z)
    Ei_Tj_test = sp.empty((n, n))
    for i in range(n):
        for j in range(n):
            Ei_Tj_test[i][j] = Ei_Ti[j] * (Z[j][j] - Z[i][j])
    # FIXME
    if (Ei_Tj == Ei_Tj_test).all():
        print('It worked!')
    else:
        print('It didn\'t work :-/')
        #print (Ei_Tj - Ei_Tj_test)
    Epi_Ti = sp.multiply(Ei_Ti.reshape((1, n)), Z.diagonal().reshape((1, n)))
    return Ei_Ti, Ei_Tj, Epi_Ti
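# A small worked example (assumed setup) for hitting_times above, using the
# Kemeny-Snell fundamental matrix Z = inv(I - P + W), where every row of W is
# the stationary distribution pi; the 2-state chain below is made up.
import scipy as sp
from scipy.linalg import inv

P = sp.array([[0.9, 0.1],
              [0.5, 0.5]])
pi = sp.array([5.0/6.0, 1.0/6.0])            # stationary: pi @ P == pi
W = sp.multiply(sp.ones((2, 1)), pi.reshape((1, 2)))
Z = inv(sp.eye(2) - P + W)
Ei_Ti, Ei_Tj, Epi_Ti = hitting_times(pi, Z)
print(Ei_Ti)    # expected return times 1/pi = [1.2, 6.0]
print(Ei_Tj)    # mean first passage times between states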
def _calcVanillaBatchGradient(self, samples, shapedfitnesses):
    invSigma = inv(self.sigma)
    phi = zeros((len(samples), self.numDistrParams))
    phi[:, :self.numParameters] = self._logDerivsX(samples, self.x, invSigma)
    logDerivFactorSigma = self._logDerivsFactorSigma(samples, self.x, invSigma, self.factorSigma)
    phi[:, self.numParameters:] = array(logDerivFactorSigma)
    Rmat = outer(shapedfitnesses, ones(self.numDistrParams))
    # optimal baseline
    self.phiSquareWindow = multiply(phi, phi)
    baselineMatrix = self._calcBaseline(shapedfitnesses)
    gradient = sum(multiply(phi, (Rmat - baselineMatrix)), 0)
    return gradient
def BodefromTwoTimeDomainVectors(timevector, output, input, truncfreq=100):
    """This function calculates the Bode response between two time domain
    signals.  The timevector is used to calculate the frequency vector,
    which is then used to truncate the Bode response to reduce calculation
    time and return only useful information.  Input and output are time
    domain vectors.  The return values are freq, magnitude ratio, phase,
    complex.  The goal of this function is to be useful for small amounts
    of data and/or as part of a routine to calculate a Bode response from
    fixed sine data."""
    N = len(timevector)
    f = makefreqvect(timevector)
    co = thresh_py(f, truncfreq)
    f = f[0:co]
    curin_fft = fft(input, None, 0)*2/N
    curout_fft = fft(output, None, 0)*2/N
    curin_fft = curin_fft[0:co]
    curout_fft = curout_fft[0:co]
    curGxx = norm2(curin_fft)
    curGyy = norm2(curout_fft)
    curGxy = scipy.multiply(scipy.conj(curin_fft), curout_fft)
    H = scipy.divide(curGxy, curGxx)
    Hmag = abs(H)
    Hphase = mat_atan2(scipy.imag(H), scipy.real(H))*180.0/pi
    return f, Hmag, Hphase, H
def bondOrientation2sh(atoms, basis, l, neighbs=None, rcut=None, debug=False):
    atoms = array(atoms)
    basis = array(basis)
    atoms = rectify(atoms, basis)
    if neighbs is None:
        bounds = [[0, basis[0][0]], [0, basis[1][1]], [0, basis[2][2]]]
        if rcut is None:
            rcut = generateRCut(atoms, basis, debug=debug)
            #print "Automatically generating r-cutoff=",rcut
        neighbs = secondShell(neighbors(atoms, bounds, rcut))
    # sum the spherical harmonic over every neighbor pair
    a = 4*np.pi / (2*l + 1.)
    Ql = list()
    for i, ineighbs in enumerate(neighbs):
        n = len(ineighbs)
        shij = np.vectorize(complex)(zeros(2*l + 1))  # spherical harmonic for bond i-j
        for j in ineighbs:
            shij += pairSphereHarms(atoms[i], minImageAtom(atoms[i], atoms[j], basis), l)/n
        shi = a * sum(scipy.real(scipy.multiply(shij, scipy.conj(shij))))
        Ql.append(shi**0.5)
    return Ql, rcut
def solve_v(self, s8y):
    fl = s8y.flatten()
    self._v = fl / fl.sum()
    v = copy.copy(self._e)
    step = 0

    def write_current_matrix():
        f = open("%s/%s_%03d.v" % (temporary_directory(), self._debug_matrix_file, step), "w")
        x = v.reshape(len(self._p1.modules), len(self._p2.modules))
        for i in range(len(self._p1.modules)):
            for j in range(len(self._p2.modules)):
                f.write("%f " % x[i, j])
            f.write("\n")
        f.close()

    while 1:
        if self._debug:
            write_current_matrix()
        new = self.step(v)
        r = v - new
        r = scipy.multiply(r, r)
        s = r.sum()
        if s < 0.0000001 and step >= 10:
            return v
        step += 1
        v = new
def reflect1(v, u, c):
    print("Reflect by vector math variant 1:")
    c = 0
    center_ = eT(center(len(v)))
    print("center_:", center_)
    print("v:", v)
    v = scipy.subtract(v, center_)
    print("v:", v)
    print("u:", u)
    print("c:", c)
    v_dot_u = scipy.dot(v, u)
    print("v_dot_u:", v_dot_u)
    v_dot_u_minus_c = scipy.subtract(v_dot_u, c)
    print("v_dot_u_minus_c:", v_dot_u_minus_c)
    u_dot_u = scipy.dot(u, u)
    print("u_dot_u:", u_dot_u)
    quotient = scipy.divide(v_dot_u_minus_c, u_dot_u)
    print("quotient:", quotient)
    subtrahend = scipy.multiply((2 * quotient), u)
    print("subtrahend:", subtrahend)
    reflection = scipy.subtract(v, subtrahend)
    print("reflection:", reflection)
    reflection = scipy.add(reflection, center_)
    print("reflection:", reflection)
    return reflection
def run(self, nmrData):
    length = len(nmrData.allFid[-1][0])
    nmrData.allFid.append(
        sp.multiply(nmrData.allFid[-1][:],
                    sp.exp(-nmrData.fidTime[:length] * self.lineBroadening * np.pi)))
def dmd_evolve_dask(X0, mu, Phi, t):
    """
    dmd_evolve_dask evolves the dmd components to time t starting from X0

    Args:
        X0  - dask.array of length d, the initial observation D[:,0]
        mu  - the dmd eigenvalues
        Phi - the dmd modes
        t   - an array of times
              #TODO for now the evolution is by increments of 1, should allow
              for a smaller timestep

    Returns:
        Psi - dask.array of dimensions r x t
    """
    # calculate starting point
    b = da.dot(pinv_SVD(Phi), X0)
    # rank
    r = Phi.shape[1]
    # initialize Psi
    Psi = np.zeros([r, len(t)], dtype='complex')
    #Psi = da.zeros([r,len(t)],chunks = (r,len(t)),dtype='complex')
    # evolve Psi
    for i, _t in enumerate(t):
        Psi[:, i] = multiply(power(mu, _t), b)
    return Psi
def european_option_delta(self):
    numerator = sp.add(
        sp.log(sp.divide(self.spot_price, self.strike_price)),
        sp.multiply(
            (self.interest_rate - self.dividend_yield + 0.5*sp.power(self.sigma, 2)),
            self.time_to_maturity))
    d1 = sp.divide(
        numerator,
        sp.prod([self.sigma, sp.sqrt(self.time_to_maturity)], axis=0))
    call_delta = self.bls_erf_value(d1)
    put_delta = call_delta - 1
    return call_delta, put_delta
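# Standalone sketch of the same Black-Scholes delta computation, assuming that
# bls_erf_value(x) above corresponds to the standard normal CDF N(x) and, like
# the method above, omitting the exp(-qT) dividend-discount factor; the
# numbers are illustrative only.
import scipy as sp
from scipy.stats import norm

S, K, r, q, sigma, T = 100.0, 95.0, 0.05, 0.0, 0.2, 1.0
d1 = (sp.log(S / K) + (r - q + 0.5 * sigma**2) * T) / (sigma * sp.sqrt(T))
call_delta = norm.cdf(d1)
put_delta = call_delta - 1
print(call_delta, put_delta)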
def scale_manual(self, event, val=None):
    a = P4Rm()
    if val is not None:
        P4Rm.ParamDict['DW_multiplication'] = val
    P4Rm.ParamDict['dwp'] = multiply(a.ParamDict['dwp'],
                                     a.ParamDict['DW_multiplication'])
    pub.sendMessage(pubsub_Re_Read_field_paramters_panel, event=event)
def run(self, nmrData):
    print("LB: {} Hz".format(self.lineBroadening))
    length = len(nmrData.allFid[-1][0])
    nmrData.allFid.append(
        sp.multiply(nmrData.allFid[-1][:],
                    sp.exp(-nmrData.fidTimeForLB[:length] * self.lineBroadening * np.pi)))
def spectral_radius(net, typed=True, weighted=True):
    '''
    Spectral radius of the graph, defined as the eigenvalue of greatest modulus.

    Parameters
    ----------
    net : :class:`~nngt.Graph` or subclass
        Network to analyze.
    typed : bool, optional (default: True)
        Whether the excitatory/inhibitory type of the connections should be
        considered.
    weighted : bool, optional (default: True)
        Whether the weights should be taken into account.

    Returns
    -------
    the spectral radius as a float.
    '''
    weights = None
    if typed and "type" in net.graph.eproperties.keys():
        weights = net.eproperties["type"].copy()
    if weighted and "weight" in net.graph.eproperties.keys():
        if weights is not None:
            weights = sp.multiply(weights, net.graph.eproperties["weight"])
        else:
            weights = net.graph.eproperties["weight"].copy()
    mat_adj = adjacency(net.graph, weights)
    eigenval = [0]
    try:
        eigenval = spl.eigs(mat_adj, return_eigenvectors=False)
    except spl.eigen.arpack.ArpackNoConvergence as err:
        eigenval = err.eigenvalues
    # spectral radius = modulus of the largest eigenvalue
    return sp.amax(sp.absolute(eigenval))
def calc_E(self):
    load = self.loadDat()
    state_e = load[0]
    state_h = load[1]
    E_e = load[2]
    E_h = load[3]
    diff_E = []
    diff_st = []
    # we are considering e-h transitions only
    for i1 in range(len(E_e)):
        for i2 in range(len(E_h)):
            #diff=item2-item1
            diff = abs(E_h[i2] - E_e[i1])
            #if diff > 0 and diff > abs(E[i1]) and diff > abs(E[i2]):
            a = set([diff])   # built-in set replaces the Python 2 sets.Set
            b = set(diff_E)
            if not a.intersection(b):
                diff_E.append(diff)
                diff_st.append([state_e[i1], state_h[i2]])
    diff_E = sp.multiply(diff_E, 1000.0*na.ones(len(diff_E)))
    #print len(diff_E),len(diff_st)
    #print diff_E,diff_st
    #diff_E[0]=sp.true_divide(1240.0*na.ones(len(diff_E[0])),diff_E)
    out_E = list([diff_E, diff_st])
    out_E_T = zip(*out_E)
    # print 'out_E_T', out_E_T
    out_E_sort = sorted(out_E_T, key=itemgetter(0))
    #out_E.sort()
    print('sorted', out_E_sort)  #, diff_st[0]
    #print '\n\n\n'
    print('out sorted', out_E_sort[0][0])
    #print '\n\n\n'
    return out_E_sort
def solve_v(self, s8y):
    fl = s8y.flatten()
    self._v = fl / fl.sum()
    v = copy.copy(self._e)
    step = 0

    def write_current_matrix():
        f = open('%s/%s_%03d.v' % (tempfile.gettempdir(), self._debug_matrix_file, step), 'w')
        x = v.reshape(len(self._p1.modules), len(self._p2.modules))
        for i in range(len(self._p1.modules)):
            for j in range(len(self._p2.modules)):
                f.write('%f ' % x[i, j])
            f.write('\n')
        f.close()

    while 1:
        if self._debug:
            write_current_matrix()
        new = self.step(v)
        r = (v - new)
        r = scipy.multiply(r, r)
        s = r.sum()
        if s < 0.0000001 and step >= 10:
            return v
        step += 1
        v = new
def calc_E(self):
    filename = os.path.join(self.prefix, 'wf_spectrum_dot_kp8.dat')
    if not os.path.isfile(filename):
        print('ERROR: file %s not found\n' % (filename))
        sys.exit(1)
    #E=na.genfromtxt(filename,unpack=True)[1][1:]
    E = na.loadtxt(filename, skiprows=1, unpack=True)[1]
    #state=na.genfromtxt(filename,unpack=True)[0][1:]
    state = na.loadtxt(filename, skiprows=1, unpack=True)[0]
    diff_E = []
    diff_st = []
    for i1 in range(len(E)):
        for i2 in range(len(E)):
            #diff=item2-item1
            diff = abs(E[i2] - E[i1])
            #if diff > 0 and diff > abs(E[i1]) and diff > abs(E[i2]):
            a = set([diff])   # built-in set replaces the Python 2 sets.Set
            b = set(diff_E)
            if not a.intersection(b):
                diff_E.append(diff)
                diff_st.append([state[i1], state[i2]])
    diff_E = sp.multiply(diff_E, 1.0*na.ones(len(diff_E)))
    #print len(diff_E),len(diff_st)
    #print diff_E,diff_st
    #diff_E[0]=sp.true_divide(1240.0*na.ones(len(diff_E[0])),diff_E)
    out_E = list([diff_E, diff_st])
    out_E_T = zip(*out_E)
    out_E_sort = sorted(out_E_T, key=itemgetter(0))
    #out_E.sort()
    #print out_E_sort#, diff_st[0]
    #print diff_E
    return out_E_sort
def costFunctionReg(flattendTheta, X, y, lmbda):
    """
    Calculate the cost and gradient for logistic regression
    using regularization (helps with preventing overfitting
    with many features)
    """
    # the fmin-style optimizers only pass flattened arrays rather than
    # matrices, so the parameter vector has to be converted every time
    flattendTheta = sp.asmatrix(flattendTheta)
    (a, b) = flattendTheta.shape
    if a < b:
        theta = flattendTheta.T
    else:
        theta = flattendTheta
    m = sp.shape(y)[0]
    (J, grad) = costFunction(theta, X, y)
    # f is a filter vector that will disregard regularization for theta0
    f = sp.ones((theta.shape[0], 1))
    f[0, 0] = 0
    thetaFiltered = sp.multiply(theta, f)
    J = J + (lmbda/(2.0 * m)) * (thetaFiltered.T.dot(thetaFiltered))
    grad = grad + ((lmbda/m) * thetaFiltered).T
    return (J, grad)
def lineBroadening(self, fromPos, toPos, LB):
    """Applies line broadening of the given width (in Hz) to the FID. The
    resulting spectrum (after fft is called) will be the convolution of the
    original spectrum (fromPos) and a Lorentzian with full width at half
    maximum equal to LB."""
    self.checkToPos(toPos)
    #print("Len Fid Time: {}".format(len(self.fidTime)))
    #print("Len All Fid 0: {}".format(len(self.allFid[fromPos][0])))
    self.allFid[toPos] = sp.multiply(
        self.allFid[fromPos][:],
        sp.exp(-self.fidTime[:len(self.allFid[fromPos][0])]*LB*np.pi))
def scale_manual(self, event, val=None):
    a = P4Rm()
    if val is not None:
        P4Rm.ParamDict['strain_multiplication'] = val
    temp_1 = a.ParamDict['sp']
    temp_2 = a.ParamDict['strain_multiplication']
    P4Rm.ParamDict['sp'] = multiply(temp_1, temp_2)
    pub.sendMessage(pubsub_Re_Read_field_paramters_panel, event=event)
def _updateSigmas(self, updateSize, lastSample):
    for c in range(self.numberOfCenters):
        self.sigmas[c] *= (1. - updateSize[c])
        dif = self.mus[c] - lastSample
        if self.diagonalOnly:
            self.sigmas[c] += updateSize[c] * multiply(dif, dif)
        else:
            self.sigmas[c] += updateSize[c] * 1.2 * outer(dif, dif)
def _logDerivFactorSigma(self, sample, x, invSigma, factorSigma):
    logDerivSigma = 0.5 * dot(dot(invSigma, outer(sample - x, sample - x)), invSigma) - 0.5 * invSigma
    if self.vanillaScale:
        logDerivSigma = multiply(
            outer(diag(abs(self.factorSigma)), diag(abs(self.factorSigma))),
            logDerivSigma)
    return triu2flat(dot(factorSigma, (logDerivSigma + logDerivSigma.T)))
def rbf(inputs, centroids, weights):
    if len(inputs) > 0:
        icw = np.array([[inputs[i], centroids[i], weights[i]] for i in inputs.keys()])
        sw = np.absolute(np.subtract(icw[:, 0], icw[:, 1]))
        return np.exp(-10 * np.multiply(sw, icw[:, 2]).sum())  # /len(inputs))
    else:
        return 0
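# Minimal usage sketch for rbf above (made-up values, assuming numpy is
# imported as np in its module); inputs, centroids and weights are dicts
# sharing the same keys, and the result decays toward zero as the inputs
# move away from their centroids.
inputs    = {'x1': 0.9, 'x2': 0.4}
centroids = {'x1': 1.0, 'x2': 0.5}
weights   = {'x1': 1.0, 'x2': 0.5}
print(rbf(inputs, centroids, weights))              # ~0.22
print(rbf({'x1': 5.0}, {'x1': 1.0}, {'x1': 1.0}))   # ~0 (far from the centroid)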
def affine(x, t):
    X = scipy.vstack((x.T, scipy.ones(x.shape[0])))
    T = scipy.array([[1.0, 0.0, t[0]],
                     [0.0, 1.0, t[1]],
                     [1.0, 1.0, 1.0]])
    Rx = scipy.array([[scipy.cos(t[2]), -scipy.sin(t[2])],
                      [scipy.sin(t[2]), scipy.cos(t[2])]])
    T[:2, :2] = Rx
    temp = scipy.dot(T, X)[:2, :].T
    return scipy.multiply(temp, t[3])
def __kullback_leibler(h1, h2):  # 36.3 us
    """
    The actual KL implementation. @see kullback_leibler() for details.
    Expects the histograms to be of type scipy.ndarray.
    """
    result = h1.astype(scipy.float_)
    mask = h1 != 0
    result[mask] = scipy.multiply(h1[mask], scipy.log(h1[mask] / h2[mask]))
    return scipy.sum(result)
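# Quick sketch (made-up histograms) for __kullback_leibler above: two
# normalized histograms over the same bins; identical inputs give 0 and the
# value grows as they diverge. Bins where h1 == 0 contribute nothing.
import scipy

h1 = scipy.array([0.5, 0.3, 0.2, 0.0])
h2 = scipy.array([0.4, 0.4, 0.1, 0.1])
print(__kullback_leibler(h1, h1))   # -> 0.0
print(__kullback_leibler(h1, h2))   # -> ~0.16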
def __minowski_low_positive_integer_p(h1, h2, p=2):  # 11..43 us for p = 1..24 \w 100 bins
    """
    A faster implementation of the Minkowski distance for positive integers p < 25.
    @note do not use this function directly, but the general @link minowski() method.
    @note the passed histograms must be scipy arrays.
    """
    mult = scipy.absolute(h1 - h2)
    dif = mult
    for _ in range(p - 1):
        dif = scipy.multiply(dif, mult)
    return math.pow(scipy.sum(dif), 1./p)
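# Sanity-check sketch (made-up histograms) for the repeated-multiplication
# Minkowski distance above: it should agree with the direct formulation
# (sum |h1 - h2|**p) ** (1/p).
import math
import scipy

h1 = scipy.array([3.0, 0.0, 1.0, 4.0])
h2 = scipy.array([1.0, 2.0, 2.0, 0.0])
p = 3
fast = __minowski_low_positive_integer_p(h1, h2, p)
direct = math.pow(scipy.sum(scipy.absolute(h1 - h2) ** p), 1.0 / p)
print(fast, direct)   # both ~4.33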
def diffmat(x):
    n = sp.size(x)
    e = sp.ones((n, 1))
    Xdiff = sp.outer(x, e) - sp.outer(e, x) + sp.identity(n)
    xprod = -reduce(mul, Xdiff)
    W = sp.outer(1/xprod, e)
    D = W/sp.multiply(W.T, Xdiff)
    d = 1 - sum(D)
    for k in range(0, n):
        D[k, k] = d[k]
    return -D.T
def diffmat(x):
    # x is an ordered array of grid points
    n = sp.size(x)
    e = sp.ones((n, 1))
    Xdiff = sp.outer(x, e) - sp.outer(e, x) + sp.identity(n)
    xprod = -reduce(mul, Xdiff)  # product of rows
    W = sp.outer(1/xprod, e)
    D = W/sp.multiply(W.T, Xdiff)
    d = 1 - sum(D)
    for k in range(0, n):  # set diagonal elements
        D[k, k] = d[k]
    return -D.T
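# Usage sketch for diffmat above, assuming its module provides mul (from
# operator) and, on Python 3, reduce (from functools): on n grid points the
# matrix differentiates polynomials of degree < n exactly, so D @ x**2 == 2x.
import scipy as sp

x = sp.array([0.0, 0.3, 0.7, 1.0])
D = diffmat(x)
print(sp.dot(D, x**2))   # -> [0.0, 0.6, 1.4, 2.0], i.e. 2*x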
def kValues(self, tn, yn, h):
    # Initialise an empty vector k of the same length as b and init tnew
    A = self.A
    b = self.b
    c = self.c
    k = [[0. * i * j for j in range(len(yn))] for i in range(len(b))]
    tnew = 0
    ynew = scipy.zeros(len(yn))
    lincombinatie = ynew
    for i in range(len(b)):
        tnew = tn + c[i]*h
        ynew = scipy.zeros(len(yn))
        lincombinatie = scipy.zeros(len(yn))
        for j in range(i):
            prod = scipy.multiply(A[i][j]*h, k[j])
            lincombinatie = scipy.add(lincombinatie, prod)
        ynew = scipy.add(yn, lincombinatie)
        k[i] = scipy.multiply(1, self.ode.f(tnew, ynew))
        #k[i] = scipy.multiply(h,self.ode.f(tnew,ynew))
    return k
def mmul_diag(Adiag, B, act_right=True, out=None):
    if act_right:
        assert B.shape[0] == Adiag.shape[0]
    else:
        assert B.shape[1] == Adiag.shape[0]
    assert Adiag.ndim == 1
    assert B.ndim == 2

    if act_right:
        if out is None:
            out = sp.empty((Adiag.shape[0], B.shape[1]),
                           dtype=sp.promote_types(Adiag.dtype, B.dtype))
        out = out.T
        sp.multiply(Adiag, B.T, out)
        out = out.T
    else:
        if out is None:
            out = sp.empty((B.shape[0], Adiag.shape[0]),
                           dtype=sp.promote_types(Adiag.dtype, B.dtype))
        sp.multiply(Adiag, B, out)

    return out
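# Quick check (made-up inputs) for mmul_diag above: it applies a diagonal
# matrix from the left (act_right=True) or the right (act_right=False)
# without ever forming the dense diagonal matrix.
import scipy as sp

d = sp.array([1.0, 2.0, 3.0])
B = sp.arange(12.0).reshape(3, 4)
print(sp.allclose(mmul_diag(d, B), sp.dot(sp.diag(d), B)))          # True
print(sp.allclose(mmul_diag(sp.ones(4), B, act_right=False), B))    # True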
def predict(self, time=None):
    """
    This function takes the (list of) date(s) and returns the prediction
    in a timeseriesframe

    :param time: the specific date of the weight
    :type time: datetime.date
    :return: TimeSeriesFrame of estimates
    :rtype: TimeSeriesFrame<double>
    """
    pre = TimeSeriesFrame(
        scipy.multiply(self.X, self.est.data).sum(axis=1),
        self.respond.rheader,
        self.respond.cheader)
    if time is None:
        return pre
    elif isinstance(time, date):
        return pre[time]
    else:
        raise TypeError("time is not in datetime.date format")
def orthogonal(n):
    """Generate a random orthogonal 'n' dimensional matrix, using the
    technique described in:
    Francesco Mezzadri, "How to generate random matrices from the classical
    compact groups"
    """
    n = int(n)
    z = sc.randn(n, n)
    q, r = sc.linalg.qr(z)
    d = sc.diagonal(r)
    ph = d/sc.absolute(d)
    q = sc.multiply(q, ph, q)
    return q
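# Quick check (assumed usage) for orthogonal above, assuming scipy.linalg is
# available as sc.linalg (the function calls sc.linalg.qr): the result should
# satisfy Q^T Q = I up to rounding.
import scipy as sc
import scipy.linalg

Q = orthogonal(4)
print(sc.allclose(sc.dot(Q.T, Q), sc.identity(4)))   # True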
def scroll_callback(self, event):
    if not event.inaxes:
        return
    a = P4Rm()
    if event.key == 'u' and event.button == 'up':
        temp = a.ParamDict['DW_multiplication'] + 0.01
        P4Rm.ParamDict['DW_multiplication'] = temp
    elif event.key == 'u' and event.button == 'down':
        temp = a.ParamDict['DW_multiplication'] - 0.01
        P4Rm.ParamDict['DW_multiplication'] = temp
    P4Rm.ParamDict['dwp'] = multiply(a.ParamDictbackup['dwp'],
                                     a.ParamDict['DW_multiplication'])
    pub.sendMessage(pubsub_Re_Read_field_paramters_panel, event=event)