def cluster(self, tokens, assign_clusters=False, trace=False):
    """Cluster the feature vectors of *tokens*.

    The tokens' 'FEATURES' vectors are optionally normalised and
    optionally reduced with SVD before being handed to the subclass's
    cluster_vectorspace().  If *assign_clusters* is true, each token is
    then classified into its cluster via self.classify().

    Note: Python 2 semantics — map() returns a list, which the SVD branch
    relies on when indexing vectors[0].
    """
    assert chktype(1, tokens, [Token])
    assert chktype(2, assign_clusters, bool)
    assert chktype(3, trace, bool)
    assert len(tokens) > 0
    vectors = map(lambda tk: tk['FEATURES'], tokens)
    # normalise the vectors
    if self._should_normalise:
        vectors = map(self._normalise, vectors)
    # use SVD to reduce the dimensionality
    if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
        # decompose the (features x tokens) matrix and keep only the
        # first _svd_dimensions singular values/vectors
        [u, d, vt] = numarray.linear_algebra.singular_value_decomposition(
            numarray.transpose(numarray.array(vectors)))
        S = d[:self._svd_dimensions] * \
            numarray.identity(self._svd_dimensions, numarray.Float64)
        T = u[:, :self._svd_dimensions]
        Dt = vt[:self._svd_dimensions, :]
        vectors = numarray.transpose(numarray.matrixmultiply(S, Dt))
        # kept so later vectors can be projected into the reduced space
        self._Tt = numarray.transpose(T)
    # call abstract method to cluster the vectors
    self.cluster_vectorspace(vectors, trace)
    # assign the tokens to clusters
    if assign_clusters:
        for token in tokens:
            self.classify(token)
def regress(X):
    """Least-squares linear regression.

    X is a numarray array of the form [Y|X]: its first column holds the
    dependent variable.  That column is copied out and replaced with a
    constant 1.0 (the intercept term), then the normal equations
    beta = (X'X)^-1 X' Y are solved.  Returns the coefficients beta_i.
    """
    design = N.array(X)
    response = design[:, 0].copy()
    design[:, 0] = 1.0  # intercept column
    Xt = N.transpose(design)
    solver = N.dot(NL.inverse(N.dot(Xt, design)), Xt)
    return N.dot(solver, response)
def stdForm(a, b):
    """Reduce the generalized eigenproblem a x = lam b x to standard form.

    Returns (h, transpose(L)) where h = L^-1 a L^-T (with b = L L^T the
    Choleski factorization) and transpose(L) maps standard-form
    eigenvectors z back to the original ones via x = L^-T z.
    """
    def invert(L):  # Inverts lower triangular matrix L in place
        n = len(L)
        for j in range(n - 1):
            L[j, j] = 1.0 / L[j, j]
            for i in range(j + 1, n):
                L[i, j] = -dot(L[i, j:i], L[j:i, j]) / L[i, i]
        L[n - 1, n - 1] = 1.0 / L[n - 1, n - 1]
    n = len(a)
    L = choleski(b)  # b = L L^T
    invert(L)        # L now holds L^-1
    # BUG FIX: previously multiplied by `b` here; the standard-form matrix
    # is built from the *inverted* Choleski factor: h = L^-1 a L^-T.
    h = matrixmultiply(L, matrixmultiply(a, transpose(L)))
    return h, transpose(L)
def __init__(self, parent, framesize = None, hopsize = 100, number_of_vectors_used = 15):
    """Build SVD-of-spectrogram feature vectors from *parent*.

    parent -- assumed to be a 2-D numarray whose columns are input
              vectors (sliced as parent[:, :number_of_vectors_used]) —
              TODO confirm orientation against the callers.
    framesize -- FFT frame length; if not given, derived as the largest
              power of two <= len(parent)/32, capped at 2**10.
    hopsize -- clamped to at most framesize/4.
    """
    if not framesize:
        lp = len(parent)
        #logfs = min(int(scipy.log2(lp / 32)), 10)
        log2 = lambda x: math.log(x) / math.log(2)
        logfs = min(int(log2(lp / 32)), 10)
        framesize = pow(2, logfs)
        print framesize
    if hopsize > framesize / 4:
        hopsize = framesize / 4
    self.hopsize = hopsize
    self.framesize = framesize
    #if len(parent) < framesize:
    #    self._delete_from_parents()
    #    raise UnderflowError, "Thingy is too small...\nHere I pull the plug in order to avoid a segfaulty thingy."
    input_array = parent
    self.feature_vectors = None
    interesting_parts = input_array[:, :number_of_vectors_used]
    interesting_parts = numarray.transpose(interesting_parts)
    # Per row: spectrogram -> SVD EOFs -> keep first 15 components,
    # accumulated column-wise into self.feature_vectors.
    for row in interesting_parts:
        specspectrum = calculate_spectrogram(row, framesize = framesize, hopsize = hopsize)
        import cPickle  # NOTE(review): imported but unused here — confirm
        z, lambdas, EOFs = svdeofs.svdeofs(specspectrum)
        print ".",
        s = z[:, :15]
        svd_fft_fft_vectors = numarray.transpose(s)
        #print "svd_eofs ok!"
        if self.feature_vectors is None:
            self.feature_vectors = numarray.transpose(svd_fft_fft_vectors)
        else:
            self.feature_vectors = numarray.concatenate((self.feature_vectors, numarray.transpose(svd_fft_fft_vectors)), 1)
        #print "svd_fft_fft_vectors shapes:", shape(svd_fft_fft_vectors), shape(self.feature_vectors)
    # NOTE(review): the final summary SVD below is placed *outside* the
    # loop; the flattened original was ambiguous — confirm intended scope.
    z, lambdas, EOFs = svdeofs.svdeofs(self.feature_vectors)
    self.feature_vectors = z[:, :15]
    self.arr = self.feature_vectors
def get_residual(pcanode, pts):
    """Frobenius norm of the part of *pts* not captured by the PCA model.

    Projects *pts* onto the principal components held by *pcanode*,
    reconstructs them via pcanode.v, and returns the norm of the
    orthogonal remainder.
    """
    scores = pcanode.execute(pts, pcanode.get_output_dim())
    reconstruction = mat(scores) * mat(transpose(pcanode.v))
    leftover = pts - array(reconstruction)  # orthogonal complement of projection onto PCs
    partial = sqrt(sum(leftover**2))
    return sqrt(sum(partial**2))  # ?!
def get_residual(pcanode, pts):
    # NOTE(review): byte-for-byte duplicate of the other get_residual
    # definition in this file — consider keeping only one copy.
    """Return the norm of the part of *pts* not captured by the PCA
    projection held in *pcanode*."""
    pvals = pcanode.execute(pts, pcanode.get_output_dim())
    pproj = mat(pvals)*mat(transpose(pcanode.v))
    res = pts-array(pproj)  # orthogonal complement of projection onto PCs
    resval_part = sqrt(sum(res**2))
    resval = sqrt(sum(resval_part**2))  # ?!
    return resval
def normalize(self, dim=-1):
    """Normalise the CPT in place.

    With dim == -1 (or a 1-d table) the whole table is scaled so that all
    entries sum to 1.  Otherwise the axis associated with *dim* is
    normalised, so that sum(Pr(x=i|Pa(x))) = 1 for every value i and each
    fixed assignment of the parents Pa(x).
    """
    if dim == -1 or len(self.cpt.shape) == 1:
        self.cpt /= self.cpt.sum()
        return
    axis = self.assocdim[dim]
    # swap the target axis to the front, normalise, then swap it back
    perm = list(range(len(self.names_list)))
    perm[0], perm[axis] = axis, 0
    swapped = na.transpose(self.cpt, perm)
    totals = na.resize(na.sum(swapped, axis=0), swapped.shape)
    self.cpt = na.transpose(swapped / totals, perm)
def _castCopyAndTranspose(type, *arrays, **kwargs):
    """Transpose each input array and return a copy cast to *type*.

    Intended as a drop-in replacement for
    LinearAlgegra2._castCopyAndTranspose.  The optional ``indices``
    keyword gives the axis permutation to apply.  A single input yields a
    single array; several inputs yield a list.
    """
    indices = kwargs.get("indices")
    results = []
    for arr in arrays:
        if indices is None:
            flipped = na.transpose(arr)
        else:
            flipped = na.transpose(arr, indices)
        if flipped.type() == type:
            results.append(flipped.copy())
        else:
            results.append(flipped.astype(type))
    if len(results) == 1:
        return results[0]
    return results
def testEq(self):
    """Tables must compare equal across identical data, raw arrays and
    transposed axis order — and unequal across different shapes."""
    base = Table(['a','b'],[2,3],range(6),'Float32')
    same = Table(['a','b'],[2,3],range(6),'Float32')
    flat = Table(['a'],[6],range(6),'Float32')
    raw = na.arange(6,shape=(2,3))
    swapped = Table(['b','a'],[3,2], na.transpose(base.cpt))
    ok = (base == same
          and not base == flat
          and base == raw
          and base == swapped and swapped == base)
    assert ok, "__eq__ does not work"
def testUnion(self):
    """ test Union between two Tables """
    left = Table(['a','b','c','d'],[2,3,4,5],range(2*3*4*5))
    right = Table(['c','b','e'],[4,3,6],range(12*6))
    unioned, view = left.union(right)
    expected_view = na.transpose(right.cpt[...,na.NewAxis,na.NewAxis],
                                 axes=[3,1,0,4,2])
    ok = (unioned.names_list == ['a','b','c','d','e']
          and unioned.shape == tuple([2,3,4,5,1])
          and na.all(view == expected_view))
    assert ok, """ union does not work ..."""
def Update(self, other):
    """Overwrite this Table's values with those of *other*.

    Both tables must hold exactly the same set of variables; otherwise an
    error string is returned (NOTE(review): returned rather than raised —
    kept to match the file's existing convention).  other.cpt is
    transposed so its axes line up with self's variable order, then a
    copy is stored in self.cpt.
    """
    # check that all variables in self are contained in other
    if self.names != other.names:
        return "error in update, all variables in other should be contained in self"
    # axis permutation mapping other's dimensions onto self's ordering
    axis_map = [other.assocdim[name] for name in self.names_list]
    self.cpt = copy(na.transpose(other.cpt, axes=axis_map))
def notes_roc (la, lb, eps):
    # NOTE(review): unformatted duplicate of another notes_roc definition in
    # this file — keep the two in sync, or delete one.
    from numarray import transpose, add, resize 
    """ creates a matrix of size len(la)*len(lb) then look for hit and miss in it within eps tolerance windows """
    # counters; fpw, fpg and fdp are never updated below and are returned as 0
    gdn,fpw,fpg,fpa,fdo,fdp = 0,0,0,0,0,0
    m = len(la)
    n = len(lb)
    # NOTE(review): la[:][0] is simply la[0] (the first element/row), not a
    # column slice — if a column of values was intended this should be
    # la[:,0]; confirm against the callers.
    x = resize(la[:][0],(n,m))
    y = transpose(resize(lb[:][0],(m,n)))
    teps = (abs(x-y) <= eps[0])   # within the first tolerance window
    x = resize(la[:][1],(n,m))
    y = transpose(resize(lb[:][1],(m,n)))
    tpitc = (abs(x-y) <= eps[1])  # within the second tolerance window
    res = teps * tpitc            # a hit needs both criteria
    res = add.reduce(res,axis=0)
    for i in range(len(res)) :
        if res[i] > 1:
            # multiple matches: one good note plus (res[i]-1) doubled ones
            gdn+=1
            fdo+=res[i]-1
        elif res [i] == 1:
            gdn+=1
    # fpa is still 0 here, so this is effectively fpa = n - gdn
    fpa = n - gdn - fpa
    return gdn,fpw,fpg,fpa,fdo,fdp
def notes_roc(la, lb, eps):
    # NOTE(review): formatted duplicate of another notes_roc definition in
    # this file — keep the two in sync, or delete one.
    from numarray import transpose, add, resize
    """ creates a matrix of size len(la)*len(lb) then look for hit and miss in it within eps tolerance windows """
    # counters; fpw, fpg and fdp are never updated below and are returned as 0
    gdn, fpw, fpg, fpa, fdo, fdp = 0, 0, 0, 0, 0, 0
    m = len(la)
    n = len(lb)
    # NOTE(review): la[:][0] is simply la[0] (the first element/row), not a
    # column slice — if a column of values was intended this should be
    # la[:,0]; confirm against the callers.
    x = resize(la[:][0], (n, m))
    y = transpose(resize(lb[:][0], (m, n)))
    teps = (abs(x - y) <= eps[0])   # within the first tolerance window
    x = resize(la[:][1], (n, m))
    y = transpose(resize(lb[:][1], (m, n)))
    tpitc = (abs(x - y) <= eps[1])  # within the second tolerance window
    res = teps * tpitc              # a hit needs both criteria
    res = add.reduce(res, axis=0)
    for i in range(len(res)):
        if res[i] > 1:
            # multiple matches: one good note plus (res[i]-1) doubled ones
            gdn += 1
            fdo += res[i] - 1
        elif res[i] == 1:
            gdn += 1
    # fpa is still 0 here, so this is effectively fpa = n - gdn
    fpa = n - gdn - fpa
    return gdn, fpw, fpg, fpa, fdo, fdp
def union(a,b): """ Returns a new instance of same class as a that contains all data contained in a but also has any new variables found in b with unary dimensions. Also returns a view of b.cpt ready for an operation with the returned instance. eg. a= Pr(A,B,C,D,E) b= Pr(C,G,A,F) a.union(b) --> returns (Pr(A,B,C,D,E,1,1),numarray([A,1,C,1,1,G,F])) Notes: - a and b remain unchanged - a and b must be Table instances (or something equivalent) - a always keeps the same order of its existing variables - any new variables found in b are added at the end of a in the order they appear in b. - new dimensions are added with numarray.NewAxis - the two numarrays objects returns have exactly the same dimensions and are ready for any kind of operation, *,/,... """ # make a copy of a new = copy(a) for varb in b.names_list: # varb is the name of a variable in b if not new.assocdim.has_key(varb): new.addDim(varb) # add new variable to new # new now contains all the variables contained in a and b # new = A U B correspond = [] b_assocdim = copy(b.assocdim) bcpt = b.cpt.view() for var in new.names_list: # var is the name of a variable in new if not b.assocdim.has_key(var): bcpt = bcpt[...,na.NewAxis] b_assocdim[var] = bcpt.rank - 1 correspond.append(b_assocdim[var]) # transpose dimensions in b to match those in a btr = na.transpose(bcpt, axes = correspond) # btr is now ready for any operation with new return new, btr
def fig3b(t, m, w, p):
    """ Plot image of age,metallicity weights -- nearest neighbor interpolation"""
    NREP = 50  # oversampling factor along the age axis
    xi = pylab.linspace(0.1, 15, 512)    # age grid (Gyr), per the xlabel below
    yi = pylab.linspace(-2.5, 0.9, 512)  # [Fe/H] grid, per the ylabel below
    z = numarray.zeros((len(xi), len(yi)), numarray.Float)
    # create t, metallicity array
    y = numarray.repeat(m, NREP)
    # assumes t is uniformly spaced in log10(age) — TODO confirm
    dt = t[1] - t[0]
    x = numarray.arange(t[0], t[-1] + 2 * dt, dt / NREP)
    x = x[0:len(y)]
    x = 10.**x / 1.e9  # log10(yr) -> Gyr
    weight = numarray.repeat(w, NREP)
    # Find the indices in the array
    xindex = numarray.searchsorted(xi, x)
    print "shape(x), shape(y), shape(weight)", numarray.shape(
        x), numarray.shape(y), numarray.shape(weight)
    if p.sigma > 0:
        if p.dsigmadlogt == 0.:
            # constant metallicity spread
            for i in range(len(y)):
                nstars = weight[i] * normgauss(yi, y[i], p.sigma)
                j = xindex[i]
                z[j, :] += nstars
        if p.dsigmadlogt != 0.:
            # spread varies linearly with log(age)
            for i in range(len(y)):
                logt0 = numarray.log10(x[0])
                logt = numarray.log10(x[i])
                sigma = p.sigma + p.dsigmadlogt * (logt - logt0)
                nstars = weight[i] * normgauss(yi, y[i], sigma)
                j = xindex[i]
                z[j, :] += nstars
    else:
        sigma = 0.01  # tiny spread stands in for a delta function
        for i in range(len(y)):
            nstars = weight[i] * normgauss(yi, y[i], sigma)
            j = xindex[i]
            z[j, :] += nstars
    # yindex = numarray.searchsorted(yi,y)
    # z[xindex,yindex] = z[xindex,yindex] + weight # increment the 2-d array
    zz = numarray.transpose(z)
    pylab.imshow(zz, extent=[0.1, 15, -2.5, 0.9], aspect='auto')
    pylab.xlabel("Age (Gyr)")
    pylab.ylabel("[Fe/H]")
    return xi, yi, zz
def plot_PCA_residuals(data, D=None, newfig=True, marker='o'): if D is None: D = shape(data)[1] p = doPCA(data, D, D) spec = zeros((D, 1), Float) for i in range(1, D): spec[i] = norm(p.d[:i]) res = transpose(sqrt(spec[-1]**2 - spec**2) / spec[-1])[0] print "2-norm of PCA spectrum =", spec[-1] if newfig: figure() style = 'k' + marker else: style = marker semilogy(res, style) ## title('PCA residuals') xlabel(r'$\rm{Dimension}$', fontsize=20) ylabel(r'$\rm{PCA \ residual}$', fontsize=20) return p, res
def testMul(self):
    """ test multiplication """
    ta = Table(['a','b','c','d'],[2,3,4,5],range(2*3*4*5))
    tb = Table(['c','b','e'],[4,3,6],range(12*6))
    tc = Table(['a','b','c','d','e'],[2,3,4,5,6],range(2*3*4*5*6))
    # build the expected product by hand: align both cpts on the full
    # (a,b,c,d,e) axis order, then multiply elementwise
    left = ta.cpt[...,na.NewAxis]
    right = na.transpose(tb.cpt[...,na.NewAxis,na.NewAxis],[3,1,0,4,2])
    expected = left * right
    ab = ta * tb
    cc = tc * tc
    bb = tb * tb
    ok = (ab == Table(['a','b','c','d','e'],[2,3,4,5,6],expected)
          and cc == Table(['a','b','c','d','e'],[2,3,4,5,6],na.arange(2*3*4*5*6)**2)
          and bb == Table(['c','b','e'],[4,3,6],na.arange(12*6)**2))
    assert ok, " Multiplication does not work"
def fig3e(t,m,w,p): """ Plot image of log(age),metallicity weights -- nearest neighbor interpolation""" xi = pylab.linspace(8,10.25,512) yi = pylab.linspace(-2.5,0.9,512) z = numarray.zeros((len(xi),len(yi)),numarray.Float) # create t, metallicity array x = t y = m # Find the indices in the array xindex = numarray.searchsorted(xi,x) if p.sigma > 0: for i in range(len(y)): nstars = w[i]*normgauss(yi,y[i],p.sigma) print "shape(z),len(nstars)", numarray.shape(z), len(nstars) z[xindex[i],:] += nstars else: yindex = numarray.searchsorted(yi,y) z[xindex,yindex] = z[xindex,yindex] + weight # increment the 2-d array zz = numarray.transpose(z) pylab.imshow(zz,extent=[8,10.25,-2.5,0.9],aspect='auto')
def plot_PCA_residuals(data, D=None, newfig=True, marker='o'):
    # NOTE(review): duplicate of another plot_PCA_residuals definition in
    # this file; if both live in one module the later definition wins.
    """Plot the PCA residual (log scale) for 1..D retained components;
    returns the PCA object and the residual array."""
    if D is None:
        D = shape(data)[1]
    p=doPCA(data, D, D)
    spec=zeros((D,1),Float)
    for i in range(1,D):
        spec[i] = norm(p.d[:i])
    # residual_i = sqrt(total^2 - partial_i^2) / total, where total = spec[-1]
    res = transpose(sqrt(spec[-1]**2-spec**2)/spec[-1])[0]
    print "2-norm of PCA spectrum =", spec[-1]
    if newfig:
        figure()
        style='k'+marker
    else:
        style=marker
    semilogy(res,style)
##    title('PCA residuals')
    xlabel(r'$\rm{Dimension}$',fontsize=20)
    ylabel(r'$\rm{PCA \ residual}$',fontsize=20)
    return p, res
def fig3b(t,m,w,p):
    # NOTE(review): unformatted duplicate of another fig3b definition in
    # this file — keep the two in sync, or delete one.
    """ Plot image of age,metallicity weights -- nearest neighbor interpolation"""
    NREP = 50  # oversampling factor along the age axis
    xi = pylab.linspace(0.1,15,512)    # age grid (Gyr)
    yi = pylab.linspace(-2.5,0.9,512)  # [Fe/H] grid
    z = numarray.zeros((len(xi),len(yi)),numarray.Float)
    # create t, metallicity array
    y = numarray.repeat(m,NREP)
    # assumes t is uniformly spaced in log10(age) — TODO confirm
    dt = t[1]-t[0]
    x = numarray.arange(t[0],t[-1]+2*dt,dt/NREP)
    x = x[0:len(y)]
    x = 10.**x/1.e9  # log10(yr) -> Gyr
    weight = numarray.repeat(w,NREP)
    # Find the indices in the array
    xindex = numarray.searchsorted(xi,x)
    print "shape(x), shape(y), shape(weight)", numarray.shape(x),numarray.shape(y), numarray.shape(weight)
    if p.sigma > 0:
        if p.dsigmadlogt == 0.:
            # constant metallicity spread
            for i in range(len(y)):
                nstars = weight[i]*normgauss(yi,y[i],p.sigma)
                j = xindex[i]
                z[j,:] += nstars
        if p.dsigmadlogt != 0.:
            # spread varies linearly with log(age)
            for i in range(len(y)):
                logt0 = numarray.log10(x[0])
                logt = numarray.log10(x[i])
                sigma = p.sigma + p.dsigmadlogt*(logt-logt0)
                nstars = weight[i]*normgauss(yi,y[i],sigma)
                j = xindex[i]
                z[j,:] += nstars
    else:
        sigma = 0.01  # tiny spread stands in for a delta function
        for i in range(len(y)):
            nstars = weight[i]*normgauss(yi,y[i],sigma)
            j = xindex[i]
            z[j,:] += nstars
    # yindex = numarray.searchsorted(yi,y)
    # z[xindex,yindex] = z[xindex,yindex] + weight # increment the 2-d array
    zz = numarray.transpose(z)
    pylab.imshow(zz,extent=[0.1,15,-2.5,0.9],aspect='auto')
    pylab.xlabel("Age (Gyr)")
    pylab.ylabel("[Fe/H]")
    return xi,yi,zz
def testPrepareOther(self):
    """prepareOther must align a smaller table's axes with self and
    reject tables containing variables absent from self."""
    c = Table(['e','b'],[2,3],range(6))
    d = Table(['a','b','c','d','e'],[2,3,2,2,2],range(3*2**4))
    e = Table(['e','b','f'],[2,3,4],range(6*4))
    src = Table(['s','r','c'],[2,3,4],range(24))
    cr = Table(['c','r'],[4,3],range(12))
    dc = d.prepareOther(c)
    # BUG FIX: the old version put assert(0) *inside* the try block, so
    # the bare except swallowed the AssertionError and this negative
    # check could never fail.  Record the outcome and assert afterwards.
    raised = False
    try:
        d.prepareOther(e)
    except:
        raised = True
    assert raised, """ this should produce an error..."""
    cr_ = src.prepareOther(cr)
    assert(dc.shape == tuple([1,3,1,1,2]) and \
           na.all(dc[0,:,0,0,:] == na.transpose(c.cpt, axes=[1,0])) and \
           cr_.shape == (1,3,4)), \
           """ problem with prepareOther"""
def prepareOther(self, other):
    """ Prepares other for inplace multiplication/division with self. Returns
    a *view* of other.cpt ready for an operation. other must contain a
    subset of the variables of self. NON-DESTRUCTIVE!

    eg. a= Pr(A,B,C,D)
        b= Pr(D,B)
        a.prepareOther(b) --> returns a numarray Pr(1,B,1,D)

        a= Pr(A,B,C,D)
        b= Pr(C,B,E)
        a.prepareOther(b) --> ERROR (E not in {A,B,C,D})

    Notes:
    -   a and b are not altered in any way. NON-DESTRUCTIVE
    -   b must contain a subset of a's variables
        a=Pr(X),b=Pr(Y); Y entirely included in X

    Raises ValueError if other holds a variable that self does not.
    """
    # self must contain all variables found in other
    if len(other.names - self.names) > 0:
        # BUG FIX: was a py2 string `raise`, which is illegal from
        # Python 2.6 on; raise a real exception (message text unchanged).
        raise ValueError("ERROR :"+str((other.names-self.names))+"not in"+str(self.names))
    # append a unary axis to a view of other.cpt for each variable of
    # self that other lacks
    bcpt = other.cpt.view()
    b_assocdim = copy(other.assocdim)
    for var in (self.names-other.names):
        bcpt = bcpt[...,na.NewAxis]
        b_assocdim[var] = bcpt.rank - 1  # .rank is numarray's ndim
    # transposition vector mapping other's axes onto self's ordering
    trans = [b_assocdim[var] for var in self.names_list]
    # transpose and return the aligned view
    return na.transpose(bcpt, axes=trans)
def fig3e(t, m, w, p): """ Plot image of log(age),metallicity weights -- nearest neighbor interpolation""" xi = pylab.linspace(8, 10.25, 512) yi = pylab.linspace(-2.5, 0.9, 512) z = numarray.zeros((len(xi), len(yi)), numarray.Float) # create t, metallicity array x = t y = m # Find the indices in the array xindex = numarray.searchsorted(xi, x) if p.sigma > 0: for i in range(len(y)): nstars = w[i] * normgauss(yi, y[i], p.sigma) print "shape(z),len(nstars)", numarray.shape(z), len(nstars) z[xindex[i], :] += nstars else: yindex = numarray.searchsorted(yi, y) z[xindex, yindex] = z[xindex, yindex] + weight # increment the 2-d array zz = numarray.transpose(z) pylab.imshow(zz, extent=[8, 10.25, -2.5, 0.9], aspect='auto')
def solve_linear_equations(a, b):
    """solve_linear_equations(a, b) -> x such that dot(a,x) = b

    *a* may be either rank-2 or rank-3, in either case, it must be square
    along the last two axes.

    *b* may either have a rank one lower than *a*, in which case it
    represents a vector or an array of vectors, or it may be the same rank
    as *a* in which case it represents a matrix or an array of matrices.

    Since that may be a bit confusing let's look at some examples. First
    the simplest case, a square matrix A, and a vector of results B.

    >>> A = [[1,2,3], [3,5,5], [5,6,7]]
    >>> B = [1,1,1]
    >>> x = solve_linear_equations(A, B)
    >>> _isClose(x, num.array([-0.5,  0. ,  0.5]))
    1
    >>> _isClose(na.dot(A, x),   # This should give us B
    ...          num.array([ 1.,  1.,  1.]))
    1

    The next simplest case is a square matrix A and a matrix B.

    >>> B = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    >>> _isClose(solve_linear_equations(A, B),
    ...          num.array([[-0.625, -0.5  ,  0.625],
    ...                     [-0.5  ,  1.   , -0.5  ],
    ...                     [ 0.875, -0.5  ,  0.125]]))
    1

    If *a* is rank-3, then the first dimension of *a* **and** *b* is
    interpreted as selecting different submatrices or subvectors to operate
    on. In this case, *b* will be rank-2 in the vector case and rank-3 in
    the matrix case. Here is what is looks like in the vector case.

    >>> A = [[[1, 3], [2j, 3j]],
    ...      [[2, 4], [4j, 4j]],
    ...      [[3, 5], [6j, 5j]]]
    >>> B = [[1, 0], [0, 1], [1, 1]]
    >>> _isClose(solve_linear_equations(A, B),
    ...          num.array([[-1.        +0.j        ,  0.66666667+0.j        ],
    ...                     [ 0.        -0.5j       ,  0.        +0.25j      ],
    ...                     [-0.33333333-0.33333333j,  0.4       +0.2j       ]]))
    1

    The first dimensions of *a* and *b* must either match or one of them
    must be 1. In the latter case, the length-1 dimension is broadcast in
    the normal way.

    >>> B = [[1, 0], [0, 1]]
    >>> solve_linear_equations(A, B)
    Traceback (most recent call last):
       ...
    LinearAlgebraError: first dimensions of a and b must match or be 1
    >>> B = [[1, 0]]
    >>> _isClose(solve_linear_equations(A, B),
    ...          num.array([[-1.        +0.j,  0.66666667+0.j],
    ...                     [-0.5       +0.j,  0.5       +0.j],
    ...                     [-0.33333333+0.j,  0.4       +0.j]]))
    1
    """
    # NOTE(review): the doctest above expects "LinearAlgebraError" but the
    # code below raises LinAlgError — confirm which name is exported.
    a = na.asarray(a)
    b = na.asarray(b)
    _assertRank((2,3), a)
    _assertSubmatrixSquareness(a)
    rank_a = len(a.shape)
    _assertRank((rank_a-1, rank_a), b)
    stretched = (rank_a == 2)
    if stretched:
        # promote the single matrix/vector to a 1-element batch
        a = a[na.NewAxis,]
        b = b[na.NewAxis,]
    one_eq = (len(b.shape) == len(a.shape)-1)
    if one_eq:
        # vector right-hand sides become n x 1 matrices
        b = b[:,:,na.NewAxis]
    broadcast_a = (a.shape[0] == 1)
    broadcast_b = (b.shape[0] == 1)
    if not (broadcast_a or broadcast_b or a.shape[0] == b.shape[0]):
        raise LinAlgError, "first dimensions of a and b must match or be 1"
    #
    n_cases = max(a.shape[0], b.shape[0])
    n_eq = a.shape[1]
    n_rhs = b.shape[2]
    if n_eq != b.shape[1]:
        raise LinAlgError, 'Incompatible dimensions'
    t = _commonType(a, b)
    if _array_kind[t] == 1: # Complex routines take different arguments
        lapack_routine = lapack_lite2.zgesv
    else:
        lapack_routine = lapack_lite2.dgesv
    # LAPACK is column-major: transpose the last two axes of each case
    a, b = _castCopyAndTranspose(t, a, b, indices=(0,2,1))
    # result holds b replicated across all cases (broadcast if needed)
    result = na.zeros([n_cases, n_rhs, n_eq], b.type())
    result[:] = b
    b = result
    pivots = na.zeros(n_eq, 'l')
    a_stride = n_eq * n_eq * a.itemsize()
    b_stride = n_eq * n_rhs * b.itemsize()
    a_view = a[0]
    b_view = b[0]
    a_i = a_view.copy()
    b_i = b_view.copy()
    for i in range(n_cases):
        if i:
            # gesv overwrites its inputs, so refresh the work copies;
            # a is only refreshed when it is not being broadcast
            if not broadcast_a:
                a_i._copyFrom(a_view)
            b_i._copyFrom(b_view)
        outcome = lapack_routine(n_eq, n_rhs, a_i, n_eq, pivots, b_i, n_eq, 0)
        if outcome['info'] > 0:
            raise LinAlgError, 'Singular matrix'
        # write the solution back and slide the views to the next case
        b_view._copyFrom(b_i)
        a_view._byteoffset += a_stride
        b_view._byteoffset += b_stride
    # undo the column-major transpose of the solutions
    b = na.transpose(b, (0,2,1))
    if one_eq:
        b = b[...,0]
    if stretched:
        b = b[0]
    return b
for n in line.split(): U[row, col] = float(n) col += 1 row += 1 rows, columns = [int(a) for a in vfile.readline().split()] V = na.zeros((rows, columns), type="Float32") row = 0 for line in vfile: col = 0 for n in line.split(): V[row, col] = float(n) col += 1 row += 1 return U, S, V up = cPickle.Unpickler(file('artist_user.pickle', 'rb')) artists, users = up.load() #U is artists U, S, V = load_svd(file('big_s', 'r'), file('big_ut', 'r'), file('big_vt', 'r')) #I believe that U is already transposed tt = na.dot(na.transpose(U), na.dot(na.dot(S, S), U)) fout = file('lsi.out', 'wb') cp = cPickle.Pickler(fout, -1) cp.dump(tt) fout.close()
def compute(self, trajname, ics=None):
    """Iterate the map system over its time mesh and package the result
    as a Trajectory named *trajname*.

    ics, if given, replaces the stored initial conditions first.  The
    main loop applies the right-hand-side function once per mesh point,
    checks variable bounds and high-level events (terminal events stop
    the iteration), records auxiliary variables, and polls any external
    inputs.  Returns a Trajectory on success; on failure records an
    error and sets self.defined = False.
    """
    assert self.funcspec.targetlang == 'python', \
           ('Wrong target language for functional specification. '
            'Python needed for this class')
    assert isinstance(self.funcspec, RHSfuncSpec), ('Map system '
                                    'requires RHSfuncSpec type to proceed')
    if self.defined:
        self.validateSpec()
##        assert remain(self.initialconditions.keys(),
##                      self._xdatadict.keys()+self.funcspec.auxvars) == [],\
##            ('mismatching entries between initial conditions and '
##             'declared variable names')
        self.clearWarnings()
        self.clearErrors()
    if ics is not None:
        self.set(ics=ics)
    xnames = self._var_ixmap  # ensures correct order
    # wrap up each dictionary initial value as a singleton list
    alltData = [self.indepvariable.depdomain.get(0)]
    allxDataDict = dict(zip(xnames,
                            map(listid,
                                sortedDictValues(self.initialconditions,
                                                 self.funcspec.vars))))
    rhsfn = eval("self."+self.funcspec.spec[1])
    # Check i.c.'s are well defined (finite)
    self.checkInitialConditions()
    self.setEventICs(self.initialconditions, self.globalt0)
    ic = sortedDictValues(self.initialconditions, self.funcspec.vars)
    plist = sortedDictValues(self.pars)
    extralist = copy(plist)
    ilist = []
    if self.inputs:
        # inputVarList is a list of Variables
        listend = self.numpars + len(self.inputs)
        inputVarList = sortedDictValues(self.inputs)
        try:
            for f in inputVarList:
                # NOTE(review): `f.clearWarnings` is not called — missing
                # parentheses? confirm intent.
                f.clearWarnings
                ilist.append(f(alltData[0], self.checklevel))
        except AssertionError:
            print 'External input call has t out of range: t = ', \
                  self.indepvariable.depdomain.get(0)
            print 'Maybe checklevel is 3 and initial time is not', \
                  'completely inside valid time interval'
            raise
        except ValueError:
            print 'External input call has value out of range: t = ', \
                  self.indepvariable.depdomain.get(0)
            for f in inputVarList:
                if len(f.warnings):
                    print 'External input %s out of range:' % f.name
                    print '   t = ', repr(f.warnings[-1][0]), ', ', \
                          f.name, ' = ', repr(f.warnings[-1][1])
            raise
    else:
        listend = self.numpars
        inputVarList = []
    extralist.extend(ilist)
    precevents = self.eventstruct.query(['precise'])
    if precevents != []:
        raise PyDSTool_ValueError('precise events are not valid for map systems')
    eventslist = self.eventstruct.query(['highlevel', 'active',
                                         'notvarlinked'])
    termevents = self.eventstruct.query(['term'], eventslist)
    # initialize event info dictionaries
    Evtimes = {}
    Evpoints = {}
    for (evname, ev) in eventslist:
        Evtimes[evname] = []
        Evpoints[evname] = []
    if eventslist != []:
        self.eventstruct.resetHighLevelEvents(self.indepvariable.depdomain.get(0),
                                              eventslist)
        self.eventstruct.validateEvents(self.funcspec.vars + \
                                        self.funcspec.auxvars + \
                                        ['t'], eventslist)
    # per-iteration storage of variable data (initial values are irrelevant)
    xDataDict = {}
    # storage of all auxiliary variable data
    allaDataDict = {}
    anames = self.funcspec.auxvars
    avals = apply(eval("self."+self.funcspec.auxspec[1]),
                  [self.indepvariable.depdomain.get(0),
                   sortedDictValues(self.initialconditions,
                                    self.funcspec.vars),
                   extralist])
    for aix in range(len(anames)):
        aname = anames[aix]
        allaDataDict[aname] = [avals[aix]]
    # temp storage of first time at which terminal events found
    # (this is used for keeping the correct end point of new mesh)
    first_found_t = None
    tmesh = self.indepvariable.depdomain.uniformSample(self.tstep,
                                          strict=False,
                                          avoidendpoints=self.checklevel>2)
    # Main loop
    breakwhile = False
    success = False
    x = ic
    notdone = True
    # did i=0 for initial condition already
    i = 1
    while notdone:
        t = tmesh[i]
        ## COMPUTE NEXT STATE y from x
        try:
            y = rhsfn(t, x, extralist)
        except:
            print "Error in calling right hand side function:"
            self.showSpec()
            raise
        for xi in xrange(self.dimension):
            xDataDict[xnames[xi]] = y[xi]
            # bounds check on each state component
            if not self.contains(self.variables[xnames[xi]].depdomain,
                                 y[xi], self.checklevel):
                self.warnings.append((W_TERMSTATEBD,
                                      (t, xnames[xi], y[xi],
                                       self.variables[xnames[xi]].depdomain)))
                breakwhile = True
                break  # for loop
        if breakwhile:
            notdone = False
            continue
        if eventslist != []:
            dataDict = copy(xDataDict)
            dataDict['t'] = t
            evsflagged = self.eventstruct.pollHighLevelEvents(None,
                                                              dataDict,
                                                              self.pars,
                                                              eventslist)
            termevsflagged = filter(lambda e: e in evsflagged, termevents)
            nontermevsflagged = filter(lambda e: e not in termevsflagged,
                                       evsflagged)
            # register any non-terminating events in the warnings list
            if len(nontermevsflagged) > 0:
                evnames = [ev[0] for ev in nontermevsflagged]
                self.warnings.append((W_NONTERMEVENT, (t, evnames)))
                for evname in evnames:
                    Evtimes[evname].append(t)
                    Evpoints[evname].append(y)
            if termevsflagged != []:
                # active terminal event flagged at this time point
                # register the event in the warnings
                evnames = [ev[0] for ev in termevsflagged]
                self.warnings.append((W_TERMEVENT, \
                                      (t, evnames)))
                for evname in evnames:
                    Evtimes[evname].append(t)
                    Evpoints[evname].append(y)
                notdone = False
                continue
        alltData.append(t)
        for xi in range(self.dimension):
            allxDataDict[xnames[xi]].append(y[xi])
        avals = apply(eval("self."+self.funcspec.auxspec[1]),
                      [t, sortedDictValues(xDataDict), extralist])
        for aix in range(len(anames)):
            aname = anames[aix]
            allaDataDict[aname].append(avals[aix])
        # refresh the external-input slots of extralist for the next step
        try:
            extralist[self.numpars:listend] = [apply(f, [t, self.checklevel]) \
                                               for f in inputVarList]
        except ValueError:
            print 'External input call caused value out of range error:', \
                  't = ', t
            for f in inputVarList:
                if len(f.warnings):
                    print 'External input variable %s out of range:' % f.name
                    print '   t = ', repr(f.warnings[-1][0]), ', ', \
                          f.name, ' = ', repr(f.warnings[-1][1])
            raise
        except AssertionError:
            print 'External input call caused t out of range error: t = ', t
            raise
        if i >= len(tmesh) - 1:
            notdone = False
        else:
            i += 1
            x = y
    # update success flag
    # NOTE(review): notdone is always False here, so success is always
    # True after the loop — confirm this is the intended semantics.
    success = not notdone
    # Check that any terminal events found terminated the code correctly
    if first_found_t is not None:
        assert self.warnings[-1][0] == W_TERMEVENT, ("Event finding code "
                                        "for terminal event failed")
    # Package up computed trajectory in Variable variables
    # Add external inputs warnings to self.warnings, if any
    for f in inputVarList:
        for winfo in f.warnings:
            self.warnings.append((W_NONTERMSTATEBD,
                                  (winfo[0], f.name, winfo[1],
                                   f.depdomain)))
    # check for non-unique terminal event
    termcount = 0
    for (w, i) in self.warnings:
        if w == W_TERMEVENT or w == W_TERMSTATEBD:
            termcount += 1
            if termcount > 1:
                self.errors.append((E_NONUNIQUETERM, (alltData[-1], i[1])))
##                print 'Time interval adjusted according to %s: %s' % \
##                      (self._warnmessages[w], str(i[0])+", "+ str(i[1]))
    # Create variables (self.variables contains no actual data)
    variables = copyVarDict(self.variables)
    # build event pointset information (reset previous trajectory's)
    self.trajevents = {}
    for (evname, ev) in eventslist:
        evpt = Evpoints[evname]
        if evpt == []:
            self.trajevents[evname] = None
        else:
            evpt = transpose(array(evpt))
            self.trajevents[evname] = Pointset({'coordnames': xnames,
                               'indepvarname': 't',
                               'coordarray': evpt,
                               'indepvararray': Evtimes[evname],
                               'indepvartype': self.variables[xnames[0]].indepvartype})
    for x in xnames:
        if len(alltData) > 1:
            variables[x] = Variable(Pointset({'coordnames': [x],
                               'coordarray': allxDataDict[x],
                               'coordtype': self.variables[x].coordtype,
                               'indepvarname': 't',
                               'indepvararray': alltData,
                               'indepvartype': self.variables[x].indepvartype}), 't', x, x)
        else:
            raise PyDSTool_ValueError, "Fewer than 2 data points computed"
    for a in anames:
        if len(alltData) > 1:
            variables[a] = Variable(Pointset({'coordnames': [a],
                               'coordarray': allaDataDict[a],
                               'coordtype': self.variables[a].coordtype,
                               'indepvarname': 't',
                               'indepvararray': alltData,
                               'indepvartype': self.variables[a].indepvartype}), 't', a, a)
        else:
            raise PyDSTool_ValueError, "Fewer than 2 data points computed"
    if success:
        self.validateSpec()
        self.defined = True
        return Trajectory(trajname, variables.values(),
                          self.globalt0, self.checklevel)
    else:
        print 'Trajectory computation failed'
        # NOTE(review): `errcode` is not defined anywhere in this method —
        # this branch would raise NameError if it ever ran; confirm.
        self.errors.append((E_COMPUTFAIL, (t, self._errorcodes[errcode])))
        self.defined = False
def compute(self, trajname, dirn='f'):
    """Compute a trajectory using SciPy's VODE integrator wrapper.

    trajname -- name given to the returned Trajectory object.
    dirn -- integration direction; only forwards ('f') is implemented
        (backwards, dircode -1, raises NotImplementedError).

    Integrates over self.indepvariable.depdomain, polling high-level
    events at every accepted mesh point, refining the time mesh around
    'precise' terminal events, and resolving 'precise' non-terminal
    events after integration completes.  Returns a Trajectory on
    success; on failure records an E_COMPUTFAIL error, sets
    self.defined = False and falls through (returning None).
    """
    continue_integ = ODEsystem.prepDirection(self, dirn)
    if self._dircode == -1:
        raise NotImplementedError, ('Backwards integration is not implemented')
    # validate spec if there exists a prior trajectory computation
    if self.defined:
        self.validateSpec()
        self.validateICs()
        self.clearWarnings()
        self.clearErrors()
    pnames = sortedDictKeys(self.pars)
    xnames = self._var_ixmap  # ensures correct order
    # Check i.c.'s are well defined (finite)
    self.checkInitialConditions()
    # map 'stiff' flag onto VODE's method choice (BDF vs Adams)
    if self._algparams['stiff']:
        methstr = 'bdf'
        methcode = 2
    else:
        methstr = 'adams'
        methcode = 1
    if self.haveJacobian():
        haveJac = 1
    else:
        haveJac = 0
    # atol may be scalar or a per-dimension list; normalise to a list
    if isinstance(self._algparams['atol'], list):
        if len(self._algparams['atol']) != self.dimension:
            raise ValueError, 'atol list must have same length as phase dimension'
    else:
        atol = self._algparams['atol']
        self._algparams['atol'] = [atol for dimix in xrange(self.dimension)]
    indepdom0 = self.indepvariable.depdomain.get(0)
    indepdom1 = self.indepvariable.depdomain.get(1)
    if continue_integ:
        if self._tdata[0] != self._solver.t:
            print "Previous end time is %f"%self._solver.t
            raise ValueError, \
                  "Start time not correctly updated for continuing orbit"
        x0 = self._solver.y
        indepdom0 = self._solver.t
    else:
        x0 = sortedDictValues(self.initialconditions, self.funcspec.vars)
    if self._solver._integrator is None:
        # Banded Jacobians not yet supported
        #
        # start a new integrator, because method may have been
        # switched
        self._solver.set_integrator('vode', method=methstr,
                                    rtol=self._algparams['rtol'],
                                    atol=self._algparams['atol'],
                                    nsteps=self._algparams['max_pts'],
                                    max_step=self._algparams['max_step'],
                                    min_step=self._algparams['min_step'],
                                    first_step=self._algparams['init_step'],
                                    with_jacobian=haveJac)
        # speed up repeated access to solver by making a temp name for it
        solver = self._solver
    else:
        # speed up repeated access to solver by making a temp name for it
        solver = self._solver
        solver.with_jacobian = haveJac
        # self.mu = lband
        # self.ml = uband
        solver.rtol = self._algparams['rtol']
        solver.atol = self._algparams['atol']
        solver.method = methcode
        # self.order = order
        solver.nsteps = self._algparams['max_pts']
        solver.max_step = self._algparams['max_step']
        solver.min_step = self._algparams['min_step']
        solver.first_step = self._algparams['init_step']
    solver.set_initial_value(x0, indepdom0)
##        if self._dircode == 1:
##            solver.set_initial_value(x0, indepdom0)
##        else:
##            solver.set_initial_value(x0, indepdom1)
    # wrap up each dictionary initial value as a singleton list
    alltData = [indepdom0]
    allxDataDict = dict(zip(xnames, map(listid, x0)))
    plist = sortedDictValues(self.pars)
    extralist = copy(plist)
    if self.inputs:
        # inputVarList is a list of Variables
        inames = sortedDictKeys(self.inputs)
        listend = self.numpars + len(self.inputs)
        inputVarList = sortedDictValues(self.inputs)
        ilist = _pollInputs(inputVarList, alltData[0]+self.globalt0,
                            self.checklevel)
    else:
        ilist = []
        inames = []
        listend = self.numpars
        inputVarList = []
    extralist.extend(ilist)
    solver.set_f_params(extralist)
    if haveJac:
        solver.set_jac_params(extralist)
    dt = self._algparams['init_step']
    strict = self._algparams['strictdt']
    # Make t mesh
    if not all(isfinite(self.indepvariable.depdomain.get())):
        print "Time domain was: ", self.indepvariable.depdomain.get()
        raise ValueError, "Ensure time domain is finite"
    if dt == indepdom1 - indepdom0:
        # single-step integration required
        tmesh = [indepdom0, indepdom1]
    else:
        # retry with successively smaller dt if uniformSample asserts
        notDone = True
        repeatTol = 10
        count = 0
        while notDone and count <= repeatTol:
            try:
                tmesh = self.indepvariable.depdomain.uniformSample(dt,
                                        strict=strict,
                                        avoidendpoints=self.checklevel>2)
                notDone = False
            except AssertionError:
                count += 1
                dt = dt/3.0
        if count == repeatTol:
            raise AssertionError, \
                  ("supplied time step is too large for selected time"
                   " interval")
        if len(tmesh)<=2:
            # safety net, in case too few points in mesh
            # too few points unless we can add endpoint
            if tmesh[-1] != indepdom1:
                # dt too large for tmesh to have more than one point
                tmesh.append(indepdom1)
        if not strict:
            # get actual time step used
            # don't use [0] in case avoided end points
            dt = tmesh[2]-tmesh[1]
    if self.eventstruct.query(['lowlevel']) != []:
        raise ValueError, "Only high level events can be passed to VODE"
    eventslist = self.eventstruct.query(['highlevel', 'active',
                                         'notvarlinked'])
    termevents = self.eventstruct.query(['term'], eventslist)
    # reverse time by reversing mesh doesn't work
##        if self._dircode == -1:
##            tmesh.reverse()
    tmesh.pop(0)  # get rid of first entry for initial condition
    # per-iteration storage of variable data (initial values are irrelevant)
    xDataDict = {}
    xnames = self.funcspec.vars
    # storage of all auxiliary variable data
    allaDataDict = {}
    anames = self.funcspec.auxvars
    avals = apply(getattr(self,self.funcspec.auxspec[1]), [indepdom0, x0,
                                                           extralist])
    for aix in range(len(anames)):
        aname = anames[aix]
        try:
            allaDataDict[aname] = [avals[aix]]
        except IndexError:
            print "\nVODE generator: There was a problem evaluating " \
                  + "an auxiliary variable"
            print "Debug info: avals (length", len(avals), ") was ", avals
            print "Index out of range was ", aix
            print self.funcspec.auxspec[1]
            print hasattr(self, self.funcspec.auxspec[1])
            print "Args were:", [indepdom0, x0, extralist]
            raise
    # Initialize signs of event detection objects at IC
    dataDict = copy(self.initialconditions)
    dataDict.update(dict(zip(anames, avals)))
    dataDict['t'] = indepdom0
    self.setEventICs(self.initialconditions, self.globalt0)
    if self.inputs:
        parsinps = copy(self.pars)
        parsinps.update(dict(zip(inames,ilist)))
    else:
        parsinps = self.pars
    if eventslist != []:
        evsflagged = self.eventstruct.pollHighLevelEvents(None,
                                                          dataDict,
                                                          parsinps,
                                                          eventslist)
        if len(evsflagged) > 0:
            raise RuntimeError, "Some events flagged at initial condition"
        if continue_integ:
            # revert to prevprevsign, since prevsign changed after call
            self.eventstruct.resetHighLevelEvents(indepdom0, eventslist, 'prev')
        else:
            self.eventstruct.resetHighLevelEvents(indepdom0, eventslist)  #, 'off')
        self.eventstruct.validateEvents(self.funcspec.vars + \
                                        self.funcspec.auxvars + \
                                        self.funcspec.inputs + \
                                        ['t'], eventslist)
    # temp storage of first time at which terminal events found
    # (this is used for keeping the correct end point of new mesh)
    first_found_t = None
    # list of precise non-terminal events to be resolved after integration
    nontermprecevs = []
    evnames = [ev[0] for ev in eventslist]
    lastevtime = {}.fromkeys(evnames, None)
    # initialize new event info dictionaries
    Evtimes = {}
    Evpoints = {}
    if continue_integ:
        for evname in evnames:
            try:
                # these are in global time, so convert to local time
                lastevtime[evname] = self.eventstruct.Evtimes[evname][-1]-self.globalt0
            except (IndexError, KeyError):
                # IndexError: Evtimes[evname] was None
                # KeyError: Evtimes does not have key evname
                pass
    for evname in evnames:
        Evtimes[evname] = []
        Evpoints[evname] = []
    # temp storage for repeatedly used object attributes (for lookup efficiency)
    depdomains = {}
    for xi in xrange(self.dimension):
        depdomains[xi] = self.variables[xnames[xi]].depdomain
    # Main integration loop
    num_points = 0
    breakwhile = False
    while not breakwhile:
        try:
            new_t = tmesh.pop(0)  # this destroys tmesh for future use
        except IndexError:
            break
        try:
            errcode = solver.integrate(new_t)
        except:
            print "Error calling right hand side function:"
            self.showSpec()
            print "Numerical traceback information (current state, " \
                  + "parameters, etc.)"
            print "in generator dictionary 'traceback'"
            self.traceback = {'vars': dict(zip(xnames,solver.y)),
                              'pars': dict(zip(pnames,plist)),
                              'inputs': dict(zip(inames,ilist)),
                              self.indepvariable.name: new_t}
            raise
        for xi in xrange(self.dimension):
            xDataDict[xnames[xi]] = solver.y[xi]
            if not self.contains(depdomains[xi],
                                 solver.y[xi],
                                 self.checklevel):
                self.warnings.append((W_TERMSTATEBD,
                                      (solver.t,
                                       xnames[xi],solver.y[xi],
                                       depdomains[xi].get())))
                breakwhile = True
                break  # for loop
        if breakwhile:
            break
        avals = apply(getattr(self,self.funcspec.auxspec[1]), [new_t,
                        sortedDictValues(xDataDict),
                        extralist])
        # Uncomment the following assertion for debugging
        # assert all([isfinite(a) for a in avals]), \
        #    "Some auxiliary variable values not finite"
        aDataDict = dict(zip(anames,avals))
        if eventslist != []:
            dataDict = copy(xDataDict)
            dataDict.update(aDataDict)
            dataDict['t'] = new_t
            if self.inputs:
                parsinps = copy(self.pars)
                parsinps.update(dict(zip(inames,
##                          extralist[self.numpars:listend])))
                    _pollInputs(inputVarList, new_t+self.globalt0,
                                self.checklevel))))
            else:
                parsinps = self.pars
            evsflagged = self.eventstruct.pollHighLevelEvents(None,
                                                              dataDict,
                                                              parsinps,
                                                              eventslist)
##            print new_t, evsflagged
#            evsflagged = [ev for ev in evsflagged if solver.t-indepdom0 > ev[1].eventinterval]
            termevsflagged = filter(lambda e: e in evsflagged, termevents)
            nontermevsflagged = filter(lambda e: e not in termevsflagged,
                                       evsflagged)
            # register any non-terminating events in the warnings
            # list, unless they are 'precise' in which case flag
            # them to be resolved after integration completes
            if len(nontermevsflagged) > 0:
                evnames = [ev[0] for ev in nontermevsflagged]
                precEvts = self.eventstruct.query(['precise'],
                                                  nontermevsflagged)
                prec_evnames = [e[0] for e in precEvts]
                # first register non-precise events
                nonprec_evnames = remain(evnames, prec_evnames)
                # only record events if they have not been previously
                # flagged within their event interval
                if nonprec_evnames != []:
                    temp_names = []
                    for evname in nonprec_evnames:
                        prevevt_time = lastevtime[evname]
                        if prevevt_time is None:
                            ignore_ev = False
                        else:
                            # NOTE(review): 'e' here is stale -- the loop
                            # variable is 'evname', so e[1].eventinterval
                            # refers to whatever event 'e' last bound to,
                            # not the event named 'evname'.  Looks like a
                            # bug; the eventinterval of the wrong event may
                            # be used -- confirm against PyDSTool upstream.
                            if solver.t-prevevt_time < e[1].eventinterval:
                                ignore_ev = True
                            else:
                                ignore_ev = False
                        if not ignore_ev:
                            temp_names.append(evname)
                            lastevtime[evname] = solver.t
                    self.warnings.append((W_NONTERMEVENT,
                                          (solver.t, temp_names)))
                    for evname in temp_names:
                        Evtimes[evname].append(solver.t)
                        Evpoints[evname].append(solver.y)
                for e in precEvts:
                    # only record events if they have not been previously
                    # flagged within their event interval
                    prevevt_time = lastevtime[e[0]]
                    if prevevt_time is None:
                        ignore_ev = False
                    else:
                        if solver.t-dt-prevevt_time < e[1].eventinterval:
                            ignore_ev = True
                        else:
                            ignore_ev = False
                    if not ignore_ev:
                        nontermprecevs.append((solver.t-dt, solver.t,e))
                        # be conservative as to where the event is, so
                        # that don't miss any events.
                        lastevtime[e[0]] = solver.t-dt
                    e[1].reset()  #e[1].prevsign = None #
            do_termevs = []
            if termevsflagged != []:
                # only record events if they have not been previously
                # flagged within their event interval
                for e in termevsflagged:
                    prevevt_time = lastevtime[e[0]]
##                    print "Event %s flagged."%e[0]
##                    print "  ... last time was ", prevevt_time
##                    print "  ... event interval = ", e[1].eventinterval
##                    print "  ... t = %f, dt = %f"%(solver.t, dt)
                    if prevevt_time is None:
                        ignore_ev = False
                    else:
##                        print "  ... comparison = %f < %f"%(solver.t-dt-prevevt_time, e[1].eventinterval)
                        if solver.t-dt-prevevt_time < e[1].eventinterval:
                            ignore_ev = True
##                            print "VODE ignore ev"
                        else:
                            ignore_ev = False
                    if not ignore_ev:
                        do_termevs.append(e)
            if len(do_termevs) > 0:
                # >= 1 active terminal event flagged at this time point
                if all([not ev[1].preciseFlag for ev in do_termevs]):
                    # then none of the events specify greater accuracy
                    # register the event in the warnings
                    evnames = [ev[0] for ev in do_termevs]
                    self.warnings.append((W_TERMEVENT, \
                                          (solver.t, evnames)))
                    for evname in evnames:
                        Evtimes[evname].append(solver.t)
                        Evpoints[evname].append(solver.y)
                    breakwhile = True  # break while loop after appending t, x
                else:
                    # find which are the 'precise' events that flagged
                    precEvts = self.eventstruct.query(['precise'],
                                                      do_termevs)
                    # these events have flagged once so eventdelay has
                    # been used. now switch it off while finding event
                    # precisely (should be redundant after change to
                    # eventinterval and eventdelay parameters)
                    evnames = [ev[0] for ev in precEvts]
                    if first_found_t is None:
##                        print "first time round at", solver.t
                        numtries = 0
                        first_found_t = solver.t
                        restore_evts = deepcopy(precEvts)
                        minbisectlimit = min([ev[1].bisectlimit for ev in precEvts])
                        for ev in precEvts:
                            ev[1].eventdelay = 0.
                    else:
                        numtries += 1
##                        print "time round: ", numtries
                        if numtries > minbisectlimit:
                            self.warnings.append((W_BISECTLIMIT,
                                                  (solver.t, evnames)))
                            breakwhile = True
                    # find minimum eventtol in precEvts
                    dt_min = min([e[1].eventtol for e in precEvts])
                    # get previous time point
                    if len(alltData)>=1:
                        # take one step back -> told, which will
                        # get dt added back to first new meshpoint
                        # (solver.t is the step *after* the event was
                        # detected)
                        told = solver.t-dt
                    else:
                        raise ValueError("Event %s found too "%evnames[0]+\
                                         "close to local start time: try decreasing "
                                         "initial step size (current size is "
                                         "%f @ t=%f)"%(dt,solver.t+self.globalt0))
                    if dt_min >= dt:
##                        print "Registering event:", dt_min, dt
                        # register the event in the warnings
                        self.warnings.append((W_TERMEVENT,
                                              (solver.t, evnames)))
                        for evname in evnames:
                            Evtimes[evname].append(solver.t)
                            Evpoints[evname].append(solver.y)
                        # Cannot continue -- dt_min no smaller than
                        # previous dt. If this is more than the first time
                        # in this code then have found the event to within
                        # the minimum 'precise' event's eventtol, o/w need
                        # to set eventtol smaller.
                        breakwhile = True  # while loop
                    if not breakwhile:
                        dt_new = dt/5.0
                        # calc new tmesh
                        trangewidth = 2*dt  #first_found_t - told
                        numpoints = int(math.ceil(trangewidth/dt_new))
                        # choose slightly smaller dt to fit trange exactly
                        dt = trangewidth/numpoints
                        tmesh = [told + i*dt for i in xrange(1, numpoints+1)]
                        # reset events according to new time mesh,
                        # setting known previous event state to be their
                        # "no event found" state
                        self.eventstruct.resetHighLevelEvents(told,
                                                              precEvts,
                                                              state='off')
                        # build new ic with last good values (at t=told)
                        if len(alltData)>1:
                            new_ic = [allxDataDict[xname][-1] \
                                      for xname in xnames]
                        else:
                            new_ic = x0
                        # reset integrator
                        solver.set_initial_value(new_ic, told)
                        extralist[self.numpars:listend] = \
                            [apply(f, [told+self.globalt0,
                                       self.checklevel]) \
                             for f in inputVarList]
                        solver.set_f_params(extralist)
                        # continue integrating over new mesh
                        continue  # while
        alltData.append(solver.t)
        for xi in xrange(self.dimension):
            allxDataDict[xnames[xi]].append(solver.y[xi])
        for aix in xrange(len(anames)):
            aname = anames[aix]
            allaDataDict[aname].append(avals[aix])
        num_points += 1
        if not breakwhile:
            try:
                extralist[self.numpars:listend] = [apply(f, \
                                                  [solver.t+self.globalt0,
                                                   self.checklevel]) \
                                                  for f in inputVarList]
            except ValueError:
                print 'External input call caused value out of range error:',\
                      't = ', solver.t
                for f in inputVarList:
                    if len(f.warnings):
                        print 'External input variable %s out of range:' % f.name
                        print '   t = ', repr(f.warnings[-1][0]), ', ', \
                              f.name, ' = ', repr(f.warnings[-1][1])
                raise
            except AssertionError:
                print 'External input call caused t out of range error: t = ', \
                      solver.t
                raise
            solver.set_f_params(extralist)
            breakwhile = not solver.successful()
    # Check that any terminal events found terminated the code correctly
    if first_found_t is not None:
        # ... then terminal events were found. Those that were 'precise' had
        # their 'eventdelay' attribute temporarily set to 0. It now should
        # be restored.
        for evname1, ev1 in termevents:
            # restore_evts are copies of the originally flagged 'precise'
            # events
            for evname2, ev2 in restore_evts:
                if evname2 == evname1:
                    ev1.eventdelay = ev2.eventdelay
        try:
            if self.warnings[-1][0] not in [W_TERMEVENT, W_TERMSTATEBD]:
                raise RuntimeError("Event finding code for terminal event "
                                   "failed in Generator " + self.name + \
                                   ": try decreasing eventdelay or "
                                   "eventinterval below eventtol")
        except IndexError:
            print "Output stats: ", self.outputstats
            raise RuntimeError("Event finding failed in Generator " + \
                               self.name + ": try decreasing eventdelay "
                               "or eventinterval below eventtol")
    # Package up computed trajectory in Variable variables
    # Add external inputs warnings to self.warnings, if any
    for f in inputVarList:
        for winfo in f.warnings:
            self.warnings.append((W_NONTERMSTATEBD,
                                  (winfo[0], f.name, winfo[1],
                                   f.depdomain.get())))
    # check for non-unique terminal event
    termcount = 0
    for (w,i) in self.warnings:
        if w == W_TERMEVENT or w == W_TERMSTATEBD:
            termcount += 1
            if termcount > 1:
                self.errors.append((E_NONUNIQUETERM, (alltData[-1], i[1])))
    # uncomment the following lines for debugging
#        assert len(alltData) == len(allxDataDict.values()[0]) \
#             == len(allaDataDict.values()[0]), "Output data size mismatch"
#        for val_list in allaDataDict.values():
#            assert all([isfinite(x) for x in val_list])
    # Create variables (self.variables contains no actual data)
    # These versions of the variables are only final if no non-terminal
    # events need to be inserted.
    variables = copyVarDict(self.variables)
    for x in xnames:
        if len(alltData) > 1:
            variables[x] = Variable(interp1d(alltData, allxDataDict[x]), 't', x, x)
        else:
            print "Error in Generator:", self.name
            print "t = ", alltData
            print "x = ", allxDataDict
            raise PyDSTool_ValueError, "Fewer than 2 data points computed"
    for a in anames:
        if len(alltData) > 1:
            variables[a] = Variable(interp1d(alltData, allaDataDict[a]), 't', a, a)
        else:
            print "Error in Generator:", self.name
            print "t = ", alltData
            print "x = ", allxDataDict
            raise PyDSTool_ValueError, "Fewer than 2 data points computed"
    # Resolve non-terminal 'precise' events that were flagged, using the
    # variables created. Then, add them to a new version of the variables.
    ntpe_tdict = {}
    for (et0,et1,e) in nontermprecevs:
        lost_evt = False
        search_dt = max((et1-et0)/5,e[1].eventinterval)
        et_precise_list = e[1].searchForEvents(trange=[et0,et1],
                                               dt=search_dt,
                                               checklevel=self.checklevel,
                                               parDict=self.pars,
                                               vars=variables,
                                               inputs=self.inputs,
                                               abseps=self._abseps,
                                               eventdelay=False,
                                               globalt0=self.globalt0)
        if et_precise_list == []:
            lost_evt = True
        for et_precise in et_precise_list:
            if et_precise[0] is not None:
                if et_precise[0] in ntpe_tdict:
                    # add event name at this time (that already exists in the dict)
                    ntpe_tdict[et_precise[0]].append(e[0])
                else:
                    # add event name at this time (when time is not already in dict)
                    ntpe_tdict[et_precise[0]] = [e[0]]
            else:
                lost_evt = True
        if lost_evt:
            raise PyDSTool_ExistError, \
                  ("Internal error: A non-terminal, 'precise' event '"
                   +e[0]+"' was lost after integration!")
    # add non-terminal event points to variables
    if ntpe_tdict != {}:
        # find indices of times at which event times will be inserted
        tix = 0
        evts = ntpe_tdict.keys()
        evts.sort()
        for evix in xrange(len(evts)):
            evt = evts[evix]
            evnames = ntpe_tdict[evt]
            self.warnings.append((W_NONTERMEVENT, (evt, evnames)))
            xval = [variables[x](evt) for x in xnames]
            for evname in evnames:
                Evtimes[evname].append(evt)
                Evpoints[evname].append(array(xval))
            tcond = less_equal(alltData[tix:], evt).tolist()
            try:
                tix = tcond.index(0) + tix  # lowest index for t > evt
                do_insert = (alltData[tix-1] != evt)
            except ValueError:
                # evt = last t value so no need to add it
                do_insert = False
            if do_insert:
                alltData.insert(tix, evt)
                xvaldict = dict(zip(xnames, xval))
                for x in xnames:
                    allxDataDict[x].insert(tix, xvaldict[x])
                for a in anames:
                    allaDataDict[a].insert(tix, variables[a](evt))
        # rebuild the interpolating variables with the inserted points
        for x in xnames:
            variables[x] = Variable(interp1d(alltData, allxDataDict[x]),
                                    't', x, x)
        for a in anames:
            variables[a] = Variable(interp1d(alltData, allaDataDict[a]),
                                    't', a, a)
    # NOTE(review): num_fcns and num_steps both report num_points here
    self.outputstats = {'last_step': dt,
                        'num_fcns': num_points,
                        'num_steps': num_points,
                        'errorStatus': errcode
                        }
    if solver.successful():
        self.validateSpec()
        for evname, evtlist in Evtimes.iteritems():
            try:
                self.eventstruct.Evtimes[evname].extend([et+self.globalt0 \
                                                 for et in evtlist])
            except KeyError:
                self.eventstruct.Evtimes[evname] = [et+self.globalt0 \
                                                 for et in evtlist]
        # build event pointset information (reset previous trajectory's)
        self.trajevents = {}
        for (evname, ev) in eventslist:
            evpt = Evpoints[evname]
            if evpt == []:
                self.trajevents[evname] = None
            else:
                evpt = transpose(array(evpt))
                self.trajevents[evname] = Pointset({'coordnames': xnames,
                                            'indepvarname': 't',
                                            'coordarray': evpt,
                                            'indepvararray': Evtimes[evname],
                                            'indepvartype': Float})
        self.defined = True
        return Trajectory(trajname, variables.values(),
                          self.globalt0, self.checklevel)
    else:
        try:
            self.errors.append((E_COMPUTFAIL, (solver.t,
                                               self._errorcodes[errcode])))
        except TypeError:
            # e.g. when errcode has been used to return info list
            print "Error information: ", errcode
            self.errors.append((E_COMPUTFAIL, (solver.t,
                                               self._errorcodes[0])))
        self.defined = False
col = 0 for n in line.split(): U[row, col] = float(n) col += 1 row += 1 rows, columns = [int(a) for a in vfile.readline().split()] V = na.zeros((rows, columns), type="Float32") row = 0 for line in vfile: col = 0 for n in line.split(): V[row, col] = float(n) col += 1 row += 1 return U, S, V up = cPickle.Unpickler(file('artist_user.pickle', 'rb')) artists, users = up.load() #U is artists U, S, V = load_svd(file('big_s', 'r'), file('big_ut', 'r'), file('big_vt', 'r')) #I believe that U is already transposed tt = na.dot(na.transpose(U), na.dot(na.dot(S, S), U)) fout = file('lsi.out', 'wb') cp = cPickle.Pickler(fout, -1) cp.dump(tt) fout.close()
def logistic_regression(x,y,beta_start=None,verbose=False,CONV_THRESH=1.e-3, MAXIT=500): """ Uses the Newton-Raphson algorithm to calculate a maximum likelihood estimate logistic regression. The algorithm is known as 'iteratively re-weighted least squares', or IRLS. x - rank-1 or rank-2 array of predictors. If x is rank-2, the number of predictors = x.shape[0] = N. If x is rank-1, it is assumed N=1. y - binary outcomes (if N>1 len(y) = x.shape[1], if N=1 len(y) = len(x)) beta_start - initial beta vector (default zeros(N+1,x.dtype.char)) if verbose=True, diagnostics printed for each iteration (default False). MAXIT - max number of iterations (default 500) CONV_THRESH - convergence threshold (sum of absolute differences of beta-beta_old, default 0.001) returns beta (the logistic regression coefficients, an N+1 element vector), J_bar (the (N+1)x(N+1) information matrix), and l (the log-likeliehood). J_bar can be used to estimate the covariance matrix and the standard error for beta. l can be used for a chi-squared significance test. covmat = inverse(J_bar) --> covariance matrix of coefficents (beta) stderr = sqrt(diag(covmat)) --> standard errors for beta deviance = -2l --> scaled deviance statistic chi-squared value for -2l is the model chi-squared test. """ if x.shape[-1] != len(y): raise ValueError, "x.shape[-1] and y should be the same length!" try: N, npreds = x.shape[1], x.shape[0] except: # single predictor, use simple logistic regression routine. 
return _simple_logistic_regression(x,y,beta_start=beta_start, CONV_THRESH=CONV_THRESH,MAXIT=MAXIT,verbose=verbose) if beta_start is None: beta_start = NA.zeros(npreds+1,x.dtype.char) X = NA.ones((npreds+1,N), x.dtype.char) X[1:, :] = x Xt = NA.transpose(X) iter = 0; diff = 1.; beta = beta_start # initial values if verbose: print 'iteration beta log-likliehood |beta-beta_old|' while iter < MAXIT: beta_old = beta ebx = NA.exp(NA.dot(beta, X)) p = ebx/(1.+ebx) l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likeliehood s = NA.dot(X, y-p) # scoring function J_bar = NA.dot(X*p,Xt) # information matrix beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences if verbose: print iter+1, beta, l, diff if diff <= CONV_THRESH: break iter = iter + 1 if iter == MAXIT and diff > CONV_THRESH: print 'warning: convergence not achieved with threshold of %s in %s iterations' % (CONV_THRESH,MAXIT) return beta, J_bar, l
#! /usr/bin/env python # -*- compile-command: "python hello.py" -*- print "Calcul numerique en Python" A=[[1,2],[3,4]] print A print A[1] # selectionne la deuxieme ligne (indice commence a 1) print [A[i][0] for i in range(len(A))] # selectionne la premiere colonne print [i[0] for i in A] # selectionne la premiere colonne (+ court) B=[[1,2],[3,4]] #print add(A,B) # voir pour les operations import numarray as np A_num = np.array([[1,2],[3,4]]) print np.transpose(A_num)[0]
r12 = 0.5 # average correlation between the first predictor and the obs. r13 = 0.25 # avg correlation between the second predictor and the obs. r23 = 0.125 # avg correlation between predictors. # random draws from trivariate normal distribution x = multivariate_normal( NA.array([0, 0, 0]), NA.array([[1, r12, r13], [r12, 1, r23], [r13, r23, 1]]), nsamps) x2 = multivariate_normal( NA.array([0, 0, 0]), NA.array([[1, r12, r13], [r12, 1, r23], [r13, r23, 1]]), nsamps) print 'correlations (r12,r13,r23) = ', r12, r13, r23 print 'number of realizations = ', nsamps # training data. obs = x[:, 0] climprob = NA.sum((obs > 0).astype('f')) / nsamps fcst = NA.transpose(x[:, 1:]) # 2 predictors. obs_binary = obs > 0. # independent data for verification. obs2 = x2[:, 0] fcst2 = NA.transpose(x2[:, 1:]) # compute logistic regression. beta, Jbar, llik = logistic_regression(fcst, obs_binary, verbose=True) covmat = LA.inverse(Jbar) stderr = NA.sqrt(mlab.diag(covmat)) print 'beta =', beta print 'standard error =', stderr # forecasts from independent data. prob = calcprob(beta, fcst2) # compute Brier Skill Score verif = (obs2 > 0.).astype('f') bs = mlab.mean((0.01 * prob - verif)**2)
def logistic_regression(x, y, beta_start=None, verbose=False, CONV_THRESH=1.e-3, MAXIT=500): """ Uses the Newton-Raphson algorithm to calculate a maximum likelihood estimate logistic regression. The algorithm is known as 'iteratively re-weighted least squares', or IRLS. x - rank-1 or rank-2 array of predictors. If x is rank-2, the number of predictors = x.shape[0] = N. If x is rank-1, it is assumed N=1. y - binary outcomes (if N>1 len(y) = x.shape[1], if N=1 len(y) = len(x)) beta_start - initial beta vector (default zeros(N+1,x.dtype.char)) if verbose=True, diagnostics printed for each iteration (default False). MAXIT - max number of iterations (default 500) CONV_THRESH - convergence threshold (sum of absolute differences of beta-beta_old, default 0.001) returns beta (the logistic regression coefficients, an N+1 element vector), J_bar (the (N+1)x(N+1) information matrix), and l (the log-likeliehood). J_bar can be used to estimate the covariance matrix and the standard error for beta. l can be used for a chi-squared significance test. covmat = inverse(J_bar) --> covariance matrix of coefficents (beta) stderr = sqrt(diag(covmat)) --> standard errors for beta deviance = -2l --> scaled deviance statistic chi-squared value for -2l is the model chi-squared test. """ if x.shape[-1] != len(y): raise ValueError, "x.shape[-1] and y should be the same length!" try: N, npreds = x.shape[1], x.shape[0] except: # single predictor, use simple logistic regression routine. return _simple_logistic_regression(x, y, beta_start=beta_start, CONV_THRESH=CONV_THRESH, MAXIT=MAXIT, verbose=verbose) if beta_start is None: beta_start = NA.zeros(npreds + 1, x.dtype.char) X = NA.ones((npreds + 1, N), x.dtype.char) X[1:, :] = x Xt = NA.transpose(X) iter = 0 diff = 1. beta = beta_start # initial values if verbose: print 'iteration beta log-likliehood |beta-beta_old|' while iter < MAXIT: beta_old = beta ebx = NA.exp(NA.dot(beta, X)) p = ebx / (1. + ebx) l = NA.sum(y * NA.log(p) + (1. 
- y) * NA.log(1. - p)) # log-likeliehood s = NA.dot(X, y - p) # scoring function J_bar = NA.dot(X * p, Xt) # information matrix beta = beta_old + NA.dot(LA.inverse(J_bar), s) # new value of beta diff = NA.sum(NA.fabs(beta - beta_old)) # sum of absolute differences if verbose: print iter + 1, beta, l, diff if diff <= CONV_THRESH: break iter = iter + 1 if iter == MAXIT and diff > CONV_THRESH: print 'warning: convergence not achieved with threshold of %s in %s iterations' % ( CONV_THRESH, MAXIT) return beta, J_bar, l
#!/usr/bin/python ## example2_8 from numarray import array,matrixmultiply,transpose from choleski import * a = array([[ 1.44, -0.36, 5.52, 0.0], \ [-0.36, 10.33, -7.78, 0.0], \ [ 5.52, -7.78, 28.40, 9.0], \ [ 0.0, 0.0, 9.0, 61.0]]) L = choleski(a) print 'L =\n',L print '\nCheck: L*L_transpose =\n', \ matrixmultiply(L,transpose(L)) raw_input("\nPress return to exit")
#! /usr/bin/env python # -*- compile-command: "python hello.py" -*- print "Calcul numerique en Python" A=[[1,2],[3,4]] print A print A[1] # selectionne la deuxieme ligne (indice commence a 1) print [A[i][0] for i in range(len(A))] # selectionne la premiere colonne print [i[0] for i in A] # selectionne la premiere colonne (+ court) B=[[1,2],[3,4]] #print add(A,B) # voir pour les operations import numarray A_num = numarray.array([[1,2],[3,4]]) print numarray.transpose(A_num)[0]
def exportPointset(thepointset, infodict, separator=' ',
                   linesep='\n', precision=12, suppress_small=0,
                   varvaldir='col', ext='', append=False):
    """Export a Pointset's data to one or more text files.

    thepointset -- Pointset to export (duck-checked via its 'coordnames'
        attribute to avoid an import cycle with Points.py).
    infodict -- maps filename -> coordinate specification, where the
        specification is a coordinate name, an integer index, a
        list/tuple of names or of indices, or [] meaning all coordinates.
    separator, linesep, precision, suppress_small -- passed to write_array.
    varvaldir -- 'col' writes one column per variable, 'row' one row.
    ext -- filename extension (a leading '.' is added if missing).
    append -- append to existing files ('col' direction only).
    """
    assert varvaldir in ['col', 'row'], \
           "invalid variable value write direction"
    # in order to avoid import cycles, cannot explicitly check that
    # thepointset is of type Pointset, because Points.py imports this file
    # (utils.py), so check an attribute instead.
    try:
        thepointset.coordnames
    except AttributeError:
        raise TypeError, "Must pass Pointset to this function: use arrayToPointset first!"
    # collect every coordinate name/index the caller referenced
    infodict_usedkeys = []
    for key, info in infodict.iteritems():
        if isinstance(info, str):
            infodict_usedkeys += [info]
        elif info == []:
            # empty spec means "all coordinates"
            infodict[key] = copy.copy(thepointset.coordnames)
            infodict_usedkeys.extend(thepointset.coordnames)
        else:
            infodict_usedkeys += list(info)
    allnames = copy.copy(thepointset.coordnames)
    if thepointset._parameterized:
        allnames.append(thepointset.indepvarname)
    # reject any reference that is neither a known name nor a valid index
    remlist = remain(infodict_usedkeys, allnames + range(len(allnames)))
    if remlist != []:
        print "Coords not found in pointset:", remlist
        raise ValueError, \
              "invalid keys in infodict - some not present in thepointset"
    assert isinstance(ext, str), "'ext' extension argument must be a string"
    if ext != '':
        if ext[0] != '.':
            ext = '.' + ext
    if append:
        assert varvaldir == 'col', ("append mode not supported for row"
                                    "format of data ordering")
        modestr = 'a'
    else:
        modestr = 'w'
    totlen = len(thepointset)
    if totlen == 0:
        raise ValueError, ("Pointset is empty")
    for fname, tup in infodict.iteritems():
        try:
            f = open(fname + ext, modestr)
        except IOError:
            print "There was a problem opening file " + fname + ext
            raise
        try:
            # build varray, the 2D data block to write, from the spec 'tup'
            if isinstance(tup, str):
                try:
                    varray = thepointset[tup]
                except TypeError:
                    raise ValueError, "Invalid specification of coordinates"
            elif isinstance(tup, int):
                try:
                    varray = thepointset[:, tup].toarray()
                except TypeError:
                    raise ValueError, "Invalid specification of coordinates"
            elif type(tup) in [list, tuple]:
                if alltrue([type(ti) == str for ti in tup]):
                    thetup = list(tup)
                    # the independent variable is fetched separately and
                    # re-inserted at its original position afterwards
                    if thepointset.indepvarname in tup:
                        tix = thetup.index(thepointset.indepvarname)
                        thetup.remove(thepointset.indepvarname)
                    try:
                        vlist = thepointset[thetup].toarray().tolist()
                    except TypeError:
                        raise ValueError, "Invalid specification of coordinates"
                    if len(thetup) == 1:
                        vlist = [vlist]
                    if thepointset.indepvarname in tup:
                        vlist.insert(tix, thepointset.indepvararray.tolist())
                    varray = array(vlist)
                elif alltrue([type(ti) == int for ti in tup]):
                    try:
                        varray = thepointset[:, tup].toarray()
                    except TypeError:
                        raise ValueError, "Invalid specification of coordinates"
                else:
                    raise ValueError, "Invalid specification of coordinates"
            else:
                f.close()
                raise TypeError, \
                   "infodict values must be singletons or tuples/lists of strings or integers"
        except IOError:
            f.close()
            print "Problem writing to file" + fname + ext
            raise
        except KeyError:
            f.close()
            raise KeyError, ("Keys in infodict not found in pointset")
        # NOTE(review): f is not explicitly closed after a successful write;
        # presumably write_array closes it when keep_open=0 -- confirm.
        if varvaldir == 'row':
            write_array(f, varray, separator, linesep, precision,
                        suppress_small, keep_open=0)
        else:
            write_array(f, transpose(varray), separator, linesep,
                        precision, suppress_small, keep_open=0)
def transform(self, t):
    """Apply the transformation matrix *t* to all points in-place.

    self.points (a list of 3-element rows) is replaced by the
    transformed list.
    """
    # Pack the point list into an (n, 3) array and work with its
    # 3-by-n transpose so the matrix applies to column vectors.
    pts = array(sequence=self.points, shape=(len(self.points), 3))
    columns = transpose(pts)
    transformed = matrixmultiply(t, columns)
    # Transpose back to row-per-point layout and store as a plain list.
    self.points = transpose(transformed).tolist()
def get_evec(p, dim):
    """Return column *dim* (1-based) of p.v reshaped to (totdim, 3).

    The extra array() call makes the selected row contiguous so the
    in-place shape assignment is valid.
    """
    row = transpose(p.v)[dim - 1]
    evec = array(row)            # force a contiguous copy
    evec.shape = (totdim, 3)     # totdim is a module-level global
    return evec
def compute(self, trajname, ics=None):
    """Compute a trajectory for this map system by forward iteration.

    trajname -- name for the returned Trajectory object
    ics      -- optional initial conditions; if given, applied via
                self.set(ics=ics) before iterating

    Returns a Trajectory built from the iterated state and auxiliary
    variable values.  Terminal/non-terminal events are recorded in
    self.warnings and self.trajevents.  'precise' events are not
    supported for maps and raise PyDSTool_ValueError.
    """
    assert self.funcspec.targetlang == "python", (
        "Wrong target language for functional specification. "
        "Python needed for this class"
    )
    assert isinstance(self.funcspec, RHSfuncSpec), "Map system " "requires RHSfuncSpec type to proceed"
    # re-validate if a trajectory was computed before
    if self.defined:
        self.validateSpec()
        ## assert remain(self.initialconditions.keys(),
        ##               self._xdatadict.keys()+self.funcspec.auxvars) == [],\
        ##        ('mismatching entries between initial conditions and '
        ##         'declared variable names')
        self.clearWarnings()
        self.clearErrors()
    if ics is not None:
        self.set(ics=ics)
    xnames = self._var_ixmap  # ensures correct order
    # wrap up each dictionary initial value as a singleton list
    alltData = [self.indepvariable.depdomain.get(0)]
    allxDataDict = dict(zip(xnames,
                            map(listid,
                                sortedDictValues(self.initialconditions,
                                                 self.funcspec.vars))))
    # right-hand side function, resolved by name from the spec
    rhsfn = eval("self." + self.funcspec.spec[1])
    # Check i.c.'s are well defined (finite)
    self.checkInitialConditions()
    self.setEventICs(self.initialconditions, self.globalt0)
    ic = sortedDictValues(self.initialconditions, self.funcspec.vars)
    plist = sortedDictValues(self.pars)
    extralist = copy(plist)
    ilist = []
    if self.inputs:
        # inputVarList is a list of Variables
        listend = self.numpars + len(self.inputs)
        inputVarList = sortedDictValues(self.inputs)
        try:
            for f in inputVarList:
                f.clearWarnings
                ilist.append(f(alltData[0], self.checklevel))
        except AssertionError:
            print "External input call has t out of range: t = ", self.indepvariable.depdomain.get(0)
            print "Maybe checklevel is 3 and initial time is not", "completely inside valid time interval"
            raise
        except ValueError:
            print "External input call has value out of range: t = ", self.indepvariable.depdomain.get(0)
            for f in inputVarList:
                if len(f.warnings):
                    print "External input %s out of range:" % f.name
                    print " t = ", repr(f.warnings[-1][0]), ", ", f.name, " = ", repr(f.warnings[-1][1])
            raise
    else:
        listend = self.numpars
        inputVarList = []
    extralist.extend(ilist)
    # 'precise' event location is meaningless for a discrete map
    precevents = self.eventstruct.query(["precise"])
    if precevents != []:
        raise PyDSTool_ValueError("precise events are not valid for map systems")
    eventslist = self.eventstruct.query(["highlevel", "active", "notvarlinked"])
    termevents = self.eventstruct.query(["term"], eventslist)
    # initialize event info dictionaries
    Evtimes = {}
    Evpoints = {}
    for (evname, ev) in eventslist:
        Evtimes[evname] = []
        Evpoints[evname] = []
    if eventslist != []:
        self.eventstruct.resetHighLevelEvents(self.indepvariable.depdomain.get(0), eventslist)
        self.eventstruct.validateEvents(self.funcspec.vars + self.funcspec.auxvars + ["t"], eventslist)
    # per-iteration storage of variable data (initial values are irrelevant)
    xDataDict = {}
    # storage of all auxiliary variable data
    allaDataDict = {}
    anames = self.funcspec.auxvars
    avals = apply(
        eval("self." + self.funcspec.auxspec[1]),
        [
            self.indepvariable.depdomain.get(0),
            sortedDictValues(self.initialconditions, self.funcspec.vars),
            extralist,
        ],
    )
    for aix in range(len(anames)):
        aname = anames[aix]
        allaDataDict[aname] = [avals[aix]]
    # temp storage of first time at which terminal events found
    # (this is used for keeping the correct end point of new mesh)
    first_found_t = None
    tmesh = self.indepvariable.depdomain.uniformSample(self.tstep, strict=False, avoidendpoints=self.checklevel > 2)
    # Main loop
    breakwhile = False
    success = False
    x = ic
    notdone = True
    # did i=0 for initial condition already
    i = 1
    while notdone:
        t = tmesh[i]
        ## COMPUTE NEXT STATE y from x
        try:
            y = rhsfn(t, x, extralist)
        except:
            print "Error in calling right hand side function:"
            self.showSpec()
            raise
        # bounds-check each coordinate of the new state
        for xi in xrange(self.dimension):
            xDataDict[xnames[xi]] = y[xi]
            if not self.contains(self.variables[xnames[xi]].depdomain, y[xi], self.checklevel):
                self.warnings.append((W_TERMSTATEBD, (t, xnames[xi], y[xi], self.variables[xnames[xi]].depdomain)))
                breakwhile = True
                break  # for loop
        if breakwhile:
            notdone = False
            continue
        if eventslist != []:
            dataDict = copy(xDataDict)
            dataDict["t"] = t
            evsflagged = self.eventstruct.pollHighLevelEvents(None, dataDict, self.pars, eventslist)
            termevsflagged = filter(lambda e: e in evsflagged, termevents)
            nontermevsflagged = filter(lambda e: e not in termevsflagged, evsflagged)
            # register any non-terminating events in the warnings list
            if len(nontermevsflagged) > 0:
                evnames = [ev[0] for ev in nontermevsflagged]
                self.warnings.append((W_NONTERMEVENT, (t, evnames)))
                for evname in evnames:
                    Evtimes[evname].append(t)
                    Evpoints[evname].append(y)
            if termevsflagged != []:
                # active terminal event flagged at this time point
                # register the event in the warnings
                evnames = [ev[0] for ev in termevsflagged]
                self.warnings.append((W_TERMEVENT, (t, evnames)))
                for evname in evnames:
                    Evtimes[evname].append(t)
                    Evpoints[evname].append(y)
                notdone = False
                continue
        alltData.append(t)
        for xi in range(self.dimension):
            allxDataDict[xnames[xi]].append(y[xi])
        avals = apply(eval("self." + self.funcspec.auxspec[1]), [t, sortedDictValues(xDataDict), extralist])
        for aix in range(len(anames)):
            aname = anames[aix]
            allaDataDict[aname].append(avals[aix])
        # refresh external input values for the next iteration
        try:
            extralist[self.numpars : listend] = [apply(f, [t, self.checklevel]) for f in inputVarList]
        except ValueError:
            print "External input call caused value out of range error:", "t = ", t
            for f in inputVarList:
                if len(f.warnings):
                    print "External input variable %s out of range:" % f.name
                    print " t = ", repr(f.warnings[-1][0]), ", ", f.name, " = ", repr(f.warnings[-1][1])
            raise
        except AssertionError:
            print "External input call caused t out of range error: t = ", t
            raise
        if i >= len(tmesh) - 1:
            notdone = False
        else:
            i += 1
            x = y
    # update success flag
    success = not notdone
    # Check that any terminal events found terminated the code correctly
    if first_found_t is not None:
        assert self.warnings[-1][0] == W_TERMEVENT, "Event finding code " "for terminal event failed"
    # Package up computed trajectory in Variable variables
    # Add external inputs warnings to self.warnings, if any
    for f in inputVarList:
        for winfo in f.warnings:
            self.warnings.append((W_NONTERMSTATEBD, (winfo[0], f.name, winfo[1], f.depdomain)))
    # check for non-unique terminal event
    termcount = 0
    for (w, i) in self.warnings:
        if w == W_TERMEVENT or w == W_TERMSTATEBD:
            termcount += 1
            if termcount > 1:
                self.errors.append((E_NONUNIQUETERM, (alltData[-1], i[1])))
    ## print 'Time interval adjusted according to %s: %s' % \
    ##       (self._warnmessages[w], str(i[0])+", "+ str(i[1]))
    # Create variables (self.variables contains no actual data)
    variables = copyVarDict(self.variables)
    # build event pointset information (reset previous trajectory's)
    self.trajevents = {}
    for (evname, ev) in eventslist:
        evpt = Evpoints[evname]
        if evpt == []:
            self.trajevents[evname] = None
        else:
            evpt = transpose(array(evpt))
            self.trajevents[evname] = Pointset(
                {
                    "coordnames": xnames,
                    "indepvarname": "t",
                    "coordarray": evpt,
                    "indepvararray": Evtimes[evname],
                    "indepvartype": self.variables[xnames[0]].indepvartype,
                }
            )
    for x in xnames:
        if len(alltData) > 1:
            variables[x] = Variable(
                Pointset(
                    {
                        "coordnames": [x],
                        "coordarray": allxDataDict[x],
                        "coordtype": self.variables[x].coordtype,
                        "indepvarname": "t",
                        "indepvararray": alltData,
                        "indepvartype": self.variables[x].indepvartype,
                    }
                ),
                "t",
                x,
                x,
            )
        else:
            raise PyDSTool_ValueError, "Fewer than 2 data points computed"
    for a in anames:
        if len(alltData) > 1:
            variables[a] = Variable(
                Pointset(
                    {
                        "coordnames": [a],
                        "coordarray": allaDataDict[a],
                        "coordtype": self.variables[a].coordtype,
                        "indepvarname": "t",
                        "indepvararray": alltData,
                        "indepvartype": self.variables[a].indepvartype,
                    }
                ),
                "t",
                a,
                a,
            )
        else:
            raise PyDSTool_ValueError, "Fewer than 2 data points computed"
    if success:
        self.validateSpec()
        self.defined = True
        return Trajectory(trajname, variables.values(), self.globalt0, self.checklevel)
    else:
        print "Trajectory computation failed"
        # NOTE(review): 'errcode' is never assigned in this method, so this
        # line would raise NameError if the failure branch were reached --
        # looks copied from the ODE-solver version of compute(); confirm.
        self.errors.append((E_COMPUTFAIL, (t, self._errorcodes[errcode])))
        self.defined = False
import numpy.oldnumeric.mlab as mlab # number of realizations. nsamps = 100000 # correlations r12 = 0.5 # average correlation between the first predictor and the obs. r13 = 0.25 # avg correlation between the second predictor and the obs. r23 = 0.125 # avg correlation between predictors. # random draws from trivariate normal distribution x = multivariate_normal(NA.array([0,0,0]),NA.array([[1,r12,r13],[r12,1,r23],[r13,r23,1]]), nsamps) x2 = multivariate_normal(NA.array([0,0,0]),NA.array([[1,r12,r13],[r12,1,r23],[r13,r23,1]]), nsamps) print 'correlations (r12,r13,r23) = ',r12,r13,r23 print 'number of realizations = ',nsamps # training data. obs = x[:,0] climprob = NA.sum((obs > 0).astype('f'))/nsamps fcst = NA.transpose(x[:,1:]) # 2 predictors. obs_binary = obs > 0. # independent data for verification. obs2 = x2[:,0] fcst2 = NA.transpose(x2[:,1:]) # compute logistic regression. beta,Jbar,llik = logistic_regression(fcst,obs_binary,verbose=True) covmat = LA.inverse(Jbar) stderr = NA.sqrt(mlab.diag(covmat)) print 'beta =' ,beta print 'standard error =',stderr # forecasts from independent data. prob = calcprob(beta, fcst2) # compute Brier Skill Score verif = (obs2 > 0.).astype('f') bs = mlab.mean((0.01*prob - verif)**2)
#!/usr/bin/python ## example2_8 from numarray import array, matrixmultiply, transpose from choleski import * a = array([[ 1.44, -0.36, 5.52, 0.0], \ [-0.36, 10.33, -7.78, 0.0], \ [ 5.52, -7.78, 28.40, 9.0], \ [ 0.0, 0.0, 9.0, 61.0]]) L = choleski(a) print 'L =\n', L print '\nCheck: L*L_transpose =\n', \ matrixmultiply(L,transpose(L)) raw_input("\nPress return to exit")
def get_evec(p, dim):
    """Return the (dim)-th eigenvector of p (1-based), shaped (totdim, 3)."""
    row = transpose(p.v)[dim - 1]
    # wrap in array() to guarantee a contiguous copy before reshaping
    result = array(row)
    result.shape = (totdim, 3)
    return result
def exportPointset(thepointset, infodict, separator=' ',
                   linesep='\n', precision=12, suppress_small=0,
                   varvaldir='col', ext='', append=False):
    """Write selected coordinates of a Pointset to text files.

    thepointset -- a Pointset (duck-typed: must have .coordnames)
    infodict    -- maps output filename (without extension) -> which
                   coordinates to write: a single name (str), a single
                   index (int), a list/tuple of names or of indices, or
                   [] meaning all coordinates
    separator, linesep, precision, suppress_small -- formatting options
                   passed through to write_array
    varvaldir   -- 'col' writes one column per variable (default),
                   'row' writes one row per variable
    ext         -- filename extension (a leading '.' is added if missing)
    append      -- append to existing files ('col' direction only)

    Raises TypeError / ValueError / KeyError for malformed arguments or
    coordinates not present in the pointset.
    """
    assert varvaldir in ['col', 'row'], \
           "invalid variable value write direction"
    # in order to avoid import cycles, cannot explicitly check that
    # thepointset is of type Pointset, because Points.py imports this file
    # (utils.py), so check an attribute instead.
    try:
        thepointset.coordnames
    except AttributeError:
        raise TypeError, "Must pass Pointset to this function: use arrayToPointset first!"
    # collect every coordinate name/index the caller referenced so we can
    # validate them all up front
    infodict_usedkeys = []
    for key, info in infodict.iteritems():
        if isinstance(info, str):
            infodict_usedkeys += [info]
        elif info == []:
            # empty list is shorthand for "all coordinates"
            infodict[key] = copy.copy(thepointset.coordnames)
            infodict_usedkeys.extend(thepointset.coordnames)
        else:
            infodict_usedkeys += list(info)
    allnames = copy.copy(thepointset.coordnames)
    if thepointset._parameterized:
        allnames.append(thepointset.indepvarname)
    remlist = remain(infodict_usedkeys, allnames+range(len(allnames)))
    if remlist != []:
        print "Coords not found in pointset:", remlist
        raise ValueError, \
              "invalid keys in infodict - some not present in thepointset"
    assert isinstance(ext, str), "'ext' extension argument must be a string"
    if ext != '':
        if ext[0] != '.':
            ext = '.'+ext
    if append:
        assert varvaldir == 'col', ("append mode not supported for row"
                                    "format of data ordering")
        modestr = 'a'
    else:
        modestr = 'w'
    totlen = len(thepointset)
    if totlen == 0:
        raise ValueError, ("Pointset is empty")
    for fname, tup in infodict.iteritems():
        try:
            f = open(fname+ext, modestr)
        except IOError:
            print "There was a problem opening file "+fname+ext
            raise
        try:
            # resolve 'tup' into the value array 'varray' to be written
            if isinstance(tup, str):
                try:
                    varray = thepointset[tup]
                except TypeError:
                    raise ValueError, "Invalid specification of coordinates"
            elif isinstance(tup, int):
                try:
                    varray = thepointset[:,tup].toarray()
                except TypeError:
                    raise ValueError, "Invalid specification of coordinates"
            elif type(tup) in [list, tuple]:
                if alltrue([type(ti)==str for ti in tup]):
                    thetup=list(tup)
                    # the independent variable is stored separately from the
                    # coordinates, so pull it out and re-insert it afterwards
                    if thepointset.indepvarname in tup:
                        tix = thetup.index(thepointset.indepvarname)
                        thetup.remove(thepointset.indepvarname)
                    try:
                        vlist = thepointset[thetup].toarray().tolist()
                    except TypeError:
                        raise ValueError, "Invalid specification of coordinates"
                    if len(thetup)==1:
                        vlist = [vlist]
                    if thepointset.indepvarname in tup:
                        vlist.insert(tix, thepointset.indepvararray.tolist())
                    varray = array(vlist)
                elif alltrue([type(ti)==int for ti in tup]):
                    try:
                        varray = thepointset[:,tup].toarray()
                    except TypeError:
                        raise ValueError, "Invalid specification of coordinates"
                else:
                    raise ValueError, "Invalid specification of coordinates"
            else:
                f.close()
                raise TypeError, \
                   "infodict values must be singletons or tuples/lists of strings or integers"
        except IOError:
            f.close()
            print "Problem writing to file"+fname+ext
            raise
        except KeyError:
            f.close()
            raise KeyError, ("Keys in infodict not found in pointset")
        # write values, transposing if variables run down columns
        if varvaldir == 'row':
            write_array(f, varray, separator, linesep,
                        precision, suppress_small, keep_open=0)
        else:
            write_array(f, transpose(varray), separator, linesep,
                        precision, suppress_small, keep_open=0)
def compute(self, trajname, dirn='f'):
    """Integrate the ODE with SciPy's VODE integrator and return a Trajectory.

    trajname -- name for the returned Trajectory object
    dirn     -- integration direction; only forward ('f') is supported,
                backwards raises NotImplementedError

    High-level events are polled at every accepted mesh point.  Terminal
    'precise' events trigger re-meshing with a smaller step to localize
    the event time; non-terminal 'precise' events are resolved after
    integration using the interpolated variables.  Results are recorded
    in self.warnings, self.errors, self.trajevents and self.outputstats.
    """
    continue_integ = ODEsystem.prepDirection(self, dirn)
    if self._dircode == -1:
        raise NotImplementedError, ('Backwards integration is not implemented')
    # validate spec if there exists a prior trajectory computation
    if self.defined:
        self.validateSpec()
        self.validateICs()
        self.clearWarnings()
        self.clearErrors()
    pnames = sortedDictKeys(self.pars)
    xnames = self._var_ixmap  # ensures correct order
    # Check i.c.'s are well defined (finite)
    self.checkInitialConditions()
    # choose VODE method: BDF for stiff problems, Adams otherwise
    if self._algparams['stiff']:
        methstr = 'bdf'
        methcode = 2
    else:
        methstr = 'adams'
        methcode = 1
    if self.haveJacobian():
        haveJac = 1
    else:
        haveJac = 0
    # normalize atol to a per-dimension list
    if isinstance(self._algparams['atol'], list):
        if len(self._algparams['atol']) != self.dimension:
            raise ValueError, 'atol list must have same length as phase dimension'
    else:
        atol = self._algparams['atol']
        self._algparams['atol'] = [atol for dimix in xrange(self.dimension)]
    indepdom0 = self.indepvariable.depdomain.get(0)
    indepdom1 = self.indepvariable.depdomain.get(1)
    if continue_integ:
        if self._tdata[0] != self._solver.t:
            print "Previous end time is %f" % self._solver.t
            raise ValueError, \
                  "Start time not correctly updated for continuing orbit"
        x0 = self._solver.y
        indepdom0 = self._solver.t
    else:
        x0 = sortedDictValues(self.initialconditions, self.funcspec.vars)
    if self._solver._integrator is None:
        # Banded Jacobians not yet supported
        #
        # start a new integrator, because method may have been
        # switched
        self._solver.set_integrator('vode',
                                    method=methstr,
                                    rtol=self._algparams['rtol'],
                                    atol=self._algparams['atol'],
                                    nsteps=self._algparams['max_pts'],
                                    max_step=self._algparams['max_step'],
                                    min_step=self._algparams['min_step'],
                                    first_step=self._algparams['init_step'],
                                    with_jacobian=haveJac)
        # speed up repeated access to solver by making a temp name for it
        solver = self._solver
    else:
        # speed up repeated access to solver by making a temp name for it
        solver = self._solver
        solver.with_jacobian = haveJac
        # self.mu = lband
        # self.ml = uband
        solver.rtol = self._algparams['rtol']
        solver.atol = self._algparams['atol']
        solver.method = methcode
        # self.order = order
        solver.nsteps = self._algparams['max_pts']
        solver.max_step = self._algparams['max_step']
        solver.min_step = self._algparams['min_step']
        solver.first_step = self._algparams['init_step']
    solver.set_initial_value(x0, indepdom0)
    ## if self._dircode == 1:
    ##     solver.set_initial_value(x0, indepdom0)
    ## else:
    ##     solver.set_initial_value(x0, indepdom1)
    # wrap up each dictionary initial value as a singleton list
    alltData = [indepdom0]
    allxDataDict = dict(zip(xnames, map(listid, x0)))
    plist = sortedDictValues(self.pars)
    extralist = copy(plist)
    if self.inputs:
        # inputVarList is a list of Variables
        inames = sortedDictKeys(self.inputs)
        listend = self.numpars + len(self.inputs)
        inputVarList = sortedDictValues(self.inputs)
        ilist = _pollInputs(inputVarList, alltData[0] + self.globalt0,
                            self.checklevel)
    else:
        ilist = []
        inames = []
        listend = self.numpars
        inputVarList = []
    extralist.extend(ilist)
    solver.set_f_params(extralist)
    if haveJac:
        solver.set_jac_params(extralist)
    dt = self._algparams['init_step']
    strict = self._algparams['strictdt']
    # Make t mesh
    if not all(isfinite(self.indepvariable.depdomain.get())):
        print "Time domain was: ", self.indepvariable.depdomain.get()
        raise ValueError, "Ensure time domain is finite"
    if dt == indepdom1 - indepdom0:
        # single-step integration required
        tmesh = [indepdom0, indepdom1]
    else:
        # repeatedly shrink dt until a uniform sample succeeds
        notDone = True
        repeatTol = 10
        count = 0
        while notDone and count <= repeatTol:
            try:
                tmesh = self.indepvariable.depdomain.uniformSample(
                    dt, strict=strict, avoidendpoints=self.checklevel > 2)
                notDone = False
            except AssertionError:
                count += 1
                dt = dt / 3.0
        if count == repeatTol:
            raise AssertionError, \
                  ("supplied time step is too large for selected time"
                   " interval")
        if len(tmesh) <= 2:
            # safety net, in case too few points in mesh
            # too few points unless we can add endpoint
            if tmesh[-1] != indepdom1:
                # dt too large for tmesh to have more than one point
                tmesh.append(indepdom1)
        if not strict:
            # get actual time step used
            # don't use [0] in case avoided end points
            dt = tmesh[2] - tmesh[1]
    if self.eventstruct.query(['lowlevel']) != []:
        raise ValueError, "Only high level events can be passed to VODE"
    eventslist = self.eventstruct.query(['highlevel', 'active',
                                         'notvarlinked'])
    termevents = self.eventstruct.query(['term'], eventslist)
    # reverse time by reversing mesh doesn't work
    ## if self._dircode == -1:
    ##     tmesh.reverse()
    tmesh.pop(0)  # get rid of first entry for initial condition
    # per-iteration storage of variable data (initial values are irrelevant)
    xDataDict = {}
    xnames = self.funcspec.vars
    # storage of all auxiliary variable data
    allaDataDict = {}
    anames = self.funcspec.auxvars
    avals = apply(getattr(self, self.funcspec.auxspec[1]),
                  [indepdom0, x0, extralist])
    for aix in range(len(anames)):
        aname = anames[aix]
        try:
            allaDataDict[aname] = [avals[aix]]
        except IndexError:
            print "\nVODE generator: There was a problem evaluating " \
                  + "an auxiliary variable"
            print "Debug info: avals (length", len(avals), ") was ", avals
            print "Index out of range was ", aix
            print self.funcspec.auxspec[1]
            print hasattr(self, self.funcspec.auxspec[1])
            print "Args were:", [indepdom0, x0, extralist]
            raise
    # Initialize signs of event detection objects at IC
    dataDict = copy(self.initialconditions)
    dataDict.update(dict(zip(anames, avals)))
    dataDict['t'] = indepdom0
    self.setEventICs(self.initialconditions, self.globalt0)
    if self.inputs:
        parsinps = copy(self.pars)
        parsinps.update(dict(zip(inames, ilist)))
    else:
        parsinps = self.pars
    if eventslist != []:
        evsflagged = self.eventstruct.pollHighLevelEvents(
            None, dataDict, parsinps, eventslist)
        if len(evsflagged) > 0:
            raise RuntimeError, "Some events flagged at initial condition"
        if continue_integ:
            # revert to prevprevsign, since prevsign changed after call
            self.eventstruct.resetHighLevelEvents(indepdom0, eventslist, 'prev')
        else:
            self.eventstruct.resetHighLevelEvents(indepdom0, eventslist)  #, 'off')
        self.eventstruct.validateEvents(self.funcspec.vars + \
                                        self.funcspec.auxvars + \
                                        self.funcspec.inputs + \
                                        ['t'], eventslist)
    # temp storage of first time at which terminal events found
    # (this is used for keeping the correct end point of new mesh)
    first_found_t = None
    # list of precise non-terminal events to be resolved after integration
    nontermprecevs = []
    evnames = [ev[0] for ev in eventslist]
    lastevtime = {}.fromkeys(evnames, None)
    # initialize new event info dictionaries
    Evtimes = {}
    Evpoints = {}
    if continue_integ:
        for evname in evnames:
            try:
                # these are in global time, so convert to local time
                lastevtime[evname] = self.eventstruct.Evtimes[evname][-1] - self.globalt0
            except (IndexError, KeyError):
                # IndexError: Evtimes[evname] was None
                # KeyError: Evtimes does not have key evname
                pass
    for evname in evnames:
        Evtimes[evname] = []
        Evpoints[evname] = []
    # temp storage for repeatedly used object attributes (for lookup efficiency)
    depdomains = {}
    for xi in xrange(self.dimension):
        depdomains[xi] = self.variables[xnames[xi]].depdomain
    # Main integration loop
    num_points = 0
    breakwhile = False
    while not breakwhile:
        try:
            new_t = tmesh.pop(0)  # this destroys tmesh for future use
        except IndexError:
            break
        try:
            errcode = solver.integrate(new_t)
        except:
            print "Error calling right hand side function:"
            self.showSpec()
            print "Numerical traceback information (current state, " \
                  + "parameters, etc.)"
            print "in generator dictionary 'traceback'"
            self.traceback = {'vars': dict(zip(xnames, solver.y)),
                              'pars': dict(zip(pnames, plist)),
                              'inputs': dict(zip(inames, ilist)),
                              self.indepvariable.name: new_t}
            raise
        # bounds-check each coordinate of the new state
        for xi in xrange(self.dimension):
            xDataDict[xnames[xi]] = solver.y[xi]
            if not self.contains(depdomains[xi], solver.y[xi], self.checklevel):
                self.warnings.append(
                    (W_TERMSTATEBD, (solver.t, xnames[xi], solver.y[xi],
                                     depdomains[xi].get())))
                breakwhile = True
                break  # for loop
        if breakwhile:
            break
        avals = apply(getattr(self, self.funcspec.auxspec[1]),
                      [new_t, sortedDictValues(xDataDict), extralist])
        # Uncomment the following assertion for debugging
        # assert all([isfinite(a) for a in avals]), \
        #    "Some auxiliary variable values not finite"
        aDataDict = dict(zip(anames, avals))
        if eventslist != []:
            dataDict = copy(xDataDict)
            dataDict.update(aDataDict)
            dataDict['t'] = new_t
            if self.inputs:
                parsinps = copy(self.pars)
                parsinps.update(dict(zip(inames,
                            ## extralist[self.numpars:listend])))
                            _pollInputs(inputVarList, new_t + self.globalt0,
                                        self.checklevel))))
            else:
                parsinps = self.pars
            evsflagged = self.eventstruct.pollHighLevelEvents(
                None, dataDict, parsinps, eventslist)
            ## print new_t, evsflagged
            # evsflagged = [ev for ev in evsflagged if solver.t-indepdom0 > ev[1].eventinterval]
            termevsflagged = filter(lambda e: e in evsflagged, termevents)
            nontermevsflagged = filter(lambda e: e not in termevsflagged,
                                       evsflagged)
            # register any non-terminating events in the warnings
            # list, unless they are 'precise' in which case flag
            # them to be resolved after integration completes
            if len(nontermevsflagged) > 0:
                evnames = [ev[0] for ev in nontermevsflagged]
                precEvts = self.eventstruct.query(['precise'],
                                                  nontermevsflagged)
                prec_evnames = [e[0] for e in precEvts]
                # first register non-precise events
                nonprec_evnames = remain(evnames, prec_evnames)
                # only record events if they have not been previously
                # flagged within their event interval
                if nonprec_evnames != []:
                    temp_names = []
                    for evname in nonprec_evnames:
                        prevevt_time = lastevtime[evname]
                        if prevevt_time is None:
                            ignore_ev = False
                        else:
                            # NOTE(review): 'e' here is stale -- it is whatever
                            # binding an earlier loop left behind, not the event
                            # named 'evname'; confirm the intended event's
                            # eventinterval should be used instead
                            if solver.t - prevevt_time < e[1].eventinterval:
                                ignore_ev = True
                            else:
                                ignore_ev = False
                        if not ignore_ev:
                            temp_names.append(evname)
                            lastevtime[evname] = solver.t
                    self.warnings.append(
                        (W_NONTERMEVENT, (solver.t, temp_names)))
                    for evname in temp_names:
                        Evtimes[evname].append(solver.t)
                        Evpoints[evname].append(solver.y)
                for e in precEvts:
                    # only record events if they have not been previously
                    # flagged within their event interval
                    prevevt_time = lastevtime[e[0]]
                    if prevevt_time is None:
                        ignore_ev = False
                    else:
                        if solver.t - dt - prevevt_time < e[1].eventinterval:
                            ignore_ev = True
                        else:
                            ignore_ev = False
                    if not ignore_ev:
                        nontermprecevs.append((solver.t - dt, solver.t, e))
                        # be conservative as to where the event is, so
                        # that don't miss any events.
                        lastevtime[e[0]] = solver.t - dt
                    e[1].reset()  #e[1].prevsign = None #
            do_termevs = []
            if termevsflagged != []:
                # only record events if they have not been previously
                # flagged within their event interval
                for e in termevsflagged:
                    prevevt_time = lastevtime[e[0]]
                    ## print "Event %s flagged."%e[0]
                    ## print " ... last time was ", prevevt_time
                    ## print " ... event interval = ", e[1].eventinterval
                    ## print " ... t = %f, dt = %f"%(solver.t, dt)
                    if prevevt_time is None:
                        ignore_ev = False
                    else:
                        ## print " ... comparison = %f < %f"%(solver.t-dt-prevevt_time, e[1].eventinterval)
                        if solver.t - dt - prevevt_time < e[1].eventinterval:
                            ignore_ev = True
                            ## print "VODE ignore ev"
                        else:
                            ignore_ev = False
                    if not ignore_ev:
                        do_termevs.append(e)
            if len(do_termevs) > 0:
                # >= 1 active terminal event flagged at this time point
                if all([not ev[1].preciseFlag for ev in do_termevs]):
                    # then none of the events specify greater accuracy
                    # register the event in the warnings
                    evnames = [ev[0] for ev in do_termevs]
                    self.warnings.append((W_TERMEVENT, \
                                          (solver.t, evnames)))
                    for evname in evnames:
                        Evtimes[evname].append(solver.t)
                        Evpoints[evname].append(solver.y)
                    breakwhile = True  # break while loop after appending t, x
                else:
                    # find which are the 'precise' events that flagged
                    precEvts = self.eventstruct.query(['precise'],
                                                      do_termevs)
                    # these events have flagged once so eventdelay has
                    # been used. now switch it off while finding event
                    # precisely (should be redundant after change to
                    # eventinterval and eventdelay parameters)
                    evnames = [ev[0] for ev in precEvts]
                    if first_found_t is None:
                        ## print "first time round at", solver.t
                        numtries = 0
                        first_found_t = solver.t
                        restore_evts = deepcopy(precEvts)
                        minbisectlimit = min(
                            [ev[1].bisectlimit for ev in precEvts])
                        for ev in precEvts:
                            ev[1].eventdelay = 0.
                    else:
                        numtries += 1
                        ## print "time round: ", numtries
                        if numtries > minbisectlimit:
                            self.warnings.append(
                                (W_BISECTLIMIT, (solver.t, evnames)))
                            breakwhile = True
                    # find minimum eventtol in precEvts
                    dt_min = min([e[1].eventtol for e in precEvts])
                    # get previous time point
                    if len(alltData) >= 1:
                        # take one step back -> told, which will
                        # get dt added back to first new meshpoint
                        # (solver.t is the step *after* the event was
                        # detected)
                        told = solver.t - dt
                    else:
                        raise ValueError("Event %s found too "%evnames[0]+\
                                         "close to local start time: try decreasing "
                                         "initial step size (current size is "
                                         "%f @ t=%f)"%(dt,solver.t+self.globalt0))
                    if dt_min >= dt:
                        ## print "Registering event:", dt_min, dt
                        # register the event in the warnings
                        self.warnings.append(
                            (W_TERMEVENT, (solver.t, evnames)))
                        for evname in evnames:
                            Evtimes[evname].append(solver.t)
                            Evpoints[evname].append(solver.y)
                        # Cannot continue -- dt_min no smaller than
                        # previous dt. If this is more than the first time
                        # in this code then have found the event to within
                        # the minimum 'precise' event's eventtol, o/w need
                        # to set eventtol smaller.
                        breakwhile = True  # while loop
                    if not breakwhile:
                        dt_new = dt / 5.0
                        # calc new tmesh
                        trangewidth = 2 * dt  #first_found_t - told
                        numpoints = int(math.ceil(trangewidth / dt_new))
                        # choose slightly smaller dt to fit trange exactly
                        dt = trangewidth / numpoints
                        tmesh = [
                            told + i * dt for i in xrange(1, numpoints + 1)
                        ]
                        # reset events according to new time mesh,
                        # setting known previous event state to be their
                        # "no event found" state
                        self.eventstruct.resetHighLevelEvents(told,
                                                              precEvts,
                                                              state='off')
                        # build new ic with last good values (at t=told)
                        if len(alltData) > 1:
                            new_ic = [allxDataDict[xname][-1] \
                                      for xname in xnames]
                        else:
                            new_ic = x0
                        # reset integrator
                        solver.set_initial_value(new_ic, told)
                        extralist[self.numpars:listend] = \
                            [apply(f, [told+self.globalt0, self.checklevel]) \
                             for f in inputVarList]
                        solver.set_f_params(extralist)
                        # continue integrating over new mesh
                        continue  # while
        # accept the step: record t, state and auxiliary values
        alltData.append(solver.t)
        for xi in xrange(self.dimension):
            allxDataDict[xnames[xi]].append(solver.y[xi])
        for aix in xrange(len(anames)):
            aname = anames[aix]
            allaDataDict[aname].append(avals[aix])
        num_points += 1
        if not breakwhile:
            try:
                extralist[self.numpars:listend] = [apply(f, \
                                                  [solver.t+self.globalt0, self.checklevel]) \
                                                  for f in inputVarList]
            except ValueError:
                print 'External input call caused value out of range error:',\
                      't = ', solver.t
                for f in inputVarList:
                    if len(f.warnings):
                        print 'External input variable %s out of range:' % f.name
                        print ' t = ', repr(f.warnings[-1][0]), ', ', \
                              f.name, ' = ', repr(f.warnings[-1][1])
                raise
            except AssertionError:
                print 'External input call caused t out of range error: t = ', \
                      solver.t
                raise
            solver.set_f_params(extralist)
            breakwhile = not solver.successful()
    # Check that any terminal events found terminated the code correctly
    if first_found_t is not None:
        # ... then terminal events were found. Those that were 'precise' had
        # their 'eventdelay' attribute temporarily set to 0. It now should
        # be restored.
        for evname1, ev1 in termevents:
            # restore_evts are copies of the originally flagged 'precise'
            # events
            for evname2, ev2 in restore_evts:
                if evname2 == evname1:
                    ev1.eventdelay = ev2.eventdelay
        try:
            if self.warnings[-1][0] not in [W_TERMEVENT, W_TERMSTATEBD]:
                raise RuntimeError("Event finding code for terminal event "
                                   "failed in Generator " + self.name + \
                                   ": try decreasing eventdelay or "
                                   "eventinterval below eventtol")
        except IndexError:
            print "Output stats: ", self.outputstats
            raise RuntimeError("Event finding failed in Generator " + \
                               self.name + ": try decreasing eventdelay "
                               "or eventinterval below eventtol")
    # Package up computed trajectory in Variable variables
    # Add external inputs warnings to self.warnings, if any
    for f in inputVarList:
        for winfo in f.warnings:
            self.warnings.append(
                (W_NONTERMSTATEBD, (winfo[0], f.name, winfo[1],
                                    f.depdomain.get())))
    # check for non-unique terminal event
    termcount = 0
    for (w, i) in self.warnings:
        if w == W_TERMEVENT or w == W_TERMSTATEBD:
            termcount += 1
            if termcount > 1:
                self.errors.append((E_NONUNIQUETERM, (alltData[-1], i[1])))
    # uncomment the following lines for debugging
    # assert len(alltData) == len(allxDataDict.values()[0]) \
    #      == len(allaDataDict.values()[0]), "Output data size mismatch"
    # for val_list in allaDataDict.values():
    #     assert all([isfinite(x) for x in val_list])
    # Create variables (self.variables contains no actual data)
    # These versions of the variables are only final if no non-terminal
    # events need to be inserted.
    variables = copyVarDict(self.variables)
    for x in xnames:
        if len(alltData) > 1:
            variables[x] = Variable(interp1d(alltData, allxDataDict[x]),
                                    't', x, x)
        else:
            print "Error in Generator:", self.name
            print "t = ", alltData
            print "x = ", allxDataDict
            raise PyDSTool_ValueError, "Fewer than 2 data points computed"
    for a in anames:
        if len(alltData) > 1:
            variables[a] = Variable(interp1d(alltData, allaDataDict[a]),
                                    't', a, a)
        else:
            print "Error in Generator:", self.name
            print "t = ", alltData
            print "x = ", allxDataDict
            raise PyDSTool_ValueError, "Fewer than 2 data points computed"
    # Resolve non-terminal 'precise' events that were flagged, using the
    # variables created. Then, add them to a new version of the variables.
    ntpe_tdict = {}
    for (et0, et1, e) in nontermprecevs:
        lost_evt = False
        search_dt = max((et1 - et0) / 5, e[1].eventinterval)
        et_precise_list = e[1].searchForEvents(trange=[et0, et1],
                                               dt=search_dt,
                                               checklevel=self.checklevel,
                                               parDict=self.pars,
                                               vars=variables,
                                               inputs=self.inputs,
                                               abseps=self._abseps,
                                               eventdelay=False,
                                               globalt0=self.globalt0)
        if et_precise_list == []:
            lost_evt = True
        for et_precise in et_precise_list:
            if et_precise[0] is not None:
                if et_precise[0] in ntpe_tdict:
                    # add event name at this time (that already exists in the dict)
                    ntpe_tdict[et_precise[0]].append(e[0])
                else:
                    # add event name at this time (when time is not already in dict)
                    ntpe_tdict[et_precise[0]] = [e[0]]
            else:
                lost_evt = True
        if lost_evt:
            raise PyDSTool_ExistError, \
                  ("Internal error: A non-terminal, 'precise' event '"
                   +e[0]+"' was lost after integration!")
    # add non-terminal event points to variables
    if ntpe_tdict != {}:
        # find indices of times at which event times will be inserted
        tix = 0
        evts = ntpe_tdict.keys()
        evts.sort()
        for evix in xrange(len(evts)):
            evt = evts[evix]
            evnames = ntpe_tdict[evt]
            self.warnings.append((W_NONTERMEVENT, (evt, evnames)))
            xval = [variables[x](evt) for x in xnames]
            for evname in evnames:
                Evtimes[evname].append(evt)
                Evpoints[evname].append(array(xval))
            tcond = less_equal(alltData[tix:], evt).tolist()
            try:
                tix = tcond.index(0) + tix  # lowest index for t > evt
                do_insert = (alltData[tix - 1] != evt)
            except ValueError:
                # evt = last t value so no need to add it
                do_insert = False
            if do_insert:
                alltData.insert(tix, evt)
                xvaldict = dict(zip(xnames, xval))
                for x in xnames:
                    allxDataDict[x].insert(tix, xvaldict[x])
                for a in anames:
                    allaDataDict[a].insert(tix, variables[a](evt))
        # rebuild interpolators now that event points have been inserted
        for x in xnames:
            variables[x] = Variable(interp1d(alltData, allxDataDict[x]),
                                    't', x, x)
        for a in anames:
            variables[a] = Variable(interp1d(alltData, allaDataDict[a]),
                                    't', a, a)
    self.outputstats = {
        'last_step': dt,
        'num_fcns': num_points,
        'num_steps': num_points,
        'errorStatus': errcode
    }
    if solver.successful():
        self.validateSpec()
        # merge this run's event times (converted to global time) into
        # the persistent per-event record
        for evname, evtlist in Evtimes.iteritems():
            try:
                self.eventstruct.Evtimes[evname].extend([et+self.globalt0 \
                                                         for et in evtlist])
            except KeyError:
                self.eventstruct.Evtimes[evname] = [et+self.globalt0 \
                                                    for et in evtlist]
        # build event pointset information (reset previous trajectory's)
        self.trajevents = {}
        for (evname, ev) in eventslist:
            evpt = Evpoints[evname]
            if evpt == []:
                self.trajevents[evname] = None
            else:
                evpt = transpose(array(evpt))
                self.trajevents[evname] = Pointset({
                    'coordnames': xnames,
                    'indepvarname': 't',
                    'coordarray': evpt,
                    'indepvararray': Evtimes[evname],
                    'indepvartype': Float
                })
        self.defined = True
        return Trajectory(trajname, variables.values(), self.globalt0,
                          self.checklevel)
    else:
        try:
            self.errors.append(
                (E_COMPUTFAIL, (solver.t, self._errorcodes[errcode])))
        except TypeError:
            # e.g. when errcode has been used to return info list
            print "Error information: ", errcode
            self.errors.append(
                (E_COMPUTFAIL, (solver.t, self._errorcodes[0])))
        self.defined = False