def createSimilarAR(data):
    """
    creates an AR-process that is similar to a given data set.
    data must be given in n x d-format
    """
    # step 1: get "average" fit matrix
    l_A = []
    for rep in arange(100):
        idx = randint(0, data.shape[0] - 1, data.shape[0] - 1)
        idat = data[idx, :]
        odat = data[idx + 1, :]
        l_A.append(lstsq(idat, odat)[0])

    sysmat = meanMat(l_A).T
    # idea: get "dynamic noise" from input data as difference of
    # expected vs. predicted data:
    # eta_i = (sysmat*(data[:,i-1]).T - data[:,i])
    # however, in order to destroy any possible correlations in the
    # input noise (they would also occur in the output), the
    # noise per section has to be permuted.
    prediction = dot(sysmat, data[:-1, :].T)
    dynNoise = data[1:, :].T - prediction
    res = [zeros((dynNoise.shape[0], 1)), ]
    for nidx in permutation(dynNoise.shape[1]):
        res.append(dot(sysmat, res[-1]) + dynNoise[:, nidx][:, newaxis])

    return hstack(res).T

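# Usage sketch (illustrative only): createSimilarAR assumes pylab-style names
# (arange, randint, lstsq, dot, zeros, permutation, newaxis, hstack) and a
# helper meanMat -- presumably an element-wise mean over a list of matrices --
# at module level, so only the call pattern is shown here:
#
#     data = cumsum(randn(500, 2), axis=0)   # some n x d input series
#     surrogate = createSimilarAR(data)      # n x d surrogate; first row is
#                                            # the zero initial state
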
def affineTransform(image, x1, y1, x2, y2, x3, y3, M, N):
    # Construct the matrix M
    mat_M = array([[x1, y1, 1, 0, 0, 0],
                   [0, 0, 0, x1, y1, 1],
                   [x2, y2, 1, 0, 0, 0],
                   [0, 0, 0, x2, y2, 1],
                   [x3, y3, 1, 0, 0, 0],
                   [0, 0, 0, x3, y3, 1]])

    # Construct vector q
    q = array([[0], [0], [M], [0], [M], [N]])

    p = lstsq(mat_M, q)
    a, b, c, d, e, f = (p[0][0][0],
                        p[0][1][0],
                        p[0][2][0],
                        p[0][3][0],
                        p[0][4][0],
                        p[0][5][0])

    # A is the resulting matrix that describes the transformation
    A = array([[a, b, c],
               [d, e, f],
               [0, 0, 1]])

    # Create the new image (note: this rebinds b, which held a transform
    # coefficient above; A was already built from it)
    b = array([zeros(N, float)] * M)
    for i in range(0, M):
        for j in range(0, N):
            old_coor = dot(inv(A), [[i], [j], [1]])
            b[i][j] = pV(image, old_coor[0][0], old_coor[1][0], 'linear')

    return b

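# Usage sketch (illustrative only): affineTransform maps the source triangle
# (x1,y1), (x2,y2), (x3,y3) onto the corners (0,0), (M,0), (M,N) of an M x N
# output image. It relies on ambient pylab names (array, lstsq, dot, inv,
# zeros) and on a pixel-interpolation helper pV(image, x, y, 'linear') that is
# not part of this snippet, so only the call pattern is shown here:
#
#     out = affineTransform(image, 10, 20, 200, 30, 15, 180, 256, 256)
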
def approximate(x, y):
    """
    Linear approximation of y=f(x) using least square estimator.
    In:
        x : ndarray
        y : ndarray
    Out:
        a, b : float, as in a*x+b=y
    """
    assert pl.shape(x) == pl.shape(y)
    A = pl.vstack([x, pl.ones(len(x))]).T
    a, b = pl.lstsq(A, y)[0]
    return a, b

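# Usage sketch (hypothetical _demo_ helper, synthetic data): assumes
# "import pylab as pl" at module level, matching the pl.* calls above.
def _demo_approximate():
    import pylab as pl
    x = pl.linspace(0., 10., 50)
    y = 3.0 * x + 1.0 + 0.1 * pl.randn(50)         # noisy straight line
    a, b = approximate(x, y)
    print 'slope ~ %.2f, offset ~ %.2f' % (a, b)   # close to 3.00 and 1.00
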
def get_features(self, positions):
    wMin = 5
    wMax = 18
    track_length = pl.shape(positions)[0]
    steps = self._get_steps(positions, track_length)
    angles = self._get_angles(steps, track_length)
    feats = pl.zeros([track_length, self.many_features])
    manyTimes = pl.zeros(track_length)
    msd = self._get_msd(positions, track_length)
    # following code is to _get diffusion coefficient
    xi = pl.arange(4)
    A = pl.array([xi, pl.ones(4)]).T
    diff_coeff = pl.lstsq(A, msd[:4])[0][0]

    for i in range(track_length - wMax + 1):
        for j in range(wMin, wMax + 1):
            feats[i:i+j, 0] += self._get_straight(angles[i:i+j-2], j - 1)
            feats[i:i+j, 1] += self._get_bend(angles[i:i+j-2], j - 1)
            feats[i:i+j, 2] += self._get_eff(positions[i:i+j, :],
                                             steps[i:i+j-1, :], j - 1)

            gyrationTensor = self._get_gyration_tensor(positions[i:i+j, :])
            [eig_vals, eig_vecs] = pl.eig(gyrationTensor)
            eig_vals = pl.array([eig_vals[0], eig_vals[1]])

            feats[i:i+j, 3] += self._get_asymm(eig_vals[0], eig_vals[1])

            dom_index = pl.argmax(eig_vals)
            dom_vec = eig_vecs[:, dom_index]
            pos_proj = self._get_projection(positions[i:i+j, :], dom_vec, j - 1)
            proj_mean = pl.mean(pos_proj)

            feats[i:i+j, 4] += self._get_skew(pos_proj, proj_mean, j - 1)
            feats[i:i+j, 5] += self._get_kurt(pos_proj, proj_mean, j - 1)
            feats[i:i+j, 6] += self._get_disp(positions[i:i+j, :])
            feats[i:i+j, 7] += self._get_conf(positions[i:i+j, :], j - 1,
                                              diff_coeff)

            manyTimes[i:i+j] += 1

    for i in range(self.many_features):
        feats[:, i] /= manyTimes

    return feats

def test_sincos():
    """
    Simple test/demo of Phaser, recovering a sine and cosine

    Demo courtesy of Jimmy Sastra, U. Penn 2011
    """
    from numpy import (sin, cos, pi, array, linspace, cumsum, asarray, dot,
                       ones, sqrt, newaxis, nanmean)
    from pylab import plot, legend, axis, show, randint, randn, std, lstsq, rand

    # create separate trials and store times and data
    dats = []
    t0 = []
    period = 55  # samples
    phaseNoise = 0.05 / sqrt(period)
    snr = 20
    N = 10
    print N, "trials with:"
    print "\tperiod %.2g" % period, "(samples)\n\tSNR %.2g" % snr, "\n\tphase noise %.2g" % phaseNoise, "(radian/cycle)"
    print "\tlength = [",
    for li in xrange(N):
        l = randint(400, 2000)  # length of trial
        dt = pi * 2.0 / period + randn(l) * phaseNoise  # create noisy time steps
        t = cumsum(dt) + rand() * 2 * pi  # starting phase is random
        raw = asarray([sin(t), cos(t)])  # signal
        raw = raw + randn(*raw.shape) / snr  # SNR=20 noise
        t0.append(t)
        dats.append(raw - nanmean(raw, axis=1)[:, newaxis])
        print l,
    print "]"

    phr = Phaser(dats, psecfunc=lambda x: dot([1, -1], x))
    phi = [phr.phaserEval(d) for d in dats]  # extract phaseNoise
    reg = array([linspace(0, 1, t0[0].size), ones(t0[0].size)]).T
    tt = dot(reg, lstsq(reg, t0[0])[0])
    plot(((tt - pi / 4) % (2 * pi)) / pi - 1, dats[0].T, '.')
    plot((phi[0].T % (2 * pi)) / pi - 1, dats[0].T, 'x')  # plot data versus phase

    legend(['sin(t)', 'cos(t)', 'sin(phi)', 'cos(phi)'])
    axis([-1, 1, -1.2, 1.2])
    show()

def CalculateRates(self, times, levels):
    N = len(levels)
    t_mat = pylab.matrix(times).T

    # normalize the cell_count data by its minimum
    count_matrix = pylab.matrix(levels).T
    norm_counts = count_matrix - min(levels)
    c_mat = pylab.matrix(norm_counts)
    if c_mat[-1, 0] == 0:
        ge_zero = c_mat[pylab.find(c_mat > 0)]
        if ge_zero.any():
            c_mat[-1, 0] = min(ge_zero)

    for i in pylab.arange(N - 1, 0, -1):
        if c_mat[i - 1, 0] <= 0:
            c_mat[i - 1, 0] = c_mat[i, 0]

    c_mat = pylab.log(c_mat)

    res_mat = pylab.zeros((N, 5))  # columns are: slope, offset, error, avg_value, max_value
    for i in xrange(N - self.window_size):
        i_range = range(i, i + self.window_size)
        x = pylab.hstack([t_mat[i_range, 0], pylab.ones((len(i_range), 1))])
        y = c_mat[i_range, 0]

        # Measurements in window must all be above the min.
        if min(pylab.exp(y)) < self.minimum_level:
            continue

        (a, residues) = pylab.lstsq(x, y)[0:2]
        res_mat[i, 0] = a[0]
        res_mat[i, 1] = a[1]
        res_mat[i, 2] = residues
        res_mat[i, 3] = pylab.mean(count_matrix[i_range, 0])
        res_mat[i, 4] = max(pylab.exp(y))

    return res_mat

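# Usage sketch: CalculateRates is written as a method, so this hypothetical
# _demo_ helper attaches it to a minimal stand-in class that supplies the two
# attributes it reads (window_size, minimum_level). Assumes "import pylab" at
# module level, matching the pylab.* calls above.
def _demo_CalculateRates():
    import pylab

    class _Fitter(object):
        window_size = 5        # samples per regression window
        minimum_level = 0.01   # skip windows below this level
    _Fitter.CalculateRates = CalculateRates

    times = pylab.arange(0., 10., 0.5)
    levels = 0.05 * pylab.exp(0.6 * times) + 0.01 * pylab.rand(len(times))
    res = _Fitter().CalculateRates(times, levels)
    print 'steepest log-slope ~ %.2f' % res[:, 0].max()   # ~ the rate 0.6
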
def test_sincos():
    """
    Simple test/demo of Phaser, recovering a sine and cosine

    Demo courtesy of Jimmy Sastra, U. Penn 2011
    """
    from numpy import (sin, cos, pi, array, linspace, cumsum, asarray, dot,
                       ones, sqrt, newaxis, nanmean)
    from pylab import plot, legend, axis, show, randint, randn, std, lstsq

    # create separate trials and store times and data
    dats = []
    t0 = []
    period = 55  # samples
    phaseNoise = 0.5 / sqrt(period)
    snr = 20
    N = 10
    print N, "trials with:"
    print "\tperiod %.2g" % period, "(samples)\n\tSNR %.2g" % snr, "\n\tphase noise %.2g" % phaseNoise, "(radian/cycle)"
    print "\tlength = [",
    for li in xrange(N):
        l = randint(400, 2000)  # length of trial
        dt = pi * 2.0 / period + randn(l) * 0.07  # create noisy time steps
        t = cumsum(dt) + randn() * 2 * pi  # starting phase is random
        raw = asarray([sin(t), cos(t)])  # signal
        raw = raw + randn(*raw.shape) / snr  # SNR=20 noise
        t0.append(t)
        dats.append(raw - nanmean(raw, axis=1)[:, newaxis])
        print l,
    print "]"

    phr = Phaser(dats, psecfunc=lambda x: dot([1, -1], x))
    phi = [phr.phaserEval(d) for d in dats]  # extract phaseNoise
    reg = array([linspace(0, 1, t0[0].size), ones(t0[0].size)]).T
    tt = dot(reg, lstsq(reg, t0[0])[0])
    plot(((tt - pi / 4) % (2 * pi)) / pi - 1, dats[0].T, '.')
    plot((phi[0].T % (2 * pi)) / pi - 1, dats[0].T, 'x')  # plot data versus phase

    legend(['sin(t)', 'cos(t)', 'sin(phi)', 'cos(phi)'])
    axis([-1, 1, -1.2, 1.2])
    show()

def fitData_2s(idat, odat, nps, nidx=None, nrep=500, rcond=1e-3):
    """
    fits the data using two strides ahead - TODO: rewrite this docstring!

    performs a bootstrapped fit of the data, i.e. gives a matrix X that
    minimizes || odat.T - X * idat.T ||. This matrix X is the least squares
    estimate that maps the column vector representing stride k to the column
    vector of k+1, at a / the given Poincare section(s).

    idat, odat: input-data in 1d-format (see e.g. twoD_oneD for description)
        each row in idat and odat must represent a stride (idat) and the
        subsequent stride (odat), i.e. if you want to use all strides,
        odat = idat[1:,:] However, odat must not be shorter (fewer rows)
        than idat.
    nps: numbers of samples per stride
    nidx: number of strides that should be used for a fit in each bootstrap
        iteration. If omitted, idat.shape[0]*2/3 is used
    nrep: how many bootstrap iterations should be performed
    sections: list of integers that indicate which sections should be used
        as intermediate mappings. If only a single section is given, a
        "stride-map" is computed
    rcond: the numerical accuracy which is used for the fit (this value is
        passed through to lstsq). Too high values will cause loss of
        detection of lower eigenvalues, too low values will corrupt the
        return maps.

    returns a triple:
        (1) a list of a list of matrices. Each list contains the matrices
            that map from the first given section to the next. *NOTE* if
            len(sections) is 1, then this is left empty (equivalent matrices
            are then in (2))
        (2) a list of matrices, which represent the "full stride" fits for
            the given set of indices.
        (3) a list of a list of indices. Each list lists the indices (rows
            of idat) that were used for the regression.
    """
    if nidx is None:
        nidx = int(2. / 3. * idat.shape[0])
    #if any(diff(sections)) < 0:
    #    raise ValueError, 'sections must be given in increasing order!'

    # 1st: create bootstrap indices
    indices = [randint(1, idat.shape[0], nidx) for x in range(nrep)]
    #[(idat.shape[0]*rand(nidx-1)+1).astype(int) for x in range(nrep)]

    # 2nd: create section fits (if sections)
    # part A: do fits from one section to the next (within same stride)
    # sectMaps = [
    #     [
    #         lstsq( idat[idcs,sections[sct]::nps],
    #                idat[idcs,sections[sct+1]::nps], rcond=rcond)[0].T
    #         for idcs in indices ]
    #     for sct in range(len(sections)-1) ]

    # part B: do fits from last section to first section of next stride
    #if len(sections) > 1:
    #    sectMaps.append( [
    #        lstsq( idat[idcs,sections[-1]::nps],
    #               odat[idcs,sections[0]::nps], rcond=rcond)[0].T
    #        for idcs in indices ] )

    # 3rd: create stride fits
    strideMaps = [lstsq(hstack([idat[idcs - 1, 0::nps], idat[idcs, 0::nps]]),
                        odat[idcs, 0::nps], rcond=rcond)[0].T
                  for idcs in indices]
    return strideMaps, indices

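# Usage sketch (hypothetical _demo_ helper, synthetic data): fitData_2s expects
# the pylab names it uses (randint, hstack, lstsq) at module level.
def _demo_fitData_2s():
    from pylab import randn
    dat = randn(200, 3)                    # 200 strides, 3 samples per stride
    idat, odat = dat[:-1, :], dat[1:, :]   # rows: stride k and stride k+1
    strideMaps, indices = fitData_2s(idat, odat, nps=3, nrep=50)
    print strideMaps[0].shape              # (1, 2): [stride k-1, stride k] -> k+1
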
def qufromgain(caltable, badspw=[], paoffset=0.0):

    mytb = taskinit.tbtool()
    myme = taskinit.metool()

    mytb.open(caltable + '/ANTENNA')
    pos = mytb.getcol('POSITION')
    meanpos = pl.mean(pos, 1)
    frame = mytb.getcolkeyword('POSITION', 'MEASINFO')['Ref']
    units = mytb.getcolkeyword('POSITION', 'QuantumUnits')
    mpos = myme.position(frame,
                         str(meanpos[0]) + units[0],
                         str(meanpos[1]) + units[1],
                         str(meanpos[2]) + units[2])
    myme.doframe(mpos)

    # _geodetic_ latitude
    latr = myme.measure(mpos, 'WGS84')['m1']['value']

    print 'Latitude = ', latr * 180 / pi

    mytb.open(caltable + '/FIELD')
    nfld = mytb.nrows()
    dirs = mytb.getcol('DELAY_DIR')[:, 0, :]
    mytb.close()
    print 'Found as many as ' + str(nfld) + ' fields.'

    mytb.open(caltable + '/SPECTRAL_WINDOW')
    nspw = mytb.nrows()
    bandnames = [x.split('#')[0].split('_')[-1] for x in mytb.getcol('NAME')]
    mytb.close()
    print 'Found as many as ' + str(nspw) + ' spws.'

    R = pl.zeros((nspw, nfld))
    Q = pl.zeros((nspw, nfld))
    U = pl.zeros((nspw, nfld))
    mask = pl.ones((nspw, nfld), dtype=bool)

    if (len(badspw) > 0):
        mask[badspw, :] = False

    QU = {}
    mytb.open(caltable)
    for ifld in range(nfld):
        for ispw in range(nspw):
            st = mytb.query('FIELD_ID==' + str(ifld) +
                            ' && SPECTRAL_WINDOW_ID==' + str(ispw))
            nrows = st.nrows()
            if nrows > 0:

                rah = dirs[0, ifld] * 12.0 / pi
                decr = dirs[1, ifld]
                times = st.getcol('TIME')
                gains = st.getcol('CPARAM')
                ants = st.getcol('ANTENNA1')

                nants = ants.max() + 1

                # times
                time0 = 86400.0 * floor(times[0] / 86400.0)
                rtimes = times - time0

                # amplitude ratio
                amps = pl.absolute(gains)
                amps[amps == 0.0] = 1.0
                ratio = amps[0, 0, :] / amps[1, 0, :]
                ratio.resize(nrows / nants, nants)

                # parang
                parang = pl.zeros(len(times))
                for itim in range(len(times)):
                    tm = myme.epoch('UTC', str(times[itim]) + 's')
                    last = myme.measure(tm, 'LAST')['m0']['value']
                    last -= floor(last)  # days
                    last *= 24.0  # hours
                    ha = last - rah  # hours
                    har = ha * 2.0 * pi / 24.0
                    parang[itim] = atan2((cos(latr) * sin(har)),
                                         (sin(latr) * cos(decr) -
                                          cos(latr) * sin(decr) * cos(har)))

                parang.resize(nrows / nants, nants)
                parang += bandpa(bandnames[ispw])  # feed pos ang offset
                parang += (paoffset * pi / 180.)   # manual feed pa offset
                parangd = parang * (180.0 / pi)

                A = pl.ones((nrows / nants, 3))
                A[:, 1] = pl.cos(2 * parang[:, 0])
                A[:, 2] = pl.sin(2 * parang[:, 0])

                # fit to antenna 0's ratio (superseded just below by the fit
                # to the antenna-averaged ratio)
                fit = pl.lstsq(A, pl.square(ratio))

                ants0 = range(nants)
                rsum = pl.sum(ratio[:, ants0], 1)
                rsum /= len(ants0)

                fit = pl.lstsq(A, pl.square(rsum))
                R[ispw, ifld] = fit[0][0]
                Q[ispw, ifld] = fit[0][1] / R[ispw, ifld] / 2.0
                U[ispw, ifld] = fit[0][2] / R[ispw, ifld] / 2.0
                P = sqrt(Q[ispw, ifld]**2 + U[ispw, ifld]**2)
                X = 0.5 * atan2(U[ispw, ifld], Q[ispw, ifld]) * 180 / pi

                print 'Fld=', ifld, 'Spw=', ispw, '(B=' + str(bandnames[ispw]) + ', PA offset=' + str(bandpa(bandnames[ispw]) * 180. / pi) + 'deg)', 'Gx/Gy=', R[ispw, ifld], 'Q=', Q[ispw, ifld], 'U=', U[ispw, ifld], 'P=', P, 'X=', X

            else:
                mask[ispw, ifld] = False

            st.close()

        if sum(mask[:, ifld]) > 0:
            print 'For field id = ', ifld, ' there are ', sum(mask[:, ifld]), 'good spws.'

            Qm = pl.mean(Q[mask[:, ifld], ifld])
            Um = pl.mean(U[mask[:, ifld], ifld])
            QU[ifld] = (Qm, Um)
            Qe = pl.std(Q[mask[:, ifld], ifld])
            Ue = pl.std(U[mask[:, ifld], ifld])
            Pm = sqrt(Qm**2 + Um**2)
            Xm = 0.5 * atan2(Um, Qm) * 180 / pi
            print 'Spw mean: Fld=', ifld, 'Q=', Qm, 'U=', Um, '(rms=', Qe, Ue, ')', 'P=', Pm, 'X=', Xm

    mytb.close()

    return QU

def fitData(idat, odat, nps=1, nidx=None, nrep=500, sections=[0, ], rcond=1e-6):
    """
    performs a bootstrapped fit of the data, i.e. gives a matrix X that
    minimizes || odat.T - X * idat.T ||. This matrix X is the least squares
    estimate that maps the column vector representing stride k to the column
    vector of k+1, at a / the given Poincare section(s).

    idat, odat: input-data in 1d-format (see e.g. twoD_oneD for description)
        each row in idat and odat must represent a stride (idat) and the
        subsequent stride (odat), i.e. if you want to use all strides,
        odat = idat[1:,:] However, odat must not be shorter (fewer rows)
        than idat.
    nps: numbers of samples per stride
    nidx: number of strides that should be used for a fit in each bootstrap
        iteration. If omitted, idat.shape[0] is used
    nrep: how many bootstrap iterations should be performed
    sections: list of integers that indicate which sections should be used
        as intermediate mappings. If only a single section is given, a
        "stride-map" is computed
    rcond: the numerical accuracy which is used for the fit (this value is
        passed through to lstsq). Too high values will cause loss of
        detection of lower eigenvalues, too low values will corrupt the
        return maps.

    returns a triple:
        (1) a list of a list of matrices. Each list contains the matrices
            that map from the first given section to the next. *NOTE* if
            len(sections) is 1, then this is left empty (equivalent matrices
            are then in (2))
        (2) a list of matrices, which represent the "full stride" fits for
            the given set of indices.
        (3) a list of a list of indices. Each list lists the indices (rows
            of idat) that were used for the regression.
    """
    if nidx is None:
        nidx = idat.shape[0]
    if any(diff(sections) < 0):
        raise ValueError, 'sections must be given in increasing order!'

    # 1st: create bootstrap indices
    indices = [(idat.shape[0] * rand(nidx)).astype(int) for x in range(nrep)]

    # 2nd: create section fits (if sections)
    # part A: do fits from one section to the next (within same stride)
    sectMaps = [[lstsq(idat[idcs, sections[sct]::nps],
                       idat[idcs, sections[sct + 1]::nps],
                       rcond=rcond)[0].T
                 for idcs in indices]
                for sct in range(len(sections) - 1)]

    # part B: do fits from last section to first section of next stride
    if len(sections) > 1:
        sectMaps.append([lstsq(idat[idcs, sections[-1]::nps],
                               odat[idcs, sections[0]::nps],
                               rcond=rcond)[0].T
                         for idcs in indices])

    # 3rd: create stride fits
    strideMaps = [lstsq(idat[idcs, sections[0]::nps],
                        odat[idcs, sections[0]::nps],
                        rcond=rcond)[0].T
                  for idcs in indices]
    return sectMaps, strideMaps, indices

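# Usage sketch (hypothetical _demo_ helper, synthetic data): fitData expects
# the pylab names it uses (rand, diff, lstsq) at module level.
def _demo_fitData():
    from pylab import randn
    dat = randn(200, 3)                    # 1d-format: 3 samples per stride
    idat, odat = dat[:-1, :], dat[1:, :]
    sectMaps, strideMaps, indices = fitData(idat, odat, nps=3,
                                            sections=[0, 1, 2], nrep=50)
    print len(sectMaps), len(strideMaps)   # 3 section-map lists, 50 stride maps
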
def pseudoSpect(A, npts=200, s=2., gridPointSelect=100, verbose=True,
                lstSqSolve=True):
    """
    original code from http://www.cs.ox.ac.uk/projects/pseudospectra/psa.m
    % psa.m - Simple code for 2-norm pseudospectra of given matrix A.
    % Typically about N/4 times faster than the obvious SVD method.
    % Comes with no guarantees! - L. N. Trefethen, March 1999.

    parameters:
        A: the matrix to analyze
        npts: number of points at the grid
        s: axis limits (-s ... +s)
        gridPointSelect: ???
        verbose: prints progress messages
        lstSqSolve: if true, use least squares in algorithm where solve
            could be used (probably) instead. (replacement for ldivide
            in MatLab)
    """
    from scipy.linalg import schur, triu
    from pylab import (meshgrid, norm, dot, zeros, eye, diag, find, linspace,
                       arange, isreal, inf, ones, lstsq, solve, sqrt, randn,
                       eig, all)

    if lstSqSolve:
        ldiv = lambda M1, M2: lstsq(M1, M2)[0]
    else:
        ldiv = lambda M1, M2: solve(M1, M2)

    def planerot(x):
        '''
        return (G, y)
        with a matrix G such that y = G*x with y[1] = 0
        '''
        G = zeros((2, 2))
        xn = x / norm(x)
        G[0, 0] = xn[0]
        G[1, 0] = -xn[1]
        G[0, 1] = xn[1]
        G[1, 1] = xn[0]
        return G, dot(G, x)

    xmin = -s
    xmax = s
    ymin = -s
    ymax = s
    x = linspace(xmin, xmax, npts, endpoint=False)
    y = linspace(ymin, ymax, npts, endpoint=False)
    xx, yy = meshgrid(x, y)
    zz = xx + 1j * yy

    #% Compute Schur form and plot eigenvalues:
    T, Z = schur(A, output='complex')

    T = triu(T)
    eigA = diag(T)

    # Reorder Schur decomposition and compress to interesting subspace:
    select = find(eigA.real > -250)  # % <- ALTER SUBSPACE SELECTION
    n = len(select)
    for i in arange(n):
        for k in arange(select[i] - 1, i, -1):  # :-1:i
            G = planerot([T[k, k + 1],
                          T[k, k] - T[k + 1, k + 1]])[0].T[::-1, ::-1]
            J = slice(k, k + 2)
            T[:, J] = dot(T[:, J], G)
            T[J, :] = dot(G.T, T[J, :])

    T = triu(T[:n, :n])
    I = eye(n)

    # Compute resolvent norms by inverse Lanczos iteration and plot contours:
    sigmin = inf * ones((len(y), len(x)))
    niter = 0
    for i in arange(len(y)):  # 1:length(y)
        if all(isreal(A)) and (ymax == -ymin) and (i > len(y) / 2):
            sigmin[i, :] = sigmin[len(y) - i, :]
        else:
            for jj in arange(len(x)):
                z = zz[i, jj]
                T1 = z * I - T
                T2 = T1.conj().T
                if z.real < gridPointSelect:  # <- ALTER GRID POINT SELECTION
                    sigold = 0
                    qold = zeros((n, 1))
                    beta = 0
                    H = zeros((100, 100))
                    q = randn(n, 1) + 1j * randn(n, 1)
                    while norm(q) < 1e-8:
                        q = randn(n, 1) + 1j * randn(n, 1)
                    q = q / norm(q)
                    for k in arange(99):
                        v = ldiv(T1, (ldiv(T2, q))) - dot(beta, qold)
                        alpha = dot(q.conj().T, v).real
                        v = v - alpha * q
                        beta = norm(v)
                        qold = q
                        q = v / beta
                        H[k + 1, k] = beta
                        H[k, k + 1] = beta
                        H[k, k] = alpha
                        if (alpha > 1e100):
                            sig = alpha
                        else:
                            sig = max(abs(eig(H[:k + 1, :k + 1])[0]))
                        if (abs(sigold / sig - 1) < .001) or (sig < 3 and k > 2):
                            break
                        sigold = sig

                    niter += 1
                    #print 'niter = ', niter
                    #%text(x(jj),y(i),num2str(k)) % <- SHOW ITERATION COUNTS
                    sigmin[i, jj] = 1. / sqrt(sig)
        if verbose:
            print 'finished line ', str(i), ' out of ', str(len(y))

    return x, y, sigmin

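# Usage sketch (hypothetical _demo_ helper): pseudoSpect does its own imports,
# so this should run as-is. sigmin holds the smallest singular value of
# (zI - A) at each grid point, so its level curves are the 2-norm
# pseudospectra of A.
def _demo_pseudoSpect():
    from pylab import randn, triu, contour, log10, show
    A = triu(randn(30, 30), -1)   # a nonnormal (Hessenberg) test matrix
    x, y, sigmin = pseudoSpect(A, npts=40, s=3., verbose=False)
    contour(x, y, log10(sigmin))  # contours of log10(sigma_min(zI - A))
    show()
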
def FitGrowth(time, cell_count, window_size, start_threshold=0.01, plot_figure=False):
    """Compute growth rate.

    Args:
        time: list of data point time measurements (whatever time units you like).
        cell_count: list of cell counts at each time point.
        window_size: the size of the time window (same time units as above).
        start_threshold: minimum cell count to consider.
        plot_figure: whether or not to plot.

    Returns:
        growth rate in 1/(time unit) where "time unit" is the unit used above.
    """

    def get_frame_range(times, mid_frame, window_size):
        T = times[mid_frame]
        i_range = []
        for i in range(1, len(times)):
            if (times[i - 1] > T - window_size / 2.0 and
                    times[i] < T + window_size / 2.0):
                i_range.append(i)

        if (len(i_range) < 2):
            # there are not enough frames to get a good estimation
            raise ValueError()
        return i_range

    N = len(cell_count)
    #if (N < window_size):
    #    raise Exception("The measurement time-series is too short (smaller than the windows-size)")

    t_mat = pylab.matrix(time).T

    # normalize the cell_count data by its minimum
    count_matrix = pylab.matrix(cell_count).T
    norm_counts = count_matrix - min(cell_count)
    c_mat = pylab.matrix(norm_counts)
    if c_mat[-1, 0] == 0:
        c_mat[-1, 0] = min(c_mat[pylab.find(c_mat > 0)])

    for i in pylab.arange(N - 1, 0, -1):
        if c_mat[i - 1, 0] <= 0:
            c_mat[i - 1, 0] = c_mat[i, 0]

    c_mat = pylab.log(c_mat)

    res_mat = pylab.zeros((N, 4))  # columns are: slope, offset, error, avg_value
    for i in range(N):
        try:
            # calculate the indices covered by the window
            i_range = get_frame_range(time, i, window_size)
            x = pylab.hstack([t_mat[i_range, 0], pylab.ones((len(i_range), 1))])
            y = c_mat[i_range, 0]
            if min(pylab.exp(y)) < start_threshold:
                # the measurements are still too low to use (because of noise)
                raise ValueError()
            (a, residues) = pylab.lstsq(x, y)[0:2]
            res_mat[i, 0] = a[0]
            res_mat[i, 1] = a[1]
            res_mat[i, 2] = residues
            res_mat[i, 3] = pylab.mean(count_matrix[i_range, 0])
        except ValueError:
            pass

    max_i = res_mat[:, 0].argmax()

    abs_res_mat = pylab.array(res_mat)
    abs_res_mat[:, 0] = pylab.absolute(res_mat[:, 0])
    order = abs_res_mat[:, 0].argsort(axis=0)
    stationary_indices = pylab.array(filter(lambda x: x >= max_i, order))
    stationary_level = res_mat[stationary_indices[0], 3]

    if plot_figure:
        pylab.hold(True)
        pylab.plot(time, norm_counts)
        pylab.plot(time, res_mat[:, 0])
        pylab.plot([0, time.max()], [start_threshold, start_threshold], 'r--')
        i_range = get_frame_range(time, max_i, window_size)

        x = pylab.hstack([t_mat[i_range, 0], pylab.ones((len(i_range), 1))])
        y = x * pylab.matrix(res_mat[max_i, 0:2]).T

        pylab.plot(x[:, 0], pylab.exp(y), 'k:', linewidth=4)
        pylab.plot([0, max(time)], [stationary_level, stationary_level])
        pylab.yscale('log')
        pylab.legend(['OD', 'growth rate', 'threshold', 'fit', 'stationary'])

    return res_mat[max_i, 0], stationary_level

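# Usage sketch (hypothetical _demo_ helper, synthetic growth curve) for the
# FitGrowth variant above, i.e. the one returning (rate, stationary_level).
# Assumes "import pylab" at module level, matching the pylab.* calls above.
def _demo_FitGrowth():
    import pylab
    time = pylab.arange(0., 10., 0.25)
    # exponential growth (rate 0.7) that saturates at t=6, plus a little noise
    cell_count = (0.05 * pylab.exp(0.7 * pylab.clip(time, 0., 6.))
                  + 0.01 * pylab.rand(len(time)))
    rate, stationary = FitGrowth(time, cell_count, window_size=2.0)
    print 'growth rate ~ %.2f, stationary level ~ %.2f' % (rate, stationary)
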
def FitGrowth(time, cell_count, window_size, start_threshold=0.01, plot_figure=False):
    """Compute growth rate.

    Args:
        time: list of data point time measurements (whatever time units you like).
        cell_count: list of cell counts at each time point.
        window_size: the size of the time window (same time units as above).
        start_threshold: minimum cell count to consider.
        plot_figure: whether or not to plot.

    Returns:
        growth rate in 1/(time unit) where "time unit" is the unit used above.
    """

    def get_frame_range(times, mid_frame, window_size):
        T = times[mid_frame]
        i_range = []
        for i in range(1, len(times)):
            if (times[i - 1] > T - window_size / 2.0 and
                    times[i] < T + window_size / 2.0):
                i_range.append(i)

        if (len(i_range) < 2):
            # there are not enough frames to get a good estimation
            raise ValueError()
        return i_range

    N = len(cell_count)
    if (N < window_size):
        raise Exception("The measurement time-series is too short "
                        "(smaller than the window-size)")

    t_mat = pylab.matrix(time).T

    # normalize the cell_count data by its minimum
    c_mat = pylab.matrix(cell_count).T - min(cell_count)
    if c_mat[-1, 0] == 0:
        c_mat[-1, 0] = min(c_mat[pylab.find(c_mat > 0)])

    for i in pylab.arange(N - 1, 0, -1):
        if c_mat[i - 1, 0] <= 0:
            c_mat[i - 1, 0] = c_mat[i, 0]

    c_mat = pylab.log(c_mat)

    res_mat = pylab.zeros((N, 3))  # columns are: slope, offset, error
    for i in range(N):
        try:
            # calculate the indices covered by the window
            i_range = get_frame_range(time, i, window_size)
            x = pylab.hstack([t_mat[i_range, 0], pylab.ones((len(i_range), 1))])
            y = c_mat[i_range, 0]
            if min(pylab.exp(y)) < start_threshold:
                # the measurements are still too low to use (because of noise)
                raise ValueError()
            (a, residues) = pylab.lstsq(x, y)[0:2]
            res_mat[i, 0] = a[0]
            res_mat[i, 1] = a[1]
            res_mat[i, 2] = residues
        except ValueError:
            pass

    max_i = res_mat[:, 0].argmax()

    if plot_figure:
        pylab.hold(True)
        pylab.plot(time, cell_count - min(cell_count))
        pylab.plot(time, res_mat[:, 0])
        pylab.plot([0, time.max()], [start_threshold, start_threshold], 'r--')
        i_range = get_frame_range(time, max_i, window_size)

        x = pylab.hstack([t_mat[i_range, 0], pylab.ones((len(i_range), 1))])
        y = x * pylab.matrix(res_mat[max_i, 0:2]).T

        pylab.plot(x[:, 0], pylab.exp(y), 'k:', linewidth=4)
        #plot(time, errors / errors.max())
        pylab.yscale('log')
        #legend(['OD', 'growth rate', 'error'])
        pylab.legend(['OD', 'growth rate', 'threshold', 'fit'])

    return res_mat[max_i, 0]

def qufromgain(caltable, badspw=[], badant=[], fieldids=[], paoffset=None):

    mytb = taskinit.tbtool()
    myme = taskinit.metool()

    pos = myme.observatory('atca')
    myme.doframe(pos)

    # _geodetic_ latitude
    latr = myme.measure(pos, 'WGS84')['m1']['value']
    #print 'latitude: ',latr*180/pi

    mytb.open(caltable + '/FIELD')
    nfld = mytb.nrows()
    dirs = mytb.getcol('DELAY_DIR')[:, 0, :]
    mytb.close()
    print 'Found ' + str(nfld) + ' fields.'

    mytb.open(caltable + '/SPECTRAL_WINDOW')
    freq = mytb.getcol('REF_FREQUENCY')
    nspw = mytb.nrows()
    mytb.close()
    print 'Found ' + str(nspw) + ' spws.'

    # sort out pa offset to apply
    paoff = pl.zeros(nspw)
    if paoffset == None:
        # use defaults for ATCA
        # (should get these from Feed subtable, but cal table doesn't have one)
        for ispw in range(nspw):
            if freq[ispw] < 30e9 or freq[ispw] > 50e9:
                paoff[ispw] = 45.
            else:
                paoff[ispw] = 135.
    else:
        paoff = paoffset

    R = pl.zeros((nspw, nfld))
    Q = pl.zeros((nspw, nfld))
    U = pl.zeros((nspw, nfld))
    mask = pl.ones((nspw, nfld), dtype=bool)

    if (len(badspw) > 0):
        mask[badspw, :] = False

    if (len(fieldids) == 0):
        fieldids = range(nfld)

    QU = {}
    mytb.open(caltable)
    for ifld in fieldids:
        for ispw in range(nspw):
            if not mask[ispw, ifld]:
                continue
            st = mytb.query('FIELD_ID==' + str(ifld) +
                            ' && SPECTRAL_WINDOW_ID==' + str(ispw))
            nrows = st.nrows()
            if nrows > 0:

                rah = dirs[0, ifld] * 12.0 / pi
                decr = dirs[1, ifld]
                times = st.getcol('TIME')
                gains = st.getcol('CPARAM')
                ants = st.getcol('ANTENNA1')

                ntimes = len(pl.unique(times))
                nants = ants.max() + 1

                #print nrows, ntimes, nants

                # times
                time0 = 86400.0 * floor(times[0] / 86400.0)
                rtimes = times - time0

                # amplitude ratio
                amps = pl.absolute(gains)
                amps[amps == 0.0] = 1.0
                ratio = amps[0, 0, :] / amps[1, 0, :]

                # parang
                parang = pl.zeros(len(times))
                for itim in range(len(times)):
                    tm = myme.epoch('UTC', str(times[itim]) + 's')
                    last = myme.measure(tm, 'LAST')['m0']['value']
                    last -= floor(last)  # days
                    last *= 24.0  # hours
                    ha = last - rah  # hours
                    har = ha * 2.0 * pi / 24.0
                    parang[itim] = atan2((cos(latr) * sin(har)),
                                         (sin(latr) * cos(decr) -
                                          cos(latr) * sin(decr) * cos(har)))

                ratio.resize(nrows / nants, nants)
                parang.resize(nrows / nants, nants)
                parang += paoff[ispw] * pi / 180.
                parangd = parang * (180.0 / pi)

                A = pl.ones((nrows / nants, 3))
                A[:, 1] = pl.cos(2 * parang[:, 0])
                A[:, 2] = pl.sin(2 * parang[:, 0])

                fit = pl.lstsq(A, pl.square(ratio))

                amask = pl.ones(nants, dtype=bool)
                if len(badant) > 0:
                    amask[badant] = False
                rsum = pl.sum(ratio[:, amask], 1)
                rsum /= pl.sum(amask)

                fit = pl.lstsq(A, pl.square(rsum))
                R[ispw, ifld] = fit[0][0]
                Q[ispw, ifld] = fit[0][1] / R[ispw, ifld] / 2.0
                U[ispw, ifld] = fit[0][2] / R[ispw, ifld] / 2.0
                P = sqrt(Q[ispw, ifld]**2 + U[ispw, ifld]**2)
                X = 0.5 * atan2(U[ispw, ifld], Q[ispw, ifld]) * 180 / pi

                print 'Fld=%i, Spw=%i, PA Offset=%5.1f, Gx/Gy=%5.3f, Q=%5.3f, U=%5.3f, P=%5.3f, X=%5.1f' % \
                    (ifld, ispw, paoff[ispw], R[ispw, ifld], Q[ispw, ifld], U[ispw, ifld], P, X)

            else:
                mask[ispw, ifld] = False

            st.close()

        if (sum(mask[:, ifld])) > 0:
            print 'For field id = ', ifld, ' there are ', sum(mask[:, ifld]), 'good spws.'

            Qm = pl.mean(Q[mask[:, ifld], ifld])
            Um = pl.mean(U[mask[:, ifld], ifld])
            QU[ifld] = (Qm, Um)
            Qe = pl.std(Q[mask[:, ifld], ifld])
            Ue = pl.std(U[mask[:, ifld], ifld])
            Pm = sqrt(Qm**2 + Um**2)
            Xm = 0.5 * atan2(Um, Qm) * 180 / pi
            print 'Spw mean: Fld=%i Fractional: Q=%5.3f, U=%5.3f, (rms= %5.3f,%5.3f), P=%5.3f, X=%5.1f' % \
                (ifld, Qm, Um, Qe, Ue, Pm, Xm)

    mytb.close()

    return QU

def sim():
    global V, Vlin, tao_e, Rar, Rmr
    # depends on module-level globals: ns (NEURON interface), sec, Ras, Rms,
    # Ie, Ies, tstart, t0, t1, and scipy.optimize's leastsq
    Rar = pl.arange(Ras[0], Ras[1], Ras[2])
    Rmr = pl.arange(Rms[0], Rms[1], Rms[2])
    ns.mech.setcurrent(Ie * Ies, ns.dt)
    li = len(Rmr)
    lj = len(Rar)
    tao_e = pl.empty((li, lj))
    tao_l = pl.empty((li, lj))
    tao_n = pl.empty((li, lj))
    for i in range(li):
        for j in range(lj):
            # Special conditions
            if Rar[j] < 10.:
                sec.L(15000.)
            else:
                sec.L(7000.)
            if Rmr[i] > 5000.:
                ns.h.tstop = 50.
            else:
                ns.h.tstop = 20.
            sec.Rm(Rmr[i])
            sec.Ra(Rar[j])
            print Rmr[i], Rar[j]
            ns.sim()

            # Obtain voltage, steady state voltage, normalize and
            # get logarithmic values
            t = ns.t
            Vinf = sec.nrnV0[-1]
            V = 1 - pl.array(sec.nrnV0)[:-1] / Vinf
            Vlin = pl.log(V)
            print Vinf

            # Estimate the time constant by finding the
            # point at which the voltage reaches the value 1/e
            nz, = pl.nonzero(V > (1 / pl.e))
            # The time where V ~ 1/e is the point right after the last nz
            tao_e[i, j] = t[nz[-1] + 1] - tstart
            print 'tao_e', tao_e[i, j]

            # Define least squares data interval and
            # make the pulse starting time to be zero
            i0 = int(t0 / ns.dt)
            i1 = int(t1 / ns.dt)
            t01 = t[:i1 - i0]
            V01 = V[i0:i1]
            Vlin01 = Vlin[i0:i1]

            # Linear least squares
            A = pl.c_[t01, pl.ones_like(t01)]
            m, c = pl.lstsq(A, Vlin01.copy())[0]
            tao_l[i, j] = -1. / m - tstart
            print 'tao_l', tao_l[i, j], '(', m, c, pl.exp(c), ')'

            # Parametric function: p is the parameter vector and
            # t the independent variable
            fp = lambda p, t: p[0] * pl.exp(p[1] * t)
            #fp = lambda p, t: p[0]*pl.exp(p[1]*t) + p[2]*pl.exp(p[3]*t)
            #fp = lambda p, t: pl.exp(p[0]*t)

            # Error function
            e = lambda p, t, V: (fp(p, t) - V)

            # Initial parameter guess
            p0 = [1., -5.]
            #p0 = [1., -5., 1., -1.]
            #p0 = [-5.]

            # Fitting
            p, success = leastsq(e, p0, args=(t01, V01), maxfev=10000)
            tao_n[i, j] = -1. / p[1] - tstart
            print 'tao_n', tao_n[i, j], '(', p, success, ')'
