import numpy as np
import pylab
# `centroid` is a project-local module providing the robust-mean estimator frobomad().

def PlotComparison(old_img, new_img, title="Title"):
    """
    Purpose: Generate a 2x2 plot showing image statistics to compare
    before/after gain normalization.
    Inputs:
        -old_img {numpy array}- Original pre-normalized image
        -new_img {numpy array}- Gain-normalized image
    """
    rows, cols = new_img.shape
    oldstd = []
    newstd = []
    oldmed = []
    newmed = []
    # Per-column standard deviation and robust mean, before and after
    for col in range(0, cols):
        oldstd.append(np.std(old_img[:, col]))
        newstd.append(np.std(new_img[:, col]))
        oldmed.append(centroid.frobomad(old_img[:, col])[0])
        newmed.append(centroid.frobomad(new_img[:, col])[0])

    # 500 random pixels for the intensity histograms
    randcol = pylab.randint(0, cols, 500)
    randrow = pylab.randint(0, rows, 500)

    pylab.figure(num=None, figsize=(13, 7), dpi=80, facecolor='w', edgecolor='k')
    pylab.title(title)

    # Standard deviation comparison
    pylab.subplot(2, 2, 1)
    pylab.plot(oldstd)
    pylab.plot(newstd)
    pylab.legend([r'Before Normalization $\sigma$', r'After Normalization $\sigma$'])
    pylab.xlabel('Column')
    pylab.ylabel('Standard Deviation')

    # Robust mean comparison
    pylab.subplot(2, 2, 3)
    pylab.plot(oldmed)
    pylab.plot(newmed)
    pylab.legend(['Before Normalization Rmean', 'After Normalization Rmean'])
    pylab.xlabel('Column')
    pylab.ylabel('Robust Mean')

    # Intensity histograms of the random pixel selection
    pylab.subplot(2, 2, 2)
    pylab.hist(old_img[randrow, randcol], bins=75)
    pylab.xlabel('Old Rand Selection Intensity')
    pylab.ylabel('Count')

    pylab.subplot(2, 2, 4)
    pylab.hist(new_img[randrow, randcol], bins=75)
    pylab.xlabel('New Rand Selection Intensity')
    pylab.ylabel('Count')

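# Usage sketch (hypothetical data, not from the original source): compare a
# column-gain-corrupted image against its clean version. frobomad comes from
# the project-local `centroid` module, so this only runs where that module
# is importable.
if __name__ == '__main__':
    img = np.random.normal(100., 10., (512, 512))
    gains = np.random.uniform(0.8, 1.2, 512)  # per-column gain error
    PlotComparison(img * gains, img, title='Gain normalization check')
    pylab.show()
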
import networkx as nx
import pylab

def init():
    global g, nextg
    g = nx.karate_club_graph()
    for i in g.nodes():
        g.node[i]['state'] = pylab.randint(1, 11)  # random initial state in 1..10
    nextg = g.copy()
    g.pos = nx.spring_layout(g)

def get_age_sex(is_crew=False, min_age=18, max_age=99, crew_age=35, crew_std=5, guest_age=68, guest_std=8):
    '''
    Define age-sex distributions. Passenger age distribution based on:
    https://www.nytimes.com/reuters/2020/02/12/world/asia/12reuters-china-health-japan.html

    "About 80% of the passengers were aged 60 or over [=2130], with 215 in
    their 80s and 11 in the 90s, the English-language Japan Times newspaper
    reported."
    '''
    # Define female (0) or male (1) -- evenly distributed
    sex = pl.randint(2)

    # Define age distribution for the crew and guests
    if is_crew:
        age = pl.normal(crew_age, crew_std)
    else:
        age = pl.normal(guest_age, guest_std)

    # Clamp to the allowed age range (median of [min, age, max])
    age = pl.median([min_age, age, max_age])

    return age, sex

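# Illustrative draws (assumes `import pylab as pl` at module level, as the
# function body does): crew skews young and tight, guests older and wider.
# age, sex = get_age_sex(is_crew=True)   # e.g. ~35 +/- 5 years
# age, sex = get_age_sex()               # e.g. ~68 +/- 8 years, clipped to [18, 99]
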
from pylab import dot, var, randint

def varRed(idat, odat, A, bootstrap=None):
    """
    computes the variance reduction when using A*idat[:,x].T as predictor for
    odat[:,x].T
    if bootstrap is an integer > 1, a bootstrap with the given number of
    iterations will be performed.
    returns tVred, sVred: the total relative variance after prediction (all
    coordinates) and the variance reduction for each coordinate separately.
    These data are a scalar and an array, or lists of scalars and arrays when
    a bootstrap is performed.
    Note: in the bootstrapped results, the first element refers to the "full"
    data variance reduction.
    """
    nBoot = bootstrap if type(bootstrap) is int else 0
    if nBoot < 2:
        nBoot = 0
    odat_pred = dot(A, idat.T)
    rdiff = odat_pred - odat.T                          # remaining difference
    rvar = var(rdiff, axis=1) / var(odat.T, axis=1)     # relative variance
    trvar = var(rdiff.flat) / var(odat.T.flat)          # total relative variance
    if nBoot > 0:
        rvar = [rvar, ]
        trvar = [trvar, ]
        for rep in range(nBoot - 1):
            indices = randint(0, odat.T.shape[1], odat.T.shape[1])
            odat_pred = dot(A, idat[indices, :].T)
            rdiff = odat_pred - odat[indices, :].T      # remaining difference
            rvar.append(var(rdiff, axis=1) / var(odat.T, axis=1))
            trvar.append(var(rdiff.flat) / var(odat.T.flat))

    return trvar, rvar

def __init__(self, pars, age=0, sex=0, crew=False):
    super().__init__(pars)

    # Set parameters
    self.uid = str(pl.randint(0, 1e9))  # Unique identifier for this person
    self.age = float(age)               # Age of the person (in years)
    self.sex = sex                      # Female (0) or male (1)
    self.crew = crew                    # Whether the person is a crew member
    if self.crew:
        self.contacts = self['contacts_crew']    # Determine how many contacts they have
    else:
        self.contacts = self['contacts_guest']

    # Define state
    self.alive = True
    self.susceptible = True
    self.exposed = False
    self.infectious = False
    self.diagnosed = False
    self.recovered = False

    # Keep track of dates
    self.date_exposed = None
    self.date_infectious = None
    self.date_diagnosed = None
    self.date_recovered = None
    return

from pylab import arange, randint, lstsq, dot, zeros, permutation, newaxis, hstack
# `meanMat` (element-wise mean of a list of matrices) is assumed to be defined elsewhere.

def createSimilarAR(data):
    """
    creates an AR-process that is similar to a given data set.
    data must be given in n x d-format
    """
    # step 1: get "average" fit matrix
    l_A = []
    for rep in arange(100):
        idx = randint(0, data.shape[0] - 1, data.shape[0] - 1)
        idat = data[idx, :]
        odat = data[idx + 1, :]
        l_A.append(lstsq(idat, odat)[0])

    sysmat = meanMat(l_A).T
    # idea: get "dynamic noise" from input data as difference of
    # expected vs. predicted data:
    # eta_i = (sysmat*(data[:,i-1]).T - data[:,i])
    # however, in order to destroy any possible correlations in the
    # input noise (they would also occur in the output), the
    # noise per section has to be permuted.
    prediction = dot(sysmat, data[:-1, :].T)
    dynNoise = data[1:, :].T - prediction
    res = [zeros((dynNoise.shape[0], 1)), ]
    for nidx in permutation(dynNoise.shape[1]):
        res.append(dot(sysmat, res[-1]) + dynNoise[:, nidx][:, newaxis])

    return hstack(res).T

def cstep(self):
    """Run one step of the Metropolis algorithm using weave"""
    i, j = (pl.randint(0, self.n), pl.randint(0, self.n))
    spins = self.spins
    n = self.n
    J = self.J
    T = self.T
    code = """
           #include <math.h>
           double neighbour_sum = 0;
           neighbour_sum += spins(i%n, (j-1+n)%n);
           neighbour_sum += spins(i%n, (j+1)%n);
           neighbour_sum += spins((i-1+n)%n, j%n);
           neighbour_sum += spins((i+1)%n, j%n);
           double Ediff = 2 * J * spins(i,j) * neighbour_sum;
           double Sdiff = -2 * spins(i,j);
           double PA = 1.;
           if(Ediff > 0) {
               if(T == 0) {
                   PA = 0.;
               } else {
                   PA = exp(-1/T*Ediff);
               }
           }
           py::tuple results(3);
           results[0] = PA;
           results[1] = Ediff;
           results[2] = Sdiff;
           return_val = results;
           """
    PA, Ediff, Sdiff = weave.inline(code, ['spins', 'J', 'T', 'n', 'i', 'j'],
                                    type_converters=weave.converters.blitz)
    if PA > pl.random():
        self.spinflip((i, j))
        return Ediff, Sdiff
    else:
        return 0., 0.

def sim_time(dem_frac):
    n = pylab.randint(N_MIN, N_MAX)
    alpha = ALPHA
    net = random_network(n)
    r = ne_capacity(net) * dem_frac
    tic = time.clock()
    optimal_stackelberg(net, r, alpha)
    val = (n, time.clock() - tic)
    print val
    return val

def sim_time(i):
    n = pylab.randint(N_MIN, N_MAX)
    alpha = pylab.rand()
    net = random_network(n)
    r = ne_capacity(net) * ((1 - MIN_DEMAND) * pylab.rand() + MIN_DEMAND)
    tic = time.clock()
    optimal_stackelberg(net, r, alpha)
    val = (n, time.clock() - tic)
    print val
    return val

def bootstrapMedian(data, N=5000):
    # determine 95% confidence intervals of the median
    M = len(data)
    percentile = [2.5, 97.5]
    estimate = pl.zeros(N)
    for n in xrange(N):
        bsIndex = pl.randint(0, M, M)
        bsData = data[bsIndex]
        estimate[n] = pl.prctile(bsData, 50)

    CI = pl.prctile(estimate, percentile)
    return CI

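# Example call (assumes `import pylab as pl`; pl.prctile is the old
# matplotlib.mlab percentile helper): 95% CI of the median of noisy samples.
# data = pl.randn(1000)
# lo, hi = bootstrapMedian(data, N=2000)
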
def one_ci(v, ci, bootstraps):
    v = pylab.array(v)
    v = pylab.ma.masked_array(v, pylab.isnan(v)).compressed()
    if v.size == 0:
        return pylab.nan, 0, 0  # Nothing to compute
    r = pylab.randint(v.size, size=(v.size, bootstraps))
    booted_samp = pylab.array([pylab.median(v[r[:, n]]) for n in xrange(bootstraps)])
    booted_samp.sort()

    med = pylab.median(booted_samp)
    idx_lo = int(bootstraps * ci / 2.0)
    idx_hi = int(bootstraps * (1.0 - ci / 2))

    return med, med - booted_samp[idx_lo], booted_samp[idx_hi] - med

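# Example (assumes `import pylab`; note ci is the *excluded* tail mass, so
# ci=0.05 gives a 95% interval). NaNs are masked out before resampling:
# med, err_lo, err_hi = one_ci([1., 2., pylab.nan, 3., 4.], 0.05, 2000)
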
def random_subset(list, num):
    # Sample `num` distinct elements without replacement (note: the argument
    # name `list` shadows the builtin).
    from pylab import randint
    assert num <= len(list)
    remaining_inds = range(len(list))
    inds = []
    for n in range(num):
        i = randint(len(remaining_inds))
        actual_ind = remaining_inds[i]
        del remaining_inds[i]
        inds.append(actual_ind)
    return [list[i] for i in inds]

def update(self, noises):
    '''Dynamical update of activity as a Monte Carlo procedure with a
    probability for some neuron to change its state.
    '''
    if self.tauT:
        if self.test == 0:
            self.theta = self.A.sum() / self.N
        elif self.test == 1:
            self.theta = self.Omega * self.A.sum() / self.N
        elif self.test == 2:
            self.theta += self.dt / self.tauT * self.N * (
                -self.theta + self.Omega * self.A.sum() / self.N)
        elif self.test == 3:
            self.theta += self.dt / self.tauT * (
                -self.theta + self.Omega * self.A.sum() / self.N)
    k1 = randint(self.N)
    self.x[k1] = self.Psi * self.conn.getcol(k1).T.dot(self.A)[0]
    self.A[k1] = self.thresholding(self.x[k1])
    for nothing in xrange(self.N):
        k2 = randint(self.N)
        if rand() < self.sigma2x / self.N:
            self.A[k2] = self.A[k2] == 0

def update():
    global agents
    ag = agents[pylab.randint(n)]
    neighbourhood = [nb for nb in agents
                     if (nb.x - ag.x)**2 + (nb.y - ag.y)**2 < r**2 and nb != ag]
    num_similar = 0
    for j in neighbourhood:
        if j.color == ag.color:
            num_similar += 1
    if len(neighbourhood) > 0:
        ratio = num_similar / float(len(neighbourhood))
        if ratio < th:
            ag.x = pylab.random()
            ag.y = pylab.random()

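# Hypothetical driver loop for the Schelling-style move rule above; `agents`,
# `n`, `r` and the tolerance threshold `th` are module globals assumed to be
# set up elsewhere:
# for t in xrange(100000):
#     update()
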
def boot_curvefit(x, y, fit, p0, ci=.05, bootstraps=2000):
    """use of bootstrapping to perform curve fitting.
    Inputs:
      x - x values
      y - corresponding y values
      fit - a packaged fitting function
      p0 - initial parameter list that fit will use

      fit should be a function of the form
        p1 = fit(x, y, p0)
      with p1 being the optimized parameter vector

    Outputs:
      p_ci - 3xn array (n = number of parameters: median, low_ci, high_ci)
      booted_p - a bxn array of parameter values (b = number of bootstraps)

    An example fit function is:

    def fit(x, y, p0):
      func = lambda p, t: p[0]*pylab.exp(-t/abs(p[1])) + p[2]
      errfunc = lambda p, t, y: func(p, t) - y
      p1, success = optimize.leastsq(errfunc, p0, args=(x, y))
      return p1
    """
    p0 = pylab.array(p0)  # Make it an array in case it isn't one
    if bootstraps > 1:
        idx = pylab.randint(x.size, size=(x.size, bootstraps))
    else:
        idx = pylab.zeros((x.size, 1), dtype=int)
        idx[:, 0] = pylab.arange(x.size)
    booted_p = pylab.zeros((p0.size, bootstraps))
    for n in xrange(bootstraps):
        booted_p[:, n] = fit(x[idx[:, n]], y[idx[:, n]], p0)

    p_ci = pylab.zeros((3, p0.size))
    for p in xrange(p0.size):
        booted_samp = pylab.sort(booted_p[p])
        med = pylab.median(booted_samp)
        idx_lo = int(bootstraps * ci / 2.0)
        idx_hi = int(bootstraps * (1.0 - ci / 2))
        p_ci[:, p] = [med, med - booted_samp[idx_lo], booted_samp[idx_hi] - med]

    return p_ci, booted_p

def malloc(element_size, num_elements, base_addr=0x60000000):
    """
    Build an OrderedDict of (int : int) pairs for a memory map starting at a
    base memory address
    """
    contents_dict = collections.OrderedDict({
        'header': ['Address (hex)', 'Content (hex)'],              # header list
        'content_formatter': f'<pre>%0{2*element_size}x</pre>'     # adjustable to element_size
    })

    # Content builder loop; the original end bound had a stray `+ 1`, which
    # produced one extra trailing element beyond num_elements
    for i in range(base_addr, base_addr + num_elements * element_size, element_size):
        key = i
        value = py.randint(0, 2**31 - 1)
        contents_dict[key] = value

    return contents_dict

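# Example (illustrative): a map of eight 4-byte words starting at the default
# base address. The first two keys are the 'header' and 'content_formatter'
# metadata entries, so skip them when printing.
# mem = malloc(4, 8)
# for addr in list(mem)[2:]:
#     print(f'{addr:#010x}: {mem[addr]:#010x}')
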
def test_sincos():
    """
    Simple test/demo of Phaser, recovering a sine and cosine

    Demo courtesy of Jimmy Sastra, U. Penn 2011
    """
    from numpy import sin, cos, pi, array, linspace, cumsum, asarray, dot, ones, sqrt, newaxis, nanmean
    from pylab import plot, legend, axis, show, randint, randn, rand, std, lstsq
    # create separate trials and store times and data
    dats = []
    t0 = []
    period = 55  # i
    phaseNoise = 0.05 / sqrt(period)
    snr = 20
    N = 10
    print N, "trials with:"
    print "\tperiod %.2g" % period, "(samples)\n\tSNR %.2g" % snr, "\n\tphase noise %.2g" % phaseNoise, "(radian/cycle)"
    print "\tlength = [",
    for li in xrange(N):
        l = randint(400, 2000)  # length of trial
        dt = pi * 2.0 / period + randn(l) * phaseNoise  # create noisy time steps
        t = cumsum(dt) + rand() * 2 * pi  # starting phase is random
        raw = asarray([sin(t), cos(t)])  # signal
        raw = raw + randn(*raw.shape) / snr  # SNR=20 noise
        t0.append(t)
        dats.append(raw - nanmean(raw, axis=1)[:, newaxis])
        print l,
    print "]"

    phr = Phaser(dats, psecfunc=lambda x: dot([1, -1], x))
    phi = [phr.phaserEval(d) for d in dats]  # extract phaseNoise
    reg = array([linspace(0, 1, t0[0].size), ones(t0[0].size)]).T
    tt = dot(reg, lstsq(reg, t0[0])[0])
    plot(((tt - pi / 4) % (2 * pi)) / pi - 1, dats[0].T, '.')
    plot((phi[0].T % (2 * pi)) / pi - 1, dats[0].T, 'x')  # plot data versus phase
    legend(['sin(t)', 'cos(t)', 'sin(phi)', 'cos(phi)'])
    axis([-1, 1, -1.2, 1.2])
    show()

def matDist(mat1, mat2, nidx=100):
    """
    returns the distance of two lists of matrices mat1 and mat2.
    output: (d(mat1,mat1), d(mat2,mat2), d(mat2,mat1), d(mat1,mat2)),
    matching the order of the returned tuple below.
    d(mat1,mat2) and d(mat2,mat1) should be the same up to random variance
    (when d(mat1,mat1) and d(mat2,mat2) have the same width in "FWHM sense")
    nidx: n matrices are compared to n matrices each, that is each result has
    length n**2
    """
    # pick up a random matrix from mat1
    # compute distances from out-of-sample mat1
    # compute distances from sample of same size in mat2
    # repeat; for random matrix from mat2
    d_11 = []
    d_22 = []
    d_12 = []
    d_21 = []
    nidx1 = nidx
    nidx2 = nidx
    # for d_11 and d_12
    for nmat in randint(0, len(mat1), nidx1):
        refmat = mat1[nmat]
        for nmat_x in randint(0, len(mat1), nidx1):
            if nmat_x == nmat:
                nmat_x = (nmat - 1) if nmat > 0 else (nmat + 1)
            d_11.append(svd(mat1[nmat_x] - refmat, False, False)[0])
            # ... I could use a []-statement, but I do not want to reformat a
            # list of lists ...
        for nmat_x in randint(0, len(mat2), nidx1):
            d_12.append(svd(mat2[nmat_x] - refmat, False, False)[0])

    for nmat in randint(0, len(mat2), nidx2):
        refmat = mat2[nmat]
        for nmat_x in randint(0, len(mat2), nidx2):
            if nmat_x == nmat:
                nmat_x = (nmat - 1) if nmat > 0 else (nmat + 1)
            d_22.append(svd(mat2[nmat_x] - refmat, False, False)[0])
            # ... I could use a []-statement, but I do not want to reformat a
            # list of lists ...
        for nmat_x in randint(0, len(mat1), nidx2):
            d_21.append(svd(mat1[nmat_x] - refmat, False, False)[0])

    return (d_11, d_22, d_21, d_12)

def __init__(self, age=0, sex=0, crew=False, contacts=0, uid=0):
    self.uid = str(pl.randint(0, 1e9))  # Unique identifier for this person
    # self.uid = str(uid)
    # self.age = float(age)  # Age of the person (in years)
    # self.sex = sex  # Female (0) or male (1)
    # self.crew = crew  # Whether the person is a crew member
    self.contacts = contacts  # Determine how many contacts they have
    self.quarantine = None

    # Define state
    self.alive = True
    self.susceptible = True
    self.exposed = False
    self.infectious = False
    self.diagnosed = False
    self.recovered = False

    # Keep track of dates
    self.date_exposed = None
    self.date_infectious = None
    self.date_diagnosed = None
    self.date_recovered = None
    return

def markov_step(self, state, distribution):
    # Metropolis steps on a discrete 1-D distribution with periodic
    # neighbours; `keep_every` raw steps are collapsed into one returned state.
    state = int(state)
    for i in range(self.keep_every):
        if state == len(distribution):
            state = 0
        if state == 0:
            im = len(distribution) - 1
        else:
            im = state - 1
        if state == len(distribution) - 1:
            ip = 0
        else:
            ip = state + 1
        pi = distribution[state]
        pim = distribution[im]
        pip = distribution[ip]
        attempt = randint(0, 2)
        if attempt == 0:
            if pim > pi:
                state = im
            else:
                z = random()
                if z < pim / pi:
                    state = im
        elif attempt == 1:
            if pip > pi:
                state = ip
            else:
                z = random()
                if z < pip / pi:
                    state = ip
    return state

def fitData_2s(idat, odat, nps, nidx=None, nrep=500, rcond=1e-3):
    """
    fits the data using two strides ahead - TODO: rewrite this docstring!

    performs a bootstrapped fit of the data, i.e. gives a matrix X that
    minimizes || odat.T - X * idat.T ||. This matrix X is the least squares
    estimate that maps the column vector representing stride k to the column
    vector of k+1, at a / the given Poincare section(s).

    idat, odat: input-data in 1d-format (see e.g. twoD_oneD for description)
        each row in idat and odat must represent a stride (idat) and the
        subsequent stride (odat), i.e. if you want to use all strides,
        odat = idat[1:,:]. However, odat must not be shorter (fewer rows)
        than idat.
    nps: number of samples per stride
    nidx: number of strides that should be used for a fit in each bootstrap
        iteration. If omitted, idat.shape[0]*2/3 is used
    nrep: how many bootstrap iterations should be performed
    sections: list of integers that indicate which sections should be used as
        intermediate mappings. If only a single section is given, a
        "stride-map" is computed
    rcond: the numerical accuracy which is used for the fit (this value is
        passed through to lstsq). Too high values will cause loss of detection
        of lower eigenvalues, too low values will corrupt the return maps.

    returns (the section maps described in (1) are currently commented out,
    so only (2) and (3) are returned):
        (1) a list of a list of matrices. Each list contains the matrices that
            map from the first given section to the next. *NOTE* if
            len(sections) is 1, then this is left empty (equivalent matrices
            are then in (2))
        (2) a list of matrices, which represent the "full stride" fits for the
            given set of indices.
        (3) a list of a list of indices. Each list lists the indices (rows of
            idat) that were used for the regression.
    """
    if nidx is None:
        nidx = int(2. / 3. * idat.shape[0])
    #if any(diff(sections)) < 0:
    #    raise ValueError, 'sections must be given in increasing order!'

    # 1st: create bootstrap indices
    indices = [randint(1, idat.shape[0], nidx) for x in range(nrep)]
    #[(idat.shape[0]*rand(nidx-1)+1).astype(int) for x in range(nrep)]

    # 2nd: create section fits (if sections)
    # part A: do fits from one section to the next (within same stride)
    # sectMaps = [
    #     [lstsq(idat[idcs, sections[sct]::nps],
    #            idat[idcs, sections[sct+1]::nps], rcond=rcond)[0].T
    #      for idcs in indices]
    #     for sct in range(len(sections)-1)]
    # part B: do fits from last section to first section of next stride
    #if len(sections) > 1:
    #    sectMaps.append([
    #        lstsq(idat[idcs, sections[-1]::nps],
    #              odat[idcs, sections[0]::nps], rcond=rcond)[0].T
    #        for idcs in indices])

    # 3rd: create stride fits
    strideMaps = [lstsq(hstack([idat[idcs - 1, 0::nps], idat[idcs, 0::nps]]),
                        odat[idcs, 0::nps], rcond=rcond)[0].T
                  for idcs in indices]

    return strideMaps, indices

# In[75]:

get_ipython().magic(u'matplotlib inline')
pl.rcParams['figure.figsize'] = 16, 16
an.stationary_amplitude = 3 * pl.pi
an.stationary_frequency = 0.01
an.wrap_frequency = 0.01
an.value(0.0)

n = 3000
tmin = pl.randint(-100000, 100000)
trange = 30
tmax = tmin + trange
ts = pl.linspace(tmin, tmax, n)
vs = [float(an.value(t)) for t in ts]
rs = pl.linspace(0, 1, n)
xs = pl.cos(vs) * rs
ys = pl.sin(vs) * rs
pl.plot(xs, ys)
pl.show()


# In[70]:

len(an._wns._angle_cache)

    import csv
    f = open('dev_log.csv', 'a')
    f_csv = csv.writer(f)
    f_csv.writerow(results)
    f.close()

    return mod_mc


if __name__ == '__main__':
    import pylab as pl
    import data

    data.age_range = pl.arange(0, 81, 20)
    data.time_range = pl.arange(1980, 2005, 5)
    data.regions = pl.randint(5, 15)

    time.sleep(pl.rand() * 5.)
    t0 = time.time()
    data.generate_fe('test_data/%s.csv' % t0)  # included just to get good test coverage
    data.generate_smooth_gp_re_a('test_data/%s.csv' % t0, country_variation=True)

    std = 5. * pl.rand(len(pl.csv2rec('test_data/%s.csv' % t0)))
    pct = 90.

    print data.age_range, data.time_range, data.regions, pl.mean(std), pct

    data.add_sampling_error('test_data/%s.csv' % t0,
                            'test_data/noisy_%s.csv' % t0,
                            std=std)
    data.knockout_uniformly_at_random('test_data/noisy_%s.csv' % t0,
                                      'test_data/missing_noisy_%s.csv' % t0,
                                      pct=pct)

    mod_mc = evaluate_model('gp_re_a',
                            'knockout pct=%d, model matches data, has laplace priors, sigma_e = Exp(1)' % pct,

import pylab
from pylab import matplotlib
import time

pylab.ion()

tstart = time.time()  # for profiling
x = 50
y = 50
blob, = pylab.plot([x], [y], 'bo')
pylab.axis([0, 100, 0, 100], 'equal')

for i in pylab.arange(1, 200):
    x += 2 * pylab.randint(0, 2) - 1
    y += 2 * pylab.randint(0, 2) - 1
    blob.set_xdata([x])
    blob.set_ydata([y])
    # plot([x],[y],'bo')
    pylab.draw()  # redraw the canvas

def bootstrap(Ws):
    """Returns one bootstrap resample of Ws (rows drawn with replacement)"""
    Ws_bstrp = Ws[pl.randint(0, pl.size(Ws, axis=0), pl.size(Ws, axis=0))]
    return Ws_bstrp

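# Illustrative use (assumption: Ws is a 2-D array with one observation per
# row): build a bootstrap distribution of the column means.
# boot_means = pl.array([bootstrap(Ws).mean(axis=0) for _ in xrange(1000)])
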
def fitmdl_step(self, nidx=500, nrep=500, psec_in=0, psec_out=None,
                dimVol=None, rcond=0.03, nullMdl=None, useSym=False):
    """
    similar to fitmdl, except that the Poincare-section for the input data
    (psec_in) and the output-data (psec_out) are within a single stride.
    if psec_out is not given, it is set to psec_in + nps/2
    if psec_out is < psec_in (mod nps), then the subsequent step is taken

    fits a single step; usually from phase x to phase x+pi

    dimVol: how many singular values should be taken into account when
        comparing the relative volume? Default: all
    useSym: Apply a symmetrization of the data using the self.sym - matrix
    """
    # first: compute in_dat and out_dat
    assert type(psec_in) is int, 'psec_in must be of type int (from 0 to nps)'
    if psec_out is None:
        psec_out = psec_in + int(self.nps / 2)
    psec_out = psec_out % self.nps
    # if psec_out would be in the next step -> shift out, cut in.
    # actually, for an AR-system this should not be relevant due to the
    # time symmetry of the autocorrelation function; however, here we might
    # face a substantially different system.
    oneStrideAhead = False
    if psec_out < psec_in:
        oneStrideAhead = True

    data_in = self.data1D[:, psec_in::self.nps] if not oneStrideAhead else \
        self.data1D[:-1, psec_in::self.nps]
    data_out_pre = self.data1D[:, psec_out::self.nps] if not oneStrideAhead else \
        self.data1D[1:, psec_out::self.nps]

    if useSym:
        data_out = dot(self.sym, data_out_pre.T).T
    else:
        data_out = data_out_pre

    assert data_in.shape[0] == data_out.shape[0], 'in- and out-data shape mismatch'

    self.all_A = []
    self.relVol = []  # relative volume after prediction
    self.vred = []
    self.rv = []      # relative variance after prediction
    in_idx = arange(data_in.shape[0])
    #gc.collect()
    for rep in range(nrep):
        # create input and output data for regression, depending on the
        # null model
        if nullMdl is None:
            pred_idx = in_idx[randint(0, len(in_idx), nidx)]
            idat = data_in[array(list(pred_idx), dtype=int), :]
            odat = data_out[array(list(pred_idx), dtype=int), :]
        elif nullMdl == 'inv':
            # actually, this could be deleted
            pred_idx = in_idx[randint(len(in_idx), nidx)]
            idat = data_in[array(list(pred_idx), dtype=int), :]
            odat = data_out[array(list(pred_idx), dtype=int), :]
        elif nullMdl == 'rand':
            pred_idx = in_idx[randint(0, len(in_idx), nidx)]
            pred_idx_out = in_idx[randint(0, len(in_idx), nidx)]
            idat = data_in[array(list(pred_idx), dtype=int), :]
            odat = data_out[array(list(pred_idx_out), dtype=int), :]
        else:
            raise ValueError, 'Error: Null model type not understood'

        test_idx = set(in_idx).difference(pred_idx)
        A = lstsq(idat, odat, rcond=rcond)[0]
        test_idat = data_in[array(list(test_idx), dtype=int), :]
        test_odat = data_out[array(list(test_idx), dtype=int), :]
        pred = dot(test_idat, A)
        res = test_odat - pred
        self.vred.append(diag(cov(res.T)) / diag(cov(test_odat.T)))
        self.all_A.append(A)
        self.rv.append(var(res) / var(test_odat))
        # compute relative volume:
        # actually, another comparison might be useful: compute along the
        # SAME projection, e.g. the principal component linear hull of odat!
        s_odat = svd(test_odat, full_matrices=False, compute_uv=False)
        s_pred = svd(res, full_matrices=False, compute_uv=False)
        volRatio = reduce(lambda x, y: x * y, s_pred[:dimVol]) / \
            reduce(lambda x, y: x * y, s_odat[:dimVol])  # /sqrt(N) cancels out
        self.relVol.append(volRatio)

kx = data2array(pattAd.replace('Cand_S', 'Cand_X'))
kA = data2array(pattAd.replace('Cand_X', 'Cand_S'))
ksix = concatenate((ksix, kx))
ksiA = concatenate((ksiA, kA))

T0 = 20000  # 0.1 ms
TF = 80000  # 0.1 ms
sx = 0.2
sT = 0.2
x, A = [], []

############################################################################################
# **Loop**
tic()
for iii in range(100):
    # Init
    m = randint(ksix.shape[0])
    other.update({'x': ksix[m], 'A': ksiA[m]})
    eva = main.evaCure(evaCon=conn, evaNoi=noise, evaMod=model, out=out, **other)
    # Run
    for i in range(T0):
        eva.update()
    eva.evaNoi.updateNoise(stdD_x=sx, stdD_T=sT)
    for i in range(TF):
        eva.update()
    # Save

nlo, nhi = hist2d_zspec.min(), hist2d_zspec.max()
dn = abs(nlo - nhi)

#cmap_colors = [pylab.cm.gray_r(i) for i in pylab.linspace(0, 1, 15)]
#cmap = matplotlib.colors.ListedColormap(cmap_colors, name='custom grays', N=None)
#cmap = pylab.cm.gray_r

xgrid_zspec, ygrid_zspec = pylab.meshgrid((xedges_zspec[1:] + xedges_zspec[:-1]) / 2.,
                                          (yedges_zspec[1:] + yedges_zspec[:-1]) / 2.)
inds = pylab.where((hist2d_zspec > 0) & (hist2d_zspec <= 2) &
                   (xgrid_zspec.T < 1.0) & (ygrid_zspec.T > 0.7))
hist2d_zspec[inds] = pylab.randint(1, 4, size=(nbins_zspec, nbins_zspec))[inds]

#colormap = sp2.imshow(hist2d_zspec.T[::-1,:], extent=extent_zspec, interpolation='nearest', cmap=cmap)
colormap = sp2.imshow(hist2d_zspec.T, extent=extent_zspec, interpolation='nearest', cmap=cmap)
colormap.set_clim(nlo + dn * 0.03, nhi - dn * 0.03)
colormap.set_clim(0, nhi)

sp2.axis(xlims + ylims)

#ntext = sp2.text(0.06, 0.86, 'Spec. sample\nN = %i' % len(uv_zspec), transform=sp2.transAxes)
ntext = sp2.text(0.03, 0.96, 'Spectroscopic sample\n0.55 < $z_{\mathrm{spec}}$ < 1.3\nN = %i' % len(uv_zspec),
                 horizontalalignment='left', verticalalignment='top', transform=sp2.transAxes)

def __init__(self, case):
    L = 10
    dims = 2
    dt = .01
    self.distance_matrix = PeroidicDistanceMatrix(L)
    self.force = LeonardJonesForce(dims, self.distance_matrix)
    self.integrate = VerletIntegrator(dt, self.force)
    self.c = Container(self.integrate)
    c = self.c
    dist = c.L[0] / 5.
    vel = dist / 5.
    initialization = case
    xlim = (-5., 15.)
    ylim = (-5., 15.)
    pot_energy_lim = (-10, 30)
    kin_energy_lim = (40, 70)
    tot_energy_lim = (40, 70)
    if initialization == 'one':
        c.addParticle(0, dist, 0, 0, 1)
    elif initialization == 'two':
        c.addParticle(-dist, 0., vel, 0., 1.)
        c.addParticle(dist, 0., -vel, 0., 1.)
        pot_energy_lim = (-1, 1)
        kin_energy_lim = (-1, 2)
        tot_energy_lim = (0, 2)
        pressure_lim = (-1, 1)
    elif initialization == 'three':
        c.addParticle(0., dist * sqrt(3) / 2., 0., -vel, 1.)
        c.addParticle(-dist, 0., vel, 0., 1.0)
        c.addParticle(dist, 0., -vel, 0., 1.)
        pot_energy_lim = (-2, 1)
        kin_energy_lim = (-1, 3)
        tot_energy_lim = (0, 2)
        pressure_lim = (-1, 1)
    elif initialization == 'four':
        c.addParticle(-dist, 0., vel, 0., 1.)
        c.addParticle(dist, 0., -vel, 0., 1.)  # missing comma restored between 0. and -vel
        c.addParticle(0., dist, 0., -vel, 1.)
        c.addParticle(0., -dist, 0., vel, 1.)
        pot_energy_lim = (-5, 5)
        kin_energy_lim = (-.5, 10)
        tot_energy_lim = (0, 4)
        pressure_lim = pot_energy_lim
    elif initialization == 'six':
        energy_lim = 80
        c.addParticle(0., dist, 0., -vel, 1.)
        c.addParticle(0., -dist, 0., vel, 1.)
        # the four diagonal particles are normalized here to the
        # (x, y, vx, vy, m) signature used everywhere else; the original
        # calls carried stray extra arguments
        c.addParticle(dist / sqrt(2), dist / sqrt(2), -vel / sqrt(2), -vel / sqrt(2), 1.)
        c.addParticle(-dist / sqrt(2), dist / sqrt(2), vel / sqrt(2), -vel / sqrt(2), 1.)
        c.addParticle(-dist / sqrt(2), -dist / sqrt(2), vel / sqrt(2), vel / sqrt(2), 1.)
        c.addParticle(dist / sqrt(2), -dist / sqrt(2), -vel / sqrt(2), vel / sqrt(2), 1.)
        pot_energy_lim = (-10, 10)
        kin_energy_lim = (-.5, 10)
        tot_energy_lim = (-10, 10)
        pressure_lim = pot_energy_lim
    elif initialization == 'eight':
        energy_lim = 80
        c.addParticle(-dist, 0., vel, 0., 1.)
        c.addParticle(dist, 0, -vel, 0., 1.)
        c.addParticle(0., dist, 0., -vel, 1.)
        c.addParticle(0., -dist, 0., vel, 1.)
        c.addParticle(dist / sqrt(2), dist / sqrt(2), -vel / sqrt(2), -vel / sqrt(2), 1.)
        c.addParticle(-dist / sqrt(2), dist / sqrt(2), vel / sqrt(2), -vel / sqrt(2), 1.)
        c.addParticle(-dist / sqrt(2), -dist / sqrt(2), vel / sqrt(2), vel / sqrt(2), 1.)
        c.addParticle(dist / sqrt(2), -dist / sqrt(2), -vel / sqrt(2), vel / sqrt(2), 1.)
        pot_energy_lim = (-10, 10)
        kin_energy_lim = (-.5, 10)
        tot_energy_lim = (-10, 10)
        pressure_lim = (-15, 15)
    elif case == 'line':
        gamma = 1e-6
        pot_energy_lim = (-10, 100)
        kin_energy_lim = (-.5, 100)
        xlim = (-1, 11)
        ylim = xlim
        tot_energy_lim = (0, 100)
        pressure_lim = (0, 30)
        for i in range(11):
            if i == 5:
                c.addParticle(c.L[0] / 2., (i - .5) * c.L[1] / 11., 1. - gamma, gamma, 1.)
            else:
                c.addParticle(c.L[0] / 2., (i - .5) * c.L[1] / 11., 1., 0., 1.)
    elif case == 'square_lattice' or case == 'crunch_square_lattice' or case == 'hot_square_lattice' or case == 'square_lattice8x8':
        if case == 'hot_square_lattice':
            c.hot_idx = randint(64)
        else:
            c.hot_idx = -1  # no "hot" particle; avoids a missing-attribute lookup below
        N = 8  # Particles per row
        xlim = (-1, 10)
        ylim = xlim
        if case == 'crunch_square_lattice':
            pot_energy_lim = (-300, 1000)
            kin_energy_lim = (-.5, 1000)
            tot_energy_lim = (-300, 1000)
            pressure_lim = (-200, 1000)
        else:
            pot_energy_lim = (-300, 100)
            kin_energy_lim = (-.5, 40)
            tot_energy_lim = (-300, 100)
            pressure_lim = (-200, 100)
        c.updateL(8)
        d = 2.**(1 / 6.)  # Particle diameter
        x = linspace(d / 2., c.L[0] - d / 2, N)
        y = linspace(d / 2., c.L[0] - d / 2, N)
        for i in range(x.size):
            for j in range(y.size):
                if i * 8 + j == c.hot_idx:
                    (vx, vy) = (rand(), rand())
                    print "(vx,vy) = ({},{}) at ({},{})".format(vx, vy, i, j)
                    c.addParticle(x[i], y[j], vx, vy, 1)
                else:
                    c.addParticle(x[i], y[j], 0, 0, 1)
    elif case == 'triangle_lattice' or case == 'crunch_triangle_lattice':
        ylim = (-1, 9)
        xlim = (-1, 11)
        N = 8  # particles per row
        if case == 'crunch_triangle_lattice':
            pot_energy_lim = (-300, 1000)
            kin_energy_lim = (-.5, 1000)
            tot_energy_lim = (-300, 1000)
            pressure_lim = (-200, 1000)
        else:
            pot_energy_lim = (-300, 100)
            kin_energy_lim = (-.5, 15)
            tot_energy_lim = (-300, 100)
            pressure_lim = (-200, 100)
        c.L[0] = 8.8
        c.L[1] = sqrt(3) / 2. * c.L[0] - .2  # Set this based on L[0]
        d = 2.**(1 / 6.)  # diameter
        x = linspace(-c.L[0] / 2 + 3. * d / 4., c.L[0] / 2. - 1. * d / 4., N)  # Unstaggered
        xs = linspace(-c.L[0] / 2 + d / 4., c.L[0] / 2. - 3. * d / 4., N)      # Staggered
        y = linspace(-c.L[1] / 2 + d / 2., c.L[1] / 2 - d / 2, N)
        for i in range(N):
            for j in range(N):
                if mod(i, 2) == 0:
                    c.addParticle(x[j], y[i], 0, 0, 1)
                else:
                    c.addParticle(xs[j], y[i], 0, 0, 1)  # stray extra zero arguments dropped
    else:
        raise ValueError("Not an option")

    self.xlim = xlim
    self.ylim = ylim
    self.pot_energy_lim = pot_energy_lim
    self.kin_energy_lim = kin_energy_lim
    self.tot_energy_lim = tot_energy_lim
    self.pressure_lim = pressure_lim

def sample(self, k):
    if isinstance(k, tuple):
        k = randint(*k)
    return [self.sampler() for _ in range(k)]

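# Note: when k is a tuple it is unpacked into randint, so sample((5, 10))
# first draws a size uniformly from [5, 10) and then returns that many draws
# from self.sampler(); a plain int k returns exactly k draws.
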
for col in timeFeatures:
    Stat = T[['id', col]].groupby(col).agg('count').rename(columns={'id': col + '_stat'})
    Stat /= Stat.sum()
    T = T.join(Stat, on=col)
    statFeatures.append(col + '_stat')

numFeatures += timeFeatures
numFeatures += statFeatures

T2 = T[numFeatures + ['id', 'tr', 'ts', target]].copy()

Ttr = T2[T.tr == 1]
Tar_tr = Ttr[target].values

n = 10
inx = [pl.randint(0, Ttr.shape[0], int(Ttr.shape[0] / n)) for k in range(n)]
# inx is used for cross-validation of calculating the correlation and p-value
Corr = {}
for c in numFeatures:
    # since some values might be 0s, I use x+1 to avoid missing some important relations
    C1, P1 = pl.nanmean([pearsonr(Tar_tr[inx[k]], (1 + Ttr[c].iloc[inx[k]])) for k in range(n)], 0)
    C2, P2 = pl.nanmean([pearsonr(Tar_tr[inx[k]], 1 / (1 + Ttr[c].iloc[inx[k]])) for k in range(n)], 0)
    if P2 < P1:
        T2[c] = 1 / (1 + T2[c])
        Corr[c] = [C2, P2]
    else:

import rospy
from std_msgs.msg import String
from pylab import tan, pi, randint

# main
rospy.init_node('bridge')

# color to publish
color = String()
col_pub = rospy.Publisher('/color_to_detect', String, queue_size=10)

while not rospy.has_param('/colors'):
    rospy.sleep(1)
colors = rospy.get_param('/colors')
colors = colors.keys()

count = 0
color_idx = 0
while not rospy.is_shutdown():
    # colors
    if count == 0:
        # always change color
        color_idx = (color_idx + randint(1, len(colors))) % len(colors)
        color.data = colors[color_idx]
    count = (count + 1) % 5
    col_pub.publish(color)
    rospy.sleep(1)

def epsilon_greedy_action(self, state):
    p = uniform()
    if p < self.epsilon:
        return randint(0, self.env.get_num_actions())
    else:
        return argmax(self.get_q(state))

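# Epsilon-greedy exploration: with probability epsilon pick a uniformly random
# action index (randint over the action count), otherwise exploit the current
# greedy action argmax_a Q(s, a). Assumes `uniform`, `randint` and `argmax`
# come from pylab/numpy, e.g.:
# action = agent.epsilon_greedy_action(state)   # hypothetical agent instance
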
def plot_pop(do_show=False, pause=0.2):
    ''' Plot an example population '''

    plotconnections = True
    n = 5000
    alpha = 0.5

    # indices = pl.arange(1000)
    pl.seed(1)
    indices = pl.randint(0, n, 20)

    max_contacts = {'S': 20, 'W': 10}
    population = sp.make_population(n=n, max_contacts=max_contacts)

    nside = np.ceil(np.sqrt(n))
    x, y = np.meshgrid(np.arange(nside), np.arange(nside))
    x = x.flatten()[:n]
    y = y.flatten()[:n]

    people = list(population.values())
    for p, person in enumerate(people):
        person['loc'] = dict(x=x[p], y=y[p])

    ages = np.array([person['age'] for person in people])
    f_inds = [ind for ind, person in enumerate(people) if not person['sex']]
    m_inds = [ind for ind, person in enumerate(people) if person['sex']]

    fig = None  # only created when do_show is set
    if do_show:

        use_terrain = False
        if use_terrain:
            import matplotlib.pyplot as plt
            import matplotlib.colors as colors
            colors_undersea = plt.cm.terrain(np.linspace(0, 0.17, 256))
            colors_land = plt.cm.terrain(np.linspace(0.25, 1, 256))
            all_colors = np.vstack((colors_undersea, colors_land))
            terrain_map = colors.LinearSegmentedColormap.from_list('terrain_map', all_colors)
            pl.set_cmap(terrain_map)

        fig = pl.figure(figsize=(24, 18))
        pl.subplot(111)
        minval = 0    # ages.min()
        maxval = 100  # ages.max()
        colors = sc.vectocolor(ages, minval=minval, maxval=maxval)
        for i, inds in enumerate([f_inds, m_inds]):
            pl.scatter(x[inds], y[inds], marker='os'[i], c=colors[inds])
        pl.clim([minval, maxval])
        pl.colorbar()

        if plotconnections:
            lcols = dict(H=[0, 0, 0], S=[0, 0.5, 1], W=[0, 0.7, 0], C=[1, 1, 0])
            for index in indices:
                person = people[index]
                contacts = person['contacts']
                lines = []
                for lkey in lcols.keys():
                    for contactkey in contacts[lkey]:
                        contact = population[contactkey]
                        tmp = pl.plot([person['loc']['x'], contact['loc']['x']],
                                      [person['loc']['y'], contact['loc']['y']],
                                      c=lcols[lkey], alpha=alpha)
                        lines.append(tmp)
        if pause:
            pl.pause(pause)

    return fig

##### NEST SIMULATION FOR SPIKE TIMES, RUN IN PARALLEL ACROSS RANKS ##########

#delete old files from prev nest sim
if MASTER_MODE:
    os.system('rm -r *.gdf')

run_brunel_delta_nest(**nestSimParams)
ipdb.set_trace()

#process gdf files, creating savedata/SpTimes*.h5 files with spike times
gdfFilesProcessing(nestSimParams)

#extract so many spikes for each cell for excitatory and inhib nest-cells
if MASTER_MODE:
    NTIMES = 1000
    SpCell0 = pl.randint(0, nestSimParams['order'] * 4,
                         size=(POPULATION_SIZE, NTIMES)).astype('int32')
    SpCell1 = pl.randint(0, nestSimParams['order'],
                         size=(POPULATION_SIZE, NTIMES)).astype('int32')
else:
    SpCell0 = None
    SpCell1 = None

#broadcast the cell indexes to all ranks
SpCell0 = COMM.bcast(SpCell0, root=0)
SpCell1 = COMM.bcast(SpCell1, root=0)

print 'created SpTimes in %.6f' % (time() - TIME)

#distribute soma locations and the shuffled morphos etc
if MASTER_MODE:
    ##### create lists/dicts of locations, morphologies, codes #################

import pylab

data_list = [pylab.randint(0, 100) for i in range(100)]


# make histogram
def main(data):
    ave = pylab.mean(data)
    std = pylab.std(data)
    ave_p_std = ave + std
    ave_m_std = ave - std
    pylab.hist(data, bins=range(0, 111, 5))
    ylims = pylab.ylim()
    pylab.plot([ave_m_std] * 2, ylims, 'r')
    pylab.plot([ave_p_std] * 2, ylims, 'r')
    pylab.grid()
    pylab.title('average = %g, std = %g' % (pylab.mean(data), pylab.std(data)))
    pylab.show()


if __name__ == '__main__':
    main(data_list)

def fitmdl(self, nidx=500, nrep=500, psec=0, rcond=0.03, dS=1, dimVol=None,
           nullMdl=None):
    """
    performs a bootstrapped prediction (nrep times), based on nidx frames.
    also computes the out-of-sample variance reduction
    optionally, the number of the poincare-section can be given (psec).
    it must be a non-negative integer smaller than nps

    dS: number of strides to predict, typically: 1
    dimVol: how many singular values should be taken into account when
        comparing the relative volume? Default: all
    nullMdl: None   -> normal fitting
             'inv'  -> iDat <-> oDat switched (should have no effect in AR?)
             'rand' -> iDat, oDat have no relation (random selection)
    """
    if psec >= self.nps:
        raise ValueError, 'invalid poincare-section selected!'
    data = self.data1D[:, psec::self.nps]
    self.all_A = []
    #gc.collect()
    #section = psec
    #u0,s0,v0 = svd(aa1D[:,section::10].T,full_matrices = False)
    #vi = util.VisEig(256)
    self.vred = []
    self.rv = []
    self.relVol = []
    #vredpc = []
    #vredb = []
    in_idx = arange(self.data1D.shape[0] - dS)
    for rep in range(nrep):
        # create input and output data for regression, depending on the
        # null model
        if nullMdl is None:
            pred_idx = in_idx[randint(0, len(in_idx), nidx)]
            idat = data[array(list(pred_idx), dtype=int), :]
            odat = data[array(list(pred_idx), dtype=int) + dS, :]
        elif nullMdl == 'inv':
            pred_idx = in_idx[randint(dS, len(in_idx), nidx)]
            idat = data[array(list(pred_idx), dtype=int), :]
            odat = data[array(list(pred_idx), dtype=int) - dS, :]
        elif nullMdl == 'rand':
            pred_idx = in_idx[randint(0, len(in_idx), nidx)]
            pred_idx_out = in_idx[randint(0, len(in_idx), nidx)]
            idat = data[array(list(pred_idx), dtype=int), :]
            odat = data[array(list(pred_idx_out), dtype=int), :]
        else:
            raise ValueError, 'Error: Null model type not understood'

        test_idx = set(in_idx).difference(pred_idx)
        A = lstsq(idat, odat, rcond=rcond)[0]
        test_idat = data[array(list(test_idx), dtype=int), :]
        test_odat = data[array(list(test_idx), dtype=int) + dS, :]
        pred = dot(test_idat, A)
        res = test_odat - pred
        self.vred.append(diag(cov(res.T)) / diag(cov(test_odat.T)))
        self.all_A.append(A)
        self.rv.append(var(res) / var(test_odat))
        # compute relative volume:
        # actually, another comparison might be useful: compute along the
        # SAME projection, e.g. the principal component linear hull of odat!
        s_odat = svd(test_odat, full_matrices=False, compute_uv=False)
        s_pred = svd(res, full_matrices=False, compute_uv=False)
        volRatio = reduce(lambda x, y: x * y, s_pred[:dimVol]) / \
            reduce(lambda x, y: x * y, s_odat[:dimVol])  # /sqrt(N) cancels out
        self.relVol.append(volRatio)

from pylab import randint

def genere_liste(n):
    # Generate a list of n random integers (pylab.randint samples [1, 10*n))
    liste = []
    for i in range(n):
        liste.append(randint(1, 10 * n))
    return liste

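# Example (pylab's randint samples a half-open interval, so values fall in
# [1, 10*n)):
# genere_liste(5)   # e.g. [12, 3, 44, 27, 8]
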
def getsite(self):
    """Returns a tuple indexing a uniformly random point on the n x n lattice
    (selection probability 1/n**2)"""
    return (pl.randint(0, self.n), pl.randint(0, self.n))

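# Example (illustrative): a Metropolis-style sweep proposes one random site
# per step, each drawn with the uniform 1/n**2 selection probability:
# i, j = lattice.getsite()   # hypothetical lattice model instance
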