def __init__(self, ccdId=0):
    StarlightCamUSB.__init__(self, ccdId)
    self.ccdHandle = self.dev
    self.ccdParams = self.ccdProperties
    if self.ccdParams['isInterlaced']:
        self.pixels1 = py.empty(self.ccdParams['height'] * self.ccdParams['width'])
        self.pixels2 = py.empty(self.ccdParams['height'] * self.ccdParams['width'])
        self.pixels = py.empty((2 * self.ccdParams['height'], self.ccdParams['width']))
    else:
        self.pixels = py.empty(self.ccdParams['height'] * self.ccdParams['width'])
    self.mustStartAcq = True
    self.exposureFinished = False
    self.firstExposureFinished = False
    self.noMoreWipes = False
    self.timeStart = 0.
    self.timeEnd = 0.
    self.curTime = 0.
    self.completionPercentage = 0

def residuals(params, x, y, z):
    xo = params[0]
    xs = params[1]
    yo = params[2]
    ys = params[3]
    zo = params[4]
    zs = params[5]
    xc = empty(shape(x))
    for i in range(len(x)):
        xc[i] = (x[i] - xo) * xs
    yc = empty(shape(y))
    for i in range(len(y)):
        yc[i] = (y[i] - yo) * ys
    zc = empty(shape(z))
    for i in range(len(z)):
        zc[i] = (z[i] - zo) * zs
    res = []
    for i in range(len(xc)):
        norm = l2norm(array([xc[i], yc[i], zc[i]])) - 1.0
        res.append(norm)
    return array(res)

def residuals(params, x, y, z):
    xo = params[0]
    xs = params[1]
    yo = params[2]
    ys = params[3]
    zo = params[4]
    zs = params[5]
    xys = params[6]
    xzs = params[7]
    yzs = params[8]
    xc = empty(shape(x))
    yc = empty(shape(y))
    zc = empty(shape(z))
    for i in range(len(x)):
        _x = x[i] - xo
        _y = y[i] - yo
        _z = z[i] - zo
        xc[i] = _x * (xs + _y * xys + _z * xzs)
        yc[i] = _y * (ys + _z * yzs)
        zc[i] = _z * zs
    res = []
    for i in range(len(xc)):
        norm = l2norm(array([xc[i], yc[i], zc[i]])) - 1.0
        res.append(norm)
    return array(res)

def plot_traces(traces, linecolor="k"):
    for t in traces:
        num_locations = len(t.locations)
        x = pylab.empty(num_locations)
        z = pylab.empty(num_locations)
        for i, loc in enumerate(t.locations):
            x[i], z[i] = loc
        pylab.plot(z, x, color=linecolor)

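# A minimal, hypothetical usage sketch (not from the original source): any object
# exposing a `locations` attribute of (x, z) pairs can be handed to plot_traces().
import pylab
from types import SimpleNamespace

_demo_trace = SimpleNamespace(locations=[(0.0, 0.0), (1.0, 0.5), (2.0, 0.3)])
plot_traces([_demo_trace], linecolor="b")
pylab.show()
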
def inject_wall(self, simulation, nwall, time, first_part, boundary):
    local_time = time - nwall * self.time_between_walls
    N = self.resolution
    vertices = empty((N**2, 3))
    v = empty((N**2, 3))
    for i in range(N):
        x = (float(i) - float(N - 1) / 2.) * self.dx
        for j in range(N):
            y = (float(j) - float(N - 1) / 2.) * self.dx
            vertices[i * N + j, :] = (self.center + x * self.u + y * self.v
                                      + local_time * self.velocity * self.normal)
            v[i * N + j, :] = self.normal * self.velocity
    h = self.dx * self.hfact
    specific_energy = self.temperature * self.specific_energy_to_temperature_ratio
    simulation.inject_or_update_particles(first_part, N**2,
                                          asfortranarray(vertices.T), v,
                                          ones(N**2) * h,
                                          ones(N**2) * specific_energy,
                                          boundary)

def filter_csd(self):
    '''Spatial filtering of the CSD estimate, using an N-point filter'''
    if not self.f_order > 0 and type(self.f_order) == type(3):
        raise Exception, 'Filter order must be int > 0!'

    if self.f_type == 'boxcar':
        num = ss.boxcar(self.f_order)
        denom = pl.array([num.sum()])
    elif self.f_type == 'hamming':
        num = ss.hamming(self.f_order)
        denom = pl.array([num.sum()])
    elif self.f_type == 'triangular':
        num = ss.triang(self.f_order)
        denom = pl.array([num.sum()])
    elif self.f_type == 'gaussian':
        num = ss.gaussian(self.f_order[0], self.f_order[1])
        denom = pl.array([num.sum()])
    else:
        raise Exception, '%s Wrong filter type!' % self.f_type

    num_string = '[ '
    for i in num:
        num_string = num_string + '%.3f ' % i
    num_string = num_string + ']'
    denom_string = '[ '
    for i in denom:
        denom_string = denom_string + '%.3f ' % i
    denom_string = denom_string + ']'

    print 'discrete filter coefficients: \nb = %s, \na = %s' % \
        (num_string, denom_string)

    self.csd_filtered = pl.empty(self.csd.shape)
    for i in xrange(self.csd.shape[1]):
        self.csd_filtered[:, i] = ss.filtfilt(num, denom, self.csd[:, i])

def _make_log_freq_map(self):
    """
    ::

        For the given ncoef (bands-per-octave) and nfft, calculate the center
        frequencies and bandwidths of linear and log-scaled frequency axes for
        a constant-Q transform.
    """
    fp = self.feature_params
    bpo = float(self.nbpo)  # Bands per octave
    self._fftN = float(self.nfft)
    hi_edge = float(self.hi)
    lo_edge = float(self.lo)
    f_ratio = 2.0**(1.0 / bpo)  # Constant-Q bandwidth
    self._cqtN = float(P.floor(P.log(hi_edge / lo_edge) / P.log(f_ratio)))
    self._dctN = self._cqtN
    self._outN = float(self.nfft / 2 + 1)
    if self._cqtN < 1:
        print("warning: cqtN not positive definite")
    mxnorm = P.empty(int(self._cqtN))  # Normalization coefficients
    # P.array([i * self.sample_rate / float(self._fftN) for i in P.arange(self._outN)])
    fftfrqs = self._fftfrqs
    logfrqs = P.array([lo_edge * P.exp(P.log(2.0) * i / bpo)
                       for i in P.arange(self._cqtN)])
    logfbws = P.array([max(logfrqs[i] * (f_ratio - 1.0),
                           self.sample_rate / float(self._fftN))
                       for i in P.arange(int(self._cqtN))])
    # self._fftfrqs = fftfrqs
    self._logfrqs = logfrqs
    self._logfbws = logfbws
    self._make_cqt()

def compile_directory(dir_name, size):
    """Return a matrix of all MINE measures from a directory.

    Args:
      dir_name: str of path with MINE.jar results files in RX_MINE_RESULTS format.
      size: int of number of total MINE.jar results over all files in directory.
    Returns:
      [numpy.array('f')] of MINE.jar results, measures column order, no row order.
    """
    # v['mic'], v['non'], v['mas'], v['mev'], v['mcn'], v['pcc'] = row
    # results vectors 'v' in column order of MINE.jar results
    M = [pylab.empty(size, 'f') for i in range(6)]
    i = 0
    for filename in os.listdir(dir_name):
        if not RX_MINE_RESULTS.match(filename):
            continue
        with open(os.path.join(dir_name, filename), "r") as fp:
            assert(fp.next() == HEADER)  # skip header
            for line in fp:
                row = map(float, line.split(',')[2:])
                for j, x in enumerate(row):
                    M[j][i] = x
                i += 1
    assert(i == size)
    return M

def draw_rand_pos(self):
    x = pl.empty(self.n)
    y = pl.empty(self.n)
    z = pl.empty(self.n)
    for i in xrange(self.n):
        x[i] = (pl.rand() - 0.5) * self.radius * 2
        y[i] = (pl.rand() - 0.5) * self.radius * 2
        while pl.sqrt(x[i]**2 + y[i]**2) >= self.radius:
            x[i] = (pl.rand() - 0.5) * self.radius * 2
            y[i] = (pl.rand() - 0.5) * self.radius * 2
    z = pl.rand(self.n) * (self.z_max - self.z_min) + self.z_min
    r = pl.sqrt(x**2 + y**2 + z**2)

    soma_pos = {'xpos': x, 'ypos': y, 'zpos': z, 'r': r}
    return soma_pos

def getVarData(fn, var):
    def getMembers(tfh):
        names = sorted(tfh.getnames())
        names.remove('.')
        names.remove('./input.cfg')
        names.remove('./output.log')
        return [tfh.getmember(n) for n in names]

    def getPassCount(fl):
        fh = netcdf_file(fl)
        S = fh.variables['u'].shape
        fh.close()
        return S[0]

    tfh = tarfile.open(fn)
    members = getMembers(tfh)
    Nf = len(members)
    Np = getPassCount(tfh.extractfile(members[0]))
    data = pl.empty((Nf, Np))
    for k in range(len(members)):
        fl = tfh.extractfile(members[k])
        fh = netcdf_file(fl)
        data[k, :] = fh.variables[var][:, 0, 0]
        fh.close()
    tfh.close()
    return data

def _make_log_freq_map(self):
    """
    ::

        For the given ncoef (bands-per-octave) and nfft, calculate the center
        frequencies and bandwidths of linear and log-scaled frequency axes for
        a constant-Q transform.
    """
    fp = self.feature_params
    bpo = float(self.nbpo)  # Bands per octave
    self._fftN = float(self.nfft)
    hi_edge = float(self.hi)
    lo_edge = float(self.lo)
    f_ratio = 2.0**(1.0 / bpo)  # Constant-Q bandwidth
    self._cqtN = float(P.floor(P.log(hi_edge / lo_edge) / P.log(f_ratio)))
    self._dctN = self._cqtN
    self._outN = float(self.nfft / 2 + 1)
    if self._cqtN < 1:
        print "warning: cqtN not positive definite"
    mxnorm = P.empty(self._cqtN)  # Normalization coefficients
    # P.array([i * self.sample_rate / float(self._fftN) for i in P.arange(self._outN)])
    fftfrqs = self._fftfrqs
    logfrqs = P.array([lo_edge * P.exp(P.log(2.0) * i / bpo)
                       for i in P.arange(self._cqtN)])
    logfbws = P.array([max(logfrqs[i] * (f_ratio - 1.0),
                           self.sample_rate / float(self._fftN))
                       for i in P.arange(self._cqtN)])
    # self._fftfrqs = fftfrqs
    self._logfrqs = logfrqs
    self._logfbws = logfbws
    self._make_cqt()

def X(r=r, m=m, f=f):
    hazard = r + m + f
    pr_not_exit = pl.exp(-hazard)
    X = pl.empty(len(hazard))
    X[-1] = 1 / hazard[-1]
    for i in reversed(range(len(X) - 1)):
        X[i] = pr_not_exit[i] * (X[i + 1] + 1) + 1 / hazard[i] * (1 - pr_not_exit[i]) - pr_not_exit[i]
    return X

def mu_age_X(r=rate["r"]["mu_age"], m=rate["m"]["mu_age"], f=rate["f"]["mu_age"]):
    hazard = r + m + f
    pr_not_exit = pl.exp(-hazard)
    X = pl.empty(len(hazard))
    X[-1] = 1 / hazard[-1]
    for i in reversed(range(len(X) - 1)):
        X[i] = pr_not_exit[i] * (X[i + 1] + 1) + 1 / hazard[i] * (1 - pr_not_exit[i]) - pr_not_exit[i]
    return X

def X(r=r, m=m, f=f):
    hazard = r + m + f
    pr_not_exit = pl.exp(-hazard)
    X = pl.empty(len(hazard))
    X[-1] = 1 / hazard[-1]
    for i in reversed(range(len(X) - 1)):
        X[i] = pr_not_exit[i] * (X[i + 1] + 1) + 1 / hazard[i] * (1 - pr_not_exit[i]) - pr_not_exit[i]
    return X

def testObjectWeightsAreUpdated(self):
    # don't understand this test, don't think i need it
    # it is a bit random lol
    target = empty([NN.ni, NN.nh])
    for j in range(len(self.nn.ah)):
        self.hidAstroL.neur_in_ws[:, j] = 10**j
        target[:, j] = 10**j
    npt.assert_array_equal(self.in_to_hidden, self.hidAstroL.neur_in_ws)
    npt.assert_array_equal(self.in_to_hidden, target)

def testObjectWeightsAreUpdated(self):
    # don't understand this test, don't think i need it
    # it is a bit random lol
    target = empty([NN.ni, NN.nh])
    for j in range(len(self.nn.ah)):
        self.hidAstroL.neur_in_ws[:, j] = 10 ** j
        target[:, j] = 10 ** j
    npt.assert_array_equal(self.in_to_hidden, self.hidAstroL.neur_in_ws)
    npt.assert_array_equal(self.in_to_hidden, target)

def plot_average_channels(ch):
    avfr = {}
    avdp = {}
    for ind, ch_ind in enumerate(ch):
        # print ind, ch_ind
        for k in sorted(ch[ch_ind].keys()):
            if k not in avfr and "bl" not in k:
                avfr[k] = pl.empty([96, 3])
                avdp[k] = pl.empty([96, 3])
            if "bl" not in k:
                avfr[k][ind, :] = ch[ch_ind][k]["fr_mu"] - ch[ch_ind]["bl_mu"]
                avdp[k][ind, :] = ch[ch_ind][k]["dprime"]
    pl.subplot(3, 1, 1)
    pl.hist(avfr["OSImage_5"][:, 0] - avfr["OSImage_45"][:, 0], bins=range(-30, 30, 5))
    pl.subplot(3, 1, 2)
    pl.hist(avfr["OSImage_5"][:, 1] - avfr["OSImage_45"][:, 1], bins=range(-30, 30, 5))
    pl.subplot(3, 1, 3)
    pl.hist(avfr["OSImage_5"][:, 2] - avfr["OSImage_45"][:, 2], bins=range(-30, 30, 5))

def quantiles_ex(logodd_sample, logodd_data):
    res = plt.empty((len(logodd_data[:, 0]), len(logodd_data[0, :])))
    for i in range(len(logodd_data[0, :])):
        v = logodd_sample[:, i]
        v.sort()
        q = np.searchsorted(v, logodd_data[:, i])
        res[:, i] = q / len(logodd_sample[0, :])
    return res

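# Hedged usage sketch with synthetic inputs (not from the original source); `np` and
# `plt` are assumed to be numpy and pylab, matching the names the function relies on.
import numpy as np
import pylab as plt

_sample = np.random.randn(200, 4)   # e.g. 200 posterior draws of 4 log-odds
_data = np.random.randn(5, 4)       # 5 observed rows to rank against the draws
_q = quantiles_ex(_sample, _data)
print(_q.shape)                     # (5, 4)
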
def mu_age_X(r=rate['r']['mu_age'], m=rate['m']['mu_age'], f=rate['f']['mu_age']):
    hazard = r + m + f
    pr_not_exit = pl.exp(-hazard)
    X = pl.empty(len(hazard))
    X[-1] = 1 / hazard[-1]
    for i in reversed(range(len(X) - 1)):
        X[i] = pr_not_exit[i] * (X[i + 1] + 1) + 1 / hazard[i] * (1 - pr_not_exit[i]) - pr_not_exit[i]
    return X

def plotConvergence(var, Data):
    Ns = Data[var][:, 0, 0].size
    for pass_k in range(1, Np):
        fig = pl.figure(tight_layout={'h_pad': 0, 'rect': (0, 0, 1, 0.95)},
                        figsize=(5, 4))
        ax1 = fig.add_subplot(2, 1, 1)
        ax2 = fig.add_subplot(2, 1, 2)
        for shear_k in range(Nd):
            I = Data[var][:, shear_k, pass_k]
            i = pl.linspace(1, Ns + 1, Ns)
            m = pl.empty(Ns)
            d = pl.empty(Ns)
            m[-1] = I.mean()
            d[-1] = I.std()
            for k in range(2, Ns - 1):
                m[k] = I[:k].mean() - m[-1]
                d[k] = I[:k].std() - d[-1]
            m[-1] = 0.0
            d[-1] = 0.0
            m /= abs(m[2:]).max()
            d /= abs(d[2:]).max()
            ax1.plot(i[2:], m[2:], 'g-')
            ax2.plot(i[2:], d[2:], 'c-')
        ax1.axhline(0.0, color='k', linestyle='--')
        ax2.axhline(0.0, color='k', linestyle='--')
        ax1.set_xlim(1, Ns)
        ax2.set_xlim(1, Ns)
        ax1.set_xticks([])
        ax1.set_ylim(-1.1, 1.1)
        ax2.set_ylim(-1.1, 1.1)
        ax1.set_yticks([-1, 0, 1])
        ax1.set_yticklabels(['', '', ''])
        ax2.set_yticks([-1, 0, 1])
        ax2.set_yticklabels(['', '', ''])
        ax2.set_xlabel('Monte Carlo Iterations $k$ [#]')
        fig.suptitle('Normalized %s' % titles[var])
        ax1.set_ylabel('Mean [-]')
        ax2.set_ylabel('Deviation [-]')
        fig.savefig('%s/%s-%d-conv.%s' % (fdir, var, pass_k, fext))
        pl.close(fig)

def __init__(self, X, c):
    self.n, self.N = X.shape
    self.X = X
    self.mu = empty((3, self.n))
    self.cov = empty((3, self.n, self.n))
    self.P = empty(3)
    cond = zeros(self.N)
    for i in range(0, 3):
        cond = cond + 1.0
        indices = where(c == cond)
        # Xa contains all elements of X whose class equals i + 1.0
        Xa = [X[:, b] for b in indices]
        # The line above wraps the x's in an extra array, which we don't want
        Xa = Xa[0]
        Na = shape(Xa)[1]
        self.mu[i] = mean(Xa, axis=1)
        # tile() spreads mu out so we can subtract it from the X matrix
        self.cov[i] = cov(Xa - tile(self.mu[i].T, Na).reshape(self.n, Na))
        # The prior probability of this class
        self.P[i] = (Na * 1.0) / self.N

def __init__(self, lfp, coord_electrode=pl.linspace(-700E-6, 700E-6, 15),
             diam=500E-6, cond=0.3, cond_top=0.3,
             f_type='gaussian', f_order=(3, 1)):
    '''Initialize delta-iCSD method'''
    Icsd.__init__(self)

    self.lfp = lfp
    self.coord_electrode = pl.array(coord_electrode)
    self.diam = diam
    self.cond = cond
    self.cond_top = cond_top
    self.f_type = f_type
    self.f_order = f_order

    # initialize F- and iCSD-matrices
    self.f_matrix = pl.empty((self.coord_electrode.size,
                              self.coord_electrode.size))
    self.csd = pl.empty(lfp.shape)

    self.calc_f_matrix()
    self.calc_csd()
    self.filter_csd()

def _make_dct(self):
    """
    ::

        Construct the discrete cosine transform coefficients for the
        current size of constant-Q transform
    """
    DCT_OFFSET = self.lcoef
    nm = 1 / P.sqrt(self._cqtN / 2.0)
    self.DCT = P.empty((self._dctN, self._cqtN))
    for i in P.arange(self._dctN):
        for j in P.arange(self._cqtN):
            self.DCT[i, j] = nm * P.cos(i * (2 * j + 1) * (P.pi / 2.0) / self._cqtN)
    for j in P.arange(self._cqtN):
        self.DCT[0, j] *= P.sqrt(2.0) / 2.0

def _make_dct(self):
    """
    ::

        Construct the discrete cosine transform coefficients for the
        current size of constant-Q transform
    """
    DCT_OFFSET = self.feature_params['lcoef']
    nm = 1 / pylab.sqrt(self._cqtN / 2.0)
    self.DCT = pylab.empty((self._dctN, self._cqtN))
    for i in pylab.arange(self._dctN):
        for j in pylab.arange(self._cqtN):
            self.DCT[i, j] = nm * pylab.cos(i * (2 * j + 1)
                                            * (pylab.pi / 2.0) / float(self._cqtN))
    for j in pylab.arange(self._cqtN):
        self.DCT[0, j] *= pylab.sqrt(2.0) / 2.0

def saw_pad(NN, L=80000, fullness=9, sep=0, env=(3000, 10000, .6, 15000)):
    '''Generate a sawtooth "supersaw" synth pad.'''
    I = pl.arange(L, dtype=float)
    SAW = pl.empty(L, dtype=float)
    r = pl.zeros(L + sep * (len(NN) - 1))
    ENV = ADSR(env, L)
    for i in range(fullness):
        detune = ((float(i) / fullness) - 0.5) * .04
        for ni, n in enumerate(NN):
            SAW = I * (midi.freq(n + detune) / sr)
            SAW += pl.rand()
            SAW %= 1.0
            SAW -= 0.5
            SAW *= ENV
            r[ni * sep:ni * sep + L] += SAW
    return r

def getData(base):
    def getVarData(fn, var):
        def getMembers(tfh):
            names = sorted(tfh.getnames())
            names.remove('.')
            names.remove('./input.cfg')
            names.remove('./output.log')
            return [tfh.getmember(n) for n in names]

        def getPassCount(fl):
            fh = netcdf_file(fl)
            S = fh.variables['u'].shape
            fh.close()
            return S[0]

        tfh = tarfile.open(fn)
        members = getMembers(tfh)
        Nf = len(members)
        Np = getPassCount(tfh.extractfile(members[0]))
        data = pl.empty((Nf, Np))
        for k in range(len(members)):
            fl = tfh.extractfile(members[k])
            fh = netcdf_file(fl)
            data[k, :] = fh.variables[var][:, 0, 0]
            fh.close()
        tfh.close()
        return data

    # Find MC sample count
    fn = '%s/s-%3.1f-data.tar.gz' % (base, shears[0])
    data = getVarData(fn, 'u')
    Ns = data.shape[0]

    # Create empty dictionary
    Data = {}

    # Pre-allocate arrays for storage
    for var in varNames:
        Data[var] = pl.empty((Ns, Nd, Np))

    # Fill arrays
    for shear_k in range(Nd):
        fn = '%s/s-%3.1f-data.tar.gz' % (base, shears[shear_k])
        for vn in varNames:
            data = getVarData(fn, vn)
            Data[vn][:, shear_k, :] = data[:, :]

    return Data

def beep(freq_phase_amp, L):
    '''Additive synthesis of sinewaves.

    freq_phase_amp -- a numpy array with a row for each sinewave, and
      frequency, phase and amplitude in each column.
    L -- length in samples.
    '''
    res = pl.zeros(L)
    ii = pl.arange(L)
    tmp = pl.empty(L)
    for f, p, a in freq_phase_amp:
        pl.multiply(ii, f * tau / sr, tmp)
        pl.add(tmp, p, tmp)
        pl.sin(tmp, tmp)
        pl.multiply(tmp, a, tmp)
        pl.add(res, tmp, res)
    return res

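# Hedged usage sketch (not from the original source): beep() reads module-level
# globals `sr` (sample rate) and `tau` (2*pi), so hypothetical values are set here.
import pylab as pl

sr = 44100
tau = 2 * pl.pi
_chord = pl.array([[440.00, 0.0, 0.5],
                   [554.37, 0.0, 0.3],
                   [659.25, 0.0, 0.2]])  # rows of (frequency, phase, amplitude)
_tone = beep(_chord, sr)                 # one second of a three-partial chord
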
def build_rep_trace(noise=1., seed_val=2931):
    p.seed(seed_val)
    height = 1.
    tau_1 = 10.
    tau_2 = 5.
    start = 30.
    offset = 50.
    repetitions = 100
    result = p.empty(repetitions * len(times))
    for i in xrange(repetitions):
        v = noisy_psp(height, tau_1, tau_2, start, offset, times, noise)
        result[i * len(times): (i + 1) * len(times)] = v
    return result

def TwoTri(m1, m2):
    if m1.ndim == 1:
        L = len(m1) * 2
        N = int(sqrt(L)) + 1
    else:
        N = len(m1)
    nruter = empty((N, N)) * NaN
    indS = triSup(nruter, ind=True)
    indI = triInf(nruter, ind=True)
    if m1.ndim == 1:
        v1, v2 = m1, m2
    else:
        v1, v2 = m1.take(indS), m2.take(indS)
    nruter[unravel_index(indS, nruter.shape)] = v1
    nruter[unravel_index(indI, nruter.shape)] = v2
    return nruter

def segment(data, dt, interval):
    """
    `reshape with one floating point index`

    reshape the given, one-dimensional array `data` with the given `interval`.
    `interval` can be a floating number; the result of the reshape is aligned
    to the nearest possible integer value of the shift.
    """
    segment_len = int(p.ceil(interval / dt))
    n_segments = len(data) // segment_len
    result = p.empty((n_segments, segment_len))
    # "fuzzy reshape"
    for i in range(n_segments):
        offset = int(p.around(i * interval / dt))
        result[i, :] = data[offset:offset + segment_len]
    return result

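# A minimal sketch of calling segment(), assuming `p` is pylab as in the function body.
import pylab as p

_data = p.sin(p.arange(0, 10, 0.1))                # 100 samples at dt = 0.1
_segments = segment(_data, dt=0.1, interval=0.25)  # non-integer segment shift
print(_segments.shape)                             # (n_segments, segment_len) == (33, 3)
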
def calc_csd(self):
    '''Calculate the iCSD using the spline iCSD method'''
    # e_mat0, e_mat1, e_mat2, e_mat3 = self.calc_e_matrices()
    e_mat = self.calc_e_matrices()

    [el_len, n_tsteps] = self.lfp.shape

    # padding the lfp with zeros on top/bottom
    cs_lfp = pl.matrix(pl.zeros((el_len + 2, n_tsteps)))
    cs_lfp[1:-1, :] = self.lfp

    # CSD coefficients
    csd_coeff = self.f_matrix**-1 * cs_lfp

    # The cubic spline polynomial coefficients
    a_mat0 = e_mat[0] * csd_coeff
    a_mat1 = e_mat[1] * csd_coeff
    a_mat2 = e_mat[2] * csd_coeff
    a_mat3 = e_mat[3] * csd_coeff

    # Extend electrode coordinates at both ends by the mean interdistance
    coord_ext = pl.zeros(el_len + 2)
    coord_ext[0] = 0
    coord_ext[1:-1] = self.coord_electrode
    coord_ext[-1] = self.coord_electrode[-1] + \
        pl.diff(self.coord_electrode).mean()

    # create high res spatial grid
    out_zs = pl.linspace(coord_ext[0], coord_ext[-1], self.num_steps)
    self.csd = pl.empty((self.num_steps, self.lfp.shape[1]))

    # Calculate iCSD estimate on grid from polynomial coefficients.
    i = 0
    for j in xrange(self.num_steps):
        if out_zs[j] > coord_ext[i + 1]:
            i += 1
        self.csd[j, :] = a_mat0[i, :] + \
            a_mat1[i, :] * (out_zs[j] - coord_ext[i]) + \
            a_mat2[i, :] * (out_zs[j] - coord_ext[i])**2 + \
            a_mat3[i, :] * (out_zs[j] - coord_ext[i])**3

def testPerformAstrocyteActions(self):
    self.hidAstroL.neur_in_ws[:] = ones(24).reshape(NN.ni, NN.nh)
    """Assuming we are at the fourth iter of minor iters and that astro
    parameters are reset after each input pattern, neur counters can only
    be in [4, -4, 2, -2, 0]"""
    self.hidAstroL.neur_counters[:] = array([4, -4, 2, -2, 0, 0])
    self.hidAstroL.remaining_active_durs[:] = array([3, -3, 2, -2, 0, 0])
    self.hidAstroL.astro_statuses[:] = array([1, -1, 1, -1, 0, 0])
    self.hidAstroL.performAstroActions()

    target_weights = empty([NN.ni, NN.nh])
    target_weights[:, 0] = 1.25
    target_weights[:, 1] = .5
    target_weights[:, 2] = 1.25
    target_weights[:, 3] = .5
    target_weights[:, 4] = 1.
    target_weights[:, 5] = 1.
    npt.assert_array_equal(self.in_to_hidden, target_weights)

    target_activations = array([1, -1, 1, -1, 0, 0])
    npt.assert_array_equal(target_activations, self.hidAstroL.astro_statuses)

    target_remaining_active_durs = array([2, -2, 1, -1, 0, 0])
    npt.assert_array_equal(target_remaining_active_durs,
                           self.hidAstroL.remaining_active_durs)

def __init__(self, var_list, store_values=True, store_diagonal=False):
    """Initialize matrix.

    Args:
      var_list: [str] of variable names in matrix
      store_values: bool if to store values of pairs, else store only booleans
      store_diagonal: bool if to store variables paired with themselves
    """
    self.n = len(var_list)
    self.store_diagonal = store_diagonal
    self.n_max_pairs = self.n * (self.n + 1) / 2
    if not store_diagonal:
        self.n_max_pairs -= self.n
    # Existence matrix, all set to zero.
    self.V = pylab.zeros(self.n_max_pairs, dtype=pylab.int8)
    self.store_values = store_values
    # Order of all variables for indexing
    self.vars = dict([(name, order) for order, name in enumerate(var_list)])
    self.n_set = 0
    if store_values:
        # Value matrix, one slot per stored pair (parallel to self.V)
        self.M = pylab.empty(self.n_max_pairs)

def plot_energyspec(di='.', i=0, nf=1):
    e = pl.empty(nf + 1)
    x, y, z, u = lod_vfield(di, i)
    e[0] = pl.sqrt(pl.norm(u['X'])**2 + pl.norm(u['Y'])**2 + pl.norm(u['Z'])**2)
    for m in range(nf):
        x, y, z, uc = lod_vfield(di, i=2 * m + 1 + i)
        x, y, z, us = lod_vfield(di, i=2 * m + 2 + i)
        norc = 0
        nors = 0
        for f in ['X', 'Y', 'Z']:
            norc += pl.norm(uc[f])**2
            nors += pl.norm(us[f])**2
        e[m + 1] = pl.sqrt(norc + nors)
    pl.semilogy(e)
    pl.grid(True)
    pl.xlabel(r'mode: $k$')
    pl.ylabel(r'$e=|| \mathbf{\hat{u}}_k||$', ha='left', va='bottom', rotation=0)
    pl.gca().yaxis.set_label_coords(-0.075, 1.02)

def testPerformAstrocyteActions(self):
    self.hidAstroL.neur_in_ws[:] = ones(24).reshape(NN.ni, NN.nh)
    """Assuming we are at the fourth iter of minor iters and that astro
    parameters are reset after each input pattern, neur counters can only
    be in [4, -4, 2, -2, 0]"""
    self.hidAstroL.neur_counters[:] = array([4, -4, 2, -2, 0, 0])
    self.hidAstroL.remaining_active_durs[:] = array([3, -3, 2, -2, 0, 0])
    self.hidAstroL.astro_statuses[:] = array([1, -1, 1, -1, 0, 0])
    self.hidAstroL.performAstroActions()

    target_weights = empty([NN.ni, NN.nh])
    target_weights[:, 0] = 1.25
    target_weights[:, 1] = .5
    target_weights[:, 2] = 1.25
    target_weights[:, 3] = .5
    target_weights[:, 4] = 1.
    target_weights[:, 5] = 1.
    npt.assert_array_equal(self.in_to_hidden, target_weights)

    target_activations = array([1, -1, 1, -1, 0, 0])
    npt.assert_array_equal(target_activations, self.hidAstroL.astro_statuses)

    target_remaining_active_durs = array([2, -2, 1, -1, 0, 0])
    npt.assert_array_equal(target_remaining_active_durs,
                           self.hidAstroL.remaining_active_durs)

def __init__(self, lfp, coord_electrode=pl.linspace(-700E-6, 700E-6, 15),
             cond=0.3, vaknin_el=True, f_type='gaussian', f_order=(3, 1)):
    Icsd.__init__(self)

    self.lfp = lfp
    self.coord_electrode = pl.array(coord_electrode)
    self.cond = cond
    self.f_type = f_type
    self.f_order = f_order

    if vaknin_el:
        self.lfp = pl.empty((lfp.shape[0] + 2, lfp.shape[1]))
        self.lfp[0, ] = lfp[0, ]
        self.lfp[1:-1, ] = lfp
        self.lfp[-1, ] = lfp[-1, ]
        self.f_inv_matrix = pl.zeros((lfp.shape[0] + 2, lfp.shape[0] + 2))
    else:
        self.lfp = lfp
        self.f_inv_matrix = pl.zeros((lfp.shape[0], lfp.shape[0]))

    self.calc_f_inv_matrix()
    self.calc_csd()
    self.filter_csd()

from os import chdir, getcwd, listdir, path

import pylab as p
from input_explorer import inputExplorer
from proc_profile import ProcProfile

prof_folder = path.join(getcwd(), "..", "PROC")
chdir(prof_folder)
ne = p.loadtxt('ne.dat')
info = p.loadtxt('prof_info.dat')
shot_number = 28061
shot_number = 28749
prof_list = listdir(prof_folder)
prof_list.sort()
position = p.empty(shape=((len(prof_list) - 2), len(ne)))
times = p.empty(len(prof_list) - 2)
print len(times)
i = 0
for r_file in prof_list:
    name = r_file.strip('.dat')
    if name not in ('prof_info', 'ne'):
        position[i] = p.loadtxt(r_file) * 1e2
        times[i] = name
        i += 1
fig, ax = p.subplots(1)
ax.hold(False)

import pylab as pl


def create_FCC_configuration(FCCspacing, nCellsPerSide, periodic, coords, MiddleAtomId):
    FCCshifts = pl.array([[0., 0., 0.],
                          [0.5 * FCCspacing, 0.5 * FCCspacing, 0.],
                          [0.5 * FCCspacing, 0., 0.5 * FCCspacing],
                          [0., 0.5 * FCCspacing, 0.5 * FCCspacing]], pl.double)
    MiddleAtomID = 0
    a = 0
    latVec = pl.empty(nCellsPerSide, pl.double)
    coords[a:a + 3, :] = latVec + FCCshifts


nCellsPerSide = [4, 2, 1]
N = (4 * nCellsPerSide[0] * nCellsPerSide[1] * nCellsPerSide[2]
     + 2 * nCellsPerSide[0] * nCellsPerSide[1]
     + 2 * nCellsPerSide[0] * nCellsPerSide[2]
     + 2 * nCellsPerSide[1] * nCellsPerSide[2]
     + nCellsPerSide[0] + nCellsPerSide[1] + nCellsPerSide[2] + 1)

#    'y' : pl.array([0, 0, -pl.cos(pl.pi/6), pl.cos(pl.pi/6)]) * 25.,
#    'z' : pl.array([-50., 0, 0, 0]),
#    'sigma' : 0.1,
#    'N' : pl.array([[0, 0, -1],
#                    [-1*pl.cos(pl.pi/9), 0, -1*pl.sin(pl.pi/9)],
#                    [pl.sin(pl.pi/6)*pl.cos(pl.pi/9),
#                     -pl.cos(pl.pi/9)*pl.cos(pl.pi/9), -1*pl.sin(pl.pi/9)],
#                    [-pl.sin(pl.pi/6)*pl.cos(pl.pi/9),
#                     -pl.cos(pl.pi/9)*pl.cos(pl.pi/9), 1*pl.sin(pl.pi/9)]]),
#    'r' : 7.,
#    'n' : 100,
#    'r_z': pl.array([[-1E199, -50.00001, -50, 75, 1E99], [0, 0, 7, 48, 48]]),
#    'seedvalue' : None,
# }

ch = 32
N = pl.empty((ch, 3))
for i in xrange(N.shape[0]):
    N[i, ] = [1, 0, 0]  # normal unit vec. to contacts

electrodeParams = {         # parameters for electrode class
    'sigma' : 0.1,          # Extracellular potential
    'x' : pl.zeros(ch),     # Coordinates of electrode contacts
    'y' : pl.zeros(ch),
    'z' : pl.linspace(-500, 1000, ch),
    'n' : 20,
    'r' : 15,
    # 'r_z' : pl.array([[-1E199, -500.00001, -500.00001, 75, 1E99], [0, 0, 7, 48, 48]]),
    'N' : N,
}

synparams_AMPA = {          # Excitatory synapse parameters
    'e' : 0,                # reversal potential
    'syntype' : 'Exp2Syn',  # conductance based exponential synapse

# Excitatory neurons    Inhibitory neurons
import pylab as pl

Ne = 800
Ni = 200
re = pl.rand(Ne)
ri = pl.rand(Ni)
a = pl.hstack([0.02 * pl.ones(Ne), 0.02 + 0.08 * ri])
b = pl.hstack([0.2 * pl.ones(Ne), 0.25 - 0.05 * ri])
c = pl.hstack([-65 + 15 * re**2, -65 * pl.ones(Ni)])
d = pl.hstack([8 - 6 * re**2, 2 * pl.ones(Ni)])
S = pl.hstack([0.5 * pl.rand(Ne + Ni, Ne), -pl.rand(Ne + Ni, Ni)])

v = -65 * pl.ones(Ne + Ni)   # Initial values of v
u = b * v                    # Initial values of u
firings = pl.empty((2, 0))   # spike timings

for t in range(1000):        # simulation of 1000 ms
    I = pl.hstack([5 * pl.randn(Ne), 2 * pl.randn(Ni)])  # thalamic input
    fired = pl.find(v >= 30)                             # indices of spikes
    if len(fired):
        firings = pl.hstack([firings, pl.array([t + 0 * fired, fired])])
        v[fired] = c[fired]
        u[fired] = u[fired] + d[fired]
        I = I + S[:, fired].sum(axis=1)
    v = v + 0.5 * (0.04 * v**2 + 5 * v + 140 - u + I)    # step 0.5 ms
    v = v + 0.5 * (0.04 * v**2 + 5 * v + 140 - u + I)    # for numerical
    u = u + a * (b * v - u)                              # stability

pl.scatter(firings[0, :], firings[1, :])
pl.show()

    'rec_vmem': True,
    'rec_isyn': True,
    'rec_vmemsyn': True,
}
simparams2 = {'rec_istim': True}
pop_params = {
    'n': pop_params_n,
    'radius': pop_geom[0],
    'tstart': 0,
    'tstop': simtime,
    'z_min': pop_geom[1],
    'z_max': pop_geom[2],
}

# LFP from bottom to top, from zero to 500 mum x-offset
N = pl.empty((96, 3))
for i in range(N.shape[0]):
    N[i, ] = [0, 1, 0]
x = pl.linspace(0, 500, 6)
z = pl.linspace(-700, 800, 16)
X, Z = pl.meshgrid(x, z)
electrodeparams = {
    'x': X.T.reshape(-1),
    'y': pl.zeros(96),
    'z': Z.T.reshape(-1),
    'sigma': 0.3,
    'color': 'g',
    'marker': 'o',
    'N': N,
    'r': electrode_r,
    'n': 100,

def processData(self):
    print self.magSampleList
    # plot(array(self.magSampleList))
    # show()
    self.mags = array(self.magSampleList)
    xs = self.mags[:, 0]
    ys = self.mags[:, 1]
    zs = self.mags[:, 2]
    cxs, x_offset, x_scale = self.applyCalibration(xs)
    cys, y_offset, y_scale = self.applyCalibration(ys)
    czs, z_offset, z_scale = self.applyCalibration(zs)

    def residuals(params, x, y, z):
        xo = params[0]
        xs = params[1]
        yo = params[2]
        ys = params[3]
        zo = params[4]
        zs = params[5]
        xc = empty(shape(x))
        for i in range(len(x)):
            xc[i] = (x[i] - xo) * xs
        yc = empty(shape(y))
        for i in range(len(y)):
            yc[i] = (y[i] - yo) * ys
        zc = empty(shape(z))
        for i in range(len(z)):
            zc[i] = (z[i] - zo) * zs
        res = []
        for i in range(len(xc)):
            norm = l2norm(array([xc[i], yc[i], zc[i]])) - 1.0
            res.append(norm)
        return array(res)

    p0 = [x_offset, x_scale, y_offset, y_scale, z_offset, z_scale]
    ls = leastsq(residuals, p0, args=(xs, ys, zs))
    x_offset = ls[0][0]
    x_scale = ls[0][1]
    y_offset = ls[0][2]
    y_scale = ls[0][3]
    z_offset = ls[0][4]
    z_scale = ls[0][5]
    cxs = self.applyCalibration(xs, calibration=(x_offset, x_scale))[0]
    cys = self.applyCalibration(ys, calibration=(y_offset, y_scale))[0]
    czs = self.applyCalibration(zs, calibration=(z_offset, z_scale))[0]

    calibratedMag = empty((len(cxs), 3))
    calibratedMag[:, 0] = cxs
    calibratedMag[:, 1] = cys
    calibratedMag[:, 2] = czs
    magnitudes = amap(lambda v: l2norm(v), calibratedMag)
    self.calibratedMag = calibratedMag

    mlab.points3d(cxs, cys, czs, scale_mode='none', scale_factor=0.02)
    # mlab.points3d(array([-1.0, 1.0]), array([0.0, 0.0]), array([0.0, 0.0]),
    #               scale_mode='none', scale_factor=0.02)
    sphere = mlab.points3d(0, 0, 0, opacity=0.5, resolution=100,
                           color=(1.0, 0.0, 0.0), scale_mode='none',
                           scale_factor=2.0)
    sphere.actor.property.backface_culling = True
    mlab.show()
    figure()
    plot(magnitudes)
    show()

    # write calibration to a file
    f = open(self.filename, mode='w')
    f.write("id,x_offset,x_scale,y_offset,y_scale,z_offset,z_scale\n")
    f.write("%d,%f,%f,%f,%f,%f,%f\n" % (self.id, x_offset, x_scale,
                                        y_offset, y_scale, z_offset, z_scale))
    f.write("\n")
    for l in self.magSampleList:
        f.write("%d,%d,%d\n" % (l[0], l[1], l[2]))
    f.close()

    resError = []
    for i in range(len(cxs)):
        resError.append(self.calculateResidualError(cxs[i], cys[i], czs[i]))
    print len(resError)
    print mean(resError)

    sys.exit(1)

gen = iter(stream)

# circular buffer holds 100 elements for the y-axis
cbuf = bufs.circbuf(shape=stream.shape)

# set up plotting stuff
ax = p.subplot(111)
canvas = ax.figure.canvas
ax.set_ylim(-100, 100)

# create the initial line
x = p.arange(0, 100)
plots = {}
for i in range(stream.shape[0]):
    plots[i], = p.plot(x, p.empty(100), animated=True)

# save the clean slate background -- everything but the animated line
# is drawn and saved in the pixel buffer background
background = None


def update_line(*args):
    global gen, cbuf, background, plots
    inp = gen.next()
    if inp is False:
        print >> sys.stderr, "reached EOF or error in input"
        sys.exit(0)
    cbuf.put(inp)

cosSum = np.nansum(np.real(data_ecog_fft_norm), axis=0)

PPC = (np.square(cosSum) + np.square(sinSum) - dof) / (dof * (dof - 1))
thetaPPC = PPC[:, 2]
alphaPPC = np.mean(PPC[:, 4:9], axis=1)
betaPPC = np.mean(PPC[:, 9:16], axis=1)
gammalowPPC = np.mean(PPC[:, 16:20], axis=1)
gammahighPPC = np.mean(PPC[:, 20:], axis=1)

# ----------Visualization----------------
times = spike_times_shaftA
data = data_probe_hp
ploted_points = 64
f_sample = f_sampling
bad_spikes = pl.empty(0)

fig = pl.figure(0)
ax = fig.add_subplot(111)
bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)


def on_pick(event):
    event.artist.set_visible(not event.artist.get_visible())
    print(ax.lines.index(event.artist))
    fig.canvas.draw()


fig.canvas.callbacks.connect('pick_event', on_pick)

lines = ax.plot(pl.arange(-ploted_points / f_sample, ploted_points / f_sample,
                          1 / f_sample),
                pl.transpose(data[:, times[0] - ploted_points:times[0] + ploted_points]),
                picker=True)
trial_text = pl.figtext(0.85, 0.85, "Spike: " + str(0), ha="right", va="top",
                        size=20, bbox=bbox_props)
if pl.size(pl.ginput(n=200, mouse_add=1, mouse_stop=3, mouse_pop=2)) > 0:
    bad_spikes = pl.append(bad_spikes, 0)
    print(0)
for i in pl.arange(pl.size(times) - 50, pl.size(times), 5):
    new_data = pl.transpose(data[:, times[i] - ploted_points:times[i] + ploted_points])

def analyze_vector(in_iter, size, title, out_dir="", x_axis_name="Measure",
                   out=sys.stdout, err=sys.stderr, display_title=True):
    """Load vector, compute statistics, output to buffer.

    Args:
      in_iter: [*float] of vector entries in order
      size: int of expected vector size
      title: str name of plot
      out_dir: str of path where to save plot
      out: [*str] of output buffer for results
      err: [*str] of output buffer for status messages
    """
    err.write("Loading vector %s...\n" % title)
    v = pylab.empty(size, dtype=float)
    for i, x in enumerate(in_iter):
        v[i] = x
    assert(len(v) == size)

    # Compute statistics on distribution.
    err.write("Computing statistics...\n")
    now = time.strftime("%a, %d %b %Y %H:%M:%S")
    out.write("#Statistics for %s. Created %s\n" % (title, now))
    stats = {
        'mean': pylab.mean(v),
        'size': size,
        'std': pylab.std(v),
        'v_min': min(v),
        'v_max': max(v),
    }

    # Manual binning: if |v_max-v_min| <= 10, bin in 0.01 increments.
    # Else, SKIP [NOT YET::: divide range into 100 increments]
    do_bins = (abs(stats['v_min'] - stats['v_max']) <= 10)
    if do_bins:
        # Bin in 0.01 increments.
        bins = {}
        for i in range(size):
            x = round(v[i], 2)
            bins[x] = bins.get(x, 0) + 1

    # Output results.
    for name, value in stats.items():
        out.write("%s: %.4f\n" % (name, value))
    out.write("Bins:\n")
    if do_bins:
        for bin_value in sorted(bins.keys()):
            out.write("%.2f: %d\n" % (bin_value, bins[bin_value]))

    n_bins = 98
    # Generate and save histograms
    err.write("Plotting histograms...\n")
    pylab.clf()
    pylab.cla()
    pylab.hist(v, bins=n_bins, log=False)
    if display_title:
        pylab.title(title)
    pylab.ylabel("Pairs")
    pylab.xlabel("Measure")
    pylab.savefig(os.path.join(out_dir, ("%s.hist.png" % title)))
    pylab.clf()
    pylab.cla()
    pylab.hist(v, bins=n_bins, log=True)
    if display_title:
        pylab.title(title + " -- Log scale")
    pylab.ylabel("Pairs (log)")
    pylab.xlabel("Measure")
    pylab.savefig(os.path.join(out_dir, ("%s.histlog.png" % title)))

def exposure(self, exposureTime, ilAcq=False, ilCorrDoubleExpo=False):
    """
    Main CCD exposure function. It is meant to be used in a thread and called
    repeatedly, changing state when:
    - acquisition must start (first call only),
    - is undergoing,
    - has finished (last call only).

    exposureTime is the exposure time (integration time for one image) in
    seconds.

    ilAcq is a flag for basic interlaced acquisition. If True, the camera will
    read the odd rows first and then the even rows. On short integration
    times, this will result in huge distortion of the image since the read
    time becomes non-negligible.

    ilCorrDoubleExpo is a flag for double integration interlaced acquisition.
    If True, the camera will first perform a full integration reading the odd
    rows, then clear the CCD and perform another full integration and read the
    even rows. This cancels the distortion due to different reading times for
    odd and even rows, but is much slower.

    PLEASE NOTE: ilAcq must be True in order to activate ilCorrDoubleExpo
    """
    assert (exposureTime >= 0)
    # exposure finished before the last call of this function.
    self.exposureFinished = False
    if not self.ccdParams['isInterlaced']:
        ilAcq = False
    if not ilAcq:
        # ilAcq must be True to allow double exposure
        ilCorrDoubleExpo = False

    # If exposureTime is smaller than 1 second, the function will block
    # execution for the time of the integration (up to 2 s in double expo
    # mode).
    if exposureTime < 1:
        if self.mustStartAcq:
            if ilCorrDoubleExpo:
                # double exposure, one reading for each
                self.clearCcd()
                self.data1 = self.readCcd(delay=exposureTime, bothRows=False,
                                          oddRows=True)   # read odd rows
                self.clearCcd()
                self.data2 = self.readCcd(delay=exposureTime, bothRows=False,
                                          oddRows=False)  # read even rows
            elif ilAcq:
                # single exposure, two readings
                self.clearCcd()
                self.data1 = self.readCcd(delay=exposureTime, bothRows=False,
                                          oddRows=True)   # read odd rows
                self.data2 = self.readCcd(bothRows=False,
                                          oddRows=False)  # read even rows
            else:
                # single exposure, single read (not interlaced mode)
                self.clearCcd()
                self.data = self.readCcd(delay=exposureTime)
            self.exposureFinished = True
            self.mustStartAcq = True
            self.completionPercentage = 100
    # Integration times longer than 1 second need software timing.
    # The function will not block execution and will update the
    # self.exposureFinished flag when finished.
    else:
        if ilCorrDoubleExpo:
            # double exposure, one reading for each
            if not self.exposureFinished and not self.firstExposureFinished:
                self._longExposure(exposureTime, bothRows=False, oddRows=False)
                if self.exposureFinished:
                    self.firstExposureFinished = True
                    self.exposureFinished = False
                    self.data1 = self.dataBuffer
            elif not self.exposureFinished:
                self._longExposure(exposureTime, bothRows=False, oddRows=True)
                if self.exposureFinished:
                    self.data2 = self.dataBuffer
        elif ilAcq:
            # single exposure, two readings
            if not self.exposureFinished:
                self._longExposure(exposureTime, bothRows=False, oddRows=True)
                if self.exposureFinished:
                    self.data1 = self.dataBuffer              # read odd rows
                    self.data2 = self.readCcd(bothRows=False,
                                              oddRows=False)  # read even rows
        else:
            # single exposure, single read (not interlaced mode)
            if not self.exposureFinished:
                self._longExposure(exposureTime)
                if self.exposureFinished:
                    self.data = self.dataBuffer

    if self.exposureFinished:
        if not ilAcq:
            self.pixels = bytesToPx(self.data).reshape(
                self.ccdParams['height'], self.ccdParams['width'])
        else:
            self.pixels1 = bytesToPx(self.data1).reshape(
                self.ccdParams['height'], self.ccdParams['width'])
            self.pixels2 = bytesToPx(self.data2).reshape(
                self.ccdParams['height'], self.ccdParams['width'])
            self.pixels = py.empty(
                (2 * self.ccdParams['height'], self.ccdParams['width']),
                dtype=py.ushort)
            self.pixels[0::2] = self.pixels1  # physical odd rows (image even rows)
            self.pixels[1::2] = self.pixels2  # physical even rows (image odd rows)
        self.firstExposureFinished = False

def sim():
    global V, Vlin, tao_e, Rar, Rmr
    Rar = pl.arange(Ras[0], Ras[1], Ras[2])
    Rmr = pl.arange(Rms[0], Rms[1], Rms[2])
    ns.mech.setcurrent(Ie * Ies, ns.dt)
    li = len(Rmr)
    lj = len(Rar)
    tao_e = pl.empty((li, lj))
    tao_l = pl.empty((li, lj))
    tao_n = pl.empty((li, lj))
    for i in range(li):
        for j in range(lj):
            # Special conditions
            if Rar[j] < 10.:
                sec.L(15000.)
            else:
                sec.L(7000.)
            if Rmr[i] > 5000.:
                ns.h.tstop = 50.
            else:
                ns.h.tstop = 20.
            sec.Rm(Rmr[i])
            sec.Ra(Rar[j])
            print Rmr[i], Rar[j]
            ns.sim()

            # Obtain voltage, steady state voltage, normalize and
            # get logarithmic values
            t = ns.t
            Vinf = sec.nrnV0[-1]
            V = 1 - pl.array(sec.nrnV0)[:-1] / Vinf
            Vlin = pl.log(V)
            print Vinf

            # Estimate the time constant by finding the point at which
            # the voltage reaches the value 1/e
            nz, = pl.nonzero(V > (1 / pl.e))
            # The time where V ~ 1/e is the point right after the last nz
            tao_e[i, j] = t[nz[-1] + 1] - tstart
            print 'tao_e', tao_e[i, j]

            # Define least squares data interval and
            # make the pulse starting time be zero
            i0 = int(t0 / ns.dt)
            i1 = int(t1 / ns.dt)
            t01 = t[:i1 - i0]
            V01 = V[i0:i1]
            Vlin01 = Vlin[i0:i1]

            # Linear least squares
            A = pl.c_[t01, pl.ones_like(t01)]
            m, c = pl.lstsq(A, Vlin01.copy())[0]
            tao_l[i, j] = -1. / m - tstart
            print 'tao_l', tao_l[i, j], '(', m, c, pl.exp(c), ')'

            # Parametric function: p is the parameter vector and
            # t the independent variable
            fp = lambda p, t: p[0] * pl.exp(p[1] * t)
            # fp = lambda p, t: p[0]*pl.exp(p[1]*t) + p[2]*pl.exp(p[3]*t)
            # fp = lambda p, t: pl.exp(p[0]*t)
            # Error function
            e = lambda p, t, V: (fp(p, t) - V)
            # Initial parameter guess
            p0 = [1., -5.]
            # p0 = [1., -5., 1., -1.]
            # p0 = [-5.]
            # Fitting
            p, success = leastsq(e, p0, args=(t01, V01), maxfev=10000)
            tao_n[i, j] = -1. / p[1] - tstart
            print 'tao_n', tao_n[i, j], '(', p, success, ')'
            """

def gen_ssmodel(self):
    """
    generates full neural model

    Attributes
    ----------
    K: matrix
        matrix of connectivity kernel evaluated over the spatial domain of the kernel
    Sigma_e: matrix
        field disturbance covariance matrix
    Sigma_e_c: matrix
        Cholesky decomposition of field disturbance covariance matrix
    Sigma_varepsilon_c: matrix
        Cholesky decomposition of observation noise covariance matrix
    C: matrix
        matrix of sensors evaluated at each spatial location; it's not the
        same as C in the IDE model
    """
    print "generating full neural model"
    K = pb.empty((len(self.field_space), len(self.field_space)))
    for i in range(len(self.field_space)):
        for j in range(len(self.field_space)):
            K[i, j] = self.kernel(pb.matrix([[self.field_space[i]],
                                             [self.field_space[j]]]))
    self.K = pb.matrix(K)

    # calculate field disturbance covariance matrix and its Cholesky decomposition
    gamma_space = pb.empty((self.field_space.size**2, 2), dtype=float)
    l = 0
    for i in self.field_space:
        for j in self.field_space:
            gamma_space[l] = [i, j]
            l += 1
    N1, D1 = gamma_space.shape
    diff = gamma_space.reshape(N1, 1, D1) - gamma_space.reshape(1, N1, D1)
    Sigma_e_temp = self.gamma_weight * np.exp(-np.sum(np.square(diff), -1)
                                              * (1.0 / self.gamma.width))
    self.Sigma_e = pb.matrix(Sigma_e_temp)
    if hasattr(self, "Sigma_e_c"):
        pass
    else:
        self.Sigma_e_c = pb.matrix(sp.linalg.cholesky(self.Sigma_e)).T

    # calculate Cholesky decomposition of observation noise covariance matrix
    Sigma_varepsilon_c = pb.matrix(sp.linalg.cholesky(self.Sigma_varepsilon)).T
    self.Sigma_varepsilon_c = Sigma_varepsilon_c

    # Calculate sensors at each spatial location; it's not the same as C in the IDE model
    t0 = time.time()
    sensor_space = pb.empty((self.observation_locs_mm.size**2, 2), dtype=float)
    l = 0
    for i in self.observation_locs_mm:
        for j in self.observation_locs_mm:
            sensor_space[l] = [i, j]
            l += 1
    N2, D2 = sensor_space.shape
    diff = sensor_space.reshape(N2, 1, D2) - gamma_space.reshape(1, N1, D1)
    C = np.exp(-np.sum(np.square(diff), -1) * (1.0 / self.sensor_kernel.width))
    self.C = pb.matrix(C)

####print " numpy version ", numpy.__version__

# create an array of floating point
start = 0.0
stop = 128.0
step = 1.0
x = pylab.arange(start, stop, step, 'float')
# square it
y = x * x
lenx = len(x)
print len(x), len(y)
print x[lenx - 1], y[lenx - 1]

# create an empty "ndarray"
xy = pylab.empty((lenx, 2))  # , typecode='f')

# fill the array with x and y
xy[:, 0] = x
xy[:, 1] = y

# clear the figure
pylab.clf()

# create first of 2 sub plots
pylab.subplot(211)
pylab.xlabel('X')
pylab.ylabel('X * X')
pylab.plot(xy[:, 0], xy[:, 1])

# create second of 2 sub plots
pylab.subplot(212)
pylab.cla()
