def random_tree(labels):
    """ Given a list of labels, create a list of leaf nodes, and then one by
    one pop them off, randomly grafting them on to the growing tree.
    Return the root node. """
    assert len(labels) > 2
    import RandomArray; RandomArray.seed()
    leaves = []
    for label in labels:
        leaves.append(Fnode(istip=1, label=label))
    leaf_indices = list(RandomArray.permutation(len(leaves)))
    joined = [leaves[leaf_indices.pop()]]
    remaining = leaf_indices
    while remaining:
        i = RandomArray.randint(0, len(joined)-1)
        c1 = joined[i]
        if c1.back:
            n = c1.bisect()
        else:
            n = InternalNode()
            n.add_child(c1)
        c = leaves[remaining.pop()]
        n.add_child(c)
        joined.append(c)
        joined.append(n)
    for node in joined:
        if not node.back:
            node.isroot = 1
            return node
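# Minimal usage sketch (hypothetical labels; relies on the Fnode and
# InternalNode classes this function already uses):
root = random_tree(['A', 'B', 'C', 'D', 'E'])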
def main():
    """ A simple example.  Note that the Tkinter lines are there only because
    this code will be run standalone.  On the interpreter, simply invoking
    surf and view would do the job."""
    import Tkinter
    r = Tkinter.Tk()
    r.withdraw()

    def f(x, y):
        return Numeric.sin(x*y)/(x*y)
    x = Numeric.arange(-7., 7.05, 0.1)
    y = Numeric.arange(-5., 5.05, 0.05)
    v = surf(x, y, f)

    import RandomArray
    z = RandomArray.random((50, 25))
    v1 = view(z)
    v2 = view(z, warp=1)
    z_large = RandomArray.random((1024, 512))
    v3 = viewi(z_large)

    # A hack for stopping Python when all windows are closed.
    v.master = r
    v1.master = r
    v2.master = r
    #v3.master = r

    r.mainloop()
def randomArray(shape, seed=None, range=(0, 1), type=Float):
    """Utility to generate a Numeric array full of pseudorandom numbers in
    the given range. This will attempt to use the RandomArray module, but
    fall back on using the standard random module in a loop.
    """
    global globalSeed
    if not seed:
        if not globalSeed:
            globalSeed = int(time.time())
        seed = globalSeed
        # Keep our global seed mixed up enough that many requests for
        # random arrays consecutively still gives random-looking output.
        globalSeed = (globalSeed + random.randint(1, 0xFFFFF)) & 0x7FFFFFF
    try:
        import RandomArray
        RandomArray.seed(seed + 1, seed + 1)
        return (RandomArray.random(shape) * (range[1] - range[0]) +
                range[0]).astype(type)
    except ImportError:
        random.seed(seed)
        a = zeros(multiply.reduce(shape), Float)
        for i in xrange(a.shape[0]):
            a[i] = random.random() * (range[1] - range[0]) + range[0]
        return reshape(a, shape).astype(type)
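# Usage sketch (hypothetical shape and range; needs the Numeric names
# imported by the surrounding module):
noise = randomArray((3, 4), seed=1234, range=(-1.0, 1.0))
print noise.shape  # (3, 4), values drawn uniformly from [-1.0, 1.0)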
def setUp(self):
    self.number = 50
    X = RandomArray.random(self.number)
    Y = RandomArray.random(self.number)
    Z = RandomArray.random(self.number)
    co = Numeric.array([X, Y, Z])
    self.points = []
    for i in range(len(co[0])):
        self.points.append(tuple(co[:, i].tolist()))
def MakeRandomPartitionProblem(N, M):
    """
    Returns a random series of N integers in the range 1 < p < 2**M,
    guaranteed to sum to an even number.

    Use RandomArray.randint to generate a length N vector S of the
    appropriate range. While sum(S) mod 2 is not zero, re-generate S.
    """
    intSize = 2**M
    S = RandomArray.randint(1, intSize + 1, N)
    while sum(S) % 2 != 0:
        S = RandomArray.randint(1, intSize + 1, N)
    return S
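# Usage sketch (hypothetical sizes): twelve integers in [1, 2**8],
# re-drawn until their sum is even.
S = MakeRandomPartitionProblem(12, 8)
assert sum(S) % 2 == 0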
def compare(m, Nobs, Ncodes, Nfeatures):
    obs = RandomArray.normal(0., 1., (Nobs, Nfeatures))
    codes = RandomArray.normal(0., 1., (Ncodes, Nfeatures))
    import scipy.cluster.vq
    scipy.cluster.vq
    print('vq with %d observation, %d features and %d codes for %d iterations'
          % (Nobs, Nfeatures, Ncodes, m))

    t1 = time.time()
    for i in range(m):
        code, dist = scipy.cluster.vq.py_vq(obs, codes)
    t2 = time.time()
    py = (t2 - t1)
    print(' speed in python:', (t2 - t1) / m)
    print(code[:2], dist[:2])

    t1 = time.time()
    for i in range(m):
        code, dist = scipy.cluster.vq.vq(obs, codes)
    t2 = time.time()
    print(' speed in standard c:', (t2 - t1) / m)
    print(code[:2], dist[:2])
    print(' speed up: %3.2f' % (py / (t2 - t1)))

    # load into cache
    b = vq(obs, codes)
    t1 = time.time()
    for i in range(m):
        code, dist = vq(obs, codes)
    t2 = time.time()
    print(' speed inline/blitz:', (t2 - t1) / m)
    print(code[:2], dist[:2])
    print(' speed up: %3.2f' % (py / (t2 - t1)))

    # load into cache
    b = vq2(obs, codes)
    t1 = time.time()
    for i in range(m):
        code, dist = vq2(obs, codes)
    t2 = time.time()
    print(' speed inline/blitz2:', (t2 - t1) / m)
    print(code[:2], dist[:2])
    print(' speed up: %3.2f' % (py / (t2 - t1)))

    # load into cache
    b = vq3(obs, codes)
    t1 = time.time()
    for i in range(m):
        code, dist = vq3(obs, codes)
    t2 = time.time()
    print(' speed using C arrays:', (t2 - t1) / m)
    print(code[:2], dist[:2])
    print(' speed up: %3.2f' % (py / (t2 - t1)))
def RunMovie(self, event=None):
    import RandomArray
    start = clock()
    shift = RandomArray.randint(0, 0, (2,))
    NumFrames = 50
    for i in range(NumFrames):
        points = self.LEs.Points
        shift = RandomArray.randint(-5, 5, (2,))
        points += shift
        self.LEs.SetPoints(points)
        self.Canvas.Draw()
    print "running the movie took %f seconds to display %i frames" % (
        (clock() - start), NumFrames)
def test_sparse_vs_dense(self):
    RandomArray.seed(0)          # For reproducibility
    for s, l in (100, 100000), (10000, 100000), (100000, 100000):
        small = Numeric.sort(RandomArray.randint(0, 100000, (s,)))
        large = Numeric.sort(RandomArray.randint(0, 100000, (l,)))
        sparse1 = soomfunc.sparse_intersect(small, large)
        sparse2 = soomfunc.sparse_intersect(large, small)
        dense1 = soomfunc.dense_intersect(small, large)
        dense2 = soomfunc.dense_intersect(large, small)
        self.assertEqual(sparse1, sparse2)
        self.assertEqual(dense1, dense2)
        self.assertEqual(sparse1, dense1)
def _synth(self, freq, msdur, vol, risefall):
    t = arange(0, msdur / 1000.0, 1.0 / _Beeper._dafreq)
    s = zeros((t.shape[0], 2))

    # use trapezoidal envelope with risefall (below) time
    if msdur < 40:
        risefall = msdur / 2.0
    env = -abs((t - (t[-1] / 2)) / (risefall / 1000.0))
    env = env - min(env)
    env = where(less(env, 1.0), env, 1.0)

    bits = _Beeper._bits
    if bits < 0:
        bits = -bits
        signed = 1
    else:
        signed = 0
    fullrange = power(2, bits - 1)

    if freq is None:
        y = (env * vol * fullrange * \
             RandomArray.random(t.shape)).astype(Int16)
    else:
        y = (env * vol * fullrange * \
             sin(2.0 * pi * t * freq)).astype(Int16)

    if _Beeper._chans == 2:
        y = transpose(array([y, y]))
    s = pygame.sndarray.make_sound(y)
    return s
def readfiles(self):
    ncopy = 2
    for j in range(ncopy):
        infile = open('/Users/rfinn/SDSS/fieldDR4/myclusters.cat', 'r')
        for line in infile:
            if line.find('#') > -1:
                continue
            t = line.split()
            self.id.append(float(t[0]))      # C4 id name
            self.r200.append(float(t[3]))    # R200 in Mpc
            self.sigma.append(float(t[4]))   # sigma in km/s
            self.z.append(float(t[1]))
            c1 = Rand.randint(0, len(g.x1all))  # center on random galaxy in gal catalog
            #c2 = Rand.randint(0, len(g.x1))    # center on random galaxy in gal catalog
            #c3 = Rand.randint(0, len(g.x1))    # center on random galaxy in gal catalog
            self.x1.append(g.x1all[c1])
            self.x2.append(g.x2all[c1])
            self.x3.append(g.x3all[c1])
            #c1 = Rand.random()  # center on random position in simulation
            #c2 = Rand.random()  # center on random position in simulation
            #c3 = Rand.random()  # center on random
            #self.x1.append(c1*simL)
            #self.x2.append(c2*simL)
            #self.x3.append(c3*simL)
        infile.close()
    self.r200 = N.array(self.r200, 'd')
    self.sigma = N.array(self.sigma, 'd')
    self.z = N.array(self.z, 'd')
    self.x1 = N.array(self.x1, 'd')
    self.x2 = N.array(self.x2, 'd')
    self.x3 = N.array(self.x3, 'd')
def test2(shape=(100, 100)):
    dl = DynamicLattice.DynamicLattice(shape)
    a = RandomArray.randint(0, 2, shape)
    dl.display(a)
    for i in range(shape[0]/2):
        for j in range(shape[0]/2):
            a[i, j] = 0
            dl.display(a, (i, j))
def ThermalizingTransformer(atoms, T):
    """
    Thermalizes velocities to m v^2 / 2 = kB T / 2, with kB = 1
    """
    vRMS = numpy.sqrt(T / atoms.mass)
    atoms.velocities = RandomArray.normal(0, vRMS,
                                          numpy.shape(atoms.velocities))
def __init__(self, rows, cols, size):
    self.rows = rows
    self.cols = cols
    self.vectorLen = size
    self.weight = RandomArray.random((rows, cols, size))
    self.input = []
    self.loadOrder = []
    self.step = 0
    self.maxStep = 1000.0
def sampled_ds(parent_dataset, sample, name=None, filter_label=None, **kwargs):
    parent_len = len(parent_dataset)
    samp_len = int(parent_len * sample)
    record_ids = Numeric.sort(RandomArray.randint(0, parent_len, samp_len))
    if name is None:
        name = 'samp%02d_%s' % (sample * 100, parent_dataset.name)
    if filter_label is None:
        filter_label = '%.3g%% sample' % (sample * 100)
    return FilteredDataset(parent_dataset, record_ids,
                           name=name, filter_label=filter_label, **kwargs)
def statistics():
    pd = stats.norm(loc=1, scale=0.5)   # normal distribution N(1,0.5)
    n = 10000
    r = pd.rvs(n)                       # random variates
    import RandomArray
    r = RandomArray.normal(1, 0.1, n)
    s = stats.stats
    print pd.stats()
    print 'mean=%g stdev=%g skewness=%g kurtosis=%g' % \
          (s.mean(r), s.variation(r), s.skew(r), s.kurtosis(r))
    bin_counts, bin_min, min_width, noutside = s.histogram(r, numbins=50)
def RandomNonoverlappingListOfAtoms(L, neighborLocator, minDist=1.0, nAtoms=10,
                                    temperature=1.0, maxTriesQuiet=1000,
                                    mass=1.0, radius=0.5, color=vi.color.green):
    """
    Put atoms of given radius in box (0,L)^dim, at random except without
    overlaps less than minDist.
    """
    pos = RandomArray.uniform(radius, L - radius, (nAtoms, dim))
    vel = numpy.zeros((nAtoms, dim))
    atoms = ListOfAtoms(mass, radius, color, pos, vel)
    # Enforce no overlaps
    # neighborLocator.HalfNeighbors returns (n1, n2, r, dr)
    # with n1 > n2: remove n1 atoms
    overlappingAtoms = neighborLocator.HalfNeighbors(atoms, minDist)[0]
    nPairOverlap = len(overlappingAtoms)
    tries = 0
    while nPairOverlap > 0:
        tries += 1
        if tries % maxTriesQuiet == 0:
            print "After ", tries, "attempts, still have ", nPairOverlap, \
                "overlapping pairs of atoms: may need fewer atoms or larger box"
        posNew = [pos for n, pos in enumerate(atoms.positions) \
                  if n not in overlappingAtoms]
        nOverlap = nAtoms - len(posNew)
        newAtomPositions = RandomArray.uniform(radius, L - radius,
                                               (nOverlap, dim))
        posNew.extend(newAtomPositions)
        atoms.positions = numpy.array(posNew)
        overlappingAtoms = neighborLocator.HalfNeighbors(atoms, minDist)[0]
        nPairOverlap = len(overlappingAtoms)
    ThermalizingTransformer(atoms, temperature)
    return atoms
def RandomListOfAtoms(L, nAtoms=1000, temperature=1.0,
                      mass=1.0, radius=0.5, color=vi.color.green):
    """
    Put atoms of radius r in box (0,L)^dim
    """
    positions = RandomArray.uniform(radius, L - radius, (nAtoms, dim))
    velocities = numpy.zeros((nAtoms, dim))
    atoms = ListOfAtoms(mass, radius, color, positions, velocities)
    ThermalizingTransformer(atoms, temperature)
    return atoms
def init_velocities(T, ma, ndf=None):
    # Simple equipartition, no Maxwell-Boltzmann
    # For Maxwell-Boltzmann, use RandomArray.standard_normal()
    import RandomArray
    v = RandomArray.random((len(ma), 3)) - 0.5
    # Make the C.M. static
    vcm = v_cm(v, ma)
    for xyz in range(3):
        v[:, xyz] = v[:, xyz] - vcm[xyz]
    # Re-scale for correct kinetic energy
    kin = ekin(v, ma)
    # This will take into account the reduced no of degrees of freedom
    if ndf is None:
        ndf = 3 * len(ma) - 3
    temp = temp_from_ekin(kin, ndf)
    v = v * math.sqrt(T / temp)
    return v
def __init__(self, *args):
    apply(QWidget.__init__, (self,) + args)

    # make a QwtPlot widget
    self.plot = QwtPlot('A PyQwt and MinPack Demonstration', self)

    # initialize the noisy data
    scatter = 0.05
    x = arrayrange(-5.0, 5.0, 0.1)
    y = RandomArray.uniform(1.0-scatter, 1.0+scatter, shape(x)) * \
        function([1.0, 1.0, -2.0, 2.0], x)

    # fit from a reasonable initial guess
    guess = asarray([0.5, 1.5, -1.0, 3.0])
    yGuess = function(guess, x)
    solution = leastsq(function, guess, args=(x, y))
    yFit = function(solution[0], x)
    print solution

    # insert a few curves
    c1 = self.plot.insertCurve('data')
    c2 = self.plot.insertCurve('guess')
    c3 = self.plot.insertCurve('fit')

    # set curve styles
    self.plot.setCurvePen(c1, QPen(Qt.black))
    self.plot.setCurvePen(c2, QPen(Qt.red))
    self.plot.setCurvePen(c3, QPen(Qt.green))

    # copy the data
    self.plot.setCurveData(c1, x, y)
    self.plot.setCurveData(c2, x, yGuess)
    self.plot.setCurveData(c3, x, yFit)

    # set axis titles
    self.plot.setAxisTitle(QwtPlot.xBottom, 'x -->')
    self.plot.setAxisTitle(QwtPlot.yLeft, 'y -->')

    self.plot.enableLegend(1)
    self.plot.replot()
def PlotFit(traj=SandPConstantDollar, times=SandPTime, nRandomTrajs=0,
            a=0.04, output=False):
    plotEm = []
    eta, trajFlattened = RemoveMeanGrowth(traj, times)
    plotEm.append(times)
    plotEm.append(trajFlattened)
    randomTimes = scipy.arange(times[0], times[-1] + 1.e-10,
                               (times[-1] - times[0]) / (len(times) - 1))
    randomTrajs = []
    for i in range(nRandomTrajs):
        randomTrajs.append(
            scipy.exp(scipy.cumsum(a * (RandomArray.random(len(times)) - 0.5))))
    randomTrajsFlattened = randomTrajs[:]
    for rF in randomTrajsFlattened:
        eta, rF = RemoveMeanGrowth(rF, randomTimes)
        plotEm.append(randomTimes)
        plotEm.append(rF)
    pylab.plot(*plotEm)
    pylab.show()
    if output:
        outputSandP = file("SandPFlattened.dat", "w")
        for t, data in zip(times, trajFlattened):
            outputSandP.write("%s %s\n" % (t, data))
        outputSandP.close()
        outputRandom = file("OneDRandomFlattened.dat", "w")
        for rF in randomTrajs:
            eta, rF = RemoveMeanGrowth(rF, randomTimes)
            for t, data in zip(randomTimes, rF):
                outputRandom.write("%s %s\n" % (t, data))
            outputRandom.write("\n")
        outputRandom.close()
import RandomArray, time, sys
from tables import Filters
import tables.netcdf3 as NetCDF
import Scientific.IO.NetCDF

# create an n1dim by n2dim random array.
n1dim = 1000
n2dim = 10000
print 'reading and writing a %s by %s random array ..' % (n1dim, n2dim)
array = RandomArray.random((n1dim, n2dim))
filters = Filters(complevel=0, complib='zlib', shuffle=0)

# create a file, put a random array in it.
# no compression is used.

# first, use Scientific.IO.NetCDF
t1 = time.time()
file = Scientific.IO.NetCDF.NetCDFFile('test.nc', 'w')
file.createDimension('n1', None)
file.createDimension('n2', n2dim)
foo = file.createVariable('data', 'd', ('n1', 'n2',))
for n in range(n1dim):
    foo[n] = array[n]
file.close()
print 'Scientific.IO.NetCDF took', time.time() - t1, 'seconds'

# now use pytables NetCDF emulation layer.
t1 = time.time()
file = NetCDF.NetCDFFile('test.h5', 'w')
file.createDimension('n1', None)
file.createDimension('n2', n2dim)
# no compression (override default filters instance).
foo = file.createVariable('data', 'd', ('n1', 'n2',), filters=filters)
# this is faster
foo.append(array)
    sigma_A = sigma_y * Numeric.sqrt(Numeric.sum(x**2) / DELTA)
    sigma_B = sigma_y * Numeric.sqrt(N / DELTA)
    return A, B, sigma_y, sigma_A, sigma_B


# Test program
if __name__ == '__main__':
    # Use latex text formatting
    matplotlib.rc('text', usetex=True)

    # Create a test data set
    x = Numeric.arange(20)
    dy = RandomArray.standard_normal(20)
    y = 0.5 * x + 3 + dy

    A, B, dy, dA, dB = lregress(x, y)
    y2 = A + B * x
    print A, B, dy, dA, dB

    # pylab.plot(x,y,'o')
    # pylab.plot(x,y2,linewidth=3)
    # pylab.xlabel('x',fontsize='large')
    # pylab.ylabel('y',fontsize='large')
    # pylab.text(2, 13, 'A = %.1f $\pm$ %.1f (true: 3.0)' % (A,dA) )
    # pylab.text(2, 12, 'B = %.2f $\pm$ %.2f (true: 0.5)' % (B,dB) )
    # pylab.text(2, 11, 'dy = %.1f (true: 1.0)' % (dy) )
def OnTimerFraction(self, event):
    """Perform the particle-system simulation calculations"""
    points = self.points.coord.point
    colors = self.points.color.color
    '''Our calculations are going to need to know how much time has passed
    since our last event.  This is complicated by the fact that a "fraction"
    event is cyclic, returning to 0.0 after 1.0.'''
    f = event.fraction()
    if f < self.lastFraction:
        f += 1.0
    deltaFraction = (f - self.lastFraction)
    self.lastFraction = event.fraction()
    '''If we have received an event which is so soon after a previous event
    as to have a 0.0s delta (this does happen on some platforms), then we
    need to ignore this simulation tick.'''
    if not deltaFraction:
        return
    '''Each droplet has been moving at their current velocity for
    deltaFraction seconds, update their position with the results of this
    speed * time.  You'll note that this is not precisely accurate for a
    body under acceleration, but it makes for easy calculations.  Two
    machines running the same simulation will get *different* results here,
    as a faster machine will apply acceleration more frequently, resulting
    in a faster total velocity.'''
    points = points + (self.velocities * deltaFraction)
    '''We also cycle the droplet's colour value, though with the applied
    texture it's somewhat hard to see.'''
    colors = colors + (self.colorVelocities * deltaFraction)
    '''Now, apply acceleration to the current velocities such that the
    droplets have a new velocity for the next simulation tick.'''
    self.velocities[:, 1] = self.velocities[:, 1] + (gravity * deltaFraction)
    '''Find all droplets which have "retired" by falling below the y==0.0
    plane.'''
    below = less_equal(points[:, 1], 0.0)
    dead = nonzero(below)
    if isinstance(dead, tuple):
        # weird numpy change here...
        dead = dead[0]
    if len(dead):
        '''Move all dead droplets back to the emitter.'''
        def put(a, ind, b):
            for i in ind:
                a[i] = b
        put(points, dead, emitter)
        '''Re-spawn up to half of the droplets...'''
        dead = dead[:(len(dead)//2)+1]
        if len(dead):
            '''Reset color to initialColor, as we are sending out these
            droplets right now.'''
            put(colors, dead, initialColor)
            '''Assign slightly randomized versions of our initial velocity
            for each of the re-spawned droplets.  Replace the current
            velocities with the new velocities.'''
            if RandomArray:
                velocities = (RandomArray.random((len(dead), 3)) +
                              [-.5, 0.0, -.5]) * initialVelocityVector
            else:
                velocities = [
                    array((random.random()-.5, random.random(),
                           random.random()-.5), 'f') * initialVelocityVector
                    for x in xrange(len(dead))
                ]
            def copy(a, ind, b):
                for x in xrange(len(ind)):
                    i = ind[x]
                    a[i] = b[x]
            copy(self.velocities, dead, velocities)
    '''Now re-set the point/color fields so that the nodes notice the array
    has changed and they update the GL with the changed values.'''
    self.points.coord.point = points
    self.points.color.color = colors
import BSTIterative
import AVLIterative
import RandomArray

bt = BSTIterative.BST()
at = AVLIterative.AVL()
arraylength = 10000
l = RandomArray.getRandomArray(arraylength)

print("Processing random case")
for i in range(len(l)):
    bt.insertIter(l[i])
    at.insertIter(l[i])
print("BST level traversals: ", bt.traversecount)
print("AVL level traversals: ", at.traversecount)
print("Done")

bt = BSTIterative.BST()
at = AVLIterative.AVL()
print("Processing worst case")
for i in range(arraylength):
    bt.insertIter(i)
    at.insertIter(i)
print("BST level traversals: ", bt.traversecount)
def rand(*args):
    """rand(d1,...,dn) returns a matrix of the given dimensions which is
    initialized to random numbers from a uniform distribution in the
    range [0,1).
    """
    return RandomArray.random(args)
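# Usage sketch: a 2x3 array of uniform [0, 1) samples (relies on the legacy
# RandomArray module this wrapper imports).
m = rand(2, 3)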
import RandomArray
import numpy
from numpy.random import normal
import sys
import fcs
import pylab
sys.path.append('../')
import flow

if __name__ == '__main__':
    data = numpy.concatenate((RandomArray.normal(5, 1, (2000, 2)),
                              RandomArray.normal(7, 1, (2000, 2)),
                              RandomArray.normal(9, 1, (3000, 2)),
                              RandomArray.normal(11, 1, (2000, 2)),
                              RandomArray.normal(13, 1, (1000, 2))),
                             axis=0)

    # f = fcs.FCSReader("../data/3FITC-4PE.004.fcs")
    # print f.data.keys()
    # m = 10000
    # x1 = numpy.array((f.data['FSC-H'])[:m], 'd')
    # x2 = numpy.array((f.data['SSC-H'])[:m], 'd')
    # x3 = numpy.array((f.data['FL1-H'])[:m], 'd')
    # x4 = numpy.array((f.data['FL2-H'])[:m], 'd')
    # print min(x1), max(x1)
    # print min(x2), max(x2)
    # print min(x3), max(x3)
    # print min(x4), max(x4)
    # data_unscaled = numpy.transpose([x1, x2, x3, x4])
    # #data = numpy.transpose([(x1-min(x1))/max(x1), (x2-min(x2))/max(x2), (x3-min(x3))/max(x3), (x4-min(x4))/max(x4)])
def _maxwellboltzmanndistribution(masses, temp):
    xi = RandomArray.standard_normal(shape=(len(masses), 3))
    momenta = xi * Numeric.sqrt(masses * temp)[:, Numeric.NewAxis]
    return momenta
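# Usage sketch (hypothetical values): momenta for ten unit masses at kT = 0.5.
p = _maxwellboltzmanndistribution(Numeric.ones(10, Numeric.Float), 0.5)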
def test(shape=(100, 100)):
    dl = DynamicLattice.DynamicLattice(shape)
    for n in range(20):
        a = RandomArray.randint(0, 2, shape)
        dl.display(a)
import sys, cPickle, time, commands, os, re
import RandomArray
from Numeric import *
from Asap.testtools import ReportTest

# cpu time: time.clock().  Wall clock time: time.time()

host = commands.getoutput("hostname")
timesteps = 20
dbfilename = "bigtiming.dat"
selfcheckfilename = "bigtiming-selfcheck.dat"
logfilename = "bigtiming.log"
asapversion = GetVersion()
when = time.strftime("%a %d %b %Y %H:%M", time.localtime(time.time()))
RandomArray.seed(42, 12345)

PrintVersion(1)
print "Running ASAP timing on "+host+"."
if re.match("^n\d\d\d.dcsc.fysik.dtu.dk$", host):
    print " This is a d512 node on Niflheim."
    fullhost = "niflheim-d512/%s" % (host.split(".")[0])
    host = "niflheim-d512"
elif re.match("^[stu]\d\d\d.dcsc.fysik.dtu.dk$", host):
    print " This is an s50 node on Niflheim."
    fullhost = "niflheim-s50/%s" % (host.split(".")[0])
    host = "niflheim-s50"
else:
    fullhost = host
print "Current time is "+when
print ""
def randomBits(length):
    address = RandomArray.randint(0, 2, length)
    return address
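# Usage sketch: a length-16 vector of 0/1 bits, drawn the same way as above.
bits = randomBits(16)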
sampleRate = 44100.
c = 300.  # meters per second
spectralRange = sampleRate / 2

import numpy
import math
import RandomArray
import stats

T = r/c
spectrum = numpy.zeros(nBins-1, numpy.complex)
for x in RandomArray.normal(0, standardMicrophoneDeviation, (nSpeakers,)):
    t1root = 1+x
    t2root = 1-x
    print t1root, t2root
    spectrum += numpy.array([
        complex(1, t1root*w*T)
        * math.e**complex(math.cos(w*T*t1root), math.sin(w*T*t1root))
        / x / w / w / T / T
        - complex(1, t2root*w*T)
        * math.e**complex(math.cos(w*T*t2root), math.sin(w*T*t2root))
        / x / w / w / T / T
        for w in [math.pi*2*normfreq*spectralRange/nBins
                  for normfreq in xrange(1, nBins)]
    ])

import Gnuplot
gp = Gnuplot.Gnuplot(persist=1)
gp('set data style lines')
gp.plot(abs(spectrum),
        [bin.real for bin in spectrum],
        [bin.imag for bin in spectrum],
        numpy.zeros(nBins))
gp.hardcopy(filename="IncoherenceSimulation.png", terminal="png")
from Numeric import dot, sum
import sys, numeric_version
import RandomArray
import LinearAlgebra

print sys.version
print "Numeric version:", numeric_version.version

RandomArray.seed(123, 456)
a = RandomArray.normal(0, 1, (100, 10))
f = RandomArray.normal(0, 1, (10, 30))
e = RandomArray.normal(0, 0.1, (100, 30))
print "Got to seed:", RandomArray.get_seed()

b = dot(a, f) + e
(x, res, rank, s) = LinearAlgebra.linear_least_squares(a, b)

f_res = sum((b - dot(a, f))**2)
x_res = sum((b - dot(a, x))**2)

print "'Planted' residues, upper bound for optimal residues:"
print f_res
print "Claimed residues:"
print res
print "Actual residues:"
print x_res
print "Ratio between actual and claimed (should be 1):"
print x_res/res
print "Ratio between actual and planted (should be <1):"
print x_res/f_res
        data = N.floor((data - self.min)/self.bin_width).astype(N.Int)
        nbins = self.array.shape[0]
        histo = N.add.reduce(weights*N.equal(N.arange(nbins)[:, N.NewAxis],
                                             data), -1)
        histo[-1] = histo[-1] + N.add.reduce(N.repeat(weights,
                                                      N.equal(nbins, data)))
        self.array[:, 1] = self.array[:, 1] + histo


if __name__ == '__main__':

    if N.package == 'Numeric':
        import RandomArray as random
    elif N.package == 'NumPy':
        from numpy import random

    nsamples = 1000
    random.seed(12, 13)
    data = random.normal(1.0, 0.5, nsamples)
    h = Histogram(data, 50)  # use 50 bins between min & max samples
    h.normalizeArea()        # make probabilities in histogram
    x = h.getBinIndices()
    y = h.getBinCounts()

    # add many more samples:
    nsamples2 = nsamples*100
    data = random.normal(1.0, 0.5, nsamples2)
    h.addData(data)
    h.normalizeArea()
    x2 = h.getBinIndices()
    y2 = h.getBinCounts()

    # and more:
def _random_norm(shape):
    matrix = asarray(RandomArray.random(shape), MATCODE)
    return _normalize(matrix)
import RandomArray
import random


def SortedArray(n):
    A = n
    for i in range(len(A) - 1):
        for z in range(len(A) - i - 1):
            print(A)
            if A[z] > A[z + 1]:
                A[z], A[z + 1] = A[z + 1], A[z]


#n = input('Input the number:')
#Ar = [5,6,7,9,0,1]
n = random.randint(2, 20)
SortedArray(RandomArray.RandomArray(n))
Classes:
MarkovModel     Holds the description of a markov model

"""
import math

from Numeric import *
import RandomArray

import StringIO  # StringIO is in Numeric's namespace, so import this after.

from Bio import listfns

#RandomArray.seed(0, 0)   # use 0 for debugging
RandomArray.seed()

VERY_SMALL_NUMBER = 1E-300
LOG0 = log(VERY_SMALL_NUMBER)

MATCODE = Float64


class MarkovModel:
    def __init__(self, states, alphabet,
                 p_initial=None, p_transition=None, p_emission=None):
        self.states = states
        self.alphabet = alphabet
        self.p_initial = p_initial
        self.p_transition = p_transition
        self.p_emission = p_emission

    def __str__(self):
    B.append(max_value)
    i, j = 0, 0
    C = [None]*size
    for k in range(0, size):
        if A[i] <= B[j]:
            C[k] = A[i]
            i += 1
        else:
            C[k] = B[j]
            j += 1
    #print "merged %s and %s and got %s" % (A, B, C)
    return C


times = []
for i in range(0, 10):
    A = RandomArray.getRandomArray(10000)
    start = time.clock()
    print(Mergesort(A))
    stop = time.clock()
    print("Process Finished in %s seconds" % (stop-start))
    times.append(stop-start)

for i in range(0, 10):
    print("%s" % (times[i]))
assert allclose(computeResiduals(As, None, lmbd, Q), zeros(kconv), 0.0, tol)
assert allclose(lmbd, lmbd_exact, tol*tol, 0.0)
print 'OK'

#-------------------------------------------------------------------------------
# Test 2: K = None

print 'Test 2',

lmbd_exact = zeros(ncv, 'd')
for k in xrange(ncv):
    lmbd_exact[k] = A[k, k]/M[k, k]

X0 = RandomArray.random((n, ncv))
kconv, lmbd, Q, it, it_inner = jdsym.jdsym(As, Ms, None, ncv, 0.0, tol, 150,
                                           itsolvers.qmrs,
                                           jmin=5, jmax=10, eps_tr=1e-4, clvl=1)

assert ncv == kconv
assert allclose(computeResiduals(As, Ms, lmbd, Q), zeros(kconv), 0.0, normM*tol)
assert allclose(lmbd, lmbd_exact, normM*tol*tol, 0.0)
print 'OK'

#-------------------------------------------------------------------------------
# Test 3: general case

print 'Test 3',
def randn(*args):
    """u = randn(d0,d1,...,dn) returns zero-mean, unit-variance Gaussian
    random numbers in an array of size (d0,d1,...,dn)."""
    x1 = RandomArray.random(args)
    x2 = RandomArray.random(args)
    return sqrt(-2*log(x1))*cos(2*pi*x2)
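# The two uniform draws above are combined via the Box-Muller transform.
# Usage sketch: 1000 standard-normal samples.
samples = randn(1000)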
        x = []
        y = []
        for l in yprof.readlines():
            l = l.split()
            x.append(float(l[0]))
            y.append(float(l[1]))
        i = 0
        while i * delta[1] <= x[0]:
            vartopg[0, i, :] = y[0]
            i = i + 1
            if i == numx:
                break
        for d in range(0, len(x) - 1):
            slope = (y[d + 1] - y[d]) / (x[d + 1] - x[d])
            if i == numx:
                break
            while i * delta[1] <= x[d + 1]:
                vartopg[0, i, :] = y[d] + (i * delta[1] - x[d]) * slope
                i = i + 1
                if i == numx:
                    break
        for i in range(i, numx):
            vartopg[0, i, :] = y[d]
    else:
        vartopg[0, :, :] = 0.0
    vartopg[0, :, :] = vartopg[0, :, :] + RandomArray.uniform(
        0.0, amplitude, (numy, numx)).astype(Numeric.Float32)

cffile.close()
print "Error =", error def trainPattern(self, pattern): # will depend on self.step x, y, d = self.winner(pattern) error += self.updateMap(pattern, x, y) print "Winner is weight at (", x, y, ") (diff was", d, ") error = ", \ error def test(self): import numpy.oldnumeric as Numeric self.loadOrder = range(len(self.input)) histogram = Numeric.zeros((self.cols, self.rows), 'i') for p in self.loadOrder: x, y, d = self.winner(self.input[p]) # print "Input[%d] =" % p, self.input[p],"(%d, %d)" % (x, y) histogram[x][y] += 1 for r in range(self.rows): for c in range(self.cols): print "%5d" % histogram[c][r], print "" print "" if __name__ == '__main__': import numpy.oldnumeric as Numeric s = SOM(5, 7, 5) # rows, cols; length of high-dimensional input s.setInputs( RandomArray.random((100, 5))) s.maxStep = 100 s.train() s.test()
from pysparse.spmatrix import *
import RandomArray
import time

n = 1000
nnz = 50000
A = ll_mat(n, n, nnz)
R = RandomArray.randint(0, n, (nnz, 2))

t1 = time.clock()
for k in xrange(nnz):
    A[R[k, 0], R[k, 1]] = k
print 'Time for populating matrix: %8.2f sec' % (time.clock() - t1, )
print A.nnz

B = A[:, :]
A.shift(-1.0, B)
print A
    if show:
        p.show()
    LAST = p
    return p


def psprint(dest="-"):
    if TABLE:
        TABLE.write_eps(dest)


if __name__ == '__main__':
    import sys, Numeric, RandomArray

    x = Numeric.arrayrange(-10, 10); y = x**2; e = y/4

    a = RandomArray.random([20, 20, 3])
    imagesc(a, x=range(-10, 10), y=range(-10, 10))
    drawnow()
    sys.stdin.readline()

    a = Numeric.array([[[1, 0, 0],
                        [0, 1, 0],
                        [0, 0, 1],
                        [0, 0, 0]],
                       [[0, 0, 0],
                        [0, 0, 0],
                        [0, 0, 0],
                        [0, 0, 0]],
                       [[0, 0, 0],
                        [0, 0, 0],