def fillcontinents(self,ax,color=0.8):
    """
    Fill continents.

    ax - current axis instance.
    color - color to fill continents (default gray).
    """
    # define corners of map domain.
    p1 = (self.llcrnrx,self.llcrnry); p2 = (self.urcrnrx,self.urcrnry)
    p3 = (self.llcrnrx,self.urcrnry); p4 = (self.urcrnrx,self.llcrnry)
    for x,y in self.coastpolygons:
        xa = N.array(x,'f')
        ya = N.array(y,'f')
        # clip to map domain.
        xa = N.clip(xa, self.xmin, self.xmax)
        ya = N.clip(ya, self.ymin, self.ymax)
        # check to see if all four corners of domain in polygon (if so,
        # don't draw since it will just fill in the whole map).
        test1 = N.fabs(xa-self.xmax) < 10.
        test2 = N.fabs(xa-self.xmin) < 10.
        test3 = N.fabs(ya-self.ymax) < 10.
        test4 = N.fabs(ya-self.ymin) < 10.
        hasp1 = sum(test1*test3)
        hasp2 = sum(test2*test3)
        hasp4 = sum(test2*test4)
        hasp3 = sum(test1*test4)
        if not hasp1 or not hasp2 or not hasp3 or not hasp4:
            xy = zip(xa.tolist(),ya.tolist())
            poly = Polygon(xy,facecolor=color,edgecolor=color,linewidth=0)
            ax.add_patch(poly)
def demo():
    """
    Non-interactive demonstration of the clusterers with simple 2-D data.
    """
    # use a set of tokens with 2D indices
    tokens = [Token(FEATURES=numarray.array([3, 3])),
              Token(FEATURES=numarray.array([1, 2])),
              Token(FEATURES=numarray.array([4, 2])),
              Token(FEATURES=numarray.array([4, 0])),
              Token(FEATURES=numarray.array([2, 3])),
              Token(FEATURES=numarray.array([3, 1]))]

    # test k-means using the euclidean distance metric, 2 means and repeat
    # clustering 10 times with random seeds
    clusterer = KMeansClusterer(2, euclidean_distance, repeats=10)
    clusterer.cluster(tokens, True)
    print 'using clusterer', clusterer
    print 'clustered', str(tokens)[:60], '...'

    # classify a new token
    token = Token(FEATURES=numarray.array([3, 3]))
    print 'classify(%s)' % token, clusterer.classify(token)
    print token

    # test the GAAC clusterer with 4 clusters
    clusterer = GroupAverageAgglomerativeClusterer(4)
    print 'using clusterer', clusterer
    clusterer.cluster(tokens, True)
    #print 'clustered', str(tokens)[:60], '...'
    print 'clustered', tokens

    # show the dendogram
    clusterer.dendogram().show()

    # classify a new token
    token = Token(FEATURES=numarray.array([3, 3]))
    print 'classify(%s)' % token, clusterer.classify(token)
    print token
    print

    # test the EM clusterer with means given by k-means (2) and
    # dimensionality reduction
    clusterer = KMeansClusterer(2, euclidean_distance, svd_dimensions=1)
    clusterer.cluster(tokens)
    means = clusterer.means()
    clusterer = ExpectationMaximizationClusterer(means, svd_dimensions=1)
    clusterer.cluster(tokens, True)
    print 'using clusterer', clusterer
    print 'clustered', str(tokens)[:60], '...'

    # classify a new token
    token = Token(FEATURES=numarray.array([3, 3]))
    print 'classify(%s)' % token, clusterer.classify(token)
    print token

    # show the classification probabilities
    token = Token(FEATURES=numarray.array([2.2, 2]))
    print 'classification_probdist(%s)' % token
    clusterer.classification_probdist(token)
    for sample in token['CLUSTER_PROBDIST'].samples():
        print '%s => %.0f%%' % (sample,
                                token['CLUSTER_PROBDIST'].prob(sample) * 100)
def setParameters(self, mu=None, sigma=None, wi=None, sigma_type='full',
                  tied_sigma=False, isAdjustable=False):
    #============================================================
    # set the mean :
    # self.mean[i] = the mean for dimension i
    # self.mean.shape = (self.nvalues, q1,q2,...,qn)
    #       where qi is the size of discrete parent i
    try:
        self.mean = na.array(mu, shape=[self.nvalues] + self.discrete_parents_shape,
                             type='Float32')
    except:
        raise ValueError('Could not convert mu to numarray of shape : %s, discrete parents = %s'
                         % (str(self.discrete_parents_shape), str(self.discrete_parents)))

    #============================================================
    # set the covariance :
    # self.sigma[i,j] = the covariance between dimension i and j
    # self.sigma.shape = (nvalues,nvalues,q1,q2,...,qn)
    #       where qi is the size of discrete parent i
    try:
        self.sigma = na.array(sigma,
                              shape=[self.nvalues, self.nvalues] + self.discrete_parents_shape,
                              type='Float32')
    except:
        raise ValueError('Not a valid covariance matrix')

    #============================================================
    # set the weights :
    # self.weights[i,j] = the regression for dimension i and continuous parent j
    # self.weights.shape = (nvalues,x1,x2,...,xn,q1,q2,...,qn)
    #       where xi is the size of continuous parent i
    #       and qi is the size of discrete parent i
    try:
        self.weights = na.array(wi, shape=[self.nvalues] + self.parents_shape,
                                 type='Float32')
    except:
        raise ValueError('Not a valid weight')
def get_quad_strengths(self):
    particle = self.get_new_particle()
    brho = particle.ReferenceBRho()
    kxs = []
    kys = []
    ss = []
    s = 0.0
    for element in self.beamline:
        kx = 0.0
        ky = 0.0
        strength = element.Strength()
        length = element.OrbitLength(particle)
        s += length
        if (element.Type() == "quadrupole") or \
           (element.Type() == "thinQuad"):
            kx = strength/brho
            ky = -kx
        elif (element.Type() == "CF_sbend") or \
             (element.Type() == "CF_rbend"):
            ky = -element.getQuadrupole()/brho/length
            kx = -ky + strength**2/(brho**2)
        else:
            pass
        kxs.append(kx)
        kys.append(ky)
        ss.append(s)
        element.propagate(particle)
    return (numarray.array(ss), numarray.array(kxs), numarray.array(kys))
def func(self, X, V):
    k = self.C.TFdata.k
    v1 = self.C.TFdata.v1
    w1 = self.C.TFdata.w1

    if k >= 0:
        J_coords = self.F.sysfunc.J_coords
        w = sqrt(k)

        q = v1 - (1j/w)*matrixmultiply(self.F.sysfunc.J_coords, v1)
        p = w1 + (1j/w)*matrixmultiply(transpose(self.F.sysfunc.J_coords), w1)

        p /= linalg.norm(p)
        q /= linalg.norm(q)

        p = reshape(p, (p.shape[0],))
        q = reshape(q, (q.shape[0],))

        direc = conjugate(1/matrixmultiply(transpose(conjugate(p)), q))
        p = direc*p

        l1 = firstlyapunov(X, self.F.sysfunc, w, J_coords=J_coords, p=p, q=q)

        return array([l1])
    else:
        return array([1])
def demo_em():
    # example from figure 14.10, page 519, Manning and Schutze
    tokens = [Token(FEATURES=numarray.array(f))
              for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]]
    means = [[4, 2], [4, 2.01]]

    clusterer = ExpectationMaximizationClusterer(means, bias=0.1)
    clusterer.cluster(tokens, True, trace=True)
    print 'clustered', tokens
    for c in range(2):
        print 'cluster %d' % c
        print 'prior', clusterer._priors[c]
        print 'mean ', clusterer._means[c]
        print 'covar', clusterer._covariance_matrices[c]

    # classify a new token
    token = Token(FEATURES=numarray.array([2, 2]))
    print 'classify(%s)' % token, clusterer.classify(token)
    print token

    # show the classification probabilities
    token = Token(FEATURES=numarray.array([2, 2]))
    print 'classification_probdist(%s)' % token
    clusterer.classification_probdist(token)
    for sample in token['CLUSTER_PROBDIST'].samples():
        print '%s => %.0f%%' % (sample,
                                token['CLUSTER_PROBDIST'].prob(sample) * 100)
def plot(prf,i1,i2,xmin,xmax,ymin,ymax,sky,ierase):
    pgpanl(1,1)
    if ierase == 0:
        pgeras()
    pgvsiz(0.5,6.5,2.7,4.1)
    ymax=-1.e33
    ymin=+1.e33
    for line in prf:
        y=line[0]-sky
        if line[3] < (i2-i1)/2. and y > 0:
            xmax=line[3]
            if -2.5*log10(y) > ymax:
                ymax=-2.5*log10(y)
            if -2.5*log10(y) < ymin:
                ymin=-2.5*log10(y)
    ymax=ymax+0.1*abs(ymax-ymin)
    ymin=ymin-0.1*abs(ymax-ymin)
    xmin=prf[1][3]-0.05*(xmax-prf[1][3])
    xmax=xmax+0.1*xmax
    pgswin(xmin,xmax,ymax,ymin)
    pgbox('bcnst',0.,0,'bcnst',0.,0)
    for line in prf:
        if line[6] == 0:
            pgsci(2)
        elif line[6] == -1:
            pgsci(3)
        else:
            pgsci(4)
        try:
            y=line[0]-sky
            xs=numarray.array([line[3]])
            ys=numarray.array([-2.5*log10(y)])
            if y > 0:
                pgpt(xs,ys,5)
        except:
            pass
    pgsci(1)
    return
def readpsoutput(self,file,z):
    input=open(file,'r')
    mass=[]
    frac=[]
    for line in input:
        t=line.split()
        mass.append(float(t[2]))
        frac.append(float(t[3]))
        #try:
        #    frac.append(float(t[3]))
        #except:
        #    frac.append(1.e-10)
    input.close()
    mass=N.array(mass,'d')
    sigma=((mass/(1.2e15/h)*N.sqrt(.7+.3*(1+z)**3))**(1./3.))*1000.
    frac=N.array(frac,'d')
    sigma=N.log10(sigma)
    #maccret=(frac*mass)
    maccret=N.zeros(len(mass),'d')
    for i in range(len(mass)):
        maccret[i]=mass[i]*frac[i]
        print i,mass[i],frac[i],maccret[i]
    frac=N.log10(frac)
    self.sigma=sigma
    self.mass=mass
    self.frac=frac
    self.maccret=maccret
def starts(n):
    n = N.array([n], N.Int64)
    m2 = N.zeros([64], N.UInt64)
    while n[0] < 0:
        n += PERIOD
    while n[0] > PERIOD:
        n -= PERIOD
    if n[0] == 0:
        return 1
    temp = N.array([1], N.UInt64)
    ival = N.array([0], N.UInt64)
    for i in range(64):
        m2[i] = temp[0]
        temp = (temp << 1) ^ ((temp.astype(N.Int64) < 0)[0] and POLY or 0)
        temp = (temp << 1) ^ ((temp.astype(N.Int64) < 0)[0] and POLY or 0)
    for i in range(62, -1, -1):
        if ((n >> i) & 1)[0]:
            break
    ran = N.array([2], N.UInt64)
    while (i > 0):
        temp[0] = 0
        for j in range(64):
            if ((ran >> j) & 1)[0]:
                temp ^= m2[j:j + 1]
        ran[0] = temp[0]
        i -= 1
        if ((n >> i) & 1)[0]:
            ran = (ran << 1) ^ ((ran.astype(N.Int64) < 0)[0] and POLY or 0)
    return ran[0]
def run_kut5(F, x, y, h):
    # Runge-Kutta-Fehlberg formulas
    C = array([37./378, 0., 250./621, 125./594,
               0., 512./1771])
    D = array([2825./27648, 0., 18575./48384,
               13525./55296, 277./14336, 1./4])
    n = len(y)
    K = zeros((6, n), type=Float64)
    K[0] = h * F(x, y)
    K[1] = h * F(x + 1./5*h, y + 1./5*K[0])
    K[2] = h * F(x + 3./10*h, y + 3./40*K[0] + 9./40*K[1])
    K[3] = h * F(x + 3./5*h, y + 3./10*K[0] - 9./10*K[1]
                 + 6./5*K[2])
    K[4] = h * F(x + h, y - 11./54*K[0] + 5./2*K[1]
                 - 70./27*K[2] + 35./27*K[3])
    K[5] = h * F(x + 7./8*h, y + 1631./55296*K[0]
                 + 175./512*K[1] + 575./13824*K[2]
                 + 44275./110592*K[3] + 253./4096*K[4])
    # Initialize arrays {dy} and {E}
    E = zeros((n), type=Float64)
    dy = zeros((n), type=Float64)
    # Compute solution increment {dy} and per-step error {E}
    for i in range(6):
        dy = dy + C[i] * K[i]
        E = E + (C[i] - D[i]) * K[i]
    # Compute RMS error e
    e = sqrt(sum(E**2) / n)
    return dy, e
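# Hedged usage sketch for run_kut5 above.  It assumes the numarray-style
# module-level names (array, zeros, Float64, sqrt, sum) that the function body
# itself relies on are already imported.  A single step of y' = y from x = 0
# with h = 0.1 should give dy close to exp(0.1) - 1 and a tiny error estimate.
def _run_kut5_example():
    def F(x, y):
        return y
    dy, e = run_kut5(F, 0.0, array([1.0]), 0.1)
    print dy[0], e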
def contiguous(self, source, typeCode=None):
    """Get contiguous array from source

    source -- numarray Python array (or compatible object)
        for use as the data source.  If this is not a contiguous
        array of the given typeCode, a copy will be made,
        otherwise will just be returned unchanged.
    typeCode -- optional 1-character typeCode specifier for
        the numarray.array function.

    All gl*Pointer calls should use contiguous arrays, as non-
    contiguous arrays will be re-copied on every rendering pass.
    Although this doesn't raise an error, it does tend to slow
    down rendering.
    """
    typeCode = GL_TYPE_TO_ARRAY_MAPPING[typeCode]
    if isinstance(source, numarray.ArrayType):
        if source.iscontiguous() and (typeCode is None or typeCode == source.typecode()):
            return source
        else:
            # We have to do astype to avoid errors about unsafe conversions
            # XXX Confirm that this will *always* create a new contiguous array
            # XXX Allow a way to make this raise an error for performance reasons
            # XXX Guard against wacky conversion types like uint to float, where
            # we really don't want to have the C-level conversion occur.
            return numarray.array(source.astype(typeCode), typeCode)
    elif typeCode:
        return numarray.array(source, typeCode)
    else:
        return numarray.array(source)
def read_transmission(table, legend_value, read_err=True):
    legend_by = getattr(table.attrs, 'legend_by', None)

    def rows():
        if legend_by:
            return table.where(getattr(table.cols, legend_by) == legend_value)
        else:
            return table.iterrows()

    if hasattr(table.cols, 'samples'):
        samples = array([abs(x['samples']) for x in rows()])
        transmission_sum = array([x['transmission_sum'] for x in rows()])
        if len(transmission_sum.shape) > len(samples.shape):
            samples = samples[:, NewAxis]
        transmission_avg = transmission_sum / samples
        if read_err and hasattr(table.cols, 'transmission_sqsum'):
            transmission_sqsum = array([x['transmission_sqsum'] for x in rows()])
            transmission_var = abs(transmission_sqsum/samples - transmission_avg**2)
            # abs is needed for cases where var==0 mathematically, and therefore
            # possibly var<0 numerically
            transmission_stdev = transmission_var**0.5
            transmission_err = transmission_stdev/(samples**0.5)
        else:
            transmission_err = None
    else:
        transmission_avg = array([x['transmission'] for x in rows()])
        if read_err and hasattr(table.cols, 'transmission_err'):
            transmission_err = array([x['transmission_err'] for x in rows()])
        else:
            transmission_err = None
    return transmission_avg, transmission_err
def go(i):
    str1 = "A" * i
    str2 = str1[:i//2] + "B" + str1[i//2+1:]
    print i,

    t1 = time.time()
    assert hamming3(str1, str2) == 1
    hamming3(str1, str2)
    hamming3(str1, str2)
    hamming3(str1, str2)
    hamming3(str1, str2)
    hamming3(str1, str2)
    hamming3(str1, str2)
    hamming3(str1, str2)
    hamming3(str1, str2)
    hamming3(str1, str2)
    t2 = time.time()
    print makeSigDigs(t2-t1, 2),

    num1 = numarray.array(str1)
    num2 = numarray.array(str2)
    t1 = time.time()
    assert numeric_hamming4(num1, num2) == 1
    numeric_hamming4(num1, num2)
    numeric_hamming4(num1, num2)
    numeric_hamming4(num1, num2)
    numeric_hamming4(num1, num2)
    numeric_hamming4(num1, num2)
    numeric_hamming4(num1, num2)
    numeric_hamming4(num1, num2)
    numeric_hamming4(num1, num2)
    numeric_hamming4(num1, num2)
    t2 = time.time()
    print makeSigDigs(t2-t1, 2)
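# numeric_hamming4 is not shown in this snippet; the following is only a
# sketch of an array-based Hamming distance consistent with the benchmark
# above (an assumption, not the original implementation): it counts the
# positions where two equal-length arrays differ.
def numeric_hamming_sketch(num1, num2):
    # element-wise comparison yields a 0/1 array; summing it counts mismatches
    return numarray.sum(num1 != num2)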
def readfile():
    input = open('mosaic_extract.tbl', 'r')
    number = []
    fluxiso = []
    fluxerriso = []
    ximage = []
    yimage = []
    for line in input:
        if line.find('NAXIS1') > -1:  # image dimension in x
            f = line.split()
            xmax = float(f[3])
            continue
        if line.find('NAXIS2') > -1:  # image dimension in y
            f = line.split()
            ymax = float(f[3])
            continue
        if line.find('#') > -1:  # skip comment lines
            continue
        if line.find('\\') > -1:  # skip header lines
            continue
        if line.find('|') > -1:  # skip header lines
            continue
        t = line.split()
        number.append(float(t[0]))
        fluxiso.append(float(t[5]))
        fluxerriso.append(float(t[6]))
        ximage.append(float(t[1]))
        yimage.append(float(t[2]))
    number = N.array(number, 'f')
    fluxiso = N.array(fluxiso, 'f')
    fluxerriso = N.array(fluxerriso, 'f')
    ximage = N.array(ximage, 'f')
    yimage = N.array(yimage, 'f')
    input.close()
    return number, ximage, yimage, fluxiso, fluxerriso, xmax, ymax
def integrate(F, x, y, xStop, h, tol=1.0e-6):
    def run_kut5(F, x, y, h):
        # Runge-Kutta-Fehlberg formulas
        C = array([37.0 / 378, 0.0, 250.0 / 621, 125.0 / 594, 0.0, 512.0 / 1771])
        D = array([2825.0 / 27648, 0.0, 18575.0 / 48384, 13525.0 / 55296,
                   277.0 / 14336, 1.0 / 4])
        n = len(y)
        K = zeros((6, n), type=Float64)
        K[0] = h * F(x, y)
        K[1] = h * F(x + 1.0 / 5 * h, y + 1.0 / 5 * K[0])
        K[2] = h * F(x + 3.0 / 10 * h, y + 3.0 / 40 * K[0] + 9.0 / 40 * K[1])
        K[3] = h * F(x + 3.0 / 5 * h, y + 3.0 / 10 * K[0] - 9.0 / 10 * K[1] + 6.0 / 5 * K[2])
        K[4] = h * F(x + h, y - 11.0 / 54 * K[0] + 5.0 / 2 * K[1]
                     - 70.0 / 27 * K[2] + 35.0 / 27 * K[3])
        K[5] = h * F(x + 7.0 / 8 * h,
                     y + 1631.0 / 55296 * K[0] + 175.0 / 512 * K[1]
                     + 575.0 / 13824 * K[2] + 44275.0 / 110592 * K[3]
                     + 253.0 / 4096 * K[4])
        # Initialize arrays {dy} and {E}
        E = zeros((n), type=Float64)
        dy = zeros((n), type=Float64)
        # Compute solution increment {dy} and per-step error {E}
        for i in range(6):
            dy = dy + C[i] * K[i]
            E = E + (C[i] - D[i]) * K[i]
        # Compute RMS error e
        e = sqrt(sum(E ** 2) / n)
        return dy, e

    X = []
    Y = []
    X.append(x)
    Y.append(y)
    stopper = 0  # Integration stopper(0 = off, 1 = on)
    for i in range(10000):
        dy, e = run_kut5(F, x, y, h)
        # Accept integration step if error e is within tolerance
        if e <= tol:
            y = y + dy
            x = x + h
            X.append(x)
            Y.append(y)
            # Stop if end of integration range is reached
            if stopper == 1:
                break
        # Compute next step size from Eq. (7.24)
        if e != 0.0:
            hNext = 0.9 * h * (tol / e) ** 0.2
        else:
            hNext = h
        # Check if next step is the last one; if so, adjust h
        if (h > 0.0) == ((x + hNext) >= xStop):
            hNext = xStop - x
            stopper = 1
        h = hNext
    return array(X), array(Y)
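# Hedged usage sketch for the adaptive driver above, assuming the
# numarray-style module-level names (array, zeros, Float64, sqrt, sum) that
# the nested run_kut5 already relies on.  Integrates y' = -y, y(0) = 1 over
# [0, 2]; the last solution value should be close to exp(-2).
def _integrate_example():
    def F(x, y):
        return array([-y[0]])
    X, Y = integrate(F, 0.0, array([1.0]), 2.0, 0.1)
    print X[-1], Y[-1][0]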
def plotsigsff(sig, sf, file, nbin):
    psplot = file + ".ps"
    psplotinit(psplot)
    tot = N.ones(len(sf), 'f')
    (sigbin, sfbin) = my.binitsumequal(sig, sf, nbin)
    (sigbin, totbin) = my.binitsumequal(sig, tot, nbin)
    print sfbin
    print totbin
    (sff, sfferr) = my.ratioerror(sfbin, totbin)
    ppgplot.pgbox("", 0.0, 0, "L", 0.0, 0)
    ymin = -.05
    ymax = 1.05
    xmin = min(sig) - 10.
    #xmax=max(sig)-200.
    xmax = 350.
    ppgplot.pgenv(xmin, xmax, ymin, ymax, 0)
    ppgplot.pglab("\gS\d5\u (gal/Mpc\u2\d)", "Fraction EW([OII])>4 \(2078)", "")
    ppgplot.pgsls(1)  #dotted
    ppgplot.pgslw(4)  #line width
    sig = N.array(sig, 'f')
    sff = N.array(sff, 'f')
    ppgplot.pgsci(2)
    ppgplot.pgline(sigbin, sff)
    ppgplot.pgsci(1)
    ppgplot.pgpt(sigbin, sff, 17)
    my.errory(sigbin, sff, sfferr)
    ppgplot.pgend()
def _conv_numpy_to_numarray(array):
    _numarray_deprecation()
    kind = array.dtype.kind
    if kind == 'S':  # homogeneous string array
        # We can't use the array protocol to do this conversion
        if array.shape == ():
            array = array.item()
        return numarray.strings.array(
            buffer=array, shape=array.shape, itemsize=array.itemsize, padc='\x00')
    if kind != 'V':  # homogeneous array
        # NumPy scalars are mishandled by numarray, see #98.  This
        # case may be removed when the bug in numarray is fixed.
        if numpy.isscalar(array):
            # A problem with the ``item()`` conversion is the loss of
            # precise type information (see #125).
            natype = numpy.sctypeNA[type(array)]
            return numarray.array(array.item(), type=natype)
        # This works for regular homogeneous arrays and even for rank-0 arrays
        # Using asarray gives problems in some tests (I don't know exactly why)
        ##return numarray.asarray(array)  # use the array protocol
        return numarray.array(array)  # use the array protocol (with copy)
    # For the remaining heterogeneous arrays and records, leave the
    # task to ``nra``.
    return nra.fromnumpy(array)
def _simple_logistic_regression(x, y, beta_start=None, verbose=False,
                                CONV_THRESH=1.e-3, MAXIT=500):
    """
    Faster than logistic_regression when there is only one predictor.
    """
    if len(x) != len(y):
        raise ValueError, "x and y should be the same length!"
    if beta_start is None:
        beta_start = NA.zeros(2, x.dtype.char)
    iter = 0; diff = 1.; beta = beta_start  # initial values
    if verbose:
        print 'iteration  beta log-likelihood |beta-beta_old|'
    while iter < MAXIT:
        beta_old = beta
        p = NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
        l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p))  # log-likelihood
        s = NA.array([NA.sum(y-p), NA.sum((y-p)*x)])  # scoring function
        # information matrix
        J_bar = NA.array([[NA.sum(p*(1-p)), NA.sum(p*(1-p)*x)],
                          [NA.sum(p*(1-p)*x), NA.sum(p*(1-p)*x*x)]])
        beta = beta_old + NA.dot(LA.inverse(J_bar), s)  # new value of beta
        diff = NA.sum(NA.fabs(beta-beta_old))  # sum of absolute differences
        if verbose:
            print iter+1, beta, l, diff
        if diff <= CONV_THRESH:
            break
        iter = iter + 1
    return beta, J_bar, l
def get_coordinates(*args):
    """
    Returns the array of coordinates of the elements of the other arrays:
    either an array of tuples (theta, phi) or of tuples (rho, phi, z) - either
    in (degrees, degrees) or in (meters, degrees, meters).
    """
    _ifar = _get_ifar(*args)
    n_theta = _get_n_theta(*args)
    n_phi = _get_n_phi(*args)
    theta_start = _get_theta_start(*args)
    delta_theta = _get_delta_theta(*args)
    phi_start = _get_phi_start(*args)
    delta_phi = _get_delta_phi(*args)
    l = []
    if _ifar == 1:
        rho = _PyNEC.nec_radiation_pattern_get_range(*args)
        for i in range(n_phi):
            for j in range(n_theta):
                l.append((rho, phi_start+i*delta_phi, theta_start+j*delta_theta))
        ar = numarray.array(l)
        ar = numarray.reshape(ar, (n_phi, n_theta, 3))
    else:
        for i in range(n_phi):
            for j in range(n_theta):
                l.append((theta_start+j*delta_theta, phi_start+i*delta_phi))
        ar = numarray.array(l)
        ar = numarray.reshape(ar, (n_phi, n_theta, 2))
    return ar
def put(self, name, value, array=None, type=None, allowNone=False):
    '''put(name,value,[array,type,allowNone]) --> push a variable to IDL'''
    if self._isexpr(name):  #contains 'expression' syntax
        return self._putexpr(name, value, array, type, allowNone)
    try:
        len(value)  #catch all non-sequence items
    except TypeError:
        return self._putvar(name, value, allowNone)
    self._validate(name)
    array, type = self._checkarray(name, value, array, type)
    if not array and isinstance(value, str):  #put as string, not BYTE Array
        return self._putvar(name, value, allowNone)
    try:
        z = numarray.array(value, type)  #convert to temporary NumArray
    except TypeError:  #raised when list contains non-numeric items
        return self._putstringarr(name, value)
    self._delarr(name)
    idlshape = self._invertshape(z._shape)
    obj = self.pyidl.setarr(name, z._data, self.types[z._type], idlshape)
    self._putlocal(name, obj, store='putbuff')
    self._putlocal(name, value)
    ### CORRECT THE VALUE IN LOCAL STORE BASED ON ARRAY & TYPE ###
    #return self._synclocal(name)  #cannot be used due to infinite loop
    if not array and self._isarray(name):  #convert from NumArray
        if z.typecode() == '1':  #BYTE-type (unicode)
            value = z.tostring()
        else:  #rank-1 through n
            value = z.tolist()
    elif array and not self._isarray(name):  #convert to NumArray
        value = numarray.array(z, type)
    if array:  #make sure converted to correct type
        value = numarray.array(value, type)
    self._putlocal(name, value)
    return
def _processobj(self, obj, array='?'):
    '''_processobj(obj,[array]) --> extract value from IDL_array obj'''
    if not self._isarrayobj(obj):
        raise TypeError, "object is not an IDL_array object"
    if obj.type() == 7:  #is an IDL STRING Array
        return self._processstringarrayobj(obj)
    numtyp = self.types[obj.type()]
    numarrayshape = self._invertshape(obj.shape())
    val = numarray.array(buffer(obj), type=numtyp, shape=numarrayshape)
    if array == '?':  #check what is stored locally
        array = self._toarray(obj.name())
    if not array:
        if val.typecode() == '1':  #BYTE-type (unicode)
            val = val.tostring()
            array = True  #this preserves the BYTE Array on call to _put
        elif not val.rank:  #convert rank-0 to rank-1
            val = numarray.array([val], type=numtyp).tolist()
            val = val[0]  #convert back to rank-0
        else:  #rank-1 through n
            val = val.tolist()
    else:
        val = numarray.array(val, numtyp)  #uncouple from buffer
    self.put(obj.name(), val, array, numtyp)  #reset memory reference
    self._putlocal(obj.name(), obj, store='getbuff')
    self._putlocal(obj.name(), val)
    return val
def abs_fft(self, points=None, zoom=None, write='off'):
    """
    Fourier transforms the timesignal;
    points is the number of points to transform; if more points are given
    than data points, the rest is zero padded

    absfft(points=4096)
    """
    realdata = numarray.array(self.the_result.y[0])
    imdata = numarray.array(self.the_result.y[1])
    data = realdata + 1j*imdata
    fftdata = self.rearrange(numarray.fft.fft(data, points))
    absfft = numarray.sqrt(fftdata.real**2 + fftdata.imag**2)
    # create our x axis
    n = fftdata.size()
    self.the_result.x = numarray.arange(n)*(self.sampling_rate/n)-(self.sampling_rate/2.0)
    self.the_result.y[0] = absfft
    self.the_result.y[1] = numarray.zeros(n)
    if write == 'on':
        return self
    else:
        if zoom is None:
            return self.the_result
        else:
            center, width = zoom
            return self.zoom(self.the_result, center, width)
def getpositions(ximage, yimage, xmax, ymax):
    dmin = 5.  #minimum distance to object + sqrt(isoarea)
    xpos = []
    ypos = []
    d = N.zeros(len(ximage), 'f')
    #print len(d)
    i = 0
    while i < npoints:
        xtemp = int(round(random.uniform(5., 125.)))  #random.uniform(5.,125.)#*xmax
        ytemp = int(round(random.uniform(5., 140.)))  #random.uniform(5.,140.)#*ymax
        if (xtemp > 10.) & (ytemp < (ymax - 10)):
            d = N.sqrt((ximage - xtemp)**2 + (yimage - ytemp)**2)
            if (min(d) > dmin):
                if i > 2:
                    xap = N.array(xpos, 'f')
                    yap = N.array(ypos, 'f')
                    d2 = N.sqrt((xtemp - xap)**2 + (ytemp - yap)**2)
                    if (min(d2) > dmin):
                        xpos.append(xtemp)
                        ypos.append(ytemp)
                        i = i + 1
                else:
                    xpos.append(xtemp)
                    ypos.append(ytemp)
                    i = i + 1
    xpos = N.array(xpos, 'f')
    ypos = N.array(ypos, 'f')
    return xpos, ypos
def __neg__(self):
    "Redefining -self"
    if not self.contains_data():
        return

    tmp_y = []

    self.lock.acquire()
    for i in range(self.get_number_of_channels()):
        tmp_y.append(numarray.array(-self.y[i], type="Float64"))

    if self.uses_statistics():
        r = Accumulation(x=numarray.array(self.x, type="Float64"),
                         y=tmp_y,
                         y_2=numarray.array(self.y_square),
                         n=self.n,
                         index=self.index,
                         sampl_freq=self.sampling_rate,
                         error=True)
    else:
        r = Accumulation(x=numarray.array(self.x, type="Float64"),
                         y=tmp_y,
                         n=self.n,
                         index=self.index,
                         sampl_freq=self.sampling_rate,
                         error=False)
    self.lock.release()
    return r
def _simple_logistic_regression(x, y, beta_start=None, verbose=False,
                                CONV_THRESH=1.e-3, MAXIT=500):
    """
    Faster than logistic_regression when there is only one predictor.
    """
    if len(x) != len(y):
        raise ValueError, "x and y should be the same length!"
    if beta_start is None:
        beta_start = NA.zeros(2, x.typecode())
    iter = 0; diff = 1.; beta = beta_start  # initial values
    if verbose:
        print 'iteration  beta log-likelihood |beta-beta_old|'
    while iter < MAXIT:
        beta_old = beta
        p = NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
        l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p))  # log-likelihood
        s = NA.array([NA.sum(y-p), NA.sum((y-p)*x)])  # scoring function
        # information matrix
        J_bar = NA.array([[NA.sum(p*(1-p)), NA.sum(p*(1-p)*x)],
                          [NA.sum(p*(1-p)*x), NA.sum(p*(1-p)*x*x)]])
        beta = beta_old + NA.dot(LA.inverse(J_bar), s)  # new value of beta
        diff = NA.sum(NA.fabs(beta-beta_old))  # sum of absolute differences
        if verbose:
            print iter+1, beta, l, diff
        if diff <= CONV_THRESH:
            break
        iter = iter + 1
    return beta, J_bar, l
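# Hedged smoke test for the Newton-Raphson fit above (the data values are
# made up).  It assumes the module-level aliases NA (numarray) and LA
# (numarray.linear_algebra) that the function body itself uses.  The response
# is deliberately not perfectly separable, so the iteration converges to a
# finite intercept and slope.
def _logistic_example():
    x = NA.array([0., 1., 2., 3., 4., 5., 6., 7.])
    y = NA.array([0., 0., 1., 0., 0., 1., 1., 1.])
    beta, J_bar, l = _simple_logistic_regression(x, y, verbose=False)
    print 'intercept, slope =', beta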
def sample(self, index={}):
    """ in order to sample from this distribution, all parents must be known """
    # mean = self.mean.copy()
    # sigma = self.sigma.copy()
    ## if index:
    ##     # discrete parents
    ##     for v,i in enumerate(reversed(self.discrete_parents)):
    ##         # reverse: avoid missing axes when taking in random
    ##         # we start from the end, that way all other dimensions keep the same index
    ##         if index.has_key(v.name):
    ##             # take the corresponding mean; +1 because first axis is the mean
    ##             mean = na.take(mean, index[v], axis=(i+1) )
    ##             # take the corresponding covariance; +2 because first 2 axes are the cov
    ##             sigma = na.take(sigma, index[v], axis=(i+2) )
    ##
    ##     # continuous parents
    ##     for v in reversed(self.continuous_parents):
    ##         if index.has_key(v):
    d_index, c_index = self._numIndexFromDict(index)
    mean = na.array(self.mean[tuple([slice(None, None, None)] + d_index)])
    sigma = self.sigma[tuple([slice(None, None, None)] * 2 + d_index)]

    wi = na.sum(self.weights * na.array(c_index)[na.NewAxis, ...], axis=1)
    # if self.continuous_parents:
    #     wi = na.array(self.weights[tuple([slice(None,None,None)]+c_index)])
    # else: wi = 0.0

    # return a random number from a normal multivariate distribution
    return float(ra.multivariate_normal(mean + wi, sigma))
def drawhist(x, y):  #draw histogram of binned data, x=left side of bins, y=number per bin
    #print "from within drawhist, x,y = ",x,y
    x1 = []
    y1 = []
    n = len(x)
    dx = x[1] - x[0]
    for i in range(n):
        x1.append(float(x[i]))
        x1.append(float(x[i]))
    x1.append(x[(n - 1)] + dx)
    x1.append(x[(n - 1)] + dx)
    #x1.append(max(x)+dx)
    #x1.append(max(x)+dx)
    y1.append(0.)
    for i in range(n):
        y1.append(float(y[i]))
        y1.append(float(y[i]))
    y1.append(0.)
    #print "Within drawhist"
    #for i in range(len(x1)):
    #    print i,x1[i],y1[i], len(x1),len(y1)
    x2 = N.array(x1)
    y2 = N.array(y1)
def __init__(self, x, y, axis=-1, makecopy=0, bounds_error=1, fill_value=None):
    """Initialize a piecewise-constant interpolation class

    Description:
      x and y are arrays of values used to approximate some function f:
                  y = f(x)
      This class returns a function whose call method uses piecewise-
      constant interpolation to find the value of new points.

    Inputs:
        x -- a 1d array of monotonically increasing real values.
             x cannot include duplicate values. (otherwise f is
             overspecified)
        y -- an nd array of real values.  y's length along the
             interpolation axis must be equal to the length of x.
        axis -- specifies the axis of y along which to interpolate.
                Interpolation defaults to the last axis of y.
                (default: -1)
        makecopy -- If 1, the class makes internal copies of x and y.
                If 0, references to x and y are used.  The default
                is to copy. (default: 0)
        bounds_error -- If 1, an error is thrown any time interpolation
                is attempted on a value outside of the range of x
                (where extrapolation is necessary).  If 0, out of
                bounds values are assigned the NaN (#INF) value.  By
                default, an error is raised, although this is prone
                to change. (default: 1)
    """
    self.datapoints = (array(x, Float), array(y, Float))  # RHC -- for access from PyDSTool
    self.type = Float  # RHC -- for access from PyDSTool
    self.axis = axis
    self.makecopy = makecopy  # RHC -- renamed from copy to avoid nameclash
    self.bounds_error = bounds_error
    if fill_value is None:
        self.fill_value = NaN  # RHC -- was: array(0.0) / array(0.0)
    else:
        self.fill_value = fill_value

    # Check that both x and y are at least 1 dimensional.
    if len(shape(x)) == 0 or len(shape(y)) == 0:
        raise ValueError, "x and y arrays must have at least one dimension."
    # make a "view" of the y array that is rotated to the
    # interpolation axis.
    oriented_x = x
    oriented_y = swapaxes(y, self.interp_axis, axis)
    interp_axis = self.interp_axis
    len_x, len_y = shape(oriented_x)[interp_axis], \
                   shape(oriented_y)[interp_axis]
    if len_x != len_y:
        raise ValueError, "x and y arrays must be equal in length along "\
                          "interpolation axis."
    if len_x < 2 or len_y < 2:
        raise ValueError, "x and y arrays must have more than 1 entry"
    self.x = array(oriented_x, copy=self.makecopy)
    self.y = array(oriented_y, copy=self.makecopy)
def __neg__(self):
    "Redefining -self"
    tmp_y = []
    for i in range(self.get_number_of_channels()):
        tmp_y.append(numarray.array(-self.y[i]))
    return ADC_Result(x=numarray.array(self.x),
                      y=tmp_y,
                      index=self.index,
                      sampl_freq=self.sampling_rate,
                      desc=self.description,
                      job_id=self.job_id,
                      job_date=self.job_date)
def make_audio_envelope(time, data, maxpoints=10000):
    """ create gnuplot plot from an audio file """
    import numarray
    import Gnuplot, Gnuplot.funcutils
    bufsize = 500
    x = [i.mean() for i in numarray.array(time).resize(len(time)/bufsize, bufsize)]
    y = [i.mean() for i in numarray.array(data).resize(len(time)/bufsize, bufsize)]
    x, y = downsample_audio(x, y, maxpoints=maxpoints)
    return Gnuplot.Data(x, y, with='lines')
def downsample_audio(time, data, maxpoints=10000):
    """ resample audio data so that at most maxpoints samples remain """
    import numarray
    length = len(time)
    downsample = length/maxpoints
    if downsample == 0:
        downsample = 1
    x = numarray.array(time).resize(length)[0:-1:downsample]
    y = numarray.array(data).resize(length)[0:-1:downsample]
    return x, y
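# Hypothetical usage of the decimation helper above: a 1000-sample ramp is
# reduced to roughly 100 points (every 10th sample is kept).  Plain lists are
# accepted because the function converts them with numarray.array itself.
def _downsample_example():
    t = range(1000)
    d = [0.001 * v for v in t]
    t_small, d_small = downsample_audio(t, d, maxpoints=100)
    print len(t_small), len(d_small)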
def calcavesky():
    input = open("noise.dat", 'r')
    aperture = []
    counts = []
    area = []
    j = 0
    for line in input:
        if line.find('#') > -1:  #skip lines with '#' in them
            continue
        if line.find('mosaic_minus') > -1:  #skip lines with '#' in them
            j = 0
            continue
        j = j + 1
        if (j > 3):
            #print j, line
            t = line.split()
            aperture.append(float(t[0]))
            counts.append(float(t[1]))
            area.append(float(t[2]))
    input.close()
    aperture = N.array(aperture, 'f')
    counts = N.array(counts, 'f')
    area = N.array(area, 'f')

    ap = N.zeros(npoints, 'f')
    aparea = N.zeros(nap, 'f')
    aveap = N.zeros(nap, 'f')
    aveaperr = N.zeros(nap, 'f')
    avearea = N.zeros(nap, 'f')
    aveareaerr = N.zeros(nap, 'f')
    #for i in range(len(ap)):
    for i in range(nap):
        #print i, len(ap),aperture[i],aperture[i+1]
        if (i < (nap - 1)):
            ap = N.compress((aperture >= aperture[i]) & (aperture < aperture[i + 1]), counts)
            aparea = N.compress((aperture >= aperture[i]) & (aperture < aperture[i + 1]), area)
        else:
            ap = N.compress((aperture >= aperture[i]) & (aperture < 20.), counts)
            aparea = N.compress((aperture >= aperture[i]) & (aperture < 20.), area)
        #print ap
        #aparea=N.compress((aperture >= aperture[i]) & (aperture < aperture[i+1]),area)
        aveap[i] = N.average(ap)
        aveaperr[i] = scipy.stats.std(ap)
        avearea[i] = N.average(aparea)
        aveareaerr[i] = scipy.stats.std(aparea)
        print "ave sky = %8.4f +/- %8.4f" % (N.average(ap), scipy.stats.std(ap))
        print "ave area = %8.4f +/- %8.4f" % (N.average(aparea), scipy.stats.std(aparea))
    return aveap, aveaperr, avearea, aveareaerr
def findmec(th0, th1):
    delta = 1e-3
    if th0 < 0 and th0 - th1 < .1:
        sm = 5.
        sp = 6.
    else:
        sm = 3.
        sp = 5.
    params = [sm, sp]
    lasterr = 1e6
    for i in range(25):
        sm, sp = params
        ath0, ath1 = trymec(sm, sp)
        c1c, c2c = th0 - ath0, th1 - ath1
        err = c1c * c1c + c2c * c2c
        if 0:
            print '%findmec', sm, sp, ath0, ath1, err
            sys.stdout.flush()
        if err < 1e-9:
            return params
        if err > lasterr:
            return None
        lasterr = err
        dc1s = []
        dc2s = []
        for j in range(len(params)):
            params1 = N.array(params)
            params1[j] += delta
            sm, sp = params1
            ath0, ath1 = trymec(sm, sp)
            c1p, c2p = th0 - ath0, th1 - ath1
            params1 = N.array(params)
            params1[j] -= delta
            sm, sp = params1
            ath0, ath1 = trymec(sm, sp)
            c1m, c2m = th0 - ath0, th1 - ath1
            dc1s.append((c1p - c1m) / (2 * delta))
            dc2s.append((c2p - c2m) / (2 * delta))
        jm = N.array([dc1s, dc2s])
        ji = la.inverse(jm)
        dp = N.dot(ji, [c1c, c2c])
        if i < 4:
            scale = .5
        else:
            scale = 1
        params -= scale * dp
        if params[0] < 0:
            params[0] = 0.
    return params
def testNumSet(self):
    """ test that a raw index of numbers can access and set a position in the distribution """
    self.a.distribution[0,0,0,:] = -1
    self.a.distribution[1,0,0,:] = 100
    self.a.distribution[1,1,0,:] = na.array([-2, -3])
    assert(na.all(self.a.distribution[0,0,0,:] == na.array([-1, -1])) and \
           na.all(self.a.distribution[1,0,0,:] == na.array([100, 100])) and \
           na.all(self.a.distribution[1,1,0,:] == na.array([-2, -3]))), \
          "Error Setting cpt with num indices"
def func(self, X, V):
    if X[-1] >= 0:
        J_coords = self.F.sysfunc.J_coords
        w = sqrt(X[-1])

        l1 = firstlyapunov(X, self.F.sysfunc, w, J_coords=J_coords,
                           V=self.F.testfunc.data.v, W=self.F.testfunc.data.w)

        return array([l1])
    else:
        return array([1])
def testIndexing(self):
    fd = self.f.distribution
    # 2 discrete parents : d(2),e(3)
    fd.setParameters(mu=range(6), sigma=range(6))

    # # test normal indexing
    # assert(na.allclose(fd[0][0].flat, na.array(range(3),type='Float32')) and \
    #        na.allclose(fd[0][1].flat, na.array(range(3), type='Float32')) and \
    #        na.allclose(fd[1,2][0].flat, na.array(5, type='Float32')) and \
    #        na.allclose(fd[1,2][1].flat, na.array(5, type='Float32'))), \
    #       "Normal indexing does not seem to work..."

    # test dict indexing
    assert(na.allclose(fd[{'d':0}][0].flat, na.array(range(3), type='Float32')) and \
           na.allclose(fd[{'d':0}][1].flat, na.array(range(3), type='Float32')) and \
           na.allclose(fd[{'d':1,'e':2}][0].flat, na.array(5, type='Float32')) and \
           na.allclose(fd[{'d':1,'e':2}][1].flat, na.array(5, type='Float32'))), \
          "Dictionary indexing does not seem to work..."

    # now test setting of parameters
    fd[{'d':1,'e':2}] = {'mean':0.5, 'sigma':0.6}
    fd[{'d':0}] = {'mean':[0,1.2,2.4], 'sigma':[0,0.8,0.9]}

    na.allclose(fd[{'d':0}][0].flat, na.array([0,1.2,2.4], type='Float32'))
    assert(na.allclose(fd[{'d':0}][0].flat, na.array([0,1.2,2.4], type='Float32')) and \
           na.allclose(fd[{'d':0}][1].flat, na.array([0,0.8,0.9], type='Float32')) and \
           na.allclose(fd[{'d':1,'e':2}][0].flat, na.array(0.5, type='Float32')) and \
           na.allclose(fd[{'d':1,'e':2}][1].flat, na.array(0.6, type='Float32'))), \
          "Setting of values using dict does not seem to work..."

    # now run tests for continuous parents
    cd = self.c.distribution
    # 2 continuous parents a(1),b(2)
    cd[{'a':0,'b':1}] = {'weights':69}
    assert(na.allclose(cd[{'a':0,'b':1}][2], 69.0)), \
          "Indexing for continuous parents does not work"
def SfnFromM(m):
    "Converts an array to AstroMap::Map and calculates structure fn"
    sfn = pyplot.DoubleVector()
    count = pyplot.SizeTVector()
    pyplot.RndStructureFn(m, 10, sfn, count)
    return (numarray.array(sfn[1:]), numarray.array(count[1:]))
def curve(points, level=6):
    points[0].middle = False
    points[-1].middle = False
    pold = points[0]
    x = [pold.pos[0]]
    y = [pold.pos[1]]
    for pnew in points[1:]:
        x0, y0 = bezier(pold.pos, pnew.pos, level).bezier(
            pold.getmiddle2(), pnew.getmiddle1())
        x.extend(x0[1:])
        y.extend(y0[1:])
        pold = pnew
    return numarray.array(x), numarray.array(y)
def __init__(self, names, shape, g=None, h=None, K=None):
    Potential.__init__(self, names)
    self.shape = shape

    # set parameters to 0s
    self.n = na.sum(shape)
    if not g:
        self.g = 0.0
    else:
        self.g = float(g)
    if not h:
        self.h = na.zeros(shape=(self.n), type='Float32')
    else:
        self.h = na.array(h, shape=(self.n), type='Float32')
    if not K:
        self.K = na.zeros(shape=(self.n, self.n), type='Float32')
    else:
        self.K = na.array(K, shape=(self.n, self.n), type='Float32')
def bulStoer(F, x, y, xStop, H, tol=1.0e-6):
    X = []
    Y = []
    X.append(x)
    Y.append(y)
    while x < xStop:
        H = min(H, xStop - x)
        y = integrate(F, x, y, x + H, tol)  # Midpoint method
        x = x + H
        X.append(x)
        Y.append(y)
    return array(X), array(Y)
def LoadElScanData(fnamein):
    eloff, tant, modtant = [], [], []
    for line in open(fnamein):
        ld = string.split(line)
        eloff.append(float(ld[0]))
        tant.append(float(ld[1]))
        modtant.append(float(ld[3]))
    return (numarray.array(eloff), numarray.array(tant), numarray.array(modtant))
def insertInOrder(sourcelist, inslist, return_ixs=False):
    """Insert elements of inslist into sourcelist, sorting these
    lists in case they are not already in increasing order.

    The function will not create duplicate entries in the list,
    and will change neither the first nor the last entries of the list.

    If sourcelist is an array, an array is returned."""
    try:
        sorted_inslist = inslist.tolist()
    except AttributeError:
        sorted_inslist = copy(inslist)
    sorted_inslist.sort()
    try:
        sorted_sourcelist = sourcelist.tolist()
        was_array = True
    except AttributeError:
        sorted_sourcelist = copy(sourcelist)
        was_array = False
    sorted_sourcelist.sort()
    tix = 0
    # optimize by having separate versions of loop
    if return_ixs:
        ins_ixs = []
        for t in sorted_inslist:
            tcond = less_equal(sorted_sourcelist[tix:], t).tolist()
            try:
                tix = tcond.index(0) + tix  # lowest index for elt > t
                if sorted_sourcelist[tix - 1] != t and tix >= 0:
                    sorted_sourcelist.insert(tix, t)
                    ins_ixs.append(tix)
            except ValueError:
                # no 0 value in tcond, so t might be equal to the final value
                pass
        if was_array:
            return array(sorted_sourcelist), ins_ixs
        else:
            return sorted_sourcelist, ins_ixs
    else:
        for t in sorted_inslist:
            tcond = less_equal(sorted_sourcelist[tix:], t).tolist()
            try:
                tix = tcond.index(0) + tix  # lowest index for elt > t
                if sorted_sourcelist[tix - 1] != t and tix >= 0:
                    sorted_sourcelist.insert(tix, t)
            except ValueError:
                # no 0 value in tcond, so t might be equal to the final value
                pass
        if was_array:
            return array(sorted_sourcelist)
        else:
            return sorted_sourcelist
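# A small usage sketch for insertInOrder with made-up values.  It assumes the
# module-level names the body above already relies on (less_equal, array,
# copy); plain Python lists go through the list branch of the code.
def _insertInOrder_example():
    vals = [0.0, 1.0, 2.0, 3.0]
    new_vals = [0.5, 2.0, 2.5]
    merged, where = insertInOrder(vals, new_vals, return_ixs=True)
    print merged   # 2.0 is already present, so it is not duplicated
    print where    # indices at which the new elements were inserted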
def test():
    '''test(); simple test of put for list and array'''
    A = 1
    B = [1, 2]
    C = numarray.array(B)
    S = 'foo'
    T = numarray.array(S)
    vars = [A, B, C, S, T]
    m = pyIDL.idl()
    print "numarray imported..."
    print "testing put()...\n"
    x = raw_input("test ? > ")
    if x:
        exec "vars = [" + x + "]"
    for v in vars:
        vv = 'X'
        print "\nvalue = %r" % v
        print "RESULTS: whos[], get()"
        m.put(vv, v)
        print "no args: %r, %r" % (m.whos[vv], m.get(vv))
        m.put(vv, v, array=False)
        print "array=False: %r, %r" % (m.whos[vv], m.get(vv))
        m.put(vv, v, array='?')
        print "array=?: %r, %r" % (m.whos[vv], m.get(vv))
        m.put(vv, v, array=True)
        print "array=True: %r, %r" % (m.whos[vv], m.get(vv))
        m.put(vv, v, array='?')
        print "array=?: %r, %r" % (m.whos[vv], m.get(vv))
        m.put(vv, v, array=False, type='Int8')
        print "array=False,type=Int8: %r, %r" % (m.whos[vv], m.get(vv))
        m.put(vv, v, array='?', type='?')
        print "array=?,type=?: %r, %r" % (m.whos[vv], m.get(vv))
        m.put(vv, v, array='?', type='Int16')
        print "array=?,type=Int16: %r, %r" % (m.whos[vv], m.get(vv))
        m.put(vv, v, array=True, type='Int8')
        print "array=True,type=Int8: %r, %r" % (m.whos[vv], m.get(vv))
        m.put(vv, v, array='?', type='?')
        print "array=?,type=?: %r, %r" % (m.whos[vv], m.get(vv))
        try:
            m.put(vv, v, array='?', type='Int16')
            print "array=?,type=Int16: %r, %r" % (m.whos[vv], m.get(vv))
        except ValueError:
            print "array=?,type=Int16: raises ValueError..."
        m.delete(vv)
    return
def plot():
    # main plotting function
    pgeras()
    pgswin(xmin, xmax, ymin, ymax)
    pgbox('bcnst', 0., 0, 'bcnst', 0., 0)
    pgpt(numarray.array(r), numarray.array(s), 5)
    xstep = (xmax - xmin) / 100.
    step = xmin
    pgmove(step, airy(step, a))
    for tmp in range(100):
        step = step + xstep
        pgdraw(step, airy(step, a))
    return
def locate(self, P1, P2, C):
    pointlist = []
    for i, testfunc in enumerate(self.testfuncs):
        if self.flagfuncs[i] == iszero:
            for ind in range(testfunc.m):
                X, V = testfunc.findzero(P1, P2, ind)
                pointlist.append((X, V))
    X = array(average([point[0] for point in pointlist]))
    V = array(average([point[1] for point in pointlist]))
    C.Corrector(X, V)
    return X, V
def greadphotfiles(self, gphotfile, dL, kcorr):
    self.photra = []
    self.photdec = []
    self.phu = []
    self.phMv = []
    for line in open(gphotfile):
        if line.find('#') > -1:
            continue
        if len(line) < 10:  #phot files have blank line at end - skip it
            continue
        #print line
        fields = line.split(',')
        #print fields[0], fields[1], line
        #need to implement a mag cut
        g = float(fields[3])
        r = float(fields[4])
        if (g > 18.0):
            continue
        if (r > 17.7):
            continue
        g01 = g + 0.3134 + 0.4608 * ((g - r) * -0.6148)
        g01 = g01 + .01  #convert to vega
        r01 = g - 0.4118 - .8597 * ((g - r) * -0.6148)
        r01 = r01 - .04  #convert to vega
        V = g01 - 0.6689 - 0.9264 * ((g01 - r01) - 0.7252)
        g = g - (-0.08)  #convert to vega
        r = r - (.16)  #convert to vega
        #g=g-float(fields[10])#correct for extinction
        #r=r-float(fields[11])#correct for extinction
        V = g - 0.3556 - 0.7614 * ((g - r) - 0.6148)
        #mv=V - (5.*N.log10(dL)+25.+kcorr)
        mv = V - (5. * N.log10(dL) + 25.)
        if ((mv) < mabscut):
            self.photra.append(float(fields[0]))
            self.photdec.append(float(fields[1]))
            self.phu.append(float(fields[2]))
            self.phMv.append(float(mv))
        #self.photra.append(float(fields[0]))
        #self.photdec.append(float(fields[1]))
        #self.phu.append(float(fields[2]))
        #self.phMv.append(float(mv))
    self.photra = N.array(self.photra, 'f')
    self.photdec = N.array(self.photdec, 'f')
    self.phu = N.array(self.phu, 'f')
    self.phMv = N.array(self.phMv, 'f')