def testDiv(self):
    """ test division """
    a = Table(['a','b','c','d'],[2,3,4,5],range(2*3*4*5))
    b = Table(['c','b','e'],[4,3,6],range(12*6))
    c = Table(['a','b','c','d','e'],[2,3,4,5,6],range(2*3*4*5*6))

    acpt = copy(a.cpt)[...,na.NewAxis]
    bcpt = copy(b.cpt)[...,na.NewAxis,na.NewAxis]
    bcpt.transpose([3,1,0,4,2])

    ab = a/b
    cc = c/c
    bb = b/b

    cres = na.ones(2*3*4*5*6)
    cres[0] = 0
    bres = na.ones(12*6)
    bres[0] = 0
    ares = acpt/bcpt
    ares[getnan(ares)] = 0.0

    assert (ab == Table(['a','b','c','d','e'],[2,3,4,5,6],ares) and \
            cc == Table(['a','b','c','d','e'],[2,3,4,5,6],cres) and \
            bb == Table(['c','b','e'],[4,3,6],bres) ), \
           " Division does not work"
def equations(x, h, m):  # Set up finite difference eqs.
    h2 = h*h
    d = ones((m + 1))*(-2.0 + 4.0*h2)
    c = ones((m), type=Float64)
    e = ones((m), type=Float64)
    b = ones((m + 1))*4.0*h2*x
    d[0] = 1.0
    e[0] = 0.0
    b[0] = 0.0
    c[m-1] = 2.0
    return c, d, e, b
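# A minimal driver sketch for equations() above, assuming the LUdecomp3
# module used in example2_11 below; m, h and the mesh x are arbitrary
# illustrative choices, not from the original source.
from numarray import arange
from LUdecomp3 import *

m = 10
h = 1.0/m
x = arange(m + 1)*h              # m + 1 mesh points on [0, 1]
c, d, e, b = equations(x, h, m)
c, d, e = LUdecomp3(c, d, e)     # LU decomposition of the tridiagonal bands
y = LUsolve3(c, d, e, b)         # solution of the finite difference system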
def equations(x, h, m):  # Set up finite difference eqs.
    h4 = h**4
    d = ones((m + 1), type=Float64)*6.0
    e = ones((m), type=Float64)*(-4.0)
    f = ones((m - 1), type=Float64)
    b = zeros((m + 1), type=Float64)
    d[0] = 1.0
    d[1] = 7.0
    e[0] = 0.0
    f[0] = 0.0
    d[m-1] = 7.0
    d[m] = 3.0
    b[m] = 0.5*h**3
    return d, e, f, b
def lsq(x, y, sig=None, int_scat=None, clip=None, a=None, b=None,
        siga_in=0.0, sigb_in=0.0):
    if clip is None or clip == 0:
        if sig is None:
            sig = N.ones(x.shape)
            mwt = 0
        else:
            sig = N.array(sig)
            mwt = 1
        if a is None and b is None:
            if int_scat is None:
                results = nr.fit(x, y, sig)
            else:
                results = nr.fit_i(x, y, sig, int_scat)
            a, b, siga, sigb, chi2 = results
            scatter = calc_scatter(x, y, sig, int_scat, a, b)
        elif a is not None and b is None:
            if int_scat is None:
                results = nr.fit_slope(x, y, sig, mwt, a, siga_in)
            else:
                results = nr.fit_slope_i(x, y, sig, int_scat, a, siga_in)
            b, sigb, chi2 = results
            siga = 0.0
            scatter = calc_scatter(x, y, sig, int_scat, a, b)
        elif a is None and b is not None:
            if int_scat is None:
                results = nr.fit_intercept(x, y, sig, mwt, b, sigb_in)
            else:
                results = nr.fit_intercept_i(x, y, sig, int_scat, b, sigb_in)
            a, siga, chi2 = results
            sigb = 0.0
            scatter = calc_scatter(x, y, sig, int_scat, a, b)
        n = len(x)
        return (a, b, siga, sigb, chi2, scatter, n)
    else:
        return lsq_clipped(x, y, sig, int_scat, clip, a, b, siga_in, sigb_in)
def plotsigsff(sig, sf, file, nbin):
    psplot = file + ".ps"
    psplotinit(psplot)
    tot = N.ones(len(sf), 'f')
    (sigbin, sfbin) = my.binitsumequal(sig, sf, nbin)
    (sigbin, totbin) = my.binitsumequal(sig, tot, nbin)
    print sfbin
    print totbin
    (sff, sfferr) = my.ratioerror(sfbin, totbin)
    ppgplot.pgbox("", 0.0, 0, "L", 0.0, 0)
    ymin = -.05
    ymax = 1.05
    xmin = min(sig) - 10.
    #xmax=max(sig)-200.
    xmax = 350.
    ppgplot.pgenv(xmin, xmax, ymin, ymax, 0)
    ppgplot.pglab("\gS\d5\u (gal/Mpc\u2\d)", "Fraction EW([OII])>4 \(2078)", "")
    ppgplot.pgsls(1)  #dotted
    ppgplot.pgslw(4)  #line width
    sig = N.array(sig, 'f')
    sff = N.array(sff, 'f')
    ppgplot.pgsci(2)
    ppgplot.pgline(sigbin, sff)
    ppgplot.pgsci(1)
    ppgplot.pgpt(sigbin, sff, 17)
    my.errory(sigbin, sff, sfferr)
    ppgplot.pgend()
def _min_or_max_filter(input, size, footprint, structure, output, mode,
                       cval, origin, minimum):
    if structure is None:
        if footprint is None:
            if size is None:
                raise RuntimeError, "no footprint provided"
            separable = True
        else:
            footprint = numarray.asarray(footprint, numarray.Bool)
            if numarray.alltrue(numarray.ravel(footprint)):
                size = footprint.shape
                footprint = None
                separable = True
            else:
                separable = False
    else:
        structure = numarray.asarray(structure, type=numarray.Float64)
        separable = False
        if footprint is None:
            footprint = numarray.ones(structure.shape, numarray.Bool)
        else:
            footprint = numarray.asarray(footprint, numarray.Bool)
    input = numarray.asarray(input)
    if isinstance(input.type(), numarray.ComplexType):
        raise TypeError, 'Complex type not supported'
    output, return_value = _ni_support._get_output(output, input)
    origins = _ni_support._normalize_sequence(origin, input.rank)
    if separable:
        sizes = _ni_support._normalize_sequence(size, input.rank)
        axes = range(input.rank)
        axes = [(axes[ii], sizes[ii], origins[ii])
                for ii in range(len(axes)) if sizes[ii] > 1]
        if minimum:
            filter = minimum_filter1d
        else:
            filter = maximum_filter1d
        if len(axes) > 0:
            for axis, size, origin in axes:
                filter(input, int(size), axis, output, mode, cval, origin)
                input = output
        else:
            output[...] = input[...]
    else:
        fshape = [ii for ii in footprint.shape if ii > 0]
        if len(fshape) != input.rank:
            raise RuntimeError, 'footprint array has incorrect shape.'
        for origin, lenf in zip(origins, fshape):
            if (lenf // 2 + origin < 0) or (lenf // 2 + origin > lenf):
                raise ValueError, 'invalid origin'
        if not footprint.iscontiguous():
            footprint = footprint.copy()
        if structure is not None:
            if len(structure.shape) != input.rank:
                raise RuntimeError, 'structure array has incorrect shape'
            if not structure.iscontiguous():
                structure = structure.copy()
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.min_or_max_filter(input, footprint, structure, output,
                                    mode, cval, origins, minimum)
    return return_value
def __call__(self, x_new):
    """Find linearly interpolated y_new = <name>(x_new).

    Inputs:
      x_new -- New independent variables.

    Outputs:
      y_new -- Linearly interpolated values corresponding to x_new.
    """
    # 1. Handle values in x_new that are outside of x.  Throw error,
    #    or return a list of mask array indicating the outofbounds values.
    #    The behavior is set by the bounds_error variable.
    ## RHC -- was x_new = atleast_1d(x_new)
    x_new_1d = atleast_1d(x_new)
    out_of_bounds = self._check_bounds(x_new_1d)
    # 2. Find where in the original data, the values to interpolate
    #    would be inserted.
    #    Note: If x_new[n] = x[m], then m is returned by searchsorted.
    x_new_indices = searchsorted(self.x, x_new_1d)
    # 3. Clip x_new_indices so that they are within the range of
    #    self.x indices and at least 1.  Removes mis-interpolation
    #    of x_new[n] = x[0]
    # RHC -- changed Int to Numeric_Int to avoid name clash with numarray
    x_new_indices = clip(x_new_indices, 1, len(self.x) - 1).astype(Numeric_Int)
    # 4. Calculate the slope of regions that each x_new value falls in.
    lo = x_new_indices - 1
    hi = x_new_indices
    # !! take() should default to the last axis (IMHO) and remove
    # !! the extra argument.
    x_lo = take(self.x, lo, axis=self.interp_axis)
    x_hi = take(self.x, hi, axis=self.interp_axis)
    y_lo = take(self.y, lo, axis=self.interp_axis)
    y_hi = take(self.y, hi, axis=self.interp_axis)
    slope = (y_hi - y_lo) / (x_hi - x_lo)
    # 5. Calculate the actual value for each entry in x_new.
    y_new = slope * (x_new_1d - x_lo) + y_lo
    # 6. Fill any values that were out of bounds with NaN
    # !! Need to think about how to do this efficiently for
    # !! multi-dimensional Cases.
    yshape = y_new.shape
    y_new = y_new.flat
    new_shape = list(yshape)
    new_shape[self.interp_axis] = 1
    sec_shape = [1] * len(new_shape)
    sec_shape[self.interp_axis] = len(out_of_bounds)
    out_of_bounds.shape = sec_shape
    new_out = ones(new_shape) * out_of_bounds
    putmask(y_new, new_out.flat, self.fill_value)
    y_new.shape = yshape
    # Rotate the values of y_new back so that they correspond to the
    # correct x_new values.
    result = swapaxes(y_new, self.interp_axis, self.axis)
    try:
        len(x_new)
        return result
    except TypeError:
        return result[0]
    return result
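# A hypothetical driver for the __call__ above.  "LinearInterp" is a
# stand-in name for the (unnamed) interpolator class, assumed to be
# constructed from sample points x, y; the data are illustrative only.
from numarray import array
x = array([0.0, 1.0, 2.0])
y = array([0.0, 2.0, 4.0])
interp = LinearInterp(x, y)        # hypothetical class name
print interp(array([0.5, 1.5]))    # expected: [ 1.  3.]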
def buildzone(file, baseidx, x1d):
    width = 120.0
    isize = x1d.shape[0]
    isize -= 1
    jsize = 1
    ksize = 1
    dy = width / float(jsize)
    dz = width / float(ksize)
    yorig = -width / 2.0
    zorig = -width / 2.0
    size = range(9)
    size[0] = isize + 1
    size[1] = jsize + 1
    size[2] = ksize + 1
    size[3] = isize
    size[4] = jsize
    size[5] = ksize
    size[6] = 0
    size[7] = 0
    size[8] = 0
    zoneidx = file.zonewrite(baseidx, 'Zone', size, CGNS.Structured)
    x3d = numarray.array(
        numarray.ones((ksize + 1, jsize + 1, isize + 1), numarray.Float))
    y3d = numarray.array(
        numarray.ones((ksize + 1, jsize + 1, isize + 1), numarray.Float))
    z3d = numarray.array(
        numarray.ones((ksize + 1, jsize + 1, isize + 1), numarray.Float))
    for k in range(ksize + 1):
        for j in range(jsize + 1):
            for i in range(isize + 1):
                x3d[k, j, i] = x1d[i]
                y3d[k, j, i] = yorig + j * dy
                z3d[k, j, i] = zorig + k * dz
    file.coordwrite(baseidx, zoneidx, CGNS.RealDouble, CGNS.CoordinateX, x3d)
    file.coordwrite(baseidx, zoneidx, CGNS.RealDouble, CGNS.CoordinateY, y3d)
    file.coordwrite(baseidx, zoneidx, CGNS.RealDouble, CGNS.CoordinateZ, z3d)
    return (zoneidx, isize, jsize, ksize)
def cluster_vectorspace(self, vectors, trace=False):
    assert len(vectors) > 0

    # set the parameters to initial values
    dimensions = len(vectors[0])
    means = self._means
    priors = self._priors
    if not priors:
        priors = self._priors = numarray.ones(self._num_clusters,
                            numarray.Float64) / self._num_clusters
    covariances = self._covariance_matrices
    if not covariances:
        covariances = self._covariance_matrices = \
            [ numarray.identity(dimensions, numarray.Float64)
              for i in range(self._num_clusters) ]

    # do the E and M steps until the likelihood plateaus
    lastl = self._loglikelihood(vectors, priors, means, covariances)
    converged = False

    while not converged:
        if trace: print 'iteration; loglikelihood', lastl
        # E-step, calculate hidden variables, h[i,j]
        h = numarray.zeros((len(vectors), self._num_clusters),
                           numarray.Float64)
        for i in range(len(vectors)):
            for j in range(self._num_clusters):
                h[i,j] = priors[j] * self._gaussian(means[j],
                                         covariances[j], vectors[i])
            h[i,:] /= sum(h[i,:])

        # M-step, update parameters - cvm, p, mean
        for j in range(self._num_clusters):
            covariance_before = covariances[j]
            new_covariance = numarray.zeros((dimensions, dimensions),
                                            numarray.Float64)
            new_mean = numarray.zeros(dimensions, numarray.Float64)
            sum_hj = 0.0
            for i in range(len(vectors)):
                delta = vectors[i] - means[j]
                new_covariance += h[i,j] * \
                    numarray.multiply.outer(delta, delta)
                sum_hj += h[i,j]
                new_mean += h[i,j] * vectors[i]
            covariances[j] = new_covariance / sum_hj
            means[j] = new_mean / sum_hj
            priors[j] = sum_hj / len(vectors)

            # bias term to stop covariance matrix being singular
            covariances[j] += self._bias * \
                numarray.identity(dimensions, numarray.Float64)

        # calculate likelihood - FIXME: may be broken
        l = self._loglikelihood(vectors, priors, means, covariances)

        # check for convergence
        if abs(lastl - l) < self._conv_threshold:
            converged = True
        lastl = l
def sturmSeq(d, c, lam):
    n = len(d) + 1
    p = ones((n), type=Float64)
    p[1] = d[0] - lam
    for i in range(2, n):
##        if c[i-2] == 0.0: c[i-2] = 1.0e-12
        p[i] = (d[i-1] - lam)*p[i-1] - (c[i-2]**2)*p[i-2]
    return p
def lsq_clipped(x, y, sig, int_scat=None, clip=3.0, astart=None, bstart=None,
                siga_in=0.0, sigb_in=0.0):
    olda = oldb = None
    if sig is None:
        sig = N.ones(x.shape)
        mwt = 0
    else:
        sig = N.array(sig)
        mwt = 1
    a = astart
    b = bstart
    while 1:
        if astart is None and bstart is None:
            if int_scat is None:
                results = nr.fit(x, y, sig)
            else:
                results = nr.fit_i(x, y, sig, int_scat)
            a, b, siga, sigb, chi2 = results
            scatter = calc_scatter(x, y, sig, int_scat, a, b)
        elif astart is not None and bstart is None:
            if int_scat is None:
                results = nr.fit_slope(x, y, sig, mwt, astart, siga_in)
            else:
                results = nr.fit_slope_i(x, y, sig, int_scat, astart, siga_in)
            b, sigb, chi2 = results
            siga = 0.0
            scatter = calc_scatter(x, y, sig, int_scat, astart, b)
        elif astart is None and bstart is not None:
            if int_scat is None:
                results = nr.fit_intercept(x, y, sig, mwt, bstart, sigb_in)
            else:
                results = nr.fit_intercept_i(x, y, sig, int_scat, bstart,
                                             sigb_in)
            a, siga, chi2 = results
            sigb = 0.0
            scatter = calc_scatter(x, y, sig, int_scat, a, bstart)
        if not (olda is None or oldb is None):
            if abs(a) < conv_limit:
                atest = conv_limit
            else:
                atest = abs(a)
            if abs(b) < conv_limit:
                btest = conv_limit
            else:
                btest = abs(b)
            if (abs(olda - a) < conv_limit*atest and
                abs(oldb - b) < conv_limit*btest):
                break
        keep = abs(y - a - b*x) < clip*scatter
        xnew = N.compress(keep, x)
        ynew = N.compress(keep, y)
        signew = N.compress(keep, sig)
        x = xnew
        y = ynew
        sig = signew
        olda, oldb = (a, b)
    n = len(x)
    return (a, b, siga, sigb, chi2, scatter, n)
def drawmeridians(self, ax, meridians, color='k', linewidth=1., \
                  linestyle='--', dashes=[1,1]):
    """
    draw meridians (longitude lines).

    ax - current axis instance.
    meridians - list containing longitude values to draw (in degrees).
    color - color to draw meridians (default black).
    linewidth - line width for meridians (default 1.)
    linestyle - line style for meridians (default '--', i.e. dashed).
    dashes - dash pattern for meridians (default [1,1], i.e. 1 pixel on,
     1 pixel off).
    """
    if self.projection not in ['merc','cyl']:
        lats = N.arange(-80,81).astype('f')
    else:
        lats = N.arange(-90,91).astype('f')
    xdelta = 0.1*(self.xmax-self.xmin)
    ydelta = 0.1*(self.ymax-self.ymin)
    for merid in meridians:
        lons = merid*N.ones(len(lats),'f')
        x,y = self(lons,lats)
        # remove points outside domain.
        testx = N.logical_and(x>=self.xmin-xdelta,x<=self.xmax+xdelta)
        x = N.compress(testx, x)
        y = N.compress(testx, y)
        testy = N.logical_and(y>=self.ymin-ydelta,y<=self.ymax+ydelta)
        x = N.compress(testy, x)
        y = N.compress(testy, y)
        if len(x) > 1 and len(y) > 1:
            # split into separate line segments if necessary.
            # (not necessary for mercator or cylindrical).
            xd = (x[1:]-x[0:-1])**2
            yd = (y[1:]-y[0:-1])**2
            dist = N.sqrt(xd+yd)
            split = dist > 500000.
            if N.sum(split) and self.projection not in ['merc','cyl']:
                ind = (N.compress(split,MLab.squeeze(split*N.indices(xd.shape)))+1).tolist()
                xl = []
                yl = []
                iprev = 0
                ind.append(len(xd))
                for i in ind:
                    xl.append(x[iprev:i])
                    yl.append(y[iprev:i])
                    iprev = i
            else:
                xl = [x]
                yl = [y]
            # draw each line segment.
            for x,y in zip(xl,yl):
                # skip if only a point.
                if len(x) > 1 and len(y) > 1:
                    l = Line2D(x,y,linewidth=linewidth,linestyle=linestyle)
                    l.set_color(color)
                    l.set_dashes(dashes)
                    ax.add_line(l)
def _rank_filter(input, rank, size=None, footprint=None, output=None,
                 mode="reflect", cval=0.0, origin=0, operation='rank'):
    input = numarray.asarray(input)
    if isinstance(input.type(), numarray.ComplexType):
        raise TypeError, 'Complex type not supported'
    origins = _ni_support._normalize_sequence(origin, input.rank)
    if footprint == None:
        if size == None:
            raise RuntimeError, "no footprint or filter size provided"
        sizes = _ni_support._normalize_sequence(size, input.rank)
        footprint = numarray.ones(sizes, type=numarray.Bool)
    else:
        footprint = numarray.asarray(footprint, type=numarray.Bool)
    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.rank:
        raise RuntimeError, 'filter footprint array has incorrect shape.'
    for origin, lenf in zip(origins, fshape):
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin > lenf):
            raise ValueError, 'invalid origin'
    if not footprint.iscontiguous():
        footprint = footprint.copy()
    filter_size = numarray.where(footprint, 1, 0).sum()
    if operation == 'median':
        rank = filter_size // 2
    elif operation == 'percentile':
        percentile = rank
        if percentile < 0.0:
            percentile += 100.0
        if percentile < 0 or percentile > 100:
            raise RuntimeError, 'invalid percentile'
        if percentile == 100.0:
            rank = filter_size - 1
        else:
            rank = int(float(filter_size) * percentile / 100.0)
    if rank < 0:
        rank += filter_size
    if rank < 0 or rank >= filter_size:
        raise RuntimeError, 'rank not within filter footprint size'
    if rank == 0:
        return minimum_filter(input, None, footprint, output, mode, cval,
                              origin)
    elif rank == filter_size - 1:
        return maximum_filter(input, None, footprint, output, mode, cval,
                              origin)
    else:
        output, return_value = _ni_support._get_output(output, input)
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.rank_filter(input, rank, footprint, output, mode, cval,
                              origins)
        return return_value
def complement(ind_arr, n):
    """ Find the complement of the set of indices in ind_arr from
    arange(n) """
    mat = numarray.ones(n)
    numarray.put(mat, ind_arr, 0)
    out = numarray.nonzero(mat)
    return out[0]
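# A quick illustrative check of complement(), assuming numarray is
# importable as above; the data are arbitrary.
import numarray
ind = numarray.array([0, 2, 5])
print complement(ind, 6)   # indices of arange(6) not in ind: [1 3 4]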
def lessthan(matrix, a):
    import numarray
    copymatrix = numarray.array(matrix)
    ones = numarray.ones([len(matrix), len(matrix[0])])
    matrix = matrix - (a) * ones
    matrix = numarray.clip(matrix, -1*1e20, 0)
    matrix = matrix * -1
    matrix = numarray.clip(matrix, -1*1e20, 10e-10)
    matrix = matrix * 1e9
    copymatrix = matrix * copymatrix
    return copymatrix
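# A sketch of what lessthan() computes (illustrative, not from the
# original source): the clipping chain builds a 0/1 mask of entries
# below a, so entries >= a are zeroed.
import numarray
m = numarray.array([[1.0, 5.0], [3.0, 0.5]])
print lessthan(m, 2.0)   # [[1. 0.] [0. 0.5]]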
def __init__(self, names, shape, elements=None):
    order = Potential.__init__(self, names)
    # sort shape in the same way names are sorted
    #print names, self.names_list, order
    #shape = na.take(shape, order)
    if elements == None:
        elements = na.ones(shape=shape)
    #elements = na.transpose(elements, axes=order)
    table.Table.__init__(self, self.names_list, shape=shape, \
                         elements=elements, type='Float32')
def exceptreplace(matrix, a):
    print a
    import numarray
    copymatrix = numarray.array(matrix)
    ones = numarray.ones([len(matrix), len(matrix[0])])
    matrix = matrix - (a-1) * ones
    matrix = numarray.clip(matrix, 0, 2)
    matrix2 = copymatrix - (a+1) * ones
    matrix2 = numarray.clip(matrix2, -2, 0)
    ##print matrix, matrix2
    matrix = copymatrix * matrix * matrix2 * (-1 * ones) / (ones * a)
    #print matrix
    return matrix
def setParameters(self, mu=None, sigma=None, wi=None, sigma_type='full', \
                  tied_sigma=False, isAdjustable=False):
    #============================================================
    # set the mean :
    # self.mean[i] = the mean for dimension i
    # self.mean.shape = (self.nvalues, q1,q2,...,qn)
    #        where qi is the size of discrete parent i
    if mu == None:
        # set all mu to zeros
        mu = na.zeros(shape=([self.nvalues]+self.discrete_parents_shape), \
                      type='Float32')
    try:
        mu = na.array(mu, shape=[self.nvalues]+self.discrete_parents_shape, \
                      type='Float32')
    except:
        raise 'Could not convert mu to numarray of shape : %s, discrete parents = %s' % (str(self.discrete_parents_shape), str([dp.name for dp in self.discrete_parents]))
    self.mean = mu

    #============================================================
    # set the covariance :
    # self.sigma[i,j] = the covariance between dimension i and j
    # self.sigma.shape = (nvalues,nvalues,q1,q2,...,qn)
    #        where qi is the size of discrete parent i
    if sigma == None:
        eye = na.identity(self.nvalues, type='Float32')[..., na.NewAxis]
        if len(self.discrete_parents) > 0:
            # number of different configurations for the parents
            q = reduce(lambda a, b: a*b, self.discrete_parents_shape)
            sigma = na.concatenate([eye]*q, axis=2)
            sigma = na.array(sigma, shape=[self.nvalues, self.nvalues] + \
                             self.discrete_parents_shape)
    try:
        sigma = na.array(sigma, shape=[self.nvalues, self.nvalues] + \
                         self.discrete_parents_shape, type='Float32')
    except:
        raise 'Not a valid covariance matrix'
    self.sigma = sigma

    #============================================================
    # set the weights :
    # self.weights[i,j] = the regression for dimension i and
    #                     continuous parent j
    # self.weights.shape = (nvalues,x1,x2,...,xn,q1,q2,...,qn)
    #        where xi is the size of continuous parent i
    #        and qi is the size of discrete parent i
    if wi == None:
        wi = na.ones(shape=[self.nvalues]+self.parents_shape, type='Float32')
    try:
        wi = na.array(wi, shape=[self.nvalues]+self.parents_shape, \
                      type='Float32')
    except:
        raise 'Not a valid weight'
    self.weights = wi
def generic_filter(input, function, size=None, footprint=None,
                   output=None, mode="reflect", cval=0.0, origin=0,
                   extra_arguments=(), extra_keywords={}):
    """Calculates a multi-dimensional filter using the given function.

    At each element the provided function is called. The input values
    within the filter footprint at that element are passed to the
    function as a 1D array of double values.

    Either a size or a footprint with the filter must be provided. An
    output array can optionally be provided. The origin parameter
    controls the placement of the filter. The mode parameter
    determines how the array borders are handled, where cval is the
    value when mode is equal to 'constant'. The extra_arguments and
    extra_keywords arguments can be used to pass extra arguments and
    keywords that are passed to the function at each call."""
    input = numarray.asarray(input)
    if isinstance(input.type(), numarray.ComplexType):
        raise TypeError, 'Complex type not supported'
    origins = _ni_support._normalize_sequence(origin, input.rank)
    if footprint == None:
        if size == None:
            raise RuntimeError, "no footprint or filter size provided"
        sizes = _ni_support._normalize_sequence(size, input.rank)
        footprint = numarray.ones(sizes, type=numarray.Bool)
    else:
        footprint = numarray.asarray(footprint, type=numarray.Bool)
    fshape = [ii for ii in footprint.shape if ii > 0]
    if len(fshape) != input.rank:
        raise RuntimeError, 'filter footprint array has incorrect shape.'
    for origin, lenf in zip(origins, fshape):
        if (lenf // 2 + origin < 0) or (lenf // 2 + origin > lenf):
            raise ValueError, 'invalid origin'
    if not footprint.iscontiguous():
        footprint = footprint.copy()
    output, return_value = _ni_support._get_output(output, input)
    mode = _ni_support._extend_mode_to_code(mode)
    _nd_image.generic_filter(input, function, footprint, output, mode,
                             cval, origins, extra_arguments, extra_keywords)
    return return_value
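# A minimal usage sketch for generic_filter (illustrative, assuming
# numarray is imported as above): a 3x3 moving average expressed
# through a Python callback.
import numarray
def local_mean(values):
    # values is the 1D array of footprint values at each position
    return numarray.sum(values) / 9.0
a = numarray.array(range(25), shape=(5, 5), type=numarray.Float64)
b = generic_filter(a, local_mean, size=3)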
def calcprob(beta, x):
    """
    calculate probabilities (in percent) given beta and x
    """
    try:
        N, npreds = x.shape[1], x.shape[0]
    except:  # single predictor, x is a vector, len(beta)=2.
        N, npreds = len(x), 1
    if len(beta) != npreds+1:
        raise ValueError, 'sizes of beta and x do not match!'
    if npreds == 1:  # simple logistic regression
        return 100.*NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
    X = NA.ones((npreds+1, N), x.typecode())
    X[1:, :] = x
    ebx = NA.exp(NA.dot(beta, X))
    return 100.*ebx/(1.+ebx)
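# Illustrative call for the single-predictor branch of calcprob,
# assuming NA is numarray and beta = [intercept, slope] from a
# logistic fit; the numbers are arbitrary.
import numarray as NA
beta = NA.array([-1.0, 2.0])
x = NA.array([0.0, 0.5, 1.0])
print calcprob(beta, x)   # logistic probabilities, in percent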
def curvatures(xData, yData):
    n = len(xData) - 1
    c = zeros((n), type=Float64)
    d = ones((n+1), type=Float64)
    e = zeros((n), type=Float64)
    k = zeros((n+1), type=Float64)
    c[0:n-1] = xData[0:n-1] - xData[1:n]
    d[1:n] = 2.0*(xData[0:n-1] - xData[2:n+1])
    e[1:n] = xData[1:n] - xData[2:n+1]
    k[1:n] = 6.0*(yData[0:n-1] - yData[1:n]) \
                /(xData[0:n-1] - xData[1:n]) \
           - 6.0*(yData[1:n] - yData[2:n+1]) \
                /(xData[1:n] - xData[2:n+1])
    LUdecomp3(c, d, e)
    LUsolve3(c, d, e, k)
    return k
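# A hedged example for curvatures(): natural cubic spline curvatures
# at five knots.  Assumes numarray and the LUdecomp3 module, as in
# example2_11 below; the knot data are arbitrary.
from numarray import array
xData = array([0.0, 1.0, 2.0, 3.0, 4.0])
yData = array([0.0, 1.0, 0.0, 1.0, 0.0])
k = curvatures(xData, yData)   # k[i] = y''(x[i]) of the spline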
def plotsig10sffall(sigspec, sigphot, sf, file, nbin):
    psplot = file + ".ps"
    psplotinit(psplot)
    ppgplot.pgbox("", 0.0, 0, "L", 0.0, 0)
    ymin = -.01
    ymax = 1.01
    #xmin=min(sigspec)-10.
    #xmax=max(sig)-200.
    #xmax=400.
    xmin = -1.
    xmax = 2.7
    ppgplot.pgenv(xmin, xmax, ymin, ymax, 0, 10)
    ppgplot.pglab("\gS\d10\u (gal/Mpc\u2\d)", "Fraction EW([OII])>4 \(2078)", "")
    ppgplot.pgsls(1)  #dotted
    ppgplot.pgslw(4)  #line width

    tot = N.ones(len(sf), 'f')
    (sigbin, sfbin) = my.binitsumequal(sigspec, sf, nbin)
    (sigbin, totbin) = my.binitsumequal(sigspec, tot, nbin)
    (sff, sfferr) = my.ratioerror(sfbin, totbin)
    #sig=N.array(sig,'f')
    #sff=N.array(sff,'f')
    ppgplot.pgsci(2)
    sigbin = N.log10(sigbin)
    ppgplot.pgline(sigbin, sff)
    ppgplot.pgsci(1)
    ppgplot.pgpt(sigbin, sff, 17)
    my.errory(sigbin, sff, sfferr)

    (sigbin, sfbin) = my.binitsumequal(sigphot, sf, nbin)
    (sigbin, totbin) = my.binitsumequal(sigphot, tot, nbin)
    (sff, sfferr) = my.ratioerror(sfbin, totbin)
    #sig=N.array(sig,'f')
    #sff=N.array(sff,'f')
    ppgplot.pgslw(4)  #line width
    ppgplot.pgsci(4)
    sigbin = N.log10(sigbin)
    ppgplot.pgline(sigbin, sff)
    ppgplot.pgsci(1)
    ppgplot.pgpt(sigbin, sff, 21)
    #my.errory(sigbin,sff,sfferr)
    ppgplot.pgend()
def OOFColumns(nrows):
    "Create an empty OOF table"
    coldefs = [
        pyfits.Column("DX", "E", "radians", array=numarray.zeros(nrows)),
        pyfits.Column("DY", "E", "radians", array=numarray.zeros(nrows)),
        pyfits.Column("FNu", "E", "Jy", array=numarray.zeros(nrows)),
        pyfits.Column("UFNu", "E", "Jy", array=numarray.ones(nrows)),
        pyfits.Column("Time", "E", "d", array=numarray.zeros(nrows))
    ]
    nh = pyfits.new_table(coldefs)
    return nh
def __init__(self, v, mu=None, sigma=None, wi=None, \
             sigma_type='full', tied_sigma=False, \
             isAdjustable=True, ignoreFamily=False):
    Distribution.__init__(self, v, isAdjustable=isAdjustable, \
                          ignoreFamily=ignoreFamily)
    self.distribution_type = 'Gaussian'

    # check that current node is continuous
    if v.discrete:
        raise 'Node must be continuous'

    self.discrete_parents = [parent for parent in self.parents \
                             if parent.discrete]
    self.continuous_parents = [parent for parent in self.parents \
                               if not parent.discrete]

    self.discrete_parents_shape = [dp.nvalues for dp \
                                   in self.discrete_parents]
    self.parents_shape = [p.nvalues for p in self.parents]
    if not self.parents_shape:
        self.parents_shape = [0]

    # set defaults
    # set all mu to zeros
    self.mean = na.zeros(shape=([self.nvalues] + \
                         self.discrete_parents_shape), type='Float32')

    # set sigma to ones along the diagonal
    eye = na.identity(self.nvalues, type='Float32')[..., na.NewAxis]
    if len(self.discrete_parents) > 0:
        # number of different configurations for the parents
        q = reduce(lambda a, b: a * b, self.discrete_parents_shape)
        sigma = na.concatenate([eye] * q, axis=2)
    self.sigma = na.array(sigma, shape=[self.nvalues, self.nvalues] + \
                          self.discrete_parents_shape)

    # set weights to ones
    self.weights = na.ones(shape=[self.nvalues] + self.parents_shape,
                           type='Float32')

    # set the parameters : mean, sigma, weights
    self.setParameters(mu=mu, sigma=sigma, wi=wi, sigma_type=sigma_type, \
                       tied_sigma=tied_sigma, isAdjustable=isAdjustable)
def azmr(self):
    x = N.compress((self.mpaflag > 0.1) & (self.ew > 4.) &
                   (self.Mabs < -18.), self.Mabs)
    y = N.compress((self.mpaflag > 0.1) & (self.ew > 4.) &
                   (self.Mabs < -18.), self.ar)
    x1 = N.compress((self.mpaflag > 0.1) & (self.ew > 4.) &
                    (self.Mabs < -20.38), self.Mabs)
    y1 = N.compress((self.mpaflag > 0.1) & (self.ew > 4.) &
                    (self.Mabs < -20.38), self.ar)
    y = 2.5*N.log10(y)
    #pylab.plot(x,y,'k.',markersize=0.1,zorder=1)
    print "average Ar for Mr < -20.38 = %5.2f +/- %5.2f" % \
        (N.average(y1), pylab.std(y1))
    (xbin, ybin) = my.binit(x1, y1, 20)
    #(xbin,ybin,ybinerr)=my.biniterr(x,y,20)
    for i in range(len(xbin)):
        print i, xbin[i], ybin[i]
    print "Average of binned values = ", N.average(ybin)
    print "average Ar for Mr < -20.38 = %5.2f +/- %5.2f" % \
        (N.average(N.log10(y1)), pylab.std(N.log10(y1)))
    #pylab.axis([-26.,-12.,0.1,30.])
    pylab.xlabel(r'$\rm{M_r}$', fontsize=28.)
    pylab.ylabel(r'$\rm{A_r}$', fontsize=28.)
    (xbin, ybin) = my.binit(x, y, 20)
    #(xbin,ybin,ybinerr)=my.biniterr(x,y,20)
    for i in range(len(xbin)):
        print i, xbin[i], ybin[i]
    pylab.plot(xbin, ybin, 'r-', lw=5)
    ax = pylab.gca()
    xmin = -24.
    xmax = -18.
    ymin = -1.
    ymax = 3.
    my.contourf(x, y, xmin, xmax, ymin, ymax)
    pylab.axvline(x=-20.6, linewidth=3, ls='--', c='g')
    xl = N.arange(-23., -20.5, .2)
    yl = 0.76*N.ones(len(xl), 'f')
    pylab.plot(xl, yl, 'b-', lw=3)
    pylab.axis([-24., -18, -1., 2.4])
    #ax.set_yscale('log')
    #pylab.show()
    pylab.savefig('armr.eps')
    print "fraction w/MPA stellar mass and Az = ", \
        N.sum(self.mpaflag)/(1.*len(self.mpaflag))
def __init__(self, names, shape = None, elements = None, type = 'Float32'):
    ''' names = ['a','b',...]
        shape = (2,3,...) (default: binary)
        elements = [0,1,2,....] (a list or a numarray, default: all ones)
        type = 'Float32' or 'Float64' or 'UInt8', etc... (default: Float32)
    '''
    # set default parameters
    if shape == None:
        shape = [2]*len(names)
    if elements == None:
        elements = na.ones(shape=shape)
    self.cpt = na.array(sequence=elements, shape=shape, type=type)

    self.names = set(names)
    # just to keep the order in an easy to use way
    self.names_list = list(names)

    # dict of name:dim number pairs
    self.assocdim = dict(zip(self.names_list, range(len(self.names_list))))
    # dict of dim:name pairs
    self.assocname = dict(enumerate(self.names_list))
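# A small sketch of the constructor above: a 2x3 all-ones table over
# variables 'a' and 'b' (illustrative data).
t = Table(['a', 'b'], shape=(2, 3))
print t.assocdim    # {'a': 0, 'b': 1}
print t.cpt.shape   # (2, 3)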
def lamRange(d, c, N):
    lamMin, lamMax = gerschgorin(d, c)
    r = ones((N+1), type=Float64)
    r[0] = lamMin
    # Search for eigenvalues in descending order
    for k in range(N, 0, -1):
        # First bisection of interval(lamMin,lamMax)
        lam = (lamMax + lamMin)/2.0
        h = (lamMax - lamMin)/2.0
        for i in range(1000):
            # Find number of eigenvalues less than lam
            p = sturmSeq(d, c, lam)
            numLam = numLambdas(p)
            # Bisect again & find the half containing lam
            h = h/2.0
            if numLam < k:
                lam = lam + h
            elif numLam > k:
                lam = lam - h
            else:
                break
        # If eigenvalue located, change the upper limit
        # of search and record it in [r]
        lamMax = lam
        r[k] = lam
    return r
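# An illustrative use of sturmSeq/lamRange: bracket the three smallest
# eigenvalues of a symmetric tridiagonal matrix.  Assumes gerschgorin()
# and numLambdas() from the same module are in scope; the matrix is
# an arbitrary example.
from numarray import ones, Float64
d = ones((5), type=Float64)*2.0     # main diagonal
c = ones((4), type=Float64)*(-1.0)  # off-diagonal
r = lamRange(d, c, 3)               # r[k-1], r[k] bracket eigenvalue k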
def JCMTArrayToHDU(ar, pixsize_arcsecs, dz):
    "Convert array to table HDU"
    dx = numarray.zeros(ar.shape, numarray.Float64)
    dy = numarray.zeros(ar.shape, numarray.Float64)

    for j in range(ar.shape[1]):
        dx[:, j] = numarray.arange(-ar.shape[0]/2.0 * pixsize_arcsecs,
                                   ar.shape[0]/2.0 * pixsize_arcsecs,
                                   pixsize_arcsecs)
    for i in range(ar.shape[0]):
        dy[i, :] = numarray.arange(-ar.shape[1]/2.0 * pixsize_arcsecs,
                                   ar.shape[1]/2.0 * pixsize_arcsecs,
                                   pixsize_arcsecs)

    # Convert to radians
    dx *= math.pi / 180 / 3600
    dy *= math.pi / 180 / 3600

    coldefs = [
        pyfits.Column("DX", "E", "radians", array=dx.flat),
        pyfits.Column("DY", "E", "radians", array=dy.flat),
        pyfits.Column("fnu", "E", "Jy", array=ar.flat),
        pyfits.Column("UFNU", "E", "Jy",
                      array=numarray.ones(len(dx.flat))),
        pyfits.Column("TIME", "E", "d",
                      array=numarray.arange(len(dx.flat)))
    ]
    nh = pyfits.new_table(coldefs)
    nh.header.update("dz", dz)
    return nh
def convarray(self):
    self.z = N.array(self.z, 'f')
    self.ra = N.array(self.ra, 'f')
    self.dec = N.array(self.dec, 'f')
    self.distBCG = N.array(self.distBCG, 'f')
    self.distBCGR200 = N.array(self.distBCGR200, 'f')
    self.dz = N.array(self.dz, 'f')
    self.o2 = N.array(self.o2, 'f')
    self.erro2 = N.array(self.erro2, 'f')
    self.u = N.array(self.u, 'f')
    self.g = N.array(self.g, 'f')
    self.r = N.array(self.r, 'f')
    self.i = N.array(self.i, 'f')
    self.zm = N.array(self.zm, 'f')
    self.V = N.array(self.V, 'f')
    #self.V=self.V-0.77#convert to h=0.7
    self.memb = N.zeros(len(self.distBCGR200), 'f')
    self.sf = N.zeros(len(self.distBCGR200), 'f')
    self.tot = N.ones(len(self.distBCGR200), 'f')
    for i in range(len(self.memb)):
        if (self.distBCGR200[i] < 1):
            self.memb[i] = 1.
        if (self.o2[i] < -4.):
            self.sf[i] = 1.
## example9_6
from numarray import ones, zeros, Float64
from inversePower5 import *

def Bv(v):  # Compute {z} = [B]{v}
    n = len(v)
    z = zeros((n), type=Float64)
    z[0] = 2.0*v[0] - v[1]
    for i in range(1, n-1):
        z[i] = -v[i-1] + 2.0*v[i] - v[i+1]
    z[n-1] = -v[n-2] + 2.0*v[n-1]
    return z

n = 100               # Number of interior nodes
d = ones((n))*6.0     # Specify diagonals of [A] = [f\e\d\e\f]
d[0] = 5.0
d[n-1] = 7.0
e = ones((n-1))*(-4.0)
f = ones((n-2))*1.0
lam, x = inversePower5(Bv, d, e, f)
print "PL^2/EI =", lam*(n + 1)**2
raw_input("\nPress return to exit")
def create_prediction_success_table(LCM, location_set, observed_choices_id,
                                    geographies=[], choice_method="mc",
                                    data_objects=None):
    """this function creates a table tabulating number of agents observed
    versus predicted by geographies for location choice model

    LCM is an instance of Location Choice Model after run_estimation,
    location_set is the set of location in simulation, e.g. gridcell,
    observed_choice_id is the location_set id (e.g. grid_id) observed,
    geographies is a list of geographies to create prediction success
    table for,
    choice_method is the method used to select choice for agents,
    either mc or max_prob,
    data_objects is the same as data_objects used to run LCM simulation,
    but includes entries for geographies
    """
    LCM.simulate_step()
    choices = sample_choice(LCM.model.probabilities, choice_method)
    # translate choices into index of location_set
    choices_index = LCM.model_resources.translate("index")[choices]
    # maxprob_choices = sample_choice(LCM.model.probabilities, method="max_prob")  #max prob choice
    # maxprob_choices_index = LCM.model_resources.translate("index")[maxprob_choices]
    results = []
    gcs = location_set
    for geography in geographies:
        geo = data_objects.translate(geography)

        # get geo_id for observed agents
        gc_index = gcs.get_id_index(observed_choices_id)
        if geo.id_name[0] not in gcs.get_attribute_names():
            gcs.compute_variables(geo.id_name[0], resources=data_objects)
        geo_ids_obs = gcs.get_attribute(geo.id_name[0])[gc_index]
        # obs = copy.deepcopy(agent_set)
        # obs.subset_by_index(agents_index)
        # obs.set_values_of_one_attribute(gcs.id_name[0], observed_choices_id)
        # resources.merge({"household": obs})  #, "gridcell": gcs, "zone": zones, "faz":fazes})
        # obs.compute_variables(geo.id_name[0], resources=resources)
        # obs_geo_ids = obs.get_attribute(geo.id_name[0])

        # get geo_id for simulated agents
        geo_ids_sim = gcs.get_attribute(geo.id_name[0])[choices_index]
        # sim = copy_dataset(obs)
        # sim.set_values_of_one_attribute(gcs.id_name[0], gcs.get_id_attribute()[mc_choices_index])
        # resources.merge({"household": sim})

        geo_size = geo.size()
        myids = geo.get_id_attribute()

        pred_matrix = zeros((geo_size, geo_size))
        p_success = zeros((geo_size,)).astype(Float32)

        f = 0
        for geo_id in myids:
            # get simulated geo_id for agents observed in this geo_id
            ids = geo_ids_sim[where(geo_ids_obs == geo_id)]
            # resources.merge({"agents_index": agents_index_in_geo, "agent":sim})
            what = ones(ids.size())
            pred_matrix[f] = array(nd_image_sum(what, labels=ids,
                                                index=myids))
            print pred_matrix[f]
            if sum(pred_matrix[f]) > 0:
                p_success[f] = float(pred_matrix[f, f]) / sum(pred_matrix[f])
            # sim.increment_version(gcs.id_name[0])  #to trigger recomputation in next iteration
            f += 1
        print p_success
        results.append((pred_matrix.copy(), p_success.copy()))
    return results
try:
    print 'mean=', '%.1f' % xsky, '+/-',
    data = []
    for t in sky:
        data.append(t[3])
    skysig = xits(data, 3.)[2]
    print '%.1f' % skysig
except:
    pass
pgsci(1)
if d[2] == 'h':
    stretch = abs(stretch - 1)
try:
    pix2[0][0]
except:
    pix2 = numarray.ones((pix.getshape()[0], pix.getshape()[1]), 'Float32')
    for x in range(pix.getshape()[0]):
        for y in range(pix.getshape()[1]):
            pix2[x][y] = asinh(pix[x][y] / (2. * skysig))
if d[2] == '?':
    os.system('clear')
    print
    print '^c = abort                / = move to next frame'
    print ' c = contrast             x = set contrast values'
    print ' z = zoom                 r = reset zoom'
    print ' Z = slide zoom'
    print ' p = peek at values       t = toggle ellipse plot'
    print ' . = mark position        , = clear marks'
    print ' l = mark label (s.tmp)   i = .ims ellipses (s.tmp)'
    print ' a,1-9 = delete circle    b = delete box'
def AllOnes(self):
    self.val = -1
    self.cpt = na.ones(self.cpt.shape, type='Float32')
def drawmeridians(self, ax, meridians, color='k', linewidth=1., \
                  linestyle='--', dashes=[1,1], labels=[0,0,0,0], \
                  font='rm', fontsize=12):
    """
    draw meridians (longitude lines).

    ax - current axis instance.
    meridians - list containing longitude values to draw (in degrees).
    color - color to draw meridians (default black).
    linewidth - line width for meridians (default 1.)
    linestyle - line style for meridians (default '--', i.e. dashed).
    dashes - dash pattern for meridians (default [1,1], i.e. 1 pixel on,
     1 pixel off).
    labels - list of 4 values (default [0,0,0,0]) that control whether
     meridians are labelled where they intersect the left, right, top or
     bottom of the plot.  For example labels=[1,0,0,1] will cause
     meridians to be labelled where they intersect the left and bottom of
     the plot, but not the right and top.  Labels are located with a
     precision of 0.1 degrees and are drawn using mathtext.
    font - mathtext font used for labels ('rm','tt','it' or 'cal',
     default 'rm').
    fontsize - font size in points for labels (default 12).
    """
    # don't draw meridians past latmax, always draw parallel at latmax.
    latmax = 80.  # not used for cyl, merc projections.
    # offset for labels.
    yoffset = (self.urcrnry-self.llcrnry)/100./self.aspect
    xoffset = (self.urcrnrx-self.llcrnrx)/100.

    if self.projection not in ['merc','cyl']:
        lats = N.arange(-latmax,latmax+1).astype('f')
    else:
        lats = N.arange(-90,91).astype('f')
    xdelta = 0.1*(self.xmax-self.xmin)
    ydelta = 0.1*(self.ymax-self.ymin)
    for merid in meridians:
        lons = merid*N.ones(len(lats),'f')
        x,y = self(lons,lats)
        # remove points outside domain.
        testx = N.logical_and(x>=self.xmin-xdelta,x<=self.xmax+xdelta)
        x = N.compress(testx, x)
        y = N.compress(testx, y)
        testy = N.logical_and(y>=self.ymin-ydelta,y<=self.ymax+ydelta)
        x = N.compress(testy, x)
        y = N.compress(testy, y)
        if len(x) > 1 and len(y) > 1:
            # split into separate line segments if necessary.
            # (not necessary for mercator or cylindrical).
            xd = (x[1:]-x[0:-1])**2
            yd = (y[1:]-y[0:-1])**2
            dist = N.sqrt(xd+yd)
            split = dist > 500000.
            if N.sum(split) and self.projection not in ['merc','cyl']:
                ind = (N.compress(split,pylab.squeeze(split*N.indices(xd.shape)))+1).tolist()
                xl = []
                yl = []
                iprev = 0
                ind.append(len(xd))
                for i in ind:
                    xl.append(x[iprev:i])
                    yl.append(y[iprev:i])
                    iprev = i
            else:
                xl = [x]
                yl = [y]
            # draw each line segment.
            for x,y in zip(xl,yl):
                # skip if only a point.
                if len(x) > 1 and len(y) > 1:
                    l = Line2D(x,y,linewidth=linewidth,linestyle=linestyle)
                    l.set_color(color)
                    l.set_dashes(dashes)
                    ax.add_line(l)
    # draw labels for meridians.
    # search along edges of map to see if parallels intersect.
    # if so, find x,y location of intersection and draw a label there.
    if self.projection == 'cyl':
        dx = 0.01; dy = 0.01
    elif self.projection == 'merc':
        dx = 0.01; dy = 1000
    else:
        dx = 1000; dy = 1000
    for dolab,side in zip(labels,['l','r','t','b']):
        if not dolab:
            continue
        # for cyl or merc, don't draw meridians on left or right.
        if self.projection in ['cyl','merc'] and side in ['l','r']:
            continue
        if side in ['l','r']:
            nmax = int((self.ymax-self.ymin)/dy+1)
            if self.urcrnry < self.llcrnry:
                yy = self.llcrnry-dy*N.arange(nmax)
            else:
                yy = self.llcrnry+dy*N.arange(nmax)
            if side == 'l':
                lons,lats = self(self.llcrnrx*N.ones(yy.shape,'f'),yy,inverse=True)
            else:
                lons,lats = self(self.urcrnrx*N.ones(yy.shape,'f'),yy,inverse=True)
            lons = N.where(lons < 0, lons+360, lons)
            lons = [int(lon*10) for lon in lons.tolist()]
            lats = [int(lat*10) for lat in lats.tolist()]
        else:
            nmax = int((self.xmax-self.xmin)/dx+1)
            if self.urcrnrx < self.llcrnrx:
                xx = self.llcrnrx-dx*N.arange(nmax)
            else:
                xx = self.llcrnrx+dx*N.arange(nmax)
            if side == 'b':
                lons,lats = self(xx,self.llcrnry*N.ones(xx.shape,'f'),inverse=True)
            else:
                lons,lats = self(xx,self.urcrnry*N.ones(xx.shape,'f'),inverse=True)
            lons = N.where(lons < 0, lons+360, lons)
            lons = [int(lon*10) for lon in lons.tolist()]
            lats = [int(lat*10) for lat in lats.tolist()]
        for lon in meridians:
            if lon < 0:
                lon = lon + 360.
            # find index of meridian (there may be two, so
            # search from left and right).
            try:
                nl = lons.index(int(lon*10))
            except:
                nl = -1
            try:
                nr = len(lons)-lons[::-1].index(int(lon*10))-1
            except:
                nr = -1
            if lon > 180:
                lonlab = r'$\%s{%g\/^{\circ}\/W}$' % (font,N.fabs(lon-360))
            elif lon < 180 and lon != 0:
                lonlab = r'$\%s{%g\/^{\circ}\/E}$' % (font,lon)
            else:
                lonlab = r'$\%s{%g\/^{\circ}}$' % (font,lon)
            # meridians can intersect each map edge twice.
            for i,n in enumerate([nl,nr]):
                lat = lats[n]/10.
                # no meridians > latmax for projections other than merc,cyl.
                if self.projection not in ['merc','cyl'] and lat > latmax:
                    continue
                # don't bother if close to the first label.
                if i and abs(nr-nl) < 100:
                    continue
                if n > 0:
                    if side == 'l':
                        pylab.text(self.llcrnrx-xoffset,yy[n],lonlab,horizontalalignment='right',verticalalignment='center',fontsize=fontsize)
                    elif side == 'r':
                        pylab.text(self.urcrnrx+xoffset,yy[n],lonlab,horizontalalignment='left',verticalalignment='center',fontsize=fontsize)
                    elif side == 'b':
                        pylab.text(xx[n],self.llcrnry-yoffset,lonlab,horizontalalignment='center',verticalalignment='top',fontsize=fontsize)
                    else:
                        pylab.text(xx[n],self.urcrnry+yoffset,lonlab,horizontalalignment='center',verticalalignment='bottom',fontsize=fontsize)
    # make sure axis ticks are turned off
    ax.set_xticks([])
    ax.set_yticks([])
def train(self, train_toks, **kwargs):
    """
    Train a new C{ConditionalExponentialClassifier}, using the
    given training samples.  This C{ConditionalExponentialClassifier}
    should encode the model that maximizes entropy from all the
    models that are empirically consistent with C{train_toks}.

    @param kwargs: Keyword arguments.
      - C{iterations}: The maximum number of times IIS should
        iterate.  If IIS converges before this number of
        iterations, it may terminate.  Default=C{20}.  (type=C{int})
      - C{debug}: The debugging level.  Higher values will cause
        more verbose output.  Default=C{0}.  (type=C{int})
      - C{classes}: The set of possible classes.  If none is given,
        then the set of all classes attested in the training data
        will be used instead.  (type=C{list} of (immutable)).
      - C{accuracy_cutoff}: The accuracy value that indicates
        convergence.  If the accuracy becomes closer to one than
        the specified value, then IIS will terminate.  The default
        value is None, which indicates that no accuracy cutoff
        should be used.  (type=C{float})
      - C{delta_accuracy_cutoff}: The change in accuracy that
        should be taken to indicate convergence.  If the accuracy
        changes by less than this value in a single iteration, then
        IIS will terminate.  The default value is C{None}, which
        indicates that no accuracy-change cutoff should be used.
        (type=C{float})
      - C{log_likelihood_cutoff}: specifies what log-likelihood
        value should be taken to indicate convergence.  If the
        log-likelihood becomes closer to zero than the specified
        value, then IIS will terminate.  The default value is
        C{None}, which indicates that no log-likelihood cutoff
        should be used.  (type=C{float})
      - C{delta_log_likelihood_cutoff}: specifies what change in
        log-likelihood should be taken to indicate convergence.
        If the log-likelihood changes by less than this value in a
        single iteration, then IIS will terminate.  The default
        value is C{None}, which indicates that no
        log-likelihood-change cutoff should be used.  (type=C{float})
    """
    assert _chktype(1, train_toks, [Token], (Token,))
    # Process the keyword arguments.
    iter = 20
    debug = 0
    classes = None
    ll_cutoff = lldelta_cutoff = None
    acc_cutoff = accdelta_cutoff = None
    for (key, val) in kwargs.items():
        if key in ('iterations', 'iter'): iter = val
        elif key == 'debug': debug = val
        elif key == 'classes': classes = val
        elif key == 'log_likelihood_cutoff':
            ll_cutoff = abs(val)
        elif key == 'delta_log_likelihood_cutoff':
            lldelta_cutoff = abs(val)
        elif key == 'accuracy_cutoff':
            acc_cutoff = abs(val)
        elif key == 'delta_accuracy_cutoff':
            accdelta_cutoff = abs(val)
        else:
            raise TypeError('Unknown keyword arg %s' % key)
    if classes is None:
        classes = attested_classes(train_toks)
        self._classes = classes

    # Find the classes, if necessary.
    if classes is None:
        classes = find_classes(train_toks)

    # Find the length of the first token's feature vector.
    if len(train_toks) == 0:
        raise ValueError('Expected at least one training token')
    vector0 = train_toks[0]['FEATURE_VECTOR']
    self._feature_vector_len = len(vector0)
    self._weight_vector_len = self._feature_vector_len * len(self._classes)

    # Build the offsets dictionary.  This maps from a class to the
    # index in the weight vector where that class's weights begin.
    self._offsets = dict([(cls, i * self._feature_vector_len)
                          for i, cls in enumerate(classes)])

    # Find the frequency with which each feature occurs in the
    # training data.
    ffreq_emperical = self._ffreq_emperical(train_toks)

    # Find the nf map, and related variables nfarray and nfident.
    # nf is the sum of the features for a given labeled text.
    # nfmap compresses this sparse set of values to a dense list.
    # nfarray performs the reverse operation.  nfident is
    # nfarray multiplied by an identity matrix.
    nfmap = self._nfmap(train_toks)
    nfs = nfmap.items()
    nfs.sort(lambda x, y: cmp(x[1], y[1]))
    nfarray = numarray.array([nf for (nf, i) in nfs], 'd')
    nftranspose = numarray.reshape(nfarray, (len(nfarray), 1))

    # An array that is 1 whenever ffreq_emperical is zero.  In
    # other words, it is one for any feature that's not attested
    # in the data.  This is used to avoid division by zero.
    unattested = numarray.zeros(self._weight_vector_len, 'd')
    for i in range(len(unattested)):
        if ffreq_emperical[i] == 0:
            unattested[i] = 1

    # Build the classifier.  Start with weight=1 for each feature,
    # except for the unattested features.  Start those out at
    # zero, since we know that's the correct value.
    weights = numarray.ones(self._weight_vector_len, 'd')
    weights -= unattested
    classifier = ConditionalExponentialClassifier(classes, weights)

    if debug > 0:
        print '  ==> Training (%d iterations)' % iter
    if debug > 2:
        print
        print '      Iteration    Log Likelihood    Accuracy'
        print '      ---------------------------------------'

    # Train for a fixed number of iterations.
    for iternum in range(iter):
        if debug > 2:
            print ('     %9d    %14.5f    %9.3f' %
                   (iternum,
                    classifier_log_likelihood(classifier, train_toks),
                    classifier_accuracy(classifier, train_toks)))

        # Calculate the deltas for this iteration, using Newton's method.
        deltas = self._deltas(train_toks, classifier, unattested,
                              ffreq_emperical, nfmap, nfarray,
                              nftranspose)

        # Use the deltas to update our weights.
        weights = classifier.weights()
        weights *= numarray.exp(deltas)
        classifier.set_weights(weights)

        # Check log-likelihood cutoffs.
        if ll_cutoff is not None or lldelta_cutoff is not None:
            ll = classifier_log_likelihood(classifier, train_toks)
            if ll_cutoff is not None and ll > -ll_cutoff:
                break
            if lldelta_cutoff is not None:
                if (ll - ll_old) < lldelta_cutoff:
                    break
                ll_old = ll

        # Check accuracy cutoffs.
        if acc_cutoff is not None or accdelta_cutoff is not None:
            acc = classifier_accuracy(classifier, train_toks)
            if acc_cutoff is not None and acc < acc_cutoff:
                break
            if accdelta_cutoff is not None:
                if (acc_old - acc) < accdelta_cutoff:
                    break
                acc_old = acc

    if debug > 2:
        print ('     %9d    %14.5f    %9.3f' %
               (iternum + 1,
                classifier_log_likelihood(classifier, train_toks),
                classifier_accuracy(classifier, train_toks)))
        print

    # Return the classifier.
    return classifier
def plotXYSVG(drawSpace, dataX, dataY, rank=0, dataLabel=[],
              plotColor="black", axesColor="black", labelColor="black",
              symbolColor="red", XLabel=None, YLabel=None, title=None,
              fitcurve=None, connectdot=1, displayR=None, loadingPlot=0,
              offset=(80, 20, 40, 60), zoom=1, specialCases=[],
              showLabel=1):
    'displayR : correlation scatter plot, loadings : loading plot'

    dataXRanked, dataYRanked = webqtlUtil.calRank(dataX, dataY, len(dataX))

    # Switching Ranked and Unranked X and Y values if a Spearman Rank
    # Correlation
    if rank == 0:
        dataXPrimary = dataX
        dataYPrimary = dataY
        dataXAlt = dataXRanked
        dataYAlt = dataYRanked
    else:
        dataXPrimary = dataXRanked
        dataYPrimary = dataYRanked
        dataXAlt = dataX
        dataYAlt = dataY

    xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset
    plotWidth = drawSpace.attributes['width'] - xLeftOffset - xRightOffset
    plotHeight = drawSpace.attributes['height'] - yTopOffset - yBottomOffset
    if plotHeight <= 0 or plotWidth <= 0:
        return
    if len(dataXPrimary) < 1 or len(dataXPrimary) != len(dataYPrimary) \
       or (dataLabel and len(dataXPrimary) != len(dataLabel)):
        return

    max_X = max(dataXPrimary)
    min_X = min(dataXPrimary)
    max_Y = max(dataYPrimary)
    min_Y = min(dataYPrimary)

    #for some reason I forgot why I need to do this
    if loadingPlot:
        min_X = min(-0.1, min_X)
        max_X = max(0.1, max_X)
        min_Y = min(-0.1, min_Y)
        max_Y = max(0.1, max_Y)

    xLow, xTop, stepX = detScale(min_X, max_X)
    yLow, yTop, stepY = detScale(min_Y, max_Y)
    xScale = plotWidth/(xTop-xLow)
    yScale = plotHeight/(yTop-yLow)

    #draw drawing region
    r = svg.rect(xLeftOffset, yTopOffset, plotWidth, plotHeight,
                 'none', axesColor, 1)
    drawSpace.addElement(r)

    #calculate data points
    data = map(lambda X, Y: (X, Y), dataXPrimary, dataYPrimary)
    xCoord = map(lambda X, Y: ((X-xLow)*xScale + xLeftOffset,
                               yTopOffset+plotHeight-(Y-yLow)*yScale),
                 dataXPrimary, dataYPrimary)
    labelFontF = "verdana"
    labelFontS = 11

    if loadingPlot:
        xZero = -xLow*xScale+xLeftOffset
        yZero = yTopOffset+plotHeight+yLow*yScale
        for point in xCoord:
            drawSpace.addElement(svg.line(xZero, yZero, point[0], point[1],
                                          "red", 1))
    else:
        if connectdot:
            pass
            #drawSpace.drawPolygon(xCoord,edgeColor=plotColor,closed=0)
        else:
            pass

    for i, item in enumerate(xCoord):
        if dataLabel and dataLabel[i] in specialCases:
            drawSpace.addElement(svg.rect(item[0]-3, item[1]-3, 6, 6,
                                          "none", "green", 0.5))
            #drawSpace.drawCross(item[0],item[1],color=pid.blue,size=5)
        else:
            drawSpace.addElement(svg.line(item[0], item[1]+5,
                                          item[0], item[1]-5,
                                          symbolColor, 1))
            drawSpace.addElement(svg.line(item[0]+5, item[1],
                                          item[0]-5, item[1],
                                          symbolColor, 1))
        if showLabel and dataLabel:
            drawSpace.addElement(svg.text(item[0], item[1]+14,
                                          dataLabel[i], labelFontS,
                                          labelFontF, text_anchor="middle",
                                          style="stroke:blue;stroke-width:0.5;"))
            #canvas.drawString(, item[0]- canvas.stringWidth(dataLabel[i],
            #    font=labelFont)/2, item[1]+14, font=labelFont,
            #    color=pid.blue)

    #draw scale
    #scaleFont=pid.Font(ttf="cour",size=14,bold=1)
    x = xLow
    for i in range(stepX+1):
        xc = xLeftOffset+(x-xLow)*xScale
        drawSpace.addElement(svg.line(xc, yTopOffset+plotHeight,
                                      xc, yTopOffset+plotHeight+5,
                                      axesColor, 1))
        strX = cformat(d=x, rank=rank)
        drawSpace.addElement(svg.text(xc, yTopOffset+plotHeight+20, strX,
                                      13, "courier", text_anchor="middle"))
        x += (xTop - xLow)/stepX

    y = yLow
    for i in range(stepY+1):
        yc = yTopOffset+plotHeight-(y-yLow)*yScale
        drawSpace.addElement(svg.line(xLeftOffset, yc, xLeftOffset-5, yc,
                                      axesColor, 1))
        strY = cformat(d=y, rank=rank)
        drawSpace.addElement(svg.text(xLeftOffset-10, yc+5, strY,
                                      13, "courier", text_anchor="end"))
        y += (yTop - yLow)/stepY

    #draw label
    labelFontF = "verdana"
    labelFontS = 17
    if XLabel:
        drawSpace.addElement(svg.text(xLeftOffset+plotWidth/2.0,
                                      yTopOffset+plotHeight+yBottomOffset-10,
                                      XLabel, labelFontS, labelFontF,
                                      text_anchor="middle"))
    if YLabel:
        drawSpace.addElement(svg.text(xLeftOffset-50,
                                      yTopOffset+plotHeight/2, YLabel,
                                      labelFontS, labelFontF,
                                      text_anchor="middle",
                                      style="writing-mode:tb-rl",
                                      transform="rotate(270 %d %d)" %
                                      (xLeftOffset-50,
                                       yTopOffset+plotHeight/2)))
    #drawSpace.drawString(YLabel, xLeftOffset-50, yTopOffset+plotHeight- \
    #    (plotHeight-drawSpace.stringWidth(YLabel,font=labelFont))/2.0,
    #    font=labelFont,color=labelColor,angle=90)

    if fitcurve:
        sys.argv = ["mod_python"]
        #from numarray import linear_algebra as la
        #from numarray import ones, array, dot, swapaxes
        fitYY = array(dataYPrimary)
        fitXX = array([ones(len(dataXPrimary)), dataXPrimary])
        AA = dot(fitXX, swapaxes(fitXX, 0, 1))
        BB = dot(fitXX, fitYY)
        bb = la.linear_least_squares(AA, BB)[0]

        xc1 = xLeftOffset
        yc1 = yTopOffset+plotHeight-(bb[0]+bb[1]*xLow-yLow)*yScale
        if yc1 > yTopOffset+plotHeight:
            yc1 = yTopOffset+plotHeight
            xc1 = (yLow-bb[0])/bb[1]
            xc1 = (xc1-xLow)*xScale+xLeftOffset
        elif yc1 < yTopOffset:
            yc1 = yTopOffset
            xc1 = (yTop-bb[0])/bb[1]
            xc1 = (xc1-xLow)*xScale+xLeftOffset
        else:
            pass

        xc2 = xLeftOffset + plotWidth
        yc2 = yTopOffset+plotHeight-(bb[0]+bb[1]*xTop-yLow)*yScale
        if yc2 > yTopOffset+plotHeight:
            yc2 = yTopOffset+plotHeight
            xc2 = (yLow-bb[0])/bb[1]
            xc2 = (xc2-xLow)*xScale+xLeftOffset
        elif yc2 < yTopOffset:
            yc2 = yTopOffset
            xc2 = (yTop-bb[0])/bb[1]
            xc2 = (xc2-xLow)*xScale+xLeftOffset
        else:
            pass
        drawSpace.addElement(svg.line(xc1, yc1, xc2, yc2, "green", 1))

    if displayR:
        labelFontF = "trebuc"
        labelFontS = 14
        NNN = len(dataX)
        corr = webqtlUtil.calCorrelation(dataXPrimary, dataYPrimary, NNN)[0]
        if NNN < 3:
            corrPValue = 1.0
        else:
            if abs(corr) >= 1.0:
                corrPValue = 0.0
            else:
                ZValue = 0.5*log((1.0+corr)/(1.0-corr))
                ZValue = ZValue*sqrt(NNN-3)
                corrPValue = 2.0*(1.0 - reaper.normp(abs(ZValue)))
        NStr = "N of Cases=%d" % NNN
        if rank == 1:
            corrStr = "Spearman's r=%1.3f P=%3.2E" % (corr, corrPValue)
        else:
            corrStr = "Pearson's r=%1.3f P=%3.2E" % (corr, corrPValue)
        drawSpace.addElement(svg.text(xLeftOffset, yTopOffset-10, NStr,
                                      labelFontS, labelFontF,
                                      text_anchor="start"))
        drawSpace.addElement(svg.text(xLeftOffset+plotWidth, yTopOffset-25,
                                      corrStr, labelFontS, labelFontF,
                                      text_anchor="end"))
    return
#!/usr/bin/python
## example2_11
from numarray import array, ones
from LUdecomp3 import *

d = ones((5)) * 2.0
c = ones((4)) * (-1.0)
b = array([5.0, -5.0, 4.0, -5.0, 5.0])
e = c.copy()
c, d, e = LUdecomp3(c, d, e)
x = LUsolve3(c, d, e, b)
print "\nx =\n", x
raw_input("\nPress return to exit")
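# LUdecomp3/LUsolve3 come from the book's tridiagonal-solver module and
# are opaque here.  As a reference, a minimal self-contained sketch of
# the same algorithm (the Thomas algorithm), assuming the usual
# convention c = subdiagonal, d = diagonal, e = superdiagonal:
def LUdecomp3(c, d, e):
    # In-place LU decomposition of a tridiagonal matrix.
    n = len(d)
    for k in range(1, n):
        lam = c[k - 1] / d[k - 1]
        d[k] = d[k] - lam * e[k - 1]
        c[k - 1] = lam              # store the multiplier
    return c, d, e

def LUsolve3(c, d, e, b):
    # Forward substitution with the stored multipliers, then back
    # substitution against the superdiagonal.
    n = len(d)
    for k in range(1, n):
        b[k] = b[k] - c[k - 1] * b[k - 1]
    b[n - 1] = b[n - 1] / d[n - 1]
    for k in range(n - 2, -1, -1):
        b[k] = (b[k] - e[k] * b[k + 1]) / d[k]
    return b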
def plotXY(canvas, dataX, dataY, rank=0, dataLabel=[], plotColor=pid.black,
           axesColor=pid.black, labelColor=pid.black, lineSize="thin",
           lineColor=pid.grey, idFont="arial", idColor=pid.blue, idSize="14",
           symbolColor=pid.black, symbolType="circle", filled="yes",
           symbolSize="tiny", XLabel=None, YLabel=None, title=None,
           fitcurve=None, connectdot=1, displayR=None, loadingPlot=0,
           offset=(80, 20, 40, 60), zoom=1, specialCases=[], showLabel=1,
           bufferSpace=15):
    'displayR : correlation scatter plot, loadings : loading plot'

    dataXRanked, dataYRanked = webqtlUtil.calRank(dataX, dataY, len(dataX))

    # get ID font size
    idFontSize = int(idSize)

    # If filled is "yes", symbols are drawn with a fill color.
    if filled == "yes":
        fillColor = symbolColor
    else:
        fillColor = None

    if symbolSize == "large":
        sizeModifier = 7
        fontModifier = 12
    elif symbolSize == "medium":
        sizeModifier = 5
        fontModifier = 8
    elif symbolSize == "small":
        sizeModifier = 3
        fontModifier = 3
    else:
        sizeModifier = 1
        fontModifier = -1

    if rank == 0:    # Pearson correlation
        bufferSpace = 0
        dataXPrimary = dataX
        dataYPrimary = dataY
        dataXAlt = dataXRanked    # only used to print the other corr type on the image
        dataYAlt = dataYRanked
    else:            # Spearman correlation: switch ranked and unranked values
        dataXPrimary = dataXRanked
        dataYPrimary = dataYRanked
        dataXAlt = dataX
        dataYAlt = dataY

    xLeftOffset, xRightOffset, yTopOffset, yBottomOffset = offset
    plotWidth = canvas.size[0] - xLeftOffset - xRightOffset
    plotHeight = canvas.size[1] - yTopOffset - yBottomOffset
    if plotHeight <= 0 or plotWidth <= 0:
        return
    if len(dataXPrimary) < 1 or len(dataXPrimary) != len(dataYPrimary) or \
       (dataLabel and len(dataXPrimary) != len(dataLabel)):
        return

    max_X = max(dataXPrimary)
    min_X = min(dataXPrimary)
    max_Y = max(dataYPrimary)
    min_Y = min(dataYPrimary)

    # Force the plot range to include the origin for loading plots.
    if loadingPlot:
        min_X = min(-0.1, min_X)
        max_X = max(0.1, max_X)
        min_Y = min(-0.1, min_Y)
        max_Y = max(0.1, max_Y)

    xLow, xTop, stepX = detScale(min_X, max_X)
    yLow, yTop, stepY = detScale(min_Y, max_Y)
    xScale = plotWidth / (xTop - xLow)
    yScale = plotHeight / (yTop - yLow)

    # Draw the plotting region.
    canvas.drawRect(xLeftOffset - bufferSpace, yTopOffset,
                    xLeftOffset + plotWidth,
                    yTopOffset + plotHeight + bufferSpace)
    canvas.drawRect(xLeftOffset - bufferSpace + 1, yTopOffset,
                    xLeftOffset + plotWidth,
                    yTopOffset + plotHeight + bufferSpace - 1)

    # Calculate the data points in canvas coordinates.
    xCoord = map(lambda X, Y: ((X - xLow) * xScale + xLeftOffset,
                               yTopOffset + plotHeight - (Y - yLow) * yScale),
                 dataXPrimary, dataYPrimary)
    labelFont = pid.Font(ttf=idFont, size=idFontSize, bold=0)

    if loadingPlot:
        xZero = -xLow * xScale + xLeftOffset
        yZero = yTopOffset + plotHeight + yLow * yScale
        for point in xCoord:
            canvas.drawLine(xZero, yZero, point[0], point[1], color=pid.red)
    else:
        if connectdot:
            canvas.drawPolygon(xCoord, edgeColor=plotColor, closed=0)
        else:
            pass

    # The symbol glyphs (":", "5", "," etc.) come from the fnt_bs symbol font.
    symbolFont = pid.Font(ttf="fnt_bs", size=12 + fontModifier, bold=0)
    for i, item in enumerate(xCoord):
        if dataLabel and dataLabel[i] in specialCases:
            canvas.drawRect(item[0] - 3, item[1] - 3, item[0] + 3, item[1] + 3,
                            edgeColor=pid.green)
        else:
            if symbolType == "vertRect":
                canvas.drawRect(x1=item[0] - sizeModifier + 2,
                                y1=item[1] - sizeModifier - 2,
                                x2=item[0] + sizeModifier - 1,
                                y2=item[1] + sizeModifier + 2,
                                edgeColor=symbolColor, edgeWidth=1,
                                fillColor=fillColor)
            elif symbolType == "circle" and filled != "yes":
                canvas.drawString(":", item[0] - canvas.stringWidth(":", font=symbolFont) / 2 + 1,
                                  item[1] + 2, color=symbolColor, font=symbolFont)
            elif symbolType == "circle" and filled == "yes":
                canvas.drawString("5", item[0] - canvas.stringWidth("5", font=symbolFont) / 2 + 1,
                                  item[1] + 2, color=symbolColor, font=symbolFont)
            elif symbolType == "horiRect":
                canvas.drawRect(x1=item[0] - sizeModifier - 1,
                                y1=item[1] - sizeModifier + 3,
                                x2=item[0] + sizeModifier + 3,
                                y2=item[1] + sizeModifier - 2,
                                edgeColor=symbolColor, edgeWidth=1,
                                fillColor=fillColor)
            elif symbolType == "square":
                canvas.drawRect(x1=item[0] - sizeModifier + 1,
                                y1=item[1] - sizeModifier - 4,
                                x2=item[0] + sizeModifier + 2,
                                y2=item[1] + sizeModifier - 3,
                                edgeColor=symbolColor, edgeWidth=1,
                                fillColor=fillColor)
            elif symbolType == "diamond" and filled != "yes":
                canvas.drawString(",", item[0] - canvas.stringWidth(",", font=symbolFont) / 2 + 2,
                                  item[1] + 6, font=symbolFont, color=symbolColor)
            elif symbolType == "diamond" and filled == "yes":
                canvas.drawString("D", item[0] - canvas.stringWidth("D", font=symbolFont) / 2 + 2,
                                  item[1] + 6, font=symbolFont, color=symbolColor)
            elif symbolType == "4-star":
                canvas.drawString("l", item[0] - canvas.stringWidth("l", font=symbolFont) / 2 + 1,
                                  item[1] + 3, font=symbolFont, color=symbolColor)
            elif symbolType == "3-star":
                canvas.drawString("k", item[0] - canvas.stringWidth("k", font=symbolFont) / 2 + 1,
                                  item[1] + 3, font=symbolFont, color=symbolColor)
            else:
                canvas.drawCross(item[0], item[1] - 2, color=symbolColor,
                                 size=sizeModifier + 2)

        if showLabel and dataLabel:
            if symbolType == "vertRect" or symbolType == "diamond":
                labelGap = 15
            elif symbolType == "4-star" or symbolType == "3-star":
                labelGap = 12
            else:
                labelGap = 11
            canvas.drawString(dataLabel[i],
                              item[0] - canvas.stringWidth(dataLabel[i], font=labelFont) / 2 + 1,
                              item[1] + (labelGap + sizeModifier + (idFontSize - 12)),
                              font=labelFont, color=idColor)

    # Draw the scale; skip the zero tick for rank plots.
    scaleFont = pid.Font(ttf="cour", size=16, bold=1)
    x = xLow
    for i in range(stepX + 1):
        xc = xLeftOffset + (x - xLow) * xScale
        if not (x == 0 and rank == 1):
            canvas.drawLine(xc, yTopOffset + plotHeight + bufferSpace,
                            xc, yTopOffset + plotHeight + 5 + bufferSpace,
                            color=axesColor)
        strX = cformat(d=x, rank=rank)
        if not (strX == "0" and rank == 1):
            canvas.drawString(strX,
                              xc - canvas.stringWidth(strX, font=scaleFont) / 2,
                              yTopOffset + plotHeight + 20 + bufferSpace,
                              font=scaleFont)
        x += (xTop - xLow) / stepX
    y = yLow
    for i in range(stepY + 1):
        yc = yTopOffset + plotHeight - (y - yLow) * yScale
        if not (y == 0 and rank == 1):
            canvas.drawLine(xLeftOffset - bufferSpace, yc,
                            xLeftOffset - 5 - bufferSpace, yc,
                            color=axesColor)
        strY = cformat(d=y, rank=rank)
        if not (strY == "0" and rank == 1):
            canvas.drawString(strY,
                              xLeftOffset - canvas.stringWidth(strY, font=scaleFont) - 10 - bufferSpace,
                              yc + 4, font=scaleFont)
        y += (yTop - yLow) / stepY

    # Draw the title and axis labels.
    labelFont = pid.Font(ttf="verdana", size=canvas.size[0] / 45, bold=0)
    titleFont = pid.Font(ttf="verdana", size=canvas.size[0] / 40, bold=0)
    if rank == 1 and not title:
        canvas.drawString("Spearman Rank Correlation",
                          xLeftOffset - canvas.size[0] * .025 +
                          (plotWidth - canvas.stringWidth("Spearman Rank Correlation", font=titleFont)) / 2.0,
                          25, font=titleFont, color=labelColor)
    elif rank == 0 and not title:
        canvas.drawString("Pearson Correlation",
                          xLeftOffset - canvas.size[0] * .025 +
                          (plotWidth - canvas.stringWidth("Pearson Correlation", font=titleFont)) / 2.0,
                          25, font=titleFont, color=labelColor)
    if XLabel:
        canvas.drawString(XLabel,
                          xLeftOffset + (plotWidth - canvas.stringWidth(XLabel, font=labelFont)) / 2.0,
                          yTopOffset + plotHeight + yBottomOffset - 25,
                          font=labelFont, color=labelColor)
    if YLabel:
        canvas.drawString(YLabel, xLeftOffset - 65,
                          yTopOffset + plotHeight -
                          (plotHeight - canvas.stringWidth(YLabel, font=labelFont)) / 2.0,
                          font=labelFont, color=labelColor, angle=90)
    labelFont = pid.Font(ttf="verdana", size=20, bold=0)
    if title:
        canvas.drawString(title,
                          xLeftOffset + (plotWidth - canvas.stringWidth(title, font=labelFont)) / 2.0,
                          20, font=labelFont, color=labelColor)

    if fitcurve:
        import sys
        sys.argv = ["mod_python"]
        from numarray import ones, array, dot, swapaxes
        from numarray import linear_algebra as la
        # Least-squares line via the normal equations.
        fitYY = array(dataYPrimary)
        fitXX = array([ones(len(dataXPrimary)), dataXPrimary])
        AA = dot(fitXX, swapaxes(fitXX, 0, 1))
        BB = dot(fitXX, fitYY)
        bb = la.linear_least_squares(AA, BB)[0]

        # Clip the fitted line to the plotting region.
        xc1 = xLeftOffset
        yc1 = yTopOffset + plotHeight - (bb[0] + bb[1] * xLow - yLow) * yScale
        if yc1 > yTopOffset + plotHeight:
            yc1 = yTopOffset + plotHeight
            xc1 = (yLow - bb[0]) / bb[1]
            xc1 = (xc1 - xLow) * xScale + xLeftOffset
        elif yc1 < yTopOffset:
            yc1 = yTopOffset
            xc1 = (yTop - bb[0]) / bb[1]
            xc1 = (xc1 - xLow) * xScale + xLeftOffset

        xc2 = xLeftOffset + plotWidth
        yc2 = yTopOffset + plotHeight - (bb[0] + bb[1] * xTop - yLow) * yScale
        if yc2 > yTopOffset + plotHeight:
            yc2 = yTopOffset + plotHeight
            xc2 = (yLow - bb[0]) / bb[1]
            xc2 = (xc2 - xLow) * xScale + xLeftOffset
        elif yc2 < yTopOffset:
            yc2 = yTopOffset
            xc2 = (yTop - bb[0]) / bb[1]
            xc2 = (xc2 - xLow) * xScale + xLeftOffset

        canvas.drawLine(xc1 - bufferSpace, yc1 + bufferSpace, xc2, yc2,
                        color=lineColor)
        if lineSize == "medium":
            canvas.drawLine(xc1 - bufferSpace, yc1 + bufferSpace + 1,
                            xc2, yc2 + 1, color=lineColor)
        if lineSize == "thick":
            canvas.drawLine(xc1 - bufferSpace, yc1 + bufferSpace + 1,
                            xc2, yc2 + 1, color=lineColor)
            canvas.drawLine(xc1 - bufferSpace, yc1 + bufferSpace - 1,
                            xc2, yc2 - 1, color=lineColor)

    if displayR:
        labelFont = pid.Font(ttf="trebuc", size=canvas.size[0] / 60, bold=0)
        NNN = len(dataX)
        corr = webqtlUtil.calCorrelation(dataXPrimary, dataYPrimary, NNN)[0]
        if NNN < 3:
            corrPValue = 1.0
        else:
            if abs(corr) >= 1.0:
                corrPValue = 0.0
            else:
                # Fisher z-transform of r; two-sided normal tail probability.
                ZValue = 0.5 * log((1.0 + corr) / (1.0 - corr))
                ZValue = ZValue * sqrt(NNN - 3)
                corrPValue = 2.0 * (1.0 - reaper.normp(abs(ZValue)))
        NStr = "N = %d" % NNN
        strLenN = canvas.stringWidth(NStr, font=labelFont)
        if rank == 1:
            if corrPValue < 0.0000000000000001:
                corrStr = "Rho = %1.3f P < 1.00 E-16" % corr
            else:
                corrStr = "Rho = %1.3f P = %3.2E" % (corr, corrPValue)
        else:
            if corrPValue < 0.0000000000000001:
                corrStr = "r = %1.3f P < 1.00 E-16" % corr
            else:
                corrStr = "r = %1.3f P = %3.2E" % (corr, corrPValue)
        strLen = canvas.stringWidth(corrStr, font=labelFont)
        canvas.drawString(NStr, xLeftOffset, yTopOffset - 10,
                          font=labelFont, color=labelColor)
        canvas.drawString(corrStr, xLeftOffset + plotWidth - strLen,
                          yTopOffset - 10, font=labelFont, color=labelColor)

    return xCoord
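# Both plot functions fit the regression line by forming the normal
# equations explicitly and handing them to numarray's least-squares
# solver.  The same computation as a standalone helper (a sketch
# matching the fitcurve branches above):
from numarray import ones, array, dot, swapaxes
from numarray import linear_algebra as la

def fit_line(xs, ys):
    # Design matrix with a row of ones (intercept) and a row of x values;
    # solve the 2x2 normal equations (X X^T) b = X y.
    X = array([ones(len(xs)), xs])
    AA = dot(X, swapaxes(X, 0, 1))
    BB = dot(X, array(ys))
    return la.linear_least_squares(AA, BB)[0]   # [intercept, slope]

print fit_line([0., 1., 2., 3.], [1., 3., 5., 7.])   # approx [1., 2.]

# Likewise, the displayR branches compute the correlation p-value inline
# via Fisher's z-transform.  Factored out for clarity (reaper.normp is
# assumed to be the standard normal CDF, as in the code above):
from math import log, sqrt

def corr_pvalue(r, n):
    # Two-sided p-value for a correlation r over n cases: under the null
    # hypothesis, atanh(r) * sqrt(n - 3) is approximately standard normal.
    if n < 3:
        return 1.0
    if abs(r) >= 1.0:
        return 0.0
    z = 0.5 * log((1.0 + r) / (1.0 - r)) * sqrt(n - 3)
    return 2.0 * (1.0 - reaper.normp(abs(z)))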
def ones(self):
    """ All CPT elements are set to 1 """
    self.cpt = na.ones(self.cpt.shape, type=self.cpt.type())
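# Usage sketch (hypothetical values; Table as exercised in the division
# test earlier in this collection):
t = Table(['a', 'b'], [2, 3], range(6))
t.ones()    # t.cpt is now a 2x3 array of ones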
## example9_13
from numarray import ones
from lamRange import *
from inversePower3 import *

N = 10
n = 100
d = ones((n)) * 2.0
c = ones((n - 1)) * (-1.0)
r = lamRange(d, c, N)            # Bracket N smallest eigenvalues
s = (r[N - 1] + r[N]) / 2.0      # Shift to midpoint of Nth bracket
lam, x = inversePower3(d, c, s)  # Inverse power method
print "Eigenvalue No.", N, " =", lam
raw_input("\nPress return to exit")
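# inversePower3 is the book's tridiagonal-specialized routine.  As an
# illustration of what it computes (not the book's implementation), a
# dense-matrix sketch of shifted inverse power iteration:
import math
from numarray import ones, dot, identity, array
from numarray.linear_algebra import solve_linear_equations

def inversePowerDense(A, s, tol=1.0e-6, maxiter=50):
    # Repeatedly solve (A - s*I) y = x; the iteration converges to the
    # eigenvector of A whose eigenvalue lies closest to the shift s.
    n = A.shape[0]
    x = ones(n, 'd')
    lam = s
    for it in range(maxiter):
        x = x / math.sqrt(dot(x, x))              # normalize
        y = solve_linear_equations(A - s * identity(n), x)
        lamOld = lam
        lam = s + dot(x, y) / dot(y, y)           # eigenvalue estimate
        x = y
        if abs(lam - lamOld) < tol:
            break
    return lam, x / math.sqrt(dot(x, x))

lam, x = inversePowerDense(array([[2., -1.], [-1., 2.]]), 0.5)
print lam    # approx 1.0, the eigenvalue nearest the shift 0.5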
def logistic_regression(x, y, beta_start=None, verbose=False,
                        CONV_THRESH=1.e-3, MAXIT=500):
    """
    Uses the Newton-Raphson algorithm to calculate a maximum-likelihood
    estimate logistic regression.  The algorithm is known as
    'iteratively re-weighted least squares', or IRLS.

    x - rank-1 or rank-2 array of predictors.  If x is rank-2, the number
        of predictors = x.shape[0] = N.  If x is rank-1, it is assumed N=1.
    y - binary outcomes (if N>1 len(y) = x.shape[1]; if N=1 len(y) = len(x))
    beta_start - initial beta vector (default zeros(N+1, x.dtype.char))
    verbose - if True, diagnostics are printed for each iteration
        (default False).
    MAXIT - maximum number of iterations (default 500)
    CONV_THRESH - convergence threshold (sum of absolute differences of
        beta-beta_old, default 0.001)

    Returns beta (the logistic regression coefficients, an N+1 element
    vector), J_bar (the (N+1)x(N+1) information matrix), and l (the
    log-likelihood).

    J_bar can be used to estimate the covariance matrix and the standard
    errors for beta.  l can be used for a chi-squared significance test.

    covmat = inverse(J_bar)     --> covariance matrix of coefficients (beta)
    stderr = sqrt(diag(covmat)) --> standard errors for beta
    deviance = -2l              --> scaled deviance statistic
    chi-squared value for -2l is the model chi-squared test.
    """
    if x.shape[-1] != len(y):
        raise ValueError, "x.shape[-1] and y should be the same length!"
    try:
        N, npreds = x.shape[1], x.shape[0]
    except IndexError:
        # x is rank-1: single predictor, use the simple routine.
        return _simple_logistic_regression(x, y, beta_start=beta_start,
                                           CONV_THRESH=CONV_THRESH,
                                           MAXIT=MAXIT, verbose=verbose)
    if beta_start is None:
        beta_start = NA.zeros(npreds + 1, x.dtype.char)
    X = NA.ones((npreds + 1, N), x.dtype.char)
    X[1:, :] = x
    Xt = NA.transpose(X)
    iter = 0
    diff = 1.
    beta = beta_start  # initial values
    if verbose:
        print 'iteration  beta  log-likelihood  |beta-beta_old|'
    while iter < MAXIT:
        beta_old = beta
        ebx = NA.exp(NA.dot(beta, X))
        p = ebx / (1. + ebx)
        # log-likelihood
        l = NA.sum(y * NA.log(p) + (1. - y) * NA.log(1. - p))
        s = NA.dot(X, y - p)                       # scoring function
        # Fisher information matrix: X diag(p(1-p)) X^T
        J_bar = NA.dot(X * p * (1. - p), Xt)
        beta = beta_old + NA.dot(LA.inverse(J_bar), s)  # new value of beta
        # sum of absolute differences
        diff = NA.sum(NA.fabs(beta - beta_old))
        if verbose:
            print iter + 1, beta, l, diff
        if diff <= CONV_THRESH:
            break
        iter = iter + 1
    if iter == MAXIT and diff > CONV_THRESH:
        print 'warning: convergence not achieved with threshold of %s in %s iterations' % (CONV_THRESH, MAXIT)
    return beta, J_bar, l
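# Usage sketch on synthetic data (assumes numarray's random_array module
# and a numarray recent enough to expose .dtype, which the function
# itself requires; NA/LA are the module-level numarray imports):
import numarray as NA
import numarray.random_array as RA

RA.seed(123, 456)
x = RA.standard_normal((2, 200))                # two predictors, 200 cases
logit = 0.5 + 2.0 * x[0] - 1.0 * x[1]           # known true model
p_true = 1. / (1. + NA.exp(-logit))
y = NA.where(RA.random(200) < p_true, 1., 0.)   # binary outcomes

beta, J_bar, l = logistic_regression(x, y)
print 'estimated beta =', beta                  # near [0.5, 2.0, -1.0]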
def estimate_mixture(models, seqs, max_iter, eps, alpha=None):
    """ Given a Python list of models and a SequenceSet seqs, perform a
        nested EM to estimate maximum-likelihood parameters for the models
        and the mixture coefficients.  The iteration stops after max_iter
        steps or if the improvement in log-likelihood is less than eps.

        alpha is a numarray of dimension len(models) containing the
        mixture coefficients.  If alpha is not given, uniform values will
        be chosen.

        Result: The models are changed in place.  Return value is
        (l, alpha, P) where l is the final log-likelihood of seqs under
        the mixture, alpha is a numarray of dimension len(models)
        containing the mixture coefficients, and P is a
        (|sequences| x |models|)-matrix containing P[model j | sequence i].
    """
    iter = 1
    last_mixture_likelihood = -99999999.99
    # The (nr of seqs x nr of models)-matrix holding the likelihoods
    l = numarray.zeros((len(seqs), len(models)), numarray.Float)
    if alpha is None:  # uniform alpha
        logalpha = numarray.ones(len(models), numarray.Float) * \
                   math.log(1.0 / len(models))
    else:
        logalpha = numarray.log(alpha)
    print logalpha, numarray.exp(logalpha)
    log_nrseqs = math.log(len(seqs))

    while 1:
        # Score all sequences with all models
        for i, m in enumerate(models):
            loglikelihood = m.loglikelihoods(seqs)
            # numarray slices: l[:,i] is the i-th column of l
            l[:, i] = numarray.array(loglikelihood)

        for i in xrange(len(seqs)):
            l[i] += logalpha  # l[i] = ( log( a_k * P[seq i | model k]) )

        mixture_likelihood = numarray.sum(numarray.sum(l))
        print "# iter %s joint likelihood = %f" % (iter, mixture_likelihood)

        improvement = mixture_likelihood - last_mixture_likelihood
        if iter > max_iter or improvement < eps:
            break

        # Compute P[model j | seq i]
        for i in xrange(len(seqs)):
            seq_logprob = sumlogs(l[i])  # \sum_{k} a_k P[seq i | model k]
            l[i] -= seq_logprob          # l[i] = ( log P[model j | seq i] )

        l_exp = numarray.exp(l)  # XXX Use approx with table lookup

        # Compute priors alpha
        for i in xrange(len(models)):
            logalpha[i] = sumlogs(l[:, i]) - log_nrseqs

        for j, m in enumerate(models):
            # Set the sequence weight for sequence i under model m to P[m|i]
            for i in xrange(len(seqs)):
                seqs.setWeight(i, l_exp[i, j])
            m.baumWelch(seqs, 10, 0.0001)

        iter += 1
        last_mixture_likelihood = mixture_likelihood

    return (mixture_likelihood, numarray.exp(logalpha), l_exp)
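# estimate_mixture leans on a sumlogs helper that is not shown here.
# Assuming it has the usual log-sum-exp semantics, a minimal sketch:
import math

def sumlogs(logvals):
    # log(sum_k exp(logvals[k])), computed stably by factoring out the
    # largest term so no exp() overflows.
    m = max(logvals)
    s = 0.0
    for v in logvals:
        s += math.exp(v - m)
    return m + math.log(s)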
#!/usr/bin/env python
# Generate the NetCDF Test dataset
from Scientific.IO import NetCDF
import numarray

nc = NetCDF.NetCDFFile('testdata.nc', 'w')
nc.createDimension('x', 10)
nc.createDimension('y', 10)

def funcform(x, y):
    return (x - 5)**2 + (y - 5)**2

h = nc.createVariable('h', 'd', ('x', 'y'))
h.assignValue(numarray.fromfunction(funcform, (10, 10)))
u = nc.createVariable('u', 'd', ('x', 'y'))
u.assignValue(numarray.identity(10) * 10)
v = nc.createVariable('v', 'd', ('x', 'y'))
v.assignValue(numarray.ones((10, 10)) * 5)
nc.close()
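# Quick read-back check of the generated file (h should be 0 at the
# centre of the paraboloid):
from Scientific.IO import NetCDF
nc = NetCDF.NetCDFFile('testdata.nc', 'r')
print nc.variables['h'][5, 5]    # expect 0.0
nc.close()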
import os
import numarray
import pyfits
# FitsAxis and FitsImageArray come from the module under test.

fitsobj = pyfits.HDUList()
hdu = pyfits.PrimaryHDU()
x = FitsAxis("x-axis", 0., 0.2, 10)
y = FitsAxis("y-axis", 0., 0.1, 20)
hdu.data = numarray.zeros((y.naxis, x.naxis))  # note the (y, x) ordering
x.updateFitsHeader(hdu.header, 1)
y.updateFitsHeader(hdu.header, 2)
fitsobj.append(hdu)
os.system("rm -f test.fits")
fitsobj.writeto('test.fits')
print "FitsAxis class tests completed.\n"

print "FitsImageArray class tests:"
x = FitsAxis("x-axis", 0., 0.2, 5)
y = FitsAxis("y-axis", 0., 0.1, 10)
array1 = numarray.ones((y.naxis, x.naxis))
z = {}
z[1] = FitsImageArray(array1)
z[1].setAxis(x, 1)
z[1].setAxis(y, 2)
z[1].setName('z1')
z[2] = z[1] / 3.
z[2].setName('z2')
z[3] = z[1] + z[2] / 2.
z[3].setName('z3')
os.system("rm -f z3.fits")
z[3].writeto("z3.fits")
z[4] = z[3][:3, :]
z[4].setName('z4')
z[5] = FitsImageArray("z3.fits")
z[5].setName('z5')
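# Quick verification of the file written above, via the standard pyfits
# API (shape follows the (y, x) ordering noted in the test):
import pyfits
hdus = pyfits.open('z3.fits')
print hdus[0].data.shape    # expect (10, 5)
hdus.close()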
def create_prediction_success_table(LCM, location_set, observed_choices_id,
                                    geographies=[], choice_method='mc',
                                    data_objects=None):
    """Create a table tabulating the number of agents observed versus
    predicted, by geography, for a location choice model.

    LCM is an instance of a Location Choice Model after run_estimation;
    location_set is the set of locations in the simulation, e.g. gridcell;
    observed_choices_id is the location_set id (e.g. grid_id) observed;
    geographies is a list of geographies to create the prediction success
    table for;
    choice_method is the method used to select a choice for agents,
    either 'mc' or 'max_prob';
    data_objects is the same as the data_objects used to run the LCM
    simulation, but includes entries for the geographies.
    """
    LCM.simulate_step()
    choices = sample_choice(LCM.model.probabilities, choice_method)
    # translate choices into indices of location_set
    choices_index = LCM.model_resources.translate("index")[choices]

    results = []
    gcs = location_set
    for geography in geographies:
        geo = data_objects.translate(geography)

        # geo_id for observed agents
        gc_index = gcs.get_id_index(observed_choices_id)
        if geo.id_name[0] not in gcs.get_attribute_names():
            gcs.compute_variables(geo.id_name[0], resources=data_objects)
        geo_ids_obs = gcs.get_attribute(geo.id_name[0])[gc_index]

        # geo_id for simulated agents
        geo_ids_sim = gcs.get_attribute(geo.id_name[0])[choices_index]

        geo_size = geo.size()
        myids = geo.get_id_attribute()
        pred_matrix = zeros((geo_size, geo_size))
        p_success = zeros((geo_size,)).astype(Float32)
        f = 0
        for geo_id in myids:
            # simulated geo_ids for agents observed in this geo_id
            ids = geo_ids_sim[where(geo_ids_obs == geo_id)]
            what = ones(ids.size())
            pred_matrix[f] = array(nd_image_sum(what, labels=ids, index=myids))
            print pred_matrix[f]
            if sum(pred_matrix[f]) > 0:
                p_success[f] = float(pred_matrix[f, f]) / sum(pred_matrix[f])
            f += 1
        print p_success
        results.append((pred_matrix.copy(), p_success.copy()))
    return results
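# The counting step hinges on nd_image_sum, assumed here to be
# numarray.nd_image.sum: a labeled reduction that sums the input over
# each label value listed in index.  A toy sketch of that semantics:
import numarray
from numarray.nd_image import sum as nd_image_sum

labels = numarray.array([1, 2, 2, 3])    # simulated geography per agent
weights = numarray.ones(4)               # one count per agent
print nd_image_sum(weights, labels=labels, index=[1, 2, 3])
# expect [1.0, 2.0, 1.0]: how many agents landed in each geography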
def drawmeridians(self, ax, meridians, color='k', linewidth=1.,
                  linestyle='--', dashes=[1, 1], labels=[0, 0, 0, 0],
                  font='rm', fontsize=12):
    """
    Draw meridians (longitude lines).

    ax - current axis instance.
    meridians - list containing longitude values to draw (in degrees).
    color - color to draw meridians (default black).
    linewidth - line width for meridians (default 1.)
    linestyle - line style for meridians (default '--', i.e. dashed).
    dashes - dash pattern for meridians (default [1,1], i.e. 1 pixel on,
     1 pixel off).
    labels - list of 4 values (default [0,0,0,0]) that control whether
     meridians are labelled where they intersect the left, right, top or
     bottom of the plot.  For example labels=[1,0,0,1] will cause
     meridians to be labelled where they intersect the left and bottom
     of the plot, but not the right and top.  Labels are located with a
     precision of 0.1 degrees and are drawn using mathtext.
    font - mathtext font used for labels ('rm', 'tt', 'it' or 'cal',
     default 'rm').
    fontsize - font size in points for labels (default 12).
    """
    # don't draw meridians past latmax; always draw parallels at latmax.
    latmax = 80.  # not used for cyl, merc projections.
    # offsets for labels.
    yoffset = (self.urcrnry - self.llcrnry) / 100. / self.aspect
    xoffset = (self.urcrnrx - self.llcrnrx) / 100.

    if self.projection not in ['merc', 'cyl']:
        lats = N.arange(-latmax, latmax + 1).astype('f')
    else:
        lats = N.arange(-90, 91).astype('f')
    xdelta = 0.1 * (self.xmax - self.xmin)
    ydelta = 0.1 * (self.ymax - self.ymin)
    for merid in meridians:
        lons = merid * N.ones(len(lats), 'f')
        x, y = self(lons, lats)
        # remove points outside domain.
        testx = N.logical_and(x >= self.xmin - xdelta, x <= self.xmax + xdelta)
        x = N.compress(testx, x)
        y = N.compress(testx, y)
        testy = N.logical_and(y >= self.ymin - ydelta, y <= self.ymax + ydelta)
        x = N.compress(testy, x)
        y = N.compress(testy, y)
        if len(x) > 1 and len(y) > 1:
            # split into separate line segments if necessary.
            # (not necessary for mercator or cylindrical).
            xd = (x[1:] - x[0:-1])**2
            yd = (y[1:] - y[0:-1])**2
            dist = N.sqrt(xd + yd)
            split = dist > 500000.
            if N.sum(split) and self.projection not in ['merc', 'cyl']:
                ind = (N.compress(split,
                                  pylab.squeeze(split * N.indices(xd.shape)))
                       + 1).tolist()
                xl = []
                yl = []
                iprev = 0
                ind.append(len(xd))
                for i in ind:
                    xl.append(x[iprev:i])
                    yl.append(y[iprev:i])
                    iprev = i
            else:
                xl = [x]
                yl = [y]
            # draw each line segment.
            for x, y in zip(xl, yl):
                # skip if only a point.
                if len(x) > 1 and len(y) > 1:
                    l = Line2D(x, y, linewidth=linewidth, linestyle=linestyle)
                    l.set_color(color)
                    l.set_dashes(dashes)
                    ax.add_line(l)

    # Draw labels for meridians: search along the edges of the map to
    # see if meridians intersect; if so, find the x,y location of the
    # intersection and draw a label there.
    if self.projection == 'cyl':
        dx = 0.01
        dy = 0.01
    elif self.projection == 'merc':
        dx = 0.01
        dy = 1000
    else:
        dx = 1000
        dy = 1000
    for dolab, side in zip(labels, ['l', 'r', 't', 'b']):
        if not dolab:
            continue
        # for cyl or merc, don't draw meridian labels on left or right.
        if self.projection in ['cyl', 'merc'] and side in ['l', 'r']:
            continue
        if side in ['l', 'r']:
            nmax = int((self.ymax - self.ymin) / dy + 1)
            if self.urcrnry < self.llcrnry:
                yy = self.llcrnry - dy * N.arange(nmax)
            else:
                yy = self.llcrnry + dy * N.arange(nmax)
            if side == 'l':
                lons, lats = self(self.llcrnrx * N.ones(yy.shape, 'f'), yy,
                                  inverse=True)
            else:
                lons, lats = self(self.urcrnrx * N.ones(yy.shape, 'f'), yy,
                                  inverse=True)
            lons = N.where(lons < 0, lons + 360, lons)
            lons = [int(lon * 10) for lon in lons.tolist()]
            lats = [int(lat * 10) for lat in lats.tolist()]
        else:
            nmax = int((self.xmax - self.xmin) / dx + 1)
            if self.urcrnrx < self.llcrnrx:
                xx = self.llcrnrx - dx * N.arange(nmax)
            else:
                xx = self.llcrnrx + dx * N.arange(nmax)
            if side == 'b':
                lons, lats = self(xx, self.llcrnry * N.ones(xx.shape, 'f'),
                                  inverse=True)
            else:
                lons, lats = self(xx, self.urcrnry * N.ones(xx.shape, 'f'),
                                  inverse=True)
            lons = N.where(lons < 0, lons + 360, lons)
            lons = [int(lon * 10) for lon in lons.tolist()]
            lats = [int(lat * 10) for lat in lats.tolist()]
        for lon in meridians:
            if lon < 0:
                lon = lon + 360.
            # find index of meridian (there may be two, so search from
            # the left and from the right).
            try:
                nl = lons.index(int(lon * 10))
            except ValueError:
                nl = -1
            try:
                nr = len(lons) - lons[::-1].index(int(lon * 10)) - 1
            except ValueError:
                nr = -1
            if lon > 180:
                lonlab = r'$\%s{%g\/^{\circ}\/W}$' % (font, N.fabs(lon - 360))
            elif lon < 180 and lon != 0:
                lonlab = r'$\%s{%g\/^{\circ}\/E}$' % (font, lon)
            else:
                lonlab = r'$\%s{%g\/^{\circ}}$' % (font, lon)
            # meridians can intersect each map edge twice.
            for i, n in enumerate([nl, nr]):
                lat = lats[n] / 10.
                # no meridians > latmax for projections other than merc, cyl.
                if self.projection not in ['merc', 'cyl'] and lat > latmax:
                    continue
                # don't bother if close to the first label.
                if i and abs(nr - nl) < 100:
                    continue
                if n > 0:
                    if side == 'l':
                        pylab.text(self.llcrnrx - xoffset, yy[n], lonlab,
                                   horizontalalignment='right',
                                   verticalalignment='center',
                                   fontsize=fontsize)
                    elif side == 'r':
                        pylab.text(self.urcrnrx + xoffset, yy[n], lonlab,
                                   horizontalalignment='left',
                                   verticalalignment='center',
                                   fontsize=fontsize)
                    elif side == 'b':
                        pylab.text(xx[n], self.llcrnry - yoffset, lonlab,
                                   horizontalalignment='center',
                                   verticalalignment='top',
                                   fontsize=fontsize)
                    else:
                        pylab.text(xx[n], self.urcrnry + yoffset, lonlab,
                                   horizontalalignment='center',
                                   verticalalignment='bottom',
                                   fontsize=fontsize)

    # make sure axis ticks are turned off.
    ax.set_xticks([])
    ax.set_yticks([])
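# Usage sketch for this numarray-era Basemap API, where the axes
# instance is passed explicitly (m is a hypothetical Basemap instance):
import pylab
ax = pylab.gca()
m.drawmeridians(ax, range(0, 360, 30), labels=[0, 0, 0, 1])
pylab.show()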