def loop(potential,potential_reg,c2,w3,region):
    """Accumulate the regridded weights for ``region`` into the first
    available "potential" slot of every cell selected by ``c2``.

    Parameters
    ----------
    potential : MV2 array, shape (maximum_regions_per_cell, ...)
        Accumulated weights per slot; -999 marks an unused slot.
        Modified in place.
    potential_reg : MV2 array, same shape
        Region id stored in each slot; -999 marks an unused slot.
        Modified in place.
    c2 : MV2 boolean mask
        Cells eligible to receive this region's weight.
    w3 : MV2 array
        Regridded weight field for this region.
    region : int
        Region id being mapped.

    Raises
    ------
    RuntimeError
        If some eligible cell has no free slot left, i.e.
        ``maximum_regions_per_cell`` is too small.
    """
    nmax = potential.shape[0]
    # Only consider cells where some weight actually landed
    c3 = MV2.not_equal(w3,0.)
    c = MV2.logical_and(c2,c3)
    # thisturn flags cells not yet served during this call
    thisturn = MV2.ones(c.shape)
    for i in range(nmax):
        # Slot i is usable if it is empty (-999) or already holds this region
        c1 = MV2.logical_or(MV2.equal(potential_reg[i],region),MV2.equal(potential[i],-999))
        c2 = MV2.logical_and(c,c1)
        c2 = MV2.logical_and(c2,thisturn)
        potential_reg[i] = MV2.where(c2,region,potential_reg[i])
        # Mark served cells so later slots skip them
        thisturn = MV2.where(c2,0,thisturn)
        # Empty slot gets the weight; an occupied slot accumulates it
        c1 = MV2.logical_and(c2,MV2.equal(potential[i],-999))
        c2 = MV2.logical_and(c2,MV2.not_equal(potential[i],-999))
        potential[i] = MV2.where(c1,w3,potential[i])
        potential[i] = MV2.where(c2,potential[i]+w3,potential[i])
    ## Ultimate test to see if more would be needed !
    # BUGFIX: raising a plain string is invalid (removed in Python 2.6+);
    # use a real exception object so callers can catch it.
    if not MV2.allequal(MV2.logical_and(c,thisturn),0):
        raise RuntimeError('OOOPS WE COULD USE MORE REGIONS BUDDY !')
    return
def main():
    """Build a random 10x30 statistics matrix and render a test portrait plot.

    Plants one missing value, optionally normalizes each row by its median,
    derives three shifted/normalized copies for the extra triangles, and
    hands everything to ``plot_portrait``.
    """
    # Dummy input: 10 metrics x 30 models of uniform random numbers
    rand_vals = np.random.rand(10, 30)
    x_axis = cdms2.createAxis(['model_ ' + str(r) for r in list(range(0, 30))])
    y_axis = cdms2.createAxis(['metric_ ' + str(r) for r in list(range(0, 10))])
    stat_xy = MV2.array(rand_vals, axes=(y_axis, x_axis), id='statistics')
    # Plant a single missing value, then mask it out
    stat_xy[5][5] = -1.e20
    stat_xy = MV2.masked_where(MV2.equal(stat_xy, -1.e20), stat_xy)
    # Extra data for the annotation test
    stat_xy_annotate = MV2.multiply(stat_xy, 2)
    # User options
    imgName = 'test_pp_random'
    plotTitle = 'test_pp_random'
    Normalize = True  # normalize rows by their median
    if Normalize:
        stat_xy = normalize_by_median(stat_xy)
        # Reflect the normalization in the image file name
        imgName += '_normalized'
    # Color settings
    colormap = "default"
    clevels = [-1.e20, -.5, -.4, -.3, -.2, -.1, 0, .1, .2, .3, .4, .5, 1.e20]
    ccolors = vcs.getcolors(clevels, split=0, colors=range(16, 240))
    # Dummy data for additional triangles, sharing the main axes
    stat_xy_2 = normalize_by_median(MV2.add(stat_xy, 2))
    stat_xy_3 = normalize_by_median(MV2.add(stat_xy, 3))
    stat_xy_4 = normalize_by_median(MV2.add(stat_xy, 4))
    axes = stat_xy.getAxisList()
    stat_xy_2.setAxisList(axes)
    stat_xy_3.setAxisList(axes)
    stat_xy_4.setAxisList(axes)
    #
    # Portrait plot
    #
    plot_portrait(stat_xy, imgName=imgName, colormap=colormap,
                  clevels=clevels, ccolors=ccolors,
                  num_box_partitioning=4,
                  stat_xy_2=stat_xy_2, stat_xy_3=stat_xy_3,
                  stat_xy_4=stat_xy_4, GridMeshLine=False)
def generateSurfaceTypeByRegionMask(mask,sftbyrgn=None,sftbyrgnmask=215,regions=range(201,223),maximum_regions_per_cell=4,extend_up_to=3,verbose=True):
    """ Maps a "regions" dataset onto a user provided land/sea mask or grid

    Usage:
    -----
    mapped,found = generateSurfaceTypeByRegionMask(mask,sftbyrgn=None,sftbyrgnmask=None,regions=None,maximum_regions_per_cell=4,extend_up_to=3,verbose=True)

    Input:
    -----
    mask                      User provided land/sea mask (100/0) or grid (the land/sea mask will be generated automagically) which will be mapped using the "sftbyrgn" internal dataset (will generate a land/sea mask for you)
    sftbyrgn                  Mask you wish to map onto your grid (if None uses internal "sftbyrgn" dataset (old ezget type))
    sftbyrgnmask              Land/sea mask for sftbyrgn (or a number specifying value limits for sftbyrgn which indicates land/sea threshold (greater values are land) - see URL below for integer region map)
    regions                   Numbers from sftbyrgn array that you want to map onto mask (integers from 201-222)
    maximum_regions_per_cell  Maximum number of regions considered for a single cell
    extend_up_to              How many grid cells around a cell can we extend to identify a guess
    verbose                   Prints to the screen what's going on (default is True)

    Output:
    -----
    mapped                    Mapped input grid/mask using provided (or default) regions - sftbyrgn -> user provided grid/mask
    found                     Matrix containing number of regions matched for each output cell

    Notes:
    -----
    - More detailed information, including a region map and tabulated region numbers are available from http://www-pcmdi.llnl.gov/publications/pdf/34.pdf
    """
    cdat_info.pingPCMDIdb("cdat","cdutil.generateSurfaceTypeByRegionMask")
    ## OK first determine which regions are available
    ## Must be integer values
    if isinstance(mask,cdms2.grid.TransientRectGrid):
        # A grid was passed instead of a mask: build the 100/0 land/sea mask
        mask = cdutil.generateLandSeaMask(mask)*100.
    if sftbyrgn is None:
        # Default internal regions dataset shipped with cdutil
        sftbyrgn = cdms2.open(os.path.join(cdat_info.get_prefix(),'share','cdutil','sftbyrgn.nc'))('sftbyrgn')
    if regions is None:
        if verbose: print 'Preparing regions'
        #regions = range(201,223)
        # Scan candidate integer ids and keep the ones actually present
        regions = []
        for i in range(0,10000):
            genutil.statusbar(i,9999)
            c = float(MV2.sum(MV2.ravel(MV2.equal(sftbyrgn,i)),0))
            if c != 0: regions.append(i)
    if verbose: print 'Regions:',regions
    ## If no mask passed fr sftbyrgn, assumes everything greater 5000 is land)
    if isinstance(sftbyrgnmask,int):
        # Integer threshold: region ids >= threshold are treated as land
        split = sftbyrgnmask
        n = MV2.maximum(mask)
        sftbyrgnmask = MV2.greater_equal(sftbyrgn,sftbyrgnmask)*n
    else:
        split = MV2.maximum(sftbyrgnmask)/2.
    ## Now guess the type for each regions
    keys = {}
    ##
    ## Nice way to do it
    ##     for r in regions:
    ##         c=MV2.not_equal(sftbyrgn,r)
    ##         c=MV2.masked_where(c,sftbyrgnmask)
    ##         n=MV2.count(c)
    ##         c=float(MV2.sum(MV2.ravel(c),0)/n)
    ##         print r,c,n
    ##         keys[r]=c
    ## Fast but not so "general" way to do it
    # keys[r] is the surface type (0=sea, 100=land) expected for region r
    for r in regions:
        if r< split:
            keys[r] = 0.
        else:
            keys[r] = 100.
    sh = list(mask.shape)
    sh.insert(0,maximum_regions_per_cell)
    # potential holds per-slot accumulated weights, potential_reg the region ids;
    # -999 marks an unused slot
    potential = MV2.ones(sh,dtype='d')*-999
    potential_reg = MV2.ones(sh,dtype='d')*-999
    g1 = sftbyrgn.getGrid()
    g2 = mask.getGrid()
    r1 = regrid2.Horizontal(g1,g2)
    w = cdutil.area_weights(sftbyrgn)
    if verbose: print 'First pass'
    itmp = 0.
    # First pass: regrid each region's area-weight field onto the target grid
    # and record it wherever the target mask matches the region's surface type
    for ireg in keys.keys():
        genutil.statusbar(itmp,len(keys.keys())-1)
        itmp += 1.
        c = MV2.equal(sftbyrgn,ireg)
        w2 = 1.-c*w
        s2,w3 = r1(sftbyrgn,mask=w2.filled(),returnTuple=1)
        c2 = MV2.equal(mask,keys[ireg])
        loop(potential,potential_reg,c2,w3,ireg)
    # found = number of region slots filled per output cell
    found = MV2.zeros(sh[1:],typecode='f')
    for i in range(maximum_regions_per_cell):
        found = found+MV2.not_equal(potential[i],-999)
    sh2 = list(sh)
    # Expansion passes: widen the search window by one ring of cells each time
    # to assign regions to cells still unmatched after the first pass
    for k in range(extend_up_to):
        sh2[1] = sh[1]+2*(k+1)
        sh2[2] = sh[2]+2*(k+1)
        ## Form the possible i/j couples !
        s = MV2.sum(MV2.ravel(MV2.equal(potential[0],-999)),0)
        if verbose: print 'Expanding up to',k+1,'cells while trying to fix',s,'cells'
        #if dump:
        #f=cdms2.open('tmp_'+str(k)+'.nc','w')
        #f.write(sumregions(potential_reg,potential).astype('f'),id='sftbyrgn',axes=mask.getAxisList())
        #f.close()
        #g=sumregions(potential_reg,potential).astype('d')
        #g=MV2.masked_equal(g,-999)
        #g=MV2.greater(g,4999)*100.
        #g=MV2.absolute(mask-g)
        #g=MV2.masked_equal(g,0.)
        #print 'Number of differences:',MV2.count(g)
        if float(s) != 0:
            c0 = MV2.equal(potential[0],-999)
            couples = []
            # Pad the mask with -888 so shifted slices stay in bounds
            sft2 = MV2.zeros(sh2[1:],dtype='d')-888.
            sft2[k+1:-k-1,k+1:-k-1] = mask
            # Only offsets on the new outer ring (|i|>k or |j|>k)
            for i in range(-k-1,k+2):
                for j in range(-k-1,k+2):
                    if abs(i)>k or abs(j)>k: couples.append([i,j])
            ntot = len(keys.keys())*len(couples)-1
            itmp = 0
            for ireg in keys.keys():
                c = MV2.equal(sftbyrgn,ireg)
                w2 = 1.-c*w
                s2,w3 = r1(sftbyrgn,mask=w2.filled(),returnTuple=1)
                w4 = MV2.zeros(sh2[1:],typecode='d')
                w4[k+1:-k-1,k+1:-k-1] = w3
                for i,j in couples:
                    if verbose: genutil.statusbar(itmp,ntot)
                    itmp += 1.
                    # Shifted-window comparison: neighbor cell must have both
                    # the region's surface type and the same type as the cell
                    c2 = MV2.equal(sft2[j+k+1:j+k+1+sh[1],i+k+1:i+k+1+sh[2]],keys[ireg])
                    c3 = MV2.equal(sft2[j+k+1:j+k+1+sh[1],i+k+1:i+k+1+sh[2]],mask)
                    c2 = MV2.logical_and(c2,c3)
                    c2 = MV2.logical_and(c2,c0)
                    loop(potential,potential_reg,c2,w4[j+k+1:j+k+1+sh[1],i+k+1:i+k+1+sh[2]],ireg)
    # Cells still unmatched get found-1 (ends up negative/zero count)
    found = MV2.where(MV2.equal(potential[0],-999),found-1,found)
    out = sumregions(potential_reg,potential)
    out.setAxisList(mask.getAxisList())
    out.id = 'sftbyrgn'
    out = out.astype('i')
    out.missing_value = -999
    found.setAxisList(mask.getAxisList())
    found.id = 'found'
    found = found.astype('i')
    found.missing_value = -999
    # Drop the auto-generated .name attribute so it is not written to file
    del(out.name)
    del(found.name)
    return out,found
def generateSurfaceTypeByRegionMask(mask,sftbyrgn=None,sftbyrgnmask=215,regions = range(201,223), maximum_regions_per_cell=4,extend_up_to=3,verbose=True):
    """ Maps a "types" dataset onto a landsea mask
    Usage:
    mapped,found = generateSurfaceTypeByRegionMask(mask,sftbyrgn,sftbyrgnmask=None,regions=None,maximum_regions_per_cell=4,extend_up_to=3,verbose=True)
    Input:
    mask : land/sea mask (100/0) onto which you wish to map the regions (a grid may be passed instead; a land/sea mask is then generated for you)
    sftbyrgn: mask you wish to map; if None then uses the bundled "sftbyrgn" dataset (old ezget type)
    sftbyrgnmask: land/sea mask for sftbyrgn, or a number specifying the limit in values of sftbyrgn which indicates the land/sea threshold (greater values are land)
    regions: Numbers from sftbyrgn array that you want to map onto mask
    maximum_regions_per_cell: maximum number of regions considered in a cell
    extend_up_to : how many grid cells away around a cell can we extend to identify a guess
    verbose: prints to the screen what's going on (default is True)
    Output:
    mapped : mapped input mask
    found : number of regions matched for each output cell
    """
    ## OK first determine which regions are available
    ## Must be integer values
    if isinstance(mask, cdms2.grid.TransientRectGrid):
        # A grid was passed instead of a mask: build the 100/0 land/sea mask
        mask = cdutil.generateLandSeaMask(mask)*100.
    if sftbyrgn is None:
        # Default regions dataset from the sample_data directory
        sftbyrgn = cdms2.open(os.path.join(sys.prefix,'sample_data','sftbyrgn.nc'))('sftbyrgn')
    if regions is None:
        if verbose: print 'Preparing regions'
        ## regions = range(201,223)
        # Scan candidate integer ids and keep the ones actually present
        regions=[]
        for i in range(0,10000):
            genutil.statusbar(i,9999)
            c=float(MV2.sum(MV2.ravel(MV2.equal(sftbyrgn,i)),0))
            if c!=0: regions.append(i)
    if verbose: print 'Regions:',regions
    ## If no mask passed fr sftbyrgn, assumes everything greater 5000 is land)
    if isinstance(sftbyrgnmask,int):
        # Integer threshold: region ids >= threshold are treated as land
        split = sftbyrgnmask
        n=MV2.maximum(mask)
        sftbyrgnmask=MV2.greater_equal(sftbyrgn,sftbyrgnmask)*n
    else:
        split = MV2.maximum(sftbyrgnmask)/2.
    ## Now guess the type for each regions
    keys={}
    ##
    ## Nice way to do it
    ##     for r in regions:
    ##         c=MV2.not_equal(sftbyrgn,r)
    ##         c=MV2.masked_where(c,sftbyrgnmask)
    ##         n=MV2.count(c)
    ##         c=float(MV2.sum(MV2.ravel(c),0)/n)
    ##         print r,c,n
    ##         keys[r]=c
    ## Fast but not so "general" way to do it
    # keys[r] is the surface type (0=sea, 100=land) expected for region r
    for r in regions:
        if r< split:
            keys[r]=0.
        else:
            keys[r]=100.
    sh=list(mask.shape)
    sh.insert(0,maximum_regions_per_cell)
    # potential holds per-slot accumulated weights, potential_reg the region ids;
    # -999 marks an unused slot
    potential=MV2.ones(sh,dtype='d')*-999
    potential_reg=MV2.ones(sh,dtype='d')*-999
    g1=sftbyrgn.getGrid()
    g2=mask.getGrid()
    r1=regrid2.Regridder(g1,g2)
    w=cdutil.area_weights(sftbyrgn)
    if verbose: print 'First pass'
    itmp=0.
    # First pass: regrid each region's area-weight field onto the target grid
    # and record it wherever the target mask matches the region's surface type
    for ireg in keys.keys():
        genutil.statusbar(itmp,len(keys.keys())-1)
        itmp+=1.
        c=MV2.equal(sftbyrgn,ireg)
        w2=1.-c*w
        s2,w3=r1(sftbyrgn,mask=w2.filled(),returnTuple=1)
        c2=MV2.equal(mask,keys[ireg])
        loop(potential,potential_reg,c2,w3,ireg)
    # found = number of region slots filled per output cell
    found=MV2.zeros(sh[1:],typecode='f')
    for i in range(maximum_regions_per_cell):
        found=found+MV2.not_equal(potential[i],-999)
    sh2=list(sh)
    # Expansion passes: widen the search window by one ring of cells each time
    # to assign regions to cells still unmatched after the first pass
    for k in range(extend_up_to):
        sh2[1]=sh[1]+2*(k+1)
        sh2[2]=sh[2]+2*(k+1)
        ## Form the possible i/j couples !
        s=MV2.sum(MV2.ravel(MV2.equal(potential[0],-999)),0)
        if verbose: print 'Expanding up to',k+1,'cells while trying to fix',s,'cells'
        ## if dump:
        ##     f=cdms2.open('tmp_'+str(k)+'.nc','w')
        ##     f.write(sumregions(potential_reg,potential).astype('f'),id='sftbyrgn',axes=mask.getAxisList())
        ##     f.close()
        ## g=sumregions(potential_reg,potential).astype('d')
        ## g=MV2.masked_equal(g,-999)
        ## g=MV2.greater(g,4999)*100.
        ## g=MV2.absolute(mask-g)
        ## g=MV2.masked_equal(g,0.)
        ## print 'Number of differences:',MV2.count(g)
        if float(s)!=0:
            c0=MV2.equal(potential[0],-999)
            couples=[]
            # Pad the mask with -888 so shifted slices stay in bounds
            sft2=MV2.zeros(sh2[1:],dtype='d')-888.
            sft2[k+1:-k-1,k+1:-k-1]=mask
            # Only offsets on the new outer ring (|i|>k or |j|>k)
            for i in range(-k-1,k+2):
                for j in range(-k-1,k+2):
                    if abs(i)>k or abs(j)>k: couples.append([i,j])
            ntot=len(keys.keys())*len(couples)-1
            itmp=0
            for ireg in keys.keys():
                c=MV2.equal(sftbyrgn,ireg)
                w2=1.-c*w
                s2,w3=r1(sftbyrgn,mask=w2.filled(),returnTuple=1)
                w4=MV2.zeros(sh2[1:],typecode='d')
                w4[k+1:-k-1,k+1:-k-1]=w3
                for i,j in couples:
                    if verbose: genutil.statusbar(itmp,ntot)
                    itmp+=1.
                    # Shifted-window comparison: neighbor cell must have both
                    # the region's surface type and the same type as the cell
                    c2=MV2.equal(sft2[j+k+1:j+k+1+sh[1],i+k+1:i+k+1+sh[2]],keys[ireg])
                    c3=MV2.equal(sft2[j+k+1:j+k+1+sh[1],i+k+1:i+k+1+sh[2]],mask)
                    c2=MV2.logical_and(c2,c3)
                    c2=MV2.logical_and(c2,c0)
                    loop(potential,potential_reg,c2,w4[j+k+1:j+k+1+sh[1],i+k+1:i+k+1+sh[2]],ireg)
    # Cells still unmatched get found-1 (ends up negative/zero count)
    found=MV2.where(MV2.equal(potential[0],-999),found-1,found)
    out=sumregions(potential_reg,potential)
    out.setAxisList(mask.getAxisList())
    found.setAxisList(mask.getAxisList())
    found=found.astype('i')
    found.missing_value=-999
    found.id='found'
    out.id='sftbyrgn'
    out=out.astype('i')
    out.missing_value=-999
    # Drop the auto-generated .name attribute so it is not written to file
    del(out.name)
    del(found.name)
    return out,found
def get(self, returnTuple=1):
    """Retrieve the conditioned variable and, optionally, its fraction field.

    The variable is taken either from ``self.data`` (array case) or read
    from ``self.file`` (file case), then sub-selected with the user's
    cdmsArguments/cdmsKeywords, preprocessed, weighted, regridded onto the
    target grid (if any), masked where the fraction is 0, and finally
    scaled by ``self.slope`` / shifted by ``self.offset``.

    :param returnTuple: if true (default) return ``(v, frc)`` where frc is
        the fraction of each cell containing real data; otherwise return
        just ``v``.
    """
    value = self.data
    frc = None
    # self.data may be a (value, fraction) pair
    if type(value) in [types.TupleType, types.ListType]:
        value, frc = value
    if isinstance(value, numpy.ndarray) or numpy.ma.isMA(
            value):  # Variable defined from array
        if frc is None:
            # No fraction given: assume 100% everywhere
            frc = numpy.ma.ones(value.shape, dtype=numpy.float32)
        kw = {}
        args = []
        # Add user defined cdmsArguments
        for a in self.cdmsArguments:
            args.append(a)
        # Add user defined cdmsKeywords
        for k in self.cdmsKeywords.keys():
            kw[k] = self.cdmsKeywords[k]
        # try to apply, if not forget about it
        try:
            v = value(*args, **kw)
            frc = frc(*args, **kw)
            # Now removes the slice types
            # because they can't be used twice
            for k in kw.keys():
                if type(kw[k]) == types.SliceType:
                    del (kw[k])
            for i in range(len(args)):
                if type(args[i]) == types.SliceType:
                    pop(args, i)
                    i = i - 1
        except:
            v = value
    else:  # Variable comes from a file, need to be retrieved
        f = cdms2.open(self.file)
        kw = {}
        args = []
        # Add user defined cdmsArguments
        for a in self.cdmsArguments:
            args.append(a)
        # Add user defined cdmsKeywords
        for k in self.cdmsKeywords.keys():
            kw[k] = self.cdmsKeywords[k]
        v = f(self.var, *args, **kw)
        f.close()
        # Now removes the slice types
        # because they can't be used twice
        for k in kw.keys():
            if type(kw[k]) == types.SliceType:
                del (kw[k])
        for i in range(len(args)):
            if type(args[i]) == types.SliceType:
                pop(args, i)
                i = i - 1
    ## At that stage applied the preprocess function
    if self.preprocess is not None:
        v = apply(self.preprocess, (v, ), self.preprocessKeywords)
    # Create the fractions
    if frc is None:
        frc = v.mask
        if frc is numpy.ma.nomask:  #no mask
            # Create a bunch of ones (100%)
            frc = numpy.ones(v.shape, numpy.float32)
        else:
            # Fraction are actually just the opposite of the mask at that stage !
            frc = frc.astype(
                MV2.float32)  # Sometimes if it is bytes it doesn't work
            frc = 1. - frc
            frc = frc.astype(
                MV2.float32)  # no need for double precision here !
    else:
        # Fraction supplied by caller: zero it wherever the data is masked
        m = v.mask
        if not m is numpy.ma.nomask:
            frc = MV2.where(m, 0., frc).filled(0.)
    # Now get the associted weights object
    # Note that we pass v in case some of the values are defined as "input"
    # in which case it would use v instead of the weights for weightsing
    m = self.weightsMaker(v)
    if not m is None:
        # grows the variable and the weights for possible Xtra dimensions
        m = m(*args, **kw)
        v, m = genutil.grower(v, m)
        # make sure variable and weights are compatible
        if m.shape != v.shape:
            raise VariableConditionerError, 'weights and variable have different shapes: weights is ' + str(
                m.shape) + ' and grid is ' + str(v.shape)
        # make sure they're on the same grid (in case one starts at 0 and one at -180 for example
        if not m.getGrid() is v.getGrid():
            m = m.regrid(v.getGrid())
        # Mask the dataset where the fraction are 0.
        v = MV2.masked_where(MV2.equal(m.filled(0), 0.), v)
        # Update the fractions
        frc = m.filled(0.)
        m = v.mask
        if not m is numpy.ma.nomask:
            frc = numpy.where(m, 0., frc)
##            # Filll the mask with ones, i.e. set fraction to 0 when the mask is masked hahah
##            frc = numpy.where(m.filled(1),0.,frc)
    # Now get the target grid
    g = self.weightedGridMaker()
    if not g is None:  # we do have a target grid to go to !
        # Create the regridder object
        rf = regrid2.Regridder(v.getGrid(), g)
        # and regrid passing the weights to use to each grid cell
        # at this point it should be only 0/1
        v, frc = rf(v, mask=1. - frc, returnTuple=1)
        frc = MV2.array(frc)
        frc.setAxisList(v.getAxisList())
        v = v(*args, **kw)
        frc = frc(*args, **kw).filled(0.)
        # Note that now frc is not necessarily 0. and 1. but actuall fraction
        # of the grid cell that has real data in it.
        # do we weights after this regridding ?
        # once again pass v in case the weightsing wants
        # to work on the variable
        m = self.weightedGridMaker.weightsMaker(v)
        if not m is None:  # we have a weights
            m = m(*args, **kw)  # apply the extra cdmsKeywords to it
            v, m = genutil.grower(v, m)
            # make sure variable and weights are compatible
            if m.shape != v.shape:
                raise VariableConditionerError, 'weights and variable have different shapes: weights is ' + str(
                    m.shape) + ' and grid is ' + str(v.shape)
            # make sure they're on the same grid (in case one starts at 0 and one at -180 for example
            if not m.getGrid() is v.getGrid():
                m = m.regrid(v.getGrid())
            v = MV2.masked_where(MV2.equal(m.filled(0.), 0.), v)
            # weights the fraction where needed
            frc = m.filled(0.)
            m = v.mask
            if not m is numpy.ma.nomask:
                frc = numpy.where(m, 0., frc)
##                frc=numpy.where(m.filled(1),0.,frc)
        # Now make the fraction an MV2 and puts the dim from v on it
        frc = MV2.array(frc)
        frc.setAxisList(v.getAxisList())
        # just in case applies the cdmsKeywords again
        # usefull in case your final grid is global
        # and you specified Nino3 region for example.
        v = v(*args, **kw)
        frc = frc(*args, **kw).filled(0.)
    if v.missing_value is None:
        v.missing_value = 1.e20
    # Hide every cell whose data fraction ended up at 0
    v = MV2.masked_where(MV2.equal(frc, 0.), v)
    # Now applies the slope and offset if necessary
    if self.slope != 1.:
        v = v * self.slope
    if self.offset != 0.:
        v = v + self.offset
    # Drop an all-False mask so downstream code sees an unmasked variable
    if not ((v.mask is None) or (v.mask is MV2.nomask)):
        if numpy.ma.allclose(v.mask, 0.):
            v._mask = numpy.ma.nomask
    # Returns the variable and the fractions or just the variable
    if returnTuple:
        ## if not ((frc.mask is None) or (frc.mask is MV2.nomask)):
        ##     if numpy.ma.allclose(frc.mask,0.):
        ##         frc._mask=None
        return v, frc
    else:
        return v
def harmonic(data, k=3):
    """Return the sum of the mean and the first ``k`` harmonics of daily
    climatology ``data`` (Fourier decomposition along the time axis).

    :param data: cdms2 variable with a time axis (assumed 365-day daily
        climatology; N is hard-coded to 365 here)
    :param k: number of harmonics to retain (default 3); must not exceed N/2
    :returns: cdms2 variable with the same axes as ``data``
    :raises ValueError: if k exceeds N/2
    """
    data = data.reorder('t...')
    cdutil.setAxisTimeBoundsDaily(data.getTime())
    axislist = data.getAxisList()
    dataid = data.id
    daily = True
    monthly = False
    timeAxis = axislist[0]
    N = 365.  #len(timeAxis)
    # P = 10.     # 10 year, yearly harmonic oscilation
    # P = 10*12   # 10 year, monthly harmonic oscilation
    # P = 10*365  # 10 year, daily harmonic oscilation
    # if P > N:
    #     raise ValueError("P('%d') value should not exceed N(%d)" % (P,N))
    if k > N/2:
        raise ValueError("k value should not exceed (%d) i.e. N/2 value" % (N/2))
    if len(timeAxis) > 366:
        # NOTE(review): in this branch y_t is never assigned, so the loop
        # below would raise NameError for multi-year input — confirm intent.
        print 'found more than 1 year data.'
        # y_t = dailyClimatology(data, action='sum')
    else:
        y_t = data
    # end of if len(timeAxis) > 366:
    Y_0 = cdutil.averager(data, axis='t', action='average', weights='equal')
    # make memory free
    del data
    # Time index 1..N broadcast over the non-time dimensions
    t = numpy.arange(1, N+1, dtype='float')
    otheraxis = list(Y_0.shape)
    ax_product = 1
    for ax in otheraxis:
        ax_product *= ax
    otheraxis.insert(0,N)
    t = t.repeat(ax_product).reshape(otheraxis)
    angle = 2 * math.pi * t/N
    Y_k = 0.
    for i in range(1,k+1):
        kangle = angle*i
        # Fourier coefficients of the i-th harmonic
        A_k = (2./N) * cdutil.averager(y_t * numpy.cos(kangle), axis='t', action='sum')
        B_k = (2./N) * cdutil.averager(y_t * numpy.sin(kangle), axis='t', action='sum')
        # Amplitude of the i-th harmonic
        C_k = MV2.sqrt((A_k*A_k) + (B_k*B_k))
        # if A_k is positiv, then retain this phase_angle as it is.
        # phase_angle should be in degrees
        phase_angle = phase_arc_angle = MV2.arctan(B_k/A_k)
        # if A_k is zero, then replace phase_angle with pi/2 else retain same
        phase_angle = MV2.where(MV2.equal(A_k, 0.), math.pi/2.0, phase_arc_angle)
        # if A_k is negative, then add pi with phase_angle (if it is <= pi )
        condition1 = MV2.logical_and(MV2.less(A_k, 0.), MV2.less_equal(phase_arc_angle, math.pi))
        phase_angle = MV2.where(condition1, phase_arc_angle+math.pi, phase_arc_angle)
        # if A_k is negative, then subtract pi from phase_angle (if it is > pi )
        condition2 = MV2.logical_and(MV2.less(A_k, 0.), MV2.greater(phase_arc_angle, math.pi))
        condition3 = MV2.logical_or(condition1, condition2)
        # NOTE(review): each MV2.where above rebuilds phase_angle from
        # phase_arc_angle, discarding the previous adjustments (e.g. the
        # A_k==0 and condition1 cases) — verify this is the intended logic.
        phase_angle = MV2.where(condition3, phase_arc_angle-math.pi, phase_arc_angle)
        # make memory free
        del phase_arc_angle
        if daily and not monthly:
            # subtract 15 days lag to adjust phase_angle w.r.t daily
            print "Daily Subtraction"
            phase_angle -= (15.*2*math.pi)/N
        # end of if daily and not monthly:
        phase_angle = numpy.array(phase_angle)
        # phase_angle = numpy.tile(phase_angle, N).reshape(kangle.shape)
        kangle = numpy.array(kangle)
        Y_k += C_k * MV2.cos(kangle - phase_angle)
    # end of for i in range(1,k+1):
    # add mean to the sum of first k-th harmonic of data
    Y_k += Y_0
    # make memory free
    del y_t, Y_0
    sumOfMean_and_first_k_harmonic = cdms2.createVariable(Y_k, id=dataid)
    sumOfMean_and_first_k_harmonic.setAxisList(axislist)
    sumOfMean_and_first_k_harmonic.comments = 'sumOfMean_and_first_%d_harmonic' % k
    # make memory free
    del Y_k
    # return result
    return sumOfMean_and_first_k_harmonic
def logLinearInterpolation(A,P,levels=[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000],status=None):
    """
    Log-linear interpolation to convert a field from sigma levels to pressure levels
    Value below surface are masked
    Input
      A :      array on sigma levels
      P :      pressure field from TOP (level 0) to BOTTOM (last level)
      levels : pressure levels to interplate to (same units as P), default levels are:[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000]
    P and levels must have same units
    Output
      array on pressure levels (levels)
    Examples:
      A=logLinearInterpolation(A,P),levels=[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000])
    """
    try:
        nlev=len(levels)   # Number of pressure levels
    except:
        nlev=1  # if only one level len(levels) would breaks
        levels=[levels,]
    order=A.getOrder()
    # Put the level axis first so A[i]/P[i] index sigma levels
    A=A(order='z...')
    P=P(order='z...')
    sh=list(P.shape)
    nsigma=sh[0] #number of sigma levels
    sh[0]=nlev
    t=MV2.zeros(sh,typecode=MV2.float32)
    sh2=P[0].shape
    prev=-1
    for ilev in range(nlev): # loop through pressure levels
        if status is not None:
            prev=genutil.statusbar(ilev,nlev-1.,prev)
        lev=levels[ilev] # get value for the level
        # -1 marks "not found yet" in the bracketing arrays below
        Pabv=MV2.ones(sh2,MV2.float)
        Aabv=-1*Pabv # Array on sigma level Above
        Abel=-1*Pabv # Array on sigma level Below
        Pbel=-1*Pabv # Pressure on sigma level Below
        Pabv=-1*Pabv # Pressure on sigma level Above
        Peq=MV2.masked_equal(Pabv,-1) # Area where Pressure == levels
        for i in range(1,nsigma): # loop from second sigma level to last one
            a=MV2.greater_equal(P[i],lev) # Where is the pressure greater than lev
            b=MV2.less_equal(P[i-1],lev) # Where is the pressure less than lev
            # Now looks if the pressure level is in between the 2 sigma levels
            # If yes, sets Pabv, Pbel and Aabv, Abel
            a=MV2.logical_and(a,b)
            Pabv=MV2.where(a,P[i],Pabv) # Pressure on sigma level Above
            Aabv=MV2.where(a,A[i],Aabv) # Array on sigma level Above
            Pbel=MV2.where(a,P[i-1],Pbel) # Pressure on sigma level Below
            Abel=MV2.where(a,A[i-1],Abel) # Array on sigma level Below
            Peq= MV2.where(MV2.equal(P[i],lev),A[i],Peq)
        val=MV2.masked_where(MV2.equal(Pbel,-1),numpy.ones(Pbel.shape)*lev) # set to missing value if no data below lev if there is
        # Interpolate linearly in log(pressure)
        tl=MV2.log(val/Pbel)/MV2.log(Pabv/Pbel)*(Aabv-Abel)+Abel # Interpolation
        # Where the pressure exactly matched a sigma level, use that value
        if ((Peq.mask is None) or (Peq.mask is MV2.nomask)):
            tl=Peq
        else:
            tl=MV2.where(1-Peq.mask,Peq,tl)
        t[ilev]=tl.astype(MV2.float32)
    ax=A.getAxisList()
    # Build the output pressure-level axis without auto bounds
    autobnds=cdms2.getAutoBounds()
    cdms2.setAutoBounds('off')
    lvl=cdms2.createAxis(MV2.array(levels).filled())
    cdms2.setAutoBounds(autobnds)
    try:
        lvl.units=P.units
    except:
        pass
    lvl.id='plev'
    try:
        t.units=P.units
    except:
        pass
    ax[0]=lvl
    t.setAxisList(ax)
    t.id=A.id
    # Carry over the input variable's attributes
    for att in A.listattributes():
        setattr(t,att,getattr(A,att))
    # Restore the caller's original axis order
    return t(order=order)
def logLinearInterpolation(A, P, levels=[
    100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000,
    20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000
], status=None):
    """
    Log-linear interpolation to convert a field from sigma levels to pressure levels
    Value below surface are masked
    Input
      A :      array on sigma levels
      P :      pressure field from TOP (level 0) to BOTTOM (last level)
      levels : pressure levels to interplate to (same units as P), default levels are:[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000]
    P and levels must have same units
    Output
      array on pressure levels (levels)
    Examples:
      A=logLinearInterpolation(A,P),levels=[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000])
    """
    try:
        nlev = len(levels)  # Number of pressure levels
    except:
        nlev = 1  # if only one level len(levels) would breaks
        levels = [
            levels,
        ]
    order = A.getOrder()
    # Put the level axis first so A[i]/P[i] index sigma levels
    A = A(order='z...')
    P = P(order='z...')
    sh = list(P.shape)
    nsigma = sh[0]  #number of sigma levels
    sh[0] = nlev
    t = MV2.zeros(sh, typecode=MV2.float32)
    sh2 = P[0].shape
    prev = -1
    for ilev in range(nlev):  # loop through pressure levels
        if status is not None:
            prev = genutil.statusbar(ilev, nlev - 1., prev)
        lev = levels[ilev]  # get value for the level
        # -1 marks "not found yet" in the bracketing arrays below
        Pabv = MV2.ones(sh2, MV2.float)
        Aabv = -1 * Pabv  # Array on sigma level Above
        Abel = -1 * Pabv  # Array on sigma level Below
        Pbel = -1 * Pabv  # Pressure on sigma level Below
        Pabv = -1 * Pabv  # Pressure on sigma level Above
        Peq = MV2.masked_equal(Pabv, -1)  # Area where Pressure == levels
        for i in range(1, nsigma):  # loop from second sigma level to last one
            a = MV2.greater_equal(
                P[i], lev)  # Where is the pressure greater than lev
            b = MV2.less_equal(P[i - 1],
                               lev)  # Where is the pressure less than lev
            # Now looks if the pressure level is in between the 2 sigma levels
            # If yes, sets Pabv, Pbel and Aabv, Abel
            a = MV2.logical_and(a, b)
            Pabv = MV2.where(a, P[i], Pabv)  # Pressure on sigma level Above
            Aabv = MV2.where(a, A[i], Aabv)  # Array on sigma level Above
            Pbel = MV2.where(a, P[i - 1], Pbel)  # Pressure on sigma level Below
            Abel = MV2.where(a, A[i - 1], Abel)  # Array on sigma level Below
            Peq = MV2.where(MV2.equal(P[i], lev), A[i], Peq)
        val = MV2.masked_where(
            MV2.equal(Pbel, -1),
            numpy.ones(Pbel.shape) *
            lev)  # set to missing value if no data below lev if there is
        # Interpolate linearly in log(pressure)
        tl = MV2.log(val / Pbel) / MV2.log(
            Pabv / Pbel) * (Aabv - Abel) + Abel  # Interpolation
        # Where the pressure exactly matched a sigma level, use that value
        if ((Peq.mask is None) or (Peq.mask is MV2.nomask)):
            tl = Peq
        else:
            tl = MV2.where(1 - Peq.mask, Peq, tl)
        t[ilev] = tl.astype(MV2.float32)
    ax = A.getAxisList()
    # Build the output pressure-level axis without auto bounds
    autobnds = cdms2.getAutoBounds()
    cdms2.setAutoBounds('off')
    lvl = cdms2.createAxis(MV2.array(levels).filled())
    cdms2.setAutoBounds(autobnds)
    try:
        lvl.units = P.units
    except:
        pass
    lvl.id = 'plev'
    try:
        t.units = P.units
    except:
        pass
    ax[0] = lvl
    t.setAxisList(ax)
    t.id = A.id
    # Carry over the input variable's attributes
    for att in A.listattributes():
        setattr(t, att, getattr(A, att))
    # Restore the caller's original axis order
    return t(order=order)
refabbv = ref metrics_dictionary["References"][ref] = obs_dic[var][obs_dic[var][ref]] try: if obs_dic[var][obs_dic[var][ref]]["CMIP_CMOR_TABLE"]=="Omon": OBS = pcmdi_metrics.pcmdi.io.OBS(parameters.obs_data_path,var,obs_dic,ref) else: OBS = pcmdi_metrics.pcmdi.io.OBS(parameters.obs_data_path,var,obs_dic,ref) OBS.setTargetGrid(parameters.targetGrid,regridTool,regridMethod) OBS.realm = realm OBS.table = table_realm applyCustomKeys(OBS,parameters.custom_keys,var) if region is not None: ## Ok we need to apply a mask oMask = pcmdi_metrics.pcmdi.io.OBS(parameters.obs_data_path,"sftlf",obs_dic,ref) oMask = oMask.get("sftlf") OBS.mask = MV2.logical_not(MV2.equal(oMask,region)) OBS.targetMask = MV2.logical_not(MV2.equal(sftlf["targetGrid"],region)) try: if level is not None: do = OBS.get(var,level=level) else: do = OBS.get(var) except Exception,err: dup('failed with 4D OBS',var,ref,err) continue grd["GridResolution"] = do.shape[1:] metrics_dictionary["GridInfo"] = grd dup('OBS SHAPE IS ', do.shape) for model_version in parameters.model_versions: # LOOP THROUGH DIFFERENT MODEL VERSIONS OBTAINED FROM input_model_data.py
return stat_xy # ## Dummy data # In[6]: # Prepare dummy data -- create random array for testing random_array = np.random.rand(10,30) X = cdms2.createAxis(['model_ '+str(r) for r in list(range(0,30))]) Y = cdms2.createAxis(['metric_ '+str(r) for r in list(range(0,10))]) stat_xy = MV2.array(random_array, axes=(Y,X), id='statistics') # Plant missing value stat_xy[5][5] = -1.e20 stat_xy = MV2.masked_where(MV2.equal(stat_xy, -1.e20), stat_xy) # Normalize rows by its median Normalize = True if Normalize: # Normalize by median value stat_xy = normalize_by_median(stat_xy) # Additional dummy data for annotate test stat_xy_annotate = MV2.multiply(stat_xy, 2) # Additional dummy data for additional triangles stat_xy_2 = normalize_by_median(MV2.add(stat_xy, 2)) stat_xy_3 = normalize_by_median(MV2.add(stat_xy, 3)) stat_xy_4 = normalize_by_median(MV2.add(stat_xy, 4)) axes = stat_xy.getAxisList()
# Regrid to current obs data gridFile = '/clim_obs/obs/atm/mo/tas/JRA25/ac/tas_JRA25_000001-000012_ac.nc' f_g = cdm.open(gridFile) grid = f_g('tas').getGrid() landMask = landMask.regrid(grid, regridTool='ESMF', regridMethod='linear') f_g.close() landMask.id = 'sftlf' # Rename # Deal with interpolated values landMask[mv.greater(landMask, 75)] = 100 # Fix weird ocean values landMask[mv.less(landMask, 75)] = 0 # Fix continental halos landMask[mv.less(landMask, 0)] = 0 # Fix negative values # Invert land=100, ocean=0 landMask[mv.equal(landMask, 0)] = 50 # Convert ocean landMask[mv.equal(landMask, 100)] = 0 # Convert ocean landMask[mv.equal(landMask, 50)] = 100 # Convert ocean # Create outfile and write outFile = 'sftlf_pcmdi-metrics_fx_NCAR-JRA25_197901-201401.nc' # Write variables to file if os.path.isfile(outFile): os.remove(outFile) fOut = cdm.open(outFile, 'w') # Use function to write standard global atts globalAttWrite(fOut, options=None) fOut.pcmdi_metrics_version = '0.1-alpha' fOut.pcmdi_metrics_comment = 'This climatology was prepared by ' +\ 'PCMDI for the metrics package and is ' +\ 'intended for research purposes only'
def harmonic(data, k=3, time_type='daily', phase_shift=15):
    """
    Inputs :
        data : climatology data
        k : Integer no to compute K th harmonic. By default it takes 3.
        time_type : daily | monthly | full (time type of input climatology)
                    'daily' -> it returns 365 days harmonic,
                    'monthly' -> it returns 12 month harmonic,
                    'full' -> it retuns harmonic for full length of
                              input data.
        phase_shift : Used to subtract 'phase_shift' days lag to adjust
                      phase_angle w.r.t daily or monthly. By default it
                      takes 15 days lag to adjust phase_angle w.r.t daily
                      data. User can pass None to disable this option.

    Returns :
        Returns "sum mean of mean and first K th harmonic" of input
        climatology data.

    Concept :
        Earth science data consists of a strong seasonality component as
        indicated by the cycles of repeated patterns in climate variables
        such as air pressure, temperature and precipitation. The
        seasonality forms the strongest signals in this data and in order
        to find other patterns, the seasonality is removed by subtracting
        the monthly mean values of the raw data for each month. However
        since the raw data like air temperature, pressure, etc. are
        constantly being generated with the help of satellite
        observations, the climate scientists usually use a moving
        reference base interval of some years of raw data to calculate
        the mean in order to generate the anomaly time series and study
        the changes with respect to that.

        Fourier series analysis decomposes a signal into an infinite
        series of harmonic components. Each of these components is
        comprised initially of a sine wave and a cosine wave of equal
        integer frequency. These two waves are then combined into a
        single cosine wave, which has characteristic amplitude (size of
        the wave) and phase angle (offset of the wave). Convergence has
        been established for bounded piecewise continuous functions on a
        closed interval, with special conditions at points of
        discontinuity. Its convergence has been established for other
        conditions as well, but these are not relevant to the analysis
        at hand.

    Reference: Daniel S Wilks, 'Statistical Methods in the Atmospheric
               Sciences' second Edition, page no(372-378).

    Written By : Arulalan.T

    Date : 16.05.2014

    """
    data = data.reorder('t...')
    cdutil.setAxisTimeBoundsDaily(data.getTime())
    axislist = data.getAxisList()
    timeAxis = axislist[0]
    dataid = data.id
    # N is the period length of the climatology (must be float)
    if time_type in ['daily']:
        N = 365.0  # must be float
    elif time_type[:3] in ['mon']:
        N = 12.0  # must be float
    elif time_type in ['full']:
        N = float(len(timeAxis))
    if k > N/2:
        raise ValueError("k value should not exceed (%d) i.e. N/2 value" % (N/2))
    if len(timeAxis) > 366:
        print 'found more than 1 year data.'
        raise ValueError("Kindly pass only climatology data")
    else:
        y_t = data
    # end of if len(timeAxis) > 366:
    Y_0 = cdutil.averager(data, axis='t', action='average', weights='equal')
    # make memory free
    del data
    # Time index 1..N broadcast over the non-time dimensions
    t = numpy.arange(1, N+1, dtype='float')
    otheraxis = list(Y_0.shape)
    ax_product = 1
    for ax in otheraxis:
        ax_product *= ax
    otheraxis.insert(0,N)
    t = t.repeat(ax_product).reshape(otheraxis)
    angle = 2 * math.pi * t/N
    Y_k = 0.
    for i in range(1,k+1):
        kangle = angle*i
        # Fourier coefficients of the i-th harmonic
        A_k = (2./N) * cdutil.averager(y_t * numpy.cos(kangle), axis='t', action='sum')
        B_k = (2./N) * cdutil.averager(y_t * numpy.sin(kangle), axis='t', action='sum')
        # Amplitude of the i-th harmonic
        C_k = MV2.sqrt((A_k*A_k) + (B_k*B_k))
        # if A_k is positiv, then retain this phase_angle as it is.
        # phase_angle should be in degrees
        phase_angle = phase_arc_angle = MV2.arctan(B_k/A_k)
        # if A_k is zero, then replace phase_angle with pi/2 else retain same
        phase_angle = MV2.where(MV2.equal(A_k, 0.), math.pi/2.0, phase_arc_angle)
        # if A_k is negative, then add pi with phase_angle (if it is <= pi )
        condition1 = MV2.logical_and(MV2.less(A_k, 0.), MV2.less_equal(phase_arc_angle, math.pi))
        phase_angle = MV2.where(condition1, phase_arc_angle+math.pi, phase_arc_angle)
        # if A_k is negative, then subtract pi from phase_angle (if it is > pi )
        condition2 = MV2.logical_and(MV2.less(A_k, 0.), MV2.greater(phase_arc_angle, math.pi))
        condition3 = MV2.logical_or(condition1, condition2)
        # NOTE(review): each MV2.where above rebuilds phase_angle from
        # phase_arc_angle, discarding the previous adjustments (e.g. the
        # A_k==0 and condition1 cases) — verify this is the intended logic.
        phase_angle = MV2.where(condition3, phase_arc_angle-math.pi, phase_arc_angle)
        # make memory free
        del phase_arc_angle
        if phase_shift:
            # subtract 15 days lag to adjust phase_angle w.r.t daily
            phase_angle -= (phase_shift *2 * math.pi) / N
        # end of if daily and not monthly:
        phase_angle = numpy.array(phase_angle)
        kangle = numpy.array(kangle)
        Y_k += C_k * MV2.cos(kangle - phase_angle)
    # end of for i in range(1,k+1):
    # add mean to the sum of first k-th harmonic of data
    Y_k += Y_0
    # make memory free
    del y_t, Y_0
    sumOfMean_and_first_k_harmonic = cdms2.createVariable(Y_k, id=dataid)
    sumOfMean_and_first_k_harmonic.setAxisList(axislist)
    sumOfMean_and_first_k_harmonic.comments = 'sumOfMean_and_first_%d_harmonic' % k
    # make memory free
    del Y_k
    # return result
    return sumOfMean_and_first_k_harmonic
def linearInterpolation(A, Idx, levels=[
        100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000,
        20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000], status=None,
        axis='z'):
    """
    Linear interpolation to interpolate a field from some levels to another
    set of levels. Values below "surface" are masked.

    :param A: array to interpolate
    :type A:
    :param I: interpolation field (usually Pressure or depth) from TOP (level 0)
        to BOTTOM (last level) i.e P value going up with each level.
    :type I:
    :param levels: levels to interpolate to (same units as I).
        Default levels:[100000, 92500, 85000, 70000, 60000, 50000, 40000,
        30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000]
    :type levels:
    :param axis: Axis over which to do the linear interpolation.
        Can provide either an int representing axis index, or the axis name.
        Default: 'z'.
    :type axis: str or int

    .. note::
        I and levels must have same units

    :returns: array on new levels (levels)

    :Examples:
        .. doctest:: vertical_linearInterpolation

            >>> A=interpolate(A,I) # interpolates A over default levels
    """
    try:
        nlev = len(levels)  # Number of pressure levels
    except BaseException:
        nlev = 1  # if only one level len(levels) would breaks
        levels = [levels, ]
    # Remember the original dimension order so we can restore it on return
    order = A.getOrder()
    # Put the interpolation axis first on both the data and the index field
    A = A(order='%s...' % axis)
    Idx = Idx(order='%s...' % axis)
    sh = list(Idx.shape)
    nsigma = sh[0]  # number of sigma levels
    sh[0] = nlev
    t = MV2.zeros(sh, typecode=MV2.float32)  # output, one slab per target level
    sh2 = Idx[0].shape
    prev = -1
    for ilev in range(nlev):  # loop through pressure levels
        if status is not None:
            prev = genutil.statusbar(ilev, nlev - 1., prev)
        lev = levels[ilev]  # get value for the level
        # Work arrays initialized to -1 (sentinel for "not found yet")
        Iabv = MV2.ones(sh2, MV2.float)
        Aabv = -1 * Iabv  # Array on sigma level Above
        Abel = -1 * Iabv  # Array on sigma level Below
        Ibel = -1 * Iabv  # Pressure on sigma level Below
        Iabv = -1 * Iabv  # Pressure on sigma level Above
        Ieq = MV2.masked_equal(Iabv, -1)  # Area where Pressure == levels
        for i in range(1, nsigma):  # loop from second sigma level to last one
            a = MV2.greater_equal(
                Idx[i],
                lev)  # Where is the pressure greater than lev
            b = MV2.less_equal(
                Idx[i - 1],
                lev)  # Where is the pressure less than lev
            # Now looks if the pressure level is in between the 2 sigma levels
            # If yes, sets Iabv, Ibel and Aabv, Abel
            a = MV2.logical_and(a, b)
            Iabv = MV2.where(a, Idx[i], Iabv)  # Pressure on sigma level Above
            Aabv = MV2.where(a, A[i], Aabv)  # Array on sigma level Above
            Ibel = MV2.where(a, Idx[i - 1], Ibel)  # Pressure on sigma level Below
            Abel = MV2.where(a, A[i - 1], Abel)  # Array on sigma level Below
            # Remember exact matches separately; they bypass the interpolation
            Ieq = MV2.where(MV2.equal(Idx[i], lev), A[i], Ieq)
        # set to missing value if no data below lev if there is
        val = MV2.masked_where(MV2.equal(Ibel, -1.),
                               numpy.ones(Ibel.shape) * lev)
        tl = (val - Ibel) / (Iabv - Ibel) * \
            (Aabv - Abel) + Abel  # Interpolation
        if ((Ieq.mask is None) or (Ieq.mask is MV2.nomask)):
            # Ieq fully unmasked means every point matched lev exactly
            tl = Ieq
        else:
            tl = MV2.where(1 - Ieq.mask, Ieq, tl)
        t[ilev] = tl.astype(MV2.float32)
    # Build the output level axis without auto-generated bounds
    ax = A.getAxisList()
    autobnds = cdms2.getAutoBounds()
    cdms2.setAutoBounds('off')
    lvl = cdms2.createAxis(MV2.array(levels).filled())
    cdms2.setAutoBounds(autobnds)
    try:
        lvl.units = Idx.units
    except BaseException:
        pass
    lvl.id = 'plev'
    try:
        # NOTE(review): this copies the *index* field's units onto the output;
        # the attribute-copy loop below may overwrite it with A's units.
        t.units = Idx.units
    except BaseException:
        pass
    ax[0] = lvl
    t.setAxisList(ax)
    t.id = A.id
    # Copy remaining attributes from the input variable
    for att in A.listattributes():
        setattr(t, att, getattr(A, att))
    # Restore the caller's original dimension order
    return t(order=order)
def generateSurfaceTypeByRegionMask(mask, sftbyrgn=None, sftbyrgnmask=215,
                                    regions=range(201, 223),
                                    maximum_regions_per_cell=4,
                                    extend_up_to=3, verbose=True):
    """
    Maps a "types" dataset onto a landsea mask
    Usage:
    mapped,found = generateSurfaceTypeByRegionMask(mask,sftbyrgn,sftbyrgnmask=None,regions=None,maximum_regions_per_cell=4,extend_up_to=3,verbode=True)
    Input:
    mask : land/sea mask (100/0) onto you wish to map our grid (will generate a ld/sea mask for you)
    sftbyrgn: mask you wish to map
              if None then uses our own "sftbyrgn" dataset (old ezget type)
    sftbyrgnmask: land/sea mask for sftbyrgn
                  or a number specifying limit in values of sftbygrn
                  which indicate the threshold land/sea (greater values are land)
    regions: Numbers from sftbyrgn array that you want to map onto mask
    maximum_regions_per_cell: maximum number f regions concidered in a cell
    extend_up_to : how many grid cells away around a cell can we extent to identify a guess
    verbose: prints to the screen what's going on (default is True)

    Output:
    mapped : mapped input mask
    found  : ???
    """
    # OK first determine which regions are available
    # Must be integer values
    if isinstance(mask, cdms2.grid.TransientRectGrid):
        # A grid was passed instead of a mask -- build a land/sea mask for it
        mask = cdutil.generateLandSeaMask(mask) * 100.

    if sftbyrgn is None:
        # Fall back to the sample dataset shipped with the distribution
        sftbyrgn = cdms2.open(
            os.path.join(sys.prefix, 'sample_data', 'sftbyrgn.nc'))('sftbyrgn')

    if regions is None:
        # Scan all possible integer codes and keep those present in sftbyrgn
        if verbose:
            print 'Preparing regions'
        # regions = range(201,223)
        regions = []
        for i in range(0, 10000):
            genutil.statusbar(i, 9999)
            c = float(MV2.sum(MV2.ravel(MV2.equal(sftbyrgn, i)), 0))
            if c != 0:
                regions.append(i)
        if verbose:
            print 'Regions:', regions

    # If no mask passed fr sftbyrgn, assumes everything greater 5000 is land)
    if isinstance(sftbyrgnmask, int):
        split = sftbyrgnmask
        n = MV2.maximum(mask)
        sftbyrgnmask = MV2.greater_equal(sftbyrgn, sftbyrgnmask) * n
    else:
        split = MV2.maximum(sftbyrgnmask) / 2.

    # Now guess the type (land=100 / sea=0) for each region code
    keys = {}
    # Fast but not so "general" way to do it: codes below `split` are sea
    for r in regions:
        if r < split:
            keys[r] = 0.
        else:
            keys[r] = 100.

    sh = list(mask.shape)
    sh.insert(0, maximum_regions_per_cell)
    # -999 is the "unassigned" sentinel throughout
    potential = MV2.ones(sh, dtype='d') * -999
    potential_reg = MV2.ones(sh, dtype='d') * -999

    g1 = sftbyrgn.getGrid()
    g2 = mask.getGrid()
    r1 = regrid2.Regridder(g1, g2)
    w = cdutil.area_weights(sftbyrgn)

    if verbose:
        print 'First pass'
    itmp = 0.
    for ireg in keys.keys():
        genutil.statusbar(itmp, len(keys.keys()) - 1)
        itmp += 1.
        c = MV2.equal(sftbyrgn, ireg)
        w2 = 1. - c * w
        # Regrid region ireg's area fraction onto the target grid
        s2, w3 = r1(sftbyrgn, mask=w2.filled(), returnTuple=1)
        c2 = MV2.equal(mask, keys[ireg])
        # `loop` is a sibling helper defined earlier in this file
        loop(potential, potential_reg, c2, w3, ireg)

    # Count how many candidate regions were found per cell
    found = MV2.zeros(sh[1:], typecode='f')
    for i in range(maximum_regions_per_cell):
        found = found + MV2.not_equal(potential[i], -999)

    # Second pass: progressively widen the search window around unresolved cells
    sh2 = list(sh)
    for k in range(extend_up_to):
        sh2[1] = sh[1] + 2 * (k + 1)
        sh2[2] = sh[2] + 2 * (k + 1)
        # Form the possible i/j couples !
        s = MV2.sum(MV2.ravel(MV2.equal(potential[0], -999)), 0)
        if verbose:
            print 'Expanding up to', k + 1, 'cells while trying to fix', s, 'cells'
        if float(s) != 0:
            c0 = MV2.equal(potential[0], -999)
            couples = []
            # Padded copy of mask; -888 marks the halo cells
            sft2 = MV2.zeros(sh2[1:], dtype='d') - 888.
            sft2[k + 1:-k - 1, k + 1:-k - 1] = mask
            # Only offsets on the new outer ring (not already tried at k-1)
            for i in range(-k - 1, k + 2):
                for j in range(-k - 1, k + 2):
                    if abs(i) > k or abs(j) > k:
                        couples.append([i, j])
            ntot = len(keys.keys()) * len(couples) - 1
            itmp = 0
            for ireg in keys.keys():
                c = MV2.equal(sftbyrgn, ireg)
                w2 = 1. - c * w
                s2, w3 = r1(sftbyrgn, mask=w2.filled(), returnTuple=1)
                # Pad the regridded weights to match the enlarged window
                w4 = MV2.zeros(sh2[1:], typecode='d')
                w4[k + 1:-k - 1, k + 1:-k - 1] = w3
                for i, j in couples:
                    if verbose:
                        genutil.statusbar(itmp, ntot)
                    itmp += 1.
                    # Shifted window must match both the region type and mask
                    c2 = MV2.equal(
                        sft2[j + k + 1:j + k + 1 + sh[1],
                             i + k + 1:i + k + 1 + sh[2]], keys[ireg])
                    c3 = MV2.equal(
                        sft2[j + k + 1:j + k + 1 + sh[1],
                             i + k + 1:i + k + 1 + sh[2]], mask)
                    c2 = MV2.logical_and(c2, c3)
                    c2 = MV2.logical_and(c2, c0)
                    loop(potential, potential_reg, c2,
                         w4[j + k + 1:j + k + 1 + sh[1],
                            i + k + 1:i + k + 1 + sh[2]], ireg)

    # Cells still unresolved get found decremented (ends at -1 when nothing fit)
    found = MV2.where(MV2.equal(potential[0], -999), found - 1, found)

    # `sumregions` is a sibling helper defined elsewhere in this file
    out = sumregions(potential_reg, potential)
    out.setAxisList(mask.getAxisList())
    found.setAxisList(mask.getAxisList())
    found = found.astype('i')
    found.missing_value = -999
    found.id = 'found'
    out.id = 'sftbyrgn'
    out = out.astype('i')
    out.missing_value = -999
    del (out.name)
    del (found.name)
    return out, found
# write out the total temperature data to a netcdf file o = cdms.open('era40_merged_tas_sst.nc', 'w') o.write(merged) # crete base period 1991-1993, inclusive start_time = cdtime.comptime(1991, 1, 1) end_time = cdtime.comptime(1993, 12, 1) # the annualcycle ac = cdutil.ANNUALCYCLE.climatology(merged(time=(start_time, end_time, 'co'))) # use the defined annual cycle and generate anomalies merged_an = cdutil.ANNUALCYCLE.departures(merged, ref=ac) # add metadata to the new anomaly variable merged_an = cdms.createVariable(merged_an, axes=(tim, lat, lon), typecode='f', id='anomalies_merged_tas_sst') merged_an.id = 'anomalies_merged_tas_sst' # Lastly apply the "spatial missing mask" to these data merged_an = MV2.masked_where(MV2.equal(mask1, 1), merged_an) y.clear() y.plot(merged_an) o.write(merged_an) o.close()
# NOTE(review): fragment from inside a larger loop (note the `continue`);
# `oMask`, `OBS`, `var`, `region`, `sftlf`, `level`, `ref`, `grd`,
# `regridTool`, `saved_obs_masks`, `metrics_dictionary` and helper `dup`
# are defined outside this chunk.
try:
    # Try to read the land/sea fraction straight from the obs mask file
    oMask = oMask.get("sftlf")
# ok that failed falling back on autogenerate
except:
    dup("Could not find obs mask, generating")
    foGrd = cdms2.open(OBS())
    oGrd = foGrd(var, time=slice(0, 1))
    foGrd.close()
    # Autogenerate a mask on the obs grid; filled(1.) -> treat missing as land
    oMask = cdutil.generateLandSeaMask(
        oGrd, regridTool=regridTool).filled(1.) * 100.
    oMask = MV2.array(oMask)
    oMask.setAxis(-1, oGrd.getLongitude())
    oMask.setAxis(-2, oGrd.getLatitude())
# Cache the mask so later iterations can reuse it
saved_obs_masks[oMasknm] = oMask

# Mask everything that is NOT the requested region
OBS.mask = MV2.logical_not(MV2.equal(oMask, region))
OBS.targetMask = MV2.logical_not(
    MV2.equal(
        sftlf["targetGrid"],
        region))
try:
    if level is not None:
        do = OBS.get(var, level=level)
    else:
        do = OBS.get(var)
except Exception as err:
    # Skip this obs dataset and move on to the next loop iteration
    dup('failed with 4D OBS', var, ref, err)
    continue
grd["GridResolution"] = do.shape[1:]
metrics_dictionary["GridInfo"] = grd
# NOTE(review): near-duplicate of an earlier fragment in this file;
# `merged`, `tim`, `lat`, `lon`, `mask1`, `x`, `y` and the modules are
# defined outside this chunk.
# Plot the merged total-temperature field
x.clear()
x.plot(merged)
# write out the total temperature data to a netcdf file
o = cdms.open("era40_merged_tas_sst.nc", "w")
o.write(merged)
# create base period 1991-1993, inclusive ("co" = closed/open time interval)
start_time = cdtime.comptime(1991, 1, 1)
end_time = cdtime.comptime(1993, 12, 1)
# the annualcycle climatology over the base period
ac = cdutil.ANNUALCYCLE.climatology(merged(time=(start_time, end_time, "co")))
# use the defined annual cycle and generate anomalies
merged_an = cdutil.ANNUALCYCLE.departures(merged, ref=ac)
# add metadata to the new anomaly variable
merged_an = cdms.createVariable(merged_an, axes=(tim, lat, lon), typecode="f",
                                id="anomalies_merged_tas_sst")
merged_an.id = "anomalies_merged_tas_sst"
# Lastly apply the "spatial missing mask" to these data
merged_an = MV2.masked_where(MV2.equal(mask1, 1), merged_an)
# Plot and persist the anomalies
y.clear()
y.plot(merged_an)
o.write(merged_an)
o.close()
def linearInterpolation(A, I, levels=[
        100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000,
        20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000], status=None):
    """
    Linear interpolation to interpolate a field from some levels to another
    set of levels. Values below "surface" are masked.

    Input
    A :      array to interpolate
    I :      interpolation field (usually Pressure or depth) from TOP
             (level 0) to BOTTOM (last level), i.e P value going up with
             each level
    levels : levels to interpolate to (same units as I), default levels
             are:[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000,
             25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000]

    I and levels must have same units

    Output
    array on new levels (levels)

    Examples:
    A=interpolate(A,I,levels=[100000, 92500, 85000, 70000, 60000, 50000,
    40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000])
    """
    try:
        nlev = len(levels)  # Number of pressure levels
    except BaseException:
        nlev = 1  # if only one level len(levels) would breaks
        levels = [levels, ]
    # Remember original dimension order so it can be restored on return
    order = A.getOrder()
    A = A(order='z...')
    I = I(order='z...')
    sh = list(I.shape)
    nsigma = sh[0]  # number of sigma levels
    sh[0] = nlev
    t = MV2.zeros(sh, typecode=MV2.float32)  # output, one slab per level
    sh2 = I[0].shape
    prev = -1
    for ilev in range(nlev):  # loop through pressure levels
        if status is not None:
            prev = genutil.statusbar(ilev, nlev - 1., prev)
        lev = levels[ilev]  # get value for the level
        # Work arrays initialized to -1 (sentinel for "not found yet")
        Iabv = MV2.ones(sh2, MV2.float)
        Aabv = -1 * Iabv  # Array on sigma level Above
        Abel = -1 * Iabv  # Array on sigma level Below
        Ibel = -1 * Iabv  # Pressure on sigma level Below
        Iabv = -1 * Iabv  # Pressure on sigma level Above
        Ieq = MV2.masked_equal(Iabv, -1)  # Area where Pressure == levels
        for i in range(1, nsigma):  # loop from second sigma level to last one
            a = MV2.greater_equal(
                I[i], lev)  # Where is the pressure greater than lev
            b = MV2.less_equal(
                I[i - 1], lev)  # Where is the pressure less than lev
            # Now looks if the pressure level is in between the 2 sigma levels
            # If yes, sets Iabv, Ibel and Aabv, Abel
            a = MV2.logical_and(a, b)
            Iabv = MV2.where(a, I[i], Iabv)  # Pressure on sigma level Above
            Aabv = MV2.where(a, A[i], Aabv)  # Array on sigma level Above
            Ibel = MV2.where(a, I[i - 1], Ibel)  # Pressure on sigma level Below
            Abel = MV2.where(a, A[i - 1], Abel)  # Array on sigma level Below
            # Exact matches bypass the interpolation below
            Ieq = MV2.where(MV2.equal(I[i], lev), A[i], Ieq)
        # set to missing value if no data below lev if there is
        val = MV2.masked_where(
            MV2.equal(Ibel, -1.), numpy.ones(Ibel.shape) * lev)
        tl = (val - Ibel) / (Iabv - Ibel) * \
            (Aabv - Abel) + Abel  # Interpolation
        # BUG FIX: original read "MV22.nomask" -- "MV22" is an undefined
        # name, so this branch raised NameError whenever it was evaluated.
        if ((Ieq.mask is None) or (Ieq.mask is MV2.nomask)):
            tl = Ieq
        else:
            tl = MV2.where(1 - Ieq.mask, Ieq, tl)
        t[ilev] = tl.astype(MV2.float32)
    # Build the output level axis without auto-generated bounds
    ax = A.getAxisList()
    autobnds = cdms2.getAutoBounds()
    cdms2.setAutoBounds('off')
    lvl = cdms2.createAxis(MV2.array(levels).filled())
    cdms2.setAutoBounds(autobnds)
    try:
        lvl.units = I.units
    except BaseException:
        pass
    lvl.id = 'plev'
    try:
        t.units = I.units
    except BaseException:
        pass
    ax[0] = lvl
    t.setAxisList(ax)
    t.id = A.id
    # Copy remaining attributes from the input variable
    for att in A.listattributes():
        setattr(t, att, getattr(A, att))
    return t(order=order)
def get(self,returnTuple=1):
    """Retrieve the conditioned variable (and optionally its fraction field).

    Reads the data either from `self.data` (array case) or from `self.file`
    (file case), applies the user's cdms arguments/keywords, the optional
    preprocess function, weights, target-grid regridding and slope/offset,
    and returns `v` or `(v, frc)` depending on `returnTuple`.
    `frc` is the fraction (0..1) of each cell holding valid data.

    NOTE(review): Python 2 code (types.TupleType, apply, old-style raise);
    `pop` used below is presumably a module-level helper -- confirm.
    """
    value=self.data
    frc=None
    # self.data may be a (value, fraction) pair
    if type(value) in [types.TupleType, types.ListType]:
        value,frc=value
    if isinstance (value,numpy.ndarray ) or numpy.ma.isMA(value): # Variable defined from array
        if frc is None:
            frc=numpy.ma.ones(value.shape,dtype=numpy.float32)
        kw={}
        args=[]
        # Add user defined cdmsArguments
        for a in self.cdmsArguments:
            args.append(a)
        # Add user defined cdmsKeywords
        for k in self.cdmsKeywords.keys():
            kw[k]=self.cdmsKeywords[k]
        # try to apply, if not forget about it
        try:
            v=value(*args,**kw)
            frc=frc(*args,**kw)
            # Now removes the slice types
            # because they can't be used twice
            for k in kw.keys():
                if type(kw[k])==types.SliceType:
                    del(kw[k])
            for i in range(len(args)):
                if type(args[i])==types.SliceType:
                    pop(args,i)
                    i=i-1
        except:
            v=value
    else: # Variable comes from a file, need to be retrieved
        f=cdms2.open(self.file)
        kw={}
        args=[]
        # Add user defined cdmsArguments
        for a in self.cdmsArguments:
            args.append(a)
        # Add user defined cdmsKeywords
        for k in self.cdmsKeywords.keys():
            kw[k]=self.cdmsKeywords[k]
        v=f(self.var,*args,**kw)
        f.close()
        # Now removes the slice types
        # because they can't be used twice
        for k in kw.keys():
            if type(kw[k])==types.SliceType:
                del(kw[k])
        for i in range(len(args)):
            if type(args[i])==types.SliceType:
                pop(args,i)
                i=i-1
    ## At that stage applied the preprocess function
    if self.preprocess is not None:
        v=apply(self.preprocess,(v,),self.preprocessKeywords)
    # Create the fractions
    if frc is None:
        frc=v.mask
        if frc is numpy.ma.nomask: #no mask
            # Create a bunch of ones (100%)
            frc=numpy.ones(v.shape,numpy.float32)
        else:
            # Fraction are actually just the opposite of the mask at that stage !
            frc=frc.astype(MV2.float32) # Sometimes if it is bytes it doesn't work
            frc=1.-frc
            frc=frc.astype(MV2.float32) # no need for double precision here !
    else:
        m=v.mask
        if not m is numpy.ma.nomask:
            frc=MV2.where(m,0.,frc).filled(0.)
    # Now get the associted weights object
    # Note that we pass v in case some of the values are defined as "input"
    # in which case it would use v instead of the weights for weightsing
    m=self.weightsMaker(v)
    if not m is None:
        # grows the variable and the weights for possible Xtra dimensions
        m=m(*args,**kw)
        v,m=genutil.grower(v,m)
        # make sure variable and weights are compatible
        if m.shape != v.shape:
            raise VariableConditionerError, 'weights and variable have different shapes: weights is '+str(m.shape)+' and grid is '+str(v.shape)
        # make sure they're on the same grid (in case one starts at 0 and one at -180 for example
        if not m.getGrid() is v.getGrid() :
            m = m.astype("i").regrid(v.getGrid())
        # Mask the dataset where the fraction are 0.
        v   = MV2.masked_where(MV2.equal(m.filled(0),0.),v)
        # Update the fractions
        frc=m.filled(0.)
        m=v.mask
        if not m is numpy.ma.nomask:
            frc=numpy.where(m,0.,frc)
    # Now get the target grid
    g=self.weightedGridMaker()
    if not g is None: # we do have a target grid to go to !
        # Create the regridder object
        rf=regrid2.Horizontal(v.getGrid(),g)
        # and regrid passing the weights to use to each grid cell
        # at this point it should be only 0/1
        v,frc=rf(v,mask=1.-frc,returnTuple=1)
        frc=MV2.array(frc)
        frc.setAxisList(v.getAxisList())
        v=v(*args,**kw)
        frc=frc(*args,**kw).filled(0.)
        # Note that now frc is not necessarily 0. and 1. but actuall fraction
        # of the grid cell that has real data in it.
        # do we weights after this regridding ?
        # once again pass v in case the weightsing wants
        # to work on the variable
        m=self.weightedGridMaker.weightsMaker(v)
        if not m is None: # we have a weights
            m=m(*args,**kw) # apply the extra cdmsKeywords to it
            v,m=genutil.grower(v,m)
            # make sure variable and weights are compatible
            if m.shape != v.shape:
                raise VariableConditionerError, 'weights and variable have different shapes: weights is '+str(m.shape)+' and grid is '+str(v.shape)
            # make sure they're on the same grid (in case one starts at 0 and one at -180 for example
            if not m.getGrid() is v.getGrid() :
                m = m.astype("i").regrid(v.getGrid())
            v=MV2.masked_where(MV2.equal(m.filled(0.),0.),v)
            # weights the fraction where needed
            frc=m.filled(0.)
            m=v.mask
            if not m is numpy.ma.nomask:
                frc=numpy.where(m,0.,frc)
    # Now make the fraction an MV2 and puts the dim from v on it
    frc=MV2.array(frc)
    frc.setAxisList(v.getAxisList())
    # just in case applies the cdmsKeywords again
    # usefull in case your final grid is global
    # and you specified Nino3 region for example.
    v = v (*args,**kw)
    frc  = frc(*args,**kw) .filled(0.)
    if v.missing_value is None:
        v.missing_value=1.e20
    v=MV2.masked_where(MV2.equal(frc,0.),v)
    # Now applies the slope and offset if necessary
    if self.slope!=1.:
        v=v*self.slope
    if self.offset!=0.:
        v=v+self.offset
    # Collapse an all-False mask back to nomask
    if not ((v.mask is None) or (v.mask is MV2.nomask)):
        if numpy.ma.allclose(v.mask,0.):
            v._mask=numpy.ma.nomask
    # Returns the variable and the fractions or just the variable
    if returnTuple:
        return v,frc
    else:
        return v
def linearInterpolation(A,I,levels=[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000], status=None):
    """
    Linear interpolation to interpolate a field from some levels to another
    set of levels. Values below "surface" are masked.

    Input
    A :      array to interpolate
    I :      interpolation field (usually Pressure or depth) from TOP
             (level 0) to BOTTOM (last level), i.e P value going up with
             each level
    levels : levels to interpolate to (same units as I), default levels
             are:[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000,
             25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000]

    I and levels must have same units

    Output
    array on new levels (levels)

    Examples:
    A=interpolate(A,I,levels=[100000, 92500, 85000, 70000, 60000, 50000,
    40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000])
    """
    try:
        nlev=len(levels)  # Number of pressure levels
    except:
        nlev=1  # if only one level len(levels) would breaks
        levels=[levels,]
    # Remember original dimension order so it can be restored on return
    order=A.getOrder()
    A=A(order='z...')
    I=I(order='z...')
    sh=list(I.shape)
    nsigma=sh[0] #number of sigma levels
    sh[0]=nlev
    t=MV2.zeros(sh,typecode=MV2.float32)  # output, one slab per level
    sh2=I[0].shape
    prev=-1
    for ilev in range(nlev): # loop through pressure levels
        if status is not None:
            prev=genutil.statusbar(ilev,nlev-1.,prev)
        lev=levels[ilev] # get value for the level
        # Work arrays initialized to -1 (sentinel for "not found yet")
        Iabv=MV2.ones(sh2,MV2.float)
        Aabv=-1*Iabv # Array on sigma level Above
        Abel=-1*Iabv # Array on sigma level Below
        Ibel=-1*Iabv # Pressure on sigma level Below
        Iabv=-1*Iabv # Pressure on sigma level Above
        Ieq=MV2.masked_equal(Iabv,-1) # Area where Pressure == levels
        for i in range(1,nsigma): # loop from second sigma level to last one
            a = MV2.greater_equal(I[i],  lev) # Where is the pressure greater than lev
            b =    MV2.less_equal(I[i-1],lev) # Where is the pressure less than lev
            # Now looks if the pressure level is in between the 2 sigma levels
            # If yes, sets Iabv, Ibel and Aabv, Abel
            a=MV2.logical_and(a,b)
            Iabv=MV2.where(a,I[i],Iabv) # Pressure on sigma level Above
            Aabv=MV2.where(a,A[i],Aabv) # Array on sigma level Above
            Ibel=MV2.where(a,I[i-1],Ibel) # Pressure on sigma level Below
            Abel=MV2.where(a,A[i-1],Abel) # Array on sigma level Below
            # Exact matches bypass the interpolation below
            Ieq= MV2.where(MV2.equal(I[i],lev),A[i],Ieq)
        # set to missing value if no data below lev if there is
        val=MV2.masked_where(MV2.equal(Ibel,-1.),numpy.ones(Ibel.shape)*lev)
        tl=(val-Ibel)/(Iabv-Ibel)*(Aabv-Abel)+Abel # Interpolation
        # BUG FIX: original read "MV22.nomask" -- "MV22" is an undefined
        # name, so this branch raised NameError whenever it was evaluated.
        if ((Ieq.mask is None) or (Ieq.mask is MV2.nomask)):
            tl=Ieq
        else:
            tl=MV2.where(1-Ieq.mask,Ieq,tl)
        t[ilev]=tl.astype(MV2.float32)
    # Build the output level axis without auto-generated bounds
    ax=A.getAxisList()
    autobnds=cdms2.getAutoBounds()
    cdms2.setAutoBounds('off')
    lvl=cdms2.createAxis(MV2.array(levels).filled())
    cdms2.setAutoBounds(autobnds)
    try:
        lvl.units=I.units
    except:
        pass
    lvl.id='plev'
    try:
        t.units=I.units
    except:
        pass
    ax[0]=lvl
    t.setAxisList(ax)
    t.id=A.id
    # Copy remaining attributes from the input variable
    for att in A.listattributes():
        setattr(t,att,getattr(A,att))
    return t(order=order)
def get(self,returnTuple=1):
    """Fetch the two variables V1 and V2 (and optional external EV) on a
    common time period and common grid.

    Propagates this object's cdmsArguments/cdmsKeywords to the variable
    conditioners, lines up the time axes, grows singleton dimensions so the
    shapes match, regrids to the external/final grid when one is defined,
    and masks each variable where its data fraction is 0.

    Returns (d1, frc1), (d2, frc2) when returnTuple is true, else d1, d2.

    NOTE(review): Python 2 code (has_key); stale commented-out code removed
    below was an older common-time-period computation.
    """
    # Ok now the tough part try to figure out everything for the user...

    # overwrite the defintion for the variableConditioners cdmsArguments
    if self.cdmsArguments!=[] :
        setattr(self.V1,'cdmsArguments',self.cdmsArguments)
        setattr(self.V2,'cdmsArguments',self.cdmsArguments)
        if not self.EV is None :
            setattr(self.EV,'cdmsArguments',self.cdmsArguments)

    # overwrite the defintion for the variableConditioners cdmsKeyowrds
    for k in self.cdmsKeywords.keys():
        self.V1.cdmsKeywords[k]=self.cdmsKeywords[k]
        self.V2.cdmsKeywords[k]=self.cdmsKeywords[k]
        if not self.EV is None:
            self.EV.cdmsKeywords[k]=self.cdmsKeywords[k]

    # Checks the time:
    # 2003-9-15: Added options if both var don't have time then still works
    d1 = None
    d2 = None
    frc1 = None
    frc2 = None
    autotime = None

    if not self.V1.cdmsKeywords.has_key('time'):
        if self.V2.cdmsKeywords.has_key('time'):
            # V2 has an explicit time selection: reuse its span for V1
            d2=self.V2(returnTuple=returnTuple)
            if returnTuple:
                t=d2[0].getTime().asComponentTime()
            else:
                t=d2.getTime().asComponentTime()
            self.V1.cdmsKeywords['time']=(t[0],t[-1])
            d1=self.V1(returnTuple=returnTuple)
            del(self.V1.cdmsKeywords['time'])
        else: # Automatically gets the maximum common time
            d2=self.V2(returnTuple=returnTuple)
            if returnTuple:
                t=d2[0].getTime()
                if not t is None:
                    t=t.asComponentTime()
            else:
                t=d2.getTime()
                if not t is None:
                    t=t.asComponentTime()
            if not t is None:
                self.V1.cdmsKeywords['time']=(t[0],t[-1])
            d1=self.V1(returnTuple=returnTuple)
            if returnTuple:
                t1=d1[0].getTime()
                if not t1 is None:
                    t1=t1.asComponentTime()
            else:
                t1=d1.getTime()
                if not t1 is None:
                    t1=t1.asComponentTime()
            if not t1 is None:
                # Clip the common window to the later start / earlier end
                autotime=[t1[0],t1[-1],'ccb']
                if cdtime.compare(t1[0],t[0])==-1:
                    autotime[0]=t[0]
                if cdtime.compare(t1[-1],t[-1])==1:
                    autotime[1]=t[-1]
                self.V1.cdmsKeywords['time']=autotime
            d1=self.V1(returnTuple=returnTuple)
            if not t1 is None:
                del(self.V1.cdmsKeywords['time'])
                self.V2.cdmsKeywords['time']=autotime
                d2=self.V2(returnTuple=returnTuple)
                del(self.V2.cdmsKeywords['time'])
    elif not self.V2.cdmsKeywords.has_key('time'):
        # V1 has an explicit time selection: reuse its span for V2
        d1=self.V1(returnTuple=returnTuple)
        if returnTuple:
            t=d1[0].getTime().asComponentTime()
        else:
            t=d1.getTime().asComponentTime()
        if not t is None:
            self.V2.cdmsKeywords['time']=(t[0],t[-1])
        d2=self.V2(returnTuple=returnTuple)
        if not t is None:
            del(self.V2.cdmsKeywords['time'])

    # Now get the variableConditioners 1 and 2 if necessary
    if d1 is None:
        d1=self.V1(returnTuple=returnTuple)
    if d2 is None:
        d2=self.V2(returnTuple=returnTuple)

    if returnTuple:
        # break the output if necessary
        frc2=d2[1]
        d2=d2[0]
        frc1=d1[1]
        d1=d1[0]
        frc1=MV2.array(frc1)
        frc2=MV2.array(frc2)
    else:
        frc1=MV2.ones(d1.shape,typecode=MV2.float32)
        frc2=MV2.ones(d2.shape,typecode=MV2.float32)
    frc1.setAxisList(d1.getAxisList())
    frc2.setAxisList(d2.getAxisList())

    # Now tries to grow extra dims (like dummy levels, etc...)
    if d1.shape!=d2.shape:
        if d1.ndim>d2.ndim:
            d1,d2=genutil.grower(d1,d2,singleton=1)
            frc1,frc2=genutil.grower(frc1,frc2,singleton=1)
        else:
            d2,d1=genutil.grower(d2,d1,singleton=1)
            frc2,frc1=genutil.grower(frc2,frc1,singleton=1)

    # External variableConditioner ?
    if not self.EV is None:
        ed=None
        if not self.EV.cdmsKeywords.has_key('time'):
            # Align EV's time with d1's span
            t=d1.getTime().asComponentTime()
            if not t is None:
                self.EV.cdmsKeywords['time']=(t[0],t[-1])
            ed=self.EV(returnTuple=1)
            frced=ed[1]
            ed=ed[0]
            frced=MV2.array(frced)
            frced.setAxisList(ed.getAxisList())
            if not t is None:
                del(self.EV.cdmsKeywords['time'])
        if ed is None:
            ed=self.EV(returnTuple=1)
            frced=ed[1]
            ed=ed[0]
            frced=MV2.array(frced)
            frced.setAxisList(ed.getAxisList())
        # Regrid both variables onto the external variable's grid
        g=ed.getGrid()
        g1=d1.getGrid()
        rf=regrid2.Horizontal(g1,g)
        d1,frc1=rf(d1,mask=1.-frc1.filled(0.),returnTuple=1)
        g2=d2.getGrid()
        rf=regrid2.Horizontal(g2,g)
        d2,frc2=rf(d2,mask=1.-frc2.filled(0.),returnTuple=1)
        frc1=MV2.array(frc1)
        frc1.setAxisList(d1.getAxisList())
        frc2=MV2.array(frc2)
        frc2.setAxisList(d2.getAxisList())
        d1,ed=genutil.grower(d1,ed,singleton=1)
        d2,ed=genutil.grower(d2,ed,singleton=1)
        ed,frced=genutil.grower(ed,frced,singleton=1)
        # A cell keeps the external fraction only where it had data itself
        frc1=numpy.ma.where(numpy.ma.equal(frc1.filled(0.),0.),0.,frced.filled(0.))
        frc2=numpy.ma.where(numpy.ma.equal(frc2.filled(0.),0.),0.,frced.filled(0.))
        d1=MV2.masked_where(MV2.equal(frc1.filled(0.),0.),d1)
        d2=MV2.masked_where(MV2.equal(frc2.filled(0.),0.),d2)

    # Final grid ?
    g=self.weightedGridMaker()
    if not g is None:
        g1=d1.getGrid()
        g2=d2.getGrid()
        rf1=regrid2.Horizontal(g1,g)
        rf2=regrid2.Horizontal(g2,g)
        d1,frc1=rf1(d1,mask=1.-frc1.filled(0.),returnTuple=1)
        d2,frc2=rf2(d2,mask=1.-frc2.filled(0.),returnTuple=1)
        frc1=MV2.array(frc1)
        frc1.setAxisList(d1.getAxisList())
        frc2=MV2.array(frc2)
        frc2.setAxisList(d2.getAxisList())
        m=self.weightedGridMaker.weightsMaker(d1)
        if not m is None:
            d1,m=genutil.grower(d1,m)
            frc1,m=genutil.grower(frc1,m)
            frc1=m.filled(0.)
            d1=MV2.masked_where(MV2.equal(frc1,0.),d1)
            m=d1.mask
            # NOTE(review): this branch tests `is None` while the d2 branch
            # below tests `is numpy.ma.nomask` -- possibly inconsistent.
            if not m is None:
                frc1=numpy.where(m,0.,frc1)
        m=self.weightedGridMaker.weightsMaker(d2)
        if not m is None:
            d2,m=genutil.grower(d2,m)
            frc2,m=genutil.grower(frc2,m)
            frc2=m.filled(0.)
            d2=MV2.masked_where(MV2.equal(frc2,0.),d2)
            m=d2.mask
            if not m is numpy.ma.nomask:
                frc2=numpy.where(m,0.,frc2)
    elif d1.getGrid()!=d2.getGrid():
        # No final grid requested: bring d2 onto d1's grid
        g1=d1.getGrid()
        g2=d2.getGrid()
        rf=regrid2.Horizontal(g2,g1)
        d2,frc2=rf(d2,mask=1.-frc2.filled(0.),returnTuple=1)
        frc1=MV2.array(frc1)
        frc1.setAxisList(d1.getAxisList())
        frc2=MV2.array(frc2)
        frc2.setAxisList(d2.getAxisList())

    # CdmsArguments or CdmsKeywords
    if not self.cdmsArguments is None:
        d1=d1(*self.cdmsArguments)
        d2=d2(*self.cdmsArguments)
        frc1=frc1(*self.cdmsArguments)
        frc2=frc2(*self.cdmsArguments)

    if not self.cdmsKeywords is None:
        d1=d1(**self.cdmsKeywords)
        d2=d2(**self.cdmsKeywords)
        frc1=frc1(**self.cdmsKeywords)
        frc2=frc2(**self.cdmsKeywords)

    d1=MV2.masked_where(MV2.equal(frc1,0.),d1)
    d2=MV2.masked_where(MV2.equal(frc2,0.),d2)

    # Collapse all-False masks back to nomask
    if not ((d1.mask is None) or (d1.mask is MV2.nomask)):
        if numpy.ma.allclose(d1.mask,0.):
            d1._mask=numpy.ma.nomask
    if not ((d2.mask is None) or (d2.mask is MV2.nomask)):
        if numpy.ma.allclose(d2.mask,0.):
            d2._mask=numpy.ma.nomask
    if returnTuple:
        if not ((frc1.mask is None) or (frc1.mask is MV2.nomask)):
            if numpy.ma.allclose(frc1.mask,0.):
                frc1._mask=numpy.ma.nomask
        if not ((frc2.mask is None) or (frc2.mask is MV2.nomask)):
            if numpy.ma.allclose(frc2.mask,0.):
                frc2._mask=numpy.ma.nomask
        return (d1,frc1),(d2,frc2)
    else:
        return d1,d2
#pdb.set_trace() climInterp2Max = np.max(climInterp2) climInterp2Mean = np.mean(climInterp2) climInterp2Median = np.median(climInterp2) climInterp2Min = np.min(climInterp2) climInterp2Str = ''.join(['climInterp2.max():', '{:{}f}'.format(climInterp2Max,precision), ' mean:','{:{}f}'.format(climInterp2Mean,precision), ' median:','{:{}f}'.format(climInterp2Median,precision), ' min:','{:{}f}'.format(climInterp2Min,precision)]) print(climInterp2Str) writeToLog(logFile,climInterp2Str) #print('climInterp2 created') #pdb.set_trace() # Mask invalid datapoints climInterp3 = mv.masked_where(mv.equal(climInterp2,1e+20),climInterp2) climInterp3 = mv.masked_where(mv.greater(climInterp3,1e+10),climInterp3) ; # Add great to catch fringe values, switched from 1e+20 to 1e+10 print('climInterp3.missing:',climInterp3.missing) #climInterp3.setMissing(1e+20) ; # Specifically assign missing value #print('climInterp3 created') #pdb.set_trace() ''' import matplotlib.pyplot as plt climSlice = clim[0,0,:,:] ; plt.figure(1) ; plt.contourf(clim.getLongitude().data,clim.getLatitude().data,climSlice,20) ; #clim plt.show() climInterpSlice = climInterp[0,0,:,:] ; plt.figure(2) ; plt.contourf(climInterp.getLongitude().getData(),climInterp.getLatitude().getData(),climInterpSlice,20) ; #climInterp plt.show() #climInterp2Slice = climInterp2[0,0,:,:] ; plt.figure(3) ; plt.contourf(climInterp.getLongitude().getData(),climInterp.getLatitude().getData(),climInterp2Slice,20) ; #climInterp2 #plt.show() climInterp3Slice = climInterp3[0,0,:,:] ; plt.figure(4) ; plt.contourf(climInterp.getLongitude().getData(),climInterp.getLatitude().getData(),climInterp3Slice,20) ; #climInterp3 plt.show()
# Regrid to current obs data gridFile = "/clim_obs/obs/atm/mo/tas/JRA25/ac/tas_JRA25_000001-000012_ac.nc" f_g = cdm.open(gridFile) grid = f_g("tas").getGrid() landMask = landMask.regrid(grid, regridTool="ESMF", regridMethod="linear") f_g.close() landMask.id = "sftlf" # Rename # Deal with interpolated values landMask[mv.greater(landMask, 75)] = 100 # Fix weird ocean values landMask[mv.less(landMask, 75)] = 0 # Fix continental halos landMask[mv.less(landMask, 0)] = 0 # Fix negative values # Invert land=100, ocean=0 landMask[mv.equal(landMask, 0)] = 50 # Convert ocean landMask[mv.equal(landMask, 100)] = 0 # Convert ocean landMask[mv.equal(landMask, 50)] = 100 # Convert ocean # Create outfile and write outFile = "sftlf_pcmdi-metrics_fx_NCAR-JRA25_197901-201401.nc" # Write variables to file if os.path.isfile(outFile): os.remove(outFile) fOut = cdm.open(outFile, "w") # Use function to write standard global atts globalAttWrite(fOut, options=None) fOut.pcmdi_metrics_version = "0.1-alpha" fOut.pcmdi_metrics_comment = ( "This climatology was prepared by " + "PCMDI for the metrics package and is "
def get(self, returnTuple=1):
    """Fetch and align the two conditioned variables V1 and V2.

    Processing order (legacy Python-2 variant: has_key/rank/Regridder):
      1. push this object's cdmsArguments/cdmsKeywords down onto V1, V2
         (and the external conditioner EV, when present);
      2. time alignment: if only one variable has a 'time' selector its
         span is reused for the other; if neither has one, the maximum
         common time span is computed automatically via cdtime;
      3. retrieve both variables (with their fraction arrays when
         returnTuple is true);
      4. grow singleton dimensions so both variables share a shape;
      5. external conditioner EV (if any): regrid both variables onto
         EV's grid and fold EV's fractions into frc1/frc2;
      6. regrid to the final grid from weightedGridMaker (or put d2 onto
         d1's grid when the grids differ), applying optional
         weightsMaker weights;
      7. final sub-selection, zero-fraction masking, and collapsing of
         all-clear masks back to nomask.

    :param returnTuple: when true (default) each variable is returned
        together with its fraction array.
    :return: ((d1, frc1), (d2, frc2)) when returnTuple is true,
        otherwise (d1, d2).

    NOTE(review): frc1/frc2 appear to be fractional "valid data"
    weights produced by the conditioners/regridders -- confirm against
    the VariableConditioner implementation.
    """
    # Ok now the tough part try to figure out everything for the user...

    # Overwrite the definition for the variableConditioners' cdmsArguments
    if self.cdmsArguments != []:
        setattr(self.V1, 'cdmsArguments', self.cdmsArguments)
        setattr(self.V2, 'cdmsArguments', self.cdmsArguments)
        if not self.EV is None:
            setattr(self.EV, 'cdmsArguments', self.cdmsArguments)

    # Overwrite the definition for the variableConditioners' cdmsKeywords
    for k in self.cdmsKeywords.keys():
        self.V1.cdmsKeywords[k] = self.cdmsKeywords[k]
        self.V2.cdmsKeywords[k] = self.cdmsKeywords[k]
        if not self.EV is None:
            self.EV.cdmsKeywords[k] = self.cdmsKeywords[k]

    # Checks the time:
    # 2003-9-15: Added options if both var don't have time then still works
    d1 = None
    d2 = None
    frc1 = None
    frc2 = None
    autotime = None

    if not self.V1.cdmsKeywords.has_key('time'):
        if self.V2.cdmsKeywords.has_key('time'):
            # V2 has an explicit time selector: borrow its span for V1
            d2 = self.V2(returnTuple=returnTuple)
            if returnTuple:
                t = d2[0].getTime().asComponentTime()
            else:
                t = d2.getTime().asComponentTime()
            self.V1.cdmsKeywords['time'] = (t[0], t[-1])
            d1 = self.V1(returnTuple=returnTuple)
            del (self.V1.cdmsKeywords['time'])
        else:
            # Automatically gets the maximum common time
            d2 = self.V2(returnTuple=returnTuple)
            if returnTuple:
                t = d2[0].getTime()
                if not t is None:
                    t = t.asComponentTime()
            else:
                t = d2.getTime()
                if not t is None:
                    t = t.asComponentTime()
            if not t is None:
                self.V1.cdmsKeywords['time'] = (t[0], t[-1])
            d1 = self.V1(returnTuple=returnTuple)
            if returnTuple:
                t1 = d1[0].getTime()
                if not t1 is None:
                    t1 = t1.asComponentTime()
            else:
                t1 = d1.getTime()
                if not t1 is None:
                    t1 = t1.asComponentTime()
            if not t1 is None:
                # Clip to the intersection of the two time spans
                # ('ccb' is the cdms closed/closed-bounds selector)
                autotime = [t1[0], t1[-1], 'ccb']
                if cdtime.compare(t1[0], t[0]) == -1:
                    autotime[0] = t[0]
                if cdtime.compare(t1[-1], t[-1]) == 1:
                    autotime[1] = t[-1]
                self.V1.cdmsKeywords['time'] = autotime
            d1 = self.V1(returnTuple=returnTuple)
            if not t1 is None:
                # Re-read V2 over the common span, then clean up both
                # temporary 'time' selectors
                del (self.V1.cdmsKeywords['time'])
                self.V2.cdmsKeywords['time'] = autotime
                d2 = self.V2(returnTuple=returnTuple)
                del (self.V2.cdmsKeywords['time'])
    elif not self.V2.cdmsKeywords.has_key('time'):
        # V1 has an explicit time selector but V2 does not: borrow V1's span
        d1 = self.V1(returnTuple=returnTuple)
        if returnTuple:
            t = d1[0].getTime().asComponentTime()
        else:
            t = d1.getTime().asComponentTime()
        if not t is None:
            self.V2.cdmsKeywords['time'] = (t[0], t[-1])
        d2 = self.V2(returnTuple=returnTuple)
        if not t is None:
            del (self.V2.cdmsKeywords['time'])

    # Now get the variableConditioners 1 and 2 if necessary
    if d1 is None:
        d1 = self.V1(returnTuple=returnTuple)
    if d2 is None:
        d2 = self.V2(returnTuple=returnTuple)

    if returnTuple:
        # break the output if necessary (split the (data, fraction) pairs)
        frc2 = d2[1]
        d2 = d2[0]
        frc1 = d1[1]
        d1 = d1[0]
        frc1 = MV2.array(frc1)
        frc2 = MV2.array(frc2)
    else:
        # No fractions returned: treat every point as fully valid
        frc1 = MV2.ones(d1.shape, typecode=MV2.float32)
        frc2 = MV2.ones(d2.shape, typecode=MV2.float32)
    frc1.setAxisList(d1.getAxisList())
    frc2.setAxisList(d2.getAxisList())
##     # Gets the common time period, only if time keyword isn't defined
##     if not(d1.getTime() is None) and not (d2.getTime() is None):
##         if len(d1.getTime())!=len(d2.getTime()) and not self.V1.cdmsKeywords.has_key('time') and not self.V2.cdmsKeywords.has_key('time'):
##             t1=d1.getTime().asComponentTime()
##             t2=d2.getTime().asComponentTime()
##             t=[t1[0],t1[-1]]
##             if cdtime.compare(t1[0],t2[0])<0:
##                 t[0]=t2[0]
##             if cdtime.compare(t1[-1],t2[-1])>0:
##                 t[1]=t2[-1]
##             d1 = d1 (time=(t[0],t[1]))
##             frc1 = frc1(time=(t[0],t[1]))
##             d2 = d2 (time=(t[0],t[1]))
##             frc2 = frc2(time=(t[0],t[1]))
##     # remember the number of element in d1 to see if we add non dummy dimensions
##     nd1=MV2.count(d1)
##     nd2=MV2.count(d2)
##     # Now tries to grow extra dims (like dummy levels, etc...)
##     o1=d1.getOrder(ids=1)
##     o2=d2.getOrder(ids=1)

    # Grow singleton dimensions so both variables share a shape
    if d1.shape != d2.shape:
        if d1.rank() > d2.rank():
            d1, d2 = genutil.grower(d1, d2, singleton=1)
            frc1, frc2 = genutil.grower(frc1, frc2, singleton=1)
        else:
            d2, d1 = genutil.grower(d2, d1, singleton=1)
            frc2, frc1 = genutil.grower(frc2, frc1, singleton=1)

    # External variableConditioner ?
    if not self.EV is None:
        ed = None
        if not self.EV.cdmsKeywords.has_key('time'):
            # EV has no explicit time selector: use d1's time span
            t = d1.getTime().asComponentTime()
            if not t is None:
                self.EV.cdmsKeywords['time'] = (t[0], t[-1])
            ed = self.EV(returnTuple=1)
            frced = ed[1]
            ed = ed[0]
            frced = MV2.array(frced)
            frced.setAxisList(ed.getAxisList())
##             # Gets the common time between d1 and ed
##             if not t is None: del(self.EV.cdmsKeywords['time'])
##             if (not ed.getTime() is None) and (not d1.getTime() is None):
##                 if (len(ed.getTime())!=len(d1.getTime())):
##                     t1=d1.getTime().asComponentTime()
##                     t2=ed.getTime().asComponentTime()
##                     t=[t1[0],t1[-1]]
##                     if cdtime.compare(t1[0],t2[0])<0:
##                         t[0]=t2[0]
##                     if cdtime.compare(t1[-1],t2[-1])>0:
##                         t[1]=t2[-1]
##                     d1 = d1 (time=(t[0],t[1]))
##                     d2 = d2 (time=(t[0],t[1]))
##                     ed = ed (time=(t[0],t[1]))
##                     frc1 = frc1(time=(t[0],t[1]))
##                     frc2 = frc2(time=(t[0],t[1]))
##                     frced = wed(time=(t[0],t[1]))
        if ed is None:
            ed = self.EV(returnTuple=1)
            frced = ed[1]
            ed = ed[0]
            frced = MV2.array(frced)
            frced.setAxisList(ed.getAxisList())
        # Regrid both variables onto EV's grid; the regridder also
        # returns updated fractions (mask is 1-frc, i.e. "invalid" part)
        g = ed.getGrid()
        g1 = d1.getGrid()
        rf = regrid2.Regridder(g1, g)
        d1, frc1 = rf(d1, mask=1. - frc1.filled(0.), returnTuple=1)
        g2 = d2.getGrid()
        rf = regrid2.Regridder(g2, g)
        d2, frc2 = rf(d2, mask=1. - frc2.filled(0.), returnTuple=1)
        frc1 = MV2.array(frc1)
        frc1.setAxisList(d1.getAxisList())
        frc2 = MV2.array(frc2)
        frc2.setAxisList(d2.getAxisList())
        d1, ed = genutil.grower(d1, ed, singleton=1)
        d2, ed = genutil.grower(d2, ed, singleton=1)
        ed, frced = genutil.grower(ed, frced, singleton=1)
        # Where our fraction is zero stay zero; elsewhere adopt EV's fraction
        frc1 = numpy.ma.where(numpy.ma.equal(frc1.filled(0.), 0.), 0., frced.filled(0.))
        frc2 = numpy.ma.where(numpy.ma.equal(frc2.filled(0.), 0.), 0., frced.filled(0.))
        d1 = MV2.masked_where(MV2.equal(frc1.filled(0.), 0.), d1)
        d2 = MV2.masked_where(MV2.equal(frc2.filled(0.), 0.), d2)

    # Final grid ?
    g = self.weightedGridMaker()
    if not g is None:
        g1 = d1.getGrid()
        g2 = d2.getGrid()
        rf1 = regrid2.Regridder(g1, g)
        rf2 = regrid2.Regridder(g2, g)
        d1, frc1 = rf1(d1, mask=1. - frc1.filled(0.), returnTuple=1)
##         m=1.-frc2.filled(0.)
        d2, frc2 = rf2(d2, mask=1. - frc2.filled(0.), returnTuple=1)
        frc1 = MV2.array(frc1)
        frc1.setAxisList(d1.getAxisList())
        frc2 = MV2.array(frc2)
        frc2.setAxisList(d2.getAxisList())
        # Optional per-variable weights on the final grid; the weights
        # replace the fractions outright (frc = m.filled(0.))
        m = self.weightedGridMaker.weightsMaker(d1)
        if not m is None:
            d1, m = genutil.grower(d1, m)
            frc1, m = genutil.grower(frc1, m)
            frc1 = m.filled(0.)
            d1 = MV2.masked_where(MV2.equal(frc1, 0.), d1)
            m = d1.mask
            # NOTE(review): d1 path tests the mask against None while the
            # d2 path below tests numpy.ma.nomask -- asymmetry inherited
            # from the original; verify which comparison is intended.
            if not m is None:
                frc1 = numpy.where(m, 0., frc1)
        m = self.weightedGridMaker.weightsMaker(d2)
        if not m is None:
            d2, m = genutil.grower(d2, m)
            frc2, m = genutil.grower(frc2, m)
            frc2 = m.filled(0.)
            d2 = MV2.masked_where(MV2.equal(frc2, 0.), d2)
            m = d2.mask
            if not m is numpy.ma.nomask:
                frc2 = numpy.where(m, 0., frc2)
    elif d1.getGrid() != d2.getGrid():
        # No explicit target grid: put d2 onto d1's grid
        g1 = d1.getGrid()
        g2 = d2.getGrid()
        rf = regrid2.Regridder(g2, g1)
        d2, frc2 = rf(d2, mask=1. - frc2.filled(0.), returnTuple=1)
    # Re-wrap fractions as MV2 arrays carrying the data's axes
    frc1 = MV2.array(frc1)
    frc1.setAxisList(d1.getAxisList())
    frc2 = MV2.array(frc2)
    frc2.setAxisList(d2.getAxisList())

    # CdmsArguments or CdmsKeywords: final sub-selection on data and fractions
    if not self.cdmsArguments is None:
        d1 = d1(*self.cdmsArguments)
        d2 = d2(*self.cdmsArguments)
        frc1 = frc1(*self.cdmsArguments)
        frc2 = frc2(*self.cdmsArguments)
    if not self.cdmsKeywords is None:
        d1 = d1(**self.cdmsKeywords)
        d2 = d2(**self.cdmsKeywords)
        frc1 = frc1(**self.cdmsKeywords)
        frc2 = frc2(**self.cdmsKeywords)

    # Mask out every point whose fraction is zero
    d1 = MV2.masked_where(MV2.equal(frc1, 0.), d1)
    d2 = MV2.masked_where(MV2.equal(frc2, 0.), d2)

    # Collapse masks that mask nothing back to nomask
    if not ((d1.mask is None) or (d1.mask is MV2.nomask)):
        if numpy.ma.allclose(d1.mask, 0.):
            d1._mask = numpy.ma.nomask
    if not ((d2.mask is None) or (d2.mask is MV2.nomask)):
        if numpy.ma.allclose(d2.mask, 0.):
            d2._mask = numpy.ma.nomask
    if returnTuple:
        if not ((frc1.mask is None) or (frc1.mask is MV2.nomask)):
            if numpy.ma.allclose(frc1.mask, 0.):
                frc1._mask = numpy.ma.nomask
        if not ((frc2.mask is None) or (frc2.mask is MV2.nomask)):
            if numpy.ma.allclose(frc2.mask, 0.):
                frc2._mask = numpy.ma.nomask
        return (d1, frc1), (d2, frc2)
    else:
        return d1, d2
def get(self, returnTuple=1):
    """Fetch and align the two conditioned variables V1 and V2.

    Modernized variant (uses `in`/`is not`, `.ndim`, regrid2.Horizontal).
    Processing order:
      1. push this object's cdmsArguments/cdmsKeywords down onto V1, V2
         (and the external conditioner EV, when present);
      2. time alignment: if only one variable has a 'time' selector its
         span is reused for the other; if neither has one, the maximum
         common time span is computed automatically via cdtime;
      3. retrieve both variables (with their fraction arrays when
         returnTuple is true);
      4. grow singleton dimensions so both variables share a shape;
      5. external conditioner EV (if any): regrid both variables onto
         EV's grid and fold EV's fractions into frc1/frc2;
      6. regrid to the final grid from weightedGridMaker (or put d2 onto
         d1's grid when the grids differ), applying optional
         weightsMaker weights;
      7. final sub-selection, zero-fraction masking, and collapsing of
         all-clear masks back to nomask.

    :param returnTuple: when true (default) each variable is returned
        together with its fraction array.
    :return: ((d1, frc1), (d2, frc2)) when returnTuple is true,
        otherwise (d1, d2).

    NOTE(review): frc1/frc2 appear to be fractional "valid data"
    weights produced by the conditioners/regridders -- confirm against
    the VariableConditioner implementation.
    """
    # Ok now the tough part try to figure out everything for the user...

    # Overwrite the definition for the variableConditioners' cdmsArguments
    if self.cdmsArguments != []:
        setattr(self.V1, 'cdmsArguments', self.cdmsArguments)
        setattr(self.V2, 'cdmsArguments', self.cdmsArguments)
        if self.EV is not None:
            setattr(self.EV, 'cdmsArguments', self.cdmsArguments)

    # Overwrite the definition for the variableConditioners' cdmsKeywords
    for k in list(self.cdmsKeywords.keys()):
        self.V1.cdmsKeywords[k] = self.cdmsKeywords[k]
        self.V2.cdmsKeywords[k] = self.cdmsKeywords[k]
        if self.EV is not None:
            self.EV.cdmsKeywords[k] = self.cdmsKeywords[k]

    # Checks the time:
    # 2003-9-15: Added options if both var don't have time then still works
    d1 = None
    d2 = None
    frc1 = None
    frc2 = None
    autotime = None

    if 'time' not in self.V1.cdmsKeywords:
        if 'time' in self.V2.cdmsKeywords:
            # V2 has an explicit time selector: borrow its span for V1
            d2 = self.V2(returnTuple=returnTuple)
            if returnTuple:
                t = d2[0].getTime().asComponentTime()
            else:
                t = d2.getTime().asComponentTime()
            self.V1.cdmsKeywords['time'] = (t[0], t[-1])
            d1 = self.V1(returnTuple=returnTuple)
            del (self.V1.cdmsKeywords['time'])
        else:
            # Automatically gets the maximum common time
            d2 = self.V2(returnTuple=returnTuple)
            if returnTuple:
                t = d2[0].getTime()
                if t is not None:
                    t = t.asComponentTime()
            else:
                t = d2.getTime()
                if t is not None:
                    t = t.asComponentTime()
            if t is not None:
                self.V1.cdmsKeywords['time'] = (t[0], t[-1])
            d1 = self.V1(returnTuple=returnTuple)
            if returnTuple:
                t1 = d1[0].getTime()
                if t1 is not None:
                    t1 = t1.asComponentTime()
            else:
                t1 = d1.getTime()
                if t1 is not None:
                    t1 = t1.asComponentTime()
            if t1 is not None:
                # Clip to the intersection of the two time spans
                # ('ccb' is the cdms closed/closed-bounds selector)
                autotime = [t1[0], t1[-1], 'ccb']
                if cdtime.compare(t1[0], t[0]) == -1:
                    autotime[0] = t[0]
                if cdtime.compare(t1[-1], t[-1]) == 1:
                    autotime[1] = t[-1]
                self.V1.cdmsKeywords['time'] = autotime
            d1 = self.V1(returnTuple=returnTuple)
            if t1 is not None:
                # Re-read V2 over the common span, then clean up both
                # temporary 'time' selectors
                del (self.V1.cdmsKeywords['time'])
                self.V2.cdmsKeywords['time'] = autotime
                d2 = self.V2(returnTuple=returnTuple)
                del (self.V2.cdmsKeywords['time'])
    elif 'time' not in self.V2.cdmsKeywords:
        # V1 has an explicit time selector but V2 does not: borrow V1's span
        d1 = self.V1(returnTuple=returnTuple)
        if returnTuple:
            t = d1[0].getTime().asComponentTime()
        else:
            t = d1.getTime().asComponentTime()
        if t is not None:
            self.V2.cdmsKeywords['time'] = (t[0], t[-1])
        d2 = self.V2(returnTuple=returnTuple)
        if t is not None:
            del (self.V2.cdmsKeywords['time'])

    # Now get the variableConditioners 1 and 2 if necessary
    if d1 is None:
        d1 = self.V1(returnTuple=returnTuple)
    if d2 is None:
        d2 = self.V2(returnTuple=returnTuple)

    if returnTuple:
        # break the output if necessary (split the (data, fraction) pairs)
        frc2 = d2[1]
        d2 = d2[0]
        frc1 = d1[1]
        d1 = d1[0]
        frc1 = MV2.array(frc1)
        frc2 = MV2.array(frc2)
    else:
        # No fractions returned: treat every point as fully valid
        frc1 = MV2.ones(d1.shape, typecode=MV2.float32)
        frc2 = MV2.ones(d2.shape, typecode=MV2.float32)
    frc1.setAxisList(d1.getAxisList())
    frc2.setAxisList(d2.getAxisList())

    # Grow singleton dimensions so both variables share a shape
    if d1.shape != d2.shape:
        if d1.ndim > d2.ndim:
            d1, d2 = genutil.grower(d1, d2, singleton=1)
            frc1, frc2 = genutil.grower(frc1, frc2, singleton=1)
        else:
            d2, d1 = genutil.grower(d2, d1, singleton=1)
            frc2, frc1 = genutil.grower(frc2, frc1, singleton=1)

    # External variableConditioner ?
    if self.EV is not None:
        ed = None
        if 'time' not in self.EV.cdmsKeywords:
            # EV has no explicit time selector: use d1's time span
            t = d1.getTime().asComponentTime()
            if t is not None:
                self.EV.cdmsKeywords['time'] = (t[0], t[-1])
            ed = self.EV(returnTuple=1)
            frced = ed[1]
            ed = ed[0]
            frced = MV2.array(frced)
            frced.setAxisList(ed.getAxisList())
        if ed is None:
            ed = self.EV(returnTuple=1)
            frced = ed[1]
            ed = ed[0]
            frced = MV2.array(frced)
            frced.setAxisList(ed.getAxisList())
        # Regrid both variables onto EV's grid; the regridder also
        # returns updated fractions (mask is 1-frc, i.e. "invalid" part)
        g = ed.getGrid()
        g1 = d1.getGrid()
        rf = regrid2.Horizontal(g1, g)
        d1, frc1 = rf(d1, mask=1. - frc1.filled(0.), returnTuple=1)
        g2 = d2.getGrid()
        rf = regrid2.Horizontal(g2, g)
        d2, frc2 = rf(d2, mask=1. - frc2.filled(0.), returnTuple=1)
        frc1 = MV2.array(frc1)
        frc1.setAxisList(d1.getAxisList())
        frc2 = MV2.array(frc2)
        frc2.setAxisList(d2.getAxisList())
        d1, ed = genutil.grower(d1, ed, singleton=1)
        d2, ed = genutil.grower(d2, ed, singleton=1)
        ed, frced = genutil.grower(ed, frced, singleton=1)
        # Where our fraction is zero stay zero; elsewhere adopt EV's fraction
        frc1 = numpy.ma.where(numpy.ma.equal(frc1.filled(0.), 0.), 0., frced.filled(0.))
        frc2 = numpy.ma.where(numpy.ma.equal(frc2.filled(0.), 0.), 0., frced.filled(0.))
        d1 = MV2.masked_where(MV2.equal(frc1.filled(0.), 0.), d1)
        d2 = MV2.masked_where(MV2.equal(frc2.filled(0.), 0.), d2)

    # Final grid ?
    g = self.weightedGridMaker()
    if g is not None:
        g1 = d1.getGrid()
        g2 = d2.getGrid()
        rf1 = regrid2.Horizontal(g1, g)
        rf2 = regrid2.Horizontal(g2, g)
        d1, frc1 = rf1(d1, mask=1. - frc1.filled(0.), returnTuple=1)
        # m=1.-frc2.filled(0.)
        d2, frc2 = rf2(d2, mask=1. - frc2.filled(0.), returnTuple=1)
        frc1 = MV2.array(frc1)
        frc1.setAxisList(d1.getAxisList())
        frc2 = MV2.array(frc2)
        frc2.setAxisList(d2.getAxisList())
        # Optional per-variable weights on the final grid; the weights
        # replace the fractions outright (frc = m.filled(0.))
        m = self.weightedGridMaker.weightsMaker(d1)
        if m is not None:
            d1, m = genutil.grower(d1, m)
            frc1, m = genutil.grower(frc1, m)
            frc1 = m.filled(0.)
            d1 = MV2.masked_where(MV2.equal(frc1, 0.), d1)
            m = d1.mask
            # NOTE(review): d1 path tests the mask against None while the
            # d2 path below tests numpy.ma.nomask -- asymmetry inherited
            # from the original; verify which comparison is intended.
            if m is not None:
                frc1 = numpy.where(m, 0., frc1)
        m = self.weightedGridMaker.weightsMaker(d2)
        if m is not None:
            d2, m = genutil.grower(d2, m)
            frc2, m = genutil.grower(frc2, m)
            frc2 = m.filled(0.)
            d2 = MV2.masked_where(MV2.equal(frc2, 0.), d2)
            m = d2.mask
            if m is not numpy.ma.nomask:
                frc2 = numpy.where(m, 0., frc2)
    elif d1.getGrid() != d2.getGrid():
        # No explicit target grid: put d2 onto d1's grid
        g1 = d1.getGrid()
        g2 = d2.getGrid()
        rf = regrid2.Horizontal(g2, g1)
        d2, frc2 = rf(d2, mask=1. - frc2.filled(0.), returnTuple=1)
    # Re-wrap fractions as MV2 arrays carrying the data's axes
    frc1 = MV2.array(frc1)
    frc1.setAxisList(d1.getAxisList())
    frc2 = MV2.array(frc2)
    frc2.setAxisList(d2.getAxisList())

    # CdmsArguments or CdmsKeywords: final sub-selection on data and fractions
    if self.cdmsArguments is not None:
        d1 = d1(*self.cdmsArguments)
        d2 = d2(*self.cdmsArguments)
        frc1 = frc1(*self.cdmsArguments)
        frc2 = frc2(*self.cdmsArguments)
    if self.cdmsKeywords is not None:
        d1 = d1(**self.cdmsKeywords)
        d2 = d2(**self.cdmsKeywords)
        frc1 = frc1(**self.cdmsKeywords)
        frc2 = frc2(**self.cdmsKeywords)

    # Mask out every point whose fraction is zero
    d1 = MV2.masked_where(MV2.equal(frc1, 0.), d1)
    d2 = MV2.masked_where(MV2.equal(frc2, 0.), d2)

    # Collapse masks that mask nothing back to nomask
    if not ((d1.mask is None) or (d1.mask is MV2.nomask)):
        if numpy.ma.allclose(d1.mask, 0.):
            d1._mask = numpy.ma.nomask
    if not ((d2.mask is None) or (d2.mask is MV2.nomask)):
        if numpy.ma.allclose(d2.mask, 0.):
            d2._mask = numpy.ma.nomask
    if returnTuple:
        if not ((frc1.mask is None) or (frc1.mask is MV2.nomask)):
            if numpy.ma.allclose(frc1.mask, 0.):
                frc1._mask = numpy.ma.nomask
        if not ((frc2.mask is None) or (frc2.mask is MV2.nomask)):
            if numpy.ma.allclose(frc2.mask, 0.):
                frc2._mask = numpy.ma.nomask
        return (d1, frc1), (d2, frc2)
    else:
        return d1, d2