def testMaskingFunctions(self):
    """Check every MV2 masked-comparison constructor against a known array.

    The fixture is outerproduct(arange(5), ones(10)): row i holds the value
    i in every column, so each threshold comparison masks whole rows.
    """
    grid = MV2.outerproduct(MV2.arange(5.), [1] * 10)

    # x > 1 masks rows 2..4
    m = MV2.masked_greater(grid, 1)
    self.assertTrue(MV2.allequal(m.mask[2:], True))
    self.assertTrue(MV2.allequal(m.mask[:2], False))

    # x >= 1 masks rows 1..4
    m = MV2.masked_greater_equal(grid, 1)
    self.assertTrue(MV2.allequal(m.mask[1:], True))
    self.assertTrue(MV2.allequal(m.mask[:1], False))

    # x < 1 masks row 0 only
    m = MV2.masked_less(grid, 1)
    self.assertTrue(MV2.allequal(m.mask[:1], True))
    self.assertTrue(MV2.allequal(m.mask[1:], False))

    # x <= 1 masks rows 0..1
    m = MV2.masked_less_equal(grid, 1)
    self.assertTrue(MV2.allequal(m.mask[:2], True))
    self.assertTrue(MV2.allequal(m.mask[2:], False))

    # x != 1 masks every row except row 1
    m = MV2.masked_not_equal(grid, 1)
    self.assertTrue(MV2.allequal(m.mask[1], False))
    self.assertTrue(MV2.allequal(m.mask[0], True))
    self.assertTrue(MV2.allequal(m.mask[2:], True))

    # x == 1 masks row 1 only
    m = MV2.masked_equal(grid, 1)
    self.assertTrue(MV2.allequal(m.mask[1], True))
    self.assertTrue(MV2.allequal(m.mask[0], False))
    self.assertTrue(MV2.allequal(m.mask[2:], False))

    # values outside [1, 3] are masked: rows 0 and 4
    m = MV2.masked_outside(grid, 1, 3)
    self.assertTrue(MV2.allequal(m.mask[0:1], True))
    self.assertTrue(MV2.allequal(m.mask[1:4], False))
    self.assertTrue(MV2.allequal(m.mask[4:], True))

    # masked_where with an explicit boolean condition keeps only 2 <= x <= 3
    m = MV2.masked_where(
        MV2.logical_or(MV2.greater(grid, 3), MV2.less(grid, 2)), grid)
    self.assertTrue(MV2.allequal(m.mask[0:2], True))
    self.assertTrue(MV2.allequal(m.mask[2:4], False))
    self.assertTrue(MV2.allequal(m.mask[4:], True))
def bony_sorting_part1(w500, binedges):
    """Composite w500 into vertical-velocity (wap) bins.

    Returns OKwaps with shape [month, lat, lon, wapbin], where bin 0 holds
    values below binedges[0], bins 1..len(binedges) hold values inside each
    half-open interval [edge, edge + dx), and the last bin holds values at
    or above binedges[-1] + dx.  Entries outside a bin are masked.

    NOTE(review): dx is taken from the first spacing only, so this assumes
    uniformly spaced binedges — confirm with callers.
    """
    months, nlat, nlon = w500.shape
    dx = np.diff(binedges)[0]
    # two extra slots for the below-range / above-range exceedance bins
    OKwaps = nanarray((months, nlat, nlon, 2 + len(binedges)))
    for k, edge in enumerate(binedges, start=1):
        inside = MV.masked_less(w500, edge)
        OKwaps[..., k] = MV.masked_greater_equal(inside, edge + dx)
    # underflow bin: everything below the first edge
    OKwaps[..., 0] = MV.masked_greater_equal(w500, binedges[0])
    # overflow bin: everything at or above the last edge + dx
    OKwaps[..., -1] = MV.masked_less(w500, binedges[-1] + dx)
    return OKwaps  # [month, lat, lon, wapbin]
# Build axes from the reference field and attach them to xouter.
lat = uf.getLatitude()
lon = uf.getLongitude()
xouter.setAxis(0, lat)
xouter.setAxis(1, lon)
xouter.setAxisList([lat, lon])  # one-shot equivalent of the two calls above

# Demonstrate the threshold-masking constructors.  Each masked_* call
# returns a copy of xouter with the mask set where the comparison holds:
#   masked_equal / masked_not_equal       -> x == v / x != v
#   masked_greater / masked_greater_equal -> x > v / x >= v
#   masked_less / masked_less_equal       -> x < v / x <= v
#   masked_outside(x, v1, v2)             -> x outside [v1, v2]
# (for floating point, prefer masked_values over masked_equal)
xge = MV2.masked_greater_equal(xouter, 120)   # mask where xouter >= 120
xl = MV2.masked_less(xouter, 160)             # mask where xouter < 160
xmo = MV2.masked_outside(xouter, 120, 160)    # mask where outside [120, 160]
def createVarSubdivisionShpNCfile(varid, cfile, shpfile, outpath, **kwarg):
    """
    createVarSubdivisionShpNCfile : create variable of subdivison shaped
    using shp file as one of the input and store into nc file.

    args:
        varid : variable name of input file
        cfile : input climate netcdf file path
        shpfile : shpfile path which has proper subdivison level map
        outpath : output directory

    kwargs:
        preserve_original_data_shape_in_all_subregions : True | False (default).
            If it is True, then all subdivison regions of input variable
            will have same shape of full region but masked fully (lat, lon)
            other than subdivison / state boundary region. Useful to retain
            the full data shape but keep only subdivison region data alone.
            If it is False, then all subdivison regions of input variable
            will be store with its corresponding lat, lon shape.
        latitude : range of latitude to be retain in the output in case of
            preserve_original_data_shape_in_all_subregions is True.
        longitude : range of longitude to be retain in the output in case of
            preserve_original_data_shape_in_all_subregions is True.

    output : It create nc file named as varid + '_subdivisions.nc'.
        This function works well for the input shape of (time, latitude,
        longitude).

    Author : Arulalan.T ([email protected]), Research Scholar, CAS, IITD
    Date : 21-Apr-2017
    """
    # export the environment variable path of shape file; ocgis resolves the
    # shapefile by name relative to DIR_GEOMCABINET
    shpfname = shpfile.split('/')[-1].split('.')[0]
    shppath = shpfile.split(shpfname)[0]
    ocgis.env.DIR_GEOMCABINET = shppath
    preserve_original_data_shape_in_all_subregions = kwarg.get(
        'preserve_original_data_shape_in_all_subregions', False)
    lat = kwarg.get('latitude', None)
    lon = kwarg.get('longitude', None)
    outpath = '.' if outpath is None else outpath
    outfile_sequentialnumbers = varid + '_filledwith_sequentialnumbers.nc'
    shpdata_outfile = os.path.join(outpath, varid + '_subdivisions.nc')
    # open the nc file via uvcdat; sdata is a single time step used only to
    # borrow the grid (lat/lon axes and shape)
    inf = cdms2.open(cfile)
    sdata = inf(varid, time=slice(1))
    tottimeax = inf[varid].getTime()
    # get the shape of one time step, lat, lon for dummy sequential numbers
    dshape = sdata.shape[-2:]
    dlen = dshape[0] * dshape[1]
    oshape = (len(tottimeax), dshape[0], dshape[1])
    dshape = (1, dshape[0], dshape[1])
    # create dummy sequential numbers (1..nlat*nlon); after ocgis clips this
    # field to each boundary, the surviving numbers locate the sub-region's
    # row/column window in the full grid
    numbers = numpy.arange(1, dlen+1, 1).reshape(dshape)
    numbers = cdms2.createVariable(numbers, id=varid)
    timeax = sdata.getTime()
    taxis = cdms2.createAxis(numpy.array([0]), id='time')
    taxis.units = timeax.units
    taxis.designateTime()
    # set all axis
    numbers.setAxisList([taxis, sdata.getLatitude(), sdata.getLongitude()])
    # store into temporary nc file of sequential numbers.
    outf = cdms2.open(outfile_sequentialnumbers, 'w')
    outf.write(numbers)
    outf.close()
    # repeat nos for time dimension
    tnumbers = numbers.data.repeat(len(tottimeax), 0)
    # get sequential numbers via ocgis
    rd_sequentialnumbers = ocgis.RequestDataset(outfile_sequentialnumbers,
                                                variable=varid)
    # get actual data via ocgis
    rd_data = ocgis.RequestDataset(cfile, variable=varid)
    print "Hold on ... it may take some time ..."
    # get list of state bounded numpy arrays of sequential numbers
    # (snippet=True: one time step is enough to recover the spatial window)
    state_sequentialnumbers = ocgis.OcgOperations(
        dataset=rd_sequentialnumbers,
        spatial_operation='intersects',  # alternative: 'clip'
        aggregate=False, agg_selection=False, allow_empty=True,
        snippet=True, geom=shpfname, output_format='numpy').execute()
    # get list of state bounded numpy arrays of actual climate data
    # (snippet=False: keep all time steps)
    state_data = ocgis.OcgOperations(
        dataset=rd_data,
        spatial_operation='intersects',  # alternative: 'clip'
        aggregate=False, agg_selection=False, allow_empty=True,
        snippet=False, geom=shpfname, output_format='numpy').execute()
    # lets store the output here
    outf = cdms2.open(shpdata_outfile, 'w')
    # store sample data full spatial
    # NOTE(review): when the latitude/longitude kwargs are not supplied this
    # calls sdata(latitude=None, longitude=None) — verify cdms2 tolerates
    # None selectors.
    outf.write(sdata(latitude=lat, longitude=lon))
    # lets loop through all the state boundaries
    # NOTE(review): enumerate yields 0-based positions but the collections
    # are indexed with idx = position + 1, presumably because ocgis keys
    # geometries from 1 — confirm against the ocgis version in use.
    for idxx, state in enumerate(state_sequentialnumbers):
        idx = idxx + 1
        # get the sequential numbers of particular state boundary
        state_seq_val = state_sequentialnumbers[idx][varid].variables[varid].value
        # get the actual climate data of particular state boundary
        state_data_val = state_data[idx][varid].variables[varid].value.squeeze()
        # find start and end row of particular state boundary: the min/max
        # surviving sequential numbers mark the window's corner rows
        start_row = numpy.where(numbers==state_seq_val.min())[1][0]
        end_row = numpy.where(numbers==state_seq_val.max())[1][0]
        # find start and end coloum of particular state boundary
        start_col = numpy.where(numbers==state_seq_val[0][0][0][:,0].min())[2][0]
        end_col = numpy.where(numbers==state_seq_val[0][0][0][:,-1].max())[2][0]
        if preserve_original_data_shape_in_all_subregions:
            # get the mask of particular state boundary from sequential
            # numbers (masked_greater_equal(tnumbers, 0) masks every cell,
            # since the numbers start at 1; the window assignment below then
            # unmasks only the boundary region)
            result = MV2.masked_greater_equal(tnumbers, 0)
            result[:, start_row: end_row+1, start_col:end_col+1] = state_data_val
            result = result.reshape(oshape)
            # store the clipped actual data into original shapped data by
            # masking other grid points
            result = cdms2.createVariable(result, id=varid+'_'+str(idx))
            result.setAxisList([tottimeax, sdata.getLatitude(),
                                sdata.getLongitude()])
            # NOTE(review): `if lat and lon` is falsy for 0/empty selectors,
            # not only for None — confirm intended.
            if lat and lon:
                result = result(latitude=lat, longitude=lon)
        else:
            # store the clipped shaped data as it is into nc file with its
            # lat, lon; each region gets uniquely named axes so they can
            # coexist in one output file
            latax = sdata.getLatitude()[start_row: end_row+1]
            latax = cdms2.createAxis(latax, id='latitude'+str(idx))
            latax.designateLatitude()
            lonax = sdata.getLongitude()[start_col:end_col+1]
            lonax = cdms2.createAxis(lonax, id='longitude'+str(idx))
            lonax.designateLongitude()
            stshp = state_data_val.shape
            state_data_val = state_data_val.reshape((len(tottimeax),
                                                     stshp[-2], stshp[-1]))
            result = cdms2.createVariable(state_data_val, id=varid+'_'+str(idx))
            result.setAxisList([tottimeax, latax, lonax])
        outf.write(result)
        print "stored stateboundary data", idx, "shaped : ", result.shape
    # end of for idx,state in enumerate(path):
    outf.close()
    inf.close()
    print "Stored the input data with boundaries of shape file into : ", shpdata_outfile
    # clean up the temporary sequential-numbers file
    os.remove(outfile_sequentialnumbers)
# daily and monthly-average global radiation, Solar Energy 28(4), pp 293-302, 1982. Eq. 1 kt = np.zeros([24,lat_num,lon_num]) kt[swtdn_tmp != 0.] = (swgdn_tmp[swtdn_tmp != 0.]/swtdn_tmp[swtdn_tmp != 0.]) kt[kt<0.] = 0. df = np.zeros([24,lat_num,lon_num]) # error1 df = np.where( kt<= 0.22, 1 - 0.09 * kt, df) df = np.where((kt > 0.22) & (kt <= 0.8), 0.9511 - 0.1604*kt + 4.388*kt**2 - 16.638*kt**3 + 12.336*kt**4, df) df = np.where( kt > 0.8, 0.165, df) # df = np.where( kt== 0.0, 1.0, df) # where no TOA SW, all diffuse flux dhi = df * swgdn_tmp # diffuse radiation dni = (swgdn_tmp - dhi) # direct radiation for hr_idx in range(24): zenith, solar_azi, ha = cal_solar_angles(lat, lon, days_ord, hr_idx) # All in radius mask1 = MV.filled(MV.masked_equal(swtdn_tmp[hr_idx],0)*0+1,0) mask2 = MV.filled(MV.masked_greater_equal(zenith,np.pi/2)*0+1,0) # Based on Braun and Mitchell, 1983 # Solar geometry for fixed and tracking surface, Solar Energy, 1983 incidence_rad, panel_tilt_rad = cal_incidence_angles(zenith, solar_azi, tilt_pv, azim_pv, 'h') cosine_zenith = np.cos(zenith)* mask1*mask2 cosine_incide = np.cos(incidence_rad) * mask1*mask2 adjust_factor_dni = replace_nan(cosine_incide / cosine_zenith) adjust_factor_dni[(adjust_factor_dni<1.)]=1. # Adjust dni and dhi based on Pfenninger and Staffell, 2016[3] # Long-term patterns of European PV output using 30 years of validated hourly reanalysis and satellite data, Energy, 2016 # The calculation of adjuated direct sunlight is corrected dni_adjust = dni[hr_idx] * adjust_factor_dni dhi_adjust = dhi[hr_idx]*(1+np.cos(panel_tilt_rad))/2. + 0.3*(dni[hr_idx]+dhi[hr_idx])*(1-np.cos(panel_tilt_rad))/2. rad_adjust = replace_nan(dni_adjust + dhi_adjust)
# NOTE(review): these two statements presumably sit in an error branch of an
# enclosing `if` whose header is outside this chunk — confirm placement.
print('lat/lon number error')
# BUG FIX: `sys.exit` without parentheses is a bare attribute reference and
# never terminates the program; it must be called.
sys.exit()

# Clearness index kt = surface SW / TOA SW, defined only where TOA SW != 0.
kt = np.zeros([24, lat_num, lon_num])
kt[swtdn_tmp != 0.] = (swgdn_tmp[swtdn_tmp != 0.] / swtdn_tmp[swtdn_tmp != 0.])
kt[kt < 0.] = 0.

for hr_idx in range(24):
    # solar geometry for this hour (angles in radians)
    zenith, solar_azi, ha = cal_solar_angles(lat, lon, int(year), month,
                                             days, hr_idx, days_ord)
    incidence_rad, panel_tilt_rad = cal_incidence_angles(
        zenith, solar_azi, tilt_pv, azim_pv, 'h')
    # 0/1 validity masks: mask1 is 1 where TOA SW is nonzero; mask2 is 1
    # where the sun is above the horizon (zenith < pi/2)
    mask1 = MV.filled(MV.masked_equal(swtdn_tmp[hr_idx], 0) * 0 + 1, 0)
    mask2 = MV.filled(
        MV.masked_greater_equal(zenith, np.pi / 2) * 0 + 1, 0)
    cosine_zenith = np.cos(zenith) * mask1 * mask2
    #cosine_zenith[(cosine_zenith>0)&(cosine_zenith<0.087)] = 0.087  ### do we want it here?
    cosine_incide = np.cos(incidence_rad) * mask1 * mask2
    # geometric gain of panel-incident over horizontal direct irradiance,
    # with nan/inf from the division cleaned up and clamped below at 1
    adjust_factor_dni = replace_inf(
        replace_nan(cosine_incide / cosine_zenith))
    adjust_factor_dni[(adjust_factor_dni < 1.)] = 1.
    # locate the grid cell with peak surface SW this hour; use its
    # surface/TOA ratio and TOA value as reference scalars
    maximum_index = np.argwhere(
        swgdn_tmp[hr_idx] == np.max(swgdn_tmp[hr_idx]))
    base = swgdn_tmp[hr_idx][maximum_index[0, 0]][maximum_index[
        0, 1]] / swtdn_tmp[hr_idx][maximum_index[0, 0]][maximum_index[0, 1]]
    cons = swtdn_tmp[hr_idx][maximum_index[0, 0]][maximum_index[0, 1]]
    potential_max_solar = np.zeros([lat_num, lon_num])
# Pull the spatial axes off the reference field.
lon = uf.getLongitude()
lat = uf.getLatitude()
# Attach them to xouter: index 0 is latitude, index 1 longitude.
xouter.setAxis(0, lat)
xouter.setAxis(1, lon)
# setAxisList replaces both axes at once; kept to mirror the per-axis form.
xouter.setAxisList([lat, lon])

# Sample the comparison-based masking helpers from MV2.  A masked_* helper
# yields xouter with elements masked wherever its predicate is true; the
# unused variants (masked_equal, masked_not_equal, masked_greater,
# masked_less_equal) follow the same pattern.  For floating-point equality
# tests, masked_values is the recommended alternative to masked_equal.
xge = MV2.masked_greater_equal(xouter, 120)  # masked where >= 120
xl = MV2.masked_less(xouter, 160)            # masked where < 160
xmo = MV2.masked_outside(xouter, 120, 160)   # masked outside [120, 160]