def compare_40_Interim(month,var):
    """Return the 1989 monthly field from ERA-40 and ERA-Interim side by side.

    Both arrays are flipped in latitude so that row 0 is the southernmost row,
    matching the orientation used elsewhere in this module.
    """
    import netCDF4
    import numpy as np
    import subroutine
    import datetime as dt
    meta = subroutine.read_meta_data('var')
    var_name = subroutine.celldata(meta, 'var', var, 'ORAS4_fn')
    path40 = subroutine.dat_dir() + 'ORAS4/NetCDF/ERA-40_1989.nc'
    pathInt = subroutine.dat_dir() + 'ORAS4/NetCDF/ERA-Interim_1989.nc'
    ds40 = netCDF4.Dataset(path40, 'r')
    dsInt = netCDF4.Dataset(pathInt, 'r')
    t = month - 1
    if var_name == 'tp':
        # Total precipitation before 1989 is the sum of the large-scale
        # and convective components.
        field40 = ds40.variables['lsp'][t, :, :] + ds40.variables['cp'][t, :, :]
        fieldInt = dsInt.variables['lsp'][t, :, :] + dsInt.variables['cp'][t, :, :]
    else:
        field40 = ds40.variables[var_name][t, :, :]
        fieldInt = dsInt.variables[var_name][t, :, :]
    ds40.close()
    dsInt.close()
    return field40[::-1, :], fieldInt[::-1, :]
def __init__(self):
    """Cache the ESTOC ver.02 grid sizes, first year, and data directory.

    Grid sizes are taken from a sample temperature file of the dataset.
    """
    ncfile = subroutine.dat_dir() + 'ESTOCver02/NetCDF/k7oda_tmp_20010100_02b.nc'
    nc = netCDF4.Dataset(ncfile, 'r')
    xgrid = nc.variables['LONGITUDE'][:]
    ygrid = nc.variables['LATITUDE'][:]
    zgrid = nc.variables['DEPTH'][:]
    nc.close()  # bug fix: the original never closed the dataset (handle leak)
    self.nx = xgrid.size   # number of longitude points
    self.ny = ygrid.size   # number of latitude points
    self.nz = zgrid.size   # number of depth levels
    self.fy = 1957         # first year of the dataset
    self.ncdir = subroutine.dat_dir() + 'ESTOCver02/NetCDF/'
def get_data(year,month,var):
    """Read one monthly ERA-40 / ERA-Interim field.

    The decadal ERA-40 files cover 1958-1988; later years come from the
    ERA-Interim file.  Several derived fields (precipitation before 1989,
    freshwater flux 'sff', heat flux 'q') are built from their components.
    The array is flipped so row 0 is the southernmost row.
    """
    import netCDF4
    import numpy as np
    import subroutine
    import datetime as dt
    meta = subroutine.read_meta_data('var')
    fname = subroutine.celldata(meta, 'var', var, 'ERA_fn')
    if year <= 1967:
        ncfile = subroutine.dat_dir() + 'ERA/NetCDF/ERA-40_1958-1967.nc'
    elif 1968 <= year <= 1977:
        ncfile = subroutine.dat_dir() + 'ERA/NetCDF/ERA-40_1968-1977.nc'
    elif 1978 <= year <= 1988:
        ncfile = subroutine.dat_dir() + 'ERA/NetCDF/ERA-40_1978-1988.nc'
    else:
        ncfile = subroutine.dat_dir() + 'ERA/NetCDF/ERA-Interim.nc'
    nc = netCDF4.Dataset(ncfile, 'r')
    times = nc.variables['time'][:]
    units = nc.variables['time'].units
    dtime = netCDF4.num2date(times, units=units)
    for i in range(0, dtime.size):
        if dtime[i].year == year and dtime[i].month == month:
            if year <= 1988 and fname == 'tp':
                # Total precipitation before 1989 is the sum of the
                # large-scale and convective components.
                data = nc.variables['lsp'][i, :, :] + nc.variables['cp'][i, :, :]
            elif year <= 1988 and fname == 'sff':
                # Freshwater flux for the ERA-40 years; the factor 2 was
                # found empirically to make the magnitudes match.
                data = (nc.variables['e'][i, :, :] + nc.variables['ro'][i, :, :] +
                        nc.variables['lsp'][i, :, :] + nc.variables['cp'][i, :, :]) * 2
            elif fname == 'sff':
                # Freshwater flux: evaporation + runoff + total precipitation.
                data = (nc.variables['e'][i, :, :] + nc.variables['ro'][i, :, :] +
                        nc.variables['tp'][i, :, :])
            elif fname == 'q':
                # Net heat flux: sensible + latent + shortwave + longwave.
                data = (nc.variables['sshf'][i, :, :] + nc.variables['slhf'][i, :, :] +
                        nc.variables['ssr'][i, :, :] + nc.variables['str'][i, :, :])
            else:
                data = nc.variables[fname][i, :, :]
            if fname == 'sff':
                # Empirical factor that brings the order of magnitude in line.
                data = data * 100
            break
    nc.close()
    return data[::-1, :]
def nc_read(year,month,var,depth):
    """Read one month of an ORAS4 field.

    depth == 0 returns every level as a (ny, nx, nz) array; depth == k
    (1-based) returns the single level k (k = 1 is the 5 m level); for the
    2-D sea-surface-height field ('ht') depth is ignored.  Values >= 10000
    (missing) are replaced with NaN.
    """
    import netCDF4
    import numpy as np
    import subroutine
    a = subroutine.read_meta_data('var')
    filename = subroutine.celldata(a, 'var', var, 'ORAS4_fn')
    ncfile = subroutine.dat_dir() + 'ORAS4/NetCDF/' + filename + '_oras4_1m_' + \
        str(year) + '_grid_1x1.nc'
    nc = netCDF4.Dataset(ncfile, 'r')
    if var == 'ht':
        # Sea-surface height is the only 2-D variable.
        data = nc.variables[filename][month - 1, :, :]
    elif depth != 0:
        data = nc.variables[filename][month - 1, depth - 1, :, :]
    else:
        # Bug fix: the original referenced undefined globals ny/nx/nz here
        # (NameError); derive the grid sizes from the variable itself.
        nz, ny, nx = nc.variables[filename].shape[1:]
        data = np.ones((ny, nx, nz))
        for k in range(0, nz):
            data[:, :, k] = nc.variables[filename][month - 1, k, :, :]
    nc.close()
    # np.nan instead of the scipy.nan alias (removed in modern SciPy).
    data[np.where(data >= 10000)] = np.nan
    return data
def get_grid_value(var):
    """Return [lon, lat, depth] grid vectors for the ERA dataset.

    Latitude is reversed to run south-to-north; depth is a dummy zero level.
    """
    import netCDF4
    import subroutine
    import numpy as np
    path = subroutine.dat_dir() + 'ERA/NetCDF/ERA-40_1958-1967.nc'
    dataset = netCDF4.Dataset(path, 'r')
    lons = dataset.variables['longitude'][:]
    lats = dataset.variables['latitude'][:]
    dataset.close()
    return [lons, lats[::-1], np.array([0])]
def get_grid_value(var):
    """Return [lon, lat, pressure] grid vectors for the MOAA GPV dataset."""
    import netCDF4
    import subroutine
    path = subroutine.dat_dir() + 'MOAA_GPV/NetCDF/TS_200101_GLB.nc'
    dataset = netCDF4.Dataset(path, 'r')
    pres = dataset.variables['PRES'][:]
    lons = dataset.variables['LONGITUDE'][:]
    lats = dataset.variables['LATITUDE'][:]
    dataset.close()
    return [lons, lats, pres]
def get_grid_value(var):
    """Return [lon, lat, depth] grid vectors for COBE-SST (depth is a dummy 0)."""
    import netCDF4
    import subroutine
    import numpy as np
    path = subroutine.dat_dir() + "COBE-SST/sst.mon.mean.nc"
    dataset = netCDF4.Dataset(path, "r")
    lons = dataset.variables["lon"][:]
    lats = dataset.variables["lat"][:]
    dataset.close()
    return [lons, lats, np.array([0])]
def get_grid_value(var):
    """Return [lon, lat, depth] grid vectors for the Aquarius SSS dataset.

    The grid is read from the January 2012 file; depth is a dummy zero level.
    """
    import netCDF4
    import subroutine
    import numpy as np
    yy, mm = subroutine.strym(2012, 1)
    path = subroutine.dat_dir() + 'Aquarius/NetCDF/sss' + yy + mm + '.v4.0cap.nc'
    dataset = netCDF4.Dataset(path, 'r')
    lons = dataset.variables['lon'][:]
    lats = dataset.variables['lat'][:]
    dataset.close()
    return [lons, lats, np.array([0])]
def get_grid_value(var):
    """Return [lon, lat, depth] grid vectors for HadISST.

    Longitudes are rotated from [-180, 180) to [0, 360), latitude is reversed
    to run south-to-north, and depth is a dummy zero level.
    """
    import netCDF4
    import subroutine
    import numpy as np
    ncfile = subroutine.dat_dir() + 'HadISST/HadISST_sst.nc'
    nc = netCDF4.Dataset(ncfile, 'r')
    xgrid = nc.variables['longitude'][:]
    # Bug fix: use floor division so the index stays an int under Python 3
    # (plain '/' yields a float there); identical result under Python 2.
    half = xgrid.size // 2
    xgrid = np.r_[xgrid[half:], 360 + xgrid[:half]]
    ygrid = nc.variables['latitude'][:]
    nc.close()
    return [xgrid, ygrid[::-1], np.array([0])]
def get_grid_value(var):
    """Return [lon, lat, depth] grid vectors for the WOA01 climatology."""
    import netCDF4
    import subroutine
    a = subroutine.read_meta_data('var')
    fn = subroutine.celldata(a, 'var', var, 'WOA01_fn')
    # Bug fix: the path used the undefined name 'filen' (NameError); the
    # file name looked up above is 'fn'.  The unused 'WOA01_vn' lookup was
    # also dropped.
    ncfile = subroutine.dat_dir() + 'WOA01/' + fn + '.nc'
    nc = netCDF4.Dataset(ncfile, 'r')
    var_p = nc.variables['Z'][:]
    xgrid = nc.variables['X'][:]
    ygrid = nc.variables['Y'][:]
    nc.close()
    return [xgrid, ygrid, var_p]
def nc_read(year,month):
    """Read one ERSST monthly SST field; missing values (<= -998) become NaN."""
    import netCDF4
    import subroutine
    import numpy as np
    Threshold = -998.0  # missing-value threshold
    stryear, strmonth = subroutine.strym(year, month)
    ncfile = subroutine.dat_dir() + 'ERSST/ersst.' + stryear + strmonth + '.nc'
    nc = netCDF4.Dataset(ncfile, 'r')
    the_sst = nc.variables['sst'][0, 0, :, :]
    # np.nan instead of the scipy.nan alias (removed in modern SciPy).
    the_sst[np.where(the_sst <= Threshold)] = np.nan
    nc.close()
    return the_sst
def get_grid_value(var):
    """Return [lon, lat, depth] grid vectors for ESTOC ver.02.

    Velocity variables are stored in the 'vel' file, everything else in the
    'tmp' file.  For vertical velocity 'w' the tracer depths are converted to
    layer-interface depths.
    """
    if var in ('u', 'v', 'ustar', 'vstar'):
        file_name = 'vel'
    else:
        file_name = 'tmp'
    ncfile = subroutine.dat_dir() + 'ESTOCver02/NetCDF/k7oda_' + file_name + '_20010100_02b.nc'
    nc = netCDF4.Dataset(ncfile, 'r')
    var_p = nc.variables['DEPTH'][:]
    xgrid = nc.variables['LONGITUDE'][:]
    ygrid = nc.variables['LATITUDE'][:]
    nc.close()
    if var == 'w':
        # Interface depths: h[0] = 2*z[0], h[i] = 2*z[i] - h[i-1].
        var_h = np.zeros(var_p.size)
        for i in range(var_p.size):
            if i == 0:
                var_h[i] = 2 * var_p[i]
            else:
                var_h[i] = 2 * var_p[i] - var_h[i - 1]
        var_p = var_h
    return [xgrid, ygrid, var_p]
def nc_read(year,month):
    """Read one HadISST monthly SST field.

    The grid is rotated from [-180, 180) to [0, 360) longitudes and flipped
    so row 0 is the southernmost row; missing values (<= -998) become NaN.
    """
    import netCDF4
    import subroutine
    import numpy as np
    Threshold = -998.0                       # missing-value threshold
    time_n = (year - 1870) * 12 + (month - 1)  # HadISST starts January 1870
    ncfile = subroutine.dat_dir() + 'HadISST/HadISST_sst.nc'
    nc = netCDF4.Dataset(ncfile, 'r')
    the_sst = nc.variables['sst'][time_n, :, :]
    # Bug fix: floor division keeps the index an int under Python 3 as well.
    half = the_sst.shape[1] // 2
    the_sst = np.c_[the_sst[:, half:], the_sst[:, :half]]
    nc.close()
    # np.nan instead of the scipy.nan alias (removed in modern SciPy).
    the_sst[np.where(the_sst <= Threshold)] = np.nan
    return the_sst[::-1, :]
def get_grid_value(var):
    """Return [lon, lat, pressure] grids for the Roemmich-Gilson climatology.

    The longitude axis is rotated by 340 columns (matching the rotation done
    when the data are read) with the wrapped part shifted down by 360 degrees.
    """
    import netCDF4
    import subroutine
    import numpy as np
    meta = subroutine.read_meta_data('var')
    fn = subroutine.celldata(meta, 'var', var, 'RG_fn')
    path = subroutine.dat_dir() + 'Roemmich_Gilson/NetCDF/RG_ArgoClim_' + fn + '_2015.nc'
    dataset = netCDF4.Dataset(path, 'r')
    pres = dataset.variables['PRESSURE'][:]
    lons = dataset.variables['LONGITUDE'][:]
    lons = np.r_[lons[340:] - 360, lons[:340]]
    lats = dataset.variables['LATITUDE'][:]
    dataset.close()
    return [lons, lats, pres]
def nc_read(year, month):
    """Read one COBE-SST monthly field; values >= 998 (missing) become NaN."""
    import netCDF4
    import subroutine
    import numpy as np
    Threshold = 998.0                          # missing-value threshold
    time_n = (year - 1891) * 12 + (month - 1)  # COBE-SST starts January 1891
    ncfile = subroutine.dat_dir() + "COBE-SST/sst.mon.mean.nc"
    nc = netCDF4.Dataset(ncfile, "r")
    the_sst = nc.variables["sst"][time_n, :, :]
    nc.close()
    # np.nan instead of the scipy.nan alias (removed in modern SciPy).
    the_sst[np.where(the_sst >= Threshold)] = np.nan
    return the_sst
def call_jamstecDMI(tres):
    """Return the JAMSTEC DMI time series.

    tres == 0 returns the series on its native (weekly) time axis;
    tres == 1 interpolates it onto a monthly 1982-2011 axis.
    Raises Exception for any other tres.
    """
    dtd = subroutine.dat_dir()
    # Bug fix: use a context manager so the file is closed even on error
    # (the original never closed the handle).
    with open(dtd + 'ESTOCver03/binary/DMI/dmi.monthly_JAMSTEC.out', 'r') as fd:
        chunk = np.fromfile(fd, dtype=np.dtype([("data", "<" + str(1585) + "f")]), count=2)
    ym_convert = chunk[0]['data']  # time axis stored in the file
    data = chunk[1]['data']        # DMI values
    if tres == 0:
        return ym_convert, data
    elif tres == 1:
        ym = ts.ym_timeseries(1982, 2011)
        tt = interpolate.interp1d(ym_convert, data)
        Interpolated = tt(ym)
        return ym, Interpolated
    else:
        raise Exception('error! your tres is not valid!')
def get_data(year,month):
    """Read the AQC Argo profile data for one month.

    Returns (temp, salt, pres, lon, lat); profile arrays have one row per
    profile.
    """
    import subroutine
    import netCDF4
    data_dir = subroutine.dat_dir() + 'AQC/NetCDF/'  # renamed: 'dir' shadows a builtin
    stryear, strmonth = subroutine.strym(year, month)
    ncfile = data_dir + 'AQC_Profile_Data_' + stryear + strmonth + '.nc'
    nc = netCDF4.Dataset(ncfile, 'r')
    # The unused TIME read and the N_PROF/STRING16/N_LEVELS locals were
    # removed; none of them affected the returned values.
    pres = nc.variables['PRES'][:, :]
    temp = nc.variables['TEMP'][:]
    salt = nc.variables['PSAL'][:]
    lon = nc.variables['LONGITUDE'][:]
    lat = nc.variables['LATITUDE'][:]
    nc.close()  # bug fix: the original never closed the dataset
    return temp, salt, pres, lon, lat
def get_data(year,month):
    """Read the Aquarius CAP sea-surface-salinity field for one month.

    Missing values (<= -999) are replaced with NaN.
    """
    import subroutine
    import netCDF4
    import numpy as np
    stryear, strmonth = subroutine.strym(year, month)
    ncfile = subroutine.dat_dir() + 'Aquarius/NetCDF/sss' + stryear + strmonth + '.v4.0cap.nc'
    # print as a function call works identically on Python 2 and 3 here.
    print(ncfile)
    nc = netCDF4.Dataset(ncfile, 'r')
    # The original read 'lon' twice into an unused local; both reads removed.
    data = nc.variables['sss_cap'][:, :]
    nc.close()  # bug fix: the original never closed the dataset
    # np.nan instead of the scipy.nan alias (removed in modern SciPy).
    data[np.where(data <= -999)] = np.nan
    return data
def masuda_data_read(year,month,var,depth):
    """Read one month of an ESTOC ver.03 binary field.

    depth == 0 returns all ik levels as a (ny, nx, ik) array; depth == k
    (1-based) returns a single (ny, nx) level.  2-D variables must be read
    with depth == 1 (ValueError otherwise).  Missing values (<= -1e30)
    become NaN and units are converted to this module's conventions
    (taux/tauy: N/m^2; u/v/w/sff: m/s; q: cal/(m^2 s) — already fixed in the
    files).
    """
    Mt = 60 * 60 * 24 * 30  # seconds in a (30-day) month
    Threshold = -1e+30      # missing-value threshold
    data_dir = subroutine.dat_dir() + 'ESTOCver03/binary/each_data/'
    ID = Var.var_to_id(var)
    dt = np.dtype([("data", "<" + str(nx * ny) + "f")])
    stryear, strmonth = subroutine.strym(year, month)
    dim = Var.VAR[ID].Get_dim()
    count = ik if dim == '3D' else 1
    if count == 1 and depth != 1:
        raise ValueError('if you choose 2D data, depth must be 1!')
    # Bug fix: context manager closes the file even on error (the original
    # leaked the handle, including when the ValueError above fired).
    with open(data_dir + "/" + stryear + strmonth + "/" + var + ".out", "r") as fd:
        chunk = np.fromfile(fd, dtype=dt, count=count)
    if depth != 0:
        data = chunk[depth - 1]['data']
        data = np.reshape(data, (ny, nx))
    else:
        data = np.ones((ny, nx, ik))
        for k in range(0, ik):
            data[:, :, k] = np.reshape(chunk[k]['data'], (ny, nx))
    data[np.where(data <= Threshold)] = np.nan  # missing values -> NaN
    if var == 'sff':
        data = data * Mt          # m/s -> m/month
    elif var == 'ustar' or var == 'vstar' or var == 'wstar':
        data = data / 100.0       # cm/s -> m/s
    elif var == 'eq':
        data = data * -Mt * 0.01  # match the freshwater-flux dimension
    return data
def nc_read(month,var,depth):
    """Read one month of a WOA01 climatology field.

    depth == 0 returns every level as a (ny, nx, nz) array; depth == k
    (1-based) returns the single level k.
    """
    import numpy as np
    import netCDF4
    import subroutine
    a = subroutine.read_meta_data('var')
    fn = subroutine.celldata(a, 'var', var, 'WOA01_fn')
    vn = subroutine.celldata(a, 'var', var, 'WOA01_vn')
    # Bug fix: the path used the undefined name 'filen' (NameError); the
    # file name looked up above is 'fn'.
    ncfile = subroutine.dat_dir() + 'WOA01/' + fn + '.nc'
    nc = netCDF4.Dataset(ncfile, 'r')
    if depth != 0:
        data = nc.variables[vn][month - 1, depth - 1, :, :]
    else:
        # Bug fix: get_grid_value takes the variable name as an argument;
        # calling it with no arguments raised TypeError.
        x, y, z = get_grid_value(var)
        nz, nx, ny = z.size, x.size, y.size
        data = np.ones((ny, nx, nz))
        for k in range(0, nz):
            data_2D = nc.variables[vn][month - 1, k, :, :]
            data[:, :, k] = np.reshape(data_2D, (ny, nx))
    nc.close()
    return data
def nc_read(year,month,var,depth):
    """Read one month of a MOAA GPV (gridded Argo) field.

    depth == 0 returns every level as a (ny, nx, nz) array; depth == k
    (1-based) returns the single level k.  Values >= 10000 (missing) become
    NaN.
    """
    import numpy as np
    import netCDF4
    import subroutine
    a = subroutine.read_meta_data('var')
    vn = subroutine.celldata(a, 'var', var, 'MOAA_GPV_vn')
    stryear, strmonth = subroutine.strym(year, month)
    ncfile = subroutine.dat_dir() + 'MOAA_GPV/NetCDF/TS_' + stryear + strmonth + '_GLB.nc'
    nc = netCDF4.Dataset(ncfile, 'r')
    if depth != 0:
        data = nc.variables[vn][depth - 1, :, :]
    else:
        # Bug fix: the original referenced undefined globals ny/nx/nz here
        # (NameError); derive the grid sizes from the variable itself.
        nz, ny, nx = nc.variables[vn].shape
        data = np.ones((ny, nx, nz))
        for k in range(0, nz):
            data[:, :, k] = nc.variables[vn][k, :, :]
    nc.close()
    # np.nan instead of the scipy.nan alias (removed in modern SciPy).
    data[np.where(data >= 10000)] = np.nan
    return data
def call_productDMI(product_n):
    """Load the Fortran-produced DMI time series for one product.

    Returns (ym, data, data_rm, data_rm_tr): the monthly time axis, the raw
    index, and the two processed series stored in the binary file.
    """
    pro_name, title_name, bin_dirname = subroutine.product_n_to_name(product_n)
    dtd = subroutine.dat_dir()
    base = dtd + '/' + pro_name + '/' + bin_dirname + '/DMI/'
    # Context managers close every handle even on error; the original
    # closed the two text files but leaked the binary one.
    with open(base + 'fy.dat', 'r') as f:
        fy = int(f.read())
    with open(base + 'ly.dat', 'r') as f:
        ly = int(f.read())
    with open(base + 'dmi.monthly_ESTOC.out', 'r') as fd:
        chunk = np.fromfile(fd, dtype=np.dtype([("data", "<" + str((ly - fy + 1) * 12) + "f")]),
                            count=4)
    # The binary file carries its own time axis; it overrides fy.dat/ly.dat.
    ym_convert = chunk[0]['data']
    fy = ym_convert[0]
    ly = int(ym_convert[ym_convert.size - 1])
    ym = ts.ym_timeseries02(fy, ly)
    data = chunk[1]['data']
    data_rm = chunk[2]['data']
    data_rm_tr = chunk[3]['data']
    return ym, data, data_rm, data_rm_tr
def get_grid_value(var):
    """Return [lon, lat, depth] grid vectors for ORAS4.

    For vertical velocity 'w' the grid is read from the temperature file and
    the tracer depths are converted to layer-interface depths.
    """
    import netCDF4
    import subroutine
    import numpy as np
    if var == 'w':
        filename = 'thetao'
    else:
        meta = subroutine.read_meta_data('var')
        filename = subroutine.celldata(meta, 'var', var, 'ORAS4_fn')
    path = subroutine.dat_dir() + 'ORAS4/NetCDF/' + filename + '_oras4_1m_1958_grid_1x1.nc'
    dataset = netCDF4.Dataset(path, 'r')
    depths = dataset.variables['depth'][:]
    lons = dataset.variables['lon'][:]
    lats = dataset.variables['lat'][:]
    dataset.close()
    if var == 'w':
        # Interface depths: h[0] = 2*z[0], h[i] = 2*z[i] - h[i-1].
        interfaces = np.zeros(depths.size)
        for i in range(depths.size):
            if i == 0:
                interfaces[i] = 2 * depths[i]
            else:
                interfaces[i] = 2 * depths[i] - interfaces[i - 1]
        depths = interfaces
    return [lons, lats, depths]
def nc_read(year,month,var,depth):
    """Read one month of the Roemmich-Gilson Argo climatology (mean + anomaly).

    depth == 0 returns every level as a (ny, nx, nz) array; depth == k
    (1-based) returns the single level k.  The longitude axis is rotated by
    340 columns to match get_grid_value.  Missing values (<= -999) become
    NaN.
    """
    import numpy as np
    import netCDF4
    import subroutine
    Threshold = -999  # missing-value threshold
    a = subroutine.read_meta_data('var')
    fn = subroutine.celldata(a, 'var', var, 'RG_fn')
    ncfile = subroutine.dat_dir() + 'Roemmich_Gilson/NetCDF/RG_ArgoClim_' + fn + '_2015.nc'
    nc = netCDF4.Dataset(ncfile, 'r')
    vn1 = subroutine.celldata(a, 'var', var, 'RG_vn1')  # climatological mean
    vn2 = subroutine.celldata(a, 'var', var, 'RG_vn2')  # monthly anomaly
    time = (year - 2004) * 12 + month - 1  # Jan 2004 -> 0, Dec 2014 -> 131
    if depth != 0:
        meandata = nc.variables[vn1][depth - 1, :, :]
        meandata[np.where(meandata <= Threshold)] = np.nan
        anomdata = nc.variables[vn2][time, depth - 1, :, :]
        anomdata[np.where(anomdata <= Threshold)] = np.nan
        data = meandata + anomdata
        data = np.c_[data[:, 340:], data[:, :340]]
    else:
        meandata = nc.variables[vn1][:, :, :]
        meandata[np.where(meandata <= Threshold)] = np.nan
        anomdata = nc.variables[vn2][time, :, :, :]
        anomdata[np.where(anomdata <= Threshold)] = np.nan
        ndata = meandata + anomdata
        ndata = np.c_[ndata[:, :, 340:], ndata[:, :, :340]]
        # Bug fix: the original referenced undefined globals ny/nx/nz here
        # (NameError); derive the grid sizes from the combined field.
        nz, ny, nx = ndata.shape
        data = np.ones((ny, nx, nz))
        for k in range(0, nz):
            data[:, :, k] = ndata[k, :, :]
    nc.close()
    return data
def get_data(var,lon,lat,time_res):
    """Read a RAMA mooring time series.

    var: 't', 's', 'u', 'v', 'ssu' or 'ssv'; lon/lat select the mooring
    site (negative lat means south); time_res: 'D' for daily, 'M' for
    monthly (padded onto a fixed Jan 2001 - Dec 2016 axis, NaN outside the
    record).  Returns (dtime, depth, data); velocities are converted from
    cm/s to m/s.  Raises Exception for an unknown var or time_res.
    """
    import subroutine
    import timeseries as ts
    import netCDF4
    import numpy as np
    data_dir = subroutine.dat_dir() + 'RAMA/NetCDF/'
    # Site part of the file name, e.g. '8n90e'.
    if lat >= 0.0:
        strlat = str(lat) + 'n'
    else:
        strlat = str(-lat) + 's'
    strlon = str(lon) + 'e'
    # Map the requested variable to its file prefix and NetCDF name.
    if var == 't':
        filevar, floatvar = 't', 'T_20'
    elif var == 's':
        filevar, floatvar = 's', 'S_41'
    elif var == 'u':
        filevar, floatvar = 'adcp', 'u_1205'
    elif var == 'v':
        filevar, floatvar = 'adcp', 'v_1206'
    elif var == 'ssu':
        filevar, floatvar = 'cur', 'U_320'
    elif var == 'ssv':
        filevar, floatvar = 'cur', 'V_321'
    else:
        raise Exception('your var is not valid!')
    if time_res == 'D':    # daily data
        f_name_tr = 'dy'
    elif time_res == "M":  # monthly data
        f_name_tr = 'mon'
    else:
        # Bug fix: the message read "in not valid".
        raise Exception('your time_res is not valid!')
    ncfile = data_dir + filevar + strlat + strlon + '_' + f_name_tr + '.cdf'
    # print as a function call works identically on Python 2 and 3 here.
    print(ncfile)
    nc = netCDF4.Dataset(ncfile, 'r')
    time = nc.variables['time'][:]
    units = nc.variables['time'].units
    dtime = netCDF4.num2date(time, units=units)
    # The dead reassignments of the lon/lat parameters were removed.
    depth = nc.variables['depth'][:]
    data = nc.variables[floatvar][:, :, 0, 0]
    nc.close()  # bug fix: the original never closed the dataset
    # np.nan instead of the scipy.nan alias (removed in modern SciPy).
    data[np.where(data >= 10000)] = np.nan
    if time_res == "M":
        # Pad the record onto the fixed Jan 2001 - Dec 2016 monthly axis.
        fy = 2001
        ly = 2016
        all_data = np.zeros([(ly - fy + 1) * 12, depth.size])
        all_dtime = ts.ym_timeseries02(fy, ly)
        nmin = max(max(np.where(all_dtime == dtime[0])))               # first sample index
        nmax = max(max(np.where(all_dtime == dtime[dtime.size - 1])))  # last sample index
        all_data[nmin:nmax + 1, :] = data[:, :]
        all_data[:nmin, :] = np.nan
        all_data[nmax + 1:, :] = np.nan
        dtime = all_dtime
        data = all_data
    if var == 'u' or var == 'v' or var == 'ssu' or var == 'ssv':
        data = data * 0.01  # cm/s -> m/s
    return dtime, depth, data