def init_kdv_inversion(ds, depthfile, infile, t1, t2, mode,
                       basetime=datetime(2016, 1, 1)):
    """
    Initialise the boundary conditions and the vKdV class for performing
    boundary condition inversion (optimization) calculations
    """
    # Get the time series of A(t)
    A_obs = ds['A_n'].sel(time=slice(t1, t2), modes=mode)

    # Get the density at the start of the time window
    rho = ds['rhobar'].sel(timeslow=t1, method='nearest')

    # Load the depth data
    depthtxt = np.loadtxt(depthfile, delimiter=',')
    z = np.arange(-depthtxt[0, 1], 5, 5)[::-1]

    # Get the density model parameters from the density profile
    iw = imodes.IWaveModes(rho.values[::-1], rho.z.values[::-1],
                           density_class=density.FitDensity,
                           density_func='double_tanh')
    iw(-250, 5, mode)
    density_params = iw.Fi.f0

    rhonew = density.double_tanh_rho(z, *density_params)

    # Launch a KdV instance
    mykdv = start_kdv(infile, rhonew, z, depthfile)

    # Find the observation location
    with open(infile, 'r') as f:
        args = yaml.load(f, Loader=yaml.FullLoader)
    xpt = args['runtime']['xpt']

    # Find the index of the output point
    xpt = np.argwhere(mykdv.x > xpt)[0][0]

    # Compute the travel time and the wave amplification factor
    ampfac = 1 / np.sqrt(mykdv.Qterm)
    twave = np.cumsum(1 / mykdv.c1 * mykdv.dx)

    # Compute the phase and amplitude of the signal
    At = timeseries(A_obs.time.values, A_obs.values)
    amp, phs, frq, _, Afit, _ = At.tidefit(frqnames=['M2', 'M4', 'M6'],
                                           basetime=basetime)

    # Scale the amplitude and phase back to the boundary (linear inversion)
    phs_bc = 1 * phs
    amp_bc = 1 * amp
    for ii in range(3):
        phs_bc[ii] = phs[ii] - twave[xpt] * frq[ii]
        amp_bc[ii] = amp[ii] / ampfac[xpt]

    amp_re = amp_bc * np.cos(phs_bc)
    amp_im = amp_bc * np.sin(phs_bc)

    # Set the time in the model to correspond with the phase of the
    # boundary forcing.
    # Start time: round up to the nearest 12 hours from the wave
    # propagation time plus the ramp time
    ramptime = 6 * 3600.
    bctime = myround(twave[xpt] + ramptime)

    starttime = datetime.strptime(t1, '%Y-%m-%d %H:%M:%S')
    endtime = datetime.strptime(t2, '%Y-%m-%d %H:%M:%S')

    starttime_sec = (starttime - basetime).total_seconds()
    runtime = (endtime - starttime).total_seconds()

    # Testing only
    #ds2 = run_vkdv(np.hstack([amp_re, amp_im]), frq,
    #               starttime_sec - bctime, runtime + bctime,
    #               mykdv, infile, verbose=False, ramptime=ramptime)

    # Input variables for the vKdV run: a0, frq, t0, runtime
    a0 = np.hstack([amp_re, amp_im])
    t0 = starttime_sec - bctime
    runtime = runtime + bctime

    return (mykdv, At, a0, frq, t0, runtime, density_params,
            twave[xpt], ampfac[xpt])
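# Usage sketch (added for illustration; not part of the original source).
# The file names, dataset layout and dates below are placeholder
# assumptions: `ds` needs the 'A_n' and 'rhobar' variables used above, and
# the YAML run file must define runtime -> xpt.
def example_init_kdv_inversion():
    import xarray as xr

    ds = xr.open_dataset('fitted_amplitudes.nc')  # hypothetical input file
    (mykdv, At, a0, frq, t0, runtime,
     density_params, twave_pt, ampfac_pt) = init_kdv_inversion(
         ds, 'depths.csv', 'kdvargs.yml',
         '2016-04-01 00:00:00', '2016-04-04 00:00:00', mode=0)

    # a0 stacks the real and imaginary harmonic amplitudes
    # (np.hstack([amp_re, amp_im])): the parameter vector an optimiser
    # perturbs when inverting for the boundary forcing.
    return mykdv, a0, frq, t0, runtime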
def calc_isopycnal_discharge(ncfile, xpt, ypt, saltbins, tstart, tend,
                             scalarvar='salt'):
    """
    Calculates the discharge as a function of salinity along a transect,
    defined by xpt/ypt, in the SUNTANS model

    Returns a dictionary with the relevant variables
    """
    nbins = saltbins.shape[0]

    # Load the slice object and extract the data
    SE = MultiSliceEdge(ncfile, xpt=xpt, ypt=ypt)
    #if SE is None:
    #    SE = SliceEdge(ncfile, xpt=xpt, ypt=ypt)
    #    SE.tstep = range(SE.Nt)
    #else:
    #    SE.update_xy(xpt, ypt)

    SE.tstep = SE.getTstep(tstart, tend)

    print('Loading the salt flux data...')
    #s_F_all = SE.loadData(variable='s_F')
    s_F_all = SE.loadData(variable=scalarvar)
    print('Loading the flux data...')
    Q_all = SE.loadData(variable='U_F')

    def Q_S_flux(salt, Q, saltbins, normal):
        # Make sure masked values are zeroed
        #s_F[s_F.mask] = 0
        #Q[Q.mask] = 0
        Q = Q * normal

        Nt, Nk, Ne = Q.shape
        #salt = np.abs(s_F)/np.abs(Q)
        #salt[np.isnan(salt)] = 0

        Ns = saltbins.shape[0]
        ds = np.diff(saltbins).mean()

        ###
        # Calculate Q(s,x)
        ###
        # Create an edge-index array matching the shape of Q
        jindex = np.arange(0, Ne)
        jindex = np.repeat(jindex[np.newaxis, np.newaxis, :], Nt, axis=0)
        jindex = np.repeat(jindex, SE.Nkmax, axis=1)

        # Group the salt matrix into bins
        sindex = np.searchsorted(saltbins, salt)
        sindex[sindex >= Ns] = Ns - 1

        # Calculate the salt flux for each time step
        Qs = np.zeros((Nt, Ns, Ne))
        for tt in range(Nt):
            # Build the output array Q_S; coo_matrix sums duplicate entries
            Q_S = sparse.coo_matrix(
                (Q[tt, ...].ravel(),
                 (sindex[tt, ...].ravel(), jindex[tt, ...].ravel())),
                shape=(Ns, Ne)).todense()

            Qs[tt, ...] = np.array(Q_S)  # Units m^3/s

            ####
            ## THIS IS WRONG - DON'T USE
            ####
            ## Compute the gradient (this gives the same result after
            ## integration)
            #dQ_ds, dQ_de = np.gradient(Qs[tt, ...], ds, 1)
            #dQds[tt, ...] = -1 * dQ_ds
            #Fs[tt, :] = np.sum(-1 * dQds[tt, ...].T * saltbins, axis=-1)

        output = {'time': SE.time[SE.tstep],
                  'saltbins': saltbins,
                  'Qs': Qs}

        return output

    def Q_S_flux_old(s_F, Q, saltbins, x, normal, area, dz):
        # Make sure masked values are zeroed
        #s_F[s_F.mask] = 0
        #Q[Q.mask] = 0
        Q = Q * normal

        Nt, Nk, ne = Q.shape
        salt = np.abs(s_F) / np.abs(Q)
        salt[np.isnan(salt)] = 0

        # Calculate the mean Q
        Qbar = np.sum(np.sum(Q, axis=-1), axis=0) / Nt

        ###
        # Calculate Q(s,x)
        ###
        # Create an edge-index array matching the shape of Q
        jindex = np.arange(0, ne)
        jindex = np.repeat(jindex[np.newaxis, np.newaxis, :], Nt, axis=0)
        jindex = np.repeat(jindex, SE.Nkmax, axis=1)

        # Group the salt matrix into bins
        sindex = np.searchsorted(saltbins, salt)
        sindex[sindex >= nbins] = nbins - 1

        # Build the output array Q_S_x; coo_matrix sums duplicate entries
        Q_S_x = sparse.coo_matrix(
            (Q.ravel(), (sindex.ravel(), jindex.ravel())),
            shape=(nbins, ne)).todense()
        Q_S_x = np.array(Q_S_x)  # Units m^3/s

        ###
        # Calculate Q(s,t)
        ###
        # Create a time-index array matching the shape of Q
        tindex = np.arange(0, Nt)
        tindex = np.repeat(tindex[:, np.newaxis, np.newaxis], ne, axis=-1)
        tindex = np.repeat(tindex, SE.Nkmax, axis=1)

        Q_S_t = sparse.coo_matrix(
            (Q.ravel(), (sindex.ravel(), tindex.ravel())),
            shape=(nbins, Nt)).todense()
        Q_S_t = np.array(Q_S_t)  # Units m^3/s

        ###
        # Calculate Q(s)
        ###
        Q_S = np.bincount(sindex.ravel(), weights=Q.ravel(),
                          minlength=nbins)

        ###
        # Calculate the gradients with respect to S
        ###
        ds = np.diff(saltbins).mean()
        dsdt_inv = 1. / (ds * Nt)
        saltbins = 0.5 * (saltbins[1:] + saltbins[0:-1])  # bin midpoints

        # Units are: [m^3 s^-1 psu^-1]
        dQ_S_x = np.diff(Q_S_x, axis=0) * dsdt_inv
        dQ_S_t = np.diff(Q_S_t, axis=0) * dsdt_inv
        dQ_S = np.diff(Q_S, axis=0) * dsdt_inv

        ###
        # Now integrate to calculate the flux terms
        # See MacCready (2011), eqs. 3 and 4
        ###
        ind_in = dQ_S >= 0
        ind_out = dQ_S < 0
        Fin = np.sum(saltbins[ind_in] * -dQ_S[ind_in] * ds)
        Fout = np.sum(saltbins[ind_out] * -dQ_S[ind_out] * ds)
        Qin = np.sum(-dQ_S[ind_in] * ds)
        Qout = np.sum(-dQ_S[ind_out] * ds)

        # Put all of the relevant variables into a dictionary
        output = {'x': x, 'time': SE.time[SE.tstep], 'saltbins': saltbins,
                  'dQ_S': dQ_S, 'dQ_S_x': dQ_S_x, 'dQ_S_t': dQ_S_t,
                  'F_in': Fin, 'F_out': Fout, 'Q_in': Qin, 'Q_out': Qout}

        return output, Qbar

    output = []
    outputf = []
    print('Calculating slice fluxes...')
    ii = -1
    Qbar = np.zeros((len(Q_all), SE.Nkmax))
    for s_F, Q in zip(s_F_all, Q_all):
        ii += 1
        x = SE.slices[ii]['distslice'][1:]
        normal = SE.slices[ii]['normal']
        area = SE.slices[ii]['area']
        dx = SE.slices[ii]['dx']
        #tmp, Qbar[ii, :] = Q_S_flux_old(s_F, Q, saltbins, x, normal,
        #                                area, SE.dz)
        tmp = Q_S_flux(s_F, Q, saltbins, normal)
        output.append(tmp)

        ## Also calculate the Godin-filtered (subtidal) fluxes
        TS = timeseries(SE.time[SE.tstep], s_F)
        TS.godinfilt()
        s_F_filt = TS.y.T
        TS = timeseries(SE.time[SE.tstep], Q)
        TS.godinfilt()
        Q_filt = TS.y.T
        tmp = Q_S_flux(s_F_filt, Q_filt, saltbins, normal)
        outputf.append(tmp)

    # Calculate the Eulerian mean
    #Sbar = SE.mean(s_F_all, axis='depth')
    #Qbar = SE.mean(Q_all, axis='depth')
    #z = SE.z_r

    return output, outputf, SE
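# Usage sketch (added for illustration; not part of the original source).
# The output file name, transect coordinates, bin spacing and time-string
# format below are all placeholder assumptions.
def example_isopycnal_discharge():
    import numpy as np

    ncfile = 'suntans_output.nc'            # hypothetical SUNTANS output
    xpt = [[315000.0, 320000.0]]            # one transect (x end points)
    ypt = [[6250000.0, 6250000.0]]          # matching y end points
    saltbins = np.linspace(0., 36., 73)     # 0.5 psu salinity classes

    output, outputf, SE = calc_isopycnal_discharge(
        ncfile, xpt, ypt, saltbins,
        '20120101.000000', '20120201.000000')

    # 'Qs' has shape (Nt, Nbins, Nedges); summing over the edge axis gives
    # the discharge per salinity class through the whole slice, Q(s, t)
    Q_s_t = output[0]['Qs'].sum(axis=-1)
    return Q_s_t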
def __init__(self, tmod, ymod, tobs, yobs, interpmodel=True, **kwargs):
    """
    Inputs:
        tmod, tobs - vectors of datetime objects
        ymod, yobs - vectors of values
        interpmodel - [default: True] interpolate the model onto the
            observation time steps (if False, interpolate the
            observations onto the model time steps)

    Keywords:
        long_name: string containing the variable's name (used for plotting)
        units: string containing the variable's units (used for plotting)

    Note that tmod and tobs don't need to be the same length: the two
    series are clipped to their overlapping period and one is
    interpolated onto the other's time steps.
    """
    self.__dict__.update(**kwargs)

    # Set the range inclusive of both the observations and the model result
    time0 = max(tmod[0], tobs[0])
    time1 = min(tmod[-1], tobs[-1])

    if time1 < time0:
        raise ValueError('the two datasets have no overlapping period.')

    # Don't interpolate if the two time vectors are already identical
    if not (tmod.shape[0] == tobs.shape[0] and
            tmod[0] == tobs[0] and tmod[-1] == tobs[-1]):

        # Clip both the model and observations to the overlapping period
        t0m = othertime.findNearest(time0, tmod)
        t1m = othertime.findNearest(time1, tmod)
        TSmod = timeseries(tmod[t0m:t1m], ymod[..., t0m:t1m], **kwargs)

        t0 = othertime.findNearest(time0, tobs)
        t1 = othertime.findNearest(time1, tobs)
        TSobs = timeseries(tobs[t0:t1], yobs[..., t0:t1], **kwargs)

        if interpmodel:
            # Interpolate the modelled values onto the observation steps
            tmod_i, ymod_i = TSmod.interp(tobs[t0:t1], axis=-1,
                                          method='nearestmask')
            self.TSmod = timeseries(tobs[t0:t1], ymod_i, **kwargs)
            self.TSobs = TSobs
        else:
            # Interpolate the observed values onto the model steps
            tobs_i, yobs_i = TSobs.interp(tmod[t0m:t1m], axis=-1,
                                          method='nearestmask')
            self.TSobs = timeseries(tmod[t0m:t1m], yobs_i, **kwargs)
            self.TSmod = TSmod
    else:
        self.TSmod = timeseries(tmod, ymod, **kwargs)
        self.TSobs = timeseries(tobs, yobs, **kwargs)

    # Check the dimension sizes
    assert self.TSmod.t.shape[0] == self.TSobs.t.shape[0], \
        'Number of time records not equal'
    assert self.TSmod.y.shape == self.TSobs.y.shape, \
        'Dimension sizes not equal'

    self.N = self.TSmod.t.shape[0]
    if self.N == 0:
        raise ValueError('zero model points detected')

    # Compute the error
    self.error = self.TSmod.y - self.TSobs.y

    self.calcStats()

    # Calculate the data limits
    self._calc_data_lims()
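# Usage sketch (added for illustration; not part of the original source).
# The enclosing class is assumed to be the model-vs-observations container
# defined in this module (called ModVsObs below); the synthetic series are
# placeholders.
def example_mod_vs_obs():
    import numpy as np

    # Hourly "model" and three-hourly "observations" over one month
    tmod = np.arange('2016-01-01', '2016-02-01', dtype='datetime64[h]')
    tobs = tmod[::3]
    ymod = np.sin(2 * np.pi * np.arange(tmod.shape[0]) / 24.)
    yobs = ymod[::3] + 0.1 * np.random.randn(tobs.shape[0])

    # Default (interpmodel=True): the model is nearest-interpolated onto
    # the observation time steps before the error statistics are computed
    mvo = ModVsObs(tmod, ymod, tobs, yobs,
                   long_name='water level', units='m')
    print(mvo.N, mvo.error.mean())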