Example No. 1
def test_roundtrip():
    """Minimal conversion from simple_utide_test."""
    ts = 735604
    duration = 35

    time = np.linspace(ts, ts+duration, 842)
    tref = (time[-1] + time[0]) / 2

    const = ut_constants.const

    amp = 1.0
    phase = 53
    lat = 45.5

    freq_cpd = 24 * const.freq

    jj = 48-1  # Python index for M2

    arg = 2 * np.pi * (time - tref) * freq_cpd[jj] - np.deg2rad(phase)
    time_series = amp * np.cos(arg)

    opts = dict(constit='auto',
                phase='raw',
                nodal=False,
                trend=False,
                method='ols',
                conf_int='linear',
                Rayleigh_min=0.95,
                )

    speed_coef = solve(time, time_series, time_series, lat=lat, **opts)
    elev_coef = solve(time, time_series, lat=lat, **opts)

    amp_err = amp - elev_coef['A'][0]
    phase_err = phase - elev_coef['g'][0]
    ts_recon = reconstruct(time, elev_coef).h

    # pure smoke testing of reconstruct
    vel = reconstruct(time, speed_coef)
    vel = reconstruct(time, speed_coef, constit=('M2', 'S2'))
    htmp = reconstruct(time, elev_coef, constit=('M2', 'S2'))
    vel = reconstruct(time, speed_coef, min_SNR=3)
    htmp = reconstruct(time, elev_coef, min_SNR=3)
    vel = reconstruct(time, speed_coef, min_PE=10)
    htmp = reconstruct(time, elev_coef, min_PE=10)
    vel = reconstruct(time, speed_coef, min_SNR=0)
    htmp = reconstruct(time, elev_coef, min_SNR=0)


    # Now the round-trip check, just for the elevation.
    err = np.sqrt(np.mean((time_series-ts_recon)**2))

    print(amp_err, phase_err, err)
    print(elev_coef['aux']['reftime'], tref)
    print(elev_coef['aux']['opt'])

    np.testing.assert_almost_equal(amp_err, 0)
    np.testing.assert_almost_equal(phase_err, 0)
    np.testing.assert_almost_equal(err, 0)
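A note on the hard-coded constituent index above: `jj` could instead be looked up by constituent name, as in this sketch (assuming `ut_constants.const.name` is a NumPy string array ordered like `const.freq`, so it compares elementwise):

    jj = int(np.argwhere(ut_constants.const.name == 'M2')[0][0])  # == 48 - 1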
Example No. 2
def test_masked_input():
    """Masked values in time and/or time series."""
    ts = 735604
    duration = 35

    time = np.linspace(ts, ts+duration, 842)
    tref = (time[-1] + time[0]) / 2

    const = ut_constants.const

    amp = 1.0
    phase = 53
    lat = 45.5

    freq_cpd = 24 * const.freq

    jj = 48-1  # Python index for M2

    arg = 2 * np.pi * (time - tref) * freq_cpd[jj] - np.deg2rad(phase)
    time_series = amp * np.cos(arg)

    opts = {
        'constit': 'auto',
        'phase': 'raw',
        'nodal': False,
        'trend': False,
        'method': 'ols',
        'conf_int': 'linear',
        'Rayleigh_min': 0.95,
    }

    t = np.ma.array(time)
    t[[10, 15, 20, 21]] = np.ma.masked

    series = np.ma.array(time_series)
    series[[11, 17, 22, 25]] = np.ma.masked

    speed_coef = solve(t, series, series, lat=lat, **opts)
    elev_coef = solve(t, series, lat=lat, **opts)

    amp_err = amp - elev_coef['A'][0]
    phase_err = phase - elev_coef['g'][0]
    ts_recon = reconstruct(time, elev_coef).h
    assert isinstance(ts_recon, np.ndarray)

    # pure smoke testing of reconstruct
    vel = reconstruct(time, speed_coef)
    assert isinstance(vel, Bunch)

    elev = reconstruct(time, elev_coef)
    assert isinstance(elev, Bunch)

    np.testing.assert_almost_equal(amp_err, 0)
    np.testing.assert_almost_equal(phase_err, 0)
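The same pattern extends to records with NaN gaps; a minimal sketch, assuming (as this test does) that solve treats masked samples as missing:

    series = np.ma.masked_invalid(raw)       # 'raw': hypothetical series with NaN gaps
    t = np.ma.array(time, mask=series.mask)  # mask the matching times
    coef = solve(t, series, lat=lat, **opts)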
Example No. 3
    def Harmonic_reconstruction(self,
                                harmo,
                                time_ind=slice(None),
                                debug=False,
                                **kwargs):
        """
        This function reconstructs the velocity components or the surface elevation
        from harmonic coefficients.
        Harmonic_reconstruction calls 'reconstruct'. This function assumes the
        harmonic analysis ('solve') has already been executed.

        Inputs:
          - harmo = harmonic coefficients from the harmonic analysis ('solve')
          - time_ind = time indices to process, list of integers

        Output:
          - Reconstruct = reconstructed signal, dictionary

        Utide's options:
        Options are the same as for 'reconstruct', which are shown below with
        their default values:
            cnstit = [], minsnr = 2, minpe = 0

        *Notes*
        For more detailed information about 'reconstruct', please see
        https://github.com/wesleybowman/UTide
        """
        debug = (debug or self._debug)
        time = self._var.matlabTime[time_ind]
        #TR_comments: Add debug flag in Utide: debug=self._debug
        Reconstruct = reconstruct(time, harmo, **kwargs)

        return Reconstruct
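A hypothetical usage sketch: `obj` stands in for an instance of the class above and `coef` for a Bunch returned by an earlier 'solve'; with the keyword pass-through, 'reconstruct' options (constit, min_SNR, min_PE in current UTide) can be given directly:

    recon = obj.Harmonic_reconstruction(coef, time_ind=slice(0, 1000), min_SNR=2)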
Example No. 4
    def Harmonic_reconstruction(self, harmo, time_ind=slice(None), debug=False, **kwarg):
        """
        This function reconstructs the velocity components or the surface elevation
        from harmonic coefficients.
        Harmonic_reconstruction calls 'reconstruct'. This function assumes the
        harmonic analysis ('solve') has already been executed.

        Inputs:
          - harmo = harmonic coefficients from the harmonic analysis ('solve')
          - time_ind = time indices to process, list of integers

        Output:
          - Reconstruct = reconstructed signal, dictionary

        Options:
        Options are the same as for 'reconstruct', which are shown below with
        their default values:
            cnstit = [], minsnr = 2, minpe = 0

        *Notes*
        For more detailed information about 'reconstruct', please see
        https://github.com/wesleybowman/UTide

        """
        debug = (debug or self._debug)
        time = self._var.matlabTime[time_ind]
        #TR_comments: Add debug flag in Utide: debug=self._debug
        Reconstruct = reconstruct(time, harmo, **kwarg)

        return Reconstruct
Example No. 5
    def reconstr(self, harmo, time_ind=slice(None), **kwarg):
        """
        This function reconstructs the velocity components or the surface elevation
        from harmonic coefficients.
        reconstr calls 'reconstruct'. This function assumes the harmonic
        analysis ('solve') has already been executed.

        Inputs:
          - harmo = harmonic coefficients from the harmonic analysis ('solve')

        Output:
          - Reconstruct = reconstructed signal, dictionary

        Keywords:
          - time_ind = time indices to process, list of integers

        Utide's options:
        Options are the same as for 'reconstruct', which are shown below with
        their default values:
            cnstit = [], minsnr = 2, minpe = 0

        *Notes*
        For more detailed information about 'reconstruct', please see
        https://github.com/wesleybowman/UTide
        """
        time = self._var.matlabTime[time_ind]
        ts_recon = reconstruct(time, harmo, **kwarg)
        return ts_recon
Example No. 6
def teknafeil(fig, canvas):
    fig.clf()
    ax = fig.add_subplot(111)
    tide = utide.reconstruct(data['dato'], coef)
    ax.plot(data['dato'], data['Dep'] - tide.h)
    canvas.draw()
    canvas.get_tk_widget().pack(fill=BOTH, expand=1)
Example No. 7
    def skew_surge(self, mag='mag', args={'Minimum SNR': 2,
                                          'Latitude': -36.0}):

        """ This function calculates the skew surge:
        see https://www.ntslf.org/storm-surges/skew-surges"""

        if hasattr(self.data,'latitude'):
            latitude=self.data.latitude
            if not self.data.latitude:
                latitude=args['Latitude']
        else:
            latitude=args['Latitude']

        xobs=self.data.index
        dt=(xobs[2]-xobs[1]).total_seconds()/3600. # in hours
        stime=np.array(date2num(xobs))
        lat=latitude
        ray=args['Minimum SNR']
        yobs = self.data[mag].values - np.nanmean(self.data[mag].values)
        opts = dict(method='ols',conf_int='linear', Rayleigh_min=ray)
        coef = solve(stime,yobs,lat= lat,**opts)


        min_time=xobs[0]
        max_time=xobs[-1]
        min_dt=15*60


        xpredi = pd.period_range(min_time, max_time,freq='%is'%min_dt)
        xpredi=xpredi.to_timestamp()

        if hasattr(self.data[mag],'short_name'):
            short_name=self.data[mag].short_name
        else:
            short_name=mag

        ypredi = reconstruct(np.array(date2num(xpredi)), coef).h


        pe,tr=peaks(ypredi)

        df_new=pd.DataFrame(index=xobs)
        skew=copy.deepcopy(yobs)
        skew[:]=np.nan
                
        for i in range(0,len(tr)-1):
            idx_pred=np.logical_and(xpredi>xpredi[tr[i]],xpredi<xpredi[tr[i+1]])
            idx_obs=np.logical_and(xobs>xpredi[tr[i]],xobs<xpredi[tr[i+1]])

            max_pre=np.max(ypredi[idx_pred])
            max_obs=np.max(yobs[idx_obs])
            max_obs_idx=np.argmax(yobs[idx_obs])
            skew[idx_obs.nonzero()[0][max_obs_idx]]=max_obs-max_pre

        df_new['skew_surge']=skew


        return df_new.dropna()
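A toy illustration of the skew-surge definition implemented above: within each tidal cycle it is the highest observed level minus the highest predicted level, regardless of when the two maxima occur (values below are made up):

    obs_cycle = np.array([0.1, 0.9, 1.4, 1.0, 0.2])   # observed levels, one cycle
    pred_cycle = np.array([0.0, 0.8, 1.1, 0.9, 0.1])  # predicted tide, same cycle
    skew = obs_cycle.max() - pred_cycle.max()         # 1.4 - 1.1 = 0.3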
Example No. 8
    def Harmonic_reconstruction(self, harmo, elevation=True, velocity=False,
                                time_ind=slice(None), debug=False, **kwarg):
        '''
        Description:
        ----------
        This function reconstructs the velocity components or the surface elevation
        from harmonic coefficients.
        Harmonic_reconstruction calls reconstruct. This function assumes the
        harmonic analysis (solve) has already been executed.

        Inputs:
        ------
          - harmo = harmonic coefficients from the harmonic analysis (solve)
          - elevation = True means the reconstruction is done for elevation.
          - velocity = True means the reconstruction is done for velocity.
          - time_ind = time indices to process, list of integers

        Output:
        ------         
          - Reconstruct = reconstructed signal, dictionary

        Options:
        -------
        Options are the same as for reconstruct, which are shown below with
        their default values:
            cnstit = [], minsnr = 2, minpe = 0

        Notes:
        -----
        For more detailed information about reconstruct, please see
        https://github.com/wesleybowman/UTide

        '''
        debug = (debug or self._debug)
        time = self._var.matlabTime[time_ind]
        #TR_comments: Add debug flag in Utide: debug=self._debug
        Reconstruct = {}
        # reconstruct returns a Bunch, not a tuple; pick the fields we need
        rec = reconstruct(time, harmo, **kwarg)
        if velocity:
            Reconstruct['U'] = rec.u
            Reconstruct['V'] = rec.v
        if elevation:
            Reconstruct['el'] = rec.h
        return Reconstruct
Example No. 9
def test_roundtrip(conf_int):
    """Minimal conversion from simple_utide_test."""

    opts = {
        "constit": "auto",
        "phase": "raw",
        "nodal": False,
        "trend": False,
        "method": "ols",
        "conf_int": conf_int,
        "Rayleigh_min": 0.95,
        "epoch": "python",
    }

    speed_coef = solve(time, time_series, time_series, lat=lat, **opts)
    elev_coef = solve(time, time_series, lat=lat, **opts)

    amp_err = amp - elev_coef["A"][0]
    phase_err = phase - elev_coef["g"][0]
    ts_recon = reconstruct(time, elev_coef, epoch="python").h

    # pure smoke testing of reconstruct
    vel = reconstruct(time, speed_coef)
    vel = reconstruct(time, speed_coef, constit=("M2", "S2"))
    htmp = reconstruct(time, elev_coef, constit=("M2", "S2"))
    vel = reconstruct(time, speed_coef, min_SNR=3)
    htmp = reconstruct(time, elev_coef, min_SNR=3)
    vel = reconstruct(time, speed_coef, min_PE=10)
    htmp = reconstruct(time, elev_coef, min_PE=10)
    vel = reconstruct(time, speed_coef, min_SNR=0)
    htmp = reconstruct(time, elev_coef, min_SNR=0)
    assert isinstance(vel, Bunch)
    assert isinstance(htmp, Bunch)

    # Now the round-trip check, just for the elevation.
    err = np.sqrt(np.mean((tide - ts_recon)**2))

    print(amp_err, phase_err, err)
    print(elev_coef["aux"]["reftime"], tref)
    print(elev_coef["aux"]["opt"])

    np.testing.assert_almost_equal(amp_err, 0, decimal=4)
    np.testing.assert_almost_equal(phase_err, 0, decimal=4)
    np.testing.assert_almost_equal(err, 0, decimal=4)
Example No. 10
    def Tidal_analysis(self, time_orig, time, WD_raw, u_raw, v_raw,
                       Rayleigh_min):
        coef_WD = utide.solve(
            time_orig,
            WD_raw,
            lat=53,
            nodal=False,
            trend=False,
            method='ols',
            conf_int='linear',
            Rayleigh_min=Rayleigh_min,
        )

        WD_predict = utide.reconstruct(time, coef_WD)

        coef_u = utide.solve(
            time_orig,
            u_raw,
            lat=53,
            nodal=False,
            trend=False,
            method='ols',
            conf_int='linear',
            Rayleigh_min=Rayleigh_min,
        )

        u_predict = utide.reconstruct(time, coef_u)

        coef_v = utide.solve(
            time_orig,
            v_raw,
            lat=53,
            nodal=False,
            trend=False,
            method='ols',
            conf_int='linear',
            Rayleigh_min=Rayleigh_min,
        )

        v_predict = utide.reconstruct(time, coef_v)

        clear_output()
        return WD_predict, u_predict, v_predict
Example No. 11
    def utidePredict(self):
        '''UTide prediction processing'''

        input_dict2 = self.inputDict2()

        time_predic = input_dict2['predicted time']
        time_predic_num = date2num(time_predic.to_pydatetime())

        coef = self.utideAnalyse()
        msl = coef.mean
        predic = reconstruct(time_predic_num, coef, min_SNR=0)

        return {'prediction': predic, 'MSL': msl}
Example No. 12
def test_solve(make_data):
    time, u, v = make_data
    coef = solve(time, u, v,
                 lat=-42.5,
                 nodal=False,
                 trend=False,
                 method='ols',
                 conf_int='linear',
                 Rayleigh_min=0.95,)
    assert isinstance(coef, Bunch)

    tide = reconstruct(time, coef)
    assert isinstance(tide, Bunch)
Example No. 13
def test_masked_input():
    """Masked values in time and/or time series."""

    opts = {
        "constit": "auto",
        "phase": "raw",
        "nodal": False,
        "trend": False,
        "method": "ols",
        "conf_int": "linear",
        "Rayleigh_min": 0.95,
        "epoch": "python",
    }

    t = np.ma.array(time)
    t[[10, 15, 20, 21]] = np.ma.masked

    series = np.ma.array(time_series)
    series[[11, 17, 22, 25]] = np.ma.masked

    speed_coef = solve(t, series, series, lat=lat, **opts)
    elev_coef = solve(t, series, lat=lat, **opts)

    amp_err = amp - elev_coef["A"][0]
    phase_err = phase - elev_coef["g"][0]
    ts_recon = reconstruct(time, elev_coef).h
    assert isinstance(ts_recon, np.ndarray)

    # pure smoke testing of reconstruct
    vel = reconstruct(time, speed_coef)
    assert isinstance(vel, Bunch)

    elev = reconstruct(time, elev_coef)
    assert isinstance(elev, Bunch)

    np.testing.assert_almost_equal(amp_err, 0, decimal=4)
    np.testing.assert_almost_equal(phase_err, 0, decimal=4)
Example No. 14
def test_roundtrip():
    """Minimal conversion from simple_utide_test."""
    ts = 735604
    duration = 35

    time = np.linspace(ts, ts+duration, 842)
    tref = (time[-1] + time[0]) / 2

    const = ut_constants.const

    amp = 1.0
    phase = 53
    lat = 45.5

    freq_cpd = 24 * const.freq

    jj = 48-1  # Python index for M2

    arg = 2 * np.pi * (time - tref) * freq_cpd[jj] - np.deg2rad(phase)
    time_series = amp * np.cos(arg)

    opts = dict(constit='auto',
                phase='raw',
                nodal=False,
                trend=False,
                method='ols',
                conf_int='linear',
                Rayleigh_min=0.95,
                )

    # speed_coef = solve(time, time_series, time_series, lat=lat, **opts)
    elev_coef = solve(time, time_series, lat=lat, **opts)

    amp_err = amp - elev_coef['A'][0]
    phase_err = phase - elev_coef['g'][0]
    ts_recon = reconstruct(time, elev_coef).h

    # vel = reconstruct(time, speed_coef)

    err = np.sqrt(np.mean((time_series-ts_recon)**2))

    print(amp_err, phase_err, err)
    print(elev_coef['aux']['reftime'], tref)
    print(elev_coef['aux']['opt'])

    np.testing.assert_almost_equal(amp_err, 0)
    np.testing.assert_almost_equal(phase_err, 0)
    np.testing.assert_almost_equal(err, 0)
Example No. 15
    def predict(self, mag='mag',
                args={'minimum time': datetime, 'maximum time': datetime,
                      'dt(s)': 60, 'Minimum SNR': 2,
                      'Latitude': -36.0,
                      }):

        """ This function predicts the tide by first detiding a time series.
        Works even if NaNs are present in the time series."""

        if hasattr(self.data,'latitude'):
            latitude=self.data.latitude
            if not self.data.latitude:
                latitude=args['Latitude']
        else:
            latitude=args['Latitude']

        time=self.data.index
        dt=(time[2]-time[1]).total_seconds()/3600. # in hours
        stime=np.array(date2num(time))
        lat=latitude
        ray=args['Minimum SNR']
        demeaned = self.data[mag].values - np.nanmean(self.data[mag].values)
        opts = dict(method='ols',conf_int='linear', trend=False, Rayleigh_min=ray)
        coef = solve(stime,demeaned,lat= lat,**opts)


        min_time=min(args['minimum time'],time[0])
        max_time=max(args['maximum time'],time[-1])
        min_dt=args['dt(s)']


        idx = pd.period_range(args['minimum time'], args['maximum time'],freq='%is'%args['dt(s)'])
        idx=idx.to_timestamp()
        df_new=pd.DataFrame(index=idx)

        if hasattr(self.data[mag],'short_name'):
            short_name=self.data[mag].short_name
        else:
            short_name=mag

        df_new[short_name+'t'] = reconstruct(np.array(date2num(df_new.index)), coef).h
        df_new.index.name='time'
        
        self.dfout = df_new  # pd.merge_asof(self.dfout,df_new,on='time',direction='nearest', tolerance=pd.Timedelta("1s")).set_index('time')
        self.dfout.index.name = 'time'


        return self.dfout
Example No. 16
    def detide(self, mag='mag',
               args={'Minimum SNR': 2,
                     'Latitude': -36.0,
                     'folder out': os.getcwd(),
                     }):

        """ This function detides a time series.
        Works even if NaNs are present in the time series."""

        if hasattr(self.data,'latitude'):
            latitude=self.data.latitude
            if not self.data.latitude:
                latitude=args['Latitude']
        else:
            latitude=args['Latitude']
        if hasattr(self.data[mag],'short_name'):
            short_name=self.data[mag].short_name
        else:
            short_name=mag
            
        time=self.data.index
        dt=(time[2]-time[1]).total_seconds()/3600 # in hours
        stime=np.array(date2num(time))
        lat=latitude
        if hasattr(self.data,'filename'):
            outfile=os.path.join(args['folder out'],os.path.splitext(self.data.filename)[0]+'_Conc.xlsx')
        else:
            outfile=os.path.join(args['folder out'],'Conc.xlsx')

        ray=args['Minimum SNR']
        demeaned = self.data[mag].values - np.nanmean(self.data[mag].values)

        opts = dict(method='ols',conf_int='linear', Rayleigh_min=ray)
        coef = solve(stime,demeaned,lat= lat,**opts)
        ts_recon = reconstruct(stime, coef).h

        self.dfout[short_name+'t']=ts_recon
        self.dfout[short_name+'o']=demeaned-ts_recon

        self._export_cons(outfile,short_name,coef['name'],coef['A'],coef['g'])
        
        return self.dfout
Example No. 17
def test_robust():
    """
    Quick check that method='robust' works; no real checking
    of results, other than by using "py.test -s" and noting that
    the results are reasonable, and the weights for the outliers
    are very small.
    Minimal conversion from simple_utide_test

    """

    # Add noise
    np.random.seed(13579)
    noisy = tide + 0.01 * np.random.randn(len(time))

    # Add wild points
    noisy[:5] = 10
    noisy[-5:] = -10

    opts = {
        "constit": "auto",
        "phase": "raw",
        "nodal": False,
        "trend": False,
        "method": "robust",
        "conf_int": "linear",
        "Rayleigh_min": 0.95,
        "epoch": "python",
    }

    speed_coef = solve(time, noisy, noisy, lat=lat, **opts)
    elev_coef = solve(time, noisy, lat=lat, **opts)

    print(speed_coef.weights, elev_coef.weights)
    print(speed_coef.rf, elev_coef.rf)

    ts_recon = reconstruct(time, elev_coef, epoch="python").h
    err = np.std(tide - ts_recon)
    np.testing.assert_almost_equal(err, 0, decimal=2)
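A follow-on check one could add, assuming (as the printout above suggests) that the robust fit carries one IRLS weight per input sample, and taking 0.1 as an illustrative cutoff: the wild points planted at both ends should be strongly downweighted.

    assert elev_coef.weights[:5].max() < 0.1   # leading outliers
    assert elev_coef.weights[-5:].max() < 0.1  # trailing outliers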
Example No. 18
    def reconstr(self, harmo, time_ind=slice(None), **kwarg):
        '''
        Description:
        ----------
        This function reconstructs the velocity components or the surface elevation
        from harmonic coefficients.
        reconstr calls reconstruct. This function assumes the harmonic
        analysis (solve) has already been executed.

        Inputs:
        ------
          - harmo = harmonic coefficients from the harmonic analysis (solve)

        Output:
        ------         
          - Reconstruct = reconstructed signal, dictionary

        Keywords:
        ------
          - time_ind = time indices to process, list of integers

        Options:
        -------
        Options are the same as for reconstruct, which are shown below with
        their default values:
            cnstit = [], minsnr = 2, minpe = 0

        Notes:
        -----
        For more detailed information about reconstruct, please see
        https://github.com/wesleybowman/UTide

        '''
        time = self._var.matlabTime[time_ind]
        ts_recon = reconstruct(time, harmo, **kwarg)
        return ts_recon
Example No. 19
def tidaldominesrekkja(item,
                       mag,
                       direct,
                       dato,
                       dypid,
                       lat=62,
                       verbose=True,
                       trend=True,
                       dataut=False):
    tin = np.array(dato)
    u = mag * np.sin(np.deg2rad(direct))
    v = mag * np.cos(np.deg2rad(direct))
    coef = utide.solve(tin, u, v, lat=lat, verbose=verbose, trend=trend)
    reconstruckt = utide.reconstruct(tin, coef=coef, verbose=verbose)
    orgmag = [x for x in mag if not np.isnan(x)]
    recmag = [
        np.sqrt(x**2 + y**2)
        for x, y in zip(u - reconstruckt.u, v - reconstruckt.v)
        if not np.isnan(x)
    ]
    if not dataut:
        out = ''
        out += str(item)
        out += '&\t%2.2f' % (dypid, )
        out += '&\t%2.2f\\%%' % (100 * np.var(recmag) / np.var(orgmag), )
        out += '&\t%6.0f' % (sum([coef.Lsmaj[i] for i in range(6)]), )
        out += '&\t%s' % ('Ja' if 100 * np.var(recmag) / np.var(orgmag) < 50
                          and sum([coef.Lsmaj[i]
                                   for i in range(6)]) > 150 else 'Nei', )
        out += '\\\\\n'
        return out
    else:
        part_av_var = 100 * np.var(recmag) / np.var(orgmag)
        sum_av_5 = sum([coef.Lsmaj[i] for i in range(6)])
        sjovarfallsdrivi = part_av_var < 50 and sum_av_5 > 150
        return part_av_var, sum_av_5, sjovarfallsdrivi
Example No. 20
def c_excoef(commonfol, basefol, requiredStationsFile, stationsDB):

    #required stations
    station_names_req = np.loadtxt(requiredStationsFile,
                                   delimiter='\t',
                                   dtype=str).tolist()

    #extracting data from all stations database
    maindatabase = pd.read_csv(stationsDB, header=0, delimiter=',')
    maindatabase.set_index('name', inplace=True)

    #import simulated values
    dateparse = lambda x: dt.strptime(x, '%Y-%m-%d %H:%M:%S')
    df_simul = pd.read_csv(path.join(basefol, 'telemac_variables',
                                     'variables_all_stations',
                                     'free_surface_all_stations.dat'),
                           header=0,
                           parse_dates=['date'],
                           date_parser=dateparse,
                           index_col=0,
                           squeeze=True)
    df_simul.set_index('date', inplace=True)

    #import measured values
    dateparse2 = lambda x: dt.strptime(x, '%d-%b-%Y %H:%M:%S')
    path2 = path.join(commonfol, 'measurements')
    for file in os.listdir(path2):
        if file.endswith('.wl.dat'):
            df_meas = pd.read_csv(path.join(path2, file),
                                  header=0,
                                  parse_dates=['Time'],
                                  date_parser=dateparse2,
                                  index_col=0,
                                  squeeze=True)

    #stations to be extracted
    station_names_for_comp = []
    for station in station_names_req:
        if (station in df_meas.columns) and (station in df_simul.columns) and (
                station in maindatabase.index):
            #print(station)
            station_names_for_comp.append(station)
        #elif station not in df_meas.columns:
        #print(station + ' does not have measured data')
        #elif station not in df_simul.columns:
        #print(station + ' does not have simulated data')
        #elif station not in stations:
        #print(station + ' does not have enough data in StationsDatabase')

    #check for extracted coefficients (to avoid re-extracting coefficients)
    #read stations names in the measured coefficients (if available)
#    try:
#        simul_coef_stations = pd.read_csv(path.join(basefol , 'coef_simulated' , 'simul_amplitude_all_stations.dat')).columns
#        meas_coef_stations = pd.read_csv(path.join(commonfol , 'coef_measured' , 'meas_amplitude_all_stations.dat')).columns
#    except :
#        print('no previous coefficient are generated')
#        simul_coef_stations = []
#        meas_coef_stations = []
#
#    stations_for_ext = []
#    for station in station_names_for_comp:
#        if (station not in simul_coef_stations) or (station not in meas_coef_stations):
#            stations_for_ext.append(station)
#

#    df_meas_crop = df_meas[period_s : period_e ]
#    df_simul_crop = df_simul[period_s : '2015-01-10' ]
#    date_inter = df_simul_crop.index.intersection(df_meas_crop.index)

#    datenum_meas = list(map(datenumaz,df_meas_crop.index.tolist()))
#    datenum_simul = list(map(datenumaz,df_simul_crop.index.tolist()))

#slicing the required stations and adding _meas and _simul to suffixes
# to avoid duplication (_x and _y) while joining
    dfmeasforcomp = df_meas[station_names_for_comp][period_s:period_e]
    dfmeasforcomp = dfmeasforcomp.add_suffix('_meas')
    dfsimulforcomp = df_simul[station_names_for_comp][period_s:period_e]
    dfsimulforcomp = dfsimulforcomp.add_suffix('_simul')
    dfmeassimulforcomp = dfmeasforcomp.join(dfsimulforcomp, how='inner')
    dfmeassimulforcomp = dfmeassimulforcomp.sort_index(axis=1)
    #adding a second level in suffixes (multiindexing)
    #first level station names, second level meas, simul
    dfmeassimulforcomp.columns = pd.MultiIndex.from_tuples(
        [tuple(c.split('_')) for c in dfmeassimulforcomp.columns])
    #keep the order of stations
    dfmeassimulforcomp = dfmeassimulforcomp[station_names_for_comp]

    #converting datetime to datenum
    def datenumaz(d):
        return 366 + d.toordinal() + (
            d - dt.fromordinal(d.toordinal())).total_seconds() / (24 * 60 * 60)
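    # Sanity check for datenumaz: MATLAB datenums count days from year 0,
    # hence the fixed 366-day offset from Python's proleptic ordinal, e.g.
    # datenumaz(dt(2000, 1, 1)) == 730486.0 == MATLAB datenum('2000-01-01').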

#    def dt2dn(dt):
#       ord = dt.toordinal()
#       mdn = dt + datetime.timedelta(days = 366)
#       frac = (dt-datetime.datetime(dt.year,dt.month,dt.day,0,0,0)).seconds / (24.0 * 60.0 * 60.0)
#       return mdn.toordinal() + frac

    datesforpeaks = dfmeassimulforcomp.index
    #and changing the index to datenum
    #dfmeassimulforcomp.index = dfmeassimulforcomp.index.map(datenumaz)

    #datenum = dfmeassimulforcomp.index
    datenum = dfmeassimulforcomp.index.map(datenumaz)

    #required tide abbreviations
    pTides = [
        'MM', 'MF', 'Q1', 'O1', 'K1', 'SO1', 'MU2', 'N2', 'NU2', 'M2', 'S2',
        '2SM2', 'MO3', 'MN4', 'M4', 'MS4', 'MK4', 'S4', 'M6', '2MS6', 'S6',
        'M8', 'M10', 'M12'
    ]
    #pTides = ['MM','MF','Q1','O1','K1','SO1','MU2','N2','NU2','M2','S2','2SM2','MO3','MN4','M4'
    #          ,'MS4','MK4','S4','M6','2MS6','S6','M8','M10','M12' ]

    #making directories for coefficients
    if not os.path.exists(os.path.join(basefol, 'coef_simulated')):
        os.makedirs(os.path.join(basefol, 'coef_simulated'))
    path_s = os.path.join(basefol, 'coef_simulated')

    if not os.path.exists(os.path.join(commonfol, 'coef_measured')):
        os.makedirs(os.path.join(commonfol, 'coef_measured'))
    path_m = os.path.join(commonfol, 'coef_measured')

    dfa = pd.DataFrame()
    dfg = pd.DataFrame()
    idx = pd.IndexSlice
    df_recons_h_meas = pd.DataFrame(columns=station_names_for_comp,
                                    index=datesforpeaks)
    df_recons_h_simul = pd.DataFrame(columns=station_names_for_comp,
                                     index=datesforpeaks)
    station_no_nan = []

    rmse_v_max_t = []
    rmse_v_min_t = []
    rmse_h_max_t = []
    rmse_h_min_t = []

    #    np_recons_h_meas = np.empty((len(datenum) , len(station_names_for_comp)) )
    #    np_recons_h_simul = np.empty((len(datenum) , len(station_names_for_comp)) )

    print('-------------------------------------')
    print('-------------------------------------')
    print('-------------------------------------')
    print('Extracting coefficients and reconstructed water levels ...')
    #i=0
    n = 0
    for station in station_names_for_comp:

        latitude = maindatabase.Latitude[station]

        n += 1
        print('station {} of {} ...'.format(n, len(station_names_for_comp)))

        if not dfmeassimulforcomp.loc[:, idx[
                station, ['meas']]].isnull().all().bool():
            #MonteCarlo , ols
            #method = 'ols' , conf_int = 'MC'

            #tempcoefmeas = dfmeassimulforcomp.loc[:,idx[station ,['meas']]].apply(lambda x : (utide.solve(datenum , x , lat = latitude  , constit = pTides)))
            print('-------------------------------------')
            print(station + ' ...coefficient calcs ...measured values ...')
            coef_meas = utide.solve(
                np.array(datenum),
                dfmeassimulforcomp.loc[:, idx[station, ['meas']]].values[:, 0],
                lat=latitude,
                constit=pTides)
            #couldn't find how to save a coef
            #so i decided to merge the get_peaks function into this script
            #tempcoefsimul = dfmeassimulforcomp.loc[:,idx[station ,['simul']]].apply(lambda x : (utide.solve(datenum , x , lat = latitude ,  constit = pTides)))
            print('-------------------------------------')
            print(station + ' ...coefficient calcs ...simulated values ...')
            coef_simul = utide.solve(
                np.array(datenum),
                dfmeassimulforcomp.loc[:, idx[station, ['simul']]].values[:,
                                                                          0],
                lat=latitude,
                constit=pTides)

            #reconstructing the coef for the peak comparison
            print('-------------------------------------')
            print(
                station +
                ' ...reconstructed water levels calcs ...measured values ...')
            recons_coef_meas = utide.reconstruct(np.array(datenum), coef_meas)
            df_recons_h_meas[station] = recons_coef_meas['h']
            #np_recons_h_meas[: , i] = recons_coef_meas['h']
            print('-------------------------------------')
            print(
                station +
                ' ...reconstructed water levels calcs ...simulated values ...')
            recons_coef_simul = utide.reconstruct(np.array(datenum),
                                                  coef_simul)
            df_recons_h_simul[station] = recons_coef_simul['h']
            #np_recons_h_simul[: , i] = recons_coef_simul['h']

            #tempcoefmeas.to_csv(path.join(path_m , 'coef_'+station+'.dat'))
            #tempcoefsimul.to_csv(path.join(path_s , 'coef_'+station+'.dat'))

            #measindex = list(tempcoefmeas[station , 'meas']['name'])
            measindex = coef_meas['name'].tolist()
            #simulindex = list(tempcoefsimul[station , 'simul']['name'])
            simulindex = coef_simul['name'].tolist()

            #tempdfameas = tempcoefmeas.loc[idx['A'] , :].apply(pd.Series).T
            tempdfameas = pd.Series(coef_meas['A']).to_frame()
            tempdfameas.index = measindex
            tempdfameas.columns = [station]
            tempdfameas.columns = pd.MultiIndex.from_product(
                [tempdfameas.columns, ['meas']])
            #tempdfasimul = tempcoefsimul.loc[idx['A'] , :].apply(pd.Series).T
            tempdfasimul = pd.Series(coef_simul['A']).to_frame()
            tempdfasimul.index = simulindex
            tempdfasimul.columns = [station]
            tempdfasimul.columns = pd.MultiIndex.from_product(
                [tempdfasimul.columns, ['simul']])

            dfa = pd.concat([dfa, tempdfameas], axis=1, sort=True)
            dfa = pd.concat([dfa, tempdfasimul], axis=1, sort=True)

            #tempdfgmeas = tempcoefmeas.loc[idx['g'] , :].apply(pd.Series).T
            tempdfgmeas = pd.Series(coef_meas['g']).to_frame()
            tempdfgmeas.index = measindex
            tempdfgmeas.columns = [station]
            tempdfgmeas.columns = pd.MultiIndex.from_product(
                [tempdfgmeas.columns, ['meas']])
            #tempdfgsimul = tempcoefsimul.loc[idx['g'] , :].apply(pd.Series).T
            tempdfgsimul = pd.Series(coef_simul['g']).to_frame()
            tempdfgsimul.index = simulindex
            tempdfgsimul.columns = [station]
            tempdfgsimul.columns = pd.MultiIndex.from_product(
                [tempdfgsimul.columns, ['simul']])

            dfg = pd.concat([dfg, tempdfgmeas], axis=1, sort=True)
            dfg = pd.concat([dfg, tempdfgsimul], axis=1, sort=True)

            #i+=1
            station_no_nan.append(station)

            print('-------------------------------------')
            print(station + ' ...finding peaks calcs ...')

            #finding peaks
            #in the following part, 2 represents simulated values and 3 represents the measured ones
            #simul before reconstruction
            #DELTA = 0.3 (7 hours)
            minpeaks2, maxpeaks2 = findpeaks(
                dfmeassimulforcomp.loc[:, idx[station, ['simul']]].iloc[:, 0],
                DELTA=0.3)

            fig, ax = plt.subplots()
            ax.set_ylabel('water level')
            ax.set_xlabel('Time')
            ax.set_title('Peaks in TimeSeries, simul before reconstruction')
            dfmeassimulforcomp.loc[:, idx[station, ['simul']]].iloc[:,
                                                                    0].plot()
            ax.scatter(*zip(*minpeaks2), color='red', label='min')
            ax.scatter(*zip(*maxpeaks2), color='green', label='max')
            ax.legend()
            ax.grid(True)
            plt.show()

            #meas before reconstruction
            minpeaks3, maxpeaks3 = findpeaks(
                dfmeassimulforcomp.loc[:, idx[station, ['meas']]].iloc[:, 0],
                DELTA=0.3)

            fig, ax = plt.subplots()
            ax.set_ylabel('water level')
            ax.set_xlabel('Time')
            ax.set_title('Peaks in TimeSeries, meas before reconstruction')
            dfmeassimulforcomp.loc[:, idx[station, ['meas']]].iloc[:, 0].plot()
            ax.scatter(*zip(*minpeaks3), color='red', label='min')
            ax.scatter(*zip(*maxpeaks3), color='green', label='max')
            ax.legend()
            ax.grid(True)
            plt.show()

            #meas after reconstruction
            minpeaks3r, maxpeaks3r = findpeaks(df_recons_h_meas[station],
                                               DELTA=0.3)

            fig, ax = plt.subplots()
            ax.set_ylabel('water level')
            ax.set_xlabel('Time')
            ax.set_title('Peaks in TimeSeries, meas after rcs')
            df_recons_h_meas[station].plot()
            ax.scatter(*zip(*minpeaks3r), color='red', label='min')
            ax.scatter(*zip(*maxpeaks3r), color='green', label='max')
            ax.legend()
            ax.grid(True)
            plt.show()

            #simul after reconstruction
            minpeaks2r, maxpeaks2r = findpeaks(df_recons_h_simul[station],
                                               DELTA=0.3)

            fig, ax = plt.subplots()
            ax.set_ylabel('water level')
            ax.set_xlabel('Time')
            ax.set_title('Peaks in TimeSeries, simul after rcs')
            df_recons_h_simul[station].plot()
            ax.scatter(*zip(*minpeaks2r), color='red', label='min')
            ax.scatter(*zip(*maxpeaks2r), color='green', label='max')
            ax.legend()
            ax.grid(True)
            plt.show()

            #extracting locations of max and min peaks, before and after
            #reconstruction; each peak is a (location, value) tuple
            maxlcs2 = [p[0] for p in maxpeaks2]
            minlcs2 = [p[0] for p in minpeaks2]
            maxlcs3 = [p[0] for p in maxpeaks3]
            minlcs3 = [p[0] for p in minpeaks3]
            maxlcs2r = [p[0] for p in maxpeaks2r]
            minlcs2r = [p[0] for p in minpeaks2r]
            maxlcs3r = [p[0] for p in maxpeaks3r]
            minlcs3r = [p[0] for p in minpeaks3r]

            #getting indices based in the reconstructed values
            Dmax2 = cdist(
                np.array(list(map(datenumaz, maxlcs2r))).reshape(-1, 1),
                np.array(list(map(datenumaz, maxlcs2))).reshape(-1, 1))
            Dmin2 = cdist(
                np.array(list(map(datenumaz, minlcs2r))).reshape(-1, 1),
                np.array(list(map(datenumaz, minlcs2))).reshape(-1, 1))

            Dmax3 = cdist(
                np.array(list(map(datenumaz, maxlcs3r))).reshape(-1, 1),
                np.array(list(map(datenumaz, maxlcs3))).reshape(-1, 1))
            Dmin3 = cdist(
                np.array(list(map(datenumaz, minlcs3r))).reshape(-1, 1),
                np.array(list(map(datenumaz, minlcs3))).reshape(-1, 1))

            (indxmax2r, indxmax2) = np.where(Dmax2 == np.min(Dmax2, axis=0))
            (indxmax3r, indxmax3) = np.where(Dmax3 == np.min(Dmax3, axis=0))

            (indxmin2r, indxmin2) = np.where(Dmin2 == np.min(Dmin2, axis=0))
            (indxmin3r, indxmin3) = np.where(Dmin3 == np.min(Dmin3, axis=0))

            #dataframes for high and low water levels (max and min): index - location of peak - value of peak
            df_max2 = pd.DataFrame(data=maxpeaks2,
                                   columns=['maxsimullcs', 'maxsimulpeaks'],
                                   index=indxmax2r)

            df_min2 = pd.DataFrame(data=minpeaks2,
                                   columns=['minsimullcs', 'minsimulpeaks'],
                                   index=indxmin2r)

            df_max3 = pd.DataFrame(data=maxpeaks3,
                                   columns=['maxmeaslcs', 'maxmeaspeaks'],
                                   index=indxmax3r)

            df_min3 = pd.DataFrame(data=minpeaks3,
                                   columns=['minmeaslcs', 'minmeaspeaks'],
                                   index=indxmin3r)

            #joined dataframes
            df_max = df_max2.join(df_max3, how='inner')
            df_min = df_min2.join(df_min3, how='inner')

            #rmse calc (RMSE = sqrt(MSE), i.e. MSE**0.5, not 0.5**MSE)
            rmse_v_max = mean_squared_error(df_max['maxmeaspeaks'],
                                            df_max['maxsimulpeaks'])**0.5
            rmse_v_min = mean_squared_error(df_min['minmeaspeaks'],
                                            df_min['minsimulpeaks'])**0.5

            rmse_h_max = mean_squared_error(
                list(map(datenumaz, df_max['maxmeaslcs'].tolist())),
                list(map(datenumaz, df_max['maxsimullcs'].tolist())))**0.5
            rmse_h_min = mean_squared_error(
                list(map(datenumaz, df_min['minmeaslcs'].tolist())),
                list(map(datenumaz, df_min['minsimullcs'].tolist())))**0.5

            #rmse for stations
            rmse_v_max_t.append(rmse_v_max)
            rmse_v_min_t.append(rmse_v_min)
            rmse_h_max_t.append(rmse_h_max)
            rmse_h_min_t.append(rmse_h_min)

    #save amplitude and phase shift for all stations
    dfa.loc[:, idx[station_no_nan, ['meas']]].to_csv(
        path.join(path_m, 'meas_amplitude_all_stations.dat'))
    dfa.loc[:, idx[station_no_nan, ['simul']]].to_csv(
        path.join(path_s, 'simul_amplitude_all_stations.dat'))
    dfg.loc[:, idx[station_no_nan, ['meas']]].to_csv(
        path.join(path_m, 'meas_phaseshift_all_stations.dat'))
    dfg.loc[:, idx[station_no_nan, ['simul']]].to_csv(
        path.join(path_s, 'simul_phaseshift_all_stations.dat'))

    #adding diff column for each station
    #A
    a = dfa.loc[:, pd.IndexSlice[:, 'simul']].sub(
        dfa.loc[:, pd.IndexSlice[:, 'meas']].values,
        1).rename(columns={'simul': 'diffr'})
    dfaf = dfa.join(a).sort_index(axis=1)
    #keep the order of stations
    dfaf = dfaf[station_no_nan]
    #g
    a = dfg.loc[:, pd.IndexSlice[:, 'simul']].sub(
        dfg.loc[:, pd.IndexSlice[:, 'meas']].values,
        1).rename(columns={'simul': 'diffr'})
    dfgf = dfg.join(a).sort_index(axis=1)
    #keep the order of stations
    dfgf = dfgf[station_no_nan]

    #all data dataframe
    dfag = pd.concat([dfaf, dfgf], keys=['A', 'g'])

    #plots
    #making directory to save the comparisons
    if not os.path.exists(os.path.join(basefol, 'ptcomp')):
        os.makedirs(os.path.join(basefol, 'ptcomp'))
    path_1 = os.path.join(basefol, 'ptcomp')

    #transpose df
    dfaft = dfaf.T
    dfgft = dfgf.T

    print('-------------------------------------')
    print('-------------------------------------')
    print('-------------------------------------')
    print('Partial tides comparison plots ...')

    n = 0
    #looping through tides
    for tide in pTides:

        n += 1
        print('tide {} of {} ...'.format(n, len(pTides)))
        #A
        #subplot1 meas vs simul, subplot2 diff; for each tide

        fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=False, figsize=(32, 22))

        dfaft[tide].unstack(level=-1).reindex(station_no_nan).plot(
            y=['meas', 'simul'],
            ax=ax1,
            legend=True,
            grid=True,
            title=' Amplitude comparison for tide: ' + tide,
            figsize=(16, 11),
            style='x')
        dfaft[tide].unstack(level=-1).reindex(station_no_nan).plot(
            y='diffr',
            ax=ax2,
            legend=True,
            grid=True,
            title=' Amplitude difference for tide: ' + tide,
            figsize=(16, 11),
            style='x')
        ax1.set_ylabel('Amplitude [m]')
        ax1.legend(['Measured', 'Simulated'])
        ax1.set_xticks(list(range(len(station_no_nan))))
        ax1.set_xticklabels(labels=station_no_nan,
                            rotation=45,
                            horizontalalignment='right')
        plt.subplots_adjust(hspace=0.5)
        ax2.set_ylabel('Amplitude Difference [m]')
        ax2.set_xlabel('Stations')
        ax2.legend(['Difference'])
        ax2.set_xticks(list(range(len(station_no_nan))))
        ax2.set_xticklabels(labels=station_no_nan,
                            rotation=45,
                            horizontalalignment='right')

        savingname = path.join(path_1, tide + '_amplitude_comp' + '.png')
        fig.savefig(savingname)
        plt.close()

        print(tide + ' ...amplitude comparison extracted')
        print('-------------------------------------')

        #g
        #subplot1 meas vs simul, subplot2 diff; for each tide

        fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=False, figsize=(32, 22))

        dfgft[tide].unstack(level=-1).reindex(station_no_nan).plot(
            y=['meas', 'simul'],
            ax=ax1,
            legend=True,
            grid=True,
            title=' Phase shift comparison for tide: ' + tide,
            figsize=(16, 11),
            style='x')
        dfgft[tide].unstack(level=-1).reindex(station_no_nan).plot(
            y='diffr',
            ax=ax2,
            legend=True,
            grid=True,
            title=' Phase shift difference for tide: ' + tide,
            figsize=(16, 11),
            style='x')
        ax1.set_ylabel('Phase shift [deg]')
        ax1.set_xticks(list(range(len(station_no_nan))))
        ax1.set_xticklabels(labels=station_no_nan,
                            rotation=45,
                            horizontalalignment='right')
        ax1.legend(['Measured', 'Simulated'])
        plt.subplots_adjust(hspace=0.5)
        ax2.set_ylabel('Phase shift Difference [deg]')
        ax2.set_xlabel('Stations')
        ax2.set_xticks(list(range(len(station_no_nan))))
        ax2.set_xticklabels(labels=station_no_nan,
                            rotation=45,
                            horizontalalignment='right')
        ax2.legend(['Difference'])

        savingname = path.join(path_1, tide + '_phaseshift_comp' + '.png')
        fig.savefig(savingname)
        plt.close()

        print(tide + ' ...phase shift comparison extracted')
        print('-------------------------------------')

    print('-------------------------------------')
    print('-------------------------------------')
    print('-------------------------------------')

    #plotting rmse of high and low tides
    #high tides
    #subplot1 vertical, subplot2 horizontal; amongst stations

    fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=False, figsize=(15, 11))

    ax1.plot(station_no_nan, rmse_v_max_t, 'x')
    ax1.set_xticks(list(range(len(station_no_nan))))
    ax1.set_xticklabels(labels=station_no_nan,
                        rotation=45,
                        horizontalalignment='right')
    ax1.set_ylabel('Vertical RMSE')
    ax1.legend(['Vertical RMSE'])
    ax1.set_title('RMSE - High tides peaks values')

    plt.subplots_adjust(hspace=0.5)

    ax2.plot(station_no_nan, rmse_h_max_t, 'x')
    ax2.set_ylabel('Horizontal RMSE')
    ax2.set_xlabel('Stations')
    ax2.legend(['Horizontal RMSE'])
    ax2.set_title('RMSE - High tides peaks locations')
    ax2.set_xticks(list(range(len(station_no_nan))))
    ax2.set_xticklabels(labels=station_no_nan,
                        rotation=45,
                        horizontalalignment='right')

    savingname = path.join(path_1, 'high_tides_rmse' + '.png')
    fig.savefig(savingname)
    plt.close()
    print('RMSE for high tides ... extracted ...')

    #low tides
    #subplot1 vertical, subplot2 horizontal; amongst stations

    fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=False, figsize=(15, 11))

    ax1.plot(station_no_nan, rmse_v_min_t, 'x')
    ax1.set_xticks(list(range(len(station_no_nan))))
    ax1.set_xticklabels(labels=station_no_nan,
                        rotation=45,
                        horizontalalignment='right')
    ax1.set_ylabel('Vertical RMSE')
    ax1.legend(['Vertical RMSE'])
    ax1.set_title('RMSE - Low tides peaks values')

    plt.subplots_adjust(hspace=0.5)

    ax2.plot(station_no_nan, rmse_h_min_t, 'x')
    ax2.set_ylabel('Horizontal RMSE')
    ax2.set_xlabel('Stations')
    ax2.legend(['Horizontal RMSE'])
    ax2.set_title('RMSE - Low tides peaks locations')
    ax2.set_xticks(list(range(len(station_no_nan))))
    ax2.set_xticklabels(labels=station_no_nan,
                        rotation=45,
                        horizontalalignment='right')

    savingname = path.join(path_1, 'low_tides_rmse' + '.png')
    fig.savefig(savingname)
    plt.close()
    print('RMSE for low tides ... extracted ...')
    print('-------------------------------------')
    print('-------------------------------------')
    print('-------------------------------------')
Example No. 21
def tidal_analysis_for_depth(tin,
                             uin,
                             vin,
                             lat=62,
                             navn='tide.tex',
                             caption='one layer',
                             dest='LaTeX/',
                             label=''):
    coef = utide.solve(tin, uin, vin, lat=lat)
    col = [
        'Const', 'Freq', 'E-ampl', 'E-gpl', 'N-ampl', 'N-gpl', 'Major',
        'minor', 'Theta', 'Graphl', 'R'
    ]
    supcol = [
        '', 'c/hr', 'mm/sec', 'deg', 'mm/sec', 'deg', 'mm/sec', 'mm/sec',
        'deg', 'deg', ''
    ]
    a = list(coef.name)
    rekkjur = min(len(coef.name), 15)
    coefE = utide.solve(tin, uin, lat=lat, constit=a)
    coefN = utide.solve(tin, vin, lat=lat, constit=a)
    reftime = coef.aux.reftime
    reftime = mdate.num2date(reftime).strftime('%Y-%m-%dT%H:%M:%S')

    tabel = '\\begin{tabular}{|' + (len(col)) * 'r|' + '}\n\\hline\n'
    tabel += col[0]
    for x in col[1:]:
        tabel += '&\t%s' % (x, )
    tabel += '\\\\'
    tabel += supcol[0]
    for x in supcol[1:]:
        tabel += '&\t%s' % (x, )
    tabel += '\\\\\\hline\n'

    for i in range(rekkjur):
        ei = np.argwhere(coefE.name == coef.name[i])[0][0]
        ni = np.argwhere(coefN.name == coef.name[i])[0][0]
        tabel += (4 - len(coef.name[i])) * ' ' + coef.name[i]
        tabel += '&\t%.8f' % (coef.aux.frq[i], )
        tabel += '&\t%5.0f' % (coefE.A[ei], )
        tabel += '&\t%5.0f' % (coefE.g[ei], )
        tabel += '&\t%5.0f' % (coefN.A[ni], )
        tabel += '&\t%5.0f' % (coefN.g[ni], )
        tabel += '&\t%5.0f' % (coef.Lsmaj[i], )
        tabel += '&\t%5.0f' % (abs(coef.Lsmin[i]), )
        tabel += '&\t%3.0f' % (coef.theta[i], )
        tabel += '&\t%3.0f' % (coef.g[i], )
        tabel += '&\t%s' % ('A' if coef.Lsmin[i] > 0 else 'C', )
        tabel += '\\\\\n'
    tabel += '\\hline\n'
    tabel += '\\end{tabular}'
    with open(dest + 'Talvur/%s' % (navn, ), 'w') as texfil:
        texfil.write(tabel)

    tide = utide.reconstruct(tin, coef)
    figwidth = 6
    figheight = 9
    fig, axs = plt.subplots(ncols=1, nrows=4, figsize=(figwidth, figheight))
    axs[0].plot(tin, uin, linewidth=.5)
    axs[0].plot(tin, tide.u, linewidth=.5)
    axs[1].plot(tin, uin - tide.u, linewidth=.5)
    axs[2].plot(tin, vin, linewidth=.5)
    axs[2].plot(tin, tide.v, linewidth=.5)
    axs[3].plot(tin, vin - tide.v, linewidth=.5)
    #plt.show()
    caption += ' Reftime = %s' % reftime

    return '\n\\begin{table}[!ht]%s' \
           '\n\\centering' \
           '\n\\resizebox{\\textwidth}{!}{' \
           '\n\\input{Talvur/%s}' \
           '\n}' \
           '\n\\caption{%s}' \
           '\n\\end{table}' % (label, navn, caption)
Example No. 22
def td():

    fl0 = nr0.get()
    slat = nr1.get()
    tzone = nr2.get()
    mtd = nr3.get()

    lat = float(slat)
    tzone = int(tzone)
    print('method={}'.format(mtd))
    from sklearn import metrics
    from math import sqrt
    UtdF = '../data/' + fl0[0:-4] + '.dtf'
    fli = open('../data/' + fl0, 'r', encoding='cp1252')
    flo = open(UtdF, 'w')
    i = 1
    for ln in fli:
        if ln[0].isnumeric():
            fl1 = ln.split(' ')
            fl2 = fl1[0].split('/')
            fl3 = fl1[1].split(':')
            rval = float(fl1[2]) / 100
            cday = '%2d' % int(fl2[0])
            cmonth = '%2d' % int(fl2[1])
            cyear = '%4d' % int(fl2[2])
            chour = '%2d.0000' % int(fl3[0])
            buf = '%6d' % i + ' ' + cyear + ' ' + cmonth + ' ' + cday + ' ' + chour + ' %7.4f' % rval + ' 0\n'
            flo.write(buf)
            i = i + 1
    fli.close()
    flo.close()

    # Names of the columns that will be used to make a "datetime" column:
    parse_dates = dict(datetime=['year', 'month', 'day', 'hour'])

    # Names of the original columns in the file, including only
    # the ones we will use; we are skipping the first, which appears
    # to be seconds from the beginning.
    names = ['year', 'month', 'day', 'hour', 'elev', 'flag']

    obs = pd.read_table(
        UtdF,
        sep=' ',
        names=names,
        skipinitialspace=True,
        #delim_whitespace=True,
        index_col='datetime',
        usecols=range(1, 7),
        na_values='999.999',
        parse_dates=parse_dates,
        date_parser=date_parser,
    )
    bad = obs['flag'] == 2
    corrected = obs['flag'] == 1

    obs.loc[bad, 'elev'] = np.nan
    Mobs = obs['elev'].mean()
    obs['anomaly'] = obs['elev'] - Mobs
    obs['anomaly'] = obs['anomaly'].interpolate() + Mobs
    print('{} points were flagged "bad" and interpolated'.format(bad.sum()))
    print('{} points were flagged "corrected" and left unchanged'.format(
        corrected.sum()))

    time = mdates.date2num(obs.index.to_pydatetime()) - tzone / 24

    coef = utide.solve(time,
                       obs['anomaly'].values,
                       lat=lat,
                       method=mtd,
                       conf_int='MC')
    #print(coef.keys())
    tide = utide.reconstruct(time, coef)
    #print(tide.keys())

    print('\n')
    flo = open('../out/' + UtdF[8:-4] + '_' + mtd + '.coe', 'w')

    ig = len(coef['name'])
    print('{:4s} {:^8s} {:^8s} {:^10s} {:^10s}'.format('Coef', 'A', 'A_ci',
                                                       'g', 'g_ci'))
    flo.write('{:4s} {:^8s} {:^8s} {:^10s} {:^10s} \n'.format(
        'Coef', 'A', 'A_ci', 'g', 'g_ci'))
    for i in range(ig):
        print('{:4s} {:8.5f} {:8.5f} {:10.5f} {:10.5f}'.format(
            coef['name'][i], coef['A'][i], coef['A_ci'][i], coef['g'][i],
            coef['g_ci'][i]))
        flo.write('{:4s} {:8.5f} {:8.5f} {:10.5f} {:10.5f}\n'.format(
            coef['name'][i], coef['A'][i], coef['A_ci'][i], coef['g'][i],
            coef['g_ci'][i]))
    print('\n\n')
    #t = obs.index.values  # dtype is '<M8[ns]' (numpy datetime64)
    # It is more efficient to supply the time directly as matplotlib
    # datenum floats:

    t = tide.t_mpl
    res = obs.anomaly - tide.h
    fig, (ax0, ax1, ax2) = plt.subplots(nrows=3, sharey=True, sharex=True)
    ax0.plot(t, obs.anomaly, label=u'Observations', color='C0')
    ax1.plot(t, tide.h, label=u'Tide Fit', color='C1')
    ax2.plot(t, res, label=u'Residual', color='C2')
    ax2.xaxis_date()
    fig.legend(ncol=3, loc='lower center')
    fig.autofmt_xdate()
    fig.suptitle('Comparison observation data with UTide in Station ' +
                 UtdF[8:-4],
                 fontsize=16)
    fig.savefig('../out/' + UtdF[8:-4] + '_' + mtd + '.png')

    print('Std Dev= {:6.3f}'.format(res.std()))

    mse = metrics.mean_squared_error(np.array(obs.anomaly), tide.h)

    print('rmse   = {:6.3f}'.format(sqrt(mse)))
    print('\n\n')
    a = datetime.date(2019, 1, 1).toordinal() - 719163.0
    b = datetime.date(2020, 1, 1).toordinal() - 719163.0
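    # 719163 is the proleptic ordinal of 1970-01-01, so a and b are days
    # since the Unix epoch (matplotlib's default date epoch since 3.3);
    # the hourly grid below spans calendar year 2019.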
    times = np.arange(float(a), float(b), 1 / 24)

    tides = utide.reconstruct(times, coef)
    t = tides.t_mpl

    fig, (ax0) = plt.subplots(nrows=1, sharey=True, sharex=True)
    ax0.plot(t, tides.h, label=u'Tide Prediction', color='C1')
    ax0.xaxis_date()
    fig.autofmt_xdate()
    fig.suptitle('Tide Prediction with UTide in Station ' + UtdF[8:-4],
                 fontsize=16)
    fig.savefig('../out/' + UtdF[8:-4] + '_' + mtd + '_P.png')

    print('Minimum= {:6.3f}'.format(tide.h.min()))
    print('Maximum= {:6.3f}'.format(tide.h.max()))
    print('Mean   = {:6.3f}'.format(tide.h.mean()))
    print('\n\n')
    print('Minimum= {:6.3f}'.format(tides.h.min()))
    print('Maximum= {:6.3f}'.format(tides.h.max()))
    print('Mean   = {:6.3f}'.format(tides.h.mean()))
    print('rmse   = {:6.3f}'.format(sqrt(mse)))
    flo.write('\n\n')
    flo.write('Std Dev= {:6.3f}\n'.format(res.std()))
    flo.write('rmse   = {:6.3f}\n'.format(sqrt(mse)))
    flo.write('\n\n')
    flo.write('Minimum= {:6.3f}\n'.format(tide.h.min()))
    flo.write('Maximum= {:6.3f}\n'.format(tide.h.max()))
    flo.write('Mean   = {:6.3f}\n'.format(tide.h.mean()))
    flo.write('Minimum= {:6.3f}\n'.format(tides.h.min()))
    flo.write('Maximum= {:6.3f}\n'.format(tides.h.max()))
    flo.write('Mean   = {:6.3f}\n'.format(tides.h.mean()))
    flo.close()

    print('\n\nFinished\n')
Ejemplo n.º 23
    def tidal_stat(self, mag='mag',
                   args={'Minimum SNR': 2,
                         'Latitude': -36.0,
                         'folder out': os.getcwd(),
                         }):

        '''Extract tide statistics from a time series,
            e.g. HAT, LAT, MHWS, MLWS.'''

        if hasattr(self.data, 'latitude') and self.data.latitude:
            latitude = self.data.latitude
        else:
            latitude = args['Latitude']

        time = self.data.index
        dt = (time[2] - time[1]).total_seconds() / 3600  # sampling interval in hours
        stime = np.array(date2num(time))
        lat = latitude
        ray = args['Minimum SNR']
        demeaned = self.data[mag].values - np.nanmean(self.data[mag].values)
        opts = dict(method='ols', conf_int='linear', Rayleigh_min=ray)
        coef = solve(stime, demeaned, lat=lat, **opts)
        m2 = (coef.name == 'M2').nonzero()[0][0]
        s2 = (coef.name == 'S2').nonzero()[0][0]
        M2 = coef['A'][m2]
        S2 = coef['A'][s2]
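        # Reconstruct 20 years of hourly tide so the series spans the
        # 18.61-year nodal cycle; its extremes then approximate HAT and LAT.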
        t = pd.date_range(start='2000-01-01', periods=24*365*20, freq='H')
        time = date2num(t.to_pydatetime())
        ts_recon = reconstruct(time, coef).h

        stats=np.empty((8,3),dtype="object")
        stats[0,0]='Parameter'
        stats[1,0]='HAT'
        stats[2,0]='MHWS'
        stats[3,0]='MHWN'
        stats[4,0]='MSL'
        stats[5,0]='MLWN'
        stats[6,0]='MLWS'
        stats[7,0]='LAT'

        stats[0,1]='Description'
        stats[1,1]='Highest Astronomical Tide'
        stats[2,1]='Mean High Water Springs (M2+S2)'
        stats[3,1]='Mean High Water Neaps (M2-S2)'
        stats[4,1]='Mean Sea Level'
        stats[5,1]='Mean Low Water Neaps (-M2+S2)'
        stats[6,1]='Mean Low Water Springs (-M2-S2)'
        stats[7,1]='Lowest Astronomical Tide'


        stats[0,2]='Elevation (m), relative to MSL'
        stats[1,2]='%.2f' % (max(ts_recon))
        stats[2,2]='%.2f' % (M2+S2)
        stats[3,2]='%.2f' % (M2-S2)
        stats[4,2]='%.2f' % (0)
        stats[5,2]='%.2f' % (-M2+S2)
        stats[6,2]='%.2f' % (-M2-S2)
        stats[7,2]='%.2f' % (min(ts_recon))

        if hasattr(self.data, 'filename'):
            outfile = os.path.join(
                args['folder out'],
                os.path.splitext(self.data.filename)[0] + '_Concstats.xlsx')
        else:
            outfile = os.path.join(args['folder out'], 'Concstats.xlsx')
        create_table(outfile, 'stat', stats)
Ejemplo n.º 24
def day_119filt(_data, _lat):
    data_day = {}

    filt_wts = np.asarray(fw.weights)
    wts = np.concatenate((np.flip(filt_wts[-59:], 0), filt_wts), axis=0)
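    # wts is now a symmetric 119-point window (the last 59 weights mirrored
    # onto the one-sided set), applied below centred on local noon.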

    t = _data["time"]
    x = _data["sealevel"]

    # 1) Find tide prediction
    # 2) Calculate residual
    # 3) Center data on noon

    coef = solve(_data["time"].flatten(), _data["sealevel"].flatten(),
                 lat=_lat, nodal=True, epoch="matlab")
    tide = reconstruct(_data["time"].flatten(), coef,
                       constit=coef.name[np.where(coef.aux.frq >= 1 / 30)],
                       epoch="matlab")
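    # epoch="matlab" tells UTide the times are Matlab datenums. coef.aux.frq
    # is in cycles per hour, so frq >= 1/30 keeps only constituents with
    # periods of 30 h or less, excluding the long-period tides.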


    xp = tide["h"] - np.mean(tide["h"])

    # residual
    xr = x - xp

    hr_ar = []

    # Convert the Matlab datenums to Python datetimes and keep the hour.
    _date = [matlab2datetime(float(tval)) for tval in _data["time"]]
    for d in _date:
        hr_ar.append(d.hour)
    hr = np.asarray(hr_ar)
    k = np.where(hr==12)[0]
    ks = k-59
    ke = k+59

    kk = np.where(ks<1)[0]
    ks[kk] = 0

    kk = np.where(ke > len(x))[0]
    ke[kk] = len(x)-1

    nday = len(k)
    tout = t[k]
    xout = np.full(nday, np.nan)
    cout = xout.copy()

    for j in range(nday):
        xx = np.full(119, np.nan)
        ww = np.full(119, np.nan)
        cc = np.full(119, np.nan)

        xx[ks[j]-k[j]+59:ke[j]-k[j]+60] = xr[ks[j]:ke[j]+1]
        ww[ks[j]-k[j]+59:ke[j]-k[j]+60] = wts[ks[j]-k[j]+59:ke[j]-k[j]+60]
        cc[ks[j]-k[j]+59:ke[j]-k[j]+60] = _data["channel"][ks[j]:ke[j]+1]

        kg = np.where(~np.isnan(xx))[0]
        xout[j] = zero_division(sum(xx[kg]*ww[kg]), sum(ww[kg]))
        cout[j] = np.round(zero_division(sum(cc[kg] * ww[kg]), sum(ww[kg])))

        kb = np.where(np.isnan(xx))[0]

        if len(kb) > 0:
            if sum(abs(ww[kb])) > 0.25:
                xout[j] = np.nan
                cout[j] = np.nan


    data_day["time"] = tout
    data_day["residual"] = xr
    data_day["sealevel"] = xout
    data_day["channel"] = cout

    return data_day
Ejemplo n.º 25
0
    def __init__(self,
                 fn_nemo_data,
                 fn_nemo_domain,
                 fn_obs,
                 fn_out,
                 thresholds=np.arange(0, 2, 0.1),
                 constit_to_save=['M2', 'S2', 'K1', 'O1'],
                 chunks={'time_counter': 100}):

        nemo = read_nemo_ssh(fn_nemo_data, fn_nemo_domain, chunks)

        landmask = read_nemo_landmask_using_top_level(fn_nemo_domain)

        obs = read_obs_data(fn_obs)

        obs = subset_obs_by_lonlat(nemo, obs)

        nemo_extracted, obs = extract_obs_locations(nemo, obs, landmask)
        #nemo_extracted = self.read_nemo_oneatatime(fn_nemo_data, fn_nemo_domain,
        #                                           obs, landmask, chunks)

        obs = align_timings(nemo_extracted, obs)
        print('loading', flush=True)
        nemo_extracted = nemo_extracted.load()
        print('loaded', flush=True)

        # Define Dimension Sizes
        n_port = obs.dims['port']
        n_time = obs.dims['time']
        n_constit = len(constit_to_save)
        n_thresholds = len(thresholds)

        a_mod = np.zeros((n_port, n_constit)) * np.nan
        a_obs = np.zeros((n_port, n_constit)) * np.nan
        g_mod = np.zeros((n_port, n_constit)) * np.nan
        g_obs = np.zeros((n_port, n_constit)) * np.nan

        std_obs = np.zeros((n_port)) * np.nan
        std_mod = np.zeros((n_port)) * np.nan
        std_err = np.zeros((n_port)) * np.nan
        ntr_corr = np.zeros((n_port)) * np.nan
        ntr_mae = np.zeros((n_port)) * np.nan

        skew_mod = []
        skew_obs = []
        skew_err = []

        thresh_freq_ntr_mod = np.zeros((n_port, n_thresholds))
        thresh_freq_ntr_obs = np.zeros((n_port, n_thresholds))
        thresh_int_ntr_mod = np.zeros((n_port, n_thresholds))
        thresh_int_ntr_obs = np.zeros((n_port, n_thresholds))
        thresh_freq_skew_mod = np.zeros((n_port, n_thresholds))
        thresh_freq_skew_obs = np.zeros((n_port, n_thresholds))
        thresh_ntr_corr = np.zeros((n_port, n_thresholds))
        thresh_ntr_me = np.zeros((n_port, n_thresholds))
        thresh_ntr_mae = np.zeros((n_port, n_thresholds))

        ntr_mod_all = np.zeros((n_port, n_time)) * np.nan
        ntr_obs_all = np.zeros((n_port, n_time)) * np.nan

        # Loop over tide gauge locations, perform analysis per location
        for pp in range(0, n_port):
            port_mod = nemo_extracted.isel(port=pp)
            port_obs = obs.isel(port=pp)

            if all(np.isnan(port_obs.ssh)):
                skew_mod.append([])
                skew_obs.append([])
                continue

            # Mask shared gaps: NaN out times missing in either series
            ssh_mod = port_mod.ssh.values
            ssh_obs = port_obs.ssh.values
            shared_mask = np.logical_or(np.isnan(ssh_mod), np.isnan(ssh_obs))
            ssh_mod[shared_mask] = np.nan
            ssh_obs[shared_mask] = np.nan
            time_mod = port_mod.time_instant.values
            time_obs = port_obs.time.values

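            # Require at least one year of valid hourly data (8760 points).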
            if np.sum(~np.isnan(ssh_obs)) < 8760:
                skew_mod.append([])
                skew_obs.append([])
                continue

            # Harmonic analysis datenums
            hat = mdates.date2num(time_mod)

            # Do harmonic analysis using UTide
            uts_obs = ut.solve(hat, ssh_obs, lat=port_obs.latitude.values)
            uts_mod = ut.solve(hat, ssh_mod, lat=port_mod.latitude.values)

            # Reconstruct tidal signal
            tide_obs = np.array(ut.reconstruct(hat, uts_obs).h)
            tide_mod = np.array(ut.reconstruct(hat, uts_mod).h)
            tide_obs[shared_mask] = np.nan
            tide_mod[shared_mask] = np.nan

            # Identify Peaks in tide and TWL

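            # distance=9 samples (~9 h for hourly data) prevents successive
            # semidiurnal peaks (M2 period ~12.42 h) being double-counted.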
            pk_ind_tide_mod, _ = signal.find_peaks(tide_mod, distance=9)
            pk_ind_tide_obs, _ = signal.find_peaks(tide_obs, distance=9)
            pk_ind_ssh_mod, _ = signal.find_peaks(ssh_mod, distance=9)
            pk_ind_ssh_obs, _ = signal.find_peaks(ssh_obs, distance=9)

            pk_time_tide_mod = pd.to_datetime(time_mod[pk_ind_tide_mod])
            pk_time_tide_obs = pd.to_datetime(time_obs[pk_ind_tide_obs])
            pk_time_ssh_mod = pd.to_datetime(time_mod[pk_ind_ssh_mod])
            pk_time_ssh_obs = pd.to_datetime(time_obs[pk_ind_ssh_obs])

            pk_tide_mod = tide_mod[pk_ind_tide_mod]
            pk_tide_obs = tide_obs[pk_ind_tide_obs]
            pk_ssh_mod = ssh_mod[pk_ind_ssh_mod]
            pk_ssh_obs = ssh_obs[pk_ind_ssh_obs]

            # Define Skew Surges
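            # Skew surge = (highest observed level near a predicted high
            # water) minus (the predicted high-water level); peaks are
            # matched within a +/-6 h window below.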
            n_tide_mod = len(pk_tide_mod)
            n_tide_obs = len(pk_tide_obs)

            pk_ssh_mod_interp = np.zeros(n_tide_mod)
            pk_ssh_obs_interp = np.zeros(n_tide_obs)

            # Model Skew Surge
            for ii in range(0, n_tide_mod):
                time_diff = np.abs(pk_time_tide_mod[ii] - pk_time_ssh_mod)
                search_ind = np.where(time_diff < timedelta(hours=6))
                if len(search_ind[0]) > 0:
                    pk_ssh_mod_interp[ii] = np.nanmax(
                        pk_ssh_mod[search_ind[0]])
                else:
                    pk_ssh_mod_interp[ii] = np.nan

            # Observed Skew Surge
            for ii in range(0, n_tide_obs):
                time_diff = np.abs(pk_time_tide_obs[ii] - pk_time_ssh_obs)
                search_ind = np.where(time_diff < timedelta(hours=6))
                if len(search_ind[0]) > 0:
                    pk_ssh_obs_interp[ii] = np.nanmax(pk_ssh_obs[search_ind])
                else:
                    pk_ssh_obs_interp[ii] = np.nan

            skew_mod_tmp = pk_ssh_mod_interp - pk_tide_mod
            skew_obs_tmp = pk_ssh_obs_interp - pk_tide_obs

            ds_tmp = xr.Dataset(coords=dict(time=('time', pk_time_tide_mod)),
                                data_vars=dict(ssh=('time', skew_mod_tmp)))
            ds_int = ds_tmp.interp(time=pk_time_tide_obs, method='nearest')
            skew_mod_tmp = ds_int.ssh.values

            skew_mod.append(skew_mod_tmp)
            skew_obs.append(skew_obs_tmp)
            skew_err.append(skew_mod_tmp - skew_obs_tmp)

            # TWL: Basic stats
            std_obs[pp] = np.nanstd(ssh_obs)
            std_mod[pp] = np.nanstd(ssh_mod)

            # TWL: Constituents
            a_dict_obs = dict(zip(uts_obs.name, uts_obs.A))
            a_dict_mod = dict(zip(uts_mod.name, uts_mod.A))
            g_dict_obs = dict(zip(uts_obs.name, uts_obs.g))
            g_dict_mod = dict(zip(uts_mod.name, uts_mod.g))

            for cc in range(0, len(constit_to_save)):
                if constit_to_save[cc] in uts_obs.name:
                    a_mod[pp, cc] = a_dict_mod[constit_to_save[cc]]
                    a_obs[pp, cc] = a_dict_obs[constit_to_save[cc]]
                    g_mod[pp, cc] = g_dict_mod[constit_to_save[cc]]
                    g_obs[pp, cc] = g_dict_obs[constit_to_save[cc]]

            a_mod[a_mod == 0] = np.nan
            a_mod[a_mod > 20] = np.nan
            a_obs[a_obs == 0] = np.nan
            a_obs[a_obs > 20] = np.nan

            # NTR: Calculate and get peaks
            ntr_obs = ssh_obs - tide_obs
            ntr_mod = ssh_mod - tide_mod

            #ntr_obs = signal.savgol_filter(ntr_obs,25,3)
            #ntr_mod = signal.savgol_filter(ntr_mod,25,3)

            ntr_obs = np.ma.masked_invalid(ntr_obs)
            ntr_mod = np.ma.masked_invalid(ntr_mod)

            ntr_obs_all[pp] = ntr_obs
            ntr_mod_all[pp] = ntr_mod

            pk_ind_ntr_obs, _ = signal.find_peaks(ntr_obs, distance=12)
            pk_ind_ntr_mod, _ = signal.find_peaks(ntr_mod, distance=12)

            pk_time_ntr_obs = pd.to_datetime(time_obs[pk_ind_ntr_obs])
            pk_time_ntr_mod = pd.to_datetime(time_mod[pk_ind_ntr_mod])
            pk_ntr_obs = ntr_obs[pk_ind_ntr_obs]
            pk_ntr_mod = ntr_mod[pk_ind_ntr_mod]

            # NTR: Basic stats
            ntr_corr[pp] = np.ma.corrcoef(ntr_obs, ntr_mod)[1, 0]
            ntr_mae[pp] = np.ma.mean(np.abs(ntr_obs - ntr_mod))

            # Threshold Statistics
            for nn in range(0, n_thresholds):
                threshn = thresholds[nn]
                # NTR: Threshold Frequency (Peaks)
                thresh_freq_ntr_mod[pp, nn] = np.sum(pk_ntr_mod > threshn)
                thresh_freq_ntr_obs[pp, nn] = np.sum(pk_ntr_obs > threshn)

                # NTR: Threshold integral (Time over threshold)
                thresh_int_ntr_mod[pp, nn] = np.sum(ntr_mod > threshn)
                thresh_int_ntr_obs[pp, nn] = np.sum(ntr_obs > threshn)

                # NTR: MAE and correlations above thresholds
                # ntr_over_ind = np.where( ntr_obs > threshn )[0]
                # ntr_obs_over = ntr_obs[ntr_over_ind]
                # ntr_mod_over = ntr_mod[ntr_over_ind]
                # thresh_ntr_corr[pp,nn] = np.ma.corrcoef(ntr_obs_over, ntr_mod_over)
                # thresh_ntr_mae[pp,nn] = np.ma.mean( np.abs( ntr_mod_over - ntr_obs_over ))
                # thresh_ntr_me[pp,nn] = np.ma.mean( ntr_mod_over - ntr_obs_over )

                # Skew Surge Threshold Frequency
                thresh_freq_skew_mod[pp, nn] = np.sum(skew_mod_tmp > threshn)
                thresh_freq_skew_obs[pp, nn] = np.sum(skew_obs_tmp > threshn)

        # NTR: Monthly Variability
        ds_ntr = xr.Dataset(coords=dict(time=('time', obs.time.values)),
                            data_vars=dict(ntr_mod=(['port',
                                                     'time'], ntr_mod_all),
                                           ntr_obs=(['port',
                                                     'time'], ntr_obs_all)))

        # NTR: Monthly Climatology
        ntr_grouped = ds_ntr.groupby('time.month')
        ntr_clim_var = ntr_grouped.std()
        ntr_clim_mean = ntr_grouped.mean()

        # NTR: Monthly Means
        ntr_resampled = ds_ntr.resample(time='1M')
        ntr_monthly_var = ntr_resampled.std()
        ntr_monthly_mean = ntr_resampled.mean()
        ntr_monthly_max = ntr_resampled.max()

        ### Put into Dataset and write to file

        # Figure out skew surge dimensions
        n_skew = 0
        for pp in range(0, n_port):
            if len(skew_mod[pp]) > n_skew:
                n_skew = len(skew_mod[pp])
            if len(skew_obs[pp]) > n_skew:
                n_skew = len(skew_obs[pp])

        skew_mod_np = np.zeros((n_port, n_skew)) * np.nan
        skew_obs_np = np.zeros((n_port, n_skew)) * np.nan

        for pp in range(0, n_port):
            len_mod = len(skew_mod[pp])
            len_obs = len(skew_obs[pp])
            skew_mod_np[pp, :len_mod] = skew_mod[pp]
            skew_obs_np[pp, :len_obs] = skew_obs[pp]

        stats = xr.Dataset(
            coords=dict(longitude=('port', obs.longitude.values),
                        latitude=('port', obs.latitude.values),
                        time=('time', time_obs),
                        constituent=('constituent', constit_to_save),
                        threshold=('threshold', thresholds),
                        time_month=('time_month', ntr_monthly_var.time),
                        clim_month=('clim_month', ntr_clim_var.month)),
            data_vars=dict(
                ssh_mod=(['port', 'time'], nemo_extracted.ssh.values.T),
                ssh_obs=(['port', 'time'], obs.ssh.values),
                ntr_mod=(['port', 'time'], ntr_mod_all),
                ntr_obs=(['port', 'time'], ntr_obs_all),
                amp_mod=(['port', 'constituent'], a_mod),
                amp_obs=(['port', 'constituent'], a_obs),
                pha_mod=(['port', 'constituent'], g_mod),
                pha_obs=(['port', 'constituent'], g_obs),
                amp_err=(['port', 'constituent'], a_mod - a_obs),
                pha_err=(['port', 'constituent'], compare_phase(g_mod, g_obs)),
                std_obs=(['port'], std_obs),
                std_mod=(['port'], std_mod),
                std_err=(['port'], std_mod - std_obs),
                ntr_corr=(['port'], ntr_corr),
                ntr_mae=(['port'], ntr_mae),
                skew_mod=(['port', 'tide_num'], skew_mod_np),
                skew_obs=(['port', 'tide_num'], skew_obs_np),
                skew_err=(['port', 'tide_num'], skew_mod_np - skew_obs_np),
                thresh_freq_ntr_mod=(['port',
                                      'threshold'], thresh_freq_ntr_mod),
                thresh_freq_ntr_obs=(['port',
                                      'threshold'], thresh_freq_ntr_obs),
                thresh_freq_skew_mod=(['port',
                                       'threshold'], thresh_freq_skew_mod),
                thresh_freq_skew_obs=(['port',
                                       'threshold'], thresh_freq_skew_obs),
                thresh_int_ntr_mod=(['port', 'threshold'], thresh_int_ntr_mod),
                thresh_int_ntr_obs=(['port', 'threshold'], thresh_int_ntr_obs),
                ntr_mod_clim_var=(['port', 'clim_month'],
                                  ntr_clim_var.ntr_mod.values.T),
                ntr_mod_clim_mean=(['port', 'clim_month'],
                                   ntr_clim_mean.ntr_mod.values.T),
                ntr_mod_monthly_var=(['port', 'time_month'],
                                     ntr_monthly_var.ntr_mod.values.T),
                ntr_mod_monthly_mean=(['port', 'time_month'],
                                      ntr_monthly_mean.ntr_mod.values.T),
                ntr_mod_monthly_max=(['port', 'time_month'],
                                     ntr_monthly_max.ntr_mod.values.T),
                ntr_obs_clim_var=(['port', 'clim_month'],
                                  ntr_clim_var.ntr_obs.values.T),
                ntr_obs_clim_mean=(['port', 'clim_month'],
                                   ntr_clim_mean.ntr_obs.values.T),
                ntr_obs_monthly_var=(['port', 'time_month'],
                                     ntr_monthly_var.ntr_obs.values.T),
                ntr_obs_monthly_mean=(['port', 'time_month'],
                                      ntr_monthly_mean.ntr_obs.values.T),
                ntr_obs_monthly_max=(['port', 'time_month'],
                                     ntr_monthly_max.ntr_obs.values.T)))

        write_stats_to_file(stats, fn_out)
Ejemplo n.º 26
td = zeros(len(dates))
for k in range(len(dates)):
    td[k] = (UTCDateTime(dates[k] + 'T' + times[k]) - t0) / 86400.

# only include times before the earthquake
i = where(td < 0)[0]

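# method='robust' fits by iteratively reweighted least squares,
# down-weighting outliers relative to ordinary least squares ('ols').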
coef = utide.solve(td[i],
                   data[i],
                   lat=lat,
                   nodal=True,
                   trend=False,
                   method='robust',
                   conf_int='linear')

tide = utide.reconstruct(td, coef)

fig, (ax0, ax1, ax2) = plt.subplots(nrows=3, sharex=True, figsize=(17, 5))
c1 = '#0080FF'
c2 = '#FF4500'
c3 = '#3CB371'

ax0.plot(td, data, lw=0.5, c=c1)
ax0.set_ylabel('Observed (m)')
ax0.grid()

ax1.plot(td, tide['h'], lw=0.5, c=c2)
ax1.set_ylabel('Modeled (m)')
ax1.grid()

ax2.plot(td, data - tide['h'], lw=0.5, c=c3)
Ejemplo n.º 27
f_mod_peaks = f_mod[i_mod]
a_mod_peaks = a_mod[i_mod]

nn_tg = get_nearest(f_tg_peaks, values)
nn_mod = get_nearest(f_mod_peaks, values)

f_tg_const = f_tg_peaks[nn_tg]
a_tg_const = a_tg_peaks[nn_tg]
f_mod_const = f_mod_peaks[nn_mod]
a_mod_const = a_mod_peaks[nn_mod]

# Cross Spectral Density
f_xy, pxy = scipy.signal.csd(tg_ssh, mod_ssh, fs60)

# Spectrogram
sg_tg = scipy.signal.spectrogram(tg_ssh)
sg_mod = scipy.signal.spectrogram(mod_ssh)

# Spectral Coherence
co = scipy.signal.coherence(tg_ssh, mod_ssh, fs60, nperseg=1024)

# Harmonic Analysis
hat_tg = mdates.date2num(tg_time)
hat_mod = mdates.date2num(mod_time)
uts_tg = utide.solve(hat_tg, tg_ssh, lat=53.45)
uts_mod = utide.solve(hat_mod, mod_ssh, lat=53.45)

tg_rec = utide.reconstruct(hat_tg, uts_tg).h
tg_rec[np.isnan(tg_ssh)] = np.nan

fr, ar, gr = compute_fft(tg_ssh - tg_rec, fs60)
Ejemplo n.º 28
def intro_bar(datadf,
              max_bin,
              dypir,
              navn='intro_bar.pdf',
              dest='LaTeX/',
              caption=None,
              max_sj=False,
              verbose=True,
              dpi=200,
              font=7,
              figwidth=6,
              figheight=7.1,
              uvdata=None,
              date=None,
              linja=False):
    '''
    Shows how strong the current is at the different depths.

    :param datadf:      magdir data
    :param max_bin:     number giving the last bin to consider
    :param dypir:       list giving the depth of each bin
    :param navn:        name of the figure file
    :param dest:        path to master.tex
    :param caption:     caption for the figure
    :param max_sj:      whether to include an upper estimate of the tidal current
    :param verbose:     whether UTide is allowed to print progress
    :param dpi:         dpi of the figure
    :param font:        font size in the figure
    :param figwidth:    width of the figure
    :param figheight:   height of the figure
    :param uvdata:      uv data, if the maximum tide is to be drawn into the figure
    :param date:        timestamps of the measurements, if time is needed
    :param linja:       whether to draw a black line marking an upper bound for the tide

    :return:            a string to include in master.tex
    '''
    dypir = [dypir[x] for x in range(max_bin)]
    stod = [25, 50, 75, 95, 99.5]
    bars = [[] for _ in stod]
    bars_sj = [[] for _ in stod]
    for i in range(1, max_bin + 1):
        #  convert the data to cm/s
        temp = [
            x for x in (datadf['mag' + str(i)].values / 10) if not np.isnan(x)
        ]
        temp = np.sort(temp)
        longd = len(temp)
        for j, brok in enumerate(stod):
            if brok > 100:
                raise ValueError('the percentile is too large')
            elif brok == 100:
                bars[j].append(temp[-1])
            elif brok <= 0:
                raise ValueError('the percentile is too small')
            else:
                bars[j].append(temp[int(brok * longd / 100)])

    if max_sj and uvdata is not None and date is not None:
        tin = np.array(date)
        for i in range(1, max_bin + 1):
            u = np.array((datadf['mag' + str(i)] / 10)
                         * np.cos(np.deg2rad(datadf['dir' + str(i)] - 90)))
            v = np.array((datadf['mag' + str(i)] / 10)
                         * np.cos(np.deg2rad(datadf['dir' + str(i)])))
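            # cos(theta - 90 deg) == sin(theta), so u and v are the eastward
            # and northward components of the current (in cm/s).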
            coef = utide.solve(tin, u, v, lat=62, verbose=verbose)
            tide = utide.reconstruct(tin, coef, verbose=verbose)
            temp = [np.sqrt(x**2 + y**2) for x, y in zip(tide['u'], tide['v'])]
            temp = [x for x in temp if not np.isnan(x)]
            temp = np.sort(temp)
            longd = len(temp)
            for j, brok in enumerate(stod):
                if brok > 100:
                    raise ValueError('the percentile is too large')
                elif brok == 100:
                    bars_sj[j].append(temp[-1])
                elif brok <= 0:
                    raise ValueError('the percentile is too small')
                else:
                    bars_sj[j].append(temp[int(brok * longd / 100)])

    fig, axs = plt.subplots(ncols=1,
                            nrows=1,
                            figsize=(figwidth, figheight),
                            dpi=dpi)
    mpl.rcParams['font.size'] = font

    index = np.arange(max_bin)
    plots = []
    vidd = .33
    plots.append(
        axs.bar([x - vidd / 2 for x in index],
                bars[0],
                vidd,
                label=str(stod[0]) + '%',
                edgecolor='k'))
    for i in range(1, len(stod) - 1):
        plots.append(
            axs.bar([x - vidd / 2 for x in index],
                    [x - y for x, y in zip(bars[i], bars[i - 1])],
                    vidd,
                    bottom=bars[i - 1],
                    label=str(stod[i]) + '%',
                    edgecolor='k'))
    i += 1
    plots.append(
        axs.bar([x - vidd / 2 for x in index],
                [x - y for x, y in zip(bars[i], bars[i - 1])],
                vidd,
                bottom=bars[i - 1],
                label=str(stod[i]) + '%',
                edgecolor='k',
                alpha=.5))

    plt.gca().set_prop_cycle(None)
    plots.append(
        axs.bar([x + vidd / 2 for x in index],
                bars_sj[0],
                vidd,
                edgecolor='k',
                hatch='//'))
    for i in range(1, len(stod) - 1):
        plots.append(
            axs.bar([x + vidd / 2 for x in index],
                    [x - y for x, y in zip(bars_sj[i], bars_sj[i - 1])],
                    vidd,
                    bottom=bars_sj[i - 1],
                    edgecolor='k',
                    hatch='//'))
    i += 1
    plots.append(
        axs.bar([x + vidd / 2 for x in index],
                [x - y for x, y in zip(bars_sj[i], bars_sj[i - 1])],
                vidd,
                bottom=bars_sj[i - 1],
                edgecolor='k',
                hatch='//',
                alpha=.5))
    if len(index) < 20:
        axs.xaxis.set_ticks(index)
        axs.set_xticklabels([int(-x) for x in dypir])
    else:
        axs.xaxis.set_ticks([index[x] for x in range(0, len(index), 2)])
        axs.set_xticklabels([int(-x) for x in dypir[::2]])

    axs.set_ylabel('Current speed (cm/s)', fontsize=font)
    axs.set_xlabel('Depth (m)', fontsize=font)
    temp = bars[-1]
    temp = np.sort(temp)
    mymax = min(2 * temp[int(.5 * len(temp))], 1.2 * temp[-1])

    #  lat is just set to the default, lat=62
    if max_sj and uvdata is not None and date is not None and linja:
        templist, maxcand = sjovarfallmax(uvdata,
                                          date,
                                          dypir,
                                          max_bin,
                                          figut=False)
        axs.plot(index, templist, color='k', label='upper bound for the tide')
        axs.set_ylim(0, max(mymax, maxcand))
    else:
        axs.set_ylim(0, mymax)

    axs.legend(ncol=int(np.ceil(len(stod) / 2)))
    axs.tick_params(axis='both', which='major', labelsize=font)
    fig.subplots_adjust(left=0.08,
                        bottom=0.1,
                        right=0.99,
                        top=0.99,
                        wspace=0.0,
                        hspace=0.2)
    fig.savefig(dest + 'myndir/%s' % navn, dpi=dpi)
    plt.close(fig)

    if caption is None:
        caption = 'Distribution of current speed through the water column. '
        caption += 'Depth is on the x-axis, and current speed is on the '
        caption += 'y-axis. Each bar shows the distribution of the current '
        caption += 'speed at one particular depth. The unhatched bars show '
        caption += 'measured values, and the hatched bars show the tidal '
        caption += 'contribution computed from the measurements. '
        caption += 'The colours give the fraction of the measurements in '
        caption += 'which the current speed is below the value on the y-axis. '
        caption += 'E.g. the figure shows that at \\SI{%s}{m} depth, the '\
                % str(int(-dypir[0]))
        caption += 'current speed in 75\\% of the measurements is below '
        caption += '\\SI{%s}{cm/s}.' % str(int(bars[2][0]))

    label = '\\label{barstreym}'

    out = '\n\\FloatBarrier\n'
    out += '\\section{Distribution of current speed at different depths}\n'
    out += '\\begin{figure}[h!]\n'
    out += '\\includegraphics[scale=1]{myndir/%s}\n' % navn
    out += '\\caption{%s}\n' % caption
    out += '%s\n' % label
    out += '\n\\end{figure}\n'
    out += '\\newpage\n'

    return out
Ejemplo n.º 29
def tidal_non_tidal_plot(dato,
                         direct,
                         mag,
                         figwidth=6,
                         figheight=7.1,
                         dpi=200,
                         lat=62,
                         verbose=True,
                         figname='tidal_and_nontidal.pdf',
                         dest='LaTeX/',
                         font=7):
    """
    plottar tíðar seriuna í Eystur og Norð, á einum dýpið (goymur eina mynd inni á myndir)
    :param dato: ein list like inniheldur mdate dato fyri tíðarseriuna
    :param datadf: eitt list like inniheldur mag í mm/s abs og dir
    :param fultdypid: float/int hvussu djúpt tað er
    :param Bin_Size: Bin_Size á mátingunum
    :param firstbinrange: 1st Bin Range (m) á mátingini
    :param figwidth: breiddin á figurinum
    :param figheight: hæddin á figurinum
    :param dpi: dpi á figurinum
    :param lat: breiddarstig
    :param verbose: skal utide sleppa at tosa
    :param figname: navnið á fýlini sum verður goymd
    """

    tin = np.array(dato)
    u = mag * np.sin(np.deg2rad(direct))
    v = mag * np.cos(np.deg2rad(direct))
    coef = utide.solve(tin, u, v, lat=lat, verbose=verbose, trend=True)
    #  The means are removed so that, once the prediction is subtracted
    #  from the series, the difference is centred within the data set;
    #  this is not strictly correct, but it makes the plot look right.
    coef.umean = float(0)
    coef.vmean = float(0)
    reconstruckt = utide.reconstruct(tin, coef=coef, verbose=verbose)

    fig, axs = plt.subplots(ncols=1,
                            nrows=2,
                            figsize=(figwidth, figheight),
                            dpi=dpi)
    mpl.rcParams['font.size'] = font

    date_fmt = mdate.DateFormatter('%d %b')
    axs[0].plot(tin, u, linewidth=.2, label='Original time series')
    axs[0].plot(tin,
                u - reconstruckt.u,
                linewidth=.2,
                label='Original time series minus prediction')
    axs[0].set_ylabel('Eastward current speed (mm/s)')
    axs[0].xaxis.set_major_formatter(date_fmt)
    axs[0].set_xlim([tin[0], tin[-1]])
    axs[1].plot(tin, v, linewidth=.2, label='Original time series')
    axs[1].plot(tin,
                v - reconstruckt.v,
                linewidth=.2,
                label='Original time series minus prediction')
    axs[1].xaxis.set_major_formatter(date_fmt)
    axs[1].set_ylabel('Northward current speed (mm/s)')
    axs[1].set_xlim([tin[0], tin[-1]])
    mpl.rcParams['font.size'] = 7
    plt.subplots_adjust(left=0.15,
                        bottom=0.05,
                        right=0.95,
                        top=0.95,
                        wspace=0.1,
                        hspace=0.1)
    fig.savefig(dest + 'myndir/' + figname)
Ejemplo n.º 30
    def detide(self, gauge):
        """
        Remove tidal constituents from the observed timeseries at a given gauge. This is done using
        the Python re-implementation of the Matlab package UTide, available at
        https://github.com/wesleybowman/UTide.

        :arg gauge: string denoting the gauge of interest.
        :returns: a 3-tuple of arrays corresponding to the observation times, the de-tided timeseries
            and the original timeseries, respectively.
        """
        from pandas import date_range
        import utide
        from matplotlib.dates import date2num

        # Read data from file
        time, elev = self.extract_data(gauge)

        # Start date and time of observations (in the GMT timezone)
        start = '2011-03-11 05:46:00'

        # Observation frequency
        if gauge[0] == "8":
            freq = "5S"
        elif gauge[0] == "P" or "PG" in gauge:
            freq = "S"
        elif gauge[0] == "2":
            freq = "60S"
        else:
            raise ValueError("Gauge {:s} not recognised.".format(gauge))
        time_str = date2num(
            date_range(start=start, periods=len(time),
                       freq=freq).to_pydatetime())

        # Interpolate away any NaNs
        if np.any(np.isnan(elev)):
            self.sample_timeseries(gauge)
            elev = np.array(
                [self.gauges[gauge]["interpolator"](t) for t in time])

        # Shift to zero
        elev[:] -= elev[0]

        # Get anomaly
        anomaly = elev - elev.mean()
        assert not np.any(np.isnan(anomaly))

        # Apply de-tiding algorithm to anomaly
        verbose = self.debug and self.debug_mode == 'full'
        kwargs = {
            'method': 'ols',  # ordinary least squares
            'conf_int': 'none',  # skip confidence-interval calculation
            'lat': np.array([self.gauges[gauge]["lonlat"][1]]),
            'verbose': verbose,
        }
        self.print_debug(
            "INIT: Applying UTide de-tiding algorithm to gauge {:s}...".format(
                gauge))
        sol = utide.solve(time_str, anomaly, **kwargs)
        tide = utide.reconstruct(time_str, sol, verbose=verbose)

        # Subtract de-tided component
        detided = anomaly - np.array(tide.h).reshape(anomaly.shape)
        # diff = detided - elev

        return time, detided, elev
Ejemplo n.º 31
def test_roundtrip(conf_int):
    """Minimal conversion from simple_utide_test."""
    ts = 735604
    duration = 35

    time = np.linspace(ts, ts + duration, 842)
    tref = (time[-1] + time[0]) / 2

    const = ut_constants.const

    amp = 1.0
    phase = 53
    lat = 45.5

    freq_cpd = 24 * const.freq

    jj = 48 - 1  # Python index for M2

    arg = 2 * np.pi * (time - tref) * freq_cpd[jj] - np.deg2rad(phase)
    time_series = amp * np.cos(arg)

    opts = {
        'constit': 'auto',
        'phase': 'raw',
        'nodal': False,
        'trend': False,
        'method': 'ols',
        'conf_int': conf_int,
        'Rayleigh_min': 0.95,
    }

    speed_coef = solve(time, time_series, time_series, lat=lat, **opts)
    elev_coef = solve(time, time_series, lat=lat, **opts)

    amp_err = amp - elev_coef['A'][0]
    phase_err = phase - elev_coef['g'][0]
    ts_recon = reconstruct(time, elev_coef).h

    # pure smoke testing of reconstruct
    vel = reconstruct(time, speed_coef)
    vel = reconstruct(time, speed_coef, constit=('M2', 'S2'))
    htmp = reconstruct(time, elev_coef, constit=('M2', 'S2'))
    vel = reconstruct(time, speed_coef, min_SNR=3)
    htmp = reconstruct(time, elev_coef, min_SNR=3)
    vel = reconstruct(time, speed_coef, min_PE=10)
    htmp = reconstruct(time, elev_coef, min_PE=10)
    vel = reconstruct(time, speed_coef, min_SNR=0)
    htmp = reconstruct(time, elev_coef, min_SNR=0)
    assert isinstance(vel, Bunch)
    assert isinstance(htmp, Bunch)

    # Now the round-trip check, just for the elevation.
    err = np.sqrt(np.mean((time_series - ts_recon)**2))

    print(amp_err, phase_err, err)
    print(elev_coef['aux']['reftime'], tref)
    print(elev_coef['aux']['opt'])

    np.testing.assert_almost_equal(amp_err, 0)
    np.testing.assert_almost_equal(phase_err, 0)
    np.testing.assert_almost_equal(err, 0)
Ejemplo n.º 32
# days after t0 for the post-event record
td_post = zeros(len(dates_post))
for k in range(len(dates_post)):
    td_post[k] = (UTCDateTime(dates_post[k] + 'T' + times_post[k]) - t0) / 86400.

# resample to regular interval

coef = utide.solve(td, data,
                   lat=lat,
                   nodal=True,
                   trend=False,
                   method='robust',
                   conf_int='linear')

tide = utide.reconstruct(td, coef)

# Predict at the post-event times
tide_predict = utide.reconstruct(td_post, coef)

fig, (ax0, ax1, ax2) = plt.subplots(nrows=3, sharex=True, figsize=(17, 5))
c1 = '#0080FF'
c2 = '#FF8C00'

ax0.plot(td, data, c=c1)
ax0.plot(td_post, data_post, c=c2)

ax1.plot(td, tide['h'], alpha=0.5, c=c1)