Example #1
import numpy as np
import matplotlib.pyplot as plt
from stompy import utils, filters

def flux_figure(mr, flow_station_name, stage_station_name):
    # name_to_da / name_to_model_da are assumed helpers that look up
    # observed / modeled DataArrays for a station.
    fig, axs = plt.subplots(3, 1, num=1, sharex=True, clear=True,
                            figsize=(7, 9))

    for src in ['obs','mod']:
        if src=='obs':
            flow_da=name_to_da(mr,flow_station_name,'flow')
            stage_da=name_to_da(mr,stage_station_name,'stage')
        else:
            flow_da=name_to_model_da(mr,flow_station_name,'flow')
            stage_da=name_to_model_da(mr,stage_station_name,'stage')

        # Tidal flux proxy: E ~ <Q'h'>, the lowpassed product of tidal anomalies.

        # range of valid overlapping data:
        t0=max(flow_da.time.values.min(), stage_da.time.values.min() )
        tN=min(flow_da.time.values.max(), stage_da.time.values.max() )
        common_time=np.arange(t0,tN,np.timedelta64(15*60,'s'))

        Q_int = np.interp( utils.to_dnum(common_time),
                           utils.to_dnum(flow_da.time.values),flow_da.values)
        h_int = np.interp( utils.to_dnum(common_time),
                           utils.to_dnum(stage_da.time.values), stage_da.values)

        # isolate the tidal component by removing the 40 h lowpass

        Q_int_bar=filters.lowpass( Q_int, cutoff=40, dt=0.25 )
        h_int_bar=filters.lowpass( h_int, cutoff=40, dt=0.25 )
        Q_tidal=Q_int - Q_int_bar
        h_tidal=h_int - h_int_bar

        Qh=filters.lowpass(Q_tidal*h_tidal, cutoff=40, dt=0.25)

        pad=np.timedelta64(40*3600,'s')
        sel=(common_time>common_time[0]+pad) & (common_time<common_time[-1]-pad)

        axs[0].plot(flow_da.time, flow_da, label='Q %s' % src)
        axs[0].plot(common_time[sel], Q_tidal[sel], label='Q_tidal %s' % src)
        axs[1].plot(stage_da.time, stage_da, label='h %s' % src)
        axs[1].plot(common_time[sel], h_tidal[sel], label='h_tidal %s' % src)
        axs[2].plot(common_time[sel], Qh[sel], label='Qh %s' % src)

    for ax in axs:
        ax.legend()
    xxyy = axs[2].axis()
    axs[2].axis(ymax=max(xxyy[3], 0))

    axs[0].set_title(flow_station_name)
    return fig
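
The E ~ <Q'h'> construction above is: lowpass each series to get the subtidal part, subtract to get the tidal anomalies, then lowpass the product of the anomalies to get a tidally averaged flux. A minimal sketch with synthetic data (the 15 min step and 40 h cutoff mirror the function; the M2 signals are fabricated for illustration):

import numpy as np
from stompy import filters

dt_h = 0.25                            # 15-minute samples, in hours
t = np.arange(0, 30*24, dt_h)          # 30 days of time, in hours
omega = 2*np.pi/12.42                  # M2 frequency, rad/hour
Q = 500 + 300*np.cos(omega*t)          # flow with a subtidal mean
h = 1.0 + 0.8*np.cos(omega*t - 0.5)    # stage, lagging the flow

# tidal anomalies: remove the 40 h lowpassed (subtidal) component
Q_tidal = Q - filters.lowpass(Q, cutoff=40, dt=dt_h)
h_tidal = h - filters.lowpass(h, cutoff=40, dt=dt_h)

# tidally averaged flux; analytically 0.5*300*0.8*cos(0.5) ~ 105
Qh = filters.lowpass(Q_tidal*h_tidal, cutoff=40, dt=dt_h)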
Example #2
def lowpass(data):
    # Padding with the first/last value differs from the approach in
    # waq_scenario, but behaves better at the ends.
    flow_padded = np.concatenate((lp_pad * data[0], data, lp_pad * data[-1]))
    lp_flows = filters.lowpass(flow_padded, cutoff=lp_secs, dt=dt_secs)
    lp_flows = lp_flows[n_pad:-n_pad]  # trim the pad
    return lp_flows
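
The free variables lp_pad, lp_secs, dt_secs and n_pad come from module scope. A sketch of what that assumed context might look like (the specific values are illustrative, not from the original module):

import numpy as np
from stompy import filters

dt_secs = 900.                    # 15-minute data
lp_secs = 40*3600.                # 40-hour cutoff
n_pad = int(3*lp_secs/dt_secs)    # pad several cutoff periods at each end
lp_pad = np.ones(n_pad)           # scaled by data[0]/data[-1] in lowpass()

t = np.arange(0, 30*86400, dt_secs)
flow_lp = lowpass(100 + 50*np.sin(2*np.pi*t/(12.42*3600)))  # subtidal flow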
Example #3
def lp(x):
    x = utils.fill_invalid(x)
    dn = utils.to_dnum(t)
    cutoff = 36 / 24.
    x_lp = filters.lowpass(x, dn, cutoff=cutoff)
    mask = (dn < dn[0] + 2 * cutoff) | (dn > dn[-1] - 2 * cutoff)
    x_lp[mask] = np.nan
    return x_lp
Example #4
def lp(x):
    x = utils.fill_invalid(x)
    dn = utils.to_dnum(t)
    # 36 h lowpass cutoff, in days; results within 2*cutoff days of the
    # start/end of the datenums are edge-affected and masked below.
    cutoff = 36 / 24.
    x_lp = filters.lowpass(x, dn, cutoff=cutoff)
    mask = (dn < dn[0] + 2 * cutoff) | (dn > dn[-1] - 2 * cutoff)
    x_lp[mask] = np.nan
    return x_lp
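
Both copies of lp() close over a shared time vector t, and the mask discards 2*cutoff days at each end, where the filtered values are contaminated by edge effects. A usage sketch (the hourly t and synthetic stage are illustrative):

import numpy as np

# hourly timestamps over 20 days; lp() picks this up from enclosing scope
t = np.arange(np.datetime64('2016-01-01'),
              np.datetime64('2016-01-21'), np.timedelta64(1, 'h'))

stage = 1.5*np.cos(2*np.pi*np.arange(len(t))/12.42)  # ~12.42 h "tide"
stage_lp = lp(stage)  # near zero, except for the NaN-masked ends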
Example #5
def lowpass_daily(data):
    """
    Replicate as much as possible the lowpass from lowpass_wy2013c, but
    applied to daily data.
    """
    flow_padded=np.concatenate( ( daily_pad, 
                                  data,
                                  daily_pad) )
    lp_flows=filters.lowpass(flow_padded,
                             cutoff=lp_secs,dt=86400.)
    lp_flows=lp_flows[npad:-npad] # trim the pad
    return lp_flows
Example #6
import re

import numpy as np
import xarray as xr
from stompy import filters


def decay_metrics(test_ds,
                  ref_ds,
                  t_slc,
                  cell_sel,
                  lp_hours_ref=36,
                  tracer_pattern='age.*'):
    """
    test_ds: dataset with age1..agen fields, each with dimensions time,face.
    ref_ds: same, but the "correct" data.

    t_slc: subset of times to use
    cell_sel: subset of cells to use
    lp_hours_ref: lowpass cutoff in hours for the reference data.
    tracer_pattern: a regular expression for which tracers in the datasets will be considered
    """
    tracers = []
    for v in test_ds.variables:
        if re.match(tracer_pattern, v) and (v in ref_ds):
            tracers.append(v)

    score_per_tracer = []

    for tracer in tracers:
        if lp_hours_ref is not None:
            ref_tracer_full = ref_ds[tracer].isel(face=cell_sel).values
            dt_s = np.median(np.diff(ref_ds.time.values)) / np.timedelta64(
                1, 's')
            ref_tracer_lp = filters.lowpass(ref_tracer_full,
                                            cutoff=lp_hours_ref * 3600,
                                            dt=dt_s,
                                            axis=0)
            ref_tracers = xr.DataArray(ref_tracer_lp[t_slc, :],
                                       dims=['time', 'face'])
        else:
            ref_tracers = ref_ds[tracer].isel(time=t_slc, face=cell_sel)
        test_tracers = test_ds[tracer].isel(time=t_slc, face=cell_sel)
        assert np.all(np.isfinite(ref_tracers.values))
        assert np.all(np.isfinite(test_tracers.values))

        #wilmott=utils.model_skill(test_tracers.values.ravel(), ref_tracers.values.ravel() )
        #score_per_tracer.append(wilmott)
        test_vals = test_tracers.values.ravel()
        metrics = calc_metrics(test_vals, ref_tracers.values.ravel())
        metrics['nan_fraction'] = np.isnan(test_vals).sum() / float(
            len(test_vals))
        score_per_tracer.append(metrics)
    res = {}
    for k in score_per_tracer[0]:
        res[k] = np.mean([m[k] for m in score_per_tracer])
    return res
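
A hypothetical invocation: calc_metrics() is not defined in this snippet and is assumed to return a dict of scalar scores, and the datasets here are synthetic:

import numpy as np
import xarray as xr

times = np.arange(np.datetime64('2016-01-01'),
                  np.datetime64('2016-01-11'), np.timedelta64(1, 'h'))
age = np.linspace(0, 5, len(times))[:, None]*np.ones((1, 50))
ref_ds = xr.Dataset({'age1': (('time', 'face'), age)},
                    coords={'time': times})
test_ds = xr.Dataset({'age1': (('time', 'face'), age + 0.1)},
                     coords={'time': times})

scores = decay_metrics(test_ds, ref_ds,
                       t_slc=slice(72, -72),  # drop lowpass edge effects
                       cell_sel=slice(None),  # all cells
                       lp_hours_ref=36)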
Example #7
    def write_data(self, mdu, feature, var_name, base_fn):
        tides = noaa_coops.coops_dataset_product(self.station,
                                                 'water_level',
                                                 mdu.time_range()[1],
                                                 mdu.time_range()[2],
                                                 days_per_request='M',
                                                 cache_dir=cache_dir)
        tide = tides.isel(station=0)
        water_level = utils.fill_tidal_data(tide.water_level) + self.z_offset
        # IIR butterworth.  Nicer than FIR, with minor artifacts at ends
        # 3 hours, defaults to 4th order.
        water_level[:] = filters.lowpass(water_level[:].values,
                                         utils.to_dnum(water_level.time),
                                         cutoff=3. / 24)

        ref_date = mdu.time_range()[0]
        elapsed_minutes = (tide.time.values - ref_date) / np.timedelta64(
            60, 's')

        # just write a single node
        tim_fn = base_fn + "_0001.tim"
        data = np.c_[elapsed_minutes, water_level]
        np.savetxt(tim_fn, data)
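
The .tim file written here is plain whitespace-delimited text with one row per time step: elapsed minutes since the MDU reference date, then the water level. For example, it can be read back with (file name hypothetical):

import numpy as np

minutes, water_level = np.loadtxt('oceanbnd_0001.tim', unpack=True)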
Example #8
def add_ocean(run_base_dir,
              rel_bc_dir,
              run_start,
              run_stop,
              ref_date,
              static_dir,
              grid,
              old_bc_fn,
              all_flows_unit=False,
              lag_seconds=0.0,
              factor=1.0):
    """
    Ocean:
    Silvia used:
        Water level data from station 46214 (apparently from Yi Chao's ROMS?)
          no spatial variation
        Maybe salinity from Yi Chao ROMS?  That's what the thesis says, but the
        actual inputs look like constant 33
    Here I'm using data from NOAA Point Reyes.
        waterlevel, water temperature from Point Reyes.
    When temperature is not available, use constant 15 degrees

    factor: a scaling factor applied to tide data to adjust amplitude around MSL.
    lag_seconds: to shift ocean boundary condition in time, a positive value 
    applying it later in time.
    """
    # get a few extra days of data to allow for transients in the low pass filter.
    pad_time = np.timedelta64(5, 'D')

    if 1:
        if 0:  # This was temporary, while NOAA had an issue with their website.
            log.warning("TEMPORARILY USING FORT POINT TIDES")
            tide_gage = "9414290"  # Fort Point
        else:
            tide_gage = "9415020"  # Pt Reyes

        if common.cache_dir is None:
            tides_raw_fn = os.path.join(run_base_dir, rel_bc_dir,
                                        'tides-%s-raw.nc' % tide_gage)
            if not os.path.exists(tides_raw_fn):
                tides = noaa_coops.coops_dataset(
                    tide_gage,
                    run_start - pad_time,
                    run_stop + pad_time, ["water_level", "water_temperature"],
                    days_per_request=30)

                tides.to_netcdf(tides_raw_fn, engine='scipy')
            else:
                tides = xr.open_dataset(tides_raw_fn)
        else:
            # rely on caching within noaa_coops
            tides = noaa_coops.coops_dataset(
                tide_gage,
                run_start - pad_time,
                run_stop + pad_time, ["water_level", "water_temperature"],
                days_per_request='M',
                cache_dir=common.cache_dir)
    # Those retain station as a dimension of length 1 - drop that dimension
    # here:
    tides = tides.isel(station=0)

    # Fort Point mean tide range is 1.248m, vs. 1.193 at Point Reyes.
    # apply rough correction to amplitude.
    # S2 phase 316.2 at Pt Reyes, 336.2 for Ft. Point.
    # 20 deg difference for a 12h tide, or 30 deg/hr, so
    # that's a lag of 40 minutes.
    # First go I got this backwards, and wound up with lags
    # at Presidio and Alameda of 4600 and 4400s.  That was
    # with lag_seconds -= 40*60.
    # Also got amplitudes 13% high at Presidio, so further correction...
    if tide_gage == "9414290":
        #
        factor *= 1.193 / 1.248 * 1.0 / 1.13
        lag_seconds += 35 * 60.

    if 1:
        # Clean that up, fabricate salinity
        water_level = utils.fill_tidal_data(tides.water_level)

        # IIR butterworth.  Nicer than FIR, with minor artifacts at ends
        # 3 hours, defaults to 4th order.
        water_level[:] = filters.lowpass(water_level[:].values,
                                         utils.to_dnum(water_level.time),
                                         cutoff=3. / 24)

        if 1:  # apply factor:
            msl = 2.152 - 1.214  # MSL(m) - NAVD88(m) for Point Reyes
            if factor != 1.0:
                log.info("Scaling tidal forcing amplitude by %.3f" % factor)
            water_level[:] = msl + factor * (water_level[:].values - msl)

        if 1:  # apply lag
            if lag_seconds != 0.0:
                # sign:  if lag_seconds is positive, then I want the result
                # for time.values[0] to come from original data at time.values[0]-lag_seconds
                if 0:  # Why interpolate here? Just alter the timebase.
                    water_level[:] = np.interp(
                        utils.to_dnum(tides.time.values),
                        utils.to_dnum(tides.time.values) -
                        lag_seconds / 86400., tides.water_level.values)
                else:
                    # Adjust time base directly.
                    # (int() because np.timedelta64 rejects float seconds)
                    water_level.time.values[:] = (water_level.time.values +
                                                  np.timedelta64(int(lag_seconds), 's'))

        if 'water_temperature' not in tides:
            log.warning(
                "Water temperature was not found in NOAA data.  Will use constant 15"
            )
            water_temp = 15 + 0 * tides.water_level
            water_temp.name = 'water_temperature'
        else:
            # fill_data: assumed in-place gap-filling helper from this module
            fill_data(tides.water_temperature)
            water_temp = tides.water_temperature

        if all_flows_unit:
            print("-=-=-=- USING 35 PPT WHILE TESTING! -=-=-=-")
            salinity = 35 + 0 * water_level
        else:
            salinity = 33 + 0 * water_level
        salinity.name = 'salinity'

    if 1:  # Write it all out
        # Add a stanza to FlowFMold_bnd.ext:
        src_name = 'Sea'

        src_feat = dio.read_pli(os.path.join(static_dir,
                                             '%s.pli' % src_name))[0]

        forcing_data = [('waterlevelbnd', water_level, '_ssh'),
                        ('salinitybnd', salinity, '_salt'),
                        ('temperaturebnd', water_temp, '_temp')]

        for quant, da, suffix in forcing_data:
            with open(old_bc_fn, 'at') as fp:
                lines = [
                    "QUANTITY=%s" % quant,
                    "FILENAME=%s/%s%s.pli" % (rel_bc_dir, src_name, suffix),
                    "FILETYPE=9", "METHOD=3", "OPERAND=O", ""
                ]
                fp.write("\n".join(lines))

            feat_suffix = dio.add_suffix_to_feature(src_feat, suffix)
            dio.write_pli(
                os.path.join(run_base_dir, rel_bc_dir,
                             '%s%s.pli' % (src_name, suffix)), [feat_suffix])

            # Write the data:
            columns = ['elapsed_minutes', da.name]

            df = da.to_dataframe().reset_index()
            df['elapsed_minutes'] = (df.time.values -
                                     ref_date) / np.timedelta64(60, 's')

            if len(feat_suffix) == 3:
                node_names = feat_suffix[2]
            else:
                node_names = [""] * len(feat_suffix[1])

            for node_idx, node_name in enumerate(node_names):
                # if no node names are known, create the default name of <feature name>_0001
                if not node_name:
                    node_name = "%s%s_%04d" % (src_name, suffix, 1 + node_idx)

                tim_fn = os.path.join(run_base_dir, rel_bc_dir,
                                      node_name + ".tim")
                df.to_csv(tim_fn,
                          sep=' ',
                          index=False,
                          header=False,
                          columns=columns)
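
The Fort Point corrections above are simple harmonic arithmetic. A sketch reproducing them from the constants quoted in the comments (note the code applies 35 minutes rather than the derived 40, presumably after tuning against the Presidio/Alameda lags):

# amplitude: ratio of mean tide ranges, times the empirical 13% reduction
factor_correction = (1.193/1.248)*(1.0/1.13)    # ~0.846

# S2 phases differ by 336.2 - 316.2 = 20 deg; a 12 h constituent advances
# 360/12 = 30 deg/hr, so the phase difference is a 40-minute lag
lag_minutes = (336.2 - 316.2)/(360./12.)*60.    # 40.0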
Example #9
        coastal_dt = np.median(np.diff(coastal_boundary_data.time.values))
        coastal_dt_h = coastal_dt / np.timedelta64(3600, 's')

        if 0:  # Add Coastal model zeta to waterlevel
            coastal_water_level = coastal_boundary_data.zeta.isel(boundary=ji)

            if coastal_dt_h < 12:
                # 36h cutoff with 6h ROMS data
                # Note that if the HYCOM fetch switches to finer resolution,
                # it's unclear whether we want to filter it further or not, since
                # it will be non-tidal.
                # This will have some filtfilt trash at the end, probably okay
                # at the beginning
                coastal_water_level.values[:] = filters.lowpass(
                    coastal_water_level.values,
                    cutoff=36.,
                    order=4,
                    dt=coastal_dt_h)

            # As far as I know, ROMS and HYCOM zeta are relative to MSL
            coastal_interp = np.interp(utils.to_dnum(water_level.time),
                                       utils.to_dnum(coastal_water_level.time),
                                       coastal_water_level.values)
            water_level.values += coastal_interp

        if 1:  # salinity, temperature
            if 1:  # proper spatial variation:
                salinity_3d = coastal_boundary_data.isel(boundary=ji).salt
                temperature_3d = coastal_boundary_data.isel(boundary=ji).temp
            else:  # spatially constant
                salinity_3d = coastal_boundary_data.salt.mean(dim='boundary')
Example #10
import pandas as pd
from stompy import utils, filters

# iterate over all station pairs
for station_pair in station_pairs:
    (name1, dat1), (name2, dat2) = station_pair
    print(name1[1], name2[1])

    # fraction of Sac River water at upstream and downstream station
    # frac1, frac2 = dat1['frac'], dat2['frac']

    # calculate age difference (d_age)
    age1, age2 = dat1['age'], dat2['age']
    d_age = age2 - age1
    d_age = d_age.dropna('time')
    # lowpass d_age
    d_age.values = filters.lowpass(d_age.values,
                                   utils.to_dnum(d_age.time),
                                   cutoff=cutoff)
    t0 = d_age.time[0]  # start time
    # remove starting spin-up time from d_age
    d_age = d_age[d_age.time > t0 + pd.to_timedelta(spin_up, 'd')]
    # calculate times to grab second station nitrate/BGC values (offset by d_age)
    t2s = [
        t + pd.to_timedelta(da, 'd')
        for da, t in zip(d_age.values, d_age.time.values)
    ]

    # dataframe for joining all observations and making correlograms, etc.
    df = pd.DataFrame(
        data={  # 'frac_1': frac1.interp(time=d_age.time).values,
            # 'frac_2': frac2.interp(time=d_age.time).values,
            'd_age': d_age.values
        })
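
The t2s times would then be used to sample the downstream record at an age-lagged time, so each pair compares approximately the same parcel of water. A hypothetical continuation (the 'no3' key is an assumption; only 'age' and 'frac' appear above):

# sample station-2 nitrate at the age-offset times, one value per d_age time
no3_2_lagged = dat2['no3'].interp(time=t2s).values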
Example #11
                                  end_date=period[1],
                                  products=[60, 65],
                                  cache_dir='cache')

##
from stompy import filters

# separate into tidal, subtidal
for ds in [decker, riovista]:
    da_fill = utils.fill_tidal_data(ds['height_gage'])
    ds['ftime'] = ('ftime', ), da_fill.time
    ds['stage_fill'] = ('ftime', ), da_fill.values
    ds['stage_lp'] = ('ftime', ), filters.lowpass(
        ds['stage_fill'].values,
        (ds.ftime.values - ds.ftime.values[0]) / np.timedelta64(1, 's'),
        cutoff=40 * 3600)
    ds['stage_hp'] = ds.stage_fill - ds.stage_lp

##
# Find the tidal lag:

lag_hp = utils.find_lag_xr(decker.stage_hp, riovista.stage_hp)

# Decker leads Rio Vista by 1738s
lag_hp_s = lag_hp / np.timedelta64(1, 's')

# and subtidal lag is almost exactly 2 hours.  Weird.
lag_lp = utils.find_lag_xr(decker.stage_lp, riovista.stage_lp)
lag_lp_s = lag_lp / np.timedelta64(1, 's')
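
A quick way to sanity-check the sign convention of find_lag_xr (the Decker/Rio Vista comment above implies a positive lag when the first series leads) is to feed it synthetic series with a known offset:

import numpy as np
import xarray as xr
from stompy import utils

t = np.arange(np.datetime64('2016-01-01'),
              np.datetime64('2016-01-15'), np.timedelta64(6, 'm'))
sec = (t - t[0])/np.timedelta64(1, 's')
a = xr.DataArray(np.cos(2*np.pi*sec/(12.42*3600)), coords=[('time', t)])
b = xr.DataArray(np.cos(2*np.pi*(sec - 1800)/(12.42*3600)),
                 coords=[('time', t)])

lag = utils.find_lag_xr(a, b)        # expect ~ +1800 s: a leads b
lag_s = lag/np.timedelta64(1, 's')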