Example #1
def graph(site_name,field):
    default_interval=datetime.timedelta(hours=96)
    site=Site.by_name(site_name)
    last=site.last_reading()

    # stop=datetime.datetime.utcnow()
    stop=last.timestamp[0].to_pydatetime()
    start=stop - default_interval

    kwargs=dict(data_start_time=utils.to_unix(start),
                data_stop_time=utils.to_unix(stop))

    kwargs['last_json']=last.iloc[0].to_json()
    kwargs['site_name']=site_name
    kwargs['field']=field
    # really ought to get this from the database
    if field.startswith('temp1'):
        unit='°F'
    elif field.startswith('press'):
        unit='Pa'
    elif field.startswith('pm'):
        unit='&mu;g/m<sup>3</sup>'
    else:
        unit="n/a"
        
    kwargs['unit']=unit

    kwargs['columns']=[ col for col in last.columns if col!='timestamp']

    return render_template('dygraph2.html',**kwargs)
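Every example in this listing leans on utils.to_unix from the project's utility module. A minimal sketch of what it is assumed to do here, i.e. convert python datetimes or numpy datetime64 values (scalar or array) to float seconds since the Unix epoch; the real helper may differ:

import numpy as np

def to_unix_sketch(t):
    # Hypothetical stand-in for utils.to_unix: accepts a datetime.datetime,
    # np.datetime64, or array of datetime64, and returns seconds since
    # 1970-01-01 UTC as a float or float array.
    t64=np.asarray(t, dtype='datetime64[us]')
    secs=(t64 - np.datetime64('1970-01-01T00:00:00','us')) / np.timedelta64(1,'s')
    return float(secs) if np.ndim(t)==0 else secs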
Example #2
    def scan_ncs(self):
        self.nc_catalog=np.zeros( len(self.ncs),
                                  dtype=[('start_t','f8'),
                                         ('end_t','f8')] )
        for nc_i,nc in enumerate(self.ncs):
            self.nc_catalog['start_t'][nc_i] = utils.to_unix(nc.time[0])
            self.nc_catalog['end_t'][nc_i]   = utils.to_unix(nc.time[-1])
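The catalog above could then be searched for the file covering a given unix time; a hedged sketch (find_nc is a hypothetical helper, not part of the original class):

import numpy as np

def find_nc(self, t_unix):
    # Hypothetical: index of the first nc whose [start_t, end_t] span
    # contains t_unix, or None if no file covers it.
    hits=np.nonzero((self.nc_catalog['start_t']<=t_unix) &
                    (self.nc_catalog['end_t']>=t_unix))[0]
    return hits[0] if len(hits) else None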
Example #3
def downsample(raw,period_secs):
    """
    raw: pandas DataFrame, with a datetime64 time column
    """
    timestamp=utils.to_unix(raw.time.values)
    periods=timestamp//period_secs
    
    #agg=raw.groupby(periods).agg( [np.min,np.mean,np.max] )
    agg=raw.groupby(periods).agg( [np.mean] )

    # drop multiindex by compounding the names
    new_cols=[]
    for col in agg.columns:
        if col[1]=='mean':
            new_col=col[0]
        else:
            new_col='_'.join(col)
        new_cols.append(new_col)

    agg.columns=new_cols

    unix_time=agg.index.values*period_secs

    agg['time']=np.datetime64("1970-01-01 00:00")+np.timedelta64(1,'s')*unix_time

    # reorder to get time first (robust to whether the original 'time'
    # column survived the aggregation or was re-added above)
    new_cols=['time'] + [col for col in agg.columns if col!='time']
    agg=agg[new_cols]
    return agg
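A hypothetical call, just to show the expected input (column names and values are made up, and utils.to_unix is assumed to return float unix seconds as sketched above):

import numpy as np
import pandas as pd

raw=pd.DataFrame({'time': np.datetime64('2020-01-01')+np.timedelta64(1,'s')*np.arange(600),
                  'temp': 20.0+0.01*np.arange(600)})
binned=downsample(raw, period_secs=60)   # one row per 60 s bin, mean of each column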
Example #4
def graph2():
    default_interval=datetime.timedelta(hours=96)
    site_name='rockridge'
    site=Site.by_name(site_name)
    last=site.last_reading()

    # stop=datetime.datetime.utcnow()
    stop=last.timestamp[0].to_pydatetime()
    start=stop - default_interval

    kwargs=dict(data_start_time=utils.to_unix(start),
                data_stop_time=utils.to_unix(stop))

    kwargs['last_json']=last.iloc[0].to_json()
    kwargs['site_name']=site_name
    return render_template('dygraph2.html',**kwargs)
Example #5
def parse_txts(txt_fns,
               pressure_range=[110e3, 225e3],
               name=None,
               beacon='auto',
               split_on_clock_change=True):
    """
    Parse a collection of txt files, grab detections and optionally
    clock resync events.
    beacon: 'auto' set beacon tag id from most commonly received tag id.
       None: don't set a beacon tag id
       else: use the provided value as the beacon id.
    """
    txt_fns = list(txt_fns)
    txt_fns.sort()
    dfs = []
    for fn in utils.progress(txt_fns):
        df = parse_txt(fn)
        df['fn'] = fn
        dfs.append(df)
    df = pd.concat(dfs, sort=True)  # sort columns to get alignment
    df = df.reset_index()  # will use this for slicing later

    n_unknown = (df['type'] == 'unknown').sum()
    if n_unknown > 0:
        # So far, this is just corrupt lines.  Might be able to
        # salvage the second part of corrupt lines, but it's
        # such a small number, and the second part is usually
        # just a NODE bootup msg anyway.
        log.warning("%d unknown line types in txt files" % n_unknown)

    # Do we need epoch? yes, it's used downstream
    epoch = utils.to_unix(df['time'].values)
    # It will get usec added, so be sure it's just the integer portion.
    assert np.all((epoch % 1.0)[np.isfinite(epoch)] == 0)
    df['epoch'] = epoch

    # Add microseconds to timestamps when t_usec is available
    sel = np.isfinite(df.t_usec.values)
    df.loc[sel, 'time'] = (df.loc[sel, 'time'] +
                           (df.loc[sel, 't_usec'].values * 1e6).astype(np.int32) *
                           np.timedelta64(1, 'us'))

    df_det = df[df['type'] == 'DET']

    # clean up time:
    bad_time = (df_det.time < np.datetime64('2018-01-01')) | (
        df_det.time > np.datetime64('2022-01-01'))
    df2 = df_det[~bad_time].copy()

    # clean up temperature:
    df2.loc[df2.temp < -5, 'temp'] = np.nan
    df2.loc[df2.temp > 35, 'temp'] = np.nan

    # clean up pressure
    if pressure_range is not None:
        df2.loc[df2.pressure < pressure_range[0], 'pressure'] = np.nan
        df2.loc[df2.pressure > pressure_range[1], 'pressure'] = np.nan

    # trim to first/last valid pressure
    valid_idx = np.nonzero(np.isfinite(df2.pressure.values))[0]
    df3 = df2.iloc[valid_idx[0]:valid_idx[-1] + 1, :].copy()

    df3['tag'] = [s.strip() for s in df3.tag.values]

    # narrow that to the fields we actually care about:
    fields = [
        'rx_serial', 'tag', 'time', 't_usec', 'epoch', 'corrQ', 'nbwQ',
        'pressure', 'temp', 'datetime_str', 'fn'
    ]

    ds = xr.Dataset.from_dataframe(df3.loc[:, fields])
    ds['usec'] = ds['t_usec']

    ds['cf2_filename'] = None

    if beacon == 'auto':
        beacon_id = df3.groupby('tag').size().sort_values().index[-1]
        ds['beacon_id'] = beacon_id
        ds['beacon_id'].attrs['source'] = 'received tags'
        print("auto_beacon: beacon_id inferred as ", beacon_id)
        # import pdb
        # pdb.set_trace()
    elif beacon is not None:
        ds['beacon_id'] = beacon
        ds['beacon_id'].attrs['source'] = 'Specified to parse_txts'

    ds.attrs['pressure_range'] = pressure_range

    if name is not None:
        ds['name'] = (), name

    if split_on_clock_change:
        # dice_by_clock_resets got crazy complicated.  Rather than
        # re-living that experience, try something simple here,
        # but know that we may have to come back and refactor this
        # with dice_by_clock_resets.
        sync_sel = df['sync_status'].values == 1.0
        sync_idxs = df.index.values[sync_sel]  # of course these won't actually be in df3!

        nonmono_sel = np.diff(df3.time.values) < np.timedelta64(0)
        # Should  mean that each item is the first index of a new chunk.
        nonmono_idxs = df3.index.values[1:][nonmono_sel]

        all_breaks = np.unique(np.concatenate((sync_idxs, nonmono_idxs)))
        all_breaks = np.r_[0, all_breaks, len(df)]

        # as indices into ds.index
        ds_breaks = np.searchsorted(ds.index.values, all_breaks)

        diced = []
        for start, stop in zip(ds_breaks[:-1], ds_breaks[1:]):
            if stop > start:
                diced.append(ds.isel(index=slice(start, stop)))
            else:
                # often will have a slice that's empty.
                pass
        return diced
    else:
        return ds
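A hypothetical invocation (the glob pattern and receiver name are made up):

import glob

chunks=parse_txts(glob.glob('rx001/*.txt'), name='rx001', beacon='auto')
# With split_on_clock_change=True (the default) this returns a list of
# xr.Dataset chunks split at clock resyncs / time reversals; pass
# split_on_clock_change=False to get a single Dataset instead.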
Example #6
    def write_inlet_morpho(self):
        apply_to_nodes=True # patched schism -- applies dumping as dz at nodes when index is negative
        
        ds=self.prep_qcm_data()

        # Too lazy to push this into shapefile
        morph_poly=wkt.loads(self.morph_wkt)

        # Update dumping every 15 min
        dump_dt_s=900
        sim_seconds=(self.run_stop-self.run_start)/np.timedelta64(1,'s')
        # start at t=0 but we won't output dumping at t=0
        dump_t=np.arange(0,sim_seconds+dump_dt_s,dump_dt_s)

        # target elevation
        z_thalweg=np.interp( utils.to_unix(self.run_start)+dump_t,
                             utils.to_unix(ds.time.values),ds['z_thalweg'].values)

        if apply_to_nodes:
            self.morph_nodes=self.grid.select_nodes_intersecting(morph_poly,as_type='indices')
            #nodes=np.unique( np.concatenate([self.grid.cell_to_nodes(c) for c in elts]))
            nodes=self.morph_nodes
            area=np.ones(len(nodes)) # direct elevation, so no area adjustment
            elts1=-(nodes+1) # negate and to 1-based
            z_bed=self.grid.nodes['node_z_bed'][nodes]
        else:
            self.morph_elts=self.grid.select_cells_intersecting(morph_poly,as_type='indices') 
            elts=self.morph_elts
            area=self.grid.cells_area()[elts]
            # A little tricky to figure out ground elevation -- bathy is at nodes.
            # take average to estimate element bathy
            z_cells=self.grid.interp_node_to_cell(self.grid.nodes['node_z_bed'])
            z_bed=z_cells[elts]
            elts1=1+elts # 1-based
            
        # For now, the same z_target for all elements
        z_target=np.zeros( (len(dump_t),len(elts1)), np.float64)
        z_target[:,:]=np.maximum( z_thalweg[:,None], # broadcast the same target over all elements
                                  z_bed[None,:] ) # broadcast bathy over all time
        z_target=np.round(z_target, 3) # Don't care about sub-mm variation; it just makes the files bigger.

        z_target[0,:]=z_bed # initial condition

        dz_target=np.diff(z_target,axis=0)

        with open(os.path.join(self.run_dir,'sed_dump.in'),'wt') as fp:
            fp.write("from QCM, update elevation every %s s\n"%dump_dt_s)
            # Possible bug in sediment.F90 when the simulation starts on or before
            # the time of the first dump record.
            # Here skip first dump_t, which is the initial condition, such that
            # the dumping at time t reflects dz calculated over the preceding time
            # step of dump_dt_s
            for ti,t in enumerate(dump_t[1:]):
                dzs=dz_target[ti,:]
                active=(dzs!=0.0)
                if not np.any(active):
                    continue
                active_elts1=elts1[active]
                fp.write('%g %d\n'%(t,len(active_elts1))) # t_dump, ne_dump
                dVs=(dzs*area)[active]

                for elt1,dV in zip(active_elts1,dVs):
                    # read(18,*)(ie_dump(l),vol_dump(l),l=1,ne_dump)
                    # to 1-based
                    fp.write('%d %.4f\n'%(elt1,dV))
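For orientation, a made-up sketch of the first records this writes in the node-based case, where indices are negative 1-based node numbers and, with area=1, the "volume" column is really a dz in meters:

# sed_dump.in (illustrative values only):
#   from QCM, update elevation every 900 s
#   900 2
#   -12 0.0030
#   -57 -0.0020
#   1800 1
#   -12 0.0010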
Example #7
    # Don't get too big for our britches, just stick a second node 50m east
    # if the incoming data is a point
    if 1: #-- Write a PLI file
        pnts=np.array( [[stn_ds.utm_x,stn_ds.utm_y],
                        [stn_ds.utm_x + 50.0,stn_ds.utm_y]] )
        pli_data=[ (src_name,pnts) ]
        pli_fn=opj(out_dir,"%s.pli"%src_name)

        dio.write_pli(pli_fn,pli_data)
        pli_files.append(pli_fn)

    if 1: #-- Write a BC file

        df=stn_ds.to_dataframe().reset_index()

        df['unix_time']=utils.to_unix(df.time.values)
        
        bc_fn=opj(out_dir,"%s.bc"%src_name)
        with open(bc_fn,'wt') as fp:
            # I think the 0001 has to be there, as it is used to
            # specify different values at different nodes of the pli.
            # It seems better to assume that incoming data is a daily average
            # and keep it constant across the day: block-from means that the
            # timestamp of a datum marks the beginning of a period, and the
            # value is held constant until the next timestamp.
            # How about unix epoch for the time units?
            fp.write("[forcing]\n")
            # This Name needs to match the name in the pli
            fp.write("Name               = %s_0001\n"%src_name)
            fp.write("Function           = timeseries\n")
            fp.write("Time-interpolation = block-from\n") # or linear, block-to 
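A small illustration (separate from the model input above) of that block-from convention, where each value holds from its own timestamp until the next record:

import numpy as np

t_data=np.array([0.0, 86400.0, 172800.0])   # unix seconds, daily records
v_data=np.array([10.0, 12.0, 11.0])         # e.g. daily-average flows

def block_from(t, t_data, v_data):
    # most recent record whose timestamp is <= t
    i=np.searchsorted(t_data, t, side='right')-1
    return v_data[np.clip(i, 0, len(v_data)-1)]

block_from(90000.0, t_data, v_data)          # -> 12.0, held from t=86400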
Example #8
    def set_current_nc(self, nc_i):
        self.current_nc_idx = nc_i
        self.current_nc = self.ncs[self.current_nc_idx]
        self.nc_t_unix = utils.to_unix(self.current_nc.time.values)
        self.nc_time_i = -999
Example #9
def df_post(df):
    """ add cms, and unix time, in place.
    """
    df['flow_cms'] = 0.028316847 * df.flow_cfs
    df['unix_time'] = utils.to_unix(df.date.values)
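Hypothetical usage, assuming a DataFrame with 'date' (datetime64) and 'flow_cfs' columns:

import pandas as pd

df=pd.DataFrame({'date': pd.to_datetime(['2021-06-01','2021-06-02']),
                 'flow_cfs': [1200.0, 950.0]})
df_post(df)   # adds flow_cms and unix_time columns in place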
Example #10
ds2=ds.rename({'ucxa':'cell_east_velocity',
               'ucya':'cell_north_velocity'})

##

ptm_fwd=Streamlines(ds2,grid=g)
ptm_rev=Streamlines(ds2,grid=g,reverse=True)

##

# debugging a crash
parts=xyz_input[::30,:2][518:519]
ptm=Streamlines(ds2,grid=g,record_dense=True)
ptm.add_particles(x=parts)
t0=np.datetime64("2000-01-01 00:00")
t_out=utils.to_unix(t0+np.timedelta64(10,'s')*np.arange(31))
ptm.set_time(t_out[0])
ptm.integrate(t_out)

# 946685100.0
locs=np.array( [coord for coord,time in ptm.dense] )

##

plt.figure(1).clf()
ax=plt.gca()
ptm.g.plot_edges(clip=clip,color='k',lw=0.4,ax=ax)

ax.plot(locs[:,0,0],locs[:,0,1],'g-o')

##
Example #11
tvalid = utils.fill_invalid(times, axis=-1)
tvalid = utils.fill_invalid(tvalid, axis=1)
tvalid = utils.fill_invalid(tvalid, axis=0)
times = tvalid

### Load in his files, define variables, adjust times to same reference time as USGS data

temp = True

his = nc.MFDataset(path + hisfile)
xcoor = his.variables["station_x_coordinate"][:]
ycoor = his.variables["station_y_coordinate"][:]

# get reference time from the mdu:
t_ref, t_start, t_stop = mdu.time_range()
dref = utils.to_unix(t_ref)  # dt.datetime.strptime(emdu['time','refdate']

dtz = -8 * 60 * 60  # adjust UTC to PST
mtimes = his.variables["time"][:] + dref + dtz
msalt = his.variables["salinity"][:]
if temp:
    mtemp = his.variables["temperature"][:]
mdepth = his.variables["zcoordinate_c"][:]

if xcoor.ndim == 2:
    # some DFM runs add time to coordinates
    xcoor = xcoor[0, :]
    ycoor = ycoor[0, :]

mll = utm_to_ll(np.c_[xcoor, ycoor]).T
Example #12
ax_temp.legend(loc='upper left')

fig.tight_layout()

fig.savefig(os.path.join(fig_dir, 'timeseries-arrival_flow_turb.png'))

##

# And presence, swimming spatially -- look at screen_track_with_hydro
# all_segs=

# Seem to have lost my turbidity plots...

# Add turbidity, flow, doy to df_start
df_start['turb'] = np.interp(df_start.t_mid,
                             utils.to_unix(msd_turb.time.values),
                             msd_turb['turb_lp'].values)
df_start['flow_m3s'] = np.interp(df_start.t_mid,
                                 utils.to_unix(msd_flow.time.values),
                                 msd_flow.flow_m3s)
df_start['velo_ms'] = np.interp(df_start.t_mid,
                                utils.to_unix(msd_velo.time.values),
                                msd_velo.velocity_ms)

df_start['time_mid'] = utils.unix_to_dt64(df_start.t_mid)
df_start['doy'] = df_start.time_mid.dt.dayofyear

# For comparison below, get a velocity calculated (roughly) for the analysis polygon
# This is similar to velo_ms, but has a bit more range
import transport_analysis as ta
reach_velo = ta.calc_reach_velo(df_start.time_mid.values)
Example #13
nc_tri['depth']=('face',),new_depth
nc_tri['surface']=('face',),ds2['s1'].values[mixed_to_tri]
# kludge - hardwire the sign of edgedepth, positive down
nc_tri['edgedepth']=('edge',),-(ds2['NetNode_z'].values[ gtri.edges['nodes'] ].mean(axis=1))

##

ptm=StreamlinesKGS(nc_tri,grid=gtri)
# even that init is slow
# probably a lot of this would be faster by using DFM's unorm.

parts=xyz_input[::100,:2]
ptm.add_particles(x=parts)

t0=np.datetime64("2000-01-01 00:00")
t_out=utils.to_unix(t0+np.timedelta64(30,'s')*np.arange(5))
ptm.set_time(t_out[0])

##
ptm.integrate(t_out)


##

# 946685100.0
locs=np.array( [coord for coord,time in ptm.output] )

##

plt.figure(1).clf()
ax=plt.gca()