Example #1
#
# There are basically two ways of narrowing down the number of files
# processed. You can use the set_time_limits() method of the MultiDBD
# class or you can use DBDPatternSelect. The latter selects files
# according to a pattern, or as a list of files. The select() method
# returns a list of files that match from and until dates, which can
# be used to create a MultiDBD instance using the filenames=[]
# keyword.

# Below, this is put to the test.


import dbdreader

# open some files, using a pattern
# Note that the default location for cache files ($HOME/.dbdreader) is
# overridden.
dbd = dbdreader.MultiDBD(pattern="../data/amadeus*.[st]bd",
                         cacheDir='../data/cac')

# print what parameters are available:
print("we have the following science parameters:")
for i, p in enumerate(dbd.parameterNames['sci']):
    print("%2d: %s" % (i, p))
print("\nand engineering parameters:")
for i, p in enumerate(dbd.parameterNames['eng']):
    print("%2d: %s" % (i, p))

# get the measured depth

tm, depth = dbd.get("m_depth")

max_depth = depth.max()
print("\nmax depth found is %f m" % max_depth)
Example #2
import numpy as np
import dbdreader

try:
    # use my fast private gsw implementation if available
    from fast_gsw import rho as gsw_rho
except ImportError:
    # or fall back on the community one, installable via pip, for example.
    import gsw

    def gsw_rho(C, T, P, lon, lat):
        SP = gsw.SP_from_C(C, T, P)
        # This particular data set was collected in the Baltic.
        SA = gsw.SA_from_SP_Baltic(SP, lon, lat)
        # in-situ density
        rho = gsw.density.rho_t_exact(SA, T, P)
        return rho


dbd = dbdreader.MultiDBD(
    pattern="/home/lucas/gliderdata/subex2016/hd/comet-2016-174-05-00?.[de]bd")

# get_sync() returns the requested parameters interpolated onto the time
# base of the first parameter given.
_tmp = dbd.get_sync(
    "sci_ctd41cp_timestamp",
    "sci_water_pressure m_pitch m_ballast_pumped sci_water_cond sci_water_temp"
    .split())
# keep only samples with a valid (non-zero) CTD timestamp
_, tctd, P, pitch, buoyancy_drive, C, T = _tmp.compress(_tmp[1] > 0, axis=1)

tilt_correction_factor = 0.86
pitch_offset = -0.004
pitch = pitch * tilt_correction_factor + pitch_offset
# gsw expects conductivity in mS/cm and pressure in dbar; dbdreader returns
# S/m and bar, hence the factors of 10.
density = gsw_rho(C * 10, T, P * 10, 15, 54)
# ADCP-derived velocity estimates, interpolated onto the CTD time base
U_adcp = np.interp(tctd, *np.load("../data/U_kalman_datasetIII.npy"))
# mask shallow samples (P < 12 dbar) and climbs (positive pitch)
U_adcp[np.where(np.logical_or(P * 10 < 12, pitch > 0))[0]] = np.nan
np.save("../data/gliderflight_data.npy",
        (tctd, P, pitch, buoyancy_drive, density, U_adcp))
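
# The saved file can be loaded back as a single (6, N) array that unpacks
# row-wise; a minimal sketch:
tctd, P, pitch, buoyancy_drive, density, U_adcp = np.load(
    "../data/gliderflight_data.npy")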
Example #3

# GM is assumed to be a glider model/simulator instance created earlier in
# the original script (not shown in this fragment); numpy and dbdreader
# imports are likewise assumed.
GM.run(dt=1.,
       ndtCPU=4,
       maxSimulationTime=0.13,
       ndtNetCDF=1,
       end_on_surfacing=1)
#GM.save()
d = GM.get("m_depth")
tm = GM.get("m_present_time")
b = GM.get("m_ballast_pumped")
p = GM.get("m_pitch")
pc = GM.get("c_pitch")
bp = GM.get("m_battpos")
bpc = GM.get("c_battpos")
dbd = dbdreader.MultiDBD(
    pattern="/home/lucas/gliderdata/toulon_201504/hd/comet-2015-098-03-000.?bd"
)

md = dbd.get("m_depth")
mb = dbd.get("m_de_oil_vol")
mp = dbd.get("m_pitch")
cbp = dbd.get("c_battpos")
mbp = dbd.get("m_battpos")

import pylab as pl
import publication

publication.setup()
tm -= 0
t0 = md[0][0]
f, ax = pl.subplots(3, 1, sharex=True, figsize=(10, 7.3))
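
# A minimal sketch of how the three panels might be filled in, comparing
# measured (dbd) against simulated (GM) quantities; the panel layout is an
# assumption, not part of the original fragment, and tm is assumed to share
# the measured time epoch.
ax[0].plot(md[0] - t0, md[1], label='measured')
ax[0].plot(tm - t0, d, label='simulated')
ax[0].set_ylabel('depth (m)')
ax[1].plot(mp[0] - t0, mp[1], label='measured')
ax[1].plot(tm - t0, p, label='simulated')
ax[1].set_ylabel('pitch (rad)')
ax[2].plot(mbp[0] - t0, mbp[1], label='measured')
ax[2].plot(tm - t0, bp, label='simulated')
ax[2].set_ylabel('battpos (inch)')
ax[2].set_xlabel('time since start (s)')
for _ax in ax:
    _ax.legend()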
Example #4
    # module-level imports assumed: os, numpy as np, dbdreader, fast_gsw and
    # scipy.interpolate's interp1d (this is a method of a larger class)
    def read_gliderdata(self, lat, lon):
        path = os.path.join(self.gliders_directory, self.glider_name,
                            'from-glider', '%s*.[st]bd' % (self.glider_name))
        dbd = dbdreader.MultiDBD(pattern=path)
        if self.glider_name == 'sim':
            print("Warning: assuming simulator. I am making up CTD data!")
            t, P = dbd.get("m_depth")
            P /= 10
            C = np.ones_like(P) * 4
            T = np.ones_like(P) * 15
        else:
            tmp = dbd.get_sync("sci_water_cond",
                               "sci_water_temp sci_water_pressure".split())
            t_last = tmp[0][-1]
            age = t_last - tmp[0]
            t, C, T, P = tmp.compress(np.logical_and(tmp[1] > 0,
                                                     age < self.AGE * 3600),
                                      axis=1)
            try:
                _, u, v = dbd.get_sync("m_water_vx", ["m_water_vy"])
            except dbdreader.DbdError:
                try:
                    _, u, v = dbd.get_sync("m_final_water_vx",
                                           ["m_final_water_vy"])
                except dbdreader.DbdError:
                    u = np.array([0])
                    v = np.array([0])

            u, v = np.compress(np.logical_and(
                np.abs(u) < 1.5,
                np.abs(v) < 1.5), [u, v],
                               axis=1)
        # convert C to mS/cm and P to dbar for the gsw routines
        rho = fast_gsw.rho(C * 10, T, P * 10, lon, lat)
        SA = fast_gsw.SA(C * 10, T, P * 10, lon, lat)
        # compute the age of each measurement, and the resulting weight.
        dt = t.max() - t
        weights = np.exp(-dt / (self.AGE * 3600))
        # make binned averages
        max_depth = P.max() * 10
        dz = 5
        zi = np.arange(dz / 2, max_depth + dz / 2, dz)
        bins = np.arange(0, max_depth + dz, dz)
        bins[0] = -10
        idx = np.digitize(P * 10, bins) - 1
        rho_avg = np.zeros_like(zi, float)
        SA_avg = np.zeros_like(zi, float)
        T_avg = np.zeros_like(zi, float)
        weights_sum = np.zeros_like(zi, float)
        for _idx, _w, _rho, _SA, _T in zip(idx, weights, rho, SA, T):
            try:
                rho_avg[_idx] += _rho * _w
                SA_avg[_idx] += _SA * _w
                T_avg[_idx] += _T * _w
                weights_sum[_idx] += _w
            except IndexError:
                continue
        # if data are sparse, there can be gaps (empty bins); keep only the
        # bins that actually received data.
        j = np.unique(idx)
        zj = zi[j]
        rho_avg = rho_avg[j] / weights_sum[j]
        SA_avg = SA_avg[j] / weights_sum[j]
        T_avg = T_avg[j] / weights_sum[j]
        self.rho_fun = interp1d(zj,
                                rho_avg,
                                bounds_error=False,
                                fill_value=(rho_avg[0], rho_avg[-1]))
        self.SA_fun = interp1d(zj,
                               SA_avg,
                               bounds_error=False,
                               fill_value=(SA_avg[0], SA_avg[-1]))
        self.T_fun = interp1d(zj,
                              T_avg,
                              bounds_error=False,
                              fill_value=(T_avg[0], T_avg[-1]))

        if self.u_fun is None:  # not initialised yet; use the last available water current estimate.
            self.u_fun = lambda x: u[-1]
            self.v_fun = lambda x: v[-1]
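
# A hypothetical usage sketch (not part of the original class): once
# read_gliderdata() has populated the interpolants, they can be evaluated on
# any depth grid; outside the sampled range they clamp to the first/last bin
# value because of the fill_value settings above. `profiles` stands in for an
# instance of the class this method belongs to.
z = np.arange(0, 100, 1.0)           # depth grid in m
profiles.read_gliderdata(54.0, 8.0)  # lat, lon
rho_profile = profiles.rho_fun(z)    # in-situ density (kg/m3)
SA_profile = profiles.SA_fun(z)      # absolute salinity (g/kg)
T_profile = profiles.T_fun(z)        # in-situ temperature (deg C)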
Example #5

    def test_get_ctd_sync(self):
        print("get_ctd_sync")
        pattern = "../data/amadeus-2014-*.[de]bd"
        dbd = dbdreader.MultiDBD(pattern=pattern, cacheDir='../data/cac')
        tctd, C, T, P, depth = dbd.get_CTD_sync("m_depth")
        dbd.close()
Example #6

    def test_non_standard_cache_dir(self):
        print("non_standard_cache_dir")
        dbd = dbdreader.MultiDBD(pattern=self.pattern, cacheDir='../data/cac')
        depth = dbd.get("m_depth")
        dbd.close()
        self.assertEqual(len(depth), 2)
Example #7

    def test_get_sync(self):
        print("get_sync")
        dbd = dbdreader.MultiDBD(pattern=self.pattern)
        t, d, lat, lon = dbd.get_sync("m_depth", 'm_lat', 'm_lon')
        dbd.close()
Example #8
# needs to be read, and not every possible variable to find start and
# end times of each file.
#
# There are basically two ways of narrowing down the number of files
# processed. You can use the set_time_limits() method of the MultiDBD
# class or you can use DBDPatternSelect. The latter selects files
# according to a pattern, or as a list of files. The select() method
# returns a list of files that match from and until dates, which can
# be used to create a MultiDBD instance using the filenames=[]
# keyword.

# Below, this is put to the test.

import dbdreader

# open some files, using a pattern

dbd = dbdreader.MultiDBD(pattern="../data/amadeus*.[st]bd")

# print what parameters are available:
print("we the following science parameters:")
for i, p in enumerate(dbd.parameterNames['sci']):
    print("%2d: %s" % (i, p))
print("\n and engineering paramters:")
for i, p in enumerate(dbd.parameterNames['eng']):
    print("%2d: %s" % (i, p))

# get the measured depth

tm, depth = dbd.get("m_depth")

max_depth = depth.max()
print("\nmax depth found is %f m" % (max_depth))