Example #1
def spike(x):
    """ Spike
    """
    y = ma.masked_all_like(x)
    y[1:-1] = np.abs(x[1:-1] - (x[:-2] + x[2:])/2.0) - \
                np.abs((x[2:] - x[:-2])/2.0)
    return y
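A minimal usage sketch of spike() above, assuming the numpy/numpy.ma imports these snippets rely on; the sample values are illustrative only.

import numpy as np
import numpy.ma as ma

x = ma.masked_invalid([10.0, 10.1, 13.0, 10.2, np.nan, 10.3])
print(spike(x))
# The interior outlier (13.0) gets a large spike value (~2.8); well-behaved
# interior points get values near or below zero; the endpoints and any point
# whose neighbourhood touches the masked NaN remain masked.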
Example #2
def cum_rate_of_change(data, v, memory):

    output = ma.masked_all_like(data[v])
    output[1:] = ma.absolute(ma.diff(data[v]))

    for i in range(2, output.size):
        if output[i] < output[i-1]:
            output[i] = (1 - memory) * output[i] + memory * output[i-1]

    return output
Example #3
def cum_rate_of_change(data, v, memory):

    output = ma.masked_all_like(data[v])
    output[1:] = ma.absolute(ma.diff(data[v]))

    for i in range(2, output.size):
        if output[i] < output[i - 1]:
            output[i] = (1 - memory) * output[i] + memory * output[i - 1]

    return output
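The two identical definitions above blend a decreasing rate of change with the previous value, so a single quiet sample does not reset the statistic. A worked sketch, assuming `data` is a dict-like of masked arrays as in the other examples:

import numpy.ma as ma

data = {'TEMP': ma.array([10.0, 12.0, 12.1])}
print(cum_rate_of_change(data, 'TEMP', memory=0.8))
# The raw rate of change is [--, 2.0, 0.1]; since 0.1 < 2.0 the last value
# is blended as 0.2 * 0.1 + 0.8 * 2.0 ≈ 1.62.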
Example #4
def rate_of_change(data, v, cfg):

    RoC = ma.masked_all_like(data[v])
    RoC[1:] = ma.absolute(ma.diff(data[v]))

    flag = np.zeros(data[v].shape, dtype='i1')
    flag[np.nonzero(RoC <= cfg)] = 1
    flag[np.nonzero(RoC > cfg)] = 4
    flag[ma.getmaskarray(data[v])] = 9

    return flag, RoC
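A hedged usage sketch of the flagging convention these snippets appear to follow (1 = passed, 4 = failed, 9 = masked input, 0 = not evaluated); here `cfg` is just the numeric threshold, and the sample data are invented:

import numpy as np
import numpy.ma as ma

data = {'TEMP': ma.masked_invalid([25.0, 24.9, 24.8, 20.0, np.nan])}
flag, roc = rate_of_change(data, 'TEMP', 4.0)
print(flag)  # [0 1 1 4 9]: no rate for the first point, one jump above the
             # threshold of 4.0, and the masked input flagged as 9
print(roc)   # the absolute first differences, masked where undefined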
Example #5
def gradient(x):
    """ Gradient QC

        This is different from the mathematical gradient:
        d/dx + d/dy + d/dz,
        but as defined by GTSPP, EuroGOOS and others.
    """
    y = ma.masked_all_like(x)
    y[1:-1] = np.abs(x[1:-1] - (x[:-2] + x[2:])/2.0)

    return y
Example #6
def gradient(x):
    """ Gradient QC

        This is different from the mathematical gradient:
        d/dx + d/dy + d/dz,
        but as defined by GTSPP, EuroGOOS and others.
    """
    y = ma.masked_all_like(x)
    y[1:-1] = np.abs(x[1:-1] - (x[:-2] + x[2:]) / 2.0)

    return y
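A small sketch of the GTSPP-style gradient above (same assumed imports): each interior value is its distance from the midpoint of its two neighbours.

import numpy.ma as ma

x = ma.array([10.0, 10.2, 11.0, 10.4])
print(gradient(x))
# ≈ [-- 0.3 0.7 --] (up to float rounding): |10.2 - (10.0 + 11.0)/2| = 0.3 and
# |11.0 - (10.2 + 10.4)/2| = 0.7; the endpoints have no two-sided
# neighbourhood and stay masked.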
Example #7
def mld(SA, CT, p, criterion='pdvar'):
    """
    Compute the mixed layer depth.

    Parameters
    ----------
    SA : array_like
         Absolute Salinity  [g/kg]
    CT : array_like
         Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        sea pressure [dbar]
    criterion : str, optional
               MLD Criteria

    Mixed layer depth criteria are:

    'temperature' : Computed based on constant temperature difference
    criterion, CT(0) - T[mld] = 0.5 degree C.

    'density' : computed based on the constant potential density difference
    criterion, pd[0] - pd[mld] = 0.125 in sigma units.

    `pdvar` : computed based on variable potential density criterion
    pd[0] - pd[mld] = var(T[0], S[0]), where var is a variable potential
    density difference which corresponds to constant temperature difference of
    0.5 degree C.

    Returns
    -------
    MLD : array_like
          Mixed layer depth
    idx_mld : bool array
              Boolean array in the shape of p with MLD index.


    Examples
    --------
    >>> import os
    >>> import numpy as np
    >>> import gsw
    >>> import matplotlib.pyplot as plt
    >>> from oceans import mld
    >>> from gsw.utilities import Bunch
    >>> # Read data file with check value profiles
    >>> datadir = os.path.join(os.path.dirname(gsw.utilities.__file__), 'data')
    >>> cv = Bunch(np.load(os.path.join(datadir, 'gsw_cv_v3_0.npz')))
    >>> SA, CT, p = (cv.SA_chck_cast[:, 0], cv.CT_chck_cast[:, 0],
    ...              cv.p_chck_cast[:, 0])
    >>> fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, sharey=True)
    >>> l0 = ax0.plot(CT, -p, 'b.-')
    >>> MDL, idx = mld(SA, CT, p, criterion='temperature')
    >>> l1 = ax0.plot(CT[idx], -p[idx], 'ro')
    >>> l2 = ax1.plot(CT, -p, 'b.-')
    >>> MDL, idx = mld(SA, CT, p, criterion='density')
    >>> l3 = ax1.plot(CT[idx], -p[idx], 'ro')
    >>> l4 = ax2.plot(CT, -p, 'b.-')
    >>> MDL, idx = mld(SA, CT, p, criterion='pdvar')
    >>> l5 = ax2.plot(CT[idx], -p[idx], 'ro')
    >>> _ = ax2.set_ylim(-500, 0)

    References
    ----------
    .. [1] Monterey, G., and S. Levitus, 1997: Seasonal variability of mixed
    layer depth for the World Ocean. NOAA Atlas, NESDIS 14, 100 pp.
    Washington, D.C.

    """

    SA, CT, p = list(map(np.asanyarray, (SA, CT, p)))
    SA, CT, p = np.broadcast_arrays(SA, CT, p)
    SA, CT, p = list(map(ma.masked_invalid, (SA, CT, p)))

    p_min, idx = p.min(), p.argmin()

    sigma = gsw.rho(SA, CT, p_min) - 1000.

    # Temperature and Salinity at the surface,
    T0, S0, Sig0 = CT[idx], SA[idx], sigma[idx]

    # NOTE: The temperature difference criterion for MLD
    Tdiff = T0 - 0.5  # 0.8 on the matlab original

    if criterion == 'temperature':
        idx_mld = (CT > Tdiff)
    elif criterion == 'pdvar':
        pdvar_diff = gsw.rho(S0, Tdiff, p_min) - 1000.
        idx_mld = (sigma <= pdvar_diff)
    elif criterion == 'density':
        sig_diff = Sig0 + 0.125
        idx_mld = (sigma <= sig_diff)
    else:
        raise NameError("Unknown criteria %s" % criterion)

    MLD = ma.masked_all_like(p)
    MLD[idx_mld] = p[idx_mld]

    return MLD.max(axis=0), idx_mld
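A hedged numeric sketch of the 'density' criterion above (requires gsw; the profile values are invented): the MLD is the deepest pressure whose potential density is still within 0.125 sigma units of the surface value.

import numpy as np
import gsw

SA = np.array([35.0, 35.0, 35.1, 35.3])   # g/kg
CT = np.array([20.0, 19.9, 18.0, 15.0])   # deg C
p = np.array([0.0, 10.0, 50.0, 200.0])    # dbar
MLD, idx = mld(SA, CT, p, criterion='density')
print(MLD, idx)
# Only the two near-surface levels stay within the 0.125 kg/m^3 window,
# so the MLD comes out at 10.0 dbar and idx is [True, True, False, False].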
Example #8
File: qc.py  Project: castelao/CoTeDe
    def evaluate(self, v, cfg):

        self.flags[v] = {}

        # Apply common flag for all points.
        if 'common' in self.flags:
            N = self.input[v].shape
            for f in self.flags['common']:
                self.flags[v][f] = self.flags['common'][f] * \
                        np.ones(N, dtype='i1')

        if self.saveauxiliary:
            if v not in self.auxiliary.keys():
                self.auxiliary[v] = {}

        if 'platform_identification' in cfg:
            logging.warn("Sorry I'm not ready to evaluate platform_identification()")

        if 'valid_geolocation' in cfg:
            logging.warn("Sorry I'm not ready to evaluate valid_geolocation()")

        if 'valid_speed' in cfg:
            # Think about it. ARGO also has a valid_speed test, but that is
            #   with respect to successive profiles. What is the best way to
            #   distinguish them here?
            try:
                if self.saveauxiliary:
                    self.flags[v]['valid_speed'], \
                            self.auxiliary[v]['valid_speed'] = \
                            possible_speed(self.input, cfg['valid_speed'])
            except:
                print("Fail on valid_speed")

        if 'global_range' in cfg:
            self.flags[v]['global_range'] = global_range(
                    self.input, v, cfg['global_range'])

        if 'regional_range' in cfg:
            logging.warn("Sorry, I'm no ready to evaluate regional_range()")

        if 'pressure_increasing' in cfg:
            logging.warn("Sorry, I'm no ready to evaluate pressure_increasing()")

        if 'profile_envelop' in cfg:
            self.flags[v]['profile_envelop'] = profile_envelop(
                    self.input, cfg['profile_envelop'], v)

        if 'gradient' in cfg:
            y = Gradient(self.input, v, cfg['gradient'])
            y.test()

            if self.saveauxiliary:
                self.auxiliary[v]['gradient'] = y.features['gradient']

            self.flags[v]['gradient'] = y.flags['gradient']

        if 'gradient_depthconditional' in cfg:
            cfg_tmp = cfg['gradient_depthconditional']
            g = gradient(self.input[v])
            flag = np.zeros(g.shape, dtype='i1')
            # Flag as 9 any masked input value
            flag[ma.getmaskarray(self.input[v])] = 9
            # ---- Shallow zone -----------------
            threshold = cfg_tmp['shallow_max']
            flag[np.nonzero( \
                    (self['PRES'] <= cfg_tmp['pressure_threshold']) & \
                    (g > threshold))] \
                    = 4
            flag[np.nonzero( \
                    (self['PRES'] <= cfg_tmp['pressure_threshold']) & \
                    (g <= threshold))] \
                    = 1
            # ---- Deep zone --------------------
            threshold = cfg_tmp['deep_max']
            flag[np.nonzero( \
                    (self['PRES'] > cfg_tmp['pressure_threshold']) & \
                    (g > threshold))] \
                    = 4
            flag[np.nonzero( \
                    (self['PRES'] > cfg_tmp['pressure_threshold']) & \
                    (g <= threshold))] \
                    = 1

            self.flags[v]['gradient_depthconditional'] = flag

        if 'spike' in cfg:
            y = Spike(self.input, v, cfg['spike'])
            y.test()

            if self.saveauxiliary:
                self.auxiliary[v]['spike'] = y.features['spike']

            self.flags[v]['spike'] = y.flags['spike']

        if 'spike_depthconditional' in cfg:
            cfg_tmp = cfg['spike_depthconditional']
            s = spike(self.input[v])
            flag = np.zeros(s.shape, dtype='i1')
            # Flag as 9 any masked input value
            flag[ma.getmaskarray(self.input[v])] = 9
            # ---- Shallow zone -----------------
            threshold = cfg_tmp['shallow_max']
            flag[np.nonzero( \
                    (self['PRES'] <= cfg_tmp['pressure_threshold']) & \
                    (s > threshold))] \
                    = 4
            flag[np.nonzero( \
                    (self['PRES'] <= cfg_tmp['pressure_threshold']) & \
                    (s <= threshold))] \
                    = 1
            # ---- Deep zone --------------------
            threshold = cfg_tmp['deep_max']
            flag[np.nonzero( \
                    (self['PRES'] > cfg_tmp['pressure_threshold']) & \
                    (s > threshold))] \
                    = 4
            flag[np.nonzero( \
                    (self['PRES'] > cfg_tmp['pressure_threshold']) & \
                    (s <= threshold))] \
                    = 1

            self.flags[v]['spike_depthconditional'] = flag

        if 'stuck_value' in cfg:
            logging.warn("Sorry I'm not ready to evaluate stuck_value()")

        if 'grey_list' in cfg:
            logging.warn("Sorry I'm not ready to evaluate grey_list()")

        if 'gross_sensor_drift' in cfg:
            logging.warn("Sorry I'm not ready to evaluate gross_sensor_drift()")

        if 'frozen_profile' in cfg:
            logging.warn("Sorry I'm not ready to evaluate frozen_profile()")

        if 'deepest_pressure' in cfg:
            logging.warn("Sorry I'm not ready to evaluate deepest_pressure()")

        if 'tukey53H_norm' in cfg:
            y = Tukey53H(self.input, v, cfg['tukey53H_norm'])
            y.test()

            if self.saveauxiliary:
                self.auxiliary[v]['tukey53H_norm'] = \
                        y.features['tukey53H_norm']

            self.flags[v]['tukey53H_norm'] = y.flags['tukey53H_norm']

        #if 'spike_depthsmooth' in cfg:
        #    from maud.window_func import _weight_hann as wfunc
        #    cfg_tmp = cfg['spike_depthsmooth']
        #    cfg_tmp['dzwindow'] = 10
        #    smooth = ma.masked_all(self.input[v].shape)
        #    z = ped['pressure']
        #    for i in range(len(self.input[v])):
        #        ind = np.nonzero(ma.absolute(z-z[i]) < \
        #                cfg_tmp['dzwindow'])[0]
        #        ind = ind[ind != i]
        #        w = wfunc(z[ind]-z[i], cfg_tmp['dzwindow'])
        #        smooth[i] = (T[ind]*w).sum()/w.sum()

        # ARGO, test #12. (10C, 5PSU)
        if 'digit_roll_over' in cfg:
            threshold = cfg['digit_roll_over']
            s = step(self.input[v])

            if self.saveauxiliary:
                self.auxiliary[v]['step'] = s

            flag = np.zeros(s.shape, dtype='i1')
            # Flag as 9 any masked input value
            flag[ma.getmaskarray(self.input[v])] = 9

            flag[np.nonzero(ma.absolute(s) > threshold)] = 4
            flag[np.nonzero(ma.absolute(s) <= threshold)] = 1

            self.flags[v]['digit_roll_over'] = flag

        if 'bin_spike' in cfg:
            y = Bin_Spike(self.input, v, cfg['bin_spike'])
            # y.test()

            if self.saveauxiliary:
                self.auxiliary[v]['bin_spike'] = y.features['bin_spike']

            # self.flags[v]['bin_spike'] = y.flags['bin_spike']

        if 'density_inversion' in cfg:
            try:
                if self.saveauxiliary:
                    self.flags[v]['density_inversion'], \
                            self.auxiliary[v]['density_step'] = \
                            density_inversion(
                                    self.input,
                                    cfg['density_inversion'],
                                    saveaux=True)
                else:
                    self.flags[v]['density_inversion'] = density_inversion(
                            self.input, cfg['density_inversion'])
            except:
                print("Fail on density_inversion")

        if 'woa_normbias' in cfg:
            y = WOA_NormBias(self.input, v, cfg['woa_normbias'])
            #        self.attributes)
            y.test()

            if self.saveauxiliary:
                for f in y.features:
                    self.auxiliary[v][f] = y.features[f]

            self.flags[v]['woa_normbias'] = y.flags['woa_normbias']

        #if 'pstep' in cfg:
        #    ind = np.isfinite(self.input[v])
        #    ind = ma.getmaskarray(self.input[v])
        #    if self.saveauxiliary:
        #        self.auxiliary[v]['pstep'] = ma.concatenate(
        #                [ma.masked_all(1),
        #                    np.diff(self.input['PRES'][ind])])

        if 'rate_of_change' in cfg:
            self.flags[v]['rate_of_change'], RoC = \
                    rate_of_change(self.input, v, cfg['rate_of_change'])
            if self.saveauxiliary:
                self.auxiliary[v]['rate_of_change'] = RoC

        if 'cum_rate_of_change' in cfg:
            x = cum_rate_of_change(self.input, v,
                    cfg['cum_rate_of_change']['memory'])
            self.flags[v]['cum_rate_of_change'] = np.zeros(x.shape, dtype='i1')
            self.flags[v]['cum_rate_of_change'][
                    np.nonzero(x <= cfg['cum_rate_of_change']['threshold'])
                    ] = 1
            self.flags[v]['cum_rate_of_change'][
                    np.nonzero(x > cfg['cum_rate_of_change']['threshold'])
                    ] = 4
            self.flags[v]['cum_rate_of_change'][
                    ma.getmaskarray(self.input[v])] = 9

        # FIXME: the Anomaly Detection and Fuzzy require some features
        #   to be estimated previously. Generalize this.
        if 'anomaly_detection' in  cfg:
            features = {}
            for f in cfg['anomaly_detection']['features']:
                if f == 'spike':
                    features['spike'] = spike(self.input[v])
                elif f == 'gradient':
                    features['gradient'] = gradient(self.input[v])
                elif f == 'tukey53H_norm':
                    features['tukey53H_norm'] = tukey53H_norm(self.input[v])
                elif f == 'rate_of_change':
                    RoC = ma.masked_all_like(self.input[v])
                    RoC[1:] = ma.absolute(ma.diff(self.input[v]))
                    features['rate_of_change'] = RoC
                elif (f == 'woa_normbias'):
                    y = WOA_NormBias(self.input, v, {})
                    features['woa_normbias'] = \
                            np.abs(y.features['woa_normbias'])
                else:
                    logging.error("Sorry, I can't evaluate anomaly_detection with: %s" % f)

            self.flags[v]['anomaly_detection'] = \
                    anomaly_detection(features, cfg['anomaly_detection'])

        if 'morello2014' in cfg:
            self.flags[v]['morello2014'] = morello2014(
                    features=self.auxiliary[v],
                    cfg=cfg['morello2014'])

        if 'fuzzylogic' in  cfg:
            features = {}
            for f in cfg['fuzzylogic']['features']:
                if f == 'spike':
                    features['spike'] = spike(self.input[v])
                elif f == 'gradient':
                    features['gradient'] = gradient(self.input[v])
                elif f == 'tukey53H_norm':
                    features['tukey53H_norm'] = tukey53H_norm(self.input[v],
                            k=1.5)
                elif f == 'rate_of_change':
                    RoC = ma.masked_all_like(self.input[v])
                    RoC[1:] = ma.absolute(ma.diff(self.input[v]))
                    features['rate_of_change'] = RoC
                elif (f == 'woa_normbias'):
                    y = WOA_NormBias(self.input, v, {})
                    features['woa_normbias'] = \
                            np.abs(y.features['woa_normbias'])
                else:
                    logging.error("Sorry, I can't evaluate fuzzylogic with: %s" % f)

            self.flags[v]['fuzzylogic'] = fuzzylogic(
                    features=features,
                    cfg=cfg['fuzzylogic'])

        self.flags[v]['overall'] = combined_flag(self.flags[v])
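For reference, a hypothetical cfg fragment for the depth-conditional tests in evaluate() above. The key names match what the code reads; the pressure threshold and limits are illustrative, roughly in the spirit of GTSPP temperature limits, not taken from the CoTeDe configuration files:

cfg_example = {
    'gradient_depthconditional': {
        'pressure_threshold': 500,  # dbar separating the shallow and deep zones
        'shallow_max': 9.0,
        'deep_max': 3.0,
    },
    'spike_depthconditional': {
        'pressure_threshold': 500,
        'shallow_max': 6.0,
        'deep_max': 2.0,
    },
    'digit_roll_over': 10.0,  # ARGO test #12 threshold (10 C for temperature)
}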
Example #9
def mld(S, thetao, depth_cube, latitude_deg):
	"""Compute the mixed layer depth.
	Parameters
	----------
	SA : array_like
		 Absolute Salinity  [g/kg]
	CT : array_like
		 Conservative Temperature [:math:`^\circ` C (ITS-90)]
	p : array_like
		sea pressure [dbar]
	criterion : str, optional
			   MLD Criteria
	Mixed layer depth criteria are:
	'temperature' : Computed based on constant temperature difference
	criterion, CT(0) - T[mld] = 0.5 degree C.
	'density' : computed based on the constant potential density difference
	criterion, pd[0] - pd[mld] = 0.125 in sigma units.
	`pdvar` : computed based on variable potential density criterion
	pd[0] - pd[mld] = var(T[0], S[0]), where var is a variable potential
	density difference which corresponds to constant temperature difference of
	0.5 degree C.
	Returns
	-------
	MLD : array_like
		  Mixed layer depth
	idx_mld : bool array
			  Boolean array in the shape of p with MLD index.
	Examples
	--------
	>>> import os
	>>> import numpy as np
	>>> import gsw
	>>> import matplotlib.pyplot as plt
	>>> from oceans import mld
	>>> from gsw.utilities import Bunch
	>>> # Read data file with check value profiles
	>>> datadir = os.path.join(os.path.dirname(gsw.utilities.__file__), 'data')
	>>> cv = Bunch(np.load(os.path.join(datadir, 'gsw_cv_v3_0.npz')))
	>>> SA, CT, p = (cv.SA_chck_cast[:, 0], cv.CT_chck_cast[:, 0],
	...              cv.p_chck_cast[:, 0])
	>>> fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, sharey=True)
	>>> l0 = ax0.plot(CT, -p, 'b.-')
	>>> MDL, idx = mld(SA, CT, p, criterion='temperature')
	>>> l1 = ax0.plot(CT[idx], -p[idx], 'ro')
	>>> l2 = ax1.plot(CT, -p, 'b.-')
	>>> MDL, idx = mld(SA, CT, p, criterion='density')
	>>> l3 = ax1.plot(CT[idx], -p[idx], 'ro')
	>>> l4 = ax2.plot(CT, -p, 'b.-')
	>>> MDL, idx = mld(SA, CT, p, criterion='pdvar')
	>>> l5 = ax2.plot(CT[idx], -p[idx], 'ro')
	>>> _ = ax2.set_ylim(-500, 0)
	References
	----------
	.. [1] Monterey, G., and S. Levitus, 1997: Seasonal variability of mixed
	layer depth for the World Ocean. NOAA Atlas, NESDIS 14, 100 pp.
	Washington, D.C.
	""" 
	#depth_cube.data = np.ma.masked_array(np.swapaxes(np.tile(depths,[360,180,1]),0,2))
	MLD_out = S.extract(iris.Constraint(depth = np.min(depth_cube.data)))
	MLD_out_data = MLD_out.data
	for i in range(np.shape(MLD_out)[0]):
		print('calculating mixed layer for year: ', i)
		thetao_tmp = thetao[i]
		S_tmp = S[i]
		depth_cube.data = np.abs(depth_cube.data)
		depth_cube = depth_cube * (-1.0)
		p = gsw.p_from_z(depth_cube.data,latitude_deg.data) # dbar
		SA = S_tmp.data*1.004715
		CT = gsw.CT_from_pt(SA,thetao_tmp.data - 273.15)
		SA, CT, p = map(np.asanyarray, (SA, CT, p))
		SA, CT, p = np.broadcast_arrays(SA, CT, p)
		SA, CT, p = map(ma.masked_invalid, (SA, CT, p))
		p_min, idx = p.min(axis = 0), p.argmin(axis = 0)
		sigma = SA.copy()
		to_mask = np.where(sigma == S.data.fill_value)
		sigma = gsw.rho(SA, CT, p_min) - 1000.
		sigma[to_mask] = np.NAN
		sig_diff = sigma[0,:,:].copy()
		sig_diff += 0.125 # Levitus (1982) density criteria
		sig_diff = np.tile(sig_diff,[np.shape(sigma)[0],1,1])
		idx_mld = sigma <= sig_diff
		# NEED TO SORT THIS BIT - COMPARE WITH THE OTHER AND FIX!!!
		MLD = ma.masked_all_like(S_tmp.data)
		MLD[idx_mld] = depth_cube.data[idx_mld] * -1
		MLD_out_data[i,:,:] = np.ma.max(MLD,axis=0) 
	return MLD_out_data
Example #10
def mld(SA, CT, p, criterion='pdvar'):
    """
    Compute the mixed layer depth.

    Parameters
    ----------
    SA : array_like
         Absolute Salinity  [g/kg]
    CT : array_like
         Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        sea pressure [dbar]
    criterion : str, optional
               MLD Criteria

    Mixed layer depth criteria are:

    'temperature' : Computed based on constant temperature difference
    criterion, CT(0) - T[mld] = 0.5 degree C.

    'density' : computed based on the constant potential density difference
    criterion, pd[0] - pd[mld] = 0.125 in sigma units.

    `pdvar` : computed based on variable potential density criterion
    pd[0] - pd[mld] = var(T[0], S[0]), where var is a variable potential
    density difference which corresponds to constant temperature difference of
    0.5 degree C.

    Returns
    -------
    MLD : array_like
          Mixed layer depth
    idx_mld : bool array
              Boolean array in the shape of p with MLD index.

    References
    ----------
    .. [1] Monterey, G., and S. Levitus, 1997: Seasonal variability of mixed
    layer depth for the World Ocean. NOAA Atlas, NESDIS 14, 100 pp.
    Washington, D.C.

    """

    SA, CT, p = list(map(np.asanyarray, (SA, CT, p)))
    SA, CT, p = np.broadcast_arrays(SA, CT, p)
    SA, CT, p = list(map(ma.masked_invalid, (SA, CT, p)))

    p_min, idx = p.min(), p.argmin()

    sigma = gsw.rho(SA, CT, p_min) - 1000.

    # Temperature and Salinity at the surface,
    T0, S0, Sig0 = CT[idx], SA[idx], sigma[idx]

    # NOTE: The temperature difference criterion for MLD
    Tdiff = T0 - 0.5  # 0.8 on the matlab original

    if criterion == 'temperature':
        idx_mld = (CT > Tdiff)
    elif criterion == 'pdvar':
        pdvar_diff = gsw.rho(S0, Tdiff, p_min) - 1000.
        idx_mld = (sigma <= pdvar_diff)
    elif criterion == 'density':
        sig_diff = Sig0 + 0.125
        idx_mld = (sigma <= sig_diff)
    else:
        raise NameError('Unknown criteria {}'.format(criterion))

    MLD = ma.masked_all_like(p)
    MLD[idx_mld] = p[idx_mld]

    return MLD.max(axis=0), idx_mld
Example #11
def mld(SA, CT, p, criterion='pdvar'):
    """Compute the mixed layer depth.

    Parameters
    ----------
    SA : array_like
         Absolute Salinity  [g/kg]
    CT : array_like
         Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        sea pressure [dbar]
    criterion : str, optional
               MLD Criteria

    Mixed layer depth criteria are:

    'temperature' : Computed based on constant temperature difference
    criterion, CT(0) - T[mld] = 0.5 degree C.

    'density' : computed based on the constant potential density difference
    criterion, pd[0] - pd[mld] = 0.125 in sigma units.

    `pdvar` : computed based on variable potential density criterion
    pd[0] - pd[mld] = var(T[0], S[0]), where var is a variable potential
    density difference which corresponds to constant temperature difference of
    0.5 degree C.

    Returns
    -------
    MLD : array_like
          Mixed layer depth
    idx_mld : bool array
              Boolean array in the shape of p with MLD index.


    Examples
    --------
    >>> import os
    >>> import numpy as np
    >>> import gsw
    >>> import matplotlib.pyplot as plt
    >>> from oceans import mld
    >>> from gsw.utilities import Bunch
    >>> # Read data file with check value profiles
    >>> datadir = os.path.join(os.path.dirname(gsw.utilities.__file__), 'data')
    >>> cv = Bunch(np.load(os.path.join(datadir, 'gsw_cv_v3_0.npz')))
    >>> SA, CT, p = (cv.SA_chck_cast[:, 0], cv.CT_chck_cast[:, 0],
    ...              cv.p_chck_cast[:, 0])
    >>> fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, sharey=True)
    >>> l0 = ax0.plot(CT, -p, 'b.-')
    >>> MDL, idx = mld(SA, CT, p, criterion='temperature')
    >>> l1 = ax0.plot(CT[idx], -p[idx], 'ro')
    >>> l2 = ax1.plot(CT, -p, 'b.-')
    >>> MDL, idx = mld(SA, CT, p, criterion='density')
    >>> l3 = ax1.plot(CT[idx], -p[idx], 'ro')
    >>> l4 = ax2.plot(CT, -p, 'b.-')
    >>> MDL, idx = mld(SA, CT, p, criterion='pdvar')
    >>> l5 = ax2.plot(CT[idx], -p[idx], 'ro')
    >>> _ = ax2.set_ylim(-500, 0)

    References
    ----------
    .. [1] Monterey, G., and S. Levitus, 1997: Seasonal variability of mixed
    layer depth for the World Ocean. NOAA Atlas, NESDIS 14, 100 pp.
    Washington, D.C.
    """

    SA, CT, p = map(np.asanyarray, (SA, CT, p))
    SA, CT, p = np.broadcast_arrays(SA, CT, p)
    SA, CT, p = map(ma.masked_invalid, (SA, CT, p))

    p_min, idx = p.min(), p.argmin()

    sigma = gsw.rho(SA, CT, p_min) - 1000.

    # Temperature and Salinity at the surface,
    T0, S0, Sig0 = CT[idx], SA[idx], sigma[idx]

    # NOTE: The temperature difference criterion for MLD
    Tdiff = T0 - 0.5  # 0.8 on the matlab original

    if criterion == 'temperature':
        idx_mld = (CT > Tdiff)
    elif criterion == 'pdvar':
        pdvar_diff = gsw.rho(S0, Tdiff, p_min) - 1000.
        idx_mld = (sigma <= pdvar_diff)
    elif criterion == 'density':
        sig_diff = Sig0 + 0.125
        idx_mld = (sigma <= sig_diff)
    else:
        raise NameError("Unknown criteria %s" % criterion)

    MLD = ma.masked_all_like(p)
    MLD[idx_mld] = p[idx_mld]

    return MLD.max(axis=0), idx_mld
Example #12
def fuzzyfy(features, cfg):
    """

        FIXME: Looks like skfuzzy.trapmf does not handle well masked values.
               I must think better what to do with masked input values. What
               to do when there is one feature, but the other features are
               masked?
    """

    features_list = list(cfg['features'].keys())

    N = features[features_list[0]].size

    # The fuzzy sets are usually: low, medium, high.
    # The membership of each fuzzy set is each feature, scaled.
    membership = {}
    for f in cfg['output'].keys():
        membership[f] = {}

    for t in features_list:
        for m in membership:
            assert m in cfg['features'][t], \
                    "Missing %s in %s" % (m, cfg['features'][t])

            membership[m][t] = ma.masked_all_like(features[t])
            ind = ~ma.getmaskarray(features[t])
            if m == 'low':
                membership[m][t][ind] = zmf(
                    np.asanyarray(features[t])[ind], cfg['features'][t][m])
            elif m == 'high':
                membership[m][t][ind] = smf(
                    np.asanyarray(features[t])[ind], cfg['features'][t][m])
            else:
                membership[m][t][ind] = trapmf(
                    np.asanyarray(features[t])[ind], cfg['features'][t][m])

    # Rule Set
    rules = {}
    # Low: u_low = mean(S_l(spike), S_l(clim)...)
    #u_low = np.mean([weights['spike']['low'],
    #    weights['woa_relbias']['low']], axis=0)

    tmp = membership['low'][features_list[0]]
    for f in features_list[1:]:
        tmp = ma.vstack((tmp, membership['low'][f]))

    # FIXME: If there is only one feature, it will return 1 value
    #          instead of an array with N values.
    rules['low'] = ma.mean(tmp, axis=0)

    # IMPROVE IT: Morello2014 doesn't even use the medium uncertainty,
    #   so there is no reason to estimate it. Generalize this once the
    #   membership combining rules are defined in the cfg, so I can
    #   decide whether to use mean or max.
    if 'medium' in membership:
        # Medium: u_medium = mean(S_l(spike), S_l(clim)...)
        #u_medium = np.mean([weights['spike']['medium'],
        #    weights['woa_relbias']['medium']], axis=0)

        tmp = membership['medium'][features_list[0]]
        for f in features_list[1:]:
            tmp = ma.vstack((tmp, membership['medium'][f]))

        rules['medium'] = ma.mean(tmp, axis=0)

    # High: u_high = max(S_l(spike), S_l(clim)...)
    #u_high = np.max([weights['spike']['high'],
    #    weights['woa_relbias']['high']], axis=0)

    tmp = membership['high'][features_list[0]]
    for f in features_list[1:]:
        tmp = ma.vstack((tmp, membership['high'][f]))

    rules['high'] = ma.max(tmp, axis=0)

    return rules
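A hypothetical cfg sketch illustrating the structure fuzzyfy() expects: one set of membership-function parameters per feature and per fuzzy set, and an 'output' dict whose keys decide which memberships are built. The parameter values below are placeholders, not the CoTeDe defaults:

cfg_example = {
    'output': {'low': None, 'medium': None, 'high': None},
    'features': {
        'spike': {
            'low': [0.0, 0.2],               # passed to zmf(x, params)
            'medium': [0.0, 0.2, 0.5, 1.0],  # passed to trapmf(x, params)
            'high': [0.5, 1.0],              # passed to smf(x, params)
        },
    },
}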
Example #13
def fuzzyfy(features, cfg):
    """

        FIXME: Looks like skfuzzy.trapmf does not handle well masked values.
               I must think better what to do with masked input values. What
               to do when there is one feature, but the other features are
               masked?
    """

    features_list = list(cfg['features'].keys())

    N = features[features_list[0]].size

    # The fuzzy sets are usually: low, medium, high.
    # The membership of each fuzzy set is each feature, scaled.
    membership = {}
    for f in cfg['output'].keys():
        membership[f] = {}

    for t in features_list:
        for m in membership:
            assert m in cfg['features'][t], \
                    "Missing %s in %s" % (m, cfg['features'][t])

            membership[m][t] = ma.masked_all_like(features[t])
            ind = ~ma.getmaskarray(features[t])
            if m == 'low':
                membership[m][t][ind] = zmf(
                        np.asanyarray(features[t])[ind], cfg['features'][t][m])
            elif m == 'high':
                membership[m][t][ind] = smf(
                        np.asanyarray(features[t])[ind],
                        cfg['features'][t][m])
            else:
                membership[m][t][ind] = trapmf(
                        np.asanyarray(features[t])[ind],
                        cfg['features'][t][m])

    # Rule Set
    rules = {}
    # Low: u_low = mean(S_l(spike), S_l(clim)...)
    #u_low = np.mean([weights['spike']['low'],
    #    weights['woa_relbias']['low']], axis=0)

    tmp = membership['low'][features_list[0]]
    for f in features_list[1:]:
        tmp = ma.vstack((tmp, membership['low'][f]))

    # FIXME: If there is only one feature, it will return 1 value
    #          instead of an array with N values.
    rules['low'] = ma.mean(tmp, axis=0)

    # IMPROVE IT: Morello2014 doesn't even use the medium uncertainty,
    #   so there is no reason to estimate it. Generalize this once the
    #   membership combining rules are defined in the cfg, so I can
    #   decide whether to use mean or max.
    if 'medium' in membership:
        # Medium: u_medium = mean(S_l(spike), S_l(clim)...)
        #u_medium = np.mean([weights['spike']['medium'],
        #    weights['woa_relbias']['medium']], axis=0)

        tmp = membership['medium'][features_list[0]]
        for f in features_list[1:]:
            tmp = ma.vstack((tmp, membership['medium'][f]))

        rules['medium'] = ma.mean(tmp, axis=0)

    # High: u_high = max(S_l(spike), S_l(clim)...)
    #u_high = np.max([weights['spike']['high'],
    #    weights['woa_relbias']['high']], axis=0)

    tmp = membership['high'][features_list[0]]
    for f in features_list[1:]:
        tmp = ma.vstack((tmp, membership['high'][f]))

    rules['high'] = ma.max(tmp, axis=0)

    return rules
Example #14
def _init_partitioned_series(shuffled_series: np.ndarray):
    return ma.masked_all_like(shuffled_series)
Example #15
    def evaluate(self, v, cfg):

        self.flags[v] = {}

        # Apply common flag for all points.
        if 'common' in self.flags:
            N = self.input[v].shape
            for f in self.flags['common']:
                self.flags[v][f] = self.flags['common'][f] * \
                        np.ones(N, dtype='i1')

        if self.saveauxiliary:
            if v not in self.auxiliary.keys():
                self.auxiliary[v] = {}

        if 'platform_identification' in cfg:
            logging.warn("Sorry I'm not ready to evaluate platform_identification()")

        if 'valid_geolocation' in cfg:
            logging.warn("Sorry I'm not ready to evaluate valid_geolocation()")

        if 'valid_speed' in cfg:
            # Think about it. ARGO also has a valid_speed test, but that is
            #   with respect to successive profiles. What is the best way to
            #   distinguish them here?
            try:
                if self.saveauxiliary:
                    self.flags[v]['valid_speed'], \
                            self.auxiliary[v]['valid_speed'] = \
                            possible_speed(self.input, cfg['valid_speed'])
            except:
                print("Fail on valid_speed")

        if 'global_range' in cfg:
            self.flags[v]['global_range'] = global_range(
                    self.input, v, cfg['global_range'])

        if 'regional_range' in cfg:
            logging.warn("Sorry, I'm no ready to evaluate regional_range()")

        if 'pressure_increasing' in cfg:
            logging.warn("Sorry, I'm no ready to evaluate pressure_increasing()")

        if 'profile_envelop' in cfg:
            self.flags[v]['profile_envelop'] = profile_envelop(
                    self.input, cfg['profile_envelop'], v)

        if 'gradient' in cfg:
            y = Gradient(self.input, v, cfg['gradient'])
            y.test()

            if self.saveauxiliary:
                self.auxiliary[v]['gradient'] = y.features['gradient']

            self.flags[v]['gradient'] = y.flags['gradient']

        if 'gradient_depthconditional' in cfg:
            cfg_tmp = cfg['gradient_depthconditional']
            g = gradient(self.input[v])
            flag = np.zeros(g.shape, dtype='i1')
            # Flag as 9 any masked input value
            flag[ma.getmaskarray(self.input[v])] = 9
            # ---- Shallow zone -----------------
            threshold = cfg_tmp['shallow_max']
            flag[np.nonzero( \
                    (self['PRES'] <= cfg_tmp['pressure_threshold']) & \
                    (g > threshold))] \
                    = 4
            flag[np.nonzero( \
                    (self['PRES'] <= cfg_tmp['pressure_threshold']) & \
                    (g <= threshold))] \
                    = 1
            # ---- Deep zone --------------------
            threshold = cfg_tmp['deep_max']
            flag[np.nonzero( \
                    (self['PRES'] > cfg_tmp['pressure_threshold']) & \
                    (g > threshold))] \
                    = 4
            flag[np.nonzero( \
                    (self['PRES'] > cfg_tmp['pressure_threshold']) & \
                    (g <= threshold))] \
                    = 1

            self.flags[v]['gradient_depthconditional'] = flag

        if 'spike' in cfg:
            y = Spike(self.input, v, cfg['spike'])
            y.test()

            if self.saveauxiliary:
                self.auxiliary[v]['spike'] = y.features['spike']

            self.flags[v]['spike'] = y.flags['spike']

        if 'spike_depthconditional' in cfg:
            cfg_tmp = cfg['spike_depthconditional']
            s = spike(self.input[v])
            flag = np.zeros(s.shape, dtype='i1')
            # Flag as 9 any masked input value
            flag[ma.getmaskarray(self.input[v])] = 9
            # ---- Shallow zone -----------------
            threshold = cfg_tmp['shallow_max']
            flag[np.nonzero( \
                    (self['PRES'] <= cfg_tmp['pressure_threshold']) & \
                    (s > threshold))] \
                    = 4
            flag[np.nonzero( \
                    (self['PRES'] <= cfg_tmp['pressure_threshold']) & \
                    (s <= threshold))] \
                    = 1
            # ---- Deep zone --------------------
            threshold = cfg_tmp['deep_max']
            flag[np.nonzero( \
                    (self['PRES'] > cfg_tmp['pressure_threshold']) & \
                    (s > threshold))] \
                    = 4
            flag[np.nonzero( \
                    (self['PRES'] > cfg_tmp['pressure_threshold']) & \
                    (s <= threshold))] \
                    = 1

            self.flags[v]['spike_depthconditional'] = flag

        if 'stuck_value' in cfg:
            logging.warn("Sorry I'm not ready to evaluate stuck_value()")

        if 'grey_list' in cfg:
            logging.warn("Sorry I'm not ready to evaluate grey_list()")

        if 'gross_sensor_drift' in cfg:
            logging.warn("Sorry I'm not ready to evaluate gross_sensor_drift()")

        if 'frozen_profile' in cfg:
            logging.warn("Sorry I'm not ready to evaluate frozen_profile()")

        if 'deepest_pressure' in cfg:
            logging.warn("Sorry I'm not ready to evaluate deepest_pressure()")

        if 'tukey53H_norm' in cfg:
            y = Tukey53H(self.input, v, cfg['tukey53H_norm'])
            y.test()

            if self.saveauxiliary:
                self.auxiliary[v]['tukey53H_norm'] = \
                        y.features['tukey53H_norm']

            self.flags[v]['tukey53H_norm'] = y.flags['tukey53H_norm']

        #if 'spike_depthsmooth' in cfg:
        #    from maud.window_func import _weight_hann as wfunc
        #    cfg_tmp = cfg['spike_depthsmooth']
        #    cfg_tmp['dzwindow'] = 10
        #    smooth = ma.masked_all(self.input[v].shape)
        #    z = ped['pressure']
        #    for i in range(len(self.input[v])):
        #        ind = np.nonzero(ma.absolute(z-z[i]) < \
        #                cfg_tmp['dzwindow'])[0]
        #        ind = ind[ind != i]
        #        w = wfunc(z[ind]-z[i], cfg_tmp['dzwindow'])
        #        smooth[i] = (T[ind]*w).sum()/w.sum()

        # ARGO, test #12. (10C, 5PSU)
        if 'digit_roll_over' in cfg:
            threshold = cfg['digit_roll_over']
            s = step(self.input[v])

            if self.saveauxiliary:
                self.auxiliary[v]['step'] = s

            flag = np.zeros(s.shape, dtype='i1')
            # Flag as 9 any masked input value
            flag[ma.getmaskarray(self.input[v])] = 9

            flag[np.nonzero(ma.absolute(s) > threshold)] = 4
            flag[np.nonzero(ma.absolute(s) <= threshold)] = 1

            self.flags[v]['digit_roll_over'] = flag

        if 'bin_spike' in cfg:
            y = Bin_Spike(self.input, v, cfg['bin_spike'])
            # y.test()

            if self.saveauxiliary:
                self.auxiliary[v]['bin_spike'] = y.features['bin_spike']

            # self.flags[v]['bin_spike'] = y.flags['bin_spike']

        if 'density_inversion' in cfg:
            try:
                if self.saveauxiliary:
                    self.flags[v]['density_inversion'], \
                            self.auxiliary[v]['density_step'] = \
                            density_inversion(
                                    self.input,
                                    cfg['density_inversion'],
                                    saveaux=True)
                else:
                    self.flags[v]['density_inversion'] = density_inversion(
                            self.input, cfg['density_inversion'])
            except:
                print("Fail on density_inversion")

        if 'woa_normbias' in cfg:
            y = WOA_NormBias(self.input, v, cfg['woa_normbias'])
            #        self.attributes)
            y.test()

            if self.saveauxiliary:
                for f in y.features:
                    self.auxiliary[v][f] = y.features[f]

            self.flags[v]['woa_normbias'] = y.flags['woa_normbias']

        #if 'pstep' in cfg:
        #    ind = np.isfinite(self.input[v])
        #    ind = ma.getmaskarray(self.input[v])
        #    if self.saveauxiliary:
        #        self.auxiliary[v]['pstep'] = ma.concatenate(
        #                [ma.masked_all(1),
        #                    np.diff(self.input['PRES'][ind])])

        if 'rate_of_change' in cfg:
            self.flags[v]['rate_of_change'], RoC = \
                    rate_of_change(self.input, v, cfg['rate_of_change'])
            if self.saveauxiliary:
                self.auxiliary[v]['rate_of_change'] = RoC

        if 'cum_rate_of_change' in cfg:
            x = cum_rate_of_change(self.input, v,
                    cfg['cum_rate_of_change']['memory'])
            self.flags[v]['cum_rate_of_change'] = np.zeros(x.shape, dtype='i1')
            self.flags[v]['cum_rate_of_change'][
                    np.nonzero(x <= cfg['cum_rate_of_change']['threshold'])
                    ] = 1
            self.flags[v]['cum_rate_of_change'][
                    np.nonzero(x > cfg['cum_rate_of_change']['threshold'])
                    ] = 4
            self.flags[v]['cum_rate_of_change'][
                    ma.getmaskarray(self.input[v])] = 9

        # FIXME: the Anomaly Detection and Fuzzy require some features
        #   to be estimated previously. Generalize this.
        if 'anomaly_detection' in  cfg:
            features = {}
            for f in cfg['anomaly_detection']['features']:
                if f == 'spike':
                    features['spike'] = spike(self.input[v])
                elif f == 'gradient':
                    features['gradient'] = gradient(self.input[v])
                elif f == 'tukey53H_norm':
                    features['tukey53H_norm'] = tukey53H_norm(self.input[v])
                elif f == 'rate_of_change':
                    RoC = ma.masked_all_like(self.input[v])
                    RoC[1:] = ma.absolute(ma.diff(self.input[v]))
                    features['rate_of_change'] = RoC
                elif (f == 'woa_normbias'):
                    y = WOA_NormBias(self.input, v, {})
                    features['woa_normbias'] = \
                            np.abs(y.features['woa_normbias'])
                else:
                    logging.error("Sorry, I can't evaluate anomaly_detection with: %s" % f)

            self.flags[v]['anomaly_detection'] = \
                    anomaly_detection(features, cfg['anomaly_detection'])

        if 'morello2014' in cfg:
            self.flags[v]['morello2014'] = morello2014(
                    features=self.auxiliary[v],
                    cfg=cfg['morello2014'])

        if 'fuzzylogic' in  cfg:
            features = {}
            for f in cfg['fuzzylogic']['features']:
                if f == 'spike':
                    features['spike'] = spike(self.input[v])
                elif f == 'gradient':
                    features['gradient'] = gradient(self.input[v])
                elif f == 'tukey53H_norm':
                    features['tukey53H_norm'] = tukey53H_norm(self.input[v],
                            k=1.5)
                elif f == 'rate_of_change':
                    RoC = ma.masked_all_like(self.input[v])
                    RoC[1:] = ma.absolute(ma.diff(self.input[v]))
                    features['rate_of_change'] = RoC
                elif (f == 'woa_normbias'):
                    y = WOA_NormBias(self.input, v, {})
                    features['woa_normbias'] = \
                            np.abs(y.features['woa_normbias'])
                else:
                    logging.error("Sorry, I can't evaluate fuzzylogic with: %s" % f)

            self.flags[v]['fuzzylogic'] = fuzzylogic(
                    features=features,
                    cfg=cfg['fuzzylogic'])

        self.flags[v]['overall'] = combined_flag(self.flags[v])
Example #16
import numpy as np
import numpy.ma as ma

arr = np.zeros((2, 3), dtype=np.float32)
print(arr)
# [[0. 0. 0.]
#  [0. 0. 0.]]

# A fully masked array with the same shape and dtype as ``arr``.
print(ma.masked_all_like(arr))
# [[-- -- --]
#  [-- -- --]]

print(arr.dtype)                      # float32
print(ma.masked_all_like(arr).dtype)  # float32 -- the dtype is preserved