Example #1
import numpy as np
from skimage.measure import label
# lin/log (dB <-> linear conversions) and bin2d/bin2dback (2D binning and
# un-binning) are assumed to be echopy helper functions in this module's scope.

def ariza_seabed(Sv, r, offset=20, thr=(-40,-35), m=20, n=50):
    """
    Mask attenuated pings by looking at seabed breaches.
    
    Ariza (in progress).
    """
    
    # get ping array
    p = np.arange(len(Sv[0]))
    
    # set to NaN shallow waters and data below the Sv threshold
    Sv_ = Sv.copy()
    Sv_[0:np.nanargmin(abs(r - offset)), :] = np.nan
    Sv_[Sv_ < thr[0]] = np.nan
    
    # bin Sv
    # TODO: update to 'twod' and 'full' functions
    Sv_bnd, r_bnd, p_bnd = bin2d(Sv_, r, p, m, n, operation='mean')[0:3]
    Sv_bnd = bin2dback(Sv_bnd, r_bnd, p_bnd, r, p)
    
    # label binned Sv data features
    Sv_lbl = label(~np.isnan(Sv_bnd))
    labels = np.unique(Sv_lbl)
    labels = np.delete(labels, np.where(labels==0))
    
    # list the median values for each Sv feature
    val = []
    for lbl in labels:
        val.append(log(np.nanmedian(lin(Sv_bnd[Sv_lbl==lbl]))))
    
    # keep the feature with a median above the Sv threshold (~seabed)
    # and set the rest of the array to NaN
    if val:
        val = np.asarray(val)
        if np.nanmax(val) > thr[1]:
            labels = labels[val != np.nanmax(val)]
            for lbl in labels:
                Sv_bnd[Sv_lbl==lbl] = np.nan
        else:
            Sv_bnd[:] = np.nan
    else:
        Sv_bnd[:] = np.nan
        
    # remove everything in the original Sv array that is not seabed
    Sv_sb = Sv.copy()
    Sv_sb[np.isnan(Sv_bnd)] = np.nan
    
    # compute the 95th percentile for each ping, at the range at which
    # the seabed is supposed to be
    seabed_percentile = log(np.nanpercentile(lin(Sv_sb), 95, axis=0))
    
    # get mask where this value falls below the Sv threshold (seabed breaches)
    mask = seabed_percentile<thr[0]
    mask = np.tile(mask, [len(Sv), 1])    
    
    return mask
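
#------------------------------------------------------------------------------
# Usage sketch (added): synthetic example, assuming the echopy helpers used
# above are available. All data and variable names below are hypothetical.
r = np.linspace(0, 500, 1000)                  # range axis (m)
Sv = np.random.uniform(-90, -70, (1000, 200))  # background echogram (dB)
Sv[800:, :] = -25                              # strong seabed echo
Sv[800:, 50] = -80                             # one attenuated ping (breach)
breached = ariza_seabed(Sv, r)                 # True over breached pings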
Example #2
import numpy as np
# `tf` is assumed to be an echopy-style transform module providing
# tf.lin/tf.log (dB <-> linear conversions).

def derobertis(Sv, bgn, thr):
    """
    Mask Sv values when lower than background noise by a user-defined
    threshold, following:
        
        De Robertis and Higginbottom (2007) ‘A post-processing technique to 
        estimate the signal-to-noise ratio and remove echosounder background 
        noise’, ICES Journal of Marine Science, 64: 1282–1291.
    
    Args:
        Sv  (float): 2D array with Sv data to be masked (dB)
        bgn (float): 2D array with background noise data (dB)
        thr (int):   threshold value (dB)
        
    Returns:
        bool:  2D array mask (Sv < background = True)
    """
    
    # subtract background noise
    Svclean = tf.log(tf.lin(Sv) - tf.lin(bgn))
    
    # signal to noise ratio
    s2n = Svclean - bgn
    
    # mask where Sv is less than background noise by a user-defined threshold
    mask1 = np.ma.masked_less(s2n, thr).mask
    mask2 = np.ma.masked_less(tf.lin(Sv) - tf.lin(bgn), 0).mask
    mask = mask1 | mask2
    
    return mask
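
#------------------------------------------------------------------------------
# Self-contained numeric sketch (added) of the signal-to-noise masking above,
# with the dB <-> linear conversions written out explicitly; tf.lin/tf.log in
# the function are assumed to behave like these helpers.
lin = lambda dB: 10 ** (dB / 10)   # dB -> linear
log = lambda x: 10 * np.log10(x)   # linear -> dB

Sv = np.array([[-60.0, -85.0], [-70.0, -88.0]])   # volume backscatter (dB)
bgn = np.full((2, 2), -90.0)                      # background noise (dB)

s2n = log(lin(Sv) - lin(bgn)) - bgn               # signal-to-noise ratio (dB)
mask = (s2n < 12) | (lin(Sv) - lin(bgn) < 0)
print(mask)   # [[False  True] [False  True]]: low-SNR samples are masked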
Example #3
import numpy as np
# lin/log (dB <-> linear conversions) are assumed to be echopy helper
# functions in this module's scope.

def maxSv(Sv, r, r0=10, r1=1000, roff=0, thr=(-40, -60)):
    """
    Initially detects the seabed as the ping sample with the strongest Sv value, 
    as long as it exceeds a dB threshold. Then it searches up along the ping 
    until Sv falls below a secondary (lower) dB threshold, where the final 
    seabed is set.
    
    Args:
        Sv (float): 2D Sv array (dB).
        r (float): 1D range array (m).
        r0 (int): minimum range below which the search will be performed (m).
        r1 (int): maximum range above which the search will be performed (m).
        roff (int): seabed range offset (m).
        thr (tuple): 2 integers with 1st and 2nd Sv threshold (dB).

    Returns:
        bool: 2D array with seabed mask.     
    """

    # get offset and range indexes
    roff = np.nanargmin(abs(r - roff))
    r0 = np.nanargmin(abs(r - r0))
    r1 = np.nanargmin(abs(r - r1))

    # get indexes for maximum Sv along every ping
    idx = np.zeros(Sv.shape[1], dtype=np.int64)
    idx[~np.isnan(Sv).all(axis=0)] = np.nanargmax(
        Sv[r0:r1, ~np.isnan(Sv).all(axis=0)], axis=0) + r0

    # indexes with maximum Sv < main threshold are discarded (=0)
    Svmax = Sv[idx, range(len(idx))]
    Svmax[np.isnan(Svmax)] = -999
    idx[Svmax < thr[0]] = 0

    # mask seabed, proceed only with accepted seabed indexes (!=0)
    mask = np.zeros(Sv.shape, dtype=bool)
    for j, i in enumerate(idx):
        if i != 0:

            # decrease the index until the mean Sv above falls below the 2nd
            # threshold, recomputing the mean at every step
            if np.isnan(Sv[i - 5:i, j]).all():
                Svmean = thr[1] + 1
            else:
                Svmean = log(np.nanmean(lin(Sv[i - 5:i, j])))

            while (Svmean > thr[1]) & (i >= 5):
                i -= 1
                if np.isnan(Sv[i - 5:i, j]).all():
                    Svmean = thr[1] + 1
                else:
                    Svmean = log(np.nanmean(lin(Sv[i - 5:i, j])))

            # subtract range offset & mask all the way down
            i -= roff
            if i < 0:
                i = 0
            mask[i:, j] = True

    return mask
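
#------------------------------------------------------------------------------
# Usage sketch (added) on synthetic data; lin/log as assumed above. The
# seabed is detected at the strongest sample of each ping and the mask is
# extended downwards.
r = np.linspace(0, 500, 1000)                 # range axis (m)
Sv = np.random.uniform(-90, -70, (1000, 20))  # background echogram (dB)
Sv[700:, :] = -20                             # strong seabed echo
seabed = maxSv(Sv, r)                         # True from the seabed down
print(seabed[:, 0].argmax())                  # ~700: first masked sample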
Example #4
import numpy as np
# lin/log (dB <-> linear conversions) are assumed to be echopy helper
# functions in this module's scope.

def ryan(Sv, r, m, n, thr, excludeabove=250, operation='percentile15'):
    """
    Mask transient noise as in:
        
        Ryan et al. (2015) ‘Reducing bias due to noise and attenuation in 
        open-ocean echo integration data’, ICES Journal of Marine Science,
        72: 2482–2493.
    
    This mask is based on the assumption that Sv values which exceed the
    average of a surrounding region of m metres by n pings must be due to
    transient noise. Sv values are masked if they exceed that average by a
    user-defined threshold. Masking is excluded above 250 m by default to
    avoid the removal of aggregated biota.

    Args:
        Sv (float): 2D numpy array with Sv data to be masked (dB) 
        r (float): 1D numpy array with range data (m)
        m (int): height of surrounding region (m) 
        n (int): width of surrounding region (pings)
        thr (int): user-defined threshold for comparisons (dB)
        excludeabove (int): range above which masking is excluded (m)
        operation (str): type of averaging operation. Only 'percentileXX' is
            implemented in this version (e.g. the default 'percentile15').
        
    Returns:
        bool: 2D numpy array mask (transient noise = True) 
    """
    # offsets for i and j indexes
    ioff = np.argmin(abs(r - m))
    joff = n

    # preclude processing above a user-defined range
    r0 = np.argmin(abs(r - excludeabove))

    # mask if Sv sample greater than averaged block
    # TODO: find out a faster method. The iteration below is too slow.
    mask = np.ones(Sv.shape, dtype=bool)
    mask[0:r0, :] = False
    for i in range(r0, len(Sv)):
        for j in range(len(Sv[0])):

            # proceed only if enough room for setting the block
            if (i - ioff >= 0) & (i + ioff < len(Sv)) & (j - joff >= 0) & (
                    j + joff < len(Sv[0])):
                sample = Sv[i, j]
                block = log(
                    np.nanpercentile(
                        lin(Sv[i - ioff:i + ioff, j - joff:j + joff]),
                        int(operation[-2:])))
                mask[i, j] = sample - block > thr

    return mask
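
#------------------------------------------------------------------------------
# Sketch (added) of a faster, vectorised variant of the loop above, for a
# 'mean' operation only: block averages are computed in the linear domain
# with a moving-average filter instead of per-sample iteration. Unlike the
# original, it does not implement 'percentileXX', it does not skip NaNs, and
# edges are handled by reflection rather than being left unmasked.
from scipy.ndimage import uniform_filter

def ryan_mean_fast(Sv, r, m, n, thr, excludeabove=250):
    ioff = np.argmin(abs(r - m))                   # block height (samples)
    r0 = np.argmin(abs(r - excludeabove))          # exclusion range index
    block = 10 * np.log10(                         # block means (dB)
        uniform_filter(10 ** (Sv / 10), size=(2 * ioff, 2 * n)))
    mask = (Sv - block) > thr                      # sample exceeds block by thr
    mask[0:r0, :] = False                          # no masking above 250 m
    return mask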
Example #5
import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import convolve2d
# The modules below are assumed to be echopy-style processing modules: mIN
# (impulse-noise masks), gBN (background-noise estimation), mSN (signal-to-
# noise masks), mRG (range masks), mSB (seabed masks), mSH (shoal/swarm
# masks), rs (resampling), and tf (dB <-> linear transforms). `logger` is
# assumed to be a configured logging.Logger instance.

def ccamlr(raw, prepro=None, jdx=[0, 0]):
    """
    CCAMLR processing routine.
    
    Process EK60 raw data and returns its variables in a dictionary array.
    """
    #--------------------------------------------------------------------------
    # check for appropriate inputs
    if (isinstance(prepro, dict)) & (jdx[0] >= 0):
        raise Exception('Preceding raw data needs appropriate j indexes')

    #--------------------------------------------------------------------------
    # Load variables
    rawfiles = raw['rawfiles']
    transect = raw['transect']
    alpha120 = raw['alpha']
    r120 = raw['r']
    t120 = raw['t']
    lon120 = raw['lon']
    lat120 = raw['lat']
    nm120 = raw['nm']
    km120 = raw['km']
    knt120 = raw['knt']
    kph120 = raw['kph']
    pitchmax120 = raw['pitchmax']
    rollmax120 = raw['rollmax']
    heavemax120 = raw['heavemax']
    Sv120 = raw['Sv']
    theta120 = raw['theta']
    phi120 = raw['phi']

    #--------------------------------------------------------------------------
    # join preceding raw data, if there is continuity in the transect
    if prepro is not None:
        if prepro['transect'] == raw['transect']:
            t120 = np.r_[prepro['t'][jdx[0]:], t120]
            lon120 = np.r_[prepro['lon'][jdx[0]:], lon120]
            lat120 = np.r_[prepro['lat'][jdx[0]:], lat120]
            nm120 = np.r_[prepro['nm'][jdx[0]:], nm120]
            km120 = np.r_[prepro['km'][jdx[0]:], km120]
            knt120 = np.r_[prepro['knt'][jdx[0]:], knt120]
            kph120 = np.r_[prepro['kph'][jdx[0]:], kph120]
            Sv120 = np.c_[prepro['Sv'][:, jdx[0]:], Sv120]
            theta120 = np.c_[prepro['theta'][:, jdx[0]:], theta120]
            phi120 = np.c_[prepro['phi'][:, jdx[0]:], phi120]
        else:
            jdx[1] = 0
    else:
        jdx[1] = 0

    #--------------------------------------------------------------------------
    # report about the transects being processed
    trsct = np.arange(jdx[1], nm120[-1], 1)
    logger.info('Processing transect %03d : %2.2f - %2.2f nmi...' %
                (transect, trsct[0], trsct[-1]))

    #--------------------------------------------------------------------------
    # Clean impulse noise
    Sv120in, m120in_ = mIN.wang(Sv120,
                                thr=(-70, -40),
                                erode=[(3, 3)],
                                dilate=[(7, 7)],
                                median=[(7, 7)])
    #TODO: True is valid
    # -------------------------------------------------------------------------
    # estimate and correct background noise
    p120 = np.arange(len(t120))
    s120 = np.arange(len(r120))
    bn120, m120bn_ = gBN.derobertis(Sv120, s120, p120, 5, 20, r120, alpha120)
    Sv120clean = tf.log(tf.lin(Sv120in) - tf.lin(bn120))
    #TODO: True is valid
    # -------------------------------------------------------------------------
    # mask low signal-to-noise
    m120sn = mSN.derobertis(Sv120clean, bn120, thr=12)
    Sv120clean[m120sn] = -999

    # -------------------------------------------------------------------------
    # get mask for near-surface and deep data
    m120rg = mRG.outside(Sv120clean, r120, 19.9, 250)

    # -------------------------------------------------------------------------
    # get mask for seabed
    m120sb = mSB.ariza(Sv120,
                       r120,
                       r0=20,
                       r1=1000,
                       roff=0,
                       thr=-38,
                       ec=1,
                       ek=(3, 3),
                       dc=10,
                       dk=(3, 7))

    # -------------------------------------------------------------------------
    # get seabed line
    idx = np.argmax(m120sb, axis=0)
    sbline = r120[idx]
    sbline[idx == 0] = np.inf
    sbline = sbline.reshape(1, -1)
    sbline[sbline > 250] = np.nan

    # -------------------------------------------------------------------------
    # get mask for non-usable range
    m120nu = mSN.fielding(bn120, -80)[0]

    # -------------------------------------------------------------------------
    # remove unwanted (near-surface & deep data, seabed & non-usable range)
    m120uw = m120rg | m120sb | m120nu
    Sv120clean[m120uw] = np.nan

    # -------------------------------------------------------------------------
    # get swarms mask
    k = np.ones((3, 3)) / 3**2
    Sv120cvv = tf.log(
        convolve2d(tf.lin(Sv120clean), k, 'same', boundary='symm'))
    m120sh, m120sh_ = mSH.echoview(Sv120cvv,
                                   r120,
                                   km120 * 1000,
                                   thr=-70,
                                   mincan=(3, 10),
                                   maxlink=(3, 15),
                                   minsho=(3, 15))

    # -------------------------------------------------------------------------
    # get Sv with only swarms
    Sv120sw = Sv120clean.copy()
    Sv120sw[~m120sh & ~m120uw] = -999

    # -------------------------------------------------------------------------
    # resample Sv from 20 to 250 m, and every 1nm
    r120intervals = np.array([20, 250])
    nm120intervals = np.arange(jdx[1], nm120[-1], 1)
    Sv120swr, r120r, nm120r, pc120swr = rs.twod(Sv120sw,
                                                r120,
                                                nm120,
                                                r120intervals,
                                                nm120intervals,
                                                log=True)

    # -------------------------------------------------------------------------
    # remove seabed from pc120swr calculation, only water column is considered
    m120sb_ = m120sb * 1.0
    m120sb_[m120sb_ == 1] = np.nan
    pc120water = rs.twod(m120sb_, r120, nm120, r120intervals,
                         nm120intervals)[3]
    pc120swr = pc120swr / pc120water * 100

    # -------------------------------------------------------------------------
    # resample seabed line every 1nm
    sbliner = rs.oned(sbline, nm120, nm120intervals, 1)[0]

    # -------------------------------------------------------------------------
    # get time resampled, interpolated from distance resampled
    epoch = np.datetime64('1970-01-01T00:00:00')
    t120f = np.float64(t120 - epoch)
    f = interp1d(nm120, t120f)
    t120rf = f(nm120r)
    t120r = np.array(t120rf, dtype='timedelta64[ms]') + epoch

    t120intervalsf = f(nm120intervals)
    t120intervals = np.array(t120intervalsf, dtype='timedelta64[ms]') + epoch

    # -------------------------------------------------------------------------
    # get latitude & longitude resampled, interpolated from time resampled
    f = interp1d(t120f, lon120)
    lon120r = f(t120rf)
    f = interp1d(t120f, lat120)
    lat120r = f(t120rf)

    # -------------------------------------------------------------------------
    # resample back to full resolution
    Sv120swrf, m120swrf_ = rs.full(Sv120swr, r120intervals, nm120intervals,
                                   r120, nm120)
    #TODO: True is valid

    # -------------------------------------------------------------------------
    # compute Sa and NASC from 20 to 250 m or down to the seabed depth
    Sa120swr = np.zeros_like(Sv120swr) * np.nan
    NASC120swr = np.zeros_like(Sv120swr) * np.nan
    for i in range(len(Sv120swr[0])):
        if (np.isnan(sbliner[0, i])) | (sbliner[0, i] > 250):
            Sa120swr[0, i] = tf.log(tf.lin(Sv120swr[0, i]) * (250 - 20))
            NASC120swr[0, i] = 4 * np.pi * 1852**2 * tf.lin(
                Sv120swr[0, i]) * (250 - 20)
        else:
            Sa120swr[0, i] = tf.log(
                tf.lin(Sv120swr[0, i]) * (sbliner[0, i] - 20))
            NASC120swr[0, i] = 4 * np.pi * 1852**2 * tf.lin(
                Sv120swr[0, i]) * (sbliner[0, i] - 20)

    # -------------------------------------------------------------------------
    # return processed data outputs
    m120_ = m120in_ | m120bn_ | m120sh_ | m120swrf_
    #TODO: True is valid

    pro = {
        'rawfiles': rawfiles,  # list of rawfiles processed
        'transect': transect,  # transect number
        'r120': r120,  # range (m)
        't120': t120,  # time  (numpy.datetime64)
        'lon120': lon120,  # longitude (deg)
        'lat120': lat120,  # latitude (deg)
        'nm120': nm120,  # distance (nmi)
        'km120': km120,  # distance (km)
        'knt120': knt120,  # speed (knots)
        'kph120': kph120,  # speed (km h-1)
        'pitchmax120': pitchmax120,  # max value in last pitching cycle (deg)
        'rollmax120': rollmax120,  # max value in last rolling cycle (deg)
        'heavemax120': heavemax120,  # max value in last heave cycle (deg)
        'Sv120': Sv120,  # Sv (dB)
        'theta120': theta120,  # Athwart-ship angle (deg)
        'phi120': phi120,  # Along-ship angle (deg)
        'bn120': bn120,  # Background noise (dB)
        'Sv120in': Sv120in,  # Sv without impulse noise (dB)
        'Sv120clean': Sv120clean,  # Sv without background noise (dB)          
        'Sv120sw': Sv120sw,  # Sv with only swarms (dB)
        'nm120r': nm120r,  # Distance resampled (nmi)
        'r120intervals': r120intervals,  # r resampling intervals
        'nm120intervals': nm120intervals,  # nmi resampling intervals
        't120intervals': t120intervals,  # t resampling intervals
        'sbliner': sbliner,  # Seabed resampled (m)
        't120r': t120r,  # Time resampled (numpy.datetime64)
        'lon120r': lon120r,  # Longitude resampled (deg)
        'lat120r': lat120r,  # Latitude resampled (deg)
        'Sv120swr': Sv120swr,  # Sv with only swarms resampled (dB)
        'pc120swr': pc120swr,  # Valid samples used to compute Sv120swr (%)
        'Sa120swr': Sa120swr,  # Sa from swarms, resampled (m2 m-2)
        'NASC120swr': NASC120swr,  # NASC from swarms, resampled (m2 nmi-2)
        'Sv120swrf':
        Sv120swrf,  # Sv with only swarms, resampled, full resolution (dB)         
        'm120_': m120_
    }  # Sv mask indicating valid processed data (where all filters could be applied)

    return pro
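
#------------------------------------------------------------------------------
# Chaining sketch (added). This is an interpretation of the prepro/jdx
# arguments drawn from the function body, not from original documentation:
# jdx[0] must be negative, selecting the tail of the preceding pings to
# prepend, and jdx[1] sets the distance (nmi) at which resampling resumes.
# `raw1`/`raw2` are hypothetical raw dictionaries from consecutive files of
# the same transect.
pro1 = ccamlr(raw1)
pro2 = ccamlr(raw2, prepro=raw1, jdx=[-50, int(raw1['nm'][-1])])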
Example #6
#------------------------------------------------------------------------------
# get background noise estimation, resampling at 5x20 (samples x pings) bins
bn120, m120bn_ = gBN.derobertis(Sv120,
                                s120,
                                p120,
                                5,
                                20,
                                r120,
                                alpha120,
                                bgnmax=-125)

# Maximum background noise estimation is -125 dB

#------------------------------------------------------------------------------
# clean background noise from Sv
Sv120clean = tf.log(tf.lin(Sv120) - tf.lin(bn120))

# -----------------------------------------------------------------------------
# mask low signal-to-noise Sv samples
m120sn = mSN.derobertis(Sv120, bn120, thr=12)

# In this case, Sv samples that are less than 12 dB above the background
# noise estimate will be masked.

# -----------------------------------------------------------------------------
# use the mask to turn the Sv samples into "empty water" (-999)
Sv120clean[m120sn] = -999

#------------------------------------------------------------------------------
# Figures
plt.figure(figsize=(8, 7))
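
#------------------------------------------------------------------------------
# Possible continuation of the figure (added): plots the raw and cleaned Sv,
# following the plotting style of the later examples. Assumes t120, r120 and
# the echopy ek500 colormap (cmaps) are available in this script's scope.
plt.subplot(211).invert_yaxis()
plt.pcolormesh(t120, r120, Sv120, vmin=-80, vmax=-50, cmap=cmaps().ek500)
plt.title('Sv original')
plt.subplot(212).invert_yaxis()
plt.pcolormesh(t120, r120, Sv120clean, vmin=-80, vmax=-50, cmap=cmaps().ek500)
plt.title('Sv clean (background noise removed, low SNR masked)')
plt.show()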
Example #7
import numpy as np
# dim2ax (dimension-to-axis interpolation) and tf (dB <-> linear transforms)
# are assumed to be echopy helper functions in this module's scope.

def twod(data, idim, jdim, irvals, jrvals, log=False, operation='mean'):
    """
    Resample down an array along the two dimensions, i and j.
    
    Args:
        data   (float): 2D array with data to be resampled.
        idim   (float): i vertical dimension.
        jdim   (float): j horizontal dimension.
        irvals (float): i resampling intervals for i vertical dimension.
        jrvals (float): j resampling intervals for j horizontal dimension.
        log    (bool ): if True, data is considered logarithmic and it will 
                        be converted to linear during calculations.
        operation(str): type of resampling operation. Accepts "mean" or "sum".
                    
    Returns:
        float: 2D resampled data array
        float: 1D resampled i vertical dimension
        float: 1D resampled j horizontal dimension
        float: 2D array with percentage of valid samples included on each
               resampled cell.
    """

    # check for appropriate inputs
    if len(irvals) < 2:
        raise Exception('length of i resampling intervals must be >= 2')
    if len(jrvals) < 2:
        raise Exception('length of j resampling intervals must be >= 2')
    if len(data) != len(idim):
        raise Exception('data height and idim length must be the same')
    if len(data[0]) != len(jdim):
        raise Exception('data width and jdim length must be the same')
    for irval in irvals:
        if (irval < idim[0]) | (idim[-1] < irval):
            raise Exception('i resampling intervals must be within idim range')
    for jrval in jrvals:
        if (jrval < jdim[0]) | (jdim[-1] < jrval):
            raise Exception('j resampling intervals must be within jdim range')

    # convert data to linear, if logarithmic
    if log is True:
        data = tf.lin(data)

    # get i/j axes from i/j dimensions and i/j intervals
    iax = np.arange(len(idim))
    jax = np.arange(len(jdim))
    iaxrs = dim2ax(idim, iax, irvals)
    jaxrs = dim2ax(jdim, jax, jrvals)

    # declare new array to allocate resampled values, and new array to
    # allocate the percentage of values used for resampling
    datar = np.zeros((len(iaxrs) - 1, len(jaxrs) - 1)) * np.nan
    percentage = np.zeros((len(iaxrs) - 1, len(jaxrs) - 1)) * np.nan

    # iterate along-range
    for i in range(len(iaxrs) - 1):

        # get i indexes to locate the samples for the binning operation
        idx0 = np.where(iax - iaxrs[i + 0] <= 0)[0][-1]
        idx1 = np.where(iaxrs[i + 1] - iax > 0)[0][-1]
        idx = np.arange(idx0, idx1 + 1)

        # get i weights as the sum of the sample proportions taken
        iweight0 = 1 - abs(iax[idx0] - iaxrs[i + 0])
        iweight1 = abs(iax[idx1] - iaxrs[i + 1])
        if len(idx) > 1:
            iweights = np.r_[iweight0, np.ones(len(idx) - 2), iweight1]
        else:
            iweights = np.array([iweight0 - 1 + iweight1])

        # iterate along-pings
        for j in range(len(jaxrs) - 1):

            # get j indexes to locate the samples for the binning operation
            jdx0 = np.where(jax - jaxrs[j + 0] <= 0)[0][-1]
            jdx1 = np.where(jaxrs[j + 1] - jax > 0)[0][-1]
            jdx = np.arange(jdx0, jdx1 + 1)

            # get j weights as the sum of the sample proportions taken
            jweight0 = 1 - abs(jax[jdx0] - jaxrs[j + 0])
            jweight1 = abs(jax[jdx1] - jaxrs[j + 1])
            if len(jdx) > 1:
                jweights = np.r_[jweight0, np.ones(len(jdx) - 2), jweight1]
            else:
                jweights = np.array([jweight0 - 1 + jweight1])

            # get data and weight 2D matrices for the binning operation
            d = data[idx[0]:idx[-1] + 1, jdx[0]:jdx[-1] + 1]
            w = np.multiply.outer(iweights, jweights)

            # if d is an all-NAN array, return NAN as the weighted operation
            # and zero as the percentage of valid numbers used for binning
            if np.isnan(d).all():
                datar[i, j] = np.nan
                percentage[i, j] = 0

            #compute weighted operation & percentage of valid numbers otherwise
            else:
                w_ = w.copy()
                w_[np.isnan(d)] = np.nan
                if operation == 'mean':
                    datar[i, j] = np.nansum(d * w_) / np.nansum(w_)
                elif operation == 'sum':
                    datar[i, j] = np.nansum(d * w_)
                else:
                    raise Exception('Operation not recognised')
                percentage[i, j] = np.nansum(w_) / np.nansum(w) * 100

    # convert back to logarithmic, if data was logarithmic
    if log is True:
        datar = tf.log(datar)

    # get resampled dimensions from resampling intervals
    idimr = irvals[:-1]
    jdimr = jrvals[:-1]

    # return data
    return datar, idimr, jdimr, percentage
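
#------------------------------------------------------------------------------
# Usage sketch (added), mirroring the resampling calls of the CCAMLR routine
# above (Sv120, r120, nm120 are the variables from that example): resample a
# dB echogram into 10 m x 1 nmi cells.
Sv120r, r120r, nm120r, pc120r = twod(Sv120, r120, nm120,
                                     irvals=np.arange(20, 251, 10),
                                     jrvals=np.arange(0, nm120[-1], 1),
                                     log=True)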
Example #8
import numpy as np
# dim2ax (dimension-to-axis interpolation) and tf (dB <-> linear transforms)
# are assumed to be echopy helper functions in this module's scope.

def oned(data, dim, rvals, axis, log=False, operation='mean'):
    """
    Resample down an array along i or j dimension.
    
    Args:
        data  (float) : 2D array with data to be resampled.
        dim   (float) : original dimension.
        rvals (float) : resampling dimension intervals.
        axis  (int  ) : resampling axis (0= i vertical ax, 1= j horizontal ax).
        log   (bool ) : if True, data is considered logarithmic and it will 
                        be converted to linear during the calculations.
        operation(str): type of resampling operation. Accepts "mean" or "sum".
                    
    Returns:
        float: 2D resampled data array
        float: 1D resampled array corresponding to either i or j dimension.
        float: 2D array with percentage of valid samples included on each
               resampled cell.
    """

    # check if appropriate axis input
    if axis > 1:
        raise Exception('axis must be 0 or 1')

    # check if appropriate resampled dimension
    if len(rvals) < 2:
        raise Exception('length of resampling intervals must be >= 2')

    # check if intervals are within the dimension range of values
    for rval in rvals:
        if (rval < dim[0]) | (dim[-1] < rval):
            raise Exception('resampling intervals must be within dim range')

    # convert data to linear, if logarithmic
    if log is True:
        data = tf.lin(data)

    # get axis from dimension
    ax = np.arange(len(dim))
    axrs = dim2ax(dim, ax, rvals)

    # proceed along i dimension
    if axis == 0:
        iax = ax
        iaxrs = axrs

        # check data and axis match
        if len(data) != len(iax):
            raise Exception('data height and i dimension length must be equal')

        # declare new array to allocate resampled values, and new array to
        # allocate the percentage of values used for resampling
        datar = np.zeros((len(iaxrs) - 1, len(data[0]))) * np.nan
        percentage = np.zeros((len(iaxrs) - 1, len(data[0]))) * np.nan

        # iterate along i dimension
        for i in range(len(iaxrs) - 1):

            # get i indexes to locate the samples for the resampling operation
            idx0 = np.where(iax - iaxrs[i + 0] <= 0)[0][-1]
            idx1 = np.where(iaxrs[i + 1] - iax > 0)[0][-1]
            idx = np.arange(idx0, idx1 + 1)

            # get i weights as the sum of the proportions of samples taken
            iweight0 = 1 - abs(iax[idx0] - iaxrs[i + 0])
            iweight1 = abs(iax[idx1] - iaxrs[i + 1])
            if len(idx) > 1:
                iweights = np.r_[iweight0, np.ones(len(idx) - 2), iweight1]
            else:
                iweights = np.array([iweight0 - 1 + iweight1])

            # get data and weight 2D matrices for the resampling operation
            d = data[idx[0]:idx[-1] + 1, :]
            w = np.multiply.outer(iweights, np.ones(len(data[0])))

            # if d is an all-NAN array, return NAN as the weighted operation
            # and zero as the percentage of valid numbers used for binning
            if np.isnan(d).all():
                datar[i, :] = np.nan
                percentage[i, :] = 0

            # compute weighted operation and percentage valid numbers otherwise
            else:
                w_ = w.copy()
                w_[np.isnan(d)] = np.nan
                if operation == 'mean':
                    datar[i, :] = np.nansum(d * w_, axis=0) / np.nansum(w_,
                                                                        axis=0)
                elif operation == 'sum':
                    datar[i, :] = np.nansum(d * w_, axis=0)
                else:
                    raise Exception('Operation not recognised')
                percentage[i, :] = np.nansum(w_, axis=0) / np.nansum(
                    w, axis=0) * 100

        # convert back to logarithmic, if data was logarithmic
        if log is True:
            datar = tf.log(datar)

        # get resampled dimension from resampling interval
        dimr = rvals

        # return data
        return datar, dimr, percentage

    # proceed along j dimension
    if axis == 1:
        jax = ax
        jaxrs = axrs

        # check data and axis match
        if len(data[0]) != len(jax):
            raise Exception('data width and j dimension length must be equal')

        # declare new array to allocate resampled values, and new array to
        # allocate the percentage of values used for resampling
        datar = np.zeros((len(data), len(jaxrs) - 1)) * np.nan
        percentage = np.zeros((len(data), len(jaxrs) - 1)) * np.nan

        # iterate along j dimension
        for j in range(len(jaxrs) - 1):

            # get j indexes to locate the samples for the resampling operation
            jdx0 = np.where(jax - jaxrs[j + 0] <= 0)[0][-1]
            jdx1 = np.where(jaxrs[j + 1] - jax > 0)[0][-1]
            jdx = np.arange(jdx0, jdx1 + 1)

            # get j weights as the sum of the proportions of samples taken
            jweight0 = 1 - abs(jax[jdx0] - jaxrs[j + 0])
            jweight1 = abs(jax[jdx1] - jaxrs[j + 1])
            if len(jdx) > 1:
                jweights = np.r_[jweight0, np.ones(len(jdx) - 2), jweight1]
            else:
                jweights = np.array([jweight0 - 1 + jweight1])

            # get data and weight 2D matrices for the resampling operation
            d = data[:, jdx[0]:jdx[-1] + 1]
            w = np.multiply.outer(np.ones(len(data)), jweights)

            # if d is an all-NAN array, return NAN as the weighted operation
            # and zero as the percentage of valid numbers used for resampling
            if np.isnan(d).all():
                datar[:, j] = np.nan
                percentage[:, j] = 0

            # compute weighted operation and percentage valid numbers otherwise
            else:
                w_ = w.copy()
                w_[np.isnan(d)] = np.nan
                if operation == 'mean':
                    datar[:, j] = np.nansum(d * w_, axis=1) / np.nansum(w_,
                                                                        axis=1)
                elif operation == 'sum':
                    datar[:, j] = np.nansum(d * w_, axis=1)
                else:
                    raise Exception('Operation not recognised')
                percentage[:, j] = np.nansum(w_, axis=1) / np.nansum(
                    w, axis=1) * 100

        # convert back to logarithmic, if data was logarithmic
        if log is True:
            datar = tf.log(datar)

        # get resampled dimension from resampling intervals
        dimr = rvals

        # return data
        return datar, dimr, percentage
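
#------------------------------------------------------------------------------
# Usage sketch (added): resample a seabed line every 1 nmi along the ping (j)
# axis, as done in the CCAMLR routine above (sbline and nm120 are the
# variables from that example).
sbliner = oned(sbline, nm120, np.arange(0, nm120[-1], 1), axis=1)[0]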
Example #9
import numpy as np
# lin/log (dB <-> linear conversions) are assumed to be echopy helper
# functions in this module's scope.

def fielding(Sv, r, r0, r1, n, thr, roff, jumps=5, maxts=-35, start=0):
    """
    Mask transient noise with method proposed by Fielding et al (unpub.).
    
    A comparison is made ping by ping with respect to a block in a reference 
    layer set at far range, where transient noise mostly occurs. If the ping 
    median is greater than the block median by a user-defined threshold, the 
    ping will be masked all the way up, until transient noise disappears, or 
    until it reaches the minimum range allowed by the user.
    
       transient                 transient             ping      
         noise                     noise             evaluated   
           |                         |                  |        
    ______ | _______________________ | ____________.....V.....____________
          |||  far range interval   |||            .  block  .            |
    _____|||||_____________________|||||___________...........____________|
    
    When transient noise is detected, comparisons start to be made in the same 
    ping but moving vertically every x metres (jumps). Pings with transient
    noise will be masked up to where the ping is similar to the block according
    to a secondary threshold, or until the exclusion range is reached.
    
    Args:
        Sv    (float): 2D numpy array with Sv data to be masked (dB).
        r     (float): 1D numpy array with range data (m).
        r0    (int  ): range below which transient noise is evaluated (m).
        r1    (int  ): range above which transient noise is evaluated (m).
        n     (int  ): number of preceding & subsequent pings defining the block.
        thr   (tuple): 2-element tuple with the primary and secondary
                       thresholds for ping/block comparisons (dB).
        roff  (int  ): range above which masking is excluded (m).
        maxts (int  ): maximum transient noise permitted; prevents the seabed
                       from being interpreted as transient noise (dB).
        jumps (int  ): height of vertical steps (m).
        start (int  ): ping index to start processing.
        
    Returns:
        list: 2D boolean array with TN mask and 2D boolean array with mask
              indicating where TN detection was unfeasible.
    """

    # raise errors if wrong arguments
    if r0 > r1:
        raise Exception('Minimum range has to be shorter than maximum range')

    # return empty mask if searching range is outside the echosounder range
    if (r0 > r[-1]) or (r1 < r[0]):
        mask = np.zeros_like(Sv, dtype=bool)
        mask_ = np.zeros_like(Sv, dtype=bool)
        return mask, mask_

    # get upper and lower range indexes
    up = np.argmin(abs(r - r0))
    lw = np.argmin(abs(r - r1))

    # get minimum range index admitted for processing
    rmin = np.argmin(abs(r - roff))

    # get scaling factor index
    sf = np.argmin(abs(r - jumps))

    # start masking process
    mask_ = np.zeros(Sv.shape, dtype=bool)
    mask = np.zeros(Sv.shape, dtype=bool)
    for j in range(start, len(Sv[0])):

        # mask where TN evaluation is unfeasible (e.g. edge issues, all-NANs)
        if (j - n < 0) | (j + n > len(Sv[0]) - 1) | np.all(
                np.isnan(Sv[up:lw, j])):
            mask_[:, j] = True

        # evaluate ping and block averages otherwise
        else:
            pingmedian = log(np.nanmedian(lin(Sv[up:lw, j])))
            pingp75 = log(np.nanpercentile(lin(Sv[up:lw, j]), 75))
            blockmedian = log(np.nanmedian(lin(Sv[up:lw, j - n:j + n])))

            # if the ping p75 is below the permitted 'maxts', and the ping
            # median is far enough above the block median, mask all the way
            # up until the noise disappears
            if (pingp75 < maxts) & ((pingmedian - blockmedian) > thr[0]):
                i0, i1 = up - sf, up
                while i0 > rmin:
                    pingmedian = log(np.nanmedian(lin(Sv[i0:i1, j])))
                    blockmedian = log(np.nanmedian(lin(Sv[i0:i1,
                                                          j - n:j + n])))
                    i0, i1 = i0 - sf, i1 - sf
                    if (pingmedian - blockmedian) < thr[1]:
                        break
                mask[i0:, j] = True

    return [mask[:, start:], mask_[:, start:]]
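
#------------------------------------------------------------------------------
# Usage sketch (added). Note that thr must be a 2-element tuple (primary and
# secondary thresholds), since the function indexes thr[0] and thr[1]. The
# parameter values below are illustrative only.
tnmask, tnunfeasible = fielding(Sv120, r120, r0=160, r1=260, n=30,
                                thr=(3, 1), roff=20, jumps=5, maxts=-35)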
Example #10
#------------------------------------------------------------------------------
# Remove impulse noise and signal below/above the target of interest using
# Wang's algorithm.
Sv120clean = mIN.wang(Sv120, thr=(-70,-40), erode=[(3,3)],
                      dilate=[(7,7)], median=[(7,7)])[0]

#------------------------------------------------------------------------------
# Remove data outside the range 20-250 m 
m120rg             = mRG.outside(Sv120clean, r120, 20, 250)
Sv120clean[m120rg] = np.nan

#------------------------------------------------------------------------------
# Convolve Sv prior to masking swarms, with a 3x3 moving window
k = np.ones((3, 3))/3**2
Sv120cvv = tf.log(convolve2d(tf.lin(Sv120clean), k, 'same', boundary='symm'))

#------------------------------------------------------------------------------
# Mask swarms using Weill's algorithm
m120sh = mSH.weill(Sv120cvv, thr=-70, maxvgap=15, maxhgap=0,
                   minhlen=3, minvlen=15)[0]

#------------------------------------------------------------------------------
# Figures
plt.figure(figsize=(8,5))
plt.subplots_adjust(left=0.08, right=0.91, bottom=0.08, top=0.95, wspace=0.08)
gs = gridspec.GridSpec(1, 3, width_ratios=[1, 1, .05])

# Sv original
plt.subplot(gs[0]).invert_yaxis()
im = plt.pcolormesh(t120, r120, Sv120, vmin=-80, vmax=-50, cmap=cmaps().ek500)
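plt.title('Sv original')
plt.ylabel('Depth (m)')

#------------------------------------------------------------------------------
# Possible continuation (added): second panel showing Sv with the swarms mask
# applied, and the shared colorbar in the third, narrow gridspec column.
Sv120sw = Sv120clean.copy()
Sv120sw[~m120sh] = np.nan
plt.subplot(gs[1]).invert_yaxis()
plt.pcolormesh(t120, r120, Sv120sw, vmin=-80, vmax=-50, cmap=cmaps().ek500)
plt.title('Sv swarms')
plt.colorbar(im, cax=plt.subplot(gs[2]))
plt.show()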
Example #11
import numpy as np
from skimage.morphology import erosion, dilation
from scipy.ndimage import median_filter as medianf
# `tf` is assumed to be an echopy-style transform module providing
# tf.lin/tf.log (dB <-> linear conversions); the import paths above are
# assumptions based on the function names used below.

def wang(Sv,
         thr=(-70, -40),
         erode=[(3, 3)],
         dilate=[(5, 5), (7, 7)],
         median=[(7, 7)]):
    """
    Clean impulse noise from Sv data following the method described by:
        
        Wang et al. (2015) ’A noise removal algorithm for acoustic data with
        strong interference based on post-processing techniques’, CCAMLR
        SG-ASAM: 15/02.
        
    This algorithm runs different cycles of erosion, dilation, and median
    filtering to clean impulse noise from Sv. Note that this function
    returns a clean/corrected Sv array, instead of a boolean array indicating
    the occurrence of impulse noise.
        
    Args:
        Sv     (float)    : 2D numpy array with Sv data (dB).
        thr    (int/float): 2-element tuple with bottom/top Sv thresholds (dB)
        erode  (int)      : list of 2-element tuples indicating the window's
                            size for each erosion cycle.
        dilate (int)      : list of 2-element tuples indicating the window's
                            size for each dilation cycle.
        median (int)      : list of 2-element tuples indicating the window's
                            size for each median filter cycle.
                      
    Returns:
        float             : 2D array with clean Sv data.
        bool              : 2D array with mask indicating valid clean Sv data.
    """

    # set weak noise and strong interference as vacant samples (-999)
    Sv_thresholded = Sv.copy()
    Sv_thresholded[(Sv < thr[0]) | (Sv > thr[1])] = -999

    # remaining weak interferences will take neighbouring vacant values
    # by running erosion cycles
    Sv_eroded = Sv_thresholded.copy()
    for e in erode:
        Sv_eroded = erosion(Sv_eroded, np.ones(e))

    # the last step might have turned interferences inside biology into vacant
    # samples, this is solved by running dilation cycles
    Sv_dilated = Sv_eroded.copy()
    for d in dilate:
        Sv_dilated = dilation(Sv_dilated, np.ones(d))

    # dilation has modified the Sv value of biological features, so these are
    # now corrected to corresponding Sv values before the erosion/dilation
    Sv_corrected1 = Sv_dilated.copy()
    mask_bio = (Sv_dilated >= thr[0]) & (Sv_dilated < thr[1])
    Sv_corrected1[mask_bio] = Sv_thresholded[mask_bio]

    # compute median convolution in Sv corrected array
    Sv_median = Sv_corrected1.copy()
    for m in median:
        Sv_median = tf.log(medianf(tf.lin(Sv_median), footprint=np.ones(m)))

    # any vacant sample inside biological features will be corrected with
    # the median of corresponding neighbouring samples
    Sv_corrected2 = Sv_corrected1.copy()
    mask_bio = (Sv >= thr[0]) & (Sv < thr[1])
    mask_vacant = Sv_corrected1 == -999
    Sv_corrected2[mask_vacant & mask_bio] = Sv_median[mask_vacant & mask_bio]

    # get mask indicating the edges, where the cleaning could not be fully
    # performed (width set by the largest erosion/dilation kernels)
    mask_ = np.ones_like(Sv_corrected2, dtype=bool)
    idx = int((max([erode[-1][0], dilate[-1][0]]) - 1) / 2)
    jdx = int((max([erode[-1][1], dilate[-1][1]]) - 1) / 2)
    mask_[idx:-idx, jdx:-jdx] = False

    return Sv_corrected2, mask_
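
#------------------------------------------------------------------------------
# Usage sketch (added), mirroring the call in the CCAMLR routine above:
# returns impulse-free Sv and a mask flagging the edge region where the
# cleaning could not be fully applied.
Sv120in, m120in_ = wang(Sv120, thr=(-70, -40), erode=[(3, 3)],
                        dilate=[(7, 7)], median=[(7, 7)])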
Example #12
import numpy as np
# lin/log (dB <-> linear conversions) are assumed to be echopy helper
# functions in this module's scope.

def ryan(Sv, r, r0, r1, n, thr, start=0):
    """
    Locate attenuated signal and create a mask following the attenuated signal 
    filter as in:
        
        Ryan et al. (2015) ‘Reducing bias due to noise and attenuation in 
        open-ocean echo integration data’, ICES Journal of Marine Science,
        72: 2482–2493.

    Scattering Layers (SLs) are continuous high signal-to-noise regions with 
    low inter-ping variability. But attenuated pings create gaps within SLs. 
                                                 
       attenuation                attenuation       ping evaluated
    ______ V _______________________ V ____________.....V.....____________
          | |   scattering layer    | |            .  block  .            |
    ______| |_______________________| |____________...........____________|
    
    The filter takes advantage of differences with preceding and subsequent 
    pings to detect and mask attenuation. A comparison is made ping by ping 
    with respect to a block of the reference layer. The entire ping is masked 
    if the ping median is less than the block median by a user-defined 
    threshold value.
    
    Args:
        Sv (float): 2D array with Sv data to be masked (dB). 
        r (float):  1D array with range data (m).
        r0 (int): upper limit of SL (m).
        r1 (int): lower limit of SL (m).
        n (int): number of preceding & subsequent pings defining the block.
        thr (int): user-defined threshold value (dB).
        start (int): ping index to start processing.
        
    Returns:
        list: 2D boolean array with AS mask and 2D boolean array with mask
              indicating where AS detection was unfeasible.
    """
    
    # raise errors if wrong arguments
    if r0 > r1:
        raise Exception('Minimum range has to be shorter than maximum range')
    
    # return empty mask if searching range is outside the echosounder range
    if (r0>r[-1]) or (r1<r[0]):
        mask  = np.zeros_like(Sv, dtype=bool) 
        mask_ = np.zeros_like(Sv, dtype=bool) 
        return mask, mask_ 
    
    # turn layer boundaries into arrays with length = Sv.shape[1]
    r0 = np.ones(Sv.shape[1])*r0
    r1 = np.ones(Sv.shape[1])*r1
    
    # start masking process    
    mask_ = np.zeros(Sv.shape, dtype=bool)
    mask = np.zeros(Sv.shape, dtype=bool)    
    for j in range(start, len(Sv[0])):
        
        # find indexes for upper and lower SL limits
        up = np.argmin(abs(r - r0[j]))
        lw = np.argmin(abs(r - r1[j]))
            # TODO: now indexes are the same at every loop, but future 
            # versions will have layer boundaries with variable range
            # (need to implement mask_layer.py beforehand!)
        
        # mask where AS evaluation is unfeasible (e.g. edge issues, all-NANs)
        if (j-n<0) | (j+n>len(Sv[0])-1) | np.all(np.isnan(Sv[up:lw, j])):        
            mask_[:, j] = True
        
        # compare ping and block medians otherwise & mask ping if too different
        else:
            pingmedian  = log(np.nanmedian(lin(Sv[up:lw, j])))
            blockmedian = log(np.nanmedian(lin(Sv[up:lw, (j-n):(j+n)])))
            if (pingmedian-blockmedian)<thr:            
                mask[:, j] = True
         
    return [mask[:, start:], mask_[:, start:]]
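
#------------------------------------------------------------------------------
# Usage sketch (added): evaluate a scattering layer between 180 and 280 m,
# comparing each ping against a 60-ping block; a ping whose median falls more
# than 6 dB below the block median is masked. Values are illustrative only.
asmask, asunfeasible = ryan(Sv120, r120, r0=180, r1=280, n=30, thr=-6)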
Example #13
import numpy as np
import cv2
from skimage.morphology import remove_small_objects
# lin/log (dB <-> linear conversions) are assumed to be echopy helper
# functions in this module's scope.

def experimental(Sv, r, r0=10, r1=1000, roff=0, thr=(-30, -70), ns=150, nd=3):
    """
    Mask Sv above a threshold to get a potential seabed mask. Then, the mask is
    dilated to fill seabed breaches, and small objects are removed to prevent 
    masking high Sv features that are not seabed (e.g. fish schools or spikes).
    Once this is done, the mask is built up until Sv falls below a 2nd
    threshold. Finally, the mask is extended all the way down.
    
    Args:
        Sv (float): 2D Sv array (dB).
        r (float): 1D range array (m).
        r0 (int): minimum range below which the search will be performed (m). 
        r1 (int): maximum range above which the search will be performed (m).
        roff (int): seabed range offset (m).
        thr (tuple): 2 integers with 1st and 2nd Sv threshold (dB).
        ns (int): maximum number of samples for an object to be removed.
        nd (int): number of dilations performed to the seabed mask.
           
    Returns:
        bool: 2D array with seabed mask.
    """

    # get indexes for range offset and range limits
    roff = np.nanargmin(abs(r - roff))
    r0 = np.nanargmin(abs(r - r0))
    r1 = np.nanargmin(abs(r - r1)) + 1

    # mask Sv above the first Sv threshold
    mask = Sv[r0:r1, :] > thr[0]
    maskabove = np.zeros((r0, mask.shape[1]), dtype=bool)
    maskbelow = np.zeros((len(r) - r1, mask.shape[1]), dtype=bool)
    mask = np.r_[maskabove, mask, maskbelow]

    # remove small objects to prevent high Sv features other than the seabed
    # from being masked (e.g. fish schools, or impulse noise not properly masked)
    mask = remove_small_objects(mask, ns)

    # dilate mask to fill seabed breaches
    # (e.g. attenuated pings or gaps from previous masking)
    kernel = np.ones((3, 5))
    mask = cv2.dilate(np.uint8(mask), kernel, iterations=nd)
    mask = np.array(mask, dtype='bool')

    # proceed with the following only if seabed was detected
    idx = np.argmax(mask, axis=0)
    for j, i in enumerate(idx):
        if i != 0:

            # rise up seabed until Sv falls below the 2nd threshold
            while (log(np.nanmean(lin(Sv[i - 5:i, j]))) > thr[1]) & (i >= 5):
                i -= 1

            # subtract range offset & mask all the way down
            i -= roff
            if i < 0:
                i = 0
            mask[i:, j] = True


#    # dilate again to ensure not leaving seabed behind
#    kernel = np.ones((3,3))
#    mask = cv2.dilate(np.uint8(mask), kernel, iterations = 2)
#    mask = np.array(mask, dtype = 'bool')

    return mask
Example #14
import numpy as np
import scipy.ndimage as nd
from scipy.signal import convolve2d
from skimage.measure import label
# lin/log (dB <-> linear conversions) and aliased2seabed (aliased-seabed
# range estimation) are assumed to be echopy helper functions in scope.

def blackwell_mod(Sv,
                  theta,
                  phi,
                  r,
                  r0=10,
                  r1=1000,
                  tSv=-75,
                  ttheta=702,
                  tphi=282,
                  wtheta=28,
                  wphi=52,
                  rlog=None,
                  tpi=None,
                  freq=None,
                  rank=50):
    """
    Detects and masks the seabed using the split-beam angle and Sv, based on
    "Blackwell et al. (2019), Aliased seabed detection in fisheries acoustic
    data". Complete article here: https://arxiv.org/abs/1904.10736
    
    This is a modified version of the original algorithm. It includes extra
    arguments to evaluate whether aliased seabed items can occur, given the
    true seabed detection range, and allows the percentile rank to be tuned.
    
    Args:
        Sv (float): 2D numpy array with Sv data (dB)
        theta (float): 2D numpy array with the along-ship angle (degrees)
        phi (float): 2D numpy array with the athwart-ship angle (degrees)
        r (float): 1D range array (m)
        r0 (int): minimum range below which the search will be performed (m) 
        r1 (int): maximum range above which the search will be performed (m)
        tSv (float): Sv threshold above which seabed is pre-selected (dB)
        ttheta (int): theta threshold above which seabed is pre-selected
        tphi (int): phi threshold above which seabed is pre-selected
        wtheta (int): window's size for mean square operation in Theta field
        wphi (int): window's size for mean square operation in Phi field
        rlog (float): Maximum logging range of the echosounder (m)
        tpi (float): Transmit pulse interval, or ping rate (s)
        freq (int): frequency (kHz)
        rank (int): Rank for percentile operation: [0, 100]
                
    Returns:
        bool: 2D array with seabed mask
    """

    # raise errors if wrong arguments
    if r0 > r1:
        raise Exception('Minimum range has to be shorter than maximum range')

    # return empty mask if searching range is outside the echosounder range
    if (r0 > r[-1]) or (r1 < r[0]):
        return np.zeros_like(Sv, dtype=bool)

    # delimit the analysis within user-defined range limits
    i0 = np.nanargmin(abs(r - r0))
    i1 = np.nanargmin(abs(r - r1)) + 1
    Svchunk = Sv[i0:i1, :]
    thetachunk = theta[i0:i1, :]
    phichunk = phi[i0:i1, :]

    # get blur kernels with theta & phi width dimensions
    ktheta = np.ones((wtheta, wtheta)) / wtheta**2
    kphi = np.ones((wphi, wphi)) / wphi**2

    # perform mean square convolution and mask if above theta & phi thresholds
    thetamaskchunk = convolve2d(thetachunk, ktheta, 'same',
                                boundary='symm')**2 > ttheta
    phimaskchunk = convolve2d(phichunk, kphi, 'same',
                              boundary='symm')**2 > tphi
    anglemaskchunk = thetamaskchunk | phimaskchunk

    # remove aliased seabed items when estimated True seabed can not be
    # detected below the logging range
    if (rlog is not None) and (tpi is not None) and (freq is not None):
        items = label(anglemaskchunk)
        item_labels = np.unique(label(anglemaskchunk))[1:]
        for il in item_labels:
            item = items == il
            ritem = np.nanmean(r[i0:i1][np.where(item)[0]])
            rseabed = aliased2seabed(ritem, rlog, tpi, freq)
            if rseabed == []:
                anglemaskchunk[item] = False

    anglemaskchunk = anglemaskchunk & (Svchunk > tSv)

    # if aliased seabed, mask Sv above the chosen Sv percentile (rank) of
    # angle-masked regions
    if anglemaskchunk.any():
        Svmedian_anglemasked = log(
            np.nanpercentile(lin(Svchunk[anglemaskchunk]), rank))
        if np.isnan(Svmedian_anglemasked):
            Svmedian_anglemasked = np.inf
        if Svmedian_anglemasked < tSv:
            Svmedian_anglemasked = tSv
        Svmaskchunk = Svchunk > Svmedian_anglemasked

        # label connected items in Sv mask
        items = nd.label(Svmaskchunk, nd.generate_binary_structure(2, 2))[0]

        # get items intercepted by angle mask (likely, the seabed)
        intercepted = list(set(items[anglemaskchunk]))
        if 0 in intercepted:
            intercepted.remove(0)

        # combine angle-intercepted items in a single mask
        maskchunk = np.zeros(Svchunk.shape, dtype=bool)
        for i in intercepted:
            maskchunk = maskchunk | (items == i)

        # add data above r0 and below r1 (removed in first step)
        above = np.zeros((i0, maskchunk.shape[1]), dtype=bool)
        below = np.zeros((len(r) - i1, maskchunk.shape[1]), dtype=bool)
        mask = np.r_[above, maskchunk, below]

    # return empty mask if aliased-seabed was not detected in Theta & Phi
    else:
        mask = np.zeros_like(Sv, dtype=bool)

    return mask
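
#------------------------------------------------------------------------------
# Usage sketch (added). The rlog/tpi/freq arguments enable the extra
# aliased-seabed feasibility check, and `rank` tunes the percentile used in
# place of the median. Values below are illustrative only.
m120sb = blackwell_mod(Sv120, theta120, phi120, r120,
                       rlog=500, tpi=0.5, freq=120, rank=50)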
Example #15
import numpy as np
import scipy.ndimage as nd
from scipy.signal import convolve2d
# lin/log (dB <-> linear conversions) are assumed to be echopy helper
# functions in this module's scope.

def blackwell(Sv,
              theta,
              phi,
              r,
              r0=10,
              r1=1000,
              tSv=-75,
              ttheta=702,
              tphi=282,
              wtheta=28,
              wphi=52):
    """
    Detects and masks the seabed using the split-beam angle and Sv, based on
    "Blackwell et al. (2019), Aliased seabed detection in fisheries acoustic
    data". Complete article here: https://arxiv.org/abs/1904.10736
    
    Args:
        Sv (float): 2D numpy array with Sv data (dB)
        theta (float): 2D numpy array with the along-ship angle (degrees)
        phi (float): 2D numpy array with the athwart-ship angle (degrees)
        r (float): 1D range array (m)
        r0 (int): minimum range below which the search will be performed (m) 
        r1 (int): maximum range above which the search will be performed (m)
        tSv (float): Sv threshold above which seabed is pre-selected (dB)
        ttheta (int): theta threshold above which seabed is pre-selected
        tphi (int): phi threshold above which seabed is pre-selected
        wtheta (int): window's size for mean square operation in Theta field
        wphi (int): window's size for mean square operation in Phi field
                
    Returns:
        bool: 2D array with seabed mask
        bool: 2D array with the angle mask (see TODO below)
    """

    # delimit the analysis within user-defined range limits
    r0 = np.nanargmin(abs(r - r0))
    r1 = np.nanargmin(abs(r - r1)) + 1
    Svchunk = Sv[r0:r1, :]
    thetachunk = theta[r0:r1, :]
    phichunk = phi[r0:r1, :]

    # get blur kernels with theta & phi width dimensions
    ktheta = np.ones((wtheta, wtheta)) / wtheta**2
    kphi = np.ones((wphi, wphi)) / wphi**2

    # perform mean square convolution and mask if above theta & phi thresholds
    thetamaskchunk = convolve2d(thetachunk, ktheta, 'same',
                                boundary='symm')**2 > ttheta
    phimaskchunk = convolve2d(phichunk, kphi, 'same',
                              boundary='symm')**2 > tphi
    anglemaskchunk = thetamaskchunk | phimaskchunk

    # if aliased seabed, mask Sv above the Sv median of angle-masked regions
    if anglemaskchunk.any():
        Svmedian_anglemasked = log(np.nanmedian(lin(Svchunk[anglemaskchunk])))
        if np.isnan(Svmedian_anglemasked):
            Svmedian_anglemasked = np.inf
        if Svmedian_anglemasked < tSv:
            Svmedian_anglemasked = tSv
        Svmaskchunk = Svchunk > Svmedian_anglemasked

        # label connected items in Sv mask
        items = nd.label(Svmaskchunk, nd.generate_binary_structure(2, 2))[0]

        # get items intercepted by angle mask (likely, the seabed)
        intercepted = list(set(items[anglemaskchunk]))
        if 0 in intercepted:
            intercepted.remove(0)

        # combine angle-intercepted items in a single mask
        maskchunk = np.zeros(Svchunk.shape, dtype=bool)
        for i in intercepted:
            maskchunk = maskchunk | (items == i)

        # add data above r0 and below r1 (removed in first step)
        above = np.zeros((r0, maskchunk.shape[1]), dtype=bool)
        below = np.zeros((len(r) - r1, maskchunk.shape[1]), dtype=bool)
        mask = np.r_[above, maskchunk, below]
        anglemask = np.r_[above, anglemaskchunk, below]  # TODO remove

    # return empty masks if aliased seabed was not detected in theta & phi
    # (anglemask must also be defined here, since both arrays are returned)
    else:
        mask = np.zeros_like(Sv, dtype=bool)
        anglemask = np.zeros_like(Sv, dtype=bool)

    return mask, anglemask