Example no. 1
def gaussWin2D(shape, sigma=None):
    """
	Create a 2D Gaussian window
	The shape must have 2 components, namely the vertical and horizontal,
	in this order.
	"""
    # Check input
    if len(shape) == 1:
        shape = [shape[0] for _ in range(2)]
    elif len(shape) > 2:
        shape = shape[:2]
    shape = [max([1, int(x)]) for x in shape]
    if not sigma:
        sigma = [x / 2.0 for x in shape]
    else:
        if len(sigma) == 1:
            sigma = [sigma[0] for _ in range(2)]
        elif len(sigma) > 2:
            sigma = sigma[:2]
        sigma = [np.finfo(np.float32).eps if x <= 0 else x for x in sigma]
    # Create vertical and horizontal components
    v = gaussian(shape[0], sigma[0])
    v = np.reshape(v, (-1, 1))  # column
    h = gaussian(shape[1], sigma[1])
    h = np.reshape(h, (1, -1))  # row
    return np.dot(v, h)
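A minimal usage sketch of gaussWin2D (my own addition, not from the original project); it assumes gaussian is scipy.signal.windows.gaussian and NumPy is imported as np, as the function body implies:

import numpy as np
from scipy.signal.windows import gaussian

# with gaussWin2D defined as above
win = gaussWin2D((5, 7))                            # default sigma = [2.5, 3.5]
print(win.shape)                                    # (5, 7)
print(np.unravel_index(win.argmax(), win.shape))    # (2, 3): peak at the centre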
Example no. 2
def exer21(testImageFolder, saveImageFolder):
    """
    Deliverables: 
        Provide a code snippet and explanation of your solution as well
        as illustrations of your solution.
    """
    shape = (124, 124)
    sigma_blob = 10.0

    im = np.ones(shape)
    G = gaussian(shape[0], std=sigma_blob)
    GG = np.outer(G, G)
    blob = im * GG

    sigma_scl = [1., 2., 3., 4., 5., 8., 10., 20]
    sigma_scale = list(map(lambda x: x**2, sigma_scl))

    for i in tqdm(range(len(sigma_scale))):

        #create gaussian for scalespace sampling
        Gs = gaussian(124, std=sigma_scale[i])
        GGs = np.outer(Gs, Gs)

        blob_scaled = convolve2d(blob, GGs)

        fig = plt.figure()
        ax = plt.subplot(1, 1, 1)
        ax.imshow(blob_scaled, cmap=plt.cm.gray)

        ax.axis('off')
        title = r'blob w. $\sigma={}$ scalespace w. $\sigma={}$'.format(
            sigma_blob, sigma_scale[i])
        ax.set_title(title, fontsize=11)

        fig.tight_layout()

        filename = "exer21-" + 'blob w sigma={}, scale w sigma={}'.format(
            sigma_blob, sigma_scale[i])

        plt.savefig(saveImageFolder + filename + '.png')
        plt.close(fig)

    fig = plt.figure()
    ax = plt.subplot(1, 1, 1)
    ax.imshow(blob, cmap=plt.cm.gray)

    ax.axis('off')
    title = r'blob w. $\sigma={}$ w. scale = 0'.format(sigma_blob)
    ax.set_title(title, fontsize=11)

    fig.tight_layout()

    filename = "exer21-" + 'blob w sigma={}, scale w sigma=0'.format(
        sigma_blob, sigma_scale[i])

    plt.savefig(saveImageFolder + filename + '.png')
    plt.close(fig)

Example no. 3
def _smooth_spectral_power(raw_data, wind_len=5):
    """
    Smooths the spectral power using a rolling Gaussian window.

    Parameters
    ----------
    raw_data : float masked array
        The data to smooth.
    wind_len : int
        Length of the moving window (forced to be odd)

    Returns
    -------
    data_smooth : float masked array
        smoothed data

    """
    # we want an odd window
    if wind_len % 2 == 0:
        wind_len += 1
    half_wind = int((wind_len - 1) / 2)

    # create window
    wind = gaussian(wind_len, std=1.)
    wind /= np.sum(wind)

    # initialize smoothed data
    nrays, ngates, nDoppler = np.shape(raw_data)
    data_smooth = np.ma.masked_all((nrays, ngates, nDoppler))

    # get rolling window and mask data
    data_wind = rolling_window(raw_data, wind_len)
    data_smooth[:, :, half_wind:-half_wind] = np.ma.dot(data_wind, wind)

    return data_smooth
def smooth_responses(rsps, window_width=50, sigma=0.65):
    # Gaussian smoothing
    # Values taken from Rajeev's code.
    sm_rsps = deepcopy(rsps)
    sm_rsps.data = convolve1d(rsps.data, gaussian(window_width, sigma), axis=2)
    sm_rsps.data = np.maximum(sm_rsps.data, 0)
    return sm_rsps
Example no. 5
def gaussian_img_filter(img_h, img_w):
    ''' 
    Creates a gaussian filter for images.

    Args:
        img_h (int): The height of the images.
        img_w (int): The width of the images.

    Returns:
        gauss_filter (np.ndarray): Array of shape (img_h, img_w).
    '''

    gauss_mg = np.meshgrid(gaussian(img_w, img_w // 4),
                           gaussian(img_h, img_h // 4))

    return gauss_mg[0] * gauss_mg[1]
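As a side note (my own check, not part of the original), the meshgrid product above is just the separable outer-product construction used elsewhere on this page; assuming numpy as np and scipy.signal.windows.gaussian, the two are identical:

import numpy as np
from scipy.signal.windows import gaussian

h, w = 32, 48
via_meshgrid = gaussian_img_filter(h, w)                  # function defined above
via_outer = np.outer(gaussian(h, h // 4), gaussian(w, w // 4))
print(np.allclose(via_meshgrid, via_outer))               # True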
Example no. 6
def rolling_mean_np(arr, win, center=True, win_type='boxcar'):
    import scipy.signal.windows as spwin

    df = pd.DataFrame(data=arr.reshape((arr.shape[0], arr[0].size)))

    if win_type == 'gaussian':
        w_std = win / 3.
        print('Performing {} day rolling mean with gaussian window (std={})'
              ' to get better interannual statistics'.format(win, w_std))
        fig, ax = plt.subplots(figsize=(3, 3))
        ax.plot(range(-int(win / 2), +round(win / 2 + .49)),
                spwin.gaussian(win, w_std))
        plt.title('window used for rolling mean')
        plt.xlabel('timesteps')
        rollmean = df.rolling(win,
                              center=center,
                              min_periods=1,
                              win_type='gaussian').mean(std=w_std)
    elif win_type == 'boxcar':
        fig, ax = plt.subplots(figsize=(3, 3))
        plt.plot(spwin.boxcar(win))
        plt.title('window used for rolling mean')
        plt.xlabel('timesteps')
        rollmean = df.rolling(win,
                              center=center,
                              min_periods=1,
                              win_type='boxcar').mean()

    return rollmean.values.reshape((arr.shape))
Example no. 7
def createTargetShapeDelayFigure():
    gestureLen = 20
    gestureSig = np.concatenate([np.zeros((10,3)),np.random.normal(size=(gestureLen,3))*np.atleast_2d(gaussian(20, 3, 0)*2).T,np.zeros((10,3))],0)
    target = np.concatenate([np.zeros((10,1)),np.ones((gestureLen,1)),np.zeros((10,1))],0)
    target_gaus = np.concatenate([np.zeros((5,1)),np.atleast_2d(gaussian(gestureLen+10,5)).T,np.zeros((5,1))],0)
    target_delayed = np.concatenate([np.zeros((28,1)),np.ones((5,1)),np.zeros((7,1))],0)
    
    fig, ax = plt.subplots(1, 3, sharey=True, sharex=True, figsize=(20,5))
    plt.ylim(-5,5)
    for axn in ax: 
        axn.plot(gestureSig,label='input signal')
        axn.plot([0,40],[0,0],c='black',linewidth=1)
    ax[0].plot(target,label='target',c='red',linewidth=2)
    ax[0].fill_between(np.arange(0,40),0,target.squeeze(),facecolor='red',alpha=0.5)
    ax[0].set_title('(a)')
    ax[0].set_xlabel('timestep')
    ax[1].plot(target_gaus,label='target',c='red',linewidth=2)
    ax[1].fill_between(np.arange(0,40),0,target_gaus.squeeze(),facecolor='red',alpha=0.5)
    ax[1].set_title('(b)')
    ax[1].set_xlabel('timestep')
    ax[2].plot(target_delayed,label='target',c='red',linewidth=2)
    ax[2].fill_between(np.arange(0,40),0,target_delayed.squeeze(),facecolor='red',alpha=0.5)
    ax[2].set_title('(c)')
    ax[2].set_xlabel('timestep')
    #plt.legend(bbox_to_anchor=(1., 1.05), loc=1, borderaxespad=0.)
    plt.tight_layout()
    projectPath = r'C:\Users\Steve\Documents\Uni\BAThesis\src\targetShapeDelay2.pdf'
    pp = PdfPages(projectPath)
    pp.savefig()
    pp.close()
Example no. 8
def velocity_smoothed(pos, freq, smooth_size=0.03):
    """
    Compute wheel velocity from uniformly sampled wheel data

    Parameters
    ----------
    pos : array_like
        Array of wheel positions
    freq : float
        Sampling frequency of the data
    smooth_size : float
        Size of Gaussian smoothing window in seconds

    Returns
    -------
    vel : np.ndarray
        Array of velocity values
    acc : np.ndarray
        Array of acceleration values
    """
    # Define our smoothing window with an area of 1 so the units won't be changed
    std_samps = np.round(
        smooth_size *
        freq)  # Standard deviation relative to sampling frequency
    N = std_samps * 6  # Number of points in the Gaussian covering +/-3 standard deviations
    gauss_std = (N - 1) / 6
    win = windows.gaussian(N, gauss_std)
    win = win / win.sum()  # Normalize amplitude

    # Convolve and multiply by sampling frequency to restore original units
    vel = np.insert(convolve(np.diff(pos), win, mode='same'), 0, 0) * freq
    acc = np.insert(convolve(np.diff(vel), win, mode='same'), 0, 0) * freq

    return vel, acc
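A quick sanity check I would expect to hold (my own sketch, assuming the imports the function above already relies on, i.e. numpy as np, scipy.signal.windows and scipy.signal.convolve): a wheel position ramping at a constant rate should give a roughly constant velocity and near-zero acceleration.

import numpy as np

freq = 1000.0                        # Hz
t = np.arange(0, 2, 1 / freq)
pos = 0.5 * t                        # constant speed of 0.5 units/s
vel, acc = velocity_smoothed(pos, freq, smooth_size=0.03)
print(np.round(np.median(vel), 3))   # ~0.5
print(np.round(np.median(acc), 3))   # ~0.0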
Example no. 9
def denoise_sst(sst, bt11, kernel='boxcar', width=20, show=False):
    """
    """
    if kernel == 'boxcar':
        ker = np.ones((width, width))
    elif kernel == 'gaussian':
        gau = gaussian(3 * width, width / 2.)
        ker = np.outer(gau, gau)
    else:
        raise Exception('Unknown kernel')
    mask = ma.getmaskarray(sst) | ma.getmaskarray(bt11)
    sst.data[mask] = 0.
    bt11.data[mask] = 0.
    corr = convolve((sst - 273.15) * 0.9 + 273.15 - bt11, ker)
    #corr = convolve(sst - bt11, ker)
    #corr = convolve((sst - 273) * 0.9 + 273 - bt11, ker)
    norm = convolve(1. - mask, ker)
    corr[mask] = 0
    corr[~mask] /= norm[~mask]
    denoised_sst = bt11 + corr
    denoised_sst.mask = mask
    if show == True:
        import matplotlib.pyplot as plt
        vmin = min([bt11.min(), sst.min(), denoised_sst.min()])
        vmax = max([bt11.max(), sst.max(), denoised_sst.max()])
        corr = ma.MaskedArray(corr, mask=mask)
        for ivar, var in enumerate([sst, bt11, corr, denoised_sst]):
            plt.figure()
            if ivar == 2:
                plt.imshow(var, interpolation='nearest')
            else:
                plt.imshow(var, vmin=vmin, vmax=vmax, interpolation='nearest')
            plt.colorbar()
        plt.show()
    return denoised_sst
Example no. 11
def edge_detector(
        gammatone: NDVar,
        c: float = 0,
        name: str = None,
):
    """Neural model for auditory edge-detection, as described by [1]_ and used in [2]_

    Parameters
    ----------
    gammatone
        Gammatone spectrogram.
    c
        Saturation parameter (see [1]_).
    name
        Name for the returned :class:`NDVar`.

    References
    ----------
    .. [1] Fishbach, A., Nelken, I., & Yeshurun, Y. (2001). Auditory Edge Detection: A Neural Model for Physiological and Psychoacoustical Responses to Amplitude Transients. Journal of Neurophysiology, 85(6), 2303–2323. https://doi.org/10.1152/jn.2001.85.6.2303
    .. [2] Brodbeck, C., Jiao, A., Hong, L. E., & Simon, J. Z. (2020). Neural speech restoration at the cocktail party: Auditory cortex recovers masked speech of both attended and ignored speakers. PLOS Biology, 18(10), e3000883. https://doi.org/10.1371/journal.pbio.3000883

    """
    taus = np.linspace(3, 5, 10)
    ws = np.diff(gaussian(11, 2))
    rfs = [delay_rf(tau) for tau in taus]
    xs_d = [apply_receptive_field(gammatone, rf) for rf in rfs]
    if c:
        xs_d = [saturate(x, c) for x in xs_d]
    return sum([w * x for w, x in zip(ws, xs_d)]).clip(0, name=name)
Example no. 12
def gaussian(t, std=1):
    r"""Ricker wavelet

    Create a Gaussian wavelet given time axis ``t`` and standard deviation ``std``
    using :py:func:`scipy.signal.gaussian`.

    Parameters
    ----------
    t : :obj:`numpy.ndarray`
        Time axis (positive part including zero sample)
    std : :obj:`float`, optional
        Standard deviation of gaussian

    Returns
    -------
    w : :obj:`numpy.ndarray`
        Wavelet
    t : :obj:`numpy.ndarray`
        Symmetric time axis
    wcenter : :obj:`int`
        Index of center of wavelet

    """
    if len(t) % 2 == 0:
        t = t[:-1]
        warnings.warn('one sample removed from time axis...')

    from scipy.signal.windows import gaussian as sp_gaussian  # SciPy's window; the local name ``gaussian`` shadows it
    w = sp_gaussian(len(t) * 2 - 1, std=std)
    t = np.concatenate((np.flipud(-t[1:]), t), axis=0)
    wcenter = np.argmax(np.abs(w))

    return w, t, wcenter
Example no. 13
def add_background_fluctuation(sinogram, strength_ratio=0.2):
    """
    Fluctuate the background of a sinogram image using a Gaussian profile beam.

    Parameters
    ----------
    sinogram : array_like
        2D array. Sinogram image.
    strength_ratio : float
        To define the strength of the variation. The value is in the range of
        [0.0, 1.0].

    Returns
    -------
    array_like
    """
    sinogram = np.copy(sinogram)
    (nrow, ncol) = sinogram.shape
    list_fact = 1.0 - np.random.rand(nrow) * strength_ratio
    list_shift = np.int16(
        (0.5 - np.random.rand(nrow)) * strength_ratio * ncol * 0.5)
    for i in range(nrow):
        sinogram[i] = sinogram[i] * np.roll(
            win.gaussian(ncol, 0.5 * list_fact[i] * ncol), list_shift[i])
    return sinogram
Example no. 14
 def test_basic(self):
     assert_allclose(windows.gaussian(6, 1.0),
                     [0.04393693362340742, 0.3246524673583497,
                      0.8824969025845955, 0.8824969025845955,
                      0.3246524673583497, 0.04393693362340742])
     assert_allclose(windows.gaussian(7, 1.2),
                     [0.04393693362340742, 0.2493522087772962,
                      0.7066482778577162, 1.0, 0.7066482778577162,
                      0.2493522087772962, 0.04393693362340742])
     assert_allclose(windows.gaussian(7, 3),
                     [0.6065306597126334, 0.8007374029168081,
                      0.9459594689067654, 1.0, 0.9459594689067654,
                      0.8007374029168081, 0.6065306597126334])
     assert_allclose(windows.gaussian(6, 3, False),
                     [0.6065306597126334, 0.8007374029168081,
                      0.9459594689067654, 1.0, 0.9459594689067654,
                      0.8007374029168081])
Example no. 15
 def gauss(n):
     """
     creates a 1d gaussian low pass filter with unit
     standard deviation.
     :param n: number of desired filter components
     """
     kernel = gaussian(n, 1).reshape(1, n)
     return kernel/np.sum(kernel)
Example no. 17
def smooth(data, window_type='hann', filter_width=11, sigma=2, plot_on=1):
    """
    Smooth 1d data with moving window (uses filtfilt to have zero phase distortion)
    Wrapper for scipy.signal.filtfilt
    To do: consider replacing with sosfiltfilt

    Inputs:
        data: numpy array
        window_type ('hann'): string ('boxcar', 'gaussian', 'hann', 'bartlett', 'blackman')
        filter_width (11): int, width of the window (wider is smoother); odd is ideal
        sigma (2.): scalar, std deviation (only used for gaussian)
        plot_on (1): int, determines plotting: 0 none, 1 plot signal, 2 also plot filter
    Outputs
        data_smoothed: signal after being smoothed
        filter_window: the window used for smoothing

    Notes:
        Uses Gustafsson's method to handle edge artifacts
        Currently accepted window_type options:
            hann (default) - cosine bump filter_width is only param
            blackman - more narrowly peaked bump than hann
            boxcar - flat-top of length filter_width
            bartlett - triangle
            gaussian - sigma determines width

    """
    if window_type == 'boxcar':
        filter_window = windows.boxcar(filter_width)
    elif window_type == 'hann':
        filter_window = windows.hann(filter_width)
    elif window_type == 'bartlett':
        filter_window = windows.bartlett(filter_width)
    elif window_type == 'blackman':
        filter_window = windows.blackman(filter_width)
    elif window_type == 'gaussian':
        filter_window = windows.gaussian(filter_width, sigma)
    else:
        raise ValueError(f'Unknown window_type: {window_type}')
    filter_window = filter_window / np.sum(filter_window)
    data_smoothed = signal.filtfilt(filter_window, 1, data,
                                    method="gust")  # pad

    if plot_on:
        if plot_on > 1:
            plt.plot(filter_window)
            plt.title(f'{window_type} filter')
        plt.figure('signal', figsize=(10, 5))
        plt.plot(data,
                 color=(0.7, 0.7, 0.7),
                 label='noisy signal',
                 linewidth=1)
        plt.plot(data_smoothed, color='r', label='smoothed signal')
        plt.xlim(0, len(data_smoothed))
        plt.xlabel('sample')
        plt.grid(True)
        plt.legend()

    return data_smoothed, filter_window
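A usage sketch for the smoother above (my own, under the same assumptions as its body: numpy as np, scipy.signal as signal, scipy.signal.windows as windows, matplotlib for plotting); plot_on=0 keeps it non-interactive:

import numpy as np

t = np.linspace(0, 1, 500)
noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(t.size)
smoothed, win = smooth(noisy, window_type='gaussian', filter_width=11, sigma=2, plot_on=0)
print(smoothed.shape, round(win.sum(), 6))   # (500,) 1.0  (window normalised to unit area)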
Example no. 18
def compute_mute_flag(s0):
    a = s0[:]
    a = a.astype('float32') / np.max(a)
    s = a**2 * 0.25
    w = gaussian(32, 1)
    s = convolve(s, w)
    # plt.plot(s[:102400*5])
    s[0] = 0
    flag = np.array(s > 0.030, 'uint8')
    return flag
Example no. 19
def accumulate_signal_energy_gaussian(array: np.ndarray) -> np.ndarray:
    """
    Computes the cumulative energy from an array considering a Gaussian
    weighted window. This is equivalent to a Gaussian-filtered signal.
    Parameters of the window: 100 points, std=2
    """
    # TODO: optimize window based on noise/NN
    gaussian_window = window.gaussian(100, std=2)
    gaussian_window = gaussian_window/np.sum(gaussian_window)
    filtered = signal.convolve(array, gaussian_window)
    return np.cumsum(filtered ** 2, axis=0)
Example no. 20
def rolling_mean_np(arr, win, center=True):
    import scipy.signal.windows as spwin
    plt.plot(range(-int(win/2),+int(win/2)+1), spwin.gaussian(win, win/2))
    plt.title('window used for rolling mean')
    plt.xlabel('timesteps')
    df = pd.DataFrame(data=arr.reshape( (arr.shape[0], arr[0].size)))

    rollmean = df.rolling(win, center=center, min_periods=1,
                          win_type='gaussian').mean(std=win/2.)

    return rollmean.values.reshape( (arr.shape))
Example no. 21
    def _f_calc_weighted_percentage(roi):
        """
        特徴量計算: 「端点」「分岐点」の重み付け割合

        - ガウシアンカーネルによる重み付けを行う

        Parameters
        ----------
        roi : numpy.ndarray
            局所領域画像

        Returns
        -------
        value : np.float
           特徴量値
        """
        LABELS = EdgeLineFeatures.LABELS
        BG = LABELS["BG"]
        ENDPOINT = LABELS["endpoint"]
        BRANCH = LABELS["branch"]

        sigma = roi.shape[0] / 3

        # Generate Gaussian kernel
        gaussian_kernel = np.outer(gaussian(roi.shape[0], std=sigma),
                                   gaussian(roi.shape[1], std=sigma))

        # TODO: should the denominator be the region size or the number of edge pixels?
        # w_n_edges = np.sum(gaussian_kernel) - np.sum(gaussian_kernel[roi == BG])
        w_n_edges = np.sum(gaussian_kernel)

        if w_n_edges == 0:
            return 0

        w_n_endpoint = np.sum(gaussian_kernel[roi == ENDPOINT])
        w_n_branch = np.sum(gaussian_kernel[roi == BRANCH])

        value = (w_n_endpoint + w_n_branch) / w_n_edges

        return value
def do_smooth_with_gaussian(hist_dat, std):
     
    window_len = int(4*std)+1
    
    if window_len>2*hist_dat.size-1:
        raise Exception('Whoa buddy!')
    kern = gaussian(window_len,std)
    kern /= np.sum(kern)
    
    ys1 = hist_dat
    ys1 = np.r_[ys1[(window_len-1)//2:0:-1],ys1,ys1[-2:(-window_len-3)//2:-1]]

    ys1 = np.convolve(ys1,kern,'valid')
    return ys1
Example no. 23
    def convolute(self: 'Frame2D',
                  radius: int,
                  method: str = 'nearest') -> 'Frame2D':
        """ Convolves the Frame.

        :param radius: The radius of the convolution.
        :param method: "nearest" or "average". If argument is not 'nearest', it'll use average by default.
        """

        kernel_diam = radius * 2 + 1

        if method == 'nearest':
            kernel = np.zeros([kernel_diam + 1, kernel_diam + 1, 1])
            kernel[kernel.shape[0] // 2, kernel.shape[1] // 2] = 1

        else:  # 'average'
            kernel = np.outer(gaussian(kernel_diam + 1, radius),
                              gaussian(kernel_diam + 1, radius))
            kernel = np.expand_dims(kernel, axis=-1)

        return self.create(
            fftconvolve(self.data, kernel, mode='valid', axes=[0, 1]),
            self.labels)
Example no. 24
    def __call__(self, length, flagged=None):

        self.length = length
        self.flagged = flagged if flagged is not None else self.flagged

        i = self.get_mask()
        abs_residuals = np.interp(self.spectrum.wave, self.spectrum.wave[i],
                                  self.abs_residuals[i])

        window = gaussian(self.spectrum.N, length)
        window /= np.sum(window)
        error = convolve(abs_residuals, window, mode='same')

        fmin = lambda a: np.power(
            1. * np.sum(abs_residuals < a * error) / self.spectrum.N -
            .682689492137, 2)
        error *= minimize(fmin, 1, method='Nelder-Mead').x[0]

        return error
Example no. 25
def remove_stripe_based_filtering(sinogram, sigma, size, dim=1):
    """
    Remove stripe artifacts in a sinogram using the filtering technique,
    algorithm 2 in Ref. [1]. Angular direction is along the axis 0.

    Parameters
    ----------
    sinogram : array_like
        2D array. Sinogram image
    sigma : int
        Sigma of the Gaussian window used to separate the low-pass and
        high-pass components of the intensity profile of each column.
    size : int
        Window size of the median filter.
    dim : {1, 2}, optional
        Dimension of the window.

    Returns
    -------
    array_like
         2D array. Stripe-removed sinogram.

    References
    ----------
    .. [1] https://doi.org/10.1364/OE.26.028396
    """
    pad = min(150, int(0.1 * sinogram.shape[0]))
    sinogram = np.transpose(sinogram)
    sino_pad = np.pad(sinogram, ((0, 0), (pad, pad)), mode='reflect')
    (_, ncol) = sino_pad.shape
    window = gaussian(ncol, std=sigma)
    list_sign = np.power(-1.0, np.arange(ncol))
    sino_smooth = np.copy(sinogram)
    for i, sino_1d in enumerate(sino_pad):
        sino_smooth[i] = np.real(
            fft.ifft(fft.fft(sino_1d * list_sign) * window) *
            list_sign)[pad:ncol - pad]
    sino_sharp = sinogram - sino_smooth
    if dim == 2:
        sino_smooth_cor = median_filter(sino_smooth, (size, size))
    else:
        sino_smooth_cor = median_filter(sino_smooth, (size, 1))
    return np.transpose(sino_smooth_cor + sino_sharp)
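One detail of the stripe-removal routines on this page that is easy to miss: multiplying a row by (-1)^n before the FFT shifts its spectrum by half the sampling rate, so the centre-peaked Gaussian window acts as a low-pass filter, and multiplying by (-1)^n again after the inverse FFT undoes the shift. A small check of that equivalence (my own sketch, assuming numpy as np and an even length):

import numpy as np
import numpy.fft as fft

n = 64
x = np.random.rand(n)
sign = np.power(-1.0, np.arange(n))
print(np.allclose(fft.fft(x * sign), fft.fftshift(fft.fft(x))))  # True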
Example no. 26
def create_kernel(dim0, dim1):
    """Create a two-dimensional LPF kernel, with a half-Hamming window along
    the first dimension and a Gaussian along the second.

    Parameters
    ----------
    dim0 : int
        Half-Hamming window length.
    dim1 : int
        Gaussian window length.

    Returns
    -------
    kernel : np.ndarray
        The 2d LPF kernel.
    """
    dim0_weights = np.hamming(dim0 * 2 + 1)[:dim0]
    dim1_weights = gaussian(dim1, dim1 * 0.25, True)
    kernel = dim0_weights[:, np.newaxis] * dim1_weights[np.newaxis, :]
    return kernel / kernel.sum()
Example no. 28
    def test_decon(self):
        """
        Convolution with subsequent deconvolution.
        """
        # The incoming wavelet
        g = gaussian(51, 2.5)

        # Impulse response
        r = np.zeros_like(g)
        r[0] = 1
        r[15] = .25

        # convolve the two to a signal
        s = np.convolve(g, r)[:len(g)]

        # Deconvolve
        _, _, r2 = it(g, s, 1, omega_min=0.5)

        # test result
        self.assertTrue(np.allclose(r, r2[0:len(r)], atol=0.0001))
Example no. 29
def gaussian(t, std=1, plotflag=False):
    r"""Ricker wavelet

    Create a Gaussian wavelet given time axis ``t`` and standard deviation ``std``
    using :py:func:`scipy.signal.gaussian`.

    Parameters
    ----------
    t : :obj:`numpy.ndarray`
        Time axis (positive part including zero sample)
    std : :obj:`float`, optional
        Standard deviation of gaussian
    plotflag : :obj:`bool`, optional
        Quickplot

    Returns
    -------
    w : :obj:`numpy.ndarray`
        Wavelet
    t : :obj:`numpy.ndarray`
        Symmetric time axis
    wcenter : :obj:`int`
        Index of center of wavelet

    """
    if len(t) % 2 == 0:
        t = t[:-1]
        warnings.warn('one sample removed from time axis...')

    from scipy.signal.windows import gaussian as sp_gaussian  # SciPy's window; the local name ``gaussian`` shadows it
    w = sp_gaussian(len(t) * 2 - 1, std=std)
    t = np.concatenate((np.flipud(-t[1:]), t), axis=0)
    wcenter = np.argmax(np.abs(w))

    if plotflag:
        plt.figure(figsize=(7, 2))
        plt.plot(t, w, 'k', lw=2)
        plt.title('Gaussian wavelet')
        plt.xlabel('t')

    return w, t, wcenter
Example no. 30
def smooth1d(array, window_size=None, kernel='gaussian'):
    """Apply a centered window smoothing to a 1D array.

    Parameters
    ----------
    array : ndarray
        the array to apply the smoothing to
    window_size : int
        the size of the smoothing window
    kernel : str
        the type of smoothing (`gaussian`, `mean`)

    Returns
    -------
    the smoothed array (same dim as input)
    """

    # some defaults
    if window_size is None:
        if len(array) >= 9:
            window_size = 9
        elif len(array) >= 7:
            window_size = 7
        elif len(array) >= 5:
            window_size = 5
        elif len(array) >= 3:
            window_size = 3

    if window_size % 2 == 0:
        raise ValueError('Window should be an odd number.')

    if isinstance(kernel, str):
        if kernel == 'gaussian':
            kernel = gaussian(window_size, 1)
        elif kernel == 'mean':
            kernel = np.ones(window_size)
        else:
            raise NotImplementedError('Kernel: ' + kernel)
    kernel = kernel / np.asarray(kernel).sum()
    return convolve1d(array, kernel, mode='mirror')
def remove_stripe_based_filtering_sorting(sinogram, sigma, size, dim=1):
    """
    Combination of algorithm 2 and algorithm 3 in [1].
    Removing stripes using the filtering and sorting technique.
    Angular direction is along the axis 0.

    Parameters
    ----------
    sinogram : array_like
        2D array.
    sigma : int
        Sigma of the Gaussian window used to separate the low-pass and
        high-pass components of the intensity profile of each column.
    size : int
        Window size of the median filter.
    dim : {1, 2}, optional
        Dimension of the window.

    Returns
    -------
    array_like
        2D array. Stripe-removed sinogram.
    """
    pad = 150  # To reduce artifacts caused by FFT
    sinogram = np.transpose(sinogram)
    sinopad = np.pad(sinogram, ((0, 0), (pad, pad)), mode='reflect')
    (_, ncol) = sinopad.shape
    window = gaussian(ncol, std=sigma)
    listsign = np.power(-1.0, np.arange(ncol))
    sinosmooth = np.copy(sinogram)
    for i, sinolist in enumerate(sinopad):
        # sinosmooth[i] = np.real(ifft(fft(
        #     sinolist * listsign) * window) * listsign)[pad:ncol-pad]
        sinosmooth[i] = np.real(
            fft.ifft(fft.fft(sinolist * listsign) * window) *
            listsign)[pad:ncol - pad]
    sinosharp = sinogram - sinosmooth
    sinosmooth_cor = np.transpose(
        remove_stripe_based_sorting(np.transpose(sinosmooth), size, dim))
    return np.transpose(sinosmooth_cor + sinosharp)
Example no. 32
    def test_it_max(self):
        """
        Convolution with subsequent deconvolution. One Iteration should
        only recover the largest peak.
        """
        # The incoming wavelet
        g = gaussian(51, 2.5)

        # Impulse response
        r = np.zeros_like(g)
        r[0] = 1
        r[15] = .25

        # convolve the two to a signal
        s = np.convolve(g, r)[:len(g)]

        # Deconvolve
        _, _, r2 = it(g, s, 1, it_max=1, omega_min=0.5)

        # test result
        self.assertFalse(np.allclose(r, r2[0:len(r)], atol=0.1))
        self.assertAlmostEqual(r[0], r2[0], places=4)
Example no. 33
def ACR_model(latency, width, weights, latency_pad, fs):
    full_mod = np.array([])
    stds = width / 4  #SD of the sources in seconds

    source_mods = []
    for source in range(stds.size):
        gauss_source = gaussian(np.round(width[source] * fs),
                                np.round(stds[source] * fs))
        source_mods.append(gauss_source - np.min(gauss_source))

    for source in range(len(source_mods)):

        if source == 2:
            continue

        if (source == 1):  # do mixed source

            s2 = np.concatenate((np.zeros(
                int(
                    np.round((np.sum(width[:2]) + width[2] / 2 - latency[2]) *
                             fs))), source_mods[2]))
            s1 = np.concatenate(
                (source_mods[1], np.zeros(s2.size - source_mods[1].size)))
            mixed_source = weights[1] * s1 + weights[2] * s2

            full_mod = np.append(full_mod, mixed_source)

        else:
            full_mod = np.append(
                full_mod,
                np.min(source_mods[source]) *
                np.ones(int(np.round(latency_pad[source] * fs))))
            full_mod = np.append(full_mod,
                                 weights[source] * source_mods[source])

    return full_mod
Example no. 34
    def angle_variance_using_mean_vector(_edge_magnitude, _edge_angle):
        """
        平均ベクトルに基づくエッジ角度分散の計算

        *REF:*\ `[PDF]角度統計 <http://q-bio.jp/images/5/53/角度統計配布_qbio4th.pdf>`__
        
        - :math:`N` : 計算対象の角度の個数
        - まず、:math:`\\cos`、:math:`\\sin` の平均値を計算する
        
          -  それぞれを :math:`M_{\\cos}`、:math:`M_{\\sin}` とすると
          
        .. math::
          
          M_{\\cos} = \\frac{1}{N} \\sum^{N} \\cos{\\theta} ;, ;; M_{\\sin} = \\frac{1}{N} \\sum^{N} \\sin{\\theta}
        
        - ここで、平均ベクトルを考える
        
          - 平均ベクトル :math:`(R\\cos{\\Theta}, R\\sin{\\Theta})` は次のように定義される
        
        .. math::
          
          (R\\cos{\\Theta}, R\\sin{\\Theta}) = (M_{\\cos}, M_{\\sin})
        
        - このとき、エッジ角度分散 :math:`V` は平均ベクトルの長さ :math:`R`
          を用いて次のように定義される
          
          - :math:`V = 1 - R`
          - :math:`R` は以下の計算で算出する
            - :math:`R = \\sqrt{ {M_{\\cos}}^2 + {M_{\\sin}}^2 }`

        Parameters
        ----------
        _edge_magnitude : numpy.ndarray
            エッジ強度
        _edge_angle : numpy.ndarray
            エッジ角度

        Returns
        -------
        float
            エッジ角度の分散値 [0, 1]

        """
        radians = _edge_angle
        weights = _edge_magnitude

        gaussian_kernel = np.outer(
            gaussian(_edge_magnitude.shape[0],
                     std=_edge_magnitude.shape[0] / 3),
            gaussian(_edge_magnitude.shape[1],
                     std=_edge_magnitude.shape[1] / 3))

        weights = weights * gaussian_kernel

        if np.isclose(np.sum(weights), 0):
            return 0

        # weights /= weights.max()

        M_cos = np.average(np.cos(radians), weights=weights)
        M_sin = np.average(np.sin(radians), weights=weights)

        R = np.hypot(M_cos, M_sin)

        variance = 1 - R

        # return variance
        return variance * np.mean(weights)
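To make the V = 1 - R definition in the docstring above concrete, a toy check (my own, assuming only numpy as np): identical angles give zero variance, while angles spread uniformly around the circle give a variance of one.

import numpy as np

angles = np.deg2rad([20.0, 20.0, 20.0])          # all identical -> R = 1, V = 0
R = np.hypot(np.mean(np.cos(angles)), np.mean(np.sin(angles)))
print(round(1 - R, 6))                           # 0.0

angles = np.deg2rad([0.0, 90.0, 180.0, 270.0])   # evenly spread -> R = 0, V = 1
R = np.hypot(np.mean(np.cos(angles)), np.mean(np.sin(angles)))
print(round(1 - R, 6))                           # 1.0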