Example 1
File: qam.py Project: viyer/ham_qam
def demod_QAM16(QAM, t, f0=1800, fs=48000):
    r = QAM*np.cos(2*np.pi*f0*t)
    i = -QAM*np.sin(2*np.pi*f0*t)

    #plot(r+i)
    num_taps = 100
    lp = signal.firwin(num_taps, np.pi*f0/4,nyq=fs/2.0)
    r_lp = signal.fftconvolve(lp,r)
    i_lp = signal.fftconvolve(lp, i)

    #fig = figure(figsize = (16,4))
    #frange = np.linspace(-fs/2,fs/2,len(r))
    #frange_filt = np.linspace(-fs/2,fs/2,len(r_lp))
    #plt.plot(frange_filt, abs(fft.fftshift(fft.fft(lp))))
    '''
    ylim(-3,3)

    fig = figure(figsize = (16,4))
    plt.plot(frange, abs(fft.fftshift(fft.fft(i))))
    plt.plot(frange_filt, abs(fft.fftshift(fft.fft(i_lp))))

    #r_env = abs(r_lp)
    #i_env = abs(i_lp)
    '''
    # use integer division so the group-delay trim also works under Python 3
    r_lp = r_lp[num_taps//2:-num_taps//2+1]
    i_lp = i_lp[num_taps//2:-num_taps//2+1]
    return r_lp, i_lp
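A minimal usage sketch (illustration only, not from the original project): it assumes demod_QAM16 above is in scope, along with NumPy/SciPy and a SciPy version that still accepts firwin's nyq keyword. Feeding a single fixed I/Q symbol, the mixer recovers it scaled by 1/2.

import numpy as np
from scipy import signal

fs, f0 = 48000, 1800
t = np.arange(0, 0.05, 1.0/fs)
# one 16-QAM symbol with I=3, Q=1 modulated onto the 1800 Hz carrier
qam = 3*np.cos(2*np.pi*f0*t) - 1*np.sin(2*np.pi*f0*t)
r_lp, i_lp = demod_QAM16(qam, t, f0=f0, fs=fs)
print(r_lp[len(r_lp)//2], i_lp[len(i_lp)//2])  # roughly 1.5 and 0.5, i.e. I/2 and Q/2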
Example 2
def detrend(EEG):
    window_size = 207*10
    filt = np.ones((window_size,))/float(window_size)
    trend0 = signal.fftconvolve(EEG[:,0], filt, 'same')
    trend1 = signal.fftconvolve(EEG[:,1], filt, 'same')
    trend = np.vstack([trend0,trend1]).T
    return EEG-trend
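A hedged usage sketch (illustration only; the 207 Hz sampling rate is an assumption inferred from the 207*10-sample window): remove a slow linear drift from two synthetic channels.

import numpy as np
from scipy import signal

fs = 207                                        # assumed sampling rate (window is 207*10 samples)
t = np.arange(0, 60, 1.0/fs)
drift = np.vstack([0.05*t, -0.03*t]).T          # slow per-channel trend
eeg = drift + 0.5*np.random.randn(t.size, 2)    # two noisy channels
detrended = detrend(eeg)
print(detrended.mean(axis=0))                   # residual means are small compared with the removed drift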
Example 3
def golayIR(respa, respb, a, b):
    # Compute the impulse response h for single-channel responses a and b
    L = len(a)
    h = np.array(np.zeros(respa.shape))
    h = fftconvolve(a[-1::-1], respa, mode="same") + fftconvolve(b[-1::-1], respb, mode="same")
    h = h / (2 * L)
    return h
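golayIR relies on the complementarity of a Golay pair: the two autocorrelations sum to an impulse of height 2L, so correlating the recorded responses with the time-reversed codes isolates the impulse response. A quick standalone check of that property with the shortest pair (illustration only):

import numpy as np
from scipy.signal import fftconvolve

a = np.array([1.0, 1.0])    # shortest Golay complementary pair
b = np.array([1.0, -1.0])
L = len(a)
# correlation implemented as convolution with the time-reversed sequence
acf = fftconvolve(a, a[::-1]) + fftconvolve(b, b[::-1])
print(acf)                  # essentially [0, 2L, 0] (up to FFT round-off): a clean impulse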
Example 4
def energy(traces, duration, dt):
    """
    Compute an mean-squared energy measurement for each point of a
    seismic section.
        
    :param traces: The data array to use for calculating MS energy.
                   Must be 1D or 2D numpy array.
    :param duration: the time duration of the window (in seconds)
    :param dt: the sample interval of the data (in seconds) 
    :returns An array the same dimensions as the input array.
    """

    energy_data = numpy.zeros(traces.shape)
    signal = traces * traces
    n_samples = int(duration / dt)

    window = numpy.ones(n_samples)

    if (len(signal.shape)) == 1:
        ## Compute the sliding average using a convolution
        energy_data = fftconvolve(signal, window, mode="same") / n_samples

    elif len(signal.shape) == 2:
        for trace in range(signal.shape[1]):
            # divide by n_samples so the 2D branch matches the 1D mean-squared energy
            energy_data[:, trace] = fftconvolve(signal[:, trace], window, mode="same") / n_samples

    else:
        raise ValueError("Array must be 1D or 2D")

    return energy_data
Example 5
def convolutionFilter(image,vORh):
    if vORh == 'v':
        # return sg.convolve(image, [[1,-1]], "valid")         # Vertical Data
        return sg.fftconvolve(image, [[1,-1]], "valid")         # Vertical Data
    else:
        # return sg.convolve(image, [[1],[-1]], "valid")       # Horizontal Data
        return sg.fftconvolve(image, [[1],[-1]], "valid")       # Horizontal Data
Example 6
def idwt(coeffs, L, H):
    """ Compute the Inverse Wavelet Transform using thw wavelet coefficients
    and filters L and H.

    Parameters:
        coeffs (list): a list of wavelet decomposition arrays.
        L (1D ndarray): The low-pass filter.
        H (1D ndarray): The high-pass filter.
    Returns:
        The reconstructed signal (as a 1D ndarray).
    """
    n = len(coeffs) - 1
    A = coeffs[0]
    coeffs = coeffs[1:]
    for i in range(n):
        D = coeffs[i]
        print(len(D))
        up_A = np.zeros(2*A.size)
        up_A[::2] = A
        up_D = np.zeros(2*D.size)
        up_D[::2] = D
        print(len(up_A), len(L), len(up_D), len(H))
        # now convolve and add, but discard last entry
        A = fftconvolve(up_A,L)[:-1] + fftconvolve(up_D,H)[:-1]
    return A
Example 7
    def fir_filter(self, fir_ac=None, fir_dc=None, f_ac=None, f_dc=None,
                   a_ac=10, a_dc=10, alpha=None, filter_name=None, **kwargs):
        """Apply filters to generate the lock-in and dc components of phi"""

        if filter_name == 'bessel_matched':
            N_pts = kwargs.get('N_pts', int(self.ks / self.k0_dc * 6))
            dec = kwargs.get('dec', 32)
            n_pts_eval_fir = kwargs.get('n_pts_eval_fir', 2**16)
            window = kwargs.get('window', 'hann')

            fir_ac, fir_dc = _matched_filters(self.ks, self.x_m, N_pts, dec, window,
                                              n_pts_eval_fir)

            self.fir_ac = fir_ac
            self.fir_dc = fir_dc
        else:
            if fir_ac is None:
                if f_ac is None and alpha is None:
                    f_ac = self.fx * 0.5
                elif alpha is not None:
                    f_ac = self.v_tip/self.x_m * alpha
                self.fir_ac = signal.firwin(int(self.fs / f_ac * a_ac),
                                            f_ac, nyq=0.5 * self.fs,
                                            window='blackman')
            else:
                self.fir_ac = fir_ac

            if fir_dc is None:
                if f_dc is None and alpha is None:
                    f_dc = self.fx * 0.5
                elif alpha is not None:
                    f_dc = self.v_tip/self.x_m * alpha
                self.fir_dc = signal.firwin(int(self.fs / f_dc * a_dc),
                                            f_dc, nyq=0.5*self.fs,
                                            window='blackman')
            else:
                self.fir_dc = fir_dc

        indices = np.arange(self.phi.size)
        fir_ac_size = self.fir_ac.size
        fir_dc_size = self.fir_dc.size

        fir_max_size = max(fir_ac_size, fir_dc_size)

        self.m = indices[fir_max_size//2: -fir_max_size//2]
        self.tm = self.t[self.m]

        self._lock = np.exp(np.pi * 2j * self.fx * self.t)

        self.phi_lock = signal.fftconvolve(self.phi * self._lock * 2,
                                           self.fir_ac,
                                           mode='same')

        self.V_lock = self.phi_lock

        self.phi_lock_a = np.abs(self.phi_lock)
        self.phi_lock_phase = np.angle(self.phi_lock)

        self.phi_dc = signal.fftconvolve(self.phi, self.fir_dc, mode='same')
        self.V_dc = self.phi_dc
Example 8
File: cwt.py Project: mirca/gammapy
    def _transform(self, data):
        """
        Do the transform itself.

        The transform is made by using `scipy.signal.fftconvolve`.

        TODO: document.

        Parameters
        ----------
        data : `~gammapy.detect.CWTData`
            Images for transform.
        """
        from scipy.signal import fftconvolve

        total_background = data._model + data._background + data._approx
        excess = data._counts - total_background
        log.debug('Excess sum: {0:.4f}'.format(excess.sum()))
        log.debug('Excess max: {0:.4f}'.format(excess.max()))

        log.debug('Computing transform and error')
        for idx_scale, kern in self.kernels.kern_base.items():
            data._transform_3d[idx_scale] = fftconvolve(excess, kern, mode='same')
            data._error[idx_scale] = np.sqrt(fftconvolve(total_background, kern ** 2, mode='same'))
        log.debug('Error sum: {0:.4f}'.format(data._error.sum()))
        log.debug('Error max: {0:.4f}'.format(data._error.max()))

        log.debug('Computing approx and approx_bkg')
        data._approx = fftconvolve(data._counts - data._model - data._background,
                                   self.kernels.kern_approx, mode='same')
        data._approx_bkg = fftconvolve(data._background, self.kernels.kern_approx, mode='same')
        log.debug('Approximate sum: {0:.4f}'.format(data._approx.sum()))
        log.debug('Approximate background sum: {0:.4f}'.format(data._approx_bkg.sum()))
Example 9
def fm_afskDemod(sig, TBW=4, N=74, fs=48000.0):
    #TODO: add docstring
    #  non-coherent demodulation of afsk1200
    # function returns the NRZI (without rectifying it)
    baud = 1200.0
    bandwidth = 2*500.0 + baud

    #TODO fix this
    M = int(2/(bandwidth/fs))
    h = signal.firwin(N, bandwidth, nyq=fs/2)

    t = np.r_[0.0:len(h)]/fs
    fc = 1700.0
    h = h*np.exp(1j*2*np.pi*fc*t)
    output = signal.fftconvolve(h, sig)

    temp = output*np.conjugate(np.roll(output, 1))
    NRZ_fm = np.angle(temp)/3

    h2 = signal.firwin(N, 1200, nyq=fs/2)
    NRZ_fm = signal.fftconvolve(NRZ_fm, h2)

    NRZ_fm = (NRZ_fm*fs/(2.0*np.pi)-550.0)/500.0

    return NRZ_fm
Example 10
def richardson_lucy(obs, x, h, mode="x"):
    if mode == "x":
        coeff = obs / (fftconvolve(x, h, mode="same"))
        x_hat = x * fftconvolve(coeff, h[::-1].copy(), mode="same")
        return x_hat

    if mode == "h":
        den = fftconvolve(x, h, mode="full")
        n_diff = den.shape[0] - obs.shape[0]
        den = den[n_diff // 2 : -n_diff // 2]
        coeff = obs / den
        print(h.shape, coeff.shape, x.shape)
        x_hat = x * fftconvolve(coeff, h[::-1].copy(), mode="same")
        return x_hat

    n_pad = x.shape[0] - h.shape[0]
    if n_pad > 0:
        h_new = np.zeros_like(x)
        h_new[: h.shape[0]] = h
        h = h_new.copy()
    if n_pad < 0:
        x_new = np.zeros_like(h)
        x_new[: x.shape[0]] = x
        x = x_new.copy()

    assert x.shape == h.shape
    X = np.fft.fft(x)
    H = np.fft.fft(h)
    H_flip = np.fft.fft(h[::-1])
    Y = np.fft.fft(obs)
    coeff = Y / (X * H) * H_flip
    X_hat = X * coeff
    x_hat = np.fft.ifft(X_hat)
    assert np.max(x_hat.imag) < 1e-6
    return np.abs(x_hat)
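A rough usage sketch for the mode="x" branch (illustration only, not from the original project): iterate the multiplicative update on a blurred sparse signal and watch the peaks sharpen.

import numpy as np
from scipy.signal import fftconvolve

x_true = np.zeros(200); x_true[60] = 1.0; x_true[140] = 0.5        # sparse ground truth
h = np.exp(-0.5*(np.arange(-10, 11)/3.0)**2); h /= h.sum()         # Gaussian PSF
obs = fftconvolve(x_true, h, mode="same") + 1e-6                   # blurred, strictly positive observation
x = np.full_like(obs, obs.mean())                                  # flat initial estimate
for _ in range(50):
    x = richardson_lucy(obs, x, h, mode="x")
print(x[60], x[140])   # the peaks move back toward 1.0 and 0.5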
Example 11
 def generate_prediction(self, x, y, sigma, mbeta, pbeta):
     
     # mask for speed
     mask = self.distance_mask(x, y, sigma)
     
     # generate the RF
     spatial_rf = generate_og_receptive_field(x, y, sigma, self.stimulus.deg_x, self.stimulus.deg_y)
     spatial_rf /= ((2 * np.pi * sigma**2) * 1/np.diff(self.stimulus.deg_x[0,0:2])**2)
     
     # spatial response
     spatial_ts = generate_rf_timeseries(self.stimulus.stim_arr, spatial_rf, mask)
     
     # temporal response
     m_ts, p_ts = generate_mp_timeseries(spatial_ts, self.m_amp, self.p_amp, self.stimulus.flicker_vec)
     
     # convolve with HRF
     hrf = self.hrf_model(self.hrf_delay, self.stimulus.tr_length)
     
     # M
     m_model = fftconvolve(m_ts, hrf)[0:len(m_ts)]
     
     # P
     p_model = fftconvolve(p_ts, hrf)[0:len(p_ts)]
     
     # convert units
     m_model = (m_model - np.mean(m_model))/np.mean(m_model)
     p_model = (p_model - np.mean(p_model))/np.mean(p_model)
     
     # mix
     model = m_model * mbeta + p_model * pbeta
     
     return model
Example 12
def run():
    print_complex = get_print_complex()
    convolutionCPU = get_convolution_cpu()
    check_results = get_check_results()

    #data = np.ones((3,3)).astype('complex64')
    data = np.asfortranarray(np.random.randn(3,3).astype('complex64'))
    #kernel = np.ones((3,3)).astype('complex64')
    kernel = np.asfortranarray(np.random.randn(3,3).astype('complex64'))
    result = np.asfortranarray(np.zeros_like(data).astype('complex64'))

    convolutionCPU(_get_float2_ptr(result), _get_float2_ptr(data), _get_float2_ptr(kernel), data.shape[1], data.shape[0], kernel.shape[1], kernel.shape[0], 1, 6)

    print()
    print(kernel)
    print()
    print(data)
    print()

    s1 = np.array(data.shape)
    s2 = np.array(kernel.shape)

    print(result)
    print()
    print(fftconvolve(data.real, kernel.real, mode='full').astype('complex64'))
Example 13
def get_smoothed_slice(data, psf, periodic=False):
	''' Smooth a data slice with a given point spread function

		Parameters:
			* data (numpy array): the array to smooth. Must have dimensions NxN 
			* psf (numpy array): the point spread function. Must have dimensions NxN

		Kwargs:
			* periodic (bool): if True, the input data will be assumed to have
				periodic boundary conditions

		Returns:
			the smoothed array
	'''
	from scipy import signal

	assert(len(data.shape) == 2)
	assert(data.shape[0] == data.shape[1])

	if periodic:
		#Make a bigger version, using the periodicity
		data_big = np.hstack([np.vstack([data,data]),np.vstack([data,data])])
		data_big = np.roll(data_big, data.shape[0]//2, axis=0)
		data_big = np.roll(data_big, data.shape[1]//2, axis=1)

		out = signal.fftconvolve(data_big, psf, mode='same')
		ox = out.shape[0]
		out = out[ox//4:3*ox//4, ox//4:3*ox//4]

	else:
		out =  signal.fftconvolve(data, psf, mode='same')

	return out
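A usage sketch with a hypothetical Gaussian PSF (illustration only; it assumes the function above is in scope):

import numpy as np

field = np.random.rand(64, 64)
yy, xx = np.mgrid[-32:32, -32:32]
psf = np.exp(-(xx**2 + yy**2)/(2*3.0**2)); psf /= psf.sum()   # normalized Gaussian PSF
smoothed = get_smoothed_slice(field, psf, periodic=True)
print(field.std(), smoothed.std())                            # smoothing reduces pixel-to-pixel scatter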
Example 14
def ssim(img1, img2, cs_map=False):
    """Return the Structural Similarity Map corresponding to input images img1 
    and img2 (images are assumed to be uint8)
    
    This function attempts to mimic precisely the functionality of ssim.m a 
    MATLAB provided by the author's of SSIM
    https://ece.uwaterloo.ca/~z70wang/research/ssim/ssim_index.m
    """
    img1 = img1.astype(numpy.float64)
    img2 = img2.astype(numpy.float64)
    size = 11
    sigma = 1.5
    window = gauss.fspecial_gauss(size, sigma)
    K1 = 0.01
    K2 = 0.03
    L = 255 #bitdepth of image
    C1 = (K1*L)**2
    C2 = (K2*L)**2
    mu1 = signal.fftconvolve(window, img1, mode='valid')
    mu2 = signal.fftconvolve(window, img2, mode='valid')
    mu1_sq = mu1*mu1
    mu2_sq = mu2*mu2
    mu1_mu2 = mu1*mu2
    sigma1_sq = signal.fftconvolve(window, img1*img1, mode='valid') - mu1_sq
    sigma2_sq = signal.fftconvolve(window, img2*img2, mode='valid') - mu2_sq
    sigma12 = signal.fftconvolve(window, img1*img2, mode='valid') - mu1_mu2
    if cs_map:
        return (((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
                    (sigma1_sq + sigma2_sq + C2)), 
                (2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2))
    else:
        return ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
                    (sigma1_sq + sigma2_sq + C2))
Example 15
 def demod(self, buff):
     SIG = signal.fftconvolve(buff,self.h_bp,mode='valid')
     mark = signal.fftconvolve(SIG,self.h_mark,mode='valid')
     space = signal.fftconvolve(SIG,self.h_space,mode='valid')
     NRZ = abs(mark)-abs(space)
     NRZ = signal.fftconvolve(NRZ,self.h_lpp,mode='valid')
     return NRZ
Example 16
def getData():    #function called to get data
    
    startTime = time.time()
    raw650 = np.array(session[s1Vals])
    raw950 = np.array(session[s2Vals])
  
 #   while True:

   #     if time.time() - startTime >= 5:
    startTime = time.time()
    print('got data')
    working950 = reject_outliers(raw950)
    working650 = reject_outliers(raw650)
    sig950 = np.std(working950)
    sig650 = np.std(working650)
    print(sig650)
    window950 = signal.general_gaussian(51, p=1.5, sig= sig950)
    filtered950 = signal.fftconvolve(window950, working950)
    filtered950 = (np.average(working950) / np.average(filtered950)) * filtered950
    window650 = signal.general_gaussian(51, p=1.5, sig= sig650)
    filtered650 = signal.fftconvolve(window650, working650)
    filtered650 = (np.average(working650) / np.average(filtered650)) * filtered650

  #  filtered = np.roll(filtered, -25)
 #    plt.plot(working950)
 # #   plt.plot(window950)
 #    plt.plot(filtered950)
 #    plt.plot(raw650)
 #    plt.show()
    
    print(filtered950)
    
    print(working950)
    concentration(filtered650,filtered950)
Example 17
  def __init__ (self, var, saxis, kernel, fft):
  # {{{
    ''' __init__()'''
    import numpy as np

    assert len(kernel) <= var.shape[saxis], 'Kernel must not be longer than dimension being smoothed.'

    # Construct new variable
    self.saxis = saxis
    self.var = var
    self.kernel = kernel
    self.fft = fft
    self.klen = len(kernel)

    # Normalize and reshape kernel
    self.kernel /= np.sum(self.kernel)
    self.kernel.shape = [self.klen if i == saxis else 1 for i in range(var.naxes)]

    # Determine which convolution function to use
    from scipy import signal as sg
    tdata = np.ones(len(kernel), 'd')
    if self.fft:
      try:
        sg.fftconvolve(tdata, kernel, 'same', old_behaviour=False)
        self._convolve = lambda x, y, z: sg.fftconvolve(x, y, z, old_behaviour=False)
      except TypeError:
        self._convolve = sg.fftconvolve
    else:
      try:
        sg.convolve(tdata, kernel, 'same', old_behaviour=False)
        self._convolve = lambda x, y, z: sg.convolve(x, y, z, old_behaviour=False)
      except TypeError:
        self._convolve = sg.convolve

    Var.__init__(self, var.axes, var.dtype, name=var.name, atts=var.atts, plotatts=var.plotatts)
Example 18
    def contexttrack(self):
        print("[%s] tracking" % QThread.currentThread().objectName())
        if self._isRunning:
            self.wait.emit()
            t_trackerstart = time.time()
            print('\t start context tracking')
            dx, dy = np.random.randint(0, 20, size=2)
            self.data = np.roll(self.data, dx, axis=0)
            self.data = np.roll(self.data, dy, axis=1)
            self.data[0:dy, :] = 0.
            self.data[:, 0:dx] = 0.
            self.correlation = sig.fftconvolve(self.data, self.referencedata[::-1, ::-1], 'same')
            maxposy, maxposx = np.unravel_index(self.correlation.argmax(), self.correlation.shape)
            xcorrect = int(np.round(self.resolution1/2-maxposx))
            ycorrect = int(np.round(self.resolution2/2-maxposy))
            self.data = np.roll(self.data, ycorrect, axis=0)
            self.data = np.roll(self.data, xcorrect, axis=1)

            # check result
            self.correlation1 = sig.fftconvolve(self.data, self.referencedata[::-1, ::-1], 'same')
            maxposy, maxposx = np.unravel_index(self.correlation1.argmax(), self.correlation1.shape)

            self.update.emit(self.data, self.correlation, xcorrect, ycorrect, self.correlation.max(), time.strftime('%H:%M:%S')+' - ok')

            print('tracking took ', time.time()-t_trackerstart, 's')
            print('\t context tracking done')

            self.goon.emit()
Example 19
def nc_afskDemod(sig, TBW=2.0, N=74, fs=48000):
    bw = float(TBW*fs)/N
    t = np.r_[:N]/float(fs)
    h = signal.firwin(N,bw/2.0,nyq=float(fs)/2)
    b0=h*np.exp(2*np.pi*1.0j*1200.0*t)
    b1=h*np.exp(2*np.pi*1.0j*2200.0*t)
    return np.abs(signal.fftconvolve(sig,b1)) - np.abs(signal.fftconvolve(sig,b0))
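A quick sanity check (illustration only; it assumes nc_afskDemod above is in scope and a SciPy version that still accepts firwin's nyq keyword): a pure 1200 Hz tone should drive the discriminator output negative and a pure 2200 Hz tone should drive it positive.

import numpy as np
from scipy import signal

fs = 48000
t = np.arange(0, 0.05, 1.0/fs)
space = np.cos(2*np.pi*1200*t)
mark = np.cos(2*np.pi*2200*t)
print(np.median(nc_afskDemod(space, fs=fs)))   # negative: energy in the 1200 Hz branch
print(np.median(nc_afskDemod(mark, fs=fs)))    # positive: energy in the 2200 Hz branch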
Example 20
def energy(traces, duration, dt=1):
    """
    Compute a mean-squared energy measurement for each point of a
    seismic section.

    :param traces: The data array to use for calculating MS energy.
                   Must be 1D or 2D numpy array.
    :param duration: the time duration of the window (in seconds), or
                     samples if dt=1.
    :param dt: the sample interval of the data (in seconds). Defaults
               to 1 so duration can be in samples.
    :returns: An array the same dimensions as the input array.
    """

    energy_data = np.zeros(traces.shape)
    signal = traces * traces
    n_samples = int(duration / dt)

    window = np.ones(n_samples)

    if np.ndim(signal) == 1:
        # Compute the sliding average using a convolution
        energy_data = fftconvolve(signal, window, mode='same') \
                     / n_samples

    elif np.ndim(signal) == 2:
        for trace in range(signal.shape[1]):
            # divide by n_samples so the 2D branch matches the 1D mean-squared energy
            energy_data[:, trace] = (fftconvolve(signal[:, trace],
                                                 window,
                                                 mode='same') / n_samples)

    else:
        raise ValueError('Array must be 1D or 2D')

    return energy_data
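A small usage sketch (illustration only): with dt left at 1, duration is given in samples.

import numpy as np
from scipy.signal import fftconvolve

trace = np.sin(2*np.pi*25*np.linspace(0, 1, 500))
trace[:250] *= 0.1                         # weak first half, strong second half
e = energy(trace, duration=50)             # 50-sample window, dt defaults to 1
print(e[:250].mean(), e[250:].mean())      # the second half carries ~100x the energy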
Example 21
def acovf_fft(x, demean=True):
    '''autocovariance function with call to fftconvolve, biased

    Parameters
    ----------
    x : array_like
        timeseries, signal
    demean : boolean
        If true, then demean time series

    Returns
    -------
    acovf : array
        autocovariance for data, same length as x

    might work for nd in parallel with time along axis 0

    '''
    from scipy import signal
    x = np.asarray(x)

    if demean:
        x = x - x.mean()

    return signal.fftconvolve(x, x[::-1])[len(x) - 1:] / x.shape[0]
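A short check (illustration only): the lag-0 autocovariance equals the biased sample variance.

import numpy as np

x = np.sin(np.linspace(0, 20*np.pi, 500)) + 0.1*np.random.randn(500)
acf = acovf_fft(x)
print(acf[0], x.var())   # the two values agree (biased, i.e. divided by len(x))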
Example 22
def dwt(X, L, H, n):
    '''
    Compute the discrete wavelet transform of X with respect to
    the wavelet filters L (lo-pass) and H (hi-pass).
    Inputs:
        X -- numpy array corresponding to the signal
        L -- numpy array giving the lo-pass filter
        H -- numpy array giving the hi-pass filter
        n -- integer, giving what level of decomposition
    Returns:
        list of the form [A, D1, D2, ..., Dn] where each entry
        is a numpy array. These are the approximation frame (A)
        and the detail coefficients.
    '''
    coeffs = []
    A = X
    i=0
    while i < n:
        D = fftconvolve(A,H)[1::2]
        A = fftconvolve(A,L)[1::2]
        coeffs.append(D)
        i += 1
    coeffs.append(A)
    coeffs.reverse()
    return coeffs
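A minimal sketch (illustration only) using Haar-style filters; it just shows how the coefficient lengths halve at each level. Perfect reconstruction with the idwt of Example 6 additionally depends on the synthesis-filter convention.

import numpy as np
from scipy.signal import fftconvolve

L = np.array([1.0, 1.0])/np.sqrt(2)    # Haar lo-pass
H = np.array([1.0, -1.0])/np.sqrt(2)   # Haar hi-pass
X = np.random.randn(64)
coeffs = dwt(X, L, H, n=3)
print([len(c) for c in coeffs])        # [8, 8, 16, 32]: approximation first, then details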
Example 23
def SIC_method(X,Y,order=100):
    #low-passing to take LFP only
    
    h_for=AR_fit(X,Y,order)
    y_new=signal.fftconvolve(h_for,X)
    h_back=AR_fit(Y,X,order)
    x_new=signal.fftconvolve(h_back,Y)

    #Sx=welch(x_new,nperseg=1000)[1]
    #Sy=welch(y_new,nperseg=1000)[1]

#    Sy=welch(Y,nperseg=1000)[1]
#    Sx=welch(X,nperseg=1000)[1]
#
#    X_Y=delta_estimator_3(Sy/Sx,Sx)
#    Y_X=delta_estimator_3(Sx/Sy,Sy)
            
    #mask1=Sy!=0
    #mask2=Sx[mask1]!=0
    #X_Y=eval('delta_estimator_'+str(method_no))(Sy[mask1][mask2][1:-1]/Sx[mask1][mask2][1:-1],Sx[mask1][mask2][1:-1])
    #Y_X=eval('delta_estimator_'+str(method_no))(Sx[mask1][mask2][1:-1]/Sy[mask1][mask2][1:-1],Sy[mask1][mask2][1:-1])
    #X_Y=eval('delta_estimator_'+str(method_no))(Sy[mask1][mask2][1:-1]/Sx[mask1][mask2][1:-1],Sx[mask1][mask2][1:-1])
    #Y_X=eval('delta_estimator_'+str(method_no))(Sx[mask1][mask2][1:-1]/Sy[mask1][mask2][1:-1],Sy[mask1][mask2][1:-1])

    X_Y=np.var(y_new)/float(np.sum(h_for**2)*np.var(X))
    Y_X=np.var(x_new)/float(np.sum(h_back**2)*np.var(Y))
    
    return X_Y,Y_X
Example 24
    def demodulate2(self,samples):
        # DEMODULATION CODE

        # LIMITER goes here

        # low pass & down sampling
        h = signal.firwin(128,80000,nyq=1.2e5)
        lp_samples = signal.fftconvolve(samples, h)

    # polar discriminator

        A = lp_samples[1:lp_samples.size]
        B = lp_samples[0:lp_samples.size-1]

        dphase = ( A * np.conj(B) ) / np.pi

        dphase.resize(dphase.size+1)
        dphase[dphase.size-1] = dphase[dphase.size-2]

        h = signal.firwin(128,16000,nyq=1.2e5)
        rebuilt = signal.fftconvolve(dphase,h)

        output = rebuilt[::self.decim_r2]

        output = self.lowpass(output, self.audioFilterSize)

        return np.real(output)
Example 25
def conv_mul(lin_op, rh_val, transpose=False, is_abs=False):
    """Multiply by a convolution operator.

    Parameters
    ----------
    lin_op : LinOp
        The root linear operator.
    rh_val : NDArray
        The vector being convolved.
    transpose : bool
        Is the transpose of convolution being applied?
    is_abs : bool
        Is the absolute value of convolution being applied?

    Returns
    -------
    NumPy NDArray
        The convolution.
    """
    constant = mul(lin_op.data, {}, is_abs)
    # Convert to 2D
    constant, rh_val = map(intf.from_1D_to_2D, [constant, rh_val])
    if transpose:
        constant = np.flipud(constant)
        # rh_val always larger than constant.
        return fftconvolve(rh_val, constant, mode='valid')
    else:
        # First argument must be larger.
        if constant.size >= rh_val.size:
            return fftconvolve(constant, rh_val, mode='full')
        else:
            return fftconvolve(rh_val, constant, mode='full')
Example 26
def nc_afsk1200Demod(sig, fs=48000.0, TBW=2.0):
    #  non-coherent demodulation of afsk1200
    # function returns the NRZ (without rectifying it)
    # 
    # sig  - signal
    # baud - The bitrate. Default 1200
    # fs   - sampling rate in Hz
    # TBW  - TBW product of the filters
    #
    # Returns:
    #     NRZ  
    # your code here
    taps = int(fs/1200) - 1
    bandpass = signal.firwin(taps, 1200, nyq=fs/2)
    spacepass = bandpass * np.exp(1j*2*np.pi*1200*np.r_[0.0:taps]/fs)
    markpass = bandpass * np.exp(1j*2*np.pi*3600*np.r_[0.0:taps]/fs)
    spaces = signal.fftconvolve(sig, spacepass, mode='same')
    marks = signal.fftconvolve(sig, markpass, mode='same')

    analog = np.abs(spaces)-np.abs(marks)
    lowpass = signal.firwin(taps, 2400*1.2, nyq=fs/2)
    filtered = signal.fftconvolve(analog, lowpass, mode='same')
    NRZ = filtered
    
    return NRZ
Example 27
 def Md_dotProduct_Rect(cls, s1_train, s2_train, args, dt=0.1):
     delta = args['delta']
     rect_size_i = 2*int(float(delta)/dt)
     rect        = np.ones(rect_size_i)
     s1_filtered = fftconvolve(s1_train, rect, mode='same')
     s2_filtered = fftconvolve(s2_train, rect, mode='same')
     dotProduct = np.sum(s1_filtered*s2_filtered)
     return dotProduct
Example 28
 def test_real_valid_mode(self):
     # len(a) < len(b) deprecated in 0.12.0, removed for 0.13.0
     a = array([3,2,1])
     b = array([3,3,5,6,8,7,9,0,1])
     with warnings.catch_warnings():
         warnings.simplefilter("ignore", DeprecationWarning)
         assert_array_almost_equal(fftconvolve(a, b, 'valid'),
                                   fftconvolve(b, a, 'valid'))
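For reference, a standalone version of the same check (a sketch; recent SciPy releases accept either argument order in 'valid' mode, so no deprecation warning needs to be silenced):

import numpy as np
from scipy.signal import fftconvolve

a = np.array([3, 2, 1])
b = np.array([3, 3, 5, 6, 8, 7, 9, 0, 1])
print(np.allclose(fftconvolve(a, b, 'valid'), fftconvolve(b, a, 'valid')))   # True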
Example 29
 def numeric(self, values):
     """Convolve the two values.
     """
     # First argument must be larger.
     if values[0].size >= values[1].size:
         return fftconvolve(values[0], values[1], mode='full')
     else:
         return fftconvolve(values[1], values[0], mode='full')
Example 30
    def __call__(self, locs, wfImage):
        """Align a set of localizations to a widefield image.
        
        Parameters
        ----------
        locs    : Pandas DataFrame
            The DataFrame containing the localizations. x- and y-column
            labels are specified in self.coordCols.
        wfImage : array of int or array of float 
            The widefield image to align the localizations to.
        
        Returns
        -------
        offsets : tuple of float
            The estimated offset between the localizations and widefield
            image. The first element is the offset in x and the second
            in y. These should be subtracted from the input localizations
            to align them to the widefield image.
            
        """
        upsampleFactor = self.upsampleFactor
        
        # Bin the localizations into a 2D histogram;
        # x corresponds to rows for histogram2d
        binsX = np.arange(0, upsampleFactor * wfImage.shape[0] + 1, 1) \
                                            * self.pixelSize / upsampleFactor
        binsY = np.arange(0, upsampleFactor * wfImage.shape[1] + 1, 1) \
                                            * self.pixelSize / upsampleFactor
        H, _, _ = np.histogram2d(locs[self.coordCols[0]],
                                 locs[self.coordCols[1]],
                                 bins = [binsX, binsY])
                           
        # Upsample and flip the image to align it to the histogram;
        # then compute the cross correlation
        crossCorr = fftconvolve(H,
                                zoom(np.transpose(wfImage)[::-1, ::-1], 
                                     upsampleFactor, order = 0),
                                mode = 'same')
        
        # Find the maximum of the cross correlation
        centerLoc = np.unravel_index(np.argmax(crossCorr), crossCorr.shape)

        # Find the center of the widefield image
        imgCorr = fftconvolve(zoom(np.transpose(wfImage), 
                                   upsampleFactor, order = 0),
                              zoom(np.transpose(wfImage)[::-1, ::-1], 
                                   upsampleFactor, order = 0),
                              mode = 'same')
        centerWF = np.unravel_index(np.argmax(imgCorr), imgCorr.shape)
                              
        # Find the shift between the images.
        # dx -> rows, dy -> cols because the image was transposed during
        # fftconvolve operation.
        dy = (centerLoc[1] - centerWF[1]) / upsampleFactor * self.pixelSize
        dx = (centerLoc[0] - centerWF[0]) / upsampleFactor * self.pixelSize
        
        offsets = (dx, dy)
        return offsets
Example 31
import re
from functools import reduce
import numpy as np
import networkx as nx
from tqdm import tqdm
from scipy.signal import convolve2d, fftconvolve

serial = 9435

n = 300
x = list(range(1, n + 1))

grid = np.zeros((n, n))
Y, X = np.meshgrid(x, x)
rack_ID = X + 10
power_level = Y * rack_ID
power_level += serial
power_level *= rack_ID
power = power_level//100 - 10*(power_level//1000) - 5

kern = np.ones((3,3), dtype=int)
total = convolve2d(power, kern, mode="valid")

max_square = np.unravel_index(total.argmax(), total.shape)
print("{},{}".format(max_square[0]+1, max_square[1]+1))

convs = ((fftconvolve(power, np.ones((i, i)), mode="valid"), i) for i in tqdm(x))
score, pos, size = max(
        (total.max(), np.unravel_index(total.argmax(), total.shape), size)
        for total, size in convs)
print("{},{},{}".format(pos[0]+1, pos[1]+1, size))
Example 32
    def run(self, save=False, show=False):
        if self.part is None:
            psf = self.PSFs
        else:
            psf = [self.PSFs[self.part]]
        yN, xN, channel = self.shape
        key, kex = self.PSFs[0].shape
        delta = yN - key
        assert delta >= 0, 'resolution of image should be higher than kernel'
        result = []
        if len(psf) > 1:
            for p in psf:
                tmp = np.pad(p, delta // 2, 'edge')
                cv2.normalize(tmp,
                              tmp,
                              alpha=0,
                              beta=1,
                              norm_type=cv2.NORM_MINMAX,
                              dtype=cv2.CV_32F)
                # blured = np.zeros(self.shape)
                blured = cv2.normalize(self.original,
                                       self.original,
                                       alpha=0,
                                       beta=1,
                                       norm_type=cv2.NORM_MINMAX,
                                       dtype=cv2.CV_32F)

                blured[:, :, 0] = np.array(
                    signal.fftconvolve(blured[:, :, 0], tmp, 'same'))
                blured[:, :, 1] = np.array(
                    signal.fftconvolve(blured[:, :, 1], tmp, 'same'))
                blured[:, :, 2] = np.array(
                    signal.fftconvolve(blured[:, :, 2], tmp, 'same'))
                blured = cv2.normalize(blured,
                                       blured,
                                       alpha=0,
                                       beta=1,
                                       norm_type=cv2.NORM_MINMAX,
                                       dtype=cv2.CV_32F)
                blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)
                result.append(np.abs(blured))
        else:
            psf = psf[0]
            tmp = np.pad(psf, delta // 2, 'edge')
            cv2.normalize(tmp,
                          tmp,
                          alpha=0,
                          beta=1,
                          norm_type=cv2.NORM_MINMAX,
                          dtype=cv2.CV_32F)
            blured = cv2.normalize(self.original,
                                   self.original,
                                   alpha=0,
                                   beta=1,
                                   norm_type=cv2.NORM_MINMAX,
                                   dtype=cv2.CV_32F)
            blured[:, :, 0] = np.array(
                signal.fftconvolve(blured[:, :, 0], tmp, 'same'))
            blured[:, :, 1] = np.array(
                signal.fftconvolve(blured[:, :, 1], tmp, 'same'))
            blured[:, :, 2] = np.array(
                signal.fftconvolve(blured[:, :, 2], tmp, 'same'))
            blured = cv2.normalize(blured,
                                   blured,
                                   alpha=0,
                                   beta=1,
                                   norm_type=cv2.NORM_MINMAX,
                                   dtype=cv2.CV_32F)
            blured = cv2.cvtColor(blured, cv2.COLOR_RGB2BGR)
            result.append(np.abs(blured))
        self.result = result
        if show or save:
            self.__plot_canvas(show, save)
Example 33
def applyMMSI(I1, filters, togglePolarity=False, narrow_rivers=True):
    """ Applies the filters to a given input image to compute the
    modified multiscale singularity index response. Estimate the width
    and the dominant orientation angle for each spatial location.

    Inputs:
    I1 -- input image (e.g. Landsat NIR band or MNDWI)
    filters -- an instance of SingularityIndexFilters class that contains
               precomputed filters

    Keyword arguments:
    togglePolarity -- changes polarity, use if the rivers are darker
                      than land in the input image (i.e. SAR images)

    Returns:
    psi -- the singularity index response
    widthMap -- estimated width at each spatial location (x,y)
    orient -- local orientation at each spatial location (x,y)
    """

    if I1.dtype == 'uint8':
        I1 = I1.astype('float') / 255

    if I1.dtype == 'uint16':
        I1 = I1.astype('float') / 65535

    if len(I1.shape) > 2:
        raise ValueError('This function inputs only a single channel image')

    R, C = I1.shape

    # Compute the multiscale singularity index
    for s in range(0, filters.nrScales):
        print("Processing scale: " + str(s))

        # Downscale the image to the current scale. We use a pyramid instead of
        # increasing the sigma and size of the kernels for efficiency
        if s > 0:
            I1 = cv2.resize(I1, (int(C/(np.sqrt(2)**s)), int(R/(np.sqrt(2)**s))), \
                            interpolation = cv2.INTER_CUBIC)

        # Debias the image.
        mu = cv2.sepFilter2D(I1, cv2.CV_64FC1, filters.Gdebias, filters.Gdebias.T, \
                                borderType=cv2.BORDER_REFLECT_101)
        I = I1 - mu

        # Apply the second order derivative filters
        J20 = fftconvolve(I, filters.G20, mode='same')
        J260 = fftconvolve(I, filters.G260, mode='same')
        J2120 = fftconvolve(I, filters.G2120, mode='same')

        # Compute the dominant local orientation
        Nr = np.sqrt(3) * ((J260**2) - (J2120**2) + (J20 * J260) -
                           (J20 * J2120))
        Dr = 2 * (J20**2) - (J260**2) - (J2120**2) + (
            J20 * J260) - 2 * (J260 * J2120) + (J20 * J2120)
        angles = np.arctan2(Nr, Dr) / 2

        # Apply the first order derivative filters
        J0u  = cv2.sepFilter2D(I, cv2.CV_64FC1, filters.G1.T, filters.G0_a.T, \
                                borderType=cv2.BORDER_REFLECT_101)
        J90u = cv2.sepFilter2D(I, cv2.CV_64FC1, filters.G0_a, filters.G1, \
                                borderType=cv2.BORDER_REFLECT_101)

        # Compute 0th, 1st, and 2nd derivatives along the estimated direction
        J0 = cv2.sepFilter2D(I, cv2.CV_64FC1, filters.G01d, filters.G01d.T, \
                                borderType=cv2.BORDER_REFLECT_101)
        J1 = J0u * np.cos(angles) + J90u * np.sin(angles)
        J2 =((1+(2*np.cos(2*angles)))*J20 + \
             (1-np.cos(2*angles)+(np.sqrt(3)*np.sin(2*angles)))*J260 + \
             (1-np.cos(2*angles)-(np.sqrt(3)*np.sin(2*angles)))*J2120) / 3

        # Compute the singularity index for the current scale
        psi_scale = np.abs(J0) * J2 / (1 + J1**2)

        # Resize scale responses to the same size for element-wise comparison
        if s > 0:
            psi_scale = cv2.resize(psi_scale, (C, R),
                                   interpolation=cv2.INTER_CUBIC)
            angles = cv2.resize(angles, (C, R),
                                interpolation=cv2.INTER_NEAREST)

        # Toggle polarity if needed
        if togglePolarity:
            psi_scale = -psi_scale

        # Suppress island response (channels have negative response unless the polarity is changed)
        psi_scale[psi_scale > 0] = 0
        psi_scale = np.abs(psi_scale)

        # Gamma normalize response
        psi_scale = psi_scale * filters.minScale**2

        # Find the dominant scale, orientation, and norm of the response across scales
        if s == 0:
            # response buffer (we need the neighbors of the dominant scale for width estimation)
            psi_prev = np.zeros(psi_scale.shape)
            psi_curr = np.zeros(psi_scale.shape)
            psi_next = psi_scale

            # response at the dominant scale and its neighbors
            psi_max_curr = np.zeros(psi_scale.shape)
            psi_max_prev = np.zeros(psi_scale.shape)
            psi_max_next = np.zeros(psi_scale.shape)

            dominant_scale_idx = np.zeros(psi_scale.shape)
            orient = angles
            psi_max = psi_scale
            psi = psi_scale**2

        else:
            psi_prev = psi_curr
            psi_curr = psi_next
            psi_next = psi_scale

            idx_curr = psi_curr > psi_max_curr
            psi_max_curr[idx_curr] = psi_curr[idx_curr]
            psi_max_prev[idx_curr] = psi_prev[idx_curr]
            psi_max_next[idx_curr] = psi_next[idx_curr]

            idx = psi_scale > psi_max
            psi_max[idx] = psi_scale[idx]
            dominant_scale_idx[idx] = s
            orient[idx] = angles[idx]
            psi = psi + psi_scale**2

    # Check if the coarsest scale has the maximum response
    psi_prev = psi_curr
    psi_curr = psi_next
    idx_curr = psi_curr > psi_max_curr
    psi_max_curr[idx_curr] = psi_curr[idx_curr]
    psi_max_prev[idx_curr] = psi_prev[idx_curr]
    psi_max_next[idx_curr] = 0

    # Euclidean norm of the response across scales
    psi = np.sqrt(psi)

    # Estimate the width by fitting a quadratic spline to the response at the
    # dominant scale and its neighbors
    s_prev = filters.minScale * (np.sqrt(2)**(dominant_scale_idx - 1))
    s_max = filters.minScale * (np.sqrt(2)**(dominant_scale_idx))
    s_next = filters.minScale * (np.sqrt(2)**(dominant_scale_idx + 1))

    A = s_next * (psi_max - psi_max_prev) + \
        s_max * (psi_max_prev - psi_max_next) + \
        s_prev * (psi_max_next - psi_max)
    B = s_next*s_next * (psi_max_prev - psi_max) + \
        s_max*s_max * (psi_max_next - psi_max_prev) + \
        s_prev*s_prev * (psi_max - psi_max_next)
    widthMap = np.zeros(psi.shape)
    widthMap[psi > 0] = -B[psi > 0] / (2 * A[psi > 0])

    # Scaling factor for narrow rivers
    if narrow_rivers:
        scalingFactor = 2 / (1 + np.exp(-8 * psi)) - 1
        widthMap = scalingFactor * widthMap + 0.5

    return psi, widthMap, orient
Example 34
def ant_dyn_larmip(SCE, start_date2, ye, GAM, NormD, UnifDd, data_dir,
                   temp_opt, larmip_v, delay, LowPass):
    '''Compute the Antarctic dynamics contribution to global sea level as in
    Levermann et al 2014, using linear response functions.'''

    model_corr = False  # Introduces a correlation between input distribution
    # UnifDd and the LRF model. Only implemented for the
    # three LARMIP ice sheet models with ice shelves

    N = len(NormD)
    start_date = 1861  # This is different from other runs
    Beta_low = 7  # Bounds to use for the basal melt rate,
    Beta_high = 16  # units are m.a^(-1).K^(-1)
    nb_y = ye - start_date + 1
    TIME = np.arange(start_date, ye + 1)
    i_ys = np.where(TIME == start_date2)[0][0]
    # TODO change the way this time reference is handled
    i_ys_ref = np.where(TIME == 1995)[0][0]

    if larmip_v == 'LARMIP':
        RF = read_larmip_lrf(data_dir)
        nbLRF = 3  # Number of LRF to use. 3 or 5 for LARMIP
        coeff = read_larmip_coeff(data_dir).values
    elif larmip_v == 'LARMIP2':
        RF = read_larmip2_lrf(f'{data_dir}LRF_Lev20/RFunctions/', 'BM08')
        # Exclude a model? There are two BISI_LBL...
        nbLRF = len(RF.model)
        coeff = read_larmip_coeff(data_dir)
        # The coefficients need to be reordered to fit the RF
        coeff = xr.concat([
            coeff.sel(region='EAIS'),
            coeff.sel(region='Ross'),
            coeff.sel(region='Amundsen'),
            coeff.sel(region='Weddell'),
            coeff.sel(region='Amundsen').assign_coords(region='Peninsula')
        ],
                          dim='region')
        coeff = coeff.values

    else:
        print(f'ERROR: {larmip_v} value of larmip_v not implemented')

    nb_bass = len(RF.region)

    TGLOB = misc.make_tglob_array(data_dir, temp_opt, SCE, start_date, ye,
                                  LowPass)
    TGLOBs = TGLOB.sel(time=slice(start_date, None))
    Tref_Lev = TGLOBs - TGLOB.sel(time=slice(start_date, start_date +
                                             19)).mean(dim='time')
    Td_Lev = misc.normal_distrib(Tref_Lev, GAM, NormD)

    # Random climate model number: 1-19
    RMod = np.random.randint(0, 19, N)  # Select a random model index (0 to 18)

    if delay:
        AlpCoeff = coeff[:, RMod, 3]  # dim: region, N
        TimeDelay = np.array(coeff[:, RMod, 2], dtype='int')

        # That could be made faster using fancy indexing
        # https://stackoverflow.com/questions/20360675/roll-rows-of-a-matrix-independently
        Td_Lev_d = np.zeros(
            [AlpCoeff.shape[0], Td_Lev.shape[0], Td_Lev.shape[1]])

        for r in range(AlpCoeff.shape[0]):
            for t in range(N):
                Td_Lev_d[r, t,
                         TimeDelay[r, t]:] = Td_Lev[t, :nb_y - TimeDelay[r, t]]

    else:
        AlpCoeff = coeff[:, RMod, 0]  # dim: region, N
        Td_Lev_d = Td_Lev[np.newaxis, :, :]

    # Use following line if Beta should have some external dependence
    #  Beta = Beta_low + UnifDd*(Beta_high - Beta_low) # Modify to obtain random_uniform(7,16,N)
    Beta = np.random.uniform(Beta_low, Beta_high, N)
    BMelt = AlpCoeff[:, :, np.newaxis] * Td_Lev_d * Beta[np.newaxis, :,
                                                         np.newaxis]

    if model_corr:
        Rdist = np.zeros([N], dtype=int)
        Rdist = 2
        Rdist = np.where(UnifDd >= 0.33, 1, Rdist)
        Rdist = np.where(UnifDd >= 0.67, 0, Rdist)
        modelsel = Rdist  # Select model
    else:
        modelsel = np.random.randint(0, nbLRF, N)  # Select models

    RF = RF[:, modelsel, :]

    X_ant_b = signal.fftconvolve(RF[:, :, :nb_y], BMelt, mode='full',
                                 axes=2)[:, :, :nb_y]

    X_ant_b = X_ant_b * 100  # Convert from m to cm
    X_ant = np.sum(X_ant_b, 0)  # Sum the 4 basins

    # Remove the uncertainty at the beginning of the projection
    X_ant -= X_ant[:, [i_ys_ref]]

    return X_ant[:, i_ys:]
Example 35
def __myconvolve1(parArgs):
    return sg.fftconvolve(parArgs[0], parArgs[1], 'full')
Example 36
def fast_corr(signal, template):
    ''' correlation with fft '''
    return fftconvolve(signal, template[::-1], mode='same')
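A usage sketch (illustration only): burying a short template in noise and locating it via the correlation peak.

import numpy as np
from scipy.signal import fftconvolve

rng = np.random.default_rng(1)
template = np.hanning(32)
sig = rng.normal(0, 0.2, 1000)
sig[400:432] += template                 # embed the template at offset 400
corr = fast_corr(sig, template)
print(np.argmax(corr))                   # close to 416, the centre of the embedded template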
Example 37
def detect_cells(cell_probability, probability_threshold, stopping_criterion,
                 initial_template_size, dilation_size, max_no_cells):
    """
    This is the top level function to infer the position (and eventually size) of all cells in a 3D 
    volume of image data. We assume that we already have computed a "probability map" which encodes 
    the probability that each voxel corresponds to a cell body.
    
    Parameters 
    ----------
    cell_probability : ndarray
        Nr x Nc x Nz matrix which contains the probability of each voxel being a cell body. 
    
    probability_threshold : float
        threshold between (0,1) to apply to probability map (only consider voxels for which 
        cell_probability(r,c,z) > probability_threshold)
    stopping_criterion : float
        stopping criterion is a value between (0,1) (minimum normalized correlation between 
        template and probability map) (Example = 0.47)
    initial_template_size : int
        initial size of spherical template (to use in sweep)
    dilation_size : int
        size to increase mask around each detected cell (zero out sphere of radius with 
        initial_template_size+dilation_size around each centroid)
    max_no_cells : int
        maximum number of cells (alternative stopping criterion)
        
    Returns
    -------
    ndarray
        centroids = D x 4 matrix, where D = number of detected cells.
        The (x,y,z) coordinate of each cell are in columns 1-3.
        The fourth column contains the correlation (ptest) between the template
        and probability map and thus represents our "confidence" in the estimate.
        The algorithm terminates when ptest<=stopping_criterion.
    ndarray
        new_map = Nr x Nc x Nz matrix containing labeled detected cells (1,...,D)
    """

    # threshold probability map.
    newtest = (cell_probability *
               (cell_probability > probability_threshold)).astype('float32')
    # initial_template_size is an int now but could be a vector later on - convert it to an array
    initial_template_size = np.atleast_1d(initial_template_size)

    # create dictionary of spherical templates
    box_radius = np.ceil(np.max(initial_template_size) / 2) + 1
    dict = create_synth_dict(initial_template_size, box_radius)
    dilate_dict = create_synth_dict(initial_template_size + dilation_size,
                                    box_radius)
    box_length = int(round(np.shape(dict)[0]**(1 / 3)))
    new_map = np.zeros((np.shape(cell_probability)), dtype='uint8')
    newid = 1
    centroids = np.empty((0, 4))

    # run greedy search step for at most max_no_cells steps (# cells <= max_no_cells)
    for ktot in range(max_no_cells):
        val = np.zeros((np.shape(dict)[1], 1), dtype='float32')
        id = np.zeros((np.shape(dict)[1], 1), dtype='uint32')

        # loop to convolve the probability cube with each template in dict
        for j in range(np.shape(dict)[1]):
            convout = signal.fftconvolve(
                newtest,
                np.reshape(dict[:, j], (box_length, box_length, box_length)),
                mode='same')
            # get the max value of the flattened convout array and its index
            val[j], id[j] = np.real(np.amax(convout)), np.argmax(convout)

        # find position in image with max correlation
        which_atom = np.argmax(val)
        which_loc = id[which_atom]

        # Save dict into a cube array with its center given by which_loc and place it into a 3-D array.
        x2 = compute3dvec(dict[:, which_atom], which_loc, box_length,
                          np.shape(newtest))
        xid = np.nonzero(x2)

        # Save dilate_dict into a cube array with its center given by which_loc and place it into a 3-D array.
        x3 = compute3dvec(dilate_dict[:, which_atom], which_loc, box_length,
                          np.shape(newtest))

        newtest = newtest * (x3 == 0)
        ptest = val / np.sum(dict, axis=0)

        if ptest < stopping_criterion:
            print("Cell Detection is done")
            return (centroids, new_map)

        # Label detected cell
        new_map[xid] = newid
        newid = newid + 1

        #Convert flat index to indices
        rr, cc, zz = np.unravel_index(which_loc, np.shape(newtest))
        new_centroid = rr, cc, zz  #Check - why cc is first?

        # insert a row into centroids
        centroids = np.vstack((centroids, np.append(new_centroid, ptest)))
        # for later: convert to logging and print with much less frequency
        if (ktot % 50 == 0):
            print('Iteration remaining = ', (max_no_cells - ktot - 1),
                  'Correlation = ', ptest)

    print("Cell Detection is done")
    return (centroids, new_map)
Example 38
def _project_images(reference_sources, estimated_source, flen, G=None):
    """Least-squares projection of estimated source on the subspace spanned by
    delayed versions of reference sources, with delays between 0 and flen-1.
    Passing G as all zeros will populate the G matrix and return it so it can
    be passed into the next call to avoid recomputing G (this only works
    if not computing permutations).
    """
    nsrc = reference_sources.shape[0]
    nsampl = reference_sources.shape[1]
    nchan = reference_sources.shape[2]
    reference_sources = np.reshape(np.transpose(reference_sources, (2, 0, 1)),
                                   (nchan*nsrc, nsampl), order='F')

    # computing coefficients of least squares problem via FFT ##
    # zero padding and FFT of input data
    reference_sources = np.hstack((reference_sources,
                                   np.zeros((nchan*nsrc, flen - 1))))
    estimated_source = \
        np.hstack((estimated_source.transpose(), np.zeros((nchan, flen - 1))))
    n_fft = int(2**np.ceil(np.log2(nsampl + flen - 1.)))
    sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=1)
    sef = scipy.fftpack.fft(estimated_source, n=n_fft)

    # inner products between delayed versions of reference_sources
    if G is None:
        saveg = False
        G = np.zeros((nchan * nsrc * flen, nchan * nsrc * flen))
        for i in range(nchan * nsrc):
            for j in range(i+1):
                ssf = sf[i] * np.conj(sf[j])
                ssf = np.real(scipy.fftpack.ifft(ssf))
                ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
                              r=ssf[:flen])
                G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
                G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
    else:  # avoid recomputing G (only works if no permutation is desired)
        saveg = True  # return G
        if np.all(G == 0):  # only compute G if passed as 0
            G = np.zeros((nchan * nsrc * flen, nchan * nsrc * flen))
            for i in range(nchan * nsrc):
                for j in range(i+1):
                    ssf = sf[i] * np.conj(sf[j])
                    ssf = np.real(scipy.fftpack.ifft(ssf))
                    ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
                                  r=ssf[:flen])
                    G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
                    G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T

    # inner products between estimated_source and delayed versions of
    # reference_sources
    D = np.zeros((nchan * nsrc * flen, nchan))
    for k in range(nchan * nsrc):
        for i in range(nchan):
            ssef = sf[k] * np.conj(sef[i])
            ssef = np.real(scipy.fftpack.ifft(ssef))
            D[k * flen: (k+1) * flen, i] = \
                np.hstack((ssef[0], ssef[-1:-flen:-1])).transpose()

    # Computing projection
    # Distortion filters
    try:
        C = np.linalg.solve(G, D).reshape(flen, nchan*nsrc, nchan, order='F')
    except np.linalg.linalg.LinAlgError:
        C = np.linalg.lstsq(G, D)[0].reshape(flen, nchan*nsrc, nchan,
                                             order='F')
    # Filtering
    sproj = np.zeros((nchan, nsampl + flen - 1))
    for k in range(nchan * nsrc):
        for i in range(nchan):
            sproj[i] += fftconvolve(C[:, k, i].transpose(),
                                    reference_sources[k])[:nsampl + flen - 1]
    # return G only if it was passed in
    if saveg:
        return sproj, G
    else:
        return sproj
Example 39
def shift(pat, mode='gaus'):
    mask, to_conv, brdf, indx, indy = pat

    if (mask.sum() >= 2000) and (mask.sum() < 3000):

        if mode == 'mean':
            w = 1. / (np.nansum(mask))
            k = np.zeros(mask.shape).astype('float')
            k[mask] = w
            conved = signal.fftconvolve(to_conv, k, mode='valid')
            dif = abs(conved - u)
            minm = np.nanmin(dif)
            x = np.where(dif == minm)[0][0] - np.ceil((conved.shape[0]) / 2.)
            y = np.where(dif == minm)[1][0] - np.ceil((conved.shape[1]) / 2.)
            vals = conved[np.where(dif == minm)[0][0],
                          np.where(dif == minm)[1][0]]
            return [x, y, u, vals, indx, indy]

        elif mode == 'gaus':

            xwin, ywin = mask.shape

            if (xwin <= 0) or (ywin <= 0):
                pass
            else:
                cost = []
                start = 100
                if star == 0:
                    star += 0.0001
                if end == 0:
                    end += 0.0002
                for xstd in np.arange(star, end, 1):
                    for ystd in np.arange(star, end, 1):
                        if xstd <= ystd:
                            for angle in range(30, 160, 2):
                                gaus = gaussian(xwin, ywin, xstd, ystd, angle,
                                                False)
                                gaus[~mask] = 0
                                ker = gaus / (gaus.sum())
                                conved = signal.fftconvolve(to_conv,
                                                            ker,
                                                            mode='valid')
                                dif = abs(conved - brdf)
                                minm = np.nanmin(dif)
                                if minm < start:
                                    x = np.where(dif == minm)[0][0] - np.ceil(
                                        (conved.shape[0]) / 2.)
                                    y = np.where(dif == minm)[1][0] - np.ceil(
                                        (conved.shape[1]) / 2.)
                                    vals = conved[np.where(dif == minm)[0][0],
                                                  np.where(dif == minm)[1][0]]
                                    cost.append([
                                        xstd, ystd, angle, x, y, brdf, vals,
                                        indx, indy
                                    ])
                                    start = minm
                                    print('Find One!!', start)
                        else:
                            pass
                return cost[-1]

        else:
            pass

    else:
        pass
Example 40
    padding_shape = np.asarray(padding_shape, dtype='i')
    z_pad = np.pad(z, padding_shape, mode='constant')

    t_start = time.time()
    ztz = np.empty(ztz_shape)
    for i in range(ztz.size):
        i0 = k0, k1, *pt = np.unravel_index(i, ztz.shape)
        zk1_slice = tuple([k1] + [
            slice(v, v + size_ax) for v, size_ax in zip(pt, valid_support)])
        ztz[i0] = np.dot(z[k0].ravel(), z_pad[zk1_slice].ravel())
    print("A la mano: {:.3f}s".format(time.time() - t_start))

    # compute the cross correlation between z and z_pad
    t_fft = time.time()
    flip_axis = tuple(range(1, z.ndim))
    ztz_fft = np.array([[fftconvolve(z_pad_k0, z_k, mode='valid')
                         for z_k in z]
                        for z_pad_k0 in np.flip(z_pad, axis=flip_axis)])
    print("FFT: {:.3f}s".format(time.time() - t_fft))
    assert ztz_fft.shape == ztz_shape, (ztz.shape, ztz_shape)
    plt.imshow((ztz - ztz_fft).reshape(25*25, 23*23))
    plt.show()
    assert np.allclose(ztz, ztz_fft), abs(ztz - ztz_fft).max()

    # Sparse the cross correlation between z and z_pad
    t_sparse = time.time()
    ztz_sparse = np.zeros(ztz_shape)
    for k0, *pt in zip(*z.nonzero()):
        z_pad_slice = tuple([slice(None)] + [
            slice(v, v + 2 * size_ax - 1)
            for v, size_ax in zip(pt, atom_support)])
Example 41
    def line(self,
             rate,
             angle,
             dt,
             pixScale=0.2,
             display=False,
             useLookupTable=True,
             verbose=True):
        """
        Compute the TSF given input rate of motion, angle of motion, length of exposure, and pixelScale.

        The choice of units is irrelevant, as long as they are consistent, e.g. rate in "/hr and dt in hr.
        Angle is in degrees +-90 from horizontal.

        display=True to see the TSF

        useLookupTable=True to use the lookupTable. Otherwise a pure Moffat profile is used.
        """

        self.rate = rate
        self.angle = angle
        self.dt = dt
        self.pixScale = pixScale

        angr = angle * np.pi / 180.

        self.line2d = self.PSF * 0.0
        w = np.where((np.abs(self.X - self.centx) <
                      np.cos(angr) * rate * dt / pixScale / 2.))
        if len(w[0]) > 0:
            x = self.X[w] * 1.0
            y = np.tan(angr) * (x - self.centx) + self.centy
            X = (x * self.repFact).astype('int')
            Y = (y * self.repFact).astype('int')
            self.line2d[Y, X] = 1.0

            w = np.where(self.line2d > 0)
            yl, yh = np.min(w[0]), np.max(w[0])
            xl, xh = np.min(w[1]), np.max(w[1])

            self.line2d = self.line2d[yl:yh + 1, xl:xh + 1]

        else:
            self.line2d = np.array([[1.0]])

        if useLookupTable:
            if verbose:
                print('Using the lookup table when generating the long PSF.')
            #self.longPSF=signal.convolve2d(self.moffProf+self.lookupTable*self.repFact*self.repFact, self.line2d,mode='same')
            self.longPSF = signal.fftconvolve(
                self.moffProf + self.lookupTable * self.repFact * self.repFact,
                self.line2d,
                mode='same')
            self.longPSF *= np.sum(self.fullPSF) / np.sum(self.longPSF)
        else:
            if verbose:
                print(
                    'Not using the lookup table when generating the long PSF')
            #self.longPSF=signal.convolve2d(self.moffProf,self.line2d,mode='same')
            self.longPSF = signal.fftconvolve(self.moffProf,
                                              self.line2d,
                                              mode='same')
            self.longPSF *= np.sum(self.moffProf) / np.sum(self.longPSF)
        self.longpsf = downSample2d(self.longPSF, self.repFact)

        if display:
            fig = pyl.figure('Line PSF')
            pyl.imshow(self.longPSF, interpolation='nearest', origin='lower')
            pyl.show()
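
A minimal, self-contained sketch of the pattern used above: convolve a PSF with a binary trail kernel and rescale so the total flux is preserved. The Gaussian PSF and trail length below are placeholders, not values from the original class.

import numpy as np
from scipy import signal

# Toy circular Gaussian PSF on a 65x65 grid
y, x = np.mgrid[-32:33, -32:33]
psf = np.exp(-(x**2 + y**2) / (2 * 3.0**2))

# Binary "line" kernel standing in for the motion during the exposure (horizontal trail)
line2d = np.zeros((1, 21))
line2d[0, :] = 1.0

# Trailed (long) PSF: convolve, then rescale so that the total flux is preserved
long_psf = signal.fftconvolve(psf, line2d, mode='same')
long_psf *= psf.sum() / long_psf.sum()
print(long_psf.shape, np.isclose(long_psf.sum(), psf.sum()))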
Esempio n. 42
0
def adaptive_gaussian(ionos, wgt, size_max, size_min):
    '''
    This program performs Gaussian filtering with adaptive window size.
    ionos: ionosphere
    wgt: weight
    size_max: maximum window size
    size_min: minimum window size
    '''
    import scipy.signal as ss

    length = (ionos.shape)[0]
    width = (ionos.shape)[1]
    flag = (ionos != 0) * (wgt != 0)
    ionos *= flag
    wgt *= flag

    size_num = 100
    size = np.linspace(size_min, size_max, num=size_num, endpoint=True)
    std = np.zeros((length, width, size_num))
    flt = np.zeros((length, width, size_num))
    out = np.zeros((length, width, 1))

    #calculate filtered image and standard deviation
    #sigma of window size: size_max
    sigma = size_max / 2.0
    for i in range(size_num):
        size2 = np.int(np.around(size[i]))
        if size2 % 2 == 0:
            size2 += 1
        if (i + 1) % 10 == 0:
            print('min win: %4d, max win: %4d, current win: %4d' % (np.int(
                np.around(size_min)), np.int(np.around(size_max)), size2))
        g2d = gaussian(size2, sigma * size2 / size_max, scale=1.0)
        scale = ss.fftconvolve(wgt, g2d, mode='same')
        flt[:, :,
            i] = ss.fftconvolve(ionos * wgt, g2d, mode='same') / (scale +
                                                                  (scale == 0))
        #variance of resulting filtered sample
        scale = scale**2
        var = ss.fftconvolve(wgt, g2d**2, mode='same') / (scale + (scale == 0))
        #in case there is a large area without data where scale is very small, which leads to weird values in the variance
        var[np.nonzero(var < 0)] = 0
        std[:, :, i] = np.sqrt(var)

    std_mv = np.mean(std[np.nonzero(std != 0)], dtype=np.float64)
    diff_max = np.amax(np.absolute(std - std_mv)) + std_mv + 1
    std[np.nonzero(std == 0)] = diff_max

    index = np.nonzero(np.ones((length, width))) + ((np.argmin(
        np.absolute(std - std_mv), axis=2)).reshape(length * width), )
    out = flt[index]
    out = out.reshape((length, width))

    #remove artifacts due to varying wgt
    size_smt = size_min
    if size_smt % 2 == 0:
        size_smt += 1
    g2d = gaussian(size_smt, size_smt / 2.0, scale=1.0)
    scale = ss.fftconvolve((out != 0), g2d, mode='same')
    out2 = ss.fftconvolve(out, g2d, mode='same') / (scale + (scale == 0))

    return out2
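
The weighted, normalized convolution used repeatedly above reduces to a small standalone sketch; gauss2d below is a stand-in for the project's gaussian() helper, and the data and weights are random placeholders.

import numpy as np
from scipy import signal

def gauss2d(size, sigma):
    # simple normalized 2-D Gaussian kernel (stand-in for the project's gaussian() helper)
    ax = np.arange(size) - (size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    g = np.exp(-(xx**2 + yy**2) / (2.0 * sigma**2))
    return g / g.sum()

rng = np.random.default_rng(0)
data = rng.normal(size=(128, 128))
wgt = (rng.random((128, 128)) > 0.2).astype(float)   # some pixels carry no data

g2d = gauss2d(15, 3.0)
scale = signal.fftconvolve(wgt, g2d, mode='same')
filtered = signal.fftconvolve(data * wgt, g2d, mode='same') / (scale + (scale == 0))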
Esempio n. 43
0
                 [[-0.8,  0. ,  0. ],
                  [ 0. , -0.8,  0. ],
                  [ 0. ,  0. , -0.8]]])
ttt = fftconvolve(np.r_[np.zeros((100,3)),imp][:,:,None],a3n3.T)[100:]
gftt = ttt/ttt[0,:,:]

a3n3 = np.array([[[ 1. ,  0 ,  0. ],
                  [ 0. ,  1. ,  0. ],
                  [ 0. ,  0. ,  1. ]],

                 [[-0.8,  0.2 ,  0. ],
                  [ 0 ,  0.0,  0. ],
                  [ 0. ,  0. , 0.8]]])
ttt = fftconvolve(np.r_[np.zeros((100,3)),imp][:,:,None],a3n3)[100:]
gftt = ttt/ttt[0,:,:]
signal.fftconvolve(np.dstack((imp,imp,imp)),a3n3)[1,:,:]

nobs = 10
imp = np.zeros((nobs,3))
imp[1] = 1.
ar13 = np.zeros((nobs+1,3))
for i in range(1,nobs+1):
    ar13[i] = np.dot(a3n3[1,:,:],ar13[i-1]) + imp[i-1]

a3n3inv = np.zeros((nobs+1,3,3))
a3n3inv[0,:,:] = a3n3[0]
a3n3inv[1,:,:] = -a3n3[1]
for i in range(2,nobs+1):
    a3n3inv[i,:,:] = np.dot(-a3n3[1],a3n3inv[i-1,:,:])

def demoFieldSynthesis():
    '''Demonstrate Field Synthesis Method with Plots
        INPUT
         None
        OUTPUT
         None
    '''
    # plt.rc('text', usetex=True)
    fig, ax = plt.subplots(2,4,sharey=True,sharex=True,figsize=(16,9))

    # Create F, the illumination pattern
    F_hat = createAnnulus()
    F_hat = ft.ifftshift(F_hat)
    F = ft.ifft2(F_hat)
    F = ft.fftshift(F)
    # This is the illumination intensity pattern
    Fsqmod = np.real(F*np.conj(F))

    #plt.figure()
    #plt.title('F')
    #plt.imshow(Fsqmod, cmap='plasma')
    #plt.show(block=False)
    ax[0,0].imshow(Fsqmod, cmap='plasma')
    ax[0,0].set_title('F(x,z)')

    # Create L, the scan profile
    L = np.zeros_like(Fsqmod)
    center = L.shape[1]//2
    sigma = 30
    L[center,:] = norm.pdf(np.arange(-center,center),0,sigma)
    # L[L.shape[1]//2,:] = 1
    # The square modulus of L is the object space
    Lsqmod = L*np.conj(L)
    # This is the line scan profile used in Field Synthesis
    L_hat = ft.fftshift(ft.fft2(ft.ifftshift(L)))

    ax[0,1].imshow(L, cmap='plasma')
    ax[0,1].set_title('$ L(x)\delta(z) $')

    ax[0,2].imshow(Lsqmod, cmap='plasma')
    ax[0,2].set_title('$ |L(x)\delta(z)|^2 $')

    ax[0,3].imshow(np.abs(L_hat), cmap='plasma')
    ax[0,3].set_title('$\hat{L}(k_x) $')

    # Manually scan by shifting Fsqmod and multiplying by Lsqmod
    scanned = doConventionalScan(Fsqmod,Lsqmod)

    ax[1,0].imshow(scanned, cmap='plasma')
    ax[1,0].set_title('Scanned: $ \sum_{x\'} |F(x\',z)|^2|L(x-x\')|^2 $')

    # Manually scanning is a convolution operation
    # There are potentially boundary effects here
    convolved = sig.fftconvolve(Fsqmod,Lsqmod,'same')

    ax[1,1].imshow(convolved, cmap='plasma')
    ax[1,1].set_title('Convolved: $ |F(x,z)|^2 ** |L(x)\delta(z)|^2 $')

    # This manual implementation of Fourier transform based convolution
    # actually does circular convolution
    convolvedft = ft.fftshift(ft.fft2(ft.ifft2(ft.ifftshift(Fsqmod)) *ft.ifft2(ft.ifftshift(Lsqmod))))
    convolvedft = np.real(convolvedft)

    ax[1,2].imshow(convolvedft, cmap='plasma')
    ax[1,2].set_title(r'Convolved FT: $ \mathcal{F}^{-1} \{ \mathcal{F}\{|F|^2\} \mathcal{F}\{|L(x)\delta(z)|^2\} \} $')

    # Do the Field Synthesis method of performing a line scan at the back focal plane
    fieldSynthesis = doFieldSynthesisLineScan(F_hat,L_hat)

    ax[1,3].imshow(fieldSynthesis, cmap='plasma')
    ax[1,3].set_title('Field Synthesis: $ \sum_a |\mathcal{F}^{-1}\{ \hat{F}(k_x,k_z)\hat{L}(k_x-a) \}|^2 $')

    plt.show()
    plt.pause(0.001)
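
A quick standalone check of the equivalence the demo relies on: scipy's fftconvolve performs linear (zero-padded) convolution, which matches an explicit FFT product once both inputs are padded to the full output size. The arrays here are random placeholders.

import numpy as np
from numpy import fft as ft
from scipy import signal as sig

rng = np.random.default_rng(1)
a = rng.random((64, 64))
b = rng.random((64, 64))

linear = sig.fftconvolve(a, b, mode='full')          # zero-padded (linear) convolution

# circular convolution via an explicit FFT product, padded to the full output size
shape = [s1 + s2 - 1 for s1, s2 in zip(a.shape, b.shape)]
circular = np.real(ft.ifft2(ft.fft2(a, shape) * ft.fft2(b, shape)))

print(np.allclose(linear, circular))                 # True: padding removes the wrap-around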
Esempio n. 45
0
def convolve(data, window, axis='time', length=1):
    """Design taper and convolve it with the signal.

    Parameters
    ----------
    data : instance of Data
        the data to filter.
    window : str
        one of the windows in scipy, using get_window
    length : float, optional
        length of the window
    axis : str, optional
        axis to apply the filter on.

    Returns
    -------
    instance of DataRaw
        data after convolution

    Notes
    -----
    Most of the code is identical to fftconvolve(axis=data.index_of(axis))
    but unfortunately fftconvolve in scipy 0.13 doesn't take that argument
    so we need to redefine it here. It's pretty slow too.

    Taper is normalized such that the integral of the function remains the
    same even after convolution.

    See Also
    --------
    scipy.signal.get_window : function used to create windows
    """
    taper = get_window(window, length * data.s_freq)
    taper = taper / sum(taper)

    fdata = data._copy()
    idx_axis = data.index_of(axis)

    for i in range(data.number_of('trial')):
        orig_dat = data.data[i]

        sel_dim = []
        i_dim = []
        dat = empty(orig_dat.shape, dtype=orig_dat.dtype)
        for i_axis, one_axis in enumerate(data.list_of_axes):
            if one_axis != axis:
                i_dim.append(i_axis)
                sel_dim.append(range(data.number_of(one_axis)[i]))

        for one_iter in product(*sel_dim):
            # create the numpy indices for one value per dimension,
            # except for the dimension of interest
            idx = [[x] for x in one_iter]
            idx.insert(idx_axis, range(data.number_of(axis)[i]))
            indices = ix_(*idx)

            d_1dim = squeeze(orig_dat[indices], axis=i_dim)

            d_1dim = fftconvolve(d_1dim, taper, 'same')

            for to_squeeze in i_dim:
                d_1dim = expand_dims(d_1dim, axis=to_squeeze)
            dat[indices] = d_1dim
        fdata.data[i] = dat

    return fdata
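
Recent SciPy versions add an axes argument to fftconvolve, so a normalized taper can be applied along one axis of a multi-dimensional array directly. A minimal sketch under that assumption; the window name, sampling rate, and array sizes are placeholders.

import numpy as np
from scipy.signal import fftconvolve, get_window

s_freq = 256
data = np.random.randn(8, 30 * s_freq)           # (channels, time)

taper = get_window('hann', s_freq)               # 1-second window
taper = taper / taper.sum()                      # keep the integral of the signal unchanged

smoothed = fftconvolve(data, taper[np.newaxis, :], mode='same', axes=1)
print(smoothed.shape)                            # (8, 7680)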
Esempio n. 46
0

if __name__ == '__main__':

    from TemplateGenerator import generate_tilted_templates
    from TomogramGenerator import generate_tomogram
    import matplotlib.pyplot as plt

    templates = generate_tilted_templates()
    tomogram = generate_tomogram(templates, None)

    fig, ax = plt.subplots()
    ax.imshow(tomogram.density_map)

    correlation = signal.fftconvolve(tomogram.density_map,
                                     templates[1][2].density_map,
                                     mode='same')

    fig, ax = plt.subplots()
    ax.imshow(correlation)

    positions = CandidateSelector.find_local_maxima(None, correlation)
    maximums = np.zeros(correlation.shape)
    for position in positions:
        maximums[position] = correlation[position]
    fig, ax = plt.subplots()
    print(len(positions))
    ax.imshow(maximums)

    #plt.show()
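
Note that fftconvolve computes a convolution, which equals a cross-correlation only when the template is symmetric. A minimal sketch (with random placeholder arrays) of the general case, either flipping the template or using scipy.signal.correlate with the FFT method:

import numpy as np
from scipy import signal

rng = np.random.default_rng(2)
image = rng.random((100, 100))
template = rng.random((9, 9))

# convolution with a flipped template is cross-correlation
xcorr_a = signal.fftconvolve(image, template[::-1, ::-1], mode='same')
xcorr_b = signal.correlate(image, template, mode='same', method='fft')
print(np.allclose(xcorr_a, xcorr_b))             # True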
Esempio n. 47
0
 pulse_list = join_pulses(pulse.nonzero()[0])
 if pulse_list is not None:
     raw_data = map_to_raw_data(pulse_list,
                                avg_num=avg_num,
                                window_length=window_length,
                                offset=trans.start)
     for pmin, pmax in raw_data:
         selection = slice(pmin, pmax)  # the detection window
         windowsize = 1024 * 3
         msig = np.zeros(windowsize) - 1.0  # make the edge (step) signal
         msig[windowsize // 2:-1] = 1.0  # reversed window so the convolution matches a correlation
         position_of_pulse = (np.abs(
             signal.fftconvolve(data[selection]**2 -
                                (data[selection]**2).mean(),
                                msig,
                                mode='valid')).argmax() +
                              (msig.shape[0] - 1) // 2)
         #position_of_pulse  = (pmin+pmax)/2. # middle of window
         selection1 = slice(pmin, pmin + position_of_pulse)  #
         selection2 = slice(pmin + position_of_pulse, pmax)
         pchange = (data[selection2].astype(np.float)**2).mean() - (
             data[selection1].astype(np.float)**2).mean()
         ptime = real_time(pmin + position_of_pulse,
                           sync_time=sync_time,
                           first_timestamp=timestamp_value)
         up_down = 'up  '
         if np.signbit(pchange):
             up_down = 'down'
         print(
             "Pulse power change %s %.2f db & time is %33.12f seconds  %s,%s "
Esempio n. 48
0
    def evaluate_period_forecasts(self):
        """
        Evaluates ROC and Reliability scores for forecasts over the full period from start hour to end hour

        Returns:
            A pandas DataFrame with full-period metadata and verification statistics
        """
        score_columns = [
            "Run_Date", "Ensemble Name", "Model_Name", "Forecast_Variable",
            "Neighbor_Radius", "Smoothing_Radius", "Size_Threshold", "ROC",
            "Reliability"
        ]
        all_scores = pd.DataFrame(columns=score_columns)
        if self.coordinate_file is not None:
            coord_mask = np.where(
                (self.coordinates["lon"] >= self.lon_bounds[0])
                & (self.coordinates["lon"] <= self.lon_bounds[1])
                & (self.coordinates["lat"] >= self.lat_bounds[0])
                & (self.coordinates["lat"] <= self.lat_bounds[1])
                & (self.period_obs[self.mask_variable] > 0))
        else:
            coord_mask = None
        for neighbor_radius in self.neighbor_radii:
            n_filter = disk(neighbor_radius)
            for s, size_threshold in enumerate(self.size_thresholds):
                period_obs = fftconvolve(self.period_obs[self.mrms_variable] >=
                                         self.obs_thresholds[s],
                                         n_filter,
                                         mode="same")
                period_obs[period_obs > 1] = 1
                if self.obs_mask and self.coordinate_file is None:
                    period_obs = period_obs[
                        self.period_obs[self.mask_variable] > 0]
                elif self.obs_mask and self.coordinate_file is not None:
                    period_obs = period_obs[coord_mask[0], coord_mask[1]]
                else:
                    period_obs = period_obs.ravel()
                for smoothing_radius in self.smoothing_radii:
                    print(
                        "Eval period forecast {0} {1} {2} {3} {4} {5}".format(
                            self.model_name, self.forecast_variable,
                            self.run_date, neighbor_radius, size_threshold,
                            smoothing_radius))
                    period_var = "neighbor_prob_{0:d}-hour_r_{1:d}_s_{2:d}_{3}_{4:0.2f}".format(
                        self.end_hour - self.start_hour + 1, neighbor_radius,
                        smoothing_radius, self.forecast_variable,
                        size_threshold)
                    if self.obs_mask and self.coordinate_file is None:
                        period_forecast = self.period_forecasts[period_var][
                            self.period_obs[self.mask_variable] > 0]
                    elif self.obs_mask and self.coordinate_file is not None:
                        period_forecast = self.period_forecasts[period_var][
                            coord_mask[0], coord_mask[1]]
                    else:
                        period_forecast = self.period_forecasts[
                            period_var].ravel()
                    roc = DistributedROC(thresholds=self.probability_levels,
                                         obs_threshold=0.5)
                    roc.update(period_forecast, period_obs)
                    rel = DistributedReliability(
                        thresholds=self.probability_levels, obs_threshold=0.5)
                    rel.update(period_forecast, period_obs)
                    row = [
                        self.run_date, self.ensemble_name, self.model_name,
                        self.forecast_variable, neighbor_radius,
                        smoothing_radius, size_threshold, roc, rel
                    ]
                    all_scores.loc[period_var] = row
        return all_scores
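
The neighborhood step above amounts to convolving a binary exceedance grid with a circular footprint and clipping the result at 1. A standalone sketch with random placeholder data; disk() below is a stand-in for skimage.morphology.disk.

import numpy as np
from scipy.signal import fftconvolve

def disk(radius):
    # stand-in for skimage.morphology.disk: binary circular footprint
    y, x = np.mgrid[-radius:radius + 1, -radius:radius + 1]
    return (x**2 + y**2 <= radius**2).astype(float)

rng = np.random.default_rng(3)
field = rng.random((200, 200))
exceed = (field >= 0.99).astype(float)           # binary exceedance grid

neighbor_prob = fftconvolve(exceed, disk(5), mode='same')
neighbor_prob[neighbor_prob > 1] = 1             # "is there an event within the radius?"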
Esempio n. 49
0
    def train_logreg(
        self,
        ts_target: TSContinuous,
        ts_input: TSEvent = None,
        learning_rate: float = 0,
        regularize: float = 0,
        batch_size: Optional[int] = None,
        epochs: int = 1,
        store_states: bool = True,
        verbose: bool = False,
    ):
        """
        Train self with logistic regression over one of possibly many batches. Note that this training method assumes that a sigmoid function is applied to the layer output, which is not the case in `.evolve`.

        :param TSContinuous ts_target:  Target for current batch
        :param TSEvent ts_input:        Input to self for current batch
        :param float learning_rate:     Factor determining scale of weight increments at each step
        :param float regularize:        Regularization parameter
        :param int batch_size:          Number of samples per batch. If None, train with all samples at once
        :param int epochs:              How many times is training repeated
        :param bool store_states:       Include last state from previous training and store state from this training. This has the same effect as if data from both trainings were presented at once.
        :param bool verbose:            Print output about training progress
        """

        # - Discrete time steps for evaluating input and target time series
        num_timesteps = int(np.round(ts_target.duration / self.dt))
        time_base = self._gen_time_trace(ts_target.t_start, num_timesteps)

        # - Discard last sample to avoid counting time points twice
        time_base = time_base[:-1]

        # - Make sure time_base does not exceed ts_target
        time_base = time_base[time_base <= ts_target.t_stop]

        # - Prepare target data
        target = ts_target(time_base)

        # - Make sure no nan is in target, as this causes learning to fail
        assert not np.isnan(
            target
        ).any(), "Layer `{}`: nan values have been found in target (where: {})".format(
            self.name, np.where(np.isnan(target))
        )

        # - Check target dimensions
        if target.ndim == 1 and self.size == 1:
            target = target.reshape(-1, 1)

        assert (
            target.shape[-1] == self.size
        ), "Layer `{}`: Target dimensions ({}) does not match layer size ({})".format(
            self.name, target.shape[-1], self.size
        )

        # - Prepare input data

        # Empty input array with additional dimension for training biases
        inp = np.zeros((np.size(time_base), self.size_in + 1))
        inp[:, -1] = 1

        # - Generate spike trains from ts_input
        if ts_input is None:
            # - Assume zero input
            print(
                "Layer `{}`: No ts_input defined, assuming input to be 0.".format(
                    self.name
                )
            )

        else:
            # - Get data within given time range
            event_times, event_channels = ts_input(
                t_start=time_base[0], t_stop=time_base[-1]
            )

            # - Make sure that input channels do not exceed layer input dimensions
            try:
                assert (
                    np.amax(event_channels) <= self.size_in - 1
                ), "Layer `{}`: Number of input channels exceeds layer input dimensions.".format(
                    self.name
                )
            except ValueError as e:
                # - No events in input data
                if event_channels.size == 0:
                    print("Layer `{}`: No input spikes for training.".format(self.name))
                else:
                    raise e

            # Extract spike data from the input
            spike_raster = (
                ts_input.raster(
                    dt=self.dt,
                    t_start=time_base[0],
                    num_timesteps=time_base.size,
                    channels=np.arange(self.size_in),
                    add_events=self.add_events,
                )
            ).astype(float)

            if store_states:
                try:
                    # - Include last state from previous batch
                    spike_raster[0, :] += self._training_state
                except AttributeError:
                    pass

            # - Define exponential kernel
            kernel = np.exp(-(np.arange(time_base.size - 1) * self.dt) / self.tau_syn)

            # - Apply kernel to spike trains and add filtered trains to input array
            for channel, events in enumerate(spike_raster.T):
                inp[:, channel] = fftconvolve(events, kernel, "full")[: time_base.size]

        # - Prepare batches for training
        if batch_size is None:
            num_batches = 1
            batch_size = num_timesteps
        else:
            num_batches = int(np.ceil(num_timesteps / float(batch_size)))

        sample_order = np.arange(
            num_timesteps
        )  # Indices to choose samples - shuffle for random order

        # - Iterate over epochs
        for ind_epoch in range(epochs):
            # - Iterate over batches and optimize
            for ind_batch in range(num_batches):
                simple_indices = sample_order[
                    ind_batch * batch_size : (ind_batch + 1) * batch_size
                ]
                # - Gradients
                gradients = self._gradients(
                    inp[simple_indices], target[simple_indices], regularize
                )
                self.weights = self.weights - learning_rate * gradients[:-1, :]
                self.bias = self.bias - learning_rate * gradients[-1, :]
            if verbose:
                print(
                    "Layer `{}`: Training epoch {} of {}".format(
                        self.name, ind_epoch + 1, epochs
                    ),
                    end="\r",
                )
            # - Shuffle samples
            np.random.shuffle(sample_order)

        if verbose:
            print("Layer `{}`: Finished training.              ".format(self.name))

        if store_states:
            # - Store last state for next batch
            self._training_state = inp[-1, :-1].copy()
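
The synaptic filtering step above (an exponential kernel applied to each spike channel with fftconvolve, truncated to the original length), shown in isolation with placeholder sizes and time constants:

import numpy as np
from scipy.signal import fftconvolve

dt, tau_syn = 1e-3, 20e-3
num_steps, num_channels = 1000, 4
rng = np.random.default_rng(4)
spike_raster = (rng.random((num_steps, num_channels)) < 0.02).astype(float)

kernel = np.exp(-(np.arange(num_steps - 1) * dt) / tau_syn)   # causal exponential kernel

filtered = np.zeros_like(spike_raster)
for channel, events in enumerate(spike_raster.T):
    filtered[:, channel] = fftconvolve(events, kernel, "full")[:num_steps]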
Esempio n. 50
0
    half_window = (window_size - 1) // 2

    # precompute coefficients
    b = np.mat([[k**i for i in order_range]
                for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b).A[deriv]

    # pad the signal at the extremes with
    # values taken from the signal itself
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])

    y = np.concatenate((firstvals, y, lastvals))

    if use_fft:
        return signal.fftconvolve(m, y, mode='valid')
    else:
        return np.convolve(m, y, mode='valid')

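The two branches above should produce the same result, since both compute a 'valid'-mode convolution; a quick check on random data (m here is an arbitrary placeholder, not real Savitzky-Golay coefficients):

import numpy as np
from scipy import signal

rng = np.random.default_rng(5)
y = rng.random(500)
m = rng.random(11)

fft_based = signal.fftconvolve(m, y, mode='valid')
direct = np.convolve(m, y, mode='valid')
print(np.allclose(fft_based, direct))            # True: same valid-mode convolution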

def wiener_filter(t, h, signal='gaussian', noise='flat', return_PSDs=False):
    """Compute a Wiener-filtered time-series

    Parameters
    ----------
    t : array_like
        evenly-sampled time series, length N
    h : array_like
        observations at each t
    signal : str (optional)
        currently only 'gaussian' is supported
Esempio n. 51
0
def rotational_broadening(wave_spec,
                          flux_spec,
                          vrot,
                          fwhm=0.25,
                          epsilon=0.6,
                          chard=None,
                          stepr=0,
                          stepi=0,
                          alam0=None,
                          alam1=None,
                          irel=0,
                          cont=None,
                          method='fortran'):
    """
    Apply rotational broadening to a spectrum assuming a linear limb darkening
    law.
    
    This function is based on the ROTIN program. See Fortran file for
    explanations of parameters.
    
    Limb darkening law is linear, default value is epsilon=0.6
    
    Possibility to normalize as well by giving continuum in 'cont' parameter.
    
    B{Warning}: C{method='python'} is still experimental, but should work.
    
    Section 1. Parameters for rotational convolution 
    ================================================

    C{VROT}: v sin i (in km/s):
    
        -  if VROT=0 - rotational convolution is 
                 a) either not calculated,
                 b) or, if simultaneously FWHM is rather large
                 (vrot/c*lambda < FWHM/20.),
                 vrot is set to  FWHM/20*c/lambda;
        -  if VROT >0 but the previous condition b) applies, the
        value of VROT is changed as  in the previous case
        -  if VROT<0 - the value of abs(VROT) is used regardless of
        how small compared to FWHM it is
     
    C{CHARD}: characteristic scale of the variations of unconvolved stellar
    spectrum (basically, characteristic distance between two neighbouring
    wavelength points) - in A:
     
        - if =0 - program sets up default (0.01 A)
        
    C{STEPR}: wavelength step for evaluation rotational convolution;
     
        - if =0, the program sets up default (the wavelength
        interval corresponding to the rotational velocity
        divided by 3.)
        - if <0, convolved spectrum calculated on the original
        (detailed) SYNSPEC wavelength mesh


    Section 2. parameters for instrumental convolution
    ==================================================

    C{FWHM}: WARNING: this is not the full width at half maximum for Gaussian
    instrumental profile, but the sigma (FWHM = 2.3548 sigma).
    
    C{STEPI}: wavelength step for evaluating instrumental convolution
          - if =0, the program sets up default (FWHM/10.)
          - if <0, convolved spectrum calculated with the previous
          wavelength mesh:
          either the original (SYNSPEC) one if vrot=0,
          or the one used in rotational convolution (vrot > 0)


    Section 3. wavelength interval and normalization of spectra
    ===========================================================

    C{ALAM0}: initial wavelength
    C{ALAM1}: final wavelength
    C{IREL}: for =1 relative spectrum, =0 absolute spectrum
    
    @return: wavelength,flux
    @rtype: array, array
    """
    if method == 'fortran':
        #-- set arguments
        if alam0 is None: alam0 = wave_spec[0]
        if alam1 is None: alam1 = wave_spec[-1]
        if cont is None: cont = (np.ones(1), np.ones(1))
        contw, contf = cont
        if chard is None:
            chard = np.diff(wave_spec).mean()

        #-- apply broadening
        logger.info('ROTIN rot.broad. with vrot=%.3f (epsilon=%.2f)' %
                    (vrot, epsilon))
        w3, f3, ind = pyrotin4.pyrotin(wave_spec, flux_spec, contw, contf,
                                       vrot, chard, stepr, fwhm, stepi, alam0,
                                       alam1, irel, epsilon)

        return w3[:ind], f3[:ind]
    elif method == 'python':
        logger.info("PYTHON rot.broad with vrot=%.3f (epsilon=%.2f)" %
                    (vrot, epsilon))
        #-- first a wavelength Gaussian convolution:
        if fwhm > 0:
            fwhm /= 2.3548
            #-- make sure it's equidistant
            wave_ = np.linspace(wave_spec[0], wave_spec[-1], len(wave_spec))
            flux_ = np.interp(wave_, wave_spec, flux_spec)
            dwave = wave_[1] - wave_[0]
            n = int(2 * 4 * fwhm / dwave)
            wave_k = np.arange(n) * dwave
            wave_k -= wave_k[-1] / 2.
            kernel = np.exp(-(wave_k)**2 / (2 * fwhm**2))
            kernel /= sum(kernel)
            flux_conv = fftconvolve(1 - flux_, kernel, mode='same')
            flux_spec = np.interp(wave_spec + dwave / 2,
                                  wave_,
                                  1 - flux_conv,
                                  left=1,
                                  right=1)
        if vrot > 0:
            #-- convert wavelength array into velocity space, this is easier
            #   we also need to make it equidistant!
            wave_ = np.log(wave_spec)
            velo_ = np.linspace(wave_[0], wave_[-1], len(wave_))
            flux_ = np.interp(velo_, wave_, flux_spec)
            dvelo = velo_[1] - velo_[0]
            vrot = vrot / (constants.cc * 1e-3)
            #-- compute the convolution kernel and normalise it
            n = int(2 * vrot / dvelo)
            velo_k = np.arange(n) * dvelo
            velo_k -= velo_k[-1] / 2.
            y = 1 - (velo_k / vrot)**2  # transformation of velocity
            G = (2 * (1 - epsilon) * sqrt(y) + pi * epsilon / 2. * y) / (
                pi * vrot * (1 - epsilon / 3.0))  # the kernel
            G /= G.sum()
            #-- convolve the flux with the kernel
            flux_conv = fftconvolve(1 - flux_, G, mode='same')
            velo_ = np.arange(len(flux_conv)) * dvelo + velo_[0]
            wave_conv = np.exp(velo_)
            return wave_conv, 1 - flux_conv
        return wave_spec, flux_spec
    else:
        raise ValueError("don't understand method {}".format(method))
Esempio n. 52
0
def excess_power(
    ts_data,  # Time series from magnetic field data 
    band=None,  # Channel bandwidth
    channel_name='channel-name',  # Channel name
    fmin=0,  # Lowest frequency of the filter bank.
    fmax=None,  # Highest frequency of the filter bank.
    impulse=False,  # Impulse response
    make_plot=True,  # Condition to produce plots
    max_duration=None,  # Maximum duration of the tile
    nchans=256,  # Total number of channels
    psd_estimation='median-mean',  # Average method
    psd_segment_length=60,  # Length of each segment in seconds
    psd_segment_stride=30,  # Separation between 2 consecutive segments in seconds
    station='station-name',  # Station name
    tile_fap=1e-7,  # Tile false alarm probability threshold in Gaussian noise.
    verbose=True,  # Print details
    window_fraction=0,  # Whitening window fraction
    wtype='tukey'):  # Whitening type, can be tukey or hann
    '''
    Perform excess-power search analysis on magnetic field data.
    This method will produce a bunch of time-frequency plots for every
    tile duration and bandwidth analysed as well as a XML file identifying
    all the triggers found in the selected data within the user-defined
    time range.

    Parameters
    ----------
    ts_data : TimeSeries
      Time Series from magnetic field data
    psd_segment_length : float
      Length of each segment in seconds
    psd_segment_stride : float
      Separation between 2 consecutive segments in seconds
    psd_estimation : string
      Average method
    window_fraction : float
      Whitening window fraction
    tile_fap : float
      Tile false alarm probability threshold in Gaussian noise.
    nchans : int
      Total number of channels
    band : float
      Channel bandwidth
    fmin : float
      Lowest frequency of the filter bank.
    fmax : float
      Highest frequency of the filter bank

    Examples
    --------
    The program can be run as an executable using the ``excesspower`` command
    line as follows::

      excesspower --station "mainz01" \\
                  --start-time "2017-04-15-17-1" \\
                  --end-time "2017-04-15-18" \\
                  --rep "/Users/vincent/ASTRO/data/GNOME/GNOMEDrive/gnome/serverdata/" \\
                  --resample 512 \\
                  --verbose

    '''
    # Determine sampling rate based on extracted time series
    sample_rate = ts_data.sample_rate
    # Check if tile maximum frequency is not defined
    if fmax is None or fmax > sample_rate / 2.:
        # Set the tile maximum frequency equal to the Nyquist frequency
        # (i.e. half the sampling rate)
        fmax = sample_rate / 2.0
    # Check whether or not tile bandwidth and channel are defined
    if band is None and nchans is None:
        # Exit program with error message
        exit("Either bandwidth or number of channels must be specified...")
    else:
        # Check that the tile maximum frequency is larger than its minimum frequency
        assert fmax >= fmin
        # Define spectral band of data
        data_band = fmax - fmin
        # Check whether tile bandwidth or channel is defined
        if band is not None:
            # Define number of possible filter bands
            nchans = int(data_band / band)
        elif nchans is not None:
            # Define filter bandwidth
            band = data_band / nchans
            nchans -= 1
        # Check that the number of channels is greater than one
        assert nchans > 1
    # Print segment information
    if verbose: print '|- Estimating PSD from segments of',
    if verbose:
        print '%.2f s, with %.2f s stride...' % (psd_segment_length,
                                                 psd_segment_stride)
    # Convert time series as array of float
    data = ts_data.astype(numpy.float64)
    # Define segment length for PSD estimation in sample unit
    seg_len = int(psd_segment_length * sample_rate)
    # Define separation between consecutive segments in sample unit
    seg_stride = int(psd_segment_stride * sample_rate)
    # Minimum frequency of detectable signal in a segment
    delta_f = 1. / psd_segment_length
    # Calculate PSD length counting the zero frequency element
    fd_len = fmax / delta_f + 1
    # Calculate the overall PSD from individual PSD segments
    if impulse:
        # Produce flat data
        flat_data = numpy.ones(int(fd_len)) * 2. / fd_len
        # Create PSD frequency series
        fd_psd = types.FrequencySeries(flat_data, 1. / psd_segment_length,
                                       ts_data.start_time)
    else:
        # Create overall PSD using Welch's method
        fd_psd = psd.welch(data,
                           avg_method=psd_estimation,
                           seg_len=seg_len,
                           seg_stride=seg_stride)
    if make_plot:
        # Plot the power spectral density
        plot_spectrum(fd_psd)
    # We need this for the SWIG functions
    lal_psd = fd_psd.lal()
    # Create whitening window
    if verbose: print "|- Whitening window and spectral correlation..."
    if wtype == 'hann': window = lal.CreateHannREAL8Window(seg_len)
    elif wtype == 'tukey':
        window = lal.CreateTukeyREAL8Window(seg_len, window_fraction)
    else:
        raise ValueError("Can't handle window type %s" % wtype)
    # Create FFT plan
    fft_plan = lal.CreateForwardREAL8FFTPlan(len(window.data.data), 1)
    # Perform two point spectral correlation
    spec_corr = lal.REAL8WindowTwoPointSpectralCorrelation(window, fft_plan)
    # Determine length of individual filters
    filter_length = int(2 * band / fd_psd.delta_f) + 1
    # Initialise filter bank
    if verbose:
        print "|- Create bank of %i filters of %i Hz bandwidth..." % (
            nchans, filter_length)
    # Initialise array to store filter's frequency series and metadata
    lal_filters = []
    # Initialise array to store filter's time series
    fdb = []
    # Loop over the channels
    for i in range(nchans):
        # Define central position of the filter
        freq = fmin + band / 2 + i * band
        # Create excess power filter
        lal_filter = lalburst.CreateExcessPowerFilter(freq, band, lal_psd,
                                                      spec_corr)
        # Testing spectral correlation on filter
        #print lalburst.ExcessPowerFilterInnerProduct(lal_filter, lal_filter, spec_corr, None)
        # Append entire filter structure
        lal_filters.append(lal_filter)
        # Append filter's spectrum
        fdb.append(FrequencySeries.from_lal(lal_filter))
        #print fdb[0].frequencies
        #print fdb[0]
    if make_plot:
        # Plot filter bank
        plot_bank(fdb)
        # Convert filter bank from frequency to time domain
        if verbose:
            print "|- Convert all the frequency domain to the time domain..."
        tdb = []
        # Loop for each filter's spectrum
        for fdt in fdb:
            zero_padded = numpy.zeros(int((fdt.f0 / fdt.df).value) + len(fdt))
            st = int((fdt.f0 / fdt.df).value)
            zero_padded[st:st + len(fdt)] = numpy.real_if_close(fdt.value)
            n_freq = int(sample_rate / 2 / fdt.df.value) * 2
            tdt = numpy.fft.irfft(zero_padded, n_freq) * math.sqrt(sample_rate)
            tdt = numpy.roll(tdt, len(tdt) / 2)
            tdt = TimeSeries(tdt,
                             name="",
                             epoch=fdt.epoch,
                             sample_rate=sample_rate)
            tdb.append(tdt)
        # Plot time series filter
        plot_filters(tdb, fmin, band)
    # Compute whitened inner products of input filters with themselves
    #white_filter_ip = numpy.array([lalburst.ExcessPowerFilterInnerProduct(f, f, spec_corr, None) for f in lal_filters])
    # Compute unwhitened inner products of input filters with themselves
    #unwhite_filter_ip = numpy.array([lalburst.ExcessPowerFilterInnerProduct(f, f, spec_corr, lal_psd) for f in lal_filters])
    # Compute whitened filter inner products between adjacent input filters
    #white_ss_ip = numpy.array([lalburst.ExcessPowerFilterInnerProduct(f1, f2, spec_corr, None) for f1, f2 in zip(lal_filters[:-1], lal_filters[1:])])
    # Compute unwhitened filter inner products between adjacent input filters
    #unwhite_ss_ip = numpy.array([lalburst.ExcessPowerFilterInnerProduct(f1, f2, spec_corr, lal_psd) for f1, f2 in zip(lal_filters[:-1], lal_filters[1:])])
    # Check filter's bandwidth is equal to user defined channel bandwidth
    min_band = (len(lal_filters[0].data.data) - 1) * lal_filters[0].deltaF / 2
    assert min_band == band
    # Create an event list where all the triggers will be stored
    event_list = lsctables.New(lsctables.SnglBurstTable, [
        'start_time', 'start_time_ns', 'peak_time', 'peak_time_ns', 'duration',
        'bandwidth', 'central_freq', 'chisq_dof', 'confidence', 'snr',
        'amplitude', 'channel', 'ifo', 'process_id', 'event_id', 'search',
        'stop_time', 'stop_time_ns'
    ])
    # Create repositories to save TF and time series plots
    os.system('mkdir -p segments/time-frequency')
    os.system('mkdir -p segments/time-series')
    # Define time edges
    t_idx_min, t_idx_max = 0, seg_len
    # Loop over each segment
    while t_idx_max <= len(ts_data):
        # Define first and last timestamps of the block
        start_time = ts_data.start_time + t_idx_min / float(
            ts_data.sample_rate)
        end_time = ts_data.start_time + t_idx_max / float(ts_data.sample_rate)
        if verbose:
            print "\n|- Analyzing block %i to %i (%.2f percent)" % (
                start_time, end_time, 100 * float(t_idx_max) / len(ts_data))
        # Debug for impulse response
        if impulse:
            for i in range(t_idx_min, t_idx_max):
                ts_data[i] = 1000. if i == (t_idx_max + t_idx_min) / 2 else 0.
        # Model a whitened time series for the block
        tmp_ts_data = types.TimeSeries(ts_data[t_idx_min:t_idx_max] *
                                       window.data.data,
                                       delta_t=1. / ts_data.sample_rate,
                                       epoch=start_time)
        # Save time series in relevant repository
        os.system('mkdir -p segments/%i-%i' % (start_time, end_time))
        if make_plot:
            # Plot time series
            plot_ts(tmp_ts_data,
                    fname='segments/time-series/%i-%i.png' %
                    (start_time, end_time))
        # Convert times series to frequency series
        fs_data = tmp_ts_data.to_frequencyseries()
        if verbose:
            print "|- Frequency series data has variance: %s" % fs_data.data.std(
            )**2
        # Whitening (FIXME: Whiten the filters, not the data)
        fs_data.data /= numpy.sqrt(fd_psd) / numpy.sqrt(2 * fd_psd.delta_f)
        if verbose:
            print "|- Whitened frequency series data has variance: %s" % fs_data.data.std(
            )**2
        if verbose: print "|- Create time-frequency plane for current block"
        # Return the complex snr, along with its associated normalization of the template,
        # matched filtered against the data
        #filter.matched_filter_core(types.FrequencySeries(tmp_filter_bank,delta_f=fd_psd.delta_f),
        #                           fs_data,h_norm=1,psd=fd_psd,low_frequency_cutoff=lal_filters[0].f0,
        #                           high_frequency_cutoff=lal_filters[0].f0+2*band)
        if verbose: print "|- Filtering all %d channels...\n" % nchans,
        # Initialise 2D zero array
        tmp_filter_bank = numpy.zeros(len(fd_psd), dtype=numpy.complex128)
        # Initialise 2D zero array for time-frequency map
        tf_map = numpy.zeros((nchans, seg_len), dtype=numpy.complex128)
        # Loop over all the channels
        for i in range(nchans):
            # Reset filter bank series
            tmp_filter_bank *= 0.0
            # Index of starting frequency
            f1 = int(lal_filters[i].f0 / fd_psd.delta_f)
            # Index of last frequency bin
            f2 = int((lal_filters[i].f0 + 2 * band) / fd_psd.delta_f) + 1
            # (FIXME: Why is there a factor of 2 here?)
            tmp_filter_bank[f1:f2] = lal_filters[i].data.data * 2
            # Define the template to filter the frequency series with
            template = types.FrequencySeries(tmp_filter_bank,
                                             delta_f=fd_psd.delta_f,
                                             copy=False)
            # Create filtered series
            filtered_series = filter.matched_filter_core(
                template,
                fs_data,
                h_norm=None,
                psd=None,
                low_frequency_cutoff=lal_filters[i].f0,
                high_frequency_cutoff=lal_filters[i].f0 + 2 * band)
            # Include filtered series in the map
            tf_map[i, :] = filtered_series[0].numpy()
        if make_plot:
            # Plot spectrogram
            plot_spectrogram(numpy.abs(tf_map).T,
                             dt=tmp_ts_data.delta_t,
                             df=band,
                             ymax=ts_data.sample_rate / 2.,
                             t0=start_time,
                             t1=end_time,
                             fname='segments/time-frequency/%i-%i.png' %
                             (start_time, end_time))
            plot_tiles_ts(numpy.abs(tf_map),
                          2,
                          1,
                          sample_rate=ts_data.sample_rate,
                          t0=start_time,
                          t1=end_time,
                          fname='segments/%i-%i/ts.png' %
                          (start_time, end_time))
            #plot_tiles_tf(numpy.abs(tf_map),2,1,ymax=ts_data.sample_rate/2,
            #              sample_rate=ts_data.sample_rate,t0=start_time,t1=end_time,
            #              fname='segments/%i-%i/tf.png'%(start_time,end_time))
        # Loop through powers of 2 up to number of channels
        for nc_sum in range(0, int(math.log(nchans, 2)))[::-1]:
            # Calculate total number of summed channels
            nc_sum = 2**nc_sum
            if verbose:
                print "\n\t|- Constructing tiles containing %d narrow band channels" % nc_sum
            # Compute full bandwidth of virtual channel
            df = band * nc_sum
            # Compute minimal signal's duration in virtual channel
            dt = 1.0 / (2 * df)
            # Compute under sampling rate
            us_rate = int(round(dt / ts_data.delta_t))
            if verbose:
                print "\t|- Undersampling rate for this level: %f" % (
                    ts_data.sample_rate / us_rate)
            if verbose: print "\t|- Calculating tiles..."
            # Clip the boundaries to remove window corruption
            clip_samples = int(psd_segment_length * window_fraction *
                               ts_data.sample_rate / 2)
            # Undersample narrow band channel's time series
            # Apply clipping condition because [0:-0] does not give the full array
            tf_map_temp = tf_map[:,clip_samples:-clip_samples:us_rate] \
                          if clip_samples > 0 else tf_map[:,::us_rate]
            # Initialise final tile time-frequency map
            tiles = numpy.zeros(((nchans + 1) / nc_sum, tf_map_temp.shape[1]))
            # Loop over tile index
            for i in xrange(len(tiles)):
                # Sum all inner narrow band channels
                ts_tile = numpy.absolute(tf_map_temp[nc_sum * i:nc_sum *
                                                     (i + 1)].sum(axis=0))
                # Define index of last narrow band channel for given tile
                n = (i + 1) * nc_sum - 1
                n = n - 1 if n == len(lal_filters) else n
                # Compute whitened inner products of each input filter with itself
                mu_sq = nc_sum * lalburst.ExcessPowerFilterInnerProduct(
                    lal_filters[n], lal_filters[n], spec_corr, None)
                #kmax = nc_sum-1 if n==len(lal_filters) else nc_sum-2
                # Loop over the inner narrow band channels
                for k in xrange(0, nc_sum - 1):
                    # Compute whitened filter inner products between adjacent input filters
                    mu_sq += 2 * lalburst.ExcessPowerFilterInnerProduct(
                        lal_filters[n - k], lal_filters[n - 1 - k], spec_corr,
                        None)
                # Normalise tile's time series
                tiles[i] = ts_tile.real**2 / mu_sq
            if verbose: print "\t|- TF-plane is %dx%s samples" % tiles.shape
            if verbose:
                print "\t|- Tile energy mean %f, var %f" % (numpy.mean(tiles),
                                                            numpy.var(tiles))
            # Define maximum number of degrees of freedom and check it larger or equal to 2
            max_dof = 32 if max_duration == None else int(max_duration / dt)
            assert max_dof >= 2
            # Loop through multiple degrees of freedom
            for j in [2**l for l in xrange(0, int(math.log(max_dof, 2)))]:
                # Duration is fixed by the NDOF and bandwidth
                duration = j * dt
                if verbose: print "\n\t\t|- Summing DOF = %d ..." % (2 * j)
                if verbose:
                    print "\t\t|- Explore signal duration of %f s..." % duration
                # Construct filter
                sum_filter = numpy.array([1, 0] * (j - 1) + [1])
                # Calculate length of filtered time series
                tlen = tiles.shape[1] - sum_filter.shape[0] + 1
                # Initialise filtered time series array
                dof_tiles = numpy.zeros((tiles.shape[0], tlen))
                # Loop over tiles
                for f in range(tiles.shape[0]):
                    # Sum and drop correlate tiles
                    dof_tiles[f] = fftconvolve(tiles[f], sum_filter, 'valid')
                if verbose:
                    print "\t\t|- Summed tile energy mean: %f" % (
                        numpy.mean(dof_tiles))
                if verbose:
                    print "\t\t|- Variance tile energy: %f" % (
                        numpy.var(dof_tiles))
                if make_plot:
                    plot_spectrogram(
                        dof_tiles.T,
                        dt,
                        df,
                        ymax=ts_data.sample_rate / 2,
                        t0=start_time,
                        t1=end_time,
                        fname='segments/%i-%i/%02ichans_%02idof.png' %
                        (start_time, end_time, nc_sum, 2 * j))
                    plot_tiles_ts(
                        dof_tiles,
                        2 * j,
                        df,
                        sample_rate=ts_data.sample_rate / us_rate,
                        t0=start_time,
                        t1=end_time,
                        fname='segments/%i-%i/%02ichans_%02idof_ts.png' %
                        (start_time, end_time, nc_sum, 2 * j))
                    plot_tiles_tf(
                        dof_tiles,
                        2 * j,
                        df,
                        ymax=ts_data.sample_rate / 2,
                        sample_rate=ts_data.sample_rate / us_rate,
                        t0=start_time,
                        t1=end_time,
                        fname='segments/%i-%i/%02ichans_%02idof_tf.png' %
                        (start_time, end_time, nc_sum, 2 * j))
                threshold = scipy.stats.chi2.isf(tile_fap, j)
                if verbose:
                    print "\t\t|- Threshold for this level: %f" % threshold
                spant, spanf = dof_tiles.shape[1] * dt, dof_tiles.shape[0] * df
                if verbose:
                    print "\t\t|- Processing %.2fx%.2f time-frequency map." % (
                        spant, spanf)
                # Since we clip the data, the start time needs to be adjusted accordingly
                window_offset_epoch = fs_data.epoch + psd_segment_length * window_fraction / 2
                window_offset_epoch = LIGOTimeGPS(float(window_offset_epoch))
                for i, j in zip(*numpy.where(dof_tiles > threshold)):
                    event = event_list.RowType()
                    # The points are summed forward in time and thus a `summed point' is the
                    # sum of the previous N points. If this point is above threshold, it
                    # corresponds to a tile which spans the previous N points. However, the
                    # 0th point (due to the convolution specifier 'valid') is actually
                    # already a duration from the start time. All of this means, the +
                    # duration and the - duration cancels, and the tile 'start' is, by
                    # definition, the start of the time frequency map if j = 0
                    # FIXME: I think this needs a + dt/2 to center the tile properly
                    event.set_start(window_offset_epoch + float(j * dt))
                    event.set_stop(window_offset_epoch + float(j * dt) +
                                   duration)
                    event.set_peak(event.get_start() + duration / 2)
                    event.central_freq = lal_filters[
                        0].f0 + band / 2 + i * df + 0.5 * df
                    event.duration = duration
                    event.bandwidth = df
                    event.chisq_dof = 2 * duration * df
                    event.snr = math.sqrt(dof_tiles[i, j] / event.chisq_dof -
                                          1)
                    # FIXME: Magic number 0.62 should be determined empirically
                    event.confidence = -lal.LogChisqCCDF(
                        event.snr * 0.62, event.chisq_dof * 0.62)
                    event.amplitude = None
                    event.process_id = None
                    event.event_id = event_list.get_next_id()
                    event_list.append(event)
                for event in event_list[::-1]:
                    if event.amplitude != None:
                        continue
                    etime_min_idx = float(event.get_start()) - float(
                        fs_data.epoch)
                    etime_min_idx = int(etime_min_idx / tmp_ts_data.delta_t)
                    etime_max_idx = float(event.get_start()) - float(
                        fs_data.epoch) + event.duration
                    etime_max_idx = int(etime_max_idx / tmp_ts_data.delta_t)
                    # (band / 2) to account for sin^2 wings from finest filters
                    flow_idx = int((event.central_freq - event.bandwidth / 2 -
                                    (df / 2) - fmin) / df)
                    fhigh_idx = int((event.central_freq + event.bandwidth / 2 +
                                     (df / 2) - fmin) / df)
                    # TODO: Check that the undersampling rate is always commensurate
                    # with the indexing: that is to say that
                    # mod(etime_min_idx, us_rate) == 0 always
                    z_j_b = tf_map[flow_idx:fhigh_idx,
                                   etime_min_idx:etime_max_idx:us_rate]
                    # FIXME: Deal with negative hrss^2 -- e.g. remove the event
                    try:
                        event.amplitude = measure_hrss(
                            z_j_b, unwhite_filter_ip[flow_idx:fhigh_idx],
                            unwhite_ss_ip[flow_idx:fhigh_idx - 1],
                            white_ss_ip[flow_idx:fhigh_idx - 1],
                            fd_psd.delta_f, tmp_ts_data.delta_t,
                            len(lal_filters[0].data.data), event.chisq_dof)
                    except ValueError:
                        event.amplitude = 0
                if verbose:
                    print "\t\t|- Total number of events: %d" % len(event_list)
        t_idx_min += int(seg_len * (1 - window_fraction))
        t_idx_max += int(seg_len * (1 - window_fraction))
    setname = "MagneticFields"
    __program__ = 'pyburst_excesspower_gnome'
    start_time = LIGOTimeGPS(int(ts_data.start_time))
    end_time = LIGOTimeGPS(int(ts_data.end_time))
    inseg = segment(start_time, end_time)
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    ifo = channel_name.split(":")[0]
    straindict = psd.insert_psd_option_group.__dict__
    proc_row = register_to_xmldoc(xmldoc,
                                  __program__,
                                  straindict,
                                  ifos=[ifo],
                                  version=git_version.id,
                                  cvs_repository=git_version.branch,
                                  cvs_entry_time=git_version.date)
    dt_stride = psd_segment_length
    sample_rate = ts_data.sample_rate
    # Amount to overlap successive blocks so as not to lose data
    window_overlap_samples = window_fraction * sample_rate
    outseg = inseg.contract(window_fraction * dt_stride / 2)
    # With a given dt_stride, we cannot process the remainder of this data
    remainder = math.fmod(abs(outseg), dt_stride * (1 - window_fraction))
    # ...so make an accounting of it
    outseg = segment(outseg[0], outseg[1] - remainder)
    ss = append_search_summary(xmldoc,
                               proc_row,
                               ifos=(station, ),
                               inseg=inseg,
                               outseg=outseg)
    for sb in event_list:
        sb.process_id = proc_row.process_id
        sb.search = proc_row.program
        sb.ifo, sb.channel = station, setname
    xmldoc.childNodes[0].appendChild(event_list)
    ifostr = ifo if isinstance(ifo, str) else "".join(ifo)
    st_rnd, end_rnd = int(math.floor(inseg[0])), int(math.ceil(inseg[1]))
    dur = end_rnd - st_rnd
    fname = "%s-excesspower-%d-%d.xml.gz" % (ifostr, st_rnd, dur)
    utils.write_filename(xmldoc, fname, gz=fname.endswith("gz"))
    plot_triggers(fname)
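
The degrees-of-freedom summation inside the loop above reduces to a per-row 'valid' convolution with a sparse [1, 0, 1, ...] filter; a minimal sketch of just that step on random tile energies:

import numpy as np
from scipy.signal import fftconvolve

rng = np.random.default_rng(6)
tiles = rng.random((16, 1024))                   # (channels, time) tile energies
j = 4                                            # number of summed (strided) samples

sum_filter = np.array([1, 0] * (j - 1) + [1], dtype=float)
tlen = tiles.shape[1] - sum_filter.shape[0] + 1
dof_tiles = np.zeros((tiles.shape[0], tlen))
for f in range(tiles.shape[0]):
    dof_tiles[f] = fftconvolve(tiles[f], sum_filter, 'valid')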
Esempio n. 53
0
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# DECONVOLUTION

#Define the sine sweep parameters
finf = 10
fsup = 22000
T = 7
fs = 48000
t = np.arange(0, T * fs) / fs

sinesweep = log_sinesweep(finf, fsup, T, t, fs)
sf.write('sinesweep.wav', sinesweep, fs)
inversefilter = inverse_filter(finf, fsup, T, t, sinesweep)
sf.write('inversefilter.wav', inversefilter, fs)
delta = sig.fftconvolve(sinesweep, inversefilter)
delta = delta / (np.abs(max(delta)))  # normalization
delta = delta[inversefilter.size - 1:]  # adjust length because of FFT
sf.write('deltaFarina.wav', delta, fs)

#Plot the sine sweep
freqSine, sinesweepdB = spectrumDBFS(sinesweep, fs)
plots(sinesweep, sinesweepdB, 'Logarithmic SineSweep x(t)', fs, freqSine)

freqInv, inversefilterdB = spectrumDBFS(inversefilter, fs)
plots(inversefilter, inversefilterdB, 'Inverse filter f(t)', fs, freqInv)

freqDelta, deltadB = spectrumDBFS(delta, fs)
plots(delta, deltadB, 'Delta d(t) = x(t) * f(t)', fs, freqDelta)

plots_allSpectrum(sinesweepdB, inversefilterdB, deltadB, 'Log. SineSweep',
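
Convolving the sweep with its inverse filter gives a 'full'-length result of len(x) + len(f) - 1 samples with the delta-like peak near index len(f) - 1, which is why the code above trims the first len(f) - 1 samples. A small sketch using a chirp and its time reversal as a stand-in for the true Farina inverse filter:

import numpy as np
from scipy import signal

fs, T = 48000, 1.0
t = np.arange(0, int(T * fs)) / fs
sweep = signal.chirp(t, f0=20.0, f1=20000.0, t1=T, method='logarithmic')

matched = sweep[::-1]                            # stand-in for the true inverse filter
pulse = signal.fftconvolve(sweep, matched)       # 'full': length 2*len(sweep) - 1
pulse = pulse / np.abs(pulse).max()              # normalization
pulse = pulse[matched.size - 1:]                 # compressed peak now sits near index 0
print(np.argmax(np.abs(pulse)))                  # ~0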
Esempio n. 54
0
            temp_stim, fs = sf.read(stimuli[0][0])

            print("Convolving", method.name, room.name, "...")
            convolved = np.zeros([len(stimuli_pos), method.channels, len(temp_stim) + max_len_RIR - 1])

            for file_ind, files in enumerate(stimuli):
                dry, fs = sf.read(files[0])
                RIR, fs = sf.read(files[1])

                RIR = RIR.T

                for RIR_ind, LS in enumerate(RIR):
                    if len(LS) < max_len_RIR:
                        padding = np.zeros(max_len_RIR - len(LS))
                        LS = np.concatenate([LS, padding])
                    convolved[file_ind][RIR_ind] = signal.fftconvolve(dry / len(stimuli), LS)

            output = np.sum(convolved, 0) # Sum the first dimension (each stimulus) such that there is one file per LS
            if output.shape[1] > max_len_file:
                max_len_file = output.shape[1]

            for ind, channel in enumerate(output):
                if apply_filter and method.name in methods_to_filter:
                    channel = signal.sosfilt(sos, channel)
                    channel = signal.sosfilt(sos2, channel)

                if max(channel) >= 1:
                    print("clipping")

                if mode is "TakeFive":
                    filename = os.path.join(output_dir, room.name + "_" + method.name + "_TakeFive_" + str(ind) + ".wav")
Example n. 55
def __myconvolve(in2, in1, mode):
    return sg.fftconvolve(in1, in2, mode)
Example n. 56
def generate_data(output_path='',
                  dataset='adhoc',
                  libri_path='/hdd/data/Librispeech/LibriSpeech',
                  noise_path='/hdd/data/Nonspeech'):
    assert dataset in ['adhoc', 'fixed'], "dataset can only be adhoc or fixed."

    if output_path == '':
        output_path = os.getcwd()

    data_type = ['train', 'validation', 'test']
    for i in range(len(data_type)):
        # path for config
        config_path = os.path.join(
            'configs', 'MC_Libri_' + dataset + '_' + data_type[i] + '.pkl')

        # load pickle file
        with open(config_path, 'rb') as f:
            configs = pickle.load(f)

        # sample rate is 16k Hz
        sr = 16000
        # signal length is 4 sec
        sig_len = 4

        # generate and save audio
        save_dir = os.path.join(output_path, 'MC_Libri_' + dataset,
                                data_type[i])
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        for utt in range(len(configs)):
            this_config = configs[utt]

            # load audio files
            speakers = this_config['speech']
            noise = this_config['noise']
            spk1, _ = sf.read(os.path.join(libri_path, speakers[0]))
            spk2, _ = sf.read(os.path.join(libri_path, speakers[1]))
            noise, _ = sf.read(os.path.join(noise_path, noise))

            # calculate signal length according to overlap ratio
            overlap_ratio = this_config['overlap_ratio']
            actual_len = int(sig_len / (2 - overlap_ratio)) * sr
            overlap = int(actual_len * overlap_ratio)
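            # Illustrative numbers (not from the original config): with
            # sig_len = 4 s, overlap_ratio = 0 gives each speaker
            # int(4 / 2) * sr = 2 s and a ~4 s mixture, while
            # overlap_ratio = 1 makes both speakers span the full ~4 s
            # (ignoring the extra tail added later by the RIR convolution).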

            # truncate speech according to start and end indexes
            start_idx = this_config['start_idx']
            end_idx = this_config['end_idx']
            spk1 = spk1[start_idx:end_idx]
            spk2 = spk2[start_idx:end_idx]

            # rescaling spk2 energy according to relative SNR
            spk1 = spk1 / np.sqrt(np.sum(spk1**2) + 1e-8) * 1e2
            spk2 = spk2 / np.sqrt(np.sum(spk2**2) + 1e-8) * 1e2
            spk2 = spk2 * np.power(10, this_config['spk_snr'] / 20.)

            # load locations and room configs
            mic_pos = np.asarray(this_config['mic_pos'])
            spk_pos = np.asarray(this_config['spk_pos'])
            noise_pos = np.asarray(this_config['noise_pos'])
            room_size = np.asarray(this_config['room_size'])
            rt60 = this_config['RT60']

            # generate RIR
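            # beta: wall reflection coefficients estimated from the target RT60
            # via Sabine's formula; nb_img: number of image sources per
            # dimension used by the image-source simulation.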
            beta = gpuRIR.beta_SabineEstimation(room_size, rt60)
            nb_img = gpuRIR.t2n(rt60, room_size)
            spk_rir = gpuRIR.simulateRIR(room_size, beta, spk_pos, mic_pos,
                                         nb_img, rt60, sr)
            noise_rir = gpuRIR.simulateRIR(room_size, beta, noise_pos, mic_pos,
                                           nb_img, rt60, sr)

            # convolve with RIR at different mic
            if dataset == 'adhoc':
                nmic = this_config['num_mic']
            else:
                nmic = 6
            for mic in range(nmic):
                spk1_echoic_sig = signal.fftconvolve(spk1, spk_rir[0][mic])
                spk2_echoic_sig = signal.fftconvolve(spk2, spk_rir[1][mic])

                # align the speakers according to overlap ratio
                actual_length = len(spk1_echoic_sig)
                total_length = actual_length * 2 - overlap
                padding = np.zeros(actual_length - overlap)
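                # Layout: spk1 occupies the first actual_length samples and
                # spk2 the last actual_length samples of the total_length-long
                # mixture, so they overlap by exactly `overlap` samples.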
                spk1_echoic_sig = np.concatenate([spk1_echoic_sig, padding])
                spk2_echoic_sig = np.concatenate([padding, spk2_echoic_sig])
                mixture = spk1_echoic_sig + spk2_echoic_sig

                # add noise
                noise = noise[:total_length]
                if len(noise) < total_length:
                    # repeat noise if necessary
                    num_repeat = total_length // len(noise)
                    res = total_length - num_repeat * len(noise)
                    noise = np.concatenate(
                        [np.concatenate([noise] * num_repeat), noise[:res]])
                noise = signal.fftconvolve(noise, noise_rir[0][mic])

                # rescaling noise energy
                noise = noise[:total_length]
                noise = noise / np.sqrt(np.sum(noise**2) + 1e-8) * np.sqrt(
                    np.sum(mixture**2) + 1e-8)
                noise = noise / np.power(10, this_config['noise_snr'] / 20.)

                mixture += noise

                # save waveforms
                this_save_dir = os.path.join(save_dir,
                                             str(nmic) + 'mic',
                                             'sample' + str(utt + 1))
                if not os.path.exists(this_save_dir):
                    os.makedirs(this_save_dir)
                sf.write(
                    os.path.join(this_save_dir,
                                 'spk1_mic' + str(mic + 1) + '.wav'),
                    spk1_echoic_sig, sr)
                sf.write(
                    os.path.join(this_save_dir,
                                 'spk2_mic' + str(mic + 1) + '.wav'),
                    spk2_echoic_sig, sr)
                sf.write(
                    os.path.join(this_save_dir,
                                 'mixture_mic' + str(mic + 1) + '.wav'),
                    mixture, sr)

            # print progress
            if (utt + 1) % (len(configs) // 5) == 0:
                print(
                    "{} configuration, {} set, {:d} out of {:d} utterances generated."
                    .format(dataset, data_type[i], utt + 1, len(configs)))
Example n. 57
def match_template(image,
                   template,
                   pad_input=False,
                   mode='constant',
                   constant_values=0):
    """Match a template to a 2-D or 3-D image using normalized correlation.

    The output is an array with values between -1.0 and 1.0. The value at a
    given position corresponds to the correlation coefficient between the image
    and the template.

    For `pad_input=True` matches correspond to the center and otherwise to the
    top-left corner of the template. To find the best match you must search for
    peaks in the response (output) image.

    Parameters
    ----------
    image : (M, N[, D]) array
        2-D or 3-D input image.
    template : (m, n[, d]) array
        Template to locate. It must be `(m <= M, n <= N[, d <= D])`.
    pad_input : bool
        If True, pad `image` so that output is the same size as the image, and
        output values correspond to the template center. Otherwise, the output
        is an array with shape `(M - m + 1, N - n + 1)` for an `(M, N)` image
        and an `(m, n)` template, and matches correspond to origin
        (top-left corner) of the template.
    mode : see `numpy.pad`, optional
        Padding mode.
    constant_values : see `numpy.pad`, optional
        Constant values used in conjunction with ``mode='constant'``.

    Returns
    -------
    output : array
        Response image with correlation coefficients.

    Notes
    -----
    Details on the cross-correlation are presented in [1]_. This implementation
    uses FFT convolutions of the image and the template. Reference [2]_
    presents similar derivations but the approximation presented in this
    reference is not used in our implementation.

    References
    ----------
    .. [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light
           and Magic.
    .. [2] Briechle and Hanebeck, "Template Matching using Fast Normalized
           Cross Correlation", Proceedings of the SPIE (2001).
           :DOI:`10.1117/12.421129`

    Examples
    --------
    >>> template = np.zeros((3, 3))
    >>> template[1, 1] = 1
    >>> template
    array([[0., 0., 0.],
           [0., 1., 0.],
           [0., 0., 0.]])
    >>> image = np.zeros((6, 6))
    >>> image[1, 1] = 1
    >>> image[4, 4] = -1
    >>> image
    array([[ 0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  1.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0., -1.,  0.],
           [ 0.,  0.,  0.,  0.,  0.,  0.]])
    >>> result = match_template(image, template)
    >>> np.round(result, 3)
    array([[ 1.   , -0.125,  0.   ,  0.   ],
           [-0.125, -0.125,  0.   ,  0.   ],
           [ 0.   ,  0.   ,  0.125,  0.125],
           [ 0.   ,  0.   ,  0.125, -1.   ]])
    >>> result = match_template(image, template, pad_input=True)
    >>> np.round(result, 3)
    array([[-0.125, -0.125, -0.125,  0.   ,  0.   ,  0.   ],
           [-0.125,  1.   , -0.125,  0.   ,  0.   ,  0.   ],
           [-0.125, -0.125, -0.125,  0.   ,  0.   ,  0.   ],
           [ 0.   ,  0.   ,  0.   ,  0.125,  0.125,  0.125],
           [ 0.   ,  0.   ,  0.   ,  0.125, -1.   ,  0.125],
           [ 0.   ,  0.   ,  0.   ,  0.125,  0.125,  0.125]])
    """
    check_nD(image, (2, 3))

    if image.ndim < template.ndim:
        raise ValueError("Dimensionality of template must be less than or "
                         "equal to the dimensionality of image.")
    if np.any(np.less(image.shape, template.shape)):
        raise ValueError("Image must be larger than template.")

    image_shape = image.shape

    float_dtype = _supported_float_type(image.dtype)
    image = image.astype(float_dtype, copy=False)

    pad_width = tuple((width, width) for width in template.shape)
    if mode == 'constant':
        image = np.pad(image,
                       pad_width=pad_width,
                       mode=mode,
                       constant_values=constant_values)
    else:
        image = np.pad(image, pad_width=pad_width, mode=mode)

    # Use special case for 2-D images for much better performance in
    # computation of integral images
    if image.ndim == 2:
        image_window_sum = _window_sum_2d(image, template.shape)
        image_window_sum2 = _window_sum_2d(image**2, template.shape)
    elif image.ndim == 3:
        image_window_sum = _window_sum_3d(image, template.shape)
        image_window_sum2 = _window_sum_3d(image**2, template.shape)

    template_mean = template.mean()
    template_volume = np.prod(template.shape)
    template_ssd = np.sum((template - template_mean)**2)
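    # The code below assembles Lewis' fast normalized cross-correlation:
    #   ncc = (sum(f*t) - window_sum(f) * mean(t))
    #         / sqrt((window_sum(f^2) - window_sum(f)^2 / n) * sum((t - mean(t))^2))
    # where window_sum is a sliding-window sum over the image f and n is the
    # number of template pixels (template_volume).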

    if image.ndim == 2:
        xcorr = fftconvolve(image, template[::-1, ::-1], mode="valid")[1:-1,
                                                                       1:-1]
    elif image.ndim == 3:
        xcorr = fftconvolve(image, template[::-1, ::-1, ::-1],
                            mode="valid")[1:-1, 1:-1, 1:-1]

    numerator = xcorr - image_window_sum * template_mean

    denominator = image_window_sum2
    np.multiply(image_window_sum, image_window_sum, out=image_window_sum)
    np.divide(image_window_sum, template_volume, out=image_window_sum)
    denominator -= image_window_sum
    denominator *= template_ssd
    np.maximum(denominator, 0,
               out=denominator)  # sqrt of negative number not allowed
    np.sqrt(denominator, out=denominator)

    response = np.zeros_like(xcorr, dtype=float_dtype)

    # avoid zero-division
    mask = denominator > np.finfo(float_dtype).eps

    response[mask] = numerator[mask] / denominator[mask]

    slices = []
    for i in range(template.ndim):
        if pad_input:
            d0 = (template.shape[i] - 1) // 2
            d1 = d0 + image_shape[i]
        else:
            d0 = template.shape[i] - 1
            d1 = d0 + image_shape[i] - template.shape[i] + 1
        slices.append(slice(d0, d1))

    return response[tuple(slices)]
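

# Usage sketch (not part of the original example): as the docstring says, the
# best match is found by searching for peaks in the response image. For the
# default pad_input=False this returns the top-left corner of the best window.
def _best_match_location(image, template):
    result = match_template(image, template)
    return np.unravel_index(np.argmax(result), result.shape)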
Example n. 58
    def preprocess(self,
                   scale,
                   num_scales,
                   coarsest_scale,
                   initialization='biharmonic',
                   constant_values=1.0,
                   init_coef=None,
                   mask_threshold=0.1):
        start = time.time()

        # image and mask at current scale
        image = self.get_image_at_scale(self.image, scale, num_scales,
                                        coarsest_scale)
        mask = self.get_mask_at_scale(self.mask, scale, num_scales,
                                      coarsest_scale, mask_threshold)

        # dimensions of the image at current scale
        im_ch, im_h, im_w = image.shape

        if scale == num_scales - 1:
            # initialize the coarsest scale
            if initialization is not None:
                if isinstance(initialization, np.ndarray):
                    image[:, mask] = self.get_image_at_scale(
                        initialization, scale, num_scales,
                        coarsest_scale)[:, mask]
                    # image[:,mask] = np.moveaxis(initialization,-1,0)[:,mask]
                elif isinstance(initialization, str):
                    if initialization == 'harmonic':
                        aniso_coef = np.ones_like(
                            mask
                        ) if init_coef is None else self.get_image_at_scale(
                            init_coef, scale, num_scales, coarsest_scale)
                        self.inpaint_PDE(image,
                                         mask,
                                         type='harmonic',
                                         aniso_coef=aniso_coef)
                    elif initialization == 'biharmonic':
                        self.inpaint_PDE(image, mask, type='biharmonic')
                    # elif initialization=='image':
                    # 	assert init_image is not None, "init_image must be given"
                    # 	image[:,mask] = self.get_image_at_scale(init_image, scale, num_scales, coarsest_scale)[:,mask]
                    elif initialization == 'constant':
                        image[:, mask] = constant_values
                    elif initialization == 'mean':
                        image[:, mask] = np.mean(self.image)
                    elif initialization == 'random':
                        image[:, mask] = np.random.rand(im_ch, im_h,
                                                        im_w)[:, mask]

            #######################################################################

            # find max kernel size
            self.max_ker_size_0 = self.max_ker_size_1 = 0
            for ker in self.filters:
                self.max_ker_size_0 = max(self.max_ker_size_0, ker.shape[0])
                self.max_ker_size_1 = max(self.max_ker_size_1, ker.shape[1])

            # find max patch size
            self.max_patch_size_0 = self.max_patch_size_1 = 0
            for feat in self.features:
                self.max_patch_size_0 = max(self.max_patch_size_0,
                                            feat.patch_shape[0])
                self.max_patch_size_1 = max(self.max_patch_size_1,
                                            feat.patch_shape[1])

            #######################################################################
        else:
            # upscale lowres image from the previous scale
            for ch in range(im_ch):
                image[ch, mask] = resize(self.curr_image[ch, ...],
                                         mask.shape,
                                         mode='reflect',
                                         order=1,
                                         anti_aliasing=False)[mask]

        # truncate values to the allowed limits
        image[image < 0] = 0.0
        image[image > 1] = 1.0

        #######################################################################

        # bounding box of the inpainting region at current scale
        inp_top_left_y, inp_top_left_x, inp_bot_rght_y, inp_bot_rght_x = op.masked_bounding_box(
            mask)

        # boundary extensions of the image and masks to account for nonlocal kernels and patches
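        # Illustrative numbers (not from the original source): if the bounding
        # box starts at inp_top_left_x = 3 while max_ker_size_0 = 7 and
        # max_patch_size_0 = 9, the required margin is 2*(7//2 + 9//2) = 14,
        # so ext_x_lft = |min(0, 3 - 14)| = 11 extra columns on the left.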
        ext_x_lft = abs(
            min(
                0, inp_top_left_x - 2 *
                (self.max_ker_size_0 // 2 + self.max_patch_size_0 // 2)))
        ext_y_top = abs(
            min(
                0, inp_top_left_y - 2 *
                (self.max_ker_size_1 // 2 + self.max_patch_size_1 // 2)))
        ext_x_rgt = abs(
            min(0, (im_w - 1) - inp_bot_rght_x - 2 *
                (self.max_ker_size_0 // 2 + self.max_patch_size_0 // 2)))
        ext_y_bot = abs(
            min(0, (im_h - 1) - inp_bot_rght_y - 2 *
                (self.max_ker_size_1 // 2 + self.max_patch_size_1 // 2)))
        print("Image extensions (l,r,t,b)   ... ",
              ((ext_x_lft, ext_x_rgt), (ext_y_top, ext_y_bot)))

        # pad image to account for nonlocal kernels and patches. Note that 'image' is redefined after this point
        image = np.pad(image, ((0, 0), (ext_y_top, ext_y_bot),
                               (ext_x_lft, ext_x_rgt)),
                       'constant',
                       constant_values=0)
        mask = np.pad(mask, ((ext_y_top, ext_y_bot), (ext_x_lft, ext_x_rgt)),
                      'constant',
                      constant_values=False)

        # dimensions of the padded image at current scale
        im_ch, im_h, im_w = self.im_shape = image.shape

        self.curr_image = image[:, ext_y_top:im_h - ext_y_bot,
                                ext_x_lft:im_w - ext_x_rgt]

        #######################################################################

        # linear indices of the masked pixels
        self.ind_dof = op.masked_indices(mask)

        # $O_*$ domain - extend mask to account for nonlocal kernels
        conv_target_mask = binary_dilation(mask,
                                           selem=morph.rectangle(
                                               self.max_ker_size_1,
                                               self.max_ker_size_0))

        # # $\tilde{O}_*$ domain - extend mask to account for patches
        # nonlocal_target_mask = binary_dilation( conv_target_mask, selem=morph.rectangle(self.max_patch_size_1,self.max_patch_size_0) )

        # # $\tilde{O}_*^c$ domain
        # nonlocal_source_mask = nonlocal_target_mask.copy()
        # p_h, p_w = (self.max_patch_size_0//2+self.max_ker_size_0//2, self.max_patch_size_1//2+self.max_ker_size_1//2)
        # nonlocal_source_mask[p_h:-p_h,p_w:-p_w] = np.logical_not( nonlocal_target_mask[p_h:-p_h,p_w:-p_w] )
        # # m_h, m_w = nonlocal_source_mask.shape
        # # nonlocal_source_mask[p_h:m_h-p_h,p_w:m_w-p_w] = np.logical_not( nonlocal_target_mask[p_h:m_h-p_h,p_w:m_w-p_w] )

        # imsave("./dbg/target_mask"+str(num_scales-scale-1)+".png",img_as_ubyte(nonlocal_target_mask),cmap='gray')
        # imsave("./dbg/source_mask"+str(num_scales-scale-1)+".png",img_as_ubyte(nonlocal_source_mask),cmap='gray')

        #######################################################################

        # pad confidence mask to account for nonlocal kernels and patches
        # self.confidence = self.get_image_at_scale(self.calculate_confidence_mask(mask, 0.1, 10.0), scale, num_scales, coarsest_scale).ravel()
        self.confidence = np.pad(self.calculate_confidence_mask(
            self.get_mask_at_scale(self.mask, scale, num_scales,
                                   coarsest_scale, mask_threshold)),
                                 ((ext_y_top, ext_y_bot),
                                  (ext_x_lft, ext_x_rgt)),
                                 'constant',
                                 constant_values=1).ravel()
        # self.confidence = np.pad( self.get_image_at_scale(self.conf_mask, scale, num_scales, coarsest_scale), ((ext_y_top,ext_y_bot),(ext_x_lft,ext_x_rgt)), 'constant', constant_values=1 ).ravel()

        #######################################################################

        # relative linear indices of pixels in zero-centered patches
        for feat in self.features:
            feat.patch_h = np.zeros(
                (feat.patch_shape[0] * feat.patch_shape[1], ), dtype=np.int32)
            for i in range(feat.patch_shape[0]):
                for j in range(feat.patch_shape[1]):
                    feat.patch_h[i * feat.patch_shape[1] +
                                 j] = (j - feat.patch_shape[1] // 2) + (
                                     i - feat.patch_shape[0] // 2) * im_w
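        # Example (not from the original source): for a 3x3 patch the relative
        # offsets are [-im_w-1, -im_w, -im_w+1, -1, 0, 1, im_w-1, im_w, im_w+1],
        # i.e. the linear indices of the 8-neighbourhood around a centre pixel.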

        # convolution matrices
        conv_mat_start = time.time()
        self.conv_mat = [
            op.conv2mat((im_h, im_w), k, dtype=_im_dtype).tocsr()
            for k in self.filters
        ]
        self.adj_conv_mat = [mat.T[self.ind_dof, :] for mat in self.conv_mat]
        conv_mat_time = time.time() - conv_mat_start

        # convolutions in the known part of the domain
        # self.convolutions = np.array([ [correlate2d(image[ch],k,mode='same',boundary='symm').ravel() for k in self.filters] for ch in range(im_ch) ])
        # self.convolutions = np.array([ fftconvolve( np.tile(image[ch][np.newaxis,...],(len(self.filters),1,1)), np.flip(np.array(self.filters),axis=(1,2)), mode='same', axes=(1,2)).reshape(len(self.filters),-1) for ch in range(im_ch) ])

        filt_start = time.time()
        self.convolutions = np.array([
            fftconvolve(np.tile(image[ch][np.newaxis, ...],
                                (len(self.filters), 1, 1)),
                        np.flip(op.stack_kernels(self.filters), axis=(1, 2)),
                        mode='same',
                        axes=(1, 2)).reshape(len(self.filters), -1)
            for ch in range(im_ch)
        ])
        # self.convolutions = np.array([ oaconvolve( np.tile(image[ch][np.newaxis,...],(len(self.filters),1,1)), np.flip(op.stack_kernels(self.filters),axis=(1,2)), mode='same', axes=(1,2)).reshape(len(self.filters),-1) for ch in range(im_ch) ])
        filt_time = time.time() - filt_start

        # preprocess features at the current scale
        for j, feat in enumerate(self.features):
            feat_max_ker_size_0 = feat_max_ker_size_1 = 0
            for active_ker in feat.active_filters:
                feat_max_ker_size_0 = max(feat_max_ker_size_0,
                                          self.filters[active_ker].shape[0])
                feat_max_ker_size_1 = max(feat_max_ker_size_1,
                                          self.filters[active_ker].shape[1])
            feat_conv_extension_kernel = np.ones(
                (feat_max_ker_size_0, feat_max_ker_size_1))
            feat_patch_extension_kernel = np.ones(feat.patch_shape)

            #######################################################################
            # nonlocal target and source masks with conv kernels and patch sizes of the given feature

            # $\tilde{O}_*$ domain
            global_feat_target_mask = binary_dilation(
                binary_dilation(mask, selem=feat_conv_extension_kernel),
                selem=feat_patch_extension_kernel)

            # $\tilde{O}_*^c$ domain
            global_feat_source_mask = global_feat_target_mask.copy()
            p_h, p_w = (feat.patch_shape[0] // 2 + self.max_ker_size_0 // 2,
                        feat.patch_shape[1] // 2 + self.max_ker_size_1 // 2)
            global_feat_source_mask[p_h:-p_h, p_w:-p_w] = np.logical_not(
                global_feat_target_mask[p_h:-p_h, p_w:-p_w])

            #######################################################################

            feat_target_mask = np.pad(feat.target_mask_pyramid[scale],
                                      ((ext_y_top, ext_y_bot),
                                       (ext_x_lft, ext_x_rgt)),
                                      'constant',
                                      constant_values=False)
            feat_target_mask = binary_dilation(
                feat_target_mask, selem=feat_conv_extension_kernel)
            feat.conv_inp_ind = op.masked_indices(
                np.logical_and(feat_target_mask, conv_target_mask))
            feat_target_mask = binary_dilation(
                feat_target_mask, selem=feat_patch_extension_kernel)
            feat_target_mask = np.logical_and(feat_target_mask,
                                              global_feat_target_mask)

            feat_source_mask = np.pad(feat.source_mask_pyramid[scale],
                                      ((ext_y_top, ext_y_bot),
                                       (ext_x_lft, ext_x_rgt)),
                                      'constant',
                                      constant_values=False)
            feat_source_mask = binary_dilation(
                binary_dilation(feat_source_mask,
                                selem=feat_conv_extension_kernel),
                selem=feat_patch_extension_kernel)
            feat_source_mask = np.logical_and(feat_source_mask,
                                              global_feat_source_mask)

            feat.source_ind = op.masked_indices(feat_source_mask)
            feat.target_ind = op.masked_indices(feat_target_mask)
            # feat.source_ind2 = feat.source_ind.copy()

            #######################################################################

            feat.lambdas = np.pad(feat.lambda_pyramid[scale],
                                  ((0, 0), (ext_y_top, ext_y_bot),
                                   (ext_x_lft, ext_x_rgt)),
                                  'constant',
                                  constant_values=0.0).reshape(
                                      feat.lambda_pyramid[scale].shape[0], -1)

            #######################################################################

            feat.build_nnf_index(scale,
                                 self.convolutions.reshape(-1, im_h, im_w),
                                 feat_target_mask, feat_source_mask)

            #######################################################################

            beta_sum = feat.beta if j == 0 else beta_sum + feat.beta

        # normalize betas
        beta_sum[beta_sum < 1.e-7] = 1e-7
        for feat in self.features:
            feat.beta /= beta_sum
            # feat.beta[beta_sum<1.e-9] = 1.0/self.num_features

        print("Preprocessing                ... %6.3f sec" %
              (time.time() - start))
        print("   incl. eval. filters       ... %6.3f sec" % (filt_time))
        print("   incl. eval. matricies     ... %6.3f sec" % (conv_mat_time))
        return image
Example n. 59
    templateFft = np.fft.rfft(windowedTemplate)  #/ nPoints**2/windowNorm
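    # The next block builds the frequency-domain optimal (matched) filter,
    # i.e. the conjugate of the template spectrum divided by the noise power
    # spectral density, and the corresponding (squared) noise-equivalent power.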

    phFilter_f = templateFft.conjugate() / noisePsd

    NEPsq = noisePsd / (templateFft * templateFft.conjugate())
    df = 1. / (nPoints * pulses.dt)
    deltaVrms = np.sqrt(np.sum(df / NEPsq))

    phFilter_t = np.fft.irfft(phFilter_f, nPoints)
    #phFilter_t = np.roll(phFilter_t, nPoints//2)
    assert (len(phFilter_t) == nPoints)

    from scipy.signal import fftconvolve

    templatePulseFiltered = fftconvolve(windowedTemplate,
                                        phFilter_t,
                                        mode='full')
    iMax = np.argmax(templatePulseFiltered)
    templateAmp = templatePulseFiltered[iMax]

    print('Filtering all pulses.', end='')
    ophs = []
    for i, pulse in enumerate(pulses):
        if i % 10 == 0:
            print('.', end='')
        y = (pulse.Vsquid - pulse.preTriggerBaseline()) * window
        filteredPulse = fftconvolve(y, phFilter_t, mode='full')
        ph = filteredPulse[iMax]
        ophs.append(ph)
    ophs = np.asarray(ophs)
Example n. 60
    def __init__(self, fn):

        tag = os.path.splitext(fn)[0]
        normal_fn = '%s_normal_edges.txt' % tag
        druse_fn = '%s_druse_edges.txt' % tag
        isos_fn = '%s_isos_depth.txt' % tag
        for outfn in [normal_fn, druse_fn, isos_fn]:
            if os.path.exists(outfn):
                print('%s exists.' % outfn)
                return
        im = imread(fn)
        prof = im.mean(axis=1)
        zmin = np.argmax(prof) - 100
        zmax = np.argmax(prof) + 100
        subim = im[zmin:zmax, :]
        kernel = np.ones((5, 5))
        subim = fftconvolve(subim, kernel, mode='same')
        subim = np.log(subim)
        ssy, ssx = subim.shape
        y1, y2 = ssy // 2 - 50, ssy // 2 + 50
        x1, x2 = ssx // 2 - 50, ssx // 2 + 50
        cmin, cmax = np.percentile(subim[y1:y2, x1:x2], (0, 99.5))
        subim = (subim - cmin) / (cmax - cmin)
        subim = (subim.clip(0, 1) * 255.0).astype(np.uint8)
        xclicks, yclicks, junk = collector(
            [subim], titles=['click multiple points along IS/OS band'])

        xtemp = []
        ytemp = []

        # search a bit to make sure we've got the brightest pixel:
        rad = 1
        for xc, yc in zip(xclicks, yclicks):
            xc = int(xc)
            yc = int(yc)
            col = subim[yc - rad:yc + rad + 1, xc]
            shift = np.argmax(col) - rad  # offset of the brightest pixel within the (2*rad+1)-pixel window
            yc = yc + shift
            xtemp.append(xc)
            ytemp.append(yc)

        xclicks = xtemp
        yclicks = ytemp

        xclicks = [0] + xclicks + [subim.shape[1] - 1]
        yclicks = [yclicks[0]] + yclicks + [yclicks[-1]]

        xclicks = np.array(xclicks)
        yclicks = np.array(yclicks)

        idx = np.argsort(xclicks)

        xclicks = xclicks[idx]
        yclicks = yclicks[idx]

        x = np.arange(subim.shape[1])
        interpolator = interp1d(xclicks, yclicks, kind='cubic')

        isos_position = interpolator(x) + zmin

        plt.imshow(np.log(im), cmap='gray', interpolation='none')
        plt.autoscale(False)
        plt.plot(x, isos_position)
        plt.plot(xclicks, yclicks + zmin, 'rx')
        plt.savefig('%s_marked.png' % tag, dpi=300)
        np.savetxt(isos_fn, isos_position)
        plt.close()

        xclicks, yclicks, junk = collector(
            [subim], titles=['click edges of normal retina'])
        np.savetxt(normal_fn, np.round(np.sort(xclicks)))
        xclicks, yclicks, junk = collector([subim],
                                           titles=['click edges of druse'])
        np.savetxt(druse_fn, np.round(np.sort(xclicks)))