def featArray(data, timesObs):
    freqs1 = n.linspace(100, 200, n.shape(data)[1])
    sh = n.shape(data)
    CW1mean = n.zeros_like(data)
    for i in range(CW1mean.shape[1]):
        CW1 = cwt(n.abs(data[:, i]), haar, n.arange(1, 10, 1))
        CW1 = n.ma.masked_where(CW1 == 0, CW1)
        CW1mean[:, i] = n.ma.mean(n.abs(CW1), 0)

    CT1mean = n.zeros_like(data)
    for j in range(CW1mean.shape[0]):
        CT1 = cwt(data[j, :], signal.morlet, n.arange(1, 3, 1))
        CT1 = n.ma.masked_where(CT1 == 0, CT1)
        CT1mean[j, :] = n.mean(n.abs(CT1), 0)
    processed = ndimage.sobel(n.abs(data))
    X1 = n.zeros((sh[0] * sh[1], 5))
    X1[:, 0] = (n.real(data)).reshape(sh[0] * sh[1])
    X1[:, 1] = (n.imag(data)).reshape(sh[0] * sh[1])
    #X1[:,2] = n.abs(CW1mean).reshape(sh[0]*sh[1])
    X1[:, 2] = n.abs(CT1mean).reshape(sh[0] * sh[1])
    #    X1[:,2] = n.log10(n.abs(processed)).reshape(sh[0]*sh[1])
    X1[:, 3] = (n.array([timesObs] * sh[1])).reshape(sh[0] * sh[1])
    X1[:, 4] = (n.array([freqs1] * sh[0])).reshape(sh[0] * sh[1])
    X1 = n.nan_to_num(X1)
    #    for m in range(n.shape(X1)[1]):
    #        X1[:,m] = X1[:,m]/X1[:,m].max()
    X1 = normalize(X1, norm='l2', axis=1)
    X1 = n.nan_to_num(X1)
    return X1
Example #2
def CW_SSIM(x, y, width=30, k=0.01):
    widths = np.arange(1, width + 1)

    # Use the image data as arrays
    sig1 = x
    sig2 = y

    # Convolution
    cwtmatr1 = signal.cwt(sig1, signal.ricker, widths)
    cwtmatr2 = signal.cwt(sig2, signal.ricker, widths)

    # Compute the first term
    c1c2 = np.multiply(abs(cwtmatr1), abs(cwtmatr2))
    c1_2 = np.square(abs(cwtmatr1))
    c2_2 = np.square(abs(cwtmatr2))
    num_ssim_1 = 2 * np.sum(c1c2, axis=0) + k
    den_ssim_1 = np.sum(c1_2, axis=0) + np.sum(c2_2, axis=0) + k

    # Compute the second term
    c1c2_conj = np.multiply(cwtmatr1, np.conjugate(cwtmatr2))
    num_ssim_2 = 2 * np.abs(np.sum(c1c2_conj, axis=0)) + k
    den_ssim_2 = 2 * np.sum(np.abs(c1c2_conj), axis=0) + k

    # Construct the result
    ssim_map = (num_ssim_1 / den_ssim_1) * (num_ssim_2 / den_ssim_2)

    # Average the per pixel results
    index = np.average(ssim_map)
    return index
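A minimal usage sketch for the CW_SSIM helper above, assuming numpy (as np) and scipy.signal (as signal) are imported alongside the function; the inputs are equal-length 1-D signals, and identical inputs score close to 1.0:

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(256)
y = x + 0.05 * rng.standard_normal(256)

print(CW_SSIM(x, x, width=10))  # ~1.0 for identical signals
print(CW_SSIM(x, y, width=10))  # slightly lower for a noisy copy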
Example #3
def featTargetArray(data,timesObs,maskedLST):
    freqs1 = n.linspace(100,200,data.shape[1])
    sh = n.shape(data)
    CW1mean = n.zeros_like(data)
    for i in range(CW1mean.shape[1]):
        CW1 = cwt(n.abs(data[:,i]),haar,n.arange(1,10,1))
        CW1 = n.ma.masked_where(CW1==0,CW1)
        CW1mean[:,i] = n.ma.mean(n.abs(CW1),0)

    CT1mean = n.zeros_like(data)
    for j in range(CW1mean.shape[0]):
        CT1 = cwt(data[j,:],signal.morlet,n.arange(1,3,1))
        CT1 = n.ma.masked_where(CT1==0,CT1)
        CT1mean[j,:] = n.mean(n.abs(CT1),0)
    X1 = n.zeros((sh[0]*sh[1],6))
    X1[:,0] = (n.real(data)).reshape(sh[0]*sh[1])
    X1[:,1] = (n.imag(data)).reshape(sh[0]*sh[1])
    #X1[:,2] = n.abs(CW1mean).reshape(sh[0]*sh[1])
    X1[:,2] = n.log10(n.abs(CT1mean)).reshape(sh[0]*sh[1])
    X1[:,3] = (n.array([timesObs]*sh[1])).reshape(sh[0]*sh[1])
    X1[:,4] = (n.array([freqs1]*sh[0])).reshape(sh[0]*sh[1])
    X1[:,5] = (maskedLST[0:sh[0],:]).reshape(sh[0]*sh[1])
    X1 = n.nan_to_num(X1)
    for m in range(n.shape(X1)[1]):
        X1[:,m] = X1[:,m]/n.abs(X1[:,m]).max()
    X1[n.abs(X1)>10**100] = 0
#    X1 = normalize(X1,norm='l2',axis=1)
    X1 = n.nan_to_num(X1)
    return X1
Example #4
def continuous_wavelet(series, freqs=None, bandwidth=4.5, phase=False, **kwargs):
    """
    Construct a continuous wavelet transform for the data series.
    Extra pars are parameters for the Morlet wavelet.
    Returns a tuple (time-frequency matrix, frequencies, times)
    If phase=True, returns the phase, else returns the amplitude
    """
    if freqs is None:
        # define some default LFP frequencies of interest
        freqlist = [np.arange(1, 13), np.arange(15, 30, 3), np.arange(35, 100, 5)]
        freqs = np.concatenate(freqlist)

    dt = series.index[1] - series.index[0]
    wav = _make_morlet(bandwidth)
    scales = bandwidth / (2 * np.pi * freqs * dt)
    rwavelet = lambda N, b: np.real(wav(N, b))
    iwavelet = lambda N, b: np.imag(wav(N, b))
    tfr = ssig.cwt(series.values, rwavelet, scales)
    tfi = ssig.cwt(series.values, iwavelet, scales)

    tf = tfr ** 2 + tfi ** 2
    if phase:
        # return tf rescaled to unit circle
        tf = (tfr + 1j * tfi) / tf

    return pd.DataFrame(tf.T, columns=freqs, index=series.index)
def wavelet_feature(im, widths=[1, 4, 16]):
    """Feature extraction using wavelet transform
		 
		 Reference:
			 Scipy documentation for wavelet transformation:
			 https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.signal.cwt.html
		 
		Parameters:
			im : an input grayscale or rgb image
			widths : widths of the wavelet transformation
			
		Returns:
			cwt_feature: Wavelet Transformed data
		
	"""

    # convert rgb to grayscale if needed
    if im.ndim == 3:
        image = rgb2gray(im)
    else:
        image = np.atleast_2d(im)

    # apply the wavelet transformation vertically and horizontally
    cwt_hor = [0] * im.shape[0] * len(widths)
    for i in range(im.shape[0]):
        cwt_hor += signal.cwt(image[i, :], signal.ricker, widths).ravel()

    cwt_ver = [0] * im.shape[1] * len(widths)
    for i in range(im.shape[1]):
        cwt_ver += signal.cwt(image[:, i], signal.ricker, widths).ravel()

    cwt_feature = np.concatenate((cwt_hor, cwt_ver))

    return cwt_feature
Example #6
def acwt(sig, widths):
    """ Reconstruct an analytic signal using CWT"""
    magical_constant = 16.854880972  # this doesn't actually scale properly
    # Somehow, I'm still seeing dependence on the number of widths.
    cwt_r = signal.cwt(sig, signal.ricker, widths)
    cwt_i = signal.cwt(sig, CWTlets.ricker_i, widths)
    a_sig = lazy_icwt(cwt_r, widths) + 1j * lazy_icwt(cwt_i, widths)
    return a_sig  #* magical_constant
Example #7
    def doBaseCalling(
        self, params=(1.61, 0.1, 6, 1.38,
                      12)):  # TODO: better default parameter estimation
        """
            Perform a base calling. (Only do that after cutoutAuto, baseline, skyline and noiseCorrection have
            been applied.)

            @param params Basecalling parameters.
        """
        # get the "maximal" trace
        maxTrace = []
        for s in self['Z']:
            maxVal = max(s)
            maxTrace.append(maxVal)
        # ok, in the maximal trace, search for local minima;
        # this allows us to reduce the problem, since between
        # each two consecutive local minima there's a local maximum
        # continuous wavelet transformation of this curve
        mCwt = signal.cwt(maxTrace, signal.ricker, [params[0]])[0]
        # calculate the continuous wavelet transformation
        # for each trace (for later)
        for key in self.getNucs():
            cwt = signal.cwt(self[key], signal.ricker, [params[1]])[0]
            setattr(self, "CWT_%s" % key, cwt)
        # Search for local minima in the transformation
        minimas = signal.argrelextrema(mCwt, np.less)[0]
        # ensure that the end point of the trace is included
        if minimas[-1] < self.length - 2:
            minimas = np.append(minimas, self.length - 1)
        # some helper functions
        def annotate(traceP, cwtP, params):
            x = abs(max(0, max(cwtP)) - max(cwtP[0], cwtP[-1]))
            # prevent overflow
            if (-params[3] * (x - params[4])) < -100: return 1.0
            if (-params[3] * (x - params[4])) > 100: return 0
            # calculate precisely
            return 1 / (1 + np.e**(-params[3] * (x - params[4])))

        # Now window between two consecutive minima
        startMin, lst = 0, []
        for minima in minimas:
            # convenience renaming
            start, stop = startMin, minima
            # check if enough data
            if stop - start <= params[2]:
                lst.append((start, stop, None, None, None, None))
            else:
                peakData = []
                for key in self.getNucs():
                    peakData.append(
                        annotate(self[key][start:stop + 1],
                                 getattr(self, 'CWT_%s' % key)[start:stop + 1],
                                 params))
                lst.append((start, stop, peakData[0], peakData[1], peakData[2],
                            peakData[3]))
            startMin = minima
        # return the matrix
        return lst
Example #8
    def doBaseCalling(self, params=(1.61, 0.1, 6, 1.38, 12)): # TODO: better default parameter estimation
        """
            Perform a base calling. (Only do that after cutoutAuto, baseline, skyline and noiseCorrection have
            been applied.)

            @param params Basecalling parameters.
        """
        # get the "maximal" trace
        maxTrace = []
        for s in self['Z']:
            maxVal = max(s)
            maxTrace.append(maxVal)
        # ok, in the maximal trace, search for local minima;
        # this allows us to reduce the problem, since between
        # each two consecutive local minima there's a local maximum
        # continuous wavelet transformation of this curve
        mCwt = signal.cwt(maxTrace, signal.ricker, [params[0]])[0]
        # calculate the continuous wavelet transformation
        # for each trace (for later)
        for key in self.getNucs():
            cwt = signal.cwt(self[key], signal.ricker, [params[1]])[0]
            setattr(self, "CWT_%s" % key, cwt)
        # Search for local minima in the transformation
        minimas = signal.argrelextrema(mCwt, np.less)[0]
        # ensure that the end point of the trace is included
        if minimas[-1] < self.length - 2:
            minimas = np.append(minimas, self.length - 1)
        # some helper functions
        def annotate(traceP, cwtP, params):
            x = abs(max(0, max(cwtP)) - max(cwtP[0], cwtP[-1]))
            # prevent overflow
            if (-params[3]*(x-params[4])) < -100: return 1.0
            if (-params[3]*(x-params[4])) > 100: return 0
            # calculate precisely
            return 1 / (1 + np.e ** (-params[3]*(x-params[4])))
        # Now window between two consecutive minima
        startMin, lst = 0, []
        for minima in minimas:
            # convenience renaming
            start, stop = startMin, minima
            # check if enough data
            if stop - start <= params[2]:
                lst.append((start, stop, None, None, None, None))
            else:
                peakData = []
                for key in self.getNucs():
                    peakData.append(
                        annotate(self[key][start:stop+1], getattr(self, 'CWT_%s' % key)[start:stop+1], params)
                    )
                lst.append((start, stop, peakData[0], peakData[1], peakData[2], peakData[3]))
            startMin = minima
        # return the matrix
        return lst
Example #9
    def get_wavelets(self, arid, logarithmic=True, log_after=True):
        waveforms = self.get_waveforms(arid)
        wavelets = [None, None, None]
        for i, waveform in enumerate(waveforms):
            if waveform is None:
                continue
            if logarithmic and not log_after:
                for j, value in enumerate(waveform):
                    if value >= 0:
                        value = math.log10(value + 1)
                    else:
                        value = -math.log10(abs(value) + 1)
                    waveform[j] = value
            widths = np.arange(1, 41)
            wavelet = signal.cwt(waveform, signal.ricker, widths)
            if logarithmic and log_after:
                wavelet_row, wavelet_columns = wavelet.shape
                for k in range(wavelet_row):
                    for l in range(wavelet_columns):
                        if wavelet[k][l] >= 0:
                            wavelet[k][l] = math.log10(wavelet[k][l] + 1)
                        else:
                            wavelet[k][l] = -math.log10(abs(wavelet[k][l]) + 1)
            wavelets[i] = wavelet

        return wavelets
Example #10
def cwt_scipy():
    st = obspy.read()[0].data

    t = np.linspace(-1, 1, 200, endpoint=False)
    sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)

    print(np.shape(st), st.dtype)
    print(np.shape(sig), sig.dtype)
    widths = np.arange(1, 100)
    cwtmatr = abs(signal.cwt(st, signal.morlet2, widths))

    # t = np.linspace(0, dt * npts, npts)
    # x, y = np.meshgrid(t, np.logspace(np.log10(1), np.log10(50), cwtmatr.shape[0]))

    # print(np.shape(y))

    # plt.pcolormesh(x, y, np.abs(cwtmatr), cmap=obspy_sequential)

    # plt.yscale('log')
    plt.imshow(cwtmatr, aspect='auto')

    # plt.imshow(cwtmatr, extent=[-1, 1, 1, 30], cmap='PRGn', aspect='auto',
    #            vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
    plt.savefig('cwt_scipy.png')
    plt.close()
Example #11
def morlet_wavelet_transform(X,fs=250,freq_range=(1,15),freq_bins=100,w=5):
    '''
    Continuous wavelet transform (at discrete scales) of EEG data using a complex Morlet wavelet
    INPUTS:
    X - EEG data (num_trials, num_eeg_electrodes, time_bins, 1)
    fs - sampling rate in Hz
    freq_range - tuple containing the min and max frequencies to analyze
    freq_bins - number of frequency points within the analyzed range
    w - Omega0 for the complex Morlet wavelet
    OUTPUTS:
    X_cwt - Wavelet-transformed EEG data (num_trials, num_eeg_electrodes, freq_bins, time_bins)
    '''
    
    N_trials,N_eegs,time_bins,_ = X.shape
    
    # values for cwt
    freq = np.linspace(freq_range[0],freq_range[1],freq_bins)
    widths = w * fs / (2 * freq * np.pi) 
    X_cwt = np.zeros((N_trials,N_eegs,widths.shape[0],time_bins))
    
    print('Performing discrete CWT convolutions...')
    for trial in tqdm_notebook(range(N_trials), desc='Trials'):
        for eeg in tqdm_notebook(range(N_eegs), desc='EEG Channel', leave=False): 
            X_cwt[trial,eeg,:,:] = np.abs(signal.cwt(np.squeeze(X[trial,eeg,:,]),signal.morlet2,widths,w=w))

    return X_cwt
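A standalone sketch of the width-to-frequency mapping used above, widths = w * fs / (2 * pi * f) as given in the scipy.signal.morlet2 documentation; the 10 Hz test tone and parameter values here are illustrative assumptions:

import numpy as np
from scipy import signal

fs, w = 250, 5                          # sampling rate (Hz) and Morlet omega0
t = np.arange(0, 4, 1 / fs)
x = np.sin(2 * np.pi * 10 * t)          # 10 Hz test tone

freq = np.linspace(1, 15, 100)
widths = w * fs / (2 * freq * np.pi)    # same mapping as in morlet_wavelet_transform
tfr = np.abs(signal.cwt(x, signal.morlet2, widths, w=w))
print(freq[tfr.mean(axis=1).argmax()])  # peak energy lands near 10 Hz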
Example #12
def cwt_coefficients(x, param):
    """
    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"widths":x, "coeff": y, "w": z} with x array of int and y,z int
    :type param: list
    :return: the different feature values
    :return type: pandas.Series
    """

    calculated_cwt = {}
    res = []
    indices = []

    for parameter_combination in param:
        widths = parameter_combination["widths"]
        w = parameter_combination["w"]
        coeff = parameter_combination["coeff"]

        if widths not in calculated_cwt:
            calculated_cwt[widths] = cwt(x, ricker, widths)

        calculated_cwt_for_widths = calculated_cwt[widths]

        indices += ["widths_{}__coeff_{}__w_{}".format(widths, coeff, w)]

        i = widths.index(w)
        if calculated_cwt_for_widths.shape[1] <= coeff:
            res += [np.NaN]
        else:
            res += [calculated_cwt_for_widths[i, coeff]]

    return res
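A small usage sketch for the list-based cwt_coefficients above; the widths entry is given as a tuple (an assumption consistent with the dict caching and the .index() call), and numpy plus cwt/ricker from scipy.signal are assumed to be imported:

import numpy as np
from scipy.signal import cwt, ricker

x = np.sin(np.linspace(0, 20 * np.pi, 500)) + 0.1 * np.random.randn(500)
param = [
    {"widths": (2, 5, 10, 20), "coeff": 14, "w": 5},
    {"widths": (2, 5, 10, 20), "coeff": 14, "w": 10},
]
values = cwt_coefficients(x, param)  # one CWT coefficient per parameter combination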
Example #13
 def segmentize(cls, x):
     """Segmentizes the signal.
     
     Arguments:
         x       Input signal.
     """
     # changes using CWT
     coefs = signal.cwt(x, signal.ricker, [cls.cwtCoef])[0]
     extrems = signal.argrelextrema(coefs, np.greater, order=cls.edgeOrder)[0]
     try:
         if extrems[0] != 0:
             extrems = np.concatenate(([0],extrems))
     except IndexError:
         extrems = np.concatenate(([0],extrems))
     try:
         if extrems[-1] != len(x) - 1:
             extrems = np.concatenate((extrems,[len(x) - 1]))
     except IndexError:
         extrems = np.concatenate((extrems,[len(x) - 1]))
     # segment borders
     segmentBorders = np.array([ (extrems[i-1],extrems[i]) for i in range(1,len(extrems)) ])
     # segments
     segments = np.array([ x[b[0]:b[1]] for b in segmentBorders])
     segmentList = []
     lensum = 0
     for segment in segments:
         segmentInstance = cls(segment, lensum)
         segmentList.append(segmentInstance)
         lensum += segmentInstance.len()
     return segmentList
Example #14
    def peakIdentifing(self):
        """
            Identify peaks in the chromatogram for the different traces.
            Do cutout first!

            @return A dictionary keyed by A, C, T, G with the corresponding peak positions.
        """
        try: return self.peaks
        except: pass
        result = {}
        for key in self.getNucs():
            #mCwt = signal.cwt(self[key], signal.ricker, [0.1])[0]
            #maximas = signal.argrelextrema(mCwt, np.greater)[0]
            #result[key] = signal.argrelextrema(self[key], np.greater)[0] #maximas #[(x, self[key][x], mCwt[x]) for x in maximas]
            maximas = set([(val, "X") for val in findMaximas(self[key])])
            cwtTrans = signal.cwt(self[key], signal.ricker, np.arange(1, 20)) # todo width parameter estimation
            for cwtTran in cwtTrans:
                for maxima in findMaximas(cwtTran):
                    maximas.add((maxima, "O"))
            # filter maximas
            maximas = filterPeaks(self[key], sorted(list(maximas), key=lambda x : x[0]))
            # save maximas
            result[key] = maximas
        self.peaks = result
        return result
Example #15
def QRS_detection(dat, widths):
    cwtmatr = signal.cwt(dat, signal.ricker, widths)
    difference_ecg = scipy.fftpack.diff(cwtmatr[3], order=1, period=1000)
    hilbert_ecg = scipy.fftpack.hilbert(difference_ecg)
    V = hilbert_ecg**2 + difference_ecg**2
    V = V / V.max()
    return V
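A hedged usage sketch for QRS_detection above: the synthetic pulse train here stands in for a real ECG lead, and widths must contain at least four scales because the function indexes cwtmatr[3]:

import numpy as np

fs = 250.0
t = np.arange(0, 10, 1 / fs)
# crude stand-in for an ECG: a narrow Gaussian "R peak" once per second
dat = np.exp(-((t % 1.0) - 0.5) ** 2 / (2 * 0.01 ** 2))

V = QRS_detection(dat, np.arange(1, 10))  # detection envelope normalized to [0, 1]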
def testWavelet():
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    #Time scale
    X = np.arange(0,data_ctr*xbee_ave[1], xbee_ave[1])
    print "X: "
    print X
    #Frequency scale
    Y = np.arange(1, data_ctr, 1)
    widths = Y
    print "Y"
    print Y
    X, Y = np.meshgrid(X, Y)
    data = np.random.rand(20) - 0.5
    wavelet = signal.ricker
    #widths = np.arange(1,data_ctr + 1)
    cwtmatr = signal.cwt(data_in[1,0:data_ctr],wavelet,widths)
    print "cwtr.shape"
    print cwtmatr.shape
    print "cwtr matrix"
    print cwtmatr
    ax.plot_surface(X,Y,cwtmatr, rstride=1,cmap=cm.spectral, cstride=1,linewidth=0, antialiased=False)
    plt.xlabel('Time(ms)')
    plt.ylabel('Scale(1/f)')
    #
    #plt.zlabel('Amplitude')
    plt.show()
    return
def MyCWT(data):
    #-- initialize the raw data --
    # x = np.arange(len(data))
    # y = data

    # Fs = 500000 # sampling frequency: 500 000 Hz
    #             # sampling period: 2 us
    # #-- scale computation --
    # wavename = 'gaus1'
    # totalscal = 64;    # length of the scale sequence

    # #Fc = 2000;          # wavelet center frequency (Hz) (the spacing between main peaks = 2000 Hz)
    #                     #Fc = pywt.central_frequency(wavename, precision=8)
    # Fc = pywt.central_frequency(wavename)

    # C = 2*Fc*totalscal; # C is a constant, C = 2*Fc/totalscal
    # scal= C/np.arange(1,totalscal+1);   # scale sequence, range (2*Fc, inf)

    # #-- continuous wavelet transform --
    # # coef,freqs = pywt.cwt(y,scal,wavename)

    # # return coef ,freqs
    # data = map(float,data)
    # data1  = map(lambda x: float(x),data)
    data1 = np.ones(len(data), float)
    for i in range(0, len(data)):
        data1[i] = float(data[i])

    widths = np.arange(1, 31)

    cwtmatr = signal.cwt(data1, signal.ricker, widths)
    return cwtmatr
    def process(self, inData):

        progStep = 100.0 / len(inData)
        prog = 0
        outData = []
        chi = 0

        ##        winName = (self.winTypeChooser.currentText())
        ##        if winName in self.specWindows:
        ##            winParam = self.specWindows[winName][1].value()
        ##            window = (winName, winParam)
        ##        else:
        ##            window = winName
        ##
        ##        maxFreq = self.maxFreqEdit.value()

        widths = np.arange(self.minScaleEdit.value(),
                           self.maxScaleEdit.value() + 1)
        wavelet = signal.ricker
        ##        wavelet = signal.morlet
        for chData in inData:
            cwtmatr = signal.cwt(chData, wavelet, widths)
            ##            print(type(cwtmatr))
            ##            print(np.shape(cwtmatr))
            cwtmatr = abs(cwtmatr)
            outData.append(cwtmatr)
            prog = prog + progStep
            self.progress.emit(int(prog))
            chi += 1

        return outData
Example #19
 def scalo(self,
           window,
           freqs,
           start,
           end,
           step=100,
           lib='pywt'):  #window in sec,freqs in Hz, step in ms
     div = 1
     X = self.signal[start:end, :]
     if self.downsample:
         div = 10
     window_len = int(((window * 1000 // step) + 2) * step // div)
     scalo = np.empty((X.shape[0] - window_len, X.shape[1], freqs.shape[0],
                       (window * 1000 // step) + 2))
     for i in range(X.shape[1]):
         for j in range(window_len, X.shape[0]):
             if lib == 'scipy':
                 scalo[j - window_len, i, :, :] = signal.cwt(
                     data=X[j - window_len:j, i],
                     wavelet=abs_morlet,
                     widths=freqs)[:, ::step // div]**2
             if lib == 'pywt':
                 #print(type(pywt.cwt(data = X[j-window_len:j,i],wavelet='morl',scales = freqs)[0]))
                 scalo[j - window_len, i, :, :] = pywt.cwt(
                     data=X[j - window_len:j, i],
                     wavelet='morl',
                     scales=freqs)[0][:, ::step // div]**2
     return scalo, self.motion[start +
                               window_len:end, :], self.time[start +
                                                             window_len:end]
Example #20
    def peakIdentifing(self):
        """
            Identify peaks in the chromatogram for the different traces.
            Do cutout first!

            @return A dictionary keyed by A, C, T, G with the corresponding peak positions.
        """
        try:
            return self.peaks
        except:
            pass
        result = {}
        for key in self.getNucs():
            #mCwt = signal.cwt(self[key], signal.ricker, [0.1])[0]
            #maximas = signal.argrelextrema(mCwt, np.greater)[0]
            #result[key] = signal.argrelextrema(self[key], np.greater)[0] #maximas #[(x, self[key][x], mCwt[x]) for x in maximas]
            maximas = set([(val, "X") for val in findMaximas(self[key])])
            cwtTrans = signal.cwt(self[key], signal.ricker, np.arange(
                1, 20))  # todo width parameter estimation
            for cwtTran in cwtTrans:
                for maxima in findMaximas(cwtTran):
                    maximas.add((maxima, "O"))
            # filter maximas
            maximas = filterPeaks(self[key],
                                  sorted(list(maximas), key=lambda x: x[0]))
            # save maximas
            result[key] = maximas
        self.peaks = result
        return result
def compute_whole_sgy_file():
    seismic_sgy = ""
    with segyio.open(seismic_sgy, 'r') as f:
        trace = f.trace[500]

    widths = numpy.arange(1, 11, 0.1)
    Wf = []
    for w in widths:
        w = ricker(50, w)
        F = fft.fftshift(fft.fftfreq(256))
        W = fft.fftshift(fft.fft(numpy.real(w), 256))
        Wf.append(numpy.amax(numpy.abs(W)))

    p_trace = numpy.zeros(250)
    p_trace[0:len(trace)] = trace
    # cwt_tf = cwt(trace, ricker, numpy.arange(1, 11, 0.1))

    volume = segyio.tools.cube(seismic_sgy)
    cwt_cube = numpy.zeros((*volume.shape, len(Wf)))
    shape = cwt_cube.shape

    for i in range(0, shape[0]):
        for x in range(0, shape[1]):
            trace = numpy.squeeze(volume[i, x, :])
            c = cwt(trace, ricker, widths)
            cwt_cube[i, x, :, :] = numpy.fliplr(c).T

    return cwt_cube, volume
Example #22
def PlotWavelet(t,y,sigmas,clk=None,wavelet=None,T_Int=45.0,Offset=1.0,NewFigure=True):
    from scipy import signal
    import matplotlib.pyplot as plt
    from bisect import bisect
    import numpy as np
    h = t[1]-t[0]
    slist = sigmas/h
    if not wavelet:
        wavelet = signal.ricker
    dwt = signal.cwt(y,wavelet,slist)
    if clk is None:
        tlist = np.append(t,t[-1]+h)
        dwt = np.abs(dwt)
    else:
        tlist = clk + T_Int / 2.0 + Offset 
        tids = [ bisect(t, tr) for tr in tlist ]
        dwt = np.abs(np.array([ dwt[:,i] for i in tids ])).T
        tlist = np.append(tlist,tlist[-1]+tlist[1]-tlist[0])
    sigmas = np.append(sigmas,sigmas[-1]+sigmas[1]-sigmas[0])
    if NewFigure:
        plt.figure()
    plt.pcolormesh(tlist,sigmas,dwt,cmap=plt.get_cmap('hot'))
    if clk is None:
        plt.xlim(tlist[0],tlist[-1])
    else:
        plt.xlim(clk[0],clk[-1])
    plt.ylim(sigmas[0],sigmas[-1])
    #plt.xlabel("Time / ms")
    #plt.ylabel("Sigma / ms")
    if NewFigure:
        plt.colorbar()
        plt.show()
Example #23
def encode_dataset(dataset, signal_type, pooling_function):
    """
        The function returns a 3D matrix.
        The new 3D matrix contains several 2D matrices, which correspond to the time series encodings.
        The order of the objects does not change, which means for example that the 23rd slice of the 
        input dataset corresponds to the 23rd matrix in the 3D Matrix.
        
        The images in this case are discrete scalograms. Output imgae shape is fixed at 64x64 """

    factor = 8
    smoothness_factor = 4
    widths = np.linspace(1, 64, 64)
    widths = 2**(widths / smoothness_factor)
    n = np.shape(dataset)[0]
    X_sc = np.zeros((n, 64, 64))

    for i in range(0, n):
        cwtmatr = signal.cwt(dataset[i, :], signal_type, widths)
        X_sc[i] = block_reduce(cwtmatr,
                               block_size=(1, factor),
                               func=pooling_function)

    print('Encoding successful!')
    print('#####################################')

    return X_sc
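A usage sketch for encode_dataset above, assuming block_reduce comes from skimage.measure and that each series is 512 samples long so the block_size=(1, 8) pooling yields 64x64 scalograms:

import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
dataset = rng.standard_normal((10, 512))  # 10 series, 512 samples each

X_sc = encode_dataset(dataset, signal_type=signal.ricker, pooling_function=np.max)
print(X_sc.shape)  # (10, 64, 64)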
Example #24
def make_series(n_parcels, n_samples, n_cut_samples=40, widths=range(5, 6)):
    """Function for generating oscillating parcel signals.
    
    Input arguments:
    ================
    n_parcels : int
        Number of source-space parcels or labels.
    n_samples : int
        Length of the generated time-series in number of samples.
    n_cut_samples : int
        Number of temporary extra samples at each end of the signal
        for handling edge artefacts.
    widths : ndarray
        Widths to use for the wavelet transform.
        
    Output arguments:
    =================
    s : ndarray
        Simulated oscillating parcel time-series.
    """
    decim_factor = 5
    s = randn(n_parcels, n_samples * decim_factor + 2 * n_cut_samples)

    for i in np.arange(0, n_parcels):
        s[i, :] = signal.cwt(s[i, :], signal.ricker, widths)

    s = signal.hilbert(s)
    s = s[:, n_cut_samples:-n_cut_samples]
    s = scipy.signal.decimate(s, decim_factor, axis=1)

    return s
Example #25
def _spectrogram_scipy_wavelet(data,
                               fs,
                               nt,
                               nch,
                               fmin=None,
                               wave='morlet',
                               warn=True):

    if wave != 'morlet':
        msg = "Only the morlet wavelet implmented so far !"
        raise Exception(msg)

    # Check inputs
    if fmin is None:
        fmin = _fmin_coef * (fs / nt)
        if warn:
            msg = "fmin was not provided => set to 10.*fs/nt"
            warnings.warn(msg)

    nw = int((1. / fmin - 2. / fs) * fs)
    widths = 2. * np.pi * np.linspace(fmin, fs / 2., nw)
    wave = eval('scpsig.%s' % wave)

    lcwt = []
    for ii in range(0, nch):
        cwt = scpsig.cwt(data[:, ii], wave, widths)
        lcwt.append(np.abs(cwt)**2)

    f = widths / (2. * np.pi)
    return f, lcwt
Example #26
 def doSkylineCorrection(trace):
     # general
     tl = len(trace)
     # skyline estimation
     cwtTrace = signal.cwt(trace, signal.ricker, [0.1])[0]
     maximas = signal.argrelextrema(cwtTrace, np.greater)[0]
     maxVals = [(maxima, cwtTrace[maxima])
                for maxima in maximas]  # this is for maxima filtering
     maxVals = list(filter(
         lambda x: x[1] > treshold,
         maxVals))  # TODO: check if there's a better threshold
     # save, where the absolute max. can be found
     absMaxValPos = max(maxVals, key=lambda x: x[1])[0]
     # create the maxima arrays
     maximas = np.array([x[0] for x in maxVals])
     maxVals = np.array([x[1] for x in maxVals])
     # expected peak height ~ exp. decay ratio
     pSkyF = lambda x, a, b, m: a * e**(np.sign(x - m) * -b * (x - m))
     (a, b, m), pConv = curve_fit(pSkyF,
                                  maximas,
                                  maxVals,
                                  p0=(max(maxVals), 10E-7,
                                      absMaxValPos))
     skyline = np.array([pSkyF(x, a, b, m) for x in range(tl)])
     skyline = np.array([max(1, skyline[x]) for x in range(tl)])
     # normalize
     trace = np.array([trace[x] / skyline[x] * 100 for x in range(tl)])
     # return
     return trace
Example #27
def extractwaveletcoef(infile, outfile):
    """
    This will extract wavelet coefficients from the specified file.
    Uses ricker wavelet, may not be the best choice...
    :param infile: location of csv formatted file
    :param outfile: destination for a file, will be formatted as mat file
    """
    csvin = np.loadtxt(infile, delimiter=',').astype(np.float32).T
    numscales = len(SCALES)

    outfile = Path(outfile)
    if outfile.exists():
        print('Warning: Overwriting output file', outfile)

    numrows = csvin.shape[0]
    if numrows != NUM_OF_CHANNELS:
        print('WARNING: in file:', infile)
        print(numrows, '!=151 MEG channels detected in file')

    outmat = np.empty((*csvin.shape, numscales), dtype=np.float32)

    # print('Extracting wavelet coefficients...')
    for row in range(numrows):
        outmat[row, :, :] = cwt(csvin[row, :], ricker, SCALES).T

    print('Saving to: ', outfile, '...')
    if outfile.suffix == '.mat':
        savemat(str(outfile), {MAT_VAR_NAME: outmat}, do_compression=True)
    elif outfile.suffix == '.npy':
        np.save(str(outfile), outmat)
    elif outfile.suffix == '':
        np.save(str(outfile.with_suffix('.npy')), outmat)
    else:
        raise NameError('Unknown suffix for outfile: "{0}", will not proceed'.format(outfile.suffix))
    print('Saved.')
Example #28
    def get_single_external_state(self, key):
        # Use Hi-Low median as signal:
        x = (np.frombuffer(self.data_streams[key].high.get(size=self.time_dim))
             + np.frombuffer(
                 self.data_streams[key].low.get(size=self.time_dim))) / 2

        # Differences along time dimension:
        d_x = np.gradient(x, axis=0) * self.p.cwt_signal_scale

        # Compute continuous wavelet transform using Ricker wavelet:
        cwt_x = signal.cwt(d_x, signal.ricker, self.cwt_width).T

        norm_x = cwt_x

        # Note: differences taken once again along channels axis,
        # apply weighted scaling to normalize channels
        # norm_x = np.gradient(cwt_x, axis=-1)
        # norm_x = zscore(norm_x, axis=0) * self.p.state_ext_scale
        norm_x *= self.p.state_ext_scale[key]

        out_x = tanh(norm_x)

        # out_x = np.clip(norm_x, -10, 10)

        # return out_x[:, None, :]
        return out_x[:, None, :]
def waveletFilter(a):
    a = sitk.GetArrayFromImage(a)
    a = a.astype(float)
    w = np.arange(1, 31)
    a = signal.cwt(a, signal.ricker, w)
    a = a.astype(int)
    return sitk.GetImageFromArray(a)
Example #30
def scalogram_example_2():
    # Generate a test signal, a 2 Vrms sine wave whose frequency is slowly modulated around 3kHz, corrupted by white noise of exponentially decreasing magnitude sampled at 10 kHz.
    fs = 10e3
    N = 1e5

    amp = 2 * np.sqrt(2)
    noise_power = 0.01 * fs / 2
    time = np.arange(N) / float(fs)
    mod = 500 * np.cos(2 * np.pi * 0.25 * time)
    carrier = amp * np.sin(2 * np.pi * 3e3 * time + mod)
    noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
    noise *= np.exp(-time / 5)
    x = carrier + noise
    # TODO [fix] >> How to determine?
    scale = np.arange(1, 101)

    # Compute the CWT.
    cwtmatr = signal.cwt(x, signal.ricker, scale)

    # Scalogram: a spectrogram for wavelets. (???)
    plt.pcolormesh(time, scale, 20 * np.log10(np.abs(cwtmatr)))
    plt.show()

    # Plot the CWT.
    plt.imshow(cwtmatr,
               extent=[0, 10, 30, -30],
               cmap='PRGn',
               aspect='auto',
               vmax=abs(cwtmatr).max(),
               vmin=-abs(cwtmatr).max())
    plt.show()
Example #31
def pEW(wavelength,
        flux,
        cont,
        cont_coords,
        err_method='default',
        model=None,
        flux_err=np.array([np.nan])):
    '''
    calculates the pEW between two chosen points
    cont should be the return of a call to <pseudo_continuum>
    '''

    # calculate pEW
    pEW_val = _pEW(wavelength, flux / cont(wavelength), cont_coords)

    # calculate pEW uncertainty
    if (err_method == 'sample') and (model is not None):
        sim_pEWs = []
        for sample in model.posterior_samples_f(wavelength[:, np.newaxis],
                                                100).squeeze().T:
            sim_pEWs.append(
                _pEW(wavelength, sample / cont(wavelength), cont_coords))
        pEW_err = np.std(sim_pEWs)
        return pEW_val, pEW_err
    elif (err_method == 'data') and (~np.isnan(flux_err).all()):
        pEW_err_sq = 0
        for i in range(len(wavelength)):
            if (wavelength[i] > cont_coords[0, 0]) and (wavelength[i] <
                                                        cont_coords[0, 1]):
                dwave = 0.5 * (wavelength[i + 1] - wavelength[i - 1])
                pEW_err_sq += (dwave**
                               2) * (flux_err[i] / cont(wavelength[i]))**2
        return pEW_val, np.sqrt(pEW_err_sq)
    elif (err_method == 'data') and (np.isnan(flux_err).any()):
        warnings.warn(
            'NaN in flux err, computing pEW error using default method instead of from data'
        )

    if err_method != 'LEGACY':
        flux_err = np.sqrt(np.mean(signal.cwt(flux, signal.ricker, [1])**2))
    else:
        flux_err = np.abs(signal.cwt(flux, signal.ricker, [1])).mean()
    pEW_stat_err = flux_err
    pEW_cont_err = np.abs(cont_coords[0, 0] - cont_coords[0, 1]) * flux_err
    pEW_err = math.hypot(pEW_stat_err, pEW_cont_err)

    return pEW_val, pEW_err
Example #32
    def cw_ssim_value(self, target, width=30):
        """Compute the complex wavelet SSIM (CW-SSIM) value from the reference
        image to the target image.

        Args:
          target (str or np.array): Input image to compare the reference image
          to. This may be a numpy array object or, to save time, an SSIMImage
          object (e.g. the img member of another SSIM object).
          width: width for the wavelet convolution (default: 30)

        Returns:
          Computed CW-SSIM float value.
        """
        if not isinstance(target, SSIMImage):
            target = SSIMImage(target, size=self.img.size)

        # Define a width for the wavelet convolution
        widths = np.arange(1, width + 1)

        # Use the image data as arrays
        sig1 = np.reshape(self.img.img_gray,
                          (self.img.size[0] * self.img.size[1], ))
        sig2 = np.reshape(target.img_gray,
                          (self.img.size[0] * self.img.size[1], ))

        # Convolution
        cwtmatr1 = signal.cwt(sig1, signal.ricker, widths)
        cwtmatr2 = signal.cwt(sig2, signal.ricker, widths)

        # Compute the first term
        c1c2 = np.multiply(abs(cwtmatr1), abs(cwtmatr2))
        c1_2 = np.square(abs(cwtmatr1))
        c2_2 = np.square(abs(cwtmatr2))
        num_ssim_1 = 2 * np.sum(c1c2, axis=0) + self.k
        den_ssim_1 = np.sum(c1_2, axis=0) + np.sum(c2_2, axis=0) + self.k

        # Compute the second term
        c1c2_conj = np.multiply(cwtmatr1, np.conjugate(cwtmatr2))
        num_ssim_2 = 2 * np.abs(np.sum(c1c2_conj, axis=0)) + self.k
        den_ssim_2 = 2 * np.sum(np.abs(c1c2_conj), axis=0) + self.k

        # Construct the result
        ssim_map = (num_ssim_1 / den_ssim_1) * (num_ssim_2 / den_ssim_2)

        # Average the per pixel results
        index = np.average(ssim_map)
        return index
Example #33
def wavelet():
    t = np.linspace(-1, 1, 200, endpoint=False)
    sig  = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
    widths = np.arange(1, 31)
    cwtmatr = signal.cwt(sig, signal.ricker, widths)
    plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto', vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
    plt.show()
    return
Example #34
def cwt_lowpass(x, fps, cf):
    num_wavelets = int(fps / cf)
    widths = np.arange(1, num_wavelets)
    cwt_mat = signal.cwt(x, signal.ricker, widths)
    c = np.mean(cwt_mat, axis=0)
    # renormalize
    orig_e, c_e = np.sqrt(np.sum(x * x)), np.sqrt(np.sum(c * c))
    return c * (orig_e / c_e)
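A quick sketch exercising cwt_lowpass above on a noisy sine; fps is the sampling rate and cf the cutoff frequency, so fps/cf sets how many Ricker widths get averaged (the values here are illustrative):

import numpy as np

fps, cf = 120.0, 4.0
t = np.arange(0, 5, 1 / fps)
x = np.sin(2 * np.pi * 1.5 * t) + 0.3 * np.random.randn(t.size)

x_smooth = cwt_lowpass(x, fps, cf)  # energy-renormalized low-pass estimate of x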
Example #35
    def test_cwt_complex(self, rand_complex_data_gen, num_samps, widths):
        cpu_signal, gpu_signal = rand_complex_data_gen(num_samps)

        cpu_cwt = signal.cwt(cpu_signal, signal.ricker, np.arange(1, widths))
        gpu_cwt = cp.asnumpy(
            cusignal.cwt(gpu_signal, cusignal.ricker, cp.arange(1, widths)))

        assert array_equal(cpu_cwt, gpu_cwt)
def cwt_coefficients(x, c, param):
    """
    Calculates a Continuous wavelet transform for the Ricker wavelet, also known as the "Mexican hat wavelet" which is
    defined by

    .. math::
        \\frac{2}{\\sqrt{3a} \\pi^{\\frac{1}{4}}} (1 - \\frac{x^2}{a^2}) exp(-\\frac{x^2}{2a^2})

    where :math:`a` is the width parameter of the wavelet function.

    This feature calculator takes three different parameters: widths, coeff and w. The feature calculator takes all the
    different widths arrays and then calculates the cwt once for each different width array. Then the values for the
    different coefficients coeff and width w are returned. (For each dict in param, one feature is returned.)

    :param x: the time series to calculate the feature of
    :type x: pandas.Series
    :param c: the time series name
    :type c: str
    :param param: contains dictionaries {"widths":x, "coeff": y, "w": z} with x array of int and y,z int
    :type param: list
    :return: the different feature values
    :return type: pandas.Series
    """
    df_cfg = pd.DataFrame(param)
    res = pd.Series()

    for widths in df_cfg["widths"].unique():

        # the calculated_cwt will shape (len(widths), len(x)).
        calculated_cwt = cwt(x, ricker, widths)

        for w in df_cfg[df_cfg["widths"] == widths]["w"].unique():

            coeff = df_cfg[(df_cfg["widths"] == widths)
                           & (df_cfg["w"] == w)]["coeff"].unique()
            i = widths.index(w)

            if calculated_cwt.shape[1] < len(
                    coeff
            ):  # There are less data points than requested model coefficients
                red_coeff = coeff[:calculated_cwt.shape[1]]
                res_tmp = calculated_cwt[i, red_coeff]
                # Fill up the rest of the requested coefficients with np.NaNs
                res_tmp = np.append(
                    res_tmp,
                    np.array([np.NaN] * (len(coeff) - len(red_coeff))))
            else:
                res_tmp = calculated_cwt[i, coeff]

            res = res.append(
                pd.Series(
                    res_tmp,
                    index=[
                        "{}__cwt_coefficients__widths_{}__coeff_{}__w_{}".
                        format(c, widths, m, w) for m in coeff
                    ]))

    return res
Example #37
def cw_ssim_value(data, width):
        """Compute the complex wavelet SSIM (CW-SSIM) value from the reference
        image to the target image.
        Args:
          target (str or PIL.Image): Input image to compare the reference image
          to. This may be a PIL Image object or, to save time, an SSIMImage
          object (e.g. the img member of another SSIM object).
          width: width for the wavelet convolution (default: 30)
        Returns:
          Computed CW-SSIM float value.
        """

        # Define a width for the wavelet convolution
        widths = np.arange(1, width+1)

        for d in data:
        
            # Use the image data as arrays
            sig1 = np.asarray(data[0].ravel())
            sig2 = np.asarray(d.ravel())

            # Convolution
            cwtmatr1 = signal.cwt(sig1, signal.ricker, widths)
            cwtmatr2 = signal.cwt(sig2, signal.ricker, widths)

            # Compute the first term
            c1c2 = np.multiply(abs(cwtmatr1), abs(cwtmatr2))
            c1_2 = np.square(abs(cwtmatr1))
            c2_2 = np.square(abs(cwtmatr2))
            num_ssim_1 = 2 * np.sum(c1c2, axis=0) + 0.01
            den_ssim_1 = np.sum(c1_2, axis=0) + np.sum(c2_2, axis=0) + 0.01

            # Compute the second term
            c1c2_conj = np.multiply(cwtmatr1, np.conjugate(cwtmatr2))
            num_ssim_2 = 2 * np.abs(np.sum(c1c2_conj, axis=0)) + 0.01
            den_ssim_2 = 2 * np.sum(np.abs(c1c2_conj), axis=0) + 0.01

            # Construct the result
            ssim_map = (num_ssim_1 / den_ssim_1) * (num_ssim_2 / den_ssim_2)
            ssim_map = ssim_map.reshape(512,512)
            cw_ssim_maps.append(ssim_map)

            # Average the per pixel results
            index = round( np.average(ssim_map), 2) 
            cw_ssim_vals.append(index)
Example #38
    def cw_ssim_value(self, target, width=30):
        """Compute the complex wavelet SSIM (CW-SSIM) value from the reference
        image to the target image.

        Args:
          target (str or PIL.Image): Input image to compare the reference image
          to. This may be a PIL Image object or, to save time, an SSIMImage
          object (e.g. the img member of another SSIM object).
          width: width for the wavelet convolution (default: 30)

        Returns:
          Computed CW-SSIM float value.
        """
        if not isinstance(target, SSIMImage):
            target = SSIMImage(target, size=self.img.size)

        # Define a width for the wavelet convolution
        widths = np.arange(1, width+1)

        # Use the image data as arrays
        sig1 = np.asarray(self.img.img_gray.getdata())
        sig2 = np.asarray(target.img_gray.getdata())

        # Convolution
        cwtmatr1 = signal.cwt(sig1, signal.ricker, widths)
        cwtmatr2 = signal.cwt(sig2, signal.ricker, widths)

        # Compute the first term
        c1c2 = np.multiply(abs(cwtmatr1), abs(cwtmatr2))
        c1_2 = np.square(abs(cwtmatr1))
        c2_2 = np.square(abs(cwtmatr2))
        num_ssim_1 = 2 * np.sum(c1c2, axis=0) + self.k
        den_ssim_1 = np.sum(c1_2, axis=0) + np.sum(c2_2, axis=0) + self.k

        # Compute the second term
        c1c2_conj = np.multiply(cwtmatr1, np.conjugate(cwtmatr2))
        num_ssim_2 = 2 * np.abs(np.sum(c1c2_conj, axis=0)) + self.k
        den_ssim_2 = 2 * np.sum(np.abs(c1c2_conj), axis=0) + self.k

        # Construct the result
        ssim_map = (num_ssim_1 / den_ssim_1) * (num_ssim_2 / den_ssim_2)

        # Average the per pixel results
        index = np.average(ssim_map)
        return index
def cwt(i):
  print('processing worm %d' % i)
  widths = np.array([2**ii for ii in range(17)]);
  fspeed = np.memmap('%s_speed.npy' % strain, dtype='float32', mode='r', shape = shape_speed);
  signal = fspeed[i,:];
  cwtm = sig.cwt(signal, sig.ricker, widths);  
  fwt = np.memmap('%s_wavelets_hierarchical.npy' % strain, dtype = 'float32', mode = 'r+', shape = wt_shape);
  fwt[i,:,:] = cwtm;
  del fwt;
def cw_ssim(reference, target, width):
    
        """Compute the complex wavelet SSIM (CW-SSIM) value from the reference
        image to the target image.
        Args:
          reference and target images
          width: width for the wavelet convolution (default: 30)
        Returns:
          Computed CW-SSIM float value and map.
        """

        # Define a width for the wavelet convolution
        widths = np.arange(1, width+1)
        
        # Use the image data as arrays
        sig1 = np.ravel(reference)
        sig2 = np.ravel(target)

        # Convolution
        cwtmatr1 = signal.cwt(sig1, signal.ricker, widths)
        cwtmatr2 = signal.cwt(sig2, signal.ricker, widths)

        # Compute the first term
        c1c2 = np.multiply(abs(cwtmatr1), abs(cwtmatr2))
        c1_2 = np.square(abs(cwtmatr1))
        c2_2 = np.square(abs(cwtmatr2))
        num_ssim_1 = 2 * np.sum(c1c2, axis=0) + 0.01
        den_ssim_1 = np.sum(c1_2, axis=0) + np.sum(c2_2, axis=0) + 0.01

        # Compute the second term
        c1c2_conj = np.multiply(cwtmatr1, np.conjugate(cwtmatr2))
        num_ssim_2 = 2 * np.abs(np.sum(c1c2_conj, axis=0)) + 0.01
        den_ssim_2 = 2 * np.sum(np.abs(c1c2_conj), axis=0) + 0.01

        # Construct the result
        cw_ssim_map = (num_ssim_1 / den_ssim_1) * (num_ssim_2 / den_ssim_2)
        cw_ssim_map = cw_ssim_map.reshape(reference.shape[0],
                                          reference.shape[1])

        # Average the per pixel results
        cw_ssim_index = round( np.average(cw_ssim_map), 3)
        
        return cw_ssim_index, cw_ssim_map
Example #41
def get_scalogram(data, **kwargs):
    if 'wavelet' in kwargs:
        wavelet = kwargs['wavelet']
    else:
        wavelet = signal.ricker
    if 'levels' in kwargs:
        levels = kwargs['levels']
    else:
        levels = np.arange(1, 11)
    return signal.cwt(data, wavelet, levels)
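A usage sketch for get_scalogram above: with no keyword arguments it uses the signal.ricker wavelet and scales 1..10, and a levels array overrides the scale range:

import numpy as np

data = np.random.randn(300)
sc_default = get_scalogram(data)                          # ricker, scales 1..10
sc_custom = get_scalogram(data, levels=np.arange(1, 31))  # 30 custom scales
print(sc_default.shape, sc_custom.shape)                  # (10, 300) (30, 300)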
def get_WT(y, scale):
    single_scale = not isinstance(scale, (list, tuple, np.ndarray))
    if single_scale:
        scale = [scale]
    wavelet = sig.ricker
    tt = time.time()
    wt = sig.cwt(y, wavelet, scale)
    if single_scale:
        wt = wt[0,:]
    logging.debug('WT took %.3f sec to calculate on %s scale (signal length %d)', (time.time() - tt), str(scale), len(y))
    return wt
Example #43
def chromToMatrix(chrom, params=(1.61, 0.1, 6, 1.38, 12)):
    """
        Transfer the chromatogram into a probability
        matrix.

        @param chrom The chromatogram to process.
        @param params Possible prediction parameters.
        @return A probability matrix that represents
        the nucleotid probability for the different
        positions in the chromatogram.
    """
    # get the "maximal" trace
    maxTrace = []
    for i in range(len(chrom["A"])):
        maxVal = 0
        for key in "ACTG":
            maxVal = max(maxVal, chrom[key][i])
        maxTrace.append(maxVal)
    chrom['M'] = maxTrace
    # ok, in the maximal trace, search for local minima;
    # this allows us to reduce the problem, since between
    # each two consecutive local minima there's a local maximum
    # continuous wavelet transformation of this curve
    mCwt = signal.cwt(chrom['M'], signal.ricker, [params[0]])[0]
    # calculate the continuous wavelet transformation
    # for each trace (for later)
    for key in "ACTG":
        chrom['CWT_' + key] = signal.cwt(chrom[key], signal.ricker, [params[1]])[0]
    # Search for local minima in the transformation
    minimas = signal.argrelextrema(mCwt, np.less)[0]
    # Now window between two consecutive minima;
    # this explicitly does not handle the case between
    # the last minimum and the trace end
    startMin, lst = 0, []
    for minima in minimas:
        peak = getPeakBetweenMinimas(chrom, startMin, minima, params)
        if peak != None: # data got accepted
            lst.append(peak)
        startMin = minima
    # return the matrix
    return lst
    def wavelet_transform(self, data):

        # TODO: perform discrete wavelet transform instead

        sig = numpy.copy(data)
        widths = numpy.arange(1, len(data))
        cwtmatr = signal.cwt(sig, signal.ricker, widths)

        self.next_plot()
        pyplot.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
                    vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())

        return cwtmatr
Example #45
 def preview(self):
     low=self.getValue('low')
     high=self.getValue('high')
     preview=self.getValue('preview')
     if self.roi is not None:
         if preview:
             trace=self.roi.getTrace()
             wavelet = signal.ricker
             widths = np.arange(low, high)
             cwtmatr = signal.cwt(trace, wavelet, widths)
             newtrace=np.mean(cwtmatr,0)
             roi_index=g.currentTrace.get_roi_index(self.roi)
             g.currentTrace.update_trace_full(roi_index,newtrace) #update_trace_partial may speed it up
         else:
             self.roi.redraw_trace()
def harmo_analysis(df, gas_tar, gas_ref, maxlags=1000):
    # To be continued
    new_column_name= '%s/%s'%(gas_tar, gas_ref)
    plt.figure(figsize=(12,8))
    plt.subplot(211)                                     
    plot_acorr(df[new_column_name], maxlags, index=df.index)

    # ok now cwt
    from scipy import signal
    import numpy as np
    L=512
    widths = np.arange(1, L)
    cwtmatr = signal.cwt(df[new_column_name], signal.ricker, widths)
    plt.subplot(212)
    plt.imshow(abs(cwtmatr), extent=[-1, 1, 1, L], cmap='jet', aspect='auto',
                vmax=abs(cwtmatr).max(), vmin=0)
Example #47
 def __call__(self,low,high,keepSourceWindow=False):
     self.start(keepSourceWindow)
     if self.tif.ndim != 3:
         g.alert("Wavelet filter only works on 3 dimensional movies")
         return
     mx=self.tif.shape[2]
     my=self.tif.shape[1]
     self.newtif=np.zeros(self.tif.shape)
     wavelet = signal.ricker
     widths = np.arange(low, high)
     for i in np.arange(my):
         print(i)
         for j in np.arange(mx):
             cwtmatr = signal.cwt(self.tif[:, i, j], wavelet, widths)
             self.newtif[:, i, j]=np.mean(cwtmatr,0)
     self.newname=self.oldname+' - Wavelet Filtered'
     return self.end()
def getPeaks(waveNumbers,intensities):

    data = _spectra(waveNumbers,intensities)

    # Take the CWT of the spectra. Trim the result to remove padding.
    waveletCoeff = signal.cwt(intensities, signal.ricker, \
                                   np.linspace(lowerBound,upperBound,steps))

    # Flip the matrix so the highest wavelet coefficient is the top row
    waveletCoeff = np.flipud(waveletCoeff)

    # Find the ridge lines connecting the maxima in the wavelet coefficient array. Filter ridge lines
    # takes a (scaleFactor,3) array of positions and values of maxima.
    ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),columnWindow,rowWindow)

    # Populate a structured array with peak information
    peakInfo = getPeakInfo(ridgeLines,data,waveletCoeff)

    return peakInfo
def peak_finder_pro(measurement):
    E0 = measurement.energy_cal[0]
    slope = measurement.energy_cal[1]
    energy_axis = measurement.channel
    energy_axis = energy_axis.astype(float)
    energy_axis[:] = [E0 + slope * x for x in range(len(measurement.channel))]

    """energy_spectra is the spectra that will be loaded and analyzed"""

    fwhm_list = []
    for i in range(len(energy_axis)):
        fwhm = 0.05 * energy_axis[i] ** 0.5
        fwhm_list.append(fwhm)

    counts = measurement.data

    peaks_found = []
    start = 0
    end = start + 50
    for energy in energy_axis:
        E_start = energy_axis[start]
        E_end = energy_axis[end]
        energy_range = range(E_start, E_end)
        count_total = 0
        for i in energy_range:
            count_total = count_total + counts[energy_range[i]]
        avg_range = count_total/len(energy_range)
        avg_ends = (counts[start] + counts[end]) / 2
        threshold = 1.1 * avg_ends
        if avg_range > threshold:
            energy_average = start + 25
            avg_fwhm = fwhm_list[energy_average]
            width_parameter = avg_fwhm * 3
            wavelet = signal.ricker(width_parameter, avg_fwhm)
            counts_range = range(counts[E_start], counts[E_end])
            wave_transform = signal.cwt(counts_range, wavelet, width_parameter)
            peak_finder = signal.find_peaks_cwt(wave_transform, counts_range)
            peaks_found.append(peak_finder)
            next_range = peak_finder + 0.5 * avg_fwhm
            start = next_range
        else:
            start += 1
    return peaks_found
 def test_wavelet(self):
     MOUSE = False
     # scales = np.array([1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024])
     scales = np.array([4, 8, 12, 16, 20, 24, 32])
     # scales = np.array([48, 56, 64, 82, 100])
     if MOUSE:
         scales *= 44
         ecg = self.ecg_mouse()
     else:
         ecg = self.ecg()
     # x, y, sample_fq = self._periodic_signal()
     x, y, sample_fq = (ecg.getTiming(), ecg.getLowFreq(), ecg.getDataFrequency())
     # y = aline(y, sample_fq)
     tt = time.time()
     # cwt = sig.cwt(y, sig.ricker, np.arange(1, 100, 2))
     cwt = sig.cwt(y, sig.ricker, scales)
     logging.info('CWT took %.3f sec to calculate' % (time.time() - tt))
     # self._draw_cwt_as_img(y, cwt, scales)
     self._draw_cwt_as_plots(y, cwt, scales)
Example #51
def perform_cwt(sig, width_step=0.5, max_scale=None, wavelet=signal.ricker, epsilon=0.1, order=1, plot=False):
    """
    Perform the continuous wavelet transform against the incoming signal. This function will normalize the signal
    (to 0-1 in the y axis) for you, as well as taking the -1 * abs( log( ) ) of the matrix that is found. Literature
    suggests that len/4 is a good balance for finding the bifurcations vs execution time

    This will automatically create the maxima only mask of the wavelet coef matrix for you. To see the original, use
    plot=True
    :param sig: 1 dimensional array -- the signal to be hit with the wavelet
    :param width_step: what width step to use between the min and the max
    :param max_scale: the maximum scale to use. Default = len(sig)/4
    :param wavelet: what wavelet to use as the mother
    :param epsilon: how to score the maxima's intensity (e.g. intensity / epsilon )
    :param order: how many neighbors to look at when finding the local maxima
    :param plot: whether to plot the original CWT coefficient matrix as a heatmap
    :return: the mask, see above
    """
    if not max_scale:
        max_scale = len(sig) / 4
    widths = np.arange(1, max_scale, width_step)

    # normalize the signal to fit in the wavelet
    sig_max = sig.max()
    sig_min = sig.min()
    sig = (sig - (sig_min - 0.01)) / (sig_max - sig_min + 0.02)


    # Run the transform
    w_coefs = abs(-1 * log(abs(signal.cwt(sig, wavelet, widths))))

    # Create the mask, keeping only the maxima
    mask = create_w_coef_mask(w_coefs, epsilon=epsilon, order=order)

    if plot:
        plt.figure(figsize=(14, 10))
        plt.pcolormesh(w_coefs)
        plt.colorbar()
        ax = plt.gca()
        ax.set_ylim(ax.get_ylim()[::-1])
        ax.xaxis.tick_top()
        plt.show()

    return mask
Example #52
def MakePlot():
    Points = 5000
    MaxX = np.pi * 2
    XValues,dx = np.linspace(start=0,stop=MaxX,num=Points,retstep=True)
    num_steps = 3
    step = int(Points/num_steps)
    frequency_low = 1
    frequency_high = 10
    freqs = np.linspace( frequency_low, frequency_high,num=num_steps )
    Idx = [ slice(i*step,(i+1)*step,1) for i in range(step)]
    y_values = np.zeros(XValues.size)
    for f,idx_range in zip(freqs,Idx):
        x_tmp = XValues[idx_range]
        y_values[idx_range] = np.sin(2*np.pi*f*(x_tmp-x_tmp[0]))
    # Get the FFT of our function
    fft_coeffs = np.fft.rfft(y_values)
    fft_freq = np.fft.rfftfreq(n=y_values.size,d=dx)
    # get the CWT of our function 
    CoeffMax = frequency_high*5
    CoeffMin = 1/(frequency_low/5)
    NCoeffs = 50
    widths = np.linspace(CoeffMin,CoeffMax,NCoeffs)
    wavelet_signal = lambda n_points,width: \
            signal.morlet(M=n_points,w=width,complete=True,s=1.0)
    cwt_coeffs = signal.cwt(data=y_values,wavelet=signal.ricker,widths=widths)
    fudge = 0.5
    x_lim = np.array([0,max(XValues)])
    x_fudge = np.array([-fudge,fudge])
    plt.subplot(3,1,1)
    plt.plot(XValues,y_values)
    plt.xlim(x_lim+x_fudge)
    PlotUtilities.lazyLabel("Time","","")
    plt.subplot(3,1,2)
    plt.plot(fft_freq,fft_coeffs)
    PlotUtilities.lazyLabel("Frequency","FFT Coefficients","")
    plt.subplot(3,1,3)
    plt.imshow(cwt_coeffs, extent=[x_lim[0], x_lim[1], 
                                   min(widths), max(widths)], 
               origin='lower', cmap='PRGn', aspect='auto',
               vmax=abs(cwt_coeffs).max(), vmin=-abs(cwt_coeffs).max())
    plt.xlim(x_lim+x_fudge)
    PlotUtilities.lazyLabel("Time","Frequency","Morlet Coefficient Map")
Exemple #53
0
def find_peaks_cwt2(vector, widths, min_snr=1):
    #print vector.size, widths
    gap_thresh = n.ceil(widths[0]) + 1
    max_distances = widths / 3.0
    wavelet = ss.ricker
    pad = 1
    if pad:
        #vec = n.hstack((vector[:vector.size/pad+1][::-1], vector, vector[-vector.size/pad:][::-1]))
        vec = pad_data_const(vector, pad)  # a sketch of this helper follows this function
    else:
        vec=vector
    cwt_dat_all = ss.cwt(vec, wavelet, widths)
    cwt_dat = cwt_dat_all[:,vector.size//pad:vector.size//pad+vector.size]
    # note: these ridge-line helpers are private to scipy.signal and may move between versions
    ridge_lines = ss._peak_finding._identify_ridge_lines(cwt_dat, max_distances, gap_thresh)
    min_length = 5
    #print ridge_lines
    #filtered = ss._peak_finding._filter_ridge_lines(cwt_dat, ridge_lines, min_snr=min_snr)
    filtered = _filter_ridge_lines2(cwt_dat, ridge_lines, min_snr=min_snr, min_length=min_length)
    #if pad:
    #    good_ones = [x for x in filtered if x[1][0]>vector.size/pad\
    #                and x[1][0]<vector.size+vector.size/pad]
    #else:
    good_ones = filtered
    #for g in good_ones:
    #    print 'g=',g
    #adjust = vector.size/pad - 1 if pad else 0
    #find boundaries of region from its half height cwd by looking for local minima around the peak
    max_locs = []
    for g in good_ones:
        x_loc = g[1][2]
        width_index = g[-1][-2]#the width at the maximum on the ridge
        width = widths[width_index]
        snr = g[-1][1]
        #print 'width',x_loc, width_index,width,snr
        left_min = max(0,x_loc-int(1.5*width))
        right_min = int(min(x_loc+2*width, len(cwt_dat[0])))
        max_locs.append((x_loc ,width, left_min, right_min, snr))
    #print 'mm',max_locs
    return sorted(max_locs, key=lambda x:x[0])
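pad_data_const is not defined in this example. A plausible stand-in is sketched below; it is an assumption on my part, chosen to match the slicing above, which discards len(vector)//pad samples from each edge after the transform.

import numpy as np

def pad_data_const(vector, pad):
    # Extend the signal by len(vector) // pad samples on each side, repeating the edge
    # values, so that CWT edge artefacts fall outside the slice taken afterwards.
    n_pad = vector.size // pad
    return np.pad(vector, n_pad, mode='edge')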
Exemple #54
0
 def doSkylineCorrection(trace):
     # general
     tl = len(trace)
     # skyline estimation
     cwtTrace = signal.cwt(trace, signal.ricker, [0.1])[0]
     maximas = signal.argrelextrema(cwtTrace, np.greater)[0]
     maxVals = [(maxima, cwtTrace[maxima]) for maxima in maximas] # this is for maxima filtering
     maxVals = list(filter(lambda x:x[1]>treshold, maxVals)) # TODO: check if there's a better threshold
     # save, where the absolute max. can be found
     absMaxValPos = max(maxVals, key=lambda x:x[1])[0]
     # create the maxima arrays
     maximas = np.array([x[0] for x in maxVals])
     maxVals = np.array([x[1] for x in maxVals])
     # expected peak height ~ exp. decay ratio
     pSkyF = lambda x, a, b, m : a * np.exp(np.sign(x-m) * -b * (x-m))
     (a, b, m), pConv = curve_fit(pSkyF, maximas, maxVals, p0=(max(maxVals), 10E-7, absMaxValPos))
     skyline = np.array([pSkyF(x, a, b, m) for x in range(tl)])
     skyline = np.array([max(1, skyline[x]) for x in range(tl)])
     # normalize
     trace = np.array([ trace[x] / skyline[x] * 100 for x in range(tl) ])
     # return
     return trace
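For reference, here is a minimal standalone sketch of the skyline model fitted above, a two-sided exponential decay a * exp(-b * |x - m|), applied to synthetic maxima. All names here are illustrative and not part of the original example.

import numpy as np
from scipy.optimize import curve_fit

def skyline_model(x, a, b, m):
    # a * exp(-b * |x - m|): exponential decay on both sides of the apex position m
    return a * np.exp(-b * np.abs(x - m))

x_max = np.linspace(0, 2000, 40)
y_max = skyline_model(x_max, 150.0, 2e-3, 800.0) * (1 + 0.05 * np.random.randn(x_max.size))
(a, b, m), _ = curve_fit(skyline_model, x_max, y_max,
                         p0=(y_max.max(), 1e-3, x_max[np.argmax(y_max)]))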
Exemple #55
0
 def plot(self,data,ws=32):
     """ plot the function, calcs the complexity and a continous wavelet """
     N=len(data)
     x=np.arange(N)
     # wavelet part
     widths = np.arange(1, ws)
     cwtmatr = signal.cwt(data, signal.ricker, widths)
     # define the multiple plot
     plt.subplot(2,1,1)
     c=self.comp(data)
     plt.title('Signal complexity='+str(c))
     plt.xlabel('x')
     plt.ylabel('y')
     plt.grid(True)
     plt.plot(x,data)
     plt.subplot(2,1,2)
     cax=plt.imshow(cwtmatr,aspect='auto')
     cbar = plt.colorbar(cax)
     plt.xlabel('dt')
     plt.ylabel('dF')
     plt.grid(True)
     plt.show()
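A quick standalone way to exercise the same layout (signal on top, Ricker scalogram below) without the owning class; this is purely illustrative and uses a synthetic chirp.

import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

t = np.linspace(0, 10, 2048)
test_sig = np.sin(2 * np.pi * t * (1 + 0.5 * t))   # chirp: frequency rises with time
plt.subplot(2, 1, 1)
plt.plot(t, test_sig)
plt.subplot(2, 1, 2)
plt.imshow(signal.cwt(test_sig, signal.ricker, np.arange(1, 32)), aspect='auto')
plt.show()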
Exemple #56
0
def MakeFigure(Points=1024,MaxX=1,Seed=42,DecayConst=1/50.,SpringStretch=10,
               snr=100):
    np.random.seed(Seed)
    x,dx,stretch_kwargs,f = SimulationUtil.\
        normalized_force_extension(max_x=MaxX,decay_const=DecayConst,
                                   points=Points,spring_const=SpringStretch,
                                   snr=snr)
    # first, add in approach/retract
    # make everything nice and zero-meaned, max of 1
    f -= np.mean(f)
    f /= max(f)
    # Get the FFT of our function
    fft_coeffs = np.fft.rfft(f)
    fft_freq = np.fft.rfftfreq(n=f.size,d=dx)
    # get the CWT of our function 
    CoeffMax = 50
    CoeffMin = 1
    NCoeffs = 50
    widths = np.linspace(CoeffMin,CoeffMax,NCoeffs)
    cwt_coeffs = signal.cwt(data=f,wavelet=signal.ricker,widths=widths)
    # see how well an out-of-the-box scipy peak finder does (overlaid in the sketch after this function)
    peak_idx_scipy = signal.find_peaks_cwt(vector=f, widths=widths)
    # plot the various transforms..
    plt.subplot(3,1,1)
    plt.plot(x,f)
    PlotUtilities.lazyLabel("x","f(x)","Fourier and Wavelet Transform of f(x)")
    plt.subplot(3,1,2)
    plt.plot(fft_freq[1:]/max(fft_freq),np.abs(fft_coeffs[1:]))
    xlabel = r"$\frac{\mathrm{Frequency}}{\mathrm{Frequency}_{\mathrm{max}}}$"
    PlotUtilities.lazyLabel(xlabel,
                            "Positive FFT coefficients","")
    plt.subplot(3,1,3)
    # XXX make heat map like ch 6...
    plt.imshow(cwt_coeffs, extent=[0, max(x), min(widths), max(widths)], 
               origin='lower', cmap='PRGn', aspect='auto',
               vmax=abs(cwt_coeffs).max(), vmin=-abs(cwt_coeffs).max())
    title = "Positive wavelet coefficients for Laplacian of Gaussian"
    PlotUtilities.lazyLabel("x","Frequency",title)
Exemple #57
0
import caffe
import sys
from scipy import signal
import numpy as np

# Call caffe to extract features from one video
net = caffe.Net('/u/chen478/dogCentric/deploy.prototxt','/u/chen478/dogCentric/bvlc_reference_caffenet.caffemodel', caffe.TEST)
net.forward()
features = net.blobs['fc7'].data
features = features.T
n = features.shape[0]
print(features.shape)

# Apply the wavelet transform to the features extracted from the same video and concatenate them to form the final feature that represents the video
feature = []
widths = np.arange(1, 4)
for i in range(n):
    cwtmatr = signal.cwt(features[i], signal.ricker, widths)
    feature = feature + list(cwtmatr.reshape(1, cwtmatr.shape[0]*cwtmatr.shape[1]))

# Write the concatenated feature vector out as one space-separated line
s = ''
for i in range(n):
    for j in feature[i]:
        s += str(j) + ' '

with open("/nobackup/chen478/features/" + sys.argv[1], "w") as f:
    f.write(s)
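The loop above assembles the per-video feature row by row in pure Python. A tighter alternative ending for this script, producing the same values in the same order with NumPy, is sketched below; it reuses features, widths, sys, and the output path from above.

video_feature = np.vstack([signal.cwt(row, signal.ricker, widths).ravel()
                           for row in features])
np.savetxt("/nobackup/chen478/features/" + sys.argv[1], video_feature.reshape(1, -1))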

Exemple #58
0
for e in exchanges:
    averagers[e] = BitcoinAverager(e)

averager = averagers["bitfinexUSD"]

# <codecell>

data['price'].tolist()

# <codecell>

from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
sig  = data['price'].tolist()
widths = pow(2,np.arange(0, 18, 0.5))
cwtmatr = signal.cwt(sig, signal.ricker, widths)
imgplot = plt.imshow(cwtmatr, aspect='auto')
imgplot
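Because the widths above are powers of two, a log2 scale on the y axis makes the scalogram easier to read. A possible follow-on cell is sketched below; it reuses sig, widths, and cwtmatr from the cell above.

# <codecell>

plt.imshow(cwtmatr, aspect='auto', origin='lower',
           extent=[0, len(sig), np.log2(widths[0]), np.log2(widths[-1])])
plt.xlabel('sample')
plt.ylabel('log2(width)')
plt.colorbar()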

# <codecell>

pow(2,np.arange(0, 16, 0.5))

# <codecell>

from scipy import signal
import matplotlib.pyplot as plt
sig  = data['volume'].tolist()
widths = pow(2,np.arange(0, 18, 0.5))
cwtmatr = signal.cwt(sig, signal.ricker, widths)
imgplot = plt.imshow(cwtmatr, aspect='auto')
Exemple #59
0
x = 0
# fig=plt.figure()
fig, axarr = plt.subplots(3, sharex=True)
cwtmatr = []
specm = []
mmin = float('Inf')
mmax = float('-Inf')
img = []
img2 = []
img3 = []
for x in range(len(cube.alpha_axis)):
    spec = cube.data[:, 0, x]
    img2.append(axarr[1].plot(spec, 'b'))
    wavelet = signal.ricker
    widths = np.arange(2, 5, 0.2)
    cmat = signal.cwt(spec, wavelet, widths)
    for y in range(len(widths)):
        img3.append(axarr[2].plot(cmat[y]))
    cmax = cmat.max()
    if cmax > mmax:
        mmax = cmax
    cmin = cmat.min()
    if cmin < mmin:
        mmin = cmin
    cwtmatr.append(cmat)
for cmat in cwtmatr:
    img.append([axarr[0].imshow(cmat, vmin=mmin, vmax=mmax)])

ani = animation.ArtistAnimation(fig, img, interval=inte, blit=True,
                                repeat=True)
ani = animation.ArtistAnimation(fig, img2, interval=inte, blit=True,