def denoise(nblck, filename, mode='symmetric', wv='sym5'):
    import pywt
    import numpy as np
    from statsmodels.robust import mad
    #noisy_coefs = pywt.wavedec(nblck, 'sym5', level=5, mode='per')
    # wavedec performs a multilevel decomposition; dwt is single-level only
    # (mode 'symmetric' is the modern name for PyWavelets' old 'sym')
    noisy_coefs = pywt.wavedec(nblck, wavelet=wv, mode=mode)
    sigma = mad(noisy_coefs[-1])
    #uthresh=np.std(ca)/2
    uthresh = sigma*np.sqrt(2*np.log(len(nblck)))
    denoised = noisy_coefs[:]
    denoised[1:] = [pywt.threshold(i, value=uthresh,mode='soft') for i in denoised[1:]]
    signal = pywt.waverec(denoised, wavelet=wv, mode=mode)
    from matplotlib import pyplot as plt
    fig, axes = plt.subplots(1, 2, sharey=True, sharex=True,figsize=(8,4))
    ax1, ax2 = axes
    
    ax1.plot(signal)
    #ax1.set_xlim(0,2**10)
    ax1.set_title("Recovered Signal")
    ax1.margins(.1)
    
    ax2.plot(nblck)
    ax2.set_title("Noisy Signal")
    
    for ax in fig.axes:
        ax.tick_params(labelbottom=False, top=False, bottom=False, left=False, right=False)
    fig.tight_layout()
    fig.savefig(filename+'_'+wv+'.pdf')
    plt.clf()
    return signal
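A minimal usage sketch (assuming PyWavelets, numpy, matplotlib and statsmodels are installed; the signal here is synthetic):

import numpy as np
rng = np.random.default_rng(0)
t = np.linspace(0, 1, 1024)
noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * rng.standard_normal(t.size)
clean = denoise(noisy, 'demo')  # also writes demo_sym5.pdf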
Example #2
    def save_samples(self):
        """
        Save the generated samples to the learn/test, simulation, and target
        set files.
        """

        doc_count = sim_count = 1
        with open("%ssets/learn_test_set.txt" % settings.MEDIA_ROOT, "w") as lt_file:
            for sample in self.learn_set:
                if doc_count % 9 == 0:
                    with open("%ssets/sim_sample%s.txt" % (settings.MEDIA_ROOT, sim_count), "w") as sim_doc:
                        self._write_sample(sim_doc, sample)
                    sim_count += 1
                    self.targets.pop(doc_count-1)
                else:
                    sample = wavedec(sample, self.wavelet, level=5)
                    sample = [i/10 for i in list(sample[0])]
                    self._write_sample(lt_file, sample)
                    lt_file.write(";")
                doc_count += 1
            for sample in self.test_set:
                sample = wavedec(sample, self.wavelet, level=5)
                sample = [i/10 for i in list(sample[0])]
                self._write_sample(lt_file, sample)
                lt_file.write(";")
        with open("%ssets/target_set.txt" % settings.MEDIA_ROOT, "w") as target_doc:
            self._write_sample(target_doc, self.targets)
Example #3
    def fit(self):
        '''
        First finds the wavelet in self.types that fits the data best, i.e. the one
        with the smallest Euclidean distance between the approximation signal and
        the under-sampled signal.
        After finding the best wavelet, uses it to decompose the data into
        approximation and detail signals, and builds a model per signal (an AR(p)
        model would use the corresponding entry of self.order as p).
        '''
        self.bestType = 'db1'
        bestDist = np.inf

        for t in self.types:
            approx_levels = pywt.wavedec(self.data, wavelet=t, level=self.levels)
            idx = np.int_(np.linspace(0, self.data.shape[0]-1, num=len(approx_levels[0])))
            samples = self.data[idx]
            dist = np.linalg.norm(approx_levels[0]-samples)
            if dist < bestDist:
                bestDist = dist
                self.bestType = t
                
        self.coefs = pywt.wavedec(self.data, wavelet=self.bestType, level=self.levels)
        for i in range(len(self.order)):
#             model = AR_model(self.coefs[i], order=self.order[i])
            model = Markov_model(self.coefs[i])  # use the best wavelet's coefficients, not the last one tried
            model.fit()
            self.models.append(model)
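The wavelet-selection step in fit() can be read in isolation; a minimal standalone sketch (best_wavelet is an illustrative name, not part of the original class):

import numpy as np
import pywt

def best_wavelet(data, types=('db1', 'db2', 'sym4'), levels=2):
    """Return the wavelet whose level-`levels` approximation is closest
    (Euclidean distance) to an equally spaced under-sampling of `data`."""
    best_type, best_dist = types[0], np.inf
    for t in types:
        approx = pywt.wavedec(data, wavelet=t, level=levels)[0]
        idx = np.int_(np.linspace(0, data.shape[0] - 1, num=len(approx)))
        dist = np.linalg.norm(approx - data[idx])
        if dist < best_dist:
            best_dist, best_type = dist, t
    return best_type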
Example #4
def wavedec_lin(x,wavelet,mode="per",level=None,return_sl=False): 
    """Performs a wavelet-decomposition using pywt.wavedec, but returns the coefficients
    as one 1d-array.
    Uses different default-values for mode and level.
    Behaviour for the case "len(x) not a power of 2" is not complete!"""       
    assert len(x.shape) in [1,2], "Only 1d and 2d-arrays supported up to now!"
    if level == None:
        level = int(round(n.log2(x.shape[0])))
    if len(x.shape) == 1:
        wts = wavedec(x,wavelet,mode=mode,level=level)
        sc_lengths = [len(x) for x in wts]
        rv = n.zeros((sum(sc_lengths)),"d")
        for j in range(len(wts)):
            offset = sum(sc_lengths[:j])
            rv[offset:offset+sc_lengths[j]]=wts[j]
    else: #len(x.shape)==2
        wts = wavedec(x[:,0],wavelet,mode=mode,level=level)
        sc_lengths = [len(i) for i in wts]
        rv = n.zeros((sum(sc_lengths),x.shape[1]),"d")
        for i in range(x.shape[1]):
            if i>0:
                wts = wavedec(x[:,i],wavelet,mode=mode,level=level)
            for j in range(len(wts)):
                offset = sum(sc_lengths[:j])
                rv[offset:offset+sc_lengths[j],i]=wts[j]
    if return_sl:
        return rv, sc_lengths
    else:
        return rv
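To undo the flattening in the 1-D case, the section lengths returned with return_sl=True can rebuild the coefficient list for pywt.waverec; a sketch (waverec_lin is a hypothetical inverse, not part of the original module):

import pywt

def waverec_lin(rv, sc_lengths, wavelet, mode="per"):
    """Split the flat coefficient array back into sections and reconstruct.
    `mode` must match the mode used in the forward wavedec_lin call."""
    coeffs, offset = [], 0
    for ln in sc_lengths:
        coeffs.append(rv[offset:offset + ln])
        offset += ln
    return pywt.waverec(coeffs, wavelet, mode=mode)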
Example #5
def wavepower_db_secondhalf(x,wavelet,mode="per",level=None):
    """Unused? Maybe remove it."""
    if level is None:
        level = int(round(n.log2(x.shape[0])))
    if len(x.shape) == 1:
        wts = wavedec(x, wavelet, mode=mode, level=level)
        sc_lengths = [len(c) for c in wts]
        rv = n.zeros(sum(sc_lengths) // 2, "d")
        for j in range(len(wts)):
            wts_power = wts[j]**2
            idx_norm = max(0, int(wts_power.shape[0] / 2) - 1)
            #print("idx_norm:", idx_norm)
            wts_power /= wts_power[idx_norm]
            wts_power = 10 * n.log10(wts_power)  # power ratio in decibels
            offset = sum(sc_lengths[:j]) // 2
            print(rv[offset:offset + sc_lengths[j] // 2].shape, wts_power[idx_norm + 1:].shape)
            rv[offset:offset + sc_lengths[j] // 2] = wts_power[idx_norm + 1:]
    else:  # len(x.shape) == 2
        raise NotImplementedError("2d input is not supported yet")
        # unreachable sketch below references undefined `normalise`/`norm_fraction`
        wts = wavedec(x[:,0],wavelet,mode=mode,level=level)
        sc_lengths = [len(i) for i in wts]
        rv = n.zeros((sum(sc_lengths),x.shape[1]),"d")
        for i in range(x.shape[1]):
            if i>0:
                wts = wavedec(x[:,i],wavelet,mode=mode,level=level)
            for j in range(len(wts)):
                wts_power = wts[j]**2
                if normalise:
                    idx_norm = max( 0 , int(wts_power.shape[0]*norm_fraction)-1 )
                    wts_power /= wts_power[idx_norm]
                offset = sum(sc_lengths[:j])
                rv[offset:offset+sc_lengths[j],i]=wts_power[:]
    return rv
Example #6
def _weighted_retrieve(data, genome, loci, prediction_steps, spinup, weight_func):
    l = loci
    (temps, loads) = (data['Temperature'], data['Load'])

    idx = _grow_tree(data, genome, loci, prediction_steps)
    window = genome[l.hindsight]
    test_starts, test_ends = _get_test_period(data)
    
    query_loads_norm = sg.utils.Normalizer(loads[test_starts-window:test_starts])
    query_loads = [item for sublist in pywt.wavedec(query_loads_norm.normalized, 'haar')
                   for item in sublist][:idx.properties.dimension // 2]

    query_temps_norm = sg.utils.Normalizer(temps[test_starts-window:test_starts])
    query_temps = [item for sublist in pywt.wavedec(query_temps_norm.normalized, 'haar')
                   for item in sublist][:idx.properties.dimension // 2]

    query = weight_func(query_loads) + weight_func(query_temps)
    # If we find matches where there is a gap in the timeseries (which will throw an exception), we look for the next best match.
    num_matches = 1
    while True:
        match_date = list(idx.nearest(tuple(query), num_matches, objects="raw"))[-1]
        end_date = match_date + dt(hours=genome[l.hindsight]+prediction_steps - 1)
        period = sg.utils.Normalizer(loads[match_date:end_date]).normalized
        prediction = query_loads_norm.expand(period[-prediction_steps:])
        try:
            result = pd.Series(data=prediction.values,
                               index=data[test_starts:test_ends+1].index)
            idx.close()
            return result
        except Exception:
            num_matches += 1
            print('Time gap encountered, we will try match number', num_matches)
Example #7
def decomposeField(dataframe, fieldName, groupFieldName, maxCoef):
    coeffs = {} #Coefficients for each group
    maxLen = 0  #Largest list of coefficients

    #Grouped Case
    try:
        grouped = dataframe.groupby(groupFieldName)
        for name, group in grouped:
            #Collect coefficients for each group
            coeffs[name] = pywt.wavedec(group[fieldName], 'db1', level=2)[0].tolist()[:maxCoef]
            maxLen = max(maxLen, len(coeffs[name]))
    #Non-grouped case
    except KeyError:
        #No group.  One row of coefficients
        coeffs[0] = pywt.wavedec(dataframe[fieldName], 'db1', level=2)[0].tolist()[:maxCoef]
        maxLen = len(coeffs[0])

    # Ensures all rows of coefficients are the same length.  
    # Populates anything shorter with nan
    for coef in coeffs:
        coeffs[coef] = coeffs[coef] + [float('nan')]*(maxLen-len(coeffs[coef]))
    #Assign names of coefficients using the original field name as a prefix
    names = [fieldName + str(i) for i in range(maxLen)]    #note change from original

    #Transpose & return
    coeffD = pd.DataFrame(coeffs)
    coeffT = coeffD.T
    coeffT.columns = names
    return coeffT
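A possible invocation (illustrative data; assumes pandas and pywt are imported as in the snippet):

import pandas as pd
df = pd.DataFrame({'load': [float(i) for i in range(16)],
                   'site': ['a'] * 8 + ['b'] * 8})
features = decomposeField(df, 'load', 'site', maxCoef=4)
# one row per group ('a', 'b'); columns are named load0, load1, ...
# and shorter rows are padded with NaN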
Example #8
File: generate.py  Project: aelred/pleased
    def wave_ica(x):
        ica = decomposition.FastICA(max_iter=10000)
        # calculate wavelet transform of each
        w1 = pywt.wavedec(x[:, 0], 'haar', level=12)
        w2 = pywt.wavedec(x[:, 1], 'haar', level=12)
        # calculate ica between components
        t = [ica.fit_transform(np.array([lev1, lev2]).T)
             for lev1, lev2 in zip(w1, w2)]
        return np.array(t)
Example #9
def wavelet_levels(Y):
    w = pywt.Wavelet('sym2')
    levels = pywt.dwt_max_level(Y.shape[0], w)
    w0 = pywt.wavedec(Y[:, 0], w, level=levels)[1:]
    L = [np.empty((Y.shape[1], len(x))) for x in w0]
    for i in range(Y.shape[1]):
        wd = pywt.wavedec(Y[:, i], w)[1:]
        for j, x in enumerate(wd):
            L[j][i, :] = x
    return L, [Y.shape[0] // len(x) for x in w0]
Example #10
File: extract.py  Project: spyke/spyke
    def extract_all_wcs_by_maxchan(self, wavelet='haar'):
        """Extract wavelet coefficients from all spikes, store them as spike attribs.
        Find optimum coeffs for each chan, then average across all chans to find
        globally optimum coeffs"""
        # TODO: add multiprocessing
        nkeep = 5 # num of top wavelet coeffs to keep
        sort = self.sort
        spikes = sort.spikes # struct array
        wavedata = sort.wavedata
        nspikes = len(spikes)
        #ncoeffs = 53 # TODO: this only applies for V of length 50, stop hardcoding
        #ncoeffs = len(self.ksis)
        nt = wavedata.shape[2]
        ncoeffs = len(np.concatenate(pywt.wavedec(wavedata[0, 0], wavelet)))

        wcs = {}
        maxchans = np.unique(spikes['chan'])
        nmaxchans = len(maxchans)
        for maxchan in maxchans:
            wcs[maxchan] = [] # init dict of lists, indexed by spike maxchan
        flatwcs = np.zeros((nspikes, ncoeffs))

        t0 = time.time()
        for spike, wd in zip(spikes, wavedata):
            nchans = spike['nchans']
            chans = spike['chans'][:nchans]
            maxchan = spike['chan']
            maxchani = int(np.where(chans == maxchan)[0])
            #chanis = det.chans.searchsorted(chans) # det.chans are always sorted
            #wd = wd[:nchans] # unnecessary?
            V = wd[maxchani]
            coeffs = np.concatenate(pywt.wavedec(V, wavelet)) # flat array of wavelet coeffs
            wcs[maxchan].append(coeffs)
            flatwcs[spike['id']] = coeffs
        ks = np.zeros((nmaxchans, ncoeffs))
        p = np.zeros((nmaxchans, ncoeffs))
        for maxchani, maxchan in enumerate(maxchans):
            wcs[maxchan] = np.asarray(wcs[maxchan])
            for i in range(ncoeffs):
                ks[maxchani, i], p[maxchani, i] = scipy.stats.kstest(wcs[maxchan][:, i], 'norm')
        ## TODO: weight the KS value from each maxchan according to the nspikes for that
        ## maxchan!!!!!
        ks = ks.mean(axis=0)
        p = p.mean(axis=0)
        ksis = ks.argsort()[::-1] # ks indices sorted from biggest to smallest ks values
        # assign as params in spikes struct array
        for coeffi in range(nkeep): # assign first nkeep
            spikes['w%d' % coeffi] = flatwcs[:, ksis[coeffi]]
        print("Extracting wavelet coefficients from all %d spikes took %.3f sec" %
             (nspikes, time.time()-t0))
        return wcs, flatwcs, ks, ksis, p
Example #11
        def dot(self, mat):
            m = []

            if mat.shape[0] != mat.size:
                for i in range(mat.shape[1]):
                    c = pywt.wavedec(mat[:, i], self.name, level=self.level)
                    self.sizes.append(list(map(len, c)))
                    c = np.concatenate(c)
                    m.append(c)
                return np.asarray(m).T
            else:
                c = pywt.wavedec(mat, self.name, level=self.level)
                self.sizes.append(list(map(len, c)))
                return np.concatenate(c)
Example #12
def dwt_eeg_video(video_eeg_data, electrode_count, electrode_indexes):
    """
    Use Discrete wavelet transform (DWT) to compute alpha and beta band of signal. Compute power of alpha and beta band
    and also valence and arousal values.
    :param video_eeg_data: eeg signal
    :param electrode_count: number of electrodes
    :param electrode_indexes: indexes of electrodes, usually just range(0, electrode_count)
    :return: array of floats, shape [electrode_count*2 + 2, 1]
             power of alpha and beta band of individual electrodes, valence and arousal values computed from eeg signal
    notes: this function should be split into more in the future
    """

    data_final = np.empty(electrode_count*2 + 2)

    alphaArray = []
    betaArray = []
    counter = 0
    for electrodeIndex in electrode_indexes:
        coeffs = pywt.wavedec(video_eeg_data[electrodeIndex], 'db2', level=3)
        a3, d3, d2, d1 = coeffs

        coeffs = pywt.wavedec(d3, 'db2', level=1)

        alpha, beta = coeffs
        alphaArray.append(power_of_signal(alpha))
        data_final[counter] = power_of_signal(alpha)

        beta = pywt.idwt(d2, sig.resample(beta, len(d2)), 'db2')
        betaArray.append(power_of_signal(beta))
        data_final[counter+1] = power_of_signal(beta)

        counter += 2

    F3alpha = alphaArray[0]
    F4alpha = alphaArray[1]
    AF3alpha = alphaArray[2]
    AF4alpha = alphaArray[3]
    F3beta = betaArray[0]
    F4beta = betaArray[1]
    AF3beta = betaArray[2]
    AF4beta = betaArray[3]

    valence = (F4alpha/F4beta) - (F3alpha/F3beta)
    arousal = (F3beta+F4beta+AF3beta+AF4beta) / (F3alpha+F4alpha+AF3alpha+AF4alpha)

    data_final[counter] = valence
    data_final[counter+1] = arousal

    return data_final
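power_of_signal is not shown in this snippet; a common choice is the mean squared amplitude, e.g. (an assumption, not the original helper):

import numpy as np

def power_of_signal(x):
    x = np.asarray(x)
    return np.sum(x ** 2) / x.size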
Example #13
File: MRA.py  Project: plant/envmixer
    def calculate_mra(self, wavelet='db10', mode='per'):
        """
        Creates an MRA wavelet tree on the recording.
        
        Args:
            wavelet (str): wavelet to use. Any string supported by PyWavelets will work.
            mode (str): method for handling overrun. Default "per," start over at the beginning of the waveform
                (periodic).
        """
        
        self.wavelet, self.mode = wavelet, mode
        self.dwt = pywt.wavedec(self.wav, wavelet, mode=mode, level=int(np.log2(len(self.wav))) + 1)
        
        self.root = None
        self.nodes = []
        self.wavelet = wavelet
        self.mode = mode
        parents = [None]

        for i in range(len(self.dwt)):
            nodes = []

            for j in range(len(self.dwt[i])):
                if j > 0:
                    nodes.append(MRANode(self.dwt[i][j], parents[j // 2], nodes[j - 1], j % 2, i))
                else:
                    nodes.append(MRANode(self.dwt[i][j], parents[j // 2], None, j % 2, i))

            nodes[0].predecessor = nodes[-1]
            parents = nodes
            self.nodes.extend(nodes)
            if i == 0: self.root = nodes[0]
Example #14
def wave_semisoft_filter(y, sigma, tau, w, mu):
    coeffs = pywt.wavedec(y, w)
    threshold = sigma * tau
    hcoeffs = []
    for scale, x in enumerate(coeffs):
        hcoeffs.append(thresholding_semisoft(x, threshold, mu))
    return pywt.waverec(hcoeffs, w)
Example #15
File: dwt_1.py  Project: BossKwei/temp
def func_2():
    Fs = 5000
    T = 1 / Fs
    N = 2000

    t = np.linspace(0, N * T, N)
    y = np.sin(2 * np.pi * 10 * t)# + 0.1 * np.sin(2 * np.pi * 300 * t)
    y[500:1000] += 0.1 * np.sin(2 * np.pi * 500 * t[500:1000])

    [cA3, cD3, cD2, cD1] = pywt.wavedec(y, wavelet='db1', level=3)
    # Note: a single idwt only inverts one transform level, so these
    # reconstructions are shorter than y (each level halves the length).
    A3 = pywt.idwt(cA=cA3, cD=None, wavelet='db1')
    D3 = pywt.idwt(cA=None, cD=cD3, wavelet='db1')
    D2 = pywt.idwt(cA=None, cD=cD2, wavelet='db1')
    D1 = pywt.idwt(cA=None, cD=cD1, wavelet='db1')

    plt.subplot(511)
    plt.plot(y)
    plt.subplot(512)
    plt.plot(A3)
    plt.subplot(513)
    plt.plot(D1)
    plt.subplot(514)
    plt.plot(D2)
    plt.subplot(515)
    plt.plot(D3)

    plt.show()
Example #16
def task_2():
    dwt_haar_8_2lvl = find_multilevel_dwt_matrix(8, 2, 'haar')
    x = np.array([1.0, 2.0, 3.0, 2.0, 1.0, 3.0, 3.0, 3.0])
    x_dwt = np.dot(dwt_haar_8_2lvl, x)

    dwt_haar_4 = find_dwt_matrix(4, 'haar')
    dwt_haar_8 = find_dwt_matrix(8, 'haar')
    x_dwt_man_lvl1 = np.dot(dwt_haar_8, x)
    x_dwt_man = np.concatenate(
        (
            np.dot(dwt_haar_4, x_dwt_man_lvl1[:4]),
            x_dwt_man_lvl1[4:]
        ),
        axis=0
    )

    print()
    print("2-level DWT matrix:")
    print(dwt_haar_8_2lvl)

    print()
    print("x:")
    print(x)

    print()
    print("2-level DWT of x with matrix:")
    print(x_dwt)

    print()
    print("2-level DWT of x manually:")
    print(x_dwt_man)

    print()
    print("2-level DWT of x with pywt:")
    print(pywt.wavedec(x, 'haar', mode='periodic', level=2))  # level matches the 2-level manual computation
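find_dwt_matrix and find_multilevel_dwt_matrix are defined elsewhere; a minimal sketch of the single-level version, built from impulse responses as in the transform-matrix examples further down (the periodization mode is an assumption):

import numpy as np
import pywt

def find_dwt_matrix(n, wavelet):
    """n x n matrix M such that M @ x == concat(pywt.dwt(x))."""
    mat = np.zeros((n, n))
    for i in range(n):
        e = np.zeros(n)
        e[i] = 1.0
        cA, cD = pywt.dwt(e, wavelet, mode='periodization')
        mat[:, i] = np.concatenate([cA, cD])
    return mat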
Example #17
def wave_stein_filter(y, sigma, tau, w):
    coeffs = pywt.wavedec(y, w)
    threshold = sigma * tau
    hcoeffs = []
    for scale, x in enumerate(coeffs):
        hcoeffs.append(stein_thresholding(x, threshold))
    return pywt.waverec(hcoeffs, w)
Example #18
    def Wavelet(self, column, wavelet='db4'):
        #Decompose the time series using wavelets

        #Get the column index
        index = self.Get_Index(column)

        self.wavelet_coefs = pywt.wavedec(self.data[:, index], wavelet)
Example #19
def wavelet_process_mat(x, level = 8, wavelet = 'haar'):
  
  wavelet_coefs = []
  for channel in range(16):
    coefs = pywt.wavedec(x[channel, :], wavelet, mode = 'sym', level = level)
    wavelet_coefs.append(coefs[0])
  return np.array(wavelet_coefs, dtype = 'float32')
Example #20
def perform_haar_wavelet(wf2, n_components, level, std_restrict):

    coeffs = []
    for i in range(wf2.shape[0]):
        coeff = np.concatenate(pywt.wavedec(wf2[i, :], "haar", mode="sym", level=level))
        if i == 0:
            coeffs = np.empty((wf2.shape[0], coeff.shape[0]))
        coeffs[i, :] = coeff

    keep = np.sum(coeffs == 0.0, axis=0) != wf2.shape[0]
    coeffs = coeffs[:, keep]
    # calculate the KS statistic for every coefficient
    ks_score = np.zeros((coeffs.shape[1]))
    for j in range(coeffs.shape[1]):
        # keep only coefficients inside m +/- std_restrict standard deviations
        s = np.std(coeffs[:, j], axis=0) * std_restrict
        m = np.mean(coeffs[:, j], axis=0)
        ind_selected = (coeffs[:, j] >= m - s) & (coeffs[:, j] <= m + s)
        if np.sum(ind_selected) >= 10:
            x = coeffs[ind_selected, j]
            zscored = (x - np.mean(x)) / np.std(x)
            D, pvalue = stats.kstest(zscored, "norm")
            ks_score[j] = D

    # keep only the best ones
    ind_sorted = np.argsort(ks_score)[::-1]
    ind_sorted = ind_sorted[:n_components]

    features = coeffs[:, ind_sorted]

    names = np.array(["wt {}".format(n) for n in range(features.shape[1])], dtype="U")

    return features, names
Example #21
    def denoise(self, data, wavelet):
        noiseSigma = median(absolute(data - median(data))) / 0.6745
        levels = int(floor(log(len(data))))
        WC = pywt.wavedec(data, wavelet, level=levels)
        threshold = noiseSigma * sqrt(2 * log(len(data)))
        NWC = [pywt.threshold(x, threshold, mode='hard') for x in WC]
        return pywt.waverec(NWC, wavelet)
Example #22
File: wavelet.py  Project: NikEfth/odl
    def _call(self, x):
        """Compute the discrete wavelet transform.

        Parameters
        ----------
        x : `DiscreteLpVector`

        Returns
        -------
        arr : `numpy.ndarray`
            Flattened and concatenated coefficient array
            The length of the array depends on the size of input image to
            be transformed and on the chosen wavelet basis.
        """
        if x.space.ndim == 1:
            coeff_list = pywt.wavedec(x, self.wbasis, self.mode, self.nscales)
            coeff_arr = pywt_coeff_to_array(coeff_list, self.size_list)
            return self.range.element(coeff_arr)

        if x.space.ndim == 2:
            coeff_list = pywt.wavedec2(x, self.wbasis, self.mode, self.nscales)
            coeff_arr = pywt_coeff_to_array(coeff_list, self.size_list)
            return self.range.element(coeff_arr)

        if x.space.ndim == 3:
            coeff_dict = wavelet_decomposition3d(x, self.wbasis, self.mode,
                                                 self.nscales)
            coeff_arr = pywt_coeff_to_array(coeff_dict, self.size_list)

            return self.range.element(coeff_arr)
Example #23
	def filterData(self, icurr, Fs):
		"""
			Denoise an ionic current time-series and store it in self.eventData

			:Parameters:
				- `icurr` :	ionic current in pA
				- `Fs` :	original sampling frequency in Hz
		"""
		# self.eventData=icurr
		self.Fs=Fs

		# Set up the wavelet
		w=pywt.Wavelet(self.waveletType)

		# Calculate the maximum wavelet level for the data length
		self.maxWaveletLevel=pywt.dwt_max_level(len(icurr), filter_len=w.dec_len)

		# Perform a wavelet decomposition to the specified level
		wcoeff = pywt.wavedec(icurr, w, mode='sym', level=self.waveletLevel)

		# Perform a simple threshold by setting all the detailed coefficients
		# up to level n-1 to zero
		thresh=np.std(wcoeff[-1])*self._thselect(wcoeff, self.waveletThresholdSubType)
		thrfunc=self.thrtypedict[self.waveletThresholdType]

		# print thresh, np.std(wcoeff[-1])
		wcoeff[1:] = [ thrfunc(wc, thresh) for wc in wcoeff[1:] ]
		# for i in range(1, self.waveletLevel):
		# 	wcoeff[-i]=np.zeros(len(wcoeff[-i]))

		# Reconstruct the signal with the thresholded wavelet coefficients
		self.eventData = pywt.waverec(wcoeff, self.waveletType, mode='sym')
Example #24
def soft_threshold(bins, tau, level=1, plot=False):
        coeffs = pywt.wavedec(bins, 'haar', level=level)
        old_coeffs = copy.deepcopy(coeffs) if plot else None
        coeffs[1:] = [sign(a) * np.maximum(0, abs(a)-tau) for a in coeffs[1:] ]
        filtered = pywt.waverec(coeffs, 'haar')

        if plot:
                from matplotlib import pyplot as pl
                fig = pl.figure(figsize=(15,8))
                fig.suptitle("Level %d Wavelet Decomposition" % level)
                fig.subplots_adjust(hspace=0.3, wspace=0.3)

                ax = fig.add_subplot(2,1,1)
                ax.plot(bins, label='Raw')
                ax.plot(filtered, label='Filtered')
                ax.legend()

                ax = fig.add_subplot(2,level,1+level)
                ax.set_title("Approximation coefficients")
                ax.plot(coeffs[0])
                for l in range(1,level):
                        ax = fig.add_subplot(2, level, 1+l+level)
                        ax.plot(old_coeffs[l])
                        ax.axhline(-tau, color='g'); ax.axhline(+tau, color='g')
                        ax.set_title("Level %d detail coefficients" % l)

                fig.savefig('wavelet-analysis.pdf')

        return filtered
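A usage sketch (assumes numpy, pywt and copy are imported at module level, and that `sign` is numpy.sign):

import copy
import numpy as np
import pywt
sign = np.sign  # the snippet above expects a module-level `sign`

counts = np.random.poisson(20, 128).astype(float)
smoothed = soft_threshold(counts, tau=3.0, level=2)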
Example #25
File: wt_fun.py  Project: ftilmann/miic
    def filter(self):

        if self.level > self.max_dec_level():
            clevel = self.max_dec_level()
        else:
            clevel = self.level

        # decompose
        coeffs = pywt.wavedec(self.sig, pywt.Wavelet(self.wt), \
                              mode=self.mode, \
                              level=clevel)

        # threshold evaluation
        th = sqrt(2 * log(len(self.sig)) * power(self.sigma, 2))

        # thresholding
        for (i, cAD) in enumerate(coeffs):
            if i == 0:
                continue
            coeffs[i] = sign(cAD) * pywt.threshold(abs(cAD), th, mode='less')  # keep coefficients with magnitude below th

        # reconstruct
        rec_sig = pywt.waverec(coeffs, pywt.Wavelet(self.wt), mode=self.mode)
        if len(rec_sig) == (len(self.sig) + 1):
            self.sig = rec_sig[:-1]
Example #26
File: wt_fun.py  Project: ftilmann/miic
    def filter(self, mode='soft'):

        if self.level > self.max_dec_level():
            clevel = self.max_dec_level()
        else:
            clevel = self.level

        # decompose
        coeffs = pywt.wavedec(self.sig, pywt.Wavelet(self.wt), \
                              mode=self.mode, \
                              level=clevel)

        # threshold evaluation
        th = sqrt(2 * log(len(self.sig)) * power(self.sigma, 2))

        # thresholding
        for (i, cAD) in enumerate(coeffs):
            if mode == 'soft':
                coeffs[i] = pywt.threshold(cAD, th, mode='soft')
            elif mode == 'hard':
                coeffs[i] = pywt.threshold(cAD, th, mode='hard')

        # reconstruct
        rec_sig = pywt.waverec(coeffs, pywt.Wavelet(self.wt), mode=self.mode)
        if len(rec_sig) == (len(self.sig) + 1):
            self.sig = rec_sig[:-1]
Example #27
def ApproximationsV(data, family, levels):
    """
    get approximation reconstrutions at different levels
    for a DWT family.
    returns levels+1 arrays with A[0]=full reconstruction,
    and A[1]=first approx, A[levels] is smoothest
    """
    # subtract off mean data
    meandata=np.mean(data)
    #meandata=0.0
    # get DWT coefficients
    coeffs = pywt.wavedec(data-meandata, family, mode='sym',level=Nlevels)
    lcoeffs=len(coeffs)
    for i,l in enumerate(coeffs):
        vl=np.var(l)
        l[:]=vl
        coeffs[i]=l
    #print "len coeffs",lcoeffs
    #for i in coeffs: print len(i)
    # reconstruct approximations
    A = []
    c = pywt.waverec(coeffs, family, mode='symmetric')
    A.append(np.array(c) + meandata)
    for j in range(levels, 0, -1):
        coeffs[j][0:] = 0.0
        c = pywt.waverec(coeffs, family, mode='symmetric')
        A.append(np.array(c) + meandata)
    return A
Example #28
File: features.py  Project: btel/SpikeSort
def WT(data, wavelet, mode='symmetric'):
    """Performs a batch 1D wavelet transform.

    Parameters
    ----------
    data : array
        (n_vars, n_obs, n_contacts) array where `n_vars` is the number of
        variables (vector dimensions), `n_obs` the number of observations
        and `n_contacts` is the number of contacts. Only 3D arrays are
        accepted.
    wavelet : string or pywt.Wavelet
        wavelet to be used to perform the transform
    mode : string, optional
        signal extension mode (see modes in PyWavelets documentation)

    Returns
    -------
    data : array
        (n_coeffs, n_obs, n_contacts) 1D wavelet transform of each vector
        of the input data array. `pywt.wavedec` is used to perform the
        transform. For every vector of the input array, a 1D transformation
        is returned of the form [cA_n, cD_n, cD_n-1, ..., cD_n2, cD_n1]
        where cA_n and cD_n are approximation and detailed coefficients of
        level n. cA_n and cD_n's are stacked together in a single vector.

    Notes
    -----

    PyWavelets documentation contains more detailed information on the
    wavelet transform.

    """
    # TODO: complete docstring, add wavelet type checking,
    # think about dependencies

    def full_coeff_len(datalen, filtlen, mode):
        max_level = wt.dwt_max_level(datalen, filtlen)
        total_len = 0

        for i in range(max_level):
            datalen = wt.dwt_coeff_len(datalen, filtlen, mode)
            total_len += datalen

        return total_len + datalen

    if not isinstance(wavelet, wt.Wavelet):
        wavelet = wt.Wavelet(wavelet)

    n_samples = data.shape[0]
    n_spikes = data.shape[1]
    n_contacts = data.shape[2]
    n_features = full_coeff_len(n_samples, wavelet.dec_len, mode)
    new_data = np.empty((n_features, n_spikes, n_contacts))

    for i in range(n_spikes):
        for c in range(n_contacts):
            coeffs = wt.wavedec(data[:, i, c], wavelet, mode)
            new_data[:, i, c] = np.hstack(coeffs)

    return new_data
Example #29
def haar(vals, p=80):
	hC = pywt.wavedec(vals,'haar')
	cutVal = np.percentile(np.abs(np.concatenate(hC)), p)
	for A in hC:
		A[np.abs(A) < cutVal] = 0
	tVals = pywt.waverec(hC,'haar')
	return tVals[:len(vals)]
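A quick smoke test (illustrative):

import numpy as np
x = np.sin(np.linspace(0, 8 * np.pi, 300)) + 0.1 * np.random.randn(300)
smoothed = haar(x, p=90)  # zero the smallest 90% of Haar coefficients
assert len(smoothed) == len(x)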
Example #30
def dwtTransform(x):
    xdwt = []
    coeffs = pywt.wavedec(x,'haar')
    for coeff in coeffs:
        for a in coeff:
            xdwt.append(a)
    return xdwt
Example #31
def generate_feature_vectors(TS):
    TSwfv = {}  # Wavelet feature vector
    TSsse = {}  # Sum of Square Errors
    TSrec = {}  # reconstructed time series
    for TSname in TS:
        cL2 = pywt.wavedec(TS[TSname], wavelettype, level=2)
        cL4 = pywt.wavedec(TS[TSname], wavelettype, level=4)
        cL6 = pywt.wavedec(TS[TSname], wavelettype, level=6)
        cL8 = pywt.wavedec(TS[TSname], wavelettype, level=8)
        cL10 = pywt.wavedec(TS[TSname], wavelettype, level=10)
        # Generate array of feature vectors for this time series
        TSwfv[TSname] = [list(cL2[0])+list(cL2[1]), \
         list(cL4[0])+list(cL4[1]), \
         list(cL6[0])+list(cL6[1]), \
         list(cL8[0])+list(cL8[1]), \
         list(cL10[0])+list(cL10[1])]
        #print len(TSwfv[TSname][0])
        #print len(TSwfv[TSname][1])
        #print len(TSwfv[TSname][2])

        #print "TSwfv[{0}]:{1}".format(TSname,TSwfv[TSname])

        # Generate array of SSE values for this time series
        TSsse[TSname] = []

        # Level 2
        # print len(cL2), len(cL4), len(cL6), len(cL8), len(cL10)
        c = [cL2[0], cL2[1]]
        for i in range(2, len(cL2)):
            c.append([0] * len(cL2[i]))
        TSrecL2 = pywt.waverec(c, wavelettype)
        sse = sum([(TS[TSname][i] - TSrecL2[i])**2
                   for i, v in enumerate(TSrecL2)])
        TSsse[TSname].append(sse)

        # Level 4
        c = [cL4[0], cL4[1]]
        for i in range(2, len(cL4)):
            c.append([0] * len(cL4[i]))
        TSrecL4 = pywt.waverec(c, wavelettype)
        sse = sum([(TS[TSname][i] - TSrecL4[i])**2
                   for i, v in enumerate(TSrecL4)])
        TSsse[TSname].append(sse)

        # Level 6
        c = [cL6[0], cL6[1]]
        for i in range(2, len(cL6)):
            c.append([0] * len(cL6[i]))
        TSrecL6 = pywt.waverec(c, wavelettype)
        sse = sum([(TS[TSname][i] - TSrecL6[i])**2
                   for i, v in enumerate(TSrecL6)])
        TSsse[TSname].append(sse)

        # Level 8
        c = [cL8[0], cL8[1]]
        for i in range(2, len(cL8)):
            c.append([0] * len(cL8[i]))
        TSrecL8 = pywt.waverec(c, wavelettype)
        sse = sum([(TS[TSname][i] - TSrecL8[i])**2
                   for i, v in enumerate(TSrecL8)])
        TSsse[TSname].append(sse)

        TSrec[TSname] = [TSrecL2, TSrecL4, TSrecL6, TSrecL8]

        # Plotting
        if TSname == 'TS.AMZN':
            print([len(x) for x in cL2])
            print([len(x) for x in cL4])
            print([len(x) for x in cL6])
            print([len(x) for x in cL8])
            lenL2 = str(len(cL2[0]) + len(cL2[1]))
            lenL4 = str(len(cL4[0]) + len(cL4[1]))
            lenL6 = str(len(cL6[0]) + len(cL6[1]))
            lenL8 = str(len(cL8[0]) + len(cL8[1]))
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.set_xlabel('Time index (1 = 4/20/2011)')
            ax.set_ylabel('Closing price (normalized)')
            ax.plot(TS[TSname], label=TSname)
            LL2 = TSname + ' (L2) len: ' + lenL2 + ' SSE: ' + '%3.2f' % TSsse[
                TSname][0]
            LL4 = TSname + ' (L4) len: ' + lenL4 + ' SSE: ' + '%3.2f' % TSsse[
                TSname][1]
            LL6 = TSname + ' (L6) len: ' + lenL6 + ' SSE: ' + '%3.2f' % TSsse[
                TSname][2]
            LL8 = TSname + ' (L8) len: ' + lenL8 + ' SSE: ' + '%3.2f' % TSsse[
                TSname][3]
            ax.plot(TSrecL2, label=LL2)
            ax.plot(TSrecL4, label=LL4)
            ax.plot(TSrecL6, label=LL6)
            ax.plot(TSrecL8, label=LL8)
            plt.legend(prop={'size': 10}, loc=0)
            plt.savefig(TSname + '.pdf', edgecolor='b', format='pdf')

    return TSwfv, TSsse, TSrec
Example #32
def _get_transf_matrix(
        n: int,
        transform_type: str,
        dec_levels: int = 0,
        flip_hardcoded: bool = False) -> (np.ndarray, np.ndarray):
    """
    Create forward and inverse transform matrices, which allow for perfect
    reconstruction. The forward transform matrix is normalized so that the
    l2-norm of each basis element is 1.
    Includes hardcoded transform matrices which are kept for matlab compatibility

    :param n: Transform size (nxn)
    :param transform_type: Transform type 'dct', 'dst', 'hadamard', or anything that is
                           supported by 'wavedec'
                           'DCrand' -- an orthonormal transform with a DC and all
                           the other basis elements of random nature
    :param dec_levels:  If a wavelet transform is generated, this is the
                           desired decomposition level. Must be in the
                           range [0, log2(N)-1], where "0" implies
                           full decomposition.
    :param flip_hardcoded: Return transpose of the hardcoded matrices.
    :return: (forward transform, inverse transform)
    """

    if n == 1:
        t_forward = 1
    elif transform_type == 'hadamard':
        t_forward = hadamard(n)
    elif n == 8 and transform_type == 'bior1.5':
        t_forward = [[
            0.343550200747110, 0.343550200747110, 0.343550200747110,
            0.343550200747110, 0.343550200747110, 0.343550200747110,
            0.343550200747110, 0.343550200747110
        ],
                     [
                         -0.225454819240296, -0.461645582253923,
                         -0.461645582253923, -0.225454819240296,
                         0.225454819240296, 0.461645582253923,
                         0.461645582253923, 0.225454819240296
                     ],
                     [
                         0.569359398342840, 0.402347308162280,
                         -0.402347308162280, -0.569359398342840,
                         -0.083506045090280, 0.083506045090280,
                         -0.083506045090280, 0.083506045090280
                     ],
                     [
                         -0.083506045090280, 0.083506045090280,
                         -0.083506045090280, 0.083506045090280,
                         0.569359398342840, 0.402347308162280,
                         -0.402347308162280, -0.569359398342840
                     ],
                     [0.707106781186550, -0.707106781186550, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0.707106781186550, -0.707106781186550, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0.707106781186550, -0.707106781186550, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0.707106781186550, -0.707106781186550]]
        if flip_hardcoded:
            t_forward = np.array(t_forward).T

    elif n == 8 and transform_type == 'dct':
        t_forward = [
            [
                0.353553390593274, 0.353553390593274, 0.353553390593274,
                0.353553390593274, 0.353553390593274, 0.353553390593274,
                0.353553390593274, 0.353553390593274
            ],
            [
                0.490392640201615, 0.415734806151273, 0.277785116509801,
                0.097545161008064, -0.097545161008064, -0.277785116509801,
                -0.415734806151273, -0.490392640201615
            ],
            [
                0.461939766255643, 0.191341716182545, -0.191341716182545,
                -0.461939766255643, -0.461939766255643, -0.191341716182545,
                0.191341716182545, 0.461939766255643
            ],
            [
                0.415734806151273, -0.097545161008064, -0.490392640201615,
                -0.277785116509801, 0.277785116509801, 0.490392640201615,
                0.097545161008064, -0.415734806151273
            ],
            [
                0.353553390593274, -0.353553390593274, -0.353553390593274,
                0.353553390593274, 0.353553390593274, -0.353553390593274,
                -0.353553390593274, 0.353553390593274
            ],
            [
                0.277785116509801, -0.490392640201615, 0.097545161008064,
                0.415734806151273, -0.415734806151273, -0.097545161008064,
                0.490392640201615, -0.277785116509801
            ],
            [
                0.191341716182545, -0.461939766255643, 0.461939766255643,
                -0.191341716182545, -0.191341716182545, 0.461939766255643,
                -0.461939766255643, 0.191341716182545
            ],
            [
                0.097545161008064, -0.277785116509801, 0.415734806151273,
                -0.490392640201615, 0.490392640201615, -0.415734806151273,
                0.277785116509801, -0.097545161008064
            ]
        ]
        if flip_hardcoded:
            t_forward = np.array(t_forward).T

    elif n == 11 and transform_type == 'dct':
        t_forward = [
            [
                0.301511344577764, 0.301511344577764, 0.301511344577764,
                0.301511344577764, 0.301511344577764, 0.301511344577764,
                0.301511344577764, 0.301511344577764, 0.301511344577764,
                0.301511344577764, 0.301511344577764
            ],
            [
                0.422061280946316, 0.387868386059133, 0.322252701275551,
                0.230530019145232, 0.120131165878581, -8.91292406723889e-18,
                -0.120131165878581, -0.230530019145232, -0.322252701275551,
                -0.387868386059133, -0.422061280946316
            ],
            [
                0.409129178625571, 0.279233555180591, 0.0606832509357945,
                -0.177133556713755, -0.358711711672592, -0.426401432711221,
                -0.358711711672592, -0.177133556713755, 0.0606832509357945,
                0.279233555180591, 0.409129178625571
            ],
            [
                0.387868386059133, 0.120131165878581, -0.230530019145232,
                -0.422061280946316, -0.322252701275551, 1.71076608154014e-17,
                0.322252701275551, 0.422061280946316, 0.230530019145232,
                -0.120131165878581, -0.387868386059133
            ],
            [
                0.358711711672592, -0.0606832509357945, -0.409129178625571,
                -0.279233555180591, 0.177133556713755, 0.426401432711221,
                0.177133556713755, -0.279233555180591, -0.409129178625571,
                -0.0606832509357945, 0.358711711672592
            ],
            [
                0.322252701275551, -0.230530019145232, -0.387868386059133,
                0.120131165878581, 0.422061280946316, -8.13580150049806e-17,
                -0.422061280946316, -0.120131165878581, 0.387868386059133,
                0.230530019145232, -0.322252701275551
            ],
            [
                0.279233555180591, -0.358711711672592, -0.177133556713755,
                0.409129178625571, 0.0606832509357945, -0.426401432711221,
                0.0606832509357944, 0.409129178625571, -0.177133556713755,
                -0.358711711672592, 0.279233555180591
            ],
            [
                0.230530019145232, -0.422061280946316, 0.120131165878581,
                0.322252701275551, -0.387868386059133, -2.87274927630557e-18,
                0.387868386059133, -0.322252701275551, -0.120131165878581,
                0.422061280946316, -0.230530019145232
            ],
            [
                0.177133556713755, -0.409129178625571, 0.358711711672592,
                -0.0606832509357945, -0.279233555180591, 0.426401432711221,
                -0.279233555180591, -0.0606832509357944, 0.358711711672592,
                -0.409129178625571, 0.177133556713755
            ],
            [
                0.120131165878581, -0.322252701275551, 0.422061280946316,
                -0.387868386059133, 0.230530019145232, 2.03395037512452e-17,
                -0.230530019145232, 0.387868386059133, -0.422061280946316,
                0.322252701275551, -0.120131165878581
            ],
            [
                0.0606832509357945, -0.177133556713755, 0.279233555180591,
                -0.358711711672592, 0.409129178625571, -0.426401432711221,
                0.409129178625571, -0.358711711672592, 0.279233555180591,
                -0.177133556713755, 0.0606832509357945
            ]
        ]
        if flip_hardcoded:
            t_forward = np.array(t_forward).T

    elif n == 8 and transform_type == 'dst':
        t_forward = [
            [
                0.161229841765317, 0.303012985114696, 0.408248290463863,
                0.464242826880013, 0.464242826880013, 0.408248290463863,
                0.303012985114696, 0.161229841765317
            ],
            [
                0.303012985114696, 0.464242826880013, 0.408248290463863,
                0.161229841765317, -0.161229841765317, -0.408248290463863,
                -0.464242826880013, -0.303012985114696
            ],
            [
                0.408248290463863, 0.408248290463863, 0, -0.408248290463863,
                -0.408248290463863, 0, 0.408248290463863, 0.408248290463863
            ],
            [
                0.464242826880013, 0.161229841765317, -0.408248290463863,
                -0.303012985114696, 0.303012985114696, 0.408248290463863,
                -0.161229841765317, -0.464242826880013
            ],
            [
                0.464242826880013, -0.161229841765317, -0.408248290463863,
                0.303012985114696, 0.303012985114696, -0.408248290463863,
                -0.161229841765317, 0.464242826880013
            ],
            [
                0.408248290463863, -0.408248290463863, 0, 0.408248290463863,
                -0.408248290463863, 0, 0.408248290463863, -0.408248290463863
            ],
            [
                0.303012985114696, -0.464242826880013, 0.408248290463863,
                -0.161229841765317, -0.161229841765317, 0.408248290463863,
                -0.464242826880013, 0.303012985114696
            ],
            [
                0.161229841765317, -0.303012985114696, 0.408248290463863,
                -0.464242826880013, 0.464242826880013, -0.408248290463863,
                0.303012985114696, -0.161229841765317
            ]
        ]
        if flip_hardcoded:
            t_forward = np.array(t_forward).T

    elif transform_type == 'dct':
        t_forward = dct(np.eye(n), norm='ortho')
    elif transform_type == 'eye':
        t_forward = np.eye(n)
    elif transform_type == 'dst':
        t_forward = dst(np.eye(n), norm='ortho')
    elif transform_type == 'DCrand':
        x = np.random.normal(size=(n, n))
        x[:, 0] = np.ones(len(x[:, 0]))
        q, _ = np.linalg.qr(x)
        if q[0, 0] < 0:
            q = -q

        t_forward = q.T

    elif pywt is not None:
        # a wavelet decomposition supported by PyWavelets
        # Set periodic boundary conditions, to preserve bi-orthogonality
        t_forward = np.zeros((n, n))

        for ii in range(n):
            temp = np.zeros(n)
            temp[0] = 1.0
            temp = np.roll(temp, (ii, dec_levels))
            tt = pywt.wavedec(temp,
                              transform_type,
                              mode='periodization',
                              level=int(np.log2(n)))
            cc = np.hstack(tt)
            t_forward[:, ii] = cc

    else:
        raise ValueError(
            "Transform of " + transform_type +
            " couldn't be found and PyWavelets couldn't be imported!")

    t_forward = np.array(t_forward)
    # Normalize the basis elements
    if not ((n == 8) and transform_type == 'bior1.5'):
        try:
            t_forward = (
                t_forward.T @ np.diag(np.sqrt(1. / sum(t_forward**2, 0)))).T
        except TypeError:  # t_forward was not an array...
            pass

    # Compute the inverse transform matrix
    try:
        t_inverse = np.linalg.inv(t_forward)
    except np.linalg.LinAlgError:
        t_inverse = np.array([[1]])

    return t_forward, t_inverse
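A quick check of the perfect-reconstruction property (illustrative; uses the numpy/pywt imports assumed above):

import numpy as np
tf, ti = _get_transf_matrix(8, 'haar')
x = np.random.randn(8)
assert np.allclose(ti @ (tf @ x), x)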
Example #33
    y += np.sin(100 * np.pi * x)
if noise:
    y += np.random.randint(1, 5, size=number_of_points)

y_spikefree = np.copy(y)
if spike:
    size = 1
    y[20] += 7 * size
    y[21] += 15 * size
    y[22] += 9 * size

# the coefficients array has the form
# coeff = [cA_n, cD_n, cD_n-1, ..., cD_2, cD_1]
# where cA are the approximation coefficients and
# cD the detail coefficients
coeff = pywt.wavedec(y, wavelet_name)

length = len(coeff)

# create figures for all wavelets and detail levels
fig, ax = plt.subplots(figsize=(8, 6), nrows=length - 1, ncols=3)
# create big figure for raw data
gs = ax[0, 0].get_gridspec()
# remove the underlying axes
for axis in ax[0:, 0]:
    axis.remove()
axbig = fig.add_subplot(gs[0:, 0])
# plot raw data
axbig.plot(x, y, label='raw')
axbig.set_ylabel('Intensity (arb. u.)')
axbig.set_xlabel('')
Example #34
def test_waverec():
    x = [3, 7, 1, 1, -2, 5, 4, 6]
    coeffs = pywt.wavedec(x, 'db1')
    assert_allclose(pywt.waverec(coeffs, 'db1'), x, rtol=1e-12)
Example #35
#for i in range(len(raw_data)-1):
#    x, y = raw_data[i].split('\n')
#    X = float(x)
#    Y = float(y)
#    index.append(X)
#    data.append(Y)

# Create wavelet object and define parameters
w = pywt.Wavelet('sym4')
maxlev = pywt.dwt_max_level(len(data), w.dec_len)
print("maximum level is " + str(maxlev))
threshold = 0.35  # Threshold for filtering can be computed http://gwyddion.net/documentation/user-guide-en/wavelet-transform.html

# Decompose into wavelet components, to the level selected

coeffs = pywt.wavedec(data, 'sym4', level=maxlev)
plt.figure()
for i in range(1, len(coeffs)):
    plt.subplot(maxlev, 1, i)
    plt.plot(coeffs[i])
    coeffs[i] = pywt.threshold(coeffs[i], threshold * max(coeffs[i]))
    plt.plot(coeffs[i])

datarec = pywt.waverec(coeffs, 'sym4')
indexrec = np.arange(0, len(datarec), 1) * 2

mintime = 9  # Start point in the time scale
maxtime = mintime + 2000  # The added time is in ms

File_Writer(file, indexrec, datarec)
Example #36
def getTransfMatrix(N, transform_type, dec_levels=0, flip_hardcoded=False):
    #
    # Create forward and inverse transform matrices, which allow for perfect
    # reconstruction. The forward transform matrix is normalized so that the
    # l2-norm of each basis element is 1.
    #
    # [Tforward, Tinverse] = getTransfMatrix (N, transform_type, dec_levels)
    #
    #  INPUTS:
    #
    #   N               --> Size of the transform (for wavelets, must be 2^K)
    #
    #   transform_type  --> 'dct', 'dest', 'hadamard', or anything that is
    #                       listed by 'help wfilters' (bi-orthogonal wavelets)
    #                       'DCrand' -- an orthonormal transform with a DC and all
    #                       the other basis elements of random nature
    #
    #   dec_levels      --> If a wavelet transform is generated, this is the
    #                       desired decomposition level. Must be in the
    #                       range [0, log2(N)-1], where "0" implies
    #                       full decomposition.
    #
    #  OUTPUTS:
    #
    #   Tforward        --> (N x N) Forward transform matrix
    #
    #   Tinverse        --> (N x N) Inverse transform matrix
    #

    if N == 1:
        Tforward = 1
    elif transform_type == 'hadamard':
        Tforward = hadamard(N)
    elif N == 8 and transform_type == 'bior1.5':  # hardcoded transform so that the wavelet toolbox is not needed to generate it
        Tforward = [[
            0.343550200747110, 0.343550200747110, 0.343550200747110,
            0.343550200747110, 0.343550200747110, 0.343550200747110,
            0.343550200747110, 0.343550200747110
        ],
                    [
                        -0.225454819240296, -0.461645582253923,
                        -0.461645582253923, -0.225454819240296,
                        0.225454819240296, 0.461645582253923,
                        0.461645582253923, 0.225454819240296
                    ],
                    [
                        0.569359398342840, 0.402347308162280,
                        -0.402347308162280, -0.569359398342840,
                        -0.083506045090280, 0.083506045090280,
                        -0.083506045090280, 0.083506045090280
                    ],
                    [
                        -0.083506045090280, 0.083506045090280,
                        -0.083506045090280, 0.083506045090280,
                        0.569359398342840, 0.402347308162280,
                        -0.402347308162280, -0.569359398342840
                    ],
                    [0.707106781186550, -0.707106781186550, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0.707106781186550, -0.707106781186550, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0.707106781186550, -0.707106781186550, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0.707106781186550, -0.707106781186550]]
        if flip_hardcoded:
            Tforward = np.array(Tforward).T

    elif N == 8 and transform_type == 'dct':  # hardcoded transform so that the signal processing toolbox is not needed to generate it
        Tforward = [
            [
                0.353553390593274, 0.353553390593274, 0.353553390593274,
                0.353553390593274, 0.353553390593274, 0.353553390593274,
                0.353553390593274, 0.353553390593274
            ],
            [
                0.490392640201615, 0.415734806151273, 0.277785116509801,
                0.097545161008064, -0.097545161008064, -0.277785116509801,
                -0.415734806151273, -0.490392640201615
            ],
            [
                0.461939766255643, 0.191341716182545, -0.191341716182545,
                -0.461939766255643, -0.461939766255643, -0.191341716182545,
                0.191341716182545, 0.461939766255643
            ],
            [
                0.415734806151273, -0.097545161008064, -0.490392640201615,
                -0.277785116509801, 0.277785116509801, 0.490392640201615,
                0.097545161008064, -0.415734806151273
            ],
            [
                0.353553390593274, -0.353553390593274, -0.353553390593274,
                0.353553390593274, 0.353553390593274, -0.353553390593274,
                -0.353553390593274, 0.353553390593274
            ],
            [
                0.277785116509801, -0.490392640201615, 0.097545161008064,
                0.415734806151273, -0.415734806151273, -0.097545161008064,
                0.490392640201615, -0.277785116509801
            ],
            [
                0.191341716182545, -0.461939766255643, 0.461939766255643,
                -0.191341716182545, -0.191341716182545, 0.461939766255643,
                -0.461939766255643, 0.191341716182545
            ],
            [
                0.097545161008064, -0.277785116509801, 0.415734806151273,
                -0.490392640201615, 0.490392640201615, -0.415734806151273,
                0.277785116509801, -0.097545161008064
            ]
        ]
        if flip_hardcoded:
            Tforward = np.array(Tforward).T

    elif N == 11 and transform_type == 'dct':  # hardcoded transform so that the signal processing toolbox is not needed to generate it
        Tforward = [
            [
                0.301511344577764, 0.301511344577764, 0.301511344577764,
                0.301511344577764, 0.301511344577764, 0.301511344577764,
                0.301511344577764, 0.301511344577764, 0.301511344577764,
                0.301511344577764, 0.301511344577764
            ],
            [
                0.422061280946316, 0.387868386059133, 0.322252701275551,
                0.230530019145232, 0.120131165878581, -8.91292406723889e-18,
                -0.120131165878581, -0.230530019145232, -0.322252701275551,
                -0.387868386059133, -0.422061280946316
            ],
            [
                0.409129178625571, 0.279233555180591, 0.0606832509357945,
                -0.177133556713755, -0.358711711672592, -0.426401432711221,
                -0.358711711672592, -0.177133556713755, 0.0606832509357945,
                0.279233555180591, 0.409129178625571
            ],
            [
                0.387868386059133, 0.120131165878581, -0.230530019145232,
                -0.422061280946316, -0.322252701275551, 1.71076608154014e-17,
                0.322252701275551, 0.422061280946316, 0.230530019145232,
                -0.120131165878581, -0.387868386059133
            ],
            [
                0.358711711672592, -0.0606832509357945, -0.409129178625571,
                -0.279233555180591, 0.177133556713755, 0.426401432711221,
                0.177133556713755, -0.279233555180591, -0.409129178625571,
                -0.0606832509357945, 0.358711711672592
            ],
            [
                0.322252701275551, -0.230530019145232, -0.387868386059133,
                0.120131165878581, 0.422061280946316, -8.13580150049806e-17,
                -0.422061280946316, -0.120131165878581, 0.387868386059133,
                0.230530019145232, -0.322252701275551
            ],
            [
                0.279233555180591, -0.358711711672592, -0.177133556713755,
                0.409129178625571, 0.0606832509357945, -0.426401432711221,
                0.0606832509357944, 0.409129178625571, -0.177133556713755,
                -0.358711711672592, 0.279233555180591
            ],
            [
                0.230530019145232, -0.422061280946316, 0.120131165878581,
                0.322252701275551, -0.387868386059133, -2.87274927630557e-18,
                0.387868386059133, -0.322252701275551, -0.120131165878581,
                0.422061280946316, -0.230530019145232
            ],
            [
                0.177133556713755, -0.409129178625571, 0.358711711672592,
                -0.0606832509357945, -0.279233555180591, 0.426401432711221,
                -0.279233555180591, -0.0606832509357944, 0.358711711672592,
                -0.409129178625571, 0.177133556713755
            ],
            [
                0.120131165878581, -0.322252701275551, 0.422061280946316,
                -0.387868386059133, 0.230530019145232, 2.03395037512452e-17,
                -0.230530019145232, 0.387868386059133, -0.422061280946316,
                0.322252701275551, -0.120131165878581
            ],
            [
                0.0606832509357945, -0.177133556713755, 0.279233555180591,
                -0.358711711672592, 0.409129178625571, -0.426401432711221,
                0.409129178625571, -0.358711711672592, 0.279233555180591,
                -0.177133556713755, 0.0606832509357945
            ]
        ]
        if flip_hardcoded:
            Tforward = np.array(Tforward).T

    elif N == 8 and transform_type == 'dest':  # hardcoded transform so that the PDE toolbox is not needed to generate it
        Tforward = [
            [
                0.161229841765317, 0.303012985114696, 0.408248290463863,
                0.464242826880013, 0.464242826880013, 0.408248290463863,
                0.303012985114696, 0.161229841765317
            ],
            [
                0.303012985114696, 0.464242826880013, 0.408248290463863,
                0.161229841765317, -0.161229841765317, -0.408248290463863,
                -0.464242826880013, -0.303012985114696
            ],
            [
                0.408248290463863, 0.408248290463863, 0, -0.408248290463863,
                -0.408248290463863, 0, 0.408248290463863, 0.408248290463863
            ],
            [
                0.464242826880013, 0.161229841765317, -0.408248290463863,
                -0.303012985114696, 0.303012985114696, 0.408248290463863,
                -0.161229841765317, -0.464242826880013
            ],
            [
                0.464242826880013, -0.161229841765317, -0.408248290463863,
                0.303012985114696, 0.303012985114696, -0.408248290463863,
                -0.161229841765317, 0.464242826880013
            ],
            [
                0.408248290463863, -0.408248290463863, 0, 0.408248290463863,
                -0.408248290463863, 0, 0.408248290463863, -0.408248290463863
            ],
            [
                0.303012985114696, -0.464242826880013, 0.408248290463863,
                -0.161229841765317, -0.161229841765317, 0.408248290463863,
                -0.464242826880013, 0.303012985114696
            ],
            [
                0.161229841765317, -0.303012985114696, 0.408248290463863,
                -0.464242826880013, 0.464242826880013, -0.408248290463863,
                0.303012985114696, -0.161229841765317
            ]
        ]
        if flip_hardcoded:
            Tforward = np.array(Tforward).T

    elif transform_type == 'dct':
        Tforward = dct(np.eye(N), norm='ortho')
    elif transform_type == 'eye':
        Tforward = np.eye(N)
    elif transform_type == 'dest':
        Tforward = dst(np.eye(N), norm='ortho')  # discrete sine transform; assumes scipy.fftpack.dst is imported as dst
    elif transform_type == 'DCrand':
        x = np.random.normal(size=(N, N))
        x[:, 0] = np.ones(N)
        Q, _ = np.linalg.qr(x)  # np.linalg.qr returns (Q, R)
        if Q[0, 0] < 0:
            Q = -Q

        Tforward = Q.T

    else:  ## a wavelet decomposition supported by 'wavedec'
        ### Use periodization boundary conditions, to preserve bi-orthogonality
        Tforward = np.zeros((N, N))
        for i in range(0, N):
            # the transform of a unit impulse at position i gives column i
            impulse = np.roll(np.concatenate(([1.0], np.zeros(N - 1))), i)
            coeffs = pywt.wavedec(impulse,
                                  level=int(np.log2(N)),
                                  wavelet=transform_type,
                                  mode='periodization')  ## construct transform matrix
            Tforward[:, i] = np.hstack(coeffs)

    Tforward = np.array(Tforward)
    ### Normalize the basis elements
    if not ((N == 8) and transform_type == 'bior1.5'):
        try:
            Tforward = (
                Tforward.T @ np.diag(np.sqrt(1. / np.sum(Tforward**2, axis=0)))).T
        except TypeError:  # Tforward was not an array...
            pass

    ### Compute the inverse transform matrix
    try:
        Tinverse = np.linalg.inv(Tforward)
    except np.linalg.LinAlgError:  # singular transform: fall back to a scalar placeholder
        Tinverse = np.array(1)

    return Tforward, Tinverse
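
# A minimal sanity check of the forward/inverse pair logic above (the function
# name is not shown in this listing, so the orthonormal DCT case is rebuilt
# directly; scipy is an assumed dependency):
import numpy as np
from scipy.fftpack import dct

N = 8
Tf = dct(np.eye(N), norm='ortho')
Ti = np.linalg.inv(Tf)
assert np.allclose(Ti, Tf.T)            # orthonormal transform: inverse == transpose
assert np.allclose(Ti @ Tf, np.eye(N))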
예제 #37
0
                        stim_epochs_data[e][c], decim)
                    #13
                    pk_to_pk_slope = AT_FeatureExtraction.pk_to_pk_slope(
                        stim_epochs_data[e][c], decim)
                    #14
                    zero_cross = AT_FeatureExtraction.zero_cross(
                        stim_epochs_data[e][c])
                    #15
                    zero_cross_density = AT_FeatureExtraction.zero_cross_density(
                        stim_epochs_data[e][c], decim)
                    #16
                    slope_sign_alt = AT_FeatureExtraction.slope_sign_alt(
                        stim_epochs_data[e][c])
                    #17-43
                    cA3, cD3, cD2, cD1 = pywt.wavedec(stim_epochs_data[e][c],
                                                      'db1',
                                                      level=3)

                    #assigning the features to a feature vector
                    X[b][r][s][e][c][0:43] = np.concatenate(
                        ([latency], [amplitude], [lat_amp_ratio], [abs_amp],
                         [abs_lat_amp_ratio], [positive_area], [negative_area],
                         [total_area], [abs_total_area], [total_abs_area],
                         [avg_abs_slope], [peak_to_peak], [pk_to_pk_tw],
                         [pk_to_pk_slope], [zero_cross], [zero_cross_density],
                         [slope_sign_alt], cA3))
                    #keeping a version of multi-dimensional X before reshaping it
                    X_multi_D = X

#reshaping X into a 2D array: n_data and n_features
X = np.reshape(
예제 #38
0
import pywt
import math as mt
import numpy as np
import SignalModel
import matplotlib.pyplot as plt

t1 = SignalModel.Signal(1000)
s1 = t1.createSpikeMultiNoise([2, 4, 6])
wp = pywt.WaveletPacket(data=s1, wavelet='db3')
coeffs = pywt.wavedec(data=s1, wavelet='db3', level=1)
a = coeffs[0]
d = coeffs[1]
aa = pywt.wavedec(data=a, wavelet='db3', level=1)[0]
ad = pywt.wavedec(data=a, wavelet='db3', level=1)[1]
da = pywt.wavedec(data=d, wavelet='db3', level=1)[0]
dd = pywt.wavedec(data=d, wavelet='db3', level=1)[1]
print(np.allclose(dd, wp['dd'].data))  # cascaded single-level DWTs match the packet node 'dd'

sa = pywt.swt(data=s1, wavelet='db3', level=1, trim_approx=True)[0]
sd = pywt.swt(data=s1, wavelet='db3', level=1, trim_approx=True)[1]
sda = pywt.swt(data=sd, wavelet='db3', level=1, trim_approx=True)[0]

plt.figure()
plt.subplot(2, 1, 1)
plt.plot([i for i in range(len(da))], da)
plt.subplot(2, 1, 2)
plt.plot([i for i in range(len(sda))], sda)
plt.show()
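
# Complementary check in the same spirit (assumed, not in the original):
# cascading two single-level decompositions of the approximation reproduces
# the approximation of a level-2 wavedec.
print(np.allclose(aa, pywt.wavedec(data=s1, wavelet='db3', level=2)[0]))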
예제 #39
0
# plt.axis([0, 1000, 0, 100])
plt.show()
############## plotting ############################################

# print(data.shape)
# print(data)
# print(data['RH_6'])
################## denoising #########################
db1 = pywt.Wavelet('db1')
# [ca3, cd3, cd2, cd1] = pywt.wavedec(x, db1)
# print(ca3)
# print(cd3)
# print(cd2)
# print(cd1)
# decompose into three levels
coeffs = pywt.wavedec(y_values, db1, level=3)
print("------------------len of coeffs---------------------")
print(len(coeffs))
# print(coeffs)
recoeffs = pywt.waverec(coeffs, db1)
# print(recoeffs)

thcoeffs = []
for i in range(1, len(coeffs)):
    tmp = coeffs[i].copy()
    # robust noise estimate per detail level: sigma = mean(|c|) / 0.6745
    Sum = 0.0
    for j in coeffs[i]:
        Sum = Sum + abs(j)
    N = len(coeffs[i])
    Sum = (1.0 / float(N)) * Sum
    sigma = (1.0 / 0.6745) * Sum
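    # assumed continuation (the listing is truncated here; numpy assumed
    # imported as np): apply the universal threshold built from this sigma
    uthresh = sigma * np.sqrt(2.0 * np.log(N))
    thcoeffs.append(pywt.threshold(tmp, value=uthresh, mode='soft'))

# reconstruct a denoised signal from the approximation plus thresholded details
denoised = pywt.waverec([coeffs[0]] + thcoeffs, db1)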
예제 #40
0
start_freq = 1  # Hz
end_freq = 1  # Hz
if (start_freq != end_freq):
    freq0 = arange(start_freq, end_freq, (end_freq - start_freq) / (n * 1.0))
else:
    freq0 = start_freq

factor0 = samp_rate / freq0
time = arange(n) / float(samp_rate)
wave0 = sin(2 * pi * freq0 * time)

# errors = [random() - 0.5 for _ in range(n)]
# wave0 += errors

cA, cD = dwt(wave0, 'db2')
coeffs = wavedec(wave0, 'db2', level=levels)

i = 0
print(len(coeffs))
nyquist = samp_rate / 2.

freqs = zeros(levels)
powers = zeros(levels)
freq_widths = zeros(levels)

for a in coeffs:
    if (i != 0):
        print(i, len(a), mean(abs(a)), std(a),
              nyquist / 2.**(levels - i + 1), nyquist / 2.**(levels - i))
        # I don't know why the 2 to 1 ratio works better...
예제 #41
0
import numpy as np
import statistics
import pywt
import csv
from scipy.stats import kurtosis

# Reading file and saving it in a variable, suppose x;
x = np.array([])
filename = '/User/'
with open(filename) as csvfile:
    f_read = csv.reader(csvfile, delimiter = ',')
    for row in f_read:
        x = np.append(x, [float(j) for j in row])

[a, d1, d2, d3, d4] = pywt.wavedec(x, 'haar', level=4)

def comp_moment(feature):
    step = int(len(feature)/2)
    avg_temp = np.zeros([2])
    stn_dev_temp = np.zeros([2])
    kurto_temp = np.zeros([2])
    for i in range(int(len(feature)/step)):
        avg_temp[i] = np.mean(feature[step*i:step*(i+1)])
        stn_dev_temp[i] = statistics.stdev(feature[step*i:step*(i+1)])
        kurto_temp[i] = kurtosis(feature[step*i:step*(i+1)])
    return (avg_temp, stn_dev_temp, kurto_temp)  # return after both halves are processed


# Approximation coefficient
avg_temp, stn_dev_temp, kurto_temp = comp_moment(a)
avg = avg_temp
예제 #42
0
def energy(x):  # def line inferred from the call to energy() below
    return np.sum(x * x) / len(x)


# Stuff for the axis
sample = np.arange(0, number_of_samples, 1)
fig, axs = plt.subplots(number_of_samples // skip, 1, sharex=True)
for i in range(0, number_of_samples, skip):
    axs[i // skip].set_ylim(-number_of_samples, number_of_samples)
    axs[i // skip].grid(True)
axs[number_of_samples // skip - 1].set_xlabel('sample')
axs[number_of_samples // skip // 2].set_ylabel('amplitude')

print("Coefficient\t   Energy")

zeros = np.zeros(number_of_samples)
coeffs = wt.wavedec(zeros, wavelet=wavelet, level=levels, mode=padding)
arr, coeff_slices = wt.coeffs_to_array(coeffs)
for i in range(0, number_of_samples, skip):

    # The basis functions are created by computing the inverse DWT of
    # a DWT spectrum where all coefficients are zero except one of
    # them. Depending on the position of that coefficient, a different
    # basis function is obtained.

    arr[i] = number_of_samples  # i is the coeff different from 0
    coeffs_from_arr = wt.array_to_coeffs(arr,
                                         coeff_slices,
                                         output_format="wavedec")
    samples = wt.waverec(coeffs_from_arr, wavelet=wavelet, mode=padding)
    arr[i] = 0
    print("       %4d" % i, "\t", "%8.2f" % energy(samples))
예제 #43
0
def _dwt_approx_rec(array, level, wavelet, mode, axis):
    """
    Approximate reconstruction of a signal/image. Uses the multi-level discrete wavelet
    transform to decompose a signal or an image, and reconstruct it using approximate
    coefficients only.

    Parameters
    ----------
    array : array_like
        Array to be decomposed. Currently, only 1D and 2D arrays are supported.
        Only even-length signals along the axis are supported.
    level : int or None
        Decomposition level. A higher level will result in a coarser approximation of
        the input array. If the level is higher than the maximum possible decomposition level,
        the maximum level is used.
        If None, the maximum possible decomposition level is used.
    wavelet : str or Wavelet object
        Can be any argument accepted by pywt.Wavelet, e.g. 'db10'
    mode : str, optional
        Signal extension mode, see pywt.Modes.
    axis : int, optional
        Axis over which to compute the transform. Default is -1.

    Returns
    -------
    reconstructed : `~numpy.ndarray`
            Approximated reconstruction of the input array.

    Raises
    ------
    ValueError : If input array has dimension > 2
    """
    if isinstance(axis, Iterable):
        axis = axis[0]

    array = np.asarray(array, dtype=float)
    if array.shape[axis] % 2 != 0:
        raise ValueError("Only even-length signals are supported")

    # Build Wavelet object
    if isinstance(wavelet, str):
        wavelet = pywt.Wavelet(wavelet)

    # Check maximum decomposition level
    # For a 2D array, the condition is checked with the shortest dimension,
    # min(array.shape), as done in pywt.wavedec2.
    max_level = pywt.dwt_max_level(data_len=array.shape[axis],
                                   filter_len=wavelet.dec_len)
    if level is None:
        level = max_level
    elif max_level < level:
        warn(
            f"Decomposition level {level} higher than maximum {max_level}. Maximum is used."
        )
        level = max_level

    # By now, we are sure that the decomposition level will be supported.
    # Decompose the signal using the multilevel discrete wavelet transform
    coeffs = pywt.wavedec(data=array,
                          wavelet=wavelet,
                          level=level,
                          mode=mode,
                          axis=axis)
    app_coeffs, det_coeffs = coeffs[0], coeffs[1:]

    # Replace detail coefficients with None (waverec treats None as zeros),
    # keeping the list length so the reconstructed signal has the same size
    # as the input. Reconstruct the signal from the approximation only.
    reconstructed = pywt.waverec(
        [app_coeffs] + [None] * len(det_coeffs),
        wavelet=wavelet,
        mode="constant",
        axis=axis,
    )

    # Sometimes pywt.waverec returns a signal that is longer than the original signal
    while reconstructed.shape[axis] > array.shape[axis]:
        reconstructed = np.swapaxes(
            np.swapaxes(reconstructed, 0, axis)[:array.shape[axis]], 0, axis)
    return reconstructed
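
# A hypothetical usage sketch for _dwt_approx_rec above (the name and parameters
# come from the listing; the 256-sample test signal, wavelet, and mode are
# illustrative assumptions):
if __name__ == "__main__":
    import numpy as np
    rng = np.random.default_rng(0)
    noisy = np.sin(np.linspace(0, 4 * np.pi, 256)) + 0.1 * rng.standard_normal(256)
    smooth = _dwt_approx_rec(noisy, level=3, wavelet="db10", mode="constant", axis=-1)
    assert smooth.shape == noisy.shape  # same length, coarser content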
예제 #44
0
def main():
    #import amitgroup.io.mnist
    #images, _ = read('training', '/local/mnist', [9])
    #shifted = np.zeros(images[0].shape)
    #shifted[:-3,:] = images[0,3:,:]

    im1, im2 = np.zeros((32, 32)), np.zeros((32, 32))

    wl_name = "db2"
    levels = len(pywt.wavedec(range(32), wl_name)) - 1
    scriptNs = list(map(len, pywt.wavedec(range(32), wl_name, level=levels)))

    if 0:
        images = np.load("data/nines.npz")['images']
        im1[:28, :28] = images[0]
        im2[:28, :28] = images[2]
    else:
        im1 = ag.io.load_image('data/Images_0', 45)
        im2 = ag.io.load_image('data/Images_1', 23)

    im1 = im1[::-1, :]
    im2 = im2[::-1, :]

    A = 2

    if 1:
        # Blur images
        im1b = im1  #ag.math.blur_image(im1, 2)
        im2b = im2  #ag.math.blur_image(im2, 2)

        show_costs = True

        imgdef, info = ag.ml.imagedef(im1b,
                                      im2b,
                                      rho=3.0,
                                      calc_costs=show_costs)

        print(info['iterations_per_level'])

        if PLOT and show_costs:
            logpriors = -info['logpriors']
            loglikelihoods = -info['loglikelihoods']
            np.savez('logs',
                     logpriors=logpriors,
                     loglikelihoods=loglikelihoods)

            plotfunc = plt.semilogy
            plt.figure(figsize=(8, 12))
            plt.subplot(211)
            costs = logpriors + loglikelihoods
            plotfunc(costs, label="J")
            plotfunc(loglikelihoods, label="log likelihood")
            plt.legend()
            plt.subplot(212)
            plotfunc(logpriors, label="log prior")
            plt.legend()
            plt.show()

        im3 = imgdef.deform(im1)

        if PLOT:
            d = dict(origin='lower', interpolation='nearest', cmap=plt.cm.gray)

            plt.figure(figsize=(9, 9))
            plt.subplot(221)
            plt.title("Prototype")
            plt.imshow(im1, **d)
            plt.subplot(222)
            plt.title("Original")
            plt.imshow(im2, **d)
            plt.subplot(223)
            plt.title("Deformed")
            plt.imshow(im3, **d)

            plt.subplot(224)
            if 0:
                plt.title("Deformed")
                plt.imshow(im2 - im3, **d)
                plt.colorbar()
            else:
                plt.title("Deformation map")
                x, y = imgdef.get_x(im1.shape)
                Ux, Uy = imgdef.deform_map(x, y)
                plt.quiver(y, x, Uy, Ux)
            plt.show()

    elif 1:
        plt.figure(figsize=(14, 6))
        plt.subplot(121)
        plt.title("F")
        plt.imshow(im1, origin='lower')
        plt.subplot(122)
        plt.title("I")
        plt.imshow(im2, origin='lower')
        plt.show()

    import pickle
    if TEST:
        im3correct = pickle.load(open('im3.p', 'rb'))
        passed = (im3 == im3correct).all()
        print "PASSED:", ['NO', 'YES'][passed]
        print((im3 - im3correct)**2).sum()
    else:
        pickle.dump(im3, open('im3.p', 'wb'))
예제 #45
0
def extract_wavelet_features(channel):
    # extract wavelet coeff features
    coeffs = pywt.wavedec(channel, 'db1', level=3)
    # list concatenation, not ndarray broadcasting: append the entropy of the
    # approximation coefficients to the coefficients themselves
    return list(coeffs[0]) + [cal_entropy(coeffs[0])]
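
# Hypothetical companion sketch: cal_entropy is not shown in this listing, so a
# Shannon entropy over the normalized coefficient energies is assumed here.
import numpy as np

def cal_entropy(coeffs):
    p = np.square(coeffs) / np.sum(np.square(coeffs))  # normalized energies
    p = p[p > 0]
    return float(-np.sum(p * np.log2(p)))

# features = extract_wavelet_features(np.random.randn(256))  # 32 cA3 values + entropy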
예제 #46
0
yspper10y.index.to_pydatetime()

# use matplotlib.dates's date2num
date2num( yspper10y.index.to_pydatetime() )

# get the values as a numpy array with values, after choosing which column by a Python dictionary manner
yspper10y['Value'].values

################################################################# 
## fun with wavelets
################################################################# 

yspper10ydwted = pywt.dwt(yspper10y['Value'].values, "haar", mode="constant")  # modern name for the old "cpd" mode

# try different levels
yspper10dwtedcoeffs = pywt.wavedec(yspper10y['Value'].values, 'haar', level=3)

# try maximum wavelet decomposition (level defaults to dwt_max_level)
yspper10dwtedcoeffs = pywt.wavedec(yspper10y['Value'].values, 'haar')



################################################################# 
## fun with lag_plots
################################################################# 

lag_plot(yspper10y)
plt.title("Lag plot of 1-year lag of "+yaleds[12].name, fontsize=12)

################################################################################ 
##### Is the U.S. stock market overvalued?
예제 #47
0
def GetWaveletHurst(historyPriceMatrix):
    dataList = []
    rowsCount = len(historyPriceMatrix)
    for rowIndex in range(0, rowsCount):
        for col in historyPriceMatrix[rowIndex]:
            dataList.append(col)
    print("dataList", dataList)
    coeffs = pywt.wavedec(dataList, 'db8', level=15)
    # coeffs = [cA15, cD15, cD14, ..., cD1]: approximation first, then detail
    # levels from coarsest (cD15) down to finest (cD1)
    details = coeffs[1:]
    for d in details[-6:]:  # debug output for cD6 .. cD1
        print(d)
        print("rows", d.shape[0], "energy", np.dot(d, d.T))
    # per-level log2 average energy: f_j = log2(<cD_j, cD_j> / n_j), finest level first
    f = [math.log(np.dot(d, d.T) / len(d)) / math.log(2) for d in reversed(details)]
    t = list(range(1, 16))
    cof = np.polyfit(t, f, 1)
    fit = np.polyval(cof, t)
    pl.plot(t, f, 'o')
    pl.plot(t, fit, '-')
    pl.show()

    L = (fit[-1] - fit[0]) / 14  # fit has 15 points, indices 0..14
    H = (L + 1) / 2
    return H
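
# A minimal self-check (assumed, not from the source) of the slope-to-Hurst
# relation used above: for white noise the per-coefficient detail energies are
# roughly flat across levels, so the fitted slope is ~0 and H = (slope+1)/2 ~ 0.5.
import math
import numpy as np
import pywt

x = np.random.randn(2**16)
coeffs = pywt.wavedec(x, 'db8', level=10)
f = [math.log(np.dot(d, d) / len(d), 2) for d in reversed(coeffs[1:])]  # f1..f10
slope = np.polyfit(np.arange(1, 11), f, 1)[0]
print("estimated H:", (slope + 1) / 2)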
예제 #48
0
def extractSpectralFeatures(dataWindow,
                            numDomCoeffs=2,
                            numDomFreqs=2,
                            sampleT=0.012,
                            wavelet='haar',
                            coeffNormFact=1,
                            freqNormFact=1,
                            selectSensors=None):
    if isinstance(selectSensors, (list, tuple, np.ndarray)):
        featureVector = []
        numOfSensors = len(selectSensors)
        ## wavelet features:
        LEVEL = 0
        WAVEMODE = 'symmetric'  # half-sample symmetric padding (not zero padding)
        #THRESMODE = 'hard'  # hard thresholding (unused in this branch)
        windowNumberOfPoints = np.size(dataWindow[::, 0])
        #windowCoeffs = []
        #waveletDenoised = []
        dominantCoeffsVal = []
        dominantCoeffsAmp = []
        signalPower = []
        for i in range(numOfSensors):
            signalPower.append(
                np.sum(np.abs(dataWindow[::, int(selectSensors[i])])**2))
            coeffs = pywt.wavedec(dataWindow[::, int(selectSensors[i])],
                                  wavelet=wavelet,
                                  mode=WAVEMODE,
                                  level=LEVEL)
            coeffs0 = coeffs[0]
            translationAxis = np.linspace(0, 1, np.size(coeffs0))
            dominantCoeffsAmp.append(
                coeffs0[coeffs0.argsort()[-numDomCoeffs:]])
            if (np.max(coeffs0) == 0):
                dominantCoeffsVal.append(np.zeros(numDomCoeffs))
            else:
                dominantCoeffsVal.append(
                    translationAxis[coeffs0.argsort()[-numDomCoeffs:]])
        for i in range(numDomCoeffs):
            for j in range(numOfSensors):
                featureVector.append(
                    np.round([(np.transpose(dominantCoeffsVal)[i, j],
                               np.transpose(dominantCoeffsAmp)[i, j])], 5))
        ## fourier features:
        freqAxis = np.linspace(-(2 / sampleT), 2 / sampleT,
                               windowNumberOfPoints)
        freqAxis = freqAxis[int(
            windowNumberOfPoints / 2
        ):] * sampleT / 2  # from zero to nyquist frequency but normalized to 1
        dominantFreqVal = []
        dominantFreqAmpRe = []
        dominantFreqAmpIm = []
        for i in range(numOfSensors):
            spectrum = np.fft.fftshift(
                np.fft.fft(dataWindow[::, int(selectSensors[i])]))
            spectrum = spectrum[int(windowNumberOfPoints /
                                    2):] * freqNormFact / signalPower[
                                        i] / windowNumberOfPoints
            dominantFreqAmpRe.append(
                np.real(spectrum[spectrum.argsort()[-numDomFreqs:]])
            )  # -> get real part of amplitude of largest frequencies
            dominantFreqAmpIm.append(
                np.imag(spectrum[spectrum.argsort()[-numDomFreqs:]])
            )  # -> get imaginary part of amplitude of largest frequencies
            dominantFreqVal.append(freqAxis[spectrum.argsort()[-numDomFreqs:]]
                                   )  # -> get value of largest frequencies
        for i in range(numDomFreqs):
            for j in range(numOfSensors):
                featureVector.append(
                    np.round([(np.transpose(dominantFreqVal)[i, j],
                               np.transpose(dominantFreqAmpRe)[i, j],
                               np.transpose(dominantFreqAmpIm)[i, j])], 5))
        featureVector = np.reshape(featureVector, np.size(featureVector))
    else:
        featureVector = []
        numOfSensors = np.size(dataWindow[0, ::])
        ## wavelet features:
        LEVEL = 0
        WAVEMODE = 'symmetric'  # half-sample symmetric padding (not zero padding)
        THRESMODE = 'hard'  # hard thresholding
        windowNumberOfPoints = np.size(dataWindow[::, 0])
        windowCoeffs = []
        #waveletDenoised = []
        dominantCoeffsVal = []
        dominantCoeffsAmp = []
        signalPower = []
        for i in range(numOfSensors):
            signalPower.append(np.sum(np.abs(dataWindow[::, i])**2))
            coeffs = pywt.wavedec(dataWindow[::, i],
                                  wavelet=wavelet,
                                  mode=WAVEMODE,
                                  level=LEVEL)
            for j in range(LEVEL):
                coeffs[j] = coeffs[
                    j]  #*coeffNormFact/signalPower[i]/windowNumberOfPoints
                thresh = mad(coeffs[j]) * np.sqrt(
                    2 * np.log(len(dataWindow[::, i])))
                coeffs[j] = pywt.threshold(coeffs[j],
                                           value=thresh,
                                           mode=THRESMODE)
            #waveletDenoised.append(pywt.waverec(coeffs, wavelet=WAVELET, mode=WAVEMODE))
            windowCoeffs.append(coeffs[:LEVEL])  # omit last level
            coeffs0 = np.abs(coeffs[0])
            translationAxis = np.linspace(0, 1, np.size(coeffs0))
            dominantCoeffsAmp.append(
                coeffs0[coeffs0.argsort()[-numDomCoeffs:]])
            if (np.max(coeffs0) == 0):
                dominantCoeffsVal.append(np.zeros(numDomCoeffs))
            else:
                dominantCoeffsVal.append(
                    translationAxis[coeffs0.argsort()[-numDomCoeffs:]])
        for i in range(numDomCoeffs):
            for j in range(numOfSensors):
                featureVector.append(
                    np.round([(np.transpose(dominantCoeffsVal)[i, j],
                               np.transpose(dominantCoeffsAmp)[i, j])], 5))
        ## fourier features:
        freqAxis = np.linspace(-(2 / sampleT), 2 / sampleT,
                               windowNumberOfPoints)
        freqAxis = freqAxis[int(
            windowNumberOfPoints / 2
        ):] * sampleT / 2  # from zero to nyquist frequency but normalized to 1
        #windowSpectrum = []
        dominantFreqVal = []
        dominantFreqAmp = []
        for i in range(numOfSensors):
            spectrum = np.abs(np.fft.fftshift(np.fft.fft(dataWindow[::, i])))
            spectrum = spectrum[
                int(windowNumberOfPoints /
                    2):]  #*freqNormFact/signalPower[i]/windowNumberOfPoints
            #windowSpectrum.append(spectrum)
            dominantFreqAmp.append(
                np.real(spectrum[spectrum.argsort()[-numDomFreqs:]])
            )  # -> get amplitude of real part of largest frequencies
            dominantFreqVal.append(freqAxis[spectrum.argsort()[-numDomFreqs:]]
                                   )  # -> get value of largest frequencies
        for i in range(numDomFreqs):
            for j in range(numOfSensors):
                featureVector.append(
                    np.round([(np.transpose(dominantFreqVal)[i, j],
                               np.transpose(dominantFreqAmp)[i, j])], 5))
        featureVector = np.array(featureVector)
        featureVector = np.reshape(featureVector, np.size(featureVector))
    return featureVector
예제 #49
0
plt.grid()


print(epochs)
# matched filtering
ycn=yc/10
ycn=np.where(np.abs(ycn)!=1,ycn,0)
yc1=10*ycn
ydc=np.real(scipy.fft.fftshift(scipy.fft.ifft(scipy.fft.fft(yc1)/scipy.fft.fft(yg))))

# input for the wavelet decomposition
in1=ydc

# wavelet decomposition using the DWT
CA11,CD11,CD10,CD9,CD8,CD7,CD6,CD5,CD4,CD3,CD2,CD1=pywt.wavedec(ydc,wavelet=pywt.Wavelet('db2'),level=11)

# predict the level-10 DWT detail coefficients
CD10P=[]
for i in range(1):
    # CD10 is 1-D, so index by length; tile it to the signal length so the
    # column_stack below lines up (assumed intent of the original np.resize)
    t = np.linspace(0, len(CD10) / fs, len(ydc))
    ca11 = np.resize(CD10, len(ydc))
    x_train = np.column_stack((ca11, t, t))
    x_train=np.asarray(x_train)
   

    encoding_dim = 3


    input = Input(shape=(3,))
    encoded = Dense(encoding_dim, activation='relu')(input)
예제 #50
0
    def one_row(arr_pic, arr_pic2, label):
        global a
        a = a + 1
        arr_add = arr_pic.loc[::]  # index rows by row label

        arr_add2 = arr_pic2.loc[::]

        listall = np.mat(arr_pic)

        listall2 = np.mat(arr_pic2)

        result_list = []  # new list to collect the features

        for i in range(params['wave_layer1'] + 1):

            data_ave = np.mean(listall[:, :])
            list_paraa = [data_ave]
            result_list.extend(list_paraa)  #

            data_std = np.std(listall[:, :])  # standard deviation
            list_para2 = [data_std]
            result_list.extend(list_para2)  #

            time_var = np.var(listall[:, :])
            list_para6 = [time_var]
            result_list.extend(list_para6)  #

            time_amp = np.abs(listall[:, :]).mean()
            list_para7 = [time_amp]
            result_list.extend(list_para7)  #

            time_smr = np.square(
                np.sqrt(np.abs(listall[:, :]).astype(np.float64)).mean())
            list_para8 = [time_smr]
            result_list.extend(list_para8)  #

            time_rms = np.sqrt(
                np.square(listall[:, :]).mean().astype(np.float64))
            list_para81 = [time_rms]
            result_list.extend(list_para81)  #

            time_iqr = np.percentile(listall[:, :], 75) - np.percentile(
                listall[:, :], 25)
            list_para84 = [time_iqr]
            result_list.extend(list_para84)  #

            time_pr = np.percentile(listall[:, :], 90) - np.percentile(
                listall[:, :], 10)
            list_para85 = [time_pr]
            result_list.extend(list_para85)  # best to include

            time_max = listall[:, :].max()
            list_para87 = [time_max]
            result_list.extend(list_para87)

            time_min = listall[:, :].min()
            list_para88 = [time_min]
            result_list.extend(list_para88)  #

            data_avep1 = np.mean(listall[:, :]) + 1
            list_paraap1 = [data_avep1]
            result_list.extend(list_paraap1)  #     #

            if time_amp == 0:
                time_wavefactor = 0
                time_pulse = 0
            else:
                time_wavefactor = time_rms / time_amp
                time_pulse = time_max / time_amp

            list_para89 = [time_wavefactor]  # good at separating class 3
            result_list.extend(list_para89)  #

            for i in arr_add.columns:
                df_fftline = fftpack.fft(arr_add[i])
                freq_fftline = fftpack.fftfreq(len(arr_add[i]), 1 / 12000)
                df_fftline = abs(df_fftline[freq_fftline >= 0])
                freq_fftline = freq_fftline[freq_fftline >= 0]
                # basic features, in order: mean, std, max, min, RMS, median, IQR, percentile range
                freq_mean = df_fftline.mean()
                freq_std = df_fftline.std()
                freq_max = df_fftline.max()
                freq_min = df_fftline.min()
                freq_rms = np.sqrt(np.square(df_fftline).mean())
                freq_median = np.median(df_fftline)
                freq_iqr = np.percentile(df_fftline, 75) - np.percentile(
                    df_fftline, 25)
                freq_pr = np.percentile(df_fftline, 90) - np.percentile(
                    df_fftline, 10)
                # f2, f3, f4 reflect how concentrated the spectrum is
                freq_f2 = np.square(
                    (df_fftline - freq_mean)).sum() / (len(df_fftline) - 1)
                freq_f3 = pow((df_fftline - freq_mean),
                              3).sum() / (len(df_fftline) * pow(freq_f2, 1.5))
                freq_f4 = pow((df_fftline - freq_mean),
                              4).sum() / (len(df_fftline) * pow(freq_f2, 2))
                # f5-f8 reflect the location of the dominant frequency band
                freq_f5 = np.multiply(freq_fftline,
                                      df_fftline).sum() / df_fftline.sum()
                freq_f6 = np.sqrt(
                    np.multiply(np.square(freq_fftline),
                                df_fftline).sum()) / df_fftline.sum()
                freq_f7 = np.sqrt(
                    np.multiply(pow(freq_fftline, 4),
                                df_fftline).sum()) / np.multiply(
                                    np.square(freq_fftline), df_fftline).sum()
                freq_f8 = np.multiply(
                    np.square(freq_fftline), df_fftline).sum() / np.sqrt(
                        np.multiply(pow(freq_fftline, 4), df_fftline).sum() *
                        df_fftline.sum())
                #----------  timefreq-domain feature,12
                # 5-level wavelet decomposition; outputs 6 band energies and their normalized ratios
                cA5, cD5, cD4, cD3, cD2, cD1 = wavedec(arr_add[i],
                                                       'db10',
                                                       level=5)
                ener_cA5 = np.square(cA5).sum()
                ener_cD5 = np.square(cD5).sum()
                ener_cD4 = np.square(cD4).sum()
                ener_cD3 = np.square(cD3).sum()
                ener_cD2 = np.square(cD2).sum()
                ener_cD1 = np.square(cD1).sum()
                ener = ener_cA5 + ener_cD1 + ener_cD2 + ener_cD3 + ener_cD4 + ener_cD5

                ratio_cA5 = ener_cA5 / ener
                ratio_cD5 = ener_cD5 / ener
                ratio_cD4 = ener_cD4 / ener
                ratio_cD3 = ener_cD3 / ener
                ratio_cD2 = ener_cD2 / ener
                ratio_cD1 = ener_cD1 / ener
                result_list.extend([
                    freq_mean, freq_std, freq_max, freq_min, freq_rms,
                    freq_median, freq_iqr, freq_pr, freq_f2, freq_f3, freq_f4,
                    freq_f5, freq_f6, freq_f7, freq_f8, ener_cA5, ener_cD5,
                    ener_cD4, ener_cD3, ener_cD2, ener_cD1, ratio_cA5,
                    ratio_cD5, ratio_cD4, ratio_cD3, ratio_cD2, ratio_cD1
                ])
            # notes (translated): freq_median is highly informative;
            # main culprits: freq_mean, freq_std, ener_cD5..ener_cD1, freq_iqr,
            # freq_f6, freq_f5, freq_f2, freq_max, freq_min, freq_rms, freq_f4, freq_f3, ratio_cA5

            data_std2 = np.std(listall2[:, :])  # standard deviation; neutral feature
            list_para11 = [data_std2]
            result_list.extend(list_para11)

            data_energy2 = np.sum(np.abs(listall2[:, :]))  # energy; good at separating class 2
            list_para12 = [data_energy2]
            result_list.extend(list_para12)

            time_kurtosis = stats.kurtosis(listall2[:, :])
            time_kurtosisex = np.sum(time_kurtosis)
            list_para13 = [time_kurtosisex]
            result_list.extend(list_para13)

            time_var = np.var(listall2[:, :])
            list_para14 = [time_var]
            result_list.extend(list_para14)

            time_amp = np.abs(listall2[:, :]).mean()
            list_para15 = [time_amp]
            result_list.extend(list_para15)

            time_smr = np.square(
                np.sqrt(np.abs(listall2[:, :]).astype(np.float64)).mean())
            list_para16 = [time_smr]
            result_list.extend(list_para16)

            time_rms = np.sqrt(
                np.square(listall2[:, :]).mean().astype(np.float64))
            list_para161 = [time_rms]
            result_list.extend(list_para161)

            time_ptp = np.asarray(listall2[:, :]).ptp()
            list_para162 = [time_ptp]
            result_list.extend(list_para162)

            time_iqr = np.percentile(listall2[:, :], 75) - np.percentile(
                listall2[:, :], 25)
            list_para164 = [time_iqr]
            result_list.extend(list_para164)

            time_pr = np.percentile(listall2[:, :], 90) - np.percentile(
                listall2[:, :], 10)
            list_para165 = [time_pr]
            result_list.extend(list_para165)

            time_skew = stats.skew(listall[:, :])
            time_skewex = np.sum(time_skew)
            list_para166 = [time_skewex]
            result_list.extend(list_para166)

            time_min = listall2[:, :].min()
            list_para168 = [time_min]

            result_list.extend(list_para168)

            if time_amp == 0:
                time_wavefactor = 0
                time_pulse = 0
            else:
                time_wavefactor = time_rms / time_amp
                time_pulse = time_max / time_amp

            list_para169 = [time_wavefactor]
            result_list.extend(list_para169)
            list_para1610 = [time_pulse]
            result_list.extend(list_para1610)
            if time_rms == 0:
                time_peakfactor = 0
            else:
                time_peakfactor = time_max / time_rms
            list_para1611 = [time_peakfactor]
            result_list.extend(list_para1611)

            if time_smr == 0:
                time_margin = 0
            else:
                time_margin = time_max / time_smr
            list_para1612 = [time_margin]
            result_list.extend(list_para1612)
            for i in arr_add2.columns:
                df_fftline = fftpack.fft(arr_add2[i])
                freq_fftline = fftpack.fftfreq(len(arr_add2[i]), 1 / 12000)
                df_fftline = abs(df_fftline[freq_fftline >= 0])
                freq_fftline = freq_fftline[freq_fftline >= 0]
                # basic features, in order: mean, std, max, min, RMS, median, IQR, percentile range
                freq_mean = df_fftline.mean()
                freq_std = df_fftline.std()
                freq_max = df_fftline.max()
                freq_min = df_fftline.min()
                freq_rms = np.sqrt(np.square(df_fftline).mean())
                freq_median = np.median(df_fftline)
                freq_iqr = np.percentile(df_fftline, 75) - np.percentile(
                    df_fftline, 25)
                freq_pr = np.percentile(df_fftline, 90) - np.percentile(
                    df_fftline, 10)
                # f2, f3, f4 reflect how concentrated the spectrum is
                freq_f2 = np.square(
                    (df_fftline - freq_mean)).sum() / (len(df_fftline) - 1)
                freq_f3 = pow((df_fftline - freq_mean),
                              3).sum() / (len(df_fftline) * pow(freq_f2, 1.5))
                freq_f4 = pow((df_fftline - freq_mean),
                              4).sum() / (len(df_fftline) * pow(freq_f2, 2))
                # f5-f8 reflect the location of the dominant frequency band
                freq_f5 = np.multiply(freq_fftline,
                                      df_fftline).sum() / df_fftline.sum()
                freq_f6 = np.sqrt(
                    np.multiply(np.square(freq_fftline),
                                df_fftline).sum()) / df_fftline.sum()
                freq_f7 = np.sqrt(
                    np.multiply(pow(freq_fftline, 4),
                                df_fftline).sum()) / np.multiply(
                                    np.square(freq_fftline), df_fftline).sum()
                freq_f8 = np.multiply(
                    np.square(freq_fftline), df_fftline).sum() / np.sqrt(
                        np.multiply(pow(freq_fftline, 4), df_fftline).sum() *
                        df_fftline.sum())
                #----------  timefreq-domain feature,12
                # 5-level wavelet decomposition; outputs 6 band energies and their normalized ratios
                cA5, cD5, cD4, cD3, cD2, cD1 = wavedec(arr_add2[i],
                                                       'db10',
                                                       level=5)
                ener_cA5 = np.square(cA5).sum()
                ener_cD5 = np.square(cD5).sum()
                ener_cD4 = np.square(cD4).sum()
                ener_cD3 = np.square(cD3).sum()
                ener_cD2 = np.square(cD2).sum()
                ener_cD1 = np.square(cD1).sum()
                ener = ener_cA5 + ener_cD1 + ener_cD2 + ener_cD3 + ener_cD4 + ener_cD5
                ratio_cA5 = ener_cA5 / ener
                ratio_cD5 = ener_cD5 / ener
                ratio_cD4 = ener_cD4 / ener
                ratio_cD3 = ener_cD3 / ener
                ratio_cD2 = ener_cD2 / ener
                ratio_cD1 = ener_cD1 / ener
                result_list.extend([
                    freq_mean, freq_std, freq_max, freq_min, freq_rms,
                    freq_median, freq_iqr, freq_pr, freq_f2, freq_f3, freq_f4,
                    freq_f5, freq_f6, freq_f7, freq_f8, ener_cA5, ener_cD5,
                    ener_cD4, ener_cD3, ener_cD2, ener_cD1, ratio_cA5,
                    ratio_cD5, ratio_cD4, ratio_cD3, ratio_cD2, ratio_cD1
                ])

                # features to exclude: freq_f3, ener_cD5, freq_mean, freq_f6, freq_max,
                # freq_min, freq_rms, freq_median, freq_iqr, freq_pr, freq_f2,
                # ener_cD4, ener_cD3, ener_cD2, ener_cD1, freq_f4, ratio_cA5
                a = 438
                # neutral (neither helps nor hurts)
                result_list.extend([label])
        return result_list
예제 #51
0
File: BuildSeq.py  Project: weanl/myJan
def conScoreSeqs(score, look_back, forecast_step):
    print('--->call--->conScoreSeqs')

    # 1 compute dec_lv
    assert look_back <= maxLookBack
    dec_len = maxLookBack
    dec_lv = int(np.log2(dec_len) - 3)
    print('dec_lv+1 = ', dec_lv + 1)

    # 2 construct x_seqs, y_seqs
    assert score.ndim == 1
    raw_len = len(score)
    assert raw_len >= 2 * dec_len
    drop_len = raw_len - int(
        (raw_len - dec_len) / forecast_step) * forecast_step - dec_len
    if drop_len == 0:
        score = score
    else:
        score = score[:-drop_len]
    ##
    x = score.reshape(-1, 1)[:-forecast_step]
    y = score.reshape(-1, 1)[forecast_step:]
    x_seqs = consSeqx(x, dec_len, forecast_step, testFlag=False)
    y_seqs = consSeqx(y, dec_len, forecast_step, testFlag=False)
    assert x_seqs.shape[0] == y_seqs.shape[0]
    num = x_seqs.shape[0]

    # 3 wavedec
    x_seqs = x_seqs.reshape(num, dec_len)
    y_seqs = y_seqs.reshape(num, dec_len)
    ##
    x_seqs_dec = wavedec(x_seqs, wavelet='db4', level=dec_lv)
    y_seqs_dec = wavedec(y_seqs, wavelet='db4', level=dec_lv)

    # 4 waverec
    x_seqs_subs = [np.zeros_like(x_seqs) for i in range(dec_lv + 1)]
    y_seqs_subs = [np.zeros_like(y_seqs) for i in range(dec_lv + 1)]
    for LV in range(dec_lv + 1):
        ##
        x_seqs_dec_slct = [np.zeros_like(l) for l in x_seqs_dec]
        x_seqs_dec_slct[LV] = x_seqs_dec[LV]
        x_seqs_subs[LV] = waverec(x_seqs_dec_slct, 'db4')
        ##
        y_seqs_dec_slct = [np.zeros_like(l) for l in y_seqs_dec]
        y_seqs_dec_slct[LV] = y_seqs_dec[LV]
        y_seqs_subs[LV] = waverec(y_seqs_dec_slct, 'db4')

    # 5 construct `inputs` and `outputs` for model
    #   according to look_back and forecast_step
    inputs = [l[:, -look_back:].reshape(-1, look_back, 1) for l in x_seqs_subs]
    outputs = [
        l[:, -forecast_step:].reshape(-1, forecast_step, 1)
        for l in y_seqs_subs
    ]
    inputs = np.array(inputs)
    outputs = np.array(outputs)
    print('inputs.shape = ', inputs.shape)
    print('outputs.shape = ', outputs.shape)

    # -1 fetch look_back from x_seqs
    #           forecast_step from y_seqs
    x_seqs = x_seqs[:, -look_back:]
    y_seqs = y_seqs[:, -forecast_step:]
    print('x_seqs.shape = ', x_seqs.shape)
    print('y_seqs.shape = ', y_seqs.shape)

    print('--->return from--->conScoreSeqs')
    return x_seqs, y_seqs, inputs, outputs, drop_len
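
# Minimal illustration (assumed, not from the source) of the per-level
# reconstruction trick used above: zero every coefficient array except one,
# then waverec; the resulting sub-band signals sum back to the input.
import numpy as np
from pywt import wavedec, waverec

sig = np.random.randn(4, 64)                # batch of sequences, transform along axis -1
dec = wavedec(sig, wavelet='db4', level=3)  # [cA3, cD3, cD2, cD1]
subs = []
for lv in range(len(dec)):
    slct = [np.zeros_like(c) for c in dec]
    slct[lv] = dec[lv]
    subs.append(waverec(slct, 'db4'))
assert np.allclose(sum(subs), sig)          # linearity: sub-bands sum to the signal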
예제 #52
0
def feature_get(filepath, windowlen):
    df_data = pd.DataFrame(pd.read_csv(filepath))
    if ((df_data.columns.values == 'BA_time').any() == False):
        dfs = df_data.loc[:, ['DE_time', 'FE_time']]
    else:
        dfs = df_data.loc[:, ['DE_time', 'FE_time', 'BA_time']]
    features_list = []
    print(len(dfs))
    for i in range(0, len(dfs), windowlen):
        if (int((len(dfs) - i) / windowlen) >= 1):  # discard the short leftover chunk
            df = dfs[i:i + params['len_piece']]
            feature_list = []
            #             print(df)
            for i in df.columns:
                # ----------  time-domain feature,18
                # in order: mean, std, max, min, RMS, peak-to-peak, median, IQR, percentile range,
                # skewness, kurtosis, variance, rectified mean, square-root amplitude,
                # waveform factor, crest factor, impulse factor, margin factor
                df_line = df[i]
                #                 print(df_line)
                time_mean = df_line.mean()
                time_std = df_line.std()
                time_max = df_line.max()
                time_min = df_line.min()
                time_rms = np.sqrt(np.square(df_line).mean())
                time_ptp = time_max - time_min
                time_median = np.median(df_line)
                time_iqr = np.percentile(df_line, 75) - np.percentile(df_line, 25)
                time_pr = np.percentile(df_line, 90) - np.percentile(df_line, 10)
                time_skew = stats.skew(df_line)
                time_kurtosis = stats.kurtosis(df_line)
                time_var = np.var(df_line)
                time_amp = np.abs(df_line).mean()
                time_smr = np.square(np.sqrt(np.abs(df_line)).mean())
                # the next four features can have a zero (or near-zero) denominator and may raise errors
                time_wavefactor = time_rms / time_amp
                time_peakfactor = time_max / time_rms
                time_pulse = time_max / time_amp
                time_margin = time_max / time_smr
                # ----------  freq-domain feature,15
                # sampling rate: 25600 Hz
                df_fftline = fftpack.fft(df[i])
                freq_fftline = fftpack.fftfreq(len(df[i]), 1 / 25600)
                df_fftline = abs(df_fftline[freq_fftline >= 0])
                freq_fftline = freq_fftline[freq_fftline >= 0]
                # basic features, in order: mean, std, max, min, RMS, median, IQR, percentile range
                freq_mean = df_fftline.mean()
                freq_std = df_fftline.std()
                freq_max = df_fftline.max()
                freq_min = df_fftline.min()
                freq_rms = np.sqrt(np.square(df_fftline).mean())
                freq_median = np.median(df_fftline)
                freq_iqr = np.percentile(df_fftline, 75) - np.percentile(df_fftline, 25)
                freq_pr = np.percentile(df_fftline, 90) - np.percentile(df_fftline, 10)
                # f2, f3, f4 reflect how concentrated the spectrum is
                freq_f2 = np.square((df_fftline - freq_mean)).sum() / (len(df_fftline) - 1)
                freq_f3 = pow((df_fftline - freq_mean), 3).sum() / (len(df_fftline) * pow(freq_f2, 1.5))
                freq_f4 = pow((df_fftline - freq_mean), 4).sum() / (len(df_fftline) * pow(freq_f2, 2))
                # f5-f8 reflect the location of the dominant frequency band
                freq_f5 = np.multiply(freq_fftline, df_fftline).sum() / df_fftline.sum()
                freq_f6 = np.sqrt(np.multiply(np.square(freq_fftline), df_fftline).sum()) / df_fftline.sum()
                freq_f7 = np.sqrt(np.multiply(pow(freq_fftline, 4), df_fftline).sum()) / np.multiply(
                    np.square(freq_fftline), df_fftline).sum()
                freq_f8 = np.multiply(np.square(freq_fftline), df_fftline).sum() / np.sqrt(
                    np.multiply(pow(freq_fftline, 4), df_fftline).sum() * df_fftline.sum())
                # ----------  timefreq-domain feature,12
                # 5-level wavelet decomposition; outputs 6 band energies and their normalized ratios
                cA5, cD5, cD4, cD3, cD2, cD1 = wavedec(df[i], 'db10', level=5)
                ener_cA5 = np.square(cA5).sum()
                ener_cD5 = np.square(cD5).sum()
                ener_cD4 = np.square(cD4).sum()
                ener_cD3 = np.square(cD3).sum()
                ener_cD2 = np.square(cD2).sum()
                ener_cD1 = np.square(cD1).sum()
                ener = ener_cA5 + ener_cD1 + ener_cD2 + ener_cD3 + ener_cD4 + ener_cD5
                ratio_cA5 = ener_cA5 / ener
                ratio_cD5 = ener_cD5 / ener
                ratio_cD4 = ener_cD4 / ener
                ratio_cD3 = ener_cD3 / ener
                ratio_cD2 = ener_cD2 / ener
                ratio_cD1 = ener_cD1 / ener
                feature_list.extend(
                    [time_mean, time_std, time_max, time_min, time_rms, time_ptp, time_median, time_iqr, time_pr,
                     time_skew, time_kurtosis, time_var, time_amp,
                     time_smr, time_wavefactor, time_peakfactor, time_pulse, time_margin, freq_mean, freq_std, freq_max,
                     freq_min, freq_rms, freq_median,
                     freq_iqr, freq_pr, freq_f2, freq_f3, freq_f4, freq_f5, freq_f6, freq_f7, freq_f8, ener_cA5,
                     ener_cD1, ener_cD2, ener_cD3, ener_cD4, ener_cD5,
                     ratio_cA5, ratio_cD1, ratio_cD2, ratio_cD3, ratio_cD4, ratio_cD5])
            features_list.append(feature_list)
    return features_list
예제 #53
0
import csv
import numpy as np
import pywt
from collections import defaultdict

fout_data = open("train.csv",'a')
vec = []
chan = ['Fp1','AF3','F3','F7','FC5','FC1','C3','T7','CP5','CP1','P3','P7','PO3','O1','Oz','Pz','Fp2','AF4','Fz','F4','F8','FC6','FC2','Cz','C4','T8','CP6','CP2','P4','P8','PO4','O2']
columns = defaultdict(list) # each value in each column is appended to a list

with open("features_raw.csv") as f:
    reader = csv.DictReader(f) # read rows into a dictionary format
    for row in reader: # read a row as {column1: value1, column2: value2,...}
        for (k,v) in row.items(): # go over each column name and value 
            columns[k].append(v) # append the value into the appropriate list
                                 # based on column name k
for i in chan:
    x = np.array(columns[i]).astype(float)  # np.float was removed from NumPy
    coeffs = pywt.wavedec(x, 'db4', level=6)
    cA6, cD6, cD5, cD4, cD3, cD2, cD1 = coeffs
    cD5 = np.std(cD5)
    cD4 = np.std(cD4)
    cD3 = np.std(cD3)
    cD2 = np.std(cD2)
    cD1 = np.std(cD1)
    if i == "O2":
        fout_data.write(str(cD5) + ",")
        fout_data.write(str(cD4) + ",")
        fout_data.write(str(cD3) + ",")
        fout_data.write(str(cD2) + ",")
        fout_data.write(str(cD1))
    else:
        fout_data.write(str(cD5) + ",")
        fout_data.write(str(cD4) + ",")
예제 #54
0
print(matriz.shape)
print(matriz)

total_matriz = np.empty(shape=(res_real.shape[0] * 2, res_real.shape[1]),
                        dtype=complex)
cong_matriz = matriz.conjugate()


def unir(positivo, negativo, matriz_final):
    for i in range(positivo.shape[0]):
        for j in range(positivo.shape[1]):
            matriz_final[i][j] = positivo[i][j]
            matriz_final[matriz_final.shape[0] - 1 - i][j] = negativo[i][j]


unir(matriz, cong_matriz, total_matriz)

print("total_matriz ", total_matriz)
print(total_matriz.shape)

# apply the IFFT to that
final = np.fft.ifft(total_matriz)
print("final ", final)

# Apply wavelet denoising (after MATLAB's wden: db3 filter, 'minimaxi' threshold,
# 'sln' rescaling, maximum decomposition level). Note that wavedec only
# decomposes; thresholding the details would be needed for real denoising.
sin_ruido = pywt.wavedec(final, 'db3')
print("SIN RUIDO", sin_ruido)

# export the audio
sonidofinal = np.array(sin_ruido[0], dtype='int8')
wavfile.write('salida_' + sys.argv[1], fs, sonidofinal)
예제 #55
0
def return_best_wv_level_idx(x,
                             fs,
                             sqr,
                             levels,
                             mother_wv,
                             crit,
                             wv_approx,
                             int_points,
                             freq_values=None,
                             freq_range=None,
                             freq_values_2=None,
                             freq_range_2=None):
    n = len(x)
    coeffs = pywt.wavedec(x, mother_wv, level=levels, mode='periodic')
    print('LEN COEF ', len(coeffs))
    for i in range(levels + 1):
        print('..', i)
        print('...', len(coeffs[i]))

    vec = np.zeros(levels + 1)
    for i in range(levels + 1):
        print('evaluate level ', i / levels)
        wsignal = coeffs[i]

        if int_points != True:
            wsignal = odd_to_even(wsignal)
            new_fs = downsampling_fs(x, wsignal, fs)

        # plt.plot(wsignal)
        # plt.show()

        if crit == 'mpr':
            fitness = cal_WV_fitness_hilbert_env_Ncomp_mpr(
                wsignal, sqr, freq_values, freq_range, new_fs)
        elif crit == 'avg_mpr':
            fitness = cal_WV_fitness_hilbert_env_AVG_mpr(
                wsignal, sqr, freq_values, freq_range, freq_values_2,
                freq_range_2, new_fs)
        elif crit == 'kurt_sen':
            kurt = scipy.stats.kurtosis(wsignal, fisher=False)
            sen = shannon_entropy(wsignal)
            fitness = kurt / sen
        elif crit == 'kurt':
            kurt = scipy.stats.kurtosis(wsignal, fisher=False)
            fitness = kurt
        else:
            print('fatal error crit wavelet')
            sys.exit()

        if i == 0 and wv_approx != 'ON':
            vec[i] = -9999
        else:
            vec[i] = fitness

    best_level_idx = np.argmax(vec)
    print('best fitness = ', np.max(vec))

    outsignal = coeffs[best_level_idx]
    if int_points == True:
        xold = np.linspace(0., 1., len(outsignal))
        xnew = np.linspace(0., 1., n)
        outsignal = np.interp(x=xnew, xp=xold, fp=outsignal)
        new_fs = fs
    else:
        outsignal = odd_to_even(outsignal)
        new_fs = downsampling_fs(x, outsignal, fs)

    return outsignal, best_level_idx, new_fs
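
# Hypothetical call (names per the listing above; the kurtosis criterion with
# int_points=True avoids the helpers odd_to_even/downsampling_fs/... that this
# listing does not show):
# out, best_idx, new_fs = return_best_wv_level_idx(x, fs, sqr=None, levels=3,
#                                                  mother_wv='db4', crit='kurt',
#                                                  wv_approx='ON', int_points=True)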
예제 #56
0
nu = np.arange(-0.5,0.5, step)
plt.title('FR of the HPF'), plt.plot(nu,abs(HPF)), plt.show()
#% compute derivatives
k=1
dh=abs(HPF)
dnu = nu
while k<=4:
    dh = np.diff(dh)/step
    dnu=dnu[1:]
    print('Derivative %1d' % k)
    plt.plot(dnu,dh), plt.show()
    k=k+1

#%% Compute and show  the DWT

cA3, cD3, cD2, cD1 = pywt.wavedec(x, w, level=3)
plt.title('Approximation'), plt.plot(cA3), plt.show(),
plt.title('Details 3'), plt.plot(cD3), plt.show(),
plt.title('Details 2'), plt.plot(cD2), plt.show(),
plt.title('Details 1'), plt.plot(cD1), plt.show(),

plt.hist(cD3,200)
plt.show()


#%% Noise characteristics
largeN =  2**18
sigma = .5
noiseSamples = sigma * np.random.randn(largeN)  # largeN iid samples from N(0, sigma^2) (filled-in exercise blank)
plt.hist(noiseSamples,200)
plt.title('Noise Histogram'), plt.show()
예제 #57
0
                                                      output=None,
                                                      mode='reflect')

            obj = Differentiator(filt_length=window_length + 1,
                                 window=np.blackman(window_length + 1))
            a = H2 / np.max(H2)
            diff_mean = obj.diff_eval(a)
            diff = obj.diff_eval(foooo)
            diff = diff[window_length + 1:]
            diff_mean = diff_mean[window_length + 1:]
            plt.figure()
            plt.title('Derivative')
            plt.plot(diff_mean)
            plt.plot(diff)

            coeffs = pywt.wavedec(a, 'db3', level=2)
            cA, cD2, cD1 = coeffs
            plt.figure()
            plt.subplot(3, 1, 1)
            plt.plot(cA)
            plt.subplot(3, 1, 2)
            plt.plot(cD1)
            plt.subplot(3, 1, 3)
            plt.plot(cD2)

            plt.show()

# In[ ]:

linalg.norm(a)
예제 #58
0
def transform(x):
    db1 = pywt.Wavelet('haar')
    rs = pywt.wavedec(x, db1)
    rs = np.concatenate(rs)
    return rs
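
# Quick check (illustrative, not from the source): with an orthogonal wavelet
# such as Haar, the flattened coefficient vector of a dyadic-length input has
# the same total length as the input (1 + 1 + 2 + 4 = 8 here).
import numpy as np
print(len(transform(np.arange(8.0))))  # -> 8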
예제 #59
0
def denoise(st, wavelet="coif4", MODE="zero", remove_bg=True,
                threshold='soft', zero_coarse_levels=1, zero_fine_levels=1,
                preevent_window=10.0, preevent_threshold_reduction=2.0,
                store_orig=False, store_noise=False):
    """Remove noise from waveforms using wavelets in a two-step
    process. In the first step, noise is identified via a Kurtosis
    analysis of the wavelet coefficients. In the second step, the
    noise level in a pre-event window is determined for each wavelet
    level and then removed from the waveform using a soft threshold.

    :type wavelet: str
    :param wavelet: Name of wavelet to use in denoising.
    :type remove_bg: bool
    :param remove_bg: If True, perform the first step in the denoising process.
    :type preevent_window: float
    :param preevent_window: Size of pre-event window to use in second step. Skip second step if <= 0.
    :type preevent_threshold_reduction: float
    :param preevent_threshold_reduction: Factor to reduce threshold of noise level in second step.
    :type store_orig: bool
    :param store_orig: Return a copy of the original waveforms.
    :type store_noise: bool
    :param store_noise: Return the noise waveforms removed.
    :returns: Dictionary containing the denoised waveforms and, if
    requested, original waveforms and noise waveforms.

    """
    # Use of "__name__" is to grab the module's name in this python package namespace
    logger = logging.getLogger(__name__)

    try:
        import pywt
    except ImportError:
        raise ImportError("dwt_denoise() requires PyWavelets (pywt) Python module.")

    # Incorporate use of other wavelets; check that the requested one is valid
    valid_wavelets = [name for fam in pywt.families()
                      for name in pywt.wavelist(fam)]
    if wavelet not in valid_wavelets:
        logger.info("The wavelet selected by the user is not supported by PyWavelets")

    # Incorporate other options for padding, while default is still zero
        # Do at some point

    # Keep a copy of the input data
    dataOut = {}
    if store_orig:
        dataOut["orig"] = st.copy()

    # Prep in case user wants to also keep noise
    tracesNoise = []
    for tr in st:
        channelLabel = "%s.%s.%s" % (tr.stats.network,
                                     tr.stats.station,
                                     tr.stats.channel)
        coefsNoise = []

        coefs = pywt.wavedec(tr.data, wavelet, mode=MODE)

        # Do kurtosis analysis to determine noise
        if remove_bg:
            coefsNoise = utils.kurtosis(channelLabel, coefs, logger)

        # Identify pre-event noise at all wavelet levels and remove
        coefs, coefsNoise = utils.remove_pre_event_noise(tr,coefs, preevent_window, preevent_threshold_reduction)
        for ilevel in range(1+zero_coarse_levels):
            coefsNoise[ilevel] += coefs[ilevel].copy()
            coefs[ilevel] *= 0.0
        for ilevel in range(zero_fine_levels):
            index = -(1+ilevel)
            coefsNoise[index] += coefs[index].copy()
            coefs[index] *= 0.0

        # Reconstruct a reduced noise signal from processed wavelet coefficients
        tr.data = pywt.waverec(coefs, wavelet, mode=MODE)

        if store_noise:
            trNoise = tr.copy()
            trNoise.data = pywt.waverec(coefsNoise, wavelet, mode=MODE)
            tracesNoise.append(trNoise)

        # Apply the chosen thresholding to the trace
        if threshold == 'soft':
            tr = utils.soft_threshold(tr, channelLabel, coefs, coefsNoise, logger)
        elif threshold == 'hard':
            # NOTE: currently falls back to the soft-threshold helper
            tr = utils.soft_threshold(tr, channelLabel, coefs, coefsNoise, logger)
        elif threshold == 'block':
            logger.info("Block thresholding currently under development")
        else:
            logger.info("Unsupported thresholding option")

    dataOut["data"] = st
    if store_noise:
        import obspy.core
        dataOut["noise"] = obspy.core.Stream(traces=tracesNoise)

    return dataOut
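
# Hypothetical usage (obspy and the package-local `utils` module are assumed
# available; the file name is illustrative):
# import obspy
# st = obspy.read("event.mseed")
# out = denoise(st, wavelet="coif4", preevent_window=10.0,
#               store_orig=True, store_noise=True)
# st_clean, st_noise = out["data"], out["noise"]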
예제 #60
0
window_size = 256

b = 256 * 4

scale = 40

# plt.figure()

window = price_series[n - window_size - b:n - b]
window = np.array(window)

plt.figure()
plt.plot(window - 400)

decomposition_dwt = pywt.wavedec(window, 'db6')
decomposition_dwt[0].fill(0)   # zero the coarsest approximation (removes the trend)
decomposition_dwt[-1].fill(0)  # zero the three finest detail levels (removes noise)
decomposition_dwt[-2].fill(0)
decomposition_dwt[-3].fill(0)
# decomposition_dwt[-4].fill(0)
# decomposition_dwt[-5].fill(0)

window = pywt.waverec(decomposition_dwt, 'db6')

plt.plot(window)

decomposition, _ = pywt.cwt(window, np.arange(1, scale), 'morl')

x = np.arange(1, window_size + 1)
y = np.arange(1, scale)