Example no. 1
def ecg_plot(data, ax1, signum):
	x, y = [], []
	print("Creating plot number %d" % signum)
	#for row in arr[2:]:
	#	x.append(row[0])
	#	y.append(row[signum])
	for index, cell in enumerate(data[signum-1]):
		x.append(index)
		y.append(float(cell)/mV)
		
	global X
	global Y
	X.append(x)
	Y.append(y)
	if(signum==1):
		find_QRS(y)
	ax1.plot(X[signum-1],Y[signum-1])
	fftax = np.fft.fft(y)/ylen
	fftax = fftax[0:int(ylen/2)]

	(wtaxA, wtaxD) = pywt.dwt(y, 'db1')  ## WT approximation + detail coefficients, whatever that means
	if(WT_times > 1):
		for i in range(WT_times-1):
			(wtaxA,wtaxD) = pywt.dwt((wtaxA),'db1')

	global FFTaxes
	global WTaxesA
	global WTaxesD
	FFTaxes[signum-1] = fftax
	WTaxesA[signum-1] = wtaxA
	WTaxesD[signum-1] = wtaxD
def collect(S, wavelet, mode, level):
    '''
    Returns the full binary tree of wavelet packets.
    @param S:         Input signal.
                      Both single and double precision floating-point data types are supported
                      and the output type depends on the input type. If the input data is not
                      in one of these types it will be converted to the default double precision
                      data format before performing computations.
    @param wavelet:   Wavelet to use in the transform. 
                      This must be a name of the wavelet from the wavelist() list.
    @param mode:      Signal extension mode to deal with the border distortion problem.
    @param level:     Number of decomposition steps to perform. If the level is None, then the
                      full decomposition up to the level computed with dwt_max_level() function for
                      the given data and wavelet lengths is performed.
    @return:          The full binary tree of wavelet packets.
    '''
    Nodes = [[] for i in range(level)]
    (Cl, Cr) = pywt.dwt(S, wavelet=wavelet, mode=mode)
    Nodes[0] = [node.Node(Cl, 0, 0), node.Node(Cr, 0, 1)]
    for l in range(0, level-1):
        Parents = Nodes[l]
        Childs = []
        for p in range(len(Parents)):
            (Cl, Cr) = pywt.dwt(Parents[p].C, wavelet=wavelet, mode=mode)
            Childs.append(node.Node(Cl, l+1, 2*p))
            Childs.append(node.Node(Cr, l+1, 2*p+1))
        Nodes[l+1] = Childs 
    return Nodes
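A minimal usage sketch for collect(); the node module it relies on is not shown above, so a stand-in Node container with the same (C, level, index) constructor is assumed here, and the signal and wavelet are arbitrary choices.

import numpy as np
import pywt

class _Node:
    def __init__(self, C, level, index):
        self.C = C          # coefficient array held by this packet node
        self.level = level  # decomposition level of the node
        self.index = index  # position of the node within its level

class node:                 # stand-in for the missing `node` module
    Node = _Node

S = np.sin(np.linspace(0, 8 * np.pi, 256))
tree = collect(S, wavelet='db2', mode='symmetric', level=3)
print([len(level_nodes) for level_nodes in tree])   # 2, 4 and 8 nodes per level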
Example no. 3
def test_dwt_axis_arg():
    x = [[3, 7, 1, 1], [-2, 5, 4, 6]]

    cA_, cD_ = pywt.dwt(x, "db2", axis=-1)
    cA, cD = pywt.dwt(x, "db2", axis=1)

    assert_allclose(cA_, cA)
    assert_allclose(cD_, cD)
Example no. 4
def test_default_mode():
    # The default mode should be 'symmetric'
    x = [1, 2, 1, 5, -1, 8, 4, 6]
    cA, cD = pywt.dwt(x, 'db2')
    cA2, cD2 = pywt.dwt(x, 'db2', mode='symmetric')
    assert_allclose(cA, cA2)
    assert_allclose(cD, cD2)
    assert_allclose(pywt.idwt(cA, cD, 'db2'), x)
Example no. 5
def extractFeatureFromWave(wave):
    cA, cD = pywt.dwt(wave, 'dmey')
    counter = 1
    while counter <= 4:
        print(cA.shape)
        cA, cD = pywt.dwt(cA, 'dmey')
        counter = counter + 1
    return cA, cD
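A quick, hypothetical way to exercise extractFeatureFromWave(); the input just needs to be long enough to survive five 'dmey' decompositions, since each level roughly halves it.

import numpy as np
import pywt

wave = np.random.randn(4096)                 # placeholder input signal
cA, cD = extractFeatureFromWave(wave)
print(cA.shape, cD.shape)                    # approximation/detail after the 5th level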
Example no. 6
def bpm_detector(data,fs):
    cA = []
    cD = []
    correl = []
    cD_sum = []
    levels = 4
    max_decimation = 2**(levels-1);
    min_ndx = int(60. / 220 * (fs / max_decimation))
    max_ndx = int(60. / 40 * (fs / max_decimation))

    for loop in range(0,levels):
        cD = []
        # 1) DWT
        if loop == 0:
            [cA,cD] = pywt.dwt(data,'db4');
            cD_minlen = len(cD) // max_decimation + 1
            cD_sum = numpy.zeros(cD_minlen)
        else:
            [cA,cD] = pywt.dwt(cA,'db4');
        # 2) Filter
        cD = signal.lfilter([0.01], [1, -0.99], cD)

        # 4) Subtract out the mean.

        # 5) Decimate for reconstruction later.
        cD = abs(cD[::(2**(levels-loop-1))]);
        cD = cD - numpy.mean(cD);
        # 6) Recombine the signal before ACF
        #    essentially, each level I concatenate
        #    the detail coefs (i.e. the HPF values)
        #    to the beginning of the array
        cD_sum = cD[0:cD_minlen] + cD_sum;

    if [b for b in cA if b != 0.0] == []:
        return no_audio_data()
    # adding in the approximate data as well...
    cA = signal.lfilter([0.01], [1, -0.99], cA)
    cA = abs(cA);
    cA = cA - numpy.mean(cA);
    cD_sum = cA[0:cD_minlen] + cD_sum;

    # ACF
    correl = numpy.correlate(cD_sum,cD_sum,'full')

    midpoint = len(correl) // 2
    correl_midpoint_tmp = correl[midpoint:]
    peak_ndx = peak_detect(correl_midpoint_tmp[min_ndx:max_ndx]);
    if len(peak_ndx) > 1:
        return no_audio_data()

    peak_ndx_adjusted = peak_ndx[0]+min_ndx;
    bpm = 60./ peak_ndx_adjusted * (fs/max_decimation)
    global bpm_list
    bpm_list.append(bpm.item(0))
    #print bpm
    return bpm,correl
Example no. 7
def swt(data, wavelet, level=None):
    """
    Stationary Wavelet Transform

    This version is 2 orders of magnitude faster than the one in pywt
    even though it uses pywt for all the calculations.
    
      Input parameters: 

        data
          One-dimensional data to transform
        wavelet
          Either the name of a wavelet or a Wavelet object
        level
          Number of levels

    """
    if level is None:
        level = pywt.swt_max_level(len(data))
    num_levels = level
    idata = data.copy()
    res = []
    for j in range(1,num_levels+1): 
        step_size = int(math.pow(2, j-1))
        last_index = step_size
        # allocate
        cA = np.empty_like(data)
        cD = np.empty_like(data)
        for first in range(last_index): # 0 to last_index - 1
            # Getting the indices that we will transform 
            indices = np.arange(first, len(cD), step_size)

            # select the even indices
            even_indices = indices[0::2] 
            # select the odd indices
            odd_indices = indices[1::2] 
            
            # get the even
            (cA1,cD1) = pywt.dwt(idata[indices], wavelet, 'per')
            cA[even_indices] = cA1
            cD[even_indices] = cD1

            # then the odd
            (cA1,cD1) = pywt.dwt(np.roll(idata[indices],-1), wavelet, 'per')
            cA[odd_indices] = cA1
            cD[odd_indices] = cD1

        # set the data for the next loop
        idata = cA

        # prepend the result
        res.insert(0,(cA,cD))

    return res
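A small sanity-check sketch for the swt() above. It assumes numpy, math and pywt are imported at module level (the function itself needs math and the legacy 'per' mode name), and it only compares shapes and level counts against PyWavelets' built-in swt, which requires a signal length divisible by 2**level.

import math
import numpy as np
import pywt

x = np.random.randn(256)                     # 256 is divisible by 2**3
coeffs = swt(x, 'db2', level=3)              # the function defined above
for cA_lvl, cD_lvl in coeffs:
    print(cA_lvl.shape, cD_lvl.shape)        # every level keeps the original length

ref = pywt.swt(x, 'db2', level=3)            # built-in SWT, for comparison
print(len(coeffs), len(ref))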
Example no. 8
def zdwtfun(data, wname):
    matSize = np.shape(data)
    testL, testH = pywt.dwt(data[0, 0, :], wname)
    L = np.zeros((matSize[0], matSize[1], testL.shape[0]), dtype='float64')
    H = np.zeros((matSize[0], matSize[1], testH.shape[0]), dtype='float64')
    for i in range(0, matSize[0]):
        for j in range(0, matSize[1]):
            line = np.float64(data[i, j, :])
            cL, cH = pywt.dwt(line, wname)
            L[i, j, :] = cL
            H[i, j, :] = cH
    return L, H
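A usage sketch with made-up dimensions: zdwtfun() applies a 1-D DWT along the last axis of a 3-D array, so the first two axes are preserved and the last axis takes whatever length pywt.dwt produces.

import numpy as np
import pywt

vol = np.random.rand(4, 5, 64)               # e.g. a small stack of 4x5 signals of length 64
L, H = zdwtfun(vol, 'db2')
print(L.shape, H.shape)                      # (4, 5, n) where n is the single-level DWT length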
Example no. 9
def test_mode_equivalence():
    old_new = [('zpd', 'zero'),
               ('cpd', 'constant'),
               ('sym', 'symmetric'),
               ('ppd', 'periodic'),
               ('sp1', 'smooth'),
               ('per', 'periodization')]
    x = np.arange(8.)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', DeprecationWarning)
        for old, new in old_new:
            assert_array_equal(pywt.dwt(x, 'db2', mode=old),
                               pywt.dwt(x, 'db2', mode=new))
Example no. 10
def test_dwt_single_axis():
    x = [[3, 7, 1, 1], [-2, 5, 4, 6]]

    cA, cD = pywt.dwt(x, "db2", axis=-1)

    cA0, cD0 = pywt.dwt(x[0], "db2")
    cA1, cD1 = pywt.dwt(x[1], "db2")

    assert_allclose(cA[0], cA0)
    assert_allclose(cA[1], cA1)

    assert_allclose(cD[0], cD0)
    assert_allclose(cD[1], cD1)
Example no. 11
def makeWT(y, times):  ## applies the WT to the approximation several times
	(wtaxA, wtaxD) = pywt.dwt((y),'db1')
	if(times > 1):
		for i in range(times-1):
			(wtaxA,wtaxD) = pywt.dwt((wtaxA),'db1')
	ymin = min(wtaxD)
	for index, x in enumerate(wtaxD):
		wtaxD[index-1] = wtaxD[index-1] - ymin  # shift details so the minimum is 0
	ymin = min(wtaxA)
	for index, x in enumerate(wtaxA):
		wtaxA[index-1] = wtaxA[index-1] - ymin  # shift approximations so the minimum is 0
	
	return wtaxA,wtaxD
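A small sketch of how makeWT() might be called; the sine input is arbitrary. After the shift loops, both returned arrays have a minimum of zero.

import numpy as np
import pywt

y = np.sin(np.linspace(0, 4 * np.pi, 512))
wtaxA, wtaxD = makeWT(y, times=3)            # three cascaded 'db1' decompositions
print(wtaxA.min(), wtaxD.min())              # both 0.0 after the shift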
Example no. 12
def wu_hash(fxy):
    # compute radon transform, using 180 degrees with sampling interval 1
    fxy_rad = radon(fxy)
    # divide into 40x10 blocks
    bl = blocks(fxy_rad, 40, 10)
    # compute mean values of the blocks
    ms = []
    for x in range(len(bl)):
        els = []
        for y in range(len(bl[x])):
            els.append(np.mean(bl[x][y]))
        ms.append(els)
    # wavelet decomposition of each column with the Haar wavelet,
    # giving (approx, detail); the approximation is thrown away,
    # leaving a list of 40 lists of higher-order detail elements
    dec = []
    for x in range(len(ms)):
        dec.append(pywt.dwt(ms[x], "haar")[1])
    # apply fft to each component and throw the imaginary parts away
    ffts = list(map(np.fft.fft, dec))
    reals = []
    for x in range(len(ffts)):
        reals_of_x = []
        for c in ffts[x]:
            reals_of_x.append(c.real)
        reals.append(reals_of_x)
    return reals
Example no. 13
def check_reconstruction(pmode, mmode, wavelet, dtype):
    data_size = list(range(2, 40)) + [100, 200, 500, 1000, 2000, 10000,
                                50000, 100000]
    np.random.seed(12345)
    #TODO: smoke testing - more failures for different seeds

    if dtype == np.float32:
        epsilon = 3e-7
    else:
        #FIXME: limit was 5e-11, but gave failures.  Investigate
        epsilon = 1e-8

    for N in data_size:
        data = np.asarray(np.random.random(N), dtype)

        # compute dwt coefficients
        pa, pd = pywt.dwt(data, wavelet, pmode)

        # compute reconstruction
        rec = pywt.idwt(pa, pd, wavelet, pmode)

        if len(data) % 2:
            rec = rec[:len(data)]

        rms_rec = np.sqrt(np.mean((data-rec)**2))
        msg = ('[RMS_REC > EPSILON] for Mode: %s, Wavelet: %s, '
               'Length: %d, rms=%.3g' % (pmode, wavelet, len(data), rms_rec))
        assert_(rms_rec < epsilon, msg=msg)
def test_accuracy(families, wavelets, modes, epsilon=1.0e-10):
    print "Testing decomposition".upper()
    
    for pmode, mmode in modes:
        for wavelet in wavelets:
            print "Wavelet: %-8s Mode: %s" % (wavelet, pmode)
        
            w = pywt.Wavelet(wavelet)
            data_size = range(w.dec_len, 40) + [100, 200, 500, 1000, 50000]
            
            for N in data_size:
                data = numpy.random.random(N)
                
                # PyWavelets result
                pa, pd = pywt.dwt(data, wavelet, pmode)
                
                # Matlab result
                ma, md = mlab.dwt(data, wavelet, 'mode', mmode, nout=2)
                ma = ma.flat; md = md.flat

                # calculate error measures
                mse_a, mse_d = mse(pa, ma), mse(pd, md)
                rms_a, rms_d = math.sqrt(mse_a), math.sqrt(mse_d)

                if rms_a > epsilon:
                    print '[RMS_A > EPSILON] for Mode: %s, Wavelet: %s, Length: %d, rms=%.3g' % (pmode, wavelet, len(data), rms_a)
                    
                if rms_d > epsilon:
                    print '[RMS_D > EPSILON] for Mode: %s, Wavelet: %s, Length: %d, rms=%.3g' % (pmode, wavelet, len(data), rms_d)
Example no. 15
def func_dwt(Fs, T, N):
    t = np.linspace(0, N * T, N)
    y = np.sin(2 * np.pi * 10 * t) + 0.1 * np.sin(2 * np.pi * 300 * t)

    (cA, cD) = pywt.dwt(y, 'db1')
    yy = pywt.idwt(cA, cD, 'db1')
    print(np.sum(np.abs(yy - y)) / N)
Example no. 16
def test_perfect_reconstruction(families, wavelets, modes, epsilon, dtype):
    for wavelet in wavelets:
        for pmode, mmode in modes:
            print "Wavelet: %-8s Mode: %s" % (wavelet, pmode),

            data_size = range(2, 40) + [100, 200, 500, 1000, 2000, 10000, 50000, 100000]

            ok, over = 0, 0
            for N in data_size:
                data = numpy.asarray(numpy.random.random(N), dtype)

                # compute dwt coefficients
                pa, pd = pywt.dwt(data, wavelet, pmode)

                # compute reconstruction
                rec = pywt.idwt(pa, pd, wavelet, pmode)

                if len(data) % 2:
                    rec = rec[: len(data)]

                rms_rec = rms(data, rec)
                if rms_rec > epsilon:
                    if not over:
                        print
                    print "[RMS_REC > EPSILON] for Mode: %s, Wavelet: %s, " "Length: %d, rms=%.3g" % (
                        pmode,
                        wavelet,
                        len(data),
                        rms_rec,
                    )
                    over += 1
                else:
                    ok += 1
            if not over:
                print "- RMSE for all %d cases was under %s" % (len(data_size), epsilon)
def coiflets_wavelets_transf(X, parameter1=1):
    parameter1 = parameter1 if parameter1 in range(1, 6) else 1
    funct = 'coif'+str(parameter1)
    funct = funct if funct in pywt.wavelist('coif') else 'coif1'
    Xt = np.array([np.concatenate(pywt.dwt(X[:, i], funct))
                   for i in range(X.shape[1])])
    return Xt
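A brief sketch with invented shapes: the function treats each column of X as a signal and returns, per column, the concatenated single-level approximation and detail coefficients.

import numpy as np
import pywt

X = np.random.randn(128, 6)                  # 128 samples, 6 channels (columns)
Xt = coiflets_wavelets_transf(X, parameter1=2)
print(Xt.shape)                              # one row of concatenated (cA, cD) per channel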
Example no. 18
def compute_wavelets():
    real_wavelet_dict = {}
    os.chdir("./data")
    for file in glob.glob("*.txt"):
        file_id = (int)(file[1:-4])
        dyad_data = np.loadtxt( file)
        male = []
        female = []
        dyadicstate = []
        for i in range(len(dyad_data)):
            male.append(int(dyad_data[i][0]))
            female.append(int(dyad_data[i][1]))
            dyadicstate.append((int(dyad_data[i][0]) + int(dyad_data[i][1])) / 2.)
        
        male = np.array(male)
        female = np.array(female)
        dyadicstate = np.array(dyadicstate)
        ### normalize the data
        dyadicstate_scaled = preprocessing.scale(dyadicstate) 
        
        (cA, cD) = pywt.dwt(dyadicstate_scaled , 'haar', mode='zpd')
        cA_real = cA
        cD_real = cD
        real_wavelet_dict[file_id] = (cA_real, cD_real)
    os.chdir("../")
    return real_wavelet_dict
def daubechies_wavelet_transf(X, parameter1=1):
    parameter1 = parameter1 if parameter1 in range(1, 21) else 1
    funct = 'db'+str(parameter1)
    funct = funct if funct in pywt.wavelist('db') else 'db1'
    Xt = np.array([np.concatenate(pywt.dwt(X[:, i], funct))
                   for i in range(X.shape[1])])
    return Xt
Example no. 20
def test_dwt_idwt_allmodes():
    # Test that :func:`dwt` and :func:`idwt` can be performed using every mode
    x = [1, 2, 1, 5, -1, 8, 4, 6]
    dwt_result_modes = {
        'zero': ([-0.03467518, 1.73309178, 3.40612438, 6.32928585, 6.95094948],
                 [-0.12940952, -2.15599552, -5.95034847, -1.21545369,
                 -1.8625013]),
        'constant': ([1.28480404, 1.73309178, 3.40612438, 6.32928585,
                      7.51935555],
                     [-0.48296291, -2.15599552, -5.95034847, -1.21545369,
                      0.25881905]),
        'symmetric': ([1.76776695, 1.73309178, 3.40612438, 6.32928585,
                       7.77817459],
                      [-0.61237244, -2.15599552, -5.95034847, -1.21545369,
                       1.22474487]),
        'reflect': ([2.12132034, 1.73309178, 3.40612438, 6.32928585,
                     6.81224877],
                    [-0.70710678, -2.15599552, -5.95034847, -1.21545369,
                     -2.38013939]),
        'periodic': ([6.9162743, 1.73309178, 3.40612438, 6.32928585,
                      6.9162743],
                     [-1.99191082, -2.15599552, -5.95034847, -1.21545369,
                      -1.99191082]),
        'smooth': ([-0.51763809, 1.73309178, 3.40612438, 6.32928585,
                    7.45000519],
                   [0, -2.15599552, -5.95034847, -1.21545369, 0]),
        'periodization': ([4.053172, 3.05257099, 2.85381112, 8.42522221],
                          [0.18946869, 4.18258152, 4.33737503, 2.60428326])
    }

    for mode in pywt.Modes.modes:
        cA, cD = pywt.dwt(x, 'db2', mode)
        assert_allclose(cA, dwt_result_modes[mode][0], rtol=1e-7, atol=1e-8)
        assert_allclose(cD, dwt_result_modes[mode][1], rtol=1e-7, atol=1e-8)
        assert_allclose(pywt.idwt(cA, cD, 'db2', mode), x, rtol=1e-10)
Example no. 21
def test_dwtn_axes():
    data = np.array([[0, 1, 2, 3],
                     [1, 1, 1, 1],
                     [1, 4, 2, 8]])
    data = data + 1j*data  # test with complex data
    coefs = pywt.dwtn(data, 'haar', axes=(1,))
    expected_a = list(map(lambda x: pywt.dwt(x, 'haar')[0], data))
    assert_equal(coefs['a'], expected_a)
    expected_d = list(map(lambda x: pywt.dwt(x, 'haar')[1], data))
    assert_equal(coefs['d'], expected_d)

    coefs = pywt.dwtn(data, 'haar', axes=(1, 1))
    expected_aa = list(map(lambda x: pywt.dwt(x, 'haar')[0], expected_a))
    assert_equal(coefs['aa'], expected_aa)
    expected_ad = list(map(lambda x: pywt.dwt(x, 'haar')[1], expected_a))
    assert_equal(coefs['ad'], expected_ad)
def symlets_wavelets_transf(X, parameter1=2):
    parameter1 = parameter1 if parameter1 in range(2, 21) else 2
    funct = 'sym'+str(parameter1)
    funct = funct if funct in pywt.wavelist('sym') else 'sym2'
    Xt = np.array([np.concatenate(pywt.dwt(X[:, i], funct))
                  for i in range(X.shape[1])])
    return Xt
Example no. 23
File: wt.py Project: jpcoles/jcode
def my_wavedec(data, wavelet, mode='sym', level=None):
    """
    Multilevel 1D Discrete Wavelet Transform of data.
    Returns coefficients list - [cAn, cDn, cDn-1, ..., cD2, cD1]

    data    - input data
    wavelet - wavelet to use (Wavelet object or name string)
    mode    - signal extension mode, see MODES
    level   - decomposition level. If level is None then it will be
              calculated using `dwt_max_level` function.
    """

    if not isinstance(wavelet, pywt.Wavelet):
        wavelet = pywt.Wavelet(wavelet)

    if level is None:
        level = pywt.dwt_max_level(len(data), wavelet.dec_len)
    elif level < 0:
        raise ValueError("Level value of %d is too low . Minimum level is 0." % level)

    coeffs_list = []

    a = data
    for i in range(level):
        a, d = pywt.dwt(a, wavelet, mode)
        d = list(d)
        #d.reverse()
        coeffs_list.append(d)

    a = list(a)
    #a.reverse()
    coeffs_list.append(a)
    #coeffs_list.reverse()

    return coeffs_list
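A usage sketch (input chosen arbitrarily): because the reversal is commented out, my_wavedec returns the detail coefficients from finest to coarsest, followed by the final approximation.

import numpy as np
import pywt

x = np.sin(np.linspace(0, 2 * np.pi, 64))
coeffs = my_wavedec(x, 'db2', mode='symmetric', level=3)
print([len(c) for c in coeffs])              # lengths of [cD1, cD2, cD3, cA3]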
Example no. 24
    def wavelet(self, column, name):
        sample_size = self.sample_size
        sc = self.sc
        link = self.file
        length = self.file_size

        tab = []
        for i in range(0, length):
            tab.append(length - i)

        def get_key(iterator, size):
            key = int(iterator/size)
            iterator += 1
            return key

        rdd = sc\
            .textFile(link)\
            .filter(lambda line: name not in line)\
            .map(lambda line: (get_key(tab.pop(), sample_size), re.split(r';', line)[column]))\
            .groupByKey().mapValues(list)\
            .map(lambda line: (line[0], pywt.dwt(line[1], 'db1')[1]))

        def get_previous_line(line):
            iterator = line[0]
            if iterator == 0:
                prev = rdd.filter(lambda my_line: my_line[0] == iterator).collect()[0][1]
            else:
                prev = rdd.filter(lambda my_line: my_line[0] == iterator - 1).collect()[0][1]
            d = distance.euclidean(line[1], prev)
            return d

        return rdd\
            .map(lambda line: get_previous_line(line))\
            .collect()
Example no. 25
def check_reconstruction(pmode, mmode, wavelet, dtype):
    data_size = list(range(2, 40)) + [100, 200, 500, 1000, 2000, 10000,
                                      50000, 100000]
    np.random.seed(12345)
    # TODO: smoke testing - more failures for different seeds

    if dtype == np.float32:
        # was 3e-7 has to be lowered as db21, db29, db33, db35, coif14, coif16 were failing
        epsilon = 6e-7
    else:
        epsilon = 5e-11

    for N in data_size:
        data = np.asarray(np.random.random(N), dtype)

        # compute dwt coefficients
        pa, pd = pywt.dwt(data, wavelet, pmode)

        # compute reconstruction
        rec = pywt.idwt(pa, pd, wavelet, pmode)

        if len(data) % 2:
            rec = rec[:len(data)]

        rms_rec = np.sqrt(np.mean((data-rec)**2))
        msg = ('[RMS_REC > EPSILON] for Mode: %s, Wavelet: %s, '
               'Length: %d, rms=%.3g' % (pmode, wavelet, len(data), rms_rec))
        assert_(rms_rec < epsilon, msg=msg)
def check_accuracy(pmode, mmode, wavelet):
    # max RMSE
    epsilon = 1.0e-10

    w = pywt.Wavelet(wavelet)
    data_size = list(range(w.dec_len, 40)) + [100, 200, 500, 1000, 50000]
    np.random.seed(1234)

    for N in data_size:
        data = np.random.random(N)

        # PyWavelets result
        pa, pd = pywt.dwt(data, wavelet, pmode)

        # Matlab result
        ma, md = mlab.dwt(data, wavelet, 'mode', mmode, nout=2)
        ma = ma.flat
        md = md.flat

        # calculate error measures
        rms_a = np.sqrt(np.mean((pa-ma)**2))
        rms_d = np.sqrt(np.mean((pd-md)**2))

        msg = ('[RMS_A > EPSILON] for Mode: %s, Wavelet: %s, '
              'Length: %d, rms=%.3g' % (pmode, wavelet, len(data), rms_a))
        assert_(rms_a < epsilon, msg=msg)

        msg = ('[RMS_D > EPSILON] for Mode: %s, Wavelet: %s, '
               'Length: %d, rms=%.3g' % (pmode, wavelet, len(data), rms_d))
        assert_(rms_d < epsilon, msg=msg)
Example no. 27
def wavelet_transform(sig):
    mode = pywt.Modes.smooth
    w = 'coif3'
    w = pywt.Wavelet(w)

#    hasil=[]
#    for n in range(data.shape[0]):
        #dd = data[n,1:24]
    ca = []
    cd = []
    a = sig
    for i in range(5):
        # cascade the DWT on the running approximation
        (a, d) = pywt.dwt(a, w, mode)
        ca.append(a)
        cd.append(d)
    rec_a = []
    rec_d = []
    for i, coeff in enumerate(ca):
        coeff_list = [coeff, None] + [None] * i
        rec_a.append(pywt.waverec(coeff_list, w))
    for i, coeff in enumerate(cd):
        coeff_list = [None, coeff] + [None] * i
        rec_d.append(pywt.waverec(coeff_list, w))
#    hasil.append(rec_a[0])
        
    return rec_a[0]
Example no. 28
def wavedec(data, wavelet, mode='sym', level=None):
    """
    Multilevel 1D Discrete Wavelet Transform of data.
    Returns coefficients list - [cAn, cDn, cDn-1, ..., cD2, cD1]

    data    - input data
    wavelet - wavelet to use (Wavelet object or name string)
    mode    - signal extension mode, see MODES
    level   - decomposition level. If level is None then it will be
              calculated using `dwt_max_level` function.
    """

    if not isinstance(wavelet, Wavelet):
        wavelet = Wavelet(wavelet)

    # if level is None:
    #     level = dwt_max_level(len(data), wavelet.dec_len)
    # elif level < 0:
    #     raise ValueError(
    #         "Level value of %d is too low . Minimum level is 0." % level)

    coeffs_list = []

    a = data
    for i in range(level):
        a, d = dwt(a, wavelet, mode)
        # a, d = my_dwt(a)
        # print "length of a: " + str(len(a))
        coeffs_list.append(d)

    # print "type: " + str(type(a))
    coeffs_list.append(a)
    coeffs_list.reverse()

    return coeffs_list
Example no. 29
def task_1():
    dwt_haar_4 = find_dwt_matrix(4, 'haar')
    dwt_haar_8 = find_dwt_matrix(8, 'haar')
    x = np.array([1.0, 2.0, 3.0, 2.0, 1.0, 3.0, 3.0, 3.0])
    x_dwt = np.dot(dwt_haar_8, x)

    print()
    print("DWT Matrix of size 4:")
    print(dwt_haar_4)

    print()
    print("DWT Matrix of size 8:")
    print(dwt_haar_8)

    print()
    print("x:")
    print(x)

    print()
    print("DWT of x with matrix:")
    print(x_dwt)

    print()
    print("DWT of x with pywt:")
    print(pywt.dwt(x, 'haar', mode='ppd'))
def get_waveletfeatures(data, w, use_dwt=True):
    #Show dwt or swt coefficients for given data and wavelet.
    w = pywt.Wavelet(w)
    a = data
    ca = []
    cd = []

    if use_dwt:
        for i in range(5):
            # `mode` is assumed to be defined at module level (e.g. 'symmetric')
            (a, d) = pywt.dwt(a, w, mode)
            ca.append(a)
            cd.append(d)
    else:
        coeffs = pywt.swt(data, w, 5)  # [(cA5, cD5), ..., (cA1, cD1)]
        for a, d in reversed(coeffs):
            ca.append(a)
            cd.append(d)
    
    wave_features = []
    for i in range(len(ca)):  # cD1 - cD5
        c_Dsquares = []
        for j in range(len(cd[i])):
            c_Dsquares.append((cd[i][j])**2)
        c_Dsumsquares = sum(c_Dsquares)
        wave_features.append(c_Dsumsquares)
    return wave_features  # the sum of squared detail coefficients at each level
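A sketch of one possible call; get_waveletfeatures() reads an undefined global `mode`, so a module-level value is assumed here and the input signal is synthetic.

import numpy as np
import pywt

mode = 'symmetric'                           # assumed module-level extension mode
sig = np.random.randn(1024)
features = get_waveletfeatures(sig, 'db4', use_dwt=True)
print(features)                              # five sums of squared detail coefficients (levels 1-5)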
Example no. 31
import numpy as np
import pywt
import matplotlib.pyplot as plt

x = np.linspace(-5, 5, 100)
y = np.sin(x)
(cA, cD) = pywt.dwt(y, 'db1')

plt.subplot(311)
plt.plot(y)

plt.subplot(312)
plt.plot(cA)

plt.subplot(313)
plt.plot(cD)

plt.show()
Example no. 32
    def data(self, type_of_transform="dwt and dct", shuffling=True):
        assert type_of_transform in ("dwt", "dct", "dwt and dct"), "type_of_transform must be 'dwt' (discrete wavelet transform), 'dct' (discrete cosine transform), or 'dwt and dct'"
        X = []
        Y = []

        if type_of_transform == "dwt":
            for dirs in os.listdir(path=self.path):
                for images in os.listdir(path="%s%s" % (self.path, dirs)):
                    raw_image = cv2.imread(
                        "%s/%s/%s" % (self.path, dirs, images), 0)

                    raw_image = cv2.resize(255 - raw_image, self.size)
                    (thresh, raw_image) = cv2.threshold(
                        raw_image, self.thresh, 255,
                        cv2.THRESH_BINARY | cv2.THRESH_OTSU)
                    #raw_raw.point(lambda x:x > thresh and 255)
                    rows, cols, raw_image = self.cutter(raw_image)
                    if rows > cols:
                        factor = (self.size[1] - 10) / rows  #changes
                        rows = (self.size[1] - 10)
                        cols = int(round(cols * factor))
                        raw_image = cv2.resize(raw_image, (cols, rows))
                    else:
                        factor = (self.size[1] - 10) / cols
                        cols = (self.size[1] - 10)
                        rows = int(round(rows * factor))
                        raw_image = cv2.resize(raw_image, (cols, rows))

                    c_pad = (int(math.ceil((self.size[1] - cols) / 2.0)),
                             int(math.floor((self.size[1] - cols) / 2.0)))
                    raw_pad = (int(math.ceil((self.size[0] - rows) / 2.0)),
                               int(math.floor((self.size[0] - rows) / 2.0)))
                    raw_image = np.lib.pad(raw_image, (raw_pad, c_pad),
                                           'constant')
                    shft_x, shift_y = self.b_shif(raw_image)
                    shifted = self.shifter(raw_image, shft_x, shift_y)
                    raw_image = shifted

                    flatten = raw_image.flatten() / 255.0
                    #transform will come here

                    flatten, disc = dwt(flatten, wavelet="db1")
                    X.append(flatten)
                    Y.append(dirs)
            if shuffling:
                X, Y = shuffle(X, Y, random_state=0)
            return X, Y

        elif type_of_transform == "dct":
            for dirs in os.listdir(path=self.path):
                for images in os.listdir(path="%s%s" % (self.path, dirs)):
                    raw_image = cv2.imread(
                        "%s/%s/%s" % (self.path, dirs, images), 0)

                    raw_image = cv2.resize(255 - raw_image, self.size)
                    (thresh, raw_image) = cv2.threshold(
                        raw_image, self.thresh, 255,
                        cv2.THRESH_BINARY | cv2.THRESH_OTSU)
                    #raw_raw.point(lambda x:x > thresh and 255)
                    rows, cols, raw_image = self.cutter(raw_image)
                    if rows > cols:
                        factor = (self.size[1] - 10) / rows  #changes
                        rows = (self.size[1] - 10)
                        cols = int(round(cols * factor))
                        raw_image = cv2.resize(raw_image, (cols, rows))
                    else:
                        factor = (self.size[1] - 10) / cols
                        cols = (self.size[1] - 10)
                        rows = int(round(rows * factor))
                        raw_image = cv2.resize(raw_image, (cols, rows))

                    c_pad = (int(math.ceil((self.size[1] - cols) / 2.0)),
                             int(math.floor((self.size[1] - cols) / 2.0)))
                    raw_pad = (int(math.ceil((self.size[0] - rows) / 2.0)),
                               int(math.floor((self.size[0] - rows) / 2.0)))
                    raw_image = np.lib.pad(raw_image, (raw_pad, c_pad),
                                           'constant')
                    shft_x, shift_y = self.b_shif(raw_image)
                    shifted = self.shifter(raw_image, shft_x, shift_y)
                    raw_image = shifted

                    flatten = raw_image.flatten() / 255.0
                    #transform will come here

                    flatten = dct(flatten)
                    X.append(flatten)
                    Y.append(dirs)
            if shuffling:
                X, Y = shuffle(X, Y, random_state=0)
            return X, Y
        elif type_of_transform == "dwt and dct":
            for dirs in os.listdir(path=self.path):
                for images in os.listdir(path="%s%s" % (self.path, dirs)):
                    raw_image = cv2.imread(
                        "%s/%s/%s" % (self.path, dirs, images), 0)

                    raw_image = cv2.resize(255 - raw_image, self.size)
                    (thresh, raw_image) = cv2.threshold(
                        raw_image, self.thresh, 255,
                        cv2.THRESH_BINARY | cv2.THRESH_OTSU)
                    #raw_raw.point(lambda x:x > thresh and 255)
                    rows, cols, raw_image = self.cutter(raw_image)
                    if rows > cols:
                        factor = (self.size[1] - 10) / rows  #changes
                        rows = (self.size[1] - 10)
                        cols = int(round(cols * factor))
                        raw_image = cv2.resize(raw_image, (cols, rows))
                    else:
                        factor = (self.size[1] - 10) / cols
                        cols = (self.size[1] - 10)
                        rows = int(round(rows * factor))
                        raw_image = cv2.resize(raw_image, (cols, rows))

                    c_pad = (int(math.ceil((self.size[1] - cols) / 2.0)),
                             int(math.floor((self.size[1] - cols) / 2.0)))
                    raw_pad = (int(math.ceil((self.size[0] - rows) / 2.0)),
                               int(math.floor((self.size[0] - rows) / 2.0)))
                    raw_image = np.lib.pad(raw_image, (raw_pad, c_pad),
                                           'constant')
                    shft_x, shift_y = self.b_shif(raw_image)
                    shifted = self.shifter(raw_image, shft_x, shift_y)
                    raw_image = shifted

                    flatten_m = raw_image.flatten() / 255.0
                    #transform will come here
                    flatten_2, disc = dwt(flatten_m, wavelet="db1")
                    flatten_1 = dct(flatten_m)
                    #print(np.shape(flatten_2),np.shape(flatten_1))
                    flatten = np.concatenate((flatten_1, flatten_2))
                    X.append(flatten)
                    Y.append(dirs)
            if shuffling:
                X, Y = shuffle(X, Y, random_state=0)
            return X, Y
Example no. 33
def wavedec(data, wavelet, mode='symmetric', level=1, axis=-1):
    """
    Multiple level 1-D discrete fast wavelet decomposition

    Calling Sequence
    ----------------
    [C, L] = wavedec(data, wavelet, mode, level, axis)
    [C, L] = wavedec(data, wavelet)
    [C, L] = wavedec(data, 'sym3')

    Parameters
    ----------
    data: array_like
        Input data
    wavelet : Wavelet object or name string
        Wavelet to use
    mode : str, optional
        Signal extension mode, see Modes (default: 'symmetric')
    level : int, optional
        Decomposition level (must be >= 0). Default is 1.
    axis: int, optional
        Axis over which to compute the DWT. If not given, the
        last axis is used.

    Returns
    -------
    C: list
        Ordered list of flattened coefficients arrays (N=level):
        C = [app. coef.(N)|det. coef.(N)|... |det. coef.(1)]

    L: list
        Ordered list of individual lengths of coefficients arrays.
        L(1)   = length of app. coef.(N)
        L(i)   = length of det. coef.(N-i+2) for i = 2,...,N+1
        L(N+2) = length(X).

    Description
    -----------
    wavedec can be used for multiple-level 1-D discrete fast wavelet
    decomposition using a specific wavelet name or instance of the
    Wavelet class instance.

    The coefficient vector C contains the approximation coefficient at level N
    and all detail coefficient from level 1 to N

    The first entry of L is the length of the approximation coefficient,
    then the length of the detail coefficients are stored and the last
    value of L is the length of the signal vector.

    The approximation coefficient can be extracted with C(1:L(1)).
    The detail coefficients can be obtained with C(L(1):sum(L(1:2))),
    C(sum(L(1:2)):sum(L(1:3))),.... until C(sum(L(1:length(L)-2)):sum(L(1:length(L)-1)))

    The implementation of the function is based on pywt.wavedec
    with the following minor changes:
        - checking of the axis is dropped out
        - checking of the maximum possible level is dropped out
          (as for Matlab's implementation)
        - returns format is modified to Matlab's internal format:
          two separate lists of details coefficients and
          corresponding lengths

    Examples
    --------
    >>> C, L = wavedec([3, 7, 1, 1, -2, 5, 4, 6], 'sym3', level=2)
    >>> C
    array([  7.38237875   5.36487594   8.83289608   2.21549896  11.10312807
            -0.42770133   3.72423411   0.48210099   1.06367045  -5.0083641
            -2.11206142  -2.64704675  -3.16825651  -0.67715519   0.56811154
             2.70377533])
    >>> L
    array([5, 5, 6, 8])

    """
    data = np.asarray(data)

    if not isinstance(wavelet, pywt.Wavelet):
        wavelet = pywt.Wavelet(wavelet)

    # Initialization
    coefs, lengths = [], []

    # Decomposition
    lengths.append(len(data))
    for i in range(level):
        data, d = pywt.dwt(data, wavelet, mode, axis)

        # Store detail and its length
        coefs.append(d)
        lengths.append(len(d))

    # Add the last approximation
    coefs.append(data)
    lengths.append(len(data))

    # Reverse (since we've appended to the end of list)
    coefs.reverse()
    lengths.reverse()

    return np.concatenate(coefs).ravel(), lengths
def dwt(a):
    [ca, cd] = pywt.dwt(a, 'haar')
    return ca, cd
Example no. 35
        y_pred = np.zeros(y_test.shape)
        for ii, t_name in enumerate(target_fields):
            print t_name
            clfs[t_name].fit(x_train, y_train[:, ii])
            y_pred[:, ii] = clfs[t_name].predict(x_test).astype(float)

        pred_all.append(y_pred)
        truth_all.append(y_test)
    pred_all = np.concatenate(pred_all)
    truth_all = np.concatenate(truth_all)

    print m.mcrmse(pred_all, truth_all)

else:
    test = dl.get_data('test')
    spectra_test = test['spectra']
    x_test = np.array([pywt.dwt(s, 'db3')[0] for s in spectra_test])
    x_test = np.c_[x_test, test['spatial']]
    x_test = np.c_[x_test, (test['depth'] == 'Topsoil').astype(float)]

    pred = np.zeros([x_test.shape[0], len(target_fields)])
    # Train on all data
    for ii, t_name in enumerate(target_fields):
        clfs[t_name].fit(x_train_all, targets[:, ii])
        pred[:, ii] = clfs[t_name].predict(x_test).astype(float)

# dl.write_predictions(test['pidn'], pred)

toc = time.time() - tic
print toc, 'seconds'
Example no. 36
def funcaofeia(p_wavelet,p_name,I1):
	wavelet = p_wavelet#wavelets[0]#'db1'
	
	
	cooef1 = pywt.wavedec2(I1[:,:], wavelet)

	'''
	for a in range(len(cooef1)):
		print(len(cooef1[a]))

	print("IMAGEM")
	print(I1[:,:])

	print("PRIMEIRA LINHA")
	print(cooef1[0][0])
	print(cooef1[0][1])

	print("SEGUNDA LINHA")

	print(cooef1[1][0])
	print(cooef1[1][1])
	print(cooef1[1][2])
	'''

	# for line in cooef1:
	# 	for cell in line:
	# 		if(cell != 0):
	# 			pass
	
	fusedImage = pywt.waverec2(cooef1, wavelet)

	'''

	print("IMAGEM FUDIDA")
	print(len(fusedImage))

	countLine = 0
	countCol = 0
	countGeral = 0

	print("Elementos não nulos por linha")

	for line in fusedImage:
		countLine = 0
		for cell in line:
			if(cell != 0):
				countLine +=1
		#print(countLine)
		countGeral +=countLine
		if(countLine != 0):
			countCol +=1

	print("COUNT GERAL=>",countGeral)
	print("COUNT LINE =>",countCol)
	print("ZERO LINE =>", len(fusedImage)-countCol)
	'''

	# STACK OVERFLOW MAGIC
	# Fourth: normalize values to the uint8 range
	fusedImage = np.multiply(np.divide(fusedImage - np.min(fusedImage),(np.max(fusedImage) - np.min(fusedImage))),255)
	fusedImage = fusedImage.astype(np.uint8)

	cv2.imwrite(p_name+"-XABLAU.jpg",fusedImage)

	#print(len(cooef1))

	cooef2 = pywt.dwt(I1[:,:], wavelet)
	#cooef3 = pywt.dwt(I1[:,:], wavelet)

	#print("COEF 2")
	#print(cooef2[0])
	#for cell in cooef2[0]:
	#	print(cell, end=" ")
	#print(len(cooef2[1]))

	cv2.imwrite(p_name+"-Digital-BIGODINHO-0.jpg",cooef2[0])
	cv2.imwrite(p_name+"-Digital-BIGODINHO-1.jpg",cooef2[1])
def _wavelet_filter_signal(s, wave='db4', *args, **kwargs):
    cA, cD = pywt.dwt(s, wave)
    cD_mod = pywt.threshold(cD, *args, **kwargs)
    s_mod = pywt.idwt(cA, cD_mod, wave)
    return s_mod
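A denoising sketch under assumed parameters: the extra arguments are forwarded to pywt.threshold, so a threshold value and mode have to be supplied by the caller.

import numpy as np
import pywt

t = np.linspace(0, 1, 1024)
noisy = np.sin(2 * np.pi * 5 * t) + 0.2 * np.random.randn(t.size)
denoised = _wavelet_filter_signal(noisy, wave='db4', value=0.5, mode='soft')
print(noisy.shape, denoised.shape)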
                                          ).any(1)]
    app_train = app_train.reset_index(drop=True)
    print(str(name + 1) + '.csv' + "merge Done\n")
    print('merge shape: ', app_train.shape)

    # extract statistical features
    plus_cells = int(app_train.shape[0] / 600)
    for k in range(0, 600):
        app_train_temp_origin = app_train[plus_cells * k:plus_cells * (k + 1) -
                                          1]
        app_train_temp_origin = app_train_temp_origin.reset_index(drop=True)

        app_train_temp = pd.DataFrame()
        for col in app_train_temp_origin.columns:
            x = app_train_temp_origin[col]
            cA, cD = pywt.dwt(x, 'sym2')
            cA_df = pd.DataFrame(cA, columns=[col + '_cA'])
            cD_df = pd.DataFrame(cD, columns=[col + '_cD'])
            app_train_temp = pd.concat([app_train_temp, cA_df, cD_df], axis=1)

        app_train_merge_sensor_s = pd.DataFrame()
        # variance
        var_temp = app_train_temp.var()
        app_train_merge_sensor_s = pd.DataFrame([var_temp.values],
                                                columns=var_temp.index +
                                                '_var')
        # mean
        mean_temp = app_train_temp.mean()
        app_train_merge_sensor_s = pd.concat([
            app_train_merge_sensor_s,
            pd.DataFrame([mean_temp.values], columns=mean_temp.index + '_mean')
Example no. 39
def test_dwt_input_error():
    data = np.ones((16, 1))
    assert_raises(ValueError, pywt.dwt, data, 'haar')

    cA, cD = pywt.dwt(data[:, 0], 'haar')
    assert_raises(ValueError, pywt.idwt, cA[:, np.newaxis], cD, 'haar')
Example no. 40
                    row[j] = float(row[j])
                    j += 1
                data1.append(row)
                if int(file_name) < 15:
                    label.append('0')
                elif int(file_name) > 100:
                    label.append('2')
                else:
                    label.append('1')

    data1 = np.array(data1)
    label = np.array(label)
    # data preprocessing

    # DWT transform
    coeff = pywt.dwt(data1, 'db3', mode='symmetric')
    data = np.array(coeff[0])

    # fft
    # data = fft(data1).real

    # KSVD
    # scores1 = []
    # scores2 = []
    # for i in range(15, 30):
    # ksvd = KSVD(24)
    # dictionary, sparsecode = ksvd.fit(data1)
    # data = dictionary.dot(sparsecode)

    # split into train and test sets
    X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
Example no. 41
def func(arr):
    # mask out the -200 sentinel values (note: ydata is not used below)
    xdata = np.where(arr != -200)
    ydata = arr[xdata]
    cA, cD = pywt.dwt(arr, 'db2')
    return cA
Example no. 42
    r'C:\Users\dsq94\OneDrive\文档\WeChat Files\orthonormal\FileStorage\File\2020-11\museMonitor_697.csv',
    delimiter=',')
index_Alpha_TP9 = 9  #np.where(eegData[0] == 'Alpha_TP9')[0][0]
index_Beta_TP9 = 13  #np.where(eegData[0] == 'Beta_TP9')[0][0]
Alpha_TP9Data = eegData[1:, index_Alpha_TP9]  #.astype(np.float)
Beta_TP9Data = eegData[1:, index_Beta_TP9]  #.astype(np.float)

plt.plot(Alpha_TP9Data)
plt.ylabel("Muse Alpha_TP9Data")
plt.figure()

plt.plot(Beta_TP9Data)
plt.ylabel("Muse Beta_TP9Data")
plt.figure()

cA, cD = pywt.dwt(Beta_TP9Data, 'rbio3.5')

approx = pywt.idwt(cA, None, 'rbio3.5')
plt.plot(approx)
plt.ylabel("Approx TP9")
plt.figure()

# detail = pywt.idwt(cD, None, 'Rbio3.5')
# plt.plot(detail)
# plt.ylabel("Detailed TP9")

plt.figure()
plt.plot(cA)
plt.ylabel("Approx Points")

plt.figure()
Example no. 43
import pywt

cA, cD = pywt.dwt([1, 2, 3, 4], wavelet='db1')
print('cA:', cA)
print('cD', cD)
print('done')
Example no. 44
def WaveletTransform(data):
	cA, cD = pywt.dwt(data, 'db4')
	return cA
Example no. 45
    def __init__(self):
        self.filter_bank = self.dec_lo, self.dec_hi, self.rec_lo, self.rec_hi


data = [1, 2, 3, 4, 5, 6]

############################################################################
print "Case 1 (custom filter bank - Haar wavelet)"

myBank = FilterBank()
# pass the user supplied filter bank as argument
myWavelet = pywt.Wavelet(name="UserSuppliedWavelet", filter_bank=myBank)
#print myWavelet.get_filters_coeffs()

print "data:", data
a, d = pywt.dwt(data, myWavelet)
print "a:", a
print "d:", d
print "rec:", pywt.idwt(a, d, myWavelet)

############################################################################
print "-" * 75
print "Case 2 (Wavelet object as filter bank - db2 wavelet)"

# builtin wavelets can also be treated as filter banks with theirs
# filter_bank attribute

builtinWavelet = pywt.Wavelet('db2')
myWavelet = pywt.Wavelet(name="UserSuppliedWavelet",
                         filter_bank=builtinWavelet)
Example no. 46
import pywt
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt


# data
data = [1, 1, 1, 1, 2, 2, 3, 3]

# Haar wavelet
# haar = pywt.Wavelet(name="haar")  # normalized wavelet
haar = pywt.Wavelet(name="haar", filter_bank=((1, 1), (-1, 1), (1, 1), (1, -1)))  # unnormalized wavelet

# single-level 1D discrete wavelet transform
cA1, cD1 = pywt.dwt(data, haar)
print(cA1)
print(cD1)

# 1-level 1D discrete wavelet transform
result = pywt.wavedec(data, wavelet=haar, level=1)
print(result)

# 2-level 1D discrete wavelet transform
result = pywt.wavedec(data, wavelet=haar, level=2)
print(result)

# 3-level 1D discrete wavelet transform
result = pywt.wavedec(data, wavelet=haar, level=3)
print(result)

print()
Example no. 47
def waves(time_series, wave):
    ca, cd = pywt.dwt(time_series, wave)
    rec_ts = pywt.idwt(None, cd, wave)
    return rec_ts
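A short sketch of waves() on synthetic data: reconstructing from the detail band only acts as a crude high-pass view of the series.

import numpy as np
import pywt

ts = np.cumsum(np.random.randn(256))         # a random-walk style series
detail_only = waves(ts, 'db3')               # idwt of the detail coefficients alone
print(ts.shape, detail_only.shape)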
Example no. 48
import numpy as np
import pywt
import matplotlib.pyplot as plt

ecg = np.loadtxt('ecg_segment4.txt')
# plt.plot(ecg)
# plt.show()
w = pywt.Wavelet('db3')
ca = []
cd = []
layers = int(7)
mode = pywt.Modes.smooth
a = ecg
for i in range(layers):
    (a, d) = pywt.dwt(a, w, mode)
    ca.append(a)
    cd.append(d)

rec_a = []
rec_d = []
coeff_list_new = []
# for i, coeff in enumerate(cd):
#     coeff_list = [coeff, None] + [None] * i
#     coeff_list_new.append()
#     rec_a.append(pywt.waverec(coeff_list, w))

for i in range(3, 4, 5):  # note: range(3, 4, 5) yields only i == 3
    coeff_list = cd[i]
    coeff_list_new.append(coeff_list)
re_ecg = pywt.waverec(coeff_list_new, w)
Example no. 49
def plot_signal_decomp(data, w):
    global all_feature
    """Decompose and plot a signal S.
        S = An + Dn + Dn-1 + ... + D1
        """
    mean_a = []
    maximum_a = []
    minimum_a = []
    stdev_a = []
    median_a = []
    var_a = []

    mean_coefA = []
    max_coefA = []
    min_coefA = []
    stdev_coefA = []
    median_coefA = []
    var_coefA = []

    mean_d = []
    maximum_d = []
    minimum_d = []
    stdev_d = []
    median_d = []
    var_d = []

    mean_coefD = []
    max_coefD = []
    min_coefD = []
    stdev_coefD = []
    median_coefD = []
    var_coefD = []

    w = pywt.Wavelet(w)
    a = data
    ca = []
    cd = []

    idx = 1

    for i in range(5):
        # `mode` is assumed to be defined at module level (e.g. pywt.Modes.smooth)
        (a, d) = pywt.dwt(a, w, mode)
        ca.append(a)
        cd.append(d)

        mean_a = np.mean([i[1] for i in ca])
        maximum_a = np.amax([i[1] for i in ca])
        minimum_a = np.amin([i[1] for i in ca])
        stdev_a = np.std([i[1] for i in ca])
        median_a = np.median([i[1] for i in ca])
        var_a = np.var([i[1] for i in ca])

        mean_coefA.append(mean_a)
        max_coefA.append(maximum_a)
        min_coefA.append(minimum_a)
        stdev_coefA.append(stdev_a)
        median_coefA.append(median_a)
        var_coefA.append(var_a)

        mean_d = np.mean([i[1] for i in cd])
        maximum_d = np.amax([i[1] for i in cd])
        minimum_d = np.amin([i[1] for i in cd])
        stdev_d = np.std([i[1] for i in cd])
        median_d = np.median([i[1] for i in cd])
        var_d = np.var([i[1] for i in cd])

        mean_coefD.append(mean_d)
        max_coefD.append(maximum_d)
        min_coefD.append(minimum_d)
        stdev_coefD.append(stdev_d)
        median_coefD.append(median_d)
        var_coefD.append(var_d)

        idx += 1

    feature_extraction = [
        min_coefD[4], max_coefD[4], mean_coefD[4], stdev_coefD[4],
        median_coefD[4], var_coefD[4]
    ]
    all_feature.append(feature_extraction)
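A sketch of how this might be driven; the function relies on module-level `mode` and `all_feature` globals that are not shown above, so both are assumed here.

import numpy as np
import pywt

mode = 'symmetric'                           # assumed module-level extension mode
all_feature = []                             # the function appends one feature row per call
plot_signal_decomp(np.random.randn(2048), 'db4')
print(all_feature[-1])                       # [min, max, mean, std, median, var] of the detail stats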
Example no. 50
    x = np.linspace(0, 1, num=2048)
    chirp_signal = np.sin(250 * np.pi * x**2)
        
    fig, ax = plt.subplots(figsize=(6,1))
    ax.set_title("Original Chirp Signal: ")
    ax.plot(chirp_signal)
    plt.show()
        
    data = chirp_signal
    waveletname = 'haar'
    
    fig, axarr = plt.subplots(nrows=5, ncols=2, figsize=(6,6))
    figR, axarrR = plt.subplots(nrows=5, ncols=1,figsize=(6,6))
    
    for ii in range(5):
        (data, coeff_d) = pywt.dwt(data, waveletname)
        axarr[ii, 0].plot(data, 'r')
        axarr[ii, 1].plot(coeff_d, 'g')
        axarr[ii, 0].set_ylabel("Level {}".format(ii + 1), fontsize=14, rotation=90)
        axarr[ii, 0].set_yticklabels([])
        axarrR[ii].plot(pywt.idwt(data, coeff_d, waveletname))
        axarrR[ii].set_title("Reconstruction of level " + str(ii+1) + " coefficients")
        if ii == 0:
            axarr[ii, 0].set_title("Approximation coefficients", fontsize=14)
            axarr[ii, 1].set_title("Detail coefficients", fontsize=14)
        axarr[ii, 1].set_yticklabels([])
    plt.tight_layout()
    plt.show()
    
if (plotCorrectImplemnt == True):
    # Create wavelet and extract the filters
Example no. 51
currentSegment = deque(maxlen = WAVELET_SEGMENT_SIZE)
waveletList = []
for i in range(WAVELET_SEGMENT_SIZE):
    date, value = next(csvReader)
    currentSegment.append(value)

smoothingSegment = deque(maxlen=SMOOTHING_SIZE)

iteration_size = ((sum(1 for line in open(targetPath, "r")) - WAVELET_SEGMENT_SIZE)) - 1
print("DEBUG: LOOP_NUM:", iteration_size)
for i in range(iteration_size):
    date, value = next(csvReader)
    currentSegment.append(value)

    # cA, cD = pywt.dwt(sorted(currentSegment), WAVELET_DB)
    cA, cD = pywt.dwt(currentSegment, WAVELET_DB)

    smoothingSegment.append(cA[WAVELET_FREQUENCY])
    smoothedValue = sum(smoothingSegment) / len(smoothingSegment)

    waveletList.append([date, int(value), smoothedValue])

s = sum(map(lambda l:l[2], waveletList))
offset = 500 - (s / len(waveletList))
print("DEBUG: OFFFSET:", offset)
waveletList  = map(lambda l: [l[0], l[1], (l[2] + offset)], waveletList)

# write csv headers
csvWriter.writerow(["timestamp", "raw_value", "wavelet_value"])
csvWriter.writerow(["datetime", "int", "float"])
csvWriter.writerow(["T", "", ""])
Example no. 52
# -*- coding: utf-8 -*-
"""
Created on Wed Sep  9 12:34:57 2020

@author: qtckp
"""

import pywt

(cA, cD) = pywt.dwt([1, 2, 3, 4, 5, 6], 'db1')
Example no. 53
		fake_ch5A, fake_ch5D = generator(z)# * 20

		# Loss measures generator's ability to fool the discriminator
		g_loss = adversarial_loss(discriminator(fake_ch5A, fake_ch5D), valid)

		g_loss.backward()
		optimizer_G.step()

		# ---------------------
		#  Train Discriminator
		# ---------------------

		optimizer_D.zero_grad()

		# Measure discriminator's ability to classify real from generated samples
		real_ch5A, real_ch5D = pywt.dwt(real_imgs.cpu().numpy(), wtype)
		real_ch5A = torch.from_numpy(real_ch5A).cuda()
		real_ch5D = torch.from_numpy(real_ch5D).cuda() 
		real_loss = adversarial_loss(discriminator(real_ch5A, real_ch5D), valid)
		fake_loss = adversarial_loss(discriminator(fake_ch5A.detach(), fake_ch5D.detach()), fake)
		d_loss = (real_loss + fake_loss) / 2

		d_loss.backward()
		optimizer_D.step()

		print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" % (epoch, n_epochs, i, len(real_eegs),
															d_loss.item(), g_loss.item()))
	save_EEG_tfr(fake_ch5A.cpu().detach().numpy(), fake_ch5D.cpu().detach().numpy(), 44, 200, "./generated_eegs/generated-"+ str(epoch) + "-fake-conv-tfr")
	print("Save @ Epoch", epoch)
		# batches_done = epoch * len(dataloader) + i
		# if batches_done % sample_interval == 0:
Example no. 54
times_idwt = [[] for i in range(len(wavelets))]

repeat = 5

for j, size in enumerate(sizes):
    #if size > 500000:
    #    warnings.warn("Warning, too big data size may cause page swapping.")

    data = numpy.ones((size, ), dtype)

    print("%d/%d" % (j + 1, len(sizes))).rjust(6), str(size).rjust(9),
    for i, w in enumerate(wavelets):
        min_t1, min_t2 = 9999., 9999.
        for _ in xrange(repeat):
            t1 = clock()
            (a, d) = pywt.dwt(data, w, mode)
            t1 = clock() - t1
            min_t1 = min(t1, min_t1)

            t2 = clock()
            a0 = pywt.idwt(a, d, w, mode)
            t2 = clock() - t2
            min_t2 = min(t2, min_t2)

        times_dwt[i].append(min_t1)
        times_idwt[i].append(min_t2)
        print '.',
    print
    gc.collect()

for j, (times, name) in enumerate([(times_dwt, 'dwt'), (times_idwt, 'idwt')]):
Example no. 55
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pywt
dist = pd.read_csv("../../Result/dist.txt")
dist = np.asarray(dist)
cA = pd.read_csv("../../Result/cA.txt")
cA = np.asarray(cA)
cD = pd.read_csv("../../Result/cD.txt")
cD = np.asarray(cD)

ridx = 201
# my version
plt.subplot(3, 1, 1)
plt.plot(dist[ridx, :])
plt.subplot(3, 1, 2)
plt.plot(cA[ridx, :])
plt.subplot(3, 1, 3)
plt.plot(cD[ridx, :])
plt.savefig("1.png")

# lib version
dat = dist[ridx, :]
cA_lib, cD_lib = pywt.dwt(dat, 'db2')
plt.figure()
plt.subplot(3, 1, 1)
plt.plot(dat)
plt.subplot(3, 1, 2)
plt.plot(cA_lib)
plt.subplot(3, 1, 3)
plt.plot(cD_lib)
plt.savefig("2.png")
Example no. 56
def graphData(stock, MA1, MA2):
    stockFile = loadStock(stock)
    stockFile = stockFile[1:100]
    try:
        date, closep, highp, lowp, openp, volume = np.loadtxt(
            stockFile,
            delimiter=',',
            unpack=True,
            converters={0: bytespdate2num('%Y%m%d')})
        x = 0
        y = len(date)
        newAr = []
        while x < y:
            appendLine = date[x], openp[x], highp[x], lowp[x], closep[
                x], volume[x]
            newAr.append(appendLine)
            x += 1

        Av1 = movingaverage(closep, MA1)
        Av2 = movingaverage(closep, MA2)
        print(closep)
        SP = len(date[MA2 - 1:])
        cA, cD = pywt.dwt(closep, 'haar')
        #SMA
        #--------------------------------------
        fig = plt.figure(facecolor='#07000d')

        ax1 = plt.subplot2grid((6, 4), (1, 0),
                               rowspan=4,
                               colspan=4,
                               axisbg='#07000d')
        candlestick_ochl(ax1,
                         newAr[-SP:],
                         width=.6,
                         colorup='#53c156',
                         colordown='#ff1717')

        Label1 = str(MA1) + ' SMA'
        Label2 = str(MA2) + ' SMA'
        newDate = date[-SP:]
        newDate = newDate[1::2]
        ax1.plot(newDate[-SP // 2:],
                 cA[-SP // 2:],
                 '#e1edf9',
                 label=Label1,
                 linewidth=1.5)
        ax1.plot(newDate[-SP // 2:],
                 cD[-SP // 2:],
                 '#4ee6fd',
                 label=Label2,
                 linewidth=1.5)

        ax1.grid(True, color='w')
        ax1.xaxis.set_major_locator(mticker.MaxNLocator(10))
        ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        ax1.yaxis.label.set_color("w")
        ax1.spines['bottom'].set_color("#5998ff")
        ax1.spines['top'].set_color("#5998ff")
        ax1.spines['left'].set_color("#5998ff")
        ax1.spines['right'].set_color("#5998ff")
        ax1.tick_params(axis='y', colors='w')
        plt.gca().yaxis.set_major_locator(mticker.MaxNLocator(prune='upper'))
        ax1.tick_params(axis='x', colors='w')
        plt.ylabel('Stock price and Volume')
        maLeg = plt.legend(loc=9,
                           ncol=2,
                           prop={'size': 7},
                           fancybox=True,
                           borderaxespad=0.)
        maLeg.get_frame().set_alpha(0.4)
        textEd = pylab.gca().get_legend().get_texts()
        pylab.setp(textEd[0:5], color='w')

        ax1.yaxis.set_major_locator(mticker.MaxNLocator(nbins=5,
                                                        prune='upper'))
        for label in ax1.xaxis.get_ticklabels():
            label.set_rotation(45)

        plt.suptitle(stock.upper(), color='w')
        plt.setp(ax1.get_xticklabels(), visible=True)

        # ax1.annotate('Big news!',(date[510],Av1[510]),
        #     xytext=(0.8, 0.9), textcoords='axes fraction',
        #     arrowprops=dict(facecolor='white', shrink=0.05),
        #     fontsize=14, color = 'w',
        #     horizontalalignment='right', verticalalignment='bottom')

        plt.subplots_adjust(left=.09,
                            bottom=.14,
                            right=.94,
                            top=.95,
                            wspace=.20,
                            hspace=0)
        plt.show()
        # fig.savefig('example.png',facecolor=fig.get_facecolor())

    except Exception as e:
        print('main loop', str(e))
Example no. 57
                    fw.write(str(i+1)+'\t'+str(j+1)+'\t'+str(maxSimilar)[:6]+'\n')
        fw.close()


if __name__ == '__main__':
    a = dataHelper(r'/home/hl/cy/code/data/USACA17_20pro.json')
    data = a.getData(24*3)
    b = statics(data)
    keyWords = b.getTopKWords(10)
    k_interval = int(24*3*60/10)
    c = similarity(data, k_interval)

    window = 60
    q = int(k_interval/2)
    
    length = len(keyWords)
    count = 0
    fw = open('/home/hl/cy/code/data/USACAwordsignal.json', 'w+', encoding='utf-8')
    for word in keyWords:
        count += 1
        if (count % 10 == 0):
            print("{}/{}".format(count, length))
        temp = c.WCWTF_ITWCWTF(word)
        signal = c.Fuzzy(list=temp, window_with=window)
        dwt = list(np.array(pywt.dwt(signal, 'haar')).reshape(1, -1)[0][:q])
        content = {word: {'WCWTF': temp, 'fuzzy': signal, 'dwt': dwt}}
        json_str = json.dumps(content)
        fw.write(json_str + '\n')
    fw.close()

Example no. 58
import joblib

# %%
# Taking the file path from user
testData = pd.read_csv('test.csv', header=None)
testData = testData.dropna()
n_rows = testData.shape[0]
n_col = testData.shape[1]

# Feature Extractions:

# Feature: DWT Transform of data
fft_coef = []
index_ar = []
for i in range(n_rows):
    (d, c) = pywt.dwt(testData.iloc[i, :], 'db2')
    (d, c) = pywt.dwt(d, 'db2')
    fft_coef.append(c)
    index_ar.append(i)

# Feature: Kurtosis

testKurtosis = []
for i in range(n_rows):
    row = testData.iloc[i, :]
    kurtVal = feature_calculators.kurtosis(row)
    testKurtosis.append(kurtVal)

# Feature: LAOGE (Large Amplitude of plasma Glucose Excursions)

testLAOGE = np.zeros(n_rows)
Example no. 59
def process_data(data, channelNames, srate):
    global f_labels, processed_channel_names

    # Default RQA parameters
    embedding = 10  # Embedding dimension
    tdelay = 2  # Time delay
    tau = 30  # threshold

    # Multiscaling is accomplished with a wavelet transform
    # Options for basis functions: ['haar', 'db', 'sym', 'coif', 'bior', 'rbio', 'dmey']
    #wavelet = 'haar'
    wavelet = 'db4'
    mode = 'constant'   # constant padding; older PyWavelets spelled this mode 'cpd'
    #mode = pywt.Modes.smooth

    # Simple array for entropy value
    ent = np.zeros(1)

    # Determine the number of levels required so that
    # the lowest level approximation is roughly the
    # delta band (freq range 0-4 Hz)
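    # For example, with srate = 256 Hz and levels = 5 the final approximation
    # spans 0 .. srate / 2**(levels + 1) = 256 / 64 = 4 Hz, i.e. the delta
    # band (srate = 128 Hz with levels = 4 gives the same 0-4 Hz band).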

    if srate <= 128: levels = 4
    elif srate <= 256: levels = 5
    elif srate <= 512:  # subsample
        srate = srate / 2.0
        n = len(data[0])
        data = data[0:, 0:n:2]
        levels = 5
    elif srate <= 1024:
        srate = srate / 4.0
        n = len(data[0])
        data = data[0:, 0:n:4]
        levels = 5
    nbands = levels

    wavelet_scale = {}
    f_limit = {}

    # The following function returns the highest level (ns) approximation
    # in dec[0], then details for level ns in dec[1]. Each successive
    # level of detail coefficients is in dec[2] through dec[ns].
    #
    #   level       approximation       details
    #   0           original signal     --
    #   1                -              dec[ns]
    #   2                -              dec[ns-1]
    #   3                -              dec[ns-2]
    #   i              -                dec[ns-i+1]
    #   ns          dec[0]              dec[1]
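    #
    # (Assuming the decomposition referred to is pywt.wavedec: e.g.
    #    dec = pywt.wavedec(signal, wavelet, mode=mode, level=ns)
    #  returns [cA_ns, cD_ns, cD_ns-1, ..., cD_1], matching the table above.)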

    WRITE_RP_IMAGE_FILE = False

    # Print screen headers
    sys.stdout.write("%10s %6s " % ("Sensor", "Freq"))
    for f in all_features:
        sys.stdout.write(" %8s " % (f))
    sys.stdout.write("\n")

    D = {}

    for c, ch in enumerate(channelNames):
        if ch in master_channel_list:
            processed_channel_names.append(ch)

            # Create a raw recurrence plot image for the original signal from this channel
            if WRITE_RP_IMAGE_FILE:
                rp_plot_name = filename + "_" + ch + "_" + "rp" + ".png"
                print("            write rp image file ", rp_plot_name)
                settings = Settings(data[c],
                                    embedding_dimension=embedding,
                                    time_delay=tdelay,
                                    neighbourhood=FixedRadius(0))
                #computation = RQAComputation.create(settings, verbose=False)
                rp_computation = RecurrencePlotComputation.create(
                    settings, verbose=False)
                result = rp_computation.run()
                ImageGenerator.save_recurrence_plot(
                    result.recurrence_matrix_reverse, rp_plot_name)

            D[ch] = {}

            #--------------------------------------------------------------------
            # Get the wavelet decomposition. See pywavelet (or pywt) documents.
            # Deconstruct the waveforms
            # S = An + Dn + Dn-1 + ... + D1
            #--------------------------------------------------------------------
            w = pywt.Wavelet(wavelet)
            m = np.mean(data[c])
            a_orig = data[c] - m  # the original signal, initially
            a = a_orig

            ca = []  # all the approximations
            cd = []  # all the details
            sqrt2 = np.sqrt(2.0)
            for i in range(nbands):
                # Cascade the single-level DWT, feeding each approximation back
                # in as the next level's input; dividing by sqrt(2)**level
                # rescales the coefficients back toward the original signal's
                # amplitude (the orthonormal filters gain sqrt(2) per level).
                (a, d) = pywt.dwt(a, w, mode)
                f = pow(sqrt2, i + 1)
                ca.append(a / f)
                cd.append(d / f)

            if False:  # flip to True to build full reconstructed signals at every level
                rec_a = []  # reconstructed approximations
                rec_d = []  # reconstructed details
                for i, coeff in enumerate(ca):
                    coeff_list = [coeff, None] + [None] * i
                    rec_a.append(pywt.waverec(coeff_list, w))
                for i, coeff in enumerate(cd):
                    coeff_list = [None, coeff] + [None] * i
                    rec_d.append(pywt.waverec(coeff_list, w))
            else:
                rec_a = ca
                rec_d = cd

            # Use the details and last approximation to create all the power-of-2 freq bands
            f_labels = ['A0']
            wavelet_scale = {}
            wavelet_scale['A0'] = 0
            f_limit = {}
            f_limit['A0'] = srate / 2.0
            fs = [srate]
            freqband = [a_orig]  # A0 is the original signal
            N = len(a_orig)
            f = srate / 4.0
            for j, r in enumerate(rec_a):
                freq_name = 'A' + str(j + 1)
                wavelet_scale[freq_name] = j + 1
                f_limit[freq_name] = f
                f = f / 2.0
                f_labels.append(freq_name)
                freqband.append(r[0:N])  # wavelet approximation for this band

            f = srate / 2.0
            for j, r in enumerate(rec_d):
                freq_name = 'D' + str(j + 1)
                wavelet_scale[freq_name] = j + 1
                f_limit[freq_name] = f
                f = f / 2.0
                f_labels.append(freq_name)
                freqband.append(r[0:N])  # wavelet details for this band

            #--------------------------------------------------------------------
            # Compute features on each of the frequency bands
            #--------------------------------------------------------------------
            for f in all_features:
                D[ch][f] = {}

            #----------------------
            # Feature set 1: Power
            for i, y in enumerate(freqband):
                v = bandpower(y)
                D[ch]["Power"][f_labels[i]] = v

                #----------------------
                # Feature set 2: Sample Entropy, Hurst parameter, DFA, Lyapunov exponents
                D[ch]["SampE"][f_labels[i]] = nolds.sampen(y)

                try:
                    D[ch]["hurst_rs"][f_labels[i]] = nolds.hurst_rs(y)
                except Exception:
                    D[ch]["hurst_rs"][f_labels[i]] = 0.0

                try:
                    D[ch]["dfa"][f_labels[i]] = nolds.dfa(y)
                except Exception:
                    D[ch]["dfa"][f_labels[i]] = 0.0

                try:
                    D[ch]["cd"][f_labels[i]] = nolds.corr_dim(y, embedding)
                except Exception:
                    D[ch]["cd"][f_labels[i]] = 0.0

                try:
                    #lyap = nolds.lyap_e(y, emb_dim= embedding)
                    lyap0 = nolds.lyap_r(y, emb_dim=embedding)
                except Exception:
                    #lyap = [0.0, 0.0, 0.0]
                    lyap0 = 0.0
                D[ch]["lyap0"][f_labels[i]] = lyap0

                #----------------------
                # Feature set 3: Recurrence Quantitative Analysis (RQA)
                # This routine seems to be incredibly slow and may need improvement
                rqa_features = [
                    "RR", "DET", "LAM", "L_entr", "L_max", "L_mean", "TT"
                ]
                pyRQA_names = ['recurrence_rate', 'determinism', 'laminarity', 'entropy_diagonal_lines', \
                               'longest_diagonal_line','average_diagonal_line', 'trapping_time'   ]

                # First check to see if RQA values are needed at all
                compute_RQA = False
                for r in rqa_features:
                    if r in all_features:
                        compute_RQA = True
                        break

                if compute_RQA:
                    #for i, y in enumerate(freqband):
                    settings = Settings(
                        y,
                        embedding_dimension=embedding,
                        time_delay=tdelay,
                        neighbourhood=FixedRadius(tau)
                        #similarity_measure=EuclideanMetric,
                        #theiler_corrector=1,
                        #min_diagonal_line_length=2,
                        #min_vertical_line_length=2,
                        #min_white_vertical_line_length=2)
                    )
                    computation = RQAComputation.create(settings,
                                                        verbose=False)
                    result = computation.run()

                    # We have to pull out each value
                    w = f_labels[i]
                    D[ch]["RR"][w] = result.recurrence_rate
                    D[ch]["DET"][w] = result.determinism
                    D[ch]["LAM"][w] = result.laminarity
                    D[ch]["L_entr"][w] = result.entropy_diagonal_lines
                    D[ch]["L_max"][w] = result.longest_diagonal_line
                    D[ch]["L_mean"][w] = result.average_diagonal_line
                    D[ch]["TT"][w] = result.trapping_time
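                    # The seven assignments above could also be written
                    # generically (a sketch using only the rqa_features /
                    # pyRQA_names lists defined earlier in this loop):
                    #   for short_name, attr in zip(rqa_features, pyRQA_names):
                    #       D[ch][short_name][w] = getattr(result, attr)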

                # Write this channel's results for the current band to the
                # screen, to give visual feedback that the code is running
                w = f_labels[i]
                sys.stdout.write("%10s %6s " % (ch, w))
                for dyn_inv in all_features:  # D[ch].keys():
                    v = D[ch][dyn_inv][w]
                    sys.stdout.write(" %8.3f " % (v))
                sys.stdout.write("\n")

    return D, srate, wavelet_scale, f_limit

def wavelet_dwt(self, eeg_data):
    return pywt.dwt(eeg_data, 'db5')
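
A minimal usage sketch for this helper (the enclosing class is not shown in the snippet, so the wrapped call is made directly; the EEG segment is a placeholder):

import numpy as np
import pywt

eeg = np.random.randn(512)         # placeholder EEG segment
cA, cD = pywt.dwt(eeg, 'db5')      # the (approximation, detail) pair the method returns
print(len(cA), len(cD))            # each roughly half the input length, plus filter padding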