Code example #1
def test_smooth_nd():
    for edge in ['m', 'c']:
        a = rand(20, 2, 3) + 10
        for M in [5, 20, 123]:
            print("nd", edge, "M=%i" %M)
            kern = gaussian(M, 2.0)
            asm = smooth(a, kern[:,None,None], axis=0, edge=edge)
            assert asm.shape == a.shape
            for jj in range(asm.shape[1]):
                for kk in range(asm.shape[2]):
                    assert np.allclose(asm[:,jj,kk], smooth(a[:,jj,kk], kern, 
                                                            edge=edge))
                    mn = a[:,jj,kk].min()
                    mx = a[:,jj,kk].max()
                    smn = asm[:,jj,kk].min()
                    smx = asm[:,jj,kk].max()
                    assert smn >= mn, "min: data=%f, smooth=%f" %(mn, smn)
                    assert smx <= mx, "max: data=%f, smooth=%f" %(mx, smx)
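The test above checks that smoothing with pwtools' smooth() preserves the array shape and the data's range for every edge mode. The following standalone sketch (plain NumPy, not pwtools' implementation; gaussian_kernel and smooth_1d_sketch are made-up helper names, and edge='m' is assumed to mean mirror-style padding) illustrates why a normalized, non-negative kernel combined with mirrored edges keeps the smoothed signal inside [data.min(), data.max()] and away from zero at the ends:

import numpy as np

def gaussian_kernel(M, std):
    # symmetric Gaussian window of length M, similar in shape to the
    # gaussian() kernel used in the tests
    n = np.arange(M) - (M - 1) / 2.0
    return np.exp(-0.5 * (n / std)**2)

def smooth_1d_sketch(x, kern):
    # normalize the kernel: every output point becomes a weighted average
    # of data values, so the result cannot leave [x.min(), x.max()]
    kern = kern / kern.sum()
    p = len(kern) // 2
    # mirror the signal at both ends instead of the implicit zero padding
    # that would pull the smoothed ends towards zero
    xp = np.pad(x, p, mode='reflect')
    return np.convolve(xp, kern, mode='same')[p:-p]

x = np.random.rand(20) + 10      # values in [10, 11]
xs = smooth_1d_sketch(x, gaussian_kernel(21, 2.0))
assert xs.shape == x.shape
assert x.min() <= xs.min() and xs.max() <= x.max()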
Code example #2
File: test_signal.py Project: elcorto/pwtools
def test_smooth_nd():
    for edge in ['m', 'c']:
        a = rand(20, 2, 3) + 10
        for M in [5, 20, 123]:
            print "nd", edge, "M=%i" %M
            kern = gaussian(M, 2.0)
            asm = smooth(a, kern[:,None,None], axis=0, edge=edge)
            assert asm.shape == a.shape
            for jj in range(asm.shape[1]):
                for kk in range(asm.shape[2]):
                    assert np.allclose(asm[:,jj,kk], smooth(a[:,jj,kk], kern, 
                                                            edge=edge))
                    mn = a[:,jj,kk].min()
                    mx = a[:,jj,kk].max()
                    smn = asm[:,jj,kk].min()
                    smx = asm[:,jj,kk].max()
                    assert smn >= mn, "min: data=%f, smooth=%f" %(mn, smn)
                    assert smx <= mx, "max: data=%f, smooth=%f" %(mx, smx)
Code example #3
def test_smooth_1d():
    for edge in ['m', 'c']:
        for N in [20,21]:
            # values in [9.0,11.0]
            x = rand(N) + 10
            mn = 9.0
            mx = 11.0
            for M in range(18,27):
                print("1d", edge, "N=%i, M=%i" %(N,M))
                xsm = smooth(x, gaussian(M,2.0), edge=edge)
                assert len(xsm) == N
                # (N,1) case
                xsm2 = smooth(x[:,None], gaussian(M,2.0)[:,None], edge=edge)
                assert np.allclose(xsm, xsm2[:,0], atol=1e-14, rtol=1e-12)
                # Smoothed signal should not go to zero if edge effects are handled
                # properly. Also assert proper normalization (i.e. smoothed signal
                # is "in the middle" of the noisy original data).
                assert xsm.min() >= mn
                assert xsm.max() <= mx
                assert mn <= xsm[0] <= mx
                assert mn <= xsm[-1] <= mx
            # convolution with delta peak produces same data exactly
            assert np.allclose(smooth(x, np.array([0.0,1,0]), edge=edge),x, atol=1e-14,
                               rtol=1e-12)
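The last assert in test_smooth_1d uses a basic convolution identity: a kernel that is 1 at its center and 0 elsewhere acts as the identity, so the smoothed output equals the input exactly. A minimal NumPy-only illustration of the same identity, independent of pwtools' smooth():

import numpy as np

x = np.random.rand(20) + 10
# a centered delta kernel reproduces the input under 'same' convolution
assert np.allclose(np.convolve(x, [0.0, 1.0, 0.0], mode='same'), x)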
Code example #4
File: test_signal.py Project: elcorto/pwtools
def test_smooth_1d():
    for edge in ['m', 'c']:
        for N in [20,21]:
            # values in [9.0,11.0]
            x = rand(N) + 10
            mn = 9.0
            mx = 11.0
            for M in range(18,27):
                print "1d", edge, "N=%i, M=%i" %(N,M)
                xsm = smooth(x, gaussian(M,2.0), edge=edge)
                assert len(xsm) == N
                # (N,1) case
                xsm2 = smooth(x[:,None], gaussian(M,2.0)[:,None], edge=edge)
                assert np.allclose(xsm, xsm2[:,0], atol=1e-14, rtol=1e-12)
                # Smoothed signal should not go to zero if edge effects are handled
                # properly. Also assert proper normalization (i.e. smoothed signal
                # is "in the middle" of the noisy original data).
                assert xsm.min() >= mn
                assert xsm.max() <= mx
                assert mn <= xsm[0] <= mx
                assert mn <= xsm[-1] <= mx
            # convolution with delta peak produces same data exactly
            assert np.allclose(smooth(x, np.array([0.0,1,0]), edge=edge),x, atol=1e-14,
                               rtol=1e-12)
Code example #5
File: lorentz.py Project: elcorto/pwtools
    if nrand % 2 == 1:
        nrand += 1
    y[npoints//2-nrand//2:npoints//2+nrand//2] = np.random.rand(nrand) + 2.0
    
    # Sum of Lorentz functions at data points. This is the same as convolution
    # with a Lorentz function withOUT end point correction, valid if data `y`
    # is properly zero at both ends, else edge effects are visible: smoothed
    # data always goes to zero at both ends, even if original data doesn't. We
    # need to use a very wide kernel with at least 100*std b/c of long
    # Lorentz tails. Better 200*std to be safe.
    sig = np.zeros_like(y)
    for xi,yi in enumerate(y):
        sig += yi * std / ((x-xi)**2.0 + std**2.0)
    sig = scale(sig)
    plt.plot(sig, label='sum')
    # convolution with wide kernel
    klen = 200*std
    klen = klen+1 if klen % 2 == 0 else klen # odd kernel
    kern = lorentz(klen, std=std)
    plt.plot(scale(convolve(y, kern/float(kern.sum()), 'same')),
             label='conv, klen=%i' %klen)

    # Convolution with Lorentz function with end-point correction.  
    for klen in [10*std, 100*std, 200*std]:
        klen = klen+1 if klen % 2 == 0 else klen # odd kernel
        kern = lorentz(klen, std=std)
        plt.plot(scale(smooth(y, kern)), label='conv+edge, klen=%i' %klen)
    plt.title("npoints=%i" %npoints)
    plt.legend()
plt.show()
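The comment in this example claims that summing a Lorentzian of width std placed at every data point is the same as convolving the data with a sufficiently wide Lorentzian kernel (no end-point correction). Below is a quick numerical check of that claim with plain NumPy/SciPy; lorentz_kernel is a made-up stand-in, not pwtools' lorentz(), and no scale()/normalization is applied so the two results can be compared directly:

import numpy as np
from scipy.signal import convolve

def lorentz_kernel(M, std):
    # unnormalized Lorentzian profile on M points, centered at (M - 1) / 2
    n = np.arange(M) - (M - 1) / 2.0
    return std / (n**2 + std**2)

npoints, std = 200, 2
x = np.arange(npoints)
y = np.zeros(npoints)
y[npoints // 2 - 10:npoints // 2 + 10] = np.random.rand(20) + 2.0

# direct sum of Lorentzians placed at every data point
sig = np.zeros_like(y)
for xi, yi in enumerate(y):
    sig += yi * std / ((x - xi)**2.0 + std**2.0)

# convolution with an odd kernel wide enough to cover all pairwise
# distances, i.e. no truncation of the Lorentz tails
klen = 2 * npoints + 1
conv = convolve(y, lorentz_kernel(klen, std), mode='same')
assert np.allclose(sig, conv)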
Code example #6
File: lorentz.py Project: zari277/pwtools
        nrand += 1
    y[npoints // 2 - nrand // 2:npoints // 2 +
      nrand // 2] = np.random.rand(nrand) + 2.0

    # Sum of Lorentz functions at data points. This is the same as convolution
    # with a Lorentz function withOUT end point correction, valid if data `y`
    # is properly zero at both ends, else edge effects are visible: smoothed
    # data always goes to zero at both ends, even if original data doesn't. We
    # need to use a very wide kernel with at least 100*std b/c of long
    # Lorentz tails. Better 200*std to be safe.
    sig = np.zeros_like(y)
    for xi, yi in enumerate(y):
        sig += yi * std / ((x - xi)**2.0 + std**2.0)
    sig = scale(sig)
    plt.plot(sig, label='sum')
    # convolution with wide kernel
    klen = 200 * std
    klen = klen + 1 if klen % 2 == 0 else klen  # odd kernel
    kern = lorentz(klen, std=std)
    plt.plot(scale(convolve(y, kern / float(kern.sum()), 'same')),
             label='conv, klen=%i' % klen)

    # Convolution with Lorentz function with end-point correction.
    for klen in [10 * std, 100 * std, 200 * std]:
        klen = klen + 1 if klen % 2 == 0 else klen  # odd kernel
        kern = lorentz(klen, std=std)
        plt.plot(scale(smooth(y, kern)), label='conv+edge, klen=%i' % klen)
    plt.title("npoints=%i" % npoints)
    plt.legend()
plt.show()