Example #1
def test_numpy_cc_samesize():
    arrs = get_sample_arrays()
    try:
        multipletau.correlate_numpy(a=arrs[0], v=arrs[1], normalize=True)
    except ValueError as e:
        assert "`a` and `v` must have same length!" in e.args
    else:
        assert False
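As a point of reference, here is a minimal sketch (not part of the test suite; the random arrays are placeholders) of a call that passes the length check exercised above:

import numpy as np
import multipletau

# `a` and `v` must have the same number of samples; the offset keeps their
# averages nonzero so that normalize=True is permitted.
a = np.random.random(100) + 1.0
v = np.random.random(100) + 1.0
g = multipletau.correlate_numpy(a=a, v=v, normalize=True)
print(g.shape)  # rows of [lag time, correlation value]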
Example #2
def test_numpy_cc_trace0():
    arrs = get_sample_arrays()
    try:
        multipletau.correlate_numpy(a=arrs[0] - np.mean(arrs[0]),
                                    v=arrs[0],
                                    normalize=True)
    except ValueError as e:
        assert "Cannot normalize: Average of `a` is zero!" in e.args
    else:
        assert False

    try:
        multipletau.correlate_numpy(a=arrs[0],
                                    v=arrs[0] - np.mean(arrs[0]),
                                    normalize=True)
    except ValueError as e:
        assert "Cannot normalize: Average of `v` is zero!" in e.args
    else:
        assert False
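The error messages checked here suggest that normalization divides by the averages of `a` and `v`. A sketch (not from the test suite) of how the later examples avoid the zero-average case, namely by adding a nonzero offset after subtracting the mean; the offset value is a placeholder:

import numpy as np
import multipletau

a = np.random.random(100)
a -= np.average(a)  # zero mean: normalize=True would raise ValueError
a += 0.25           # nonzero offset (a "count rate"), as in the noise examples below
g = multipletau.correlate_numpy(a=a, v=a, normalize=True)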
Example #3
def test_corresponds_ac_first_loop():
    """
    numpy correlation:
    G_m = sum_i(a_i*a_{i+m})
    
    multipletau correlation 2nd order:
    b_j = (a_{2j} + a_{2j+1}) / 2
    G_m = sum_j(b_j*b_{j+m/2})
        = 1/4*sum_j(a_{2j}   * a_{2j+m}   +
                    a_{2j}   * a_{2j+m+1} +
                    a_{2j+1} * a_{2j+m}   +
                    a_{2j+1} * a_{2j+m+1}
                    )
    
    The values after the first m+1 lag times in the multipletau
    correlation differ from the normal correlation, because the
    traces are averaged over two consecutive items, effectively
    halving the size of the trace. The multiple-tau correlation
    can be compared to the regular correlation by using an even
    sized sequence (here 222) in which the elements 2i and 2i+1
    are equal, as is done in this test.
    """
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    a = [arr / np.average(arr) for arr in get_sample_arrays_cplx()]
    a = np.concatenate(a)[:222]
    # two consecutive elements are the same, so the multiple-tau method
    # corresponds to the numpy correlation for the first loop.
    a[::2] = a[1::2]

    for m in [2, 4, 6, 8, 10, 12, 14, 16]:
        restau = multipletau.correlate(a=a,
                                       v=a.imag + 1j * a.real,
                                       m=m,
                                       copy=True,
                                       normalize=False,
                                       dtype=np.complex128)

        reslin = multipletau.correlate_numpy(a=a,
                                             v=a.imag + 1j * a.real,
                                             copy=True,
                                             normalize=False,
                                             dtype=np.complex128)

        idtau = np.where(restau[:, 0] == m + 2)[0][0]
        tau3 = restau[idtau, 1]  #m+1 initial bins

        idref = np.where(reslin[:, 0] == m + 2)[0][0]
        tau3ref = reslin[idref, 1]

        assert np.allclose(tau3, tau3ref)
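The binning relation in the docstring can be checked with plain numpy, independently of multipletau. A sketch under the docstring's assumptions: when elements 2j and 2j+1 of the trace are equal and the lag m is even, the binned sum at lag m/2 reproduces the unbinned sum at lag m, up to a factor of two that accounts for the halved number of terms:

import numpy as np

rng = np.random.default_rng(0)
a = rng.random(111).repeat(2)       # elements 2j and 2j+1 are equal (length 222)
b = (a[::2] + a[1::2]) / 2          # second-order binning: b_j = (a_{2j} + a_{2j+1}) / 2

m = 4                               # an even lag
G_full = np.sum(a[:len(a) - m] * a[m:])           # sum_i a_i * a_{i+m}
G_bin = np.sum(b[:len(b) - m // 2] * b[m // 2:])  # sum_j b_j * b_{j+m/2}
assert np.allclose(G_full, 2 * G_bin)             # factor 2: half as many terms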
Example #4
def test_corresponds_ac_first_loop():
    """
    numpy correlation:
    G_m = sum_i(a_i*a_{i+m})
    
    multipletau correlation 2nd order:
    b_j = (a_{2j} + a_{2j+1}) / 2
    G_m = sum_j(b_j*b_{j+m/2})
        = 1/4*sum_j(a_{2j}   * a_{2j+m}   +
                    a_{2j}   * a_{2j+m+1} +
                    a_{2j+1} * a_{2j+m}   +
                    a_{2j+1} * a_{2j+m+1}
                    )
    
    The values after the first m+1 lag times in the multipletau
    correlation differ from the normal correlation, because the
    traces are averaged over two consecutive items, effectively
    halving the size of the trace. The multiple-tau correlation
    can be compared to the regular correlation by using an even
    sized sequence (here 222) in which the elements 2i and 2i+1
    are equal, as is done in this test.
    """
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    
    a = [arr / np.average(arr) for arr in get_sample_arrays_cplx()]
    a = np.concatenate(a)[:222]
    # two consecutive elements are the same, so the multiple-tau method
    # corresponds to the numpy correlation for the first loop.
    a[::2] = a[1::2]

    for m in [2, 4, 6, 8, 10, 12, 14, 16]:
        restau = multipletau.correlate(a=a,
                                       v=a.imag + 1j * a.real,
                                       m=m,
                                       copy=True,
                                       normalize=False,
                                       dtype=np.complex256)

        reslin = multipletau.correlate_numpy(a=a,
                                             v=a.imag + 1j * a.real,
                                             copy=True,
                                             normalize=False,
                                             dtype=np.complex256)

        idtau = np.where(restau[:, 0] == m + 2)[0][0]
        tau3 = restau[idtau, 1]  # m+1 initial bins

        idref = np.where(reslin[:, 0] == m + 2)[0][0]
        tau3ref = reslin[idref, 1]

        assert np.allclose(tau3, tau3ref)
Example #5
def test_corresponds_ac_nonormalize():
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    a = np.concatenate(get_sample_arrays_cplx()).real
    m = 16

    restau = multipletau.autocorrelate(a=1 * a,
                                       m=m,
                                       copy=True,
                                       normalize=False,
                                       dtype=np.float64)

    reslin = multipletau.correlate_numpy(a=1 * a,
                                         v=1 * a,
                                         copy=True,
                                         normalize=False,
                                         dtype=np.float64)

    idx = np.array(restau[:, 0].real, dtype=int)[:m + 1]

    assert np.allclose(reslin[idx, 1], restau[:m + 1, 1])
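The indexing in the last two lines works because the first column of the multiple-tau result holds the lag times, and with the default deltat=1 the first m+1 lag channels are plain integers, so they double as row indices into the linearly spaced numpy result. A sketch of that pattern with made-up [lag, G] rows (no multipletau involved):

import numpy as np

restau = np.array([[0., 5.], [1., 4.], [2., 3.]])            # fake multiple-tau rows
reslin = np.array([[0., 5.], [1., 4.], [2., 3.], [3., 2.]])  # fake linear rows
idx = np.array(restau[:, 0].real, dtype=int)
assert np.allclose(reslin[idx, 1], restau[:, 1])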
Example #6
def test_corresponds_ac_nonormalize():
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    
    a = np.concatenate(get_sample_arrays_cplx()).real
    m = 16

    restau = multipletau.autocorrelate(a=1 * a,
                                       m=m,
                                       copy=True,
                                       normalize=False,
                                       dtype=np.float128)

    reslin = multipletau.correlate_numpy(a=1 * a,
                                         v=1 * a,
                                         copy=True,
                                         normalize=False,
                                         dtype=np.float128)

    idx = np.array(restau[:, 0].real, dtype=int)[:m + 1]

    assert np.allclose(reslin[idx, 1], restau[:m + 1, 1])
Example #7
def test_corresponds_cc():
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    a = np.concatenate(get_sample_arrays_cplx())
    m = 16

    restau = multipletau.correlate(a=a,
                                   v=a.imag + 1j * a.real,
                                   m=m,
                                   copy=True,
                                   normalize=True,
                                   dtype=np.complex128)

    reslin = multipletau.correlate_numpy(a=a,
                                         v=a.imag + 1j * a.real,
                                         copy=True,
                                         normalize=True,
                                         dtype=np.complex128)

    idx = np.array(restau[:, 0].real, dtype=int)[:m + 1]

    assert np.allclose(reslin[idx, 1], restau[:m + 1, 1])
Example #8
def test_corresponds_cc():
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    
    a = np.concatenate(get_sample_arrays_cplx())
    m = 16

    restau = multipletau.correlate(a=a,
                                   v=a.imag + 1j * a.real,
                                   m=m,
                                   copy=True,
                                   normalize=True,
                                   dtype=np.complex256)

    reslin = multipletau.correlate_numpy(a=a,
                                         v=a.imag + 1j * a.real,
                                         copy=True,
                                         normalize=True,
                                         dtype=np.complex256)

    idx = np.array(restau[:, 0].real, dtype=int)[:m + 1]

    assert np.allclose(reslin[idx, 1], restau[:m + 1, 1])
Example #9
def compare_corr():
    ## Starting parameters
    N = int(np.pi*1e3)
    countrate = 250. * 1e-3 # in Hz
    taudiff = 55. # in units of deltat
    deltat = 2e-6 # time discretization [s]
    normalize = True

    # time factor
    taudiff *= deltat

    if N < 1e5:
        do_np_corr = True
    else:
        do_np_corr = False

    ## Autocorrelation
    print("Creating noise for autocorrelation")
    data = noise_exponential(N, taudiff, deltat=deltat)
    data -= np.average(data)
    if normalize:
        data += countrate
    # multipletau
    print("Performing autocorrelation (multipletau).")
    G = autocorrelate(data, deltat=deltat, normalize=normalize)
    # numpy.correlate for comparison
    if do_np_corr:
        print("Performing autocorrelation (numpy).")
        Gd = correlate_numpy(data, data, deltat=deltat,
                             normalize=normalize)
    else:
        Gd = G
    
    ## Cross-correlation
    print("Creating noise for cross-correlation")
    a, v = noise_cross_exponential(N, taudiff, deltat=deltat)
    a -= np.average(a)
    v -= np.average(v)
    if normalize:
        a += countrate
        v += countrate
    Gccforw = correlate(a, v, deltat=deltat, normalize=normalize) # forward
    Gccback = correlate(v, a, deltat=deltat, normalize=normalize) # backward
    if do_np_corr:
        print("Performing cross-correlation (numpy).")
        Gdccforw = correlate_numpy(a, v, deltat=deltat, normalize=normalize)
    
    ## Calculate the model curve for cross-correlation
    xcc = Gd[:,0]
    ampcc = np.correlate(a-np.average(a), v-np.average(v), mode="valid")
    if normalize:
        ampcc /= len(a) * countrate**2
    ycc = ampcc*np.exp(-xcc/taudiff)

    ## Calculate the model curve for autocorrelation
    x = Gd[:,0]
    amp = np.correlate(data-np.average(data), data-np.average(data),
                       mode="valid")
    if normalize:
        amp /= len(data) * countrate**2
    y = amp*np.exp(-x/taudiff)


    ## Plotting
    # AC
    fig = plt.figure()
    fig.canvas.manager.set_window_title('testing multipletau')
    ax = fig.add_subplot(2,1,1)
    ax.set_xscale('log')
    if do_np_corr:
        plt.plot(Gd[:,0], Gd[:,1] , "-", color="gray", label="correlate (numpy)")
    plt.plot(x, y, "g-", label="input model")
    plt.plot(G[:,0], G[:,1], "-",  color="#B60000", label="autocorrelate")
    plt.xlabel("lag channel")
    plt.ylabel("autocorrelation")
    plt.legend(loc=0, fontsize='small')
    plt.ylim( -amp*.2, amp*1.2)
    plt.xlim( Gd[0,0], Gd[-1,0])

    # CC
    ax = fig.add_subplot(2,1,2)
    ax.set_xscale('log')
    if do_np_corr:
        plt.plot(Gdccforw[:,0], Gdccforw[:,1] , "-", color="gray", label="forward (numpy)")
    plt.plot(xcc, ycc, "g-", label="input model")
    plt.plot(Gccforw[:,0], Gccforw[:,1], "-", color="#B60000", label="forward")
    plt.plot(Gccback[:,0], Gccback[:,1], "-", color="#5D00B6", label="backward")
    plt.xlabel("lag channel")
    plt.ylabel("cross-correlation")
    plt.legend(loc=0, fontsize='small')
    plt.ylim( -ampcc*.2, ampcc*1.2)
    plt.xlim( Gd[0,0], Gd[-1,0])
    plt.tight_layout()

    savename = __file__[:-3]+".png"
    if os.path.exists(savename):
        savename = __file__[:-3]+time.strftime("_%Y-%m-%d_%H-%M-%S.png")

    plt.savefig(savename)
    print("Saved output to", savename)
Example #10
def test():
    import numpy as np
    import os
    import sys
    from matplotlib import pylab as plt
    sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/../"))
    from multipletau import autocorrelate, correlate, correlate_numpy
    ## Starting parameters
    N = int(np.pi*1e3)
    countrate = 250. * 1e-3 # in Hz
    taudiff = 55. # in units of deltat
    deltat = 2e-6 # time discretization [s]
    normalize = True

    # time factor
    taudiff *= deltat

    ##
    ## Autocorrelation
    ##
    print("Creating noise for autocorrelation")
    data = noise_exponential(N, taudiff, deltat=deltat)
    data -= np.average(data)
    if normalize:
        data += countrate
    # multipletau
    print("Performing autocorrelation (multipletau).")
    G = autocorrelate(data, deltat=deltat, normalize=normalize)
    # numpy.correlate for comparison
    if len(data) < 1e5:
        print("Performing autocorrelation (numpy).")
        Gd = correlate_numpy(data, data, deltat=deltat,
                             normalize=normalize)
    # Calculate the expected curve
    x = G[:,0]
    amp = np.correlate(data-np.average(data), data-np.average(data),
                       mode="valid")
    if normalize:
        amp /= len(data) * countrate**2
    y = amp*np.exp(-x/taudiff)

    ##
    ## Cross-correlation
    ##
    print("Creating noise for cross-correlation")
    a, v = noise_cross_exponential(N, taudiff, deltat=deltat)
    a -= np.average(a)
    v -= np.average(v)
    if normalize:
        a += countrate
        v += countrate
    # multipletau
    Gccforw = correlate(a, v, deltat=deltat, normalize=normalize)
    Gccback = correlate(v, a, deltat=deltat, normalize=normalize)
    if len(a) < 1e5:
        print("Performing autocorrelation (numpy).")
        Gdccforw = correlate_numpy(a, v, deltat=deltat, normalize=normalize)
    # Calculate the expected curve
    xcc = Gccforw[:,0]
    ampcc = np.correlate(a-np.average(a), v-np.average(v), mode="valid")

    if normalize:
        ampcc /= len(a) * countrate**2
    ycc = ampcc*np.exp(-xcc/taudiff)


    ##
    ## Plotting
    ##

    # AC
    fig = plt.figure()
    fig.canvas.manager.set_window_title('testing multipletau')
    ax = fig.add_subplot(2,1,1)
    ax.set_xscale('log')
    plt.plot(x, y, "g-", label="input model")
    plt.plot(G[:,0], G[:,1], "r-", label="autocorrelate")
    if len(data) < 1e5:
        plt.plot(Gd[:,0], Gd[:,1] , "b--", label="correlate (numpy)")
    plt.xlabel("lag channel")
    plt.ylabel("autocorrelation")
    plt.legend(loc=0, fontsize='small')
    plt.ylim( -amp*.2, amp*1.2)


    ## CC
    ax = fig.add_subplot(2,1,2)
    ax.set_xscale('log')
    plt.plot(xcc, ycc, "g-", label="input model")
    plt.plot(Gccforw[:,0], Gccforw[:,1], "r-", label="forward")
    if len(data) < 1e5:
        plt.plot(Gdccforw[:,0], Gdccforw[:,1] , "b--", label="forward (numpy)")
    plt.plot(Gccback[:,0], Gccback[:,1], "r--", label="backward")
    plt.xlabel("lag channel")
    plt.ylabel("cross-correlation")
    plt.legend(loc=0, fontsize='small')

    plt.ylim( -ampcc*.2, ampcc*1.2)

    plt.tight_layout()
    plt.show()
Example #11
taudiff = 55.  # in units of deltat
deltat = 2e-6  # time discretization [s]
normalize = True

# time factor
taudiff *= deltat

# create noise for autocorrelation
data = noise_exponential(N, taudiff, deltat=deltat)
data -= np.average(data)
if normalize:
    data += countrate
# perform autocorrelation (multipletau)
gac_mt = autocorrelate(data, deltat=deltat, normalize=normalize)
# numpy.correlate for comparison
gac_np = correlate_numpy(data, data, deltat=deltat,
                         normalize=normalize)
# calculate model curve for autocorrelation
x = gac_np[:, 0]
amp = np.correlate(data - np.average(data), data - np.average(data),
                   mode="valid")
if normalize:
    amp /= len(data) * countrate**2
y = amp * np.exp(-x / taudiff)

# create noise for cross-correlation
a, v = noise_cross_exponential(N, taudiff, deltat=deltat)
a -= np.average(a)
v -= np.average(v)
if normalize:
    a += countrate
    v += countrate
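The snippet stops before the correlation calls; a possible continuation, mirroring what Examples #9 and #10 do with the same variables (the gcc_* names are placeholders chosen here, not taken from the source):

# perform cross-correlation (multipletau), forward and backward
gcc_forward = correlate(a, v, deltat=deltat, normalize=normalize)
gcc_backward = correlate(v, a, deltat=deltat, normalize=normalize)
# numpy.correlate for comparison
gcc_np = correlate_numpy(a, v, deltat=deltat, normalize=normalize)
# calculate model curve for cross-correlation
xcc = gcc_np[:, 0]
ampcc = np.correlate(a - np.average(a), v - np.average(v), mode="valid")
if normalize:
    ampcc /= len(a) * countrate**2
ycc = ampcc * np.exp(-xcc / taudiff)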