def test_cc_m():
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    arrs = get_sample_arrays_cplx()
    ms = [4, 8, 10, 16, 20, 64, 128]
    a = np.concatenate(arrs)

    res = []
    for m in ms:
        r = multipletau.correlate(a=a, v=a,
                                  m=m,
                                  deltat=1,
                                  normalize=False,
                                  copy=True,
                                  dtype=np.complex_)
        res.append(r)

        # test minimal length of array
        _r2 = multipletau.correlate(a=a[:2*m], v=a[:2*m],
                                    m=m,
                                    deltat=1,
                                    normalize=False,
                                    copy=True,
                                    dtype=np.complex_)

    res = np.concatenate(res)
    # np.save(os.path.dirname(__file__)
    #         + "/data/" + os.path.basename(__file__) + "_" + myname + ".npy",
    #         res)
    ref = get_reference_data(myname, __file__)

    assert np.all(res == ref)
def test_cc_copy():
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    arrs = get_sample_arrays_cplx()

    res1 = []
    for a in arrs:
        r = multipletau.correlate(a=a, v=a,
                                  m=16,
                                  deltat=1,
                                  normalize=True,
                                  copy=True)
        res1.append(r)

    res2 = []
    for a in arrs:
        r = multipletau.correlate(a=a, v=a,
                                  m=16,
                                  deltat=1,
                                  normalize=True,
                                  copy=False)
        res2.append(r)

    # simple test if result is the same
    assert np.all(np.concatenate(res1) == np.concatenate(res2))

    arrs = np.concatenate(arrs)
    refarrs = np.concatenate(get_sample_arrays_cplx())

    # make sure the copy function really changes something
    assert not np.all(arrs == refarrs)
def test_ac_cc_m():
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    arrs = get_sample_arrays()
    ms = [8, 16, 32, 64, 128]
    a = np.concatenate(arrs)

    res = []
    for m in ms:
        r = multipletau.autocorrelate(a=a,
                                      m=m,
                                      deltat=1,
                                      normalize=False,
                                      copy=True,
                                      dtype=np.float_)
        res.append(r)
    res = np.concatenate(res)

    rescc = []
    for m in ms:
        r = multipletau.correlate(a=a, v=a,
                                  m=m,
                                  deltat=1,
                                  normalize=False,
                                  copy=True,
                                  dtype=np.float_)
        rescc.append(r)

        # test minimal length of array
        _r2 = multipletau.correlate(a=a[:2*m], v=a[:2*m],
                                    m=m,
                                    deltat=1,
                                    normalize=False,
                                    copy=True,
                                    dtype=np.float_)

    rescc = np.concatenate(rescc)
    assert np.all(res == rescc)
def test_ac_cc_simple():
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    arrs = get_sample_arrays()

    rescc = []
    for a in arrs:
        r = multipletau.correlate(a=a, v=a,
                                  m=16,
                                  deltat=1,
                                  normalize=False,
                                  copy=True,
                                  dtype=np.float_)
        rescc.append(r)
    rescc = np.concatenate(rescc)

    resac = []
    for a in arrs:
        r = multipletau.autocorrelate(a=a,
                                      m=16,
                                      deltat=1,
                                      normalize=False,
                                      copy=True,
                                      dtype=np.float_)
        resac.append(r)
    resac = np.concatenate(resac)

    assert np.all(resac == rescc)
def test_cc_m_wrong():
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    a = get_sample_arrays_cplx()[0]

    # odd and non-integer values of m should yield the same result as m=16
    r1 = multipletau.correlate(a=a, v=a, m=16, deltat=1, normalize=True,
                               copy=True)
    r2 = multipletau.correlate(a=a, v=a, m=15, deltat=1, normalize=True,
                               copy=True)
    r3 = multipletau.correlate(a=a, v=a, m=15.5, deltat=1, normalize=True,
                               copy=True)
    r4 = multipletau.correlate(a=a, v=a, m=14.5, deltat=1, normalize=True,
                               copy=True)
    r5 = multipletau.correlate(a=a, v=a, m=16., deltat=1, normalize=True,
                               copy=True)

    assert np.all(r1 == r2)
    assert np.all(r1 == r3)
    assert np.all(r1 == r4)
    assert np.all(r1 == r5)
def test_cc():
    ist = correlate(range(42), range(1, 43), m=2, dtype=np.float_)
    soll = np.array([[0.00000000e+00, 2.46820000e+04],
                     [1.00000000e+00, 2.38210000e+04],
                     [2.00000000e+00, 2.29600000e+04],
                     [4.00000000e+00, 2.12325000e+04],
                     [8.00000000e+00, 1.58508000e+04]])
    assert np.allclose(soll, ist)
def test_cc_compress_first():
    ist = correlate(range(42), range(1, 43), m=2, dtype=np.float_,
                    compress="first")
    soll = np.array([[0.00000e+00, 2.46820e+04],
                     [1.00000e+00, 2.38210e+04],
                     [2.00000e+00, 2.29600e+04],
                     [4.00000e+00, 2.04440e+04],
                     [8.00000e+00, 1.39104e+04]])
    assert np.allclose(soll, ist)
def test_cc_compress_second():
    ist = correlate(range(42), range(1, 43), m=2, dtype=np.float_,
                    compress="second")
    soll = np.array([[0.00000e+00, 2.46820e+04],
                     [1.00000e+00, 2.38210e+04],
                     [2.00000e+00, 2.29600e+04],
                     [4.00000e+00, 2.20400e+04],
                     [8.00000e+00, 1.79424e+04]])
    assert np.allclose(soll, ist)
def test_cc_compress_average():
    ist, ist_count = correlate(range(42), range(1, 43), m=2, dtype=np.float_,
                               ret_sum=True)
    soll = np.array([[0.000000e+00, 2.468200e+04],
                     [1.000000e+00, 2.382100e+04],
                     [2.000000e+00, 2.296000e+04],
                     [4.000000e+00, 1.061625e+04],
                     [8.000000e+00, 3.774000e+03]])
    soll_count = [42., 41., 40., 19., 8.]
    assert np.allclose(soll, ist)
    assert np.allclose(soll_count, ist_count)
def test_corresponds_ac_first_loop():
    """
    numpy correlation:
    G_m = sum_i(a_i * a_{i+m})

    multipletau correlation 2nd order, after one binning step
    b_j = (a_{2j} + a_{2j+1}) / 2 (lag m corresponds to m/2 bins):
    G_m = sum_j(b_j * b_{j+m/2})
        = 1/4 * sum_i(a_{2i}   * a_{2i+m} +
                      a_{2i}   * a_{2i+m+1} +
                      a_{2i+1} * a_{2i+m} +
                      a_{2i+1} * a_{2i+m+1})

    The values after the first m+1 lag times in the multiple-tau
    correlation differ from the normal correlation, because the traces
    are averaged over two consecutive items, effectively halving the
    size of the trace.

    The multiple-tau correlation can be compared to the regular
    correlation by using an even-sized sequence (here 222) in which the
    elements 2i and 2i+1 are equal, as is done in this test.
    """
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    a = [arr / np.average(arr) for arr in get_sample_arrays_cplx()]
    a = np.concatenate(a)[:222]

    # two consecutive elements are the same, so the multiple-tau method
    # corresponds to the numpy correlation for the first loop.
    a[::2] = a[1::2]

    for m in [2, 4, 6, 8, 10, 12, 14, 16]:
        restau = multipletau.correlate(a=a,
                                       v=a.imag + 1j*a.real,
                                       m=m,
                                       copy=True,
                                       normalize=False,
                                       dtype=np.complex256)

        reslin = multipletau.correlate_numpy(a=a,
                                             v=a.imag + 1j*a.real,
                                             copy=True,
                                             normalize=False,
                                             dtype=np.complex256)

        idtau = np.where(restau[:, 0] == m + 2)[0][0]
        tau3 = restau[idtau, 1]  # m+1 initial bins

        idref = np.where(reslin[:, 0] == m + 2)[0][0]
        tau3ref = reslin[idref, 1]

        assert np.allclose(tau3, tau3ref)
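# Illustrative sketch (not part of the original test suite): a small,
# numpy-only check of the binning identity stated in the docstring above.
# With b_j = (a_{2j} + a_{2j+1}) / 2, the lag-(m/2) correlation of the
# binned trace equals 1/4 of the four-term sum over the original trace.
# The helper name `_binning_identity_demo` and the chosen trace length,
# seed, and lag are made up for this example.
def _binning_identity_demo():
    import numpy as np
    rng = np.random.default_rng(42)
    a = rng.random(64)             # even-length trace
    b = (a[::2] + a[1::2]) / 2     # one multiple-tau binning step
    m = 4                          # lag in units of the original sampling
    k = m // 2                     # corresponding lag in binned units
    n = len(b) - k                 # number of products in the sum

    # left-hand side: correlation of the binned trace at lag k = m/2
    lhs = np.sum(b[:n] * b[k:k + n])

    # right-hand side: 1/4 of the four-term sum over the original trace
    rhs = 0.25 * np.sum(a[0:2*n:2] * a[m:m + 2*n:2]
                        + a[0:2*n:2] * a[m + 1:m + 1 + 2*n:2]
                        + a[1:2*n:2] * a[m:m + 2*n:2]
                        + a[1:2*n:2] * a[m + 1:m + 1 + 2*n:2])

    assert np.allclose(lhs, rhs)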
def test_cc_dtype():
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    a = np.round(get_sample_arrays_cplx()[0].real)

    # integer
    rf = multipletau.correlate(a=a, v=a,
                               m=16,
                               deltat=1,
                               normalize=True,
                               copy=True,
                               dtype=np.float_)
    ri = multipletau.correlate(a=a, v=a,
                               m=16,
                               deltat=1,
                               normalize=True,
                               copy=True,
                               dtype=np.int_)
    ri2 = multipletau.correlate(a=np.array(a, dtype=np.int_),
                                v=np.array(a, dtype=np.int_),
                                m=16,
                                deltat=1,
                                normalize=True,
                                copy=True,
                                dtype=None)

    assert ri.dtype == np.dtype(np.float_), \
        "if wrong dtype, dtype should default to np.float_"
    assert ri2.dtype == np.dtype(np.float_), \
        "if wrong dtype, dtype should default to np.float_"
    assert np.all(rf == ri), \
        "result should be the same, because input is the same"
    assert np.all(rf == ri2), \
        "result should be the same, because input is the same"
def test_cc_dtype2():
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    a = np.round(get_sample_arrays_cplx()[0])

    print("this should issue a warning of unequal input dtypes, "
          "casting to complex")
    rf = multipletau.correlate(a=a.real, v=a,
                               m=16,
                               deltat=1,
                               normalize=True,
                               copy=True)
    assert np.dtype(rf.dtype) == np.dtype(np.complex_)

    print("this should issue a warning of unequal input dtypes, "
          "casting to float")
    rf2 = multipletau.correlate(a=a.real,
                                v=np.array(a.imag, dtype=np.int_),
                                m=16,
                                deltat=1,
                                normalize=True,
                                copy=True)
    assert np.dtype(rf2.dtype) == np.dtype(np.float_)
def test_cc_simple():
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    arrs = get_sample_arrays_cplx()

    res = []
    for a in arrs:
        r = multipletau.correlate(a=a, v=a,
                                  m=16,
                                  deltat=1,
                                  normalize=False,
                                  copy=True,
                                  dtype=np.complex_)
        res.append(r)
    res = np.concatenate(res)

    # np.save(os.path.dirname(__file__)
    #         + "/data/" + os.path.basename(__file__) + "_" + myname + ".npy",
    #         res)
    ref = get_reference_data(myname, __file__)

    assert np.allclose(res, ref, atol=0, rtol=1e-15)

    # also check result of autocorrelate
    res2 = []
    for a in arrs:
        r = multipletau.autocorrelate(a=a,
                                      m=16,
                                      deltat=1,
                                      normalize=False,
                                      copy=True,
                                      dtype=np.complex_)
        res2.append(r)
    res2 = np.concatenate(res2)

    assert np.allclose(res, res2, atol=0, rtol=1e-15)
def test_cc_normalize():
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    arrs = get_sample_arrays_cplx()

    res = []
    for a in arrs:
        r = multipletau.correlate(a=a.real, v=a.imag,
                                  m=16,
                                  deltat=1,
                                  normalize=True,
                                  copy=True,
                                  dtype=np.float_)
        res.append(r)
    res = np.concatenate(res)

    # np.save(os.path.dirname(__file__)
    #         + "/data/" + os.path.basename(__file__) + "_" + myname + ".npy",
    #         res)
    ref = get_reference_data(myname, __file__)

    assert np.allclose(res, ref, atol=0, rtol=1e-14)
def test_corresponds_cc_nonormalize():
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    a = np.concatenate(get_sample_arrays_cplx())
    m = 16

    restau = multipletau.correlate(a=a,
                                   v=a.imag + 1j*a.real,
                                   m=m,
                                   copy=True,
                                   normalize=False,
                                   dtype=np.complex256)

    reslin = multipletau.correlate_numpy(a=a,
                                         v=a.imag + 1j*a.real,
                                         copy=True,
                                         normalize=False,
                                         dtype=np.complex256)

    idx = np.array(restau[:, 0].real, dtype=int)[:m + 1]

    assert np.allclose(reslin[idx, 1], restau[:m + 1, 1])
def compare_corr():
    ## Starting parameters
    N = int(np.pi * 1e3)
    countrate = 250. * 1e-3  # in Hz
    taudiff = 55.  # in us
    deltat = 2e-6  # time discretization [s]
    normalize = True

    # time factor
    taudiff *= deltat

    # only compare against numpy.correlate for short traces
    do_np_corr = N < 1e5

    ## Autocorrelation
    print("Creating noise for autocorrelation")
    data = noise_exponential(N, taudiff, deltat=deltat)
    data -= np.average(data)
    if normalize:
        data += countrate
    # multipletau
    print("Performing autocorrelation (multipletau).")
    G = autocorrelate(data, deltat=deltat, normalize=normalize)
    # numpy.correlate for comparison
    if do_np_corr:
        print("Performing autocorrelation (numpy).")
        Gd = correlate_numpy(data, data, deltat=deltat, normalize=normalize)
    else:
        Gd = G

    ## Cross-correlation
    print("Creating noise for cross-correlation")
    a, v = noise_cross_exponential(N, taudiff, deltat=deltat)
    a -= np.average(a)
    v -= np.average(v)
    if normalize:
        a += countrate
        v += countrate
    # forward and backward cross-correlation (multipletau)
    Gccforw = correlate(a, v, deltat=deltat, normalize=normalize)
    Gccback = correlate(v, a, deltat=deltat, normalize=normalize)
    if do_np_corr:
        print("Performing cross-correlation (numpy).")
        Gdccforw = correlate_numpy(a, v, deltat=deltat, normalize=normalize)

    ## Calculate the model curve for cross-correlation
    xcc = Gd[:, 0]
    ampcc = np.correlate(a - np.average(a), v - np.average(v), mode="valid")
    if normalize:
        ampcc /= len(a) * countrate**2
    ycc = ampcc * np.exp(-xcc / taudiff)

    ## Calculate the model curve for autocorrelation
    x = Gd[:, 0]
    amp = np.correlate(data - np.average(data), data - np.average(data),
                       mode="valid")
    if normalize:
        amp /= len(data) * countrate**2
    y = amp * np.exp(-x / taudiff)

    ## Plotting
    # AC
    fig = plt.figure()
    fig.canvas.manager.set_window_title('testing multipletau')
    ax = fig.add_subplot(2, 1, 1)
    ax.set_xscale('log')
    if do_np_corr:
        plt.plot(Gd[:, 0], Gd[:, 1], "-", color="gray",
                 label="correlate (numpy)")
    plt.plot(x, y, "g-", label="input model")
    plt.plot(G[:, 0], G[:, 1], "-", color="#B60000", label="autocorrelate")
    plt.xlabel("lag channel")
    plt.ylabel("autocorrelation")
    plt.legend(loc=0, fontsize='small')
    plt.ylim(-amp * .2, amp * 1.2)
    plt.xlim(Gd[0, 0], Gd[-1, 0])

    # CC
    ax = fig.add_subplot(2, 1, 2)
    ax.set_xscale('log')
    if do_np_corr:
        plt.plot(Gdccforw[:, 0], Gdccforw[:, 1], "-", color="gray",
                 label="forward (numpy)")
    plt.plot(xcc, ycc, "g-", label="input model")
    plt.plot(Gccforw[:, 0], Gccforw[:, 1], "-", color="#B60000",
             label="forward")
    plt.plot(Gccback[:, 0], Gccback[:, 1], "-", color="#5D00B6",
             label="backward")
    plt.xlabel("lag channel")
    plt.ylabel("cross-correlation")
    plt.legend(loc=0, fontsize='small')
    plt.ylim(-ampcc * .2, ampcc * 1.2)
    plt.xlim(Gd[0, 0], Gd[-1, 0])

    plt.tight_layout()
    savename = __file__[:-3] + ".png"
    if os.path.exists(savename):
        savename = __file__[:-3] + time.strftime("_%Y-%m-%d_%H-%M-%S.png")
    plt.savefig(savename)
    print("Saved output to", savename)
def test():
    import numpy as np
    import os
    import sys
    from matplotlib import pylab as plt
    sys.path.append(os.path.realpath(os.path.dirname(__file__) + "/../"))
    from multipletau import autocorrelate, correlate, correlate_numpy

    ## Starting parameters
    N = int(np.pi * 1e3)
    countrate = 250. * 1e-3  # in Hz
    taudiff = 55.  # in us
    deltat = 2e-6  # time discretization [s]
    normalize = True

    # time factor
    taudiff *= deltat

    ##
    ## Autocorrelation
    ##
    print("Creating noise for autocorrelation")
    data = noise_exponential(N, taudiff, deltat=deltat)
    data -= np.average(data)
    if normalize:
        data += countrate
    # multipletau
    print("Performing autocorrelation (multipletau).")
    G = autocorrelate(data, deltat=deltat, normalize=normalize)
    # numpy.correlate for comparison
    if len(data) < 1e5:
        print("Performing autocorrelation (numpy).")
        Gd = correlate_numpy(data, data, deltat=deltat, normalize=normalize)

    # Calculate the expected curve
    x = G[:, 0]
    amp = np.correlate(data - np.average(data), data - np.average(data),
                       mode="valid")
    if normalize:
        amp /= len(data) * countrate**2
    y = amp * np.exp(-x / taudiff)

    ##
    ## Cross-correlation
    ##
    print("Creating noise for cross-correlation")
    a, v = noise_cross_exponential(N, taudiff, deltat=deltat)
    a -= np.average(a)
    v -= np.average(v)
    if normalize:
        a += countrate
        v += countrate
    # multipletau
    Gccforw = correlate(a, v, deltat=deltat, normalize=normalize)
    Gccback = correlate(v, a, deltat=deltat, normalize=normalize)
    if len(a) < 1e5:
        print("Performing cross-correlation (numpy).")
        Gdccforw = correlate_numpy(a, v, deltat=deltat, normalize=normalize)

    # Calculate the expected curve
    xcc = Gccforw[:, 0]
    ampcc = np.correlate(a - np.average(a), v - np.average(v), mode="valid")
    if normalize:
        ampcc /= len(a) * countrate**2
    ycc = ampcc * np.exp(-xcc / taudiff)

    ##
    ## Plotting
    ##
    # AC
    fig = plt.figure()
    fig.canvas.manager.set_window_title('testing multipletau')
    ax = fig.add_subplot(2, 1, 1)
    ax.set_xscale('log')
    plt.plot(x, y, "g-", label="input model")
    plt.plot(G[:, 0], G[:, 1], "r-", label="autocorrelate")
    if len(data) < 1e5:
        plt.plot(Gd[:, 0], Gd[:, 1], "b--", label="correlate (numpy)")
    plt.xlabel("lag channel")
    plt.ylabel("autocorrelation")
    plt.legend(loc=0, fontsize='small')
    plt.ylim(-amp * .2, amp * 1.2)

    ## CC
    ax = fig.add_subplot(2, 1, 2)
    ax.set_xscale('log')
    plt.plot(xcc, ycc, "g-", label="input model")
    plt.plot(Gccforw[:, 0], Gccforw[:, 1], "r-", label="forward")
    if len(data) < 1e5:
        plt.plot(Gdccforw[:, 0], Gdccforw[:, 1], "b--",
                 label="forward (numpy)")
    plt.plot(Gccback[:, 0], Gccback[:, 1], "r--", label="backward")
    plt.xlabel("lag channel")
    plt.ylabel("cross-correlation")
    plt.legend(loc=0, fontsize='small')
    plt.ylim(-ampcc * .2, ampcc * 1.2)

    plt.tight_layout()
    plt.show()