def test_ac_dtype():
    """Integer input: invalid/implicit dtypes must fall back to float.

    A float request, an (invalid for correlation output) unsigned-int
    request, and an implicit dtype on unsigned-int input must all yield
    the same float64 result.
    """
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    a = np.round(get_sample_arrays()[0])  # integer-valued sample data
    # `np.float` was removed in NumPy 1.24; np.float64 is the concrete dtype.
    rf = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=True,
                                   copy=True, dtype=np.float64)
    ri = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=True,
                                   copy=True, dtype=np.uint)
    ri2 = multipletau.autocorrelate(a=np.array(a, dtype=np.uint), m=16,
                                    deltat=1, normalize=True, copy=True,
                                    dtype=None)
    assert ri.dtype == np.dtype(np.float64), \
        "if wrong dtype, dtype should default to np.float64"
    assert ri2.dtype == np.dtype(np.float64), \
        "if wrong dtype, dtype should default to np.float64"
    assert np.all(rf == ri), \
        "result should be the same, because input is the same"
    assert np.all(rf == ri2), \
        "result should be the same, because input is the same"
def test_ac_m():
    """Autocorrelate with several register sizes `m` against reference data."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    arrs = get_sample_arrays()
    ms = [8, 16, 32, 64, 128]
    a = np.concatenate(arrs)
    res = []
    for m in ms:
        # `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
        r = multipletau.autocorrelate(a=a, m=m, deltat=1, normalize=False,
                                      copy=True, dtype=np.float64)
        res.append(r)
    # test minimal length of array (len(a) == 2*m is the allowed minimum)
    _r2 = multipletau.autocorrelate(a=a[:2*m], m=m, deltat=1,
                                    normalize=False, copy=True,
                                    dtype=np.float64)
    res = np.concatenate(res)
    # np.save(os.path.dirname(__file__)+"/data/"
    #         + os.path.basename(__file__)+"_"+myname+".npy", res)
    ref = get_reference_data(myname, __file__)
    assert np.allclose(res, ref, atol=0, rtol=1e-15)
def test_ac_dtype():
    """Integer input: invalid/implicit dtypes must default to float64."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    a = np.round(get_sample_arrays()[0])  # integer-valued sample data
    # `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
    rf = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=True,
                                   copy=True, dtype=np.float64)
    ri = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=True,
                                   copy=True, dtype=np.uint)
    ri2 = multipletau.autocorrelate(a=np.array(a, dtype=np.uint), m=16,
                                    deltat=1, normalize=True, copy=True,
                                    dtype=None)
    assert ri.dtype == np.dtype(np.float64), \
        "if wrong dtype, dtype should default to np.float64"
    assert ri2.dtype == np.dtype(np.float64), \
        "if wrong dtype, dtype should default to np.float64"
    assert np.all(rf == ri), \
        "result should be the same, because input is the same"
    assert np.all(rf == ri2), \
        "result should be the same, because input is the same"
def test_ac_m():
    """Autocorrelate with several register sizes `m` against reference data."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    arrs = get_sample_arrays()
    ms = [8, 16, 32, 64, 128]
    a = np.concatenate(arrs)
    res = []
    for m in ms:
        # `np.float` was removed in NumPy 1.24; np.float64 is the concrete dtype.
        r = multipletau.autocorrelate(a=a, m=m, deltat=1, normalize=False,
                                      copy=True, dtype=np.float64)
        res.append(r)
    # test minimal length of array
    _r2 = multipletau.autocorrelate(a=a[:2*m], m=m, deltat=1,
                                    normalize=False, copy=True,
                                    dtype=np.float64)
    res = np.concatenate(res)
    # np.save(os.path.dirname(__file__)+"/data/"
    #         + os.path.basename(__file__)+"_"+myname+".npy", res)
    ref = get_reference_data(myname, __file__)
    # Exact float equality against a saved reference is brittle across
    # platforms/BLAS versions; compare with a tight relative tolerance.
    assert np.allclose(res, ref, atol=0, rtol=1e-15)
def test_ac_copy():
    """copy=True and copy=False must yield identical results; copy=False
    is allowed to modify the input arrays in place."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    arrs = get_sample_arrays()
    res1 = []
    for a in arrs:
        # `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
        r = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=True,
                                      copy=True, dtype=np.float64)
        res1.append(r)
    res2 = []
    for a in arrs:
        r = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=True,
                                      copy=False, dtype=np.float64)
        res2.append(r)
    # simple test if result is the same
    assert np.all(np.concatenate(res1) == np.concatenate(res2))
    arrs = np.concatenate(arrs)
    refarrs = np.concatenate(get_sample_arrays())
    # make sure the copy function really changes something
    assert not np.all(arrs == refarrs)
def test_ac_copy():
    """copy=True and copy=False must yield identical results; copy=False
    is allowed to modify the input arrays in place."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    arrs = get_sample_arrays()
    res1 = []
    for a in arrs:
        # `np.float` was removed in NumPy 1.24; use np.float64.
        r = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=True,
                                      copy=True, dtype=np.float64)
        res1.append(r)
    res2 = []
    for a in arrs:
        r = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=True,
                                      copy=False, dtype=np.float64)
        res2.append(r)
    # simple test if result is the same
    assert np.all(np.concatenate(res1) == np.concatenate(res2))
    arrs = np.concatenate(arrs)
    refarrs = np.concatenate(get_sample_arrays())
    # make sure the copy function really changes something
    assert not np.all(arrs == refarrs)
def test_ac_trace0():
    """Normalization of a zero-average trace must raise ValueError."""
    trace = get_sample_arrays()[0]
    centered = trace - np.mean(trace)  # force average to (numerically) zero
    got_expected_error = False
    try:
        multipletau.autocorrelate(a=centered, normalize=True)
    except ValueError as err:
        got_expected_error = \
            "Cannot normalize: Average of `a` is zero!" in err.args
    assert got_expected_error
def test_ac_tracesize():
    """Traces shorter than 2*m must be rejected with ValueError."""
    trace = get_sample_arrays()[0]
    got_expected_error = False
    try:
        # 31 samples with m=16 violates the len(a) >= 2*m requirement
        multipletau.autocorrelate(a=trace[:31], m=16)
    except ValueError as err:
        got_expected_error = '`len(a)` must be >= `2m`!' in err.args
    assert got_expected_error
def test_ac_cc_m():
    """autocorrelate(a) and correlate(a, a) must agree for all m."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    arrs = get_sample_arrays()
    ms = [8, 16, 32, 64, 128]
    a = np.concatenate(arrs)
    res = []
    for m in ms:
        # `np.float` was removed in NumPy 1.24; use np.float64.
        r = multipletau.autocorrelate(a=a, m=m, deltat=1, normalize=False,
                                      copy=True, dtype=np.float64)
        res.append(r)
    res = np.concatenate(res)
    rescc = []
    for m in ms:
        r = multipletau.correlate(a=a, v=a, m=m, deltat=1, normalize=False,
                                  copy=True, dtype=np.float64)
        rescc.append(r)
        # test minimal length of array
        _r2 = multipletau.correlate(a=a[:2 * m], v=a[:2 * m], m=m, deltat=1,
                                    normalize=False, copy=True,
                                    dtype=np.float64)
    rescc = np.concatenate(rescc)
    assert np.all(res == rescc)
def test_ac_cc_normalize():
    """Normalized autocorrelate(a) and correlate(a, a) must agree."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    arrs = get_sample_arrays()
    res = []
    for a in arrs:
        # `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
        r = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=True,
                                      copy=True, dtype=np.float64)
        res.append(r)
    res = np.concatenate(res)
    rescc = []
    for a in arrs:
        r = multipletau.correlate(a=a, v=a, m=16, deltat=1, normalize=True,
                                  copy=True, dtype=np.float64)
        rescc.append(r)
    rescc = np.concatenate(rescc)
    assert np.all(res == rescc)
def test_ac_cc_simple():
    """Unnormalized autocorrelate(a) and correlate(a, a) must agree."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    arrs = get_sample_arrays()
    rescc = []
    for a in arrs:
        # `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
        r = multipletau.correlate(a=a, v=a, m=16, deltat=1, normalize=False,
                                  copy=True, dtype=np.float64)
        rescc.append(r)
    rescc = np.concatenate(rescc)
    resac = []
    for a in arrs:
        r = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=False,
                                      copy=True, dtype=np.float64)
        resac.append(r)
    resac = np.concatenate(resac)
    assert np.all(resac == rescc)
def test_ac_compress_second():
    """compress="second" (keep every second element) against known values."""
    # `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
    ist = autocorrelate(range(42), m=2, dtype=np.float64, compress="second")
    soll = np.array([[0.00000e+00, 2.38210e+04],
                     [1.00000e+00, 2.29600e+04],
                     [2.00000e+00, 2.21000e+04],
                     [4.00000e+00, 2.11660e+04],
                     [8.00000e+00, 1.71024e+04]])
    assert np.allclose(soll, ist)
def test_ac_compress_first():
    """compress="first" (keep every first element) against known values."""
    # `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
    ist = autocorrelate(range(42), m=2, dtype=np.float64, compress="first")
    soll = np.array([[0.00000e+00, 2.38210e+04],
                     [1.00000e+00, 2.29600e+04],
                     [2.00000e+00, 2.21000e+04],
                     [4.00000e+00, 1.96080e+04],
                     [8.00000e+00, 1.31712e+04]])
    assert np.allclose(soll, ist)
def test_ac_m_wrong():
    """Odd or fractional `m` values must be coerced to the same even int."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    a = get_sample_arrays()[0]  # integer-valued sample data
    # `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
    r1 = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=True,
                                   copy=True, dtype=np.float64)
    r2 = multipletau.autocorrelate(a=a, m=15, deltat=1, normalize=True,
                                   copy=True, dtype=np.float64)
    r3 = multipletau.autocorrelate(a=a, m=15.5, deltat=1, normalize=True,
                                   copy=True, dtype=np.float64)
    r4 = multipletau.autocorrelate(a=a, m=14.5, deltat=1, normalize=True,
                                   copy=True, dtype=np.float64)
    r5 = multipletau.autocorrelate(a=a, m=16., deltat=1, normalize=True,
                                   copy=True, dtype=np.float64)
    assert np.all(r1 == r2)
    assert np.all(r1 == r3)
    assert np.all(r1 == r4)
    assert np.all(r1 == r5)
def test_ac_m_wrong():
    """Odd or fractional `m` values must be coerced to the same even int."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    a = get_sample_arrays()[0]  # integer-valued sample data
    # `np.float` was removed in NumPy 1.24; use np.float64.
    r1 = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=True,
                                   copy=True, dtype=np.float64)
    r2 = multipletau.autocorrelate(a=a, m=15, deltat=1, normalize=True,
                                   copy=True, dtype=np.float64)
    r3 = multipletau.autocorrelate(a=a, m=15.5, deltat=1, normalize=True,
                                   copy=True, dtype=np.float64)
    r4 = multipletau.autocorrelate(a=a, m=14.5, deltat=1, normalize=True,
                                   copy=True, dtype=np.float64)
    r5 = multipletau.autocorrelate(a=a, m=16., deltat=1, normalize=True,
                                   copy=True, dtype=np.float64)
    assert np.all(r1 == r2)
    assert np.all(r1 == r3)
    assert np.all(r1 == r4)
    assert np.all(r1 == r5)
def test_ac():
    """Basic autocorrelation of range(42) against hard-coded values."""
    # `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
    ist = autocorrelate(range(42), m=2, dtype=np.float64)
    soll = np.array([[0.00000000e+00, 2.38210000e+04],
                     [1.00000000e+00, 2.29600000e+04],
                     [2.00000000e+00, 2.21000000e+04],
                     [4.00000000e+00, 2.03775000e+04],
                     [8.00000000e+00, 1.50612000e+04]])
    assert np.allclose(soll, ist)
def test_ac():
    """Basic autocorrelation of range(42) against hard-coded values."""
    result = autocorrelate(range(42), m=2, dtype=np.dtype(float))
    expected = np.array([[0.0, 23821.0],
                         [1.0, 22960.0],
                         [2.0, 22100.0],
                         [4.0, 20377.5],
                         [8.0, 15061.2]])
    assert np.allclose(expected, result)
def test_ac_compress_average():
    """compress="average" (the default behavior) against known values."""
    # `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
    ist = autocorrelate(range(42), m=2, dtype=np.float64, compress="average")
    soll = np.array([[0.00000000e+00, 2.38210000e+04],
                     [1.00000000e+00, 2.29600000e+04],
                     [2.00000000e+00, 2.21000000e+04],
                     [4.00000000e+00, 2.03775000e+04],
                     [8.00000000e+00, 1.50612000e+04]])
    assert np.allclose(soll, ist)
def test_ac_return_sum():
    """ret_sum=True returns the un-averaged sums plus the event counts."""
    # `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
    ist, ist_count = autocorrelate(range(42), m=2, dtype=np.float64,
                                   ret_sum=True)
    soll = np.array([[0.000000e+00, 2.382100e+04],
                     [1.000000e+00, 2.296000e+04],
                     [2.000000e+00, 2.210000e+04],
                     [4.000000e+00, 1.018875e+04],
                     [8.000000e+00, 3.586000e+03]])
    soll_count = [42., 41., 40., 19., 8.]
    assert np.allclose(soll, ist)
    assert np.allclose(soll_count, ist_count)
def test_ac():
    """Invalid keyword arguments to autocorrelate must raise ValueError."""
    trace = get_sample_arrays()[0]

    def expect_value_error(message, in_first_arg=False, **kwargs):
        # Call autocorrelate and demand a ValueError carrying `message`.
        try:
            multipletau.autocorrelate(a=trace, **kwargs)
        except ValueError as err:
            if in_first_arg:
                assert message in err.args[0]
            else:
                assert message in err.args
        else:
            assert False

    expect_value_error("`copy` must be boolean!", copy=2)
    expect_value_error("`ret_sum` must be boolean!", ret_sum=2)
    expect_value_error("`normalize` must be boolean!", normalize=2)
    expect_value_error("Invalid value for `compress`!", in_first_arg=True,
                       compress="peter")
    expect_value_error("'normalize' and 'ret_sum' must not both be True!",
                       normalize=True, ret_sum=True)
def mt_correction(self, t0=0, tf=4000000):
    '''
    Multiple tau correction.

    Computes normalized autocorrelations for each trace and the
    normalized cross-correlation between the first two traces, scaled
    by the decay factors.

    :param self: instance holding the traces (see NOTE below)
    :param t0: start index into each trace
    :param tf: end index (number of timepoint data)
    :return: tuple (autocors, xcor) — list of per-trace autocorrelation
             arrays, and the decay-corrected cross-correlation array
    '''
    autocors = []
    # calculation of autocorrelation
    # NOTE(review): `traces` is a free variable here — presumably this
    # should be `self.traces` (the docstring says "refer traces");
    # confirm against the class definition.
    for trace in traces:
        y_mtau = multipletau.autocorrelate(trace[t0:tf], normalize=True)
        autocors.append(y_mtau)
    # calculation of crosscorrelation between the first two traces
    xcor = multipletau.correlate(traces[0, t0:tf], traces[1, t0:tf], normalize=True)
    # NOTE(review): `decay_factors` is also a free variable — likely
    # `self.decay_factors`; verify before relying on this method.
    xcor = xcor / np.sqrt(np.mean(decay_factors[0])) / np.sqrt(np.mean(decay_factors[1]))
    return autocors, xcor
def test_ac_cc_m():
    """autocorrelate(a) and correlate(a, a) must agree for all m."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    arrs = get_sample_arrays()
    ms = [8, 16, 32, 64, 128]
    a = np.concatenate(arrs)
    res = []
    for m in ms:
        # `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
        r = multipletau.autocorrelate(a=a, m=m, deltat=1, normalize=False,
                                      copy=True, dtype=np.float64)
        res.append(r)
    res = np.concatenate(res)
    rescc = []
    for m in ms:
        r = multipletau.correlate(a=a, v=a, m=m, deltat=1, normalize=False,
                                  copy=True, dtype=np.float64)
        rescc.append(r)
        # test minimal length of array
        _r2 = multipletau.correlate(a=a[:2 * m], v=a[:2 * m], m=m, deltat=1,
                                    normalize=False, copy=True,
                                    dtype=np.float64)
    rescc = np.concatenate(rescc)
    assert np.all(res == rescc)
def test_ac_normalize():
    """Normalized autocorrelation against stored reference data."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    arrs = get_sample_arrays()
    res = []
    for a in arrs:
        # `np.float` was removed in NumPy 1.24; use np.float64.
        r = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=True,
                                      copy=True, dtype=np.float64)
        res.append(r)
    res = np.concatenate(res)
    # np.save(os.path.dirname(__file__)+"/data/"
    #         + os.path.basename(__file__)+"_"+myname+".npy", res)
    ref = get_reference_data(myname, __file__)
    # Exact float equality against a saved reference is brittle across
    # platforms; compare with a tight relative tolerance instead.
    assert np.allclose(res, ref, atol=0, rtol=1e-14)
def test_ac_normalize():
    """Normalized autocorrelation against stored reference data."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    arrs = get_sample_arrays()
    res = []
    for a in arrs:
        # `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
        r = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=True,
                                      copy=True, dtype=np.float64)
        res.append(r)
    res = np.concatenate(res)
    # np.save(os.path.dirname(__file__)+"/data/"
    #         + os.path.basename(__file__)+"_"+myname+".npy", res)
    ref = get_reference_data(myname, __file__)
    assert np.allclose(res, ref, atol=0, rtol=1e-14)
def test_cc_simple():
    """Complex cross-correlation against reference data and autocorrelate."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    arrs = get_sample_arrays_cplx()
    res = []
    for a in arrs:
        # `np.complex_` was removed in NumPy 2.0; np.complex128 is equivalent.
        r = multipletau.correlate(a=a, v=a, m=16, deltat=1, normalize=False,
                                  copy=True, dtype=np.complex128)
        res.append(r)
    res = np.concatenate(res)
    # np.save(os.path.dirname(__file__)
    #         + "/data/"+os.path.basename(__file__)+"_"+myname+".npy", res)
    ref = get_reference_data(myname, __file__)
    assert np.allclose(res, ref, atol=0, rtol=1e-15)
    # also check result of autocorrelate
    res2 = []
    for a in arrs:
        r = multipletau.autocorrelate(a=a, m=16, deltat=1, normalize=False,
                                      copy=True, dtype=np.complex128)
        res2.append(r)
    res2 = np.concatenate(res2)
    assert np.allclose(res, res2, atol=0, rtol=1e-15)
def test_corresponds_ac_nonormalize():
    """First m+1 multiple-tau lags must match the linear numpy correlation."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    a = np.concatenate(get_sample_arrays_cplx()).real
    m = 16
    # `np.float128` does not exist on all platforms (e.g. Windows/ARM);
    # np.longdouble is the portable spelling of extended precision.
    restau = multipletau.autocorrelate(a=1*a, m=m, copy=True,
                                       normalize=False, dtype=np.longdouble)
    reslin = multipletau.correlate_numpy(a=1*a, v=1*a, copy=True,
                                         normalize=False,
                                         dtype=np.longdouble)
    # the first m+1 lag channels of the multiple-tau result are linear
    idx = np.array(restau[:, 0].real, dtype=int)[:m+1]
    assert np.allclose(reslin[idx, 1], restau[:m+1, 1])
def test_corresponds_ac_nonormalize():
    """First m+1 multiple-tau lags must match the linear numpy correlation."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    a = np.concatenate(get_sample_arrays_cplx()).real
    m = 16
    # `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
    restau = multipletau.autocorrelate(a=1 * a, m=m, copy=True,
                                       normalize=False, dtype=np.float64)
    reslin = multipletau.correlate_numpy(a=1 * a, v=1 * a, copy=True,
                                         normalize=False, dtype=np.float64)
    # the first m+1 lag channels of the multiple-tau result are linear
    idx = np.array(restau[:, 0].real, dtype=int)[:m + 1]
    assert np.allclose(reslin[idx, 1], restau[:m + 1, 1])
def mtAuto(data, fs=10E6, levels=16):
    """Normalized multiple-tau autocorrelation with the curve shifted up
    by one (so the uncorrelated baseline sits at 1 instead of 0).

    :param data: input signal
    :param fs: sampling frequency used to derive the bin width (1/fs)
    :param levels: number of registers `m` for the multiple-tau algorithm
    :return: 2-column array [lag time, G(lag) + 1]
    """
    result = mt.autocorrelate(data, m=levels, deltat=1.0 / fs,
                              normalize=True)
    result[:, 1] += 1
    return result
# NOTE(review): this appears to be a pasted interactive/benchmark session,
# not a runnable module — it mixes Python statements with a shell command
# (`python3 -m timeit ...`) and references undefined names (`Parser`,
# `data`, `Autocorrelate`). Kept verbatim; confirm whether it belongs here.
spadData, aData, dData = Parser.parseAll(data);
import numpy as np
import Autocorrelate
import multipletau as mt
# Build a rounded random test signal and compare the custom
# Autocorrelate.multipleTau output column against multipletau's.
data = np.random.rand(32768);
data = np.round(data);
normalize=True;
levels=34
out = Autocorrelate.multipleTau(data, levels, 1, normalize);
out = np.asarray(out);
out2 = mt.autocorrelate(data, m=levels, normalize=normalize);
out2[:,0]=out;
out2;
# maximum absolute deviation between the two implementations
np.max(np.abs(out2[:,0]-out2[:,1]));
# NOTE(review): the following is a shell command, not Python — it will not
# parse if this file is imported.
python3 -m timeit -s "import numpy as np
import Autocorrelate
import multipletau as mt
data = np.random.rand(3276800);
data = np.round(data);
normalize=True;
levels=32" "out = Autocorrelate.multipleTau(data, levels, normalize);"
def test():
    """Visual sanity check: compare multipletau auto-/cross-correlation
    of exponentially correlated noise against numpy.correlate and the
    analytic exponential model, then plot both panels."""
    import numpy as np
    import os
    import sys
    from matplotlib import pylab as plt
    sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/../"))
    from multipletau import autocorrelate, correlate, correlate_numpy
    # Starting parameters
    # `np.int` was removed in NumPy 1.24; the builtin int is equivalent.
    N = int(np.pi*1e3)
    countrate = 250. * 1e-3  # in Hz
    taudiff = 55.            # in us
    deltat = 2e-6            # time discretization [s]
    normalize = True
    # time factor
    taudiff *= deltat
    #
    # Autocorrelation
    #
    print("Creating noise for autocorrelation")
    data = noise_exponential(N, taudiff, deltat=deltat)
    data += - np.average(data)
    if normalize:
        data += countrate
    # multipletau
    print("Performing autocorrelation (multipletau).")
    G = autocorrelate(data, deltat=deltat, normalize=normalize)
    # numpy.correlate for comparison (only feasible for short traces)
    if len(data) < 1e5:
        print("Performing autocorrelation (numpy).")
        Gd = correlate_numpy(data, data, deltat=deltat, normalize=normalize)
    # Calculate the expected curve
    x = G[:, 0]
    amp = np.correlate(data-np.average(data), data-np.average(data),
                       mode="valid")
    if normalize:
        amp /= len(data) * countrate**2
    y = amp*np.exp(-x/taudiff)
    #
    # Cross-correlation
    #
    print("Creating noise for cross-correlation")
    a, v = noise_cross_exponential(N, taudiff, deltat=deltat)
    a += - np.average(a)
    v += - np.average(v)
    if normalize:
        a += countrate
        v += countrate
    # multipletau
    Gccforw = correlate(a, v, deltat=deltat, normalize=normalize)
    Gccback = correlate(v, a, deltat=deltat, normalize=normalize)
    if len(a) < 1e5:
        print("Performing autocorrelation (numpy).")
        Gdccforw = correlate_numpy(a, v, deltat=deltat, normalize=normalize)
    # Calculate the expected curve
    xcc = Gccforw[:, 0]
    ampcc = np.correlate(a-np.average(a), v-np.average(v), mode="valid")
    if normalize:
        ampcc /= len(a) * countrate**2
    ycc = ampcc*np.exp(-xcc/taudiff)
    #
    # Plotting
    #
    # AC
    fig = plt.figure()
    # NOTE(review): fig.canvas.set_window_title was deprecated in
    # Matplotlib 3.4 (use fig.canvas.manager.set_window_title) — confirm
    # the supported Matplotlib version before changing.
    fig.canvas.set_window_title('testing multipletau')
    ax = fig.add_subplot(2, 1, 1)
    ax.set_xscale('log')
    plt.plot(x, y, "g-", label="input model")
    plt.plot(G[:, 0], G[:, 1], "r-", label="autocorrelate")
    if len(data) < 1e5:
        plt.plot(Gd[:, 0], Gd[:, 1], "b--", label="correlate (numpy)")
    plt.xlabel("lag channel")
    plt.ylabel("autocorrelation")
    plt.legend(loc=0, fontsize='small')
    plt.ylim(-amp*.2, amp*1.2)
    # CC
    ax = fig.add_subplot(2, 1, 2)
    ax.set_xscale('log')
    plt.plot(xcc, ycc, "g-", label="input model")
    plt.plot(Gccforw[:, 0], Gccforw[:, 1], "r-", label="forward")
    if len(data) < 1e5:
        plt.plot(Gdccforw[:, 0], Gdccforw[:, 1], "b--",
                 label="forward (numpy)")
    plt.plot(Gccback[:, 0], Gccback[:, 1], "r--", label="backward")
    plt.xlabel("lag channel")
    plt.ylabel("cross-correlation")
    plt.legend(loc=0, fontsize='small')
    plt.ylim(-ampcc*.2, ampcc*1.2)
    plt.tight_layout()
    plt.show()
def compare_corr():
    """Compare multipletau auto-/cross-correlation of exponentially
    correlated noise against numpy.correlate and the analytic model,
    saving the resulting two-panel figure as a PNG."""
    # Starting parameters
    # `np.int` was removed in NumPy 1.24; the builtin int is equivalent.
    N = int(np.pi*1e3)
    countrate = 250. * 1e-3  # in Hz
    taudiff = 55.            # in us
    deltat = 2e-6            # time discretization [s]
    normalize = True
    # time factor
    taudiff *= deltat
    # numpy.correlate is only feasible for short traces
    if N < 1e5:
        do_np_corr = True
    else:
        do_np_corr = False
    # Autocorrelation
    print("Creating noise for autocorrelation")
    data = noise_exponential(N, taudiff, deltat=deltat)
    data -= np.average(data)
    if normalize:
        data += countrate
    # multipletau
    print("Performing autocorrelation (multipletau).")
    G = autocorrelate(data, deltat=deltat, normalize=normalize)
    # numpy.correlate for comparison
    if do_np_corr:
        print("Performing autocorrelation (numpy).")
        Gd = correlate_numpy(data, data, deltat=deltat, normalize=normalize)
    else:
        Gd = G
    # Cross-correlation
    print("Creating noise for cross-correlation")
    a, v = noise_cross_exponential(N, taudiff, deltat=deltat)
    a -= np.average(a)
    v -= np.average(v)
    if normalize:
        a += countrate
        v += countrate
    Gccforw = correlate(a, v, deltat=deltat, normalize=normalize)  # forward
    Gccback = correlate(v, a, deltat=deltat, normalize=normalize)  # backward
    if do_np_corr:
        print("Performing cross-correlation (numpy).")
        Gdccforw = correlate_numpy(a, v, deltat=deltat, normalize=normalize)
    # Calculate the model curve for cross-correlation
    xcc = Gd[:, 0]
    ampcc = np.correlate(a-np.average(a), v-np.average(v), mode="valid")
    if normalize:
        ampcc /= len(a) * countrate**2
    ycc = ampcc*np.exp(-xcc/taudiff)
    # Calculate the model curve for autocorrelation
    x = Gd[:, 0]
    amp = np.correlate(data-np.average(data), data-np.average(data),
                       mode="valid")
    if normalize:
        amp /= len(data) * countrate**2
    y = amp*np.exp(-x/taudiff)
    # Plotting
    # AC
    fig = plt.figure()
    # NOTE(review): fig.canvas.set_window_title was deprecated in
    # Matplotlib 3.4 (use fig.canvas.manager.set_window_title) — confirm
    # the supported Matplotlib version before changing.
    fig.canvas.set_window_title('testing multipletau')
    ax = fig.add_subplot(2, 1, 1)
    ax.set_xscale('log')
    if do_np_corr:
        plt.plot(Gd[:, 0], Gd[:, 1], "-", color="gray",
                 label="correlate (numpy)")
    plt.plot(x, y, "g-", label="input model")
    plt.plot(G[:, 0], G[:, 1], "-", color="#B60000", label="autocorrelate")
    plt.xlabel("lag channel")
    plt.ylabel("autocorrelation")
    plt.legend(loc=0, fontsize='small')
    plt.ylim(-amp*.2, amp*1.2)
    plt.xlim(Gd[0, 0], Gd[-1, 0])
    # CC
    ax = fig.add_subplot(2, 1, 2)
    ax.set_xscale('log')
    if do_np_corr:
        plt.plot(Gdccforw[:, 0], Gdccforw[:, 1], "-", color="gray",
                 label="forward (numpy)")
    plt.plot(xcc, ycc, "g-", label="input model")
    plt.plot(Gccforw[:, 0], Gccforw[:, 1], "-", color="#B60000",
             label="forward")
    plt.plot(Gccback[:, 0], Gccback[:, 1], "-", color="#5D00B6",
             label="backward")
    plt.xlabel("lag channel")
    plt.ylabel("cross-correlation")
    plt.legend(loc=0, fontsize='small')
    plt.ylim(-ampcc*.2, ampcc*1.2)
    plt.xlim(Gd[0, 0], Gd[-1, 0])
    plt.tight_layout()
    # avoid overwriting an existing figure: fall back to a timestamped name
    savename = __file__[:-3]+".png"
    if os.path.exists(savename):
        savename = __file__[:-3]+time.strftime("_%Y-%m-%d_%H-%M-%S.png")
    plt.savefig(savename)
    print("Saved output to", savename)
# Script fragment: build exponentially correlated noise, autocorrelate it
# with multipletau and numpy, and compute the analytic model curve.
# `np.int` was removed in NumPy 1.24; the builtin int is equivalent.
N = int(np.pi * 1e3)
countrate = 250. * 1e-3  # in Hz
taudiff = 55.            # in us
deltat = 2e-6            # time discretization [s]
normalize = True
# time factor
taudiff *= deltat
# create noise for autocorrelation
data = noise_exponential(N, taudiff, deltat=deltat)
data -= np.average(data)
if normalize:
    data += countrate
# perform autocorrelation (multipletau)
gac_mt = autocorrelate(data, deltat=deltat, normalize=normalize)
# numpy.correlate for comparison
gac_np = correlate_numpy(data, data, deltat=deltat, normalize=normalize)
# calculate model curve for autocorrelation
x = gac_np[:, 0]
amp = np.correlate(data - np.average(data), data - np.average(data),
                   mode="valid")
if normalize:
    amp /= len(data) * countrate**2
y = amp * np.exp(-x / taudiff)
# create noise for cross-correlation
a, v = noise_cross_exponential(N, taudiff, deltat=deltat)
a -= np.average(a)
v -= np.average(v)
# Script: load a recorded photon-count trace, autocorrelate the first
# differences, and plot the trace plus the correlation curve.
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from multipletau import autocorrelate

file_name = "data-2018-06-12 15:42:26.npy"
data = np.load(file_name)
print(len(data))
plt.figure(1)
plt.plot(data)
"""
plt.figure(2)
plt.plot(data[1:]-data[:-1])
plt.figure()
occ,val,dummy=plt.hist(data,256,(0,256),log=True)
"""
# Correlate the increments (first differences) of the cumulative counts.
# `np.float_` was removed in NumPy 2.0; np.float64 is equivalent.
G = autocorrelate(data[1:] - data[:-1], normalize=True, dtype=np.float64)
plt.figure()
# skip the first few (noisy, shortest-lag) channels when plotting
plt.semilogx(G[5:, 0], G[5:, 1])
plt.show()
# Script fragment: writes a trace section to an already-open CSV file,
# then generates exponentially correlated test noise, stores it as 16-bit
# and 32-bit binary .dat files, and saves a reference correlation .csv.
# NOTE(review): `fd`, `dataWriter`, `trace`, `GenerateExpNoise`, `MakeDat`,
# `ReduceTrace` and `SaveCSV` are defined elsewhere (not visible here) —
# verify their contracts before modifying this section.
fd.write('# BEGIN TRACE \r\n')
fd.write('# Time ([s])' + " \t," 'Intensity Trace [kHz]' + " \r\n")
for i in np.arange(len(trace)):
    dataWriter.writerow([str(trace[i, 0]) + " \t", str(trace[i, 1])])
# Line time to be found by SFCS analyzation software
linetime = 0.714  # in ms
# Time of exponentially correlated noise
taudiff = 7.  # in ms
noisearray = GenerateExpNoise(200000, taud=taudiff / linetime)
# shift to non-negative values and rescale to a 0..30 count range
noisearray += np.abs(np.min(noisearray))
noisearray *= 30. / np.max(noisearray)
noisearray = np.uint32(noisearray)
# Create 32bit and 16bit binary .dat files
data = MakeDat(linetime / 1000, noisearray, np.uint16,
               "test_" + str(taudiff) + "ms_16bit.dat")
data = MakeDat(linetime / 1000, noisearray, np.uint32,
               "test_" + str(taudiff) + "ms_32bit.dat")
# Create reference .csv file to check results
G = multipletau.autocorrelate(
    noisearray, deltat=linetime / 1000, normalize=True)
newtrace = ReduceTrace(noisearray, deltat=linetime, length=500)
SaveCSV(G, newtrace, "test_" + str(taudiff) + "ms_reference.csv")