示例#1
0
def test_cc_m():
    """Cross-correlation of a complex trace for several register sizes `m`."""
    myname = sys._getframe().f_code.co_name
    print("running ", myname)

    trace = np.concatenate(get_sample_arrays_cplx())

    results = []
    for m in [4, 8, 10, 16, 20, 64, 128]:
        results.append(multipletau.correlate(a=trace,
                                             v=trace,
                                             m=m,
                                             deltat=1,
                                             normalize=False,
                                             copy=True,
                                             dtype=np.complex_))
        # the shortest admissible input has exactly 2*m samples
        multipletau.correlate(a=trace[:2 * m],
                              v=trace[:2 * m],
                              m=m,
                              deltat=1,
                              normalize=False,
                              copy=True,
                              dtype=np.complex_)

    res = np.concatenate(results)
    # reference data were generated once with np.save (see repo history)
    ref = get_reference_data(myname, __file__)

    assert np.allclose(res, ref, atol=0, rtol=1e-15)
示例#2
0
def test_cc_m():
    """Cross-correlation of a complex trace for several register sizes `m`.

    Fixes: the builtin-shadowing alias ``np.complex`` (deprecated in
    NumPy 1.20, removed in 1.24) is replaced by ``np.complex_``, and the
    fragile exact float comparison ``np.all(res == ref)`` is relaxed to
    a tight ``allclose`` (rtol=1e-15), matching the sibling version of
    this test.
    """
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    arrs = get_sample_arrays_cplx()

    ms = [4, 8, 10, 16, 20, 64, 128]
    a = np.concatenate(arrs)

    res = []
    for m in ms:
        r = multipletau.correlate(a=a,
                                  v=a,
                                  m=m,
                                  deltat=1,
                                  normalize=False,
                                  copy=True,
                                  dtype=np.complex_)
        res.append(r)

        # test minimal length of array (exactly 2*m samples)
        _r2 = multipletau.correlate(a=a[:2 * m],
                                    v=a[:2 * m],
                                    m=m,
                                    deltat=1,
                                    normalize=False,
                                    copy=True,
                                    dtype=np.complex_)

    res = np.concatenate(res)
    ref = get_reference_data(myname, __file__)

    assert np.allclose(res, ref, atol=0, rtol=1e-15)
示例#3
0
def test_ac_cc_m():
    """``autocorrelate(a)`` equals ``correlate(a, a)`` for several `m`.

    Fix: the builtin-shadowing alias ``np.float`` (deprecated in NumPy
    1.20, removed in 1.24) is replaced by ``np.float_``, consistent with
    the other tests in this module.
    """
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    arrs = get_sample_arrays()

    ms = [8, 16, 32, 64, 128]
    a = np.concatenate(arrs)

    res = []
    for m in ms:
        r = multipletau.autocorrelate(a=a, m=m, deltat=1, normalize=False,
                                      copy=True, dtype=np.float_)
        res.append(r)
    res = np.concatenate(res)

    rescc = []
    for m in ms:
        r = multipletau.correlate(a=a, v=a, m=m, deltat=1, normalize=False,
                                  copy=True, dtype=np.float_)
        rescc.append(r)
        # test minimal length of array
        _r2 = multipletau.correlate(a=a[:2 * m], v=a[:2 * m], m=m, deltat=1,
                                    normalize=False, copy=True,
                                    dtype=np.float_)

    rescc = np.concatenate(rescc)
    assert np.all(res == rescc)
示例#4
0
def test_cc_dtype2():
    """Mixed input dtypes are upcast: real+complex -> complex, real+int -> float."""
    print("running ", sys._getframe().f_code.co_name)

    trace = np.round(get_sample_arrays_cplx()[0])

    print(
        "this should issue a warning of unequal input dtypes, casting to complex"
    )
    mixed_cplx = multipletau.correlate(a=trace.real, v=trace, m=16, deltat=1,
                                       normalize=True, copy=True)
    assert np.dtype(mixed_cplx.dtype) == np.dtype(np.complex_)

    print(
        "this should issue a warning of unequal input dtypes, casting to float"
    )
    mixed_float = multipletau.correlate(a=trace.real,
                                        v=np.array(trace.imag, dtype=np.int_),
                                        m=16, deltat=1, normalize=True,
                                        copy=True)
    assert np.dtype(mixed_float.dtype) == np.dtype(np.float_)
示例#5
0
def test_cc_copy():
    """copy=True and copy=False produce identical results, but
    copy=False mutates the input arrays in place."""
    print("running ", sys._getframe().f_code.co_name)

    arrs = get_sample_arrays_cplx()

    def corr(trace, do_copy):
        # normalized cross-correlation of a trace with itself
        return multipletau.correlate(a=trace, v=trace, m=16, deltat=1,
                                     normalize=True, copy=do_copy)

    with_copy = [corr(a, True) for a in arrs]
    without_copy = [corr(a, False) for a in arrs]

    # simple test if result is the same
    assert np.all(np.concatenate(with_copy) == np.concatenate(without_copy))

    # make sure the copy=False pass really changed the input data
    arrs = np.concatenate(arrs)
    refarrs = np.concatenate(get_sample_arrays_cplx())
    assert not np.all(arrs == refarrs)
def test_cc_copy():
    """Verify that copy=False operates in place without altering the
    correlation result."""
    frame = sys._getframe()
    print("running ", frame.f_code.co_name)

    data = get_sample_arrays_cplx()

    copied, inplace = [], []
    for bucket, flag in ((copied, True), (inplace, False)):
        for trace in data:
            bucket.append(multipletau.correlate(a=trace, v=trace, m=16,
                                                deltat=1, normalize=True,
                                                copy=flag))

    # identical results regardless of the copy flag
    assert np.all(np.concatenate(copied) == np.concatenate(inplace))

    # the copy=False pass must have mutated the input traces
    assert not np.all(np.concatenate(data)
                      == np.concatenate(get_sample_arrays_cplx()))
示例#7
0
def test_cc_tracesize():
    """A trace shorter than 2*m must be rejected with a ValueError."""
    short = get_sample_arrays()[0][:31]
    try:
        multipletau.correlate(a=short, v=short, m=16)
    except ValueError as err:
        assert '`len(a)` must be >= `2m`!' in err.args
    else:
        assert False
示例#8
0
def test_cc_samesize():
    """Inputs `a` and `v` of different lengths must be rejected."""
    first, second = get_sample_arrays()[:2]
    try:
        multipletau.correlate(a=first, v=second, normalize=True)
    except ValueError as err:
        assert "`a` and `v` must have same length!" in err.args
    else:
        assert False
示例#9
0
def test_ac_cc_simple():
    """Unnormalized correlate(a, a) equals autocorrelate(a)."""
    print("running ", sys._getframe().f_code.co_name)

    arrs = get_sample_arrays()

    cross = np.concatenate(
        [multipletau.correlate(a=arr, v=arr, m=16, deltat=1,
                               normalize=False, copy=True,
                               dtype=np.float_)
         for arr in arrs])

    auto = np.concatenate(
        [multipletau.autocorrelate(a=arr, m=16, deltat=1, normalize=False,
                                   copy=True, dtype=np.float_)
         for arr in arrs])

    assert np.all(auto == cross)
示例#10
0
def test_ac_cc_normalize():
    """Normalized auto- and cross-correlation of identical input agree."""
    print("running ", sys._getframe().f_code.co_name)

    arrs = get_sample_arrays()

    auto = np.concatenate(
        [multipletau.autocorrelate(a=arr, m=16, deltat=1, normalize=True,
                                   copy=True, dtype=np.float_)
         for arr in arrs])

    cross = np.concatenate(
        [multipletau.correlate(a=arr, v=arr, m=16, deltat=1, normalize=True,
                               copy=True, dtype=np.float_)
         for arr in arrs])

    assert np.all(auto == cross)
示例#11
0
def test_cc_m_wrong():
    """Odd or non-integer `m` values are coerced and yield the same
    result as m=16."""
    print("running ", sys._getframe().f_code.co_name)

    a = get_sample_arrays_cplx()[0]

    # m = 16 (integer) first, then the values that must coerce to it
    results = [multipletau.correlate(a=a, v=a, m=m, deltat=1,
                                     normalize=True, copy=True)
               for m in (16, 15, 15.5, 14.5, 16.)]

    for other in results[1:]:
        assert np.all(results[0] == other)
示例#12
0
def test_cc_m_wrong():
    """All of m = 16, 15, 15.5, 14.5 and 16.0 give the identical result."""
    print("running ", sys._getframe().f_code.co_name)

    trace = get_sample_arrays_cplx()[0]

    def run(mval):
        # correlation with everything but `m` held fixed
        return multipletau.correlate(a=trace, v=trace, m=mval, deltat=1,
                                     normalize=True, copy=True)

    reference = run(16)
    for mval in (15, 15.5, 14.5, 16.):
        assert np.all(reference == run(mval))
示例#13
0
def test_cc():
    """Cross-correlation of two short ramps against hard-coded values."""
    result = correlate(range(42), range(1, 43), m=2, dtype=np.float_)
    expected = np.array([[0., 24682.],
                         [1., 23821.],
                         [2., 22960.],
                         [4., 21232.5],
                         [8., 15850.8]])
    assert np.allclose(expected, result)
示例#14
0
def test_cc_compress_first():
    """compress="first" keeps the first sample of each pair when binning."""
    result = correlate(range(42), range(1, 43), m=2, dtype=np.float_,
                       compress="first")
    expected = np.array([[0., 24682.],
                         [1., 23821.],
                         [2., 22960.],
                         [4., 20444.],
                         [8., 13910.4]])
    assert np.allclose(expected, result)
示例#15
0
def test_cc_compress_second():
    """compress="second" keeps the second sample of each pair when binning."""
    result = correlate(range(42), range(1, 43), m=2, dtype=np.float_,
                       compress="second")
    expected = np.array([[0., 24682.],
                         [1., 23821.],
                         [2., 22960.],
                         [4., 22040.],
                         [8., 17942.4]])
    assert np.allclose(expected, result)
示例#16
0
def test_cc_trace0():
    """Normalization must fail when either input averages to zero."""
    trace = get_sample_arrays()[0]
    zero_mean = trace - np.mean(trace)

    cases = ((zero_mean, trace, "Cannot normalize: Average of `a` is zero!"),
             (trace, zero_mean, "Cannot normalize: Average of `v` is zero!"))
    for a, v, message in cases:
        try:
            multipletau.correlate(a=a, v=v, normalize=True)
        except ValueError as err:
            assert message in err.args
        else:
            assert False
示例#17
0
def test_ac_cc_m():
    """Auto- and cross-correlation agree over a range of `m` values."""
    print("running ", sys._getframe().f_code.co_name)

    trace = np.concatenate(get_sample_arrays())
    ms = [8, 16, 32, 64, 128]

    auto = np.concatenate(
        [multipletau.autocorrelate(a=trace, m=m, deltat=1, normalize=False,
                                   copy=True, dtype=np.float_)
         for m in ms])

    cross = []
    for m in ms:
        cross.append(multipletau.correlate(a=trace, v=trace, m=m, deltat=1,
                                           normalize=False, copy=True,
                                           dtype=np.float_))
        # shortest admissible input: exactly 2*m samples
        multipletau.correlate(a=trace[:2 * m], v=trace[:2 * m], m=m,
                              deltat=1, normalize=False, copy=True,
                              dtype=np.float_)

    assert np.all(auto == np.concatenate(cross))
示例#18
0
def test_cc_compress_average():
    """ret_sum=True returns the binned sums together with sample counts."""
    result, counts = correlate(range(42), range(1, 43), m=2,
                               dtype=np.float_, ret_sum=True)
    expected = np.array([[0., 24682.],
                         [1., 23821.],
                         [2., 22960.],
                         [4., 10616.25],
                         [8., 3774.]])
    expected_counts = [42., 41., 40., 19., 8.]
    assert np.allclose(expected, result)
    assert np.allclose(expected_counts, counts)
示例#19
0
def test_cc_compress_first():
    """Binning with compress="first" matches the hard-coded reference."""
    got = correlate(range(42),
                    range(1, 43),
                    m=2,
                    dtype=np.float_,
                    compress="first")
    want = np.array([[0., 24682.],
                     [1., 23821.],
                     [2., 22960.],
                     [4., 20444.],
                     [8., 13910.4]])
    assert np.allclose(want, got)
示例#20
0
def test_cc_compress_second():
    """Binning with compress="second" matches the hard-coded reference."""
    got = correlate(range(42),
                    range(1, 43),
                    m=2,
                    dtype=np.float_,
                    compress="second")
    want = np.array([[0., 24682.],
                     [1., 23821.],
                     [2., 22960.],
                     [4., 22040.],
                     [8., 17942.4]])
    assert np.allclose(want, got)
示例#21
0
def test_corresponds_ac_first_loop():
    """First-register lags of the multiple-tau correlator match numpy.

    numpy correlation:           G_m = sum_i(a_i * a_{i+m})

    multiple-tau, 2nd register:  b_j = (a_{2i} + a_{2i+1}) / 2
                                 G_m = sum_j(b_j * b_{j+1})

    Past the first m+1 lag times the multiple-tau result differs from
    the plain correlation because the trace is averaged pairwise,
    halving its length.  Using an even-length trace (222 samples) whose
    elements 2i and 2i+1 are identical makes the pairwise averaging
    lossless, so the two methods can be compared, which is what this
    test exploits.
    """
    print("running ", sys._getframe().f_code.co_name)

    normed = [arr / np.average(arr) for arr in get_sample_arrays_cplx()]
    trace = np.concatenate(normed)[:222]
    # duplicate every second element so pairwise averaging is lossless
    trace[::2] = trace[1::2]

    for m in [2, 4, 6, 8, 10, 12, 14, 16]:
        swapped = trace.imag + 1j * trace.real
        restau = multipletau.correlate(a=trace, v=swapped, m=m, copy=True,
                                       normalize=False, dtype=np.complex_)
        reslin = multipletau.correlate_numpy(a=trace, v=swapped, copy=True,
                                             normalize=False,
                                             dtype=np.complex_)

        # pick the value at lag m+2, i.e. just past the m+1 initial bins
        tau_mt = restau[np.where(restau[:, 0] == m + 2)[0][0], 1]
        tau_np = reslin[np.where(reslin[:, 0] == m + 2)[0][0], 1]

        assert np.allclose(tau_mt, tau_np)
示例#22
0
def test_corresponds_ac_first_loop():
    """
    numpy correlation:
    G_m = sum_i(a_i*a_{i+m})

    multipletau correlation 2nd order:
    b_j = (a_{2i} + a_{2i+1} / 2)
    G_m = sum_j(b_j*b_{j+1})
        = 1/4*sum_i(a_{2i}   * a_{2i+m}   +
                    a_{2i}   * a_{2i+m+1} +
                    a_{2i+1} * a_{2i+m}   +
                    a_{2i+1} * a_{2i+m+1}
                    )

    The values after the first m+1 lag times in the multipletau
    correlation differ from the normal correlation, because the
    traces are averaged over two consecutive items, effectively
    halving the size of the trace. The multiple-tau correlation
    can be compared to the regular correlation by using an even
    sized sequence (here 222) in which the elements 2i and 2i+1
    are equal, as is done in this test.

    Fix: ``np.complex256`` is not available on all platforms (e.g.
    Windows builds of NumPy) and was removed in NumPy 2.0; use
    ``np.complex_`` (complex128) like the sibling version of this test.
    """
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    a = [arr / np.average(arr) for arr in get_sample_arrays_cplx()]
    a = np.concatenate(a)[:222]
    # two consecutive elements are the same, so the multiple-tau method
    # corresponds to the numpy correlation for the first loop.
    a[::2] = a[1::2]

    for m in [2, 4, 6, 8, 10, 12, 14, 16]:
        restau = multipletau.correlate(a=a,
                                       v=a.imag + 1j * a.real,
                                       m=m,
                                       copy=True,
                                       normalize=False,
                                       dtype=np.complex_)

        reslin = multipletau.correlate_numpy(a=a,
                                             v=a.imag + 1j * a.real,
                                             copy=True,
                                             normalize=False,
                                             dtype=np.complex_)

        idtau = np.where(restau[:, 0] == m + 2)[0][0]
        tau3 = restau[idtau, 1]  # m+1 initial bins

        idref = np.where(reslin[:, 0] == m + 2)[0][0]
        tau3ref = reslin[idref, 1]

        assert np.allclose(tau3, tau3ref)
示例#23
0
def test_cc_dtype():
    """Integer dtype requests and integer inputs fall back to float.

    Fix: corrected the typo "input us the same" -> "input is the same"
    in the assertion messages.
    """
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    a = np.round(get_sample_arrays_cplx()[0].real)

    # float result (the reference)
    rf = multipletau.correlate(a=a,
                               v=a,
                               m=16,
                               deltat=1,
                               normalize=True,
                               copy=True,
                               dtype=np.float_)

    # requesting an integer dtype must be overridden with float
    ri = multipletau.correlate(a=a,
                               v=a,
                               m=16,
                               deltat=1,
                               normalize=True,
                               copy=True,
                               dtype=np.int_)

    # integer input with dtype=None must also produce float output
    ri2 = multipletau.correlate(a=np.array(a, dtype=np.int_),
                                v=np.array(a, dtype=np.int_),
                                m=16,
                                deltat=1,
                                normalize=True,
                                copy=True,
                                dtype=None)

    assert ri.dtype == np.dtype(
        np.float_), "if wrong dtype, dtype should default to np.float_"
    assert ri2.dtype == np.dtype(
        np.float_), "if wrong dtype, dtype should default to np.float_"
    assert np.all(
        rf == ri), "result should be the same, because input is the same"
    assert np.all(
        rf == ri2), "result should be the same, because input is the same"
示例#24
0
def test_cc_dtype():
    """Wrong dtype requests default to floating point output.

    Fix: corrected the typo "input us the same" -> "input is the same"
    in the assertion messages.
    """
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    a = np.round(get_sample_arrays_cplx()[0].real)

    # integer
    rf = multipletau.correlate(a=a, v=a, m=16, deltat=1, normalize=True,
                               copy=True, dtype=np.float_)

    ri = multipletau.correlate(a=a, v=a, m=16, deltat=1, normalize=True,
                               copy=True, dtype=np.int_)

    ri2 = multipletau.correlate(a=np.array(a, dtype=np.int_),
                                v=np.array(a, dtype=np.int_),
                                m=16, deltat=1, normalize=True, copy=True,
                                dtype=None)

    assert ri.dtype == np.dtype(
        np.float_), "if wrong dtype, dtype should default to np.float_"
    assert ri2.dtype == np.dtype(
        np.float_), "if wrong dtype, dtype should default to np.float_"
    assert np.all(
        rf == ri), "result should be the same, because input is the same"
    assert np.all(
        rf == ri2), "result should be the same, because input is the same"
示例#25
0
def test_cc():
    """Invalid keyword arguments must raise ValueError with clear messages."""
    a = get_sample_arrays()[0]

    # (kwargs, expected message, match against e.args[0] substring?)
    checks = [
        (dict(copy=2), "`copy` must be boolean!", False),
        (dict(ret_sum=2), "`ret_sum` must be boolean!", False),
        (dict(normalize=2), "`normalize` must be boolean!", False),
        (dict(compress="peter"), "Invalid value for `compress`!", True),
        (dict(normalize=True, ret_sum=True),
         "'normalize' and 'ret_sum' must not both be True!", False),
    ]
    for kwargs, message, substring in checks:
        try:
            multipletau.correlate(a=a, v=a, **kwargs)
        except ValueError as err:
            if substring:
                # only a substring match is guaranteed for this message
                assert message in err.args[0]
            else:
                assert message in err.args
        else:
            assert False
示例#26
0
def test_cc_dtype2():
    """Output dtype follows numpy upcasting rules for mixed-type inputs."""
    name = sys._getframe().f_code.co_name
    print("running ", name)

    trace = np.round(get_sample_arrays_cplx()[0])

    # real vs. complex input -> complex output
    real_vs_cplx = multipletau.correlate(a=trace.real, v=trace, m=16,
                                         deltat=1, normalize=True,
                                         copy=True)
    assert np.dtype(real_vs_cplx.dtype) == np.dtype(np.complex_)

    # real vs. integer input -> float output
    real_vs_int = multipletau.correlate(
        a=trace.real,
        v=np.array(trace.imag, dtype=np.int_),
        m=16, deltat=1, normalize=True, copy=True)
    assert np.dtype(real_vs_int.dtype) == np.dtype(np.float_)
示例#27
0
def test_cc_dtype2():
    """Mixed input dtypes are upcast to complex or float as appropriate.

    Fixes: the builtin-shadowing aliases ``np.complex``, ``np.int`` and
    ``np.float`` (deprecated in NumPy 1.20, removed in 1.24) are
    replaced by ``np.complex_``, ``np.int_`` and ``np.float_``; the
    misaligned keyword arguments of the second call are re-indented.
    """
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    a = np.round(get_sample_arrays_cplx()[0])

    print("this should issue a warning of unequal input dtypes, casting to complex")
    rf = multipletau.correlate(a=a.real,
                               v=a,
                               m=16,
                               deltat=1,
                               normalize=True,
                               copy=True)
    assert np.dtype(rf.dtype) == np.dtype(np.complex_)

    print("this should issue a warning of unequal input dtypes, casting to float")
    rf2 = multipletau.correlate(a=a.real,
                                v=np.array(a.imag, dtype=np.int_),
                                m=16,
                                deltat=1,
                                normalize=True,
                                copy=True)
    assert np.dtype(rf2.dtype) == np.dtype(np.float_)
示例#28
0
File: calCorr.py  Project: t10823gm/M9003
def mt_correction(self, t0=0, tf=4000000):
    '''
    Multiple-tau correction.

    Computes a normalized multiple-tau autocorrelation for every trace
    and the normalized cross-correlation between the first two traces,
    rescaled by the square root of each mean decay factor.

    :param self: refer traces
    :param t0: index of the first time point of the analysis window
    :param tf: number of timepoint data (end index of the window)
    :return: tuple ``(autocors, xcor)`` — list of autocorrelation
        curves and the rescaled cross-correlation array

    NOTE(review): ``traces`` and ``decay_factors`` are free variables —
    presumably module-level globals or attributes meant to be accessed
    via ``self`` (the docstring says "refer traces"); confirm against
    the full calCorr.py before relying on this function.
    '''
    autocors = []
    # calculation of autocorrelation
    for trace in traces:
        y_mtau = multipletau.autocorrelate(trace[t0:tf], normalize=True)
        autocors.append(y_mtau)

    # calculation of crosscorrelation
    xcor = multipletau.correlate(traces[0, t0:tf], traces[1, t0:tf], normalize=True)
    # divide by sqrt of each mean decay factor to undo bleaching correction
    xcor = xcor / np.sqrt(np.mean(decay_factors[0])) / np.sqrt(np.mean(decay_factors[1]))

    return autocors, xcor
示例#29
0
def test_cc_normalize():
    """Normalized cross-correlation of real vs. imaginary parts matches
    the stored reference data."""
    name = sys._getframe().f_code.co_name
    print("running ", name)

    pieces = [multipletau.correlate(a=arr.real, v=arr.imag, m=16, deltat=1,
                                    normalize=True, copy=True,
                                    dtype=np.float_)
              for arr in get_sample_arrays_cplx()]
    res = np.concatenate(pieces)
    # reference data were generated once with np.save (see repo history)
    ref = get_reference_data(name, __file__)

    assert np.allclose(res, ref, atol=0, rtol=1e-14)
示例#30
0
def test_cc_simple():
    """correlate(a, a) reproduces the stored reference and equals
    autocorrelate(a)."""
    name = sys._getframe().f_code.co_name
    print("running ", name)

    arrs = get_sample_arrays_cplx()

    cross = np.concatenate(
        [multipletau.correlate(a=arr, v=arr, m=16, deltat=1,
                               normalize=False, copy=True,
                               dtype=np.complex_)
         for arr in arrs])

    ref = get_reference_data(name, __file__)
    assert np.allclose(cross, ref, atol=0, rtol=1e-15)

    # cross-correlating a trace with itself must match autocorrelate
    auto = np.concatenate(
        [multipletau.autocorrelate(a=arr, m=16, deltat=1, normalize=False,
                                   copy=True, dtype=np.complex_)
         for arr in arrs])
    assert np.allclose(cross, auto, atol=0, rtol=1e-15)
示例#31
0
def test_cc_simple():
    """Unnormalized complex cross-correlation against reference data."""
    frame = sys._getframe()
    testname = frame.f_code.co_name
    print("running ", testname)

    samples = get_sample_arrays_cplx()

    collected = []
    for trace in samples:
        collected.append(
            multipletau.correlate(a=trace, v=trace, m=16, deltat=1,
                                  normalize=False, copy=True,
                                  dtype=np.complex_))
    combined = np.concatenate(collected)

    reference = get_reference_data(testname, __file__)
    assert np.allclose(combined, reference, atol=0, rtol=1e-15)

    # autocorrelate must give the identical curves
    collected2 = []
    for trace in samples:
        collected2.append(
            multipletau.autocorrelate(a=trace, m=16, deltat=1,
                                      normalize=False, copy=True,
                                      dtype=np.complex_))
    combined2 = np.concatenate(collected2)

    assert np.allclose(combined, combined2, atol=0, rtol=1e-15)
示例#32
0
def test_cc_normalize():
    """Normalized real/imag cross-correlation against reference data."""
    frame = sys._getframe()
    testname = frame.f_code.co_name
    print("running ", testname)

    chunks = []
    for trace in get_sample_arrays_cplx():
        chunks.append(multipletau.correlate(a=trace.real, v=trace.imag,
                                            m=16, deltat=1, normalize=True,
                                            copy=True, dtype=np.float_))
    merged = np.concatenate(chunks)
    reference = get_reference_data(testname, __file__)

    assert np.allclose(merged, reference, atol=0, rtol=1e-14)
示例#33
0
def test_corresponds_cc_nonormalize():
    """The first m+1 multiple-tau lags agree with the plain numpy
    correlation.

    Fix: ``np.complex256`` (an extended-precision alias that does not
    exist on all platforms, e.g. Windows, and was removed in NumPy 2.0)
    is replaced by ``np.complex_``, matching the sibling version of this
    test; spacing follows PEP 8.
    """
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    a = np.concatenate(get_sample_arrays_cplx())
    m = 16

    restau = multipletau.correlate(a=a,
                                   v=a.imag + 1j * a.real,
                                   m=m,
                                   copy=True,
                                   normalize=False,
                                   dtype=np.complex_)

    reslin = multipletau.correlate_numpy(a=a,
                                         v=a.imag + 1j * a.real,
                                         copy=True,
                                         normalize=False,
                                         dtype=np.complex_)

    # first-register lag times are integers; use them as indices
    idx = np.array(restau[:, 0].real, dtype=int)[:m + 1]

    assert np.allclose(reslin[idx, 1], restau[:m + 1, 1])
示例#34
0
def test_corresponds_cc_nonormalize():
    """Compare the multiple-tau cross-correlation with the linear
    numpy-based implementation for the first ``m + 1`` lag channels
    at double precision.
    """
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)

    a = np.concatenate(get_sample_arrays_cplx())
    m = 16

    restau = multipletau.correlate(a=a,
                                   v=a.imag + 1j * a.real,
                                   m=m,
                                   copy=True,
                                   normalize=False,
                                   # `np.complex_` was removed in NumPy 2.0;
                                   # `np.complex128` is the same type and
                                   # works on all NumPy versions.
                                   dtype=np.complex128)

    reslin = multipletau.correlate_numpy(a=a,
                                         v=a.imag + 1j * a.real,
                                         copy=True,
                                         normalize=False,
                                         dtype=np.complex128)

    # the first m+1 lag times are integer channels, so they can be used
    # directly as indices into the linear correlation
    idx = np.array(restau[:, 0].real, dtype=int)[:m + 1]

    assert np.allclose(reslin[idx, 1], restau[:m + 1, 1])
示例#35
0
# calculate model curve for autocorrelation
# (gac_np, data, normalize, countrate, taudiff, N, deltat and the
# correlate helpers are defined earlier in this script)
x = gac_np[:, 0]
amp = np.correlate(data - np.average(data), data - np.average(data),
                   mode="valid")
if normalize:
    amp /= len(data) * countrate**2
y = amp * np.exp(-x / taudiff)

# create noise for cross-correlation
a, v = noise_cross_exponential(N, taudiff, deltat=deltat)
a -= np.average(a)
v -= np.average(v)
if normalize:
    a += countrate
    v += countrate
gcc_forw_mt = correlate(a, v, deltat=deltat, normalize=normalize)  # forward
gcc_back_mt = correlate(v, a, deltat=deltat, normalize=normalize)  # backward
# numpy.correlate for comparison
gcc_forw_np = correlate_numpy(a, v, deltat=deltat, normalize=normalize)
# calculate the model curve for cross-correlation
xcc = gac_np[:, 0]
ampcc = np.correlate(a - np.average(a), v - np.average(v), mode="valid")
if normalize:
    ampcc /= len(a) * countrate**2
ycc = ampcc * np.exp(-xcc / taudiff)

# plotting
fig = plt.figure(figsize=(8, 5))
# FigureCanvasBase.set_window_title was deprecated in Matplotlib 3.4 and
# removed in 3.6; the figure manager provides it on all supported versions.
fig.canvas.manager.set_window_title('comparing multipletau')

# autocorrelation
示例#36
0
# NOTE(review): `listOfG` is a mutable default argument — safe only as long
# as no caller (or this function) ever mutates it in place.
def FCS2Corr(data,
             dwellTime,
             listOfG=['central', 'sum3', 'sum5', 'chessboard', 'ullr'],
             accuracy=50):
    """
    Convert SPAD-FCS data to correlation curves
    ==========  ===============================================================
    Input       Meaning
    ----------  ---------------------------------------------------------------
    data        Data variable, i.e. output from binFile2Data
    dwellTime   Bin time [in µs]
    listOfG     List of correlations to be calculated; entries may be
                integers (single detector elements) or named modes such
                as 'central', 'sum3', 'sum5', 'allbuthot', 'chessboard',
                'chess3', 'ullr', 'crossCenter', '2MPD', 'crossAll',
                'autoSpatial', 'av'
    accuracy    Accuracy of the autocorrelation function, typically 50
    ==========  ===============================================================
    Output      Meaning
    ----------  ---------------------------------------------------------------
    G           Object with all autocorrelations
                E.g. G.central contains the array with the central detector
                element autocorrelation
    ==========  ===============================================================
    """

    # object from correlations class in which all correlation data is stored
    G = correlations()

    # dwell time
    G.dwellTime = dwellTime

    if len(np.shape(data)) == 1:
        # vector is given instead of matrix, single detector only
        # (note: the listOfG loop below still runs after this branch)
        print('Calculating autocorrelation ')
        setattr(
            G, 'det0',
            multipletau.correlate(data,
                                  data,
                                  m=accuracy,
                                  deltat=dwellTime * 1e-6,
                                  normalize=True))

    for i in listOfG:
        if isinstance(i, int):
            # autocorrelation of a detector element i
            print('Calculating autocorrelation of detector element ' + str(i))
            dataSingle = extractSpadData(data, i)
            setattr(
                G, 'det' + str(i),
                multipletau.correlate(dataSingle,
                                      dataSingle,
                                      m=accuracy,
                                      deltat=dwellTime * 1e-6,
                                      normalize=True))

        elif i == "central":
            # autocorrelation central detector element
            print('Calculating autocorrelation central detector element')
            dataCentral = extractSpadData(data, "central")
            G.central = multipletau.correlate(dataCentral,
                                              dataCentral,
                                              m=accuracy,
                                              deltat=dwellTime * 1e-6,
                                              normalize=True)

        elif i == "sum3":
            # autocorrelation sum3x3
            print('Calculating autocorrelation sum3x3')
            dataSum3 = extractSpadData(data, "sum3")
            G.sum3 = multipletau.correlate(dataSum3,
                                           dataSum3,
                                           m=accuracy,
                                           deltat=dwellTime * 1e-6,
                                           normalize=True)

        elif i == "sum5":
            # autocorrelation sum5x5
            print('Calculating autocorrelation sum5x5')
            dataSum5 = extractSpadData(data, "sum5")
            G.sum5 = multipletau.correlate(dataSum5,
                                           dataSum5,
                                           m=accuracy,
                                           deltat=dwellTime * 1e-6,
                                           normalize=True)

        elif i == "allbuthot":
            # autocorrelation sum5x5 except for the hot pixels
            print('Calculating autocorrelation allbuthot')
            dataAllbuthot = extractSpadData(data, "allbuthot")
            G.allbuthot = multipletau.correlate(dataAllbuthot,
                                                dataAllbuthot,
                                                m=accuracy,
                                                deltat=dwellTime * 1e-6,
                                                normalize=True)

        elif i == "chessboard":
            # crosscorrelation chessboard
            print('Calculating crosscorrelation chessboard')
            dataChess0 = extractSpadData(data, "chess0")
            dataChess1 = extractSpadData(data, "chess1")
            G.chessboard = multipletau.correlate(dataChess0,
                                                 dataChess1,
                                                 m=accuracy,
                                                 deltat=dwellTime * 1e-6,
                                                 normalize=True)

        elif i == "chess3":
            # crosscorrelation small 3x3 chessboard
            print('Calculating crosscorrelation small chessboard')
            dataChess0 = extractSpadData(data, "chess3a")
            dataChess1 = extractSpadData(data, "chess3b")
            G.chess3 = multipletau.correlate(dataChess0,
                                             dataChess1,
                                             m=accuracy,
                                             deltat=dwellTime * 1e-6,
                                             normalize=True)

        elif i == "ullr":
            # crosscorrelation upper left and lower right
            print('Calculating crosscorrelation upper left and lower right')
            dataUL = extractSpadData(data, "upperleft")
            dataLR = extractSpadData(data, "lowerright")
            G.ullr = multipletau.correlate(dataUL,
                                           dataLR,
                                           m=accuracy,
                                           deltat=dwellTime * 1e-6,
                                           normalize=True)

        elif i == "crossCenter":
            # NOTE(review): the branch comment said "L, R, T, B" but the
            # loop cross-correlates element 12 with ALL 25 elements
            # (including itself) — confirm which is intended.
            dataCenter = extractSpadData(data, 12)
            for j in range(25):
                print('Calculating crosscorrelation central element with ' +
                      str(j))
                data2 = extractSpadData(data, j)
                Gtemp = multipletau.correlate(dataCenter,
                                              data2,
                                              m=accuracy,
                                              deltat=dwellTime * 1e-6,
                                              normalize=True)
                setattr(G, 'det12x' + str(j), Gtemp)

        elif i == "2MPD":
            # both cross- and autocorrelations of elements 12 and 13
            # crosscorrelation element 12 and 13
            data1 = extractSpadData(data, 12)
            data2 = extractSpadData(data, 13)
            print('Cross correlation elements 12 and 13')
            Gtemp = multipletau.correlate(data1,
                                          data2,
                                          m=accuracy,
                                          deltat=dwellTime * 1e-6,
                                          normalize=True)
            G.cross12 = Gtemp
            print('Cross correlation elements 13 and 12')
            Gtemp = multipletau.correlate(data2,
                                          data1,
                                          m=accuracy,
                                          deltat=dwellTime * 1e-6,
                                          normalize=True)
            G.cross21 = Gtemp
            print('Autocorrelation element 12')
            Gtemp = multipletau.correlate(data1,
                                          data1,
                                          m=accuracy,
                                          deltat=dwellTime * 1e-6,
                                          normalize=True)
            G.auto1 = Gtemp
            print('Autocorrelation element 13')
            Gtemp = multipletau.correlate(data2,
                                          data2,
                                          m=accuracy,
                                          deltat=dwellTime * 1e-6,
                                          normalize=True)
            G.auto2 = Gtemp

        elif i == "crossAll":
            # crosscorrelation every element with every other element
            # (25 x 25 = 625 correlations, stored as G.det<j>x<k>)
            for j in range(25):
                data1 = extractSpadData(data, j)
                for k in range(25):
                    data2 = extractSpadData(data, k)
                    print('Calculating crosscorrelation det' + str(j) +
                          ' and det' + str(k))
                    Gtemp = multipletau.correlate(data1,
                                                  data2,
                                                  m=accuracy,
                                                  deltat=dwellTime * 1e-6,
                                                  normalize=True)
                    setattr(G, 'det' + str(j) + 'x' + str(k), Gtemp)

        elif i == "autoSpatial":
            # spatial autocorrelation across the detector array
            # number of time points
            Nt = np.size(data, 0)
            # detector size (5 for SPAD)
            N = int(np.round(np.sqrt(np.size(data, 1) - 1)))
            # G size
            M = 2 * N - 1
            # NOTE(review): range(0, 1, 1) contains only deltat = 0, so a
            # single temporal lag is computed — confirm this is intended.
            deltats = range(0, 1, 1)  # in units of dwell times
            G.autoSpatial = np.zeros((M, M, len(deltats)))
            # normalization
            print("Calculating average image")
            avIm = np.mean(data, 0)
            # avInt = np.mean(avIm[0:N*N]) - can't be used since every pixel
            # has a different PSF amplitude!!
            # for j in range(np.size(data, 0)):
            # data[j, :] = data[j, :] - avIm
            avIm = np.resize(avIm[0:N * N], (N, N))
            # calculate autocorrelation
            k = 0
            for deltat in deltats:
                print("Calculating spatial autocorr delta t = " +
                      str(deltat * dwellTime) + " µs")
                for j in range(Nt - deltat):
                    im1 = np.resize(data[j, 0:N * N], (N, N))
                    im1 = np.ndarray.astype(im1, 'int64')
                    im2 = np.resize(data[j + deltat, 0:N * N], (N, N))
                    im2 = np.ndarray.astype(im2, 'int64')
                    # G.autoSpatial[:,:,k] = G.autoSpatial[:,:,k] + ssig.correlate2d(im1, im2)
                    # calculate correlation between im1 and im2
                    # NOTE(review): shift range -4..4 assumes N == 5
                    for shifty in np.arange(-4, 5):
                        for shiftx in np.arange(-4, 5):
                            # go through all detector elements
                            n = 0  # number of overlapping detector elements
                            Gtemp = 0
                            # only detector positions where both the shifted
                            # and unshifted images overlap contribute
                            for detx in np.arange(np.max((0, shiftx)),
                                                  np.min((5, 5 + shiftx))):
                                for dety in np.arange(np.max((0, shifty)),
                                                      np.min((5, 5 + shifty))):
                                    GtempUnNorm = im1[dety, detx] * im2[
                                        dety - shifty, detx - shiftx]
                                    # normalize per pixel pair because every
                                    # pixel has a different PSF amplitude
                                    GtempNorm = GtempUnNorm - avIm[
                                        dety, detx] * avIm[dety - shifty,
                                                           detx - shiftx]
                                    GtempNorm /= avIm[dety, detx] * avIm[
                                        dety - shifty, detx - shiftx]
                                    Gtemp += GtempNorm
                                    n += 1
                            Gtemp /= n
                            G.autoSpatial[shifty + 4, shiftx + 4, k] += Gtemp
                # average over all evaluated time points
                G.autoSpatial[:, :, k] /= (Nt - deltat)
                k = k + 1

        elif i == "av":
            # average of all 25 individual autocorrelation curves
            for j in range(25):
                # autocorrelation of a detector element j
                print('Calculating autocorrelation of detector element ' +
                      str(j))
                dataSingle = extractSpadData(data, j)
                Gtemp = multipletau.correlate(dataSingle,
                                              dataSingle,
                                              m=accuracy,
                                              deltat=dwellTime * 1e-6,
                                              normalize=True)
                setattr(G, 'det' + str(j), Gtemp)
            # Gtemp still holds det24's curve; adding det0..det23 below
            # yields the sum over all 25 curves
            Gav = Gtemp[:, 1]
            for j in range(24):
                Gav = np.add(Gav, getattr(G, 'det' + str(j))[:, 1])
            Gav = Gav / 25
            # store as [tau, G] pairs like the individual curves
            G.av = np.zeros([np.size(Gav, 0), 2])
            G.av[:, 0] = Gtemp[:, 0]
            G.av[:, 1] = Gav

    return G
示例#37
0
def compare_corr():
    """Compare multipletau auto-/cross-correlation with numpy.correlate
    and the exponential input model, and save a comparison plot next to
    this script.
    """
    # starting parameters
    # `np.int` was removed in NumPy 1.24; the builtin is equivalent here.
    N = int(np.pi * 1e3)
    countrate = 250. * 1e-3  # in Hz
    taudiff = 55.  # in us
    deltat = 2e-6  # time discretization [s]
    normalize = True

    # time factor
    taudiff *= deltat

    # numpy.correlate is O(N^2); skip it for long traces
    do_np_corr = N < 1e5

    # autocorrelation
    print("Creating noise for autocorrelation")
    data = noise_exponential(N, taudiff, deltat=deltat)
    data -= np.average(data)
    if normalize:
        data += countrate
    # multipletau
    print("Performing autocorrelation (multipletau).")
    G = autocorrelate(data, deltat=deltat, normalize=normalize)
    # numpy.correlate for comparison
    if do_np_corr:
        print("Performing autocorrelation (numpy).")
        Gd = correlate_numpy(data, data, deltat=deltat,
                             normalize=normalize)
    else:
        # reuse the multipletau result so the lag axis below is defined
        Gd = G

    # cross-correlation
    print("Creating noise for cross-correlation")
    a, v = noise_cross_exponential(N, taudiff, deltat=deltat)
    a -= np.average(a)
    v -= np.average(v)
    if normalize:
        a += countrate
        v += countrate
    Gccforw = correlate(a, v, deltat=deltat, normalize=normalize)  # forward
    Gccback = correlate(v, a, deltat=deltat, normalize=normalize)  # backward
    if do_np_corr:
        print("Performing cross-correlation (numpy).")
        Gdccforw = correlate_numpy(a, v, deltat=deltat, normalize=normalize)

    # calculate the model curve for cross-correlation
    xcc = Gd[:, 0]
    ampcc = np.correlate(a - np.average(a), v - np.average(v), mode="valid")
    if normalize:
        ampcc /= len(a) * countrate**2
    ycc = ampcc * np.exp(-xcc / taudiff)

    # calculate the model curve for autocorrelation
    x = Gd[:, 0]
    amp = np.correlate(data - np.average(data), data - np.average(data),
                       mode="valid")
    if normalize:
        amp /= len(data) * countrate**2
    y = amp * np.exp(-x / taudiff)

    # plotting
    # AC
    fig = plt.figure()
    # FigureCanvasBase.set_window_title was deprecated in Matplotlib 3.4
    # and removed in 3.6; use the figure manager instead.
    fig.canvas.manager.set_window_title('testing multipletau')
    ax = fig.add_subplot(2, 1, 1)
    ax.set_xscale('log')
    if do_np_corr:
        plt.plot(Gd[:, 0], Gd[:, 1], "-", color="gray",
                 label="correlate (numpy)")
    plt.plot(x, y, "g-", label="input model")
    plt.plot(G[:, 0], G[:, 1], "-", color="#B60000", label="autocorrelate")
    plt.xlabel("lag channel")
    plt.ylabel("autocorrelation")
    plt.legend(loc=0, fontsize='small')
    plt.ylim(-amp * .2, amp * 1.2)
    plt.xlim(Gd[0, 0], Gd[-1, 0])

    # CC
    ax = fig.add_subplot(2, 1, 2)
    ax.set_xscale('log')
    if do_np_corr:
        plt.plot(Gdccforw[:, 0], Gdccforw[:, 1], "-", color="gray",
                 label="forward (numpy)")
    plt.plot(xcc, ycc, "g-", label="input model")
    plt.plot(Gccforw[:, 0], Gccforw[:, 1], "-", color="#B60000",
             label="forward")
    plt.plot(Gccback[:, 0], Gccback[:, 1], "-", color="#5D00B6",
             label="backward")
    plt.xlabel("lag channel")
    plt.ylabel("cross-correlation")
    plt.legend(loc=0, fontsize='small')
    plt.ylim(-ampcc * .2, ampcc * 1.2)
    plt.xlim(Gd[0, 0], Gd[-1, 0])
    plt.tight_layout()

    # avoid overwriting a previous plot by appending a timestamp
    savename = __file__[:-3] + ".png"
    if os.path.exists(savename):
        savename = __file__[:-3] + time.strftime("_%Y-%m-%d_%H-%M-%S.png")

    plt.savefig(savename)
    print("Saved output to", savename)
示例#38
0
def test():
    """Visual sanity check: compare multipletau auto- and
    cross-correlation with numpy.correlate and the exponential input
    model, then show the resulting plot.
    """
    import numpy as np
    import os
    import sys
    from matplotlib import pylab as plt
    sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/../"))
    from multipletau import autocorrelate, correlate, correlate_numpy

    # starting parameters
    # `np.int` was removed in NumPy 1.24; the builtin is equivalent here.
    N = int(np.pi * 1e3)
    countrate = 250. * 1e-3  # in Hz
    taudiff = 55.  # in us
    deltat = 2e-6  # time discretization [s]
    normalize = True

    # time factor
    taudiff *= deltat

    ##
    ## Autocorrelation
    ##
    print("Creating noise for autocorrelation")
    data = noise_exponential(N, taudiff, deltat=deltat)
    data += - np.average(data)
    if normalize:
        data += countrate
    # multipletau
    print("Performing autocorrelation (multipletau).")
    G = autocorrelate(data, deltat=deltat, normalize=normalize)
    # numpy.correlate for comparison (O(N^2), only for short traces)
    if len(data) < 1e5:
        print("Performing autocorrelation (numpy).")
        Gd = correlate_numpy(data, data, deltat=deltat,
                             normalize=normalize)
    # Calculate the expected curve
    x = G[:, 0]
    amp = np.correlate(data - np.average(data), data - np.average(data),
                       mode="valid")
    if normalize:
        amp /= len(data) * countrate**2
    y = amp * np.exp(-x / taudiff)

    ##
    ## Cross-correlation
    ##
    print("Creating noise for cross-correlation")
    a, v = noise_cross_exponential(N, taudiff, deltat=deltat)
    a += - np.average(a)
    v += - np.average(v)
    if normalize:
        a += countrate
        v += countrate
    # multipletau
    Gccforw = correlate(a, v, deltat=deltat, normalize=normalize)
    Gccback = correlate(v, a, deltat=deltat, normalize=normalize)
    if len(a) < 1e5:
        print("Performing cross-correlation (numpy).")
        Gdccforw = correlate_numpy(a, v, deltat=deltat, normalize=normalize)
    # Calculate the expected curve
    xcc = Gccforw[:, 0]
    ampcc = np.correlate(a - np.average(a), v - np.average(v), mode="valid")

    if normalize:
        ampcc /= len(a) * countrate**2
    ycc = ampcc * np.exp(-xcc / taudiff)

    ##
    ## Plotting
    ##

    # AC
    fig = plt.figure()
    # FigureCanvasBase.set_window_title was deprecated in Matplotlib 3.4
    # and removed in 3.6; use the figure manager instead.
    fig.canvas.manager.set_window_title('testing multipletau')
    ax = fig.add_subplot(2, 1, 1)
    ax.set_xscale('log')
    plt.plot(x, y, "g-", label="input model")
    plt.plot(G[:, 0], G[:, 1], "r-", label="autocorrelate")
    if len(data) < 1e5:
        plt.plot(Gd[:, 0], Gd[:, 1], "b--", label="correlate (numpy)")
    plt.xlabel("lag channel")
    plt.ylabel("autocorrelation")
    plt.legend(loc=0, fontsize='small')
    plt.ylim(-amp * .2, amp * 1.2)

    ## CC
    ax = fig.add_subplot(2, 1, 2)
    ax.set_xscale('log')
    plt.plot(xcc, ycc, "g-", label="input model")
    plt.plot(Gccforw[:, 0], Gccforw[:, 1], "r-", label="forward")
    if len(data) < 1e5:
        plt.plot(Gdccforw[:, 0], Gdccforw[:, 1], "b--",
                 label="forward (numpy)")
    plt.plot(Gccback[:, 0], Gccback[:, 1], "r--", label="backward")
    plt.xlabel("lag channel")
    plt.ylabel("cross-correlation")
    plt.legend(loc=0, fontsize='small')

    plt.ylim(-ampcc * .2, ampcc * 1.2)

    plt.tight_layout()
    plt.show()