import numpy as np
import pycorrelate


def corr3(x1, x2=None, window=50):
    """
    Cross-correlogram of two binned spike trains.

    x1 and x2 are binned spike counts; if x2 is None, the autocorrelation
    of x1 is computed. The result covers lags -window .. +window
    (2 * window + 1 values).
    """
    if x2 is None:
        x2 = x1
    corr = np.zeros(window * 2 + 1)
    # Positive lag
    corr[:window + 1] = pycorrelate.ucorrelate(x1, x2, window + 1)[::-1]
    # Negative lag (the zero-lag bin at index `window` is written by both
    # halves with the same value)
    corr[window:] = pycorrelate.ucorrelate(x2, x1, window + 1)

    return corr
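A short usage sketch (synthetic spike times binned with np.histogram; the numbers are purely illustrative):

import numpy as np

rng = np.random.default_rng(1)
spikes_a = np.sort(rng.uniform(0, 10.0, 2000))   # spike times in seconds
spikes_b = np.sort(rng.uniform(0, 10.0, 2000))
bins = np.arange(0, 10.0, 0.001)                 # 1 ms bins
xa, _ = np.histogram(spikes_a, bins=bins)
xb, _ = np.histogram(spikes_b, bins=bins)
cc = corr3(xa, xb, window=50)                    # lags -50 .. +50 bins
print(cc.shape)                                  # (101,)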
Example #2
def test_ucorrelate(data):
    """Test ucorrelate against np.correlate."""
    t, u, unit = data
    binwidth = 50e-6
    bins_tt = np.arange(0, t.max() * unit, binwidth) / unit
    bins_uu = np.arange(0, u.max() * unit, binwidth) / unit
    tx, _ = np.histogram(t, bins=bins_tt)
    ux, _ = np.histogram(u, bins=bins_uu)
    C = np.correlate(ux, tx, mode='full')
    Gn = C[tx.size - 1:]  # trim to positive time lags
    Gu = pyc.ucorrelate(tx, ux)
    assert (Gu == Gn).all()
    Gu2 = pyc.ucorrelate(tx, ux, maxlag=1000)
    assert (Gu2 == Gn[:1000]).all()
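A standalone toy check (arrays chosen only for illustration) of the relationship the test relies on, namely that pyc.ucorrelate equals the positive-lag half of np.correlate in 'full' mode:

import numpy as np
import pycorrelate as pyc

a = np.array([1, 2, 3, 4], dtype=np.int64)
b = np.array([0, 1, 0, 2], dtype=np.int64)

full = np.correlate(b, a, mode='full')  # len(a) + len(b) - 1 values; zero lag at index a.size - 1
positive = full[a.size - 1:]            # keep lag 0 and the positive lags
assert (pyc.ucorrelate(a, b) == positive).all()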
Example #3
def allcorr2(allspikes, window):
    n = allspikes.shape[0]  # number of spike trains (one per row)
    pxc = np.zeros((n, n, window))
    for i in range(n):
        for j in range(n):
            if i >= j:
                pxc[i, j, :] = pycorrelate.ucorrelate(allspikes[i, :],
                                                      allspikes[j, :], window)
    return pxc
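A quick usage sketch for allcorr2 (random Poisson counts standing in for binned spikes; illustrative only):

import numpy as np
import pycorrelate

rng = np.random.default_rng(0)
allspikes = rng.poisson(0.2, size=(5, 10000))  # 5 trains, 10000 time bins
pxc = allcorr2(allspikes, window=50)
print(pxc.shape)  # (5, 5, 50); only the lower triangle (i >= j) is filled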
Example #4
def cross_corr(a, b):
    # Maximum up/down shift to search.
    N = 5
    # Returns the offset (lag) at which the cross-correlation of a and b peaks.
    # res = np.correlate(a, b, mode="same")
    # offset = np.argmax(res) - len(res) // 2

    # This is equivalent to the commented np.correlate version above, but not
    # significantly faster than the full NumPy cross-correlation.
    res = np.empty(N * 2 + 1)
    # ucorrelate cannot compute negative lags, so correlate both orderings and
    # piece the two halves together.
    first = pyc.ucorrelate(a, b, maxlag=N + 1)
    second = pyc.ucorrelate(b, a, maxlag=N + 1)
    res[:N + 1] = first[::-1]
    res[N + 1:] = second[1:]
    offset = np.argmax(res) - N
    # This alternative is not equivalent:
    # for i, x in enumerate(range(-N, N + 1)):
    #     # out[i] = np.sum(np.abs(a - np.roll(b, x)))
    #     out[i] = np.correlate(a, np.roll(b, x), mode="valid")
    return offset
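A brief sanity check (toy arrays, not from the original source) that the detected offset matches a known shift:

import numpy as np
import pycorrelate as pyc

base = np.array([0, 0, 1, 5, 9, 5, 1, 0, 0, 0, 0, 0], dtype=np.int64)
shifted = np.roll(base, 3)        # move the peak 3 samples to the right
print(cross_corr(base, shifted))  # prints -3 under this function's sign convention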
Example #5
def test_pcorrelate_vs_ucorrelate(data):
    """Check that pcorrelate on raw timestamps approximately matches ucorrelate on binned data."""
    t, u, unit = data
    binwidth = 50e-6
    bins_tt = np.arange(0, t.max() * unit, binwidth) / unit
    bins_uu = np.arange(0, u.max() * unit, binwidth) / unit
    tx, _ = np.histogram(t, bins=bins_tt)
    ux, _ = np.histogram(u, bins=bins_uu)
    Gu = pyc.ucorrelate(tx, ux)
    maxlag_sec = 1.2  # seconds
    lagbins = (np.arange(0, maxlag_sec, binwidth) / unit).astype('int64')
    Gp = pyc.pcorrelate(t, u, lagbins) * int(binwidth / unit)
    n = 6000
    err = np.abs(Gp[:n] - Gu[:n]) / (0.5 * (Gp[:n] + Gu[:n]))
    assert err.max() < 0.23
    assert err.mean() < 0.05
Example #6
    def cross_correlation_using_all_approaches_with_instrumentation(
            signal_x=[], signal_y=[]):
        # List of tuples storing results regarding cross-correlation.
        results = []
        """
			Enumerate all modes of NumPy's correlate function.
			
			In the example from \cite{TheSciPyCommunity2019},
				the cross-correlation has values greater than 1;
				since the notes in \cite{TheSciPyCommunity2019}
					indicate that the definition for
					cross-correlation "is not unique," this can
					explain why the cross-correlation for the
					"valid" mode is >1.
		"""
        for current_mode in approx_cross_correlation.numpy_correlate_modes:
            prompt = "	... Testing NumPy-based cross-correlation's " + current_mode + " mode.		{}"
            statistical_analysis.increment_number_test_cases_used()
            # Determine the cross-correlation using the current mode.
            cross_correlation_from_numpy_correlate = approx_cross_correlation.cross_correlation_using_numpy(
                signal_x, signal_y, current_mode)
            #print("NumPy-based cross-correlation using",current_mode,"mode is:",cross_correlation_from_numpy_correlate,"=")
            """
				Perform statistical analysis on the set of
					cross-correlation values.
			"""
            std_dev_cross_correlation = np.std(
                cross_correlation_from_numpy_correlate)
            #print("	NumPy-based cross-correlation's standard deviation using",current_mode,"mode is:",std_dev_cross_correlation,"=")
            var_cross_correlation = np.var(
                cross_correlation_from_numpy_correlate)
            #print("	NumPy-based cross-correlation's variance using",current_mode,"mode is:",var_cross_correlation,"=")
            arith_mean_cross_correlation = np.mean(
                cross_correlation_from_numpy_correlate)
            #print("	NumPy-based cross-correlation's arithmetic mean using",current_mode,"mode is:",arith_mean_cross_correlation,"=")
            ptp_cross_correlation = np.ptp(
                cross_correlation_from_numpy_correlate)
            #print("	NumPy-based cross-correlation's min and max values (or peak to peak) using",current_mode,"mode is:",ptp_cross_correlation,"=")
            amax_cross_correlation = np.amax(
                cross_correlation_from_numpy_correlate)
            #print("	NumPy-based cross-correlation's max value using",current_mode,"mode is:",amax_cross_correlation,"=")
            amin_cross_correlation = np.amin(
                cross_correlation_from_numpy_correlate)
            #print("	NumPy-based cross-correlation's min value using",current_mode,"mode is:",amin_cross_correlation,"=")
            #print("")
            results.append(
                (current_mode, cross_correlation_from_numpy_correlate,
                 std_dev_cross_correlation, var_cross_correlation,
                 arith_mean_cross_correlation, ptp_cross_correlation,
                 amax_cross_correlation, amin_cross_correlation))
            print(prompt.format("OK"))
            statistical_analysis.increment_number_test_cases_passed()
        """
		Matplotlib.pyplot's solution for correlation causes
			execution error
		
		(lags, c, line, b) = plt.xcorr(signal_x, signal_y,maxlags=4)
		print("Matplotlib.pyplot lags",lags,"=")
		print("Matplotlib.pyplot c",c,"=")
		print("Matplotlib.pyplot line",line,"=")
		print("Matplotlib.pyplot b",b,"=")
		"""
        cross_correlation_from_pycorrelate = pyc.ucorrelate(
            np.array(signal_x), np.array(signal_y))
        #print("Pycorrelate-based cross-correlation is:",cross_correlation_from_pycorrelate,"=")
        """
			Perform statistical analysis on the set of
				cross-correlation values.
		"""
        std_dev_cross_correlation = np.std(cross_correlation_from_pycorrelate)
        print("	Pycorrelate-based cross-correlation's standard deviation is:",
              std_dev_cross_correlation, "=")
        var_cross_correlation = np.var(cross_correlation_from_pycorrelate)
        print("	Pycorrelate-based cross-correlation's variance is:",
              var_cross_correlation, "=")
        arith_mean_cross_correlation = np.mean(
            cross_correlation_from_pycorrelate)
        print("	Pycorrelate-based cross-correlation's arithmetic mean is:",
              arith_mean_cross_correlation, "=")
        ptp_cross_correlation = np.ptp(cross_correlation_from_pycorrelate)
        print("	Pycorrelate-based cross-correlation's peak-to-peak range is:",
              ptp_cross_correlation, "=")
        amax_cross_correlation = np.amax(cross_correlation_from_pycorrelate)
        print("	Pycorrelate-based cross-correlation's max value is:",
              amax_cross_correlation, "=")
        amin_cross_correlation = np.amin(cross_correlation_from_pycorrelate)
        print("	Pycorrelate-based cross-correlation's min value is:",
              amin_cross_correlation, "=")
        print("")
        results.append(("pyc.ucorrelate", cross_correlation_from_pycorrelate,
                        std_dev_cross_correlation, var_cross_correlation,
                        arith_mean_cross_correlation, ptp_cross_correlation,
                        amax_cross_correlation, amin_cross_correlation))
        return results
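The docstring above cites a documentation example in which the "valid" mode yields a value greater than 1; a minimal standalone reproduction of what appears to be that example from the numpy.correlate documentation (NumPy only, independent of the helper modules used in the function):

import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.array([0.0, 1.0, 0.5])
for mode in ('full', 'same', 'valid'):
    print(mode, np.correlate(x, y, mode=mode))
# 'full' yields len(x) + len(y) - 1 lags, 'same' yields max(len(x), len(y)),
# and 'valid' yields only the fully-overlapping lag, here the single value 3.5.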
Example #7
x = cell_storm_opt.data.data_dict['storm']['x']
y = cell_storm_opt.data.data_dict['storm']['y']
perimeter = cell_storm_opt.coords.calc_perimeter(x, y)

# Sampling points
x_out = np.linspace(0, cell_storm_opt.circumference, num=20000, endpoint=True)
dx = np.diff(x_out)[0]
# Sum convolution with gaussian
y_out = running_sum(np.sort(perimeter),
                    np.ones_like(perimeter),
                    x_out,
                    sigma=0.075)

# Autocorrelation
G = ucorrelate(y_out, y_out)

# Remove low-frequency component
G_m = uniform_filter1d(G, size=200)
acf = G - G_m

#https://stackoverflow.com/questions/11205037/detect-period-of-unknown-source
fourier = np.fft.fft(acf) / len(acf)
n = acf.size
freq = np.fft.fftfreq(n, d=dx)

sns.set_style('ticks')
ax_loc = plt.subplot(bot_grid[0])
ax_loc.plot(x_out * (80 / 1000), y_out)
ax_loc.set_xlim(0, np.max(x_out) * (80 / 1000))
ax_loc.set_ylabel('Localizations')
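The snippet computes the spectrum of the detrended autocorrelation but stops before reading off a period; a possible continuation along the lines of the linked Stack Overflow approach (the names below are illustrative, not from the original script):

# Find the dominant positive-frequency component, skipping the DC term.
peak_idx = np.argmax(np.abs(fourier[1:n // 2])) + 1
dominant_period = 1.0 / freq[peak_idx]  # in the same length units as dx
print(dominant_period)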