Code example #1
File: test_windows.py  Project: BitFoyle/scipy
def test_exponential():
    for k, v in exponential_data.items():
        if v is None:
            assert_raises(ValueError, signal.exponential, *k)
        else:
            win = signal.exponential(*k)
            assert_allclose(win, v, rtol=1e-14)
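The test drives signal.exponential through a table of (args, expected) pairs and checks the ValueError paths. As a minimal standalone sketch (not part of the scipy test suite), the symmetric window being validated follows w[n] = exp(-|n - (M - 1)/2| / tau); note that recent SciPy releases expose the window as signal.windows.exponential instead:

import numpy as np
from scipy import signal

M, tau = 10, 2.0
n = np.arange(M)
expected = np.exp(-np.abs(n - (M - 1) / 2) / tau)
assert np.allclose(signal.exponential(M, tau=tau), expected)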
Code example #2
def apply_doubleexponential_filter(FR_time, length, tau):
    w = signal.exponential(length, tau=tau)
    nN = 60
    new_FR = []
    for x in range(nN):
        nx = np.convolve(FR_time[x, :], w / w.sum(), 'valid')
        new_FR.append(nx.tolist())
    return np.array(new_FR)
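A hypothetical usage sketch (the array shape and the length/tau values are invented; the function assumes numpy as np and scipy.signal as signal are already imported):

import numpy as np
from scipy import signal

FR_time = np.random.rand(60, 500)  # 60 traces x 500 time bins, synthetic
smoothed = apply_doubleexponential_filter(FR_time, length=50, tau=10.0)
print(smoothed.shape)  # (60, 451): 'valid' convolution shortens each trace by length - 1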
Code example #3
def apply_exponential_filter(FR_time, length, tau):
    w = signal.exponential(length, tau=tau)
    # check the asymmetric one
    #tau2 = -(length - 1)/np.log(0.01)
    #w = signal.exponential(length, 0, tau2, sym=False)
    nN = 60
    new_FR = []
    for x in range(nN):
        nx = np.convolve(FR_time[x, :], w / w.sum(), 'valid')
        new_FR.append(nx.tolist())
    return np.array(new_FR)
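The commented-out variant is worth decoding: tau2 = -(length - 1) / np.log(0.01) picks the time constant so that the one-sided window exp(-n / tau2) falls to 1% of its peak at the last sample, and center=0 with sym=False makes it causal. The same construction recurs in several examples below.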
Code example #4
def apply(self, shot):
    apply_positivity(shot)
    super(AveragingVarNormalizer, self).apply(shot)
    window_decay = self.conf['data']['window_decay']
    window_size = self.conf['data']['window_size']
    window = exponential(window_size, 0, window_decay, False)
    window /= np.sum(window)
    for (i, sig) in enumerate(shot.signals):
        if sig.normalize:
            shot.signals_dict[sig] = apply_along_axis(
                lambda m: correlate(m, window, 'valid'),
                axis=0, arr=shot.signals_dict[sig])
            shot.signals_dict[sig] = np.clip(
                shot.signals_dict[sig], -self.bound, self.bound)
    shot.ttd = shot.ttd[-shot.signals.shape[0]:]
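Here exponential is called positionally as exponential(window_size, 0, window_decay, False), i.e. a one-sided decaying window starting at sample 0. Normalizing it to unit sum turns correlate(m, window, 'valid') into an exponentially weighted moving average, and because 'valid' output is shorter than the input, shot.ttd is trimmed to the new length on the last line.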
Code example #5
def get_template(samples=8000, padding_samples=1000, decay_samples=4000):
    if decay_samples < 0:
        decay_samples = math.floor(samples / 5)

    mu, sigma = 0.0, 1.0
    template = np.array([gauss(mu, sigma) for i in range(samples)])

    tau2 = -(decay_samples - 1) / np.log(0.01)
    decay = signal.exponential(samples, 0, tau2, False)
    template /= np.max(np.abs(template))

    tmp = np.zeros((padding_samples,))
    #template = np.concatenate([tmp, decay * template], axis=0)
    template = np.concatenate([tmp, decay], axis=0)  # only decay curve
    return template
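Note that the Gaussian-noise template is normalized and then effectively discarded: the concatenation that multiplies decay * template is commented out, so the returned array is padding_samples zeros followed by the bare decay curve. tau2 is chosen so the curve reaches 1% of its peak after decay_samples samples; the window itself is samples long, so it keeps decaying past that point.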
Code example #6
def exponential_m(decay=False):
    """
    Returns an exponential function.

    Parameters
    ----------
    decay: bool
        If True returns a decaying exponential function
        If False returns a growing exponential function
    """
    
    M = 50
    sign = -1 if decay else 1
    tau2 = sign*(M) / np.log(0.1)
    return signal.exponential(M, 0, tau2, False)
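The sign trick relies on np.log(0.1) being negative: for decay=True, tau2 = M / ln(10) is positive and the one-sided window falls to roughly 10% of its peak by the last sample; for decay=False, tau2 is negative, and since the window is exp(-n / tau2), a negative time constant turns the decay into exponential growth.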
Code example #7
File: df_f.py  Project: carlyzfeng/Image_toolbox
def dff_expfilt(dff_r, dt, t_width=2.0):
    """
    Exponentially weighted moving average filter
    OK this also works.
    """
    M = int(t_width / dt + 1) * 8 + 1  # window length in samples
    wd = exponential(M, center=None, tau=t_width)  # Symmetric = True

    NT, NP = dff_r.shape
    dff_expf = np.zeros([NT, NP])
    tt = np.arange(1, NT + 1) * dt
    denom_filter = (1 - np.exp(-tt / t_width)) * t_width  # the denominator
    for cp in range(NP):
        numer_filter = fftconvolve(dff_r[:, cp], wd, mode='same') * dt
        dff_expf[:, cp] = numer_filter / denom_filter

    return dff_expf, wd
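The denominator appears to be an edge correction: the integral of exp(-s / tau) from 0 to t is tau * (1 - exp(-t / tau)), so dividing by (1 - np.exp(-tt / t_width)) * t_width renormalizes the convolution near the start of the trace, where only part of the kernel overlaps the data.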
Code example #8
def simulate_integrate_and_fire_cell(presynaptic_input_spikes, synaptic_weights, membrane_time_const=20, v_reset=-95, v_threshold=-50, current_to_voltage_mult_factor=5):
    temporal_filter_length = int(7 * membrane_time_const) + 1
    syn_filter = signal.exponential(M=temporal_filter_length,center=0,tau=membrane_time_const,sym=False)[np.newaxis,:]
    syn_local_currents = signal.convolve(presynaptic_input_spikes, syn_filter, mode='full')[:,:presynaptic_input_spikes.shape[1]]
    soma_current       = signal.convolve(syn_local_currents, np.flipud(synaptic_weights), mode='valid')
    
    # make simulations
    soma_voltage = v_reset + current_to_voltage_mult_factor * soma_current.ravel()
    output_spike_times_in_ms = []
    for t in range(len(soma_voltage)):
        if (soma_voltage[t] > v_threshold) and ((t + 1) < len(soma_voltage)):
            t_start = t + 1
            t_end = min(len(soma_voltage), t_start + temporal_filter_length)
            soma_voltage[t_start:t_end] -= (soma_voltage[t + 1] - v_reset) * syn_filter.ravel()[:(t_end - t_start)]
            output_spike_times_in_ms.append(t)

    return soma_voltage, output_spike_times_in_ms
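A hypothetical smoke test for the simulator above (all shapes and values are invented; it assumes the definition above and its numpy/scipy imports are in scope):

import numpy as np

n_syn, T = 128, 1000  # synapses x milliseconds, made up
spikes = (np.random.rand(n_syn, T) < 0.005).astype(float)  # sparse Poisson-like input
weights = 0.01 * np.random.randn(n_syn, 1)  # one weight per synapse, as a column
voltage, spike_times = simulate_integrate_and_fire_cell(spikes, weights)
print(voltage.shape, len(spike_times))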
Code example #9
def ewma_smoothing(series, tau=5):
    """
    Exponentially weighted moving average of a series.

    Parameters
    ----------
    series: array-like
        Series to convolve.
    tau: float
        Decay time constant of the window, in samples.

    Returns
    -------
    smoothed: array-like
        Smoothed series.
    """
    exp_window = signal.exponential(2 * tau, 0, tau, False)[::-1]
    exp_window /= exp_window.sum()
    smoothed = signal.convolve(series, exp_window, mode="same")
    return smoothed
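A hypothetical usage sketch (the series is synthetic; it assumes the definition above with numpy and scipy.signal in scope, and note that 2 * tau must be a whole number, since it is used directly as the window length):

import numpy as np

noisy = np.sin(np.linspace(0, 6, 200)) + 0.3 * np.random.randn(200)
smooth = ewma_smoothing(noisy, tau=8)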
Code example #10
def _weigh_exp(width: int) -> ndarray:
    'create exponential weights'
    weights = signal.exponential(((width - 1) * 2) + 1)[:width]
    weights /= (weights.sum())
    return weights
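Taking the first width samples of a symmetric window of length 2 * (width - 1) + 1 keeps just the rising flank up to and including the peak, so the normalized weights increase monotonically and weight the most recent sample the heaviest when correlated against a series.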
Code example #11
import random
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
for y in range(10):
    l = random.randint(750, 1000)
    x = np.arange(0, l)
    #Baseline noise and curvature
    bn = [random.randint(0, 5) for m in x]
    #bcx=random.randint(25, 50)
    #tau=random.randint(100, 200)
    bc1 = random.randint(25, 50) * signal.exponential(
        l, center=0, tau=random.randint(100, 200), sym=False)
    bc2 = random.randint(
        50,
        100) + random.randint(0, 50) * np.sin(0.01 * x + random.randint(0, 50))
    #Define number of low intensity background peaks
    bspn = l // 3
    #Randomize peak height, peak position
    bsp = [0] * l
    for m in range(bspn):
        bsptemp = random.randint(0, 10) * signal.exponential(
            l,
            center=random.randint(0, l),
            tau=random.randint(1, 2),
            sym=False)
        bsp = bsp + bsptemp
    # Define number of prominent puncta
    pn = random.randint(l // 25, l // 20)
    #Randomize peak height and peak position
    gpeaks = [0] * l
Code example #12
def add_signal(X, signal_dict):
    """
    """
    n = X.shape[0]
    length = signal_dict['length']
    position = signal_dict['position'] + signal_dict['extra_shift']
    amp = signal_dict['amp'] * signal_dict['sign']
    signal_type = signal_dict['signal_type']

    if signal_type == 'gaussian':
        # Gaussian peak.
        # Center of gaussian will be placed at given position.
        x0 = position - int(length / 2)
        x1 = x0 + length
        dx0 = -x0 * (x0 < 0)
        dx1 = (n - x1) * (x1 >= n)
        X[x0 + dx0:x1 + dx1] += amp * scipysig.gaussian(
            length, std=length / 7)[dx0:(x1 - x0 + dx1)]
    elif signal_type == 'wave':
        # Two gaussian peaks in different directions ("wave").
        # Center will be placed at given position.
        x0 = position - int(length / 2)
        x1 = x0 + length
        dx0 = -x0 * (x0 < 0)
        dx1 = (n - x1) * (x1 >= n)
        signal = np.zeros((length))
        signal[:int(0.7 * length)] += amp * scipysig.gaussian(
            int(0.7 * length), std=length / 10)
        signal[-int(0.7 * length):] -= amp * scipysig.gaussian(
            int(0.7 * length), std=length / 10)
        X[x0 + dx0:x1 + dx1] += signal[dx0:(x1 - x0 + dx1)]
    elif signal_type == 'exponential':
        # Sudden peak + exponential decay.
        # Peak will be placed at given position.
        x0 = position
        x1 = x0 + length
        dx1 = (n - x1) * (x1 >= n)
        X[x0:x1 + dx1] += amp * scipysig.exponential(length, 0, length / 5,
                                                     False)[:(x1 - x0 + dx1)]
    elif signal_type == 'peak_exponential':
        # Peak with two exponential flanks.
        # Center of peak will be placed at given position.
        x0 = position - int(length / 2)
        x1 = x0 + length
        dx0 = -x0 * (x0 < 0)
        dx1 = (n - x1) * (x1 >= n)
        X[x0 + dx0:x1 + dx1] += amp * scipysig.exponential(
            length, tau=length / 10)[dx0:(x1 - x0 + dx1)]
    elif signal_type == 'triangle':
        # Triangular peak.
        # Center of peak will be placed at given position.
        x0 = position - int(length / 2)
        x1 = x0 + length
        dx0 = -x0 * (x0 < 0)
        dx1 = (n - x1) * (x1 >= n)
        X[x0 + dx0:x1 +
          dx1] += amp * scipysig.triang(length)[dx0:(x1 - x0 + dx1)]
    elif signal_type == 'box':
        # Box peak.
        # Center of peak will be placed at given position.
        x0 = position - int(length / 2)
        x1 = x0 + length
        dx0 = -x0 * (x0 < 0)
        dx1 = (n - x1) * (x1 >= n)
        X[x0 + dx0:x1 + dx1] += amp * np.ones((length))[dx0:(x1 - x0 + dx1)]
    else:
        print("Signal type not found.")
        X = None

    return X
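A hypothetical call (the dict keys are exactly the ones the function reads; scipysig is assumed to be scipy.signal):

import numpy as np
from scipy import signal as scipysig

X = np.zeros(500)
spec = {'length': 80, 'position': 200, 'extra_shift': 0,
        'amp': 1.0, 'sign': 1, 'signal_type': 'exponential'}
X = add_signal(X, spec)  # sudden peak at index 200 with an exponential tail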
Code example #13
# Plot the symmetric window and its frequency response:

import numpy as np
from scipy import signal
from scipy.fftpack import fft, fftshift
import matplotlib.pyplot as plt

M = 51
tau = 3.0
window = signal.exponential(M, tau=tau)
plt.plot(window)
plt.title("Exponential Window (tau=3.0)")
plt.ylabel("Amplitude")
plt.xlabel("Sample")

plt.figure()
A = fft(window, 2048) / (len(window)/2.0)
freq = np.linspace(-0.5, 0.5, len(A))
response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
plt.plot(freq, response)
plt.axis([-0.5, 0.5, -35, 0])
plt.title("Frequency response of the Exponential window (tau=3.0)")
plt.ylabel("Normalized magnitude [dB]")
plt.xlabel("Normalized frequency [cycles per sample]")

# This function can also generate non-symmetric windows:

tau2 = -(M-1) / np.log(0.01)
window2 = signal.exponential(M, 0, tau2, False)
plt.figure()
plt.plot(window2)
plt.ylabel("Amplitude")
Code example #14
    binned_idx = int(divmod(
        t / ms, tres_input)[0])  # the whole part is the idx  (t0 = 0th bin)
    input_current_components[spike_id][binned_idx] += 1
# binned
plt.figure()
plt.imshow(input_current_components, aspect='auto')
plt.title('poisson activity after binning')
plt.show()

# convolve with exponentials  (todo it would be cool to convolve with a depressing burst)
tau_filterInSamples = tau_input / tres_input
RANGE_FRACTION = 0.01  # cutoff the exp function when this much of the y-range is left
M = int(-(
    tau_filterInSamples * np.log(RANGE_FRACTION) - 1
))  # include 99% of the range and solve for an integer number of samples ... todo account for dt
exp_kernel = signal.exponential(M, 0, tau_filterInSamples, False)
exp_kernel = exp_kernel * 1.0 / np.sum(exp_kernel)  # normalize so the kernel sums to 1
plt.figure()
plt.plot(exp_kernel)
plt.title('filtering kernel for input currents')
plt.show()

# = N_poisson, N_targets.T   **   N_poisson, T
input_current_components = w_poisson_out * filt.convolve1d(
    input_current_components, exp_kernel,
    axis=1)  # oops, this probably wasn't matrix multiplication

for i_target in range(N_targets):  # row in input cells
    for i_source in range(N_input):
        if input_connectivity[i_source][i_target] > 0:
            input_conductances[i_target][:] += input_current_components[
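This excerpt is a fragment of a larger input-generation routine; the full function it comes from appears as Code example #18 below, where the same binning, kernel construction, and convolution steps occur in context.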
Code example #15
def main(argv):
    parser = argparse.ArgumentParser(description=argv[0] +
                                     ': Evaluation of a task')
    parser.add_argument("seed",
                        nargs="?",
                        type=int,
                        help="seed used",
                        default=None)
    parser.add_argument(
        "-o",
        nargs=1,
        type=str,
        help="path to the file where the absolute erro will be saved")
    parser.add_argument(
        "-r",
        nargs="?",
        type=str,
        help="path to the file where the real output will be saved")
    parser.add_argument(
        "-d",
        nargs="?",
        type=str,
        help="path to the file where the desired output will be saved")
    parser.add_argument("-i",
                        nargs="?",
                        type=str,
                        help="path to the file where the input will be saved")
    args = parser.parse_args(argv[1:])
    # Parameters
    # --------------------------------------------------------------------------
    seed = args.seed  # Predefined seed, if None one will be chosen
    seed, rstate = set_seed(seed=seed, verbose=1)  # Seed used
    n_res = 100  # Number of reservoir tested
    M = 10
    tau = 5.0
    window = signal.exponential(M, tau=tau)
    window = window / np.sum(window)
    # Training parameters
    # --------------------------------------------------------------------------
    n_samples_train = 100  # Number of samples
    sample_size_train = 100  # Sample temporal size
    n_trigger_train = 5  # Number of trigger in each sample
    trigger_earliest_train = 30  # Earliest allowed time for trigger (warmup)
    trigger_latest_train = 50  # Latest allowed time for trigger
    # Test parameters
    # ----------------------------------------------------------------------
    n_samples_test = 100  # Number of samples
    sample_size_test = 200  # Sample temporal size
    n_trigger_test = 5  # Number of trigger in each sample
    trigger_earliest_test = 30  # Earliest allowed time for trigger (warmup)
    trigger_latest_test = 50  # Latest allowed time for trigger
    # Network hyper parameters
    # ----------------------------------------------------------------------
    n_unit = 100  # Number of unit in the reservoir
    n_input = 2  # Number of input
    n_output = 2  # Number of output
    leak = 5.17e-4  # Leak rate
    radius = 1.15e-2  # Spectral radius
    s_input = 1.39  # Input scaling
    s_fb = 1.54e-2  # Feedback scaling
    ridge = 6.55e-15  # Regularization coefficient in the ridge regression

    # Build weights
    # --------------------------------------------------------------------------
    res_seeds = rstate.randint(4294967296, size=(n_res, ))
    W_in = np.empty((n_res, n_unit, n_input))
    W_fb = np.empty((n_res, n_unit, n_output))
    W = np.empty((n_res, n_unit, n_unit))
    C_in = np.empty((n_res, n_unit))
    for r in range(n_res):
        res_seed, res_rstate = set_seed(res_seeds[r])
        # Input weight matrix
        W_in[r] = (res_rstate.uniform(size=(n_unit, n_input)) -
                   0.5) * (2 * s_input)
        # Feedback weight matrix
        W_fb[r] = (res_rstate.uniform(size=(n_unit, n_output)) - 0.5) * (2 *
                                                                         s_fb)
        # Recurrent weight matrix
        W[r] = res_rstate.uniform(size=(n_unit, n_unit)) - 0.5
        actual_radius = max(abs(np.linalg.eig(W[r])[0]))
        if actual_radius > 0.:
            W[r] *= radius / actual_radius
        else:
            raise NameError("Null spectral radius")
        # Input bias matrix
        C_in[r] = (res_rstate.uniform(size=(n_unit)) - 0.5) * s_input
    W_out = np.empty((n_res, n_output, n_unit))
    C_out = np.empty((n_res, n_output))
    # Build Training Samples
    # --------------------------------------------------------------------------
    # Inputs
    inputs = np.empty((n_samples_train, sample_size_train, n_input))
    # Data to store
    inputs[:, :, 0] = rstate.uniform(low=-1.,
                                     high=1.,
                                     size=(n_samples_train, sample_size_train))
    # Triggers
    inputs[:, :, 1] = 0
    trigger_times = np.empty((n_samples_train, n_trigger_train),
                             dtype=np.int64)
    for k in range(n_samples_train):
        trigger_times[k] = np.sort(
            rstate.permutation(
                np.arange(trigger_earliest_train,
                          trigger_latest_train))[:n_trigger_train])
        inputs[k, trigger_times[k], 1] = 1
        # Desired outputs
    desired_outputs = np.zeros((n_samples_train, sample_size_train, n_output))
    for k in range(n_samples_train):
        for l in range(n_trigger_train):
            desired_outputs[k, trigger_times[k, l]:,
                            0] = inputs[k, trigger_times[k, l], 0]
            desired_outputs[k, trigger_times[k, l]:,
                            1] = inputs[k, trigger_times[k, l],
                                        0] * inputs[k, trigger_times[k, l]:, 0]

    # Offline learning
    # --------------------------------------------------------------------------
    n_training_points = (sample_size_train -
                         trigger_earliest_train) * n_samples_train
    X = np.empty((1 + n_unit, n_training_points))
    Y = np.empty((n_output, n_training_points))
    internal = np.empty((sample_size_train, n_unit))
    for r in trange(n_res, desc="Training reservoirs"):
        index = 0
        for i in range(inputs.shape[0]):
            input_ = inputs[i]
            output = desired_outputs[i]
            internal[0] = leak * np.tanh(np.dot(W_in[r], input_[0]) + C_in[r])
            for t in range(1, input_.shape[0]):
                internal[t] = np.tanh(
                    np.dot(W[r], internal[t - 1]) +
                    np.dot(W_in[r], input_[t]) +
                    np.dot(W_fb[r], output[t - 1]) + C_in[r])
                internal[t] = leak * internal[t] + (1 - leak) * internal[t - 1]
                if t >= trigger_earliest_train:
                    X[:, index] = np.concatenate([[1.0], internal[t]])
                    Y[:, index] = output[t]
                    index += 1
        assert (index == n_training_points)
        A = np.dot(
            Y,
            np.dot(
                X.T,
                np.linalg.inv(
                    np.dot(X, X.T) + (ridge**2) * np.identity(X.shape[0]))))
        C_out[r] = A[:, 0]
        W_out[r] = A[:, 1:]

    # Build Testing Samples
    # --------------------------------------------------------------------------
    # Inputs
    inputs = np.empty((n_samples_test, sample_size_test, n_input))
    # Data to store
    random_inputs = rstate.uniform(low=-1.,
                                   high=1.,
                                   size=(n_samples_test,
                                         sample_size_test + M - 1))
    for i in range(n_samples_test):
        inputs[i, :, 0] = signal.convolve(random_inputs[i],
                                          window,
                                          mode="valid")
        # Triggers
    inputs[:, :, 1] = 0
    trigger_times = np.empty((n_samples_test, n_trigger_test), dtype=np.int64)
    for k in range(n_samples_test):
        trigger_times[k] = np.sort(
            rstate.permutation(
                np.arange(trigger_earliest_test,
                          trigger_latest_test))[:n_trigger_test])
        inputs[k, trigger_times[k], 1] = 1
        # Desired outputs
    desired_outputs = np.zeros((n_samples_test, sample_size_test, n_output))
    for k in range(n_samples_test):
        for l in range(n_trigger_test):
            desired_outputs[k, trigger_times[k, l]:,
                            0] = inputs[k, trigger_times[k, l], 0]
            desired_outputs[k, trigger_times[k, l]:,
                            1] = inputs[k, trigger_times[k, l],
                                        0] * inputs[k, trigger_times[k, l]:, 0]

    # Testing
    # --------------------------------------------------------------------------
    internals = np.empty((n_samples_test, sample_size_test, n_unit))
    outputs = np.empty((n_samples_test, sample_size_test, n_output))
    abs_error = np.empty((n_res, n_samples_test, sample_size_test, n_output))
    print("{:s} will approximately take {:f} GB".format(
        args.o[0],
        (n_res * n_samples_test * sample_size_test * n_output * 4) / (2**30)))
    for r in trange(n_res, desc="Testing reservoirs"):
        for i in trange(n_samples_test, desc="Samples tested"):
            internals[
                i,
                0] = leak * np.tanh(np.dot(W_in[r], inputs[i, 0]) + C_in[r])
            outputs[i, 0] = np.dot(W_out[r], internals[i, 0]) + C_out[r]
            for n in range(1, sample_size_test):
                internals[i, n] = np.tanh(
                    np.dot(W[r], internals[i, n - 1]) +
                    np.dot(W_fb[r], outputs[i, n - 1]) +
                    np.dot(W_in[r], inputs[i, n]) + C_in[r])
                internals[i, n] = (
                    1 - leak) * internals[i, n - 1] + leak * internals[i, n]
                outputs[i, n] = np.dot(W_out[r], internals[i, n]) + C_out[r]
        abs_error[r] = np.abs(outputs - desired_outputs)

    np.save(args.o[0], abs_error)

    if args.r is not None:
        np.save(args.r, outputs)
    if args.d is not None:
        np.save(args.d, desired_outputs)
    if args.i is not None:
        np.save(args.i, inputs)

    rms = np.sqrt(np.sum(abs_error**2, axis=(2, 3)) / (abs_error.shape[2]))

    mean_rms = np.mean(rms)
    std_rms = np.std(rms)

    print("Error: {:e} ± {:e}".format(mean_rms, std_rms))
Code example #16
        if a != b:
            if a in IMPORTANT_A:
                res += IMP_WEIGHT * monotonic
            else:
                res += NOT_IMP_WEIGHT * monotonic
        prev = a
    return res


rho = 0.9
N = 300
SIGNALS = (0,1)

# best_score = {'small':0, 'big':0, 'score':100}

window = scs.exponential(15)

start = time.time()

rho_num = True

for rho in range(50,100,5):
    rho = rho/100
    smalls = []
    bigs = []
    scores = []
    # ex_scores = []
    for _ in range(10):
        rho_num = True
        best_score = {'small':0, 'big':0, 'score':100}
        for sm in range(50,100,2):
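The snippet is cut at both ends, but scs is presumably scipy.signal imported under an alias, so scs.exponential(15) is the default symmetric window: 15 samples peaking at 1.0 in the middle and decaying with tau = 1 toward both edges.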
Code example #17
import numpy as np
from scipy import signal
from scipy.fftpack import fft, fftshift
import matplotlib.pyplot as plt
from scipy.io import wavfile
fs, data = wavfile.read('aaa.wav')
M = 44100 * 6
tau2 = -(M - 1) / np.log(0.02)
window1 = signal.exponential(M, 0, tau2, False)
window2 = 1 - window1
#window1 = signal.gaussian(M, std=7)
#window2=1-window1
plt.subplot(211)
plt.plot(window1)
plt.subplot(212)
plt.plot(window2)
plt.show()
channel1 = data[:, 0]  #/max(abs(data[:,0]))
channel2 = data[:, 1]  #/max(abs(data[:,1]))
#np.savetxt('channel.txt',channel1)
channel1_8d = np.zeros(len(channel1))
channel2_8d = np.zeros(len(channel1))
i = 0
l = 0
#channel2_8d[0:l]=channel2[0:l]
while i <= len(channel1):
    if len(channel1[i:i + M]) == M:
        channel1_8d[i:i + M] = window1 * channel1[i:i + M]
        channel2_8d[l:l + M] = window2 * channel2[l:l + M]
    #print channel1_8d[i:i+M]
    t = window1
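window2 = 1 - window1 makes the two windows a complementary pair that sums to one everywhere, so over each M-sample block (six seconds at 44.1 kHz) channel 1 fades out exactly as channel 2 fades in. The excerpt ends before i and l are advanced, which is why the loop increments are not visible here.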
Code example #18
def generate_inputs(target_dir, save_postfix, verboseplot=False):

    #target_dir = '../simulations/3-2-2017/'
    #save_postfix = "2-28-2017 inputs.json"
    save_name = target_dir + save_postfix

    # design parameters
    N_input = 200  # number of poisson thalamic inputs
    inputMean = 10 * Hz  # hz # for the Poisson rates
    my_rates = np.random.uniform(
        0, inputMean / Hz * 2, N_input
    ) * Hz  # draw a uniform bag of firing rates, idealizing stimulus tuning (would be better to use a long-tailed distribution of rates)
    tau_input = 3  # ms # todo it would be nice to use a depressing burst

    w_poisson_out = 500  # scaling to get the conductances into the biologically plausible range
    # nS

    DURATION = 50 * ms
    T_RES = 0.1 * ms
    N_targets = 1200  # WARNING this has to batched by hand to N_inputs in the network config files (todo fix this issue)
    p_target_receives_input = 0.05  # todo what level of input correlation does this induce (probably too much)

    # init
    input_connectivity = np.random.rand(N_input, N_targets)
    active_recipients = np.arange(N_targets)
    #for i_recipient in range(N_input):
    #    if active_recipients(i_recipient) < p_target_receives_input:
    #        active_recipients.append(i_recipient)

    for i_input in range(N_input):
        for i_target in active_recipients:
            if input_connectivity[i_input][i_target] < p_target_receives_input:
                input_connectivity[i_input][i_target] = 1 + 0.2 * np.random.rand(
                )  # a little bit of heterogeneity # todo think about this todo no negative weights
            else:
                input_connectivity[i_input][i_target] = 0

    # instantiate a poisson population

    PoissonInputGroup = PoissonGroup(N_input, my_rates)
    p_mon = SpikeMonitor(PoissonInputGroup)

    run(DURATION)

    if verboseplot:
        plt.figure()
        plt.plot(p_mon.t / ms, p_mon.i, '.k')
        plt.xlabel('Time (ms)')
        plt.ylabel('Neuron index')
        plt.title('population spiking')
        plt.show()

    tres_input = 0.1  # ms
    N_inputbins = int(DURATION / ms / tres_input) + 1
    input_conductances = np.zeros((N_targets, N_inputbins))  # init
    input_current_components = np.zeros((N_input, N_inputbins))

    for idx, t in enumerate(p_mon.t):
        spike_id = p_mon.i[idx]
        #t
        binned_idx = int(divmod(
            t / ms, tres_input)[0])  # the whole part is the idx  (t0 = 0th bin)
        input_current_components[spike_id][binned_idx] += 1
    # binned
    if verboseplot:
        plt.figure()
        plt.imshow(input_current_components, aspect='auto')
        plt.title('poisson activity after binning')
        plt.show()

    # convolve with exponentials  (todo it would be cool to convolve with a depressing burst)
    tau_filterInSamples = tau_input / tres_input
    RANGE_FRACTION = 0.01  # cutoff the exp function when this much of the y-range is left
    M = int(-(
        tau_filterInSamples * np.log(RANGE_FRACTION) - 1
    ))  # include 99% of the range and solve for an integer number of samples ... todo account for dt
    exp_kernel = signal.exponential(M, 0, tau_filterInSamples, False)
    exp_kernel = exp_kernel * 1.0 / np.sum(
        exp_kernel)  # normalize so the kernel sums to 1
    if verboseplot:
        plt.figure()
        plt.plot(exp_kernel)
        plt.title('filtering kernel for input currents')
        plt.show()

        # = N_poisson, N_targets.T   **   N_poisson, T
    input_current_components = w_poisson_out * filt.convolve1d(
        input_current_components, exp_kernel,
        axis=1)  # oops, this probably wasn't matrix multiplication

    for i_target in range(N_targets):  # row in input cells
        for i_source in range(N_input):
            if input_connectivity[i_source][i_target] > 0:
                input_conductances[i_target][:] += input_current_components[
                    i_source][:] * input_connectivity[i_source][
                        i_target]  # scale by the connection weight

    # plot
    if verboseplot:
        plt.figure()
        plt.imshow(input_conductances, aspect='auto')
        plt.title('sum input activity')
        plt.show()

        plt.figure()
        idx = 1
        plt.plot(input_conductances[idx][:])
        plt.title('neuron number ' + str(idx))
        plt.ylabel('input conductance (nS)')
        plt.show()

    C = 1  # get a rough idea of the induced voltage at rest
    V_clamp = 60  # mV
    E_input = 0  # mV
    input_currents = input_conductances * (V_clamp - E_input) / C

    if verboseplot:
        plt.figure()
        idx = 1
        plt.plot(input_currents[idx][:])
        plt.title('neuron number ' + str(idx))
        plt.ylabel('effective voltage induced (mV)')
        plt.show()

    # save as json

    savefile = open(save_name, 'w')
    save_object = {
        "input_conductances": input_conductances.tolist(),
        "dt": T_RES / ms,  #  "target_fraction":target_fraction,
        "p_target_receives_input": p_target_receives_input,
        "inputMean": inputMean / Hz,
        "tau_input": tau_input,  # "inputStd":inputStd/Hz,
        "N_targets": N_targets,
        "w_poisson": w_poisson_out
    }
    json.dump(save_object, savefile, sort_keys=True, indent=2)