num_trials=num_alpha, prefix=te_prefix, inp=inp, noise_amp=noise_test, run_time=run_time, N_AL=N_AL, train=False)

#==============================================================================================================
# Create Base Odors' Currents
#==============================================================================================================
I_arr = []

# Create all (num_odors_train) of the base odors
for i in range(num_odors_train):
    I = ex.get_rand_I(N_AL, p=0.33, I_inp=inp) * nA
    I_arr.append(I)

#==============================================================================================================
# Choose which base currents to mix
#==============================================================================================================
listOfNumbers = np.arange(0, 15, 1, dtype='int')  # Generate the indices in order (guarantees no duplicates)
np.random.shuffle(listOfNumbers)                  # Randomize the order of the indices
baseNum = listOfNumbers.reshape((5, 3))           # Reshape the shuffled indices into a usable array
print(baseNum)
np.savetxt('baseNumbersUsed.txt', baseNum)

#==============================================================================================================
# Run Base Odor and Mixture Odor Simulations
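# Illustrative sketch (not part of the original script): one plausible way to turn each row of baseNum into a
# mixture current by averaging the three selected base odors. The name I_mix_arr and the equal-weight average
# are assumptions for illustration, not necessarily the mixing rule used elsewhere in this project.
I_mix_arr = []
for row in baseNum:
    # each row holds the indices of three base odors; combine their injected currents with an equal-weight average
    I_mix = (I_arr[row[0]] + I_arr[row[1]] + I_arr[row[2]]) / 3.0
    I_mix_arr.append(I_mix)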
# Debug helper: print the injected current of neuron 0 every 5 ms of simulated time
@network_operation(dt=5 * ms)
def f2(t):
    print(G_AL.I_inj[0])

net.add(f2)

# random input
num_classes = 2
samples_per_class = 1
n_samples = int(samples_per_class * num_classes)

p_inj = 0.3
X = np.zeros((num_classes, N_AL))
for j in range(num_classes):
    X[j, :] = ex.get_rand_I(N_AL, p_inj, 1)

# troubleshooting array
#test_array = np.zeros(N_AL)
#test_array[0] = 0.5
#test_array[999] = 1.0

# set tstart!
tstart = reset_time * ms

# Run random input with gradual current
for i in range(n_samples):
    # turn off all neurons
    G_AL.active_ = 0
    net.run(reset_time * ms)
I_arr = []

# Use previously injected training currents if they exist
inj_curr = os.path.exists(tr_prefix + 'I_0.npy')

if inj_curr:
    print('Using previous odors...')
    for i in range(num_odors):
        I_arr.append(np.load(tr_prefix + 'I_' + str(i) + '.npy'))
else:
    # create the base odors
    for i in range(num_odors):
        #I = ex.get_rand_I(N_AL, p = np.random.uniform(0.1, 0.5), I_inp = inp)*nA
        # Editing for Henry
        I = ex.get_rand_I(N_AL, p = p_inj, I_inp = inp)*nA
        I_arr.append(I)

# Since we don't rescale the currents when saving, this adjustment is necessary
I_arr = np.asarray(I_arr)/1e-9*nA

# example of running odors 5-10
#I_arr = I_arr[num_odors:]

run_params_train = dict(num_odors = num_odors,
                        num_trials = num_train,
                        prefix = tr_prefix,
                        inp = inp,
                        noise_amp = noise_amp,
                        run_time = run_time,
                        N_AL = N_AL,
               neuron_class=nm.n_HH,
               syn_class=nm.s_GABA_inh,
               PAL=PAL,
               mon=['V'])

# create the network object
net = Network()

G_AL, S_AL, trace_AL, spikes_AL = lm.get_AL(al_para, net)

net.store()

for k in range(3):
    net.restore()

    I = (ex.get_rand_I(N_AL, p=0.33, I_inp=15) + 15) * uA / cm**2
    G_AL.I_inj = I

    net.run(run_time, report='text')

    np.save(prefix + 'spikes_t_' + str(k), spikes_AL.t / ms)
    np.save(prefix + 'spikes_i_' + str(k), spikes_AL.i)
    np.save(prefix + 'I_' + str(k), I)
    np.save(prefix + 'trace_V_' + str(k), trace_AL.V)
    np.save(prefix + 'trace_t_' + str(k), trace_AL.t)

spikes_t_arr, spikes_i_arr, I_arr, trace_V_arr, trace_t_arr = anal.load_wlc_data(prefix, num_runs=3)

fig1 = plt.figure()
plt.plot(spikes_t_arr[0] / ms, spikes_i_arr[0], '.')
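# Minimal addition (assumption: the rest of the plotting lives outside this excerpt): label the raster so the
# figure is self-explanatory. Axis choices follow the plot call above (time in ms vs. AL neuron index).
plt.xlabel('time (ms)')
plt.ylabel('AL neuron index')
plt.title('AL spike raster, trial 0')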
inp = 1.0
noise_amp = 0.1*np.sqrt(3)   # max noise as a fraction of inp
noise_test = 0.1*np.sqrt(3)

# Different spatial injections
sp_odors = 1
num_odors = int(sp_odors*n_waveforms)

num_train = 1
num_test = 1

run_time = 100*ms

I_arr = []
# create the base odors
for i in range(sp_odors):
    I = ex.get_rand_I(N_AL, p = 1./3., I_inp = inp)
    I_arr.append(I)

run_params_train = dict(num_odors = num_odors,
                        n_waveforms = n_waveforms,
                        sp_odors = sp_odors,
                        num_trials = num_train,
                        prefix = tr_prefix,
                        inp = inp*nA,
                        noise_amp = noise_amp,
                        run_time = run_time,
                        N_AL = N_AL,
                        train = True)
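# Illustrative stand-in (assumption, not the project's ex.get_rand_I): the calls above appear to return a
# length-N current vector in which each neuron receives I_inp with probability p and 0 otherwise. The name
# rand_I_sketch is hypothetical and only meant to document that behavior.
def rand_I_sketch(N, p, I_inp):
    mask = np.random.rand(N) < p            # select roughly p*N neurons at random
    return I_inp * mask.astype(float)       # selected neurons get I_inp, the rest get 0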