r_sxt = np.dot(P_nxn, states_nxt)
for s in xrange(nspikifications):
  data_e.append(r_sxt)
  u_e.append(u_1xt)
  outs_e.append(outputs_t_bxn)

truth_data_e = normalize_rates(data_e, E, N)

spiking_data_e = spikify_data(truth_data_e, rng, dt=FLAGS.dt,
                              max_firing_rate=FLAGS.max_firing_rate)
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
                                                nspikifications)

data_train_truth, data_valid_truth = split_list_by_inds(truth_data_e,
                                                        train_inds, valid_inds)
data_train_spiking, data_valid_spiking = split_list_by_inds(spiking_data_e,
                                                            train_inds,
                                                            valid_inds)

data_train_truth = nparray_and_transpose(data_train_truth)
data_valid_truth = nparray_and_transpose(data_valid_truth)
data_train_spiking = nparray_and_transpose(data_train_spiking)
data_valid_spiking = nparray_and_transpose(data_valid_spiking)

# save down the inputs used to generate this data
train_inputs_u, valid_inputs_u = split_list_by_inds(u_e, train_inds,
                                                    valid_inds)
train_inputs_u = nparray_and_transpose(train_inputs_u)
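# NOTE: illustrative sketch only, not this repo's implementation. The call to
# normalize_rates above is assumed to rescale each of the N channels of every
# [N x T] rate matrix to [0, 1] (per-channel min-max), so the rates can later
# be scaled by max_firing_rate when spikified. The _sketch name is
# hypothetical.
import numpy as np

def normalize_rates_sketch(data_e, E, N):
  """Min-max normalize each of the N channels of the E rate matrices."""
  for e in range(E):
    r_nxt = data_e[e]
    for n in range(N):
      rmin, rmax = np.min(r_nxt[n, :]), np.max(r_nxt[n, :])
      assert rmax > rmin, 'Channel %d of trial %d is constant.' % (n, e)
      r_nxt[n, :] = (r_nxt[n, :] - rmin) / (rmax - rmin)
    data_e[e] = r_nxt
  return data_e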
                  input_times=input_times)

if FLAGS.noise_type == "poisson":
  noisy_data = spikify_data(rates, rng, rnn['dt'], rnn['max_firing_rate'])
elif FLAGS.noise_type == "gaussian":
  noisy_data = gaussify_data(rates, rng, rnn['dt'], rnn['max_firing_rate'])
else:
  raise ValueError("Only noise types supported are poisson or gaussian")

# split into train and validation sets
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
                                                nreplications)

# Split the data, inputs, labels and times into train vs. validation.
rates_train, rates_valid = \
    split_list_by_inds(rates, train_inds, valid_inds)
noisy_data_train, noisy_data_valid = \
    split_list_by_inds(noisy_data, train_inds, valid_inds)
input_train, inputs_valid = \
    split_list_by_inds(inputs, train_inds, valid_inds)
condition_labels_train, condition_labels_valid = \
    split_list_by_inds(condition_labels, train_inds, valid_inds)
input_times_train, input_times_valid = \
    split_list_by_inds(input_times, train_inds, valid_inds)

# Turn rates, noisy_data, and input into numpy arrays.
rates_train = nparray_and_transpose(rates_train)
rates_valid = nparray_and_transpose(rates_valid)
noisy_data_train = nparray_and_transpose(noisy_data_train)
noisy_data_valid = nparray_and_transpose(noisy_data_valid)
input_train = nparray_and_transpose(input_train)
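# NOTE: illustrative sketches only, not this repo's implementations. The
# poisson branch above is assumed to draw per-bin Poisson spike counts from
# rates scaled by max_firing_rate * dt, and the gaussian branch to add
# Gaussian noise of comparable magnitude to the same scaled rates. The
# _sketch names are hypothetical.
import numpy as np

def spikify_data_sketch(rates_e, rng, dt, max_firing_rate):
  """Poisson spike counts for each [N x T] normalized rate matrix."""
  spikes_e = []
  for rates_nxt in rates_e:
    lam = rates_nxt * max_firing_rate * dt  # expected counts per bin
    spikes_e.append(rng.poisson(lam).astype(np.int32))
  return spikes_e

def gaussify_data_sketch(rates_e, rng, dt, max_firing_rate):
  """Gaussian-noised rates with noise std matched to the Poisson scale."""
  noisy_e = []
  for rates_nxt in rates_e:
    lam = rates_nxt * max_firing_rate * dt
    noise = rng.normal(scale=np.sqrt(np.maximum(lam, 1e-8)), size=lam.shape)
    noisy_e.append(lam + noise)
  return noisy_e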
r_sxt = np.dot(P_nxn, states_nxt)
for s in xrange(nreplications):
  data_e.append(r_sxt)
  u_e.append(u_1xt)
  outs_e.append(outputs_t_bxn)

truth_data_e = normalize_rates(data_e, E, N)

spiking_data_e = spikify_data(truth_data_e, rng, dt=FLAGS.dt,
                              max_firing_rate=FLAGS.max_firing_rate)
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
                                                nreplications)

data_train_truth, data_valid_truth = split_list_by_inds(truth_data_e,
                                                        train_inds, valid_inds)
data_train_spiking, data_valid_spiking = split_list_by_inds(spiking_data_e,
                                                            train_inds,
                                                            valid_inds)

data_train_truth = nparray_and_transpose(data_train_truth)
data_valid_truth = nparray_and_transpose(data_valid_truth)
data_train_spiking = nparray_and_transpose(data_train_spiking)
data_valid_spiking = nparray_and_transpose(data_valid_spiking)

# save down the inputs used to generate this data
train_inputs_u, valid_inputs_u = split_list_by_inds(u_e, train_inds,
                                                    valid_inds)
train_inputs_u = nparray_and_transpose(train_inputs_u)
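# NOTE: illustrative sketches only, with assumed behavior for the two split
# helpers used above: get_train_n_valid_inds is taken to return disjoint index
# lists over range(E) with roughly train_percentage of each group of
# nreplications trials assigned to training, and split_list_by_inds to gather
# list elements by those indices. The _sketch names are hypothetical.
def get_train_n_valid_inds_sketch(num_trials, train_fraction, nreplications):
  """Assign the first train_fraction of each replication group to training."""
  train_inds, valid_inds = [], []
  for i in range(num_trials):
    if (i % nreplications) < train_fraction * nreplications:
      train_inds.append(i)
    else:
      valid_inds.append(i)
  return train_inds, valid_inds

def split_list_by_inds_sketch(data, inds1, inds2):
  """Gather the elements of a list into two sublists by index."""
  if data is None or len(data) == 0:
    return [], []
  return [data[i] for i in inds1], [data[i] for i in inds2]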
# not the best way to do this but E is small enough
rates = []
spikes = []
for trial in xrange(E):
  if rnn_to_use[trial] == 0:
    rates.append(rates_a[trial])
    spikes.append(spikes_a[trial])
  else:
    rates.append(rates_b[trial])
    spikes.append(spikes_b[trial])

# split into train and validation sets
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
                                                nreplications)
rates_train, rates_valid = split_list_by_inds(rates, train_inds, valid_inds)
spikes_train, spikes_valid = split_list_by_inds(spikes, train_inds, valid_inds)
condition_labels_train, condition_labels_valid = split_list_by_inds(
    condition_labels, train_inds, valid_inds)
ext_input_train, ext_input_valid = split_list_by_inds(ext_input, train_inds,
                                                      valid_inds)

rates_train = nparray_and_transpose(rates_train)
rates_valid = nparray_and_transpose(rates_valid)
spikes_train = nparray_and_transpose(spikes_train)
spikes_valid = nparray_and_transpose(spikes_valid)

# add train_ext_input and valid_ext_input
data = {'train_truth': rates_train,
        'valid_truth': rates_valid,
# not the best way to do this but E is small enough
rates = []
spikes = []
for trial in xrange(E):
  if rnn_to_use[trial] == 0:
    rates.append(rates_a[trial])
    spikes.append(spikes_a[trial])
  else:
    rates.append(rates_b[trial])
    spikes.append(spikes_b[trial])

# split into train and validation sets
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
                                                nreplications)
rates_train, rates_valid = split_list_by_inds(rates, train_inds, valid_inds)
spikes_train, spikes_valid = split_list_by_inds(spikes, train_inds, valid_inds)
condition_labels_train, condition_labels_valid = split_list_by_inds(
    condition_labels, train_inds, valid_inds)
ext_input_train, ext_input_valid = split_list_by_inds(
    ext_input, train_inds, valid_inds)

rates_train = nparray_and_transpose(rates_train)
rates_valid = nparray_and_transpose(rates_valid)
spikes_train = nparray_and_transpose(spikes_train)
spikes_valid = nparray_and_transpose(spikes_valid)

# add train_ext_input and valid_ext_input
data = {'train_truth': rates_train,
        'valid_truth': rates_valid,
        'train_data': spikes_train,
else:
  input_times = None

rates, x0s, inputs = \
    generate_data(rnn, T=T, E=E, x0s=x0s, P_sxn=P_sxn,
                  input_magnitude=input_magnitude,
                  input_times=input_times)

spikes = spikify_data(rates, rng, rnn['dt'], rnn['max_firing_rate'])

# split into train and validation sets
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
                                                nspikifications)

# Split the data, inputs, labels and times into train vs. validation.
rates_train, rates_valid = \
    split_list_by_inds(rates, train_inds, valid_inds)
spikes_train, spikes_valid = \
    split_list_by_inds(spikes, train_inds, valid_inds)
input_train, inputs_valid = \
    split_list_by_inds(inputs, train_inds, valid_inds)
condition_labels_train, condition_labels_valid = \
    split_list_by_inds(condition_labels, train_inds, valid_inds)
input_times_train, input_times_valid = \
    split_list_by_inds(input_times, train_inds, valid_inds)

# Turn rates, spikes, and input into numpy arrays.
rates_train = nparray_and_transpose(rates_train)
rates_valid = nparray_and_transpose(rates_valid)
spikes_train = nparray_and_transpose(spikes_train)
spikes_valid = nparray_and_transpose(spikes_valid)
input_train = nparray_and_transpose(input_train)
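# NOTE: illustrative sketch only, with an assumed shape convention:
# nparray_and_transpose is used above as if it stacks a list of E matrices of
# shape [N x T] into one array and swaps the last two axes, giving the
# [E x T x N] (trials x time x neurons) layout expected downstream. The
# _sketch name is hypothetical.
import numpy as np

def nparray_and_transpose_sketch(data_a_b_c):
  """Stack a list of [b x c] arrays into a single [a x c x b] numpy array."""
  data_axbxc = np.array(data_a_b_c)
  return np.transpose(data_axbxc, axes=[0, 2, 1])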