# Feed the input sequence into the simulator RNN one timestep at a time.
feed_dict = {}
for t in xrange(ntimesteps):
  feed_dict[inputs_ph_t[t]] = np.reshape(u_1xt[:, t], (batch_size, -1))

# Run the RNN and collect the hidden states and outputs for this trial.
states_t_bxn, outputs_t_bxn = sess.run([states_t, outputs_t],
                                       feed_dict=feed_dict)
states_nxt = np.transpose(np.squeeze(np.asarray(states_t_bxn)))
outputs_t_bxn = np.squeeze(np.asarray(outputs_t_bxn))
# Project the hidden states through P_nxn to get the underlying rates.
r_sxt = np.dot(P_nxn, states_nxt)

# Replicate each trial so the same rates can be spikified multiple times.
for s in xrange(nreplications):
  data_e.append(r_sxt)
  u_e.append(u_1xt)
  outs_e.append(outputs_t_bxn)

# Normalize the rates, then sample spikes from them.
truth_data_e = normalize_rates(data_e, E, N)
spiking_data_e = spikify_data(truth_data_e, rng, dt=FLAGS.dt,
                              max_firing_rate=FLAGS.max_firing_rate)

# Split the examples (and their replications) into train and valid sets.
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
                                                nreplications)
data_train_truth, data_valid_truth = split_list_by_inds(truth_data_e,
                                                        train_inds,
                                                        valid_inds)
data_train_spiking, data_valid_spiking = split_list_by_inds(spiking_data_e,
                                                            train_inds,
                                                            valid_inds)

data_train_truth = nparray_and_transpose(data_train_truth)
data_valid_truth = nparray_and_transpose(data_valid_truth)
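# --- Illustrative sketch, not part of the original file ---
# spikify_data is defined elsewhere; given the dt and max_firing_rate
# arguments passed above, it presumably draws Poisson spike counts from
# the normalized rates. A minimal stand-in under that assumption (the
# name spikify_data_sketch and its exact scaling are hypothetical):
def spikify_data_sketch(rates_e, rng, dt=0.01, max_firing_rate=30.0):
  """Sample Poisson spikes from a list of rate matrices (each n x t).

  Args:
    rates_e: list of numpy arrays of normalized rates in [0, 1].
    rng: a numpy.random.RandomState instance.
    dt: bin width in seconds.
    max_firing_rate: rate (Hz) that a normalized rate of 1.0 maps to.

  Returns:
    A list of integer spike-count arrays, one per rate matrix.
  """
  spikes_e = []
  for rates_nxt in rates_e:
    # Convert normalized rates to expected spikes per bin, then sample.
    spikes_nxt = rng.poisson(max_firing_rate * dt * rates_nxt)
    spikes_e.append(spikes_nxt)
  return spikes_e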