# Setup for the labeled two-RNN dataset: two chaotic RNNs with identical
# hyperparameters but different random weights, plus a per-trial cue for
# which RNN generated the data.
flags.DEFINE_float("dt", 0.010, "Time bin")
flags.DEFINE_float("max_firing_rate", 30.0,
                   "Map 1.0 of RNN to a spikes per second")
FLAGS = flags.FLAGS

rng = np.random.RandomState(seed=FLAGS.synth_data_seed)
rnn_rngs = [np.random.RandomState(seed=FLAGS.synth_data_seed + 1),
            np.random.RandomState(seed=FLAGS.synth_data_seed + 2)]

T = FLAGS.T
C = FLAGS.C
N = FLAGS.N
nreplications = FLAGS.nreplications
E = nreplications * C  # total number of trials
train_percentage = FLAGS.train_percentage
ntimesteps = int(T / FLAGS.dt)

rnn_a = generate_rnn(rnn_rngs[0], N, FLAGS.g, FLAGS.tau, FLAGS.dt,
                     FLAGS.max_firing_rate)
rnn_b = generate_rnn(rnn_rngs[1], N, FLAGS.g, FLAGS.tau, FLAGS.dt,
                     FLAGS.max_firing_rate)
rnns = [rnn_a, rnn_b]

# Pick which RNN is used on each trial.
rnn_to_use = rng.randint(2, size=E)
ext_input = np.repeat(np.expand_dims(rnn_to_use, axis=1), ntimesteps, axis=1)
ext_input = np.expand_dims(ext_input, axis=2)  # these are "a's" in the paper

# Initial conditions: one random x0 per condition, shared across all of its
# replications, with a matching condition label for every trial.
x0s = []
condition_labels = []
condition_number = 0
for c in range(C):
  x0 = FLAGS.x0_std * rng.randn(N, 1)
  x0s.append(np.tile(x0, nreplications))
  for ns in range(nreplications):
    condition_labels.append(condition_number)
  condition_number += 1
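# A minimal sketch of how one trial could be simulated from this setup.  It
# assumes (hypothetically -- check generate_rnn's actual return value) that
# the rnn dict exposes 'W', 'tau', and 'dt', and that rates come from simple
# Euler integration of tanh dynamics; the repo's own data-generation routine
# may differ in its details.
def simulate_trial(rnn, x0, ntimesteps):
  """Euler-integrate dx/dt = (-x + W.tanh(x)) / tau from a length-N x0."""
  x = np.array(x0, dtype=float)
  xs = np.zeros((ntimesteps, x.shape[0]))
  for t in range(ntimesteps):
    x = x + (rnn['dt'] / rnn['tau']) * (-x + np.dot(rnn['W'], np.tanh(x)))
    xs[t] = x
  return 0.5 * (np.tanh(xs) + 1.0)  # normalized rates in (0, 1)

# E.g., the first replication of condition 0, driven by its assigned RNN:
# rates = simulate_trial(rnns[rnn_to_use[0]], x0s[0][:, 0], ntimesteps)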
# Setup for the multi-session dataset: one chaotic RNN whose N units are
# observed through several "datasets" of S units each.
rng = np.random.RandomState(seed=FLAGS.synth_data_seed)

T = FLAGS.T
C = FLAGS.C
N = FLAGS.N
S = FLAGS.S
input_magnitude = FLAGS.input_magnitude
nreplications = FLAGS.nreplications
E = nreplications * C  # total number of trials
# S is the number of measurements in each dataset, with each
# dataset having a different set of observations.
ndatasets = N // S  # ok if rounded down
train_percentage = FLAGS.train_percentage
ntime_steps = int(T / FLAGS.dt)
# End of user parameters

rnn = generate_rnn(rng, N, FLAGS.g, FLAGS.tau, FLAGS.dt,
                   FLAGS.max_firing_rate)

# Check to make sure the RNN is the one we used in the paper.
if N == 50:
  assert abs(rnn['W'][0, 0] - 0.06239899) < 1e-8, 'Error in random seed?'

rem_check = nreplications * train_percentage
assert abs(rem_check - int(rem_check)) < 1e-8, \
    'Train percentage * nreplications should be an integral number.'

# Initial condition generation, and condition label generation.  This
# happens outside of the dataset loop, so that all datasets have the
# same conditions, which is similar to a neurophys setup.
condition_number = 0
x0s = []
condition_labels = []
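# A sketch (an assumption, not necessarily the repo's exact scheme) of how
# the N units might be split into ndatasets disjoint "recordings" of S units
# each: permute the unit indices once, then carve off consecutive chunks.
def split_units_into_datasets(rng, N, S, ndatasets):
  """Return ndatasets disjoint index arrays of S units each."""
  unit_perm = rng.permutation(N)
  return [unit_perm[d * S:(d + 1) * S] for d in range(ndatasets)]

# E.g., dataset_idxs = split_units_into_datasets(rng, N, S, ndatasets);
# dataset_idxs[d] then selects the S units observed in dataset d, as in
# rates_d = rates_all[:, dataset_idxs[d]] for an (ntime_steps, N) rate array.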
flags.DEFINE_float("dt", 0.010, "Time bin") flags.DEFINE_float("max_firing_rate", 30.0, "Map 1.0 of RNN to a spikes per second") FLAGS = flags.FLAGS rng = np.random.RandomState(seed=FLAGS.synth_data_seed) rnn_rngs = [np.random.RandomState(seed=FLAGS.synth_data_seed+1), np.random.RandomState(seed=FLAGS.synth_data_seed+2)] T = FLAGS.T C = FLAGS.C N = FLAGS.N nreplications = FLAGS.nreplications E = nreplications * C train_percentage = FLAGS.train_percentage ntimesteps = int(T / FLAGS.dt) rnn_a = generate_rnn(rnn_rngs[0], N, FLAGS.g, FLAGS.tau, FLAGS.dt, FLAGS.max_firing_rate) rnn_b = generate_rnn(rnn_rngs[1], N, FLAGS.g, FLAGS.tau, FLAGS.dt, FLAGS.max_firing_rate) rnns = [rnn_a, rnn_b] # pick which RNN is used on each trial rnn_to_use = rng.randint(2, size=E) ext_input = np.repeat(np.expand_dims(rnn_to_use, axis=1), ntimesteps, axis=1) ext_input = np.expand_dims(ext_input, axis=2) # these are "a's" in the paper x0s = [] condition_labels = [] condition_number = 0 for c in range(C): x0 = FLAGS.x0_std * rng.randn(N, 1) x0s.append(np.tile(x0, nreplications))