Code Example #1
def create_dataset(self, sim_pos):
    """
    Creates a GradientData object by executing all behavioral choices at simulated positions once per second
    :param sim_pos: Previously created simulation trajectory
    :return: GradientData object with all necessary training in- and outputs
    """
    if sim_pos.shape[1] != 3:
        raise ValueError(
            "sim_pos has to be nx3 array with xpos, ypos and heading at each timepoint")
    history = GlobalDefs.frame_rate * GlobalDefs.hist_seconds
    start = history + 1  # start data creation with enough history present
    btypes = ["C", "S", "P", "L", "R"]
    # create vector that selects one position every second on average
    sel = np.random.rand(sim_pos.shape[0]) < (1 / GlobalDefs.frame_rate)
    sel[:start + 1] = False
    # initialize model inputs and outputs
    inputs = np.zeros((sel.sum(), 3, history), np.float32)
    outputs = np.zeros((sel.sum(), 5), np.float32)
    # loop over each position, simulating PRED_WINDOW into future to obtain real finish temperature
    curr_sel = 0
    for step in range(start, sim_pos.shape[0]):
        if not sel[step]:
            continue
        # obtain inputs at given step
        inputs[curr_sel, 0, :] = self.temperature(
            sim_pos[step - history + 1:step + 1, 0],
            sim_pos[step - history + 1:step + 1, 1])
        spd = np.sqrt(np.sum(np.diff(sim_pos[step - history:step + 1, 0:2], axis=0) ** 2, 1))
        inputs[curr_sel, 1, :] = spd
        inputs[curr_sel, 2, :] = np.diff(sim_pos[step - history:step + 1, 2], axis=0)
        # select each possible behavior in turn starting from this step and simulate
        # the behavior into the future and obtain final temperature as output by
        # approximating PRED_WINDOW steps into the future moving in a straight line
        for i, b in enumerate(btypes):
            fpos = self.sim_forward(self.alen, sim_pos[step, :], b)[-1, :]
            fpos[0] += np.cos(fpos[2]) * PRED_WINDOW * self.mu_disp
            fpos[1] += np.sin(fpos[2]) * PRED_WINDOW * self.mu_disp
            outputs[curr_sel, i] = self.temperature(fpos[0], fpos[1])
        curr_sel += 1
    assert not np.any(np.sum(inputs, (1, 2)) == 0)
    assert not np.any(np.sum(outputs, 1) == 0)
    return GradientData(inputs, outputs, PRED_WINDOW)
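The selection vector above marks each frame with probability 1/frame_rate, so on average one training sample is drawn per simulated second. A minimal sketch of that expectation, using a hypothetical frame rate of 100 Hz as a stand-in for GlobalDefs.frame_rate:

import numpy as np

frame_rate = 100               # assumed stand-in for GlobalDefs.frame_rate
n_frames = frame_rate * 600    # ten minutes of simulated trajectory
sel = np.random.rand(n_frames) < (1 / frame_rate)
# expected count is n_frames / frame_rate = 600,
# i.e. roughly one sample per simulated second
print(sel.sum())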
Code Example #2
        for elem in range(TESTSIZE):
            rank_real = np.unique(ytest[elem, :], return_inverse=True)[1]
            rank_pred = np.unique(pred_test[elem, :], return_inverse=True)[1]
            sum_rank_diffs += np.sum(np.abs(rank_real - rank_pred))
        print("TEST")
        print('step %d, test loss %g, rank loss %g' %
              (global_count, cur_l, sum_rank_diffs / TESTSIZE))
        print("TEST")
        test_losses.append(cur_l)
        test_rank_errors.append(sum_rank_diffs / TESTSIZE)
    net_model.train(xbatch, ybatch, keep=0.5)


if __name__ == "__main__":
    if TRAIN_BOUT_FREQ == 1:
        trainingData_1 = GradientData.load("gd_training_data.hdf5")
        trainingData_2 = GradientData.load("gd_training_data_rev.hdf5")
        testData = GradientData.load("gd_test_data_radial.hdf5")
    elif TRAIN_BOUT_FREQ == 0.5:
        trainingData_1 = GradientData.load("gd_05Hz_training_data.hdf5")
        trainingData_2 = GradientData.load("gd_05Hz_training_data_rev.hdf5")
        testData = GradientData.load("gd_05Hz_test_data_radial.hdf5")
    elif TRAIN_BOUT_FREQ == 2:
        trainingData_1 = GradientData.load("gd_2Hz_training_data.hdf5")
        trainingData_2 = GradientData.load("gd_2Hz_training_data_rev.hdf5")
        testData = GradientData.load("gd_2Hz_test_data_radial.hdf5")
    else:
        raise Exception(
            "No training data has been generated for the requested bout frequency"
        )
    # enforce same scaling on testData as on trainingData
    testData.copy_normalization(trainingData_1)
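The rank loss in the test loop above relies on np.unique with return_inverse=True, which maps each element to its index within the sorted unique values, i.e. its rank. A minimal, self-contained illustration with made-up temperatures:

import numpy as np

ytest_row = np.array([28.1, 25.3, 26.9, 25.9, 27.5])      # true final temperatures
pred_row = np.array([27.0, 25.1, 27.8, 26.0, 26.5])       # predicted temperatures
rank_real = np.unique(ytest_row, return_inverse=True)[1]  # -> [4 0 2 1 3]
rank_pred = np.unique(pred_row, return_inverse=True)[1]   # -> [3 0 4 1 2]
# sum of absolute rank differences; 0 means the ordering of all
# five behaviors was predicted correctly
print(np.sum(np.abs(rank_real - rank_pred)))              # -> 4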
Code Example #3
        if global_step % EVAL_TEST_EVERY == 0:
            test()
        data_batch = train_list[td_ix[1]].training_batch(BATCHSIZE)
        train_func(data_batch[0], data_batch[1], removal=droplist)
        global_step += 1
    sf = m.save_state(chk_file, global_step, True)
    print("Retrained model saved in file {0}.".format(sf))
    error_file = h5py.File(save_path + "/losses.hdf5", "x")
    error_file.create_dataset("test_rank_errors", data=np.array(test_errors))
    error_file.create_dataset("test_eval", data=np.array(test_steps))
    error_file.close()


if __name__ == '__main__':
    # load training and test data
    tD_1 = GradientData.load("ce_gd_training_data.hdf5")
    tD_2 = GradientData.load("ce_gd_training_data_rev.hdf5")
    tD_2.copy_normalization(tD_1)
    train_list = [tD_1, tD_2]
    testData = GradientData.load("ce_gd_test_data_radial.hdf5")
    # enforce same scaling on testData as on trainingData
    testData.copy_normalization(tD_1)

    ana = a.Analyzer(MoTypes(True), tD_1.standards, None,
                     "ce_activity_store.hdf5")

    # load cell unit ids and cluster ids
    dfile = h5py.File("stimFile.hdf5", 'r')
    tsin = np.array(dfile['sine_L_H_temp'])
    x = np.arange(tsin.size)  # stored at 20 Hz !
    xinterp = np.linspace(0, tsin.size,
                          tsin.size * GlobalDefs.frame_rate // 20)
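The stimulus trace is stored at 20 Hz and resampled to the model's frame rate by linear interpolation (the same pattern appears in full in Code Example #6). A short sketch with toy numbers, again assuming a hypothetical frame rate of 100 Hz:

import numpy as np

frame_rate = 100                                  # assumed stand-in for GlobalDefs.frame_rate
tsin = np.array([22.0, 24.0, 30.0, 26.0, 23.0])   # toy 20 Hz stimulus trace
x = np.arange(tsin.size)
xinterp = np.linspace(0, tsin.size, tsin.size * frame_rate // 20)
temp = np.interp(xinterp, x, tsin)                # linearly interpolated trace
print(temp.shape)                                 # -> (25,)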
Code Example #4
n_iter = 500000
n_gen = 50
TPREFERRED = 26

# Indicates the baseline bout frequency
TRAIN_BOUT_FREQ = 1


def mpath(path):
    return base_path + path[:-1]  # need to remove trailing slash


if __name__ == "__main__":
    # load training data for scaling - dependent on bout frequency
    if TRAIN_BOUT_FREQ == 1:
        std = GradientData.load_standards("gd_training_data.hdf5")
    elif TRAIN_BOUT_FREQ == 0.5:
        std = GradientData.load_standards("gd_05Hz_training_data.hdf5")
    elif TRAIN_BOUT_FREQ == 2:
        std = GradientData.load_standards("gd_2Hz_training_data.hdf5")
    else:
        raise Exception(
            "No training data has been generated for the requested bout frequency"
        )

    # evolve each 512-unit network unless it has been done before
    for p in paths_512:
        model_path = mpath(p)
        savedir = model_path + '/evolve/'
        if os.path.exists(savedir):
            print(
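The if/elif chain above, which recurs across the training scripts, maps TRAIN_BOUT_FREQ to a data file name. A dictionary lookup is a compact alternative; the following is a hypothetical refactoring sketch, not code from the repository:

# hypothetical refactoring: map bout frequency to the standards file
STD_FILES = {
    1: "gd_training_data.hdf5",
    0.5: "gd_05Hz_training_data.hdf5",
    2: "gd_2Hz_training_data.hdf5",
}
try:
    std = GradientData.load_standards(STD_FILES[TRAIN_BOUT_FREQ])
except KeyError:
    raise Exception(
        "No training data has been generated for the requested bout frequency")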
Code Example #5
    if response == "y":
        n_steps = int(input("Number of steps to perform?"))
        gradsim = CircGradientTrainer(100, 22, 37)
        gradsim.p_move *= TRAIN_BOUT_FREQ  # Adjust bout frequency during training data navigation
        print("Running radial simulation, inside-out")
        pos = gradsim.run_simulation(n_steps)
        pl.figure()
        pl.plot(pos[:, 0], pos[:, 1])
        pl.xlabel("X position [mm]")
        pl.ylabel("Y position [mm]")
        sns.despine()
        print("Generating gradient data")
        grad_data = gradsim.create_dataset(pos)
        all_in = grad_data.model_in_raw
        all_out = grad_data.model_out_raw
        print("Running radial simulation, outside-in")
        gradsim = CircGradientTrainer(100, 37, 22)
        gradsim.p_move *= TRAIN_BOUT_FREQ
        pos = gradsim.run_simulation(n_steps)
        pl.figure()
        pl.plot(pos[:, 0], pos[:, 1])
        pl.xlabel("X position [mm]")
        pl.ylabel("Y position [mm]")
        sns.despine()
        print("Generating gradient data")
        grad_data = gradsim.create_dataset(pos)
        all_in = np.r_[all_in, grad_data.model_in_raw]
        all_out = np.r_[all_out, grad_data.model_out_raw]
        grad_data = GradientData(all_in, all_out, grad_data.pred_window)
        print("Done")
Code Example #6
from core import GradientData, ModelData, ZfGpNetworkModel
from trainingData import CircGradientTrainer
from global_defs import GlobalDefs

if __name__ == "__main__":
    if sys.platform == "darwin" and "Tk" not in mpl.get_backend():
        print(
            "On OSX tkinter likely does not work properly if matplotlib uses a backend that is not TkAgg!"
        )
        print(
            "If using ipython activate TkAgg backend with '%matplotlib tk' and retry."
        )
        sys.exit(1)
    # load training data to obtain temperature scaling
    try:
        std = GradientData.load_standards("gd_training_data.hdf5")
    except IOError:
        print("No standards found attempting to load full training data")
        train_data = GradientData.load("gd_training_data.hdf5")
        std = train_data.standards
    # load and interpolate temperature stimulus
    dfile = h5py.File("stimFile.hdf5", 'r')
    tsin = np.array(dfile['sine_L_H_temp'])
    x = np.arange(tsin.size)  # stored at 20 Hz !
    xinterp = np.linspace(0, tsin.size,
                          tsin.size * GlobalDefs.frame_rate // 20)
    temp = np.interp(xinterp, x, tsin)
    dfile.close()
    print("Select model directory")
    root = tk.Tk()
    root.update()
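The backend warning at the top of this script can usually be avoided by requesting TkAgg before pyplot is imported; this is standard matplotlib usage rather than code from the repository:

import matplotlib as mpl
mpl.use("TkAgg")  # must run before matplotlib.pyplot is first imported
import matplotlib.pyplot as pl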
Code Example #7
        sum_rank_diffs = 0.0
        for elem in range(TESTSIZE):
            rank_real = np.unique(ytest[elem, :], return_inverse=True)[1]
            rank_pred = np.unique(pred_test[elem, :], return_inverse=True)[1]
            sum_rank_diffs += np.sum(np.abs(rank_real - rank_pred))
        print("TEST")
        print('step %d, test loss %g, rank loss %g' %
              (global_count, cur_l, sum_rank_diffs / TESTSIZE))
        print("TEST")
        test_losses.append(cur_l)
        test_rank_errors.append(sum_rank_diffs / TESTSIZE)
    net_model.train(xbatch, ybatch)


if __name__ == "__main__":
    trainingData = GradientData.load("photo_training_data.hdf5")
    testData = GradientData.load("photo_test_data.hdf5")
    # enforce same scaling on testData as on trainingData
    testData.copy_normalization(trainingData)
    epoch_size = trainingData.data_size // BATCHSIZE

    train_losses = []
    rank_errors = []
    test_losses = []
    test_rank_errors = []
    global_count = 0
    total_steps = N_EPOCHS * epoch_size
    with ZfGpNetworkModel() as Model:
        Model.setup(N_CONV, N_UNITS, N_BRANCH, N_MIXED)
        # save naive model including full graph
        save_path = Model.save_state(chk_file, 0)
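epoch_size and total_steps are plain integer bookkeeping: the number of batches per pass over the data times the number of epochs gives the overall step count. With hypothetical values (the real constants are defined elsewhere in the script):

# hypothetical values for illustration only
data_size, BATCHSIZE, N_EPOCHS = 100000, 50, 10
epoch_size = data_size // BATCHSIZE  # 2000 batches per epoch
total_steps = N_EPOCHS * epoch_size  # 20000 training steps overall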
Code Example #8
 mo_type = ""
 while mo_type != "c" and mo_type != "z":
     mo_type = input("Please select either (z)ebrafish or (c) elegans simulation [z/c]:")
     mo_type = mo_type.lower()
 n_steps = 2000000
 TPREFERRED = 25
 root = tk.Tk()
 root.update()
 root.withdraw()
 print("Select model directory")
 model_dir = filedialog.askdirectory(title="Select directory with model checkpoints", initialdir="./model_data/")
 root.update()
 mdata = ModelData(model_dir)
 # load training data for scaling
 if mo_type == "z":
     std = GradientData.load_standards("gd_training_data.hdf5")
 else:
     std = GradientData.load_standards("ce_gd_training_data.hdf5")
 sim_type = ""
 while sim_type != "l" and sim_type != "r":
     sim_type = input("Please select either (l)inear or (r)adial simulation [l/r]:")
 if mo_type == "z":
     mot = MoTypes(False)
 else:
     mot = MoTypes(True)
 gpn_naive = mot.network_model()
 gpn_naive.load(mdata.ModelDefinition, mdata.FirstCheckpoint)
 gpn_trained = mot.network_model()
 gpn_trained.load(mdata.ModelDefinition, mdata.LastCheckpoint)
 if sim_type == "l":
     sim_type = "x"  # so we call run_simulation correctly later