        # compare rank order of real and predicted values for each test sample
        for elem in range(TESTSIZE):
            rank_real = np.unique(ytest[elem, :], return_inverse=True)[1]
            rank_pred = np.unique(pred_test[elem, :], return_inverse=True)[1]
            sum_rank_diffs += np.sum(np.abs(rank_real - rank_pred))
        print("TEST")
        print('step %d, test loss %g, rank loss %g' %
              (global_count, cur_l, sum_rank_diffs / TESTSIZE))
        print("TEST")
        test_losses.append(cur_l)
        test_rank_errors.append(sum_rank_diffs / TESTSIZE)
    net_model.train(xbatch, ybatch, keep=0.5)


if __name__ == "__main__":
    if TRAIN_BOUT_FREQ == 1:
        trainingData_1 = GradientData.load("gd_training_data.hdf5")
        trainingData_2 = GradientData.load("gd_training_data_rev.hdf5")
        testData = GradientData.load("gd_test_data_radial.hdf5")
    elif TRAIN_BOUT_FREQ == 0.5:
        trainingData_1 = GradientData.load("gd_05Hz_training_data.hdf5")
        trainingData_2 = GradientData.load("gd_05Hz_training_data_rev.hdf5")
        testData = GradientData.load("gd_05Hz_test_data_radial.hdf5")
    elif TRAIN_BOUT_FREQ == 2:
        trainingData_1 = GradientData.load("gd_2Hz_training_data.hdf5")
        trainingData_2 = GradientData.load("gd_2Hz_training_data_rev.hdf5")
        testData = GradientData.load("gd_2Hz_test_data_radial.hdf5")
    else:
        raise Exception(
            "No training data has been generated for the requested bout frequency"
        )
    # enforce same scaling on testData as on trainingData
    testData.copy_normalization(trainingData_1)
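
A note on the rank loss computed in these test blocks: np.unique with return_inverse=True maps each entry to its index into the sorted unique values, i.e. a dense rank (tied values share a rank), so summing the absolute differences between the real and predicted inverse arrays measures how far the prediction's ordering deviates from the true one. A minimal, self-contained sketch with made-up values:

import numpy as np

y_real = np.array([0.3, 0.1, 0.4, 0.1])
y_pred = np.array([0.2, 0.3, 0.5, 0.1])
rank_real = np.unique(y_real, return_inverse=True)[1]  # [1, 0, 2, 0]
rank_pred = np.unique(y_pred, return_inverse=True)[1]  # [1, 2, 3, 0]
rank_loss = np.sum(np.abs(rank_real - rank_pred))      # 0 + 2 + 1 + 0 = 3
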
Example #2
        if global_step % EVAL_TEST_EVERY == 0:
            test()
        data_batch = train_list[td_ix[1]].training_batch(BATCHSIZE)
        train_func(data_batch[0], data_batch[1], removal=droplist)
        global_step += 1
    sf = m.save_state(chk_file, global_step, True)
    print("Retrained model saved in file {0}.".format(sf))
    error_file = h5py.File(save_path + "/losses.hdf5", "x")
    error_file.create_dataset("test_rank_errors", data=np.array(test_errors))
    error_file.create_dataset("test_eval", data=np.array(test_steps))
    error_file.close()


if __name__ == '__main__':
    # load training and test data
    tD_1 = GradientData.load("ce_gd_training_data.hdf5")
    tD_2 = GradientData.load("ce_gd_training_data_rev.hdf5")
    tD_2.copy_normalization(tD_1)
    train_list = [tD_1, tD_2]
    testData = GradientData.load("ce_gd_test_data_radial.hdf5")
    # enforce same scaling on testData as on trainingData
    testData.copy_normalization(tD_1)

    ana = a.Analyzer(MoTypes(True), tD_1.standards, None,
                     "ce_activity_store.hdf5")

    # load and interpolate temperature stimulus
    dfile = h5py.File("stimFile.hdf5", 'r')
    tsin = np.array(dfile['sine_L_H_temp'])
    x = np.arange(tsin.size)  # stored at 20 Hz !
    xinterp = np.linspace(0, tsin.size,
                          tsin.size * GlobalDefs.frame_rate // 20)
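
The losses.hdf5 written at the end of this example uses h5py's "x" mode, which creates the file and raises an error if it already exists, so an earlier run's results cannot be silently overwritten. A minimal sketch of the same write/read round trip (file name and values invented for illustration):

import h5py
import numpy as np

with h5py.File("example_losses.hdf5", "x") as f:
    f.create_dataset("test_rank_errors", data=np.array([3.0, 2.5, 1.75]))
    f.create_dataset("test_eval", data=np.array([0, 100, 200]))

with h5py.File("example_losses.hdf5", "r") as f:
    print(f["test_rank_errors"][()], f["test_eval"][()])
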
Example #3
        sum_rank_diffs = 0.0
        for elem in range(TESTSIZE):
            rank_real = np.unique(ytest[elem, :], return_inverse=True)[1]
            rank_pred = np.unique(pred_test[elem, :], return_inverse=True)[1]
            sum_rank_diffs += np.sum(np.abs(rank_real - rank_pred))
        print("TEST")
        print('step %d, test loss %g, rank loss %g' %
              (global_count, cur_l, sum_rank_diffs / TESTSIZE))
        print("TEST")
        test_losses.append(cur_l)
        test_rank_errors.append(sum_rank_diffs / TESTSIZE)
    net_model.train(xbatch, ybatch)


if __name__ == "__main__":
    trainingData = GradientData.load("photo_training_data.hdf5")
    testData = GradientData.load("photo_test_data.hdf5")
    # enforce same scaling on testData as on trainingData
    testData.copy_normalization(trainingData)
    epoch_size = trainingData.data_size // BATCHSIZE

    train_losses = []
    rank_errors = []
    test_losses = []
    test_rank_errors = []
    global_count = 0
    total_steps = N_EPOCHS * epoch_size
    with ZfGpNetworkModel() as Model:
        Model.setup(N_CONV, N_UNITS, N_BRANCH, N_MIXED)
        # save naive model including full graph
        save_path = Model.save_state(chk_file, 0)
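
All of these mains call copy_normalization so that test data is scaled with statistics fitted on the training set; scaling the test set with its own statistics would leak information and make losses incomparable across data sets. GradientData's internals are not shown in these examples, so treating the normalization as a stored per-feature mean and standard deviation is an assumption; a generic sketch of the idea:

import numpy as np

rng = np.random.default_rng(0)
train = rng.normal(5.0, 2.0, size=(1000, 3))
test = rng.normal(5.0, 2.0, size=(200, 3))

mu, sd = train.mean(axis=0), train.std(axis=0)  # statistics from training data only
train_z = (train - mu) / sd
test_z = (test - mu) / sd                       # same scaling applied to test data
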
Example #4
if __name__ == "__main__":
    if sys.platform == "darwin" and "Tk" not in mpl.get_backend():
        print(
            "On OSX tkinter likely does not work properly if matplotlib uses a backend that is not TkAgg!"
        )
        print(
            "If using ipython activate TkAgg backend with '%matplotlib tk' and retry."
        )
        sys.exit(1)
    # load training data to obtain temperature scaling
    try:
        std = GradientData.load_standards("gd_training_data.hdf5")
    except IOError:
        print("No standards found attempting to load full training data")
        train_data = GradientData.load("gd_training_data.hdf5")
        std = train_data.standards
    # load and interpolate temperature stimulus
    dfile = h5py.File("stimFile.hdf5", 'r')
    tsin = np.array(dfile['sine_L_H_temp'])
    x = np.arange(tsin.size)  # stored at 20 Hz !
    xinterp = np.linspace(0, tsin.size,
                          tsin.size * GlobalDefs.frame_rate // 20)
    temp = np.interp(xinterp, x, tsin)
    dfile.close()
    print("Select model directory")
    root = tk.Tk()
    root.update()
    root.withdraw()
    model_dir = filedialog.askdirectory(
        title="Select directory with model checkpoints",
Example #5
        pos_var = sim_store.get_sim_pos(model_path, "r", "bfevolve", drop_list)
    bf_fixed, bc = bout_freq(pos_fixed)
    bf_var, bc = bout_freq(pos_var)
    return bc, bf_fixed, bf_var


if __name__ == "__main__":
    # plot training progress
    plot_squared_losses()
    plot_rank_losses()
    # load training data for scaling
    try:
        std = GradientData.load_standards("gd_training_data.hdf5")
    except IOError:
        print("No standards found attempting to load full training data")
        std = GradientData.load("gd_training_data.hdf5").standards
    # plot radial sim results
    plot_sim("r")
    # load and interpolate temperature stimulus
    dfile = h5py.File("stimFile.hdf5", 'r')
    tsin = np.array(dfile['sine_L_H_temp'])
    x = np.arange(tsin.size)  # stored at 20 Hz !
    xinterp = np.linspace(0, tsin.size, tsin.size * GlobalDefs.frame_rate // 20)
    temperature = np.interp(xinterp, x, tsin)
    dfile.close()
    # for our 512 unit networks extract all temperature responses and corresponding IDs
    all_cells = []
    all_ids = []
    for i, d in enumerate(paths_512):
        with ActivityStore("activity_store.hdf5", std, MoTypes(False)) as act_store:
            cell_res, ids = act_store.get_cell_responses(mpath(d), temperature, i)
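
The loop above builds toward the usual collect-then-stack pattern: gather each network's response matrix and id matrix into lists, then concatenate once after the loop. A sketch with invented shapes (the real get_cell_responses output format is not shown in this excerpt):

import numpy as np

all_cells, all_ids = [], []
for net_index in range(3):
    cell_res = np.random.rand(100, 16)   # pretend (timepoints, cells) responses
    ids = np.full((2, 16), net_index)    # pretend (network, cell) id rows
    all_cells.append(cell_res)
    all_ids.append(ids)
all_cells = np.hstack(all_cells)         # stack cells side by side -> (100, 48)
all_ids = np.hstack(all_ids)             # -> (2, 48)
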