# Per-metric accumulator template: accuracy, wall time, file size.
dmodel1 = {metric: [] for metric in ("acc", "dt", "fsize")}

# Aggregate per-model record. Each nested section gets its own fresh
# metric lists so appending to one section never aliases another.
dmodel = {
    "data": {metric: [] for metric in dmodel1},
    "count": [],
    "files": [],
    "avg": {metric: [] for metric in dmodel1},
    "top": {metric: [] for metric in dmodel1},
}

# create separate models for each data file
# NOTE(review): the input_file/model_file values chosen inside this loop are
# unconditionally overwritten by the for-`else` clause below — see the note
# at the `else`.
for filename in filenames:
    data_file = root_data_folder + "/" + filename + ".csv"
    # NOTE(review): other call sites in this file unpack 2 and 5 values from
    # loader.load_dataset — confirm the 4-tuple unpack here matches the
    # function's actual signature.
    x, y, _, _ = loader.load_dataset(data_file)

    # One fresh, independent accumulator record per data file, for both
    # train and test tracking.
    acc_train_vect[filename] = copy.deepcopy(dmodel)
    acc_test_vect[filename] = copy.deepcopy(dmodel)

    # print(y)
    # binarize the outputs
    y = loader.binarize(y)
    if config["one_hot_encoding"]:
        # use integer encoding
        y = prep.encode(prep.adapt_input(y))
        y = prep.decode_int_onehot(y)

        # print(y)
        # quit()
        # y = prep.encode(prep.adapt_input(y))
    input_file = "./data/exp_39.csv"
    if use_matching_random_model:
        model_file = root_crt_model_folder + "/" + "exp_179_1_top.h5"
        # model_file = root_crt_model_folder + "/" + "exp_217_2_top.h5"
    else:
        model_file = root_crt_model_folder + "/" + "exp_39_3_multi_top.skl"
else:
    # NOTE(review): this `else` binds to the `for` above (for-else semantics).
    # The loop contains no `break`, so this clause ALWAYS runs, clobbering the
    # input_file/model_file assignments made inside the loop. Likely a bug —
    # confirm whether this was meant to handle an empty `filenames` (which a
    # for-else does NOT do) or belongs to a different conditional.
    input_file = "./data/exp_39.csv"
    model_file = root_crt_model_folder + "/" + "exp_39_5_top.h5"

# Experiment-wide settings derived from config and the evaluation dataset.
nvalves = config["n_valves"]

# Leading rows to skip when consuming the dataset (0 = keep everything).
nrowskip = 0

# Load the evaluation dataset (features X1, labels y1).
# X1, y1 = loader.load_dataset_raw_buffer(input_file)
X1, y1, _, _ = loader.load_dataset(input_file)

# X1 = X1[120:1700]
# y1 = y1[120:1700]

# binarize the outputs
y1 = loader.binarize(y1)

s = np.shape(X1)
print(s)

nrows = s[0]
ncols = s[1]

# Split the rows into n_bins equal slices; rowskip is the stride per slice.
n_bins = 20
rowskip = nrows // n_bins

# Repeated runs retrain from scratch and keep the best model; a single run
# skips best-model saving. NOTE: save_best_model is assigned on both
# branches, so the former unconditional `save_best_model = True` preceding
# this check was a dead store and has been removed.
if n_reps > 1:
    use_saved_model = False
    append_timestamp = True
    save_best_model = True
else:
    save_best_model = False

# Only process the final bookmark (the full data range).
# bookmarks = [bookmarks[-1]]
from_bookmark_index = len(bookmarks) - 1

# Build train/eval splits for each data file, truncated at the bookmarks.
for filename in filenames:
    data_file = root_data_folder + "/" + filename + ".csv"
    # NOTE(review): other call sites in this file unpack 4 and 5 values from
    # loader.load_dataset — confirm this 2-tuple unpack matches its signature.
    x, y = loader.load_dataset(data_file)

    # Default split: train and evaluate on the full dataset.
    x_train, y_train = x, y
    x_eval, y_eval = x, y

    sizex = np.shape(x_train)

    # Re-slice at each bookmark from the starting index onward; evaluation
    # always spans up to the final bookmark.
    for bookmark_index in range(len(bookmarks)):
        if bookmark_index >= from_bookmark_index:
            cut = bookmarks[bookmark_index]
            x_train = x[0:cut, :]
            y_train = y[0:cut, :]
            x_eval = x[0:bookmarks[-1], :]
            y_eval = y[0:bookmarks[-1], :]
    # x = remove_outliers(x)
    # tss = create_timeseries(x, xheader)

    # fig, _ = graph.plot_timeseries_multi(tss, "sensor output", "samples [x0.1s]", "flow [L/h]", False)

    # graph.save_figure(fig, "./figs/sensor_output")
    # graph.plot_timeseries(ts, "title", "x", "y")

    # quit()


# create separate models for each data file
for filename in filenames:
    data_file = root_data_folder + "/" + filename + ".csv"
    x, y, xheader, yheader, times = loader.load_dataset(data_file)

    # tss = create_timeseries(x, xheader)

    # TODO: sort by chan number 0 - 10
    # TODO: show as subplot

    print(xheader)
    print(yheader)

    print(len(xheader))

    order = [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 2]

    xheader = reorder(xheader, order)