"files": [],
    "avg": copy.deepcopy(dmodel1),
    "top": copy.deepcopy(dmodel1)
}

# create separate models for each data file
# Iterates the `filenames` list (defined earlier in the file), loading one CSV
# per name from `root_data_folder` and seeding the per-file accuracy dicts
# with independent deep copies of the `dmodel` template.
for filename in filenames:
    data_file = root_data_folder + "/" + filename + ".csv"
    # loader.load_dataset returns four values; only features/labels are used here
    x, y, _, _ = loader.load_dataset(data_file)

    # deep copies so each file's accuracy record can be mutated independently
    acc_train_vect[filename] = copy.deepcopy(dmodel)
    acc_test_vect[filename] = copy.deepcopy(dmodel)

    # print(y)
    # binarize the outputs
    y = loader.binarize(y)
    if config["one_hot_encoding"]:
        # use integer encoding
        # encode to one-hot, then collapse back to integer class labels
        y = prep.encode(prep.adapt_input(y))
        y = prep.decode_int_onehot(y)

        # print(y)
        # quit()
        # y = prep.encode(prep.adapt_input(y))

    # best-model tracking for this file; no update to these is visible in
    # this chunk — presumably assigned further down, TODO confirm
    top_acc = 0
    top_model_filename = None

    # session = K.get_session()

    # classifiers.create_decision_tree(x, y[:,0], 20)
# NOTE(review): this is a for-else clause — it runs once the loop completes
# without `break`. No `break` appears in the loop body above, so this branch
# ALWAYS executes and unconditionally sets input_file/model_file to the
# hard-coded exp_39 paths. Confirm an `if`/`else` was not intended instead.
else:
    input_file = "./data/exp_39.csv"
    model_file = root_crt_model_folder + "/" + "exp_39_5_top.h5"

# number of valve channels from the run configuration — consumed later in the
# file (no use visible in this chunk)
nvalves = config["n_valves"]

# row-skip counter; initialized here, presumably advanced/used further down —
# TODO confirm (not referenced again in this chunk)
nrowskip = 0

# X1, y1 = loader.load_dataset_raw_buffer(input_file)
# load the evaluation dataset selected above (features X1, labels y1; the
# remaining two return values are discarded)
X1, y1, _, _ = loader.load_dataset(input_file)

# X1 = X1[120:1700]
# y1 = y1[120:1700]

# binarize the outputs
y1 = loader.binarize(y1)

# dataset dimensions: s = (rows, cols)
s = np.shape(X1)
print(s)

nrows = s[0]
ncols = s[1]

# partition the rows into n_bins equal bins; rowskip = rows per bin
n_bins = 20
rowskip = int(nrows / n_bins)

# when post-row-skipping is enabled, keep every row at load time and apply
# the binning afterwards via post_rowskip instead — TODO confirm against the
# downstream code that consumes post_rowskip
if use_post_rowskip:
    rowskip = 1

post_rowskip = int(nrows / n_bins)