# pred_model_layer_1 = 256
# pred_model_layer_2 = 256
# pred_epochs = 100

# Widen numpy printing so full snapshot arrays are visible in the logs.
np.set_printoptions(linewidth=200,
                    threshold=(full_history_length + 1) *
                    model_history_length *
                    input_size)  # unset with np.set_printoptions()

# output location (plain strings: the old f-prefixes had no placeholders)
run_dir = os.path.join('dashboards', 'nn_dashboard', 'run_random_forest')

# exist_ok avoids the check-then-create race of os.path.exists + makedirs
os.makedirs(run_dir, exist_ok=True)

# Mirror stdout into a log file alongside the run artifacts.
stdout_add_file(os.path.join(run_dir, 'log_confirm.txt'))

# ============
# Restore the tuned random forest persisted by the training run.
model_path = os.path.join(run_dir, 'random_forest_best.dump')
classifier: RandomForestClassifier = joblib.load(model_path)

# 10-Fold Cross validation
# print(np.mean(cross_val_score(clf, nn_dashboard_diff_none_train, nn_dashboard_diff_none_train_answers, cv=10)))

# Probability bins: 5% point bin size over [0, 1], labelled 1..20.
bins = list(drange_inc(0, 1, '0.05'))
bin_labels = [*range(1, 21)]

# def binning(g):
#     return pd.Series(data={'actual': g.actual.sum(), 'count': len(g.index)})

# =======================
# Prediction-model hyperparameters.
pred_model_layer_1 = 256
pred_model_layer_2 = 256
pred_epochs = 100

# Widen numpy printing so full snapshot arrays are visible in the logs.
np.set_printoptions(linewidth=200,
                    threshold=(full_history_length + 1) *
                    model_history_length *
                    input_size)  # unset with np.set_printoptions()

# output location (plain strings: the old f-prefixes had no placeholders)
run_dir = os.path.join('dashboards', 'pfa_dashboard', 'run_random_forest')

# exist_ok avoids the check-then-create race of os.path.exists + makedirs
os.makedirs(run_dir, exist_ok=True)

# Mirror stdout into a log file alongside the run artifacts.
stdout_add_file(os.path.join(run_dir, 'log.txt'))

# =======================
# Get the data
# pd.read_csv is the supported public API; pd.io.parsers.read_csv is a
# non-public access path and may break across pandas versions.
pfa_data_dir = os.path.join('dashboards', 'pfa_dashboard')

# Training features: one row per snapshot, CSV has no header row.
pfa_dashboard_diff_none_train: pd.DataFrame = pd.read_csv(
    os.path.join(pfa_dashboard_diff_none_train_path := os.path.join(
        pfa_data_dir, 'pfa_dashboard_diff_none_train.csv')),
    delimiter=",",
    header=None)

# Matching labels, row-aligned with the feature rows above.
pfa_dashboard_diff_none_train_answers = pd.read_csv(
    os.path.join(pfa_data_dir, 'pfa_dashboard_diff_none_train_answers.csv'),
    delimiter=",",
    header=None)

# =======================
feature_num = 27  # <correct or not> + <26 features>

# LSTM autoencoder hyperparameters.
lstm_layer_size = 64
epochs = 240

# output locations, parameterized by the run's hyperparameters
run_dir = os.path.join('runs', f'run_t{history_length}_l{lstm_layer_size}_e{epochs}')
score_dir = os.path.join('runs', f'run_t{history_length}_l{lstm_layer_size}_e{epochs}_score')

# exist_ok avoids the check-then-create race of os.path.exists + makedirs
os.makedirs(score_dir, exist_ok=True)

# Setup some printing magic
# https://stackoverflow.com/questions/11325019/how-to-output-to-the-console-and-file
# https://stackoverflow.com/questions/7152762/how-to-redirect-print-output-to-a-file-using-python?noredirect=1&lq=1
stdout_add_file(os.path.join(score_dir, 'score.txt'))
# we want to see everything in the prints
np.set_printoptions(linewidth=200,
                    threshold=(history_length + 1) * history_length * feature_num)  # unset with np.set_printoptions()

# =========== data
# Validation snapshots for this history length (3-D array serialized to text).
snapshot_path = os.path.join('outputs', f'snapshot_validate_l{history_length}.txt')
answer_snapshots = read_numpy_3d_array_from_txt(snapshot_path)

# Autoencoder wiring: the network reconstructs its own input,
# so the target sequence aliases the input sequence.
seq_in = answer_snapshots
seq_out = seq_in


# https://github.com/keras-team/keras/issues/4563