# #############

# Main training loop: iterates batch-by-batch, treating every epoch_scale-th
# iteration as an epoch boundary at which validation loss is computed.
# NOTE(review): this fragment is truncated mid-statement at the sess.run call
# at the bottom — the tail of the loop body is missing from this view.
model_name = model.name.replace('/', '_').replace(':', '_')
val_loss_file = open('val_loss.txt', 'w')
train_loss_file = open('train_loss.txt', 'w')
# Number of batches per epoch (truncating division of samples by batch size).
epoch_scale = int(SAMPLES_PER_EPOCH / BATCH_SIZE)
for i in range(NUMBER_OF_EPOCHS * epoch_scale):
    # True on the last iteration of each epoch.
    is_new_epoch = ((i + 1) % epoch_scale == 0)
    if is_new_epoch:
        # 1-based epoch index, exact because (i + 1) is a multiple of epoch_scale.
        epoch_num = int((i + 1) / epoch_scale)
    if is_new_epoch:
        # NOTE(review): float division here — the printed iteration count can be
        # fractional; integer division (//) was probably intended.
        print('Calculating validation loss ({} iterations)'.format(
            len(val_truth_ds_pairs) / BATCH_SIZE))
        total_val_loss = 0
        val_count = 0
        # Average the loss over every validation batch.
        for pair in next_batch(BATCH_SIZE, val_truth_ds_pairs):
            # pair[0] is fed as ground truth (y_true), pair[1] as network
            # input (x) — presumed truth/downsampled ordering; confirm upstream.
            loss_value = sess.run([loss],
                                  feed_dict={
                                      train_flag: False,
                                      x: pair[1],
                                      y_true: pair[0]
                                  })
            # sess.run returns a one-element list; np.mean collapses it to a scalar.
            total_val_loss += np.mean(loss_value)
            val_count += 1
        loss_value = total_val_loss / val_count
        val_loss_file.write('{},{}\n'.format(epoch_num, loss_value))
        print("Epoch {}, Val Loss {}".format(epoch_num, loss_value))
    # Draw a fresh random training batch every iteration.
    batch = randomly_batch(BATCH_SIZE, train_truth_ds_pairs)
    if write_tb:
        if is_new_epoch:
            # NOTE(review): this rebinds the Python name `loss` (the loss tensor)
            # to the fetched numeric value, which would break later
            # sess.run([loss]) calls — likely a bug. Statement is also truncated
            # here by the fragment boundary.
            summary, _, loss = sess.run([merged, train_step, loss],
# initialize tensorboard file writers
# Merge every registered tf.summary op into one fetchable summary tensor.
merged = tf.summary.merge_all()
# Event files (including the graph) go under aux/tensorboard/overtrain.
train_writer = tf.summary.FileWriter('aux/tensorboard/overtrain', sess.graph)

# initialize the variables for the session
sess.run(tf.global_variables_initializer())

# #############
# TRAINING LOOP
# #############

# Calculate the baseline MSE of each training pair (truth vs. its
# downsampled example), then report the mean over all pairs.
per_pair_mse = [
    np.mean((truth[0].flatten() - example[0].flatten()) ** 2)
    for truth, example in next_batch(1, train_truth_ds_pairs)
]
example_loss = sum(per_pair_mse) / float(len(per_pair_mse))

print('loss score of example {}'.format(example_loss))

# training loop
# NOTE(review): this fragment is truncated mid-statement below — the rest of
# the train_step.run(...) call and the remainder of the loop body are missing
# from this view.
train_loss_file = open('overtrain_loss.txt', 'w')
for i in range(NUMBER_OF_EPOCHS):
    # next_batch yields (truth, downsampled) batch pairs over the training set
    # — presumed ordering from the y_true/x feed pairing; confirm upstream.
    for pair in next_batch(BATCH_SIZE, train_truth_ds_pairs):
        train_step.run(feed_dict={
            train_flag: True,
            x: pair[1],
            y_true: pair[0]
        },
# Saver reads/writes model checkpoint files.
saver = tf.train.Saver()

# create session
sess = tf.Session()

# restore model from checkpoint file
saver.restore(sess, model_checkpoint_file_name)


# #############
# TESTING LOOP
# #############

# Evaluate the restored model on the test set, writing one mean waveform-MSE
# value per batch to test_loss.txt. The file is managed with a context manager
# so it is closed even if a batch evaluation raises (the original leaked the
# handle on error); enumerate replaces the manual counter.
count = 0  # remains 0 if the test set yields no batches
with open('test_loss.txt', 'w') as test_loss_file:
    for count, pair in enumerate(next_batch(BATCH_SIZE, test_truth_ds_pairs),
                                 start=1):
        # pair[0] is fed as ground truth (y_true), pair[1] as network input (x)
        # — presumed truth/downsampled ordering; confirm upstream.
        loss_test = sess.run([waveform_mse],
                             feed_dict={train_flag: False,
                                        x: pair[1],
                                        y_true: pair[0]}
                             )
        # sess.run returns a one-element list; np.mean collapses it to a scalar.
        test_loss_file.write('{}\n'.format(np.mean(loss_test)))
        print("Iteration {}, Test Loss {}".format(count, loss_test))

# Reconstruct the first test example with the trained model and flatten the
# prediction into a 1-D waveform.
truth, example = read_file_pair(test_truth_ds_pairs[0])
model_input = example.reshape(1, -1, 1)  # (batch=1, samples, channels=1)
prediction = model.eval(
    feed_dict={train_flag: False, x: model_input},
    session=sess)
y_reco = prediction.flatten()