# --- Build the total training objective --------------------------------------
# Sparsity penalty: KL divergence between the target activation level n_rho
# and the observed mean hidden activation rho_hat, scaled by n_beta.
cost_sparse = n_beta * tf.reduce_sum(KL_Div(n_rho, rho_hat))

# Reconstruction error between the network output and its input
# (autoencoder objective).
cost_J = tf.reduce_mean(tf.nn.l2_loss(pred['out'] - x))

# L2 weight-decay over both weight matrices, scaled by n_lambda.
hidden_decay = tf.nn.l2_loss(weights['hidden'])
out_decay = tf.nn.l2_loss(weights['out'])
cost_reg = n_lambda * (hidden_decay + out_decay)

# Total cost: reconstruction + weight decay + sparsity penalty.
cost = cost_J + cost_reg + cost_sparse

# Plain gradient descent on the combined objective.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=n_learning_rate).minimize(cost)


# Initializing the variables.
# FIX: tf.initialize_all_variables() has been deprecated since TF 0.12;
# tf.global_variables_initializer() is the drop-in replacement.
init = tf.global_variables_initializer()

# Launch the graph and run the training loop.
with tf.Session() as sess:
    sess.run(init)
    saver = tf.train.Saver()

    # Training cycle: each "epoch" draws a fresh batch of patches and
    # takes one gradient-descent step on the full objective.
    for epoch in range(n_num_epochs):
        batch_xs = gid.getPatches(n_epoch_size)
        # Fit training using batch data.
        sess.run(optimizer, feed_dict={x: batch_xs})

    print("Optimization Finished!")

    # Persist the trained model for later reuse.
    saver.save(sess, 'my-SAE')

    # Visualize the learned hidden-layer features.
    outWeights = sess.run(weights['hidden'])
    vis.display_network(outWeights)
# Rebuild the regularization term and total cost for the second training run.
cost_reg = n_lambda * (tf.nn.l2_loss(weights['hidden']) + tf.nn.l2_loss(weights['out']))
cost = cost_J + cost_reg + cost_sparse

# BUG FIX: tf.train.Optimizer is the abstract base class — it cannot be
# used directly and does not accept a learning_rate keyword argument.
# Use GradientDescentOptimizer, consistent with the first training stage.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=n_learning_rate).minimize(cost)


# Initializing the variables.
# FIX: tf.initialize_all_variables() has been deprecated since TF 0.12;
# tf.global_variables_initializer() is the drop-in replacement.
init = tf.global_variables_initializer()

# Launch the graph and run the training loop.
with tf.Session() as sess:
    sess.run(init)
    saver = tf.train.Saver()

    # Snapshot the hidden-layer weights BEFORE training, for comparison
    # against the trained features visualized below.
    outWeights = sess.run(weights['hidden'])
    vis.display_network(outWeights, filename='pretrainweights')

    # Training cycle: each "epoch" draws a fresh batch of patches and
    # takes one gradient-descent step on the full objective.
    for epoch in range(n_num_epochs):
        batch_xs = gid.getPatches(n_epoch_size)
        # Fit training using batch data.
        sess.run(optimizer, feed_dict={x: batch_xs})

    print("Optimization Finished!")

    # Persist the trained model for later reuse.
    saver.save(sess, 'my-SAE')

    # Visualize the learned features, plus the transposed weight matrix.
    outWeights = sess.run(weights['hidden'])
    vis.display_network(outWeights)
    vis.display_network(outWeights.T, filename="weightsT")