Example #1

batch_size = 128
display_step = 1

corruption_level = 0.3
sparse_reg = 0

# network dimensions
n_inputs = 784
n_hidden = 400
n_hidden_fc = 784
n_outputs = 10
lr = 0.001

# define the autoencoder
ae = Autoencoder(n_layers=[n_inputs, n_hidden],
                 transfer_function=tf.nn.relu,
                 optimizer=tf.train.AdamOptimizer(learning_rate=lr),
                 ae_para=[corruption_level, sparse_reg])

# define a fully connected layer followed by the softmax output layer
xavier_initializer = tf.contrib.layers.xavier_initializer()
x = tf.placeholder(tf.float32, [None, n_hidden])
W = tf.Variable(xavier_initializer((n_hidden, n_hidden_fc), dtype=tf.float32))
b = tf.Variable(tf.zeros([n_hidden_fc]))
h = tf.nn.relu(tf.matmul(x, W) + b)

W_out = tf.Variable(xavier_initializer((n_hidden_fc, n_outputs), dtype=tf.float32))
b_out = tf.Variable(tf.zeros([n_outputs]))

y = tf.matmul(h, W_out) + b_out
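
All of these examples depend on an `Autoencoder` class defined elsewhere in the repo. As a reference, here is a minimal sketch consistent with the constructor calls above; the attribute names (`x`, `hidden`, `W_enc`, `b_enc`), the masking-noise corruption, and the exact cost (squared-error reconstruction plus an L1-style sparsity penalty weighted by `sparse_reg`) are assumptions, not the repo's actual implementation.

import tensorflow as tf

class Autoencoder(object):
    """Single hidden-layer denoising/sparse autoencoder (sketch)."""

    def __init__(self, n_layers, transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer(),
                 ae_para=(0.0, 0.0), reference=False):
        self.n_input, self.n_hidden = n_layers
        corruption_level, sparse_reg = ae_para
        self.reference = reference  # flag seen in Example #5; semantics assumed

        # input placeholder and masking-noise corruption
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        keep_mask = tf.cast(
            tf.random_uniform(tf.shape(self.x)) >= corruption_level, tf.float32)
        x_corrupted = self.x * keep_mask

        # encoder and decoder
        init = tf.contrib.layers.xavier_initializer()
        self.W_enc = tf.Variable(init((self.n_input, self.n_hidden), dtype=tf.float32))
        self.b_enc = tf.Variable(tf.zeros([self.n_hidden]))
        self.hidden = transfer_function(tf.matmul(x_corrupted, self.W_enc) + self.b_enc)
        W_dec = tf.Variable(init((self.n_hidden, self.n_input), dtype=tf.float32))
        b_dec = tf.Variable(tf.zeros([self.n_input]))
        self.reconstruction = tf.matmul(self.hidden, W_dec) + b_dec

        # squared-error reconstruction cost plus an L1-style sparsity penalty
        self.cost = 0.5 * tf.reduce_sum(tf.square(self.reconstruction - self.x))
        self.cost += sparse_reg * tf.reduce_sum(tf.abs(self.hidden))
        self.train_op = optimizer.minimize(self.cost)

    def partial_fit(self):
        # the (cost, train-op) pair that the training loops pass to sess.run
        return self.cost, self.train_op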
Example #2

# number of samples read per batch
batch_size = 100
display_step = 1

corruption_level = 0.05
sparse_reg = 0.02

# input dimensions
n_inputs = 3752
n_hidden = 600
n_outputs = 2
lr = 0.0001

# define the autoencoder
ae = Autoencoder(n_layers=[n_inputs, n_hidden],
                 transfer_function=tf.nn.relu,
                 optimizer=tf.train.AdamOptimizer(learning_rate=lr),
                 ae_para=[corruption_level, sparse_reg])

# define the softmax model
x = tf.placeholder(tf.float32, [None, n_hidden])
W = tf.Variable(tf.zeros([n_hidden, n_outputs]))
b = tf.Variable(tf.zeros([n_outputs]))
# matrix multiply
y = tf.matmul(x, W) + b

# define the loss function and optimizer
y_ = tf.placeholder(tf.float32, [None, n_outputs])
# cross-entropy loss
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
# Adam optimizer
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)
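
A hedged sketch of training and evaluating this classifier; `features` and `labels` are hypothetical NumPy arrays of autoencoder hidden activations (shape [N, n_hidden]) and one-hot labels (shape [N, n_outputs]), standing in for the example's actual data pipeline.

# accuracy of the softmax classifier
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        # `features` / `labels` are hypothetical arrays, not from the source
        sess.run(train_step, feed_dict={x: features, y_: labels})
        if step % 100 == 0:
            print(step, sess.run(accuracy, feed_dict={x: features, y_: labels}))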
Example #3
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1

corruption_level = 0.0
sparse_reg = 2

# network dimensions
n_inputs = 784
n_hidden = 1000
n_outputs = 10

ae = Autoencoder(n_layers=[n_inputs, n_hidden],
                 transfer_function=tf.nn.sigmoid,
                 optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
                 ae_para=[corruption_level, sparse_reg])
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(n_samples / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        batch_xs, _ = mnist.train.next_batch(batch_size)

        # Fit training using batch data
        temp = ae.partial_fit()
        # placeholder name below (ae.x) follows the class sketch after
        # Example #1 and is an assumption about the real Autoencoder class
        cost, opt = sess.run(temp, feed_dict={ae.x: batch_xs})
        avg_cost += cost / n_samples * batch_size
    # display logs per epoch step
    if epoch % display_step == 0:
        print("Epoch:", '%04d' % (epoch + 1), "cost =", "{:.9f}".format(avg_cost))
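
Once pretraining finishes, the learned representation can be read out as features for the classifier examples above; `ae.hidden` and `ae.x` follow the class sketch after Example #1 and are assumptions about the real class.

# hidden-layer activations as features for a downstream classifier
hidden_train = sess.run(ae.hidden, feed_dict={ae.x: mnist.train.images})
hidden_test = sess.run(ae.hidden, feed_dict={ae.x: mnist.test.images})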
Example #4
batch_size = 128
display_step = 1

corruption_level = 0.3
sparse_reg = 0

# network dimensions
n_inputs = 784
n_hidden = 400
n_hidden2 = 100
n_outputs = 10
lr = 0.001

# define the autoencoder
ae = Autoencoder(n_layers=[n_inputs, n_hidden],
                 transfer_function=tf.nn.relu,
                 optimizer=tf.train.AdamOptimizer(learning_rate=lr),
                 ae_para=[corruption_level, sparse_reg])
ae_2nd = Autoencoder(n_layers=[n_hidden, n_hidden2],
                     transfer_function=tf.nn.relu,
                     optimizer=tf.train.AdamOptimizer(learning_rate=lr),
                     ae_para=[corruption_level, sparse_reg])
# define the output layer using softmax
x = tf.placeholder(tf.float32, [None, n_hidden2])
W = tf.Variable(tf.zeros([n_hidden2, n_outputs]))
b = tf.Variable(tf.zeros([n_outputs]))
y = tf.matmul(x, W) + b

# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, n_outputs])

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
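
Pretraining a stacked pair is greedy and layer-wise: the first autoencoder trains on raw inputs, the second on the first's hidden activations. A minimal sketch, assuming the MNIST pipeline from Example #3 and the attribute names from the class sketch after Example #1.

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    total_batch = int(mnist.train.num_examples / batch_size)
    # stage 1: pretrain the first autoencoder on raw pixels
    for i in range(total_batch):
        batch_xs, _ = mnist.train.next_batch(batch_size)
        sess.run(ae.partial_fit(), feed_dict={ae.x: batch_xs})
    # stage 2: pretrain the second autoencoder on first-layer features
    for i in range(total_batch):
        batch_xs, _ = mnist.train.next_batch(batch_size)
        h1 = sess.run(ae.hidden, feed_dict={ae.x: batch_xs})
        sess.run(ae_2nd.partial_fit(), feed_dict={ae_2nd.x: h1})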
Example #5
corruption_level = 0
sparse_reg = 0.000001

# network dimensions; n_input comes from the example's data-loading code (not shown)
n_inputs = n_input
n_hidden = 1000
n_hidden2 = 800
n_hidden3 = 500
n_outputs = n_input
lr = 0.1

# define the autoencoder
ae = Autoencoder(n_layers=[n_inputs, n_hidden],
                 transfer_function=tf.nn.sigmoid,
                 optimizer=tf.train.AdamOptimizer(learning_rate=lr),
                 ae_para=[corruption_level, sparse_reg],
                 reference=True)
ae_2nd = Autoencoder(n_layers=[n_hidden, n_hidden2],
                     transfer_function=tf.nn.sigmoid,
                     optimizer=tf.train.AdamOptimizer(learning_rate=lr),
                     ae_para=[corruption_level, sparse_reg],
                     reference=False)
ae_3rd = Autoencoder(n_layers=[n_hidden2, n_hidden3],
                     transfer_function=tf.nn.sigmoid,
                     optimizer=tf.train.AdamOptimizer(learning_rate=0.1 * lr),
                     ae_para=[corruption_level, sparse_reg],
                     reference=False)

# define the output layer using softmax in the fine-tuning step
corrupt = tf.placeholder(tf.float32, [None, n_inputs])
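
The fine-tuning graph presumably chains the three pretrained encoders on this placeholder and puts the output layer on top. A hedged sketch follows; the per-layer weight attributes (`W_enc`, `b_enc`) come from the class sketch after Example #1 and are assumptions about the real class.

# stack the pretrained encoders (sigmoid matches the transfer function above)
h1 = tf.nn.sigmoid(tf.matmul(corrupt, ae.W_enc) + ae.b_enc)
h2 = tf.nn.sigmoid(tf.matmul(h1, ae_2nd.W_enc) + ae_2nd.b_enc)
h3 = tf.nn.sigmoid(tf.matmul(h2, ae_3rd.W_enc) + ae_3rd.b_enc)

# output layer; note that with n_outputs == n_inputs here, the fine-tuning
# target is a reconstruction of the uncorrupted input
W_fine = tf.Variable(tf.zeros([n_hidden3, n_outputs]))
b_fine = tf.Variable(tf.zeros([n_outputs]))
y = tf.matmul(h3, W_fine) + b_fine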