Example 1
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)

n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1

# FLAGS, cluster and server are assumed to come from the usual
# tf.train.ClusterSpec / tf.train.Server setup earlier in the script.
if FLAGS.job_name == "ps":
    server.join()  # parameter servers only host variables and wait
elif FLAGS.job_name == "worker":

    with tf.device(tf.train.replica_device_setter(
            worker_device="/job:worker/task:%d" % FLAGS.task_index,
            cluster=cluster)):
        autoencoder = Autoencoder(n_input=784,
                                  n_hidden=200,
                                  transfer_function=tf.nn.softplus,
                                  optimizer=tf.train.AdamOptimizer(learning_rate=0.001))

    sv = tf.train.Supervisor(is_chief=(FLAGS.task_index == 0),
                             init_op=autoencoder.init_op)

    with sv.prepare_or_wait_for_session(server.target) as sess:
        for epoch in range(training_epochs):
            avg_cost = 0.
            total_batch = int(n_samples / batch_size)
            # Loop over all batches
            for i in range(total_batch):
                batch_xs = get_random_block_from_data(X_train, batch_size)

                # Fit training using batch data
                cost = autoencoder.partial_fit(batch_xs, sess)
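                # Continuation sketch (not part of the original excerpt): accumulate
                # the average reconstruction cost and let the chief task print it.
                avg_cost += cost / n_samples * batch_size

            if epoch % display_step == 0 and FLAGS.task_index == 0:
                print("Epoch: %04d" % (epoch + 1),
                      "cost =", "{:.9f}".format(avg_cost))

    sv.stop()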
Example 2

import numpy as np


def get_random_block_from_data(data, batch_size):
    # Pick a random contiguous block of `batch_size` rows from the data.
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]


X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)

n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1

autoencoder = Autoencoder(n_layers=[784, 200],
                          transfer_function=tf.nn.softplus,
                          optimizer=tf.train.AdamOptimizer(learning_rate=0.001))

for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)

        # Fit training using batch data
        cost = autoencoder.partial_fit(batch_xs)
        # Compute average loss
        avg_cost += cost / n_samples * batch_size

    # Display logs per epoch step
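    # (Sketch of the usual continuation; not part of the original excerpt.)
    if epoch % display_step == 0:
        print("Epoch: %04d" % (epoch + 1), "cost =", "{:.9f}".format(avg_cost))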
Example 3
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('./data', one_hot=True)

n_samples = int(mnist.train.num_examples)
training_epoch = 20
batch_size = 128
display_step = 1

n_inputs = 784
n_hidden1 = 400
n_hidden2 = 100
n_output = 10

ae1 = Autoencoder(n_layers=[n_inputs, n_hidden1],
                  transfer_function=tf.nn.relu,
                  optimizer=tf.train.AdamOptimizer(0.001))

ae2 = Autoencoder(n_layers=[n_hidden1, n_hidden2],
                  transfer_function=tf.nn.relu,
                  optimizer=tf.train.AdamOptimizer(0.001))

x = tf.placeholder(tf.float32, [None, n_hidden2])
w = tf.Variable(tf.zeros([n_hidden2, n_output]))
b = tf.Variable(tf.zeros([n_output]))
y = tf.matmul(x, w) + b

y_ = tf.placeholder(tf.float32, [None, n_output])

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
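
# Sketch (not part of the original excerpt): a plain gradient-descent step and an
# accuracy op for the softmax layer above, following the standard MNIST softmax
# tutorial. Feeding it requires the stacked codes, e.g.
# ae2.transform(ae1.transform(batch_xs)), assuming the Autoencoder class used here
# exposes a transform() method like the ones in the other examples.
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))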
Example 4
# Shuffle before batching so individual examples, not whole batches, get shuffled.
train_data = tf.data.Dataset.from_tensor_slices(X_train).shuffle(
    buffer_size=1024).batch(128)
test_data = tf.data.Dataset.from_tensor_slices(X_test).batch(128).shuffle(
    buffer_size=512)

n_samples = int(len(X_train) + len(X_test))
training_epochs = 20
batch_size = 128
display_step = 1

optimizer = tf.optimizers.Adam(learning_rate=0.01)
mse_loss = tf.keras.losses.MeanSquaredError()
loss_metric = tf.keras.metrics.Mean()

autoencoder = Autoencoder([200, 394, 784])

# Iterate over epochs.
for epoch in range(training_epochs):
    print(f'Epoch {epoch+1}')

    # Iterate over the batches of the dataset.
    for step, x_batch in enumerate(train_data):
        with tf.GradientTape() as tape:
            recon = autoencoder(x_batch)
            loss = mse_loss(x_batch, recon)

        grads = tape.gradient(loss, autoencoder.trainable_variables)
        optimizer.apply_gradients(zip(grads, autoencoder.trainable_variables))

        loss_metric(loss)
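
    # Sketch of the usual per-epoch logging (not part of the original excerpt).
    if epoch % display_step == 0:
        print(f'  mean reconstruction loss: {float(loss_metric.result()):.6f}')
    loss_metric.reset_states()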