Example 1
def test():
    model_to_be_restored = MLP()
    checkpoint = tf.train.Checkpoint(
        myAwesomeModel=model_to_be_restored
    )  # instantiate a Checkpoint, setting the newly created model model_to_be_restored as the object to restore
    checkpoint.restore(tf.train.latest_checkpoint('./save'))  # restore model parameters from file
    num_eval_samples = np.shape(data_loader.eval_labels)[0]
    y_pred = model_to_be_restored.predict(tf.constant(
        data_loader.eval_data)).numpy()
    print("test accuracy: %f" %
          (sum(y_pred == data_loader.eval_labels) / num_eval_samples))
Example 2
def test():
    model_to_be_restored = MLP()
    checkpoint = tf.train.Checkpoint(
        myAwesomeModel=model_to_be_restored
    )  # instantiate a Checkpoint, set newly initialized model `model_to_be_restored` to be the object to be restored
    checkpoint.restore(tf.train.latest_checkpoint(
        './save'))  # restore parameters of model from file
    num_eval_samples = np.shape(data_loader.eval_labels)[0]
    y_pred = model_to_be_restored.predict(tf.constant(
        data_loader.eval_data)).numpy()
    print("test accuracy: %f" %
          (sum(y_pred == data_loader.eval_labels) / num_eval_samples))
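Besides tf.train.latest_checkpoint, restore() also accepts an explicit save-path prefix, which is useful when a particular snapshot is wanted rather than the most recent one. A minimal sketch, assuming the same MLP model and ./save directory as above (the -1 suffix is only an illustrative checkpoint number produced by checkpoint.save):

def test_specific_checkpoint():
    model_to_be_restored = MLP()
    checkpoint = tf.train.Checkpoint(myAwesomeModel=model_to_be_restored)
    # checkpoint.save('./save/model.ckpt') writes prefixes such as
    # ./save/model.ckpt-1, ./save/model.ckpt-2, ...; passing one of them
    # restores that particular snapshot instead of the latest one.
    checkpoint.restore('./save/model.ckpt-1')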
Example 3
def train():
    model = MLP()
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    checkpoint = tf.train.Checkpoint(
        myAwesomeModel=model)  # instantiate a Checkpoint, setting model as the object to be saved
    for batch_index in range(num_batches):
        X, y = data_loader.get_batch(batch_size)
        with tf.GradientTape() as tape:
            y_logit_pred = model(tf.convert_to_tensor(X))
            loss = tf.losses.sparse_softmax_cross_entropy(labels=y,
                                                          logits=y_logit_pred)
            print("batch %d: loss %f" % (batch_index, loss.numpy()))
        grads = tape.gradient(loss, model.variables)
        optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
        if (batch_index + 1) % 100 == 0:  # save once every 100 batches
            checkpoint.save('./save/model.ckpt')  # save model parameters to file
Example 4
def train():
    model = MLP()
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    checkpoint = tf.train.Checkpoint(
        myAwesomeModel=model
    )  # instantiate a Checkpoint, set `model` as object to be saved
    for batch_index in range(num_batches):
        X, y = data_loader.get_batch(batch_size)
        with tf.GradientTape() as tape:
            y_logit_pred = model(tf.convert_to_tensor(X))
            loss = tf.losses.sparse_softmax_cross_entropy(labels=y,
                                                          logits=y_logit_pred)
            print("batch %d: loss %f" % (batch_index, loss.numpy()))
        grads = tape.gradient(loss, model.variables)
        optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
        if (batch_index + 1) % 100 == 0:  # save every 100 batches
            checkpoint.save('./save/model.ckpt')  # save model to .ckpt file
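Since checkpoint.save keeps writing new numbered files, the save directory can grow without bound. Newer TensorFlow releases provide tf.train.CheckpointManager, which numbers the checkpoints and prunes old ones automatically; the sketch below mirrors the train() above under that assumption (max_to_keep=3 is an arbitrary choice):

def train_with_manager():
    model = MLP()
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    checkpoint = tf.train.Checkpoint(myAwesomeModel=model)
    # The manager handles checkpoint numbering and deletes older files,
    # keeping at most max_to_keep of them under ./save.
    manager = tf.train.CheckpointManager(checkpoint, directory='./save', max_to_keep=3)
    for batch_index in range(num_batches):
        X, y = data_loader.get_batch(batch_size)
        with tf.GradientTape() as tape:
            y_logit_pred = model(tf.convert_to_tensor(X))
            loss = tf.losses.sparse_softmax_cross_entropy(labels=y,
                                                          logits=y_logit_pred)
        grads = tape.gradient(loss, model.variables)
        optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
        if (batch_index + 1) % 100 == 0:  # save every 100 batches
            path = manager.save()  # also prunes checkpoints beyond max_to_keep
            print("model saved to %s" % path)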
Example 5
class DataLoader:  # loads MNIST and serves random training batches
    def __init__(self):
        mnist = tf.contrib.learn.datasets.load_dataset("mnist")
        self.train_data = mnist.train.images  # np.array [55000, 784].
        self.train_labels = np.asarray(
            mnist.train.labels, dtype=np.int32)  # np.array [55000] of int32.
        self.eval_data = mnist.test.images  # np.array [10000, 784].
        self.eval_labels = np.asarray(
            mnist.test.labels, dtype=np.int32)  # np.array [10000] of int32.

    def get_batch(self, batch_size):
        index = np.random.randint(0, np.shape(self.train_data)[0], batch_size)
        return self.train_data[index, :], self.train_labels[index]


if model_type == 'MLP':
    model = MLP()
else:
    model = CNN()
data_loader = DataLoader()
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
for batch_index in range(num_batches):
    X, y = data_loader.get_batch(batch_size)
    with tf.GradientTape() as tape:
        y_logit_pred = model(tf.convert_to_tensor(X))
        loss = tf.losses.sparse_softmax_cross_entropy(labels=y,
                                                      logits=y_logit_pred)
        print("batch %d: loss %f" % (batch_index, loss.numpy()))
    grads = tape.gradient(loss, model.variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))

num_eval_samples = np.shape(data_loader.eval_labels)[0]
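The snippet above breaks off right after num_eval_samples is computed; based on the test() functions in Examples 1 and 2, the evaluation step that follows presumably looks like this sketch (assuming MLP and CNN both expose the same predict method):

y_pred = model.predict(tf.constant(data_loader.eval_data)).numpy()
print("test accuracy: %f" %
      (sum(y_pred == data_loader.eval_labels) / num_eval_samples))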
Example 6
import tensorflow as tf
import numpy as np
from en.model.mlp.mlp import MLP
from en.model.mlp.utils import DataLoader

tf.enable_eager_execution()
num_batches = 10000
batch_size = 50
learning_rate = 0.001
model = MLP()
data_loader = DataLoader()
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
summary_writer = tf.contrib.summary.create_file_writer(
    './tensorboard')  # instantiate a summary writer
with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
    for batch_index in range(num_batches):
        X, y = data_loader.get_batch(batch_size)
        with tf.GradientTape() as tape:
            y_logit_pred = model(tf.convert_to_tensor(X))
            loss = tf.losses.sparse_softmax_cross_entropy(labels=y,
                                                          logits=y_logit_pred)
            print("batch %d: loss %f" % (batch_index, loss.numpy()))
            tf.contrib.summary.scalar("loss", loss,
                                      step=batch_index)  # record the current loss
        grads = tape.gradient(loss, model.variables)
        optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
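After training, the recorded scalars can be viewed by pointing TensorBoard at the same directory (tensorboard --logdir=./tensorboard). The writer is not limited to the loss; the sketch below extends the loop above to also log evaluation accuracy every 100 batches, reusing the DataLoader fields and the predict method from the earlier examples (the "eval_accuracy" tag is arbitrary):

with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
    for batch_index in range(num_batches):
        X, y = data_loader.get_batch(batch_size)
        with tf.GradientTape() as tape:
            y_logit_pred = model(tf.convert_to_tensor(X))
            loss = tf.losses.sparse_softmax_cross_entropy(labels=y,
                                                          logits=y_logit_pred)
            tf.contrib.summary.scalar("loss", loss, step=batch_index)
        grads = tape.gradient(loss, model.variables)
        optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
        if (batch_index + 1) % 100 == 0:
            # Evaluate on the held-out set and log the accuracy as a second scalar.
            y_pred = model.predict(tf.constant(data_loader.eval_data)).numpy()
            accuracy = float(sum(y_pred == data_loader.eval_labels)) / np.shape(data_loader.eval_labels)[0]
            tf.contrib.summary.scalar("eval_accuracy", accuracy, step=batch_index)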