# Beispiel #1 (Example 1)
def test():
    """Restore a trained MLP from ./save and print its test-set accuracy."""
    restored = MLP()
    # Checkpoint key "myAwesomeModel" must match the key used at save time.
    ckpt = tf.train.Checkpoint(myAwesomeModel=restored)
    latest = tf.train.latest_checkpoint('./save')
    ckpt.restore(latest)  # load model parameters from the newest checkpoint
    labels = data_loader.eval_labels
    total = np.shape(labels)[0]
    preds = restored.predict(tf.constant(data_loader.eval_data)).numpy()
    accuracy = sum(preds == labels) / total
    print("test accuracy: %f" % accuracy)
# Beispiel #2 (Example 2)
def test():
    """Load the latest saved MLP checkpoint and report eval accuracy."""
    model = MLP()
    # The keyword name ("myAwesomeModel") must equal the key the saver used;
    # restoring via the freshly built model wires its variables to the file.
    tf.train.Checkpoint(myAwesomeModel=model).restore(
        tf.train.latest_checkpoint('./save'))
    eval_data = data_loader.eval_data
    eval_labels = data_loader.eval_labels
    y_hat = model.predict(tf.constant(eval_data)).numpy()
    sample_count = np.shape(eval_labels)[0]
    print("test accuracy: %f" % (sum(y_hat == eval_labels) / sample_count))
# Beispiel #3 (Example 3)
# NOTE(review): the matching `if` branch lies outside this excerpt; this
# `else` falls back to building a CNN model — confirm against the full file.
else:
    model = CNN()
data_loader = DataLoader()
# TF1-style eager training loop with Adam; `learning_rate`, `num_batches`
# and `batch_size` are defined elsewhere in the file.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
for batch_index in range(num_batches):
    X, y = data_loader.get_batch(batch_size)
    with tf.GradientTape() as tape:
        # Forward pass produces raw logits; the loss applies softmax itself.
        y_logit_pred = model(tf.convert_to_tensor(X))
        loss = tf.losses.sparse_softmax_cross_entropy(labels=y,
                                                      logits=y_logit_pred)
        print("batch %d: loss %f" % (batch_index, loss.numpy()))
    grads = tape.gradient(loss, model.variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))

# Evaluation pass 1: predict on the entire eval set in one call.
num_eval_samples = np.shape(data_loader.eval_labels)[0]
y_pred = model.predict(data_loader.eval_data).numpy()
print("test accuracy: %f" %
      (sum(y_pred == data_loader.eval_labels) / num_eval_samples))

# Evaluation pass 2: same metric computed batch by batch. Note that
# `num_eval_samples // batch_size` drops any tail samples beyond the last
# full batch, yet the final division uses the full label count — the two
# printed accuracies can therefore differ slightly.
num_correct_pred = 0
for batch_index in range(num_eval_samples // batch_size):
    y_pred = model.predict(
        data_loader.eval_data[batch_index * batch_size:(batch_index + 1) *
                              batch_size]).numpy()
    num_correct_pred += sum(
        y_pred == data_loader.eval_labels[batch_index *
                                          batch_size:(batch_index + 1) *
                                          batch_size])
print("test accuracy: %f" %
      (num_correct_pred / np.shape(data_loader.eval_labels)[0]))