class EarlyStoppingTest(TestCase):
    def setUp(self):
        self.early_stopping = EarlyStopping('loss', after=10)
        self.estimator = Estimator(model_builder=model_builder,
                                   input_fn=input_fn,
                                   hooks=[BasicLogger(), self.early_stopping])

    def test(self):
        self.estimator.train_and_evaluate(1000,
                                          data=train_data,
                                          validation=test_data,
                                          batch_size=32,
                                          batch_size_eval=32)
from estimator.estimator.Estimator import Estimator, Mode
from estimator.estimator.Hook import Hook
from estimator.estimator.TensorBoardLogger import TensorBoardLogger
from estimator.estimator.Saver import Saver
import numpy as np
import tensorflow as tf

EPOCHS = 100

# create the dataset
train_data = (np.random.rand(1000, 2), np.random.rand(1000, 1))
test_data = (np.random.rand(100, 2), np.random.rand(100, 1))

# define the input function
input_fn = Estimator.create_input_fn(input_shape=[None, 2], output_shape=[None, 1])

# define the model builder
def model_builder(x, y, config):
    # config is a dictionary that can be passed to the estimator
    net = tf.layers.dense(x, 16, activation=tf.nn.relu)
    predictions = tf.layers.dense(net, 1, activation=tf.nn.sigmoid)
    loss = tf.losses.mean_squared_error(labels=y, predictions=predictions)
    train_step = tf.train.AdamOptimizer(0.01).minimize(loss)
    # it must return a dictionary containing the operations to train, predict and evaluate
    return {
        Mode.TRAIN: {'train_step': train_step},
        Mode.PREDICT: {'predictions': predictions},
        Mode.EVAL: {'loss': loss}  # used as metrics
    }

class Logger(Hook):
    # the body of this custom hook is truncated in the source
    pass
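# NOTE: the file above imports TensorBoardLogger and Saver but is cut off
# before using them. The sketch below shows how they would plug into the
# hooks list, following the construction pattern used in the other examples.
# The constructor arguments ('./logs', './checkpoints') are assumptions, not
# the library's documented API; check the actual class signatures before use.
estimator = Estimator(model_builder,
                      input_fn,
                      hooks=[Logger(), TensorBoardLogger('./logs'), Saver('./checkpoints')])
estimator.train(data=train_data, epochs=EPOCHS, batch_size=64)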
from estimator.estimator.Estimator import Estimator, Mode
import numpy as np
import tensorflow as tf

EPOCHS = 100

# create the dataset
train_data = (np.random.rand(1000, 2), np.random.rand(1000, 1))
test_data = (np.random.rand(100, 2), np.random.rand(100, 1))

# define the input function
input_fn = Estimator.create_input_fn(input_shape=[None, 2], output_shape=[None, 1])

# define the model builder
def model_builder(x, y, config):
    # config is a dictionary that can be passed to the estimator
    net = tf.layers.dense(x, 16, activation=tf.nn.relu)
    predictions = tf.layers.dense(net, 1, activation=tf.nn.sigmoid)
    loss = tf.losses.mean_squared_error(labels=y, predictions=predictions)
    train_step = tf.train.AdamOptimizer(0.01).minimize(loss)
    # it must return a dictionary containing the operations to train, predict and evaluate
    return {
        Mode.TRAIN: {'train_step': train_step},
        Mode.PREDICT: {'predictions': predictions},
        Mode.EVAL: {'loss': loss}  # used as metrics
    }

estimator = Estimator(model_builder, input_fn)
# a batch size can be set before training; the default is 1
estimator.train(data=train_data, epochs=EPOCHS, batch_size=64)
res = estimator.evaluate(data=test_data)
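# NOTE: once trained, the same estimator can also serve predictions via the
# predict() call shown in the complete example further down. A minimal
# sketch; the 5x2 random input is illustrative and only has to match
# input_fn's [None, 2] feature shape.
print(res)  # res holds the EVAL metrics, here {'loss': ...}
print(estimator.predict(np.random.rand(5, 2)))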
from estimator.estimator.Estimator import Estimator, Mode
from tensorflow.keras.datasets import mnist
import numpy as np
import tensorflow as tf

N_CLASSES = 10

(train_x, train_y), (test_x, test_y) = mnist.load_data()

# we need to add a dimension to the targets
train_y = np.expand_dims(train_y, -1)
test_y = np.expand_dims(test_y, -1)

tf.set_random_seed(0)

train_data = (train_x, train_y)
test_data = (test_x, test_y)

shape = train_data[0].shape[1:]

input_fn = Estimator.create_input_fn([None, *shape], [None, 1], output_type=tf.uint8)

def model(x, y, config):
    x = tf.reshape(x, [-1, 28 * 28])
    net = tf.layers.dense(x, 256, activation=tf.nn.relu)
    net = tf.layers.dropout(net, 0.2)
    net = tf.layers.dense(net, 128, activation=tf.nn.relu)
    out = tf.layers.dense(net, N_CLASSES)
    predictions = tf.nn.softmax(out)
    y_one_hot = tf.one_hot(y, N_CLASSES, dtype=tf.float32)
    y_one_hot = tf.reshape(y_one_hot, [-1, N_CLASSES])
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_one_hot,
                                                   logits=out))
    # the source is truncated here; the train step and return dictionary
    # below follow the pattern used by the other model builders in this repo
    train_step = tf.train.AdamOptimizer(0.01).minimize(loss)
    return {
        Mode.TRAIN: {'train_step': train_step},
        Mode.PREDICT: {'predictions': predictions},
        Mode.EVAL: {'loss': loss}
    }
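# NOTE: the MNIST file stops at the model definition. A minimal sketch of
# driving it, assuming the same Estimator API shown in the other examples;
# the epoch and batch-size values are illustrative, not from the source.
estimator = Estimator(model, input_fn)
estimator.train_and_evaluate(10,
                             data=train_data,
                             validation=test_data,
                             batch_size=64,
                             batch_size_eval=64)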
from unittest import TestCase
from estimator.estimator.Estimator import Mode, Estimator
from estimator.estimator.EarlyStopping import EarlyStopping
from estimator.estimator.BasicLogger import BasicLogger
import tensorflow as tf
import numpy as np

train_data = (np.random.rand(100, 2), np.random.rand(100, 1))
test_data = (np.random.rand(100, 2), np.random.rand(100, 1))

# define the input function
input_fn = Estimator.create_input_fn(input_shape=[None, 2], output_shape=[None, 1])

# define the model builder
def model_builder(x, y, config):
    # config is a dictionary that can be passed to the estimator
    net = tf.layers.dense(x, 16, activation=tf.nn.relu)
    predictions = tf.layers.dense(net, 1, activation=tf.nn.sigmoid)
    loss = tf.losses.mean_squared_error(labels=y, predictions=predictions)
    train_step = tf.train.AdamOptimizer(0.01).minimize(loss)
    # it must return a dictionary containing the operations to train, predict and evaluate
    return {
        Mode.TRAIN: {'train_step': train_step},
        Mode.PREDICT: {'predictions': predictions},
        Mode.EVAL: {'loss': loss}  # completed from the identical builder in the other examples
    }
from examples.complete.data import input_fn
from examples.complete.data import data
from examples.complete.model import model_builder
from estimator.estimator.Estimator import Estimator
from estimator.estimator.Estimator import Mode
from estimator.estimator.BasicLogger import BasicLogger
import numpy as np

EPOCHS = 200
BATCH_SIZE = 64

estimator = Estimator(model_builder, input_fn, hooks=[BasicLogger()])

# estimator.train(EPOCHS, data[Mode.TRAIN], batch_size=BATCH_SIZE)
estimator.train_and_evaluate(EPOCHS,
                             data[Mode.TRAIN],
                             validation=data[Mode.EVAL],
                             batch_size=BATCH_SIZE,
                             every=3)
# estimator.evaluate(data[Mode.EVAL])

print(estimator.predict(np.array([[1, 2]])))
import numpy as np
from estimator.estimator.Estimator import Mode
from estimator.estimator.Estimator import Estimator

# train, test = tf.keras.datasets.mnist.load_data()

# number of samples in the train, validation and evaluation sets
DATA_SIZE = [100000, 200, 150]

data = {
    Mode.TRAIN: (np.random.rand(DATA_SIZE[0], 2), np.random.rand(DATA_SIZE[0], 1)),
    Mode.VAL: (np.random.rand(DATA_SIZE[1], 2), np.random.rand(DATA_SIZE[1], 1)),
    Mode.EVAL: (np.random.rand(DATA_SIZE[2], 2), np.random.rand(DATA_SIZE[2], 1))
}

input_fn = Estimator.create_input_fn([None, 2], [None, 1])

# def input_fn(batch_size):
#     x = tf.placeholder(tf.float32, shape=[None, 2])
#     y = tf.placeholder(tf.float32, shape=[None, 1])
#
#     return {Mode.TRAIN: tf.data.Dataset.from_tensor_slices((x, y)).shuffle(10000).repeat().batch(batch_size),
#             Mode.EVAL: tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size).repeat()}, x, y
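# NOTE: the commented-out function above hints at how a hand-written input
# function can replace Estimator.create_input_fn. Below is a runnable sketch
# based on it; the exact contract (returning the per-mode dataset dict plus
# the two feed placeholders) is inferred from that comment, not from a
# documented API.
import tensorflow as tf

def custom_input_fn(batch_size):
    # placeholders for features and targets, fed at run time
    x = tf.placeholder(tf.float32, shape=[None, 2])
    y = tf.placeholder(tf.float32, shape=[None, 1])
    # one tf.data pipeline per mode: shuffled and repeated for training,
    # plain batching for evaluation
    datasets = {
        Mode.TRAIN: tf.data.Dataset.from_tensor_slices((x, y)).shuffle(10000).repeat().batch(batch_size),
        Mode.EVAL: tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size).repeat()
    }
    return datasets, x, y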