def test_basic_logging(capsys):
    """Each logger context flush emits one JSON line wrapped in newlines."""
    # Single integer metric.
    with valohai.logger() as logger:
        logger.log("myint", 123)
    out = capsys.readouterr().out
    assert out == '\n{"myint": 123}\n'

    # Single float metric, key containing spaces.
    with valohai.logger() as logger:
        logger.log("I am float", 0.1241234435877)
    out = capsys.readouterr().out
    assert out == '\n{"I am float": 0.1241234435877}\n'
def test_partial_logging(capsys):
    """Multiple log() calls are batched into a single JSON object on flush."""
    # Context-manager form: flush happens on __exit__.
    with valohai.logger() as logger:
        logger.log("myint", 123)
        logger.log("myfloat", 0.5435)
    out = capsys.readouterr().out
    assert out == '\n{"myint": 123, "myfloat": 0.5435}\n'

    # Manual form: nothing is printed until flush() is called explicitly.
    logger = valohai.logger()
    logger.log("myint", 234)
    logger.log("myfloat", 0.2322323)
    logger.log("extrathing", 2)
    logger.flush()
    out = capsys.readouterr().out
    assert out == '\n{"myint": 234, "myfloat": 0.2322323, "extrathing": 2}\n'
def log(epoch, logs):
    """Emit one JSON metadata record per epoch via the Valohai logger.

    Intended as a Keras on_epoch_end callback: `epoch` is the epoch index
    and `logs` is the Keras metrics dict for that epoch.
    """
    with vh.logger() as vh_logger:
        vh_logger.log('epoch', epoch)
        # Forward the two tracked training metrics in a fixed order.
        for key in ('accuracy', 'loss'):
            vh_logger.log(key, logs[key])
# This enables Valohai to version your metadata # and for you to use it to compare experiments def log(epoch, logs): with vh.logger() as logger: logger.log('epoch', epoch) logger.log('accuracy', logs['accuracy']) logger.log('loss', logs['loss']) cb = tf.keras.callbacks.LambdaCallback(on_epoch_end=log) model.fit(x_train, y_train, epochs=vh.parameters('epochs').value, callbacks=[cb]) # Evaluate the model and print out the test metrics as JSON test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2) with vh.logger() as logger: logger.log('test_accuracy', test_acc) logger.log('test_loss', test_loss) # Write output files to Valohai outputs directory # This enables Valohai to version your data # and upload output it to the default data store model.save(vh.outputs('model').path('model.h5'))
def log_metadata(epoch, logs):
    """Helper function to log training metrics.

    Used as a Keras on_epoch_end callback; `logs` is the per-epoch
    metrics dict supplied by Keras.
    """
    with valohai.logger() as metrics_logger:
        metrics_logger.log('epoch', epoch)
        metrics_logger.log('accuracy', logs['accuracy'])
        metrics_logger.log('loss', logs['loss'])
def main():
    """Train an MNIST classifier and publish metrics/model through Valohai."""
    # valohai.prepare enables us to update the valohai.yaml configuration file
    # with the Valohai command-line client by running
    # `valohai yaml step train_model.py`.
    valohai.prepare(
        step='train-model',
        image='tensorflow/tensorflow:2.6.0',
        default_inputs={
            'dataset': 'https://valohaidemo.blob.core.windows.net/mnist/preprocessed_mnist.npz',
        },
        default_parameters={
            'learning_rate': 0.001,
            'epochs': 5,
        },
    )

    # Read input files from the Valohai inputs directory so the platform can
    # version the training data and cache it for quick experimentation.
    dataset_path = valohai.inputs('dataset').path()
    with np.load(dataset_path, allow_pickle=True) as archive:
        x_train, y_train = archive['x_train'], archive['y_train']
        x_test, y_test = archive['x_test'], archive['y_test']

    # Simple dense classifier over flattened 28x28 images; the final layer
    # outputs raw logits (no softmax), matched by from_logits=True below.
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10),
    ])
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    adam = tf.keras.optimizers.Adam(
        learning_rate=valohai.parameters('learning_rate').value)
    model.compile(optimizer=adam, loss=loss_fn, metrics=['accuracy'])

    # Print metrics out as JSON each epoch so Valohai can version the
    # metadata and let you compare experiments.
    metadata_callback = tf.keras.callbacks.LambdaCallback(on_epoch_end=log_metadata)
    model.fit(
        x_train,
        y_train,
        epochs=valohai.parameters('epochs').value,
        callbacks=[metadata_callback],
    )

    # Evaluate the model and print out the test metrics as JSON.
    test_loss, test_accuracy = model.evaluate(x_test, y_test, verbose=2)
    with valohai.logger() as logger:
        logger.log('test_accuracy', test_accuracy)
        logger.log('test_loss', test_loss)

    # Write output files to the Valohai outputs directory so the platform
    # can version the data and upload it to the default data store. A UUID
    # suffix keeps repeated runs from clobbering each other's files.
    suffix = uuid.uuid4()
    model.save(valohai.outputs().path(f'model-{suffix}.h5'))