def train():
    """Train the MLP on the loaded samples, checkpointing on test accuracy."""
    from keras.metrics import categorical_accuracy

    # Images must be flattened to 1-D vectors for a fully-connected network.
    TransformImage.register('flatten', lambda img: img.flatten())
    preprocess = TransformImage(0)
    preprocess = preprocess.by('rerange', 0, 255, 0, 1, 'float32')
    preprocess = preprocess.by('flatten')

    batcher = BuildBatch(BATCH_SIZE)
    batcher = batcher.by(0, 'vector', 'float32')
    batcher = batcher.by(1, 'one_hot', 'uint8', NUM_CLASSES)

    live_plot = PlotLines((0, 1), layout=(2, 1), every_sec=1)

    print('loading data...')
    train_samples, test_samples = load_samples()

    print('creating network ...')
    network = create_network()

    print('training...', NUM_EPOCHS)
    for epoch in range(NUM_EPOCHS):
        print('EPOCH:', epoch)

        # One pass over the training data; network.train() emits
        # (loss, accuracy) pairs that are split into two streams.
        t_loss, t_acc = (train_samples >> PrintProgress(train_samples) >>
                         preprocess >> batcher >> network.train() >>
                         live_plot >> Unzip())
        print('train loss : {:.6f}'.format(t_loss >> Mean()))
        print('train acc : {:.1f}'.format(100 * (t_acc >> Mean())))

        # Evaluate on the held-out test set after every epoch.
        e_acc = (test_samples >> preprocess >> batcher >>
                 network.evaluate([categorical_accuracy]))
        print('test acc : {:.1f}'.format(100 * e_acc))

        # Save weights whenever test accuracy improves (higher is better).
        network.save_best(e_acc, isloss=False)
def train():
    """Train the CNN with augmentation; validate per epoch, test at the end.

    Splits the training data 80/20 into train/validation, checkpoints the
    network on best validation accuracy, and reports test accuracy once
    after the final epoch.
    """
    from keras.metrics import categorical_accuracy

    rerange = TransformImage(0).by('rerange', 0, 255, 0, 1, 'float32')
    build_batch = (BuildBatch(BATCH_SIZE)
                   .by(0, 'image', 'float32')
                   .by(1, 'one_hot', 'uint8', NUM_CLASSES))

    # Each augmentation fires independently with probability p; 'identical'
    # at 1.0 keeps every original sample in the stream as well.
    p = 0.1
    augment = (AugmentImage(0)
               .by('identical', 1.0)
               .by('elastic', p, [5, 5], [100, 100], [0, 100])
               .by('brightness', p, [0.7, 1.3])
               .by('color', p, [0.7, 1.3])
               .by('shear', p, [0, 0.1])
               .by('fliplr', p)
               .by('rotate', p, [-10, 10]))

    plot_eval = PlotLines((0, 1), layout=(2, 1))

    print('creating network...')
    network = create_network()

    print('loading data...')
    train_samples, test_samples = load_samples()
    train_samples, val_samples = train_samples >> SplitRandom(0.8)

    print('training...', len(train_samples), len(val_samples))
    for epoch in range(NUM_EPOCHS):
        print('EPOCH:', epoch)

        t_loss, t_acc = (train_samples >> PrintProgress(train_samples) >>
                         Pick(PICK) >> augment >> rerange >> Shuffle(100) >>
                         build_batch >> network.train() >> Unzip())
        t_loss, t_acc = t_loss >> Mean(), t_acc >> Mean()
        print("train loss : {:.6f}".format(t_loss))
        print("train acc : {:.1f}".format(100 * t_acc))

        v_loss, v_acc = (val_samples >> rerange >> build_batch >>
                         network.validate() >> Unzip())
        # BUG FIX: v_loss was previously averaged from v_acc (copy-paste
        # error), so the reported validation loss was actually the accuracy.
        v_loss, v_acc = v_loss >> Mean(), v_acc >> Mean()
        print('val loss : {:.6f}'.format(v_loss))
        print('val acc : {:.1f}'.format(100 * v_acc))

        # Checkpoint on validation accuracy (higher is better).
        network.save_best(v_acc, isloss=False)
        plot_eval((t_acc, v_acc))

    print('testing...', len(test_samples))
    e_acc = (test_samples >> rerange >> build_batch >>
             network.evaluate([categorical_accuracy]))
    print('test acc : {:.1f}'.format(100 * e_acc))
def train():
    """Train the CNN with augmentation, checkpointing on test accuracy."""
    from keras.metrics import categorical_accuracy

    print('creating network ...')
    network = create_network()

    print('loading data...')
    train_samples, test_samples = load_samples()

    # Stochastic augmentations; 'identical' keeps every original sample.
    augmenter = AugmentImage(0)
    augmenter = augmenter.by('identical', 1)
    augmenter = augmenter.by('translate', 0.5, [-3, +3], [-3, +3])
    augmenter = augmenter.by('rotate', 0.5, [-5, +5])
    augmenter = augmenter.by('shear', 0.5, [0, 0.2])
    augmenter = augmenter.by('elastic', 0.5, [5, 5], [100, 100], [0, 100])

    # Scale pixel values from [0, 255] to [0, 1] floats.
    normalize = TransformImage(0).by('rerange', 0, 255, 0, 1, 'float32')

    batcher = (BuildBatch(BATCH_SIZE, prefetch=0)
               .input(0, 'image', 'float32')
               .output(1, 'one_hot', 'uint8', NUM_CLASSES))
    curve_plot = PlotLines((0, 1), layout=(2, 1), every_sec=1)

    print('training...', NUM_EPOCHS)
    for epoch in range(NUM_EPOCHS):
        print('EPOCH:', epoch)

        # Train for one epoch; network.train() yields (loss, acc) pairs.
        t_loss, t_acc = (train_samples >> PrintProgress(train_samples) >>
                         Pick(PICK) >> augmenter >> normalize >>
                         Shuffle(100) >> batcher >> network.train() >>
                         curve_plot >> Unzip())
        print('train loss : {:.6f}'.format(t_loss >> Mean()))
        print('train acc : {:.1f}'.format(100 * (t_acc >> Mean())))

        # Evaluate on the test set after every epoch.
        e_acc = (test_samples >> normalize >> batcher >>
                 network.evaluate([categorical_accuracy]))
        print('test acc : {:.1f}'.format(100 * e_acc))

        # Save weights whenever test accuracy improves.
        network.save_best(e_acc, isloss=False)
""" from __future__ import print_function from glob import glob from nutsflow import Collect, Consume, Get, Zip, Map, ArgMax, Print from nutsml import (TransformImage, BuildBatch, ReadImage, ViewImageAnnotation, ConvertLabel) BATCH_SIZE = 128 if __name__ == "__main__": from cnn_train import create_network, load_names convert_label = ConvertLabel(None, load_names()) rerange = TransformImage(0).by('rerange', 0, 255, 0, 1, 'float32') show_image = ViewImageAnnotation(0, 1, pause=1, figsize=(3, 3), interpolation='spline36') pred_batch = BuildBatch(BATCH_SIZE).input(0, 'image', 'float32') print('loading network...') network = create_network() network.load_weights() print('predicting...') samples = glob('images/*.png') >> Print() >> ReadImage(None) >> Collect() predictions = (samples >> rerange >> pred_batch >> network.predict() >> convert_label) samples >> Get(0) >> Zip(predictions) >> show_image >> Consume()
.. module:: mlp_predict :synopsis: Example nuts-ml pipeline for classification """ from __future__ import print_function from nutsflow import Collect, Consume, Get, Zip, Map, Format, ArgMax from nutsml import (TransformImage, BuildBatch, ReadImage, ReadLabelDirs, ViewImageAnnotation) BATCH_SIZE = 128 if __name__ == "__main__": from mlp_train import create_network TransformImage.register('flatten', lambda img: img.flatten()) transform = (TransformImage(0).by('rerange', 0, 255, 0, 1, 'float32').by('flatten')) show_image = ViewImageAnnotation(0, (1, 2), pause=1, figsize=(4, 4)) pred_batch = BuildBatch(BATCH_SIZE).by(0, 'vector', 'float32') print('loading network...') network = create_network() network.load_weights() print('predicting...') samples = ReadLabelDirs('images', '*.png') >> ReadImage(0) >> Collect() truelabels = samples >> Get(1) >> Format('true: {}') predictions = (samples >> transform >> pred_batch >> network.predict() >> Map(ArgMax()) >> Format('pred: {}')) samples >> Get(0) >> Zip(predictions,
""" .. module:: view_train_images :synopsis: Example for showing images with transformation """ from nutsflow import Take, Consume, GetCols from nutsml import ViewImage, TransformImage if __name__ == "__main__": from mlp_train import load_samples samples, _ = load_samples() transform = (TransformImage(0).by('elastic', 5, 100)) (samples >> GetCols(0, 0, 1) >> Take(1000) >> transform >> ViewImage( (0, 1), pause=1) >> Consume())