Example #1
def test_dataset(backend_default, data):
    dataset = MNIST(path=data)
    dataset.gen_iterators()
    train_set = dataset.data_dict['train']
    train_set.be = NervanaObject.be

    for i in range(2):
        for X_batch, y_batch in train_set:
            neon_logger.display("Xshape: {}, yshape: {}".format(X_batch.shape, y_batch.shape))
        train_set.index = 0  # rewind the iterator before the next pass
Example #2
def test_dataset(backend_default, data):
    dataset = MNIST(path=data)
    dataset.gen_iterators()
    train_set = dataset.data_dict['train']
    train_set.be = NervanaObject.be

    for i in range(2):
        for X_batch, y_batch in train_set:
            neon_logger.display("Xshape: {}, yshape: {}".format(
                X_batch.shape, y_batch.shape))
        train_set.index = 0  # rewind the iterator before the next pass
Example #3
def main(args):
    # load up the mnist data set
    dataset = MNIST(path=args.data_dir)

    # initialize model object
    mlp = Model(layers=[
        Affine(nout=100,
               init=Gaussian(loc=0.0, scale=0.01),
               activation=Rectlin()),
        Affine(nout=10,
               init=Gaussian(loc=0.0, scale=0.01),
               activation=Logistic(shortcut=True))
    ])

    # setup optimizer
    optimizer = GradientDescentMomentum(0.1,
                                        momentum_coef=0.9,
                                        stochastic_round=args.rounding)

    # configure callbacks
    callbacks = Callbacks(mlp,
                          eval_set=dataset.valid_iter,
                          **args.callback_args)

    # run fit, using CrossEntropyBinary as the cost
    mlp.fit(dataset.train_iter,
            optimizer=optimizer,
            num_epochs=args.epochs,
            cost=GeneralizedCost(costfunc=CrossEntropyBinary()),
            callbacks=callbacks)
    error_rate = mlp.eval(dataset.valid_iter, metric=Misclassification())
    neon_logger.display('Classification accuracy = %.4f' % (1 - error_rate))
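
Example #3 only defines main(); in the neon examples this function is normally driven by NeonArgparser at module level, which already provides the --data_dir, --epochs, and --rounding options that main() reads. A minimal sketch of the assumed entry point:

# minimal entry-point sketch; assumes NeonArgparser is imported as in Example #5
if __name__ == '__main__':
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()
    main(args)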
Example #4
def test_model_get_outputs(backend_default, data):
    dataset = MNIST(path=data)
    train_set = dataset.train_iter

    init_norm = Gaussian(loc=0.0, scale=0.1)

    layers = [
        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin()),
        Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))
    ]
    mlp = Model(layers=layers)
    out_list = []
    mlp.initialize(train_set)
    for x, t in train_set:
        x = mlp.fprop(x)
        out_list.append(x.get().T.copy())
    ref_output = np.vstack(out_list)

    train_set.reset()
    output = mlp.get_outputs(train_set)
    assert np.allclose(output, ref_output[:output.shape[0], :])

    # test model benchmark inference
    mlp.benchmark(train_set, inference=True, niterations=5)
Example #5
from neon.data import MNIST
from neon.layers import GeneralizedCost, Affine
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary, Misclassification
from neon.util.argparser import NeonArgparser
from neon import logger as neon_logger
import h5py
import numpy as np

# parse the command line arguments
parser = NeonArgparser(__doc__)

args = parser.parse_args()

# load up the mnist data set
dataset = MNIST(path=args.data_dir)
# split into train and test sets
(X_train, y_train), (X_test, y_test), nclass = dataset.load_data()

# generate the HDF5 files (one per split)
datasets = {'train': (X_train, y_train),
            'test': (X_test, y_test)}

for ky in ['train', 'test']:
    df = h5py.File('mnist_%s.h5' % ky, 'w')

    # input images
    in_dat = datasets[ky][0]
    df.create_dataset('input', data=in_dat)
    df['input'].attrs['lshape'] = (1, 28, 28)  # (C, H, W)
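
    # The excerpt stops after writing the images. A hedged continuation sketch,
    # assuming neon's HDF5Iterator convention of an 'output' dataset for the
    # labels; the 'nclass' attribute here is an assumption, not from the original.
    out_dat = datasets[ky][1]
    df.create_dataset('output', data=out_dat)
    df['output'].attrs['nclass'] = nclass
    df.close()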
Example #6
def test_model_serialize(backend_default, data):
    dataset = MNIST(path=data)
    (X_train, y_train), (X_test, y_test), nclass = dataset.load_data()
    train_set = ArrayIterator(
        [X_train, X_train], y_train, nclass=nclass, lshape=(1, 28, 28))

    init_norm = Gaussian(loc=0.0, scale=0.01)

    # initialize model
    path1 = Sequential([Conv((5, 5, 16), init=init_norm, bias=Constant(0), activation=Rectlin()),
                        Pooling(2),
                        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())])
    path2 = Sequential([Affine(nout=100, init=init_norm, bias=Constant(0), activation=Rectlin()),
                        Dropout(keep=0.5),
                        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())])
    layers = [MergeMultistream(layers=[path1, path2], merge="stack"),
              Affine(nout=20, init=init_norm, batch_norm=True, activation=Rectlin()),
              Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]

    tmp_save = 'test_model_serialize_tmp_save.pickle'
    mlp = Model(layers=layers)
    mlp.optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)
    mlp.cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    mlp.initialize(train_set, cost=mlp.cost)
    n_test = 3
    num_epochs = 3
    # Train model for num_epochs and n_test batches
    for epoch in range(num_epochs):
        for i, (x, t) in enumerate(train_set):
            x = mlp.fprop(x)
            delta = mlp.cost.get_errors(x, t)
            mlp.bprop(delta)
            mlp.optimizer.optimize(mlp.layers_to_optimize, epoch=epoch)
            if i > n_test:
                break

    # Get expected outputs of n_test batches and states of all layers
    outputs_exp = []
    pdicts_exp = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs_exp.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break

    # Serialize model
    mlp.save_params(tmp_save, keep_states=True)

    # Load model
    mlp = Model(tmp_save)

    mlp.initialize(train_set)
    outputs = []
    pdicts = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break

    # Check outputs, states, and params are the same
    for output, output_exp in zip(outputs, outputs_exp):
        assert allclose_with_out(output.get(), output_exp.get())

    for pd, pd_exp in zip(pdicts, pdicts_exp):
        for s, s_e in zip(pd['states'], pd_exp['states']):
            if isinstance(s, list):  # this is the batch norm case
                for _s, _s_e in zip(s, s_e):
                    assert allclose_with_out(_s, _s_e)
            else:
                assert allclose_with_out(s, s_e)
        for p, p_e in zip(pd['params'], pd_exp['params']):
            assert type(p) == type(p_e)
            if isinstance(p, list):  # this is the batch norm case
                for _p, _p_e in zip(p, p_e):
                    assert allclose_with_out(_p, _p_e)
            elif isinstance(p, np.ndarray):
                assert allclose_with_out(p, p_e)
            else:
                assert p == p_e

    os.remove(tmp_save)
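
The round trip exercised above (save_params with keep_states=True, then rebuilding with Model(path) and re-initializing) is the standard neon serialization pattern; condensed:

# condensed save/load round trip, mirroring the test above
mlp.save_params('model.pkl', keep_states=True)  # weights plus optimizer state
restored = Model('model.pkl')                   # rebuild the model from the file
restored.initialize(train_set)                  # allocate buffers before fprop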
Example #7
"""

from neon.data import MNIST
from neon.initializers import Gaussian, Constant
from neon.layers import GeneralizedCost, Affine
from neon.models import Model
from neon.optimizers import GradientDescentMomentum, MultiOptimizer, RMSProp
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser

# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()

dataset = MNIST(path=args.data_dir)
train_set = dataset.train_iter
valid_set = dataset.valid_iter

# weight initialization
init_norm = Gaussian(loc=0.0, scale=0.01)

# initialize model
layers = []
layers.append(Affine(nout=100, init=init_norm, bias=Constant(0),
                     activation=Rectlin()))
layers.append(Affine(nout=10, init=init_norm, bias=Constant(0),
                     activation=Logistic(shortcut=True),
                     name='special_linear'))

cost = GeneralizedCost(costfunc=CrossEntropyBinary())
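
The MultiOptimizer/RMSProp imports and the name='special_linear' layer suggest this example goes on to give that layer its own optimizer. A minimal sketch of how MultiOptimizer is typically wired up (the hyperparameters are assumptions):

# sketch: RMSProp for the named layer, SGD with momentum for everything else;
# the learning rate and momentum below are assumptions, not from the original
optimizer = MultiOptimizer({'default': GradientDescentMomentum(0.1, momentum_coef=0.9),
                            'special_linear': RMSProp()})
mlp = Model(layers=layers)
mlp.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost,
        callbacks=Callbacks(mlp, eval_set=valid_set, **args.callback_args))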
Example #8
# imports added for the names this excerpt uses (logging, np, gen_backend, ...)
import logging
import numpy as np

from neon.backends import gen_backend
from neon.initializers import Gaussian, Constant
from neon.layers import Conv, Pooling, Affine
from neon.util.argparser import NeonArgparser
from neon.optimizers.optimizer import MultiOptimizer, GradientDescentMomentum
from neon.transforms import Softmax, CrossEntropyMulti, Rectlin, Misclassification
from neon.models import Model
from neon.data import ArrayIterator, MNIST
from neon.callbacks.callbacks import Callbacks
from callbacks.callbacks import TrainByStageCallback, FuzzyPruneCallback

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

parser = NeonArgparser(__doc__)
args = parser.parse_args()
be = gen_backend(backend='gpu', batch_size=128, datatype=np.float32)

# setup a dataset iterator
mnist = MNIST(path='../dataset/mnist')
(X_train, y_train), (X_test, y_test), nclass = mnist.load_data()
train_set = ArrayIterator(X_train, y_train, nclass=nclass, lshape=(1, 28, 28))
valid_set = ArrayIterator(X_test, y_test, nclass=nclass, lshape=(1, 28, 28))

# define model
nfilters = [20, 50, 500]
# nfilters = [24, 56, 500]
init_w = Gaussian(scale=0.01)
relu = Rectlin()
common_params = dict(init=init_w, activation=relu)
layers = [
    Conv((5, 5, nfilters[0]), bias=Constant(0.1), padding=0, **common_params),
    Pooling(2, strides=2, padding=0),
    Conv((5, 5, nfilters[1]), bias=Constant(0.1), padding=0, **common_params),
    Pooling(2, strides=2, padding=0),
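
The listing cuts off inside the layers list. A hedged sketch of a typical LeNet-style completion, assuming nfilters[2] sizes an Affine hidden layer ahead of a Softmax head (none of this is from the original):

    # assumed completion of the truncated list: fully connected head
    Affine(nout=nfilters[2], bias=Constant(0.1), **common_params),
    Affine(nout=nclass, init=init_w, bias=Constant(0.1), activation=Softmax()),
]
model = Model(layers=layers)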
Example #9
from neon.data import MNIST, ArrayIterator
from neon.initializers import Gaussian
from neon.layers import GeneralizedCost, Affine, Sequential, MergeMultistream
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser

# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()

# hyperparameters
num_epochs = args.epochs

dataset = MNIST(path=args.data_dir)
(X_train, y_train), (X_test, y_test), nclass = dataset.load_data()
train_set = ArrayIterator([X_train, X_train],
                          y_train,
                          nclass=nclass,
                          lshape=(1, 28, 28))
# pause in the debugger to inspect each two-stream minibatch
for example in train_set:
    import pdb
    pdb.set_trace()
valid_set = ArrayIterator([X_test, X_test],
                          y_test,
                          nclass=nclass,
                          lshape=(1, 28, 28))

# weight initialization
init_norm = Gaussian(loc=0.0, scale=0.01)
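
The two copies of X_train above feed a two-input model; a hedged sketch of the merge topology this example presumably builds (the layer sizes are assumptions, loosely mirroring Example #6):

# sketch: two Sequential paths merged into a single stream; sizes are assumed
path1 = Sequential([Affine(nout=100, init=init_norm, activation=Rectlin())])
path2 = Sequential([Affine(nout=100, init=init_norm, activation=Rectlin())])
layers = [MergeMultistream(layers=[path1, path2], merge="stack"),
          Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))]
mlp = Model(layers=layers)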
Example #10
#!/usr/bin/env python
from neon.util.argparser import NeonArgparser

parser = NeonArgparser(__doc__)
args = parser.parse_args()

from neon.data import MNIST
from neon.data import ArrayIterator

mnist = MNIST()

(X_train, y_train), (X_test, y_test), nclass = mnist.load_data()

# print("X_test: %s" % X_test[1])

# setup training set iterator
train_set = ArrayIterator(X_train, y_train, nclass=nclass)
# setup test set iterator
test_set = ArrayIterator(X_test, y_test, nclass=nclass)

# Initialize weights to small random numbers drawn from a Gaussian
from neon.initializers import Gaussian

init_norm = Gaussian(loc=0.0, scale=0.01)

# Affine is a fully connected layer; this network uses 100 hidden units
from neon.layers import Affine
# We will use ReLU for the hidden units and Softmax for the output units. Softmax
# ensures that all outputs sum to 1 and lie within the range [0, 1].
from neon.transforms import Rectlin, Softmax
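
Following the comments above, a hedged sketch of how the described network would be assembled (the layer sizes follow the comments; everything else is an assumption):

# sketch: the MLP described above, built from the names already imported
layers = [Affine(nout=100, init=init_norm, activation=Rectlin()),
          Affine(nout=nclass, init=init_norm, activation=Softmax())]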
Example #11
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 22 06:37:04 2017

@author: Administrator
"""

from neon.backends import gen_backend
from neon.data import MNIST

be = gen_backend(batch_size=128)
mnist = MNIST(path='data/')
train_set = mnist.train_iter
valid_set = mnist.valid_iter
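
Example #11 stops after wiring up the backend and the data iterators. A short hedged sketch of how one might sanity-check what it created (the shapes in the comment are what neon's MNIST iterators typically yield):

# sketch: pull one minibatch to confirm shapes; x is features x batch_size
for x, t in train_set:
    print(x.shape, t.shape)  # e.g. (784, 128) and (10, 128)
    break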
Example #12
def test_model_serialize(backend_default, data):
    dataset = MNIST(path=data)
    (X_train, y_train), (X_test, y_test), nclass = dataset.load_data()
    train_set = ArrayIterator([X_train, X_train],
                              y_train,
                              nclass=nclass,
                              lshape=(1, 28, 28))

    init_norm = Gaussian(loc=0.0, scale=0.01)

    # initialize model
    path1 = Sequential([
        Conv((5, 5, 16),
             init=init_norm,
             bias=Constant(0),
             activation=Rectlin()),
        Pooling(2),
        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())
    ])
    path2 = Sequential([
        Affine(nout=100,
               init=init_norm,
               bias=Constant(0),
               activation=Rectlin()),
        Dropout(keep=0.5),
        Affine(nout=20, init=init_norm, bias=init_norm, activation=Rectlin())
    ])
    layers = [
        MergeMultistream(layers=[path1, path2], merge="stack"),
        Affine(nout=20, init=init_norm, batch_norm=True, activation=Rectlin()),
        Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True))
    ]

    tmp_save = 'test_model_serialize_tmp_save.pickle'
    mlp = Model(layers=layers)
    mlp.optimizer = GradientDescentMomentum(learning_rate=0.1,
                                            momentum_coef=0.9)
    mlp.cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    mlp.initialize(train_set, cost=mlp.cost)
    n_test = 3
    num_epochs = 3
    # Train model for num_epochs and n_test batches
    for epoch in range(num_epochs):
        for i, (x, t) in enumerate(train_set):
            x = mlp.fprop(x)
            delta = mlp.cost.get_errors(x, t)
            mlp.bprop(delta)
            mlp.optimizer.optimize(mlp.layers_to_optimize, epoch=epoch)
            if i > n_test:
                break

    # Get expected outputs of n_test batches and states of all layers
    outputs_exp = []
    pdicts_exp = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs_exp.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break

    # Serialize model
    mlp.save_params(tmp_save, keep_states=True)

    # Load model
    mlp = Model(tmp_save)

    mlp.initialize(train_set)
    outputs = []
    pdicts = [l.get_params_serialize() for l in mlp.layers_to_optimize]
    for i, (x, t) in enumerate(train_set):
        outputs.append(mlp.fprop(x, inference=True))
        if i > n_test:
            break

    # Check outputs, states, and params are the same
    for output, output_exp in zip(outputs, outputs_exp):
        assert np.allclose(output.get(), output_exp.get())

    for pd, pd_exp in zip(pdicts, pdicts_exp):
        for s, s_e in zip(pd['states'], pd_exp['states']):
            if isinstance(s, list):  # this is the batch norm case
                for _s, _s_e in zip(s, s_e):
                    assert np.allclose(_s, _s_e)
            else:
                assert np.allclose(s, s_e)
        for p, p_e in zip(pd['params'], pd_exp['params']):
            assert type(p) == type(p_e)
            if isinstance(p, list):  # this is the batch norm case
                for _p, _p_e in zip(p, p_e):
                    assert np.allclose(_p, _p_e)
            elif isinstance(p, np.ndarray):
                assert np.allclose(p, p_e)
            else:
                assert p == p_e

    os.remove(tmp_save)
Example #13
from neon.layers import Affine, Conv, Pooling, Dropout
from neon.transforms import Rectlin, Softmax
from neon.models import Model
from neon.layers import GeneralizedCost
from neon.transforms import CrossEntropyMulti
from neon.optimizers import GradientDescentMomentum
from neon.callbacks.callbacks import Callbacks
from neon.data import MNIST
from neon.util.argparser import NeonArgparser
from neon.initializers import Uniform
from neon.transforms import Misclassification

parser = NeonArgparser(__doc__)
args = parser.parse_args()

MNIST_dataset = MNIST()
train_set = MNIST_dataset.train_iter
test_set = MNIST_dataset.valid_iter

init_uni = Uniform(low=-0.1, high=0.1)
layers = [
    Conv(fshape=(5, 5, 32), init=init_uni, activation=Rectlin()),
    Pooling(fshape=2, strides=2),
    Conv(fshape=(5, 5, 32), init=init_uni, activation=Rectlin()),
    Pooling(fshape=2, strides=2),
    Dropout(keep=0.5),
    Affine(nout=256, init=init_uni, activation=Rectlin()),
    Dropout(keep=0.5),
    Affine(nout=10, init=init_uni, activation=Softmax())
]
model = Model(layers)
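
Example #13 imports everything needed for training but stops at model construction. A hedged sketch of the usual continuation (the learning rate and momentum values are assumptions):

# sketch: standard fit/eval continuation; hyperparameters are assumptions
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
optimizer = GradientDescentMomentum(0.05, momentum_coef=0.9)
callbacks = Callbacks(model, eval_set=test_set, **args.callback_args)
model.fit(train_set, optimizer=optimizer, num_epochs=args.epochs,
          cost=cost, callbacks=callbacks)
error_pct = 100 * model.eval(test_set, metric=Misclassification())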