import numpy as np
import neon as ng
import neon.transformers as ngt
from contextlib import closing

from neon.frontend import NeonArgparser, ArrayIterator
from neon.frontend import XavierInit, UniformInit
from neon.frontend import Affine, Convolution, Pooling, Sequential
from neon.frontend import Rectlin, Softmax, GradientDescentMomentum
from neon.frontend import ax
from neon.frontend import make_bound_computation, make_default_callbacks, loop_train  # noqa

np.seterr(all='raise')

parser = NeonArgparser(description=__doc__)
# Default batch_size for convnet-googlenet is 128.
parser.set_defaults(batch_size=128, num_iterations=100)
args = parser.parse_args()

# Setup data provider
image_size = 224
X_train = np.random.uniform(-1, 1,
                            (args.batch_size, 3, image_size, image_size))
y_train = np.ones(shape=(args.batch_size,), dtype=np.int32)
train_data = {
    'image': {
        'data': X_train,
        'axes': ('N', 'C', 'H', 'W')
    },
    'label': {
        'data': y_train,
        'axes': ('N',)  # one integer label per example along the batch axis
    }
}
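# A sketch of batching this synthetic data, following the ArrayIterator usage
# in the sibling examples below (the total_iterations wiring is an assumption):
train_set = ArrayIterator(train_data, args.batch_size,
                          total_iterations=args.num_iterations)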
Example #2
# Hyperparameters
# Optimizer
base_lr = 0.1
gamma = 0.1
momentum_coef = 0.9
wdecay = 0.0001
nesterov = False

print("HyperParameters")
print("Learning Rate:     " + str(base_lr))
print("Momentum:          " + str(momentum_coef))
print("Weight Decay:      " + str(wdecay))
print("Nesterov:          " + str(nesterov))

# Command Line Parser
parser = NeonArgparser(description="Resnet for Imagenet and Cifar10")
parser.add_argument('--dataset',
                    type=str,
                    default="cifar10",
                    help="Enter cifar10 or i1k")
parser.add_argument('--size',
                    type=int,
                    default=56,
                    help="Enter size of resnet")
parser.add_argument('--disable_batch_norm', action='store_true')
parser.add_argument('--save_file',
                    type=str,
                    default=None,
                    help="File to save weights")
parser.add_argument('--inference',
                    type=str,
                    default=None,  # assumed completion; mirrors --save_file above
                    help="File to load weights for inference")
Example #3
        gen_txt.append(pred_char)

    # Convert integer index of tokens to actual tokens
    gen_txt = [index_to_token[i] for i in gen_txt]
    return gen_txt


def expand_onehot(x):
    """
    Convert an integer index into a one-hot vector whose length matches out_axis.
    """
    return ng.one_hot(x, axis=out_axis)
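# Usage sketch (the inputs placeholder dict is an assumption, mirroring the
# make_placeholders() pattern in the sibling examples):
# onehot_labels = expand_onehot(inputs['label'])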


# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--use_embedding', default=False, dest='use_embedding', action='store_true',
                    help='If given, embedding layer is used as the first layer')
parser.add_argument('--seq_len', type=int,
                    help="Number of time points in each input sequence",
                    default=32)
parser.add_argument('--recurrent_units', type=int,
                    help="Number of recurrent units in the network",
                    default=256)
parser.set_defaults(num_iterations=20000)
args = parser.parse_args()

use_embedding = args.use_embedding
recurrent_units = args.recurrent_units
batch_size = args.batch_size
seq_len = args.seq_len
Example #4
from __future__ import division, print_function
from contextlib import closing
import neon as ng
from neon.frontend import Layer, Sequential, LSTM, Affine
from neon.frontend import UniformInit, Tanh, Logistic, Identity, Adam
from neon.frontend import NeonArgparser, loop_train
from neon.frontend import make_bound_computation, make_default_callbacks
from neon.frontend import ArrayIterator
import neon.transformers as ngt
import timeseries
import utils
import imp

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--predict_seq',
                    default=False,
                    dest='predict_seq',
                    action='store_true',
                    help='If given, seq_len future timepoints are predicted')
parser.add_argument('--look_ahead',
                    type=int,
                    help="Number of time steps to start predicting from",
                    default=1)
parser.add_argument('--seq_len',
                    type=int,
                    help="Number of time points in each input sequence",
                    default=32)
parser.set_defaults()
args = parser.parse_args()
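# A sketch of the kind of LSTM regressor these imports suggest; the layer
# sizes, init range, and single-output Affine are assumptions:
init_uni = UniformInit(-0.1, 0.1)
seq1 = Sequential([LSTM(nout=32, init=init_uni, activation=Tanh(),
                        gate_activation=Logistic(),
                        return_sequence=args.predict_seq),
                   Affine(nout=1, weight_init=init_uni, bias_init=init_uni,
                          activation=Identity())])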
"""
from __future__ import division
from __future__ import print_function
from contextlib import closing
import numpy as np
import neon as ng
from neon.frontend import Layer, Affine, Preprocess, Sequential
from neon.frontend import UniformInit, Rectlin, Softmax, GradientDescentMomentum
from neon.frontend import ax, loop_train, make_bound_computation, make_default_callbacks
from neon.frontend import NeonArgparser
from neon.frontend import ArrayIterator

from neon.frontend import CIFAR10
import neon.transformers as ngt

parser = NeonArgparser(description='Train simple mlp on cifar10 dataset')
args = parser.parse_args()

np.random.seed(args.rng_seed)

# Create the dataloader
train_data, valid_data = CIFAR10(args.data_dir).load_data()
train_set = ArrayIterator(train_data, args.batch_size, total_iterations=args.num_iterations)
valid_set = ArrayIterator(valid_data, args.batch_size)

inputs = train_set.make_placeholders()
ax.Y.length = 10

######################
# Model specification
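# A sketch of the two-layer MLP that fits this header; the hidden width and
# the scaling preprocess are assumptions:
seq1 = Sequential([Preprocess(functor=lambda x: x / 255.),
                   Affine(nout=200, weight_init=UniformInit(-0.1, 0.1),
                          activation=Rectlin()),
                   Affine(axes=ax.Y, weight_init=UniformInit(-0.1, 0.1),
                          activation=Softmax())])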
Example #6
"""

import numpy as np
from contextlib import closing
import neon as ng
from neon.frontend import Layer, Preprocess, Recurrent, Affine, Softmax, Tanh
from neon.frontend import UniformInit, RMSProp
from neon.frontend import ax, loop_train
from neon.frontend import NeonArgparser, make_bound_computation, make_default_callbacks
from neon.frontend import SequentialArrayIterator
import neon.transformers as ngt

from neon.frontend import PTB

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.set_defaults(batch_size=128, num_iterations=2000)
args = parser.parse_args()

# model parameters
time_steps = 5
hidden_size = 256
gradient_clip_value = 5

# download penn treebank
# set shift_target to be False, since it is going to predict the same sequence
tree_bank_data = PTB(path=args.data_dir, shift_target=False)
ptb_data = tree_bank_data.load_data()
train_set = SequentialArrayIterator(ptb_data['train'],
                                    batch_size=args.batch_size,
                                    time_steps=time_steps,
                                    total_iterations=args.num_iterations)
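# A sketch of the character-level model suggested by the imports above;
# pulling in Sequential here is an assumption, mirroring the sibling examples:
from neon.frontend import Sequential
init_uni = UniformInit(-0.1, 0.1)
seq1 = Sequential([Preprocess(functor=lambda x: ng.one_hot(x, axis=ax.Y)),
                   Recurrent(nout=hidden_size, init=init_uni, activation=Tanh(),
                             return_sequence=True),
                   Affine(weight_init=init_uni, bias_init=init_uni,
                          activation=Softmax(), axes=(ax.Y,))])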
Example #7
"""

from contextlib import closing
import neon as ng
from neon.frontend import (Layer, Sequential, Preprocess, BiRNN, Recurrent,
                           Affine, Softmax, Tanh, LookupTable)
from neon.frontend import UniformInit, RMSProp
from neon.frontend import ax, loop_train
from neon.frontend import NeonArgparser, make_bound_computation, make_default_callbacks
from neon.frontend import SequentialArrayIterator
import neon.transformers as ngt

from neon.frontend import PTB

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--layer_type',
                    default='rnn',
                    choices=['rnn', 'birnn'],
                    help='type of recurrent layer to use (rnn or birnn)')
parser.add_argument('--use_lut',
                    action='store_true',
                    help='choose to use lut as first layer')
parser.set_defaults()
args = parser.parse_args()

# these hyperparameters are from the paper
args.batch_size = 50
time_steps = 150
hidden_size = 500
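# A sketch of how --layer_type would pick the recurrent layer; the constructor
# arguments are assumptions:
init_uni = UniformInit(-0.08, 0.08)
if args.layer_type == 'birnn':
    rlayer = BiRNN(hidden_size, init=init_uni, activation=Tanh(),
                   return_sequence=True)
else:
    rlayer = Recurrent(hidden_size, init=init_uni, activation=Tanh(),
                       return_sequence=True)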
"""
from __future__ import division
from __future__ import print_function
from contextlib import closing
import numpy as np
import neon as ng
from neon.frontend import Layer, Affine, Preprocess, Convolution, Pooling, Sequential
from neon.frontend import UniformInit, Rectlin, Softmax, GradientDescentMomentum
from neon.frontend import ax, loop_train
from neon.frontend import NeonArgparser, make_bound_computation, make_default_callbacks
from neon.frontend import ArrayIterator

from neon.frontend import CIFAR10
import neon.transformers as ngt

parser = NeonArgparser(description='Train simple CNN on cifar10 dataset')
parser.add_argument('--use_batch_norm', action='store_true',
                    help='whether to use batch normalization')
args = parser.parse_args()

np.random.seed(args.rng_seed)

# Create the dataloader
train_data, valid_data = CIFAR10(args.data_dir).load_data()
train_set = ArrayIterator(train_data, args.batch_size, total_iterations=args.num_iterations)
valid_set = ArrayIterator(valid_data, args.batch_size)

inputs = train_set.make_placeholders()
ax.Y.length = 10

######################
# Model specification
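# A sketch of a small CNN consistent with these imports; the filter counts and
# hidden width are assumptions:
init_uni = UniformInit(-0.1, 0.1)
seq1 = Sequential([Convolution((5, 5, 16), filter_init=init_uni,
                               activation=Rectlin(), batch_norm=args.use_batch_norm),
                   Pooling(pool_shape=(2, 2), strides=2),
                   Convolution((5, 5, 32), filter_init=init_uni,
                               activation=Rectlin(), batch_norm=args.use_batch_norm),
                   Pooling(pool_shape=(2, 2), strides=2),
                   Affine(nout=500, weight_init=init_uni, activation=Rectlin(),
                          batch_norm=args.use_batch_norm),
                   Affine(axes=ax.Y, weight_init=init_uni, activation=Softmax())])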
Example #9
import numpy as np
import neon as ng
import neon.transformers as ngt
from contextlib import closing

from neon.frontend import NeonArgparser, ArrayIterator
from neon.frontend import GaussianInit, UniformInit
from neon.frontend import Affine, Convolution, Pooling, Sequential
from neon.frontend import Rectlin, Softmax, GradientDescentMomentum
from neon.frontend import ax
from neon.frontend import make_bound_computation, make_default_callbacks, loop_train  # noqa

np.seterr(all='raise')

parser = NeonArgparser(
    description='Train convnet-vgg_a model on random dataset')
# Default batch_size for convnet-vgg_a is 64
parser.set_defaults(batch_size=64, num_iterations=100)
args = parser.parse_args()

# Setup data provider
image_size = 224
X_train = np.random.uniform(-1, 1,
                            (args.batch_size, 3, image_size, image_size))
y_train = np.ones(shape=(args.batch_size,), dtype=np.int32)
train_data = {
    'image': {
        'data': X_train,
        'axes': ('N', 'C', 'H', 'W')
    },
    'label': {
        'data': y_train,
        'axes': ('N',)  # one integer label per example along the batch axis
    }
}
Example #10
from contextlib import closing
import os
import numpy as np
import neon as ng
from neon.frontend import Layer, Affine, Preprocess, Sequential
from neon.frontend import GaussianInit, Rectlin, Logistic, GradientDescentMomentum
from neon.frontend import ax, loop_train, make_bound_computation, make_default_callbacks
from neon.frontend import loop_eval
from neon.frontend import NeonArgparser
from neon.frontend import ArrayIterator

from neon.frontend import MNIST
from neon.frontend import Saver
import neon.transformers as ngt

parser = NeonArgparser(description='Train simple mlp on mnist dataset')
parser.add_argument('--save_file',
                    type=str,
                    default=None,
                    help="File to save weights")
parser.add_argument('--load_file',
                    type=str,
                    default=None,
                    help="File to load weights")
parser.add_argument('--inference',
                    action="store_true",
                    help="Run Inference with loaded weight")
args = parser.parse_args()

if args.inference and (args.load_file is None):
    print("Inference requires --load_file to be set")
Example #11
from __future__ import division
from __future__ import print_function
from contextlib import closing
import numpy as np
import neon as ng
from neon.frontend import Layer, Affine, Preprocess, Convolution, Pooling, Sequential
from neon.frontend import XavierInit, Rectlin, Softmax, GradientDescentMomentum
from neon.frontend import ax, loop_train
from neon.frontend import NeonArgparser, make_bound_computation, make_default_callbacks
from neon.frontend import ArrayIterator

from neon.frontend import MNIST
import neon.transformers as ngt

parser = NeonArgparser(description='Train LeNet topology on Mnist dataset')
args = parser.parse_args()

np.random.seed(args.rng_seed)

# Create the dataloader
train_data, valid_data = MNIST(args.data_dir).load_data()
train_set = ArrayIterator(train_data, args.batch_size, total_iterations=args.num_iterations)
valid_set = ArrayIterator(valid_data, args.batch_size)

inputs = train_set.make_placeholders()
ax.Y.length = 10

######################
# Model specification
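# A sketch along the lines of the CIFAR-10 CNN above, here with the Xavier
# initializer these imports pull in; filter counts are assumptions:
init_xav = XavierInit()
seq1 = Sequential([Preprocess(functor=lambda x: x / 255.),
                   Convolution((5, 5, 16), filter_init=init_xav, activation=Rectlin()),
                   Pooling(pool_shape=(2, 2), strides=2),
                   Affine(nout=500, weight_init=init_xav, activation=Rectlin()),
                   Affine(axes=ax.Y, weight_init=init_xav, activation=Softmax())])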
Example #12
"""
from contextlib import closing
import neon as ng
from neon.frontend import (Layer, Sequential, Preprocess, LSTM, Affine,
                           Softmax, Tanh, Logistic)
from neon.frontend import UniformInit, RMSProp
from neon.frontend import ax, loop_train
from neon.frontend import NeonArgparser, make_bound_computation, make_default_callbacks
from neon.frontend import SequentialArrayIterator
import neon.transformers as ngt

from neon.frontend import PTB

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--layer_type',
                    default='lstm',
                    choices=['lstm'],
                    help='type of recurrent layer to use (lstm)')
parser.set_defaults()
args = parser.parse_args()

# these hyperparameters are from the paper
args.batch_size = 64
time_steps = 50
hidden_size = 128
gradient_clip_value = 5

# download penn treebank
tree_bank_data = PTB(path=args.data_dir)
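# Loading and batching would proceed as in the earlier PTB example (a sketch;
# the total_iterations wiring is an assumption):
ptb_data = tree_bank_data.load_data()
train_set = SequentialArrayIterator(ptb_data['train'],
                                    batch_size=args.batch_size,
                                    time_steps=time_steps,
                                    total_iterations=args.num_iterations)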