base_lr = 0.1
    gamma = 0.1
    momentum_coef = 0.9
    wdecay = 0.0001
    nesterov = False
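    # Note (assumption): gamma is the step-decay factor for the learning-rate
    # schedule, as is conventional for ResNet training; it is defined above
    # but not echoed in the printout below.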

    print("HyperParameters")
    print("Learning Rate:     " + str(base_lr))
    print("Momentum:          " + str(momentum_coef))
    print("Weight Decay:      " + str(wdecay))
    print("Nesterov:          " + str(nesterov))

    # Command Line Parser
    parser = NeonArgparser(description="Resnet for Imagenet and Cifar10")
    parser.add_argument('--dataset',
                        type=str,
                        default="cifar10",
                        help="Enter cifar10 or i1k")
    parser.add_argument('--size',
                        type=int,
                        default=56,
                        help="Enter size of resnet")
    parser.add_argument('--disable_batch_norm', action='store_true')
    parser.add_argument('--save_file',
                        type=str,
                        default=None,
                        help="File to save weights")
    parser.add_argument('--inference',
                        type=str,
                        default=None,
                        help="File to load weights")
    parser.add_argument('--resume',
Example #2
    # Convert the generated integer token indices back to actual tokens
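    # (e.g., with index_to_token = {0: 'a', 1: 'b'}, indices [1, 0] map to ['b', 'a'])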
    gen_txt = [index_to_token[i] for i in gen_txt]
    return gen_txt


def expand_onehot(x):
    """
    Converts an integer index into a one-hot vector whose length matches out_axis
    """
    return ng.one_hot(x, axis=out_axis)
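
# Illustration of the mapping (hypothetical axis length): with an out_axis of
# length 4, an input holding the index 2 becomes the one-hot vector [0, 0, 1, 0].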


# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--use_embedding', default=False, dest='use_embedding', action='store_true',
                    help='If given, embedding layer is used as the first layer')
parser.add_argument('--seq_len', type=int,
                    help="Number of time points in each input sequence",
                    default=32)
parser.add_argument('--recurrent_units', type=int,
                    help="Number of recurrent units in the network",
                    default=256)
parser.set_defaults(num_iterations=20000)
args = parser.parse_args()

use_embedding = args.use_embedding
recurrent_units = args.recurrent_units
batch_size = args.batch_size
seq_len = args.seq_len
num_iterations = args.num_iterations
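
# Note: batch_size and num_iterations are options that NeonArgparser defines
# out of the box, which is why they can be read from args without being added
# above; parser.set_defaults(num_iterations=20000) merely overrides a default.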
Example #3
import neon as ng
from neon.frontend import Layer, Sequential, LSTM, Affine
from neon.frontend import UniformInit, Tanh, Logistic, Identity, Adam
from neon.frontend import NeonArgparser, loop_train
from neon.frontend import make_bound_computation, make_default_callbacks
from neon.frontend import ArrayIterator
import neon.transformers as ngt
import timeseries
import utils
import imp

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--predict_seq',
                    default=False,
                    dest='predict_seq',
                    action='store_true',
                    help='If given, seq_len future timepoints are predicted')
parser.add_argument('--look_ahead',
                    type=int,
                    help="Number of time steps to start predicting from",
                    default=1)
parser.add_argument('--seq_len',
                    type=int,
                    help="Number of time points in each input sequence",
                    default=32)
parser.set_defaults()
args = parser.parse_args()

# Plot the inference / generation results
do_plots = True
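
# A hypothetical invocation of this example (the script name is assumed;
# the flags are the ones defined above):
#   python timeseries_lstm.py --predict_seq --look_ahead 4 --seq_len 32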
Example #4
from contextlib import closing
import neon as ng
from neon.frontend import (Layer, Sequential, Preprocess, BiRNN, Recurrent,
                           Affine, Softmax, Tanh, LookupTable)
from neon.frontend import UniformInit, RMSProp
from neon.frontend import ax, loop_train
from neon.frontend import NeonArgparser, make_bound_computation, make_default_callbacks
from neon.frontend import SequentialArrayIterator
import neon.transformers as ngt

from neon.frontend import PTB

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--layer_type',
                    default='rnn',
                    choices=['rnn', 'birnn'],
                    help='type of recurrent layer to use (rnn or birnn)')
parser.add_argument('--use_lut',
                    action='store_true',
                    help='use a lookup table (LUT) as the first layer')
parser.set_defaults()
args = parser.parse_args()

# these hyperparameters are from the paper
args.batch_size = 50
time_steps = 150
hidden_size = 500

# download the Penn Treebank corpus
tree_bank_data = PTB(path=args.data_dir)
ptb_data = tree_bank_data.load_data()
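
# Sketch of the next step (mirroring Example #7 below): the loaded corpus
# feeds a SequentialArrayIterator; the time_steps keyword is an assumption.
#   train_set = SequentialArrayIterator(ptb_data['train'],
#                                       batch_size=args.batch_size,
#                                       time_steps=time_steps)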
Example #5
from __future__ import division
from __future__ import print_function
from contextlib import closing
import numpy as np
import neon as ng
from neon.frontend import Layer, Affine, Preprocess, Convolution, Pooling, Sequential
from neon.frontend import UniformInit, Rectlin, Softmax, GradientDescentMomentum
from neon.frontend import ax, loop_train
from neon.frontend import NeonArgparser, make_bound_computation, make_default_callbacks
from neon.frontend import ArrayIterator

from neon.frontend import CIFAR10
import neon.transformers as ngt

parser = NeonArgparser(description='Train simple CNN on cifar10 dataset')
parser.add_argument('--use_batch_norm', action='store_true',
                    help='whether to use batch normalization')
args = parser.parse_args()

np.random.seed(args.rng_seed)

# Create the dataloader
train_data, valid_data = CIFAR10(args.data_dir).load_data()
train_set = ArrayIterator(train_data, args.batch_size, total_iterations=args.num_iterations)
valid_set = ArrayIterator(valid_data, args.batch_size)

inputs = train_set.make_placeholders()
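# ax.Y is the label axis; CIFAR-10 has 10 classes, so its length is fixed to
# 10 before the network is defined.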
ax.Y.length = 10

######################
# Model specification
Example #6
import numpy as np
import neon as ng
from neon.frontend import Layer, Affine, Preprocess, Sequential
from neon.frontend import GaussianInit, Rectlin, Logistic, GradientDescentMomentum
from neon.frontend import ax, loop_train, make_bound_computation, make_default_callbacks
from neon.frontend import loop_eval
from neon.frontend import NeonArgparser
from neon.frontend import ArrayIterator

from neon.frontend import MNIST
from neon.frontend import Saver
import neon.transformers as ngt

parser = NeonArgparser(description='Train simple mlp on mnist dataset')
parser.add_argument('--save_file',
                    type=str,
                    default=None,
                    help="File to save weights")
parser.add_argument('--load_file',
                    type=str,
                    default=None,
                    help="File to load weights")
parser.add_argument('--inference',
                    action="store_true",
                    help="Run Inference with loaded weight")
args = parser.parse_args()

if args.inference and (args.load_file is None):
    print("Need to set --load_file for Inference problem")
    quit()

if args.save_file is not None:
Example #7
from contextlib import closing
import neon as ng
from neon.frontend import (Layer, Sequential, Preprocess, LSTM, Affine,
                           Softmax, Tanh, Logistic)
from neon.frontend import UniformInit, RMSProp
from neon.frontend import ax, loop_train
from neon.frontend import NeonArgparser, make_bound_computation, make_default_callbacks
from neon.frontend import SequentialArrayIterator
import neon.transformers as ngt

from neon.frontend import PTB

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--layer_type',
                    default='lstm',
                    choices=['lstm'],
                    help='type of recurrent layer to use (lstm)')
parser.set_defaults()
args = parser.parse_args()

# these hyperparameters are from the paper
args.batch_size = 64
time_steps = 50
hidden_size = 128
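# gradient_clip_value caps gradient magnitudes during backpropagation,
# a common guard against exploding gradients when training LSTMs.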
gradient_clip_value = 5

# download the Penn Treebank corpus
tree_bank_data = PTB(path=args.data_dir)
ptb_data = tree_bank_data.load_data()
train_set = SequentialArrayIterator(ptb_data['train'],
                                    batch_size=args.batch_size,