Example No. 1
def expand_onehot(x):
    """
    Simply converts an integer to a one-hot vector of the same size as out_axis.
    """
    # out_axis is assumed to be defined elsewhere in the original script.
    return ng.one_hot(x, axis=out_axis)
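
# Illustration only (not part of the original example): a NumPy sketch of what
# a one-hot conversion produces; vocab_size stands in for the length of
# out_axis, and ng.one_hot performs the analogous operation inside the graph.
import numpy as np

_vocab_size = 5
_indices = np.array([0, 3, 1])
# Row i of the identity matrix is the one-hot vector for class i.
_one_hot = np.eye(_vocab_size, dtype=np.float32)[_indices]
assert _one_hot.shape == (3, _vocab_size)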


# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--use_embedding', default=False, dest='use_embedding', action='store_true',
                    help='If given, embedding layer is used as the first layer')
parser.add_argument('--seq_len', type=int,
                    help="Number of time points in each input sequence",
                    default=32)
parser.add_argument('--recurrent_units', type=int,
                    help="Number of recurrent units in the network",
                    default=256)
parser.set_defaults(num_iterations=20000)
args = parser.parse_args()

use_embedding = args.use_embedding
recurrent_units = args.recurrent_units
batch_size = args.batch_size
seq_len = args.seq_len
num_iterations = args.num_iterations

# Ratio of the text to use for training
train_ratio = 0.95
# Define initialization method of neurons in the network
init_uni = UniformInit(-0.1, 0.1)

# Create the object that includes the sample text
shakes = Shakespeare(train_split=train_ratio)
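
# The snippet stops after constructing the Shakespeare text object. As a rough
# sketch of the kind of preprocessing a character-level model needs (assumed
# for illustration; char_windows is not part of the Shakespeare class's API),
# the text is mapped to integer ids and cut into windows of seq_len steps:
import numpy as np

def char_windows(text, seq_len):
    """Map characters to integer ids and slice them into (num_seq, seq_len) windows."""
    vocab = sorted(set(text))
    char_to_idx = {c: i for i, c in enumerate(vocab)}
    ids = np.array([char_to_idx[c] for c in text], dtype=np.int32)
    num_seq = len(ids) // seq_len
    # Drop the tail that does not fill a complete window.
    return ids[:num_seq * seq_len].reshape(num_seq, seq_len), vocab

windows, vocab = char_windows("to be or not to be", seq_len=4)
assert windows.shape == (4, 4)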
Example No. 2
import numpy as np

import neon as ng
import neon.transformers as ngt
from contextlib import closing

from neon.frontend import NeonArgparser, ArrayIterator
from neon.frontend import XavierInit, UniformInit
from neon.frontend import Affine, Convolution, Pooling, Sequential
from neon.frontend import Rectlin, Softmax, GradientDescentMomentum
from neon.frontend import ax
from neon.frontend import make_bound_computation, make_default_callbacks, loop_train  # noqa

np.seterr(all='raise')

parser = NeonArgparser(description=__doc__)
# Default batch_size for convnet-googlenet is 128.
parser.set_defaults(batch_size=128, num_iterations=100)
args = parser.parse_args()

# Setup data provider
image_size = 224
X_train = np.random.uniform(-1, 1,
                            (args.batch_size, 3, image_size, image_size))
y_train = np.ones(shape=(args.batch_size), dtype=np.int32)
train_data = {
    'image': {
        'data': X_train,
        'axes': ('N', 'C', 'H', 'W')
    },
    'label': {
        'data': y_train,
        'axes': ('N', )
    }
}
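
# The snippet is cut off here; the train_data dict is presumably handed to the
# ArrayIterator imported above. As a library-free sketch (synthetic_batches is
# an illustrative helper, not the neon API), feeding the same random batch
# repeatedly could look like this:
def synthetic_batches(data, num_iterations):
    """Yield the same synthetic batch num_iterations times."""
    for _ in range(num_iterations):
        yield {name: entry['data'] for name, entry in data.items()}

for batch in synthetic_batches(train_data, num_iterations=2):
    assert batch['image'].shape == (args.batch_size, 3, image_size, image_size)
    assert batch['label'].shape == (args.batch_size,)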
Example No. 3
import imp

from neon.frontend import NeonArgparser

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--predict_seq',
                    default=False,
                    dest='predict_seq',
                    action='store_true',
                    help='If given, seq_len future timepoints are predicted')
parser.add_argument('--look_ahead',
                    type=int,
                    help="Number of time steps to start predicting from",
                    default=1)
parser.add_argument('--seq_len',
                    type=int,
                    help="Number of time points in each input sequence",
                    default=32)
parser.set_defaults()
args = parser.parse_args()

# Plot the inference / generation results
do_plots = True
try:
    imp.find_module('matplotlib')
except ImportError:
    do_plots = False

# Feature dimension of the input (for Lissajous curve, this is 2)
feature_dim = 2
# Output feature dimension (for Lissajous curve, this is 2)
output_dim = 2
# Number of recurrent units in the network
recurrent_units = 32
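
# The snippet ends before the data is generated. For orientation, a hedged
# NumPy sketch of sampling a 2-D Lissajous curve (feature_dim == 2) and cutting
# it into input/target windows offset by look_ahead; lissajous_sequences is an
# illustrative helper, not the example's actual code:
import numpy as np

def lissajous_sequences(num_points, seq_len, look_ahead, a=3, b=4):
    """Sample a 2-D Lissajous curve and slice it into (input, target) windows."""
    t = np.linspace(0.0, 2.0 * np.pi, num_points)
    # Lissajous curve: x = sin(a*t + pi/2), y = sin(b*t).
    curve = np.stack([np.sin(a * t + np.pi / 2), np.sin(b * t)], axis=1)
    inputs, targets = [], []
    for start in range(num_points - seq_len - look_ahead):
        inputs.append(curve[start:start + seq_len])
        targets.append(curve[start + look_ahead:start + seq_len + look_ahead])
    return np.stack(inputs), np.stack(targets)

X, y = lissajous_sequences(num_points=2000, seq_len=args.seq_len,
                           look_ahead=args.look_ahead)
# X.shape == y.shape == (num_windows, seq_len, output_dim)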