Example #1
def test_model_get_outputs_rnn(backend_default, data):

    data_path = load_ptb_test(path=data)
    data_set = Text(time_steps=50, path=data_path)

    # weight initialization
    init = Constant(0.08)

    # model initialization
    layers = [
        Recurrent(150, init, activation=Logistic()),
        Affine(len(data_set.vocab), init, bias=init, activation=Rectlin())
    ]

    model = Model(layers=layers)
    output = model.get_outputs(data_set)

    assert output.shape == (data_set.ndata, data_set.seq_length,
                            data_set.nclass)

    # since the weights are initialized to a constant and the model is untrained,
    # the values along the feature dimension should all be the same
    assert np.allclose(output[0, 0], output[0, 0, 0], rtol=0, atol=1e-5)
    assert np.allclose(output[0, 1], output[0, 1, 0], rtol=0, atol=1e-5)

    # along the time dim, the values should be increasing:
    assert np.alltrue(output[0, 2] > output[0, 1])
    assert np.alltrue(output[0, 1] > output[0, 0])
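
# Not part of the original test -- a minimal numpy sketch of why the feature-dimension
# assertions hold: with a constant weight matrix every output unit computes the same
# function of the input, so activations are identical along the feature dimension.
import numpy as np

x = np.random.rand(20)                   # arbitrary input vector
W = np.full((150, 20), 0.08)             # constant init, as with Constant(0.08)
h = 1.0 / (1.0 + np.exp(-W.dot(x)))      # Logistic activation
assert np.allclose(h, h[0])              # every feature holds the same value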
Example #2
def test_model_get_outputs_rnn(backend_default, data):

    data_path = load_text('ptb-valid', path=data)

    data_set = Text(time_steps=50, path=data_path)

    # weight initialization
    init = Constant(0.08)

    # model initialization
    layers = [
        Recurrent(150, init, Logistic()),
        Affine(len(data_set.vocab), init, bias=init, activation=Rectlin())
    ]

    model = Model(layers=layers)
    output = model.get_outputs(data_set)

    assert output.shape == (
        data_set.ndata, data_set.seq_length, data_set.nclass)
Example #3
# parse the command line arguments
default_overrides = dict(save_path='rnn_text_gen.pickle',
                         serialize=1,
                         batch_size=64)

parser = NeonArgparser(__doc__, default_overrides=default_overrides)
args = parser.parse_args()

# hyperparameters
time_steps = 64
hidden_size = 512
gradient_clip_value = 5

# download shakespeare text
data_path = load_shakespeare(path=args.data_dir)
train_path, valid_path = Text.create_valid_file(data_path)

# load data and parse on character-level
train_set = Text(time_steps, train_path)
valid_set = Text(time_steps, valid_path, vocab=train_set.vocab)

# weight initialization
init = Uniform(low=-0.08, high=0.08)

# model initialization
layers = [
    LSTM(hidden_size, init, activation=Logistic(), gate_activation=Tanh()),
    Affine(len(train_set.vocab), init, bias=init, activation=Softmax())
]
model = Model(layers=layers)
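
# Not part of the original snippet -- one plausible way to train the model above.
# The module paths and arguments follow neon v1.x conventions and are assumptions,
# not taken from this example.
from neon.layers import GeneralizedCost
from neon.transforms import CrossEntropyMulti
from neon.optimizers import RMSProp
from neon.callbacks.callbacks import Callbacks

cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
optimizer = RMSProp(gradient_clip_value=gradient_clip_value)
callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)
model.fit(train_set, optimizer=optimizer, num_epochs=args.epochs,
          cost=cost, callbacks=callbacks)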
Example #4
vocab_size = 20000
sentence_length = 100
embedding_dim = 128
hidden_size = 128
reset_cells = True

# setup backend
be = gen_backend(backend=args.backend,
                 batch_size=batch_size,
                 rng_seed=args.rng_seed,
                 device_id=args.device_id,
                 default_dtype=args.datatype)

# make dataset
path = load_text('imdb', path=args.data_dir)
(X_train, y_train), (X_test, y_test), nclass = Text.pad_data(
    path, vocab_size=vocab_size, sentence_length=sentence_length)

print "Vocab size - ", vocab_size
print "Sentence Length - ", sentence_length
print "# of train sentences", X_train.shape[0]
print "# of test sentence", X_test.shape[0]

train_set = DataIterator(X_train, y_train, nclass=2)
valid_set = DataIterator(X_test, y_test, nclass=2)

# weight initialization
init_emb = Uniform(low=-0.1/embedding_dim, high=0.1/embedding_dim)
init_glorot = GlorotUniform()

layers = [
    LookupTable(vocab_size=vocab_size, embedding_dim=embedding_dim, init=init_emb),
Example #5
if args.callback_args['serialize'] is None:
    args.callback_args['serialize'] = 1

# hyperparameters
args.batch_size = 64
time_steps = 64
hidden_size = 512
gradient_clip_value = 5

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))

# download shakespeare text
data_path = load_shakespeare(path=args.data_dir)
train_path, valid_path = Text.create_valid_file(data_path)

# load data and parse on character-level
train_set = Text(time_steps, train_path)
valid_set = Text(time_steps, valid_path, vocab=train_set.vocab)

# weight initialization
init = Uniform(low=-0.08, high=0.08)

# model initialization
layers = [
    LSTM(hidden_size, init, activation=Logistic(), gate_activation=Tanh()),
    Affine(len(train_set.vocab), init, bias=init, activation=Softmax())
]
model = Model(layers=layers)
Example #6
hidden_size = 1000
clip_gradients = False

# setup backend
be = gen_backend(backend=args.backend,
                 batch_size=batch_size,
                 rng_seed=args.rng_seed,
                 device_id=args.device_id,
                 default_dtype=args.datatype)

# download penn treebank
train_path = load_text('ptb-train', path=args.data_dir)
valid_path = load_text('ptb-valid', path=args.data_dir)

# load data and parse on character-level
train_set = Text(time_steps, train_path)
valid_set = Text(time_steps, valid_path, vocab=train_set.vocab)

# weight initialization
init = Uniform(low=-0.08, high=0.08)

# model initialization
if rlayer_type == 'lstm':
    rlayer = LSTM(hidden_size, init, Logistic(), Tanh())
elif rlayer_type == 'gru':
    rlayer = GRU(hidden_size, init, activation=Tanh(), gate_activation=Logistic())
else:
    raise NotImplementedError('%s layer not implemented' % rlayer_type)

layers = [
    rlayer,
Example #7
print(
    'batch_size: %s \nvocab_size: %s \nsentence_length: %s \nembedding_dim: %s \nhidden_size: %s'
    % (batch_size, vocab_size, sentence_length, embedding_dim, hidden_size))

# setup backend
be = gen_backend(backend=args.backend,
                 batch_size=batch_size,
                 rng_seed=args.rng_seed,
                 device_id=args.device_id,
                 default_dtype=args.datatype)

# make dataset
(X_train, y_train), (X_test, y_test), nclass = Text.pad_data(
    os.path.join(
        data_root,
        'train_valid_text_index_in_binary_label_shuffled_10000.pickle'),
    vocab_size=vocab_size,
    sentence_length=sentence_length)

print "Vocab size - ", vocab_size
print "Sentence Length - ", sentence_length
print "# of train sentences", X_train.shape[0]
print "# of test sentence", X_test.shape[0]

train_set = DataIterator(X_train, y_train, nclass=2)
valid_set = DataIterator(X_test, y_test, nclass=2)

# weight initialization
init_emb = Uniform(low=-0.1 / embedding_dim, high=0.1 / embedding_dim)
init_glorot = GlorotUniform()
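
# Not part of the original snippet -- one plausible classifier stack for this
# sentiment setup, modelled on neon's IMDb LSTM example; the layer names assume
# the usual neon imports (LookupTable, LSTM, RecurrentSum, Dropout, Affine, etc.).
layers = [
    LookupTable(vocab_size=vocab_size, embedding_dim=embedding_dim, init=init_emb),
    LSTM(hidden_size, init_glorot, activation=Tanh(),
         gate_activation=Logistic(), reset_cells=True),
    RecurrentSum(),
    Dropout(keep=0.5),
    Affine(2, init_glorot, bias=init_glorot, activation=Softmax())
]
model = Model(layers=layers)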
Example #8
# download penn treebank
train_path = load_text('ptb-train', path=args.data_dir)
valid_path = load_text('ptb-valid', path=args.data_dir)


# define a custom function to parse the input into individual tokens; for this
# data it splits the text into individual words.  The function can be passed to
# the Text object during dataset creation, as seen below.
def tokenizer(s):
    return s.replace('\n', '<eos>').split()
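
# For example (illustration only, not from the original file): given a line whose
# words are space-separated and that ends with ' \n', the tokenizer keeps the
# end-of-sentence marker as its own token:
#   tokenizer('the cat sat \n')  ->  ['the', 'cat', 'sat', '<eos>']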


# load data and parse on word-level
train_set = Text(time_steps,
                 train_path,
                 tokenizer=tokenizer,
                 onehot_input=False)
valid_set = Text(time_steps,
                 valid_path,
                 vocab=train_set.vocab,
                 tokenizer=tokenizer,
                 onehot_input=False)

# weight initialization
init = Uniform(low=-0.1, high=0.1)

# model initialization
rlayer_params = {
    "output_size": hidden_size,
    "init": init,
    "activation": Tanh(),
Example #9
# setup backend
be = gen_backend(backend=args.backend,
                 batch_size=batch_size,
                 rng_seed=args.rng_seed,
                 device_id=args.device_id,
                 default_dtype=args.datatype)

# download penn treebank
train_path = load_text('ptb-train', path=args.data_dir)
valid_path = load_text('ptb-valid', path=args.data_dir)

# load data and parse on word-level
# a Text object can take a given tokenizer; for word-level parsing it is str.split,
# and a user can pass in a custom-defined tokenizer as well
train_set = Text(time_steps, train_path, tokenizer=str.split)
valid_set = Text(time_steps, valid_path, vocab=train_set.vocab, tokenizer=str.split)

# weight initialization
init = Uniform(low=-0.08, high=0.08)

# model initialization
if rlayer_type == 'lstm':
    rlayer1 = LSTM(hidden_size, init, Logistic(), Tanh())
    rlayer2 = LSTM(hidden_size, init, Logistic(), Tanh())
elif rlayer_type == 'gru':
    rlayer1 = GRU(hidden_size, init, activation=Tanh(), gate_activation=Logistic())
    rlayer2 = GRU(hidden_size, init, activation=Tanh(), gate_activation=Logistic())
else:
    raise NotImplementedError('%s layer not implemented' % rlayer_type)
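
# Not part of the original snippet -- a plausible continuation, mirroring the other
# PTB examples above: stack the two recurrent layers and project onto the vocabulary
# (layer and model names are assumed to be imported as elsewhere).
layers = [
    rlayer1,
    rlayer2,
    Affine(len(train_set.vocab), init, bias=init, activation=Softmax())
]
model = Model(layers=layers)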
Example #10
vocab_size = 20000
sentence_length = 100
embedding_dim = 128
hidden_size = 128
reset_cells = True

# setup backend
be = gen_backend(backend=args.backend,
                 batch_size=batch_size,
                 rng_seed=args.rng_seed,
                 device_id=args.device_id,
                 default_dtype=args.datatype)

# make dataset
path = load_text('imdb', path=args.data_dir)
(X_train, y_train), (X_test, y_test), nclass = Text.pad_data(
    path, vocab_size=vocab_size, sentence_length=sentence_length)

print "Vocab size - ", vocab_size
print "Sentence Length - ", sentence_length
print "# of train sentences", X_train.shape[0]
print "# of test sentence", X_test.shape[0]

import numpy as np
train_set = DataIterator(X_train, y_train, nclass=2)
test_set = DataIterator(X_test, y_test, nclass=2)

# weight initialization
init_emb = Uniform(low=-0.1/embedding_dim, high=0.1/embedding_dim)
init_glorot = GlorotUniform()

layers = [