# Example #1
# Load dataset
# ===========================================================================
# NOTE(review): `F` is an external module not visible in this snippet —
# presumably ODIN's dataset/fuel loader; confirm against the file's imports.
ds = F.load_imdb(nb_words=max_features, maxlen=maxlen)

# Symbolic graph inputs. The leading `None` leaves the batch dimension
# variable; the remaining dims are copied from the loaded training array.
X_train = K.placeholder(shape=(None, ) + ds['X_train'].shape[1:],
                        name='X_train')
# Second placeholder with the same feature shape — presumably for the
# held-out/scoring split (named 'X_score'); verify against later usage.
X_score = K.placeholder(shape=(None, ) + ds['X_train'].shape[1:],
                        name='X_score')
# One integer class label per sample.
y = K.placeholder(shape=(None, ), name='y', dtype='int32')

# ===========================================================================
# Build model
# ===========================================================================
# CNN-over-time feature extractor feeding LSTM-style gate projections.
# NOTE(review): this snippet is truncated mid-expression — the Sequence list
# (and the Merge inside it) is cut off; do not treat this as a complete model.
f = N.Sequence(
    [
        # Map word indices to dense embedding vectors.
        N.Embedding(max_features, embedding_size),
        N.Dropout(0.25),
        # Insert a singleton axis so the 2D Conv below slides over time only.
        N.Dimshuffle(pattern=(0, 1, 'x', 2)),  # convolution on time dimension
        N.Conv(nb_filter,
               filter_size=(filter_length, 1),
               pad='valid',
               stride=(1, 1),
               activation=K.relu),
        # Max-pool along the time axis.
        N.Pool(pool_size=(pool_length, 1), mode='max'),
        # Collapse back to 3D (batch, time, features) for the recurrent part.
        N.Flatten(outdim=3),
        N.Merge(
            [
                # Parallel linear projections — the names ('ingate', ...)
                # suggest these feed the gates of an LSTM; rest is cut off.
                N.Dense(lstm_output_size, activation=K.linear,
                        name='ingate'),  # input-gate
                N.Dense(lstm_output_size,
                        activation=K.linear,
# Example #2
# Sanity-print the shapes of every data split.
print('X:', X.shape, 'y:', y.shape)
print('X_train:', X_train.shape, 'y_train:', y_train.shape)
print('X_valid:', X_valid.shape, 'y_valid:', y_valid.shape)

# Embedding matrix produced by the tokenizer `tk` — presumably a pretrained
# word-vector lookup table built from `embedding`; confirm with tk.embed docs.
E = tk.embed(embedding)
# these numbers must be the same for all time
# (the sums act as cheap checksums to verify deterministic preprocessing)
print('Tokenizer:', np.sum(E), np.sum(X_train), np.sum(y_train),
      np.sum(X_valid), np.sum(y_valid))
# ===========================================================================
# Building model
# ===========================================================================
# Symbolic inputs: padded integer token ids and float label vectors
# (shape (batch, nb_labels) suggests one-hot/multi-label targets — verify).
X = K.placeholder(shape=(None, MAX_SEQ_LEN), dtype='int32', name='X')
y = K.placeholder(shape=(None, nb_labels), dtype='float32', name='y')

# Stacked Conv/Pool text encoder (1D convolution emulated with 2D conv over a
# singleton axis). NOTE(review): snippet truncated mid-expression at the third
# Conv — the Sequence list is incomplete.
f = N.Sequence([
    # Word ids -> embedding vectors, weights initialised from matrix E above.
    N.Embedding(tk.nb_words, embedding_dims, W_init=E),
    # Insert singleton axis so Conv slides over the time dimension only.
    N.Dimshuffle(pattern=(0, 1, 'x', 2)),
    N.Conv(num_filters=128,
           filter_size=(5, 1),
           strides=1,
           pad='valid',
           activation=K.relu),
    N.Pool(pool_size=(5, 1), pad='valid', mode='max'),
    # Second conv/pool stage, identical hyper-parameters.
    N.Conv(num_filters=128,
           filter_size=(5, 1),
           strides=1,
           pad='valid',
           activation=K.relu),
    N.Pool(pool_size=(5, 1), pad='valid', mode='max'),
    # Third conv stage — cut off by the scrape.
    N.Conv(num_filters=128,
           filter_size=(5, 1),
# Example #3
# Number of training epochs for this example run.
nb_epoch = 2

# NOTE(review): `F` is an external module not visible here — presumably
# ODIN's dataset loader; confirm against the file's imports.
ds = F.load_imdb(nb_words=max_features, maxlen=maxlen)
print(ds)

# ===========================================================================
# ODIN
# ===========================================================================
# Symbolic inputs: integer word-index sequences (feature dims copied from the
# training array, variable batch size) and one integer label per sample.
X = K.placeholder(shape=(None, ) + ds['X_train'].shape[1:],
                  name='X',
                  dtype='int32')
y = K.placeholder(shape=(None, ), name='y', dtype='int32')

# ODIN version of the CNN + LSTM-gate model from Example #1.
# NOTE(review): snippet truncated mid-expression — the Merge list is cut off.
net_odin = N.Sequence(
    [
        # Map word indices to dense embedding vectors.
        N.Embedding(input_size=max_features, output_size=embedding_size),
        N.Dropout(level=0.25),
        # Insert a singleton axis so the 2D Conv slides over time only.
        N.Dimshuffle(pattern=(0, 1, 'x', 2)),
        N.Conv(nb_filter, (filter_length, 1),
               strides=1,
               pad='valid',
               activation=K.relu),
        # Max-pool along the time axis.
        N.Pool(pool_size=(pool_length, 1), pad='valid', mode='max'),
        # Collapse back to 3D (batch, time, features) for the recurrent part.
        N.Flatten(outdim=3),
        # ====== LSTM ====== #
        N.Merge(
            [
                # Parallel linear gate projections — names suggest LSTM gates;
                # remainder of the list is cut off by the scrape.
                N.Dense(lstm_output_size, activation=K.linear,
                        name='ingate'),  # input-gate
                N.Dense(lstm_output_size,
                        activation=K.linear,