Example #1
def bidirectional_model():
    inputs = Input(shape=(maxlen, ), dtype='int32')
    x = Embedding(max_features, 128, input_length=maxlen)(inputs)
    x = Bidirectional(LSTM(64))(x)
    x = Dropout(0.5)(x)
    x = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=x)
    # try using different optimizers and different optimizer configs
    model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
    return model
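
A quick usage sketch, not part of the original snippet: assuming standalone Keras and the globals max_features and maxlen used above, the model can be smoke-tested on the IMDB dataset (all values here are illustrative):

from keras.datasets import imdb
from keras.preprocessing import sequence

max_features = 20000   # assumed vocabulary size
maxlen = 100           # assumed padded sequence length

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

model = bidirectional_model()
model.fit(x_train, y_train, batch_size=32, epochs=1,
          validation_data=(x_test, y_test))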
Example #2
def cnn_lstm_model():
    '''Build a CNN + LSTM binary classifier over padded integer sequences.'''
    print('Build model...')
    inputs = Input(shape=(maxlen, ), dtype='int32')
    x = Embedding(max_features, embedding_size, input_length=maxlen)(inputs)
    x = Dropout(0.25)(x)
    x = Conv1D(filters,
               kernel_size,
               padding='valid',
               activation='relu',
               strides=1)(x)
    x = MaxPooling1D(pool_size=pool_size)(x)
    x = LSTM(lstm_output_size)(x)
    x = Dense(1, activation='sigmoid')(x)
    model = Model(inputs=inputs, outputs=x)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
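
The function above relies on several globals that the snippet does not define; one plausible set of values (an assumption, similar to the stock Keras IMDB CNN-LSTM example) is:

max_features = 20000      # vocabulary size
maxlen = 100              # padded sequence length
embedding_size = 128
filters = 64
kernel_size = 5
pool_size = 4
lstm_output_size = 70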
Example #3
def get_word_embeddings(args, data, input_sequence, type):
    # shape of emb_table:
    # word_collection_size, emb_dim
    if type == "word":
        emb_table = data["word_emb_table"]
    elif type == "orth_word":
        emb_table = data["orth_word_emb_table"]
    else:
        print("Wrong embedding specified")
        sys.exit(1)

    max_sentence_len = data["train_word"][0].shape[1]

    # Input shape: batch_size, max_sentence_len
    # Output shape: batch_size, max_sentence_len, emb_dim
    emb_output = Embedding(input_dim=emb_table.shape[0],
                           output_dim=emb_table.shape[1],
                           weights=[emb_table],
                           input_shape=(max_sentence_len, ),
                           trainable=args.fine_tune,
                           name=type + "_emb")(input_sequence)

    return emb_output
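
A minimal wiring sketch for the helper above (not from the source; it assumes the args/data layout implied by the code, and that Input is imported from Keras):

max_sentence_len = data["train_word"][0].shape[1]
word_input = Input(shape=(max_sentence_len, ), dtype='int32', name='word_input')
word_emb = get_word_embeddings(args, data, word_input, "word")  # (batch, max_sentence_len, emb_dim)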
Example #4
def cnn_model_fn():
    '''Build a 1D convnet with global max pooling for binary classification.'''
    print('Build model...')
    inputs = Input(shape=(maxlen, ),
                   dtype='int32')  # an index sequence of length maxlen
    x = Embedding(max_features, embedding_dims, input_length=maxlen)(inputs)
    x = Dropout(0.2)(x)
    x = Conv1D(filters,
               kernel_size,
               padding='valid',
               activation='relu',
               strides=1)(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(hidden_dims)(x)
    x = Dropout(0.2)(x)
    x = Activation('relu')(x)
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)
    model = Model(inputs=inputs, outputs=x)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
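
As above, the undefined globals would need values; a hedged, illustrative choice (not taken from this snippet) is:

max_features = 5000
maxlen = 400
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250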
Example #5
def get_char_based_embeddings(args, data, input_sequence, type):
    # max_sentence_len and max_word_len are the same
    # for both char and orth_char embeddings.

    # data["char"][0] denotes the char training data, flattened per sentence:
    # shape: batch_size, max_sentence_len * max_word_len
    max_sentence_len = data["train_word"][0].shape[1]
    max_word_len = data["char"][0].shape[-1] // max_sentence_len  # integer division

    # FIXME Dropout

    # char_emb_table has the shape
    # char_collection_size, char_emb_dim
    if type == "char":
        char_emb_table = data["char"][3]
    elif type == "orth_char":
        char_emb_table = data["orth_char"][3]
    else:
        raise ValueError("Unknown char embedding type: " + type)

    char_emb_dim = char_emb_table.shape[1]

    # Input shape: batch_size, max_sentence_len * max_word_len
    # Output shape: batch_size, max_sentence_len * max_word_len, char_emb_dim

    emb_output = Embedding(input_dim=char_emb_table.shape[0],
                           output_dim=char_emb_dim,
                           weights=[char_emb_table],
                           input_length=(max_sentence_len * max_word_len),
                           name=type + "_emb_1")(input_sequence)

    # Input shape: batch_size, max_sentence_len * max_word_len, char_emb_dim
    # Output shape: batch_size, max_sentence_len, max_word_len, char_emb_dim
    reshape_output_1 = Reshape(target_shape=(max_sentence_len, max_word_len,
                                             char_emb_dim),
                               name=type + "_reshape_2")(emb_output)

    # Input shape: batch_size, max_sentence_len, max_word_len, char_emb_dim
    # Output shape: batch_size, char_emb_dim, max_sentence_len, max_word_len
    permute_output_1 = Permute(dims=(3, 1, 2),
                               name=type + "_permute_3")(reshape_output_1)

    # Input Shape: batch_size, char_emb_dim, max_sentence_len, max_word_len
    # Output Shape: batch_size, num_filters, max_sentence_len, max_word_len
    num_filters = args.num_filters
    conv_output = Conv2D(filters=num_filters,
                         kernel_size=(1, const.CONV_WINDOW),
                         strides=1,
                         padding="same",
                         data_format="channels_first",
                         activation="tanh",
                         name=type + "_conv2d_4")(permute_output_1)

    # Input Shape: batch_size, num_filters, max_sentence_len, max_word_len
    # Output Shape: batch_size, num_filters, max_sentence_len, 1
    maxpool_output = MaxPool2D(pool_size=(1, max_word_len),
                               data_format="channels_first",
                               name=type + "_maxpool_5")(conv_output)

    # Input Shape: batch_size, num_filters, max_sentence_len, 1
    # Output shape: batch_size, num_filters, max_sentence_len
    reshape_output_2 = Reshape(target_shape=(num_filters, max_sentence_len),
                               name=type + "_reshape_6")(maxpool_output)

    # Input shape: batch_size, num_filters, max_sentence_len
    # Output shape: batch_size, max_sentence_len, num_filters
    permute_output_2 = Permute(dims=(2, 1),
                               name=type + "_word_emb")(reshape_output_2)

    return permute_output_2
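
A hedged sketch (not in the original) of how the word- and character-level embeddings might be combined before a sentence-level encoder; the concatenate import, the Input placeholders, and the shapes in the comments are assumptions based on the helpers above:

from keras.layers import concatenate

max_sentence_len = data["train_word"][0].shape[1]
max_word_len = data["char"][0].shape[-1] // max_sentence_len

word_input = Input(shape=(max_sentence_len, ), dtype='int32')
char_input = Input(shape=(max_sentence_len * max_word_len, ), dtype='int32')

word_emb = get_word_embeddings(args, data, word_input, "word")        # (batch, sent_len, emb_dim)
char_emb = get_char_based_embeddings(args, data, char_input, "char")  # (batch, sent_len, num_filters)
combined = concatenate([word_emb, char_emb])  # (batch, sent_len, emb_dim + num_filters)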
Example #6
    if i >= MAX_NB_WORDS:  # keep only the first 20000 words of the vocabulary
        continue
    # look up the 100-dim pre-trained vector for each of these words
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words without a pre-trained vector stay all-zeros in embedding_matrix (20000, 100)
        embedding_matrix[i] = embedding_vector

# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
embedding_layer = Embedding(
    num_words,            # use only the top 20000 words of the 400000-word vocabulary
    EMBEDDING_DIM,        # each word is represented by a 100-dim vector
    weights=[embedding_matrix],        # (20000, 100) slice of the full (400000, 100) table
    input_length=MAX_SEQUENCE_LENGTH,  # each sample text is padded/truncated to 1000 words
    trainable=False)      # keep the embedding weights fixed

print('Training model.')

# train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')  # (?, 1000): placeholder for any number of samples, 1000 word indices each
embedded_sequences = embedding_layer(sequence_input)  # (?, 1000, 100): each of the 1000 words mapped to its 100-dim vector
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
Example #7
np.save(
    os.path.join(SAVE_DIR,
                 'data_' + str(lendata - 1) + '_' + str(lendata) + '.npy'),
    data[lendata - 1:lendata])
np.save(
    os.path.join(SAVE_DIR,
                 'labels_' + str(lendata - 1) + '_' + str(lendata) + '.npy'),
    labels[lendata - 1:lendata])

tokenizer = None
data = None
labels = None

embedding_layer = Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)

sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(256, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(2)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)  # global max pooling
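
The final MaxPooling1D(35) acts as global max pooling only if the temporal dimension left after the preceding conv/pool stack is exactly 35. A hedged alternative (the GlobalMaxPooling1D import is an addition, not from this snippet) avoids hard-coding that length and returns a 2-D tensor directly, so no Flatten() is needed before a dense head:

from keras.layers import GlobalMaxPooling1D

# instead of the final MaxPooling1D(35):
x = GlobalMaxPooling1D()(x)  # pools over whatever temporal length remains -> (batch, 128)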
Example #8
def cnn_sentiment_model(inputs,
                        nb_words,
                        embedding_dim=300,
                        static_embedding=True,
                        embedding_weights=None,
                        filter_hs=None,
                        nb_filters=100,
                        emb_size=100,
                        hidden_dropout=0.2,
                        is_training=True,
                        augmentation_function=None,
                        l2_weight=1e-4,
                        img_shape=None,
                        new_shape=None,
                        image_summary=False,
                        batch_norm_decay=0.99,
                        seed=0,
                        embedding_dropout=0.2):
    from tensorflow.contrib.keras.python.keras.layers import Embedding, Input, Convolution1D, MaxPooling1D, Flatten, \
        Dense, Dropout, Activation
    from tensorflow.contrib.keras.python.keras.initializers import glorot_uniform
    from tensorflow.contrib.keras.python.keras.layers.merge import Concatenate

    from tensorflow.contrib.keras.python.keras import backend as K
    K.set_learning_phase(1 if is_training else 0)

    sequence_length = img_shape[0]

    if filter_hs is None:
        filter_hs = [3, 4, 5]

    model = inputs

    def ci(shape, dtype=None, partition_info=None):
        assert shape[0] == embedding_weights.shape[0] and \
            shape[1] == embedding_weights.shape[1], \
            'Shapes are not equal required={} init value={}'.format(
                shape, embedding_weights.shape)
        return embedding_weights

    model = Embedding(nb_words,
                      embedding_dim,
                      input_length=sequence_length,
                      trainable=(not static_embedding),
                      embeddings_initializer='uniform'
                      if embedding_weights is None else ci)(model)
    if embedding_dropout > 0.0:
        model = Dropout(embedding_dropout, seed=seed)(model,
                                                      training=is_training)

    convs = list()
    for fsz in filter_hs:
        conv = Convolution1D(
            filters=nb_filters,
            kernel_size=fsz,
            padding='valid',
            activation='relu',
            kernel_initializer=glorot_uniform(seed=seed))(model)
        pool = MaxPooling1D(pool_size=sequence_length - fsz + 1)(conv)
        flatten = Flatten()(pool)
        convs.append(flatten)

    if len(convs) > 1:  # Concatenate needs at least two inputs
        graph_out = Concatenate()(convs)
    else:
        graph_out = convs[0]

    model = graph_out

    model = Dense(emb_size,
                  kernel_initializer=glorot_uniform(seed=seed))(model)
    model = Dropout(hidden_dropout, seed=seed)(model, training=is_training)
    model = Activation('relu')(model)

    return model
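
A hedged usage sketch (not from the source); the sequence length, vocabulary size, and variable names are illustrative. The function returns a feature tensor of shape (batch, emb_size), so a Dense classification layer would typically be stacked on top:

from tensorflow.contrib.keras.python.keras.layers import Input

seq_len = 56                            # assumed maximum sentence length
word_ids = Input(shape=(seq_len, ), dtype='int32')
sentence_features = cnn_sentiment_model(word_ids,
                                        nb_words=20000,
                                        embedding_dim=300,
                                        embedding_weights=None,  # or a (20000, 300) numpy array
                                        img_shape=(seq_len, ),
                                        is_training=True)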
Example #9

"""
### Sequence classification with LSTM:
"""

from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout
from tensorflow.contrib.keras.python.keras.layers import Embedding
from tensorflow.contrib.keras.python.keras.layers import LSTM
from tensorflow.contrib.keras.python.keras import backend as K

model = Sequential()
model.add(Embedding(input_dim=64, output_dim=256, input_length=10))
# input_dim: size of the vocabulary
# output_dim: dimension of the dense embedding
# input_length: length of each input sequence (sentence)
model.input   # shape: (?, 10)
model.output  # shape: (?, 10, 256)

model.add(LSTM(128))  # units=128, dimensionality of the output space
model.output  # shape: (?, 128)

model.add(Dropout(0.5))  # fraction of units to drop
# model.output  # will cause an error after Dropout
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
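
A minimal smoke test with random data (a sketch, not in the original); the shapes follow the layer comments above: 10 integer ids per sample drawn from a 64-word vocabulary, with binary labels.

import numpy as np

x_dummy = np.random.randint(0, 64, size=(32, 10))
y_dummy = np.random.randint(0, 2, size=(32, 1))
model.fit(x_dummy, y_dummy, batch_size=16, epochs=1)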
Example #10
max_features = 20000
maxlen = 100
batch_size = 32


(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=max_features)

X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)


model = Sequential()
model.add(Embedding(max_features, 128, input_length=maxlen))

#model.add(SimpleRNN(128))
#model.add(GRU(128))
model.add(LSTM(128))

model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))


model.compile(loss='binary_crossentropy', optimizer='adam')


model.fit(X_train, y_train, batch_size=batch_size, epochs=1,
          validation_data=(X_test, y_test))
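
An optional follow-up sketch (not in the original): since compile() was given no metrics, evaluate() returns only the loss.

score = model.evaluate(X_test, y_test, batch_size=batch_size)
preds = model.predict(X_test[:5])  # sigmoid probabilities in [0, 1]
print('Test loss:', score)
print('First predictions:', preds.ravel())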
Example #11
    def fit(self, eventlog_name):

        import tensorflow as tf
        from tensorflow.contrib.keras.python.keras.engine import Input, Model
        from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout, GRU, Embedding, merge, Masking

        features, targets = self.dataset.load(eventlog_name, train=True)
        inputs = []
        layers = []

        with tf.device('/cpu:0'):
            # split attributes
            features = [features[:, :, i] for i in range(features.shape[2])]

            for i, t in enumerate(features):
                voc_size = np.array(self.dataset.attribute_dims[i]) + 1  # we start at 1, hence +1
                emb_size = np.floor(voc_size / 2.0).astype(int)

                inp = Input(shape=(None, *t.shape[2:]))
                x = Embedding(input_dim=voc_size, output_dim=emb_size, input_length=t.shape[1], mask_zero=True)(inp)
                inputs.append(inp)
                layers.append(x)

            # merge layers
            x = merge.concatenate(layers)

        x = GRU(64, implementation=2)(x)

        # shared hidden layer
        x = Dense(512, activation=tf.nn.relu)(x)
        x = Dense(512, activation=tf.nn.relu)(Dropout(0.5)(x))

        # hidden layers per attribute
        outputs = []
        for i, l in enumerate(targets):
            o = Dense(256, activation=tf.nn.relu)(Dropout(0.5)(x))
            o = Dense(256, activation=tf.nn.relu)(Dropout(0.5)(o))
            o = Dense(l.shape[1], activation=tf.nn.softmax)(Dropout(0.5)(o))
            outputs.append(o)

        self.model = Model(inputs=inputs, outputs=outputs)

        # compile model

        # old setting : optimizers from tensorflow

        # self.model.compile(
        # optimizer=tf.train.AdamOptimizer(learning_rate=0.0001),
        # loss='categorical_crossentropy'
        # )

        # new setting : optimizers from keras

        self.model.compile(
            optimizer='Adadelta',
            loss='categorical_crossentropy'
        )

        # train model
        self.model.fit(
            features,
            targets,
            batch_size=100,
            epochs=100,
            validation_split=0.2,
        )
"""
from tensorflow.contrib.keras.python.keras.layers import Input, Embedding, LSTM, Dense, concatenate
from tensorflow.contrib.keras.python.keras.models import Model
import numpy as np

# Headline input: meant to receive sequences of 100 integers, between 1 and 10000.
main_input = Input(shape=(100, ), dtype='int32', name='main_input')

# Generate dummy data as main_input
main_input_array = np.random.random((1000, 100))
# main_output_array = np.random.random()

# This embedding layer will encode the input sequence into a sequence of dense
# 512-dimensional vectors (input_length could also be None). Output: (?, 100, 512)
x = Embedding(output_dim=512, input_dim=10000, input_length=100)(main_input)

# A LSTM will transform the vector sequence into a single vector,
# containing information about the entire sequence
lstm_out = LSTM(32)(x)  # output (?, 32)

# Here we insert the auxiliary loss, allowing the LSTM and Embedding layers to be
# trained smoothly even though the main loss will be much higher in the model.
# Output shape: (?, 1)
auxiliary_output = Dense(1, activation='sigmoid', name='aux_output')(lstm_out)
# knowing its shape, we can create auxiliary_output_array
auxiliary_output_array = np.random.random((1000, 1))

# At this point, we feed into the model our auxiliary input data by concatenating it with the LSTM output:

auxiliary_input = Input(shape=(5, ), name='aux_input')
# create auxiliary_input_array
auxiliary_input_array = np.random.random((1000, 5))
vision_model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
vision_model.add(Conv2D(256, (3, 3), activation='relu'))
vision_model.add(Conv2D(256, (3, 3), activation='relu'))
vision_model.add(MaxPooling2D((2, 2)))
vision_model.add(Flatten())

# Now let's get a tensor with the output of our vision model:
image_input = Input(shape=(224, 224, 3))
encoded_image = vision_model(image_input)

# Next, let's define a language model to encode the question into a vector.
# Each question will be at most 100 word long,
# and we will index words as integers from 1 to 9999.
question_input = Input(shape=(100, ), dtype='int32')
embedded_question = Embedding(input_dim=10000,
                              output_dim=256,
                              input_length=100)(
                                  question_input)  # (?, 100, 256)
encoded_question = LSTM(256)(embedded_question)  # (?, 256)

# Let's concatenate the question vector and the image vector:
merged = concatenate([encoded_question, encoded_image])

# And let's train a logistic regression over 1000 words on top:
output = Dense(1000, activation='softmax')(merged)

# This is our final model:
vqa_model = Model(inputs=[image_input, question_input], outputs=output)

# The next stage would be training this model on actual data.
"""
### Video question answering model