Example No. 1
def get_baseline_convolutional_encoder(filters,
                                       embedding_dimension,
                                       input_shape=None,
                                       dropout=0.05):
    encoder = Sequential()

    # Initial conv
    if input_shape is None:
        # In this case we are using the encoder as part of a siamese network and the input shape will be determined
        # automatically based on the input shape of the siamese network
        encoder.add(
            layers.Conv1D(filters, 32, padding='same', activation='relu'))
    else:
        # In this case we are using the encoder to build a classifier network and the input shape must be defined
        encoder.add(
            layers.Conv1D(filters,
                          32,
                          padding='same',
                          activation='relu',
                          input_shape=input_shape))
    encoder.add(layers.BatchNormalization())
    encoder.add(layers.SpatialDropout1D(dropout))
    encoder.add(layers.MaxPool1D(4, 4))

    # Further convs
    encoder.add(
        layers.Conv1D(2 * filters, 3, padding='same', activation='relu'))
    encoder.add(layers.BatchNormalization())
    encoder.add(layers.SpatialDropout1D(dropout))
    encoder.add(layers.MaxPool1D())

    encoder.add(
        layers.Conv1D(3 * filters, 3, padding='same', activation='relu'))
    encoder.add(layers.BatchNormalization())
    encoder.add(layers.SpatialDropout1D(dropout))
    encoder.add(layers.MaxPool1D())

    encoder.add(
        layers.Conv1D(4 * filters, 3, padding='same', activation='relu'))
    encoder.add(layers.BatchNormalization())
    encoder.add(layers.SpatialDropout1D(dropout))
    encoder.add(layers.MaxPool1D())

    encoder.add(layers.GlobalMaxPool1D())

    encoder.add(layers.Dense(embedding_dimension))

    return encoder
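
A minimal usage sketch for the encoder above. The original does not show its imports; `tensorflow.keras` and the shape values below are assumptions for illustration.

from tensorflow.keras.models import Sequential
from tensorflow.keras import layers

# Hypothetical input: 1 s of 16 kHz mono audio as a (16000, 1) sequence
encoder = get_baseline_convolutional_encoder(filters=32,
                                             embedding_dimension=64,
                                             input_shape=(16000, 1))
encoder.summary()  # ends in a Dense projection to the 64-d embedding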
Example No. 2
def lstm_model(num_classes, input_shape, embedding_matrix, max_length):
    """ Creates LSTM model for classification of emotions with Glove embeddings

    :param num_classes: number of classes
    :type num_classes: int
    :param input_shape: shape of the input
    :type input_shape: tuple
    :param embedding_matrix: embedding matrix for Keras Embedding layer
    :type embedding_matrix: numpy.array
    :param max_length: maximum length of the text sequence
    :type max_length: int
    :return: LSTM model
    """
    model = k.Sequential()

    model.add(kl.Embedding(input_dim=embedding_matrix.shape[0],
                           output_dim=embedding_matrix.shape[1],
                           weights=[embedding_matrix],
                           input_length=max_length,
                           trainable=False,
                           name='embedding_layer'))
    model.add(kl.SpatialDropout1D(0.6))
    model.add(kl.LSTM(32, dropout=0.1, recurrent_dropout=0.2))
    model.add(kl.Dense(128))
    model.add(kl.Dropout(0.2))
    model.add(kl.Activation('relu'))
    model.add(kl.Dense(num_classes))
    model.add(kl.Activation('sigmoid'))

    return model
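
A hedged usage sketch for `lstm_model`. It assumes `k` and `kl` alias `keras` and `keras.layers` (as the function body suggests) and uses an illustrative GloVe matrix shape.

import numpy as np
import keras as k
import keras.layers as kl

# Hypothetical 10,000-word vocabulary with 100-d GloVe vectors
embedding_matrix = np.zeros((10000, 100), dtype='float32')
model = lstm_model(num_classes=6, input_shape=(40,),
                   embedding_matrix=embedding_matrix, max_length=40)
# sigmoid outputs suggest a multi-label setup, so binary_crossentropy fits
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])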
Example No. 3
def create_cnn(inp_size):
    # Add an Input Layer
    input_layer = layers.Input((inp_size, ))

    # embedding layer learnt from above
    embedding_layer = layers.Embedding(vocab_size, 200)(input_layer)

    # add dropout on this layer
    embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

    # Add the convolutional Layer
    conv_layer = layers.Convolution1D(100, 3,
                                      activation="tanh")(embedding_layer)

    # Add the pooling Layer
    pooling_layer = layers.GlobalMaxPool1D()(conv_layer)

    # Add the output Layers
    output_layer1 = layers.Dense(50, activation="relu")(pooling_layer)
    output_layer1 = layers.Dropout(0.25)(output_layer1)
    output_layer2 = layers.Dense(1, activation="sigmoid")(output_layer1)

    # Compile the model
    model = models.Model(inputs=input_layer, outputs=output_layer2)
    model.compile(optimizer=optimizers.Adam(),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
Example No. 4
def create_cnn():
    # Add an input layer
    input_layer = layers.Input((70, ))

    # Add the word embedding layer
    embedding_layer = layers.Embedding(len(word_index) + 1,
                                       300,
                                       weights=[embedding_matrix],
                                       trainable=False)(input_layer)
    embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

    # Add the convolutional layer
    conv_layer = layers.Convolution1D(90, 3,
                                      activation="relu")(embedding_layer)

    # Add the max pooling layer, which takes the largest value from the activation map

    pooling_layer = layers.GlobalMaxPool1D()(conv_layer)

    # Add the output layers
    # dropout regularization, then the fully connected output layer
    output_layer1 = layers.Dropout(0.7)(pooling_layer)
    output_layer2 = layers.Dense(1, activation="sigmoid")(output_layer1)

    # Compile the model
    model = models.Model(inputs=input_layer, outputs=output_layer2)
    model.compile(optimizer=optimizers.Adamax(),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
Example No. 5
def createRCNN(word_index, embedding_matrix):
    input_layer = layers.Input((1000, ))

    embedding_layer = layers.Embedding(len(word_index) + 1,
                                       300,
                                       weights=[embedding_matrix],
                                       trainable=False)(input_layer)

    embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

    rnn_layer = layers.Bidirectional(layers.GRU(
        50, return_sequences=True))(embedding_layer)

    conv_layer = layers.Convolution1D(100, 3, activation="relu")(rnn_layer)

    pooling_layer = layers.GlobalMaxPool1D()(conv_layer)

    output_layer1 = layers.Dense(50, activation="relu")(pooling_layer)
    output_layer1 = layers.Dropout(0.25)(output_layer1)
    output_layer2 = layers.Dense(1, activation="sigmoid")(output_layer1)

    model = models.Model(inputs=input_layer, outputs=output_layer2)
    model.compile(optimizer=optimizers.Adam(), loss='binary_crossentropy')

    return model
Example No. 6
def create_model():
    inputs = []
    outputs = []
    for c in categorical_cols:
        num_unique_values = int(data[c].nunique())
        embed_dim = int(min(np.ceil((num_unique_values) / 2), 50))
        inp = layers.Input(shape=(1, ))
        out = layers.Embedding(num_unique_values + 1, embed_dim, name=c)(inp)
        out = layers.SpatialDropout1D(0.4)(out)
        out = layers.Reshape(target_shape=(embed_dim, ))(out)
        inputs.append(inp)
        outputs.append(out)
    x = layers.Concatenate()(outputs)
    x = BatchNormalization()(x)
    x = Dropout(0.3)(x)
    x = Dense(300, activation="relu")(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    x = Dense(300, activation="relu")(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    y = Dense(1, activation="sigmoid")(x)
    model = models.Model(inputs=inputs, outputs=y)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.Adam(learning_rate=0.00146,
                                            beta_1=0.9,
                                            beta_2=0.999,
                                            amsgrad=False),
                  metrics=[auc])
    return model
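
A sketch of how the per-column inputs line up with this entity-embedding model. `data`, `categorical_cols`, and `y_train` are the module-level names the function appears to rely on; they are assumptions here.

# One integer-encoded array per categorical column, in the same order as
# the Input layers created in the loop above
X = [data[c].values for c in categorical_cols]
model = create_model()
model.fit(X, y_train, batch_size=1024, epochs=5)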
Example No. 7
def tito_embedding(conf):
    """tito model used in talkingdata"""
    input_layer = list()
    embedded_layer = list()
    embed_conf, num_conf = conf
    for name, input_dim, output_dim in embed_conf:
        x_in = kl.Input(shape=(1, ), dtype="int32")
        embedded = kl.Embedding(input_dim,
                                output_dim,
                                name=name,
                                input_length=1)(x_in)
        embedded_layer.append(embedded)
        input_layer.append(x_in)

    embedded_layer = kl.concatenate(embedded_layer)
    # the original passed the tensor to SpatialDropout1D as its rate; the layer
    # must be constructed with a rate, then applied (0.2 is an assumed value)
    embedded_layer = kl.SpatialDropout1D(0.2)(embedded_layer)
    embedded_layer = [kl.Flatten()(embedded_layer)]

    dense_layer = list()
    for name, setting in num_conf.items():
        x_in = kl.Input(shape=(setting[1], ), name=name, dtype="float32")
        dense_layer.append(x_in)
        input_layer.append(x_in)

    if len(dense_layer) > 1:
        dense_num = kl.concatenate(dense_layer)
    else:
        dense_num = dense_layer[0]
    dense_num = kl.Dense(128)(dense_num)
    dense_num = kl.BatchNormalization()(dense_num)
    embedded_layer.append(dense_num)

    return input_layer, embedded_layer
Example No. 8
def create_cnn():
    # Add an Input Layer
    input_layer = layers.Input((70, ))

    # Add the word embedding Layer
    embedding_layer = layers.Embedding(len(word_index) + 1,
                                       300,
                                       weights=[embedding_matrix],
                                       trainable=False)(input_layer)
    embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

    # Add the convolutional Layer
    conv_layer = layers.Convolution1D(100, 3,
                                      activation="relu")(embedding_layer)

    # Add the pooling Layer
    pooling_layer = layers.GlobalMaxPool1D()(conv_layer)

    # Add the output Layers
    output_layer1 = layers.Dense(50, activation="relu")(pooling_layer)
    output_layer1 = layers.Dropout(0.25)(output_layer1)
    output_layer2 = layers.Dense(1, activation="sigmoid")(output_layer1)

    # Compile the model
    model = models.Model(inputs=input_layer, outputs=output_layer2)
    model.compile(optimizer=optimizers.Adam(), loss='binary_crossentropy')

    return model
Example No. 9
def lstm(xtrain, ytrain, xvalid, yvalid, epochs=1):
    # Add an Input Layer
    input_layer = layers.Input((70, ))

    # Add the word embedding Layer
    embedding_layer = layers.Embedding(len(word_index) + 1,
                                       300,
                                       weights=[embedding_matrix],
                                       trainable=False)(input_layer)
    embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

    # Add the LSTM Layer
    lstm_layer1 = layers.LSTM(128)(embedding_layer)
    dropout1 = layers.Dropout(0.5)(lstm_layer1)
    #lstm_layer2 = layers.LSTM(128)(dropout1)
    #dropout2 = layers.Dropout(0.5)(lstm_layer2)
    # Add the output Layers
    output_layer = layers.Dense(4, activation="softmax")(dropout1)

    # Compile the model
    model = models.Model(inputs=input_layer, outputs=output_layer)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(xtrain, ytrain, batch_size=256, epochs=epochs)

    predictions = model.predict(xvalid)
    predictions = predictions.argmax(axis=-1)
    accuracy = model.evaluate(xvalid, yvalid, verbose=0)
    # score against the argmaxed one-hot validation labels (the original passed
    # an undefined `valid_y`)
    f1score = metrics.f1_score(yvalid.argmax(axis=-1), predictions, average='weighted')
    return accuracy, f1score
Example No. 10
def create_rcnn():
    # Add an Input Layer
    input_layer = layers.Input((70, ))

    # Add the word embedding Layer
    embedding_layer = layers.Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], trainable=False)(input_layer)
    embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

    # Add the recurrent layer (the original dropped its output; feed it to the conv)
    rnn_layer = layers.Bidirectional(layers.GRU(50, return_sequences=True))(embedding_layer)

    # Add the convolutional Layer
    conv_layer = layers.Convolution1D(100, 3, activation="relu")(rnn_layer)

    # Add the pooling Layer
    pooling_layer = layers.GlobalMaxPool1D()(conv_layer)

    # Add the output Layers
    output_layer1 = layers.Dense(50, activation="relu")(pooling_layer)
    output_layer1 = layers.Dropout(0.25)(output_layer1)
    output_layer2 = layers.Dense(units=max(training_label_encoded) + 1, activation="softmax", name="output_layer")(output_layer1)

    # Compile the model
    model = models.Model(inputs=input_layer, outputs=output_layer2)
    model.compile(optimizer=optimizers.Adam(), loss='sparse_categorical_crossentropy', metrics=["sparse_categorical_accuracy"])

    return model
Example No. 11
def cnn(xtrain, ytrain, xvalid, yvalid, epochs=3):
    # Add an Input Layer
    input_layer = layers.Input((70, ))

    # Add the word embedding Layer
    embedding_layer = layers.Embedding(len(word_index) + 1,
                                       300,
                                       weights=[embedding_matrix],
                                       trainable=False)(input_layer)
    embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

    # Add the convolutional Layer
    conv_layer = layers.Convolution1D(100, 4,
                                      activation="relu")(embedding_layer)

    # Add the pooling Layer
    pooling_layer = layers.GlobalMaxPool1D()(conv_layer)

    # Add the output Layers
    output_layer1 = layers.Dense(50, activation="relu")(pooling_layer)
    output_layer1 = layers.Dropout(0.25)(output_layer1)
    output_layer2 = layers.Dense(4, activation="softmax")(output_layer1)

    # Compile the model
    model = models.Model(inputs=input_layer, outputs=output_layer2)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(xtrain, ytrain, batch_size=256, epochs=epochs)
    predictions = model.predict(xvalid)
    predictions = predictions.argmax(axis=-1)
    accuracy = model.evaluate(xvalid, yvalid, verbose=0)
    # score against the argmaxed one-hot validation labels (the original passed
    # an undefined `valid_y`)
    f1score = metrics.f1_score(yvalid.argmax(axis=-1), predictions, average='weighted')
    return accuracy, f1score
Example No. 12
def create_bidirectional_rnn():

    # Add an Input Layer
    input_layer = layers.Input((70, ))

    # Add the word embedding Layer
    embedding_layer = layers.Embedding(len(word_index) + 1,
                                       300,
                                       weights=[embedding_matrix],
                                       trainable=False)(input_layer)
    embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

    # Add the bidirectional GRU layer
    lstm_layer = layers.Bidirectional(layers.GRU(100))(embedding_layer)

    # Add the output Layers
    output_layer1 = layers.Dense(50, activation="relu")(lstm_layer)
    output_layer1 = layers.Dropout(0.25)(output_layer1)
    output_layer2 = layers.Dense(1, activation="sigmoid")(output_layer1)

    # Compile the model
    model = models.Model(inputs=input_layer, outputs=output_layer2)
    model.compile(optimizer=optimizers.Adam(), loss='binary_crossentropy')

    return model
Example No. 13
    def create_rcnn(self):
        # add an input layer
        input_layer = layers.Input((70, ))

        # add the word embedding layer
        embedding_layer = layers.Embedding(len(self.word_index) + 1,
                                           300,
                                           weights=[self.embedding_matrix],
                                           trainable=False)(input_layer)
        embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

        # add the recurrent layer
        rnn_layer = layers.Bidirectional(layers.GRU(
            50, return_sequences=True))(embedding_layer)

        # add the convolutional layer
        conv_layer = layers.Convolution1D(100, 3, activation="relu")(rnn_layer)

        # add the pooling layer
        pooling_layer = layers.GlobalMaxPool1D()(conv_layer)

        # Add the output Layers
        output_layer1 = layers.Dense(50, activation="relu")(pooling_layer)
        output_layer1 = layers.Dropout(0.25)(output_layer1)
        output_layer2 = layers.Dense(1, activation="sigmoid")(output_layer1)

        # Compile the model
        model = models.Model(inputs=input_layer, outputs=output_layer2)
        model.compile(optimizer=optimizers.Adam(), loss='binary_crossentropy')

        return model
Example No. 14
def build_model(embedding_matrix, word_index, max_len, lstm_units,
                verbose = False, compile = True, multi=True, gpu_num=4):
    #logger.info('Build model')
    sequence_input = L.Input(shape=(max_len,), dtype='int32')
    embedding_layer = L.Embedding(*embedding_matrix.shape,
                                weights=[embedding_matrix],
                                trainable=False)
    x = embedding_layer(sequence_input)
    x = L.SpatialDropout1D(0.3)(x)
    x = L.Bidirectional(L.CuDNNLSTM(lstm_units, return_sequences=True))(x)
    x = L.Bidirectional(L.CuDNNLSTM(lstm_units, return_sequences=True))(x)
    att = Attention(max_len)(x)
    avg_pool1 = L.GlobalAveragePooling1D()(x)
    max_pool1 = L.GlobalMaxPooling1D()(x)
    x = L.concatenate([att, avg_pool1, max_pool1])
    preds = L.Dense(1, activation='sigmoid')(x)
    model = Model(sequence_input, preds)
    if multi:
        print('use multi gpus')
        model = ModelMGPU(model, gpus=gpu_num)
    if verbose:
        model.summary()
    if compile:
        model.compile(loss='binary_crossentropy', optimizer=Adam(0.005), metrics=['acc'])
    return model
Example No. 15
def create_model():
    model = Sequential()
    model.add(
        lrs.Embedding(max_features,
                      embed_size,
                      weights=[embedding_matrix],
                      trainable=False))
    model.add(lrs.SpatialDropout1D(0.2))
    model.add(
        lrs.Bidirectional(lrs.LSTM(128,
                                   return_sequences=True,
                                   dropout=0.0,
                                   recurrent_dropout=0.0),
                          merge_mode='concat'))
    model.add(
        lrs.Conv1D(64,
                   kernel_size=2,
                   padding='valid',
                   kernel_initializer='glorot_uniform'))
    model.add(lrs.GlobalMaxPooling1D())  # global max pooling (not average)
    model.add(lrs.Dense(6, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer=Nadam(lr=0.001),  # Nadam's default lr is 0.002
                  metrics=['accuracy'])
    return model
Example No. 16
    def model_3(self):
        embedding_matrix = self.build_myself_embedding_matrix()
        input = layers.Input(shape=(self.max_words,))
        embedding = layers.Embedding(input_dim=embedding_matrix.shape[0], output_dim=embedding_matrix.shape[1],
                                     input_length=self.max_words, weights=[embedding_matrix])
        x = layers.SpatialDropout1D(0.2)(embedding(input))
        x = layers.Bidirectional(layers.GRU(400, return_sequences=True))(x)
        x = layers.Bidirectional(layers.GRU(400, return_sequences=True))(x)
        avg_pool = layers.GlobalAveragePooling1D()(x)
        max_pool = layers.GlobalMaxPool1D()(x)
        concat = layers.concatenate([avg_pool, max_pool])

        x = layers.Dense(1024)(concat)
        x = layers.BatchNormalization()(x)
        x = layers.Activation(activation='relu')(x)
        x = layers.Dropout(0.2)(x)

        x = layers.Dense(512)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation(activation='relu')(x)
        x = layers.Dropout(0.2)(x)
        output = layers.Dense(self.class_num, activation='softmax')(x)

        model = models.Model(inputs=input, outputs=output)
        print(model.summary())
        return model
Example No. 17
def lstm(train_x, train_y, valid_x, valid_y, batch_size=1024, epochs=10):
    # Add an Input Layer
    input_layer = layers.Input((70, ))

    # Add the word embedding Layer
    embedding_layer = layers.Embedding(len(word_index) + 1,
                                       300,
                                       weights=[embedding_matrix],
                                       trainable=False)(input_layer)
    embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

    # Add the LSTM Layer
    lstm_layer = layers.Bidirectional(layers.LSTM(100))(embedding_layer)
    # Add the output Layers
    output_layer1 = layers.Dense(50, activation="relu")(lstm_layer)
    output_layer1 = layers.Dropout(0.25)(output_layer1)
    output_layer2 = layers.Dense(1, activation="sigmoid")(output_layer1)

    # Compile the model
    model = models.Model(inputs=input_layer, outputs=output_layer2)
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.fit(train_x, train_y, batch_size=batch_size, epochs=epochs)

    predictions = model.predict(valid_x)
    # a single sigmoid unit yields an (n, 1) output, so argmax would always
    # return 0; threshold the probabilities instead
    predictions = (predictions > 0.5).astype(int).ravel()

    accuracy = metrics.accuracy_score(valid_y, predictions)
    f1score = metrics.f1_score(valid_y, predictions)
    return accuracy, f1score
Example No. 18
def create_cnn():
    # Add an Input Layer
    input_layer = layers.Input((100, ))

    # Add the word embedding Layer
    embedding_layer = layers.Embedding(10000, 100, trainable=True)(input_layer)
    embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

    # Add the convolutional Layer
    conv_layer = layers.Conv1D(100,
                               3,
                               padding='valid',
                               activation="relu",
                               strides=1)(embedding_layer)

    # Add the pooling Layer
    pooling_layer = layers.GlobalMaxPool1D()(conv_layer)

    # Add the output Layers
    output_layer1 = layers.Dense(1, activation="sigmoid")(pooling_layer)

    # Compile the model
    model = models.Model(inputs=input_layer, outputs=output_layer1)
    model.compile(optimizer=optimizers.Adam(),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    model.summary()
    return model
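
This variant is self-contained (the vocabulary size and sequence length are hard-coded), so a usage sketch only needs the Keras imports the snippet presupposes; `tensorflow.keras` is an assumption.

from tensorflow.keras import layers, models, optimizers
import numpy as np

model = create_cnn()
# Hypothetical batch: 32 padded sequences of length 100 over a 10k vocabulary
X = np.random.randint(0, 10000, size=(32, 100))
y = np.random.randint(0, 2, size=(32, 1))
model.fit(X, y, epochs=1)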
Example No. 19
def create_model_alternative(data, catcols, numcols):
    inputs = []
    outputs = []
    for c in catcols:
        num_unique_values = int(data[c].nunique())
        embed_dim = int(min(np.ceil((num_unique_values) / 2), 50))
        inp = layers.Input(shape=(1, ))
        out = layers.Embedding(num_unique_values + 1, embed_dim, name=c)(inp)
        out = layers.SpatialDropout1D(0.3)(out)
        out = layers.Reshape(target_shape=(embed_dim, ))(out)
        inputs.append(inp)
        outputs.append(out)

    num_input = layers.Input(shape=(data[numcols].shape[1], ))
    inputs.append(num_input)
    outputs.append(num_input)

    x = layers.Concatenate()(outputs)
    x = BatchNormalization()(x)
    x = Dropout(0.3)(x)
    x = Dense(128, activation="relu")(x)
    x = Dropout(0.3)(x)
    x = BatchNormalization()(x)
    x = Dense(32, activation="relu")(x)
    x = Dropout(0.3)(x)
    x = BatchNormalization()(x)
    y = Dense(1, activation="sigmoid")(x)

    model = Model(inputs=inputs, outputs=y)
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model
Example No. 20
def build_model(verbose = False, compile = True):
    sequence_input = L.Input(shape=(maxlen,), dtype='int32')
    embedding_layer = L.Embedding(len(word_index) + 1,
                                300,
                                weights=[embedding_matrix],
                                input_length=maxlen,
                                trainable=False)
    x = embedding_layer(sequence_input)
    x = L.SpatialDropout1D(0.2)(x)
    x = L.Bidirectional(L.CuDNNLSTM(64, return_sequences=True))(x)

    att = Attention(maxlen)(x)
    avg_pool1 = L.GlobalAveragePooling1D()(x)
    max_pool1 = L.GlobalMaxPooling1D()(x)

    x = L.concatenate([att, avg_pool1, max_pool1])

    preds = L.Dense(1, activation='sigmoid')(x)


    model = Model(sequence_input, preds)
    if verbose:
        model.summary()
    if compile:
        model.compile(loss='binary_crossentropy', optimizer=Adam(0.005), metrics=['acc'])
    return model
Example No. 21
    def __init__(self, train_X, train_Y, test_X, test_Y, model):
        self.model_path = model
        # embedding_index = {}
        # for i, line in enumerate(open('glove.6B/glove.6B.100d.txt')):
        #     values = line.split()
        #     embedding_index[values[0]] = np.asarray(values[1:], dtype='float32')
        embedding_index = FastText.load_fasttext_format('cc.id.300.bin')

        # Create tokenizer object
        tokenizer = text.Tokenizer()
        tokenizer.fit_on_texts(train_X)
        word_index = tokenizer.word_index

        # Convert text to padded sequences of tokens; if a saved model exists,
        # load it and skip building and training
        self.test_seq_X = sequence.pad_sequences(tokenizer.texts_to_sequences(test_X), maxlen=70)
        if os.path.isfile(self.model_path):
            self.classifier = load_model(self.model_path)
            return

        # No saved model: keep the training data for a later fit
        self.train_seq_X = sequence.pad_sequences(tokenizer.texts_to_sequences(train_X), maxlen=70)
        self.train_Y = train_Y
        self.test_Y = test_Y

        # Create the word embedding matrix
        embedding_matrix = np.zeros((len(word_index) + 1, 300))
        for word, i in word_index.items():
            # look up each word's FastText vector (the original called
            # wv.most_similar, which returns neighbour words, not a vector)
            if word in embedding_index.wv:
                embedding_matrix[i] = embedding_index.wv[word]

        # Creating layer
        # Add input layer
        input_layer = layers.Input((70, ))

        # Add the word embedding layer
        embedding_layer = layers.Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], trainable=False)(input_layer)
        embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)

        # Add LSTM layer (assumes self.hidden_state is set elsewhere in the class)
        lstm_layer = layers.LSTM(self.hidden_state)(embedding_layer)

        # Output layers
        output_layer1 = layers.Dense(50, activation="relu")(lstm_layer)
        output_layer1 = layers.Dropout(0.25)(output_layer1)
        output_layer2 = layers.Dense(1, activation="sigmoid")(output_layer1)

        # Compile model
        model = models.Model(inputs=input_layer, outputs=output_layer2)
        model.compile(optimizer=optimizers.Adam(), loss='binary_crossentropy')

        self.classifier = model

        logging.info("LSTM model created")
Example No. 22
def keras_dropout(layer, rate):
    input_dim = len(layer.input.shape)
    if input_dim == 2:
        return layers.SpatialDropout1D(rate)
    elif input_dim == 3:
        return layers.SpatialDropout2D(rate)
    elif input_dim == 4:
        return layers.SpatialDropout3D(rate)
    else:
        return layers.Dropout(rate)
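
A small sketch of the selection logic above. Note that `len(layer.input.shape)` counts the batch dimension, so a plain feature-vector input (rank 2 with batch) selects `SpatialDropout1D`; the layer below is purely illustrative.

from keras import layers, Input

x = Input(shape=(64, ))  # dense.input.shape == (None, 64), length 2
dense = layers.Dense(32)
dense(x)                 # call once so dense.input is defined
print(type(keras_dropout(dense, 0.2)).__name__)  # SpatialDropout1D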
Example No. 23
def build_model(emb_cid, emb_advid):

    inp1 = layers.Input(shape=(max_len, ))
    inp2 = layers.Input(shape=(max_len, ))

    emb1 = layers.Embedding(input_dim=emb_cid.shape[0],
                            output_dim=emb_cid.shape[1],
                            input_length=max_len,
                            weights=[emb_cid],
                            trainable=False)(inp1)
    emb2 = layers.Embedding(input_dim=emb_advid.shape[0],
                            output_dim=emb_advid.shape[1],
                            input_length=max_len,
                            weights=[emb_advid],
                            trainable=False)(inp2)

    sdrop = layers.SpatialDropout1D(rate=0.2)

    emb1 = sdrop(emb1)
    emb2 = sdrop(emb2)

    content = layers.Concatenate()([emb1, emb2])

    mha = MultiHeadAttention(head_num=16)(content)
    mha = layers.Dropout(0.01)(mha)
    mha = layers.Add()([content, mha])
    mha = LayerNormalization()(mha)
    mha = layers.Dropout(0.01)(mha)
    mha_ff = FeedForward(256)(mha)
    mha_out = layers.Add()([mha, mha_ff])
    mha_out = LayerNormalization()(mha_out)

    lstm = layers.Bidirectional(layers.LSTM(128,
                                            return_sequences=True))(mha_out)

    avg_pool = layers.GlobalAveragePooling1D()(lstm)
    max_pool = layers.GlobalMaxPool1D()(lstm)

    x = layers.Concatenate()([avg_pool, max_pool])

    x = layers.Dense(128, activation='relu')(x)
    x = layers.BatchNormalization()(x)

    x = layers.Dense(64, activation='relu')(x)
    x = layers.BatchNormalization()(x)

    x = layers.Dropout(0.1)(x)

    out = layers.Dense(10, activation='softmax')(x)
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.Adam(1e-3),
                  metrics=['accuracy'])

    return model
Example No. 24
def FastText(param):
    inp = layers.Input(shape=(param['sentence_len'], ))
    x = layers.Embedding(param['vocab_size'], param['embed_size'])(inp)
    x = layers.SpatialDropout1D(rate=0.1)(x)
    x = layers.GlobalAveragePooling1D()(x)
    outp = layers.Dense(param['num_class'], activation='sigmoid')(x)
    model = Model(inputs=inp, outputs=outp)
    optimizer = optimizers.Adam(lr=0.01)
    model.compile(loss='binary_crossentropy', optimizer=optimizer)

    return model
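
The keys `param` must carry can be read off the function body; a hypothetical configuration:

param = {'sentence_len': 100, 'vocab_size': 20000,
         'embed_size': 128, 'num_class': 6}
model = FastText(param)
model.summary()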
Example No. 25
def create_cnn():
    input_layer = layers.Input((70,))
    embedding_layer = layers.Embedding(len(word_index)+1, 300, weights=[embedding_matrix], trainable=False)(input_layer)
    embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)
    conv_layer = layers.Convolution1D(100, 3, activation='relu')(embedding_layer)
    pooling_layer = layers.GlobalMaxPool1D()(conv_layer)
    output_layer1 = layers.Dense(50, activation='relu')(pooling_layer)
    output_layer1 = layers.Dropout(0.25)(output_layer1)
    output_layer2 = layers.Dense(1, activation='sigmoid')(output_layer1)
    model = models.Model(inputs=input_layer, outputs=output_layer2)
    model.compile(optimizer=optimizers.Adam(), loss='binary_crossentropy')
    return model
Example No. 26
def build_model(units, dropout):
    inp = Input(shape=(7,7,1024))
    main = TimeDistributed(Bidirectional(CuDNNLSTM(units)))(inp)
    main = layers.SpatialDropout1D(dropout)(main)
    main = Bidirectional(CuDNNLSTM(units))(main)
    main = layers.Dropout(dropout)(main)
    out = Dense(128, activation = 'sigmoid')(main)

    model = Model(inputs=inp, outputs = out)
    model.compile(optimizer=Adam(lr = 0.0001), loss='categorical_crossentropy',metrics=[top1_loss])
    model.summary()
    return model
Example No. 27
def lstm(seq_len: int):
    # input_deepmoji = layers.Input(shape=(2304, ), name="deepmoji_input")
    input_text = layers.Input(shape=(1, ), dtype=tf.string, name="text_input")

    # embedding = layers.Embedding(168, 64)(input_text)
    embedding = layers.Lambda(ELMo, output_shape=(1024, ))(input_text)

    spt_dropout_1 = layers.SpatialDropout1D(0.4)(embedding)
    lstm1 = layers.Bidirectional(
        layers.LSTM(350,
                    kernel_initializer='random_uniform',
                    return_sequences=True,
                    recurrent_dropout=0.4))(spt_dropout_1)
    spt_dropout_2 = layers.SpatialDropout1D(0.3)(lstm1)
    lstm2 = layers.Bidirectional(
        layers.LSTM(350,
                    kernel_initializer='random_uniform',
                    return_sequences=True,
                    recurrent_dropout=0.3))(spt_dropout_2)
    spt_dropout_3 = layers.SpatialDropout1D(0.2)(lstm2)
    lstm3 = layers.Bidirectional(
        layers.LSTM(300,
                    kernel_initializer='random_uniform',
                    return_sequences=True,
                    recurrent_dropout=0.3))(spt_dropout_3)

    att = Attention()(lstm3)

    # merged = layers.Concatenate()([input_deepmoji, att])
    dense = layers.Dense(100, activation='relu')(att)
    pred = layers.Dense(2, activation='softmax', name="output")(dense)

    model = Model(inputs=input_text, outputs=pred)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['categorical_accuracy'])
    model.summary()

    return model
Example No. 28
def create_model(num_filters, kernel_size, vocab_size, embedding_dim, maxlen):
    model = Sequential()
    model.add(layers.Embedding(vocab_size, embedding_dim, input_length=maxlen))
    model.add(layers.SpatialDropout1D(0.25))
    model.add(layers.Conv1D(num_filters, kernel_size, padding='same', activation='relu'))
    model.add(layers.AveragePooling1D())
    model.add(layers.GlobalMaxPool1D())
    model.add(layers.Dense(10, activation='relu')) 
    model.add(layers.Dropout(0.25))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
    return model
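
Because every hyperparameter is an argument, the function drops neatly into a search loop; the values below are illustrative, and the commented fit call assumes padded training arrays.

for num_filters in (32, 64):
    for kernel_size in (3, 5):
        model = create_model(num_filters, kernel_size,
                             vocab_size=20000, embedding_dim=50, maxlen=100)
        # model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=3)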
Example No. 29
def keras_dropout(layer, rate):
    '''keras dropout layer.
    '''

    from keras import layers

    input_dim = len(layer.input.shape)
    if input_dim == 2:
        return layers.SpatialDropout1D(rate)
    elif input_dim == 3:
        return layers.SpatialDropout2D(rate)
    elif input_dim == 4:
        return layers.SpatialDropout3D(rate)
    else:
        return layers.Dropout(rate)
Example No. 30
def TextRnn(param):
    hidden_size = 64
    inp = layers.Input(shape=(param['sentence_len'], ))
    x = layers.Embedding(param['vocab_size'], param['embed_size'])(inp)
    x = layers.SpatialDropout1D(rate=0.1)(x)
    x = layers.Bidirectional(
        layers.GRU(units=hidden_size, return_sequences=True))(x)
    x = layers.GlobalMaxPooling1D()(x)
    x = layers.Dropout(0.2)(x)
    outp = layers.Dense(units=param['num_class'], activation='sigmoid')(x)
    model = Model(inputs=inp, outputs=outp)
    optimizer = optimizers.Adam()
    model.compile(loss='binary_crossentropy', optimizer=optimizer)

    return model