Code Example #1
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import (Input, Embedding, Conv1D, GlobalMaxPooling1D,
                                     Concatenate, Dropout, Dense, Flatten,
                                     Bidirectional, GRU)
from tensorflow.keras.models import Model
from tensorflow.keras import initializers, regularizers
from tensorflow.keras.constraints import max_norm
from tensorflow.keras.optimizers import Adam, RMSprop
# Project-local helpers assumed by the examples in this listing: get_w2v comes
# from utils.embeddings (see Examples #6-#8); dice_loss and f1_score are
# assumed to live in the project's `metric` module referenced below.
from utils.embeddings import get_w2v
import metric
from metric import dice_loss, f1_score


def build_model(hparams):
    input_layer = Input(shape=(hparams["max_sequence_length"], ))
    # Two embedding channels over the same input: one frozen, one fine-tuned
    # during training (a multichannel CNN in the style of Kim, 2014).
    embedding_layer_static = get_w2v('').get_keras_embedding(train_embeddings=False)(input_layer)
    embedding_layer = get_w2v('').get_keras_embedding(train_embeddings=True)(input_layer)
    
    submodels = []
    kernel_sizes = hparams['kernel_sizes'].split('-')
    for ks in kernel_sizes:
        # One Conv1D per kernel size, applied to both the static and the
        # trainable embedding channels (the two branches share weights).
        conv_1_d = Conv1D(
            activation = 'relu',
            filters = hparams["filters"],
            kernel_size = int(ks),
            kernel_constraint = max_norm(hparams["max_norm_value"])
        )
        conv_layer_static = conv_1_d(embedding_layer_static)
        conv_layer = conv_1_d(embedding_layer)
        max_pooling_static = GlobalMaxPooling1D()(conv_layer_static)
        max_pooling = GlobalMaxPooling1D()(conv_layer)
        concatenate_layer = Concatenate()([max_pooling_static, max_pooling])
        submodels.append(concatenate_layer)
    concat = Concatenate()(submodels)
    dropout_layer_1 = Dropout(hparams['dropout_ratio'])(concat)
    hidden_layer = Dense(
        hparams['hidden_size'],
        activation = 'relu',
        # Uniform init scaled by 1/sqrt(fan_in); fan_in is the concatenation
        # of len(kernel_sizes) branches, each contributing 2 * filters values.
        kernel_initializer = initializers.RandomUniform(
            minval = - 1 / np.sqrt(len(kernel_sizes) * 2 * hparams['filters']),
            maxval = 1 / np.sqrt(len(kernel_sizes) * 2 * hparams['filters'])
        ),
        bias_initializer = initializers.Zeros(),
        kernel_regularizer = regularizers.l2(hparams['l2_regularization'])
    )(dropout_layer_1)
    dropout_layer_2 = Dropout(hparams['dropout_ratio'])(hidden_layer)
    output_layer = Dense(
        2,
        activation = 'sigmoid',
        kernel_initializer = initializers.RandomUniform(
            minval = - 1 / np.sqrt(hparams['hidden_size']),
            maxval = 1 / np.sqrt(hparams['hidden_size'])
        ),
        bias_initializer = initializers.Zeros(),
        kernel_regularizer = regularizers.l2(hparams['l2_regularization'])
    )(dropout_layer_2)
    
    model = Model(inputs=[input_layer], outputs=[output_layer])
    model.compile(
        loss = dice_loss,
        optimizer = Adam(learning_rate = hparams["learning_rate"]),
        metrics = [f1_score]
    )
    # keras.utils.vis_utils is an older module path; tf.keras.utils.plot_model
    # is the stable entry point (as Example #5 already uses).
    tf.keras.utils.plot_model(model, "model_cnn_multichannel.png", show_layer_names=False)
    return model
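
A note on usage: each build_model variant in this listing consumes a single flat hparams dict. A minimal sketch of a call, with hypothetical values (the key names are taken from the code above; the values are illustrative only):

# Hypothetical values; only the key names come from the example above.
hparams = {
    "max_sequence_length": 50,
    "kernel_sizes": "3-4-5",   # '-'-separated string, split inside build_model
    "filters": 100,
    "max_norm_value": 3.0,
    "dropout_ratio": 0.5,
    "hidden_size": 64,
    "l2_regularization": 1e-4,
    "learning_rate": 1e-3,
}
model = build_model(hparams)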
Code Example #2
def build_model(hparams):
    if hparams['word_embedding'] == 'w2v':
        input_layer = Input(shape=(hparams['max_sequence_length'], ))
        embedding_layer = get_w2v('').get_keras_embedding(train_embeddings=hparams['train_embeddings'])(input_layer)
    elif hparams['word_embedding'] == 'elmo':
        # Precomputed 1024-dimensional ELMo vectors are fed in directly, so
        # the input itself serves as the embedding layer.
        input_layer = Input(shape=(hparams['max_sequence_length'], 1024, ))
        embedding_layer = input_layer

    submodels = []
    kernel_sizes = hparams['kernel_sizes'].split('-')
    for ks in kernel_sizes:
        conv_layer = Conv1D(
            activation = 'relu',
            filters = hparams['filters'], 
            kernel_size = int(ks),
            kernel_constraint = max_norm(hparams['max_norm_value'])
        )(embedding_layer)
        max_pooling = GlobalMaxPooling1D()(conv_layer)
        submodels.append(max_pooling)
    concat = Concatenate()(submodels)

    dropout_layer_1 = Dropout(hparams['dropout_ratio'])(concat)
    hidden_layer = Dense(
        hparams['hidden_size'], 
        activation = 'relu', 
        kernel_initializer = initializers.RandomUniform(
            minval = - 1 / np.sqrt(len(kernel_sizes) * hparams['filters']),
            maxval = 1 / np.sqrt(len(kernel_sizes) * hparams['filters'])
        ),
        bias_initializer = initializers.Zeros(),
        kernel_regularizer = regularizers.l2(hparams['l2_regularization'])
    )(dropout_layer_1)
    dropout_layer_2 = Dropout(hparams['dropout_ratio'])(hidden_layer)
    output_layer = Dense(
        2,
        activation = 'sigmoid',
        kernel_initializer = initializers.RandomUniform(
            minval = - 1 / np.sqrt(hparams['hidden_size']),
            maxval = 1 / np.sqrt(hparams['hidden_size'])
        ),
        bias_initializer = initializers.Zeros(),
        kernel_regularizer = regularizers.l2(hparams['l2_regularization'])
    )(dropout_layer_2)
    
    model = Model(inputs=[input_layer], outputs=[output_layer])
    model.compile(
        loss = metric.dice_loss,
        optimizer = Adam(learning_rate = hparams['learning_rate']),
        metrics = [f1_score]
    )

    return model
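
The two branches imply different input contracts: the 'w2v' branch expects integer token ids, while the 'elmo' branch expects precomputed 1024-dimensional vectors. A hypothetical illustration of the shapes:

import numpy as np

# Hypothetical shapes for a batch of 32 sentences with max_sequence_length = 50.
X_w2v = np.zeros((32, 50), dtype="int32")           # token ids for the 'w2v' branch
X_elmo = np.zeros((32, 50, 1024), dtype="float32")  # precomputed ELMo vectors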
Code Example #3
def build_model(hparams):
    if hparams['word_embedding'] == 'w2v':
        input_layer = Input(shape=(hparams['max_sequence_length'], ))
        embedding_layer = get_w2v('').get_keras_embedding(train_embeddings=hparams['train_embeddings'])(input_layer)
    elif hparams['word_embedding'] == 'elmo':
        input_layer = Input(shape=(hparams['max_sequence_length'], 1024, ))
        embedding_layer = input_layer
    elif hparams['word_embedding'] == 'random':
        input_layer = Input(shape=(hparams['max_sequence_length'], ))
        # Randomly initialized, trainable embeddings; the +2 presumably
        # reserves ids for the padding and out-of-vocabulary tokens.
        embedding_layer = Embedding(
            hparams['dictionary_len'] + 2,
            hparams['embedding_size'],
            input_length = hparams['max_sequence_length'],
            embeddings_initializer = initializers.RandomNormal(
                mean = 0.,
                stddev = 2 / hparams['max_sequence_length']
            )
        )(input_layer)
    flatten_layer = Flatten()(embedding_layer)
    dropout_layer_1 = Dropout(hparams['dropout_ratio'])(flatten_layer)
    hidden_layer = Dense(
        hparams['hidden_size'],
        activation = 'relu',
        kernel_initializer = initializers.RandomUniform(
            minval = - 1 / np.sqrt(hparams['embedding_size'] * hparams['max_sequence_length']),
            maxval = 1 / np.sqrt(hparams['embedding_size'] * hparams['max_sequence_length'])
        ),
        bias_initializer = initializers.Zeros(),
        kernel_regularizer = regularizers.l2(hparams['l2_regularization'])
    )(dropout_layer_1)
    dropout_layer_2 = Dropout(hparams['dropout_ratio'])(hidden_layer)
    output_layer = Dense(
        2,
        activation = 'sigmoid',
        kernel_initializer = initializers.RandomUniform(
            minval = - 1 / np.sqrt(hparams['hidden_size']),
            maxval = 1 / np.sqrt(hparams['hidden_size'])
        ),
        bias_initializer = initializers.Zeros(),
        kernel_regularizer = regularizers.l2(hparams['l2_regularization'])
    )(dropout_layer_2)
    model = Model(inputs=[input_layer], outputs=[output_layer])
    model.compile(
        loss = metric.dice_loss,
        optimizer = Adam(learning_rate = hparams['learning_rate']),
        metrics = [f1_score]
    )
    return model
Code Example #4
def build_model(hparams): 

    input_layer_dynamic = Input(shape=(hparams['max_sequence_length'],), name='w2v_input')
    input_layer_static = Input(shape=(hparams['max_sequence_length'],hparams['embedding_size']),name='ELMo_input')

    embedding_layer = get_w2v('').get_keras_embedding(train_embeddings=True)(input_layer_dynamic)
    
    submodels = []
    # build_submodels is not shown in this snippet; a hedged sketch of it
    # follows this example.
    submodels.extend(build_submodels(hparams['kernel_sizes'], hparams['filters'],
                                     hparams['max_norm_value'], embedding_layer))
    submodels.extend(build_submodels(hparams['kernel_sizes'], hparams['filters'],
                                     hparams['max_norm_value'], input_layer_static))
    
    concat = Concatenate()(submodels)

    dropout_layer_1 = Dropout(hparams['dropout_ratio'])(concat)
    hidden_layer = Dense(
        hparams['hidden_size'], 
        activation = 'relu', 
        # kernel_sizes is a '-'-separated string (as in Examples #1 and #2),
        # so count its entries rather than its characters.
        kernel_initializer = initializers.RandomUniform(
            minval = - 1 / np.sqrt(2 * len(hparams['kernel_sizes'].split('-')) * hparams['filters']),
            maxval = 1 / np.sqrt(2 * len(hparams['kernel_sizes'].split('-')) * hparams['filters'])
        ),
        bias_initializer = initializers.Zeros(),
        kernel_regularizer = regularizers.l2(hparams['l2_regularization'])
    )(dropout_layer_1)
    dropout_layer_2 = Dropout(hparams['dropout_ratio'])(hidden_layer)
    output_layer = Dense(
        2,
        activation = 'sigmoid',
        kernel_initializer = initializers.RandomUniform(
          minval = - 1 / np.sqrt(hparams['hidden_size']),
          maxval = 1 / np.sqrt(hparams['hidden_size'])
        ),
        bias_initializer = initializers.Zeros(),
        kernel_regularizer = regularizers.l2(hparams['l2_regularization'])
    )(dropout_layer_2)
    
    model = Model(inputs=[input_layer_dynamic, input_layer_static], outputs=output_layer)
    model.compile(
        loss = metric.dice_loss,
        optimizer = Adam(learning_rate = hparams['learning_rate']),
        metrics = [f1_score]
    )
    #model.summary()

    return model
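
build_submodels is called above but not shown in the snippet. A minimal sketch of what it plausibly does, reconstructed from the per-kernel-size loop in Examples #1 and #2 (the name and signature come from the call sites; the body is an assumption):

def build_submodels(kernel_sizes, filters, max_norm_value, in_layer):
    # Assumed body, mirroring the convolution/pooling loop of Examples #1-#2;
    # this is a reconstruction, not the original implementation.
    outputs = []
    for ks in kernel_sizes.split('-'):
        conv = Conv1D(
            activation='relu',
            filters=filters,
            kernel_size=int(ks),
            kernel_constraint=max_norm(max_norm_value)
        )(in_layer)
        outputs.append(GlobalMaxPooling1D()(conv))
    return outputs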
Code Example #5
def build_model(hparams):
    hidden_sizes = hparams['hidden_sizes_rnn'].split('-')
    input_layer = Input(shape=(hparams['max_sequence_length'], ))
    if hparams["word_embedding"] == "w2v":
        embedding_layer = get_w2v('').get_keras_embedding(
            train_embeddings=hparams['train_embeddings'])(input_layer)
    if hparams["word_embedding"] == "random":
        embedding_layer = Embedding(
            hparams["dictionary_len"] + 2,
            hparams["embedding_size"],
            input_length=hparams["max_sequence_length"],
            embeddings_initializer=initializers.RandomNormal(
                mean=0.,
                stddev=2 / hparams["max_sequence_length"]))(input_layer)
    # Three stacked bidirectional GRUs; only the last layer collapses the
    # sequence (return_sequences defaults to False there).
    bidirection_layer_1 = Bidirectional(
        GRU(int(hidden_sizes[0]), activation='relu',
            return_sequences=True))(embedding_layer)
    bidirection_layer_2 = Bidirectional(
        GRU(int(hidden_sizes[1]), activation='relu',
            return_sequences=True))(bidirection_layer_1)
    bidirection_layer_3 = Bidirectional(
        GRU(int(hidden_sizes[2]),
            activation='relu'), merge_mode="concat")(bidirection_layer_2)
    dropout_layer_1 = Dropout(hparams["dropout_ratio"])(bidirection_layer_3)
    output_layer = Dense(2,
                         activation='sigmoid',
                         kernel_initializer=initializers.RandomUniform(
                             minval=-1 / np.sqrt(int(hidden_sizes[2])),
                             maxval=1 / np.sqrt(int(hidden_sizes[2]))),
                         bias_initializer=initializers.Zeros(),
                         kernel_regularizer=regularizers.l2(
                             hparams['l2_regularization']))(dropout_layer_1)
    model = Model(inputs=[input_layer], outputs=[output_layer])
    model.compile(loss=metric.dice_loss,
                  optimizer=RMSprop(learning_rate=hparams["learning_rate"],
                                    momentum=0.9),
                  metrics=[f1_score])
    tf.keras.utils.plot_model(model, "model_rnn.png")

    model.summary()
    return model
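
hidden_sizes_rnn follows the same '-'-separated convention as kernel_sizes and must provide one size per stacked BiGRU layer. A hypothetical call (values illustrative only):

# Hypothetical values; three sizes, one per bidirectional GRU layer.
model = build_model({
    'hidden_sizes_rnn': '128-64-32',
    'max_sequence_length': 50,
    'word_embedding': 'w2v',
    'train_embeddings': False,
    'dropout_ratio': 0.5,
    'l2_regularization': 1e-4,
    'learning_rate': 1e-3,
})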
Code Example #6
from utils.embeddings import get_w2v, encode_for_w2v

import numpy as np

from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorboard.plugins.hparams import api as hp
from transformers import AutoTokenizer

import tensorflow as tf

X_train = []

# Each entry of grid_specs["input_path"] is a per-input configuration dict
# (data path, embedding type, padding and context options).
for input_path in grid_specs["input_path"]:
    DS_X = np.load(input_path["input_path"], allow_pickle=True)
    if input_path['word_embedding'] == 'w2v':
        w2v_model = get_w2v(grid_specs['w2v_path'], DS_X)
        DS_X = encode_for_w2v(DS_X)
        if input_path['sentence_padding']:
            DS_X = pad_sequences(DS_X,
                                 maxlen=50,
                                 padding="post",
                                 truncating="post",
                                 value=w2v_model.vocab["</s>"].index)
    elif input_path['word_embedding'] == 'elmo' and input_path[
            'sentence_padding']:
        DS_X = pad_sequences(DS_X,
                             maxlen=50,
                             padding="post",
                             truncating="post",
                             value=0)
    if "context_input" in input_path and input_path["context_input"]:
Code Example #7
            plt.annotate(word,
                         alpha=0.5,
                         xy=(x[i], y[i]),
                         xytext=(5, 2),
                         textcoords='offset points',
                         ha='right',
                         va='bottom',
                         size=8)
    plt.legend(loc=4)
    plt.title("Word2Vec")
    plt.grid(True)
    if filename:
        plt.savefig(filename, format='png', dpi=150, bbox_inches='tight')
    plt.show()


from w2v_plot import w2v_plot

from utils.embeddings import get_w2v
from utils.embeddings import model_w2v

import numpy as np

sentences_tokenized = np.load("_dataset/Task_A_input.npy", allow_pickle=True)
words_to_plot = set()
for sentence in sentences_tokenized:
    for word in sentence:
        words_to_plot.add(word)
w2v = get_w2v("_utils_files/word2vec/spacy_m/trained_model-5.model")
# Plot a hand-picked word list rather than the full vocabulary collected
# in words_to_plot above.
w2v_plot(w2v,
         ["bello", "puttana", "donna", "troia", "uomo", "persona", "bella"])
Code Example #8
import numpy as np

from tensorflow.keras.preprocessing.sequence import pad_sequences
import tensorflow as tf

from sklearn.model_selection import StratifiedKFold

from utils.embeddings import encode_for_w2v, get_w2v
from utils.data_preparation import encode_classes


# Load dataset
DS_X = np.load("_dataset/Task_A_input.npy", allow_pickle=True)
DS_Y = np.load("_dataset/Task_A_target.npy", allow_pickle=True)

# Load word2vec model and prepare dataset for training 
w2v_path = '_utils_files/word2vec/nlpl/trained_model-30.model'                                           # <============= INSERT EMBEDDINGS PATH HERE
w2v_model = get_w2v(w2v_path, DS_X, train_purpose=False)
DS_X = encode_for_w2v(DS_X)
DS_X = pad_sequences(DS_X, maxlen=70, padding="post", value=w2v_model.vocab["</s>"].index)

# Divide the dataset into training and validation splits
skf = StratifiedKFold(n_splits = 10, random_state = 79946, shuffle=True)
for train_indices, validation_indices in skf.split(DS_X, encode_classes(DS_Y)):
    X_tr = DS_X[train_indices]
    Y_tr = DS_Y[train_indices]
    X_vl = DS_X[validation_indices]
    Y_vl = DS_Y[validation_indices]
    break  # keep only the first of the ten stratified folds

# HYPERPARAMETERS
model_parameters = {
    'dropout_ratio': 0.7,