Example #1
import pickle

from keras.layers import (Input, Embedding, TimeDistributed, Dropout, Conv1D,
                          MaxPooling1D, Flatten, LSTM, Bidirectional,
                          concatenate)
from keras.models import Model
from keras_contrib.layers import CRF

import process_data

EMBED_DIM = 200      # word-embedding size (assumed value; set to match the pretrained vectors)
BiRNN_UNITS = 200    # total units across both LSTM directions (assumed value)


def create_model(train=True):
    """Build a word-embedding + char-CNN + BiLSTM-CRF sequence tagger."""
    if train:
        (train_x, chars_x, train_y, word_maxlen, char_maxlen, x_length), (
            test_x, test_chars_x, test_y, word_maxlen, char_maxlen, y_length), \
        (word_len, char_len, vocab, chars_vocab, chunk_tags, embedding_weights) = process_data.load_cnn_data()
    else:
        with open('model/chars-config.pkl', 'rb') as inp:
            (word_len, char_len, vocab, chars_vocab, chunk_tags,
             embedding_weights) = pickle.load(inp)
    # model = Sequential()
    word_in = Input(shape=(word_len, ), name='word_in')
    # model.add(Embedding(len(vocab) + 1, EMBED_DIM, weights=[embedding_weights], mask_zero=True))  # Random embedding
    # words embedding
    embed_words = Embedding(len(vocab) + 1,
                            EMBED_DIM,
                            mask_zero=True,
                            name='words_embedding')(word_in)

    # character embedding
    char_in = Input(shape=(word_len, 1), name='char_in')
    embed_chars = TimeDistributed(
        Embedding(len(chars_vocab) + 2,
                  100,
                  mask_zero=False,
                  name='char_embedding'))(char_in)
    # char_enc = TimeDistributed(LSTM(units=20, return_sequences=False, recurrent_dropout=0.5))(embed_chars)
    dropout = Dropout(0.3, name='char_dropout')(embed_chars)
    conv1d_out = TimeDistributed(
        Conv1D(kernel_size=3,
               filters=100,
               padding='same',
               activation='tanh',
               strides=1,
               name='conv1d'))(dropout)
    maxpool_out = TimeDistributed(MaxPooling1D(1,
                                               name='max_pooling'))(conv1d_out)
    char = TimeDistributed(Flatten(name='flatten'))(maxpool_out)
    char = Dropout(0.3)(char)

    x = concatenate([embed_words, char])
    x = Bidirectional(
        LSTM(BiRNN_UNITS // 2,
             recurrent_dropout=0.1,
             return_sequences=True,
             name='LSTM'))(x)
    x = Dropout(0.7)(x)
    crf = CRF(len(chunk_tags), sparse_target=True)
    out = crf(x)
    model = Model([word_in, char_in], out)
    model.summary()
    model.compile('adam', loss=crf.loss_function, metrics=[crf.accuracy])
    if train:
        return model, (train_x, chars_x, train_y,
                       word_len), (test_x, test_chars_x, test_y,
                                   y_length), (vocab, chunk_tags)
    else:
        return model, (vocab, chunk_tags)
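A minimal usage sketch of the inference path (train=False); the weights filename below is an assumption, not taken from the original:

# Sketch only: the weights file name is assumed.
model, (vocab, chunk_tags) = create_model(train=False)
model.load_weights('model/crf-weights.h5')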
Example #2
import pickle

import keras
import numpy as np
from sklearn_crfsuite.metrics import flat_classification_report
import process_data
import cnn_bilsm_crf_model

EPOCHS = 10
model, (train_x, chars_x, train_y,
        word_len), (test_x, test_chars_x, test_y,
                    length), (vocab,
                              chunk_tags) = cnn_bilsm_crf_model.create_model()
dev_x, dev_chars_x, dev_y, _, _, dev_length = process_data.load_cnn_data(
    use_dev=True)
# train model
# split = 7000

chars_x = np.array([[[ch] for ch in s] for s in chars_x])
test_chars_x = np.array([[[ch] for ch in s] for s in test_chars_x])
dev_chars_x = np.array([[[ch] for ch in s] for s in dev_chars_x])

#
# train_x = train_x[:100]
# chars_x = chars_x[:100]
# train_y = train_y[:100]
# test_x = test_x[:100]
# test_chars_x = test_chars_x[:100]
# test_y = test_y[:100]

history = model.fit(
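The original snippet breaks off in the middle of the fit call. As a hedged sketch only (the batch size is an assumption, and the dev split loaded above is used for validation), the call for this two-input model could be completed along these lines:

# Sketch only: batch_size is an assumed value, not taken from the original.
history = model.fit(
    [train_x, chars_x], train_y,
    batch_size=64,
    epochs=EPOCHS,
    validation_data=([dev_x, dev_chars_x], dev_y))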
Example #3
import pickle

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import yaml

import process_data as pro
import batch
import cnn

np.random.seed(0)
tf.set_random_seed(0)

# Load the settings from the YAML file
with open("settings.yml", encoding='UTF-8') as f:
    settings = yaml.safe_load(f)

image, ratio = pro.load_cnn_data(0)

print(image.shape, ratio.shape)

print(len(image))
num_train = settings["num_train"]        # number of training samples
num_validate = settings["num_validate"]  # number of validation samples
num_test = settings["num_test"]          # number of test samples
train_x = image[:num_train]
val_x = image[num_train:num_train + num_validate]
test_x = image[num_train + num_validate:num_train + num_validate + num_test]
train_t = ratio[:num_train]
val_t = ratio[num_train:num_train + num_validate]
test_t = ratio[num_train + num_validate:num_train + num_validate + num_test]

print(train_x.shape, val_x.shape, test_x.shape)
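As a follow-on sketch, shuffled mini-batch iteration over the training split could look like the following; this uses plain NumPy rather than the project's own batch helper, and the batch size is an arbitrary assumption:

# Sketch only: plain-NumPy mini-batching; batch_size is an assumed value.
batch_size = 32
indices = np.random.permutation(num_train)
for start in range(0, num_train, batch_size):
    idx = indices[start:start + batch_size]
    x_batch, t_batch = train_x[idx], train_t[idx]
    # each (x_batch, t_batch) pair would be fed to one CNN training step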