Example no. 1
import os

from tensorflow.keras.layers import (Input, Masking, Embedding, Dropout,
                                     Bidirectional, LSTM, Dense)
from tensorflow.keras.models import Model
from tensorflow.keras.utils import plot_model


def define_model():
    input = Input(shape=(max_wrd_len, ))
    # Note: this Masking layer is redundant; mask_zero=True on the Embedding
    # below already masks the zero-padded timesteps.
    mask = Masking(mask_value=0)(input)
    model = Embedding(input_dim=chr_vocab_size,
                      output_dim=100,
                      input_length=max_wrd_len,
                      mask_zero=True)(mask)
    model = Dropout(0.1)(model)
    model = Bidirectional(
        LSTM(units=250, return_sequences=True, recurrent_dropout=0.1))(model)
    model = Bidirectional(LSTM(units=250, recurrent_dropout=0.1))(model)
    out = Dense(num_classes, activation="softmax")(model)
    model = Model([input], out)

    # load existing weights, if they exist
    if os.path.isfile(outFileName + "-best.hdf5"):
        model.load_weights(outFileName + "-best.hdf5")
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()  # summary() prints itself; wrapping it in print() shows "None"
    plot_model(model, show_shapes=True, to_file=outFileName + '-plot.png')
    return model
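A minimal sketch of how define_model() could be driven. The globals it reads (max_wrd_len, chr_vocab_size, num_classes, outFileName) are not defined in the snippet, so the values below are illustrative assumptions; plot_model additionally requires pydot and graphviz to be installed.

import numpy as np

# Illustrative stand-ins for the globals define_model() expects.
max_wrd_len = 20       # longest word, in characters
chr_vocab_size = 60    # character vocabulary size (0 is reserved for padding)
num_classes = 5        # number of output labels
outFileName = "char_blstm"

model = define_model()

# X: (num_words, max_wrd_len) integer character indices, zero-padded
# y: (num_words, num_classes) one-hot labels
X = np.random.randint(1, chr_vocab_size, size=(8, max_wrd_len))
y = np.eye(num_classes)[np.random.randint(0, num_classes, size=8)]
model.fit(X, y, batch_size=4, epochs=1)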
Example no. 2
import pickle

from music21 import converter, note
from tensorflow.keras.layers import (Input, Embedding, Dropout, Bidirectional,
                                     LSTM, TimeDistributed, Dense)
from tensorflow.keras.models import Model

max_len = pickle.load(open(state_name + "max_len.p", "rb"))
n_notes = len(notes)
n_chords = len(chords)

input = Input(shape=(max_len, ))
model = Embedding(input_dim=n_notes, output_dim=50,
                  input_length=max_len)(input)
model = Dropout(0.1)(model)
model = Bidirectional(
    LSTM(units=100, return_sequences=True, recurrent_dropout=0.1))(model)
out = TimeDistributed(Dense(n_chords, activation="softmax"))(model)

model = Model(input, out)

model.load_weights("model")

# test = ['67 b', '67 c', '67 e', '67 b', '69 b', '69 c', '69 c', '69 e', '67 b', '67 c', '67 c', '67 e', '72 b', '72 c', '72 c', '72 e', '71 b', '71 c', '71 c', '71 c', '71 c', '71 c', '71 c', '71 e', '67 b', '67 c', '67 e', '67 b', '69 b', '69 c', '69 c', '69 e', '67 b', '67 c', '67 c', '67 e', '74 b', '74 c', '74 c', '74 e', '72 b', '72 c', '72 c', '72 c','72 c', '72 c', '72 c', '72 e']

piece = converter.parse("ode.mid")
notes1 = [n.pitch.midi for n in piece[0] if isinstance(n, note.Note)]
offsets = [n.offset for n in piece[0] if isinstance(n, note.Note)]

lengths = [offsets[i + 1] - offsets[i] for i in range(len(offsets) - 1)]
lengths.append(2.0)  # assume a fixed 2-beat length for the final note

new_notes = []
new_beginnings = []

for i in range(len(lengths)):
    for l in range(int(lengths[i] / 0.5)):
        # The original snippet is truncated here; this body is a guess at the
        # intent: expand each note into 0.5-beat steps, remembering which
        # step begins a new note.
        new_notes.append(notes1[i])
        new_beginnings.append(l == 0)
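From here the example presumably encodes the expanded sequence and predicts a chord per timestep. A hedged sketch of that step: note2index is a hypothetical mapping from note tokens to the integer indices the Embedding was trained on, and the "b"/"c" suffixes mirror the begin/continue markers in the commented-out test list above.

import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Hypothetical continuation: build "pitch b/c" tokens, encode, and predict.
tokens = ["{} {}".format(p, "b" if b else "c")
          for p, b in zip(new_notes, new_beginnings)]
encoded = [note2index.get(t, 0) for t in tokens]   # note2index is assumed
X = pad_sequences([encoded], maxlen=max_len, padding="post")
pred = model.predict(X)                            # (1, max_len, n_chords)
chord_ids = np.argmax(pred, axis=-1)[0]            # most likely chord per step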
Example no. 3
import numpy as np
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn_crfsuite.metrics import flat_classification_report

model.summary()

model_checkpoint = ModelCheckpoint('lstm_crf_model.h5',
                                   save_best_only=True,
                                   save_weights_only=True)

model.fit(X_tr,
          np.array(y_tr),
          batch_size=32,
          epochs=10,
          validation_split=0.1,
          verbose=1,
          callbacks=[model_checkpoint])

model.load_weights('lstm_crf_model.h5')
y_pred = model.predict(X_te)  # 3-dim: (n_samples, max_len, n_tags)

# print(y_te.shape)
y_te, y_pred = np.argmax(y_te, -1), np.argmax(y_pred, -1)
# print(y_te.shape)

report = flat_classification_report(y_pred=y_pred, y_true=y_te)
print(report)

i = 190
p = model.predict(np.array([X_te[i]]))
p = np.argmax(p, axis=-1)
print("{:15}||{:5}||{}".format("Word", "True", "Pred"))
print(30 * "=")
for w, t, pred in zip(X_te[i], y_te[i], p[0]):
    # The original snippet is truncated here; a typical body prints the
    # aligned word/true/predicted triple (mapping the indices back to
    # strings would need vocabulary lookups this example does not show).
    print("{:15}||{:5}||{}".format(w, t, pred))
Example no. 4
# (number of batches) - 1, because batch indices start from 0
# test_batch_generator = batch_generator(x_test_filename, '', batch_size, test_steps - 1)  # testing batch generator
test_generator = DataGenerator(x_test_filename,
                               '',
                               test_steps,
                               batch_size=batch_size,
                               shuffle=False)
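DataGenerator is a custom class this example never defines. Below is a minimal sketch of a keras.utils.Sequence subclass matching the call above; the constructor signature follows the call, but the on-disk format (.npy arrays) and the slicing logic are assumptions.

import numpy as np
from tensorflow.keras.utils import Sequence

class DataGenerator(Sequence):
    """Minimal sketch: serves pre-tokenised batches from arrays on disk."""

    def __init__(self, x_filename, y_filename, steps, batch_size=32,
                 shuffle=True):
        self.x = np.load(x_filename)  # assumed: one array holding all inputs
        self.y = np.load(y_filename) if y_filename else None
        self.steps = steps
        self.batch_size = batch_size
        self.shuffle = shuffle

    def __len__(self):
        return self.steps  # number of batches per epoch

    def __getitem__(self, idx):
        sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        if self.y is None:
            return self.x[sl]  # test time: inputs only
        return self.x[sl], self.y[sl]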

# ======================================================================================================================
# Model Loading
# ======================================================================================================================

# save_all_weights | load_all_weights: saves model and optimizer weights (save_weights and save)
# load_status = model.load_weights("pretrained_models\\fulltext_model_weights.h5")  # sentences_model_weights.h5
load_status = model.load_weights(
    "pretrained_models\\well tuned - run 1\\checkpoint\\model.05.h5"
)  # sentences_model_weights.h5

# `assert_consumed` can be used as validation that all variable values have been
# restored from the checkpoint. See `tf.train.Checkpoint.restore` for other
# methods in the Status object.
#print(load_status.assert_consumed())

model.summary()

print('AFTER LOADING', model.get_weights())
# ======================================================================================================================
# Predict on validation data
# ======================================================================================================================
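The snippet breaks off at the "Predict on validation data" banner; a plausible continuation under the same assumptions:

# Hypothetical continuation: run inference through the test generator and
# collapse per-tag probabilities to label indices.
y_pred = model.predict(test_generator, verbose=1)
y_pred = np.argmax(y_pred, axis=-1)
print(y_pred.shape)  # expected: (num_test_samples, max_len)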
Example no. 5
import numpy as np
import keras as k
from keras_contrib.layers import CRF

# CRF layer (the keras_contrib implementation, which exposes the
# loss_function and accuracy attributes used below)
crf = CRF(len(tag2index))

out = crf(model)  # output
model = Model(input, out)

# Optimiser
adam = k.optimizers.Adam(lr=0.0005, beta_1=0.9, beta_2=0.999)

# Compile model
model.compile(optimizer=adam,
              loss=crf.loss_function,
              metrics=[crf.accuracy, 'accuracy'])

model.load_weights('mb-full.h5')


# This function receives in `sequences` the list of sentences, where each
# element of a sentence is a one-hot vector.
def logits_to_tokens(sequences, index):
    token_sequences = []
    for categorical_sequence in sequences:
        token_sequence = []
        for categorical in categorical_sequence:
            token_sequence.append(index[np.argmax(categorical)])

        token_sequences.append(token_sequence)

    return token_sequences
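A short usage sketch for logits_to_tokens. index2tag is an assumed inverse of the tag2index mapping above, and X_te stands for whatever encoded test sentences the model expects:

# Hypothetical usage: decode the model's output back to tag strings.
index2tag = {i: t for t, i in tag2index.items()}
pred = model.predict(X_te)                   # (n_sentences, max_len, n_tags)
pred_tags = logits_to_tokens(pred, index2tag)
print(pred_tags[0][:10])                     # first ten tags of sentence 0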