# Example #1
# NOTE(review): the rmsprop variant below was tried first but plateaued
# around 0.45 accuracy, which is not great — kept for reference.
#model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# SGD is also underwhelming here; at least in the initial epochs we seem to
# be missing the extra information an already-pretrained model would provide.
sgd_optimizer = optimizers.SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd_optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# ==============================================================================
# TRAIN 1
# ==============================================================================
#

# Fit on the training split, tracking metrics on the held-out test split
# every epoch. All run parameters are gathered in one place for readability.
fit_kwargs = dict(
    batch_size=batch_size,
    epochs=epochs,
    validation_data=(x_test, y_test),
    verbose=1,
)
history = model.fit(x_train, y_train, **fit_kwargs)

from visualize_history import visualize_history

# No run-specific suffix for this experiment.
specialname = ''
# Plot accuracy alongside loss and write the figure to disk.
plot_path = 'classifier6_' + str(epochs) + 'epochs_' + specialname
visualize_history(history.history, show_also='acc', save=True, save_path=plot_path)
# NOTE(review): this run later collapsed — sudden drop to loss: nan.

# ==============================================================================
# REPORT
# ==============================================================================
#

#print(history1.history)
#print(history2.history)

# Number of epochs in the first training phase — used later to mark the
# phase boundary on the combined plot.
split_n = len(history1.history['val_loss'])

# Stitch the second training phase onto the first so the whole run can be
# plotted as one continuous history. Looping over the metric keys replaces
# four copy-pasted `+=` lines (keys: val_loss, val_acc, loss, acc).
for metric_key in ('val_loss', 'val_acc', 'loss', 'acc'):
    history1.history[metric_key] += history2.history[metric_key]

from visualize_history import visualize_history

# Build the combined-history plot without showing or saving yet — we still
# need to annotate the phase boundary before writing the figure out.
plt = visualize_history(history1.history, show_also='acc', show=False, save=False)
#visualize_history(history2.history, show_also='acc', save=False, save_path='classifier5b_'+str(epochs)+'epochs_')

# Dashed vertical line where phase 1 ends and phase 2 (fine-tuning) begins;
# offset by 0.5 so the line sits between the two epochs.
plt.axvline(x=split_n - 0.5, linestyle='dashed', color='black')

filename = 'classifier5b_CHILL_{}+{}epochs_'.format(epochs_first, epochs_second)
plt.savefig(filename)

plt.show()

# Persist the fine-tuned model.
fine_model.save('5b_final_fine_model.h5')
# Example #3
# ====================================================================================

# Train on (x_train, y_train), validating against (x_test, y_test) each epoch.
history = model.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    validation_data=(x_test, y_test),
    verbose=1,
)

# Persist the trained model next to the plots.
model.save(PLOTNAME + "model.h5")

# Save the accuracy/loss curves to disk without opening a window.
visualize_history(
    history.history,
    show_also='acc',
    save=True,
    show=False,
    save_path=PLOTNAME + specialname,
)

# ==============================================================================

### Now analyze results:
from sklearn.metrics import classification_report, confusion_matrix

# Predict class probabilities for the test split, then map both predictions
# and ground truth back from categorical (one-hot) encoding to label values.
# (A plain np.argmax(..., axis=1) was the earlier alternative here.)
decode_labels = convert_back_from_categorical_data

pred = model.predict(x_test, batch_size=32, verbose=1)
y_predicted = decode_labels(pred)
y_test_label = decode_labels(y_test)