コード例 #1
0
class TrainingModel:
    """Feed-forward binary classifier wrapping a Keras Sequential model.

    NOTE(review): relies on Sequential/Dense/Dropout and the sklearn
    metric helpers (accuracy_score, classification_report, roc_auc_score,
    confusion_matrix) being imported elsewhere in this file.
    """

    def __init__(self, input_shape):
        """Build and compile the network.

        Args:
            input_shape: shape tuple of one input sample, e.g. (n_features,).
        """
        self.model = Sequential()
        self.model.add(Dense(64, activation='relu', input_shape=input_shape))
        self.model.add(Dropout(0.3))
        self.model.add(Dense(128, activation='relu'))
        self.model.add(Dropout(0.3))
        self.model.add(Dense(128, activation='relu'))
        self.model.add(Dense(1, activation='sigmoid'))
        self.model.compile(optimizer='adam',
                           loss='binary_crossentropy',
                           metrics=['accuracy'])

    def fit(self, data, label):
        """Train for a single (silent) epoch on (data, label)."""
        self.model.fit(data, label, epochs=1, batch_size=128, verbose=0)

    def predict(self, data):
        """Return hard 0/1 class labels for *data*.

        FIX: Sequential.predict_classes() was removed in TF 2.6; threshold
        the sigmoid probability output at 0.5 instead.
        """
        return (self.model.predict(data) > 0.5).astype('int32')

    def evaluate(self, X_test, y_test, print_report=True):
        """Score the model on held-out data.

        When print_report is True, print a full report and return None;
        otherwise return a dict with accuracy, AUC, and the weighted-average
        precision/recall/f1 from the classification report.
        """
        y_predicted = self.predict(X_test)
        # FIX: predict_proba() was removed from Sequential in TF 2.6;
        # predict() on a sigmoid output already yields probabilities.
        y_predicted_probs = self.model.predict(X_test)
        if print_report:
            self.print_report(y_test, y_predicted, y_predicted_probs)
        else:
            accuracy = accuracy_score(y_test, y_predicted)
            report = classification_report(y_test,
                                           y_predicted,
                                           output_dict=True)
            auc_score = roc_auc_score(y_test, y_predicted_probs)
            # (removed an unused confusion_matrix computation here)

            return {
                'accuracy': accuracy,
                'auc_score': auc_score,
                **report['weighted avg'],
            }

    def print_report(self, test, predicted, predicted_probs):
        """Print accuracy, confusion matrix, per-class report, and AUC."""
        accuracy = accuracy_score(test, predicted)
        report = classification_report(test, predicted)
        matrix = confusion_matrix(test, predicted)

        print('Accuracy score: {:.5f}'.format(accuracy))
        print('-' * 20)
        print('Confusion Matrix:')
        print(matrix)
        print('-' * 20)
        print(report)
        print('-' * 20)
        print('AUC score: {:.5f}'.format(roc_auc_score(test, predicted_probs)))
コード例 #2
0
import os

# Silence TensorFlow's C++ logging and force CPU-only execution
# (both must be set before TensorFlow is imported).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, Activation, BatchNormalization, AveragePooling2D
from tensorflow.keras.optimizers import SGD, RMSprop, Adam
import tensorflow_datasets as tfds  # pip install tensorflow-datasets
import tensorflow as tf
import logging
import numpy as np

# FIX: `tf.logging` was removed in TensorFlow 2.x, so the old
# tf.logging.set_verbosity(...) call raised AttributeError at import time.
# tf.get_logger() is the supported way to quiet Python-side logging.
tf.get_logger().setLevel(logging.ERROR)

# Minimal 2-2-1 MLP that learns the XOR function.
model = Sequential()
model.add(Dense(2, input_dim=2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# FIX: the `lr` keyword was deprecated and later removed from Keras
# optimizers; `learning_rate` is the supported name.
model.compile(loss='binary_crossentropy', optimizer=SGD(learning_rate=0.1))

# The four XOR input/output pairs.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])
model.fit(X, y, batch_size=1, epochs=1000, verbose=0)

print("Network test:")
# FIX: predict_proba() was removed from Sequential in TF 2.6; predict()
# on a sigmoid output already returns the probability.
print("XOR(0,0):", model.predict(np.array([[0, 0]])))
print("XOR(0,1):", model.predict(np.array([[0, 1]])))
print("XOR(1,0):", model.predict(np.array([[1, 0]])))
print("XOR(1,1):", model.predict(np.array([[1, 1]])))
def bert_tensorflow_test(X_train, X_test, Y_train, Y_test):
    """Train a bidirectional-GRU sentence classifier and score it.

    Args:
        X_train, X_test: padded sequence tensors, assumed shape
            (n, MAX_SEQUENCE_LEN, VECTOR_DIM) -- TODO confirm with caller.
        Y_train, Y_test: one-hot label arrays (argmax gives the class id).

    Returns:
        (keras_eval_result, custom_accuracy, default_accuracy,
         [TP, TN, FP, FN]) where the counts are restricted to
        class 2 (idiomatic) vs class 3 (non-idiomatic).
    """
    # Model: masked input -> bidirectional GRU -> dense softmax head.
    model = Sequential()
    model.add(
        Masking(mask_value=0., input_shape=(MAX_SEQUENCE_LEN, VECTOR_DIM)))
    forward_layer = GRU(10, return_sequences=False, dropout=0.5)
    backward_layer = GRU(10,
                         return_sequences=False,
                         dropout=0.5,
                         go_backwards=True)
    model.add(
        Bidirectional(forward_layer,
                      backward_layer=backward_layer,
                      input_shape=(MAX_SEQUENCE_LEN, VECTOR_DIM)))
    # Predictions are made for the entire sentence (no TimeDistributed).
    model.add(Dense(NUM_CLASSES))
    model.add(Activation('softmax'))

    # Show the training-set class distribution.
    train_classes = [np.argmax(y) for y in Y_train]
    print(Counter(train_classes))

    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    print('compiled model')
    model.fit(X_train, Y_train, batch_size=8,
              epochs=10)
    print('fit model')
    # FIX: do not shadow the builtin `eval`.
    eval_result = model.evaluate(X_test, Y_test, batch_size=8)
    # FIX: predict_proba() was removed from Sequential in TF 2.6;
    # predict() already returns the softmax probabilities.
    preds = model.predict(X_test, verbose=1, batch_size=8)
    print(preds)
    num_correct = 0
    num_incorrect = 0
    TP = TN = FP = FN = 0
    # idiomatic = 2, non-idiomatic = 3
    # NOTE(review): the file is opened but never written; kept so the
    # truncation side effect is preserved -- confirm intent.
    with open('preds_out_temp.txt', 'w') as tempoutf:
        for pred, y in zip(preds, Y_test):
            true_cls = np.argmax(y)
            pred_cls = np.argmax(pred)
            if true_cls in (2, 3):
                if true_cls == pred_cls:
                    num_correct += 1
                else:
                    num_incorrect += 1
            if pred_cls == 2 and true_cls == 2:
                TP += 1
            if pred_cls == 3 and true_cls == 3:
                TN += 1
            if pred_cls == 2 and true_cls == 3:
                FP += 1
            if pred_cls == 3 and true_cls == 2:
                FN += 1
    total = num_correct + num_incorrect
    # ROBUSTNESS: avoid ZeroDivisionError when Y_test has no class-2/3
    # examples.
    custom_accuracy = num_correct / total if total else 0.0
    print('custom accuracy is', custom_accuracy)
    # FIX: the original appended test-set classes onto the *training*
    # class list, so the default accuracy was computed over the combined
    # counts. Count the test set alone.
    test_classes = [np.argmax(y) for y in Y_test]
    class_nums = Counter(test_classes)
    print(class_nums)
    denom = class_nums[2] + class_nums[3]
    default_acc = class_nums[2] / denom if denom else 0.0
    print('default accuracy is', default_acc, 'or', 1 - default_acc)
    return eval_result, custom_accuracy, default_acc, [TP, TN, FP, FN]
コード例 #4
0
                        # NOTE(review): fragment of a deeply nested
                        # hyper-parameter search loop; `model`, `lr`, `n`,
                        # `es`, and the train/val/test/eval splits are
                        # defined above this excerpt.
                        # Final 3-way softmax classification head.
                        model.add(Dense(3))
                        model.add(Activation('softmax'))

                        # NOTE(review): `lr=` is deprecated (later removed)
                        # in Keras optimizers in favour of `learning_rate=`.
                        model.compile(loss='sparse_categorical_crossentropy',
                                    optimizer=RMSprop(lr=lr[n]),
                                    metrics=['accuracy'])

                        model.fit(x_train, y_train, batch_size=64,
                                        epochs=40, 
                                        verbose=2, validation_data=(x_val, y_val),
                                        callbacks=es)
                        score, acc=model.evaluate(x_test, y_test, verbose=0)
                        print('Test accuracy:', acc)
                        # NOTE(review): evaluate() returns [loss, acc] for
                        # this compile config, not an MSE, despite the label.
                        print('the mse value is : ', model.evaluate(x_test, y_test))
                        print('train accuracy: ', model.evaluate(x_train, y_train))
                        # NOTE(review): predict_proba() was removed from
                        # Sequential in TF 2.6; predict() is the replacement.
                        y_pred=model.predict_proba(x_eval)
                        score=roc_auc_score(y_eval,y_pred, multi_class='ovo')
                        print('auc: ', score)
                        # Per-class ROC curves; assumes y_eval is one-hot
                        # encoded (it is indexed per column) -- TODO confirm.
                        fpr=dict()
                        tpr=dict()
                        roc_auc=dict()
                        for i in range(3):
                            fpr[i], tpr[i], _=roc_curve(y_eval[:,i], np.array(y_pred)[:,i])
                            roc_auc[i]=auc(fpr[i], tpr[i])
                            print('roc_auc: ', roc_auc[i])

                        # Micro-averaged ROC over all classes.
                        fpr['micro'], tpr['micro'], _= roc_curve(y_eval.ravel(), np.array(y_pred).ravel())
                        roc_auc['micro']=auc(fpr['micro'], tpr['micro'])

                        # Union of all FPR grid points, for macro-averaging
                        # (continued below this excerpt).
                        all_fpr=np.unique(np.concatenate([fpr[i] for i in range(3)]))
                        mean_tpr=np.zeros_like(all_fpr)
コード例 #5
0
    # NOTE(review): tail of a plotting helper; `epoch_range` and `history`
    # come from the enclosing (unseen) function scope.
    plt.plot(epoch_range, history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    # NOTE(review): the legend names two curves but only val_loss is
    # plotted in this excerpt; the training curve is presumably plotted
    # above -- confirm.
    plt.legend(['Train', 'Val'], loc='upper left')
    plt.show()


# Dump the training history and plot the learning curves.
print(history.history)
plot_learningCurve(history, epochs)

loss, acc = model.evaluate(X_test, y_test)
print("Test loss: ", loss)
print("Test accuracy: ", acc)

# FIX: predict_proba() was removed from Sequential in TF 2.6; predict() on
# a sigmoid output returns the positive-class probability. ravel() flattens
# the (n, 1) output into the 1-D score array roc_curve expects.
y_test_pred = model.predict(X_test).ravel()
fpr_keras, tpr_keras, thresholds_keras = roc_curve(y_test, y_test_pred)
auc_keras = auc(fpr_keras, tpr_keras)
print('Testing data AUC: ', auc_keras)

# ROC curve plot, with the chance diagonal for reference.
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_keras, tpr_keras, label='Keras (area = {:.3f})'.format(auc_keras))
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
コード例 #6
0
ファイル: lime.py プロジェクト: SuhBoGyeong/MLCS-Internship
# Dense classifier over tabular features; `opt2`, the data arrays, and
# `features` are defined elsewhere in this file.
model = Sequential()
model.add(Dense(512, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Dense(128, activation='relu'))
model.add(Dense(32, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt2, metrics=['acc'])
history = model.fit(x_train, y_train, epochs=100, batch_size=30, verbose=1)
# NOTE(review): evaluate() returns [loss, acc] for this compile config,
# not an MSE, despite the label.
print('the mse value is : ', model.evaluate(x_train, y_train))

# FIX: predict_proba() and predict_classes() were removed from Sequential
# in TF 2.6. predict() returns the softmax probabilities, and argmax over
# the last axis recovers the hard class labels.
preds = model.predict(x_train, batch_size=None, verbose=1)
preds_label = preds.argmax(axis=-1)

# Tabular LIME explainer over the training features.
explainer = lime.lime_tabular.LimeTabularExplainer(x_train, feature_names=features,
                            class_names=['0', '1', '2'])
コード例 #7
0
        # NOTE(review): fragment -- the opening `model.fit(` call and the
        # rest of this script live outside this excerpt.
        X_train,
        Y_train,
        validation_split=0.3,
        batch_size=BATCH_SIZE,
        epochs=EPOCH_COUNT,
        verbose=2,
        shuffle=False,
        callbacks=[EarlyStopping(monitor='val_loss', patience=5)])

    # save model to file
    ExtraSensoryHelperFunctions.save_model_keras(
        model, SAVE_MODEL_NAME, ExtraSensoryHelperFunctions.MODEL_PATH)

    # predict
    pred = model.predict(X_test, verbose=1)
    # NOTE(review): predict_proba() was removed from Sequential in TF 2.6;
    # predict() already returns probabilities, so pred_proba appears to
    # duplicate `pred` before the thresholding below -- confirm.
    pred_proba = model.predict_proba(X_test)

    # Threshold the raw outputs into hard 0/1 multi-label predictions.
    pred[pred >= 0.5] = 1
    pred[pred < 0.5] = 0

    # One confusion matrix per label.
    conf_mat = multilabel_confusion_matrix(Y_test, pred)

    # summarize history for accuracy
    plt = ExtraSensoryHelperFunctions.PlotEpochVsAcc(plt, history)
    plt.savefig(cur_result_path + SAVE_MODEL_NAME + '_Acc.png')

    # summarize history for loss
コード例 #8
0
class CNNModel:
    """CNN image classifier: load data, build/train/evaluate the model,
    and predict single 32x32 RGB images.

    NOTE(review): depends on Image_Loader, layers, Sequential, tf, plt,
    load_img, get, BytesIO, asarray, and TqdmCallback being imported
    elsewhere in this file.
    """

    def __init__(self):
        # Data splits, training history, and the compiled model are filled
        # in by load_data() / build_model() / train().
        self.x_train = self.x_test = self.y_train = self.y_test = None
        self.history = None
        self.model = None
        self.model_dir = 'models/'

    def load_data(self):
        """Load train/test images and labels via Image_Loader."""
        IMG = Image_Loader()
        [self.x_train, self.y_train, self.x_test,
         self.y_test] = IMG.load_images()
        print('Training and testing data loaded.')

    def build_model(self):
        """Build and compile a three-stage conv net with a 10-way head."""
        x_shape = self.x_train[0].shape
        self.model = Sequential()
        # Only the first layer needs input_shape; the redundant copies on
        # the later conv layers have been dropped.
        self.model.add(
            layers.Conv2D(32, (3, 3),
                          activation='relu',
                          kernel_initializer='he_uniform',
                          padding='same',
                          input_shape=x_shape))
        self.model.add(layers.MaxPooling2D(2, 2))
        self.model.add(layers.Dropout(0.2))
        self.model.add(
            layers.Conv2D(64, (3, 3),
                          activation='relu',
                          kernel_initializer='he_uniform',
                          padding='same'))
        self.model.add(layers.MaxPooling2D(2, 2))
        self.model.add(layers.Dropout(0.2))
        self.model.add(
            layers.Conv2D(128, (3, 3),
                          activation='relu',
                          kernel_initializer='he_uniform',
                          padding='same'))
        self.model.add(layers.MaxPooling2D(2, 2))
        self.model.add(layers.Dropout(0.2))
        self.model.add(layers.Flatten())
        self.model.add(
            layers.Dense(128,
                         activation='relu',
                         kernel_initializer='he_uniform'))
        # FIX: a 10-way classifier trained with categorical_crossentropy
        # needs a softmax output (sigmoid does not produce a distribution),
        # and the Dropout that previously FOLLOWED the output layer has
        # been removed -- randomly zeroing final predictions only corrupts
        # the training signal.
        self.model.add(layers.Dense(10, activation='softmax'))
        self.model.compile(loss='categorical_crossentropy',
                           metrics=['accuracy'],
                           optimizer='adam')
        self.model.summary()

    def train(self):
        """Fit for 100 epochs, validating on the test split."""
        num_epochs = 100
        num_batches = 64
        self.history = self.model.fit(self.x_train,
                                      self.y_train,
                                      batch_size=num_batches,
                                      epochs=num_epochs,
                                      validation_data=(self.x_test,
                                                       self.y_test),
                                      callbacks=[TqdmCallback()])

    def eval_model(self):
        """Print train/test scores and plot the accuracy/loss curves."""
        try:
            score = self.model.evaluate(self.x_train, self.y_train)
            print("Training Loss: ", score[0])
            print("Training Accuracy: ", score[1])
            score = self.model.evaluate(self.x_test, self.y_test)
            print("Testing Loss: ", score[0])
            print("Testing Accuracy: ", score[1])

            if (self.history):
                plt.subplot(1, 2, 1)
                plt.plot(self.history.history['accuracy'], label='accuracy')
                plt.plot(self.history.history['val_accuracy'],
                         label='val_accuracy')
                plt.title('Training and Validation Accuracy')
                plt.xlabel('Epoch')
                plt.ylabel('Accuracy')
                plt.ylim([0.7, 1])
                plt.legend(loc='lower right')

                plt.subplot(1, 2, 2)
                plt.plot(self.history.history['loss'], label='loss')
                plt.plot(self.history.history['val_loss'], label='val_loss')
                plt.title('Training and Validation Loss')
                plt.xlabel('Epoch')
                # FIX: this is the loss subplot -- the 'Accuracy' y-label
                # and the accuracy-range ylim were copy-pasted from the
                # accuracy subplot above.
                plt.ylabel('Loss')
                plt.legend(loc='upper right')
        # FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        except Exception:
            if (self.x_train is None):
                print(
                    'Train and test data not loaded. Run CNNModel.load_data().'
                )
            else:
                print(
                    'Please make sure train and test data are loaded correctly.'
                )

    def load_image(self, image, url=0):
        """Load one 32x32 RGB image and return a normalized float32 array
        of shape (1, 32, 32, 3).

        Args:
            image: file path, or URL when url=1.
            url: set to 1 if the image must be fetched over HTTP.
        """
        if url:
            resp = get(image)
            img_bytes = BytesIO(resp.content)
            img = load_img(img_bytes, target_size=(32, 32))
        else:
            img = load_img(image, target_size=(32, 32))
        img_pix = asarray(img)
        img_pix = img_pix.reshape(1, 32, 32, 3)
        img_pix = img_pix.astype('float32')
        # Scale pixel values to [0, 1] to match training preprocessing --
        # assumed; TODO confirm against Image_Loader.
        img_pix = img_pix / 255.0
        print('Image loaded.')
        return img_pix

    def predict(self, img_pix):
        """Return {'prediction': class index, 'confidence': probability}
        for one preprocessed image batch of shape (1, 32, 32, 3)."""
        # FIX: predict_classes()/predict_proba() were removed from
        # Sequential in TF 2.6; a single predict() plus argmax replaces
        # both calls.
        probs = self.model.predict(img_pix)[0]
        y_pred = probs.argmax()
        y_prob = probs[y_pred]
        pred_dict = {'prediction': str(y_pred), 'confidence': str(y_prob)}
        return pred_dict

    def serialize(self, filename):
        """Save the model to models/<filename> in HDF5 format."""
        file = self.model_dir + filename
        self.model.save(file, save_format='h5')
        print('Model saved.')

    def deserialize(self, filename):
        """Load a previously saved model from models/<filename>."""
        file = self.model_dir + filename
        self.model = tf.keras.models.load_model(file)
        print('Model loaded.')