Example #1
def classifierModel(X_train, y_train, modelType):
    if modelType == 1:
        model = LogisticRegression(class_weight='balanced',
                                   solver='lbfgs',
                                   max_iter=1500)
    elif modelType == 2:
        model = GaussianNB()
    elif modelType == 3:
        model = RandomForestClassifier(max_depth=5,
                                       random_state=2,
                                       n_estimators=750)
    elif modelType == 4:
        model = ExtraTreesClassifier(random_state=2, n_estimators=1000)
    elif modelType == 5:
        model = Sequential([
            Flatten(input_shape=(17, )),
            Dense(32, activation='relu'),
            Dense(32, activation='relu'),
            Dense(1, activation='sigmoid'),
        ])
        model.compile(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
        hist = model.fit(X_train, y_train, batch_size=1)
        return (hist, model)
    else:
        print('Unknown model type!')
        return

    model.fit(X_train, y_train)
    return model
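A minimal usage sketch (not part of the original example): it assumes a small synthetic dataset and the scikit-learn imports that classifierModel relies on.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier

# Synthetic stand-in data: 17 features to match the Keras branch above
rng = np.random.default_rng(0)
X_train = rng.normal(size=(200, 17))
y_train = (X_train[:, 0] > 0).astype(int)

rf = classifierModel(X_train, y_train, modelType=3)  # fitted RandomForestClassifier
print(rf.predict(X_train[:5]))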
Example #2
def stacking_train_predict(X_train, y_train, X_test, y_test=None, estimator='average'):
    if estimator == 'average':
        y_pred = X_test.apply('mean', axis=1)
    
    if estimator == 'logreg':
        model = LogisticRegression()
        params = {'C': np.arange(0.05,5,0.05)}
        clf = GridSearchCV(model, params, cv=10, scoring='neg_log_loss', refit=True)
        clf.fit(X_train, y_train)
        y_pred = clf.predict_proba(X_test)[:,1]

    if estimator == 'nn':
        model = Sequential()
        model.add(Dense(128, input_dim=X_train.shape[1], activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(1, activation='sigmoid'))
        opti = Adam(lr=0.01, decay=0.01)
        model.compile(loss='binary_crossentropy', optimizer=opti, metrics=['accuracy'])
        validation_data = (X_test, y_test) if y_test is not None else None
        model.fit(X_train, y_train, epochs=200, batch_size=64, verbose=1, validation_data=validation_data)
        y_pred = model.predict(X_test).transpose()[0]
        y_pred = np.clip(y_pred, 0.0001, 0.9999)
    
    return y_pred
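A hedged usage sketch for the 'average' stacking path above; the synthetic base-model probabilities and column names are assumptions, not from the source.

import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
y_train = rng.integers(0, 2, size=200)
# hypothetical out-of-fold probabilities from two base models
X_train = pd.DataFrame({'m1': rng.random(200), 'm2': rng.random(200)})
X_test = pd.DataFrame({'m1': rng.random(50), 'm2': rng.random(50)})

blended = stacking_train_predict(X_train, y_train, X_test, estimator='average')
print(blended.head())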
    def create_model(self, model_name):
        if model_name == 'SVM':
            model = svm.SVC(kernel='linear', C=10, max_iter=10000)
        elif model_name == 'Logistic':
            model = LogisticRegression(max_iter=1000)
        elif model_name == 'CNN':
            patience = 5
            krs = (3, 3)
            input_shape = (128, 128, 3)
            num_classes = 2
            model = Sequential()
            model.add(Conv2D(32, kernel_size=krs, input_shape=input_shape))

            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(BatchNormalization())
            model.add(LeakyReLU(alpha=0.05))
            model.add(Dropout(0.3))
            '''model.add(Conv2D(64, krs))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(BatchNormalization())
            model.add(LeakyReLU(alpha=0.05))
            model.add(Dropout(0.3))
            
            model.add(Conv2D(128, krs))
            model.add(MaxPooling2D(pool_size=(2, 2)))
            model.add(BatchNormalization())
            model.add(Activation('relu'))
            model.add(Dropout(0.3))'''

            model.add(Flatten())

            # model.add(Dense(1024))
            # model.add(LeakyReLU(alpha=0.05))
            # model.add(Dropout(0.5))

            # model.add(Dense(512))
            #model.add(LeakyReLU(alpha=0.05))
            #model.add(Dropout(0.3))

            model.add(Dense(128))
            model.add(LeakyReLU(alpha=0.05))
            model.add(Dropout(0.3))

            model.add(Dense(num_classes, activation='softmax'))
            model.compile(loss=keras.losses.categorical_crossentropy,
                          optimizer=keras.optimizers.Adam(lr=0.00001),
                          metrics=['accuracy'])
        return model
Example #4

input_dim = xtrain.shape[1]
#nb_classes = y_train.shape[1]

model = Sequential()
model.add(Dense(input_dim=input_dim, units=1))
model.add(Activation('sigmoid'))
from keras import optimizers
opt = optimizers.Nadam(lr=0.002,
                       beta_1=0.9,
                       beta_2=0.999,
                       epsilon=1e-08,
                       schedule_decay=0.004)
model.compile(loss=binary_focal_loss(gamma=2, alpha=0.9),
              optimizer=opt,
              metrics=[f1])

history = model.fit(xtrain, ytrain, epochs=15, batch_size=500, verbose=1)

# Training
print(
    "------------------Training performance--------------------------------------"
)
print("AUC Score (test): %f" % roc_auc_score(ytrain, y_predprob))
prediction = model.predict(xtrain)
print("Precison is", precision_score(ytrain, prediction, average='binary'))
print("Recall is", recall_score(ytrain, prediction, average='binary'))
print("F1 score is", f1_score(ytrain, prediction, average='binary'))
print("Accuracy is", model.score(xtrain, ytrain))
print(confusion_matrix(ytrain, prediction))
Example #5
print(lstm_train_x.shape,lstm_test_x.shape)
print(lstm_train_y.shape,lstm_test_y.shape)

import tensorflow as tf
import keras
from keras.models import Sequential, Model
from keras.layers.recurrent import LSTM
from keras.layers.core import Dense, Dropout, Activation
from keras.callbacks import EarlyStopping

clf = Sequential()
clf.add(LSTM(units=150, input_shape=(lb, lstm_train_x.shape[2])))
clf.add(Dropout(0.2))
clf.add(Dense(units=1, activation='sigmoid'))  # sigmoid output to match binary_crossentropy

clf.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss='binary_crossentropy', metrics=['accuracy'])
clf.fit(lstm_train_x, lstm_train_y, epochs=100, batch_size=100, callbacks=[EarlyStopping(monitor='loss', mode='min')])

test_loss, test_acc = clf.evaluate(lstm_test_x,lstm_test_y)
train_loss, train_acc = clf.evaluate(lstm_train_x,lstm_train_y)
print("Single Layer LSTM")
print("Train accuracy", train_acc)
print("Test accuracy", test_acc)

clf = Sequential()
clf.add(LSTM(units=150,return_sequences = True, input_shape=(lb,lstm_train_x.shape[2])))
clf.add(Dropout(0.3))
clf.add(Activation('relu'))
clf.add(LSTM(units=100, return_sequences = True))
clf.add(Dropout(0.3))
clf.add(Activation('relu'))
Example #6
import tensorflow as tf

tf.test.gpu_device_name()

# Create a simple model
import tensorflow.keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

model = Sequential()

model.add(
    Dense(20, kernel_initializer="uniform", activation="relu", input_dim=40))
model.add(Dense(1, kernel_initializer="uniform", activation="sigmoid"))

model.compile(optimizer="adam",
              loss="binary_crossentropy",
              metrics=["accuracy"])

# Display Model Summary and Show Parameters
model.summary()

# Start Training Our Classifier

batch_size = 64
epochs = 25

history = model.fit(X_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1)
n_epochs=20
n_batch =1

model = Sequential()
model.add(Dense(100, input_dim=X_train.shape[1], activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(100, kernel_regularizer=regularizers.l2(0.01),
                  activity_regularizer=regularizers.l1(0.01),
                  activation='relu'))
#model.add(LSTM(300,return_sequences=True,activation='tanh',stateful=True,
#               batch_input_shape=(n_batch, time_step, X_train.shape[2])))
#model.add(LSTM(100, input_shape=(time_step,n_features)))
model.add(Dense(Y_train.shape[1], activation='softmax'))
# Compile model
adam=keras.optimizers.Adam(lr=0.01, beta_1=0.99, beta_2=0.99, epsilon=None, decay=0.0)
model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
checkpoint = ModelCheckpoint(filepath="best_weights_mod5.hdf5", monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
history = model.fit(X_train,Y_train,validation_data=(X_test,Y_test),callbacks=callbacks_list,verbose=1,epochs=n_epochs,batch_size=n_batch)
### collect accuracy history for plotting

acc_mod1=[]
val_acc_mod1=[]
acc_mod1.append(history.history["acc"])
val_acc_mod1.append(history.history["val_acc"])
acc_mod1=pd.DataFrame(np.array(acc_mod1).reshape(n_epochs,1))
val_acc_mod1=pd.DataFrame(np.array(val_acc_mod1).reshape(n_epochs,1))
acc_mod1.rename(columns={acc_mod1.columns[0]:"Accuracy"},inplace=True)
val_acc_mod1.rename(columns={val_acc_mod1.columns[0]:"Val_Accuracy"},inplace=True)

Acc_mod=acc_mod1.join(val_acc_mod1)
model.add(Dropout(0.2))
model.add(Dense(40, input_shape=(40, ), activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(40, input_shape=(40, ), activation='relu'))
# output layer
model.add(Dense(5, input_shape=(40, ), activation='softmax'))

#Model details
model.output_shape
model.summary()
model.get_config()
model.get_weights()

model.compile(
    loss='categorical_crossentropy',  # categorical_crossentropy, binary_crossentropy
    optimizer='adam',                 # SGD, RMSprop, adam
    metrics=['accuracy'])

model.fit(X_train, y_train, epochs=100, batch_size=10, verbose=1)

#Evaluation
pred = model.predict(X_test)
score = model.evaluate(X_test, y_test, verbose=1)

y_pred = np.argmax(pred, axis=1)
y_test = np.argmax(y_test, axis=1)

# Import the modules from `sklearn.metrics`
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, cohen_kappa_score
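The snippet breaks off right after this import; a plausible continuation (an assumption, using macro averaging since the labels above have five classes) would be:

print(confusion_matrix(y_test, y_pred))
print("Precision:", precision_score(y_test, y_pred, average='macro'))
print("Recall:", recall_score(y_test, y_pred, average='macro'))
print("F1 score:", f1_score(y_test, y_pred, average='macro'))
print("Cohen's kappa:", cohen_kappa_score(y_test, y_pred))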
Example #9
print(example_batch_predictions.shape, "# (batch_size, sequence_length, vocab_size)")

model.summary()

sampled_indices = tf.random.categorical(example_batch_predictions[0], num_samples=1)
sampled_indices = tf.squeeze(sampled_indices,axis=-1).numpy()

def loss(labels, logits):
  return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)

example_batch_loss  = loss(target_example_batch, example_batch_predictions)
print("Prediction shape: ", example_batch_predictions.shape, " # (batch_size, sequence_length, vocab_size)") 
print("scalar_loss:      ", example_batch_loss.numpy().mean())

model.compile(
    optimizer = tf.keras.optimizers.Adam(),
    loss = loss)

# Directory where the checkpoints will be saved
checkpoint_dir = './training_checkpoints'
# Name of the checkpoint files
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")

checkpoint_callback=tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_prefix,
    save_weights_only=True)

EPOCHS=1


Example #10
def logistic_all(trainX, testX, trainy, testy, path, n):
    os.makedirs(path, exist_ok=True)
    ns_probs = [0 for _ in range(testy.shape[0])]
    model = LogisticRegression(solver='lbfgs', max_iter=int(1e10), tol=1e-16)
    model.fit(trainX, trainy)

    # predict probabilities
    lr_probs = model.predict_proba(testX)
    # keep probabilities for the positive outcome only
    lr_probs = lr_probs[:, 1]
    # calculate scores
    ns_auc = roc_auc_score(testy, ns_probs)
    lr_auc = roc_auc_score(testy, lr_probs)
    # calculate roc curves
    ns_fpr, ns_tpr, _ = roc_curve(testy, ns_probs)
    lr_fpr, lr_tpr, _ = roc_curve(testy, lr_probs)
    # plot the roc curve for the model
    pyplot.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')
    pyplot.plot(lr_fpr, lr_tpr, marker='.', label='Logistic')
    # axis labels
    pyplot.xlabel('False Positive Rate')
    pyplot.ylabel('True Positive Rate')
    # show the legend
    pyplot.legend()
    # show the plot
    yhat = model.predict(testX)
    mse = mean_squared_error(testy, yhat)
    aic = calculate_aic(testy.shape[0], mse, 1)
    bic = calculate_bic(testy.shape[0], mse, 1)
    lr_precision, lr_recall, _ = precision_recall_curve(testy, lr_probs)
    lr_f1, lr_auc = f1_score(testy, yhat), auc(lr_recall, lr_precision)

    pyplot.savefig(path + '/roc.png')
    pyplot.clf()
    sf_2(trainX, testX, trainy, testy, path)

    testX = (testX - np.min(trainX) + 1e-5) / (np.max(trainX) -
                                               np.min(trainX) + 1e-5)
    trainX = (trainX - np.min(trainX) + 1e-5) / (np.max(trainX) -
                                                 np.min(trainX) + 1e-5)

    model = tf.keras.models.Sequential(
        [tf.keras.layers.Dense(1, activation='sigmoid')])
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    history = model.fit(trainX,
                        trainy,
                        epochs=n,
                        verbose=0,
                        validation_data=(testX, testy))
    start = time.process_time()
    _, train_acc = model.evaluate(trainX, trainy, verbose=0)
    _, test_acc = model.evaluate(testX, testy, verbose=0)
    time_taken = time.process_time() - start

    predy = model.predict(trainX)
    resid = np.array([trainy[i] - predy[i] for i in range(trainy.shape[0])])
    ll_fit = -np.sum(np.abs(resid))
    ns_probs = [0 for _ in range(trainy.shape[0])]
    ll_overall = -np.sum(
        np.array([trainy[i] - ns_probs[i] for i in range(trainy.shape[0])]))
    r2 = 1 - (ll_fit / ll_overall)

    pyplot.plot(history.history['accuracy'])
    pyplot.plot(history.history['val_accuracy'])
    pyplot.ylim([0, 1.1])
    pyplot.ylabel('accuracy')
    pyplot.xlabel('epoch')
    pyplot.legend(['accuracy', 'val_accuracy'], loc='lower right')
    pyplot.savefig(path + '/acc.png')
    pyplot.clf()

    return ns_auc, lr_auc, lr_f1, train_acc, test_acc, mse, aic, bic, r2, time_taken / (
        trainX.shape[0] + testX.shape[0])
def train_model(classifier):
    if (classifier == 'LR'):
        model = LogisticRegression(random_state=seed)
        model.fit(X_train, y_train)
        return model
    if (classifier == 'KNN'):
        print("\n  K TREINO  TESTE")
        print(" -- ------ ------")
        for k in range(1, 130, 2):
            model = KNeighborsClassifier(n_neighbors=k,
                                         weights='uniform',
                                         metric='minkowski',
                                         p=2)
            model = model.fit(X_train, y_train)
            y_resposta_treino = model.predict(X_train)
            y_resposta_teste = model.predict(X_test)
            acuracia_treino = sum(y_resposta_treino == y_train) / len(y_train)
            acuracia_teste = sum(y_resposta_teste == y_test) / len(y_test)
            print("%3d" % k, "%6.1f" % (100 * acuracia_treino),
                  "%6.1f" % (100 * acuracia_teste))
        return model
    if (classifier == 'SV'):
        model = SVC(kernel='linear', random_state=seed)  # kernel = 'rbf'
        model.fit(X_train, y_train)
        return model
    if (classifier == 'NB'):
        model = GaussianNB()
        model.fit(X_train, y_train)
        return model
    if (classifier == 'DT'):
        model = DecisionTreeClassifier(criterion='entropy', random_state=seed)
        model.fit(X_train, y_train)
        return model
    if (classifier == 'RF'):
        # Hyperparameters selected after the search:
        model = RandomForestClassifier(n_estimators=1600,
                                       min_samples_split=2,
                                       min_samples_leaf=4,
                                       max_features='sqrt',
                                       max_depth=10,
                                       bootstrap=True,
                                       random_state=seed)
        model.fit(X_train, y_train)
        print(model.feature_importances_)
        return model
    if (classifier == 'RG'):
        model = RidgeClassifier(alpha=1,
                                class_weight='balanced',
                                solver='auto')
        model.fit(X_train, y_train)
        return model
    if (classifier == 'GBC'):
        # Hyperparameters selected after the search:
        model = GradientBoostingClassifier(
            random_state=seed,
            n_estimators=200,
            min_samples_split=5,
            min_samples_leaf=1,
            max_features='sqrt',
            max_depth=10,
        )
        rfe = RFE(model)
        rfe = rfe.fit(X_train, y_train)
        return rfe
    if (classifier == 'MLP'):
        kfold = StratifiedKFold(n_splits=3, shuffle=True, random_state=seed)
        cvscores = []
        for treino, teste in kfold.split(X_train, y_train):
            model = tf.keras.models.Sequential()
            model.add(tf.keras.layers.Dense(units=20, activation='relu'))
            model.add(tf.keras.layers.Dense(units=10, activation='relu'))
            model.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))
            model.compile(optimizer='adam',
                          loss='binary_crossentropy',
                          metrics=['accuracy'])
            # Train and score on the fold indices (assumes X_train/y_train support integer-array indexing)
            model.fit(X_train[treino], y_train[treino], batch_size=32, epochs=100, verbose=0)
            scores = model.evaluate(X_train[teste], y_train[teste], verbose=0)
            print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
            cvscores.append(scores[1] * 100)
        print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))
        model.summary()
        return model
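A minimal usage sketch for train_model, assuming X_train, y_train, X_test, y_test and seed are already defined globally as the function expects:

rf_model = train_model('RF')                        # returns a fitted RandomForestClassifier
print("Test accuracy:", rf_model.score(X_test, y_test))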
# Neural Network Architecture

# Create initial set of linear layers
model=Sequential()
# Now, add to our linear layers and note their neurons in each added layer
# Input dimension only needs to be noted for the first layer and it is the number of features/columns
model.add(Dense(input_dim=27, units=8, activation='relu', name='output_1'))
model.add(Dense(units=16, activation='relu', name='output_2'))
# Make sure the output layer has two neurons, one for each attrition class
model.add(Dense(units=2, activation='sigmoid'))

# Compile the Network
# More information on optimizer types:
# https://keras.io/optimizers/
model.compile(optimizer=Adam(lr=0.01), loss='binary_crossentropy', metrics=['accuracy'])
# loss='binary_crossentropy' specifies that your model should optimize the log 
# loss for binary classification.  
# metrics=['accuracy'] specifies that accuracy should be printed out

# Review NN configuration
model.summary()

History = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, verbose=1)

model.predict_classes(X_test)

# Log Loss over time
plt.figure(figsize=(5,5))
plt.plot(History.history['loss'])
plt.plot(History.history['val_loss'])
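A hedged follow-up sketch plotting the accuracy curves next to the loss curves above; the key-name check is an assumption to cover both older and newer Keras spellings:

acc_key = 'accuracy' if 'accuracy' in History.history else 'acc'
plt.figure(figsize=(5, 5))
plt.plot(History.history[acc_key], label='train accuracy')
plt.plot(History.history['val_' + acc_key], label='validation accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()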
Example #13
class modelClass:
    def __init__(self):

        #Model parameters
        self.nCNN = None
        self.nDense = None
        self.nEmbedding = None
        self.nCNNFilters = None
        self.nNNFilters = None
        self.nKernel = None
        self.nStrides = None
        self.poolSize = None


        self.vocab_size = None
        self.maxLen = None

        self.nClasses = None

        #Data
        self.xTrain = None
        self.yTrain = None
        self.xValidation = None
        self.yValidation = None
        self.xTest = None
        self.yTest = None
        
        self.model = None

    def loadDataOneHot(self):        
        dataFrame = pd.read_csv(os.path.join(DATA_DIR, "dataset_examples.tsv"),sep="\t")

        vectorizer = CountVectorizer()
        texts = vectorizer.fit_transform(list(dataFrame["text"]))
        


        #make labels into 0,1
        encoder = LabelBinarizer()
        labels = encoder.fit_transform(list(dataFrame["sentiment"]))

        self.stratifyData(texts,labels)
    
    def loadDataSequence(self):
        #dataFrame = pd.read_csv("/content/drive/My Drive/Colab Notebooks/data/dataset_examples.tsv",sep="\t")
        dataFrame = pd.read_csv(os.path.join(DATA_DIR, "dataset_examples.tsv"),sep="\t")
        

        texts = list(dataFrame["text"])
        tokenizer = Tokenizer()
        
        tokenizer.fit_on_texts(texts)
        self.vocab_size = len(tokenizer.word_index) + 1
        sequenceText = tokenizer.texts_to_sequences(texts)

        self.maxLen = max([len(text) for text in sequenceText ])
        padSequenceText = pad_sequences(sequenceText,padding = "post",maxlen = self.maxLen )


        #make labels into 0,1
        encoder = LabelBinarizer()
        #encoder = LabelEncoder()
        #labels = to_categorical(dataFrame["sentiment"])
        labels = encoder.fit_transform(dataFrame["sentiment"])
        #labels = labels.flatten()
        self.stratifyData(padSequenceText,labels)
    
    def stratifyData(self,texts,labels):
        """
        The given data is split, stratified by label, into 70% training, 15% validation, and 15% test sets.
        """
        xTrain,xValidation,yTrain,yValidation =  train_test_split(texts,labels,test_size = 0.3,random_state=42,stratify=labels)
        xValidation,xTest,yValidation,yTest =  train_test_split(xValidation,yValidation,test_size = 0.5,random_state=42,stratify=yValidation)

        
        self.xTrain = xTrain
        self.xValidation = xValidation
        self.xTest = xTest
        self.yTrainDecoded = yTrain
        self.yTrain = yTrain
        self.yValidation = yValidation
        self.yTest = yTest
        self.nClasses = len(set(yTest.flatten()))
    
    def optimizeLR(self,C):
        print("C is right now",C[0][0])
        self.model = LogisticRegression(C=C[0][0])
        score = self.crossEval(10)
        return score

    def optimizeCNN(self,variables):# nDense, nEmbedding, nCNNFilters, nNNFilters, nKernel, nStrides,poolSize):
        self.nCNN = int(variables[0][0])
        self.nDense = int(variables[0][1])
        self.nEmbedding = int(variables[0][2])
        self.nCNNFilters = int(variables[0][3])
        self.nNNFilters = int(variables[0][4])
        self.nKernel = int(variables[0][5])
        self.nStrides = int(variables[0][6])
        self.poolSize = int(variables[0][7])

        self.buildCNN()

        score = self.crossEval(10)
        return score
    
    def addConvLayer(self,model):
        model.add(Conv1D(kernel_size = self.nKernel, filters = self.nCNNFilters,strides = self.nStrides, padding="valid" ))
        model.add(Activation("elu"))
        model.add(BatchNormalization())
        model.add(MaxPool1D(pool_size = self.poolSize,padding="valid"))
        return model

    def buildCNN(self):
        
        model = Sequential()
        model.add(Embedding(input_dim = self.vocab_size, output_dim = self.nEmbedding, input_length = self.maxLen ))

        #add nCNN conv layers 
        for _ in range(0,self.nCNN):
            model = self.addConvLayer(model)
        
        model.add(Flatten())
        #add nDense
        
        for _ in range(0,self.nDense):
            model.add(Dense(self.nNNFilters))
        
        model.add(Dense(1, activation = "softmax" ))
        model.compile(loss='binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])

        self.model = model

    
        
    

    def buildLR(self,C):
        self.model = LogisticRegression(C=C)
    
    def crossEval(self,folds):

        skf = StratifiedKFold(n_splits=folds, shuffle=True,random_state=42)
        type1 ="<class 'sklearn.linear_model.logistic.LogisticRegression'>"
        count = 1
        scores = []
        for train,test in skf.split(self.xTrain,self.yTrain):
            
            if isinstance(self.model, LogisticRegression):  # robust check instead of comparing the type string
                self.model.fit(self.xTrain[train],self.yTrain[train])
                score = self.model.score(self.xTrain[test],self.yTrain[test])
            else:
                self.buildCNN()
                self.model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
                self.model.fit(self.xTrain[train],self.yTrain[train],epochs=100,batch_size=10,verbose=0)
                score = self.model.evaluate(self.xTrain[test], self.yTrain[test], verbose=0)[1]  # keep only the accuracy so scores stay scalar
            
            print("The score of iteration ", count, "was", score)
            scores.append(score)
            count = count+1
        
        meanScore = np.mean(scores)*100
        print("Final score was ", meanScore)

        return 1-meanScore
    
    def trainModel(self):
        self.model.fit(self.xTrain,self.yTrain)
    
    def validateModel(self):
        print("Validation score is", self.model.score(self.xValidation,self.yValidation))

    def testModel(self):
        print("test score is", self.model.score(self.xTest,self.yTest))
Example #14
def GFM_MLC(args):

    # Parameters
    batch_size = 32
    dataset = args.dataset
    epochs = 1000  # early stopping on validation data
    verbosity = 0
    sklearn = False
    c = args.c
    print('Amount of regularization= {:.3f}'.format(c))

    features_train = np.load(
        '../data/{}/features/features_train_max.npy'.format(dataset))
    features_validation = np.load(
        '../data/{}/features/features_validation_max.npy'.format(dataset))
    features_test = np.load(
        '../data/{}/features/features_test_max.npy'.format(dataset))
    n_features = features_train.shape[1]

    # rescale
    from sklearn.preprocessing import StandardScaler
    featurescaler = StandardScaler().fit(features_train)

    features_train = featurescaler.transform(features_train)
    features_validation = featurescaler.transform(features_validation)
    features_test = featurescaler.transform(features_test)

    csv_path_train = '../data/{}/TRAIN.csv'.format(dataset)
    csv_path_validation = '../data/{}/VALIDATION.csv'.format(dataset)
    csv_path_test = '../data/{}/TEST.csv'.format(dataset)

    df_train = pd.read_csv(csv_path_train)
    df_validation = pd.read_csv(csv_path_validation)
    df_test = pd.read_csv(csv_path_test)

    train_steps = np.ceil(len(df_train) / batch_size)
    validation_steps = np.ceil(len(df_validation) / batch_size)
    test_steps = np.ceil(len(df_test) / batch_size)

    # Extract ground truth labels
    y_true_train = np.array([
        ast.literal_eval(df_train['marginal_labels'][i])
        for i in range(len(df_train))
    ])
    y_true_validation = np.array([
        ast.literal_eval(df_validation['marginal_labels'][i])
        for i in range(len(df_validation))
    ])
    y_true_test = np.array([
        ast.literal_eval(df_test['marginal_labels'][i])
        for i in range(len(df_test))
    ])

    n_labels = y_true_train.shape[1]

    y_gfm_train = np.array([
        ast.literal_eval(df_train['gfm_labels'][i])
        for i in range(len(df_train))
    ])
    y_gfm_validation = np.array([
        ast.literal_eval(df_validation['gfm_labels'][i])
        for i in range(len(df_validation))
    ])

    # Compute max_s: the maximum number of positive labels for a single instance
    max_s = np.max(
        np.array([
            np.max(np.sum(y_true_train, axis=1)),
            np.max(np.sum(y_true_validation, axis=1)),
            np.max(np.sum(y_true_test, axis=1))
        ]))

    # Containers
    GFM_train_entries = []
    GFM_validation_entries = []
    GFM_test_entries = []

    for label in range(n_labels):
        # print('Label {} of {}...'.format(label, n_labels))
        # extract one multinomial regression problem
        if sklearn:
            y_label_train = np.argmax(y_gfm_train[:, label, :], axis=1)
            y_label_validation = np.argmax(y_gfm_validation[:, label, :],
                                           axis=1)
        else:
            y_label_train = y_gfm_train[:, label, :]
            y_label_validation = y_gfm_validation[:, label, :]
        # print(y_label_train.shape)

        if sklearn:
            from sklearn.linear_model import LogisticRegression
            model = LogisticRegression(multi_class='ovr', solver='lbfgs', C=c)

        else:
            model = GFM_labelwise_classifier(n_features, max_s + 1, c).model
            optimizer = Adam()
            model.compile(loss='categorical_crossentropy', optimizer=optimizer)
            callbacks = [
                EarlyStopping(monitor='val_loss',
                              min_delta=0,
                              patience=3,
                              verbose=verbosity,
                              mode='auto'),
                ModelCheckpoint(
                    '../models/GFMMLC_labelwise_{}.h5'.format(dataset),
                    monitor='val_loss',
                    save_best_only=True,
                    verbose=verbosity)
            ]

            model.fit(x=features_train,
                      y=y_label_train,
                      batch_size=batch_size,
                      epochs=epochs,
                      verbose=verbosity,
                      callbacks=callbacks,
                      validation_data=(features_validation,
                                       y_label_validation))
            # Load best model
            model.load_weights(
                '../models/GFMMLC_labelwise_{}.h5'.format(dataset))
            model.compile(loss='categorical_crossentropy', optimizer=optimizer)

        pis_train = model.predict(features_train)
        pis_validation = model.predict(features_validation)
        pis_test = model.predict(features_test)

        if sklearn:
            from sklearn.preprocessing import OneHotEncoder
            enc = OneHotEncoder()
            enc.fit(
                np.argmax(np.argmax(y_gfm_train[:, :, :], axis=1),
                          axis=1).reshape(-1, 1))
            pis_train = enc.transform(pis_train.reshape(-1, 1)).toarray()
            pis_validation = enc.transform(pis_validation.reshape(
                -1, 1)).toarray()
            pis_test = enc.transform(pis_test.reshape(-1, 1)).toarray()

        GFM_train_entries.append(pis_train)
        GFM_validation_entries.append(pis_validation)
        GFM_test_entries.append(pis_test)

    # Combine all the predictions
    pis_train = np.stack(GFM_train_entries).transpose(1, 0, 2)
    pis_validation = np.stack(GFM_validation_entries).transpose(1, 0, 2)
    pis_test = np.stack(GFM_test_entries).transpose(1, 0, 2)

    pis_train_final = [
        complete_matrix_columns_with_zeros(mat[:, 1:], len=n_labels)
        for mat in pis_train
    ]
    pis_validation_final = [
        complete_matrix_columns_with_zeros(mat[:, 1:], len=n_labels)
        for mat in pis_validation
    ]
    pis_test_final = [
        complete_matrix_columns_with_zeros(mat[:, 1:], len=n_labels)
        for mat in pis_test
    ]

    # Compute optimal predictions for F1
    for beta in [1, 2]:
        GFM = GeneralFMaximizer(beta, n_labels)

        # Run GFM algo on this output
        (optimal_predictions_train,
         E_F_train) = GFM.get_predictions(predictions=pis_train_final)
        (optimal_predictions_validation, E_F_validation) = GFM.get_predictions(
            predictions=pis_validation_final)
        (optimal_predictions_test,
         E_F_test) = GFM.get_predictions(predictions=pis_test_final)

        # Evaluate F score
        F_train = compute_F_score(y_true_train,
                                  optimal_predictions_train,
                                  t=0.5,
                                  beta=beta)
        F_validation = compute_F_score(y_true_validation,
                                       optimal_predictions_validation,
                                       t=0.5,
                                       beta=beta)
        F_test = compute_F_score(y_true_test,
                                 optimal_predictions_test,
                                 t=0.5,
                                 beta=beta)

        print('GFM_MLC ({})'.format(dataset))
        print('-' * 50)
        # print('F{} score on training data: {:.4f}'.format(beta, F_train))
        # print('F{} score on validation data: {:.4f}'.format(beta, F_validation))
        print('F{} score on test data: {:.4f}'.format(beta, F_test))

        # Store test set predictions to submit to Kaggle
        if (dataset == 'KAGGLE_PLANET') and (beta == 2):
            # Map predictions to filenames
            def filepath_to_filename(s):
                return os.path.basename(os.path.normpath(s)).split('.')[0]

            test_filenames = [
                filepath_to_filename(f) for f in df_test['full_path']
            ]
            GFM_predictions_mapping = dict(
                zip(test_filenames, [
                    csv_helpers.decode_label_vector(f)
                    for f in optimal_predictions_test
                ]))
            # Create submission file
            csv_helpers.create_submission_file(GFM_predictions_mapping,
                                               name='Planet_GFM_MC_labelwise')
import keras
from sklearn import datasets
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils

# encode class values as integers
encoder = LabelEncoder()
encoder.fit(label)
encoded_Y = encoder.transform(label)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y = np_utils.to_categorical(encoded_Y)

from sklearn.model_selection import train_test_split
from keras.callbacks import TensorBoard
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.core import Dropout
from time import time
from keras import losses

model = Sequential()
model.add(Dense(8, input_dim=4, activation='relu'))
model.add(Dense(80, activation='relu'))
model.add(Dense(80, activation='relu'))
model.add(Dense(80, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(loss=losses.mean_squared_error,
              optimizer='sgd',
              metrics=['accuracy'])
model.fit(feature, dummy_y, batch_size=32, epochs=1000)
Example #16
results = results.append(model_results,ignore_index = True)


#ANN

import keras
from keras.models import Sequential
from keras.layers import Dense

classifier = Sequential()

classifier.add(Dense(units= 9, kernel_initializer = 'uniform', activation = 'relu', input_dim = 19))
classifier.add(Dense(units= 9, kernel_initializer = 'uniform', activation = 'relu'))
classifier.add(Dense(units= 1, kernel_initializer = 'uniform', activation = 'sigmoid'))

classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])

classifier.fit(x_train, y_train, batch_size=10, epochs=100)


y_pred = classifier.predict(x_test)
y_pred = (y_pred>0.5)


acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test,y_pred)
f1 = f1_score(y_test,y_pred)


model_results = pd.DataFrame([['ANN', acc, prec, rec, f1]],
classifier.add(
    Dense(units=60, kernel_initializer='normal', activation='relu'))
classifier.add(
    Dense(units=40, kernel_initializer='normal', activation='relu'))
classifier.add(
    Dense(units=20, kernel_initializer='normal', activation='relu'))
classifier.add(
    Dense(units=10, kernel_initializer='normal', activation='relu'))

# Adding the output layer
classifier.add(
    Dense(units=1, kernel_initializer='normal', activation='sigmoid'))

# Compiling the ANN
classifier.compile(loss='binary_crossentropy',
                   optimizer='adam',
                   metrics=['mean_absolute_error'])

# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size=100, epochs=500)

# Part 3 - Making the predictions and evaluating the model

# Predicting the Test set results
y_pred = classifier.predict(X_test)


y_pred

Example #18
)
model.add(layers.Dropout(0.5))
model.add(layers.Dense(
    16, kernel_regularizer=regularizers.l1(0.001),activation='relu')
)
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))




NumEpochs = 10
BatchSize = 512

model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])

history = model.fit(
    partial_X_train, partial_y_train,
    epochs=NumEpochs, batch_size=BatchSize, validation_data=(X_val, y_val)
)

results = model.evaluate(X_test, y_test)

print("Test Loss and Accuracy")
print("results ", results)

history_dict = history.history
display(history_dict.keys())
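A hedged follow-up sketch plotting the curves stored in history_dict; it assumes matplotlib is available and uses the 'acc'/'val_acc' keys produced by metrics=['acc'] above.

import matplotlib.pyplot as plt

epochs_range = range(1, NumEpochs + 1)
plt.plot(epochs_range, history_dict['loss'], label='train loss')
plt.plot(epochs_range, history_dict['val_loss'], label='val loss')
plt.plot(epochs_range, history_dict['acc'], label='train acc')
plt.plot(epochs_range, history_dict['val_acc'], label='val acc')
plt.xlabel('epoch')
plt.legend()
plt.show()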

Example #19
def main(argv=None):
    parser = argparse.ArgumentParser(description='Visualization example')
    parser.add_argument('--s3_bucket',
                        type=str,
                        required=True,
                        help='S3 Bucket to use.')
    parser.add_argument('--s3_prefix',
                        type=str,
                        help='S3 Bucket path prefix.',
                        default="iris-example")

    args = parser.parse_args()

    logging.basicConfig(format='%(levelname)s:%(message)s',
                        level=logging.DEBUG)

    url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
    names = [
        'sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'species'
    ]
    iris = pd.read_csv(url, names=names)

    array = iris.values
    X, y = array[:, 0:4], np.where(array[:, 4] == 'Iris-setosa', 1, 0)

    test_size = 0.2
    seed = 7
    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        X, y, test_size=test_size, random_state=seed)

    model = LogisticRegression()
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)
    logging.info("Trained Model's evaluation score: {}".format(
        model.score(X_test, y_test)))

    df = pd.concat([
        pd.DataFrame(y_test, columns=['target']),
        pd.DataFrame(y_pred, columns=['predicted'])
    ],
                   axis=1)

    vocab = list(df['target'].unique())
    cm = confusion_matrix(df['target'], df['predicted'], labels=vocab)

    data = []
    for target_index, target_row in enumerate(cm):
        for predicted_index, count in enumerate(target_row):
            data.append((vocab[target_index], vocab[predicted_index], count))

    df_cm = pd.DataFrame(data, columns=['target', 'predicted', 'count'])
    cm_file = os.path.join('/tmp', 'confusion_matrix.csv')
    with file_io.FileIO(cm_file, 'w') as f:
        df_cm.to_csv(f,
                     columns=['target', 'predicted', 'count'],
                     header=False,
                     index=False)

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(10, activation='relu',
                              input_shape=(4, )),  # input shape required
        tf.keras.layers.Dense(32, activation='relu'),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(2)
    ])

    model.compile(optimizer='sgd',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    time_hash = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    log_dir = "/tmp/logs/fit/" + time_hash
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)

    model.fit(x=X_train,
              y=y_train,
              epochs=10,
              validation_data=(X_test, y_test),
              callbacks=[tensorboard_callback])

    # upload to S3
    AWS_REGION = 'us-west-2'
    s3_client = boto3.client('s3', region_name=AWS_REGION)
    try:
        # upload cm file to S3
        cm_file_name = cm_file

        cm_object_name = 'confusion_matrix.csv'
        s3_cm_file = 's3://' + args.s3_bucket + '/' + args.s3_prefix + '/' + cm_object_name
        cm_response = s3_client.upload_file(
            cm_file_name, args.s3_bucket,
            args.s3_prefix + '/' + cm_object_name)

        # upload tb log dir to S3
        s3_tb_file = 's3://' + args.s3_bucket + '/' + args.s3_prefix + '/tb-logs'
        for path, subdirs, files in os.walk(log_dir):
            path = path.replace("\\", "/")
            directory_name = path.replace(log_dir, "")
            for file in files:
                s3_client.upload_file(
                    os.path.join(path, file), args.s3_bucket,
                    args.s3_prefix + '/tb-logs/' + directory_name + '/' + file)

    except ClientError as e:
        logging.info("ERROR IN S3 UPLOADING!!!!!!")
        logging.error(e)

    logging.info("S3 object_name is: {}".format(s3_cm_file))

    metadata = {
        'outputs': [
            # Markdown that is hardcoded inline
            {
                'storage': 'inline',
                'source':
                '# Inline Markdown\n[A link](https://www.kubeflow.org/)',
                'type': 'markdown',
            },
            {
                'source':
                'https://raw.githubusercontent.com/kubeflow/pipelines/master/README.md',
                'type': 'markdown',
            },
            {
                'type':
                'confusion_matrix',
                'format':
                'csv',
                'schema': [
                    {
                        'name': 'target',
                        'type': 'CATEGORY'
                    },
                    {
                        'name': 'predicted',
                        'type': 'CATEGORY'
                    },
                    {
                        'name': 'count',
                        'type': 'NUMBER'
                    },
                ],
                'source':
                s3_cm_file,
                # Convert vocab to strings because for boolean values we want "True|False" to match the csv data.
                'labels':
                list(map(str, vocab)),
            },
            {
                'type': 'tensorboard',
                'source': s3_tb_file,
            }
        ]
    }

    with file_io.FileIO('/tmp/mlpipeline-ui-metadata.json', 'w') as f:
        json.dump(metadata, f)

    logging.info("Succeed in Markdown")
data = concat([Y, X], axis=1)

train, test = train_test_split(data, test_size=0.3, random_state=0, stratify=data['Survived'])
X_train = train[train.columns[1:]]
Y_train = train[train.columns[:1]]
X_test = test[test.columns[1:]]
Y_test = test[test.columns[:1]]

print(X_train.shape)

model = LogisticRegression()
model.fit(X_train, Y_train)
print(model.score(X_test, Y_test))


from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.models import Sequential

# create first model
model = Sequential()
model.add(Dense(18, input_dim=18, activation='relu'))
model.add(Dense(18, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='Nadam', metrics=['accuracy'])
# Fit the model
model.fit(X_train, Y_train, epochs=1000, batch_size=32)
# evaluate the model
scores = model.evaluate(X, Y)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
Example #21
class TestModel(object):
    def __init__(self, nw, ny, nout, test_type='linear', mod_pkg='sklearn',\
                 lam=1.,nepochs=None, batch_size=100, lr=None,\
                 verbose=0):
        """
        TestModel:  A model to test the SE against
        
        There are three basic models for the test
        * 'linear':  Linear gaussian model
        * 'logistic':  Binary classification with a logistic output
        * 'nonlinear':  Non-linear regression
        
        Parameters
        ----------
        test_type: {'linear', 'logistic', 'nonlinear'}
            Test type.  
        mod_pkg:  {'sklearn', 'tf'}
            Model package
        lam: float
            L2-regularization on weights
        nw:  int
            number of features
        ny:  int
            number of measurements
        nout:  int
            number of outputs
        batch_size: int
            batch_size.  Set to 0 for full-batch gradient descent.
            Full batch will be slower, but will approach the true
            minima more exactly
        lr:  float or `None`
            learning rate.  Set to `None` for the default lr
        nepochs:  int
            number of epochs.  For full-batch gradient descent,
            this is the number of steps
        verbose:  int
            verbosity level for fit routine
    
        Returns
        -------
        None.

        """
        self.test_type = test_type
        self.mod_pkg = mod_pkg
        self.lam = lam
        self.nw = nw
        self.nout = nout
        self.ny = ny
        self.nepochs = nepochs
        self.batch_size = batch_size
        self.lr = lr
        self.verbose = verbose

        # Check arguments
        if not (self.test_type in ['linear', 'logistic', 'nonlinear']):
            raise ValueError('Test type %s unknown' % self.test_type)
        if not (self.mod_pkg in ['sklearn', 'tf']):
            raise ValueError('Model package %s unknown' % self.mod_pkg)

        # Test if full batch is used
        self.full_batch = (self.batch_size == 0)
        if self.full_batch:
            self.batch_size = ny

        # Set the default learning rates
        lr_mini_batch = {'linear': 0.01, 'logistic': 0.003,\
                         'nonlinear': 0.01}
        lr_full_batch = {'linear': 0.1, 'logistic': 0.01,\
                         'nonlinear': 0.1}
        if (self.lr is None):
            if self.full_batch:
                self.lr = lr_full_batch[self.test_type]
            else:
                self.lr = lr_mini_batch[self.test_type]

        # Set default number of epochs
        if self.nepochs is None:
            if self.full_batch:
                self.nepochs = 1000
            else:
                self.nepochs = 200

        # Build the model
        if self.mod_pkg == 'sklearn':
            self.build_mod_sklearn()
        else:
            self.build_mod_tf()

    # Output functions for the non-linear regression case
    fscale = 3

    def fnl(p):
        """ 
        Output function 
        """
        u = TestModel.fscale * p
        return (np.exp(u) - 1) / (1 + np.exp(u))

    def fnl_grad(p):
        """ 
        Output function gradient
        """
        u = TestModel.fscale * p
        u = np.minimum(u, 10)
        grad = TestModel.fscale * 2 * np.exp(u) / ((1 + np.exp(u))**2)
        return grad

    def fnl_tf(p):
        """
        Tensorflow implementation of the function
        """
        z = (K.exp(TestModel.fscale * p) - 1) / (1 +
                                                 K.exp(TestModel.fscale * p))
        return z

    def build_mod_sklearn(self):
        """
        Builds the model using the sklearn package
        """
        if self.test_type == 'nonlinear':
            raise ValueError('nonlinear models are not supported in ' +\
                             'the sklearn package.  Use mod_pkg="tf"')

        # Fit the data with sklearn's Ridge or LogisticRegression method
        if self.test_type == 'linear':
            self.mod = Ridge(alpha=self.lam, fit_intercept=False)
        elif self.test_type == 'logistic':
            self.mod = LogisticRegression(fit_intercept=False, C=1 / self.lam)

    def build_mod_tf(self):
        """
        Builds the model using Tensorflow
        """

        # Set L2 regression level.  TF will minimize,
        #
        #    ||y-p||**2 + alpha*||w||^2/nw
        #
        # alpha = lam*nout/nw
        K.clear_session()
        alpha = self.lam / self.nout / self.ny
        if self.test_type == 'logistic':
            alpha /= 2

        self.mod = tfk.models.Sequential()
        self.mod.add(tfkl.Dense(self.nout,input_shape=(self.nw,),\
            name='linear', use_bias=False,\
            kernel_regularizer=tfk.regularizers.l2(alpha)) )
        if self.test_type == 'logistic':
            self.mod.add(tfkl.Activation('sigmoid'))
        elif self.test_type == 'nonlinear':
            self.mod.add(tfkl.Lambda(TestModel.fnl_tf))

    def fit(self, Xtr, ytr):
        """
        Fits data 
        
        Parameters
        ----------
        Xtr, ytr:  ndarrays
            Training data
        """
        # Check if output shape matches
        if (ytr.shape != (self.ny, self.nout)):
            raise ValueError('Expecting shape %s.  Received %s'\
                             % (str((self.ny,self.nout)),str(ytr.shape)))

        # The logistic sklearn method can only do one output
        if (self.test_type == 'logistic') and (self.mod_pkg == 'sklearn'):
            if (self.nout != 1):
                raise ValueError('Logistic model with sklearn only takes ' +\
                                 'a single output')
            ytr = np.squeeze(ytr)

        if self.mod_pkg == 'sklearn':
            # Fit the data using sklearn
            self.mod.fit(Xtr, ytr)

            # Store the coefficients
            self.what = self.mod.coef_.T
        else:
            # Fit the model using tensorflow
            if self.full_batch:
                opt = tfk.optimizers.Adam(lr=self.lr)
            else:
                opt = tfk.optimizers.SGD(lr=self.lr)

            if self.test_type == 'logistic':
                self.mod.compile(\
                    optimizer=opt, loss='binary_crossentropy',\
                    metrics=['accuracy'])

            else:
                self.mod.compile(\
                    optimizer=opt, loss='mse',\
                    metrics=['mse'])
            self.hist = self.mod.fit(\
                Xtr,ytr,epochs=self.nepochs,\
                batch_size=self.batch_size,\
                verbose=self.verbose)
            self.what = self.mod.get_weights()[0]

    def test_grad(self, Xtr, ytr):
        """
        Tests the gradient 

        Parameters
        ----------
        Xtr : ndarray
            matrix of features
        ytr : ndarray
            matrix of responses

        Returns
        -------
        None.

        """
        ptr = Xtr.dot(self.what)
        if self.test_type == 'linear':
            e = ptr - ytr
        elif self.test_type == 'logistic':
            # The binary cross-entropy loss (with logit p) is:
            #    log(1 + exp(p)) - y*p
            # So, the gradient is:
            #    grad = 1/(1+exp(-p)) - y
            e = 1 / (1 + np.exp(-ptr)) - ytr
        elif self.test_type == 'nonlinear':
            e = (TestModel.fnl(ptr) - ytr) * TestModel.fnl_grad(ptr)

        grad_out = Xtr.T.dot(e)
        grad_in = self.lam * self.what
        grad = grad_in + grad_out
        print('Final MSE grad = %12.4e' % np.mean(grad**2))
        return grad, grad_in, grad_out

    def predict(self, Xts):
        """
        Predicts values on test data
        
        Parameters
        ----------
        Xts:  ndarray
            Test data
            
        Returns:
        --------
        yts_hat:  ndarray
            Predicted output
        """
        yts_hat = self.mod.predict(Xts)
        if self.test_type == 'logistic':
            nts = yts_hat.shape[0]
            yts_hat = np.reshape(yts_hat, (nts, 1))
        return yts_hat

        ""