Example #1
    def fit(self, X, y):

        # check if TensorFlow is available
        _check_tensorflow()

        sequence_length = X.shape[1]
        if self._model is None or sequence_length != self.sequence_length:
            self.sequence_length = sequence_length
            keras_model = _create_lstm_pool_model(
                embedding_matrix=self.embedding_matrix,
                backwards=self.backwards,
                dropout=self.dropout,
                optimizer=self.optimizer,
                max_sequence_length=sequence_length,
                lstm_out_width=self.lstm_out_width,
                learn_rate=self.learn_rate,
                verbose=self.verbose)
            self._model = KerasClassifier(keras_model, verbose=self.verbose)
        self._model.fit(X,
                        y,
                        batch_size=self.batch_size,
                        epochs=self.epochs,
                        shuffle=self.shuffle,
                        class_weight=self.class_weight,
                        verbose=self.verbose)
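All of these snippets lean on the same wrapper pattern: a build function returns a compiled Keras model, and KerasClassifier turns it into a scikit-learn estimator. Below is a minimal, self-contained sketch of that pattern; the toy data and build function are illustrative, not taken from any of the examples, and the import path assumes the legacy tf.keras wrapper rather than the newer scikeras package:

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier

def build_model():
    # build_fn must return a *compiled* model; the wrapper calls it inside fit()
    model = Sequential()
    model.add(Dense(8, input_dim=2, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model

X = np.random.rand(100, 2)                  # toy data
y = (X.sum(axis=1) > 1).astype(int)
clf = KerasClassifier(build_fn=build_model, epochs=5, batch_size=16, verbose=0)
clf.fit(X, y)                               # usable like any sklearn estimator
print(clf.score(X, y))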
Example #2
class NeuralNet(BaseEstimator):
    def __init__(self, neuralNet, epochs, batch_size, verbose):
        self.neuralNet = neuralNet
        self.epochs = epochs
        self.batch_size = batch_size
        self.verbose = verbose

    def fit(self, X, y):
        weights = np.mean(np.sum(y, axis=0)) / np.sum(y, axis=0)
        self.dict_weights = dict(enumerate(weights))
        self.classifier = KerasClassifier(
            build_fn=self.neuralNet,
            epochs=self.epochs,
            batch_size=self.batch_size,
            verbose=self.verbose,
            class_weight=self.dict_weights,
        )
        self.classifier.fit(X, y)
        return self

    def predict(self, X):
        return self.classifier.predict_proba(X) > 0.5

    def score(self, X, y):
        return self.classifier.score(X, y)

    def predict_proba(self, X):
        return self.classifier.predict_proba(X)
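A quick worked example of the class-weight arithmetic in fit above: for one-hot targets, np.sum(y, axis=0) gives the per-class counts, and dividing the mean count by each class count up-weights the rare classes (toy targets assumed):

import numpy as np

# hypothetical one-hot targets: 3 samples of class 0, 1 sample of class 1
y = np.array([[1, 0], [1, 0], [1, 0], [0, 1]])

counts = np.sum(y, axis=0)               # -> [3, 1]
weights = np.mean(counts) / counts       # -> [0.667, 2.0]
dict_weights = dict(enumerate(weights))  # -> {0: 0.667, 1: 2.0}
print(dict_weights)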
Example #3
    def test_LSTM_compilation_and_fit_predict_without_execution_error(self):
        # given
        x_train = np.array(
            ['this is really awesome !', 'this is really crap !!'])
        y_train = np.array([1, 0])

        ids_x_train = np.empty([2, 5])
        for i in range(0, len(x_train)):
            ids = [
                dummy_hash_function(token) for token in x_train[i].split(' ')
            ]
            ids_x_train[i, :] = ids
        num_labels = 2
        y_enc = to_categorical(y_train, num_labels)
        dictionary_size = int(np.max(ids_x_train) + 1)  # np.int was removed in NumPy 1.24

        # when
        lstm_factory = LSTMFactory()
        clf_keras = KerasClassifier(build_fn=lstm_factory.create_model,
                                    dictionary_size=dictionary_size,
                                    num_labels=num_labels)
        clf_keras.fit(ids_x_train, y_enc, epochs=1, verbose=False)

        x_test = np.array(['it is really awesome !'])
        ids_x_test = np.empty([1, 5])
        ids_x_test[0, :] = [
            dummy_hash_function(token) for token in x_test[0].split(' ')
        ]

        y_pred = clf_keras.predict(ids_x_test)

        # then
        self.assertIsNotNone(y_pred)
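The test depends on a dummy_hash_function that is not shown on this page. A plausible stand-in, purely an assumption about what the real helper does, maps each token deterministically to a small integer id:

def dummy_hash_function(token, buckets=100):
    # deterministic token -> id mapping; collisions are acceptable in a smoke test
    # (the built-in hash() is salted per process, so it would not be reproducible)
    return sum(ord(c) for c in token) % buckets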
Example #4
    def fit(self, X, y, **kwargs):
        """
        Fit the workflow by building the word corpus and fitting the Keras model.
    
        Parameters
        ----------
        X : array-like, iterable
            Collection of str or an iterable which yields str
        y : array-like, shape (n_samples,)
            Class targets.
        **kwargs :
            Parameters passed to the inner Keras model.
    
        Returns
        -------
        self : object
            Returns an instance of self.
        """

        x = self.text2seq.fit_transform(X)
        y_enc = to_categorical(y, self.num_labels)

        self.model_ = KerasClassifier(build_fn=self.factory.create_model,
                                      dictionary_size=self.text2seq.dictionary_size_,
                                      num_labels=self.num_labels)

        self.model_.fit(x, y_enc, **kwargs)

        return self
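Here text2seq is some text-to-sequence transformer exposing fit_transform and a dictionary_size_ attribute. A rough sketch of what such a transformer might look like, built on the Keras Tokenizer (an assumption, not the project's actual class):

from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

class Text2Seq:
    def __init__(self, max_len=50):
        self.max_len = max_len
        self.tokenizer = Tokenizer()

    def fit_transform(self, texts):
        self.tokenizer.fit_on_texts(texts)
        # +1 because Tokenizer indices start at 1 and 0 is the padding id
        self.dictionary_size_ = len(self.tokenizer.word_index) + 1
        seqs = self.tokenizer.texts_to_sequences(texts)
        return pad_sequences(seqs, maxlen=self.max_len)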
Example #5
class KerasGridOptimizer():
    def __init__(self,
                 epochs=10,
                 n_layers=1,
                 n_nodes=32,
                 learning_rate=5e-4,
                 batch_size=128):
        self._epochs = epochs
        self._n_layers = n_layers
        self._n_nodes = n_nodes
        self._learn_rate = learning_rate
        self._batch_size = batch_size

    def get_params(self, deep=False):
        return {
            'epochs': self._epochs,
            'n_layers': self._n_layers,
            'n_nodes': self._n_nodes
        }

    def set_params(self,
                   epochs=None,
                   n_layers=None,
                   n_nodes=None,
                   learning_rate=None,
                   batch_size=None):
        if epochs is not None:
            self._epochs = epochs
        if n_layers is not None:
            self._n_layers = n_layers
        if n_nodes is not None:
            self._n_nodes = n_nodes
        if learning_rate is not None:
            self._learn_rate = learning_rate
        if batch_size is not None:
            self._batch_size = batch_size
        return self

    def fit(self, X, y):
        self._model = models.Sequential()
        self._model.add(layers.Flatten(input_shape=(26, )))
        for i in range(self._n_layers):
            self._model.add(layers.Dense(self._n_nodes, activation='relu'))
        self._model.add(layers.Dense(3, activation='softmax'))

        opt = keras.optimizers.Adam(learning_rate=self._learn_rate)
        self._model.compile(loss='categorical_crossentropy',
                            optimizer=opt,
                            metrics=['accuracy'])

        self._est = KerasClassifier(build_fn=lambda: self._model,
                                    epochs=self._epochs,
                                    verbose=0,
                                    batch_size=self._batch_size)

        self._est.fit(X, y)

    def score(self, X, y):
        return self._est.score(X, y)
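Hand-written get_params and set_params, as in KerasGridOptimizer above, are exactly what scikit-learn's search utilities need to clone and reconfigure an estimator. A short usage sketch (the grid values are illustrative):

from sklearn.model_selection import GridSearchCV

search = GridSearchCV(KerasGridOptimizer(),
                      param_grid={'epochs': [5, 10], 'n_nodes': [16, 32]},
                      cv=3)
# search.fit(X, y)  # X must have 26 features to match the Flatten input_shape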
Example #6
    def fit(self, X_raw, y_raw):
        """Classifier training function.

        Here you will implement the training function for your classifier.

        Parameters
        ----------
        X_raw : numpy.ndarray
            A numpy array, this is the raw data as downloaded
        y_raw : numpy.ndarray
            A one dimensional numpy array, this is the binary target variable

        Returns
        -------
        self.base_classifier : KerasClassifier
            The fitted classifier.
        """
        X_clean = self._preprocessor(X_raw)
        # Keras expects class_weight as a dict mapping class index -> weight
        class_weights = dict(enumerate(class_weight.compute_class_weight(
            'balanced', np.unique(y_raw), y_raw)))
        es = EarlyStopping(monitor='val_loss',
                           min_delta=0.0001,
                           patience=5,
                           mode='auto',
                           restore_best_weights=True)

        # ARCHITECTURE OF OUR MODEL

        def make_nn(hidden_layers=[7, 7], lrate=0.001):
            sgd = optimizers.SGD(lr=lrate)
            adam = optimizers.Adam(lr=lrate)
            he_init = he_normal()
            model = Sequential()
            for k in hidden_layers:
                model.add(
                    Dense(k,
                          activation='relu',
                          kernel_initializer=he_init,
                          bias_initializer='zeros'))
            model.add(
                Dense(1,
                      activation='sigmoid',
                      kernel_initializer=he_init,
                      bias_initializer='zeros'))
            model.compile(loss='binary_crossentropy', optimizer=adam)
            return model

        self.base_classifier = KerasClassifier(make_nn,
                                               class_weight=class_weights,
                                               epochs=350,
                                               validation_split=0.1,
                                               batch_size=32,
                                               callbacks=[es])

        # THE FOLLOWING GETS CALLED IF YOU WISH TO CALIBRATE YOUR PROBABILITIES

        self.base_classifier.fit(X_clean, y_raw)
        return self.base_classifier
Example #7
def classifier(X_train, y_train, X_test, X_params_select, y_params_select,
               tune_size, config_list, opt_modulo_params):
    ###
    # Hyperparameter arrays
    ###

    learn_rate = [0.1]
    momentum = [float(x) for x in np.linspace(start=0, stop=0.9, num=4)]
    hidden_layer_n = [int(x) for x in np.linspace(start=10, stop=100, num=4)]

    param_grid = {
        'learn_rate': learn_rate,
        'momentum': momentum,
        'hidden_layer_n': hidden_layer_n,
    }

    ###
    # Training and Fitting
    ###
    early_stopping = EarlyStopping(monitor='accuracy', patience=500)
    model = KerasClassifier(build_fn=create_model, epochs=10000)
    if config_list['optimize_method'] == 1:
        if config_list['randomized_search'] == 1:
            grid = RandomizedSearchCV(estimator=model,
                                      param_distributions=param_grid,
                                      cv=2,
                                      n_iter=25)
        else:
            grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=2)

        model = grid.fit(X_params_select,
                         y_params_select,
                         epochs=10000,
                         callbacks=[early_stopping],
                         verbose=0)
        opt_modulo_params = grid.best_params_
        #model = create_model(**opt_modulo_params)
        print(opt_modulo_params)

    else:
        model = create_model()
        model.fit(X_train,
                  y_train,
                  epochs=10000,
                  callbacks=[early_stopping],
                  verbose=0)

    predictions_prob = model.predict(
        X_test
    )  #bugfix: https://datascience.stackexchange.com/questions/13461/how-can-i-get-prediction-for-only-one-instance-in-keras
    print(predictions_prob)
    y_pred = np.where(predictions_prob >= 0.5, 1, -1)
    y_pred = list(itertools.chain(*y_pred))
    y_pred = np.array(y_pred)
    print(y_pred)
    print(type(y_pred))
    return y_pred, opt_modulo_params
Example #8
def ede_dnn(dnn_model,
            Xtrain,
            ytrain,
            Xtest,
            ytest,
            batch_size,
            epochs,
            model_dir,
            patience=3,
            factor=0.2,
            export='DNN_y2',
            verbose=0):
    """
    Used to generate a DNN model instance and train it.

    :param dnn_model: Model to be generated
    :param Xtrain: Training input data
    :param ytrain: Training ground truth
    :param Xtest: Testing input data
    :param ytest: Testing ground truth
    :param batch_size: DNN Batch size
    :param epochs: Training Epochs
    :param model_dir: Model directory location
    :param patience: Patience for the early stopping callback
    :param factor: Factor for the ReduceLROnPlateau learning-rate reduction
    :param export: name used for exporting
    :return: tf.history
    """
    # One-hot encoding of the ground truth, for both training and testing
    y_oh_train = pd.get_dummies(ytrain, prefix='target')
    y_oh_test = pd.get_dummies(ytest, prefix='target')

    early_stopping = EarlyStopping(monitor="loss",
                                   patience=patience)  # early stop patience
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=factor,
                                  patience=5,
                                  min_lr=0.00001)
    model = KerasClassifier(build_fn=dnn_model,
                            verbose=verbose,
                            callbacks=[early_stopping, reduce_lr])
    history = model.fit(np.asarray(Xtrain),
                        np.asarray(y_oh_train),
                        batch_size=batch_size,
                        epochs=epochs,
                        callbacks=[early_stopping, reduce_lr],
                        verbose=0,
                        validation_data=(np.asarray(Xtest),
                                         np.asarray(y_oh_test)))
    # Saving History
    df_history = pd.DataFrame(history.history)
    history_name = "DNN_history_{}.csv".format(export)
    df_history.to_csv(os.path.join(model_dir, history_name), index=False)
    return history
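The one-hot encoding step at the top of ede_dnn deserves a tiny worked example; pd.get_dummies produces one indicator column per class:

import pandas as pd

y = pd.Series([0, 2, 1, 2])
one_hot = pd.get_dummies(y, prefix='target')
# columns: target_0, target_1, target_2 -- one indicator per class
# (0/1 integers in older pandas, booleans from pandas 2.0 on)
print(one_hot)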
Example #9
def create_scikit_keras_classifier(X, y):
    # create simple (dummy) Keras DNN model for classification
    batch_size = 500
    epochs = 10
    model_func = create_scikit_keras_model_func(X.shape[1])
    model = KerasClassifier(build_fn=model_func,
                            epochs=epochs,  # 'nb_epoch' is the deprecated Keras 1 name
                            batch_size=batch_size,
                            verbose=1)
    model.fit(X, y)
    return model
Example #10
 def fit(self, X, y):
     weights = np.mean(np.sum(y, axis=0)) / np.sum(y, axis=0)
     self.dict_weights = dict(enumerate(weights))
     self.classifier = KerasClassifier(
         build_fn=self.neuralNet,
         epochs=self.epochs,
         batch_size=self.batch_size,
         verbose=self.verbose,
         class_weight=self.dict_weights,
     )
     self.classifier.fit(X, y)
     return self
Example #11
    def fit(self, X, y):
        if scipy.sparse.issparse(X):
            X = X.toarray()
        if self._model is None or X.shape[1] != self.input_dim:
            self.input_dim = X.shape[1]
            keras_model = _create_dense_nn_model(
                self.input_dim, self.dense_width, self.optimizer,
                self.learn_rate, self.regularization, self.verbose)
            self._model = KerasClassifier(keras_model, verbose=self.verbose)

        self._model.fit(X, y, batch_size=self.batch_size, epochs=self.epochs,
                        shuffle=self.shuffle, verbose=self.verbose,
                        class_weight=_set_class_weight(self.class_weight))
Example #12
    def fit(self, X_raw, y_raw, claims_raw):
        """Classifier training function.

        Here you will use the fit function for your classifier.

        Parameters
        ----------
        X_raw : ndarray
            This is the raw data as downloaded
        y_raw : ndarray
            A one dimensional array, this is the binary target variable
        claims_raw: ndarray
            A one dimensional array which records the severity of claims

        Returns
        -------
        self: (optional)
            an instance of the fitted model

        """
        nnz = np.where(claims_raw != 0)[0]
        self.y_mean = np.mean(claims_raw[nnz])
        # =============================================================
        # REMEMBER TO ADD A SIMILAR LINE TO THE FOLLOWING SOMEWHERE IN THE CODE
        X_clean = self._preprocessor(X_raw, mode='train')
        
        # Compute class weights (as the dict Keras expects) and define the early-stopping callback
        class_weights = dict(enumerate(class_weight.compute_class_weight(
            'balanced', np.unique(y_raw), y_raw)))
        es = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=5,
                           mode='auto', restore_best_weights=True)
        # ARCHITECTURE OF OUR MODEL 
        
        def make_nn(hidden_layers=[5, 5], lrate=0.0005, random_seed=0):
            sgd = optimizers.SGD(lr=lrate)
            he_init = he_normal(seed=random_seed)
            model = Sequential()
            for k in hidden_layers:
                model.add(Dense(k, activation='relu',
                                kernel_initializer=he_init,
                                bias_initializer='zeros'))
            model.add(Dense(1, activation='sigmoid',
                            kernel_initializer=he_init,
                            bias_initializer='zeros'))
            model.compile(loss='binary_crossentropy', optimizer=sgd)
            return model

        self.base_classifier = KerasClassifier(make_nn,
                                               class_weight=class_weights,
                                               epochs=350,
                                               validation_split=0.1,
                                               batch_size=64,
                                               callbacks=[es])

        # THE FOLLOWING GETS CALLED IF YOU WISH TO CALIBRATE YOUR PROBABILITIES
        if self.calibrate:
            self.base_classifier = fit_and_calibrate_classifier(
                self.base_classifier, X_clean, y_raw)
        else:
            self.base_classifier.fit(X_clean, y_raw)
        return self.base_classifier
Example #13
def load_keras_classifier(name, path=ASSETS_PATH):
    """Load a Keras model from disk, as KerasClassifier (sklearn wrapper)"""
    model_path, classes_path = keras_model_and_classes_paths(name)

    nn = KerasClassifier(build_fn=do_nothing)

    # load model and classes
    nn.model = keras.models.load_model(model_path)
    classes = pickle.load(open(classes_path, 'rb'))

    # required for sklearn to believe that the model is trained
    nn._estimator_type = "classifier"
    nn.classes_ = classes

    return nn
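The loader above implies a matching saver. A minimal sketch of what it could look like; the function name is hypothetical, and keras_model_and_classes_paths is assumed to be the same path helper the loader uses:

import pickle

def save_keras_classifier(nn, name):
    # persist a fitted KerasClassifier so load_keras_classifier can restore it
    model_path, classes_path = keras_model_and_classes_paths(name)
    nn.model.save(model_path)              # the underlying Keras model
    with open(classes_path, 'wb') as f:
        pickle.dump(nn.classes_, f)        # class labels expected by sklearn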
Example #14
def neural_network_hyper(features_encoded, target_encoded):
    """ Hyperparameter tuning of Neural network
    :param: Features data
    :param: Target data
    :return: Neural network model with best parameters 
    """
    global hyperparameter
    if not hyperparameter:
        estimator = KerasClassifier(build_fn=create_model,
                                    n_hidden=5,
                                    size_nodo=12,
                                    ativ="relu",
                                    opt="adam",
                                    dropout=0.1,
                                    epochs=500,
                                    batch_size=10000,
                                    validation_split=0.1,
                                    verbose=0)
        return estimator
    grid_param = {
        'n_hidden': [2, 5],
        'size_nodo': [50, 200],
        'ativ': ['relu', 'softmax'],
        'opt': ['adam'],
        'dropout': [0.1],
        'epochs': [50],
        'batch_size': [20000]
    }
    model = KerasClassifier(build_fn=create_model,
                            verbose=1,
                            validation_split=0.1)
    ann_hyper_parameters = grid_search(features_encoded, target_encoded, 2,
                                       grid_param, model)
    print('\n\n\nBest Neural Network Hyper-parameters using GridSearch:\n',
          ann_hyper_parameters)

    estimator = KerasClassifier(
        build_fn=create_model,
        n_hidden=ann_hyper_parameters['n_hidden'],
        size_nodo=ann_hyper_parameters['size_nodo'],
        ativ=ann_hyper_parameters['ativ'],
        opt=ann_hyper_parameters['opt'],
        dropout=ann_hyper_parameters['dropout'],
        epochs=1000,  #ann_hyper_parameters['epochs'],
        batch_size=ann_hyper_parameters['batch_size'],
        validation_split=0.1,
        verbose=1)
    return estimator
Example #15
def run_classification_nn(path_dataset,
                          name_type,
                          dim,
                          cross_values,
                          epochs=100):
    """Draft classification function with NN2. Development stopped."""
    path_base = join(path_dataset, "reduced")

    if name_type in ("mae", "maae"):
        path_read = join(path_base, "ae_{}".format(name_type))
    else:
        path_read = join(path_base, name_type)

    data, class_ = read_feature_data(path_read, dim)

    data = data.to_numpy()

    kfold = KFold(n_splits=cross_values, random_state=42, shuffle=True)

    history_acc = []
    accumulate_acc = []

    def create_model():
        model = Sequential()
        model.add(Dense(22, input_dim=dim, activation="relu"))
        model.add(Dense(12, activation="relu"))
        model.add(Dense(1, activation="softmax"))
        model.compile(loss="binary_crossentropy",
                      optimizer="adam",
                      metrics=["accuracy"])
        return model

    for train_index, val_index in kfold.split(data):

        model = KerasClassifier(build_fn=create_model,
                                epochs=epochs,
                                batch_size=128,
                                verbose=0)

        history = model.fit(data[train_index], class_[train_index])
        pred = model.predict(data[val_index])

        accuracy = accuracy_score(class_[val_index], pred)
        accumulate_acc.append(accuracy)

        history_acc.append(history.history["loss"])

    return history_acc, accumulate_acc
Example #16
def grid_search(layers_list, epochs_list, X_train, Y_train, indim=300):
    tup_layers = tuple([tuple(l) for l in layers_list])
    tup_epochs = tuple(epochs_list)

    model = KerasClassifier(build_fn=create_model,
                            verbose=0)  #use our create_model

    # define the grid search parameters
    batch_size = [1]  #starting with just a few choices
    epochs = tup_epochs
    lyrs = tup_layers

    #use this to override our defaults. keys must match create_model args
    param_grid = dict(batch_size=batch_size,
                      epochs=epochs,
                      input_dim=[indim],
                      lyrs=lyrs)

    # build the search grid
    grid = GridSearchCV(
        estimator=model,  #we created model above
        param_grid=param_grid,
        cv=3,  #use 3 folds for cross-validation
        verbose=2)  # include n_jobs=-1 if you are using CPU

    grid_result = grid.fit(np.array(X_train), np.array(Y_train))

    # summarize results
    print("Best: %f using %s" %
          (grid_result.best_score_, grid_result.best_params_))
    means = grid_result.cv_results_['mean_test_score']
    stds = grid_result.cv_results_['std_test_score']
    params = grid_result.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print("%f (%f) with: %r" % (mean, stdev, param))
Example #17
    def gridSearch(inputs_train, output_train):
        model = KerasClassifier(build_fn=create_model, verbose=10)

        # defining grid search parameters
        param_grid = {
            'optimizer': ['RMSprop'],
            'batch_size': [10],
            'epochs': [100],
            #                  'learn_rate': [0.001, 0.01, 0.1, 0.2, 0.3],
            #                  'momentum': [0.0, 0.2, 0.4, 0.6, 0.8, 0.9],
            'init_mode': ['lecun_uniform'],
            'activation': ['softmax'],
            'weight_constraint': [1],
            'dropout_rate': [0.0, 0.5, 0.9],
            'neurons': [10, 30]
        }
        grid = GridSearchCV(estimator=model,
                            param_grid=param_grid,
                            cv=3,
                            verbose=10)
        grid_result = grid.fit(inputs_train, output_train)

        # summarize results
        print("Best: %f using %s" %
              (grid_result.best_score_, grid_result.best_params_))

        return grid.best_params_, grid.best_score_
Example #18
def Keras_Classifier(n_splits, save_model_address, model_types, train_images,
                     train_labels, test_images, test_labels, image_size1,
                     image_size2, image_size3, label_types, epochs, times, L1,
                     L2, F1, F2, F3):
    print("begin of keras_classifier: ")
    model = KerasClassifier(build_fn=create_model,
                            model_types=model_types,
                            image_size1=image_size1,
                            image_size2=image_size2,
                            label_types=label_types,
                            image_size3=image_size3,
                            times=times,
                            L1=L1,
                            L2=L2,
                            F1=F1,
                            F2=F2,
                            F3=F3,
                            epochs=epochs,
                            batch_size=2,
                            verbose=1)
    kfold = KFold(n_splits=n_splits, shuffle=True, random_state=5000)
    #scores = cross_val_score(model1, train_images, train_labels, cv=kfold)
    print("before of cross_val_predict:")
    y_pre = cross_val_predict(model, train_images, train_labels, cv=kfold)
    print(y_pre)
    y_scores = y_pre
    print("train_labels", train_labels)
    print("y_scores", y_scores)
    fpr, tpr, thresholds = roc_curve(train_labels, y_scores)
    plt.plot(fpr, tpr)
    plt.savefig("ROC.png")
    plt.show()
Example #19
def cross_val_nn():
    print('Cross validation initializing')
    X, y = data_time()
    mod = KerasClassifier(build_fn=phish_nn,
                          epochs=15,
                          batch_size=1500,
                          verbose=0)
    num_folds = 5
    kfold = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=808)
    print('Starting 5-fold cross validation of model')
    cv_results = cross_val_score(mod, X, y, cv=kfold)
    print('Starting 5-fold cross-validation predictions')
    cv_preds = cross_val_predict(mod,
                                 X,
                                 y,
                                 cv=kfold,
                                 verbose=0,
                                 method='predict')
    print('The average cross-validation accuracy is: ',
          round(cv_results.mean() * 100, 2), '%')
    print('The 5-fold cross validation accuracy results are: \n', cv_results)
    acc = accuracy_score(y, cv_preds)
    cm = confusion_matrix(y, cv_preds)
    print('Confusion Matrix \n', cm)
    print('Accuracy Score: \n', acc)
    f1s = f1_score(y, cv_preds)
    print('The F1 score for the cross validated model is: \n', f1s)
    precis = precision_score(y, cv_preds, average='binary')
    rec = recall_score(y, cv_preds, average='binary')
    print('The precision score is: \n', precis)
    print('The recall score is: \n', rec)
    return cm
Example #20
def train_age(X, Y, X_test, train=True, epoch=10, batch_size=1024):
    # convert the classes to 0 and 1
    encoder = LabelEncoder()
    encoder.fit(Y)
    Y = encoder.transform(Y)
    if train:
        scaler = StandardScaler()
        scaler.fit(X)
        X = scaler.transform(X)
        Y = to_categorical(Y)
        model = create_age_model()
        model.fit(X, Y, batch_size=batch_size, epochs=epoch)

        X_test = scaler.transform(X_test)
        y_pre = model.predict(X_test)
        y_pred_age = np.argmax(y_pre, axis=1)

        return y_pred_age
    else:
        # estimator = KerasClassifier(
        #     build_fn=create_gender_model, epochs=epoch, batch_size=batch_size, verbose=0)
        estimators = []
        estimators.append(('standardize', StandardScaler()))
        estimators.append(('mlp',
                           KerasClassifier(build_fn=create_age_model,
                                           epochs=epoch,
                                           batch_size=batch_size,
                                           verbose=0)))
        pipeline = Pipeline(estimators)
        kfold = StratifiedKFold(n_splits=10, shuffle=True)
        results = cross_val_score(pipeline, X, Y, cv=kfold)
        print("Baseline: %.2f%% (%.2f%%)" %
              (results.mean() * 100, results.std() * 100))
Example #21
def run_gridSearch(X_train, y_train):
    # Tuning the model: Grid Search method
    # Method to tune and test different combinations of parameters/hyperparameters

    # Wrap the whole thing; pass the build function itself (not a built
    # model) so the search can rebuild it for each parameter combination
    wrappedClassifier = KerasClassifier(
        build_fn=build_model, dropout_rate=0.2, optimizer='adam')

    # Create parameters dictionary
    params = {
        'batch_size': [10, 20],
        'epochs': [15, 20],
        'optimizer': ['adam', 'nadam']  # key must match the build_model argument name
    }

    gridSearch = GridSearchCV(estimator=wrappedClassifier,
                              param_grid=params,
                              scoring='accuracy',
                              cv=7)

    gridSearch = gridSearch.fit(X_train, y_train)

    best_param = gridSearch.best_params_
    best_accuracy = gridSearch.best_score_

    return best_param, best_accuracy
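For GridSearchCV to tune build-function arguments such as dropout_rate, the callable itself must be handed to the wrapper and the grid keys must match the function's parameter names. A minimal sketch of that pattern (the layer sizes and four-feature input are assumptions):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier

def build_model(dropout_rate=0.2, optimizer='adam'):
    model = Sequential()
    model.add(Dense(16, input_dim=4, activation='relu'))
    model.add(Dropout(dropout_rate))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=optimizer)
    return model

clf = KerasClassifier(build_fn=build_model)  # the callable, not build_model(...)
params = {'dropout_rate': [0.1, 0.2], 'optimizer': ['adam', 'nadam']}
# GridSearchCV(clf, params, cv=3) can now vary both arguments per candidate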
Example #22
 def __init__(self, vocab_size, create_model_callback, title, layers,
              isFinal, existentModel):
     print("""\n\nRNN PARAMETERS
 _________________________________
 vocab_size:    {}
 title:         {}
 layers:        {}
 isFinal:       {} 
 existentModel: {} 
 \n\n""".format(vocab_size, title, layers, isFinal, existentModel))
     metric_monitor = "val_recall"  #"val_loss"
     self.callbacks = [
         EarlyStopping(monitor=metric_monitor, mode='min', verbose=1),
         ModelCheckpoint("{}_checkpoint_model.h5".format(title),
                         monitor=metric_monitor,
                         mode='max',
                         save_best_only=True,
                         verbose=1),
         CSVLogger('{}_train_callback_log.txt'.format(title))
     ]
     MLModel.__init__(self,
                      model=KerasClassifier(
                          build_fn=create_model_callback,
                          epochs=50,
                          batch_size=10,
                          verbose=2,
                          vocab_size=vocab_size,
                          hidden_dims=layers,
                      ),
                      param_grid=getRNNGrid(),
                      title=title,
                      isFinal=isFinal,
                      existentModel=existentModel)
Example #23
def gridSearch(inputs_train, output_train):
    model = KerasClassifier(build_fn=create_model, verbose=0)

    # defining grid search parameters
    param_grid = {
        'optimizer':
        ['SGD', 'RMSprop',
         'Adam'],  #best:SGD , 'Adagrad',, 'Adadelta', 'Adamax', 'Nadam'
        'batch_size': [10, 100, 500],  #best:10
        'epochs': [100, 1000],  #best:100
        #                  'learn_rate': [0.001, 0.01, 0.1, 0.2, 0.3],
        #                  'momentum': [0.0, 0.2, 0.4, 0.6, 0.8, 0.9],
        #                  'init_mode': ['uniform','normal'], #, 'zero', 'lecun_uniform',, 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform'
        #                  'activation': ['softmax','relu','sigmoid'], #, 'softplus', 'softsign', , 'tanh', , 'hard_sigmoid', 'linear'
        #                 # 'weight_constraint': [1, 3, 5],
        #                  'dropout_rate': [0.0, 0.9], #, 0.5
        #                  'neurons': [25, 50] #10,
    }
    grid = GridSearchCV(estimator=model,
                        param_grid=param_grid,
                        cv=3,
                        verbose=10)
    grid_result = grid.fit(inputs_train, output_train)

    # summarize results
    print("Best: %f using %s" %
          (grid_result.best_score_, grid_result.best_params_))
    means = grid_result.cv_results_['mean_test_score']
    stds = grid_result.cv_results_['std_test_score']
    params = grid_result.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print("%f (%f) with: %r" % (mean, stdev, param))

    return grid.best_params_, grid.best_score_
Example #24
def neural_network_classifier(X_train, X_test, t_train, t_test):
    # Function to create model, required for KerasClassifier
    def create_model():
        # create model
        model = keras.Sequential()
        model.add(layers.Dense(16, input_shape=(4,), activation='tanh'))
        model.add(layers.Dense(8, activation='tanh'))
        model.add(layers.Dense(4, activation='tanh'))
        model.add(layers.Dense(3, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam', metrics=['accuracy'])
        return model

    nnclassifier = KerasClassifier(build_fn=create_model, verbose=0)

    k_fold = StratifiedKFold(
        n_splits=folds, random_state=random_seed, shuffle=True)

    param_grid = {
        "batch_size": [10, 20, 40, 60, 80, 100],
        "epochs": [10, 50, 100]
    }

    gs = GridSearchCV(nnclassifier, param_grid,
                      scoring=scoring, cv=k_fold, n_jobs=-1)
    gs.fit(X_train, t_train)

    print(gs.best_params_)
    use_model(gs, gs.best_score_, X_train, X_test,
              t_test, 'Neural Net Classification')
Example #25
def main():
    ''' Main function '''
    # Load data
    x_train, y_train, x_test, y_test, test_img_idx = prepare_data()

    MCBN_model = KerasClassifier(build_fn=create_model,
                                 epochs=15,
                                 batch_size=32,
                                 verbose=0)

    print("Start fitting monte carlo batch_normalization model")

    X = x_train[0:int(TRAIN_VAL_SPLIT * len(x_train))]
    X = X.astype('float32')
    Y = y_train[0:int(TRAIN_VAL_SPLIT * len(x_train))]
    X /= 255

    # define the grid search parameters
    optimizer = [
        'SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam'
    ]
    param_grid = dict(optimizer=optimizer)
    grid = GridSearchCV(estimator=MCBN_model,
                        param_grid=param_grid,
                        n_jobs=-1,
                        cv=3)
    grid_result = grid.fit(X, Y)
    # summarize results
    print("Best: %f using %s" %
          (grid_result.best_score_, grid_result.best_params_))
    means = grid_result.cv_results_['mean_test_score']
    stds = grid_result.cv_results_['std_test_score']
    params = grid_result.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print("%f (%f) with: %r" % (mean, stdev, param))
Example #26
 def build_nn_classifier(self, X, y, params):
     return KerasClassifier(self.build_classifier,
                            input_dim=X.shape[1],
                            epochs=200,
                            batch_size=8,
                            verbose=1 if self.verbose else 0,
                            **params)
Example #27
def get_keras_classifier_pipeline(data):
    """
    Keras Classifier: https://www.tensorflow.org/api_docs/python/tf/keras/wrappers/scikit_learn/KerasClassifier
    """

    # Infer the feature-vector size by passing one sample through the pipeline to transform it
    if config.INFER_KERAS_INPUT_SHAPE:
        spy = utils.Pipeline_Spy()
        pipeline = create_classifier_pipeline(spy, data)

        # Need to represent the single data sample as a 1 by num_features array, not a 1-dimensional vector num_features long
        data_sample = np.array(data.iloc[1, :])[np.newaxis, ...]
        print("Original data shape: {}".format(data_sample.shape))

        feature_vector_transformed = pipeline.fit_transform(data_sample)[0]
        print("Transformed data shape: {}".format(
            feature_vector_transformed.shape))

        feature_vector_input_length = len(feature_vector_transformed)

        print("Inferred feature vector length for Keras model: {}".format(
            feature_vector_input_length))
    else:
        feature_vector_input_length = config.KERAS_INPUT_SHAPE

    clf = KerasClassifier(build_fn=create_keras_model,
                          input_dim=feature_vector_input_length,
                          epochs=150,
                          batch_size=32)

    return create_classifier_pipeline(clf, data)
Example #28
def train_gender(X, Y, X_test, train=True, epoch=10, batch_size=1024):
    # convert the classes to 0 and 1
    encoder = LabelEncoder()
    encoder.fit(Y)
    Y_encoded = encoder.transform(Y)
    if train:
        scaler = StandardScaler()
        scaler.fit(X)
        X = scaler.transform(X)
        model = create_gender_model()
        model.fit(X,
                  Y_encoded,
                  validation_split=0.1,
                  batch_size=batch_size,
                  epochs=epoch)

        X_test = scaler.transform(X_test)
        y_pre = model.predict(X_test)
        threshold = 0.5
        y_pred_gender = np.where(y_pre > threshold, 1, 0)
        return y_pred_gender
    else:
        estimators = []
        estimators.append(('standardize', StandardScaler()))
        estimators.append(('mlp',
                           KerasClassifier(build_fn=create_gender_model,
                                           epochs=epoch,
                                           batch_size=batch_size,
                                           verbose=0)))
        pipeline = Pipeline(estimators)
        kfold = StratifiedKFold(n_splits=5, shuffle=True)
        results = cross_val_score(pipeline, X, Y_encoded, cv=kfold)
        print("Baseline: %.2f%% (%.2f%%)" %
              (results.mean() * 100, results.std() * 100))
Example #29
def main():
    args = parse_option()
    print(args)


    x_train, y_train = mnist_reader.load_mnist('data/fashion', kind='train')
    x_test, y_test = mnist_reader.load_mnist('data/fashion', kind='t10k')
    class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
    x_train=x_train.reshape(x_train.shape[0], 28, 28, 1)
    x_test=x_test.reshape(x_test.shape[0], 28, 28 ,1)
    x_train = x_train / 255.0
    x_test = x_test / 255.0
    y_train=keras.utils.to_categorical(y_train)
    y_test=keras.utils.to_categorical(y_test)
    num_classes = 10


    print("Grid search for batch_size,batch norm and learning rate")
    model = KerasClassifier(build_fn=build_model,,epochs=40,verbose=1)
    batch_size = [32,64,128]
    lr = [0.01,0.001]
    use_bn = [True,False]
    param_grid = dict(batch_size=batch_size, lr=lr,use_bn=use_bn)
    grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3)
    grid_result = grid.fit(x_train, y_train)
    print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
    means = grid_result.cv_results_['mean_test_score']
    stds = grid_result.cv_results_['std_test_score']
    params = grid_result.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print("%f (%f) with: %r" % (mean, stdev, param))
Example #30
 def build_nn_classifier(self, X, y, params):
     return KerasClassifier(build_fn=self.nn_classifier_model,
                            input_dim=X.shape[1],
                            num_classes=len(np.unique(y)),
                            epochs=200,
                            batch_size=8,
                            verbose=1 if self.verbose else 0,
                            **params)