Example 1
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# ELM is an external scikit-learn-style implementation taking hid_num


def extremLM(data, target, checkHiddenPoints,
             fileName):  # Extreme Learning Machine
    scalar = 10000  # targets are scaled up and cast to integers below,
    # apparently because this ELM implementation expects integer labels
    X = data.iloc[:, :-1]
    X_norm = (X - X.mean()) / (X.max() - X.min())  # centre and scale by range
    y = data[target]
    y_int = (y * scalar).apply(np.int64)
    elmList = []
    if checkHiddenPoints:
        inter_num = 100
        hiddenNum = [1000, 3000, 5000, 6000, 8000,
                     10000]  # candidate numbers of hidden neurons
        errorList = []
        for hidNum in hiddenNum:
            total_error = 0
            for i in range(inter_num):
                X_train, X_test, y_train, y_test = train_test_split(
                    X_norm, y_int, test_size=0.2)
                elm = ELM(hid_num=hidNum).fit(X_train, y_train)
                y_pred = elm.predict(X_test)
                # RMSE on this test fold, rescaled back to the original units
                squared_error = 0
                for k in range(len(y_pred)):
                    squared_error += (y_pred[k] - y_test.values[k])**2
                rmse = np.sqrt(squared_error / len(y_pred)) / scalar
                total_error = total_error + rmse
                print("RMSE:", rmse)  # Root Mean Squared Error
            print("This is average RMSE for ELM:", total_error / inter_num)
            errorList.append(total_error / inter_num)
        # Plot
        x_pos = list(range(len(hiddenNum)))
        plt.bar(x_pos, errorList, align='center', alpha=0.5)
        plt.grid()
        plt.ylabel('Root Mean Squared Error')
        plt.xticks(x_pos, hiddenNum)
        plt.xlabel('Number of hidden neurons')
        plt.title('RMSE for different numbers of hidden neurons')
        plt.show()
    else:
        for j in range(100):
            X_train, X_test, y_train, y_test = train_test_split(X_norm,
                                                                y_int,
                                                                test_size=0.2)
            elm = ELM(hid_num=6000).fit(X_train, y_train)
            y_pred = elm.predict(X_test)
            squared_error = 0
            print("This is prediction values:", y_pred / scalar)
            print("This is iteration number:", j)
            for i in range(len(y_pred)):
                squared_error += (y_pred[i] - y_test.values[i])**2
            rmse = np.sqrt(squared_error / len(y_pred)) / scalar
            elmList.append(rmse)
        return elmList
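As an aside, the per-sample RMSE loops above can be collapsed into one vectorized NumPy expression; a minimal sketch, assuming the same y_pred, y_test and scalar as in the function:

# Vectorized equivalent of the RMSE loop (same variables as above).
rmse = np.sqrt(np.mean((y_pred - y_test.values) ** 2)) / scalar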
Example 2
import numpy as np
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
# ELM is a custom class defined elsewhere; see the note on skf after
# this function


def make_experiments_without_extract(X, y):
    svc_no_extract_scores = []
    knn_no_extract_scores = []
    gnb_no_extract_scores = []
    dt_no_extract_scores = []
    mlp_no_extract_scores = []
    elm_no_extract_scores = []

    for train_index, test_index in skf.split(X, y):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        svc_clf = SVC(random_state=444)
        knn_clf = KNeighborsClassifier()
        gnb_clf = GaussianNB()
        dt_clf = DecisionTreeClassifier(random_state=444)
        mlp_clf = MLPClassifier(random_state=444)
        # custom ELM, presumably (n_inputs, n_outputs, n_hidden)
        elm_clf = ELM(X_train.shape[1], 1, 1000)

        svc_pred = svc_clf.fit(X_train, y_train).predict(X_test)
        knn_pred = knn_clf.fit(X_train, y_train).predict(X_test)
        gnb_pred = gnb_clf.fit(X_train, y_train).predict(X_test)
        dt_pred = dt_clf.fit(X_train, y_train).predict(X_test)
        mlp_pred = mlp_clf.fit(X_train, y_train).predict(X_test)

        elm_clf.train(X_train, y_train[:, np.newaxis])
        elm_pred = elm_clf.predict(X_test)
        elm_pred = (elm_pred > 0.5).astype(int)  # threshold to binary labels

        # accuracy_score expects (y_true, y_pred)
        svc_no_extract_scores.append(round(accuracy_score(y_test, svc_pred), 2))
        knn_no_extract_scores.append(round(accuracy_score(y_test, knn_pred), 2))
        gnb_no_extract_scores.append(round(accuracy_score(y_test, gnb_pred), 2))
        dt_no_extract_scores.append(round(accuracy_score(y_test, dt_pred), 2))
        mlp_no_extract_scores.append(round(accuracy_score(y_test, mlp_pred), 2))
        elm_no_extract_scores.append(round(accuracy_score(y_test, elm_pred), 2))

    return [
        round(np.average(svc_no_extract_scores), 2),
        round(np.average(knn_no_extract_scores), 2),
        round(np.average(gnb_no_extract_scores), 2),
        round(np.average(dt_no_extract_scores), 2),
        round(np.average(mlp_no_extract_scores), 2),
        round(np.average(elm_no_extract_scores), 2)
    ]
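Note that skf is never defined in this function; it is presumably a cross-validation splitter created at module level. A minimal sketch of that assumption (the fold count and seed are guesses):

from sklearn.model_selection import StratifiedKFold

# Assumed module-level splitter used by make_experiments_without_extract.
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=444)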
Example 3
import numpy as np


# The ELM base class is assumed to provide _add_bias, _sigmoid,
# _random_state, fit and predict.
class MLELM(ELM):
    """
    Multi Layer Extreme Learning Machine
    """
    def __init__(self, *, hidden_neurons=None, a=1, random_state=None):
        super().__init__(hidden_neurons=hidden_neurons,
                         a=a,
                         random_state=random_state)

        self.betas = []
        self.elm = None
        self.out_num = None

    def __calc_hidden_layer(self, X):
        """Propagate X through the hidden layers trained so far.

        Args:
            X: np.array, input feature matrix
        """
        for beta in self.betas:
            X = np.dot(beta, X.T).T

        return X

    def fit(self, X, y):
        if self.hidden_neurons is None:
            # default: one hidden layer twice as wide as the input
            # (a list, since the loop below expects one size per layer)
            self.hidden_neurons = [2 * X.shape[1]]

        self.out_num = max(y)
        X = self._add_bias(X)

        # train every layer but the last as an ELM autoencoder: beta
        # reconstructs X from the random hidden activations, and its
        # transpose is reused as the layer's forward map
        for hid_num in self.hidden_neurons[:-1]:
            _X = self.__calc_hidden_layer(X)
            W = self._random_state.uniform(-1., 1., (hid_num, _X.shape[1]))
            H = np.linalg.pinv(self._sigmoid(np.dot(W, _X.T)))
            beta = np.dot(H.T, _X)
            self.betas.append(beta)

        _X = self.__calc_hidden_layer(X)

        self.elm = ELM(hidden_neurons=self.hidden_neurons[-1])
        self.elm.fit(_X, y)

        return self

    def predict(self, X):
        X = self.__calc_hidden_layer(self._add_bias(X))
        return self.elm.predict(X)
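A short, hypothetical usage sketch for this class, assuming the ELM base class accepts the constructor arguments forwarded above (the data and layer sizes here are made up):

import numpy as np

# hidden_neurons lists the width of each layer; the last entry is used
# for the final ELM, the others for the stacked autoencoder layers.
X = np.random.rand(100, 8)
y = np.random.randint(0, 3, 100)
clf = MLELM(hidden_neurons=[32, 32, 64], random_state=42).fit(X, y)
print(clf.predict(X[:5]))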
Example 4
# A second MLELM variant that uses the global NumPy random state and a
# base ELM taking a hid_num constructor argument.
class MLELM(ELM):
    """
    Multi Layer Extreme Learning Machine
    """

    def __init__(self, hidden_units, a=1):
        self.hidden_units = hidden_units
        self.betas = []
        self.a = a

    def __calc_hidden_layer(self, X):
        """Propagate X through the hidden layers trained so far.

        Args:
            X: np.array, input feature matrix
        """
        for beta in self.betas:
            X = np.dot(beta, X.T).T
        return X

    def fit(self, X, y):
        self.out_num = max(y)
        X = self._add_bias(X)

        for hid_num in self.hidden_units[:-1]:
            _X = self.__calc_hidden_layer(X)
            # random input weights for this layer
            W = np.random.uniform(-1., 1., (hid_num, _X.shape[1]))
            _H = np.linalg.pinv(self._sigmoid(np.dot(W, _X.T)))
            beta = np.dot(_H.T, _X)
            self.betas.append(beta)

        _X = self.__calc_hidden_layer(X)

        self.elm = ELM(hid_num=self.hidden_units[-1])
        self.elm.fit(_X, y)

        return self

    def predict(self, X):
        X = self.__calc_hidden_layer(self._add_bias(X))
        return self.elm.predict(X)
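In both MLELM variants, each intermediate layer is trained as an ELM autoencoder. With random input weights $W$ and hidden activations $H = \sigma(W X^{\mathsf T})$, the loop computes

$$\beta = (H^{\mathsf T})^{\dagger} X,$$

the minimum-norm least-squares solution of $H^{\mathsf T} \beta \approx X$, so the hidden activations reconstruct the input; $\beta^{\mathsf T}$ is then reused as the layer's forward projection in __calc_hidden_layer.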
Example 5
def make_experiments_with_lda(X, y):
    # per-fold score lists (initialised here so the function is self-contained)
    svc_lda_scores = []
    knn_lda_scores = []
    gnb_lda_scores = []
    dt_lda_scores = []
    mlp_lda_scores = []
    elm_lda_scores = []

    for train_index, test_index in skf.split(X, y):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        # fit LDA on the training fold only, then project the test fold
        lda = LinearDiscriminantAnalysis()
        X_lda = lda.fit_transform(X_train, y_train)
        X_test_lda = lda.transform(X_test)

        svc_clf = SVC(random_state=444)
        knn_clf = KNeighborsClassifier()
        gnb_clf = GaussianNB()
        dt_clf = DecisionTreeClassifier(random_state=444)
        mlp_clf = MLPClassifier(random_state=444)
        elm_clf = ELM(X_lda.shape[1], 1, 1000)

        svc_pred = svc_clf.fit(X_lda, y_train).predict(X_test_lda)
        knn_pred = knn_clf.fit(X_lda, y_train).predict(X_test_lda)
        gnb_pred = gnb_clf.fit(X_lda, y_train).predict(X_test_lda)
        dt_pred = dt_clf.fit(X_lda, y_train).predict(X_test_lda)
        mlp_pred = mlp_clf.fit(X_lda, y_train).predict(X_test_lda)

        elm_clf.train(X_lda, y_train[:, np.newaxis])
        elm_pred = elm_clf.predict(X_test_lda)
        elm_pred = (elm_pred > 0.5).astype(int)

        # accuracy_score expects (y_true, y_pred)
        svc_lda_scores.append(round(accuracy_score(y_test, svc_pred), 2))
        knn_lda_scores.append(round(accuracy_score(y_test, knn_pred), 2))
        gnb_lda_scores.append(round(accuracy_score(y_test, gnb_pred), 2))
        dt_lda_scores.append(round(accuracy_score(y_test, dt_pred), 2))
        mlp_lda_scores.append(round(accuracy_score(y_test, mlp_pred), 2))
        elm_lda_scores.append(round(accuracy_score(y_test, elm_pred), 2))

    return [
        round(np.average(svc_lda_scores), 2),
        round(np.average(knn_lda_scores), 2),
        round(np.average(gnb_lda_scores), 2),
        round(np.average(dt_lda_scores), 2),
        round(np.average(mlp_lda_scores), 2),
        round(np.average(elm_lda_scores), 2)
    ]
Example 6
hit_rates = []
no_of_attributes = dataset.shape[1] - 1
# number of distinct class labels in the last column
no_of_classes = len(np.unique(dataset[:, no_of_attributes]))

# prepend a bias column of -1 to every sample
no_rows = dataset.shape[0]
dataset = np.c_[-1 * np.ones(no_rows), dataset]

# perceptron = Perceptron(no_of_classes, no_of_attributes, 5, 'logistic')

for j in range(20):  # 20 independent realizations
    print("realization %d" % j)
    train_X, train_y, test_X, test_y = Classifier.train_test_split(dataset)
    train_X = np.array(train_X, dtype=float)
    test_X = np.array(test_X, dtype=float)

    hidden_units = ELM.model_training(no_of_classes, no_of_attributes, train_X,
                                      train_y)
    elm = ELM(no_of_classes, no_of_attributes, hidden_units)
    elm.train(train_X, train_y)
    predictions = elm.predict(test_X)
    hit_rates.append(elm.evaluate(test_y, predictions))
    print(elm.confusion_matrix(test_y, predictions))
    # Perceptron.plot_decision_boundaries(train_X, train_y, test_X, test_y, perceptron, hidden_neurons, j)

print('hit rates: {}'.format(hit_rates))
print('accuracy: {}'.format(np.mean(hit_rates)))
print('std: {}'.format(np.std(hit_rates)))
# Perceptron.show_plot_decision_boundaries()
Example 7
best = [[], 0]  # [confusion matrix, accuracy] of the best fold so far
accuracy = np.zeros(iters)  # per-iteration mean accuracy, filled below
for i in range(iters):
    CVO = KFold(n_splits=n_folds, shuffle=True)
    acc_values = []
    for train_index, test_index in CVO.split(X):
        X_train, X_test = X[train_index], X[test_index]
        Y_train, Y_test = Y[train_index], Y[test_index]

        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train)
        X_test = scaler.transform(X_test)

        elm = ELM(hidden_units=20, activation="log")
        #elm = ELM(hidden_units = 20, activation="tan")
        elm.fit(X_train, Y_train)
        Y_hat = elm.predict(X_test)
        Y_hat = np.round(Y_hat)
        # fraction of test samples whose predicted class matches the truth
        acc_values.append(
            np.mean(np.argmax(Y_hat, axis=1) == np.argmax(Y_test, axis=1)))
        if acc_values[-1] > best[1]:
            best[0] = confusion_matrix(np.argmax(Y_test, axis=1),
                                       np.argmax(Y_hat, axis=1))
            best[1] = acc_values[-1]
    accuracy[i] = np.mean(acc_values)

print("Accuracy", np.mean(accuracy))
print("Standard Deviation (accuracy)", np.std(accuracy, axis=0))
Example 8
mse = np.zeros((iters, 1))
rmse = np.zeros((iters, 1))
for i in range(iters):
    X_train, X_test, Y_train, Y_test = train_test_split(dataset[:, :1],
                                                        dataset[:, 1],
                                                        test_size=0.33)
    Y_train = Y_train.reshape((Y_train.shape[0], 1))
    Y_test = Y_test.reshape((Y_test.shape[0], 1))

    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    elm = ELM(hidden_units=8)
    elm.fit(X_train, Y_train)
    Y_hat = elm.predict(X_test)

    mse[i] = ((Y_test - Y_hat)**2).mean(axis=0)
    rmse[i] = mse[i]**(1. / 2)

print("Average MSE", np.mean(mse, axis=0))
print("Average RMSE", np.mean(rmse, axis=0))
print("Standard Deviation (MSE)", np.std(mse, axis=0))
print("Standard Deviation (RMSE)", np.std(rmse, axis=0))

# plot the fitted curve over the (scaled) input range of the full dataset
xx = dataset[:, 0:1]
xx = scaler.transform(xx)
yy = dataset[:, 1]
fig, ax = plt.subplots()
Z = elm.predict(xx)
plt.plot(xx, Z, "-", label="ELM output")
plt.legend()
plt.show()
Example 9
#neural_network.add_neuron(9, "linear")
neural_network.add_neuron(100, "sigmoid")

output_classes = []
print(len(train))
print(datetime.datetime.now())
for item in train.values:
    # all columns except the last are features; the last is the target class
    neural_network.train(item[:-1])
    output_classes.append(item[-1])
neural_network.update_beta(output_classes)  # compute the output weights (beta)
print(datetime.datetime.now())

error_values = []
for item in h.test_dataset.values:
    predicted = neural_network.predict(item[:-1])
    print(predicted)
    actual_value = item[-1]
    print(actual_value)
    error_values.append((actual_value - predicted)**2)  # squared error

print("MSE (Mean Squared Error): ", mean(error_values))

if debug:
    print("checking correctness of shapes:")
    print(neural_network.input_weights[0].shape)
    print(len(neural_network.input_weights))
    print(neural_network.H.shape)
    print(len(neural_network.bias))
Example 10
def main(args):
    # ===============================
    # Load dataset
    # ===============================
    n_classes = 10
    (x_train, t_train), (x_test, t_test) = mnist.load_data()

    # ===============================
    # Preprocess
    # ===============================
    x_train = x_train.astype(np.float32) / 255.
    x_train = x_train.reshape(-1, 28**2)
    x_test = x_test.astype(np.float32) / 255.
    x_test = x_test.reshape(-1, 28**2)
    t_train = to_categorical(t_train, n_classes).astype(np.float32)
    t_test = to_categorical(t_test, n_classes).astype(np.float32)

    # ===============================
    # Instantiate ELM
    # ===============================
    model = ELM(
        n_input_nodes=28**2,
        n_hidden_nodes=args.n_hidden_nodes,
        n_output_nodes=n_classes,
        loss=args.loss,
        activation=args.activation,
        name='elm',
    )

    # ===============================
    # Training
    # ===============================
    model.fit(x_train, t_train)
    train_loss, train_acc = model.evaluate(x_train,
                                           t_train,
                                           metrics=['loss', 'accuracy'])
    print('train_loss: %f' % train_loss)
    print('train_acc: %f' % train_acc)

    # ===============================
    # Validation
    # ===============================
    val_loss, val_acc = model.evaluate(x_test,
                                       t_test,
                                       metrics=['loss', 'accuracy'])
    print('val_loss: %f' % val_loss)
    print('val_acc: %f' % val_acc)

    # ===============================
    # Prediction
    # ===============================
    x = x_test[:10]
    t = t_test[:10]
    y = softmax(model.predict(x))  # map raw outputs to class probabilities

    for i in range(len(y)):
        print('---------- prediction %d ----------' % (i + 1))
        class_pred = np.argmax(y[i])
        prob_pred = y[i][class_pred]
        class_true = np.argmax(t[i])
        print('prediction:')
        print('\tclass: %d, probability: %f' % (class_pred, prob_pred))
        print('\tclass (true): %d' % class_true)

    # ===============================
    # Save model
    # ===============================
    print('saving model...')
    model.save('model.h5')
    del model

    # ===============================
    # Load model
    # ===============================
    print('loading model...')
    model = load_model('model.h5')
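This script expects an args object; a hypothetical entry point might look like the following (argument names are inferred from the attributes accessed above, defaults are made up):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--n_hidden_nodes', type=int, default=1024)
    parser.add_argument('--loss', default='mean_squared_error')
    parser.add_argument('--activation', default='sigmoid')
    main(parser.parse_args())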