Example 1
# Assumes these imports, a module-level cross-validator `skf` (e.g. an
# sklearn StratifiedKFold instance), and the project's custom ELM class:
import numpy as np
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score


def make_experiments_without_extract(X, y):
    # Per-fold accuracy scores for each of the six classifiers
    svc_no_extract_scores = []
    knn_no_extract_scores = []
    gnb_no_extract_scores = []
    dt_no_extract_scores = []
    mlp_no_extract_scores = []
    elm_no_extract_scores = []

    for train_index, test_index in skf.split(X, y):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        svc_clf = SVC(random_state=444)
        knn_clf = KNeighborsClassifier()
        gnb_clf = GaussianNB()
        dt_clf = DecisionTreeClassifier(random_state=444)
        mlp_clf = MLPClassifier(random_state=444)
        elm_clf = ELM(X_train.shape[1], 1, 1000)

        svc_pred = svc_clf.fit(X_train, y_train).predict(X_test)
        knn_pred = knn_clf.fit(X_train, y_train).predict(X_test)
        gnb_pred = gnb_clf.fit(X_train, y_train).predict(X_test)
        dt_pred = dt_clf.fit(X_train, y_train).predict(X_test)
        mlp_pred = mlp_clf.fit(X_train, y_train).predict(X_test)

        # The custom ELM expects a 2-D target and returns scores in [0, 1]
        elm_clf.train(X_train, y_train[:, np.newaxis])
        elm_pred = elm_clf.predict(X_test)
        elm_pred = (elm_pred > 0.5).astype(int)

        # accuracy_score takes y_true first, then y_pred
        svc_no_extract_scores.append(round(accuracy_score(y_test, svc_pred), 2))
        knn_no_extract_scores.append(round(accuracy_score(y_test, knn_pred), 2))
        gnb_no_extract_scores.append(round(accuracy_score(y_test, gnb_pred), 2))
        dt_no_extract_scores.append(round(accuracy_score(y_test, dt_pred), 2))
        mlp_no_extract_scores.append(round(accuracy_score(y_test, mlp_pred), 2))
        elm_no_extract_scores.append(round(accuracy_score(y_test, elm_pred), 2))

    return [
        round(np.average(svc_no_extract_scores), 2),
        round(np.average(knn_no_extract_scores), 2),
        round(np.average(gnb_no_extract_scores), 2),
        round(np.average(dt_no_extract_scores), 2),
        round(np.average(mlp_no_extract_scores), 2),
        round(np.average(elm_no_extract_scores), 2)
    ]
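A minimal driver for the function above might look like this; the StratifiedKFold setup and the synthetic data are illustrative assumptions (the original defines `skf` and loads its data elsewhere), and the custom ELM class must be importable for the call to succeed.

from sklearn.datasets import make_classification
from sklearn.model_selection import StratifiedKFold

# Assumed setup: the snippet expects `skf` at module scope
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=444)

X, y = make_classification(n_samples=200, n_features=10, random_state=444)
print(make_experiments_without_extract(X, y))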
Example 2
def make_experiments_with_lda(X, y):
    # Same imports and module-level `skf` as Example 1. The score lists
    # must be initialized here; the original relied on them existing
    # elsewhere, which raises a NameError when the function runs alone.
    svc_lda_scores = []
    knn_lda_scores = []
    gnb_lda_scores = []
    dt_lda_scores = []
    mlp_lda_scores = []
    elm_lda_scores = []

    for train_index, test_index in skf.split(X, y):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        # Fit LDA on the training fold only, then project both folds
        lda = LinearDiscriminantAnalysis()
        X_lda = lda.fit_transform(X_train, y_train)
        X_test_lda = lda.transform(X_test)

        svc_clf = SVC(random_state=444)
        knn_clf = KNeighborsClassifier()
        gnb_clf = GaussianNB()
        dt_clf = DecisionTreeClassifier(random_state=444)
        mlp_clf = MLPClassifier(random_state=444)
        elm_clf = ELM(X_lda.shape[1], 1, 1000)

        svc_pred = svc_clf.fit(X_lda, y_train).predict(X_test_lda)
        knn_pred = knn_clf.fit(X_lda, y_train).predict(X_test_lda)
        gnb_pred = gnb_clf.fit(X_lda, y_train).predict(X_test_lda)
        dt_pred = dt_clf.fit(X_lda, y_train).predict(X_test_lda)
        mlp_pred = mlp_clf.fit(X_lda, y_train).predict(X_test_lda)

        elm_clf.train(X_lda, y_train[:, np.newaxis])
        elm_pred = elm_clf.predict(X_test_lda)
        elm_pred = (elm_pred > 0.5).astype(int)

        # accuracy_score takes y_true first, then y_pred
        svc_lda_scores.append(round(accuracy_score(y_test, svc_pred), 2))
        knn_lda_scores.append(round(accuracy_score(y_test, knn_pred), 2))
        gnb_lda_scores.append(round(accuracy_score(y_test, gnb_pred), 2))
        dt_lda_scores.append(round(accuracy_score(y_test, dt_pred), 2))
        mlp_lda_scores.append(round(accuracy_score(y_test, mlp_pred), 2))
        elm_lda_scores.append(round(accuracy_score(y_test, elm_pred), 2))

    return [
        round(np.average(svc_lda_scores), 2),
        round(np.average(knn_lda_scores), 2),
        round(np.average(gnb_lda_scores), 2),
        round(np.average(dt_lda_scores), 2),
        round(np.average(mlp_lda_scores), 2),
        round(np.average(elm_lda_scores), 2)
    ]
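Note that LinearDiscriminantAnalysis can produce at most n_classes - 1 components, so with binary labels every classifier above trains on a single LDA feature. A quick self-contained check:

import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

rng = np.random.default_rng(444)
X = rng.random((100, 10))
y = rng.integers(0, 2, 100)                    # two classes
X_lda = LinearDiscriminantAnalysis().fit_transform(X, y)
print(X_lda.shape)                             # (100, 1): one component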
Example 3
# (The excerpt opens mid-function, inside a per-class split helper that
#  appends `i` as the class label for the i-th test block. np.int is
#  removed in recent NumPy; plain int is the correct dtype.)
                                np.zeros(dtype=int, shape=ith_total_test) +
                                i,
                                axis=0)
    return train_input, train_target, test_input, test_target
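Only the tail of `split_data` survives in this excerpt. Under the reading that it splits each class proportionally and rebuilds integer labels, a hedged reconstruction of the whole helper (every name not visible in the fragment is an assumption) might be:

import numpy as np

def split_data(x, t, train_frac, test_frac):
    # Hedged reconstruction: per-class proportional split. Only the last
    # append and the return statement are visible in the original fragment;
    # test_frac is kept for signature parity (the remainder goes to test).
    n_features = x.shape[1]
    train_input = np.empty((0, n_features))
    train_target = np.empty(0, dtype=int)
    test_input = np.empty((0, n_features))
    test_target = np.empty(0, dtype=int)
    for i in np.unique(t).astype(int):
        xi = x[t == i]
        n_train = int(round(train_frac * len(xi)))
        ith_total_test = len(xi) - n_train
        train_input = np.append(train_input, xi[:n_train], axis=0)
        train_target = np.append(train_target,
                                 np.zeros(dtype=int, shape=n_train) + i,
                                 axis=0)
        test_input = np.append(test_input, xi[n_train:], axis=0)
        test_target = np.append(test_target,
                                np.zeros(dtype=int, shape=ith_total_test) + i,
                                axis=0)
    return train_input, train_target, test_input, test_target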


# data preparation (assumes: from numpy import genfromtxt)
my_data = genfromtxt('iris.csv', delimiter=',')
x_inp = my_data[:, 0:-1]
t_inp = my_data[:, -1]

train_input, train_target, test_input, test_target = split_data(
    x_inp, t_inp, 0.6, 0.4)

e = ELM(50)                      # 50 hidden nodes
e.train(train_input, train_target)
e.test(test_input, test_target)
print(e.train_acc)
print(e.test_acc)
"""
# start for article on https://fennialesmana.com/extreme-learning-machine/
# 1. Prepare the input data (x) and target data (t)
x = np.array([[-1, -5, 5, 5], [2, -4, 2, 3]])
t = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
# 2. Prepare the number of hidden nodes, input weight (w), and bias (b) randomly
w = np.array([[0.5, 0.2], [0.7, -0.4], [-0.6, 0.3]])
b = np.array([[0.6], [0.7], [0.4]])
# 3. Calculate the output of hidden layer (H)
H = np.dot(w, x) + b
H = (1/(1+(numpy.matlib.exp(H*-1)))).transpose()
# 4. Calculate the weight of hidden to output layer using zero error equation
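# (Step 4 is cut off in this excerpt. Under the standard ELM zero-error
#  formulation H @ beta = t.T, a hedged reconstruction of the missing
#  line, using the shapes defined above, would be:)
# beta = np.dot(np.linalg.pinv(H), t.transpose())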
Example 4
# Assumes numpy as np, a loaded `dataset` array (features plus an integer
# label column), and the project's ELM and Classifier classes.
hit_rates = []
no_of_attributes = dataset.shape[1] - 1
# Number of distinct labels in the last column; len() on the single cell
# dataset[0, no_of_attributes] would raise a TypeError on a numeric array.
no_of_classes = len(np.unique(dataset[:, no_of_attributes]))

# insert bias column of -1s
no_rows = dataset.shape[0]
dataset = np.c_[-1 * np.ones(no_rows), dataset]

# perceptron = Perceptron(no_of_classes, no_of_attributes, 5, 'logistic')

for j in range(0, 20):
    print("realization %d" % j)
    train_X, train_y, test_X, test_y = Classifier.train_test_split(dataset)
    train_X = np.array(train_X, dtype=float)
    test_X = np.array(test_X, dtype=float)

    hidden_units = ELM.model_training(no_of_classes, no_of_attributes, train_X,
                                      train_y)
    elm = ELM(no_of_classes, no_of_attributes, hidden_units)
    elm.train(train_X, train_y)
    predictions = elm.predict(test_X)
    hit_rates.append(elm.evaluate(test_y, predictions))
    print(elm.confusion_matrix(test_y, predictions))
    # Perceptron.plot_decision_boundaries(train_X, train_y, test_X, test_y, perceptron, hidden_neurons, j)

print('hit rates: {}'.format(hit_rates))
print('accuracy: {}'.format(np.mean(hit_rates)))
print('std: {}'.format(np.std(hit_rates)))
# Perceptron.show_plot_decision_boundaries()
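`ELM.model_training` is not shown in this excerpt; it appears to pick the hidden-layer size by model selection. A purely illustrative sketch of such a search, reusing only the calls visible above (this is an assumption, not the project's actual implementation):

def select_hidden_units(no_of_classes, no_of_attributes, data,
                        candidates=(5, 10, 20, 50, 100)):
    # Hypothetical: evaluate each candidate size on a fresh random split
    # and keep the one with the best hit rate.
    best_units, best_score = candidates[0], -1.0
    for units in candidates:
        tr_X, tr_y, va_X, va_y = Classifier.train_test_split(data)
        model = ELM(no_of_classes, no_of_attributes, units)
        model.train(np.array(tr_X, dtype=float), tr_y)
        score = model.evaluate(va_y, model.predict(np.array(va_X, dtype=float)))
        if score > best_score:
            best_units, best_score = units, score
    return best_units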
Example 5
# Assumed imports for this excerpt: unittest, numpy as np, pycassa,
# Mock/patch (from mock or unittest.mock), and ELM/ELMData from the
# project's elm module.
class ELMTest(unittest.TestCase):
    # We will test both FP and FN versions of the ELM
    def setUp(self):
        self.mockConnectionPool = Mock(spec=pycassa.pool.ConnectionPool)
        self.minimizationType = 'FP'  # false-positive minimization variant
        self.numberNeurons = 100


    def test_init(self):
        # Check that wrong input raises exception
        self.assertRaises(Exception, ELM, 'SomeString', self.mockConnectionPool, self.numberNeurons)
        self.assertRaises(Exception, ELM, self.minimizationType, 'NotAConnectionPool', self.numberNeurons)
        self.assertRaises(Exception, ELM, self.minimizationType, self.mockConnectionPool, 'NotANumber')
        
    @patch('elm.pycassa.columnfamily.ColumnFamily')    
    def test_createTrainingData(self, mockFunction):
        returnedMock = Mock(spec=pycassa.ColumnFamily)
        returnedMock.get = mock_columnfamilyget
        returnedMock.get_range = mock_columnfamilyget_range
        returnedMock.batch = Mock()
        mockbatchinsert = Mock()
        mockbatchinsert.insert = mock_batchinsert
        mockbatch = Mock()
        mockbatch.__enter__ = Mock(return_value=mockbatchinsert)
        mockbatch.__exit__ = Mock(return_value=False)
        mockFunction.return_value = returnedMock
        returnedMock.batch.return_value = mockbatch
        self.myELM = ELM(self.minimizationType, self.mockConnectionPool, self.numberNeurons)
        self.myELM.createTrainingData()

    @patch('elm.pycassa.columnfamily.ColumnFamily')
    def test_storeTrainingDataToCassandra(self, mockFunction):
        returnedMock = Mock(spec=pycassa.ColumnFamily)
        returnedMock.get = mock_columnfamilyget
        returnedMock.get_range = mock_columnfamilyget_range
        returnedMock.batch = Mock()
        mockbatchinsert = Mock()
        mockbatchinsert.insert = mock_batchinsert
        mockbatch = Mock()
        mockbatch.__enter__ = Mock(return_value=mockbatchinsert)
        mockbatch.__exit__ = Mock(return_value=False)
        mockFunction.return_value = returnedMock
        returnedMock.batch.return_value = mockbatch
        self.myELM = ELM(self.minimizationType, self.mockConnectionPool, self.numberNeurons)
        self.myELM.createTrainingData()
        self.myELM.storeTrainingDataToCassandra()

    @patch('elm.pycassa.columnfamily.ColumnFamily')
    def test_train(self, mockFunction):
        returnedMock = Mock(spec=pycassa.ColumnFamily)
        returnedMock.get = mock_columnfamilyget
        returnedMock.batch = Mock()
        mockbatchinsert = Mock()
        mockbatchinsert.insert = mock_batchinsert
        mockbatch = Mock()
        mockbatch.__enter__ = Mock(return_value=mockbatchinsert)
        mockbatch.__exit__ = Mock(return_value=False)
        mockFunction.return_value = returnedMock
        returnedMock.batch.return_value = mockbatch
        self.myELM = ELM(self.minimizationType, self.mockConnectionPool, self.numberNeurons)
        self.myELM.train()


    @patch('elm.pycassa.columnfamily.ColumnFamily')
    def test_verdict(self, mockFunction):
        returnedMock = Mock(spec=pycassa.ColumnFamily)
        returnedMock.get = mock_columnfamilyget
        mockFunction.return_value = returnedMock

        self.myELM = ELM(self.minimizationType, self.mockConnectionPool, self.numberNeurons)
        self.myELM.train()

        inputTestData = np.loadtxt('./test_input_data_training_elm.txt').reshape((633, 9))
        outputTestData = np.loadtxt('./test_output_data_training_elm.txt').reshape((633, 1))
        ELMTestingData = ELMData(inputTestData, outputTestData, [], [])
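The three ColumnFamily mocks above repeat the same scaffolding line for line; a small setup helper (a refactoring sketch, not part of the original test suite) would remove the duplication:

    def _make_cf_mock(self, with_get_range=True):
        # Reproduces the mock scaffolding used by the tests above.
        cf = Mock(spec=pycassa.ColumnFamily)
        cf.get = mock_columnfamilyget
        if with_get_range:
            cf.get_range = mock_columnfamilyget_range
        batch_insert = Mock()
        batch_insert.insert = mock_batchinsert
        batch_cm = Mock()
        batch_cm.__enter__ = Mock(return_value=batch_insert)
        batch_cm.__exit__ = Mock(return_value=False)
        cf.batch = Mock(return_value=batch_cm)
        return cf

Each test body would then start with `mockFunction.return_value = self._make_cf_mock()`.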
Example 6
# Assumes: import datetime; from statistics import mean; plus the project's
# Helper and ELM classes and a `datasets` list of dataset names.
h = Helper()
h.get_dataset(datasets[3])

train, test = h.split_dataset()

neural_network = ELM(input_size=13, output_layer_size=1)

# neural_network.add_neuron(9, "linear")
neural_network.add_neuron(100, "sigmoid")

output_classes = []
print(len(train))                           # training set size
print(datetime.datetime.now())
for item in train.values:
    neural_network.train(item[:-1])         # features: all but the last column
    output_classes.append(item[-1])         # target: the last column
neural_network.update_beta(output_classes)  # compute the output weights
print(datetime.datetime.now())

error_values = []
for item in h.test_dataset.values:
    predicted = neural_network.predict(item[:-1])
    print(predicted)
    actual_value = item[-1]
    print(actual_value)
    error_values.append((actual_value - predicted) ** 2)  # squared error

print("MSE (Mean Squared Error): ", mean(error_values))