def test_ConfusionELM_Multilabel(self):
     T = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])
     Y = np.array([[1, 1], [1, 0], [0, 1], [0, 1]])
     elm = ELM(1, 2)
     elm.classification = "ml"
     C = elm.confusion(T, Y)
     np.testing.assert_allclose(C, np.array([[2, 1], [0, 2]]))
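For reference, the expected counts can be reproduced with plain numpy: for 0/1 indicator targets, cross-tabulating true rows against predicted rows gives the same confusion matrix (a minimal sketch, independent of hpelm):

import numpy as np

T = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])
Y = np.array([[1, 1], [1, 0], [0, 1], [0, 1]])
C = T.T @ Y  # C[i, j] = samples of true class i predicted as class j
print(C)     # [[2 1]
             #  [0 2]]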
Example #2
 def test_CrossValidation_ReturnsError(self):
     model = ELM(5, 2)
     model.add_neurons(10, 'tanh')
     X = np.random.rand(100, 5)
     T = np.random.rand(100, 2)
     err = model.train(X, T, 'CV', k=3)
     self.assertIsNotNone(err)
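The 'CV' flag makes train() return a cross-validation error. A rough hand-rolled equivalent, shown only to illustrate what that scalar measures (a sketch; hpelm's own splitting and error metric may differ in detail):

import numpy as np
from hpelm import ELM

X = np.random.rand(100, 5)
T = np.random.rand(100, 2)
k = 3
folds = np.array_split(np.random.permutation(100), k)
errors = []
for i in range(k):
    test_idx = folds[i]
    train_idx = np.hstack([folds[j] for j in range(k) if j != i])
    model = ELM(5, 2)
    model.add_neurons(10, 'tanh')
    model.train(X[train_idx], T[train_idx])
    Y = model.predict(X[test_idx])
    errors.append(np.mean((T[test_idx] - Y) ** 2))
print(np.mean(errors))  # comparable in spirit to the err returned above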
Example #3
class HPELMNN(Classifier):
    
    def __init__(self):
        self.__hpelm = None
    
    @staticmethod
    def name():
        return "hpelmnn"

    def train(self, X, Y, class_number=-1):
        class_count = max(np.unique(Y).size, class_number)
        feature_count = X.shape[1]
        self.__hpelm = ELM(feature_count, class_count, 'wc')
        self.__hpelm.add_neurons(feature_count, "sigm")

        Y_arr = Y.reshape(-1, 1)
        enc = OneHotEncoder()
        enc.fit(Y_arr)
        Y_OHE = enc.transform(Y_arr).toarray()

        out_fd = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        self.__hpelm.train(X, Y_OHE)
        sys.stdout = out_fd

    def predict(self, X):
        Y_predicted = self.__hpelm.predict(X)
        return Y_predicted
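A hypothetical round trip with the wrapper above on synthetic data; the class itself assumes `import sys, os`, `import numpy as np`, `from hpelm import ELM` and `from sklearn.preprocessing import OneHotEncoder` at module level:

import numpy as np

X = np.random.rand(200, 8)        # 200 samples, 8 features
Y = np.random.randint(0, 3, 200)  # integer labels for 3 classes

clf = HPELMNN()
clf.train(X, Y)
scores = clf.predict(X)           # one score column per class
labels = scores.argmax(axis=1)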
Example #5
def predict_new_data(argv):
	'''
	Implements output prediction for new data

	Arguments:
	argv -- system inputs

	Returns:
	Y -- predicted Y value

	'''

	# file1 = saved model, file2 = excel file with new data
	print(argv)
	_, file1, file2 = argv
	print(file1)
	print(file2)

	# Process the excel data 
	X = process_data(file2)

	# Load model
	model = ELM(20, 1, tprint=5)
	model.load(file1)

	# Predict Y
	Y_predicted = model.predict(X)

	return Y_predicted	
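A hypothetical invocation: argv mimics sys.argv, so element 0 is the script name, element 1 the saved hpelm model, and element 2 the Excel file (process_data comes from elsewhere in the original project):

Y = predict_new_data(['predict.py', 'model.elm', 'new_data.xlsx'])
# equivalently, from a shell: python predict.py model.elm new_data.xlsx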
 def test_RegressionError_Works(self):
     T = np.array([1, 2, 3])
     Y = np.array([1.1, 2.2, 3.3])
     err1 = np.mean((T - Y)**2)
     elm = ELM(1, 1)
     e = elm.error(T, Y)
     np.testing.assert_allclose(e, err1)
 def test_24_AddNeurons_InitDefault_BiasWNotZero(self):
     elm = ELM(2, 1)
     elm.add_neurons(3, "sigm")
     W = elm.neurons[0][2]
     bias = elm.neurons[0][3]
     self.assertGreater(np.sum(np.abs(W)), 0.001)
     self.assertGreater(np.sum(np.abs(bias)), 0.001)
Example #8
 def test_MultilabelError_CorrectWithMultipleClasses(self):
     T = np.zeros((100, 5))
     T[:, 0] = 1
     Y = np.zeros((100, 5))
     Y[:, 1] = 1
     model = ELM(1, 5, classification='ml')
     self.assertEqual(0.4, model.error(T, Y))
Example #9
 def test_MultiLabelClassification_Works(self):
     elm = ELM(1, 2)
     X = np.array([1, 2, 3, 4, 5, 6])
     T = np.array([[1, 1], [1, 0], [1, 0], [0, 1], [0, 1], [1, 1]])
     elm.add_neurons(1, "lin")
     elm.train(X, T, 'ml')
     elm.train(X, T, 'mc')
Example #10
def t10k_test(model_path='../models/elm.model'):
    images, labels = read_mnist.load_mnist('../data/', kind='t10k')
    # map() returns an iterator in Python 3, so materialize the results
    images = list(map(read_mnist.up_to_2D, images))
    images = list(map(get_hog, images))
    images = np.asarray(images)

    labels = np.asarray(list(map(read_mnist.handle_label, labels)))

    elm = ELM(images.shape[1], labels.shape[1])
    elm.load(model_path)
    results = elm.predict(images)

    labels = list(map(get_labels, np.array(labels)))
    results = list(map(get_labels, np.array(results)))
    yes, tot = 0, len(labels)

    for i in range(tot):
        if labels[i] == results[i]:
            yes += 1

    print('YES :', yes)
    print('TOT :', tot)
    print('ACC :', str(float(yes) / tot * 100.0) + '%')
    return float(yes) / tot * 100.0
Example #15
 def test_TrainWithBatch_OverwritesBatch(self):
     elm = ELM(1, 1, batch=123)
     X = np.array([1, 2, 3])
     T = np.array([1, 2, 3])
     elm.add_neurons(1, "lin")
     elm.train(X, T, batch=234)
     self.assertEqual(234, elm.batch)
 def test_MultiLabelClassError_Works(self):
     X = np.array([1, 2, 3])
     T = np.array([[0, 1], [1, 1], [1, 0]])
     Y = np.array([[0.4, 0.6], [0.8, 0.6], [1, 1]])
     elm = ELM(1, 2, classification="ml")
     elm.add_neurons(1, "lin")
     e = elm.error(T, Y)
     np.testing.assert_allclose(e, 1.0 / 6)
 def test_20_SLFN_AddTwoNeuronTypes_GotThem(self):
     elm = ELM(1, 1)
     elm.add_neurons(1, "lin")
     elm.add_neurons(1, "sigm")
     self.assertEqual(2, len(elm.neurons))
     ntypes = [nr[0] for nr in elm.neurons]
     self.assertIn("lin", ntypes)
     self.assertIn("sigm", ntypes)
Example #18
 def test_AddNeurons_WorksWithLongType(self):
     if sys.version_info[0] == 2:
         ltype = long
     else:
         ltype = int
     model = ELM(3, 2)
     L = ltype(10)
     model.add_neurons(L, 'tanh')
def predictor_init(model_path):
    elm = ELM(324, 324)
    elm.load(model_path)
    if elm is None:
        print('Error: elm is None.')
        exit()
    print(elm)
    return elm
 def test_WeightedClassError_Works(self):
     T = np.array([[0, 1], [0, 1], [1, 0]])
     Y = np.array([[0, 1], [0.4, 0.6], [0, 1]])
     # here class 0 is totally incorrect, and class 1 is totally correct
     w = (9, 1)
     elm = ELM(1, 2, classification="wc", w=w)
     elm.add_neurons(1, "lin")
     e = elm.error(T, Y)
     np.testing.assert_allclose(e, 0.9)
 def test_25_AddNeurons_InitTwiceBiasW_CorrectlyMerged(self):
     elm = ELM(2, 1)
     W1 = np.random.rand(2, 3)
     W2 = np.random.rand(2, 4)
     bias1 = np.random.rand(3,)
     bias2 = np.random.rand(4,)
     elm.add_neurons(3, "sigm", W1, bias1)
     elm.add_neurons(4, "sigm", W2, bias2)
     np.testing.assert_array_almost_equal(np.hstack((W1, W2)), elm.neurons[0][2])
     np.testing.assert_array_almost_equal(np.hstack((bias1, bias2)), elm.neurons[0][3])
Example #25
def build_ELM_encoder(xinput, target, num_neurons):

    elm = ELM(xinput.shape[1], target.shape[1])
    elm.add_neurons(num_neurons, "sigm")
    elm.add_neurons(num_neurons, "lin")
    #elm.add_neurons(num_neurons, "rbf_l1")
    elm.train(xinput, target, "r")
    ypred = elm.predict(xinput)
    # hpelm's error(T, Y) expects true targets first, then predictions
    print("mse error", elm.error(target, ypred))
    return elm, ypred
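A usage sketch on random data; the shapes here are arbitrary:

import numpy as np

xinput = np.random.rand(500, 30)
target = np.random.rand(500, 10)
elm, ypred = build_ELM_encoder(xinput, target, num_neurons=50)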
Example #26
 def test_LOOandOP_CanSelectMoreThanOneNeuron(self):
     X = np.random.rand(100, 5)
     T = np.random.rand(100, 2)
     for _ in range(10):
         model = ELM(5, 2)
         model.add_neurons(5, 'lin')
         model.train(X, T, 'LOO', 'OP')
         max2 = model.nnet.L
         if max2 > 1:
             break
     self.assertGreater(max2, 1)
Example #29
def getElm(data, label, classification='c', w=None, nn=10, func="sigm"):

    elm = ELM(len(data[0]), len(label[0]), classification, w)
    # use the nn argument instead of a hard-coded neuron count
    elm.add_neurons(nn, func)
    elm.add_neurons(nn, "rbf_l1")
    elm.add_neurons(nn, "rbf_l2")
    elm.train(data, np.array(label))
    return elm
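A usage sketch with one-hot labels (values here are arbitrary):

import numpy as np

data = np.random.rand(300, 6)
label = np.eye(3)[np.random.randint(0, 3, 300)]  # one-hot, 3 classes
elm = getElm(data, label, classification='c', nn=15)
Y = elm.predict(data)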
 def test_LoadELM_WrongFile(self):
     elm = ELM(1, 1)
     try:
         f, fname = tempfile.mkstemp()
         self.assertRaises(IOError, elm.load, fname + "ololo2")
     finally:
         os.close(f)
 def test_SaveELM_WrongFile(self):
     elm = ELM(1, 1)
     try:
         f, fname = tempfile.mkstemp()
         self.assertRaises(IOError, elm.save,
                           os.path.dirname(fname) + "olo/lo")
     finally:
         os.close(f)
    def tune_elm(train_x, train_y, test_x_raw, test_x, act_funcs,
                 neuron_counts):
        '''
        Assumptions:
        1. NN has only 1 hidden layer
        2. act_funcs: list of distinct activation functions
        3. neuron_counts: list of distinct '# of neurons in the hidden layer'
        '''
        print("Tuning ELM...")
        features = train_x.shape[1]
        train_y = Pre_processor.one_hot_encoding(train_y)
        for cur_act_func in act_funcs:
            for cur_neuron_count in neuron_counts:
                print(cur_act_func + " | " + str(cur_neuron_count) + "...")
                clf = ELM(features, Constants.tot_labels)
                clf.add_neurons(cur_neuron_count, cur_act_func)
                clf.train(train_x, train_y, 'CV', 'OP', 'c', k=10)
                pred_y = clf.predict(test_x)
                pred_y = Pre_processor.one_hot_decoding_full(pred_y)
                file_name = ("submission_" + str(cur_neuron_count) + "_" +
                             cur_act_func + ".csv")
                Database.save_results(test_x_raw, pred_y, file_name)
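A self-contained sketch of the same idea (a grid over activation functions and neuron counts, scored by the cross-validation error that train('CV', ...) returns) without the project-specific helper classes:

import numpy as np
from hpelm import ELM

train_x = np.random.rand(200, 20)
train_y = np.eye(4)[np.random.randint(0, 4, 200)]  # one-hot targets

best = None
for func in ['sigm', 'tanh']:
    for n in [10, 50, 100]:
        clf = ELM(train_x.shape[1], train_y.shape[1])
        clf.add_neurons(n, func)
        err = clf.train(train_x, train_y, 'CV', 'c', k=5)
        if best is None or err < best[0]:
            best = (err, func, n)
print("best (error, func, neurons):", best)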
 def test_Classification_WorksCorreclty(self):
     elm = ELM(1, 2)
     X = np.array([-1, -0.6, -0.3, 0.3, 0.6, 1])
     T = np.array([[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]])
     elm.add_neurons(1, "lin")
     elm.train(X, T, 'c')
     Y = elm.predict(X)
     self.assertGreater(Y[0, 0], Y[0, 1])
     self.assertLess(Y[5, 0], Y[5, 1])
 def epoch(train_x, train_y, test_x, test_x_raw, filename):
     features = train_x.shape[1]
     train_y = Pre_processor.one_hot_encoding(train_y)
     clf = ELM(features, Constants.tot_labels)
     clf.add_neurons(550, "sigm")
     clf.train(train_x, train_y, 'CV', 'OP', 'c', k=10)
     pred_y = clf.predict(test_x)
     pred_y = Pre_processor.one_hot_decoding_full(pred_y)
     Database.save_results(test_x_raw, pred_y, filename)
Example #36
def fit(X, Y, model="SVM"):
    # shuffle data
    tr_set = np.concatenate((Y, X), axis=1)
    np.random.shuffle(tr_set)
    x = tr_set[:, 1:]
    y = tr_set[:, 0].reshape(-1, 1)
    y = y.ravel()

    # split data into train and test part for evaluating model performance
    X_train, X_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=0)

    if model == "SVM":
        tuned_parameters = [
            {
                "kernel": ["rbf"],
                "gamma": [10**x for x in range(-7, 8)],
                "C": [10**x for x in range(-5, 1)],
            },
            {
                "kernel": ["linear"],
                "C": [10**x for x in range(-5, 1)]
            },
        ]
        clf = GridSearchCV(svm.SVC(probability=True), tuned_parameters, cv=5)
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)  # model performance

        means = clf.cv_results_["mean_test_score"]
        stds = clf.cv_results_["std_test_score"]
        for mean, std, params in zip(means, stds, clf.cv_results_["params"]):
            print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
        print("Best parameters set found on training set:")
        print()
        print(clf.best_params_)

    if model == "ELM":
        label_encoder = LabelEncoder()
        y = y.reshape(-1, 1)
        y = label_encoder.fit_transform(y)

        onehot_encoder = OneHotEncoder(sparse=False)
        T_onehot_encoded = onehot_encoder.fit_transform(y.ravel().reshape(
            -1, 1))

        clf = ELM(x.shape[1], T_onehot_encoded.shape[1], classification="ml")
        clf.add_neurons(500, "rbf_l2")
        clf.train(x, T_onehot_encoded)
        y_pred = clf.predict(x).argmax(1)
        y_test = y

    return clf, accuracy_score(y_test, y_pred)
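A sketch of calling fit() on synthetic data; Y must be a column vector of labels because it is concatenated with X before shuffling. Note that in the ELM branch the accuracy is computed on the full training set:

import numpy as np

X = np.random.rand(150, 10)
Y = np.random.randint(0, 3, (150, 1))
clf, acc = fit(X, Y, model="ELM")
print("training-set accuracy:", acc)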
def make_h5_file(data_merged):
    '''
    Converts csv file to hdf5 file

    Argument:
    data_merged -- csv file

    Returns:
    None
    '''

    # make_hdf5 is a module-level hpelm helper (from hpelm import make_hdf5),
    # not a method of the ELM class
    # Single file
    make_hdf5(data_merged, 'data_merged.h5', delimiter=',')

    # Multiple files
    make_hdf5('x.csv', 'x.h5', delimiter=',')
    make_hdf5('t.csv', 't.h5', delimiter=',')
    make_hdf5('xtest.csv', 'xtest.h5', delimiter=',')
    make_hdf5('ttest.csv', 'ttest.h5', delimiter=',')

    return None
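The point of these HDF5 files is hpelm's big-data class, which mirrors the ELM interface but reads data from files. A minimal sketch, assuming the x.h5/t.h5 files created above hold 10 input columns and 3 target columns:

from hpelm import HPELM

model = HPELM(10, 3)
model.add_neurons(100, 'sigm')
model.train('x.h5', 't.h5', 'c')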
 def test_WeightedClassification_ClassWithLargerWeightWins(self):
     elm = ELM(1, 2)
     X = np.array([1, 2, 3, 1, 2, 3])
     T = np.array([[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]])
     elm.add_neurons(1, "lin")
     elm.train(X, T, 'wc', w=(1, 0.1))
     Y = elm.predict(X)
     self.assertGreater(Y[0, 0], Y[0, 1])
     self.assertGreater(Y[1, 0], Y[1, 1])
     self.assertGreater(Y[2, 0], Y[2, 1])
Example #41
class ELM(Classifier):
    def __init__(self, neurons: Tuple[Tuple] = None) -> None:
        clf = None
        self.neurons = neurons if neurons else DEFAULT_NEURONS
        super().__init__(clf)

    def fit(self, x_train: ndarray, y_train: ndarray, *args, **kwargs)\
            -> None:
        self.classifier = ELMachine(x_train.shape[1], y_train.shape[1])
        for neuron in self.neurons:
            logger.info("Adding {} neurons with '{}' function.".format(
                neuron[0], neuron[1]))
            self.classifier.add_neurons(neuron[0], neuron[1])
        logger.debug("Training the Extreme Learning Machine Classifier...")
        start = time()
        self.classifier.train(x_train, y_train, **kwargs)
        logger.debug("Done training in {} seconds.".format(time() - start))

    def predict(self, x_test: ndarray) -> ndarray:
        logger.debug("Predicting {} samples...".format(x_test.shape[0]))
        start = time()
        predictions = np.argmax(self.classifier.predict(x_test), axis=-1)
        logger.debug("Done all predictions in {} seconds.".format(time() -
                                                                  start))
        return predictions

    def predict_proba(self, x_test: ndarray) -> float:
        logger.debug("Predicting {} samples...".format(x_test.shape[0]))
        start = time()
        predictions = self.classifier.predict(x_test)
        logger.debug("Done all predictions in {} seconds.".format(time() -
                                                                  start))
        return predictions
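A hypothetical usage of this wrapper; DEFAULT_NEURONS, logger and the Classifier base class come from the surrounding project:

import numpy as np

x_train = np.random.rand(100, 6)
y_train = np.eye(2)[np.random.randint(0, 2, 100)]  # one-hot targets

model = ELM(neurons=((10, 'sigm'), (5, 'tanh')))
model.fit(x_train, y_train)
preds = model.predict(x_train)  # class indices via argmax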
Example #44
def model_training(model_path, data_path, neurons=300):
    images, labels = read_mnist.load_mnist(data_path, kind='train')

    # map() returns an iterator in Python 3, so materialize the results
    images = list(map(read_mnist.up_to_2D, images))
    images = list(map(get_hog, images))
    images = np.asarray(images)

    labels = np.asarray(list(map(read_mnist.handle_label, labels)))

    elm = ELM(images.shape[1], labels.shape[1])

    elm.add_neurons(neurons, 'sigm')
    elm.add_neurons(neurons, 'sigm')
    # elm.add_neurons(int(images.shape[1]*0.8), 'sigm')
    # elm.add_neurons(int(images.shape[1]*0.6), 'tanh')
    elm.train(images, labels)
    elm.save(model_path)
 def test_PrecisionELM_UsesPrecision(self):
     elm1 = ELM(1, 1, precision='32')
     self.assertIs(elm1.nnet.precision, np.float32)
     elm2 = ELM(1, 1, precision='single')
     self.assertIs(elm2.nnet.precision, np.float32)
     elm3 = ELM(1, 1, precision=np.float32)
     self.assertIs(elm3.nnet.precision, np.float32)
     elm4 = ELM(1, 1, precision='64')
     self.assertIs(elm4.nnet.precision, np.float64)
     elm5 = ELM(1, 1, precision='double')
     self.assertIs(elm5.nnet.precision, np.float64)
     elm6 = ELM(1, 1, precision=np.float64)
     self.assertIs(elm6.nnet.precision, np.float64)
     elm7 = ELM(1, 1)  # default double precision
     self.assertIs(elm7.nnet.precision, np.float64)
     elm8 = ELM(1, 1, precision="lol")  # default double precision
     self.assertIs(elm8.nnet.precision, np.float64)
Example #48
def getElm(data, label, classification='', w=None, nn=10, func="sigm"):

    print('create ELM, data/label shape =', data.shape, label.shape)

    elm = ELM(data.shape[1],
              label.shape[1],
              classification=classification,
              w=w)
    elm.add_neurons(nn, func)
    elm.train(data, label, "c")
    return elm
#!/usr/bin/env python

import numpy as np
import time
from hpelm import ELM

inp = np.loadtxt("input.txt")
outp = np.loadtxt("output.txt")

for neuron in range(10, 1000, 10):
    elm = ELM(92, 40)
    elm.add_neurons(neuron, "sigm")

    # time.clock() was removed in Python 3.8; perf_counter() is the replacement
    t0 = time.perf_counter()
    elm.train(inp, outp, "c")
    t1 = time.perf_counter()
    t = t1 - t0

    pred = elm.predict(inp)
    acc = float(np.sum(outp.argmax(1) == pred.argmax(1))) / outp.shape[0]
    # t is in seconds, so t * 1e6 is microseconds
    print("neuron=%d error=%.1f%% time=%dus" % (neuron, 100 - acc * 100, t * 1000000))

    if acc == 1.0:
        break
 def test_3_OneDimensionInputs_RunsCorrectly(self):
     X = np.array([1, 2, 3])
     T = np.array([[1], [2], [3]])
     elm = ELM(1, 1)
     elm.add_neurons(1, "lin")
     elm.train(X, T)
 def test_2_NonNumpyTargets_RaiseError(self):
     X = np.array([[1, 2], [3, 4], [5, 6]])
     T = np.array([['a'], ['b'], ['c']])
     elm = ELM(2, 1)
     elm.add_neurons(1, "lin")
     self.assertRaises(AssertionError, elm.train, X, T)
 def test_6_WrongDimensionalityTargets_RaiseError(self):
     X = np.array([[1, 2], [3, 4], [5, 6]])
     T = np.array([[1], [2], [3]])
     elm = ELM(1, 2)
     elm.add_neurons(1, "lin")
     self.assertRaises(AssertionError, elm.train, X, T)
 def test_4_OneDimensionTargets_RunsCorrectly(self):
     X = np.array([1, 2, 3])
     T = np.array([1, 2, 3])
     elm = ELM(1, 1)
     elm.add_neurons(1, "lin")
     elm.train(X, T)
 def test_8_OneDimensionTargets_RunsCorrectly(self):
     X = np.array([[1, 2], [3, 4], [5, 6]])
     T = np.array([[0], [0], [0]])
     elm = ELM(2, 1)
     elm.add_neurons(1, "lin")
     elm.train(X, T)
 def test_7_ZeroInputs_RunsCorrectly(self):
     X = np.array([[0, 0], [0, 0], [0, 0]])
     T = np.array([1, 2, 3])
     elm = ELM(2, 1)
     elm.add_neurons(1, "lin")
     elm.train(X, T)
 def test_11_LinearNeurons_MoreThanInputs_Truncated(self):
     elm = ELM(2, 1)
     elm.add_neurons(3, "lin")
     self.assertEqual(2, elm.neurons[0][1])
 def test_12_LinearNeurons_DefaultMatrix_Identity(self):
     elm = ELM(4, 1)
     elm.add_neurons(3, "lin")
     np.testing.assert_array_almost_equal(np.eye(4, 3), elm.neurons[0][2])
Example #58
ak, bk = calc_wavelet_parameter(X_learn)

def calc_W_B_para(C=0.7, input_node_num=input_node_num, hide_node_num=hide_node_num):
    beta = C * math.pow(hide_node_num, 1 / float(input_node_num))
    W_old = np.random.uniform(-0.5, 0.5, size=(input_node_num, hide_node_num))

    if input_node_num == 1:
        W_old = W_old / np.abs(W_old)
    else:
        W_old = np.sqrt(1. / np.square(W_old).sum(axis=1).reshape(input_node_num, 1)) * W_old
    W_new = beta * W_old
    Bias = np.random.uniform(-beta, beta, size=(hide_node_num,))
    return [W_new, Bias]

W, B = calc_W_B_para()

elm = ELM(input_node_num, output_node_num, ak=ak, bk=bk)
elm.add_neurons(20, "avg_arcsinh_morlet", W=W, B=B)
elm.train(X_learn, Y_learn, "r")

def plot_prognostic(train_out):

    inputs_regressors_num = list(X_learn[len(X_learn) - 1, :])

    len_just_prog = len_prognostics - 1100
    FC1_prognostics = []
    for i in range(len_just_prog):
        if i < regressors_num:
            if i == 0:
                inputs = list(inputs_regressors_num)
                inputs = np.array(inputs)
                inputs.resize(1, 4)
 def test_13_SLFN_AddLinearNeurons_GotThem(self):
     elm = ELM(1, 1)
     elm.add_neurons(1, "lin")
     self.assertEquals("lin", elm.neurons[0][0])
 def test_14_SLFN_AddSigmoidalNeurons_GotThem(self):
     elm = ELM(1, 1)
     elm.add_neurons(1, "sigm")
     self.assertEquals("sigm", elm.neurons[0][0])