Code Example #1
File: unittest_elm.py  Project: akusok/hpelm
 def test_CrossValidation_ReturnsError(self):
     model = ELM(5, 2)
     model.add_neurons(10, 'tanh')
     X = np.random.rand(100, 5)
     T = np.random.rand(100, 2)
     err = model.train(X, T, 'CV', k=3)
     self.assertIsNotNone(err)
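As the assertion above relies on, training with the 'CV' flag returns the cross-validation error, so the returned value can be compared across candidate network sizes. A minimal sketch of that idea (random data and arbitrary neuron counts, assuming only numpy and hpelm are available; the test above only asserts the value is not None):

import numpy as np
from hpelm import ELM

X = np.random.rand(100, 5)
T = np.random.rand(100, 2)

# Compare the 3-fold cross-validation error over a few hidden-layer sizes.
for n_neurons in (5, 10, 20):
    model = ELM(5, 2)
    model.add_neurons(n_neurons, 'tanh')
    cv_error = model.train(X, T, 'CV', k=3)  # returned value is the CV error
    print("neurons=%d  CV error=%s" % (n_neurons, cv_error))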
Code Example #2
File: test_correctness.py  Project: IstanbulBoy/hpelm
 def test_TrainWithBatch_OverwritesBatch(self):
     elm = ELM(1, 1, batch=123)
     X = np.array([1, 2, 3])
     T = np.array([1, 2, 3])
     elm.add_neurons(1, "lin")
     elm.train(X, T, batch=234)
     self.assertEqual(234, elm.batch)
Code Example #4
File: test_correctness.py  Project: IstanbulBoy/hpelm
 def test_MultiLabelClassification_Works(self):
     elm = ELM(1, 2)
     X = np.array([1, 2, 3, 4, 5, 6])
     T = np.array([[1, 1], [1, 0], [1, 0], [0, 1], [0, 1], [1, 1]])
     elm.add_neurons(1, "lin")
     elm.train(X, T, 'ml')
     elm.train(X, T, 'mc')
Code Example #6
    def tune_elm(train_x, train_y, test_x_raw, test_x, act_funcs,
                 neuron_counts):
        '''
		Assumptions:
		1. NN has only 1 hidden layer
		2. act_funcs: list of distinct activation functions
		3. neuron_counts: list of distinct '# of neurons in the hidden layer'
		'''
        print("Tuning ELM...")
        features = train_x.shape[1]
        train_y = Pre_processor.one_hot_encoding(train_y)
        ind_func = 0
        while (ind_func < len(act_funcs)):
            ind_neuron = 0
            cur_act_func = act_funcs[ind_func]
            while (ind_neuron < len(neuron_counts)):
                cur_neuron_count = neuron_counts[ind_neuron]
                print(cur_act_func + " | " + str(cur_neuron_count) + "...")
                clf = ELM(features, Constants.tot_labels)
                clf.add_neurons(cur_neuron_count, cur_act_func)
                clf.train(train_x, train_y, 'CV', 'OP', 'c', k=10)
                pred_y = clf.predict(test_x)
                pred_y = Pre_processor.one_hot_decoding_full(pred_y)
                file_name = "submission_" + str(
                    cur_neuron_count) + "_" + cur_act_func + ".csv"
                Database.save_results(test_x_raw, pred_y, file_name)
                ind_neuron = ind_neuron + 1
            ind_func = ind_func + 1
Code Example #7
File: test_correctness.py  Project: IstanbulBoy/hpelm
 def test_ELM_SaveLoad(self):
     X = np.array([1, 2, 3, 1, 2, 3])
     T = np.array([[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]])
     elm = ELM(1, 2, precision='32', norm=0.02)
     elm.add_neurons(1, "lin")
     elm.add_neurons(2, "tanh")
     elm.train(X, T, "wc", w=(0.7, 0.3))
     B1 = elm.nnet.get_B()
     try:
         f, fname = tempfile.mkstemp()
         elm.save(fname)
         elm2 = ELM(3, 3)
         elm2.load(fname)
     finally:
         os.close(f)
     self.assertEqual(elm2.nnet.inputs, 1)
     self.assertEqual(elm2.nnet.outputs, 2)
     self.assertEqual(elm2.classification, "wc")
     self.assertIs(elm.precision, np.float32)
     self.assertIs(elm2.precision, np.float64)  # precision has changed
     np.testing.assert_allclose(np.array([0.7, 0.3]), elm2.wc)
     np.testing.assert_allclose(0.02, elm2.nnet.norm)
     np.testing.assert_allclose(B1, elm2.nnet.get_B())
     self.assertEqual(elm2.nnet.get_neurons()[0][1], "lin")
     self.assertEqual(elm2.nnet.get_neurons()[1][1], "tanh")
Code Example #8
class ELM(Classifier):
    def __init__(self, neurons: Tuple[Tuple] = None) -> None:
        clf = None
        self.neurons = neurons if neurons else DEFAULT_NEURONS
        super().__init__(clf)

    def fit(self, x_train: ndarray, y_train: ndarray, *args, **kwargs)\
            -> None:
        self.classifier = ELMachine(x_train.shape[1], y_train.shape[1])
        for neuron in self.neurons:
            logger.info("Adding {} neurons with '{}' function.".format(
                neuron[0], neuron[1]))
            self.classifier.add_neurons(neuron[0], neuron[1])
        logger.debug("Training the Extreme Learning Machine Classifier...")
        start = time()
        self.classifier.train(x_train, y_train, **kwargs)
        logger.debug("Done training in {} seconds.".format(time() - start))

    def predict(self, x_test: ndarray) -> ndarray:
        logger.debug("Predicting {} samples...".format(x_test.shape[0]))
        start = time()
        predictions = np.argmax(self.classifier.predict(x_test), axis=-1)
        logger.debug("Done all predictions in {} seconds.".format(time() -
                                                                  start))
        return predictions

    def predict_proba(self, x_test: ndarray) -> ndarray:
        logger.debug("Predicting {} samples...".format(x_test.shape[0]))
        start = time()
        predictions = self.classifier.predict(x_test)
        logger.debug("Done all predictions in {} seconds.".format(time() -
                                                                  start))
        return predictions
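A hypothetical call sequence for the wrapper class above (here ELM is that wrapper, not hpelm's class; Classifier, DEFAULT_NEURONS, ELMachine and logger come from the surrounding project and are assumed to be importable):

import numpy as np

# Hypothetical usage of the wrapper defined above.
x_train = np.random.rand(200, 30)
y_train = np.eye(3)[np.random.randint(0, 3, 200)]  # hpelm expects one target column per class
x_test = np.random.rand(50, 30)

clf = ELM(neurons=((64, "sigm"), (32, "tanh")))  # (count, activation) pairs
clf.fit(x_train, y_train)           # builds the hpelm model and trains it
labels = clf.predict(x_test)        # argmax over outputs -> class indices
scores = clf.predict_proba(x_test)  # raw hpelm output scores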
Code Example #10
File: hpelmnn.py  Project: grzesiekzajac/ziwm
class HPELMNN(Classifier):
    
    def __init__(self):
        self.__hpelm = None
    
    @staticmethod
    def name():
        return "hpelmnn"

    def train(self, X, Y, class_number=-1):
        class_count = max(np.unique(Y).size, class_number)
        feature_count = X.shape[1]
        self.__hpelm = ELM(feature_count, class_count, 'wc')
        self.__hpelm.add_neurons(feature_count, "sigm")

        Y_arr = Y.reshape(-1, 1)
        enc = OneHotEncoder()
        enc.fit(Y_arr)
        Y_OHE = enc.transform(Y_arr).toarray()

        out_fd = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        self.__hpelm.train(X, Y_OHE)
        sys.stdout = out_fd

    def predict(self, X):
        Y_predicted = self.__hpelm.predict(X)
        return Y_predicted
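A hypothetical usage sketch for HPELMNN above; Y holds integer class labels, and the one-hot encoding and stdout suppression happen inside train() (assumes the class above and its imports are in scope):

import numpy as np

# Hypothetical usage of HPELMNN defined above.
X = np.random.rand(120, 8)
Y = np.random.randint(0, 3, 120)

model = HPELMNN()
model.train(X, Y)                      # one-hot encodes Y and trains a weighted-class ELM
scores = model.predict(X)              # per-class scores from hpelm
predicted = np.argmax(scores, axis=1)  # back to integer labels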
Code Example #11
File: tools.py  Project: winroot/smartImgProcess
def getElm(data, label, classification='c', w=None, nn=10, func="sigm"):

    elm = ELM(len(data[0]), len(label[0]), classification, w)
    elm.add_neurons(10, func)
    elm.add_neurons(10, "rbf_l1")
    elm.add_neurons(10, "rbf_l2")
    elm.train(data, np.array(label))
    return elm
Code Example #12
 def epoch(train_x, train_y, test_x, test_x_raw, filename):
     features = train_x.shape[1]
     train_y = Pre_processor.one_hot_encoding(train_y)
     clf = ELM(features, Constants.tot_labels)
     clf.add_neurons(550, "sigm")
     clf.train(train_x, train_y, 'CV', 'OP', 'c', k=10)
     pred_y = clf.predict(test_x)
     pred_y = Pre_processor.one_hot_decoding_full(pred_y)
     Database.save_results(test_x_raw, pred_y, filename)
Code Example #14
File: test_correctness.py  Project: IstanbulBoy/hpelm
 def test_Classification_WorksCorreclty(self):
     elm = ELM(1, 2)
     X = np.array([-1, -0.6, -0.3, 0.3, 0.6, 1])
     T = np.array([[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]])
     elm.add_neurons(1, "lin")
     elm.train(X, T, 'c')
     Y = elm.predict(X)
     self.assertGreater(Y[0, 0], Y[0, 1])
     self.assertLess(Y[5, 0], Y[5, 1])
Code Example #16
File: test_correctness.py  Project: IstanbulBoy/hpelm
 def test_WeightedClassification_ClassWithLargerWeightWins(self):
     elm = ELM(1, 2)
     X = np.array([1, 2, 3, 1, 2, 3])
     T = np.array([[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]])
     elm.add_neurons(1, "lin")
     elm.train(X, T, 'wc', w=(1, 0.1))
     Y = elm.predict(X)
     self.assertGreater(Y[0, 0], Y[0, 1])
     self.assertGreater(Y[1, 0], Y[1, 1])
     self.assertGreater(Y[2, 0], Y[2, 1])
Code Example #19
File: myEML.py  Project: DIYer22/ELM
def getElm(data, label, classification='', w=None, nn=10, func="sigm"):

    print 'creat ELM, data,label shape=', data.shape, label.shape

    elm = ELM(data.shape[1],
              label.shape[1],
              classification=classification,
              w=w)
    elm.add_neurons(nn, func)
    elm.train(data, label, "c")
    return elm
Code Example #20
File: elmTest.py  Project: btekgit/mitosisdetection
def build_ELM_encoder(xinput, target, num_neurons):


    elm = ELM(xinput.shape[1], target.shape[1])
    elm.add_neurons(num_neurons, "sigm")
    elm.add_neurons(num_neurons, "lin")
    #elm.add_neurons(num_neurons, "rbf_l1")
    elm.train(xinput, target, "r")
    ypred = elm.predict(xinput)
    print "mse error", elm.error(ypred, target)
    return elm, ypred
Code Example #21
File: unittest_elm.py  Project: akusok/hpelm
 def test_LOOandOP_CanSelectMoreThanOneNeuron(self):
     X = np.random.rand(100, 5)
     T = np.random.rand(100, 2)
     for _ in range(10):
         model = ELM(5, 2)
         model.add_neurons(5, 'lin')
         model.train(X, T, 'LOO', 'OP')
         max2 = model.nnet.L
         if max2 > 1:
             break
     self.assertGreater(max2, 1)
Code Example #23
def model_training(model_path, data_path, neurons=300):
    images, labels = read_mnist.load_mnist(data_path, kind='train')

    images = map(read_mnist.up_to_2D, images)
    images = map(get_hog, images)
    images = np.mat(np.array(images))

    labels = np.mat(map(read_mnist.handle_label, labels))

    elm = ELM(images.shape[1], labels.shape[1])

    elm.add_neurons(neurons, 'sigm')
    elm.add_neurons(neurons, 'sigm')
    # elm.add_neurons(int(images.shape[1]*0.8), 'sigm')
    # elm.add_neurons(int(images.shape[1]*0.6), 'tanh')
    elm.train(images, labels)
    elm.save(model_path)
Code Example #24
def run(X, Y):
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=42)
    #ELM model
    X_train=X_train.values
    X_test=X_test.values
    y_train=y_train.values
    y_test=y_test.values
    print 'ELM tanh'
    for x in range(50, 500, 50):
        elm = ELM(X_train.shape[1], 1, classification='c')
        elm.add_neurons(x, 'tanh')
        elm.train(X_train, y_train)
        pred = elm.predict(X_test)
        temp = []
#        print 'Error(TANH, ', x, '): ', elm.error(y_test, pred)
        for p in pred:
            if p >=0.5:
                temp.append(1)
            else:
                temp.append(0)
        pred = np.asarray(temp)
#        print 'Error(TANH, ', x, '): ', elm.error(y_test, pred)
        evaluate(y_test, pred)
    
    print 'ELM rbf_linf tanh'
    for x in range(10, 100, 10):
        elm = ELM(X_train.shape[1], 1)
        elm.add_neurons(x, 'rbf_linf')
        elm.add_neurons(x*2, 'tanh')
        elm.train(X_train, y_train)
        pred = elm.predict(X_test)
        temp = []
#        print 'Error(TANH, ', x, '): ', elm.error(y_test, pred)
        for p in pred:
            if p >=0.5:
                temp.append(1)
            else:
                temp.append(0)
        pred = np.asarray(temp)
#        print 'Error(RBF+TANH, ', x, ',', 2*x, '): ', elm.error(y_test, pred)
        evaluate(y_test, pred)
Code Example #25
def model_elm(XX, TT, XX_test, TT_test, model_type):
    '''
	Build an ELM model using the hpelm package

	Arguments:
	XX -- randomized and normalized training X-values, numpy array of shape (# compositions, # molar%)
	TT -- randomized and normalized training Y-values, numpy array of shape (Fragility value, 1)
	XX_test -- randomized and normalized testing X-values, numpy array of shape (# compositions, # molar%)
	TT_test -- randomized and normalized testing Y-values, numpy array of shape (Fragility value, 1)

	Returns:
	model -- save model in ELM format
	'''

    # Hyperparameters
    k = 5  # Use this if model_type == CV
    np.random.seed(10)

    # Build hpelm model
    # ELM(inputs, outputs, classification='', w=None, batch=1000, accelerator=None, precision='double', norm=None, tprint=5)
    model = ELM(20, 1, tprint=5)

    # Add neurons
    model.add_neurons(7, 'tanh')  # Number of neurons with tanh activation
    model.add_neurons(7, 'lin')  # Number of neurons with linear activation

    # if then condition for types of training
    if (model_type == 'CV'):
        print('-' * 10 + 'Training with Cross-Validation' + '-' * 10)
        model.train(XX, TT, 'CV', k=k)  # Train the model with cross-validation
    elif (model_type == 'LOO'):
        print('-' * 10 + 'Training with Leave-One-Out' + '-' * 10)
        model.train(XX, TT, 'LOO')  # Train the model with Leave-One-Out
    else:
        print('-' * 10 + 'Training with regression' + '-' * 10)
        model.train(XX, TT, 'r')  # Train the model with regression

    # Train ELM models
    TTH = model.predict(XX)  # Calculate training error
    YY_test = model.predict(XX_test)  # Calculate testing error
    print('Model Training Error: ', model.error(TT,
                                                TTH))  # Print training error
    print('Model Test Error: ', model.error(YY_test,
                                            TT_test))  # Print testing error
    print(str(model))  # Print model information
    print('-' * 50)

    # Call plot function
    my_plot(TT_test, YY_test)

    return model
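A hedged sketch of driving model_elm above with synthetic data; the 20 input columns match the hard-coded ELM(20, 1), and my_plot plus the imports used above (numpy, hpelm) are assumed to be in scope:

import numpy as np

# Synthetic stand-in data: 20 molar% columns and one fragility target, as assumed by ELM(20, 1).
XX = np.random.rand(150, 20)
TT = np.random.rand(150, 1)
XX_test = np.random.rand(50, 20)
TT_test = np.random.rand(50, 1)

for model_type in ('CV', 'LOO', 'r'):
    model = model_elm(XX, TT, XX_test, TT_test, model_type)  # trains, prints errors, plots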
Code Example #26
#id_output = id_input[::-1]
#xinput = XXtrain[id_input,]
#xoutput = XXtrain[id_input,]
#print xinput.shape
## INPUT LAYER
singleLayer = True
if singleLayer:
    t = 5
    mn_error = np.zeros([t, 1])
    for i in range(0, t):
        elmSingle = ELM(input_shape, YY.shape[1])
        #elmSingle.add_neurons(ninputsig/2, "sigm")
        elmSingle.add_neurons(ninputsig, "sigm")
        #elmSingle.add_neurons(100, "rbf_l1")
        #elmSingle.add_neurons(ninputsig/10, "lin")
        elmSingle.train(XXtrainIn, YYtrain, "c", norm=1e-5)
        print "\n Trained input elm", elmSingle
        youtput = elmSingle.predict(XXtest)

        p = ytest.squeeze()
        yout = np.argmax(youtput, axis=1)
        nhit = sum(yout == p)
        ntpos = sum((yout == 1) & (p == 1))
        ntneg = sum((yout == 0) & (p == 0))
        npos = sum((p == 1))
        nneg = sum((p == 0))
        print "\n Testing results"
        print "Tpos:", ntpos, " / ", npos, "TD:", ntpos / float(npos)
        print "Tneg:", ntneg, " / ", nneg, "TN:", ntneg / float(nneg)
        print "Acc: ", nhit / (float)(len(p)), "total", len(p)
        mn_error[i] = nhit / (float)(len(p))
Code Example #27
File: test_correctness.py  Project: IstanbulBoy/hpelm
 def test_MRSR_Works(self):
     X = np.random.rand(10, 3)
     T = np.random.rand(10, 2)
     elm = ELM(3, 2)
     elm.add_neurons(5, "tanh")
     elm.train(X, T, "LOO", "OP")
Code Example #28
File: elm_test.py  Project: Newsteinwell/write-code
def calc_W_B_para(C=0.7,input_node_num=input_node_num,hide_node_num=hide_node_num,):
    beta=C*math.pow(hide_node_num,(1/float(input_node_num)))
    W_old=np.random.uniform(-0.5,0.5,size=(input_node_num,hide_node_num))

    if input_node_num == 1:
        W_old = W_old / np.abs(W_old)
    else:
        W_old = np.sqrt(1. / np.square(W_old).sum(axis=1).reshape(input_node_num, 1)) * W_old
    W_new=beta*W_old
    Bias=np.random.uniform(-beta,beta,size=(hide_node_num,))
    return [W_new,Bias]
W,B=calc_W_B_para()

elm = ELM(input_node_num,output_node_num,ak=ak,bk=bk)
elm.add_neurons(20, "avg_arcsinh_morlet",W=W,B=B)
elm.train(X_learn, Y_learn, "r")

def plot_prognostic(train_out):

    inputs_regressors_num=list(X_learn[len(X_learn)-1,:])

    len_just_prog=len_prognostics-1100
    FC1_prognostics=[]
    for i in range(len_just_prog):
        if i <regressors_num:
            if i ==0:
                inputs=list(inputs_regressors_num)
                inputs=np.array(inputs)
                inputs.resize(1,4)
                FC1_prognostics.append(elm.predict(inputs))
            elif i>=1:
Code Example #29
File: test_correctness.py  Project: zhufengGNSS/hpelm
 def test_7_ZeroInputs_RunsCorrectly(self):
     X = np.array([[0, 0], [0, 0], [0, 0]])
     T = np.array([1, 2, 3])
     elm = ELM(2, 1)
     elm.add_neurons(1, "lin")
     elm.train(X, T)
Code Example #30
File: test_correctness.py  Project: zhufengGNSS/hpelm
 def test_3_OneDimensionInputs_RunsCorrectly(self):
     X = np.array([1, 2, 3])
     T = np.array([[1], [2], [3]])
     elm = ELM(1, 1)
     elm.add_neurons(1, "lin")
     elm.train(X, T)
Code Example #31
    # train_x = np.array(data[old_batch_size_k:(batch_size * k), 1:], dtype="float")
    # train_y = np.array(data[old_batch_size_k:(batch_size * k), 0], dtype="int")
    # if k == epoch:
    #     train_x = np.array(data[old_batch_size_k:, 1:], dtype="float")
    #     train_y = np.array(data[old_batch_size_k:, 0], dtype="int")
    # old_batch_size_k = batch_size * k
    # print (train_x.shape, train_y.shape)

    # whole
    train_x = np.array(data[:, 1:], dtype="float")
    train_y = np.array(data[:, 0], dtype="int")
    print ("X", train_x.shape)
    print ("Y", train_y.shape)
    # end whole
    train_y = np.eye(np.max(train_y) + 1)[train_y]
    elm.train(train_x, train_y)
    k += 1

end_time = time.time()


def predict(_testImagesList):
    # testImagesList = np.load(testPath)
    testList = _testImagesList

    # print ('testImagesList', testList, testList.shape)
    test_x = np.array(testList[:, 1:], dtype="float")
    test_y = np.array(testList[:, 0], dtype="int")
    test_y = np.eye(np.max(test_y) + 1)[test_y]
    #
    Y = elm.predict(test_x)
Code Example #32
from sklearn.metrics import accuracy_score

X = np.loadtxt("tweet_vecs.txt", delimiter=',')
Y = np.loadtxt("tweet_label.txt", delimiter=',')
Y_onehot = np.loadtxt("tweet_label_onehot.txt", delimiter=',')

X_train, X_test, Y_train, Y_test, Y_onehot_train, Y_onehot_test = train_test_split(X, Y, Y_onehot, test_size=0.20, shuffle=False)

print("Starting training...")
elm = ELM(X_train.shape[1], Y_onehot_train.shape[1])
elm.add_neurons(200, "sigm")
elm.add_neurons(100, "tanh")
elm.add_neurons(100, "sigm")
elm.add_neurons(100, "sigm")
elm.add_neurons(100, "tanh")
elm.train(X_train, Y_onehot_train, "CV", "OP", "c", k=5)
print("Finished training...")

Y_predicted_elm = elm.predict(X_test)
Y_predicted = np.zeros((Y_predicted_elm.shape[0]))
for i, row in enumerate(Y_predicted_elm):
    idx_of_max = np.argmax(row)
    Y_predicted[i] = idx_of_max+1

with open("Y_predicted.txt", 'w+') as predfile, open("Y_true.txt", 'w+') as trufile:
    for i in Y_predicted:
        predfile.write(str(i))
        predfile.write("\n")
    for i in Y_test:
        trufile.write(str(i))
        trufile.write("\n")
Code Example #33
File: elm_naive.py  Project: akusok/hpelm
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 19 16:29:09 2014

@author: akusok
"""

import numpy as np
import os
from hpelm import ELM

curdir = os.path.dirname(__file__)
pX = os.path.join(curdir, "../datasets/Unittest-Iris/iris_data.txt")
pY = os.path.join(curdir, "../datasets/Unittest-Iris/iris_classes.txt")

X = np.loadtxt(pX)
Y = np.loadtxt(pY)

elm = ELM(4,3)
elm.add_neurons(15, "sigm")
elm.train(X, Y, "c")
Yh = elm.predict(X)
acc = float(np.sum(Y.argmax(1) == Yh.argmax(1))) / Y.shape[0]
print("Iris dataset training error: %.1f%%" % (100-acc*100))
Code Example #35
File: test_correctness.py  Project: IstanbulBoy/hpelm
 def test_MRSR2_Works(self):
     X = np.random.rand(20, 9)
     T = np.random.rand(20, 12)
     elm = ELM(9, 12)
     elm.add_neurons(5, "tanh")
     elm.train(X, T, "LOO", "OP")
Code Example #36
print(X_train_d3.shape)
print(X_train_d2.shape)
print(X_test_d3.shape)
print(X_test_d2.shape)
X_train = ss.fit_transform(np.concatenate((X_train_d3, X_train_d2), axis=1))
X_test = ss.transform(np.concatenate((X_test_d3, X_test_d2), axis=1))
print(X_train.shape)
print(X_test.shape)

#use ELM as classifier

from hpelm import ELM
acc = []
elm = ELM(4, 1)
elm.add_neurons(50, 'sigm')
elm.train(X_train, y_train, "LOO")
y_pred = elm.predict(X_test)
print(len(y_pred))
for i in range(len(y_pred)):
    if y_pred[i] >= 0.5:
        y_pred[i] = 1
    else:
        y_pred[i] = 0
print(y_test)
acc.append(accuracy_score(y_test, y_pred))
avg_acc = np.mean(acc)
print(avg_acc)

# use LDA as classifier

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
Code Example #37
result6.write("accuracy" +  "\t" +  "precision" + "\t" + "recall" + "\t" + "f1-measure" +"\t" + " mse" + "\t" + " mae" + "\t" + "auc")
result6.write("\n")




result6a = open("data/result/linmse.txt", "w")
result6a.write("accuracy" +  "\t" +  "precision" + "\t" + "recall" + "\t" + "f1-measure" +"\t" + " mse" + "\t" + " mae" + "\t" + "auc")
result6a.write("\n")



print "sigmoid with multi class error"
elm = ELM(1804,9)
elm.add_neurons(900, "sigm")
elm.train(X, Y, "c","CV", k=10)
r1 = elm.predict(X)
print("r1 shape")
print(r1[0])
print(str(elm))
print("performance measures")
result.write(str(elm))
result.write("\n")

r1=r1.argmax(1)
accuracy = accuracy_score(Y1, r1)
print(accuracy)
recall = recall_score(Y1, r1, average="weighted")
precision = precision_score(Y1, r1 , average="weighted")
f1 = f1_score(Y1, r1 , average="weighted")
mse = mean_squared_error(Y1, r1)
Code Example #38
File: test_correctness.py  Project: IstanbulBoy/hpelm
 def test_WeightedClassification_DefaultWeightsWork(self):
     elm = ELM(1, 2)
     X = np.array([1, 2, 3, 1, 2, 3])
     T = np.array([[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]])
     elm.add_neurons(1, "lin")
     elm.train(X, T, 'wc')
Code Example #39
result5 = open("data/result/tanh.txt", "w")

result5.write("accuracy" +  "\t" +  "precision" + "\t" + "recall" + "\t" + "f1-measure" +"\t" + " mse" + "\t" + " mae" + "\t" + "auc")
result5.write("\n")


result6 = open("data/result/lin.txt", "w")

result6.write("accuracy" +  "\t" +  "precision" + "\t" + "recall" + "\t" + "f1-measure" +"\t" + " mse" + "\t" + " mae" + "\t" + "auc")
result6.write("\n")


print "sigmoid with multi class error"
elm = ELM(41,23)
elm.add_neurons(15, "sigm")
elm.train(X, Y, "c")
r1 = elm.predict(X1)
print(str(elm))
print("performance measures")
result.write(str(elm))
result.write("\n")

r1=r1.argmax(1)
accuracy = accuracy_score(Y1, r1)
print(accuracy)
recall = recall_score(Y1, r1, average="weighted")
precision = precision_score(Y1, r1 , average="weighted")
f1 = f1_score(Y1, r1 , average="weighted")
mse = mean_squared_error(Y1, r1)
mae = mean_absolute_error(Y1, r1)
fpr, tpr, thresholds = metrics.roc_curve(Y1, r1,pos_label=2)
Code Example #40
File: test_correctness.py  Project: zhufengGNSS/hpelm
 def test_4_OneDimensionTargets_RunsCorrectly(self):
     X = np.array([1, 2, 3])
     T = np.array([1, 2, 3])
     elm = ELM(1, 1)
     elm.add_neurons(1, "lin")
     elm.train(X, T)
Code Example #41
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 19 16:29:09 2014

@author: akusok
"""

import numpy as np
import os
from hpelm import ELM

curdir = os.path.dirname(__file__)
pX = os.path.join(curdir, "../datasets/Unittest-Iris/iris_data.txt")
pY = os.path.join(curdir, "../datasets/Unittest-Iris/iris_classes.txt")

X = np.loadtxt(pX)
Y = np.loadtxt(pY)

elm = ELM(4, 3)
elm.add_neurons(15, "sigm")
elm.train(X, Y, "c")
Yh = elm.predict(X)
acc = float(np.sum(Y.argmax(1) == Yh.argmax(1))) / Y.shape[0]
print "Iris dataset training error: %.1f%%" % (100 - acc * 100)
Code Example #43
File: test_correctness.py  Project: zhufengGNSS/hpelm
 def test_8_OneDimensionTargets_RunsCorrectly(self):
     X = np.array([[1, 2], [3, 4], [5, 6]])
     T = np.array([[0], [0], [0]])
     elm = ELM(2, 1)
     elm.add_neurons(1, "lin")
     elm.train(X, T)
Code Example #46
#!/usr/bin/env python

import numpy as np
import time
import random
import sys
from hpelm import ELM

inp = np.loadtxt("input.txt")
outp = np.loadtxt("output.txt")

for neuron in range(10, 1000, 10):
    elm = ELM(92, 40)
    elm.add_neurons(neuron, "sigm")

    t0 = time.clock()
    elm.train(inp, outp, "c")
    t1 = time.clock()
    t = t1-t0

    pred = elm.predict(inp)
    acc = float(np.sum(outp.argmax(1) == pred.argmax(1))) / outp.shape[0]
    print "neuron=%d error=%.1f%% time=%dns" % (neuron, 100-acc*100, t*1000000)
    
    if int(acc) == 1:
        break
Code Example #47
                        temp = [0] * 5
                        temp[rating_mat['test_1'].T[mat_iid][i - 1] - 1] = 1
                        test_rat.append(temp)
                        #print mat_iid
                mat_iid += 1

        X = np.asarray(X, dtype=np.uint8)
        T = np.asarray(T, dtype=np.uint8)
        test = np.asarray(test, dtype=np.uint8)
        test_rat = np.asarray(test_rat, dtype=np.uint8)

        ##print X.shape,test.shape

        elm = ELM(X.shape[1], T.shape[1])
        elm.add_neurons(neuron, node)
        elm.train(X, T, "LOO")
        Y = elm.predict(test)

        pred = np.argmax(Y, axis=1)
        true = np.argmax(test_rat, axis=1)

        print 'Split 1 RMSE: ', mse(true, pred)**0.5
        print 'Split 1 NMAE: ', mae(true, pred) / 4

        i1_rmse = mse(true, pred)**0.5
        i1_nmae = mae(true, pred) / 4
        #################################SPLIT 2#####################################
        train_ids = map(int, train_ids_read.readline().strip().split(','))
        test_ids = map(int, test_ids_read.readline().strip().split(','))

        X = []
Code Example #48
File: hpELM.py  Project: stuartnankai/MasterThesis
def hpElM(data, target, iterNum, isNormal, isRegression, isPCA, n_components,
          normalMethod, testSize):
    print("ELM is running")
    y = target
    elmList = []
    # neuronsNum = [20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100, 200]
    neuronsNum = [5, 10, 20, 30, 40, 50, 75, 200]
    # neuronsNum = [5]

    if normalMethod == 1:
        sc = preprocessing.Normalizer()
    elif normalMethod == 2:
        sc = preprocessing.StandardScaler()
    elif normalMethod == 3:
        sc = preprocessing.MinMaxScaler()
    for j in range(iterNum):
        errorList = []
        X_train, X_test, y_train, y_test = train_test_split(data,
                                                            y,
                                                            test_size=testSize)

        sc.fit(X_train)
        X_train_std = sc.transform(X_train)
        X_test_std = sc.transform(X_test)
        if isPCA:
            X_train, X_test = reduceDim.featureExtraction(
                X_train_std, X_test_std, n_components)
            # print("This is the size of input by using PCA: ", len(X_train[0]))
        else:
            print("Not use PCA...", )
            X_train = X_train_std
            X_test = X_test_std
        # print("This is : ", X_train)
        # print("This is : ", y_train.values)

        for neuron in neuronsNum:
            elm1 = ELM(len(X_train[1]), y.shape[1])
            elm1.add_neurons(neuron, 'sigm')
            # elm1.add_neurons(neuron, 'tanh')
            # elm1.add_neurons(neuron,'rbf_l2')
            elm1.train(X_train, y_train.values, 'CV', 'OP', 'r', k=3)
            y_pred_temp = elm1.predict(X_test)
            errorPara = elm1.error(y_pred_temp, y_test.values)
            errorList.append(errorPara)
        print("This is error list: ", errorList)
        bestPos = errorList.index(min(errorList))
        bestPara = neuronsNum[bestPos]
        print("This is the best number of neuron: ", bestPara)

        elm = ELM(len(X_train[1]), y.shape[1])
        elm.add_neurons(bestPara, 'sigm')
        # elm.add_neurons(bestPara,'tanh')
        # elm.add_neurons(bestPara,'rbf_l2')
        elm.train(X_train, y_train.values, 'CV', 'OP', 'r', k=5)
        y_pred_temp = elm.predict(X_test)

        # elm.add_neurons(30, "sigm")
        # elm.add_neurons(30, "rbf_l2")
        # elm.train(X_train, y_train.values, 'CV','OP',k=5)
        #
        # # svr_rbf = SVR(kernel='rbf', C=1000.0, gamma='auto', max_iter=-1, epsilon=0.1)
        # # svr_poly = SVR(kernel='poly', C=1000, degree=3)
        # y_pred_temp = elm.predict(X_test)
        # print("This is temp y_pred: ", y_pred_temp )
        y_pred = []
        for t in y_pred_temp:
            if t < 0:
                y_pred.append(0)
            else:
                y_pred.append(t)
        # y_pred = svr_poly.fit(X_train, y_train).predict(X_test)
        if isRegression:
            return y_pred
        else:
            sum_mean = 0
            for i in range(len(y_pred)):
                if isNormal:
                    print(
                        "This is REAL value %.4f, ======ELM=====> PRED value: %.4f"
                        % (y_test[i], y_pred[i]))
                    # sum_mean += (y_pred[i] - y_test[i]) ** 2  # if the target is np array

                    sum_mean += (float("{0:.4f}".format(float(y_pred[i]))) -
                                 y_test[i])**2
                else:
                    print(
                        "This is REAL value %.4f, ======ELM=====> PRED value: %.4f"
                        % (y_test.values[i], y_pred[i]))
                    # sum_mean += (y_pred[i] - y_test.values[i]) ** 2

                    sum_mean += (float("{0:.4f}".format(float(y_pred[i]))) -
                                 y_test.values[i])**2
            sum_erro = np.sqrt(sum_mean / len(y_pred))
            elmList.append(sum_erro[0])
            print("This is RMSE for ELM: ", sum_erro[0])
            print("This is iteration num: ", j + 1)
    return elmList
Code Example #49
File: GMMHMM.py  Project: marsyang1991/FOG
y_train = np.array(feature_train.iloc[:, -1])
X_test = np.array(feature_test.iloc[:, :-1])
y_test = np.array(feature_test.iloc[:, -1])
y_all = y_train
X_all = X_train
X_train_0 = X_train[y_train == 0][:]
X_train_1 = X_train[y_train == 1][:]
X_train_0_down = np.array(random.sample(X_train_0, X_train_1.shape[0]))
X_train = np.vstack([X_train_0_down, X_train_1])
y_train_0 = np.zeros([X_train_0_down.shape[0], 1], dtype=int)
y_train_1 = np.ones([X_train_1.shape[0], 1], dtype=int)
y_train = np.vstack([y_train_0, y_train_1])
y_train = utils.one_hot(y_train)
elm = ELM(X_train.shape[1], y_train.shape[1], classification="c")
elm.add_neurons(10, "sigm")
elm.train(X_train, y_train, "CV", k=10)

Y = elm.predict(X_train)
print(elm.error(y_train, Y))
# y_pred = np.argmax(Y, 1)
# cm = metrics.confusion_matrix(y_true=y_test, y_pred=y_pred)
# print cm
# X_hmm = []
# lengths_hmm = []
# frameNumber = 20
# n_components = 5
# n_mix = 6
# for index in range(0, len(y_all)):
#     if y_all[index] == 0:
#         continue
#     else: