Example #1
    def __init__(self,
                 model_file='./model.h5',
                 elm_model_files=None,
                 feat_path='./temp.csv',
                 context_len=5,
                 max_time_steps=300,
                 elm_hidden_num=50,
                 stl=True,
                 elm_main_task_id=-1,
                 sr=16000,
                 tasks='arousal:2,valence:2'):

        self.stl = stl
        self.model = keras.models.load_model(model_file)

        self.elm_model_files = elm_model_files
        self.sess = tf.Session()
        self.elm_model = []
        self.tasks = []
        self.tasks_names = []
        self.total_high_level_feat = 0

        #setting multi-task
        for task in tasks.split(","):
            print("task: ", task)
            task_n_class = task.split(':')
            self.tasks.append(int(task_n_class[1]))
            self.tasks_names.append(task_n_class[0])
            self.total_high_level_feat = self.total_high_level_feat + int(
                task_n_class[1])

        # setting an ELM model as a post-classifier
        if self.elm_model_files is not None:
            print("elm model is loaded")
            elm_tasks = elm_model_files.split(',')
            if len(elm_tasks) == len(self.tasks):
                print("#tasks: ", len(self.tasks))

                for i in range(0, len(self.tasks)):
                    elm_model_task = ELM(self.sess,
                                         1,
                                         self.total_high_level_feat * 4,
                                         elm_hidden_num,
                                         self.tasks[i],
                                         task=self.tasks_names[i])
                    elm_path = elm_tasks[i]
                    elm_model_task.load(elm_path)

                    self.elm_model.append(elm_model_task)
                self.elm_hidden_num = elm_hidden_num
                self.elm_main_task_id = elm_main_task_id
            else:
                print("mismatch between tasks and elm models")
                exit()

        self.sr = sr
        self.feat_path = feat_path
        self.context_len = context_len
        self.max_time_steps = max_time_steps
        self.model.summary()
Example #2
    def __init__(self, model_file='./model.h5', elm_model_files=None, feat_path='./temp.csv',
                 context_len=5, max_time_steps=300, elm_hidden_num=50, stl=True,
                 elm_main_task_id=-1, sr=16000, tasks='arousal:2,valence:2',
                 min_max=None, seq2seq=False):
        
        self.stl = stl
        self.model = keras.models.load_model(
            model_file,
            custom_objects={'Conv3DHighway': Conv3DHighway, 'Conv2DHighway': Conv2DHighway,
                            'Conv1DHighway': Conv1DHighway, 'Highway': Highway,
                            'w_categorical_crossentropy': w_categorical_crossentropy,
                            'categorical_focal_loss': categorical_focal_loss,
                            'f1': f1, 'precision': precision, 'recall': recall})
        self.seq2seq = seq2seq

        self.elm_model_files = elm_model_files
        self.sess = tf.Session()
        self.elm_model = []
        self.tasks = []
        self.tasks_names = []
        self.total_high_level_feat = 0

        self.feat_ext = FeatExt(min_max)

        #print("Plotting model")
        #config = self.model.get_config()
        #print(config)
        #plot_model(self.model, to_file='model.png', show_shapes=True)

        #setting multi-task
        for task in tasks.split(","):
            print("task: ", task)
            task_n_class = task.split(':') 
            self.tasks.append(int(task_n_class[1]))
            self.tasks_names.append(task_n_class[0])
            self.total_high_level_feat = self.total_high_level_feat + int(task_n_class[1])            
        
        # setting an ELM model as a post-classifier
        if self.elm_model_files is not None:
            print("elm model is loaded")
            elm_tasks = elm_model_files.split(',')
            if len(elm_tasks) == len(self.tasks):
                print("#tasks: ", len(self.tasks))

                for i in range(0, len(self.tasks)):
                    elm_model_task = ELM(self.sess, 1, self.total_high_level_feat * 4, elm_hidden_num, self.tasks[i], task = self.tasks_names[i])
                    elm_path = elm_tasks[i] 
                    elm_model_task.load(elm_path)

                    self.elm_model.append(elm_model_task)   
                self.elm_hidden_num = elm_hidden_num
                self.elm_main_task_id = elm_main_task_id
            else:
                print("mismatch between tasks and elm models")
                exit()

        self.sr = sr
        self.feat_path = feat_path
        self.context_len = context_len
        self.max_time_steps = max_time_steps   
Example #3
def make_experiments_without_extract(X, y):
    svc_no_extract_scores = []
    knn_no_extract_scores = []
    gnb_no_extract_scores = []
    dt_no_extract_scores = []
    mlp_no_extract_scores = []
    elm_no_extract_scores = []

    for train_index, test_index in skf.split(X, y):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        svc_clf = SVC(random_state=444)
        knn_clf = KNeighborsClassifier()
        gnb_clf = GaussianNB()
        dt_clf = DecisionTreeClassifier(random_state=444)
        mlp_clf = MLPClassifier(random_state=444)
        elm_clf = ELM(X_train.shape[1], 1, 1000)

        svc_pred = svc_clf.fit(X_train, y_train).predict(X_test)
        knn_pred = knn_clf.fit(X_train, y_train).predict(X_test)
        gnb_pred = gnb_clf.fit(X_train, y_train).predict(X_test)
        dt_pred = dt_clf.fit(X_train, y_train).predict(X_test)
        mlp_pred = mlp_clf.fit(X_train, y_train).predict(X_test)

        elm_clf.train(X_train, y_train[:, np.newaxis])
        elm_pred = elm_clf.predict(X_test)
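        # the ELM returns continuous scores, so threshold at 0.5 to obtain binary labels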
        elm_pred = (elm_pred > 0.5).astype(int)

        svc_no_extract_scores.append(round(accuracy_score(svc_pred, y_test),
                                           2))
        knn_no_extract_scores.append(round(accuracy_score(knn_pred, y_test),
                                           2))
        gnb_no_extract_scores.append(round(accuracy_score(gnb_pred, y_test),
                                           2))
        dt_no_extract_scores.append(round(accuracy_score(dt_pred, y_test), 2))
        mlp_no_extract_scores.append(round(accuracy_score(mlp_pred, y_test),
                                           2))
        elm_no_extract_scores.append(round(accuracy_score(elm_pred, y_test),
                                           2))

    return [
        round(np.average(svc_no_extract_scores), 2),
        round(np.average(knn_no_extract_scores), 2),
        round(np.average(gnb_no_extract_scores), 2),
        round(np.average(dt_no_extract_scores), 2),
        round(np.average(mlp_no_extract_scores), 2),
        round(np.average(elm_no_extract_scores), 2)
    ]
Example #4
def main():
    from sklearn import preprocessing
    from sklearn.datasets import fetch_openml as fetch_mldata
    from sklearn.model_selection import cross_val_score

    db_name = 'iris'
    hid_num = 1000
    data_set = fetch_mldata(db_name, version=1)
    data_set.data = preprocessing.scale(data_set.data)
    data_set.target = preprocessing.LabelEncoder().fit_transform(data_set.target)

    print(db_name)
    print('ECOBELM', hid_num)
    e = ECOBELM(hid_num, c=2**5)
    ave = 0
    for i in range(10):
        scores = cross_val_score(
            e, data_set.data, data_set.target, cv=5, scoring='accuracy')
        ave += scores.mean()
    ave /= 10
    print("Accuracy: %0.2f " % (ave))

    print('ELM', hid_num)
    e = ELM(hid_num)
    ave = 0
    for i in range(10):
        scores = cross_val_score(
            e, data_set.data, data_set.target, cv=5, scoring='accuracy')
        ave += scores.mean()
    ave /= 10
    print("Accuracy: %0.2f " % (ave))
Example #5
    def test_train_wisconsin1(self, mockFunction):
        print 'Yes, I am running, be patient, Human.'
        returnedMock = Mock(spec=pycassa.ColumnFamily)
        returnedMock.get = mock_columnfamilyget
        returnedMock.batch = Mock()
        mockbatchinsert = Mock()
        mockbatchinsert.insert = mock_batchinsert
        mockbatch = Mock()
        mockbatch.__enter__ = Mock(return_value=mockbatchinsert)
        mockbatch.__exit__ = Mock(return_value=False)
        mockFunction.return_value = returnedMock
        returnedMock.batch.return_value = mockbatch
        self.myELM = ELM(self.minimizationType, self.mockConnectionPool, self.numberNeurons)
        inputData = np.loadtxt('./test_train_input_wisconsin1.txt').reshape((379, 30))
        outputData = np.loadtxt('./test_train_output_wisconsin1.txt').reshape((379, 1))
        self.myELM.ELMTrainingData = ELMData(inputData, outputData, [], [])
        self.myELM.storeTrainingDataToCassandra()
        self.myELM.performTraining()
        self.myELM.trained = True

        print 'Best LOO result retained: %s' % self.myELM.bestClassificationRateLOO

        inputTestData = np.loadtxt('./test_test_input_wisconsin1.txt').reshape((190, 30))
        outputTestData = np.loadtxt('./test_test_output_wisconsin1.txt').reshape((190, 1))
        testData = ELMData(inputTestData, outputTestData, [], [])
        confidence, verdict = self.myELM.verdict(testData)
        print 'Test accuracy: %s' % np.mean(verdict == outputTestData)

        listOfErrors = [item for item in xrange(len(verdict)) if verdict[item]!=outputTestData[item]]
        print confidence[listOfErrors]
Example #6
class MLELM(ELM):
    """
    Multi Layer Extreme Learning Machine
    """
    def __init__(self, *, hidden_neurons=None, a=1, random_state=None):
        super().__init__(hidden_neurons=hidden_neurons,
                         a=a,
                         random_state=random_state)

        self.betas = []
        self.elm = None
        self.out_num = None

    def __calc_hidden_layer(self, X):
        """
        Args:
        X np.array input feature vector
        """
        for beta in self.betas:
            X = np.dot(beta, X.T).T

        return X

    def fit(self, X, y):
        if self.hidden_neurons is None:
            # keep the default as a tuple so the per-layer slicing below still works
            self.hidden_neurons = (2 * X.shape[1],)

        self.out_num = max(y)
        X = self._add_bias(X)

        for hid_num in self.hidden_neurons[:-1]:
            _X = self.__calc_hidden_layer(X)
            W = self._random_state.uniform(-1., 1., (hid_num, _X.shape[1]))
            H = np.linalg.pinv(self._sigmoid(np.dot(W, _X.T)))
            beta = np.dot(H.T, _X)
            self.betas.append(beta)

        _X = self.__calc_hidden_layer(X)

        self.elm = ELM(hidden_neurons=self.hidden_neurons[-1])
        self.elm.fit(_X, y)

        return self

    def predict(self, X):
        X = self.__calc_hidden_layer(self._add_bias(X))
        return self.elm.predict(X)
Example #7
def main():
    from sklearn import preprocessing
    from sklearn.datasets import fetch_mldata
    from sklearn.model_selection import train_test_split

    db_name = 'diabetes'
    data_set = fetch_mldata(db_name)
    data_set.data = preprocessing.normalize(data_set.data)

    X_train, X_test, y_train, y_test = train_test_split(
        data_set.data, data_set.target, test_size=0.4)

    mlelm = MLELM(hidden_units=(10, 30, 200)).fit(X_train, y_train)
    elm = ELM(200).fit(X_train, y_train)

    print("MLELM Accuracy %0.3f " % mlelm.score(X_test, y_test))
    print("ELM Accuracy %0.3f " % elm.score(X_test, y_test))
Example #8
    def fit(self, X, y):
        self.out_num = max(y)
        X = self._add_bias(X)

        for hid_num in self.hidden_units[:-1]:
            _X = self.__calc_hidden_layer(X)
            np.random.seed()
            W = np.random.uniform(-1., 1., (hid_num, _X.shape[1]))
            _H = np.linalg.pinv(self._sigmoid(np.dot(W, _X.T)))
            beta = np.dot(_H.T, _X)
            self.betas.append(beta)

        _X = self.__calc_hidden_layer(X)

        self.elm = ELM(hid_num=self.hidden_units[-1])
        self.elm.fit(_X, y)

        return self
Example #9
def main():
    from sklearn import preprocessing
    from sklearn.datasets import fetch_mldata
    from sklearn.model_selection import train_test_split

    db_name = 'diabetes'
    data_set = fetch_mldata(db_name)
    data_set.data = preprocessing.normalize(data_set.data)

    X_train, X_test, y_train, y_test = train_test_split(data_set.data,
                                                        data_set.target,
                                                        test_size=0.4)

    mlelm = MLELM(hidden_units=(10, 30, 200)).fit(X_train, y_train)
    elm = ELM(200).fit(X_train, y_train)

    print("MLELM Accuracy %0.3f " % mlelm.score(X_test, y_test))
    print("ELM Accuracy %0.3f " % elm.score(X_test, y_test))
Example #10
class MLELM(ELM):
    """
    Multi Layer Extreme Learning Machine

    """

    def __init__(self, hidden_units, a=1):
        self.hidden_units = hidden_units
        self.betas = []
        self.a = a

    def __calc_hidden_layer(self, X):
        """
        Args:
        X np.array input feature vector
        """
        for beta in self.betas:
            X = np.dot(beta, X.T).T
        return X

    def fit(self, X, y):
        self.out_num = max(y)
        X = self._add_bias(X)

        for hid_num in self.hidden_units[:-1]:
            _X = self.__calc_hidden_layer(X)
            np.random.seed()
            W = np.random.uniform(-1., 1.,
                                  (hid_num, _X.shape[1]))
            _H = np.linalg.pinv(self._sigmoid(np.dot(W, _X.T)))
            beta = np.dot(_H.T, _X)
            self.betas.append(beta)

        _X = self.__calc_hidden_layer(X)

        self.elm = ELM(hid_num=self.hidden_units[-1])
        self.elm.fit(_X, y)

        return self

    def predict(self, X):
        X = self.__calc_hidden_layer(self._add_bias(X))
        return self.elm.predict(X)
Example #11
class MLELM(ELM):
    """
    Multi Layer Extreme Learning Machine

    """

    def __init__(self, hidden_units, a=1):
        self.hidden_units = hidden_units
        self.betas = []
        self.a = a

    def __calc_hidden_layer(self, X):
        """
        Args:
        X np.array input feature vector
        """
        for beta in self.betas:
            X = np.dot(beta, X.T).T
        return X

    def fit(self, X, y):
        self.out_num = max(y)
        X = self._add_bias(X)

        for hid_num in self.hidden_units[:-1]:
            _X = self.__calc_hidden_layer(X)
            np.random.seed()
            W = np.random.uniform(-1., 1.,
                                  (hid_num, _X.shape[1]))
            _H = np.linalg.pinv(self._sigmoid(np.dot(W, _X.T)))
            beta = np.dot(_H.T, _X)
            self.betas.append(beta)

        _X = self.__calc_hidden_layer(X)

        self.elm = ELM(hid_num=self.hidden_units[-1])
        self.elm.fit(_X, y)

        return self

    def predict(self, X):
        X = self.__calc_hidden_layer(self._add_bias(X))
        return self.elm.predict(X)
Example #12
    def fit(self, X, y):
        if self.hidden_neurons is None:
            # keep the default as a tuple so the per-layer slicing below still works
            self.hidden_neurons = (2 * X.shape[1],)

        self.out_num = max(y)
        X = self._add_bias(X)

        for hid_num in self.hidden_neurons[:-1]:
            _X = self.__calc_hidden_layer(X)
            W = self._random_state.uniform(-1., 1., (hid_num, _X.shape[1]))
            H = np.linalg.pinv(self._sigmoid(np.dot(W, _X.T)))
            beta = np.dot(H.T, _X)
            self.betas.append(beta)

        _X = self.__calc_hidden_layer(X)

        self.elm = ELM(hidden_neurons=self.hidden_neurons[-1])
        self.elm.fit(_X, y)

        return self
Example #13
File: elm_test.py Project: ymiche/fi
    def test_verdict(self, mockFunction):
        returnedMock = Mock(spec=pycassa.ColumnFamily)
        returnedMock.get = mock_columnfamilyget
        mockFunction.return_value = returnedMock

        self.myELM = ELM(self.minimizationType, self.mockConnectionPool, self.numberNeurons)
        self.myELM.train()

        inputTestData = np.loadtxt('./test_input_data_training_elm.txt').reshape((633, 9))
        outputTestData = np.loadtxt('./test_output_data_training_elm.txt').reshape((633, 1))
        ELMTestingData = ELMData(inputTestData, outputTestData, [], [])
Example #14
def make_experiments_with_lda(X, y):
    for train_index, test_index in skf.split(X, y):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        lda = LinearDiscriminantAnalysis()
        X_lda = lda.fit_transform(X_train, y_train)
        X_test_lda = lda.transform(X_test)

        svc_clf = SVC(random_state=444)
        knn_clf = KNeighborsClassifier()
        gnb_clf = GaussianNB()
        dt_clf = DecisionTreeClassifier(random_state=444)
        mlp_clf = MLPClassifier(random_state=444)
        elm_clf = ELM(X_lda.shape[1], 1, 1000)

        svc_pred = svc_clf.fit(X_lda, y_train).predict(X_test_lda)
        knn_pred = knn_clf.fit(X_lda, y_train).predict(X_test_lda)
        gnb_pred = gnb_clf.fit(X_lda, y_train).predict(X_test_lda)
        dt_pred = dt_clf.fit(X_lda, y_train).predict(X_test_lda)
        mlp_pred = mlp_clf.fit(X_lda, y_train).predict(X_test_lda)

        elm_clf.train(X_lda, y_train[:, np.newaxis])
        elm_pred = elm_clf.predict(X_test_lda)
        elm_pred = (elm_pred > 0.5).astype(int)

        svc_lda_scores.append(round(accuracy_score(svc_pred, y_test), 2))
        knn_lda_scores.append(round(accuracy_score(knn_pred, y_test), 2))
        gnb_lda_scores.append(round(accuracy_score(gnb_pred, y_test), 2))
        dt_lda_scores.append(round(accuracy_score(dt_pred, y_test), 2))
        mlp_lda_scores.append(round(accuracy_score(mlp_pred, y_test), 2))
        elm_lda_scores.append(round(accuracy_score(elm_pred, y_test), 2))

    return [
        round(np.average(svc_lda_scores), 2),
        round(np.average(knn_lda_scores), 2),
        round(np.average(gnb_lda_scores), 2),
        round(np.average(dt_lda_scores), 2),
        round(np.average(mlp_lda_scores), 2),
        round(np.average(elm_lda_scores), 2)
    ]
Example #15
File: elm_test.py Project: ymiche/fi
    def test_train(self, mockFunction):
        returnedMock = Mock(spec=pycassa.ColumnFamily)
        returnedMock.get = mock_columnfamilyget
        returnedMock.batch = Mock()
        mockbatchinsert = Mock()
        mockbatchinsert.insert = mock_batchinsert
        mockbatch = Mock()
        mockbatch.__enter__ = Mock(return_value=mockbatchinsert)
        mockbatch.__exit__ = Mock(return_value=False)
        mockFunction.return_value = returnedMock
        returnedMock.batch.return_value = mockbatch
        self.myELM = ELM(self.minimizationType, self.mockConnectionPool, self.numberNeurons)
        self.myELM.train()
Example #16
def extremLM(data, target, checkHiddenPoints,
             fileName):  # Extreme Learning Machine
    scalar = 10000
    X = data.iloc[:, :-1]
    X_norm = (X - X.mean()) / (X.max() - X.min())
    y = data[target]
    # y_int = y.apply(lambda x: x * scalar)  # Need to be fixed!
    y_int = y * scalar
    y_int = y_int.apply(np.int64)
    # print(y_int)
    elmList = []
    if checkHiddenPoints:
        inter_num = 100
        hiddenNum = [1000, 3000, 5000, 6000, 8000,
                     10000]  # number of hidden points in ELM
        errorList = []
        for hidNum in hiddenNum:
            total_error = 0
            for i in range(inter_num):
                X_train, X_test, y_train, y_test = train_test_split(
                    X_norm, y_int, test_size=0.2)
                elm = ELM(hid_num=hidNum).fit(X_train, y_train)
                y_pred = elm.predict(X_test)
                sum_mean = 0
                # print("This is test value:", y_test.values)
                # print("This is prediction values:", y_pred)
                for i in range(len(y_pred)):
                    sum_mean += (y_pred[i] - y_test.values[i])**2
                sum_error = (np.sqrt(sum_mean / len(y_pred))) / scalar
                # calculate RMSE
                total_error = total_error + sum_error
                print("RMSE:", sum_error)  # Root Mean Squared Error, RMSE
            print("This is average RMSE for ELM:", total_error / inter_num)
            errorList.append(total_error / inter_num)
        # Plot
        x_pos = list(range(len(hiddenNum)))
        plt.bar(x_pos, errorList, align='center', alpha=0.5)
        plt.grid()
        plt.ylabel('Root Mean Squared Error')
        plt.xticks(x_pos, hiddenNum)
        plt.title('Different errors based on the number of hidden points')
        plt.show()
    else:
        for j in range(100):
            X_train, X_test, y_train, y_test = train_test_split(X_norm,
                                                                y_int,
                                                                test_size=0.2)
            elm = ELM(hid_num=6000).fit(X_train, y_train)
            y_pred = elm.predict(X_test)
            sum_mean = 0
            # print("This is test value:", y_test.values / scalar)
            print("This is prediction values:", y_pred / scalar)
            print("This is iteration number: ", j)
            for i in range(len(y_pred)):
                sum_mean += (y_pred[i] - y_test.values[i])**2
            sum_error = (np.sqrt(sum_mean / len(y_pred))) / scalar
            elmList.append(sum_error)
        return elmList
Example #17
def definir_mat_elm(a_delay,a_neurons):
	a_elm = list()
	num_ativos = len(a_delay)
	num_ind = len(a_delay[0])
	for i in range(num_ativos):
		aux = list()
		for j in range(num_ind):
			#print(a_delay[i][j],a_neurons[i][j])
			el = ELM(int(a_delay[i][j]),int(a_neurons[i][j]))
			#el = ELM(3,50)
			aux.append(el)
		a_elm.append(aux)
		del aux
	return a_elm
Example #18
def main():
    from sklearn import preprocessing
    from sklearn.datasets import fetch_openml as fetch_mldata
    from sklearn.model_selection import train_test_split

    db_name = 'diabetes'
    data_set = fetch_mldata(db_name)
    data_set.data = preprocessing.normalize(data_set.data)

    tmp = data_set.target
    tmpL = [1 if i == "tested_positive" else -1 for i in tmp]
    data_set.target = tmpL

    X_train, X_test, y_train, y_test = train_test_split(data_set.data,
                                                        data_set.target,
                                                        test_size=0.4,
                                                        random_state=0)

    mlelm = MLELM(hidden_neurons=(10, 30, 200),
                  random_state=0).fit(X_train, y_train)
    elm = ELM(hidden_neurons=200, random_state=0).fit(X_train, y_train)

    print("MLELM Accuracy %0.3f " % mlelm.score(X_test, y_test))
    print("ELM Accuracy %0.3f " % elm.score(X_test, y_test))
Example #19
    def fit(self, X, y):
        self.out_num = max(y)
        X = self._add_bias(X)

        for hid_num in self.hidden_units[:-1]:
            _X = self.__calc_hidden_layer(X)
            np.random.seed()
            W = np.random.uniform(-1., 1.,
                                  (hid_num, _X.shape[1]))
            _H = np.linalg.pinv(self._sigmoid(np.dot(W, _X.T)))
            beta = np.dot(_H.T, _X)
            self.betas.append(beta)

        _X = self.__calc_hidden_layer(X)

        self.elm = ELM(hid_num=self.hidden_units[-1])
        self.elm.fit(_X, y)

        return self
Example #20
File: main.py Project: ryukinix/svm
def main():
    X, y = dataset.digits()
    print("--X--")
    print(X)
    print("SHAPE: ", X.shape)
    print("--y--")
    print(y)
    print("SHAPE: ", y.shape)
    # Testing models already implemented in the SVM and MLP libraries
    models = []
    q = 200
    models.append(('MLP', MLPClassifier((q, ))))
    models.append(('ELM', ELM(q=q)))
    # models.append(('NN_RBF', NNRBF(q=500)))
    models.append(('LinearSVM', SVC(kernel="linear", gamma='scale')))
    models.append(('PolySVM', SVC(kernel="poly", gamma='scale')))
    models.append(('RBF_SVM', SVC(kernel="rbf", gamma='scale')))

    k = 5
    # Auxiliary variables
    accs = []
    names = []
    print("--BENCHMARK--")
    # Loop for evaluating each model
    for name, model in models:
        # Creating the cross-validation folds
        kfold = KFold(n_splits=k, shuffle=True, random_state=42)
        # Collecting the results obtained by cross-validation
        cv_accs = cross_val_score(model, X, y, cv=kfold, scoring='accuracy')
        accs.append(cv_accs)
        names.append(name)
        acc_mean = cv_accs.mean()
        acc_std = cv_accs.std()
        msg = "Acc({}) = {:.2f}±{:.2f}".format(name, acc_mean, acc_std)
        print(msg)

    plot(names, accs)
Example #21
def elm_load_predict(model,
                     X_test,
                     multiTasks,
                     unweighted,
                     stl,
                     dictForLabelsTest,
                     hidden_num=50,
                     main_task_id=-1,
                     elm_load_path='./model/elm.ckpt',
                     dataset='test'):
    sess = tf.Session()

    print('elm high level feature generating')
    pred_test = model.predict([X_test])
    feat_test = high_level_feature_mtl(pred_test,
                                       stl=stl,
                                       main_task_id=main_task_id)

    print('high level feature dim: ', feat_test.shape[1])

    scores = []
    for task, classes, idx in multiTasks:
        elm = ELM(sess, feat_test.shape[0], feat_test.shape[1], hidden_num,
                  dictForLabelsTest[task].shape[1])

        print('elm loading')
        elm.load(elm_load_path)

        print('elm testing')
        labels = dictForLabelsTest[task]
        if unweighted:
            preds = elm.test(feat_test)
            scores.append(unweighted_recall(preds, labels, task, dataset))
        else:
            acc = elm.test(feat_test, labels)
            scores.append(acc)
    return scores
Example #22
hit_rates = []
no_of_attributes = dataset.shape[1] - 1
no_of_classes = len(dataset[0, no_of_attributes])

# insert bias
no_rows = dataset.shape[0]
dataset = np.c_[-1 * np.ones(no_rows), dataset]

# perceptron = Perceptron(no_of_classes, no_of_attributes, 5, 'logistic')

for j in range(0, 20):
    print("realization %d" % j)
    train_X, train_y, test_X, test_y = Classifier.train_test_split(dataset)
    train_X = np.array(train_X, dtype=float)
    test_X = np.array(test_X, dtype=float)

    hidden_units = ELM.model_training(no_of_classes, no_of_attributes, train_X,
                                      train_y)
    elm = ELM(no_of_classes, no_of_attributes, hidden_units)
    elm.train(train_X, train_y)
    predictions = elm.predict(test_X)
    hit_rates.append(elm.evaluate(test_y, predictions))
    print(elm.confusion_matrix(test_y, predictions))
    # Perceptron.plot_decision_boundaries(train_X, train_y, test_X, test_y, perceptron, hidden_neurons, j)

print('hit rates: {}'.format(hit_rates))
print('accuracy: {}'.format(np.mean(hit_rates)))
print('std: {}'.format(np.std(hit_rates)))
# Perceptron.show_plot_decision_boundaries()
Example #23
accuracy = np.zeros((20, 1))
mean_time = 0
data = []
best = [[], 0]
for i in range(iters):
    CVO = KFold(n_splits=n_folds, shuffle=True)
    acc_values = []
    for train_index, test_index in CVO.split(X):
        X_train, X_test = X[train_index], X[test_index]
        Y_train, Y_test = Y[train_index], Y[test_index]

        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train)
        X_test = scaler.transform(X_test)

        elm = ELM(hidden_units=20, activation="log")
        #elm = ELM(hidden_units = 20, activation="tan")
        elm.fit(X_train, Y_train)
        Y_hat = elm.predict(X_test)
        Y_hat = np.round(Y_hat)
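        # fold accuracy: fraction of samples whose argmax prediction matches the one-hot label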
        acc_values.append(
            np.sum(
                np.where(
                    np.argmax(Y_hat, axis=1) == np.argmax(Y_test, axis=1), 1,
                    0)) / len(Y_test))
        if acc_values[-1] > best[1]:
            best[0] = confusion_matrix(np.argmax(Y_test, axis=1),
                                       np.argmax(Y_hat, axis=1))
            best[1] = acc_values[-1]
    accuracy[i] = np.mean(acc_values)
Example #24
def elm_predict(model,
                X_train,
                X_test,
                X_valid,
                multiTasks,
                unweighted,
                stl,
                dictForLabelsTrain,
                dictForLabelsTest,
                dictForLabelsValid,
                hidden_num=50,
                main_task_id=-1,
                elm_save_path='./model/elm.ckpt',
                dataset='test'):
    sess = tf.Session()

    print('elm high level feature generating')
    pred_train = model.predict([X_train])
    feat_train = high_level_feature_mtl(pred_train,
                                        stl=stl,
                                        main_task_id=main_task_id)
    print('high level feature dim for train: ', feat_train.shape[1])

    #add total features
    add_high_feature(feat_train, multiTasks, dictForLabelsTrain,
                     total_high_pred_train)

    pred_test = model.predict([X_test])
    feat_test = high_level_feature_mtl(pred_test,
                                       stl=stl,
                                       main_task_id=main_task_id)

    #add total features
    add_high_feature(feat_test, multiTasks, dictForLabelsTest,
                     total_high_pred_test)

    print('high level feature dim for test: ', feat_test.shape[1])

    if len(X_valid) != 0:
        pred_valid = model.predict([X_valid])
        feat_valid = high_level_feature_mtl(pred_valid,
                                            stl=stl,
                                            main_task_id=main_task_id)

    scores = []
    for task, classes, idx in multiTasks:
        elm = ELM(sess,
                  feat_train.shape[0],
                  feat_train.shape[1],
                  hidden_num,
                  dictForLabelsTrain[task].shape[1],
                  task=str(task))

        print('elm training')
        elm.feed(feat_train, dictForLabelsTrain[task])
        elm.save(elm_save_path + "." + str(task) + ".elm.ckpt")

        print('elm testing')
        labels = dictForLabelsTest[task]
        if unweighted:
            preds = elm.test(feat_test)
            scores.append(unweighted_recall(preds, labels, task, dataset))
        else:
            acc = elm.test(feat_test, labels)
            scores.append(acc)

        if len(X_valid) != 0:
            print('elm validating')
            labels = dictForLabelsValid[task]
            if unweighted:
                preds = elm.test(feat_valid)
                scores.append(unweighted_recall(preds, labels, task, dataset))
            else:
                acc = elm.test(feat_valid, labels)
                scores.append(acc)
    return scores
Example #25
File: train.py Project: wxy521/Numpy-ELM
def main(args):
    # ===============================
    # Load dataset
    # ===============================
    n_classes = 10
    (x_train, t_train), (x_test, t_test) = mnist.load_data()

    # ===============================
    # Preprocess
    # ===============================
    x_train = x_train.astype(np.float32) / 255.
    x_train = x_train.reshape(-1, 28**2)
    x_test = x_test.astype(np.float32) / 255.
    x_test = x_test.reshape(-1, 28**2)
    t_train = to_categorical(t_train, n_classes).astype(np.float32)
    t_test = to_categorical(t_test, n_classes).astype(np.float32)

    # ===============================
    # Instantiate ELM
    # ===============================
    model = ELM(
        n_input_nodes=28**2,
        n_hidden_nodes=args.n_hidden_nodes,
        n_output_nodes=n_classes,
        loss=args.loss,
        activation=args.activation,
        name='elm',
    )

    # ===============================
    # Training
    # ===============================
    model.fit(x_train, t_train)
    train_loss, train_acc = model.evaluate(x_train,
                                           t_train,
                                           metrics=['loss', 'accuracy'])
    print('train_loss: %f' % train_loss)
    print('train_acc: %f' % train_acc)

    # ===============================
    # Validation
    # ===============================
    val_loss, val_acc = model.evaluate(x_test,
                                       t_test,
                                       metrics=['loss', 'accuracy'])
    print('val_loss: %f' % val_loss)
    print('val_acc: %f' % val_acc)

    # ===============================
    # Prediction
    # ===============================
    x = x_test[:10]
    t = t_test[:10]
    y = softmax(model.predict(x))

    for i in range(len(y)):
        print('---------- prediction %d ----------' % (i + 1))
        class_pred = np.argmax(y[i])
        prob_pred = y[i][class_pred]
        class_true = np.argmax(t[i])
        print('prediction:')
        print('\tclass: %d, probability: %f' % (class_pred, prob_pred))
        print('\tclass (true): %d' % class_true)

    # ===============================
    # Save model
    # ===============================
    print('saving model...')
    model.save('model.h5')
    del model

    # ===============================
    # Load model
    # ===============================
    print('loading model...')
    model = load_model('model.h5')
Example #26
# insert bias
no_rows = dataset.shape[0]
dataset = np.c_[-1 * np.ones(no_rows), dataset]

dictionary = {}
dictionary['mse'] = []
dictionary['rmse'] = []

for j in range(0, 20):
    print("realization %d" % j)
    train_X, train_y, test_X, test_y = Classifier.train_test_split(dataset)
    train_X = np.array(train_X, dtype=float)
    test_X = np.array(test_X, dtype=float)

    elm = ELM(no_of_classes, no_of_attributes)
    elm.train(train_X, train_y)
    predictions = elm.predict(test_X)
    mse, rmse = elm.evaluate(test_y, predictions)
    dictionary['mse'].append(mse)
    dictionary['rmse'].append(rmse)
    # ELM.plot_decision_boundaries_one(train_X, train_y, test_X, test_y, elm, j)

print('mean square error: {}'.format(dictionary['mse']))
print('root mean square error: {}'.format(dictionary['rmse']))
print('mean mse: {}'.format(np.mean(dictionary['mse'])))
print('mean rmse: {}'.format(np.mean(dictionary['rmse'])))
print('std mse: {}'.format(np.std(dictionary['mse'])))
print('std rmse: {}'.format(np.std(dictionary['rmse'])))
# ELM.show_plot_decision_boundaries()
Example #27
    model12.train(out1, out1, alpha, batch_size, max_iter)

    # stacking the pretrained autoencoders
    model = MLP([n, 42, 24], ['sigmoid', 'sigmoid'])

    # initializing pretrained weights
    model.W_list[0] = model11.W_list[0]
    model.W_list[-1] = model11.W_list[0].T

    model.W_list[1] = model12.W_list[0]
    model.W_list[-2] = model12.W_list[0].T

    # finetuning the stacked autoencoder
    # print("training stacked autoencoder")
    # model.train(X_train, X_train, alpha, batch_size, 50)

    print("\nELM part of the neural network\n")

    elm_X_train = np.ndarray((X_train.shape[0], model.A[2].shape[0]))
    elm_X_test = np.ndarray((X_test.shape[0], model.A[2].shape[0]))

    for i in range(X_train.shape[0]):
        model.forward_prop(X_train[i])
        elm_X_train[i] = model.A[2].reshape(-1, )
    for i in range(X_test.shape[0]):
        model.forward_prop(X_test[i])
        elm_X_test[i] = model.A[2].reshape(-1, )
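    # the loops above collect model.A[2], the top-layer activations of the stacked
    # autoencoder, as the input features for the ELM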

    elm_model = ELM(128, elm_X_train, y_train, 'tanh')
    elm_model.test(elm_X_test, y_test)
    elm_model.test(elm_X_train, y_train)
Example #28
File: elm_test.py Project: ymiche/fi
class ELMTest(unittest.TestCase):
    # We will test both FP and FN versions of the ELM
    def setUp(self):
        self.mockConnectionPool = Mock(spec=pycassa.pool.ConnectionPool)
        self.minimizationType = 'FP'
        self.numberNeurons = 100


    def test_init(self):
        # Check that wrong input raises exception
        self.assertRaises(Exception, ELM, 'SomeString', self.mockConnectionPool, self.numberNeurons)
        self.assertRaises(Exception, ELM, self.minimizationType, 'NotAConnectionPool', self.numberNeurons)
        self.assertRaises(Exception, ELM, self.minimizationType, self.mockConnectionPool, 'NotANumber')
        
    @patch('elm.pycassa.columnfamily.ColumnFamily')    
    def test_createTrainingData(self, mockFunction):
        returnedMock = Mock(spec=pycassa.ColumnFamily)
        returnedMock.get = mock_columnfamilyget
        returnedMock.get_range = mock_columnfamilyget_range
        returnedMock.batch = Mock()
        mockbatchinsert = Mock()
        mockbatchinsert.insert = mock_batchinsert
        mockbatch = Mock()
        mockbatch.__enter__ = Mock(return_value=mockbatchinsert)
        mockbatch.__exit__ = Mock(return_value=False)
        mockFunction.return_value = returnedMock
        returnedMock.batch.return_value = mockbatch
        self.myELM = ELM(self.minimizationType, self.mockConnectionPool, self.numberNeurons)
        self.myELM.createTrainingData()

    @patch('elm.pycassa.columnfamily.ColumnFamily')
    def test_storeTrainingDataToCassandra(self, mockFunction):
        returnedMock = Mock(spec=pycassa.ColumnFamily)
        returnedMock.get = mock_columnfamilyget
        returnedMock.get_range = mock_columnfamilyget_range
        returnedMock.batch = Mock()
        mockbatchinsert = Mock()
        mockbatchinsert.insert = mock_batchinsert
        mockbatch = Mock()
        mockbatch.__enter__ = Mock(return_value=mockbatchinsert)
        mockbatch.__exit__ = Mock(return_value=False)
        mockFunction.return_value = returnedMock
        returnedMock.batch.return_value = mockbatch
        self.myELM = ELM(self.minimizationType, self.mockConnectionPool, self.numberNeurons)
        self.myELM.createTrainingData()
        self.myELM.storeTrainingDataToCassandra()

    @patch('elm.pycassa.columnfamily.ColumnFamily')
    def test_train(self, mockFunction):
        returnedMock = Mock(spec=pycassa.ColumnFamily)
        returnedMock.get = mock_columnfamilyget
        returnedMock.batch = Mock()
        mockbatchinsert = Mock()
        mockbatchinsert.insert = mock_batchinsert
        mockbatch = Mock()
        mockbatch.__enter__ = Mock(return_value=mockbatchinsert)
        mockbatch.__exit__ = Mock(return_value=False)
        mockFunction.return_value = returnedMock
        returnedMock.batch.return_value = mockbatch
        self.myELM = ELM(self.minimizationType, self.mockConnectionPool, self.numberNeurons)
        self.myELM.train()


    @patch('elm.pycassa.columnfamily.ColumnFamily')
    def test_verdict(self, mockFunction):
        returnedMock = Mock(spec=pycassa.ColumnFamily)
        returnedMock.get = mock_columnfamilyget
        mockFunction.return_value = returnedMock

        self.myELM = ELM(self.minimizationType, self.mockConnectionPool, self.numberNeurons)
        self.myELM.train()

        inputTestData = np.loadtxt('./test_input_data_training_elm.txt').reshape((633, 9))
        outputTestData = np.loadtxt('./test_output_data_training_elm.txt').reshape((633, 1))
        ELMTestingData = ELMData(inputTestData, outputTestData, [], [])
Example #29
iters = 20
mse = np.zeros((iters, 1))
rmse = np.zeros((iters, 1))
for i in range(iters):
    X_train, X_test, Y_train, Y_test = train_test_split(dataset[:, :1],
                                                        dataset[:, 1],
                                                        test_size=0.33)
    Y_train = Y_train.reshape((Y_train.shape[0], 1))
    Y_test = Y_test.reshape((Y_test.shape[0], 1))

    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    elm = ELM(hidden_units=8)
    elm.fit(X_train, Y_train)
    Y_hat = elm.predict(X_test)

    mse[i] = ((Y_test - Y_hat)**2).mean(axis=0)
    rmse[i] = mse[i]**(1. / 2)

print("Average MSE", np.mean(mse, axis=0))
print("Average RMSE", np.mean(rmse, axis=0))
print("Standard Deviation (MSE)", np.std(mse, axis=0))
print("Standard Deviation (RMSE)", np.std(rmse, axis=0))

xx = dataset[:, 0:1]
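# scale the full input column for plotting, reusing the scaler fitted on the last split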
xx = scaler.transform(xx)
yy = dataset[:, 1]
fig, ax = plt.subplots()
Example #30
from elm import ELM
from helper import Helper
from statistics import mean
import datetime

datasets = [
    'datasets/wisconsin_transformed.csv', 'datasets/abalone.csv',
    'datasets/computer_revised.csv', 'datasets/servo_revised.csv'
]
debug = False
h = Helper()
h.get_dataset(datasets[3])

train, test = h.split_dataset()

neural_network = ELM(input_size=13, output_layer_size=1)

#neural_network.add_neuron(9, "linear")
neural_network.add_neuron(100, "sigmoid")

output_classes = []
print(len(train))
print(datetime.datetime.now())
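# feed each training row's features to the ELM one at a time; the collected labels
# are used afterwards by update_beta() to solve for the output weights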
for item in train.values:
    #item[:len(item)-1]
    neural_network.train(item[:len(item) - 1])

    output_classes.append(item[len(item) - 1])
neural_network.update_beta(output_classes)  #create output_weights
print(datetime.datetime.now())
Example #31
from elm import ELM
from sklearn.preprocessing import normalize
from sklearn.datasets import fetch_mldata
from sklearn.model_selection import train_test_split
import tempfile
import pandas as pd



test_data_home = tempfile.mkdtemp()

db_name = 'australian'

data_set = pd.read_csv('australian.csv')
data_set_data = data_set.iloc[:, :-1]
df_norm = (data_set_data - data_set_data.mean()) / (data_set_data.max() - data_set_data.min())
print(df_norm)
y = data_set['class-label']

print(y)

X_train, X_test, y_train, y_test = train_test_split(
    df_norm, y, test_size=0.4)

elm = ELM(hid_num=10).fit(X_train, y_train)

print("ELM Accuracy %0.3f " % elm.score(X_test, y_test))
Example #32
        test_target = np.append(test_target,
                                np.zeros(dtype=np.int, shape=ith_total_test) +
                                i,
                                axis=0)
    return train_input, train_target, test_input, test_target


# data preparation
my_data = genfromtxt('iris.csv', delimiter=',')
x_inp = my_data[:, 0:-1]
t_inp = my_data[:, -1]

train_input, train_target, test_input, test_target = split_data(
    x_inp, t_inp, 0.6, 0.4)

e = ELM(50)
e.train(train_input, train_target)
e.test(test_input, test_target)
print e.train_acc
print e.test_acc
"""
# start for article on https://fennialesmana.com/extreme-learning-machine/
# 1. Prepare the input data (x) and target data (t)
x = np.array([[-1, -5, 5, 5], [2, -4, 2, 3]])
t = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
# 2. Prepare the number of hidden nodes, input weight (w), and bias (b) randomly
w = np.array([[0.5, 0.2], [0.7, -0.4], [-0.6, 0.3]])
b = np.array([[0.6], [0.7], [0.4]])
# 3. Calculate the output of hidden layer (H)
H = np.dot(w, x) + b
H = (1/(1+(numpy.matlib.exp(H*-1)))).transpose()
Example #33
                training_pred_emotion_sequence > 0.25, axis=1)
            portion_over_threshold = portion_over_threshold / max(
                training_pred_emotion_sequence.shape[1], 1)
            high_lvl_features[index] = np.concatenate(
                (max_, min_, mean, portion_over_threshold), axis=1)
            high_lvl_labels[index] = labels[2]
        return high_lvl_features, high_lvl_labels

    training_high_lvl_features, training_labels = extract_high_level_features(
        training_sequence)
    test_high_lvl_features, testing_labels = extract_high_level_features(
        test_sequence)

    sess = tf.Session()
    elm = ELM(sess,
              *training_high_lvl_features.shape,
              hidden_num=50,
              output_len=4)
    elm.feed(training_high_lvl_features, training_labels)
    predictions = elm.test(test_high_lvl_features)
    predictions = np.argmax(predictions, axis=1)
    testing_labels = np.argmax(testing_labels, axis=1)
    cm = confusion_matrix(testing_labels,
                          predictions,
                          labels=np.array([0, 1, 2, 3]))
    with open(args.save_dir + f'/{args.prefix}_elm_confussion_matrix.txt',
              mode='w') as f:
        f.write(str(cm))
    with open(args.save_dir + f'/{args.prefix}_elm_confussion_matrix.pkl',
              mode='wb') as f:
        pickle.dump(cm, f, protocol=4)