Example #1
    def train(self, x_train, y_train):
        # Delta-rule (LMS) training: repeat epochs until the epoch error stops
        # changing by more than self.precision or the epoch limit is reached.
        error_old = 0
        cont_epochs = 0
        mse_vector = []
        while True:
            self.updateEta(cont_epochs)  # learning-rate schedule per epoch
            x_train, y_train = util.shuffleData(x_train, y_train)
            (m, _) = x_train.shape
            error_epoch = 0
            for i in range(m):
                xi = x_train[i]
                y = self.predict(xi)
                d = y_train[i]

                error = d - y
                error_epoch += error**2

                # update weights
                self.w += self.eta * (error * xi)

            mse = error_epoch / m
            mse_vector.append(mse)

            if abs(error_epoch - error_old) <= self.precision:
                #print('Stop Precision: {}'.format(abs(error_epoch - error_old)))
                break
            if cont_epochs >= self.epochs:
                #print('Stop Epochs: {}'.format(cont_epochs))
                break

            error_old = error_epoch
            cont_epochs += 1
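
The loop above relies on self.predict and self.updateEta, which are not shown in this snippet. A minimal sketch of plausible implementations, assuming a linear Adaline output during training and a simple linearly decaying learning rate (eta_initial, eta_final and the decay rule are assumptions, not taken from the original code):

import numpy as np

def predict(w, xi):
    # Adaline output used during training: plain weighted sum,
    # with the bias input already prepended to xi
    return np.dot(w, xi)

def update_eta(eta_initial, eta_final, epoch, max_epochs):
    # Hypothetical linear decay of the learning rate over the training run
    return eta_initial + (eta_final - eta_initial) * (epoch / max_epochs)
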
Example #2
    def execute(self):
        # Pre-processing: normalize the inputs and add the bias term
        x_data = util.normalizeData(self.x_data)
        x_data = util.insertBias(x_data)
        y_data = self.y_data

        for i in range(self.realizations):
            self.initWeigths()
            x_data_aux, y_data_aux = util.shuffleData(x_data, y_data)
            x_train, x_test, y_train, y_test = util.splitData(
                x_data_aux, y_data_aux, self.train_size)
            self.train(x_train, y_train)
            acc, tpr, spc, ppv = self.test(x_test, y_test)

            self.hit_rate.append(acc)
            self.tpr.append(tpr)
            self.spc.append(spc)
            self.ppv.append(ppv)

        #util.plotColorMap(x_train, x_test, y_train, self.predict)

        # Average the metrics collected over all realizations
        self.acc = np.mean(self.hit_rate)
        self.std = np.std(self.hit_rate)
        self.tpr = np.mean(self.tpr)
        self.spc = np.mean(self.spc)
        self.ppv = np.mean(self.ppv)

        print('Hit rate: {}'.format(self.hit_rate))
        print('Accuracy: {:.2f}'.format(self.acc * 100))
        print('Minimum: {:.2f}'.format(np.amin(self.hit_rate) * 100))
        print('Maximum: {:.2f}'.format(np.amax(self.hit_rate) * 100))
        print('Standard Deviation: {:.2f}'.format(self.std))
        print('Sensitivity: {:.2f}'.format(self.tpr * 100))
        print('Specificity: {:.2f}'.format(self.spc * 100))
        print('Precision: {:.2f}'.format(self.ppv * 100))
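
These execute methods lean on a small util module (normalizeData, insertBias, shuffleData, splitData) that is not part of the snippet. A rough sketch of what those helpers likely do, assuming NumPy arrays and the -1 bias convention used elsewhere in these examples:

import numpy as np

def normalizeData(x):
    # Min-max scale each feature to the [0, 1] range
    return (x - x.min(axis=0)) / (x.max(axis=0) - x.min(axis=0))

def insertBias(x):
    # Prepend a constant -1 bias input to every sample
    bias = -1 * np.ones((x.shape[0], 1))
    return np.concatenate((bias, x), axis=1)

def shuffleData(x, y):
    # Shuffle samples and labels with the same permutation
    idx = np.random.permutation(x.shape[0])
    return x[idx], y[idx]

def splitData(x, y, train_size):
    # First train_size fraction for training, the remainder for testing
    cut = int(train_size * x.shape[0])
    return x[:cut], x[cut:], y[:cut], y[cut:]
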
Example #3
    def train(self, x_train, y_train):
        # Perceptron learning: repeat epochs until no sample is misclassified
        # or the epoch limit is reached.
        stop_error = 1
        cont_epochs = 0
        vector_error = []
        while (stop_error and cont_epochs < self.epochs):
            self.updateEta(cont_epochs)
            stop_error = 0
            x_train, y_train = util.shuffleData(x_train, y_train)
            (m, _) = x_train.shape
            aux = 0
            for i in range(m):
                xi = x_train[i]
                y = self.predict(xi)

                d = y_train[i]
                error = d - y
                aux += abs(int(error))

                # flag that at least one sample was still misclassified
                if not np.array_equal(error, [0]):
                    stop_error = 1

                # update weights
                self.w += self.eta * (error * xi)
            vector_error.append(aux)
            cont_epochs += 1
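
Example #3 is a plain perceptron loop: the error is checked against [0], which suggests a single thresholded output. A minimal sketch of the kind of predict it assumes (step activation, 0/1 labels, bias already included in xi):

import numpy as np

def predict(w, xi):
    # Perceptron output: weighted sum followed by a step function
    u = np.dot(w, xi)
    return np.array([1]) if u >= 0 else np.array([0])
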
Example #4
    def train(self, x_train, y_train, hidden_layer):
        error_old = 0
        cont_epochs = 0
        mse_vector = []
        params = self.initWeigths(hidden_layer)
        w = params['w']  # input -> hidden weights
        m = params['m']  # hidden -> output weights
        while True:
            self.updateEta(cont_epochs)
            x_train, y_train = util.shuffleData(x_train, y_train)
            (p, _) = x_train.shape
            error_epoch = 0
            for k in range(p):
                x_k = x_train[k]
                # Forward pass: hidden-layer activations and their derivatives
                H = np.dot(x_k, w)
                H = self.function(H)
                H_ = self.derivate(H)

                # Prepend the -1 bias input, then compute the output layer
                H = np.concatenate(([-1], H), axis=None)
                Y = np.dot(H, m)
                Y = self.function(Y)
                Y_ = self.derivate(Y)

                # Quadratic Error Calculation
                d = y_train[k]
                error = d - Y
                error_epoch += 0.5 * np.sum(error**2)

                # Output layer
                delta_output = (error * Y_).reshape(-1, 1)
                aux_output = (self.eta * delta_output)
                m += np.dot(H.reshape(-1, 1), aux_output.T)

                # Hidden layer
                delta_hidden = np.sum(np.dot(m, delta_output)) * H_
                aux_hidden = (self.eta * delta_hidden).reshape(-1, 1)
                w += np.dot(x_k.reshape(-1, 1), aux_hidden.T)

            mse = error_epoch / p
            mse_vector.append(mse)

            if abs(error_epoch - error_old) <= self.precision:
                #print('Stop Precision: {} (Epochs {})'.format(abs(error_epoch - error_old), cont_epochs))
                break
            if cont_epochs >= self.epochs:
                #print('Stop Epochs: {}'.format(cont_epochs))
                break

            error_old = error_epoch
            cont_epochs += 1
        #util.plotErrors(mse_vector)
        params['w'] = w
        params['m'] = m
        return params
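
The MLP above applies self.function to the pre-activations and self.derivate to the already-activated values H and Y. A sketch assuming the logistic sigmoid, with the derivative written in terms of the activation:

import numpy as np

def function(u):
    # Logistic sigmoid activation
    return 1.0 / (1.0 + np.exp(-u))

def derivate(a):
    # Sigmoid derivative expressed through the activation a = function(u)
    return a * (1.0 - a)
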
Example #5
    def train(self, x_train, y_train, n_centers, width):
        # RBF network training: compute the hidden-layer activations for every
        # sample, then solve the output weights in one shot with the pseudo-inverse.
        params = self.initWeigths(n_centers)
        c = params['c']

        x_train, y_train = util.shuffleData(x_train, y_train)
        (p, _) = x_train.shape
        h = np.zeros((p, n_centers))

        # Activation of each center for each training sample
        for i in range(p):
            for j in range(n_centers):
                h[i, j] = self.saidas_centro(x_train[i], c[j], width)

        # Add the -1 bias column and solve the least-squares output weights
        bias = -1 * np.ones((p, 1))
        h = np.concatenate((bias, h), axis=1)
        w = np.dot(np.linalg.pinv(h), y_train)

        params['w'] = w
        return params
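
saidas_centro ("center outputs") returns the activation of one RBF center for one sample; a Gaussian kernel is the usual choice, sketched here under that assumption:

import numpy as np

def saidas_centro(xi, center, width):
    # Gaussian radial basis: activation decays with distance from the center
    dist_sq = np.sum((xi - center) ** 2)
    return np.exp(-dist_sq / (2.0 * width ** 2))
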
Example #6
    def execute(self):
        x_data = util.insertBias(self.x_data)
        y_data = self.y_data

        for i in range(self.realizations):
            x_data_aux, y_data_aux = util.shuffleData(x_data, y_data)
            x_train, x_test, y_train, y_test = util.splitData(x_data_aux, y_data_aux, self.train_size)
            
            best_hidden_layer = self.hidden_layer
            params = self.train(x_train, y_train, best_hidden_layer)
            mse, rmse = self.test(x_test, y_test, params)

            self.mse.append(mse)
            self.rmse.append(rmse)

        print('{} Realizations'.format(self.realizations))
        print('MSE: {}'.format(np.mean(self.mse)))
        print('Std MSE: {}'.format(np.std(self.mse)))
        print('RMSE: {}'.format(np.mean(self.rmse)))
        print('Std RMSE: {}'.format(np.std(self.rmse)))
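
self.test for these regression runs is not shown; presumably it feeds x_test through the trained network and compares against y_test. The returned pair is most likely computed along these lines (a sketch, not the original implementation):

import numpy as np

def mse_rmse(predictions, targets):
    # Mean squared error over the test set and its square root
    errors = targets - predictions
    mse = np.mean(errors ** 2)
    return mse, np.sqrt(mse)
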
Example #7
    def execute(self):
        x_data = util.normalizeData(self.x_data)
        x_data = util.insertBias(x_data)
        y_data = self.y_data

        for i in range(self.realizations):
            x_data_aux, y_data_aux = util.shuffleData(x_data, y_data)
            x_train, x_test, y_train, y_test = util.splitData(
                x_data_aux, y_data_aux, self.train_size)

            if self.g_search:
                best_n_centers, best_width = self.grid_search(x_train, y_train)
                print('Best N Centers: ', best_n_centers)
                print('Best Width: ', best_width)
            else:
                best_n_centers = self.n_centers
                best_width = self.width

            params = self.train(x_train, y_train, best_n_centers, best_width)
            acc, tpr, spc, ppv = self.test(x_test, y_test, params,
                                           best_n_centers, best_width)

            self.hit_rate.append(acc)
            self.tpr.append(tpr)
            self.spc.append(spc)
            self.ppv.append(ppv)

        self.acc = np.mean(self.hit_rate)
        self.std = np.std(self.hit_rate)
        self.tpr = np.mean(self.tpr)
        self.spc = np.mean(self.spc)
        self.ppv = np.mean(self.ppv)

        print('Hit rate: {}'.format(self.hit_rate))
        print('Accuracy: {:.2f}'.format(self.acc * 100))
        print('Minimum: {:.2f}'.format(np.amin(self.hit_rate) * 100))
        print('Maximum: {:.2f}'.format(np.amax(self.hit_rate) * 100))
        print('Standard Deviation: {:.2f}'.format(self.std))
        print('Sensitivity: {:.2f}'.format(self.tpr * 100))
        print('Specificity: {:.2f}'.format(self.spc * 100))
        print('Precision: {:.2f}'.format(self.ppv * 100))
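
grid_search is not included in the snippet; it presumably trains and validates the RBF network for several (n_centers, width) candidates and returns the best pair. A generic sketch, where the candidate lists and the evaluate callback are assumptions:

import itertools
import numpy as np

def grid_search(center_candidates, width_candidates, evaluate):
    # Exhaustively try every (n_centers, width) pair and keep the best score;
    # evaluate is assumed to train on a sub-split and return validation accuracy
    best_score, best_pair = -np.inf, None
    for n_centers, width in itertools.product(center_candidates, width_candidates):
        score = evaluate(n_centers, width)
        if score > best_score:
            best_score, best_pair = score, (n_centers, width)
    return best_pair
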
Example #8
    def adaline(self):
        # Pre-processing
        x_data = util.normalizeData(self.x_data)
        x_data = util.insertBias(x_data)
        y_data = self.y_data

        for i in range(self.realizations):
            self.initWeigths()
            x_data_aux, y_data_aux = util.shuffleData(x_data, y_data)
            x_train, x_test, y_train, y_test = util.splitData(
                x_data_aux, y_data_aux, self.train_size)
            self.train(x_train, y_train)
            mse, rmse = self.test(x_test, y_test)

            self.mse.append(mse)
            self.rmse.append(rmse)

        print('{} Realizations'.format(self.realizations))
        print('MSE: {}'.format(np.mean(self.mse)))
        print('Std MSE: {}'.format(np.std(self.mse)))
        print('RMSE: {}'.format(np.mean(self.rmse)))
        print('Std RMSE: {}'.format(np.std(self.rmse)))
Example #9
    def train(self, x_train, y_train):
        # Multi-output perceptron training: one weight row per output neuron,
        # same stopping rule as the single-output version.
        stop_error = 1
        cont_epochs = 0
        vector_error = []
        while (stop_error and cont_epochs < self.epochs):
            self.updateEta(cont_epochs)
            stop_error = 0
            x_train, y_train = util.shuffleData(x_train, y_train)
            (m, _) = x_train.shape
            for i in range(m):
                xi = x_train[i]
                y, y_ = self.predict(xi)
                d = y_train[i]
                error = d - y

                # flag that at least one output was still wrong for this sample
                if not np.array_equal(error, [0, 0, 0]):
                    stop_error = 1

                # update weights
                aux = (y_ * error).reshape(-1, 1)
                self.w += self.eta * (aux * xi)
            cont_epochs += 1
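
Example #9 trains several output neurons at once (the error is checked against [0, 0, 0]) and its predict returns both the decision y and a second term y_ that multiplies the error in the update, most likely an activation derivative. A sketch assuming three logistic neurons thresholded at 0.5:

import numpy as np

def predict(W, xi):
    # One logistic neuron per class: W holds one weight row per output
    u = np.dot(W, xi)
    a = 1.0 / (1.0 + np.exp(-u))      # sigmoid activations
    y = (a >= 0.5).astype(int)        # hard 0/1 decisions compared against the target
    y_ = a * (1.0 - a)                # derivative term used in the weight update
    return y, y_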