def model(self, x, y, epochs=100, learning='gradient'):
        """
        Parameters
        ----------
        x : ndarray, list
            Features of the training data
        y : ndarray, list
            Labels of the training data
        epochs : int, optional
            Number of epochs to run
        learning : str, optional
            Learning rule to use, either 'gradient' or 'perceptron'

        Returns
        -------
        None : NoneType
        """
        self.epochs = epochs
        self.learning = learning
        self.weights = np.random.randn(self.input_layer, 1)

        for e in range(self.epochs):
            del_weights = np.zeros(self.weights.shape)
            predict = list()
            for i in range(len(x)):
                x_tmp = list(x[i]) + [-1]
                output = sigmoid(
                    np.dot(
                        np.array(x_tmp).reshape(1, self.input_layer),
                        self.weights))
                predict.append(output[0][0])
                for j in range(self.input_layer):
                    if self.learning == 'gradient':
                        del_weights[j] += self.lr * (
                            y[i] -
                            output[0]) * output[0] * (1 - output[0]) * x_tmp[j]
                    elif self.learning == 'perceptron':
                        del_weights[j] += self.lr * (y[i] -
                                                     output[0]) * x_tmp[j]
                    else:
                        print("Not a valid learning algorithm.")
                        sys.exit()
            for j in range(self.input_layer):
                self.weights[j] += del_weights[j]

            self.training_loss.append(MSE(y, predict))
            print(
                f"Epochs: {e+1}/{epochs}.. Training Loss: {self.training_loss[e]: 3f}..\nWeights: {self.weights.tolist()}\n"
            )

        self.slope = (-1 * self.weights[0] / self.weights[1])[0]
        self.intercept = (self.weights[2] / self.weights[1])[0]
        return
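
A standalone sketch of the two update rules used above, for illustration only: 'gradient' applies the delta rule with the sigmoid derivative, 'perceptron' uses the raw error. The helper names, data, and hyperparameters here are assumptions, not part of the original class.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.default_rng(0)
x = rng.standard_normal((50, 2))
y = (x[:, 0] + x[:, 1] > 0).astype(float)

lr = 0.1
init_weights = rng.standard_normal((3, 1))               # 2 features + bias

for rule in ('gradient', 'perceptron'):
    w = init_weights.copy()
    for epoch in range(20):
        del_w = np.zeros_like(w)
        for xi, yi in zip(x, y):
            xi_b = np.append(xi, -1.0)                    # trailing -1 bias input, as in the method
            out = sigmoid(xi_b @ w)[0]
            err = yi - out
            if rule == 'gradient':
                # delta rule: error * sigmoid derivative * input
                del_w += lr * err * out * (1 - out) * xi_b.reshape(-1, 1)
            else:
                # perceptron-style rule: error * input
                del_w += lr * err * xi_b.reshape(-1, 1)
        w += del_w                                        # one batch update per epoch
    print(rule, 'final weights:', w.ravel())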
Example #2
    def model(self, x, y, epochs=100, debug=False, debug_verbose=False):
        """
        Find the weights

        Parameters
        ----------
        x : ndarray, list
            Features of the training data
        y : ndarray, list
            Labels of the training data
        epochs : int, optional
            Number of epochs to run
        debug : bool, optional
            If True, print debugging information after each epoch
        debug_verbose : bool, optional
            If True, make the per-epoch debug output more verbose

        Returns
        -------
        None : NoneType
        """

        self.epochs = epochs
        self.model_ran = True
        self.data_length = len(x)
        self.training_loss = list()

        for e in range(self.epochs):
            predict = list()
            self.zero_grad()
            for i in range(self.data_length):
                x_tmp = self.__reshape_input(x[i])
                output = self.forward(x_tmp)
                y_tmp = self.__reshape_output(y[i])
                self.backward(x_tmp, output, y_tmp)

                predict.append(output.tolist())
            self.update_weights()

            if debug:
                self.debug(more_verbose=debug_verbose)

            self.training_loss.append(MSE(y, predict))
            print(
                f"Epochs: {e+1}/{epochs} Training Loss: {self.training_loss[e]}.."
            )

        return
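
The loop above delegates to zero_grad, forward, backward, and update_weights helpers that are not shown in this example. A hedged sketch of one way such helpers could look for a single sigmoid unit follows; the class name and method bodies are assumptions, not the original implementation.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

class TinyUnit:
    """Hypothetical single sigmoid unit with batch (per-epoch) updates."""

    def __init__(self, n_inputs, lr=0.1):
        self.lr = lr
        self.weights = np.random.randn(n_inputs + 1, 1)   # +1 for the bias weight

    def zero_grad(self):
        # reset the accumulated weight updates at the start of each epoch
        self.del_weights = np.zeros_like(self.weights)

    def forward(self, x_row):
        # x_row is a (1, n_inputs + 1) row with the bias input appended
        return sigmoid(x_row @ self.weights)

    def backward(self, x_row, output, y_row):
        # accumulate the delta-rule gradient for one sample
        err = y_row - output
        self.del_weights += self.lr * (err * output * (1 - output)) * x_row.T

    def update_weights(self):
        self.weights += self.del_weights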
Example #3
def k_folds(X_train, y_train, k=5):
    l_regression = LinealRegression()
    error = MSE()

    chunk_size = int(len(X_train) / k)
    mse_list = []
    # Hold out one chunk as the validation fold and train on the rest
    for i in range(0, len(X_train), chunk_size):
        end = min(i + chunk_size, len(X_train))
        new_X_valid = X_train[i:end]
        new_y_valid = y_train[i:end]
        new_X_train = np.concatenate([X_train[:i], X_train[end:]])
        new_y_train = np.concatenate([y_train[:i], y_train[end:]])

        l_regression.fit(new_X_train, new_y_train)
        prediction = l_regression.predict(new_X_valid)
        mse_list.append(error(new_y_valid, prediction))

    mean_MSE = np.mean(mse_list)

    return mean_MSE
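
A usage sketch for k_folds. LinealRegression and MSE are not defined in these examples, so minimal stand-ins are assumed here: an ordinary least-squares fit via numpy and a mean-squared-error callable.

import numpy as np

class LinealRegression:
    # hypothetical stand-in: ordinary least squares
    def fit(self, X, y):
        self.coef, *_ = np.linalg.lstsq(X, y, rcond=None)

    def predict(self, X):
        return X @ self.coef

class MSE:
    # hypothetical stand-in: callable mean squared error
    def __call__(self, y_true, y_pred):
        return np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2)

X = np.random.randn(100, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * np.random.randn(100)
print(k_folds(X, y, k=5))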
Example #4
# Add Gaussian noise to the targets and visualize the noisy data
noise = np.random.normal(0, .1, y.shape)
noisy_y = y + noise
plt.figure(1)
plt.plot(x, noisy_y)
plt.show()

# Split Data
percentage = 0.8
permuted_idxs = np.random.permutation(x.shape[0])
train_idxs = permuted_idxs[0:int(percentage * x.shape[0])]
test_idxs = permuted_idxs[int(percentage * x.shape[0]):x.shape[0]]
x_train = x[train_idxs]
x_test = x[test_idxs]
y_train = noisy_y[train_idxs]
y_test = noisy_y[test_idxs]
error = MSE()

# Model Initialization
lineal_regression = LinealRegression()

# Polynomial features for X train and X test
p_x_train = np.zeros((x_train.shape[0], MAX_ORDER + 1))
p_x_test = np.zeros((x_test.shape[0], MAX_ORDER + 1))
MSE_register = np.zeros((MAX_ORDER, 1))
Predictions = np.zeros((y_test.shape[0], MAX_ORDER))
MSE_notK = np.zeros((MAX_ORDER, 1))

p_x_train = np.hstack((np.ones((x_train.shape[0], 1)), p_x_train))
p_x_test = np.hstack((np.ones((x_test.shape[0], 1)), p_x_test))

for i in range(MAX_ORDER):
Example #5
# Data extraction and preprocessing
data_example = SplitData('income.csv')
x_train, y_train = data_example.get_train_data()
x_validation, y_validation = data_example.get_test_data()

# Model Initialization
lineal_regression = LinealRegression()
affine_regression = LinealRegression()

# Metrics
MSE_register = np.zeros((MAX_ORDER, 1))
Predictions = np.zeros((y_validation.shape[0], MAX_ORDER))
mse = MSE()

# Polynomial features for X train and X validation
p_x_train = np.zeros((x_train.shape[0], MAX_ORDER))
p_x_test = np.zeros((x_validation.shape[0], MAX_ORDER))

for i in range(MAX_ORDER):
    p_x_train[:, i] = x_train**(i + 1)
    p_x_test[:, i] = x_validation**(i + 1)

    if LINEAL_TYPE == 'CLASSIC':
        lineal_regression.fit(p_x_train[:, :i + 1], y_train)
        Predictions[:, i] = lineal_regression.predict(p_x_test[:, :i + 1])
        MSE_register[i] = mse(y_validation, Predictions[:, i])
    else:
        affine_regression.fit(
Example #6
    def model(self, x, y, epochs=100):
        """
        Parameters
        ----------
        x : ndarray
            Features of the training data
        y : ndarray
            Labels of the training data
        epochs : int, optional
            Number of epochs

        Returns
        -------
        None : NoneType
        """
        self.epochs = epochs
        self.data_size = len(x)
        for i in range(self.layers - 2):
            self.weights.append(
                self.__initialization(self.nodes[i], self.nodes[i + 1] - 1))
        self.weights.append(
            self.__initialization(self.nodes[i + 1], self.output_layer))

        for e in range(self.epochs):
            predict = list()
            # Accumulate weight updates over the whole epoch and apply them
            # once per epoch, as in the single-layer version above
            del_weights0 = [[0 for v in range(self.nodes[-2])]
                            for u in range(self.nodes[-1])]
            del_weights1 = [[0 for w in range(self.nodes[-2] - 1)]
                            for q in range(self.nodes[-3])]
            for i in range(self.data_size):
                tmp = [x[i]]
                # Forward pass
                for n in range(self.layers - 1):
                    self.activations.append(list(tmp[0]) + [-1])
                    tmp = sigmoid(
                        np.dot(
                            np.array(list(tmp[0]) + [-1]).reshape(
                                1, self.nodes[n]), self.weights[n]))
                output = tmp[0]
                predict.append(output)

                # Backward pass (output layer): the delta rule
                # (y - o) * o * (1 - o) times the hidden activation
                for u in range(self.nodes[-1]):
                    for v in range(self.nodes[-2]):
                        del_weights0[u][v] += self.lr * (
                            y[i][u] - output[u]) * output[u] * (
                                1 - output[u]) * self.activations[-1][v]
                # Backward pass (hidden layer): propagate each output delta
                # back through self.weights[-1] and the sigmoid derivative
                # of the hidden activations
                for q in range(self.nodes[-3]):
                    for w in range(self.nodes[-2] - 1):
                        for z in range(self.nodes[-1]):
                            del_weights1[q][w] += self.lr * (
                                y[i][z] - output[z]) * output[z] * (
                                    1 - output[z]) * self.weights[-1][w][
                                        z] * self.activations[-1][w] * (
                                            1 - self.activations[-1][w]
                                        ) * self.activations[-2][q]
            # del_weights0 is indexed [output][hidden]; transpose it so it
            # lines up with self.weights[-1], which is (hidden, output)
            del_weights0 = np.array(del_weights0).T
            for u in range(self.nodes[-2]):
                for v in range(self.nodes[-1]):
                    self.weights[-1][u][v] += del_weights0[u][v]
            for u in range(self.nodes[-3]):
                for v in range(self.nodes[-2] - 1):
                    self.weights[-2][u][v] += del_weights1[u][v]
            predict = np.array(predict).reshape(self.data_size,
                                                self.output_layer)
            self.training_loss.append(MSE(y, predict))
            print(
                f"Epochs: {e+1}/{self.epochs}.. Training Loss: {self.training_loss[e]}.."
            )
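
A vectorized restatement of the two-layer update above, for comparison: one hidden layer with a trailing -1 bias unit, sigmoid activations, and one batch update per epoch. The data, layer sizes, and learning rate are assumptions for illustration.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.default_rng(0)
x = rng.standard_normal((200, 2))
y = (x[:, :1] * x[:, 1:2] > 0).astype(float)              # toy target, shape (200, 1)

n_in, n_hidden, n_out, lr = 3, 4, 1, 0.1                   # n_in counts the -1 bias input
w1 = rng.standard_normal((n_in, n_hidden - 1))             # hidden weights; its bias is the appended -1
w2 = rng.standard_normal((n_hidden, n_out))                # output weights

for epoch in range(100):
    a0 = np.hstack([x, -np.ones((len(x), 1))])             # input with the -1 bias appended
    h = sigmoid(a0 @ w1)
    a1 = np.hstack([h, -np.ones((len(x), 1))])             # hidden activation with the -1 bias appended
    out = sigmoid(a1 @ w2)

    # output-layer delta: (y - o) * o * (1 - o), as in the nested loops above
    d_out = (y - out) * out * (1 - out)
    # hidden-layer delta: back-propagate through w2 (bias row dropped)
    # and the sigmoid derivative of the hidden activations
    d_hid = (d_out @ w2[:-1].T) * h * (1 - h)

    # one batch update per epoch
    w2 += lr * a1.T @ d_out
    w1 += lr * a0.T @ d_hid

print('last-epoch training MSE:', np.mean((y - out) ** 2))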