Example no. 1
    def fit(self, X, Y, epochs=100, Visual=False):

        def Loss(w, lamb, X, Y):
            n = X.shape[0]
            return (1/n) * sum(((Y[i] - np.dot(w, X[i]))**2 for i in range(n))) + lamb * np.linalg.norm(w)**2

        def grad_f(w, x_i, y_i):
            return -2*x_i*(y_i - np.dot(w, x_i))
            
        X_ones = add_ones(X)
        n, p = X_ones.shape
        d = np.zeros(p)
        z = np.zeros((n, p))  # last gradient stored for each sample (the SAG memory)
        self.w = np.zeros(p)
        visit = np.zeros(n)  # marks samples visited at least once
        loss_values = []
        for k in range(epochs):
            i = np.random.randint(0, n)
            visit[i] = 1
            g_i = grad_f(self.w, X_ones[i], Y[i])
            d = d - z[i] + g_i
            z[i] = g_i
            m = np.sum(visit)
            reg = self.w.copy()  # copy, otherwise the bias entry of self.w would be zeroed in place
            reg[0] = 0  # the bias term is not regularized
            self.w = self.w - self.lamb*self.delta*reg - (self.delta/m) * d
            loss_values.append(Loss(self.w, self.lamb, X_ones, Y))
            print('training, iteration: '+str(k+1)+'/'+str(epochs)+'\r', sep=' ', end='', flush=True)
            
        if Visual:
            it = range(len(loss_values))
            plt.figure()
            plt.plot(it, loss_values, 'r')
            plt.title("Loss over epochs")
            plt.show()
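The snippets in this collection all call an add_ones helper that is never shown. A minimal sketch consistent with how the regression examples use it (the intercept sits at index 0: reg[0] = 0 above leaves it unregularized, and Example no. 9 labels position 0 as the bias) could be:

import numpy as np

def add_ones(X):
    # Prepend a column of ones so that index 0 of the weight vector acts as the bias/intercept.
    return np.hstack([np.ones((X.shape[0], 1)), X])

This is only an assumption about the helper, not its actual definition; the camera snippet in Example no. 7 may expect the homogeneous 1 in a different position.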
Example no. 2
 def predict(self, X):
     X_ones = add_ones(X)
     n = self.X.shape[0]
     m = X_ones.shape[0]
     Y_pred = np.array([
         sum([(1 / self.C) * self.alpha[i] *
              self.kernel(self.X[i], X_ones[j], self.param)
              for i in range(n)]) for j in range(m)
     ])
     return Y_pred
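The double loop above evaluates the dual prediction f(x_j) = (1/C) * sum_i alpha[i] * k(x_i, x_j). The same computation written as a standalone vectorized sketch (hypothetical names; it only assumes the kernel(x, y, param) calling convention used above):

def predict_dual(X_train, alpha, C, kernel, param, X_test):
    # Gram matrix between test and training samples: K[j, i] = kernel(X_train[i], X_test[j], param)
    K = np.array([[kernel(x_i, x_j, param) for x_i in X_train] for x_j in X_test])
    return (1.0 / C) * K @ alpha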
Example no. 3
 def BackPropagation(self, outputs, derivates, training_targets, prev_delta_w, momentum, learning_rate):
     curr_delta_w = []
     output = outputs[-1]
     delta = self.loss(training_targets, output, True).T
     
     for i in range( len(self.layers) )[::-1]:
         delta_w = learning_rate*np.dot(delta, utils.add_ones(outputs[i])).T
         if i != 0:
             delta = np.dot(self.weights_Matrics[i][1:,:], delta)*derivates[i-1]
         self.weights_Matrics[i]+=momentum*delta_w+(1-momentum)*prev_delta_w[i]
         curr_delta_w.append(delta_w)
     return curr_delta_w[::-1]
Example no. 4
    def BackPropagation(self, outputs, derivates, training_targets,
                        prev_delta_w, momentum, learning_rate):
        curr_delta_w = []
        output = outputs[-1]
        delta = self.loss(training_targets, output, True).T

        for i in range(len(self.layers))[::-1]:
            delta_w = learning_rate * np.dot(delta, utils.add_ones(
                outputs[i])).T
            if i != 0:
                delta = np.dot(self.weights_Matrics[i][1:, :],
                               delta) * derivates[i - 1]
            self.weights_Matrics[i] += momentum * delta_w + (
                1 - momentum) * prev_delta_w[i]
            curr_delta_w.append(delta_w)
        return curr_delta_w[::-1]
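Both BackPropagation versions assume that self.loss(targets, output, True) returns the derivative of the loss with respect to the network output, with the trailing True acting as a derivative flag. That helper is not shown; a plausible mean-squared-error sketch matching that calling convention, offered only as an assumption:

def mse_loss(targets, outputs, derivative=False):
    # With derivative=False return the scalar loss, otherwise its gradient w.r.t. the outputs.
    if derivative:
        return outputs - targets
    return 0.5 * np.mean((outputs - targets) ** 2)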
Example no. 5
    def fit(self, X, Y, epochs=100, Visual=False):
        def dual_Loss(alpha, X, Y, k):
            n = X.shape[0]
            return sum([alpha[i] * Y[i] - (alpha[i]**2) / 4
                        for i in range(n)]) - (1 / (2 * self.C)) * sum([
                            alpha[i] * alpha[j] * k(X[i], X[j], self.param)
                            for i in range(n) for j in range(n)
                        ])

        X_ones = add_ones(X)
        n, p = X_ones.shape
        self.alpha = np.zeros(n)
        self.Y = Y
        self.X = X_ones
        loss_values = []
        for k in range(epochs):
            perm = np.random.permutation(n)
            for i in perm:
                delta_i = (self.Y[i] - sum([
                    self.alpha[j] *
                    self.kernel(self.X[i], self.X[j], self.param)
                    for j in range(n)
                ]) - (1 / 2) * self.alpha[i]) / (
                    (1 / 2) +
                    self.kernel(self.X[i], self.X[i], self.param) / self.C)
                self.alpha[i] = self.alpha[i] + delta_i
                print('training, iteration: ' + str(k + 1) + '/' +
                      str(epochs) + '\r',
                      sep=' ',
                      end='',
                      flush=True)
            if Visual:
                loss_values.append(
                    dual_Loss(self.alpha, X_ones, Y, self.kernel))

        if Visual:
            it = range(len(loss_values))
            plt.figure()
            plt.plot(it, loss_values, 'r')
            plt.title("Loss over epochs")
            plt.show()
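Examples no. 2 and no. 5 call self.kernel(x, y, self.param), but the kernel itself is never defined. A common choice compatible with that signature is a Gaussian (RBF) kernel; this is a sketch of one possibility, not the project's actual kernel:

def rbf_kernel(x, y, param):
    # param plays the role of the bandwidth sigma: k(x, y) = exp(-||x - y||^2 / (2 * sigma^2))
    return np.exp(-np.linalg.norm(x - y) ** 2 / (2 * param ** 2))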
Example no. 6
    def fit(self, X, Y, epochs=100, Visual=False):
        def Loss(X, Y, lamb, beta):
            n = X.shape[0]
            return (1 / n) * sum(
                (np.dot(X[i], beta) - Y[i])**2
                for i in range(n)) + lamb * np.linalg.norm(beta)**2

        def grad_loss(X, Y, lamb, beta):
            """Gradient of the ridge loss defined above."""
            n = X.shape[0]
            Y_pred = np.dot(X, beta)
            # (2/n) * sum_i (x_i . beta - y_i) * x_i  +  2 * lamb * beta
            dbeta = (2 / n) * np.dot(X.T, Y_pred - Y) + 2 * lamb * beta
            return dbeta

        X_ones = add_ones(X)
        n, p = X_ones.shape
        loss_values = []
        self.beta = np.zeros(p)

        for i in range(epochs):  # Full Gradient algorithm
            dbeta = grad_loss(X_ones, Y, self.lamb, self.beta)
            self.beta = self.beta - self.delta * dbeta
            print('training, iteration: ' + str(i + 1) + '/' + str(epochs) +
                  '\r',
                  sep=' ',
                  end='',
                  flush=True)
            if Visual:
                loss_values.append(Loss(X_ones, Y, self.lamb, self.beta))

        if Visual:
            it = range(len(loss_values))
            plt.figure()
            plt.plot(it, loss_values, 'r')
            plt.title("Loss over epochs")
            plt.show()
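The factor in the data term of grad_loss is easy to get wrong (the loss averages over the n samples, so its gradient carries a 2/n factor). A self-contained finite-difference check is a cheap safeguard; every name below is illustrative only:

import numpy as np

def ridge_loss(X, Y, lamb, beta):
    # (1/n) * sum_i (x_i . beta - y_i)^2 + lamb * ||beta||^2
    return np.mean((X @ beta - Y) ** 2) + lamb * np.dot(beta, beta)

def ridge_grad(X, Y, lamb, beta):
    # (2/n) * X^T (X beta - Y) + 2 * lamb * beta
    return (2 / X.shape[0]) * X.T @ (X @ beta - Y) + 2 * lamb * beta

rng = np.random.default_rng(0)
X, Y, beta = rng.normal(size=(20, 3)), rng.normal(size=20), rng.normal(size=3)
eps = 1e-6
numerical = np.array([(ridge_loss(X, Y, 1.0, beta + eps * e) -
                       ridge_loss(X, Y, 1.0, beta - eps * e)) / (2 * eps)
                      for e in np.eye(3)])
print(np.allclose(numerical, ridge_grad(X, Y, 1.0, beta)))  # expected: True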
Example no. 7
 def unproject_points(self, uvs):
     return np.dot(self.K_inverted, add_ones(uvs).T).T[:, 0:2]
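Example no. 7 maps pixel coordinates through the inverse intrinsic matrix K^-1 back to normalized image-plane coordinates. For reference, a hedged sketch of the forward counterpart, assuming a 3x3 intrinsic matrix K and (n, 2) point arrays with the homogeneous 1 appended last (the snippet's own add_ones may order its column of ones differently):

def project_points(K, xys):
    # [u, v, w]^T = K @ [x, y, 1]^T, then divide by w (w == 1 when the last row of K is [0, 0, 1]).
    xy1 = np.hstack([xys, np.ones((xys.shape[0], 1))])
    uvw = (K @ xy1.T).T
    return uvw[:, 0:2] / uvw[:, 2:3]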
Example no. 8
 def predict(self, X):
     X_ones = add_ones(X)
     Y_pred = np.dot(X_ones, self.w)
     return Y_pred
Example no. 9
X_2016, Y_2016 = retrieve(2016)
X_2017, Y_2017 = retrieve(2017)
X_2018, Y_2018 = retrieve(2018)

X = np.concatenate((X_2016, X_2017, X_2018))
Y = np.concatenate((Y_2016, Y_2017, Y_2018))

n, p = X.shape
LR = LinearRegression(lamb=1000, delta=5e-17)
LR.fit(X, Y, epochs=15, Visual=True)
Y_pred = LR.predict(X)

w = LR.weights()

labels = ["bias"] + list(retrieve_features())
X_ones = add_ones(X)
inv = np.linalg.inv(X_ones.T @ X_ones)
beta_hat = (inv @ X_ones.T) @ Y
Y_hat = X_ones @ beta_hat
sigma_hat = np.linalg.norm(Y_hat - Y) / np.sqrt(n - p - 1)
alpha = 0.05
f = t.ppf(1 - alpha / 2, n - p - 1)

output = []

for i in range(p + 1):  # formula from the lecture notes
    rho = inv[i, i]
    beta = beta_hat[i]
    output.append(
        [labels[i], w[i],
         abs(beta / np.sqrt(rho * sigma_hat**2)) >= f])
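The output table built above compares each coefficient's t-statistic |beta_i| / sqrt(rho_i * sigma_hat^2) against the Student-t quantile f (here t is presumably scipy.stats.t). The list itself is never displayed; one way to render it, assuming pandas is available:

import pandas as pd

table = pd.DataFrame(output, columns=["feature", "weight", "significant at 5%"])
print(table.to_string(index=False))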