Code Example #1
    def test_plot_boundary(self):
        # SELECT * FROM self.data WHERE col2 == 0
        negatives = self.data[self.data[:, 2] == 0]
        # SELECT * FROM self.data WHERE col2 == 1
        positives = self.data[self.data[:, 2] == 1]
        plt.xlabel("Exam 1 score")
        plt.ylabel("Exam 2 score")
        plt.xlim([25, 115])
        plt.ylim([25, 115])
        plt.scatter(negatives[:, 0],
                    negatives[:, 1],
                    c='y',
                    marker='o',
                    s=40,
                    linewidths=1,
                    label="Not admitted")
        plt.scatter(positives[:, 0],
                    positives[:, 1],
                    c='b',
                    marker='+',
                    s=40,
                    linewidths=2,
                    label="Admitted")
        plt.legend()

        self.X = np.concatenate([np.ones((self.m, 1)), self.X], axis=1)
        theta = np.zeros((self.n + 1, 1))
        theta_optimized, _ = gradient_descent(self.X, self.y, theta)

        x1 = self.X[:, 1]
        x2 = -(1 / theta_optimized[2]) * (theta_optimized[0] +
                                          theta_optimized[1] * x1)
        plt.plot(x1, x2)
        plt.show()
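The straight line drawn at the end of this test is the decision boundary: it is where the model's linear term is zero, so solving for the second feature gives (with the same indexing as `theta_optimized`)

\[
\theta_0 + \theta_1 x_1 + \theta_2 x_2 = 0
\quad\Longrightarrow\quad
x_2 = -\frac{1}{\theta_2}\left(\theta_0 + \theta_1 x_1\right),
\]

which is exactly the expression assigned to `x2` above.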
Code Example #2
    def test_prediction(self):
        self.X = np.concatenate([np.ones((self.m, 1)), self.X], axis=1)
        theta = np.zeros((self.n + 1, 1))
        theta_optimized, _ = gradient_descent(self.X, self.y, theta)
        test_data = np.array([1, 45, 85]).reshape((1, 3))
        prediction = hypothesis(test_data, theta_optimized)
        self.assertAlmostEqual(prediction, 0.776, places=3)
        self.assertEqual(classify(test_data, self.X, theta_optimized), 1)
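The helpers `hypothesis` and `classify` are not part of this excerpt. A minimal sketch of what they might look like, assuming a standard sigmoid hypothesis and a 0.5 decision threshold (the bodies below are inferred from the test, not taken from the project):

import numpy as np

def hypothesis(X, theta):
    # Sigmoid of the linear combination X @ theta (assumed form).
    return 1.0 / (1.0 + np.exp(-X.dot(theta)))

def classify(x, X, theta):
    # Single-sample prediction with a 0.5 threshold; the training matrix X
    # is accepted only to mirror the call in the test above.
    return int(hypothesis(x, theta).item() >= 0.5)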
Code Example #3
    def test_gradient_descent(self):
        self.X = np.concatenate([np.ones((self.m, 1)), self.X], axis=1)
        theta = np.zeros((self.n + 1, 1))
        theta_optimized, min_cost = gradient_descent(self.X, self.y, theta)
        np.testing.assert_almost_equal(
            theta_optimized,
            np.array([-25.161301, 0.206231, 0.201471]),
            decimal=3)
        self.assertAlmostEqual(min_cost, 0.203, places=3)
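The expected parameters and the minimum cost of roughly 0.203 are the familiar values for the exam-scores dataset, but `gradient_descent` itself is not shown. A sketch of the cross-entropy cost and gradient that such a routine would typically iterate on (the surrounding update loop, learning rate, and stopping rule are assumptions):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def cost(X, y, theta):
    # Mean cross-entropy loss of the logistic model.
    h = sigmoid(X.dot(theta))
    m = len(y)
    return float(-(y.T.dot(np.log(h)) + (1 - y).T.dot(np.log(1 - h))) / m)

def gradient(X, y, theta):
    # Gradient of the cost with respect to theta.
    m = len(y)
    return X.T.dot(sigmoid(X.dot(theta)) - y) / m

A plain fixed-step loop over this gradient converges slowly on unscaled exam scores, so helpers like this often delegate to an off-the-shelf optimizer instead.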
Code Example #4
def LWR(x, y, method="closed_form"):
    # Locally weighted regression: fit theta to (x, y), then evaluate the
    # learned line at every training point.
    n = m = len(x)
    _learned = np.zeros(n)
    # One row of kernel weights per training point, centered on x[i].
    w = np.array([weight(x, x[i]) for i in range(m)])

    if method == "closed_form":
        theta = closed_form(w, x, y)
    else:
        # Note: the weights are only used by the closed-form branch.
        theta = gradient_descent(x, y, J, limit=260, alpha=1)

    for i in range(n):
        _learned[i] = theta[0] + theta[1] * x[i]

    return _learned
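The `weight` helper used above is not shown; in locally weighted regression it is usually a Gaussian kernel centered on the query point. A minimal sketch under that assumption (the bandwidth `tau` and its default are made up for illustration):

import numpy as np

def weight(x, query, tau=0.5):
    # Gaussian kernel: points near the query get weight close to 1,
    # distant points decay toward 0; tau controls the bandwidth.
    return np.exp(-((x - query) ** 2) / (2 * tau ** 2))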
Code Example #5
File: Logistic Regression.py  Project: Esme01/ML2020
lr = 0.2

train_loss = []
valid_loss = []
train_acc = []
valid_acc = []
step = 1
for epoch in range(epoches):
    X_train, Y_train = utils.shuffle(X_train, Y_train)

    for i in range(int(np.floor((train_size / batch_size)))):
        X = X_train[i * batch_size:(i + 1) * batch_size]
        Y = Y_train[i * batch_size:(i + 1) * batch_size]

        #w,b = utils.gradient_descent(X,Y,w,b,lr)
        w_grad, b_grad = utils.gradient_descent(X, Y, w, b)
        w -= lr / np.sqrt(step) * w_grad
        b -= lr / np.sqrt(step) * b_grad
        step += 1
    y_train_pred = utils.f(X_train, w, b)
    Y_train_pred = np.round(y_train_pred)
    train_acc.append(utils.accruacy(Y_train_pred, Y_train))
    train_loss.append(
        utils.cross_entropy_loss(y_train_pred, Y_train) / train_size)

    y_valid_pred = utils.f(X_valid, w, b)
    Y_valid_pred = np.round(y_valid_pred)
    valid_acc.append(utils.accruacy(Y_valid_pred, Y_valid))
    valid_loss.append(
        utils.cross_entropy_loss(y_valid_pred, Y_valid) / valid_size)
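The training loop leans on several `utils` helpers (`f`, `gradient_descent`, `cross_entropy_loss`, `accruacy`) that are not reproduced here. A rough sketch of what they plausibly compute for binary logistic regression (the names are copied from the calls above; the bodies are assumptions):

import numpy as np

def f(X, w, b):
    # Sigmoid of the linear model, clipped away from 0 and 1 for a stable log.
    return np.clip(1.0 / (1.0 + np.exp(-(X.dot(w) + b))), 1e-8, 1 - 1e-8)

def gradient_descent(X, Y, w, b):
    # Gradients of the cross-entropy loss w.r.t. w and b; the caller applies
    # the lr / sqrt(step) update shown above.
    error = f(X, w, b) - Y
    return X.T.dot(error), np.sum(error)

def cross_entropy_loss(y_pred, Y):
    # Summed cross-entropy; the caller divides by the dataset size.
    return -np.dot(Y, np.log(y_pred)) - np.dot(1 - Y, np.log(1 - y_pred))

def accruacy(Y_pred, Y):
    # Fraction of matching 0/1 labels (name kept as spelled in the project).
    return 1.0 - np.mean(np.abs(Y_pred - Y))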
Code Example #6
File: 4.2.py  Project: hunering/demo-code
import numpy as np
from utils import gradient_descent
from keras.utils import to_categorical

def function_2(x):
  return x[0]**2 + x[1]**2

init_x = np.array([1.0, 2.0])
x = gradient_descent(function_2, init_x, lr=0.1, step_num=100)
print(x)

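Here `gradient_descent` numerically minimizes `function_2` from the starting point `init_x`. A self-contained sketch matching that call signature, using a central-difference gradient (the step size `h` is an assumption):

import numpy as np

def numerical_gradient(f, x, h=1e-4):
    # Central-difference estimate of the gradient of f at x.
    grad = np.zeros_like(x)
    for i in range(x.size):
        original = x[i]
        x[i] = original + h
        f_plus = f(x)
        x[i] = original - h
        f_minus = f(x)
        grad[i] = (f_plus - f_minus) / (2 * h)
        x[i] = original
    return grad

def gradient_descent(f, init_x, lr=0.01, step_num=100):
    # Repeatedly step against the numerical gradient.
    x = init_x.copy()
    for _ in range(step_num):
        x -= lr * numerical_gradient(f, x)
    return x

With lr=0.1 and 100 steps, the call above drives [1.0, 2.0] very close to the minimum at the origin.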
Code Example #7
        start + i * step_size
        for i in range(0, int((end - start) // step_size))
    ]
    y = []

    for _x in x:
        y += [1 / (1 + np.exp(-_x))]

    plt.plot(x, y)
    plt.show()


if __name__ == "__main__":

    # # plot using gradient descent
    d = gradient_descent(*get_vector_data(ds1), J)
    plot(d[0], d[1], ds1, "ds1 gradient descent")

    # plot using linear regression
    plot(*closed_form(*get_vector_data(ds1)), ds1, "ds1 closed form")

    # plot using gradient descent
    d2 = gradient_descent(*get_vector_data(ds2), J)
    plot(d2[0], d2[1], ds2, "ds2 gradient descent")

    # plot using linear regression
    plot(*closed_form(*get_vector_data(ds2)), ds2, "ds2 closed form")

    # plot the learned t1 and t2
    plt.plot(range(len(d[2])), [m[1] for m in d[2]], label='theta 1')
    plt.plot(range(len(d[2])), [m[0] for m in d[2]], label='theta 2')
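The opening of this excerpt is cut off: the list comprehension at the top evidently builds the x samples for a sigmoid plot. A self-contained sketch of such a plotting helper under that assumption (the function name and default arguments are hypothetical):

import numpy as np
import matplotlib.pyplot as plt

def plot_sigmoid(start=-10.0, end=10.0, step_size=0.1):
    # Sample [start, end) at step_size intervals and plot the logistic curve.
    x = [
        start + i * step_size
        for i in range(0, int((end - start) // step_size))
    ]
    y = [1 / (1 + np.exp(-_x)) for _x in x]
    plt.plot(x, y)
    plt.show()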
Code Example #8
        start + i * step_size
        for i in range(0, int((end - start) // step_size))
    ]
    y = []

    for _x in x:
        y += [1 / (1 + np.exp(-_x))]

    plt.plot(x, y)
    plt.show()


if __name__ == "__main__":

    # # plot using gradient descent
    d = gradient_descent(*get_vector_data(ds1), J, limit=1e12)
    plot(d[0], d[1], ds1, "ds1 gradient descent")

    # plot using linear regression
    plot(*closed_form(*get_vector_data(ds1)), ds1, "ds1 closed form")

    # plot using gradient descent
    d2 = gradient_descent(*get_vector_data(ds2), J)
    plot(d2[0], d2[1], ds2, "ds2 gradient descent")

    # plot using linear regression
    plot(*closed_form(*get_vector_data(ds2)), ds2, "ds2 closed form")

    # plot the learned t1 and t2
    plt.plot(range(len(d[2])), [m[1] for m in d[2]], label='theta 1')
    plt.plot(range(len(d[2])), [m[0] for m in d[2]], label='theta 2')
"""
Implementation of logistic regression using gradient descent

"""

import numpy as np
import sys
import utils
import math

def h(thetas, xi):
    """
    Sigmoid hypothesis function for logistic regression
    
    thetas : Theta values
    xi : Feature vector
    
    """
    return 1.0 / (1.0 + math.exp(-thetas.dot(xi)))


if __name__ == "__main__":
    path = sys.argv[1]
    training_xs, training_ys = utils.training_data_from_csv_file(path)
    thetas = utils.gradient_descent(h,
                                    training_xs,
                                    training_ys,
                                    0.001,
                                    4000)
    utils.print_thetas(thetas)
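`utils.gradient_descent` here takes the hypothesis `h` as its first argument; a rough sketch of how such a driver might iterate, assuming the last two arguments are the learning rate and the iteration count (both the signature's meaning and the batch update rule are inferred, not taken from the project):

import numpy as np

def gradient_descent(h, xs, ys, alpha, iterations):
    # Batch update for logistic regression:
    # theta += alpha * sum_i (y_i - h(theta, x_i)) * x_i
    thetas = np.zeros(len(xs[0]))
    for _ in range(iterations):
        grad = np.zeros_like(thetas)
        for xi, yi in zip(xs, ys):
            xi = np.asarray(xi, dtype=float)
            grad += (yi - h(thetas, xi)) * xi
        thetas += alpha * grad
    return thetas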
Code Example #10
Y = np.asarray(Y).astype(float)
m = Y.size

# Task 1 - Feature normalization
print('Normalizing features...')
X, mu, sigma = ut.featureNormalize(X)
ones_column = np.ones((m, 1))
X = np.hstack((ones_column, X))

# Task 2 - Gradient descent
print('Running gradient descent...')
alpha = 0.01
num_iters = 400

theta = np.zeros((3, 1))
theta, J_history = ut.gradient_descent(X, Y, theta, alpha, num_iters)

fig = plt.figure()
ax = plt.axes()
plt.plot(np.arange(J_history.size), J_history)
ax.set_xlabel("Число итераций")
ax.set_ylabel("Функция стоимости J")
#plt.show()

print("theta, полученное методом градиентного спуска:", theta)

# Estimate the price of a 3-room apartment with an area of 60 m2
in_x = np.array([1, 60, 3])
norm_mu = np.array([0, mu[0, 0], mu[0, 1]])
norm_sigma = np.array([1, sigma[0, 0], sigma[0, 1]])
norm_in_x = np.subtract(in_x, norm_mu) / norm_sigma
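The excerpt stops just before the prediction itself; with the normalized input built above, the estimated price would be the dot product with the learned parameters (a one-line sketch, not part of the original file):

predicted_price = float(norm_in_x.dot(theta))
print('Predicted price of the apartment:', predicted_price)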