Ejemplo n.º 1
# NOTE(review): fragment starts mid-script — train_std, train_mean,
# train_x_1..train_x_5, NUM_IT, np, plt and reg_gradient_descent are all
# defined above the visible region. Entries 0-1 of train_std/train_mean are
# presumably filled just above this point — confirm against the full file.
train_std[2] = np.std(train_x_3)
train_std[3] = np.std(train_x_4)
train_std[4] = np.std(train_x_5)

# Z-score-normalize each feature column with its own mean/std.
train_x_1_n = [(x-train_mean[0])/train_std[0] for x in train_x_1]
train_x_2_n = [(x-train_mean[1])/train_std[1] for x in train_x_2]
train_x_3_n = [(x-train_mean[2])/train_std[2] for x in train_x_3]
train_x_4_n = [(x-train_mean[3])/train_std[3] for x in train_x_4]
train_x_5_n = [(x-train_mean[4])/train_std[4] for x in train_x_5]

# Design matrix: 100 samples x 5 normalized polynomial features.
train_x = np.zeros((100,5))

train_x[:, 0] = train_x_1_n
train_x[:, 1] = train_x_2_n
train_x[:, 2] = train_x_3_n
train_x[:, 3] = train_x_4_n
train_x[:, 4] = train_x_5_n

# Noisy target: sin(x) + 0.1*x^2 + Gaussian noise.
# NOTE(review): randn(100, 1) is 2-D; if train_x_1 is 1-D this broadcasts
# train_y to shape (100, 100) instead of (100,) — verify train_x_1's shape.
train_y = np.sin( train_x_1 ) + np.power ( train_x_1 , 2) * 0.1 + np.random .randn (100 , 1) * 0.5
eta = 0.01  # learning rate for gradient descent
plt.plot(train_x_1, train_y)
# Fit with no regularization (lambda = 0).
w = reg_gradient_descent(train_x, train_y, eta, 0, NUM_IT)
train_y_p = np.zeros(100)
for i in range(0, 100):
    # Bias term plus each weighted, normalized power x^j (j = 1..5);
    # normalization here must match the training-time transform above.
    train_y_p[i] = w[0]
    for j in range(1, 6):
        train_y_p[i] = train_y_p[i] + w[j] * ((train_x_1[i] ** j - train_mean[j-1])/train_std[j-1])

# Overlay the fitted curve on the noisy data.
plt.plot(train_x_1, train_y_p)
plt.show()
Ejemplo n.º 2
    # NOTE(review): fragment starts mid-file — the enclosing `with np.load(...)`
    # header sits above the visible region; `data` comes from it.
    x, t = data ["x"], data["t"]
    x_eval , t_eval = data ["x_eval"], data ["x_eval"] if False else data ["t_eval"] 
train_x = x[0:50,:]
train_t = t[0:50]
train_x_shape = np.shape(train_x)
x_eval_shape = np.shape(x_eval)
T = x_eval_shape[0]  # number of evaluation samples
N = train_x_shape[1]  # feature dimension
M = train_x_shape[0]  # number of training samples (50)
eta = 0.1  # learning rate
# Iteration counts to compare.
NUM_IT = [1, 10, 100, 1000, 5000] 
shape_NUM_IT = np.shape(NUM_IT)
L = shape_NUM_IT[0] 
# Columns: [iteration count, training errors, eval errors].
error = np.zeros((L, 3))
for i in range(0, L):
    # Unregularized fit (lambda = 0) for each iteration budget.
    w = reg_gradient_descent(train_x, train_t, eta, 0, NUM_IT[i])
    error[i, 0] = NUM_IT[i]
    # Training-set misclassifications: linear score w.x + bias,
    # thresholded at 0.5 into a 0/1 label.
    for j in range(0, M):   
        t_predicted_train = np.dot(w[1:N+1], np.transpose(train_x[j, :])) + w[0]  
        if t_predicted_train <= 0.5:
            t_predicted_train = 0
        else:
            t_predicted_train = 1   
        if t_predicted_train != train_t[j]:
            error[i, 1] = error[i, 1] + 1 
    # Evaluation-set predictions, same threshold rule.
    # NOTE(review): the fragment is cut off here — the comparison against
    # t_eval[j] that should increment error[i, 2] is below the visible region.
    for j in range(0, T):   
        t_predicted_eval = np.dot(w[1:N+1], np.transpose(x_eval[j, :])) + w[0]  
        if t_predicted_eval <= 0.5:
            t_predicted_eval = 0
        else:
            t_predicted_eval = 1   
Ejemplo n.º 3
from reg_gradient_descent import reg_gradient_descent

# Load the tiny MNIST subset: training arrays plus a held-out eval split.
with np.load("TINY_MNIST.npz") as data:
    x, t = data["x"], data["t"]
    x_eval, t_eval = data["x_eval"], data["t_eval"]

# Train on the first 50 samples only.
train_x = x[0:50, :]
train_t = t[0:50]

# Regularization strengths to sweep.
lamda = [0, 0.0001, 0.001, 0.01, 0.1, 0.5]

T = x_eval.shape[0]   # number of evaluation samples
L = len(lamda)        # number of lambda settings
N = train_x.shape[1]  # feature dimension
eta = 0.1             # learning rate
NUM_IT = 5000         # fixed iteration budget per fit

# Column 0: lambda value; column 1: eval misclassification count.
error = np.zeros((L, 2))
for i, lam in enumerate(lamda):
    w = reg_gradient_descent(train_x, train_t, eta, lam, NUM_IT)
    error[i, 0] = lam
    for j in range(T):
        # Linear score w.x + bias, thresholded at 0.5 into a 0/1 label.
        score = np.dot(w[1 : N + 1], np.transpose(x_eval[j, :])) + w[0]
        label = 0 if score <= 0.5 else 1
        if label != t_eval[j]:
            error[i, 1] = error[i, 1] + 1

# Report lambda vs. eval errors, one row per setting.
for i in range(L):
    print("%f\t%d" % (error[i, 0], error[i, 1]))