# Exemplo n.º 1 (score: 0) — scraped example separator
    return (np.sum((t-y)**2))/(len(x))


def error_val(x, t):
    """Return the mean squared error of the linear model on inputs x vs. targets t.

    Reads the module-level parameters W and b.
    """
    prediction = np.dot(x, W) + b
    residual = t - prediction
    return np.sum(residual ** 2) / len(x)

# Wrapper so the numerical differentiator can treat the loss as a function of a
# parameter; the argument x is ignored — loss_func closes over the data and
# reads the module-level parameters directly.
f = lambda x : loss_func(x_data, t_data)

learning_rate = 1e-3
print("Initial error value =", error_val(x_data, t_data), "Initial W =", W, "\n", ", b =", b)

# Gradient-descent training: update W and b every step, log every 400 steps.
for step in range(10001):
    W -= learning_rate * nd.numerical_derivative(f,W)
    b -= learning_rate * nd.numerical_derivative(f,b)
    if(step % 400 ==0):
        print("step =", step, "error value =", error_val(x_data,t_data), "W =", W, ", b =",b)

# Error buffer and collected predictions (module-level results).
e = np.zeros_like(t_data)
predict_velocity = []
def predict():
    """Predict a velocity for every row of x_data with the trained W/b,
    print the per-sample relative error against t_data, and append each
    prediction to the module-level predict_velocity list.
    """
    # Removed the unused local `cnt` from the original.
    delta = 1e-7  # keeps the denominator non-zero when t_data[i] == 0
    y = np.dot(x_data, W) + b
    for i in range(len(t_data)):
        # Relative error in percent.
        error_rate = (t_data[i] - y[i]) / (t_data[i] + delta) * 100
        print(i,"예측속도: ",y[i],"실제속도: ",t_data[i],"오차율[%]", error_rate
              ,"W =",W, "b =", b )
        predict_velocity.append(y[i])
# Exemplo n.º 2 (score: 0) — scraped example separator
# Arquivo: myLinear.py  Projeto: GDN2/DL
        self.b = np.random.rand(output_nodes)

    def loss_func(self):
        """Mean squared error of the current model on the stored training set."""
        prediction = np.dot(self.xdata, self.W) + self.b
        squared_error = (self.tdata - prediction) ** 2
        return np.sum(squared_error) / len(self.xdata)

    def predict(self, xdata):
        """Apply the learned linear map to xdata and return the prediction."""
        return np.dot(xdata, self.W) + self.b


loaded_data = np.loadtxt('./regression_testdata_05.csv',
                         delimiter=',',
                         dtype=np.float32)
xdata = loaded_data[:, 0:-1].reshape(-1, 4)
tdata = loaded_data[:, -1].reshape(-1, 1)
learning_rate = 1e-3
obj1 = myLinear(xdata, tdata, 4, 1)
f = lambda x: obj1.loss_func()

# Gradient-descent training: update the parameters EVERY step and log progress
# every 400 steps.  BUG FIX: the original nested the weight/bias updates inside
# the `step % 400 == 0` logging condition, so only one iteration in 400
# actually trained the model; the sibling examples in this file update on
# every step, which is the intended behaviour.
for step in range(1000001):
    obj1.W -= learning_rate * nd.numerical_derivative(f, obj1.W)
    obj1.b -= learning_rate * nd.numerical_derivative(f, obj1.b)
    if step % 400 == 0:
        print("step", step, "error value =", obj1.loss_func(), "W =", obj1.W,
              "b =", obj1.b)

# Sanity-check the trained model on a single hand-written sample.
x = np.array([3, 5, 6, 2]).reshape(1, 4)
real_val = obj1.predict(x)
print(real_val)
# Exemplo n.º 3 (score: 0) — scraped example separator
        z3 = np.dot(a2, self.W3) + self.b3
        y = a3 = self.sigmoid(z3)

        if (y > 0.5):
            result = 1
        if (y < 0.5):
            result = 0
        return y, result


# XOR truth table: inputs and targets.  XOR is not linearly separable,
# hence the hidden layer in the network below.
xdata = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
xor_tdata = np.array([0, 1, 1, 0]).reshape(4, 1)
# MyDeep(inputs, targets, input_nodes=2, hidden_nodes=6, output_nodes=1)
obj1 = MyDeep(xdata, xor_tdata, 2, 6, 1)
learning_rate = 1e-4
# Loss wrapper for the numerical differentiator; the argument x is ignored.
f = lambda x: obj1.feed_forward()

# Numerical-gradient descent on both layers' weights and biases.
for step in range(10001):
    obj1.W2 -= learning_rate * nd.numerical_derivative(f, obj1.W2)
    obj1.b2 -= learning_rate * nd.numerical_derivative(f, obj1.b2)
    obj1.W3 -= learning_rate * nd.numerical_derivative(f, obj1.W3)
    obj1.b3 -= learning_rate * nd.numerical_derivative(f, obj1.b3)

    # Log the current loss and all parameters every 400 steps.
    if (step % 400 == 0):
        print("step =", step, "loss val =", obj1.feed_forward(), "obj1.W2",
              obj1.W2, "obj1.b2", obj1.b2, "obj1.W3", obj1.W3, "obj1.b3",
              obj1.b3)

# Report the trained network's output for every XOR input pattern.
for sample in xdata:
    raw_output, thresholded = obj1.predict(sample)
    print("real_val", raw_output, "logical_val", thresholded)
# Exemplo n.º 4 (score: 0) — scraped example separator
    return -np.sum(tdata * np.log(y + delta) + (1 - tdata) * np.log((1 - y) + delta))

def predict(xdata):
    """Forward-pass xdata through the two-layer network.

    Uses the module-level parameters W2, b2, W3, b3 and sigmoid().
    Returns (y, result): the raw sigmoid output and the hard 0/1 label.
    """
    z2 = np.dot(xdata, W2) + b2
    a2 = sigmoid(z2)

    z3 = np.dot(a2, W3) + b3
    y = sigmoid(z3)  # dropped the unused `a3` alias from the original

    # Threshold at 0.5 for the binary decision.
    result = 1 if y > 0.5 else 0
    return y, result
# Loss wrapper for the numerical differentiator; the argument x is ignored.
f = lambda x : feed_forward(xdata, xor_tdata)

print("Initial loss value = ", loss_val(xdata, xor_tdata))

# Gradient-descent training.  BUG FIX: the update must SCALE the gradient by
# the learning rate (`learning_rate * gradient`); the original used `+`,
# subtracting `learning_rate + gradient` — a constant offset plus the raw
# gradient — which is not gradient descent (compare the other examples above).
for step in range(8001):
    W2 -= learning_rate * nd.numerical_derivative(f, W2)
    b2 -= learning_rate * nd.numerical_derivative(f, b2)
    W3 -= learning_rate * nd.numerical_derivative(f, W3)
    b3 -= learning_rate * nd.numerical_derivative(f, b3)

    if (step % 400 == 0):
        print("step =",step,", loss value =", loss_val(xdata, xor_tdata))

# Evaluate the trained network on each test pattern.
for sample in test_data:
    raw_output, thresholded = predict(sample)
    print("real_val", raw_output, "logical_val", thresholded)