Example #1
import gd2 as gd
import numpy as np
from numpy.linalg import norm

# NOTE: A (the coefficient matrix this example fits against) is defined above
# the part of the file shown in this snippet.
B = np.array([0.0, 0, 1]).transpose()


def sig(x):
    s = 1 / (1 + np.exp(-x))  # sigmoid activation: squashes x into (0, 1)
    return s


def f(p):  # energy function: the distance ||A·p - B|| to be minimised
    X = p
    Y = np.dot(A, X)
    return norm(Y - B)


p = np.array([0.0, 0, 0])
p = gd.gradientDescendent(f, p)
print(
    "\n==================================================================================\n"
)

print("p = {}".format(p))

weights = np.array([p[0], p[1]])  # learned weights w1, w2
bias = p[2]  # learned bias b

inputArr = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
B = np.dot(inputArr, weights) + bias
print("Inputs: \n{}".format(inputArr))
print("\u03A3(in⋅w) + b = {}".format(B))
print("sig(\u03A3) = {}".format(sig(B)))
Example #2
#x   y | o
#0   0 | 0     x0 y0 o0
#0   1 | 0     x1 y1 o1
#1   0 | 0     x2 y2 o2
#1   1 | 1     x3 y3 o3
#sig(w1*x + w2*y + b) = o
#sig(w1*0 + w2*0 + b) = o0   => 0
#sig(w1*0 + w2*1 + b) = o1   => 0
#sig(w1*1 + w2*0 + b) = o2   => 0
#sig(w1*1 + w2*1 + b) = o3   => 1
#f(x,y,w) = (o0-0)^2 + (o1-0)^2 + (o2-0)^2 + (o3-1)^2

import gd2 as gd
import numpy as np
from numpy.linalg import norm  # linear algebra: vector norm

B = np.array([0.0, 0, 1])  # B=[w1, w2, b]


def loss(B):
    A = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    O = np.array([0.0, 0, 0, 1])  # expected outputs (AND truth table)
    dot = np.dot(A, B)  # dot product of each input row with [w1, w2, b]
    return np.linalg.norm(sigmoid(dot) - O, 2)  # 2-norm: sqrt((o0-0)^2 + (o1-0)^2 + (o2-0)^2 + (o3-1)^2)


def sigmoid(dot):
    return 1.0 / (1 + np.exp(-dot))  # squashes into (0, 1); sigmoid(dot) >= 0.5 exactly when dot >= 0


gd.gradientDescendent(loss, B)
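
A possible follow-up check (a sketch, not part of the original example): if the call above is changed to capture its return value, B = gd.gradientDescendent(loss, B), the learned weights can be tested against the AND truth table:

A = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
out = sigmoid(np.dot(A, B))  # outputs for the four inputs
print("sigmoid outputs:", out)
print("thresholded    :", (out >= 0.5).astype(int))  # expect [0 0 0 1]
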
Example #3
"""
A X = B ,求 X 是多少?

3x+2y=5
1x+1y=2
"""
import gd2 as gd
import numpy as np

A = np.array([[3.0, 2],[1, 1]])
B = np.array([5.0, 2]) # np.array([[5.0, 2]]).transpose()

def f(p):  # energy function: compute ||AX - B||, i.e. ||Y - B||
    X = p.transpose()
    Y = A.dot(X)
    return np.linalg.norm(Y - B, 1)  # 1-norm of the residual

p = np.array([0.0, 0])

gd.gradientDescendent(f, p, step=0.001)
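
A sanity check (a sketch, not in the original example): if the call above captures its return value, p = gd.gradientDescendent(f, p, step=0.001), the result can be compared with the exact solution x = 1, y = 1 from np.linalg.solve:

print("gradient descent:", p)                      # should approach [1, 1]
print("np.linalg.solve :", np.linalg.solve(A, B))  # exact solution [1. 1.]
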
Example #4
import gd2
import numpy as np


def sig(x):  # sigmoid activation: squashes x into (0, 1)
    return 1 / (1 + np.exp(-x))


def f(p):  # energy function: squared error between the sigmoid outputs and the targets B
    Y = sig(np.dot(A, p))  # raw outputs pushed through the sigmoid
    return (np.linalg.norm(Y - B, 2))**2


A = np.array([
    [0, 0, 1],  # input matrix: columns are x, y, and a constant 1 for the bias
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 1]
])

B = np.array([0.0, 0, 0, 1])  # expected outputs (AND truth table)

p = np.array([0.0, 0, 0])  # initial values for w1, w2, b

p = gd2.gradientDescendent(f, p)  # use gradient descent to find the minimum

print("\n\nw1 = {}, w2 = {}, b = {}".format(p[0], p[1], p[2]))

C = sig(np.dot(A, p))  # sigmoid output for each input row
result = []
for i in C:
    result.append(1 if i >= 0.5 else 0)

print("\tx y | o")
print("\t----|--")
for i in range(len(result)):
    print("\t{} {} | {}".format(A[i][0], A[i][1], result[i]))