# === Code example #1 — file: ex03.py, project: et0511/linear-algebra-basics ===
# Gradient descent via numerical differentiation: fit a line y = a*x + b
# to (times, scores) by minimizing the mean squared error.
import os
import sys
from pathlib import Path

import numpy as np

try:
    # Make the sibling 'lib' directory importable so the shared helpers resolve.
    sys.path.append(os.path.join(Path(os.getcwd()).parent, 'lib'))
    from common import mean_squares_error, gradient_descent
except ImportError:
    print('Library Module Can Not Found')

# training data: study hours vs. exam scores
times = [2, 4, 6, 8]
scores = [81, 93, 91, 97]

# run gradient descent from the initial guess (slope, intercept) = (0, 0)
result = gradient_descent(
    mean_squares_error,
    np.zeros(2),
    epoch=5000,
    data_training=(times, scores),
)
print(f'직선 y = {result[0]}x + {result[1]}')

# report the mean squared error of the fitted line
print(f'오차(평균제곱오차):{mean_squares_error(np.array(result), (times, scores))}')
# === Code example #2 ===
def loss(x, data_training):
    """Binary cross-entropy loss for a one-feature logistic model.

    x: parameter vector — x[0] is the weight, x[1] the bias.
    data_training: (inputs, targets) pair; targets are 0/1 labels.
    Returns the mean negative log-likelihood as a scalar.
    """
    data_in, data_out = data_training
    # Evaluate the sigmoid once; the original recomputed
    # sigmoid(x[0] * data_in + x[1]) for both log terms.
    p = sigmoid(x[0] * data_in + x[1])
    e = data_out * np.log(p) + (1 - data_out) * np.log(1 - p)
    return -1 * np.mean(e)


# data
times = np.array([2, 4, 6, 8, 10, 12, 14])
passed = np.array([0, 0, 0, 1, 1, 1, 1])

# 경사하강법
params = gradient_descent(loss,
                          np.array([0., 0.]),
                          lr=0.5,
                          epoch=50000,
                          data_training=(times, passed))

# graph
x = np.arange(0, 15, 0.1)
y = sigmoid(params[0] * x + params[1])

fig, splt = plt.subplots()
splt.scatter(times, passed)
splt.plot(x, y)

plt.show()

# predict
x_p = 7
# === Code example #3 ===
# 경사하강법

import sys
import numpy as np
import os
from pathlib import Path

try :
    sys.path.append(os.path.join(Path(os.getcwd()).parent, 'library'))
    from common import gradient_descent
except :
    print('Library Module Can Not Found')

def f(x):
    """Sum of squared components of x (printed to trace the descent).

    x: numeric NumPy array.
    Returns np.sum(x**2, axis=0) — a scalar for 1-D input.
    """
    # Compute once; the original evaluated np.sum(x**2, axis=0)
    # twice (once for the print, once for the return).
    value = np.sum(x ** 2, axis=0)
    print(value)
    return value

# gradient_descent(f, np.array([-3., 4.]), lr=0.1) # Initial values must be floats; plain ints would make the array int-typed and fail during the update.
# gradient_descent(f, np.array([-3., 4.]), lr=10)
gradient_descent(f, np.array([-3., -2]), lr=0.001, epoch=10)

# If epoch or lr is omitted, the defaults declared on gradient_descent are used.
# lr (learning rate): how far each update moves the point toward the minimum gradient.
# epoch: number of gradient-descent iterations.
# === Code example #4 ===
import os
import sys
from pathlib import Path
import numpy as np
try:
    sys.path.append(os.path.join(Path(os.getcwd()).parent, 'lib'))
    from common import gradient_descent
except ImportError:
    print('Library Module Can Not Found')


def f(x):
    """Shifted paraboloid: sum over components of (x_i + 2)**2 + 1.

    Minimized when every component of x equals -2.
    """
    shifted = x + 2
    return np.sum(shifted ** 2 + 1, axis=0)


# Descend from (-3, 4) toward f's minimum at (-2, -2).
gradient_descent(f, np.array([-3., 4.]), lr=0.1)
# gradient_descent(f, np.array([-3., 4.]), lr=10)     # lr too large: steps overshoot and diverge
# gradient_descent(f, np.array([-3., 4.]), lr=0.001)  # lr too small: converges very slowly
# === Code example #5 — file: ex01.py, project: chu83/linear-algebra-basics ===
#기울기(gradient)
import os
import sys
from pathlib import Path
import numpy as np

try:
    sys.path.append(os.path.join(Path(os.getcwd()).parent, 'lib'))
    from common import gradient_descent

except ImportError:
    print('Library Module Can Not Found')


def f(x):
    """Sum of squared components of x; minimized at the origin."""
    return (x ** 2).sum(axis=0)

# Same start point, two learning rates: lr=0.1 converges in the default
# number of epochs; lr=0.001 moves so little per step that it needs 1e6 epochs.
gradient_descent(f, np.array([-3., 4.]), lr=0.1)
gradient_descent(f, np.array([-3., 4.]), lr=0.001, epoch=1000000)