Example #1
    def forward(self, x, t=None):
        self.y = softmax(x)
        self.t = t

        if self.t is None:  # no target given: just return the softmax output
            return self.y

        # keep t 2-D so the dot product with the transposed matrix works during backpropagation
        if self.t.ndim == 1:
            self.t = self.t[np.newaxis, :]

        loss = cross_entropy_error(self.y, self.t)
        return loss
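
These snippets lean on softmax() and cross_entropy_error() helpers from a shared common module that is not shown here. A minimal sketch of what those helpers are usually expected to do, assuming the widespread batch-averaged, one-hot implementation (the actual library code may differ):

import numpy as np

def softmax(x):
    # subtract the row-wise max for numerical stability
    x = x - np.max(x, axis=-1, keepdims=True)
    exp_x = np.exp(x)
    return exp_x / np.sum(exp_x, axis=-1, keepdims=True)

def cross_entropy_error(y, t):
    # promote a single sample to a batch of one
    if y.ndim == 1:
        y = y.reshape(1, y.size)
        t = t.reshape(1, t.size)
    batch_size = y.shape[0]
    # 1e-7 avoids log(0); average the loss over the batch
    return -np.sum(t * np.log(y + 1e-7)) / batch_size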
Example #2
# test
(train_x, train_t), (test_x, test_t) = load_mnist(normalize=True,
                                                  flatten=True,
                                                  one_hot_label=True)
print(train_x.shape)  # 60,000 * 784
print(train_t.shape)  # 60,000 * 10

train_size = len(train_x)
batch_size = 10

batch_mask = np.random.choice(train_size, batch_size)
print(batch_mask)

train_x_batch = train_x[batch_mask]
train_t_batch = train_t[batch_mask]
print(train_x_batch.shape)  # 10 * 784
print(train_t_batch.shape)  # 10 * 10

# test
# suppose batch_size is 3

t = np.array([0., 0., 1., 0., 0., 0., 0., 0., 0., 0.])

y1 = np.array([0.1, 0.05, 0.7, 0., 0.02, 0.03, 0.1, 0., 0., 0.])
y2 = np.array([0.1, 0.05, 0., 0.6, 0.02, 0.03, 0.1, 0.1, 0., 0.])
y3 = np.array([0., 0.92, 0.02, 0., 0.03, 0.03, 0., 0., 0., 0.])

print(cross_entropy_error(y1, t))
print(cross_entropy_error(y2, t))
print(cross_entropy_error(y3, t))
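
Sanity check: with a one-hot target the loss reduces to minus the log of the predicted probability at the correct class (index 2 here), so y1 should give the smallest value. Assuming the 1e-7 offset used in the sketch above:

# with one-hot t the loss is -log(y[correct class])
correct = np.argmax(t)                # index 2
print(-np.log(y1[correct] + 1e-7))    # about 0.36, best prediction
print(-np.log(y2[correct] + 1e-7))    # about 16.1, correct class got probability 0
print(-np.log(y3[correct] + 1e-7))    # about 3.9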
Example #3
def loss(x, t):
    y = forward_propagation(x)
    e = cross_entropy_error(y, t)
    return e
Example #4
def loss(w):  # softmax
    y = forward_propagation(w)
    e = cross_entropy_error(y, t)

    return e
Example #5
def loss():
    y = forward_propagation()
    e = cross_entropy_error(y, t)
    return e
Example #6
def loss(w, x, t):
    a = np.dot(x, w)
    y = softmax(a)  # softmax(x @ w)
    e = cross_entropy_error(y, t)

    return e
Example #7
 def forward(self, x, t):
     self.t = t
     self.y = softmax(x)
     self.loss = cross_entropy_error(self.y, self.t)
     return self.loss
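
A forward() like this usually lives in a softmax-with-loss layer and is paired with a backward pass. A hedged sketch of the common companion, assuming self.t is one-hot and 2-D (not part of the original snippet):

 def backward(self, dout=1):
     # gradient of softmax + cross-entropy w.r.t. the input scores
     batch_size = self.t.shape[0]
     dx = (self.y - self.t) * dout / batch_size
     return dx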
Example #8
# test1
(train_x, train_t), (test_x, test_t) = load_mnist(normalize=True,
                                                  flatten=True,
                                                  one_hot_label=True)
print(train_x.shape)  # 60000 x 784
print(train_t.shape)  # 60000 x 10

train_size = len(train_x)
batch_size = 10

batch_mask = np.random.choice(train_size, batch_size)
print(batch_mask)

train_x_batch = train_x[batch_mask]
train_t_batch = train_t[batch_mask]
print(train_x_batch.shape)  # 10 x 784
print(train_t_batch.shape)  # 10 x 10

# test3
# suppose batch_size is 3
t = np.array([[0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
              [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
              [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])

y = np.array([[0.1, 0.05, 0.7, 0., 0.02, 0.03, 0.1, 0., 0., 0.],
              [0.1, 0.05, 0., 0.4, 0.02, 0.03, 0.1, 0.3, 0., 0.],
              [0., 0.92, 0.02, 0., 0.02, 0.03, 0.1, 0., 0., 0.]])

print(cross_entropy_error(y, t))
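
Because y and t are now 2-D, the single printed number is the loss averaged over the three samples. An equivalent check in plain numpy, assuming the 1e-7 offset implementation sketched earlier:

batch_size = y.shape[0]
print(-np.sum(t * np.log(y + 1e-7)) / batch_size)  # should match the value above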
Example #9
# Neural network training: cross-entropy loss function (Cross Entropy Error, CEE)

import os
import sys
import numpy as np
from pathlib import Path
try:
    sys.path.append(os.path.join(Path(os.getcwd()).parent, 'lib'))
    from common import cross_entropy_error
except ImportError:
    print('Library Module Can Not Be Found')

t = np.array([0., 0., 1., 0., 0., 0., 0., 0., 0., 0.])

y1 = np.array([0.1, 0.05, 0.7, 0., 0.02, 0.03, 0., 0.1, 0., 0.])
y2 = np.array([0.1, 0.05, 0.05, 0.6, 0.02, 0.03, 0.05, 0.1, 0., 0.])
y3 = np.array([0., 0., 0.95, 0.02, 0.01, 0.01, 0., 0.1, 0., 0.])

# test
print(cross_entropy_error(y1, t))  # a single 1-D vector is passed in
print(cross_entropy_error(y2, t))
print(cross_entropy_error(y3, t))
Example #10
 def forward(self, a, t):
     self.y = co.softmax(a)
     self.t = t
     self.loss = co.cross_entropy_error(self.y, self.t)
     return self.loss
Example #11
 def loss(self, x, t):
     z = self.predict(x)
     y = softmax(z)
     loss = cross_entropy_error(y, t)
     return loss
Example #12
 def loss(self, x, t):
     z = self.predict(x)
     y = common.soft_max(z)
     return common.cross_entropy_error(y, t)
Example #13
 def loss(self, x, t):
     y = self.predict(x)
     return common.cross_entropy_error(y, t)
Example #14
def loss(w):
    a = np.dot(x, w)
    y = softmax(a)
    e = cross_entropy_error(y, t)

    return e
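
Defining the loss as a function of the weights alone, as in Examples #4, #6 and #14, is the shape needed to hand it to a numerical-gradient routine. A self-contained sketch of that pattern; the data, the weight shape, and the central-difference helper below are illustrative assumptions, and softmax / cross_entropy_error are reused from the sketch under Example #1:

import numpy as np

def numerical_gradient(f, w, h=1e-4):
    # central difference for every element of w
    grad = np.zeros_like(w)
    it = np.nditer(w, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        tmp = w[idx]
        w[idx] = tmp + h
        fxh1 = f(w)
        w[idx] = tmp - h
        fxh2 = f(w)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        w[idx] = tmp  # restore the original value
        it.iternext()
    return grad

# illustrative data: 2 samples, 3 features, 4 classes
x = np.array([[0.6, 0.9, 0.2],
              [0.1, 0.4, 0.7]])
t = np.array([[0., 0., 1., 0.],
              [0., 1., 0., 0.]])
w = np.random.randn(3, 4)

def loss(w):
    y = softmax(np.dot(x, w))
    return cross_entropy_error(y, t)

grad = numerical_gradient(loss, w)
print(grad.shape)  # (3, 4), same shape as w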