Example #1
0
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        # If the answer label is a one-hot vector, convert it to the index of the correct class
        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
Example #2
0
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)

        return loss
Example #3
0
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        # If the teacher label is a one-hot vector, convert it to the index of the correct class
        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
Example #4
0
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        # if t is one-hot vector style
        # if self.t.size == self.y.size:
        #     self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
Example #5
0
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        # if answer label is one-hot vector, convert into answer index
        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
Example #6
0
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)
        #print(f'[forward]\nt.shape:{self.t.shape}')
        # If the answer label is a one-hot vector, convert it to the index of the correct class
        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)  # only the class index is stored
            #print(self.t)

        loss = cross_entropy_error(self.y, self.t)
        return loss
Example #7
0
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        # if the teacher label is a one-hot vector,
        # convert it to the index of the correct class
        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
Example #8
0
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        # If the answer label is a one-hot vector, convert it to the index of the correct class
        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        # Loss (scalar value)
        # L = f(x) (x = vector)
        loss = cross_entropy_error(self.y, self.t)
        return loss
Example #9
0
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)

        # print('SoftmaxWithLoss forward: x {}, t {}, loss {}'.format(x.shape, t.shape, loss))

        return loss
Example #10
0
    def loss(self, x, true_lable):
        """
        输入x和真实label,返回预测值与真实值之间的交叉熵误差
        :param x: 输入x
        :param true_lable: 真实label
        :return: 误差值
        """
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(true_label=true_lable, prediction=y)

        return loss
Example #11
0
class SoftmaxWithLoss:
    def __init__(self):
        self.params, self.grads = [], []
        self.y = None  # output of softmax
        self.t = None  # training labels

    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        # When the training label is a one-hot vector, convert it to the index of the correct class
        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
Example #12
0
    def forward(self, x, t, s=0):
        '''
        This function will perform softmax on x and
        return the loss of its result compared with t.

        Args:
            x (numpy.array): size of (batch_size, input_size).
            t (numpy.array): size of (batch_size, 1).
                Each element in t represents the id of the correct answer for x.
            s (int): when 0, reset the stored self.t / self.y lists.
        Return:
            float: result of cross_entropy_error
        '''
        if s == 0:
            self.t, self.y = [], []

        y = softmax(x)

        self.t.append(t)
        self.y.append(y)

        loss = cross_entropy_error(y, t)
        return loss
Example #13
0
 def forward(self, x, t):
     self.t = t
     self.y = softmax(x)
     loss = cross_entropy_error(self.y, t)
     return loss
Example #14
0
    def loss(self, x, t):  # define the loss function; x: input, t: answer label
        z = self.predict(x)
        y = softmax(z)  # outputs values whose total sums to 1
        loss = cross_entropy_error(y, t)

        return loss  # return the error between the prediction and the true value
Example #15
0
    def loss(self, x, y):
        z = self.predict(x)
        pred_y = softmax(z)
        loss = cross_entropy_error(pred_y, y)

        return loss
Example #16
0
	def loss(self, x, t):
		z = self.predict(x)
		y = softmax(z)
		loss = cross_entropy_error(y, t)

		return loss
Example #17
0
    def loss(self, x, t): # define the method (loss) that computes the loss function
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(y, t)

        return loss
import sys, os
sys.path.append(os.pardir)
sys.path.append(os.curdir)
import numpy as np
from common.functions import cross_entropy_error

t = [0, 0, 1, 0, 0, 0]
y = [0.1, 0.05, 0.6, 0.0, 0.05, 0.1]

error = cross_entropy_error(np.array(y), np.array(t))
print(error)

t = [0, 0, 0, 1]
y = [0.8, 1]

error = cross_entropy_error(np.array(y), np.array(t))
print(error)


# Show the relationship between y and t
# y is the prediction; y.shape[0] is the number of result rows and should be consistent with t's shape (i.e. match y's row count y.shape[0])
# t holds the labels; each label should be a single index. If the labels are one-hot instead, t.argmax(axis=1) can be used to get the indices
def sumlog(y, t):
    y = np.array(y)
    t = np.array(t)
    print(y)
    print(t)
    print(y.size)
    print(y.shape)
    print(t.shape[0])
    return np.sum(np.log(y[np.arange(y.shape[0]), t]))
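

# A quick sanity check of sumlog (an assumed usage, not from the original snippet):
# with integer labels t = [2, 0], it should pick y[0, 2] and y[1, 0] and sum their logs.
print(sumlog([[0.1, 0.1, 0.8], [0.7, 0.2, 0.1]], [2, 0]))  # log(0.8) + log(0.7) ≈ -0.580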
    def loss(self, x, t):  # compute the value of the loss function
        z = self.predict(x)
        y = softmax(z)  # check the distribution of the prediction
        loss = cross_entropy_error(y, t)  # loss between the prediction and the answer

        return loss
Example #20
0
 def forward(self, x, t):
     self.t = t
     self.y = 1. / (1. + np.exp(-x))
     self.loss = cross_entropy_error(np.c_[1 - self.y, self.y], self.t)
     return self.loss
Example #21
0
 def loss(self, x, t):
     '''x receives the input data; t receives the correct answer labels'''
     z = self.predict(x)
     y = softmax(z)  # transform with the activation function
     loss = cross_entropy_error(y, t)
     return loss  # return the error
def loss(x, t, w):
    z = predict(x, w)
    y = softmax(z)
    return cross_entropy_error(y, t)
 def loss(this, x, t):  #x: inputs, t: labels
     y = this.predict(x)
     return cross_entropy_error(y, t)
Example #24
0
    def loss(self, x, t):
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(y, t)

        return loss
Example #25
0
def cross_entropy_error_num(y, t):
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    batch_size = y.shape[0]
    delta = 1e-7
    return -np.sum(
        np.log(y[np.arange(batch_size), t] + delta)
    ) / batch_size  # if t = [2, 7, 0, 9, 4], this picks y[0,2], y[1,7], y[2,0], y[3,9], y[4,4]
    # i.e. it extracts only the network outputs corresponding to the correct labels


t = [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
y = [0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0]
print(mean_squared_error(np.array(y), np.array(t)))  # 0.09750000000000003
print(cross_entropy_error(np.array(y), np.array(t)))  # 0.510825457099338
t = [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]]
y = [[0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0],
     [0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.3]]
print(mean_squared_error(np.array(y), np.array(t)))  # 0.5975
print(cross_entropy_error(np.array(y), np.array(t)))  # 2.302584092994546

# If there is too much data, computing the loss over everything at once becomes too costly, so let's process it in mini-batches
import sys, os
sys.path.append(os.pardir)  # add the parent directory to the import path
import numpy as np
from dataset.mnist import load_mnist  # load the load_mnist function from mnist.py in the dataset folder under the parent directory

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,
                                                  one_hot_label=True)
print(x_train.shape)  # (60000, 784)
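
# A minimal mini-batch sketch (an assumption, not part of the original snippet):
# randomly select 10 samples from the training data loaded above.
train_size = x_train.shape[0]                           # 60000
batch_size = 10
batch_mask = np.random.choice(train_size, batch_size)   # 10 random indices
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
print(x_batch.shape, t_batch.shape)                     # (10, 784) (10, 10)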
 def loss(this, x, t):#x: inputs, t: labels
     z = this.predict(x)
     y = softmax(z)#Imported function from common directory
     loss = cross_entropy_error(y, t)
     return loss
## Preliminary data
# input values
x = np.array([1., 2.])

# target output
d = np.array([0, 0, 0, 1])

# initialize the network
network = init_network()

# output
y, z1 = forward(network, x)

# error
loss = functions.cross_entropy_error(d, y)

## Display
print("\n##### Results #####")
print_vec("output", y)
print_vec("training data", d)
print_vec("error", loss)

# In[ ]:

# regression
# 2-3-2 network

# !Try it: change the node configuration to 3-5-4

Example #28
0
# Implementation of cross-entropy error
def cross_entropy_error(y, t):
    # so the computation can proceed even if y contains 0
    delta = 1e-7

    return -np.sum(t * np.log(y + delta))


# In[5]:

# Try it out
print('----- Cross Entropy Error -----')
t = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])

y1 = np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
print('Example 1:', cross_entropy_error(y1, t))

y2 = np.array([0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0])
print('Example 2:', cross_entropy_error(y2, t))

# #### 4.2.3 Mini-batch learning
# The cross-entropy error above handles a single data point.
# Extended so that it applies to $N$ data points, it becomes:
# \begin{equation}
# E = - \frac{1}{N} \sum_{n} \sum_{k} t_{nk} \log y_{nk}
# \end{equation}
# (a sketch of this batched form follows after the next cell marker)
#
# Mini-batch learning means drawing a subset from a huge dataset of millions or tens of millions of samples and training on that subset.
# For mini-batch learning, we write code that randomly selects a specified number of samples from the training data.

# In[6]:
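
# A sketch (an assumption, not the original cell) of the batched form derived above,
# E = -(1/N) * sum_n sum_k t_nk * log(y_nk), reusing t, y1 and y2 from the earlier cell:
def cross_entropy_error_batch(y, t):
    if y.ndim == 1:              # promote a single sample to a 1-row batch
        y = y.reshape(1, y.size)
        t = t.reshape(1, t.size)
    delta = 1e-7                 # same guard against log(0) as above
    batch_size = y.shape[0]
    return -np.sum(t * np.log(y + delta)) / batch_size


print('Batch example:', cross_entropy_error_batch(np.array([y1, y2]), np.array([t, t])))
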
Example #29
0
 def loss(self, x, t):
     return cross_entropy_error(self.predict(x), t)
Example #30
0
    def loss(self, x, t):
        y = self.predict(x)

        return cross_entropy_error(y, t)
Example #31
0
 def forward(self, x, d):
     self.d = d
     self.y = functions.softmax(x)
     self.loss = functions.cross_entropy_error(self.d, self.y)
     
     return self.loss