Example #1
    def forward(self, x, t):
        self.t = t
        self.y = 1 / (1 + np.exp(-x))

        self.loss = cross_entropy_error(np.c_[1 - self.y, self.y], self.t)

        return self.loss
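Most of the snippets in this listing call a `cross_entropy_error` helper that is not shown here. A minimal sketch of such a helper, assuming mini-batch input and labels given either as one-hot vectors or as class indices (an assumption, not code taken from any of these examples):

import numpy as np

def cross_entropy_error(y, t):
    # Promote a single sample to a batch of size 1.
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)

    # If t is one-hot, reduce it to class indices.
    if t.size == y.size:
        t = t.argmax(axis=1)

    batch_size = y.shape[0]
    # The small constant guards against log(0).
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size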
Example #2
def estimate_E_out(f, w, n_rounds=1000):
    '''
    Estimates the out-of-sample error of h[w](x) by generating a new sample
    and computing the cross-entropy error over it.
    '''
    dataset = generate_dataset(f, n_rounds)
    return myround(cross_entropy_error(w, dataset))
Example #3
    def loss(self, x, t):
        z = self.predict(x)
        #y = softmax(z)
        y = functions.relu(z)
        loss = functions.cross_entropy_error(y, t)

        return loss
Example #4
    def forward(self, x: np.ndarray, t: np.ndarray) -> float:
        self.t = t
        self.y = softmax(x)

        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
Example #5
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
Example #6
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        # If the teacher labels are one-hot vectors, convert them to the indices of the correct classes
        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
Example #7
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        # If the answer labels are one-hot vectors, convert them to the indices of the correct classes
        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
Example #8
	def loss(self, x, t):
		"""
		x : array-like
			input
		t : array-like
			true label
		"""
		z = self.predict(x)
		y = softmax(z)
		loss = cross_entropy_error(y, t)
		return loss
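In the repositories these snippets come from, a `loss` method like the one above is typically paired with a numerical gradient check. A hedged usage sketch, where `SimpleNet`, `numerical_gradient`, `x`, and `t` are placeholders rather than names taken from these examples:

net = SimpleNet()                  # hypothetical network exposing predict()/loss() and a weight matrix W
f = lambda _: net.loss(x, t)       # loss as a function of the weights, with x and t held fixed
dW = numerical_gradient(f, net.W)  # numerical gradient of the loss with respect to W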
Example #9
    def loss(self, x, t):
        """Loss function using cross entropy error

        Args:
            x (numpy.ndarray): image data which mean input to NN
            t (numpy.ndarray): labels

        Returns:
            float: result of cross entropy error
        """
        y = self.predict(x)

        return cross_entropy_error(y, t)
Example #10
    def forward(self, x, t):
        """forward propagation

        Args:
            x (numpy.ndarray): input
            t (numpy.ndarray): train data

        Returns:
            float: cross entropy error
        """
        self.t = t
        self.y = softmax(x)
        self.loss = cross_entropy_error(self.y, self.t)

        return self.loss
Example #11
    def forward(self, x, t):
        """順伝播

        SoftmaxWithLossレイヤの順伝播の結果を返す

        Args:
            x (ndarray): 入力
            t (ndarray): 教師ラベル
        """
        self.t = t
        self.y = softmax(x)

        # If the teacher labels are one-hot vectors, convert them to the indices of the correct classes
        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
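The SoftmaxWithLoss examples above only define the forward pass. A minimal backward sketch that would pair with them, assuming `forward` has stored `self.y` and `self.t` and that `self.t` holds class indices after the one-hot conversion above:

    def backward(self, dout=1):
        # Gradient of softmax-with-cross-entropy w.r.t. the input x: (y - t) / batch_size.
        batch_size = self.t.shape[0]
        dx = self.y.copy()
        dx[np.arange(batch_size), self.t] -= 1
        return dx * dout / batch_size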
Example #12
    def loss(self, x, t):
        y = self.predict(x)

        return f.cross_entropy_error(y, t)
Example #13
    def forward(self, x, true):
        self.true = true
        self.pred = f.softmax(x)
        self.loss = f.cross_entropy_error(self.pred, self.true)

        return self.loss
Example #14
 def forward(self, x, t):
     self.t = t
     self.y = softmax(x)
     self.loss = cross_entropy_error(self.y, self.t)
     
     return self.loss
Example #15
    def loss(self, x, t):
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(y, t)

        return loss
Example #16
    def loss(self, x, t):  # compute the loss via cross_entropy_error
        y = self.predict(x)

        return cross_entropy_error(y,t)
Example #17
import numpy as np
# relu, softmax, and cross_entropy_error are assumed to be provided by a helper
# module, as in the other examples in this listing.
"""データ"""
x = np.array([[0.1, 0.8]])
w1 = np.array([[10, 7], [0.8, 6]])
b1 = np.array([[1, 1]])
w2 = np.array([[0.4, 30], [0.8, 0.2]])
b2 = np.array([[1, 1]])
t = np.array([[1, 0]])
learning_rate = 0.02

for i in range(1):
    a1 = np.dot(x, w1) + b1
    z1 = relu(a1)
    a2 = np.dot(z1, w2) + b2
    y = softmax(a2)
    loss = cross_entropy_error(y, t)
    """共通の偏微分"""
    dEdY = -(t / y)  # [dEdy1, dEdy2]
    S = np.sum(np.exp(a2))
    dYdS = np.exp(a2) / np.square(S)  # [dY1dS, dY2dS]

    print("dw2_11: ",
          (-(t[0][0] / y[0][0]) *
           (np.exp(a2[0][1]) / np.square(S))) * np.exp(a2[0][0]) * z1[0][0])
    print("dw2_12: ",
          (-(t[0][0] / y[0][0]) *
           (np.exp(a2[0][1]) / np.square(S))) * np.exp(a2[0][1]) * z1[0][0])
    print("dw2_21: ",
          (-(t[0][0] / y[0][0]) *
           (np.exp(a2[0][1]) / np.square(S))) * np.exp(a2[0][0]) * z1[0][1])
    print("dw2_22: ",
Example #18
def evaluate_E_in(w, dataset):
    '''
    Computes the in-sample error over the labeled sample `dataset` for a hypothesis parameterized by w.
    '''
    return myround(cross_entropy_error(w, dataset))
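Example #2 and Example #18 call `cross_entropy_error(w, dataset)` with a different signature, passing a weight vector and a labeled sample instead of predictions and targets. A sketch of that variant, under the assumption that each row of `dataset` is an `(x, y)` pair with `y` in {-1, +1} as in the usual logistic-regression exercises (the data layout is an assumption, not something shown here):

import numpy as np

def cross_entropy_error(w, dataset):
    # Average logistic cross-entropy loss ln(1 + exp(-y * w.x)) over the sample.
    return np.mean([np.log(1.0 + np.exp(-y * np.dot(w, x))) for x, y in dataset])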
Example #19
 def loss(self, x, t):  # x is the input data, t is the correct labels
     z = self.predict(x)
     y = softmax(z)
     loss = cross_entropy_error(y, t)
     return loss
Example #20
    def forward(self, x: np.ndarray, t: np.ndarray) -> np.ndarray:
        self.t = t
        self.y = 1 / (1 + np.exp(-x))

        self.loss = cross_entropy_error(np.c_[1 - self.y, self.y], self.t)
        return self.loss
Example #21
 def loss(self, t):
     self.t = t
     self.error = cross_entropy_error(self.y, self.t)
     return self.error
import numpy as np
import matplotlib.pylab as plt
import sys,os
sys.path.append("../lib")
sys.path.append(os.pardir)
import functions
from dataset.mnist import load_mnist

t = [0,0,1,0,0,0,0,0,0,0]
y_good = [0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0]
print(functions.mean_squared_error(np.array(y_good), np.array(t)))

y_bad = [0.1, 0.05, 0.2, 0.0, 0.05, 0.1, 0.0, 0.1, 0.8, 0.0]
print(functions.mean_squared_error(np.array(y_bad), np.array(t)))

print(functions.cross_entropy_error(np.array(y_good), np.array(t)))
print(functions.cross_entropy_error(np.array(y_bad), np.array(t)))
    def loss(self, y, t):
        # compute the loss
        error = cross_entropy_error(y, t)

        return error