Example #1
class MLP:
    def __init__(self, input_size, hidden_size_1, hidden_size_2, output_size):
        self.input_layer = InputLayer(input_size, hidden_size_1, ReLU)
        self.hidden_layer = HiddenLayer(hidden_size_1, hidden_size_2)
        self.output_layer = SoftmaxOutputLayer(hidden_size_2, output_size)

    def predict(self, x):
        x = self.input_layer.forward(x)
        x = self.hidden_layer.forward(x)
        prob = self.output_layer.predict(x)

        pred = np.argmax(prob, axis=-1)

        return pred

    def loss(self, x, y):
        x = self.input_layer.forward(x)
        x = self.hidden_layer.forward(x)
        loss = self.output_layer.forward(x, y)
        return loss

    def gradient(self):
        d_prev = 1
        d_prev = self.output_layer.backward(d_prev=d_prev)
        d_prev = self.hidden_layer.backward(d_prev)
        self.input_layer.backward(d_prev)

    def update(self, learning_rate):
        self.input_layer.W -= self.input_layer.dW * learning_rate
        self.input_layer.b -= self.input_layer.db * learning_rate
        self.hidden_layer.W -= self.hidden_layer.dW * learning_rate
        self.hidden_layer.b -= self.hidden_layer.db * learning_rate
        self.output_layer.W -= self.output_layer.dW * learning_rate
        self.output_layer.b -= self.output_layer.db * learning_rate
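
For context, here is a minimal sketch of how a class like this might be driven in a training loop. It assumes the InputLayer, HiddenLayer, and SoftmaxOutputLayer classes used above are importable, that SoftmaxOutputLayer expects one-hot targets, and that X_train, y_train, the layer sizes, and the hyperparameters are purely illustrative:

import numpy as np

# Illustrative shapes only: X_train is (N, 784), y_train is one-hot (N, 10).
model = MLP(input_size=784, hidden_size_1=128, hidden_size_2=64, output_size=10)

num_epochs = 10
learning_rate = 0.01

for epoch in range(num_epochs):
    loss = model.loss(X_train, y_train)   # forward pass; layers cache their activations
    model.gradient()                      # backward pass fills each layer's dW and db
    model.update(learning_rate)           # plain gradient-descent step on every W and b

    acc = np.mean(model.predict(X_train) == np.argmax(y_train, axis=-1))
    print(f'epoch {epoch}: loss={loss:.4f}, acc={acc:.4f}')

The order matters: loss must run before gradient, because the backward pass relies on the activations cached during the forward pass.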
Example #2
class MLP:
    def __init__(self, input_size, hidden_size, output_size):

        self.input_layer = InputLayer(input_size, hidden_size, Sigmoid)
        self.output_layer = SigmoidOutputLayer(hidden_size, output_size)

    def predict(self, x):
        x = self.input_layer.forward(x)
        pred = self.output_layer.predict(x)
        pred[pred >= 0.5] = 1
        pred[pred < 0.5] = 0
        return pred

    def loss(self, x, y):
        x = self.input_layer.forward(x)
        loss = self.output_layer.forward(x, y)
        return loss

    def gradient(self):
        d_prev = 1
        d_prev = self.output_layer.backward(d_prev=d_prev)
        self.input_layer.backward(d_prev)

    def update(self, learning_rate):
        self.input_layer.W -= self.input_layer.dW * learning_rate
        self.input_layer.b -= self.input_layer.db * learning_rate
        self.output_layer.W -= self.output_layer.dW * learning_rate
        self.output_layer.b -= self.output_layer.db * learning_rate
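
A note on predict here: the two boolean-mask assignments convert sigmoid probabilities into hard 0/1 labels in place, which also overwrites the probability array returned by the output layer. A small self-contained illustration of the idiom, with a non-mutating alternative, using plain NumPy:

import numpy as np

prob = np.array([0.91, 0.07, 0.50, 0.33])

# In-place thresholding as in predict(); prob itself is modified.
pred = prob
pred[pred >= 0.5] = 1
pred[pred < 0.5] = 0
print(pred)                          # [1. 0. 1. 0.]

# Non-mutating equivalent that keeps the probabilities intact.
prob = np.array([0.91, 0.07, 0.50, 0.33])
pred = (prob >= 0.5).astype(float)
print(pred)                          # [1. 0. 1. 0.]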
Example #3
    def __init__(self, input_size, hidden_size_1, hidden_size_2, output_size):
        self.input_layer = InputLayer(input_size, hidden_size_1, ReLU)
        self.hidden_layer = HiddenLayer(hidden_size_1, hidden_size_2)
        self.output_layer = SoftmaxOutputLayer(hidden_size_2, output_size)
Example #4
"""
The results should be exactly the same as below:

Forward: 
 [[1.69405863 0.        ]
 [1.41846232 0.        ]]
dW: 
 [[ 1.  0.]
 [-3.  0.]
 [-2.  0.]]
db: 
 [-1.  0.]
"""

# InputLayer check: run a forward pass on a small batch, then backpropagate
# a hand-picked upstream gradient and inspect the accumulated dW and db.
input_layer = InputLayer(3, 2, ReLU)
input_layer.w = np.array([[1.0, -2.0], [2.0, -1.0], [-3.5, -0.5]])
input_layer.b = np.array([1.0, -1.0])
temp6 = np.array([[-1, 3, 2], [0.0, 1.0, 0.0]])   # batch of two 3-dimensional inputs
temp7 = np.array([[-1, 3], [0.0, 1.0]])           # upstream gradient fed to backward
print('Forward: \n', input_layer.forward(temp6))
input_layer.backward(temp7)
print('dW: \n', input_layer.dW)
print('db: \n', input_layer.db)
print()

print('===== HiddenLayer Check =====')
"""
The results should be exactly the same as below:
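
The InputLayer check in Example #4 implies a particular contract for the layer: forward applies an affine transform x·W + b followed by the given activation, and backward multiplies the upstream gradient by the activation's derivative, accumulates dW = xᵀ·dZ and db = Σ dZ, and returns the gradient for the previous layer. Below is a minimal self-contained sketch of a layer with that interface; it is an assumption about the course code, not a copy of it (in particular, the exact Forward numbers in the docstring depend on the course's own weight initialization):

import numpy as np

class ReLU:
    """Elementwise ReLU; keeps the mask from forward for use in backward."""
    def forward(self, z):
        self.mask = (z > 0)
        return z * self.mask

    def backward(self, d_out):
        return d_out * self.mask

class InputLayer:
    """Affine transform followed by an activation (sketch of the assumed interface)."""
    def __init__(self, in_dim, out_dim, activation):
        self.W = np.random.randn(in_dim, out_dim) * 0.01
        self.b = np.zeros(out_dim)
        self.activation = activation()

    def forward(self, x):
        self.x = x
        z = x @ self.W + self.b
        return self.activation.forward(z)

    def backward(self, d_prev):
        dz = self.activation.backward(d_prev)   # apply the activation's gradient
        self.dW = self.x.T @ dz                 # gradient w.r.t. W
        self.db = dz.sum(axis=0)                # gradient w.r.t. b
        return dz @ self.W.T                    # gradient passed to the previous layer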
Example #5
    def __init__(self, input_size, hidden_size, output_size):

        self.input_layer = InputLayer(input_size, hidden_size, Sigmoid)
        self.output_layer = SigmoidOutputLayer(hidden_size, output_size)