def __forward_propagation(self, X):
    # retrieve the two-layer network's parameters
    W1 = self.parameters['W1']
    b1 = self.parameters['b1']
    W2 = self.parameters['W2']
    b2 = self.parameters['b2']
    # forward propagation: tanh hidden layer, sigmoid output layer
    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)
    # cache the intermediate values needed for back-propagation
    cache = {'X': X, 'Z1': Z1, 'A1': A1, 'Z2': Z2, 'A2': A2}
    return A2, cache
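The two-layer forward pass above relies on a sigmoid helper that is not shown in this example. A minimal sketch of what such a helper usually looks like (this definition is an assumption, not part of the original snippet):

import numpy as np

def sigmoid(z):
    # element-wise logistic function; assumed helper, not shown in the original example
    return 1.0 / (1.0 + np.exp(-z))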
def __one_layer_forward(self, A_prev, W, b, activation, keep_prob):
    # linear step followed by the chosen non-linearity
    Z = np.dot(W, A_prev) + b
    if activation == 'sigmoid':
        A = sigmoid(Z)
    elif activation == 'relu':
        A = relu(Z)
    elif activation == 'leaky_relu':
        A = leaky_relu(Z)
    elif activation == 'tanh':
        A = np.tanh(Z)
    # inverted dropout: zero out units with probability (1 - keep_prob)
    # and rescale so the expected activation is unchanged
    if keep_prob == 1:
        D = np.ones((A.shape[0], A.shape[1]))
    else:
        D = np.random.rand(A.shape[0], A.shape[1])
        D = D < keep_prob
        A = A * D
        A = A / keep_prob
    cache = {'Z': Z, 'A': A, 'D': D}
    return A, cache
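Besides sigmoid, __one_layer_forward assumes relu and leaky_relu helpers. A minimal sketch of the usual element-wise definitions (the 0.01 negative slope is an assumed default, not taken from the original code):

import numpy as np

def relu(z):
    # rectified linear unit; assumed helper
    return np.maximum(0, z)

def leaky_relu(z, alpha=0.01):
    # leaky ReLU with a small negative slope; the slope value is an assumption
    return np.where(z > 0, z, alpha * z)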
Example no. 3
def predict_prob(self, X):
    # predicted probability of the positive class, one value per sample
    n_samples = X.shape[0]
    return sigmoid(np.dot(X, self.W) + self.b).reshape(n_samples,)
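Note that predict_prob expects samples in the rows of X (np.dot(X, self.W)), and its output is usually turned into hard labels by thresholding at 0.5. A self-contained, purely illustrative sketch of that convention (the toy weights and data below are assumptions, not taken from the original class):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# toy parameters and inputs: 2 samples (rows) with 2 features each
W = np.array([0.5, -0.25])
b = 0.1
X = np.array([[1.0, 2.0],
              [0.0, -1.0]])

probs = sigmoid(np.dot(X, W) + b)      # same expression predict_prob evaluates
labels = (probs >= 0.5).astype(int)    # hard 0/1 predictions at the usual 0.5 threshold
print(probs, labels)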
def sigmoid_backward(self, dA, Z):
    # derivative of the sigmoid, sigma'(Z) = sigma(Z) * (1 - sigma(Z)),
    # chained with the upstream gradient dA
    s = sigmoid(Z)
    dZ = dA * s * (1 - s)
    return dZ
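The formula used in sigmoid_backward can be sanity-checked against a finite-difference approximation. A minimal self-contained sketch, with arbitrary test points and step size:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

z = np.array([-2.0, 0.0, 1.5])
eps = 1e-6
analytic = sigmoid(z) * (1 - sigmoid(z))                     # derivative used in sigmoid_backward
numeric = (sigmoid(z + eps) - sigmoid(z - eps)) / (2 * eps)  # central finite difference
print(np.allclose(analytic, numeric))                        # expected: True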
def __forward_propagation(self, X):
    m = X.shape[1]  # number of examples (not used in this forward pass)
    # single-unit (logistic regression) forward pass: A = sigmoid(w^T X + b)
    A = sigmoid(np.dot(self.w.T, X) + self.b)
    return A
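Unlike predict_prob above, this forward pass keeps the examples in the columns of X (np.dot(self.w.T, X)), producing one probability per column. A minimal self-contained sketch of how it is typically exercised (shapes, weights, and the sigmoid helper are illustrative assumptions, not part of the original class):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

n_x, m = 3, 5                        # number of features, number of examples
w = np.random.randn(n_x, 1) * 0.01   # weight column vector
b = 0.0
X = np.random.randn(n_x, m)          # one example per column

A = sigmoid(np.dot(w.T, X) + b)      # shape (1, m): one probability per example
print(A.shape)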