def predict(self, input_matrix):
    """Run a forward pass through the network and return the final activations.

    Caches the per-layer pre-activation values in ``self._layer_inputs`` and
    the post-sigmoid activations in ``self._activations`` (presumably for a
    later backpropagation step — confirm against the rest of the class).
    """
    current = np.array(input_matrix)
    layer_inputs = [current]
    activations = [current]
    for weights in self._layer_weights:
        z = np.dot(activations[-1], weights)
        layer_inputs.append(z)
        activations.append(Activation.sigmoid(z))
    self._layer_inputs = layer_inputs
    self._activations = activations
    return activations[-1]
def predict(w, b, X):
    """Predict binary labels for the examples in ``X`` with learned ``w``, ``b``.

    Parameters
    ----------
    w : weights; reshaped here to ``(X.shape[0], 1)``.
    b : scalar bias.
    X : data matrix of shape ``(n_features, m)`` — one example per column.

    Returns
    -------
    ndarray of shape ``(1, m)`` containing 0.0 / 1.0 labels.
    """
    w = w.reshape(X.shape[0], 1)
    # Sigmoid probability for every example at once.
    A = Activation.sigmoid(np.dot(w.T, X) + b)
    # Vectorized thresholding replaces the original per-example Python loop;
    # astype(float) matches the float dtype of the original np.zeros buffer.
    return (A > 0.5).astype(float)
def propagate(w, b, x, y):
    """Compute the logistic-regression cross-entropy cost and its gradients.

    Parameters
    ----------
    w : weight column vector.
    b : scalar bias.
    x : data matrix, one example per column (``m = x.shape[1]`` examples).
    y : ground-truth labels for the ``m`` examples.

    Returns
    -------
    (grads, cost) where ``grads`` is ``{"dw": ..., "db": ...}`` and ``cost``
    is the squeezed scalar cross-entropy averaged over the examples.
    """
    m = x.shape[1]
    A = Activation.sigmoid(np.dot(w.T, x) + b)
    # Average negative log-likelihood over the m examples.
    cost = (-1 / m) * np.sum(y * np.log(A) + (1 - y) * (np.log(1 - A)))
    dz = A - y
    grads = {
        "dw": (1 / m) * np.dot(x, dz.T),
        "db": (1 / m) * np.sum(dz),
    }
    return grads, np.squeeze(cost)
def predict(self, input_matrix):
    """Forward-propagate ``input_matrix`` through every layer.

    Each layer computes ``dot(previous_activation, layer_weight)`` followed by
    a sigmoid. Pre-activations are stored in ``self._layer_inputs`` and
    activations in ``self._activations``; the last activation is returned.
    """
    x = np.array(input_matrix)
    self._layer_inputs = [x]
    self._activations = [x]
    for w in self._layer_weights:
        pre_activation = np.dot(self._activations[-1], w)
        self._layer_inputs.append(pre_activation)
        self._activations.append(Activation.sigmoid(pre_activation))
    return self._activations[-1]
def apply_activation_fun(data, activation="relu"):
    """Apply the activation function named by ``activation`` to ``data``.

    Parameters
    ----------
    data : passed straight through to the implementation in module ``A``.
    activation : one of "relu", "softmax", "tanh", "softplus", "swish",
        "sigmoid". Defaults to "relu".

    Raises
    ------
    ValueError
        If ``activation`` is not a recognized name. (The original if/elif
        chain had no final else and silently returned None, hiding typos.)
    """
    # Dispatch table is clearer than a long if/elif chain and makes the
    # supported set of names explicit in one place.
    dispatch = {
        "relu": A.relu,
        "softmax": A.softmax,
        "tanh": A.tanh,
        "softplus": A.softplus,
        "swish": A.swish,
        "sigmoid": A.sigmoid,
    }
    try:
        fn = dispatch[activation]
    except KeyError:
        raise ValueError(
            f"Unknown activation {activation!r}; expected one of {sorted(dispatch)}"
        ) from None
    return fn(data)