Example #1
    def cost(
        self,
        parameters,
        activations,
        x,
        y_true=None,
        dy_true=None,
        lambd=0.0,
        gamma=0.0,
    ):
        """
        Cost function for training

        :param x:
        :param parameters:
        :param activations:
        :param y_true:
        :param dy_true:
        :param lambd:
        :param gamma:
        :return:
        """
        y_pred, caches = L_model_forward(x, parameters, activations)
        dy_pred, dy_caches = L_grads_forward(x, parameters, activations)
        w = [value for name, value in parameters.items() if "W" in name]
        cost = lse(y_true, y_pred, lambd, w, dy_true, dy_pred, gamma)
        return cost
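The call to lse combines the prediction error, the derivative error, and a weight penalty into a single scalar. A minimal sketch of a cost with that shape, assuming lse follows the regularized least-squares form suggested by its arguments (an inference from the call site, not the library's confirmed formula):

    import numpy as np

    def lse_sketch(y_true, y_pred, lambd, w, dy_true, dy_pred, gamma):
        # Assumed form: least-squares error on the responses, a gamma-weighted
        # least-squares error on the predicted partials, and a lambd-weighted
        # L2 penalty on the weight matrices.
        m = y_true.shape[1]  # number of training examples
        cost = 0.5 / m * np.sum((y_pred - y_true) ** 2)
        cost += 0.5 * gamma / m * np.sum((dy_pred - dy_true) ** 2)
        cost += 0.5 * lambd / m * sum(np.sum(wi ** 2) for wi in w)
        return cost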
Example #2
    def grad(
        self,
        parameters,
        activations,
        x,
        y_true=None,
        dy_true=None,
        lambd=0.0,
        gamma=0.0,
    ):
        """
        Gradient of cost function for training

        :param x:
        :param parameters:
        :param activations:
        :param y_true:
        :param dy_true:
        :param lambd:
        :param gamma:
        :return:
        """
        y_pred, caches = L_model_forward(x, parameters, activations)
        dy_pred, dy_caches = L_grads_forward(x, parameters, activations)
        grad = L_model_backward(y_pred, y_true, dy_pred, dy_true, caches,
                                dy_caches, lambd, gamma)
        return grad
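Together, cost and grad supply everything a first-order optimizer needs. A minimal, self-contained sketch of how such a pair could drive plain gradient descent, assuming grad returns a dict keyed like parameters with a "d" prefix (a common convention, but an assumption here); the toy gradient below stands in for the network so the snippet runs on its own:

    import numpy as np

    def gradient_descent(parameters, grad_fn, learning_rate=0.1, iterations=200):
        # Repeatedly step every parameter array against its gradient from grad_fn.
        for _ in range(iterations):
            grads = grad_fn(parameters)
            for name in parameters:
                parameters[name] = parameters[name] - learning_rate * grads["d" + name]
        return parameters

    # Toy stand-in for the network's grad(): gradient of the sum of squares of all parameters.
    toy_grad = lambda p: {"d" + name: 2.0 * value for name, value in p.items()}

    params = {"W1": np.array([[3.0, -2.0]]), "b1": np.array([[1.0]])}
    params = gradient_descent(params, toy_grad)
    print(params)  # all values driven toward zero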
Example #3
    def gradient(self, X):
        """
        Predict output(s) given inputs X.

        :param X: inputs to neural network, np array of shape (n_x, m) where n_x = no. inputs, m = no. training examples
        :return: J: prediction, J = np array of shape (n_y, n_x, m) = Jacobian
        """
        assert X.shape[0] == self.number_of_inputs

        number_of_examples = X.shape[1]

        mu_x, sigma_x = self._scale_factors['x']
        mu_y, sigma_y = self._scale_factors['y']

        X_norm = (X - mu_x) / sigma_x

        Y_norm, _ = L_model_forward(X_norm, self.parameters, self.activations)
        J_norm, _ = L_grads_forward(X_norm, self.parameters, self.activations)

        J = (J_norm * sigma_y / sigma_x).reshape(self.number_of_outputs, self.number_of_inputs, number_of_examples)

        return J
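The scaling at the end applies the chain rule to undo the input/output normalization: with x_norm = (x - mu_x) / sigma_x and y = sigma_y * y_norm + mu_y, the derivative satisfies dy/dx = (sigma_y / sigma_x) * dy_norm/dx_norm. A minimal 1-D check of that identity (the function and values below are illustrative, not from the library):

    import numpy as np

    mu_x, sigma_x = 2.0, 3.0
    mu_y, sigma_y = 5.0, 4.0

    f = lambda x: 0.5 * x ** 2  # "true" model in physical units, so df/dx = x

    # The surrogate is evaluated in normalized space: y_norm as a function of x_norm.
    y_norm = lambda x_norm: (f(x_norm * sigma_x + mu_x) - mu_y) / sigma_y

    x = 1.7
    x_norm_value = (x - mu_x) / sigma_x
    eps = 1e-6
    dy_norm_dx_norm = (y_norm(x_norm_value + eps) - y_norm(x_norm_value - eps)) / (2 * eps)

    J = dy_norm_dx_norm * sigma_y / sigma_x  # de-normalize, as in J_norm * sigma_y / sigma_x above
    print(np.isclose(J, x))                  # df/dx = x = 1.7 -> True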