Example #1
0
    def update_weights(self, w, dEdW):
        """
        Apply one Adam-style update step and return the new weights.

        Maintains exponential moving averages of the gradient and the
        squared gradient; when ``self.bias_fix`` is set, the averages are
        bias-corrected (full Adam) before the step is taken.

        Parameters
        ----------
        w : numpy.ndarray
            The weight to be updated.
        dEdW : numpy.ndarray
            The derivative of error with respect to weight.

        Returns
        -------
        numpy.ndarray
            The updated weight values.
        """

        # Exponential moving averages of the gradient and its square.
        grad_avg = self.beta1 * self.accumulated_gradient + (1 - self.beta1) * dEdW
        sq_avg = self.beta2 * self.accumulated_sq_gradient + (1 - self.beta2) * np.square(dEdW)
        self.accumulated_gradient = grad_avg
        self.accumulated_sq_gradient = sq_avg

        if not self.bias_fix:
            # No bias correction: step directly with the raw averages.
            return w - self.learning_rate * grad_avg / (np.sqrt(sq_avg) + 1e-8)

        # Bias-corrected (Adam) step: rescale both averages by their
        # warm-up correction terms before stepping. ``t`` counts steps
        # and is only advanced on this branch, as in the original.
        self.t += 1
        corrected_avg = grad_avg / (1 - (self.beta1 ** self.t))
        corrected_sq = sq_avg / (1 - (self.beta2 ** self.t))
        return w - self.learning_rate * corrected_avg / (np.sqrt(corrected_sq) + 1e-8)
Example #2
0
    def update_weights(self, w, dEdW):
        """
        Apply one Adagrad-style update step and return the new weights.

        The squared gradient is accumulated across calls and used to
        scale the step per-parameter.

        Parameters
        ----------
        w : numpy.ndarray
            The weight to be updated.
        dEdW : numpy.ndarray
            The derivative of error with respect to weight.

        Returns
        -------
        numpy.ndarray
            The updated weight values.
        """

        # Grow the running sum of squared gradients in place.
        self.accumulated_sq_gradient += np.square(dEdW)
        # Per-parameter adaptive scaling; 1e-8 guards against division by zero.
        denominator = np.sqrt(self.accumulated_sq_gradient) + 1e-8
        return w - self.learning_rate * dEdW / denominator
Example #3
0
def mean_squared_error(y_predicted, y_target, return_deriv=False):
    """
    Compute the mean squared error between predicted and target outputs.

    Uses the 1/(2N) convention, so the derivative with respect to the
    prediction is simply ``(y_predicted - y_target) / N``.

    Parameters
    ----------
    y_predicted : numpy.ndarray
        The output predicted by the model.
    y_target : numpy.ndarray
        The expected output.
    return_deriv : bool, optional
        If set to true, the function returns the derivative of the error
        along with the error.

    Returns
    -------
    numpy.float64 : The error.
    numpy.ndarray : The error's derivative, returned only when
        ``return_deriv`` is true.
    """

    # Hoist the residual and sample count; both branches need them.
    residual = y_predicted - y_target
    n_samples = y_target.shape[0]
    error = np.sum(np.square(residual)) / (2. * n_samples)

    if return_deriv:
        return error, residual / n_samples
    return error
Example #4
0
 def value(self, weights, batch_size):
     """
     Return the elementwise elastic-net penalty, averaged over the batch.

     Combines an L1 term (``self.l1``) and an L2 term (``self.l2 / 2``),
     each divided by the batch size. The result has the same shape as
     ``weights``; summation (if any) is left to the caller.
     """
     scale = 1.0 * batch_size
     l1_term = self.l1 * np.absolute(weights) / scale
     l2_term = (self.l2 / 2.0) * np.square(weights) / scale
     return l1_term + l2_term
Example #5
0
 def value(self, weights, batch_size):
     """
     Return the elementwise L2 penalty, averaged over the batch.

     Computes ``(lmda / 2) * weights**2 / batch_size``; the result has
     the same shape as ``weights``.
     """
     penalty = (self.lmda / 2.0) * np.square(weights)
     return penalty / (1.0 * batch_size)