Example #1
    def _newton_rhaphson(self, T, E, precision=1e-5, show_progress=False):
        from lifelines.utils import _smart_search

        def hessian_function(parameters, T, E):
            return np.array([
                [_d_lambda_d_lambda_(parameters, T, E), _d_rho_d_lambda_(parameters, T, E)],
                [_d_rho_d_lambda_(parameters, T, E), _d_rho_d_rho(parameters, T, E)]
            ])

        def gradient_function(parameters, T, E):
            return np.array([
                _lambda_gradient(parameters, T, E),
                _rho_gradient(parameters, T, E)
            ])

        # Initialize the parameters with a smart search over the likelihood; this dramatically improves convergence.
        parameters = _smart_search(_negative_log_likelihood, 2, T, E)
        i = 1
        step_size = 0.9
        max_steps = 50
        converging, completed = True, False
        start = time.time()

        while converging and i < max_steps:
            # Do not overwrite the saved hessian and gradient in case this step produces garbage
            h, g = hessian_function(parameters, T, E), gradient_function(parameters, T, E)

            delta = solve(h, -step_size * g.T)
            if np.any(np.isnan(delta)):
                raise ConvergenceError(
                    "delta contains nan value(s). Convergence halted.")

            parameters += delta

            # Save the hessian as the pending result
            hessian = h

            if show_progress:
                print(
                    "Iteration %d: norm_delta = %.5f, seconds_since_start = %.1f"
                    % (i, norm(delta), time.time() - start))

            if norm(delta) < precision:
                converging = False
                completed = True
            i += 1

        if show_progress and completed:
            print("Convergence completed after %d iterations." % (i))
        if not completed:
            warnings.warn(
                "Newton-Rhapson failed to converge sufficiently in %d steps." %
                max_steps, ConvergenceWarning)

        return parameters, hessian
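
The core of Example #1 is a damped Newton-Raphson update: each iteration solves h * delta = -step_size * g for the step and stops once norm(delta) drops below precision. Below is a minimal, self-contained sketch of the same pattern on a toy quadratic objective; the objective, gradient, and hessian here are illustrative stand-ins, not lifelines code.

    import numpy as np
    from numpy.linalg import norm, solve

    def gradient(p):
        # gradient of f(x, y) = (x - 1)**2 + 2 * (y + 3)**2
        return np.array([2.0 * (p[0] - 1.0), 4.0 * (p[1] + 3.0)])

    def hessian(p):
        # constant Hessian of the quadratic above
        return np.array([[2.0, 0.0], [0.0, 4.0]])

    parameters = np.array([10.0, 10.0])
    step_size, precision = 0.9, 1e-5

    for i in range(1, 50):
        h, g = hessian(parameters), gradient(parameters)
        delta = solve(h, -step_size * g)   # damped Newton step
        parameters += delta
        if norm(delta) < precision:        # same stopping rule as Example #1
            break

    print(parameters)                      # approaches (1, -3)
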
Example #2
    def _newton_rhaphson(self, T, E, precision=1e-5):
        from lifelines.utils import _smart_search

        def jacobian_function(parameters, T, E):
            return np.array([
                [_d_lambda_d_lambda_(parameters, T, E), _d_rho_d_lambda_(parameters, T, E)],
                [_d_rho_d_lambda_(parameters, T, E), _d_rho_d_rho(parameters, T, E)]
            ])

        def gradient_function(parameters, T, E):
            return np.array([
                _lambda_gradient(parameters, T, E),
                _rho_gradient(parameters, T, E)
            ])

        # Initialize the parameters with a smart search over the likelihood; this dramatically improves convergence.
        parameters = _smart_search(_negative_log_likelihood, 2, T, E)

        iter = 1
        step_size = 1.
        converging = True

        while converging and iter < 50:
            # Do not overwrite the saved jacobian and gradient in case this step produces garbage
            j, g = jacobian_function(parameters, T, E), gradient_function(parameters, T, E)

            delta = solve(j, -step_size * g.T)
            if np.any(np.isnan(delta)):
                raise ValueError(
                    "delta contains nan value(s). Convergence halted.")

            parameters += delta

            # Save the jacobian as the pending result
            jacobian = j

            if norm(delta) < precision:
                converging = False
            iter += 1

        self._jacobian = jacobian
        return parameters
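
Unlike Example #1, this variant caps the loop at 50 steps inline, raises a plain ValueError on a NaN step, and keeps the final curvature matrix on the instance as self._jacobian. A common reason for retaining that matrix is the asymptotic standard-error estimate from the observed information. The sketch below is a hypothetical illustration of that use, not lifelines' own API, and it assumes the stored matrix is the Hessian of the negative log-likelihood at the optimum; if it is the Hessian of the log-likelihood itself, negate it first.

    import numpy as np

    def approximate_standard_errors(information_matrix):
        # Asymptotically, Cov(theta_hat) is approximated by the inverse of the
        # observed information, i.e. the Hessian of the negative log-likelihood
        # evaluated at the fitted parameters.
        covariance = np.linalg.inv(information_matrix)
        return np.sqrt(np.diag(covariance))

    # e.g. approximate_standard_errors(fitter._jacobian)   # hypothetical usage
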
Example #3
    def _newton_rhaphson(self, T, E, precision=1e-5):
        from lifelines.utils import _smart_search

        def jacobian_function(parameters, T, E):
            return np.array([
                [_d_lambda_d_lambda_(parameters, T, E), _d_rho_d_lambda_(parameters, T, E)],
                [_d_rho_d_lambda_(parameters, T, E), _d_rho_d_rho(parameters, T, E)]
            ])

        def gradient_function(parameters, T, E):
            return np.array([_lambda_gradient(parameters, T, E), _rho_gradient(parameters, T, E)])

        # Initialize the parameters with a smart search over the likelihood; this dramatically improves convergence.
        parameters = _smart_search(_negative_log_likelihood, 2, T, E)

        iter = 1
        step_size = 1.
        converging = True

        while converging and iter < 50:
            # Do not overwrite the saved jacobian and gradient in case this step produces garbage
            j, g = jacobian_function(parameters, T, E), gradient_function(parameters, T, E)

            delta = solve(j, - step_size * g.T)
            if np.any(np.isnan(delta)):
                raise ValueError("delta contains nan value(s). Convergence halted.")

            parameters += delta

            # Save the jacobian as the pending result
            jacobian = j

            if norm(delta) < precision:
                converging = False
            iter += 1

        self._jacobian = jacobian
        return parameters
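
Example #3 is functionally the same routine as Example #2, only wrapped differently. As a rough cross-check of what the hand-rolled Newton-Raphson is solving, the sketch below fits the same two Weibull parameters on synthetic data with a generic optimizer instead. The parameterization H(t) = (lambda * t) ** rho and the function name are assumptions made for illustration and may not match the _lambda_gradient / _rho_gradient helpers referenced above.

    import numpy as np
    from scipy.optimize import minimize

    def negative_log_likelihood(params, T, E):
        # Weibull negative log-likelihood for right-censored data, assuming the
        # cumulative hazard H(t) = (lambda * t) ** rho.
        lambda_, rho = params
        log_hazard = np.log(rho) + rho * np.log(lambda_) + (rho - 1) * np.log(T)
        cumulative_hazard = (lambda_ * T) ** rho
        return -(E * log_hazard - cumulative_hazard).sum()

    rng = np.random.default_rng(0)
    T = rng.weibull(1.5, size=500)     # synthetic durations, true (lambda, rho) = (1.0, 1.5)
    E = np.ones_like(T)                # no censoring in this toy example

    result = minimize(negative_log_likelihood, x0=[1.0, 1.0], args=(T, E),
                      bounds=[(1e-9, None), (1e-9, None)])
    print(result.x)                    # roughly (1.0, 1.5) for this simulation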