Example #1
File: TDSE.py Project: cationly/TDSE
def CrankNicolson(A, B, psi):
	# March the wavefunction forward in time with the Crank-Nicolson scheme:
	# A * psi[n+1] = B * psi[n]. `nt` (number of time steps) is a module-level global.
	for n in range(nt - 1):
		#psi[n+1,:] = np.linalg.solve(H, psi[n,:]) # dense (non-sparse) solver
		b = B * psi[n, :]
		psi[n + 1, :] = linalg.spsolve(A, b) # sparse solver

	return psi
Example #2
File: TDSE.py Project: cationly/TDSE
def BTCS(A, B, psi):
	# March the wavefunction forward with the backward-time, centered-space (BTCS)
	# scheme: A * psi[n+1] = psi[n]. `nt` is a module-level global.
	for n in range(nt - 1):
		#psi[n+1,:] = np.linalg.solve(H, psi[n,:]) # dense (non-sparse) solver
		psi[n + 1, :] = linalg.spsolve(A, psi[n, :]) # sparse solver

	return psi
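Neither TDSE snippet shows how A and B are assembled. A minimal sketch of one plausible setup, assuming a 1D free-particle Hamiltonian on nx grid points with spacing dx, time step dt, and a Gaussian initial state (these names and values are assumptions, not part of the original TDSE.py):

import numpy as np
from scipy import sparse
from scipy.sparse import linalg

nx, nt = 200, 100    # grid points and time steps (assumed module-level globals)
dx, dt = 0.1, 0.001  # assumed spatial and temporal resolution

# Tridiagonal kinetic-energy operator H = -(1/2) d^2/dx^2 (hbar = m = 1)
main = np.full(nx, 1.0 / dx**2)
off = np.full(nx - 1, -0.5 / dx**2)
H = sparse.diags([off, main, off], [-1, 0, 1])

I = sparse.identity(nx)
# Crank-Nicolson: (I + i*dt/2*H) psi[n+1] = (I - i*dt/2*H) psi[n]
A = (I + 0.5j * dt * H).tocsc()
B = (I - 0.5j * dt * H).tocsc()

psi = np.zeros((nt, nx), dtype=complex)
x = np.arange(nx) * dx
psi[0, :] = np.exp(-(x - x.mean())**2)  # Gaussian initial state
psi = CrankNicolson(A, B, psi)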
Example #3
    def _newton_rhaphson(
        self,
        df,
        events,
        start,
        stop,
        weights,
        show_progress=False,
        step_size=None,
        precision=10e-6,
        max_steps=50,
        initial_point=None,
    ):  # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements
        """
        Newton Rhaphson algorithm for fitting CPH model.

        Parameters
        ----------
        df: DataFrame
        stop_times_events: DataFrame
             meta information about the subjects history
        show_progress: boolean, optional (default: True)
            to show verbose output of convergence
        step_size: float
            > 0 to determine a starting step size in NR algorithm.
        precision: float
            the convergence halts if the norm of delta between
                     successive positions is less than epsilon.

        Returns
        --------
        beta: (1,d) numpy array.
        """
        assert precision <= 1.0, "precision must be less than or equal to 1."

        _, d = df.shape

        # make sure betas are correct size.
        if initial_point is not None:
            beta = initial_point
        else:
            beta = np.zeros((d, ))

        i = 0
        converging = True
        ll, previous_ll = 0, 0
        start_time = time.time()

        step_sizer = StepSizer(step_size)
        step_size = step_sizer.next()

        while converging:
            i += 1

            if self.strata is None:
                h, g, ll = self._get_gradients(df.values, events.values,
                                               start.values, stop.values,
                                               weights.values, beta)
            else:
                g = np.zeros_like(beta)
                h = np.zeros((d, d))
                ll = 0
                for _h, _g, _ll in self._partition_by_strata_and_apply(
                        df, events, start, stop, weights, self._get_gradients,
                        beta):
                    g += _g
                    h += _h
                    ll += _ll

            if i == 1 and np.all(beta == 0):
                # this is a neat optimization, the null partial likelihood
                # is the same as the full partial but evaluated at zero.
                # if the user supplied a non-trivial initial point, we need to delay this.
                self._log_likelihood_null = ll

            if self.penalizer > 0:
                # add the gradient and hessian of the l2 term
                g -= self.penalizer * beta
                h.flat[::d + 1] -= self.penalizer

            try:
                # reusing a piece to make g * inv(h) * g.T faster later
                inv_h_dot_g_T = spsolve(-h, g, sym_pos=True)
            except ValueError as e:
                if "infs or NaNs" in str(e):
                    raise ConvergenceError(
                        """hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
                        e,
                    )
                else:
                    # something else?
                    raise e
            except LinAlgError as e:
                raise ConvergenceError(
                    """Convergence halted due to matrix inversion problems. Suspicion is high colinearity. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
                    e,
                )

            delta = step_size * inv_h_dot_g_T

            if np.any(np.isnan(delta)):
                raise ConvergenceError(
                    """delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""")
            # Save these as pending result
            hessian, gradient = h, g
            norm_delta = norm(delta)
            newton_decrement = g.dot(inv_h_dot_g_T) / 2

            if show_progress:
                print(
                    "\rIteration %d: norm_delta = %.5f, step_size = %.5f, ll = %.5f, newton_decrement = %.5f, seconds_since_start = %.1f"
                    % (i, norm_delta, step_size, ll, newton_decrement,
                       time.time() - start_time),
                    end="",
                )

            # convergence criteria
            if norm_delta < precision:
                converging, completed = False, True
            elif previous_ll != 0 and abs(ll - previous_ll) / (-previous_ll) < 1e-09:
                # this is what R uses by default
                converging, completed = False, True
            elif newton_decrement < 10e-8:
                converging, completed = False, True
            elif i >= max_steps:
                # 50 iterations steps with N-R is a lot.
                # Expected convergence is less than 10 steps
                converging, completed = False, False
            elif step_size <= 0.0001:
                converging, completed = False, False
            elif abs(ll) < 0.0001 and norm_delta > 1.0:
                warnings.warn(
                    "The log-likelihood is getting suspiciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. \
See https://stats.stackexchange.com/questions/11109/how-to-deal-with-perfect-separation-in-logistic-regression",
                    ConvergenceWarning,
                )
                converging, completed = False, False

            step_size = step_sizer.update(norm_delta).next()

            beta += delta
            previous_ll = ll

        self._hessian_ = hessian
        self._score_ = gradient
        self._log_likelihood = ll

        if show_progress and completed:
            print("Convergence completed after %d iterations." % (i))
        elif show_progress and not completed:
            print("Convergence failed. See any warning messages.")

        # report to the user problems that we detect.
        if completed and norm_delta > 0.1:
            warnings.warn(
                "Newton-Rhapson convergence completed but norm(delta) is still high, %.3f. This may imply non-unique solutions to the maximum likelihood. Perhaps there is colinearity or complete separation in the dataset?"
                % norm_delta,
                ConvergenceWarning,
            )
        elif not completed:
            warnings.warn(
                "Newton-Rhapson failed to converge sufficiently in %d steps." %
                max_steps, ConvergenceWarning)

        return beta
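For readers unfamiliar with the iteration these fitters implement, here is a minimal, self-contained Newton-Raphson sketch for maximizing a generic log-likelihood. The toy objective and tolerances are illustrative assumptions, not lifelines code:

import numpy as np
from numpy.linalg import norm, solve

def newton_raphson(gradient_hessian, beta0, precision=1e-6, max_steps=50):
    # Repeatedly solve -H @ delta = g and step, as the fitters above do.
    beta = beta0.copy()
    for i in range(max_steps):
        g, h = gradient_hessian(beta)  # gradient and Hessian at beta
        delta = solve(-h, g)           # Newton step
        beta += delta
        if norm(delta) < precision:    # same convergence criterion as above
            return beta
    raise RuntimeError("failed to converge in %d steps" % max_steps)

# Toy example: maximize ll(beta) = -0.5 * ||beta - 1||^2,
# whose gradient is (1 - beta) and whose Hessian is -I.
grad_hess = lambda b: (1.0 - b, -np.eye(b.size))
print(newton_raphson(grad_hess, np.zeros(3)))  # converges to [1. 1. 1.]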
Example #4
    def _newton_rhaphson(self,
                         X,
                         T,
                         E,
                         weights=None,
                         initial_beta=None,
                         step_size=None,
                         precision=10e-6,
                         show_progress=True,
                         max_steps=50):
        """
        Newton Rhaphson algorithm for fitting CPH model.

        Note that data is assumed to be sorted on T!

        Parameters:
            X: (n,d) Pandas DataFrame of observations.
            T: (n) Pandas Series representing observed durations.
            E: (n) Pandas Series representing death events.
            weights: (n) an iterable representing weights per observation.
            initial_beta: (1,d) numpy array of initial starting point for
                          NR algorithm. Default 0.
            step_size: float > 0.001 to determine a starting step size in NR algorithm.
            precision: the convergence halts if the norm of delta between
                     successive positions is less than epsilon.
            show_progress: since the fitter is iterative, show convergence
                     diagnostics.
            max_steps: the maximum number of interations of the Newton-Rhaphson algorithm.

        Returns:
            beta: (1,d) numpy array.
        """
        self.path = []
        assert precision <= 1., "precision must be less than or equal to 1."
        n, d = X.shape

        # make sure betas are correct size.
        if initial_beta is not None:
            assert initial_beta.shape == (d, 1)
            beta = initial_beta
        else:
            beta = np.zeros((d, 1))

        step_sizer = StepSizer(step_size)
        step_size = step_sizer.next()

        # Method of choice is just efron right now
        if self.tie_method == 'Efron':
            get_gradients = self._get_efron_values
        else:
            raise NotImplementedError("Only Efron is available.")

        i = 0
        converging = True
        ll, previous_ll = 0, 0
        start = time.time()

        while converging:
            self.path.append(beta.copy())
            i += 1
            if self.strata is None:
                h, g, ll = get_gradients(X.values, beta, T.values, E.values,
                                         weights.values)
            else:
                g = np.zeros_like(beta).T
                h = np.zeros((beta.shape[0], beta.shape[0]))
                ll = 0
                for strata in np.unique(X.index):
                    stratified_X = X.loc[[strata]]
                    stratified_T = T.loc[[strata]]
                    stratified_E = E.loc[[strata]]
                    stratified_W = weights.loc[[strata]]
                    _h, _g, _ll = get_gradients(stratified_X.values, beta,
                                                stratified_T.values,
                                                stratified_E.values,
                                                stratified_W.values)
                    g += _g
                    h += _h
                    ll += _ll

            if self.penalizer > 0:
                # add the gradient and hessian of the l2 term
                g -= self.penalizer * beta.T
                h.flat[::d + 1] -= self.penalizer

            # reusing a piece to make g * inv(h) * g.T faster later
            try:
                inv_h_dot_g_T = spsolve(-h, g.T, sym_pos=True)
            except ValueError as e:
                if 'infs or NaNs' in str(e):
                    raise ConvergenceError(
                        """hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""")
                else:
                    # something else?
                    raise e

            delta = step_size * inv_h_dot_g_T

            if np.any(np.isnan(delta)):
                raise ConvergenceError(
                    """delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""")

            # Save these as pending result
            hessian, gradient = h, g
            norm_delta = norm(delta)

            # reusing an above piece to make g * inv(h) * g.T faster.
            newton_decrement = g.dot(inv_h_dot_g_T) / 2

            if show_progress:
                print(
                    "Iteration %d: norm_delta = %.5f, step_size = %.5f, ll = %.5f, newton_decrement = %.5f, seconds_since_start = %.1f"
                    % (i, norm_delta, step_size, ll, newton_decrement,
                       time.time() - start))

            # convergence criteria
            if norm_delta < precision:
                converging, completed = False, True
            elif previous_ll != 0 and abs(ll - previous_ll) / (
                    -previous_ll) < 1e-09:
                # this is what R uses by default
                converging, completed = False, True
            elif newton_decrement < precision:
                converging, completed = False, True
            elif i >= max_steps:
                # 50 iterations steps with N-R is a lot.
                # Expected convergence is ~10 steps
                converging, completed = False, False
            elif step_size <= 0.00001:
                converging, completed = False, False
            elif abs(ll) < 0.0001 and norm_delta > 1.0:
                warnings.warn(
                    "The log-likelihood is getting suspciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. \
See https://stats.idre.ucla.edu/other/mult-pkg/faq/general/faqwhat-is-complete-or-quasi-complete-separation-in-logisticprobit-regression-and-how-do-we-deal-with-them/ ",
                    ConvergenceWarning)
                converging, completed = False, False

            step_size = step_sizer.update(norm_delta).next()

            beta += delta
            previous_ll = ll

        self._hessian_ = hessian
        self._score_ = gradient
        self._log_likelihood = ll

        if show_progress and completed:
            print("Convergence completed after %d iterations." % (i))
        if not completed:
            warnings.warn(
                "Newton-Rhapson failed to converge sufficiently in %d steps." %
                max_steps, ConvergenceWarning)

        return beta
Example #5
Out[23]: 
<1000x1000 sparse matrix of type '<class 'numpy.float64'>'
	with 1199 stored elements in LInked List format>
# Using sparse linear algebra to solve a very large linear system
from scipy.sparse import linalg

A.tocsr()
Out[25]: 
<1000x1000 sparse matrix of type '<class 'numpy.float64'>'
	with 1199 stored elements in Compressed Sparse Row format>

A = A.tocsr()

b = np.random.rand(1000)

linalg.spsolve(A, b)
Out[28]: 
array([ 1.02915228e+03, -7.85583876e+02,  7.61953672e-01,  3.45776825e-01,
        3.83563609e-01,  1.28994524e+01,  6.50914349e-01,  2.19035486e+00,
        1.13218127e+00,  1.51082314e-01,  4.62628171e-01,  5.83295740e-01,
        3.91205168e-02,  1.88571795e-01,  1.86687801e+00,  2.65111610e+00,
        1.68807545e-01,  5.73963129e-01,  3.52168601e-01,  1.52507698e+00,
        8.04053593e-01,  1.20538926e+00,  7.07025229e-01,  2.80672030e-01,
        1.57497657e+00,  6.56527222e-01,  1.00865551e+00,  3.54420461e-01,
        7.27046400e-01,  3.51571169e+00,  5.54786769e-01,  2.32975265e+00,
        7.38424849e-01,  7.83093560e+00,  2.38245530e+00,  3.24374834e-01,
        1.38672018e-01,  1.43731479e+00,  9.25715643e-01,  2.45526256e+00,
        1.14452790e+00,  1.59577685e+00,  7.17544015e-01,  2.43724628e+00,
        3.06250473e+00,  7.22986631e-01,  8.60082386e-01,  1.29276469e+00,
        1.94882135e-01,  3.05958909e-01,  1.09775666e+00,  1.40682936e+00,
        3.44682165e+00,  3.68727951e+00,  3.09483754e+00,  1.73739779e+00,
        ...])
Example #6
def ALS(train_set,
        lambda_val=0.1,
        alpha=40,
        iterations=10,
        rank_size=20,
        seed=0):

    ### Define the confidence matrix
    conf = (alpha * train_set)

    num_user = conf.shape[0]
    num_item = conf.shape[1]

    ## Fix the seed and start from random X, Y feature vectors: X = User_Vector, Y = Item_Vector
    rstate = np.random.RandomState(seed)

    ## draw the initial factor samples from a normal distribution
    X = sparse.csr_matrix(rstate.normal(size=(num_user, rank_size)))
    Y = sparse.csr_matrix(rstate.normal(size=(num_item, rank_size)))

    X_eye = sparse.eye(num_user)
    Y_eye = sparse.eye(num_item)
    lambda_eye = lambda_val * sparse.eye(rank_size)

    ## run the alternating least-squares iterations
    for _ in range(iterations):
        yTy = Y.T.dot(Y)
        xTx = X.T.dot(X)
        ## user vec
        for u in range(num_user):
            conf_samp = conf[u, :].toarray()
            pref = conf_samp.copy()

            ## binarize the preferences: only confidence >= 10 counts as an interaction
            ## (the original zeroed values 1-9 one by one to dodge a memory error)
            pref[pref < 10] = 0
            pref[pref >= 10] = 1

            ## define the CuI diagonal and set up the least-squares system for X[u]
            CuI = sparse.diags(conf_samp, [0])
            yTCuIY = Y.T.dot(CuI).dot(Y)
            yTCupu = Y.T.dot(CuI + Y_eye).dot(pref.T)

            X[u] = spsolve(yTy + yTCuIY + lambda_eye, yTCupu)

        ## item vec
        for i in range(num_item):
            conf_samp = conf[:, i].T.toarray()
            pref = conf_samp.copy()
            ## binarize the preferences, as on the user side
            pref[pref < 10] = 0
            pref[pref >= 10] = 1

            CiI = sparse.diags(conf_samp, [0])
            xTCiIX = X.T.dot(CiI).dot(X)
            xTCiPi = X.T.dot(CiI + X_eye).dot(pref.T)
            Y[i] = spsolve(xTx + xTCiIX + lambda_eye, xTCiPi)

    return X, Y.T
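A minimal usage sketch for ALS above, assuming the imports the function relies on (numpy as np, scipy.sparse as sparse, and scipy.sparse.linalg.spsolve) plus a small random interaction matrix; the shapes and hyperparameters are illustrative:

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import spsolve

# Toy implicit-feedback matrix: 30 users x 20 items, mostly zeros.
rng = np.random.RandomState(0)
train_set = sparse.random(30, 20, density=0.1, format='csr', random_state=rng) * 10

X, Yt = ALS(train_set, lambda_val=0.1, alpha=40, iterations=2, rank_size=5)
print(X.shape, Yt.shape)  # (30, 5) and (5, 20)

# Predicted preference scores for user 0 across all items:
scores = np.asarray(X[0].dot(Yt).todense()).ravel()
print(scores.argsort()[::-1][:5])  # indices of the top-5 recommended items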
Example #8
    def _newton_rhaphson(self, df, stop_times_events, weights, show_progress=False, step_size=None, precision=10e-6,
                         max_steps=50):
        """
        Newton Rhaphson algorithm for fitting CPH model.

        Note that data is assumed to be sorted on T!

        Parameters:
            df: (n, d) Pandas DataFrame of observations
            stop_times_events: (n, d) Pandas DataFrame of meta information about the subjects history
            show_progress: True to show verbous output of convergence
            step_size: float > 0 to determine a starting step size in NR algorithm.
            precision: the convergence halts if the norm of delta between
                     successive positions is less than epsilon.

        Returns:
            beta: (1,d) numpy array.
        """
        assert precision <= 1., "precision must be less than or equal to 1."

        n, d = df.shape

        # make sure betas are correct size.
        beta = np.zeros((d, 1))

        i = 0
        converging = True
        ll, previous_ll = 0, 0
        start = time.time()

        step_sizer = StepSizer(step_size)
        step_size = step_sizer.next()

        while converging:
            i += 1
            h, g, ll = self._get_gradients(df, stop_times_events, weights, beta)

            if self.penalizer > 0:
                # add the gradient and hessian of the l2 term
                g -= self.penalizer * beta.T
                h.flat[::d + 1] -= self.penalizer

            try:
                # reusing a piece to make g * inv(h) * g.T faster later
                inv_h_dot_g_T = spsolve(-h, g.T, sym_pos=True)
            except ValueError as e:
                if 'infs or NaNs' in str(e):
                    raise ConvergenceError("""hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""")
                else:
                    # something else?
                    raise e

            delta = step_size * inv_h_dot_g_T

            if np.any(np.isnan(delta)):
                raise ConvergenceError("""delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""")
            # Save these as pending result
            hessian, gradient = h, g
            norm_delta = norm(delta)
            newton_decrement = g.dot(inv_h_dot_g_T)/2

            if show_progress:
                print("Iteration %d: norm_delta = %.5f, step_size = %.5f, ll = %.5f, newton_decrement = %.5f, seconds_since_start = %.1f" % (i, norm_delta, step_size, ll, newton_decrement, time.time() - start))

            # convergence criteria
            if norm_delta < precision:
                converging, completed = False, True
            elif previous_ll != 0 and abs(ll - previous_ll) / (-previous_ll) < 1e-09:
                # this is what R uses by default
                converging, completed = False, True
            elif newton_decrement < 10e-8:
                converging, completed = False, True
            elif i >= max_steps:
                # 50 iterations steps with N-R is a lot.
                # Expected convergence is less than 10 steps
                converging, completed = False, False
            elif step_size <= 0.0001:
                converging, completed = False, False
            elif abs(ll) < 0.0001 and norm_delta > 1.0:
                warnings.warn("The log-likelihood is getting suspciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. \
See https://stats.idre.ucla.edu/other/mult-pkg/faq/general/faqwhat-is-complete-or-quasi-complete-separation-in-logisticprobit-regression-and-how-do-we-deal-with-them/ ", ConvergenceWarning)
                converging, completed = False, False

            step_size = step_sizer.update(norm_delta).next()

            beta += delta
            previous_ll = ll

        self._hessian_ = hessian
        self._score_ = gradient
        self._log_likelihood = ll

        if show_progress and completed:
            print("Convergence completed after %d iterations." % (i))
        if not completed:
            warnings.warn("Newton-Rhapson failed to converge sufficiently in %d steps." % max_steps, ConvergenceWarning)

        return beta
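These _newton_rhaphson variants are private internals; in practice they are reached through the public fitter API. A short sketch using lifelines' bundled Rossi recidivism dataset (assuming a lifelines version matching these snippets):

from lifelines import CoxPHFitter
from lifelines.datasets import load_rossi

rossi = load_rossi()
cph = CoxPHFitter(penalizer=0.0)
cph.fit(rossi, duration_col='week', event_col='arrest', show_progress=True)
cph.print_summary()  # coefficients found by the Newton-Raphson fit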
Example #9
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import diags, linalg

#  Reset the random seed
np.random.seed(2)

#  Define the initial stock price, the number of days, the interest rate and the volatility
N_days = 5

S0 = 100.0
r = 0.01 / 252
dt = 1.0
sigma = 0.3 / np.sqrt(252)

epsilon = np.random.normal(size=N_days)

#  For the diags command, we need to define the entries on the diagonals.
Lambda = r * dt + np.sqrt(dt) * sigma * epsilon
ones = -np.ones(N_days + 1)
ones[0] = 1  # the first equation pins S[0] = S0
L = Lambda + 1  # daily growth factors 1 + Lambda go on the sub-diagonal

#  Build the matrix
M = diags([L, ones], [-1, 0], format='csc')
Y = np.zeros(N_days + 1)
Y[0] = S0

#  Solve the system
S = linalg.spsolve(M, Y)
plt.plot(S)
plt.grid(True)
plt.xlabel('Days')
plt.ylabel('Stock Price')
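The bidiagonal system above just encodes the day-by-day recurrence S[n] = (1 + Lambda[n-1]) * S[n-1] with S[0] = S0, so the sparse solve should agree with a plain loop; a quick sanity check under the same variable names:

#  Verify: the triangular solve reproduces the explicit recurrence.
S_loop = np.zeros(N_days + 1)
S_loop[0] = S0
for n in range(1, N_days + 1):
    S_loop[n] = (1 + Lambda[n - 1]) * S_loop[n - 1]

assert np.allclose(S, S_loop)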
Example #10
# ## Sparse Linear Algebra
# SciPy has some routines for computing with sparse and potentially very large matrices.
# The necessary tools are in the submodule scipy.sparse.
#
# Here is an example of how to construct a large matrix:
import numpy as np
from scipy import sparse

# Row-based linked list sparse matrix
A = sparse.lil_matrix((1000, 1000))
print(A)

A[0, :100] = np.random.rand(100)
A[1, 100:200] = A[0, :100]
A.setdiag(np.random.rand(1000))  # setdiag modifies A in place and returns None
print(A)

# **Linear Algebra for Sparse Matrices**
from scipy.sparse import linalg

# Convert this matrix to Compressed Sparse Row format.
print(A.tocsr())
A = A.tocsr()
b = np.random.rand(1000)
print(linalg.spsolve(A, b))

# There is a lot more that SciPy is capable of, such as Fourier Transforms, Bessel Functions, etc...
#
# You can reference the Documentation for more details!
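As a small taste of that breadth, a couple of one-liners for Fourier transforms and Bessel functions, using the scipy.fft and scipy.special modules:

import numpy as np
from scipy.fft import fft
from scipy.special import jv

x = np.linspace(0, 2 * np.pi, 8, endpoint=False)
print(fft(np.sin(x)))   # discrete Fourier transform of one sine period
print(jv(0, 2.404825))  # Bessel function J0, nearly zero at its first root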
Example #11
    name = (f"SolveLp{e}.txt")

    fid = open(name,"w")
    
    for i in Ns:

        print(f"i = {i}")    
    
        t1 = perf_counter()
        
        A = mlp(i)
        B = np.ones(i)
        
        t2 = perf_counter()
        
        C = spsolve(A,B)
        
        t3 = perf_counter()
        

        ens = t2 - t1
        sol = t3 - t2
        

        Te.append(ens) 
        Ts.append(sol)
    
        fid.write(f"{i} {ens} {sol}\n")
    

        fid.flush()
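The snippet never defines mlp, Ns, Te, Ts, or e; a hypothetical stand-in that would make it runnable is a 1D Laplacian assembler (purely an illustrative assumption, not the original function):

import numpy as np
from time import perf_counter
from scipy import sparse
from scipy.sparse.linalg import spsolve

def mlp(n):
    # Hypothetical stand-in: assemble an n x n 1D Laplacian in CSC format.
    main = 2.0 * np.ones(n)
    off = -np.ones(n - 1)
    return sparse.diags([off, main, off], [-1, 0, 1], format='csc')

e = 0                    # assumed file-name suffix
Ns = [100, 1000, 10000]  # assumed benchmark sizes
Te, Ts = [], []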
Example #12
import numpy as np
from scipy import linalg

#Solve a 2x2 linear system A x = b with the dense solver
arr4 = np.arange(4, 8).reshape(2, 2)
print(arr4)
arr5 = np.arange(0, 2).reshape(2, 1)
print(arr5)
print(linalg.solve(arr4, arr5))
from scipy import sparse
print(sparse.lil_matrix((50, 50)))
#To do linear algebra on sparse matrices, use the sparse and sparse.linalg modules
from scipy.sparse import linalg
#Note: spsolve expects a sparse (CSC/CSR) matrix and a vector or sparse right-hand
#side, and np.arange(4, 13).reshape(3, 3) is singular (its rows are linearly
#dependent), so perturb the diagonal to make the system solvable.
arr1 = sparse.csc_matrix(np.arange(4, 13).reshape(3, 3) + np.eye(3))
print(arr1)
arr2 = np.arange(0, 3)
print(arr2)
print(linalg.spsolve(arr1, arr2))
#import numpy as np
#x=np.array(np.arange(0,4))
#print(np.exp(x))
#print(np.log(x))
#print(np.std(x))
#print(x.sum())
#To find the integral of a function using python
import scipy.integrate  #SINGLE INTEGRAL
#a=lambda x:x**2
#print(scipy.integrate.quad(a,2,4))
x = int(input("enter any number"))
y = int(input("enter any number"))
b = lambda z: x**(y**(1 / 2))  # note: the integrand ignores z, so this integrates a constant
print(scipy.integrate.quad(b, 3, 6))