Example #1
    def updateParams(self,
                     initial_dists,
                     transition_dists,
                     emission_dists,
                     group_graphs=None,
                     compute_marginal=True):

        assert isinstance(initial_dists,
                          dict), 'Make a dict that maps groups to parameters'
        assert isinstance(transition_dists,
                          dict), 'Make a dict that maps groups to parameters'
        assert isinstance(emission_dists,
                          dict), 'Make a dict that maps groups to parameters'

        # Ignore the warnings raised when an entry of a dist is 0 (log(0) -> -inf)
        with np.errstate(divide='ignore', invalid='ignore'):
            log_initial_dists = {}
            for group, dist in initial_dists.items():
                log_initial_dists[group] = np.log(dist)

            log_transition_dists = {}
            for group, dists in transition_dists.items():
                log_transition_dists[group] = [np.log(dist) for dist in dists]

            log_emission_dists = {}
            for group, dist in emission_dists.items():
                log_emission_dists[group] = np.log(dist)

        self.updateNatParams(log_initial_dists,
                             log_transition_dists,
                             log_emission_dists,
                             group_graphs=group_graphs,
                             compute_marginal=compute_marginal)
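The errstate guard exists because zero entries in a distribution give np.log(0) = -inf, which is a perfectly valid natural parameter here. A hypothetical call, assuming an instance `hmm` of the surrounding class with two groups of three latent states and two observation symbols (all names below are illustrative):

import numpy as np

# `hmm` is an assumed model instance; group keys 0 and 1 are illustrative.
initial_dists = {0: np.ones(3) / 3, 1: np.array([1.0, 0.0, 0.0])}  # zeros allowed
transition_dists = {0: [np.ones((3, 3)) / 3], 1: [np.eye(3)]}
emission_dists = {0: np.ones((3, 2)) / 2, 1: np.ones((3, 2)) / 2}

hmm.updateParams(initial_dists, transition_dists, emission_dists)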
Example #2
    def fit(self, X, x, c=None, n=None, t=None, baseline='Fleming-Harrington', init=[]):
        x, c, n, t = surpyval.xcnt_handler(x, c, n, t, group_and_sort=False)

        if init == []:
            init = self.phi_init(X)
        else:
            init = np.array(init)

        if baseline == 'Breslow':
            fun = lambda params: self.neg_ll_cox(X, x, c, n, *params)
        else:
            self.baseline = nonparametric_dists[baseline].fit(x, c, n, t)
            fun = lambda params: self.neg_ll(X, x, c, n, *params)
        
        jac = jacobian(fun)
        hess = hessian(fun)

        with np.errstate(all='ignore'):
            res = minimize(fun, init, jac=jac)
            res = minimize(fun, res.x, jac=jac, method='TNC', tol=1e-20)

        params = res.x
        se = np.sqrt(np.diag(inv(hess(res.x))))

        return {'params': params, 'exp(param)': np.exp(params), 'se': se}
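The standard errors come from the observed information matrix: the inverse Hessian of the negative log-likelihood at the optimum. A minimal, self-contained illustration of that step on a one-parameter quadratic, assuming `hessian` is autograd's and `inv` is numpy.linalg.inv as the snippet implies:

import autograd.numpy as np
from autograd import hessian
from numpy.linalg import inv

nll = lambda p: 2.0 * (p[0] - 1.0) ** 2        # curvature (second derivative) = 4
H = hessian(nll)(np.array([1.0]))              # observed information, shape (1, 1)
se = np.sqrt(np.diag(inv(H)))                  # -> array([0.5])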
Example #3
def expected_improvement(x, gaussian_process, evaluated_loss):
    """ expected_improvement

    Expected improvement acquisition function.

    Arguments:
    ----------
        x: array-like, shape = [n_samples, n_hyperparams]
            The point for which the expected improvement needs to be computed.
        gaussian_process: GaussianProcessRegressor object.
            Gaussian process trained on previously evaluated hyperparameters.
        evaluated_loss: Numpy array.
            Numpy array that contains the values of the loss function for the previously
            evaluated hyperparameters.
    """

    x_to_predict = x.reshape(1, -1)

    mu, sigma = gaussian_process.predict(x_to_predict, return_std=True)

    loss_optimum = np.min(evaluated_loss)

    # In case sigma equals zero
    with np.errstate(divide='ignore'):
        Z = (mu - loss_optimum) / sigma
        expected_improvement = (
            mu - loss_optimum) * norm.cdf(Z) + sigma * norm.pdf(Z)
        expected_improvement[sigma == 0.0] = 0.0

    return expected_improvement
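A usage sketch, assuming `np` is numpy, `norm` is scipy.stats.norm, and the surrogate is scikit-learn's GaussianProcessRegressor (whose predict(..., return_std=True) signature matches the call above); the training data is made up:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

X_train = np.random.uniform(-2, 2, size=(10, 1))     # previously evaluated hyperparameters
y_train = np.sin(3 * X_train).ravel()                # their observed losses
gp = GaussianProcessRegressor().fit(X_train, y_train)

ei = expected_improvement(np.array([0.5]), gp, y_train)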
Example #4
def HMMModelTest():

    with np.errstate(under='ignore',
                     divide='raise',
                     over='raise',
                     invalid='raise'):

        T = 10
        D_latent = 5
        D_obs = 4
        meas = 2
        size = 3

        alpha_0 = np.random.random(D_latent) + 1
        alpha = np.random.random((D_latent, D_latent)) + 1
        L = np.random.random((D_latent, D_obs)) + 1

        params = {'alpha_0': alpha_0, 'alpha': alpha, 'L': L}

        hmm = HMMModel(**params)

        _, ys = HMMModel.generate(T=T,
                                  latentSize=D_latent,
                                  obsSize=D_obs,
                                  measurements=meas,
                                  size=size)

        hmm.fit(ys=ys,
                method='gibbs',
                nIters=500,
                burnIn=200,
                skip=2,
                verbose=True)
        marginal = hmm.state.ilog_marginal(ys)
        print('\nParams')
        for p in hmm.state.params:
            print(np.round(p, decimals=3))
            print()
        print('MARGINAL', marginal)

        hmm.fit(ys=ys,
                method='EM',
                nIters=1000,
                monitorMarginal=10,
                verbose=False)
        marginal = hmm.state.ilog_marginal(ys)
        print('\nParams')
        for p in hmm.state.params:
            print(np.round(p, decimals=3))
            print()
        print('MARGINAL', marginal)

        hmm.fit(ys=ys, method='cavi', maxIters=1000, verbose=False)
        elbo = hmm.state.iELBO(ys)
        print('\nPrior mean field params')
        for p in hmm.state.prior.mf_params:
            print(np.round(p, decimals=3))
            print()
        print('ELBO', elbo)
Example #5
def testHMMBasic():
    with np.errstate(under='ignore',
                     divide='raise',
                     over='raise',
                     invalid='raise'):
        T = 40
        D_latent = 3
        D_obs = 2
        meas = 4
        size = 5

        initialDist = Dirichlet.generate(D=D_latent)
        transDist = TransitionDirichletPrior.generate(D_in=D_latent,
                                                      D_out=D_latent)
        emissionDist = TransitionDirichletPrior.generate(D_in=D_latent,
                                                         D_out=D_obs)

        state = HMMState(initialDist=initialDist,
                         transDist=transDist,
                         emissionDist=emissionDist)

        _, ys = HMMState.generate(measurements=meas,
                                  T=T,
                                  D_latent=D_latent,
                                  D_obs=D_obs,
                                  size=size)

        kS = int(np.random.random() * T / 10) + 2
        knownStates = np.random.choice(T, kS)
        knownStates = np.vstack(
            (knownStates, np.random.choice(D_latent,
                                           knownStates.shape[0]))).reshape(
                                               (2, -1)).T

        # Sort and remove duplicates
        knownStates = np.array(sorted(knownStates, key=lambda x: x[0]))
        knownStates = knownStates[1:][~(np.diff(knownStates[:, 0]) == 0)]

        xNoCond, ysNoCond = state.isample(T=T, measurements=meas, size=size)
        xForward, yForward = state.isample(ys=ys,
                                           knownLatentStates=knownStates)
        xBackward, yBackward = state.isample(ys=ys, forwardFilter=False)

        state.ilog_likelihood((xNoCond, ysNoCond))
        state.ilog_likelihood((xForward, yForward))
        state.ilog_likelihood((xBackward, yBackward), forwardFilter=False)

        state.ilog_likelihood((xNoCond, ysNoCond), conditionOnY=True)
        state.ilog_likelihood((xForward, yForward),
                              knownLatentStates=knownStates,
                              conditionOnY=True)
        state.ilog_likelihood((xBackward, yBackward),
                              forwardFilter=False,
                              conditionOnY=True)

        print('Done with basic HMM state test')
Example #6
def stateAndModelTests():
    with np.errstate(all='raise'):

        D_latent = 5
        D_obs = 7

        T = 15
        M = 4
        HMMParams = {
            'alpha_0': np.random.random(D_latent) + 1,
            'alpha_pi': np.random.random((D_latent, D_latent)) + 1,
            'alpha_L': np.random.random((D_latent, D_obs)) + 1
        }

        LDSParams = {
            'mu_0': np.random.random(D_latent),
            'kappa_0': np.random.random() * D_latent,
            'psi_0': InverseWishart.generate(D=D_latent),
            'nu_0': D_latent,
            'M_trans': np.random.random((D_latent, D_latent)) * 0.01,
            'V_trans': InverseWishart.generate(D=D_latent),
            'psi_trans': InverseWishart.generate(D=D_latent),
            'nu_trans': D_latent,
            'M_emiss': np.random.random((D_obs, D_latent)) * 0.01,
            'V_emiss': InverseWishart.generate(D=D_latent),
            'psi_emiss': InverseWishart.generate(D=D_obs),
            'nu_emiss': D_obs
        }

        u = np.random.random((T, D_latent))
        nBad = int(np.random.random() * T)
        badMask = np.random.choice(T, nBad)
        u[badMask] = np.nan
        u = None

        hmmPrior = HMMDirichletPrior(**HMMParams)
        hmmState = HMMState(prior=hmmPrior)

        ldsPrior = LDSMNIWPrior(**LDSParams)
        ldsState = LDSState(prior=ldsPrior)

        testsForDistWithoutPrior(hmmPrior)
        testsForDistWithoutPrior(ldsPrior)
        testForDistWithPrior(hmmState, T=T)
        testForDistWithPrior(hmmState, T=T, fromStats=True)
        testForDistWithPrior(ldsState,
                             T=T,
                             measurements=M,
                             u=u,
                             stabilize=True)
        testForDistWithPrior(ldsState,
                             T=T,
                             measurements=M,
                             u=u,
                             stabilize=True,
                             fromStats=True)
Example #7
def log_sum_exp_loo(x):
    '''
    Log sum exp function with the ability to compute automatic gradients
    - x (numpy array)
    '''

    x_max = anp.max(x, axis=0)
    with anp.errstate(divide='ignore'):
        out = anp.log(anp.sum(anp.exp(x - x_max), axis=0))
    out += x_max

    return out
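The point of writing this with autograd's numpy (anp) is that the function stays differentiable. A small check; the gradient of log-sum-exp is the softmax of its input:

import autograd.numpy as anp
from autograd import grad

x = anp.array([-1.0, 0.0, 2.0])
print(log_sum_exp_loo(x))         # stable log(sum(exp(x)))
print(grad(log_sum_exp_loo)(x))   # softmax(x), via automatic differentiation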
Example #8
    def _compute_sandwich_errors(self, T, E, weights, X):
        with np.errstate(all="ignore"):
            # Numerical warnings are ignored here; if something is truly
            # wrong, convergence will fail catastrophically elsewhere.

            ll_gradient = grad(self._negative_log_likelihood)
            params = self.params_.values
            n_params = params.shape[0]
            J = np.zeros((n_params, n_params))

            for t, e, w, x in zip(T, E, weights, X):
                score_vector = ll_gradient(params, t, e, w, x)
                J += np.outer(score_vector, score_vector)

            return self.variance_matrix_ @ J @ self.variance_matrix_
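This is the classic sandwich (robust) covariance, bread-meat-bread: J accumulates outer products of per-observation score vectors (the meat) and self.variance_matrix_ plays the role of the inverse observed information (the bread). A self-contained sketch with hypothetical values:

import numpy as np

scores = np.array([[0.3, -0.1], [-0.2, 0.4], [0.1, 0.05]])  # hypothetical per-observation gradients
H = np.array([[2.0, 0.3], [0.3, 1.5]])                      # hypothetical observed information

bread = np.linalg.inv(H)                          # ~ self.variance_matrix_
meat = sum(np.outer(s, s) for s in scores)        # ~ J
robust_cov = bread @ meat @ bread                 # the sandwich estimate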
Example #9
def LDSModelTest():
    with np.errstate(all='raise'), scipy.special.errstate(all='raise'):
        T = 10
        D_latent = 2
        D_obs = 3
        meas = 1
        size = 1

        lds = LDSModel(**LDSModel._genericParams(D_latent, D_obs))

        u = np.random.random((T, D_latent))
        nBad = int(np.random.random() * T)
        badMask = np.random.choice(T, nBad)
        u[badMask] = np.nan
        u = None

        (_, ys), tru = LDSModel.generate(T=T,
                                         latentSize=D_latent,
                                         obsSize=D_obs,
                                         measurements=meas,
                                         size=size,
                                         stabilize=True,
                                         returnTrueParams=True)

        # Have absolutely no idea what's wrong with Gibbs sampling here...
        # Going to move on to EM and CAVI and see if I can find the bug
        # lds.fit( ys=ys,u=u, method='gibbs', nIters=500, burnIn=200, skip=2, verbose=True )
        # marginal = lds.state.ilog_marginal( ys )
        # print( '\nParams' )
        # for p in lds.state.params:
        #     print( np.round( p, decimals=3 ) )
        #     print()
        # print( 'MARGINAL', marginal )

        # EM has the same problem; disabled for now
        # lds.fit( ys=ys, u=u, method='EM', nIters=100000, monitorMarginal=10, verbose=False )
        # marginal = lds.state.ilog_marginal( ys )
        # print( '\nParams' )
        # for p in lds.state.params:
        #     print( np.round( p, decimals=3 ) )
        #     print()
        # print( 'MARGINAL', marginal )

        lds.fit(ys=ys, u=u, method='cavi', maxIters=1000, verbose=False)
        elbo = lds.state.iELBO(ys)
        print('\nPrior mean field params')
        for p in lds.state.prior.mf_params:
            print(np.round(p, decimals=3))
            print()
        print('ELBO', elbo)
Example #10
def testLDSSampleStats():

    with np.errstate(all='raise'), scipy.special.errstate(all='raise'):

        T = 40
        D_latent = 3
        D_obs = 2
        meas = 4
        size = 5

        A, sigma = MatrixNormalInverseWishart.generate(D_in=D_latent,
                                                       D_out=D_latent)
        C, R = MatrixNormalInverseWishart.generate(D_in=D_latent, D_out=D_obs)
        mu0, sigma0 = NormalInverseWishart.generate(D=D_latent)

        state = LDSState(A=A, sigma=sigma, C=C, R=R, mu0=mu0, sigma0=sigma0)

        u = np.random.random((T, D_latent))
        nBad = int(np.random.random() * T)
        badMask = np.random.choice(T, nBad)
        u[badMask] = np.nan

        _, ys = LDSState.generate(measurements=meas,
                                  T=T,
                                  D_latent=D_latent,
                                  D_obs=D_obs,
                                  size=size,
                                  stabilize=True)

        xNoCond, ysNoCond = state.isample(u=u,
                                          T=T,
                                          measurements=meas,
                                          size=size,
                                          stabilize=True)
        xForward, yForward = state.isample(u=u, ys=ys)
        xBackward, yBackward = state.isample(u=u, ys=ys, forwardFilter=False)

        stats1 = state.isample(u=u,
                               T=T,
                               measurements=meas,
                               size=size,
                               stabilize=True,
                               returnStats=True)
        stats2 = state.isample(u=u, ys=ys, returnStats=True)
        stats3 = state.isample(u=u,
                               ys=ys,
                               forwardFilter=False,
                               returnStats=True)

        print('Done with basic LDS state test')
Example #11
def testLDSMNIWPriorBasic():

    with np.errstate( all='raise' ), scipy.special.errstate( all='raise' ):
        T = 10
        D_latent = 7
        D_obs = 3
        D = 4
        meas = 4
        size = 5

        LDSParams = {
            'mu_0': np.random.random( D_latent ),
            'kappa_0': np.random.random() * D_latent,
            'psi_0': InverseWishart.generate( D=D_latent ),
            'nu_0': D_latent,

            'M_trans': np.random.random( ( D_latent, D_latent ) ),
            'V_trans': InverseWishart.generate( D=D_latent ),
            'psi_trans': InverseWishart.generate( D=D_latent ),
            'nu_trans': D_latent,

            'M_emiss': np.random.random( ( D_obs, D_latent ) ),
            'V_emiss': InverseWishart.generate( D=D_latent ),
            'psi_emiss': InverseWishart.generate( D=D_obs ),
            'nu_emiss': D_obs
        }

        prior = LDSMNIWPrior( **LDSParams )
        state = LDSState( prior=prior )

        u = np.random.random( ( T, D_latent ) )
        nBad = int( np.random.random() * T )
        badMask = np.random.choice( T, nBad )
        u[ badMask ] = np.nan

        _, ys = LDSState.generate( measurements=meas, T=T, D_latent=D_latent, D_obs=D_obs, size=size, stabilize=True )

        xNoCond  , ysNoCond  = state.isample( u=u, T=T, measurements=meas, size=size, stabilize=True )
        xForward , yForward  = state.isample( u=u, ys=ys )
        xBackward, yBackward = state.isample( u=u, ys=ys, forwardFilter=False )

        state.ilog_likelihood( ( xNoCond, ysNoCond ), u=u )
        state.ilog_likelihood( ( xForward, yForward ), u=u, forwardFilter=False )
        state.ilog_likelihood( ( xBackward, yBackward ), u=u )

        state.ilog_likelihood( ( xNoCond, ysNoCond ), u=u, conditionOnY=True )
        state.ilog_likelihood( ( xForward, yForward ), u=u, forwardFilter=False, conditionOnY=True )
        state.ilog_likelihood( ( xBackward, yBackward ), u=u, conditionOnY=True )

        print( 'Done with basic LDS prior test' )
Example #12
    def _compute_likelihood_ratio_test(self):
        """
        This function computes the likelihood ratio test for the model. We
        compare the existing model (with all the covariates) to the trivial model
        of no covariates.

        """
        from lifelines.statistics import chisq_test

        ll_null = self._ll_null
        ll_alt = self._log_likelihood

        test_stat = 2 * ll_alt - 2 * ll_null
        degrees_freedom = self.params_.shape[0] - 2  # delta in number of parameters between models
        p_value = chisq_test(test_stat, degrees_freedom=degrees_freedom)
        with np.errstate(invalid="ignore", divide="ignore"):
            return test_stat, degrees_freedom, -np.log2(p_value)
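The statistic 2(ll_alt - ll_null) is asymptotically chi-squared with degrees_freedom degrees of freedom, which is what lifelines' chisq_test evaluates. The equivalent computation with scipy, using hypothetical log-likelihoods:

from scipy.stats import chi2

ll_null, ll_alt, degrees_freedom = -120.0, -112.5, 3   # hypothetical values
test_stat = 2 * (ll_alt - ll_null)
p_value = chi2.sf(test_stat, df=degrees_freedom)       # survival function = 1 - cdf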
Example #13
def logsumexp(a, axis=None, keepdims=False):
    """Modified from scipy :
    Compute the log of the sum of exponentials of input elements.
    Parameters
    ----------
    a : array_like
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes over which the sum is taken. By default `axis` is None,
        and all elements are summed.
        .. versionadded:: 0.11.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the original array.
        .. versionadded:: 0.15.0
    Returns
    -------
    res : ndarray
        The result, ``np.log(np.sum(np.exp(a)))``, calculated in a numerically
        more stable way.
    """

    a_max = np.amax(a, axis=axis, keepdims=True)

    # Replace non-finite maxima with 0 so the shift below is a no-op
    a_max = np.where(~np.isfinite(a_max), 0, a_max)
    assert np.sum(~np.isfinite(a_max)) == 0

    tmp = np.exp(a - a_max)

    # suppress warnings about log of zero
    with np.errstate(divide='ignore'):
        s = np.sum(tmp, axis=axis, keepdims=keepdims)
        out = np.log(s)

    if not keepdims:
        a_max = np.squeeze(a_max, axis=axis)
    out += a_max

    return out
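The max-shift is what makes this numerically safe: np.exp overflows for arguments above roughly 709, while the shifted form never exponentiates anything larger than 0. A quick demonstration:

import numpy as np

a = np.array([1000.0, 1000.0])
print(np.log(np.sum(np.exp(a))))   # naive form: inf (overflow)
print(logsumexp(a))                # stable form: 1000.6931...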
Example #14
    def _compute_likelihood_ratio_test(self):
        """
        This function computes the likelihood ratio test for the Weibull model. We
        compare the existing model (with all the covariates) to the trivial model
        of no covariates.

        """
        ll_null = WeibullFitter().fit(self.durations,
                                      self.event_observed)._log_likelihood
        ll_alt = self._log_likelihood

        test_stat = 2 * ll_alt - 2 * ll_null
        degrees_freedom = self.params_.shape[0] - 2  # difference in number of parameters between models
        p_value = chisq_test(test_stat, degrees_freedom=degrees_freedom)
        with np.errstate(invalid="ignore", divide="ignore"):
            return test_stat, degrees_freedom, -np.log2(p_value)
Example #15
def _build_errors_df(name_errors, label):
    """Helper to build errors DataFrame."""
    series = []
    percentiles = np.linspace(0, 100, 21)
    index = percentiles / 100
    for name, errors in name_errors:
        series.append(
            pd.Series(np.nanpercentile(errors, q=percentiles),
                      index=index,
                      name=name))
    df = pd.concat(series, axis=1)
    df.columns.name = 'derivative'
    df.index.name = 'quantile'
    df = df.stack().rename('error').reset_index()
    with np.errstate(divide='ignore'):
        df['log(error)'] = np.log(df['error'])
    if label is not None:
        df['label'] = label
    return df
Example #16
    def summary(self):
        """Summary statistics describing the fit.

        Returns
        -------
        df : DataFrame
            Contains columns coef, np.exp(coef), se(coef), z, p, lower, upper"""
        ci = 1 - self.alpha
        with np.errstate(invalid="ignore", divide="ignore"):
            df = pd.DataFrame(index=self.params_.index)
            df["coef"] = self.params_
            df["exp(coef)"] = np.exp(self.params_)
            df["se(coef)"] = self.standard_errors_
            df["z"] = self._compute_z_values()
            df["p"] = self._compute_p_values()
            df["-log2(p)"] = -np.log2(df["p"])
            df["lower %g" % ci] = self.confidence_intervals_["lower-bound"]
            df["upper %g" % ci] = self.confidence_intervals_["upper-bound"]
            return df
Example #17
File: numeric.py Project: Yusheng-cai/wham
def alogsumexp(a, b=None, axis=None, keepdims=False):
    """
    Performs logsumexp using the numpy from autograd
    np.log(np.sum(a*np.exp(b)))

    Args:
        a(np.ndarray): The matrix/vector to be exponentiated  (shape (N,..))
        b(np.ndarray): The number at which to multiply exp(a) (shape (N,)) (default None)
        axis(int): the axis at which to sum over (defaul None)
        keepdims(bool): whether to keep the result as the same shape (default False)

    Return:
        a matrix that is the logsumexp result of a & b
    """
    if b is not None:
        if nup.any(b == 0):
            a = a + 0.  # promote to at least float
            a[b == 0] = -nup.inf

    # find maximum of a along the axis provided
    a_max = nup.amax(a, axis=axis, keepdims=True)

    if b is not None:
        b = nup.asarray(b)
        tmp = b * nup.exp(a - a_max)
    else:
        tmp = nup.exp(a - a_max)

    # Suppress warnings about log of zero
    with nup.errstate(divide='ignore'):
        s = nup.sum(tmp, axis=axis, keepdims=keepdims)

    out = nup.log(s)

    if not keepdims:
        a_max = nup.squeeze(a_max, axis=axis)

    out += a_max

    return out
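The b argument weights each exponential, so this is a stable log(sum(b * exp(a))); zero weights are mapped to exp(-inf) = 0 up front so they cannot become the max. A small check, assuming nup is autograd.numpy as the code suggests:

import autograd.numpy as nup

a = nup.array([-1000.0, -1000.0])
b = nup.array([0.3, 0.7])
print(alogsumexp(a, b=b))   # -> -1000.0; the naive log(sum(b * exp(a))) underflows to -inf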
Example #18
    def transitionProb(self, child):
        parents, parent_order = self.getParents(child, get_order=True)
        ndim = len(parents) + 1
        pi = np.copy(self.pis[ndim])

        # If we know the latent state for child, then ensure that we
        # transition there.  Also make sure we're only using the possible
        # parent latent states.
        modified = False
        for parent, order in zip(parents, parent_order):
            if (int(parent) in self.possible_latent_states):
                parent_states = self.possible_latent_states[int(parent)]
                impossible_parent_axes = np.setdiff1d(
                    np.arange(pi.shape[order]), parent_states)
                index = [slice(0, s) for s in pi.shape]
                index[order] = impossible_parent_axes
                pi[tuple(index)] = np.NINF
                modified = True

        if (int(child) in self.possible_latent_states):
            child_states = self.possible_latent_states[int(child)]
            impossible_child_axes = np.setdiff1d(np.arange(pi.shape[-1]),
                                                 child_states)
            pi[..., impossible_child_axes] = np.NINF
            modified = True

        if modified:
            with np.errstate(invalid='ignore'):
                pi[..., :] -= logsumexp(pi, axis=-1)[..., None]

        # In case entire rows summed to -inf
        pi[np.isnan(pi)] = np.NINF

        # Reshape pi's axes to match parent order
        assert len(parents) + 1 == pi.ndim
        assert parent_order.shape[0] == parents.shape[0]
        pi = np.moveaxis(pi, np.arange(ndim),
                         np.hstack((parent_order, ndim - 1)))
        return pi
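The masking-and-renormalizing step works entirely in log space: impossible entries are set to -inf and subtracting logsumexp along the last axis renormalizes each row. A minimal numeric sketch of the same idea:

import numpy as np
from scipy.special import logsumexp

pi = np.log(np.array([0.2, 0.3, 0.5]))
pi[1] = -np.inf              # rule out state 1
pi -= logsumexp(pi)          # renormalize in log space
print(np.exp(pi))            # -> [0.2857..., 0.     , 0.7142...]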
Example #19
    def R_cb(self, t, cb=0.05):
        def ssf(params):
            params = np.reshape(params, (self.m, self.dist.k + 1))
            F = np.zeros_like(t)
            for i in range(self.m):
                F = F + params[i, 0] * self.dist.ff(t, *params[i, 1::])
            return 1 - F

        pvars = self.hess_inv[np.triu_indices(self.hess_inv.shape[0])]
        with np.errstate(all='ignore'):
            jac = jacobian(ssf)(self.res.x)

        var_u = []
        for i, j in enumerate(jac):
            j = np.atleast_2d(j).T * j
            j = j[np.triu_indices(j.shape[0])]
            var_u.append(np.sum(j * pvars))
        diff = (z(cb / 2) * np.sqrt(np.array(var_u)) *
                np.array([1., -1.]).reshape(2, 1))
        R_hat = self.sf(t)
        exponent = diff / (R_hat * (1 - R_hat))
        R_cb = R_hat / (R_hat + (1 - R_hat) * np.exp(exponent))
        return R_cb.T
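In formula form, this is a logit-transformed normal-approximation bound on the reliability function, matching the code above (the variance of the fitted survival function comes from the delta method via the Jacobian and the inverse Hessian):

$$R_{\mathrm{cb}}(t) = \frac{\hat{R}(t)}{\hat{R}(t) + \bigl(1 - \hat{R}(t)\bigr)\exp\!\left(\pm\,\frac{z_{\alpha/2}\,\sqrt{\widehat{\operatorname{Var}}\,\hat{R}(t)}}{\hat{R}(t)\,\bigl(1 - \hat{R}(t)\bigr)}\right)}$$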
Example #20
import autograd.scipy.special as special
from util import *
from my_hawkes import *
import equations
import autograd.numpy as np
import matplotlib.pyplot as plt
from autograd import grad
import wp
import time
from scipy.optimize import minimize
from tick.hawkes import HawkesKernelTimeFunc, SimuHawkes
from tick.base import TimeFunction
from joblib import Parallel, delayed
from scipy import interpolate

# A bare call to np.errstate has no effect; it must be entered as a context
# manager. np.seterr sets the error handling globally, which is the intent here.
np.seterr(divide='ignore')
np.random.seed(100)

figsize=(8,6)
markersize = 20
labelsize = 28
fontsize = 28

def fit_vbhp(hps, nz, zmax, run_time, filename, start_from_ideal, gamma, alpha, support):

    nhp = len(hps)
    _minimize_method = 'L-BFGS-B'
    _minimize_options = dict(maxiter=10, disp=False, ftol=0, maxcor=20)
    ite = 0
    baseline = 10
    z = np.linspace(1e-6, zmax, nz).reshape((1, -1))
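For reference, np.errstate only changes numpy's floating-point error handling while its context is active, which is how every other example on this page uses it:

import numpy as np

with np.errstate(divide='ignore'):
    rates = np.log(np.array([0.0, 1.0]))   # log(0) -> -inf without a RuntimeWarning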
Example #21
    def fit(self,
            X,
            x,
            c=None,
            n=None,
            t=None,
            init=[],
            fixed={},
            verbose=False):
        x, c, n, t = surpyval.xcnt_handler(x=x,
                                           c=c,
                                           n=n,
                                           t=t,
                                           group_and_sort=False)

        if init == []:
            stress_data = []
            params_at_X = []

            # How do I make this work when there is only one failure per stress?
            for s in np.unique(X, axis=0):
                mask = (X == s).all(axis=1)
                with warnings.catch_warnings():
                    warnings.filterwarnings('error')
                    try:
                        params_at_X.append(
                            self.dist.fit(x[mask], c[mask], n[mask]).params)
                        stress_data.append(s)
                    except np.RankWarning:
                        pass

            params_at_X = np.array(params_at_X)
            stress_data = np.array(stress_data)
            dist_init = params_at_X.mean(axis=0)

            i = self.param_map[self.life_parameter]

            if len(params_at_X) < 2:
                raise ValueError(
                    "Insufficient data at separate X values. Try manually setting initial guess using `init` keyword in `fit`"
                )

            parameter_data = params_at_X[:, i]

            parameter_data = self.inverse_param_transform(parameter_data)

            if callable(self.life_model.phi_init):
                if str(inspect.signature(self.life_model.phi_init)) == '(X)':
                    phi_init = self.life_model.phi_init(X)
                else:
                    phi_init = self.life_model.phi_init(
                        parameter_data, stress_data)
            else:
                phi_init = self.life_model.phi_init

            init = np.array([*dist_init, *phi_init])
        else:
            init = np.array(init)

        if self.baseline != []:
            baseline_model = self.dist.fit(x, c, n, t)
            baseline_fixed = {
                k: baseline_model.params[baseline_model.param_map[k]]
                for k in self.baseline
            }
            fixed = {**baseline_fixed, **fixed}

        if self.fixed != {}:
            fixed = {**self.fixed, **fixed}

        # Dynamic or static bounds determination
        if callable(self.life_model.phi_bounds):
            bounds = (*self.bounds, *self.life_model.phi_bounds(X))
        else:
            bounds = (*self.bounds, *self.life_model.phi_bounds)

        if callable(self.life_model.phi_param_map):
            phi_param_map = self.life_model.phi_param_map(X)
        else:
            phi_param_map = self.life_model.phi_param_map

        param_map = {
            **self.param_map,
            **{k: v + len(self.param_map)
               for k, v in phi_param_map.items()}
        }
        self.param_map = param_map

        transform, inv_trans, funcs, inv_f = bounds_convert(x, bounds)
        const, fixed_idx, not_fixed = fix_idx_and_function(
            fixed, param_map, funcs)

        init = transform(init)[not_fixed]

        with np.errstate(all='ignore'):
            fun = lambda params, verbose: self.neg_ll(
                X, x, c, n, *inv_trans(const(params)), verbose=verbose)
            # jac = jacobian(fun)
            # hess = hessian(fun)
            res = minimize(fun, init, args=(verbose,))
            res = minimize(fun, res.x, args=(verbose,), method='TNC')
            # res = minimize(fun, init, jac=jac, method='BFGS')
            # res = minimize(fun, init, method='Newton-CG', jac=jac)

        params = inv_trans(const(res.x))
        dist_params = np.array(params[0:self.k_dist])
        phi_params = np.array(params[self.k_dist:])

        model = Regression()
        model.model = self
        model.kind = self.kind
        model.distribution = self.dist
        model.reg_model = self.life_model
        model.params = np.array(params)
        model.dist_params = dist_params
        model.phi_params = phi_params
        model.res = res
        model._neg_ll = res['fun']
        model.fixed = self.fixed
        model.k_dist = self.k_dist

        model.k = len(bounds)

        model.data = {'x': x, 'c': c, 'n': n, 't': t}

        return model
Example #22
    def fit(self, X, x, c=None, n=None, t=None, init=[], fixed={}):
        x, c, n, t = surpyval.xcnt_handler(x, c, n, t, group_and_sort=False)

        if init == []:
            ps = self.dist.fit(x, c=c, n=n, t=t).params
            if callable(self.phi_init):
                init_phi = self.phi_init(X)
            else:
                init_phi = self.phi_init

            init = np.array([*ps, *init_phi])
        else:
            init = np.array(init)

        if self.baseline != []:
            baseline_model = self.dist.fit(x, c, n, t)
            baseline_fixed = {
                k: baseline_model.params[baseline_model.param_map[k]]
                for k in self.baseline
            }
            fixed = {**baseline_fixed, **fixed}

        # Dynamic or static bounds determination
        if callable(self.phi_bounds):
            bounds = (*self.bounds, *self.phi_bounds(X))
        else:
            bounds = (*self.bounds, *self.phi_bounds)

        if callable(self.phi_param_map):
            phi_param_map = self.phi_param_map(X)
        else:
            phi_param_map = self.phi_param_map

        param_map = {**self.param_map, **phi_param_map}

        transform, inv_trans, funcs, inv_f = bounds_convert(x, bounds)
        const, fixed_idx, not_fixed = fix_idx_and_function(
            fixed, param_map, funcs)

        init = transform(init)[not_fixed]

        with np.errstate(all='ignore'):
            fun = lambda params: self.neg_ll(X, x, c, n,
                                             *inv_trans(const(params)))
            # jac = jacobian(fun)
            # hess = hessian(fun)
            res = minimize(fun, init)
            res = minimize(fun, res.x, method='TNC')

        params = inv_trans(const(res.x))

        model = Regression()
        model.model = self
        model.reg_model = self.phi
        model.kind = "Proportional Hazard"
        model.distribution = self.dist
        model.params = np.array(params)
        model.res = res
        model._neg_ll = res['fun']
        model.fixed = self.fixed
        model.k_dist = self.k_dist
        model.phi_param_map = phi_param_map

        return model
Example #23
def testLDSSample():

    import matplotlib.pyplot as plt

    with np.errstate(under='ignore',
                     divide='raise',
                     over='raise',
                     invalid='raise'):

        T = 10
        D_latent = 2
        D_obs = 3
        meas = 1

        A, sigma = MatrixNormalInverseWishart.generate(D_in=D_latent,
                                                       D_out=D_latent)
        C, R = MatrixNormalInverseWishart.generate(D_in=D_latent, D_out=D_obs)
        mu0, sigma0 = NormalInverseWishart.generate(D=D_latent)

        state = LDSState(A=A, sigma=sigma, C=C, R=R, mu0=mu0, sigma0=sigma0)

        u = np.random.random((T, D_latent))
        nBad = int(np.random.random() * T)
        badMask = np.random.choice(T, nBad)
        u[badMask] = np.nan

        _, ys = LDSState.generate(measurements=meas,
                                  T=T,
                                  D_latent=D_latent,
                                  D_obs=D_obs,
                                  size=1,
                                  stabilize=True)

        # Create the distribution over P( X | Y )
        (alpha, ), (beta, ) = state.EStep(ys=ys, u=u)
        smoothed = [np.add(a, b) for a, b in zip(alpha, beta)]
        means = []
        covs = []
        for J, h, _ in smoothed:
            mu, sigma = Normal.natToStandard(J, h, fromPrecision=True)
            means.append(mu)
            covs.append(sigma)
        means = np.array(means)
        TRange = np.arange(len(means))

        # Sample X ~ P( X | Y )
        (xForward, ), yForward = state.isample(u=u, ys=ys)
        (xBackward, ), yBackward = state.isample(u=u,
                                                 ys=ys,
                                                 forwardFilter=False)

        # Compare X ~ P( X | Y ) to E[ P( X | Y ) ]
        ax1 = plt.subplot2grid((2, 1), (0, 0), rowspan=1, colspan=1)
        ax2 = plt.subplot2grid((2, 1), (1, 0), rowspan=1, colspan=1)

        ax1.plot(TRange, means[:, 0], color='red', linewidth=4, alpha=0.7)
        ax1.plot(TRange, xForward[:, 0], color='blue', linewidth=2, alpha=0.3)
        ax1.plot(TRange,
                 xBackward[:, 0],
                 color='green',
                 linewidth=2,
                 alpha=0.3)

        ax2.plot(TRange, means[:, 1], color='red', linewidth=4, alpha=0.7)
        ax2.plot(TRange, xForward[:, 1], color='blue', linewidth=2, alpha=0.3)
        ax2.plot(TRange,
                 xBackward[:, 1],
                 color='green',
                 linewidth=2,
                 alpha=0.3)

        plt.show()

        print('Done with LDS state test')
Example #24
def testStableKalmanFilter():

    # np.random.seed( 3 )

    with np.errstate(all='raise'), scipy.special.errstate(all='raise'):
        T = 1000
        D_latent = 7
        D_obs = 3
        D = 4

        mp = StableKalmanFilter()
        mpTrue = KalmanFilter()

        A, sigma = MatrixNormalInverseWishart.generate(D_in=D_latent,
                                                       D_out=D_latent)
        C, R = MatrixNormalInverseWishart.generate(D_in=D_latent, D_out=D_obs)
        mu0, sigma0 = NormalInverseWishart.generate(D=D_latent)

        u = np.random.random((T, D_latent))

        ys = np.array(
            [Regression.sample(params=(C, R), size=T)[1] for _ in range(D)])

        mpTrue.updateParams(A=A,
                            sigma=sigma,
                            C=C,
                            R=R,
                            mu0=mu0,
                            sigma0=sigma0,
                            u=u,
                            ys=ys)

        start = time.time()
        mp.updateParams(A=A,
                        sigma=sigma,
                        C=C,
                        R=R,
                        mu0=mu0,
                        sigma0=sigma0,
                        u=u,
                        ys=ys)
        end = time.time()
        print('Preprocess: ', end - start)

        start = time.time()
        alphas = mp.forwardFilter()
        betas = mp.backwardFilter()
        end = time.time()
        print('Both filters: ', end - start)

        alphasTrue, betasTrue = (mpTrue.forwardFilter(),
                                 mpTrue.backwardFilter())

        # for i, ( a, b ) in enumerate( zip( alphas, betas ) ):
        #     Ja, ha, log_Za = a
        #     if( i == 1 ):
        #         print( 'Ja', Ja )


        for i, (a, b, _a,
                _b) in enumerate(zip(alphas, betas, alphasTrue, betasTrue)):
            Ja, ha, log_Za = a
            Jb, hb, log_Zb = b

            _Ja, _ha, _log_Za = _a
            _Jb, _hb, _log_Zb = _b

            assert np.allclose(
                Ja, _Ja, rtol=1e-5,
                atol=1e-6), '%s\n%s' % ((Ja - _Ja), np.max((Ja - _Ja)))
            assert np.allclose(
                Jb, _Jb, rtol=1e-5,
                atol=1e-6), '%s\n%s' % ((Jb - _Jb), np.max((Jb - _Jb)))
            assert np.allclose(
                ha, _ha, rtol=1e-5,
                atol=1e-6), '%s\n%s' % ((ha - _ha), np.max((ha - _ha)))
            assert np.allclose(
                hb, _hb, rtol=1e-5,
                atol=1e-6), '%s\n%s' % ((hb - _hb), np.max((hb - _hb)))
            assert np.allclose(
                log_Za, _log_Za, rtol=1e-5, atol=1e-6), '%s\n%s' % (
                    (log_Za - _log_Za), np.max((log_Za - _log_Za)))
            assert np.allclose(
                log_Zb, _log_Zb, rtol=1e-5, atol=1e-6), '%s\n%s' % (
                    (log_Zb - _log_Zb), np.max((log_Zb - _log_Zb)))

        print('Passed the stable Kalman filter marginal test!\n\n')
Example #25
    def fit(self, X, x, c=None, n=None, t=None, init=[], fixed={}):
        x, c, n, t = surpyval.xcnt_handler(x=x, c=c, n=n, t=t, group_and_sort=False)

        if init == []:
            stress_data = np.unique(X, axis=0)
            params_at_X = []
            for s in stress_data:
                mask = (X == s).all(axis=1)
                params_at_X.append(self.dist.fit(x[mask], c[mask], n[mask]).params)

            params_at_X = np.array(params_at_X)
            dist_init = params_at_X.mean(axis=0)

            acc_parameter_data = params_at_X[:, self.param_map[self.fixed_parameter]]
            acc_parameter_data = self.acc_parameter_relationship(acc_parameter_data)

            if callable(self.acc_model.phi_init):
                phi_init = self.acc_model.phi_init(acc_parameter_data, stress_data)
            else:
                phi_init = self.acc_model.phi_init


            init = np.array([*dist_init, *phi_init])
        else:
            init = np.array(init)

        if self.fixed != {}:
            fixed = {**self.fixed, **fixed}

        # Dynamic or static bounds determination
        if callable(self.acc_model.phi_bounds):
            bounds = (*self.bounds, *self.acc_model.phi_bounds(X))
        else:
            bounds = (*self.bounds, *self.acc_model.phi_bounds)

        if callable(self.acc_model.phi_param_map):
            phi_param_map = self.acc_model.phi_param_map(X)
        else:
            phi_param_map = self.acc_model.phi_param_map

        param_map = {**self.param_map, **{k : v + len(self.param_map) for k, v in phi_param_map.items()}}

        transform, inv_trans, funcs, inv_f = bounds_convert(x, bounds)
        const, fixed_idx, not_fixed = fix_idx_and_function(fixed, param_map, funcs)

        init = transform(init)[not_fixed]

        with np.errstate(all='ignore'):
            fun  = lambda params : self.neg_ll(X, x, c, n, *inv_trans(const(params)))
            # fun  = lambda params : self.neg_ll(X, x, c, n, *params)
            # jac = jacobian(fun)
            # hess = hessian(fun)
            res = minimize(fun, init)
            res = minimize(fun, res.x, method='TNC')
            # res = minimize(fun, init, jac=jac, method='BFGS')
            # res = minimize(fun, init, method='Newton-CG', jac=jac)

        params = inv_trans(const(res.x))
        model = Regression()
        model.model = self
        model.reg_model = self.acc_model
        model.kind = "Accelerated Failure Time"
        model.distribution = self.dist
        model.params = np.array(params)
        model.res = res
        model._neg_ll = res['fun']
        model.fixed = self.fixed
        model.k_dist = self.k_dist
        model.k = len(bounds)

        model.data = {
            'x' : x,
            'c' : c,
            'n' : n,
            't' : t
        }

        return model