Example #1
def to_common_arr(x):
    """ Numerically stable transform from real line to positive reals

    Returns ag_np.log(1.0 + ag_np.exp(x))

    Autograd friendly and fully vectorized

    Args
    ----
    x : array of values in (-\infty, +\infty)

    Returns
    -------
    ans : array of values in (0, +\infty), same size as x
    """
    if not isinstance(x, float):
        mask1 = x > 0
        mask0 = ag_np.logical_not(mask1)
        out = ag_np.zeros_like(x)
        out[mask0] = ag_np.log1p(ag_np.exp(x[mask0]))
        out[mask1] = x[mask1] + ag_np.log1p(ag_np.exp(-x[mask1]))
        return out
    if x > 0:
        return x + ag_np.log1p(ag_np.exp(-x))
    else:
        return ag_np.log1p(ag_np.exp(x))
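
The masking above is what keeps the softplus finite at both extremes. A minimal sanity check, assuming ag_np is autograd.numpy as the naming suggests:

import autograd.numpy as ag_np

x = ag_np.array([-800.0, -1.0, 0.0, 1.0, 800.0])
# The naive ag_np.log(1.0 + ag_np.exp(x)) overflows at x = 800; the masked form
# returns approximately [0., 0.3133, 0.6931, 1.3133, 800.].
print(to_common_arr(x))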
Example #2
def to_unconstrained_arr(p):
    """ Numerically stable transform from positive reals to real line

    Implements ag_np.log(ag_np.exp(p) - 1.0)

    Autograd friendly and fully vectorized

    Args
    ----
    p : array of values in (0, +\infty)

    Returns
    -------
    ans : array of values in (-\infty, +\infty), same size as p
    """
    ## Handle numpy array case
    if not isinstance(p, float):
        mask1 = p > 10.0
        mask0 = ag_np.logical_not(mask1)
        out = ag_np.zeros_like(p)
        out[mask0] =  ag_np.log(ag_np.expm1(p[mask0]))
        out[mask1] = p[mask1] + ag_np.log1p(-ag_np.exp(-p[mask1]))
        return out
    ## Handle scalar float case
    else:
        if p > 10:
            return p + ag_np.log1p(-ag_np.exp(-p))
        else:
            return ag_np.log(ag_np.expm1(p))
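
Together with Example #1, the two transforms should invert each other. A quick round-trip sketch (again assuming ag_np is autograd.numpy):

import autograd.numpy as ag_np

x = ag_np.array([-30.0, -2.0, 0.0, 2.0, 30.0])
p = to_common_arr(x)              # real line -> (0, +infty), i.e. softplus
x_back = to_unconstrained_arr(p)  # (0, +infty) -> real line, i.e. inverse softplus
print(ag_np.allclose(x, x_back))  # expected: True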
Example #3
def stick_jacobian_det(y_):
    y = y_.T
    Km1 = y.shape[0]
    k = np.arange(Km1)[(slice(None), ) + (None, ) * (y.ndim - 1)]
    eq_share = logit(1. / (Km1 + 1 - k))  # -np.log(Km1 - k)
    yl = y + eq_share
    yu = np.concatenate([np.ones(y[:1].shape), 1 - invlogit(yl)])
    S = cumprod(yu)
    return np.sum(
        np.log(S[:-1]) - np.log1p(np.exp(yl)) - np.log1p(np.exp(-yl)), 0).T
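
The snippet above relies on helpers that are not shown. Minimal stand-ins, under the assumption that logit/invlogit have their usual meanings and the cumulative product runs down the stick-breaking axis, might look like this (a sketch, not the original definitions):

import numpy as np
from scipy.special import logit, expit as invlogit  # assumed meanings

def cumprod(x):
    # assumed: cumulative product along the first (stick) axis
    return np.cumprod(x, axis=0)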
Example #4
    def predict_cumulative_hazard(self, X, times=None, ancillary_X=None):
        """
        Return the cumulative hazard rate of subjects in X at time points.

        Parameters
        ----------
        X: numpy array or DataFrame
            a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
            can be in any order. If a numpy array, columns must be in the
            same order as the training data.
        times: iterable, optional
            an iterable of increasing times to predict the cumulative hazard at. Default
            is the set of all durations (observed and unobserved). Uses a linear interpolation if
            points in time are not in the index.
        ancillary_X: numpy array or DataFrame, optional
            a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
            can be in any order. If a numpy array, columns must be in the
            same order as the training data.

        Returns
        -------
        cumulative_hazard_ : DataFrame
            the cumulative hazard of individuals over the timeline
        """
        times = coalesce(times, self.timeline, np.unique(self.durations))
        alpha_, beta_ = self._prep_inputs_for_prediction_and_return_scores(X, ancillary_X)
        return pd.DataFrame(np.log1p(np.outer(times, 1 / alpha_) ** beta_), columns=_get_index(X), index=times)
Example #5
    def predict_cumulative_hazard(self, X, times=None, ancillary_X=None):
        """
        Return the cumulative hazard rate of subjects in X at time points.

        Parameters
        ----------
        X: numpy array or DataFrame
            a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
            can be in any order. If a numpy array, columns must be in the
            same order as the training data.
        times: iterable, optional
            an iterable of increasing times to predict the cumulative hazard at. Default
            is the set of all durations (observed and unobserved). Uses a linear interpolation if
            points in time are not in the index.
        ancillary_X: numpy array or DataFrame, optional
            a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
            can be in any order. If a numpy array, columns must be in the
            same order as the training data.

        Returns
        -------
        cumulative_hazard_ : DataFrame
            the cumulative hazard of individuals over the timeline
        """
        times = coalesce(times, self.timeline, np.unique(self.durations))
        alpha_, beta_ = self._prep_inputs_for_prediction_and_return_scores(X, ancillary_X)
        return pd.DataFrame(np.log1p(np.outer(times, 1 / alpha_) ** beta_), columns=_get_index(X), index=times)
Example #6
def inv_logistic_sigmoid(p, do_force_safe=True):
    ''' Compute inverse logistic sigmoid from unit interval to reals.

    Numerically stable and fully vectorized.

    Args
    ----
    p : array-like, with values in (0, 1)

    Returns
    -------
    x : array-like, size of p, with values in (-infty, infty)

    Examples
    --------
    >>> np.round(inv_logistic_sigmoid(0.11), 6)
    -2.090741
    >>> np.round(inv_logistic_sigmoid(0.5), 6)
    0.0
    >>> np.round(inv_logistic_sigmoid(0.89), 6)
    2.090741

    >>> p_vec = np.asarray([
    ...     1e-100, 1e-10, 1e-5,
    ...     0.25, 0.75, .9999, 1-1e-14])
    >>> np.round(inv_logistic_sigmoid(p_vec), 2)
    array([-230.26,  -23.03,  -11.51,   -1.1 ,    1.1 ,    9.21,   32.24])
    '''
    if do_force_safe:
        p = np.minimum(np.maximum(p, MIN_VAL), MAX_VAL)
    return np.log(p) - np.log1p(-p)
Example #7
    def _cumulative_hazard(self, params, T, *Xs):
        alpha_params = params[self._LOOKUP_SLICE["alpha_"]]
        alpha_ = np.exp(np.dot(Xs[0], alpha_params))

        beta_params = params[self._LOOKUP_SLICE["beta_"]]
        beta_ = np.exp(np.dot(Xs[1], beta_params))
        return np.log1p((T / alpha_) ** beta_)
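
This is the log-logistic cumulative hazard: with survival function S(t) = 1 / (1 + (t / alpha)^beta), the cumulative hazard is -log S(t) = log(1 + (t / alpha)^beta), which np.log1p evaluates stably. A tiny standalone check of that identity, with hypothetical alpha, beta, and t:

import numpy as np

alpha_, beta_, t = 2.0, 1.5, 3.0
S = 1.0 / (1.0 + (t / alpha_) ** beta_)
print(np.allclose(-np.log(S), np.log1p((t / alpha_) ** beta_)))  # expected: True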
Example #8
    def test_e_logistic_term(self):
        z_dim = (3, 2)
        z_mean = np.random.random(z_dim)
        z_sd = np.exp(z_mean)

        num_std_draws = 100
        std_draws = model.get_standard_draws(num_std_draws)

        # This would normally be a matrix of zeros and ones
        y = np.random.random(z_dim)

        num_draws = 10000
        z_draws = sp.stats.norm(loc=z_mean, scale=z_sd).rvs(
            (num_draws, z_dim[0], z_dim[1]))
        logit_term_draws = \
            np.expand_dims(y, axis=0) * z_draws - \
            np.log1p(np.exp(z_draws))

        # The Monte Carlo error will be dominated by the number of draws used
        # in the get_e_logistic_term approximation.
        test_se = np.max(
            3 * np.std(logit_term_draws, axis=0) / np.sqrt(num_std_draws))
        print('Logistic test moment tolerance: ', test_se)
        np_test.assert_allclose(
            np.sum(np.mean(logit_term_draws, axis=0)),
            model.get_e_logistic_term(y, z_mean, z_sd, std_draws),
            atol=test_se)
Example #9
def to_diffable_arr(topics_KV, min_eps=MIN_EPS, do_force_safe=False):
    ''' Transform normalized topics to unconstrained space.

    Args
    ----
    topics_KV : 2D array, size K x V
        minimum value of any entry must be min_eps
        each row should sum to 1.0

    Returns
    -------
    log_topics_vec : 2D array, size K x (V-1)
        unconstrained real values

    Examples
    --------
    >>> topics_KV = np.eye(3) + np.ones((3,3))
    >>> topics_KV /= topics_KV.sum(axis=1)[:,np.newaxis]
    >>> log_topics_vec = to_diffable_arr(topics_KV)
    >>> out_KV = to_common_arr(log_topics_vec)
    >>> np.allclose(out_KV, topics_KV)
    True
    '''
    if do_force_safe:
        topics_KV = to_safe_common_arr(topics_KV, min_eps)
    K, V = topics_KV.shape
    log_topics_KV = np.log(topics_KV)
    log_topics_KVm1 = log_topics_KV[:, :-1]
    log_topics_KVm1 = log_topics_KVm1 - log_topics_KV[:, -1][:, np.newaxis]
    return log_topics_KVm1 + np.log1p(-V * min_eps)
Example #10
    def neg_hessian_log_emissions_prob(self, data, input, mask, tag, x):
        """
        d/dx log p(y | x) = d/dx [y * (Cx + Fu + d) - exp(Cx + Fu + d)]
                          = y * C - lmbda * C
                          = (y - lmbda) * C

        d/dx  (y - lmbda)^T C = d/dx -exp(Cx + Fu + d)^T C
            = -C^T exp(Cx + Fu + d)^T C
        """
        if self.link_name == "log":
            assert self.single_subspace
            lambdas = self.mean(self.forward(x, input, tag))
            return -np.einsum('tn, ni, nj ->tij', -lambdas[:, 0, :],
                              self.Cs[0], self.Cs[0])

        elif self.link_name == "softplus":
            assert self.single_subspace
            lambdas = np.log1p(
                np.exp(
                    np.dot(x, self.Cs[0].T) + np.dot(input, self.Fs[0].T) +
                    self.ds[0]))
            expterms = np.exp(-np.dot(x, self.Cs[0].T) -
                              np.dot(input, self.Fs[0].T) - self.ds[0])
            diags = (data / lambdas * (expterms - 1.0 / lambdas) -
                     expterms) / (1.0 + expterms)**2
            return -np.einsum('tn, ni, nj ->tij', diags, self.Cs[0],
                              self.Cs[0])
Example #11
    def _cumulative_hazard(self, params, T, *Xs):
        alpha_params = params[self._LOOKUP_SLICE["alpha_"]]
        alpha_ = np.exp(np.dot(Xs[0], alpha_params))

        beta_params = params[self._LOOKUP_SLICE["beta_"]]
        beta_ = np.exp(np.dot(Xs[1], beta_params))
        return np.log1p((T / alpha_) ** beta_)
Example #12
def log_logist(theta, Z, nu, mu, Sig):
    dots = -np.dot(Z, theta.T)
    log_lik = -np.sum(np.maximum(dots, 0) + np.log1p(np.exp(-np.abs(dots))),
                      axis=0)
    log_pri = log_multivariate_t(theta[:, :-1], mu, Sig, nu) + log_cauchy(
        theta[:, -1])
    return log_pri + log_lik
Example #13
  def testConditionAndMarginalizeBernoulli(self):
    def log_joint(x, logits):
      return np.sum(x * logits)
    p = np.random.beta(2., 2., [3, 4])
    logit_p = np.log(p) - np.log1p(-p)
    # TODO(mhoffman): Without the cast this gives wrong answers due to autograd
    # casts. This is scary.
    x = (np.random.uniform(size=(8,) + p.shape) < p).astype(np.float32)

    conditional, marginalized_value = _condition_and_marginalize(
        log_joint, 0, SupportTypes.BINARY, x, logit_p)

    correct_marginalized_value = np.sum(-x.shape[0] * np.log1p(-p))
    self.assertAlmostEqual(correct_marginalized_value, marginalized_value,
                           places=4)

    self.assertTrue(np.allclose(p, conditional.args[0]))
Example #14
    def compute_derivs(self):
        '''
        lazy slow AD-based implementation ... should actually hand-compute
        these for any serious use.
        '''
        Y = self.training_data.Y
        z = self.training_data.X.dot(self.params.get_free())
        f = lambda z, Y: -(Y * np.log(np.log1p(np.exp(z))) - np.log1p(np.exp(z)))
        grad = autograd.grad(f, argnum=0)
        grad2 = autograd.grad(grad)

        self.D1 = np.zeros(Y.shape[0])
        self.D2 = np.zeros(Y.shape[0])
        for n in range(Y.shape[0]):
            self.D1[n] = grad(z[n], Y[n])
            self.D2[n] = grad2(z[n], Y[n])
Example #15
 def decode(self, val, name):
     assert val > self.lower, '{} = {} must be > self.lower = {}'.format(
             name, val, self.lower)
     # Inverse of encoding: Careful with numerics:
     # val_int = log(exp(arg) - 1) = arg + log(1 - exp(-arg))
     #         = arg + log1p(-exp(-arg))
     arg = val - self.lower
     return arg + anp.log1p(-anp.exp(-arg))
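
The rearrangement in the comment is what makes the inverse softplus safe for large arguments. A quick numeric check of the identity, with plain NumPy standing in for anp:

import numpy as np

arg = np.array([1e-3, 1.0, 40.0])
lhs = np.log(np.expm1(arg))          # direct log(exp(arg) - 1)
rhs = arg + np.log1p(-np.exp(-arg))  # the rearranged, overflow-free form
print(np.allclose(lhs, rhs))         # expected: True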
Example #16
def gpdfit(ary):
    """Estimate the parameters for the Generalized Pareto Distribution (GPD).
    Empirical Bayes estimate for the parameters of the generalized Pareto
    distribution given the data.
    Parameters
    ----------
    ary : array
        sorted 1D data array
    Returns
    -------
    k : float
        estimated shape parameter
    sigma : float
        estimated scale parameter
    """
    prior_bs = 3
    prior_k = 10
    n = len(ary)
    m_est = 30 + int(n**0.5)

    b_ary = 1 - np.sqrt(m_est / (np.arange(1, m_est + 1, dtype=float) - 0.5))
    b_ary /= prior_bs * ary[int(n / 4 + 0.5) - 1]
    b_ary += 1 / ary[-1]

    k_ary = np.log1p(-b_ary[:, None] * ary).mean(axis=1)  # pylint: disable=no-member
    len_scale = n * (np.log(-(b_ary / k_ary)) - k_ary - 1)
    weights = 1 / np.exp(len_scale - len_scale[:, None]).sum(axis=1)

    # remove negligible weights
    real_idxs = weights >= 10 * np.finfo(float).eps
    if not np.all(real_idxs):
        weights = weights[real_idxs]
        b_ary = b_ary[real_idxs]
    # normalise weights
    weights /= weights.sum()

    # posterior mean for b
    b_post = np.sum(b_ary * weights)
    # estimate for k
    k_post = np.log1p(-b_post * ary).mean()  # pylint: disable=invalid-unary-operand-type,no-member
    # add prior for k_post
    k_post = (n * k_post + prior_k * 0.5) / (n + prior_k)
    sigma = -k_post / b_post

    return k_post, sigma
Example #17
    def _cumulative_hazard(self, params, T, Xs):
        Xbeta = np.dot(Xs["beta_"], params["beta_"])
        lT = np.log(T)

        return np.log1p(
            np.exp(Xbeta +
                   (params["phi1_"] * lT + params["phi2_"] *
                    self.basis(lT, np.log(self.KNOTS[1]), np.log(
                        self.KNOTS[0]), np.log(self.KNOTS[-1])))))
Example #18
    def _log_1m_sf(self, params, T, *Xs):
        alpha_params = params[self._LOOKUP_SLICE["alpha_"]]
        log_alpha_ = np.dot(Xs[0], alpha_params)
        alpha_ = np.exp(log_alpha_)

        beta_params = params[self._LOOKUP_SLICE["beta_"]]
        log_beta_ = np.dot(Xs[1], beta_params)
        beta_ = np.exp(log_beta_)
        return -np.log1p((T / alpha_)**-beta_)
Example #19
    def _log_1m_sf(self, params, T, *Xs):
        alpha_params = params[self._LOOKUP_SLICE["alpha_"]]
        log_alpha_ = np.dot(Xs[0], alpha_params)
        alpha_ = np.exp(log_alpha_)

        beta_params = params[self._LOOKUP_SLICE["beta_"]]
        log_beta_ = np.dot(Xs[1], beta_params)
        beta_ = np.exp(log_beta_)
        return -np.log1p((T / alpha_) ** -beta_)
Example #20
def softplus(x):
    """ Numerically stable transform from real line to positive reals
    Returns np.log(1.0 + np.exp(x))
    Autograd friendly and fully vectorized
    
    @param x: array of values in (-\infty, +\infty)
    @return ans : array of values in (0, +\infty), same size as x
    """
    if not isinstance(x, float):
        mask1 = x > 0
        mask0 = np.logical_not(mask1)
        out = np.zeros_like(x)
        out[mask0] = np.log1p(np.exp(x[mask0]))
        out[mask1] = x[mask1] + np.log1p(np.exp(-x[mask1]))
        return out
    if x > 0:
        return x + np.log1p(np.exp(-x))
    else:
        return np.log1p(np.exp(x))
Example #21
def get_mle_data_log_lik_terms(mle_par, x_mat, y_vec, y_g_vec):
    beta = mle_par['beta'].get()

    # atleast_1d is necessary for indexing by y_g_vec to work right.
    e_u = np.atleast_1d(mle_par['u'].get())

    # Log likelihood from data.
    z = e_u[y_g_vec] + np.squeeze(np.matmul(x_mat, beta))

    return y_vec * z - np.log1p(np.exp(z))
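
The term y_vec * z - np.log1p(np.exp(z)) is the Bernoulli log likelihood with logit z, since y*z - log(1 + e^z) = y*log(p) + (1 - y)*log(1 - p) for p = sigmoid(z). A small check of that identity, assuming SciPy's expit for the sigmoid:

import numpy as np
from scipy.special import expit

z = np.array([-2.0, 0.5, 3.0])
y = np.array([0.0, 1.0, 1.0])
p = expit(z)
print(np.allclose(y * z - np.log1p(np.exp(z)),
                  y * np.log(p) + (1 - y) * np.log1p(-p)))  # expected: True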
Example #22
def log_1plusexp(eta_):
    ''' Numerically stable version of np.log(1 + np.exp(eta_))
    eta_ (nd-array): An ndarray that potentially contains high values that
        will overflow while taking the exponential
    -----------------------------------------------------------------------
    returns (nd-array): log(1 + exp(eta_))
    '''
    # np.where evaluates both branches, so eta_ is clamped first to keep np.exp
    # from overflowing; where eta_ >= 50, log(1 + exp(eta_)) ~= eta_, so the
    # original (unclamped) values are returned there instead.
    eta_original = deepcopy(eta_)
    eta_ = np.where(eta_ >= np.log(sys.float_info.max), np.log(sys.float_info.max) - 1, eta_)
    return np.where(eta_ >= 50, eta_original, np.log1p(np.exp(eta_)))
Example #23
    def _log_hazard(self, params, T, *Xs):
        alpha_params = params[self._LOOKUP_SLICE["alpha_"]]
        log_alpha_ = np.dot(Xs[0], alpha_params)
        alpha_ = np.exp(log_alpha_)

        beta_params = params[self._LOOKUP_SLICE["beta_"]]
        log_beta_ = np.dot(Xs[1], beta_params)
        beta_ = np.exp(log_beta_)

        return log_beta_ - log_alpha_ + np.expm1(log_beta_) * (
            np.log(T) - log_alpha_) - np.log1p((T / alpha_)**beta_)
Example #24
def logaddexp(logA, logB):
    """
        Given the log of two matrices A and B as logA and logB, carries out A + B in log space
    """

    maxN = np.maximum(logA, logB)
    minN = np.minimum(logA, logB)

    C = np.log1p(np.exp(minN - maxN)) + maxN

    return C
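
This is the same computation NumPy exposes as np.logaddexp; a quick elementwise cross-check (a sketch, assuming numpy is imported as np):

import numpy as np

logA = np.log(np.array([1e-3, 2.0, 7.5]))
logB = np.log(np.array([5.0, 2.0, 3e-8]))
print(np.allclose(logaddexp(logA, logB), np.logaddexp(logA, logB)))  # expected: True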
Example #25
def softplus_stable(x, bias=None, dt=1.0):

    # fall back to x when no bias is given, so that inp is always defined
    inp = x + bias if bias is not None else x

    f = np.log1p(np.exp(inp)) * dt
    logf = np.log(f)
    df = np.exp(inp) / (1.0 + np.exp(inp)) * dt
    ddf = np.exp(inp) / (1.0 + np.exp(inp))**2 * dt

    return f, logf, df, ddf
Example #26
def ll(x, num_peds, ess, robot_mu_x, robot_mu_y, \
       cov_robot_x, cov_robot_y, inv_cov_robot_x, inv_cov_robot_y, \
       cov_ped_x, cov_ped_y, inv_cov_ped_x, inv_cov_ped_y, \
       one_over_cov_sum_x, one_over_cov_sum_y, normalize):
    T = np.size(robot_mu_x)

    quad_robot_mu_x = np.dot((x[:T]-robot_mu_x).T, np.dot(inv_cov_robot_x, \
                                                                x[:T]-robot_mu_x))
    quad_robot_mu_y = np.dot((x[T:2*T]-robot_mu_y).T, np.dot(inv_cov_robot_y, \
                                                             x[T:2*T]-robot_mu_y))
    llambda = -0.5 * quad_robot_mu_x - 0.5 * quad_robot_mu_y

    n = 2
    for ped in range(ess):
        quad_ped_mu_x = np.dot((x[n*T:(n+1)*T]-ped_mu_x[ped]).T, np.dot(\
                                inv_cov_ped_x[ped], x[n*T:(n+1)*T]-ped_mu_x[ped]))
        quad_ped_mu_y = np.dot((x[(n+1)*T:(n+2)*T]-ped_mu_y[ped]).T, np.dot(\
                            inv_cov_ped_y[ped], x[(n+1)*T:(n+2)*T]-ped_mu_y[ped]))
        llambda = llambda - 0.5 * quad_ped_mu_x - 0.5 * quad_ped_mu_y
        n = n + 2

    n = 2
    for ped in range(ess):
        # if normalize == True:
        #   # normalize_x = np.multiply(np.power(2*np.pi,-0.5), \
        # one_over_std_sum_x[ped])
        #   # normalize_y = np.multiply(np.power(2*np.pi,-0.5), \
        # one_over_std_sum_y[ped])
        # else:
        normalize_x = 1.
        normalize_y = 1.

        vel_x = np.tile(x[:T], (T, 1)).T - np.tile(x[n * T:(n + 1) * T],
                                                   (T, 1))
        vel_y = np.tile(x[T:2 * T],
                        (T, 1)).T - np.tile(x[(n + 1) * T:(n + 2) * T], (T, 1))
        n = n + 2

        vel_x_2 = np.power(vel_x, 2)
        vel_y_2 = np.power(vel_y, 2)

        quad_robot_ped_x = np.multiply(vel_x_2, one_over_cov_sum_x[ped])
        quad_robot_ped_y = np.multiply(vel_y_2, one_over_cov_sum_y[ped])

        Z_x = np.multiply(normalize_x, np.exp(-0.5 * quad_robot_ped_x))
        Z_y = np.multiply(normalize_y, np.exp(-0.5 * quad_robot_ped_y))

        Z = np.multiply(Z_x, Z_y)

        log_znot_norm = np.sum(np.log1p(-Z))

        llambda = llambda + log_znot_norm
    return -1. * llambda
Example #27
def _log_logistic_sigmoid(x_real):
    ''' Compute log of logistic sigmoid transform from real line to unit interval.

    Numerically stable and fully vectorized.

    Args
    ----
    x_real : array-like, with values in (-infty, +infty)

    Returns
    -------
    log_p_real : array-like, size of x_real, with values in <= 0
    '''
    if not isinstance(x_real, float):
        out = np.zeros_like(x_real)
        mask1 = x_real > 50.0
        out[mask1] = - np.log1p(np.exp(-x_real[mask1]))
        mask0 = np.logical_not(mask1)
        out[mask0] = x_real[mask0]
        out[mask0] -= np.log1p(np.exp(x_real[mask0]))
        return out
    return _log_logistic_sigmoid_not_vectorized(x_real)
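
For array inputs the result should agree with SciPy's log-sigmoid. A cross-check sketch (log_expit is available in recent SciPy releases):

import numpy as np
from scipy.special import log_expit

x = np.array([-100.0, -1.0, 0.0, 1.0, 100.0])
print(np.allclose(_log_logistic_sigmoid(x), log_expit(x)))  # expected: True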
Example #28
def multivariate_studentst_logpdf(data, mus, Sigmas, nus, Ls=None):
    """
    Compute the log probability density of a multivariate Student's t distribution.
    This will broadcast as long as data, mus, Sigmas, nus have the same (or at
    least be broadcast compatible along the) leading dimensions.

    Parameters
    ----------
    data : array_like (..., D)
        The points at which to evaluate the log density

    mus : array_like (..., D)
        The mean(s) of the t distribution(s)

    Sigmas : array_like (..., D, D)
        The covariances(s) of the t distribution(s)

    nus : array_like (...,)
        The degrees of freedom of the t distribution(s)

    Ls : array_like (..., D, D)
        Optionally pass in the Cholesky decomposition of Sigmas

    Returns
    -------
    lps : array_like (...,)
        Log probabilities under the multivariate Gaussian distribution(s).
    """
    # Check inputs
    D = data.shape[-1]
    assert mus.shape[-1] == D
    assert Sigmas.shape[-2] == Sigmas.shape[-1] == D
    if Ls is not None:
        assert Ls.shape[-2] == Ls.shape[-1] == D
    else:
        Ls = np.linalg.cholesky(Sigmas)  # (..., D, D)

    # Quadratic term
    q = batch_mahalanobis(Ls, data - mus) / nus  # (...,)
    lp = -0.5 * (nus + D) * np.log1p(q)  # (...,)

    # Normalizer
    lp = lp + gammaln(0.5 * (nus + D)) - gammaln(0.5 * nus)  # (...,)
    lp = lp - 0.5 * D * np.log(np.pi) - 0.5 * D * np.log(nus)  # (...,)
    L_diag = np.reshape(Ls, Ls.shape[:-2] + (-1, ))[..., ::D + 1]  # (..., D)
    half_log_det = np.sum(np.log(abs(L_diag)), axis=-1)  # (...,)
    lp = lp - half_log_det

    return lp
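
When SciPy is available, the density can be sanity-checked against scipy.stats.multivariate_t. A sketch, assuming the snippet's own imports (autograd.numpy as np, gammaln, batch_mahalanobis) are in scope:

from scipy import stats

D = 3
mu, nu = np.zeros(D), 5.0
Sigma = np.eye(D)
y = np.ones(D)
print(np.allclose(multivariate_studentst_logpdf(y, mu, Sigma, nu),
                  stats.multivariate_t(loc=mu, shape=Sigma, df=nu).logpdf(y)))  # expected: True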
Example #29
 def log_prior(self, x, w):
     ''' 
         Returns log(p(Y|x,w))
     '''
     n = x.shape[0]
     if self.prior_model == "logistic_regression":
         negative_energy = np.dot(x, w)
         return np.vstack(
             (-np.log1p(np.exp(negative_energy)) * np.ones(x.shape[0]),
              negative_energy - np.log1p(np.exp(negative_energy)))).T
     elif self.prior_model == "mlp":
         cur_idx = self.n_features * self.hidden_layer_sizes[0]
         wi = w[:cur_idx].reshape(
             (self.n_features, self.hidden_layer_sizes[0]))
         bi = w[cur_idx:cur_idx + self.hidden_layer_sizes[0]]
         ho = np.dot(x, wi) + bi
         hi = np.tanh(ho)
         cur_idx += self.hidden_layer_sizes[0]
         for i in range(len(self.hidden_layer_sizes) - 1):
             wi = w[cur_idx:cur_idx + self.hidden_layer_sizes[i] *
                    self.hidden_layer_sizes[i + 1]].reshape(
                        (self.hidden_layer_sizes[i],
                         self.hidden_layer_sizes[i + 1]))
             cur_idx += self.hidden_layer_sizes[
                 i] * self.hidden_layer_sizes[i + 1]
             bi = w[cur_idx:cur_idx + self.hidden_layer_sizes[i + 1]]
             cur_idx += self.hidden_layer_sizes[i + 1]
             ho = np.dot(hi, wi) + bi
             hi = np.tanh(ho)
             # cur_idx = cur_idx+self.n_hidden_units**2
         negative_energy = np.dot(hi, w[cur_idx:-1]) + w[-1]
         negative_energy = np.vstack((np.zeros(n), negative_energy)).T
         return negative_energy - logsumexp(negative_energy,
                                            axis=1).reshape((n, 1))
     else:
         raise ValueError("Invalid prior model: %s" % self.prior_model)
Example #30
def simulate_ramping(beta=np.linspace(-0.02, 0.02, 5),
                     w2=3e-3,
                     x0=0.5,
                     C=40,
                     T=100,
                     bin_size=0.01):

    NC = 5  # number of trial types
    cohs = np.arange(NC)
    trial_cohs = np.repeat(cohs, int(T / NC))
    tr_lengths = np.random.randint(50, size=(T)) + 50
    us = []
    xs = []
    zs = []
    ys = []
    for t in range(T):
        tr_coh = trial_cohs[t]
        betac = beta[tr_coh]

        tr_length = tr_lengths[t]
        x = np.zeros(tr_length)
        z = np.zeros(tr_length)
        x[0] = x0 + np.sqrt(w2) * npr.randn()
        z[0] = 0
        for i in np.arange(1, tr_length):

            if x[i - 1] >= 1.0:
                x[i] = 1.0
                z[i] = 1
            else:
                x[i] = np.min(
                    (1.0, x[i - 1] + betac + np.sqrt(w2) * npr.randn()))
                if x[i] >= 1.0:
                    z[i] = 1
                else:
                    z[i] = 0

        y = npr.poisson(np.log1p(np.exp(C * x)) * bin_size)

        u = np.tile(one_hot(tr_coh, 5), (tr_length, 1))
        us.append(u)
        xs.append(x.reshape((tr_length, 1)))
        zs.append(z.reshape((tr_length, 1)))
        ys.append(y.reshape((tr_length, 1)))

    return ys, xs, zs, us, tr_lengths, trial_cohs
Example #31
    def get_log_lik(self):
        beta = self.glmm_par_draw['beta'].get()
        u = self.glmm_par_draw['u'].get()
        mu = self.glmm_par_draw['mu'].get()
        tau = self.glmm_par_draw['tau'].get()
        log_tau = np.log(tau)

        log_lik = 0.

        # Log likelihood from data.
        z = u[self.y_g_vec] + np.matmul(self.x_mat, beta)
        log_lik += np.sum(self.y_vec * z - np.log1p(np.exp(z)))

        # Log likelihood from random effect terms.
        log_lik += -0.5 * tau * np.sum((mu - u) ** 2) + 0.5 * log_tau * len(u)

        return log_lik
Example #32
def get_e_logistic_term(y, z_mean, z_sd, std_draws):
    assert z_sd.ndim == y.ndim
    assert z_mean.ndim == y.ndim

    # The last axis will be the standard draws axis.
    draws_axis = z_sd.ndim
    z_draws = \
        np.expand_dims(z_sd, axis=draws_axis) * std_draws + \
        np.expand_dims(z_mean, axis=draws_axis)

    # By dividing by the number of standard draws after summing,
    # we add the sample means for all the observations.
    # Note that
    # log(1 - p) = log(1 / (1 + exp(z))) = -log(1 + exp(z))
    logit_term = \
        np.sum(np.log1p(np.exp(z_draws))) / std_draws.size
    return np.sum(y * z_mean) - logit_term
Example #33
def ll(x, T, robot_mu_x, \
        ped_mu_x, ped_mu_y, \
        inv_cov_robot_x, inv_cov_robot_y, \
        inv_cov_ped_x, inv_cov_ped_y, \
        one_over_cov_sum_x, one_over_cov_sum_y, normalize):
    quad_robot_mu_x = np.dot((x[:T]-robot_mu_x).T, np.dot(\
                                               inv_cov_robot_x, x[:T]-robot_mu_x))
    quad_robot_mu_y = np.dot((x[T:2*T] - robot_mu_y).T, np.dot( \
                                          inv_cov_robot_y, x[T:2*T] - robot_mu_y))
    llambda = -0.5 * quad_robot_mu_x - 0.5 * quad_robot_mu_y

    n = 2
    quad_ped_mu_x = np.dot((x[n*T:(n+1)*T]-ped_mu_x).T, np.dot(inv_cov_ped_x,\
                                                         x[n*T:(n+1)*T]-ped_mu_x))
    quad_ped_mu_y = np.dot((x[(n+1)*T:(n+2)*T]-ped_mu_y).T, np.dot(inv_cov_ped_y,\
                                                     x[(n+1)*T:(n+2)*T]-ped_mu_y))
    llambda = llambda - 0.5 * quad_ped_mu_x - 0.5 * quad_ped_mu_y

    n = 2
    # if normalize == True:
    #   normalize_x = np.multiply(np.power(2*np.pi, -0.5), \
    #                                                   np.diag(one_over_std_sum_x))
    #   normalize_y = np.multiply(np.power(2*np.pi, -0.5), \
    #                                                   np.diag(one_over_std_sum_y))
    # else:
    normalize_x = 1.
    normalize_y = 1.

    vel_x = x[:T] - x[n * T:(n + 1) * T]
    vel_y = x[T:2 * T] - x[(n + 1) * T:(n + 2) * T]
    vel_x_2 = np.power(vel_x, 2)
    vel_y_2 = np.power(vel_y, 2)

    quad_robot_ped_x = np.multiply(vel_x_2, np.diag(one_over_cov_sum_x))
    quad_robot_ped_y = np.multiply(vel_y_2, np.diag(one_over_cov_sum_y))

    Z_x = np.multiply(normalize_x, np.exp(-0.5 * quad_robot_ped_x))
    Z_y = np.multiply(normalize_y, np.exp(-0.5 * quad_robot_ped_y))

    Z = np.multiply(Z_x, Z_y)

    log_znot_norm = np.sum(np.log1p(-Z))

    llambda = llambda + log_znot_norm
    return -1. * llambda
Example #34
 def _cumulative_hazard(self, params, times):
     alpha_, beta_ = params
     return np.log1p((times / alpha_) ** beta_)
Example #35
def test_log1p():
    fun = lambda x : 3.0 * np.log1p(x)
    d_fun = grad(fun)
    check_grads(fun, abs(npr.randn()))
    check_grads(d_fun, abs(npr.randn()))
Example #36
    def _log_hazard(self, params, T, *Xs):
        alpha_params = params[self._LOOKUP_SLICE["alpha_"]]
        log_alpha_ = np.dot(Xs[0], alpha_params)
        alpha_ = np.exp(log_alpha_)

        beta_params = params[self._LOOKUP_SLICE["beta_"]]
        log_beta_ = np.dot(Xs[1], beta_params)
        beta_ = np.exp(log_beta_)

        return log_beta_ - log_alpha_ + np.expm1(log_beta_) * (np.log(T) - log_alpha_) - np.log1p((T / alpha_) ** beta_)
Example #37
 def _log_1m_sf(self, params, times):
     alpha_, beta_ = params
     return -np.log1p((times / alpha_) ** -beta_)
Example #38
def test_log1p():
    fun = lambda x : 3.0 * np.log1p(x)
    check_grads(fun)(abs(npr.randn()))