Code Example #1
    def test_evaluate_L_diagonal(self):
        """ checking np.diag(dpp.L) = utils.evaluate_L_diagonal(eval_L, X_data)
        """

        X = rndm.randn(100, 20)

        np.testing.assert_almost_equal(
            np.diag(utils.example_eval_L_linear(X)),
            utils.evaluate_L_diagonal(utils.example_eval_L_linear, X))

        X = rndm.rand(100, 1)

        np.testing.assert_almost_equal(
            np.diag(utils.example_eval_L_min_kern(X)),
            utils.evaluate_L_diagonal(utils.example_eval_L_min_kern, X))

        pass
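The test above assumes import numpy as np, import numpy.random as rndm and dppy's utils module are in scope at module level. For reference, a minimal sketch of the behaviour being checked (an illustrative assumption, not necessarily dppy's actual implementation): evaluate_L_diagonal should return only the diagonal entries L_ii = eval_L(x_i, x_i), without materializing the full n x n likelihood matrix.

import numpy as np

def evaluate_L_diagonal_reference(eval_L, X_data):
    # evaluate the kernel one point at a time and keep only L_ii
    return np.array([eval_L(x[None, :]).item() for x in X_data])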
Code Example #2
    def __create_lambda_acc_dictionary(self, X_data, eval_L, lam, rng):
        n = X_data.shape[0]
        rls_oversample = 5
        diag_norm = np.asarray(evaluate_L_diagonal(eval_L, X_data))
        init_rls_estimate = diag_norm / (diag_norm + lam)

        selected_init = rng.rand(n) <= rls_oversample * init_rls_estimate

        # force at least one sample to be selected
        selected_init[0] = 1

        return CentersDictionary(idx=selected_init.nonzero()[0],
                                 X=X_data[selected_init, :],
                                 probs=np.ones(np.sum(selected_init)) * init_rls_estimate[selected_init],
                                 lam=n,
                                 rls_oversample=rls_oversample)
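Since rng.rand(n) is uniform on [0, 1), the selection rule above keeps point i independently with probability min(1, rls_oversample * l_i), where l_i = L_ii / (L_ii + lam) is a crude ridge-leverage-score estimate based only on the kernel diagonal. A toy worked example (numbers are made up; lam = 1.0 and rls_oversample = 5 as in the code):

import numpy as np

diag_L = np.array([0.25, 0.01])                      # hypothetical kernel diagonal
init_rls_estimate = diag_L / (diag_L + 1.0)          # [0.2, ~0.0099]
keep_prob = np.minimum(1.0, 5 * init_rls_estimate)   # [1.0, ~0.0495]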
Code Example #3
def estimate_rls_from_weighted_dict_eigendecomp(X_to_estimate, eval_L,
                                                dict_alphadpp, eigvec, eigvals,
                                                alpha_hat):
    """ Given embedded points, and a decomposition of embedded covariance matrix, estimate RLS.
    Note that this is a different estimator than the one used in BLESS (i.e. :func:`dppy.bless.estimate_rls_bless`),
    which we use here for efficiency because we can recycle already embedded points and eigen-decomposition.

    :param array_like eigvec: eigenvectors of I_A_mm = B_bar_T*B_bar_T.T + lam I, see :func:`vfx_sampling_precompute_constants`
    :param array_like eigvals: eigenvalues of I_A_mm = B_bar_T*B_bar_T.T + lam I, see :func:`vfx_sampling_precompute_constants`
    :param array_like B_bar_T: (m x n) transposed matrix of n points embedded using a dictionary with m centers
    :param array_like diag_L: diagonal of L
    :param array_like diag_L_hat: diagonal of L_hat, the Nystrom approximation of L
    :param float alpha_hat: a rescaling factor used to adjust the expected size of the DPP sample
    :return: RLS estimates for all rows in B_bar_T
    :rtype:
        array_like
    """
    W_sqrt = (1.0 / np.sqrt(dict_alphadpp.probs).reshape(-1, 1))
    m = W_sqrt.shape[0]

    if not (eigvec.shape[0] == m):
        raise ValueError(
            'Input eigendecomposition has wrong shape: {} {} {}'.format(
                m, eigvec.shape, eigvals.shape))

    diag_L_to_estimate = evaluate_L_diagonal(eval_L, X_to_estimate)
    L_DX = eval_L(dict_alphadpp.X, X_to_estimate)
    L_DX *= W_sqrt
    E = eigvec.T.dot(L_DX)
    E *= np.sqrt(1.0 / (alpha_hat * eigvals + 1.0)).reshape(-1, 1)

    rls_estimate = alpha_hat * (diag_L_to_estimate -
                                alpha_hat * np.square(E, out=E).sum(axis=0))

    if not np.all(rls_estimate >= 0.0):
        raise ValueError(
            'Some estimated RLS is negative, this should never happen. '
            'Min prob: {}'.format(np.min(rls_estimate)))

    return rls_estimate
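In matrix form, the routine above is a Nystrom plug-in estimate of the exact ridge leverage scores diag(alpha * L (alpha * L + I)^-1). The dense sketch below (illustration only; it builds the matrices the routine deliberately avoids, and assumes dict_alphadpp, eval_L, X_to_estimate and alpha_hat as in the signature) spells out the quantity being computed:

import numpy as np

Dw = np.diag(1.0 / np.sqrt(dict_alphadpp.probs))             # W^{1/2}
L_hat = Dw @ eval_L(dict_alphadpp.X, dict_alphadpp.X) @ Dw   # weighted Nystrom core
B = eval_L(X_to_estimate, dict_alphadpp.X) @ Dw              # (n x m) weighted cross-kernel
M = np.linalg.inv(alpha_hat * L_hat + np.eye(L_hat.shape[0]))
rls_dense = alpha_hat * (evaluate_L_diagonal(eval_L, X_to_estimate)
                         - alpha_hat * np.einsum('ij,jk,ik->i', B, M, B))
# rls_dense should agree (up to eigenvalue filtering) with the output of
# estimate_rls_from_weighted_dict_eigendecomp on small data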
Code Example #4
def alpha_dpp_sampling_precompute_constants(X_data,
                                            eval_L,
                                            rng,
                                            desired_expected_size=None,
                                            rls_oversample_alphadpp=4.0,
                                            rls_oversample_bless=4.0,
                                            nb_iter_bless=None,
                                            verbose=True,
                                            **kwargs):
    """Pre-compute quantities necessary for the alpha-dpp rejection sampling loop, such as the
    inner Nystrom approximation, and the initial rescaling alpha_hat for the binary search.
        :param array_like X_data: dataset such that L = eval_L(X_data), out of which we are sampling objects according
        to a DPP
        :param callable eval_L: likelihood function. Given two sets of n points X and m points Y, eval_L(X, Y) should
        compute the (n x m) matrix containing the likelihood between points. The function should also
        accept a single argument X and return eval_L(X) = eval_L(X, X).
        As an example, see the implementation of any of the kernels provided by scikit-learn
        (e.g. sklearn.gaussian_process.kernels.PairwiseKernel).
        :param np.random.RandomState rng: random source used for sampling
        :param desired_expected_size: desired expected sample size for the DPP. If None, use the natural DPP expected
        sample size. The alpha DPP sampling algorithm can approximately adjust the expected sample size of the DPP by
        rescaling the L matrix with a scalar alpha_star <= 1. Adjusting the expected sample size can be useful to
        control downstream complexity, and it is necessary to improve the probability of drawing a sample with
        exactly k elements when using alpha-dpp for k-DPP sampling. Currently only reducing the sample size is supported,
        and the sampler will raise an exception if the DPP already has a natural expected sample size
        smaller than desired_expected_size.
        :type desired_expected_size:
            float or None, default None
        :param rls_oversample_alphadpp: Oversampling parameter used to construct alpha_dpp's internal Nystrom
        approximation. The rls_oversample_alphadpp >= 1 parameter is used to increase the rank of the approximation by
        a rls_oversample_alphadpp factor. This makes each rejection round slower and more memory intensive,
        but reduces variance and the number of rounds of rejections, so the actual runtime might increase or decrease.
        Empirically, a small factor rls_oversample_alphadpp = [2,10] seems to work. It is suggested to start with
        a small number and increase if the algorithm fails to terminate.
        :type rls_oversample_alphadpp:
            float, default 4.0
        :param rls_oversample_bless: Oversampling parameter used during bless's internal Nystrom approximation.
        Note that this is a different Nystrom approximation than the one related to :func:`rls_oversample_alphadpp`,
        and can be tuned separately.
        The rls_oversample_bless >= 1 parameter is used to increase the rank of the approximation by
        a rls_oversample_bless factor. This makes the one-time pre-processing slower and more memory intensive,
        but reduces variance and the number of rounds of rejections, so the actual runtime might increase or decrease.
        Empirically, a small factor rls_oversample_bless = [2,10] seems to work. It is suggested to start with
        a small number and increase if the algorithm fails to terminate or is not accurate.
        :type rls_oversample_bless:
            float, default 4.0
        :param int nb_iter_bless: number of iterations for BLESS; if None, it is set to log(n)
        :type nb_iter_bless:
            int or None, default None
        :param bool verbose: controls verbosity of debug output, including progress bars.
        The progress bar reports the inner execution of the bless algorithm, showing:
            - lam: lambda value of the current iteration (where lambda = 1/alpha)
            - m: current size of the dictionary (number of centers contained)
            - m_expected: expected size of the dictionary before sampling
            - probs_dist: (mean, max, min) of the approximate rlss at the current iteration
        :return: Pre-computed information necessary for the alpha-dpp rejection sampling loop with fields
        - result.alpha_hat: estimate of the optimal rescaling such that the expected sample size of DPP(alpha_hat * L)
        is equal to a user-indicated constant desired_expected_size, or 1.0 if no such constant was specified
        by the user. It is used to initialize the binary search when sampling from a k-DPP
        - result.alpha_min: lower bound on the optimal rescaling to be used in the binary search when sampling from
        a k-DPP, or alpha_hat if desired_expected_size is None and no search is going to be performed.
        - result.alpha_max: upper bound on the optimal rescaling to be used in the binary search when sampling from
        a k-DPP, or alpha_hat if desired_expected_size is None and no search is going to be performed.
        - result.k: size of the k-DPP to be used in the sampling loop, or -1 if the precomputation is done for a random
        size DPP.
        - result.eigvals_L_hat: eigenvalues of the L_hat matrix, to be used in the RLS Nystrom
        approximation and when computing acceptance probabilities.
        - result.eigvecs_L_hat: eigenvectors of the L_hat matrix, used together with result.eigvals_L_hat.
        - result.deff_alpha_L_hat: approximations of the expected sample size of DPP(alpha_star * L) to be used in
        the sampling loop. For more details see [CaDeVa20]
        - result.diag_L: pre-computed diagonal of the L matrix to be used as an upper bound of the marginal inclusion
        probabilities. Can be replaced with any known bound on the diagonal entries
        - result.rls_upper_bound: a vector containing upper bounds for the ridge leverage scores (RLS), a.k.a.
        the marginals of the DPP. These are either computed using the diagonal of the L matrix, or some estimate
        based on Nystrom approximation.
        - result.rls_upper_bound_valid: a boolean vector indicating whether the upper bound is considered tight enough
        to be valid, or should be recomputed. A bound starts as invalid when it is set to a multiple of the diagonal of
        L, and is then made valid when it is estimated using the Nystrom approximation. Afterward, the bound is never
        recomputed unless a loss of accuracy happens, which mostly triggers when the alpha rescaling is changed
        and the marginals must be re-estimated.
        - result.r: placeholder r constant used for alpha-dpp sampling, to be replaced by the user before the sampling loop
        - result.dict_alphadpp: pre-computed dictionary used to generate L_hat. The full object is kept around to
        have access to the inclusion probabilities used when constructing L_hat.
        - result.alpha_switches: number of times the alpha parameter has been changed during the binary search. This is
        included for debugging purposes and initialized to 0, to be later updated in the sampling loop.
        - result.trial_to_first_sample: number of trials until the first valid sample is generated. This is included
        for debugging purposes and initialized to 0, to be later updated in the sampling loop. Each trial corresponds
        to an alpha-dpp sample in the binary search procedure, and can account for many rejections. Multiple trials
        are necessary before deciding to switch the alpha value in the binary search.
        - result.rej_to_first_sample: number of total rejections until the first valid sample is generated. This is included
        for debugging purposes and initialized to 0, to be later updated in the sampling loop.
        :rtype: _IntermediateSampleInfoAlphaRescale

    """
    diag_L = evaluate_L_diagonal(eval_L, X_data)

    # Phase 0: compute initial dictionary D_bless with small rls_oversample_bless
    # D_bless is used only to estimate all RLS

    if desired_expected_size is None:
        dict_bless = bless(X_data,
                           eval_L,
                           1.0,
                           rls_oversample_bless,
                           rng,
                           nb_iter_bless=nb_iter_bless,
                           verbose=verbose)
    else:
        lam_max, lam_min, dict_bless = bless_size(X_data,
                                                  eval_L,
                                                  desired_expected_size,
                                                  rls_oversample_bless,
                                                  rng,
                                                  nb_iter_bless=nb_iter_bless,
                                                  verbose=verbose)

    # Phase 1: use estimate RLS to sample the dict_alphadpp dictionary, i.e. the one used to construct A
    # here theory says that to have high acceptance probability we need the oversampling factor to be ~deff^2
    # but even with constant oversampling factor we seem to accept fast

    dict_alphadpp = reduce_lambda(
        X_data,
        eval_L,
        dict_bless,
        dict_bless.lam,
        rng,
        rls_oversample_parameter=rls_oversample_alphadpp)

    # Phase 2: pre-compute L_hat, det(I + L_hat), etc.
    L_DD = eval_L(dict_alphadpp.X, dict_alphadpp.X)

    W_sqrt = (1.0 / np.sqrt(dict_alphadpp.probs)).reshape(-1, 1)

    L_hat = W_sqrt.T * L_DD * W_sqrt
    eigvals_L_hat, eigvecs_L_hat = np.linalg.eigh(L_hat)

    eigvecs_L_hat, eigvals_L_hat = stable_filter(eigvecs_L_hat, eigvals_L_hat)

    rls_estimate = estimate_rls_from_weighted_dict_eigendecomp(
        dict_alphadpp.X, eval_L, dict_alphadpp, eigvecs_L_hat, eigvals_L_hat,
        1.0 / dict_alphadpp.lam)

    natural_expected_size = np.sum(rls_estimate / dict_alphadpp.probs)

    if not natural_expected_size >= 0.0:
        raise ValueError(
            'natural_expected_size is negative, this should never happen. '
            'natural_expected_size: {}'.format(natural_expected_size))

    # the natural expected size might be too large, but we can rescale L to shrink it:
    # if we rescale L by a constant alpha, the expected sample size becomes
    # deff(alpha) = sum_i alpha * eig_i / (alpha * eig_i + 1) = sum_i (1 - 1 / (alpha * eig_i + 1)),
    # which we approximate below using the eigenvalues of L_hat
    if desired_expected_size is None:
        alpha_hat = 1.0
    elif natural_expected_size <= desired_expected_size:
        raise ValueError(
            'The expected sample size is smaller than the desired sample size or k (if sampling from '
            'a k-DPP).\n'
            'This is unusual (i.e. you are trying to select more than the overall amount of diversity '
            'in your set).\n'
            'Increasing the expected sample size is currently not supported (only decreasing).\n'
            'Please consider decreasing your k={} or changing L.'
            ' Estimated mean cardinality: {}'.format(desired_expected_size,
                                                     natural_expected_size))
    else:
        # since this is monotone in alpha, we can simply use Brent's algorithm (bisection + tricks)
        # it is a root-finding algorithm, so we build a function that is zero when the expected size
        # equals desired_expected_size
        def temp_func_with_root_in_desired_expected_size(x):
            return np.sum(1.0 - 1.0 /
                          (x * eigvals_L_hat + 1.0)) - desired_expected_size

        alpha_hat, opt_result = brentq(
            temp_func_with_root_in_desired_expected_size,
            a=10.0 * np.finfo(float).eps,
            b=4.0,
            full_output=True)

        if not opt_result.converged:
            raise ValueError(
                'Could not find an appropriate rescaling for desired_expected_size.'
                '(Flag, Iter, Root): {}'.format(
                    (opt_result.flag, opt_result.iterations, opt_result.root)))
        elif alpha_hat > 1.0:
            raise ValueError(
                'The rescaling factor alpha_hat is larger than 1 (i.e. we would need to increase the expected sample size).'
                ' Increasing the expected sample size is currently not supported (only decreasing).\n'
                'Please consider decreasing your k={} or changing L.'
                ' alpha_hat: {}'.format(desired_expected_size, alpha_hat))

    deff_alpha_L_hat = np.sum(1.0 - 1.0 / (alpha_hat * eigvals_L_hat + 1.0))

    if desired_expected_size is None:
        alpha_min, alpha_max, k = alpha_hat, alpha_hat, -1
    else:
        alpha_min, alpha_max, k = 1.0 / lam_max, 1.0 / lam_min, desired_expected_size

    result = _IntermediateSampleInfoAlphaRescale(
        alpha_hat=alpha_hat,
        alpha_min=alpha_min,
        alpha_max=alpha_max,
        k=k,
        eigvals_L_hat=eigvals_L_hat,
        eigvecs_L_hat=eigvecs_L_hat,
        deff_alpha_L_hat=deff_alpha_L_hat,
        diag_L=diag_L,
        rls_upper_bound=alpha_hat * diag_L,
        rls_upper_bound_valid=np.full((diag_L.shape[0], ), False),
        r=-1,
        dict_alphadpp=dict_alphadpp,
        alpha_switches=0,
        trial_to_first_sample=0,
        rej_to_first_sample=0)

    return result
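A usage sketch for the precompute step above, assuming the helpers it calls (evaluate_L_diagonal, bless, bless_size, reduce_lambda, stable_filter, brentq from scipy.optimize and the _IntermediateSampleInfoAlphaRescale container) are available as in dppy's source module. The kernel follows the eval_L contract from the docstring, using scikit-learn's PairwiseKernel as suggested there:

import numpy as np
from sklearn.gaussian_process.kernels import PairwiseKernel

rng = np.random.RandomState(0)
X_data = rng.randn(500, 5)
eval_L = PairwiseKernel(metric='rbf')   # eval_L(X) == eval_L(X, X), as required

info = alpha_dpp_sampling_precompute_constants(X_data,
                                               eval_L,
                                               rng,
                                               desired_expected_size=10)
print(info.alpha_hat, info.deff_alpha_L_hat, info.k)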
Code Example #5
def vfx_sampling_precompute_constants(X_data,
                                      eval_L,
                                      rng,
                                      desired_expected_size=None,
                                      rls_oversample_dppvfx=4.0,
                                      rls_oversample_bless=4.0,
                                      nb_iter_bless=None,
                                      verbose=True):
    """Pre-compute quantities necessary for the vfx rejection sampling loop, such as the inner Nystrom approximation,
    and the RLS of all elements in L.

        :param array_like X_data: dataset such that L = eval_L(X_data), out of which we are sampling objects according
        to a DPP
        :param callable eval_L: likelihood function. Given two sets of n points X and m points Y, eval_L(X, Y) should
        compute the (n x m) matrix containing the likelihood between points. The function should also
        accept a single argument X and return eval_L(X) = eval_L(X, X).
        As an example, see the implementation of any of the kernels provided by scikit-learn
        (e.g. sklearn.gaussian_process.kernels.PairwiseKernel).
        :param np.random.RandomState rng: random source used for sampling
        :param desired_expected_size: desired expected sample size for the DPP. If None, use the natural DPP expected
        sample size. The vfx sampling algorithm can approximately adjust the expected sample size of the DPP by
        rescaling the L matrix with a scalar alpha_star <= 1. Adjusting the expected sample size can be useful to
        control downstream complexity, and it is necessary to improve the probability of drawing a sample with
        exactly k elements when using vfx for k-DPP sampling. Currently only reducing the sample size is supported,
        and the sampler will raise an exception if the DPP already has a natural expected sample size
        smaller than desired_expected_size.
        :type desired_expected_size:
            float or None, default None
        :param rls_oversample_dppvfx: Oversampling parameter used to construct dppvfx's internal Nystrom approximation.
        The rls_oversample_dppvfx >= 1 parameter is used to increase the rank of the approximation by
        a rls_oversample_dppvfx factor. This makes each rejection round slower and more memory intensive,
        but reduces variance and the number of rounds of rejections, so the actual runtime might increase or decrease.
        Empirically, a small factor rls_oversample_dppvfx = [2,10] seems to work. It is suggested to start with
        a small number and increase if the algorithm fails to terminate.
        :type rls_oversample_dppvfx:
            float, default 4.0
        :param rls_oversample_bless: Oversampling parameter used during bless's internal Nystrom approximation.
        Note that this is a different Nystrom approximation than the one related to :func:`rls_oversample_dppvfx`,
        and can be tuned separately.
        The rls_oversample_bless >= 1 parameter is used to increase the rank of the approximation by
        a rls_oversample_bless factor. This makes the one-time pre-processing slower and more memory intensive,
        but reduces variance and the number of rounds of rejections, so the actual runtime might increase or decrease.
        Empirically, a small factor rls_oversample_bless = [2,10] seems to work. It is suggested to start with
        a small number and increase if the algorithm fails to terminate or is not accurate.
        :type rls_oversample_bless:
            float, default 4.0
        :param int nb_iter_bless: number of iterations for BLESS; if None, it is set to log(n)
        :type nb_iter_bless:
            int or None, default None
        :param bool verbose: controls verbosity of debug output, including progress bars.
        The progress bar reports the inner execution of the bless algorithm, showing:
            - lam: lambda value of the current iteration
            - m: current size of the dictionary (number of centers contained)
            - m_expected: expected size of the dictionary before sampling
            - probs_dist: (mean, max, min) of the approximate rlss at the current iteration

        :return: Pre-computed information necessary for the vfx rejection sampling loop with fields
        - result.alpha_star: appropriate rescaling such that the expected sample size of DPP(alpha_star * L) is equal
        to a user-indicated constant desired_expected_size, or 1.0 if no such constant was specified by the user.
        - result.logdet_I_A: log determinant of the Nystrom approximation of L + I
        - result.q: placeholder q constant used for vfx sampling, to be replaced by the user before the sampling loop
        - result.s and result.z: approximations of the expected sample size of DPP(alpha_star * L) to be used in
        the sampling loop. For more details see [DeCaVa19]
        - result.rls_estimate: approximations of the RLS of all elements in X (i.e. in L)
        - result.rej_to_first_sample: number of total rejections until first valid sample is generated. This is included
        for debugging purposes and initialized to 0, to be later updated in the sampling loop.
        :rtype: _IntermediateSampleInfo

    """
    diag_L = evaluate_L_diagonal(eval_L, X_data)
    trace_L = diag_L.sum()

    # Phase 0: compute initial dictionary D_bless with small rls_oversample_bless
    # D_bless is used only to estimate all RLS

    dict_bless = bless(X_data,
                       eval_L,
                       1.0,
                       rls_oversample_bless,
                       rng,
                       nb_iter_bless=nb_iter_bless,
                       verbose=verbose)

    # Phase 1: use estimate RLS to sample the dict_dppvfx dictionary, i.e. the one used to construct A
    # here theory says that to have high acceptance probability we need the oversampling factor to be ~deff^2
    # but even with constant oversampling factor we seem to accept fast

    D_A = reduce_lambda(X_data,
                        eval_L,
                        dict_bless,
                        dict_bless.lam,
                        rng,
                        rls_oversample_parameter=rls_oversample_dppvfx)

    # Phase 2: pre-compute L_hat, B_bar, l_i, det(I + L_hat), etc.
    U_DD, S_DD, _ = np.linalg.svd(eval_L(D_A.X, D_A.X))
    U_DD, S_root_inv_DD = stable_invert_root(U_DD, S_DD)
    m = U_DD.shape[1]

    E = S_root_inv_DD * U_DD.T

    # The _T indicates that B_bar_T is the transpose of B_bar,
    # we keep it that way for efficiency reasons
    B_bar_T = E.dot(eval_L(D_A.X, X_data))
    diag_L_hat = np.square(B_bar_T).sum(axis=0)
    trace_L_hat = diag_L_hat.sum()

    # While we have L_hat = B_bar_T.T * B_bar_T, we do not want to compute explicitly the (n x n) matrix
    # instead we reason in terms of B_bar_T * B_bar_T.T which is a (m x m) matrix. We call this matrix A_mm.
    # I_A_mm indicates I + A_mm (i.e. A_mm with identity added)
    I_A_mm = B_bar_T.dot(B_bar_T.T)
    I_A_mm[np.diag_indices(m)] += 1.0

    # we now need to compute the l_i estimates using L_hat; it is more efficient to do it in terms of
    # B_bar_T and I_A_mm
    # in particular, we will use the diag(L - L_hat + L_hat(L_hat + I)^-1) estimator,
    # but we must first tune L to obtain a desired s
    # we can use the fact that the non-zero eigenvalues of I + L_hat and I_A_mm are equal
    eigvals, eigvec = np.linalg.eigh(I_A_mm)

    if np.any(eigvals <= 1.0):
        raise ValueError(
            'Some eigenvalues of L_hat are negative, this should never happen. '
            'Minimum eig: {}'.format(np.min(eigvals - 1.0)))

    natural_expected_size = trace_L - trace_L_hat + np.sum(
        (eigvals - 1.0) / eigvals)

    if natural_expected_size < 0.0:
        raise ValueError(
            'natural_expected_size < 0, this should never happen. '
            'natural_expected_size: {}'.format(natural_expected_size))

    # s might naturally be too large, but we can rescale L to shrink it
    # if we rescale alpha * L by a constant alpha,
    # s is now trace(alpha * L - alpha * L_hat + L_hat(L_hat + I / alpha)^-1)
    if desired_expected_size is None:
        alpha_star = 1.0
    elif natural_expected_size <= desired_expected_size:
        raise ValueError(
            'The expected sample size is smaller than the desired sample size or k (if sampling from '
            'a k-DPP).\n'
            'This is unusual (i.e. you are trying to select more than the overall amount of diversity '
            'in your set).\n'
            'Increasing the expected sample size is currently not supported (only decreasing).\n'
            'Please consider decreasing your k={} or changing L.'
            ' Estimated mean cardinality: {}'.format(desired_expected_size,
                                                     natural_expected_size))
    else:
        # since this is monotone in alpha, we can simply use Brent's algorithm (bisection + tricks)
        # it is a root-finding algorithm, so we build a function that is zero when the expected size
        # equals desired_expected_size
        def temp_func_with_root_in_desired_expected_size(x):
            return (x * trace_L - x * trace_L_hat + np.sum(
                (eigvals - 1.0) / (eigvals - 1.0 + 1.0 / x)) -
                    desired_expected_size)

        alpha_star, opt_result = brentq(
            temp_func_with_root_in_desired_expected_size,
            a=10.0 * np.finfo(float).eps,
            b=1.0,
            full_output=True)

        if not opt_result.converged:
            raise ValueError(
                'Could not find an appropriate rescaling for desired_expected_size.'
                '(Flag, Iter, Root): {}'.format(
                    (opt_result.flag, opt_result.iterations, opt_result.root)))

    # adjust from I + A to I / alpha_star + A
    I_A_mm[np.diag_indices(m)] += 1.0 / alpha_star - 1.0
    eigvals += 1.0 / alpha_star - 1.0
    rls_estimate = estimate_rls_from_embedded_points(eigvec, eigvals, B_bar_T,
                                                     diag_L, diag_L_hat,
                                                     alpha_star)

    if np.any(rls_estimate < 0.0):
        raise ValueError(
            'Some estimate l_i is negative, this should never happen. '
            'Minimum l_i: {}'.format(np.min(rls_estimate)))

    # s is simply the sum of l_i, and it is our proxy for the expected sample size. If desired_expected_size
    # is set, s should be very close to it
    s = np.sum(rls_estimate)
    if s < 0.0:
        raise ValueError('s < 0, this should never happen. s = {}'.format(s))

    # we need to compute z and logDet(I + L_hat)
    z = np.sum((eigvals - 1.0 / alpha_star) / eigvals)

    # we need logdet(I + alpha * A) and we have eigvals(I / alpha_star + A) we can adjust using sum of logs
    logdet_I_A = np.sum(np.log(alpha_star * eigvals))

    if not logdet_I_A >= 0.0:
        raise ValueError('logdet_I_A is negative, this should never happen. '
                         'logdet_I_A: {}'.format(logdet_I_A))

    result = _IntermediateSampleInfo(alpha_star=alpha_star,
                                     logdet_I_A=logdet_I_A,
                                     q=-1,
                                     s=s,
                                     z=z,
                                     rls_estimate=rls_estimate,
                                     rej_to_first_sample=0)

    return result
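The final adjustment above relies on the identity logdet(I + alpha_star * A_mm) = sum(log(alpha_star * eigvals(I / alpha_star + A_mm))): every eigenvalue mu of I / alpha_star + A_mm satisfies alpha_star * mu = 1 + alpha_star * lambda for the matching eigenvalue lambda of A_mm. A quick standalone check of the identity on a toy matrix (illustration only):

import numpy as np

rng = np.random.RandomState(0)
B = rng.randn(4, 4)
A_mm = B @ B.T                          # small PSD stand-in for A_mm
alpha_star = 0.3

lhs = np.linalg.slogdet(np.eye(4) + alpha_star * A_mm)[1]
rhs = np.sum(np.log(alpha_star * np.linalg.eigvalsh(np.eye(4) / alpha_star + A_mm)))
assert np.isclose(lhs, rhs)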