from math import log

# The helper routines below (validate_inputs, xi, solve_kl_sup, solve_kl_inf,
# c_bound_third_form, maximize_c_bound_under_constraints) are assumed to be
# provided by the utility module accompanying this code; the module name here
# is an assumption.
from pac_bound_tools import (validate_inputs, xi, solve_kl_sup, solve_kl_inf,
                             c_bound_third_form,
                             maximize_c_bound_under_constraints)


def pac_bound_one_prime(empirical_gibbs_risk, empirical_disagreement, m, m_prime, KLQP, delta=0.05):
    """ PAC Bound ONE PRIME of Germain, Lacasse, Laviolette, Marchand and Roy (JMLR 2015)

    Compute a *semi-supervised* PAC-Bayesian upper bound on the Bayes risk by
    applying the C-Bound to an upper bound on the Gibbs risk (computed from the
    m *labeled* examples) and a lower bound on the expected disagreement
    (computed from the m_prime *unlabeled* examples).

    empirical_gibbs_risk : Gibbs risk on the training set
    empirical_disagreement : Expected disagreement on the training set
    m : number of *labeled* training examples
    m_prime : number of *unlabeled* training examples
    KLQP : Kullback-Leibler divergence between prior and posterior
    delta : confidence parameter (default=0.05)
    """
    if not validate_inputs(empirical_gibbs_risk, empirical_disagreement, m, KLQP, delta):
        return 1.0
    if m_prime <= 0:
        print('INVALID INPUT: m_prime must be strictly positive.')
        return 1.0

    xi_m = xi(m)
    right_hand_side = (KLQP + log(2 * xi_m / delta)) / m
    sup_R = min(0.5, solve_kl_sup(empirical_gibbs_risk, right_hand_side))

    xi_m_prime = xi(m_prime)
    right_hand_side = (2 * KLQP + log(2 * xi_m_prime / delta)) / m_prime
    inf_D = solve_kl_inf(empirical_disagreement, right_hand_side)

    return c_bound_third_form(sup_R, inf_D)
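
# Reminder on the quantity returned above (see Germain et al., JMLR 2015): the
# C-bound's "third form" upper-bounds the Bayes risk as
#     1 - (1 - 2*R)**2 / (1 - 2*D),
# where R is an upper bound on the Gibbs risk and D a lower bound on the
# expected disagreement; c_bound_third_form(sup_R, inf_D) is assumed to
# evaluate exactly this expression.
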
def pac_bound_two_prime(empirical_gibbs_risk, empirical_disagreement, m, KLQP, delta=0.05):
    """ PAC Bound TWO PRIME of Germain, Lacasse, Laviolette, Marchand and Roy (JMLR 2015)

    Compute a PAC-Bayesian upper bound on the Bayes risk by
    using the C-Bound. To do so, we bound *simultaneously*
    the disagreement and the joint error, and we add a
    constraint by computing another bound on the joint error.

    empirical_gibbs_risk : Gibbs risk on the training set
    empirical_disagreement : Expected disagreement on the training set
    m : number of training examples
    KLQP : Kullback-Leibler divergence between prior and posterior
    delta : confidence parameter (default=0.05)
    """
    if not validate_inputs(empirical_gibbs_risk, empirical_disagreement, m, KLQP, delta):
        return 1.0

    empirical_joint_error = empirical_gibbs_risk - empirical_disagreement / 2

    xi_m = xi(m)

    right_hand_side = (2 * KLQP + log(2 * xi_m / delta)) / m
    sup_E = min(0.5, solve_kl_sup(empirical_joint_error, right_hand_side))

    right_hand_side = (2 * KLQP + log(2 * (xi_m + m) / delta)) / m
    return maximize_c_bound_under_constraints(empirical_disagreement,
                                              empirical_joint_error,
                                              right_hand_side, sup_E)
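
# Usage sketch (the numeric values are illustrative only, not from the paper):
#     pac_bound_two_prime(0.3, 0.2, m=500, KLQP=5.0)
# The joint error computed above relies on the identity
#     R_Gibbs = e_joint + d/2,
# i.e. a single voter errs whenever both voters err, or on half of the
# disagreements; hence e_joint = R_Gibbs - d/2 for the empirical quantities.
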
def pac_bound_zero(empirical_gibbs_risk, m, KLQP, delta=0.05):
    """ PAC Bound ZERO of Germain, Lacasse, Laviolette, Marchand and Roy (JMLR 2015)

    Compute a PAC-Bayesian upper bound on the Bayes risk by
    doubling an upper bound on the Gibbs risk.

    empirical_gibbs_risk : Gibbs risk on the training set
    m : number of training examples
    KLQP : Kullback-Leibler divergence between prior and posterior
    delta : confidence parameter (default=0.05)
    """
    if not validate_inputs(empirical_gibbs_risk, None, m, KLQP, delta):
        return 1.0

    xi_m = xi(m)
    right_hand_side = (KLQP + log(xi_m / delta)) / m
    sup_R = min(0.5, solve_kl_sup(empirical_gibbs_risk, right_hand_side))

    return 2 * sup_R
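
# Usage sketch (illustrative values only):
#     pac_bound_zero(0.3, m=500, KLQP=5.0)
# The factor of two comes from the classical argument that whenever the
# majority vote errs, at least half of the voters (weighted by Q) err, so
# R_Bayes <= 2 * R_Gibbs.
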
def pac_bound_one(empirical_gibbs_risk, empirical_disagreement, m, KLQP, delta=0.05):
    """ PAC Bound ONE of Germain, Lacasse, Laviolette, Marchand and Roy (JMLR 2015)

    Compute a PAC-Bayesian upper bound on the Bayes risk by
    applying the C-Bound to an upper bound on the Gibbs risk
    and a lower bound on the expected disagreement.

    empirical_gibbs_risk : Gibbs risk on the training set
    empirical_disagreement : Expected disagreement on the training set
    m : number of training examples
    KLQP : Kullback-Leibler divergence between prior and posterior
    delta : confidence parameter (default=0.05)
    """
    if not validate_inputs(empirical_gibbs_risk, empirical_disagreement, m, KLQP, delta):
        return 1.0

    xi_m = xi(m)
    right_hand_side = (KLQP + log(2 * xi_m / delta)) / m
    sup_R = min(0.5, solve_kl_sup(empirical_gibbs_risk, right_hand_side))

    right_hand_side = (2 * KLQP + log(2 * xi_m / delta)) / m
    inf_D = solve_kl_inf(empirical_disagreement, right_hand_side)

    return c_bound_third_form(sup_R, inf_D)
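
# A minimal end-to-end sketch, assuming the imports at the top of this module
# resolve. The input values below are made up solely to exercise each bound;
# they do not come from the paper's experiments.
if __name__ == '__main__':
    gibbs_risk = 0.3      # empirical Gibbs risk
    disagreement = 0.2    # empirical expected disagreement
    m = 500               # number of labeled training examples
    m_prime = 2000        # number of unlabeled training examples
    kl_qp = 5.0           # KL divergence between posterior Q and prior P

    print("PAC bound ZERO      :", pac_bound_zero(gibbs_risk, m, kl_qp))
    print("PAC bound ONE       :", pac_bound_one(gibbs_risk, disagreement, m, kl_qp))
    print("PAC bound ONE PRIME :", pac_bound_one_prime(gibbs_risk, disagreement, m, m_prime, kl_qp))
    print("PAC bound TWO PRIME :", pac_bound_two_prime(gibbs_risk, disagreement, m, kl_qp))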