def pac_bound_one_prime(empirical_gibbs_risk, empirical_disagreement, m, m_prime, KLQP, delta=0.05):
    """PAC Bound ONE PRIME of Germain, Lacasse, Laviolette, Marchand and Roy (JMLR 2015).

    Compute a *semi-supervised* PAC-Bayesian upper bound on the Bayes risk by
    using the C-Bound on an upper bound on the Gibbs risk (using m *labeled*
    examples) and a lower bound on the expected disagreement (using m_prime
    *unlabeled* examples).

    empirical_gibbs_risk : Gibbs risk on the training set
    empirical_disagreement : Expected disagreement on the training set
    m : number of *labeled* training examples
    m_prime : number of *unlabeled* training examples
    KLQP : Kullback-Leibler divergence between prior and posterior
    delta : confidence parameter (default=0.05)

    Returns 1.0 (the trivial bound) on invalid input.
    """
    if not validate_inputs(empirical_gibbs_risk, empirical_disagreement, m, KLQP, delta):
        return 1.0

    if m_prime <= 0:
        # FIX: was a Python 2 `print` statement (syntax error under Python 3);
        # also corrected the "postive" typo in the message.
        print('INVALID INPUT: m_prime must be strictly positive.')
        return 1.0

    # Upper bound on the true Gibbs risk from the m labeled examples:
    # invert kl(empirical_gibbs_risk || R) <= (KLQP + ln(2*xi(m)/delta)) / m,
    # then clamp at 1/2 (the C-Bound is vacuous past that point).
    xi_m = xi(m)
    right_hand_side = (KLQP + log(2 * xi_m / delta)) / m
    sup_R = min(0.5, solve_kl_sup(empirical_gibbs_risk, right_hand_side))

    # Lower bound on the true expected disagreement from the m_prime unlabeled
    # examples; the deviation term uses 2*KLQP (KL between *joint* posteriors).
    xi_m_prime = xi(m_prime)
    right_hand_side = (2 * KLQP + log(2 * xi_m_prime / delta)) / m_prime
    inf_D = solve_kl_inf(empirical_disagreement, right_hand_side)

    return c_bound_third_form(sup_R, inf_D)
def pac_bound_one_prime(empirical_gibbs_risk, empirical_disagreement, m, m_prime, KLQP, delta=0.05):
    """PAC Bound ONE PRIME of Germain, Lacasse, Laviolette, Marchand and Roy (JMLR 2015).

    NOTE(review): this appears to duplicate an identical definition earlier in
    the file — only this (later) definition is effective at import time;
    confirm whether the earlier copy should be removed.

    Compute a *semi-supervised* PAC-Bayesian upper bound on the Bayes risk by
    using the C-Bound on an upper bound on the Gibbs risk (using m *labeled*
    examples) and a lower bound on the expected disagreement (using m_prime
    *unlabeled* examples).

    empirical_gibbs_risk : Gibbs risk on the training set
    empirical_disagreement : Expected disagreement on the training set
    m : number of *labeled* training examples
    m_prime : number of *unlabeled* training examples
    KLQP : Kullback-Leibler divergence between prior and posterior
    delta : confidence parameter (default=0.05)

    Returns 1.0 (the trivial bound) on invalid input.
    """
    if not validate_inputs(empirical_gibbs_risk, empirical_disagreement, m, KLQP, delta):
        return 1.0

    if m_prime <= 0:
        # FIX: was a Python 2 `print` statement (syntax error under Python 3);
        # also corrected the "postive" typo in the message.
        print('INVALID INPUT: m_prime must be strictly positive.')
        return 1.0

    # Upper bound on the true Gibbs risk from the m labeled examples,
    # clamped at 1/2 (the C-Bound is vacuous past that point).
    xi_m = xi(m)
    right_hand_side = (KLQP + log(2 * xi_m / delta)) / m
    sup_R = min(0.5, solve_kl_sup(empirical_gibbs_risk, right_hand_side))

    # Lower bound on the true expected disagreement from the m_prime unlabeled
    # examples; the deviation term uses 2*KLQP (KL between *joint* posteriors).
    xi_m_prime = xi(m_prime)
    right_hand_side = (2 * KLQP + log(2 * xi_m_prime / delta)) / m_prime
    inf_D = solve_kl_inf(empirical_disagreement, right_hand_side)

    return c_bound_third_form(sup_R, inf_D)
def pac_bound_one(empirical_gibbs_risk, empirical_disagreement, m, KLQP, delta=0.05):
    """PAC Bound ONE of Germain, Lacasse, Laviolette, Marchand and Roy (JMLR 2015).

    Compute a PAC-Bayesian upper bound on the Bayes risk by plugging into the
    C-Bound an upper bound on the Gibbs risk and a lower bound on the expected
    disagreement, both derived from the same m training examples.

    empirical_gibbs_risk : Gibbs risk on the training set
    empirical_disagreement : Expected disagreement on the training set
    m : number of training examples
    KLQP : Kullback-Leibler divergence between prior and posterior
    delta : confidence parameter (default=0.05)

    Returns 1.0 (the trivial bound) on invalid input.
    """
    if not validate_inputs(empirical_gibbs_risk, empirical_disagreement, m, KLQP, delta):
        return 1.0

    xi_of_m = xi(m)

    # Invert the kl deviation inequality to upper-bound the true Gibbs risk;
    # values above 1/2 give a vacuous C-Bound, so clamp there.
    gibbs_rhs = (KLQP + log(2 * xi_of_m / delta)) / m
    gibbs_upper = min(0.5, solve_kl_sup(empirical_gibbs_risk, gibbs_rhs))

    # Lower-bound the true expected disagreement; its deviation term carries
    # 2*KLQP because it involves the joint (paired) posterior.
    disagreement_rhs = (2 * KLQP + log(2 * xi_of_m / delta)) / m
    disagreement_lower = solve_kl_inf(empirical_disagreement, disagreement_rhs)

    return c_bound_third_form(gibbs_upper, disagreement_lower)