def plot_chi2(ax, LLR, DOF, yscale="log", c='b'):
    """Histogram the LLR samples on *ax* and overlay the asymptotic chi^2 pdf.

    Parameters
    ----------
    ax : matplotlib Axes to draw on.
    LLR : array-like of log-likelihood-ratio samples.
    DOF : degrees of freedom of the reference chi^2 curve.
    yscale : y-axis scale passed to ``ax.set`` (default ``"log"``).
    c : matplotlib colour used for both the histogram and the curve.
    """
    ax.set_xlabel("LLR")
    ax.set(yscale=yscale)
    # Normalised histogram of the Monte Carlo LLR values.
    sns.distplot(LLR, color=c, kde=False, ax=ax, norm_hist=True, label="JMCTF")
    # Evaluate the chi^2 density on a uniform grid covering the samples.
    grid = np.linspace(0, np.max(LLR), 1000)
    chi2_pdf = tf.math.exp(tfd.Chi2(df=DOF).log_prob(grid))
    ax.plot(grid, chi2_pdf, color=c, lw=2, label="chi^2 (DOF={0})".format(DOF))
def _init_distribution(conditions, **kwargs):
    """Build a TFP Chi2 distribution from the ``"df"`` entry of *conditions*.

    Any extra keyword arguments are forwarded to ``tfd.Chi2``.
    Raises ``KeyError`` if *conditions* has no ``"df"`` key.
    """
    return tfd.Chi2(df=conditions["df"], **kwargs)
# Alternative workflow kept for reference (fits null model and plots MLE dists):
#q_null, joint_fitted_null, all_pars_null = joint_null.fit_all(samples)
#print(all_pars_null)
#fig = plot_MLE_dist(all_pars_null)
#fig.tight_layout()
#fig.savefig("quickstart_MLE_dists.svg")

import matplotlib.pyplot as plt
import seaborn as sns
from jmctf.plotting import plot_sample_dist, plot_MLE_dist
from tensorflow_probability import distributions as tfd

# Manual version: histogram the LLR samples and overlay the asymptotic
# chi^2 prediction for the given degrees of freedom.
fig = plt.figure(figsize=(5, 3))
ax = fig.add_subplot(111)
ax.set_xlabel("LLR")
ax.set(yscale="log")
sns.distplot(LLR, color='b', kde=False, ax=ax, norm_hist=True, label="JMCTF")
grid = np.linspace(0, np.max(LLR), 1000)
chi2_pdf = tf.math.exp(tfd.Chi2(df=DOF).log_prob(grid))
ax.plot(grid, chi2_pdf, color='b', lw=2, label="chi^2 (DOF={0})".format(DOF))
ax.legend(loc=1, frameon=False, framealpha=0, prop={'size': 10}, ncol=1)
fig.tight_layout()
fig.savefig("quickstart_LLR.svg")

# Or using helper tools:
fig = plt.figure(figsize=(5, 3))
ax = fig.add_subplot(111)
plot_chi2(ax, LLR, DOF)
ax.legend(loc=1, frameon=False, framealpha=0, prop={'size': 10}, ncol=1)
fig.tight_layout()
fig.savefig("quickstart_plot_LLR.svg")
color='b', kde=False, ax=ax1, norm_hist=True, label="LEEC bootstrap") sns.distplot(bootstrap_chi2, color='b', kde=False, ax=ax2, norm_hist=True, label="LEEC bootstrap") qx = np.linspace( 0, np.max(chi2_quad), 1000) # 6 sigma too far for tf, cdf is 1. single-precision float I guess qy = tf.math.exp(tfd.Chi2(df=DOF).log_prob(qx)) sns.lineplot(qx, qy, color='g', ax=ax1, label="asymptotic") sns.lineplot(qx, qy, color='g', ax=ax2, label="asymptotic") # Observed empirical and asymptotic p-values epval = 1 - c.CDFf(chi2_quad)(chi2_quad_obs) apval = tfd.Chi2(df=DOF).log_prob(chi2_quad_obs) esig = -tfd.Normal(0, 1).quantile(epval) asig = -tfd.Normal(0, 1).quantile(apval) ax1.axvline(x=chi2_quad_obs, lw=2, c='k', label="e_z={0}, a_z={1}".format(asig, esig)) ax2.axvline(x=chi2_quad_obs, lw=2,
#MLLR = Lall(all_truepar) # Try without subtracting best fit print("MLLR.shape:", MLLR.shape) # Combined MLLR LcombT_sep = LogLike(tf.expand_dims(xT, axis=0)) print("LcombT_sep:", LcombT_sep) LcombT = tf.transpose(tf.reduce_sum(LcombT_sep, axis=1)) MLLR_comb = LcombT - Lcomb_bf[:, 0] print("MLLR_comb.shape:", MLLR_comb.shape) fig = plt.figure(figsize=(6, 4)) ax1 = fig.add_subplot(111) ax1.set(yscale="log") sns.distplot(MLLR[0], bins=50, kde=False, ax=ax1, norm_hist=True, label="MLLR") q = np.linspace(np.min(MLLR[0]), np.max(MLLR[0]), 1000) chi2 = tf.math.exp(tfd.Chi2(df=1).log_prob(q)) sns.lineplot(q, chi2, color='b', ax=ax1) plt.tight_layout() fig.savefig("ind_fits{0}.png".format(suffix)) # Now we need to select the signal region depending on the parameter values # First, select based on the *test* point. This will always choose the # same signal region, no matter the data, so it should work just fine. truepar = tf.broadcast_to(xT, shape=(N, 1)) selected = tf.expand_dims(selector(truepar), 1) print("selected:", selected) ordinals = tf.reshape(tf.range(MLLR.shape[0]), (-1, 1)) idx = tf.stack([ordinals, selected], axis=-1) MLLRsel = tf.gather_nd(MLLR, idx)
def _base_dist(self, nu: IntTensorLike, *args, **kwargs):
    """Construct the underlying TFP Chi2 distribution with *nu* degrees of freedom.

    Extra positional and keyword arguments are forwarded to ``tfd.Chi2``.
    """
    # NOTE(review): df is Chi2's first parameter, so any positional *args
    # would collide with the df= keyword at call time — presumably callers
    # pass only keyword arguments; confirm.
    return tfd.Chi2(df=nu, *args, **kwargs)
framealpha=0, prop={'size': 10}, ncol=1) ax2.legend(loc=1, frameon=False, framealpha=0, prop={'size': 10}, ncol=1) if do_gof_tests: #qx = np.linspace(0, tfd.Chi2(df=DOF).quantile(tfd.Normal(0,1).cdf(6.)),1000) # TODO: apparantly Quantile not implemented yet for Chi2 in tf qx = np.linspace( 0, sps.chi2(df=DOF).ppf(tfd.Normal(0, 1).cdf(5.)), 1000 ) # 6 sigma too far for tf, cdf is 1. single-precision float I guess qy = tf.math.exp(tfd.Chi2(df=DOF).log_prob(qx)) sns.lineplot(qx, qy, color='g', ax=ax3) sns.lineplot(qx, qy, color='g', ax=ax4) ax3.legend(loc=1, frameon=False, framealpha=0, prop={'size': 10}, ncol=1) ax4.legend(loc=1, frameon=False, framealpha=0, prop={'size': 10}, ncol=1) qx = np.linspace(
kde=False, ax=ax2, norm_hist=True, label="LEEC quad") sns.distplot(bootstrap_chi2, color='b', kde=False, ax=ax1, norm_hist=True, label="LEEC bootstrap") sns.distplot(bootstrap_chi2, color='b', kde=False, ax=ax2, norm_hist=True, label="LEEC bootstrap") qx = np.linspace( 0, np.max(chi2_quad), 1000) # 6 sigma too far for tf, cdf is 1. single-precision float I guess qy = 0.5 * tf.math.exp(tfd.Chi2(df=DOF).log_prob( qx)) # Half-chi2 since negative signal contributions not possible? sns.lineplot(qx, qy, color='g', ax=ax1, label="asymptotic") sns.lineplot(qx, qy, color='g', ax=ax2, label="asymptotic") ax1.legend(loc=1, frameon=False, framealpha=0, prop={'size': 10}, ncol=1) ax2.legend(loc=1, frameon=False, framealpha=0, prop={'size': 10}, ncol=1) fig.tight_layout() fig.savefig("{0}/LEEC_quad_{1}_{2}.png".format(path, master_name, nullname))