import numpy as np
from numpy import exp, log
from scipy.stats import ncx2


def exact_likelihood(self, theta, x):
    """Exact CIR log-likelihood from the noncentral chi-squared transition density."""
    kappa, mu, sigma = theta
    e = exp(-kappa * self.h)                 # decay factor over one step of length self.h
    c = 2 * kappa / sigma ** 2 / (1 - e)     # scale of the transition density
    q = 2 * kappa * mu / sigma ** 2 - 1
    v = 2 * c * x[1:]                        # scaled next observations
    df = 2 * (q + 1)                         # degrees of freedom
    nc = 2 * c * x[:-1] * e                  # noncentrality driven by the previous observation
    l = ncx2.logpdf(v, df, nc) + log(2 * c)  # Jacobian of the change of variables r -> 2*c*r
    return -l[np.isfinite(l)].mean()
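# A minimal usage sketch for exact_likelihood (everything below is hypothetical and
# assumes the definition above is in scope): wrap it in a tiny holder class that
# supplies the step size `h`, then evaluate it on an Euler-simulated CIR path
# dr = kappa*(mu - r)*dt + sigma*sqrt(r)*dW.

import numpy as np


class _CIR:
    def __init__(self, h):
        self.h = h

    exact_likelihood = exact_likelihood  # reuse the function above as a method


rng = np.random.default_rng(0)
kappa, mu, sigma, h = 1.5, 0.05, 0.2, 1.0 / 252
r = np.empty(1000)
r[0] = mu
for t in range(1, r.size):
    # Euler-Maruyama step; the max() guards sqrt against tiny negative excursions
    dW = rng.normal() * np.sqrt(h)
    r[t] = r[t - 1] + kappa * (mu - r[t - 1]) * h + sigma * np.sqrt(max(r[t - 1], 0.0)) * dW

print(_CIR(h).exact_likelihood((kappa, mu, sigma), r))  # mean negative log-likelihood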
import numpy as np
import pandas as pd
from scipy.stats import ncx2


def mle_cir(params):
    """Negative CIR log-likelihood for a pandas Series `rates` sampled at spacing `dt`."""
    global rates, dt
    K, theta, sigma = params
    r0_vector = rates[0:rates.shape[0] - 1]  # r_t
    r1_vector = rates[1:]                    # r_{t+dt}
    c = 2 * K / ((1 - np.exp(-K * dt)) * sigma**2)
    nc = 2 * c * r0_vector.values * np.exp(-K * dt)  # noncentrality from the previous rate
    df = 4 * theta / (K * sigma**2)                  # degrees of freedom
    lik = ncx2.logpdf(2 * r1_vector.values * c, df, nc) + np.log(2 * c)
    likelihood = pd.Series(lik).sum()
    return -likelihood
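# A minimal fitting sketch for mle_cir (the data below is synthetic and hypothetical;
# `rates` and `dt` are the module-level globals the function reads). Nelder-Mead
# avoids gradients, which is convenient since the likelihood involves ncx2.logpdf.

import numpy as np
import pandas as pd
from scipy.optimize import minimize

rng = np.random.default_rng(1)
dt = 1.0 / 252
rates = pd.Series(0.03 + 0.002 * rng.standard_normal(500).cumsum()).clip(lower=1e-4)

result = minimize(mle_cir, x0=np.array([0.5, 0.03, 0.1]),
                  method="Nelder-Mead", bounds=[(1e-4, None)] * 3)
print(result.x)  # fitted (K, theta, sigma)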
import numpy as np
from scipy.stats import ncx2


def signalData_lnpdf(x, alpha=__alpha, beta=__beta, num_mc=__num_mc, **kwargs):
    """
    Evaluate the signal probability density function at x.

    This is done by Monte Carlo sampling from p(y|alpha, beta) and
    approximating the integral of ncx2.pdf(x, __noise_df, y).
    """
    ### draw Monte Carlo samples from p(y|alpha, beta), one row per sample
    y = np.outer(__draw_truncatedPareto(Nsamp=num_mc, alpha=alpha, beta=beta), np.ones_like(x))
    x = np.outer(np.ones(num_mc), x)
    ans = ncx2.logpdf(x, __noise_df, y)
    ### FIXME:
    ### there is a wrapping problem with calls to scipy.stats.ncx2 when the parameters get really big.
    ### Pragmatically, this seems to happen when y >> x, but it also happens when x ~ y >> 1.
    ### Until we can find a better solution, we simply set any positive values to -inf so they are
    ### negligible within __logaddexp. This should be the correct thing when y >> x, which we think
    ### is mostly what happens.
    ans[ans > 0] = -np.inf
    return __logaddexp(ans) - np.log(num_mc)
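# __logaddexp above is the module's own helper; a plausible stand-in (an assumption,
# not the source's definition) reduces over the Monte Carlo axis so the returned
# array has the same shape as the original x:

from scipy.special import logsumexp


def __logaddexp(lnarray):
    # stable log(sum(exp(...))) over the num_mc sample rows (axis 0)
    return logsumexp(lnarray, axis=0)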
from scipy.stats import ncx2


def lnlike(theta, data, psd):
    """
    The log-likelihood: non-central chi-squared with 2 dof,
    noncentrality given by the template.
    """
    # Restrict to the 1--4 kHz analysis band
    inband = (pca_result.sample_frequencies > 1000) * (pca_result.sample_frequencies < 4000)

    # Construct the template (non-centrality), normalised by the noise variance
    template = PCtemplatePSD(theta)[inband] / psd[inband]

    # Normalise the data by the noise variance
    data_normed = data[inband] / psd[inband]

    # likelihood of each frequency bin
    lnlikelihoods = ncx2.logpdf(data_normed, 2, template)

    return sum(lnlikelihoods)
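# Why 2 dof: each frequency bin of whitened complex Gaussian noise contributes an
# independent real and imaginary part, so the whitened power follows a noncentral
# chi-squared with 2 dof whose noncentrality is the whitened template power.
# A self-contained numerical check (all numbers hypothetical):

import numpy as np
from scipy.stats import ncx2

rng = np.random.default_rng(42)
nc = 3.0                                       # whitened template power in one bin
noise = rng.normal(size=100_000) + 1j * rng.normal(size=100_000)
signal = np.sqrt(nc / 2) * (1 + 1j)            # |signal|**2 == nc
power = np.abs(signal + noise) ** 2

print(power.mean(), ncx2.mean(2, nc))          # both should be close to 2 + nc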
        loss = float(loss / len(p_dists))
        mds_stress = (
            stress(debug_D_squareform, mu) if debug_D_squareform is not None else 0.0
        )
        all_loss.append(loss)
        mlflow.log_metric("loss", loss)
        mlflow.log_metric("stress", mds_stress)
        print(
            f"[DEBUG] epoch {epoch}, loss: {loss:.2f}, stress: {mds_stress:,.2f}"
            # f" mu in [{float(jnp.min(mu)):.3f}, {float(jnp.max(mu)):.3f}], "
            # f" ss_unc in [{float(jnp.min(ss_unc)):.3f}, {float(jnp.max(ss_unc)):.3f}]"
        )

    ss = EPSILON + jax.nn.softplus(SCALE * ss_unc)
    print("[DEBUG] mean ss: ", float(jnp.mean(ss)))
    mlflow.log_metric("mean_ss", float(jnp.mean(ss)))
    return mu, ss, all_loss


if __name__ == "__main__":
    from scipy.stats import ncx2

    # compare jax ncx2_log_pdf with scipy
    df, nc = 2, 1.06
    x = np.array([20.0])

    v1 = ncx2.logpdf(x, df, nc)
    v2 = _ncx2_log_pdf(x, nc)
    print(f"scipy: {v1[0]:.5f}")
    print(f"jax : {v2[0]:.5f}")
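# A sketch of what a df=2 _ncx2_log_pdf could look like in JAX (an assumption based
# on the call _ncx2_log_pdf(x, nc) above, which passes no df). For df = 2 the
# noncentral chi-squared pdf reduces to 0.5 * exp(-(x + nc)/2) * I0(sqrt(nc * x)),
# and the exponentially scaled Bessel function i0e keeps the log numerically stable:

import jax.numpy as jnp
from jax.scipy.special import i0e


def _ncx2_log_pdf(x, nc):
    z = jnp.sqrt(nc * x)
    # log I0(z) = log(i0e(z)) + z
    return jnp.log(0.5) - (x + nc) / 2.0 + jnp.log(i0e(z)) + z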