def compute_5_17():
    # Based on code from astroML, as used in "Statistics, Data Mining,
    # and Machine Learning in Astronomy"
    # Set the number of points and their standard deviations for both distributions:
    N1 = 48
    N2 = 2
    mu = 0
    sig1 = 1
    sig2 = 5
    sig_reject = 10. * sig1
    reject_conf = 0.683  # confidence level to reject an outlier

    sigs = np.zeros(N1 + N2)
    sigs[:N1] = sig1
    sigs[N1:] = sig2
    xi = np.random.normal(mu, sigs)

    # Construct a (mu1, g1) parameter space:
    mu1 = np.linspace(-5, 5, 100)
    g1 = np.linspace(0, 1, 50)
    l1, g1_2d = likelihood_5_17(xi[::-1], mu1, g1, sig1, sig_reject)
    l1 /= np.max(l1)

    ax = plt.figure().add_subplot(111)
    ax.imshow(l1, origin='lower', aspect='auto', cmap=plt.cm.binary,
              extent=[mu1[0], mu1[-1], g1[0], g1[-1]])
    ax.contour(mu1, g1, convert_to_stdev(np.log(l1)),
               levels=(0.683, 0.955, 0.997), colors='red')
    ax.figure.savefig('chap_5_5-17.png', dpi=300)

    # Check which points should be rejected:
    for i in range(len(xi)):
        newxi = np.roll(xi, -i)
        li, g1_2d = likelihood_5_17(newxi, mu1, g1, sig1, sig_reject)
        li /= np.max(li)
        stdevs = convert_to_stdev(np.log(li))
        g1_in_rejection = np.where(stdevs < reject_conf, g1_2d,
                                   np.ones(g1_2d.shape))
        print(i, g1_in_rejection.min())
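# Every snippet in this collection funnels a log-likelihood (or log-density)
# grid through convert_to_stdev before contouring at 0.683/0.955/0.997.
# For reference, a minimal sketch of that helper, following the astroML
# implementation (the import path varies by release; older versions expose
# it as astroML.plotting.mcmc.convert_to_stdev):
import numpy as np

def convert_to_stdev(logL):
    """Convert a grid of log-likelihoods to cumulative probability.

    Each output pixel holds the fraction of the total likelihood contained
    in pixels of higher likelihood, so contouring at 0.683/0.955/0.997
    traces the 1/2/3-sigma regions.
    """
    sigma = np.exp(logL)
    shape = sigma.shape
    sigma = sigma.ravel()

    # sort likelihoods in decreasing order, accumulate, then unsort
    i_sort = np.argsort(sigma)[::-1]
    i_unsort = np.argsort(i_sort)

    sigma_cumsum = sigma[i_sort].cumsum()
    sigma_cumsum /= sigma_cumsum[-1]

    return sigma_cumsum[i_unsort].reshape(shape)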
hist_mu, bins_mu = np.histogram(trace_mu, bins=mu_bins, density=True)
hist_gamma, bins_gamma = np.histogram(trace_gamma, bins=gamma_bins, density=True)

#----------------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(5, 5))

# first axis: likelihood contours
ax1 = fig.add_axes((0.4, 0.4, 0.55, 0.55))
ax1.xaxis.set_major_formatter(plt.NullFormatter())
ax1.yaxis.set_major_formatter(plt.NullFormatter())
ax1.contour(mu, gamma, convert_to_stdev(logL),
            levels=(0.683, 0.955, 0.997),
            colors='b', linestyles='dashed')
ax1.contour(0.5 * (mu_bins[:-1] + mu_bins[1:]),
            0.5 * (gamma_bins[:-1] + gamma_bins[1:]),
            convert_to_stdev(np.log(L_MCMC.T)),
            levels=(0.683, 0.955, 0.997), colors='k')

# second axis: marginalized over mu
ax2 = fig.add_axes((0.1, 0.4, 0.29, 0.55))
ax2.xaxis.set_major_formatter(plt.NullFormatter())
ax2.plot(hist_gamma,
         0.5 * (bins_gamma[1:] + bins_gamma[:-1] - bins_gamma[1] + bins_gamma[0]),
         '-k', drawstyle='steps')
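# The 2D density L_MCMC above is built outside this excerpt. A minimal
# sketch of that step (a hypothetical reconstruction mirroring the
# histogram2d pattern used in the pymc example later in this collection;
# the bin edges are illustrative):
L_MCMC, mu_bins, gamma_bins = np.histogram2d(
    trace_mu, trace_gamma,
    bins=(np.linspace(-5, 5, 41), np.linspace(0, 5, 41)))
L_MCMC[L_MCMC == 0] = 1E-16  # floor zeros so np.log(L_MCMC) stays finite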
ax.plot(z_fit, mu_fit, '-k')
ax.errorbar(z_sample, mu_sample, dmu, fmt='.k', ecolor='gray')

ax.set_xlim(0, 1.8)
ax.set_ylim(36, 46)
ax.set_xlabel('$z$')
ax.set_ylabel(r'$\mu$')
ax.text(0.04, 0.96, "%i observations" % len(z_sample),
        ha='left', va='top', transform=ax.transAxes)

# right plot: the likelihood
ax = fig.add_subplot(122)
ax.contour(omegaM, omegaL, convert_to_stdev(res.T),
           levels=(0.683, 0.955, 0.997), colors='k')
ax.plot([0, 1], [1, 0], '--k')
ax.plot([0, 1], [0.73, 0.73], ':k')
ax.plot([0.27, 0.27], [0, 2], ':k')

ax.set_xlim(0.05, 0.75)
ax.set_ylim(0.4, 1.1)
ax.set_xlabel(r'$\Omega_M$')
ax.set_ylabel(r'$\Omega_\Lambda$')

plt.show()
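# omegaM, omegaL, and the grid res come from earlier in the script. A
# hypothetical reconstruction of that grid evaluation (mu_of_z is a
# stand-in name for whatever distance-modulus model the original uses):
omegaM = np.linspace(0.05, 0.75, 70)
omegaL = np.linspace(0.4, 1.1, 70)
res = np.zeros((len(omegaM), len(omegaL)))

for i, Om in enumerate(omegaM):
    for j, OL in enumerate(omegaL):
        mu_model = mu_of_z(z_sample, Om, OL)  # hypothetical helper
        # Gaussian log-likelihood of the supernova sample
        res[i, j] = -0.5 * np.sum(((mu_sample - mu_model) / dmu) ** 2)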
print "mu from likelihood:", mu[j] print "gamma from likelihood:", gamma[i] print med, sigG = median_sigmaG(xi) print "mu from median", med print "gamma from quartiles:", sigG / 1.483 # Equation 3.54 #------------------------------------------------------------ # Plot the results plt.imshow(logL, origin='lower', cmap=plt.cm.binary, extent=(mu[0], mu[-1], gamma[0], gamma[-1]), aspect='auto') plt.colorbar().set_label(r'$\log(L)$') plt.clim(-5, 0) plt.contour(mu, gamma, convert_to_stdev(logL), levels=(0.683, 0.955, 0.997), colors='k', linewidths=2) plt.text(0.5, 0.9, r'$L(\mu,\gamma)\ \mathrm{for}\ \bar{x}=0,\ \gamma=2,\ n=10$', fontsize=18, bbox=dict(ec='k', fc='w', alpha=0.9), ha='center', va='center', transform=plt.gca().transAxes) plt.xlabel(r'$\mu$') plt.ylabel(r'$\gamma$') plt.show()
def compute_5_15():
    # Original Author: Jake VanderPlas
    # Modified by Andrew Schechtman-Rook
    # License: BSD
    #   This code is based on code used to make a figure in the textbook
    #   "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
    #   For more information, see http://astroML.github.com
    #   To report a bug or issue, use the following forum:
    #   https://groups.google.com/forum/#!forum/astroml-general

    #------------------------------------------------------------
    # Draw points from distribution
    np.random.seed(0)

    N = 1000
    a_true = 0.01
    xmin = 0.0
    xmax = 10.0
    b_true = 1. / (xmax - xmin) - 0.5 * a_true * (xmax + xmin)

    lin_dist = linear(xmin, xmax, a_true)
    data = lin_dist.rvs(N)

    #------------------------------------------------------------
    # Compute and plot the results
    fig = plt.figure(figsize=(9, 9))
    fig.subplots_adjust(left=0.1, right=0.95, wspace=0.3,
                        bottom=0.1, top=0.95, hspace=0.2)

    a = np.linspace(0.00001, 0.04, 71)
    b = np.linspace(0.00001, 0.15, 71)

    for num, nbins in enumerate([5, 100]):
        # divide points into bins
        yi, bins = np.histogram(data, bins=np.linspace(xmin, xmax, nbins + 1))
        xi = 0.5 * (bins[:-1] + bins[1:])

        # compute likelihoods for Poisson and Gaussian models;
        # reshaping b is marginally faster than b[:, None] and arguably
        # clearer about the intended broadcast
        factor = N * (xmax - xmin) * 1. / nbins
        LP = logL_poisson(xi, yi, factor * a, factor * b.reshape(b.shape + (1,)))
        LG = logL_gaussian(xi, yi, factor * a, factor * b.reshape(b.shape + (1,)))

        LP -= np.max(LP)
        LG -= np.max(LG)

        # find maximum likelihood point
        i, j = np.where(LP == np.max(LP))
        aP, bP = a[j[0]], b[i[0]]

        i, j = np.where(LG == np.max(LG))
        aG, bG = a[j[0]], b[i[0]]

        # plot scatter and lines
        ax = fig.add_subplot(2, 2, 1 + 2 * num)
        ax.scatter(xi, yi, s=9, c='gray', lw=0)

        x = np.linspace(xmin - 1, xmax + 1, 1000)
        for (ai, bi, s) in [(a_true, b_true, '-k'),
                            (aP, bP, '--k'),
                            (aG, bG, '-.k')]:
            px = ai * x + bi
            px[x < xmin] = 0
            px[x > xmax] = 0
            ax.plot(x, factor * px, s)

        ax.set_xlim(xmin - 1, xmax + 1)
        ax.set_xlabel('$x$')
        ax.set_ylabel('$y_i$')
        ax.text(0.04, 0.96,
                r'$\rm %i\ points$' % N + '\n' + r'$\rm %i\ bins$' % nbins,
                ha='left', va='top', transform=ax.transAxes)

        # plot likelihood contours
        ax = fig.add_subplot(2, 2, 2 + 2 * num)
        ax.contour(a, b, convert_to_stdev(LP),
                   levels=(0.683, 0.955, 0.997),
                   colors='k', linewidths=2)
        ax.contour(a, b, convert_to_stdev(LG),
                   levels=(0.683, 0.955, 0.997),
                   colors='gray', linewidths=1, linestyles='dashed')

        # trick the legend command
        ax.plot([0], [0], '-k', lw=2, label='Poisson Likelihood')
        ax.plot([0], [0], '-', c='gray', lw=1, label='Gaussian Likelihood')
        ax.legend(loc=1)

        # plot horizontal and vertical lines
        # in newer matplotlib versions, use ax.vlines() and ax.hlines()
        ax.plot([a_true, a_true], [0, 0.2], ':k', lw=1)
        ax.plot([0, 0.06], [b_true, b_true], ':k', lw=1)

        ax.set_xlabel(r'$a^\ast$')
        ax.set_ylabel(r'$b^\ast$')
        ax.set_xlim(0, 0.04)
        ax.set_ylim(0.001, 0.15)
        ax.xaxis.set_major_locator(plt.MultipleLocator(0.02))

    fig.savefig('chap_5_5-15.png', dpi=300)
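# logL_poisson, logL_gaussian, and the linear distribution are defined
# outside this excerpt. Below is a minimal sketch of what compute_5_15
# assumes -- hypothetical reconstructions consistent with the calls above,
# not the original helpers:
import numpy as np
from scipy import stats

class linear(stats.rv_continuous):
    """p(x) = a*x + b on [xmin, xmax], with b fixed by normalization."""
    def __init__(self, xmin, xmax, a):
        super(linear, self).__init__(a=xmin, b=xmax)  # support bounds
        self._slope = a
        self._icept = 1. / (xmax - xmin) - 0.5 * a * (xmax + xmin)

    def _pdf(self, x):
        return self._slope * x + self._icept

def logL_poisson(x, y, a, b):
    # Poisson log-likelihood of binned counts y given means a*x + b
    # (constant terms independent of a, b are dropped; assumes the model
    # is positive over the grid)
    # shapes: x, y -> (nbins,); a -> (na,); b -> (nb, 1); result -> (nb, na)
    y_model = a * x[:, None, None] + b
    return np.sum(y[:, None, None] * np.log(y_model) - y_model, axis=0)

def logL_gaussian(x, y, a, b):
    # Gaussian approximation to the above: y ~ N(y_model, sqrt(y_model))
    y_model = a * x[:, None, None] + b
    return -0.5 * np.sum((y[:, None, None] - y_model) ** 2 / y_model, axis=0)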
fig.subplots_adjust(bottom=0.07, left=0.11, hspace=0.15, top=0.95)

ax = fig.add_subplot(211)
plt.imshow(logL, origin='lower', aspect='auto',
           extent=(sigma[0], sigma[-1], A[0], A[-1]),
           cmap=plt.cm.binary)
plt.colorbar().set_label(r'$\log(L)$')
plt.clim(-5, 0)

ax.set_xlabel(r'$\sigma$')
ax.set_ylabel(r'$A$')
ax.text(0.5, 0.9,
        r'$L(\sigma,A)\ (\mathrm{Gauss + bkgd},\ n=200)$',
        bbox=dict(ec='k', fc='w', alpha=0.9),
        ha='center', va='center', transform=plt.gca().transAxes)

ax.contour(sigma, A, convert_to_stdev(logL),
           levels=(0.683, 0.955, 0.997), colors='k')

ax2 = plt.subplot(212)
ax2.yaxis.set_major_locator(plt.MultipleLocator(0.1))
ax2.plot(x, fracA * dist1.pdf(x) + (1. - fracA) * dist2.pdf(x), '-k')
ax2.hist(xi, 30, density=True, histtype='stepfilled', fc='black', alpha=0.5)

ax2.set_ylim(0, 0.301)
ax2.set_xlim(-1, 11)
ax2.set_xlabel('$x$')
ax2.set_ylabel('$p(x)$')

plt.show()
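# dist1, dist2, fracA, x, and xi are set up outside this excerpt. A
# plausible minimal setup consistent with the "Gauss + bkgd" label and the
# plot range (all values here are illustrative, not the original's):
from scipy import stats

np.random.seed(0)
fracA = 0.5                    # mixing fraction of the Gaussian component
dist1 = stats.norm(5.0, 1.0)   # the Gaussian "signal"
dist2 = stats.uniform(-1, 12)  # flat background spanning the plotted range
x = np.linspace(-1, 11, 1000)

# draw n = 200 points from the mixture
flag = np.random.random(200) < fracA
xi = np.where(flag, dist1.rvs(200), dist2.rvs(200))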
print "mu from likelihood:", mu[j] print "gamma from likelihood:", gamma[i] print med, sigG = median_sigmaG(xi) print "mu from median", med print "gamma from quartiles:", sigG / 1.483 # Equation 3.54 #------------------------------------------------------------ # Plot the results fig = plt.figure(figsize=(5, 3.75)) plt.imshow(logL, origin='lower', cmap=plt.cm.binary, extent=(mu[0], mu[-1], gamma[0], gamma[-1]), aspect='auto') plt.colorbar().set_label(r'$\log(L)$') plt.clim(-5, 0) plt.contour(mu, gamma, convert_to_stdev(logL), levels=(0.683, 0.955, 0.997), colors='k') plt.text(0.5, 0.93, r'$L(\mu,\gamma)\ \mathrm{for}\ \bar{x}=0,\ \gamma=2,\ n=10$', bbox=dict(ec='k', fc='w', alpha=0.9), ha='center', va='center', transform=plt.gca().transAxes) plt.xlabel(r'$\mu$') plt.ylabel(r'$\gamma$') plt.show()
x = np.array([-1, 0.44, -0.16])
y = a * x + b
dy = np.array([0.25, 0.22, 0.2])

y = np.random.normal(y, dy)

# add a fourth point which is a lower bound
x4 = 1.0
y4 = a * x4 + b + 0.2

#------------------------------------------------------------
# Compute the likelihoods for each point
a_range = np.linspace(0, 2, 80)
b_range = np.linspace(-1, 1, 80)

# per-point log-likelihood over the (a, b) grid; note the conventional
# Gaussian normalization is -0.5 * ((model - y) / dy) ** 2 -- without the
# 0.5 the contours below come out narrower than the quoted confidence levels
logL = -((a_range[:, None, None] * x + b_range[None, :, None] - y) / dy) ** 2
sigma = [convert_to_stdev(logL[:, :, i]) for i in range(3)]

# compute best-fit from first three points
logL_together = logL.sum(-1)
i, j = np.where(logL_together == np.max(logL_together))
amax = a_range[i[0]]
bmax = b_range[j[0]]

#------------------------------------------------------------
# Plot the first figure: the points and errorbars
fig1 = plt.figure(figsize=(5, 3.75))
ax1 = fig1.add_subplot(111)

# Draw the true and best-fit lines
xfit = np.array([-1.5, 1.5])
ax1.plot(xfit, a * xfit + b, ':k', label='True fit')
ax.plot(x_fit, m_fit * x_fit + b_fit, '-k')
ax.set_xlim(40, 250)
ax.set_ylim(100, 600)
ax.set_xlabel('x')
ax.set_ylabel('y')

#------------------------------------------------------------
# plot the likelihood contour in m, b
ax = fig.add_subplot(122)
m = np.linspace(1.7, 2.8, 100)
b = np.linspace(-60, 110, 100)
logL = np.zeros((len(m), len(b)))

for i in range(len(m)):
    for j in range(len(b)):
        logL[i, j] = TLS_logL(get_beta(m[i], b[j]), X, dX)

ax.contour(m, b, convert_to_stdev(logL.T),
           levels=(0.683, 0.955, 0.997),
           colors='k', linewidths=2)
ax.set_xlabel('slope')
ax.set_ylabel('intercept')
ax.set_xlim(1.7, 2.8)
ax.set_ylim(-60, 110)

plt.show()
# plot the results
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.24,
                    bottom=0.15, top=0.9)

fig.add_axes((0.58, 0.55, 0.30, 0.40))
plt.imshow(logL, origin='lower',
           extent=(mu[0], mu[-1], sigma[0], sigma[-1]),
           cmap=plt.cm.binary, aspect='auto')
plt.colorbar().set_label(r'$\log(L)$')
plt.clim(-5, 0)
plt.contour(mu, sigma, convert_to_stdev(logL),
            levels=(0.683, 0.955, 0.997), colors='k')
plt.xlabel(r'${\rm systemic \, velocity \, } v_s \, {\rm (km/s)}$')
plt.ylabel(r'${\rm intrinsic \, vel. \, dispersion \,} \sigma \, {\rm (km/s)}$')
plt.xlim(-150, 50.0)
plt.ylim(0, 100)

# plot true values
plt.plot([mu_true, mu_true], [0, 100.0], ':r', lw=1)
plt.plot([-200, 200.0], [sigma_true, sigma_true], ':r', lw=1)

# second axis: marginalized over mu
ax2 = fig.add_axes((0.17, 0.1, 0.3, 0.30))
ax2.plot(mu, p_mu, '-k', label='')
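# p_mu is computed outside this excerpt. A minimal sketch of the
# marginalization it implies (a hypothetical reconstruction: sum the
# normalized likelihood over the sigma axis of the (sigma, mu) grid):
L = np.exp(logL - logL.max())          # likelihood on the (sigma, mu) grid
p_mu = L.sum(0)                        # integrate out sigma (axis 0)
p_mu /= p_mu.sum() * (mu[1] - mu[0])   # normalize to unit area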
L1 = p(mu[:, None], g1, xi, 1, 10)
L1 /= np.max(L1)

L2 = p(mu[:, None], g1, xi[::-1], 1, 10)
L2 /= np.max(L2)

#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.05,
                    bottom=0.15, top=0.9)

ax1 = fig.add_subplot(121)
ax1.imshow(L1.T, origin='lower', aspect='auto', cmap=plt.cm.binary,
           extent=[mu[0], mu[-1], g1[0], g1[-1]])
ax1.contour(mu, g1, convert_to_stdev(np.log(L1).T),
            levels=(0.683, 0.955, 0.997), colors='k')
ax1.set_xlabel(r'$\mu$')
ax1.set_ylabel(r'$g_1$')

ax2 = fig.add_subplot(122)
ax2.imshow(L2.T, origin='lower', aspect='auto', cmap=plt.cm.binary,
           extent=[mu[0], mu[-1], g1[0], g1[-1]])
ax2.contour(mu, g1, convert_to_stdev(np.log(L2).T),
            levels=(0.683, 0.955, 0.997), colors='k')
ax2.set_xlabel(r'$\mu$')
ax2.yaxis.set_major_locator(plt.NullLocator())

plt.show()
def chi2plotMarginal(chiPixSig, chiPixCmod, chi2image, sigtrue, sigmaML, CmodML):

    fig = plt.figure(figsize=(8, 8))
    fig.subplots_adjust(left=0.08, bottom=0.15, right=0.95, top=0.90,
                        wspace=0.29, hspace=0.46)

    ## go from chi2 to ln(L):
    # since L = exp(-chi2/2),
    # lnL = -1/2 * chi2
    lnL = -0.5 * chi2image
    lnL -= lnL.max()
    lnL[lnL < -10] = -10  # truncate for clean plotting

    ## lnL image
    ax = fig.add_axes([0.35, 0.35, 0.45, 0.6], xticks=[], yticks=[])
    ax.set_title('ln(L) image', fontsize=14)
    # pretty color map
    plt.imshow(lnL, origin='lower',
               extent=(chiPixSig[0], chiPixSig[-1],
                       chiPixCmod[0], chiPixCmod[-1]),
               cmap=plt.cm.RdYlGn, aspect='auto')
    # colorbar
    cax = plt.axes([0.82, 0.35, 0.02, 0.6])
    cb = plt.colorbar(cax=cax)
    cb.set_label(r'$\ln L(\sigma, C_{mod})$', fontsize=14)
    plt.clim(np.min(lnL), np.max(lnL))
    # contours: draw on the image axes explicitly -- after plt.axes(cax)
    # above, cax is the current axes, so a bare plt.contour would target
    # the colorbar axes (which is why the contours previously failed to appear)
    ax.contour(chiPixSig, chiPixCmod, convert_to_stdev(lnL),
               levels=(0.683, 0.955, 0.997), colors='k')
    # mark true values
    ax.plot(sigtrue, 1000.0, 'o', color='red', alpha=0.75)
    # mark ML solution: (sigmaML, CmodML)
    ax.plot(sigmaML, CmodML, 'x', color='white', alpha=0.99, lw=35)

    # compute marginal projections
    p_sigma = np.exp(lnL).sum(0)
    p_Cmod = np.exp(lnL).sum(1)
    # and p(C|sigma=0)
    L = np.exp(lnL)
    L0 = L[:, 0]
    pCmod0 = L0 / np.max(L0) * np.max(p_Cmod)

    ax1 = fig.add_axes([0.35, 0.1, 0.45, 0.23], yticks=[])
    ax1.plot(chiPixSig, p_sigma, '-k')
    ax1.set_xlabel(r'$\sigma$ (pixel)', fontsize=12)
    ax1.set_ylabel(r'$p(\sigma)$', fontsize=12)
    ax1.set_xlim(np.min(chiPixSig), np.max(chiPixSig))

    ax2 = fig.add_axes([0.15, 0.35, 0.18, 0.6], xticks=[])
    ax2.plot(p_Cmod, chiPixCmod, '-k')
    ax2.plot(pCmod0, chiPixCmod, '--b')
    ax2.set_ylabel(r'$C_{mod}$ (counts)', fontsize=12)
    ax2.set_xlabel(r'$p(C_{mod})$', fontsize=12)
    ax2.set_xlim(ax2.get_xlim()[::-1])  # reverse x axis
    ax2.set_ylim(np.min(chiPixCmod), np.max(chiPixCmod))

    name = None
    if name is None:
        plt.show()
    else:
        print('saving plot to:', name)
        plt.savefig(name, bbox_inches='tight')
def chi2plot(oneDpixels, image, bestModel, chiPixSig, chiPixCmod,
             chi2image, sigtrue, sigmaML, CmodML):

    fig = plt.figure(figsize=(8, 8))
    fig.subplots_adjust(left=0.08, bottom=0.15, right=0.95, top=0.90,
                        wspace=0.29, hspace=0.46)

    ## image with noise
    ax = fig.add_subplot(221)
    ax.set_title('data image', fontsize=14)
    plt.imshow(image, origin='lower', interpolation='nearest',
               extent=(oneDpixels[0], oneDpixels[-1],
                       oneDpixels[0], oneDpixels[-1]),
               cmap=plt.cm.binary, aspect='auto')
    plt.clim(-20, 100)
    plt.colorbar().set_label(r'$counts$', fontsize=14)
    ax.set_xlabel(r'x (pixels)', fontsize=12)
    ax.set_ylabel(r'y (pixels)', fontsize=12)

    ## chi2 image
    ax = fig.add_subplot(222)
    ax.set_title(r'ln($\chi^2_{dof}$) image', fontsize=14)
    Lchi2image = np.log(chi2image / image.size)
    # pretty color map
    plt.imshow(Lchi2image, origin='lower',
               extent=(chiPixSig[0], chiPixSig[-1],
                       chiPixCmod[0], chiPixCmod[-1]),
               cmap=plt.cm.RdYlGn, aspect='auto')
    # mark true values
    ax.plot(sigtrue, 1000.0, 'o', color='blue', alpha=0.75)
    # mark ML solution: (sigmaML, CmodML)
    ax.plot(sigmaML, CmodML, '+', color='blue', alpha=0.99)
    print('chi2plot: sigtrue, sigmaML, CmodML=', sigtrue, sigmaML, CmodML)
    # legend
    plt.clim(np.min(Lchi2image), np.max(Lchi2image))
    plt.colorbar().set_label(r'ln($\chi^2_{dof}$)', fontsize=14)
    # contours: note convert_to_stdev expects a log-likelihood grid;
    # passing ln(chi2_dof) contours cumulative fractions of chi2 itself,
    # unlike the lnL-based contours in chi2plotMarginal
    plt.contour(chiPixSig, chiPixCmod, convert_to_stdev(Lchi2image),
                levels=(0.683, 0.955, 0.997), colors='k')
    ax.set_xlabel(r'$\sigma$ (pixel)', fontsize=12)
    ax.set_ylabel(r'$C_{mod}$ (counts)', fontsize=12)

    ## best-fit model image
    ax = fig.add_subplot(223)
    ax.set_title('best-fit model', fontsize=14)
    plt.imshow(bestModel, origin='lower', interpolation='nearest',
               extent=(oneDpixels[0], oneDpixels[-1],
                       oneDpixels[0], oneDpixels[-1]),
               cmap=plt.cm.binary, aspect='auto')
    plt.clim(-20, 100)
    plt.colorbar().set_label(r'$counts$', fontsize=14)
    ax.set_xlabel(r'x (pixels)', fontsize=12)
    ax.set_ylabel(r'y (pixels)', fontsize=12)

    ## residual: difference of image and best-fit model
    ax = fig.add_subplot(224)
    ax.set_title('data - model residuals', fontsize=14)
    diffimage = image - bestModel
    plt.imshow(diffimage, origin='lower', interpolation='nearest',
               extent=(oneDpixels[0], oneDpixels[-1],
                       oneDpixels[0], oneDpixels[-1]),
               cmap=plt.cm.binary, aspect='auto')
    plt.clim(-60, 60)
    plt.colorbar().set_label(r'$counts$', fontsize=14)
    ax.set_xlabel(r'x (pixels)', fontsize=12)
    ax.set_ylabel(r'y (pixels)', fontsize=12)

    name = None
    if name is None:
        plt.show()
    else:
        print('saving plot to:', name)
        plt.savefig(name, bbox_inches='tight')
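# chi2image is built outside these plotting functions. A minimal sketch of
# the grid evaluation they assume -- a hypothetical reconstruction with a
# circular Gaussian source of width sigma and amplitude C_mod on a
# uniform-noise pixel grid:
import numpy as np

def make_chi2image(image, oneDpixels, chiPixSig, chiPixCmod, noise):
    # chi2 over a (C_mod, sigma) grid, matching the (row, column) layout
    # expected by chi2plot and chi2plotMarginal
    X, Y = np.meshgrid(oneDpixels, oneDpixels)
    chi2 = np.zeros((len(chiPixCmod), len(chiPixSig)))
    for i, C in enumerate(chiPixCmod):
        for j, s in enumerate(chiPixSig):
            model = (C * np.exp(-(X ** 2 + Y ** 2) / (2 * s ** 2))
                     / (2 * np.pi * s ** 2))
            chi2[i, j] = np.sum(((image - model) / noise) ** 2)
    return chi2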
# Priors for alpha and beta
alpha = pymc.Uniform('alpha', -2 * true_alpha, 2 * true_alpha)
beta = pymc.Uniform('beta', 0, 2 * true_beta)
x = pymc.Cauchy('x', alpha, beta, observed=True, value=x_array)
model = [alpha, beta, x]

# Modeling
MC = pymc.MCMC(model)
MC.sample(iter=num_samples, burn=num_samples // 10)

# Getting histograms of values
trace_alpha = MC.trace('alpha')[:]
trace_beta = MC.trace('beta')[:]

# compute histogram of results to plot below
L_MCMC, alpha_bins, beta_bins = np.histogram2d(
    trace_alpha, trace_beta,
    bins=(np.linspace(-5, 5, 41), np.linspace(0, 5, 41)))
L_MCMC[L_MCMC == 0] = 1E-16  # prevents zero-division errors

fig = plt.figure()
plt.contour(0.5 * (alpha_bins[:-1] + alpha_bins[1:]),
            0.5 * (beta_bins[:-1] + beta_bins[1:]),
            convert_to_stdev(np.log(L_MCMC.T)),
            levels=(0.683, 0.955, 0.997), colors='k')
plt.xlabel('alpha')
plt.ylabel('beta')
plt.savefig(filename)
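# true_alpha, true_beta, x_array, num_samples, and filename come from the
# surrounding script. A minimal setup sketch (illustrative values, assuming
# the pymc 2.x API used above; note the alpha prior requires true_alpha > 0):
import numpy as np
import pymc  # pymc 2.x
import matplotlib.pyplot as plt

np.random.seed(0)
true_alpha, true_beta = 2.0, 1.0  # Cauchy location and scale
x_array = true_alpha + true_beta * np.random.standard_cauchy(500)
num_samples = 50000
filename = 'cauchy_mcmc.png'  # hypothetical output name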
# For the model which identifies bad points,
# plot circles around points identified as outliers.
if i == 2:
    qi = S.trace('qi')[:]
    Pi = qi.astype(float).mean(0)  # posterior probability each point is "good"
    outlier_x = xi[Pi < 0.32]
    outlier_y = yi[Pi < 0.32]
    ax1.scatter(outlier_x, outlier_y, lw=1, s=400, alpha=0.5,
                facecolors='none', edgecolors='red')

# plot the likelihood contours
ax = plt.subplot(222 + i)
H, xbins, ybins = np.histogram2d(trace[:, 1], trace[:, 0], bins=bins[i])
H[H == 0] = 1E-16
Nsigma = convert_to_stdev(np.log(H))
ax.contour(0.5 * (xbins[1:] + xbins[:-1]),
           0.5 * (ybins[1:] + ybins[:-1]),
           Nsigma.T, levels=[0.683, 0.955], colors='black')
ax.set_xlabel('intercept')
ax.set_ylabel('slope')
ax.grid()
ax.xaxis.set_major_locator(plt.MultipleLocator(40))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))
ax.text(0.98, 0.98, labels[i], ha='right', va='top', fontsize=14,
        bbox=dict(fc='w', ec='none', alpha=0.5),
        transform=ax.transAxes)
ax.set_xlim(bins[i][0][0], bins[i][0][-1])
ax.set_xlabel('z')
ax.set_ylabel(r'$\mu$', fontsize=16)
ax.text(0.02, 0.98, "%i observations" % len(z_sample),
        ha='left', va='top', transform=ax.transAxes)

# right plot: the likelihood
ax = fig.add_subplot(122)

ax.contour(omegaM, omegaL, convert_to_stdev(res.T),
           levels=(0.683, 0.955, 0.997),
           colors='k', linewidths=2)

ax.plot([0, 1], [1, 0], '--k')
ax.plot([0, 1], [0.73, 0.73], ':k', lw=1)
ax.plot([0.27, 0.27], [0, 2], ':k', lw=1)

ax.set_xlim(0.05, 0.75)
ax.set_ylim(0.4, 1.1)
ax.set_xlabel(r'$\Omega_M$', fontsize=16)
ax.set_ylabel(r'$\Omega_\Lambda$', fontsize=16)

plt.show()
def start():
    direktorij = output_folder.get()
    if not os.path.exists(direktorij):
        os.makedirs(direktorij)

    finishing_string = 'Name & Mean & Median\\\\ \n'
    starting_list = ['Name', 'Mean', 'Median']
    finishing_list = []

    f.seek(0)
    textval = [a.get() for a in txtv]
    textu = [a.get() for a in txtu]
    textl = [a.get() for a in txtl]
    # map column label -> column index, plus optional upper/lower cuts
    labele = OrderedDict([(textval[j], j) for j in range(len(textval))
                          if len(textval[j]) != 0])
    labeleu = OrderedDict([(textval[j], float(textu[j]))
                           for j in range(len(textval))
                           if len(textu[j]) != 0 and len(textval[j]) != 0])
    labelel = OrderedDict([(textval[j], float(textl[j]))
                           for j in range(len(textval))
                           if len(textl[j]) != 0 and len(textval[j]) != 0])

    # read rows from f, keeping only those inside the upper/lower cuts
    X = np.zeros((Number_of_rows, Number_of_columns))
    i = 0
    for line in f:
        line1 = line.strip().split()
        passing = True
        for dicts, vals in labele.items():
            if dicts in labelel and labelel[dicts] > float(line1[vals]):
                passing = False
            if dicts in labeleu and labeleu[dicts] < float(line1[vals]):
                passing = False
        if passing:
            for j in range(Number_of_columns):
                X[i][j] = float(line1[j])
            i = i + 1
    X = X[0:i, :]

    # per-column histograms with mean/median markers
    fig = plt.figure(figsize=(20, 15))
    count = 1
    for ime, val in labele.items():  # ime = column name
        ax = fig.add_subplot(2, 3, count)
        ax.set_xlabel(ime, size=30)
        ax.hist(X[:, val], bins=50, histtype='stepfilled',
                color='blue', facecolor='blue')
        ax.axvline(np.mean(X[:, val]), color='orange', linestyle='--')
        ax.axvline(np.median(X[:, val]), color='green', linestyle='--')
        ax.xaxis.major.formatter._useMathText = True
        ax.ticklabel_format(style='sci', axis='x', scilimits=(-5, 5))
        mean_str = "$ {:.2uL}$".format(ufloat(np.mean(X[:, val]),
                                              np.std(X[:, val])))
        median_str = "$ {:.2uL}$".format(ufloat(np.median(X[:, val]),
                                                sigmaG(X[:, val])))
        text = ('Mean and median:\n'
                "mean$\\rightarrow$ " + mean_str + "\n"
                "median$\\rightarrow$ " + median_str)
        finishing_string += ime + " & " + mean_str + " & " + median_str + "\\\\ \n"
        finishing_list.append([ime, mean_str, " " + median_str])
        ax.text(.65, .9, text, transform=ax.transAxes)
        count = count + 1
    plt.tight_layout()
    plt.savefig(direktorij + '/Histogrami.png')
    plt.close()
    reportwin(starting_list, finishing_string, finishing_list)

    # for every column, map its mean value over 2D histograms of each
    # pair of the remaining columns
    for names0, vals0 in labele.items():
        fig = plt.figure(figsize=(30, 25))
        labele1 = deepcopy(labele)
        del labele1[names0]
        labele2 = deepcopy(labele1)
        nx = ceil(np.sqrt(len(labele1) * (len(labele1) - 1) / 2))
        ny = ceil(len(labele1) * (len(labele1) - 1) / 2 / nx)
        counts = 1
        cmap_multicolor = plt.cm.jet
        for names, vals in labele1.items():
            del labele2[names]
            for names1, vals1 in labele2.items():
                N0, xedges0, yedges0 = binned_statistic_2d(
                    X[:, vals], X[:, vals1], X[:, labele[names0]],
                    'mean', bins=100)
                ax = fig.add_subplot(ny, nx, counts)
                im = ax.imshow(N0.T, origin='lower',
                               extent=[xedges0[0], xedges0[-1],
                                       yedges0[0], yedges0[-1]],
                               aspect='auto', interpolation='nearest',
                               cmap=cmap_multicolor)
                plt.xlim(xedges0[0], xedges0[-1])
                plt.ylim(yedges0[0], yedges0[-1])
                plt.xlabel(names, size=30)
                plt.ylabel(names1, size=30)
                ax.xaxis.major.formatter._useMathText = True
                ax.yaxis.major.formatter._useMathText = True
                ax.ticklabel_format(style='sci', axis='x', scilimits=(-5, 5))
                # m_1 = np.linspace(xedges0[0], xedges0[-1], 100)
                # m_2 = np.linspace(yedges0[0], yedges0[-1], 100)
                # MX, MY = np.meshgrid(m_1, m_2)
                # Z = sigmas(MX, np.median(X[:, vals]), sigmaG(X[:, vals]),
                #            MY, np.median(X[:, vals1]), sigmaG(X[:, vals1]))
                H, xbins, ybins = np.histogram2d(X[:, vals], X[:, vals1],
                                                 bins=100)
                Nsigma = convert_to_stdev(np.log(H))
                # levels must be strictly increasing; the original repeated
                # each level, which newer matplotlib rejects
                plt.contour(0.5 * (xbins[1:] + xbins[:-1]),
                            0.5 * (ybins[1:] + ybins[:-1]),
                            Nsigma.T, levels=[0.6827, 0.9545],
                            colors=['0.25', '0.5'], linewidths=2)
                counts = counts + 1
        cmap_multicolor.set_bad('w', 1.)
        fig.subplots_adjust(bottom=0.1)
        cbar_ax = fig.add_axes([0.1, 0.05, 0.8, 0.025])
        cb = fig.colorbar(im, cax=cbar_ax, format=r'$%.1f$',
                          orientation='horizontal')
        cb.set_label('$\\langle ' + names0.replace('$', '') + '\\rangle $',
                     size=30)
        plt.savefig(direktorij + '/' +
                    ''.join([c for c in names0 if c.isalpha() or c.isdigit()]) +
                    '.png', bbox_inches='tight')
        plt.close()
# plot the best-fit line
m_fit, b_fit = get_m_b(beta_fit)
x_fit = np.linspace(0, 300, 10)
ax.plot(x_fit, m_fit * x_fit + b_fit, '-k')

ax.set_xlim(40, 250)
ax.set_ylim(100, 600)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')

#------------------------------------------------------------
# plot the likelihood contour in m, b
ax = fig.add_subplot(122)
m = np.linspace(1.7, 2.8, 100)
b = np.linspace(-60, 110, 100)
logL = np.zeros((len(m), len(b)))

for i in range(len(m)):
    for j in range(len(b)):
        logL[i, j] = TLS_logL(get_beta(m[i], b[j]), X, dX)

ax.contour(m, b, convert_to_stdev(logL.T),
           levels=(0.683, 0.955, 0.997), colors='k')
ax.set_xlabel('slope')
ax.set_ylabel('intercept')
ax.set_xlim(1.7, 2.8)
ax.set_ylim(-60, 110)

plt.show()
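# TLS_logL is astroML's total-least-squares log-likelihood
# (astroML.linear_model.TLS_logL); get_beta and get_m_b convert between
# (slope, intercept) and the normal-vector parametrization it expects.
# A sketch consistent with the astroML figure code this appears to derive
# from (verify against your astroML version):
import numpy as np

def get_m_b(beta):
    # translate the normal-vector parameters into slope/intercept
    b = np.dot(beta, beta) / beta[1]
    m = -beta[0] / beta[1]
    return m, b

def get_beta(m, b):
    # translate slope/intercept into the normal-vector parameters
    denom = 1 + m * m
    return np.array([-b * m / denom, b / denom])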
sigma = np.linspace(0.01, 5, 70)
mu = np.linspace(-3, 5, 70)

logL = gaussgauss_logL(xi, ei, mu, sigma[:, np.newaxis])
logL -= logL.max()

#------------------------------------------------------------
# plot the results
plt.imshow(logL, origin='lower',
           extent=(mu[0], mu[-1], sigma[0], sigma[-1]),
           cmap=plt.cm.binary, aspect='auto')
plt.colorbar().set_label(r'$\log(L)$')
plt.clim(-5, 0)

plt.text(0.5, 0.9,
         (r'$L(\mu,\sigma)\ \mathrm{for}\ \bar{x}=1,\ '
          r'\sigma_{\rm true}=1,\ n=10$'),
         bbox=dict(ec='k', fc='w', alpha=0.9), fontsize=18,
         ha='center', va='center', transform=plt.gca().transAxes)

plt.contour(mu, sigma, convert_to_stdev(logL),
            levels=(0.683, 0.955, 0.997),
            colors='k', linewidths=2)

plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
plt.show()
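# gaussgauss_logL is defined outside this excerpt. A sketch of the
# Gaussian-with-Gaussian-errors log-likelihood the call assumes, following
# the astroML book's figure code (treat the broadcasting details as an
# assumption to verify):
import numpy as np

def gaussgauss_logL(xi, ei, mu, sigma):
    # log-likelihood of data xi with per-point errors ei, for a Gaussian
    # of mean mu and intrinsic width sigma (widths add in quadrature)
    ndim = len(np.broadcast(sigma, mu).shape)
    xi = xi.reshape(xi.shape + tuple(ndim * [1]))
    ei = ei.reshape(ei.shape + tuple(ndim * [1]))
    s2_e2 = sigma ** 2 + ei ** 2
    return -0.5 * np.sum(np.log(s2_e2) + (xi - mu) ** 2 / s2_e2, axis=0)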
hist_mu, bins_mu = np.histogram(trace['mu'], bins=mu_bins, density=True)
hist_gamma, bins_gamma = np.histogram(np.exp(trace['log_gamma']),
                                      bins=gamma_bins, density=True)

# ----------------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(5, 5))

# first axis: likelihood contours
ax1 = fig.add_axes((0.4, 0.4, 0.55, 0.55))
ax1.xaxis.set_major_formatter(plt.NullFormatter())
ax1.yaxis.set_major_formatter(plt.NullFormatter())
ax1.contour(mu, gamma, convert_to_stdev(logL),
            levels=(0.683, 0.955, 0.997),
            colors='b', linestyles='dashed')
ax1.contour(0.5 * (mu_bins[:-1] + mu_bins[1:]),
            0.5 * (gamma_bins[:-1] + gamma_bins[1:]),
            convert_to_stdev(np.log(L_MCMC.T)),
            levels=(0.683, 0.955, 0.997), colors='k')

# second axis: marginalized over mu
ax2 = fig.add_axes((0.1, 0.4, 0.29, 0.55))
ax2.xaxis.set_major_formatter(plt.NullFormatter())
ax2.plot(hist_gamma,
         0.5 * (bins_gamma[1:] + bins_gamma[:-1] - bins_gamma[1] + bins_gamma[0]),
         '-k', drawstyle='steps')
sigma = np.linspace(0.01, 5, 70)
mu = np.linspace(-3, 5, 70)

logL = gaussgauss_logL(xi, ei, mu, sigma[:, np.newaxis])
logL -= logL.max()

# ------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(5, 3.75))
plt.imshow(logL, origin="lower",
           extent=(mu[0], mu[-1], sigma[0], sigma[-1]),
           cmap=plt.cm.binary, aspect="auto")
plt.colorbar().set_label(r"$\log(L)$")
plt.clim(-5, 0)
plt.text(
    0.5, 0.93,
    (r"$L(\mu,\sigma)\ \mathrm{for}\ \bar{x}=1,\ "
     r"\sigma_{\rm true}=1,\ n=10$"),
    bbox=dict(ec="k", fc="w", alpha=0.9),
    ha="center", va="center",
    transform=plt.gca().transAxes,
)

plt.contour(mu, sigma, convert_to_stdev(logL),
            levels=(0.683, 0.955, 0.997), colors="k")

plt.xlabel(r"$\mu$")
plt.ylabel(r"$\sigma$")
plt.show()
plt.imshow(logL, origin="lower", aspect="auto", extent=(sigma[0], sigma[-1], A[0], A[-1]), cmap=plt.cm.binary) plt.colorbar().set_label(r"$\log(L)$") plt.clim(-5, 0) ax.set_xlabel(r"$\sigma$") ax.set_ylabel(r"$A$") ax.text( 0.5, 0.9, r"$L(\sigma,A)\ (\mathrm{Gauss + bkgd},\ n=200)$", fontsize=16, bbox=dict(ec="k", fc="w", alpha=0.9), ha="center", va="center", transform=plt.gca().transAxes, ) ax.contour(sigma, A, convert_to_stdev(logL), levels=(0.683, 0.955, 0.997), colors="k", linewidths=2) ax2 = plt.subplot(212) ax2.yaxis.set_major_locator(plt.MultipleLocator(0.1)) ax2.plot(x, fracA * dist1.pdf(x) + (1.0 - fracA) * dist2.pdf(x), "-k") ax2.hist(xi, 30, normed=True, histtype="stepfilled", fc="black", alpha=0.5) ax2.set_ylim(0, 0.301) ax2.set_xlim(-1, 11) ax2.set_xlabel("$x$") ax2.set_ylabel("$p(x)$") plt.show()
plt.colorbar().set_label(r'$\log(L)$')
plt.clim(-5, 0)

ax.set_xlabel(r'$\sigma$')
ax.set_ylabel(r'$A$')
ax.text(0.5, 0.9,
        r'$L(\sigma,A)\ (\mathrm{Gauss + bkgd},\ n=200)$',
        bbox=dict(ec='k', fc='w', alpha=0.9),
        ha='center', va='center', transform=plt.gca().transAxes)

ax.contour(sigma, A, convert_to_stdev(logL),
           levels=(0.683, 0.955, 0.997), colors='k')

ax2 = plt.subplot(212)
ax2.yaxis.set_major_locator(plt.MultipleLocator(0.1))
ax2.plot(x, fracA * dist1.pdf(x) + (1. - fracA) * dist2.pdf(x), '-k')
ax2.hist(xi, 30, density=True, histtype='stepfilled', fc='gray', alpha=0.5)

ax2.set_ylim(0, 0.301)
ax2.set_xlim(-1, 11)
ax2.set_xlabel('$x$')
ax2.set_ylabel('$p(x)$')

plt.show()
fig = plt.figure()
ax, = plot_mcmc([trace_mu, trace_sigma], fig=fig,
                limits=[(-3, 5), (0, 5)],
                labels=(r'$\mu$', r'$\sigma$'),
                levels=[0.683, 0.955, 0.997],
                colors='k', linewidths=2)

#----------------------------------------------------------------------
# Compute and plot likelihood with known ei for comparison
# (Same as fig_likelihood_gaussgauss)
sigma = np.linspace(0.01, 5, 41)
mu = np.linspace(-3, 5, 41)

logL = gaussgauss_logL(xi, ei, mu, sigma[:, np.newaxis])
logL -= logL.max()

im = ax.contourf(mu, sigma, convert_to_stdev(logL),
                 levels=(0, 0.683, 0.955, 0.997),
                 cmap=plt.cm.binary_r, alpha=0.5)
im.set_clim(0, 1.1)

ax.set_xlabel(r'$\mu$')
ax.set_ylabel(r'$\sigma$')
ax.set_xlim(-3, 5)
ax.set_ylim(0, 5)
ax.set_aspect(1. / ax.get_data_ratio())

plt.show()
ax, = plot_mcmc([trace_mu, trace_sigma], fig=fig,
                limits=[(-3.2, 4.2), (0, 5)],
                labels=(r"$\mu$", r"$\sigma$"),
                levels=[0.683, 0.955, 0.997],
                colors="k", linewidths=2)

# ----------------------------------------------------------------------
# Compute and plot likelihood with known ei for comparison
# (Same as fig_likelihood_gaussgauss)
sigma = np.linspace(0.01, 5, 41)
mu = np.linspace(-3.2, 4.2, 41)

logL = gaussgauss_logL(xi, ei, mu, sigma[:, np.newaxis])
logL -= logL.max()

im = ax.contourf(mu, sigma, convert_to_stdev(logL),
                 levels=(0, 0.683, 0.955, 0.997),
                 cmap=plt.cm.binary_r, alpha=0.5)
im.set_clim(0, 1.1)

ax.set_xlabel(r"$\mu$")
ax.set_ylabel(r"$\sigma$")
ax.set_xlim(-3.2, 4.2)
ax.set_ylim(0, 5)
ax.set_aspect(1.0 / ax.get_data_ratio())

plt.show()