import numpy as np


def lnlikeHF(pars, samples, obs, u, extra=False):
    """
    Generic likelihood function for importance sampling with any number of
    dimensions, now with an added (hierarchical) jitter parameter.
    obs should be a 2d array of observations. shape = (ndims, nobs)
    u should be a 2d array of uncertainties. shape = (ndims, nobs)
    samples is a 3d array of samples. shape = (ndims, nobs, nsamp)
    If extra == True, the sigma has both a slope and an intercept.
    """
    ndims, nobs, nsamp = samples.shape
    ypred = model(pars, samples)  # model predictions for the flattened samples
    yobs, xobs = obs[1, :], obs[0, :]
    yerr = u[1, :]
    ll = np.zeros((nobs, nsamp * nobs))
    for i in range(nobs):
        if extra:
            inv_sigma2 = 1.0 / (yerr[i] ** 2 +
                                (pars[2] + pars[3] * model1(pars, xobs[i])) ** 2)
        else:
            inv_sigma2 = 1.0 / (yerr[i] ** 2 +
                                (pars[2] * model1(pars, xobs[i])) ** 2)
        # Gaussian log-likelihood, with the ln(inv_sigma2) normalisation term
        # inside the -0.5 factor (cf. vanilla_lnlike below)
        ll[i, :] = -0.5 * ((yobs[i] - ypred) ** 2 * inv_sigma2
                           - np.log(inv_sigma2))
    # marginalise over the posterior samples of each observation
    loglike = np.sum(np.logaddexp.reduce(ll, axis=1))
    if np.isfinite(loglike):
        return loglike
    return -np.inf
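# Minimal usage sketch (an assumption, not original code): `model` and
# `model1` are defined elsewhere in this repo. Judging by the plotting code
# below, model1 is a straight line; `model` is taken here to evaluate that
# line on the flattened samples. These stand-ins are for illustration only.
def model1(pars, x):
    # straight line: slope pars[0], intercept pars[1]
    return pars[0] * x + pars[1]


def model(pars, samples):
    # evaluate the line on every posterior sample, flattened to match the
    # (nobs, nsamp * nobs) likelihood array built in lnlikeHF
    return pars[0] * samples[0].ravel() + pars[1]


if __name__ == "__main__":
    np.random.seed(42)
    nobs, nsamp = 10, 3
    x = np.sort(np.random.uniform(0., 1., nobs))
    y = -1.85 * x + 5.413
    xerr = yerr = .01 * np.ones(nobs)
    obs, u = np.vstack((x, y)), np.vstack((xerr, yerr))
    # fake posterior samples: observations perturbed by their uncertainties
    samples = obs[:, :, None] + u[:, :, None] * np.random.randn(2, nobs, nsamp)
    print(lnlikeHF([-1.85, 5.413, .065], samples, obs, u))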
def lnlikeHFM(pars, samples, obs, u, extra=False):
    """
    Generic likelihood function for importance sampling with any number of
    dimensions, now with an added (hierarchical) jitter parameter.
    obs should be a 2d array of observations. shape = (ndims, nobs)
    u should be a 2d array of uncertainties. shape = (ndims, nobs)
    samples is a 3d array of samples. shape = (ndims, nobs, nsamp)
    If extra == True, the sigma has both a slope and an intercept.
    Now with a mixture model!
    """
    ndims, nobs, nsamp = samples.shape
    ypred = model(pars, samples)
    yobs, xobs = obs[1, :], obs[0, :]
    yerr = u[1, :]
    ll1 = np.zeros((nobs, nsamp * nobs))
    ll2 = np.zeros((nobs, nsamp * nobs))
    # mixture parameters: outlier mean Y, outlier variance V, mixing
    # fraction P (NB: with extra == True, pars[3] doubles as the sigma slope)
    Y, V, P = pars[3], pars[4], pars[5]
    for i in range(nobs):
        if extra:
            inv_sigma2 = 1.0 / (yerr[i] ** 2 +
                                (pars[2] + pars[3] * model1(pars, xobs[i])) ** 2 + V)
        else:
            inv_sigma2 = 1.0 / (yerr[i] ** 2 +
                                (pars[2] * model1(pars, xobs[i])) ** 2 + V)
        ll1[i, :] = -.5 * ((yobs[i] - ypred) ** 2 * inv_sigma2
                           - np.log(inv_sigma2))  # in-trend component
        ll2[i, :] = -.5 * ((yobs[i] - Y) ** 2 * inv_sigma2
                           - np.log(inv_sigma2))  # outlier component
    lnlike1 = np.logaddexp.reduce(ll1, axis=1)
    lnlike2 = np.logaddexp.reduce(ll2, axis=1)
    # per-object mixture L_i = (1 - P) * L1_i + P * L2_i, combined in log
    # space with logaddexp for numerical stability
    loglike = np.sum(np.logaddexp(np.log(1 - P) + lnlike1,
                                  np.log(P) + lnlike2))
    if np.isfinite(loglike):
        return loglike
    return -np.inf
import h5py
import matplotlib.pyplot as plt
import triangle


def make_plots(whichx, fname):
    x, y, xerr, yerr = load_data(whichx)
    with h5py.File("%s_samples_%s.h5" % (whichx, fname), "r") as f:
        samp = f["samples"][...]
    m, c, sig = map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
                    zip(*np.percentile(samp, [16, 50, 84], axis=0)))
    pars = [m[0], c[0], sig[0]]
    print(pars)
    plt.clf()
    plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt="k.", capsize=0, ecolor=".7")
    plt.plot(x, model1(pars, x), "k")
    # overplot posterior draws; sample whole rows of the chain so that
    # parameter correlations are preserved
    ndraws = 100
    for i in np.random.choice(len(samp), ndraws):
        plt.plot(x, samp[i, 0] * x + samp[i, 1] + samp[i, 2], "k", alpha=.1)
    plt.savefig("mcmc_%s_%s" % (whichx, fname))
    labels = ["$m$", "$c$", r"$\sigma$"]
    plt.clf()
    fig = triangle.corner(samp, labels=labels)
    fig.savefig("triangle_%s_%s" % (whichx, fname))
def make_plots(whichx, fname):
    x, y, xerr, yerr = load_data(whichx)
    with h5py.File("%s_samples.h5" % whichx, "r") as f:
        samp = f["samples"][...]
    m, c, sig = map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
                    zip(*np.percentile(samp, [16, 50, 84], axis=0)))
    pars = [m[0], c[0], sig[0]]
    print(pars)
    plt.clf()
    plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt="k.", capsize=0, ecolor=".7")
    plt.plot(x, model1(pars, x), "k")
    # overplot posterior draws; sample whole rows of the chain so that
    # parameter correlations are preserved
    ndraws = 100
    for i in np.random.choice(len(samp), ndraws):
        plt.plot(x, samp[i, 0] * x + samp[i, 1] + samp[i, 2], "k", alpha=.1)
    plt.savefig("mcmc_%s_%s" % (whichx, fname))
    labels = ["$m$", "$c$", r"$\sigma$"]
    plt.clf()
    fig = triangle.corner(samp, labels=labels)
    fig.savefig("triangle_%s_%s" % (whichx, fname))
def forecasts(n_clicks, days, v):
    if v is None or days is None:
        raise PreventUpdate
    df = m.model1(v, days)
    fig = fore(df, days)
    return [dcc.Graph(figure=fig)]
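# Hypothetical wiring for the callback above; the app layout and component
# ids here are assumptions for illustration, not from the original source.
# A button click triggers the forecast, with the horizon and model variant
# passed as State so that typing alone does not re-run the model.
import dash
from dash import dcc, html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate

app = dash.Dash(__name__)
app.layout = html.Div([
    dcc.Dropdown(id="variant"),           # model variant `v`
    dcc.Input(id="days", type="number"),  # forecast horizon in days
    html.Button("Run forecast", id="run"),
    html.Div(id="forecast-output"),
])

# register `forecasts` against the assumed ids; returning a list matches
# the "children" property of the output Div
app.callback(Output("forecast-output", "children"),
             [Input("run", "n_clicks")],
             [State("days", "value"), State("variant", "value")])(forecasts)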
def vanilla_lnlike(pars, obs, u):
    """
    Generic likelihood function.
    """
    # obs and u are stacked as (x, y) rows (see np.vstack((x, y)) below),
    # so row 0 is x and row 1 is y
    xobs, yobs = obs[0, :], obs[1, :]
    xerr, yerr = u[0, :], u[1, :]
    ypred = model1(pars, xobs)
    inv_sigma2 = 1. / (yerr ** 2 + pars[2] ** 2)
    chi2 = -.5 * (((yobs - ypred) ** 2 * inv_sigma2) - np.log(inv_sigma2))
    return np.sum(chi2)
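# The emcee calls below pass an `lnprob` that is not defined in this
# section. A minimal sketch, assuming broad uniform priors on the slope,
# intercept and (positive) scatter; the bounds are illustrative guesses.
def lnprior(pars):
    m, c, sig = pars[0], pars[1], pars[2]
    if -10. < m < 10. and -10. < c < 10. and 0. < sig < 10.:
        return 0.
    return -np.inf


def lnprob(pars, samples, obs, u):
    lp = lnprior(pars)
    if not np.isfinite(lp):
        return -np.inf
    return lp + lnlikeHF(pars, samples, obs, u)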
import emcee


u = np.vstack((xerr, yerr))
nsamp = 3
s = generate_samples(obs, u, nsamp)
pars_init = [-1.850, 5.413, .065]
ndim, nwalkers = len(pars_init), 32
pos = [pars_init + 1e-4 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(s, obs, u))
print("burning in...")
pos, _, _ = sampler.run_mcmc(pos, 500)
sampler.reset()
print("production run...")
sampler.run_mcmc(pos, 1000)
samp = sampler.chain[:, 50:, :].reshape((-1, ndim))
m, c, lnf = map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
                zip(*np.percentile(samp, [16, 50, 84], axis=0)))
pars = [m[0], c[0], lnf[0]]
plt.clf()
plt.plot(s[0, :, :], s[1, :, :], "r.", markersize=2)
plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt="k.", capsize=0)
print(pars_init, pars)
plt.plot(x, model1(pars_init, x), color="b")
plt.plot(x, model1(pars, x), color="g")
plt.savefig("mcmc")
fig = triangle.corner(samp, truths=pars_init)
fig.savefig("triangle.png")
nsamp = 3
s = generate_samples(obs, u, nsamp)
pars_init = [-1.850, 5.413, .065]
ndim, nwalkers = len(pars_init), 32
pos = [pars_init + 1e-4 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(s, obs, u))
print("burning in...")
pos, _, _ = sampler.run_mcmc(pos, 500)
sampler.reset()
print("production run...")
sampler.run_mcmc(pos, 1000)
samp = sampler.chain[:, 50:, :].reshape((-1, ndim))
m, c, lnf = map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
                zip(*np.percentile(samp, [16, 50, 84], axis=0)))
pars = [m[0], c[0], lnf[0]]
plt.clf()
plt.plot(s[0, :, :], s[1, :, :], "r.", markersize=2)
plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt="k.", capsize=0)
print(pars_init, pars)
plt.plot(x, model1(pars_init, x), color="b")
plt.plot(x, model1(pars, x), color="g")
plt.savefig("mcmc")
fig = triangle.corner(samp, truths=pars_init)
fig.savefig("triangle.png")
obs = np.vstack((x, y))
u = np.vstack((xerr, yerr))
nsamp = 3
s = generate_samples(obs, u, nsamp)
ndim, nwalkers = len(pars_init), 32
pos = [pars_init + 1e-4 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(s, obs, u))
print("burning in...")
pos, _, _ = sampler.run_mcmc(pos, 500)
sampler.reset()
print("production run...")
sampler.run_mcmc(pos, 1000)
samp = sampler.chain[:, 50:, :].reshape((-1, ndim))
m, c, sig = map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
                zip(*np.percentile(samp, [16, 50, 84], axis=0)))
pars = [m[0], c[0], sig[0]]
plt.clf()
plt.plot(s[0, :, :], s[1, :, :], "r.", markersize=2)
plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt="k.", capsize=0)
print(pars_init, pars)
plt.plot(x, model1(pars_init, x))
plt.plot(x, model1(pars, x))
plt.savefig("mcmc")
fig = triangle.corner(samp, truths=pars_init)
fig.savefig("triangle.png")
def __init__(self):
    self._app = QApplication(sys.argv)
    self._view = view.window()
    self._model = model.model1()
import numpy as np
import cv2
import tensorflow as tf
import keras
from skimage.transform import resize  # assumed source of `resize`

import model

X_train_filenames = []
y_train = []
for i in filedata:  # `filedata` is built elsewhere: (filename, label) pairs
    X_train_filenames.append(i[0])
    y_train.append(int(i[1]))
y_train = np.array(y_train)

X = np.zeros((len(X_train_filenames), 100, 100, 1))
for i in range(len(X_train_filenames)):
    img = cv2.imread("./chest_xray/allimages/" + X_train_filenames[i], 0)
    img = resize(img, (100, 100, 1))
    X[i, :, :, :] = img
    print(i)
np.save("train.npy", X)

X_train = np.load("train.npy")
print(X_train.shape)
# X_train = np.array(X_train).reshape(-1, IMG_SIZE, IMG_SIZE, 1)

mod1 = model.model1()
opt = keras.optimizers.Adam()
mod1.compile(optimizer=opt,
             loss=tf.keras.losses.SparseCategoricalCrossentropy(),
             metrics=["accuracy"])
mod1.fit(X_train, y_train, epochs=30, batch_size=100)

# serialize the model architecture to JSON and the weights to HDF5
model_json = mod1.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
mod1.save_weights("model.h5")
print("Saved model to disk")
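# Sketch of reloading the model saved above (the standard Keras
# JSON-plus-weights pattern; filenames match the ones this script writes):
from keras.models import model_from_json

with open("model.json", "r") as json_file:
    loaded = model_from_json(json_file.read())
loaded.load_weights("model.h5")
loaded.compile(optimizer="adam",
               loss=tf.keras.losses.SparseCategoricalCrossentropy(),
               metrics=["accuracy"])
print(loaded.evaluate(X_train, y_train))  # sanity check on the training data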
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam, SGD, Adamax
from keras.callbacks import EarlyStopping
import matplotlib.pyplot as plt
from keras.utils import plot_model
from keras.callbacks import TensorBoard, CSVLogger
import pickle

from model import model1, model2, model3, model4, model5, model6, model7, model8

model1()
model2()
model3()
model4()
model5()
model6()
model8()  # model7 is imported but not invoked here