Example #1
## Author: Júlio Hoffimann Mendes

import numpy as np
import matplotlib.pyplot as pl
from pyhum.decomposition import KernelPCA

# make sure results are reproducible
np.random.seed(2014)

# load ensemble from disk (nfeatures x nsamples)
X = np.loadtxt("ensemble.csv", delimiter=",", skiprows=1, usecols=range(100))

fig = pl.figure()

for d in range(1, 5):
    kpca = KernelPCA(degree=d)
    ncomps = kpca.train(X, ncomps=50)
    img = kpca.predict(np.ones(ncomps))
    denoised = kpca.denoise(img)

    pl.subplot(2, 4, d)
    pl.title("d = %i" % d)
    pl.imshow(img.reshape(250, 250), cmap="PuBu")
    pl.axis("off")

    pl.subplot(2, 4, d + 4)
    pl.imshow(denoised.reshape(250, 250), cmap="PuBu")
    pl.axis("off")

fig.tight_layout(h_pad=0.5, w_pad=0.5)
bbox_props = dict(boxstyle="rarrow,pad=0.3", fc="white", ec="b", lw=2)  # for annotations later in the full script (not shown in this excerpt)
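
# --- Sketch: the same pipeline on synthetic data, for readers without the
# --- project's "ensemble.csv". It uses only calls that appear above; the
# --- model size (250x250) and ensemble size (100) match the reshape and
# --- usecols above, and the random ensemble is purely illustrative.
rng = np.random.RandomState(2014)
Xsynth = rng.rand(250 * 250, 100)       # nfeatures x nsamples stand-in ensemble

kpca = KernelPCA(degree=2)              # quadratic kernel, one of the degrees above
ncomps = kpca.train(Xsynth, ncomps=50)  # train returns the retained component count
img = kpca.predict(np.ones(ncomps))     # back-transform a constant feature vector
denoised = kpca.denoise(img)            # strip kernel reconstruction artifacts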
Example #2
# fragment of a history-matching script: the prior ensemble X, the MPI worker
# pool, the forward operator G, and the constants ncomps and nsamples are all
# defined earlier in the full file

# only 44550 of 60048 cells are active
mask = np.loadtxt("null.inc", dtype=bool, skiprows=2)
X = X[mask, :]

pool.wait()

# evaluate forward operator on prior ensemble and save results
if pool.is_master():
    D = np.array(pool.map(G, [m for m in X.T])).T
    np.savetxt("Dprior.dat", D)

pool.proceed()

# ensemble in feature space (ncomps << nfeatures)
kpca = KernelPCA()
kpca.train(X, ncomps=ncomps)
CSI = kpca.featurize(X)

# Nonparametric is pyhum's KDE-based distribution (imported earlier in the file)
mprior = Nonparametric(CSI.T)

# KDE-based independence proposal: the current state CSI is ignored and a
# fresh draw from the prior KDE is returned
def kde_proposal(CSI):
    return mprior.sample(n_samples=nsamples)
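
# --- Sketch: round-tripping KDE draws back to model space with the calls
# --- shown in these examples. Assumes kpca and mprior from above; n_draws is
# --- illustrative. Shapes follow the conventions used here: the KDE stores
# --- samples as rows, and predict expects components-by-samples.
n_draws = 100
csi_draws = mprior.sample(n_samples=n_draws)  # feature-space draws, one per row
Xdraws = kpca.predict(csi_draws.T)            # back to model space (nfeatures x n_draws)
Xdraws = kpca.denoise(Xdraws)                 # remove kernel reconstruction noise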

from utils import alltimes, history
timesteps = [1812, 2421, 3029] # chosen timesteps for Bayesian inference

# history-based uncertainty mitigation
for i, t in enumerate(timesteps, 1):
    dobs = history[alltimes==t,:].flatten()
Example #3
##
## Created: 22 Mar 2014
## Author: Júlio Hoffimann Mendes

import numpy as np
import matplotlib.pyplot as pl
from pyhum.decomposition import KernelPCA

# make sure results are reproducible
np.random.seed(2014)

# load ensemble from disk (nfeatures x nsamples)
X = np.loadtxt("ensemble.csv", delimiter=",", skiprows=1, usecols=range(100))

# parametrization
kpca = KernelPCA(degree=4)
ncomps = kpca.train(X, ncomps=50)

# featurize and reconstruct
CSI = kpca.featurize(X)
Xnew = kpca.predict(CSI)
Xnew = kpca.denoise(Xnew)

fig = pl.figure()

for i in range(10):
    pl.subplot(5, 4, 2 * i + 1)
    pl.imshow(X[:, i].reshape(250, 250), cmap="PuBu")
    if i < 2:
        pl.title("original", fontdict={"fontsize": 10})
    pl.axis("off")
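
# --- Sketch: a quick quantitative check on the round trip above, plain NumPy
# --- only. Assumes Xnew has the same shape as X, as featurize/predict imply.
rel_err = np.linalg.norm(X - Xnew) / np.linalg.norm(X)
print("relative reconstruction error: %.4f" % rel_err)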
Example #4
#-----------------------------------------------------------

logger.info("Plotting acceptance fraction for each walker...")

acceptance = np.loadtxt("acceptance1000.dat")
fig = plot_acceptance(acceptance)
pl.show()
fig.savefig("acceptance.pdf", bbox_inches="tight")
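
# --- Sketch: a plain-matplotlib stand-in for plot_acceptance, in case that
# --- helper is unavailable; the function name here is hypothetical. One bar
# --- per walker, with the 0.2-0.5 band often recommended for ensemble
# --- samplers shaded for reference.
def plot_acceptance_sketch(acceptance):
    fig, ax = pl.subplots()
    ax.bar(np.arange(len(acceptance)), acceptance)
    ax.axhspan(0.2, 0.5, alpha=0.2)  # commonly recommended acceptance range
    ax.set_xlabel("walker")
    ax.set_ylabel("acceptance fraction")
    return fig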

#-----------------------------------------------------------

logger.info("Plotting prior and posterior ensemble... (be patient)")

CSI = np.loadtxt("ensemble1000.dat")
nsamples, ncomps = CSI.shape
Xprior = np.loadtxt("ensemble.csv", delimiter=",", skiprows=1, usecols=range(nsamples))
kpca = KernelPCA(degree=4)
kpca.train(Xprior, ncomps=ncomps)
Xpost = kpca.predict(CSI.T)
Xpost = kpca.denoise(Xpost)
# posterior (computed earlier in the full script) holds each sample's
# posterior probability; sort samples from most to least probable
idx = np.argsort(posterior)[::-1]
for name, X in [("prior", Xprior), ("posterior", Xpost)]:
    fig = pl.figure()
    for i in range(25):
        pl.subplot(5, 5, i + 1)
        pl.imshow(X[:, idx[i]].reshape(250, 250), cmap="PuBu")
        pl.axis("off")
    fig.subplots_adjust(left=0.1, bottom=0.0, right=0.9, top=0.92, wspace=0.2, hspace=0.2)
    fig.suptitle(name + " ensemble")
    pl.show()
    fig.savefig(name + ".pdf", bbox_inches="tight")

#-----------------------------------------------------------