Example #1
def SetVariables(self, a=1.0, b=1.0, c=1.0, x='uniform'):
     '''Set variables'''
     # Initialize positions
     if x == 'uniform':
         self.x = np.random.rand(self.N, self.D) - 0.5
     elif x == 'normal':
         self.x = np.random.normal(size=(self.N, self.D))
     self.R = np.zeros((self.N, self.N))  # Initialize rij
     self.r = np.zeros(self.N)  # Initialize ri
     self.a = a  # Initialize gaussian parameter
     self.b = b  # Initialize Pade-Jastrow parameter
    self.c = c  # Initialize third model parameter
     # Declare objects
     from LocalEnergy import LocalEnergy
     self.EL = LocalEnergy(self.N, self.D, self.w, self.Potential,
                           self.Interaction, self.Elements)
     self.Met = Metropolis(self.N, self.D, self.w, self.dx, self.Sampling,
                           self.Elements)
     self.Opt = Optimization(self.N, self.D, self.MC, self.w, self.eta,
                             self.Optimizer)
     self.WF = WaveFunction(self.N, self.D, self.w, self.Elements)
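
Note: the Metropolis sampler instantiated above is not part of this snippet. For context, a minimal sketch of the acceptance step such a VMC sampler typically performs, assuming a wave-function object with a hypothetical Evaluate(x) method (random-walk Metropolis with acceptance probability |psi(x')|^2 / |psi(x)|^2):

import numpy as np

def metropolis_step(x, wf, dx):
    # Propose a uniform random shift of all coordinates, then accept
    # with probability |psi(x')|^2 / |psi(x)|^2.
    x_new = x + dx * (np.random.rand(*x.shape) - 0.5)
    ratio = (wf.Evaluate(x_new) / wf.Evaluate(x)) ** 2
    if np.random.rand() < ratio:
        return x_new, True   # accepted
    return x, False          # rejected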
Example #2
def main():
    temp = 25.1327
    size = 64
    obs = [OBS.Roughness()]
    sg = ScalarTheory([size, size], SG.Action(1. / temp), obs, "hot")
    mcmoves = SCHED.Explicit([ScalarMove(eps=5.0)])
    met = MET.MetropolisWithOverrelax(mcmoves, sg.action.weight,
                                      SG.OverrelaxMove())
    #met = MET.Metropolis(mcmoves, sg.action.weight);

    ensembleSize = 256
    corSweeps = 256
    thermTime = 2048

    sim = SIM.Simulation(observables=sg.observables,
                         thermTime=thermTime,
                         corSweeps=corSweeps,
                         ensembleSize=ensembleSize,
                         mode="measurements")
    measurements = sim.run(sg, met)
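SG.OverrelaxMove is not defined in this snippet. As a rough sketch under free-field assumptions: overrelaxation reflects the field at a site about the mean of its conditional Gaussian, which preserves a quadratic action exactly; interaction terms are then handled by the Metropolis accept/reject wrapper, which is presumably why the move is combined with MetropolisWithOverrelax above. All names below are illustrative:

import numpy as np

def overrelax_site(phi, i, j, msq):
    # Reflect phi[i, j] about the conditional mean implied by its four
    # lattice neighbours (2D free scalar field, mass^2 = msq).
    L0, L1 = phi.shape
    nb = (phi[(i + 1) % L0, j] + phi[(i - 1) % L0, j]
          + phi[i, (j + 1) % L1] + phi[i, (j - 1) % L1])
    mu = nb / (4.0 + msq)
    phi[i, j] = 2.0 * mu - phi[i, j]  # action-preserving when S is quadratic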
Example #3
i = np.where(x == 0)[0]
if i.size > 0:
    x = np.delete(x, i)

# Creation of noise free synthetic data
pred = calcPred(mtarget, x)  # Unnormalized noise free data

# Noisy data
data = pred + sigdata * np.random.randn(pred.size)
data_dict = {'x': x, 'data': data, 'sigma': sigdata}

## Run Metropolis

start_time = time.time()
M, LLK, accepted = Metropolis(n_samples, calcLLK, verify, data_dict, m_ini,
                              prior_bounds, prop_cov)
run_time = time.time() - start_time

# Burn-in cutoff: first sample whose log-likelihood reaches the mean
# over the second half of the chain
iburn = np.where(LLK >= LLK[n_samples // 2:].mean())[0][0]

# Mean/STD
M_mean = M[iburn:, :].mean(axis=0)
M_std = M[iburn:, :].std(axis=0)

## Output display & figures

# Print information
print("--- %s seconds ---" % (run_time))
print("Acceptance rate : %f" % (float(accepted) / n_samples))
print("Posterior mean : %f , %f , %f" % (M_mean[0], M_mean[1], M_mean[2]))
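
The Metropolis function called above is not included in the snippet. A minimal random-walk sketch consistent with the call site, assuming calcLLK(m, data_dict) returns a log-likelihood and verify(m, prior_bounds) checks a flat-prior box (both signatures are assumptions):

import numpy as np

def Metropolis(n_samples, calcLLK, verify, data_dict, m_ini,
               prior_bounds, prop_cov):
    # Random-walk Metropolis: Gaussian proposals drawn from prop_cov,
    # flat prior enforced by verify(), log-likelihood from calcLLK().
    D = m_ini.size
    M = np.zeros((n_samples, D))
    LLK = np.zeros(n_samples)
    m = m_ini.copy()
    llk = calcLLK(m, data_dict)
    accepted = 0
    for k in range(n_samples):
        m_prop = m + np.random.multivariate_normal(np.zeros(D), prop_cov)
        if verify(m_prop, prior_bounds):  # proposal inside the prior box?
            llk_prop = calcLLK(m_prop, data_dict)
            # accept with probability min(1, exp(llk_prop - llk))
            if np.log(np.random.rand()) < llk_prop - llk:
                m, llk = m_prop, llk_prop
                accepted += 1
        M[k, :] = m
        LLK[k] = llk
    return M, LLK, accepted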
Example #4
def onestep(D, xlb, xub, Xinit, Yinit, flogprior = None, \
            nhist = 5, resolution = 0.01, \
            T = 1, B = 10000, N = 10000, M = 5, \
            parallel = False, processes = 4, sampler = None):
    """
    An adaptive surrogate modeling-based sampling strategy for parameter
    optimization and distribution estimation (ASMO-PODE), using a
    Metropolis/AM/DRAM Markov chain Monte Carlo sampler.
    One-step mode for offline optimization: does NOT call the model
    evaluation function.
    Parameters for ASMO-PODE:
        D: dimension of input X
        xlb: lower bound of input
        xub: upper bound of input
        Xinit: initial value of X, Ninit x D matrix
        Yinit: initial value of Y, Ninit dim vector
        flogprior: -2 log prior distribution function; should be simple
            enough that it needs no surrogate (defaults to a uniform
            distribution)
        nhist: number of histograms in each iteration
        resolution: fall back to uniform sampling if the nearest-neighbour
            distance is smaller than resolution (parameter space
            normalized to [0, 1])
    Parameters for MCMC:
        T: temperature, default is 1
        B: length of burn-in period
        N: Markov chain length (after burn-in)
        M: number of Markov chains
        parallel: whether to evaluate the Markov chains in parallel
        processes: number of parallel processes
        sampler: name of sampler, one of Metropolis/AM/DRAM
    """
    nbin = int(np.floor(N / (nhist - 1)))
    x = Xinit.copy()
    y = Yinit.copy()
    ntoc = 0

    # construct surrogate model
    #sm = gp.GPR_Matern(x, y, D, 1, x.shape[0], xlb, xub)
    sm = gwgp.MOGPR('CovMatern5', x, y.reshape((-1,1)), D, 1, xlb, xub, \
            mean = np.zeros(1), noise = 1e-3)
    # for surrogate-based MCMC, use a larger noise value, e.g. 1e-3, to smooth the response surface

    # run MCMC on surrogate model
    if sampler == 'AM':
        [Chain, LogPost, ACC, GRB] = \
            AM.sampler(sm, D, xlb, xub, None, flogprior, T, B, N, M, \
                parallel, processes)
    elif sampler == 'DRAM':
        [Chain, LogPost, ACC, GRB] = \
            DRAM.sampler(sm, D, xlb, xub, None, flogprior, T, B, N, M, \
                parallel, processes)
    else:
        [Chain, LogPost, ACC, GRB] = \
            Metropolis.sampler(sm, D, xlb, xub, None, flogprior, T, B, N, M, None, \
                parallel, processes)

    # sort by -2 log-posterior in ascending order (best sample first)
    lidx = np.argsort(LogPost)
    Chain = Chain[lidx, :]
    LogPost = LogPost[lidx]

    # normalize the data
    xu = (x - xlb) / (xub - xlb)
    xp = (Chain - xlb) / (xub - xlb)

    # resampling
    xrf = np.zeros([nhist, D])
    for ihist in range(nhist - 1):
        xpt = xp[nbin * ihist:nbin * (ihist + 1), :]
        xpt = np.unique(xpt, axis=0)  # drop duplicate candidate rows
        [xtmp, mdist] = maxmindist(xu, xpt)
        if mdist < resolution:
            [xtmp, mdist] = maxmindist(xu, np.random.random([10000, D]))
            ntoc += 1
        xrf[ihist, :] = xtmp
        xu = np.vstack((xu, xtmp))
    xrf[nhist - 1, :] = xp[0, :]
    xu = np.vstack((xu, xrf[nhist - 1, :]))

    # return resample points
    x_resample = xrf * (xub - xlb) + xlb

    return x_resample, Chain, LogPost, ACC, GRB
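
maxmindist is used above but not defined. From how its outputs are consumed (xtmp is appended to the design set xu, mdist is compared against resolution), it plausibly picks, among the candidates, the point farthest from its nearest already-chosen neighbour; a sketch:

import numpy as np

def maxmindist(xu, xpt):
    # Distance from each candidate (row of xpt) to each existing
    # design point (row of xu).
    d = np.linalg.norm(xpt[:, None, :] - xu[None, :, :], axis=2)
    dmin = d.min(axis=1)      # nearest-neighbour distance per candidate
    k = int(np.argmax(dmin))  # candidate farthest from the design set
    return xpt[k, :], dmin[k]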
Example #5
def sampler(floglike, D, xlb, xub, \
            Xinit = None, Yinit = None, flogprior = None, \
            niter = 10, nhist = 5, resolution = 0.0001, \
            T = 1, B = 10000, N = 10000, M = 5, \
            parallel = False, processes = 4, sampler = None):
    ''' An adaptive surrogate modeling-based sampling strategy for parameter
        optimization and distribution estimation (ASMO-PODE), using a
        Metropolis/AM/DRAM Markov chain Monte Carlo sampler.
        Parameters for ASMO-PODE:
            floglike: -2 log likelihood function, floglike.evaluate(X)
            D: dimension of input X
            xlb: lower bound of input
            xub: upper bound of input
            Xinit: initial value of X, Ninit x D matrix
            Yinit: initial value of Y, Ninit dim vector
            flogprior: -2 log prior distribution function; should be simple
                enough that it needs no surrogate (defaults to a uniform
                distribution)
            niter: total number of iterations
            nhist: number of histograms in each iteration
            resolution: fall back to uniform sampling if the nearest-neighbour
                distance is smaller than resolution (parameter space
                normalized to [0, 1])
        Parameters for MCMC:
            T: temperature, default is 1
            B: length of burn-in period
            N: Markov chain length (after burn-in)
            M: number of Markov chains
            parallel: whether to evaluate the Markov chains in parallel
            processes: number of parallel processes
            sampler: name of sampler, one of Metropolis/AM/DRAM
    '''
    nbin = int(np.floor(N / (nhist - 1)))
    if (Xinit is None and Yinit is None):
        Ninit = D * 10
        Xinit = sampling.glp(Ninit, D)
        for i in range(Ninit):
            Xinit[i, :] = Xinit[i, :] * (xub - xlb) + xlb
        Yinit = np.zeros(Ninit)
        for i in range(Ninit):
            Yinit[i] = floglike.evaluate(Xinit[i, :])
    else:
        Ninit = Xinit.shape[0]
        if len(Yinit.shape) == 2:
            Yinit = Yinit[:, 0]
    x = Xinit.copy()
    y = Yinit.copy()
    ntoc = 0

    if sampler is None:
        sampler = 'Metropolis'

    resamples = []

    for i in range(niter):
        print('Surrogate Opt loop: %d' % i)

        # construct surrogate model
        #sm = gp.GPR_Matern(x, y, D, 1, x.shape[0], xlb, xub)
        sm = gwgp.MOGPR('CovMatern5', x, y.reshape((-1,1)), D, 1, xlb, xub, \
                mean = np.zeros(1), noise = 1e-3)
        # for surrogate-based MCMC, use a larger noise value, e.g. 1e-3, to smooth the response surface

        # run MCMC on surrogate model
        if sampler == 'AM':
            [Chain, LogPost, ACC, GRB] = \
                AM.sampler(sm, D, xlb, xub, None, flogprior, T, B, N, M, \
                    parallel, processes)
        elif sampler == 'DRAM':
            [Chain, LogPost, ACC, GRB] = \
                DRAM.sampler(sm, D, xlb, xub, None, flogprior, T, B, N, M, \
                    parallel, processes)
        else:  # 'Metropolis' (the default) or any unrecognized name
            [Chain, LogPost, ACC, GRB] = \
                Metropolis.sampler(sm, D, xlb, xub, None, flogprior, T, B, N, M, None, \
                    parallel, processes)

        # sort by -2 log-posterior in ascending order (best sample first)
        lidx = np.argsort(LogPost)
        Chain = Chain[lidx, :]
        LogPost = LogPost[lidx]

        # store result of MCMC on surrogate
        resamples.append({'Chain': Chain.copy(), \
                          'LogPost': LogPost.copy(),'ACC': ACC, 'GRB': GRB})

        # normalize the data
        xu = (x - xlb) / (xub - xlb)
        xp = (Chain - xlb) / (xub - xlb)

        # resampling
        xrf = np.zeros([nhist, D])
        for ihist in range(nhist - 1):
            xpt = xp[nbin * ihist:nbin * (ihist + 1), :].copy()
            xpt = np.unique(xpt, axis=0)  # drop duplicate candidate rows
            [xtmp, mdist] = maxmindist(xu, xpt)
            if mdist < resolution:
                [xtmp, mdist] = maxmindist(xu, np.random.random([10000, D]))
                ntoc += 1
            xrf[ihist, :] = xtmp
            xu = np.vstack((xu, xtmp))
        xrf[nhist - 1, :] = xp[0, :]
        xu = np.vstack((xu, xrf[nhist - 1, :]))
        resamples[i]['ntoc'] = ntoc

        # run dynamic model
        xrf = xrf * (xub - xlb) + xlb
        yrf = np.zeros(nhist)
        for j in range(nhist):  # avoid shadowing the outer loop index i
            yrf[j] = floglike.evaluate(xrf[j, :])
        x = np.concatenate((x, xrf.copy()), axis=0)
        y = np.concatenate((y, yrf.copy()), axis=0)

    bestidx = np.argmin(y)
    bestx = x[bestidx, :]
    besty = y[bestidx]

    return Chain, LogPost, ACC, GRB, bestx, besty, x, y, resamples
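
A hypothetical invocation of this sampler, with a toy -2 log-likelihood object exposing the evaluate(X) interface the code expects (all names and settings below are illustrative):

import numpy as np

class GaussianLoglike:
    # Toy target: -2 log of a standard normal density, up to a constant.
    def evaluate(self, X):
        return float(np.sum(np.asarray(X) ** 2))

D = 2
xlb = np.full(D, -5.0)
xub = np.full(D, 5.0)
Chain, LogPost, ACC, GRB, bestx, besty, x, y, resamples = \
    sampler(GaussianLoglike(), D, xlb, xub,
            niter=5, B=2000, N=2000, M=3)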
Example #6
    LAPIMAGE = False

    sigmaIn = numpy.where(numpy.random.randn(Size, Size) > 0, 1,
                          -1).astype(numpy.int8)

    ImageOutput(sigmaIn, "Ising2D_Serial_%i_Initial" % (Size))

    Trange = numpy.arange(Tmin, Tmax + Tstep, Tstep)

    E = []
    M = []

    for T in Trange:
        # Essential to use copy: with numpy, [:] only creates a view, not a copy!
        sigma = numpy.copy(sigmaIn)
        duration = Metropolis(sigma, J, B, T, Iterations)
        E = numpy.append(E, Energy(sigma, J))
        M = numpy.append(M, Magnetization(sigma, B))
        ImageOutput(sigma, "Ising2D_Serial_%i_%1.1f_Final" % (Size, T))

        print("CPU Time : %f" % (duration))
        print("Total Energy at Temperature %f : %f" % (T, E[-1]))
        print("Total Magnetization at Temperature %f : %f" % (T, M[-1]))

    if Curves:
        DisplayCurves(Trange, E, M, J, B)

    # Save output
    numpy.savez("Ising2D_Serial_%i_%.8i" % (Size, Iterations), (Trange, E, M))
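
The Metropolis kernel called in the loop above is not part of this snippet. A single-spin-flip sketch consistent with the call Metropolis(sigma, J, B, T, Iterations) and with duration being a runtime: under the convention E = -J*sum(s_i*s_j) - B*sum(s_i), flipping one spin costs dE = 2*s*(J*nb + B):

import time
import numpy

def Metropolis(sigma, J, B, T, Iterations):
    # Single-spin-flip Metropolis for the 2D Ising model: flip a random
    # spin, accept if it lowers the energy or with probability exp(-dE/T).
    start = time.time()
    L = sigma.shape[0]
    for _ in range(Iterations):
        i, j = numpy.random.randint(0, L, size=2)
        nb = (sigma[(i + 1) % L, j] + sigma[(i - 1) % L, j]
              + sigma[i, (j + 1) % L] + sigma[i, (j - 1) % L])
        dE = 2.0 * sigma[i, j] * (J * nb + B)  # cost of flipping this spin
        if dE <= 0.0 or numpy.random.rand() < numpy.exp(-dE / T):
            sigma[i, j] = -sigma[i, j]
    return time.time() - start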
Example #7
x = np.linspace(-8*lockdepth,8*lockdepth,100)/lockdepth 
i = np.where(x==0)[0]
if i.size>0:
    x = np.delete(x,i)
    
# Creation of noise free synthetic data
pred = calcPred(mtarget,x)       # Unnormalized noise free data

# Noisy data
data = pred + sigdata*np.random.randn(pred.size)
data_dict = {'x':x,'data':data,'sigma':sigdata}

## Run Metropolis

start_time = time.time()
M,LLK,accepted = Metropolis(n_samples,calcLLK,verify,data_dict,m_ini,prior_bounds,prop_cov)
run_time = time.time() - start_time

# Mean/STD
M_mean = M.mean(axis=0)
M_std  = M.std(axis=0)

## Output display & figures

# Print information
print("--- %s seconds ---" % (run_time))
print("Acceptance rate : %f" %(float(accepted)/n_samples))
print("Posterior mean : %f , %f" %(M_mean[0] ,M_mean[1]))
print("2-sigma error  : %f , %f" %(2*M_std[0],2*M_std[1] ))

# Plot data
Example #8
i = np.where(x == 0)[0]
if i.size > 0:
    x = np.delete(x, i)

# Creation of noise free synthetic data
pred = calcPred(mtarget, x)  # Unnormalized noise free data

# Noisy data
data = pred + sigdata * np.random.randn(pred.size)
data_dict = {'x': x, 'data': data, 'sigma': sigdata}

## Run Metropolis

start_time = time.time()
M, LLK, accepted = Metropolis(n_samples, calcLLK, verify, data_dict, m_ini,
                              prior_bounds, prop_cov)
run_time = time.time() - start_time

# Mean/STD
M_mean = M.mean(axis=0)
M_std = M.std(axis=0)

## Output display & figures

# Print information
print("--- %s seconds ---" % (run_time))
print("Acceptance rate : %f" % (float(accepted) / n_samples))
print("Posterior mean : %f , %f" % (M_mean[0], M_mean[1]))
print("2-sigma error  : %f , %f" % (2 * M_std[0], 2 * M_std[1]))

# Plot data