import numpy as np

# local modules from this package (assumed importable as top-level modules)
import sampling
import gp
import NSGA2


def optimization(model, nInput, nOutput, xlb, xub, niter, pct,
                 Xinit=None, Yinit=None, pop=100, gen=100,
                 crossover_rate=0.9, mu=20, mum=20):
    """ Multi-Objective Adaptive Surrogate Modelling-based Optimization
        model: the evaluated model function
        nInput: number of model inputs
        nOutput: number of output objectives
        xlb: lower bound of input
        xub: upper bound of input
        niter: number of iterations
        pct: percentage of resampled points in each iteration
        Xinit and Yinit: initial samples for surrogate model construction
        ### options for the embedded NSGA-II of MO-ASMO
        pop: population size
        gen: number of generations
        crossover_rate: ratio of crossover in each generation
        mu: distribution index for crossover
        mum: distribution index for mutation
    """
    N_resample = int(pop * pct)
    if Xinit is None and Yinit is None:
        Ninit = nInput * 10
        Xinit = sampling.glp(Ninit, nInput)
        for i in range(Ninit):
            Xinit[i, :] = Xinit[i, :] * (xub - xlb) + xlb
        Yinit = np.zeros((Ninit, nOutput))
        for i in range(Ninit):
            Yinit[i, :] = model.evaluate(Xinit[i, :])
    else:
        Ninit = Xinit.shape[0]
    icall = Ninit
    x = Xinit.copy()
    y = Yinit.copy()

    for i in range(niter):
        print('Surrogate Opt loop: %d' % i)
        # fit a Gaussian-process surrogate to all samples collected so far
        sm = gp.GPR_Matern(x, y, nInput, nOutput, x.shape[0], xlb, xub)
        # search the surrogate's Pareto front with NSGA-II
        bestx_sm, besty_sm, x_sm, y_sm = \
            NSGA2.optimization(sm, nInput, nOutput, xlb, xub,
                               pop, gen, crossover_rate, mu, mum)
        # resample the N_resample points with the largest crowding distance
        # and evaluate them on the true model
        D = NSGA2.crowding_distance(besty_sm)
        idxr = D.argsort()[::-1][:N_resample]
        x_resample = bestx_sm[idxr, :]
        y_resample = np.zeros((N_resample, nOutput))
        for j in range(N_resample):
            y_resample[j, :] = model.evaluate(x_resample[j, :])
        icall += N_resample
        x = np.vstack((x, x_resample))
        y = np.vstack((y, y_resample))

    # return the non-dominated (rank 0) subset of all evaluated samples
    xtmp = x.copy()
    ytmp = y.copy()
    xtmp, ytmp, rank, crowd = NSGA2.sortMO(xtmp, ytmp, nInput, nOutput)
    idxp = (rank == 0)
    bestx = xtmp[idxp, :]
    besty = ytmp[idxp, :]

    return bestx, besty, x, y
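
# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of driving the MO-ASMO loop above. ToyBiObjective is a
# hypothetical stand-in for a real model: any object with an evaluate(x)
# method returning an nOutput-dim vector will do.
if __name__ == '__main__':
    class ToyBiObjective:
        """ Hypothetical test model with two conflicting quadratic objectives. """
        def evaluate(self, x):
            return np.array([np.sum(x ** 2), np.sum((x - 2.0) ** 2)])

    nInput, nOutput = 3, 2
    xlb = np.zeros(nInput)
    xub = 4.0 * np.ones(nInput)
    # resample 20% of the NSGA-II population on the true model per iteration
    bestx, besty, xall, yall = optimization(ToyBiObjective(), nInput, nOutput,
                                            xlb, xub, niter=10, pct=0.2)
    print('non-dominated points found: %d' % bestx.shape[0])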
import numpy as np

# local modules from this package (assumed importable as top-level modules)
import sampling
import gp
import SCEUA


def optimization(model, nInput, xlb, xub, niter, Xinit=None, Yinit=None,
                 ngs=None, maxn=3000, kstop=10, pcento=0.1, peps=0.001):
    """ Adaptive Surrogate Modelling-based Optimization
        model: the evaluated model function
        nInput: number of model inputs
        xlb: lower bound of input
        xub: upper bound of input
        niter: total number of iterations
        Xinit/Yinit: initial samples for surrogate model construction
        ngs: number of complexes (sub-populations)
        maxn: maximum number of model runs (on the surrogate model)
        kstop: maximum number of evolution loops before convergence
        pcento: the percentage change allowed in kstop loops before convergence
        peps: minimum range of parameters
    """
    if Xinit is None and Yinit is None:
        Ninit = nInput * 10
        Xinit = sampling.glp(Ninit, nInput)
        for i in range(Ninit):
            Xinit[i, :] = Xinit[i, :] * (xub - xlb) + xlb
        Yinit = np.zeros(Ninit)
        for i in range(Ninit):
            Yinit[i] = model.evaluate(Xinit[i, :])
    else:
        Ninit = Xinit.shape[0]
    icall = Ninit
    x = Xinit.copy()
    y = Yinit.copy()
    bestf = np.inf

    for i in range(niter):
        print('Surrogate Opt loop: %d' % i)
        # fit a single-output Gaussian-process surrogate to all samples so far
        sm = gp.GPR_Matern(x, y, nInput, 1, x.shape[0], xlb, xub)
        # minimize the surrogate with SCE-UA
        bestx_sm, bestf_sm, icall_sm, nloop_sm, \
            bestx_list_sm, bestf_list_sm, icall_list_sm = \
            SCEUA.optimization(sm, nInput, xlb, xub, ngs, maxn, kstop,
                               pcento, peps, verbose=False)
        # evaluate the surrogate optimum on the true model and archive it
        bestx_tmp = bestx_sm.copy()
        bestf_tmp = model.evaluate(bestx_tmp)
        icall += 1
        x = np.vstack((x, bestx_tmp))
        y = np.append(y, bestf_tmp)
        if bestf_tmp < bestf:
            bestf = bestf_tmp
            bestx = bestx_tmp

    return bestx, bestf, x, y
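
# --- Usage sketch (illustrative, not part of the original module) ---
# Minimizing a hypothetical scalar test function with the ASMO loop above;
# ngs is left at None on the assumption that SCEUA.optimization supplies a
# default number of complexes.
if __name__ == '__main__':
    class Sphere:
        """ Hypothetical test model: sum-of-squares objective. """
        def evaluate(self, x):
            return float(np.sum(x ** 2))

    nInput = 5
    xlb = -5.0 * np.ones(nInput)
    xub = 5.0 * np.ones(nInput)
    bestx, bestf, xall, yall = optimization(Sphere(), nInput, xlb, xub, niter=20)
    print('best objective found: %.4f' % bestf)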
import numpy as np

# local modules from this package (assumed importable as top-level modules)
import sampling
import gp
import WNSGA2


def optimization(model, nInput, nOutput, xlb, xub, dft, niter, pct,
                 Xinit=None, Yinit=None, pop=100, gen=100,
                 crossover_rate=0.9, mu=20, mum=20, weight=0.001):
    ''' Weighted Multi-Objective Adaptive Surrogate Modelling-based Optimization
        model: the evaluated model function
        nInput: number of model inputs
        nOutput: number of output objectives
        xlb: lower bound of input
        xub: upper bound of input
        dft: default point used to constrain the objective space, i.e. the
             objective values simulated with the default parameters (its
             dimension equals nOutput; this is the reference point in the
             MO-ASMO paper)
        niter: number of iterations
        pct: percentage of resampled points in each iteration
        Xinit and Yinit: initial samples for surrogate model construction
        ### options for the embedded WNSGA-II of WMO-ASMO
        pop: population size
        gen: number of generations
        crossover_rate: ratio of crossover in each generation
        mu: distribution index for crossover
        mum: distribution index for mutation
        weight: weight factor assigned when an objective is worse than the dft point
    '''
    N_resample = int(pop * pct)
    if Xinit is None and Yinit is None:
        Ninit = nInput * 10
        Xinit = sampling.glp(Ninit, nInput)
        for i in range(Ninit):
            Xinit[i, :] = Xinit[i, :] * (xub - xlb) + xlb
        Yinit = np.zeros((Ninit, nOutput))
        for i in range(Ninit):
            Yinit[i, :] = model.evaluate(Xinit[i, :])
    else:
        Ninit = Xinit.shape[0]
    icall = Ninit
    x = Xinit.copy()
    y = Yinit.copy()

    for i in range(niter):
        print('Surrogate Opt loop: %d' % i)
        # fit a Gaussian-process surrogate to all samples collected so far
        sm = gp.GPR_Matern(x, y, nInput, nOutput, x.shape[0], xlb, xub)
        # search the surrogate's Pareto front with weighted NSGA-II
        bestx_sm, besty_sm, x_sm, y_sm = \
            WNSGA2.optimization(sm, nInput, nOutput, xlb, xub, dft,
                                pop, gen, crossover_rate, mu, mum, weight)
        # resample the points with the largest weighted crowding distance
        # and evaluate them on the true model
        D = WNSGA2.weighted_crowding_distance(besty_sm, dft, weight)
        idxr = D.argsort()[::-1][:N_resample]
        x_resample = bestx_sm[idxr, :]
        y_resample = np.zeros((N_resample, nOutput))
        for j in range(N_resample):
            y_resample[j, :] = model.evaluate(x_resample[j, :])
        icall += N_resample
        x = np.vstack((x, x_resample))
        y = np.vstack((y, y_resample))

    # keep only rank-0 samples that dominate the default point dft
    xtmp = x.copy()
    ytmp = y.copy()
    xtmp, ytmp, rank, crowd = WNSGA2.sortMO_W(xtmp, ytmp, nInput, nOutput, dft, weight)
    bestx = []
    besty = []
    for i in range(ytmp.shape[0]):
        if rank[i] == 0 and np.sum(ytmp[i, :] < dft) == nOutput:
            bestx.append(xtmp[i, :])
            besty.append(ytmp[i, :])
    bestx = np.array(bestx)
    besty = np.array(besty)

    return bestx, besty, x, y
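
# --- Usage sketch (illustrative, not part of the original module) ---
# The only difference from plain MO-ASMO is the reference point dft: here it
# is taken as a hypothetical model's output at an assumed default parameter
# set, so the returned front contains only solutions that improve on the
# default in every objective.
if __name__ == '__main__':
    class ToyBiObjective:
        """ Hypothetical test model with two conflicting quadratic objectives. """
        def evaluate(self, x):
            return np.array([np.sum(x ** 2), np.sum((x - 2.0) ** 2)])

    model = ToyBiObjective()
    nInput, nOutput = 3, 2
    xlb, xub = np.zeros(nInput), 4.0 * np.ones(nInput)
    dft = model.evaluate(1.5 * np.ones(nInput))   # objectives at assumed defaults
    bestx, besty, xall, yall = optimization(model, nInput, nOutput, xlb, xub,
                                            dft, niter=10, pct=0.2, weight=0.001)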
import numpy as np

# local modules from this package (assumed importable as top-level modules);
# maxmindist is a helper assumed to be defined alongside this function
import sampling
import gwgp
import AM
import DRAM
import Metropolis


def sampler(floglike, D, xlb, xub,
            Xinit=None, Yinit=None, flogprior=None,
            niter=10, nhist=5, resolution=0.0001,
            T=1, B=10000, N=10000, M=5,
            parallel=False, processes=4, sampler=None):
    ''' An adaptive surrogate modeling-based sampling strategy for parameter
        optimization and distribution estimation (ASMO-PODE),
        using a Metropolis/AM/DRAM Markov chain Monte Carlo sampler

        Parameters for ASMO-PODE
            floglike: -2log likelihood function, called as floglike.evaluate(X)
            D: dimension of input X
            xlb: lower bound of input
            xub: upper bound of input
            Xinit: initial value of X, Ninit x D matrix
            Yinit: initial value of Y, Ninit dim vector
            flogprior: -2log prior distribution function; should be simple
                enough that it needs no surrogate (defaults to a uniform
                distribution)
            niter: total number of iterations
            nhist: number of histograms in each iteration
            resolution: use uniform sampling if the nearest-neighbour distance
                is smaller than resolution (parameter space normalized to [0,1])
        Parameters for MCMC
            T: temperature, default is 1
            B: length of burn-in period
            N: Markov chain length (after burn-in)
            M: number of Markov chains
            parallel: whether to evaluate the chains in parallel
            processes: number of parallel processes
            sampler: name of sampler, one of Metropolis/AM/DRAM
    '''
    nbin = int(np.floor(N / (nhist - 1)))
    if Xinit is None and Yinit is None:
        Ninit = D * 10
        Xinit = sampling.glp(Ninit, D)
        for i in range(Ninit):
            Xinit[i, :] = Xinit[i, :] * (xub - xlb) + xlb
        Yinit = np.zeros(Ninit)
        for i in range(Ninit):
            Yinit[i] = floglike.evaluate(Xinit[i, :])
    else:
        Ninit = Xinit.shape[0]
        if len(Yinit.shape) == 2:
            Yinit = Yinit[:, 0]
    x = Xinit.copy()
    y = Yinit.copy()

    ntoc = 0
    if sampler is None:
        sampler = 'Metropolis'
    resamples = []
    for i in range(niter):
        print('Surrogate Opt loop: %d' % i)

        # construct surrogate model; for surrogate-based MCMC, use a larger
        # value for noise, e.g. 1e-3, to smooth the response surface
        # sm = gp.GPR_Matern(x, y, D, 1, x.shape[0], xlb, xub)
        sm = gwgp.MOGPR('CovMatern5', x, y.reshape((-1, 1)), D, 1, xlb, xub,
                        mean=np.zeros(1), noise=1e-3)

        # run MCMC on the surrogate model
        if sampler == 'AM':
            [Chain, LogPost, ACC, GRB] = \
                AM.sampler(sm, D, xlb, xub, None, flogprior, T, B, N, M,
                           parallel, processes)
        elif sampler == 'DRAM':
            [Chain, LogPost, ACC, GRB] = \
                DRAM.sampler(sm, D, xlb, xub, None, flogprior, T, B, N, M,
                             parallel, processes)
        else:  # 'Metropolis' or any unrecognized name
            [Chain, LogPost, ACC, GRB] = \
                Metropolis.sampler(sm, D, xlb, xub, None, flogprior, T, B, N, M,
                                   None, parallel, processes)

        # sort -2logpost in ascending order
        lidx = np.argsort(LogPost)
        Chain = Chain[lidx, :]
        LogPost = LogPost[lidx]

        # store result of MCMC on surrogate
        resamples.append({'Chain': Chain.copy(), 'LogPost': LogPost.copy(),
                          'ACC': ACC, 'GRB': GRB})

        # normalize the data
        xu = (x - xlb) / (xub - xlb)
        xp = (Chain - xlb) / (xub - xlb)

        # resampling: from each of the first nhist-1 posterior bins, take the
        # point farthest from all existing samples (max-min distance)
        xrf = np.zeros([nhist, D])
        for ihist in range(nhist - 1):
            xpt = xp[nbin * ihist:nbin * (ihist + 1), :].copy()
            # drop duplicate rows (np.unique with axis=0 replaces the old
            # structured-view trick)
            xptt, pidx = np.unique(xpt, axis=0, return_index=True)
            xpt = xpt[pidx, :]
            [xtmp, mdist] = maxmindist(xu, xpt)
            if mdist < resolution:
                # chain points lie too close to existing samples; fall back
                # to uniform random candidates
                [xtmp, mdist] = maxmindist(xu, np.random.random([10000, D]))
                ntoc += 1
            xrf[ihist, :] = xtmp
            xu = np.vstack((xu, xtmp))
        # always include the maximum a posteriori point of the chain
        xrf[nhist - 1, :] = xp[0, :]
        xu = np.vstack((xu, xrf[nhist - 1, :]))
        resamples[i]['ntoc'] = ntoc

        # run the dynamic model at the resampled points
        xrf = xrf * (xub - xlb) + xlb
        yrf = np.zeros(nhist)
        for j in range(nhist):
            yrf[j] = floglike.evaluate(xrf[j, :])
        x = np.concatenate((x, xrf.copy()), axis=0)
        y = np.concatenate((y, yrf.copy()), axis=0)

    bestidx = np.argmin(y)
    bestx = x[bestidx, :]
    besty = y[bestidx]

    return Chain, LogPost, ACC, GRB, bestx, besty, x, y, resamples
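
# --- Usage sketch (illustrative, not part of the original module) ---
# Sampling a hypothetical 2-D Gaussian posterior. GaussTarget returns
# -2 log likelihood, matching the convention documented above; short chains
# are used here only to keep the sketch cheap.
if __name__ == '__main__':
    class GaussTarget:
        """ Hypothetical target: -2 log likelihood of an isotropic Gaussian. """
        def __init__(self, mu, sigma=0.1):
            self.mu, self.sigma = mu, sigma
        def evaluate(self, x):
            return float(np.sum(((x - self.mu) / self.sigma) ** 2))

    D = 2
    xlb, xub = np.zeros(D), np.ones(D)
    Chain, LogPost, ACC, GRB, bestx, besty, x, y, resamples = \
        sampler(GaussTarget(0.3 * np.ones(D)), D, xlb, xub,
                niter=8, B=2000, N=2000, M=3, sampler='DRAM')
    print('MAP estimate:', bestx)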
# parameters for ASMO-PODE
T = 1
B = 10000
N = 10000
M = 4
niter = 16
nhist = 5
resolution = 0.05
parallel = True
processes = 4
sampler = 'DRAM'

# initial sampling
Ninit = 20
Xinit = sampling.glp(Ninit, D, 5)
for i in range(Ninit):
    Xinit[i, :] = Xinit[i, :] * (xub - xlb) + xlb
Yinit = np.zeros(Ninit)
for i in range(Ninit):
    Yinit[i] = model.evaluate(Xinit[i, :])

# run ASMO-PODE
[Chain, LogPost, ACC, GRB, bestx, besty, x, y, resamples] = \
    ASMOPODE.sampler(model, D, xlb, xub, Xinit, Yinit, None,
                     niter, nhist, resolution, T, B, N, M,
                     parallel, processes, sampler)

# plot results
#print(ACC)
#print(GRB)
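
# A minimal plotting sketch (assumes matplotlib is available; not part of the
# original script): histogram the marginal posterior of the first parameter
# from the final surrogate MCMC chain.
import matplotlib.pyplot as plt

plt.hist(Chain[:, 0], bins=50, density=True)
plt.xlabel('parameter 1')
plt.ylabel('posterior density')
plt.savefig('asmo_pode_posterior.png')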