def fillpts(lb,ub,npts,data=None,rtol=None,dist=None):
    """
takes lower and upper bounds (e.g. lb = [0,3], ub = [2,4])
finds npts that are at least rtol away from legacy data
produces a list of sample points s = [[1,3],[1,4],[2,3],[2,4]]

Inputs:
    lb -- a list of the lower bounds
    ub -- a list of the upper bounds
    npts -- number of sample points
    data -- a list of legacy sample points
    rtol -- target radial distance from each point
    dist -- a mystic.math.Distribution instance

Notes: if rtol is None, use max rtol; if rtol < 0, use quick-n-dirty method
    """
    bounds = list(zip(lb,ub))
    from mystic.math.distance import euclidean as metric
    #XXX: expose solver settings to user? #XXX: better npop,ftol? faster?
    ###if rtol is None or type(rtol) in (int, float):
    from mystic.solvers import diffev as solver
    kwds = dict(npop=20, ftol=1e-4, gtol=None, disp=0, full_output=0)
    ###initial = lambda : bounds
    ###else: # assume it's a string
    #    from mystic.solvers import fmin_powell as solver
    #    kwds = dict(xtol=1e-8, ftol=1e-8, gtol=2, disp=0, full_output=0)
    #    if rtol == 'None': rtol = None
    #    else: rtol = float(rtol)
    #    import random as rd
    #    initial = lambda : [rd.randrange(l,u)+rd.random() for (l,u) in bounds]
    #from numpy import round
    #kwds['constraints'] = lambda x: round(x, 3)
    # copy the legacy data points (e.g. monitor.x) #XXX: more efficient method?
    pts = [] if data is None else list(data)
    # 'min', 'np.sum()', 'np.min()' are also a choice of distance metric
    if rtol and rtol < 0: # neg radius uses quick-n-dirty method
        def holes(x):
            return (metric(pts,x,axis=0).min(axis=0) < -rtol).sum()
    elif rtol is None: # no radius finds max distance away
        def holes(x):
            res = metric(pts,x,axis=0).min()
            return -res
    else: # all points should be at least rtol away
        def holes(x):
            res = metric(pts,x,axis=0).min()
            return -res if res < rtol else 0.0
    # iteratively find a point away from all other points
    for pt in range(npts):
        res = solver(holes, x0=bounds, bounds=bounds, **kwds)
        #res,cost = res[0],res[1]
        pts.append(res.ravel().tolist())
    pts = pts[-npts:]
    # inject some randomness #XXX: what are alternatives? some sampling?
    if dist is None: return pts
    if not len(pts): return pts
    from numpy import asarray
    pts = asarray(pts) + dist((len(pts),len(pts[0])))  # add noise drawn from dist
    return pts.tolist()
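# A minimal usage sketch for fillpts (the bounds, legacy data, and rtol below
# are illustrative assumptions, not from the source): fill a 2-D box with new
# points kept at least rtol away from previously sampled points.
example_lb, example_ub = [0., 3.], [2., 4.]   # lower/upper bounds per dimension
example_data = [[1., 3.5], [0.5, 3.2]]        # hypothetical legacy sample points
example_pts = fillpts(example_lb, example_ub, npts=4, data=example_data, rtol=0.5)
# example_pts should hold 4 points inside the box, each >= 0.5 from the legacy data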
def fmin_powell(cost, x0, full=1, disp=1, monitor=0):
    """ change default behavior for selected optimizers """
    from mystic.solvers import fmin_powell as solver
    from mystic.monitors import Monitor, VerboseMonitor
    if monitor:
        mon = VerboseMonitor(10)
    else:
        mon = Monitor()
    npop = 10*len(x0)
    solved = solver(cost, x0, npop=npop, full_output=full,
                    disp=disp, itermon=mon, handler=0)
    # return: solution, energy, generations, fevals
    return solved[0], solved[1], solved[2], solved[3]
def fmin_powell(cost, x0, full=1, disp=1, monitor=0):
    """ change default behavior for selected optimizers """
    from mystic.solvers import fmin_powell as solver
    from mystic.monitors import Monitor, VerboseMonitor
    if monitor:
        mon = VerboseMonitor(10)
    else:
        mon = Monitor()
    npop = 10*len(x0)
    solved = solver(cost, x0, npop=npop, full_output=full,
                    disp=disp, itermon=mon, handler=0)
    # return: solution, energy, generations, fevals
    return solved[0], solved[1], solved[3], solved[4]
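# A minimal usage sketch for the wrapper above (the objective and starting
# point are assumptions, not from the source): minimize the 2-D Rosenbrock
# function shipped with mystic.models.
from mystic.models import rosen
example_x0 = [0.8, 1.2]                        # illustrative starting point
solution, energy, generations, fevals = fmin_powell(rosen, example_x0, disp=0)
# solution should approach [1., 1.] with energy near 0.0 for this objective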
def impose_reweighted_variance(v, samples, weights=None, solver=None):
    """impose a variance on a list of points by reweighting weights"""
    from mystic.math.measures import mean
    from mystic.math import almostEqual
    from mystic.symbolic import generate_conditions, generate_penalty
    from mystic.symbolic import generate_constraint, generate_solvers, solve
    ndim = len(samples)
    if weights is None:
        weights = [1.0/ndim] * ndim
    if solver is None or solver == 'fmin':
        from mystic.solvers import fmin as solver
    elif solver == 'fmin_powell':
        from mystic.solvers import fmin_powell as solver
    elif solver == 'diffev':
        from mystic.solvers import diffev as solver
    elif solver == 'diffev2':
        from mystic.solvers import diffev2 as solver
    norm = sum(weights)
    m = mean(samples, weights)

    inequality = ""
    equality = ""; equality2 = ""; equality3 = ""
    for i in range(ndim):
        inequality += "x%s >= 0.0\n" % (i) # positive
        equality += "x%s + " % (i)         # normalized
        equality2 += "%s * x%s + " % (float(samples[i]), (i)) # mean
        equality3 += "x%s*(%s-%s)**2 + " % ((i), float(samples[i]), m) # var

    equality += "0.0 = %s\n" % float(norm)
    equality += equality2 + "0.0 = %s*%s\n" % (float(norm), m)
    equality += equality3 + "0.0 = %s*%s\n" % (float(norm), v)

    penalties = generate_penalty(generate_conditions(inequality))
    constrain = generate_constraint(generate_solvers(solve(equality)))

    def cost(x): return sum(x)

    results = solver(cost, weights, constraints=constrain, \
                     penalty=penalties, disp=False, full_output=True)
    wts = list(results[0])
    _norm = results[1] # should have _norm == norm
    warn = results[4]  # nonzero if didn't converge
    #XXX: better to fail immediately if xlo < m < xhi... or the below?
    if warn or not almostEqual(_norm, norm):
        print("Warning: could not impose variance through reweighting")
        return None #impose_variance(v, samples, weights), weights
    return wts #samples, wts # "mean-preserving"
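# A minimal usage sketch (illustrative values, not from the source): reweight
# five fixed sample points so their weighted variance becomes (approximately)
# 3.0, while keeping the weights nonnegative, normalized, and mean-preserving.
example_samples = [1., 2., 3., 4., 5.]
example_wts = impose_reweighted_variance(3.0, example_samples)
# on success, example_wts sum to 1.0 and shift weight toward the extreme samples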