def refine_osc(cls, w, coeff_field, a=1):
    Cadelta = 1.0
    mesh_maxh = w.basis.basis.mesh.hmax()
    coeff_min_val, coeff_max_grad = 1e10, 0.0
    suppLambda = supp(w.active_indices())
    if len(suppLambda) > 0:
        try:
            for m in suppLambda:
                coeff, _ = coeff_field[m]
                min_val, max_grad = abs(coeff.min_val), abs(coeff.max_grad)
                coeff_min_val, coeff_max_grad = min(coeff_min_val, min_val), max(coeff_max_grad, max_grad)
                logger.debug("\tm: %s %s %s %s %s" % (m, min_val, max_grad, coeff_min_val, coeff_max_grad))
            # determine (4.14) c_{a,\delta}
            Cadelta = mesh_maxh * coeff_max_grad / coeff_min_val
        except AttributeError:
            logger.error("coefficient does not provide min_val and max_grad, OSC refinement is not supported for this case...")
        # determine maximal mesh size to resolve coefficient oscillations
        logger.debug("OSC marking maxh {0} and Cadelta {1} with scaling factor {2}".format(mesh_maxh, Cadelta, a))
        maxh = a * mesh_maxh / Cadelta
        # create appropriate mesh by refinement and project current solution
        new_w = w.refine_maxh(maxh)
        return new_w, maxh, Cadelta
    else:
        logger.info("SKIP OSC refinement since the only active mi is deterministic.")
        return w, 1.0, Cadelta
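
# A minimal numeric sketch (not part of the original module) of the oscillation-based mesh size
# computed above: c_{a,\delta} = h_max * max|grad a| / min|a| as in (4.14), and the refinement
# target is maxh = a * h_max / c_{a,\delta}. The values below are illustrative only.
def osc_maxh_sketch(mesh_maxh, coeff_max_grad, coeff_min_val, a=1):
    Cadelta = mesh_maxh * coeff_max_grad / coeff_min_val
    return a * mesh_maxh / Cadelta, Cadelta

# example: osc_maxh_sketch(0.25, 8.0, 2.0) returns (0.25, 1.0),
# i.e. c_{a,delta} = 0.25 * 8.0 / 2.0 = 1.0 and maxh = 1 * 0.25 / 1.0 = 0.25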
def LambdaBoundary(Lambda):
    suppLambda = supp(Lambda)
    for mu in Lambda:
        for m in suppLambda:
            mu1 = mu.inc(m)
            if mu1 not in Lambda:
                yield mu1
            mu2 = mu.dec(m)
            if mu2 not in Lambda and mu2 is not None:
                yield mu2
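
# Toy illustration (assumption: plain tuples as a hypothetical stand-in for the Multiindex
# class) of the boundary construction above; tuple_supp/tuple_inc/tuple_dec mimic the
# supp/inc/dec operations used by LambdaBoundary.
def tuple_supp(Lambda):
    return sorted({m for mu in Lambda for m, v in enumerate(mu) if v > 0})

def tuple_inc(mu, m):
    return tuple(v + 1 if i == m else v for i, v in enumerate(mu))

def tuple_dec(mu, m):
    # decrementing below zero is invalid, mirrored here by returning None
    return tuple(v - 1 if i == m else v for i, v in enumerate(mu)) if mu[m] > 0 else None

def tuple_boundary(Lambda):
    suppLambda = tuple_supp(Lambda)
    for mu in Lambda:
        for m in suppLambda:
            mu1 = tuple_inc(mu, m)
            if mu1 not in Lambda:
                yield mu1
            mu2 = tuple_dec(mu, m)
            if mu2 is not None and mu2 not in Lambda:
                yield mu2

# example: list(tuple_boundary([(0, 0), (1, 0)])) yields [(2, 0)] since the support is {0}
# and only the increment of (1, 0) leaves the index set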
def evaluateUpperTailBound(cls, w, coeff_field, pde, maxh=1.0 / 10, add_maxm=10):
    """Estimate upper tail bounds according to Section 3.2."""
    @cache
    def get_ainfty(m, V):
        a0_f = coeff_field.mean_func
        if isinstance(a0_f, tuple):
            a0_f = a0_f[0]
        # determine min \overline{a} on D (approximately)
        f = FEniCSVector.from_basis(V, sub_spaces=0)
        f.interpolate(a0_f)
        min_a0 = f.min_val
        am_f, _ = coeff_field[m]
        if isinstance(am_f, tuple):
            am_f = am_f[0]
        # determine ||a_m/\overline{a}||_{L\infty(D)} (approximately)
        try:
            # use exact bounds if defined
            max_am = am_f.max_val
        except AttributeError:
            # otherwise interpolate
            f.interpolate(am_f)
            max_am = f.max_val
        ainftym = max_am / min_a0
        assert isinstance(ainftym, float)
        return ainftym

    def prepare_norm_w(energynorm, w):
        normw = {}
        for mu in w.active_indices():
            normw[mu] = energynorm(w[mu]._fefunc)
        return normw

    def LambdaBoundary(Lambda):
        suppLambda = supp(Lambda)
        for mu in Lambda:
            for m in suppLambda:
                mu1 = mu.inc(m)
                if mu1 not in Lambda:
                    yield mu1
                mu2 = mu.dec(m)
                if mu2 not in Lambda and mu2 is not None:
                    yield mu2

    # evaluate (3.15)
    def eval_zeta_bar(mu, suppLambda, coeff_field, normw, V, M):
        assert mu in normw.keys()
        zz = 0
        for m in range(M):
            if m in suppLambda:
                continue
            _, am_rv = coeff_field[m]
            beta = am_rv.orth_polys.get_beta(mu[m])
            ainfty = get_ainfty(m, V)
            zz += (beta[1] * ainfty) ** 2
        return normw[mu] * sqrt(zz)

    # evaluate (3.11)
    def eval_zeta(mu, Lambda, coeff_field, normw, V, M=None, this_m=None):
        z = 0
        if this_m is None:
            for m in range(M):
                _, am_rv = coeff_field[m]
                beta = am_rv.orth_polys.get_beta(mu[m])
                ainfty = get_ainfty(m, V)
                mu1 = mu.inc(m)
                if mu1 in Lambda:
                    z += ainfty * beta[1] * normw[mu1]
                mu2 = mu.dec(m)
                if mu2 in Lambda:
                    z += ainfty * beta[-1] * normw[mu2]
            return z
        else:
            m = this_m
            _, am_rv = coeff_field[m]
            beta = am_rv.orth_polys.get_beta(mu[m])
            ainfty = get_ainfty(m, V)
            return ainfty * beta[1] * normw[mu]

    # prepare some variables
    energynorm = pde.energy_norm
    Lambda = w.active_indices()
    suppLambda = supp(w.active_indices())
    # M = min(w.max_order + add_maxm, len(coeff_field))
    M = w.max_order + add_maxm
    normw = prepare_norm_w(energynorm, w)
    # retrieve (sufficiently fine) function space for maximum norm evaluation
    V = w[Multiindex()].basis.refine_maxh(maxh)[0]
    # evaluate estimator contributions of (3.16)
    from collections import defaultdict
    # === (a) zeta ===
    zeta = defaultdict(int)
    # iterate multiindex extensions
    for nu in LambdaBoundary(Lambda):
        assert nu not in Lambda
        zeta[nu] += eval_zeta(nu, Lambda, coeff_field, normw, V, M)
    # === (b) zeta_bar ===
    zeta_bar = {}
    # iterate over active indices
    for mu in Lambda:
        zeta_bar[mu] = eval_zeta_bar(mu, suppLambda, coeff_field, normw, V, M)
    # evaluate summed estimator (3.16)
    global_zeta = sqrt(sum([v ** 2 for v in zeta.values()]) + sum([v ** 2 for v in zeta_bar.values()]))
    # also return zeta evaluation for single m (needed for refinement algorithm)
    eval_zeta_m = lambda mu, m: eval_zeta(mu=mu, Lambda=Lambda, coeff_field=coeff_field, normw=normw, V=V, M=M, this_m=m)
    logger.debug("=== ZETA %s --- %s --- %s", global_zeta, zeta, zeta_bar)
    return global_zeta, zeta, zeta_bar, eval_zeta_m
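
# Minimal sketch (toy values, not tied to any actual coefficient field) of the (3.16)-style
# aggregation used above: the global tail bound is the Euclidean combination of the boundary
# contributions zeta and the active-index contributions zeta_bar.
from math import sqrt as _sqrt

def aggregate_tail_bound(zeta, zeta_bar):
    return _sqrt(sum(v ** 2 for v in zeta.values()) + sum(v ** 2 for v in zeta_bar.values()))

# example: aggregate_tail_bound({"nu1": 3.0, "nu2": 4.0}, {"mu1": 0.0}) == 5.0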
def mark_y(cls, Lambda, zeta_, eval_zeta_m, theta_y, max_new_mi=100, type=1):
    """Carry out Doerfler marking by activation of new indices."""
    zeta = zeta_
    global_zeta = np.sqrt(sum([z ** 2 for z in zeta_.values()]))
    suppLambda = supp(Lambda)
    maxm = max(suppLambda)
    logger.debug("---- SUPPORT Lambda %s maxm %s Lambda %s ", suppLambda, maxm, Lambda)

    # A modified paper marking
    # ========================
    if type == 0:
        new_mi = []
        marked_zeta = 0.0
        possible_new_mu = []
        while True:
            # break if sufficiently many new mi are selected
            if theta_y * global_zeta <= marked_zeta or len(new_mi) >= max_new_mi or len(zeta) == 0:
                if len(new_mi) >= max_new_mi:
                    logger.warn("max new_mi reached (%i) WITHOUT sufficient share of global zeta!" % len(new_mi))
                if len(zeta) == 0:
                    logger.warn("NO MORE MI TO MARK!")
                break
            sorted_zeta = sorted(zeta.items(), key=itemgetter(1))
            logger.debug("SORTED ZETA %s", sorted_zeta)
            new_zeta = sorted_zeta[-1]
            mu = new_zeta[0]
            zeta.pop(mu)
            logger.debug("ADDING %s to new_mi %s", mu, new_mi)
            assert mu not in Lambda
            new_mi.append(mu)
            marked_zeta = np.sqrt(marked_zeta ** 2 + new_zeta[1] ** 2)

            # extend the set of inactive potential indices if necessary (see Section 5.7)
            # NOTE: this is a slight extension of the algorithm in the paper since the extension
            # is carried out for all active multiindices (and not only for the latest activated one)
            minm = min(set(range(1, maxm + 2)).difference(set(suppLambda)))     # find min(N \setminus supp(Lambda))
            for mu2 in Lambda:
                new_mu = mu2.inc(minm)
                if new_mu not in zeta.keys():
                    logger.debug("extending multiindex candidates by %s since it is at the boundary of Lambda (reachable from %s), minm: %s", new_mu, mu2, minm)
                    possible_new_mu += [new_mu]
                    zeta[new_mu] = eval_zeta_m(mu2, minm)
                    # update global zeta
                    global_zeta = np.sqrt(global_zeta ** 2 + zeta[new_mu] ** 2)
                    logger.debug("new global_zeta is %f", global_zeta)
                else:
                    logger.debug("no further extension of multiindex candidates required")
        if len(new_mi) >= max_new_mi:
            logger.debug("maximal number of new mi reached!")
        elif len(zeta) == 0:
            logger.debug("no more new indices available!")
        logger.info("possible new mu considered %s" % possible_new_mu)

    # B minimal y-dimension marking
    # =============================
    else:
        assert type == 1
        # === EVALUATE EXTENSION ===
        # ==========================
        # determine possible new mi
        new_y = {}
        minm = min(set(range(1, maxm + 2)).difference(set(suppLambda)))     # find min(N \setminus supp(Lambda))
        for mu2 in Lambda:
            new_mu = mu2.inc(minm)
            if new_mu not in Lambda and new_mu not in zeta.keys() and new_mu not in new_y.keys():
                logger.debug("extending multiindex candidates by %s since it is at the boundary of Lambda (reachable from %s), minm: %s", new_mu, mu2, minm)
                new_val = eval_zeta_m(mu2, minm)
                # update global zeta
                global_zeta = np.sqrt(global_zeta ** 2 + new_val ** 2)
                logger.debug("new global_zeta is %f", global_zeta)
                # test for new y dimension
                if len(set(supp([new_mu])).difference(set(suppLambda))) > 0:
                    assert new_mu not in new_y.keys()
                    new_y[new_mu] = new_val
                else:
                    assert new_mu not in zeta.keys()
                    zeta[new_mu] = new_val
            else:
                logger.debug("no further extension of multiindex candidates required")

        # === DETERMINE NEW Y DIMENSIONS ===
        # ==================================
        # determine how many new y dimensions are needed
        new_mi = []
        sorted_new_y = sorted(new_y.items(), key=itemgetter(1))
        sum_zeta_val = np.sqrt(sum([z ** 2 for z in zeta.values()]))
        # add new y dimensions while sum_zeta_val is smaller than the required marking value
        while sum_zeta_val < theta_y * global_zeta and len(sorted_new_y) > 0:
            # add the largest new y
            new_zeta = sorted_new_y.pop(-1)
            mu = new_zeta[0]
            logger.debug("ADDING NEW Y %s to new_mi %s while target_zeta is %s", mu, new_mi, theta_y * global_zeta)
            assert mu not in Lambda
            new_mi.append(mu)
            global_zeta = np.sqrt(global_zeta ** 2 - new_zeta[1] ** 2)
        if len(sorted_new_y) == 0 and sum_zeta_val < theta_y * global_zeta:
            logger.warn("UNABLE to mark sufficiently many NEW MI!")

        # === DETERMINE HIGHER ORDER ACTIVE MI EXTENSION ===
        # ==================================================
        # add mi corresponding to already active y dimensions
        sorted_zeta = sorted(zeta.items(), key=itemgetter(1))
        logger.debug("SORTED ZETA %s", sorted_zeta)
        marked_zeta = 0.0
        while marked_zeta < theta_y * global_zeta and len(sorted_zeta) > 0:
            new_zeta = sorted_zeta.pop(-1)
            mu = new_zeta[0]
            logger.debug("ADDING EXTENSION OF EXISTING MI %s to new_mi %s while marked_zeta is %s", mu, new_mi, marked_zeta)
            assert mu not in Lambda
            new_mi.append(mu)
            marked_zeta = np.sqrt(marked_zeta ** 2 + new_zeta[1] ** 2)
        zeta = sorted_zeta
        logger.info("possible new mu considered %s and %s" % (new_y.keys(), new_mi))
        if len(zeta) == 0 and theta_y * global_zeta > marked_zeta:
            logger.warning("list of mi candidates is empty and reduction goal NOT REACHED, %f > %f!", theta_y * global_zeta, marked_zeta)

    if len(new_mi) > 0:
        logger.info("SELECTED NEW MULTIINDICES %s", new_mi)
    else:
        logger.info("NO NEW MULTIINDICES SELECTED")
    return new_mi
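
# Generic Doerfler-type selection sketch: pick the largest contributions until their combined
# (squared-sum) share reaches a theta fraction of the total. This is a simplified, hypothetical
# illustration of the selection principle used in mark_y above, not the two-stage algorithm itself.
import numpy as np
from operator import itemgetter

def doerfler_select(contrib, theta):
    total = np.sqrt(sum(v ** 2 for v in contrib.values()))
    selected, marked = [], 0.0
    for key, val in sorted(contrib.items(), key=itemgetter(1), reverse=True):
        if marked >= theta * total:
            break
        selected.append(key)
        marked = np.sqrt(marked ** 2 + val ** 2)
    return selected

# example: doerfler_select({"a": 3.0, "b": 2.0, "c": 1.0}, 0.9) returns ["a", "b"]
# since 3.0 < 0.9 * sqrt(14) ~ 3.37 <= sqrt(9 + 4) ~ 3.61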
if len(sim_stats) > 0:
    print sim_stats[0].keys()
    for k in sim_stats[0].keys():
        if k not in ["CONF", "OPTS"]:
            D[k] = [s[k] for s in sim_stats]
    # evaluate additional data
    D["NUM-MI"] = [len(m) for m in D["MI"]]
    try:
        D["EFFICIENCY"] = [est / err for est, err in zip(D["ERROR-EST"], D["MC-ERROR-H1A"])]
        D["WITH-MC"] = True
    except KeyError:
        D["WITH-MC"] = False
        print "WARNING: No MC data found!"
    # ...from w_history
    D["NUM-Y"] = [len(supp(w.active_indices())) + 1 for w in w_history]
    D["MESH-CELLS"] = [w.basis.basis.mesh.num_cells() for w in w_history]
    D["MESH-HMIN"] = [w.basis.basis.mesh.hmin() for w in w_history]
    D["MESH-HMAX"] = [w.basis.basis.mesh.hmax() for w in w_history]
    # NOTE: these store 1/h^2 (not 1/h)
    D["MESH-HMINinv"] = [1 / h ** 2 for h in D["MESH-HMIN"]]
    D["MESH-HMAXinv"] = [1 / h ** 2 for h in D["MESH-HMAX"]]
    # meshes
    if options.withMesh:
        if options.meshDofs > 0:
            for i, dofs in enumerate(D["DOFS"]):
                if dofs >= options.meshDofs or i == len(D["DOFS"]) - 1:
                    D["MESH"] = w_history[i].basis.basis.mesh
                    break
        else:
            D["MESH"] = w_history[-1].basis.basis.mesh
sim_stats = pickle.load(fin)
print "sim_stats has %s iterations" % len(sim_stats)
# prepare data
D = {}
if len(sim_stats) > 0:
    print sim_stats[0].keys()
    for k in sim_stats[0].keys():
        if k not in ["CONF", "OPTS", "PROJ-INACTIVE-ZETA"]:
            D[k] = [s[k] for s in sim_stats]
    # evaluate additional data
    D["NUM-MI"] = [len(m) for m in D["MI"]]
    try:
        if options.singleP:
            D["DIM-Y"] = [len(supp([i[0] for i in ami])) + 1 for ami in D["MI"]]
            # WARNING: EGSZ1 writes out the squared estimator!
            D["EST"] = [sqrt(est) for est in D["EST"]]
            D["EFFICIENCY"] = [est / err for est, err in zip(D["EST"], D["MC-H1ERR"])]
        else:
            D["DIM-Y"] = [len(supp(ami)) + 1 for ami in D["MI"]]
            D["EFFICIENCY"] = [est / err for est, err in zip(D["ERROR-EST"], D["MC-ERROR-H1A"])]
        D["WITH-MC"] = True
    except KeyError:
        D["WITH-MC"] = False
        print "WARNING: No MC data found!"
    # store data for plotting
    SIM_STATS[P] = D
else:
    print "SKIPPING P{0} data since it is empty!".format(P)