def computeIndicators(TInd):
    # localized coefficients on the patch: reference and perturbed
    aPatch = lambda: coef.localizeCoefficient(patchT[TInd], aFine_ref)
    rPatch = lambda: coef.localizeCoefficient(patchT[TInd], a_Fine_to_be_approximated)

    epsCoarse = lod.computeErrorIndicatorCoarseFromCoefficients(
        patchT[TInd], csiT[TInd].muTPrime, aPatch, rPatch)
    return epsCoarse


print('precomputing ....')

# Use mapper to distribute computations (mapper could be the 'map' built-in or e.g. an ipyparallel map)
patchT, correctorsListT, KmsijT, csiT = zip(*map(computeKmsij, range(world.NtCoarse)))
patchT, correctorRhsT, RmsiT = zip(*map(computeRmsi, range(world.NtCoarse)))

RFull = pglod.assemblePatchFunction(world, patchT, RmsiT)
MFull = fem.assemblePatchMatrix(world.NWorldFine, world.MLocFine)

print('computing error indicators')
epsCoarse = list(map(computeIndicators, range(world.NtCoarse)))

'''
Plot error indicator
'''
np_eps = np.array(epsCoarse)  # collect the per-element indicators in one array
draw_indicator(NWorldCoarse, np_eps, original_style=True, Gridsize=N)

Algorithm = algorithms.PercentageVsErrorAlgorithm(world=world, k=k,
                                                  boundaryConditions=boundaryConditions,
return E_vh, E_f, E_Rf


print('precomputing ....')

# Use mapper to distribute computations (mapper could be the 'map' built-in or e.g. an ipyparallel map)
print('computing real correctors', end='', flush=True)
patchT, correctorsListT, KmsijT, csiT = zip(*map(real_computeKmsij, range(world.NtCoarse)))
print()

print('computing real right hand side correctors', end='', flush=True)
patchT, correctorRhsT, RmsiT, cetaTPrimeT = zip(*map(real_computeRmsi, range(world.NtCoarse)))
print()

# assemble the PG-LOD system on the coarse mesh
KFull = pglod.assembleMsStiffnessMatrix(world, patchT, KmsijT)
RFull = pglod.assemblePatchFunction(world, patchT, RmsiT)
MFull = fem.assemblePatchMatrix(world.NWorldFine, world.MLocFine)
Rf = pglod.assemblePatchFunction(world, patchT, correctorRhsT)

basis = fem.assembleProlongationMatrix(world.NWorldCoarse, world.NCoarseElement)
bFull = basis.T * MFull * f_trans - RFull

basisCorrectors = pglod.assembleBasisCorrectors(world, patchT, correctorsListT)
modifiedBasis = basis - basisCorrectors

uFull, _ = pglod.solve(world, KFull, bFull, boundaryConditions)

# map the coarse solution to the fine grid and add the right hand side corrector
uLodFine = modifiedBasis * uFull
uLodFine += Rf
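# The energy-norm error computation below recurs in every StartAlgorithm variant.
# A minimal helper, given here as a sketch (assuming the fine stiffness matrix,
# AFine_trans in the listings, is a scipy.sparse matrix and u, u_ref are fine-grid
# vectors of matching length):
import numpy as np

def energy_error(u, u_ref, AFine):
    # ||u - u_ref||_A = sqrt((u - u_ref)^T * AFine * (u - u_ref))
    diff = u - u_ref
    return np.sqrt(np.dot(diff, AFine * diff))

# e.g. energy_error(uLodFine, uFineFull_trans, AFine_trans) gives the absolute
# energy error, and energy_error(uLodFine, 0 * uLodFine, AFine_trans) the energy norm.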
def StartAlgorithm(self):
    assert (self.init)  # only start the algorithm once

    # In case not every element is affected, the raw percentage would be misleading,
    # so only elements with a positive error indicator are kept.
    eps_size = np.size(self.E_vh)
    self.E_vh = {i: self.E_vh[i] for i in range(np.size(self.E_vh)) if self.E_vh[i] > 0}
    full_percentage = len(self.E_vh) / eps_size

    world = self.world
    print('starting algorithm ...... ')

    TOLt = []
    to_be_updatedT = []
    energy_errorT = []
    rel_energy_errorT = []
    tmp_errorT = []
    offset = []
    TOL = 100  # placeholder, not used in this variant

    for i in range(len(self.E_vh) + 1):
        if not self.init:
            offset = self.UpdateNextElement(offset, Printing=False)

        to_be_updated = np.size(offset) / len(self.E_vh) * 100
        if self.init:
            to_be_updatedT.append(to_be_updated)
        else:
            to_be_updatedT.append(to_be_updated * full_percentage)

        KFull = pglod.assembleMsStiffnessMatrix(world, self.patchT, self.KmsijT)
        RFull = pglod.assemblePatchFunction(world, self.patchT, self.RmsijT)
        Rf = pglod.assemblePatchFunction(world, self.patchT, self.correctorsRhsT)

        basis = fem.assembleProlongationMatrix(world.NWorldCoarse, world.NCoarseElement)
        bFull = basis.T * self.MFull * self.f_trans - RFull

        basisCorrectors = pglod.assembleBasisCorrectors(world, self.patchT, self.correctorsListT)
        modifiedBasis = basis - basisCorrectors

        uFull, _ = pglod.solve(world, KFull, bFull, self.boundaryConditions)

        uLodFine = modifiedBasis * uFull
        uLodFine += Rf
        uFineFull_trans_LOD = uLodFine

        if self.init:
            uFineFull_trans_LOD_old = uLodFine

        energy_norm = np.sqrt(
            np.dot(uFineFull_trans_LOD, self.AFine_trans * uFineFull_trans_LOD))

        # change with respect to the previous step
        tmp_energy_error = np.sqrt(
            np.dot((uFineFull_trans_LOD - uFineFull_trans_LOD_old),
                   self.AFine_trans * (uFineFull_trans_LOD - uFineFull_trans_LOD_old)))

        # actual error
        if self.compare_with_best_LOD:
            energy_error = np.sqrt(
                np.dot((uFineFull_trans_LOD - self.u_best_LOD),
                       self.AFine_trans * (uFineFull_trans_LOD - self.u_best_LOD)))
        else:
            energy_error = np.sqrt(
                np.dot((uFineFull_trans_LOD - self.uFineFull_trans),
                       self.AFine_trans * (uFineFull_trans_LOD - self.uFineFull_trans)))

        uFineFull_trans_LOD_old = uFineFull_trans_LOD

        if self.init:
            self.init = 0
            print(' step({:3d}/{}) T: {} updates: {:7.3f}%, energy error: {:f}, tmp_error: {:f}, relative energy error: {:f}'
                  .format(i, len(self.E_vh), ' - ', to_be_updated * full_percentage,
                          energy_error, tmp_energy_error, energy_error / energy_norm))
        else:
            print(' step({:3d}/{}) T: {:3d} updates: {:7.3f}%, energy error: {:f}, tmp_error: {:f}, relative energy error: {:f}'
                  .format(i, len(self.E_vh), offset[-1], to_be_updated * full_percentage,
                          energy_error, tmp_energy_error, energy_error / energy_norm))

        rel_energy_errorT.append(energy_error / energy_norm)
        energy_errorT.append(energy_error)
        tmp_errorT.append(tmp_energy_error)

    return to_be_updatedT, energy_errorT, tmp_errorT, rel_energy_errorT, TOLt, uFineFull_trans_LOD
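# Toy arithmetic (not from the original code) for the full_percentage rescaling above:
# if only 40 of 100 coarse elements carry a positive error indicator and 10 of those
# 40 correctors have been updated, the reported figure is 10% of all elements rather
# than 25% of the affected ones.
eps_size = 100                         # total number of coarse elements
affected = 40                          # elements with a positive indicator (len(E_vh))
updated = 10                           # correctors recomputed so far (np.size(offset))
full_percentage = affected / eps_size
to_be_updated = updated / affected * 100
print(to_be_updated * full_percentage)   # -> 10.0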
def StartAlgorithm(self):
    assert (self.init)  # only start the algorithm once

    # In case not every element is affected, the raw percentage would be misleading,
    # so only elements with a positive error indicator are kept.
    eps_size = np.size(self.E_vh)
    self.E_vh = {i: self.E_vh[i] for i in range(np.size(self.E_vh)) if self.E_vh[i] > 0}
    vals = [v for v in self.E_vh.values()]
    vals.append(0)
    tols = np.sort(vals)[::-1]

    eps_size_f = np.size(self.E_f)
    self.E_f = {i: self.E_f[i] for i in range(np.size(self.E_f)) if self.E_f[i] > 0}
    vals_f = [v for v in self.E_f.values()]
    vals_f.append(0)
    tols_f = np.sort(vals_f)[::-1]

    # make sure we only update one element at a time
    for i in range(1, np.size(tols)):
        if tols[i] == tols[i - 1]:
            tols[i] -= 1e-7
    for i in range(1, np.size(tols_f)):
        if tols_f[i] == tols_f[i - 1]:
            tols_f[i] -= 1e-7

    full_percentage = len(self.E_vh) / eps_size
    full_percentage_f = len(self.E_f) / eps_size_f

    world = self.world
    print('starting algorithm ...... ')

    TOLt = []
    to_be_updatedT = []
    energy_errorT = []
    rel_energy_errorT = []
    tmp_errorT = []
    offset = []
    TOL = 100  # placeholder, overwritten in the first iteration

    for i in range(np.size(tols)):
        if TOL != 0:
            TOL = tols[i]

        TOLt.append(TOL)
        offset = self.UpdateNextElement(TOL, offset, Printing=False)

        to_be_updated = np.size(offset) / len(self.E_vh) * 100
        if self.init:
            to_be_updatedT.append(to_be_updated)
        else:
            to_be_updatedT.append(to_be_updated * full_percentage)

        KFull = pglod.assembleMsStiffnessMatrix(world, self.patchT, self.KmsijT)
        RFull = pglod.assemblePatchFunction(world, self.patchT, self.RmsijT)
        Rf = pglod.assemblePatchFunction(world, self.patchT, self.correctorsRhsT)

        basis = fem.assembleProlongationMatrix(world.NWorldCoarse, world.NCoarseElement)
        bFull = basis.T * self.MFull * self.f_trans - RFull

        basisCorrectors = pglod.assembleBasisCorrectors(world, self.patchT, self.correctorsListT)
        modifiedBasis = basis - basisCorrectors

        uFull, _ = pglod.solve(world, KFull, bFull, self.boundaryConditions)

        uLodFine = modifiedBasis * uFull
        uLodFine += Rf
        uFineFull_trans_LOD = uLodFine

        if self.init:
            self.init = 0
            uFineFull_trans_LOD_old = uLodFine

        energy_norm = np.sqrt(
            np.dot(uFineFull_trans_LOD, self.AFine_trans * uFineFull_trans_LOD))

        # change with respect to the previous step
        tmp_energy_error = np.sqrt(
            np.dot((uFineFull_trans_LOD - uFineFull_trans_LOD_old),
                   self.AFine_trans * (uFineFull_trans_LOD - uFineFull_trans_LOD_old)))

        # actual error
        if self.compare_with_best_LOD:
            energy_error = np.sqrt(
                np.dot((uFineFull_trans_LOD - self.u_best_LOD),
                       self.AFine_trans * (uFineFull_trans_LOD - self.u_best_LOD)))
        else:
            energy_error = np.sqrt(
                np.dot((uFineFull_trans_LOD - self.uFineFull_trans),
                       self.AFine_trans * (uFineFull_trans_LOD - self.uFineFull_trans)))

        uFineFull_trans_LOD_old = uFineFull_trans_LOD

        print(' step({:3d}/{}) TOL: {:f}, updates: {:7.3f}%, energy error: {:f}, tmp_error: {:f}, relative energy error: {:f}'
              .format(i, np.size(tols), TOL, to_be_updated * full_percentage,
                      energy_error, tmp_energy_error, energy_error / energy_norm))

        rel_energy_errorT.append(energy_error / energy_norm)
        energy_errorT.append(energy_error)
        tmp_errorT.append(tmp_energy_error)

        if TOL == 0:
            # stop now
            break

    return to_be_updatedT, energy_errorT, tmp_errorT, rel_energy_errorT, TOLt, uFineFull_trans_LOD
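# Toy illustration (not from the original code) of the descending tolerance list and
# the 1e-7 tie break used above: equal indicator values are nudged apart so that each
# tolerance step triggers exactly one additional corrector update.
import numpy as np

vals = [0.3, 0.3, 0.1]
vals.append(0)
tols = np.sort(vals)[::-1]
for i in range(1, np.size(tols)):
    if tols[i] == tols[i - 1]:
        tols[i] -= 1e-7
print(tols)   # -> [0.3, 0.2999999, 0.1, 0.0]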
def StartAlgorithm(self):
    assert (self.init)  # only start the algorithm once

    # In case not every element is affected, the raw percentage would be misleading,
    # so only elements with a positive error indicator are kept.
    eps_size = np.size(self.E_vh)
    self.E_vh = {i: self.E_vh[i] for i in range(np.size(self.E_vh)) if self.E_vh[i] > 0}
    full_percentage = len(self.E_vh) / eps_size

    world = self.world
    print('starting algorithm ...... ')

    TOL = self.StartingTolerance

    TOLt = []
    to_be_updatedT = []
    energy_errorT = []
    rel_energy_errorT = []
    tmp_errorT = []
    offset = []

    continue_computing = 1
    while continue_computing:
        TOLt.append(TOL)
        offset, computed = self.UpdateElements(TOL, offset, Printing=False)

        if not computed:
            # nothing new was computed for this tolerance
            if not self.init:
                to_be_updated = np.size(offset) / len(self.E_vh) * 100
                to_be_updatedT.append(to_be_updated * full_percentage)
                energy_errorT.append(energy_error)
                tmp_errorT.append(old_tmp_energy_error)
            if np.size(offset) / len(self.E_vh) == 1:
                print(' every corrector has been updated')
                continue_computing = 0
                continue
            else:
                print(' skipping TOL {}'.format(TOL))
                TOL *= 3 / 4.
                continue

        to_be_updated = np.size(offset) / len(self.E_vh) * 100
        to_be_updatedT.append(to_be_updated * full_percentage)

        KFull = pglod.assembleMsStiffnessMatrix(world, self.patchT, self.KmsijT)
        RFull = pglod.assemblePatchFunction(world, self.patchT, self.RmsijT)
        Rf = pglod.assemblePatchFunction(world, self.patchT, self.correctorsRhsT)

        basis = fem.assembleProlongationMatrix(world.NWorldCoarse, world.NCoarseElement)
        bFull = basis.T * self.MFull * self.f_trans - RFull

        basisCorrectors = pglod.assembleBasisCorrectors(world, self.patchT, self.correctorsListT)
        modifiedBasis = basis - basisCorrectors

        uFull, _ = pglod.solve(world, KFull, bFull, self.boundaryConditions)

        uLodFine = modifiedBasis * uFull
        uLodFine += Rf
        uFineFull_trans_LOD = uLodFine

        if self.init:
            uFineFull_trans_LOD_old = uLodFine
            self.init = 0

        energy_norm = np.sqrt(
            np.dot(uFineFull_trans_LOD, self.AFine_trans * uFineFull_trans_LOD))

        # change with respect to the previous step
        tmp_energy_error = np.sqrt(
            np.dot((uFineFull_trans_LOD - uFineFull_trans_LOD_old),
                   self.AFine_trans * (uFineFull_trans_LOD - uFineFull_trans_LOD_old)))
        old_tmp_energy_error = tmp_energy_error

        # actual error
        energy_error = np.sqrt(
            np.dot((uFineFull_trans_LOD - self.uFineFull_trans),
                   self.AFine_trans * (uFineFull_trans_LOD - self.uFineFull_trans)))

        uFineFull_trans_LOD_old = uFineFull_trans_LOD

        print(' TOL: {:f}, updates: {:7.3f}%, energy error: {:f}, tmp_error: {:f}, relative energy error: {:f}'
              .format(TOL, to_be_updated * full_percentage, energy_error,
                      tmp_energy_error, energy_error / energy_norm))

        rel_energy_errorT.append(energy_error / energy_norm)
        energy_errorT.append(energy_error)
        tmp_errorT.append(tmp_energy_error)

        if tmp_energy_error > 1e-5:
            TOL *= 3 / 4.
        else:
            if int(np.size(offset) / len(self.E_vh)) == 1:
                if computed:
                    print(' stop computing')
                    continue_computing = 0

    return to_be_updatedT, energy_errorT, tmp_errorT, rel_energy_errorT, TOLt, uFineFull_trans_LOD
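# Hedged usage sketch: 'alg' stands for an already constructed algorithm object
# (for instance the truncated algorithms.PercentageVsErrorAlgorithm(...) call in the
# first listing); only the return signature is taken from the listings above, the
# plot itself is illustrative.
import matplotlib.pyplot as plt

to_be_updatedT, energy_errorT, tmp_errorT, rel_energy_errorT, TOLt, uLodFine = alg.StartAlgorithm()

plt.semilogy(to_be_updatedT, rel_energy_errorT, marker='o')
plt.xlabel('updated correctors [%]')
plt.ylabel('relative energy error')
plt.show()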
    ]
    correctorRhs = lod.computeElementCorrector(patch, IPatch, aPatch, None, MRhsList)[0]
    Rmsi = lod.computeRhsCoarseQuantities(patch, correctorRhs, aPatch)
    return patch, correctorRhs, Rmsi


# Use mapper to distribute computations (mapper could be the 'map' built-in or e.g. an ipyparallel map)
patchT, correctorsListT, KmsijT = zip(*map(computeKmsij, range(world.NtCoarse)))
print('!')
patchT, correctorRhsT, RmsiT = zip(*map(computeRmsi, range(world.NtCoarse)))
print('!')

KFull = pglod.assembleMsStiffnessMatrix(world, patchT, KmsijT)
RFull = pglod.assemblePatchFunction(world, patchT, RmsiT)
MFull = fem.assemblePatchMatrix(NFine, world.MLocFine)

free = util.interiorpIndexMap(NWorldCoarse)

basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)
bFull = basis.T * MFull * f

KFree = KFull[free][:, free]
bFree = bFull[free]
xFree = sparse.linalg.spsolve(KFree, bFree)

basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)
basisCorrectors = pglod.assembleBasisCorrectors(world, patchT, correctorsListT)
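# Sketch of the typical continuation (an assumption, not part of the excerpt above):
# with homogeneous Dirichlet data the non-free coarse DoFs stay zero, so the free
# solution xFree is prolonged back to the full coarse vector and mapped to the fine
# grid with the corrected basis.
import numpy as np

modifiedBasis = basis - basisCorrectors

uFull = np.zeros(world.NpCoarse)
uFull[free] = xFree
uLodFine = modifiedBasis * uFull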