def __init__(self, grid, fields):
    timer = Timer('setup'); timer.start()

    mesh = fields[0].function_space().mesh()
    # Same meshes for all
    assert all(mesh.id() == f.function_space().mesh().id() for f in fields)

    # Locate each point
    limit = mesh.num_entities_global(mesh.topology().dim())
    bbox_tree = mesh.bounding_box_tree()

    npoints = np.prod(grid.ns)
    cells_for_x = [None]*npoints
    for i, x in enumerate(grid.points()):
        cell = bbox_tree.compute_first_entity_collision(Point(*x))
        if -1 < cell < limit:
            cells_for_x[i] = Cell(mesh, cell)
    assert not any(c is None for c in cells_for_x)

    # For each field I want to build a function which evaluates
    # it at all the points
    self.data = {}
    self.eval_fields = []
    for u in fields:
        # Alloc
        key = u.name()
        self.data[key] = [np.zeros(npoints) for _ in range(u.value_size())]
        # Attach the eval for u
        self.eval_fields.append(eval_u(u, grid.points(), cells_for_x, self.data[key]))
    info('Probe setup took %g' % timer.stop())
    self.grid = grid
    # Get values at construction
    self.update()
def update(self):
    '''Evaluate now (with the fields as they are at the moment)'''
    timer = Timer('update'); timer.start()
    status = [f() for f in self.eval_fields]
    info('Probe update took %g' % timer.stop())
    return all(status)
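# A minimal usage sketch for the probing class above (referred to as Probe
# here; the grid object exposing `ns` and `points()` is a hypothetical helper,
# not part of DOLFIN):
#
# from dolfin import UnitSquareMesh, FunctionSpace, Expression, interpolate
#
# mesh = UnitSquareMesh(32, 32)
# V = FunctionSpace(mesh, 'CG', 1)
# f = interpolate(Expression('x[0]*x[1]', degree=2), V)
# f.rename('f', 'f')
#
# grid = StructuredGrid(...)     # hypothetical: must provide grid.ns and grid.points()
# probes = Probe(grid, [f])      # evaluates all fields once at construction
# probes.update()                # re-evaluate after f has been modified
# values = probes.data['f'][0]   # flat array with one sample per grid point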
# Initialize the l2 projection
lstsq_psi = l2projection(p, W, property_idx)

# Set initial condition at mesh and particles
psi0_h.interpolate(psi0_expression)
p.interpolate(psi0_h.cpp_object(), property_idx)

# Initialize add/delete particle
AD = AddDelete(p, 15, 25, [psi0_h])

step = 0
t = 0.0
area_0 = assemble(psi0_h * dx)
timer = Timer()
timer.start()

while step < num_steps:
    step += 1
    t += float(dt)

    if comm.rank == 0:
        print("Step " + str(step))

    # Advect particles, assemble and solve pde projection
    t1 = Timer("[P] Advect particles step")
    AD.do_sweep()
    ap.do_step(float(dt))
    AD.do_sweep_failsafe(4)
    del t1

    if projection_type == "PDE":
def solve(self):
    """
    Solve the optimal control problem
    """
    msg = "You need to build the problem before solving it"
    assert hasattr(self, "opt_type"), msg

    module, method = self.opt_type.split("_")

    logger.info("\n" + "Starting optimization".center(100, "-"))
    logger.info(
        "Scale: {}, \nDerivative Scale: {}".format(
            self.rd.scale, self.rd.derivative_scale
        )
    )
    logger.info(
        "Tolerance: {}, \nMaximum iterations: {}\n".format(self.tol, self.max_iter)
    )

    t = Timer()
    t.start()

    if self.oneD:
        res = minimize_1d(self.rd, self.x[0], **self.options)
        x = res["x"]
    else:
        if module == "scipy":
            res = scipy_minimize(self.rd, self.x, **self.options)
            x = res["x"]
        elif module == "pyOpt":
            obj, x, d = self.problem(**self.options)
        elif module == "moola":
            sol = self.solver.solve()
            x = sol["control"].data
        elif module == "ipopt":
            x = self.solver.solve(self.x)
        else:
            msg = (
                "Unknown optimization type {}. "
                "Define the optimization type as 'module_method', "
                "where module is e.g. scipy, pyOpt and method is "
                "e.g. slsqp."
            )
            raise ValueError(msg.format(self.opt_type))

    run_time = t.stop()

    opt_result = {}
    opt_result["x"] = x
    opt_result["nfev"] = self.rd.iter
    opt_result["nit"] = self.rd.iter
    opt_result["njev"] = self.rd.nr_der_calls
    opt_result["ncrash"] = self.rd.nr_crashes
    opt_result["run_time"] = run_time
    opt_result["controls"] = self.rd.controls_lst
    opt_result["func_vals"] = self.rd.func_values_lst
    opt_result["forward_times"] = self.rd.forward_times
    opt_result["backward_times"] = self.rd.backward_times
    opt_result["grad_norm"] = self.rd.grad_norm

    return self.rd, opt_result
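# A minimal sketch of consuming the result, assuming a hypothetical `optimizer`
# object that has already been built (opt_type, rd, x and options set up
# elsewhere):
#
# rd, opt_result = optimizer.solve()
# print("Optimal control: {}".format(opt_result["x"]))
# print("nfev = {}, run time = {:.1f} s".format(opt_result["nfev"],
#                                               opt_result["run_time"]))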
def main(module_name, ncases, params, petsc_params):
    '''
    Run the test case in module with ncases. Optionally store results
    in savedir. For some modules there are multiple (which) choices of
    preconditioners.
    '''
    # Unpack
    for k, v in params.items():
        exec(k + '=v', locals())

    RED = '\033[1;37;31m%s\033[0m'
    print RED % ('\tRunning %s with %d preconditioner' % (module_name, precond))

    module = __import__(module_name)  # no importlib in python2.7

    # Setup the MMS case
    u_true, rhs_data = module.setup_mms(eps)

    # Setup the convergence monitor
    if log:
        params = [('solver', solver), ('precond', str(precond)), ('eps', str(eps))]

        path = '_'.join([module_name] + ['%s=%s' % pv for pv in params])
        path = os.path.join(save_dir if save_dir else '.', path)
        path = '.'.join([path, 'txt'])
    else:
        path = ''

    memory, residuals = [], []
    monitor = module.setup_error_monitor(u_true, memory, path=path)

    # Sometimes it is useful to transform the solution before computing
    # the error, e.g. consider subdomains
    if hasattr(module, 'setup_transform'):
        # NOTE: transform takes two args, the case and the current computed
        # solution
        transform = module.setup_transform
    else:
        transform = lambda i, x: x

    print '='*79
    print '\t\t\tProblem eps = %r' % eps
    print '='*79
    for i in ncases:
        a, L, W = module.setup_problem(i, rhs_data, eps=eps)

        # Assemble blocks
        t = Timer('assembly'); t.start()
        AA, bb = map(ii_assemble, (a, L))
        print '\tAssembled blocks in %g s' % t.stop()

        # Check the symmetry
        wh = ii_Function(W)
        assert (AA*wh.block_vec() - (AA.T)*wh.block_vec()).norm() < 1E-10

        if solver == 'direct':
            # Turn into a (monolithic) PETScMatrix/Vector
            t = Timer('conversion'); t.start()
            AAm, bbm = map(ii_convert, (AA, bb))
            print '\tConversion to PETScMatrix/Vector took %g s' % t.stop()

            t = Timer('solve'); t.start()
            LUSolver('umfpack').solve(AAm, wh.vector(), bbm)
            print '\tSolver took %g s' % t.stop()

            niters = 1

        if solver == 'iterative':
            # Here we define a Krylov solver using PETSc
            BB = module.setup_preconditioner(W, precond, eps=eps)
            ## AA and BB as block_mat
            ksp = PETSc.KSP().create()

            # Default is minres
            if '-ksp_type' not in petsc_params:
                petsc_params['-ksp_type'] = 'minres'

            opts = PETSc.Options()
            for key, value in petsc_params.iteritems():
                opts.setValue(key, None if value == 'none' else value)

            ksp.setOperators(ii_PETScOperator(AA))

            ksp.setNormType(PETSc.KSP.NormType.NORM_PRECONDITIONED)
            # ksp.setTolerances(rtol=1E-6, atol=None, divtol=None, max_it=300)
            ksp.setConvergenceHistory()
            # We attach the wrapped preconditioner defined by the module
            ksp.setPC(ii_PETScPreconditioner(BB, ksp))

            ksp.setFromOptions()

            print ksp.getTolerances()

            # Want the iterations to start from random
            wh.block_vec().randomize()

            # Solve, note the passed object must be PETSc.Vec
            t = Timer('solve'); t.start()
            ksp.solve(as_petsc_nest(bb), wh.petsc_vec())
            print '\tSolver took %g s' % t.stop()

            niters = ksp.getIterationNumber()

            residuals.append(ksp.getConvergenceHistory())

        # Let's check the final size of the residual
        r_norm = (bb - AA*wh.block_vec()).norm()

        # Convergence?
        monitor.send((transform(i, wh), W, niters, r_norm))

    # Only send the final
    if save_dir:
        path = os.path.join(save_dir, module_name)
        for i, wh_i in enumerate(wh):
            # Renaming to make it easier to save state in Visit/Paraview
            wh_i.rename('u', str(i))
            File('%s_%d.pvd' % (path, i)) << wh_i

    # Plot relative residual norm
    if plot:
        plt.figure()
        [plt.semilogy(res/res[0], label=str(i)) for i, res in enumerate(residuals, 1)]
        plt.legend(loc='best')
        plt.show()
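# Hypothetical driver sketch for main() above (the module name and parameter
# values are placeholders; the real script builds these from its command-line
# parser). The params dict must supply every name unpacked via exec, i.e.
# eps, solver, precond, log, save_dir and plot.
#
# if __name__ == '__main__':
#     main('my_module',                  # assumed module providing setup_mms etc.
#          ncases=range(2, 6),
#          params={'eps': 1E-3, 'solver': 'iterative', 'precond': 0,
#                  'log': True, 'save_dir': '', 'plot': False},
#          petsc_params={'-ksp_type': 'minres', '-ksp_rtol': '1E-8'})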
def main(module_name, ncases, params, petsc_params):
    '''
    Run the test case in module with ncases. Optionally store results
    in savedir. For some modules there are multiple (which) choices of
    preconditioners.
    '''
    # Unpack
    for k, v in params.items():
        exec(k + '=v', locals())

    RED = '\033[1;37;31m%s\033[0m'
    print RED % ('\tRunning %s' % module_name)

    module = __import__(module_name)  # no importlib in python2.7

    # Setup the MMS case
    u_true, rhs_data = module.setup_mms(eps)

    # Setup the convergence monitor
    if log:
        params = [('solver', solver), ('precond', str(precond)), ('eps', str(eps))]

        path = '_'.join([module_name] + ['%s=%s' % pv for pv in params])
        path = os.path.join(save_dir if save_dir else '.', path)
        path = '.'.join([path, 'txt'])
    else:
        path = ''

    memory, residuals = [], []
    monitor = module.setup_error_monitor(u_true, memory, path=path)

    # Sometimes it is useful to transform the solution before computing
    # the error, e.g. consider subdomains
    if hasattr(module, 'setup_transform'):
        # NOTE: transform takes two args, the case and the current computed
        # solution
        transform = module.setup_transform
    else:
        transform = lambda i, x: x

    print '=' * 79
    print '\t\t\tProblem eps = %g' % eps
    print '=' * 79
    for i in ncases:
        a, L, W = module.setup_problem(i, rhs_data, eps=eps)

        # Assemble blocks
        t = Timer('assembly')
        t.start()
        AA, bb = map(ii_assemble, (a, L))
        print '\tAssembled blocks in %g s' % t.stop()

        wh = ii_Function(W)

        if solver == 'direct':
            # Turn into a (monolithic) PETScMatrix/Vector
            t = Timer('conversion')
            t.start()
            AAm, bbm = map(ii_convert, (AA, bb))
            print '\tConversion to PETScMatrix/Vector took %g s' % t.stop()

            t = Timer('solve')
            t.start()
            LUSolver('umfpack').solve(AAm, wh.vector(), bbm)
            print '\tSolver took %g s' % t.stop()

            niters = 1
        else:
            # Here we define a Krylov solver using PETSc
            BB = module.setup_preconditioner(W, precond, eps=eps)
            ## AA and BB as block_mat
            ksp = PETSc.KSP().create()

            # Default is minres
            if '-ksp_type' not in petsc_params:
                petsc_params['-ksp_type'] = 'minres'

            opts = PETSc.Options()
            for key, value in petsc_params.iteritems():
                opts.setValue(key, None if value == 'none' else value)

            ksp.setOperators(ii_PETScOperator(AA))

            ksp.setNormType(PETSc.KSP.NormType.NORM_PRECONDITIONED)
            # ksp.setTolerances(rtol=1E-6, atol=None, divtol=None, max_it=300)
            ksp.setConvergenceHistory()
            # We attach the wrapped preconditioner defined by the module
            ksp.setPC(ii_PETScPreconditioner(BB, ksp))

            ksp.setFromOptions()

            print ksp.getTolerances()

            # Want the iterations to start from random
            wh.block_vec().randomize()

            # Solve, note the passed object must be PETSc.Vec
            t = Timer('solve')
            t.start()
            ksp.solve(as_petsc_nest(bb), wh.petsc_vec())
            print '\tSolver took %g s' % t.stop()

            niters = ksp.getIterationNumber()

            residuals.append(ksp.getConvergenceHistory())

        # Let's check the final size of the residual
        r_norm = (bb - AA * wh.block_vec()).norm()

        # Convergence?
        monitor.send((transform(i, wh), niters, r_norm))

    # Only send the final
    if save_dir:
        path = os.path.join(save_dir, module_name)
        for i, wh_i in enumerate(wh):
            # Renaming to make it easier to save state in Visit/Paraview
            wh_i.rename('u', str(i))
            File('%s_%d.pvd' % (path, i)) << wh_i

    # Plot relative residual norm
    if plot:
        plt.figure()
        [
            plt.semilogy(res / res[0], label=str(i))
            for i, res in enumerate(residuals, 1)
        ]
        plt.legend(loc='best')
        plt.show()
def solve(self):
    """
    Solve the optimal control problem
    """
    # msg = "You need to build the problem before solving it"
    # assert hasattr(self, "opt_type"), msg

    module = self.parameters["opt_lib"]

    logger.info("Starting optimization")
    # logger.info(
    #     "Scale: {}, \nDerivative Scale: {}".format(
    #         self.J.scale, self.J.derivative_scale
    #     )
    # )
    # logger.info(
    #     "Tolerance: {}, \nMaximum iterations: {}\n".format(self.tol, self.max_iter)
    # )

    t = Timer()
    t.start()

    if self.parameters["nvar"] == 1:
        res = minimize_1d(self.J, self.x[0], **self.options)
        x = res["x"]
    else:
        if module == "scipy":
            res = scipy_minimize(self.J, self.x, **self.options)
            x = res["x"]
        elif module == "pyOpt":
            obj, x, d = self.problem(**self.options)
        elif module == "moola":
            sol = self.solver.solve()
            x = sol["control"].data
        elif module == "ipopt":
            x = self.solver.solve(self.x)
        else:
            msg = (
                "Unknown optimization type {}. "
                "Define the optimization type as 'module_method', "
                "where module is e.g. scipy, pyOpt and method is "
                "e.g. slsqp."
            )
            raise ValueError(msg.format(module))

    run_time = t.stop()

    # opt_result = {}
    # opt_result["initial_control"] = self.initial_control
    # opt_result["optimal_control"] = x
    # opt_result["run_time"] = run_time
    # opt_result["nfev"] = self.J.iter
    # opt_result["nit"] = self.J.iter
    # opt_result["njev"] = self.J.nr_der_calls
    # opt_result["ncrash"] = self.J.nr_crashes
    # opt_result["controls"] = self.J.controls_lst
    # opt_result["func_vals"] = self.J.func_values_lst
    # opt_result["forward_times"] = self.J.forward_times
    # opt_result["backward_times"] = self.J.backward_times
    # opt_result["grad_norm"] = self.J.grad_norm

    return optimization_results(
        initial_control=self.initial_control, optimal_control=x, run_time=run_time
    )