def run(self, globdat):
    # ADVANCE
    logging.info("Advancing to the next load step")

    globdat.i += 1
    ndof = globdat.get("ndof")
    globdat.set("fext", np.zeros(ndof))
    globdat.set("loadScale", globdat.i)

    globdat.model.takeAction(Action.ADVANCE, globdat)
    globdat.model.takeAction(Action.GET_MATRIX_0, globdat)
    globdat.model.takeAction(Action.GET_EXT_VECTOR, globdat)
    globdat.model.takeAction(Action.GET_CONSTRAINTS, globdat)

    # INITIAL RESIDUAL
    mbuild = globdat.get("mbuild")
    fext = globdat.get("fext")
    cons = globdat.get("cons")
    disp = globdat.get("solu")
    ndof = globdat.get("ndof")

    old_disp = deepcopy(disp)
    cons.updateSolution(disp)
    Du = globdat.set("Du", disp - old_disp)

    K = mbuild.getDenseMatrix()
    r = fext - np.array(K).dot(Du)

    solver = Solver(self.type, cons)
    solver.solve(K, disp, r, mbuild.hbw)

    return Status.EXIT
def p_convergence(config: ConfigParser, solver: Solver, sol: GridFunction, var: str) -> None:
    """
    Function to check p (interpolant polynomial order) convergence and print results.

    Args:
        config: Config file from which to grab configuration parameters.
        solver: The solver used.
        sol: Gridfunction that contains the current solution.
        var: The variable of interest.
    """
    num_refinements = config.get_item(['ERROR ANALYSIS', 'num_refinements'], int)
    average_lst = config.get_list(['ERROR ANALYSIS', 'error_average'], str, quiet=True)
    component = solver.model.model_components[var]
    average = component in average_lst

    # Reload the model's mesh and finite element space so convergence tests can
    # be chained and don't affect each other.
    solver.model.load_mesh_fes(mesh=True, fes=True)

    # The first solve used the default settings.
    if component is None:
        err = norm('l2_norm', sol, solver.model.ref_sol['ref_sols'][var], solver.model.mesh,
                   solver.model.fes, average)
    else:
        err = norm('l2_norm', sol.components[component], solver.model.ref_sol['ref_sols'][var],
                   solver.model.mesh, solver.model.fes.components[component], average)

    # Track the convergence information.
    num_dofs_lst = [solver.model.fes.ndof]
    interp_ord_lst = [solver.model.interp_ord[var]]
    error_lst = [err]

    # Then run through a series of interpolant refinements.
    for n in range(num_refinements):
        solver.model.interp_ord = {key: val + 1 for key, val in solver.model.interp_ord.items()}
        solver.model.load_mesh_fes(mesh=False, fes=True)
        solver.reset_model()

        sol = solver.solve()

        if component is None:
            err = norm('l2_norm', sol, solver.model.ref_sol['ref_sols'][var], solver.model.mesh,
                       solver.model.fes, average)
        else:
            err = norm('l2_norm', sol.components[component], solver.model.ref_sol['ref_sols'][var],
                       solver.model.mesh, solver.model.fes.components[component], average)

        num_dofs_lst.append(solver.model.fes.ndof)
        interp_ord_lst.append(solver.model.interp_ord[var])
        error_lst.append(err)

        print('L2 norm at refinement {0}: {1}'.format(n, err))

    # Display the results nicely.
    convergence_table = [['Interpolant Order', 'DOFs', 'Error', 'Convergence Rate']]
    convergence_table.append([interp_ord_lst[0], num_dofs_lst[0], error_lst[0], 0])

    for n in range(num_refinements):
        # TODO: Not sure if this is the correct equation for p-convergence.
        convergence_rate = math.log(error_lst[n] / error_lst[n + 1]) \
            / math.log(num_dofs_lst[n + 1] / num_dofs_lst[n])
        convergence_table.append([interp_ord_lst[n + 1], num_dofs_lst[n + 1], error_lst[n + 1],
                                  convergence_rate])

    print(tabulate.tabulate(convergence_table, headers='firstrow', floatfmt=['.1f', '.1f', '.3e', '.2f']))
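# A minimal numeric sketch of the log-ratio rate formula used in p_convergence
# above (the error and DOF values here are made up for illustration): the rate
# is log(e_n / e_{n+1}) / log(N_{n+1} / N_n), i.e. the observed slope of error
# versus degrees of freedom on a log-log plot.
import math

errors = [1.0e-2, 1.25e-3]   # hypothetical errors at two successive orders
num_dofs = [100, 200]        # hypothetical DOF counts

rate = math.log(errors[0] / errors[1]) / math.log(num_dofs[1] / num_dofs[0])
print(rate)  # ~3.0: the error shrinks like (1/N)^3 as the DOFs double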
def add_configs(self, belief_points):
    Solver.add_configs(self)

    # Filled with a dummy alpha vector.
    self.alpha_vecs = [AlphaVector(a=-1, v=np.zeros(self.model.num_states))]
    self.belief_points = belief_points
    self.compute_gamma_reward()
def __init__(self, model):
    Solver.__init__(self, model)
    self.tree = None
    self.simulation_time = None  # in seconds
    self.max_particles = None  # maximum number of particles that can be supplied by hand for a belief node
    self.reinvigorated_particles_ratio = None  # ratio of max_particles to mutate
    self.utility_fn = None
def initialize_for_solve(self):
    if hasattr(self, 'f'):
        # If f is input in the form f(u,t), wrap f to f_f77 for the Fortran code.
        f = self.f
        self.f_f77 = lambda t, u: np.asarray(f(u, t))
    elif hasattr(self, 'f_f77'):
        # If f is input in the form f(t,u) (usually in Fortran), wrap f_f77 to
        # the general form f(u,t) for switch_to().
        f_f77 = self.f_f77
        self.f = lambda u, t: np.asarray(f_f77(t, u))
    Solver.initialize_for_solve(self)
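# A minimal, self-contained sketch of the argument-order wrapping done above
# (standalone names; no Fortran involved): both calling conventions evaluate
# the same right-hand side.
import numpy as np

def f(u, t):  # Python convention: f(u, t)
    return -2.0 * u

f_f77 = lambda t, u: np.asarray(f(u, t))  # Fortran convention: f(t, u)

u0, t0 = np.array([1.0]), 0.0
assert np.allclose(f(u0, t0), f_f77(t0, u0))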
def calculation(self):
    while True:
        try:
            calculator = Solver(self.type_method, self.xy, self.x)
            calculator.solve()
            del calculator
            break
        except TypeError:
            break
        except ValueError:
            break
def __init__(self, model):
    Solver.__init__(self, model)
    self.tree = None
    self.gamma = None  # discount
    self.cur_state = None  # current state for which an action is produced
    self.horizon = None
    self.width = None
    self.max_reward = None  # upper bound on the possible reward for a state
    self.max_diff = None  # max expected difference between optimal and computed
    self.utility_fn = None
def eval_models(models_paths: list, path_to_data: str):
    if len(models_paths) == 0:
        return 0.0

    # Getting the test loader.
    ds = FashionMnistHandler(path_to_data, False)
    ds.download()
    ds.load()
    # Noise parameters are not relevant since the test loader shouldn't have noise.
    _, _, test_loader = ds.get_noisy_loaders(0, '1', 0.2, 128, 128, 128)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    test_acc = []

    for model_file in models_paths:
        # Creating the model.
        checkpoint = torch.load(model_file, map_location=device)
        model_name = model_file.split("/")[-1]

        # Loading from the checkpoint.
        model = CNNModel()
        model.load_state_dict(checkpoint['model_state_dict'])
        model.to(device)
        loss_fn = torch.nn.CrossEntropyLoss()

        # Evaluating.
        _, acc = Solver.eval(model, device, loss_fn=loss_fn, data_loader=test_loader)
        test_acc.append(acc)
        print(f"Model {model_name} has {acc:.4f} acc in test dataset")

    return test_acc
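# Hypothetical usage sketch of eval_models (the checkpoint glob pattern and
# data directory are assumptions for illustration, not paths from the
# original project):
import glob

accuracies = eval_models(sorted(glob.glob("checkpoints/*.pt")), "./data")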
def main():
    url = StringArgumentParser().get_users_input()
    title = ArticleAPIService(url).get_title_of_html()
    search_text = TextProcessor(title).get_sentence()
    apiservice = TwitterAPIService(search_text)
    apiservice.search_tweets_and_save()
    Solver(apiservice.get_tweets_list(), search_text).determine_genuity()
def solve(solver_name, problem_name, options):
    'Solve the problem by solver with options.'

    # Set cpp_compiler options.
    parameters["form_compiler"]["cpp_optimize"] = True

    # Set debug level.
    set_log_active(options['debug'])

    # Set refinement level.
    options['N'] = mesh_sizes[options['refinement_level']]

    # Create problem and solver.
    problem = Problem(problem_name, options)
    solver = Solver(solver_name, options)
    time_step = solver.get_timestep(problem)[0]

    if MPI.process_number() == 0 and options['verbose']:
        print('Problem: ' + str(problem))
        print('Solver: ' + str(solver))

    # Solve the problem with the solver.
    wct = time.time()
    u, p = solver.solve(problem)

    # Compute elapsed time.
    wct = time.time() - wct

    # Compute the number of degrees of freedom.
    num_dofs = u.vector().size() + p.vector().size()

    # Get the mesh size.
    mesh_size = u.function_space().mesh().hmin()

    # Get functional value and error.
    functional, error = solver.eval()

    # Save results.
    cpu_time = solver.cputime()
    save_results(problem, solver, num_dofs, mesh_size, time_step, functional, error)

    return 0
def initialize_for_solve(self):
    # INFO(4) is an integer array that specifies how the problem is to be solved.
    self.info = np.zeros(4, int)
    self.info[0] = 1  # Compute solution at each time point

    if hasattr(self, 'spcrad_f77') or hasattr(self, 'spcrad'):
        self.info[1] = 1  # SPCRAD routine is supplied
    else:
        self.spcrad = lambda x, y: 0.0  # dummy function

    # Is the Jacobian constant?
    self.info[2] = self.jac_constant

    if np.iterable(self.atol) and (len(self.atol) == self.neq):
        self.info[3] = 1  # ATOL is a sequence of length NEQ

    if hasattr(self, 'f'):
        # If f is input in the form of a Python function f(u,t),
        # let self.f_f77 wrap f and take the arguments t, u.
        f = self.f
        self.f_f77 = lambda t, u: np.asarray(f(u, t))
    elif hasattr(self, 'f_f77'):
        # The right-hand side "f" is input as a Fortran function taking the
        # arguments t, u. Set self.f to f_f77 wrapped to the general form
        # f(u,t) for switch_to().
        f_f77 = self.f_f77
        self.f = lambda u, t: np.asarray(f_f77(t, u))

    # If spcrad is input in the form spcrad(u,t), wrap it to spcrad_f77 for
    # the Fortran code.
    if hasattr(self, 'spcrad'):
        spcrad = self.spcrad
        self.spcrad_f77 = lambda t, u: np.asarray(spcrad(u, t))

    # Call Solver (not Adaptive) here because Adaptive just computes
    # first_step, min_step and max_step, none of which are used by rkc.f.
    Solver.initialize_for_solve(self)  # Common settings
def main(fname, means, rnd_seed=1):
    random.seed(rnd_seed)
    n_arms = len(means)
    random.shuffle(means)
    arms = list(map(lambda mu: BernoulliArm(mu), means))
    print("Best arm is " + str(Solver.ind_max(means)))

    with open(fname, "w") as f:
        for temperature in [0.1, 0.2, 0.3, 0.4, 0.5]:
            algo = Softmax(temperature, [], [])
            algo.initialize(n_arms)
            results = test_algorithm(algo, arms, 5000, 250)
            for i in range(len(results[0])):
                f.write(str(temperature) + "\t")
                f.write("\t".join([str(results[j][i]) for j in range(len(results))]) + "\n")
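# A minimal sketch of the softmax (Boltzmann) selection rule that the Softmax
# bandit algorithm above is built on (pure numpy; Softmax and BernoulliArm
# themselves are defined elsewhere). Lower temperatures concentrate the
# selection probability on the arm with the highest estimated value.
import numpy as np

def softmax_probs(values, temperature):
    z = np.exp(np.asarray(values) / temperature)
    return z / z.sum()

print(softmax_probs([0.1, 0.1, 0.9], temperature=0.1))  # nearly one-hot
print(softmax_probs([0.1, 0.1, 0.9], temperature=1.0))  # closer to uniform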
def test_case_3(self):
    # Prepare the raw problem.
    variables = [0, 1, 2, 3]
    objectives = [{0: -10, 1: -5, 2: -1, 3: -5}, {0: 3, 1: 2, 2: 3, 3: 0}]
    constraints = dict()

    # Prepare the solvers.
    for solver_name in self.binary_solver:
        if solver_name == 'epsilon':
            problem = NextReleaseProblem.MOIP(
                variables, objectives, constraints,
                dict(), dict()
            )
        else:
            constraints = JNRP.regularize_constraints(constraints, len(variables))
            problem = JNRP(variables, objectives, constraints)
        solver = Solver(solver_name)

        # Prepare and solve.
        solver.load(problem)
        solver.execute()

        # Get the solutions.
        solutions = solver.solutions()
        print(solver_name + '\t\t' + str(len(solutions)))
        self.display(solutions, 5)
def test_case_2(self):
    # Prepare the raw problem.
    variables = [0, 1, 2, 3]
    objectives = [{0: -10, 1: -5, 2: -1, 3: 5}]
    constraints = [{2: 1, 4: 0}, {0: 1, 3: -1, 4: 0}]

    # Prepare the solvers.
    for solver_name in self.single_solver:
        if solver_name == 'single':
            problem = NextReleaseProblem.MOIP(
                variables, objectives, constraints,
                ['L' for _ in range(len(constraints))], dict()
            )
        else:
            constraints = JNRP.regularize_constraints(constraints, len(variables))
            problem = JNRP(variables, objectives, constraints)
        solver = Solver(solver_name)

        # Prepare and solve.
        solver.load(problem)
        solver.execute()

        # Get the solutions.
        solutions = solver.solutions()
        print(solver_name + '\t\t' + str(len(solutions)))
        self.display(solutions, 5)
args = parser.parse_args()
args, lg = parse(args)

# Tensorboard save directory.
resume = args['solver']['resume']
tensorboard_path = 'Tensorboard/{}'.format(args['name'])
if resume == False:
    if osp.exists(tensorboard_path):
        shutil.rmtree(tensorboard_path, True)
        lg.info('Remove dir: [{}]'.format(tensorboard_path))
writer = SummaryWriter(tensorboard_path)

# Create the datasets.
train_data = DIV2K(args['datasets']['train'])
lg.info('Create train dataset successfully!')
lg.info('Training: [{}] iterations for each epoch'.format(len(train_data)))

val_data = DIV2K(args['datasets']['val'])
lg.info('Create val dataset successfully!')
lg.info('Validating: [{}] iterations for each epoch'.format(len(val_data)))

# Create the solver.
lg.info('Preparing for experiment: [{}]'.format(args['name']))
solver = Solver(args, train_data, val_data, writer)

# Train.
lg.info('Start training...')
solver.train()
class Runner:
    # initialize
    def __init__(self, configs: List[ConfigType], out_path: str):
        # unchanged member
        self.__result = dict()
        # prepare const members: jmetal solvers
        self.jmetal_solvers = MOEA_METHOD
        # prepare members
        self.__project: str = None
        self.__method: str = None
        self.__form: str = None
        self.__nrp: NextReleaseProblem = None
        self.__problem: ProblemType = None
        self.__solver: Solver = None
        self.__solutions: Set[Any] = None
        # config should be a list
        assert isinstance(configs, list)
        # create out_path if it does not exist
        os.makedirs(os.path.dirname(out_path), exist_ok=True)
        # run the configurations
        for one_config in configs:
            # get the ite_num from the config
            ite_num = one_config['ite_num']
            del one_config['ite_num']
            assert ite_num > 0
            # config name
            config_name = self.name(**one_config)
            print(config_name + ' will run ' + str(ite_num) + ' times')
            # check the config
            if not self.check_config(**one_config):
                print('FATAL: illegal config input ' + config_name)
                continue
            # each config runs ite_num times
            for ind in range(ite_num):
                # run this config
                print('round: ', ind)
                self.run_once(one_config, config_name, ind)
                # dump solutions each round
                self.dump_once(out_path, config_name, ind)
            # print a message once every round of this config has ended
            print('\r\t\t\t\t\t\t\t\t\t\t\t\t\r' + config_name + ' finished')
        # dump the result (uses the last config's ite_num)
        self.dump(out_path, ite_num, False)

    # check config
    def check_config(self, project_name: str, form: str, method: str,
                     option: Dict[str, Any] = None) -> bool:
        check = True
        # check the config
        if project_name not in ALL_FILES_DICT:
            check = False
        if form not in NRP_FORMS:
            check = False
        if method not in SOLVING_METHOD:
            check = False
        # prepare the message
        message = 'config: project: ' + project_name + ' form: ' + form + ' method: ' + method
        if option:
            message += ' option: ' + str(option)
        if not check:
            print(message, ' fail')
            return False
        else:
            print(message, ' start')
            return True

    # clear
    def clear(self) -> None:
        self.__project = None
        self.__method = None
        self.__form = None
        self.__nrp = None
        self.__problem = None
        self.__solver = None
        self.__solutions = None

    # run once
    def run_once(self, config: ConfigType, name: str, ind: int) -> None:
        # this config name
        print(name + ' round: ' + str(ind))
        # clear all members
        self.clear()
        # prepare the config
        self.prepare_once(**config)
        # run
        elapsed_time = self.run()
        # collect results
        self.__solutions = self.__solver.solutions()
        if ind == 0:
            # first round, initialize
            self.__result[name] = dict()
        # record
        self.__result[name][str(ind)] = dict()
        self.__result[name][str(ind)]['runtime'] = elapsed_time
        self.__result[name][str(ind)]['solution number'] = len(self.__solutions)
        self.__result[name][str(ind)]['solutions'] = self.__solutions

    # prepare once
    def prepare_once(self, project_name: str, form: str, method: str,
                     option: Dict[str, Any] = None) -> None:
        self.__project = project_name
        self.__method = method
        self.__form = form
        if method in self.jmetal_solvers:
            problem_type = 'jmetal'
            # dump the config into the /dump/ folder
            Runner.dump_config(project_name, form, option)
        else:
            problem_type = 'default'
        self.__nrp = NextReleaseProblem(project_name, problem_type)
        self.__problem = self.__nrp.model(form, option)
        self.__solver = Solver(method, option)
        self.__solver.load(self.__problem)
        # empty solutions
        self.__solutions = None

    # dump solutions once
    def dump_once(self, out_path: str, name: str, ind: int):
        # exact output path
        exact_path = os.path.join(out_path, name)
        # check if the result folder exists
        if not os.path.exists(exact_path):
            os.makedirs(exact_path)
        # prepare the solution file
        file_name = os.path.join(exact_path, str(ind) + '.txt')
        assert name in self.__result
        assert str(ind) in self.__result[name]
        assert 'solutions' in self.__result[name][str(ind)]
        # write to the file
        with open(file_name, 'w') as file_out:
            for solution in list(self.__result[name][str(ind)]['solutions']):
                file_out.write(str(solution) + '\n')
        # delete the solutions from the checklist to save memory
        del self.__result[name][str(ind)]['solutions']
        # write a runtime info file
        file_name = os.path.join(exact_path, 'info_' + str(ind) + '.json')
        with open(file_name, 'w') as info_file:
            json_object = json.dumps(self.__result[name][str(ind)], indent=4)
            info_file.write(json_object)

    # run! just run!
    def run(self) -> float:
        start = time.perf_counter()  # time.clock() was removed in Python 3.8
        self.__solver.execute()
        end = time.perf_counter()
        return end - start

    # display solutions
    def display(self, mode: bool = False) -> None:
        # print the number of solutions found
        print('number of solutions found: ' + str(len(self.__solutions)))
        if not mode:
            return
        # print each solution
        for solution in self.__solutions:
            print(str(solution) + ', ')

    # dump jmetal config
    @staticmethod
    def dump_config(project_name: str, form: str, option: Dict[str, Any]) -> None:
        # prepare the new option dict
        neo_option = dict()
        for key, value in option.items():
            if not value:  # None
                neo_option[key] = 'n'
            elif isinstance(value, int):  # int
                neo_option[key] = 'i' + str(value)
            elif isinstance(value, float):  # float
                neo_option[key] = 'f' + str(value)
            else:
                assert False
        json_object = json.dumps(neo_option, indent=4)
        # create DUMP_PATH if it does not exist
        os.makedirs(DUMP_PATH, exist_ok=True)
        # prepare the file name
        file_name = os.path.join(DUMP_PATH, 'config_' + project_name + '_' + form + '.json')
        # create the config file
        with open(file_name, 'w+') as file_out:
            file_out.write(json_object)

    # config name
    @staticmethod
    def name(project_name: str, form: str, method: str, option: Dict[str, Any] = None) -> str:
        name_str = project_name + '_' + form + '_' + method
        option_str = ''
        if option:
            for k, v in option.items():
                option_str += '_' + str(k) + str(v)
        return name_str + option_str

    # parse a name back into a config
    # Note that if there are options, this just returns the raw strings
    # because their structure cannot be known here.
    @staticmethod
    def dename(name: str) -> Dict[str, Any]:
        # name should be a string
        assert isinstance(name, str)
        # parse the project name first
        project_name = None
        for project in ALL_FILES_DICT:
            if name.startswith(project):
                project_name = project
                name = name[len(project) + 1:]
        assert project_name
        # split by '_'
        args = name.split('_')
        assert len(args) >= 2
        # note that if length == 2, args[2:] will be an empty list
        return {'project': project_name, 'form': args[0],
                'method': args[1], 'option': args[2:]}

    # dump all solutions
    def dump(self, out_path: str, ite_num: int, write_solutions: bool = False) -> None:
        # the result folder should already be there
        assert os.path.exists(os.path.dirname(out_path))
        # write solutions if the mode is True
        if write_solutions:
            for name, content in self.__result.items():
                # prepare each result folder
                exact_path = os.path.join(out_path, name)
                os.makedirs(exact_path, exist_ok=True)
                for ind in range(ite_num):
                    # simple check (results are keyed by the stringified round index)
                    assert str(ind) in content
                    assert 'solutions' in content[str(ind)]
                    # prepare the file and write into it
                    with open(os.path.join(exact_path, str(ind) + '.txt'), 'w+') as solution_file:
                        for solution in list(content[str(ind)]['solutions']):
                            solution_file.write(str(solution) + '\n')
                    del content[str(ind)]['solutions']
        # write the checklist
        with open(os.path.join(out_path, 'checklist.json'), 'w+') as checklist_file:
            json_object = json.dumps(self.__result, indent=4)
            checklist_file.write(json_object)
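# Hypothetical usage sketch of the Runner class above. The project name,
# form, and method values are placeholders, not names from the original
# project; real values must come from ALL_FILES_DICT, NRP_FORMS, and
# SOLVING_METHOD. Each config runs ite_num rounds and dumps its solutions.
configs = [
    {'ite_num': 2, 'project_name': 'example-project',
     'form': 'single', 'method': 'single', 'option': None},
]
Runner(configs, './results/')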
def __init__(self, model):
    Solver.__init__(self, model)
    self.belief_points = None
    self.alpha_vecs = None
    self.solved = False
def __init__(self, matches=10):
    Solver.__init__(self, matches)
    self.matches = matches
    self.base = [x for x in range(self.matches)]
    print('Random solver initialized!')
loss_name = loss.__class__.__name__
print(f"Loss: {loss_name}\n")

for noise_value in noise_values:
    # Run the experiments.
    name = f'CNN_{loss_name}_{tp_noise}_{noise_value}'
    print(f"Training {name} with noise of type {tp_noise} and probability {noise_value}...")

    # Data preparation.
    dataset = FashionMnistHandler(data_dir, False)
    dataset.load()
    train_loader, val_loader, test_loader = dataset.get_noisy_loaders(p_noise=noise_value,
                                                                      type_noise=tp_noise,
                                                                      val_size=1 / 6,
                                                                      train_batch_size=batch_size,
                                                                      val_batch_size=128,
                                                                      test_batch_size=128)

    # Model, optimizer, summary.
    model = CNNModel()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    summ = Summary(name, type_noise=tp_noise, noise_rate=noise_value)
    solver = Solver(name, PROJECT_DIR, batch_model_dir, batch_summaries_dir,
                    model, optimizer, loss, summ,
                    train_loader, val_loader, test_loader)

    solver.pretrain()
    solver.train(loss)
    print("Completed training...")
def run(self, globdat):
    # ADVANCE
    logging.info("Advancing to the next load step")

    globdat.i += 1
    ndof = globdat.get("ndof")
    globdat.set("du", np.zeros(ndof))
    globdat.set("Du", np.zeros(ndof))
    globdat.set("fext", np.zeros(ndof))
    globdat.set("fint", np.zeros(ndof))
    globdat.set("loadScale", globdat.i)

    globdat.model.takeAction(Action.ADVANCE, globdat)
    globdat.model.takeAction(Action.GET_MATRIX_0, globdat)
    globdat.model.takeAction(Action.GET_EXT_VECTOR, globdat)
    globdat.model.takeAction(Action.GET_CONSTRAINTS, globdat)

    mbuild = globdat.get("mbuild")
    fext = globdat.get("fext")
    fint = globdat.get("fint")
    cons = globdat.get("cons")
    disp = globdat.get("solu")

    old_disp = deepcopy(disp)
    cons.updateSolution(disp)
    du = globdat.set("du", disp - old_disp)
    Du = globdat.set("Du", deepcopy(du))

    K = mbuild.getDenseMatrix()
    r = fext - fint - np.array(K).dot(Du)

    solver = Solver(self.type, cons)
    fdof = cons.getFdof()
    nrm1 = 0.0

    for iter in range(self.niter):
        # Update the displacement vector.
        solver.solve(K, du, r, mbuild.hbw)
        disp[fdof] += du[fdof]
        Du[fdof] += du[fdof]

        # Find the internal force vector.
        globdat.set("fint", np.zeros(ndof))
        globdat.model.takeAction(self.action, globdat)

        # Find the out-of-balance force vector.
        r = fext - globdat.get("fint")
        nrm = norm(r[fdof])
        logging.info("    Iteration {}: norm = {:.10f}".format(iter, nrm))

        # Check convergence.
        if (iter == 0 and nrm <= self.tiny) or (iter > 0 and nrm < self.tol * nrm1):
            logging.info("    Converged in {} iterations".format(iter + 1))
            globdat.model.takeAction(Action.COMMIT, globdat)
            return Status.OK
        elif iter == 0 and nrm > self.tiny:
            nrm1 = deepcopy(nrm)

    return Status.EXIT
def run(self, globdat):
    # ADVANCE
    logging.info("Advancing to the next load step")

    globdat.i += 1
    ndof = globdat.get("ndof")
    globdat.set("fext", np.zeros(ndof))
    globdat.set("loadScale", globdat.i)

    globdat.model.takeAction(Action.ADVANCE, globdat)
    globdat.model.takeAction(Action.GET_MATRIX_0, globdat)
    globdat.model.takeAction(Action.GET_EXT_VECTOR, globdat)
    globdat.model.takeAction(Action.GET_CONSTRAINTS, globdat)

    mbuild = globdat.get("mbuild")
    fext = globdat.get("fext")
    fint = globdat.get("fint")
    cons = globdat.get("cons")
    disp = globdat.get("solu")

    old_disp = deepcopy(disp)
    cons.updateSolution(disp)
    du = disp - old_disp

    K = mbuild.getDenseMatrix()
    r = fext - fint - np.array(K).dot(du)

    solver = Solver(self.type, cons)
    fdof = cons.getFdof()

    for iter in range(self.niter):
        # Update the displacement vector.
        solver.solve(K, du, r, mbuild.hbw)
        disp[fdof] += du[fdof]

        # Find the internal force vector.
        globdat.set("fint", np.zeros(ndof))
        if self.nrkey == "full":
            globdat.model.takeAction(Action.GET_MATRIX_0, globdat)
        elif self.nrkey == "mod" or self.nrkey == "LE":
            globdat.model.takeAction(Action.GET_INT_VECTOR, globdat)
        else:
            raise ValueError("{} not implemented !".format(self.nrkey))

        # Find the out-of-balance force vector.
        r = fext - globdat.get("fint")
        nrm = norm(r[fdof])
        logging.info("    Iteration {}: norm = {:.10f}".format(iter, nrm))

        # Check convergence in the first iteration.
        if iter == 0 and nrm <= self.tiny:
            logging.info("    Converged in {} iterations".format(iter + 1))
            return Status.OK
        elif iter == 0 and nrm > self.tiny:
            nrm1 = deepcopy(nrm)

        # Check convergence in later iterations.
        if nrm < self.tol * nrm1:
            logging.info("    Converged in {} iterations".format(iter + 1))
            return Status.OK
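# A minimal, self-contained sketch of the relative stopping rule used in the
# Newton-Raphson loops above (pure Python; globdat, Solver, etc. are not
# needed): iterate until the residual norm drops below tol times the first
# residual norm. Here Newton's method solves f(x) = x**2 - 2 = 0.
def newton_sqrt2(x=1.0, tol=1e-8, niter=20):
    nrm1 = None
    for it in range(niter):
        r = x * x - 2.0           # residual
        nrm = abs(r)
        if it == 0:
            nrm1 = nrm            # reference norm from the first iteration
        elif nrm < tol * nrm1:    # relative convergence check
            return x, it
        x -= r / (2.0 * x)        # Newton update with Jacobian 2x
    return x, niter

print(newton_sqrt2())  # ~ (1.41421356..., converged in a few iterations)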
def __init__(self, matches=10):
    Solver.__init__(self, matches)
    self.matches = matches
    self.reset()
    print('Bumblesort solver initialized!')
def h_convergence(config: ConfigParser, solver: Solver, sol: GridFunction, var: str) -> None:
    """
    Function to check h (mesh element size) convergence and print results.

    Args:
        config: Config file from which to grab configuration parameters.
        solver: The solver used.
        sol: Gridfunction that contains the current solution.
        var: The variable of interest.
    """
    num_refinements = config.get_item(['ERROR ANALYSIS', 'num_refinements'], int)
    average_lst = config.get_list(['ERROR ANALYSIS', 'error_average'], str, quiet=True)
    component = solver.model.model_components[var]
    average = component in average_lst

    # Reload the model's mesh and finite element space so convergence tests can
    # be chained and don't affect each other.
    solver.model.load_mesh_fes(mesh=True, fes=True)

    # The first solve used the default settings.
    if component is None:
        err = norm('l2_norm', sol, solver.model.ref_sol['ref_sols'][var], solver.model.mesh,
                   solver.model.fes, average)
    else:
        err = norm('l2_norm', sol.components[component], solver.model.ref_sol['ref_sols'][var],
                   solver.model.mesh, solver.model.fes.components[component], average)

    # Track the convergence information.
    num_dofs_lst = [solver.model.fes.ndof]
    error_lst = [err]

    # Then run through a series of mesh refinements and re-solve on each
    # refined mesh.
    for n in range(num_refinements):
        solver.model.mesh.Refine()
        solver.model.fes.Update()
        solver.reset_model()

        sol = solver.solve()

        if component is None:
            err = norm('l2_norm', sol, solver.model.ref_sol['ref_sols'][var], solver.model.mesh,
                       solver.model.fes, average)
        else:
            err = norm('l2_norm', sol.components[component], solver.model.ref_sol['ref_sols'][var],
                       solver.model.mesh, solver.model.fes.components[component], average)

        num_dofs_lst.append(solver.model.fes.ndof)
        error_lst.append(err)

        print('L2 norm at refinement {0}: {1}'.format(n, err))

    # Display the results nicely.
    convergence_table = [['Refinement Level', 'DOFs', 'Error', 'Convergence Rate']]
    convergence_table.append([1, num_dofs_lst[0], error_lst[0], 0])

    for n in range(num_refinements):
        ref_level = '1/{}'.format(int(2 ** (n + 1)))
        convergence_rate = math.log(error_lst[n] / error_lst[n + 1]) \
            / math.log(num_dofs_lst[n + 1] / (num_dofs_lst[n] * 2.0 ** (solver.model.mesh.dim - 1)))
        convergence_table.append([ref_level, num_dofs_lst[n + 1], error_lst[n + 1], convergence_rate])

    print(tabulate.tabulate(convergence_table, headers='firstrow', floatfmt=('.1f', '.1f', '.3e', '.2f')))
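# A minimal numeric sketch of the h-convergence rate formula used above
# (made-up values for illustration). Each uniform refinement of a 2D mesh
# roughly quadruples the DOF count, so dividing by 2**(dim - 1) makes the
# denominator behave like log(h_n / h_{n+1}) and the rate reads as
# "error ~ h**rate".
import math

dim = 2
errors = [4.0e-3, 1.0e-3]   # hypothetical errors before/after one refinement
num_dofs = [1000, 4000]     # hypothetical DOFs (quadrupled by refinement in 2D)

rate = math.log(errors[0] / errors[1]) \
    / math.log(num_dofs[1] / (num_dofs[0] * 2.0 ** (dim - 1)))
print(rate)  # ~2.0: second-order convergence in h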
logging.info("\nModel info:\n{}".format(model)) if args.continue_training: logging.info("Load package from {}.".format( os.path.join(trainingconfig["exp_dir"], "last.pt"))) pkg = torch.load(os.path.join(trainingconfig["exp_dir"], "last.pt")) model.restore(pkg["model"]) elif trainingconfig['pretrained_model']: logging.info("Load package from {}.".format( trainingconfig['pretrained_model'])) pkg = torch.load(trainingconfig['pretrained_model']) model.restore(pkg["model"], without_fc=True) trainingconfig['init_lr'] *= 0.1 if "multi_gpu" in trainingconfig and trainingconfig["multi_gpu"] == True: logging.info("Let's use {} GPUs!".format(torch.cuda.device_count())) model = torch.nn.DataParallel(model) if torch.cuda.is_available(): model = model.cuda() solver = Solver(model, trainingconfig, tr_loader, cv_loader) if args.continue_training: logging.info("Restore solver states...") solver.restore(pkg) logging.info("Start training...") solver.train() logging.info("Total time: {:.4f} secs".format(timer.toc()))
def __init__(self, matches=10):
    Solver.__init__(self, matches)
    self.base = [x for x in range(matches)]
    self.generator = itertools.permutations(self.base)
    print('Tryhard solver initialized!')
#py_impls = ["nonblocked", "blocked_classic", "blocked_li"] pyImpls = ["classical", "li"] use_py_modules = dict() use_py_modules['classical'] = True use_py_modules['li'] = True for solverType in pyImpls: print("\n------------\nSolving the " + matType + " matrix using the " + solverType + " solver\n------------\n") # Build solver params_solver = dict() params_solver['maxiters'] = 100 params_solver['verbosity'] = overall_verbosity params_solver['tol'] = 1e-6 params_solver['test_after_solve'] = True params_solver['use_py_modules'] = use_py_modules[solverType] params_solver['block_krylov_type'] = solverType kSolver = Solver.create('krylov', solverType, params_solver) sol,execTime = kSolver.solve(A,B,x0) print("Execution time for the <" + solverType + "> solver: " + str(execTime)) #------------------------------------------------------------- # TODO # Call all the cpp implementation