def run(self, globdat):
    """Advance to the next load step and solve the linearized system once.

    Reads/writes the shared ``globdat`` store: increments the step counter,
    zeroes the external force vector, triggers the model's ADVANCE /
    GET_MATRIX_0 / GET_EXT_VECTOR / GET_CONSTRAINTS actions, then performs a
    single linear solve for the displacement increment.

    :param globdat: global data container holding the model, matrix builder,
        constraints and solution vectors.
    :return: ``Status.EXIT`` unconditionally.
    """
    # ADVANCE
    logging.info("Advancing to the next load step")
    globdat.i += 1
    ndof = globdat.get("ndof")
    globdat.set("fext", np.zeros(ndof))
    globdat.set("loadScale", globdat.i)
    globdat.model.takeAction(Action.ADVANCE, globdat)
    globdat.model.takeAction(Action.GET_MATRIX_0, globdat)
    globdat.model.takeAction(Action.GET_EXT_VECTOR, globdat)
    globdat.model.takeAction(Action.GET_CONSTRAINTS, globdat)

    # INITIAL RESIDUAL
    mbuild = globdat.get("mbuild")
    fext = globdat.get("fext")
    cons = globdat.get("cons")
    disp = globdat.get("solu")
    # NOTE(review): ndof is re-fetched here although it was read above —
    # redundant but harmless.
    ndof = globdat.get("ndof")
    old_disp = deepcopy(disp)
    # Apply prescribed (constrained) values directly into the solution vector.
    cons.updateSolution(disp)
    # NOTE(review): assumes globdat.set(...) returns the stored value — confirm.
    Du = globdat.set("Du", disp - old_disp)
    K = mbuild.getDenseMatrix()
    # Residual of the linear system for the prescribed-displacement increment.
    r = fext - np.array(K).dot(Du)
    solver = Solver(self.type, cons)
    # Solve K * disp = r in place; hbw is presumably the half bandwidth — verify.
    solver.solve(K, disp, r, mbuild.hbw)
    return Status.EXIT
def main():
    """Fetch an article title from a user-supplied URL, search Twitter for a
    sentence derived from it, and judge the claim's genuity."""
    input_url = StringArgumentParser().get_users_input()
    article = ArticleAPIService(input_url)
    page_title = article.get_title_of_html()
    query = TextProcessor(page_title).get_sentence()
    twitter = TwitterAPIService(query)
    twitter.search_tweets_and_save()
    tweets = twitter.get_tweets_list()
    Solver(tweets, query).determine_genuity()
def calculation(self):
    """Run the configured solver once, best-effort.

    Builds a ``Solver`` from this object's method/data attributes and runs it.
    ``TypeError`` and ``ValueError`` raised by construction or solving are
    deliberately swallowed, preserving the original best-effort behavior.
    """
    # NOTE(review): the original wrapped this body in a `while True` loop in
    # which every path (success and both except clauses) ended in `break`, so
    # it executed exactly once; the dead loop and a pointless
    # `del calculator` were removed with no behavior change.
    try:
        calculator = Solver(self.type_method, self.xy, self.x)
        calculator.solve()
    except (TypeError, ValueError):
        # Invalid input or configuration aborts the calculation silently,
        # exactly as before.
        pass
def prepare_once(self, project_name : str, form : str, method : str, option : Dict[str, Any] = None) -> None:
    """Prepare a single run: remember the configuration, build the problem
    model and load it into a freshly constructed solver.

    :param project_name: name of the project to model.
    :param form: problem formulation passed to ``NextReleaseProblem.model``.
    :param method: solver method name; jmetal-backed methods get special
        handling (config dump, 'jmetal' problem kind).
    :param option: optional solver/model options dict.
    """
    self.__project = project_name
    self.__method = method
    self.__form = form
    # Renamed from `type` to avoid shadowing the builtin.
    if method in self.jmetal_solvers:
        problem_kind = 'jmetal'
        # dump config in /dump/ folder
        Runner.dump_config(project_name, form, option)
    else:
        problem_kind = 'default'
    self.__nrp = NextReleaseProblem(project_name, problem_kind)
    self.__problem = self.__nrp.model(form, option)
    self.__solver = Solver(method, option)
    self.__solver.load(self.__problem)
    # empty solutions
    self.__solutions = None
def solve(solver_name, problem_name, options):
    """Solve the problem by solver with options.

    Configures the form compiler and logging, creates the problem/solver
    pair, runs the solve, and records timings, dof counts, mesh size and the
    functional/error values via ``save_results``.

    :return: 0 on completion.
    """
    # Set cpp_compiler options
    parameters["form_compiler"]["cpp_optimize"] = True
    # Set debug level
    set_log_active(options['debug'])
    # Set refinement level
    options['N'] = mesh_sizes[options['refinement_level']]
    # Create problem and solver
    problem = Problem(problem_name, options)
    solver = Solver(solver_name, options)
    time_step = solver.get_timestep(problem)[0]
    # BUG FIX: the original used Python 2 `print` statements, which are a
    # SyntaxError under Python 3 (the rest of this file uses f-strings).
    if MPI.process_number() == 0 and options['verbose']:
        print('Problem: ' + str(problem))
        print('Solver: ' + str(solver))
    # Solve problem with solver (wall-clock timed)
    wct = time.time()
    u, p = solver.solve(problem)
    # Compute elapsed time
    wct = time.time() - wct
    # Compute number of degrees of freedom
    num_dofs = u.vector().size() + p.vector().size()
    # Get the mesh size
    mesh_size = u.function_space().mesh().hmin()
    # Get functional value and error
    functional, error = solver.eval()
    # Save results
    cpu_time = solver.cputime()
    save_results(problem, solver, num_dofs, mesh_size, time_step, functional, error)
    return 0
def test_case_3(self):
    """Run every binary solver on a small two-objective, unconstrained
    problem and display up to five of the solutions each finds."""
    # raw problem definition
    variables = [0, 1, 2, 3]
    objectives = [{0: -10, 1: -5, 2: -1, 3: -5},
                  {0: 3, 1: 2, 2: 3, 3: 0}]
    constraints = dict()
    for solver_name in self.binary_solver:
        # Build the problem in whichever form this solver expects.
        if solver_name != 'epsilon':
            constraints = JNRP.regularize_constraints(constraints, len(variables))
            problem = JNRP(variables, objectives, constraints)
        else:
            problem = NextReleaseProblem.MOIP(variables, objectives,
                                              constraints, dict(), dict())
        engine = Solver(solver_name)
        # prepare and solve
        engine.load(problem)
        engine.execute()
        # collect and report the solutions
        found = engine.solutions()
        print(solver_name + '\t\t' + str(len(found)))
        self.display(found, 5)
def test_case_2(self):
    """Run every single-objective solver on a small constrained problem and
    display up to five of the solutions each finds."""
    # raw problem definition
    variables = [0, 1, 2, 3]
    objectives = [{0: -10, 1: -5, 2: -1, 3: 5}]
    constraints = [{2: 1, 4: 0}, {0: 1, 3: -1, 4: 0}]
    for solver_name in self.single_solver:
        # Build the problem in whichever form this solver expects.
        if solver_name != 'single':
            constraints = JNRP.regularize_constraints(constraints, len(variables))
            problem = JNRP(variables, objectives, constraints)
        else:
            problem = NextReleaseProblem.MOIP(variables, objectives,
                                              constraints,
                                              ['L'] * len(constraints),
                                              dict())
        engine = Solver(solver_name)
        # prepare and solve
        engine.load(problem)
        engine.execute()
        # collect and report the solutions
        found = engine.solutions()
        print(solver_name + '\t\t' + str(len(found)))
        self.display(found, 5)
# Script fragment: restore model weights, set up (multi-)GPU execution, and
# run training via Solver. Relies on outer names: model, args, trainingconfig,
# tr_loader, cv_loader, timer.
logging.info("\nModel info:\n{}".format(model))

# Weight restoration: either resume a previous run from last.pt, or
# warm-start from a pretrained checkpoint (with the final FC layer dropped
# and a reduced initial learning rate).
if args.continue_training:
    logging.info("Load package from {}.".format(
        os.path.join(trainingconfig["exp_dir"], "last.pt")))
    pkg = torch.load(os.path.join(trainingconfig["exp_dir"], "last.pt"))
    model.restore(pkg["model"])
elif trainingconfig['pretrained_model']:
    logging.info("Load package from {}.".format(
        trainingconfig['pretrained_model']))
    pkg = torch.load(trainingconfig['pretrained_model'])
    # without_fc=True: skip the classifier head when warm-starting.
    model.restore(pkg["model"], without_fc=True)
    # Fine-tuning from pretrained weights uses a 10x smaller initial LR.
    trainingconfig['init_lr'] *= 0.1

if "multi_gpu" in trainingconfig and trainingconfig["multi_gpu"] == True:
    logging.info("Let's use {} GPUs!".format(torch.cuda.device_count()))
    model = torch.nn.DataParallel(model)

if torch.cuda.is_available():
    model = model.cuda()

solver = Solver(model, trainingconfig, tr_loader, cv_loader)

# When resuming, also restore the solver state (optimizer/schedule etc. —
# presumably; confirm against Solver.restore) from the same package.
if args.continue_training:
    logging.info("Restore solver states...")
    solver.restore(pkg)

logging.info("Start training...")
solver.train()
logging.info("Total time: {:.4f} secs".format(timer.toc()))
# Script fragment: for one loss function, sweep over noise probabilities and
# train a CNN on noisy Fashion-MNIST for each. Relies on outer names: loss,
# noise_values, tp_noise, data_dir, batch_size, lr, PROJECT_DIR,
# batch_model_dir, batch_summaries_dir.
loss_name = loss.__class__.__name__
print(f"Loss: {loss_name}\n")

for noise_value in noise_values:
    # RUN Experiments — experiment name encodes loss, noise type and rate.
    name = f'CNN_{loss_name}_{tp_noise}_{noise_value}'
    print(f"Training {name} with noise of type {tp_noise} and probability {noise_value}...")

    # data preparation (val_size=1/6 of the training split; fixed eval batch
    # sizes of 128)
    dataset = FashionMnistHandler(data_dir, False)
    dataset.load()
    train_loader, val_loader, test_loader = dataset.get_noisy_loaders(
        p_noise=noise_value,
        type_noise=tp_noise,
        val_size=1 / 6,
        train_batch_size=batch_size,
        val_batch_size=128,
        test_batch_size=128)

    # model, optimizer, summary
    model = CNNModel()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    summ = Summary(name, type_noise=tp_noise, noise_rate=noise_value)
    solver = Solver(name, PROJECT_DIR, batch_model_dir, batch_summaries_dir,
                    model, optimizer, loss, summ,
                    train_loader, val_loader, test_loader)
    # pretrain() runs before the main loop — presumably a warm-up phase;
    # confirm against Solver.
    solver.pretrain()
    solver.train(loss)
    print(f"Completed training...")
# Script fragment: parse config, prepare TensorBoard and DIV2K datasets, and
# launch super-resolution training. Relies on outer names: parser, parse,
# DIV2K, Solver, SummaryWriter, shutil, osp.
args = parser.parse_args()
args, lg = parse(args)

# Tensorboard save directory — wiped for a fresh (non-resumed) run.
resume = args['solver']['resume']
tensorboard_path = 'Tensorboard/{}'.format(args['name'])
# FIX: idiomatic truthiness test instead of `resume == False` (PEP 8 E712);
# assumes the config stores a boolean — behavior unchanged for bools.
if not resume:
    if osp.exists(tensorboard_path):
        # ignore_errors=True (was a bare positional True): best-effort removal.
        shutil.rmtree(tensorboard_path, ignore_errors=True)
        lg.info('Remove dir: [{}]'.format(tensorboard_path))
writer = SummaryWriter(tensorboard_path)

# create dataset
train_data = DIV2K(args['datasets']['train'])
lg.info('Create train dataset successfully!')
lg.info('Training: [{}] iterations for each epoch'.format(len(train_data)))

val_data = DIV2K(args['datasets']['val'])
lg.info('Create val dataset successfully!')
lg.info('Validating: [{}] iterations for each epoch'.format(len(val_data)))

# create solver
lg.info('Preparing for experiment: [{}]'.format(args['name']))
solver = Solver(args, train_data, val_data, writer)

# train
lg.info('Start training...')
solver.train()
def run(self, globdat):
    """Advance one load step and iterate Newton-Raphson until convergence.

    Triggers the model's ADVANCE / GET_MATRIX_0 / GET_EXT_VECTOR /
    GET_CONSTRAINTS actions, then repeatedly solves for displacement
    corrections and checks the out-of-balance force norm.

    :param globdat: global data container holding the model, matrix builder,
        constraints and solution vectors.
    :return: ``Status.OK`` on convergence, ``Status.EXIT`` if ``self.niter``
        iterations pass without converging.
    """
    # ADVANCE
    logging.info("Advancing to the next load step")
    globdat.i += 1
    ndof = globdat.get("ndof")
    globdat.set("fext", np.zeros(ndof))
    globdat.set("loadScale", globdat.i)
    globdat.model.takeAction(Action.ADVANCE, globdat)
    globdat.model.takeAction(Action.GET_MATRIX_0, globdat)
    globdat.model.takeAction(Action.GET_EXT_VECTOR, globdat)
    globdat.model.takeAction(Action.GET_CONSTRAINTS, globdat)

    mbuild = globdat.get("mbuild")
    fext = globdat.get("fext")
    fint = globdat.get("fint")
    cons = globdat.get("cons")
    disp = globdat.get("solu")

    old_disp = deepcopy(disp)
    cons.updateSolution(disp)
    du = disp - old_disp
    K = mbuild.getDenseMatrix()
    # Initial out-of-balance force for the prescribed-displacement increment.
    r = fext - fint - np.array(K).dot(du)
    solver = Solver(self.type, cons)
    fdof = cons.getFdof()
    # Reference norm from the first iteration; initialized defensively
    # (consistent with the sibling nonlinear module in this file).
    nrm1 = 0.0

    # NOTE(review): unlike the sibling module, no Action.COMMIT is issued on
    # convergence here — confirm whether a caller commits instead.
    for iter in range(self.niter):
        # Update displacement vector
        solver.solve(K, du, r, mbuild.hbw)
        disp[fdof] += du[fdof]
        # Find internal force vector
        globdat.set("fint", np.zeros(ndof))
        if self.nrkey == "full":
            globdat.model.takeAction(Action.GET_MATRIX_0, globdat)
        elif self.nrkey in ("mod", "LE"):
            globdat.model.takeAction(Action.GET_INT_VECTOR, globdat)
        else:
            raise ValueError("{} not implemented !".format(self.nrkey))
        # Find out-of-balance force vector
        r = fext - globdat.get("fint")
        nrm = norm(r[fdof])
        logging.info(" Iteration {}: norm = {:.10f} ".format(iter, nrm))
        # Check convergence in first iteration
        if iter == 0 and nrm <= self.tiny:
            logging.info(" Converged in {} iterations".format(iter + 1))
            return Status.OK
        elif iter == 0 and nrm > self.tiny:
            nrm1 = deepcopy(nrm)
        # Check convergence in later iterations
        if nrm < self.tol * nrm1:
            logging.info(" Converged in {} iterations".format(iter + 1))
            return Status.OK

    # BUG FIX: the original fell off the end of the loop and implicitly
    # returned None when the iteration limit was reached; report
    # non-convergence explicitly, consistent with the sibling module.
    return Status.EXIT
def run(self, globdat):
    """Advance one load step, iterate Newton-Raphson to convergence, and
    commit the converged state.

    Zeroes the incremental (``du``), total-step (``Du``), external and
    internal force vectors, triggers the model's ADVANCE / GET_MATRIX_0 /
    GET_EXT_VECTOR / GET_CONSTRAINTS actions, then iterates corrections
    until the out-of-balance norm drops below ``self.tol`` relative to the
    first iteration's norm.

    :param globdat: global data container holding the model, matrix builder,
        constraints and solution vectors.
    :return: ``Status.OK`` on convergence (after Action.COMMIT),
        ``Status.EXIT`` if ``self.niter`` iterations pass without converging.
    """
    # ADVANCE
    logging.info("Advancing to the next load step")
    globdat.i += 1
    ndof = globdat.get("ndof")
    globdat.set("du", np.zeros(ndof))
    globdat.set("Du", np.zeros(ndof))
    globdat.set("fext", np.zeros(ndof))
    globdat.set("fint", np.zeros(ndof))
    globdat.set("loadScale", globdat.i)
    globdat.model.takeAction(Action.ADVANCE, globdat)
    globdat.model.takeAction(Action.GET_MATRIX_0, globdat)
    globdat.model.takeAction(Action.GET_EXT_VECTOR, globdat)
    globdat.model.takeAction(Action.GET_CONSTRAINTS, globdat)

    mbuild = globdat.get("mbuild")
    fext = globdat.get("fext")
    fint = globdat.get("fint")
    cons = globdat.get("cons")
    disp = globdat.get("solu")

    old_disp = deepcopy(disp)
    # Apply prescribed (constrained) values directly into the solution vector.
    cons.updateSolution(disp)
    # NOTE(review): assumes globdat.set(...) returns the stored value — confirm.
    du = globdat.set("du", disp - old_disp)
    Du = globdat.set("Du", deepcopy(du))
    K = mbuild.getDenseMatrix()
    # Initial out-of-balance force for the prescribed-displacement increment.
    r = fext - fint - np.array(K).dot(Du)
    solver = Solver(self.type, cons)
    fdof = cons.getFdof()
    # Reference norm; set from the first iteration's residual below.
    nrm1 = 0.0

    for iter in range(self.niter):
        # Update displacement vector
        solver.solve(K, du, r, mbuild.hbw)
        disp[fdof] += du[fdof]
        Du[fdof] += du[fdof]
        # Find interal force vector
        globdat.set("fint", np.zeros(ndof))
        globdat.model.takeAction(self.action, globdat)
        # Find out-of-balance force vector
        r = fext - globdat.get("fint")
        nrm = norm(r[fdof])
        logging.info(" Iteration {}: norm = {:.10f} ".format(iter, nrm))
        # Check convergence: immediate (first residual already tiny) or
        # relative to the first iteration's norm afterwards.
        if (iter == 0 and nrm <= self.tiny) or (iter > 0 and nrm < self.tol * nrm1):
            logging.info(" Converged in {} iterations".format(iter + 1))
            globdat.model.takeAction(Action.COMMIT, globdat)
            return Status.OK
        elif iter == 0 and nrm > self.tiny:
            nrm1 = deepcopy(nrm)

    # Iteration limit reached without convergence.
    return Status.EXIT