def initialize(self):
    """Prepare Kratos model parts, output settings and GiD I/O interfaces.

    Reads the CUDS container, creates the fluid / particle / rigid-face /
    mixed model parts, registers the nodal variables on the fluid and
    particle parts, and builds one GiD output interface per sub-solver.
    Exactly one CFD PE and one GRANULAR_DYNAMICS PE must be present, each
    with at least one associated dataset.

    Raises:
        Exception: if the CUDS does not contain exactly one CFD PE and
            exactly one GRANULAR_DYNAMICS PE, or if a PE has no
            associated dataset.
    """
    cuds = self.get_cuds()

    # Prepare properties
    self.kratos_props = {0: KRTS.Properties(0), 1: KRTS.Properties(1)}

    self.fluid_model_part = KRTS.ModelPart("Fluid")
    self.spheres_model_part = KRTS.ModelPart("Particles")
    self.rigid_face_model_part = KRTS.ModelPart("P_Conditions")
    self.mixed_model_part = KRTS.ModelPart("Mixed")

    self.addNodalVariablesToModelpart(self.fluid_model_part)
    self.addNodalVariablesToModelpart(self.spheres_model_part)

    # Init settings for the GiD writers below
    VolumeOutput = True
    GiDPostMode = "Ascii"
    GiDWriteMeshFlag = True
    GiDWriteConditionsFlag = True
    GiDMultiFileFlag = "Single"

    # Out settings
    self.nodal_results = [
        "VELOCITY", "PRESSURE", "REACTION", "DISPLACEMENT"
    ]
    self.gauss_points_results = []
    self.gid_io_interfaces = {}

    # Get the CFD pe
    if cuds.count_of(item_type=CUBA.CFD) != 1:
        # BUG FIX: the original raised a bare string, which is a
        # TypeError in Python 3; wrap the message in an Exception,
        # consistent with the other checks in this method.
        raise Exception("KratosCFD only allows one CFD pe.")

    for cfd_pe in cuds.iter(item_type=CUBA.CFD):
        if len(cfd_pe.data[CUBA.DATA_SET]) < 1:
            raise Exception("CFD PE does not have any associated dataset")

        self.gid_io_interfaces["fluid"] = GiDOutput(
            "fluid_output", VolumeOutput, GiDPostMode, GiDMultiFileFlag,
            GiDWriteMeshFlag, GiDWriteConditionsFlag)

    # Get the DEM pe
    if cuds.count_of(item_type=CUBA.GRANULAR_DYNAMICS) != 1:
        raise Exception("KratosDEM only allows one GRANULAR_DYNAMICS pe.")

    for gd_pe in cuds.iter(item_type=CUBA.GRANULAR_DYNAMICS):
        if len(gd_pe.data[CUBA.DATA_SET]) < 1:
            raise Exception("GD PE does not have any associated dataset")

        self.gid_io_interfaces["particles"] = KRTS.GidIO(
            "particles_output",
            KRTS.GiDPostMode.GiD_PostAscii,
            KRTS.MultiFileFlag.SingleFile,
            KRTS.WriteDeformedMeshFlag.WriteDeformed,
            KRTS.WriteConditionsFlag.WriteConditions)

    self.initialized = False
def __createGiDIO(self, optimizationSettings):
    """Build and return a GiDOutput configured from *optimizationSettings*.

    The output path is <output_directory>/<design_history_filename>; the
    remaining constructor arguments come from the "output_format" section.
    """
    output = optimizationSettings["output"]
    output_format = output["output_format"]

    results_directory = output["output_directory"].GetString()
    history_filename = output["design_history_filename"].GetString()
    history_file_with_path = results_directory + "/" + history_filename

    return GiDOutput(history_file_with_path,
                     output_format["VolumeOutput"].GetBool(),
                     output_format["GiDPostMode"].GetString(),
                     output_format["GiDMultiFileFlag"].GetString(),
                     output_format["GiDWriteMeshFlag"].GetBool(),
                     output_format["GiDWriteConditionsFlag"].GetBool())
# Script fragment: finish adjoint-solver setup and initialize GiD output.
# NOTE(review): fluid_model_part, input_file_name, ProjectParameters and
# define_output are defined earlier in the enclosing script (not visible here).
adjoint_solver.Initialize()
print("adjoint solver created")
#
#
# Set adjoint flags
# NOTE(review): GetNodes(6)/GetNodes(4) select mesh groups by index;
# the meaning of indices 6 and 4 depends on the model file — confirm.
for node in fluid_model_part.GetNodes(6):
    node.Set(STRUCTURE,True)
for node in fluid_model_part.GetNodes(4):
    node.Set(BOUNDARY,True)

# initialize GiD I/O
from gid_output import GiDOutput
gid_io = GiDOutput(input_file_name,
                   ProjectParameters.VolumeOutput,
                   ProjectParameters.GiDPostMode,
                   ProjectParameters.GiDMultiFileFlag,
                   ProjectParameters.GiDWriteMeshFlag,
                   ProjectParameters.GiDWriteConditionsFlag)

# When not writing the full volume, output is restricted to cut planes.
if not ProjectParameters.VolumeOutput:
    cut_list = define_output.DefineCutPlanes()
    gid_io.define_cuts(fluid_model_part, cut_list)

gid_io.initialize_results(fluid_model_part)
# 33
# 33
# define the drag computation list
drag_list = define_output.DefineDragList()
drag_file_output_list = []
# NOTE(review): the body of this loop continues beyond this excerpt.
for it in drag_list:
def __init__(self,opt_model_part,config,analyzer):
    """Set up a vertex-morphing shape-optimization driver.

    Reads the design surface, registers the nodal solution-step variables
    needed for shape optimization, and builds the controller, mapper,
    optimization utilities and geometry utilities.

    Args:
        opt_model_part: Kratos ModelPart to hold the design surface.
        config: optimization settings (filter, step size, responses, ...).
        analyzer: callable that evaluates responses for a given design.
    """
    # For GID output
    self.gid_io = GiDOutput(config.design_surface_name,
                            config.VolumeOutput,
                            config.GiDPostMode,
                            config.GiDMultiFileFlag,
                            config.GiDWriteMeshFlag,
                            config.GiDWriteConditionsFlag)

    # Add variables needed for shape optimization
    opt_model_part.AddNodalSolutionStepVariable(NORMAL)
    opt_model_part.AddNodalSolutionStepVariable(NORMALIZED_SURFACE_NORMAL)
    opt_model_part.AddNodalSolutionStepVariable(OBJECTIVE_SENSITIVITY)
    opt_model_part.AddNodalSolutionStepVariable(OBJECTIVE_SURFACE_SENSITIVITY)
    opt_model_part.AddNodalSolutionStepVariable(MAPPED_OBJECTIVE_SENSITIVITY)
    opt_model_part.AddNodalSolutionStepVariable(CONSTRAINT_SENSITIVITY)
    opt_model_part.AddNodalSolutionStepVariable(CONSTRAINT_SURFACE_SENSITIVITY)
    opt_model_part.AddNodalSolutionStepVariable(MAPPED_CONSTRAINT_SENSITIVITY)
    opt_model_part.AddNodalSolutionStepVariable(DESIGN_UPDATE)
    opt_model_part.AddNodalSolutionStepVariable(DESIGN_CHANGE_ABSOLUTE)
    opt_model_part.AddNodalSolutionStepVariable(SEARCH_DIRECTION)
    opt_model_part.AddNodalSolutionStepVariable(SHAPE_UPDATE)
    opt_model_part.AddNodalSolutionStepVariable(SHAPE_CHANGE_ABSOLUTE)
    opt_model_part.AddNodalSolutionStepVariable(IS_ON_BOUNDARY)
    opt_model_part.AddNodalSolutionStepVariable(BOUNDARY_PLANE)
    opt_model_part.AddNodalSolutionStepVariable(SHAPE_UPDATES_DEACTIVATED)
    opt_model_part.AddNodalSolutionStepVariable(SENSITIVITIES_DEACTIVATED)

    # Read and print model part (design surface)
    buffer_size = 1
    model_part_io = ModelPartIO(config.design_surface_name)
    model_part_io.ReadModelPart(opt_model_part)
    opt_model_part.SetBufferSize(buffer_size)
    opt_model_part.ProcessInfo.SetValue(DOMAIN_SIZE,config.domain_size)
    print("\nThe following design surface was defined:\n\n",opt_model_part)

    # Set configurations
    self.config = config

    # Set analyzer
    self.analyzer = analyzer

    # Set response functions
    self.objectives = config.objectives
    self.constraints = config.constraints

    print("> The following objectives are defined:\n")
    for func_id in config.objectives:
        print(func_id,":",config.objectives[func_id],"\n")
    print("> The following constraints are defined:\n")
    for func_id in config.constraints:
        print(func_id,":",config.constraints[func_id],"\n")

    # Create controller object
    self.controller = Controller( config );

    # Model parameters
    self.opt_model_part = opt_model_part

    # Create mapper to map between geometry and design space
    self.mapper = VertexMorphingMapper( self.opt_model_part,
                                        self.config.filter_function,
                                        self.config.use_mesh_preserving_filter_matrix,
                                        self.config.filter_size,
                                        self.config.perform_edge_damping,
                                        self.config.damped_edges )

    # Toolbox to perform optimization
    self.opt_utils = OptimizationUtilities( self.opt_model_part,
                                            self.objectives,
                                            self.constraints,
                                            self.config.step_size,
                                            self.config.normalize_search_direction )

    # Toolbox to pre & post process geometry data
    self.geom_utils = GeometryUtilities( self.opt_model_part )
class VertexMorphingMethod:
    """Driver for vertex-morphing shape optimization.

    Wires together the design-surface model part, the sensitivity mapper,
    the optimization utilities and GiD output, and runs one of three
    algorithms: steepest descent, augmented Lagrange, or penalized
    projection.

    NOTE(review): the augmented-Lagrange path calls methods on
    ``self.vm_utils`` (initialize_augmented_lagrange, get_penalty_fac,
    get_lambda, udpate_augmented_lagrange_parameters) but no ``vm_utils``
    attribute is ever assigned in ``__init__`` — confirm whether this
    should be ``self.opt_utils`` or whether vm_utils is set elsewhere.
    """

    # --------------------------------------------------------------------------
    def __init__(self,opt_model_part,config,analyzer):
        """Read the design surface, register the optimization variables
        and build the controller, mapper and tool-box objects.

        Args:
            opt_model_part: Kratos ModelPart holding the design surface.
            config: optimization settings object.
            analyzer: callable evaluating responses for a given design.
        """
        # For GID output
        self.gid_io = GiDOutput(config.design_surface_name,
                                config.VolumeOutput,
                                config.GiDPostMode,
                                config.GiDMultiFileFlag,
                                config.GiDWriteMeshFlag,
                                config.GiDWriteConditionsFlag)

        # Add variables needed for shape optimization
        opt_model_part.AddNodalSolutionStepVariable(NORMAL)
        opt_model_part.AddNodalSolutionStepVariable(NORMALIZED_SURFACE_NORMAL)
        opt_model_part.AddNodalSolutionStepVariable(OBJECTIVE_SENSITIVITY)
        opt_model_part.AddNodalSolutionStepVariable(OBJECTIVE_SURFACE_SENSITIVITY)
        opt_model_part.AddNodalSolutionStepVariable(MAPPED_OBJECTIVE_SENSITIVITY)
        opt_model_part.AddNodalSolutionStepVariable(CONSTRAINT_SENSITIVITY)
        opt_model_part.AddNodalSolutionStepVariable(CONSTRAINT_SURFACE_SENSITIVITY)
        opt_model_part.AddNodalSolutionStepVariable(MAPPED_CONSTRAINT_SENSITIVITY)
        opt_model_part.AddNodalSolutionStepVariable(DESIGN_UPDATE)
        opt_model_part.AddNodalSolutionStepVariable(DESIGN_CHANGE_ABSOLUTE)
        opt_model_part.AddNodalSolutionStepVariable(SEARCH_DIRECTION)
        opt_model_part.AddNodalSolutionStepVariable(SHAPE_UPDATE)
        opt_model_part.AddNodalSolutionStepVariable(SHAPE_CHANGE_ABSOLUTE)
        opt_model_part.AddNodalSolutionStepVariable(IS_ON_BOUNDARY)
        opt_model_part.AddNodalSolutionStepVariable(BOUNDARY_PLANE)
        opt_model_part.AddNodalSolutionStepVariable(SHAPE_UPDATES_DEACTIVATED)
        opt_model_part.AddNodalSolutionStepVariable(SENSITIVITIES_DEACTIVATED)

        # Read and print model part (design surface)
        buffer_size = 1
        model_part_io = ModelPartIO(config.design_surface_name)
        model_part_io.ReadModelPart(opt_model_part)
        opt_model_part.SetBufferSize(buffer_size)
        opt_model_part.ProcessInfo.SetValue(DOMAIN_SIZE,config.domain_size)
        print("\nThe following design surface was defined:\n\n",opt_model_part)

        # Set configurations
        self.config = config

        # Set analyzer
        self.analyzer = analyzer

        # Set response functions
        self.objectives = config.objectives
        self.constraints = config.constraints

        print("> The following objectives are defined:\n")
        for func_id in config.objectives:
            print(func_id,":",config.objectives[func_id],"\n")
        print("> The following constraints are defined:\n")
        for func_id in config.constraints:
            print(func_id,":",config.constraints[func_id],"\n")

        # Create controller object
        self.controller = Controller( config );

        # Model parameters
        self.opt_model_part = opt_model_part

        # Create mapper to map between geometry and design space
        self.mapper = VertexMorphingMapper( self.opt_model_part,
                                            self.config.filter_function,
                                            self.config.use_mesh_preserving_filter_matrix,
                                            self.config.filter_size,
                                            self.config.perform_edge_damping,
                                            self.config.damped_edges )

        # Toolbox to perform optimization
        self.opt_utils = OptimizationUtilities( self.opt_model_part,
                                                self.objectives,
                                                self.constraints,
                                                self.config.step_size,
                                                self.config.normalize_search_direction )

        # Toolbox to pre & post process geometry data
        self.geom_utils = GeometryUtilities( self.opt_model_part )

    # --------------------------------------------------------------------------
    def optimize(self):
        """Run the configured optimization algorithm and write GiD results.

        Initializes GiD output with the baseline design (iteration 0),
        dispatches to the selected algorithm, then finalizes output and
        reports total wall-clock time.
        """
        print("\n> ==============================================================================================================")
        print("> Starting optimization using the following algorithm: ",self.config.optimization_algorithm)
        print("> ==============================================================================================================\n")

        # Print time stamp
        print(time.ctime())

        # Start timer and assign to object such that total time of opt may be measured at each step
        self.opt_start_time = time.time()

        # Initialize design output in GID Format and print baseline design
        self.gid_io.initialize_results(self.opt_model_part)
        self.gid_io.write_results(0, self.opt_model_part, self.config.nodal_results, [])

        # Call for for specified optimization algorithm
        if(self.config.optimization_algorithm == "steepest_descent"):
            self.start_steepest_descent()
        elif(self.config.optimization_algorithm == "augmented_lagrange"):
            self.start_augmented_lagrange()
        elif(self.config.optimization_algorithm == "penalized_projection"):
            self.start_penalized_projection()
        else:
            sys.exit("Specified optimization_algorithm not implemented!")

        # Finalize design output in GID formad
        self.gid_io.finalize_results()

        # Stop timer
        opt_end_time = time.time()
        print("\n> ==============================================================================================================")
        print("> Finished optimization in ",round(opt_end_time - self.opt_start_time,2)," s!")
        print("> ==============================================================================================================\n")

    # --------------------------------------------------------------------------
    def start_steepest_descent(self):
        """Unconstrained steepest-descent loop over the single objective.

        Each iteration: evaluate objective + gradient via the analyzer,
        map sensitivities through the vertex-morphing filter, take one
        descent step, write GiD results and a CSV history row, and check
        convergence (max iterations, relative tolerance, objective
        increase).
        """
        # Flags to trigger proper function calls
        constraints_given = False

        # Get Id of objective (assumes exactly one objective is defined)
        only_F_id = None
        for F_id in self.objectives:
            only_F_id = F_id
            break

        # Initialize file where design evolution is recorded
        with open(self.config.design_history_directory+"/"+self.config.design_history_file, 'w') as csvfile:
            historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
            row = []
            row.append("itr\t")
            row.append("\tf\t")
            row.append("\tdf_absolute[%]\t")
            row.append("\tdf_relative[%]\t")
            row.append("\tstep_size[-]\t")
            row.append("\tt_iteration[s]\t")
            row.append("\tt_total[s]")
            historyWriter.writerow(row)

        # Define initial design (initial design corresponds to a zero shape update)
        # Note that we assume an incremental design variable in vertex morphing
        X = {}
        for node in self.opt_model_part.Nodes:
            X[node.Id] = [0.,0.,0.]

        # Miscellaneous working variables for data management
        initial_f = 0.0
        previous_f = 0.0

        # Start optimization loop
        for opt_itr in range(1,self.config.max_opt_iterations+1):

            # Some output
            print("\n>===================================================================")
            print("> ",time.ctime(),": Starting optimization iteration ",opt_itr)
            print(">===================================================================\n")

            # Start measuring time needed for current optimization step
            start_time = time.time()

            # Set controller to evaluate objective
            self.controller.initialize_controls()
            self.controller.get_controls()[only_F_id]["calc_value"] = 1

            # Set to evaluate objective gradient
            self.controller.get_controls()[only_F_id]["calc_gradient"] = 1

            # Initialize response container
            response = self.controller.create_response_container()

            # Start analyzer according to specified controls
            iterator = str(opt_itr) + ".0"
            self.analyzer( X, self.controller.get_controls(), iterator, response )

            # Store gradients on the nodes of the model_part
            self.store_grads_on_nodes( response[only_F_id]["gradient"] )

            # Compute unit surface normals at each node of current design
            self.geom_utils.compute_unit_surface_normals()

            # Project received gradients on unit surface normal at each node to obtain normal gradients
            self.geom_utils.project_grad_on_unit_surface_normal( constraints_given )

            # Compute mapping matrix
            self.mapper.compute_mapping_matrix()

            # Map sensitivities to design space
            self.mapper.map_sensitivities_to_design_space( constraints_given )

            # Compute search direction
            self.opt_utils.compute_search_direction_steepest_descent()

            # # Adjustment of step size
            # if( opt_itr > 1 and response[only_F_id]["value"]>previous_f):
            #     self.config.step_size = self.config.step_size/2

            # Compute design variable update (do one step in the optimization algorithm)
            self.opt_utils.compute_design_update()

            # Map design update to geometry space
            self.mapper.map_design_update_to_geometry_space()

            # Compute and output some measures to track changes in the objective function
            delta_f_absolute = 0.0
            delta_f_relative = 0.0
            print("\n> Current value of objective function = ",response[only_F_id]["value"])
            if(opt_itr>1):
                # Both deltas are normalized by the initial value (in percent)
                delta_f_absolute = 100*(response[only_F_id]["value"]-initial_f)/initial_f
                delta_f_relative = 100*(response[only_F_id]["value"]-previous_f)/initial_f
                print("> Absolut change of objective function = ",round(delta_f_absolute,6)," [%]")
                print("> Relative change of objective function = ",round(delta_f_relative,6)," [%]")

            # Take time needed for current optimization step
            end_time = time.time()
            time_current_step = round(end_time - start_time,2)
            time_optimization = round(end_time - self.opt_start_time,2)
            print("\n> Time needed for current optimization step = ",time_current_step,"s")
            print("> Time needed for total optimization so far = ",time_optimization,"s")

            # Write design in GID format
            self.gid_io.write_results(opt_itr, self.opt_model_part, self.config.nodal_results, [])

            # Write design history to file
            with open(self.config.design_history_directory+"/"+self.config.design_history_file, 'a') as csvfile:
                historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
                row = []
                row.append(str(opt_itr)+"\t")
                row.append("\t"+str("%.12f"%(response[only_F_id]["value"]))+"\t")
                row.append("\t"+str("%.2f"%(delta_f_absolute))+"\t")
                row.append("\t"+str("%.6f"%(delta_f_relative))+"\t")
                row.append("\t"+str(self.config.step_size)+"\t")
                row.append("\t"+str("%.1f"%(time_current_step))+"\t")
                row.append("\t"+str("%.1f"%(time_optimization)))
                historyWriter.writerow(row)

            # Check convergence
            if(opt_itr>1):

                # Check if maximum iterations were reached
                if(opt_itr==self.config.max_opt_iterations):
                    print("\n> Maximal iterations of optimization problem reached!")
                    break

                # Check for relative tolerance
                if(abs(delta_f_relative)<self.config.relative_tolerance_objective):
                    print("\n> Optimization problem converged within a relative objective tolerance of ",self.config.relative_tolerance_objective,"%.")
                    break

                # Check if value of objective increases
                if(response[only_F_id]["value"]>previous_f):
                    print("\n> Value of objective function increased!")
                    break

            # Update design
            X = self.get_design()

            # Store values of for next iteration
            previous_f = response[only_F_id]["value"]

            # Store initial objective value
            if(opt_itr==1):
                initial_f = response[only_F_id]["value"]

    # --------------------------------------------------------------------------
    def start_augmented_lagrange(self):
        """Augmented-Lagrange loop for one objective and one constraint.

        Outer loop updates Lagrange multipliers / penalty factor; inner
        loop minimizes the augmented Lagrangian for fixed multipliers.
        """
        # README!!!
        # Note that the current implementation assumes that only one scalar objective & constraint is present

        # Flags to trigger proper function calls
        constraints_given = True

        # Get Id of objective
        only_F_id = None
        for F_id in self.objectives:
            only_F_id = F_id
            break

        # Get Id of constraint
        only_C_id = None
        for C_id in self.constraints:
            only_C_id = C_id
            break

        # Initialize the optimization algorithm
        # NOTE(review): self.vm_utils is not assigned in __init__ — see class docstring.
        self.vm_utils.initialize_augmented_lagrange( self.constraints,
                                                     self.config.penalty_fac_0,
                                                     self.config.gamma,
                                                     self.config.penalty_fac_max,
                                                     self.config.lambda_0 )

        # Initialize file where design evolution is recorded
        with open(self.config.design_history_directory+"/"+self.config.design_history_file, 'w') as csvfile:
            historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
            row = []
            row.append("itr\t")
            row.append("\tsub_itr\t")
            row.append("\tl\t")
            row.append("\tdl_relative[%]\t")
            row.append("\tf\t")
            row.append("\tdf_absolute[%]\t")
            row.append("\tpenalty_fac\t")
            row.append("\tC["+str(only_C_id)+"]:"+str(self.constraints[only_C_id]["type"])+"\t")
            row.append("\tlambda["+str(only_C_id)+"]")
            historyWriter.writerow(row)

        # Define initial design (initial design corresponds to a zero shape update)
        # Note that we assume an incremental design variable in vertex morphing
        X = {}
        for node in self.opt_model_part.Nodes:
            X[node.Id] = [0.,0.,0.]

        # Miscellaneous working variables for data management
        initial_f = 0.0
        initial_l = 0.0
        previous_l = 0.0

        # Start primary optimization loop
        for opt_itr in range(1,self.config.max_opt_iterations+1):

            # Some output
            print("\n>===================================================================")
            print("> ",time.ctime(),": Starting optimization iteration ",opt_itr)
            print(">===================================================================\n")

            # Start measuring time needed for current optimization step
            start_time = time.time()

            # Solve optimization subproblem
            for sub_opt_itr in range(1,self.config.max_sub_opt_iterations+1):

                # Some output
                print("\n>===============================================")
                print("> Starting suboptimization iteration ",sub_opt_itr)
                print(">===============================================\n")

                # Start measuring time needed for current suboptimization step
                subopt_start_time = time.time()

                # Set controller to evaluate objective and constraint
                self.controller.initialize_controls()
                self.controller.get_controls()[only_F_id]["calc_value"] = 1
                self.controller.get_controls()[only_C_id]["calc_value"] = 1

                # Set controller to evaluate objective and constraint gradient
                self.controller.get_controls()[only_F_id]["calc_gradient"] = 1
                self.controller.get_controls()[only_C_id]["calc_gradient"] = 1

                # Initialize response container
                response = self.controller.create_response_container()

                # Start analyzer according to specified controls
                iterator = str(opt_itr) + "." + str(sub_opt_itr)
                self.analyzer( X, self.controller.get_controls(), iterator, response )

                # Evaluate Lagrange function
                l = self.opt_utils.get_value_of_augmented_lagrangian( only_F_id, self.constraints, response )

                # Store gradients on the nodes of the model_part
                self.store_grads_on_nodes( response[only_F_id]["gradient"], response[only_C_id]["gradient"] )

                # Compute unit surface normals at each node of current design
                self.geom_utils.compute_unit_surface_normals()

                # Project received gradients on unit surface normal at each node to obtain normal gradients
                self.geom_utils.project_grad_on_unit_surface_normal( constraints_given )

                # Compute mapping matrix
                self.mapper.compute_mapping_matrix()

                # Map sensitivities to design space
                self.mapper.map_sensitivities_to_design_space( constraints_given )

                # Compute search direction
                self.opt_utils.compute_search_direction_augmented_lagrange( self.constraints, response )

                # Compute design variable update (do one step in the optimization algorithm)
                self.opt_utils.compute_design_update()

                # Map design update to geometry space
                self.mapper.map_design_update_to_geometry_space()

                # Compute and output some measures to track changes in the objective function
                delta_f_absolute = 0.0
                delta_l_relative = 0.0
                print("\n> Current value of Lagrange function = ",round(l,12))
                if(sub_opt_itr>1):
                    delta_f_absolute = 100*(response[only_F_id]["value"]-initial_f)/initial_f
                    delta_l_relative = 100*(l-previous_l)/initial_l
                    print("\n> Relative change of Lagrange function = ",round(delta_l_relative,6)," [%]")

                # We write every major and every suboptimization iteration in design history
                with open(self.config.design_history_directory+"/"+self.config.design_history_file, 'a') as csvfile:
                    historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
                    row = []
                    row.append(str(opt_itr)+"\t")
                    row.append("\t"+str(sub_opt_itr)+"\t")
                    row.append("\t"+str("%.12f"%(l))+"\t")
                    row.append("\t"+str("%.6f"%(delta_l_relative))+"\t")
                    row.append("\t"+str("%.12f"%(response[only_F_id]["value"]))+"\t")
                    row.append("\t"+str("%.2f"%(delta_f_absolute))+"\t")
                    row.append("\t"+str("%.2f"%(self.vm_utils.get_penalty_fac()))+"\t")
                    row.append("\t"+str("%.12f"%(response[only_C_id]["value"]))+"\t")
                    row.append("\t"+str("%.12f"%(self.vm_utils.get_lambda(only_C_id))))
                    historyWriter.writerow(row)

                # Write design in GID format
                # NOTE(review): float("3.10") == 3.1, so sub-iterations >= 10
                # collide with earlier ones in the GiD step label — confirm.
                write_itr = float(iterator)
                self.gid_io.write_results(write_itr, self.opt_model_part, self.config.nodal_results, [])

                # Check convergence (We ensure that at least 2 subiterations are done)
                if(sub_opt_itr>3):

                    # Check if maximum iterations were reached
                    if(sub_opt_itr==self.config.max_sub_opt_iterations):
                        print("\n> Maximal iterations of supoptimization problem reached!")
                        break

                    # Check for relative tolerance
                    if(abs(delta_l_relative)<self.config.relative_tolerance_sub_opt):
                        print("\n> Optimization subproblem converged within a relative objective tolerance of ",self.config.relative_tolerance_sub_opt,"%.")
                        break

                    # Check if value of lagrangian increases
                    if(l>previous_l):
                        print("\n> Value of Lagrange function increased!")
                        break

                # Update design
                X = self.get_design()

                # Store value of Lagrange function for next iteration
                previous_l = l

                # Store initial objective value
                if(opt_itr==1 and sub_opt_itr==1):
                    initial_f = response[only_F_id]["value"]
                    initial_l = l

                # Take time needed for current suboptimization step as well as for the overall opt so far
                subopt_end_time = time.time()
                print("\n> Time needed for current suboptimization step = ",round(subopt_end_time - subopt_start_time,2),"s")
                print("\n> Time needed for total optimization so far = ",round(subopt_end_time - self.opt_start_time,2),"s")

            # Check Convergence (More convergence criterion for major optimization iteration to be implemented!)

            # Check if maximum iterations were reached
            if(opt_itr==self.config.max_opt_iterations):
                print("\n> Maximal iterations of optimization problem reached!")
                break

            # Update lagrange multipliers and penalty factor
            # NOTE(review): "udpate" spelling matches the called utility's API.
            self.vm_utils.udpate_augmented_lagrange_parameters( self.constraints, response )

            # Take time needed for current optimization step
            end_time = time.time()
            print("\n> Time needed for current optimization step = ",round(end_time - start_time,2),"s")

    # --------------------------------------------------------------------------
    def start_penalized_projection(self):
        """Penalized-projection loop for one objective and one constraint.

        Uses the projected search direction when the constraint is active
        (violated or of equality type), otherwise falls back to steepest
        descent.
        """
        # README!!!
        # Note that the current implementation assumes that only one scalar objective & constraint is present

        # Flags to trigger proper function calls
        constraints_given = True

        # Get Id of objective
        only_F_id = None
        for F_id in self.objectives:
            only_F_id = F_id
            break

        # Get Id of constraint
        only_C_id = None
        for C_id in self.constraints:
            only_C_id = C_id
            break

        # Initialize file where design evolution is recorded
        with open(self.config.design_history_directory+"/"+self.config.design_history_file, 'w') as csvfile:
            historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
            row = []
            row.append("itr\t")
            row.append("\tf\t")
            row.append("\tdf_absolute[%]\t")
            row.append("\tdf_relative[%]\t")
            row.append("\tc["+str(only_C_id)+"]:"+str(self.constraints[only_C_id]["type"])+"\t")
            row.append("\tc["+str(only_C_id)+"] / reference_value[%]"+"\t")
            row.append("\tcorrection_scaling[-]\t")
            row.append("\tstep_size[-]\t")
            row.append("\tt_iteration[s]\t")
            row.append("\tt_total[s]")
            historyWriter.writerow(row)

        # Define initial design (initial design corresponds to a zero shape update)
        # Note that we assume an incremental design variable in vertex morphing
        X = {}
        for node in self.opt_model_part.Nodes:
            X[node.Id] = [0.,0.,0.]

        # Miscellaneous working variables for data management
        initial_f = 0.0
        previous_f = 0.0
        previous_c = 0.0

        # Start optimization loop
        for opt_itr in range(1,self.config.max_opt_iterations+1):

            # Some output
            print("\n>===================================================================")
            print("> ",time.ctime(),": Starting optimization iteration ",opt_itr)
            print(">===================================================================\n")

            # Start measuring time needed for current optimization step
            start_time = time.time()

            # Set controller to evaluate objective and constraint
            self.controller.initialize_controls()
            self.controller.get_controls()[only_F_id]["calc_value"] = 1
            self.controller.get_controls()[only_C_id]["calc_value"] = 1

            # Set controller to evaluate objective and constraint gradient
            self.controller.get_controls()[only_F_id]["calc_gradient"] = 1
            self.controller.get_controls()[only_C_id]["calc_gradient"] = 1

            # Initialize response container
            response = self.controller.create_response_container()

            # Start analyzer according to specified controls
            iterator = str(opt_itr) + ".0"
            self.analyzer( X, self.controller.get_controls(), iterator, response )

            # Check if constraint is active (only the first constraint is inspected)
            for func_id in self.config.constraints:
                if(self.config.constraints[func_id]["type"] == "eq"):
                    constraints_given = True
                elif(response[func_id]["value"]>0):
                    constraints_given = True
                else:
                    constraints_given = False
                break

            # Store gradients on the nodes of the model_part
            self.store_grads_on_nodes( response[only_F_id]["gradient"], response[only_C_id]["gradient"] )

            # Compute unit surface normals at each node of current design
            self.geom_utils.compute_unit_surface_normals()

            # Project received gradients on unit surface normal at each node to obtain normal gradients
            self.geom_utils.project_grad_on_unit_surface_normal( constraints_given )

            # Compute mapping matrix
            self.mapper.compute_mapping_matrix()

            # Map sensitivities to design space
            self.mapper.map_sensitivities_to_design_space( constraints_given )

            # # Adjustment of step size
            # if( opt_itr > 1 and response[only_F_id]["value"]>previous_f):
            #     self.config.step_size = self.config.step_size/2

            # correction_scaling is passed as a one-element list so the
            # callee can report the scaling value back by mutation.
            correction_scaling = [False]
            if(constraints_given):
                self.opt_utils.compute_projected_search_direction( response[only_C_id]["value"] )
                self.opt_utils.correct_projected_search_direction( response[only_C_id]["value"], previous_c, correction_scaling )
                self.opt_utils.compute_design_update()
            else:
                self.opt_utils.compute_search_direction_steepest_descent()
                self.opt_utils.compute_design_update()

            # Map design update to geometry space
            self.mapper.map_design_update_to_geometry_space()

            # Compute and output some measures to track objective function
            delta_f_absolute = 0.0
            delta_f_relative = 0.0
            print("\n> Current value of objective function = ",response[only_F_id]["value"])
            if(opt_itr>1):
                delta_f_absolute = 100*(response[only_F_id]["value"]-initial_f)/initial_f
                delta_f_relative = 100*(response[only_F_id]["value"]-previous_f)/initial_f
                print("\n> Absolut change of objective function = ",round(delta_f_absolute,6)," [%]")
                print("\n> Relative change of objective function = ",round(delta_f_relative,6)," [%]")

            # Compute and output some measures to track constraint function
            print("\n> Current value of constraint function = ",round(response[only_C_id]["value"],12))

            # Take time needed for current optimization step
            end_time = time.time()
            time_current_step = round(end_time - start_time,2)
            time_optimization = round(end_time - self.opt_start_time,2)
            print("\n> Time needed for current optimization step = ",time_current_step,"s")
            print("\n> Time needed for total optimization so far = ",time_optimization,"s")

            # Write design in GID format
            self.gid_io.write_results(opt_itr, self.opt_model_part, self.config.nodal_results, [])

            # Write design history to file
            with open(self.config.design_history_directory+"/"+self.config.design_history_file, 'a') as csvfile:
                historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
                row = []
                row.append(str(opt_itr)+"\t")
                row.append("\t"+str("%.12f"%(response[only_F_id]["value"]))+"\t")
                row.append("\t"+str("%.2f"%(delta_f_absolute))+"\t")
                row.append("\t"+str("%.6f"%(delta_f_relative))+"\t")
                row.append("\t"+str("%.12f"%(response[only_C_id]["value"]))+"\t")
                if not response[only_C_id]["reference_value"]:
                    row.append("\t"+str("-\t"))
                else:
                    percentage_of_reference = 100*(response[only_C_id]["value"]/response[only_C_id]["reference_value"])
                    row.append("\t"+str("%.6f"%(percentage_of_reference)))
                row.append("\t"+str("%.12f"%(correction_scaling[0]))+"\t")
                row.append("\t"+str(self.config.step_size)+"\t")
                row.append("\t"+str("%.1f"%(time_current_step))+"\t")
                row.append("\t"+str("%.1f"%(time_optimization)))
                historyWriter.writerow(row)

            # Check convergence (Further convergence criterions to be implemented )
            if(opt_itr>1):

                # Check if maximum iterations were reached
                if(opt_itr==self.config.max_opt_iterations):
                    print("\n> Maximal iterations of optimization problem reached!")
                    break

                # Check for relative tolerance
                if(abs(delta_f_relative)<self.config.relative_tolerance_objective):
                    print("\n> Optimization problem converged within a relative objective tolerance of ",self.config.relative_tolerance_objective,"%.")
                    break

            # Update design
            X = self.get_design()

            # Store values of for next iteration
            previous_f = response[only_F_id]["value"]
            previous_c = response[only_C_id]["value"]

            # Store initial objective value
            if(opt_itr==1):
                initial_f = response[only_F_id]["value"]

    # --------------------------------------------------------------------------
    def store_grads_on_nodes(self,objective_grads,constraint_grads={}):
        """Write objective (and optionally constraint) gradients onto nodes.

        Nodes flagged SENSITIVITIES_DEACTIVATED are skipped, so their
        stored sensitivities remain zero.

        NOTE(review): the mutable default {} is harmless here because
        constraint_grads is only read, never mutated.
        """
        # Read objective gradients
        eucledian_norm_obj_sens = 0.0  # currently unused accumulator
        for node_Id in objective_grads:

            # If deactivated, nodal sensitivities will not be assigned and hence remain zero
            if(self.opt_model_part.Nodes[node_Id].GetSolutionStepValue(SENSITIVITIES_DEACTIVATED)):
                continue

            # If not deactivated, nodal sensitivities will be assigned
            sens_i = Vector(3)
            sens_i[0] = objective_grads[node_Id][0]
            sens_i[1] = objective_grads[node_Id][1]
            sens_i[2] = objective_grads[node_Id][2]
            self.opt_model_part.Nodes[node_Id].SetSolutionStepValue(OBJECTIVE_SENSITIVITY,0,sens_i)

        # When constraint_grads is defined also store constraint sensitivities (bool returns false if dictionary is empty)
        if(bool(constraint_grads)):
            eucledian_norm_cons_sens = 0.0  # currently unused accumulator
            for node_Id in constraint_grads:

                # If deactivated, nodal sensitivities will not be assigned and hence remain zero
                if(self.opt_model_part.Nodes[node_Id].GetSolutionStepValue(SENSITIVITIES_DEACTIVATED)):
                    continue

                # If not deactivated, nodal sensitivities will be assigned
                sens_i = Vector(3)
                sens_i[0] = constraint_grads[node_Id][0]
                sens_i[1] = constraint_grads[node_Id][1]
                sens_i[2] = constraint_grads[node_Id][2]
                self.opt_model_part.Nodes[node_Id].SetSolutionStepValue(CONSTRAINT_SENSITIVITY,0,sens_i)

    # --------------------------------------------------------------------------
    def get_design(self):
        """Return the current design as {node_Id: [x, y, z]}.

        The meaning of the returned vectors depends on
        config.design_output_mode: "relative" (last shape update),
        "total" (accumulated shape change) or "absolute" (coordinates).
        """
        # Read and return the current design in the corresponding mode
        X = {}
        if(self.config.design_output_mode=="relative"):
            for node in self.opt_model_part.Nodes:
                X[node.Id] = node.GetSolutionStepValue(SHAPE_UPDATE)
        elif(self.config.design_output_mode=="total"):
            for node in self.opt_model_part.Nodes:
                X[node.Id] = node.GetSolutionStepValue(SHAPE_CHANGE_ABSOLUTE)
        elif(self.config.design_output_mode=="absolute"):
            for node in self.opt_model_part.Nodes:
                X[node.Id] = [node.X,node.Y,node.Z]
        else:
            sys.exit("Wrong definition of design_output_mode!")
        return X
GiDMultiFileFlag = "Single" # Read optimized model part from restart file optimized_model_part = ModelPart("optimized_model_part") optimized_model_part.AddNodalSolutionStepVariable(NORMAL) restart_file_name = "Small_Cantilever_Restart_File_21" model_part_io = ModelPartIO(restart_file_name) model_part_io.ReadModelPart(optimized_model_part) # Extract volume threshold = 0.2 extracted_volume_model_part = ModelPart("extracted_volume_model_part") TopologyExtractorUtilities().ExtractVolumeMesh(optimized_model_part, threshold, extracted_volume_model_part) # Write extracted volume gid_io = GiDOutput("extracted_volume_model_part", VolumeOutput, GiDPostMode, GiDMultiFileFlag, GiDWriteMeshFlag, GiDWriteConditionsFlag) gid_io.initialize_results(extracted_volume_model_part) gid_io.write_results(1, extracted_volume_model_part, nodal_results, gauss_points_results) gid_io.finalize_results() # Extract surface extracted_surface_model_part = ModelPart("extracted_surface_model_part") TopologyExtractorUtilities().ExtractSurfaceMesh(extracted_volume_model_part, extracted_surface_model_part) # Write extracted surface gid_io_2 = GiDOutput("extracted_surface_model_part", VolumeOutput, GiDPostMode, GiDMultiFileFlag, GiDWriteMeshFlag, GiDWriteConditionsFlag) gid_io_2.initialize_results(extracted_surface_model_part) gid_io_2.write_results(1, extracted_surface_model_part, nodal_results, gauss_points_results) gid_io_2.finalize_results() # Smooth extracted surface
# Script fragment: pin turbulent viscosity on wall nodes, initialize the
# fluid solver and set up GiD output.
# NOTE(review): fluid_model_part, fluid_solver, i, input_file_name,
# ProjectParameters and define_output are defined earlier in the
# enclosing script (not visible here).
nodes = fluid_model_part.GetNodes(i)
for node in nodes:
    fluid_solver.wall_nodes.append(node)
    # Fix TURBULENT_VISCOSITY to zero at walls
    node.SetSolutionStepValue(TURBULENT_VISCOSITY, 0, 0.0)
    node.Fix(TURBULENT_VISCOSITY)

fluid_solver.Initialize()
print("fluid solver created")
sys.stdout.flush()

# initialize GiD I/O
from gid_output import GiDOutput
gid_io = GiDOutput(input_file_name,
                   ProjectParameters.VolumeOutput,
                   ProjectParameters.GiDPostMode,
                   ProjectParameters.GiDMultiFileFlag,
                   ProjectParameters.GiDWriteMeshFlag,
                   ProjectParameters.GiDWriteConditionsFlag)

# When not writing the full volume, output is restricted to cut planes.
if not ProjectParameters.VolumeOutput:
    cut_list = define_output.DefineCutPlanes()
    gid_io.define_cuts(fluid_model_part, cut_list)

# gid_io.initialize_results(fluid_model_part) # MOD.

#_____________________________________________________________________________________________________________________________________
#
#                               F L U I D   B L O C K   E N D S
#_____________________________________________________________________________________________________________________________________

import swimming_DEM_gid_output
# Script fragment: initialize processes, fetch the compute model part and
# configure GiD output from the JSON project parameters.
# NOTE(review): list_of_processes, solver and ProjectParameters are
# defined earlier in the enclosing script (not visible here).
for process in list_of_processes:
    print("a")
    process.ExecuteInitialize()
    print("b")

#TODO: think if there is a better way to do this
fluid_model_part = solver.GetComputeModelPart()

# initialize GiD I/O
from gid_output import GiDOutput
output_settings = ProjectParameters["output_configuration"]
gid_io = GiDOutput(output_settings["output_filename"].GetString(),
                   output_settings["volume_output"].GetBool(),
                   output_settings["gid_post_mode"].GetString(),
                   output_settings["gid_multi_file_flag"].GetString(),
                   output_settings["gid_write_mesh_flag"].GetBool(),
                   output_settings["gid_write_conditions_flag"].GetBool())
output_time = output_settings["output_time"].GetDouble()

gid_io.initialize_results(fluid_model_part)

for process in list_of_processes:
    process.ExecuteBeforeSolutionLoop()

## Stepping and time settings
Dt = ProjectParameters["problem_data"]["time_step"].GetDouble()
end_time = ProjectParameters["problem_data"]["end_time"].GetDouble()
def __init__(self,opt_model_part,config,analyzer):
    """Set up the vertex-morphing shape optimizer.

    Configures GiD output, registers the nodal variables the optimizer
    needs, reads the design surface, stores the response definitions and
    builds the vertex-morphing utilities toolbox.
    """
    # GiD result writer for the design surface
    self.gid_io = GiDOutput(config.design_surface_name,
                            config.VolumeOutput,
                            config.GiDPostMode,
                            config.GiDMultiFileFlag,
                            config.GiDWriteMeshFlag,
                            config.GiDWriteConditionsFlag)

    # Nodal solution-step variables required for shape optimization
    for nodal_variable in (NORMAL,
                           NORMALIZED_SURFACE_NORMAL,
                           OBJECTIVE_SENSITIVITY,
                           CONSTRAINT_SENSITIVITY,
                           SHAPE_UPDATE,
                           SHAPE_CHANGE_ABSOLUTE,
                           IS_ON_BOUNDARY,
                           BOUNDARY_PLANE,
                           SHAPE_UPDATES_DEACTIVATED,
                           SENSITIVITIES_DEACTIVATED):
        opt_model_part.AddNodalSolutionStepVariable(nodal_variable)

    # Read the design surface and report it
    ModelPartIO(config.design_surface_name).ReadModelPart(opt_model_part)
    opt_model_part.SetBufferSize(1)
    print("\nThe following design surface was defined:\n\n",opt_model_part)

    # Keep configuration, analyzer and response definitions
    self.config = config
    self.analyzer = analyzer
    self.objectives = config.objectives
    self.constraints = config.constraints

    print("> The following objectives are defined:\n")
    for response_id in config.objectives:
        print(response_id,":",config.objectives[response_id],"\n")
    print("> The following constraints are defined:\n")
    for response_id in config.constraints:
        print(response_id,":",config.constraints[response_id],"\n")

    # Controller steering which responses are evaluated at each step
    self.controller = Controller( config )

    # Model parameters
    self.opt_model_part = opt_model_part

    # Vertex-morphing toolbox. The node cap is required by the spatial (tree)
    # search and bounds how many nodes a filter radius may touch.
    node_search_cap = 10000
    self.vm_utils = VertexMorphingUtilities(self.opt_model_part,
                                            self.config.domain_size,
                                            self.objectives,
                                            self.constraints,
                                            config.filter_size,
                                            node_search_cap)
class VertexMorphingMethod:
    """Shape-optimization driver based on the vertex-morphing technique.

    Provides three algorithms (steepest descent, augmented Lagrange,
    penalized projection) that iterate: analyze responses -> map gradients
    through the vertex-morphing filter -> update the design -> log history
    and write the design in GiD format.

    Fixes relative to the original revision (behavior-preserving):
    - ``store_grads_on_nodes`` no longer uses a mutable default argument.
    - Dead locals (unused Euclidean-norm accumulators, unused ``dJdX_n``
      binding) and commented-out dead code were removed.
    """

    # --------------------------------------------------------------------------
    def __init__(self,opt_model_part,config,analyzer):
        """Wire up GiD output, nodal variables, design surface, responses and
        the vertex-morphing utilities toolbox."""
        # For GID output
        self.gid_io = GiDOutput(config.design_surface_name,
                                config.VolumeOutput,
                                config.GiDPostMode,
                                config.GiDMultiFileFlag,
                                config.GiDWriteMeshFlag,
                                config.GiDWriteConditionsFlag)

        # Add variables needed for shape optimization
        opt_model_part.AddNodalSolutionStepVariable(NORMAL)
        opt_model_part.AddNodalSolutionStepVariable(NORMALIZED_SURFACE_NORMAL)
        opt_model_part.AddNodalSolutionStepVariable(OBJECTIVE_SENSITIVITY)
        opt_model_part.AddNodalSolutionStepVariable(CONSTRAINT_SENSITIVITY)
        opt_model_part.AddNodalSolutionStepVariable(SHAPE_UPDATE)
        opt_model_part.AddNodalSolutionStepVariable(SHAPE_CHANGE_ABSOLUTE)
        opt_model_part.AddNodalSolutionStepVariable(IS_ON_BOUNDARY)
        opt_model_part.AddNodalSolutionStepVariable(BOUNDARY_PLANE)
        opt_model_part.AddNodalSolutionStepVariable(SHAPE_UPDATES_DEACTIVATED)
        opt_model_part.AddNodalSolutionStepVariable(SENSITIVITIES_DEACTIVATED)

        # Read and print model part (design surface)
        buffer_size = 1
        model_part_io = ModelPartIO(config.design_surface_name)
        model_part_io.ReadModelPart(opt_model_part)
        opt_model_part.SetBufferSize(buffer_size)
        print("\nThe following design surface was defined:\n\n",opt_model_part)

        # Set configurations
        self.config = config

        # Set analyzer
        self.analyzer = analyzer

        # Set response functions
        self.objectives = config.objectives
        self.constraints = config.constraints

        print("> The following objectives are defined:\n")
        for func_id in config.objectives:
            print(func_id,":",config.objectives[func_id],"\n")
        print("> The following constraints are defined:\n")
        for func_id in config.constraints:
            print(func_id,":",config.constraints[func_id],"\n")

        # Create controller object
        self.controller = Controller( config )

        # Model parameters
        self.opt_model_part = opt_model_part

        # Add toolbox for vertex morphing. The node cap is a specification
        # required by the spatial (tree) search: maximum nodes that may be
        # considered within the filter radius.
        max_nodes_affected = 10000
        self.vm_utils = VertexMorphingUtilities( self.opt_model_part,
                                                 self.config.domain_size,
                                                 self.objectives,
                                                 self.constraints,
                                                 config.filter_size,
                                                 max_nodes_affected )

    # --------------------------------------------------------------------------
    def optimize(self):
        """Run the algorithm selected in the configuration, bracketing it with
        GiD result initialization/finalization and wall-clock timing."""
        print("\n> ==============================================================================================================")
        print("> Starting optimization using the following algorithm: ",self.config.optimization_algorithm)
        print("> ==============================================================================================================\n")

        # Start timer and assign to object such that total time of opt may be measured at each step
        self.opt_start_time = time.time()

        # Initialize design output in GID Format
        self.gid_io.initialize_results(self.opt_model_part)

        # Call for specified optimization algorithm
        if(self.config.optimization_algorithm == "steepest_descent"):
            self.start_steepest_descent()
        elif(self.config.optimization_algorithm == "augmented_lagrange"):
            self.start_augmented_lagrange()
        elif(self.config.optimization_algorithm == "penalized_projection"):
            self.start_penalized_projection()
        else:
            sys.exit("Specified optimization_algorithm not implemented!")

        # Finalize design output in GID format
        self.gid_io.finalize_results()

        # Stop timer
        opt_end_time = time.time()
        print("\n> ==============================================================================================================")
        print("> Finished optimization in ",round(opt_end_time - self.opt_start_time,1)," s!")
        print("> ==============================================================================================================\n")

    # --------------------------------------------------------------------------
    def start_steepest_descent(self):
        """Unconstrained optimization: filtered steepest descent on the single
        objective, with CSV history logging and GiD design output."""
        # Flags to trigger proper function calls
        constraints_given = False

        # Get Id of objective (only one scalar objective is supported)
        only_F_id = None
        for F_id in self.objectives:
            only_F_id = F_id
            break

        # Initialize file where design evolution is recorded
        with open(self.config.design_history_file, 'w') as csvfile:
            historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
            row = []
            row.append("itr")
            row.append("\tf")
            row.append("\tdf_absolute[%]")
            row.append("\tdf_relative[%]")
            historyWriter.writerow(row)

        # Define initial design (initial design corresponds to a zero shape update)
        # Note that we assume an incremental design variable in vertex morphing
        X = {}
        for node in self.opt_model_part.Nodes:
            X[node.Id] = [0.,0.,0.]

        # Miscellaneous working variables for data management
        initial_f = 0.0
        previous_f = 0.0

        # Start optimization loop
        for opt_itr in range(1,self.config.max_opt_iterations+1):

            # Some output
            print("\n>===============================================")
            print("> Starting optimization iteration ",opt_itr)
            print(">===============================================\n")

            # Start measuring time needed for current optimization step
            start_time = time.time()

            # Set controller to evaluate objective
            self.controller.initialize_controls()
            self.controller.get_controls()[only_F_id]["calc_func"] = 1

            # Set to evaluate objective gradient if provided
            if(self.objectives[only_F_id]["grad"]=="provided"):
                self.controller.get_controls()[only_F_id]["calc_grad"] = 1

            # Initialize response container
            response = self.controller.create_response_container()

            # Start analyzer according to specified controls
            iterator = str(opt_itr) + ".0"
            self.analyzer( X, self.controller.get_controls(), iterator, response )

            # Store gradients on the nodes of the model_part
            self.store_grads_on_nodes( response[only_F_id]["grad"] )

            # Compute unit surface normals at each node of current design
            self.vm_utils.compute_unit_surface_normals()

            # Project received gradients on unit surface normal at each node to obtain normal gradients
            self.vm_utils.project_grad_on_unit_surface_normal( constraints_given )

            # Compute filtered gradients (do backward mapping)
            self.vm_utils.filter_gradients( constraints_given )

            # Compute search direction
            self.vm_utils.compute_search_direction_steepest_descent()

            # Compute design variable update (do one step in the optimization algorithm)
            self.vm_utils.update_design_variable( self.config.step_size_0 )

            # Compute shape update (do forward mapping)
            self.vm_utils.update_shape()

            # Compute and output some measures to track changes in the objective function
            delta_f_absolute = 0.0
            delta_f_relative = 0.0
            print("\n> Current value of objective function = ",response[only_F_id]["func"])
            if(opt_itr>1):
                delta_f_absolute = 100* ( response[only_F_id]["func"]/initial_f - 1 )
                delta_f_relative = 100*( response[only_F_id]["func"]/previous_f - 1 )
                print("\n> Absolut change of objective function = ",round(delta_f_absolute,6)," [%]")
                print("\n> Relative change of objective function = ",round(delta_f_relative,6)," [%]")

            # Write design history to file
            with open(self.config.design_history_file, 'a') as csvfile:
                historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
                row = []
                row.append(str(opt_itr)+"\t")
                row.append("\t"+str("%.12f"%(response[only_F_id]["func"]))+"\t")
                row.append("\t"+str("%.2f"%(delta_f_absolute))+"\t")
                row.append("\t"+str("%.6f"%(delta_f_relative))+"\t")
                historyWriter.writerow(row)

            # Write design in GID format
            self.gid_io.write_results(opt_itr, self.opt_model_part, self.config.nodal_results, [])

            # Check convergence
            if(opt_itr>1):

                # Check if maximum iterations were reached
                if(opt_itr==self.config.max_opt_iterations):
                    print("\n> Maximal iterations of optimization problem reached!")
                    break

                # Check for relative tolerance
                if(abs(delta_f_relative)<self.config.relative_tolerance_objective):
                    print("\n> Optimization problem converged within a relative objective tolerance of ",self.config.relative_tolerance_objective,".")
                    break

            # Update design
            X = self.get_design()

            # Store values of for next iteration
            previous_f = response[only_F_id]["func"]

            # Store initial objective value
            if(opt_itr==1):
                initial_f = response[only_F_id]["func"]

            # Take time needed for current optimization step
            end_time = time.time()
            print("\n> Time needed for current optimization step = ",round(end_time - start_time,1),"s")
            print("\n> Time needed for total optimization so far = ",round(end_time - self.opt_start_time,1),"s")

    # --------------------------------------------------------------------------
    def start_augmented_lagrange(self):
        """Constrained optimization via an augmented-Lagrange outer loop with a
        steepest-descent-style inner (sub-optimization) loop.

        Note that the current implementation assumes that only one scalar
        objective & constraint is present.
        """
        # Flags to trigger proper function calls
        constraints_given = True

        # Get Id of objective
        only_F_id = None
        for F_id in self.objectives:
            only_F_id = F_id
            break

        # Get Id of constraint
        only_C_id = None
        for C_id in self.constraints:
            only_C_id = C_id
            break

        # Initialize the optimization algorithm
        self.vm_utils.initialize_augmented_lagrange( self.constraints,
                                                     self.config.penalty_fac_0,
                                                     self.config.gamma,
                                                     self.config.penalty_fac_max,
                                                     self.config.lambda_0 )

        # Initialize file where design evolution is recorded
        with open(self.config.design_history_file, 'w') as csvfile:
            historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
            row = []
            row.append("itr\t")
            row.append("\tsub_itr\t")
            row.append("\tl\t")
            row.append("\tdl_relative[%]\t")
            row.append("\tf\t")
            row.append("\tdf_absolute[%]\t")
            row.append("\tpenalty_fac\t")
            row.append("\tC["+str(only_C_id)+"]: "+str(self.constraints[only_C_id]["type"])+"\t")
            row.append("\tlambda["+str(only_C_id)+"]\t")
            historyWriter.writerow(row)

        # Define initial design (initial design corresponds to a zero shape update)
        # Note that we assume an incremental design variable in vertex morphing
        X = {}
        for node in self.opt_model_part.Nodes:
            X[node.Id] = [0.,0.,0.]

        # Miscellaneous working variables for data management
        initial_f = 0.0
        previous_l = 0.0

        # Start primary optimization loop
        for opt_itr in range(1,self.config.max_opt_iterations+1):

            # Some output
            print("\n>===============================================")
            print("> Starting optimization iteration ",opt_itr)
            print(">===============================================\n")

            # Start measuring time needed for current optimization step
            start_time = time.time()

            # Solve optimization subproblem
            for sub_opt_itr in range(1,self.config.max_sub_opt_iterations+1):

                # Some output
                print("\n>===============================================")
                print("> Starting suboptimization iteration ",sub_opt_itr)
                print(">===============================================\n")

                # Start measuring time needed for current suboptimization step
                subopt_start_time = time.time()

                # Set controller to evaluate objective and constraint
                self.controller.initialize_controls()
                self.controller.get_controls()[only_F_id]["calc_func"] = 1
                self.controller.get_controls()[only_C_id]["calc_func"] = 1

                # Set controller to evaluate objective and constraint gradient if provided
                if(self.objectives[only_F_id]["grad"]=="provided"):
                    self.controller.get_controls()[only_F_id]["calc_grad"] = 1
                if(self.constraints[only_C_id]["grad"]=="provided"):
                    self.controller.get_controls()[only_C_id]["calc_grad"] = 1

                # Initialize response container
                response = self.controller.create_response_container()

                # Start analyzer according to specified controls
                iterator = str(opt_itr) + "." + str(sub_opt_itr)
                self.analyzer( X, self.controller.get_controls(), iterator, response )

                # Evaluate Lagrange function
                l = self.vm_utils.get_value_of_augmented_lagrangian( only_F_id, self.constraints, response )

                # Store gradients on the nodes of the model_part
                self.store_grads_on_nodes( response[only_F_id]["grad"], response[only_C_id]["grad"] )

                # Compute unit surface normals at each node of current design
                self.vm_utils.compute_unit_surface_normals()

                # Project received gradients on unit surface normal at each node to obtain normal gradients
                # (the return value was bound to an unused variable before; the call's side effect is what matters)
                self.vm_utils.project_grad_on_unit_surface_normal( constraints_given )

                # Compute filtered gradients (do backward mapping)
                self.vm_utils.filter_gradients( constraints_given )

                # Compute search direction
                self.vm_utils.compute_search_direction_augmented_lagrange( self.constraints, response )

                # Compute design variable update (do one step in the optimization algorithm)
                self.vm_utils.update_design_variable( self.config.step_size_0 )

                # Compute shape update (do forward mapping)
                self.vm_utils.update_shape()

                # Compute and output some measures to track changes in the objective function
                delta_f_absolute = 0.0
                delta_l_relative = 0.0
                print("\n> Current value of Lagrange function = ",round(l,12))
                if(sub_opt_itr>1):
                    delta_f_absolute = 100* ( response[only_F_id]["func"]/initial_f - 1 )
                    delta_l_relative = 100*( l/previous_l - 1 )
                    print("\n> Relative change of Lagrange function = ",round(delta_l_relative,6)," [%]")

                # We write every major and every suboptimization iteration in design history
                with open(self.config.design_history_file, 'a') as csvfile:
                    historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
                    row = []
                    row.append(str(opt_itr)+"\t")
                    row.append("\t"+str(sub_opt_itr)+"\t")
                    row.append("\t"+str("%.12f"%(l))+"\t")
                    row.append("\t"+str("%.6f"%(delta_l_relative))+"\t")
                    row.append("\t"+str("%.12f"%(response[only_F_id]["func"]))+"\t")
                    row.append("\t"+str("%.2f"%(delta_f_absolute))+"\t")
                    row.append("\t"+str("%.2f"%(self.vm_utils.get_penalty_fac()))+"\t")
                    row.append("\t"+str("%.12f"%(response[only_C_id]["func"]))+"\t")
                    row.append("\t"+str("%.12f"%(self.vm_utils.get_lambda(only_C_id)))+"\t")
                    historyWriter.writerow(row)

                # Write design in GID format ("major.sub" iteration encoded as a float)
                write_itr = float(iterator)
                self.gid_io.write_results(write_itr, self.opt_model_part, self.config.nodal_results, [])

                # Check convergence (We ensure that at least 2 subiterations are done)
                if(sub_opt_itr>3):

                    # Check if maximum iterations were reached
                    # (NOTE(review): "supoptimization" typo kept — changing user-visible output is out of scope here)
                    if(sub_opt_itr==self.config.max_sub_opt_iterations):
                        print("\n> Maximal iterations of supoptimization problem reached!")
                        break

                    # Check for relative tolerance
                    if(abs(delta_l_relative)<self.config.relative_tolerance_sub_opt):
                        print("\n> Optimization subproblem converged within a relative objective tolerance of ",self.config.relative_tolerance_sub_opt,".")
                        break

                    # Check if value of lagrangian increases
                    if(l>previous_l):
                        print("\n> Value of Lagrange function increased!")
                        break

                # Update design
                X = self.get_design()

                # Store value of Lagrange function for next iteration
                previous_l = l

                # Store initial objective value
                if(opt_itr==1 and sub_opt_itr==1):
                    initial_f = response[only_F_id]["func"]

                # Take time needed for current suboptimization step as well as for the overall opt so far
                subopt_end_time = time.time()
                print("\n> Time needed for current suboptimization step = ",round(subopt_end_time - subopt_start_time,1),"s")
                print("\n> Time needed for total optimization so far = ",round(subopt_end_time - self.opt_start_time,1),"s")

            # Check Convergence (More convergence criterion for major optimization iteration to be implemented!)

            # Check if maximum iterations were reached
            if(opt_itr==self.config.max_opt_iterations):
                print("\n> Maximal iterations of optimization problem reached!")
                break

            # Update lagrange multipliers and penalty factor
            # (NOTE(review): "udpate" is the utility's actual method name — do not "fix" without renaming the C++ side)
            self.vm_utils.udpate_augmented_lagrange_parameters( self.constraints, response )

            # Take time needed for current optimization step
            end_time = time.time()
            print("\n> Time needed for current optimization step = ",round(end_time - start_time,1),"s")

    # --------------------------------------------------------------------------
    def start_penalized_projection(self):
        """Constrained optimization via penalized projection; falls back to
        steepest descent while the (single, scalar) constraint is inactive.

        Note that the current implementation assumes that only one scalar
        objective & constraint is present.
        """
        # Flags to trigger proper function calls
        constraints_given = True

        # Get Id of objective
        only_F_id = None
        for F_id in self.objectives:
            only_F_id = F_id
            break

        # Get Id of constraint
        only_C_id = None
        for C_id in self.constraints:
            only_C_id = C_id
            break

        # Initialize file where design evolution is recorded
        with open(self.config.design_history_file, 'w') as csvfile:
            historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
            row = []
            row.append("itr")
            row.append("\tf")
            row.append("\tdf_absolute[%]")
            row.append("\tdf_relative[%]")
            row.append("\tC["+str(only_C_id)+"]: "+str(self.constraints[only_C_id]["type"])+"\t")
            historyWriter.writerow(row)

        # Define initial design (initial design corresponds to a zero shape update)
        # Note that we assume an incremental design variable in vertex morphing
        X = {}
        for node in self.opt_model_part.Nodes:
            X[node.Id] = [0.,0.,0.]

        # Miscellaneous working variables for data management
        initial_f = 0.0
        previous_f = 0.0

        # Start optimization loop
        for opt_itr in range(1,self.config.max_opt_iterations+1):

            # Some output
            print("\n>===============================================")
            print("> Starting optimization iteration ",opt_itr)
            print(">===============================================\n")

            # Start measuring time needed for current optimization step
            start_time = time.time()

            # Set controller to evaluate objective and constraint
            self.controller.initialize_controls()
            self.controller.get_controls()[only_F_id]["calc_func"] = 1
            self.controller.get_controls()[only_C_id]["calc_func"] = 1

            # Set controller to evaluate objective and constraint gradient if provided
            if(self.objectives[only_F_id]["grad"]=="provided"):
                self.controller.get_controls()[only_F_id]["calc_grad"] = 1
            if(self.constraints[only_C_id]["grad"]=="provided"):
                self.controller.get_controls()[only_C_id]["calc_grad"] = 1

            # Initialize response container
            response = self.controller.create_response_container()

            # Start analyzer according to specified controls
            iterator = str(opt_itr) + ".0"
            self.analyzer( X, self.controller.get_controls(), iterator, response )

            # Scale constraint if specified
            response[only_C_id]["func"] = response[only_C_id]["func"]*self.config.constraint_scaling
            if(self.config.constraint_scaling!=1.0):
                for node_Id in response[only_C_id]["grad"]:
                    response[only_C_id]["grad"][node_Id] = response[only_C_id]["grad"][node_Id]*self.config.constraint_scaling

            # Check if constraint is active (equality constraints are always active;
            # inequality constraints only when violated)
            for func_id in self.config.constraints:
                if(self.config.constraints[func_id]["type"] == "eq"):
                    constraints_given = True
                elif(response[func_id]["func"]>0):
                    constraints_given = True
                else:
                    constraints_given = False
                break

            # Store gradients on the nodes of the model_part
            self.store_grads_on_nodes( response[only_F_id]["grad"], response[only_C_id]["grad"] )

            # Compute unit surface normals at each node of current design
            self.vm_utils.compute_unit_surface_normals()

            # Project received gradients on unit surface normal at each node to obtain normal gradients
            self.vm_utils.project_grad_on_unit_surface_normal( constraints_given )

            # Compute filtered gradients (do backward mapping)
            self.vm_utils.filter_gradients( constraints_given )

            # Compute search direction
            if(constraints_given):
                self.vm_utils.compute_search_direction_penalized_projection( response[only_C_id]["func"] )
            else:
                self.vm_utils.compute_search_direction_steepest_descent()

            # Compute design variable update (do one step in the optimization algorithm)
            self.vm_utils.update_design_variable( self.config.step_size_0 )

            # Compute shape update (do forward mapping)
            self.vm_utils.update_shape()

            # Compute and output some measures to track changes in the objective function
            delta_f_absolute = 0.0
            delta_f_relative = 0.0
            print("\n> Current value of objective function = ",response[only_F_id]["func"])
            if(opt_itr>1):
                delta_f_absolute = 100* ( response[only_F_id]["func"]/initial_f - 1 )
                delta_f_relative = 100*( response[only_F_id]["func"]/previous_f - 1 )
                print("\n> Absolut change of objective function = ",round(delta_f_absolute,6)," [%]")
                print("\n> Relative change of objective function = ",round(delta_f_relative,6)," [%]")
            print("\n> Current value of constraint function = ",round(response[only_C_id]["func"],12))

            # Write design history to file
            with open(self.config.design_history_file, 'a') as csvfile:
                historyWriter = csv.writer(csvfile, delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
                row = []
                row.append(str(opt_itr)+"\t")
                row.append("\t"+str("%.12f"%(response[only_F_id]["func"]))+"\t")
                row.append("\t"+str("%.2f"%(delta_f_absolute))+"\t")
                row.append("\t"+str("%.6f"%(delta_f_relative))+"\t")
                row.append("\t"+str("%.12f"%(response[only_C_id]["func"]))+"\t")
                historyWriter.writerow(row)

            # Write design in GID format
            self.gid_io.write_results(opt_itr, self.opt_model_part, self.config.nodal_results, [])

            # Check convergence (Further convergence criterions to be implemented )
            if(opt_itr>1):

                # Check if maximum iterations were reached
                if(opt_itr==self.config.max_opt_iterations):
                    print("\n> Maximal iterations of optimization problem reached!")
                    break

            # Update design
            X = self.get_design()

            # Store values of for next iteration
            previous_f = response[only_F_id]["func"]

            # Store initial objective value
            if(opt_itr==1):
                initial_f = response[only_F_id]["func"]

            # Take time needed for current optimization step
            end_time = time.time()
            print("\n> Time needed for current optimization step = ",round(end_time - start_time,1),"s")
            print("\n> Time needed for total optimization so far = ",round(end_time - self.opt_start_time,1),"s")

    # --------------------------------------------------------------------------
    def store_grads_on_nodes(self,objective_grads,constraint_grads=None):
        """Copy objective (and optionally constraint) gradients onto the nodal
        OBJECTIVE_SENSITIVITY / CONSTRAINT_SENSITIVITY variables.

        FIX: the original signature used a mutable default ``constraint_grads={}``;
        ``None`` avoids the shared-mutable-default pitfall while behaving identically.
        """
        if constraint_grads is None:
            constraint_grads = {}

        # Read objective gradients
        for node_Id in objective_grads:

            # If deactivated, nodal sensitivities will not be assigned and hence remain zero
            if(self.opt_model_part.Nodes[node_Id].GetSolutionStepValue(SENSITIVITIES_DEACTIVATED)):
                continue

            # If not deactivated, nodal sensitivities will be assigned
            sens_i = Vector(3)
            sens_i[0] = objective_grads[node_Id][0]
            sens_i[1] = objective_grads[node_Id][1]
            sens_i[2] = objective_grads[node_Id][2]
            self.opt_model_part.Nodes[node_Id].SetSolutionStepValue(OBJECTIVE_SENSITIVITY,0,sens_i)

        # When constraint_grads is given also store constraint sensitivities
        # (bool returns false if dictionary is empty)
        if(bool(constraint_grads)):
            for node_Id in constraint_grads:

                # If deactivated, nodal sensitivities will not be assigned and hence remain zero
                if(self.opt_model_part.Nodes[node_Id].GetSolutionStepValue(SENSITIVITIES_DEACTIVATED)):
                    continue

                # If not deactivated, nodal sensitivities will be assigned
                sens_i = Vector(3)
                sens_i[0] = constraint_grads[node_Id][0]
                sens_i[1] = constraint_grads[node_Id][1]
                sens_i[2] = constraint_grads[node_Id][2]
                self.opt_model_part.Nodes[node_Id].SetSolutionStepValue(CONSTRAINT_SENSITIVITY,0,sens_i)

    # --------------------------------------------------------------------------
    def get_design(self):
        """Read and return the current design as {node_Id: [x,y,z]} in the mode
        selected by ``design_output_mode`` (relative / total / absolute)."""
        X = {}
        if(self.config.design_output_mode=="relative"):
            for node in self.opt_model_part.Nodes:
                X[node.Id] = node.GetSolutionStepValue(SHAPE_UPDATE)
        elif(self.config.design_output_mode=="total"):
            for node in self.opt_model_part.Nodes:
                X[node.Id] = node.GetSolutionStepValue(SHAPE_CHANGE_ABSOLUTE)
        elif(self.config.design_output_mode=="absolute"):
            for node in self.opt_model_part.Nodes:
                X[node.Id] = [node.X,node.Y,node.Z]
        else:
            sys.exit("Wrong definition of design_output_mode!")
        return X
# NOTE(review): this chunk continues a per-node loop whose "for node ..." header
# lies before this excerpt; node, xlocal, ylocal, xc, yc come from that scope.
    # Impose a rotational velocity field v = (-y, x)
    node.SetSolutionStepValue(VELOCITY_X, 0, -ylocal)
    node.SetSolutionStepValue(VELOCITY_Y, 0, xlocal)
    # Signed distance to a circle of radius 0.1 centred at (xc, yc),
    # stored in TEMPERATURE as the convected scalar
    d = math.sqrt((xlocal - xc)**2 + (ylocal - yc)**2) - 0.1
    node.SetSolutionStepValue(TEMPERATURE, 0, d)
    #if(d <= 0.0):
    #node.SetSolutionStepValue(TEMPERATURE,0,-1.0)

print("base_model_part =", base_model_part)

# initialize GiD I/O
from gid_output import GiDOutput
gid_io_base = GiDOutput("base",
                        ProjectParameters.VolumeOutput,
                        ProjectParameters.GiDPostMode,
                        ProjectParameters.GiDMultiFileFlag,
                        ProjectParameters.GiDWriteMeshFlag,
                        ProjectParameters.GiDWriteConditionsFlag)

#printing the mesh of the base
gid_io_base.initialize_results(base_model_part)

#mount the search structure (point locator over the base mesh)
locator = BinBasedFastPointLocator2D(base_model_part)
locator.UpdateSearchDatabase()
#locator.UpdateSearchDatabaseAssignedSize(0.01)

#construct the utility to move the points
bfecc_utility = BFECCConvection2D(locator)

base_model_part.CloneTimeStep(0.00)
def __init__(self, opt_model_part, config, analyzer):
    """Initialize the SIMP topology-optimization driver.

    Stores configuration/analyzer/responses, sets up GiD output, seeds every
    element with its SIMP material data and builds the filtering, design-update
    and I/O toolboxes.
    """
    # Set Topology Optimization configurations
    self.config = config

    # GiD writer for design/result output
    self.gid_io = GiDOutput(config.GiD_output_file_name,
                            config.VolumeOutput,
                            config.GiDPostMode,
                            config.GiDMultiFileFlag,
                            config.GiDWriteMeshFlag,
                            config.GiDWriteConditionsFlag)

    # Analyzer and response functions
    self.analyzer = analyzer
    self.objectives = config.objectives
    self.constraints = config.constraints

    print("\n::[Initializing Topology Optimization Application]::")
    print(" The following objectives are defined:")
    for response_id in config.objectives:
        print(" ", response_id, ":", config.objectives[response_id], "\n")
    print(" The following constraints are defined:")
    for response_id in config.constraints:
        print(" ", response_id, ":", config.constraints[response_id])

    # Controller deciding which responses get evaluated
    self.controller = Controller(config)

    # Model parameters
    self.opt_model_part = opt_model_part

    # Seed every element with its SIMP material data
    simp_properties = opt_model_part.Properties[config.simp_property]
    for elem in opt_model_part.Elements:
        young_modulus = simp_properties.GetValue(YOUNG_MODULUS)
        elem.SetValue(E_MIN, config.E_min)
        elem.SetValue(PENAL, config.penalty)
        elem.SetValue(X_PHYS, config.initial_volume_fraction)
        elem.SetValue(X_PHYS_OLD, config.initial_volume_fraction)
        elem.SetValue(E_0, young_modulus)
        elem.SetValue(YOUNG_MODULUS, young_modulus)

    # Continuation strategy (if activated) starts from an unpenalized design
    if self.config.continuation_strategy == 1:
        for elem in self.opt_model_part.Elements:
            elem.SetValue(PENAL, 1)

    # Toolboxes: topology filtering, design update and I/O
    self.filter_utils = TopologyFilteringUtilities(opt_model_part,
                                                   self.config.filter_radius,
                                                   self.config.max_elements_in_filter_radius)
    self.design_update_utils = TopologyUpdatingUtilities(opt_model_part)
    self.io_utils = IOUtilities()
class SIMPMethod:
    """SIMP (Solid Isotropic Material with Penalization) topology
    optimization driver.

    Couples a finite-element ``analyzer`` callback with filtering and
    density-update utilities, and writes every design iteration to GiD.
    """

    # --------------------------------------------------------------------------
    def __init__(self, opt_model_part, config, analyzer):
        """Store settings, create I/O / controller / utility objects and
        seed the SIMP element variables on ``opt_model_part``.

        Args:
            opt_model_part: Kratos model part holding the design domain.
            config: Topology-optimization settings object.
            analyzer: Callable ``analyzer(controls, response, iteration)``
                that runs the FEM analysis and fills the response container.
        """
        # Set Topology Optimization configurations
        self.config = config

        # For GiD output
        self.gid_io = GiDOutput(config.GiD_output_file_name,
                                config.VolumeOutput,
                                config.GiDPostMode,
                                config.GiDMultiFileFlag,
                                config.GiDWriteMeshFlag,
                                config.GiDWriteConditionsFlag)

        # Set analyzer
        self.analyzer = analyzer

        # Set response functions
        self.objectives = config.objectives
        self.constraints = config.constraints

        print("\n::[Initializing Topology Optimization Application]::")
        print(" The following objectives are defined:")
        for func_id in config.objectives:
            print(" ", func_id, ":", config.objectives[func_id], "\n")
        print(" The following constraints are defined:")
        for func_id in config.constraints:
            print(" ", func_id, ":", config.constraints[func_id])

        # Create controller object
        self.controller = Controller(config)

        # Model parameters
        self.opt_model_part = opt_model_part

        # Initialize element variables. E_0 keeps the original stiffness of
        # the SIMP property so the penalized modulus can be recomputed.
        for element_i in opt_model_part.Elements:
            element_i.SetValue(E_MIN, config.E_min)
            element_i.SetValue(PENAL, config.penalty)
            element_i.SetValue(X_PHYS, config.initial_volume_fraction)
            element_i.SetValue(X_PHYS_OLD, config.initial_volume_fraction)
            element_i.SetValue(
                E_0,
                opt_model_part.Properties[config.simp_property].GetValue(
                    YOUNG_MODULUS))
            element_i.SetValue(
                YOUNG_MODULUS,
                opt_model_part.Properties[config.simp_property].GetValue(
                    YOUNG_MODULUS))

        # Only happens if continuation strategy is activated: the penalty
        # factor starts at 1 and is ramped up during the iterations.
        if self.config.continuation_strategy == 1:
            for element_i in self.opt_model_part.Elements:
                element_i.SetValue(PENAL, 1)

        # Add toolbox for topology filtering utilities
        self.filter_utils = TopologyFilteringUtilities(
            opt_model_part,
            self.config.filter_radius,
            self.config.max_elements_in_filter_radius)

        # Add toolbox for topology updating utilities
        self.design_update_utils = TopologyUpdatingUtilities(opt_model_part)

        # Add toolbox for I/O
        self.io_utils = IOUtilities()

    # --------------------------------------------------------------------------
    def optimize(self):
        """Run the selected optimization algorithm end-to-end.

        Initializes the GiD results, writes the initial (iteration 0)
        design, dispatches to the configured algorithm, finalizes the GiD
        output and reports total wall-clock time.

        Raises:
            TypeError: if ``config.optimization_algorithm`` names an
                algorithm that is not implemented.
        """
        print(
            "\n> =============================================================================================================="
        )
        print("> Starting optimization using the following algorithm: ",
              self.config.optimization_algorithm)
        print(
            "> =============================================================================================================="
        )

        # Start timer and assign to object such that the total optimization
        # time may be measured at each step.
        self.opt_start_time = time.time()

        # Initialize the design output in GiD format and print initial 0 state
        self.gid_io.initialize_results(self.opt_model_part)
        self.gid_io.write_results(0, self.opt_model_part,
                                  self.config.nodal_results,
                                  self.config.gauss_points_results)

        # Call for the specified optimization algorithm
        if self.config.optimization_algorithm == "oc_algorithm":
            self.start_oc_algorithm()
        else:
            raise TypeError(
                "Specified optimization_algorithm not implemented!")

        # Finalize the design output in GiD format
        self.gid_io.finalize_results()

        # Stop timer
        opt_end_time = time.time()

        print(
            "\n> =============================================================================================================="
        )
        print("> Finished optimization in ",
              round(opt_end_time - self.opt_start_time, 1), " s!")
        print(
            "> =============================================================================================================="
        )

    # --------------------------------------------------------------------------
    def start_oc_algorithm(self):
        """Optimality-criteria (OC) optimization loop.

        Each iteration: run the analyzer, filter the sensitivities, update
        the element densities, print and write the design to GiD, optionally
        ramp the SIMP penalty (continuation strategy), write restart files,
        and stop on convergence or when the iteration limit is reached.
        """
        # Get Id of objective & constraint. Exactly one of each is expected;
        # None if the corresponding container is empty.
        only_F_id = next(iter(self.objectives), None)
        only_C_id = next(iter(self.constraints), None)

        # Initialize variables for comparison purposes in the loop below.
        pmax = self.config.penalty  # Maximum penalty value used for continuation strategy
        Obj_Function = None
        Obj_Function_old = None
        Obj_Function_initial = None
        Obj_Function_relative_change = None
        Obj_Function_absolute_change = None

        # Print the Topology Optimization settings used in this run.
        print("\n::[Topology Optimization Settings]::")
        print(" E_min: ", self.config.E_min)
        print(" Filter radius: ", self.config.filter_radius)
        print(" Penalty factor: ", self.config.penalty)
        print(" Rel. Tolerance: ", self.config.relative_tolerance)
        print(" Volume Fraction:", self.config.initial_volume_fraction)
        print(" Max. number of iterations:", self.config.max_opt_iterations)

        if self.config.restart_write_frequency < self.config.max_opt_iterations:
            if self.config.restart_write_frequency == 1:
                print(" Make a restart file every iteration")
            elif self.config.restart_write_frequency > 1:
                print(" Make a restart file every",
                      self.config.restart_write_frequency, "iterations")
            else:
                print(" No restart file will be done during the simulation")
        else:
            print(" No restart file will be done during the simulation")

        # Start optimization loop
        for opt_itr in range(1, self.config.max_opt_iterations + 1):

            # Some output
            print(
                "\n> =============================================================================================="
            )
            print("> Starting optimization iteration ", opt_itr)
            print(
                "> ==============================================================================================\n"
            )

            # Start measuring time needed for current optimization step
            start_time = time.time()

            # Initialize response container
            response = self.controller.create_response_container()

            # Set controller to evaluate objective & constraint
            self.controller.initialize_controls()
            self.controller.get_controls()[only_F_id]["calc_func"] = 1
            self.controller.get_controls()[only_C_id]["calc_func"] = 1

            # Set to evaluate objective & constraint gradient if provided
            if self.objectives[only_F_id]["grad"] == "provided":
                self.controller.get_controls()[only_F_id]["calc_grad"] = 1
            if self.constraints[only_C_id]["grad"] == "provided":
                self.controller.get_controls()[only_C_id]["calc_grad"] = 1

            # RUN FEM: call analyzer with current X to compute the response
            # (global strain energy and its sensitivities dC/dX).
            self.analyzer(self.controller.get_controls(), response, opt_itr)

            # Filter sensitivities
            print("\n::[Filter Sensitivities]::")
            self.filter_utils.ApplyFilter(self.config.filter_type,
                                          self.config.filter_kernel)

            # Update design variables (densities) --> new X
            print("\n::[Update Densities]::")
            self.design_update_utils.UpdateDensitiesUsingOCMethod(
                self.config.optimization_algorithm,
                self.config.initial_volume_fraction,
                self.config.grey_scale_filter, opt_itr, self.config.q_max)

            # Print of results
            print("\n::[RESULTS]::")
            Obj_Function = response[only_F_id]["func"]
            C_Function = response[only_C_id]["func"]
            print(" Obj. function value = ",
                  math.ceil(Obj_Function * 1000000) / 1000000)
            print(" Const. function value = ",
                  math.ceil(C_Function * 1000000) / 1000000)

            if opt_itr == 1:
                Obj_Function_initial = Obj_Function
            if opt_itr > 1:
                Obj_Function_relative_change = (
                    Obj_Function - Obj_Function_old) / Obj_Function_initial
                print(
                    " Relative Obj. Function change =",
                    math.ceil(
                        (Obj_Function_relative_change * 100) * 10000) / 10000,
                    "%")
                Obj_Function_absolute_change = (
                    Obj_Function - Obj_Function_initial) / Obj_Function_initial
                print(
                    " Absolute Obj. Function change =",
                    math.ceil(
                        (Obj_Function_absolute_change * 100) * 10000) / 10000,
                    "%")
            Obj_Function_old = Obj_Function

            # Write design in GiD format
            self.gid_io.write_results(opt_itr, self.opt_model_part,
                                      self.config.nodal_results,
                                      self.config.gauss_points_results)

            # Continuation Strategy: keep penalty at 1 for the first 19
            # iterations, then ramp it by 2% per iteration up to pmax.
            if self.config.continuation_strategy == 1:
                print(
                    " Continuation Strategy for current iteration was ACTIVE")
                if opt_itr < 20:
                    for element_i in self.opt_model_part.Elements:
                        element_i.SetValue(PENAL, 1)
                else:
                    for element_i in self.opt_model_part.Elements:
                        element_i.SetValue(
                            PENAL,
                            min(pmax, 1.02 * element_i.GetValue(PENAL)))
            else:
                print(
                    " Continuation Strategy for current iteration was UNACTIVE"
                )

            # Write restart file every selected number of iterations
            restart_filename = self.config.restart_output_file.replace(
                ".mdpa", "_" + str(opt_itr) + ".mdpa")
            if self.config.restart_write_frequency > 0:
                # FIX: compare the modulo against 0, not False (the original
                # relied on bool being a subclass of int).
                if opt_itr % self.config.restart_write_frequency == 0:
                    print("\n::[Restart File]::")
                    print(" Saving file at iteration", opt_itr)
                    self.io_utils.SaveOptimizationResults(
                        self.config.restart_input_file, self.opt_model_part,
                        restart_filename)

            # Check convergence
            if opt_itr > 1:
                # Check if maximum iterations were reached
                if opt_itr == self.config.max_opt_iterations:
                    end_time = time.time()
                    print("\n Time needed for current optimization step = ",
                          round(end_time - start_time, 1), "s")
                    print(" Time needed for total optimization so far = ",
                          round(end_time - self.opt_start_time, 1), "s")
                    print(
                        "\n Maximal iterations of optimization problem reached!"
                    )
                    self.io_utils.SaveOptimizationResults(
                        self.config.restart_input_file, self.opt_model_part,
                        restart_filename)
                    break

                # Check for relative tolerance
                if (abs(Obj_Function_relative_change) <
                        self.config.relative_tolerance):
                    end_time = time.time()
                    print("\n Time needed for current optimization step = ",
                          round(end_time - start_time, 1), "s")
                    print(" Time needed for total optimization so far = ",
                          round(end_time - self.opt_start_time, 1), "s")
                    print(
                        "\n Optimization problem converged within a relative objective tolerance of",
                        self.config.relative_tolerance)
                    self.io_utils.SaveOptimizationResults(
                        self.config.restart_input_file, self.opt_model_part,
                        restart_filename)
                    break

            # Set X_PHYS_OLD to X_PHYS so the next iteration can compute its
            # "change percentage" against the current design.
            for element_i in self.opt_model_part.Elements:
                element_i.SetValue(X_PHYS_OLD, element_i.GetValue(X_PHYS))

            # Take time needed for current optimization step
            end_time = time.time()
            print("\n Time needed for current optimization step = ",
                  round(end_time - start_time, 1), "s")
            print(" Time needed for total optimization so far = ",
                  round(end_time - self.opt_start_time, 1), "s")
# get the nodes of the wall for SA. nodes = fluid_model_part.GetNodes(i) for node in nodes: fluid_solver.wall_nodes.append(node) node.SetSolutionStepValue(TURBULENT_VISCOSITY, 0, 0.0) node.Fix(TURBULENT_VISCOSITY) fluid_solver.Initialize() print("fluid solver created") sys.stdout.flush() # initialize GiD I/O from gid_output import GiDOutput gid_io = GiDOutput(input_file_name, ProjectParameters.VolumeOutput, ProjectParameters.GiDPostMode, ProjectParameters.GiDMultiFileFlag, ProjectParameters.GiDWriteMeshFlag, ProjectParameters.GiDWriteConditionsFlag) if not ProjectParameters.VolumeOutput: cut_list = define_output.DefineCutPlanes() gid_io.define_cuts(fluid_model_part, cut_list) # gid_io.initialize_results(fluid_model_part) # MOD. #_____________________________________________________________________________________________________________________________________ # # F L U I D B L O C K E N D S #_____________________________________________________________________________________________________________________________________ import swimming_DEM_gid_output
print("done ", i)

# Run the ExecuteInitialize hook of every registered process.
# NOTE(review): the bare "a"/"b" prints look like leftover debug output;
# removing them would change the program's stdout, so they are kept here.
for process in list_of_processes:
    print("a")
    process.ExecuteInitialize()
    print("b")

#TODO: think if there is a better way to do this
fluid_model_part = solver.GetComputeModelPart()

# initialize GiD I/O from the "output_configuration" Parameters section
from gid_output import GiDOutput
output_settings = ProjectParameters["output_configuration"]
gid_io = GiDOutput(output_settings["output_filename"].GetString(),
                   output_settings["volume_output"].GetBool(),
                   output_settings["gid_post_mode"].GetString(),
                   output_settings["gid_multi_file_flag"].GetString(),
                   output_settings["gid_write_mesh_flag"].GetBool(),
                   output_settings["gid_write_conditions_flag"].GetBool())
output_time = output_settings["output_time"].GetDouble()

gid_io.initialize_results(fluid_model_part)

for process in list_of_processes:
    process.ExecuteBeforeSolutionLoop()

## Stepping and time settings
Dt = ProjectParameters["problem_data"]["time_step"].GetDouble()
end_time = ProjectParameters["problem_data"]["end_time"].GetDouble()

# NOTE(review): `time = 0.0` shadows the stdlib `time` module if that module
# is imported at file level — verify no later `time.time()` call in this scope.
time = 0.0
step = 0
# NOTE(review): the four statements below are the tail of an enclosing
# per-node loop whose header lies above this chunk — confirm their
# indentation against the full file.
# Impose a rotational velocity field (u = -y, v = x) on the node.
node.SetSolutionStepValue(VELOCITY_X,0,-ylocal)
node.SetSolutionStepValue(VELOCITY_Y,0,xlocal)
# Signed distance from the circle of radius 0.1 centred at (xc, yc),
# stored in TEMPERATURE as the scalar field to be convected.
d = math.sqrt( (xlocal - xc)**2 + (ylocal-yc)**2 ) - 0.1
node.SetSolutionStepValue(TEMPERATURE,0,d)
#if(d <= 0.0):
#node.SetSolutionStepValue(TEMPERATURE,0,-1.0)

print("base_model_part =",base_model_part)

# initialize GiD I/O for the base mesh
from gid_output import GiDOutput
gid_io_base = GiDOutput("base",
                        ProjectParameters.VolumeOutput,
                        ProjectParameters.GiDPostMode,
                        ProjectParameters.GiDMultiFileFlag,
                        ProjectParameters.GiDWriteMeshFlag,
                        ProjectParameters.GiDWriteConditionsFlag)

# printing the mesh of the base
gid_io_base.initialize_results(base_model_part)

# mount the search structure (bin-based point locator over the base mesh)
locator = BinBasedFastPointLocator2D(base_model_part)
locator.UpdateSearchDatabase()
#locator.UpdateSearchDatabaseAssignedSize(0.01)

# construct the utility to move the points (2D BFECC convection)
bfecc_utility = BFECCConvection2D(locator)