def call_SU2_CFD(tag, parallel=False, processors=1): """This calls SU2 to perform an analysis according to the related .cfg file. Assumptions: None Source: N/A Inputs: tag <string> This determines what .cfg is used and what the output file is called. parallel (optional) <boolean> This determines if SU2 will be run in parallel. This setting requires that SU2 has been built to allow this. processors (optional) [-] The number of processors used for a parallel computation. Outputs: <tag>._forces_breakdown.dat This file has standard SU2 run information. CL [-] CD [-] Properties Used: N/A """ if parallel == True: sys.path.append(os.environ['SU2_HOME']) from parallel_computation import parallel_computation parallel_computation(tag + '.cfg', processors) pass else: subprocess.call(['SU2_CFD', tag + '.cfg']) f = open(tag + '_forces_breakdown.dat') SU2_results = Data() # only the total forces have the ":" for line in f: if line.startswith('Total CL:'): print 'CL:', line.split()[2] SU2_results.coefficient_of_lift = float(line.split()[2]) elif line.startswith('Total CD:'): print 'CD:', line.split()[2] SU2_results.coefficient_of_drag = float(line.split()[2]) elif line.startswith('Total CMx:'): print 'CMx:', line.split()[2] SU2_results.moment_coefficient_x = float(line.split()[2]) elif line.startswith('Total CMy:'): print 'CMy:', line.split()[2] SU2_results.moment_coefficient_y = float(line.split()[2]) elif line.startswith('Total CMz:'): print 'CMz:', line.split()[2] SU2_results.moment_coefficient_z = float(line.split()[2]) CL = SU2_results.coefficient_of_lift CD = SU2_results.coefficient_of_drag return CL, CD
def call_SU2_CFD(tag, parallel=False, processors=1): """This calls SU2 to perform an analysis according to the related .cfg file. Assumptions: None Source: N/A Inputs: tag <string> This determines what .cfg is used and what the output file is called. parallel (optional) <boolean> This determines if SU2 will be run in parallel. This setting requires that SU2 has been built to allow this. processors (optional) [-] The number of processors used for a parallel computation. Outputs: <tag>_history.dat This file has the SU2 convergence history. CL [-] CD [-] Properties Used: N/A """ if parallel == True: sys.path.append(os.environ['SU2_HOME']) from parallel_computation import parallel_computation parallel_computation(tag + '.cfg', processors) pass else: subprocess.call(['SU2_CFD', tag + '.cfg']) f = open(tag + '_history.dat') SU2_results = Data() lines = f.readlines() final_state = lines[-1].split(',') # Lift and Drag CL = float(final_state[1]) CD = float(final_state[2]) SU2_results.coefficient_of_lift = CL SU2_results.coefficient_of_drag = CD print 'CL:', CL print 'CD:', CD # Moments # Moments are currently not recorded since no # reasonable reference length has been chosen #CMx = float(final_state[4]) #CMy = float(final_state[5]) #CMz = float(final_state[6]) #SU2_results.moment_coefficient_x = CMx #SU2_results.moment_coefficient_y = CMy #SU2_results.moment_coefficient_z = CMz #print 'CMx:',CMx #print 'CMy:',CMy #print 'CMz:',CMz return CL, CD
def runCFD(self, config_file, bParallel=True):
    # 'pc' is assumed to be SU2's parallel_computation module, imported
    # elsewhere in this class's module; the processor count (8) is hardcoded.
    if bParallel:
        pc.parallel_computation(config_file, 8)
    else:
        subprocess.call(['SU2_CFD', config_file])
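
# Illustrative calls (assume 'pc' aliases SU2's parallel_computation module and
# that this method belongs to a solver wrapper object named 'solver'):
#
#   solver.runCFD('wing.cfg')                   # parallel, 8 processors (hardcoded above)
#   solver.runCFD('wing.cfg', bParallel=False)  # serial SU2_CFD run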
import os
import shutil

# SU2 Python framework modules, assumed available from the SU2 installation.
import libSU2
import libSU2_run
from parallel_computation import parallel_computation


def mesh_adaptation(filename, partitions=0, cycles=1, overwrite=False, save_all=False):

    # General and default parameters
    Config_INP_filename = filename
    Config_CFD_filename = "config_CFD_" + Config_INP_filename
    Config_MAC_filename = "config_MAC_" + Config_INP_filename
    #Mesh_MAC_filename  = "mesh_MAC_" + filename.replace(".cfg",".su2")
    finest_mesh_filename = "mesh_finest.su2"
    finest_flow_filename = "restart_flow_finest.dat"
    finest_lin_filename  = "restart_lin_finest.dat"
    finest_adj_filename  = "restart_adj_finest.dat"

    # Assumes serial with partitions = 1
    if partitions == 1:
        partitions = 0

    # Get parameters
    params_get         = libSU2.Get_ConfigParams(Config_INP_filename)
    kind_adapt         = params_get['KIND_ADAPT']
    objfunc            = params_get['ADJ_OBJFUNC']
    restart_flow_file  = params_get['RESTART_FLOW_FILENAME']
    restart_adj_file   = params_get['RESTART_ADJ_FILENAME']
    original_mesh_file = params_get['MESH_FILENAME']
    #output_mesh_file  = params_get['MESH_OUT_FILENAME']
    Mesh_MAC_filename  = params_get['MESH_OUT_FILENAME']
    cadj_prefix        = libSU2.get_AdjointPrefix(objfunc)

    # Get solution file names
    volume_flow_file  = params_get['VOLUME_FLOW_FILENAME']
    volume_adj_file   = params_get['VOLUME_ADJ_FILENAME']
    surface_flow_file = params_get['SURFACE_FLOW_FILENAME']
    surface_adj_file  = params_get['SURFACE_ADJ_FILENAME']
    history_file      = params_get['CONV_FILENAME']

    # Get mesh filenames and filetypes
    mesh_filetype = params_get['MESH_FORMAT']
    if mesh_filetype == "CGNS":
        error_str = ("Mesh adaptation does not currently support CGNS grid files. "
                     "Please convert your CGNS mesh to SU2 format using the "
                     "CGNS_TO_SU2 flag in the configuration file, re-specify the "
                     "mesh file to the native .su2 file and set the MESH_FORMAT "
                     "flag to SU2.")
        print("\n*****\n" + error_str + "\n*****\n")
        return 1
    elif mesh_filetype == "NETCDF_ASCII":
        error_str = ("Mesh adaptation does not currently support NETCDF_ASCII grid "
                     "files. Please convert your mesh to SU2 format, re-specify the "
                     "mesh file to the native .su2 file and set the MESH_FORMAT "
                     "flag to SU2.")
        print("\n*****\n" + error_str + "\n*****\n")
        return 1

    # Get output solution filetypes
    output_filetype = params_get['OUTPUT_FORMAT']
    if output_filetype == "TECPLOT":
        vol_file_ext = ".plt"
    elif output_filetype == "PARAVIEW":
        vol_file_ext = ".vtk"

    if kind_adapt in ("ROBUST", "COMPUTABLE_ROBUST"):
        restart_lin_file = params_get['RESTART_LIN_FILENAME']

    # Loop over number of adaptation cycles
    for iAdaptCycle in range(cycles):

        # Copy original input file to working files
        shutil.copy(Config_INP_filename, Config_MAC_filename)
        shutil.copy(Config_INP_filename, Config_CFD_filename)

        # Run direct flow simulation.
        # For iAdaptCycle == 0, store restart file, objective function
        # and original mesh file.
        params_set = {'MATH_PROBLEM': 'DIRECT'}
        if iAdaptCycle > 0:
            params_set.update({'RESTART_SOL'            : 'YES',
                               'ADJ_OBJFUNC'            : objfunc,
                               'RESTART_FLOW_FILENAME'  : restart_flow_file,
                               'RESTART_ADJ_FILENAME'   : restart_adj_file,
                               'SOLUTION_FLOW_FILENAME' : restart_flow_file,
                               'MESH_FILENAME'          : Mesh_MAC_filename})
            if kind_adapt in ("ROBUST", "COMPUTABLE_ROBUST"):
                params_set.update({'RESTART_LIN_FILENAME': restart_lin_file})

        # Load the new config file options and run the direct problem
        libSU2.Set_ConfigParams(Config_CFD_filename, params_set)
        if partitions > 1:
            parallel_computation(Config_CFD_filename, partitions)
        else:
            libSU2_run.SU2_CFD(Config_CFD_filename, partitions)

        # Copy flow solution & history file
        if save_all:
            print("Saving cycle " + str(iAdaptCycle) + " flow solution and history files...")
            print("Saving " + volume_flow_file + "_cycle" + str(iAdaptCycle) + vol_file_ext)
            print("Saving " + surface_flow_file + "_cycle" + str(iAdaptCycle) + vol_file_ext)
            print("Saving " + history_file + "_flow_cycle" + str(iAdaptCycle) + vol_file_ext)
            shutil.move(volume_flow_file + vol_file_ext,
                        volume_flow_file + "_cycle" + str(iAdaptCycle) + vol_file_ext)
            shutil.move(surface_flow_file + vol_file_ext,
                        surface_flow_file + "_cycle" + str(iAdaptCycle) + vol_file_ext)
            shutil.move(surface_flow_file + ".csv",
                        surface_flow_file + "_cycle" + str(iAdaptCycle) + ".csv")
            shutil.move(history_file + vol_file_ext,
                        history_file + "_flow_cycle" + str(iAdaptCycle) + vol_file_ext)

        # If needed, run the adjoint simulation.
        # For the first adaptation cycle, use the filenames of the original .cfg file.
        if kind_adapt in ("GRAD_ADJOINT", "GRAD_FLOW_ADJ", "ROBUST",
                          "COMPUTABLE_ROBUST", "COMPUTABLE", "REMAINING"):
            params_set = {'MATH_PROBLEM'           : 'ADJOINT',
                          'SOLUTION_FLOW_FILENAME' : restart_flow_file}
            if iAdaptCycle > 0:
                params_set.update({'RESTART_SOL'           : 'YES',
                                   'SOLUTION_ADJ_FILENAME' : restart_adj_file,
                                   'MESH_FILENAME'         : Mesh_MAC_filename})

            # Load the new config file options and run the adjoint problem
            libSU2.Set_ConfigParams(Config_CFD_filename, params_set)
            if partitions > 1:
                parallel_computation(Config_CFD_filename, partitions)
            else:
                libSU2_run.SU2_CFD(Config_CFD_filename, partitions)

            # Copy adjoint solution & history file
            if save_all:
                print("Saving cycle " + str(iAdaptCycle) + " adjoint solution and history files...")
                print("Saving " + volume_adj_file + "_cycle" + str(iAdaptCycle) + vol_file_ext)
                print("Saving " + surface_adj_file + "_adj_cycle" + str(iAdaptCycle) + vol_file_ext)
                print("Saving " + history_file + "_adj_cycle" + str(iAdaptCycle) + vol_file_ext)
                shutil.move(volume_adj_file + vol_file_ext,
                            volume_adj_file + "_cycle" + str(iAdaptCycle) + vol_file_ext)
                shutil.move(surface_adj_file + vol_file_ext,
                            surface_adj_file + "_cycle" + str(iAdaptCycle) + vol_file_ext)
                shutil.move(surface_adj_file + ".csv",
                            surface_adj_file + "_cycle" + str(iAdaptCycle) + ".csv")
                shutil.move(history_file + vol_file_ext,
                            history_file + "_adj_cycle" + str(iAdaptCycle) + vol_file_ext)

        # If needed, change the parameters to run the first linear simulation.
        # For the first adaptation cycle, use the filenames from the original .cfg file.
        if kind_adapt == "COMPUTABLE_ROBUST":
            params_set = {'MATH_PROBLEM'           : 'LINEARIZED',
                          'SOLUTION_FLOW_FILENAME' : restart_flow_file}
            if iAdaptCycle > 0:
                params_set.update({'RESTART_SOL'          : 'YES',
                                   'RESTART_LIN_FILENAME' : restart_lin_file,
                                   'MESH_FILENAME'        : Mesh_MAC_filename})

            # Load the new config file options and run the linearized problem
            libSU2.Set_ConfigParams(Config_CFD_filename, params_set)
            if partitions > 1:
                parallel_computation(Config_CFD_filename, partitions)
            else:
                libSU2_run.SU2_CFD(Config_CFD_filename, partitions)

        # Change the parameters to do a direct and adjoint iteration over a fine grid
        if (kind_adapt in ("ROBUST", "COMPUTABLE", "COMPUTABLE_ROBUST", "REMAINING")
                and (iAdaptCycle < cycles - 1 or cycles == 1)):

            # Create the fine grid and interpolate the flow solution
            # from the coarse to the refined grid
            params_set = {'KIND_ADAPT'             : "FULL_FLOW",
                          'SOLUTION_FLOW_FILENAME' : restart_flow_file,
                          'RESTART_FLOW_FILENAME'  : finest_flow_filename,
                          'MESH_FILENAME'          : original_mesh_file,
                          'MESH_OUT_FILENAME'      : finest_mesh_filename}
            if iAdaptCycle > 0:
                params_set.update({'MESH_FILENAME': Mesh_MAC_filename})

            # Run the mesh adaptation module
            libSU2.Set_ConfigParams(Config_MAC_filename, params_set)
            libSU2_run.SU2_MAC(Config_MAC_filename, partitions)

            # Create the fine grid and interpolate the adjoint solution
            # from the coarse to the refined grid
            params_set = {'KIND_ADAPT'             : "FULL_ADJOINT",
                          'SOLUTION_FLOW_FILENAME' : restart_flow_file,
                          'SOLUTION_ADJ_FILENAME'  : restart_adj_file,
                          'RESTART_FLOW_FILENAME'  : finest_flow_filename,
                          'RESTART_ADJ_FILENAME'   : finest_adj_filename,
                          'MESH_FILENAME'          : original_mesh_file,
                          'MESH_OUT_FILENAME'      : finest_mesh_filename}
            if iAdaptCycle > 0:
                params_set.update({'MESH_FILENAME': Mesh_MAC_filename})

            # Run the mesh adaptation module
            libSU2.Set_ConfigParams(Config_MAC_filename, params_set)
            libSU2_run.SU2_MAC(Config_MAC_filename, partitions)

            # Create the fine grid and interpolate the linear solution
            # from the coarse to the refined grid
            if kind_adapt == "COMPUTABLE_ROBUST":
                params_set = {'KIND_ADAPT'             : "FULL_LINEAR",
                              'SOLUTION_FLOW_FILENAME' : restart_flow_file,
                              'SOLUTION_LIN_FILENAME'  : restart_lin_file,
                              'RESTART_FLOW_FILENAME'  : finest_flow_filename,
                              'RESTART_ADJ_FILENAME'   : finest_lin_filename,
                              'MESH_FILENAME'          : original_mesh_file,
                              'MESH_OUT_FILENAME'      : finest_mesh_filename}
                if iAdaptCycle > 0:
                    params_set.update({'MESH_FILENAME': Mesh_MAC_filename})

                # Run the mesh adaptation module
                libSU2.Set_ConfigParams(Config_MAC_filename, params_set)
                libSU2_run.SU2_MAC(Config_MAC_filename, partitions)

            # One iteration of the flow solver on the finest grid.
            # Always start from the interpolated solution and store the residual
            # in the solution file for the finest grid.
            # No multigrid or convergence acceleration.
            params_set = {'MATH_PROBLEM'           : 'DIRECT',
                          'EXT_ITER'               : 2,
                          'RESTART_SOL'            : 'YES',
                          'SOLUTION_FLOW_FILENAME' : finest_flow_filename,
                          'STORE_RESIDUAL'         : 'YES',
                          'RESTART_FLOW_FILENAME'  : finest_flow_filename,
                          'MESH_FILENAME'          : finest_mesh_filename,
                          'FULLMG'                 : 'NO',
                          'MGLEVEL'                : 0,
                          'MGCYCLE'                : 0,
                          'MG_PRE_SMOOTH'          : '( 0 )',
                          'MG_POST_SMOOTH'         : '( 0 )',
                          'MG_CORRECTION_SMOOTH'   : '( 0 )'}
            libSU2.Set_ConfigParams(Config_CFD_filename, params_set)
            if partitions > 1:
                parallel_computation(Config_CFD_filename, partitions)
            else:
                libSU2_run.SU2_CFD(Config_CFD_filename, partitions)

            # One iteration of the adjoint solver on the finest grid,
            # with the same restart and multigrid settings.
            params_set = {'MATH_PROBLEM'           : 'ADJOINT',
                          'EXT_ITER'               : 2,
                          'RESTART_SOL'            : 'YES',
                          'SOLUTION_FLOW_FILENAME' : finest_flow_filename,
                          'SOLUTION_ADJ_FILENAME'  : finest_adj_filename,
                          'MESH_FILENAME'          : finest_mesh_filename,
                          'FULLMG'                 : 'NO',
                          'MGLEVEL'                : 0,
                          'MGCYCLE'                : 0,
                          'MG_PRE_SMOOTH'          : '( 0 )',
                          'MG_POST_SMOOTH'         : '( 0 )',
                          'MG_CORRECTION_SMOOTH'   : '( 0 )'}
            libSU2.Set_ConfigParams(Config_CFD_filename, params_set)
            if partitions > 1:
                parallel_computation(Config_CFD_filename, partitions)
            else:
                libSU2_run.SU2_CFD(Config_CFD_filename, partitions)

            # One iteration of the linear solver on the finest grid,
            # with the same restart and multigrid settings.
            if kind_adapt == "COMPUTABLE_ROBUST":
                params_set = {'MATH_PROBLEM'           : 'LINEARIZED',
                              'EXT_ITER'               : 2,
                              'RESTART_SOL'            : 'YES',
                              'SOLUTION_FLOW_FILENAME' : finest_flow_filename,
                              'SOLUTION_LIN_FILENAME'  : finest_lin_filename,
                              'RESTART_LIN_FILENAME'   : finest_lin_filename,
                              'MESH_FILENAME'          : finest_mesh_filename,
                              'FULLMG'                 : 'NO',
                              'MGLEVEL'                : 0,
                              'MGCYCLE'                : 0,
                              'MG_PRE_SMOOTH'          : '( 0 )',
                              'MG_POST_SMOOTH'         : '( 0 )',
                              'MG_CORRECTION_SMOOTH'   : '( 0 )'}
                libSU2.Set_ConfigParams(Config_CFD_filename, params_set)
                if partitions > 1:
                    parallel_computation(Config_CFD_filename, partitions)
                else:
                    libSU2_run.SU2_CFD(Config_CFD_filename, partitions)

        # Perform adaptation using the solution files above
        if kind_adapt in ("GRAD_FLOW", "GRAD_ADJOINT", "GRAD_FLOW_ADJ"):
            params_set = {'SOLUTION_FLOW_FILENAME' : restart_flow_file,
                          'SOLUTION_ADJ_FILENAME'  : restart_adj_file}
        elif kind_adapt in ("COMPUTABLE", "REMAINING"):
            params_set = {'SOLUTION_FLOW_FILENAME' : finest_flow_filename,
                          'SOLUTION_ADJ_FILENAME'  : finest_adj_filename,
                          'RESTART_FLOW_FILENAME'  : restart_flow_file,
                          'RESTART_ADJ_FILENAME'   : restart_adj_file}
        elif kind_adapt in ("ROBUST", "COMPUTABLE_ROBUST"):
            params_set = {'SOLUTION_FLOW_FILENAME' : finest_flow_filename,
                          'SOLUTION_ADJ_FILENAME'  : finest_adj_filename,
                          'SOLUTION_LIN_FILENAME'  : finest_lin_filename,
                          'RESTART_FLOW_FILENAME'  : restart_flow_file,
                          'RESTART_ADJ_FILENAME'   : restart_adj_file,
                          'RESTART_LIN_FILENAME'   : restart_lin_file}
        params_set.update({'KIND_ADAPT'        : kind_adapt,
                           'MESH_OUT_FILENAME' : Mesh_MAC_filename})
        if iAdaptCycle > 0:
            params_set.update({'MESH_FILENAME': Mesh_MAC_filename})

        # Run the mesh adaptation module
        libSU2.Set_ConfigParams(Config_MAC_filename, params_set)
        libSU2_run.SU2_MAC(Config_MAC_filename, partitions)

        # Copy cycle mesh file
        if save_all:
            print("Saving cycle " + str(iAdaptCycle) + " mesh file...")
            shutil.copy(Mesh_MAC_filename,
                        Mesh_MAC_filename.replace(".su2", "_cycle" + str(iAdaptCycle) + ".su2"))

    # Clean up
    if overwrite:
        os.rename(Mesh_MAC_filename, original_mesh_file)
    if kind_adapt in ("ROBUST", "COMPUTABLE", "COMPUTABLE_ROBUST", "REMAINING"):
        os.remove(finest_mesh_filename)
        os.remove(finest_flow_filename)
        if kind_adapt == "COMPUTABLE_ROBUST":
            os.remove(finest_lin_filename)
    if save_all:
        os.remove(Mesh_MAC_filename)
    os.remove(Config_MAC_filename)
    os.remove(Config_CFD_filename)
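
# Example invocation (hypothetical configuration name; assumes the .cfg file
# defines KIND_ADAPT, ADJ_OBJFUNC and the mesh/restart filenames it references):
#
#   mesh_adaptation('inv_NACA0012.cfg', partitions=4, cycles=3,
#                   overwrite=True, save_all=False)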
import os
import re
import shutil

# The helpers used below (logger, get_copasi, refresh_directory,
# get_rand_alphanum_str, replace_string_in_file, replace_str_copasi_sim_report,
# parallel_computation) are assumed to come from this project's utility modules.


def generate_data(model, inputdir, outputdir, cluster_type="pp", pp_cpus=2, runs=1):
    """
    The first pipeline step: data generation.

    :param model: the model to process
    :param inputdir: the directory containing the model
    :param outputdir: the directory containing the output files
    :param cluster_type: pp for local Parallel Python, lsf for Load Sharing
           Facility, sge for Sun Grid Engine
    :param pp_cpus: the number of CPUs used by Parallel Python
    :param runs: the number of model simulations
    :return: no output
    """
    if runs < 1:
        logger.error("variable " + str(runs) + " must be greater than 0. "
                     "Please, check your configuration file.")
        return

    if not os.path.isfile(os.path.join(inputdir, model)):
        logger.error(os.path.join(inputdir, model) + " does not exist.")
        return

    copasi = get_copasi()
    if copasi is None:
        logger.error("CopasiSE not found! Please check that CopasiSE is installed "
                     "and in the PATH environmental variable.")
        return

    # Folder preparation
    refresh_directory(outputdir, model[:-4])

    # Execute the requested number of simulations
    logger.info("Simulating model " + model + " for " + str(runs) + " time(s)")

    # Replicate the COPASI file and rename its report file
    groupid = "_" + get_rand_alphanum_str(20) + "_"
    group_model = model[:-4] + groupid
    for i in range(1, runs + 1):
        shutil.copyfile(os.path.join(inputdir, model),
                        os.path.join(inputdir, group_model) + str(i) + ".cps")
        replace_string_in_file(os.path.join(inputdir, group_model) + str(i) + ".cps",
                               model[:-4] + ".csv",
                               group_model + str(i) + ".csv")

    # Run COPASI in parallel.
    # To keep things simple, a reversed slice of groupid is used as the
    # placeholder string. It will likely differ from groupid itself and is
    # replaced with the iteration number by parallel_computation.
    str_to_replace = groupid[10::-1]
    command = copasi + " " + os.path.join(inputdir, group_model + str_to_replace + ".cps")
    parallel_computation(command, str_to_replace, cluster_type, runs, outputdir, pp_cpus)

    # Move the report files
    report_files = [f for f in os.listdir(inputdir)
                    if re.match(group_model + r'[0-9]+.*\.csv', f)
                    or re.match(group_model + r'[0-9]+.*\.txt', f)]
    for report_file in report_files:
        # Replace some strings in the report file
        replace_str_copasi_sim_report(os.path.join(inputdir, report_file))
        # Rename and move the output file
        shutil.move(os.path.join(inputdir, report_file),
                    os.path.join(outputdir, report_file.replace(groupid, "_")[:-4] + ".csv"))

    # Remove the replicated COPASI files
    repeated_copasi_files = [f for f in os.listdir(inputdir)
                             if re.match(group_model + r'[0-9]+.*\.cps', f)]
    for copasi_file in repeated_copasi_files:
        os.remove(os.path.join(inputdir, copasi_file))
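
# Illustrative call (hypothetical model and directories; the model name must
# end in '.cps', since the code strips the last four characters to form names):
#
#   generate_data('insulin_receptor.cps', 'Models', 'Results',
#                 cluster_type='pp', pp_cpus=4, runs=10)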
# Variant of generate_data for parameter estimation pipelines; assumes the
# same project helpers as above, plus the RandomiseParameters class.
def generate_data(model, inputdir, cluster_type, pp_cpus, nfits, outputdir,
                  sim_data_dir, updated_models_dir):
    """
    The first pipeline step: data generation.

    :param model: the model to process
    :param inputdir: the directory containing the model
    :param cluster_type: pp for Parallel Python, lsf for Load Sharing Facility,
           sge for Sun Grid Engine
    :param pp_cpus: the number of CPUs for Parallel Python
    :param nfits: the number of fits to perform
    :param outputdir: the directory to store the results
    :param sim_data_dir: the directory containing the simulation data sets
    :param updated_models_dir: the directory containing the COPASI models with
           updated parameters for each estimation
    :return: no output
    """
    if int(nfits) < 1:
        logger.error("variable " + str(nfits) + " must be greater than 0. "
                     "Please, check your configuration file.")
        return

    if not os.path.isfile(os.path.join(inputdir, model)):
        logger.error(os.path.join(inputdir, model) + " does not exist.")
        return

    # Folder preparation
    refresh_directory(sim_data_dir, model[:-4])
    refresh_directory(updated_models_dir, model[:-4])

    copasi = get_copasi()
    if copasi is None:
        logger.error("CopasiSE not found! Please check that CopasiSE is installed "
                     "and in the PATH environmental variable.")
        return

    logger.info("Configure Copasi:")
    logger.info("Replicate a Copasi file configured for parameter estimation "
                "and randomise the initial parameter values")
    groupid = "_" + get_rand_alphanum_str(20) + "_"
    group_model = model[:-4] + groupid
    pre_param_estim = RandomiseParameters(inputdir, model)
    pre_param_estim.print_parameters_to_estimate()
    pre_param_estim.generate_instances_from_template(nfits, groupid)

    logger.info("\n")
    logger.info("Parallel parameter estimation:")
    # To keep things simple, a reversed slice of groupid is used as the
    # placeholder that parallel_computation replaces with the iteration number.
    str_to_replace = groupid[10::-1]
    command = (copasi + " -s "
               + os.path.join(inputdir, group_model + str_to_replace + ".cps") + " "
               + os.path.join(inputdir, group_model + str_to_replace + ".cps"))
    parallel_computation(command, str_to_replace, cluster_type, nfits, outputdir, pp_cpus)

    # Move the report files to sim_data_dir, dropping the groupid
    report_files = [f for f in os.listdir(inputdir)
                    if re.match(group_model + r"[0-9]+.*\.csv", f)
                    or re.match(group_model + r"[0-9]+.*\.txt", f)]
    for report_file in report_files:
        shutil.move(os.path.join(inputdir, report_file),
                    os.path.join(sim_data_dir, report_file.replace(groupid, "_")))

    # Move the replicated COPASI files (now holding the updated parameters)
    # to updated_models_dir instead of deleting them
    repeated_copasi_files = [f for f in os.listdir(inputdir)
                             if re.match(group_model + r"[0-9]+.*\.cps", f)]
    for copasi_file in repeated_copasi_files:
        # os.remove(os.path.join(inputdir, copasi_file))
        shutil.move(os.path.join(inputdir, copasi_file),
                    os.path.join(updated_models_dir, copasi_file.replace(groupid, "_")))
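
# Illustrative call (hypothetical arguments, mirroring the signature above):
#
#   generate_data('insulin_receptor.cps', 'Models', 'pp', 4, 25,
#                 'Results', 'Results/sim_data', 'Results/updated_models')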