# or it can point to a specific git repo and commit id. # This method should ensure future, independent, reproducibility of results. # cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1') # compilation depends on computer specific settings. The $GFDL_ENV # environment variable is used to determine which `$GFDL_BASE/src/extra/env` file # is used to load the correct compilers. The env file is always loaded from # $GFDL_BASE and not the checked out git repo. cb.compile( ) # compile the source code to working directory $GFDL_WORK/codebase # create an Experiment object to handle the configuration of model parameters # and output diagnostics exp = Experiment('sqland_newbucket_finalAPqflux', codebase=cb) #Add any input files that are necessary for a particular experiment. exp.inputfiles = [ os.path.join(GFDL_BASE, 'input/squareland/land.nc'), os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc'), os.path.join(GFDL_BASE, 'input/aquaplanet/ocean_qflux.nc') ] #Tell model how to write diagnostics diag = DiagTable() diag.add_file('atmos_monthly', 30, 'days', time_units='days') #Tell model which diagnostics to write diag.add_field('dynamics', 'ps', time_avg=True) diag.add_field('dynamics', 'bk') diag.add_field('dynamics', 'pk')
# useful for iterative development cb = SocratesCodeBase.from_directory(GFDL_BASE) # or it can point to a specific git repo and commit id. # This method should ensure future, independent, reproducibility of results. # cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1') # compilation depends on computer specific settings. The $GFDL_ENV # environment variable is used to determine which `$GFDL_BASE/src/extra/env` file # is used to load the correct compilers. The env file is always loaded from # $GFDL_BASE and not the checked out git repo. # create an Experiment object to handle the configuration of model parameters # and output diagnostics exp = Experiment('soc_aquaplanet', codebase=cb) exp.clear_rundir() inputfiles = [os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc')] #Tell model how to write diagnostics diag = DiagTable() diag.add_file('atmos_monthly', 30, 'days', time_units='days') #Write out diagnostics need for vertical interpolation post-processing diag.add_field('dynamics', 'ps', time_avg=True) diag.add_field('dynamics', 'bk') diag.add_field('dynamics', 'pk') diag.add_field('dynamics', 'zsurf') #Tell model which diagnostics to write
# or it can point to a specific git repo and commit id. # This method should ensure future, independent, reproducibility of results. # cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1') # compilation depends on computer specific settings. The $GFDL_ENV # environment variable is used to determine which `$GFDL_BASE/src/extra/env` file # is used to load the correct compilers. The env file is always loaded from # $GFDL_BASE and not the checked out git repo. cb.compile( ) # compile the source code to working directory $GFDL_WORK/codebase # create an Experiment object to handle the configuration of model parameters # and output diagnostics exp = Experiment('variable_co2_rrtm_test_experiment', codebase=cb) exp.inputfiles = [ os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc'), os.path.join(base_dir, 'input/co2.nc') ] #Tell model how to write diagnostics diag = DiagTable() diag.add_file('atmos_monthly', 30, 'days', time_units='days') #Tell model which diagnostics to write diag.add_field('dynamics', 'ps', time_avg=True) diag.add_field('dynamics', 'bk') diag.add_field('dynamics', 'pk') diag.add_field('atmosphere', 'precipitation', time_avg=True)
# Sweep over convection scheme / mixed-layer depth / perihelion longitude for a
# Titan-like grey-radiation run. NOTE(review): chunk ends before any exp.run()
# call — the rest of the loop body is outside this view.
if __name__ == "__main__":
    conv_schemes = ['none']
    depths = [2.]
    pers = [93]  #longitude of perihelion
    scale = 1.  # multiplier applied to all planetary constants below
    for conv in conv_schemes:
        for depth_val in depths:
            for per_value in pers:
                exp = Experiment('grey_titan_T21', codebase=cb)  #name of folder in which .nc files are output
                exp.clear_rundir()
                exp.diag_table = diag
                exp.namelist = namelist.copy()
                exp.namelist['constants_nml']['grav'] = scale * 1.354  #surface gravity
                exp.namelist['constants_nml']['pstd'] = scale * 14670000.0  #surface pressure - 100 times bigger than below values
                exp.namelist['constants_nml']['pstd_mks'] = scale * 146700.0  #in Pa
                exp.namelist['spectral_dynamics_nml']['reference_sea_level_press'] = scale * 146700.0  #in Pa
                exp.namelist['idealized_moist_phys_nml']['convection_scheme'] = conv
def conduct_comparison_on_test_case(base_commit, later_commit, test_case_name, repo_to_use='[email protected]:execlim/Isca', num_cores_to_use=4):
    """Check that two commits produce bit-identical output for one test case.

    Checks out and compiles each commit in turn, runs the named test case for
    3 days with the appropriate namelist, then compares every variable in every
    diagnostic output file. Any non-zero difference, a crashed run, or a failed
    compile marks the test as a failure.

    :param base_commit: reference commit id
    :param later_commit: commit id to compare against the reference
    :param test_case_name: name of the test case; selects the namelist/inputs
    :param repo_to_use: git repo to check the commits out from
    :param num_cores_to_use: cores for the model run
    :return: 'pass' or 'fail'
    :raises AttributeError: if the two runs used different input namelists
        (stale data from a previous trip test run with different settings).
    """
    data_dir_dict = {}
    nml_use, input_files_use = get_nml_diag(test_case_name)
    diag_use = define_simple_diag_table()
    test_pass = True
    run_complete = True
    compile_successful = True

    #Do the run for each of the commits in turn
    for s in [base_commit, later_commit]:
        exp_name = test_case_name + '_trip_test_21_' + s
        # Socrates-based test cases need the Socrates codebase variant.
        if 'socrates' in test_case_name or 'ape_aquaplanet' in test_case_name:
            cb = SocratesCodeBase(repo=repo_to_use, commit=s)
        else:
            cb = IscaCodeBase(repo=repo_to_use, commit=s)
        try:
            cb.compile()
            exp = Experiment(exp_name, codebase=cb)
            exp.namelist = nml_use.copy()
            exp.diag_table = diag_use
            exp.inputfiles = input_files_use
            #Only run for 3 days to keep things short.
            exp.update_namelist({'main_nml': {'days': 3}})
        # Was a bare `except:`, which would also swallow KeyboardInterrupt /
        # SystemExit; narrow to Exception so the test can still be aborted.
        except Exception:
            run_complete = False
            test_pass = False
            compile_successful = False
            continue
        try:
            # run with a progress bar
            with exp_progress(exp, description=s):
                exp.run(1, use_restart=False, num_cores=num_cores_to_use)
        except FailedRunError:
            #If run fails then test automatically fails
            run_complete = False
            test_pass = False
            continue
        data_dir_dict[s] = exp.datadir

    if run_complete:
        # Sanity check (hoisted out of the per-file loop below — it does not
        # depend on the diag file): both runs must have used the same namelist.
        base_experiment_input_nml = f90nml.read(data_dir_dict[base_commit] + '/run0001/input.nml')
        later_commit_input_nml = f90nml.read(data_dir_dict[later_commit] + '/run0001/input.nml')
        if base_experiment_input_nml != later_commit_input_nml:
            raise AttributeError(
                f'The two experiments to be compared have been run using different input namelists, and so the results may be different because of this. This only happens when you have run the trip tests using one of the commit IDs before, and that you happen to have used a different version of the test cases on that previous occasion. Try removing both {data_dir_dict[base_commit]} and {data_dir_dict[later_commit]} and try again.'
            )
        #For each of the diag files defined, compare the output
        for diag_file_entry in diag_use.files.keys():
            base_commit_dataset = xar.open_dataset(
                data_dir_dict[base_commit] + '/run0001/' + diag_file_entry + '.nc',
                decode_times=False)
            later_commit_dataset = xar.open_dataset(
                data_dir_dict[later_commit] + '/run0001/' + diag_file_entry + '.nc',
                decode_times=False)
            diff = later_commit_dataset - base_commit_dataset
            #Check each of the output variables for differences
            for var in diff.data_vars.keys():
                maxval = np.abs(diff[var]).max()
                if maxval != 0.:
                    print('Test failed for ' + var + ' max diff value = ' + str(maxval.values))
                    test_pass = False
        if test_pass:
            print('Test passed for ' + test_case_name + '. Commit ' + later_commit + ' gives the same answer as commit ' + base_commit)
            return_test_result = 'pass'
        else:
            print('Test failed for ' + test_case_name + '. Commit ' + later_commit + ' gives a different answer to commit ' + base_commit)
            return_test_result = 'fail'
    else:
        if compile_successful:
            #This means that the compiles were both successful, but at least one of the runs crashed.
            print('Test failed for ' + test_case_name + ' because the run crashed.')
        else:
            print('Test failed for ' + test_case_name + ' because at least one of the runs failed to compile.')
        return_test_result = 'fail'

    return return_test_result
def conduct_comparison_on_test_case(base_commit, later_commit, test_case_name, repo_to_use='[email protected]:execlim/Isca', num_cores_to_use=4):
    """Process here is to checkout each commit in turn, compiles it if necessary,
    uses the appropriate nml for the test case under consideration, and runs the
    code with the two commits in turn. The output is then compared for all
    variables in the diag file. If there are any differences in the output
    variables then the test classed as a failure.

    Returns 'pass' if every output variable is bit-identical between the two
    commits, otherwise 'fail' (including when either run crashes).
    """
    data_dir_dict = {}
    nml_use, input_files_use = get_nml_diag(test_case_name)
    diag_use = define_simple_diag_table()
    test_pass = True
    run_complete = True
    #Do the run for each of the commits in turn
    for s in [base_commit, later_commit]:
        exp_name = test_case_name + '_trip_test_21_' + s
        cb = IscaCodeBase(repo=repo_to_use, commit=s)
        # NOTE(review): compile is not wrapped in try/except here — a failed
        # compile raises out of this function rather than returning 'fail'.
        cb.compile()
        exp = Experiment(exp_name, codebase=cb)
        exp.namelist = nml_use.copy()
        exp.diag_table = diag_use
        exp.inputfiles = input_files_use
        #Only run for 3 days to keep things short.
        exp.update_namelist({'main_nml': {
            'days': 3,
        }})
        try:
            # run with a progress bar
            with exp_progress(exp, description=s) as pbar:
                exp.run(1, use_restart=False, num_cores=num_cores_to_use)
        except FailedRunError as e:
            #If run fails then test automatically fails
            run_complete = False
            test_pass = False
            continue
        data_dir_dict[s] = exp.datadir
    if run_complete:
        #For each of the diag files defined, compare the output
        for diag_file_entry in diag_use.files.keys():
            base_commit_dataset = xar.open_dataset(
                data_dir_dict[base_commit] + '/run0001/' + diag_file_entry + '.nc',
                decode_times=False)
            later_commit_dataset = xar.open_dataset(
                data_dir_dict[later_commit] + '/run0001/' + diag_file_entry + '.nc',
                decode_times=False)
            # Element-wise difference between the two runs' datasets.
            diff = later_commit_dataset - base_commit_dataset
            #Check each of the output variables for differences
            for var in diff.data_vars.keys():
                maxval = np.abs(diff[var]).max()
                if maxval != 0.:
                    # Any non-zero difference in any variable fails the test.
                    print('Test failed for ' + var + ' max diff value = ' + str(maxval.values))
                    test_pass = False
        if test_pass:
            print('Test passed for ' + test_case_name + '. Commit ' + later_commit + ' gives the same answer as commit ' + base_commit)
            return_test_result = 'pass'
        else:
            print('Test failed for ' + test_case_name + '. Commit ' + later_commit + ' gives a different answer to commit ' + base_commit)
            return_test_result = 'fail'
    else:
        #At least one run crashed, so no comparison was possible.
        print('Test failed for ' + test_case_name + ' because the run crashed.')
        return_test_result = 'fail'
    return return_test_result
#Number of cores to run the model on NCORES=16 #Set the horizontal and vertical resolution to be used. RESOLUTION = 'T21', 25 earth_grav = 9.80 grav_earth_multiple = [1, 2] for grav_scale in grav_earth_multiple: #Set up the experiment object, with the first argument being the experiment name. #This will be the name of the folder that the data will appear in. exp = Experiment('project_2_h_s_grav_earth_multiple_'+str(grav_scale), codebase=cb) exp.clear_rundir() exp.diag_table = diag exp.inputfiles = inputfiles exp.namelist = namelist.copy() exp.namelist['hs_forcing_nml']['P00'] = 1.e5 * grav_scale exp.namelist['constants_nml']['pstd'] = 1.013250E+06 * grav_scale exp.namelist['constants_nml']['pstd_mks'] = 101325.0 * grav_scale exp.namelist['spectral_dynamics_nml']['reference_sea_level_press'] = 101325.0 * grav_scale exp.namelist['constants_nml']['grav'] = earth_grav * grav_scale exp.set_resolution(*RESOLUTION) #Step 6. Run the fortran code
#Step 4. Compile the fortran code cb.compile() #Number of cores to run the model on NCORES = 16 #Set the horizontal and vertical resolution to be used. RESOLUTION = 'T21', 18 mld_values_list = [20., 5.] for mld_value in mld_values_list: #Set up the experiment object, with the first argument being the experiment name. #This will be the name of the folder that the data will appear in. exp = Experiment('project_4_rrtm_mld_' + str(mld_value), codebase=cb) exp.clear_rundir() exp.diag_table = diag exp.inputfiles = inputfiles exp.namelist = namelist.copy() exp.namelist['mixed_layer_nml']['depth'] = mld_value exp.set_resolution(*RESOLUTION) #Step 6. Run the fortran code exp.run(1, use_restart=False, num_cores=NCORES) for i in range(2, 121): exp.run(i, num_cores=NCORES)
# or it can point to a specific git repo and commit id. # This method should ensure future, independent, reproducibility of results. # cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1') # compilation depends on computer specific settings. The $GFDL_ENV # environment variable is used to determine which `$GFDL_BASE/src/extra/env` file # is used to load the correct compilers. The env file is always loaded from # $GFDL_BASE and not the checked out git repo. cb.compile( debug=True ) # compile the source code to working directory $GFDL_WORK/codebase # create an Experiment object to handle the configuration of model parameters # and output diagnostics exp = Experiment('frierson_debug_test_experiment', codebase=cb) #Tell model how to write diagnostics diag = DiagTable() diag.add_file('atmos_monthly', 30, 'days', time_units='days') #Tell model which diagnostics to write diag.add_field('dynamics', 'ps', time_avg=True) diag.add_field('dynamics', 'bk') diag.add_field('dynamics', 'pk') diag.add_field('atmosphere', 'precipitation', time_avg=True) diag.add_field('mixed_layer', 't_surf', time_avg=True) diag.add_field('dynamics', 'sphum', time_avg=True) diag.add_field('dynamics', 'ucomp', time_avg=True) diag.add_field('dynamics', 'vcomp', time_avg=True) diag.add_field('dynamics', 'temp', time_avg=True)
# Namelist overrides for the two runs: q-flux switched on (with amplitude and
# width settings) and q-flux switched off.
experiments = [
    ('qflux_on', {
        'mixed_layer_nml': {
            'do_qflux': True
        },
        'qflux_nml': {
            'qflux_amp': 30.0,
            'qflux_width': 16.0
        }
    }),
    ('qflux_off', {
        'mixed_layer_nml': {
            'do_qflux': False
        }
    }),
]


def _build_experiment(name, overrides):
    """Create one Experiment with its own copies of the shared configuration.

    The Namelist, DiagTable and input-file list from config.py are copied so
    each Experiment in this file can be edited independently of the others.
    """
    experiment = Experiment(name, codebase=codebase)
    experiment.namelist = namelist.copy()
    experiment.diag_table = diag_table.copy()
    experiment.inputfiles = list(inputfiles)
    experiment.set_resolution('T42', 40)
    experiment.update_namelist(overrides)
    return experiment


exps = [_build_experiment(name, overrides) for name, overrides in experiments]

run_cli(exps)
# useful for iterative development cb = SocratesCodeBase.from_directory(GFDL_BASE) # or it can point to a specific git repo and commit id. # This method should ensure future, independent, reproducibility of results. # cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1') # compilation depends on computer specific settings. The $GFDL_ENV # environment variable is used to determine which `$GFDL_BASE/src/extra/env` file # is used to load the correct compilers. The env file is always loaded from # $GFDL_BASE and not the checked out git repo. # create an Experiment object to handle the configuration of model parameters # and output diagnostics exp = Experiment('soc_aquaplanet_amip_clouds', codebase=cb) exp.clear_rundir() exp.inputfiles = [ os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc'), os.path.join( GFDL_BASE, 'exp/test_cases/realistic_continents/input/era-spectral7_T42_64x128.out.nc' ), os.path.join(GFDL_BASE, 'exp/test_cases/realistic_continents/input/sst_clim_amip.nc'), os.path.join( GFDL_BASE, 'exp/test_cases/realistic_continents/input/siconc_clim_amip.nc') ]
'qflux_width': 16.,
},
})

# One 120-month run with and one without the (to-be-implemented) ice-albedo
# feedback namelist switch.
#Lets do a run!
if __name__ == "__main__":
    cb.compile()
    NCORES = 16
    RESOLUTION = 'T42', 40
    do_ice_albedo_list = [False, True]
    for do_ice_albedo in do_ice_albedo_list:
        exp = Experiment('project_7_ice_albedo_' + str(do_ice_albedo), codebase=cb)
        exp.clear_rundir()
        exp.diag_table = diag
        exp.inputfiles = inputfiles
        exp.namelist = namelist.copy()
        # Here a namelist option will have to be written and toggled on and off, depending on the changes made by the group to the fortran code.
        # exp.namelist['mixed_layer_nml']['ice_albedo_feedback'] = do_ice_albedo
        exp.set_resolution(*RESOLUTION)
        # First month from rest; months 2-120 restart from the previous one.
        exp.run(1, use_restart=False, num_cores=NCORES)
        for i in range(2, 121):
            exp.run(i, num_cores=NCORES)
},
})

# Run once with a q-flux input file loaded and once with q-flux 'off'.
# NOTE(review): chunk ends at the dangling `else:` — the 'off' branch is
# outside this view.
#Lets do a run!
if __name__ == "__main__":
    cb.compile()
    NCORES = 16
    RESOLUTION = 'T42', 40
    qflux_file_list = ['merlis_schneider_30_16', 'off']
    for qflux_file_name in qflux_file_list:
        exp = Experiment('project_5_rrtm_input_file_qflux_' + str(qflux_file_name), codebase=cb)
        exp.clear_rundir()
        exp.diag_table = diag
        if qflux_file_name != 'off':
            # NOTE(review): appends to the shared `inputfiles` list, mutating
            # it for later iterations — verify this is intended.
            inputfiles.append(os.path.join(base_dir, qflux_file_name + '.nc'))
            exp.inputfiles = inputfiles
            exp.namelist = namelist.copy()
            exp.namelist['mixed_layer_nml']['load_qflux'] = True
            exp.namelist['mixed_layer_nml']['time_varying_qflux'] = False
            exp.namelist['mixed_layer_nml']['qflux_file_name'] = qflux_file_name
        else:
# useful for iterative development cb = DryCodeBase.from_directory(GFDL_BASE) # or it can point to a specific git repo and commit id. # This method should ensure future, independent, reproducibility of results. # cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1') # compilation depends on computer specific settings. The $GFDL_ENV # environment variable is used to determine which `$GFDL_BASE/src/extra/env` file # is used to load the correct compilers. The env file is always loaded from # $GFDL_BASE and not the checked out git repo. # create an Experiment object to handle the configuration of model parameters # and output diagnostics exp_name = 'held_suarez_default' exp = Experiment(exp_name, codebase=cb) #Tell model how to write diagnostics diag = DiagTable() diag.add_file('atmos_monthly', 30, 'days', time_units='days') #Tell model which diagnostics to write diag.add_field('dynamics', 'ps', time_avg=True) diag.add_field('dynamics', 'bk') diag.add_field('dynamics', 'pk') diag.add_field('dynamics', 'ucomp', time_avg=True) diag.add_field('dynamics', 'vcomp', time_avg=True) diag.add_field('dynamics', 'temp', time_avg=True) diag.add_field('dynamics', 'vor', time_avg=True) diag.add_field('dynamics', 'div', time_avg=True)
}, 'astronomy_nml': { 'obliq': 15 }, 'diag_manager_nml': { 'mix_snapshot_average_fields': False }, 'fms_nml': { 'domains_stack_size': 600000 # default: 0 }, 'fms_io_nml': { 'threading_write': 'single', # default: multi 'fileset_write': 'single', # default: multi } }) obls = [15] for obl in obls: exp = Experiment('top_down_test_obliquity%d' % obl, codebase=cb) exp.clear_rundir() exp.diag_table = diag exp.namelist = namelist.copy() exp.namelist['astronomy_nml']['obliq'] = obl with exp_progress(exp, description='o%.0f d{day}' % obl): exp.run(1, use_restart=False, num_cores=NCORES, overwrite_data=True) for i in range(2, 21): with exp_progress(exp, description='o%.0f d{day}' % s): exp.run(i, num_cores=NCORES, overwrite_data=True)
})

# Mars run (Socrates radiation) with MOLA topography: sweep over convection
# scheme, mixed-layer depth and longitude of perihelion.
if __name__=="__main__":
    conv_schemes = ['none']
    depths = [2.]
    pers = [70.85]  # longitude of perihelion
    scale = 1.  # multiplier applied to the Martian constants below
    for conv in conv_schemes:
        for depth_val in depths:
            for per_value in pers:
                exp = Experiment('soc_mars_mk36_per_value'+str((per_value))+'_'+conv+'_mld_'+str(depth_val)+'_with_mola_topo_lh_floor', codebase=cb)
                exp.clear_rundir()
                exp.diag_table = diag
                exp.inputfiles = inputfiles
                exp.namelist = namelist.copy()
                exp.namelist['constants_nml']['grav'] = scale * 3.71  # Mars surface gravity
                exp.namelist['constants_nml']['pstd'] = scale * 6100.0
                exp.namelist['constants_nml']['pstd_mks'] = scale * 610.0  # in Pa
                exp.namelist['spectral_dynamics_nml']['reference_sea_level_press'] = scale * 610.0
                exp.namelist['idealized_moist_phys_nml']['convection_scheme'] = conv
                exp.namelist['mixed_layer_nml']['depth'] = depth_val
                exp.namelist['astronomy_nml']['per'] = per_value
                # with exp_progress(exp, description='o%.0f d{day}' % scale):
                exp.run(1, use_restart=False, num_cores=NCORES)
})

# Mars grey-radiation run: sweep over convection scheme, mixed-layer depth and
# longitude of perihelion. NOTE(review): chunk ends before any exp.run() call.
if __name__ == "__main__":
    conv_schemes = ['none']
    depths = [2.]
    pers = [70.85]  # longitude of perihelion
    scale = 1.  # multiplier applied to the Martian constants below
    for conv in conv_schemes:
        for depth_val in depths:
            for per_value in pers:
                exp = Experiment('mars7', codebase=cb)
                exp.clear_rundir()
                exp.diag_table = diag
                exp.namelist = namelist.copy()
                exp.namelist['constants_nml']['grav'] = scale * 3.71  # Mars surface gravity
                exp.namelist['constants_nml']['pstd'] = scale * 6100.0
                exp.namelist['constants_nml']['pstd_mks'] = scale * 610.0  # in Pa
                exp.namelist['spectral_dynamics_nml']['reference_sea_level_press'] = scale * 610.0
                exp.namelist['idealized_moist_phys_nml']['convection_scheme'] = conv
                exp.namelist['mixed_layer_nml']['depth'] = depth_val
                exp.namelist['astronomy_nml']['per'] = per_value
                # with exp_progress(exp, description='o%.0f d{day}' % scale):
cb.compile() #Number of cores to run the model on NCORES = 16 #Set the horizontal and vertical resolution to be used. RESOLUTION = 'T21', 25 make_symmetric_values_list = [False, True] for make_symmetric_value in make_symmetric_values_list: #Set up the experiment object, with the first argument being the experiment name. #This will be the name of the folder that the data will appear in. exp = Experiment('project_2_frierson_make_symmetric_' + str(make_symmetric_value), codebase=cb) exp.clear_rundir() exp.diag_table = diag exp.inputfiles = inputfiles exp.namelist = namelist.copy() exp.namelist['spectral_dynamics_nml'][ 'make_symmetric'] = make_symmetric_value exp.set_resolution(*RESOLUTION) #Step 6. Run the fortran code exp.run(1, use_restart=False, num_cores=NCORES) for i in range(2, 121):
# or it can point to a specific git repo and commit id. # This method should ensure future, independent, reproducibility of results. # cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1') # compilation depends on computer specific settings. The $GFDL_ENV # environment variable is used to determine which `$GFDL_BASE/src/extra/env` file # is used to load the correct compilers. The env file is always loaded from # $GFDL_BASE and not the checked out git repo. cb.compile( ) # compile the source code to working directory $GFDL_WORK/codebase # create an Experiment object to handle the configuration of model parameters # and output diagnostics exp = Experiment('frierson3', codebase=cb) #Tell model how to write diagnostics diag = DiagTable() diag.add_file('atmos_monthly', 30, 'days', time_units='days') #Tell model which diagnostics to write diag.add_field('dynamics', 'ps', time_avg=True) diag.add_field('dynamics', 'bk') diag.add_field('dynamics', 'pk') diag.add_field('atmosphere', 'precipitation', time_avg=True) diag.add_field('mixed_layer', 't_surf', time_avg=True) diag.add_field('dynamics', 'sphum', time_avg=True) diag.add_field('dynamics', 'ucomp', time_avg=True) diag.add_field('dynamics', 'vcomp', time_avg=True) diag.add_field('dynamics', 'temp', time_avg=True)
# or it can point to a specific git repo and commit id. # This method should ensure future, independent, reproducibility of results. # cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1') # compilation depends on computer specific settings. The $GFDL_ENV # environment variable is used to determine which `$GFDL_BASE/src/extra/env` file # is used to load the correct compilers. The env file is always loaded from # $GFDL_BASE and not the checked out git repo. cb.compile( ) # compile the source code to working directory $GFDL_WORK/codebase # create an Experiment object to handle the configuration of model parameters # and output diagnostics exp = Experiment('realistic_continents_qflux_test_experiment', codebase=cb) #Add any input files that are necessary for a particular experiment. exp.inputfiles = [ os.path.join(GFDL_BASE, 'input/land_masks/era_land_t42.nc'), os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc'), os.path.join(base_dir, 'input/ami_qflux_ctrl_ice_4320.nc'), os.path.join(base_dir, 'input/siconc_clim_amip.nc') ] #Tell model how to write diagnostics diag = DiagTable() diag.add_file('atmos_monthly', 30, 'days', time_units='days') #Tell model which diagnostics to write diag.add_field('dynamics', 'ps', time_avg=True)
# or it can point to a specific git repo and commit id. # This method should ensure future, independent, reproducibility of results. # cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1') # compilation depends on computer specific settings. The $GFDL_ENV # environment variable is used to determine which `$GFDL_BASE/src/extra/env` file # is used to load the correct compilers. The env file is always loaded from # $GFDL_BASE and not the checked out git repo. cb.compile( ) # compile the source code to working directory $GFDL_WORK/codebase # create an Experiment object to handle the configuration of model parameters # and output diagnostics exp = Experiment('variable_co2_grey_test_experiment', codebase=cb) exp.inputfiles = [os.path.join(base_dir, 'input/co2.nc')] #Tell model how to write diagnostics diag = DiagTable() diag.add_file('atmos_monthly', 30, 'days', time_units='days') #Tell model which diagnostics to write diag.add_field('dynamics', 'ps', time_avg=True) diag.add_field('dynamics', 'bk') diag.add_field('dynamics', 'pk') diag.add_field('atmosphere', 'precipitation', time_avg=True) diag.add_field('mixed_layer', 't_surf', time_avg=True) diag.add_field('dynamics', 'sphum', time_avg=True) diag.add_field('dynamics', 'ucomp', time_avg=True)
# or it can point to a specific git repo and commit id. # This method should ensure future, independent, reproducibility of results. # cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1') # compilation depends on computer specific settings. The $GFDL_ENV # environment variable is used to determine which `$GFDL_BASE/src/extra/env` file # is used to load the correct compilers. The env file is always loaded from # $GFDL_BASE and not the checked out git repo. cb.compile() # compile the source code to working directory $GFDL_WORK/codebase # create an Experiment object to handle the configuration of model parameters # and output diagnostics exp_name = 'held_suarez_default' exp = Experiment(exp_name, codebase=cb) #Tell model how to write diagnostics diag = DiagTable() diag.add_file('atmos_monthly', 30, 'days', time_units='days') #Tell model which diagnostics to write diag.add_field('dynamics', 'ps', time_avg=True) diag.add_field('dynamics', 'bk') diag.add_field('dynamics', 'pk') diag.add_field('dynamics', 'ucomp', time_avg=True) diag.add_field('dynamics', 'vcomp', time_avg=True) diag.add_field('dynamics', 'temp', time_avg=True) diag.add_field('dynamics', 'vor', time_avg=True) diag.add_field('dynamics', 'div', time_avg=True)
cb = IscaCodeBase.from_repo(repo='https://github.com/mp586/Isca.git', commit='f1bb5c4') #looked up commit which was used for original 2xCO2 experiment # compilation depends on computer specific settings. The $GFDL_ENV # environment variable is used to determine which `$GFDL_BASE/src/extra/env` file # is used to load the correct compilers. The env file is always loaded from # $GFDL_BASE and not the checked out git repo. cb.compile() # compile the source code to working directory $GFDL_WORK/codebase # create an Experiment object to handle the configuration of model parameters # and output diagnostics #### NORTHLAND : change name of experiment when you're running a different set-up e.g. northland_different_albedo ##### exp = Experiment('northland_same_albedo', codebase=cb) #Add any input files that are necessary for a particular experiment. ##### NORTHLAND: I've uploaded the half_ocean landmask file (land.nc) to input/half_ocean/, make sure to copy it into your Isca/input folder) exp.inputfiles = [os.path.join(GFDL_BASE,'input/half_ocean/land.nc'),os.path.join(GFDL_BASE,'input/rrtm_input_files/ozone_1990.nc')] #Tell model how to write diagnostics diag = DiagTable() diag.add_file('atmos_monthly', 30, 'days', time_units='days') #Tell model which diagnostics to write diag.add_field('dynamics', 'ps', time_avg=True) diag.add_field('dynamics', 'bk') diag.add_field('dynamics', 'pk') diag.add_field('atmosphere', 'precipitation', time_avg=True)
}) #Lets do a run! if __name__ == "__main__": cb.compile() NCORES = 16 RESOLUTION = 'T42', 25 earth_grav = 9.80 grav_earth_multiple = [1, 2] for grav_scale in make_symmetric_values_list: exp = Experiment('project_2_grav_earth_multiple_' + str(grav_scale), codebase=cb) exp.clear_rundir() exp.diag_table = diag exp.inputfiles = inputfiles exp.namelist = namelist.copy() #Note that only gravity is changed here, but what else should be kept constant in order to isolate the effect of gravity alone? exp.namelist['constants_nml']['grav'] = earth_grav * grav_scale exp.set_resolution(*RESOLUTION) exp.run(1, use_restart=False, num_cores=NCORES) for i in range(2, 121):
# or it can point to a specific git repo and commit id. # This method should ensure future, independent, reproducibility of results. # cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1') # compilation depends on computer specific settings. The $GFDL_ENV # environment variable is used to determine which `$GFDL_BASE/src/extra/env` file # is used to load the correct compilers. The env file is always loaded from # $GFDL_BASE and not the checked out git repo. cb.compile( ) # compile the source code to working directory $GFDL_WORK/codebase # create an Experiment object to handle the configuration of model parameters # and output diagnostics exp = Experiment('full_continents_land_evap_pref1_qflux', codebase=cb) #Add any input files that are necessary for a particular experiment. exp.inputfiles = [ os.path.join(GFDL_BASE, 'input/all_continents/land.nc'), os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc'), os.path.join( GFDL_BASE, 'exp/mp586/bucket/input/flat_continents_newbucket/ocean_qflux.nc') ] #Tell model how to write diagnostics diag = DiagTable() diag.add_file('atmos_monthly', 30, 'days', time_units='days') #Tell model which diagnostics to write diag.add_field('dynamics', 'ps', time_avg=True)
'exponent': 7.0,
'robert_coeff': 0.03
}
})

# CO2 sweep with RRTM radiation: one 120-month run per co2ppmv value.
#Lets do a run!
if __name__ == "__main__":
    cb.compile()
    NCORES = 16
    RESOLUTION = 'T42', 40
    co2_values_list = [350., 700., 1400.]  # CO2 concentrations in ppmv
    for co2_value in co2_values_list:
        exp = Experiment('project_1_rrtm_co2_' + str(co2_value), codebase=cb)
        exp.clear_rundir()
        exp.diag_table = diag
        exp.inputfiles = inputfiles
        exp.namelist = namelist.copy()
        exp.namelist['rrtm_radiation_nml']['co2ppmv'] = co2_value
        exp.set_resolution(*RESOLUTION)
        # First month from rest; months 2-120 restart from the previous one.
        exp.run(1, use_restart=False, num_cores=NCORES)
        for i in range(2, 121):
            exp.run(i, num_cores=NCORES)
# or it can point to a specific git repo and commit id. # This method should ensure future, independent, reproducibility of results. # cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1') # compilation depends on computer specific settings. The $GFDL_ENV # environment variable is used to determine which `$GFDL_BASE/src/extra/env` file # is used to load the correct compilers. The env file is always loaded from # $GFDL_BASE and not the checked out git repo. cb.compile( ) # compile the source code to working directory $GFDL_WORK/codebase # create an Experiment object to handle the configuration of model parameters # and output diagnostics exp = Experiment('axisymmetric_test_case', codebase=cb) exp.inputfiles = [ os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc'), os.path.join(base_dir, 'input/sn_1.000_sst.nc') ] #Tell model how to write diagnostics diag = DiagTable() diag.add_file('atmos_monthly', 30, 'days', time_units='days') #Tell model which diagnostics to write diag.add_field('dynamics', 'ps', time_avg=True) diag.add_field('dynamics', 'bk') diag.add_field('dynamics', 'pk') diag.add_field('atmosphere', 'precipitation', time_avg=True)
#Set the horizontal and vertical resolution to be used. RESOLUTION = 'T21', 40 omega_values_list = ['normal', 'reversed'] omega_normal = 7.2921150e-5 for omega_value in omega_values_list: if omega_value == 'reversed': omega_passed = -omega_normal else: omega_passed = omega_normal #Set up the experiment object, with the first argument being the experiment name. #This will be the name of the folder that the data will appear in. exp = Experiment('project_3_omega_' + str(omega_value), codebase=cb) exp.clear_rundir() exp.diag_table = diag exp.inputfiles = inputfiles exp.namelist = namelist.copy() exp.namelist['constants_nml']['omega'] = omega_passed exp.set_resolution(*RESOLUTION) #Step 6. Run the fortran code exp.run(1, use_restart=False, num_cores=NCORES) for i in range(2, 241): exp.run(i, num_cores=NCORES)
# or it can point to a specific git repo and commit id. # This method should ensure future, independent, reproducibility of results. # cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1') # compilation depends on computer specific settings. The $GFDL_ENV # environment variable is used to determine which `$GFDL_BASE/src/extra/env` file # is used to load the correct compilers. The env file is always loaded from # $GFDL_BASE and not the checked out git repo. cb.compile( ) # compile the source code to working directory $GFDL_WORK/codebase # create an Experiment object to handle the configuration of model parameters # and output diagnostics exp = Experiment('realistic_continents_fixed_sst_test_experiment', codebase=cb) #Add any input files that are necessary for a particular experiment. exp.inputfiles = [ os.path.join(GFDL_BASE, 'input/land_masks/era_land_t42.nc'), os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc'), os.path.join(base_dir, 'input/sst_clim_amip.nc'), os.path.join(base_dir, 'input/siconc_clim_amip.nc') ] #Tell model how to write diagnostics diag = DiagTable() diag.add_file('atmos_monthly', 30, 'days', time_units='days') #Tell model which diagnostics to write diag.add_field('dynamics', 'ps', time_avg=True)
def run_experiment(ncores, codebase, diag, namelist, resolution, exp_name, codebase_name):
    """
    Measure the time taken to complete the experiment.

    Runs month 1 from a cold start, then the remaining months from restarts,
    timing each run and appending one CSV row per month to
    $GFDL_BENCH/<exp_name>.

    :param ncores: Number of processor cores to be used
    :param codebase: One of: Held-Suarez, Grey-Mars
    :param diag: Diagnostics
    :param namelist: Namelist file
    :param resolution: Resolution of simulation. One of: T21, T42, T85
    :param exp_name: Name of the experiment
    :param codebase_name: Name of the codebase
    """
    # Total number of runs per codebase: 13 for Held-Suarez (12 months + 1)
    # and 23 for Grey-Mars. An unrecognised codebase falls through to 0, so
    # the follow-up loop below does nothing (same as the original if/elif).
    runs = {constants.HELD_SUAREZ: 13, constants.GREY_MARS: 23}.get(codebase_name, 0)

    # CSV output path is loop-invariant — compute it once.
    csv_path = f'{constants.GFDL_BENCH}/{exp_name}'

    # exp_name is already a string; the previous f'{exp_name}' wrapper was redundant.
    exp = Experiment(exp_name, codebase=codebase)
    exp.rm_datadir()
    exp.clear_rundir()
    exp.diag_table = diag
    exp.namelist = namelist.copy()
    exp.set_resolution(*resolution)

    # First run starts from rest (no restart file).
    start = time.time()
    print(exp.namelist)
    exp.run(1, use_restart=False, num_cores=ncores)
    time_delta = time.time() - start
    write_to_csvfile(csv_path, [ncores, resolution[0], 1, time_delta])

    # Remaining runs continue from the previous month's restart file.
    for i in range(2, runs):
        start = time.time()
        exp.run(i, num_cores=ncores)
        time_delta = time.time() - start
        write_to_csvfile(csv_path, [ncores, resolution[0], i, time_delta])