Code example #1
import time

from isca import Experiment  # assumed: Isca's Python experiment API
# `constants` and `write_to_csvfile` are project-local helpers defined elsewhere.
def run_experiment(ncores, codebase, diag, namelist, resolution, exp_name,
                   codebase_name):
    """
    Measure the time taken to complete the experiment
    :param ncores: Number of processor cores to be used
    :param codebase: One of: Held-Suarez, Grey-Mars
    :param diag: Diagnostics
    :param namelist: Namelist file
    :param resolution: Resolution of simulation. One of: T21, T42, T85
    :param exp_name: Name of the experiment
    :param codebase_name: Name of the codebase
    """
    runs = 0
    if codebase_name == constants.HELD_SUAREZ:
        runs = 13
    elif codebase_name == constants.GREY_MARS:
        runs = 23

    exp = Experiment(exp_name, codebase=codebase)
    exp.rm_datadir()
    exp.clear_rundir()
    exp.diag_table = diag
    exp.namelist = namelist.copy()
    exp.set_resolution(*resolution)
    start = time.time()
    print(exp.namelist)
    exp.run(1, use_restart=False, num_cores=ncores)
    end = time.time()
    time_delta = end - start
    data = [ncores, resolution[0], 1, time_delta]
    write_to_csvfile(f'{constants.GFDL_BENCH}/{exp_name}', data)
    # runs is set above: 13 for Held-Suarez (12 more months after month 1,
    # i.e. 12+1=13) and 23 for Grey-Mars; month 1 was run above without a restart.
    for i in range(2, runs):
        start = time.time()
        exp.run(i, num_cores=ncores)
        end = time.time()
        time_delta = end - start
        data = [ncores, resolution[0], i, time_delta]
        write_to_csvfile(f'{constants.GFDL_BENCH}/{exp_name}', data)
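
A minimal call sketch for the function above, assuming `diag`, `namelist`, and a compiled codebase `cb` already exist; the experiment name and core count here are hypothetical:

# Hypothetical usage; `diag`, `namelist`, and the compiled codebase `cb` are
# assumed to have been built earlier with Isca's usual setup machinery:
run_experiment(ncores=16,
               codebase=cb,
               diag=diag,
               namelist=namelist,
               resolution=('T42', 25),
               exp_name='held_suarez_bench_T42_16cores',
               codebase_name=constants.HELD_SUAREZ)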
Code example #2
import numpy as np
import xarray as xar

from isca import Experiment, FailedRunError, IscaCodeBase, SocratesCodeBase
from isca.util import exp_progress  # assumed location of the progress-bar helper
# get_nml_diag and define_simple_diag_table are defined elsewhere in this module.
def conduct_comparison_on_test_case(base_commit,
                                    later_commit,
                                    test_case_name,
                                    repo_to_use='git@github.com:execlim/Isca',
                                    num_cores_to_use=4):
    """Process here is to checkout each commit in turn, compiles it if necessary, uses the appropriate nml for the test
    case under consideration, and runs the code with the two commits in turn. The output is then compared for all variables
    in the diag file. If there are any differences in the output variables then the test classed as a failure."""

    data_dir_dict = {}
    nml_use, input_files_use = get_nml_diag(test_case_name)
    diag_use = define_simple_diag_table()
    test_pass = True
    run_complete = True

    #Do the run for each of the commits in turn
    for s in [base_commit, later_commit]:
        exp_name = test_case_name + '_trip_test_21_' + s
        if 'socrates' in test_case_name:
            cb = SocratesCodeBase(repo=repo_to_use, commit=s)
        else:
            cb = IscaCodeBase(repo=repo_to_use, commit=s)
        cb.compile()
        exp = Experiment(exp_name, codebase=cb)
        exp.namelist = nml_use.copy()
        exp.diag_table = diag_use
        exp.inputfiles = input_files_use

        #Only run for 3 days to keep things short.
        exp.update_namelist({'main_nml': {
            'days': 3,
        }})

        try:
            # run with a progress bar
            with exp_progress(exp, description=s) as pbar:
                exp.run(1, use_restart=False, num_cores=num_cores_to_use)
        except FailedRunError as e:
            #If run fails then test automatically fails
            run_complete = False
            test_pass = False
            continue

        data_dir_dict[s] = exp.datadir
    if run_complete:
        #For each of the diag files defined, compare the output
        for diag_file_entry in diag_use.files.keys():
            base_commit_dataset = xar.open_dataset(
                data_dir_dict[base_commit] + '/run0001/' + diag_file_entry +
                '.nc',
                decode_times=False)
            later_commit_dataset = xar.open_dataset(
                data_dir_dict[later_commit] + '/run0001/' + diag_file_entry +
                '.nc',
                decode_times=False)

            diff = later_commit_dataset - base_commit_dataset

            #Check each of the output variables for differences
            for var in diff.data_vars.keys():
                maxval = np.abs(diff[var]).max()
                if maxval != 0.:
                    print('Test failed for ' + var + ' max diff value = ' +
                          str(maxval.values))
                    test_pass = False

        if test_pass:
            print('Test passed for ' + test_case_name + '. Commit ' +
                  later_commit + ' gives the same answer as commit ' +
                  base_commit)
            return_test_result = 'pass'
        else:
            print('Test failed for ' + test_case_name + '. Commit ' +
                  later_commit + ' gives a different answer to commit ' +
                  base_commit)
            return_test_result = 'fail'

    else:
        print('Test failed for ' + test_case_name +
              ' because the run crashed.')
        return_test_result = 'fail'

    return return_test_result
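
A hypothetical invocation of this comparison, with placeholder commit IDs and a placeholder test-case name:

# Placeholder commit IDs; 'held_suarez' is assumed to be a known test case:
result = conduct_comparison_on_test_case('abc1234',   # base commit
                                         'def5678',   # later commit
                                         'held_suarez',
                                         num_cores_to_use=8)
print(result)  # prints 'pass' or 'fail'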
Code example #3
# (This excerpt opens part-way through the script's namelist setup; the
#  earlier entries and diag-table definition are omitted.)
        'fileset_write': 'single',                           # default: multi
    }
})


#Let's do a run!
if __name__ == "__main__":
    cb.compile()

    NCORES = 16
    RESOLUTION = ('T42', 25)  # horizontal spectral truncation, number of vertical levels

    make_symmetric_values_list = [False, True]

    for make_symmetric_value in make_symmetric_values_list:

        exp = Experiment(f'project_2_held_suarez_make_symmetric_{make_symmetric_value}', codebase=cb)
        exp.clear_rundir()

        exp.diag_table = diag
        exp.inputfiles = inputfiles

        exp.namelist = namelist.copy()
        exp.namelist['spectral_dynamics_nml']['make_symmetric'] = make_symmetric_value

        exp.set_resolution(*RESOLUTION)

        exp.run(1, use_restart=False, num_cores=NCORES)  # first month from a cold start
        for i in range(2, 121):  # months 2-120, each restarting from the previous run
            exp.run(i, num_cores=NCORES)
Code example #4
# (The DiagTable object `diag` is created earlier in the original script;
#  this excerpt begins with its field definitions.)
diag.add_field('dynamics', 'ucomp', time_avg=True)
diag.add_field('dynamics', 'vcomp', time_avg=True)
diag.add_field('dynamics', 'temp', time_avg=True)
diag.add_field('dynamics', 'vor', time_avg=True)
diag.add_field('dynamics', 'div', time_avg=True)

exp.diag_table = diag

#Empty the run directory ready to run
exp.clear_rundir()

#Define values for the 'core' namelist
namelist_name = os.path.join(
    GFDL_BASE, 'exp/test_cases/realistic_continents/namelist_basefile.nml')
nml = f90nml.read(namelist_name)
exp.namelist = nml

exp.update_namelist({
    'mixed_layer_nml': {
        'do_qflux': False,               # don't use the prescribed analytical formula for q-fluxes
        'do_read_sst': True,             # read SST values from the input file
        'do_sc_sst': True,               # use specified SSTs (both flags must be True)
        'sst_file': 'sst_clim_amip',     # name of the SST input file
        'specify_sst_over_ocean_only': True,  # ensure SST is only specified over ocean
    }
})

#Let's do a run!
if __name__ == "__main__":
    # (excerpt ends here; a plausible continuation is sketched below)
Code example #5
File: trip_test_functions.py  Project: sit23/Isca

# (imports as in code example #2, plus `import f90nml`)
def conduct_comparison_on_test_case(base_commit,
                                    later_commit,
                                    test_case_name,
                                    repo_to_use='git@github.com:execlim/Isca',
                                    num_cores_to_use=4):
    """Process here is to checkout each commit in turn, compiles it if necessary, uses the appropriate nml for the test
    case under consideration, and runs the code with the two commits in turn. The output is then compared for all variables
    in the diag file. If there are any differences in the output variables then the test classed as a failure."""

    data_dir_dict = {}
    nml_use, input_files_use = get_nml_diag(test_case_name)
    diag_use = define_simple_diag_table()
    test_pass = True
    run_complete = True
    compile_successful = True

    #Do the run for each of the commits in turn
    for s in [base_commit, later_commit]:
        exp_name = test_case_name + '_trip_test_21_' + s
        if 'socrates' in test_case_name or 'ape_aquaplanet' in test_case_name:
            cb = SocratesCodeBase(repo=repo_to_use, commit=s)
        else:
            cb = IscaCodeBase(repo=repo_to_use, commit=s)
        try:
            cb.compile()
            exp = Experiment(exp_name, codebase=cb)
            exp.namelist = nml_use.copy()
            exp.diag_table = diag_use
            exp.inputfiles = input_files_use

            #Only run for 3 days to keep things short.
            exp.update_namelist({'main_nml': {
                'days': 3,
            }})
        except Exception:
            # If the compile or experiment setup fails, the test automatically fails
            run_complete = False
            test_pass = False
            compile_successful = False
            continue

        try:
            # run with a progress bar
            with exp_progress(exp, description=s) as pbar:
                exp.run(1, use_restart=False, num_cores=num_cores_to_use)
        except FailedRunError as e:
            #If run fails then test automatically fails
            run_complete = False
            test_pass = False
            continue

        data_dir_dict[s] = exp.datadir
    if run_complete:
        #For each of the diag files defined, compare the output
        for diag_file_entry in diag_use.files.keys():
            base_commit_dataset = xar.open_dataset(
                data_dir_dict[base_commit] + '/run0001/' + diag_file_entry +
                '.nc',
                decode_times=False)
            later_commit_dataset = xar.open_dataset(
                data_dir_dict[later_commit] + '/run0001/' + diag_file_entry +
                '.nc',
                decode_times=False)

            diff = later_commit_dataset - base_commit_dataset

            #Check each of the output variables for differences
            for var in diff.data_vars.keys():
                maxval = np.abs(diff[var]).max()
                if maxval != 0.:
                    print('Test failed for ' + var + ' max diff value = ' +
                          str(maxval.values))
                    test_pass = False

            base_experiment_input_nml = f90nml.read(
                data_dir_dict[base_commit] + '/run0001/input.nml')
            later_commit_input_nml = f90nml.read(data_dir_dict[later_commit] +
                                                 '/run0001/input.nml')

            if base_experiment_input_nml != later_commit_input_nml:
                raise AttributeError(
                    f'The two experiments being compared were run with different input namelists, so the results may differ for that reason alone. This happens only if you have run the trip tests with one of these commit IDs before, using a different version of the test cases on that occasion. Try removing both {data_dir_dict[base_commit]} and {data_dir_dict[later_commit]} and running again.'
                )

        if test_pass:
            print('Test passed for ' + test_case_name + '. Commit ' +
                  later_commit + ' gives the same answer as commit ' +
                  base_commit)
            return_test_result = 'pass'
        else:
            print('Test failed for ' + test_case_name + '. Commit ' +
                  later_commit + ' gives a different answer to commit ' +
                  base_commit)
            return_test_result = 'fail'

    else:
        if compile_successful:
            #This means that the compiles were both successful, but at least one of the runs crashed.
            print('Test failed for ' + test_case_name +
                  ' because the run crashed.')
        else:
            print('Test failed for ' + test_case_name +
                  ' because at least one of the runs failed to compile.')

        return_test_result = 'fail'

    return return_test_result
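
A full trip-test driver would typically loop over several test cases and collect the results; a minimal sketch with placeholder commit IDs and test-case names:

# Placeholder commit IDs and test-case names:
test_cases = ['held_suarez', 'grey_mars', 'socrates_aquaplanet']
results = {name: conduct_comparison_on_test_case('abc1234', 'def5678', name)
           for name in test_cases}
print(results)  # e.g. {'held_suarez': 'pass', 'grey_mars': 'fail', ...}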