Example #1
diag.add_field('dynamics', 'div', time_avg=True)

exp.diag_table = diag

#Empty the run directory ready to run
exp.clear_rundir()

#Define values for the 'core' namelist
namelist_name = os.path.join(
    GFDL_BASE, 'exp/test_cases/realistic_continents/namelist_basefile.nml')
nml = f90nml.read(namelist_name)
exp.namelist = nml

exp.update_namelist({
    'mixed_layer_nml': {
        'do_qflux': False,  #Don't use the prescribed analytical formula for q-fluxes
        'do_read_sst': True,  #Read in SST values from an input file
        'do_sc_sst': True,  #Use specified SSTs (both flags need to be True)
        'sst_file': 'sst_clim_amip',  #Name of the SST input file
        'specify_sst_over_ocean_only': True,  #Only specify SSTs over ocean regions
    }
})
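
#For the model to find 'sst_clim_amip' at runtime, the corresponding netCDF
#file normally needs staging as an experiment input. A minimal sketch; the
#path below is an assumption, not taken from this script -- point it at
#wherever sst_clim_amip.nc actually lives.
exp.inputfiles = [os.path.join(GFDL_BASE, 'input/sst_clim_amip.nc')]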

#Let's do a run!
if __name__ == "__main__":
    exp.run(1, use_restart=False, num_cores=NCORES)
    for i in range(2, 121):
        exp.run(i, num_cores=NCORES)
Example #2
def conduct_comparison_on_test_case(base_commit,
                                    later_commit,
                                    test_case_name,
                                    repo_to_use='[email protected]:execlim/Isca',
                                    num_cores_to_use=4):
    """Process here is to checkout each commit in turn, compiles it if necessary, uses the appropriate nml for the test
    case under consideration, and runs the code with the two commits in turn. The output is then compared for all variables
    in the diag file. If there are any differences in the output variables then the test classed as a failure."""

    data_dir_dict = {}
    nml_use, input_files_use = get_nml_diag(test_case_name)
    diag_use = define_simple_diag_table()
    test_pass = True
    run_complete = True

    #Do the run for each of the commits in turn
    for s in [base_commit, later_commit]:
        exp_name = test_case_name + '_trip_test_21_' + s
        if 'socrates' in test_case_name:
            cb = SocratesCodeBase(repo=repo_to_use, commit=s)
        else:
            cb = IscaCodeBase(repo=repo_to_use, commit=s)
        cb.compile()
        exp = Experiment(exp_name, codebase=cb)
        exp.namelist = nml_use.copy()
        exp.diag_table = diag_use
        exp.inputfiles = input_files_use

        #Only run for 3 days to keep things short.
        exp.update_namelist({'main_nml': {
            'days': 3,
        }})

        try:
            # run with a progress bar
            with exp_progress(exp, description=s) as pbar:
                exp.run(1, use_restart=False, num_cores=num_cores_to_use)
        except FailedRunError:
            #If run fails then test automatically fails
            run_complete = False
            test_pass = False
            continue

        data_dir_dict[s] = exp.datadir
    if run_complete:
        #For each of the diag files defined, compare the output
        for diag_file_entry in diag_use.files.keys():
            base_commit_dataset = xar.open_dataset(
                data_dir_dict[base_commit] + '/run0001/' + diag_file_entry +
                '.nc',
                decode_times=False)
            later_commit_dataset = xar.open_dataset(
                data_dir_dict[later_commit] + '/run0001/' + diag_file_entry +
                '.nc',
                decode_times=False)

            diff = later_commit_dataset - base_commit_dataset

            #Check each of the output variables for differences
            for var in diff.data_vars.keys():
                maxval = np.abs(diff[var]).max()
                if maxval != 0.:
                    print('Test failed for ' + var + ' max diff value = ' +
                          str(maxval.values))
                    test_pass = False

        if test_pass:
            print('Test passed for ' + test_case_name + '. Commit ' +
                  later_commit + ' gives the same answer as commit ' +
                  base_commit)
            return_test_result = 'pass'
        else:
            print('Test failed for ' + test_case_name + '. Commit ' +
                  later_commit + ' gives a different answer to commit ' +
                  base_commit)
            return_test_result = 'fail'

    else:
        print('Test failed for ' + test_case_name +
              ' because the run crashed.')
        return_test_result = 'fail'

    return return_test_result
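
#A hypothetical driver for the function above; the commit IDs and test-case
#name are placeholders, not real values from the Isca repository.
if __name__ == "__main__":
    result = conduct_comparison_on_test_case(
        'abc1234',  #base commit (placeholder)
        'def5678',  #later commit (placeholder)
        'held_suarez',  #test case name (placeholder)
        num_cores_to_use=4)
    print(result)  #prints 'pass' or 'fail'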
Example #3
        'spinup_restart': spinup_restart
    },
    #    'vert_coordinate_nml': {
    #        'bk': [0.000000, 0.0117665, 0.0196679, 0.0315244, 0.0485411, 0.0719344, 0.1027829, 0.1418581, 0.1894648, 0.2453219, 0.3085103, 0.3775033, 0.4502789, 0.5244989, 0.5977253, 0.6676441, 0.7322627, 0.7900587, 0.8400683, 0.8819111, 0.9157609, 0.9422770, 0.9625127, 0.9778177, 0.9897489, 1.0000000],
    #        'pk': [0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000],
    #       }
})
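
#This excerpt starts mid-script: the control flags used by the run loop below
#are defined in the part not shown. The values here are illustrative
#assumptions only, standing in for the real (omitted) definitions.
cold_restart = True             #True: start from scratch; False: continue from run i0
use_external_restart = False    #seed run i0 from a restart file made elsewhere
external_restart_file = None    #path to that external restart, if used
i0, i1 = 2, 121                 #first month to run and one past the last
NCORES = 16                     #number of cores to run on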

#Let's do a run!
if __name__ == "__main__":
    if cold_restart:
        exp.run(1, use_restart=False, num_cores=NCORES)
        for i in range(2, i1):
            exp.run(i, num_cores=NCORES)
    else:
        for i in range(i0, i1):
            if use_external_restart and i == i0:
                exp.run(i,
                        num_cores=NCORES,
                        restart_file=external_restart_file)
                exp.update_namelist(
                    {'atmosphere_nml': {
                        'spinup_restart': False
                    }})
                exp.update_namelist(
                    {'spectral_dynamics_nml': {
                        'spinup_restart': False
                    }})
            else:
                exp.run(i, num_cores=NCORES)
Example #4
diag.add_field('dynamics', 'temp', time_avg=True)
diag.add_field('dynamics', 'vor', time_avg=True)
diag.add_field('dynamics', 'div', time_avg=True)

exp.diag_table = diag

#Empty the run directory ready to run
exp.clear_rundir()

#Define values for the 'core' namelist
namelist_name = os.path.join(
    GFDL_BASE, 'exp/test_cases/realistic_continents/namelist_basefile.nml')
nml = f90nml.read(namelist_name)
exp.namelist = nml

exp.update_namelist({
    'mixed_layer_nml': {
        'do_qflux': False,  #Don't use the analytic formula for q-fluxes
        'load_qflux': True,  #Load the q-flux field from an input file
        'time_varying_qflux': True,  #q-flux will be time-varying
        'qflux_file_name': 'ami_qflux_ctrl_ice_4320',  #Name of the q-flux input file
    }
})

#Let's do a run!
if __name__ == "__main__":
    exp.run(1, use_restart=False, num_cores=NCORES)
    for i in range(2, 121):
        exp.run(i, num_cores=NCORES)
Example #5
cb = DryCodeBase.from_directory(GFDL_BASE)

namelist['main_nml'] = {'dt_atmos': 600, 'days': 30, 'calendar': 'no_calendar'}

earth_omega = 7.292e-5

#Scale factors, interpreted as percentages of Earth's rotation rate
scales = [1.0, 10.0, 100.0, 1000.0]

for s in scales:
    exp_name = 'hs_om_scale_%.0f' % s
    omega = earth_omega * (s / 100.0)  #s is a percentage, so s=100 gives Earth's omega
    exp = Experiment(exp_name, codebase=cb)
    exp.namelist = namelist.copy()
    exp.diag_table = diag

    exp.update_namelist({'constants_nml': {'omega': omega}})
    try:
        # run with a progress bar whose description shows the omega scale
        with exp_progress(exp, description='o%.0f d{day}' % s) as pbar:
            exp.run(1, use_restart=False, num_cores=16)

        for n in range(2, 11):
            with exp_progress(exp, description='o%.0f d{day}' % s) as pbar:
                exp.run(n)
                exp.delete_restart(n - 1)

    except FailedRunError:
        # don't let a crash get in the way of good science
        # (we could try and reduce timestep here if we wanted to be smarter)
        continue
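
#A quick standalone check of the rotation rates the loop above produces
#(plain Python, no Isca required):
for s in scales:
    print('scale %6.1f%% of Earth -> omega = %.3e rad/s' %
          (s, earth_omega * s / 100.0))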
Example #6
def conduct_comparison_on_test_case(base_commit,
                                    later_commit,
                                    test_case_name,
                                    repo_to_use='[email protected]:execlim/Isca',
                                    num_cores_to_use=4):
    """Process here is to checkout each commit in turn, compiles it if necessary, uses the appropriate nml for the test
    case under consideration, and runs the code with the two commits in turn. The output is then compared for all variables
    in the diag file. If there are any differences in the output variables then the test classed as a failure."""

    data_dir_dict = {}
    nml_use, input_files_use = get_nml_diag(test_case_name)
    diag_use = define_simple_diag_table()
    test_pass = True
    run_complete = True
    compile_successful = True

    #Do the run for each of the commits in turn
    for s in [base_commit, later_commit]:
        exp_name = test_case_name + '_trip_test_21_' + s
        if 'socrates' in test_case_name or 'ape_aquaplanet' in test_case_name:
            cb = SocratesCodeBase(repo=repo_to_use, commit=s)
        else:
            cb = IscaCodeBase(repo=repo_to_use, commit=s)
        try:
            cb.compile()
            exp = Experiment(exp_name, codebase=cb)
            exp.namelist = nml_use.copy()
            exp.diag_table = diag_use
            exp.inputfiles = input_files_use

            #Only run for 3 days to keep things short.
            exp.update_namelist({'main_nml': {
                'days': 3,
            }})
        except Exception:
            #If the compile or experiment setup fails then the test automatically fails
            run_complete = False
            test_pass = False
            compile_successful = False
            continue

        try:
            # run with a progress bar
            with exp_progress(exp, description=s) as pbar:
                exp.run(1, use_restart=False, num_cores=num_cores_to_use)
        except FailedRunError:
            #If run fails then test automatically fails
            run_complete = False
            test_pass = False
            continue

        data_dir_dict[s] = exp.datadir
    if run_complete:
        #For each of the diag files defined, compare the output
        for diag_file_entry in diag_use.files.keys():
            base_commit_dataset = xar.open_dataset(
                data_dir_dict[base_commit] + '/run0001/' + diag_file_entry +
                '.nc',
                decode_times=False)
            later_commit_dataset = xar.open_dataset(
                data_dir_dict[later_commit] + '/run0001/' + diag_file_entry +
                '.nc',
                decode_times=False)

            diff = later_commit_dataset - base_commit_dataset

            #Check each of the output variables for differences
            for var in diff.data_vars.keys():
                maxval = np.abs(diff[var]).max()
                if maxval != 0.:
                    print('Test failed for ' + var + ' max diff value = ' +
                          str(maxval.values))
                    test_pass = False

            base_experiment_input_nml = f90nml.read(
                data_dir_dict[base_commit] + '/run0001/input.nml')
            later_commit_input_nml = f90nml.read(data_dir_dict[later_commit] +
                                                 '/run0001/input.nml')

            if base_experiment_input_nml != later_commit_input_nml:
                raise AttributeError(
                    f'The two experiments being compared were run with different input namelists, so their results may differ for that reason alone. This happens when one of the commit IDs was used in a previous trip test with a different version of the test cases. Try removing both {data_dir_dict[base_commit]} and {data_dir_dict[later_commit]} and running again.'
                )

        if test_pass:
            print('Test passed for ' + test_case_name + '. Commit ' +
                  later_commit + ' gives the same answer as commit ' +
                  base_commit)
            return_test_result = 'pass'
        else:
            print('Test failed for ' + test_case_name + '. Commit ' +
                  later_commit + ' gives a different answer to commit ' +
                  base_commit)
            return_test_result = 'fail'

    else:
        if compile_successful:
            #This means that the compiles were both successful, but at least one of the runs crashed.
            print('Test failed for ' + test_case_name +
                  ' because the run crashed.')
        else:
            print('Test failed for ' + test_case_name +
                  ' because at least one of the runs failed to compile.')

        return_test_result = 'fail'

    return return_test_result
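
#As with the simpler variant above, a hypothetical driver loop; the test-case
#names and commit IDs below are placeholders.
if __name__ == "__main__":
    for case in ['held_suarez', 'frierson', 'socrates_aquaplanet']:
        outcome = conduct_comparison_on_test_case('abc1234', 'def5678', case)
        print(case, outcome)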
Example #7
experiments = [
    ('qflux_on', {
        'mixed_layer_nml': {'do_qflux': True},
        'qflux_nml': {'qflux_amp': 30.0, 'qflux_width': 16.0}
    }),
    ('qflux_off', {
        'mixed_layer_nml': {'do_qflux': False}
    }),
]

exps = []
for exp_name, parameters in experiments:
    exp = Experiment(exp_name, codebase=codebase)

    # Get some basic parameters from config.py.  We copy the Namelist,
    # DiagTable and input files list so that when multiple Experiments share a
    # file, each has its own version that can be edited independently.
    exp.diag_table = diag_table.copy()
    exp.namelist = namelist.copy()
    exp.inputfiles = inputfiles[::]

    exp.set_resolution('T42', 40)
    exp.update_namelist(parameters)
    exps.append(exp)

run_cli(exps)