Code example #1
def conduct_comparison_on_test_case(base_commit,
                                    later_commit,
                                    test_case_name,
                                    repo_to_use='[email protected]:execlim/Isca',
                                    num_cores_to_use=4):
    """Process here is to checkout each commit in turn, compiles it if necessary, uses the appropriate nml for the test
    case under consideration, and runs the code with the two commits in turn. The output is then compared for all variables
    in the diag file. If there are any differences in the output variables then the test classed as a failure."""

    data_dir_dict = {}
    nml_use, input_files_use = get_nml_diag(test_case_name)
    diag_use = define_simple_diag_table()
    test_pass = True
    run_complete = True

    #Do the run for each of the commits in turn
    for s in [base_commit, later_commit]:
        exp_name = test_case_name + '_trip_test_21_' + s
        if 'socrates' in test_case_name:
            cb = SocratesCodeBase(repo=repo_to_use, commit=s)
        else:
            cb = IscaCodeBase(repo=repo_to_use, commit=s)
        cb.compile()
        exp = Experiment(exp_name, codebase=cb)
        exp.namelist = nml_use.copy()
        exp.diag_table = diag_use
        exp.inputfiles = input_files_use

        #Only run for 3 days to keep things short.
        exp.update_namelist({'main_nml': {
            'days': 3,
        }})

        try:
            # run with a progress bar
            with exp_progress(exp, description=s) as pbar:
                exp.run(1, use_restart=False, num_cores=num_cores_to_use)
        except FailedRunError as e:
            #If run fails then test automatically fails
            run_complete = False
            test_pass = False
            continue

        data_dir_dict[s] = exp.datadir
    if run_complete:
        #For each of the diag files defined, compare the output
        for diag_file_entry in diag_use.files.keys():
            base_commit_dataset = xar.open_dataset(
                data_dir_dict[base_commit] + '/run0001/' + diag_file_entry +
                '.nc',
                decode_times=False)
            later_commit_dataset = xar.open_dataset(
                data_dir_dict[later_commit] + '/run0001/' + diag_file_entry +
                '.nc',
                decode_times=False)

            diff = later_commit_dataset - base_commit_dataset

            #Check each of the output variables for differences
            for var in diff.data_vars.keys():
                maxval = np.abs(diff[var]).max()
                if maxval != 0.:
                    print('Test failed for ' + var + ' max diff value = ' +
                          str(maxval.values))
                    test_pass = False

        if test_pass:
            print('Test passed for ' + test_case_name + '. Commit ' +
                  later_commit + ' gives the same answer as commit ' +
                  base_commit)
            return_test_result = 'pass'
        else:
            print('Test failed for ' + test_case_name + '. Commit ' +
                  later_commit + ' gives a different answer to commit ' +
                  base_commit)
            return_test_result = 'fail'

    else:
        print('Test failed for ' + test_case_name +
              ' because the run crashed.')
        return_test_result = 'fail'

    return return_test_result
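
A minimal usage sketch for the function above (the commit IDs and test case name here are hypothetical placeholders, not values from the original script):

if __name__ == '__main__':
    # Hypothetical commits and test case; substitute real commit IDs and a real test case name.
    result = conduct_comparison_on_test_case('abc1234', 'def5678', 'held_suarez',
                                             num_cores_to_use=8)
    print('Trip test result: ' + result)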
Code example #2
        'fileset_write': 'single',                           # default: multi
    }
})


#Let's do a run!
if __name__=="__main__":
    cb.compile()

    NCORES=16
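    # 'T42' is the horizontal spectral truncation; 25 is the number of vertical levels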
    RESOLUTION = 'T42', 25

    make_symmetric_values_list = [False, True]

    for make_symmetric_value in make_symmetric_values_list:

        exp = Experiment('project_2_held_suarez_make_symmetric_'+str(make_symmetric_value), codebase=cb)
        exp.clear_rundir()

        exp.diag_table = diag
        exp.inputfiles = inputfiles

        exp.namelist = namelist.copy()
        exp.namelist['spectral_dynamics_nml']['make_symmetric'] = make_symmetric_value

        exp.set_resolution(*RESOLUTION)

        exp.run(1, use_restart=False, num_cores=NCORES)
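        # subsequent runs (2-120) continue from the restart file written by the previous run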
        for i in range(2,121):
            exp.run(i, num_cores=NCORES)
Code example #3
# compilation depends on computer specific settings.  The $GFDL_ENV
# environment variable is used to determine which `$GFDL_BASE/src/extra/env` file
# is used to load the correct compilers.  The env file is always loaded from
# $GFDL_BASE and not the checked out git repo.

cb.compile()  # compile the source code to working directory $GFDL_WORK/codebase

# create an Experiment object to handle the configuration of model parameters
# and output diagnostics
exp = Experiment('full_continents_land_evap_pref07_fixedSSTs', codebase=cb)

#Add any input files that are necessary for a particular experiment.
exp.inputfiles = [
    os.path.join(GFDL_BASE, 'input/all_continents/land.nc'),
    os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc'),
    os.path.join(GFDL_BASE, 'input/sst_clim_amip.nc')
]
#Tell model how to write diagnostics
diag = DiagTable()
diag.add_file('atmos_monthly', 30, 'days', time_units='days')
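# fields added to this file are written every 30 days, i.e. monthly output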

#Tell model which diagnostics to write
diag.add_field('dynamics', 'ps', time_avg=True)
diag.add_field('dynamics', 'bk')
diag.add_field('dynamics', 'pk')
diag.add_field('atmosphere', 'precipitation', time_avg=True)
diag.add_field('mixed_layer', 't_surf', time_avg=True)
diag.add_field('dynamics', 'sphum', time_avg=True)
diag.add_field('dynamics', 'ucomp', time_avg=True)
diag.add_field('dynamics', 'vcomp', time_avg=True)
Code example #4
                exp.diag_table = diag
                exp.namelist = namelist.copy()
                exp.namelist['constants_nml']['grav'] = scale * 1.354  #surface gravity
                exp.namelist['constants_nml']['pstd'] = scale * 14670000.0  #surface pressure - 100 times bigger than below values
                exp.namelist['constants_nml']['pstd_mks'] = scale * 146700.0  #in Pa
                exp.namelist['spectral_dynamics_nml']['reference_sea_level_press'] = scale * 146700.0  #in Pa
                exp.namelist['idealized_moist_phys_nml']['convection_scheme'] = conv
                exp.namelist['mixed_layer_nml']['depth'] = depth_val
                exp.namelist['astronomy_nml']['per'] = per_value
                exp.set_resolution(*RESOLUTION)

                exp.inputfiles = [
                    os.path.join(GFDL_BASE,
                                 'input/land_masks/titan_land_t21.nc')
                ]  #!!! new line - location of topography file

                #            with exp_progress(exp, description='o%.0f d{day}' % scale):
                exp.run(1, use_restart=False, num_cores=NCORES)
                for i in range(2, 179):
                    #                with exp_progress(exp, description='o%.0f d{day}' % scale):
                    exp.run(i, num_cores=NCORES)
                notify(
                    'top down with conv scheme = ' + conv + ' has completed',
                    'isca')
Code example #5
# environment variable is used to determine which `$GFDL_BASE/src/extra/env` file
# is used to load the correct compilers.  The env file is always loaded from
# $GFDL_BASE and not the checked out git repo.

cb.compile()  # compile the source code to working directory $GFDL_WORK/codebase

# create an Experiment object to handle the configuration of model parameters
# and output diagnostics
exp = Experiment('flat_continents_land_evap_pref1_qflux', codebase=cb)

#Add any input files that are necessary for a particular experiment.
exp.inputfiles = [
    os.path.join(GFDL_BASE, 'input/all_continents/land.nc'),
    os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc'),
    os.path.join(
        GFDL_BASE,
        'exp/mp586/bucket/input/flat_continents_newbucket/ocean_qflux.nc')
]
#Tell model how to write diagnostics
diag = DiagTable()
diag.add_file('atmos_monthly', 30, 'days', time_units='days')

#Tell model which diagnostics to write
diag.add_field('dynamics', 'ps', time_avg=True)
diag.add_field('dynamics', 'bk')
diag.add_field('dynamics', 'pk')
diag.add_field('atmosphere', 'precipitation', time_avg=True)
diag.add_field('mixed_layer', 't_surf', time_avg=True)
diag.add_field('dynamics', 'sphum', time_avg=True)
diag.add_field('dynamics', 'ucomp', time_avg=True)
Code example #6
# is used to load the correct compilers.  The env file is always loaded from
# $GFDL_BASE and not the checked out git repo.

cb.compile()  # compile the source code to working directory $GFDL_WORK/codebase

# create an Experiment object to handle the configuration of model parameters
# and output diagnostics

#### NORTHLAND: change the name of the experiment when you're running a different set-up, e.g. northland_different_albedo #####
exp = Experiment('northland_different_albedo', codebase=cb)

#Add any input files that are necessary for a particular experiment.
##### NORTHLAND: I've uploaded the half_ocean landmask file (land.nc) to input/half_ocean/; make sure to copy it into your Isca/input folder
exp.inputfiles = [
    os.path.join(GFDL_BASE, 'input/half_ocean/land.nc'),
    os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc')
]
#Tell model how to write diagnostics
diag = DiagTable()
diag.add_file('atmos_monthly', 30, 'days', time_units='days')

#Tell model which diagnostics to write
diag.add_field('dynamics', 'ps', time_avg=True)
diag.add_field('dynamics', 'bk')
diag.add_field('dynamics', 'pk')
diag.add_field('atmosphere', 'precipitation', time_avg=True)
diag.add_field('mixed_layer', 't_surf', time_avg=True)  # surface temperature
diag.add_field('dynamics', 'sphum', time_avg=True)  # specific humidity
diag.add_field('dynamics', 'ucomp', time_avg=True)  # zonal velocity component
diag.add_field('dynamics', 'vcomp', time_avg=True)  # meridional velocity component
Code example #7
# compilation depends on computer specific settings.  The $GFDL_ENV
# environment variable is used to determine which `$GFDL_BASE/src/extra/env` file
# is used to load the correct compilers.  The env file is always loaded from
# $GFDL_BASE and not the checked out git repo.

cb.compile()  # compile the source code to working directory $GFDL_WORK/codebase

# create an Experiment object to handle the configuration of model parameters
# and output diagnostics
exp = Experiment('aquaplanet_fixedSSTs', codebase=cb)



#Add any input files that are necessary for a particular experiment.
exp.inputfiles = [
    os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc'),
    os.path.join(GFDL_BASE, 'input/sst_clim_amip_final.nc')
]
#os.path.join(GFDL_BASE,'input/aquaplanet/land.nc')
#Tell model how to write diagnostics
diag = DiagTable()
diag.add_file('atmos_monthly', 30, 'days', time_units='days')

#Tell model which diagnostics to write
diag.add_field('dynamics', 'ps', time_avg=True)
diag.add_field('dynamics', 'bk')
diag.add_field('dynamics', 'pk')
diag.add_field('atmosphere', 'precipitation', time_avg=True)
diag.add_field('mixed_layer', 't_surf', time_avg=True)
diag.add_field('dynamics', 'sphum', time_avg=True)
diag.add_field('dynamics', 'ucomp', time_avg=True)
diag.add_field('dynamics', 'vcomp', time_avg=True)
diag.add_field('dynamics', 'temp', time_avg=True)
Code example #8
# compilation depends on computer specific settings.  The $GFDL_ENV
# environment variable is used to determine which `$GFDL_BASE/src/extra/env` file
# is used to load the correct compilers.  The env file is always loaded from
# $GFDL_BASE and not the checked out git repo.

cb.compile()  # compile the source code to working directory $GFDL_WORK/codebase

# create an Experiment object to handle the configuration of model parameters
# and output diagnostics
exp = Experiment('bucket_test_experiment', codebase=cb)

#Add any input files that are necessary for a particular experiment.
exp.inputfiles = [
    os.path.join(base_dir, 'input/land.nc'),
    os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc')
]

#Tell model how to write diagnostics
diag = DiagTable()
diag.add_file('atmos_monthly', 30, 'days', time_units='days')

#Tell model which diagnostics to write
diag.add_field('dynamics', 'ps', time_avg=True)
diag.add_field('dynamics', 'bk')
diag.add_field('dynamics', 'pk')
diag.add_field('atmosphere', 'precipitation', time_avg=True)
diag.add_field('atmosphere', 'bucket_depth', time_avg=True)
diag.add_field('mixed_layer', 't_surf', time_avg=True)
diag.add_field('dynamics', 'sphum', time_avg=True)
diag.add_field('dynamics', 'ucomp', time_avg=True)
Code example #9
# compilation depends on computer specific settings.  The $GFDL_ENV
# environment variable is used to determine which `$GFDL_BASE/src/extra/env` file
# is used to load the correct compilers.  The env file is always loaded from
# $GFDL_BASE and not the checked out git repo.

cb.compile()  # compile the source code to working directory $GFDL_WORK/codebase

# create an Experiment object to handle the configuration of model parameters
# and output diagnostics
exp = Experiment('realistic_continents_fixed_sst_test_experiment', codebase=cb)

#Add any input files that are necessary for a particular experiment.
exp.inputfiles = [
    os.path.join(GFDL_BASE, 'input/land_masks/era_land_t42.nc'),
    os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc'),
    os.path.join(base_dir, 'input/sst_clim_amip.nc'),
    os.path.join(base_dir, 'input/siconc_clim_amip.nc')
]

#Tell model how to write diagnostics
diag = DiagTable()
diag.add_file('atmos_monthly', 30, 'days', time_units='days')

#Tell model which diagnostics to write
diag.add_field('dynamics', 'ps', time_avg=True)
diag.add_field('dynamics', 'bk')
diag.add_field('dynamics', 'pk')
diag.add_field('atmosphere', 'precipitation', time_avg=True)
diag.add_field('mixed_layer', 't_surf', time_avg=True)
diag.add_field('dynamics', 'sphum', time_avg=True)
diag.add_field('dynamics', 'ucomp', time_avg=True)
Code example #10
# compilation depends on computer specific settings.  The $GFDL_ENV
# environment variable is used to determine which `$GFDL_BASE/src/extra/env` file
# is used to load the correct compilers.  The env file is always loaded from
# $GFDL_BASE and not the checked out git repo.

cb.compile()  # compile the source code to working directory $GFDL_WORK/codebase

# create an Experiment object to handle the configuration of model parameters
# and output diagnostics
exp = Experiment('aquaplanet_finalIscaAPqflux_zerointegral', codebase=cb)



#Add any input files that are necessary for a particular experiment.
exp.inputfiles = [
    os.path.join(GFDL_BASE, 'input/aquaplanet/land.nc'),
    os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc'),
    os.path.join(GFDL_BASE, 'input/aquaplanet/isca_qflux/zero_integral/ocean_qflux.nc')
]
#Tell model how to write diagnostics
diag = DiagTable()
diag.add_file('atmos_monthly', 30, 'days', time_units='days')
# uncomment the line below to also output data at a second, higher time resolution
# diag.add_file('atmos_6_hourly', 6, 'hours', time_units='hours')

#Tell model which diagnostics to write
diag.add_field('dynamics', 'ps', time_avg=True)  # if no files are specified, the field is written to every file in the diag table
diag.add_field('dynamics', 'bk')
diag.add_field('dynamics', 'pk')
diag.add_field('atmosphere', 'precipitation', time_avg=True) 
diag.add_field('mixed_layer', 't_surf', time_avg=True)
diag.add_field('dynamics', 'sphum', time_avg=True)
diag.add_field('dynamics', 'ucomp', time_avg=True)
diag.add_field('dynamics', 'vcomp', time_avg=True)
diag.add_field('dynamics', 'temp', time_avg=True)
Code example #11
# or it can point to a specific git repo and commit id.
# This method should ensure future, independent, reproducibility of results.
# cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1')

# compilation depends on computer specific settings.  The $GFDL_ENV
# environment variable is used to determine which `$GFDL_BASE/src/extra/env` file
# is used to load the correct compilers.  The env file is always loaded from
# $GFDL_BASE and not the checked out git repo.

cb.compile()  # compile the source code to working directory $GFDL_WORK/codebase

# create an Experiment object to handle the configuration of model parameters
# and output diagnostics
exp = Experiment('variable_co2_grey_test_experiment', codebase=cb)

exp.inputfiles = [os.path.join(base_dir,'input/co2.nc')]

#Tell model how to write diagnostics
diag = DiagTable()
diag.add_file('atmos_monthly', 30, 'days', time_units='days')

#Tell model which diagnostics to write
diag.add_field('dynamics', 'ps', time_avg=True)
diag.add_field('dynamics', 'bk')
diag.add_field('dynamics', 'pk')
diag.add_field('atmosphere', 'precipitation', time_avg=True)
diag.add_field('mixed_layer', 't_surf', time_avg=True)
diag.add_field('dynamics', 'sphum', time_avg=True)
diag.add_field('dynamics', 'ucomp', time_avg=True)
diag.add_field('dynamics', 'vcomp', time_avg=True)
diag.add_field('dynamics', 'temp', time_avg=True)
Code example #12
# environment variable is used to determine which `$GFDL_BASE/src/extra/env` file
# is used to load the correct compilers.  The env file is always loaded from
# $GFDL_BASE and not the checked out git repo.

# create an Experiment object to handle the configuration of model parameters
# and output diagnostics

exp = Experiment('soc_aquaplanet_amip', codebase=cb)
exp.clear_rundir()

exp.inputfiles = [
    os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc'),
    os.path.join(
        GFDL_BASE,
        'exp/test_cases/realistic_continents/input/era-spectral7_T42_64x128.out.nc'
    ),
    os.path.join(GFDL_BASE,
                 'exp/test_cases/realistic_continents/input/sst_clim_amip.nc'),
    os.path.join(
        GFDL_BASE,
        'exp/test_cases/realistic_continents/input/siconc_clim_amip.nc')
]

#Tell model how to write diagnostics
diag = DiagTable()
diag.add_file('atmos_monthly', 30, 'days', time_units='days')

#Write out diagnostics needed for vertical interpolation post-processing
diag.add_field('dynamics', 'ps', time_avg=True)
diag.add_field('dynamics', 'bk')
diag.add_field('dynamics', 'pk')
diag.add_field('dynamics', 'zsurf')
Code example #13
# cb = DryCodeBase.from_repo(repo='https://github.com/isca/isca', commit='isca1.1')

# compilation depends on computer specific settings.  The $GFDL_ENV
# environment variable is used to determine which `$GFDL_BASE/src/extra/env` file
# is used to load the correct compilers.  The env file is always loaded from
# $GFDL_BASE and not the checked out git repo.

cb.compile()  # compile the source code to working directory $GFDL_WORK/codebase

# create an Experiment object to handle the configuration of model parameters
# and output diagnostics
exp = Experiment('mima_test_experiment', codebase=cb)

exp.inputfiles = [
    os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc')
]

#Tell model how to write diagnostics
diag = DiagTable()
diag.add_file('atmos_monthly', 30, 'days', time_units='days')

#Tell model which diagnostics to write
diag.add_field('dynamics', 'ps', time_avg=True)
diag.add_field('dynamics', 'bk')
diag.add_field('dynamics', 'pk')
diag.add_field('atmosphere', 'precipitation', time_avg=True)
diag.add_field('mixed_layer', 't_surf', time_avg=True)
diag.add_field('dynamics', 'sphum', time_avg=True)
diag.add_field('dynamics', 'ucomp', time_avg=True)
diag.add_field('dynamics', 'vcomp', time_avg=True)
Code example #14
File: trip_test_functions.py  Project: sit23/Isca
def conduct_comparison_on_test_case(base_commit,
                                    later_commit,
                                    test_case_name,
                                    repo_to_use='[email protected]:execlim/Isca',
                                    num_cores_to_use=4):
    """Process here is to checkout each commit in turn, compiles it if necessary, uses the appropriate nml for the test
    case under consideration, and runs the code with the two commits in turn. The output is then compared for all variables
    in the diag file. If there are any differences in the output variables then the test classed as a failure."""

    data_dir_dict = {}
    nml_use, input_files_use = get_nml_diag(test_case_name)
    diag_use = define_simple_diag_table()
    test_pass = True
    run_complete = True
    compile_successful = True

    #Do the run for each of the commits in turn
    for s in [base_commit, later_commit]:
        exp_name = test_case_name + '_trip_test_21_' + s
        if 'socrates' in test_case_name or 'ape_aquaplanet' in test_case_name:
            cb = SocratesCodeBase(repo=repo_to_use, commit=s)
        else:
            cb = IscaCodeBase(repo=repo_to_use, commit=s)
        try:
            cb.compile()
            exp = Experiment(exp_name, codebase=cb)
            exp.namelist = nml_use.copy()
            exp.diag_table = diag_use
            exp.inputfiles = input_files_use

            #Only run for 3 days to keep things short.
            exp.update_namelist({'main_nml': {
                'days': 3,
            }})
        except Exception:
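            #If compile or experiment set-up fails then test automatically fails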
            run_complete = False
            test_pass = False
            compile_successful = False
            continue

        try:
            # run with a progress bar
            with exp_progress(exp, description=s) as pbar:
                exp.run(1, use_restart=False, num_cores=num_cores_to_use)
        except FailedRunError as e:
            #If run fails then test automatically fails
            run_complete = False
            test_pass = False
            continue

        data_dir_dict[s] = exp.datadir
    if run_complete:
        #For each of the diag files defined, compare the output
        for diag_file_entry in diag_use.files.keys():
            base_commit_dataset = xar.open_dataset(
                data_dir_dict[base_commit] + '/run0001/' + diag_file_entry +
                '.nc',
                decode_times=False)
            later_commit_dataset = xar.open_dataset(
                data_dir_dict[later_commit] + '/run0001/' + diag_file_entry +
                '.nc',
                decode_times=False)

            diff = later_commit_dataset - base_commit_dataset

            #Check each of the output variables for differences
            for var in diff.data_vars.keys():
                maxval = np.abs(diff[var]).max()
                if maxval != 0.:
                    print('Test failed for ' + var + ' max diff value = ' +
                          str(maxval.values))
                    test_pass = False

            base_experiment_input_nml = f90nml.read(
                data_dir_dict[base_commit] + '/run0001/input.nml')
            later_commit_input_nml = f90nml.read(data_dir_dict[later_commit] +
                                                 '/run0001/input.nml')

            if base_experiment_input_nml != later_commit_input_nml:
                raise AttributeError(
                    f'The two experiments being compared were run with different input namelists, so the results may differ for that reason rather than because of the code change. This happens when one of the commit IDs was used in a previous trip-test run with a different version of the test cases. Try removing both {data_dir_dict[base_commit]} and {data_dir_dict[later_commit]} and running the test again.'
                )

        if test_pass:
            print('Test passed for ' + test_case_name + '. Commit ' +
                  later_commit + ' gives the same answer as commit ' +
                  base_commit)
            return_test_result = 'pass'
        else:
            print('Test failed for ' + test_case_name + '. Commit ' +
                  later_commit + ' gives a different answer to commit ' +
                  base_commit)
            return_test_result = 'fail'

    else:
        if compile_successful:
            #This means that the compiles were both successful, but at least one of the runs crashed.
            print('Test failed for ' + test_case_name +
                  ' because the run crashed.')
        else:
            print('Test failed for ' + test_case_name +
                  ' because at least one of the runs failed to compile.')

        return_test_result = 'fail'

    return return_test_result
Code example #15
experiments = [
    ('qflux_on', {
        'mixed_layer_nml': {'do_qflux': True},
        'qflux_nml': {'qflux_amp': 30.0, 'qflux_width': 16.0}
    }),
    ('qflux_off', {
        'mixed_layer_nml': {'do_qflux': False}
    })
]

exps = []
for exp_name, parameters in experiments:
    exp = Experiment(exp_name, codebase=codebase)

    # Get some basic parameters from config.py.  We copy the Namelist,
    # DiagTable and input files list so that, with multiple Experiments in the
    # same file, each has its own version that can be edited independently.
    exp.diag_table = diag_table.copy()
    exp.namelist = namelist.copy()
    exp.inputfiles = inputfiles[::]

    exp.set_resolution('T42', 40)
    exp.update_namelist(parameters)
    exps.append(exp)

run_cli(exps)
Code example #16
# compilation depends on computer specific settings.  The $GFDL_ENV
# environment variable is used to determine which `$GFDL_BASE/src/extra/env` file
# is used to load the correct compilers.  The env file is always loaded from
# $GFDL_BASE and not the checked out git repo.

cb.compile()  # compile the source code to working directory $GFDL_WORK/codebase

# create an Experiment object to handle the configuration of model parameters
# and output diagnostics
exp = Experiment('sqland_newbucket_finalAPqflux', codebase=cb)

#Add any input files that are necessary for a particular experiment.
exp.inputfiles = [
    os.path.join(GFDL_BASE, 'input/squareland/land.nc'),
    os.path.join(GFDL_BASE, 'input/rrtm_input_files/ozone_1990.nc'),
    os.path.join(GFDL_BASE, 'input/aquaplanet/ocean_qflux.nc')
]
#Tell model how to write diagnostics
diag = DiagTable()
diag.add_file('atmos_monthly', 30, 'days', time_units='days')

#Tell model which diagnostics to write
diag.add_field('dynamics', 'ps', time_avg=True)
diag.add_field('dynamics', 'bk')
diag.add_field('dynamics', 'pk')
diag.add_field('atmosphere', 'precipitation', time_avg=True)
diag.add_field('atmosphere', 'bucket_depth', time_avg=True)
diag.add_field('atmosphere', 'bucket_depth_cond', time_avg=True)
diag.add_field('atmosphere', 'bucket_depth_conv', time_avg=True)
diag.add_field('atmosphere', 'bucket_depth_lh', time_avg=True)