Code example #1
    def baselines_are_expired (self, expected_baseline_sha):
    ###############################################################################
        # Baselines are expired if either:
        #  1) there is no file in baseline_dir containing the sha of the baselines
        #  2) the baselines sha does not match the one passed to this function

        # Sanity check
        expect(self._baseline_dir is not None, "Error! This routine should only be called when testing against pre-existing baselines.")

        # The file specifying what baselines were built during the last baselines generation must be there
        if not self._baseline_names_file.exists():
            return True

        # It might happen that we generate baselines for all build types, then later on
        # for some reason we manually generate baselines for only one build type. The other
        # baselines will still be there, but may be expired. Therefore, we check the
        # baselines_names file, to see what baselines were built last time. If all the
        # baselines we need are there, then we're good
        valid_baselines = run_cmd_no_fail("cat {}".format(self._baseline_names_file.resolve()))
        for test in self._tests:
            if test not in valid_baselines:
                return True

        # No sha file => baselines expired
        if not self._baseline_sha_file.exists():
            return True

        # Different sha => baselines expired
        baseline_sha = run_cmd_no_fail("cat {}".format(self._baseline_sha_file))
        return expected_baseline_sha != baseline_sha
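A minimal sketch of the two marker files this check reads; the directory, file names, and sha below are illustrative, not taken from the project:

    import pathlib

    baseline_dir = pathlib.Path("/tmp/baselines")             # illustrative location
    baseline_dir.mkdir(parents=True, exist_ok=True)
    # One file lists the tests whose baselines were generated last time...
    (baseline_dir / "baseline_names").write_text("full_debug\nrelease\n")
    # ...and one stores the sha of the commit used to generate them.
    (baseline_dir / "baseline_sha").write_text("0123abcd\n")

baselines_are_expired() then returns True if a required test name is missing from the first file, or if the stored sha differs from the expected one.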
Code example #2
File: test_all_scream.py  Project: JS-WRF-SBM/scream
    def test_all_scream(self):
        ###############################################################################
        success = True
        # First, create build directories (one per test)
        for test in self._tests:
            # Get this test's build dir name and cmake args
            full_name = self._test_full_names[test]
            test_dir = "./ctest-build/{}".format(full_name)

            # Create this test's build dir
            if os.path.exists(test_dir):
                shutil.rmtree(test_dir)

            os.makedirs(test_dir)

        if self._baseline_dir is None:
            # Second, generate baselines
            expect(self._baseline_ref is not None, "Missing baseline ref")

            success = self.generate_all_baselines()
            if not success:
                print("Error(s) occurred during baselines generation phase")
                return success

        if self._perform_tests:
            # Finally, run the tests
            success &= self.run_all_tests()
            if not success:
                print("Error(s) occurred during test phase")

        return success
Code example #3
def git_refs_difference(cmp_ref, head="HEAD", repo=None):
    ###############################################################################
    """
    Return the difference in commits between cmp_ref and head.
    In particular, it returns two numbers: the number of commits
    in cmp_ref that are not in head, and the number of commits in head
    that are not in cmp_ref. The former is how much head is behind cmp_ref,
    while the latter is how much head is ahead of cmp_ref.
    """
    if "SCREAM_FAKE_GIT_HEAD" in os.environ:
        expect(
            "SCREAM_FAKE_AHEAD" in os.environ,
            "git_refs_difference cannot be used with SCREAM_FAKE_GIT_HEAD and without SCREAM_FAKE_AHEAD"
        )
        # In fake-git mode, 'behind' is always 0; 'ahead' is 0 unless comparing
        # different refs, in which case the fake value is used
        return 0, (0 if cmp_ref == head else int(os.environ["SCREAM_FAKE_AHEAD"]))

    cmd = "git rev-list --left-right --count {}...{}".format(cmp_ref, head)
    out = run_cmd_no_fail("{}".format(cmd), from_dir=repo)

    behind_ahead = out.split()
    expect(
        len(behind_ahead) == 2,
        "Error! Something went wrong when running {}".format(cmd))
    behind, ahead = int(behind_ahead[0]), int(behind_ahead[1])

    return behind, ahead
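A usage sketch, assuming the function above is importable; the repository path and ref name are placeholders:

    # behind = commits in cmp_ref missing from HEAD; ahead = commits in HEAD missing from cmp_ref
    behind, ahead = git_refs_difference("origin/master", head="HEAD", repo="/path/to/scream")
    if behind == 0:
        print("HEAD contains all of origin/master and is {} commit(s) ahead".format(ahead))
    else:
        print("HEAD is {} commit(s) behind origin/master".format(behind))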
Code example #4
    def test_all_scream(self):
    ###############################################################################

        # Setup the env on this machine
        setup_mach_env(self._machine)

        # Add any override the user may have requested
        for env_var in self._custom_env_vars:
            key, val = env_var.split("=", 1)  # maxsplit=1: keep any '=' inside the value
            os.environ.update( { key : val } )

        success = True
        try:
            # If needed, generate baselines first
            if self._must_generate_baselines:
                expect(self._baseline_ref is not None, "Missing baseline ref")

                success = self.generate_all_baselines()
                if not success:
                    print ("Error(s) occurred during baselines generation phase")
                    return False

            # If requested, run tests
            if self._perform_tests:
                success &= self.run_all_tests()
                if not success:
                    print ("Error(s) occurred during test phase")

        finally:
            if not self._keep_tree:
                # Cleanup the repo if needed
                cleanup_repo(self._original_branch, self._original_commit)

        return success
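The KEY=VALUE parsing above can be exercised on its own; a small self-contained check (the variable names and values are made up) showing why maxsplit=1 matters when the value itself contains an '=':

    import os

    custom_env_vars = ["SCREAM_FOO=bar", "SCREAM_FLAGS=-O2 -DMODE=debug"]  # made-up overrides
    for env_var in custom_env_vars:
        key, val = env_var.split("=", 1)   # keep any '=' inside the value
        os.environ.update({key: val})

    assert os.environ["SCREAM_FLAGS"] == "-O2 -DMODE=debug"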
Code example #5
    def import_variables_no_remap(self,ifile):
    ###########################################################################

        ds_out = self.get_database(self._ofile,'a')
        ds_in  = self.get_database(ifile,'r')
        for item in self._ivars:
            if '=' in item:
                tokens = item.split('=')
                expect(len(tokens)==2,
                       "Error! Import variables with either 'name' or 'name=name_in', where name_in\n"
                       "       is the var name in the input file, and name is the var name in the output file.")
                name_out = tokens[0]
                name_in  = tokens[1]
            else:
                name_out = item
                name_in  = item

            var = ds_in.variables[name_in]

            # Make sure this var's dims are in our output nc file
            self.check_dims(ds_in,ds_out,var.dimensions)
            self.check_overwrite_var(name_out)
            if name_out not in ds_out.variables.keys():
                self.check_var_name(name_out)
                ds_out.createVariable(name_out,var.dtype,var.dimensions)

            ds_out.variables[name_out][:] = var[:]

        ds_in.close()
        ds_out.sync()
        ds_out.close()
Code example #6
def merge_git_ref(git_ref, repo=None, verbose=False, dry_run=False):
    ###############################################################################
    """
    Merge the given git ref into the current branch, and update submodules
    """

    # Even though it lets through some extra corner cases (dirty repo, but already ahead of git_ref),
    # this check is mostly for debugging purposes, as it reports that no merge occurred
    out = get_common_ancestor(git_ref)
    if out == get_current_commit(commit=git_ref):
        if verbose:
            print(
                "Merge of '{}' not necessary. Current HEAD is already ahead.".
                format(git_ref))
        return

    merge_cmd = "git merge {0} -m 'Automatic merge of {0}'".format(git_ref)
    if dry_run:
        print("Would run: {}".format(merge_cmd))
    else:
        expect(is_repo_clean(repo=repo),
               "Cannot merge ref '{}'. The repo is not clean.".format(git_ref))
        run_cmd_no_fail(merge_cmd, from_dir=repo)
        update_submodules(repo=repo)
        expect(
            is_repo_clean(repo=repo),
            "Something went wrong while performing the merge of '{}'".format(
                git_ref))
        if verbose:
            print("git ref {} successfully merged.".format(git_ref))
            print_last_commit()
Code example #7
File: query_scream.py  Project: E3SM-Project/scream
def query_scream(machine, param):
    ###############################################################################
    assert_machine_supported(machine)
    expect(param in CHOICES, f"Unknown param {param}")

    if param == "cxx_compiler":
        return get_mach_cxx_compiler(machine)
    elif param == "c_compiler":
        return get_mach_c_compiler(machine)
    elif param == "f90_compiler":
        return get_mach_f90_compiler(machine)
    elif param == "batch":
        return get_mach_batch_command(machine)
    elif param == "env":
        return get_mach_env_setup_command(machine)
    elif param == "baseline_root":
        return get_mach_baseline_root_dir(machine)
    elif param == "cuda":
        return str(is_cuda_machine(machine))
    elif param == "comp_j":
        return get_mach_compilation_resources()
    elif param == "test_j":
        return get_mach_testing_resources(machine)
    else:
        expect(False, f"Unhandled param {param}")
Code example #8
File: test_all_scream.py  Project: rountree/scream
    def get_taskset_range(self, test, for_compile=True):
        ###############################################################################
        res_count = self._compile_res_count if for_compile else self._testing_res_count

        if not for_compile and is_cuda_machine(self._machine):
            # For GPUs, the CPU affinity is irrelevant. Just assume all GPUs are open
            affinity_cp = list(range(self._ctest_max_jobs))
        else:
            this_process = psutil.Process()
            affinity_cp = list(this_process.cpu_affinity())

        affinity_cp.sort()

        if self._parallel:
            it = itertools.takewhile(lambda name: name != test, self._tests)
            offset = sum(res_count[prevs] for prevs in it)
        else:
            offset = 0

        expect(
            offset < len(affinity_cp),
            f"Offset {offset} out of bounds (max={len(affinity_cp)}) for test {test}\naffinity_cp: {affinity_cp}"
        )
        start = affinity_cp[offset]
        end = start
        for i in range(1, res_count[test]):
            expect(affinity_cp[offset + i] == start + i,
                   "Could not get contiguous range for test {}".format(test))
            end = affinity_cp[offset + i]

        return start, end
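The returned (start, end) pair describes a contiguous CPU range; a hedged sketch of how it could be turned into a taskset prefix (the command construction is illustrative, not the project's launcher code):

    start, end = 4, 7                        # e.g. the result of get_taskset_range(test)
    taskset_prefix = "taskset -c {}-{}".format(start, end)
    print(taskset_prefix + " ctest ...")     # pin the test invocation to CPUs 4-7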
Code example #9
def assert_machine_supported(machine):
    ###############################################################################
    expect(
        is_machine_supported(machine),
        "Machine {} is not currently supported by scream testing system.\n"
        " Note: you can also create a file `~/.cime/scream_mach_specs.py` with your local machine specs."
        .format(machine))
Code example #10
File: populate_nc_file.py  Project: xylar/scream
    def check_overwrite_var(self, var_name):
        ###########################################################################
        ds = self.get_database(self._ofile, 'r')
        expect(
            var_name not in ds.variables.keys() or self._overwrite,
            "Error! Variable '{}' already exists. To overwrite values, use -o flag."
            .format(var_name))
        ds.close()
Code example #11
File: populate_nc_file.py  Project: xylar/scream
    def check_var_name(self, var_name):
        ###########################################################################
        # re.match(r'^\w+$', string) is a very compact regexp check, to ensure
        # the string only contains alphanumeric chars and underscores
        expect(
            re.match(r'^\w+$', var_name),
            "Error! Variable names must contain only alphanumeric characters or underscores.\n"
        )
Code example #12
    def is_vector_layout(self,dims):
    ###########################################################################
        valid = ["time", "ncol", "lev", "ilev"]
        for dim in dims:
            if dim not in valid:
                expect (dim.isdigit(), "Error! Unexpected dimension '{}'".format(dim))
                return True
        return False
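A quick check of the classification above (dimension names follow the function's `valid` list; `populator` stands for an instance of the surrounding class):

    print(populator.is_vector_layout(["time", "ncol", "lev"]))       # False: scalar layout
    print(populator.is_vector_layout(["time", "ncol", "2", "lev"]))  # True: vector layout with extent 2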
Code example #13
    def __init__(self, by, max, per, where=None):
        # XX: Hack, need actual rate limit implementation.
        self._state = defaultdict(int)

        where = where or Literal.List([])
        self.by = expect(Entity, by)
        self.max = expect(Literal.Number, max).unwrap()
        self.per = expect(Interval, per).unwrap()
        self.where = expect(Literal.List.Of(BaseNode), where)
Code example #14
def get_mach_testing_resources(machine):
    ###############################################################################

    expect(
        is_machine_supported(machine),
        "Error! Machine {} is not currently supported by scream testing system."
        .format(machine))

    return MACHINE_METADATA[machine][4]
Code example #15
def get_mach_env_setup_command(machine):
    ###############################################################################

    expect(
        is_machine_supported(machine),
        "Error! Machine {} is not currently supported by scream testing system."
        .format(machine))

    return MACHINE_METADATA[machine][0]
Code example #16
File: test_all_scream.py  Project: rountree/scream
    def baselines_are_expired(self):
        ###############################################################################
        """
        Baselines are expired if either:
          1) there is no file in baseline_dir containing the sha of the baselines
          2) the baselines sha does not match baseline_ref
        """
        baseline_ref_sha = get_current_commit(commit=self._baseline_ref)

        # Sanity check
        expect(
            self._baseline_dir is not None,
            "Error! This routine should only be called when testing against pre-existing baselines."
        )

        for test in self._tests:
            if self._test_uses_baselines[test] and test not in self._tests_needing_baselines:
                # this test is not missing a baseline, but it may be expired.

                baseline_file_sha = self.get_baseline_file_sha(test)
                if baseline_file_sha is None:
                    self._tests_needing_baselines.append(test)
                    print(
                        " -> Test {} has no stored sha so must be considered expired"
                        .format(test))
                else:
                    num_ref_is_behind_file, num_ref_is_ahead_file = git_refs_difference(
                        baseline_file_sha, baseline_ref_sha)

                    # If the copy in our repo is behind, then we need to update the repo
                    expect(
                        num_ref_is_behind_file == 0
                        or not self._integration_test,
                        """Error! Your repo seems stale, since the baseline sha in your repo is behind
the one last used to generate them. We do *not* allow an integration
test to replace baselines with older ones, for security reasons.
If this is a legitimate case where baselines need to be 'rewound',
e.g. b/c of a (hopefully VERY RARE) force push to master, then
remove existing baselines first. Otherwise, please run 'git fetch $remote'.
 - baseline_ref: {}
 - repo baseline sha: {}
 - last used baseline sha: {}""".format(self._baseline_ref, baseline_ref_sha,
                                        baseline_file_sha))

                    # If the copy in our repo is not ahead, then baselines are not expired
                    if num_ref_is_ahead_file > 0:
                        self._tests_needing_baselines.append(test)
                        print(
                            " -> Test {} baselines are expired because they were generated with an earlier commit"
                            .format(test))
                    else:
                        print(
                            " -> Test {} baselines are valid and do not need to be regenerated"
                            .format(test))
Code example #17
    def check_dims(self,ds_in,ds_out,dims):
    ###########################################################################
        for dim in dims:
            expect (dim in ds_out.dimensions,
                    "Error! Dimension {} not found in the output file '{}'.".format(dim,self._ofile))
            expect (ds_in.dimensions[dim].size==ds_out.dimensions[dim].size,
                    "Error! Dimension {} in input file '{}' has a different extent than in output file '{}'.\n"
                    "   - {}: {}\n"
                    "   - {}: {}".format(dim,self._ifile,self._ofile,
                                         self._ifile,ds_in.dimensions[dim].size,
                                         self._ofile,ds_out.dimensions[dim].size))
Code example #18
    def split_braket_list(self,string):
    ###########################################################################
        # Parse a string of the form "[a0,...,aN]", and return the list 'a0,...,aN'
        import re

        valid_re = re.compile(r'[[][a-zA-Z_,]+[]]')
        expect (valid_re.match(string),
                "Error! Braket list should be of the form '[a0,...,aN]'\n"
                "       Input string: {}".format(string))

        return string[1:-1].split(',')
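Note that the regexp only accepts letters, underscores, and commas between the brackets. A short usage sketch (`populator` is again a stand-in for an instance of the surrounding class):

    print(populator.split_braket_list("[ncol,lev,ilev]"))   # -> ['ncol', 'lev', 'ilev']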
Code example #19
File: perf_analysis.py  Project: rountree/scream
    def perf_analysis(self):
        ###############################################################################
        if self._use_existing:
            expect(
                os.path.exists("CMakeCache.txt"),
                "{} doesn't look like a build directory".format(os.getcwd()))

        else:
            if self._scream_docs:
                expect(
                    os.path.basename(os.getcwd()) == "micro-apps",
                    "Please run from micro-apps directory")

            tmpdir = tempfile.mkdtemp(prefix="build", dir=os.getcwd())
            os.chdir(tmpdir)

            if not self._plot_friendly:
                print("BUILDING")

            self.build()

        results = {}
        while (self._scaling_exp.should_continue()):
            if not self._plot_friendly:
                print()
                print("RUNNING {}".format(" ".join([
                    "{}={}".format(name, val) for name, val in zip(
                        self._argmap.keys(),
                        self._scaling_exp.values(incl_threads=False))
                ])))

            reference = None
            for test, test_cmd in self._tests.items():
                med_time, threads = self.run_test(test_cmd)
                self._scaling_exp.threads = threads

                if self._plot_friendly:
                    results.setdefault(test, []).append(
                        (self._scaling_exp.values()[0], med_time,
                         self._scaling_exp.get_scaling_var()))
                else:
                    self.user_explain(test,
                                      self._scaling_exp.values()[0], med_time,
                                      reference, threads)

                reference = med_time if reference is None else reference

            self._scaling_exp.update_values()

        if self._plot_friendly:
            self._scaling_exp.plot(results)

        return True
Code example #20
def get_mach_baseline_root_dir(machine, default_dir):
    ###############################################################################

    expect(
        is_machine_supported(machine),
        "Error! Machine {} is not currently supported by scream testing system."
        .format(machine))

    if MACHINE_METADATA[machine][5] == "":
        return default_dir
    else:
        return MACHINE_METADATA[machine][5]
Code example #21
File: test_all_scream.py  Project: rountree/scream
    def generate_cmake_config(self, extra_configs, for_ctest=False):
        ###############################################################################

        # Ctest only needs config options, and doesn't need the leading 'cmake '
        result = "{}-C {}".format("" if for_ctest else "cmake ",
                                  self.get_machine_file())

        # NetCDF should be available, but if the user is running a testing session
        # where all NetCDF-related code is disabled, they should be able to run
        # even if no NetCDF is available
        stat, f_path, _ = run_cmd("nf-config --prefix")
        if stat == 0:
            result += " -DNetCDF_Fortran_PATH={}".format(f_path)
        stat, c_path, _ = run_cmd("nc-config --prefix")
        if stat == 0:
            result += " -DNetCDF_C_PATH={}".format(c_path)

        # Test-specific cmake options
        for key, value in extra_configs:
            result += " -D{}={}".format(key, value)

        # The output coming from all tests at the same time will be a mixed-up mess
        # unless we tell test-launcher to buffer all output
        if self._extra_verbose:
            result += " -DEKAT_TEST_LAUNCHER_BUFFER=True "

        # User-requested config options
        custom_opts_keys = []
        for custom_opt in self._custom_cmake_opts:
            expect(
                "=" in custom_opt,
                "Error! Syntax error in custom cmake options. Should be `VAR_NAME=VALUE`."
            )
            if "=" in custom_opt:
                name, value = custom_opt.split("=", 1)
                # Some effort is needed to ensure quotes are preserved
                result += " -D{}='{}'".format(name, value)
                custom_opts_keys.append(name)

        # Common config options (unless already specified by the user)
        if "CMAKE_CXX_COMPILER" not in custom_opts_keys:
            result += " -DCMAKE_CXX_COMPILER={}".format(self._cxx_compiler)
        if "CMAKE_C_COMPILER" not in custom_opts_keys:
            result += " -DCMAKE_C_COMPILER={}".format(self._c_compiler)
        if "CMAKE_Fortran_COMPILER" not in custom_opts_keys:
            result += " -DCMAKE_Fortran_COMPILER={}".format(self._f90_compiler)

        if "SCREAM_DYNAMICS_DYCORE" not in custom_opts_keys:
            result += " -DSCREAM_DYNAMICS_DYCORE=HOMME"

        return result
Code example #22
    def compute_variables(self):
    ###########################################################################

        for expr in self._cvars:
            # Split the expression, to get the output var name
            tokens = expr.split('=')
            expect(len(tokens)==2,"Error! Compute variables with 'var=expr' syntax.")

            var_name = tokens[0]

            self.check_var_name(var_name)
            self.check_overwrite_var(var_name)

            Nco().ncap2(str(self._ofile),output=str(self._ofile),spt=expr)
Code example #23
    def get_threads(self, output):
        ###############################################################################
        r"""
        >>> output = 'Foo\nARCH: dp 1 avx  FPE 0 nthread 48\nTime = 0.047 seconds.\nbar'
        >>> get_threads(output)
        48
        """
        for line in output.splitlines():
            if "nthread" in line:
                items = line.split()
                threads = int(items[items.index("nthread") + 1])
                return threads

        expect(False, "Failed to find threads in:\n\n{}".format(output))
Code example #24
File: populate_nc_file.py  Project: xylar/scream
    def import_variables(self):
        ###########################################################################

        if len(self._ivars) > 0:
            expect(
                self._ifile.exists(),
                "Error! Import file '{}' does not exist.".format(self._ifile))

            ds_out = self.get_database(self._ofile, 'a')
            ds_in = self.get_database(self._ifile, 'r')

            expect('ncol' in ds_in.dimensions,
                   "Error! 'ncol' not found in input file dimensions.")
            expect('lev' in ds_in.dimensions,
                   "Error! 'lev' not found in input file dimensions.")

            ncol_out = ds_out.dimensions['ncol'].size
            nlev_out = ds_out.dimensions['lev'].size
            ncol_in = ds_in.dimensions['ncol'].size
            nlev_in = ds_in.dimensions['lev'].size

            ds_in.close()
            ds_out.close()

            expect(
                nlev_in == nlev_out,
                "Error! Vertical remapping unavailable, due to ncremap assumption that level idx strides slower than column idx."
            )

            if ncol_in == ncol_out:
                self.import_variables_no_remap(self._ifile)
            else:
                self.import_variables_horiz_remap()
Code example #25
def get_current_branch(repo=None):
    ###############################################################################
    """
    Return the name of the current branch for a repository
    If in detached HEAD state, returns None
    """

    stat, output, err = run_cmd("git rev-parse --abbrev-ref HEAD",
                                from_dir=repo)
    expect(
        stat == 0,
        "Error! The command 'git rev-parse --abbrev-ref HEAD' failed with error: {}"
        .format(err))

    return None if output == "HEAD" else output
Code example #26
    def get_dims(self,name_dims):
    ###########################################################################
        opn = name_dims.find('(')
        cls = name_dims.find(')')

        # Check format
        expect (opn!=-1,"Error! Var declaration should be 'name(dim1,...,dimN)'.")
        expect (cls!=-1,"Error! Var declaration should be 'name(dim1,...,dimN)'.")
        expect (cls>opn,"Error! Var declaration should be 'name(dim1,...,dimN)'.")
        expect (cls==len(name_dims)-1,"Error! Var declaration should be 'name(dim1,...,dimN)'.")

        dims = name_dims[opn+1:cls].split(',')
        expect (len(dims)>0,"Error! Var declaration should be 'name(dim1,...,dimN)'.")

        return dims
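For example, a declaration with an integer (vector) extent keeps that extent in the returned list (`populator` is a stand-in instance; the variable name is made up):

    print(populator.get_dims("horiz_winds(time,ncol,2,lev)"))   # -> ['time', 'ncol', '2', 'lev']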
Code example #27
    def baselines_are_present(self):
        ###############################################################################
        """
        Check that all baselines are present (one subdir for all values of self._tests)
        """
        # Sanity check
        expect(self._baseline_dir is not None,
               "Error! Baseline directory not correctly set.")

        for test in self._tests:
            data_dir = self.get_preexisting_baseline(test)
            if not data_dir.is_dir():
                self._tests_needing_baselines.append(test)
                print(" -> Test {} is missing baselines".format(test))
            else:
                print(" -> Test {} appears to have baselines".format(test))
Code example #28
    def baselines_are_present (self):
    ###############################################################################
        # Check that all baselines are present (one subdir for all values of self._tests)

        # Sanity check
        expect(self._baseline_dir is not None, "Error! This routine should only be called when testing against pre-existing baselines.")

        # Even if a single baseline is missing, we consider all the baselines not present
        for test in self._tests:
            test_baseline_dir = pathlib.Path(self._baseline_dir, self._test_full_names[test], "data")
            if not test_baseline_dir.is_dir():
                return False

        # Note: inside this script we don't know what kind of file should be in the baseline dirs.
        #       If the actual files are missing, some other part of the testing will crash.
        return True
Code example #29
    def get_scalar_dims(self,dims):
    ###########################################################################
        valid = ["ncol", "lev", "ilev"]
        s_dims = []
        vec_dim_id = -1
        for i in range(0,len(dims)):
            if dims[i] in valid:
                s_dims.append(dims[i])
            else:
                expect (vec_dim_id==-1,
                        "Error! Multiple integer extents found in dims specification '{}'.\n"
                        "       Only vectors are supported, for non-scalar layouts.".format(dims))
                vec_dim_id = i

        expect(vec_dim_id>0, "Error! Something went wrong while detecting vector dim id from '{}'.".format(dims))

        return vec_dim_id, s_dims
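A small usage sketch, continuing the hypothetical `populator` instance from above:

    vec_dim_id, s_dims = populator.get_scalar_dims(["ncol", "2", "lev"])
    # vec_dim_id == 1 (position of the integer extent), s_dims == ['ncol', 'lev']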
Code example #30
File: populate_nc_file.py  Project: xylar/scream
    def add_variables(self):
        ###########################################################################

        # Open the output database in append mode
        ds = self.get_database(self._ofile, 'a')
        for item in self._avars:
            if '=' in item:
                # User provided initial values
                name_dims_vals = item.split('=')
                expect(
                    len(name_dims_vals) == 2,
                    "Error! Invalid variable declaration: {}".format(item))
                name_dims = name_dims_vals[0]
                vals_str = name_dims_vals[1]
            else:
                name_dims = item
                vals_str = ""

            # From the string name(dim1,...,dimN) extract name and dimensions
            name = self.get_name(name_dims)
            dims = self.get_dims(name_dims)

            is_vector = self.is_vector_layout(dims)

            if is_vector:
                # From the list (dim1,...,dimN), check if it is a vector field,
                # and if so, get the idx of the vector dimension, the extent
                # along that dimension, and the dims list without the vector dim.
                vec_dim_id, scalar_dims = self.get_scalar_dims(dims)

                vec_dim = 1 if vec_dim_id == -1 else int(dims[vec_dim_id])

                # From the string after the = (if any), get the initialization
                # values. The string can be a single value (for scalar or vector
                # fields) or a list of values [v1,...,vn] (for vector field)
                values = self.get_values(vals_str, vec_dim)

                for i in range(0, len(values)):
                    self.add_variable(ds, "{}_{}".format(name, i), scalar_dims,
                                      values[i])
            else:
                value = 0.0 if vals_str == "" else float(vals_str)
                self.add_variable(ds, name, dims, value)

        ds.sync()
        ds.close()
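Putting the helpers above together, a hedged walk-through of one (made-up) entry in self._avars:

    item = "horiz_winds(time,ncol,2,lev)=[1.0,2.0]"
    # get_name/get_dims would yield name='horiz_winds' and dims=['time','ncol','2','lev'];
    # is_vector_layout(dims) is True, so get_scalar_dims strips the integer extent and,
    # assuming get_values parses the bracketed list, two scalar variables are created:
    #   horiz_winds_0(time,ncol,lev) = 1.0
    #   horiz_winds_1(time,ncol,lev) = 2.0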