Example #1
    def generate_all_baselines(self):
        ###############################################################################
        git_head_commit = get_current_commit()
        git_head_ref = get_current_head()
        git_baseline_commit = get_current_commit(commit=self._baseline_ref)

        print("Generating baselines for ref {}".format(self._baseline_ref))

        checkout_git_ref(git_ref=self._baseline_ref, verbose=True)

        success = True
        num_workers = len(self._tests) if self._parallel else 1
        with threading3.ProcessPoolExecutor(
                max_workers=num_workers) as executor:

            future_to_test = {
                executor.submit(self.generate_baselines, test): test
                for test in self._tests
            }

            for future in threading3.as_completed(future_to_test):
                test = future_to_test[future]
                success &= future.result()

                if not success and self._fast_fail:
                    print('Generation of baselines for build {} failed'.format(
                        self._test_full_names[test]))
                    return False

        checkout_git_ref(git_ref=git_head_ref, verbose=True)

        return success
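
The examples above fan baseline generation out across processes, one per test, via threading3, which appears to be an alias for the standard concurrent.futures module. Below is a minimal, self-contained sketch of the same submit/as_completed pattern; generate_baseline_stub is a hypothetical stand-in for self.generate_baselines.

import concurrent.futures as threading3  # assumed alias, matching the examples

def generate_baseline_stub(test):
    # Hypothetical worker; the real method builds and installs baselines for `test`.
    print("generating baselines for {}".format(test))
    return True

def generate_all(tests, parallel=True, fast_fail=False):
    success = True
    num_workers = len(tests) if parallel else 1
    with threading3.ProcessPoolExecutor(max_workers=num_workers) as executor:
        future_to_test = {executor.submit(generate_baseline_stub, t): t for t in tests}
        for future in threading3.as_completed(future_to_test):
            test = future_to_test[future]
            success &= future.result()
            if not success and fast_fail:
                print("Generation of baselines for build {} failed".format(test))
                return False
    return success

if __name__ == "__main__":
    print(generate_all(["dbg", "sp", "fpe"]))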
Example #2
    def generate_all_baselines(self):
        ###############################################################################
        git_head_ref = get_current_head()

        print(
            "###############################################################################"
        )
        print("Generating baselines for ref {}".format(self._baseline_ref))
        print(
            "###############################################################################"
        )

        # First, create build directories (one per test)
        for test in self._tests:
            test_dir = self.get_baseline_dir(test)

            # Create this test's build dir
            if test_dir.exists():
                shutil.rmtree(str(test_dir))

            test_dir.mkdir(parents=True)

        checkout_git_ref(self._baseline_ref, verbose=True)

        success = True
        num_workers = len(self._tests) if self._parallel else 1
        with threading3.ProcessPoolExecutor(
                max_workers=num_workers) as executor:

            future_to_test = {
                executor.submit(self.generate_baselines, test): test
                for test in self._tests
            }

            for future in threading3.as_completed(future_to_test):
                test = future_to_test[future]
                success &= future.result()

                if not success and self._fast_fail:
                    print('Generation of baselines for build {} failed'.format(
                        self._test_full_names[test]))
                    return False

        if success:
            # Store the sha used for baselines generation
            run_cmd_no_fail("echo '{}' > {}".format(
                get_current_commit(commit=self._baseline_ref),
                self._baseline_sha_file))
            # Store the name of the builds for which we created a baseline
            tmp_string = ""
            for test in self._tests:
                tmp_string += " {}".format(test)
            run_cmd_no_fail("echo '{}' > {}".format(tmp_string,
                                                    self._baseline_names_file))

        checkout_git_ref(git_head_ref, verbose=True)

        return success
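
Example 2 records the baseline sha and the list of builds by shelling out to echo via run_cmd_no_fail. An equivalent pure-Python sketch using pathlib is shown below; the directory and sha values are illustrative, while the file names match the baseline_git_sha/baseline_names files used in the constructor examples further down.

import pathlib

def record_baseline_metadata(baseline_dir, baseline_sha, tests):
    # Mirror what Example 2 does with `echo ... > file`, without a shell.
    baseline_dir = pathlib.Path(baseline_dir)
    (baseline_dir / "baseline_git_sha").write_text(baseline_sha + "\n")
    (baseline_dir / "baseline_names").write_text(" ".join(tests) + "\n")

# record_baseline_metadata("ctest-build/baselines", "0123abc", ["dbg", "sp", "fpe"])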
Example #3
    def test_all_scream(self):
        ###############################################################################
        git_head_commit = get_current_commit()
        git_head = get_current_head()

        print("Testing git ref {} ({})".format(git_head, git_head_commit))

        success = True
        # First, create build directories (one per test)
        for test in self._tests:
            # Get this test's build dir name and cmake args
            full_name = self._test_full_names[test]
            test_dir = "./ctest-build/{}".format(full_name)

            # Create this test's build dir
            if os.path.exists(test_dir):
                shutil.rmtree(test_dir)

            os.makedirs(test_dir)

        if self._baseline_dir == "NONE":
            # Second, generate baselines
            git_baseline_commit = get_current_commit(commit=self._baseline_ref)
            if git_baseline_commit == git_head_commit:
                self._baseline_ref = None
                print("WARNING: baseline commit is same as current HEAD")

            git_baseline_head = "HEAD" if self._baseline_ref is None else self._baseline_ref
            success = self.generate_all_baselines(git_baseline_head, git_head)
            if not success:
                print("Error(s) occurred during baselines generation phase")
                return success

        if self._perform_tests:
            # Finally, run the tests
            success &= self.run_all_tests()
            if not success:
                print("Error(s) occurred during test phase")

        return success
Example #4
    def generate_all_baselines(self, git_baseline_head, git_head):
        ###############################################################################
        print("Generating baselines for ref {}".format(git_baseline_head))

        if git_baseline_head != "HEAD":
            expect(
                is_repo_clean(),
                "If baseline commit is not HEAD, then the repo must be clean before running"
            )
            run_cmd_no_fail("git checkout {}".format(git_baseline_head))
            print("  Switched to {} ({})".format(git_baseline_head,
                                                 get_current_commit()))

        cleanup = git_baseline_head != "HEAD"
        success = True
        num_workers = len(self._tests) if self._parallel else 1
        with threading3.ProcessPoolExecutor(
                max_workers=num_workers) as executor:

            future_to_test = {
                executor.submit(self.generate_baselines, test, cleanup): test
                for test in self._tests
            }

            for future in threading3.as_completed(future_to_test):
                test = future_to_test[future]
                success &= future.result()

                if not success and self._fast_fail:
                    print('Generation of baselines for build {} failed'.format(
                        self._test_full_names[test]))
                    return False

        if git_baseline_head != "HEAD":
            run_cmd_no_fail("git checkout {}".format(git_head))
            print("  Switched back to {} ({})".format(git_head,
                                                      get_current_commit()))

        return success
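
Examples 1, 2, and 4 all bracket the generation work with a checkout of the baseline ref followed by a checkout back to the original ref; note that their fast-fail return paths skip the restoring checkout. A sketch of the same idea with a try/finally is shown below, using plain subprocess in place of the project's git helpers and assuming the repo starts on a branch.

import subprocess

def run_on_ref(git_ref, work):
    # Checkout `git_ref`, run `work()`, and always restore the original branch,
    # even if work() raises or returns early.
    original = subprocess.check_output(
        ["git", "rev-parse", "--abbrev-ref", "HEAD"], text=True).strip()
    subprocess.check_call(["git", "checkout", git_ref])
    try:
        return work()
    finally:
        subprocess.check_call(["git", "checkout", original])

# run_on_ref("origin/master", lambda: print("generate baselines here"))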
Example #5
    def __init__(self, cxx, kokkos=None, submit=False, parallel=False, fast_fail=False, baseline_ref=None,
                 baseline_dir=None, machine=None, no_tests=False, keep_tree=False,
                 custom_cmake_opts=(), custom_env_vars=(), tests=(),
                 integration_test="JENKINS_HOME" in os.environ, root_dir=None, dry_run=False,
                 make_parallel_level=0, ctest_parallel_level=0):
    ###########################################################################

        self._cxx                     = cxx
        self._kokkos                  = kokkos
        self._submit                  = submit
        self._parallel                = parallel
        self._fast_fail               = fast_fail
        self._baseline_ref            = baseline_ref
        self._machine                 = machine
        self._perform_tests           = not no_tests
        self._keep_tree               = keep_tree
        self._baseline_dir            = baseline_dir
        self._custom_cmake_opts       = custom_cmake_opts
        self._custom_env_vars         = custom_env_vars
        self._tests                   = tests
        self._root_dir                = root_dir
        self._integration_test        = integration_test
        self._dry_run                 = dry_run
        self._must_generate_baselines = False
        self._testing_dir             = "ctest-build"

        ############################################
        #  Sanity checks and helper structs setup  #
        ############################################

        expect (not self._baseline_dir or self._testing_dir != self._baseline_dir,
                "Error! For your safety, do NOT use 'ctest-build' to store baselines. Move them to a different directory.")

        expect(not (self._baseline_ref and self._baseline_dir),
               "Makes no sense to specify a baseline generation commit if using pre-existing baselines ")

        self._tests_cmake_args = {"dbg" : [("CMAKE_BUILD_TYPE", "Debug"),
                                           ("EKAT_DEFAULT_BFB", "ON")],
                                  "sp"  : [("CMAKE_BUILD_TYPE", "Debug"),
                                           ("SCREAM_DOUBLE_PRECISION", "False"),
                                           ("EKAT_DEFAULT_BFB", "ON")],
                                  "fpe" : [("CMAKE_BUILD_TYPE", "Debug"),
                                           ("SCREAM_PACK_SIZE", "1"),
                                           ("SCREAM_SMALL_PACK_SIZE", "1"),
                                           ("EKAT_DEFAULT_BFB", "ON")]}

        self._test_full_names = { "dbg" : "full_debug",
                                  "sp"  : "full_sp_debug",
                                  "fpe" : "debug_nopack_fpe"}

        if not self._tests:
            self._tests = ["dbg", "sp", "fpe"]
        else:
            for t in self._tests:
                expect(t in self._test_full_names,
                       "Requested test '{}' is not supported by test-all-scream, please choose from: {}".\
                           format(t, ", ".join(self._test_full_names.keys())))

        # Compute root dir
        if not self._root_dir:
            self._root_dir = pathlib.Path(__file__).resolve().parent.parent
        else:
            self._root_dir = pathlib.Path(self._root_dir).resolve()
            expect(self._root_dir.is_dir() and self._root_dir.parts[-2:] == ("components", "scream"),
                   "Bad root-dir '{}', should be: $scream_repo/components/scream".format(self._root_dir))

        os.chdir(str(self._root_dir)) # needed, or else every git command will need repo=root_dir
        expect(get_current_commit(), "Root dir: {}, does not appear to be a git repo".format(self._root_dir))

        self._original_branch = get_current_branch()
        self._original_commit = get_current_commit()

        if not self._kokkos:
            expect(self._machine, "If no kokkos provided, must provide machine name for internal kokkos build")
        if self._submit:
            expect(self._machine, "If dashboard submit request, must provide machine name")

        print_last_commit(git_ref=self._original_branch)

        ###################################
        #      Compute baseline info      #
        ###################################

        default_baselines_root_dir = pathlib.Path(self._testing_dir,"baselines")
        if self._baseline_dir is None:
            if self._baseline_ref is None:
                # Compute baseline ref
                if self._keep_tree:
                    self._baseline_ref = "HEAD"
                elif self._integration_test:
                    self._baseline_ref = "origin/master"
                    merge_git_ref(git_ref="origin/master",verbose=True)
                else:
                    self._baseline_ref = get_common_ancestor("origin/master")
                    # Prefer a symbolic ref if possible
                    if self._baseline_ref is None or self._baseline_ref == get_current_commit(commit="origin/master"):
                        self._baseline_ref = "origin/master"
            self._must_generate_baselines = True

            self._baseline_dir = pathlib.Path(default_baselines_root_dir).absolute()

        else:
            # We treat the "AUTO" string as a request for automatic baseline dir.
            if self._baseline_dir == "AUTO":
                self._baseline_dir = get_mach_baseline_root_dir(self._machine,default_baselines_root_dir)

            self._baseline_dir = pathlib.Path(self._baseline_dir).absolute()

            # Make sure the baseline root directory exists
            expect(self._baseline_dir.is_dir(), "Baseline_dir {} is not a dir".format(self._baseline_dir))

            if self._integration_test:
                self._baseline_ref = "origin/master"
                merge_git_ref(git_ref=self._baseline_ref,verbose=True)
            else:
                for test in self._tests:
                    test_baseline_dir = self.get_preexisting_baseline(test)
                    expect(test_baseline_dir.is_dir(), "Missing baseline {}".format(test_baseline_dir))

        # Name of the file used to store/check the git sha of the repo used to generate baselines,
        # and name of the file used to store/check the builds for which baselines are available
        # Store it once to avoid typo-like bugs
        self._baseline_sha_file   = pathlib.Path(self._baseline_dir, "baseline_git_sha")
        self._baseline_names_file = pathlib.Path(self._baseline_dir, "baseline_names")

        if self._integration_test:
            master_sha = get_current_commit(commit=self._baseline_ref)
            if not self.baselines_are_present():
                print ("Some baselines were not found. Rebuilding them.")
                self._must_generate_baselines = True
            elif self.baselines_are_expired(expected_baseline_sha=master_sha):
                print ("Baselines expired. Rebuilding them.")
                self._must_generate_baselines = True
            else:
                print ("Baselines found and not expired. Skipping baselines generation.")

        if self._must_generate_baselines:
            print("Using commit {} to generate baselines".format(self._baseline_ref))

        ##################################################
        #   Deduce how many testing resources per test   #
        ##################################################

        if ctest_parallel_level > 0:
            ctest_max_jobs = ctest_parallel_level
            print("Note: honoring requested value for ctest parallel level: {}".format(ctest_max_jobs))
        elif "CTEST_PARALLEL_LEVEL" in os.environ:
            ctest_max_jobs = int(os.environ["CTEST_PARALLEL_LEVEL"])
            print("Note: honoring environment value for ctest parallel level: {}".format(ctest_max_jobs))
        else:
            ctest_max_jobs = get_mach_testing_resources(self._machine)
            print("Note: no value passed for --ctest-parallel-level. Using the default for this machine: {}".format(ctest_max_jobs))

        self._testing_res_count = {"dbg" : ctest_max_jobs,
                                   "sp"  : ctest_max_jobs,
                                   "fpe" : ctest_max_jobs}

        # Deduce how many compilation resources per test
        if make_parallel_level > 0:
            make_max_jobs = make_parallel_level
            print("Note: honoring requested value for make parallel level: {}".format(make_max_jobs))
        else:
            make_max_jobs = get_mach_compilation_resources(self._machine)
            print("Note: no value passed for --make-parallel-level. Using the default for this machine: {}".format(make_max_jobs))

        self._compile_res_count = {"dbg" : make_max_jobs,
                                   "sp"  : make_max_jobs,
                                   "fpe" : make_max_jobs}

        if self._parallel:
            # We need to be aware that other builds may be running too.
            # (Do not oversubscribe the machine)
            make_remainder = make_max_jobs % len(self._tests)
            make_count     = make_max_jobs // len(self._tests)
            ctest_remainder = ctest_max_jobs % len(self._tests)
            ctest_count     = ctest_max_jobs // len(self._tests)

            # In case we have more items in self._tests than cores/gpus (unlikely)
            if make_count == 0:
                make_count = 1
            if ctest_count == 0:
                ctest_count = 1

            for i, test in enumerate(self._tests):
                self._compile_res_count[test] = make_count + (1 if i < make_remainder else 0)
                self._testing_res_count[test] = ctest_count + (1 if i < ctest_remainder else 0)

                print("test {} can use {} jobs to compile, and {} jobs for testing".format(test, self._compile_res_count[test], self._testing_res_count[test]))

        if self._keep_tree:
            expect(not is_repo_clean(silent=True), "Makes no sense to use --keep-tree when repo is clean")
            expect(not self._integration_test, "Should not be doing keep-tree with integration testing")
            print("WARNING! You have uncommitted changes in your repo.",
                  "         The PASS/FAIL status may depend on these changes",
                  "         so if you want to keep them, don't forget to create a commit.",sep="\n")
            if self._baseline_dir is None:
                # Make sure the baseline ref is HEAD
                expect(self._baseline_ref == "HEAD",
                       "The option --keep-tree is only available when testing against pre-built baselines "
                       "(--baseline-dir) or HEAD (-b HEAD)")
        else:
            expect(is_repo_clean(),
                   "Repo must be clean before running. If testing against HEAD or pre-built baselines, "
                   "you can pass `--keep-tree` to allow non-clean repo.")
Example #6
    def __init__(self, cxx_compiler=None, f90_compiler=None, c_compiler=None,
                 submit=False, parallel=False, fast_fail=False,
                 baseline_ref=None, baseline_dir=None, machine=None, no_tests=False, keep_tree=False,
                 custom_cmake_opts=(), custom_env_vars=(), preserve_env=False, tests=(),
                 integration_test="JENKINS_HOME" in os.environ, local=False, root_dir=None, work_dir=None,
                 quick_rerun=False,quick_rerun_failed=False,dry_run=False,
                 make_parallel_level=0, ctest_parallel_level=0):
    ###########################################################################

        self._cxx_compiler            = cxx_compiler
        self._f90_compiler            = f90_compiler
        self._c_compiler              = c_compiler
        self._submit                  = submit
        self._parallel                = parallel
        self._fast_fail               = fast_fail
        self._baseline_ref            = baseline_ref
        self._machine                 = machine
        self._local                   = local
        self._perform_tests           = not no_tests
        self._keep_tree               = keep_tree
        self._baseline_dir            = baseline_dir
        self._custom_cmake_opts       = custom_cmake_opts
        self._custom_env_vars         = custom_env_vars
        self._preserve_env            = preserve_env
        self._tests                   = tests
        self._root_dir                = root_dir
        self._work_dir                = work_dir
        self._integration_test        = integration_test
        self._quick_rerun             = quick_rerun
        self._quick_rerun_failed      = quick_rerun_failed
        self._dry_run                 = dry_run
        self._must_generate_baselines = False

        if self._quick_rerun_failed:
            self._quick_rerun = True

        ############################################
        #  Sanity checks and helper structs setup  #
        ############################################

        # Probe machine if none was specified
        if self._machine is None:
            # We could potentially integrate more with CIME here to do actual
            # nodename probing.
            if "CIME_MACHINE" in os.environ and is_machine_supported(os.environ["CIME_MACHINE"]):
                self._machine = os.environ["CIME_MACHINE"]
            else:
                expect(self._local,
                       "test-all-scream requires either the machine arg (-m $machine) or the -l flag, "
                       "which makes it look for machine specs in '~/.cime/scream_mach_specs.py'.")
                self._machine = "local"
        else:
            expect (not self._local, "Specifying a machine while passing '-l,--local' is ambiguous.")

        ##################################################
        #   Deduce how many testing resources per test   #
        ##################################################

        if ctest_parallel_level > 0:
            ctest_max_jobs = ctest_parallel_level
            print("Note: honoring requested value for ctest parallel level: {}".format(ctest_max_jobs))
        elif "CTEST_PARALLEL_LEVEL" in os.environ:
            ctest_max_jobs = int(os.environ["CTEST_PARALLEL_LEVEL"])
            print("Note: honoring environment value for ctest parallel level: {}".format(ctest_max_jobs))
        else:
            ctest_max_jobs = get_mach_testing_resources(self._machine)
            print("Note: no value passed for --ctest-parallel-level. Using the default for this machine: {}".format(ctest_max_jobs))

        # Unless the user claims to know what he/she is doing, we set up the env.
        if not self._preserve_env:
            # Setup the env on this machine
            setup_mach_env(self._machine, ctest_j=ctest_max_jobs)

        # Compute root dir
        if not self._root_dir:
            self._root_dir = pathlib.Path(__file__).resolve().parent.parent
        else:
            self._root_dir = pathlib.Path(self._root_dir).resolve()
            expect(self._root_dir.is_dir() and self._root_dir.parts[-2:] == ("components", "scream"),
                   "Bad root-dir '{}', should be: $scream_repo/components/scream".format(self._root_dir))

        if self._work_dir is not None:
            expect(pathlib.Path(self._work_dir).absolute().is_dir(),
                   "Error! Work directory '{}' does not exist.".format(self._work_dir))
        else:
            self._work_dir = self._root_dir.absolute().joinpath("ctest-build")

        expect (not self._baseline_dir or self._work_dir != self._baseline_dir,
                "Error! For your safety, do NOT use '{}' to store baselines. Move them to a different directory (even a subdirectory of that works).".format(self._work_dir))

        expect(not (self._baseline_ref and self._baseline_dir),
               "Makes no sense to specify a baseline generation commit if using pre-existing baselines ")

        self._tests_cmake_args = {
            "dbg" : [("CMAKE_BUILD_TYPE", "Debug"),
                     ("EKAT_DEFAULT_BFB", "True")],
            "sp"  : [("CMAKE_BUILD_TYPE", "Debug"),
                    ("SCREAM_DOUBLE_PRECISION", "False"),
                     ("EKAT_DEFAULT_BFB", "True")],
            "fpe" : [("CMAKE_BUILD_TYPE", "Debug"),
                     ("SCREAM_PACK_SIZE", "1"),
                     ("SCREAM_SMALL_PACK_SIZE", "1"),
                     ("EKAT_DEFAULT_BFB", "True")],
            "opt" : [("CMAKE_BUILD_TYPE", "Release")],
            "valg" : [("CMAKE_BUILD_TYPE", "Debug"),
                      ("EKAT_ENABLE_VALGRIND", "True")],
        }

        self._test_full_names = OrderedDict([
            ("dbg" , "full_debug"),
            ("sp"  , "full_sp_debug"),
            ("fpe" , "debug_nopack_fpe"),
            ("opt" , "release"),
            ("valg" , "valgrind"),
        ])

        if not self._tests:
            # default to all test types except do not do fpe on CUDA
            self._tests = list(self._test_full_names.keys())
            self._tests.remove("valg") # don't want this on by default
            if is_cuda_machine(self._machine):
                self._tests.remove("fpe")
        else:
            for t in self._tests:
                expect(t in self._test_full_names,
                       "Requested test '{}' is not supported by test-all-scream, please choose from: {}".\
                           format(t, ", ".join(self._test_full_names.keys())))

        os.chdir(str(self._root_dir)) # needed, or else every git command will need repo=root_dir
        expect(get_current_commit(), "Root dir: {}, does not appear to be a git repo".format(self._root_dir))

        self._original_branch = get_current_branch()
        self._original_commit = get_current_commit()

        print_last_commit(git_ref=self._original_branch, dry_run=self._dry_run)

        ############################################
        #    Deduce compilers if needed/possible   #
        ############################################

        if self._cxx_compiler is None:
            self._cxx_compiler = get_mach_cxx_compiler(self._machine)
        if self._f90_compiler is None:
            self._f90_compiler = get_mach_f90_compiler(self._machine)
        if self._c_compiler is None:
            self._c_compiler = get_mach_c_compiler(self._machine)

        if not self._dry_run:
            self._f90_compiler = run_cmd_no_fail("which {}".format(self._f90_compiler))
            self._cxx_compiler = run_cmd_no_fail("which {}".format(self._cxx_compiler))
            self._c_compiler   = run_cmd_no_fail("which {}".format(self._c_compiler))

        ###################################
        #      Compute baseline info      #
        ###################################

        default_baselines_root_dir = pathlib.Path(self._work_dir,"baselines")
        if self._baseline_dir is None:
            if self._baseline_ref is None:
                # Compute baseline ref
                if self._keep_tree:
                    self._baseline_ref = "HEAD"
                elif self._integration_test:
                    # Make sure our copy of origin/master is up-to-date (at least at the time of this script's execution)
                    git_fetch_remote("origin")
                    self._baseline_ref = "origin/master"
                    merge_git_ref(git_ref="origin/master", verbose=True, dry_run=self._dry_run)
                else:
                    self._baseline_ref = get_common_ancestor("origin/master")
                    # Prefer a symbolic ref if possible
                    if self._baseline_ref is None or self._baseline_ref == get_current_commit(commit="origin/master"):
                        self._baseline_ref = "origin/master"
            self._must_generate_baselines = True

            self._baseline_dir = pathlib.Path(default_baselines_root_dir).absolute()

        else:
            # We treat the "AUTO" string as a request for automatic baseline dir.
            if self._baseline_dir == "AUTO":
                self._baseline_dir = get_mach_baseline_root_dir(self._machine)

            self._baseline_dir = pathlib.Path(self._baseline_dir).absolute()

            # Make sure the baseline root directory exists
            expect(self._baseline_dir.is_dir(), "Baseline_dir {} is not a dir".format(self._baseline_dir))

            if self._integration_test:
                self._baseline_ref = "origin/master"
                merge_git_ref(git_ref=self._baseline_ref, verbose=True, dry_run=self._dry_run)
            else:
                for test in self._tests:
                    test_baseline_dir = self.get_preexisting_baseline(test)
                    expect(test_baseline_dir.is_dir(), "Missing baseline {}".format(test_baseline_dir))

        # Name of the file used to store/check the git sha of the repo used to generate baselines,
        # and name of the file used to store/check the builds for which baselines are available
        # Store it once to avoid typo-like bugs
        self._baseline_sha_file   = pathlib.Path(self._baseline_dir, "baseline_git_sha")
        self._baseline_names_file = pathlib.Path(self._baseline_dir, "baseline_names")

        if self._integration_test:
            master_sha = get_current_commit(commit=self._baseline_ref)
            if not self.baselines_are_present():
                print ("Some baselines were not found. Rebuilding them.")
                self._must_generate_baselines = True
            elif self.baselines_are_expired(expected_baseline_sha=master_sha):
                print ("Baselines expired. Rebuilding them.")
                self._must_generate_baselines = True
            else:
                print ("Baselines found and not expired. Skipping baselines generation.")

        if self._must_generate_baselines:
            print("Using commit {} to generate baselines".format(self._baseline_ref))

        self._testing_res_count = {
            "dbg" : ctest_max_jobs,
            "sp"  : ctest_max_jobs,
            "fpe" : ctest_max_jobs,
            "opt" : ctest_max_jobs,
            "valg" : ctest_max_jobs,
        }

        # Deduce how many compilation resources per test
        if make_parallel_level > 0:
            make_max_jobs = make_parallel_level
            print("Note: honoring requested value for make parallel level: {}".format(make_max_jobs))
        else:
            make_max_jobs = get_mach_compilation_resources(self._machine)
            print("Note: no value passed for --make-parallel-level. Using the default for this machine: {}".format(make_max_jobs))

        self._compile_res_count = {
            "dbg" : make_max_jobs,
            "sp"  : make_max_jobs,
            "fpe" : make_max_jobs,
            "opt" : make_max_jobs,
            "valg" : make_max_jobs,
        }

        if self._parallel:
            # We need to be aware that other builds may be running too.
            # (Do not oversubscribe the machine)
            make_remainder = make_max_jobs % len(self._tests)
            make_count     = make_max_jobs // len(self._tests)
            ctest_remainder = ctest_max_jobs % len(self._tests)
            ctest_count     = ctest_max_jobs // len(self._tests)

            # In case we have more items in self._tests than cores/gpus (unlikely)
            if make_count == 0:
                make_count = 1
            if ctest_count == 0:
                ctest_count = 1

            for i, test in enumerate(self._tests):
                self._compile_res_count[test] = make_count + (1 if i < make_remainder else 0)
                self._testing_res_count[test] = ctest_count + (1 if i < ctest_remainder else 0)

                print("test {} can use {} jobs to compile, and {} jobs for testing".format(test, self._compile_res_count[test], self._testing_res_count[test]))

        if self._keep_tree:
            expect(not self._integration_test, "Should not be doing keep-tree with integration testing")
            print("WARNING! You have uncommitted changes in your repo.",
                  "         The PASS/FAIL status may depend on these changes",
                  "         so if you want to keep them, don't forget to create a commit.",sep="\n")
            if self._baseline_dir is None:
                # Make sure the baseline ref is HEAD
                expect(self._baseline_ref == "HEAD",
                       "The option --keep-tree is only available when testing against pre-built baselines "
                       "(--baseline-dir) or HEAD (-b HEAD)")
        else:
            expect(self._dry_run or is_repo_clean(),
                   "Repo must be clean before running. If testing against HEAD or pre-built baselines, "
                   "you can pass `--keep-tree` to allow non-clean repo.")
Example #7
    def __init__(self,
                 cxx,
                 kokkos=None,
                 submit=False,
                 parallel=False,
                 fast_fail=False,
                 baseline_ref=None,
                 baseline_dir=None,
                 machine=None,
                 no_tests=False,
                 keep_tree=False,
                 custom_cmake_opts=(),
                 tests=(),
                 integration_test="JENKINS_HOME" in os.environ):
        ###########################################################################

        self._cxx = cxx
        self._kokkos = kokkos
        self._submit = submit
        self._parallel = parallel
        self._fast_fail = fast_fail
        self._baseline_ref = baseline_ref
        self._machine = machine
        self._perform_tests = not no_tests
        self._keep_tree = keep_tree
        self._baseline_dir = baseline_dir
        self._custom_cmake_opts = custom_cmake_opts
        self._tests = tests
        self._src_dir = os.getcwd()
        self._integration_test = integration_test

        self._tests_cmake_args = {
            "dbg": [("CMAKE_BUILD_TYPE", "Debug")],
            "sp": [("CMAKE_BUILD_TYPE", "Debug"),
                   ("SCREAM_DOUBLE_PRECISION", "False")],
            "fpe": [("CMAKE_BUILD_TYPE", "Debug"), ("SCREAM_PACK_SIZE", "1"),
                    ("SCREAM_SMALL_PACK_SIZE", "1")]
        }

        self._test_full_names = {
            "dbg": "full_debug",
            "sp": "full_sp_debug",
            "fpe": "debug_nopack_fpe"
        }

        if not self._tests:
            self._tests = ["dbg", "sp", "fpe"]
        else:
            for t in self._tests:
                expect(t in self._test_full_names,
                       "Requested test '{}' is not supported by test-all-scream, please choose from: {}".\
                           format(t, ", ".join(self._test_full_names.keys())))

        expect(self._src_dir.endswith("components/scream"),
               "Run from $scream_repo/components/scream")
        if not self._kokkos:
            expect(
                self._machine,
                "If no kokkos provided, must provide machine name for internal kokkos build"
            )
        if self._submit:
            expect(self._machine,
                   "If dashboard submit request, must provide machine name")

        print_last_commit()

        # Compute baseline info
        expect(
            not (self._baseline_ref and self._baseline_dir),
            "Makes no sense to specify a baseline generation commit if using pre-existing baselines "
        )
        if self._baseline_dir is None:
            if self._baseline_ref is None:
                # Compute baseline ref
                if self._keep_tree:
                    self._baseline_ref = "HEAD"
                elif self._integration_test:
                    self._baseline_ref = "origin/master"
                    merge_git_ref(git_ref="origin/master")
                else:
                    self._baseline_ref = get_common_ancestor("origin/master")
                    # Prefer a symbolic ref if possible
                    if self._baseline_ref is None or self._baseline_ref == get_current_commit(
                            commit="origin/master"):
                        self._baseline_ref = "origin/master"

                print("Using baseline commit {}".format(self._baseline_ref))
        else:
            if self._integration_test:
                if get_current_commit() != get_current_commit(
                        commit="origin/master"):
                    merge_git_ref(git_ref="origin/master")

            print(
                "NOTE: baselines for each build type BT must be in '{}/BT/data'. We don't check this, "
                "but there will be errors if the baselines are not found.".
                format(self._baseline_dir))

        # Deduce how many resources per test
        self._proc_count = 4  # default
        proc_set = False
        if "CTEST_PARALLEL_LEVEL" in os.environ:
            try:
                self._proc_count = int(os.environ["CTEST_PARALLEL_LEVEL"])
                proc_set = True
            except ValueError:
                pass

        if not proc_set:
            print("WARNING: env var CTEST_PARALLEL_LEVEL unset, defaulting to {} which probably underutilizes your machine".\
                      format(self._proc_count))

        if self._parallel:
            # We need to be aware that other builds may be running too.
            # (Do not oversubscribe the machine)
            self._proc_count = self._proc_count // len(self._tests)

            # In case we have more tests than cores (unlikely)
            if self._proc_count == 0:
                self._proc_count = 1

        if self._keep_tree:
            expect(not is_repo_clean(),
                   "Makes no sense to use --keep-tree when repo is clean")
            expect(not self._integration_test,
                   "Should not be doing keep-tree with integration testing")
            print(
                "WARNING! You have uncommitted changes in your repo.",
                "         The PASS/FAIL status may depend on these changes",
                "         so if you want to keep them, don't forget to create a commit.",
                sep="\n")
            if self._baseline_dir is None:
                # Make sure the baseline ref is HEAD
                expect(
                    self._baseline_ref == "HEAD",
                    "The option --keep-tree is only available when testing against pre-built baselines "
                    "(--baseline-dir) or HEAD (-b HEAD)")
        else:
            expect(
                is_repo_clean(),
                "Repo must be clean before running. If testing against HEAD or pre-built baselines, "
                "you can pass `--keep-tree` to allow non-clean repo.")