def test_special_build_vars_should_be_checkpointable(self):
    """Special build vars must be captured and restored by checkpoints like other state."""
    env = SE.ShellEnvironment()
    bld_vars = SE.GetBuildVars()

    var_a = 'SE_TEST_VAR_3'
    var_b = 'SE_TEST_VAR_4'
    a_first, a_second, a_third = 'MyData1', 'RevisedData1', 'MoreRevisedData1'
    b_first = 'MyData2'

    # Snapshot with only the first value present.
    bld_vars.SetValue(var_a, a_first, 'var1 set', overridable=True)
    first_chkpt = env.checkpoint()

    # Revise the first var, add a second, then snapshot again.
    bld_vars.SetValue(var_a, a_second, 'var1 set', overridable=True)
    bld_vars.SetValue(var_b, b_first, 'var2 set', overridable=True)
    second_chkpt = env.checkpoint()

    # Rolling back to the first checkpoint restores the original state.
    env.restore_checkpoint(first_chkpt)
    self.assertEqual(env.get_build_var(var_a), a_first)
    self.assertIs(env.get_build_var(var_b), None)

    # Mutations made after a restore must land in the current state.
    bld_vars.SetValue(var_a, a_third, 'var1 set', overridable=True)
    self.assertEqual(
        env.get_build_var(var_a),
        a_third,
        'even after restore, special build vars should always update current'
    )

    # The second checkpoint reflects its own snapshot.
    env.restore_checkpoint(second_chkpt)
    self.assertEqual(env.get_build_var(var_a), a_second)
    self.assertEqual(env.get_build_var(var_b), b_first)

    # The first checkpoint is still pristine after repeated restores.
    env.restore_checkpoint(first_chkpt)
    self.assertEqual(env.get_build_var(var_a), a_first)
def test_special_build_vars_should_always_update_current(self):
    """Changes made after a checkpoint must remain visible in the current state."""
    env = SE.ShellEnvironment()
    bld_vars = SE.GetBuildVars()

    var_a = 'SE_TEST_VAR_update_current1'
    var_b = 'SE_TEST_VAR_update_current2'

    # Seed one value and take a checkpoint (the checkpoint id is not needed here).
    bld_vars.SetValue(var_a, 'NewData1', 'var1 set', overridable=True)
    env.checkpoint()

    # Mutate after the checkpoint was taken.
    bld_vars.SetValue(var_a, 'NewerData1', 'var1 set', overridable=True)
    bld_vars.SetValue(var_b, 'NewData2', 'var2 set', overridable=True)

    # The current state should expose the newest values.
    self.assertEqual(env.get_build_var(var_a), 'NewerData1')
    self.assertEqual(env.get_build_var(var_b), 'NewData2')
def test_get_build_vars_should_update_vars(self):
    """Values set via GetBuildVars() must be visible through the ShellEnvironment."""
    env = SE.ShellEnvironment()
    name, payload = 'SE_TEST_VAR_4', 'NewData1'
    SE.GetBuildVars().SetValue(name, payload, 'random set')
    self.assertEqual(env.get_build_var(name), payload)
def test_special_build_vars_should_default_non_overrideable(self):
    """SetValue without overridable=True should make the first write stick."""
    env = SE.ShellEnvironment()
    bld_vars = SE.GetBuildVars()
    name = 'SE_TEST_VAR_4'
    first, second = 'NewData1', 'NewerData1'

    bld_vars.SetValue(name, first, 'random set')
    bld_vars.SetValue(name, second, 'another random set')

    # The second set should have been rejected by default.
    self.assertEqual(env.get_build_var(name), first)
def test_set_pypath_elements(self):
    """set_pypath should propagate elements to PYTHONPATH, active_pypath, and sys.path.

    The original test duplicated the whole assertion body for two hard-coded
    passes; the loop below runs the same checks over both element lists, and
    the second pass additionally verifies that a later set_pypath fully
    replaces the earlier value in os.environ['PYTHONPATH'].
    """
    shell_env = SE.ShellEnvironment()
    for testpath_elems in (['MYPATH'], ['/bin/bash', 'new_path', '/root']):
        testpath_string = os.pathsep.join(testpath_elems)
        shell_env.set_pypath(testpath_elems)
        self.assertEqual(os.environ['PYTHONPATH'], testpath_string,
                         "the final string should be correct")
        for elem in testpath_elems:
            self.assertIn(elem, shell_env.active_pypath,
                          "the active path should contain all elements")
            self.assertIn(elem, sys.path,
                          "the sys path should contain all elements")
def __init__(self, descriptor): super().__init__(descriptor) # Check to see whether this URL should be patched. url_creds_var = descriptor.get('url_creds_var', None) if url_creds_var is not None: env = ShellEnvironment.GetEnvironment() url_creds = env.get_shell_var(url_creds_var) if url_creds is not None: # Break things up. source_parts = urlsplit(self.source) # Modify the URL host with the creds. new_parts = (source_parts.scheme, url_creds + '@' + source_parts.netloc, source_parts.path, source_parts.query, source_parts.fragment) # Put things back together. self.source = urlunsplit(new_parts) self.repo_url = self.source self.commit = self.version self._local_repo_root_path = os.path.join(os.path.abspath(self.contents_dir), self.name) self.logger = logging.getLogger("git-dependency") # valid_attributes = ["Path", "Url", "Branch", "Commit", "ReferencePath", "Full"] self._repo_resolver_dep_obj = {"Path": self.name, "Url": self.repo_url, "Commit": self.commit}
def setUp(self):
    """Reset the ShellEnvironment singleton to a clean state before each test."""
    env = SE.ShellEnvironment()
    env.restore_initial_checkpoint()
    # For testing, drop every checkpoint except the initial one.
    initial = env.checkpoints[SE.ShellEnvironment.INITIAL_CHECKPOINT]
    env.checkpoints = [initial]
def test_can_set_os_vars(self):
    """set_shell_var should write through to os.environ."""
    env = SE.ShellEnvironment()
    # Start from a clean slate for the variable under test.
    os.environ.pop("SE-TEST-VAR-1", None)
    env.set_shell_var('SE-TEST-VAR-1', 'Dummy')
    self.assertEqual(os.environ['SE-TEST-VAR-1'], 'Dummy')
def __init__(self, OverrideConf, AdditionalTemplateConfDir):
    """Set up conf management; requires WORKSPACE and EDK2_BASE_TOOLS_DIR build vars."""
    self.Logger = logging.getLogger("ConfMgmt")
    self.env = ShellEnvironment.GetBuildVars()
    # Both build vars are prerequisites for populating the Conf directory.
    prerequisites = ("WORKSPACE", "EDK2_BASE_TOOLS_DIR")
    if any(self.env.GetValue(name) is None for name in prerequisites):
        raise Exception(
            "WORKSPACE and EDK2_BASE_TOOLS_DIR must be set prior to running ConfMgmt"
        )
    self.__PopulateConf(OverrideConf, AdditionalTemplateConfDir)
def test_checkpoint_indices_should_be_unique(self):
    """Every checkpoint gets its own id, distinct from the initial checkpoint's."""
    env = SE.ShellEnvironment()
    env.append_path('/SE/TEST/PATH/1')
    first = env.checkpoint()
    env.append_path('/SE/TEST/PATH/2')
    second = env.checkpoint()

    for chkpt in (first, second):
        self.assertNotEqual(chkpt, SE.ShellEnvironment.INITIAL_CHECKPOINT)
    self.assertNotEqual(first, second)
def test_can_set_and_get_build_vars(self):
    """A build var set through the environment should read back identically."""
    env = SE.ShellEnvironment()
    name, payload = 'SE-TEST-VAR-3', 'Dummy3'

    # Confirm the var is absent before the test creates it.
    self.assertIs(env.get_build_var(name), None,
                  "test var should not exist before creation")

    env.set_build_var(name, payload)
    self.assertEqual(env.get_build_var(name), payload,
                     "get var data should match set var data")
def test_url_should_not_be_modified_without_descriptor_field(self):
    """Without 'url_creds_var' in the descriptor, the source URL stays untouched."""
    descriptor = copy.copy(TestGitDependencyUrlPatching.TEST_DESCRIPTOR)
    # Credentials exist in the environment, but the descriptor never asks
    # for them, so no patching should occur.
    ShellEnvironment.GetEnvironment().set_shell_var('test_creds_var', 'my_stuff')
    gdep = GitDependency(descriptor)
    self.assertEqual(gdep.source, descriptor['source'])
def test_set_build_vars_should_default_overrideable(self):
    """set_build_var should let later writes replace earlier ones by default."""
    env = SE.ShellEnvironment()
    name = 'SE_TEST_VAR_4'

    self.assertIs(env.get_build_var(name), None,
                  "test var should not exist before creation")

    for value in ('NewData1', 'NewerData1'):
        env.set_build_var(name, value)

    # The last write wins.
    self.assertEqual(env.get_build_var(name), 'NewerData1')
def test_url_should_be_modified_if_creds_are_indicated_and_supplied(self):
    """When the descriptor requests creds AND they are set, the URL gains user@host."""
    descriptor = copy.copy(TestGitDependencyUrlPatching.TEST_DESCRIPTOR)
    # Request patching via the descriptor and supply the creds in the env.
    descriptor['url_creds_var'] = 'test_creds_var'
    ShellEnvironment.GetEnvironment().set_shell_var('test_creds_var', 'my_stuff')
    gdep = GitDependency(descriptor)
    # The netloc should now carry the credentials.
    self.assertEqual(
        gdep.source,
        "https://[email protected]/octocat/Hello-World.git")
def test_restore_initial_checkpoint_should_erase_changes(self):
    """restore_initial_checkpoint should discard path changes made since startup."""
    # NOTE(review): a later test method in this file carries the same name;
    # if both are defined in the same class, this earlier one is shadowed
    # and never runs — confirm and rename one of them.
    env = SE.ShellEnvironment()
    change = '/SE/TEST/PATH/1'

    # The change must not pre-exist.
    self.assertNotIn(change, env.active_path,
                     "starting condition should not have the test change")

    # Apply and verify the change.
    env.append_path(change)
    self.assertIn(change, env.active_path)

    # Restoring the initial checkpoint should erase it.
    env.restore_initial_checkpoint()
    self.assertNotIn(change, env.active_path,
                     "restoring checkpoint should remove test change")
def test_checkpointed_objects_should_behave_correctly(self):
    """Checkpoint snapshots must be deep enough that later edits don't leak back in."""
    env = SE.ShellEnvironment()
    var_a, var_b = 'SE_TEST_VAR_3', 'SE_TEST_VAR_4'
    a_first, a_second, a_third = 'MyData1', 'RevisedData1', 'MoreRevisedData1'
    b_first = 'MyData2'

    # Snapshot with only the first value present.
    env.set_build_var(var_a, a_first)
    chkpt_one = env.checkpoint()

    # Snapshot with a revised first value and a second var.
    env.set_build_var(var_a, a_second)
    env.set_build_var(var_b, b_first)
    chkpt_two = env.checkpoint()

    # First snapshot: original value, second var absent.
    env.restore_checkpoint(chkpt_one)
    self.assertEqual(env.get_build_var(var_a), a_first)
    self.assertIs(env.get_build_var(var_b), None)

    # Mutate after restore; this must not bleed into either checkpoint.
    env.set_build_var(var_a, a_third)

    # Second snapshot: its own values.
    env.restore_checkpoint(chkpt_two)
    self.assertEqual(env.get_build_var(var_a), a_second)
    self.assertEqual(env.get_build_var(var_b), b_first)

    # First snapshot again: still pristine.
    env.restore_checkpoint(chkpt_one)
    self.assertEqual(env.get_build_var(var_a), a_first)
def BootstrapEnvironment(workspace, scopes=()):
    """Bootstrap the build/shell environment for *workspace* in three stages.

    Runs only once per process: subsequent calls return the cached
    (build_env, shell_env) tuple from ENV_STATE. The stage order matters —
    simple PATH entries must land before extdep modules can be resolved.

    Returns:
        tuple: (build_env, shell_env) as stored in the module-level ENV_STATE.
    """
    global ENVIRONMENT_BOOTSTRAP_COMPLETE, ENV_STATE
    if not ENVIRONMENT_BOOTSTRAP_COMPLETE:
        #
        # ENVIRONMENT BOOTSTRAP STAGE 1
        # Locate and load all environment description files.
        #
        build_env = SelfDescribingEnvironment(
            workspace, scopes).load_workspace()

        #
        # ENVIRONMENT BOOTSTRAP STAGE 2
        # Parse all of the PATH-related descriptor files to make sure that
        # any required tools or Python modules are now available.
        #
        shell_env = ShellEnvironment.GetEnvironment()
        build_env.update_simple_paths(shell_env)

        #
        # ENVIRONMENT BOOTSTRAP STAGE 3
        # Now that the preliminary paths have been loaded,
        # we can load the modules that had greater dependencies.
        #
        build_env.update_extdep_paths(shell_env)

        # Bind our current execution environment into the shell vars.
        shell_env.set_shell_var("PYTHON_HOME", os.path.dirname(sys.executable))
        # MU_DEPRECATED - Support legacy variable for older releases.
        shell_env.set_shell_var("PYTHON3", sys.executable)
        # PYTHON_COMMAND is required to be set for Linux
        shell_env.set_shell_var("PYTHON_COMMAND", sys.executable)

        # Debug the environment that was produced.
        shell_env.log_environment()

        ENVIRONMENT_BOOTSTRAP_COMPLETE = True
        ENV_STATE = (build_env, shell_env)

    # Return the environment as it's configured.
    return ENV_STATE
def test_restore_initial_checkpoint_should_erase_changes(self):
    """restore_initial_checkpoint should undo both path and shell-var changes."""
    env = SE.ShellEnvironment()
    change = '/SE/TEST/PATH/1'

    # The path change must not pre-exist.
    self.assertNotIn(change, env.active_path,
                     "starting condition should not have the test change")

    # Apply and verify the path change.
    env.append_path(change)
    self.assertIn(change, env.active_path)

    # Also set a shell var that should disappear on restore.
    self.assertEqual(env.get_shell_var('i_should_not_exist'), None)
    env.set_shell_var('i_should_not_exist', 'a_value')
    self.assertEqual(env.get_shell_var('i_should_not_exist'), 'a_value')

    # Restoring the initial checkpoint erases both changes.
    env.restore_initial_checkpoint()
    self.assertNotIn(change, env.active_path,
                     "restoring checkpoint should remove test change")
    self.assertEqual(env.get_shell_var('i_should_not_exist'), None)
def test_insert_append_remove_replace_pypath(self):
    """Exercise insert/append/replace/remove operations on the active pypath."""
    env = SE.ShellEnvironment()

    # Seed the pypath with a single known element.
    middle = 'MIDDLEPATH'
    env.set_pypath(middle)
    self.assertEqual(1, len(env.active_pypath))
    self.assertIn(middle, env.active_pypath)

    # Append to the tail, then insert at the head.
    tail = 'ENDPATH'
    env.append_pypath(tail)
    head = 'STARTPATH'
    env.insert_pypath(head)

    # Order should be head, middle, tail, and all three must be mirrored
    # into both PYTHONPATH and sys.path.
    self.assertEqual(3, len(env.active_pypath))
    self.assertEqual((head, middle, tail), tuple(env.active_pypath))
    for elem in (head, middle, tail):
        self.assertIn(elem, os.environ["PYTHONPATH"])
        self.assertIn(elem, sys.path)

    # Replacing an existing element swaps it in place.
    new_middle = 'NEWMIDDLEPATH'
    env.replace_pypath_element(middle, new_middle)
    self.assertEqual(env.active_pypath[1], new_middle)

    # Replacing a missing element is a no-op.
    before = env.active_pypath
    env.replace_pypath_element("PATH1", "PATH2")
    self.assertEqual(before, env.active_pypath)

    # Removal drops the element entirely.
    env.remove_pypath_element(new_middle)
    self.assertNotIn(new_middle, env.active_pypath)
def test_can_get_os_vars(self):
    """get_shell_var should read back what set_shell_var wrote."""
    env = SE.ShellEnvironment()
    value = 'Dummy2'
    env.set_shell_var('SE-TEST-VAR-2', value)
    self.assertEqual(env.get_shell_var('SE-TEST-VAR-2'), value)
def setUpClass(cls):
    """Capture an environment checkpoint so each test can roll back to it."""
    env = ShellEnvironment.GetEnvironment()
    # Stored on the class so tearDown can restore it after every test.
    cls.env_checkpoint = env.checkpoint()
def main():
    """Entry point for the MuBuild driver.

    Loads the mu config yaml, resolves package paths and git dependencies,
    bootstraps the build environment, then runs every loaded MuBuild plugin
    against every package, collecting results into a junit XML report.
    Exits the process with the number of failures as the status code.
    """
    # Parse command line arguments
    PROJECT_SCOPES = ("project_mu", )
    buildArgs = get_mu_config()
    mu_config_filepath = os.path.abspath(buildArgs.mu_config)

    if mu_config_filepath is None or not os.path.isfile(mu_config_filepath):
        raise FileNotFoundError("Invalid path to mu.json file for build: ",
                                mu_config_filepath)

    # have a build config file
    with open(mu_config_filepath, 'r') as mu_config_file:
        mu_config = yaml.safe_load(mu_config_file)
    # Workspace root is declared relative to the config file's directory.
    WORKSPACE_PATH = os.path.realpath(
        os.path.join(os.path.dirname(mu_config_filepath),
                     mu_config["RelativeWorkspaceRoot"]))

    # Setup the logging to the file as well as the console
    MuLogging.clean_build_logs(WORKSPACE_PATH)

    buildlog_path = os.path.join(WORKSPACE_PATH, "Build", "BuildLogs")
    logging.getLogger("").setLevel(logging.NOTSET)
    filename = "BUILDLOG_MASTER"
    MuLogging.setup_section_level()
    MuLogging.setup_txt_logger(buildlog_path, filename)
    MuLogging.setup_markdown_logger(buildlog_path, filename)
    MuLogging.setup_console_logging(use_azure_colors=buildArgs.use_azure_color,
                                    use_color=buildArgs.color_enabled,
                                    logging_level=logging.WARNING)

    # Get scopes from config file
    if "Scopes" in mu_config:
        PROJECT_SCOPES += tuple(mu_config["Scopes"])

    # Command-line omnicache path overrides the config file's ReferencePath.
    omnicache_path = None
    if "ReferencePath" in mu_config:
        omnicache_path = mu_config["ReferencePath"]
    if buildArgs.omnicache_path is not None:
        omnicache_path = buildArgs.omnicache_path

    # SET PACKAGE PATH
    #
    # Get Package Path from config file
    pplist = list()
    if (mu_config["RelativeWorkspaceRoot"] != ""):
        # this package is not at workspace root.
        # Add self
        pplist.append(os.path.dirname(mu_config_filepath))

    # Include packages from the config file
    if "PackagesPath" in mu_config:
        for a in mu_config["PackagesPath"]:
            pplist.append(a)

    # Check Dependencies for Repo
    if "Dependencies" in mu_config:
        logging.log(MuLogging.SECTION, "Resolving Git Repos")
        pplist.extend(
            RepoResolver.resolve_all(WORKSPACE_PATH,
                                     mu_config["Dependencies"],
                                     ignore=buildArgs.git_ignore,
                                     force=buildArgs.git_force,
                                     update_ok=buildArgs.git_update,
                                     omnicache_dir=omnicache_path))

    # make Edk2Path object to handle all path operations
    edk2path = Edk2Path(WORKSPACE_PATH, pplist)

    logging.info("Running ProjectMu Build: {0}".format(mu_config["Name"]))
    logging.info("WorkSpace: {0}".format(edk2path.WorkspacePath))
    logging.info("Package Path: {0}".format(edk2path.PackagePathList))
    logging.info("mu_build version: {0}".format(
        pkg_resources.get_distribution("mu_build").version))
    logging.info("mu_python_library version: " +
                 pkg_resources.get_distribution("mu_python_library").version)
    logging.info("mu_environment version: " +
                 pkg_resources.get_distribution("mu_environment").version)

    # which package to build
    packageList = mu_config["Packages"]
    #
    # If mu pk list supplied lets see if they are a file system path
    # If so convert to edk2 relative path
    #
    #
    if (len(buildArgs.pkglist) > 0):
        packageList = []  # clear it
        for mu_pk_path in buildArgs.pkglist:
            # if abs path lets convert
            if os.path.isabs(mu_pk_path):
                temp = edk2path.GetEdk2RelativePathFromAbsolutePath(mu_pk_path)
                if (temp is not None):
                    packageList.append(temp)
                else:
                    logging.critical(
                        "pkg-dir invalid absolute path: {0}".format(mu_pk_path))
                    raise FileNotFoundError("Invalid Package Path")
            else:
                # Check if relative path
                temp = os.path.join(os.getcwd(), mu_pk_path)
                temp = edk2path.GetEdk2RelativePathFromAbsolutePath(temp)
                if (temp is not None):
                    packageList.append(temp)
                else:
                    logging.critical(
                        "pkg-dir invalid relative path: {0}".format(mu_pk_path))
                    raise FileNotFoundError("Invalid Package Path")

    # Bring up the common minimum environment.
    logging.log(MuLogging.SECTION, "Bootstrapping Enviroment")
    (build_env, shell_env) = SelfDescribingEnvironment.BootstrapEnvironment(
        edk2path.WorkspacePath, PROJECT_SCOPES)
    CommonBuildEntry.update_process(edk2path.WorkspacePath, PROJECT_SCOPES)
    env = ShellEnvironment.GetBuildVars()
    archSupported = " ".join(mu_config["ArchSupported"])
    env.SetValue("TARGET_ARCH", archSupported, "Platform Hardcoded")

    # Generate consumable XML object- junit format
    JunitReport = MuJunitReport()

    # Keep track of failures
    failure_num = 0
    total_num = 0

    # Load plugins
    logging.log(MuLogging.SECTION, "Loading plugins")
    pluginManager = PluginManager.PluginManager()
    failedPlugins = pluginManager.SetListOfEnvironmentDescriptors(
        build_env.plugins)
    if failedPlugins:
        logging.critical("One or more plugins failed to load. Halting build.")
        for a in failedPlugins:
            logging.error("Failed Plugin: {0}".format(a["name"]))
        raise RuntimeError("One or more plugins failed to load.")

    helper = PluginManager.HelperFunctions()
    if (helper.LoadFromPluginManager(pluginManager) > 0):
        raise RuntimeError("One or more helper plugins failed to load.")

    pluginList = pluginManager.GetPluginsOfClass(PluginManager.IMuBuildPlugin)

    # Check to make sure our configuration is valid
    ConfigValidator.check_mu_confg(mu_config, edk2path, pluginList)

    for pkgToRunOn in packageList:
        #
        # run all loaded MuBuild Plugins/Tests
        #
        logging.log(MuLogging.SECTION,
                    "Building {0} Package".format(pkgToRunOn))
        logging.info("Running on Package: {0}".format(pkgToRunOn))
        ts = JunitReport.create_new_testsuite(
            pkgToRunOn,
            "MuBuild.{0}.{1}".format(mu_config["GroupName"], pkgToRunOn))
        packagebuildlog_path = os.path.join(buildlog_path, pkgToRunOn)
        _, txthandle = MuLogging.setup_txt_logger(
            packagebuildlog_path,
            "BUILDLOG_{0}".format(pkgToRunOn),
            logging_level=logging.DEBUG,
            isVerbose=True)
        _, mdhandle = MuLogging.setup_markdown_logger(
            packagebuildlog_path,
            "BUILDLOG_{0}".format(pkgToRunOn),
            logging_level=logging.DEBUG,
            isVerbose=True)
        loghandle = [txthandle, mdhandle]
        # Per-package build var scope; reverted at the end of this iteration.
        ShellEnvironment.CheckpointBuildVars()
        env = ShellEnvironment.GetBuildVars()

        # load the package level .mu.json
        pkg_config_file = edk2path.GetAbsolutePathOnThisSytemFromEdk2RelativePath(
            os.path.join(pkgToRunOn, pkgToRunOn + ".mu.yaml"))
        if (pkg_config_file):
            with open(pkg_config_file, 'r') as f:
                pkg_config = yaml.safe_load(f)
        else:
            logging.info("No Pkg Config file for {0}".format(pkgToRunOn))
            pkg_config = dict()

        # check the resulting configuration
        ConfigValidator.check_package_confg(pkgToRunOn, pkg_config, pluginList)

        # get all the defines from the package configuration
        if "Defines" in pkg_config:
            for definition_key in pkg_config["Defines"]:
                definition = pkg_config["Defines"][definition_key]
                env.SetValue(definition_key, definition,
                             "MuBuild.py from PkgConfig yaml", False)

        for Descriptor in pluginList:
            # Get our targets
            targets = ["DEBUG"]
            if Descriptor.Obj.IsTargetDependent() and "Targets" in mu_config:
                targets = mu_config["Targets"]

            for target in targets:
                MuLogging.log_progress("--Running {2}: {0} {1} --".format(
                    Descriptor.Name, target, pkgToRunOn))
                total_num += 1
                # Per-plugin-run build var scope; reverted after the run.
                ShellEnvironment.CheckpointBuildVars()
                env = ShellEnvironment.GetBuildVars()

                env.SetValue("TARGET", target,
                             "MuBuild.py before RunBuildPlugin")
                (testcasename, testclassname) = Descriptor.Obj.GetTestName(
                    pkgToRunOn, env)
                tc = ts.create_new_testcase(testcasename, testclassname)

                # create the stream for the build log
                plugin_output_stream = MuLogging.create_output_stream()

                # merge the repo level and package level for this specific plugin
                pkg_plugin_configuration = merge_config(
                    mu_config, pkg_config, Descriptor.descriptor)

                # perhaps we should ask the validator to run on the
                # Check if need to skip this particular plugin
                if "skip" in pkg_plugin_configuration and pkg_plugin_configuration[
                        "skip"]:
                    tc.SetSkipped()
                    MuLogging.log_progress("--->Test Skipped! %s" %
                                           Descriptor.Name)
                else:
                    try:
                        # RunBuildPlugin arguments:
                        # - package is the edk2 path to package. This means workspace/packagepath relative.
                        # - edk2path object configured with workspace and packages path
                        # - any additional command line args
                        # - RepoConfig Object (dict) for the build
                        # - PkgConfig Object (dict)
                        # - EnvConfig Object
                        # - Plugin Manager Instance
                        # - Plugin Helper Obj Instance
                        # - testcase Object used for outputing junit results
                        # - output_stream the StringIO output stream from this plugin
                        rc = Descriptor.Obj.RunBuildPlugin(
                            pkgToRunOn, edk2path, sys.argv, mu_config,
                            pkg_plugin_configuration, env, pluginManager,
                            helper, tc, plugin_output_stream)
                    except Exception as exp:
                        # A crashing plugin counts as a failure (rc = 1), not a build abort.
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        logging.critical("EXCEPTION: {0}".format(exp))
                        exceptionPrint = traceback.format_exception(
                            type(exp), exp, exc_traceback)
                        logging.critical(" ".join(exceptionPrint))
                        tc.SetError("Exception: {0}".format(exp),
                                    "UNEXPECTED EXCEPTION")
                        rc = 1

                    if (rc != 0):
                        failure_num += 1
                        if (rc is None):
                            logging.error(
                                "--->Test Failed: %s returned NoneType" %
                                Descriptor.Name)
                        else:
                            logging.error("--->Test Failed: %s returned %d" %
                                          (Descriptor.Name, rc))
                    else:
                        MuLogging.log_progress(
                            "--->Test Success {0} {1}".format(
                                Descriptor.Name, target))

                # revert to the checkpoint we created previously
                ShellEnvironment.RevertBuildVars()
                # remove the logger
                MuLogging.remove_output_stream(plugin_output_stream)
            # finished target loop
        # Finished plugin loop

        MuLogging.stop_logging(
            loghandle)  # stop the logging for this particular buildfile
        ShellEnvironment.RevertBuildVars()
    # Finished buildable file loop

    JunitReport.Output(
        os.path.join(WORKSPACE_PATH, "Build", "BuildLogs", "TestSuites.xml"))

    # Print Overall Success
    if (failure_num != 0):
        logging.error("Overall Build Status: Error")
        MuLogging.log_progress(
            "There were {0} failures out of {1} attempts".format(
                failure_num, total_num))
    else:
        MuLogging.log_progress("Overall Build Status: Success")

    # Exit status mirrors the failure count so CI can gate on it.
    sys.exit(failure_num)
def test_shell_should_always_have_an_initial_checkpoint(self):
    """A freshly-obtained ShellEnvironment must already carry its initial checkpoint."""
    env = SE.ShellEnvironment()
    self.assertTrue(
        len(env.checkpoints) > 0,
        "a new instance of ShellEnvironment should have at least one checkpoint")
def tearDown(self):
    """Roll the environment back to the checkpoint captured in setUpClass."""
    ShellEnvironment.GetEnvironment().restore_checkpoint(
        TestGitDependencyUrlPatching.env_checkpoint)
def test_shell_should_be_a_singleton(self):
    """Constructing ShellEnvironment twice must yield the very same object."""
    first = SE.ShellEnvironment()
    second = SE.ShellEnvironment()
    self.assertIs(first, second,
                  "two instances of ShellEnvironment should be identical")