parser.add_argument('-w', '--workspace', default=None, help="Workspace name")
args = parser.parse_args()

manifest_file = os.path.abspath(args.manifest)
workspace_name = args.workspace

# Create new workspace directory
try:
    new_workspace_path = create_workspace_dir(workspace_name)
except Exception as e:
    # BUG FIX: `e.message` does not exist in Python 3 (removed from
    # BaseException), so the handler itself raised AttributeError and hid
    # the real failure. str(e) works on both Python 2 and 3, and matches
    # the error-handling style used by the other runway scripts.
    exit_with_error(str(e))

colorprint.info("\nRetrieving components into workspace at '{}'..."
                "\n".format(new_workspace_path))

# Copy manifest into workspace (keep an existing copy untouched so a
# re-run doesn't clobber a manifest the user may have edited in place).
manifest_copy_path = os.path.join(new_workspace_path, MANIFEST_COPY_NAME)
if not os.path.isfile(manifest_copy_path):
    copyfile(manifest_file, manifest_copy_path)

# Retrieve contents as described by the (copied) manifest.
try:
    current_manifest = Manifest(manifest_copy_path, new_workspace_path)
    current_manifest.retrieve_components()
except Exception as e:
    # Same fix as above: str(e) instead of the Python-2-only e.message.
    exit_with_error(str(e))

colorprint.success("Guest workspace successfully set up at "
                   "'{}'.".format(new_workspace_path))
# Warn early if CONTROLLER_NAME is unset: the message says the Vagrant box
# needs the VirtualBox SCSI controller name, and `vagrant up` may fail
# without it.
if os.environ.get("CONTROLLER_NAME") is None:
    colorprint.warning(
        "WARNING: CONTROLLER_NAME env var hasn't been set. "
        "If you fail to 'vagrant up' your VM, open "
        "VirtualBox, check the name of your SCSI "
        "Controller and provide it in the CONTROLLER_NAME "
        "env var."
    )

# Volume geometry is handed to Vagrant through the process environment.
vagrant_env_vars = {
    "VOL_SIZE": "{}".format(vol_size_in_mebibytes(vol_size)),
    "VOL_COUNT": "{}".format(vol_count),
}

try:
    # Bring the VM up, reboot it once (required before container creation
    # per the message below), then create the container inside it.
    run_command("vagrant up", cwd=RUNWAY_DIR, env=vagrant_env_vars)
    colorprint.success("VM successfully created.")
    colorprint.info("VM needs to be rebooted before container creation.")
    run_command("vagrant reload", cwd=RUNWAY_DIR)
    colorprint.info("Creating container...")
    # Build the create_container.sh invocation from the optional CLI args.
    create_container_cmd = "./create_container.sh -d {}".format(distro)
    if container_name is not None:
        create_container_cmd += " --container-name {}".format(container_name)
    if provided_workspace_name:
        create_container_cmd += " --workspace {}".format(workspace_name)
    run_command(create_container_cmd, cwd=BIN_DIR)
except Exception as e:
    # Any failure along the way aborts the script with the error text.
    exit_on_error(str(e))

elapsed_time = time.time() - start_time

# Log into our brand new container?
# NOTE(review): the source chunk is truncated here — the
# colorprint.success(...) call's arguments continue beyond this view.
colorprint.success(
def retrieve_components(self):
    """Fetch every component listed in the manifest into the workspace.

    For each section: optionally run its ``pre_cmd``, then either clone /
    update it via git (non-local components) or just report its status
    (``local`` components), and finally optionally run its ``post_cmd``.
    All console output is also appended to the download log file inside
    the workspace directory.
    """
    logfile_path = os.path.abspath(
        os.path.join(self.workspace_dir, DOWNLOAD_LOG_FILE_NAME))
    for section in self.sections:
        colorprint.info("Getting {}...".format(section), logfile_path)
        section_options = self.components_options[section]
        dest_path = self.get_absolute_dest_path_for_section(section)
        # If the destination directory already exists we update instead of
        # cloning, and skip pre/post commands (they only run on first fetch).
        component_exists = os.path.isdir(dest_path)

        # Run any needed command BEFORE cloning
        if not component_exists and "pre_cmd" in section_options:
            run_command(
                section_options["pre_cmd"],
                cwd=self.workspace_dir,
                logfile_path=logfile_path,
            )

        if not section_options["local"]:
            if not component_exists:
                self.git_clone_component(section, logfile_path=logfile_path)

            # Git checkout + pull in case "sha" or "tag" option is present
            # or if the component directory already existed.
            if (component_exists or "sha" in section_options
                    or "tag" in section_options):
                self.git_checkout_and_pull_component(
                    section, dest_path, logfile_path=logfile_path)

            self.git_submodule_update(dest_path, logfile_path=logfile_path)

            # Report the resulting commit; purely informational (see below).
            current_sha = self.get_current_sha(dest_path)
            if current_sha:
                colorprint.normal(
                    "Using SHA {} for {}\n".format(current_sha, section),
                    logfile_path,
                )
            else:
                # NOTE: deliberately not fatal — a missing SHA is only a
                # possible symptom, so we warn and continue.
                colorprint.error(
                    "Couldn't get the SHA for the current commit. The SHA is only "
                    "printed for informational purposes, but not being able to "
                    "get it might be a symptom of something bad happening.\n"
                )
        else:
            # Locally-managed component: never touched by git here.
            if not component_exists:
                colorprint.warning(
                    "Component '{}' has been marked as "
                    "local, but it doesn't exist. You'll "
                    "most probably want to add it before "
                    "doing anything else.".format(section),
                    logfile_path,
                )
            else:
                colorprint.normal(
                    "Component '{}' is locally managed.".format(section),
                    logfile_path,
                )

        # Run any needed command AFTER cloning
        if not component_exists and "post_cmd" in section_options:
            run_command(
                section_options["post_cmd"],
                cwd=self.workspace_dir,
                logfile_path=logfile_path,
            )

        # Just print a new line to keep components' output separated
        colorprint.normal("", logfile_path)
# Vagrant up
# Warn early if CONTROLLER_NAME is unset: the Vagrant box needs the
# VirtualBox SCSI controller name, and `vagrant up` may fail without it.
if os.environ.get('CONTROLLER_NAME') is None:
    colorprint.warning("WARNING: CONTROLLER_NAME env var hasn't been set. "
                       "If you fail to 'vagrant up' your VM, open "
                       "VirtualBox, check the name of your SCSI "
                       "Controller and provide it in the CONTROLLER_NAME "
                       "env var.")

# Distro choice and volume geometry are handed to Vagrant via environment.
vagrant_env_vars = {
    'DISTRO': distro,
    'VOL_SIZE': "{}".format(vol_size_in_mebibytes(vol_size)),
    'VOL_COUNT': "{}".format(vol_count),
}

try:
    # Bring the VM up, then bounce container and VM so the freshly
    # installed bits are picked up (stop container -> reload VM -> start).
    run_command("vagrant up", cwd=RUNWAY_DIR, env=vagrant_env_vars)
    colorprint.info("VM and container need to be rebooted after install.")
    colorprint.info("Stopping container...")
    run_command("./stop_container.sh", cwd=BIN_DIR)
    colorprint.info("Restarting VM...")
    run_command("vagrant reload", cwd=RUNWAY_DIR)
    colorprint.info("Starting container...")
    run_command("./start_container.sh", cwd=BIN_DIR)
except Exception as e:
    # BUG FIX: `e.message` does not exist in Python 3 (removed from
    # BaseException), so this handler raised AttributeError and masked the
    # real failure. str(e) is correct on both Python 2 and 3 and matches
    # the sibling script, which already calls exit_on_error(str(e)).
    exit_on_error(str(e))

# Log into our brand new container?
colorprint.success("Your new container is now up and running! If you want "
                   "to log into it, just run the following command from "
                   "the runway directory:\n\n\t"
                   "bin/bash_on_current_container.sh")