def build_nodes(cluster_cfg):
    """Build all cluster nodes for the given cluster configuration.

    :param cluster_cfg: cluster configuration name; combined with
                        conf.distro to select the scripts config module
    """
    config_name = "{}_{}".format(conf.distro, cluster_cfg)

    if conf.wbatch:
        wbatch.wbatch_begin_node(config_name)

    # Start with an empty autostart queue, then enqueue the scripts listed
    # in the per-distro cluster config.
    autostart.autostart_reset()
    autostart.autostart_from_config("scripts." + config_name)

    if conf.wbatch:
        wbatch.wbatch_end_file()
def build_nodes(cluster_cfg):
    """Build all cluster nodes for the given cluster configuration.

    :param cluster_cfg: cluster configuration name; combined with
                        conf.distro to select the scripts config module
    """
    # Trace entry point and its caller for debugging.
    logger.info('%s(): caller: %s()', log_utils.get_fname(1),
                log_utils.get_fname(2))

    config_name = "{}_{}".format(conf.distro, cluster_cfg)

    if conf.wbatch:
        wbatch.wbatch_begin_node(config_name)

    # Reset the autostart queue, then enqueue the scripts listed in the
    # per-distro cluster config.
    autostart.autostart_reset()
    autostart.autostart_from_config("scripts." + config_name)

    if conf.wbatch:
        wbatch.wbatch_end_file()
def vm_install_base():
    """Create the base disk by installing the OS into a temporary VM.

    KVM/libvirt provider: creates the base disk volume, launches
    virt-install in the background, drives the distribution installer,
    waits for the automated install to shut the VM down, then compacts
    the disk and undefines the VM.
    """
    vm_name = "base"
    conf.vm[vm_name] = conf.VMconfig(vm_name)
    base_disk_name = conf.get_base_disk_name()

    # Remove any leftovers from a previous run.
    vm.vm_delete(vm_name)
    if base_disk_exists():
        logger.info("Deleting existing basedisk.")
        base_disk_delete()

    vm_config = conf.vm[vm_name]

    if conf.do_build:
        install_iso = iso_image.find_install_iso()
    else:
        install_iso = os.path.join(conf.img_dir, conf.iso_image.name)

    logger.info("Install ISO:\n\t%s", install_iso)

    # Queue the scripts that run inside the VM after the OS install.
    autostart.autostart_reset()
    autostart.autostart_queue("osbash/base_fixups.sh")
    autostart.autostart_from_config(conf.base_install_scripts)
    autostart.autostart_queue("zero_empty.sh", "shutdown.sh")

    base_disk_size = 10000  # MB
    vm.disk_create(base_disk_name, base_disk_size)

    libvirt_connect_uri = "qemu:///system"
    virt_install_call = ["sudo", "virt-install",
                        "--connect={}".format(libvirt_connect_uri)]

    call_args = virt_install_call
    call_args.extend(["--name", vm_name])
    call_args.extend(["--ram", str(vm_config.vm_mem)])
    call_args.extend(["--vcpus", str(1)])
    call_args.extend(["--os-type", "linux"])
    call_args.extend(["--cdrom", install_iso])
    call_args.extend(["--disk",
                      "vol={}/{},cache=none".format(vm.kvm_vol_pool,
                                                    base_disk_name)])
    if conf.vm_ui == "headless":
        call_args.extend(("--graphics", "none", "--noautoconsole"))
    elif conf.vm_ui == "vnc":
        call_args.extend(("--graphics", "vnc,listen=127.0.0.1"))
    # Default (no extra argument) is gui option: should open a console viewer
    call_args.append("--wait=-1")

    import subprocess
    errout = subprocess.STDOUT
    # Fix: the original logged the call twice (joined string and raw list);
    # one debug line carries the same information.
    logger.debug("virt-install call: %s", ' '.join(call_args))
    vm.virsh_log(call_args)
    # virt-install runs in the background; we poll for the VM below.
    subprocess.Popen(call_args, stderr=errout)

    while not vm.vm_is_running(vm_name):
        print('.', end='')
        time.sleep(1)

    delay = 5
    logger.info("\nWaiting %d seconds for VM %s to come up.", delay, vm_name)
    cs.conditional_sleep(delay)
    logger.info("Booting into distribution installer.")
    distro_boot.distro_start_installer(vm_config)

    # Prevent "time stamp from the future" due to race between two sudos for
    # virt-install (background) above and virsh below
    time.sleep(1)

    logger.info("Waiting for VM %s to be defined.", vm_name)
    while not vm.vm_is_running(vm_name):
        time.sleep(1)
        print(".")

    ssh_ip = vm.node_to_ip(vm_name)
    conf.vm[vm_name].ssh_ip = ssh_ip

    logger.info("Waiting for ping returning from %s.", ssh_ip)
    hf.wait_for_ping(ssh_ip)

    # Run the queued scripts inside the VM; the last one shuts it down.
    autostart.autostart_and_wait(vm_name)
    vm.vm_wait_for_shutdown(vm_name)

    logger.info("Compacting %s.", base_disk_name)
    vm.disk_compress(base_disk_name)

    vm.virsh("undefine", vm_name)
    del conf.vm[vm_name]
    logger.info("Base disk created.")
    logger.info("stacktrain base disk build ends.")
def main():
    """Entry point: set up configuration, build base disk and cluster.

    Removes leftover debug scaffolding from the original ("this is a
    test" log message, log_entry() probe, commented-out sys.exit, and a
    caller-trace line) that served no purpose in production.
    """
    abort_if_root_user()
    configure_logging()
    logger = logging.getLogger(__name__)
    logger.debug("Call args: %s", sys.argv)
    logger.debug(report.get_git_info())

    args = parse_args()
    set_conf_vars(args)

    import stacktrain.core.autostart as autostart
    import stacktrain.core.node_builder as node_builder
    import stacktrain.core.functions_host as host

    # W0612: variable defined but not used
    # pylint_: disable=W0612
    # Only for the benefit of sfood
    # import stacktrain.virtualbox.install_base

    # Provider-specific modules are chosen at runtime.
    logger.debug("importing stacktrain.%s.install_base", conf.provider)
    install_base = importlib.import_module("stacktrain.%s.install_base" %
                                           conf.provider)

    logger.debug("importing stacktrain.%s.vm_create", conf.provider)
    vm = importlib.import_module("stacktrain.%s.vm_create" % conf.provider)
    vm.init()

    logger.info("stacktrain start at %s", time.strftime("%c"))

    # OS X sets LC_CTYPE to UTF-8 which results in errors when exported to
    # (remote) environments
    if "LC_CTYPE" in os.environ:
        logger.debug("Removing LC_CTYPE from environment.")
        del os.environ["LC_CTYPE"]
    # To be on the safe side, ensure a sane locale
    os.environ["LC_ALL"] = "C"
    logger.debug("Environment %s", os.environ)

    autostart.autostart_reset()

    if conf.wbatch:
        wbatch.wbatch_reset()

    if conf.do_build and not conf.leave_vms_running:
        vm.stop_running_cluster_vms()

    if conf.do_build and install_base.base_disk_exists():
        if args.target == "basedisk":
            # Basedisk already exists; ask the user before destroying it.
            print("Basedisk exists: %s" % conf.get_base_disk_name())
            print("\tDestroy and recreate? [y/N] ", end='')
            ans = raw_input().lower()
            if ans == 'y':
                logger.info("Deleting existing basedisk.")
                start_time = time.time()
                install_base.vm_install_base()
                logger.info("Basedisk build took %s seconds",
                            hf.fmt_time_diff(start_time))
            elif conf.wbatch:
                # Build only the Windows batch files; temporarily disable
                # the local build so nothing is touched on this machine.
                logger.info("Windows batch file build only.")
                tmp_do_build = conf.do_build
                conf.do_build = False
                install_base.vm_install_base()
                conf.do_build = tmp_do_build
            else:
                print("Nothing to do.")
            print("Done, returning now.")
            return
    elif conf.wbatch:
        logger.info("Windows batch file build only.")
        tmp_do_build = conf.do_build
        conf.do_build = False
        install_base.vm_install_base()
        conf.do_build = tmp_do_build
    else:
        start_time = time.time()
        install_base.vm_install_base()
        logger.info("Basedisk build took %s seconds",
                    hf.fmt_time_diff(start_time))

    if args.target == "basedisk":
        print("We are done.")
        return

    host.create_host_networks()
    start_time = time.time()
    node_builder.build_nodes(args.target)
    logger.info("Cluster build took %s seconds",
                hf.fmt_time_diff(start_time))

    report.print_summary()
def vm_install_base():
    """Create the base disk by installing the OS into a temporary VM.

    KVM/libvirt provider: deletes old artifacts, creates the base disk
    volume, launches virt-install in the background, drives the
    distribution installer, waits for the automated install to shut the
    VM down, then compacts the disk and undefines the VM.
    """
    vm_name = "base"
    conf.vm[vm_name] = conf.VMconfig(vm_name)
    base_disk_name = conf.get_base_disk_name()

    # Remove any leftovers from a previous run.
    vm.vm_delete(vm_name)
    vm.disk_delete(base_disk_name)

    vm_config = conf.vm[vm_name]

    if conf.do_build:
        install_iso = iso_image.find_install_iso()
    else:
        install_iso = os.path.join(conf.img_dir, conf.iso_image.name)

    logger.info("Install ISO:\n\t%s", install_iso)

    # Queue the scripts that run inside the VM after the OS install.
    autostart.autostart_reset()
    autostart.autostart_queue("osbash/base_fixups.sh")
    autostart.autostart_from_config(conf.base_install_scripts)
    autostart.autostart_queue("zero_empty.sh", "shutdown.sh")

    base_disk_size = 10000  # MB
    vm.disk_create(base_disk_name, base_disk_size)

    libvirt_connect_uri = "qemu:///system"
    virt_install_call = ["sudo", "virt-install",
                        "--connect={}".format(libvirt_connect_uri)]

    vm_base_mem = 512  # RAM (MB) for the installer VM

    call_args = virt_install_call
    call_args.extend(["--name", vm_name])
    call_args.extend(["--ram", str(vm_base_mem)])
    call_args.extend(["--vcpus", str(1)])
    call_args.extend(["--os-type", "linux"])
    call_args.extend(["--cdrom", install_iso])
    call_args.extend(["--disk",
                      "vol={}/{},cache=none".format(vm.kvm_vol_pool,
                                                    base_disk_name)])
    if conf.vm_ui == "headless":
        call_args.extend(("--graphics", "none", "--noautoconsole"))
    elif conf.vm_ui == "vnc":
        call_args.extend(("--graphics", "vnc,listen=127.0.0.1"))
    # Default (no extra argument) is gui option: should open a console viewer
    call_args.append("--wait=-1")

    import subprocess
    errout = subprocess.STDOUT
    # Fix: the original logged the call twice (joined string and raw list);
    # one debug line carries the same information.
    logger.debug("virt-install call: %s", ' '.join(call_args))
    vm.virsh_log(call_args)
    # virt-install runs in the background; we poll for the VM below.
    subprocess.Popen(call_args, stderr=errout)

    while not vm.vm_is_running(vm_name):
        print('.', end='')
        time.sleep(1)

    delay = 5
    logger.info("\nWaiting %d seconds for VM %s to come up.", delay, vm_name)
    cs.conditional_sleep(delay)
    logger.info("Booting into distribution installer.")
    distro_boot.distro_start_installer(vm_config)

    # Prevent "time stamp from the future" due to race between two sudos for
    # virt-install (background) above and virsh below
    time.sleep(1)

    logger.info("Waiting for VM %s to be defined.", vm_name)
    while not vm.vm_is_running(vm_name):
        time.sleep(1)
        print(".")

    ssh_ip = vm.node_to_ip(vm_name)
    conf.vm[vm_name].ssh_ip = ssh_ip

    logger.info("Waiting for ping returning from %s.", ssh_ip)
    hf.wait_for_ping(ssh_ip)

    # Run the queued scripts inside the VM; the last one shuts it down.
    autostart.autostart_and_wait(vm_name)
    vm.vm_wait_for_shutdown(vm_name)

    logger.info("Compacting %s.", base_disk_name)
    vm.disk_compress(base_disk_name)

    vm.virsh("undefine", vm_name)
    del conf.vm[vm_name]
    logger.info("Base disk created.")
    logger.info("stacktrain base disk build ends.")
def main():
    """Entry point: set up configuration, build base disk and cluster."""
    abort_if_root_user()
    configure_logging()
    logger = logging.getLogger(__name__)
    logger.debug("Call args: %s", sys.argv)

    args = parse_args()
    set_conf_vars(args)

    import stacktrain.core.autostart as autostart
    import stacktrain.core.node_builder as node_builder
    import stacktrain.core.functions_host as host

    # W0612: variable defined but not used
    # pylint_: disable=W0612
    # Only for the benefit of sfood
    # import stacktrain.virtualbox.install_base

    # Provider-specific modules (virtualbox, kvm, ...) are imported at
    # runtime based on the configured provider.
    logger.debug("importing stacktrain.%s.install_base", conf.provider)
    install_base = importlib.import_module("stacktrain.%s.install_base" %
                                           conf.provider)

    logger.debug("importing stacktrain.%s.vm_create", conf.provider)
    vm = importlib.import_module("stacktrain.%s.vm_create" % conf.provider)
    vm.init()

    logger.info("stacktrain start at %s", time.strftime("%c"))

    # OS X sets LC_CTYPE to UTF-8 which results in errors when exported to
    # (remote) environments
    if "LC_CTYPE" in os.environ:
        logger.debug("Removing LC_CTYPE from environment.")
        del os.environ["LC_CTYPE"]
    # To be on the safe side, ensure a sane locale
    os.environ["LC_ALL"] = "C"
    logger.debug("Environment %s", os.environ)

    autostart.autostart_reset()

    if conf.wbatch:
        wbatch.wbatch_reset()

    if conf.do_build and install_base.base_disk_exists():
        if args.target == "basedisk":
            # Basedisk already exists; ask the user before destroying it.
            print("Basedisk exists: %s" % conf.get_base_disk_name())
            print("\tDestroy and recreate? [y/N] ", end="")
            ans = raw_input().lower()
            if ans == "y":
                logger.info("Deleting existing basedisk.")
                start_time = time.time()
                install_base.vm_install_base()
                logger.info("Basedisk build took %s seconds",
                            hf.fmt_time_diff(start_time))
            elif conf.wbatch:
                # Batch-file-only build: suppress the local build while
                # the batch files are generated, then restore the flag.
                logger.info("Windows batch file build only.")
                tmp_do_build = conf.do_build
                conf.do_build = False
                install_base.vm_install_base()
                conf.do_build = tmp_do_build
            else:
                print("Nothing to do.")
            print("Done, returning now.")
            return
    elif conf.wbatch:
        logger.info("Windows batch file build only.")
        tmp_do_build = conf.do_build
        conf.do_build = False
        install_base.vm_install_base()
        conf.do_build = tmp_do_build
    else:
        start_time = time.time()
        install_base.vm_install_base()
        logger.info("Basedisk build took %s seconds",
                    hf.fmt_time_diff(start_time))

    if args.target == "basedisk":
        print("We are done.")
        return

    host.create_host_networks()
    start_time = time.time()
    node_builder.build_nodes(args.target)
    logger.info("Cluster build took %s seconds",
                hf.fmt_time_diff(start_time))

    report.print_summary()
def vm_install_base():
    """Create the base disk by installing the OS into a temporary VM.

    VirtualBox provider: installs into a temporary VDI, runs the queued
    install scripts inside the VM, then compacts the disk and moves it
    to its final base-disk path. When conf.wbatch is set, equivalent
    Windows batch files are generated along the way.
    """
    vm_name = "base"
    conf.vm[vm_name] = conf.VMconfig(vm_name)

    base_disk_path = cvb.get_base_disk_path()
    base_build_disk = os.path.join(conf.img_dir, "tmp-disk.vdi")

    logger.info("Creating\n\t%s.", base_disk_path)

    if conf.wbatch:
        wbatch.wbatch_begin_base()
        wbatch.wbatch_delete_disk(base_build_disk)

    if conf.do_build:
        # Clear out artifacts of any previous run.
        if base_disk_exists():
            logger.info("Deleting existing basedisk.")
            base_disk_delete()
        try:
            os.remove(base_build_disk)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # File doesn't exist, that's fine.

    vm_config = conf.vm[vm_name]

    if conf.do_build:
        install_iso = iso_image.find_install_iso()
    else:
        install_iso = os.path.join(conf.img_dir, conf.iso_image.name)

    logger.info("Install ISO:\n\t%s", install_iso)

    vm.vm_create(vm_config)
    vm.vm_mem(vm_config)
    vm.vm_attach_dvd(vm_name, install_iso)

    if conf.wbatch:
        vm.vm_attach_guestadd_iso(vm_name)

    vm.create_vdi(base_build_disk, conf.base_disk_size)
    vm.vm_attach_disk(vm_name, base_build_disk)

    if conf.wbatch:
        # Automounted on /media/sf_bootstrap for first boot
        vm.vm_add_share_automount(vm_name, conf.share_dir, "bootstrap")
        # Mounted on /conf.share_name after first boot
        vm.vm_add_share(vm_name, conf.share_dir, conf.share_name)
    else:
        vm.vm_port(vm_name, "ssh", conf.vm[vm_name].ssh_port, 22)

    # Boot from the install DVD first, the (empty) disk second.
    vm.vbm("modifyvm", vm_name, "--boot1", "dvd")
    vm.vbm("modifyvm", vm_name, "--boot2", "disk")

    autostart.autostart_reset()
    if conf.wbatch:
        autostart.autostart_queue("osbash/activate_autostart.sh")
    autostart.autostart_queue("osbash/base_fixups.sh")
    autostart.autostart_from_config(conf.base_install_scripts)
    autostart.autostart_queue("zero_empty.sh", "shutdown.sh")

    logger.info("Booting VM %s.", vm_name)
    vm.vm_boot(vm_name)

    # Note: It takes about 5 seconds for the installer in the VM to be ready
    # on a fairly typical laptop. If we don't wait long enough, the
    # installation will fail. Ideally, we would have a different method
    # of making sure the installer is ready. For now, we just have to
    # try and err on the side of caution.
    delay = 10
    logger.info("Waiting %d seconds for VM %s to come up.", delay, vm_name)
    cs.conditional_sleep(delay)

    logger.info("Booting into distribution installer.")
    distro_boot.distro_start_installer(vm_config)

    # Run the queued scripts inside the VM; the last one shuts it down.
    autostart.autostart_and_wait(vm_name)
    vm.vm_wait_for_shutdown(vm_name)

    # Detach disk from VM now or it will be deleted by vm_unregister_del
    vm.vm_detach_disk(vm_name)
    vm.vm_unregister_del(vm_name)
    del conf.vm[vm_name]

    logger.info("Compacting %s.", base_build_disk)
    vm.vbm("modifyhd", base_build_disk, "--compact")

    # This disk will be moved to a new name, and this name will be used for
    # a new disk next time the script runs.
    vm.disk_unregister(base_build_disk)

    logger.info("Base disk created.")
    logger.info("Moving base disk to:\n\t%s", base_disk_path)
    if conf.do_build:
        import shutil
        shutil.move(base_build_disk, base_disk_path)

    if conf.wbatch:
        wbatch.wbatch_rename_disk(os.path.basename(base_build_disk),
                                  os.path.basename(base_disk_path))
        wbatch.wbatch_end_file()

    logger.info("Base disk build ends.")