def clone(backup_name, archive_dir):
    """ Do Cloning """
    validate_controller_state()
    LOG.info("Cloning [{}] at [{}]".format(backup_name, archive_dir))
    check_size(archive_dir)

    isolinux_dir = os.path.join(archive_dir, 'isolinux')
    clone_archive_dir = os.path.join(isolinux_dir, CLONE_ARCHIVE_DIR)
    if os.path.exists(isolinux_dir):
        LOG.info("deleting old iso_dir %s" % isolinux_dir)
        shutil.rmtree(isolinux_dir, ignore_errors=True)
    os.makedirs(clone_archive_dir, 0o644)

    try:
        backup_restore.backup(backup_name, clone_archive_dir, clone=True)
        LOG.info("system backup done")
        update_backup_archive(backup_name + '_system', clone_archive_dir)
        create_iso(backup_name, archive_dir)
    except BackupFail as e:
        raise CloneFail(e.message)
    except CloneFail as e:
        raise
    finally:
        if not DEBUG:
            shutil.rmtree(isolinux_dir, ignore_errors=True)
def finalize_install():
    """ Complete the installation """
    subprocess.call(["rm", "-f", tsconfig.CONFIG_PATH + '/dnsmasq.leases'])
    console_log("Updating system parameters...")
    i = 1
    system_update = False
    # Retries if sysinv is not yet ready
    while i < 10:
        time.sleep(20)
        LOG.info("Attempt %d to update system parameters..." % i)
        try:
            if sysinv.update_clone_system('Cloned_from_' + clone_name,
                                          utils.get_controller_hostname()):
                system_update = True
                break
        except Exception:
            # Sysinv might not be ready yet
            pass
        i += 1
    if not system_update:
        LOG.error("System update failed")
        raise CloneFail("System update failed")

    try:
        output = subprocess.check_output(["finish_install_clone.sh"],
                                         stderr=subprocess.STDOUT)
        LOG.info("finish_install_clone out: {}".format(output))
    except Exception:
        console_log("Failed to cleanup stale OpenStack resources. "
                    "Manually delete the Volumes and Instances.")
def update_db(archive_dir, backup_name):
    """ Update DB before restore """
    path_to_archive = os.path.join(archive_dir, backup_name)
    LOG.info("Updating system archive [%s] DB." % path_to_archive)
    tmpdir = tempfile.mkdtemp(dir=archive_dir)
    try:
        subprocess.check_call(['gunzip', path_to_archive + '.tgz'],
                              stdout=DEVNULL, stderr=DEVNULL)
        # Extract only postgres dir to update system uuid
        subprocess.check_call(['tar', '-x',
                               '--directory=' + tmpdir,
                               '-f', path_to_archive + '.tar',
                               'postgres'],
                              stdout=DEVNULL, stderr=DEVNULL)
        update_sysuuid_in_archive(tmpdir)
        subprocess.check_call(['tar', '--update',
                               '--directory=' + tmpdir,
                               '-f', path_to_archive + '.tar',
                               'postgres'],
                              stdout=DEVNULL, stderr=DEVNULL)
        subprocess.check_call(['gzip', path_to_archive + '.tar'])
        shutil.move(path_to_archive + '.tar.gz', path_to_archive + '.tgz')
    except Exception as e:
        LOG.error("Update of system archive {} failed {}".format(
            path_to_archive, str(e)))
        raise CloneFail("Failed to update system archive")
    finally:
        shutil.rmtree(tmpdir, ignore_errors=True)
def remove_from_archive(archive, unwanted):
    """ Remove a file from the archive. """
    try:
        subprocess.check_call(["tar", "--delete",
                               "--file=" + archive,
                               unwanted])
    except subprocess.CalledProcessError as e:
        LOG.error("Delete of {} failed: {}".format(unwanted, e.output))
        raise CloneFail("Failed to modify backup archive")
def update_oamip_in_archive(tmpdir):
    """ Update OAM IP in system archive file. """
    oam_list = sysinv_api.get_oam_ip()
    if not oam_list:
        raise CloneFail("Failed to get OAM IP")
    for oamfind in [oam_list.oam_start_ip, oam_list.oam_end_ip,
                    oam_list.oam_subnet, oam_list.oam_floating_ip,
                    oam_list.oam_c0_ip, oam_list.oam_c1_ip]:
        if not oamfind:
            continue
        ip = netaddr.IPNetwork(oamfind)
        find_str = ""
        if ip.version == 4:
            # if ipv4, use 192.0.x.x as the temporary oam ip
            find_str = str(ip.ip)
            ipstr_list = find_str.split('.')
            ipstr_list[0] = '192'
            ipstr_list[1] = '0'
            repl_ipstr = ".".join(ipstr_list)
        else:
            # if ipv6, use 2001:db8:x as the temporary oam ip
            find_str = str(ip.ip)
            ipstr_list = find_str.split(':')
            ipstr_list[0] = '2001'
            ipstr_list[1] = 'db8'
            repl_ipstr = ":".join(ipstr_list)
        if repl_ipstr:
            find_and_replace(
                [os.path.join(tmpdir, 'etc/hosts'),
                 os.path.join(tmpdir, 'etc/sysconfig/network-scripts'),
                 os.path.join(tmpdir, 'etc/nfv/vim/config.ini'),
                 os.path.join(tmpdir, 'etc/haproxy/haproxy.cfg'),
                 os.path.join(tmpdir, 'etc/heat/heat.conf'),
                 os.path.join(tmpdir, 'etc/keepalived/keepalived.conf'),
                 os.path.join(tmpdir, 'etc/murano/murano.conf'),
                 os.path.join(tmpdir, 'etc/vswitch/vswitch.ini'),
                 os.path.join(tmpdir, 'etc/nova/nova.conf'),
                 os.path.join(tmpdir, 'config/hosts'),
                 os.path.join(tmpdir, 'hieradata'),
                 os.path.join(tmpdir, 'postgres/keystone.sql.data'),
                 os.path.join(tmpdir, 'postgres/sysinv.sql.data')],
                find_str, repl_ipstr)
        else:
            LOG.error("Failed to modify OAM IP:[{}]".format(oamfind))
            raise CloneFail("Failed to modify OAM IP")
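# Worked example of the rewrite above (illustrative addresses, not taken from
# any real deployment): an IPv4 OAM address such as 10.10.10.2 becomes
# 192.0.10.2, and an IPv6 address such as fd00:1:2::100 becomes
# 2001:db8:2::100. find_and_replace() then swaps the original string for this
# placeholder in the listed config files and DB dumps, so the cloned image
# carries a temporary OAM IP rather than the source system's real one.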
def get_disk_size(disk):
    """ Get the disk size """
    disk_size = ""
    try:
        disk_size = subprocess.check_output(
            ['lsblk', '--nodeps', '--output', 'SIZE',
             '--noheadings', '--bytes', disk])
    except Exception as e:
        LOG.exception(e)
        LOG.error("Failed to get disk size [{}]".format(disk))
        raise CloneFail("Failed to get disk size")
    return disk_size.strip()
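# get_disk_size() is a thin wrapper over lsblk; the equivalent command line is
#   lsblk --nodeps --output SIZE --noheadings --bytes <disk>
# which prints the device size in bytes as a string (hence the .strip()).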
def validate_controller_state():
    """ Cloning allowed now? """
    # Check if this Controller is enabled and provisioned
    try:
        if not sysinv_api.controller_enabled_provisioned(
                utils.get_controller_hostname()):
            raise CloneFail("Controller is not enabled/provisioned")
        if (tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX or
                tsconfig.system_mode == si_const.SYSTEM_MODE_DUPLEX_DIRECT):
            if not sysinv_api.controller_enabled_provisioned(
                    utils.get_mate_controller_hostname()):
                raise CloneFail("Mate controller is not enabled/provisioned")
    except CloneFail:
        raise
    except Exception:
        raise CloneFail("Controller is not enabled/provisioned")

    if utils.get_system_type() != si_const.TIS_AIO_BUILD:
        raise CloneFail("Cloning supported only on All-in-one systems")

    if len(sysinv_api.get_alarms()) > 0:
        raise CloneFail("There are active alarms on this system!")
def check_size(archive_dir):
    """ Check if there is enough space to create iso. """
    overhead_bytes = 1024 ** 3  # extra GB for staging directory
    # Size of the cloned iso is directly proportional to the
    # installed package repository (note that patches are a part of
    # the system archive size below).
    # The 1G overhead added above accommodates the temporary
    # workspace (updating the system archive, etc.) needed to create the iso.
    feed_dir = os.path.join('/www', 'pages', 'feed',
                            'rel-' + tsconfig.SW_VERSION)
    overhead_bytes += backup_restore.backup_std_dir_size(feed_dir)

    cinder_config = False
    backend_services = sysinv_api.get_storage_backend_services()
    for services in backend_services.values():
        if services.find(si_const.SB_SVC_CINDER) != -1:
            cinder_config = True
            break

    clone_size = (
        overhead_bytes +
        backup_restore.backup_etc_size() +
        backup_restore.backup_config_size(tsconfig.CONFIG_PATH) +
        backup_restore.backup_puppet_data_size(constants.HIERADATA_PERMDIR) +
        backup_restore.backup_keyring_size(backup_restore.keyring_permdir) +
        backup_restore.backup_ldap_size() +
        backup_restore.backup_postgres_size(cinder_config) +
        backup_restore.backup_ceilometer_size(
            backup_restore.ceilometer_permdir) +
        backup_restore.backup_std_dir_size(backup_restore.glance_permdir) +
        backup_restore.backup_std_dir_size(backup_restore.home_permdir) +
        backup_restore.backup_std_dir_size(backup_restore.patching_permdir) +
        backup_restore.backup_std_dir_size(
            backup_restore.patching_repo_permdir) +
        backup_restore.backup_std_dir_size(backup_restore.extension_permdir) +
        backup_restore.backup_std_dir_size(
            backup_restore.patch_vault_permdir) +
        backup_restore.backup_cinder_size(backup_restore.cinder_permdir))

    archive_dir_free_space = \
        utils.filesystem_get_free_space(archive_dir)

    if clone_size > archive_dir_free_space:
        print("\nArchive directory (%s) does not have enough free "
              "space (%s), estimated size to create image is %s."
              % (archive_dir,
                 utils.print_bytes(archive_dir_free_space),
                 utils.print_bytes(clone_size)))
        raise CloneFail("Not enough free space.\n")
def update_backup_archive(backup_name, archive_dir):
    """ Update backup archive file to be included in clone-iso """
    path_to_archive = os.path.join(archive_dir, backup_name)
    tmpdir = tempfile.mkdtemp(dir=archive_dir)
    try:
        subprocess.check_call(
            ['gunzip', path_to_archive + '.tgz'],
            stdout=DEVNULL, stderr=DEVNULL)
        # 70-persistent-net.rules with the correct MACs will be
        # generated on the linux boot on the cloned side. Remove
        # the stale file from original side.
        remove_from_archive(path_to_archive + '.tar',
                            'etc/udev/rules.d/70-persistent-net.rules')
        # Extract only a subset of directories which have files to be
        # updated for oam-ip and MAC addresses. After updating the files
        # these directories are added back to the archive.
        subprocess.check_call(
            ['tar', '-x',
             '--directory=' + tmpdir,
             '-f', path_to_archive + '.tar',
             'etc', 'postgres', 'config', 'hieradata'],
            stdout=DEVNULL, stderr=DEVNULL)
        update_oamip_in_archive(tmpdir)
        update_mac_in_archive(tmpdir)
        update_disk_serial_id_in_archive(tmpdir)
        update_sysuuid_in_archive(tmpdir)
        subprocess.check_call(
            ['tar', '--update',
             '--directory=' + tmpdir,
             '-f', path_to_archive + '.tar',
             'etc', 'postgres', 'config', 'hieradata'],
            stdout=DEVNULL, stderr=DEVNULL)
        subprocess.check_call(['gzip', path_to_archive + '.tar'])
        shutil.move(path_to_archive + '.tar.gz', path_to_archive + '.tgz')
    except Exception as e:
        LOG.error("Update of backup archive {} failed {}".format(
            path_to_archive, str(e)))
        raise CloneFail("Failed to update backup archive")
    finally:
        if not DEBUG:
            shutil.rmtree(tmpdir, ignore_errors=True)
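# Summary of the round trip performed by update_backup_archive(), with
# <backup> standing in for the computed archive path:
#   1. gunzip <backup>.tgz to get <backup>.tar
#   2. tar --delete the stale 70-persistent-net.rules
#   3. tar -x only etc/, postgres/, config/ and hieradata/ into a temp dir
#   4. rewrite OAM IPs, MAC addresses, disk serial IDs and the system UUID
#   5. tar --update those directories back into <backup>.tar
#   6. gzip the tar and rename the result back to <backup>.tgz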
def check_size(archive_dir):
    """ Check if there is enough space to create iso. """
    overhead_bytes = 1024 ** 3  # extra GB for staging directory
    # Size of the cloned iso is directly proportional to the
    # installed package repository (note that patches are a part of
    # the system archive size below).
    # The 1G overhead added above accommodates the temporary
    # workspace (updating the system archive, etc.) needed to create the iso.
    feed_dir = os.path.join('/www', 'pages', 'feed',
                            'rel-' + tsconfig.SW_VERSION)
    overhead_bytes += backup_restore.backup_std_dir_size(feed_dir)

    clone_size = (
        overhead_bytes +
        backup_restore.backup_etc_size() +
        backup_restore.backup_config_size(tsconfig.CONFIG_PATH) +
        backup_restore.backup_puppet_data_size(constants.HIERADATA_PERMDIR) +
        backup_restore.backup_keyring_size(backup_restore.keyring_permdir) +
        backup_restore.backup_ldap_size() +
        backup_restore.backup_postgres_size() +
        backup_restore.backup_std_dir_size(backup_restore.home_permdir) +
        backup_restore.backup_std_dir_size(backup_restore.patching_permdir) +
        backup_restore.backup_std_dir_size(
            backup_restore.patching_repo_permdir) +
        backup_restore.backup_std_dir_size(backup_restore.extension_permdir) +
        backup_restore.backup_std_dir_size(
            backup_restore.patch_vault_permdir) +
        backup_restore.backup_armada_manifest_size(
            constants.ARMADA_PERMDIR) +
        backup_restore.backup_std_dir_size(
            constants.HELM_CHARTS_PERMDIR) +
        backup_restore.backup_mariadb_size())

    archive_dir_free_space = \
        utils.filesystem_get_free_space(archive_dir)

    if clone_size > archive_dir_free_space:
        print("\nArchive directory (%s) does not have enough free "
              "space (%s), estimated size to create image is %s."
              % (archive_dir,
                 utils.print_bytes(archive_dir_free_space),
                 utils.print_bytes(clone_size)))
        raise CloneFail("Not enough free space.\n")
def config_worker():
    """
    Enable worker functionality for AIO system.
    :return: True if worker-config-complete is executed
    """
    if utils.get_system_type() == si_const.TIS_AIO_BUILD:
        console_log("Applying worker manifests for {}. "
                    "Node will reboot on completion."
                    .format(utils.get_controller_hostname()))
        sysinv.do_worker_config_complete(utils.get_controller_hostname())
        time.sleep(30)
        # worker-config-complete has no logs to console. So, wait
        # for some time before showing the login prompt.
        for i in range(1, 10):
            console_log("worker-config in progress..")
            time.sleep(30)
        console_log("Timed out on do_worker_config_complete")
        raise CloneFail("Timed out on do_worker_config_complete")
        # Unreachable: either the node reboots during the loop above or the
        # CloneFail is raised on timeout.
        return True
    else:
        # worker_config_complete is not needed.
        return False
def create_iso(iso_name, archive_dir):
    """ Create iso image. This is modelled after
        the cgcs-root/build-tools/build-iso tool. """
    try:
        controller_0 = sysinv_api.get_host_data('controller-0')
    except Exception as e:
        e_log = "Failed to retrieve controller-0 inventory details."
        LOG.exception(e_log)
        raise CloneFail(e_log)

    iso_dir = os.path.join(archive_dir, 'isolinux')
    clone_archive_dir = os.path.join(iso_dir, CLONE_ARCHIVE_DIR)
    output = None
    tmpdir = None
    total_steps = 6
    step = 1
    print("\nCreating ISO:")

    # Add the correct kick-start file to the image
    ks_file = "controller_ks.cfg"
    if utils.get_system_type() == si_const.TIS_AIO_BUILD:
        if si_const.LOWLATENCY in tsconfig.subfunctions:
            ks_file = "smallsystem_lowlatency_ks.cfg"
        else:
            ks_file = "smallsystem_ks.cfg"

    try:
        # prepare the iso files
        images_dir = os.path.join(iso_dir, 'images')
        os.mkdir(images_dir, 0o644)
        pxe_dir = os.path.join('/pxeboot',
                               'rel-' + tsconfig.SW_VERSION)
        os.symlink(pxe_dir + '/installer-bzImage',
                   iso_dir + '/vmlinuz')
        os.symlink(pxe_dir + '/installer-initrd',
                   iso_dir + '/initrd.img')
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        feed_dir = os.path.join('/www', 'pages', 'feed',
                                'rel-' + tsconfig.SW_VERSION)
        os.symlink(feed_dir + '/Packages', iso_dir + '/Packages')
        os.symlink(feed_dir + '/repodata', iso_dir + '/repodata')
        os.symlink(feed_dir + '/LiveOS', iso_dir + '/LiveOS')
        shutil.copy2(feed_dir + '/isolinux.cfg', iso_dir)
        update_bootloader_default(iso_dir + '/isolinux.cfg', controller_0)
        shutil.copyfile('/usr/share/syslinux/isolinux.bin',
                        iso_dir + '/isolinux.bin')
        os.symlink('/usr/share/syslinux/vesamenu.c32',
                   iso_dir + '/vesamenu.c32')
        for filename in glob.glob(os.path.join(feed_dir, '*ks.cfg')):
            shutil.copy(os.path.join(feed_dir, filename), iso_dir)
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        efiboot_dir = os.path.join(iso_dir, 'EFI', 'BOOT')
        os.makedirs(efiboot_dir, 0o644)
        l_efi_dir = os.path.join('/boot', 'efi', 'EFI')
        shutil.copy2(l_efi_dir + '/BOOT/BOOTX64.EFI', efiboot_dir)
        shutil.copy2(l_efi_dir + '/centos/MokManager.efi', efiboot_dir)
        shutil.copy2(l_efi_dir + '/centos/grubx64.efi', efiboot_dir)
        shutil.copy2('/pxeboot/EFI/grub.cfg', efiboot_dir)
        update_bootloader_default(efiboot_dir + '/grub.cfg', controller_0)
        shutil.copytree(l_efi_dir + '/centos/fonts',
                        efiboot_dir + '/fonts')
        # copy EFI boot image and update the grub.cfg file
        efi_img = images_dir + '/efiboot.img'
        shutil.copy2(pxe_dir + '/efiboot.img', efi_img)
        tmpdir = tempfile.mkdtemp(dir=archive_dir)
        output = subprocess.check_output(
            ["mount", "-t", "vfat", "-o", "loop",
             efi_img, tmpdir],
            stderr=subprocess.STDOUT)
        # replace the grub.cfg file with the updated file
        efi_grub_f = os.path.join(tmpdir, 'EFI', 'BOOT', 'grub.cfg')
        os.remove(efi_grub_f)
        shutil.copy2(efiboot_dir + '/grub.cfg', efi_grub_f)
        subprocess.call(['umount', tmpdir])
        shutil.rmtree(tmpdir, ignore_errors=True)
        tmpdir = None

        epoch_time = "%.9f" % time.time()
        disc_info = [epoch_time, tsconfig.SW_VERSION, "x86_64"]
        with open(iso_dir + '/.discinfo', 'w') as f:
            f.write('\n'.join(disc_info))

        # copy the latest install_clone executable
        shutil.copy2('/usr/bin/install_clone', iso_dir)
        subprocess.check_output("cat /pxeboot/post_clone_iso_ks.cfg >> " +
                                iso_dir + "/" + ks_file, shell=True)
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        # copy patches
        iso_patches_dir = os.path.join(iso_dir, 'patches')
        iso_patch_repo_dir = os.path.join(iso_patches_dir, 'repodata')
        iso_patch_pkgs_dir = os.path.join(iso_patches_dir, 'Packages')
        iso_patch_metadata_dir = os.path.join(iso_patches_dir, 'metadata')
        iso_patch_applied_dir = os.path.join(iso_patch_metadata_dir,
                                             'applied')
        iso_patch_committed_dir = os.path.join(iso_patch_metadata_dir,
                                               'committed')

        os.mkdir(iso_patches_dir, 0o755)
        os.mkdir(iso_patch_repo_dir, 0o755)
        os.mkdir(iso_patch_pkgs_dir, 0o755)
        os.mkdir(iso_patch_metadata_dir, 0o755)
        os.mkdir(iso_patch_applied_dir, 0o755)
        os.mkdir(iso_patch_committed_dir, 0o755)

        repodata = '/www/pages/updates/rel-%s/repodata/' \
                   % tsconfig.SW_VERSION
        pkgsdir = '/www/pages/updates/rel-%s/Packages/' \
                  % tsconfig.SW_VERSION
        patch_applied_dir = '/opt/patching/metadata/applied/'
        patch_committed_dir = '/opt/patching/metadata/committed/'
        subprocess.check_call(['rsync', '-a', repodata,
                               '%s/' % iso_patch_repo_dir])
        if os.path.exists(pkgsdir):
            subprocess.check_call(['rsync', '-a', pkgsdir,
                                   '%s/' % iso_patch_pkgs_dir])
        if os.path.exists(patch_applied_dir):
            subprocess.check_call(['rsync', '-a', patch_applied_dir,
                                   '%s/' % iso_patch_applied_dir])
        if os.path.exists(patch_committed_dir):
            subprocess.check_call(['rsync', '-a', patch_committed_dir,
                                   '%s/' % iso_patch_committed_dir])
        utils.progress(total_steps, step, 'preparing files', 'DONE')
        step += 1

        create_ini_file(clone_archive_dir, iso_name)

        os.chmod(iso_dir + '/isolinux.bin', 0o664)
        iso_file = os.path.join(archive_dir, iso_name + ".iso")
        output = subprocess.check_output(
            ["nice", "mkisofs",
             "-o", iso_file,
             "-R", "-D",
             "-A", "oe_iso_boot", "-V", "oe_iso_boot",
             "-f", "-quiet",
             "-b", "isolinux.bin", "-c", "boot.cat", "-no-emul-boot",
             "-boot-load-size", "4", "-boot-info-table",
             "-eltorito-alt-boot", "-e", "images/efiboot.img",
             "-no-emul-boot",
             iso_dir],
            stderr=subprocess.STDOUT)
        LOG.info("{} created: [{}]".format(iso_file, output))
        utils.progress(total_steps, step, 'iso created', 'DONE')
        step += 1

        output = subprocess.check_output(
            ["nice", "isohybrid", "--uefi", iso_file],
            stderr=subprocess.STDOUT)
        LOG.debug("isohybrid: {}".format(output))

        output = subprocess.check_output(
            ["nice", "implantisomd5", iso_file],
            stderr=subprocess.STDOUT)
        LOG.debug("implantisomd5: {}".format(output))
        utils.progress(total_steps, step, 'checksum implanted', 'DONE')
        print("Cloned iso image created: {}".format(iso_file))

    except Exception as e:
        LOG.exception(e)
        e_log = "ISO creation ({}) failed".format(iso_name)
        if output:
            e_log += ' [' + output + ']'
        LOG.error(e_log)
        raise CloneFail("ISO creation failed.")

    finally:
        if tmpdir:
            subprocess.call(['umount', tmpdir], stderr=DEVNULL)
            shutil.rmtree(tmpdir, ignore_errors=True)
def update_bootloader_default(bl_file, host):
    """ Update bootloader files for cloned image """
    if not os.path.exists(bl_file):
        LOG.error("{} does not exist".format(bl_file))
        raise CloneFail("{} does not exist"
                        .format(os.path.basename(bl_file)))

    # Tags should be in sync with common-bsp/files/centos.syslinux.cfg
    # and common-bsp/files/grub.cfg
    STANDARD_STANDARD = '0'
    STANDARD_EXTENDED = 'S0'
    AIO_STANDARD = '2'
    AIO_EXTENDED = 'S2'
    AIO_LL_STANDARD = '4'
    AIO_LL_EXTENDED = 'S4'
    if "grub.cfg" in bl_file:
        STANDARD_STANDARD = 'standard>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_STANDARD
        STANDARD_EXTENDED = 'standard>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
        AIO_STANDARD = 'aio>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_STANDARD
        AIO_EXTENDED = 'aio>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
        AIO_LL_STANDARD = 'aio-lowlat>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_STANDARD
        AIO_LL_EXTENDED = 'aio-lowlat>serial>' + \
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED
        SUBMENUITEM_TBOOT = 'tboot'
        SUBMENUITEM_SECUREBOOT = 'secureboot'

    timeout_line = None
    default_line = None
    default_label_num = STANDARD_STANDARD
    if utils.get_system_type() == si_const.TIS_AIO_BUILD:
        if si_const.LOWLATENCY in tsconfig.subfunctions:
            default_label_num = AIO_LL_STANDARD
        else:
            default_label_num = AIO_STANDARD
    if (tsconfig.security_profile ==
            si_const.SYSTEM_SECURITY_PROFILE_EXTENDED):
        default_label_num = STANDARD_EXTENDED
        if utils.get_system_type() == si_const.TIS_AIO_BUILD:
            if si_const.LOWLATENCY in tsconfig.subfunctions:
                default_label_num = AIO_LL_EXTENDED
            else:
                default_label_num = AIO_EXTENDED
        if "grub.cfg" in bl_file:
            if host.tboot is not None:
                if host.tboot == "true":
                    default_label_num = default_label_num + '>' + \
                        SUBMENUITEM_TBOOT
                else:
                    default_label_num = default_label_num + '>' + \
                        SUBMENUITEM_SECUREBOOT

    try:
        with open(bl_file) as f:
            s = f.read()
            for line in s.split("\n"):
                if line.startswith("timeout"):
                    timeout_line = line
                elif line.startswith("default"):
                    default_line = line

            if "grub.cfg" in bl_file:
                replace = "default='{}'\ntimeout=10".format(
                    default_label_num)
            else:  # isolinux format
                replace = "default {}\ntimeout 10".format(
                    default_label_num)

            if default_line and timeout_line:
                s = s.replace(default_line, "")
                s = s.replace(timeout_line, replace)
            elif default_line:
                s = s.replace(default_line, replace)
            elif timeout_line:
                s = s.replace(timeout_line, replace)
            else:
                s = replace + s

            s = re.sub(r'boot_device=[^\s]*',
                       'boot_device=%s' % host.boot_device, s)
            s = re.sub(r'rootfs_device=[^\s]*',
                       'rootfs_device=%s' % host.rootfs_device, s)
            s = re.sub(r'console=[^\s]*',
                       'console=%s' % host.console, s)

        with open(bl_file, "w") as f:
            LOG.info("rewriting {}: label={} find=[{}][{}] replace=[{}]"
                     .format(bl_file, default_label_num, timeout_line,
                             default_line,
                             replace.replace('\n', '<newline>')))
            f.write(s)

    except Exception as e:
        LOG.error("update_bootloader_default failed: {}".format(e))
        raise CloneFail("Failed to update bootloader files")
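# Example of the label selection above: on an AIO low-latency host with the
# extended security profile, the isolinux default becomes 'S4', while for
# grub.cfg it becomes 'aio-lowlat>serial>' +
# si_const.SYSTEM_SECURITY_PROFILE_EXTENDED, with '>tboot' or '>secureboot'
# appended depending on host.tboot.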