def main(argv):
    # Entry point: run the BootManager once, then upload the accumulated
    # log to PlanetLab central. Returns 0 on success, 1 on error (the
    # caller uses this as the process exit status).
    # argv[1], when given, forces the node run state.
    import utils
    utils.prompt_for_breakpoint_mode()

    # utils.breakpoint ("Entering BootManager::main")

    # set to 1 if error occurred
    error= 0

    # all output goes through this class so we can save it and post
    # the data back to PlanetLab central
    LOG= log( BM_NODE_LOG )

    # NOTE: assume CWD is BM's source directory, but never fail
    utils.sysexec_noerr("./setup_bash_history_scripts.sh", LOG)

    LOG.LogEntry( "BootManager started at: %s" % \
                  time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()) )

    try:
        forceState = None
        if len(argv) == 2:
            fState = argv[1]
            # only accept a forced state that BootManager knows about
            if BootManager.NodeRunStates.has_key(fState):
                forceState = fState
            else:
                LOG.LogEntry("FATAL: cannot force node run state to=%s" % fState)
                error = 1
    except:
        # record the traceback both in the uploadable log and on stdout
        traceback.print_exc(file=LOG.OutputFile)
        traceback.print_exc()

    if error:
        LOG.LogEntry( "BootManager finished at: %s" % \
                      time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()) )
        LOG.Upload()
        return error

    try:
        bm= BootManager(LOG,forceState)
        if bm.CAN_RUN == 0:
            LOG.LogEntry( "Unable to initialize BootManager." )
        else:
            LOG.LogEntry( "Running version %s of BootManager." % bm.VARS['VERSION'] )
            success= bm.Run()
            if success:
                LOG.LogEntry( "\nDone!" );
            else:
                LOG.LogEntry( "\nError occurred!" );
                error = 1
    except:
        # never let an exception escape: log it and fall through to Upload
        traceback.print_exc(file=LOG.OutputFile)
        traceback.print_exc()

    LOG.LogEntry( "BootManager finished at: %s" % \
                  time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()) )
    LOG.Upload()

    return error
def main(argv):
    """Run the BootManager once and upload the resulting log.

    argv[1], when present, forces the node run state; it must be one of
    the states known to BootManager.NodeRunStates.

    Returns 0 on success, 1 on error (used as the process exit status).
    """
    import utils
    utils.prompt_for_breakpoint_mode()

    # utils.breakpoint("Entering BootManager::main")

    # set to 1 if error occurred
    error = 0

    # all output goes through this class so we can save it and post
    # the data back to PlanetLab central
    LOG = log(BM_NODE_LOG)

    # NOTE: assume CWD is BM's source directory, but never fail
    utils.sysexec_noerr("./setup_bash_history_scripts.sh", LOG)

    LOG.LogEntry("BootManager started at: {}"\
                 .format(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())))

    try:
        forceState = None
        if len(argv) == 2:
            fState = argv[1]
            # BUGFIX: was NodeRunStates.has_key(fState) -- dict.has_key()
            # does not exist in python3; 'in' behaves identically on both.
            if fState in BootManager.NodeRunStates:
                forceState = fState
            else:
                LOG.LogEntry("FATAL: cannot force node run state to={}".format(fState))
                error = 1
    except:
        # record the traceback both in the uploadable log and on stdout
        traceback.print_exc(file=LOG.OutputFile)
        traceback.print_exc()

    if error:
        LOG.LogEntry("BootManager finished at: {}"\
                     .format(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())))
        LOG.Upload()
        return error

    try:
        bm = BootManager(LOG, forceState)
        if bm.CAN_RUN == 0:
            LOG.LogEntry("Unable to initialize BootManager.")
        else:
            LOG.LogEntry("Running version {} of BootManager.".format(bm.VARS['VERSION']))
            success = bm.Run()
            if success:
                LOG.LogEntry("\nDone!")
            else:
                LOG.LogEntry("\nError occurred!")
                error = 1
    except:
        # never let an exception escape: log it and fall through to Upload
        traceback.print_exc(file=LOG.OutputFile)
        traceback.print_exc()

    LOG.LogEntry("BootManager finished at: {}"\
                 .format(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())))
    LOG.Upload()

    return error
def run_ansible(ansible_path, ansible_hash, playbook_name, log):
    """Best-effort run of 'ansible-pull' for the given playbook.

    ansible_hash, when non-empty, is passed through as the -U argument.
    Failures are logged-and-ignored by sysexec_noerr; ansible provisioning
    is an optional step of the boot sequence and must never abort it.
    """
    try:
        # omit the -U flag entirely when no hash was supplied
        hash_arg = '-U {}'.format(ansible_hash) if ansible_hash else ''
        utils.sysexec_noerr(
            'ansible-pull -i hosts {} {} {}'.format(ansible_path, hash_arg, playbook_name),
            log)
    except Exception:
        # was a bare 'except:'; keep the best-effort contract but let
        # SystemExit/KeyboardInterrupt propagate
        pass
def bypassRaidIfNeeded(sysimg_path, log):
    # Disable dmraid in the installed image's mkinitrd config on
    # CentOS releases >= x.3 (hack for CentOS 5.3).
    # Best-effort: any failure (non-CentOS, missing file...) is ignored.
    try:
        # e.g. "CentOS release 5.3 (Final)" -> a='CentOS', c='5.3'
        [a, b, c, d] = file("%s/etc/redhat-release" % sysimg_path).readlines()[0].strip().split()
        if a != "CentOS":
            return
        [major, minor] = [int(x) for x in c.split(".")]
        if minor >= 3:
            utils.sysexec_noerr('echo "DMRAID=no" >> %s/etc/sysconfig/mkinitrd/noraid' % sysimg_path, log)
            utils.sysexec_noerr("chmod 755 %s/etc/sysconfig/mkinitrd/noraid" % sysimg_path, log)
    except:
        # release file absent/unreadable or unexpected format -- skip silently
        pass
def __init__(self, vars, mode):
    # Mount the disconnected-operation medium (identified by filesystem
    # UUID in vars['DISCONNECTED_OPERATION']) on self.mntpnt and open the
    # api.cache file stored on it (this class subclasses the py2 'file').
    utils.makedirs(self.mntpnt)
    try:
        utils.sysexec('mount -t auto -U %s %s' % (vars['DISCONNECTED_OPERATION'], self.mntpnt))
        # make sure it's not read-only
        f = file('%s/api.cache' % self.mntpnt, 'a')
        f.close()
        file.__init__(self, '%s/api.cache' % self.mntpnt, mode)
    except:
        # undo the mount (if any) before giving up
        utils.sysexec_noerr('umount %s' % self.mntpnt)
        raise BootManagerException, "Couldn't find API-cache for disconnected operation"
def __init__(self, vars, mode):
    # Mount the disconnected-operation medium (filesystem UUID taken from
    # vars['DISCONNECTED_OPERATION']) and open the api.cache file on it.
    utils.makedirs(self.mntpnt)
    try:
        utils.sysexec('mount -t auto -U {} {}'.format(vars['DISCONNECTED_OPERATION'], self.mntpnt))
        cache_path = '{}/api.cache'.format(self.mntpnt)
        # make sure it's not read-only
        probe = file(cache_path, 'a')
        probe.close()
        file.__init__(self, cache_path, mode)
    except:
        # release the mount point before propagating the failure
        utils.sysexec_noerr('umount {}'.format(self.mntpnt))
        raise BootManagerException("Couldn't find API-cache for disconnected operation")
def create_raid_partition(partitions, vars, log):
    """
    create raid array using specified partitions.
    """
    raid_part = None

    # raid is opt-in, driven by the 'raid_enabled' node tag
    node_tags = BootAPI.call_api_function(vars, "GetNodeTags",
                                          ({'node_id': vars['NODE_ID']},))
    raid_enabled = any(tag['tagname'] == 'raid_enabled' and tag['value'] == '1'
                       for tag in node_tags)
    if not raid_enabled:
        return raid_part

    try:
        log.write("Software raid enabled.\n")
        # wipe everything
        utils.sysexec_noerr("mdadm --stop /dev/md0", log)
        time.sleep(1)
        for part_path in partitions:
            utils.sysexec_noerr("mdadm --zero-superblock {} ".format(part_path), log)

        # assume each partiton is on a separate disk
        num_parts = len(partitions)
        if num_parts < 2:
            log.write("Not enough disks for raid. Found: {}\n".format(partitions))
            raise BootManagerException("Not enough disks for raid. Found: {}\n".format(partitions))
        # raid1 for a mirror pair, raid5 beyond that
        lvl = 1 if num_parts == 2 else 5

        # make the array
        part_list = " ".join(partitions)
        raid_part = "/dev/md0"
        cmd = "mdadm --create {raid_part} --chunk=128 --level=raid{lvl} "\
              "--raid-devices={num_parts} {part_list}".format(**locals())
        utils.sysexec(cmd, log)
    except BootManagerException as e:
        log.write("create_raid_partition failed.\n")
        raid_part = None

    return raid_part
def bypassRaidIfNeeded(sysimg_path, log):
    # On CentOS images at release >= x.3, tell mkinitrd not to use dmraid
    # (hack for CentOS 5.3). Best-effort: every failure is swallowed.
    try:
        release_words = file('{}/etc/redhat-release'.format(sysimg_path))\
            .readlines()[0].strip().split()
        # e.g. "CentOS release 5.3 (Final)" -> exactly four words
        distro, _, version, _ = release_words
        if distro != 'CentOS':
            return
        major, minor = [int(x) for x in version.split('.')]
        if minor >= 3:
            utils.sysexec_noerr('echo "DMRAID=no" >> {}/etc/sysconfig/mkinitrd/noraid'
                                .format(sysimg_path), log)
            utils.sysexec_noerr('chmod 755 {}/etc/sysconfig/mkinitrd/noraid'
                                .format(sysimg_path), log)
    except:
        pass
def Upload(self, extra_file=None):
    """
    upload the contents of the log to the server
    """
    if self.OutputFile is not None:
        self.OutputFile.flush()
        self.LogEntry("Uploading logs to {}".format(self.VARS['UPLOAD_LOG_SCRIPT']))
        # close before uploading so the file content is complete on disk
        self.OutputFile.close()
        self.OutputFile = None

        settings = self.VARS['INTERFACE_SETTINGS']
        hostname = settings['hostname'] + "." + settings['domainname']
        bs_request = BootServerRequest.BootServerRequest(self.VARS)
        common_args = dict(PartialPath=self.VARS['UPLOAD_LOG_SCRIPT'],
                           GetVars=None, PostVars=None,
                           DoSSL=True, DoCertCheck=True)
        try:
            # this was working until f10
            bs_request.MakeRequest(FormData=["log=@" + self.OutputFilePath,
                                             "hostname=" + hostname,
                                             "type=bm.log"],
                                   **common_args)
        except:
            # new pycurl
            import pycurl
            bs_request.MakeRequest(FormData=[('log', (pycurl.FORM_FILE, self.OutputFilePath)),
                                             ("hostname", hostname),
                                             ("type", "bm.log")],
                                   **common_args)

    if extra_file is not None:
        # NOTE: for code-reuse, evoke the bash function 'upload_logs';
        # by adding --login, bash reads .bash_profile before execution.
        # Also, never fail, since this is an optional feature.
        utils.sysexec_noerr("""bash --login -c "upload_logs {}" """.format(extra_file), self)
def create_raid_partition(partitions, vars, log):
    """
    create raid array using specified partitions.

    Returns the md device path ("/dev/md0") on success, or None when
    raid is not enabled for this node or array creation failed.
    """
    raid_part = None
    raid_enabled = False
    node_tags = BootAPI.call_api_function(vars, "GetNodeTags",
                                          ({"node_id": vars["NODE_ID"]},))
    for node_tag in node_tags:
        if node_tag["tagname"] == "raid_enabled" and node_tag["value"] == "1":
            raid_enabled = True
            break
    if not raid_enabled:
        return raid_part

    try:
        log.write("Software raid enabled.\n")
        # wipe everything
        utils.sysexec_noerr("mdadm --stop /dev/md0", log)
        time.sleep(1)
        for part_path in partitions:
            utils.sysexec_noerr("mdadm --zero-superblock %s " % part_path, log)

        # assume each partiton is on a separate disk
        num_parts = len(partitions)
        if num_parts < 2:
            log.write("Not enough disks for raid. Found: %s\n" % partitions)
            raise BootManagerException("Not enough disks for raid. Found: %s\n" % partitions)
        if num_parts == 2:
            lvl = 1
        else:
            lvl = 5

        # make the array
        part_list = " ".join(partitions)
        raid_part = "/dev/md0"
        cmd = (
            "mdadm --create %(raid_part)s --chunk=128 --level=raid%(lvl)s " % locals()
            + "--raid-devices=%(num_parts)s %(part_list)s" % locals()
        )
        utils.sysexec(cmd, log)
    # BUGFIX: 'except E, e' is python2-only syntax; 'as' works on 2.6+ and 3.x
    except BootManagerException as e:
        log.write("create_raid_partition failed.\n")
        raid_part = None

    # BUGFIX: the device path was computed but never returned, so callers
    # always saw None even when the array was created successfully
    # (compare the other copy of this function in this file).
    return raid_part
def Upload( self, extra_file=None ):
    """
    upload the contents of the log to the server
    """
    if self.OutputFile is not None:
        self.OutputFile.flush()
        self.LogEntry( "Uploading logs to %s" % self.VARS['UPLOAD_LOG_SCRIPT'] )
        # close before uploading so the on-disk file is complete;
        # subsequent LogEntry calls will no longer reach this file
        self.OutputFile.close()
        self.OutputFile= None

        hostname= self.VARS['INTERFACE_SETTINGS']['hostname'] + "." + \
                  self.VARS['INTERFACE_SETTINGS']['domainname']
        bs_request = BootServerRequest.BootServerRequest(self.VARS)
        try:
            # this was working until f10
            bs_request.MakeRequest(PartialPath = self.VARS['UPLOAD_LOG_SCRIPT'],
                                   GetVars = None, PostVars = None,
                                   DoSSL = True, DoCertCheck = True,
                                   FormData = ["log=@" + self.OutputFilePath,
                                               "hostname=" + hostname,
                                               "type=bm.log"])
        except:
            # new pycurl
            import pycurl
            bs_request.MakeRequest(PartialPath = self.VARS['UPLOAD_LOG_SCRIPT'],
                                   GetVars = None, PostVars = None,
                                   DoSSL = True, DoCertCheck = True,
                                   FormData = [('log',(pycurl.FORM_FILE, self.OutputFilePath)),
                                               ("hostname",hostname),
                                               ("type","bm.log")])

    if extra_file is not None:
        # NOTE: for code-reuse, evoke the bash function 'upload_logs';
        # by adding --login, bash reads .bash_profile before execution.
        # Also, never fail, since this is an optional feature.
        utils.sysexec_noerr( """bash --login -c "upload_logs %s" """ % extra_file, self)
def Run(vars, log):
    """
    Rebuilds the system initrd, on first install or in case the
    hardware changed.

    Expects SYSIMG_PATH and PARTITIONS in vars; raises
    BootManagerException when either is missing or empty.
    """

    log.write("\n\nStep: Rebuilding initrd\n")

    # make sure we have the variables we need
    try:
        SYSIMG_PATH = vars["SYSIMG_PATH"]
        if SYSIMG_PATH == "":
            raise ValueError("SYSIMG_PATH")

        PARTITIONS = vars["PARTITIONS"]
        if PARTITIONS == None:
            raise ValueError("PARTITIONS")
    except KeyError as var:
        raise BootManagerException(
            "Missing variable in vars: {}\n".format(var))
    except ValueError as var:
        raise BootManagerException(
            "Variable in vars, shouldn't be: {}\n".format(var))

    # mkinitrd needs /dev and /proc to do the right thing.
    # /proc is already mounted, so bind-mount /dev here
    # xxx tmp - trying to work around the f14 case:
    # check that /dev/ is mounted with devtmpfs
    # tmp - sysexec_noerr not returning what one would expect
    # if utils.sysexec_noerr ("grep devtmpfs /proc/mounts") != 0:
    utils.sysexec_noerr("mount -t devtmpfs none /dev")
    utils.sysexec("mount -o bind /dev {}/dev".format(SYSIMG_PATH))
    utils.sysexec("mount -t sysfs none {}/sys".format(SYSIMG_PATH))

    initrd, kernel_version = systeminfo.getKernelVersion(vars, log)
    try:
        # remove the stale initrd so mkinitrd starts from scratch
        utils.removefile("{}/boot/{}".format(SYSIMG_PATH, initrd))
    except:
        log.write("{}/boot/{} is already removed\n".format(
            SYSIMG_PATH, initrd))

    # hack for CentOS 5.3
    bypassRaidIfNeeded(SYSIMG_PATH, log)
    # specify ext3 for fedora14 and above as their default fs is ext4
    utils.sysexec_chroot(
        SYSIMG_PATH,
        "mkinitrd -v --with=ext3 --allow-missing /boot/initrd-{}.img {}".format(
            kernel_version, kernel_version), log)

    # undo the bind mounts set up above (best-effort)
    utils.sysexec_noerr("umount {}/sys".format(SYSIMG_PATH), log)
    utils.sysexec_noerr("umount {}/dev".format(SYSIMG_PATH), log)
def Run(vars, log):
    """
    Find any new large block devices we can add to the vservers volume group

    Expect the following variables to be set:
    SYSIMG_PATH          the path where the system image will be mounted
    MINIMUM_DISK_SIZE    any disks smaller than this size, in GB, are not used
    NODE_MODEL_OPTIONS   the node's model options

    Set the following variables upon successfully running:
    ROOT_MOUNTED         the node root file system is mounted
    """

    log.write("\n\nStep: Checking for unused disks to add to LVM.\n")

    # make sure we have the variables we need
    try:
        SYSIMG_PATH = vars["SYSIMG_PATH"]
        if SYSIMG_PATH == "":
            raise ValueError("SYSIMG_PATH")

        MINIMUM_DISK_SIZE = int(vars["MINIMUM_DISK_SIZE"])

        PARTITIONS = vars["PARTITIONS"]
        if PARTITIONS == None:
            raise ValueError("PARTITIONS")

        NODE_MODEL_OPTIONS = vars["NODE_MODEL_OPTIONS"]
    except KeyError as var:
        raise BootManagerException(
            "Missing variable in vars: {}\n".format(var))
    except ValueError as var:
        raise BootManagerException(
            "Variable in vars, shouldn't be: {}\n".format(var))

    devices_dict = systeminfo.get_block_devices_dict(vars, log)

    # will contain the new devices to add to the volume group
    new_devices = []

    # total amount of new space in gb
    extended_gb_size = 0

    utils.display_disks_status(PARTITIONS, "In CheckForNewDisks", log)

    for device, details in devices_dict.items():
        (major, minor, blocks, gb_size, readonly) = details

        # already-managed devices show up as /dev/planetlab/...
        if device[:14] == "/dev/planetlab":
            log.write("Skipping device {} in volume group.\n".format(device))
            continue

        if readonly:
            log.write("Skipping read only device {}\n".format(device))
            continue

        if gb_size < MINIMUM_DISK_SIZE:
            log.write("Skipping too small device {} ({:4.2f}) Gb\n"\
                      .format(device, gb_size))
            continue

        log.write("Checking device {} to see if it is part " \
                  "of the volume group.\n".format(device))

        # Thierry - June 2015
        # when introducing the 'upgrade' verb, we ran into the situation
        # where 'pvdisplay' at this point displays e.g. /dev/sda, instead
        # of /dev/sda1
        # we thus consider that if either of these is known, then
        # the disk is already part of LVM
        first_partition = InstallPartitionDisks.get_partition_path_from_device(
            device, vars, log)
        probe_first_part = "pvdisplay {} | grep -q planetlab".format(first_partition)
        probe_device = "pvdisplay {} | grep -q planetlab".format(device)
        already_added = utils.sysexec_noerr(probe_first_part, log, shell=True) \
                     or utils.sysexec_noerr(probe_device, log, shell=True)

        if already_added:
            log.write("It appears {} is part of the volume group, continuing.\n"\
                      .format(device))
            continue

        # just to be extra paranoid, ignore the device if it already has
        # an lvm partition on it (new disks won't have this, and that is
        # what this code is for, so it should be ok).
        cmd = "parted --script --list {} | grep -q lvm$".format(device)
        has_lvm = utils.sysexec_noerr(cmd, log, shell=True)
        if has_lvm:
            log.write("It appears {} has lvm already setup on it.\n".format(device))
            # NOTE: 'paranoid' is hardwired False, so this guard is
            # effectively disabled and the device is still considered
            paranoid = False
            if paranoid:
                log.write("Too paranoid to add {} to vservers lvm.\n".format(device))
                continue

        if not InstallPartitionDisks.single_partition_device(device, vars, log):
            log.write("Unable to partition {}, not using it.\n".format(device))
            continue

        log.write("Successfully partitioned {}\n".format(device))

        if NODE_MODEL_OPTIONS & ModelOptions.RAWDISK:
            log.write("Running on a raw disk node, not using it.\n")
            continue

        part_path = InstallPartitionDisks.get_partition_path_from_device(
            device, vars, log)

        log.write("Attempting to add {} to the volume group\n".format(device))

        if not InstallPartitionDisks.create_lvm_physical_volume(
                part_path, vars, log):
            log.write("Unable to create lvm physical volume {}, not using it.\n"\
                      .format(part_path))
            continue

        log.write("Adding {} to list of devices to add to "
                  "planetlab volume group.\n".format(device))

        extended_gb_size = extended_gb_size + gb_size
        new_devices.append(part_path)

    if len(new_devices) > 0:

        log.write("Extending planetlab volume group.\n")

        log.write("Unmounting disks.\n")
        try:
            # backwards compat, though, we should never hit this case post PL 3.2
            os.stat("{}/rcfs/taskclass".format(SYSIMG_PATH))
            utils.sysexec_chroot_noerr(SYSIMG_PATH, "umount /rcfs", log)
        except OSError as e:
            pass

        # umount in order to extend disk size
        utils.sysexec_noerr("umount {}/proc".format(SYSIMG_PATH), log)
        utils.sysexec_noerr("umount {}/vservers".format(SYSIMG_PATH), log)
        utils.sysexec_noerr("umount {}".format(SYSIMG_PATH), log)
        utils.sysexec("vgchange -an", log)

        vars['ROOT_MOUNTED'] = 0

        # single-iteration loop used as a 'goto end' construct: every
        # outcome sets res and breaks
        while True:
            cmd = "vgextend planetlab {}".format(" ".join(new_devices))
            if not utils.sysexec_noerr(cmd, log):
                log.write("Failed to add physical volumes {} to "\
                          "volume group, continuing.\n".format(" ".join(new_devices)))
                res = 1
                break

            # now, get the number of unused extents, and extend the vserver
            # logical volume by that much.
            remaining_extents = \
                InstallPartitionDisks.get_remaining_extents_on_vg(vars, log)

            log.write("Extending vservers logical volume.\n")
            utils.sysexec("vgchange -ay", log)
            cmd = "lvextend -l +{} {}".format(remaining_extents, PARTITIONS["vservers"])
            if not utils.sysexec_noerr(cmd, log):
                log.write("Failed to extend vservers logical volume, continuing\n")
                res = 1
                break

            log.write("making the ext filesystem match new logical volume size.\n")

            vars['ROOT_MOUNTED'] = 1
            cmd = "mount {} {}".format(PARTITIONS["root"], SYSIMG_PATH)
            utils.sysexec_noerr(cmd, log)
            cmd = "mount {} {}/vservers".format(PARTITIONS["vservers"], SYSIMG_PATH)
            utils.sysexec_noerr(cmd, log)
            cmd = "resize2fs {}".format(PARTITIONS["vservers"])
            resize = utils.sysexec_noerr(cmd, log)
            utils.sysexec_noerr("umount {}/vservers".format(SYSIMG_PATH), log)
            utils.sysexec_noerr("umount {}".format(SYSIMG_PATH), log)
            vars['ROOT_MOUNTED'] = 0
            utils.sysexec("vgchange -an", log)

            if not resize:
                log.write("Failed to resize vservers partition, continuing.\n")
                res = 1
                break
            else:
                log.write("Extended vservers partition by {:4.2f} Gb\n"\
                          .format(extended_gb_size))
                res = 1
                break

    else:
        log.write("No new disk devices to add to volume group.\n")
        res = 1

    return res
def Run( vars, log ):
    """
    read the machines node configuration file, which contains
    the node key and the node_id for this machine.

    these files can exist in several different locations with
    several different names. Below is the search order:

    filename      floppy   flash   ramdisk   cd
    plnode.txt      1        2     4 (/)     5 (/usr/boot), 6 (/usr)
    planet.cnf      3

    The locations will be searched in the above order, plnode.txt
    will be checked first, then planet.cnf. Flash devices will only
    be searched on 3.0 cds.

    Because some of the earlier boot cds don't validate the configuration
    file (which results in a file named /tmp/planet-clean.cnf), and some
    do, lets bypass this, and mount and attempt to read in the conf
    file ourselves. If it doesn't exist, we cannot continue, and a
    BootManagerException will be raised. If the configuration file is
    found and read, return 1.

    Expect the following variables from the store:

    Sets the following variables from the configuration file:
    WAS_NODE_ID_IN_CONF     Set to 1 if the node id was in the conf file
    WAS_NODE_KEY_IN_CONF    Set to 1 if the node key was in the conf file
    NONE_ID                 The db node_id for this machine
    NODE_KEY                The key for this node
    INTERFACE_SETTINGS      A dictionary of the values from the network
                            configuration file. keys set:
                               method           IP_METHOD
                               ip               IP_ADDRESS
                               mac              NET_DEVICE
                               gateway          IP_GATEWAY
                               network          IP_NETADDR
                               broadcast        IP_BROADCASTADDR
                               netmask          IP_NETMASK
                               dns1             IP_DNS1
                               dns2             IP_DNS2
                               hostname         HOST_NAME
                               domainname       DOMAIN_NAME
                            -- wlan oriented --
                               ssid             WLAN_SSID
                               iwconfig         WLAN_IWCONFIG

    the mac address is read from the machine unless it exists in the
    configuration file.
    """

    log.write( "\n\nStep: Reading node configuration file.\n" )

    # make sure we have the variables we need

    # defaults: dhcp with localhost/localdomain until the conf file says otherwise
    INTERFACE_SETTINGS= {}
    INTERFACE_SETTINGS['method']= "dhcp"
    INTERFACE_SETTINGS['ip']= ""
    INTERFACE_SETTINGS['mac']= ""
    INTERFACE_SETTINGS['gateway']= ""
    INTERFACE_SETTINGS['network']= ""
    INTERFACE_SETTINGS['broadcast']= ""
    INTERFACE_SETTINGS['netmask']= ""
    INTERFACE_SETTINGS['dns1']= ""
    INTERFACE_SETTINGS['dns2']= ""
    INTERFACE_SETTINGS['hostname']= "localhost"
    INTERFACE_SETTINGS['domainname']= "localdomain"
    vars['INTERFACE_SETTINGS']= INTERFACE_SETTINGS

    vars['NODE_ID']= 0
    vars['NODE_KEY']= ""

    vars['WAS_NODE_ID_IN_CONF']= 0
    vars['WAS_NODE_KEY_IN_CONF']= 0

    vars['DISCONNECTED_OPERATION']= ''

    # for any devices that need to be mounted to get the configuration
    # file, mount them here.
    mount_point= "/tmp/conffilemount"
    utils.makedirs( mount_point )

    old_conf_file_contents= None
    conf_file_contents= None

    # 1. check the regular floppy device
    log.write( "Checking standard floppy disk for plnode.txt file.\n" )

    log.write( "Mounting /dev/fd0 on %s\n" % mount_point )
    utils.sysexec_noerr( "mount -o ro -t ext2,msdos /dev/fd0 %s " \
                         % mount_point, log )

    conf_file_path= "%s/%s" % (mount_point,NEW_CONF_FILE_NAME)

    # log.write( "Checking for existence of %s\n" % conf_file_path )
    if os.access( conf_file_path, os.R_OK ):
        try:
            conf_file= file(conf_file_path,"r")
            conf_file_contents= conf_file.read()
            conf_file.close()
            log.write( "Read in contents of file %s\n" % conf_file_path )
        except IOError, e:
            log.write( "Unable to read file %s\n" % conf_file_path )
            pass

        utils.sysexec_noerr( "umount %s" % mount_point, log )
        if __parse_configuration_file( vars, log, conf_file_contents):
            log.write("ReadNodeConfiguration: [1] using %s from floppy /dev/fd0\n"%NEW_CONF_FILE_NAME)
            return 1
        else:
            raise BootManagerException( "Found configuration file plnode.txt " \
                                        "on floppy, but was unable to parse it." )

    # try to parse it later...
    conf_file_path= "%s/%s" % (mount_point,OLD_CONF_FILE_NAME)

    # this message really does not convey any useful information
    # log.write( "Checking for existence of %s (used later)\n" % conf_file_path )
    if os.access( conf_file_path, os.R_OK ):
        try:
            old_conf_file= file(conf_file_path,"r")
            old_conf_file_contents= old_conf_file.read()
            old_conf_file.close()
            log.write( "Read in contents of file %s\n" % conf_file_path )
        except IOError, e:
            log.write( "Unable to read file %s\n" % conf_file_path )
            pass

    utils.sysexec_noerr( "umount %s" % mount_point, log )

    # 2. check flash devices on 3.0 based cds
    log.write( "Checking flash devices for plnode.txt file.\n" )

    # this is done the same way the 3.0 cds do it, by attempting
    # to mount and sd*1 devices that are removable
    devices= os.listdir("/sys/block/")

    for device in devices:
        if device[:2] != "sd":
            log.write( "Skipping non-scsi device %s\n" % device )
            continue

        # test removable
        removable_file_path= "/sys/block/%s/removable" % device
        # NOTE(review): this chunk of the file is truncated here; the
        # remainder of the flash-device scan is outside the visible region.
def mkccissnod(dev, node):
    # Create the block special file for a cciss device (mknod with
    # major 104 and minor 'node'), then return the next minor number.
    spec = dev + " b 104 %d" % (node)
    cmd = "mknod /dev/cciss/%s" % spec
    utils.sysexec_noerr(cmd)
    return node + 1
def Run(vars, log):
    """
    Setup the block devices for install, partition them w/ LVM

    Expect the following variables from the store:
    INSTALL_BLOCK_DEVICES    list of block devices to install onto
    TEMP_PATH                somewhere to store what we need to run
    ROOT_SIZE                the size of the root logical volume
    SWAP_SIZE                the size of the swap partition
    """

    log.write("\n\nStep: Install: partitioning disks.\n")

    # make sure we have the variables we need
    try:
        TEMP_PATH = vars["TEMP_PATH"]
        if TEMP_PATH == "":
            raise ValueError("TEMP_PATH")

        INSTALL_BLOCK_DEVICES = vars["INSTALL_BLOCK_DEVICES"]
        if (len(INSTALL_BLOCK_DEVICES) == 0):
            raise ValueError("INSTALL_BLOCK_DEVICES is empty")

        # use vs_ROOT_SIZE or lxc_ROOT_SIZE as appropriate
        varname = vars['virt'] + "_ROOT_SIZE"
        ROOT_SIZE = vars[varname]
        if ROOT_SIZE == "" or ROOT_SIZE == 0:
            raise ValueError("ROOT_SIZE invalid")

        SWAP_SIZE = vars["SWAP_SIZE"]
        if SWAP_SIZE == "" or SWAP_SIZE == 0:
            raise ValueError("SWAP_SIZE invalid")

        NODE_MODEL_OPTIONS = vars["NODE_MODEL_OPTIONS"]

        PARTITIONS = vars["PARTITIONS"]
        if PARTITIONS == None:
            raise ValueError("PARTITIONS")

        # VSERVERS_SIZE only matters for raw-disk nodes; "-1" means
        # "use all remaining space"
        if NODE_MODEL_OPTIONS & ModelOptions.RAWDISK:
            VSERVERS_SIZE = "-1"
            if "VSERVERS_SIZE" in vars:
                VSERVERS_SIZE = vars["VSERVERS_SIZE"]
                if VSERVERS_SIZE == "" or VSERVERS_SIZE == 0:
                    raise ValueError("VSERVERS_SIZE")
    except KeyError as var:
        raise BootManagerException(
            "Missing variable in vars: {}\n".format(var))
    except ValueError as var:
        raise BootManagerException(
            "Variable in vars, shouldn't be: {}\n".format(var))

    bs_request = BootServerRequest.BootServerRequest(vars)

    # disable swap if its on
    utils.sysexec_noerr("swapoff {}".format(PARTITIONS["swap"]), log)

    # shutdown and remove any lvm groups/volumes
    utils.sysexec_noerr("vgscan", log)
    utils.sysexec_noerr("vgchange -ay", log)
    utils.sysexec_noerr("lvremove -f {}".format(PARTITIONS["root"]), log)
    utils.sysexec_noerr("lvremove -f {}".format(PARTITIONS["swap"]), log)
    utils.sysexec_noerr("lvremove -f {}".format(PARTITIONS["vservers"]), log)
    utils.sysexec_noerr("vgchange -an", log)
    utils.sysexec_noerr("vgremove -f planetlab", log)

    log.write("Running vgscan for devices\n")
    utils.sysexec_noerr("vgscan", log)

    used_devices = []

    INSTALL_BLOCK_DEVICES.sort()
    for device in INSTALL_BLOCK_DEVICES:
        if single_partition_device(device, vars, log):
            # raw disk nodes only use the first successfully
            # partitioned device
            if (len(used_devices) > 0 and
                    (vars['NODE_MODEL_OPTIONS'] & ModelOptions.RAWDISK)):
                log.write("Running in raw disk mode, not using {}.\n".format(device))
            else:
                used_devices.append(device)
                log.write("Successfully initialized {}\n".format(device))
        else:
            log.write("Unable to partition {}, not using it.\n".format(device))
            continue

    # list of devices to be used with vgcreate
    vg_device_list = ""

    # get partitions
    partitions = []
    for device in used_devices:
        part_path = get_partition_path_from_device(device, vars, log)
        partitions.append(part_path)

    # create raid partition
    raid_partition = create_raid_partition(partitions, vars, log)
    if raid_partition != None:
        partitions = [raid_partition]
    log.write("partitions={}\n".format(partitions))
    # initialize the physical volumes
    for part_path in partitions:
        if not create_lvm_physical_volume(part_path, vars, log):
            raise BootManagerException("Could not create lvm physical volume "
                                       "on partition {}".format(part_path))
        vg_device_list = vg_device_list + " " + part_path

    # create an lvm volume group
    utils.sysexec("vgcreate -s32M planetlab {}".format(vg_device_list), log)

    # create swap logical volume
    utils.sysexec("lvcreate -L{} -nswap planetlab".format(SWAP_SIZE), log)

    # check if we want a separate partition for VMs
    one_partition = vars['ONE_PARTITION'] == '1'
    if (one_partition):
        # single-partition mode: root gets all remaining extents
        remaining_extents = get_remaining_extents_on_vg(vars, log)
        utils.sysexec(
            "lvcreate -l{} -nroot planetlab".format(remaining_extents), log)
    else:
        utils.sysexec("lvcreate -L{} -nroot planetlab".format(ROOT_SIZE), log)
        if vars['NODE_MODEL_OPTIONS'] & ModelOptions.RAWDISK and VSERVERS_SIZE != "-1":
            utils.sysexec(
                "lvcreate -L{} -nvservers planetlab".format(VSERVERS_SIZE), log)
            remaining_extents = get_remaining_extents_on_vg(vars, log)
            utils.sysexec(
                "lvcreate -l{} -nrawdisk planetlab".format(remaining_extents), log)
        else:
            # create vservers logical volume with all remaining space
            # first, we need to get the number of remaining extents we can use
            remaining_extents = get_remaining_extents_on_vg(vars, log)
            utils.sysexec(
                "lvcreate -l{} -nvservers planetlab".format(remaining_extents), log)

    # activate volume group (should already be active)
    #utils.sysexec(TEMP_PATH + "vgchange -ay planetlab", log)

    # make swap
    utils.sysexec("mkswap -f {}".format(PARTITIONS["swap"]), log)

    # check if badhd option has been set
    option = ''
    txt = ''
    if NODE_MODEL_OPTIONS & ModelOptions.BADHD:
        option = '-c'
        txt = " with bad block search enabled, which may take a while"

    # filesystems partitions names and their corresponding
    # reserved-blocks-percentages
    filesystems = {"root": 5, "vservers": 0}

    # ROOT filesystem is always with ext2
    fs = 'root'
    rbp = filesystems[fs]
    devname = PARTITIONS[fs]
    log.write("formatting {} partition ({}){}.\n".format(fs, devname, txt))
    utils.sysexec("mkfs.ext2 -q {} -m {} -j {}".format(option, rbp, devname), log)
    # disable time/count based filesystems checks
    utils.sysexec_noerr("tune2fs -c -1 -i 0 {}".format(devname), log)

    # VSERVER filesystem with btrfs to support snapshoting and stuff
    fs = 'vservers'
    rbp = filesystems[fs]
    devname = PARTITIONS[fs]
    if vars['virt'] == 'vs':
        log.write("formatting {} partition ({}){}.\n".format(fs, devname, txt))
        utils.sysexec(
            "mkfs.ext2 -q {} -m {} -j {}".format(option, rbp, devname), log)
        # disable time/count based filesystems checks
        utils.sysexec_noerr("tune2fs -c -1 -i 0 {}".format(devname), log)
    elif not one_partition:
        log.write("formatting {} btrfs partition ({}).\n".format(fs, devname))
        # early BootCD's seem to come with a version of mkfs.btrfs that does not support -f
        # let's check for that before invoking it
        mkfs = "mkfs.btrfs"
        if os.system("mkfs.btrfs --help 2>&1 | grep force") == 0:
            mkfs += " -f"
        mkfs += " {}".format(devname)
        utils.sysexec(mkfs, log)
        # as of 2013/02 it looks like there's not yet an option to set fsck frequency with btrfs

    # save the list of block devices in the log
    log.write("Block devices used (in lvm): {}\n".format(repr(used_devices)))

    # list of block devices used may be updated
    vars["INSTALL_BLOCK_DEVICES"] = used_devices

    utils.display_disks_status(PARTITIONS, "End of InstallPartitionDisks", log)

    return 1
def close(self):
    # Close the underlying cache file first, then release the mount point
    # (best-effort: the umount result is deliberately ignored).
    umount_cmd = 'umount {}'.format(self.mntpnt)
    file.close(self)
    utils.sysexec_noerr(umount_cmd)
# if this is a fresh install, then nothing should be # here, but we support restarted installs without rebooting # so who knows what the current state is log.write("Unmounting any previous mounts\n") try: # backwards compat, though, we should never hit this case post PL 3.2 os.stat("{}/rcfs/taskclass".format(SYSIMG_PATH)) utils.sysexec_chroot_noerr(SYSIMG_PATH, "umount /rcfs", log) except OSError, e: pass # NOTE: added /sys and /dev b/c some nodes fail due to this when disk is # nearly full. utils.sysexec_noerr("umount {}/proc".format(SYSIMG_PATH), log) utils.sysexec_noerr("umount {}/mnt/cdrom".format(SYSIMG_PATH), log) utils.sysexec_noerr("umount {}/vservers".format(SYSIMG_PATH), log) utils.sysexec_noerr("umount {}/sys".format(SYSIMG_PATH), log) utils.sysexec_noerr("umount {}/dev".format(SYSIMG_PATH), log) utils.sysexec_noerr("umount {}".format(SYSIMG_PATH), log) vars['ROOT_MOUNTED'] = 0 # log.write( "Removing any old files, directories\n" ) # utils.removedir(TEMP_PATH) log.write("Cleaning up any existing PlanetLab config files\n") try: flist = os.listdir(PLCONF_DIR) for file in flist: utils.removedir(file)
# first run fsck to prevent fs corruption from hanging mount... log.write( "fsck %s file system\n" % filesystem ) utils.sysexec("e2fsck -v -p %s" % (PARTITIONS[filesystem]), log, fsck=True) except BootManagerException, e: log.write( "BootManagerException during fsck of %s (%s) filesystem : %s\n" % (filesystem, PARTITIONS[filesystem], str(e)) ) try: log.write( "Trying to recover filesystem errors on %s\n" % filesystem ) utils.sysexec("e2fsck -v -y %s" % (PARTITIONS[filesystem]),log, fsck=True) except BootManagerException, e: log.write( "BootManagerException during trying to recover filesystem errors on %s (%s) filesystem : %s\n" % (filesystem, PARTITIONS[filesystem], str(e)) ) return -1 else: # disable time/count based filesystems checks utils.sysexec_noerr( "tune2fs -c -1 -i 0 %s" % PARTITIONS[filesystem], log) try: # then attempt to mount them log.write( "mounting root file system\n" ) utils.sysexec("mount -t ext3 %s %s" % (PARTITIONS["root"],SYSIMG_PATH),log) except BootManagerException, e: log.write( "BootManagerException during mount of /root: %s\n" % str(e) ) return -2 try: PROC_PATH = "%s/proc" % SYSIMG_PATH utils.makedirs(PROC_PATH) log.write( "mounting /proc\n" ) utils.sysexec( "mount -t proc none %s" % PROC_PATH, log ) except BootManagerException, e:
except KeyError, var:
    # NOTE(review): fragment -- the matching try: is outside the visible
    # region; this is the standard vars-validation pattern of this file
    raise BootManagerException, "Missing variable in vars: %s\n" % var
except ValueError, var:
    raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var

# constants
ssh_source_files= "%s/debug_files/" % BM_SOURCE_DIR
ssh_dir= "/etc/ssh/"
ssh_home= "/root/.ssh"
cancel_boot_flag= "/tmp/CANCEL_BOOT"
sshd_started_flag= "/tmp/SSHD_RUNNING"

# pre-sshd
pre_sshd_script= os.path.join(ssh_source_files, "pre-sshd")
if os.path.exists(pre_sshd_script):
    utils.sysexec_noerr( pre_sshd_script, log )

# create host keys if needed
if not os.path.isdir (ssh_dir):
    utils.makedirs (ssh_dir)

key=ssh_dir+"/ssh_host_key"
if not os.path.isfile (key):
    log.write("Creating host rsa1 key %s\n"%key)
    utils.sysexec( "ssh-keygen -t rsa1 -b 1024 -f %s -N ''" % key, log )

key=ssh_dir+"/ssh_host_rsa_key"
if not os.path.isfile (key):
    log.write("Creating host rsa key %s\n"%key)
    utils.sysexec( "ssh-keygen -t rsa -f %s -N ''" % key, log )

key=ssh_dir+"/ssh_host_dsa_key"
if not os.path.isfile (key):
    log.write("Creating host dsa key %s\n"%key)
    # NOTE(review): chunk truncated here; the dsa ssh-keygen invocation is
    # outside the visible region.
def Run(vars, upgrade, log):
    """
    Download core + extensions bootstrapfs tarballs and install on the hard drive

    the upgrade boolean is True when we are upgrading a node root install
    while preserving its slice contents; in that case we just perform
    extra cleanup before unwrapping the bootstrapfs
    this is because the running system may have extraneous files
    that is to say, files that are *not* present in the bootstrapfs
    and that can impact/clobber the resulting upgrade

    Expect the following variables from the store:
    SYSIMG_PATH          the path where the system image will be mounted
    PARTITIONS           dictionary of generic part. types (root/swap)
                         and their associated devices.
    NODE_ID              the id of this machine

    Sets the following variables:
    TEMP_BOOTCD_PATH     where the boot cd is remounted in the temp path
    ROOT_MOUNTED         set to 1 when the base logical volumes are mounted.

    Returns 1 on success, 0 when a required partition is missing; raises
    BootManagerException on bad store variables or download/checksum failures.
    """

    log.write("\n\nStep: Install: bootstrapfs tarball (upgrade={}).\n".format(
        upgrade))

    # make sure we have the variables we need
    try:
        SYSIMG_PATH = vars["SYSIMG_PATH"]
        if SYSIMG_PATH == "":
            raise ValueError("SYSIMG_PATH")
        PARTITIONS = vars["PARTITIONS"]
        if PARTITIONS == None:
            raise ValueError("PARTITIONS")
        NODE_ID = vars["NODE_ID"]
        if NODE_ID == "":
            raise ValueError("NODE_ID")
        VERSION = vars['VERSION'] or 'unknown'
    except KeyError as var:
        raise BootManagerException(
            "Missing variable in vars: {}\n".format(var))
    except ValueError as var:
        raise BootManagerException(
            "Variable in vars, shouldn't be: {}\n".format(var))

    try:
        # make sure the required partitions exist
        val = PARTITIONS["root"]
        val = PARTITIONS["swap"]
        val = PARTITIONS["vservers"]
    except KeyError as part:
        log.write("Missing partition in PARTITIONS: {}\n".format(part))
        return 0

    bs_request = BootServerRequest.BootServerRequest(vars)

    # in upgrade mode, since we skip InstallPartitionDisks
    # we need to run this
    if upgrade:
        log.write("Upgrade mode init : Scanning for devices\n")
        systeminfo.get_block_devices_dict(vars, log)
        utils.sysexec_noerr("vgscan --mknodes", log)
        utils.sysexec_noerr("vgchange -ay", log)

    # debugging info - show in either mode
    utils.display_disks_status(PARTITIONS, "In InstallBootstrapFS", log)

    utils.breakpoint("we need to make /dev/mapper/* appear")

    log.write("turning on swap space\n")
    utils.sysexec("swapon {}".format(PARTITIONS["swap"]), log)

    # make sure the sysimg dir is present
    utils.makedirs(SYSIMG_PATH)

    log.write("mounting root file system\n")
    utils.sysexec(
        "mount -t ext3 {} {}".format(PARTITIONS["root"], SYSIMG_PATH), log)

    # vserver (vs) images live on ext3, lxc images on btrfs
    fstype = 'ext3' if vars['virt'] == 'vs' else 'btrfs'

    one_partition = vars['ONE_PARTITION'] == '1'

    if (not one_partition):
        log.write("mounting vserver partition in root file system (type {})\n".
                  format(fstype))
        utils.makedirs(SYSIMG_PATH + "/vservers")
        utils.sysexec("mount -t {} {} {}/vservers"\
                      .format(fstype, PARTITIONS["vservers"], SYSIMG_PATH), log)

        if vars['virt'] == 'lxc':
            # NOTE: btrfs quota is supported from version: >= btrfs-progs-0.20 (f18+)
            # older versions will not recongize the 'quota' command.
            log.write(
                "Enabling btrfs quota on {}/vservers\n".format(SYSIMG_PATH))
            # pass log like every other best-effort command in this step
            utils.sysexec_noerr(
                "btrfs quota enable {}/vservers".format(SYSIMG_PATH), log)

    vars['ROOT_MOUNTED'] = 1

    # this is now retrieved in GetAndUpdateNodeDetails
    nodefamily = vars['nodefamily']
    extensions = vars['extensions']

    # in upgrade mode: we need to cleanup the disk to make
    # it safe to just untar the new bootstrapfs tarball again
    # on top of the hard drive
    if upgrade:
        CleanupSysimgBeforeUpgrade(SYSIMG_PATH, nodefamily, log)

    # the 'plain' option is for tests mostly
    plain = vars['plain']
    if plain:
        download_suffix = ".tar"
        uncompress_option = ""
        log.write("Using plain bootstrapfs images\n")
    else:
        download_suffix = ".tar.bz2"
        uncompress_option = "-j"
        log.write("Using compressed bootstrapfs images\n")

    log.write("Using nodefamily={}\n".format(nodefamily))
    if not extensions:
        log.write("Installing only core software\n")
    else:
        log.write("Requested extensions {}\n".format(extensions))

    # core tarball first, then any extension tarballs
    bootstrapfs_names = [nodefamily] + extensions

    for name in bootstrapfs_names:
        tarball = "bootstrapfs-{}{}".format(name, download_suffix)
        source_file = "/boot/{}".format(tarball)
        dest_file = "{}/{}".format(SYSIMG_PATH, tarball)

        source_hash_file = "/boot/{}.sha1sum".format(tarball)
        dest_hash_file = "{}/{}.sha1sum".format(SYSIMG_PATH, tarball)

        time_beg = time.time()
        log.write("downloading {}\n".format(source_file))
        # 30 is the connect timeout, 14400 is the max transfer time in
        # seconds (4 hours)
        result = bs_request.DownloadFile(source_file, None, None,
                                         1, 1, dest_file,
                                         30, 14400)
        time_end = time.time()
        duration = int(time_end - time_beg)
        log.write("Done downloading ({} seconds)\n".format(duration))
        if result:
            # Download SHA1 checksum file
            log.write("downloading sha1sum for {}\n".format(source_file))
            result = bs_request.DownloadFile(source_hash_file, None, None,
                                             1, 1, dest_hash_file,
                                             30, 14400)

            log.write("verifying sha1sum for {}\n".format(source_file))
            if not utils.check_file_hash(dest_file, dest_hash_file):
                raise BootManagerException(
                    "FATAL: SHA1 checksum does not match between {} and {}"\
                    .format(source_file, source_hash_file))

            time_beg = time.time()
            log.write("extracting {} in {}\n".format(dest_file, SYSIMG_PATH))
            result = utils.sysexec(
                "tar -C {} -xpf {} {}".format(SYSIMG_PATH, dest_file,
                                              uncompress_option),
                log)
            time_end = time.time()
            duration = int(time_end - time_beg)
            log.write("Done extracting ({} seconds)\n".format(duration))
            utils.removefile(dest_file)
        else:
            # the main tarball is required
            if name == nodefamily:
                raise BootManagerException(
                    "FATAL: Unable to download main tarball {} from server."\
                    .format(source_file))
            # for extensions, just issue a warning
            else:
                log.write(
                    "WARNING: tarball for extension {} not found\n".format(
                        name))

    # copy resolv.conf from the base system into our temp dir
    # so DNS lookups work correctly while we are chrooted
    log.write("Copying resolv.conf to temp dir\n")
    utils.sysexec("cp /etc/resolv.conf {}/etc/".format(SYSIMG_PATH), log)

    # Copy the boot server certificate(s) and GPG public key to
    # /usr/boot in the temp dir.
    log.write("Copying boot server certificates and public key\n")

    if os.path.exists("/usr/boot"):
        # do nothing in case of upgrade
        if not os.path.exists(SYSIMG_PATH + "/usr/boot"):
            utils.makedirs(SYSIMG_PATH + "/usr")
            shutil.copytree("/usr/boot", SYSIMG_PATH + "/usr/boot")
    elif os.path.exists("/usr/bootme"):
        # do nothing in case of upgrade
        if not os.path.exists(SYSIMG_PATH + "/usr/bootme"):
            utils.makedirs(SYSIMG_PATH + "/usr/boot")
            # open() replaces the python2-only file() builtin; 'with'
            # guarantees the handles are closed
            with open("/usr/bootme/BOOTSERVER") as bootme_file:
                boot_server = bootme_file.readline().strip()
            shutil.copy("/usr/bootme/cacert/" + boot_server + "/cacert.pem",
                        SYSIMG_PATH + "/usr/boot/cacert.pem")
            with open(SYSIMG_PATH + "/usr/boot/boot_server", "w") as server_file:
                server_file.write(boot_server)
            shutil.copy("/usr/bootme/pubring.gpg",
                        SYSIMG_PATH + "/usr/boot/pubring.gpg")

    # For backward compatibility
    if os.path.exists("/usr/bootme"):
        # do nothing in case of upgrade
        if not os.path.exists(SYSIMG_PATH + "/mnt/cdrom/bootme"):
            utils.makedirs(SYSIMG_PATH + "/mnt/cdrom")
            shutil.copytree("/usr/bootme", SYSIMG_PATH + "/mnt/cdrom/bootme")

    # ONE_PARTITION => new distribution type
    if (vars['ONE_PARTITION'] != '1'):
        # Import the GPG key into the RPM database so that RPMS can be verified
        utils.makedirs(SYSIMG_PATH + "/etc/pki/rpm-gpg")
        utils.sysexec(
            "gpg --homedir=/root --export --armor"
            " --no-default-keyring --keyring {}/usr/boot/pubring.gpg"
            " > {}/etc/pki/rpm-gpg/RPM-GPG-KEY-planetlab".format(
                SYSIMG_PATH, SYSIMG_PATH),
            log)
        utils.sysexec_chroot(
            SYSIMG_PATH,
            "rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-planetlab", log)

    # keep a log on the installed hdd
    with open(SYSIMG_PATH + "/bm-install.txt", 'a') as stamp:
        now = time.strftime("%Y-%b-%d @ %H:%M %Z", time.gmtime())
        stamp.write("Hard drive installed by BootManager {}\n".format(VERSION))
        stamp.write("Finished extraction of bootstrapfs on {}\n".format(now))
        # do not modify this, the upgrade code uses this line for checking compatibility
        stamp.write("Using nodefamily {}\n".format(nodefamily))

    return 1
if NODE_MODEL_OPTIONS & ModelOptions.RAWDISK: VSERVERS_SIZE = "-1" if "VSERVERS_SIZE" in vars: VSERVERS_SIZE = vars["VSERVERS_SIZE"] if VSERVERS_SIZE == "" or VSERVERS_SIZE == 0: raise ValueError, "VSERVERS_SIZE" except KeyError, var: raise BootManagerException, "Missing variable in vars: %s\n" % var except ValueError, var: raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var bs_request = BootServerRequest.BootServerRequest(vars) # disable swap if its on utils.sysexec_noerr("swapoff %s" % PARTITIONS["swap"], log) # shutdown and remove any lvm groups/volumes utils.sysexec_noerr("vgscan", log) utils.sysexec_noerr("vgchange -ay", log) utils.sysexec_noerr("lvremove -f %s" % PARTITIONS["root"], log) utils.sysexec_noerr("lvremove -f %s" % PARTITIONS["swap"], log) utils.sysexec_noerr("lvremove -f %s" % PARTITIONS["vservers"], log) utils.sysexec_noerr("vgchange -an", log) utils.sysexec_noerr("vgremove -f planetlab", log) log.write("Running vgscan for devices\n") utils.sysexec_noerr("vgscan", log) used_devices = []
raise BootManagerException, "Missing variable in vars: %s\n" % var except ValueError, var: raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var # constants ssh_source_files = "%s/debug_files/" % BM_SOURCE_DIR ssh_dir = "/etc/ssh/" key_gen_prog = "ssh-keygen" ssh_home = "/root/.ssh" cancel_boot_flag = "/tmp/CANCEL_BOOT" sshd_started_flag = "/tmp/SSHD_RUNNING" # pre-sshd pre_sshd_script = os.path.join(ssh_source_files, "pre-sshd") if os.path.exists(pre_sshd_script): utils.sysexec_noerr(pre_sshd_script, log) # create host keys if needed if not os.path.isdir(ssh_dir): utils.makedirs(ssh_dir) # original code used to specify -b 1024 for the rsa1 key # fedora23 seems to come with a release of openssh that lacks suppport # for ssh1, and thus rsa1 keys; so we consider that failing to produce # the rsa1 key is not a showstopper key_specs = [ ("/etc/ssh/ssh_host_key", 'rsa1', "SSH1 RSA", False), ("/etc/ssh/ssh_host_rsa_key", 'rsa', "SSH2 RSA", True), ("/etc/ssh/ssh_host_dsa_key", 'dsa', "SSH2 DSA", True), ]
# if this is a fresh install, then nothing should be # here, but we support restarted installs without rebooting # so who knows what the current state is log.write( "Unmounting any previous mounts\n" ) try: # backwards compat, though, we should never hit this case post PL 3.2 os.stat("%s/rcfs/taskclass"%SYSIMG_PATH) utils.sysexec_chroot_noerr( SYSIMG_PATH, "umount /rcfs", log ) except OSError, e: pass # NOTE: added /sys and /dev b/c some nodes fail due to this when disk is # nearly full. utils.sysexec_noerr( "umount %s/proc" % SYSIMG_PATH , log ) utils.sysexec_noerr( "umount %s/mnt/cdrom" % SYSIMG_PATH , log ) utils.sysexec_noerr( "umount %s/vservers" % SYSIMG_PATH , log ) utils.sysexec_noerr( "umount %s/sys" % SYSIMG_PATH , log ) utils.sysexec_noerr( "umount %s/dev" % SYSIMG_PATH , log ) utils.sysexec_noerr( "umount %s" % SYSIMG_PATH , log ) vars['ROOT_MOUNTED']= 0 # log.write( "Removing any old files, directories\n" ) # utils.removedir( TEMP_PATH ) log.write( "Cleaning up any existing PlanetLab config files\n" ) try: flist = os.listdir( PLCONF_DIR) for file in flist: utils.removedir( file )
vserver_dir ) return file_path= "%s/etc/%s" % (vserver_dir,UPDATE_FILE_FLAG) update_files= 0 if os.access(file_path,os.F_OK): update_files= 1 if '/.vref/' in vserver_dir or \ '/.vcache/' in vserver_dir or \ '/vserver-reference' in vserver_dir: log.write( "Forcing update on vserver reference directory:\n%s\n" % vserver_dir ) utils.sysexec_noerr( "echo '%s' > %s/etc/%s" % (UPDATE_FILE_FLAG,vserver_dir,UPDATE_FILE_FLAG), log ) update_files= 1 if update_files: log.write( "Updating network files in %s.\n" % vserver_dir ) try: # NOTE: this works around a recurring problem on public pl, # suspected to be due to mismatch between 2.6.12 bootcd and # 2.6.22/f8 root environment. files randomly show up with the # immutible attribute set. this clears it before trying to write # the files below. utils.sysexec( "chattr -i %s/etc/hosts" % vserver_dir , log ) utils.sysexec( "chattr -i %s/etc/resolv.conf" % vserver_dir , log ) except:
def update_vserver_network_files(vserver_dir, vars, log):
    """
    Update the /etc/resolv.conf and /etc/hosts files in the specified
    vserver directory. If the files do not exist, write them out. If they
    do exist, rewrite them with new values if the file UPDATE_FILE_FLAG
    exists it /etc. if this is called with the vserver-reference directory,
    always update the network config files and create the UPDATE_FILE_FLAG.

    This is currently called when setting up the initial vserver reference,
    and later when nodes boot to update existing vserver images.

    Expect the following variables from the store:
    SYSIMG_PATH        the path where the system image will be mounted
                       (always starts with TEMP_PATH)
    INTERFACE_SETTINGS A dictionary of the values from the network
                       configuration file

    Raises BootManagerException when store variables are missing or empty;
    returns silently (after logging) when vserver_dir does not exist.
    """
    try:
        SYSIMG_PATH = vars["SYSIMG_PATH"]
        if SYSIMG_PATH == "":
            raise ValueError("SYSIMG_PATH")
        INTERFACE_SETTINGS = vars["INTERFACE_SETTINGS"]
        if INTERFACE_SETTINGS == "":
            raise ValueError("INTERFACE_SETTINGS")
    except KeyError as var:
        raise BootManagerException(
            "Missing variable in vars: {}\n".format(var))
    except ValueError as var:
        raise BootManagerException(
            "Variable in vars, shouldn't be: {}\n".format(var))

    try:
        ip = INTERFACE_SETTINGS['ip']
        method = INTERFACE_SETTINGS['method']
        hostname = INTERFACE_SETTINGS['hostname']
        domainname = INTERFACE_SETTINGS['domainname']
    except KeyError as var:
        raise BootManagerException(
            "Missing network value {} in var INTERFACE_SETTINGS\n".format(var))

    try:
        os.listdir(vserver_dir)
    except OSError:
        log.write(
            "Directory {} does not exist to write network conf in.\n".format(
                vserver_dir))
        return

    file_path = "{}/etc/{}".format(vserver_dir, UPDATE_FILE_FLAG)
    update_files = 0
    if os.access(file_path, os.F_OK):
        update_files = 1

    # Thierry - 2012/03 - I'm renaming vserver-reference into sliceimage
    # however I can't quite grasp the reason for this test below, very likely
    # compatibility with very old node images or something
    if '/.vref/' in vserver_dir or \
       '/.vcache/' in vserver_dir or \
       '/vserver-reference' in vserver_dir:
        log.write(
            "Forcing update on vserver reference directory:\n{}\n".format(
                vserver_dir))
        utils.sysexec_noerr(
            "echo '{}' > {}/etc/{}".format(UPDATE_FILE_FLAG, vserver_dir,
                                           UPDATE_FILE_FLAG),
            log)
        update_files = 1

    if update_files:
        log.write("Updating network files in {}.\n".format(vserver_dir))
        try:
            # NOTE: this works around a recurring problem on public pl,
            # suspected to be due to mismatch between 2.6.12 bootcd and
            # 2.6.22/f8 root environment. files randomly show up with the
            # immutible attribute set. this clears it before trying to write
            # the files below.
            utils.sysexec("chattr -i {}/etc/hosts".format(vserver_dir), log)
            utils.sysexec("chattr -i {}/etc/resolv.conf".format(vserver_dir), log)
        except:
            pass

        file_path = "{}/etc/hosts".format(vserver_dir)
        # open() replaces the python2-only file() builtin; the context
        # manager guarantees the handle is closed even on error
        with open(file_path, "w") as hosts_file:
            hosts_file.write("127.0.0.1 localhost\n")
            if method == "static":
                hosts_file.write("{} {}.{}\n".format(ip, hostname, domainname))

        file_path = "{}/etc/resolv.conf".format(vserver_dir)
        if method == "dhcp":
            # copy the resolv.conf from the boot cd env.
            utils.sysexec("cp /etc/resolv.conf {}/etc".format(vserver_dir), log)
        else:
            # copy the generated resolv.conf from the system image, since
            # we generated it via static settings
            utils.sysexec(
                "cp {}/etc/resolv.conf {}/etc".format(SYSIMG_PATH, vserver_dir),
                log)

    return
def Run(vars, log):
    """
    read the machines node configuration file, which contains
    the node key and the node_id for this machine.

    these files can exist in several different locations with
    several different names. Below is the search order:

    filename      floppy   flash    ramdisk    cd
    plnode.txt      1        2       4 (/)     5 (/usr/boot), 6 (/usr)
    planet.cnf      3

    The locations will be searched in the above order, plnode.txt
    will be checked first, then planet.cnf. Flash devices will only
    be searched on 3.0 cds.

    Because some of the earlier
    boot cds don't validate the configuration file (which results
    in a file named /tmp/planet-clean.cnf), and some do, lets
    bypass this, and mount and attempt to read in the conf
    file ourselves. If it doesn't exist, we cannot continue, and a
    BootManagerException will be raised. If the configuration file is found
    and read, return 1.

    Expect the following variables from the store:

    Sets the following variables from the configuration file:
    WAS_NODE_ID_IN_CONF         Set to 1 if the node id was in the conf file
    WAS_NODE_KEY_IN_CONF        Set to 1 if the node key was in the conf file
    NODE_ID                     The db node_id for this machine
    NODE_KEY                    The key for this node
    INTERFACE_SETTINGS          A dictionary of the values from the network
                                configuration file. keys set:
                                   method               IP_METHOD
                                   ip                   IP_ADDRESS
                                   mac                  NET_DEVICE
                                   gateway              IP_GATEWAY
                                   network              IP_NETADDR
                                   broadcast            IP_BROADCASTADDR
                                   netmask              IP_NETMASK
                                   dns1                 IP_DNS1
                                   dns2                 IP_DNS2
                                   hostname             HOST_NAME
                                   domainname           DOMAIN_NAME
                                -- wlan oriented --
                                   ssid                 WLAN_SSID
                                   iwconfig             WLAN_IWCONFIG

    the mac address is read from the machine unless it exists in the
    configuration file.
    """

    log.write("\n\nStep: Reading node configuration file.\n")

    # make sure we have the variables we need

    # defaults for the network settings; overridden by the conf file
    INTERFACE_SETTINGS = {}
    INTERFACE_SETTINGS['method'] = "dhcp"
    INTERFACE_SETTINGS['ip'] = ""
    INTERFACE_SETTINGS['mac'] = ""
    INTERFACE_SETTINGS['gateway'] = ""
    INTERFACE_SETTINGS['network'] = ""
    INTERFACE_SETTINGS['broadcast'] = ""
    INTERFACE_SETTINGS['netmask'] = ""
    INTERFACE_SETTINGS['dns1'] = ""
    INTERFACE_SETTINGS['dns2'] = ""
    INTERFACE_SETTINGS['hostname'] = "localhost"
    INTERFACE_SETTINGS['domainname'] = "localdomain"
    vars['INTERFACE_SETTINGS'] = INTERFACE_SETTINGS
    vars['NODE_ID'] = 0
    vars['NODE_KEY'] = ""
    vars['WAS_NODE_ID_IN_CONF'] = 0
    vars['WAS_NODE_KEY_IN_CONF'] = 0
    vars['DISCONNECTED_OPERATION'] = ''

    # for any devices that need to be mounted to get the configuration
    # file, mount them here.
    mount_point = "/tmp/conffilemount"
    utils.makedirs(mount_point)

    old_conf_file_contents = None
    conf_file_contents = None

    # 1. check the regular floppy device
    log.write("Checking standard floppy disk for plnode.txt file.\n")

    log.write("Mounting /dev/fd0 on {}\n".format(mount_point))
    utils.sysexec_noerr(
        "mount -o ro -t ext2,msdos /dev/fd0 {} ".format(mount_point), log)

    conf_file_path = "{}/{}".format(mount_point, NEW_CONF_FILE_NAME)

    # log.write("Checking for existence of {}\n".format(conf_file_path))
    if os.access(conf_file_path, os.R_OK):
        try:
            # open() replaces the python2-only file() builtin, here and below
            with open(conf_file_path, "r") as conf_file:
                conf_file_contents = conf_file.read()
            log.write("Read in contents of file {}\n".format(conf_file_path))
        except IOError as e:
            log.write("Unable to read file {}\n".format(conf_file_path))
            pass

        utils.sysexec_noerr("umount {}".format(mount_point), log)
        if __parse_configuration_file(vars, log, conf_file_contents):
            log.write(
                "ReadNodeConfiguration: [1] using {} from floppy /dev/fd0\n".
                format(NEW_CONF_FILE_NAME))
            return 1
        else:
            raise BootManagerException(
                "Found configuration file plnode.txt "
                "on floppy, but was unable to parse it.")

    # try the old file name, same device. its actually number 3 on the search
    # order, but do it now to save mounting/unmounting the disk twice.
    # try to parse it later...
    conf_file_path = "{}/{}".format(mount_point, OLD_CONF_FILE_NAME)

    # this message really does not convey any useful information
    # log.write("Checking for existence of %s (used later)\n" % conf_file_path)
    if os.access(conf_file_path, os.R_OK):
        try:
            with open(conf_file_path, "r") as old_conf_file:
                old_conf_file_contents = old_conf_file.read()
            log.write("Read in contents of file {}\n".format(conf_file_path))
        except IOError as e:
            log.write("Unable to read file {}\n".format(conf_file_path))
            pass

    utils.sysexec_noerr("umount {}".format(mount_point), log)

    # 2. check flash devices on 3.0 based cds
    log.write("Checking flash devices for plnode.txt file.\n")

    # this is done the same way the 3.0 cds do it, by attempting
    # to mount and sd*1 devices that are removable
    devices = os.listdir("/sys/block/")

    for device in devices:
        if device[:2] != "sd":
            log.write("Skipping non-scsi device {}\n".format(device))
            continue

        # test removable
        removable_file_path = "/sys/block/{}/removable".format(device)
        try:
            with open(removable_file_path, "r") as removable_file:
                removable = int(removable_file.read().strip())
        except ValueError as e:
            continue
        except IOError as e:
            continue

        if not removable:
            log.write("Skipping non-removable device {}\n".format(device))
            continue

        log.write("Checking removable device {}\n".format(device))

        # 'with' ensures /proc/partitions is closed once per device scan
        with open("/proc/partitions", "r") as partitions:
            for line in partitions:
                found_file = 0
                parsed_file = 0

                if not re.search("{}[0-9]*$".format(device), line):
                    continue

                try:
                    # major minor  #blocks  name
                    # str.split() replaces the python2-only string.split(line)
                    parts = line.split()

                    # ok, try to mount it and see if we have a conf file.
                    full_device = "/dev/{}".format(parts[3])
                except IndexError as e:
                    log.write(
                        "Incorrect /proc/partitions line:\n{}\n".format(line))
                    continue

                log.write("Mounting {} on {}\n".format(full_device, mount_point))
                try:
                    utils.sysexec(
                        "mount -o ro -t ext2,msdos {} {}".format(
                            full_device, mount_point), log)
                except BootManagerException as e:
                    log.write("Unable to mount, trying next partition\n")
                    continue

                conf_file_path = "{}/{}".format(mount_point, NEW_CONF_FILE_NAME)

                log.write(
                    "Checking for existence of {}\n".format(conf_file_path))
                if os.access(conf_file_path, os.R_OK):
                    try:
                        with open(conf_file_path, "r") as conf_file:
                            conf_file_contents = conf_file.read()
                        found_file = 1
                        log.write(
                            "Read in contents of file {}\n".format(
                                conf_file_path))

                        if __parse_configuration_file(vars, log,
                                                      conf_file_contents):
                            parsed_file = 1
                    except IOError as e:
                        log.write(
                            "Unable to read file {}\n".format(conf_file_path))

                utils.sysexec_noerr("umount {}".format(mount_point), log)
                if found_file:
                    if parsed_file:
                        log.write(
                            "ReadNodeConfiguration: [2] using {} from partition {}\n"
                            .format(NEW_CONF_FILE_NAME, full_device))
                        return 1
                    else:
                        raise BootManagerException(
                            "Found configuration file on {}, "
                            "but was unable to parse it.".format(full_device))

    # 3. check standard floppy disk for old file name planet.cnf
    log.write(
        "Checking standard floppy disk for planet.cnf file (for legacy nodes).\n")

    if old_conf_file_contents:
        if __parse_configuration_file(vars, log, old_conf_file_contents):
            log.write(
                "ReadNodeConfiguration: [3] using {} from floppy /dev/fd0\n".
                format(OLD_CONF_FILE_NAME))
            return 1
        else:
            raise BootManagerException(
                "Found configuration file planet.cnf "
                "on floppy, but was unable to parse it.")

    # 4. check for plnode.txt in / (ramdisk)
    log.write("Checking / (ramdisk) for plnode.txt file.\n")

    conf_file_path = "/{}".format(NEW_CONF_FILE_NAME)

    log.write("Checking for existence of {}\n".format(conf_file_path))
    if os.access(conf_file_path, os.R_OK):
        try:
            with open(conf_file_path, "r") as conf_file:
                conf_file_contents = conf_file.read()
            log.write("Read in contents of file {}\n".format(conf_file_path))
        except IOError as e:
            log.write("Unable to read file {}\n".format(conf_file_path))
            pass

        if __parse_configuration_file(vars, log, conf_file_contents):
            log.write(
                "ReadNodeConfiguration: [4] using {} from ramdisk\n".format(
                    NEW_CONF_FILE_NAME))
            return 1
        else:
            raise BootManagerException("Found configuration file plnode.txt "
                                       "in /, but was unable to parse it.")

    # 5. check for plnode.txt in /usr/boot (mounted already)
    log.write("Checking /usr/boot (cd) for plnode.txt file.\n")

    conf_file_path = "/usr/boot/{}".format(NEW_CONF_FILE_NAME)

    log.write("Checking for existence of {}\n".format(conf_file_path))
    if os.access(conf_file_path, os.R_OK):
        try:
            with open(conf_file_path, "r") as conf_file:
                conf_file_contents = conf_file.read()
            log.write("Read in contents of file {}\n".format(conf_file_path))
        except IOError as e:
            log.write("Unable to read file {}\n".format(conf_file_path))
            pass

        if __parse_configuration_file(vars, log, conf_file_contents):
            log.write(
                "ReadNodeConfiguration: [5] using {} from CD in /usr/boot\n".
                format(NEW_CONF_FILE_NAME))
            return 1
        else:
            raise BootManagerException(
                "Found configuration file plnode.txt "
                "in /usr/boot, but was unable to parse it.")

    # 6. check for plnode.txt in /usr (mounted already)
    log.write("Checking /usr (cd) for plnode.txt file.\n")

    conf_file_path = "/usr/{}".format(NEW_CONF_FILE_NAME)

    log.write("Checking for existence of {}\n".format(conf_file_path))
    if os.access(conf_file_path, os.R_OK):
        try:
            with open(conf_file_path, "r") as conf_file:
                conf_file_contents = conf_file.read()
            log.write("Read in contents of file {}\n".format(conf_file_path))
        except IOError as e:
            log.write("Unable to read file {}\n".format(conf_file_path))
            pass

        if __parse_configuration_file(vars, log, conf_file_contents):
            log.write("ReadNodeConfiguration: [6] using {} from /usr\n".format(
                NEW_CONF_FILE_NAME))
            return 1
        else:
            raise BootManagerException("Found configuration file plnode.txt "
                                       "in /usr, but was unable to parse it.")

    raise BootManagerException(
        "Unable to find and read a node configuration file.")
if readonly: log.write( "Skipping read only device %s\n" % device ) continue if gb_size < MINIMUM_DISK_SIZE: log.write( "Skipping too small device %s (%4.2f)\n" % (device,gb_size) ) continue log.write( "Checking device %s to see if it is part " \ "of the volume group.\n" % device ) # this is the lvm partition, if it exists on that device lvm_partition= InstallPartitionDisks.get_partition_path_from_device( device, vars, log ) cmd = "pvdisplay %s | grep -q 'planetlab'" % lvm_partition already_added = utils.sysexec_noerr(cmd, log, shell=True) if already_added: log.write( "It appears %s is part of the volume group, continuing.\n" % device ) continue # just to be extra paranoid, ignore the device if it already has # an lvm partition on it (new disks won't have this, and that is # what this code is for, so it should be ok). cmd = "sfdisk -l %s | grep -q 'Linux LVM'" % device has_lvm= utils.sysexec_noerr(cmd, log) if has_lvm: log.write( "It appears %s has lvm already setup on it.\n" % device) paranoid = False if paranoid:
def Run(vars, log):
    """
    Load the kernel off of a node and boot to it.
    This step assumes the disks are mounted on SYSIMG_PATH.
    If successful, this function will not return.
    If it returns, no chain booting has occurred.

    Expect the following variables:
    SYSIMG_PATH           the path where the system image will be mounted
                          (always starts with TEMP_PATH)
    ROOT_MOUNTED          the node root file system is mounted
    NODE_SESSION          the unique session val set when we requested
                          the current boot state
    PLCONF_DIR            The directory to store PL configuration files in

    Sets the following variables:
    ROOT_MOUNTED          the node root file system is mounted
    """

    log.write("\n\nStep: Chain booting node.\n")

    # make sure we have the variables we need
    try:
        SYSIMG_PATH = vars["SYSIMG_PATH"]
        if SYSIMG_PATH == "":
            raise ValueError("SYSIMG_PATH")

        PLCONF_DIR = vars["PLCONF_DIR"]
        if PLCONF_DIR == "":
            raise ValueError("PLCONF_DIR")

        # its ok if this is blank
        NODE_SESSION = vars["NODE_SESSION"]

        NODE_MODEL_OPTIONS = vars["NODE_MODEL_OPTIONS"]

        PARTITIONS = vars["PARTITIONS"]
        if PARTITIONS == None:
            raise ValueError("PARTITIONS")
    except KeyError as var:
        raise BootManagerException(
            "Missing variable in vars: {}\n".format(var))
    except ValueError as var:
        raise BootManagerException(
            "Variable in vars, shouldn't be: {}\n".format(var))

    ROOT_MOUNTED = 0
    if vars.has_key('ROOT_MOUNTED'):
        ROOT_MOUNTED = vars['ROOT_MOUNTED']

    if ROOT_MOUNTED == 0:
        log.write("Mounting node partitions\n")

        # simply creating an instance of this class and listing the system
        # block devices will make them show up so vgscan can find the planetlab
        # volume group
        systeminfo.get_block_devices_dict(vars, log)
        utils.sysexec("vgscan", log)
        utils.sysexec("vgchange -ay planetlab", log)

        utils.makedirs(SYSIMG_PATH)

        cmd = "mount {} {}".format(PARTITIONS["root"], SYSIMG_PATH)
        utils.sysexec(cmd, log)
        cmd = "mount -t proc none {}/proc".format(SYSIMG_PATH)
        utils.sysexec(cmd, log)

        cmd = "mount {} {}/vservers".format(PARTITIONS["vservers"], SYSIMG_PATH)
        utils.sysexec(cmd, log)

        ROOT_MOUNTED = 1
        vars['ROOT_MOUNTED'] = 1

    utils.display_disks_status(PARTITIONS, "In ChainBootNode", log)

    # write out the session value /etc/planetlab/session
    try:
        session_file_path = "{}/{}/session".format(SYSIMG_PATH, PLCONF_DIR)
        session_file = file(session_file_path, "w")
        session_file.write(str(NODE_SESSION))
        session_file.close()
        session_file = None
        log.write("Updated /etc/planetlab/session\n")
    except IOError as e:
        # best-effort: a missing session file is not fatal for chain boot
        log.write(
            "Unable to write out /etc/planetlab/session, continuing anyway\n")

    # update configuration files
    log.write("Updating configuration files.\n")
    # avoid using conf_files initscript as we're moving to systemd on some platforms
    if (vars['ONE_PARTITION'] != '1'):
        try:
            cmd = "/usr/bin/env python /usr/share/NodeManager/conf_files.py --noscripts"
            utils.sysexec_chroot(SYSIMG_PATH, cmd, log)
        except IOError as e:
            log.write("conf_files failed with \n {}".format(e))

    # update node packages
    log.write("Running node update.\n")
    if os.path.exists(SYSIMG_PATH + "/usr/bin/NodeUpdate.py"):
        cmd = "/usr/bin/NodeUpdate.py start noreboot"
    else:
        # for backwards compatibility
        cmd = "/usr/local/planetlab/bin/NodeUpdate.py start noreboot"
    utils.sysexec_chroot(SYSIMG_PATH, cmd, log)

    # Re-generate initrd right before kexec call
    # this is not required anymore on recent depls.
    if vars['virt'] == 'vs':
        MakeInitrd.Run(vars, log)

    # the following step should be done by NM
    UpdateNodeConfiguration.Run(vars, log)

    log.write("Updating ssh public host key with PLC.\n")
    ssh_host_key = ""
    try:
        ssh_host_key_file = file(
            "{}/etc/ssh/ssh_host_rsa_key.pub".format(SYSIMG_PATH), "r")
        ssh_host_key = ssh_host_key_file.read().strip()
        ssh_host_key_file.close()
        ssh_host_key_file = None
    except IOError as e:
        # no host key yet; upload an empty string
        pass

    update_vals = {}
    update_vals['ssh_rsa_key'] = ssh_host_key
    BootAPI.call_api_function(vars, "BootUpdateNode", (update_vals, ))

    # get the kernel version
    option = ''
    if NODE_MODEL_OPTIONS & ModelOptions.SMP:
        option = 'smp'

    log.write("Copying kernel and initrd for booting.\n")
    if vars['virt'] == 'vs':
        utils.sysexec(
            "cp {}/boot/kernel-boot{} /tmp/kernel".format(SYSIMG_PATH, option),
            log)
        utils.sysexec(
            "cp {}/boot/initrd-boot{} /tmp/initrd".format(SYSIMG_PATH, option),
            log)
    else:
        # Use chroot to call rpm, b/c the bootimage&nodeimage rpm-versions may not work together
        try:
            kversion = os.popen("chroot {} rpm -qa kernel | tail -1 | cut -c 8-"\
                                .format(SYSIMG_PATH)).read().rstrip()
            major_version = int(
                kversion[0])  # Check if the string looks like a kernel version
        except:
            # Try a different method for non-rpm-based distributions.
            # BUGFIX: the awk braces must be doubled ({{ }}) or str.format()
            # treats '{print $9;}' as a replacement field and raises.
            kversion = os.popen("ls -lrt {}/lib/modules | tail -1 | awk '{{print $9;}}'"\
                                .format(SYSIMG_PATH)).read().rstrip()

        utils.sysexec(
            "cp {}/boot/vmlinuz-{} /tmp/kernel".format(SYSIMG_PATH, kversion),
            log)
        candidates = []
        # f16/18: expect initramfs image here
        candidates.append("/boot/initramfs-{}.img".format(kversion))
        # f20: uses a uid of some kind, e.g.
        # /boot/543f88c129de443baaa65800cf3927ce/<kversion>/initrd
        candidates.append("/boot/*/{}/initrd".format(kversion))
        # Ubuntu:
        candidates.append("/boot/initrd.img-{}".format(kversion))

        def find_file_in_sysimg(candidates):
            # return the first candidate pattern that matches under SYSIMG_PATH
            import glob
            for pattern in candidates:
                matches = glob.glob(SYSIMG_PATH + pattern)
                log.write("locating initrd: found {} matches in {}\n".format(
                    len(matches), pattern))
                if matches:
                    return matches[0]

        initrd = find_file_in_sysimg(candidates)
        if initrd:
            utils.sysexec("cp {} /tmp/initrd".format(initrd), log)
        else:
            raise Exception("Unable to locate initrd - bailing out")

    BootAPI.save(vars)

    log.write("Unmounting disks.\n")

    if (vars['ONE_PARTITION'] != '1'):
        utils.sysexec("umount {}/vservers".format(SYSIMG_PATH), log)
    utils.sysexec("umount {}/proc".format(SYSIMG_PATH), log)
    utils.sysexec_noerr("umount {}/dev".format(SYSIMG_PATH), log)
    utils.sysexec_noerr("umount {}/sys".format(SYSIMG_PATH), log)
    utils.sysexec("umount {}".format(SYSIMG_PATH), log)
    utils.sysexec("vgchange -an", log)

    ROOT_MOUNTED = 0
    vars['ROOT_MOUNTED'] = 0

    # Change runlevel to 'boot' prior to kexec.
    StopRunlevelAgent.Run(vars, log)

    log.write("Unloading modules and chain booting to new kernel.\n")

    # further use of log after Upload will only output to screen
    log.Upload("/root/.bash_eternal_history")

    # regardless of whether kexec works or not, we need to stop trying to
    # run anything
    cancel_boot_flag = "/tmp/CANCEL_BOOT"
    utils.sysexec("touch {}".format(cancel_boot_flag), log)

    # on 2.x cds (2.4 kernel) for sure, we need to shutdown everything
    # to get kexec to work correctly. Even on 3.x cds (2.6 kernel),
    # there are a few buggy drivers that don't disable their hardware
    # correctly unless they are first unloaded.
    utils.sysexec_noerr("ifconfig eth0 down", log)
    utils.sysexec_noerr("killall dhclient", log)

    if vars['virt'] == 'vs':
        utils.sysexec_noerr("umount -a -r -t ext2,ext3", log)
    else:
        utils.sysexec_noerr("umount -a -r -t ext2,ext3,btrfs", log)
    utils.sysexec_noerr("modprobe -r lvm-mod", log)

    # modules that should not get unloaded
    # unloading cpqphp causes a kernel panic
    blacklist = ["floppy", "cpqphp", "i82875p_edac", "mptspi"]
    try:
        modules = file("/tmp/loadedmodules", "r")

        for line in modules:
            module = string.strip(line)
            if module in blacklist:
                log.write(
                    "Skipping unload of kernel module '{}'.\n".format(module))
            elif module != "":
                log.write("Unloading {}\n".format(module))
                utils.sysexec_noerr("modprobe -r {}".format(module), log)
                if "e1000" in module:
                    log.write(
                        "Unloading e1000 driver; sleeping 4 seconds...\n")
                    time.sleep(4)

        modules.close()
    except IOError:
        log.write("Couldn't read /tmp/loadedmodules, continuing.\n")

    try:
        modules = file("/proc/modules", "r")

        # Get usage count for USB
        usb_usage = 0
        for line in modules:
            try:
                # Module Size UsageCount UsedBy State LoadAddress
                parts = string.split(line)

                if parts[0] == "usb_storage":
                    usb_usage += int(parts[2])
            except IndexError as e:
                log.write("Couldn't parse /proc/modules, continuing.\n")

        modules.seek(0)

        for line in modules:
            try:
                # Module Size UsageCount UsedBy State LoadAddress
                parts = string.split(line)

                # While we would like to remove all "unused" modules,
                # you can't trust usage count, especially for things
                # like network drivers or RAID array drivers. Just try
                # and unload a few specific modules that we know cause
                # problems during chain boot, such as USB host
                # controller drivers (HCDs) (PL6577).
                # if int(parts[2]) == 0:
                if False and re.search('_hcd$', parts[0]):
                    if usb_usage > 0:
                        log.write("NOT unloading {} since USB may be in use\n".
                                  format(parts[0]))
                    else:
                        log.write("Unloading {}\n".format(parts[0]))
                        utils.sysexec_noerr("modprobe -r {}".format(parts[0]),
                                            log)
            except IndexError as e:
                log.write("Couldn't parse /proc/modules, continuing.\n")
    except IOError:
        log.write("Couldn't read /proc/modules, continuing.\n")

    kargs = "root={} ramdisk_size=8192".format(PARTITIONS["root"])
    if NODE_MODEL_OPTIONS & ModelOptions.SMP:
        kargs = kargs + " " + "acpi=off"
    try:
        # site/node-specific extra kernel args, one line in /kargs.txt
        kargsfb = open("/kargs.txt", "r")
        moreargs = kargsfb.readline()
        kargsfb.close()
        moreargs = moreargs.strip()
        log.write(
            'Parsed in "{}" kexec args from /kargs.txt\n'.format(moreargs))
        kargs = kargs + " " + moreargs
    except IOError:
        # /kargs.txt does not exist, which is fine. Just kexec with default
        # kargs, which is ramdisk_size=8192
        pass

    utils.sysexec_noerr('hwclock --systohc --utc ', log)
    # utils.breakpoint("Before kexec");
    try:
        utils.sysexec(
            'kexec --force --initrd=/tmp/initrd --append="{}" /tmp/kernel'.
            format(kargs), log)
    except BootManagerException as e:
        # if kexec fails, we've shut the machine down to a point where nothing
        # can run usefully anymore (network down, all modules unloaded, file
        # systems unmounted. write out the error, and cancel the boot process
        log.write("\n\n")
        log.write("-------------------------------------------------------\n")
        log.write("kexec failed with the following error. Please report\n")
        log.write("this problem to [email protected].\n\n")
        log.write(str(e) + "\n\n")
        log.write("The boot process has been canceled.\n")
        log.write(
            "-------------------------------------------------------\n\n")

    return
vars['API_SERVER_INST']= api_inst if not __check_boot_version( vars, log ): raise BootManagerException, \ "Boot CD version insufficient to run the Boot Manager" else: log.write( "Running on boot cd version: %s\n" % str(vars['BOOT_CD_VERSION']) ) BOOT_CD_VERSION= vars['BOOT_CD_VERSION'] # In case we are booted with a kernel that does not have the # device mapper code compiled into the kernel. if not os.path.exists("/dev/mapper"): log.write( "Loading support for LVM\n" ) utils.sysexec_noerr( "modprobe dm_mod", log ) # for anything that needs to know we are running under the boot cd and # not the runtime os os.environ['PL_BOOTCD']= "1" return 1 def __check_boot_version( vars, log ): """ identify which version of the boot os we are running on, and whether or not we can run at all on the given version. later, this will be used to identify extra packages to download to enable the boot manager to run on any supported version.
PARTITIONS = vars["PARTITIONS"] if PARTITIONS == None: raise ValueError, "PARTITIONS" except KeyError, var: raise BootManagerException, "Missing variable in vars: %s\n" % var except ValueError, var: raise BootManagerException, "Variable in vars, shouldn't be: %s\n" % var # mkinitrd needs /dev and /proc to do the right thing. # /proc is already mounted, so bind-mount /dev here # xxx tmp - trying to work around the f14 case: # check that /dev/ is mounted with devtmpfs # tmp - sysexec_noerr not returning what one would expect # if utils.sysexec_noerr ("grep devtmpfs /proc/mounts") != 0: utils.sysexec_noerr("mount -t devtmpfs none /dev") utils.sysexec("mount -o bind /dev %s/dev" % SYSIMG_PATH) utils.sysexec("mount -t sysfs none %s/sys" % SYSIMG_PATH) initrd, kernel_version = systeminfo.getKernelVersion(vars, log) try: utils.removefile("%s/boot/%s" % (SYSIMG_PATH, initrd)) except: print "%s/boot/%s is already removed" % (SYSIMG_PATH, initrd) # hack for CentOS 5.3 bypassRaidIfNeeded(SYSIMG_PATH, log) # specify ext3 for fedora14 and above as their default fs is ext4 utils.sysexec_chroot( SYSIMG_PATH, "mkinitrd -v --with=ext3 --allow-missing /boot/initrd-%s.img %s" % (kernel_version, kernel_version),
def Run(vars, log):
    """
    See if a node installation is valid. More checks should certainly be
    done in the future, but for now, make sure that the sym links kernel-boot
    exist in /boot

    Expect the following variables to be set:
    SYSIMG_PATH              the path where the system image will be mounted
                             (always starts with TEMP_PATH)
    ROOT_MOUNTED             the node root file system is mounted
    NODE_ID                  The db node_id for this machine
    PLCONF_DIR               The directory to store the configuration file in

    Set the following variables upon successfully running:
    ROOT_MOUNTED             the node root file system is mounted
    """

    log.write("\n\nStep: Validating node installation.\n")

    # make sure we have the variables we need
    try:
        SYSIMG_PATH = vars["SYSIMG_PATH"]
        if SYSIMG_PATH == "":
            raise ValueError("SYSIMG_PATH")

        NODE_ID = vars["NODE_ID"]
        if NODE_ID == "":
            raise ValueError("NODE_ID")

        PLCONF_DIR = vars["PLCONF_DIR"]
        if PLCONF_DIR == "":
            raise ValueError("PLCONF_DIR")

        NODE_MODEL_OPTIONS = vars["NODE_MODEL_OPTIONS"]

        PARTITIONS = vars["PARTITIONS"]
        if PARTITIONS == None:
            raise ValueError("PARTITIONS")
    except KeyError as var:
        raise BootManagerException("Missing variable in vars: {}\n".format(var))
    except ValueError as var:
        raise BootManagerException("Variable in vars, shouldn't be: {}\n".format(var))

    ROOT_MOUNTED = 0
    if vars.has_key('ROOT_MOUNTED'):
        ROOT_MOUNTED = vars['ROOT_MOUNTED']

    # mount the root system image if we haven't already.
    # capture BootManagerExceptions during the vgscan/change and mount
    # calls, so we can return 0 instead
    if ROOT_MOUNTED == 0:
        # simply creating an instance of this class and listing the system
        # block devices will make them show up so vgscan can find the planetlab
        # volume group
        systeminfo.get_block_devices_dict(vars, log)
        try:
            utils.sysexec("vgscan", log)
            utils.sysexec("vgchange -ay planetlab", log)
        except BootManagerException as e:
            log.write("BootManagerException during vgscan/vgchange: {}\n".format(e))
            return 0

        utils.makedirs(SYSIMG_PATH)

        # xxx - TODO - need to fsck the btrfs partition
        if vars['virt'] == 'vs':
            filesystems_tocheck = ['root', 'vservers']
        else:
            filesystems_tocheck = ['root']

        for filesystem in filesystems_tocheck:
            try:
                # first run fsck to prevent fs corruption from hanging mount...
                log.write("fsck {} file system\n".format(filesystem))
                utils.sysexec("e2fsck -v -p {}".format(PARTITIONS[filesystem]),
                              log, fsck=True)
            except BootManagerException as e:
                log.write("BootManagerException during fsck of {} ({}) filesystem : {}\n"\
                          .format(filesystem, PARTITIONS[filesystem], str(e)))
                try:
                    log.write("Trying to recover filesystem errors on {}\n".format(filesystem))
                    utils.sysexec("e2fsck -v -y {}".format(PARTITIONS[filesystem]),
                                  log, fsck=True)
                except BootManagerException as e:
                    # BUGFIX: added trailing space so the two adjacent string
                    # literals don't concatenate as "recoverfilesystem"
                    log.write("BootManagerException while trying to recover "
                              "filesystem errors on {} ({}) filesystem : {}\n"
                              .format(filesystem, PARTITIONS[filesystem], str(e)))
                    return -1
            else:
                # disable time/count based filesystems checks
                utils.sysexec_noerr("tune2fs -c -1 -i 0 {}".format(PARTITIONS[filesystem]), log)

        try:
            # then attempt to mount them
            log.write("mounting root file system\n")
            utils.sysexec("mount -t ext3 {} {}".format(PARTITIONS["root"], SYSIMG_PATH), log)
        except BootManagerException as e:
            log.write("BootManagerException during mount of /root: {}\n".format(str(e)))
            return -2

        try:
            PROC_PATH = "{}/proc".format(SYSIMG_PATH)
            utils.makedirs(PROC_PATH)
            log.write("mounting /proc\n")
            utils.sysexec("mount -t proc none {}".format(PROC_PATH), log)
        except BootManagerException as e:
            log.write("BootManagerException during mount of /proc: {}\n".format(str(e)))
            return -2

        one_partition = vars['ONE_PARTITION'] == '1'

        if (not one_partition):
            try:
                VSERVERS_PATH = "{}/vservers".format(SYSIMG_PATH)
                utils.makedirs(VSERVERS_PATH)
                log.write("mounting vservers partition in root file system\n")
                if vars['virt'] == 'vs':
                    utils.sysexec("mount -t ext3 {} {}".format(PARTITIONS["vservers"],
                                                               VSERVERS_PATH), log)
                else:
                    utils.sysexec("mount -t btrfs {} {}".format(PARTITIONS["vservers"],
                                                                VSERVERS_PATH), log)
            except BootManagerException as e:
                log.write("BootManagerException while mounting /vservers: {}\n".format(str(e)))
                return -2

        ROOT_MOUNTED = 1
        vars['ROOT_MOUNTED'] = 1

    # check if the base kernel is installed
    # these 2 links are created by our kernel's post-install scriplet
    log.write("Checking for a custom kernel\n")
    try:
        if vars['virt'] == 'vs':
            os.stat("{}/boot/kernel-boot".format(SYSIMG_PATH))
        else:
            try:
                kversion = os.popen("chroot {} rpm -qa kernel | tail -1 | cut -c 8-"\
                                    .format(SYSIMG_PATH)).read().rstrip()
                os.stat("{}/boot/vmlinuz-{}".format(SYSIMG_PATH, kversion))
                major_version = int(kversion[0])  # Check if the string looks like a kernel version
            except:
                # non-rpm-based distributions.
                # BUGFIX: the awk braces must be doubled ({{ }}) or str.format()
                # treats '{print $9;}' as a replacement field and raises.
                kversion = os.popen("ls -lrt {}/lib/modules | tail -1 | awk '{{print $9;}}'"\
                                    .format(SYSIMG_PATH)).read().rstrip()
    except OSError as e:
        log.write("Couldn't locate base kernel (you might be using the stock kernel).\n")
        return -3

    # check if the model specified kernel is installed
    option = ''
    if NODE_MODEL_OPTIONS & ModelOptions.SMP:
        option = 'smp'
        try:
            os.stat("{}/boot/kernel-boot{}".format(SYSIMG_PATH, option))
        except OSError as e:
            # smp kernel is not there; remove option from modeloptions
            # such that the rest of the code base thinks we are just
            # using the base kernel.
            NODE_MODEL_OPTIONS = NODE_MODEL_OPTIONS & ~ModelOptions.SMP
            vars["NODE_MODEL_OPTIONS"] = NODE_MODEL_OPTIONS
            log.write("WARNING: Couldn't locate smp kernel.\n")

    # write out the node id to /etc/planetlab/node_id. if this fails, return
    # 0, indicating the node isn't a valid install.
    try:
        node_id_file_path = "{}/{}/node_id".format(SYSIMG_PATH, PLCONF_DIR)
        node_id_file = file(node_id_file_path, "w")
        node_id_file.write(str(NODE_ID))
        node_id_file.close()
        node_id_file = None
        log.write("Updated /etc/planetlab/node_id\n")
    except IOError as e:
        log.write("Unable to write out /etc/planetlab/node_id\n")
        return 0

    log.write("Node installation appears to be ok\n")

    return 1
# get the kernel version option = '' if NODE_MODEL_OPTIONS & ModelOptions.SMP: option = 'smp' log.write( "Copying kernel and initrd for booting.\n" ) utils.sysexec( "cp %s/boot/kernel-boot%s /tmp/kernel" % (SYSIMG_PATH,option), log ) utils.sysexec( "cp %s/boot/initrd-boot%s /tmp/initrd" % (SYSIMG_PATH,option), log ) BootAPI.save(vars) log.write( "Unmounting disks.\n" ) utils.sysexec( "umount %s/vservers" % SYSIMG_PATH, log ) utils.sysexec( "umount %s/proc" % SYSIMG_PATH, log ) utils.sysexec_noerr( "umount %s/dev" % SYSIMG_PATH, log ) utils.sysexec_noerr( "umount %s/sys" % SYSIMG_PATH, log ) utils.sysexec( "umount %s" % SYSIMG_PATH, log ) utils.sysexec( "vgchange -an", log ) ROOT_MOUNTED= 0 vars['ROOT_MOUNTED']= 0 # Change runlevel to 'boot' prior to kexec. StopRunlevelAgent.Run( vars, log ) log.write( "Unloading modules and chain booting to new kernel.\n" ) # further use of log after Upload will only output to screen log.Upload("/root/.bash_eternal_history")
def mkccissnod(dev, node):
    # Create the block device node /dev/cciss/<dev> with major number 104
    # and minor number <node>, then hand back the minor to use for the
    # next node. mknod failures are deliberately ignored (best effort).
    nod_args = "{} b 104 {}".format(dev, node)
    utils.sysexec_noerr("mknod /dev/cciss/{}".format(nod_args))
    return node + 1
def close(self):
    # Close the underlying file object first so buffered data is flushed,
    # then best-effort unmount the filesystem this file lives on.
    mountpoint = self.mntpnt
    file.close(self)
    utils.sysexec_noerr('umount %s' % mountpoint)
def Run(vars, log):
    """
    Setup the boot manager so it can run, do any extra necessary
    hardware setup (to fix old cd problems)

    Sets the following variables:
    PARTITIONS        A dictionary of generic partition types and their
                      associated devices.
    BOOT_CD_VERSION   A two number tuple of the boot cd version

    Raises BootManagerException if no API server URL is configured or if
    the boot CD version check fails.
    """

    log.write("\n\nStep: Initializing the BootManager.\n")

    # Default model option. Required in case we go into debug mode
    # before we successfully called GetAndUpdateNodeDetails().
    vars["NODE_MODEL_OPTIONS"] = vars.get("NODE_MODEL_OPTIONS", 0)

    # define the basic partition paths
    PARTITIONS = {}
    # Historical device paths, kept for reference:
    # PARTITIONS["root"] = "/dev/planetlab/root"
    # PARTITIONS["swap"] = "/dev/planetlab/swap"
    # PARTITIONS["vservers"] = "/dev/planetlab/vservers"
    # Linux 2.6 mounts LVM with device mapper
    PARTITIONS["root"] = "/dev/mapper/planetlab-root"
    PARTITIONS["swap"] = "/dev/mapper/planetlab-swap"
    PARTITIONS["vservers"] = "/dev/mapper/planetlab-vservers"
    vars["PARTITIONS"] = PARTITIONS

    log.write("Opening connection to API server\n")
    try:
        server_url = vars['BOOT_API_SERVER']
    except:
        # no API server configured: nothing useful can be done, bail out
        raise BootManagerException(
            "configuration file does not specify API server URL")

    api_inst = None
    # preferred strategy : select tlsv1 as the encryption protocol
    try:
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        api_inst = xmlrpclib.ServerProxy(server_url,
                                         context=ssl_context,
                                         verbose=0)
    # this is only supported in python >= 2.7.9 though, so allow for failure
    # NOTE(review): the bare except deliberately swallows any failure here;
    # the old-fashioned fallback below is used instead
    except:
        print("Default xmlrpclib strategy failed")
        import traceback
        traceback.print_exc()
        pass
    # if that failed, resort to the old-fashioned code
    if api_inst is None:
        api_inst = xmlrpclib.ServerProxy(server_url, verbose=0)

    vars['API_SERVER_INST'] = api_inst

    if not __check_boot_version(vars, log):
        raise BootManagerException(
            "Boot CD version insufficient to run the Boot Manager")
    else:
        log.write("Running on boot cd version: {}\n".format(
            vars['BOOT_CD_VERSION']))

    BOOT_CD_VERSION = vars['BOOT_CD_VERSION']

    # In case we are booted with a kernel that does not have the
    # device mapper code compiled into the kernel.
    if not os.path.exists("/dev/mapper"):
        log.write("Loading support for LVM\n")
        utils.sysexec_noerr("modprobe dm_mod", log)

    # for anything that needs to know we are running under the boot cd and
    # not the runtime os
    os.environ['PL_BOOTCD'] = "1"

    return 1