Example #1
def validate_spec(spec,
                  disk_array=None, size_dict=None):
    if size_dict is None:
        size_dict = get_disk_list()

    if disk_array is None:
        disk_array = DiskArray()
        disk_array.fill_from_system_info(spec)

    # all the required drives must be present.
    # we can only use the missing check here, since any other status check
    # requires the arrays to be up, and we don't care about array status
    # with this command.
    for drive in disk_array.get_drive_list():
        if drive.is_missing() or drive.get_scsi_state() != 'online':
            return (False, '%s is missing or offline.' % drive.get_devname())

        if not drive.get_license():
            # fail validation for unlicensed disks, since this shouldn't
            # ever happen.
            return (False, '%s is unlicensed' % drive.get_devname())
        # both values are numeric strings, so convert them to ints
        # before comparing
        if int(spec.get_disk_size_by_id(drive.portnum)) > int(size_dict['disk%s' % drive.portnum]):
            return (False, 'Disks are the wrong size')

    # double check that we have enough disks for this spec.
    if spec.get_disk_count() > disk_array.get_num_drives():
        return (False, 'Insufficient disks for spec %s' % spec.get_name())

    return (True, 'System hardware and specification are compatible')
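
A minimal calling sketch for the validator above. The spec loader name and path are illustrative assumptions; only validate_spec and its (ok, reason) return convention come from the example itself.

# hypothetical caller; load_spec and its path are assumptions
spec = load_spec('/etc/rrdm/spec.xml')
ok, reason = validate_spec(spec)    # disk_array/size_dict default to live system info
if not ok:
    print 'spec %s rejected: %s' % (spec.get_name(), reason)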
Example #2
def validate_spec(spec,
                  disk_array=None, size_dict=None):
    if size_dict is None:
        size_dict = get_disk_list()

    if disk_array is None:
        disk_array = DiskArray()
        disk_array.fill_from_system_info(spec)

    # all the required drives must be present.
    # we can only use the missing check here, since any other status check
    # requires the arrays to be up, and we don't care about array status
    # with this command.
    for drive in disk_array.get_drive_list():
        if drive.is_missing() or drive.get_scsi_state() != 'online':
            return (False, '%s is missing or offline.' % drive.get_devname())

        # test whether the physical disk's media type (hdd or ssd) matches
        # what's in the spec file.
        # this is a two step check: first get the drive's media type via
        # disk_info_map.get_disk_info(drive.get_devname()).get_media(),
        # then pass that into the .is_media_valid method of the matching
        # disk zone spec for that port.
        # this will probably bail on legacy specs that don't use disk
        # zones or maps.
        try:
            cur_media = disk_info_map.get_disk_info(drive.get_devname()).get_media()
            if not spec.get_disk_zone_spec(drive.portnum).is_media_valid(cur_media):
                return (False, '%s is wrong type.' % drive.get_devname())
        except Exception:
            # legacy specs land here; log it and skip the media check
            print 'Error getting media for %s: legacy spec error for spec %s' % \
                  (drive.get_devname(), spec.name)
            
        if not drive.get_license():
            # fail validation for unlicensed disks, since this shouldn't
            # ever happen.
            return (False, '%s is unlicensed' % drive.get_devname())
        # both values are numeric strings, so convert them to ints
        # before comparing
        if int(spec.get_disk_size_by_id(drive.portnum)) > int(size_dict['disk%s' % drive.portnum]):
            return (False, 'Disks are the wrong size')

    # double check that we have enough disks for this spec.
    if spec.get_disk_count() > disk_array.get_num_drives():
        return (False, 'Insufficient disks for spec %s' % spec.get_name())

    return (True, 'System hardware and specification are compatible')
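
The media check that Example #2 adds can also be pulled out into a helper so the legacy-spec fallback is explicit. This is only a sketch built from the same calls used above; the helper name and the choice of Exception as the caught type are assumptions, since the bare except in the example does not say what legacy specs actually raise.

def media_matches_spec(spec, drive):
    # sketch of the media check as a standalone helper: True when the
    # drive's media type satisfies the spec's disk zone, and True (after
    # logging) for legacy specs without zones/maps.
    try:
        cur_media = disk_info_map.get_disk_info(drive.get_devname()).get_media()
        return spec.get_disk_zone_spec(drive.portnum).is_media_valid(cur_media)
    except Exception:
        # assumed failure mode for legacy specs; mirrors the bare except above
        print 'Skipping media check for %s: legacy spec %s' % \
              (drive.get_devname(), spec.name)
        return True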
Example #3
    def __init__(self,
                 spec,
                 model,
                 mfg_mode=False,
                 profile=None):
        self.spec           = spec
        self.model          = model
        self.appliance_serial = ''
        
        if profile in [ None, '' ]:
            # get_storage_profile() returns None if there is no storage
            # profile or profiles are not supported
            self.__cur_sprofile = get_storage_profile()
        else:
            # if the user has specified a profile on the cmd line
            # we'll use that profile (most notably for the mfg option,
            # where we want to reconfigure a system for a new profile)
            self.__cur_sprofile = profile

        # if the user has not specified the storage profile
        # and we have not read it from disk, assume the storage
        # profile is the default one in the spec definition
        if self.spec.has_storage_cfg() and \
           self.__cur_sprofile in [ None, '' ]:
            self.__cur_sprofile = self.spec.get_default_scfg_name()
            if self.__cur_sprofile in [ None, '' ]:
                raise AssertionError('Unable to determine storage profile')

        # we need to associate our view of the spec
        # with the profile we are currently configured for
        self.spec.set_spec_profile_view(self.__cur_sprofile)
        
        self.fill_appliance_serial()
        self.get_rebuild_rate()

        # grab the motherboard from hwtool
        self.motherboard    = get_hwtool_motherboard()
        self.phy_mobo       = get_hwtool_phy_motherboard()
        
        # gather all associated physical disk information
        # should we fill this from the spec info, or the available system info
        # split it out as a query?
        self.disk_array = DiskArray()
        num_physical_disks = hwtool_disk_map.get_num_hard_disk()
        if num_physical_disks <= 0:
            # how did we boot?
            raise rrdm_error ('Appliance detects an invalid number' \
                              ' of disks %d.' % num_physical_disks)

        self.disk_array.fill_from_system_info (self.spec)

        self.volumes        = []
        self.raid_arrays    = RaidDevices()
        self.__ftraid_arrays = []
        self.__zone_map     = {}

        # populate a zone map with pointers to physical disks in the zones.
        #
        if not self.spec.has_storage_cfg():
            # fall back to the legacy config mode, where the zones
            # describe exported volumes
            for zone in self.spec.get_zone_list():
                self.__legacy_volume_setup(zone)
        else:
            for z in self.spec.get_zone_list():
                dz = DiskZone(z, self.disk_array)
                self.__zone_map[dz.get_name()] = dz
    
            # Storage config/ storage profile describe the set of exported
            # volumes
            lvm_list = self.spec.get_storage_cfg(). \
                             get_logical_volumes(self.__cur_sprofile)

            for lvm in lvm_list:
                if lvm.get_type() == LogicalVolumeConfig.direct_type:
                    self.__lvm_direct_setup(lvm)
                if lvm.get_type() in LogicalVolumeConfig.raid_types:
                    self.__lvm_raid_setup(lvm)
                if lvm.get_type() == LogicalVolumeConfig.ftraid_type:
                    self.__lvm_ftraid_setup(lvm)

        # update drive status based on raid status.
        self.disk_array.update_status_by_zone(self.raid_arrays.get_array_list(),
                                              self.__ftraid_arrays)
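
The profile selection at the top of __init__ follows a fixed precedence: an explicit profile argument, then the profile stored on disk, then the spec's default storage config. A small sketch of that precedence using the same helper calls; resolve_storage_profile itself is a hypothetical wrapper, not part of the class above.

def resolve_storage_profile(spec, profile=None):
    # precedence mirrored from __init__ above: explicit argument first,
    # then the profile stored on disk, then the spec's default config.
    if profile in [ None, '' ]:
        sprofile = get_storage_profile()
    else:
        sprofile = profile

    if spec.has_storage_cfg() and sprofile in [ None, '' ]:
        sprofile = spec.get_default_scfg_name()
        if sprofile in [ None, '' ]:
            raise AssertionError('Unable to determine storage profile')

    return sprofile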
Example #4
class System:
    def __init__(self,
                 spec,
                 model,
                 mfg_mode=False,
                 profile=None):
        self.spec           = spec
        self.model          = model
        self.appliance_serial = ''
        
        if profile in [ None, '' ]:
            # get_storage_profile() returns None if there is no storage
            # profile or profiles are not supported
            self.__cur_sprofile = get_storage_profile()
        else:
            # if the user has specified a profile on the cmd line
            # we'll use that profile (most notably for the mfg option,
            # where we want to reconfigure a system for a new profile)
            self.__cur_sprofile = profile

        # if the user has not specified the storage profile
        # and we have not read it from disk, assume the storage
        # profile is the default one in the spec definition
        if self.spec.has_storage_cfg() and \
           self.__cur_sprofile in [ None, '' ]:
            self.__cur_sprofile = self.spec.get_default_scfg_name()
            if self.__cur_sprofile in [ None, '' ]:
                raise AssertionError('Unable to determine storage profile')

        # we need to associate our view of the spec
        # with the profile we are currently configured for
        self.spec.set_spec_profile_view(self.__cur_sprofile)
        
        self.fill_appliance_serial()
        self.get_rebuild_rate()

        # grab the motherboard from hwtool
        self.motherboard    = get_hwtool_motherboard()
        self.phy_mobo       = get_hwtool_phy_motherboard()
        
        # gather all associated physical disk information
        # should we fill this from the spec info, or the available system info
        # split it out as a query?
        self.disk_array = DiskArray()
        num_physical_disks = hwtool_disk_map.get_num_hard_disk()
        if num_physical_disks <= 0:
            # how did we boot?
            raise rrdm_error ('Appliance detects an invalid number' \
                              ' of disks %d.' % num_physical_disks)

        self.disk_array.fill_from_system_info (self.spec)

        self.volumes        = []
        self.raid_arrays    = RaidDevices()
        self.__ftraid_arrays = []
        self.__zone_map     = {}

        # populate a zone map with pointers to physical disks in the zones.
        #
        if not self.spec.has_storage_cfg():
            # fall back to the legacy config mode, where the zones
            # describe exported volumes
            for zone in self.spec.get_zone_list():
                self.__legacy_volume_setup(zone)
        else:
            for z in self.spec.get_zone_list():
                dz = DiskZone(z, self.disk_array)
                self.__zone_map[dz.get_name()] = dz
    
            # Storage config/ storage profile describe the set of exported
            # volumes
            lvm_list = self.spec.get_storage_cfg(). \
                             get_logical_volumes(self.__cur_sprofile)

            for lvm in lvm_list:
                if lvm.get_type() == LogicalVolumeConfig.direct_type:
                    self.__lvm_direct_setup(lvm)
                if lvm.get_type() in LogicalVolumeConfig.raid_types:
                    self.__lvm_raid_setup(lvm)
                if lvm.get_type() == LogicalVolumeConfig.ftraid_type:
                    self.__lvm_ftraid_setup(lvm)

        # update drive status based on raid status.
        self.disk_array.update_status_by_zone(self.raid_arrays.get_array_list(),
                                              self.__ftraid_arrays)

    def __lvm_ftraid_setup(self,
                           lvm):
        dl = DeviceList(self.spec,
                        lvm_context = (self.disk_array, lvm))
        ftarr = FtRaidArray(dl,
                            lvm.get_name())
        self.__ftraid_arrays.append(ftarr)
        
    def __lvm_raid_setup(self,
                         lvm):
        dl = DeviceList(self.spec,
                        lvm_context = (self.disk_array, lvm))
        r = RaidArray()
        r.fill_from_system_info (dl,
                                 lvm.get_name(),
                                 lvm.get_devname(),
                                 lvm.get_fstype(),
                                 lvm.get_ptype(),
                                 lvm.get_layout(),
                                 lvm.get_type(),
                                 lvm.get_size_mb(),
                                 lvm.get_sysfscfg_list())
        self.raid_arrays.add_array(r)

    def __lvm_direct_setup(self, 
                           lvm):
        dev_cfg  = lvm.get_device_list()[0]
        zname    = dev_cfg.get_zone()
        ldisk    = dev_cfg.get_logical_disk()
        partname = dev_cfg.get_part_name()
        zone     = self.spec.get_zone_spec_by_name(zname)

        phys_disk_id = self.spec.get_pdrive_from_zone_logical(zname,
                                                              ldisk)
        # now look up the physical disk so we can make the
        # partition
        vol = self.spec.get_zone_volume(zname, partname)
        hd = self.disk_array.find_drive_by_id(phys_disk_id)

        # we re-use the volume information from the zone layout,
        # but use the logical volume name to describe this service
        p = Partition()
        p.make_partition(vol.get_part_id(),
                         vol.get_part_size(zone),
                         vol.get_ptype(),
                         hd,
                         lvm.get_name(),
                         vol.get_fstype())
        self.volumes.append(p)

    ## __legacy_volume_setup
    # For specs without a storage config, fall back to the old mode of
    # exporting volumes from the disk-array/zones
    def __legacy_volume_setup(self, 
                              zone):
        dz = DiskZone(zone, self.disk_array)
        self.__zone_map[dz.get_name()] = dz
        zda = dz.get_disk_array()

        for array in zone.get_raid_arrays():
            dl = DeviceList(self.spec,
                            zone_context = (zda, array))
            r = RaidArray()
            r.fill_from_system_info (dl,
                                     array.get_name(),
                                     array.get_rdev(),
                                     array.get_fstype(),
                                     array.get_ptype(),
                                     array.get_layout(),
                                     array.get_type(),
                                     array.get_cfg_size())
            self.raid_arrays.add_array(r)
            dz.add_raid_array(r)

        for ftraid in zone.get_ftraid_arrays():
            dl = DeviceList(self.spec,
                            zone_context = (zda, ftraid))
            ftarr = FtRaidArray(dl,
                                ftraid.get_name())
            self.__ftraid_arrays.append(ftarr)
            dz.add_ftraid_array(ftarr)

        # XXX/munirb: No need for further processing if the zone is FTS
        # Bug 46944: it would add an extra line for segstore to -l and
        # -u queries, which isn't correct for fts models as they
        # have segstore on multiple disks
        if zone.get_name() == 'fts':
            return

        for vol in zone.get_volumes():
            # volumes are single device exports, without a raid/storage 
            # service

            hd  = self.disk_array.get_drive_list()[0]
            if (int(hd.portnum) == 0) and self.managed_disk(int(hd.portnum)):
                # All BOB and VSH based models

                # Do not assume that the second disk is the disk
                # that needs partitioning; run through the loop of
                # all the disks to check if they are managed or not and
                # partition accordingly. For Bluegill machines, the volume
                # should be added just once even though there may be
                # multiple disks in the system. The reasoning is that
                # the volumes aren't RAID'ed for these models, so we
                # do not create an md device for them; this causes
                # the volumes list to show duplicate entries (rrdm_tool -l)
                p = ''
                for hd in self.disk_array.get_drive_list():
                    if self.managed_disk(int(hd.portnum)):
                        continue
                    else:
                        p = Partition()
                        p.make_partition(vol.get_part_id(),
                                         vol.get_part_size(zone),
                                         vol.get_ptype(),
                                         hd,
                                         vol.get_name(),
                                         vol.get_fstype())
                        self.volumes.append(p)
            else:
                # 1050H case, the second disk is not managed, but will 
                # be setup by hal
                p = Partition()
                p.make_partition(vol.get_part_id(),
                                 vol.get_part_size(zone),
                                 vol.get_ptype(),
                                 hd,
                                 vol.get_name(),
                                 vol.get_fstype())
                self.volumes.append(p)

    ## is_persist_lvm
    # @param lvm_name Name of a logical volume in this Storage config
    # @raise AssertionError on invalid lvm/profile
    # Check if a given logical volume is marked persistent in the storage config
    def is_persist_lvm(self,
                       lvm_name):
        if self.spec.has_storage_cfg():
            return self.spec.get_storage_cfg(). \
                        is_persist_volume(self.__cur_sprofile, lvm_name)
        else:
            # by default we'll consider all legacy volumes persistent
            return True

    def get_logical_volumes(self):
        result = ''
        result += '<logical_volumes>\n'
        for lvm in self.volumes:
            result += '<logical_volume'
            result += ' name=\"%s\"' % lvm.name
            result += ' devname=\"%s\"' % lvm.dev_name
            result += ' cfg_size_mb=\"%s\"' % lvm.size_mb
            result += ' status=\"%s\"' % lvm.status
            result += ' />\n'
        for lvm in self.raid_arrays.get_array_list():
            result += '<logical_volume'
            result += ' name=\"%s\"' % lvm.name
            result += ' devname=\"%s\"' % lvm.dev_name
            result += ' cfg_size_mb=\"%s\"' % lvm.cfg_size_mb
            result += ' status=\"%s\"' % lvm.status
            result += ' />\n'
        for lvm in self.__ftraid_arrays:
            result += '<logical_volume'
            result += ' name=\"%s\"' % lvm.get_name()
            result += ' devname=\"\"'
            result += ' cfg_size_mb=\"%s\"' % lvm.get_cfg_size()
            result += ' status=\"%s\"' % lvm.get_state()
            result += ' />\n'
            
        result += '</logical_volumes>\n'
        return result

    ## format_volume
    # @param vol_name Logical Volume name
    # @param verbose Display information to stdout on the format command
    #                used
    # @raise AssertionError on invalid request
    #                       *) no storage config
    #                       *) no format method defined for this volume
    #                       *) no device name for the volume
    # @raise rrdm_error on failure to format the device
    #
    # Runs the format method of the given logical volume
    # 
    def format_volume(self, 
                      vol_name,
                      verbose):
        spec = self.spec
        if not spec.has_storage_cfg():
            raise AssertionError('Model %s has no associated storage config' % \
                                 self.model)

        if spec.has_lvm_by_name(vol_name):
            lvm = spec.get_lvm_by_name(vol_name)
            dev_name = None
            if not lvm.has_format_method():
                raise AssertionError('No format method associated with' \
                                     ' volume %s' % vol_name)
            else:
                fmt_method = lvm.get_format_method()

            # we've ensured that we have a valid LogicalVolume name.
            # logical volume config entries don't always know the device name
            # associated with them, so we look through the list of system
            # objects that do have an associated device name
            for arr in self.raid_arrays.get_array_list():
                if arr.name == vol_name:
                    dev_name = arr.dev_name
                    break

            if dev_name is None:
                for arr in self.volumes:
                    if arr.name == vol_name:
                        dev_name = arr.dev_name
                        break
            if dev_name in [ None, '' ]:
                raise AssertionError('Unable to determine dev name for volume %s' \
                                     % vol_name)
            else:
                # run the format method for this volume
                fmt_method.do_format(devname = '/dev/%s' % dev_name,
                                     verbose = verbose)
        else:
            raise AssertionError('Model %s has no volume named %s' % \
                                 (self.model, vol_name))

    def update_system_metadata(self,
                               mfdb_path='/config/mfg/mfdb'):
        if self.__cur_sprofile not in [ None, '' ] and \
            self.spec.has_storage_cfg():

            if not exists(mfdb_path):
                rlog_notice('%s is not accessible, update metadata manually' % \
                            mfdb_path)
                return False

            result =  set_storage_profile(self.__cur_sprofile,
                                          mfdb_path)
            if not result:
                rlog_warning('Unable to update metadata for %s [%s]' % \
                             (self.__cur_sprofile,
                              mfdb_path))
                raise AssertionError('Unable to set storage profile in mfdb' \
                                     ' [%s]' % self.__cur_sprofile)

            
            cur_profile = self.spec.get_storage_cfg(). \
                            get_profile(self.__cur_sprofile)
            metadata_map = cur_profile.get_metadata()
            if metadata_map is not None:
                # this profile has metadata associated with it
                for md_entry in metadata_map.get_metadata():
                    set_mfdb_val(mdcfg = md_entry,
                                 mfdb_path = mfdb_path)
            
        else:
            # noop for units that don't have storage profiles. We fall back to
            # legacy mode where rbtmanufacture updates the metadata, or
            # individual hardware upgrade scripts do the metadata updates.
            pass



    # these need to reside on the same file system since we want rename to be 
    # atomic
    mfg_serial_file      = '/mfg/serial_num'
    mfdb_path            = '/config/mfg/mfdb'
    raid_rebuild_max_proc = '/proc/sys/dev/raid/speed_limit_max'

    def get_ftraid_arrays(self):
        return self.__ftraid_arrays

    ## Returns a zone object given the zone name
    # or None if the requested name does not exist.
    def get_zone_by_name(self, name):
        if name in self.__zone_map:
            return self.__zone_map[name]
        else:
            return None

    ###########################################################################
    # get_rebuild_rate
    #
    # Rebuild rate is stored in the proc filesystem for MD.
    #
    # return the system rebuild rate as a string.
    ###########################################################################
    def get_rebuild_rate(self):
        try:
            proc_file = open(self.raid_rebuild_max_proc, 'r')
            try:
                rate = proc_file.read()
            finally:
                proc_file.close()
        except (IOError, OSError):
            raise rrdm_error('Unable to read rebuild rate from proc')

        return rate.strip()

    def get_motherboard(self):
        return self.motherboard

    def get_phy_mobo(self):
        return self.phy_mobo

    ###########################################################################
    # supports_disk_led_control
    #
    # returns a boolean indicating whether this model / motherboard
    # supports per disk led control.
    #
    ###########################################################################
    def supports_disk_led_control(self):
        # only tyan 1U and tyan 3U units support disk LED control
        if self.get_motherboard() in [ "400-00100-01", "400-00300-01",
                                       "400-00300-10", "425-00140-01",
                                       "425-00205-01"]:
            return True

        return False

    ###########################################################################
    # get_led_status
    #
    # Displays the disk fault led status for each disk in the system.
    #
    ###########################################################################
    def get_led_status(self):
        if self.supports_disk_led_control():
            for disk in self.disk_array.get_drive_list():
                status = disk.get_led_state()
                print 'Disk %s Fault LED is %s' % (disk.portnum, status)
        else:
            print 'System does not have Disk LED support.'

    ###########################################################################
    # update_led_state
    #
    # Set all disk LED's in the system to the state that matches
    # the state of the physical disk.
    #
    ###########################################################################
    def update_led_state(self):
        dled_map = DiskLEDMap()

        if self.supports_disk_led_control():
            for disk in self.disk_array.get_drive_list():
                if not disk.is_online() and not dled_map.led_is_on(disk.portnum):
                    disk.turn_on_led()
                elif disk.is_online() and dled_map.led_is_on(disk.portnum):
                    disk.turn_off_led()

    def get_model(self):
        return self.model

    def get_appliance_serial(self):
        return self.appliance_serial

    ###########################################################################
    # fill_appliance_serial
    #
    # get the serial number for this appliance.  In mfg mode, the serial number
    # is put in /mfg/serial_num
    #
    ###########################################################################
    def fill_appliance_serial(self):
        if exists (self.mfg_serial_file):
            try:
                tfile = open(self.mfg_serial_file, "r")
                output = tfile.read()
                tfile.close()
            except IOError:
                output = ''
        else:
            cmdline = '/opt/tms/bin/mddbreq -v %s query get - /rbt/mfd/serialnum' % self.mfdb_path
            output = run_shell_cmd (cmdline, True)

        output = output.rstrip('\n')
        if len(output) != 13:
            # XXX change to debug info.
            print 'Invalid serial number %s' % output

        self.appliance_serial = output

    ###########################################################################
    # supports_sw_raid
    #
    # Returns true if this system (based on motherboard/model) has any 
    # software raid arrays.
    #
    ###########################################################################
    def supports_sw_raid(self):
        return self.raid_arrays.get_num_arrays() > 0

    ###########################################################################
    # hot_add_drive
    #
    # add a drive back into the system:
    # - iterate over each raid array and attempt to add the drive to it;
    #   arrays that already have the device added are skipped.
    #
    def hot_add_drive(self, hd, force_add = False):
        do_partition = True

        if not hd.is_valid_media():
            # error out when we try to use a disk that isn't appropriate
            # for the slot
            rlog_warning ('Disk %s media type is not correct.' % hd.get_devname())
            print 'Unable to add %s, incorrect drive media' % hd.get_devname()
            exit(1)

        if hd.is_invalid() and not force_add:
            # out of position disk, not an error, we just require --force
            # in order to use an out of position fts disk
            rlog_warning ('Disk %s is not a valid disk - wrong slot perhaps?' % hd.get_devname())
            print 'Disk %s is not a valid disk - wrong slot perhaps?' % hd.get_devname()
            exit(1)

        for dev in self.raid_arrays.get_array_list():
            rpart = dev.find_dev_by_hd(hd)
            if rpart is None:
                # drive is not part of the array, skip it
                continue

            dev.purge_faulty_drives()
            drv_status = dev.get_drive_raid_status(hd)
            if drv_status in [ "online", "rebuilding" ] and do_partition:
                rlog_info ('drive %s is online, skipping repartition' %
                           hd.get_devname())
                do_partition = False

        if do_partition:
            # repartition the drive and clear any labels, but only if no
            # running raid array already reports it as online/rebuilding.
            hd.partition_drive()

        # update the superblock to record that this disk has been added to
        # this appliance, and at which port.
        hd.update_rvbd_superblock(self.appliance_serial)

        for dev in self.raid_arrays.get_array_list():
            rpart = dev.find_dev_by_hd(hd)
            if rpart is not None:
                # if the disk is part of this array, try to add it
                dev.hot_add_drive(hd)

        for dev in self.__ftraid_arrays:
            ftpart = dev.find_dev_by_hd(hd)
            if ftpart is not None:
                ftpart.add()

        self.__add_disk(hd)

    ## find_sys_dev_by_hd_volname
    # @param hd HardDisk object 
    # @param vol_name name of the volume to search for on the disk
    #
    # Given a hd object and a volume name, return an object corresponding
    # to that volume on the disk, otherwise return None if the volume 
    # does not exist on the given disk
    #
    # Note this only applies to partitions that have status associated with
    # them.  Single or utility partitions don't have a status associated with
    # them
    def find_sys_dev_by_hd_volname(self, 
                                   hd, 
                                   vol_name):
        device_target = None

        # we want to find the raid/fts device that is associated with the
        # given hd
        for dev in self.raid_arrays.get_array_list():
            if dev.name == vol_name:
                device_target = dev.find_dev_by_hd(hd)

        if device_target is not None:
            return device_target

        for dev in self.__ftraid_arrays:
            if dev.get_name() == vol_name:
                device_target = dev.find_dev_by_hd(hd)

        return device_target

    ###########################################################################
    # get_info
    #
    # Displays user-visible status about the system object. A passed-in flag
    # determines whether to call the system child objects' get_info routines.
    ###########################################################################
    def get_info(self, do_children = False):
        print 'System Serial: %s' % self.appliance_serial
        print 'System Model: %s' % self.get_model()

    def get_raid_status(self, xml = False):
        self.raid_arrays.get_status(xml)

    def get_raid_detail_status (self, xml = False):
        self.raid_arrays.get_detail_status (xml)

    def start_raid_all(self):
        for r in self.raid_arrays.get_array_list():
            r.start_raid_array()

    def stop_raid_all(self):
        for r in self.raid_arrays.get_array_list():
            r.stop_raid_array()

    def __fail_disk(self, hd_target):
        dconfig_path = '/config/disk'
        dfile = '%s/disk%s_failed' % (dconfig_path, hd_target.portnum)
        try:
            open(dfile, "w").close()
        except IOError:
            raise rrdm_error ('Unable to create disk state file: %s' % dfile)

    def __add_disk(self, hd_target):
        dconfig_path = '/config/disk'
        dfile = '%s/disk%s_failed' % (dconfig_path, hd_target.portnum)

        if exists(dfile):
            remove(dfile)
            
    def fail_disk(self, hd_target):
        for r in self.raid_arrays.get_array_list():
            if self.supports_disk_led_control():
                hd_target.turn_on_led()

            rpart = r.find_dev_by_hd(hd_target)
            rlog_debug ('found %s' % rpart)
            if rpart is not None:
                rlog_notice ('Failing drive : %s' % rpart.device_name)
                try:
                    rpart.fail()
                except rrdm_error:
                    # couldn't fail the drive; it's probably already failed.
                    continue

        for ftsarr in self.__ftraid_arrays:
            ft_dev = ftsarr.find_dev_by_hd(hd_target)
            if ft_dev is not None:
                ft_dev.fail()

        try:
            self.__fail_disk(hd_target)
        except rrdm_error, what:
            rlog_warning("Could not fail disk [%s]" % hd_target)
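
A short usage sketch of the System class from Example #4. The spec loader and the model string are illustrative assumptions; the method calls themselves are the ones defined above.

# hypothetical driver code; load_spec_for_model and the model name are assumptions
spec = load_spec_for_model('model-1050h')
system = System(spec, 'model-1050h')

print system.get_logical_volumes()      # XML summary of exported volumes
system.get_raid_status()                # delegates to RaidDevices.get_status
if system.supports_disk_led_control():
    system.update_led_state()           # sync fault LEDs with disk state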