Example #1
    def __start_raid_array_with_uuid(self, uuid, dry = False):
        if self.__assemble_raid_array(uuid, dry):
            for spare in self.__spare_device_list(uuid):
                dev_list = ['/dev/%s ' % spare.dev_name]
                opt_list = self.__form_mdadm_add_opt_list(spare)
                cmd_line = self.__form_mdadm_cmd_line(opt_list, dev_list)

                if dry:
                    print 'Running:', cmd_line
                else:
                    try:
                        rlog_notice('Raid Add spares: [%s]' % cmd_line)
                        run_shell_cmd(cmd_line)
                    except rrdm_error:
                        rlog_notice('Failed to add spares with command [%s]' % cmd_line)

            for invalid in self.__invalid_device_list(uuid):
                cmd_line = 'e2label /dev/%s ""' % invalid.dev_name

                if dry:
                    print 'Running:', cmd_line
                else:
                    try:
                        rlog_info('Raid clear invalid drive label: [%s]' % cmd_line)
                        run_shell_cmd(cmd_line)
                    except rrdm_error:
                        # we can ignore errors as it is very likely that the
                        # invalid disk did not have a valid e2label
                        pass

            return True
        else:
            return False
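
Neither __form_mdadm_add_opt_list nor __form_mdadm_cmd_line appears in these examples; the following is a minimal sketch (not from the original source) of what the command-line builder might look like, assuming it simply joins the option list and the device list onto a single mdadm invocation.

    # sketch only -- the real __form_mdadm_cmd_line is not shown in these examples
    def __form_mdadm_cmd_line(self, opt_list, dev_list):
        # e.g. opt_list = ['--assemble', '/dev/md0'], dev_list = ['/dev/sda1 ', '/dev/sdb1 ']
        return '/sbin/mdadm %s %s' % (' '.join(opt_list), ' '.join(dev_list))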
Example #2
    def create_raid_array(self, dry = False):
#       if self.status != 'stopped' and not dry:
#           raise rrdm_error ('RAID device %s is already running' % self.dev_name)

        if (self.found_devices == self.num_drives):
            dev_list = self.__form_mdadm_create_device_list()
            if len(dev_list) == 0:
                raise rrdm_error('Insufficient raid disks to create array [%s]' %
                                 self.dev_name)

            opt_list = self.__form_mdadm_create_opt_list()
            command = self.__form_mdadm_cmd_line(opt_list, dev_list)

            if dry:
                print 'running:', command
            else:
                try:
                    ret = run_shell_cmd(command)
                except rrdm_error:
                    try:
                        ret = run_shell_cmd('/mfg/'+command)
                    except rrdm_error:
                        raise rrdm_error('failed to start RAID with cmdline : %s' % command)
        else:
            raise rrdm_error ('Unable to create raid array with missing disks [%d/%d]' % (self.found_devices, self.num_drives))

        rlog_notice ('Created array [%s:%s]' % (self.name, self.dev_name))
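
__form_mdadm_create_opt_list and __form_mdadm_create_device_list are not shown in these examples either; a hypothetical sketch of the option builder, assuming the array object keeps its RAID level in an attribute (here called self.raid_level, which is an assumption) and that __form_mdadm_cmd_line places the options before the devices:

    # hypothetical sketch only -- the real builder is not shown in these examples
    def __form_mdadm_create_opt_list(self):
        opt_list = ['--create', '/dev/%s' % self.dev_name,   # md device to create
                    '--run',                                  # do not prompt for confirmation
                    '--level=%s' % self.raid_level,           # assumed attribute
                    '--raid-devices=%d' % self.num_drives]    # expected drive count
        return opt_list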
Example #3
    def __assemble_raid_array(self, uuid, dry):
        dev_list = self.__form_mdadm_assemble_device_list(uuid)
        opt_list = self.__form_mdadm_assemble_opt_list()
        cmd_line = self.__form_mdadm_cmd_line(opt_list, dev_list)
        started_array = False
        if dry:
            print 'Running:', cmd_line
        else:
            try:
                if len(dev_list) == 0:
                    raise rrdm_error('Insufficient raid disks to start array [%s]' % self.dev_name)
                rlog_notice('Raid Assemble: [%s]' % cmd_line)
                run_shell_cmd(cmd_line)
                started_array = True
            except rrdm_error:
                rlog_notice('Failed to start array with command [%s]' % cmd_line)
                # md often leaves some badness around when it fails an assemble
                # remove it
                self.stop_raid_array(True)

        # whether or not assembly succeeded, MD sometimes leaves stale state around,
        # so rescan our raid state.
        self.determine_array_status()

        return started_array
Example #4
    def hot_add_drive(self, hd):
        # the logical drive of the hd is the raid drive.
        # we also need to look up this disk in our raidcfg to see which
        # partition is on which disk.  note that nothing prevents a disk
        # from having multiple partitions in the same array.
        rdev_list = self.__device_list.find_devices_on_disk(hd.portnum)
        for rdev in rdev_list:
            part_num = rdev.part_id
            # fill in the raid drive based on the logical device
            # associated with the device config
            raid_drive = rdev.get_logical_device()
            disk_dev = "disk%sp%s" % (hd.portnum, part_num)
            if self.is_degraded():
                mdadm_cmd = "/sbin/mdadm --zero-superblock /dev/%s" % (disk_dev)
                rlog_debug("executing command %s" % mdadm_cmd)

                if run_shell_cmd(mdadm_cmd, False) != 0:
                    print "Failed to zero superblock on [%s]" % (disk_dev)
                else:
                    print "Successfully wiped out the superblock on [%s]" % (disk_dev)

                mdadm_cmd = "/sbin/mdadm --add -d %s /dev/%s /dev/%s" % (raid_drive, self.dev_name, disk_dev)
                rlog_debug("executing command %s" % mdadm_cmd)

                if run_shell_cmd(mdadm_cmd, False) != 0:
                    print "Failed to add drive [disk%s] to raid [%s]" % (hd.portnum, self.dev_name)
                else:
                    print "%s successfully added to array %s" % (hd.get_devname(), self.dev_name)

            if self.is_online():
                print "%s is already online in array %s" % (hd.get_devname(), self.dev_name)
Example #5
    def stop_raid_array(self, force = False):
        if force or not self.is_stopped():
            cmd_line = 'mdadm --stop /dev/%s' % self.dev_name
            try:
                run_shell_cmd(cmd_line)
            except rrdm_error:
                raise rrdm_error('failed to stop RAID with cmdline : %s' % cmd_line)
        else:
            print 'Array %s is already stopped' % self.dev_name
Example #6
    def start_raid_array(self, dry=False):
        if self.status != "stopped" and not dry:
            # if it's already running, just return ok
            return

        plist = filter(lambda dev: not dev.hd.is_missing(), self.part_list)
        uuids = map(lambda dev: dev.raid_super().uuid(), plist)
        uuids = filter(lambda u: u != None and u != "", uuids)

        # remove duplicates in uuids...
        uuids.sort()
        nuuids = []
        prev = None
        for u in uuids:
            if u != prev:
                nuuids.append(u)

            prev = u

        # keep the full per-partition uuid list for the frequency counts below
        all_uuids = uuids
        uuids = nuuids

        # get our "expected uuid"
        uuid = SystemConfig().get_array_uid(self.name)
        array_started = False
        while len(uuids) > 0:
            # first priority our uuid...
            if uuids.count(uuid) > 0:
                u = uuid
            else:
                # next priority: the uuid reported by the most partitions...
                maxu = max(map(lambda a: all_uuids.count(a), uuids))
                u = filter(lambda a: all_uuids.count(a) == maxu, uuids)[0]

            uuids.remove(u)
            if self.__start_raid_array_with_uuid(u, dry):
                array_started = True
                break

        if not array_started:
            raise rrdm_error("failed to start RAID")
        else:
            # raid array has started. If this raid array is a vecache then set the RAID
            # disable_queue_plugging sysfs param for this array
            if self.__sysfscfg_list != []:
                # Setting sysfs param to disable queueing on RAID10 writes on the VE blockstore
                try:
                    for entry in self.__sysfscfg_list:
                        cmd = "echo %s > %s/%s/%s" % (entry.value, entry.type, self.dev_name, entry.path)
                        run_shell_cmd(cmd)
                except (IOError, OSError):
                    raise rrdm_error(
                        "Could not set sysfs param disable_queue_plugging for vecache device %s" % self.dev_name
                    )
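
Note that run_shell_cmd in the other examples raises rrdm_error on failure, so the IOError/OSError handler above mainly makes sense if the sysfs attribute is written directly rather than through a shelled-out echo. A minimal sketch of that direct-write alternative, assuming entry.type holds the sysfs base path as the format string above suggests:

                # sketch only: write the sysfs attribute directly, so IOError/OSError
                # can actually surface from the open/write calls
                for entry in self.__sysfscfg_list:
                    sysfs_path = '%s/%s/%s' % (entry.type, self.dev_name, entry.path)
                    f = open(sysfs_path, 'w')
                    try:
                        f.write('%s\n' % entry.value)
                    finally:
                        f.close()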
Example #7
    def fill_from_rvbd_super(self, 
                             wait_for_device = False):
        super_path = RVBD_SUPER_PATH
        if not exists(super_path):
            super_path = RVBD_MFG_SUPER_PATH
            if not exists(super_path):
                raise rrdm_error('Unable to locate rvbd_super tool.')

        retries = 0
        if wait_for_device:
            while not exists(self.dev_name) and retries < 3:
                sleep(1)
                retries += 1

        if not exists(self.dev_name):
            raise rrdm_error('Device does not exist %s' % self.dev_name)

        self.super_path = super_path

        cmdline = '%s -v %s' % (super_path, self.dev_name)
        try:
            output = run_shell_cmd(cmdline, True)
        except rrdm_error:
            raise rrdm_error('Unable to execute rvbd super tool.')

        if output == '':
            raise rrdm_error('No output returned from rvbd super tool.')

        ver_kvp = output.split('=')
        if ver_kvp[0] != 'version':
            raise rrdm_error('Invalid output returned from rvbd super tool')

        self.version = int(ver_kvp[1], 10)
        # we only handle version 1 superblocks today.
        # we should probably abstract these routines into a class later if we
        # need to add more versions.
        if self.version == 1:
            # we have a valid SB version, so just fetch the superblock contents.
            cmdline = '%s -g %s' % (super_path, self.dev_name)
            output = run_shell_cmd(cmdline, True)
            try:
                sb_lines = output.split('\n')
                for line in sb_lines:
                    sb_kvp = line.split('=')
                    self.update_from_kvp(sb_kvp)
            except IndexError:
                raise rrdm_error('invalid SB output returned from rvbd_super')
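
update_from_kvp is not shown in these examples; a hypothetical sketch, assuming each rvbd_super output line is a simple key=value pair cached in the same __sb_kvp dictionary that add_sb_kvp (Example #10) updates:

    # hypothetical sketch only -- the real update_from_kvp is not shown in these examples
    def update_from_kvp(self, kvp):
        # kvp is one output line split on '=', e.g. ['serial', 'ABC123'];
        # indexing kvp[1] raises IndexError on malformed lines, which the caller handles
        self.__sb_kvp[kvp[0].strip()] = kvp[1].strip()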
Example #8
def hal_init():
    """ Initializing global variables """

    global model
    global card_type

    db = MgmtDB.MgmtDB('/config/mfg/mfdb')
    model = db.get_value('/rbt/mfd/model')

    if (model == '3000' or 
        model == '3020' or model == '3510' or
        model == '3520' or model == '5000' or
        model == '5010' or model == '5520' or
        model == '6020' or model == '9200'):
        card_type = 'lsi'
    elif (model == '6520' or model == '6120'):
        card_type = '3ware'
    else:
        try:
            output = run_shell_cmd('/opt/hal/bin/raid/rrdm_tool.py --uses-sw-raid', True)
            if output == 'True':
                card_type = 'swraid'
        except rrdm_error:
            card_type = 'None'

    db.close()
Example #9
def exec_rrdm_tool(command):
    global rrdm_tool_py
    try:
        output = run_shell_cmd('%s %s' % (rrdm_tool_py, command), True)
    except rrdm_error, what:
        print 'Failed to execute command %s: %s' % (command, what)
        exit(1)
Example #10
    def add_sb_kvp(self, kvp):
        if len(kvp) != 2:
            raise rrdm_error('Invalid key value pair parameter')

        cmdline = '%s -a %s=%s %s' % (self.super_path, kvp[0], kvp[1], self.dev_name)
        err = run_shell_cmd (cmdline)
        if err != 0:
            raise rrdm_error ('Unable to update superblock on %s' % self.dev_name)
        
        self.__sb_kvp[kvp[0]] = kvp[1]
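
A short usage sketch: Example #16 reads 'erase_block_size' back with get_sb_kvp, so a provisioning step could record it with this method roughly as follows (the sb variable and the 4096 value are illustrative placeholders, not from the original source).

# sketch only: record an erase block size on a drive's superblock;
# 'sb' is an illustrative superblock object and 4096 an illustrative value
sb.add_sb_kvp(['erase_block_size', '4096'])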
Example #11
    def sync_disk_sb (self):
        retries = 0
        while retries < 5 and not exists (self.dev_name):
            sleep (1)
            retries = retries + 1

        cmdline = '%s -u -s %s -p %d -r %d %s' % (self.super_path, self.serial,
                                                  self.port_num, self.raid_port_num, self.dev_name)
        err = run_shell_cmd(cmdline)
        if err != 0:
            raise rrdm_error('Unable to update superblock on %s' % self.dev_name)
Example #12
    def add(self):
        if not self.is_ok():
            array_name = self.raid_array.get_devname()
            disk_dev = "disk%sp%s" % (self.hd.portnum, self.part_id)
            raid_drive = self.hd.portnum

            mdadm_cmd = "/sbin/mdadm --zero-superblock /dev/%s" % (disk_dev)
            rlog_debug("executing command %s" % mdadm_cmd)

            if run_shell_cmd(mdadm_cmd, False) != 0:
                print "Failed to zero superblock on [%s]" % (disk_dev)
            else:
                print "Successfully wiped out the superblock on [%s]" % (disk_dev)

            mdadm_cmd = "/sbin/mdadm --add -d %s /dev/%s /dev/%s" % (raid_drive, array_name, disk_dev)
            rlog_debug("executing command %s" % mdadm_cmd)

            if run_shell_cmd(mdadm_cmd, False) != 0:
                print "Failed to add drive [disk%s] to raid [%s]" % (self.hd.portnum, self.dev_name)
            else:
                print "%s successfully added to array %s" % (self.hd.get_devname(), self.dev_name)
        else:
            print "%s is already online in array %s" % (self.hd.get_devname(), self.dev_name)
Example #13
    def add(self):
        if not self.is_ok():
            array_name = self.raid_array.get_devname()
            disk_dev   = 'disk%sp%s' % (self.hd.portnum, self.part_id)
            raid_drive = self.hd.portnum
            mdadm_cmd='/sbin/mdadm --add -d %s /dev/%s /dev/%s' % (raid_drive, array_name, disk_dev)
            rlog_debug ('executing command %s' % mdadm_cmd)

            if run_shell_cmd(mdadm_cmd, False) != 0:
                print 'Failed to add drive [disk%s] to raid [%s]' % (self.hd.portnum, self.dev_name)      
            else:
                print '%s successfully added to array %s' % (self.hd.get_devname(),
                        self.dev_name)
        else:
            print '%s is already online in array %s' % (self.hd.get_devname(), self.dev_name)
Example #14
    def fill_appliance_serial(self):
        if exists (self.mfg_serial_file):
            try:
                tfile = open(self.mfg_serial_file, "r")
                output = tfile.read()
                tfile.close()
            except IOError:
                output = ''
        else:
            cmdline = '/opt/tms/bin/mddbreq -v %s query get - /rbt/mfd/serialnum' % self.mfdb_path
            output = run_shell_cmd (cmdline, True)

        output = output.rstrip('\n')
        if len(output) != 13:
            # XXX change to debug info.
            print 'Invalid serial number %s' % output

        self.appliance_serial = output
Example #15
    def __get_smart_info(self):
        smart_cmd = 'smartctl -d %s -i %s' % (self.__cmd_method, self.__device_name)

        try:
            output = run_shell_cmd (smart_cmd, True)
        except rrdm_error:
            raise SmartInfoException('Smart command failed [%s]' % smart_cmd)

        if output == '':
            raise SmartInfoException('Smart command failed [%s]' % smart_cmd)

        fields = {}
        for line in output.split('\n'):
            parts = line.split(":")
            if len (parts) != 2:
                # a field whose format we don't understand, so skip
                continue
            fields[parts[0].strip()] = parts[1].strip()

        return fields
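
A usage sketch from another method of the same class; 'Device Model' and 'Serial Number' are standard field labels in smartctl -i output for ATA drives, but whether the surrounding code uses exactly these keys is an assumption.

        # sketch only: pull a couple of identity fields out of the parsed output
        fields = self.__get_smart_info()
        model = fields.get('Device Model', 'unknown')
        serial = fields.get('Serial Number', 'unknown')
        print 'smart info: model=%s serial=%s' % (model, serial)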
Example #16
    def __display_drive_xml_info(self, hd):
        if hd.superblock == None:
            # XXX/munirb:
            # If there is no superblock on this disk, check whether it's an SSD.
            # If so, this is probably a bludell where the SSD has no superblock
            # because the BOOTMGR partition lives there.  Check for an MFDB node
            # that indicates the erase block size; if present use it, otherwise
            # set the block size to 0.
            if hd.get_media() == 'ssd':
                cmdline = '/opt/tms/bin/mddbreq -v %s query get - /rbt/mfd/fts/media/ssd_erase_block_sz' % self.mfdb_path
                output = run_shell_cmd (cmdline, True)

                output = output.rstrip('\n')
                if len(output) != 0:
                    block_size = output
                else:
                    block_size = '0'
            else:
                block_size = '0'
        else:
            block_size = hd.superblock.get_sb_kvp('erase_block_size')
            if block_size == None:
                block_size = '0'

        result = '<drive '
        result += 'portnum=\"%s\" ' % hd.portnum
        result += 'logical_port=\"%s\" ' % hd.get_logical_port()
        result += 'model=\"%s\" ' % hd.model
        result += 'vendor=\"%s\" ' % hd.vendor
        result += 'size=\"%s\" ' % hd.size
        result += 'serial=\"%s\" ' % hd.serialnum
        result += 'firmware=\"%s\" ' % hd.firmware
        result += 'zone=\"%s\" ' % hd.get_zone().get_name()
        result += 'media=\"%s\" ' % hd.get_media()
        result += 'disk_mode=\"%s\" ' % str(hd.get_zone().get_disk_status_mode())
        result += 'erase_block_size=\"%s\" ' % block_size
        if self.reported_disk(hd.get_logical_port()) == 1:
            # Hardcode the disk status to online for disks managed via 
            # layout settings as they have to be online
            result += 'status=\"online\"'
        else:
            result += 'status=\"%s\" ' % hd.status
        result += '>\n'
        result += '<partition_table num=\"%d\" >\n' % hd.part_tbl.get_num_partitions()
        for part in hd.part_tbl.get_partition_list():
            if not hd.is_missing():
                # a partition can only be part of a single volume (raid or fts)
                # each of the below calls will return exclusive status values
                # i.e. if raid returns a status, fts shall not
                rstatus = self.__get_array_status_by_raid_disk(hd, part.name)
                fstatus = self.__get_array_status_by_fts_disk(hd, part.name)
                if rstatus != None:
                    pstatus = rstatus
                elif fstatus != None:
                    pstatus = fstatus
                else:
                    pstatus = 'online'
            else:
                pstatus = 'missing'
            result += '<part '
            result += 'name=\"%s\" ' % part.name
            result += 'num=\"%s\" ' % part.part_id
            result += 'size_mb=\"%s\" ' % part.size_mb
            result += 'status=\"%s\" ' % pstatus
            result += '/>\n'
        result += '</partition_table>\n'

        result += '</drive>'
        return result