def __lvm_direct_setup(self, lvm):
    """Create the partition for an LVM service mapped directly onto a zone volume.

    Takes the first (only) device config from the LVM service, resolves
    its zone, logical disk and partition name against the spec, and
    appends a matching Partition to self.volumes.
    """
    dev_cfg = lvm.get_device_list()[0]
    zname = dev_cfg.get_zone()
    ldisk = dev_cfg.get_logical_disk()
    partname = dev_cfg.get_part_name()

    zone = self.spec.get_zone_spec_by_name(zname)
    phys_disk_id = self.spec.get_pdrive_from_zone_logical(zname, ldisk)

    # Resolve the zone volume and the physical drive so the partition
    # can be built from the zone layout.
    vol = self.spec.get_zone_volume(zname, partname)
    hd = self.disk_array.find_drive_by_id(phys_disk_id)

    # Re-use the volume's layout information (id, size, type, fs), but
    # label the partition with the logical volume's own name so it
    # describes this service rather than the zone volume.
    part = Partition()
    part.make_partition(vol.get_part_id(),
                        vol.get_part_size(zone),
                        vol.get_ptype(),
                        hd,
                        lvm.get_name(),
                        vol.get_fstype())
    self.volumes.append(part)
def __init__(self, part_id, hd, lport):
    """Build an FTS partition on drive *hd*, exposed at logical port *lport*.

    Initializes the Partition base, then records the drive, its serial
    number, and a status tracker for the FT drive.
    """
    Partition.__init__(self)
    # Size '0' with partition type 'fts' — the base class fills in the rest.
    self.make_partition(part_id, '0', 'fts', hd)
    # No FT-RAID array association until one is assigned later.
    self.__ftraid_array = None
    self.__logical_port = lport
    self.__hd = hd
    self.serialnum = hd.serialnum
    # Status object watches both the physical drive and this partition.
    self.__ftdrive_status = FtDiskPartitionStatus(hd, self)
def __init__(self):
    """Initialize a RAID partition with no array bound and default status."""
    Partition.__init__(self)
    # Not yet attached to any RAID array or port; status starts as
    # 'online' until make_partition() overwrites it from real state.
    self.raid_array = None
    self.raid_port = ''
    self.raid_status = 'online'
    self.__raid_super = None
def make_partition(self, part_id, hd, raid_array, rport='unknown', rstatus='missing'):
    """Populate this RAID partition and bind it to *raid_array*.

    The base Partition gets size "0" and partition type 'raid'; RAID
    port and status default to 'unknown'/'missing' until the caller
    knows better.
    """
    Partition.make_partition(self, part_id, "0", 'raid', hd)
    self.serialnum = hd.serialnum
    self.raid_array = raid_array
    self.raid_port = rport
    self.raid_status = rstatus
def make_partition(self, part_id, hd, raid_array,
                   rport="unknown", rstatus="missing"):
    """Fill in this RAID partition's fields from the given drive and array.

    Delegates the common fields (size "0", type "raid") to the base
    class, then records the array binding, port, status and the
    drive's serial number.
    """
    Partition.make_partition(self, part_id, "0", "raid", hd)
    self.raid_array = raid_array
    self.raid_port = rport
    self.raid_status = rstatus
    self.serialnum = hd.serialnum
def __legacy_volume_setup(self, zone):
    """Build the zone's RAID arrays, FT-RAID arrays and bare volumes.

    Registers a DiskZone for *zone*, instantiates its RAID and FT-RAID
    arrays, and — except for the 'fts' zone — creates Partition objects
    for the zone's single-device volume exports.
    """
    dz = DiskZone(zone, self.disk_array)
    self.__zone_map[dz.get_name()] = dz
    zda = dz.get_disk_array()

    # Regular RAID arrays declared by the zone.
    for array in zone.get_raid_arrays():
        dl = DeviceList(self.spec, zone_context=(zda, array))
        r = RaidArray()
        r.fill_from_system_info(dl,
                                array.get_name(),
                                array.get_rdev(),
                                array.get_fstype(),
                                array.get_ptype(),
                                array.get_layout(),
                                array.get_type(),
                                array.get_cfg_size())
        self.raid_arrays.add_array(r)
        dz.add_raid_array(r)

    # FT-RAID arrays declared by the zone.
    for ftraid in zone.get_ftraid_arrays():
        dl = DeviceList(self.spec, zone_context=(zda, ftraid))
        ftarr = FtRaidArray(dl, ftraid.get_name())
        self.__ftraid_arrays.append(ftarr)
        dz.add_ftraid_array(ftarr)

    # XXX/munirb: no further processing needed when the zone is FTS.
    # Bug 46944: otherwise an extra segstore line shows up in -l and -u
    # queries, which is wrong for FTS models because they keep segstore
    # on multiple disks.
    if zone.get_name() == 'fts':
        return

    # Volumes are single-device exports with no raid/storage service.
    for vol in zone.get_volumes():
        hd = self.disk_array.get_drive_list()[0]
        if (int(hd.portnum) == 0) and self.managed_disk(int(hd.portnum)):
            # All BOB- and VSH-based models. Don't assume the second
            # disk is the one to partition: walk every disk and
            # partition only the unmanaged ones. For Bluegill machines
            # the volume should be added just once even with multiple
            # disks present — these volumes aren't RAID'ed, so no md
            # device is created for them, and adding more would show
            # duplicate entries in rrdm_tool -l.
            # NOTE(review): append placement inside the loop mirrors
            # the unmanaged-disk branch below — confirm against the
            # original layout.
            p = ''
            for hd in self.disk_array.get_drive_list():
                if self.managed_disk(int(hd.portnum)):
                    continue
                else:
                    p = Partition()
                    p.make_partition(vol.get_part_id(),
                                     vol.get_part_size(zone),
                                     vol.get_ptype(),
                                     hd,
                                     vol.get_name(),
                                     vol.get_fstype())
                    self.volumes.append(p)
        else:
            # 1050H case: the second disk is not managed here, but will
            # be set up by hal.
            p = Partition()
            p.make_partition(vol.get_part_id(),
                             vol.get_part_size(zone),
                             vol.get_ptype(),
                             hd,
                             vol.get_name(),
                             vol.get_fstype())
            self.volumes.append(p)