def import_cached(cls, cf = conf.ZPOOL_CACHE): """ Import all the pools from zpool cache Returns [ZPool] """ try: st = os.stat(cf) except: print 'can\'t access %s' % cf return None pools = [] xdr = NVPair.unpack_file(cf) for pool in NVPair.strip(xdr['value']).values(): pools.append(ZPool(pool)) return pools
def import_cached(cls, cf=conf.ZPOOL_CACHE): """ Import all the pools from zpool cache Returns [ZPool] """ try: st = os.stat(cf) except: print 'can\'t access %s' % cf return None pools = [] xdr = NVPair.unpack_file(cf) for pool in NVPair.strip(xdr['value']).values(): pools.append(ZPool(pool)) return pools
def _from_data(self, data):
    """Parse a raw vdev label blob into its components.

    Fills in self.boot_header, self.nvlist, self.data (stripped nvlist
    config) and self.ubbest (the best valid uberblock, with its root
    block pointer attached).

    Parameters:
        data: raw label bytes; layout per this code is boot header at
              8K-16K, XDR-encoded nvlist at 16K-128K, and the uberblock
              array from 128K onward.
    """
    self.boot_header = data[8 << 10: 16 << 10]
    self.nvlist = NVPair.unpack(data[16 << 10: 128 << 10])
    self.data = NVPair.strip(self.nvlist['value'])
    # Scan the uberblock array for the active (best) uberblock.
    ub_array = data[128 << 10:]
    ubbest = None
    # 'record' intentionally does not reuse the 'data' parameter name,
    # which the original code shadowed here.
    for index, record in enumerate(split_records(ub_array, UBERBLOCK_SIZE)):
        ub = UberBlock(record)
        ub.index = index
        if not ub.valid():
            continue
        if ub.better_than(ubbest):
            ubbest = ub
    # NOTE(review): if no uberblock is valid, ubbest is None and the
    # attribute access below raises AttributeError — confirm callers
    # expect that before changing it.
    # Use the saved index so we parse the block pointer only once, for
    # the winning uberblock, rather than for every candidate.
    record = get_record(ub_array, UBERBLOCK_SIZE, ubbest.index)
    ubbest.ub_rootbp = BlockPtr(record[40: 168])
    self.ubbest = ubbest
def _from_data(self, data):
    """Parse a raw vdev label blob into its components.

    Sets self.boot_header, self.nvlist, self.data (stripped nvlist
    config) and self.ubbest (best valid uberblock with its root block
    pointer attached).

    Parameters:
        data: raw label bytes; layout per this code is boot header at
              8K-16K, XDR-encoded nvlist at 16K-128K, and the uberblock
              array from 128K onward.
    """
    self.boot_header = data[8 << 10:16 << 10]
    self.nvlist = NVPair.unpack(data[16 << 10:128 << 10])
    self.data = NVPair.strip(self.nvlist['value'])
    # Walk the uberblock array looking for the active (best) uberblock.
    ub_array = data[128 << 10:]
    ubbest = None
    # Distinct loop variable: the original reused (shadowed) the 'data'
    # parameter here, which is error-prone.
    for index, record in enumerate(split_records(ub_array, UBERBLOCK_SIZE)):
        ub = UberBlock(record)
        ub.index = index
        if not ub.valid():
            continue
        if ub.better_than(ubbest):
            ubbest = ub
    # NOTE(review): if every uberblock is invalid, ubbest stays None and
    # the access below raises AttributeError — confirm callers rely on
    # that before changing it.
    # Re-fetch by saved index so only the winning uberblock's block
    # pointer is parsed, not one per candidate.
    record = get_record(ub_array, UBERBLOCK_SIZE, ubbest.index)
    ubbest.ub_rootbp = BlockPtr(record[40:168])
    self.ubbest = ubbest