Example #1
 def __init__(self, data=None):
     if data:
         self.dva = []
         dva_size = 16  # each DVA (data virtual address) is 16 bytes on disk
         for dva in split_records(data[0:dva_size * 3], dva_size):
             self.dva.append(self._parse_dva(dva))
         su = StreamUnpacker(data[dva_size * 3:])
         i = su.uint64()  # the blkptr's 64-bit properties word
         # see lib/libzfscommon/include/sys/spa.h
         self.lsize = (get_bits(i, 0, 16) + 1) << SPA_MINBLOCKSHIFT
         self.psize = (get_bits(i, 16, 16) + 1) << SPA_MINBLOCKSHIFT
         self.comp = get_bits(i, 32, 8)
         self.cksum = get_bits(i, 40, 8)
         self.type = get_bits(i, 48, 8)
         self.level = get_bits(i, 56, 5)
         if get_bits(i, 63, 1):
             self.endian = '<'  # little endian
         else:
             self.endian = '>'  # big
         self.cksum = [
             'unknown', 'on', 'off', 'label', 'gang header', 'zilog',
             'fletcher2', 'fletcher4', 'SHA-256'
         ][self.cksum]
         self.comp = ['unknown', 'on', 'off', 'lzjb'][self.comp]
         su.rewind(-24)  # skip 24 bytes of padding
         self.birth_txg, self.fill_count = su.repeat('uint64', 2)
         self.checksum = []
         for i in range(4):
             self.checksum.append(su.uint64())
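
All of these examples lean on a couple of small helpers that are not shown. Judging only from how Example #1 calls them, `get_bits` extracts a bit field (LSB first) and `split_records` chops a buffer into fixed-size chunks; the bodies below are a minimal sketch of that assumed behavior, not the project's actual code:

    def get_bits(value, pos, count):
        # Extract `count` bits of `value` starting at bit `pos` (bit 0 = LSB).
        return (value >> pos) & ((1 << count) - 1)

    def split_records(data, size):
        # Yield consecutive `size`-byte chunks of a buffer.
        for off in range(0, len(data), size):
            yield data[off:off + size]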
Example #2
    def __init__(self, objset, obj_index):
        self.objset = objset
        self.dnode = objset.get_object(obj_index)
        data = self.dnode.bonus
        f = StreamUnpacker(data)
        self.ds_dir_obj, self.ds_prev_snap_obj, self.ds_prev_snap_txg, self.ds_next_snap_obj, \
        self.ds_snapnames_zapobj, self.ds_num_children, self.ds_creation_time, self.ds_creation_txg, \
        self.ds_deadlist_obj, self.ds_used_bytes, self.ds_compressed_bytes, self.ds_uncompressed_bytes, \
        self.ds_unique_bytes, self.ds_fsid_guid, self.ds_guid, self.ds_flags = f.repeat('uint64', 16)
        self.ds_bp = BlockPtr(data[128:])  # the blkptr follows the 16 uint64 fields (16 * 8 = 128 bytes)

        self._load()
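
`StreamUnpacker` itself is external to every snippet here. The calls used above (`uint8`/`uint16`/`uint64`, `repeat`, and `rewind` with a negative offset that skips forward past padding) suggest a thin wrapper around `struct`; this is a hypothetical reconstruction under those assumptions, not the project's implementation:

    import struct

    class StreamUnpacker(object):
        # Sequentially unpacks little-endian integers from a byte buffer
        # (a sketch inferred from usage in the examples above).
        def __init__(self, data):
            self.data = data
            self.offset = 0

        def _read(self, fmt, size):
            value, = struct.unpack_from('<' + fmt, self.data, self.offset)
            self.offset += size
            return value

        def uint8(self):
            return self._read('B', 1)

        def uint16(self):
            return self._read('H', 2)

        def uint64(self):
            return self._read('Q', 8)

        def rewind(self, n):
            # Move back n bytes; rewind(-24) therefore skips 24 bytes forward,
            # which is how the examples step over padding.
            self.offset -= n

        def repeat(self, name, count):
            # Apply the named reader `count` times, e.g. repeat('uint64', 16).
            return [getattr(self, name)() for _ in range(count)]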
Example #3
    def __init__(self, vdev, data):
        if not data:
            return
        self.vdev = vdev
        su = StreamUnpacker(data)
        self.type, self.indblkshift, self.nlevels, self.nblkptr, self.bonustype, \
                   self.checksum, self.compress, pad = su.repeat('uint8', 8)
        self.type = DMU_OBJTYPE[self.type]
        self.datablkszsec, self.bonuslen = su.repeat('uint16', 2)
        su.rewind(-4)  # skip 4 bytes of padding (dn_pad2)
        self.maxblkid, self.secphys, pad = su.repeat('uint64', 3)
        bonus_offset = DNODE_CORE_SIZE + BlockPtr_SIZE * self.nblkptr
        self.blkptr = []
        for blk in split_records(data[DNODE_CORE_SIZE : bonus_offset], BlockPtr_SIZE):
            bp = BlockPtr(blk)
            if not bp.is_hole():
                self.blkptr.append(bp)
        self.bonus = data[bonus_offset : bonus_offset + self.bonuslen]

        debug('dnode type=%s nlevels=%s nblkptr=%s bonustype=%s maxblkid=%s' %
              (self.type, self.nlevels, self.nblkptr, self.bonustype, self.maxblkid))
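
The two constants used for the bonus-buffer offset match the ZFS on-disk format: the fixed dnode core is 64 bytes and a block pointer (blkptr_t) is 128 bytes, so with the usual three embedded block pointers the bonus buffer of a 512-byte dnode starts at byte 448. A quick sanity check, assuming those standard sizes:

    DNODE_CORE_SIZE = 64   # fixed header of dnode_phys_t, in bytes
    BlockPtr_SIZE = 128    # sizeof(blkptr_t) on disk

    def bonus_offset(nblkptr):
        # The bonus buffer sits right after the core and the embedded blkptrs.
        return DNODE_CORE_SIZE + BlockPtr_SIZE * nblkptr

    assert bonus_offset(3) == 448  # leaves 64 bonus bytes in a 512-byte dnode
    assert bonus_offset(1) == 192  # leaves up to 320 bonus bytes (e.g. dsl_dataset)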
Example #4
    def __init__(self, objset, obj_index):
        self.objset = objset
        self.dnode = objset.get_object(obj_index)  # dnode of this object

        # FIXME: we should check the dnode's type before any further parsing begins

        # dsl_dir and dsl_dataset are in the bonus buffer of a dnode
        data = self.dnode.bonus
        f = StreamUnpacker(data)
        self.dd_creation_time, self.dd_head_dataset_obj, self.dd_parent_obj, \
           self.dd_origin_obj, self.dd_child_dir_zapobj, \
           self.dd_used_bytes, self.dd_compressed_bytes, self.dd_uncompressed_bytes, \
           self.dd_quota, self.dd_reserved, self.dd_props_zapobj, self.dd_deleg_zapobj = f.repeat('uint64', 12)

        self._load()
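
The FIXME could be resolved with a guard before unpacking: the dnode already carries its bonus type (see the dnode example above), and dsl_dir_phys_t bonus buffers are tagged DMU_OT_DSL_DIR (value 12 in the DMU object-type enum). A sketch of such a check; the helper name is illustrative:

    DMU_OT_DSL_DIR = 12  # DMU object type of a dsl_dir_phys_t bonus buffer

    def check_bonus_type(dnode, expected):
        # Refuse to parse a bonus buffer whose declared type doesn't match.
        if dnode.bonustype != expected:
            raise ValueError('unexpected bonus type: %r' % (dnode.bonustype,))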
Example #5
 def __init__(self, data):
     if data:
         su = StreamUnpacker(data)
         self.ub_magic, self.ub_version, self.ub_txg, \
             self.ub_guid_sum, self.ub_timestamp = su.repeat('uint64', 5)
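
A natural follow-up is to validate the magic: on-disk uberblocks carry the constant 0x00bab10c ("oo-ba-bloc"), and a mismatch means the label is damaged or was written in the other byte order. A minimal sketch, assuming the fields were unpacked in native byte order:

    UBERBLOCK_MAGIC = 0x00bab10c  # "oo-ba-bloc"

    def is_valid_uberblock(ub):
        # True if the unpacked magic matches the native-endian constant.
        return getattr(ub, 'ub_magic', None) == UBERBLOCK_MAGIC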