Example #1
    def get_items(self, name):
        # Serve the cached item counts if they are younger than 5 minutes.
        if name in self.cache and time.time() - self.cache[name]["last_update"] < 300:
            return self.cache[name]["items"]

        region_folder = os.path.join(
            self.world_folder,
            self.dimension_dict[self.storage_dict[name]["dimension"]])
        x1, y1, z1 = self.storage_dict[name]["pos1"]
        x2, y2, z2 = self.storage_dict[name]["pos2"]

        items = {}
        for region_x in range(x1 // 512, x2 // 512 + 1):
            for region_z in range(z1 // 512, z2 // 512 + 1):
                region = RegionFile(
                    os.path.join(region_folder,
                                 f"r.{region_x}.{region_z}.mca"))

                for chunk in region.iter_chunks():
                    try:
                        for tile_entity in chunk["Level"]["TileEntities"]:
                            if "Items" in tile_entity and x1 <= tile_entity[
                                    "x"].value <= x2 and y1 <= tile_entity[
                                        "y"].value <= y2 and z1 <= tile_entity[
                                            "z"].value <= z2:
                                items = self.get_items_from_nbt(
                                    tile_entity, items)

                    except KeyError:
                        continue

        self.cache[name] = {"last_update": time.time(), "items": items}

        return items
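
Region files span 512×512 blocks, which is why the loop above floor-divides the bounding-box corners by 512 to find every r.{x}.{z}.mca file the box touches. A minimal standalone sketch of that mapping (the helper name is ours, not part of the example); floor division also handles negative coordinates correctly:

def region_filenames_for_box(x1, z1, x2, z2):
    """Yield the r.X.Z.mca filenames overlapped by a block-space box."""
    for region_x in range(x1 // 512, x2 // 512 + 1):
        for region_z in range(z1 // 512, z2 // 512 + 1):
            yield f"r.{region_x}.{region_z}.mca"

# list(region_filenames_for_box(-10, 500, 10, 520))
# -> ['r.-1.0.mca', 'r.-1.1.mca', 'r.0.0.mca', 'r.0.1.mca']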
Example #2
def process_region_file(filename, start, stop):
    """Given a region filename, return the number of blocks of each ID in that file"""
    pieces = filename.split('.')
    rx = int(pieces[-3])
    rz = int(pieces[-2])
    
    block_data_totals = [[0]*16 for i in range(256)] # up to 16 data numbers in 256 block IDs
    
    # Does the region overlap the bounding box at all?
    if start is not None:
        selectLowX = int(start[0])
        selectLowZ = int(start[2])
        zoneHiX = (rx+1)*512-1 # according to placement on the world coordinate system
        zoneHiZ = (rz+1)*512-1
        if ( zoneHiX < selectLowX or zoneHiZ < selectLowZ ):
            return block_data_totals
    elif stop is not None:
        selectHiX = int(stop[0])
        selectHiZ = int(stop[2])
        zoneLowX = rx*512
        zoneLowZ = rz*512
        if ( zoneLowX > selectHiX or zoneLowZ > selectHiZ ):
            return block_data_totals
    
    file = RegionFile(filename)
    
    # Get all chunks
    chunks = file.get_chunks()
    print("Parsing %s... %d chunks" % (os.path.basename(filename),len(chunks)))
    for c in chunks:
        # Does the chunk overlap the bounding box at all?
        if start is not None:
            chunkHiX = rx*512 + ( (c['x']+1)*16 - 1)
            chunkHiZ = rz*512 + ( (c['z']+1)*16 - 1)
            selectLowX = int(start[0])
            selectLowZ = int(start[2])
            if ( chunkHiX < selectLowX or chunkHiZ < selectLowZ ):
                continue
        elif stop is not None:
            chunkLowX = rx*512 + c['x']*16
            chunkLowZ = rz*512 + c['z']*16
            selectHiX = int(stop[0])
            selectHiZ = int(stop[2])
            if ( chunkLowX > selectHiX or chunkLowZ > selectHiZ ):
                continue
        
        chunk = Chunk(file.get_chunk(c['x'], c['z']))
        assert chunk.get_coords() == (c['x'] + rx*32, c['z'] + rz*32)
        #print("Parsing chunk ("+str(c['x'])+", "+str(c['z'])+")")
        # Parse the blocks

        # Fast code if no start or stop coordinates are specified
        # TODO: also use this code if start/stop is specified, but the complete chunk is included
        if start is None and stop is None:
            stats_per_chunk(chunk, block_data_totals)
        else:
            # Slow code that iterates through each coordinate
            bounded_stats_per_chunk(chunk, block_data_totals, start, stop)
    
    return block_data_totals
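
A hedged driver for the function above: walk a world's region directory and sum the per-file tables into one 256×16 total. Only the usual "region/r.*.mca" directory layout is assumed here; stats_per_chunk and bounded_stats_per_chunk come from the surrounding script.

import glob
import os

def process_world(world_dir, start=None, stop=None):
    totals = [[0] * 16 for _ in range(256)]
    for mca in sorted(glob.glob(os.path.join(world_dir, "region", "r.*.mca"))):
        per_file = process_region_file(mca, start, stop)
        for block_id in range(256):
            for data in range(16):
                totals[block_id][data] += per_file[block_id][data]
    return totals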
Example #3
File: regiontests.py Project: Fenixin/NBT
    def setUp(self):
        data = b'\x00\x00\x02\x01' + 8188*b'\x00' + \
               b'\x00\x00\x00\x27\x02\x78\xda\xe3\x62\x60\x71\x49\x2c\x49\x64\x61\x60\x09\xc9\xcc\x4d' + \
               b'\x65\x80\x00\x46\x0e\x06\x16\xbf\x44\x20\x97\x25\x24\xb5\xb8\x84\x01\x00\x6b\xb7\x06\x52'
        self.length = 8235
        self.assertEqual(len(data), self.length)
        stream = BytesIO(data)
        stream.seek(0)
        self.region = RegionFile(fileobj=stream)
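
The fixture bytes follow the standard region layout: a 4096-byte location table, a 4096-byte timestamp table, then chunk data. The first location entry, b'\x00\x00\x02\x01', is a 3-byte sector offset (2) plus a 1-byte sector count (1), so the chunk payload starts at byte 8192; the 5 bytes that follow are the chunk length (0x27 = 39) and the zlib compression marker (2). A small decoding sketch (not part of the test):

from struct import unpack

entry = b'\x00\x00\x02\x01'                    # first location-table entry
offset = unpack(">I", b'\x00' + entry[:3])[0]  # sector offset: 2
count = entry[3]                               # sector count: 1
assert (offset, count) == (2, 1)
assert offset * 4096 == 8192                   # chunk data begins here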
Example #4
def process_region_file(filename, start, stop):
    """Given a region filename, return the number of blocks of each ID in that file"""
    pieces = filename.split('.')
    rx = int(pieces[1])
    rz = int(pieces[2])

    block_data_totals = [[0] * 16 for i in range(256)]  # up to 16 data numbers in 256 block IDs

    # Does the region overlap the bounding box at all?
    if start is not None:
        if ((rx + 1) * 512 - 1 < int(start[0])
                or (rz + 1) * 512 - 1 < int(start[2])):
            return block_data_totals
    elif stop is not None:
        if (rx * 512 > int(stop[0]) or rz * 512 > int(stop[2])):
            return block_data_totals

    file = RegionFile(filename)

    # Get all chunks
    chunks = file.get_chunks()
    print("Parsing %s... %d chunks" %
          (os.path.basename(filename), len(chunks)))
    for c in chunks:
        # Does the chunk overlap the bounding box at all?
        if start is not None:
            if ((c['x'] + 1) * 16 + rx * 512 - 1 < int(start[0])
                    or (c['z'] + 1) * 16 + rz * 512 - 1 < int(start[2])):
                continue
        elif stop is not None:
            if (c['x'] * 16 + rx * 512 > int(stop[0])
                    or c['z'] * 16 + rz * 512 > int(stop[2])):
                continue

        chunk = Chunk(file.get_chunk(c['x'], c['z']))
        assert chunk.get_coords() == (c['x'] + rx * 32, c['z'] + rz * 32)
        #print("Parsing chunk ("+str(c['x'])+", "+str(c['z'])+")")
        # Parse the blocks

        # Fast code if no start or stop coordinates are specified
        # TODO: also use this code if start/stop is specified, but the complete chunk is included
        if start is None and stop is None:
            stats_per_chunk(chunk, block_data_totals)
        else:
            # Slow code that iterates through each coordinate
            bounded_stats_per_chunk(chunk, block_data_totals, start, stop)

    return block_data_totals
Example #5
def main(world_path: str, check: bool = False):
    total_found = 0

    for world in WORLDS.keys():
        print(f"Checking the {world}")
        world_folder = AnvilWorldFolder(path.join(world_path, WORLDS[world]))

        regions = world_folder.regionfiles

        if len(regions) == 0:
            print(f"Couldn't find region files for the {world}, skipping")
            continue

        with Bar("Checking Regions", fill="█", max=len(regions)) as bar:
            for region_coords in regions.keys():
                region = RegionFile(regions[region_coords])
                chunks = region.get_metadata()

                for chunk in chunks:
                    chunk_x = region_coords[0] * 32 + chunk.x
                    chunk_z = region_coords[1] * 32 + chunk.z

                    nbt = world_folder.get_nbt(chunk_x, chunk_z)
                    found_errors = False
                    entities = nbt["Level"]["TileEntities"]

                    for entity in entities:
                        if not in_chunk(chunk_x, chunk_z, entity['x'],
                                        entity['z']):
                            total_found += 1
                            found_errors = True

                            # Move the entity to the (hopefully) right coordinates
                            entity["x"].value = chunk_x * 16 + (
                                to_int(entity['x']) % 16)
                            entity["z"].value = chunk_z * 16 + (
                                to_int(entity['z']) % 16)

                    if found_errors and not check:
                        region.write_chunk(chunk.x, chunk.z, nbt)

                bar.next()

        print(f"{'Found' if check else 'Fixed'} {total_found} entities with wrong coordinates")
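
in_chunk and to_int come from the surrounding project and are not shown here. Plausible versions, assuming the coordinate tags expose a numeric .value (possibly a TAG_Double that must be floored):

import math

def to_int(tag):
    # Coordinate tags may be TAG_Double; floor them to block coordinates.
    return math.floor(tag.value)

def in_chunk(chunk_x, chunk_z, x_tag, z_tag):
    # A block belongs to chunk (cx, cz) iff its coordinates floor-divide to them.
    return to_int(x_tag) // 16 == chunk_x and to_int(z_tag) // 16 == chunk_z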
Example #6
    def __init__(self, filename):
        print "Processing region file " + filename
        colormap = BlockColorMap()
        file = open(regionPath + filename, 'rb')
        region = RegionFile(fileobj=file)

        img = Image.new('RGBA', (32 * 16, 32 * 16))
        for c in region.get_chunks():
            cx = c['x']
            cz = c['z']
            chunk = Chunk(region, cx, cz)
            if chunk.status != 'postprocessed':
                continue
            for x in range(0, 16):
                for z in range(0, 16):
                    col = chunk.getColumnInfo(x, z)
                    color = colormap.getColor(col)
                    img.putpixel((cx * 16 + x, cz * 16 + z), color)
        img.save(outputPath + filename + ".png")

        file.close()
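
A hedged driver for the constructor above, assuming regionPath and outputPath are module-level globals (as the constructor implies) and that the class is called RegionRenderer (the snippet does not show its name):

import os

for name in sorted(os.listdir(regionPath)):
    if name.endswith(".mca"):
        RegionRenderer(name)  # writes outputPath + name + ".png"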
Example #7
File: Players.py Project: kamyu2/NBT
    def createPlayer(self, name):
        randX = random.randrange(self.lowerX.get(), self.higherX.get())
        randZ = random.randrange(self.lowerZ.get(), self.higherZ.get())
        regionX = math.floor(float(randX)/(32*16))
        regionZ = math.floor(float(randZ)/(32*16))
        chunkX = randX >> 4
        chunkZ = randZ >> 4
        blockX = randX & 0xf
        blockZ = randZ & 0xf
        regionName = "r.%i.%i.mca" % (regionX, regionZ)
        # Normalize chunk coordinates into the 0..31 range of this region.
        chunkX %= 32
        chunkZ %= 32
        if os.path.exists(self.region_folder + regionName):
            # Check the heightmap.
            tempRegion = RegionFile(self.region_folder + regionName)
            tempChunk = tempRegion.get_chunk(chunkX, chunkZ)
            if tempChunk is not None:
                # Chunk exists, so use the heightmap.
                player = Player(name, randX, tempChunk["Level"]["HeightMap"][self.heights[blockZ][blockX]] + 1, randZ)
            else:
                # Chunk doesn't exist, so use sea level.
                player = Player(name, randX, self.sealevel.get(), randZ)
        else:
            # No region file: use sea level.
            player = Player(name, randX, self.sealevel.get(), randZ)
        return player
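
Since regions, chunks and blocks are all power-of-two sizes, the whole coordinate decomposition used above can be written with shifts and masks; arithmetic shifts keep negative coordinates consistent with math.floor. A quick sketch (the function name is ours):

def decompose(x):
    region = x >> 9                   # 512 blocks per region
    chunk_in_region = (x >> 4) & 31   # 32 chunks per region
    block_in_chunk = x & 15           # 16 blocks per chunk
    return region, chunk_in_region, block_in_chunk

assert decompose(-1) == (-1, 31, 15)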
Example #8
    def analyse(self, region):
        logger.debug("Start Analse Region File %s", region.filename)
        # analysedChunks = {}
        try:
            region_file = RegionFile(region.path)
            # self.chunks = region_file
            for x in range(32):
                for z in range(32):
                    # start the actual chunk scanning
                    chunk = models.MCRegionFileChunk(region, x, z)
                    if chunk:
                        chunk.scan_results.extend(
                            self.scan_chunk(region_file, chunk))
                        region.chunks[(x, z)] = chunk

            # Now check for chunks sharing offsets:
            # Note: region.py will mark both overlapping chunks as bad
            # (the one stepping outside its territory as well as the
            # good one). Only a wrongly located chunk that also has an
            # overlapping flag is really a BAD chunk! Use this
            # criterion to discriminate.
            metadata = region_file.metadata
            sharing = [
                k for k in metadata
                if (metadata[k].status == STATUS_CHUNK_OVERLAPPING
                    and region.chunks[k].scan_results.isErrorExists(
                        models.CHUNK_WRONG_LOCATED))
            ]
            shared_counter = 0
            for k in sharing:
                region.chunks[k].scan_results.append(
                    models.CHUNK_SHARED_OFFSET)
                region.chunks[k].scan_results.remove(
                    models.CHUNK_WRONG_LOCATED)
                shared_counter += 1

            region.shared_offset = shared_counter
            del region_file
        except NoRegionHeader:  # The region has no header
            region.status = models.REGION_TOO_SMALL
        except IOError:
            region.status = models.REGION_UNREADABLE

        region.scan_time = time.time()
        region.scanned = True
        return region
Example #9
def process_region(reg, callback):
    reg_nbt = RegionFile(reg)

    for m in reg_nbt.get_metadata():
        chunk = reg_nbt.get_chunk(m.x, m.z)
        level = chunk['Level']
        tile_entities = level['TileEntities']

        chunk_needs_update = False

        for ent in tile_entities:
            if ent['id'].value == 'minecraft:command_block':
                if callback(ent):
                    chunk_needs_update = True

        if chunk_needs_update:
            reg_nbt.write_chunk(m.x, m.z, chunk)
    
    reg_nbt.close()
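
The callback contract above: it receives each command-block tile entity and returns True when it modified the entity, which marks the chunk for a write-back. A hedged usage sketch; 'Command' is the NBT tag command blocks store their command string in, and the file name is illustrative:

def retarget(ent):
    cmd = ent['Command'].value
    if '@e' in cmd:
        ent['Command'].value = cmd.replace('@e', '@e[type=player]')
        return True   # chunk must be rewritten
    return False

process_region('r.0.0.mca', retarget)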
Example #10
File: regiontests.py Project: Fenixin/NBT
    def test02WriteFile(self):
        chunk = self.generate_level()
        region = RegionFile(fileobj=self.stream)
        region.write_chunk(0, 0, chunk)
        self.assertEqual(region.get_size(), 3 * 4096)
        self.assertEqual(region.chunk_count(), 1)
Example #11
File: regiontests.py Project: Fenixin/NBT
class TruncatedFileTest(unittest.TestCase):
    """Test for truncated file support.
    These files should be treated as a valid region file without any stored chunk."""

    def setUp(self):
        data = b'\x00\x00\x02\x01' + 8188*b'\x00' + \
               b'\x00\x00\x00\x27\x02\x78\xda\xe3\x62\x60\x71\x49\x2c\x49\x64\x61\x60\x09\xc9\xcc\x4d' + \
               b'\x65\x80\x00\x46\x0e\x06\x16\xbf\x44\x20\x97\x25\x24\xb5\xb8\x84\x01\x00\x6b\xb7\x06\x52'
        self.length = 8235
        self.assertEqual(len(data), self.length)
        stream = BytesIO(data)
        stream.seek(0)
        self.region = RegionFile(fileobj=stream)

    def tearDown(self):
        del self.region

    def test00FileProperties(self):
        self.assertEqual(self.region.get_size(), self.length)
        self.assertEqual(self.region.chunk_count(), 1)
    
    def test01ReadChunk(self):
        """Test if a block can be read, even when the file is truncated right after the block data."""
        data = self.region.get_blockdata(0,0) # This may raise a RegionFileFormatError.
        data = BytesIO(data)
        nbt = NBTFile(buffer=data)
        self.assertEqual(nbt["Time"].value, 1)
        self.assertEqual(nbt["Name"].value, "Test")

    def test02ReplaceChunk(self):
        """Test if writing the last block in a truncated file will extend the file size to the sector boundary."""
        nbt = self.region.get_nbt(0, 0)
        self.region.write_chunk(0, 0, nbt)
        size = self.region.size
        self.assertEqual(size, self.region.get_size())
        self.assertEqual(size, 3*4096)
    
    def test03WriteChunk(self):
        nbt = generate_compressed_level(minsize = 100, maxsize = 4000)
        self.region.write_chunk(0, 1, nbt)
        self.assertEqual(self.region.get_size(), 4*4096)
        self.assertEqual(self.region.chunk_count(), 2)
        self.region.file.seek(self.length)
        unusedlength = 3*4096 - self.length
        unused = self.region.file.read(unusedlength)
        zeroes = unused.count(b'\x00')
        self.assertEqual(unusedlength, zeroes)
Example #12
def analyse_regionfile(filename, warnings=True):
    region = RegionFile(filename)

    statuscounts = Statuses()
    errors = []
    if region.size % 4096 != 0:
        errors.append(
            "File size is %d bytes, which is not a multiple of 4096" %
            region.size)
    sectorsize = region._bytes_to_sector(region.size)
    sectors = sectorsize * [None]
    if region.size == 0:
        errors.append("File size is 0 bytes")
        sectors = []
    elif sectorsize < 2:
        errors.append(
            "File size is %d bytes, too small for the 8192 byte header" %
            region.size)
    else:
        sectors[0] = "locations"
        sectors[1] = "timestamps"
    chunks = {}
    for x in range(32):
        for z in range(32):
            c = ChunkMetadata(x, z)
            (c.sectorstart, c.sectorlen, c.timestamp,
             status) = region.header[x, z]
            (c.length, c.compression, c.status) = region.chunk_headers[x, z]
            c.uncompressedlength = 0
            chunks[x, z] = c

            statuscounts.count(c.status)
            if c.status < 0:
                errors.append("chunk %d,%d has status %d: %s" % \
                    (x, z, c.status, statuscounts.get_name(c.status)))

            try:
                if c.sectorstart == 0:
                    if c.sectorlen != 0:
                        errors.append(
                            "chunk %d,%d is not created, but is %d sectors in length"
                            % (x, z, c.sectorlen))
                    if c.timestamp != 0:
                        errors.append(
                            "chunk %d,%d is not created, but has timestamp %d"
                            % (x, z, c.timestamp))
                    raise RegionFileFormatError('')
                allocatedbytes = 4096 * c.sectorlen
                if c.timestamp == 0:
                    errors.append("chunk %d,%d has no timestamp" % (x, z))
                if c.sectorstart < 2:
                    errors.append(
                        "chunk %d,%d starts at sector %d, which is in the header"
                        % (x, z, c.sectorstart))
                    raise RegionFileFormatError('')
                if 4096 * c.sectorstart >= region.size:
                    errors.append(
                        "chunk %d,%d starts at sector %d, while the file is only %d sectors"
                        % (x, z, c.sectorstart, sectorsize))
                    raise RegionFileFormatError('')
                elif 4096 * c.sectorstart + 5 > region.size:
                    # header of chunk only partially fits
                    errors.append(
                        "chunk %d,%d starts at sector %d, but only %d bytes of that sector are present in the file"
                        % (x, z, c.sectorstart, region.size - 4096 * c.sectorstart))
                    raise RegionFileFormatError('')
                elif not c.length:
                    errors.append("chunk %d,%d length is undefined." % (x, z))
                    raise RegionFileFormatError('')
                elif c.length == 1:
                    errors.append("chunk %d,%d has length 0 bytes." % (x, z))
                elif 4096 * c.sectorstart + 4 + c.length > region.size:
                    # header of chunk fits, but not the complete chunk
                    errors.append(
                        "chunk %d,%d is %d bytes in length, which is behind the file end"
                        % (x, z, c.length))
                requiredsectors = region._bytes_to_sector(c.length + 4)
                if c.sectorlen <= 0:
                    errors.append("chunk %d,%d is %d sectors in length" %
                                  (x, z, c.sectorlen))
                    raise RegionFileFormatError('')
                if c.compression == 0:
                    errors.append(
                        "chunk %d,%d is uncompressed. This is deprecated." %
                        (x, z))
                elif c.compression == 1:
                    errors.append(
                        "chunk %d,%d uses GZip compression. This is deprecated."
                        % (x, z))
                elif c.compression > 2:
                    errors.append(
                        "chunk %d,%d uses an unknown compression type (%d)." %
                        (x, z, c.compression))
                if c.length + 4 > allocatedbytes:  # TODO 4 or 5?
                    errors.append("chunk %d,%d is %d bytes (4+1+%d) and requires %d sectors, " \
                        "but only %d %s allocated" % \
                        (x, z, c.length+4, c.length-1, requiredsectors, c.sectorlen, \
                        "sector is" if (c.sectorlen == 1) else "sectors are"))
                elif c.length + 4 + 4096 == allocatedbytes:
                    # If the block fits in exactly n sectors, Minecraft seems to allocate n+1 sectors.
                    # Treat this as a warning instead of an error.
                    if warnings:
                        errors.append("chunk %d,%d is %d bytes (4+1+%d) and requires %d %s, " \
                            "but %d sectors are allocated" % \
                            (x, z, c.length+4, c.length-1, requiredsectors, \
                            "sector" if (requiredsectors == 1) else "sectors", c.sectorlen))
                elif c.sectorlen > requiredsectors:
                    errors.append("chunk %d,%d is %d bytes (4+1+%d) and requires %d %s, " \
                        "but %d sectors are allocated" % \
                        (x, z, c.length+4, c.length-1, requiredsectors, \
                        "sector" if (requiredsectors == 1) else "sectors", c.sectorlen))

                # Decompress chunk, check if that succeeds.
                # Check if the header and footer indicate this is a NBT file.
                # (without parsing it in detail)
                compresseddata = None
                data = None
                try:
                    if 0 <= c.compression <= 2:
                        region.file.seek(4096 * c.sectorstart + 5)
                        compresseddata = region.file.read(c.length - 1)
                except Exception as e:
                    errors.append("Error reading chunk %d,%d: %s" %
                                  (x, z, str(e)))
                if c.compression == 0:
                    data = compresseddata
                elif c.compression == 1:
                    try:
                        data = gzip.decompress(compresseddata)
                    except Exception as e:
                        errors.append(
                            "Error decompressing chunk %d,%d using gzip: %s" %
                            (x, z, str(e)))
                elif (c.compression == 2):
                    try:
                        data = zlib.decompress(compresseddata)
                    except Exception as e:
                        errors.append(
                            "Error decompressing chunk %d,%d using zlib: %s" %
                            (x, z, str(e)))
                if data:
                    c.uncompressedlength = len(data)
                    if data[0] != 10:
                        errors.append(
                            "chunk %d,%d is not a valid NBT file: outer object is not a TAG_Compound, but %r"
                            % (x, z, data[0]))
                    elif data[-1] != 0:
                        errors.append(
                            "chunk %d,%d is not a valid NBT file: file does not end with a TAG_End."
                            % (x, z))
                    else:
                        (length, ) = unpack(">H", data[1:3])
                        name = data[3:3 + length]
                        try:
                            name.decode("utf-8", "strict")
                        except Exception as e:
                            errors.append(
                                "chunk %d,%d is not a valid NBT file: name of the root TAG_Compound is not valid UTF-8: %s"
                                % (x, z, str(e)))

                if warnings:
                    # Read the unused bytes in a sector and check if all bytes are zeroed.
                    unusedlen = 4096 * c.sectorlen - (c.length + 4)
                    if unusedlen > 0:
                        try:
                            region.file.seek(4096 * c.sectorstart + 4 +
                                             c.length)
                            unused = region.file.read(unusedlen)
                            zeroes = unused.count(b'\x00')
                            if zeroes < unusedlen:
                                errors.append("%d of %d unused bytes are not zeroed in sector %d after chunk %d,%d" % \
                                    (unusedlen-zeroes, unusedlen, c.sectorstart + c.sectorlen - 1, x, z))
                        except Exception as e:
                            errors.append(
                                "Error reading tail of chunk %d,%d: %s" %
                                (x, z, str(e)))

            except RegionFileFormatError:
                pass

            if c.sectorlen and c.sectorstart:
                # Check for overlapping chunks
                for b in range(c.sectorlen):
                    m = "chunk %-2d,%-2d part %d/%d" % (x, z, b + 1,
                                                        c.sectorlen)
                    p = c.sectorstart + b
                    if p >= sectorsize:
                        errors.append("%s outside file" % (m))
                        break
                    if sectors[p] is not None:
                        errors.append("overlap in sector %d: %s and %s" %
                                      (p, sectors[p], m))
                    if (b == 0):
                        if (c.uncompressedlength > 0):
                            m += " (4+1+%d bytes compressed: %d bytes uncompressed)" % (
                                c.length - 1, c.uncompressedlength)
                        elif c.length:
                            m += " (4+1+%d bytes compressed)" % (c.length - 1)
                        else:
                            m += " (4+1+0 bytes)"
                    if sectors[p] is not None:
                        m += " (overlapping!)"
                    sectors[p] = m

    e = sectors.count(None)
    if e > 0:
        if warnings:
            errors.append("Fragementation: %d of %d sectors are unused" %
                          (e, sectorsize))
        for sector, content in enumerate(sectors):
            if content is None:
                sectors[sector] = "empty"
                if warnings:
                    region.file.seek(4096 * sector)
                    unused = region.file.read(4096)
                    zeroes = unused.count(b'\x00')
                    if zeroes < 4096:
                        errors.append(
                            "%d bytes are not zeroed in unused sector %d" %
                            (4096 - zeroes, sector))

    return errors, statuscounts, sectors, chunks
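
region._bytes_to_sector above is a private helper; the arithmetic it performs appears to be plain ceiling division by the 4096-byte sector size (an assumption: no extra rounding rules). A standalone equivalent:

def bytes_to_sectors(nbytes, sectorlen=4096):
    # Ceiling division: 1..4096 bytes -> 1 sector, 4097 -> 2 sectors.
    return (nbytes + sectorlen - 1) // sectorlen

assert bytes_to_sectors(8235) == 3   # the truncated test file spans 3 sectors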
Example #13
File: regiontests.py Project: Fenixin/NBT
class ReadWriteTest(unittest.TestCase):
    """Test to read, write and relocate chunks in a region file."""
    """
    All tests operate on regiontest.mca, is a 27-sector large region file, which looks like:
    sector 000: locations
    sector 001: timestamps
    sector 002: chunk 6 ,0  part 1/1
    sector 003: chunk 7 ,0  part 1/1 <<-- minor warning: unused bytes not zeroed
    sector 004: empty                <<-- minor warning: bytes not zeroed
    sector 005: chunk 8 ,0  part 1/1
    sector 006: chunk 9 ,0  part 1/1
    sector 007: chunk 10,0  part 1/1 <<-- deprecated encoding (gzip = 1)
    sector 008: chunk 11,0  part 1/1 <<-- unknown encoding (3)
    sector 009: chunk 2 ,0  part 1/1 <<-- uncompressed (encoding 0)
    sector 010: empty
    sector 011: empty
    sector 012: chunk 3 ,0  part 1/1 <<-- garbled data (can't be decoded)
    sector 013: chunk 1 ,0  part 1/1
    sector 014: chunk 4 ,0  part 1/3 <<-- 1 sector required, but 3 sectors allocated
    sector 015: chunk 12,0  part 1/1 <<-- part 2 of chunk 4,0 overlaps
    sector 016: chunk 4, 0  part 3/3
    sector 017: chunk 16,0  part 1/2
    sector 018: chunk 16,0  part 2/2
    sector 019: chunk 5 ,1  part 1/2 <<-- correct encoding, but not a valid NBT file
    sector 020: chunk 5 ,1  part 2/2
    sector 021: chunk 6 ,1  part 1/1 <<-- potential overlap with empty chunk 13,0
    sector 022: chunk 7 ,1  part 1/1 <<-- no timestamp
    sector 023: chunk 4 ,1  part 1/1 <<-- zero-byte length value in chunk (invalid header)
    sector 024: chunk 8 ,1  part 1/1 <<-- one-byte length value in chunk (no data)
    sector 025: chunk 3 ,1  part 1/1 <<-- 2 sectors required, but 1 sector allocated (length 4+1+4092)
    sector 026: empty                <<-- unregistered overlap from chunk 3,1
    
    in addition, the following (corrupted) chunks are defined in the header of regiontest.mca:
    sector 021: 0-sector length chunk 13,0 (and overlapping with chunk 6,1)
    sector 001: chunk 14,0 (in header)
    sector 030: chunk 15,0 (out of file)
    ----------: chunk 17,0 timestamp without data
    
    Thus:
    01. chunk 1 ,0  Readable  
    02. chunk 2 ,0  Readable   <<-- uncompressed (encoding 0)
    03. chunk 3 ,0  Unreadable <<-- garbled data (can't be decoded)
    04. chunk 4 ,0  Readable   <<-- overlaps with chunk 12,0.
    05. chunk 6 ,0  Readable 
    06. chunk 7 ,0  Readable 
    07. chunk 8 ,0  Readable 
    08. chunk 9 ,0  Readable 
    09. chunk 10,0  Readable   <<-- deprecated encoding (gzip = 1)
    10. chunk 11,0  Unreadable <<-- unknown encoding (3)
    11. chunk 12,0  Readable   <<-- Overlaps with chunk 4,0.
    12. chunk 13,0  Unreadable <<-- 0-sector length in header
    13. chunk 14,0  Unreadable <<-- in header
    14. chunk 15,0  Unreadable <<-- out of file
    15. chunk 16,0  Readable  
    --  chunk 17,0  Unreadable <<-- timestamp without data
    16. chunk 3 ,1  Readable   <<-- 2 sectors required, but 1 sector allocated (length 4+1+4092)
    17. chunk 4 ,1  Unreadable <<-- zero-byte length value in chunk (invalid header)
    18. chunk 5 ,1  Readable   <<-- Not a valid NBT file
    19. chunk 6 ,1  Readable   <<-- potential overlap with empty chunk 13,0
    20. chunk 7 ,1  Readable   <<-- no timestamp
    21. chunk 8 ,1  Unreadable <<-- one-byte length value in chunk (no data)
    """
    def setUp(self):
        self.tempdir = tempfile.mkdtemp()
        self.filename = os.path.join(self.tempdir, 'regiontest.mca')
        shutil.copy(REGIONTESTFILE, self.filename)
        self.region = RegionFile(filename=self.filename)

    def tearDown(self):
        del self.region
        try:
            shutil.rmtree(self.tempdir)
        except OSError as e:
            raise

    def test000MethodFileSize(self):
        """
        Test of the get_size() method.
        The regionfile has 27 sectors.
        """
        self.assertEqual(self.region.get_size(), 27 * 4096)

    def test001MethodChunkCount(self):
        """
        Test of the chunk_count() method.
        The regionfile has 21 chunks, including 3 out-of-file chunks.
        """
        self.assertEqual(self.region.chunk_count(), 21)

    def test002MethodGetChunkCoords(self):
        """
        Test of get_chunk_coords() method.
        Note: this function may be deprecated in a later version of NBT.
        """
        coords_and_lengths = self.region.get_chunk_coords()
        coords = []
        for coord in coords_and_lengths:
            coords.append((coord['x'], coord['z']))

        self.assertIn((1, 0), coords)
        self.assertIn((2, 0), coords)
        self.assertIn((3, 0), coords)
        self.assertIn((4, 0), coords)
        self.assertIn((6, 0), coords)
        self.assertIn((7, 0), coords)
        self.assertIn((8, 0), coords)
        self.assertIn((9, 0), coords)
        self.assertIn((10, 0), coords)
        self.assertIn((11, 0), coords)
        self.assertIn((12, 0), coords)
        self.assertIn((13, 0), coords)
        self.assertIn((14, 0), coords)  # note: length is undefined
        self.assertIn((15, 0), coords)  # note: length is undefined
        self.assertIn((16, 0), coords)
        self.assertNotIn((17, 0), coords)
        self.assertIn((3, 1), coords)
        self.assertIn((4, 1), coords)
        self.assertIn((5, 1), coords)
        self.assertIn((6, 1), coords)
        self.assertIn((7, 1), coords)
        self.assertIn((8, 1), coords)
        self.assertEqual(len(coords_and_lengths), 21)

    def test003MethodIterChunks(self):
        """
        Test of iter_chunks() method.
        """
        chunks = []
        for chunk in self.region.iter_chunks():
            self.assertIsInstance(chunk, TAG_Compound)
            chunks.append(chunk)
        self.assertEqual(len(chunks), 13)

    def test004SyntaxIterChunks(self):
        """
        Test of iter(RegionFile) syntax.
        """
        chunks = []
        for chunk in self.region:
            self.assertIsInstance(chunk, TAG_Compound)
            chunks.append(chunk)
        self.assertEqual(len(chunks), 13)

    def test005ParameterHeaders(self):
        """
        read headers of chunk 9,0:
        sector 6, 1 sector length, timestamp 1334530101, status STATUS_CHUNK_OK.
        read chunk headers of chunk 9,0:
        length (incl. compression byte): 3969 bytes, zlib (2) compression, status STATUS_CHUNK_OK.
        """
        self.assertEqual(self.region.header[9, 0],
                         (6, 1, 1334530101, RegionFile.STATUS_CHUNK_OK))
        self.assertEqual(self.region.chunk_headers[9, 0],
                         (3969, 2, RegionFile.STATUS_CHUNK_OK))

    def test006ParameterHeadersUndefinedChunk(self):
        """
        read headers & chunk_headers of chunk 2,2
        """
        self.assertEqual(self.region.header[2, 2],
                         (0, 0, 0, RegionFile.STATUS_CHUNK_NOT_CREATED))
        self.assertEqual(self.region.chunk_headers[2, 2],
                         (None, None, RegionFile.STATUS_CHUNK_NOT_CREATED))

    def test010ReadChunkZlibCompression(self):
        """
        chunk 9,0: regular Zlib compression. Should be read OK.
        """
        nbt = self.region.get_nbt(9, 0)
        self.assertIsInstance(nbt, TAG_Compound)
        # get_chunk is currently an alias of get_nbt
        chunk = self.region.get_chunk(9, 0)
        self.assertIsInstance(chunk, TAG_Compound)

    def test011ReadChunkGzipCompression(self):
        """
        chunk 10,0: deprecated GZip compression. Should be read OK.
        """
        nbt = self.region.get_nbt(10, 0)
        self.assertIsInstance(nbt, TAG_Compound)

    def test012ReadChunkUncompressed(self):
        """
        chunk 2,0: no compression. Should be read OK.
        """
        nbt = self.region.get_nbt(2, 0)
        self.assertIsInstance(nbt, TAG_Compound)

    def test013ReadUnknownEncoding(self):
        """
        chunk 11,0 has unknown encoding (3). Reading should raise a ChunkDataError.
        """
        self.assertRaises(ChunkDataError, self.region.get_nbt, 11, 0)

    def test014ReadMalformedEncoding(self):
        """
        chunk 3,0 has malformed content. Reading should raise a ChunkDataError.
        This should not raise a MalformedFileError.
        """
        self.assertRaises(ChunkDataError, self.region.get_nbt, 3, 0)

    # TODO: raise nbt.region.ChunkDataError instead of nbt.nbt.MalformedFileError, or make them the same.
    def test015ReadMalformedNBT(self):
        """
        read chunk 5,1: valid compression, but not a valid NBT file. Reading should raise a ChunkDataError.
        """
        self.assertRaises(ChunkDataError, self.region.get_nbt, 5, 1)

    def test016ReadChunkNonExistent(self):
        """
        read chunk 2,2: does not exist. Reading should raise a InconceivedChunk.
        """
        self.assertRaises(InconceivedChunk, self.region.get_nbt, 2, 2)

    def test017ReadableChunks(self):
        """
        Test which chunks are readable.
        """
        coords = []
        for cc in self.region.get_chunk_coords():
            try:
                nbt = self.region.get_chunk(cc['x'], cc['z'])
                coords.append((cc['x'], cc['z']))
            except RegionFileFormatError:
                pass

        self.assertIn((1, 0), coords)
        self.assertIn((2, 0), coords)
        self.assertNotIn((3, 0), coords)  # garbled data
        self.assertIn((4, 0),
                      coords)  # readable, despite overlapping with chunk 12,0
        self.assertIn((6, 0), coords)
        self.assertIn((7, 0), coords)
        self.assertIn((8, 0), coords)
        self.assertIn((9, 0), coords)
        self.assertIn((10, 0), coords)
        self.assertNotIn((11, 0), coords)  # unknown encoding
        self.assertIn((12, 0),
                      coords)  # readable, despite overlapping with chunk 4,1
        self.assertNotIn((13, 0), coords)  # zero-length (in header)
        self.assertNotIn((14, 0), coords)  # in header
        self.assertNotIn((15, 0), coords)  # out of file
        self.assertIn((16, 0), coords)
        self.assertNotIn((17, 0), coords)  # timestamp without data
        self.assertIn((3, 1), coords)
        self.assertNotIn((4, 1), coords)  # invalid length (in chunk)
        self.assertNotIn((5, 1), coords)  # not a valid NBT file
        self.assertIn((6, 1), coords)
        self.assertIn((7, 1), coords)
        self.assertNotIn((8, 1), coords)  # zero-length (in chunk)
        self.assertEqual(len(coords), 13)

    def test020ReadInHeader(self):
        """
        read chunk 14,0: supposedly located in the header. 
        Reading should raise a RegionHeaderError.
        """
        self.assertRaises(RegionHeaderError, self.region.get_nbt, 14, 0)
        # TODO:
        self.assertEqual(self.region.header[14, 0],
                         (1, 1, 1376433960, RegionFile.STATUS_CHUNK_IN_HEADER))
        self.assertEqual(self.region.chunk_headers[14, 0],
                         (None, None, RegionFile.STATUS_CHUNK_IN_HEADER))

    def test021ReadOutOfFile(self):
        """
        read chunk 15,0: error (out of file)
        """
        self.assertRaises(RegionHeaderError, self.region.get_nbt, 15, 0)
        self.assertEqual(
            self.region.header[15, 0],
            (30, 1, 1376433961, RegionFile.STATUS_CHUNK_OUT_OF_FILE))
        self.assertEqual(self.region.chunk_headers[15, 0],
                         (None, None, RegionFile.STATUS_CHUNK_OUT_OF_FILE))

    def test022ReadZeroLengthHeader(self):
        """
        read chunk 13,0: error (zero-length)
        """
        self.assertRaises(RegionHeaderError, self.region.get_nbt, 13, 0)
        self.assertEqual(
            self.region.header[13, 0],
            (21, 0, 1376433958, RegionFile.STATUS_CHUNK_ZERO_LENGTH))
        self.assertEqual(self.region.chunk_headers[13, 0],
                         (None, None, RegionFile.STATUS_CHUNK_ZERO_LENGTH))

    def test023ReadInvalidLengthChunk(self):
        """
        zero-byte lengths in chunk. (4,1)
        read chunk 4,1: error (invalid)
        """
        self.assertRaises(ChunkHeaderError, self.region.get_nbt, 4, 1)

    def test024ReadZeroLengthChunk(self):
        """
        read chunk 8,1: error (zero-length chunk)
        """
        self.assertRaises(ChunkHeaderError, self.region.get_nbt, 8, 1)

    def test025ReadChunkSizeExceedsSectorSize(self):
        """
        read chunk 3,1: can be read, despite the chunk content being longer than the allocated sectors.
        In general, reading should either succeed or raise a ChunkDataError.
        The status should be STATUS_CHUNK_MISMATCHED_LENGTHS.
        """
        self.assertEqual(self.region.chunk_headers[3, 1][2],
                         RegionFile.STATUS_CHUNK_MISMATCHED_LENGTHS)
        # reading should succeed, despite the overlap (next chunk is free)
        nbt = self.region.get_nbt(3, 1)

    def test026ReadChunkOverlapping(self):
        """
        chunk 4,0 and chunk 12,0 overlap: status should be STATUS_CHUNK_OVERLAPPING
        """
        self.assertEqual(self.region.chunk_headers[4, 0][2],
                         RegionFile.STATUS_CHUNK_OVERLAPPING)
        self.assertEqual(self.region.chunk_headers[12, 0][2],
                         RegionFile.STATUS_CHUNK_OVERLAPPING)

    def test030GetTimestampOK(self):
        """
        get_timestamp
        read chunk 9,0: OK
        """
        self.assertEqual(self.region.get_timestamp(9, 0), 1334530101)

    def test031GetTimestampBadChunk(self):
        """
        read chunk 15,0: OK
        Data is out-of-file, but the timestamp is still there.
        """
        self.assertEqual(self.region.get_timestamp(15, 0), 1376433961)

    def test032GetTimestampNoChunk(self):
        """
        read chunk 17,0: OK
        no data, but a timestamp
        """
        self.assertEqual(self.region.get_timestamp(17, 0), 1334530101)

    def test033GetTimestampMissing(self):
        """
        read chunk 7,1: OK
        data, but no timestamp
        """
        self.assertEqual(self.region.get_timestamp(7, 1), 0)

    def test040WriteNewChunk(self):
        """
        read chunk 0,2: InconceivedChunk
        write 1 sector chunk 0,2
        - read location (<= 026), size (001), timestamp (non-zero).
        """
        chunk_count = self.region.chunk_count()
        nbt = generate_compressed_level(minsize=100, maxsize=4000)
        self.assertRaises(InconceivedChunk, self.region.get_nbt, 0, 2)
        timebefore = int(time.time())
        self.region.write_chunk(0, 2, nbt)
        timeafter = time.time()
        header = self.region.header[0, 2]
        self.assertEqual(header[1], 1, "Chunk length must be 1 sector")
        self.assertGreaterEqual(header[0], 2,
                                "Chunk must not be written in the header")
        self.assertLessEqual(header[0], 26,
                             "Chunk must not be written in an empty sector")
        self.assertGreaterEqual(header[2], timebefore,
                                "Timestamp must be time.time()")
        self.assertLessEqual(header[2], timeafter,
                             "Timestamp must be time.time()")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)
        self.assertEqual(self.region.chunk_count(), chunk_count + 1)

    def test041WriteAndReadNewChunk(self):
        """
        write 1 sector chunk 0,2
        read chunk 0,2: OK
        - compare written and read NBT file
        """
        nbtwrite = generate_compressed_level(minsize=100, maxsize=4000)
        writebuffer = BytesIO()
        nbtwrite.write_file(buffer=writebuffer)
        nbtsize = writebuffer.seek(0, 2)
        self.region.write_chunk(0, 2, nbtwrite)
        nbtread = self.region.get_nbt(0, 2)
        readbuffer = BytesIO()
        nbtread.write_file(buffer=readbuffer)
        self.assertEqual(nbtsize, readbuffer.seek(0, 2))
        writebuffer.seek(0)
        writtendata = writebuffer.read()
        readbuffer.seek(0)
        readdata = readbuffer.read()
        self.assertEqual(writtendata, readdata)

    def test042WriteExistingChunk(self):
        """
        write 1 sector chunk 9,0 (should stay in 006)
        - read location (006) and size (001).
        """
        chunk_count = self.region.chunk_count()
        nbt = generate_compressed_level(minsize=100, maxsize=4000)
        self.region.write_chunk(9, 0, nbt)
        header = self.region.header[9, 0]
        self.assertEqual(header[0], 6, "Chunk should remain at sector 6")
        self.assertEqual(header[1], 1, "Chunk length must be 1 sector")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)
        self.assertEqual(self.region.chunk_count(), chunk_count)

    def test043DeleteChunk(self):
        """
        read chunk 6,0: OK
        unlink chunk 6,0
        - check location, size, timestamp (all should be 0)
        read chunk 6,0: InconceivedChunk
        """
        chunk_count = self.region.chunk_count()
        nbt = self.region.get_nbt(6, 0)
        self.region.unlink_chunk(6, 0)
        self.assertRaises(InconceivedChunk, self.region.get_nbt, 6, 0)
        header = self.region.header[6, 0]
        self.assertEqual(header[0], 0)
        self.assertEqual(header[1], 0)
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_NOT_CREATED)
        self.assertEqual(self.region.chunk_count(), chunk_count - 1)

    def test044UseEmptyChunks(self):
        """
        write 1 sector chunk 1,2 (should go to 004)
        write 1 sector chunk 2,2 (should go to 010)
        write 1 sector chunk 3,2 (should go to 011)
        verify file size remains 027*4096
        """
        nbt = generate_compressed_level(minsize=100, maxsize=4000)
        availablelocations = (4, 10, 11, 26)
        self.region.write_chunk(1, 2, nbt)
        self.assertIn(self.region.header[1, 2][0], availablelocations)
        self.region.write_chunk(2, 2, nbt)
        self.assertIn(self.region.header[2, 2][0], availablelocations)
        self.region.write_chunk(3, 2, nbt)
        self.assertIn(self.region.header[3, 2][0], availablelocations)

    def test050WriteNewChunk2sector(self):
        """
        write 2 sector chunk 1,2 (should go to 010-011)
        """
        nbt = generate_compressed_level(minsize=5000, maxsize=7000)
        self.region.write_chunk(1, 2, nbt)
        header = self.region.header[1, 2]
        self.assertEqual(header[1], 2, "Chunk length must be 2 sectors")
        self.assertEqual(header[0], 10, "Chunk should be placed in sector 10")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)

    def test051WriteNewChunk4096byte(self):
        """
        write 4091+5-byte (1 sector) chunk 1,2 (should go to 004)
        """
        nbt = generate_compressed_level(minsize=4091, maxsize=4091)
        self.region.write_chunk(1, 2, nbt)
        header = self.region.header[1, 2]
        chunk_header = self.region.chunk_headers[1, 2]
        if chunk_header[0] != 4092:
            raise unittest.SkipTest(
                "Can't create chunk of 4091 bytes compressed")
        self.assertEqual(header[1], 1, "Chunk length must be 2 sectors")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)

    def test052WriteNewChunk4097byte(self):
        """
        write 4092+5-byte (2 sector) chunk 1,2 (should go to 010-011)
        """
        nbt = generate_compressed_level(minsize=4092, maxsize=4092)
        self.region.write_chunk(1, 2, nbt)
        header = self.region.header[1, 2]
        chunk_header = self.region.chunk_headers[1, 2]
        if chunk_header[0] != 4093:
            raise unittest.SkipTest(
                "Can't create chunk of 4092 bytes compressed")
        self.assertEqual(header[1], 2, "Chunk length must be 2 sectors")
        self.assertEqual(header[0], 10, "Chunk should be placed in sector 10")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)

    def test053WriteNewChunkIncreaseFile(self):
        """
        write 3 sector chunk 1,2 (should go to 026-028 or 027-029) (increase file size)
        verify file size is 29*4096
        """
        nbt = generate_compressed_level(minsize=9000, maxsize=11000)
        self.region.write_chunk(1, 2, nbt)
        header = self.region.header[1, 2]
        self.assertEqual(header[1], 3, "Chunk length must be 3 sectors")
        self.assertIn(header[0], (26, 27),
                      "Chunk should be placed in sector 26 or 27")
        self.assertEqual(self.region.get_size(),
                         (header[0] + header[1]) * 4096,
                         "File size should be multiple of 4096")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)

    def test054WriteExistingChunkDecreaseSector(self):
        """
        write 1 sector chunk 16,0 (should go to existing 017) (should free sector 018)
        write 1 sector chunk 1,2 (should go to 004)
        write 1 sector chunk 2,2 (should go to 010)
        write 1 sector chunk 3,2 (should go to 011)
        write 1 sector chunk 4,2 (should go to freed 018)
        write 1 sector chunk 5,2 (should go to 026)
        verify file size remains 027*4096
        """
        nbt = generate_compressed_level(minsize=100, maxsize=4000)
        header = self.region.header[16, 0]
        self.assertEqual(header[1], 2)
        self.region.write_chunk(16, 0, nbt)
        header = self.region.header[16, 0]
        self.assertEqual(header[1], 1, "Chunk length must be 1 sector1")
        self.assertEqual(header[0], 17, "Chunk should remain in sector 17")
        # Write 1-sector chunks to check which sectors are "free"
        locations = []
        self.region.write_chunk(1, 2, nbt)
        locations.append(self.region.header[1, 2][0])
        self.region.write_chunk(2, 2, nbt)
        locations.append(self.region.header[2, 2][0])
        self.region.write_chunk(3, 2, nbt)
        locations.append(self.region.header[3, 2][0])
        self.region.write_chunk(4, 2, nbt)
        locations.append(self.region.header[4, 2][0])
        self.region.write_chunk(5, 2, nbt)
        locations.append(self.region.header[5, 2][0])
        self.assertIn(18, locations)
        # self.assertEqual(locations, [4, 10, 11, 18, 26])
        # self.assertEqual(self.region.get_size(), 27*4096)

    @unittest.skip('Test takes too much time')
    def test055WriteChunkTooLarge(self):
        """
        Chunks of size >= 256 sectors are not supported by the file format
        attempt to write a chunk 256 sectors in size
        should raise Exception
        """
        maxsize = 256 * 4096
        nbt = generate_compressed_level(minsize=maxsize + 100,
                                        maxsize=maxsize + 4000)
        self.assertRaises(ChunkDataError, self.region.write_chunk, 2, 2, nbt)

    def test060WriteExistingChunkIncreaseSectorSameLocation(self):
        """
        write 2 sector chunk 7,0 (should go to 003-004) (increase chunk size)
        """
        nbt = generate_compressed_level(minsize=5000, maxsize=7000)
        self.region.write_chunk(7, 0, nbt)
        header = self.region.header[7, 0]
        self.assertEqual(header[1], 2, "Chunk length must be 2 sectors")
        self.assertEqual(header[0], 3, "Chunk should remain in sector 3")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)
        # self.assertEqual(self.region.get_size(), 27*4096)

    def test061WriteExistingChunkCorrectSize(self):
        """
        write 2 sector chunk 3,1 (should go to 025-026) (increase sector size)
        """
        nbt = self.region.get_chunk(3, 1)
        self.region.write_chunk(3, 1, nbt)
        header = self.region.header[3, 1]
        self.assertEqual(header[1], 2, "Chunk length must be 2 sectors")
        self.assertEqual(header[0], 25, "Chunk should remain in sector 25")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)
        self.assertEqual(self.region.get_size(), 27 * 4096)

    def test062WriteExistingChunkIncreaseSectorNewLocation(self):
        """
        write 2 sector chunk 8,0 (should go to 004-005 or 010-011)
        verify chunk_count remains 18
        write 2 sector chunk 2,2 (should go to 010-011 or 004-005)
        verify that file size is not increased <= 027*4096
        verify chunk_count is 19
        """
        locations = []
        chunk_count = self.region.chunk_count()
        nbt = generate_compressed_level(minsize=5000, maxsize=7000)
        self.region.write_chunk(8, 0, nbt)
        header = self.region.header[8, 0]
        self.assertEqual(header[1], 2)  # length
        locations.append(header[0])  # location
        self.assertEqual(self.region.chunk_count(), chunk_count)
        self.region.write_chunk(2, 2, nbt)
        header = self.region.header[2, 2]
        self.assertEqual(header[1], 2)  # length
        locations.append(header[0])  # location
        self.assertEqual(sorted(locations), [4, 10])  # locations
        self.assertEqual(self.region.chunk_count(), chunk_count + 1)

    def test063WriteNewChunkFreedSectors(self):
        """
        unlink chunk 6,0
        unlink chunk 7,0
        write 3 sector chunk 2,2 (should go to 002-004) (file size should remain the same)
        """
        self.region.unlink_chunk(6, 0)
        self.region.unlink_chunk(7, 0)
        nbt = generate_compressed_level(minsize=9000, maxsize=11000)
        self.region.write_chunk(2, 2, nbt)
        header = self.region.header[2, 2]
        self.assertEqual(header[1], 3, "Chunk length must be 3 sectors")
        self.assertEqual(header[0], 2, "Chunk should be placed in sector 2")

    def test070WriteOutOfFileChunk(self):
        """
        write 1 sector chunk 13,0 (should go to 004)
        Should not go to sector 30 (out-of-file location)
        """
        nbt = generate_compressed_level(minsize=100, maxsize=4000)
        self.region.write_chunk(13, 0, nbt)
        header = self.region.header[13, 0]
        self.assertEqual(header[1], 1)  # length
        self.assertLessEqual(
            header[0], 26,
            "Previously out-of-file chunk should be written in-file")

    def test071WriteZeroLengthSectorChunk(self):
        """
        write 1 sector chunk 13,0 (should go to 004)
        Verify sector 19 remains untouched.
        """
        nbt = generate_compressed_level(minsize=100, maxsize=4000)
        self.region.write_chunk(13, 0, nbt)
        header = self.region.header[13, 0]
        self.assertEqual(header[1], 1)  # length
        self.assertNotEqual(
            header[0], 19,
            "Previously 0-length chunk should not overwrite existing chunk")

    def test072WriteOverlappingChunkLong(self):
        """
        write 2 sector chunk 4,0 (should go to 010-011) (free 014 & 016)
        verify location is NOT 014 (because of overlap)
        write 1 sector chunk 1,2 (should go to 004)
        write 1 sector chunk 2,2 (should go to freed 014)
        write 1 sector chunk 3,2 (should go to freed 016)
        write 1 sector chunk 4,2 (should go to 018)
        write 1 sector chunk 5,2 (should go to 026)
        verify file size remains 027*4096
        """
        nbt = generate_compressed_level(minsize=5000, maxsize=7000)
        self.region.write_chunk(4, 0, nbt)
        header = self.region.header[4, 0]
        self.assertEqual(header[1], 2)  # length
        self.assertNotEqual(
            header[0], 14,
            "Chunk should not be written to same location when it overlaps")
        self.assertEqual(
            header[0], 10,
            "Overlapping chunk should be relocated to the free sectors 010-011")
        # Write 1-sector chunks to check which sectors are "free"
        nbt = generate_compressed_level(minsize=100, maxsize=4000)
        locations = []
        self.region.write_chunk(1, 2, nbt)
        locations.append(self.region.header[1, 2][0])
        self.region.write_chunk(2, 2, nbt)
        locations.append(self.region.header[2, 2][0])
        self.region.write_chunk(3, 2, nbt)
        locations.append(self.region.header[3, 2][0])
        self.region.write_chunk(4, 2, nbt)
        locations.append(self.region.header[4, 2][0])
        self.region.write_chunk(5, 2, nbt)
        locations.append(self.region.header[5, 2][0])
        self.assertIn(14, locations)
        self.assertIn(16, locations)
        # self.assertEqual(locations, [4, 14, 16, 18, 26])
        # self.assertEqual(self.region.get_size(), 27*4096)

    def test073WriteOverlappingChunkSmall(self):
        """
        write 1 sector chunk 12,0 (should go to 004) ("free" 015 for use by 4,0)
        verify location is NOT 015
        verify sectors 15 and 16 are not marked as "free", but remain in use by 4,0
        """
        nbt = generate_compressed_level(minsize=100, maxsize=4000)
        self.region.write_chunk(12, 0, nbt)
        header = self.region.header[12, 0]
        self.assertEqual(header[1], 1)  # length
        self.assertNotEqual(
            header[0], 15,
            "Chunk should not be written to same location when it overlaps")
        # Write 1-sector chunks to check which sectors are "free"
        locations = []
        self.region.write_chunk(1, 2, nbt)
        locations.append(self.region.header[1, 2][0])
        self.region.write_chunk(2, 2, nbt)
        locations.append(self.region.header[2, 2][0])
        self.region.write_chunk(3, 2, nbt)
        locations.append(self.region.header[3, 2][0])
        self.assertNotIn(15, locations)
        self.assertNotIn(16, locations)
        # self.assertEqual(locations, [10, 11, 26])
        # self.assertEqual(self.region.get_size(), 27*4096)

    def test074WriteOverlappingChunkSameLocation(self):
        """
        write 1 sector chunk 12,0 (should go to 004) ("free" 015 for use by 4,0)
        write 3 sector chunk 4,0 (should stay in 014-016)
        verify file size remains <= 027*4096
        """
        nbt = generate_compressed_level(minsize=100, maxsize=4000)
        self.region.write_chunk(12, 0, nbt)
        header = self.region.header[12, 0]
        self.assertEqual(header[1], 1)  # length
        self.assertNotEqual(
            header[0], 15,
            "Chunk should not be written to same location when it overlaps")
        nbt = generate_compressed_level(minsize=9000, maxsize=11000)
        self.region.write_chunk(4, 0, nbt)
        header = self.region.header[4, 0]
        self.assertEqual(header[1], 3)  # length
        self.assertEqual(
            header[0], 14,
            "No longer overlapping chunks should be written to the same location when possible"
        )

    def test080FileTruncateLastChunkDecrease(self):
        """
        write 1 sector chunk 3,1 (should remain in 025) (free 026)
        verify file size is truncated: 26*4096 bytes
        """
        nbt = generate_compressed_level(minsize=100, maxsize=4000)
        self.region.write_chunk(3, 1, nbt)
        self.assertEqual(
            self.region.get_size(), 26 * 4096,
            "File should be truncated when last chunk is reduced in size")

    def test081FileTruncateFreeTail(self):
        """
        delete chunk 3,1 (free 025: truncate file size)
        verify file size: 25*4096 bytes
        """
        self.region.unlink_chunk(3, 1)
        self.assertEqual(
            self.region.get_size(), 25 * 4096,
            "File should be truncated when last sector(s) are freed")

    def test082FileTruncateMergeFree(self):
        """
        delete chunk 8,1 (free 024)
        delete chunk 3,1 (free 025: truncate file size, including 024)
        verify file size: 24*4096 bytes
        """
        self.region.unlink_chunk(8, 1)
        self.region.unlink_chunk(3, 1)
        self.assertEqual(
            self.region.get_size(), 24 * 4096,
            "File should be truncated as far as possible when last sector(s) are freed"
        )

    def test090DeleteNonExistingChunk(self):
        """
        delete chunk 2,2
        """
        self.region.unlink_chunk(2, 2)
        self.assertFalse(self.region.metadata[2, 2].is_created())

    def test091DeleteNonInHeaderChunk(self):
        """
        delete chunk 14,0. This should leave sector 1 untouched.
        verify sector 1 is unmodified, with the exception of timestamp for chunk 14,0.
        """
        self.region.file.seek(4096)
        before = self.region.file.read(4096)
        chunklocation = 4 * (14 + 32 * 0)
        before = before[:chunklocation] + before[chunklocation + 4:]
        self.region.unlink_chunk(14, 0)
        self.region.file.seek(4096)
        after = self.region.file.read(4096)
        after = after[:chunklocation] + after[chunklocation + 4:]
        self.assertEqual(before, after)

    def test092DeleteOutOfFileChunk(self):
        """
        delete chunk 15,0 (the out-of-file chunk)
        verify file size is not increased.
        """
        size = self.region.get_size()
        self.region.unlink_chunk(15, 0)
        self.assertLessEqual(self.region.get_size(), size)

    def test093DeleteChunkZeroTimestamp(self):
        """
        delete chunk 17,0
        verify timestamp is zeroed, both in get_timestamp() and get_metadata()
        """
        self.assertEqual(self.region.get_timestamp(17, 0), 1334530101)
        self.region.unlink_chunk(17, 0)
        self.assertEqual(self.region.get_timestamp(17, 0), 0)

    def test100WriteZeroPadding(self):
        """
        write 1 sector chunk 16,0 (should go to existing 017) (should free sector 018)
        Check if unused bytes in sector 017 and all bytes in sector 018 are zeroed.
        """
        nbt = generate_compressed_level(minsize=100, maxsize=4000)
        self.region.write_chunk(16, 0, nbt)
        header = self.region.header[16, 0]
        chunk_header = self.region.chunk_headers[16, 0]
        sectorlocation = header[0]
        oldsectorlength = 2 * 4096
        chunklength = 4 + chunk_header[0]
        unusedlength = oldsectorlength - chunklength
        self.region.file.seek(4096 * sectorlocation + chunklength)
        unused = self.region.file.read(unusedlength)
        zeroes = unused.count(b'\x00')
        self.assertEqual(
            zeroes, unusedlength,
            "All unused bytes should be zeroed after writing a chunk")

    def test101DeleteZeroPadding(self):
        """
        unlink chunk 7,1
        Check if all bytes in sector 022 are zeroed.
        """
        header = self.region.header[7, 1]
        sectorlocation = header[0]
        self.region.unlink_chunk(7, 1)
        self.region.file.seek(sectorlocation * 4096)
        unused = self.region.file.read(4096)
        zeroes = unused.count(b'\x00')
        self.assertEqual(zeroes, 4096,
                         "All bytes should be zeroed after deleting a chunk")

    def test102DeleteOverlappingNoZeroPadding(self):
        """
        unlink chunk 4,0. Due to overlapping chunks, bytes should not be zeroed.
        Check if bytes in sector 015 are not all zeroed.
        """
        header = self.region.header[4, 0]
        sectorlocation = header[0]
        self.region.unlink_chunk(4, 0)
        self.region.file.seek((sectorlocation + 1) * 4096)
        unused = self.region.file.read(4096)
        zeroes = unused.count(b'\x00')
        self.assertNotEqual(
            zeroes, 4096,
            "Bytes should not be zeroed after deleting an overlapping chunk")
        self.region.file.seek((sectorlocation) * 4096)
        unused = self.region.file.read(4096)
        zeroes = unused.count(b'\x00')
        self.assertEqual(
            zeroes, 4096,
            "Bytes should be zeroed after deleting non-overlapping portions of a chunk"
        )
        self.region.file.seek((sectorlocation + 2) * 4096)
        unused = self.region.file.read(4096)
        zeroes = unused.count(b'\x00')
        self.assertEqual(
            zeroes, 4096,
            "Bytes should be zeroed after deleting non-overlapping portions of a chunk"
        )

    def test103MoveOverlappingNoZeroPadding(self):
        """
        write 2 sector chunk 4,0 to a different location. Due to overlapping chunks, bytes should not be zeroed.
        Check if bytes in sector 015 are not all zeroed.
        """
        header = self.region.header[4, 0]
        sectorlocation = header[0]
        nbt = generate_compressed_level(minsize=5000, maxsize=7000)
        self.region.write_chunk(4, 0, nbt)
        self.region.file.seek((sectorlocation + 1) * 4096)
        unused = self.region.file.read(4096)
        zeroes = unused.count(b'\x00')
        self.assertNotEqual(
            zeroes, 4096,
            "Bytes should not be zeroed after moving an overlapping chunk")

    def test104DeleteZeroPaddingMismatchLength(self):
        """
        unlink chunk 3,1. (which has a length mismatch)
        Check if bytes in sector 025 are all zeroed.
        Check if first byte in sector 026 is not zeroed.
        """
        raise unittest.SkipTest("Test can't use this testfile")
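
The sector numbers asserted throughout these tests come straight from the region file header: a region file begins with two 4096-byte header sectors, where sector 0 holds 1024 big-endian 4-byte location entries (a 3-byte sector offset plus a 1-byte sector count) and sector 1 holds 1024 4-byte timestamps. A minimal sketch of decoding one location entry by hand, independent of the RegionFile class:

from struct import unpack

def read_location_entry(fileobj, x, z):
    """Return (sector_offset, sector_count) for chunk x,z with 0 <= x,z < 32."""
    fileobj.seek(4 * (x + 32 * z))            # sector 0: 1024 entries of 4 bytes
    (entry,) = unpack(">I", fileobj.read(4))  # one big-endian unsigned int
    return entry >> 8, entry & 0xFF           # 3-byte offset, 1-byte sector count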
Example #17
File: regiontests.py Project: Fenixin/NBT
 def setUp(self):
     self.tempdir = tempfile.mkdtemp()
     self.filename = os.path.join(self.tempdir, 'regiontest.mca')
     shutil.copy(REGIONTESTFILE, self.filename)
     self.region = RegionFile(filename=self.filename)
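
The setUp above copies the test fixture into a fresh temporary directory for every test; it is presumably paired with a tearDown like the one in the next example, which drops the RegionFile (closing its file handle) and removes the copy:

 def tearDown(self):
     del self.region
     shutil.rmtree(self.tempdir)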
Example #18
File: regiontests.py Project: Fenixin/NBT
class ReadWriteTest(unittest.TestCase):
    """Test to read, write and relocate chunks in a region file."""
    
    """
    All tests operate on regiontest.mca, a 27-sector region file, which looks like:
    sector 000: locations
    sector 001: timestamps
    sector 002: chunk 6 ,0  part 1/1
    sector 003: chunk 7 ,0  part 1/1 <<-- minor warning: unused bytes not zeroed
    sector 004: empty                <<-- minor warning: bytes not zeroed
    sector 005: chunk 8 ,0  part 1/1
    sector 006: chunk 9 ,0  part 1/1
    sector 007: chunk 10,0  part 1/1 <<-- deprecated encoding (gzip = 1)
    sector 008: chunk 11,0  part 1/1 <<-- unknown encoding (3)
    sector 009: chunk 2 ,0  part 1/1 <<-- uncompressed (encoding 0)
    sector 010: empty
    sector 011: empty
    sector 012: chunk 3 ,0  part 1/1 <<-- garbled data (can't be decoded)
    sector 013: chunk 1 ,0  part 1/1
    sector 014: chunk 4 ,0  part 1/3 <<-- 1 sector required, but 3 sectors allocated
    sector 015: chunk 12,0  part 1/1 <<-- part 2 of chunk 4,0 overlaps
    sector 016: chunk 4 ,0  part 3/3
    sector 017: chunk 16,0  part 1/2
    sector 018: chunk 16,0  part 2/2
    sector 019: chunk 5 ,1  part 1/2 <<-- correct encoding, but not a valid NBT file
    sector 020: chunk 5 ,1  part 2/2
    sector 021: chunk 6 ,1  part 1/1 <<-- potential overlap with empty chunk 13,0
    sector 022: chunk 7 ,1  part 1/1 <<-- no timestamp
    sector 023: chunk 4 ,1  part 1/1 <<-- zero-byte length value in chunk (invalid header)
    sector 024: chunk 8 ,1  part 1/1 <<-- one-byte length value in chunk (no data)
    sector 025: chunk 3 ,1  part 1/1 <<-- 2 sectors required, but 1 sector allocated (length 4+1+4092)
    sector 026: empty                <<-- unregistered overlap from chunk 3,1
    
    in addition, the following (corrupted) chunks are defined in the header of regiontest.mca:
    sector 021: 0-sector length chunk 13,0 (and overlapping with chunk 6,1)
    sector 001: chunk 14,0 (in header)
    sector 030: chunk 15,0 (out of file)
    ----------: chunk 17,0 timestamp without data
    
    Thus:
    01. chunk 1 ,0  Readable  
    02. chunk 2 ,0  Readable   <<-- uncompressed (encoding 0)
    03. chunk 3 ,0  Unreadable <<-- garbled data (can't be decoded)
    04. chunk 4 ,0  Readable   <<-- overlaps with chunk 12,0.
    05. chunk 6 ,0  Readable 
    06. chunk 7 ,0  Readable 
    07. chunk 8 ,0  Readable 
    08. chunk 9 ,0  Readable 
    09. chunk 10,0  Readable   <<-- deprecated encoding (gzip = 1)
    10. chunk 11,0  Unreadable <<-- unknown encoding (3)
    11. chunk 12,0  Readable   <<-- Overlaps with chunk 4,0.
    12. chunk 13,0  Unreadable <<-- 0-sector length in header
    13. chunk 14,0  Unreadable <<-- in header
    14. chunk 15,0  Unreadable <<-- out of file
    15. chunk 16,0  Readable  
    --  chunk 17,0  Unreadable <<-- timestamp without data
    16. chunk 3 ,1  Readable   <<-- 2 sectors required, but 1 sector allocated (length 4+1+4092)
    17. chunk 4 ,1  Unreadable <<-- zero-byte length value in chunk (invalid header)
    18. chunk 5 ,1  Readable   <<-- Not a valid NBT file
    19. chunk 6 ,1  Readable   <<-- potential overlap with empty chunk 13,0
    20. chunk 7 ,1  Readable   <<-- no timestamp
    21. chunk 8 ,1  Unreadable <<-- one-byte length value in chunk (no data)
    """

    def setUp(self):
        self.tempdir = tempfile.mkdtemp()
        self.filename = os.path.join(self.tempdir, 'regiontest.mca')
        shutil.copy(REGIONTESTFILE, self.filename)
        self.region = RegionFile(filename = self.filename)

    def tearDown(self):
        del self.region
        shutil.rmtree(self.tempdir)

    def test000MethodFileSize(self):
        """
        Test of the get_size() method.
        The regionfile has 27 sectors.
        """
        self.assertEqual(self.region.get_size(), 27*4096)

    def test001MethodChunkCount(self):
        """
        Test of the chunk_count() method.
        The regionfile has 21 chunks, including 3-out of file chunks.
        """
        self.assertEqual(self.region.chunk_count(), 21)

    def test002MethodGetChunkCoords(self):
        """
        Test of get_chunk_coords() method.
        Note: this function may be deprecated in a later version of NBT.
        """
        coords_and_lengths = self.region.get_chunk_coords()
        coords = []
        for coord in coords_and_lengths:
            coords.append((coord['x'], coord['z']))
        
        self.assertIn((1, 0), coords)
        self.assertIn((2, 0), coords)
        self.assertIn((3, 0), coords)
        self.assertIn((4, 0), coords)
        self.assertIn((6, 0), coords)
        self.assertIn((7, 0), coords)
        self.assertIn((8, 0), coords)
        self.assertIn((9, 0), coords)
        self.assertIn((10, 0), coords)
        self.assertIn((11, 0), coords)
        self.assertIn((12, 0), coords)
        self.assertIn((13, 0), coords)
        self.assertIn((14, 0), coords) # note: length is undefined
        self.assertIn((15, 0), coords) # note: length is undefined
        self.assertIn((16, 0), coords)
        self.assertNotIn((17, 0), coords)
        self.assertIn((3, 1), coords)
        self.assertIn((4, 1), coords)
        self.assertIn((5, 1), coords)
        self.assertIn((6, 1), coords)
        self.assertIn((7, 1), coords)
        self.assertIn((8, 1), coords)
        self.assertEqual(len(coords_and_lengths), 21)

    def test003MethodIterChunks(self):
        """
        Test of iter_chunks() method.
        """
        chunks = []
        for chunk in self.region.iter_chunks():
            self.assertIsInstance(chunk, TAG_Compound)
            chunks.append(chunk)
        self.assertEqual(len(chunks), 13)

    def test004SyntaxIterChunks(self):
        """
        Test of iter(RegionFile) syntax.
        """
        chunks = []
        for chunk in self.region:
            self.assertIsInstance(chunk, TAG_Compound)
            chunks.append(chunk)
        self.assertEqual(len(chunks), 13)
    
    def test005ParameterHeaders(self):
        """
        read headers of chunk 9,0: 
        sector 6, 1 sector length, timestamp 1334530101, status STATUS_CHUNK_OK.
        read chunk headers of chunk 9,0: 
        length (incl. compression byte): 3969 bytes, zlib (2) compression, status STATUS_CHUNK_OK.
        """
        self.assertEqual(self.region.header[9,0], (6, 1, 1334530101, RegionFile.STATUS_CHUNK_OK))
        self.assertEqual(self.region.chunk_headers[9,0], (3969, 2, RegionFile.STATUS_CHUNK_OK))
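        # For reference: region.header[x,z] is the 4-tuple
        # (sector offset, sector count, timestamp, status) read from the two
        # header sectors, while region.chunk_headers[x,z] is the 3-tuple
        # (chunk length incl. compression byte, compression type, status) read
        # from the 5-byte header that precedes the chunk data itself.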
    
    def test006ParameterHeadersUndefinedChunk(self):
        """
        read headers & chunk_headers of chunk 2,2
        """
        self.assertEqual(self.region.header[2,2], (0, 0, 0, RegionFile.STATUS_CHUNK_NOT_CREATED))
        self.assertEqual(self.region.chunk_headers[2,2], (None, None, RegionFile.STATUS_CHUNK_NOT_CREATED))
    
    def test010ReadChunkZlibCompression(self):
        """
        chunk 9,0: regular Zlib compression. Should be read OK.
        """
        nbt = self.region.get_nbt(9, 0)
        self.assertIsInstance(nbt, TAG_Compound)
        # get_chunk is currently an alias of get_nbt
        chunk = self.region.get_chunk(9, 0)
        self.assertIsInstance(chunk, TAG_Compound)

    def test011ReadChunkGzipCompression(self):
        """
        chunk 10,0: deprecated GZip compression. Should be read OK.
        """
        nbt = self.region.get_nbt(10, 0)
        self.assertIsInstance(nbt, TAG_Compound)

    def test012ReadChunkUncompressed(self):
        """
        chunk 2,0: no compression. Should be read OK.
        """
        nbt = self.region.get_nbt(2, 0)
        self.assertIsInstance(nbt, TAG_Compound)

    def test013ReadUnknownEncoding(self):
        """
        chunk 11,0 has unknown encoding (3). Reading should raise a ChunkDataError.
        """
        self.assertRaises(ChunkDataError, self.region.get_nbt, 11, 0)

    def test014ReadMalformedEncoding(self):
        """
        chunk 3,0 has malformed content. Reading should raise a ChunkDataError.
        This should not raise a MalformedFileError.
        """
        self.assertRaises(ChunkDataError, self.region.get_nbt, 3, 0)

    # TODO: raise nbt.region.ChunkDataError instead of nbt.nbt.MalformedFileError, or make them the same.
    def test015ReadMalformedNBT(self):
        """
        read chunk 5,1: valid compression, but not a valid NBT file. Reading should raise a ChunkDataError.
        """
        self.assertRaises(ChunkDataError, self.region.get_nbt, 5, 1)

    def test016ReadChunkNonExistent(self):
        """
        read chunk 2,2: does not exist. Reading should raise an InconceivedChunk.
        """
        self.assertRaises(InconceivedChunk, self.region.get_nbt, 2, 2)

    def test017ReadableChunks(self):
        """
        Test which chunks are readable.
        """
        coords = []
        for cc in self.region.get_chunk_coords():
            try:
                nbt = self.region.get_chunk(cc['x'], cc['z'])
                coords.append((cc['x'], cc['z']))
            except RegionFileFormatError:
                pass

        self.assertIn((1, 0), coords)
        self.assertIn((2, 0), coords)
        self.assertNotIn((3, 0), coords) # garbled data
        self.assertIn((4, 0), coords) # readable, despite overlapping with chunk 12,0
        self.assertIn((6, 0), coords)
        self.assertIn((7, 0), coords)
        self.assertIn((8, 0), coords)
        self.assertIn((9, 0), coords)
        self.assertIn((10, 0), coords)
        self.assertNotIn((11, 0), coords) # unknown encoding
        self.assertIn((12, 0), coords) # readable, despite overlapping with chunk 4,0
        self.assertNotIn((13, 0), coords) # zero-length (in header)
        self.assertNotIn((14, 0), coords) # in header
        self.assertNotIn((15, 0), coords) # out of file
        self.assertIn((16, 0), coords)
        self.assertNotIn((17, 0), coords) # timestamp without data
        self.assertIn((3, 1), coords)
        self.assertNotIn((4, 1), coords) # invalid length (in chunk)
        self.assertNotIn((5, 1), coords) # not a valid NBT file
        self.assertIn((6, 1), coords)
        self.assertIn((7, 1), coords)
        self.assertNotIn((8, 1), coords) # zero-length (in chunk)
        self.assertEqual(len(coords), 13)

    def test020ReadInHeader(self):
        """
        read chunk 14,0: supposedly located in the header. 
        Reading should raise a RegionHeaderError.
        """
        self.assertRaises(RegionHeaderError, self.region.get_nbt, 14, 0)
        # TODO:
        self.assertEqual(self.region.header[14,0], (1, 1, 1376433960, RegionFile.STATUS_CHUNK_IN_HEADER))
        self.assertEqual(self.region.chunk_headers[14,0], (None, None, RegionFile.STATUS_CHUNK_IN_HEADER))

    def test021ReadOutOfFile(self):
        """
        read chunk 15,0: error (out of file)
        """
        self.assertRaises(RegionHeaderError, self.region.get_nbt, 15, 0)
        self.assertEqual(self.region.header[15,0], (30, 1, 1376433961, RegionFile.STATUS_CHUNK_OUT_OF_FILE))
        self.assertEqual(self.region.chunk_headers[15,0], (None, None, RegionFile.STATUS_CHUNK_OUT_OF_FILE))

    def test022ReadZeroLengthHeader(self):
        """
        read chunk 13,0: error (zero-length)
        """
        self.assertRaises(RegionHeaderError, self.region.get_nbt, 13, 0)
        self.assertEqual(self.region.header[13,0], (21, 0, 1376433958, RegionFile.STATUS_CHUNK_ZERO_LENGTH))
        self.assertEqual(self.region.chunk_headers[13,0], (None, None, RegionFile.STATUS_CHUNK_ZERO_LENGTH))

    def test023ReadInvalidLengthChunk(self):
        """
        zero-byte lengths in chunk. (4,1)
        read chunk 4,1: error (invalid)
        """
        self.assertRaises(ChunkHeaderError, self.region.get_nbt, 4, 1)

    def test024ReadZeroLengthChunk(self):
        """
        read chunk 8,1: error (zero-length chunk)
        """
        self.assertRaises(ChunkHeaderError, self.region.get_nbt, 8, 1)

    def test025ReadChunkSizeExceedsSectorSize(self):
        """
        read chunk 3,1: can be read, despite the chunk content being longer than the allocated sectors.
        In general, reading should either succeed or raise a ChunkDataError.
        The status should be STATUS_CHUNK_MISMATCHED_LENGTHS.
        """
        self.assertEqual(self.region.chunk_headers[3,1][2], RegionFile.STATUS_CHUNK_MISMATCHED_LENGTHS)
        # reading should succeed, despite the overlap (next chunk is free)
        nbt = self.region.get_nbt(3, 1)

    def test026ReadChunkOverlapping(self):
        """
        chunk 4,0 and chunk 12,0 overlap: status should be STATUS_CHUNK_OVERLAPPING
        """
        self.assertEqual(self.region.chunk_headers[4,0][2], RegionFile.STATUS_CHUNK_OVERLAPPING)
        self.assertEqual(self.region.chunk_headers[12,0][2], RegionFile.STATUS_CHUNK_OVERLAPPING)

    def test030GetTimestampOK(self):
        """
        get_timestamp
        read chunk 9,0: OK
        """
        self.assertEqual(self.region.get_timestamp(9,0), 1334530101)

    def test031GetTimestampBadChunk(self):
        """
        read chunk 15,0: OK
        Data is out-of-file, but the timestamp is still there.
        """
        self.assertEqual(self.region.get_timestamp(15,0), 1376433961)

    def test032GetTimestampNoChunk(self):
        """
        read chunk 17,0: OK
        no data, but a timestamp
        """
        self.assertEqual(self.region.get_timestamp(17,0), 1334530101)

    def test033GetTimestampMissing(self):
        """
        read chunk 7,1: OK
        data, but no timestamp
        """
        self.assertEqual(self.region.get_timestamp(7,1), 0)

    def test040WriteNewChunk(self):
        """
        read chunk 0,2: InconceivedChunk
        write 1 sector chunk 0,2
        - read location (<= 026), size (001), timestamp (non-zero).
        """
        chunk_count = self.region.chunk_count()
        nbt = generate_compressed_level(minsize = 100, maxsize = 4000)
        self.assertRaises(InconceivedChunk, self.region.get_nbt, 0, 2)
        timebefore = int(time.time())
        self.region.write_chunk(0, 2, nbt)
        timeafter = time.time()
        header = self.region.header[0,2]
        self.assertEqual(header[1], 1, "Chunk length must be 1 sector")
        self.assertGreaterEqual(header[0], 2, "Chunk must not be written in the header")
        self.assertLessEqual(header[0], 26, "Chunk must not be written past the end of the file")
        self.assertGreaterEqual(header[2], timebefore, "Timestamp must be time.time()")
        self.assertLessEqual(header[2], timeafter, "Timestamp must be time.time()")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)
        self.assertEqual(self.region.chunk_count(), chunk_count + 1)

    def test041WriteAndReadNewChunk(self):
        """
        write 1 sector chunk 0,2
        read chunk 0,2: OK
        - compare written and read NBT file
        """
        nbtwrite = generate_compressed_level(minsize = 100, maxsize = 4000)
        writebuffer = BytesIO()
        nbtwrite.write_file(buffer=writebuffer)
        nbtsize = writebuffer.seek(0,2)
        self.region.write_chunk(0, 2, nbtwrite)
        nbtread = self.region.get_nbt(0, 2)
        readbuffer = BytesIO()
        nbtread.write_file(buffer=readbuffer)
        self.assertEqual(nbtsize, readbuffer.seek(0,2))
        writebuffer.seek(0)
        writtendata = writebuffer.read()
        readbuffer.seek(0)
        readdata = readbuffer.read()
        self.assertEqual(writtendata, readdata)

    def test042WriteExistingChunk(self):
        """
        write 1 sector chunk 9,0 (should stay in 006)
        - read location (006) and size (001).
        """
        chunk_count = self.region.chunk_count()
        nbt = generate_compressed_level(minsize = 100, maxsize = 4000)
        self.region.write_chunk(9, 0, nbt)
        header = self.region.header[9, 0]
        self.assertEqual(header[0], 6, "Chunk should remain at sector 6")
        self.assertEqual(header[1], 1, "Chunk length must be 1 sector")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)
        self.assertEqual(self.region.chunk_count(), chunk_count)

    def test043DeleteChunk(self):
        """
        read chunk 6,0: OK
        unlink chunk 6,0
        - check location, size, timestamp (all should be 0)
        read chunk 6,0: InconceivedChunk
        """
        chunk_count = self.region.chunk_count()
        nbt = self.region.get_nbt(6, 0)
        self.region.unlink_chunk(6, 0)
        self.assertRaises(InconceivedChunk, self.region.get_nbt, 6, 0)
        header = self.region.header[6, 0]
        self.assertEqual(header[0], 0)
        self.assertEqual(header[1], 0)
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_NOT_CREATED)
        self.assertEqual(self.region.chunk_count(), chunk_count - 1)

    def test044UseEmptyChunks(self):
        """
        write 1 sector chunk 1,2 (should go to 004)
        write 1 sector chunk 2,2 (should go to 010)
        write 1 sector chunk 3,2 (should go to 011)
        verify file size remains 027*4096
        """
        nbt = generate_compressed_level(minsize = 100, maxsize = 4000)
        availablelocations = (4, 10, 11, 26)
        self.region.write_chunk(1, 2, nbt)
        self.assertIn(self.region.header[1, 2][0], availablelocations)
        self.region.write_chunk(2, 2, nbt)
        self.assertIn(self.region.header[2, 2][0], availablelocations)
        self.region.write_chunk(3, 2, nbt)
        self.assertIn(self.region.header[3, 2][0], availablelocations)

    def test050WriteNewChunk2sector(self):
        """
        write 2 sector chunk 1,2 (should go to 010-011)
        """
        nbt = generate_compressed_level(minsize = 5000, maxsize = 7000)
        self.region.write_chunk(1, 2, nbt)
        header = self.region.header[1, 2]
        self.assertEqual(header[1], 2, "Chunk length must be 2 sectors")
        self.assertEqual(header[0], 10, "Chunk should be placed in sector 10")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)

    def test051WriteNewChunk4096byte(self):
        """
        write 4091+5-byte (1 sector) chunk 1,2 (should go to 004)
        """
        nbt = generate_compressed_level(minsize = 4091, maxsize = 4091)
        self.region.write_chunk(1, 2, nbt)
        header = self.region.header[1, 2]
        chunk_header = self.region.chunk_headers[1, 2]
        if chunk_header[0] != 4092:
            raise unittest.SkipTest("Can't create chunk of 4091 bytes compressed")
        self.assertEqual(header[1], 1, "Chunk length must be 1 sector")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)
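        # The byte arithmetic behind this test and the next: on disk a chunk is
        # a 4-byte big-endian length field, a 1-byte compression type, then the
        # compressed payload, so a 4091-byte payload fills 4091 + 5 = 4096
        # bytes (exactly 1 sector), while one more payload byte needs a 2nd sector.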

    def test052WriteNewChunk4097byte(self):
        """
        write 4092+5-byte (2 sector) chunk 1,2 (should go to 010-011)
        """
        nbt = generate_compressed_level(minsize = 4092, maxsize = 4092)
        self.region.write_chunk(1, 2, nbt)
        header = self.region.header[1, 2]
        chunk_header = self.region.chunk_headers[1, 2]
        if chunk_header[0] != 4093:
            raise unittest.SkipTest("Can't create chunk of 4092 bytes compressed")
        self.assertEqual(header[1], 2, "Chunk length must be 2 sectors")
        self.assertEqual(header[0], 10, "Chunk should be placed in sector 10")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)

    def test053WriteNewChunkIncreaseFile(self):
        """
        write 3 sector chunk 1,2 (should go to 026-028 or 027-029) (increase file size)
        verify file size is 29*4096 or 30*4096 bytes
        """
        nbt = generate_compressed_level(minsize = 9000, maxsize = 11000)
        self.region.write_chunk(1, 2, nbt)
        header = self.region.header[1, 2]
        self.assertEqual(header[1], 3, "Chunk length must be 3 sectors")
        self.assertIn(header[0], (26, 27), "Chunk should be placed in sector 26 or 27")
        self.assertEqual(self.region.get_size(), (header[0] + header[1])*4096, "File should end immediately after the last chunk")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)

    def test054WriteExistingChunkDecreaseSector(self):
        """
        write 1 sector chunk 16,0 (should go to existing 017) (should free sector 018)
        write 1 sector chunk 1,2 (should go to 004)
        write 1 sector chunk 2,2 (should go to 010)
        write 1 sector chunk 3,2 (should go to 011)
        write 1 sector chunk 4,2 (should go to freed 018)
        write 1 sector chunk 5,2 (should go to 026)
        verify file size remains 027*4096
        """
        nbt = generate_compressed_level(minsize = 100, maxsize = 4000)
        header = self.region.header[16, 0]
        self.assertEqual(header[1], 2)
        self.region.write_chunk(16, 0, nbt)
        header = self.region.header[16, 0]
        self.assertEqual(header[1], 1, "Chunk length must be 1 sector")
        self.assertEqual(header[0], 17, "Chunk should remain in sector 17")
        # Write 1-sector chunks to check which sectors are "free"
        locations = []
        self.region.write_chunk(1, 2, nbt)
        locations.append(self.region.header[1, 2][0])
        self.region.write_chunk(2, 2, nbt)
        locations.append(self.region.header[2, 2][0])
        self.region.write_chunk(3, 2, nbt)
        locations.append(self.region.header[3, 2][0])
        self.region.write_chunk(4, 2, nbt)
        locations.append(self.region.header[4, 2][0])
        self.region.write_chunk(5, 2, nbt)
        locations.append(self.region.header[5, 2][0])
        self.assertIn(18, locations)
        # self.assertEqual(locations, [4, 10, 11, 18, 26])
        # self.assertEqual(self.region.get_size(), 27*4096)

    @unittest.skip('Test takes too much time')
    def test055WriteChunkTooLarge(self):
        """
        Chunks of size >= 256 sectors are not supported by the file format
        attempting to write a chunk of 256 sectors should raise a ChunkDataError
        """
        maxsize = 256 * 4096
        nbt = generate_compressed_level(minsize = maxsize + 100, maxsize = maxsize + 4000)
        self.assertRaises(ChunkDataError, self.region.write_chunk, 2, 2, nbt)

    def test060WriteExistingChunkIncreaseSectorSameLocation(self):
        """
        write 2 sector chunk 7,0 (should go to 003-004) (increase chunk size)
        """
        nbt = generate_compressed_level(minsize = 5000, maxsize = 7000)
        self.region.write_chunk(7, 0, nbt)
        header = self.region.header[7, 0]
        self.assertEqual(header[1], 2, "Chunk length must be 2 sectors")
        self.assertEqual(header[0], 3, "Chunk should remain in sector 3")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)
        # self.assertEqual(self.region.get_size(), 27*4096)

    def test061WriteExistingChunkCorrectSize(self):
        """
        write 2 sector chunk 3,1 (should go to 025-026) (increase sector size)
        """
        nbt = self.region.get_chunk(3, 1)
        self.region.write_chunk(3, 1, nbt)
        header = self.region.header[3, 1]
        self.assertEqual(header[1], 2, "Chunk length must be 2 sectors")
        self.assertEqual(header[0], 25, "Chunk should remain in sector 25")
        self.assertEqual(header[3], RegionFile.STATUS_CHUNK_OK)
        self.assertEqual(self.region.get_size(), 27*4096)

    def test062WriteExistingChunkIncreaseSectorNewLocation(self):
        """
        write 2 sector chunk 8,0 (should go to 004-005 or 010-011)
        verify chunk_count remains 18
        write 2 sector chunk 2,2 (should go to 010-011 or 004-005)
        verify that file size is not increased (remains <= 027*4096)
        verify chunk_count is 19
        """
        locations = []
        chunk_count = self.region.chunk_count()
        nbt = generate_compressed_level(minsize = 5000, maxsize = 7000)
        self.region.write_chunk(8, 0, nbt)
        header = self.region.header[8, 0]
        self.assertEqual(header[1], 2) # length
        locations.append(header[0]) # location
        self.assertEqual(self.region.chunk_count(), chunk_count)
        self.region.write_chunk(2, 2, nbt)
        header = self.region.header[2, 2]
        self.assertEqual(header[1], 2) # length
        locations.append(header[0]) # location
        self.assertEqual(sorted(locations), [4, 10]) # locations
        self.assertEqual(self.region.chunk_count(), chunk_count + 1)

    def test063WriteNewChunkFreedSectors(self):
        """
        unlink chunk 6,0
        unlink chunk 7,0
        write 3 sector chunk 2,2 (should go to 002-004) (file size should remain the same)
        """
        self.region.unlink_chunk(6, 0)
        self.region.unlink_chunk(7, 0)
        nbt = generate_compressed_level(minsize = 9000, maxsize = 11000)
        self.region.write_chunk(2, 2, nbt)
        header = self.region.header[2, 2]
        self.assertEqual(header[1], 3, "Chunk length must be 3 sectors")
        self.assertEqual(header[0], 2, "Chunk should be placed in sector 2")

    def test070WriteOutOfFileChunk(self):
        """
        write 1 sector chunk 13,0 (should go to 004)
        Should not go to sector 30 (out-of-file location)
        """
        nbt = generate_compressed_level(minsize = 100, maxsize = 4000)
        self.region.write_chunk(13, 0, nbt)
        header = self.region.header[13, 0]
        self.assertEqual(header[1], 1) # length
        self.assertLessEqual(header[0], 26, "Previously out-of-file chunk should be written in-file")

    def test071WriteZeroLengthSectorChunk(self):
        """
        write 1 sector chunk 13,0 (should go to 004)
        Verify sector 19 remains untouched.
        """
        nbt = generate_compressed_level(minsize = 100, maxsize = 4000)
        self.region.write_chunk(13, 0, nbt)
        header = self.region.header[13, 0]
        self.assertEqual(header[1], 1) # length
        self.assertNotEqual(header[0], 19, "Previously 0-length chunk should not overwrite existing chunk")

    def test072WriteOverlappingChunkLong(self):
        """
        write 2 sector chunk 4,0 (should go to 010-011) (free 014 & 016)
        verify location is NOT 014 (because of overlap)
        write 1 sector chunk 1,2 (should go to 004)
        write 1 sector chunk 2,2 (should go to freed 014)
        write 1 sector chunk 3,2 (should go to freed 016)
        write 1 sector chunk 4,2 (should go to 018)
        write 1 sector chunk 5,2 (should go to 026)
        verify file size remains 027*4096
        """
        nbt = generate_compressed_level(minsize = 5000, maxsize = 7000)
        self.region.write_chunk(4, 0, nbt)
        header = self.region.header[4, 0]
        self.assertEqual(header[1], 2) # length
        self.assertNotEqual(header[0], 14, "Chunk should not be written to same location when it overlaps")
        self.assertEqual(header[0], 10, "Overlapping chunk should be relocated to the free sectors 010-011")
        # Write 1-sector chunks to check which sectors are "free"
        nbt = generate_compressed_level(minsize = 100, maxsize = 4000)
        locations = []
        self.region.write_chunk(1, 2, nbt)
        locations.append(self.region.header[1, 2][0])
        self.region.write_chunk(2, 2, nbt)
        locations.append(self.region.header[2, 2][0])
        self.region.write_chunk(3, 2, nbt)
        locations.append(self.region.header[3, 2][0])
        self.region.write_chunk(4, 2, nbt)
        locations.append(self.region.header[4, 2][0])
        self.region.write_chunk(5, 2, nbt)
        locations.append(self.region.header[5, 2][0])
        self.assertIn(14, locations)
        self.assertIn(16, locations)
        # self.assertEqual(locations, [4, 14, 16, 18, 26])
        # self.assertEqual(self.region.get_size(), 27*4096)

    def test073WriteOverlappingChunkSmall(self):
        """
        write 1 sector chunk 12,0 (should go to 004) ("free" 015 for use by 4,0)
        verify location is NOT 015
        verify sectors 15 and 16 are not marked as "free", but remain in use by 4,0
        """
        nbt = generate_compressed_level(minsize = 100, maxsize = 4000)
        self.region.write_chunk(12, 0, nbt)
        header = self.region.header[12, 0]
        self.assertEqual(header[1], 1) # length
        self.assertNotEqual(header[0], 15, "Chunk should not be written to same location when it overlaps")
        # Write 1-sector chunks to check which sectors are "free"
        locations = []
        self.region.write_chunk(1, 2, nbt)
        locations.append(self.region.header[1, 2][0])
        self.region.write_chunk(2, 2, nbt)
        locations.append(self.region.header[2, 2][0])
        self.region.write_chunk(3, 2, nbt)
        locations.append(self.region.header[3, 2][0])
        self.assertNotIn(15, locations)
        self.assertNotIn(16, locations)
        # self.assertEqual(locations, [10, 11, 26])
        # self.assertEqual(self.region.get_size(), 27*4096)

    def test074WriteOverlappingChunkSameLocation(self):
        """
        write 1 sector chunk 12,0 (should go to 004) ("free" 015 for use by 4,0)
        write 3 sector chunk 4,0 (should stay in 014-016)
        verify file size remains <= 027*4096
        """
        nbt = generate_compressed_level(minsize = 100, maxsize = 4000)
        self.region.write_chunk(12, 0, nbt)
        header = self.region.header[12, 0]
        self.assertEqual(header[1], 1) # length
        self.assertNotEqual(header[0], 15, "Chunk should not be written to same location when it overlaps")
        nbt = generate_compressed_level(minsize = 9000, maxsize = 11000)
        self.region.write_chunk(4, 0, nbt)
        header = self.region.header[4, 0]
        self.assertEqual(header[1], 3) # length
        self.assertEqual(header[0], 14, "No longer overlapping chunks should be written to the same location when possible")

    def test080FileTruncateLastChunkDecrease(self):
        """
        write 1 sector chunk 3,1 (should remain in 025) (free 026)
        verify file size is truncated: 26*4096 bytes
        """
        nbt = generate_compressed_level(minsize = 100, maxsize = 4000)
        self.region.write_chunk(3, 1, nbt)
        self.assertEqual(self.region.get_size(), 26*4096, "File should be truncated when last chunk is reduced in size")

    def test081FileTruncateFreeTail(self):
        """
        delete chunk 3,1 (free 025: truncate file size)
        verify file size: 25*4096 bytes
        """
        self.region.unlink_chunk(3, 1)
        self.assertEqual(self.region.get_size(), 25*4096, "File should be truncated when last sector(s) are freed")

    def test082FileTruncateMergeFree(self):
        """
        delete chunk 8,1 (free 024)
        delete chunk 3,1 (free 025: truncate file size, including 024)
        verify file size: 24*4096 bytes
        """
        self.region.unlink_chunk(8, 1)
        self.region.unlink_chunk(3, 1)
        self.assertEqual(self.region.get_size(), 24*4096, "File should be truncated as far as possible when last sector(s) are freed")

    def test090DeleteNonExistingChunk(self):
        """
        delete chunk 2,2
        """
        self.region.unlink_chunk(2, 2)
        self.assertFalse(self.region.metadata[2, 2].is_created())

    def test091DeleteNonInHeaderChunk(self):
        """
        delete chunk 14,0. This should leave sector 1 untouched.
        verify sector 1 is unmodified, with the exception of timestamp for chunk 14,0.
        """
        self.region.file.seek(4096)
        before = self.region.file.read(4096)
        chunklocation = 4 * (14 + 32*0)
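        # Timestamp entries live in sector 1 at byte offset 4 * (x + 32 * z);
        # for chunk 14,0 that is byte 56. Those 4 bytes are cut out of the
        # comparison below, since unlinking zeroes that timestamp.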
        before = before[:chunklocation] + before[chunklocation+4:]
        self.region.unlink_chunk(14, 0)
        self.region.file.seek(4096)
        after = self.region.file.read(4096)
        after = after[:chunklocation] + after[chunklocation+4:]
        self.assertEqual(before, after)

    def test092DeleteOutOfFileChunk(self):
        """
        delete chunk 15,0 (the out-of-file chunk)
        verify file size is not increased.
        """
        size = self.region.get_size()
        self.region.unlink_chunk(15, 0)
        self.assertLessEqual(self.region.get_size(), size)

    def test093DeleteChunkZeroTimestamp(self):
        """
        delete chunk 17,0
        verify timestamp is zeroed, both in get_timestamp() and get_metadata()
        """
        self.assertEqual(self.region.get_timestamp(17, 0), 1334530101)
        self.region.unlink_chunk(17, 0)
        self.assertEqual(self.region.get_timestamp(17, 0), 0)

    def test100WriteZeroPadding(self):
        """
        write 1 sector chunk 16,0 (should go to existing 017) (should free sector 018)
        Check if unused bytes in sector 017 and all bytes in sector 018 are zeroed.
        """
        nbt = generate_compressed_level(minsize = 100, maxsize = 4000)
        self.region.write_chunk(16, 0, nbt)
        header = self.region.header[16, 0]
        chunk_header = self.region.chunk_headers[16, 0]
        sectorlocation = header[0]
        oldsectorlength = 2 * 4096
        chunklength = 4 + chunk_header[0]
        unusedlength = oldsectorlength - chunklength
        self.region.file.seek(4096*sectorlocation + chunklength)
        unused = self.region.file.read(unusedlength)
        zeroes = unused.count(b'\x00')
        self.assertEqual(zeroes, unusedlength, "All unused bytes should be zeroed after writing a chunk")
    
    def test101DeleteZeroPadding(self):
        """
        unlink chunk 7,1
        Check if all bytes in sector 022 are zeroed.
        """
        header = self.region.header[7, 1]
        sectorlocation = header[0]
        self.region.unlink_chunk(7, 1)
        self.region.file.seek(sectorlocation*4096)
        unused = self.region.file.read(4096)
        zeroes = unused.count(b'\x00')
        self.assertEqual(zeroes, 4096, "All bytes should be zeroed after deleting a chunk")
    
    def test102DeleteOverlappingNoZeroPadding(self):
        """
        unlink chunk 4,0. Due to overlapping chunks, bytes should not be zeroed.
        Check if bytes in sector 015 are not all zeroed.
        """
        header = self.region.header[4, 0]
        sectorlocation = header[0]
        self.region.unlink_chunk(4, 0)
        self.region.file.seek((sectorlocation + 1) * 4096)
        unused = self.region.file.read(4096)
        zeroes = unused.count(b'\x00')
        self.assertNotEqual(zeroes, 4096, "Bytes should not be zeroed after deleting an overlapping chunk")
        self.region.file.seek((sectorlocation) * 4096)
        unused = self.region.file.read(4096)
        zeroes = unused.count(b'\x00')
        self.assertEqual(zeroes, 4096, "Bytes should be zeroed after deleting non-overlapping portions of a chunk")
        self.region.file.seek((sectorlocation + 2) * 4096)
        unused = self.region.file.read(4096)
        zeroes = unused.count(b'\x00')
        self.assertEqual(zeroes, 4096, "Bytes should be zeroed after deleting non-overlapping portions of a chunk")
    
    def test103MoveOverlappingNoZeroPadding(self):
        """
        write 2 sector chunk 4,0 to a different location. Due to overlapping chunks, bytes should not be zeroed.
        Check if bytes in sector 015 are not all zeroed.
        """
        header = self.region.header[4, 0]
        sectorlocation = header[0]
        nbt = generate_compressed_level(minsize = 5000, maxsize = 7000)
        self.region.write_chunk(4, 0, nbt)
        self.region.file.seek((sectorlocation + 1) * 4096)
        unused = self.region.file.read(4096)
        zeroes = unused.count(b'\x00')
        self.assertNotEqual(zeroes, 4096, "Bytes should not be zeroed after moving an overlapping chunk")
    
    def test104DeleteZeroPaddingMismatchLength(self):
        """
        unlink chunk 3,1. (which has a length mismatch)
        Check if bytes in sector 025 are all zeroed.
        Check if first byte in sector 026 is not zeroed.
        """
        raise unittest.SkipTest("Test can't use this testfile")
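
To replay one of the scenarios above outside the test runner, a rough standalone sketch (REGIONTESTFILE and generate_compressed_level are module-level names assumed from regiontests.py):

import os, shutil, tempfile
from nbt.region import RegionFile

tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, 'regiontest.mca')
shutil.copy(REGIONTESTFILE, filename)
region = RegionFile(filename=filename)
print(region.get_size() // 4096)   # 27 sectors, matching the layout above
print(region.header[9, 0])         # (6, 1, 1334530101, STATUS_CHUNK_OK)
nbt = generate_compressed_level(minsize=100, maxsize=4000)
region.write_chunk(9, 0, nbt)      # 1-sector rewrite stays in sector 6
del region
shutil.rmtree(tempdir)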
Example #20
def analyse_regionfile(filename, warnings=True):
    region = RegionFile(filename)
    
    statuscounts = Statuses()
    errors = []
    if region.size % 4096 != 0:
        errors.append("File size is %d bytes, which is not a multiple of 4096" % region.size)
    sectorsize = region._bytes_to_sector(region.size)
    sectors = sectorsize*[None]
    if region.size == 0:
        errors.append("File size is 0 bytes")
        sectors = []
    elif sectorsize < 2:
        errors.append("File size is %d bytes, too small for the 8192 byte header" % region.size)
    else:
        sectors[0] = "locations"
        sectors[1] = "timestamps"
    chunks = {}
    for x in range(32):
        for z in range(32):
            c = ChunkMetadata(x,z)
            (c.sectorstart, c.sectorlen, c.timestamp, status) = region.header[x,z]
            (c.length, c.compression, c.status) = region.chunk_headers[x,z]
            c.uncompressedlength = 0
            chunks[x,z] = c
            
            statuscounts.count(c.status)
            if c.status < 0:
                errors.append("chunk %d,%d has status %d: %s" % \
                    (x, z, c.status, statuscounts.get_name(c.status)))
            
            try:
                if c.sectorstart == 0:
                    if c.sectorlen != 0:
                        errors.append("chunk %d,%d is not created, but is %d sectors in length" % (x, z, c.sectorlen))
                    if c.timestamp != 0:
                        errors.append("chunk %d,%d is not created, but has timestamp %d" % (x, z, c.timestamp))
                    raise RegionFileFormatError('')
                allocatedbytes = 4096 * c.sectorlen
                if c.timestamp == 0:
                    errors.append("chunk %d,%d has no timestamp" % (x, z))
                if c.sectorstart < 2:
                    errors.append("chunk %d,%d starts at sector %d, which is in the header" % (x, z, c.sectorstart))
                    raise RegionFileFormatError('')
                if 4096 * c.sectorstart >= region.size:
                    errors.append("chunk %d,%d starts at sector %d, while the file is only %d sectors" % (x, z, c.sectorstart, sectorsize))
                    raise RegionFileFormatError('')
                elif 4096 * c.sectorstart + 5 > region.size:
                    # header of chunk only partially fits
                    errors.append("chunk %d,%d starts at sector %d, but only %d bytes of sector %d are present in the file" % (x, z, c.sectorstart, sectorsize))
                    raise RegionFileFormatError('')
                elif not c.length:
                    errors.append("chunk %d,%d length is undefined." % (x, z))
                    raise RegionFileFormatError('')
                elif c.length == 1:
                    errors.append("chunk %d,%d has length 0 bytes." % (x, z))
                elif 4096 * c.sectorstart + 4 + c.length > region.size:
                    # header of chunk fits, but not the complete chunk
                    errors.append("chunk %d,%d is %d bytes in length, which is behind the file end" % (x, z, c.length))
                requiredsectors = region._bytes_to_sector(c.length + 4)
                if c.sectorlen <= 0:
                    errors.append("chunk %d,%d is %d sectors in length" % (x, z, c.sectorlen))
                    raise RegionFileFormatError('')
                if c.compression == 0:
                    errors.append("chunk %d,%d is uncompressed. This is deprecated." % (x, z))
                elif c.compression == 1:
                    errors.append("chunk %d,%d uses GZip compression. This is deprecated." % (x, z))
                elif c.compression > 2:
                    errors.append("chunk %d,%d uses an unknown compression type (%d)." % (x, z, c.compression))
                if c.length + 4 > allocatedbytes: # TODO 4 or 5?
                    errors.append("chunk %d,%d is %d bytes (4+1+%d) and requires %d sectors, " \
                        "but only %d %s allocated" % \
                        (x, z, c.length+4, c.length-1, requiredsectors, c.sectorlen, \
                        "sector is" if (c.sectorlen == 1) else "sectors are"))
                elif c.length + 4 + 4096 == allocatedbytes:
                    # If the chunk fits in exactly n sectors, Minecraft seems to allocate n+1 sectors.
                    # Treat this as a warning instead of an error.
                    if warnings:
                        errors.append("chunk %d,%d is %d bytes (4+1+%d) and requires %d %s, " \
                            "but %d sectors are allocated" % \
                            (x, z, c.length+4, c.length-1, requiredsectors, \
                            "sector" if (requiredsectors == 1) else "sectors", c.sectorlen))
                elif c.sectorlen > requiredsectors:
                    errors.append("chunk %d,%d is %d bytes (4+1+%d) and requires %d %s, " \
                        "but %d sectors are allocated" % \
                        (x, z, c.length+4, c.length-1, requiredsectors, \
                        "sector" if (requiredsectors == 1) else "sectors", c.sectorlen))
                

                # Decompress chunk, check if that succeeds.
                # Check if the header and footer indicate this is a NBT file.
                # (without parsing it in detail)
                compresseddata = None
                data = None
                try:
                    if 0 <= c.compression <= 2:
                        region.file.seek(4096*c.sectorstart + 5)
                        compresseddata = region.file.read(c.length - 1)
                except Exception as e:
                    errors.append("Error reading chunk %d,%d: %s" % (x, z, str(e)))
                if (c.compression == 0):
                    data = compresseddata
                elif (c.compression == 1):
                    try:
                        data = gzip.decompress(compresseddata)
                    except Exception as e:
                        errors.append("Error decompressing chunk %d,%d using gzip: %s" % (x, z, str(e)))
                elif (c.compression == 2):
                    try:
                        data = zlib.decompress(compresseddata)
                    except Exception as e:
                        errors.append("Error decompressing chunk %d,%d using zlib: %s" % (x, z, str(e)))
                if data:
                    c.uncompressedlength = len(data)
                    if data[0] != 10:
                        errors.append("chunk %d,%d is not a valid NBT file: outer object is not a TAG_Compound, but %r" % (x, z, data[0]))
                    elif data[-1] != 0:
                        errors.append("chunk %d,%d is not a valid NBT file: files does not end with a TAG_End." % (x, z))
                    else:
                        (length, ) = unpack(">H", data[1:3])
                        name = data[3:3+length]
                        try:
                            name.decode("utf-8", "strict") 
                        except Exception as e:
                            errors.append("Error decompressing chunk %d,%d using unknown compression: %s" % (x, z, str(e)))
                
                if warnings:
                    # Read the unused bytes in a sector and check if all bytes are zeroed.
                    unusedlen = 4096*c.sectorlen - (c.length+4)
                    if unusedlen > 0:
                        try:
                            region.file.seek(4096*c.sectorstart + 4 + c.length)
                            unused = region.file.read(unusedlen)
                            zeroes = unused.count(b'\x00')
                            if zeroes < unusedlen:
                                errors.append("%d of %d unused bytes are not zeroed in sector %d after chunk %d,%d" % \
                                    (unusedlen-zeroes, unusedlen, c.sectorstart + c.sectorlen - 1, x, z))
                        except Exception as e:
                            errors.append("Error reading tail of chunk %d,%d: %s" % (x, z, str(e)))
            
            except RegionFileFormatError:
                pass
            
            if c.sectorlen and c.sectorstart:
                # Check for overlapping chunks
                for b in range(c.sectorlen):
                    m = "chunk %-2d,%-2d part %d/%d" % (x, z, b+1, c.sectorlen)
                    p = c.sectorstart + b
                    if p >= sectorsize:
                        errors.append("%s outside file" % (m))
                        break
                    if sectors[p] is not None:
                        errors.append("overlap in sector %d: %s and %s" % (p, sectors[p], m))
                    if (b == 0):
                        if (c.uncompressedlength > 0):
                            m += " (4+1+%d bytes compressed: %d bytes uncompressed)" % (c.length-1, c.uncompressedlength)
                        elif c.length:
                            m += " (4+1+%d bytes compressed)" % (c.length-1)
                        else:
                            m += " (4+1+0 bytes)"
                    if sectors[p] is not None:
                        m += " (overlapping!)"
                    sectors[p] = m
    
    e = sectors.count(None)
    if e > 0:
        if warnings:
            errors.append("Fragmentation: %d of %d sectors are unused" % (e, sectorsize))
        for sector, content in enumerate(sectors):
            if content is None:
                sectors[sector] = "empty"
                if warnings:
                    region.file.seek(4096*sector)
                    unused = region.file.read(4096)
                    zeroes = unused.count(b'\x00')
                    if zeroes < 4096:
                        errors.append("%d bytes are not zeroed in unused sector %d" % (4096-zeroes, sector))

    return errors, statuscounts, sectors, chunks
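
Typical use of the analyser above: run it over a region file and print both the error list and the per-sector map it builds (the filename here is illustrative):

errors, statuscounts, sectors, chunks = analyse_regionfile("r.0.0.mca", warnings=True)
for error in errors:
    print(error)
for number, content in enumerate(sectors):
    print("sector %03d: %s" % (number, content))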
Example #21
try:
	for filename in regions:
		print "Parsing",filename,"..."
		pieces = filename.split('.')
		rx = int(pieces[1])
		rz = int(pieces[2])
		
		# Does the region overlap the bounding box at all?
		if (start != None):
			if ( (rx+1)*512-1 < int(start[0]) or (rz+1)*512-1 < int(start[2]) ):
				continue
		elif (stop != None):
			if ( rx*512-1 > int(stop[0]) or rz*512-1 > int(stop[2]) ):
				continue
				
		file = RegionFile(filename=world_folder+'/region/'+filename)
		
		# Get all chunks
		chunks = file.get_chunks()
		for c in chunks:
			# Does the chunk overlap the bounding box at all?
			if (start != None):
				if ( (c['x']+1)*16 + rx*512 - 1 < int(start[0]) or (c['z']+1)*16 + rz*512 - 1 < int(start[2]) ):
					continue
			elif (stop != None):
				if ( c['x']*16 + rx*512 - 1 > int(stop[0]) or c['z']*16 + rz*512 - 1 > int(stop[2]) ):
					continue
			
			chunk = Chunk(file.get_chunk(c['x'], c['z']))
			#print "Parsing chunk ("+str(c['x'])+", "+str(c['z'])+")"
			
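The bounding-box checks above all reduce to the same coordinate arithmetic: a region spans 32x32 chunks of 16x16 blocks, i.e. 512 blocks per axis. A small reference helper expressing the same math:

def chunk_block_extent(rx, rz, cx, cz):
    """World-block extents of chunk (cx, cz) within region (rx, rz)."""
    x0 = rx * 512 + cx * 16
    z0 = rz * 512 + cz * 16
    return (x0, x0 + 15), (z0, z0 + 15)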
Example #22
File: regiontests.py Project: Fenixin/NBT
 def test01ReadFile(self):
     region = RegionFile(fileobj=self.stream)
     self.assertEqual(region.chunk_count(), 0)
Example #24
File: regiontests.py Project: Fenixin/NBT
 def test02WriteFile(self):
     chunk = self.generate_level()
     region = RegionFile(fileobj=self.stream)
     region.write_chunk(0, 0, chunk)
     self.assertEqual(region.get_size(), 3*4096)
     self.assertEqual(region.chunk_count(), 1)
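
The expected 3*4096 bytes above decompose as two header sectors (locations and timestamps) plus one data sector for the newly written chunk. The same check as a self-contained sketch, assuming the stream starts empty (as the test fixture appears to) and that generate_level is a helper producing a small NBT chunk, as in the example:

from io import BytesIO
from nbt.region import RegionFile

stream = BytesIO()
region = RegionFile(fileobj=stream)
region.write_chunk(0, 0, generate_level())  # assumed chunk-producing helper
assert region.get_size() == 3 * 4096        # 2 header sectors + 1 chunk sector
assert region.chunk_count() == 1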