Beispiel #1
0
	def read(self, raw, offset):
		"""Parse terrain-related data from the raw byte buffer.

		Reads 16 terrain border records, a 28-byte zero pad plus an
		additional terrain count, and a fixed-size render-data blob
		(dumped to a raw file). Returns the offset just past the
		consumed bytes.
		"""
		self.data = dict()

		# 16 terrain border entries, stored back to back.
		borders = list()
		for _ in range(16):
			border = TerrainBorder()
			offset = border.read(raw, offset)
			borders.append(border.data)
		self.data["terrain_border"] = borders

		#int8_t zero[28];
		#uint16_t terrain_count_additional;
		zero_and_count = Struct(endianness + "28c H")
		fields = zero_and_count.unpack_from(raw, offset)
		offset += zero_and_count.size

		# index 28 is the uint16 that follows the 28 padding bytes
		self.data["terrain_count_additional"] = fields[28]

		# opaque render-data blob; dump it verbatim for later inspection
		blob_struct = Struct(endianness + "12722s")
		blob = blob_struct.unpack_from(raw, offset)
		offset_begin = offset
		offset += blob_struct.size

		fname = 'raw/terrain_render_data_%d_to_%d.raw' % (offset_begin, offset)
		file_write(file_get_path(fname, write=True), blob[0])

		return offset
Beispiel #2
0
	def __init__(self, fname):
		"""Open a DRS archive and index every file it contains.

		Populates self.files, mapping (extension, id) to
		(data offset, size); the open handle is kept in self.f for
		later data reads.
		"""
		#(extension, id): (data offset, size)
		self.files = {}

		self.fname = fname
		fname = file_get_path(fname, write = False)
		f = file_open(fname, binary = True, write = False)

		#read header
		self.header = DRS.drs_header.unpack(f.read(DRS.drs_header.size))

		dbg("DRS header [" + fname + "]", 1, push = "drs")
		dbg("copyright:          " + self.header[0].decode('latin-1'))
		dbg("version:            " + self.header[1].decode('latin-1'))
		dbg("ftype:              " + self.header[2].decode('latin-1'))
		dbg("table count:        " + str(self.header[3]))
		dbg("file offset:        " + str(self.header[4]))
		dbg("")

		#read table info
		table_count = self.header[3]
		tables_buf = f.read(table_count * DRS.drs_table_info.size)

		for table_idx in range(table_count):
			file_type, file_extension, file_info_offset, num_files = \
				DRS.drs_table_info.unpack_from(tables_buf, table_idx * DRS.drs_table_info.size)

			#flip the extension... it's stored that way...
			file_extension = file_extension.decode('latin-1').lower()[::-1]

			dbg("Table header [" + str(table_idx) + "]", 2, push = "table")
			dbg("file type:        0x" + hexlify(file_type).decode('utf-8'))
			dbg("file extension:   " + file_extension)
			dbg("file_info_offset: " + str(file_info_offset))
			dbg("num_files:        " + str(num_files))
			dbg("")

			#jump to this table's file-info records and read them all
			f.seek(file_info_offset)
			info_buf = f.read(num_files * DRS.drs_file_info.size)

			for entry_idx in range(num_files):
				file_id, file_data_offset, file_size = \
					DRS.drs_file_info.unpack_from(info_buf, entry_idx * DRS.drs_file_info.size)

				dbg("File info header [" + str(entry_idx) + "]", 3, push = "fileinfo")
				dbg("file id:        " + str(file_id))
				dbg("data offset:    " + str(file_data_offset))
				dbg("file size:      " + str(file_size))
				dbg("")

				self.files[(file_extension, file_id)] = file_data_offset, file_size
				dbg(pop = "fileinfo")

			dbg(pop = "table")

		self.f = f

		dbg(pop = "drs")
Beispiel #3
0
    def __init__(self, fname):
        """Read empires2_x1_p1.dat, decompress it, and parse its content.

        The file is a raw zlib deflate stream (no header). The
        decompressed bytes are kept in self.content, dumped to a raw
        file for debugging, and then parsed via self.read().
        """
        self.fname = fname
        dbg("reading empires2_x1_p1 from %s..." % fname, 1)

        fname = file_get_path(fname, write=False)
        f = file_open(fname, binary=True, write=False)

        # close the handle even if reading/decompression raises
        # (the original leaked it on that path)
        try:
            dbg("decompressing data from %s" % fname, 1)

            compressed_data = f.read()
            # decompress content with zlib (note the magic -15)
            # -15: - -> there is no header, 15 is the max windowsize
            self.content = zlib.decompress(compressed_data, -15)
        finally:
            f.close()

        compressed_size = len(compressed_data)
        decompressed_size = len(self.content)

        # the compressed buffer can be large; drop it as soon as possible
        del compressed_data

        dbg("length of compressed data: %d = %d kB" % (compressed_size, compressed_size / 1024), 1)
        dbg("length of decompressed data: %d = %d kB" % (decompressed_size, decompressed_size / 1024), 1)

        from util import file_write

        # dump the decompressed dat for offline inspection
        print("saving uncompressed dat file...")
        file_write(file_get_path("info/empires2x1p1.raw", write=True), self.content)

        # the main data storage
        self.data = dict()

        offset = 0
        offset = self.read(self.content, offset)

        dbg(
            "finished reading empires*.dat at %d of %d bytes (%f%%)."
            % (offset, decompressed_size, 100 * (offset / decompressed_size)),
            1,
        )
Beispiel #4
0
	def __init__(self, fname):
		"""Parse blendomatic.dat: alpha-mask and bit-mask tiles per blending mode.

		Fills self.blending_modes with one dict per mode, holding
		"pxcount" (pixels per tile), "alphamasks" (8-bit mask tiles)
		and "bitmasks" (1-bit mask tiles).

		Bugfixes vs. the original: the inner loops reused the loop
		variable `i`, clobbering the outer blending-mode index, and
		the file handle was never closed.
		"""
		self.fname = fname
		dbg("reading blendomatic data from %s" % fname, 1, push="blendomatic")

		fname = file_get_path(fname, write = False)
		f = file_open(fname, binary = True, write = False)

		buf = f.read(Blendomatic.blendomatic_header.size)
		self.header = Blendomatic.blendomatic_header.unpack_from(buf)

		blending_mode_count, tile_count = self.header

		dbg("%d blending modes, each %d tiles" % (blending_mode_count, tile_count), 2)

		self.blending_modes = []

		for i in range(blending_mode_count):

			blending_mode = Struct(endianness + "I %dB" % (tile_count))
			blending_mode_buf = f.read(blending_mode.size)
			bmode_header = blending_mode.unpack_from(blending_mode_buf)

			#should be 2353 -> number of pixels (single alpha byte values)
			tile_size = bmode_header[0]

			#tile_flags = bmode_header[1:]  #TODO

			dbg("tile in this blending mode %d has %d pixels" % (i, tile_size), 2)

			#as we draw in isometric tile format, this is the row count
			row_count = int(math.sqrt(tile_size)) + 1  #49


			#alpha_masks_raw is an array of bytes that will draw 32 images,
			#which are bit masks.
			#
			#one of these masks also has 2353 pixels (like terraintile or alpha mask)
			#the storage of the bit masks is 4*tilesize, here's why:
			#
			#4 * 8bit * 2353 pixels = 75296 bitpixels
			#==> 75296/(32 images) = 2353 bitpixel/image
			#
			#this means if we interpret the 75296 bitpixels as 32 images,
			#each of these images gets 2353 bit as data.
			#TODO: why 32 images? isn't that depending on tile_count?

			bitmask_buf_size = tile_size * 4
			dbg("reading 1bit masks -> %d bytes" % (bitmask_buf_size), 2)
			alpha_masks_buf = f.read(bitmask_buf_size)
			alpha_masks_raw = unpack_from("%dB" % (bitmask_buf_size), alpha_masks_buf)


			#list of alpha-mask tiles
			bmode_tiles = []

			dbg("reading 8bit masks for %d tiles -> %d bytes" % (tile_count, tile_size * tile_count), 2)

			#draw mask tiles for this blending mode
			for j in range(tile_count):
				tile_buf = f.read(tile_size)
				pixels = unpack_from("%dB" % tile_size, tile_buf)

				tile = self.get_tile_from_data(row_count, pixels)

				bmode_tiles.append(tile)


			#expand each byte into its 8 bits, MSB first
			#(bugfix: loop variable was `i`, shadowing the mode index)
			bitvalues = []
			for mask_byte in alpha_masks_raw:
				for b_id in range(7, -1, -1):
					#bitmask from 0b00000001 to 0b10000000
					bit_mask = 2 ** b_id
					bit = mask_byte & bit_mask
					bitvalues.append(bit)

			#list of bit-mask tiles
			bmode_bittiles = []

			#TODO: again, is 32 really hardcoded?
			#(bugfix: loop variable was `i`, shadowing the mode index)
			for mask_idx in range(32):
				data_begin =  mask_idx    * tile_size
				data_end   = (mask_idx+1) * tile_size
				pixels = bitvalues[ data_begin : data_end ]

				tile = self.get_tile_from_data(row_count, pixels)

				bmode_bittiles.append(tile)


			bmode_data = dict()
			bmode_data["pxcount"] = tile_size
			bmode_data["alphamasks"] = bmode_tiles
			bmode_data["bitmasks"] = bmode_bittiles

			self.blending_modes.append(bmode_data)

		#all data consumed; close the handle (the original leaked it)
		f.close()

		dbg(pop = "blendomatic")
Beispiel #5
0
def main():
	"""Entry point of the media converter.

	Parses the command line, opens the game's DRS archives, optionally
	converts the palette / blendomatic / empires*.dat metadata, then
	extracts (or merely lists) all files matched by the extraction rules.
	"""

	args = parse_args()

	#set verbose value in util
	set_verbosity(args.verbose)

	#assume to extract all files when nothing specified.
	if args.extract == []:
		args.extract.append('*:*.*')

	extraction_rules = [ ExtractionRule(e) for e in args.extract ]

	#unless disabled, SLP frames get packed into one atlas image
	merge_images = not args.nomerge
	exec_dev = args.development

	#set path in utility class
	dbg("setting age2 input directory to " + args.srcdir, 1)
	set_read_dir(args.srcdir)

	#write mode is disabled by default, unless destdir is set
	#(list/dump modes never write)
	if args.destdir != '/dev/null' and not args.listfiles and not args.dumpfilelist:
		dbg("setting write dir to " + args.destdir, 1)
		set_write_dir(args.destdir)
		write_enabled = True
	else:
		write_enabled = False


	#open all known DRS archives from the game data directory
	drsfiles = {
		"graphics":  DRS("Data/graphics.drs"),
		"interface": DRS("Data/interfac.drs"),
		"sounds0":   DRS("Data/sounds.drs"),
		"sounds1":   DRS("Data/sounds_x1.drs"),
		"gamedata0": DRS("Data/gamedata.drs"),
		"gamedata1": DRS("Data/gamedata_x1.drs"),
		"gamedata2": DRS("Data/gamedata_x1_p1.drs"),
		"terrain":   DRS("Data/terrain.drs")
	}

	#bin file 50500 of interfac.drs holds the game color palette
	palette = ColorTable(drsfiles["interface"].get_file_data('bin', 50500))

	#development mode needs write access; currently has nothing to run
	if exec_dev:
		if write_enabled:
			print("no indev function available at the moment.")
			return
		else:
			raise Exception("development mode requires write access")

	if write_enabled:
		#store the player color palette
		file_write(file_get_path('processed/player_color_palette.pal', write=True), palette.gen_player_color_palette())

		import blendomatic
		blend_data = blendomatic.Blendomatic("Data/blendomatic.dat")

		#one packed atlas (png + metadata) per blending mode
		#NOTE(review): ".docx" seems to be this project's metadata
		#file extension, not a Word document — confirm
		for (modeidx, png, size, metadata) in blend_data.draw_alpha_frames_merged():
			fname = 'alphamask/mode%02d' % (modeidx)
			filename = file_get_path(fname, write=True)
			file_write(filename + ".png", png)
			file_write(filename + ".docx", metadata)
			dbg("blending mode%02d -> saved packed atlas" % (modeidx), 1)

		import gamedata.empiresdat
		datfile = gamedata.empiresdat.Empires2X1P1("Data/empires2_x1_p1.dat")
		filename = file_get_path("processed/terrain_meta.docx", write=True)

		#build the terrain specification text file
		tmeta = "#terrain specification\n"
		tmeta += "#idx=terrain_id, slp_id, sound_id, blend_mode, blend_priority, angle_count, frame_count, terrain_dimensions0, terrain_dimensions1, terrain_replacement_id, name0, name1\n"

		tmeta += "n=%d\n" % len(datfile.data["terrain"]["terrain"])

		i = 0
		blending_modes = set()
		for tk in datfile.data["terrain"]["terrain"]:
			#skip terrains without graphics
			if tk["slp_id"] < 0:
				continue

			blending_modes.add(tk["blend_mode"])

			wanted = ["terrain_id", "slp_id", "sound_id", "blend_mode", "blend_priority", "angle_count", "frame_count", "terrain_dimensions0", "terrain_dimensions1", "terrain_replacement_id", "name0", "name1"]

			line = [tk[w] for w in wanted]

			#as blending mode 0==1 and 7==8, and ice is 5 for sure,
			#we subtract one from the ids, and can map -1 to 0, as mode (0-1) == (1-1)
			#TODO: this can't be correct...
			line[3] -= 1
			if line[3] < 0:
				line[3] = 0

			line = map(str, line)
			tmeta += ("%d=" % i) + ",".join(line) + "\n"
			i += 1

		file_write(filename, tmeta)


		#build the blending mode specification file
		filename = file_get_path("processed/blending_meta.docx", write=True)

		bmeta = "#blending mode specification\n"
		bmeta += "#yeah, i know that this content is totally stupid, but that's how the data can be injected later\n"
		bmeta += "#idx=mode_id\n"

		bmeta += "n=%d\n" % len(blending_modes)

		i = 0
		for m in blending_modes:
			bmeta += "%d=%d\n" % (i, m)
			i += 1

		file_write(filename, bmeta)


		if args.extrafiles:
			file_write(file_get_path('info/colortable.pal.png', write=True), palette.gen_image())


	#extraction pass over every file of every archive
	file_list = dict()
	files_extracted = 0

	for drsname, drsfile in drsfiles.items():
		for file_extension, file_id in drsfile.files:
			#skip files no extraction rule matches
			if not any((er.matches(drsname, file_id, file_extension) for er in extraction_rules)):
				continue

			#list/dump mode: only collect which archives hold the id
			if args.listfiles or args.dumpfilelist:
				fid = int(file_id)
				if fid not in file_list:
					file_list[fid] = list()

				file_list[fid] += [(drsfile.fname, file_extension)]
				continue

			if write_enabled:
				fbase = file_get_path('raw/' + drsfile.fname + '/' + str(file_id), write=True)
				fname = fbase + '.' + file_extension

				dbg("Extracting to " + fname + "...", 2)

				file_data = drsfile.get_file_data(file_extension, file_id)

			if file_extension == 'slp':
				#graphics: render SLP frames with the palette

				if write_enabled:

					s = SLP(file_data)
					out_file_tmp = drsname + ": " + str(file_id) + "." + file_extension

					if merge_images:
						#all frames into one packed atlas
						png, (width, height), metadata = s.draw_frames_merged(palette)
						file_write(fname + ".png", png)
						file_write(fname + '.docx', metadata)
						dbg(out_file_tmp + " -> saved packed atlas", 1)

					else:
						#one png + metadata file per frame
						for idx, (png, metadata) in enumerate(s.draw_frames(palette)):
							filename = fname + '.' + str(idx)
							file_write(filename + '.png', png.image)
							file_write(filename + '.docx', metadata)

							dbg(out_file_tmp + " -> extracting frame %3d...\r" % (idx), 1, end="")
						dbg(out_file_tmp + " -> saved single frame(s)", 1)

			elif file_extension == 'wav':
				#sounds: dump the wave, then convert to opus

				if write_enabled:

					file_write(fname, file_data)

					use_opus = True

					if use_opus:
						#opusenc invokation (TODO: ffmpeg?)
						opus_convert_call = ['opusenc', fname, fbase + '.opus']
						dbg("converting... : " + fname + " to opus.", 1)

						oc = subprocess.Popen(opus_convert_call, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
						oc_out, oc_err = oc.communicate()

						if ifdbg(2):
							oc_out = oc_out.decode("utf-8")
							oc_err = oc_err.decode("utf-8")

							dbg(oc_out + "\n" + oc_err, 2)

						#remove original wave file
						remove(fname)


			else:
				#this type is unknown or does not require conversion

				if write_enabled:
					file_write(fname, file_data)

			files_extracted += 1

	if write_enabled:
		dbg(str(files_extracted) + " files extracted", 0)

	if args.listfiles or args.dumpfilelist:
		#file_list = sorted(file_list)
		if not args.dumpfilelist:
			#human-readable listing: one line per file id
			for idx, f in file_list.items():
				ret = "%d = [ " % idx
				for file_name, file_extension in f:
					ret += "%s/%d.%s, " % (file_name, idx, file_extension)
				ret += "]"
				print(ret)
		else:
			#machine-readable dump: a python module with the file dict
			ret = "#!/usr/bin/python\n\n#auto generated age2tc file list\n\n"
			import pprint
			ret += "avail_files = "
			ret += pprint.pformat(file_list)
			print(ret)