Example No. 1
 def find_assets(self):
     for pak_file in self.content_pak_files:
         filename = os.path.join(self.root_dir, pak_file)
         if os.path.isfile(filename):
             stream = BinaryStream(filename=filename)
             # pak layout: int32 record count, then (name, size, data) records
             capacity = stream.read_int32()
             for _ in range(capacity):
                 asset_name = stream.read_string()
                 asset_size = stream.read_int32()
                 asset_data = stream.read(asset_size)
                 asset_name = asset_name.replace('\\', '/')
                 asset_name = asset_name.lower()
                 yield asset_name, asset_data
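
A minimal usage sketch for the generator above. The class name ContentReader and the output directory are assumptions for illustration only; any object providing root_dir, content_pak_files and this find_assets() method would work the same way:

    import os

    reader = ContentReader(root_dir='Content')   # hypothetical reader class
    for asset_name, asset_data in reader.find_assets():
        out_path = os.path.join('extracted', asset_name)
        os.makedirs(os.path.dirname(out_path), exist_ok=True)
        with open(out_path, 'wb') as out_file:
            out_file.write(asset_data)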
Example No. 2
 def find_assets(self):
     for pak_file in self.content_pak_files:
         filename = os.path.join(self.root_dir, pak_file)
         if not os.path.isfile(filename):
             raise ReaderError("Content pak not found in content root: '{}'".format(filename))
         stream = BinaryStream(filename=filename)
         capacity = stream.read_int32()
         for _ in range(capacity):
             asset_name = stream.read_string()
             asset_size = stream.read_int32()
             asset_data = stream.read(asset_size)
             asset_name = asset_name.replace('\\', '/')
             asset_name = asset_name.lower()
             yield asset_name, asset_data
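
All of these examples lean on a BinaryStream helper that is not shown here. The sketch below covers only the calls used above and assumes .NET BinaryReader conventions (little-endian int32, 7-bit-encoded length prefix before UTF-8 string bytes); the real class in the source project may differ:

    import io
    import struct

    class BinaryStreamSketch:
        """Assumed subset of the BinaryStream interface used by find_assets()."""

        def __init__(self, data=None, filename=None):
            self._f = io.BytesIO(data) if data is not None else open(filename, 'rb')

        def read(self, size):
            return self._f.read(size)

        def read_int32(self):
            # little-endian signed 32-bit integer
            return struct.unpack('<i', self._f.read(4))[0]

        def read_string(self):
            # assumed .NET-style string: 7-bit-encoded length, then UTF-8 bytes
            length = shift = 0
            while True:
                byte = self._f.read(1)[0]
                length |= (byte & 0x7f) << shift
                if not byte & 0x80:
                    break
                shift += 7
            return self._f.read(length).decode('utf-8')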
Example No. 3
 def find_assets(self):
     for pak_file in self.content_pak_files:
         filename = os.path.join(self.root_dir, pak_file)
         if not os.path.isfile(filename):
             raise ReaderError(
                 "Content pak not found in content root: '{}'".format(
                     filename))
         stream = BinaryStream(filename=filename)
         capacity = stream.read_int32()
         for _ in range(capacity):
             asset_name = stream.read_string()
             asset_size = stream.read_int32()
             asset_data = stream.read(asset_size)
             asset_name = asset_name.replace('\\', '/')
             asset_name = asset_name.lower()
             yield asset_name, asset_data
Example No. 4
 def load(cls, data=None, filename=None, parse=True, expected_type=None):
     if filename is not None:
         filename = os.path.normpath(filename)
     stream = BinaryStream(data=data, filename=filename)
     del data
     # fixed XNB header: signature, target platform, format version, flag bits, file size
     (sig, platform, version, attribs, size) = stream.unpack(_XNB_HEADER)
     if sig != XNB_SIGNATURE:
         raise ReaderError("bad sig: '{!r}'".format(sig))
     if platform not in XNB_PLATFORMS:
         raise ReaderError("bad platform: '{!r}'".format(platform))
     if version not in XNB_VERSIONS:
         raise ReaderError("bad version: {}".format(version))
     stream_length = stream.length()
     if stream_length != size:
         raise ReaderError("bad size: {} != {}".format(stream_length, size))
     compressed = False
     profile = 0
     if version >= VERSION_40:
         profile = attribs & _PROFILE_MASK
         if profile not in XNB_PROFILES:
             raise ReaderError("bad profile: {}".format(profile))
     if version >= VERSION_30:
         compressed = bool(attribs & _COMPRESS_MASK)
         size -= stream.calc_size(_XNB_HEADER)
     if compressed:
         # compressed content: 4-byte uncompressed size, then the compressed payload
         uncomp = stream.read_int32()
         size -= 4
         content_comp = stream.read(size)
         content = decompress(content_comp, uncomp)
     else:
         content = stream.read(size)
     return cls(content, platform, version, profile, compressed, parse=parse, expected_type=expected_type)
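
The fields unpacked from _XNB_HEADER above follow the publicly documented XNB container layout: a 3-byte 'XNB' signature, a platform byte, a format-version byte, a flag byte and a uint32 total file size. A standalone sketch of the same header check using struct directly; the format string and the 0x80 compression bit are assumptions based on that public description, not the project's own constants:

    import struct

    # signature, target platform, format version, flag bits, total file size
    XNB_HEADER_SKETCH = struct.Struct('<3scBBI')

    def peek_xnb_header(raw):
        sig, platform, version, flags, size = XNB_HEADER_SKETCH.unpack_from(raw)
        if sig != b'XNB':
            raise ValueError('not an XNB file: {!r}'.format(sig))
        compressed = bool(flags & 0x80)   # compressed-content flag
        return platform, version, compressed, size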
Example No. 5
 def load(cls, data=None, filename=None, parse=True, expected_type=None):
     if filename is not None:
         filename = os.path.normpath(filename)
     stream = BinaryStream(data=data, filename=filename)
     del data
     (sig, platform, version, attribs, size) = stream.unpack(_XNB_HEADER)
     if sig != XNB_SIGNATURE:
         raise ReaderError("bad sig: '{!r}'".format(sig))
     if platform not in XNB_PLATFORMS:
         raise ReaderError("bad platform: '{!r}'".format(platform))
     if version not in XNB_VERSIONS:
         raise ReaderError("bad version: {}".format(version))
     stream_length = stream.length()
     if stream_length != size:
         raise ReaderError("bad size: {} != {}".format(stream_length, size))
     compressed = False
     profile = 0
     if version >= VERSION_40:
         profile = attribs & _PROFILE_MASK
         if profile not in XNB_PROFILES:
             raise ReaderError("bad profile: {}".format(profile))
     if version >= VERSION_30:
         compressed = bool(attribs & _COMPRESS_MASK)
         size -= stream.calc_size(_XNB_HEADER)
     if compressed:
         uncomp = stream.read_int32()
         size -= 4
         content_comp = stream.read(size)
         content = decompress(content_comp, uncomp)
     else:
         content = stream.read(size)
     return cls(content,
                platform,
                version,
                profile,
                compressed,
                parse=parse,
                expected_type=expected_type)
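
Since load takes cls and ends with a cls(...) call, it is meant to be used as a classmethod. A hypothetical call, assuming the enclosing class is named XNB and the method carries a @classmethod decorator:

    # from a file on disk
    xnb = XNB.load(filename='Content/example.xnb')

    # or from bytes already in memory
    with open('Content/example.xnb', 'rb') as fh:
        xnb = XNB.load(data=fh.read(), parse=False)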
Example No. 6
    def __init__(self, data=None, filename=None, audio_engine=None):
        self.audio_engine = audio_engine

        # open in little endian initially
        stream = BinaryStream(data=data, filename=filename)
        del data

        # check sig to find actual endianess
        h_sig = stream.peek(len(SB_L_SIGNATURE))
        if h_sig == SB_L_SIGNATURE:
            big_endian = False
        elif h_sig == SB_B_SIGNATURE:
            big_endian = True
        else:
            raise ValueError("bad sig: {!r}".format(h_sig))

        # switch stream to correct endianess
        stream.set_endian(big_endian)
        (h_sig, self.version, self.header_version, self.crc, buildtime_raw_low,
         buildtime_raw_high, self.platform, h_simple_cue_count,
         h_complex_cue_count, h_unknown_count, h_cue_name_hash_count,
         h_wave_bank_count, h_sound_count, h_cue_names_length,
         simple_cue_offset_raw, complex_cue_offset_raw, cue_name_offset_raw,
         unknown_offset_raw, variation_offset_raw, transition_offset_raw,
         wave_bank_offset_raw, cue_name_hash_offset_raw,
         cue_name_table_offset_raw, sound_offset_raw,
         h_name_raw) = stream.unpack(_SB_HEADER)
        h_simple_cue_offset = fix_offset(simple_cue_offset_raw)
        h_complex_cue_offset = fix_offset(complex_cue_offset_raw)
        h_cue_name_offset = fix_offset(cue_name_offset_raw)
        h_unknown_offset = fix_offset(unknown_offset_raw)
        h_variation_offset = fix_offset(variation_offset_raw)
        h_transition_offset = fix_offset(transition_offset_raw)
        h_wave_bank_offset = fix_offset(wave_bank_offset_raw)
        h_cue_name_hash_offset = fix_offset(cue_name_hash_offset_raw)
        h_cue_name_table_offset = fix_offset(cue_name_table_offset_raw)
        h_sound_offset = fix_offset(sound_offset_raw)
        self.name = h_name_raw.rstrip(b'\x00').decode('iso8859-1')
        del h_name_raw
        self.buildtime = filetime_to_datetime(buildtime_raw_low,
                                              buildtime_raw_high)

        self.wave_banks = []
        if h_wave_bank_count and h_wave_bank_offset:
            stream.seek(h_wave_bank_offset)
            self.wave_banks = [
                stream.read(64).rstrip(b'\x00').decode('iso8859-1')
                for _ in range(h_wave_bank_count)
            ]
        else:
            raise ReaderError("No wave banks found in sound bank")

        cue_name_hash = []
        if h_cue_name_hash_count and h_cue_name_hash_offset:
            stream.seek(h_cue_name_hash_offset)
            cue_name_hash = [
                stream.read_int16() for _ in range(h_cue_name_hash_count)
            ]
        cue_name_hash_entry = []
        if h_cue_names_length and h_cue_name_table_offset:
            stream.seek(h_cue_name_table_offset)
            cue_name_hash_entry = [
                (stream.read_int32(), stream.read_int16())
                for _ in range(h_simple_cue_count + h_complex_cue_count)
            ]
        cue_names = []
        for (name_offset, _) in cue_name_hash_entry:
            stream.seek(name_offset)
            cue_names.append(stream.read_cstring())

        self.cues = []
        self.cues_name = OrderedDict()
        if h_simple_cue_count and h_simple_cue_offset:
            stream.seek(h_simple_cue_offset)
            for i in range(h_simple_cue_count):
                cue_name = None
                if cue_names:
                    cue_name = cue_names[i]
                cue = Cue(cue_name, stream)
                self.cues.append(cue)
                if cue_name:
                    self.cues_name[cue_name] = cue
        if h_complex_cue_count and h_complex_cue_offset:
            stream.seek(h_complex_cue_offset)
            for i in range(h_complex_cue_count):
                cue_name = None
                if cue_names:
                    cue_name = cue_names[h_simple_cue_count + i]
                cue = Cue(cue_name, stream, is_complex=True)
                self.cues.append(cue)
                if cue_name:
                    self.cues_name[cue_name] = cue
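
Two helpers used above are not shown. filetime_to_datetime converts a Windows FILETIME, i.e. a 64-bit count of 100-nanosecond ticks since 1601-01-01 UTC that the header stores as separate low/high 32-bit halves; fix_offset appears to turn the format's "table not present" sentinel into a falsy value so the count-and-offset guards work. A sketch under those assumptions:

    import datetime

    _FILETIME_EPOCH = datetime.datetime(1601, 1, 1, tzinfo=datetime.timezone.utc)

    def filetime_to_datetime(low, high):
        # FILETIME: 100 ns ticks since 1601-01-01 UTC, split into two uint32 halves
        ticks = (high << 32) | low
        return _FILETIME_EPOCH + datetime.timedelta(microseconds=ticks // 10)

    def fix_offset(raw_offset):
        # assumed behaviour: -1 marks a missing table/section
        return 0 if raw_offset == -1 else raw_offset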
Example No. 7
    def __init__(self, data=None, filename=None, audio_engine=None):
        self.audio_engine = audio_engine

        # open in little endian initially
        stream = BinaryStream(data=data, filename=filename)
        del data

        # check sig to find actual endianess
        h_sig = stream.peek(len(WB_L_SIGNATURE))
        if h_sig == WB_L_SIGNATURE:
            big_endian = False
        elif h_sig == WB_B_SIGNATURE:
            big_endian = True
        else:
            raise ValueError("bad sig: {!r}".format(h_sig))

        # switch stream to correct endianess
        stream.set_endian(big_endian)
        (h_sig, self.h_version, self.h_header_version) = stream.unpack(_WB_HEADER)
        regions = {k: XWBRegion._make(stream.unpack(_WB_REGION)) for k in _REGIONS}  # pylint: disable-msg=W0212

        # check if we have a valid BANKDATA region and parse it
        bankdata_size = stream.calc_size(_WB_DATA)
        if regions['BANKDATA'].length != bankdata_size:
            raise ReaderError("Invalid BANKDATA size: {} != {}".format(regions['BANKDATA'].length, bankdata_size))
        stream.seek(regions['BANKDATA'].offset)
        (self.flags, h_entry_count, h_bank_name_raw, h_entry_metadata_element_size, h_entry_name_element_size,
         self.alignment, h_compact_format, buildtime_raw_low, buildtime_raw_high) = stream.unpack(_WB_DATA)
        self.bank_name = h_bank_name_raw.rstrip(b'\x00').decode('iso8859-1')
        del h_bank_name_raw
        self.buildtime = filetime_to_datetime(buildtime_raw_low, buildtime_raw_high)

        if self.flags & ~(WB_TYPE_MASK | WB_FLAGS_MASK):
            raise ReaderError("Unknown flags in WAVEBANK")

        # check what type of ENTRYMETADATA we have and parse it
        if self.has_compact:
            raise ReaderError("Compact format not supported")
        bankentry_size = stream.calc_size(_WB_ENTRY)
        if bankentry_size != h_entry_metadata_element_size:
            raise ReaderError("Unknown EntryMetaDataElementSize: {} != {}".format(bankentry_size,
                                                                                  h_entry_metadata_element_size))
        if regions['ENTRYMETADATA'].length != bankentry_size * h_entry_count:
            raise ReaderError("Invalid ENTRYMETADATA size: {} != {}".format(regions['ENTRYMETADATA'].length,
                                                                            bankentry_size * h_entry_count))
        stream.seek(regions['ENTRYMETADATA'].offset)
        entry_metadata = [XWBEntry._make(stream.unpack(_WB_ENTRY))  # pylint: disable-msg=W0212,E1101
                          for _ in range(h_entry_count)]

        # read ENTRYNAMES if present
        entry_names = []
        if self.has_entry_names and regions['ENTRYNAMES'].offset and regions['ENTRYNAMES'].length:
            if regions['ENTRYNAMES'].length != h_entry_name_element_size * h_entry_count:
                raise ReaderError("Invalid ENTRYNAMES region size: {} != {}".format(
                    regions['ENTRYNAMES'].length, h_entry_name_element_size * h_entry_count))
            stream.seek(regions['ENTRYNAMES'].offset)
            entry_names = [stream.read(h_entry_name_element_size).rstrip(b'\x00').decode('iso8859-1')
                           for _ in range(h_entry_count)]

        # read SEEKTABLES if present
        entry_seektables = []
        if self.has_seek_tables and regions['SEEKTABLES'].offset and regions['SEEKTABLES'].length:
            stream.seek(regions['SEEKTABLES'].offset)
            seek_offsets = []
            for _ in range(h_entry_count):
                seek_offsets.append(stream.read_int32())
            seek_data_offset = stream.tell()
            for cur_offset in seek_offsets:
                if cur_offset >= 0:
                    stream.seek(seek_data_offset + cur_offset)
                    packet_count = stream.read_uint32()
                    cur_seek_data = BinaryStream()
                    for _ in range(packet_count):
                        cur_seek_data.write_uint32(stream.read_uint32())
                    entry_seektables.append(cur_seek_data.getvalue())
                else:
                    entry_seektables.append(None)

        self.entries = []
        for i, cur_meta in enumerate(entry_metadata):
            c_entry_flags = cur_meta.flags_duration & WB_ENTRY_FLAGS_MASK
            c_duration = (cur_meta.flags_duration & WB_ENTRY_DURATION_MASK) >> 4
            c_format_tag = cur_meta.format & WB_FORMAT_TAG_MASK
            c_channels = (cur_meta.format & WB_FORMAT_CHANNELS) >> 2
            c_samples_per_sec = (cur_meta.format & WB_FORMAT_SAMPLES_PER_SEC) >> 5
            c_block_align = (cur_meta.format & WB_FORMAT_BLOCK_ALIGN) >> 23
            c_bits_per_sample = (cur_meta.format & WB_FORMAT_BITS_PER_SAMPLE) >> 31
            entry_name = None
            if entry_names:
                entry_name = entry_names[i]
            entry_dpds = None
            entry_seek = None
            extra_header = bytes()
            # build format specific header and seek data
            if c_format_tag == WB_FORMAT_TAG_PCM:
                c_format_tag = WAVE_FORMAT_PCM
                if c_bits_per_sample == 1:
                    c_bits_per_sample = 16
                else:
                    c_bits_per_sample = 8
                c_avg_bytes_per_sec = c_samples_per_sec * c_block_align
            elif c_format_tag == WB_FORMAT_TAG_ADPCM:
                c_format_tag = WAVE_FORMAT_ADPCM
                c_bits_per_sample = 4
                c_block_align = (c_block_align + ADPCM_BLOCK_ALIGN_OFFSET) * c_channels
                cx_samples_per_block = ((c_block_align - (7 * c_channels)) * 8) // (c_bits_per_sample * c_channels) + 2
                c_avg_bytes_per_sec = (c_samples_per_sec // cx_samples_per_block) * c_block_align
                cx_num_coef = len(ADPCM_COEF)
                extra_header = _ADPCM_WAVEFORMAT.pack(cx_samples_per_block, cx_num_coef)
                for coef in ADPCM_COEF:
                    extra_header += _ADPCM_WAVEFORMAT_COEF.pack(coef[0], coef[1])
            elif c_format_tag == WB_FORMAT_TAG_WMA:
                if c_bits_per_sample == 1:
                    c_format_tag = WAVE_FORMAT_WMAUDIO3
                else:
                    c_format_tag = WAVE_FORMAT_WMAUDIO2
                c_bits_per_sample = 16
                c_avg_bytes_per_sec = WMA_AVG_BYTES_PER_SEC[c_block_align >> 5]
                c_block_align = WMA_BLOCK_ALIGN[c_block_align & 0x1f]
                if entry_seektables:
                    entry_dpds = entry_seektables[i]
                else:
                    raise ReaderError("No SEEKTABLES found for xWMA format")
            elif c_format_tag == WB_FORMAT_TAG_XMA:
                # lots of placeholders in here but seems to decode ok
                c_format_tag = WAVE_FORMAT_XMA2
                c_bits_per_sample = 16
                c_avg_bytes_per_sec = 0
                cx_num_streams = 1
                if c_channels == 2:
                    cx_channel_mask = 3
                else:
                    cx_channel_mask = 0
                cx_samples_encoded = 0
                cx_bytes_per_block = 0
                cx_play_begin = 0
                cx_play_length = 0
                cx_loop_begin = 0
                cx_loop_length = 0
                cx_loop_count = 0
                cx_encoder_version = 4
                cx_block_count = 1
                extra_header = _XMA_WAVEFORMAT.pack(cx_num_streams, cx_channel_mask, cx_samples_encoded,
                                                    cx_bytes_per_block, cx_play_begin, cx_play_length, cx_loop_begin,
                                                    cx_loop_length, cx_loop_count, cx_encoder_version, cx_block_count)
                if entry_seektables:
                    entry_seek = entry_seektables[i]
                else:
                    raise ReaderError("No SEEKTABLES found for XMA2 format")
            else:
                raise ReaderError("Unhandled entry format: {}".format(c_format_tag))
            cx_size = len(extra_header)
            entry_header = _WAVEFORMATEX.pack(c_format_tag, c_channels, c_samples_per_sec, c_avg_bytes_per_sec,
                                              c_block_align, c_bits_per_sample, cx_size)
            entry_header += extra_header
            # read entry wave data
            stream.seek(regions['ENTRYWAVEDATA'].offset + cur_meta.play_offset)
            # manually swap PCM data if needed
            entry_data = stream.read(cur_meta.play_length)
            if big_endian and c_format_tag == WAVE_FORMAT_PCM and c_bits_per_sample == 16:
                entry_data = bytearray(entry_data)
                entry_data[1::2], entry_data[0::2] = entry_data[0::2], entry_data[1::2]
            self.entries.append(Entry(entry_name, entry_header, entry_data, entry_dpds, entry_seek))
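
Each Entry assembled above already carries a complete WAVEFORMATEX header (entry_header) plus the raw wave data, so writing one out as a playable file only needs a RIFF wrapper. A minimal sketch, assuming Entry exposes its constructor arguments as name, header, data, dpds and seek attributes (those attribute names are an assumption); the 'dpds' chunk only applies to xWMA entries:

    import struct

    def entry_to_wav_bytes(entry):
        def chunk(tag, payload):
            # RIFF chunk: 4-byte tag, uint32 payload length, payload, pad to even size
            data = struct.pack('<4sI', tag, len(payload)) + payload
            if len(payload) % 2:
                data += b'\x00'
            return data

        body = b'WAVE' + chunk(b'fmt ', entry.header)
        if entry.dpds:                      # xWMA seek table, if present
            body += chunk(b'dpds', entry.dpds)
        body += chunk(b'data', entry.data)
        return struct.pack('<4sI', b'RIFF', len(body)) + body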
Example No. 8
    def __init__(self, data=None, filename=None, audio_engine=None):
        self.audio_engine = audio_engine

        # open in little endian initially
        stream = BinaryStream(data=data, filename=filename)
        del data

        # check sig to find actual endianess
        h_sig = stream.peek(len(WB_L_SIGNATURE))
        if h_sig == WB_L_SIGNATURE:
            big_endian = False
        elif h_sig == WB_B_SIGNATURE:
            big_endian = True
        else:
            raise ValueError("bad sig: {!r}".format(h_sig))

        # switch stream to correct endianess
        stream.set_endian(big_endian)
        (h_sig, self.h_version,
         self.h_header_version) = stream.unpack(_WB_HEADER)
        regions = {
            k: XWBRegion._make(stream.unpack(_WB_REGION))
            for k in _REGIONS
        }  # pylint: disable-msg=W0212

        # check if we have a valid BANKDATA region and parse it
        bankdata_size = stream.calc_size(_WB_DATA)
        if regions['BANKDATA'].length != bankdata_size:
            raise ReaderError("Invalid BANKDATA size: {} != {}".format(
                regions['BANKDATA'].length, bankdata_size))
        stream.seek(regions['BANKDATA'].offset)
        (self.flags, h_entry_count, h_bank_name_raw,
         h_entry_metadata_element_size, h_entry_name_element_size,
         self.alignment, h_compact_format, buildtime_raw_low,
         buildtime_raw_high) = stream.unpack(_WB_DATA)
        self.bank_name = h_bank_name_raw.rstrip(b'\x00').decode('iso8859-1')
        del h_bank_name_raw
        self.buildtime = filetime_to_datetime(buildtime_raw_low,
                                              buildtime_raw_high)

        if self.flags & ~(WB_TYPE_MASK | WB_FLAGS_MASK):
            raise ReaderError("Unknown flags in WAVEBANK")

        # check what type of ENTRYMETADATA we have and parse it
        if self.has_compact:
            raise ReaderError("Compact format not supported")
        bankentry_size = stream.calc_size(_WB_ENTRY)
        if bankentry_size != h_entry_metadata_element_size:
            raise ReaderError(
                "Unknown EntryMetaDataElementSize: {} != {}".format(
                    bankentry_size, h_entry_metadata_element_size))
        if regions['ENTRYMETADATA'].length != bankentry_size * h_entry_count:
            raise ReaderError("Invalid ENTRYMETADATA size: {} != {}".format(
                regions['ENTRYMETADATA'].length,
                bankentry_size * h_entry_count))
        stream.seek(regions['ENTRYMETADATA'].offset)
        entry_metadata = [
            XWBEntry._make(
                stream.unpack(_WB_ENTRY))  # pylint: disable-msg=W0212,E1101
            for _ in range(h_entry_count)
        ]

        # read ENTRYNAMES if present
        entry_names = []
        if (self.has_entry_names and regions['ENTRYNAMES'].offset
                and regions['ENTRYNAMES'].length):
            if (regions['ENTRYNAMES'].length !=
                    h_entry_name_element_size * h_entry_count):
                raise ReaderError(
                    "Invalid ENTRYNAMES region size: {} != {}".format(
                        regions['ENTRYNAMES'].length,
                        h_entry_name_element_size * h_entry_count))
            stream.seek(regions['ENTRYNAMES'].offset)
            entry_names = [
                stream.read(h_entry_name_element_size).rstrip(b'\x00').decode(
                    'iso8859-1') for _ in range(h_entry_count)
            ]

        # read SEEKTABLES if present
        entry_seektables = []
        if (self.has_seek_tables and regions['SEEKTABLES'].offset
                and regions['SEEKTABLES'].length):
            stream.seek(regions['SEEKTABLES'].offset)
            seek_offsets = []
            for _ in range(h_entry_count):
                seek_offsets.append(stream.read_int32())
            seek_data_offset = stream.tell()
            for cur_offset in seek_offsets:
                if cur_offset >= 0:
                    stream.seek(seek_data_offset + cur_offset)
                    packet_count = stream.read_uint32()
                    cur_seek_data = BinaryStream()
                    for _ in range(packet_count):
                        cur_seek_data.write_uint32(stream.read_uint32())
                    entry_seektables.append(cur_seek_data.getvalue())
                else:
                    entry_seektables.append(None)

        self.entries = []
        for i, cur_meta in enumerate(entry_metadata):
            c_entry_flags = cur_meta.flags_duration & WB_ENTRY_FLAGS_MASK
            c_duration = (cur_meta.flags_duration
                          & WB_ENTRY_DURATION_MASK) >> 4
            c_format_tag = cur_meta.format & WB_FORMAT_TAG_MASK
            c_channels = (cur_meta.format & WB_FORMAT_CHANNELS) >> 2
            c_samples_per_sec = (cur_meta.format
                                 & WB_FORMAT_SAMPLES_PER_SEC) >> 5
            c_block_align = (cur_meta.format & WB_FORMAT_BLOCK_ALIGN) >> 23
            c_bits_per_sample = (cur_meta.format
                                 & WB_FORMAT_BITS_PER_SAMPLE) >> 31
            entry_name = None
            if entry_names:
                entry_name = entry_names[i]
            entry_dpds = None
            entry_seek = None
            extra_header = bytes()
            # build format specific header and seek data
            if c_format_tag == WB_FORMAT_TAG_PCM:
                c_format_tag = WAVE_FORMAT_PCM
                if c_bits_per_sample == 1:
                    c_bits_per_sample = 16
                else:
                    c_bits_per_sample = 8
                c_avg_bytes_per_sec = c_samples_per_sec * c_block_align
            elif c_format_tag == WB_FORMAT_TAG_ADPCM:
                c_format_tag = WAVE_FORMAT_ADPCM
                c_bits_per_sample = 4
                c_block_align = (c_block_align +
                                 ADPCM_BLOCK_ALIGN_OFFSET) * c_channels
                cx_samples_per_block = (
                    (c_block_align - (7 * c_channels)) *
                    8) // (c_bits_per_sample * c_channels) + 2
                c_avg_bytes_per_sec = (c_samples_per_sec //
                                       cx_samples_per_block) * c_block_align
                cx_num_coef = len(ADPCM_COEF)
                extra_header = _ADPCM_WAVEFORMAT.pack(cx_samples_per_block,
                                                      cx_num_coef)
                for coef in ADPCM_COEF:
                    extra_header += _ADPCM_WAVEFORMAT_COEF.pack(
                        coef[0], coef[1])
            elif c_format_tag == WB_FORMAT_TAG_WMA:
                if c_bits_per_sample == 1:
                    c_format_tag = WAVE_FORMAT_WMAUDIO3
                else:
                    c_format_tag = WAVE_FORMAT_WMAUDIO2
                c_bits_per_sample = 16
                c_avg_bytes_per_sec = WMA_AVG_BYTES_PER_SEC[c_block_align >> 5]
                c_block_align = WMA_BLOCK_ALIGN[c_block_align & 0x1f]
                if entry_seektables:
                    entry_dpds = entry_seektables[i]
                else:
                    raise ReaderError("No SEEKTABLES found for xWMA format")
            elif c_format_tag == WB_FORMAT_TAG_XMA:
                # lots of placeholders in here but seems to decode ok
                c_format_tag = WAVE_FORMAT_XMA2
                c_bits_per_sample = 16
                c_avg_bytes_per_sec = 0
                cx_num_streams = 1
                if c_channels == 2:
                    cx_channel_mask = 3
                else:
                    cx_channel_mask = 0
                cx_samples_encoded = 0
                cx_bytes_per_block = 0
                cx_play_begin = 0
                cx_play_length = 0
                cx_loop_begin = 0
                cx_loop_length = 0
                cx_loop_count = 0
                cx_encoder_version = 4
                cx_block_count = 1
                extra_header = _XMA_WAVEFORMAT.pack(
                    cx_num_streams, cx_channel_mask, cx_samples_encoded,
                    cx_bytes_per_block, cx_play_begin, cx_play_length,
                    cx_loop_begin, cx_loop_length, cx_loop_count,
                    cx_encoder_version, cx_block_count)
                if entry_seektables:
                    entry_seek = entry_seektables[i]
                else:
                    raise ReaderError("No SEEKTABLES found for XMA2 format")
            else:
                raise ReaderError(
                    "Unhandled entry format: {}".format(c_format_tag))
            cx_size = len(extra_header)
            entry_header = _WAVEFORMATEX.pack(c_format_tag, c_channels,
                                              c_samples_per_sec,
                                              c_avg_bytes_per_sec,
                                              c_block_align, c_bits_per_sample,
                                              cx_size)
            entry_header += extra_header
            # read entry wave data
            stream.seek(regions['ENTRYWAVEDATA'].offset + cur_meta.play_offset)
            # manually swap PCM data if needed
            entry_data = stream.read(cur_meta.play_length)
            if big_endian and c_format_tag == WAVE_FORMAT_PCM and c_bits_per_sample == 16:
                entry_data = bytearray(entry_data)
                entry_data[1::2], entry_data[0::2] = \
                    entry_data[0::2], entry_data[1::2]
            self.entries.append(
                Entry(entry_name, entry_header, entry_data, entry_dpds,
                      entry_seek))
Example No. 9
    def __init__(self, data=None, filename=None, audio_engine=None):
        self.audio_engine = audio_engine

        # open in little endian initially
        stream = BinaryStream(data=data, filename=filename)
        del data

        # check sig to find actual endianess
        h_sig = stream.peek(len(SB_L_SIGNATURE))
        if h_sig == SB_L_SIGNATURE:
            big_endian = False
        elif h_sig == SB_B_SIGNATURE:
            big_endian = True
        else:
            raise ValueError("bad sig: {!r}".format(h_sig))

        # switch stream to correct endianess
        stream.set_endian(big_endian)
        (h_sig, self.version, self.header_version, self.crc, buildtime_raw_low, buildtime_raw_high,
         self.platform, h_simple_cue_count, h_complex_cue_count, h_unknown_count, h_cue_name_hash_count,
         h_wave_bank_count, h_sound_count, h_cue_names_length, simple_cue_offset_raw, complex_cue_offset_raw,
         cue_name_offset_raw, unknown_offset_raw, variation_offset_raw, transition_offset_raw, wave_bank_offset_raw,
         cue_name_hash_offset_raw, cue_name_table_offset_raw, sound_offset_raw, h_name_raw) = stream.unpack(_SB_HEADER)
        h_simple_cue_offset = fix_offset(simple_cue_offset_raw)
        h_complex_cue_offset = fix_offset(complex_cue_offset_raw)
        h_cue_name_offset = fix_offset(cue_name_offset_raw)
        h_unknown_offset = fix_offset(unknown_offset_raw)
        h_variation_offset = fix_offset(variation_offset_raw)
        h_transition_offset = fix_offset(transition_offset_raw)
        h_wave_bank_offset = fix_offset(wave_bank_offset_raw)
        h_cue_name_hash_offset = fix_offset(cue_name_hash_offset_raw)
        h_cue_name_table_offset = fix_offset(cue_name_table_offset_raw)
        h_sound_offset = fix_offset(sound_offset_raw)
        self.name = h_name_raw.rstrip(b'\x00').decode('iso8859-1')
        del h_name_raw
        self.buildtime = filetime_to_datetime(buildtime_raw_low, buildtime_raw_high)

        self.wave_banks = []
        if h_wave_bank_count and h_wave_bank_offset:
            stream.seek(h_wave_bank_offset)
            self.wave_banks = [stream.read(64).rstrip(b'\x00').decode('iso8859-1') for _ in range(h_wave_bank_count)]
        else:
            raise ReaderError("No wave banks found in sound bank")

        cue_name_hash = []
        if h_cue_name_hash_count and h_cue_name_hash_offset:
            stream.seek(h_cue_name_hash_offset)
            cue_name_hash = [stream.read_int16() for _ in range(h_cue_name_hash_count)]
        cue_name_hash_entry = []
        if h_cue_names_length and h_cue_name_table_offset:
            stream.seek(h_cue_name_table_offset)
            cue_name_hash_entry = [(stream.read_int32(), stream.read_int16())
                                   for _ in range(h_simple_cue_count + h_complex_cue_count)]
        cue_names = []
        for (name_offset, _) in cue_name_hash_entry:
            stream.seek(name_offset)
            cue_names.append(stream.read_cstring())

        self.cues = []
        self.cues_name = OrderedDict()
        if h_simple_cue_count and h_simple_cue_offset:
            stream.seek(h_simple_cue_offset)
            for i in range(h_simple_cue_count):
                cue_name = None
                if cue_names:
                    cue_name = cue_names[i]
                cue = Cue(cue_name, stream)
                self.cues.append(cue)
                if cue_name:
                    self.cues_name[cue_name] = cue
        if h_complex_cue_count and h_complex_cue_offset:
            stream.seek(h_complex_cue_offset)
            for i in range(h_complex_cue_count):
                cue_name = None
                if cue_names:
                    cue_name = cue_names[h_simple_cue_count + i]
                cue = Cue(cue_name, stream, is_complex=True)
                self.cues.append(cue)
                if cue_name:
                    self.cues_name[cue_name] = cue
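
Once the constructor has run, cues are available positionally through cues and by name through the cues_name mapping. A hypothetical usage snippet, assuming the enclosing class is named SoundBank:

    bank = SoundBank(filename='Content/Sound Bank.xsb')
    print(bank.name, bank.buildtime, bank.wave_banks)
    for cue_name, cue in bank.cues_name.items():
        print(cue_name, cue)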