def test_byteswap(self):
    """audioop.byteswap() reverses sample byte order and is its own inverse."""
    expected = {
        1: datas[1],
        2: packs[2](0, 0x3412, 0x6745, -0x6646, -0x81, 0x80, -1),
        3: packs[3](0, 0x563412, -0x7698BB, 0x7798BA, -0x81, 0x80, -1),
        4: packs[4](0, 0x78563412, -0x547698BB, 0x557698BA, -0x81, 0x80, -1),
    }
    for width, swapped in expected.items():
        # swapping nothing yields nothing
        self.assertEqual(audioop.byteswap(b"", width), b"")
        # forward and inverse swap
        self.assertEqual(audioop.byteswap(datas[width], width), swapped)
        self.assertEqual(audioop.byteswap(swapped, width), datas[width])
        # any bytes-like object is accepted
        self.assertEqual(audioop.byteswap(bytearray(datas[width]), width), swapped)
        self.assertEqual(audioop.byteswap(memoryview(datas[width]), width), swapped)
Example #2
0
 def test_byteswap(self):
     """Check byteswap round-trips for widths 1-4 (decimal reference values)."""
     swapped = {
         1: datas[1],
         2: packs[2](0, 13330, 26437, -26182, -129, 128, -1),
         3: packs[3](0, 5649426, -7772347, 7837882, -129, 128, -1),
         4: packs[4](0, 2018915346, -1417058491, 1433835706, -129, 128, -1),
     }
     for width in (1, 2, 3, 4):
         data = datas[width]
         expected = swapped[width]
         # empty input stays empty
         self.assertEqual(audioop.byteswap(b'', width), b'')
         # swap and un-swap
         self.assertEqual(audioop.byteswap(data, width), expected)
         self.assertEqual(audioop.byteswap(expected, width), data)
         # bytes-like inputs behave the same as bytes
         self.assertEqual(audioop.byteswap(bytearray(data), width), expected)
         self.assertEqual(audioop.byteswap(memoryview(data), width), expected)
 def test_byteswap(self):
     """byteswap must be an involution and accept any bytes-like input."""
     reference = {
         1: datas[1],
         2: packs[2](0, 0x3412, 0x6745, -0x6646, -0x81, 0x80, -1),
         3: packs[3](0, 0x563412, -0x7698bb, 0x7798ba, -0x81, 0x80, -1),
         4: packs[4](0, 0x78563412, -0x547698bb, 0x557698ba, -0x81, 0x80, -1),
     }
     for w in 1, 2, 3, 4:
         swap = lambda chunk, width=w: audioop.byteswap(chunk, width)
         self.assertEqual(swap(b''), b'')
         self.assertEqual(swap(datas[w]), reference[w])
         self.assertEqual(swap(reference[w]), datas[w])
         self.assertEqual(swap(bytearray(datas[w])), reference[w])
         self.assertEqual(swap(memoryview(datas[w])), reference[w])
Example #4
0
    def get_aiff_data(self, convert_rate=None, convert_width=None):
        """
        Return a byte string holding the contents of an AIFF-C file with this ``AudioData`` instance's audio.

        If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
        If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
        Writing these bytes directly to a file results in a valid `AIFF-C file <https://en.wikipedia.org/wiki/Audio_Interchange_File_Format>`__.
        """
        raw_data = self.get_raw_data(convert_rate, convert_width)
        sample_rate = convert_rate if convert_rate is not None else self.sample_rate
        sample_width = convert_width if convert_width is not None else self.sample_width

        # AIFF is big-endian, so the little-endian raw data must be byte-swapped
        if hasattr(audioop, "byteswap"):  # ``audioop.byteswap`` exists only on Python 3.4+
            raw_data = audioop.byteswap(raw_data, sample_width)
        else:
            # fallback: reverse the bytes of each sample by hand (slower, but works)
            pieces = [raw_data[sample_width - 1::-1]]
            pieces.extend(raw_data[i + sample_width:i:-1]
                          for i in range(sample_width - 1, len(raw_data), sample_width))
            raw_data = b"".join(pieces)

        # assemble the AIFF-C container in memory
        with io.BytesIO() as aiff_file:
            writer = aifc.open(aiff_file, "wb")
            try:  # aifc gained context-manager support only in Python 3.4
                writer.setframerate(sample_rate)
                writer.setsampwidth(sample_width)
                writer.setnchannels(1)
                writer.writeframes(raw_data)
                aiff_data = aiff_file.getvalue()
            finally:  # make sure resources are cleaned up
                writer.close()
        return aiff_data
Example #5
0
def encode_adpcm_samples(in_data, channel_ct, input_big_endian=False,
                         noise_shaping=NOISE_SHAPING_OFF, lookahead=3):
    """Compress interleaved 16-bit PCM sample bytes to Xbox ADPCM.

    :param in_data: raw 16-bit PCM bytes, channels interleaved
    :param channel_ct: number of channels; anything < 1 returns b''
    :param input_big_endian: True when ``in_data`` is big-endian
    :param noise_shaping: one of the NOISE_SHAPING_* constants
    :param lookahead: encoder lookahead depth, 0-5
    :raises NotImplementedError: when the accelerator module is unavailable
    :return: the compressed ADPCM stream as bytes
    """
    assert noise_shaping in (NOISE_SHAPING_OFF, NOISE_SHAPING_STATIC, NOISE_SHAPING_DYNAMIC)
    assert lookahead in range(6)

    if channel_ct < 1:
        return b''
    elif not fast_adpcm:
        raise NotImplementedError(
            "Accelerator module not detected. Cannot compress to ADPCM.")

    # the encoder expects native-endian samples; swap when the input's
    # endianness differs from the host's.
    # BUG FIX: this previously did ``out_data = audioop.byteswap(out_data, 2)``,
    # which referenced ``out_data`` before assignment (NameError) and left
    # ``in_data`` unswapped.
    if (sys.byteorder == "big") != input_big_endian:
        in_data = audioop.byteswap(in_data, 2)

    adpcm_blocksize = constants.XBOX_ADPCM_COMPRESSED_BLOCKSIZE * channel_ct
    pcm_blocksize = constants.XBOX_ADPCM_DECOMPRESSED_BLOCKSIZE * channel_ct

    pad_size = len(in_data) % pcm_blocksize
    if pad_size:
        pad_size = pcm_blocksize - pad_size
        # repeat the last sample frame to pad to a whole number of blocks
        pad_piece_size = (channel_ct * 2)
        in_data += in_data[-pad_piece_size: ] * (pad_size // pad_piece_size)

    # pre-size the output: one compressed block per decompressed PCM block
    out_data = bytearray(
        (len(in_data) // pcm_blocksize) * adpcm_blocksize
        )

    adpcm_ext.encode_xbadpcm_samples(
        in_data, out_data, channel_ct, noise_shaping, lookahead)

    return bytes(out_data)
    def raw_sample_data(self):
        """Return a native-endian linear buffer of samples (as expected by python wavfile)."""
        # the main 16-bit portion of every sample
        higher_part = self.sf2parser.read(self.duration * 2,
                                          pos=self.smpl_offset +
                                          self.start * 2)

        # soundfont smpl samples are packed as 16 bits little-endian; switch
        # byte order if our system is big-endian
        if sys.byteorder == 'big':
            higher_part = audioop.byteswap(higher_part, 2)

        # 16-bit only: the top 16 bits are the whole sample
        if self.sm24_offset is None:
            return higher_part

        # else read the complementary 8 bits
        lower_part = self.sf2parser.read(self.duration,
                                         pos=self.sm24_offset + self.start)

        # merge into 24-bit samples, 3 bytes per sample
        result = bytearray(self.duration * 3)
        for idx in range(self.duration):
            # BUG FIX: the slice was ``result[idx * 3:idx * +1]`` — a garbled,
            # usually-empty span that *inserted* a byte (growing the buffer and
            # corrupting the layout) and copied only one of the two 16-bit
            # bytes.  Copy both bytes of the 16-bit part instead.
            result[idx * 3:idx * 3 + 2] = higher_part[idx * 2:idx * 2 + 2]
            # NOTE(review): this layout places the extra 8 bits *after* the
            # 16-bit word — confirm against the consumer's expected 24-bit
            # byte order.
            result[idx * 3 + 2] = lower_part[idx]

        return result
Example #7
0
 def modulate_amp(
     self, modulation_source: Union[Oscillator, Sequence[float], 'Sample',
                                    Iterator[float]]
 ) -> 'Sample':
     """
     Perform amplitude modulation by another waveform or oscillator.
     You can use a Sample (or array of sample values) or an oscillator as modulator.
     If you use a Sample (or array), it will be cycled if needed and its maximum amplitude
     is scaled to be 1.0, effectively using it as if it was an oscillator.
     """
     if self.__locked:
         raise RuntimeError("cannot modify a locked sample")
     frames = self.get_frame_array()
     if isinstance(modulation_source, (Sample, list, array.array)):
         # waveform data: cycle it endlessly and normalize its peak to 1.0
         if isinstance(modulation_source, Sample):
             modulation_source = modulation_source.get_frame_array()
         peak = max(max(modulation_source), abs(min(modulation_source)))
         modulator = (sample / peak
                      for sample in itertools.cycle(modulation_source)
                      )  # type: ignore
     elif isinstance(modulation_source, Oscillator):
         # oscillator: flatten its block stream into one value stream
         modulator = itertools.chain.from_iterable(
             modulation_source.blocks())  # type: ignore
     else:
         # anything else is assumed to already be an iterable of factors
         modulator = iter(modulation_source)  # type: ignore
     for idx, frame in enumerate(frames):
         frames[idx] = int(frame * next(modulator))
     self.__frames = frames.tobytes()
     if sys.byteorder == "big":
         self.__frames = audioop.byteswap(self.__frames, self.__samplewidth)
     return self
Example #8
0
        def read(self, size=-1):
            """Read ``size`` frames (-1 = all remaining) and return them as
            little-endian mono sample bytes."""
            buffer = self.audio_reader.readframes(
                self.audio_reader.getnframes() if size == -1 else size)
            if not isinstance(buffer, bytes):
                buffer = b""  # workaround for https://bugs.python.org/issue24608

            sample_width = self.audio_reader.getsampwidth()
            if not self.little_endian:  # big endian format, convert to little endian on the fly
                if hasattr(
                        audioop, "byteswap"
                ):  # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that also means that we don't need to worry about 24-bit audio being unsupported, since Python 3.4+ always has that functionality)
                    buffer = audioop.byteswap(buffer, sample_width)
                else:  # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
                    buffer = buffer[sample_width - 1::-1] + b"".join(
                        buffer[i + sample_width:i:-1]
                        for i in range(sample_width -
                                       1, len(buffer), sample_width))

            # workaround for https://bugs.python.org/issue12866
            if self.samples_24_bit_pretending_to_be_32_bit:  # we need to convert samples from 24-bit to 32-bit before we can process them with ``audioop`` functions
                # BUG FIX: the prepended zero must be a *bytes* literal —
                # joining the str "\x00" with bytes raised TypeError.
                buffer = b"".join(
                    b"\x00" + buffer[i:i + sample_width]
                    for i in range(0, len(buffer), sample_width)
                )  # since we're in little endian, we prepend a zero byte to each 24-bit sample to get a 32-bit sample
                # BUG FIX: the widened samples are now 4 bytes each; without
                # this, tomono() below would mis-frame the data.
                sample_width = 4
            if self.audio_reader.getnchannels() != 1:  # stereo audio
                buffer = audioop.tomono(buffer, sample_width, 1,
                                        1)  # convert stereo audio data to mono
            return buffer
Example #9
0
    def get_aiff_data(self, convert_rate = None, convert_width = None):
        """
        Returns a byte string representing the contents of an AIFF-C file containing the audio represented by the ``AudioData`` instance.

        If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.

        If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.

        Writing these bytes directly to a file results in a valid `AIFF-C file <https://en.wikipedia.org/wiki/Audio_Interchange_File_Format>`__.
        """
        raw_data = self.get_raw_data(convert_rate, convert_width)
        # fall back to the instance's native rate/width when no conversion was requested
        sample_rate = self.sample_rate if convert_rate is None else convert_rate
        sample_width = self.sample_width if convert_width is None else convert_width

        # the AIFF format is big-endian, so we need to convert the little-endian raw data to big-endian
        if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4
            raw_data = audioop.byteswap(raw_data, sample_width)
        else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
            raw_data = raw_data[sample_width - 1::-1] + b"".join(raw_data[i + sample_width:i:-1] for i in range(sample_width - 1, len(raw_data), sample_width))

        # generate the AIFF-C file contents
        with io.BytesIO() as aiff_file:
            aiff_writer = aifc.open(aiff_file, "wb")
            try: # note that we can't use context manager, since that was only added in Python 3.4
                aiff_writer.setframerate(sample_rate)
                aiff_writer.setsampwidth(sample_width)
                aiff_writer.setnchannels(1)  # output is always mono
                aiff_writer.writeframes(raw_data)
                aiff_data = aiff_file.getvalue()
            finally:  # make sure resources are cleaned up
                aiff_writer.close()
        return aiff_data
        def read(self, size=-1):
            # Read ``size`` frames (-1 = all remaining) from the underlying
            # audio reader, normalizing byte order and width on the fly.
            # (Original comments were in Chinese; translated to English.)
            buffer = self.audio_reader.readframes(
                self.audio_reader.getnframes() if size == -1 else size)
            if not isinstance(buffer, bytes):
                buffer = b""  # workaround for https://bugs.python.org/issue24608

            sample_width = self.audio_reader.getsampwidth()
            if not self.little_endian:  # big-endian data: convert to little-endian on the fly
                if hasattr(
                        audioop, "byteswap"
                ):  # ``audioop.byteswap`` was only added in Python 3.4 (3.4+ also handles 24-bit)
                    buffer = audioop.byteswap(buffer, sample_width)
                else:  # manually reverse the bytes of each sample, which is slower
                    buffer = buffer[sample_width - 1::-1] + b"".join(
                        buffer[i + sample_width:i:-1]
                        for i in range(sample_width -
                                       1, len(buffer), sample_width))

            # workaround for https://bugs.python.org/issue12866
            if self.samples_24_bit_pretending_to_be_32_bit:  # widen 24-bit samples to 32-bit so ``audioop`` can process them
                buffer = b"".join(b"\x00" + buffer[i:i + sample_width]
                                  for i in range(0, len(buffer), sample_width)
                                  )  # little-endian, so prepending a zero byte to each 24-bit sample yields a 32-bit sample
                sample_width = 4  # from here on the buffer is 32-bit audio
            if self.audio_reader.getnchannels() != 1:  # stereo audio
                buffer = audioop.tomono(buffer, sample_width, 1,
                                        1)  # mix stereo down to mono
            return buffer
class WavePCM24Test(WaveTest, unittest.TestCase):
    """Test fixture parameters for a 24-bit PCM WAV file."""
    sndfilename = 'pluck-pcm24.wav'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 3
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    # expected first frames, written big-endian below
    frames = bytes.fromhex("""\
      022D65FFEB9D 4B5A0F00FA54 3113C304EE2B 80DCD6084303 \
      CBDEC006B261 48A99803F2F8 BFE82401B07D 036BFBFE7B5D \
      B85756FA3EC9 B4B055F3502B 299830EBCB62 1A5CA7E6D99A \
      EDFA3EE491BD C625EBE27884 0E05A9E0B6CF EF2929E02922 \
      5758D8E27067 FB3557E83E16 1377BFEF8402 D82C5BF7272A \
      978F16FB7745 F5F865FC1013 086635FB9C4E DF30FCFB40EE \
      117FE0FA3438 3EE6B8FB5AC3 BC77A3FCB2F4 66D6DAFF5F32 \
      CF13B9041275 431D69097A8C C1BB600EC74E 5120B912A2BA \
      EEDF641754C0 8207001664B7 7FFFFF14453F 8000001294E6 \
      499C1B0EB3B2 52B73E0DBCA0 EFB2B20F5FD8 CE3CDB0FBE12 \
      E4B49C0CEA2D 6344A80A5A7C 08C8FE0A1FFE 2BB9860B0A0E \
      51486F0E44E1 8BCC64113B05 B6F4EC0EEB36 4413170A5B48 \
      """)
    if sys.byteorder != 'big':
        # the hex constant above is big-endian; convert to native order once
        frames = byteswap(frames, 3)
class WavePCM32Test(WaveTest, unittest.TestCase):
    """Test fixture parameters for a 32-bit PCM WAV file."""
    sndfilename = 'pluck-pcm32.wav'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 4
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    # expected first frames, written big-endian below
    frames = bytes.fromhex("""\
      022D65BCFFEB9D92 4B5A0F8000FA549C 3113C34004EE2BC0 80DCD680084303E0 \
      CBDEC0C006B26140 48A9980003F2F8FC BFE8248001B07D92 036BFB60FE7B5D34 \
      B8575600FA3EC920 B4B05500F3502BC0 29983000EBCB6240 1A5CA7A0E6D99A60 \
      EDFA3E80E491BD40 C625EB80E27884A0 0E05A9A0E0B6CFE0 EF292940E0292280 \
      5758D800E2706700 FB3557D8E83E1640 1377BF00EF840280 D82C5B80F7272A80 \
      978F1600FB774560 F5F86510FC101364 086635A0FB9C4E20 DF30FC40FB40EE28 \
      117FE0A0FA3438B0 3EE6B840FB5AC3F0 BC77A380FCB2F454 66D6DA80FF5F32B4 \
      CF13B980041275B0 431D6980097A8C00 C1BB60000EC74E00 5120B98012A2BAA0 \
      EEDF64C01754C060 820700001664B780 7FFFFFFF14453F40 800000001294E6E0 \
      499C1B000EB3B270 52B73E000DBCA020 EFB2B2E00F5FD880 CE3CDB400FBE1270 \
      E4B49CC00CEA2D90 6344A8800A5A7CA0 08C8FE800A1FFEE0 2BB986C00B0A0E00 \
      51486F800E44E190 8BCC6480113B0580 B6F4EC000EEB3630 441317800A5B48A0 \
      """)
    if sys.byteorder != 'big':
        # the hex constant above is big-endian; convert to native order once
        frames = byteswap(frames, 4)
Example #13
0
 def from_array(cls,
                array_or_list: Sequence[Union[int, float]],
                samplerate: int,
                numchannels: int,
                name: str = "") -> 'Sample':
     """Build a Sample from a list/array of integer sample values."""
     assert 1 <= numchannels <= 2
     assert samplerate > 1
     if isinstance(array_or_list, list):
         # pick the smallest array width that can hold the values
         try:
             array_or_list = cls.get_array(2, array_or_list)
         except OverflowError:
             array_or_list = cls.get_array(4, array_or_list)
     elif numpy:
         # numpy is available: only integer ndarrays are acceptable
         if isinstance(array_or_list, numpy.ndarray) and any(array_or_list) \
                 and not isinstance(array_or_list[0], (int, numpy.integer)):
             raise TypeError("the sample values must be integer")
     else:
         # plain sequences must contain ints
         if any(array_or_list) and type(array_or_list[0]) is not int:
             raise TypeError("the sample values must be integer")
     samplewidth = array_or_list.itemsize
     assert 2 <= samplewidth <= 4
     frames = array_or_list.tobytes()
     if sys.byteorder == "big":
         frames = audioop.byteswap(frames, samplewidth)
     return Sample.from_raw_frames(frames,
                                   samplewidth,
                                   samplerate,
                                   numchannels,
                                   name=name)
Example #14
0
 def writeframesraw(self, data):
     """Write raw sample data, converting to the on-disk byte order as needed."""
     if not isinstance(data, (bytes, bytearray)):
         # accept any buffer-protocol object
         data = memoryview(data).cast('B')
     self._ensure_header_written(len(data))
     frame_count = len(data) // (self._sampwidth * self._nchannels)
     if self._convert:
         data = self._convert(data)
     # multi-byte samples are stored little-endian (hence the swap on big-endian hosts)
     needs_swap = self._sampwidth != 1 and sys.byteorder == 'big'
     if needs_swap:
         data = audioop.byteswap(data, self._sampwidth)
     self._file.write(data)
     self._datawritten += len(data)
     self._nframeswritten += frame_count
 def writeframesraw(self, data):
     """Write one batch of raw frames and update the written-bytes/frames counters."""
     if not isinstance(data, (bytes, bytearray)):
         data = memoryview(data).cast('B')
     size = len(data)
     self._ensure_header_written(size)
     # frame count is based on the incoming (pre-conversion) byte length
     nframes = size // (self._sampwidth * self._nchannels)
     if self._convert:
         data = self._convert(data)
     # byte-swap multi-byte samples on big-endian hosts
     if self._sampwidth != 1 and sys.byteorder == 'big':
         data = audioop.byteswap(data, self._sampwidth)
     self._file.write(data)
     self._datawritten += len(data)
     self._nframeswritten = self._nframeswritten + nframes
Example #16
0
class WavePCM16Test(WaveTest, unittest.TestCase):
    """Test fixture parameters for a 16-bit PCM WAV file."""
    sndfilename = 'pluck-pcm16.wav'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 2
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    # expected first frames, written big-endian below
    frames = bytes.fromhex(
        '      022EFFEA 4B5C00F9 311404EF 80DC0843 CBDF06B2 48AA03F3 BFE701B2 036BFE7C       B857FA3E B4B2F34F 2999EBCA 1A5FE6D7 EDFCE491 C626E279 0E05E0B8 EF27E02D       5754E275 FB31E843 1373EF89 D827F72C 978BFB7A F5F7FC11 0866FB9C DF30FB42       117FFA36 3EE4FB5D BC75FCB6 66D5FF5F CF16040E 43220978 C1BC0EC8 511F12A4       EEDF1755 82061666 7FFF1446 80001296 499C0EB2 52BA0DB9 EFB70F5C CE400FBC       E4B50CEB 63440A5A 08CA0A1F 2BBA0B0B 51460E47 8BCB113C B6F50EEA 44150A59       '
    )
    if sys.byteorder != 'big':
        # the hex constant above is big-endian; convert to native order once
        frames = byteswap(frames, 2)
Example #17
0
class AifcALAWTest(AifcTest, unittest.TestCase):
    """Test fixture parameters for an A-law compressed AIFF-C file."""
    sndfilename = 'pluck-alaw.aifc'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 2
    framerate = 11025
    nframes = 48
    comptype = b'alaw'
    compname = b''
    # expected first frames after decompression, written big-endian below
    frames = bytes.fromhex(
        '      0230FFE8 4A0000F8 310004E0 82000840 CB0006A0 4A0003F0 BE0001A8 0370FE78       BA00FA20 B600F340 2900EB80 1A80E680 ED80E480 C700E280 0E40E080 EF80E080       5600E280 FB20E880 1380EF80 D900F740 9600FB60 F5C0FC10 0840FBA0 DF00FB20       1180FA20 3F00FB60 BE00FCB0 6600FF58 CF000420 42000940 C1000EC0 52001280       EE801780 82001680 7E001480 82001280 4A000EC0 52000DC0 EF800F40 CF000FC0       E4800CC0 62000A40 08C00A40 2B000B40 52000E40 8A001180 B6000EC0 46000A40       '
    )
    if sys.byteorder != 'big':
        # the hex constant above is big-endian; convert to native order once
        frames = byteswap(frames, 2)
Example #18
0
class AifcULAWTest(AifcTest, unittest.TestCase):
    """Test fixture parameters for a u-law compressed AIFF-C file."""
    sndfilename = 'pluck-ulaw.aifc'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 2
    framerate = 11025
    nframes = 48
    comptype = b'ulaw'
    compname = b''
    # expected first frames after decompression, written big-endian below
    frames = bytes.fromhex(
        '      022CFFE8 497C0104 307C04DC 8284083C CB84069C 497C03DC BE8401AC 036CFE74       B684FA24 B684F344 2A7CEC04 19FCE704 EE04E504 C584E204 0E3CE104 EF04DF84       557CE204 FB24E804 12FCEF04 D784F744 9684FB64 F5C4FC24 083CFBA4 DF84FB24       11FCFA24 3E7CFB64 BA84FCB4 657CFF5C CF84041C 417C093C C1840EBC 517C12FC       EF0416FC 828415FC 7D7C13FC 828412FC 497C0EBC 517C0DBC F0040F3C CD840FFC       E5040CBC 617C0A3C 08BC0A3C 2C7C0B3C 517C0E3C 8A8410FC B6840EBC 457C0A3C       '
    )
    if sys.byteorder != 'big':
        # the hex constant above is big-endian; convert to native order once
        frames = byteswap(frames, 2)
Example #19
0
 def from_file(cls, audio_file, audio_format=wave):
     ''' Read an audio file.
         :param audio_file: Path to audio file, or a file-like object
         :param audio_format: Python module to read audio data,
             one of wave, aifc, or sunau.
     '''
     with audio_format.open(audio_file, 'rb') as audio:
         params = audio.getparams()
         frames = audio.readframes(params.nframes)
         is_wave = audio_format is wave
         if is_wave and params.sampwidth == 1:
             # 8-bit WAV data is unsigned; re-center it around zero
             frames = audioop.bias(frames, 1, 0x80)
         if not is_wave and sys.byteorder == 'little':
             # aifc/sunau store samples big-endian; swap to native order
             frames = audioop.byteswap(frames, params.sampwidth)
         return cls(params, frames)
Example #20
0
 def writeframesraw(self, data):
     """Write raw frames: normalize the buffer, convert, byte-swap, and track counters."""
     if not isinstance(data, (bytes, bytearray)):
         data = memoryview(data).cast('B')
     self._ensure_header_written(len(data))
     # frame count uses the incoming (pre-conversion) byte length
     nframes = len(data) // (self._sampwidth * self._nchannels)
     if self._convert:
         data = self._convert(data)
     # multi-byte samples are byte-swapped on big-endian hosts.
     # NOTE(review): the original carried a FIXME about handling this for
     # ADPCM-compressed data — that interaction is still unverified.
     if self._sampwidth != 1 and sys.byteorder == 'big':
         data = audioop.byteswap(data, self._sampwidth)
     self._file.write(data)
     self._datawritten += len(data)
     self._nframeswritten = self._nframeswritten + nframes
Example #21
0
def main():
    """Download the newest e-mail, byte-swap its audio attachment and play it."""
    email_message = download_email()
    wave_file = None
    for part in email_message.walk():
        if part.get_content_maintype() == 'audio':
            wave_file = tempfile.NamedTemporaryFile()
            wave_file.write(part.get_payload(decode=True))
            wave_file.flush()  # BUG FIX: unflushed bytes were invisible to wave.open below

    # BUG FIX: previously a message without an audio part crashed with
    # NameError on the unbound ``wave_file``.
    if wave_file is None:
        return

    wave_read = wave.open(wave_file.name, 'rb')

    swapped_wave_file = tempfile.NamedTemporaryFile(suffix='.wav')
    wave_write = wave.open(swapped_wave_file.name, 'wb')
    try:
        wave_write.setparams(wave_read.getparams())
        wave_write.writeframes(
            audioop.byteswap(wave_read.readframes(wave_read.getnframes()),
                             wave_read.getsampwidth()))
    finally:
        # BUG FIX: close the writer so the WAV header is finalized (and the
        # reader released) before handing the file to the player.
        wave_write.close()
        wave_read.close()

    subprocess.call(
        ['open', '-a', '/Applications/MPlayerX.app', swapped_wave_file.name])
    time.sleep(1)
Example #22
0
 def readframes(self, nframes):
     """Read up to ``nframes`` frames from the data chunk and return them as bytes."""
     if self._data_seek_needed:
         # re-seek the data chunk to the current logical sound position
         offset = self._soundpos * self._framesize
         self._data_chunk.seek(0, 0)
         if offset:
             self._data_chunk.seek(offset, 0)
         self._data_seek_needed = 0
     if nframes == 0:
         return b''
     data = self._data_chunk.read(nframes * self._framesize)
     # on-disk samples are little-endian; swap to native on big-endian hosts
     if self._sampwidth != 1 and sys.byteorder == 'big':
         data = audioop.byteswap(data, self._sampwidth)
     if self._convert and data:
         data = self._convert(data)
     # advance by however many frames were actually read
     self._soundpos += len(data) // (self._nchannels * self._sampwidth)
     return data
 def readframes(self, nframes):
     # Read and return up to ``nframes`` frames of audio data as bytes.
     if self._data_seek_needed:
         # reposition the data chunk to the current logical sound position
         self._data_chunk.seek(0, 0)
         pos = self._soundpos * self._framesize
         if pos:
             self._data_chunk.seek(pos, 0)
         self._data_seek_needed = 0
     if nframes == 0:
         return b''
     data = self._data_chunk.read(nframes * self._framesize)
     # on-disk samples are little-endian; swap to native on big-endian hosts
     if self._sampwidth != 1 and sys.byteorder == 'big':
         data = audioop.byteswap(data, self._sampwidth)
     if self._convert and data:
         data = self._convert(data)
     # advance by however many frames were actually read
     self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
     return data
Example #24
0
        def read(self, size = -1):
            """Read ``size`` frames (-1 = all remaining) and return them as little-endian mono sample bytes."""
            buffer = self.audio_reader.readframes(self.audio_reader.getnframes() if size == -1 else size)
            if not isinstance(buffer, bytes): buffer = b"" # workaround for https://bugs.python.org/issue24608

            sample_width = self.audio_reader.getsampwidth()
            if not self.little_endian: # big endian format, convert to little endian on the fly
                if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that also means that we don't need to worry about 24-bit audio being unsupported, since Python 3.4+ always has that functionality)
                    buffer = audioop.byteswap(buffer, sample_width)
                else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
                    buffer = buffer[sample_width - 1::-1] + b"".join(buffer[i + sample_width:i:-1] for i in range(sample_width - 1, len(buffer), sample_width))

            # workaround for https://bugs.python.org/issue12866
            if self.samples_24_bit_pretending_to_be_32_bit: # we need to convert samples from 24-bit to 32-bit before we can process them with ``audioop`` functions
                # BUG FIX: the prepended zero must be a *bytes* literal — joining the str "\x00" with bytes raised TypeError
                buffer = b"".join(b"\x00" + buffer[i:i + sample_width] for i in range(0, len(buffer), sample_width)) # since we're in little endian, we prepend a zero byte to each 24-bit sample to get a 32-bit sample
                sample_width = 4 # BUG FIX: samples are now 4 bytes wide; without this tomono() below would mis-frame the data
            if self.audio_reader.getnchannels() != 1: # stereo audio
                buffer = audioop.tomono(buffer, sample_width, 1, 1) # convert stereo audio data to mono
            return buffer
Example #25
0
 def to_file(self, audio_file, audio_format=wave, compression=None):
     ''' Write an audio file.
         :param audio_file: Path to audio file, or a file-like object
         :param audio_format: Python module to write audio data,
             one of wave, aifc, or sunau.
     '''
     with audio_format.open(audio_file, 'wb') as audio:
         params = self.params
         audio.setnchannels(params.nchannels)
         audio.setsampwidth(params.sampwidth)
         audio.setframerate(params.framerate)
         audio.setnframes(params.nframes)
         if compression:
             audio.setcomptype(compression)
         if audio_format is wave:
             # wave output goes through _flip_sign() first
             frames = self._flip_sign().frames
         elif sys.byteorder == 'little':
             # aifc/sunau want big-endian samples on disk
             frames = audioop.byteswap(self.frames, self.params.sampwidth)
         else:
             frames = self.frames
         audio.writeframesraw(frames)
Example #26
0
 def fadeout(self, seconds, target_volume=0.0):
     """Fade the end of the sample out to the target volume (usually zero) in the given time."""
     assert not self.__locked
     faded = Sample.get_array(self.__samplewidth)
     seconds = min(seconds, self.duration)  # cannot fade longer than the sample lasts
     i = self.frame_idx(self.duration-seconds)
     begin = self.__frames[:i]
     end = self.__frames[i:]  # we fade this chunk
     numsamples = len(end)/self.__samplewidth
     decrease = 1-target_volume
     for i in range(int(numsamples)):
         # linear ramp from full volume down to target_volume
         amplitude = 1-(i/numsamples)*decrease
         s = audioop.getsample(end, self.__samplewidth, i)
         faded.append(int(s*amplitude))
     end = faded.tobytes()
     if sys.byteorder == "big":
         # NOTE(review): frames appear to be kept little-endian — hence the swap on big-endian hosts
         end = audioop.byteswap(end, self.__samplewidth)
     self.__frames = begin + end
     return self
Example #27
0
def main():
    """Download the newest e-mail, byte-swap its audio attachment and play it."""
    email_message = download_email()
    wave_file = None
    for part in email_message.walk():
        if part.get_content_maintype() == 'audio':
            wave_file = tempfile.NamedTemporaryFile()
            wave_file.write(part.get_payload(decode=True))
            wave_file.flush()  # BUG FIX: unflushed bytes were invisible to wave.open below

    # BUG FIX: previously a message without an audio part crashed with
    # NameError on the unbound ``wave_file``.
    if wave_file is None:
        return

    wave_read = wave.open(wave_file.name, 'rb')

    swapped_wave_file = tempfile.NamedTemporaryFile(suffix='.wav')
    wave_write = wave.open(swapped_wave_file.name, 'wb')
    try:
        wave_write.setparams(wave_read.getparams())
        wave_write.writeframes(
            audioop.byteswap(wave_read.readframes(wave_read.getnframes()),
                             wave_read.getsampwidth()))
    finally:
        # BUG FIX: close the writer so the WAV header is finalized (and the
        # reader released) before handing the file to the player.
        wave_write.close()
        wave_read.close()

    subprocess.call(
        ['open', '-a', '/Applications/MPlayerX.app', swapped_wave_file.name])
    time.sleep(1)
Example #28
0
 def fadein(self, seconds, start_volume=0.0):
     """Fade the start of the sample in from the starting volume (usually zero) in the given time."""
     assert not self.__locked
     faded = Sample.get_array(self.__samplewidth)
     seconds = min(seconds, self.duration)  # cannot fade longer than the sample lasts
     i = self.frame_idx(seconds)
     begin = self.__frames[:i]  # we fade this chunk
     end = self.__frames[i:]
     numsamples = len(begin)/self.__samplewidth
     increase = 1-start_volume
     for i in range(int(numsamples)):
         # linear ramp from start_volume up to full volume
         amplitude = i*increase/numsamples+start_volume
         s = audioop.getsample(begin, self.__samplewidth, i)
         faded.append(int(s*amplitude))
     begin = faded.tobytes()
     if sys.byteorder == "big":
         # NOTE(review): frames appear to be kept little-endian — hence the swap on big-endian hosts
         begin = audioop.byteswap(begin, self.__samplewidth)
     self.__frames = begin + end
     return self
Example #29
0
class SunauULAWTest(SunauTest, unittest.TestCase):
    # u-law compressed Sun audio fixture and the header parameters the
    # inherited SunauTest checks are expected to read from it.
    sndfilename = 'pluck-ulaw.au'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 2
    framerate = 11025
    nframes = 48
    comptype = 'ULAW'
    compname = 'CCITT G.711 u-law'
    # Expected decoded sample data, hex-encoded 16-bit frames.
    frames = bytes.fromhex("""\
      022CFFE8 497C00F4 307C04DC 8284083C CB84069C 497C03DC BE8401AC 036CFE74 \
      B684FA24 B684F344 2A7CEC04 19FCE704 EE04E504 C584E204 0E3CE104 EF04DF84 \
      557CE204 FB24E804 12FCEF04 D784F744 9684FB64 F5C4FC24 083CFBA4 DF84FB24 \
      11FCFA24 3E7CFB64 BA84FCB4 657CFF5C CF84041C 417C09BC C1840EBC 517C12FC \
      EF0416FC 828415FC 7D7C13FC 828412FC 497C0EBC 517C0DBC F0040F3C CD840FFC \
      E5040CBC 617C0A3C 08BC0A3C 2C7C0B3C 517C0E3C 8A8410FC B6840EBC 457C0A3C \
      """)
    # The hex data is laid out big-endian; swap it to native order on
    # little-endian hosts so comparisons match what sunau decodes.
    if sys.byteorder != 'big':
        frames = byteswap(frames, 2)
Example #30
0
 def fadeout(self, seconds, target_volume=0.0):
     """Fade the end of the sample out to the target volume (usually zero) in the given time."""
     assert not self.__locked
     seconds = min(seconds, self.duration)
     split_at = self.frame_idx(self.duration - seconds)
     head = self.__frames[:split_at]
     tail = self.__frames[split_at:]   # this chunk gets faded out
     width = self.__samplewidth
     total = len(tail) / width
     drop = 1.0 - target_volume
     getsample = audioop.getsample   # hoisted for the comprehension below
     scaled = [int(getsample(tail, width, n) * (1.0 - n * drop / total))
               for n in range(int(total))]
     faded = Sample.get_array(width, scaled)
     tail = faded.tobytes()
     if sys.byteorder == "big":
         tail = audioop.byteswap(tail, width)
     self.__frames = head + tail
     return self
Example #31
0
 def fadein(self, seconds, start_volume=0.0):
     """Fade the start of the sample in from the starting volume (usually zero) in the given time."""
     assert not self.__locked
     seconds = min(seconds, self.duration)
     split_at = self.frame_idx(seconds)
     head = self.__frames[:split_at]   # this chunk gets faded in
     tail = self.__frames[split_at:]
     width = self.__samplewidth
     total = len(head) / width
     step = (1.0 - start_volume) / total   # per-sample gain increment
     getsample = audioop.getsample   # hoisted for the comprehension below
     scaled = [int(getsample(head, width, n) * (n * step + start_volume))
               for n in range(int(total))]
     faded = Sample.get_array(width, scaled)
     head = faded.tobytes()
     if sys.byteorder == "big":
         head = audioop.byteswap(head, width)
     self.__frames = head + tail
     return self
Example #32
0
 def from_array(cls, array_or_list, samplerate, numchannels):
     """Create a Sample from an array (or list) of integer sample values.

     Lists are packed into a 16-bit array, widening to 32 bits on overflow.
     Only mono or stereo is accepted, and samplerate must be greater than 1.
     """
     assert 1 <= numchannels <= 2
     assert samplerate > 1
     if isinstance(array_or_list, list):
         try:
             # prefer compact 16-bit storage; fall back to 32-bit on overflow
             array_or_list = Sample.get_array(2, array_or_list)
         except OverflowError:
             array_or_list = Sample.get_array(4, array_or_list)
     elif numpy:
         # numpy available: only validate element type for ndarrays;
         # other array-likes pass through unchecked here
         if isinstance(array_or_list, numpy.ndarray) and any(array_or_list):
             if not isinstance(array_or_list[0], (int, numpy.integer)):
                 raise TypeError("the sample values must be integer")
     else:
         # no numpy: any non-zero sequence must hold plain Python ints
         if any(array_or_list):
             if type(array_or_list[0]) is not int:
                 raise TypeError("the sample values must be integer")
     samplewidth = array_or_list.itemsize
     assert 2 <= samplewidth <= 4
     frames = array_or_list.tobytes()
     if sys.byteorder == "big":
         # presumably downstream expects little-endian frames — swap on big-endian hosts
         frames = audioop.byteswap(frames, samplewidth)
     return Sample.from_raw_frames(frames, samplewidth, samplerate, numchannels)
Example #33
0
        def read(self, size=-1):
            """Read up to *size* frames (all frames when ``size == -1``) from the
            underlying audio reader and return them as little-endian mono bytes.
            """
            # workaround for https://bugs.python.org/issue24608
            buffer = self.audio_reader.readframes(self.audio_reader.getnframes() if size == -1 else size)
            if not isinstance(buffer, bytes):
                buffer = b""

            sample_width = self.audio_reader.getsampwidth()
            # big endian format, convert to little endian on the fly
            if not self.little_endian:
                # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that also means that we don't
                # need to worry about 24-bit audio being unsupported, since Python 3.4+ always has that functionality)
                if hasattr(audioop, "byteswap"):
                    buffer = audioop.byteswap(buffer, sample_width)
                else:
                    # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
                    buffer = buffer[sample_width - 1::-1] + b"".join(
                        buffer[i + sample_width:i:-1] for i in range(sample_width - 1, len(buffer), sample_width))

            # convert stereo audio data to mono
            if self.audio_reader.getnchannels() != 1:
                buffer = audioop.tomono(buffer, sample_width, 1, 1)
            return buffer
Example #34
0
 def modulate_amp(self, modulator):
     """
     Amplitude-modulate this sample by another waveform or an oscillator.
     The modulator may be a Sample, a list/array of sample values, or an
     oscillator. Waveform modulators are cycled as needed and normalized so
     their peak amplitude is 1.0, making them behave like an oscillator.
     """
     assert not self.__locked
     frames = self.get_frame_array()
     if isinstance(modulator, (Sample, list, array.array)):
         # waveform modulator: normalize to peak 1.0 and cycle endlessly
         if isinstance(modulator, Sample):
             modulator = modulator.get_frame_array()
         peak = max(max(modulator), abs(min(modulator)))
         factors = (v / peak for v in itertools.cycle(modulator))
     else:
         factors = iter(modulator)
     for idx, value in enumerate(frames):
         frames[idx] = int(value * next(factors))
     self.__frames = frames.tobytes()
     if sys.byteorder == "big":
         self.__frames = audioop.byteswap(self.__frames, self.__samplewidth)
     return self
Example #35
0
 def modulate_amp(self, modulator):
     """
     Perform amplitude modulation by another waveform or oscillator.
     You can use a Sample (or array of sample values) or an oscillator as modulator.
     If you use a Sample (or array), it will be cycled if needed and its maximum amplitude
     is scaled to be 1.0, effectively using it as if it was an oscillator.
     """
     assert not self.__locked
     frames = self.get_frame_array()
     if isinstance(modulator, (Sample, list, array.array)):
         # modulator is a waveform, turn that into an 'oscillator' ran
         if isinstance(modulator, Sample):
             modulator = modulator.get_frame_array()
         # normalize by the peak absolute amplitude so factors are in [-1, 1]
         biggest = max(max(modulator), abs(min(modulator)))
         modulator = (v/biggest for v in itertools.cycle(modulator))
     else:
         # assume an oscillator: an iterable yielding amplitude factors
         modulator = iter(modulator)
     for i in range(len(frames)):
         frames[i] = int(frames[i] * next(modulator))
     self.__frames = frames.tobytes()
     if sys.byteorder == "big":
         # stored frames are little-endian; swap back on big-endian hosts
         self.__frames = audioop.byteswap(self.__frames, self.__samplewidth)
     return self
    def get_aiff_data(self, convert_rate=None, convert_width=None):
        """Return a byte string with the contents of an AIFF-C file containing
        the audio represented by this ``AudioData`` instance.

        If ``convert_width`` is specified and the audio samples are not
        ``convert_width`` bytes wide, the resulting audio is converted to match.
        If ``convert_rate`` is specified and the audio sample rate is not
        ``convert_rate`` Hz, the resulting audio is resampled to match.
        Writing these bytes directly to a file produces a valid AIFF-C file.
        """
        # @noter: 余扬名 徐越方洲
        # @description: build the AIFF data that can be written out as a new AIFF file
        # @param convert_rate: target sample rate, or None to keep the current rate
        # @param convert_width: target sample width, or None to keep the current width
        # @return aiff_data: the bytes of the resulting AIFF-C file
        raw_data = self.get_raw_data(convert_rate,
                                     convert_width)  # data from get_raw_data
        sample_rate = self.sample_rate if convert_rate is None else convert_rate
        sample_width = self.sample_width if convert_width is None else convert_width

        # the AIFF file format stores samples big-endian, so the raw data must be byte-swapped
        if hasattr(audioop, "byteswap"):  # ``audioop.byteswap`` only exists on Python 3.4+
            raw_data = audioop.byteswap(raw_data, sample_width)
        else:  # manually reverse the bytes of each sample: slower, but a workable fallback
            raw_data = raw_data[sample_width - 1::-1] + b"".join(
                raw_data[i + sample_width:i:-1]
                for i in range(sample_width - 1, len(raw_data), sample_width))

        # generate the AIFF-C file contents
        with io.BytesIO() as aiff_file:
            aiff_writer = aifc.open(aiff_file, "wb")
            try:  # note: cannot use a context manager; aifc gained that only in Python 3.4
                aiff_writer.setframerate(sample_rate)  # frame rate is the sample rate
                aiff_writer.setsampwidth(sample_width)  # sample width in bytes
                aiff_writer.setnchannels(1)  # mono audio
                aiff_writer.writeframes(raw_data)  # the sample byte sequence
                aiff_data = aiff_file.getvalue()  # grab the AIFF bytes
            finally:  # make sure the resource is cleaned up
                aiff_writer.close()
        return aiff_data
Example #37
0
 def _lin2sowt(self, data):
     with warnings.catch_warnings():
         warnings.simplefilter('ignore', category=DeprecationWarning)
         import audioop
     return audioop.byteswap(data, 2)
Example #38
0
 def _lin2sowt(self, data):
     import audioop
     return audioop.byteswap(data, 2)
Example #39
0
 def _sowt2lin(self, data):
     import audioop
     return audioop.byteswap(data, 2)
Example #40
0
def generate_mouth_data(sample_data, compression, sample_rate, encoding):
    """Generate 30Hz mouth-flap amplitude data from PCM audio samples.

    Params:
        sample_data: raw PCM bytes in the given compression format.
        compression: one of constants.PCM_FORMATS.
        sample_rate: audio sample rate in Hz; must be positive.
        encoding: key into constants.channel_counts (channel layout).

    Returns:
        bytes with one uint8 amplitude per ~1/30s audio fragment, scaled and
        shifted to span the observed volume range. Empty input yields b"".
    """
    assert compression in constants.PCM_FORMATS
    assert encoding in constants.channel_counts
    assert sample_rate > 0

    sample_width = constants.sample_widths[compression]
    channel_count = constants.channel_counts[encoding]

    if compression == constants.COMPRESSION_PCM_8_UNSIGNED:
        # bias by 128 to shift unsigned into signed
        sample_data = audioop.bias(sample_data, 1, 128)
    elif sample_width > 1 and compression not in constants.NATIVE_ENDIANNESS_FORMATS:
        # byteswap samples to system endianness before processing
        sample_data = audioop.byteswap(sample_data, sample_width)

    # view the bytes as signed 16/32-bit samples without copying
    if sample_width == 2:
        sample_data = memoryview(sample_data).cast("h")
    elif sample_width == 4:
        sample_data = memoryview(sample_data).cast("i")

    # mouth data is sampled at 30Hz, so we divide the audio
    # sample_rate by that to determine how many samples we must
    # consider for each fragment. also, since mouth data doesn't
    # use multiple channels, and the audio samples are interleaved,
    # we multiply the channel count into the fragment_width.
    samples_per_mouth_sample = sample_rate / constants.SAMPLE_RATE_MOUTH_DATA
    fragment_width = int(channel_count * samples_per_mouth_sample + 0.5)

    # add fragment_width - 1 to round up to next multiple of fragment_width
    fragment_count = (len(sample_data) + fragment_width - 1) // fragment_width
    if fragment_count == 0:
        # no audio at all: bail out before the max()/average computations
        # below would raise on an empty sequence or divide by zero
        return b""

    # used to scale the max fragment to the [0, 255] scale of a uint8
    scale_to_uint8 = 255 / ((1 << (sample_width * 8 - 1)) - 1)

    # generate mouth data samples
    mouth_data = bytearray(fragment_count)
    for i in range(fragment_count):
        fragment = sample_data[i * fragment_width:(i + 1) * fragment_width]
        fragment_avg = sum(map(abs, fragment)) / samples_per_mouth_sample

        mouth_sample = fragment_avg * scale_to_uint8
        if mouth_sample >= 255:
            mouth_data[i] = 255
        else:
            mouth_data[i] = int(mouth_sample)

    # shift/scale the mouth samples based on the range of the mouth data
    mouth_avg = sum(mouth_data) / len(mouth_data)
    mouth_max = max(mouth_data)
    mouth_min = max(0, min(255, 2 * mouth_avg - mouth_max))

    mouth_range = (mouth_avg + mouth_max) / 2 - mouth_min
    if mouth_range == 0:
        # no range in the volume. don't try to scale
        # or shift, or else we'll divide by zero
        return bytes(mouth_data)

    for i in range(len(mouth_data)):
        mouth_sample = (mouth_data[i] - mouth_min) / mouth_range
        if mouth_sample >= 1.0:
            mouth_data[i] = 255
        elif mouth_sample <= 0.0:
            mouth_data[i] = 0
        else:
            mouth_data[i] = int(255 * mouth_sample)

    return bytes(mouth_data)