Example #1
def play_audio(wf:wave.Wave_read):

    CHUNK = 1024

    # instantiate PyAudio (1)
    p = pyaudio.PyAudio()

    # open stream (2)
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)

    # read data
    data = wf.readframes(CHUNK)

    # play stream (3)
    while len(data) > 0:
        stream.write(data)
        data = wf.readframes(CHUNK)

    stream.stop_stream()
    stream.close()

    p.terminate()
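
A minimal usage sketch (not part of the original example), assuming PyAudio is installed and that "example.wav" is a placeholder path:

import wave

with wave.open("example.wav", "rb") as wf:  # placeholder path
    play_audio(wf)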
Example #2
def fourier(audio: wave.Wave_read) -> Tuple[Optional[int], Optional[int]]:
    """Fourierova analýza vstupních dat, vracející (nejnižší, nejvyšší) frekvenci."""
    # data
    length = audio.getnframes()
    sample_rate = audio.getframerate()
    windows_count = length // sample_rate
    channels = 1 if audio.getnchannels() == 1 else 2  # Stereo (2) vs. Mono (1)
    frames = sample_rate * windows_count

    data = np.array(unpack(f"{channels * frames}h", audio.readframes(frames)))
    if channels == 2:
        data = merge_channels(data)

    # amplitudy
    low, high = None, None
    for i in range(windows_count):
        bounds = (i * sample_rate, i * sample_rate + sample_rate)
        window = data[bounds[0]:bounds[1]]
        amplitudes = np.abs(np.fft.rfft(window))
        average = np.average(amplitudes)

        # peaks
        peak = lambda amp: amp >= 20 * average  # per the assignment
        for j in range(len(amplitudes)):
            amplitude = amplitudes[j]
            if not peak(amplitude):
                continue
            if low is None:
                low = j
                high = j
            else:
                high = j
    if low is None:
        return None, None
    return (high, low) if high < low else (low, high)  # they may be swapped
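
fourier relies on a merge_channels helper that is not shown above. A minimal sketch, assuming its job is simply to average the interleaved left/right samples into one mono signal:

import numpy as np

def merge_channels(data: np.ndarray) -> np.ndarray:
    # Assumed behaviour: average interleaved left/right samples into mono.
    return (data[0::2] + data[1::2]) // 2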
Example #3
def print_audio_samples(wave_read: wave.Wave_read,
                        pos_sec=0,
                        steps=1,
                        length_ms=2_000):
    rate = wave_read.getframerate()
    start_frame = rate * pos_sec
    wave_read.readframes(start_frame)
    end_frame = start_frame + (rate * length_ms // 1000)
    print("Reading from = %s to = %s, with step = %s" %
          (start_frame, end_frame, steps))
    string_buffer = []
    for i in range(start_frame, end_frame, steps):
        wave_read.setpos(i)
        peak = wave_read.readframes(1)
        string_buffer.append(str(peak[0]))
    print(','.join(string_buffer))
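
A hypothetical call that prints half a second of samples starting one second in, taking every tenth frame ("example.wav" is a placeholder path):

import wave

with wave.open("example.wav", "rb") as wr:
    print_audio_samples(wr, pos_sec=1, steps=10, length_ms=500)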
Example #4
def read(file: wave.Wave_read):
    """
    Reads the file and builds an AudioData object from its frames.
    Returns that AudioData.
    """
    params = file.getparams()
    frames_number = file.getnframes()
    frames = file.readframes(frames_number)
    characters_per_frame = len(frames) // frames_number
    framesdata = split_frames_into_sounds(frames, characters_per_frame)
    return AudioData(params, framesdata)
Example #5
    def join(self, inputWavfile: wave.Wave_read, start, end):
        length = end - start
        if start < 0 or end < 0 or length < 0:
            raise ValueError("Invalid start/end values were given")
        params = inputWavfile.getparams()
        if not self.__compareParams(params):
            raise ValueError("File cannot be joined due to inappropriate parameters")
        else:
            inputWavfile.setpos(int(start * self.frameRate))
            data = inputWavfile.readframes(int(length * self.frameRate))
            self.__output.writeframes(data)
Example #6
def print_audio_samples_all(wave_read: wave.Wave_read):
    n = wave_read.getnframes()
    buffer = []
    count = 0
    for i in range(n):
        sample = wave_read.readframes(1)
        int_version = int.from_bytes(sample, byteorder='little')
        if int_version == 0: count += 1
        if i % 100 == 0:
            # if int_version > (1 << 15): int_version = (1 << 15) - int_version
            buffer.append(int_version)
    print(buffer)
    print(count)
Example #7
def iter_wav_data(wav: wave.Wave_read, chunk_size: int, min_padding=0):
    wav.rewind()
    nchunks = wav.getnframes() // chunk_size
    for n in range(0, nchunks):
        d = wav.readframes(chunk_size)
        if len(d) < chunk_size * 2:  # readframes returns 2 bytes per 16-bit mono frame
            d += b'\0\0' * (chunk_size - len(d) // 2)
        a = array.array('h')
        a.frombytes(d)
        yield a
    if min_padding:
        a = array.array('h')
        a.frombytes(b'\0\0' * min_padding)
        yield a
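
A hypothetical way to consume the generator, assuming 16-bit mono input and a placeholder path:

import wave

with wave.open("example.wav", "rb") as wav:
    for chunk in iter_wav_data(wav, 1024):
        print(max(chunk))  # peak sample value of each 1024-frame chunk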
Example #8
def filter_lowpassTest(wav: Wave_read, cutoff: int):
    signal = wav.readframes(-1)
    signal = np.frombuffer(signal, dtype=np.int16)

    filtered: wave.Wave_write = wave.open(join(const.AUDIO_DIR, 'temp.wav'),
                                          'w')
    filtered.setframerate(wav.getframerate())
    filtered.setsampwidth(wav.getsampwidth())
    filtered.setnchannels(wav.getnchannels())
    # NOTE: the low-pass filtering of `signal` (using `cutoff`) is omitted in
    # this snippet; the samples are written back unchanged.
    for frame in signal:
        data = struct.pack('<h', frame)
        filtered.writeframesraw(data)
    filtered.close()
    return wave.open(join(const.AUDIO_DIR, 'temp.wav'), 'r')
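
The low-pass step itself is missing from the snippet above (cutoff is never used). A sketch of what it might look like with a SciPy Butterworth filter, applied to signal before the frames are packed; this is an assumption, not the original author's filter:

import numpy as np
from scipy.signal import butter, lfilter

def lowpass(signal: np.ndarray, cutoff: int, sample_rate: int, order: int = 5) -> np.ndarray:
    # Normalise the cutoff to the Nyquist frequency and apply a Butterworth low-pass filter.
    b, a = butter(order, cutoff / (0.5 * sample_rate), btype='low')
    return lfilter(b, a, signal).astype(np.int16)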
Example #9
def trim(sound_file: wave.Wave_read, ratio, new_file_path):
    """
    Creates a new trimmed file out of the given one
    :param sound_file: Source file
    :param ratio: The ratio by which the function trims
    :param new_file_path: Path to the output file
    """
    frame_count = sound_file.getnframes()
    target_frame_count = int(frame_count * ratio)

    new_frames = sound_file.readframes(target_frame_count)
    new_file = wave.open(new_file_path, 'w')
    new_file.setparams(sound_file.getparams())
    new_file.writeframes(new_frames)
    new_file.close()
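
A hypothetical call that keeps the first half of a recording (both paths are placeholders):

import wave

with wave.open("input.wav", "rb") as src:
    trim(src, 0.5, "output.wav")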
Example #10
def transform_nparray(orignal_wave: wave.Wave_read) -> Tuple[np.ndarray, int]:
    """transform wave into ndarray

    Parameters
    ----------
    orignal_wave : file
        wave_read object

    Returns
    -------
    narray : ndarray
        1-d array
    narray_frame : int
        frame_length
    """

    narray_frame = orignal_wave.getnframes()
    narray = orignal_wave.readframes(narray_frame)
    narray = np.frombuffer(narray, dtype="int16")

    return narray, narray_frame
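
A hypothetical call, assuming a 16-bit source file and a placeholder path:

import wave

with wave.open("example.wav", "rb") as wav:
    samples, n_frames = transform_nparray(wav)
    print(samples.shape, n_frames)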
Example #11
    def _send_packet(self, wave_file: wave.Wave_read, first_packet: bool,
                     transport) -> int:
        frames = wave_file.readframes(FRAMES_PER_PACKET)
        if not frames:
            return 0

        header = AudioPacketHeader.encode(
            0x80,
            0xE0 if first_packet else 0x60,
            self.context.rtpseq,
            self.context.rtptime,
            self.context.session_id,
        )

        # ALAC frame with raw data. Not so pretty but will work for now until a
        # proper ALAC encoder is added.
        audio = bitarray("00" + str(self.context.channels - 1) + 19 * "0" +
                         "1")
        for i in range(0, len(frames), 2):
            audio.frombytes(bytes([frames[i + 1], frames[i]]))

        if transport.is_closing():
            _LOGGER.warning("Connection closed while streaming audio")
            return 0

        packet = header + audio.tobytes()

        # Add packet to backlog before sending
        self._packet_backlog[self.context.rtpseq] = packet
        transport.sendto(packet)

        self.context.rtpseq = (self.context.rtpseq + 1) % (2**16)

        # Number of frames in this packet (bytes -> frames).
        frame_count = len(frames) // (self.context.channels *
                                      self.context.bytes_per_channel)
        self.context.head_ts += frame_count

        return frame_count
Example #12
    def encode_chunk(self, thread_id: str, file: Wave_read,
                     total_samples_to_read: int, output: BytesIO) -> None:
        options = STARTUPINFO()
        options.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        options.wShowWindow = subprocess.SW_HIDE
        process = Popen(self.command,
                        stdin=PIPE,
                        stdout=PIPE,
                        stderr=PIPE,
                        startupinfo=options)

        read_data_thread = Thread(
            target=lambda: output.write(process.stdout.read()))
        read_data_thread.daemon = True
        read_data_thread.start()

        samples_to_read, samples_left = self.update_samples_to_read(
            total_samples_to_read, 1024)
        last_progress = 0
        while samples_left > 0:
            process.stdin.write(file.readframes(samples_to_read))

            progress = int((total_samples_to_read - samples_left) * 100 /
                           total_samples_to_read)
            if progress != last_progress:
                self.listener.encode_update(thread_id, progress)
                last_progress = progress

            samples_to_read, samples_left = self.update_samples_to_read(
                samples_left, 1024)

        self.listener.encode_update(thread_id, 100)
        process.stdin.close()
        read_data_thread.join()
        process.stdout.close()
        process.stderr.close()
        file.close()