Example #1
    def generatefromwave(wave, name):
        maxchannels = wave.getnchannels()
        amp = wave.getsampwidth()

        raw = wave.readframes(wave.getnframes())
        track = []
        i = 0
        while (i != len(raw)):
            value = struct.unpack("<h", raw[i:i + amp])
            value = value[0]
            #value from -2**15 to 2**15 (or other values, depending on amp)
            value = value / 2**(8 * (amp) - 1)

            track.append(value)
            i += amp

        #TODO: figure out how readnbytes gives me the channels
        #first attempt: assume the channels are concatenated:
        channels = []
        size = wave.getnframes()
        for i in range(maxchannels):
            channels.append([])

            j = 0
            while ((j + 1) * 728) <= len(track):
                channels[i].append(track[j * 728:(j + 1) * 728])
                j += 1

        return AudioTrack(channels, name)
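The TODO above asks how readframes returns the channels: in a standard WAV file the channel samples are interleaved within each frame, not concatenated, so the "first attempt" will not separate them (and every entry of channels ends up holding the same data). A minimal de-interleaving sketch, assuming 16-bit samples (the function name and structure here are my own, not part of the original):

import struct
import wave

def read_channels(path):
    # open the file and pull every frame as raw bytes
    wav = wave.open(path, "rb")
    n_channels = wav.getnchannels()
    sampwidth = wav.getsampwidth()          # bytes per sample; assumed to be 2 (16-bit)
    raw = wav.readframes(wav.getnframes())  # frames are interleaved: ch0, ch1, ch0, ch1, ...
    wav.close()

    # unpack all samples at once, then take every n_channels-th value for each channel
    samples = struct.unpack("<%dh" % (len(raw) // sampwidth), raw)
    return [list(samples[c::n_channels]) for c in range(n_channels)]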
Example #2
def extractSamples(wave, start, end):
    sampleRate = wave.getframerate()
    duration = end - start
    assert duration > 0
    wave.setpos(start * sampleRate)
    return [ struct.unpack_from("<h", wave.readframes(1))[0]
                    for i in range(0, int(duration * sampleRate))]
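A hypothetical call for extractSamples, assuming a mono 16-bit PCM file (the file name is made up) and start/end given in whole seconds:

import struct   # needed by extractSamples itself
import wave

wav = wave.open("speech.wav", "rb")            # assumed mono, 16-bit PCM
samples = extractSamples(wav, start=1, end=3)  # all samples between 1 s and 3 s
wav.close()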
Example #3
def get_speech_int_array(wave, start, end):
    vad = webrtcvad.Vad(3)

    start = max(0, start)

    samples_per_second = wave.getframerate()

    #print "Framerate: %i" % samples_per_second

    samples_per_frame = int(SPEECH_FRAME_SEC * samples_per_second)

    total_samples = wave.getnframes()

    #print "Samples per frame: %i" % samples_per_frame
    wave.rewind()
    try:
        wave.setpos(start * samples_per_second)
    except Exception:
        print("failed to set pos %f" % start)

    wave_view_int = []
    while wave.tell() < min(end * samples_per_second, total_samples):
        #wave_view_str += "1" if vad.is_speech(wave.readframes(samples_to_get), sample_rate) else "0"
        try:
            wav_samples = wave.readframes(samples_per_frame)
            wave_view_int.append(
                1 if vad.is_speech(wav_samples, samples_per_second) else 0)
        except Exception as ex:
            print("Exception: " + str(ex))
            return []

    return wave_view_int
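A hypothetical setup for the function above (and the similar ones in Example #6): webrtcvad's is_speech expects 16-bit mono PCM at 8, 16, 32 or 48 kHz in frames of 10, 20 or 30 ms, so SPEECH_FRAME_SEC is presumably one of 0.01, 0.02 or 0.03. The constant value and file name below are assumptions:

import wave
import webrtcvad

SPEECH_FRAME_SEC = 0.03   # assumed: 30 ms frames, one of the sizes webrtcvad accepts

wav = wave.open("recording.wav", "rb")                     # assumed mono, 16-bit, 16 kHz
speech_flags = get_speech_int_array(wav, start=0, end=5)   # one 0/1 flag per 30 ms frame
wav.close()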
Example #5
    def __init__(self, wavfile):
        # Load wav file
        # Fetch sampling rate and signal object
        wf = w.open(wavfile)
        self.sampling_rate = wf.getframerate()
        s = wf.readframes(wf.getnframes())
        self.signal = np.frombuffer(s, dtype="int16")
        wf.close()

        # Define constants
        n0 = 0  # start point for sa
        N = 1024  # number of samples
Example #6
def get_speech_int_array(wave, start, end):
    vad = webrtcvad.Vad(3)

    samples_per_second = wave.getframerate()

    samples_per_frame = int(SPEECH_FRAME_SEC * samples_per_second)

    wave.setpos(start * samples_per_second)

    wave_view_int = []
    while wave.tell() < end * samples_per_second:
        #wave_view_str += "1" if vad.is_speech(wave.readframes(samples_to_get), sample_rate) else "0"
        try:
            wav_samples = wave.readframes(samples_per_frame)
            wave_view_int.append(
                1 if vad.is_speech(wav_samples, samples_per_second) else 0)
        except Exception:
            return []

    return wave_view_int
def get_speech_int_array(wave, start=0, end=-1):
    vad = webrtcvad.Vad(0)

    samples_per_second = wave.getframerate()

    samples_per_frame = int(SPEECH_FRAME_SEC * samples_per_second)

    if end == -1:
        end = float(wave.getnframes()) / samples_per_second

    wave.setpos(start * samples_per_second)

    wave_view_int = []
    while wave.tell() < end * samples_per_second:
        #wave_view_str += "1" if vad.is_speech(wave.readframes(samples_to_get), sample_rate) else "0"
        try:
            wav_samples = wave.readframes(samples_per_frame)
            wave_view_int.append(
                1 if vad.is_speech(wav_samples, samples_per_second) else 0)
        except Exception:
            print("Exception reading frames")
            return []

    return wave_view_int
def trim_silence(wave, output_file_path):
    vad = webrtcvad.Vad(3)

    VAD_WINDOW_SEC = 0.01
    samples_per_second = wave.getframerate()
    samples_per_frame = int(VAD_WINDOW_SEC * samples_per_second)
    total_samples = wave.getnframes()

    #print('samples_step: %i' % samples_per_frame)
    wave_view_str = ""
    wave_view_int = []
    while wave.tell() < total_samples:
        #wave_view_str += "1" if vad.is_speech(wave.readframes(samples_to_get), sample_rate) else "0"
        try:
            wav_samples = wave.readframes(samples_per_frame)
            val = 1 if vad.is_speech(wav_samples, samples_per_second) else 0
            wave_view_int.append(val)
            wave_view_str += str(val)
        #print "current_pos: %i" % wave.tell()
        except Exception as ex:
            print("Exception: " + str(ex))
            return []

    print(wave_view_str)
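trim_silence above builds the per-frame speech map but never touches output_file_path. A hypothetical continuation (the helper name write_trimmed and its approach are my own, not from the original) could keep only the span between the first and last speech frames and write it out:

def write_trimmed(wave, wave_view_int, samples_per_frame, output_file_path):
    # 'wave' is the opened Wave_read object from above; import the module under
    # another name because the parameter shadows it
    import wave as wave_module

    speech_frames = [i for i, v in enumerate(wave_view_int) if v == 1]
    if not speech_frames:
        return                                  # nothing but silence
    first, last = speech_frames[0], speech_frames[-1] + 1

    wave.setpos(first * samples_per_frame)
    frames = wave.readframes((last - first) * samples_per_frame)

    out = wave_module.open(output_file_path, "wb")
    out.setnchannels(wave.getnchannels())
    out.setsampwidth(wave.getsampwidth())
    out.setframerate(wave.getframerate())
    out.writeframes(frames)
    out.close()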
Example #9
#Visualizer help from 'courageousillumination'
#https://github.com/courageousillumination/visualizer/blob/master/visualizer.py#L36
import pygame as pgame
import numpy as np
import wave 
import array

pgame.mixer.init() #initializes the mixer element
pgame.init() #initializes the game screen


pgame.mixer.music.load('ArchieCut.wav')
sound_file = wave.open('ArchieCut.wav')
data = sound_file.readframes(sound_file.getnframes())


print(sound_file.getparams())
stringOfBytes = np.frombuffer(data, dtype='<H')
#'<H' = little endian unsigned short
#needed to grab the data from the sound
Example #10
import matplotlib.pyplot as plt
import numpy as np
import wave
import sys

wave = wave.open('imp.wav', 'r')
signal = wave.readframes(-1)
signal = np.frombuffer(signal, dtype=np.int16)
plt.plot(signal)
plt.show()
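A variant of the same plot with a time axis derived from the frame rate (a sketch, assuming the same 'imp.wav' is mono, 16-bit PCM):

import matplotlib.pyplot as plt
import numpy as np
import wave

wav = wave.open('imp.wav', 'r')
signal = np.frombuffer(wav.readframes(-1), dtype=np.int16)
t = np.arange(len(signal)) / wav.getframerate()   # sample index -> seconds (mono assumed)
wav.close()

plt.plot(t, signal)
plt.xlabel('time [s]')
plt.show()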
Example #11
    def play(self, wave):
        samples = wave.readframes(self.AUDIO_CHUNK)
        while samples:
            self.stream.write(samples)
            samples = wave.readframes(self.AUDIO_CHUNK)
        wave.rewind()
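A hypothetical way to wire this method up, assuming self.stream is a PyAudio output stream and AUDIO_CHUNK is a frame count such as 1024 (the class name, constant value, and file name are not stated in the original):

import pyaudio
import wave

class Player:
    AUDIO_CHUNK = 1024  # frames per write; assumed value

    def __init__(self, wav):
        self._pa = pyaudio.PyAudio()
        # open an output stream matching the WAV file's format
        self.stream = self._pa.open(
            format=self._pa.get_format_from_width(wav.getsampwidth()),
            channels=wav.getnchannels(),
            rate=wav.getframerate(),
            output=True)

    def play(self, wave):
        # stream AUDIO_CHUNK frames at a time until readframes returns b''
        samples = wave.readframes(self.AUDIO_CHUNK)
        while samples:
            self.stream.write(samples)
            samples = wave.readframes(self.AUDIO_CHUNK)
        wave.rewind()

wav = wave.open('clip.wav', 'rb')   # made-up file name
Player(wav).play(wav)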