Example #1
File: yid.py Project: carlsverre/yid
def stream_audio():
    s = Stream(samplerate=RATE, blocksize=CHUNK, callback=process_chunk)
    s.start()
    try:
        secs = 1000
        print("Controlling cursor for %d seconds, Ctrl-C to stop" % secs)
        time.sleep(secs)
    except KeyboardInterrupt:
        pass
    print("Stopping...")
    s.stop()
Example #2
 def __init__(self, device=None):
     super(self.__class__, self).__init__()
     self.device = device if device is not None else self._findDevice()
     self.sr = consts.sr
     self.inputStream = PyScStream(
         sample_rate=self.sr, block_length=consts.window_hop,
         callback=self.inputStreamCallback, input_device=self.device)
     self.iteration = 0
Example #3
File: tut2main2.py Project: ac2cz/pySdr
def audio_thread():
    print("Starting Audio")
    global reading_data
    global win
    fs = 48000
    fftLength = 255
    blocksize = 255
    bin_bandwidth = fs / fftLength
    avg_num = 10

    s = Stream(samplerate=fs, blocksize=blocksize)
    s.start()
    psd = [0] * fftLength
    while reading_data:
        data = s.read(fftLength)
        left, right = map(list, zip(*data))
        out = np.fft.rfft(left)

        psd[0] = average(psd[0],
                         calc_psd(out[0].real, out[0].real, bin_bandwidth),
                         avg_num)
        half = int(len(out))
        for k in range(1, half):
            psd[k] = average(psd[k],
                             calc_psd(out[k].real, out[k].imag, bin_bandwidth),
                             avg_num)
        psd[half - 1] = average(
            psd[k], calc_psd(out[0].imag, out[half - 1].imag, bin_bandwidth),
            avg_num)
        if (reading_data):
            win.setData(psd)
    s.stop()
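The audio thread above relies on average() and calc_psd() helpers defined elsewhere in the pySdr project. A minimal sketch of plausible implementations, assuming a simple running average and a per-bin power estimate normalised by bin bandwidth (illustrative guesses, not the project's code):

def average(old, new, avg_num):
    # Running average that weights the new value by 1/avg_num (assumption).
    return old + (new - old) / avg_num

def calc_psd(re, im, bin_bandwidth):
    # Power of one FFT bin divided by its bandwidth (assumption).
    return (re * re + im * im) / bin_bandwidth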
Example #4
    def run(self):
        with Stream(sample_rate=44100, block_length=16) as s:
            while self.running:
                vec = s.read(NUM_SAMPLES)

                # Downsample to mono
                mono_vec = vec.sum(-1) / float(s.input_channels)

                self._spectrogram = np.fft.fft(mono_vec)
Example #5
class SysAudioStream(object):
    """Loop back system audio."""
    def __init__(self, rate, blocksize):
        self._rate = rate
        self._blocksize = blocksize
        self._buff = queue.Queue()
        self.closed = True

    def __enter__(self):
        self._audio_stream = Stream(samplerate=self._rate, blocksize=self._blocksize)
        self._audio_stream.start()
        self.closed = False
        # Return the stream so "with SysAudioStream(...) as stream" works.
        return self

    def __exit__(self, type, value, traceback):
        self._audio_stream.stop()
        self.closed = True
        # Signal the generator to terminate so that the client's
        # streaming_recognize method will not block the process termination.
        self._buff.put(None)

    def generator(self):
        while not self.closed:
            # Read a small block so there is at least one chunk of data, and
            # stop iteration if the chunk is None, indicating the end of the
            # audio stream.
            chunk = self._audio_stream.read(16)
            if chunk is None:
                return
            # pysoundcard's read() returns a NumPy array, so convert it to
            # raw bytes before joining.
            data = [chunk.tobytes()]

            # Now consume whatever other data's still buffered.
            # while True:
            #     try:
            #         chunk = self._buff.get(block=False)
            #         if chunk is None:
            #             return
            #         data.append(chunk)
            #     except queue.Empty:
            #         break

            yield b''.join(data)
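A hypothetical usage sketch for the class above (not from the original project); the 16 kHz rate, the block size and the consume() placeholder are assumptions:

def consume(chunk_bytes):
    # Placeholder consumer; a real client might feed these bytes into a
    # streaming speech-recognition request.
    print(len(chunk_bytes))

with SysAudioStream(rate=16000, blocksize=1600) as stream:
    for chunk in stream.generator():
        consume(chunk)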
Example #6
class InputStream(Stream):

    def __init__(self, device=None):
        super(self.__class__, self).__init__()
        self.device = device if device is not None else self._findDevice()
        self.sr = consts.sr
        self.inputStream = PyScStream(
            sample_rate=self.sr, block_length=consts.window_hop,
            callback=self.inputStreamCallback, input_device=self.device)
        self.iteration = 0

    def _findDevice(self):
        inputPriority = [
            "FastTrack Pro",
            "Built-in Input",
            "Soundflower (2ch)",
        ]
        for input_ in inputPriority:
            for dev in pysoundcard.devices():
                if dev['name'] == input_ and dev['input_channels'] > 0:
                    return dev

    def inputStreamCallback(self, inData, numFrames, timeInfo, status):
        '''
        Wrap our stream callback in the default pysoundcard callback
        '''
        self.iteration += 1
        if self.iteration % 3 == 0:
            print "%.2f" % np.sum(inData)
        self.enqueueChunk(inData)
        return (np.zeros((numFrames, 2)), pysoundcard.continue_flag)

    def read(self, *args, **kwargs):
        return self.inputStream.read(*args, **kwargs)

    def start(self):
        self.inputStream.start()

    def stop(self):
        self.inputStream.stop()
Example #7
    def __init__(self):
        """
        """

        self.fObj = None
        self.track = None  # the currently playing SoundFile
        self.buffer_size = 0  # the number of PCM frames to process
        self.state = PLAYER_STOPPED
        self.progress = [0, 1]  # an Array of current/total frames
        self.task = None
        #self.stream = Stream(callback=self.stream_callback)
        self.stream = Stream()
        self.queue = deque()
        self.played = deque()
Example #8
 def start_playing(self):
     self.buffer_size = min(int(round(self.BUFFER_SIZE *
                                      self.track.sample_rate)),
                            2048)
     log.msg('buf size: %s' % self.buffer_size)
     #reopen stream if necessary based on file's parameters
     if self.stream.sample_rate != self.track.sample_rate or \
        self.stream.block_length != self.buffer_size:
         self.stream = Stream(sample_rate=self.track.sample_rate,
                              block_length=self.buffer_size)
                              #callback=self.stream_callback)
     self.state = PLAYER_PLAYING
     self.stream.start()
     #self.set_progress(0, self.track.frames)
     self.task = cooperate(self)
Example #9
def play_source(source_path):
    """Play an audio file using pysoundcard."""

    from aubio import source
    from pysoundcard import Stream

    hop_size = 256
    f = source(source_path, hop_size=hop_size)
    samplerate = f.samplerate

    s = Stream(sample_rate=samplerate, block_length=hop_size)
    s.start()
    read = 0
    while 1:
        vec, read = f()
        s.write(vec)
        if read < hop_size: break
    s.stop()
Example #10
    def init_stream(self, sample_rate, block_length):
        self._psc_sample_rate = sample_rate
        self._psc_block_length = block_length
        self._level = -120

        def callback(in_data, frame_count, time_info, status):
            new_value = self._b0*np.mean(in_data**2)  - self._a1*10**(self._level*0.1)
            self._level = 10*np.log10(new_value)
            self.write_message('{:.1f}'.format(self._level))
            return in_data, continue_flag

        self._psc_stream = Stream(
            callback=callback,
            sample_rate=self._psc_sample_rate,
            block_length=self._psc_block_length,
            output_device=False
        )
Example #11
def main():
    vo = VoiceOver()
    soundcard = Stream(
        blocksize=BLOCK_SIZE,
        channels=1,
        dtype='int16',
        samplerate=44100,
        callback=vo.callback,
    )
    soundcard.start()
    while True:
        time.sleep(5)
    soundcard.stop()
Example #12
def play_source(source_path):
    """Play an audio file using pysoundcard."""

    from aubio import source
    from pysoundcard import Stream
    
    hop_size = 256
    f = source(source_path, hop_size = hop_size)
    samplerate = f.samplerate

    s = Stream(sample_rate = samplerate, block_length = hop_size)
    s.start()
    read = 0
    while 1:
        vec, read = f()
        s.write(vec)
        if read < hop_size: break
    s.stop()
Example #13
def record_sink(sink_path):
    """Record an audio file using pysoundcard."""

    from aubio import sink
    from pysoundcard import Stream

    hop_size = 256
    duration = 5  # in seconds
    s = Stream(block_length=hop_size)
    g = sink(sink_path, samplerate=s.sample_rate)

    s.start()
    total_frames = 0
    while total_frames < duration * s.sample_rate:
        vec = s.read(hop_size)
        # mix down to mono
        mono_vec = vec.sum(-1) / float(s.input_channels)
        g(mono_vec, hop_size)
        total_frames += hop_size
    s.stop()
Example #14
def iter_capture_and_detect_gender(sample_rate=11025,
                                   hop_size=256,
                                   confidence_threshold=0.8):
    """Capture audio and yield gender.

    Parameters
    ----------
    sample_rate : int
        Sample rate in Hertz
    confidence_threshold : float
        Pitch confidence threshold for yielding

    Yields
    ------
    gender : str
        String representing gender either 'male' or 'female'

    """
    stream = Stream(blocksize=hop_size, channels=1, samplerate=sample_rate)
    stream.start()
    try:
        while True:
            samples = stream.read(hop_size).flatten()
            pitch_and_confidence = get_pitches(samples,
                                               buf_size=hop_size,
                                               hop_size=hop_size,
                                               sample_rate=sample_rate)
            pitch = pitch_and_confidence[0, 0]
            confidence = pitch_and_confidence[0, 1]
            if pitch > 145:
                gender = 'female'
            else:
                gender = 'male'
            if confidence >= confidence_threshold:
                yield gender
    except KeyboardInterrupt:
        pass
    stream.stop()
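get_pitches() is defined elsewhere in the project; a minimal sketch of what it might look like on top of aubio's pitch detector (the wrapper, the "yin" method choice and the return layout are assumptions):

import numpy as np
from aubio import pitch

def get_pitches(samples, buf_size, hop_size, sample_rate):
    # Hypothetical helper matching the call above: returns one
    # (pitch_hz, confidence) row for the analysed hop.
    detector = pitch("yin", buf_size, hop_size, sample_rate)
    detector.set_unit("Hz")
    pitch_hz = detector(samples.astype(np.float32))[0]
    return np.array([[pitch_hz, detector.get_confidence()]])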
Example #15
def record_sink(sink_path):
    """Record an audio file using pysoundcard."""

    from aubio import sink
    from pysoundcard import Stream

    hop_size = 256
    duration = 5 # in seconds
    s = Stream(block_length = hop_size)
    g = sink(sink_path, samplerate = s.sample_rate)

    s.start()
    total_frames = 0
    while total_frames < duration * s.sample_rate:
        vec = s.read(hop_size)
        # mix down to mono
        mono_vec = vec.sum(-1) / float(s.input_channels)
        g(mono_vec, hop_size)
        total_frames += hop_size
    s.stop()
Example #16
def record_sink(sink_path):
    """Record an audio file using pysoundcard."""

    from aubio import sink
    from pysoundcard import Stream

    hop_size = 256
    duration = 5  # in seconds
    s = Stream(blocksize=hop_size, channels=1)
    g = sink(sink_path, samplerate=int(s.samplerate))

    s.start()
    total_frames = 0
    try:
        while total_frames < duration * s.samplerate:
            vec = s.read(hop_size)
            # mix down to mono
            mono_vec = vec.sum(-1) / float(s.channels[0])
            g(mono_vec, hop_size)
            total_frames += hop_size
    except KeyboardInterrupt:
        duration = total_frames / float(s.samplerate)
        print("stopped after %.2f seconds" % duration)
    s.stop()
Example #17
def track_energy():
    """Track attacks of instrument, maintain global float energy"""

    global energy
    energy = 0.25

    win_size = 512                 # fft size
    hop_size = 256
    s = Stream(block_length = hop_size)

    o = onset("default", win_size, hop_size, s.sample_rate)


    come_up_steps = ceil(come_up_secs * s.sample_rate/hop_size)
    built_energy = 0
    built_steps = 0

    onsets = 0
    

    s.start()
    while True:
        vec = s.read(hop_size)
        # mix down to mono
        mono_vec = vec.sum(-1) / float(s.input_channels)
        if o(mono_vec):
            print "beat" + str(onsets)
            onsets += 1
            built_energy = (nu * (energy + built_steps * built_energy) + (1-nu) - energy)/(built_steps+come_up_steps)
            built_steps += come_up_steps
        if built_steps == 0 :
            energy = (1-eta) * energy
        else:
            energy += built_energy
            built_steps -= 1
#        print "energy = %f, total = %f" % (energy,energy + built_energy * built_steps)
    s.stop()
Example #18
def iter_capture_and_detect_gender(sample_rate=11025, hop_size=256,
                                   confidence_threshold=0.8):
    """Capture audio and yield gender.

    Parameters
    ----------
    sample_rate : int
        Sample rate in Hertz
    confidence_threshold : float
        Pitch confidence threshold for yielding

    Yields
    ------
    gender : str
        String representing gender either 'male' or 'female'

    """
    stream = Stream(blocksize=hop_size, channels=1, samplerate=sample_rate)
    stream.start()
    try:
        while True:
            samples = stream.read(hop_size).flatten()
            pitch_and_confidence = get_pitches(
                samples, buf_size=hop_size, hop_size=hop_size,
                sample_rate=sample_rate)
            pitch = pitch_and_confidence[0, 0]
            confidence = pitch_and_confidence[0, 1]
            if pitch > 145:
                gender = 'female'
            else:
                gender = 'male'
            if confidence >= confidence_threshold:
                yield gender
    except KeyboardInterrupt:
        pass
    stream.stop()
Example #19
def record_sink(sink_path):
    """Record an audio file using pysoundcard."""

    from aubio import sink
    from pysoundcard import Stream

    hop_size = 256
    duration = 5 # in seconds
    s = Stream(blocksize = hop_size, channels = 1)
    g = sink(sink_path, samplerate = int(s.samplerate))

    s.start()
    total_frames = 0
    try:
        while total_frames < duration * s.samplerate:
            vec = s.read(hop_size)
            # mix down to mono
            mono_vec = vec.sum(-1) / float(s.channels[0])
            g(mono_vec, hop_size)
            total_frames += hop_size
    except KeyboardInterrupt:
        duration = total_frames / float(s.samplerate)
        print("stopped after %.2f seconds" % duration)
    s.stop()
Example #20
from pysoundcard import Stream, continue_flag
import time

"""Loop back five seconds of audio data."""


def callback(in_data, out_data, time_info, status):
    out_data[:] = in_data
    return continue_flag

s = Stream(samplerate=44100, blocksize=16, callback=callback)
s.start()
time.sleep(5)
s.stop()
Example #21
    inputdata.append(line)
header = inputdata[0]
hop_s = int(header.rstrip("\n").split("\t")[0])
frames_limit = int(header.rstrip("\n").split("\t")[1])
sample_rate = int(header.rstrip("\n").split("\t")[2])
inputdata = inputdata[1:]

for line in inputdata:
    data_point = np.asarray(line.rstrip("\n").split("\t")[:-1]).astype(
        np.float32)
    data_point = data_point.reshape((frames_limit, hop_s))
    data.append(data_point)
data = np.asarray(data)
print("Loaded data " + str(data.shape))

s = Stream(samplerate=sample_rate, blocksize=hop_s)
beat_no = 0

#Get input from user and save to file
outputfilename = "_".join(data_name.split('_')[0:-1]) + "_labelled.txt"
outputfile = open(outputfilename, "w")

#Checking if Auto Mode
default_chord = ""
if (getClassLabel((data_name.split('_'))[0]) != -1):
    default_chord = (data_name.split('_'))[0]
    print("Auto mode")

output_data = []
previous_chord = "null"
num_chords = 0
Example #22
"""
import sys, os
import numpy as np
from scipy.io.wavfile import read as wavread
from pysoundcard import Stream

()

path = '/home/muenker/Daten/share/Musi/wav/'
#path = '../_media/'
#filename = 'chord.wav'
filename = '07 - Danny Gottlieb with John McLaughlin - Duet.wav'
filename = 'Ole_16bit.wav'  #
filename = '01 - Santogold - L.E.S Artistes.wav'
filename = '01 - Santogold - L.E.S Artistes_20s.wav'
#filename = 'ComputerBeeps2.wav'
filename = 'SpaceRipple.wav'

fs, wave = wavread(os.path.join(path, filename))
"""Play an audio file."""

#fs, wave = wavread(sys.argv[1])
wave = np.array(wave, dtype=np.float32)
wave /= 2**15  # normalize -max_int16..max_int16 to -1..1

blocksize = 512
s = Stream(samplerate=fs, blocksize=blocksize)
s.start()
s.write(wave)
s.stop
Example #23
__test__ = False

import imp
import numpy as np
import pyfilterbank
from soundfile import SoundFile
from pysoundcard import Stream


filename = r'bonsai.wav'

ofb = filterbank.OctaveFilterbank(nth_oct=3.0, start_band=-18, end_band=12, lphp_bounds=True, filterfun='cffi')

st = Stream(input_device=False)
sf = SoundFile(filename)

st.start()

def play_further(nblocks=120, blen=4096):
    states = np.zeros(2)
    for i in range(nblocks):
        x = sf.read(blen)[:,0]
        y, states = ofb.filter(x,states=states)
        out = y.sum(axis=1).values.flatten()
        st.write(out)

play_further()
Example #24
from pysoundcard import Stream
"""Loop back five seconds of audio data."""

fs = 44100
block_length = 16
s = Stream(sample_rate=fs, block_length=block_length)
s.start()
for n in range(int(fs * 5 / block_length)):
    s.write(s.read(block_length))
s.stop()
Example #25
import sys
import numpy as np
from scipy.io.wavfile import read as wavread
from pysoundcard import Stream
"""Play an audio file."""

fs, wave = wavread(sys.argv[1])
wave = np.array(wave, dtype=np.float32)
wave /= 2**15  # normalize -max_int16..max_int16 to -1..1

block_length = 16
s = Stream(sample_rate=fs, block_length=block_length)
s.start()
s.write(wave)
s.stop()
Example #26
from pysoundcard import Stream, continue_flag
import time

"""Loop back five seconds of audio data."""

def callback(in_data, out_data, time_info, status):
    out_data[:] = in_data
    return continue_flag

with Stream(sample_rate=44100, block_length=16, callback=callback):
    time.sleep(5)
Example #27
from pysoundcard import Stream

"""Loop back five seconds of audio data."""

fs = 44100
blocksize = 16
s = Stream(samplerate=fs, blocksize=blocksize)
s.start()
for n in range(int(fs * 5 / blocksize)):
    s.write(s.read(blocksize))
s.stop()
Example #28
# Play noise and record it back continuously in blocks

from pysoundcard import Stream
import pysoundcard
import numpy as np

fs = 44100
block_length = 1024*8
s = Stream(sample_rate=fs, block_length=block_length)
time = 1  # length of the noise in seconds
noise = np.random.randn(block_length, 2)/20.0
n_blocks = int(fs*time/block_length)

# starting array of matching size so the first recorded block can be
# appended; filled with zeros for now
rec_file = np.zeros([block_length, 2], float)

s.start()
for n in range(n_blocks):
    s.write(noise)
    rec = s.read(block_length)
    # append the recorded block to rec_file as additional rows
    rec_file = np.vstack([rec_file, rec])

#s.write(rec_file)
#print(rec_file)
s.stop()
Example #29
import numpy as np
import matplotlib.pyplot as plt
import pyaudio
from pysoundcard import Stream

p = pyaudio.PyAudio()

#################### close the previous window ###############
plt.close()  # close the figure from the previous recording

############# recording parameters #############
fs = 44100
block_length = 64
record_seconds = 10
CHANNELS = 2
n_down_plot = 50  # number of plots per second

############ stream with device indices for an external sound card ##########
# Find the device indices in a console session via: import pyaudio;
# pa = pyaudio.PyAudio(); pa.get_device_info_by_index(N)
s = Stream(sample_rate=fs,
           block_length=block_length,
           input_device_index=1,
           output_device_index=4)

fig, ax = plt.subplots()
s.start()
plt.show(block=False)  # the plot is cleared and redrawn in this window

ca_whole_record = []
num_of_blocks = int(fs*record_seconds/block_length)
line, = ax.plot(np.random.randn(num_of_blocks)*15-50)  # scaling of the y axis
pegel = np.zeros(num_of_blocks)  # zero vector to hold the block levels

for n in range(num_of_blocks):  # loop over all blocks
    ca_record = s.read(block_length)
    # level of the block on a logarithmic (dB) scale
    pegel[n] = 10*np.log10(np.sum(np.square(ca_record))/block_length)
    ca_whole_record.append(ca_record)  # collect every recorded block
Example #30
 def __enter__(self):
     self._audio_stream = Stream(samplerate=self._rate, blocksize=self._blocksize)
     self._audio_stream.start()
     self.closed = False
     return self
Example #31
class SlmWebSocket(tornado.websocket.WebSocketHandler):
    def open(self):
        print("WebSocket opened")
        self.set_exponential_smoothing_tau(tau=tau)
        self.init_stream(sample_rate=sample_rate,
                         block_length=block_length)
        self.start_stream()

    def on_message(self, message):

        if '#start#' in message:
            self.start_stream()

        if '#stop#' in message:
            self.stop_stream()

        if '#set_tau#' in message:
            msg = message.split()
            print(msg)
            if len(msg)>1:
                tau = float(msg[1])
                self.set_exponential_smoothing_tau(tau)

    def on_close(self):
        print("Websocket is closed")

    def set_exponential_smoothing_tau(self, tau):
        if tau > 0:
            alpha = np.exp(-1.0/(tau*(sample_rate/block_length)))
            self._b0 = 1 - alpha
            self._a1 = -alpha
            print("Tau accepted.")
        else:
            print("Tau '{}' not accepted.".format(tau))


    def init_stream(self, sample_rate, block_length):
        self._psc_sample_rate = sample_rate
        self._psc_block_length = block_length
        self._level = -120

        def callback(in_data, frame_count, time_info, status):
            new_value = self._b0*np.mean(in_data**2)  - self._a1*10**(self._level*0.1)
            self._level = 10*np.log10(new_value)
            self.write_message('{:.1f}'.format(self._level))
            return in_data, continue_flag

        self._psc_stream = Stream(
            callback=callback,
            sample_rate=self._psc_sample_rate,
            block_length=self._psc_block_length,
            output_device=False
        )

    def start_stream(self):
        if not self._psc_stream.is_active():
            print('Start stream')
            self._psc_stream.start()

    def stop_stream(self):
        if self._psc_stream.is_active():
            print('Stop stream')
            self._psc_stream.stop()
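The callback in init_stream() implements a one-pole exponential average of the block power, reported in dB. A standalone sketch of the same recursion, decoupled from the websocket handler (the helper name and loop are assumptions; only the coefficient formulas come from the code above):

import numpy as np

def smoothed_level_db(blocks, tau, sample_rate, block_length, level_db=-120.0):
    # One-pole exponential smoothing of the mean-square power, in dB,
    # mirroring set_exponential_smoothing_tau() and the stream callback.
    alpha = np.exp(-1.0 / (tau * (sample_rate / block_length)))
    b0, a1 = 1.0 - alpha, -alpha
    for block in blocks:
        power = b0 * np.mean(block ** 2) - a1 * 10 ** (level_db * 0.1)
        level_db = 10 * np.log10(power)
    return level_db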
Example #32
import sys
import time
import numpy as np
from scipy.io.wavfile import read as wavread
from pysoundcard import Stream, continue_flag, complete_flag
"""Play an audio file."""

fs, wave = wavread(sys.argv[1])
wave = np.array(wave, dtype=np.float32)
wave /= 2**15  # normalize -max_int16..max_int16 to -1..1
play_position = 0


def callback(in_data, out_data, time_info, status):
    global play_position
    out_data[:] = wave[play_position:play_position + block_length]
    # TODO: handle last (often incomplete) block
    play_position += block_length
    if play_position + block_length < len(wave):
        return continue_flag
    else:
        return complete_flag


block_length = 16
s = Stream(sample_rate=fs, block_length=block_length, callback=callback)
s.start()
while s.is_active():
    time.sleep(0.1)
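The TODO in the callback concerns the last, usually shorter block of the file. One hedged way to handle it (a sketch, not the original author's fix) is to zero-pad the output buffer:

def callback(in_data, out_data, time_info, status):
    global play_position
    block = wave[play_position:play_position + block_length]
    out_data[:len(block)] = block   # copy whatever is left of the file
    out_data[len(block):] = 0       # zero-pad the possibly shorter tail
    play_position += block_length
    if play_position < len(wave):
        return continue_flag
    return complete_flag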
Example #33
class Player(object):
    BUFFER_SIZE = 0.25  # in seconds

    def __init__(self):
        """
        """

        self.fObj = None
        self.track = None  # the currently playing SoundFile
        self.buffer_size = 0  # the number of PCM frames to process
        self.state = PLAYER_STOPPED
        self.progress = [0, 1]  # an Array of current/total frames
        self.task = None
        #self.stream = Stream(callback=self.stream_callback)
        self.stream = Stream()
        self.queue = deque()
        self.played = deque()

    def enqueue(self, fObj):
        self.queue.append(fObj)

    def close(self):
        self.stop_playing()

    def pause(self):
        if (self.state == PLAYER_PLAYING):
            self.task.pause()
            self.stream.stop()
            self.state = PLAYER_PAUSED

    def play(self):
        log.msg('Play: %r, %d' % (self.track, len(self.queue)))
        if self.track is None and self.queue:
            self.fObj = self.queue.popleft()
            self.track = SoundFile(self.fObj, virtual_io=True)
        if (self.track is not None):
            if (self.state == PLAYER_STOPPED):
                self.start_playing()
            elif (self.state == PLAYER_PAUSED):
                self.stream.start()
                self.task.resume()
                self.state = PLAYER_PLAYING
            elif (self.state == PLAYER_PLAYING):
                pass

    def next_track(self):
        self.stop_playing()
        self.play()

    def previous_track(self):
        self.queue.appendleft(self.played.pop())
        self.stop_playing()
        self.play()

    def toggle_play_pause(self):
        if (self.state == PLAYER_PLAYING):
            self.pause()
        elif ((self.state == PLAYER_PAUSED) or
              (self.state == PLAYER_STOPPED)):
            self.play()

    def start_playing(self):
        self.buffer_size = min(int(round(self.BUFFER_SIZE *
                                         self.track.sample_rate)),
                               2048)
        log.msg('buf size: %s' % self.buffer_size)
        #reopen stream if necessary based on file's parameters
        if self.stream.sample_rate != self.track.sample_rate or \
           self.stream.block_length != self.buffer_size:
            self.stream = Stream(sample_rate=self.track.sample_rate,
                                 block_length=self.buffer_size)
                                 #callback=self.stream_callback)
        self.state = PLAYER_PLAYING
        self.stream.start()
        #self.set_progress(0, self.track.frames)
        self.task = cooperate(self)

    def stop_playing(self):
        if self.task is not None:
            self.task.stop()
            self.task = None
        if self.state > PLAYER_STOPPED:
            self.state = PLAYER_STOPPED
            if self.stream.is_active():
                self.stream.stop()
        self.set_progress(0, 1)
        if self.track is not None:
            self.track = None
            self.fObj.close()
            self.played.append(self.fObj)
            self.fObj = None

    def stream_callback(self, in_data, frame_count, time_info, status):
        # This method doesn't seem to play nice with Twisted.
        # It seems to behave as a large blocking thread;
        # i.e., not releasing back to Twisted after each iteration
        out_data = self.track.read(frame_count)
        self.progress[0] += len(out_data)
        if self.progress[0] < self.track.frames:
            if len(out_data) >= frame_count:
                return (out_data, continue_flag)
            else:
                return (out_data, complete_flag)
        else:
            return (out_data, complete_flag)

    def output_chunk(self):
        frame = self.track.read(self.buffer_size)
        frame_len = len(frame)
        if (frame_len > 0):
            self.progress[0] += frame_len
            self.stream.write(frame)
            return frame_len
        else:
            self.stop_playing()

    def set_progress(self, current, total):
        self.progress[0] = current
        self.progress[1] = total

    def __iter__(self):
        #log.msg('__iter__')
        return self

    def next(self):
        #log.msg('next state: %s' % self.state)
        if self.state == PLAYER_PLAYING:
            return self.output_chunk()
        else:
            raise StopIteration
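The Player snippet relies on module-level names that are not shown (the state constants, SoundFile, Twisted's cooperate and log). A hedged guess at what that preamble might look like; the actual project may define these differently:

from collections import deque

from pysoundfile import SoundFile
from pysoundcard import Stream, continue_flag, complete_flag
from twisted.internet.task import cooperate
from twisted.python import log

PLAYER_STOPPED, PLAYER_PAUSED, PLAYER_PLAYING = range(3)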
Example #34
import sys
import numpy as np
from scipy.io.wavfile import read as wavread
from pysoundcard import Stream

"""Play an audio file."""

fs, wave = wavread(sys.argv[1])
wave = np.array(wave, dtype=np.float32)
wave /= 2**15 # normalize -max_int16..max_int16 to -1..1

block_length = 16
s = Stream(sample_rate=fs, block_length=block_length)
s.start()
s.write(wave)
s.stop()
Example #35
import sys
import numpy as np
from scipy.io.wavfile import read as wavread
from pysoundcard import Stream
"""Play an audio file."""

fs, wave = wavread(sys.argv[1])
wave = np.array(wave, dtype=np.int16)

blocksize = 256
s = Stream(samplerate=fs, blocksize=blocksize, dtype='int16')
s.start()
while True:
    s.write(wave[0:(1024 * 100)])
s.stop()
Example #36
import sys
import time
import numpy as np
from scipy.io.wavfile import read as wavread
from pysoundcard import Stream, continue_flag, complete_flag

"""Play an audio file."""

fs, wave = wavread(sys.argv[1])
wave = np.array(wave, dtype=np.float32)
wave /= 2 ** 15  # normalize -max_int16..max_int16 to -1..1
play_position = 0

def callback(in_data, out_data, time_info, status):
    global play_position
    out_data[:] = wave[play_position:play_position + block_length]
    # TODO: handle last (often incomplete) block
    play_position += block_length
    if play_position + block_length < len(wave):
        return continue_flag
    else:
        return complete_flag

block_length = 16
s = Stream(sample_rate=fs, block_length=block_length, callback=callback)
s.start()
while s.is_active():
    time.sleep(0.1)
Example #37
File: main.py Project: bastibe/QtWorkshop
 def run(self):
     with Stream() as s:
         while self.soundFile.seek(0) < len(self.soundFile):
             s.write(self.soundFile.read(1024))
             self.frameChanged.emit(self.soundFile.seek(0))
         self.frameChanged.emit(0)
Example #38
from pysoundcard import Stream

"""Loop back five seconds of audio data."""

fs = 44100
block_length = 16
s = Stream(sample_rate=fs, block_length=block_length)
s.start()
for n in range(int(fs*5/block_length)):
    s.write(s.read(block_length))
s.stop()
Example #39
__test__ = False

import imp
import numpy as np
import pyfilterbank
from pysoundfile import SoundFile
from pysoundcard import Stream


filename = r"bonsai.wav"

ofb = filterbank.OctaveFilterbank(nth_oct=3.0, start_band=-18, end_band=12, lphp_bounds=True, filterfun="cffi")

st = Stream(input_device=False)
sf = SoundFile(filename)

st.start()


def play_further(nblocks=120, blen=4096):
    states = np.zeros(2)
    for i in range(nblocks):
        x = sf.read(blen)[:, 0]
        y, states = ofb.filter(x, states=states)
        out = y.sum(axis=1).values.flatten()
        st.write(out)


play_further()
Example #40
## Generate a signal and play it back
## unfortunately ipython3 crashes for me right after the noise; cause still unclear

import numpy as np
from pysoundcard import Stream
  
block_length = 1024
fs = 44100
time = 3
sample = time*fs

data = np.random.uniform(-0.7, 0.7, sample)

s = Stream(fs, block_length)
s.start()
s.write(data)
s.stop()
Example #41
from pysoundcard import Stream, continue_flag
import time

"""Loop back five seconds of audio data."""

def callback(in_data, out_data, time_info, status):
    out_data[:] = in_data
    return continue_flag

s = Stream(sample_rate=44100, block_length=16, callback=callback)
s.start()
time.sleep(5)
s.stop()