コード例 #1
0
    def run(self, time_limit):
        """Record for *time_limit* seconds and return the processed result.

        Opens a mono int32 duplex stream driven by ``self.sd_callback``,
        blocks for the requested duration, then hands the accumulated
        ``self.received_samples`` to ``self.postprocess``.
        """
        self.start_time = datetime.now()
        stream = sd.RawStream(channels=1, dtype='int32',
                              callback=self.sd_callback)
        with stream:
            # sd.sleep expects milliseconds.
            sd.sleep(int(1000 * time_limit))
        return self.postprocess(self.received_samples)
コード例 #2
0
    def mesh(self):
        """Read one audio chunk and build a noise-modulated 3-D mesh.

        Returns ``(verts, faces, colours)`` describing a grid of
        ``len(self.xpoints) x len(self.ypoints)`` vertices whose z-height
        is the audio waveform scaled by OpenSimplex noise.
        """
        # Duplex raw stream; read() below returns (raw_buffer, overflowed).
        self.stream = sd.RawStream(
            samplerate=self.RATE,
            dtype=self.FORMAT,
            channels=self.CHUNK if False else self.CHANNELS,  # NOTE(review): keep as written below
            blocksize=self.CHUNK,
        )

        with self.stream:
            # [0] selects the raw byte buffer; [:] copies it before close.
            wf_data = self.stream.read(self.CHUNK)[0][:]

        # Unpack 2*CHUNK raw bytes as unsigned chars -- assumes 2 bytes per
        # frame (16-bit mono); TODO confirm self.FORMAT is 'int16'.
        wf_data = struct.unpack(str(2 * self.CHUNK) + 'B', wf_data)
        # Keep every other byte and re-centre around zero.
        wf_data = np.array(wf_data, dtype='b')[::2] + 128
        wf_data = np.array(wf_data, dtype='int32') - 128
        # Scale amplitude down for display.
        wf_data = wf_data * 0.04
        # NOTE(review): assumes CHUNK == len(xpoints) * len(ypoints) -- verify.
        wf_data = wf_data.reshape((len(self.xpoints), len(self.ypoints)))

        # One vertex per grid point; z = waveform * 2-D simplex noise.
        verts = np.array([[
            x, y,
            wf_data[xid][yid] * self.open_simplex.noise2d(x=xid / 5, y=yid / 5)
        ] for xid, x in enumerate(self.xpoints)
                          for yid, y in enumerate(self.ypoints)],
                         dtype=np.float32)

        # Colour ramp accumulators -- currently unused; see the commented-out
        # colours.append(col) lines below.
        r = self.startR
        g = self.startG
        b = self.startB

        faces = []
        colours = []
        # Two triangles and two colour entries per grid cell.
        for m in range(self.nfaces - 1):
            yoff = m * self.nfaces

            r += self.rStep
            g += self.gStep
            b += self.bStep
            col = [r, g, b, 1]

            for n in range(self.nfaces - 1):
                faces.append([
                    n + yoff, yoff + n + self.nfaces,
                    yoff + n + self.nfaces + 1
                ])
                faces.append(
                    [n + yoff, yoff + n + 1, yoff + n + self.nfaces + 1])

                colours.append([
                    n / self.nfaces, 1 - n / self.nfaces, m / self.nfaces, 0.7
                ])
                colours.append([
                    n / self.nfaces, 1 - n / self.nfaces, m / self.nfaces, 0.7
                ])

                # colours.append(col)
                # colours.append(col)

        faces = np.array(faces)
        colours = np.array(colours)

        return verts, faces, colours
コード例 #3
0
    def assist(self, text_query):
        """Send a text request to the Assistant and play back the response.

        Streams the Assistant's audio answer through the module-global
        RawStream ``s`` and returns ``(text_response, html_response)``.
        (The docstring was previously placed after ``global s``, where it
        was a no-op string expression, not a docstring.)
        """
        global s

        def iter_assist_requests():
            # Build the single AssistConfig/AssistRequest for this turn.
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=16000,
                    volume_percentage=50,
                ),
                dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                    language_code=self.language_code,
                    conversation_state=self.conversation_state,
                    is_new_conversation=self.is_new_conversation,
                ),
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id=self.device_id,
                    device_model_id=self.device_model_id,
                ),
                text_query="オウム返し " + text_query,
            )
            # Continue current conversation with later requests.
            self.is_new_conversation = False
            if self.display:
                config.screen_out_config.screen_mode = PLAYING
            req = embedded_assistant_pb2.AssistRequest(config=config)
            assistant_helpers.log_assist_request_without_audio(req)
            yield req

        text_response = None
        html_response = None

        # Renamed from `bytes`, which shadowed the builtin.
        audio_bytes = 0
        # Drop any stream left over from a previous call before opening a
        # new one.  NOTE(review): the old stream is never stopped/closed
        # here (same as the original) -- confirm the caller handles that.
        s = None
        s = sd.RawStream(
            samplerate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
            dtype='int16',
            channels=1,
            blocksize=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE)
        # Start the stream once, before any write; the original called
        # start() inside the loop, after the first write had already been
        # issued against a stopped stream.
        s.start()
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            audio_bytes += len(resp.audio_out.audio_data)
            s.write(resp.audio_out.audio_data)
            if resp.screen_out.data:
                html_response = resp.screen_out.data
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                self.conversation_state = conversation_state
            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text
        return text_response, html_response
コード例 #4
0
ファイル: __init__.py プロジェクト: loretod/Digital_Lit_Box
 def __init__(self, sample_rate, sample_width, block_size, flush_size):
     """Open a mono 16-bit RawStream.

     Only a sample width of 2 bytes (int16) is supported; any other
     width raises Exception, as before.
     """
     if sample_width != 2:
         raise Exception('unsupported sample size:', sample_width)
     audio_format = 'int16'
     self._audio_stream = sd.RawStream(
         samplerate=sample_rate,
         dtype=audio_format,
         channels=1,
         # blocksize is in number of frames (2 bytes per int16 frame).
         blocksize=int(block_size/2),
     )
     self._block_size = block_size
     self._flush_size = flush_size
コード例 #5
0
ファイル: audio.py プロジェクト: olemb/overdub
    def __init__(self, callback):
        """Open a duplex stream that pipes raw blocks through *callback*."""
        def callback_wrapper(inblock, outblock, *_):
            # Hand the raw input bytes to the user callback and copy its
            # result into the output buffer in place.
            outblock[:] = callback(bytes(inblock))

        self.stream = sounddevice.RawStream(
            dtype='int16',
            channels=2,
            samplerate=frame_rate,
            blocksize=frames_per_block,
            callback=callback_wrapper,
        )
        # Total round-trip latency (input + output), in seconds.
        self.latency = sum(self.stream.latency)
        # How many blocks ahead playback must run to cover that latency.
        self.play_ahead = int(round(self.latency * blocks_per_second))
コード例 #6
0
    def __init__(self, callback):
        """Open a duplex int16 stream routing audio through *callback*."""
        def _wrap(inblock, outblock, *_):
            outblock[:] = callback(bytes(inblock))

        self.stream = sounddevice.RawStream(
            samplerate=FRAME_RATE,
            blocksize=FRAMES_PER_BLOCK,
            channels=2,
            dtype='int16',
            callback=_wrap,
        )
        # stream.latency is an (input, output) pair of seconds.
        in_latency, out_latency = self.stream.latency
        self.latency = in_latency + out_latency
        # Blocks of look-ahead needed to cover the round-trip latency.
        self.play_ahead = int(round(BLOCKS_PER_SECOND * self.latency))
コード例 #7
0
ファイル: main6.py プロジェクト: Jsb3099/voip_udp
def stre(qplay, qinmic):
    """Run the duplex audio stream until its finished_callback fires.

    The queues are unused here (the clearing code is commented out);
    audio is driven entirely by the module-level ``callback``.
    """
    #with qinmic.mutex:
    #	print("\r[stre] qinmic clear.",end="")
    #	qinmic.queue.clear()
    done = threading.Event()
    with sd.RawStream(samplerate=8000,
                      blocksize=1024,
                      device=None,
                      channels=1,
                      dtype='int16',
                      callback=callback,
                      finished_callback=done.set):
        # Block until the stream signals completion.
        done.wait()
コード例 #8
0
ファイル: main4.py プロジェクト: Jsb3099/voip_udp
def stre(qplay, qinmic):
    """Flush pending mic data, then run the duplex stream to completion."""
    #while qplay.empty():
    # Drop anything already queued from the microphone before starting.
    with qinmic.mutex:
        qinmic.queue.clear()
    finished = threading.Event()
    stream = sd.RawStream(
        samplerate=8000,
        blocksize=1024,
        device=None,
        channels=1,
        dtype='int16',
        callback=callback,
        finished_callback=finished.set,
    )
    with stream:
        # Block until the stream signals completion.
        finished.wait()
コード例 #9
0
ファイル: client.py プロジェクト: JacobAsmuth/Chatter
    def connect(self, ip, voice_port, data_port):
        """Connect to the voice/data server and start the client loops.

        Opens UDP sockets for voice and data, a TCP socket for control
        data, identifies this client to the server, starts the duplex
        audio stream, and spawns the background receive/read threads.
        Windows-only (uses winsound).
        """
        # Reset per-connection state.
        self.closing = False
        self.sent_frames_count = 0
        self.release_frame = -1
        self.ip = ip
        self.voice_port = voice_port
        self.data_port = data_port
        self.sent_audio = False
        self.encoder = Encoder()
        self.all_audio = bytearray()
        # Jitter buffer smooths out network timing for incoming voice.
        self.voice_buffer = JitterBuffer(consts.MIN_BUFFER_SIZE,
                                         consts.MAX_BUFFER_SIZE)

        # Duplex raw stream; audio_callback handles capture/playback.
        self.audio_stream = sounddevice.RawStream(
            blocksize=consts.SAMPLES_PER_FRAME,
            channels=consts.CHANNELS,
            samplerate=consts.SAMPLE_RATE,
            dtype=np.int16,
            callback=self.audio_callback,
        )

        self.voice_addr = (self.ip, self.voice_port)
        self.data_addr = (self.ip, self.data_port)
        # Voice and bulk data go over UDP; control data over TCP.
        self.voice_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.udp_data_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.tcp_data_socket = socket.socket(socket.AF_INET,
                                             socket.SOCK_STREAM)
        self.tcp_data_socket.connect(self.data_addr)
        # Identify ourselves to the server immediately after connecting.
        self.tcp_data_socket.sendall(
            pickle.dumps(packets.ClientPacket(self.client_id),
                         consts.PICKLE_PROTOCOL))

        # Server sends offsets/settings before audio starts.
        self.receive_offsets()
        self.receive_settings()

        self.audio_stream.start()

        # beep beep
        winsound.Beep(300, 80)
        winsound.Beep(350, 90)
        winsound.Beep(400, 200)
        winsound.Beep(500, 200)

        print("Connected to server, initializing...")
        # Daemon threads die with the main process.
        threading.Thread(target=self.receive_audio_loop, daemon=True).start()
        threading.Thread(target=self.read_memory_loop, daemon=True).start()
        threading.Thread(target=self.receive_tcp_data_loop,
                         daemon=True).start()
コード例 #10
0
ファイル: main8.py プロジェクト: Jsb3099/voip_udp
def stre(
    qplay, qinmic
):  # handling audio streams to and from system(mic and speaker) also uses callback function defined above
    """Run the duplex audio stream until finished_callback fires.

    Failures are still handled best-effort via ``nothing()``, but the
    bare ``except:`` is narrowed to ``except Exception`` so that
    SystemExit / KeyboardInterrupt are no longer swallowed.
    """
    try:
        event = threading.Event()
        stream = sd.RawStream(samplerate=8000,
                              blocksize=1024,
                              channels=1,
                              dtype='int16',
                              callback=callback,
                              finished_callback=event.set)
        with stream:
            event.wait()
    except Exception:
        # Preserve the original best-effort behaviour for real errors.
        nothing()
コード例 #11
0
ファイル: tutorial3.py プロジェクト: Q-Mart/playWatch
    def __init__(self):
        """Open a mono int16 input stream and set up the live plots."""
        # NOTE(review): despite the names these are the int16 limits
        # (+/-32767), not uint16 -- names kept because other methods
        # presumably reference them.
        self.MAX_UINT16 = 2 ** 15 - 1
        self.MIN_UINT16 = -self.MAX_UINT16

        # Capture parameters: 2048-frame blocks of mono int16 at 44.1 kHz.
        self.CHUNK = 2 * 1024
        self.FORMAT = 'int16'
        self.CHANNELS = 1
        self.RATE = 44100

        self.stream = sd.RawStream(
            samplerate=self.RATE,
            channels=self.CHANNELS,
            dtype=self.FORMAT,
            blocksize=self.CHUNK,
        )

        self.init_plots()
        self.start_plots()
コード例 #12
0
    def start(self):
        """Run the intercom loop.

        With NumPy enabled, a ``sd.Stream`` runs while ``receive()`` is
        called forever; otherwise a ``sd.RawStream`` is opened and -- as
        in the original -- its context exits immediately.
        """
        if MinimalIntercom.NUMPY != 1:
            with sd.RawStream(samplerate=self.sample_rate,
                              blocksize=self.chunk_size,
                              dtype=self.data_type,
                              channels=self.channels,
                              callback=self.callback):
                # NOTE(review): this closes the stream right away (the
                # original body was `pass`) -- confirm whether a wait
                # loop was intended here.
                pass
            return

        with sd.Stream(samplerate=self.sample_rate,
                       blocksize=self.chunk_size,
                       dtype=self.data_type,
                       channels=self.channels,
                       callback=self.callback):
            while True:
                self.receive()
コード例 #13
0
    def __init__(self, sample_rate, sample_width, block_size, flush_size,
                 procedural_audio_wave):
        """Configure a UE procedural sound wave and open a matching stream.

        Only a 2-byte (int16) sample width is supported; other widths
        raise Exception after the wave has been configured, as before.
        """
        # Describe the Unreal procedural sound wave we will feed.
        wave = procedural_audio_wave
        self.ue_procedural_audio_wave = wave
        wave.SampleRate = sample_rate
        wave.NumChannels = 1
        wave.Duration = 10000.0
        wave.SoundGroup = 4
        wave.bLooping = False

        if sample_width != 2:
            raise Exception('unsupported sample width:', sample_width)
        audio_format = 'int16'
        self._system_audio_stream = sd.RawStream(
            samplerate=sample_rate,
            dtype=audio_format,
            channels=1,
            blocksize=int(block_size / 2),  # blocksize is in number of frames.
        )
        self._block_size = block_size
        self._flush_size = flush_size
        self._sample_rate = sample_rate
コード例 #14
0
ファイル: tutorial1.py プロジェクト: Q-Mart/playWatch
import sounddevice as sd
import struct
import numpy as np
import matplotlib.pyplot as plt

# Capture parameters: 4096-frame blocks of mono int16 at 44.1 kHz.
CHUNK = 1024 * 4
FORMAT = 'int16'
CHANNELS = 1
RATE = 44100

# Duplex raw stream; reads below return CHUNK frames of raw bytes.
stream = sd.RawStream(
    samplerate=RATE,
    dtype=FORMAT,
    channels=CHANNELS,
    blocksize=CHUNK,
)

# Live waveform plot: one point per frame in the chunk.
fig, ax = plt.subplots()
fig.show()

x = np.arange(0, CHUNK)
line, = ax.plot(x, np.random.rand(CHUNK))

# NOTE(review): despite the names these are the int16 limits (+/-32767).
MAX_UINT16 = (2**15)-1
MIN_UINT16 = -MAX_UINT16

# Fix the y-axis to the full sample range.
ax.set_ylim(MIN_UINT16, MAX_UINT16)


with stream:
    while True:
コード例 #15
0
# Basic wire using blocking I/O. Python buffers are used.

import sounddevice as sd

CHUNK_SIZE = 1024

# Duplex stream: whatever arrives on the input is echoed to the output.
stream = sd.RawStream(samplerate=44100, channels=2, dtype='int16')
stream.start()
while True:
    # read() returns (raw_bytes, overflow_flag).
    data, overflowed = stream.read(CHUNK_SIZE)
    if overflowed:
        print("Overflow")
    stream.write(data)
コード例 #16
0
    return w


# Pre-compute the chirp waveform that the callback below will play.
y = generate_chirp()
y = np.array(y)
# with open('chirp.pickle', 'rb') as f:
# 	y = pickle.load(f)
input_device = 0
i = 0
device_list = list(sd.query_devices())
# Use the first device advertising exactly four input channels, if any;
# otherwise fall back to device 0.
for candidate in device_list:
    if candidate['max_input_channels'] == 4:
        input_device = device_list.index(candidate)
        break


def callback(indata, outdata, frames, time, status):
    """Duplex callback: play the pre-computed chirp buffer every block.

    Fix: the original did ``outdata = y``, which only rebinds the local
    name and never writes into the output buffer, so nothing was played.
    Raw stream output buffers must be filled in place with slice
    assignment.
    """
    global i
    # NOTE(review): assumes len(y) matches the block's byte length
    # (blocksize * channels * samplesize) -- confirm against the stream
    # opened below (blocksize=960).
    outdata[:] = y


# Run the duplex stream on devices (0 in, 1 out) until Return is pressed.
with sd.RawStream(samplerate=48000,
                  blocksize=960,
                  device=(0, 1),
                  callback=callback):
    banner = '#' * 80
    print(banner)
    print('press Return to quit')
    print(banner)
    input()
コード例 #17
0
import sounddevice as sd


def callback(indata, outdata, frames, time, status):
    """Wire callback: copy each raw input block straight to the output.

    Reports stream status (under-/overflows) instead of silently
    ignoring it, consistent with the other wire example in this file and
    with sounddevice's reference wire example.
    """
    if status:
        print(status)
    outdata[:] = indata


# Pass-through wire on explicit devices (input 2, output 6) at 44.1 kHz;
# runs until Return is pressed.
with sd.RawStream(samplerate=44100,
                  channels=2,
                  device=(2, 6),
                  callback=callback) as s:
    input()




コード例 #18
0
ファイル: test.py プロジェクト: Henri2h/SONAR_Python
import sounddevice as sd
duration = 5.5  # seconds


def callback(indata, outdata, frames, time, status):
    """Echo each raw input block back to the output, reporting xruns."""
    if status:
        print(status)
    # Fill the output buffer in place with the captured input bytes.
    outdata[:] = indata


# Run the 24-bit stereo wire for `duration` seconds (sleep is in ms).
stream = sd.RawStream(channels=2, dtype='int24', callback=callback)
with stream:
    sd.sleep(int(duration * 1000))