示例#1
0
def main():
    """Synthesize `duration` seconds of vocal audio in CHUNK-sized blocks,
    then play it back.

    Relies on module-level globals: Voc, samplerate, duration, CHUNK, np, sd.
    """
    vocal = Voc(samplerate)
    output = []

    # Accumulate chunks until we have `duration` seconds of audio.
    while len(output) * CHUNK < samplerate * duration:
        if vocal.counter == 0:
            # Fixed tongue index: 12 + 16 * (0.5 * (0 + 1)) == 20.0.
            # Kept in this form to show the intended oscillator mapping.
            _osc = 12 + 16 * (0.5 * (0 + 1))
            vocal.tongue_shape(_osc, 2.9)

        # compute() may yield more than CHUNK samples; keep exactly one chunk.
        out = np.array(vocal.compute(), dtype=np.float32)[:CHUNK]

        output.append(out.reshape(-1))

    sd.play(np.concatenate(output), samplerate=samplerate, blocking=True)
示例#2
0
def main():
    """Drive the Voc model with sinusoidal articulator trajectories, print a
    SHA-256 regression checksum of the output, then play it back in stereo.

    Relies on module-level globals: Voc, remap, sd, np, hashlib.
    """
    vocal = Voc(sd.default.samplerate)

    # Collect per-iteration chunks in a list; a single np.concatenate at the
    # end avoids the O(n^2) cost of np.append inside the loop.
    chunks = []

    # Parameter ranges from the original experiment:
    # minimum = [0.0, 200.0, 0.6, 12.0, 2.0, 0.01]
    # maximum = [1.0, 800.0, 0.9, 30.0, 3.5, 0.04]
    # [touch, frequency, tenseness, tongue_index, tongue_diameter, velum]

    for i in range(200):
        # Tenseness (computed but not applied to the model here).
        t = remap(np.sin(i / 30.0 - np.pi / 2), 0.9, 0.6)

        # Velum (not applied).
        v = remap(np.sin(i / 20.0 - np.pi / 2), 5.0, 0.00)

        # Frequency (not applied).
        f = remap(np.sin(i / 20.0 - np.pi / 2), 1000.0, 100.0)

        # Tongue (not applied).
        td = remap(np.sin(i / 2.0 - np.pi / 2), 3.5, 2.0)
        ti = remap(np.sin(i / 50.0 - np.pi / 2), 32.0, 10.0)

        # Lips (not applied).
        l = remap(np.sin(i / 2.0 - np.pi / 2), 3.5, 0)

        # Epiglottis (computed but not applied).
        e = remap(np.sin(i / 5.0 - np.pi / 2), 3.5, 0)

        # Trachea -- the only articulator actually driven.
        # NOTE(review): the original reused the name `t` here, silently
        # shadowing the tenseness value above. Renamed to `trachea`; the
        # printed value is unchanged (it was the trachea value at print time).
        trachea = remap(np.sin(i / 5.0 - np.pi / 2), 3.5, 0)
        vocal.tract.trachea = trachea

        print(trachea, v, f, ti, td, l)
        out = np.array(vocal.compute(randomize=True), dtype=np.float32)

        chunks.append(out.reshape(-1))

    outdata = np.concatenate(chunks)

    # Regression checksum over the raw float32 stream.
    m = hashlib.sha256()
    m.update(outdata)
    print(m.digest())

    # Duplicate mono -> two channels for stereo playback.
    outdata = np.repeat(outdata.reshape(-1, 1), 2, axis=1)

    print(outdata, outdata.shape)

    sd.play(outdata, samplerate=sd.default.samplerate, blocking=True)

    print("Done")
示例#3
0
def main():
    """Play CHUNK-sized blocks of Voc output through a sounddevice callback
    stream for `duration` seconds.

    Relies on module-level globals: Voc, sd, np, CHUNK, duration.
    """
    vocal = Voc(sd.default.samplerate)

    def process(outdata, frames, time, status):
        """Output-stream callback: fill `outdata` with one synthesized chunk."""
        if status:
            print(status)

        if vocal.counter == 0:
            # Fixed tongue index: 12 + 16 * (0.5 * (0 + 1)) == 20.0.
            _osc = 12 + 16 * (0.5 * (0 + 1))
            vocal.tongue_shape(_osc, 2.9)

        # compute() may yield more than CHUNK samples; keep exactly one chunk.
        out = np.array(vocal.compute(), dtype=np.float32)[:CHUNK]

        print(outdata.shape, out.shape)

        # Mono stream: reshape to a (frames, 1) column to match outdata.
        outdata[:] = out.reshape(-1, 1)

    with sd.OutputStream(channels=1, callback=process, blocksize=CHUNK, samplerate=sd.default.samplerate) as ostream:
        print(ostream.cpu_load)
        sd.sleep(int(duration * 1000))
        print(ostream.cpu_load)
示例#4
0
def tone_generator(stream, buffer, loop=False):
    """Feed CHUNK-sized blocks of Voc audio into a ring buffer until the
    stream finishes.

    Parameters:
        stream: output stream exposing samplerate, dtype, finished, status,
            frame_count, xruns, cpu_load, aborted.
        buffer: ring buffer exposing write_available, get_write_buffers and
            advance_write_index.
        loop: unused in the visible code; kept for interface compatibility.

    Relies on module-level globals: Voc, CHUNK, np, time.
    """
    fs = stream.samplerate

    vocal = Voc(fs)

    # Loop until the stream stops.
    while not stream.finished:
        frames = buffer.write_available
        if frames < CHUNK:
            # Not enough room for a full chunk yet; back off briefly.
            time.sleep(0.010)
            continue

        # Get the write buffers directly to avoid making any extra copies.
        frames, part1, part2 = buffer.get_write_buffers(frames)

        # Calculate vocal data.
        if vocal.counter == 0:
            # Fixed tongue index: 12 + 16 * (0.5 * (0 + 1)) == 20.0.
            _osc = 12 + 16 * (0.5 * (0 + 1))
            vocal.tongue_shape(_osc, 2.9)

        out = np.array(vocal.compute(use_np=True),
                       dtype=np.float32)[:CHUNK].reshape(-1)

        outbuff = np.frombuffer(part1, dtype=stream.dtype)
        first_buff_len = len(outbuff)
        if first_buff_len < CHUNK:
            # The chunk straddles the end of the ring buffer: fill part1,
            # then spill the remainder into part2 (nonempty on wrap-around).
            outbuff[:] = out[:first_buff_len]
            if len(part2):
                outbuff = np.frombuffer(part2, dtype=stream.dtype)
                outbuff[:(CHUNK - first_buff_len)] = out[first_buff_len:]
        else:
            outbuff[:CHUNK] = out

        # Flag that we've added data to the buffer.
        buffer.advance_write_index(CHUNK)
        print('Status: {}, Frames: {}, Framecount: {}, XRuns: {}, CPU: {}'.
              format(stream.status, frames, stream.frame_count, stream.xruns,
                     stream.cpu_load))

    print('Final Status: {}, Aborted ({}), Finished ({})'.format(
        stream.status, stream.aborted, stream.finished))
示例#5
0
class TestVocComputes(unittest.TestCase):
    """Regression tests comparing Voc output against recorded reference data."""

    def setUp(self):
        # Reference values recorded from a known-good run.
        self.data = joblib.load('test_data.jbl')
        self.voc = Voc(48000.0)

    def test_basic_voc_output(self):
        """Two successive deterministic compute() calls match the recording."""
        first = self.voc.compute(randomize=False)
        np.testing.assert_allclose(first, self.data['basic_voc_output'])

        second = self.voc.compute(randomize=False)
        np.testing.assert_allclose(second,
                                   self.data['basic_voc_output_2'],
                                   rtol=1e-6,
                                   atol=1e-1)

    def test_basic_glottis_output(self):
        """First deterministic glottis sample matches the recording."""
        self.voc.glottis.update(self.voc.tract.block_time)
        glottal = self.voc.glottis.compute(randomize=False)
        self.assertEqual(glottal[0], self.data['basic_glot_output'])

    def test_basic_tract_output(self):
        """Two manual tract steps and the scaled sample match the recording."""
        self.voc.glottis.update(self.voc.tract.block_time)
        self.voc.tract.reshape()
        self.voc.tract.calculate_reflections()

        samples = []
        lam_a, lam_b = 0, 0.5 / float(CHUNK)
        glottal = self.voc.glottis.compute(randomize=False)

        self.voc.tract.compute(glottal[0], lam_a)
        step_one = self.voc.tract.lip_output + self.voc.tract.nose_output

        self.voc.tract.compute(glottal[0], lam_b)
        step_two = step_one + self.voc.tract.lip_output + self.voc.tract.nose_output
        samples.append(step_two * 0.125)

        self.assertEqual(self.data['basic_tract_output_1'], step_one)
        self.assertEqual(self.data['basic_tract_output_2'], step_two)
        self.assertEqual(self.data['basic_buffer_output'], samples[-1])
示例#6
0
import joblib

from pynkTrombone.voc import Voc, CHUNK

# Generate reference ("golden") values from a fresh Voc instance, to be
# stored for regression testing.
v = Voc(48000.0)
voc_out = v.compute(randomize=False)   # first deterministic output block
voc_out2 = v.compute(randomize=False)  # second deterministic output block

# Restart with a fresh instance and step the glottis/tract pipeline manually.
v = Voc(48000.0)
v.glottis.update(v.tract.block_time)
v.tract.reshape()
v.tract.calculate_reflections()
buf = []
lambda1 = 0
lambda2 = 0.5 / float(CHUNK)
glot = v.glottis.compute(randomize=False)

# NOTE(review): elsewhere in this codebase glot[0] is passed to
# tract.compute(); here the whole `glot` value is passed -- confirm which
# form tract.compute() actually expects.
v.tract.compute(glot, lambda1)
vocal_output_1 = v.tract.lip_output + v.tract.nose_output

v.tract.compute(glot, lambda2)
vocal_output_2 = vocal_output_1 + v.tract.lip_output + v.tract.nose_output
buf.append(vocal_output_2 * 0.125)

# Full Chunk

# Mapping of reference names to recorded values (literal continues below).
test_values = {
    'basic_voc_output': voc_out,
    'basic_voc_output_2': voc_out2,
    'basic_glot_output': glot,
示例#7
0
import sounddevice as sd

from pynkTrombone.voc import Voc

# Default sample rate used for all sounddevice playback in this script.
sd.default.samplerate = 48000

duration = 5.0  # seconds

vocal = Voc(sd.default.samplerate)

# One-off articulation setup; a freshly constructed Voc reports counter == 0.
_osc = 0
_voc = 0  # NOTE(review): assigned but never used in the visible code
if vocal.counter == 0:
    # 12 + 16 * (0.5 * (0 + 1)) == 20.0 -- fixed tongue index.
    _osc = 12 + 16 * (0.5 * (_osc + 1))
    vocal.tongue_shape(_osc, 2.9)

# Run 500 deterministic compute() blocks, discarding the output
# (presumably a smoke test / warm-up -- nothing is played or saved here).
for i in range(500):
    vocal.compute(randomize=False)
示例#8
0
from timeit import timeit

import numpy as np

from pynkTrombone.voc import Voc

n = int(round(48000 /
              512))  # The number of times needed to produce a second of sound.

# Benchmark the default compute() path: five timings, each synthesizing
# one second of audio (n calls).
t = []
v = Voc(48000.0)
for i in range(5):
    t.append(timeit(lambda: v.compute(randomize=False), number=n))

print(np.average(t), np.std(t))

# Benchmark the use_np=True compute() path the same way, on a fresh Voc.
t = []
v = Voc(48000.0)
for i in range(5):
    t.append(timeit(lambda: v.compute(randomize=False, use_np=True), number=n))

print(np.average(t), np.std(t))
示例#9
0
 def setUp(self):
     """Load recorded reference data and create a fresh 48 kHz Voc instance."""
     self.data = joblib.load('test_data.jbl')
     self.voc = Voc(48000.0)
示例#10
0
"""PyAudio Example: Play a wave file (callback version)."""

import pyaudio
import time
import numpy as np

from pynkTrombone.voc import Voc

starting = 0  # NOTE(review): assigned but not used in the visible code
samplerate = 48000

# instantiate PyAudio (1)
p = pyaudio.PyAudio()

# Shared synthesizer instance driven by the stream callback below.
vocal = Voc(samplerate)


# define callback (2)
def callback(in_data, frame_count, time_info, status):
    # if status:
    print(status)

    _osc = 0
    _voc = 0
    if vocal.counter == 0:
        _osc = 12 + 16 * (0.5 * (_osc + 1))
        vocal.tongue_shape(_osc, 2.9)

    out = np.array(vocal.compute(), dtype=np.float32)[:frame_count]

    data = np.repeat(out.reshape(-1, 1), 2, axis=1)