def m21_to_stream(score,
                  synth=ks_synth,
                  beat=90,
                  fdur=2.,
                  pad_dur=.5,
                  rate=lz.DEFAULT_SAMPLE_RATE):
    """
    Converts Music21 data to a Stream object.

    Parameters
    ----------
    score :
        A Music21 data, usually a music21.stream.Score instance.
    synth :
        A function that receives a frequency as input and should yield a Stream
        instance with the note being played.
    beat :
        The BPM (beats per minute) value to be used in playing.
    fdur :
        Relative duration of a fermata. For example, 1.0 ignores the fermata, and
        2.0 (default) doubles its duration.
    pad_dur :
        Duration in seconds, but not multiplied by ``s``, to be used as a
        zero-padding ending event (avoids clicks at the end when playing).
    rate :
        The sample rate, given in samples per second.

    """
    # Configuration
    s, Hz = lz.sHz(rate)
    step = 60. / beat * s

    # Creates a score from the music21 data
    score = reduce(
        operator.concat,
        [
            [
                (
                    pitch.frequency * Hz,  # Note
                    note.offset * step,  # Starting time
                    note.quarterLength * step,  # Duration
                    Fermata in note.expressions) for pitch in note.pitches
            ] for note in score.flat.notes
        ])

    # Mix all notes into song
    song = lz.Streamix()
    last_start = 0
    for freq, start, dur, has_fermata in score:
        delta = start - last_start
        if has_fermata:
            delta *= fdur  # Stretch by the relative fermata duration
        song.add(delta, synth(freq).limit(dur))
        last_start = start

    # Zero-padding and finishing
    song.add(dur + pad_dur * s, lz.Stream([]))
    return song
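
A minimal usage sketch, not part of the original example: it assumes music21 is installed, uses the ks_synth default from the signature above, and plays the result through lz.AudioIO; the Bach chorale is only illustrative.

from music21 import corpus

score = corpus.parse("bach/bwv66.6")  # Any music21 Score should work here
song = m21_to_stream(score, beat=90)  # Streamix with every note scheduled
with lz.AudioIO(True) as player:      # True: block until playback finishes
    player.play(song, rate=lz.DEFAULT_SAMPLE_RATE)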
Example #2
def pitch_from_mic(upd_time_in_ms):
  rate = 44100
  s, Hz = sHz(rate)

  with AudioIO() as recorder:
    snd = recorder.record(rate=rate)
    sndlow = lowpass(400 * Hz)(limiter(snd, cutoff=20 * Hz))
    hop = int(upd_time_in_ms * 1e-3 * s)
    for pitch in freq2str(dft_pitch(sndlow, size=2*hop, hop=hop) / Hz):
      yield pitch
Example #3
def pitch_from_mic(upd_time_in_ms):
    rate = 44100
    s, Hz = sHz(rate)

    with AudioIO() as recorder:
        snd = recorder.record(rate=rate)
        sndlow = lowpass(400 * Hz)(limiter(snd, cutoff=20 * Hz))
        hop = int(upd_time_in_ms * 1e-3 * s)
        for pitch in freq2str(dft_pitch(sndlow, size=2 * hop, hop=hop) / Hz):
            yield pitch
Example #4
def pitch_from_mic(upd_time_in_ms):
  rate = 44100
  s, Hz = sHz(rate)

  api = sys.argv[1] if sys.argv[1:] else None # Choose API via command-line
  chunks.size = 1 if api == "jack" else 16

  with AudioIO(api=api) as recorder:
    snd = recorder.record(rate=rate)
    sndlow = lowpass(400 * Hz)(limiter(snd, cutoff=20 * Hz))
    hop = int(upd_time_in_ms * 1e-3 * s)
    for pitch in freq2str(dft_pitch(sndlow, size=2*hop, hop=hop) / Hz):
      yield pitch
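
A hedged sketch of how this generator could be consumed; the 200 ms update period and the bare print loop are illustrative, not taken from the original example.

if __name__ == "__main__":
  for pitch in pitch_from_mic(200):  # New pitch estimate roughly every 200 ms
    print(pitch)  # Note name plus deviation, as formatted by freq2str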
Example #6
def m21_to_stream(score, synth=ks_synth, beat=90, fdur=2., pad_dur=.5,
                  rate=lz.DEFAULT_SAMPLE_RATE):
  """
  Converts Music21 data to a Stream object.

  Parameters
  ----------
  score :
    A Music21 data, usually a music21.stream.Score instance.
  synth :
    A function that receives a frequency as input and should yield a Stream
    instance with the note being played.
  beat :
    The BPM (beats per minute) value to be used in playing.
  fdur :
    Relative duration of a fermata. For example, 1.0 ignores the fermata, and
    2.0 (default) doubles its duration.
  pad_dur :
    Duration in seconds, but not multiplied by ``s``, to be used as a
    zero-padding ending event (avoids clicks at the end when playing).
  rate :
    The sample rate, given in samples per second.

  """
  # Configuration
  s, Hz = lz.sHz(rate)
  step = 60. / beat * s

  # Creates a score from the music21 data
  score = reduce(operator.concat,
                 [[(pitch.frequency * Hz, # Note
                    note.offset * step, # Starting time
                    note.quarterLength * step, # Duration
                    Fermata in note.expressions) for pitch in note.pitches]
                                                 for note in score.flat.notes]
                )

  # Mix all notes into song
  song = lz.Streamix()
  last_start = 0
  for freq, start, dur, has_fermata in score:
    delta = start - last_start
    if has_fermata:
      delta *= fdur  # Stretch by the relative fermata duration
    song.add(delta, synth(freq).limit(dur))
    last_start = start

  # Zero-padding and finishing
  song.add(dur + pad_dur * s, lz.Stream([]))
  return song
Example #7
  def test_empty(self):
    ns = run_source("", "named.py")
    assert ns["__file__"] == "named.py"

    # A few "global" values that might be used by the plugin
    for k in ["rate", "s", "Hz", "ms", "kHz"]:
      assert k in ns
      assert isinstance(ns[k], int if k == "rate" else float)

    # Ensure AudioLazy was imported
    for k in audiolazy.__all__:
      assert ns[k] is getattr(audiolazy, k)

    s, Hz = audiolazy.sHz(1)
    assert ns["rate"] == 1
    assert ns["s"] == s
    assert ns["Hz"] == Hz
    assert ns["ms"] == 1e-3 * s
    assert ns["kHz"] == 1e3 * Hz
"""
Gammatone frequency and impulse response plots example
"""

from __future__ import division
from audiolazy import erb, gammatone, gammatone_erb_constants, sHz, impulse, dB20
from numpy import linspace, ceil
from matplotlib import pyplot as plt

# Initialization info
rate = 44100
s, Hz = sHz(rate)
ms = 1e-3 * s
plot_freq_time = {80.0: 60 * ms, 100.0: 50 * ms, 200.0: 40 * ms, 500.0: 25 * ms, 800.0: 20 * ms, 1000.0: 15 * ms}
freq = linspace(0.1, 2 * max(freq for freq in plot_freq_time), 100)

fig1 = plt.figure("Frequency response", figsize=(16, 9), dpi=60)
fig2 = plt.figure("Impulse response", figsize=(16, 9), dpi=60)

# Plotting loop
for idx, (fc, endtime) in enumerate(sorted(plot_freq_time.items()), 1):
    # Configuration for the given frequency
    num_samples = int(round(endtime))
    time_scale = linspace(0, num_samples / ms, num_samples)
    bw = gammatone_erb_constants(4)[0] * erb(fc * Hz, Hz)

    # Subplot configuration
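
The snippet stops at the subplot configuration. Below is a hedged sketch of how the loop body might continue; the choice of the gammatone.sampled strategy and the 2x3 subplot grid are assumptions, not taken from the original file.

    # Continuation sketch (assumed): one gammatone filter per subplot
    filt = gammatone.sampled(fc * Hz, bw)  # Filter centered at fc
    ax1 = fig1.add_subplot(2, 3, idx)
    ax1.set_title("fc = %g Hz" % fc)
    ax1.plot(freq, [dB20(filt.freq_response(f * Hz)) for f in freq])
    ax2 = fig2.add_subplot(2, 3, idx)
    ax2.set_title("fc = %g Hz" % fc)
    ax2.plot(time_scale, filt(impulse()).take(num_samples))

fig1.tight_layout()
fig2.tight_layout()
plt.show()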
Example #9
Call with the API name like ...
  ./animated_plot.py jack
... or with no argument for the default PortAudio API.
"""
from __future__ import division
from audiolazy import sHz, chunks, AudioIO, line, pi, window
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
from numpy.fft import rfft
import numpy as np
import collections, sys, threading

# AudioLazy init
rate = 44100
s, Hz = sHz(rate)
ms = 1e-3 * s

length = 2**12
data = collections.deque([0.] * length, maxlen=length)
wnd = np.array(window.hamming(length))  # For FFT

api = sys.argv[1] if sys.argv[1:] else None  # Choose API via command-line
chunks.size = 1 if api == "jack" else 16


# Creates a data updater callback
def update_data():
    with AudioIO(api=api) as rec:
        for el in rec.record(rate=rate):
            data.append(el)
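
A hedged sketch of how the pieces above might be tied together before building the animation: get_spectrum is an illustrative helper, not part of the original example, and the daemon thread simply keeps the recording loop from blocking interpreter exit.

def get_spectrum():
    # Windowed magnitude spectrum of the most recent ``length`` samples
    spectrum = np.abs(rfft(np.array(data) * wnd)) / length
    freqs = np.fft.rfftfreq(length, d=1. / rate)  # Bin frequencies in Hz
    return freqs, spectrum

th = threading.Thread(target=update_data)
th.daemon = True  # Don't keep the process alive because of the recorder
th.start()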
Example #10
# The GUI in this example is based on the dose TDD semaphore source code
# https://github.com/danilobellini/dose

import wx
from math import pi
from audiolazy import (ControlStream, modulo_counter, AudioIO, sHz, sinusoid)

MIN_WIDTH = 15  # pixels
MIN_HEIGHT = 15
FIRST_WIDTH = 200
FIRST_HEIGHT = 200
MOUSE_TIMER_WATCH = 50  # ms
DRAW_TIMER = 50

s, Hz = sHz(44100)


class McFMFrame(wx.Frame):
    def __init__(self, parent):
        frame_style = (
            wx.FRAME_SHAPED |  # Allows wx.SetShape
            wx.FRAME_NO_TASKBAR | wx.STAY_ON_TOP | wx.NO_BORDER)
        super(McFMFrame, self).__init__(parent, style=frame_style)
        self.Bind(wx.EVT_ERASE_BACKGROUND, lambda evt: None)
        self._paint_width, self._paint_height = 0, 0  # Ensure update_sizes at
        # first on_paint
        self.ClientSize = (FIRST_WIDTH, FIRST_HEIGHT)
        self.Bind(wx.EVT_PAINT, self.on_paint)
        self._draw_timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.on_draw_timer, self._draw_timer)
Example #11
# The GUI in this example is based on the dose TDD semaphore source code
# https://github.com/danilobellini/dose

import wx
from math import pi
from audiolazy import (ControlStream, modulo_counter,
                       AudioIO, sHz, sinusoid)

MIN_WIDTH = 15 # pixels
MIN_HEIGHT = 15
FIRST_WIDTH = 200
FIRST_HEIGHT = 200
MOUSE_TIMER_WATCH = 50 # ms
DRAW_TIMER = 50

s, Hz = sHz(44100)

class McFMFrame(wx.Frame):

  def __init__(self, parent):
    frame_style = (wx.FRAME_SHAPED |     # Allows wx.SetShape
                   wx.FRAME_NO_TASKBAR |
                   wx.STAY_ON_TOP |
                   wx.NO_BORDER
                  )
    super(McFMFrame, self).__init__(parent, style=frame_style)
    self.Bind(wx.EVT_ERASE_BACKGROUND, lambda evt: None)
    self._paint_width, self._paint_height = 0, 0 # Ensure update_sizes at
                                                 # first on_paint
    self.ClientSize = (FIRST_WIDTH, FIRST_HEIGHT)
    self.Bind(wx.EVT_PAINT, self.on_paint)
Example #12
      3rd -> drone Bravo, direction east, distance 1m
      Tonic -> drone Charlie, direction south, distance 2m
      Tonic -> drone Delta, direction west, distance 4m

Some parts of this file were adapted from the AudioLazy example "animated_plot.py".
"""
from __future__ import division
from audiolazy import sHz, chunks, AudioIO, line, pi, window
import numpy as np
import collections, sys, threading, time
import rospy
from std_msgs.msg import String

# static configurations
rate = 44100
s, Hz = sHz(rate)  # s = rate, Hz = tau / rate
length = 2**14
noteNames = ["tonic", "3rd", "5th", "octave"]
relativeNotes = np.log([1, 5 / 4, 3 / 2, 2])
#api = sys.argv[1] if sys.argv[1:] else None # Choose API via command-line
api = None
chunks.size = 1 if api == "jack" else 16


class WhitleDetector(object):
    def __init__(self):
        # Allocating Data
        self.data = collections.deque([0.] * length, maxlen=length)
        self.wnd = np.array(window.hamming(length))  # For FFT
        # TODO: Maybe break into an object
        self.lastWhistleTime = None
Example #13
"""
Musical Mines - A minesweeper game to help learning musical skills.
"""

from _mmines.core import GameGrid
from _mmines import (MIN_TILE_SIZE, PI, DEFAULT_GRID_SIZES, DSIZE, DCOLOR,
                    NCOLOR)
import wx
import audiolazy as lz
import random

__version__ = "0.1"
__author__ = "Danilo de Jesus da Silva Bellini"

rate = 44100
s, Hz = lz.sHz(rate)
ms = 1e-3 * s

SYNTH_ADSR_PARAMS = dict(a=40*ms, d=20*ms, s=.7, r=50*ms)
SYNTH_DURATION = .4 * s
SYNTH_ENVELOPE = list(lz.adsr(SYNTH_DURATION, **SYNTH_ADSR_PARAMS) * .55)
SYNTH_PAUSE_AT_END = lz.zeroes(.25 * s).take(lz.inf)
synth_table = lz.sin_table.harmonize(dict(enumerate(
                  [.1, .15, .08, .05, .04, .03, .02]
              )))

class GameScreenArea(wx.Panel):

    def __init__(self, parent, rows, cols, nmines, *args, **kwargs):
        super(GameScreenArea, self).__init__(parent, *args, **kwargs)
        self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) # Avoids flicker
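
A hedged sketch, not in the original game code, of how the table and envelope above could be combined into one audible note; the 440 Hz default and the blocking lz.AudioIO playback are assumptions.

def play_note(freq=440.0):
    # Harmonized sine table at ``freq``, shaped by the ADSR envelope above
    note = synth_table(freq * Hz).limit(SYNTH_DURATION) * SYNTH_ENVELOPE
    with lz.AudioIO(True) as player:  # True: wait until playback finishes
        player.play(note, rate=rate)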
Example #14
plt.xlabel('frequency [Hz]')
plt.ylabel('power [dB]')

plt.title('background noise, spectra averaged')

plt.gca().xaxis.set_major_locator(plt.MaxNLocator(nbins=10))

plt.savefig('backgroundnoise_subglottalresonance.pdf', orientation='landscape', bbox_inches='tight')

# <codecell>

import audiolazy as lz

# <codecell>

s, Hz = lz.sHz(fs)
print "s: {}, Hz: {}".format(s, Hz)

# <codecell>

Hz == (2 * lz.pi / fs)

# <codecell>

print "Nfft: {}, overlap: {}, hop: {}".format(Nfft, overlap, hop)

stream_chest = lz.Stream(chest).blocks(size=Nfft, hop=hop)
stream_falsetto = lz.Stream(falsetto).blocks(size=Nfft, hop=hop)

stream_background = lz.Stream(background).blocks(size=Nfft, hop=hop)
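
A hedged sketch, not from the original notebook, of one way the "spectra averaged" curves could be computed from the blocked streams above; the Hanning window and numpy's rfft are assumptions.

# <codecell>

import numpy as np

wnd = np.hanning(Nfft)

def mean_power_db(blocked_stream):
    # Mean power spectrum over all windowed blocks, in dB
    specs = [np.abs(np.fft.rfft(wnd * np.array(list(block)))) ** 2
             for block in blocked_stream]
    return 10 * np.log10(np.mean(specs, axis=0))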