def unpitched_high(dur, idx):
  """
  Non-harmonic treble/higher frequency sound as a list (due to memoization).

  Parameters
  ----------
  dur:
    Duration, in samples.
  idx:
    Zero or one (integer); selects a slight variation of the sound played.

  Returns
  -------
  A list with the synthesized note.

  """
  first_dur, a, d, r, gain = [
    (30 * ms, 10 * ms, 8 * ms, 10 * ms, .4),
    (60 * ms, 20 * ms, 8 * ms, 20 * ms, .5)
  ][idx]
  env = chain(adsr(first_dur, a=a, d=d, s=.2, r=r),
              adsr(dur - first_dur,
                   a=10 * ms, d=30 * ms, s=.2, r=dur - 50 * ms))
  result = gauss_noise(dur) * env * gain
  return list(result)
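Returning a plain list here, rather than a lazy AudioLazy Stream, is what makes memoization worthwhile: a Stream is exhausted once it is played, while a list can be cached and replayed. A minimal sketch of that idea, assuming the standard library's functools.lru_cache stands in for whatever memoize decorator the full example defines:

from functools import lru_cache

# Hypothetical cache: each (dur, idx) pair is synthesized only once and the
# resulting list is reused on later calls.
unpitched_high_cached = lru_cache(maxsize=None)(unpitched_high)

hit_a = unpitched_high_cached(100 * ms, 0)   # synthesized now
hit_b = unpitched_high_cached(100 * ms, 0)   # same list object, from the cache
assert hit_a is hit_b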
Example #3
        remain -= gain
    return out
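The two lines above are the tail of a delay helper whose beginning was cut off by this listing; the mixture code below calls it as geometric_delay(sig, dur, copies). A minimal sketch of such a helper, assuming AudioLazy's Streamix and thub, with pamp as an assumed amplitude-split parameter rather than the original's exact signature:

def geometric_delay(sig, dur, copies, pamp=.5):
  # Echo effect: mix the signal with progressively quieter delayed copies.
  out = Streamix()
  sig = thub(sig, copies + 1)  # the stream will be consumed copies + 1 times
  out.add(0, sig * pamp)       # dry signal
  remain = 1 - pamp
  for unused in range(copies):
    gain = remain * pamp
    out.add(dur, sig * gain)   # each copy starts dur samples after the previous
    remain -= gain
  return out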


#
# Audio mixture
#
tracks = 3  # besides unpitched track
dur_note = 120 * ms
dur_perc = 100 * ms
smix = Streamix()

# Pitched tracks based on a 1:2 triangular wave
table = TableLookup(line(100, -1, 1).append(line(200, 1, -1)).take(inf))
for track in xrange(tracks):
    env = adsr(dur_note, a=20 * ms, d=10 * ms, s=.8, r=30 * ms) / 1.7 / tracks
    smix.add(0, geometric_delay(new_note_track(env, table), 80 * ms, 2))

# Unpitched tracks
pfuncs = [unpitched_low] * 4 + [unpitched_high]
snd = chain.from_iterable(
    choice(pfuncs)(dur_perc, randint(0, 1)) for unused in zeros())
smix.add(0, geometric_delay(snd * (1 - 1 / 1.7), 20 * ms, 1))

#
# Finishes (save in a wave file)
#
data = lowpass(5000 * Hz)(smix).limit(180 * s)
fname = "audiolazy_save_and_memoize_synth.wav"
save_to_16bit_wave_file(fname, data, rate)
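save_to_16bit_wave_file is a helper defined elsewhere in this example, not an AudioLazy function. A minimal sketch of what such a helper can look like using only the standard wave and struct modules (the clipping and scaling choices are assumptions, and writing sample by sample just keeps the sketch short):

import struct
import wave

def save_to_16bit_wave_file(fname, sig, rate):
  # Clip each sample to [-1, 1], scale to 16-bit signed integers and
  # write a mono WAV file at the given sample rate.
  with wave.open(fname, "wb") as wave_file:
    wave_file.setnchannels(1)
    wave_file.setsampwidth(2)          # 2 bytes == 16 bits
    wave_file.setframerate(int(rate))
    for sample in sig:
      clipped = max(-1., min(1., sample))
      wave_file.writeframes(struct.pack("<h", int(clipped * 32767)))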
Example #4
from _mmines import (MIN_TILE_SIZE, PI, DEFAULT_GRID_SIZES, DSIZE, DCOLOR,
                    NCOLOR)
import wx
import audiolazy as lz
import random

__version__ = "0.1"
__author__ = "Danilo de Jesus da Silva Bellini"

rate = 44100
s, Hz = lz.sHz(rate)
ms = 1e-3 * s
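In AudioLazy, sHz(rate) just returns unit-conversion constants: s is one second measured in samples (the rate itself) and Hz is one hertz in radians per sample (2 * pi / rate), so .4 * s below is a duration in samples and 440 * Hz would be a frequency the oscillators understand. A small illustration, with values assuming rate = 44100:

print(s, Hz)      # 44100 samples per second, ~0.0001425 rad/sample per Hz
print(120 * ms)   # 5292.0 samples for 120 milliseconds
print(440 * Hz)   # ~0.0627 rad/sample for concert A (440 Hz)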

SYNTH_ADSR_PARAMS = dict(a=40*ms, d=20*ms, s=.7, r=50*ms)
SYNTH_DURATION = .4 * s
SYNTH_ENVELOPE = list(lz.adsr(SYNTH_DURATION, **SYNTH_ADSR_PARAMS) * .55)
SYNTH_PAUSE_AT_END = lz.zeroes(.25 * s).take(lz.inf)
synth_table = lz.sin_table.harmonize(dict(enumerate(
                  [.1, .15, .08, .05, .04, .03, .02]
              )))

class GameScreenArea(wx.Panel):

    def __init__(self, parent, rows, cols, nmines, *args, **kwargs):
        super(GameScreenArea, self).__init__(parent, *args, **kwargs)
        self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) # Avoids flicker

        # Event handlers binding
        self.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
        self.Bind(wx.EVT_MIDDLE_DOWN, self.on_mouse_down)
        self.Bind(wx.EVT_RIGHT_DOWN, self.on_mouse_down)
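The listing stops inside __init__, so the synth constants above are never used in the excerpt. For context, a hedged sketch of how they could produce one audible note, following the usual AudioLazy pattern; the pitch name "A4" and the AudioIO playback (which needs PyAudio) are illustrative additions, not part of the game code:

freq = lz.midi2freq(lz.str2midi("A4"))          # an illustrative pitch
note = synth_table(freq * Hz) * SYNTH_ENVELOPE  # finite: ends with the envelope list
sound = note.append(SYNTH_PAUSE_AT_END)         # short silence after the note

with lz.AudioIO(True) as player:                # True: block until playback ends
    player.play(sound, rate=rate)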
Example #5
wave_data = np.frombuffer(string_wav, dtype=np.int16)  # 16-bit PCM samples
wave_data.shape = -1, 2                                # stereo: one column per channel
wave_data = wave_data.T
time = np.arange(0, voice_frame) * (1.0 / voice_sampl_rt)
plt.subplot(211)
plt.plot(time, wave_data[0])
plt.xlabel("time (s)")

N = 50000
df = voice_sampl_rt / N               # FFT bin width is fs / N, not nframes / (N - 1)
freq = [df * n for n in range(0, N)]
wave_data2 = wave_data[0][0:N]
c = np.fft.fft(wave_data2) * 2 / N    # single-sided amplitude scaling (see below)
plt.subplot(212)
plt.plot(freq[:round(len(freq) / 2)], abs(c[:round(len(c) / 2)]))
plt.xlabel("frequency (Hz)")
plt.show()

(1 + z ** -2).plot().show()

trace0 = go.Scatter(x=time, y=wave_data[0], mode='markers', name='time domain')
trace1 = go.Scatter(x=freq[:round(len(freq) / 2)], y=abs(c[:round(len(c) / 2)]),
                    mode='markers', name='freq domain')
data = [trace0, trace1]
py.iplot(data, filename='scatter-mode')
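The * 2 / N scaling together with keeping only the first half of the bins turns the raw FFT into a single-sided amplitude spectrum: a real sinusoid of amplitude A that lands exactly on a bin shows up as a peak of height A. A quick self-contained check with synthetic data (not the voice recording above):

fs, n = 8000, 4000
t = np.arange(n) / fs
x = 0.7 * np.sin(2 * np.pi * 440 * t)   # amplitude 0.7 at 440 Hz, an exact bin
spec = np.abs(np.fft.fft(x)) * 2 / n    # same scaling as above
half = n // 2
bins = np.fft.fftfreq(n, d=1 / fs)[:half]
print(bins[np.argmax(spec[:half])], spec[:half].max())   # ~440.0 and ~0.7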
Example #7
  print("\n".join("".join(el) for el in zip(*(heading_cols + staff))))
  print()

#
# Audio
#

# Useful values from AudioLazy
rate = 44100
s, Hz = sHz(rate)
ms = 1e-3 * s
kHz = 1e3 * Hz
beat_duration = 60. / beat * s # In samples
dur = beat_duration / notes_per_beat # Per note
smix = Streamix() # That's our sound mixture
env = adsr(dur, a=40*ms, d=35*ms, s=.6, r=70*ms).take(inf) # Envelope

# Effects used
def distortion(sig, multiplier=18):
  return atan(multiplier * sig) * (2 / pi)

# Intro count synth
filt = (1 - z ** -2) * .5
if starting_beats > 0:
  inoisy_stream = filt(gauss_noise()) * env
  inoisy_thub = thub(inoisy_stream.append(0).limit(beat_duration),
                     starting_beats)
  inoisy = chain.from_iterable(repeat(inoisy_thub).limit(starting_beats))
  smix.add(.1 * s, inoisy)
  smix.add(starting_beats * beat_duration - dur, []) # Event timing
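The distortion helper above is an arctangent soft clipper: atan maps any sample into (-pi/2, pi/2) and the 2 / pi factor rescales that to (-1, 1), so small samples are amplified roughly linearly (slope about 2 * multiplier / pi) while loud ones saturate smoothly near +/-1. A scalar illustration of the same transfer curve, using math.atan instead of the Stream-wise atan above:

from math import atan, pi

for x in (0.05, 0.5, 2.0, 20.0):
  print(x, atan(18 * x) * (2 / pi))   # ~0.47, ~0.93, ~0.98, ~1.0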