Example No. 1
from tones import SINE_WAVE
from tones.mixer import Mixer


def setup_mixer():
    mixer = Mixer(44100, 0.5)
    attack_s = 0.01
    decay_s = 0.1
    mixer.create_track(0, SINE_WAVE, attack=attack_s, decay=decay_s)
    mixer.create_track(1, SINE_WAVE, attack=attack_s, decay=decay_s)
    return mixer
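The snippet above only configures the mixer; a minimal usage sketch of the returned object (the note values below are illustrative, not from the original project) could be:

mixer = setup_mixer()
# add one note per track, then render everything to a WAV file
mixer.add_note(0, note='c', octave=5, duration=1.0)
mixer.add_note(1, note='e', octave=5, duration=1.0)
mixer.write_wav('example.wav')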
Example No. 2
def play():
    class Play(Program):
        def __init__(self, commands: List[Command]):
            super().__init__(commands)
            self.sounds = []

        def snd(self, x):
            self.sounds.append(res := super().snd(x))
            return res

    p = Play(parse_data())
    p.run()
    print(p.sounds)

    from io import BytesIO

    import simpleaudio as sa
    from tones import SAWTOOTH_WAVE, SINE_WAVE, SQUARE_WAVE, TRIANGLE_WAVE
    from tones.mixer import Mixer
    mixer = Mixer(amplitude=.3)
    name = 'track'
    mixer.create_track(name, SAWTOOTH_WAVE, vibrato_frequency=None, vibrato_variance=3)
    for s in p.sounds:
        mixer.add_tone(name, s / 3, duration=.1)

    bio = BytesIO()
    mixer.write_wav(bio)
    bio.seek(0)

    wo = sa.WaveObject.from_wave_file(bio)
    wo.play().wait_done()
Example No. 3
def _generate_sample_data(parsed, amplitude, wavetype):
    if wavetype not in [tones.SINE_WAVE, tones.SQUARE_WAVE]:
        raise ValueError("Invalid wave type '%s'" % wavetype)

    mixer = Mixer(SAMPLE_RATE, amplitude)
    numchannels = 0

    for i in range(len(parsed)):
        mixer.create_track(i, wavetype=wavetype, attack=0.01, decay=0.01)

    for i in range(len(parsed)):
        for pitch, time in parsed[i]:
            if pitch <= 0.0:
                mixer.add_silence(i, duration=time)
            else:
                mixer.add_tone(i, frequency=pitch, duration=time)

    return mixer.sample_data()
Example No. 4
def string_to_wav(string, bpm):
    spb = (1 / bpm) * 60
    mixer = Mixer(44100, 0.5)
    mixer.create_track(1, SINE_WAVE, attack=0.05, decay=0.05)

    x = re.findall(r"\*?[0-9a-b][1-6]", string)
    if '00' in x:
        x.remove('00')
    octaves = [(5 + (len(i) > 2)) for i in x]
    notes = [code_to_note(i[len(i)//3]) for i in x]
    lengths = [(code_to_frac(i[(len(i)*2)//3]) * spb / 8) for i in x]

    for i in range(len(notes)):
        mixer.add_note(1, note=notes[i], octave=octaves[i], duration=lengths[i])
    mixer.write_wav('doorbell.wav')
Example No. 5
class AudioManager:
    __instance__ = None

    def __init__(self):
        if AudioManager.__instance__ is None:
            AudioManager.__instance__ = self
            # create event emitter
            self.ee = EventEmitter()
            # no mixer until create_mixer() is called
            self.mixer = None
        else:
            raise Exception("You cannot create another AudioManager class")

    # method to access singleton
    @staticmethod
    def instance():
        if not AudioManager.__instance__:
            AudioManager()
        return AudioManager.__instance__

    # creates a mixer
    def create_mixer(self, sample_rate, amplitude):
        self.mixer = Mixer(sample_rate, amplitude)
        self.curr_track_id = 0
        return self.mixer

    # creates a track
    def create_track(self, track_type, vibrato_freq, vibrato_variance, attack,
                     decay):
        if not self.mixer:
            print("You need to create a Mixer first")
            return

        self.mixer.create_track(self.curr_track_id,
                                track_type,
                                vibrato_frequency=vibrato_freq,
                                vibrato_variance=vibrato_variance,
                                attack=attack,
                                decay=decay)
        # increment current track id
        self.curr_track_id += 1
        # return id of this track
        return self.curr_track_id - 1

    # adds a note (with an optional pitch slide to endNote) to the specified track
    def add_tone(self, track_id, startNote, octave, duration, endNote):
        self.mixer.add_note(track_id,
                            note=startNote,
                            octave=octave,
                            duration=duration,
                            endnote=endNote)
        return (startNote, octave, duration, endNote)

    # audio preview
    def preview_audio(self):
        self.mixer.write_wav('temp.wav')
        sound = SoundLoader.load('temp.wav')
        if sound:
            sound.play()
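A minimal usage sketch of this singleton (assuming SINE_WAVE from the tones package plus the EventEmitter, Mixer and Kivy SoundLoader imports used by the class are available; the note values are illustrative):

manager = AudioManager.instance()
manager.create_mixer(44100, 0.5)
track_id = manager.create_track(SINE_WAVE, vibrato_freq=0, vibrato_variance=0,
                                attack=0.01, decay=0.1)
manager.add_tone(track_id, startNote='c', octave=5, duration=1.0, endNote='e')
manager.preview_audio()  # writes temp.wav and plays it via SoundLoader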
Example No. 6
def fun(wave_type: WaveType,
        notes: Optional[str] = None,
        mixer: Optional[Mixer] = None,
        duration=.2):
    mixer = mixer or Mixer(44100, .3)
    track_args = TrackArgs()  # vibrato_frequency=5, vibrato_variance=5)
    name = str(uuid4())
    mixer.create_track(name, wave_type.value, **asdict(track_args))
    if notes:
        octave = 4
        for n in notes.split():
            if n == '+':
                octave += 1
            elif n == '-':
                octave -= 1
            else:
                # buckets[0] collects note letters, buckets[1] collects duration digits
                # (note: this rebinds the name `notes` from the parameter to a list)
                buckets = notes, nums = [], []
                exhaust(buckets[c.isdigit() or c == '.'].append(c) for c in n)
                mult = float(''.join(nums)) if nums else 1
                note = ''.join(notes)
                if note == 's':
                    mixer.add_note(name,
                                   'a',
                                   duration=mult * duration,
                                   octave=octave,
                                   amplitude=0)
                else:
                    mixer.add_note(name,
                                   note,
                                   duration=mult * duration,
                                   octave=octave)

    else:
        mixer.add_note(name, duration=.5)
        mixer.add_note(name, endnote='f', duration=.5)
        mixer.add_note(name, 'f')
    return mixer
Example No. 7
from io import BytesIO

from tones.mixer import Mixer


def to_wav(mixer: Mixer) -> BytesIO:
    bio = BytesIO()
    mixer.write_wav(bio)
    bio.seek(0)
    return bio
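A short usage sketch for playing the in-memory WAV, following the simpleaudio approach shown in Example No. 2 (the mixer setup here is illustrative):

import simpleaudio as sa
from tones import SINE_WAVE

mixer = Mixer(44100, 0.5)
mixer.create_track(0, SINE_WAVE, attack=0.01, decay=0.1)
mixer.add_note(0, note='a', octave=4, duration=1.0)

wave_obj = sa.WaveObject.from_wave_file(to_wav(mixer))
wave_obj.play().wait_done()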
Example No. 8
#    t = numpy.linspace(0, 1, 500 * 440/hz, endpoint=False)
#    wave = scipy.signal.square(2 * numpy.pi * 5 * t, duty=duty_cycle)
#    wave = numpy.resize(wave, (n_samples,))
#    return (peak / 2 * wave.astype(numpy.int16))
#def audio_freq(freq = 800):
#    global sound
#    sample_wave = square_wave(freq, 4096)
#    sound = pygame.sndarray.make_sound(sample_wave)
#audio_freq()
#sound.play(-1)
#sleep(0.5)
#sound.stop()

#import pysine
#pysine.sine(frequency=440.0, duration=1.0) 

import matplotlib.pyplot as plt

from tones import SINE_WAVE, SAWTOOTH_WAVE
from tones.mixer import Mixer

mixer = Mixer(44100, 0.5)
mixer.create_track(0, SAWTOOTH_WAVE, vibrato_frequency=7.0, vibrato_variance=30.0, attack=0.01, decay=0.1)
mixer.create_track(1, SINE_WAVE, attack=0.01, decay=0.1)
mixer.add_note(0, note='c#', octave=5, duration=1.0, endnote='f#')
mixer.write_wav('tones.wav')
samples = mixer.mix()


plt.plot([1, 2, 3, 4])
plt.ylabel('some numbers')
plt.show()
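The plot above uses placeholder numbers; a minimal sketch for visualizing the mixed audio instead (assuming mixer.mix() returns a flat sequence of amplitude samples, as the assignment above suggests) might be:

# plot roughly the first 20 ms of the mixed waveform (44100 Hz sample rate)
plt.plot(samples[:882])
plt.xlabel('sample index')
plt.ylabel('amplitude')
plt.show()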
Example No. 9

if maxOutlier[0] is None or colorfulness < 50:
    secondary_pitchset_choice = "none"
elif maxOutlier[0] == 0:
    secondary_pitchset_choice = "virtue"
elif maxOutlier[0] == 1:
    secondary_pitchset_choice = "legacy"
elif maxOutlier[0] == 2:
    secondary_pitchset_choice = "passion"

log.debug("Secondary pitch-set \"%s\" selected", secondary_pitchset_choice)


log.info("Writing/playing audio...")
mixer = Mixer(44100, volume)

# [ [ color, anal, synth, attack, decay, vibrato_frequency, vibrato_variance, octave, pitch_1], ... ]
# imgAnal = analyze_image(img[0], img[2], img[1])

# [ [channel attribute, mean, range, ...]
# imgAnal = analyze_image(img[0], img[2], img[1])

# write tones based on pitchset selected
for pitch_id in range(len(primary_pitchsets[primary_pitchset_choice])):
    pitch_content = primary_pitchsets[primary_pitchset_choice][pitch_id]
    # log.debug(pitch_content)

    pitch_offset = 0 # change to edit key of playback
    played_pitch = notes[( pitch_content[0] + pitch_offset) % 12]
Example No. 10
    def create_mixer(self, sample_rate, amplitude):
        self.mixer = Mixer(sample_rate, amplitude)
        self.curr_track_id = 0
        return self.mixer
Example No. 11
from tones import SINE_WAVE
from tones.mixer import Mixer


def _main():
    m = Mixer(44100, 0.5)
    m.create_track(0, SINE_WAVE, vibrato_frequency=7.0)
    m.create_track(1, SINE_WAVE)

    m.add_note(0, note='c', octave=5, duration=1.0)
    m.add_note(0, note='c', octave=5, duration=0.2, endnote='d')
    m.add_note(0, note='d', octave=5, duration=1.0)
    m.add_note(0, note='d', octave=5, duration=0.2, endnote='c')
    m.add_note(0, note='c', octave=5, duration=1.0)
    m.add_note(0, note='c', octave=5, duration=1.0)
    m.add_note(0,
               note='c',
               octave=5,
               duration=1.0,
               endnote='f',
               endoctave=5,
               vibrato_variance=30,
               attack=None,
               decay=1.0)

    m.add_notes(1, [('d', 5, 1.0), ('d', 5, 0.2, 'f'), ('f', 5, 1.0),
                    ('f', 5, 0.2, 'd'),
                    ('d', 5, 1.0, None, None, None, 0.2, 1.0, 7, 30)])

    m.write_wav('super.wav')
Example No. 12
def writeAudio(imageID, filename, path):
    img = [imageID, filename]
    file_path = os.path.join(path, filename)
    img.append(cv2.imread(file_path))

    try:
        img[2][0][0]  # test to get first pixel value
    except TypeError:
        log.error("Inputted path ({}) must be an image".format(filename))
        log.error(
            "Check file names and extensions in image_pool to ensure they match the image (explicitly write out the file extension)."
        )
        sys.exit()

    width, height = __getImageDimensions__(img[2])
    # resize if image is larger than 1500 pixels in either dimension
    if (width > 1500) or (height > 1500):
        factor = 0.5  # percent of original size
        width = int(img[2].shape[1] * factor)
        height = int(img[2].shape[0] * factor)
        dimensions = (width, height)

        log.warning("Resized image {} ({}, {}) for quicker analysis".format(
            img[1], width, height))
        img[2] = cv2.resize(img[2], dimensions, interpolation=cv2.INTER_AREA)
        cv2.imwrite('project/static/uuids/' + img[1], img[2])

    # log.info("Writing audio: {}/{}.wav".format(path, imageID))
    img[2], color_ratios, COMs = colorMark(img[2])  # mark up the image; get color ratios and centers of mass

    # log.debug("Color ratios: %s", str(color_ratios))
    # log.debug("Center of masses: %s", str(COMs))

    avgDist = 0
    ds = []
    for COM_index in range(len(COMs)):
        # pairwise distance between this channel's center of mass and the next one's
        i1 = COM_index % 3
        i2 = (COM_index + 1) % 3

        xs = (COMs[i1][0] - COMs[i2][0])**2
        ys = (COMs[i1][1] - COMs[i2][1])**2
        d = (xs + ys)**(1 / 2)
        avgDist += d
        ds.append(d)

    avgDist = round(avgDist / 3, 15)
    # log.debug("Average distance between COMs: %s", avgDist)
    outlierCOMs = 0
    for distIndex in range(len(ds)):
        if (ds[distIndex] > avgDist * 1.5):
            outlierCOMs += 1
            # log.debug("Upper outlier COM %s in channel %s detected", str(ds[distIndex]), str(distIndex))
        if (ds[distIndex] < avgDist * 0.5):
            outlierCOMs += 1
            # log.debug("Lower outlier COM %s in channel %s detected", str(ds[distIndex]), str(distIndex))

    if outlierCOMs == 1:
        primary_pitchset_choice = "lonely"  # complexity in this decision leaves much to be desired
    else:
        primary_pitchset_choice = "powerful"

    log.debug("Primary pitch-set \"%s\" selected", primary_pitchset_choice)

    colorfulness = image_colorfulness(img[2])
    # log.debug("Colorfulness is : %s", str(colorfulness))

    if colorfulness < 50:  # determine if image is colorful enough to deserve a second pitchset (bass notes)
        secondary_pitchset_choice = "none"
    else:  # determine most prominent color for secondary_pitchset selection

        # log.debug("Max outlier found for 2nd_pitchset in channel %s", str(maxOutlier))
        maxRatIndex = color_ratios.index(max(color_ratios))

        if maxRatIndex == 0:
            secondary_pitchset_choice = "virtue"
        elif maxRatIndex == 1:
            secondary_pitchset_choice = "legacy"
        elif maxRatIndex == 2:
            secondary_pitchset_choice = "passion"
        else:
            secondary_pitchset_choice = "none"

    log.debug("Secondary pitch-set \"%s\" selected", secondary_pitchset_choice)

    log.info("Writing audio...")

    mixer = Mixer(44100, volume)
    pitch_offset = 0  # change to edit key of playback
    # measure of how many sawtooth synths to use on playback
    saw_pitches = round(colorfulness / 10) - 4
    # log.debug("Sawtooth pitches to be created: %s", str(saw_pitches + 1))
    instrument = SAWTOOTH_WAVE

    ampBase = 0.5
    for pitch_id in range(len(primary_pitchsets[primary_pitchset_choice])):
        pitch_content = primary_pitchsets[primary_pitchset_choice][pitch_id]
        played_pitch = notes[(pitch_content[0] + pitch_offset) % 12]
        if pitch_id > saw_pitches:
            instrument = SINE_WAVE
            ampBase = 0.6
        octave = pitch_content[1]
        if octave > 4:  # lower volume on higher notes
            amp = ampBase + 0.2
        else:
            amp = ampBase + 0.3
        mixer.create_track(pitch_id,
                           instrument,
                           vibrato_frequency=0,
                           vibrato_variance=0,
                           attack=1,
                           decay=1)
        mixer.add_note(pitch_id,
                       note=played_pitch,
                       octave=octave,
                       duration=duration,
                       amplitude=amp)

    # pitch_id offset for the secondary pitch-set tracks
    pitch_iter_offset = len(primary_pitchsets[primary_pitchset_choice])

    amp = 0.8
    for pitch_id in range(len(secondary_pitchsets[secondary_pitchset_choice])):
        pitch_content = secondary_pitchsets[secondary_pitchset_choice][
            pitch_id]
        played_pitch = notes[(pitch_content[0] + pitch_offset) % 12]
        octave = pitch_content[1]
        mixer.create_track(pitch_id + pitch_iter_offset,
                           SINE_WAVE,
                           vibrato_frequency=0,
                           vibrato_variance=0,
                           attack=1,
                           decay=1)
        mixer.add_note(pitch_id + pitch_iter_offset,
                       note=played_pitch,
                       octave=octave,
                       duration=duration,
                       amplitude=amp)

    mixer.write_wav(path + '/' + imageID + '.wav')
    log.info("Audio written...")

    return imageID + '.wav'
Example No. 13
def _generate_wav_file(parsed, amplitude, wavetype, filename):
    samples = _generate_sample_data(parsed, amplitude, wavetype)
    Mixer(SAMPLE_RATE, amplitude).mix(filename, samples)
Example No. 14
from tones import SINE_WAVE, SAWTOOTH_WAVE
from tones.mixer import Mixer

# Create mixer, set sample rate and amplitude
mixer = Mixer(44100, 0.5)

# Create two monophonic tracks that will play simultaneously, and set
# initial values for note attack, decay and vibrato frequency (these can
# be changed again at any time, see documentation for tones.Mixer)
mixer.create_track(0,
                   SAWTOOTH_WAVE,
                   vibrato_frequency=20.0,
                   vibrato_variance=30.0,
                   attack=0.01,
                   decay=0.1)
#mixer.create_track(1, SINE_WAVE, attack=0.01, decay=0.1)

# Add a 1-second tone on track 0, slide pitch from c# to f#
mixer.add_note(0, note='c#', octave=5, duration=1.0, endnote='f#')

# Add a 1-second tone on track 1, slide pitch from f# to g#
# mixer.add_note(0, note='f#', octave=5, duration=1.0, endnote='g#')

# Mix all tracks into a single list of samples and write to .wav file
mixer.write_wav('complete.wav')

# Create mixer, set sample rate and amplitude
mixer = Mixer(44100, 0.5)

# Create two monophonic tracks that will play simultaneously, and set
# initial values for note attack, decay and vibrato frequency (these can
# be changed again at any time, see documentation for tones.Mixer)
Example No. 15
        else:
            notelist_sa[i * 24 + j] = notelist_sa[i * 24 + j]
        #uranus
        if rise[i][7] + (i * 24) - 1 == i * 24 + j or sett[i][7] + (
                i * 24) - 1 == i * 24 + j:
            notelist_ur[i * 24 + j] = bnotes[7]
        else:
            notelist_ur[i * 24 + j] = notelist_ur[i * 24 + j]
        #neptune
        if rise[i][8] + (i * 24) - 1 == i * 24 + j or sett[i][8] + (
                i * 24) - 1 == i * 24 + j:
            notelist_ne[i * 24 + j] = bnotes[8]
        else:
            notelist_ne[i * 24 + j] = notelist_ne[i * 24 + j]

mixer = Mixer(44100, 0.5)
mixer.create_track(0, SINE_WAVE)
mixer.create_track(1, SINE_WAVE)
mixer.create_track(2, SINE_WAVE)
mixer.create_track(3, SINE_WAVE)
mixer.create_track(4, SINE_WAVE)
mixer.create_track(5, SINE_WAVE)
mixer.create_track(6, SINE_WAVE)
mixer.create_track(7, SINE_WAVE)
mixer.create_track(8, SINE_WAVE)

mixer.add_notes(0, notelist_sun)
mixer.add_notes(1, notelist_moon)
mixer.add_notes(2, notelist_me)
mixer.add_notes(3, notelist_ve)
mixer.add_notes(4, notelist_ma)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 21 00:41:20 2020

@author: vaishakh
"""

from tones import SINE_WAVE, SAWTOOTH_WAVE
from tones.mixer import Mixer

# Create mixer, set sample rate and amplitude
mixer = Mixer(44100, 0.5)

# Create two monophonic tracks that will play simultaneously, and set
# initial values for note attack, decay and vibrato frequency
mixer.create_track(0,
                   SAWTOOTH_WAVE,
                   vibrato_frequency=7.0,
                   vibrato_variance=30.0,
                   attack=0.01,
                   decay=0.1)
mixer.create_track(1, SINE_WAVE, attack=0.01, decay=0.1)

# Add a 1-second tone on track 0, slide pitch from a# to f#
#mixer.add_note(0, note='a#', octave=5, duration=1.0, endnote='f#')

# You can add your own musical note over here!
text_file = open("musical-notes.txt", "r")
musical_notes = text_file.read().split(' ')[:-1]
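The snippet ends before the notes read from musical-notes.txt are used; a minimal continuation sketch (assuming each entry is a note name such as 'c#', which the original does not show, and an illustrative output filename) could be:

for note in musical_notes:
    mixer.add_note(1, note=note, octave=5, duration=0.5)
mixer.write_wav('melody.wav')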