Esempio n. 1
0
def read_note_from_sound_file(filename: str,
                              samplerate: int = DEFAULT_SAMPLE_RATE):
    """Extract note events from a sound file.

    Returns a list of Note objects carrying start time (s), pitch,
    volume and duration, as reported by aubio's note detector.
    """
    print("====> reading notes from sound file")
    fft_size = 512 // DOWN_SAMPLE
    hop_size = 256 // DOWN_SAMPLE
    # open the file; the decoder reports the actual sample rate
    reader = source(filename, samplerate, hop_size)
    samplerate = reader.samplerate
    detector = notes("default", fft_size, hop_size, samplerate)

    detected = []
    frames_seen = 0
    while True:
        chunk, n_read = reader()
        event = detector(chunk)
        # pitches above MIDI 120 are treated as noise and skipped
        if event[0] != 0 and event[0] <= 120:
            detected.append(Note(time=frames_seen / float(samplerate),
                                 pitch=event[0],
                                 volume=event[1] - 20,
                                 duration=event[2]))
        frames_seen += n_read
        if n_read < hop_size:
            break

    return detected
Esempio n. 2
0
def getOnset(f):
    """Analyze audio file *f* with aubio.

    Returns a 3-tuple: (onset times in seconds, note velocities,
    pitches in Hz tracked at each onset).
    """
    reader = source(f, 0, hop_s)
    samplerate = reader.samplerate

    # yin pitch tracker, reporting in Hz
    pitch_o = pitch("yin", win_s, hop_s, samplerate)
    pitch_o.set_unit("Hz")
    pitch_o.set_tolerance(0.8)

    onset_o = onset("default", win_s, hop_s, samplerate)
    note_o = notes("default", win_s, hop_s, samplerate)

    onsets, vel, pitches = [], [], []

    frames_done = 0
    while True:
        block, n_read = reader()
        hz = pitch_o(block)[0]
        note_vec = note_o(block)
        t = frames_done / float(samplerate)  # kept from original (unused)

        # record an onset only when a positive pitch was tracked with it
        if onset_o(block) and hz > 0:
            onsets.append(onset_o.get_last_s())
            pitches.append(hz)

        if note_vec[0] != 0:
            vel.append(note_vec[1])

        frames_done += n_read
        if n_read < hop_s:
            break

    return onsets, vel, pitches
Esempio n. 3
0
    def __init__(self):
        """Connect to redis, open a full-duplex PyAudio stream driven by
        self.callback, build the aubio analyzers, then start the stream."""
        # decode_responses=True so redis returns str instead of bytes
        self.redis = redis.StrictRedis(host=redishost, port=6379, password="", decode_responses=True)
        self.p = pyaudio.PyAudio()
        stream = self.p.open(format=self.FORMAT,
                        channels=self.CHANNELS,
                        rate=self.RATE,
                        input=True,
                        output=True,
                        input_device_index = self.get_input_device_index(),
                        output_device_index = self.get_output_device_index(),
                        frames_per_buffer = self.CHUNK,
                        stream_callback=self.callback)

        # aubio analyzers, all sized to one CHUNK of audio
        self.a_onset = aubio.onset("default", self.CHUNK, self.hop_s, self.RATE)
        self.a_tempo = aubio.tempo("specflux", self.CHUNK, self.hop_s, self.RATE)
        self.a_pitch = aubio.pitch("default", self.CHUNK, self.hop_s, self.RATE)
        self.a_notes = aubio.notes("default", self.CHUNK, self.hop_s, self.RATE)
        n_filters = 40 # required
        n_coeffs = 13 # I wonder if i made this 1....
        self.a_pvoc = aubio.pvoc(self.CHUNK, self.hop_s)
        self.a_mfcc = aubio.mfcc(self.CHUNK, n_filters, n_coeffs, self.RATE)

        self.tolerance = 0.8
        self.a_pitch.set_tolerance(self.tolerance)
        # running pitch statistics — presumably updated by self.callback
        # as audio arrives; verify against the callback implementation
        self.highest_pitch = 0
        self.lowest_pitch = 99999999
        self.average_pitch = 0
        self.average_pitch_samples = 0
        self.last_average = 0
        self.colors = None
        self.pitch_range = None
        self.range_counter = 0
        self.all_notes = set()
        stream.start_stream()
Esempio n. 4
0
    def initializeAudio(self):
        """Open the wave file named by the global `file` and build the
        aubio tempo / pitch / notes / onset analyzers for it.

        Sets self.src, self.a_source, self.samplerate, self.p (PyAudio)
        and the analyzer attributes used during playback.
        """
        global file
        # opens file as wave file; samplerate 0 lets aubio use the file's own rate
        self.src = file + ".wav"
        samplerate = 0
        self.total_frames = 0

        # initialize aubio data
        self.a_source = aubio.source(self.src, samplerate, self.hop_s)
        self.samplerate = self.a_source.samplerate
        self.format = pyaudio.paFloat32
        self.frames = self.hop_s
        self.channels = 1
        # BUG FIX: the original created two PyAudio instances and leaked
        # the first one; a single instance is sufficient.
        self.p = pyaudio.PyAudio()

        self.a_tempo = aubio.tempo("default", self.win_s, self.hop_s,
                                   self.samplerate)
        self.pitch_o = aubio.pitch("yin", self.win_s, self.hop_s,
                                   self.samplerate)
        self.notes_o = aubio.notes("default", self.win_s, self.hop_s,
                                   self.samplerate)
        self.o = aubio.onset("default", self.win_s, self.hop_s,
                             self.samplerate)
        self.o2 = aubio.onset("hfc", self.win_s, self.hop_s, self.samplerate)

        print("Audio set up for", file)
Esempio n. 5
0
File: cmd.py Progetto: aubio/aubio
 def __init__(self, args):
     """Parse CLI options, build the aubio notes detector, apply the
     optional silence / release-drop thresholds, then delegate to the
     parent initializer."""
     self.parse_options(args, self.valid_opts)
     self.notes = aubio.notes(**self.options)
     if args.silence is not None:
         self.notes.set_silence(args.silence)
     if args.release_drop is not None:
         self.notes.set_release_drop(args.release_drop)
     super(process_notes, self).__init__(args)
Esempio n. 6
0
 def __init__(self, args):
     """Parse CLI options, build the aubio notes detector, apply the
     optional silence / release-drop thresholds, then delegate to the
     parent initializer."""
     self.parse_options(args, self.valid_opts)
     self.notes = aubio.notes(**self.options)
     if args.silence is not None:
         self.notes.set_silence(args.silence)
     if args.release_drop is not None:
         self.notes.set_release_drop(args.release_drop)
     super(process_notes, self).__init__(args)
Esempio n. 7
0
def audio_to_midi(filename, midioutput, samplerate=44100, downsample=1):
    """Detect notes in an audio file and write them to a MIDI file.

    :param filename: path of the input audio file
    :param midioutput: path of the MIDI file to write
    :param samplerate: requested sample rate (the file's own rate wins)
    :param downsample: integer factor applied to the rate and window sizes
    """
    # BUG FIX: honor the caller's samplerate instead of hard-coding 44100
    samplerate = samplerate // downsample

    win_s = 512 // downsample  # fft size
    hop_s = 128 // downsample  # hop size

    s = source(filename, samplerate, hop_s)
    samplerate = s.samplerate  # actual rate reported by the decoder

    notes_o = notes("default", win_s, hop_s, samplerate)

    print("%8s" % "time","[ start","vel","last ]")

    # create a midi file
    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)

    ticks_per_beat = mid.ticks_per_beat # default: 480
    bpm = 120 # default midi tempo

    tempo = bpm2tempo(bpm)
    track.append(MetaMessage('set_tempo', tempo=tempo))
    track.append(MetaMessage('time_signature', numerator=4, denominator=4))

    def frames2tick(frames, samplerate=samplerate):
        # convert a frame count to MIDI ticks at the current tempo
        sec = frames / float(samplerate)
        return int(second2tick(sec, ticks_per_beat, tempo))

    last_time = 0

    # total number of frames read
    total_frames = 0
    while True:
        samples, read = s()
        new_note = notes_o(samples)
        if new_note[0] != 0:
            print("%.6f" % (total_frames/float(samplerate)), new_note)
            delta = frames2tick(total_frames) - last_time
            # a nonzero third element is the previous pitch, which must
            # be turned off before the new note starts
            if new_note[2] > 0:
                track.append(Message('note_off', note=int(new_note[2]),
                                     velocity=127, time=delta))
            track.append(Message('note_on',
                                 note=int(new_note[0]),
                                 velocity=int(new_note[1]),
                                 time=delta))
            last_time = frames2tick(total_frames)
        total_frames += read
        if read < hop_s:
            break

    mid.save(midioutput)
Esempio n. 8
0
def points_from_notes_aubio(audio_file_name):
    """Return the set of times (seconds) at which aubio detects a note."""
    src = aubio_source(audio_file_name)
    detector = aubio.notes(samplerate=src.samplerate)
    # minimum note length and volume come from the CLI arguments
    detector.set_minioi_ms(args.labels_from_notes_min_length * 1000)
    detector.set_silence(args.labels_from_notes_min_volume)

    times = []
    frames_done = 0
    while True:
        block, n_read = src()
        if detector(block)[0] != 0:
            times.append(frames_done / src.samplerate)
        frames_done += n_read
        if n_read < src.hop_size:
            break
    return set(times)
Esempio n. 9
0
    def __init__(self, song, samplerate=44100, win_s=1024, hop_s=256):
        """Detect MIDI note events in `song` via aubio.

        Populates self.notes (frame index -> value), self.noteVals,
        self.totalFrames, self.highest, self.lowest and self.range.

        :param song: path of the audio file to analyze
        :param samplerate: requested rate (the file's own rate wins)
        :param win_s: fft window size
        :param hop_s: samples read per iteration
        """
        # Create a list of notes
        # BUG FIX: the original referenced the undefined global `fileName`
        # instead of the `song` parameter, so it could never run.
        self.src = aubio.source(song, samplerate, hop_s)
        samplerate = self.src.samplerate

        notes_obj = aubio.notes('default', win_s, hop_s, samplerate)
        # Note: notes object uses ONSET and OFFSET based on the perceived
        #       start and end of a pitch
        # Notes object gives [onset, offset, midi number]
        # Midi number = standard conversion of notes to frequency/tuning
        # Formula: frequency = 440 * (2 ^ ( (n - 69) /12) )
        # n = Midi number
        # https://www.inspiredacoustics.com/en/MIDI_note_numbers_and_center_frequencies

        print('Starting...')

        totalFrames = 0
        frameIndex = 0

        self.notes = dict()
        self.noteVals = []

        while True:
            samples, read = self.src()
            newNote = notes_obj(samples)
            if (newNote[0] != 0):  # newNote is a list of ints
                print(f'Frame {frameIndex}:', end='  ')
                print(f'Start, end = {newNote[0], newNote[2]}', end='\t')
                print(f'Midi value = {newNote[1]}')
                self.notes[frameIndex] = newNote[1]
                self.noteVals.append(newNote[1])
            totalFrames += read
            frameIndex += 1
            if (read < hop_s):
                break

        self.totalFrames = totalFrames

        # NOTE(review): max()/min() raise ValueError when no notes were
        # detected (e.g. a silent file) — consider guarding noteVals
        self.highest = max(self.noteVals)
        self.lowest = min(self.noteVals)
        print(
            f'Highest = {self.highest}, lowest = {self.lowest}, range = {self.highest-self.lowest}'
        )
        self.range = self.highest - self.lowest
        print(self.totalFrames)
        print(f'Notes dictionary, frames : notes\n{self.notes}')
        print('Done!')
Esempio n. 10
0
def convert_wav_to_notes(input_file: str) -> List[Tuple[float, str]]:
    """Convert simple single melody WAV file to notes.

    Returns a list of (time_in_seconds, note_name) pairs.
    """
    stream = source(input_file)
    detector = notes(samplerate=stream.samplerate)

    detected = []
    frames_done = 0
    while True:
        block, n_read = stream()
        midi_num = int(detector(block)[0])
        if midi_num:
            stamp = frames_done / float(stream.samplerate)
            detected.append((stamp, midi2note(midi_num)))
        frames_done += n_read
        if n_read < detector.hop_size:
            break
    return detected
Esempio n. 11
0
def find_notes(filename):
    """Detect notes in an audio file with aubio.

    Returns a list of (time_in_centiseconds, start, velocity, last)
    tuples, one per detected note event.
    """
    fft_size = 512
    hop_size = fft_size // 2

    # samplerate 0 lets aubio report the file's own rate
    src = aubio.source(filename, 0, hop_size)
    rate = src.samplerate
    detector = aubio.notes("default", fft_size, hop_size, rate)

    found = []
    frames_done = 0
    while True:
        block, n_read = src()
        event = detector(block)
        if event[0] != 0:
            stamp = int(100 * frames_done / float(rate))
            found.append((stamp, event[0], event[1], event[2]))
        frames_done += n_read
        if n_read < hop_size:
            break
    return found
Esempio n. 12
0
    def __init__(self):
        """Connect to redis, build the per-chunk aubio analyzers, and
        start audio delivery via self.start_stream()."""
        # decode_responses=True so redis returns str instead of bytes
        self.redis = redis.StrictRedis(host=redishost,
                                       port=6379,
                                       password="",
                                       decode_responses=True)

        # aubio analyzers, all sized to one CHUNK of audio
        self.a_onset = aubio.onset("default", self.CHUNK, self.hop_s,
                                   self.RATE)
        self.a_tempo = aubio.tempo("specflux", self.CHUNK, self.hop_s,
                                   self.RATE)
        self.a_notes = aubio.notes("default", self.CHUNK, self.hop_s,
                                   self.RATE)
        n_filters = 40  # required
        n_coeffs = 13  # I wonder if i made this 1....
        self.a_pvoc = aubio.pvoc(self.CHUNK, self.hop_s)
        self.a_mfcc = aubio.mfcc(self.CHUNK, n_filters, n_coeffs, self.RATE)

        self.last_average = 0
        self.colors = None
        self.range_counter = 0
        self.all_notes = set()
        self.start_stream()
Esempio n. 13
0
    def analyze_file(self, filepath, samplerate=0):
        """Run aubio note detection over an audio file.

        :param filepath: path to the audio file
        :param samplerate: requested rate; 0 means use the file's own rate
        :return: list of [frame_offset, note_vector] pairs; note_vector is
                 the raw aubio notes output (start, velocity, last —
                 per the aubio notes documentation)
        """
        win_s = 512  # fft size
        hop_s = 256  # hop size

        s = source(filepath, samplerate, hop_s)
        samplerate = s.samplerate

        notes_o = notes("default", win_s, hop_s, samplerate)
        total_frames = 0

        results = []
        while True:
            samples, read = s()
            new_note = notes_o(samples)
            if new_note[0] != 0:
                # copy: aubio reuses its output buffer between calls
                results.append([total_frames, np.copy(new_note)])
            total_frames += read
            if read < hop_s:
                break
        return results
Esempio n. 14
0
    def analyze_file(self, filepath, samplerate=0):
        """Run aubio note detection over an audio file.

        :param filepath: path to the audio file
        :param samplerate: requested rate; 0 means use the file's own rate
        :return: list of [frame_offset, note_vector] pairs; note_vector is
                 the raw aubio notes output (start, velocity, last —
                 per the aubio notes documentation)
        """
        win_s = 512 # fft size
        hop_s = 256 # hop size

        s = source(filepath, samplerate, hop_s)
        samplerate = s.samplerate

        notes_o = notes("default", win_s, hop_s, samplerate)
        total_frames = 0

        results = []
        while True:
            samples, read = s()
            new_note = notes_o(samples)
            if new_note[0] != 0:
                # copy: aubio reuses its output buffer between calls
                results.append([total_frames, np.copy(new_note)])
            total_frames += read
            if read < hop_s:
                break
        return results
Esempio n. 15
0
 def extractNotesAndBeats(self, audioFilePath):
     """Scan an audio file and return (noteCountDict, notesList, totalBeats):
     per-note occurrence counts, the ordered sequence of detected MIDI
     start/end notes, and the sum of nonzero tempo-detector outputs."""
     # Format everything for analysis
     aubioSource = source(audioFilePath)
     sampleRate = aubioSource.samplerate
     hopSize = aubioSource.hop_size
     notesObject = notes()
     tempoObject = tempo()
     framesRead = 0
     noteCountDict = {}
     notesList = []
     totalBeats = 0
     # NOTE(review): this priming read discards the first buffer without
     # analyzing it — confirm skipping the first hop is intended
     audioSamples, audioRead = aubioSource()
     # Loop through song and extract all note data
     # And puts note counts into a dictionary
     while audioRead >= hopSize:  # Run until you run out of audio to read
         audioSamples, audioRead = aubioSource()
         noteFrame = notesObject(audioSamples)
         beatSample = tempoObject(audioSamples)[0]
         # noteFrame formatted as a list, [startNote, volume, endNote]
         # where notes are MIDI values
         startNote, endNote = noteFrame[0], noteFrame[2]
         if startNote != 0:
             # A lot of the note frames are just silence, so check to
             # make sure it isn't 0 before doing anything
             if startNote not in noteCountDict:
                 noteCountDict[startNote] = 0
             if endNote not in noteCountDict:
                 noteCountDict[endNote] = 0
             noteCountDict[startNote] += 1
             noteCountDict[endNote] += 1
             notesList.append(startNote)
             notesList.append(endNote)
         framesRead += audioRead
         if beatSample != 0:
             totalBeats += beatSample
     return (noteCountDict, notesList, totalBeats)
Esempio n. 16
0
    def __init__(self, parent):
        """Build the audio-recorder window: a PyAudio input stream, an
        aubio note detector, a matplotlib plot embedded in wx, a scraped
        MIDI-number -> note-name table, and a 100 ms polling timer."""
        self.stream = None
        self.p = None
        self.parent = parent
        self.dc = None
        self.synth = None

        super().__init__(parent=None, title='Audio Recorder')
        plt.style.use('dark_background')

        #prep input stream for audio
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(format=pyaudio.paFloat32,
                                  channels=1,
                                  rate=RATE,
                                  input=True,
                                  frames_per_buffer=CHUNK)

        win_s = 4096
        hop_s = CHUNK
        self.notes_o = aubio.notes("default", win_s, hop_s, RATE)

        #prep variables for plotting
        self.xs = []
        self.ys = []

        self.fig = plt.figure()
        self.ax = plt.axes(xlim=(0, 100), ylim=(0, 2000))
        self.line, = self.ax.plot([], [])
        self.line.set_data(self.xs, self.ys)

        # scrape midi number to note conversion data online
        # NOTE(review): a network failure here (get(..., timeout=5)) will
        # raise and abort construction — consider a bundled fallback table
        self.miditonote = {}
        url = "https://www.inspiredacoustics.com/en/MIDI_note_numbers_and_center_frequencies"
        page = get(url, timeout=5)
        soup = BeautifulSoup(page.content, 'html.parser')
        table = soup.find('table')
        rows = table.find_all('tr')

        for row in rows:
            col = row.find_all('td')
            if (len(col) > 0):
                if col[0].text.isdigit():
                    midinumber = int(col[0].text)
                    # fourth column holds the note name for this number
                    self.miditonote[midinumber] = col[3].text

        #prep GUI
        self.panel = wx.Panel(self, size=(780, 480))
        self.canvas = FigureCanvas(self.panel, -1, self.fig)

        #sizer for graph
        self.graphsizer = wx.BoxSizer(wx.VERTICAL)
        self.graphsizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.GROW)

        #sizer for note and audio
        self.audiosizer = wx.BoxSizer(wx.VERTICAL)
        self.notetext = wx.StaticText(self.panel, style=wx.ALIGN_LEFT)
        self.notetext.SetForegroundColour('blue')
        self.notebox = wx.StaticBox(self.panel, size=(150, 50))
        self.noteboxsizer = wx.StaticBoxSizer(self.notebox, wx.VERTICAL)
        self.noteboxsizer.Add(self.notetext, 0, wx.ALIGN_LEFT)
        self.audiobutton = wx.Button(self.panel, -1, "Pause")
        self.synthbutton = wx.Button(self.panel, -1, "Open Synthesizer")
        self.audiosizer.AddSpacer(10)
        self.audiosizer.Add(self.audiobutton,
                            0,
                            wx.ALIGN_CENTER | wx.ALL,
                            border=5)
        self.audiosizer.AddSpacer(10)
        self.audiosizer.Add(self.noteboxsizer,
                            0,
                            wx.ALIGN_CENTER | wx.ALL,
                            border=5)
        self.audiosizer.AddSpacer(280)
        self.audiosizer.Add(self.synthbutton,
                            0,
                            wx.ALIGN_CENTER | wx.ALL,
                            border=5)

        self.audiobutton.Bind(wx.EVT_BUTTON, self.pause_play)
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.synthbutton.Bind(wx.EVT_BUTTON, self.open_synth)

        #add both components to 1 sizer
        self.mainsizer = wx.BoxSizer(wx.HORIZONTAL)
        self.mainsizer.Add(self.graphsizer, 1)
        self.mainsizer.Add(self.audiosizer, 1, wx.RIGHT)
        self.panel.SetSizer(self.mainsizer)
        self.Fit()
        self.panel.Layout()

        plt.ion()
        #prep timer; self.record polls audio every 100 ms
        self.timercount = 0
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.record, self.timer)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.timer.Start(100)
Esempio n. 17
0
 def __init__(self, args):
     """Parse CLI options, build the aubio notes detector, then delegate
     to the parent initializer."""
     self.parse_options(args, self.valid_opts)
     self.notes = aubio.notes(**self.options)
     super(process_notes, self).__init__(args)
Esempio n. 18
0
def ExtractMelody(filename, outputfilename):
    """Detect notes in `filename` with aubio and synthesize them as sine
    tones into the mono 16-bit WAV file `outputfilename`."""
    def midi2Hz(d):
        # standard MIDI-to-frequency conversion: 440 Hz at MIDI note 69
        f = 2**((d - 69) / 12)
        f *= 440
        return f

    downsample = 1
    samplerate = 44100 // downsample

    win_s = 512 // downsample  # fft size
    hop_s = 256 // downsample  # hop size

    s = source(filename, samplerate, hop_s)
    samplerate = s.samplerate

    wavef = wave.open(outputfilename, 'w')
    wavef.setnchannels(1)  # mono
    wavef.setsampwidth(2)  # 16-bit samples
    wavef.setframerate(samplerate)

    notes_o = notes("default", win_s, hop_s, samplerate)
    notes_o.set_silence(-40)

    print("%8s" % "time", "[ start", "vel", "last ]")

    samenotecounter = 1
    lastnote = 0
    lastvelocity = 0
    volumeunit = 32767 / 127  # map MIDI velocity (0-127) to 16-bit amplitude
    # total number of frames read
    total_frames = 0

    while True:

        samples, read = s()
        new_note = notes_o(samples)

        if new_note[0] != 0:
            # a new note started: render the previous note, which lasted
            # `samenotecounter` hops
            for i in range(int(hop_s) * samenotecounter):
                value = int(volumeunit * lastvelocity * math.sin(
                    2 * lastnote * math.pi * float(i) / float(samplerate)))
                data = struct.pack('<h', value)
                wavef.writeframesraw(data)

            samenotecounter = 1
            lastnote = midi2Hz(new_note[0])
            lastvelocity = new_note[1]

            print("%.6f" % (total_frames / float(samplerate)), new_note[0],
                  new_note[1])
        else:
            samenotecounter += 1

        # BUG FIX: the frame counter was only advanced when no note was
        # detected, so the printed timestamps drifted; count every read.
        total_frames += read

        if read < hop_s:
            # end of stream: flush the final pending note
            for i in range(int(hop_s) * samenotecounter):
                value = int(volumeunit * lastvelocity * math.sin(
                    2 * lastnote * math.pi * float(i) / float(samplerate)))
                # HACK kept from the original: modulo wrap instead of
                # clipping — distorts, but preserved for compatibility
                value = value % 32766
                data = struct.pack('<h', value)
                wavef.writeframesraw(data)
            break

    wavef.close()
Esempio n. 19
0
from aubio import source, notes, midi2note

# analysis parameters: hop (samples per read) and optional downsampling
downsample = 1
hop_size = 256 // downsample
samplerate = 0  # 0 lets aubio use the file's own rate

s = source("pinda.wav", samplerate, hop_size)
samplerate = s.samplerate

win_s = 512 // downsample  # fft size
print(f"{samplerate=}")
print(f"{win_s=}")
print(f"{hop_size=}")

notes_ = notes("default", win_s, hop_size, samplerate)

print("%8s" % "time", "[ start", "vel", "last ]")
total_frames = 0
while True:
    samples, read = s()
    new_note = notes_(samples)
    # nonzero first element = a new note event [ start, vel, last ]
    if (new_note[0] != 0):
        note_str = ' '.join(["%.2f" % i for i in new_note])
        print("%.6f" % (total_frames / float(samplerate)), note_str, new_note)
        # NOTE(review): midi2note is applied to new_note[1], which is the
        # "vel" column per the header above; new_note[0] looks like the
        # pitch — verify which index was intended
        print("%.6f" % (total_frames / float(samplerate)), new_note[0],
              midi2note(int(new_note[1])))
        # print(new_note)

    total_frames += read
    # a short read signals end of stream
    if read < hop_size:
        break
Esempio n. 20
0
    def compute_key(self,
                    in_file,
                    out_file,
                    samplerate=44100,
                    win_size=512,
                    hop_size=256):
        """Compute key for a given song.

        :param str in_file: input song, all formats supported by ffmpeg are allowed
        :param str out_file: output temporary midi file that will be deleted as soon as the analysis is over
        :param int samplerate: sampling rate
        :param int win_size: window size
        :param int hop_size: hop size
        :return: a music21 key object (with an added ``camelot`` attribute),
                 or an empty dict when the audio analysis fails
        """
        downsample = 1
        samplerate = samplerate // downsample
        win_s = win_size // downsample  # fft size
        hop_s = hop_size // downsample  # hop size

        s = aubio.source(in_file, samplerate, hop_s)
        samplerate = s.samplerate  # actual rate reported by the decoder
        notes_o = aubio.notes('default', win_s, hop_s, samplerate)
        # convert the track to midi with everything on a single track
        mid = MidiFile()
        track = MidiTrack()
        ticks_per_beat = mid.ticks_per_beat  # default: 480
        bpm = 120  # default midi tempo
        tempo = bpm2tempo(bpm)
        track.append(MetaMessage('set_tempo', tempo=tempo))
        track.append(MetaMessage('time_signature', numerator=4, denominator=4))
        mid.tracks.append(track)
        last_time = 0
        total_frames = 0
        log.info('%s: Computing key...', in_file)
        try:
            while True:
                samples, read = s()
                new_note = notes_o(samples)
                if new_note[0] != 0:
                    delta = self.frames2tick(total_frames,
                                             ticks_per_beat,
                                             tempo,
                                             samplerate=samplerate) - last_time
                    # nonzero third element: the previous note must be
                    # switched off before the new one starts
                    if new_note[2] > 0:
                        track.append(
                            Message('note_off',
                                    note=int(new_note[2]),
                                    velocity=127,
                                    time=delta))
                    track.append(
                        Message('note_on',
                                note=int(new_note[0]),
                                velocity=int(new_note[1]),
                                time=delta))
                    # BUG FIX: pass samplerate here too; the original
                    # omitted it, so last_time and delta could be computed
                    # with different rates and drift apart
                    last_time = self.frames2tick(total_frames,
                                                 ticks_per_beat,
                                                 tempo,
                                                 samplerate=samplerate)
                total_frames += read
                if read < hop_s:
                    break
        except Exception:
            # narrowed from a bare except: so SystemExit/KeyboardInterrupt
            # still propagate; analysis failures yield an empty result
            return {}
        mid.save(out_file)
        score = music21.converter.parse(out_file)
        key = score.analyze('key')
        key.tonic.name = key.tonic.name.replace('-', '')
        key.camelot = self.key_to_camelot(key)
        return key
Esempio n. 21
0
# CLI: <input audio file> <output midi file> [samplerate]
filename = sys.argv[1]
midioutput = sys.argv[2]

# optional downsampling factor for all analysis sizes (1 = none)
downsample = 1
samplerate = 44100 // downsample
if len( sys.argv ) > 3: samplerate = int(sys.argv[3])

win_s = 512 // downsample # fft size
hop_s = 256 // downsample # hop size

s = source(filename, samplerate, hop_s)
samplerate = s.samplerate  # actual rate reported by the decoder

tolerance = 0.8

notes_o = notes("default", win_s, hop_s, samplerate)

print("%8s" % "time","[ start","vel","last ]")

# create a midi file
mid = MidiFile()
track = MidiTrack()
mid.tracks.append(track)

ticks_per_beat = mid.ticks_per_beat # default: 480
bpm = 120 # default midi tempo

tempo = bpm2tempo(bpm)
track.append(MetaMessage('set_tempo', tempo=tempo))
track.append(MetaMessage('time_signature', numerator=4, denominator=4))
Esempio n. 22
0
 def test_members(self):
     """A default notes object exposes the documented default parameters."""
     o = notes()
     assert_equal([o.buf_size, o.hop_size, o.method, o.samplerate],
                  [1024, 512, 'default', 44100])
Esempio n. 23
0
 def test_members(self):
     """A default notes object exposes the documented default parameters."""
     o = notes()
     assert_equal ([o.buf_size, o.hop_size, o.method, o.samplerate],
         [1024,512,'default',44100])
Esempio n. 24
0
 def setUp(self):
     """Create a notes detector at the test class's samplerate."""
     self.o = notes(samplerate = self.samplerate)
Esempio n. 25
0
 def setUp(self):
     """Create a notes detector at the test class's samplerate."""
     self.o = notes(samplerate=self.samplerate)
 def setup_analyzer(self):
     """Build the aubio notes analyzer with the module-level window/hop
     sizes and a -50 dB silence threshold."""
     note_analyzer = notes(method="default", buf_size=win_s, hop_size=hop_s, samplerate=samplerate)
     note_analyzer.set_silence(-50)
     return note_analyzer
Esempio n. 27
0
File: key.py Progetto: hejmsdz/oiga
 def __init__(self, path):
     """Open `path` as a mono aubio source and create a note detector
     matched to its sample rate."""
     self.src = aubio.source(path, channels=1)
     self.find_note = aubio.notes('default', samplerate=self.src.samplerate)
Esempio n. 28
0
def getNotes(filename):
    """Read an audio file and return its detected notes as a list of
    note-name strings (e.g. 'C4'), using aubio's note detector.

    FFT size controls the frequency resolution of the analysis; hop size
    is the number of samples consumed per read (window overlap step).
    """
    # Downsampling factor; 1 keeps the original rate.
    downsample = 1

    # Requested sampling rate (the file's real rate is used after opening).
    samplerate = 44100 // downsample

    # FFT window size: larger = finer frequency resolution, more compute.
    fftSize = 512 // downsample

    # Samples read per call; also controls window overlap.
    hopSize = 256

    stream = source(filename, samplerate, hopSize)
    samplerate = stream.samplerate

    detector = notes("default", fftSize, hopSize, samplerate)

    total_frames = 0
    notesList = []

    while True:
        samples, read = stream()
        note_vec = detector(samples)

        # A nonzero first element marks a new note onset.
        if note_vec[0] != 0:
            print(note_vec)
            notesList.append(aubio.midi2note(int(note_vec[0])))

        total_frames += read

        # Near the end of the stream, a short read signals EOF.
        if read < hopSize:
            return notesList
Esempio n. 29
0
# CLI: <input audio file> [samplerate]
filename = sys.argv[1]

# optional downsampling of the requested rate (1 = no downsampling)
downsample = 1
samplerate = 44100 // downsample
if len(sys.argv) > 2: samplerate = int(sys.argv[2])

win_s = 1024 // downsample  # fft size
hop_s = 512 // downsample  # hop size

s = source(filename, samplerate, hop_s)
samplerate = s.samplerate  # actual rate reported by the decoder

tolerance = 0.8

notes_o = notes("default", win_s, hop_s, samplerate)

# cordinate (sic): screen positions cycled through via cordinate_increase()
cordinate_samples = [(352, 332), (224, 232)]
cordinate_samples_max = len(cordinate_samples) - 1
cordinate_i = 0


def cordinate_increase():
    """Advance the global cordinate index, wrapping back to 0 at the end."""
    global cordinate_i
    cordinate_i = 0 if cordinate_i == cordinate_samples_max else cordinate_i + 1

Esempio n. 30
0
def run_queue_in(listener):
    """Capture live audio and push detected Chord objects into
    listener.queue_in until listener.running is cleared.

    Runs aubio onset / tempo / notes detectors over the microphone
    stream; beat tracking assumes a 4-beat bar when estimating the
    downbeat and the next deadline.
    """
    p = pyaudio.PyAudio()
    # open stream
    pyaudio_format = pyaudio.paFloat32
    n_channels = 1
    stream = p.open(format=pyaudio_format,
                    channels=n_channels,
                    rate=sample_rate,
                    input=True,
                    frames_per_buffer=buffer_size)
    '''
    s = aubio.source('/home/nikolay/pahanini.mp3', sample_rate, buffer_size)
    '''

    notes_o = notes("default", win_s, hop_s, sample_rate)
    onset_o = onset("default", win_s, hop_s, sample_rate)
    temp_o = aubio.tempo("specdiff", win_s, hop_s, sample_rate)
    last_onset = 0
    beats = []  # inter-beat intervals (ms) used for the tempo median
    last_beat = 0
    count_beat = 0  # position within an assumed 4-beat bar
    last_downbeat = 0
    bar_start = False
    # the stream is read until you call stop
    prev_time = 0
    start_time = time.monotonic()
    while (listener.running.value):
        # read data from audio input
        audiobuffer = stream.read(buffer_size, exception_on_overflow=False)
        # BUG FIX: np.fromstring is deprecated/removed for binary input;
        # np.frombuffer is the drop-in replacement.
        samples = np.frombuffer(audiobuffer, dtype=np.float32)

        if (onset_o(samples)):
            last_onset = onset_o.get_last_ms()
        if (temp_o(samples)):
            tmp = temp_o.get_last_ms()
            beats.append(tmp - last_beat)
            count_beat = (count_beat + 1) % 4
            last_beat = tmp
            if (count_beat == 0):
                last_downbeat = last_beat
                bar_start = True
        new_note = notes_o(samples)
        if (new_note[0] != 0):
            if (len(beats) != 0):
                # median inter-beat interval (ms) -> BPM
                listener.set_tempo(60 * 1000.0 / np.median(beats))
            chord = Chord([Note(int(new_note[0]))],
                          from_ms_to_our_time(last_onset - prev_time,
                                              listener.tempo.value),
                          int(new_note[1]), bar_start)
            bar_start = False
            listener.queue_in.put(chord)
            # projected wall-clock time of the next bar start
            KOLYA_time = start_time + (
                last_downbeat +
                (4 - count_beat) * 60 * 1000.0 / listener.tempo.value) / 1000.0
            print(bar_start, listener.tempo.value, listener.deadline.value,
                  time.monotonic(), KOLYA_time)
            if (count_beat != 0):
                listener.set_deadline(KOLYA_time)
            prev_time = last_onset