Example #1
File: voodoo.py Project: PseudoSky/voodoo
def sample(samples):
    pitch = pitch_o(samples)[0]
    is_beat = T.tempo(samples)
    # print(pitch)
    if is_beat:
        # print "\nBPMS",is_beat,"\n"
        tb=T.tempo.get_last_s()
        T.beats.append(tb)
        if len(T.beats) > T.median_win_s:
            bpms = 60. / diff(T.beats)
            T.bpms = median(bpms[-T.median_win_s:])
            print("\nBPMS", bpms, "\n")
        
    pitch = int(round(pitch))
    confidence = pitch_o.get_confidence()
    print(confidence)
    if confidence < 0.8: pitch = 0.
    if confidence > 0.8:
        n = midi2note(min(pitch, 127))
        print(confidence, pitch, miditofreq(min(pitch, 127)), midi2note(min(pitch, 127)))
        # st += ' ' + midi2note(int(pitch))
        if n[-1] != '1': noter(n)
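The sample() callback above relies on a pitch detector pitch_o, a beat tracker T.tempo, and project helpers such as noter() and miditofreq() defined elsewhere in voodoo.py. A minimal setup sketch for the two aubio objects it uses (the window, hop size, and sample rate values here are assumptions, not the project's actual settings):

import aubio

win_s, hop_s, samplerate = 4096, 512, 44100  # assumed analysis settings

# pitch detector configured to return MIDI note numbers
pitch_o = aubio.pitch("yin", win_s, hop_s, samplerate)
pitch_o.set_unit("midi")
pitch_o.set_tolerance(0.8)

# beat tracker; sample() calls it once per frame and reads get_last_s()
tempo_o = aubio.tempo("default", win_s, hop_s, samplerate)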
Example #2
 def drawInstructions(self, canvas):
     keysPlayed = list(aubio.midi2note(key.midi) for key in self.keysPlayed)
     keysPlayed = ', '.join(keysPlayed)
     canvas.create_text(50, 50, text='You played: ' + keysPlayed,
                        font='Avenir 20 bold', fill='white', anchor='w')
     keysGenerated = list(aubio.midi2note(key.midi) for key in self.keysGenerated)
     keysGenerated = ', '.join(keysGenerated)
     canvas.create_text(50, 100, text='Keys generated: ' + keysGenerated,
                        font='Avenir 20 bold', fill='white', anchor='w')
     canvas.create_text(self.width - 50, 50, text='Time elapsed: %0.2f seconds' % self.timeElapsed,
                        font='Avenir 20 bold', fill='white', anchor='e')
Example #3
 def drawKeyNotations(self, canvas):
     for midi in self.midiToPositions:
         x0, x1 = self.midiToPositions[midi]
         x = (x0 + x1) / 2
         note = aubio.midi2note(midi)
         canvas.create_text(x, self.height - 120,
                            text=note, font='Avenir 8', fill='white')
Example #4
def getNoteFromWavFile(filename, samplerate = 44100):
    from aubio import source, pitch, midi2note
    from numpy import mean, array, ma

    try:
        downsample = 1
        win_s = 4096 // downsample # fft size
        hop_s = 512  // downsample # hop size

        s = source(filename, samplerate, hop_s)
        samplerate = s.samplerate

        tolerance = 0.8

        pitch_o = pitch("yin", win_s, hop_s, samplerate)
        pitch_o.set_unit("midi")
        pitch_o.set_tolerance(tolerance)

        pitches = []
        confidences = []

        # total number of frames read
        total_frames = 0
        while True:
            samples, read = s()
            pitch = pitch_o(samples)[0]
            #pitch = int(round(pitch))
            confidence = pitch_o.get_confidence()
            #if confidence < 0.8: pitch = 0.
            #print("%f %f %f" % (total_frames / float(samplerate), pitch, confidence))
            pitches += [pitch]
            confidences += [confidence]
            total_frames += read
            if read < hop_s: break

        # print(pitches)

        skip = 1

        pitches = array(pitches[skip:])
        confidences = array(confidences[skip:])

        # plot cleaned up pitches
        cleaned_pitches = pitches
        #cleaned_pitches = ma.masked_where(cleaned_pitches < 0, cleaned_pitches)
        #cleaned_pitches = ma.masked_where(cleaned_pitches > 120, cleaned_pitches)
        cleaned_pitches = ma.masked_where(confidences < tolerance, cleaned_pitches)
        cleaned_pitches = ma.masked_where(cleaned_pitches==0, cleaned_pitches)
        note = int(round(mean(cleaned_pitches.compressed())))

        print(note, midi2note(note))

        return note
    except RuntimeError as err:
        print ("Could not find note from WAV "+ filename)
        print (err)
        return None
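A minimal usage sketch for the function above (the WAV path is a placeholder, not a file from the original project):

note = getNoteFromWavFile("a4_sample.wav")
if note is not None:
    print("Detected MIDI note:", note)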
Example #5
    def get_pitch(self):
        if not path.exists(self.filename):
            raise Exception(f"File Path to {self.filename} does not exist")

        else:
            downsample = 1
            samplerate = 44100 // downsample
            if len(sys.argv) > 2: samplerate = int(sys.argv[2])

            win_s = 4096 // downsample  # fft size
            hop_s = 512 // downsample  # hop size

            s = source(self.filename, samplerate, hop_s)
            samplerate = s.samplerate

            tolerance = 0.8

            pitch_o = pitch("yin", win_s, hop_s, samplerate)
            pitch_o.set_unit("midi")
            pitch_o.set_tolerance(tolerance)

            pitches = []
            confidences = []

            # Total number of frames read
            total_frames = 0
            while True:
                samples, read = s()
                pitch_midi = pitch_o(samples)[0]
                pitch_midi = int(round(pitch_midi))
                confidence = pitch_o.get_confidence()
                if confidence < 0.9: pitch_midi = 0.
                #print("%f %f %f" % (total_frames / float(samplerate), pitch, confidence))
                if len(pitches) == 0 and pitch_midi != 0:
                    pitches.append(pitch_midi)
                elif len(pitches) > 0:
                    if pitch_midi != pitches[-1] and pitch_midi != 0:
                        pitches.append(pitch_midi)
                else:
                    pass

                #print(pitches)
                confidences += [confidence]
                total_frames += read
                if read < hop_s: break

            notes = []
            for midi in pitches:
                note = midi2note(midi)
                notes.append(note.strip("0123456789"))

            print(notes)
            self.pitch_text = str(notes)
            self.q.put(notes)
            raise Exception("Thread Terminated")
Example #6
 def drawInstructions(self, canvas):
     keysToPlay = list(aubio.midi2note(midi) for midi in self.keysToPlay)
     keysToPlay = ', '.join(keysToPlay)
     canvas.create_text(50, 50, text='Keys to play: ' + keysToPlay,
                        font='Avenir 20 bold', fill='white', anchor='w')
     
     if (self.keyboard.wrongKeyPressed == 0):
         wrongKeyPlayed = ''
     else:
         wrongKeyPlayed = aubio.midi2note(self.keyboard.wrongKeyPressed)
     keyPlayed = f'You played: {wrongKeyPlayed}'
     canvas.create_text(50, 100, text=keyPlayed,
                        font='Avenir 20 bold', fill='white', anchor='w')
     keysBeingPlayed = list(aubio.midi2note(midi) for midi in self.keysBeingPlayed)
     keysBeingPlayed = ', '.join(keysBeingPlayed)
     canvas.create_text(50, 150, text='Keys being played: ' + keysBeingPlayed,
                        font='Avenir 20 bold', fill='white', anchor='w')
     canvas.create_text(self.width - 50, 50, text=f'Nonstop mode: {self.nonStop}',
                        font='Avenir 20 bold', fill='white', anchor='e')
     canvas.create_text(self.width - 50, 100, text=f'Key#: {self.numberOfKeysPlayed}',
                        font='Avenir 20 bold', fill='white', anchor='e')
 def callback(in_data, frame_count, time_info, status_flags):
     # Callback function for the PyAudio stream
     pitchDetectionObject = pitchDetectionInit()
     # Convert pyaudio data into array readable by aubio.
     # *** Not copied exactly, but made with reference to:
     # https://github.com/aubio/aubio/tree/master/python/demos/demo_pyaudio.py
     dataAubio = numpy.frombuffer(in_data, dtype=numpy.float32)
     # Get pitch as midi val by passing in data (returns list of one item)
     pitchVal = pitchDetectionObject(dataAubio)[0]
     note = aubio.midi2note(int(numpy.around(pitchVal)))
     # Convert Midi val to note
     checkNote(note, data)
     return (in_data, pyaudio.paContinue)
Example #8
 def getKeyPlayed(self, targetTime):
     keysPlayed = dict()
     startTime = time.time()
     while True:
         timeElapsed = time.time() - startTime
         # if the player doesn't find the key within five seconds, return.
         if (timeElapsed > 5): return 0
         else:
             pitch = self.readPitch()
             note = aubio.midi2note(pitch)
             timesDetected = keysPlayed.get(pitch, 0) + 1
             # return the detected note
             if (pitch != 0 and timesDetected == targetTime):
                 return pitch
             keysPlayed[pitch] = timesDetected
Example #9
def convert_wav_to_notes(input_file: str) -> List[Tuple[float, str]]:
    """Convert simple single melody WAV file to notes"""
    melody = source(input_file)
    melody_note = notes(samplerate=melody.samplerate)
    notes_ = []
    total_frames = 0
    while True:
        samples, read = melody()
        note = int(melody_note(samples)[0])
        if note:
            time = total_frames / float(melody.samplerate)
            notes_.append((time, midi2note(note)))
        total_frames += read
        if read < melody_note.hop_size:
            break
    return notes_
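A quick usage sketch for the function above (the filename is a placeholder):

for time_s, note_name in convert_wav_to_notes("melody.wav"):
    print("%.2f s: %s" % (time_s, note_name))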
Example #10
 def getLengthOfKeyPlayed(self, target, period):
     startTime = time.time()
     keyStartTime = None
     keyEndTime = None
     while True:
         timeElapsed = time.time() - startTime
         # if the player doesn't find the key within the given period, return.
         if (timeElapsed > period): return 0
         else:
             pitch = self.readPitch()
             note = aubio.midi2note(pitch)
             if pitch == target and keyStartTime is None:
                 keyStartTime = time.time()
             elif keyStartTime is not None and pitch != target:
                 keyEndTime = time.time()
                 return keyEndTime - keyStartTime
Example #11
 def getKeyPlayed(self, targetTime, period):
     previousKey = 0
     keyPressTime = 0
     startTime = time.time()
     while True:
         timeElapsed = time.time() - startTime
         # if the player doesn't find the key within the given period, return.
         if (timeElapsed > period): return 0
         else:
             pitch = self.readPitch()
             note = aubio.midi2note(pitch)
             if (pitch != 0 and pitch == previousKey):
                 keyPressTime += 1
                 if (keyPressTime == targetTime):
                     return pitch
             else:
                 keyPressTime = 0
                 previousKey = pitch
Example #12
 def checkKeyPlayed(self, target, targetTime):
     targetValue = 0
     startTime = time.time()
     while True:
         timeElapsed = time.time() - startTime
         # if the player doesn't find the key within five seconds, return.
         if (timeElapsed > 5): return 0
         else:
             pitch = self.readPitch()
             note = aubio.midi2note(pitch)
             #storage.append(pitch)
             #print("{}".format(note))
             # return the note if the correct note is played
             if (pitch in target):
                 targetValue += 1
                 if (targetValue == targetTime):
                     #print(f'Target note {pitch} detected!')
                     return pitch
Example #13
    def process_octave(self, ret):
        midi = self.a_notes(ret)[0]
        if 0 < midi <= 127:
            note_octave = aubio.midi2note(int(midi))
            if note_octave != "C-1":
                note = note_octave[:-1]
                self.all_notes.add(note)
                octave = int(note_octave[-1])
                ranged_octave = min(max(octave, 1), 5)
                self.redis.lpush("beat_queue", "noteoctave:{},{}".format(note, ranged_octave))
                self.redis.publish("beats", "noteoctave:{},{}".format(note, ranged_octave))

                if octave < 1 or octave > 5:
                    print("OUTSIDE EXPECTED RANGE\t\t{}".format(note_octave))
                    print("# in expected range prior:{}".format(self.range_counter))
                    print(self.all_notes)
                    self.range_counter = 0
                else:
                    self.range_counter += 1
Example #14
 def test_midi2note_known_values(self):
     " known values are correctly converted "
     for midi, note in list_of_known_midis:
         self.assertEqual(midi2note(midi), note)
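These tests iterate over (and, in the parametrized variants below, receive) entries from a list_of_known_midis table defined elsewhere in the test module. A plausible shape for that table (the entries here are illustrative pairs, not copied from the original suite):

list_of_known_midis = [
    (0, 'C-1'),    # lowest MIDI note
    (69, 'A4'),    # concert A, 440 Hz
    (127, 'G9'),   # highest MIDI note
]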
Example #15
 def test_midi2note_known_values(self, midi, note):
     " known values are correctly converted "
     self.assertEqual(midi2note(midi), note)
Example #16
def getNotes(filename):

	'''Downsampling factor: reduces the effective sample rate of the
		audio signal. Change it to downsample by that factor.'''
	downsample = 1

	#Sampling rate of file
	samplerate = 44100 // downsample

	'''Fast Fourier Transform: converts an amplitude vs. time signal
	into an amplitude vs. frequency spectrum, also known as a
	spectral plot.

	The FFT size affects the resolution of the resulting spectrum:
	- Number of spectral lines = 1/2 of the FFT size.
	- Frequency resolution of each spectral line = sample rate / FFT size.
	- The larger the size, the finer the resolution, but the more time needed.

	Essentially the window size of the overlapping analysis windows,
	in samples. The transform works in blocks of two, so the size must
	be a power of two. 512 is the default.
	'''

	fftSize = 512 // downsample

	'''Hop size: the number of samples between consecutive windows,
	which determines the overlap.

	It is how many samples are read at each consecutive call:
	hop size = FFT size / overlap factor (default overlap factor is 2).

	I/O delay = window size - hop size.
	'''
	hopSize = 256
	
	#Get source class from filename, get the samplerate property
	s = source(filename, samplerate, hopSize)
	samplerate = s.samplerate

	#Initialize notes class with default method, other params
	notes_o = notes("default", fftSize, hopSize, samplerate)

	#Header
	#print("%8s" % "time","[ start","vel","last ]")

	# total number of frames read
	total_frames = 0

	#List to hold final notes
	notesList = []

	#Get samples from source object, run notes functions on them
	while True:
		#Get current sample and number of samples read
		samples, read = s()

		#Get the notes vector using the notes object
		new_note = notes_o(samples)

		#If the notes vector is not blank store + print the note vector
		if (new_note[0] != 0):
			noteArray = new_note
			print(noteArray)
			notesList.append(aubio.midi2note(int(new_note[0])))
		
		total_frames += read

		'''As the source is called repeatedly, towards the end of the
			stream read will become less than hop size. '''
		if read < hopSize:
			return notesList
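As a quick numerical check of the relationships quoted in the comments above, using this snippet's own settings (44100 Hz sample rate, FFT size 512, hop size 256):

samplerate, fft_size, hop_size = 44100, 512, 256
spectral_lines = fft_size // 2            # 256 spectral lines
freq_resolution = samplerate / fft_size   # ~86.1 Hz per spectral line
io_delay = fft_size - hop_size            # 256 samples, ~5.8 ms at 44.1 kHz
print(spectral_lines, freq_resolution, io_delay)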
Example #17
 def test_midi2note_known_values(self, midi, note):
     " known values are correctly converted "
     assert midi2note(midi) == note
Example #18
 def test_midi2note_known_values(self, midi, note):
     " known values are correctly converted "
     self.assertEqual(midi2note(midi), note)
Example #19
 def test_midi2note_known_values(self, midi, note):
     " known values are correctly converted "
     assert midi2note(midi) == note
Example #20
downsample = 1
hop_size = 256 // downsample
samplerate = 0

s = source("pinda.wav", samplerate, hop_size)
samplerate = s.samplerate

win_s = 512 // downsample  # fft size
print(f"{samplerate=}")
print(f"{win_s=}")
print(f"{hop_size=}")

notes_ = notes("default", win_s, hop_size, samplerate)

print("%8s" % "time", "[ start", "vel", "last ]")
total_frames = 0
while True:
    samples, read = s()
    new_note = notes_(samples)
    if (new_note[0] != 0):
        note_str = ' '.join(["%.2f" % i for i in new_note])
        print("%.6f" % (total_frames / float(samplerate)), note_str, new_note)
        print("%.6f" % (total_frames / float(samplerate)), new_note[0],
              midi2note(int(new_note[1])))
        # print(new_note)

    total_frames += read
    if read < hop_size:
        break
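The 3-element vector returned by the notes object holds the newly started MIDI note, its velocity, and the last MIDI note (matching the "[ start vel last ]" header printed above). Unpacking it by name makes that intent explicit, e.g. inside the loop:

start_midi, velocity, last_midi = new_note
if start_midi != 0:
    print(midi2note(int(start_midi)), "velocity", velocity)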
Example #21
File: pitch.py Project: NickCul30/Tabber
pitches = []
confidences = []

notes = []
lastNote = ""

# total number of frames read
total_frames = 0
while True:
    samples, read = s()
    pitch = pitch_o(samples)[0]
    #pitch = int(round(pitch))
    confidence = pitch_o.get_confidence()
    #if confidence < 0.8: pitch = 0.
    if 0 < pitch <= 127 and confidence > .9:
        note = midi2note(int(pitch))
        print("%f %s %f" %
              (total_frames / float(samplerate), note, confidence))

        if lastNote != note:
            notes += [note]

        lastNote = note
    #else:
    #   print("%f %s %f" % (total_frames / float(samplerate), pitch, confidence))

    pitches += [pitch]
    confidences += [confidence]
    total_frames += read
    if read < hop_s: break
Example #22
File: index.py Project: PseudoSky/voodoo
# For posting to ly-server
# st='{"commands":[{"command":"musicxml"},{"command":"mode"}],"data":"relative c {'


st='relative c {'
while True:
    samples, read = s()
    print(samples[0], len(samples))
    pitch = pitch_o(samples)[0]
    # print(pitch)
    pitch = int(round(pitch))
    confidence = pitch_o.get_confidence()
    if confidence < 0.8: pitch = 0.
    if confidence > 0.8:
        print(len(pitches) * hop_s, miditofreq(pitch), midi2note(int(pitch)))
        st += ' ' + midi2note(int(pitch))
        noter(midi2note(int(pitch)))

    
    #print "%f %f %f" % (total_frames / float(samplerate), pitch, confidence)
    
    pitches += [pitch]
    confidences += [confidence]
    total_frames += read
    if read < hop_s: break
# st+=' }"}'
st += ' }'
st = st.replace('#', '').lower()
print(st)
# post(st)
Example #23
 def __repr__(self):
     note = aubio.midi2note(self.midi)
     return note
Example #24
 def test_midi2note_known_values(self):
     " known values are correctly converted "
     for midi, note in list_of_known_midis:
         self.assertEqual(midi2note(midi), note)
Example #25
expected_taamim = args.taam

expected_taamim = getTrop(1, 1, 1)

for taam in expected_taamim:
    if taam not in trop:
        print("Taam '%s' not found" % taam)
        sys.exit(1)

given = extract_notes_from_file(0, 'test.m4a')

offset = given[0] - trop_notes[expected_taamim[0]][0]
given_transpose = [i - offset for i in given]

expected_notes, expected_timing, pronunc = get_notes(expected_taamim)

transposed_expected = [i + 70 for i in expected_notes]

# play_taam(transposed_expected)

# print(expected_timing)

# changed_times = grad_descent(given_transpose, np.linspace(0, 1, len(given_transpose)), expected_notes, expected_timing)
changed_times = np.linspace(0, 1, len(given_transpose))

# print(changed_times)
# print(np.linspace(0, 1, len(given)))

plot_taam(expected_notes, expected_timing, given_transpose, changed_times,
          midi2note(given[0]), pronunc)