Example 1
def runTranscribe(show=True, plot=True, useMic=True,
                  seconds=20.0, useScale=None, saveFile=True):
    '''
    Runs all the methods to record from audio for `seconds` length (default 20.0)
    and transcribe the resulting melody, returning a music21.Score object.

    if `show` is True, show the stream.

    if `plot` is True then a Tk graph of the frequencies will be displayed.

    if `useMic` is True then use the microphone.  If False it will load the file of `saveFile`
    or the default temp file to run transcriptions from.

    a different scale besides the chromatic scale can be specified by setting `useScale`.
    See :ref:`moduleScale` for a list of allowable scales. (or a custom one can be given).
    Microtonal scales are totally accepted, as are retuned scales where A != 440hz.

    if `saveFile` is False then the recorded audio is not saved to disk.  If
    set to `True` then `environLocal.getRootTempDir() + os.path.sep + 'ex.wav'` is
    used as the filename.  If set to anything else then it will use that as the
    filename.
    '''
    from music21 import audioSearch as audioSearchBase

    if useScale is None:
        useScale = scale.ChromaticScale('C4')

    # Decide where (or whether) the recorded audio is written to disk.
    # Identity checks keep a filename string from being confused with the
    # boolean sentinels True/False documented above.
    if saveFile is True:
        waveFilename = os.path.join(environLocal.getRootTempDir(), 'ex.wav')
    elif saveFile is False:
        waveFilename = False  # sentinel: do not store the recording
    else:
        waveFilename = saveFile

    # Acquire the raw frequency analysis: live from the microphone, or from
    # the previously stored wave file.
    if useMic is True:
        freqFromAQList = audioSearchBase.getFrequenciesFromMicrophone(
            length=seconds, storeWaveFilename=waveFilename)
    else:
        freqFromAQList = audioSearchBase.getFrequenciesFromAudioFile(
            waveFilename=waveFilename)

    # Pipeline: frequencies -> quantized pitches -> note/duration lists -> Score.
    detectedPitchesFreq = audioSearchBase.detectPitchFrequencies(freqFromAQList, useScale)
    detectedPitchesFreq = audioSearchBase.smoothFrequencies(detectedPitchesFreq)
    detectedPitchObjects, listplot = audioSearchBase.pitchFrequenciesToObjects(
        detectedPitchesFreq, useScale)
    notesList, durationList = audioSearchBase.joinConsecutiveIdenticalPitches(
        detectedPitchObjects)
    myScore, unused_length_part = audioSearchBase.notesAndDurationsToStream(
        notesList, durationList, removeRestsAtBeginning=True)

    if show is True:
        myScore.show()

    if plot is True:
        # matplotlib is optional; fail with a package-specific exception so
        # callers can distinguish this from a transcription failure.
        try:
            import matplotlib.pyplot  # for find
        except ImportError:
            raise audioSearchBase.AudioSearchException(
                "Cannot plot without matplotlib installed.")
        matplotlib.pyplot.plot(listplot)
        matplotlib.pyplot.show()
    environLocal.printDebug("* END")

    return myScore
Example 2
def runTranscribe(show=True, plot=True, useMic=True,
                  seconds=20.0, useScale=None, saveFile=True):
    '''
    Runs all the methods to record from audio for `seconds` length (default 20.0)
    and transcribe the resulting melody, returning a music21.Score object.

    if `show` is True, show the stream.

    if `plot` is True then a Tk graph of the frequencies will be displayed.

    if `useMic` is True then use the microphone.  If False it will load the file of `saveFile`
    or the default temp file to run transcriptions from.

    a different scale besides the chromatic scale can be specified by setting `useScale`.
    See :ref:`moduleScale` for a list of allowable scales. (or a custom one can be given).
    Microtonal scales are totally accepted, as are retuned scales where A != 440hz.

    if `saveFile` is False then the recorded audio is not saved to disk.  If
    set to `True` then `environLocal.getRootTempDir() + os.path.sep + 'ex.wav'` is
    used as the filename.  If set to anything else then it will use that as the
    filename.
    '''
    from music21 import audioSearch as audioSearchBase

    if useScale is None:
        useScale = scale.ChromaticScale('C4')

    # Decide where (or whether) the recorded audio is written to disk.
    # Identity checks keep a filename string from being confused with the
    # boolean sentinels True/False documented above.
    if saveFile is True:
        waveFilename = os.path.join(environLocal.getRootTempDir(), 'ex.wav')
    elif saveFile is False:
        waveFilename = False  # sentinel: do not store the recording
    else:
        waveFilename = saveFile

    # Acquire the raw frequency analysis: live from the microphone, or from
    # the previously stored wave file.
    if useMic is True:
        freqFromAQList = audioSearchBase.getFrequenciesFromMicrophone(
            length=seconds, storeWaveFilename=waveFilename)
    else:
        freqFromAQList = audioSearchBase.getFrequenciesFromAudioFile(
            waveFilename=waveFilename)

    # Pipeline: frequencies -> quantized pitches -> note/duration lists -> Score.
    detectedPitchesFreq = audioSearchBase.detectPitchFrequencies(freqFromAQList, useScale)
    detectedPitchesFreq = audioSearchBase.smoothFrequencies(detectedPitchesFreq)
    detectedPitchObjects, listplot = audioSearchBase.pitchFrequenciesToObjects(
        detectedPitchesFreq, useScale)
    notesList, durationList = audioSearchBase.joinConsecutiveIdenticalPitches(
        detectedPitchObjects)
    myScore, unused_length_part = audioSearchBase.notesAndDurationsToStream(
        notesList, durationList, removeRestsAtBeginning=True)

    if show is True:
        myScore.show()

    if plot is True:
        # matplotlib is optional; fail with a package-specific exception so
        # callers can distinguish this from a transcription failure.
        try:
            import matplotlib.pyplot  # for find
        except ImportError:
            raise audioSearchBase.AudioSearchException(
                "Cannot plot without matplotlib installed.")
        matplotlib.pyplot.plot(listplot)
        matplotlib.pyplot.show()
    environLocal.printDebug("* END")

    return myScore
Example 3
def monophonicStreamFromFile(fileName, useScale=None):
    '''
    Read a .wav file and return a stream representing the transcribed,
    monophonic audio.

    `fileName` should be the complete path to a file on the disk.

    To transcribe against something other than the chromatic scale, pass a
    scale object as `useScale`.  See :ref:`moduleScale` for a list of
    allowable scales (a custom one can also be given).  Microtonal scales are
    totally accepted, as are retuned scales where A != 440hz.

    We demonstrate with an audio file beginning with an ascending scale.

    >>> import os #_DOCS_HIDE
    >>> taw = 'test_audio.wav' #_DOCS_HIDE
    >>> waveFile = str(common.getSourceFilePath() / 'audioSearch' / taw) #_DOCS_HIDE
    >>> #_DOCS_SHOW waveFile = 'test_audio.wav'
    >>> p = audioSearch.transcriber.monophonicStreamFromFile(waveFile)
    >>> p
    <music21.stream.Part ...>
    >>> p.show('text')
    {0.0} <music21.note.Note C>
    {0.25} <music21.note.Note C>
    {0.75} <music21.note.Note D>
    {1.75} <music21.note.Note E>
    {2.75} <music21.note.Note F>
    {4.25} <music21.note.Note G>
    {5.25} <music21.note.Note A>
    {6.25} <music21.note.Note B>
    {7.25} <music21.note.Note C>
    ...
    '''
    from music21 import audioSearch as audioSearchBase

    # Raw frequency readings from the wave file.
    rawFrequencies = audioSearchBase.getFrequenciesFromAudioFile(waveFilename=fileName)

    # Quantize the frequencies to the chosen scale, then smooth out jitter.
    quantized = audioSearchBase.detectPitchFrequencies(rawFrequencies, useScale)
    quantized = audioSearchBase.smoothFrequencies(quantized)

    # Convert to pitch objects, merge repeated pitches into single notes,
    # and build a score from the resulting note/duration lists.
    pitchObjects, unused_listplot = audioSearchBase.pitchFrequenciesToObjects(
        quantized, useScale)
    joinedNotes, joinedDurations = audioSearchBase.joinConsecutiveIdenticalPitches(
        pitchObjects)
    transcribedScore, unused_lengthPart = audioSearchBase.notesAndDurationsToStream(
        joinedNotes, joinedDurations, removeRestsAtBeginning=True)
    return transcribedScore.parts.first()
Example 4
def monophonicStreamFromFile(fileName, useScale=None):
    '''
    Read a .wav file and return a stream representing the transcribed,
    monophonic audio.

    `fileName` should be the complete path to a file on the disk.

    To transcribe against something other than the chromatic scale, pass a
    scale object as `useScale`.  See :ref:`moduleScale` for a list of
    allowable scales (a custom one can also be given).  Microtonal scales are
    totally accepted, as are retuned scales where A != 440hz.

    We demonstrate with an audio file beginning with an ascending scale.

    >>> import os #_DOCS_HIDE
    >>> taw = 'test_audio.wav' #_DOCS_HIDE
    >>> waveFile = os.path.join(common.getSourceFilePath(), 'audioSearch', taw) #_DOCS_HIDE
    >>> #_DOCS_SHOW waveFile = 'test_audio.wav'
    >>> p = audioSearch.transcriber.monophonicStreamFromFile(waveFile)
    >>> p
    <music21.stream.Part ...>
    >>> p.show('text')
    {0.0} <music21.note.Note C>
    {0.25} <music21.note.Note C>
    {0.75} <music21.note.Note D>
    {1.75} <music21.note.Note E>
    {2.75} <music21.note.Note F>
    {4.25} <music21.note.Note G>
    {5.25} <music21.note.Note A>
    {6.25} <music21.note.Note B>
    {7.25} <music21.note.Note C>
    ...
    '''
    from music21 import audioSearch as audioSearchBase

    # Raw frequency readings from the wave file.
    rawFrequencies = audioSearchBase.getFrequenciesFromAudioFile(waveFilename=fileName)

    # Quantize the frequencies to the chosen scale, then smooth out jitter.
    quantized = audioSearchBase.detectPitchFrequencies(rawFrequencies, useScale)
    quantized = audioSearchBase.smoothFrequencies(quantized)

    # Convert to pitch objects, merge repeated pitches into single notes,
    # and build a score from the resulting note/duration lists.
    pitchObjects, unused_listplot = audioSearchBase.pitchFrequenciesToObjects(
        quantized, useScale)
    joinedNotes, joinedDurations = audioSearchBase.joinConsecutiveIdenticalPitches(
        pitchObjects)
    transcribedScore, unused_lengthPart = audioSearchBase.notesAndDurationsToStream(
        joinedNotes, joinedDurations, removeRestsAtBeginning=True)
    return transcribedScore.parts[0]
Example 5
    def repeatTranscription(self):
        '''
        First, it records from the microphone (or from a file if is used for
        test). Later, it processes the signal in order to detect the pitches.
        It converts them into music21 objects and compares them with the score.
        It finds the best matching position of the recorded signal with the
        score, and decides, depending on matching accuracy, the last note
        predicted and some other parameters, in which position the recorded
        signal is.

        It returns a value that is False if the song has not finished, or true
        if there has been a problem like some consecutive bad matchings or the
        score has finished.

        >>> from music21.audioSearch import scoreFollower
        >>> scoreNotes = ' '.join(['c4', 'd', 'e', 'f', 'g', 'a', 'b', "c'", 'c', 'e',
        ...     'g', "c'", 'a', 'f', 'd', 'c#', 'd#', 'f#', 'c', 'e', 'g', "c'",
        ...     'a', 'f', 'd', 'c#', 'd#', 'f#'])
        >>> scNotes = converter.parse('tinynotation: 4/4 ' + scoreNotes, makeNotation=False)
        >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
        >>> ScF.useMic = False
        >>> import os #_DOCS_HIDE
        >>> ScF.waveFile = str(common.getSourceFilePath() #_DOCS_HIDE
        ...                 / 'audioSearch' / 'test_audio.wav') #_DOCS_HIDE
        >>> #_DOCS_SHOW ScF.waveFile = 'test_audio.wav'
        >>> ScF.seconds_recording = 10
        >>> ScF.useScale = scale.ChromaticScale('C4')
        >>> ScF.currentSample = 0
        >>> exitType = ScF.repeatTranscription()
        >>> print(exitType)
        False
        >>> print(ScF.lastNotePosition)
        10

        '''
        from music21 import audioSearch

        # print('WE STAY AT:',)
        # print(self.lastNotePosition, len(self.scoreNotesOnly),)
        # print('en percent %d %%' % (self.lastNotePosition * 100 / len(self.scoreNotesOnly)),)
        # print(' this search begins at: ', self.startSearchAtSlot,)
        # print('countdown %d' % self.countdown)
        # print('Measure last note', self.scoreStream[self.lastNotePosition].measureNumber)

        environLocal.printDebug('repeat transcription starting')

        # Step 1: acquire a chunk of frequency data -- live from the mic, or
        # a window of the test wave file advancing from self.currentSample.
        if self.useMic is True:
            freqFromAQList = audioSearch.getFrequenciesFromMicrophone(
                length=self.seconds_recording,
                storeWaveFilename=None,
            )
        else:
            getFreqFunc = audioSearch.getFrequenciesFromPartialAudioFile
            # NOTE(review): self.waveFile is replaced with an open wave
            # reader here -- later calls pass the reader back in.
            freqFromAQList, self.waveFile, self.currentSample = getFreqFunc(
                self.waveFile,
                length=self.seconds_recording,
                startSample=self.currentSample,
            )
            if self.totalFile == 0:
                # Cache the total frame count once for the EOF check below.
                self.totalFile = self.waveFile.getnframes()

        environLocal.printDebug('got Frequencies from Microphone')

        # Step 2: frequencies -> scale-quantized pitches -> note objects,
        # timing the whole processing stage for the file-skip below.
        time_start = time()
        detectedPitchesFreq = audioSearch.detectPitchFrequencies(
            freqFromAQList, self.useScale)
        detectedPitchesFreq = audioSearch.smoothFrequencies(
            detectedPitchesFreq)
        detectedPitchObjects, unused_listplot = audioSearch.pitchFrequenciesToObjects(
            detectedPitchesFreq, self.useScale)
        notesList, durationList = audioSearch.joinConsecutiveIdenticalPitches(
            detectedPitchObjects)
        self.silencePeriodDetection(notesList)
        environLocal.printDebug('made it to here...')
        # Slice of the score we expect the recording to line up with,
        # starting at the last confirmed note position.
        scNotes = self.scoreStream[self.
                                   lastNotePosition:self.lastNotePosition +
                                   len(notesList)]
        # print('1')
        transcribedScore, self.qle = audioSearch.notesAndDurationsToStream(
            notesList,
            durationList,
            scNotes=scNotes,
            qle=self.qle,
        )
        # print('2')
        # Step 3: align the transcription against the full score and update
        # our estimated position.
        totalLengthPeriod, self.lastNotePosition, prob, END_OF_SCORE = self.matchingNotes(
            self.scoreStream,
            transcribedScore,
            self.startSearchAtSlot,
            self.lastNotePosition,
        )
        # print('3')
        self.processing_time = time() - time_start
        environLocal.printDebug('and even to here...')
        if END_OF_SCORE is True:
            exitType = 'endOfScore'  # 'endOfScore'
            return exitType

        # estimate position, or exit if we can't at all...
        # exitType is False while following should continue, otherwise a
        # string naming the reason to stop.
        exitType = self.updatePosition(prob, totalLengthPeriod, time_start)

        if self.useMic is False:  # reading from the disc (only for TESTS)
            # skip ahead the processing time.
            getFreqFunc = audioSearch.getFrequenciesFromPartialAudioFile
            freqFromAQList, junk, self.currentSample = getFreqFunc(
                self.waveFile,
                length=self.processing_time,
                startSample=self.currentSample,
            )

        if self.lastNotePosition > len(self.scoreNotesOnly):
            # print('finishedPerforming')
            exitType = 'finishedPerforming'
        elif (self.useMic is False and self.currentSample >= self.totalFile):
            # print('waveFileEOF')
            exitType = 'waveFileEOF'

        environLocal.printDebug('about to return -- exitType: %s ' % exitType)
        return exitType
Example 6
    def repeatTranscription(self):
        '''
        First, it records from the microphone (or from a file if is used for
        test). Later, it processes the signal in order to detect the pitches.
        It converts them into music21 objects and compares them with the score.
        It finds the best matching position of the recorded signal with the
        score, and decides, depending on matching accuracy, the last note
        predicted and some other parameters, in which position the recorded
        signal is.

        It returns a value that is False if the song has not finished, or true
        if there has been a problem like some consecutive bad matchings or the
        score has finished.

        >>> from music21.audioSearch import scoreFollower
        >>> scoreNotes = " ".join(["c4", "d", "e", "f", "g", "a", "b", "c'", "c", "e",
        ...     "g", "c'", "a", "f", "d", "c#", "d#", "f#","c", "e", "g", "c'",
        ...     "a", "f", "d", "c#", "d#", "f#"])
        >>> scNotes = converter.parse("tinynotation: 4/4 " + scoreNotes, makeNotation=False)
        >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
        >>> ScF.useMic = False
        >>> import os #_DOCS_HIDE
        >>> ScF.waveFile = os.path.join(common.getSourceFilePath(), #_DOCS_HIDE
        ...                 'audioSearch', 'test_audio.wav') #_DOCS_HIDE
        >>> #_DOCS_SHOW ScF.waveFile = 'test_audio.wav'
        >>> ScF.seconds_recording = 10
        >>> ScF.useScale = scale.ChromaticScale('C4')
        >>> ScF.currentSample = 0
        >>> exitType = ScF.repeatTranscription()
        >>> print(exitType)
        False
        >>> print(ScF.lastNotePosition)
        10

        '''
        from music21 import audioSearch

#        print "WE STAY AT:",
#        print self.lastNotePosition, len(self.scoreNotesOnly),
#        print "en percent %d %%" % (self.lastNotePosition * 100 / len(self.scoreNotesOnly)),
#        print " this search begins at: ", self.startSearchAtSlot,
#        print "countdown %d" % self.countdown
#        print "Measure last note", self.scoreStream[self.lastNotePosition].measureNumber

        environLocal.printDebug("repeat transcription starting")

        # Step 1: acquire a chunk of frequency data -- live from the mic, or
        # a window of the test wave file advancing from self.currentSample.
        if self.useMic is True:
            freqFromAQList = audioSearch.getFrequenciesFromMicrophone(
                length=self.seconds_recording,
                storeWaveFilename=None,
                )
        else:
            # NOTE(review): self.waveFile is replaced with an open wave
            # reader here -- later calls pass the reader back in.
            freqFromAQList, self.waveFile, self.currentSample = \
                audioSearch.getFrequenciesFromPartialAudioFile(
                    self.waveFile,
                    length=self.seconds_recording,
                    startSample=self.currentSample,
                    )
            if self.totalFile == 0:
                # Cache the total frame count once for the EOF check below.
                self.totalFile = self.waveFile.getnframes()

        environLocal.printDebug("got Frequencies from Microphone")

        # Step 2: frequencies -> scale-quantized pitches -> note objects,
        # timing the whole processing stage for the file-skip below.
        time_start = time()
        detectedPitchesFreq = audioSearch.detectPitchFrequencies(freqFromAQList, self.useScale)
        detectedPitchesFreq = audioSearch.smoothFrequencies(detectedPitchesFreq)
        detectedPitchObjects, unused_listplot = audioSearch.pitchFrequenciesToObjects(
                                                            detectedPitchesFreq, self.useScale)
        notesList, durationList = audioSearch.joinConsecutiveIdenticalPitches(
            detectedPitchObjects)
        self.silencePeriodDetection(notesList)
        environLocal.printDebug("made it to here...")
        # Slice of the score we expect the recording to line up with,
        # starting at the last confirmed note position.
        scNotes = self.scoreStream[self.lastNotePosition:self.lastNotePosition + len(notesList)]
        #print "1"
        transcribedScore, self.qle = audioSearch.notesAndDurationsToStream(
            notesList,
            durationList,
            scNotes=scNotes,
            qle=self.qle,
            )
        #print "2"
        # Step 3: align the transcription against the full score and update
        # our estimated position.
        totalLengthPeriod, self.lastNotePosition, prob, END_OF_SCORE = self.matchingNotes(
                                                                            self.scoreStream,
                                                                            transcribedScore,
                                                                            self.startSearchAtSlot,
                                                                            self.lastNotePosition,
                                                                            )
        #print "3"
        self.processing_time = time() - time_start
        environLocal.printDebug("and even to here...")
        if END_OF_SCORE is True:
            exitType = "endOfScore"  # "endOfScore"
            return exitType

        # estimate position, or exit if we can't at all...
        # exitType is False while following should continue, otherwise a
        # string naming the reason to stop.
        exitType = self.updatePosition(prob, totalLengthPeriod, time_start)

        if self.useMic is False:  # reading from the disc (only for TESTS)
            # skip ahead the processing time.
            freqFromAQList, junk, self.currentSample = \
                audioSearch.getFrequenciesFromPartialAudioFile(
                    self.waveFile,
                    length=self.processing_time,
                    startSample=self.currentSample,
                    )

        if self.lastNotePosition > len(self.scoreNotesOnly):
            #print "finishedPerforming"
            exitType = "finishedPerforming"
        elif (self.useMic is False and self.currentSample >= self.totalFile):
            #print "waveFileEOF"
            exitType = "waveFileEOF"

        environLocal.printDebug("about to return -- exitType: %s " % exitType)
        return exitType
Example 7
    def repeatTranscription(self):
        '''
        First, it records from the microphone (or from a file if is used for
        test). Later, it processes the signal in order to detect the pitches.
        It converts them into music21 objects and compares them with the score.
        It finds the best matching position of the recorded signal with the
        score, and decides, depending on matching accuracy, the last note
        predicted and some other parameters, in which position the recorded
        signal is.

        It returns a value that is False if the song has not finished, or true
        if there has been a problem like some consecutive bad matchings or the
        score has finished.

        >>> from music21 import common, converter
        >>> from music21.audioSearch import scoreFollower
        >>> scoreNotes = " ".join(["c4", "d", "e", "f", "g", "a", "b", "c'", "c", "e",
        ...     "g", "c'", "a", "f", "d", "c#", "d#", "f#","c", "e", "g", "c'",
        ...     "a", "f", "d", "c#", "d#", "f#"])
        >>> scNotes = converter.parse("tinynotation: 4/4 " + scoreNotes)
        >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
        >>> ScF.useMic = False
        >>> import os #_DOCS_HIDE
        >>> readPath = os.path.join(common.getSourceFilePath(), 'audioSearch', 'test_audio.wav') #_DOCS_HIDE
        >>> ScF.waveFile = readPath #_DOCS_HIDE
        >>> #_DOCS_SHOW ScF.waveFile = 'test_audio.wav'
        >>> ScF.seconds_recording = 10
        >>> ScF.useScale = scale.ChromaticScale('C4')
        >>> ScF.currentSample = 0
        >>> exitType = ScF.repeatTranscription()
        >>> print(exitType)
        False
        >>> print(ScF.lastNotePosition)
        10

        '''
        from music21 import audioSearch

        #        print "WE STAY AT:",
        #        print self.lastNotePosition, len(self.scoreNotesOnly),
        #        print "en percent %d %%" % (self.lastNotePosition * 100 / len(self.scoreNotesOnly)),
        #        print " this search begins at: ", self.startSearchAtSlot,
        #        print "countdown %d" % self.countdown
        #        print "Measure last note", self.scoreStream[self.lastNotePosition].measureNumber

        environLocal.printDebug("repeat transcription starting")

        # Step 1: acquire a chunk of frequency data -- live from the mic, or
        # a window of the test wave file advancing from self.currentSample.
        if self.useMic is True:
            freqFromAQList = audioSearch.getFrequenciesFromMicrophone(
                length=self.seconds_recording,
                storeWaveFilename=None,
            )
        else:
            # NOTE(review): self.waveFile is replaced with an open wave
            # reader here -- later calls pass the reader back in.
            freqFromAQList, self.waveFile, self.currentSample = \
                audioSearch.getFrequenciesFromPartialAudioFile(
                    self.waveFile,
                    length=self.seconds_recording,
                    startSample=self.currentSample,
                    )
            if self.totalFile == 0:
                # Cache the total frame count once for the EOF check below.
                self.totalFile = self.waveFile.getnframes()

        environLocal.printDebug("got Frequencies from Microphone")

        # Step 2: frequencies -> scale-quantized pitches -> note objects,
        # timing the whole processing stage for the file-skip below.
        time_start = time()
        detectedPitchesFreq = audioSearch.detectPitchFrequencies(
            freqFromAQList, self.useScale)
        detectedPitchesFreq = audioSearch.smoothFrequencies(
            detectedPitchesFreq)
        detectedPitchObjects, unused_listplot = \
            audioSearch.pitchFrequenciesToObjects(
                detectedPitchesFreq, self.useScale)
        notesList, durationList = audioSearch.joinConsecutiveIdenticalPitches(
            detectedPitchObjects)
        self.silencePeriodDetection(notesList)
        environLocal.printDebug("made it to here...")
        # Slice of the score we expect the recording to line up with,
        # starting at the last confirmed note position.
        scNotes = self.scoreStream[self.
                                   lastNotePosition:self.lastNotePosition +
                                   len(notesList)]
        #print "1"
        transcribedScore, self.qle = audioSearch.notesAndDurationsToStream(
            notesList,
            durationList,
            scNotes=scNotes,
            qle=self.qle,
        )
        #print "2"
        # Step 3: align the transcription against the full score and update
        # our estimated position.
        totalLengthPeriod, self.lastNotePosition, prob, END_OF_SCORE = \
            self.matchingNotes(
                self.scoreStream,
                transcribedScore,
                self.startSearchAtSlot,
                self.lastNotePosition,
                )
        #print "3"
        self.processing_time = time() - time_start
        environLocal.printDebug("and even to here...")
        if END_OF_SCORE is True:
            exitType = "endOfScore"  # "endOfScore"
            return exitType

        # estimate position, or exit if we can't at all...
        # exitType is False while following should continue, otherwise a
        # string naming the reason to stop.
        exitType = self.updatePosition(prob, totalLengthPeriod, time_start)

        if self.useMic is False:  # reading from the disc (only for TESTS)
            # skip ahead the processing time.
            freqFromAQList, junk, self.currentSample = \
                audioSearch.getFrequenciesFromPartialAudioFile(
                    self.waveFile,
                    length=self.processing_time,
                    startSample=self.currentSample,
                    )

        if self.lastNotePosition > len(self.scoreNotesOnly):
            #print "finishedPerforming"
            exitType = "finishedPerforming"
        elif (self.useMic is False and self.currentSample >= self.totalFile):
            #print "waveFileEOF"
            exitType = "waveFileEOF"

        environLocal.printDebug("about to return -- exitType: %s " % exitType)
        return exitType