Example #1
def runTranscribe(show=True, plot=True, useMic=True, seconds=20.0, useScale=None, saveFile=True):
    """
    Runs all the methods to record audio for `seconds` seconds (default 20.0)
    and transcribe the resulting melody, returning a music21 Score object.

    If `show` is True, show the resulting stream.

    If `plot` is True, a matplotlib graph of the detected frequencies is displayed.

    If `useMic` is True, record from the microphone.  If False, load the file named
    by `saveFile` (or the default temp file) and run the transcription from it.

    A scale other than the chromatic scale can be specified by setting `useScale`.
    See :ref:`moduleScale` for a list of allowable scales (or pass a custom one).
    Microtonal scales are fully supported, as are retuned scales where A != 440hz.

    If `saveFile` is False, the recorded audio is not saved to disk.  If
    set to `True`, then `environLocal.getRootTempDir() + os.path.sep + 'ex.wav'` is
    used as the filename.  If set to anything else, that value is used as the
    filename.
    """
    if useScale is None:
        useScale = scale.ChromaticScale("C4")
    # beginning - recording or not
    if saveFile is False:
        WAVE_FILENAME = False
    elif saveFile is True:
        WAVE_FILENAME = environLocal.getRootTempDir() + os.path.sep + "ex.wav"
    else:
        WAVE_FILENAME = saveFile

    # the rest of the score
    if useMic is True:
        freqFromAQList = audioSearchBase.getFrequenciesFromMicrophone(length=seconds, storeWaveFilename=WAVE_FILENAME)
    else:
        freqFromAQList = audioSearchBase.getFrequenciesFromAudioFile(waveFilename=WAVE_FILENAME)

    detectedPitchesFreq = audioSearchBase.detectPitchFrequencies(freqFromAQList, useScale)
    detectedPitchesFreq = audioSearchBase.smoothFrequencies(detectedPitchesFreq)
    (detectedPitchObjects, listplot) = audioSearchBase.pitchFrequenciesToObjects(detectedPitchesFreq, useScale)
    (notesList, durationList) = audioSearchBase.joinConsecutiveIdenticalPitches(detectedPitchObjects)
    myScore, unused_length_part = audioSearchBase.notesAndDurationsToStream(
        notesList, durationList, removeRestsAtBeginning=True
    )

    if show:
        myScore.show()

    if plot:
        try:
            import matplotlib.pyplot  # imported lazily so matplotlib is only required when plotting
        except ImportError:
            raise audioSearchBase.AudioSearchException("Cannot plot without matplotlib installed.")
        matplotlib.pyplot.plot(listplot)
        matplotlib.pyplot.show()
    environLocal.printDebug("* END")

    return myScore
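
A minimal usage sketch for this example, assuming the function is importable as music21.audioSearch.transcriber.runTranscribe (the module path used by the doctest in Example #2) and that the microphone dependencies are installed; the scale choice and recording length are illustrative only:

from music21 import scale
from music21.audioSearch import transcriber

# Record about five seconds from the microphone, transcribe against C major
# instead of the default chromatic scale, and skip plotting and saving.
cMajor = scale.MajorScale('C4')
score = transcriber.runTranscribe(show=False, plot=False, useMic=True,
                                  seconds=5.0, useScale=cMajor, saveFile=False)
score.show('text')  # print the transcribed notes as text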
Example #2
def monophonicStreamFromFile(fileName, useScale=None):
    '''
    Reads in a .wav file and returns a stream representing the transcribed, monophonic audio.

    `fileName` should be the complete path to a file on the disk.

    A scale other than the chromatic scale can be specified by setting `useScale`.
    See :ref:`moduleScale` for a list of allowable scales (or pass a custom one).
    Microtonal scales are fully supported, as are retuned scales where A != 440hz.

    
    >>> from music21 import *
    >>> waveFile = os.path.dirname(__file__) + os.path.sep + 'test_audio.wav' #_DOCS_HIDE
    >>> #_DOCS_SHOW waveFile = 'test_audio.wav'
    >>> p = audioSearch.transcriber.monophonicStreamFromFile(waveFile)
    >>> p
    <music21.stream.Part ...>
    >>> p.show('text')
    {0.0} <music21.note.Note C>
    {0.25} <music21.note.Note C>
    {0.75} <music21.note.Note D>
    {1.75} <music21.note.Note E>
    {2.75} <music21.note.Note F>
    {3.75} <music21.note.Note G>
    {4.75} <music21.note.Note A>
    {5.75} <music21.note.Note B>
    {6.75} <music21.note.Note C>
    {7.25} <music21.note.Rest rest>
    {7.75} <music21.note.Note C>
    {8.25} <music21.note.Note C>
    {8.5} <music21.note.Note E>
    {9.5} <music21.note.Note G>
    {10.5} <music21.note.Note C>
    {11.0} <music21.note.Note C>
    {11.25} <music21.note.Note A>
    {12.25} <music21.note.Note F>
    {13.25} <music21.note.Note D>
    {15.25} <music21.note.Note D>
    {16.25} <music21.note.Rest rest>
    {17.25} <music21.note.Note G>
    '''
    freqFromAQList = audioSearchBase.getFrequenciesFromAudioFile(waveFilename=fileName)
        
    detectedPitchesFreq = audioSearchBase.detectPitchFrequencies(freqFromAQList, useScale)
    detectedPitchesFreq = audioSearchBase.smoothFrequencies(detectedPitchesFreq)
    (detectedPitchObjects, unused_listplot) = audioSearchBase.pitchFrequenciesToObjects(
        detectedPitchesFreq, useScale)
    (notesList, durationList) = audioSearchBase.joinConsecutiveIdenticalPitches(detectedPitchObjects)
    myScore, unused_length_part = audioSearchBase.notesAndDurationsToStream(
        notesList, durationList, removeRestsAtBeginning=True)
    return myScore.parts[0]
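
The doctest above uses the default chromatic scale; here is a short sketch of the `useScale` parameter, where 'my_recording.wav' is a placeholder path and the import path is assumed as in the doctest:

from music21 import scale
from music21.audioSearch import transcriber

# Constrain the pitch detection to D major rather than the chromatic scale.
dMajor = scale.MajorScale('D4')
part = transcriber.monophonicStreamFromFile('my_recording.wav', useScale=dMajor)
part.show('text')  # offsets and note names of the transcription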
Example #3
def monophonicStreamFromFile(fileName, useScale=None):
    '''
    Reads in a .wav file and returns a stream representing the transcribed, monophonic audio.

    `fileName` should be the complete path to a file on the disk.

    A scale other than the chromatic scale can be specified by setting `useScale`.
    See :ref:`moduleScale` for a list of allowable scales (or pass a custom one).
    Microtonal scales are fully supported, as are retuned scales where A != 440hz.

    We demonstrate with an audio file beginning with an ascending scale.
    
    >>> import os #_DOCS_HIDE
    >>> waveFile = common.getSourceFilePath() + os.path.sep + 'audioSearch' + os.path.sep + 'test_audio.wav' #_DOCS_HIDE
    >>> #_DOCS_SHOW waveFile = 'test_audio.wav'
    >>> p = audioSearch.transcriber.monophonicStreamFromFile(waveFile)
    >>> p
    <music21.stream.Part ...>
    >>> p.show('text')
    {0.0} <music21.note.Note C>
    {0.25} <music21.note.Note C>
    {0.75} <music21.note.Note D>
    {1.75} <music21.note.Note E>
    {2.75} <music21.note.Note F>
    {3.75} <music21.note.Note G>
    {4.75} <music21.note.Note A>
    {5.75} <music21.note.Note B>
    {6.75} <music21.note.Note C>
    ...
    '''
    freqFromAQList = audioSearchBase.getFrequenciesFromAudioFile(
        waveFilename=fileName)

    detectedPitchesFreq = audioSearchBase.detectPitchFrequencies(
        freqFromAQList, useScale)
    detectedPitchesFreq = audioSearchBase.smoothFrequencies(
        detectedPitchesFreq)
    (detectedPitchObjects,
     unused_listplot) = audioSearchBase.pitchFrequenciesToObjects(
         detectedPitchesFreq, useScale)
    (notesList, durationList
     ) = audioSearchBase.joinConsecutiveIdenticalPitches(detectedPitchObjects)
    myScore, unused_length_part = audioSearchBase.notesAndDurationsToStream(
        notesList, durationList, removeRestsAtBeginning=True)
    return myScore.parts[0]
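
Example #3 discards the frequency list returned by pitchFrequenciesToObjects; below is a sketch of running the same pipeline step by step to keep that list for inspection or plotting. The audioSearchBase import path is an assumption and differs between music21 versions:

from music21 import scale
from music21.audioSearch import base as audioSearchBase  # import path assumed

useScale = scale.ChromaticScale('C4')
# Same steps as monophonicStreamFromFile, stopping before stream construction.
freqs = audioSearchBase.getFrequenciesFromAudioFile(waveFilename='test_audio.wav')
detected = audioSearchBase.detectPitchFrequencies(freqs, useScale)
smoothed = audioSearchBase.smoothFrequencies(detected)
pitches, freqPlot = audioSearchBase.pitchFrequenciesToObjects(smoothed, useScale)
print(freqPlot[:10])  # first few smoothed frequency values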
Example #4
def runTranscribe(show=True,
                  plot=True,
                  useMic=True,
                  seconds=20.0,
                  useScale=None,
                  saveFile=True):
    '''
    Runs all the methods to record audio for `seconds` seconds (default 20.0)
    and transcribe the resulting melody, returning a music21 Score object.

    If `show` is True, show the resulting stream.

    If `plot` is True, a matplotlib graph of the detected frequencies is displayed.

    If `useMic` is True, record from the microphone.  If False, load the file named
    by `saveFile` (or the default temp file) and run the transcription from it.

    A scale other than the chromatic scale can be specified by setting `useScale`.
    See :ref:`moduleScale` for a list of allowable scales (or pass a custom one).
    Microtonal scales are fully supported, as are retuned scales where A != 440hz.

    If `saveFile` is False, the recorded audio is not saved to disk.  If
    set to `True`, then `environLocal.getRootTempDir() + os.path.sep + 'ex.wav'` is
    used as the filename.  If set to anything else, that value is used as the
    filename.
    '''
    if useScale is None:
        useScale = scale.ChromaticScale('C4')
    # beginning - recording or not
    if saveFile is False:
        WAVE_FILENAME = False
    elif saveFile is True:
        WAVE_FILENAME = environLocal.getRootTempDir() + os.path.sep + 'ex.wav'
    else:
        WAVE_FILENAME = saveFile

    # the rest of the score
    if useMic is True:
        freqFromAQList = audioSearchBase.getFrequenciesFromMicrophone(
            length=seconds, storeWaveFilename=WAVE_FILENAME)
    else:
        freqFromAQList = audioSearchBase.getFrequenciesFromAudioFile(
            waveFilename=WAVE_FILENAME)

    detectedPitchesFreq = audioSearchBase.detectPitchFrequencies(
        freqFromAQList, useScale)
    detectedPitchesFreq = audioSearchBase.smoothFrequencies(
        detectedPitchesFreq)
    (detectedPitchObjects,
     listplot) = audioSearchBase.pitchFrequenciesToObjects(
         detectedPitchesFreq, useScale)
    (notesList, durationList
     ) = audioSearchBase.joinConsecutiveIdenticalPitches(detectedPitchObjects)
    myScore, unused_length_part = audioSearchBase.notesAndDurationsToStream(
        notesList, durationList, removeRestsAtBeginning=True)

    if show:
        myScore.show()

    if plot:
        try:
            import matplotlib.pyplot  # imported lazily so matplotlib is only required when plotting
        except ImportError:
            raise audioSearchBase.AudioSearchException(
                "Cannot plot without matplotlib installed.")
        matplotlib.pyplot.plot(listplot)
        matplotlib.pyplot.show()
    environLocal.printDebug("* END")

    return myScore
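
A short sketch of the `useMic=False` path described in the docstring: re-running the transcription on audio saved during a previous run, where 'last_take.wav' is a placeholder filename and the import path is assumed as above:

from music21.audioSearch import transcriber

# Load a previously saved recording instead of recording from the microphone again.
score = transcriber.runTranscribe(show=True, plot=False,
                                  useMic=False, saveFile='last_take.wav')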