Example #1
def changeDuration(fromWavFN, durationParameters, stepList, outputName,
                   outputMinPitch, outputMaxPitch, praatEXE):
    '''
    Uses Praat to morph duration in one file to duration in another

    Praat uses the PSOLA algorithm
    '''

    rootPath = os.path.split(fromWavFN)[0]

    # Prep output directories
    outputPath = join(rootPath, "duration_resynthesized_wavs")
    utils.makeDir(outputPath)
    
    durationTierPath = join(rootPath, "duration_tiers")
    utils.makeDir(durationTierPath)

    fromWavDuration = audio_scripts.getSoundFileDuration(fromWavFN)

    durationParameters = copy.deepcopy(durationParameters)
    # Pad any gaps with values of 1 (no change in duration)
    
    # No need to stretch out any pauses at the beginning
    if durationParameters[0][0] != 0:
        tmpVar = (0, durationParameters[0][0] - PRAAT_TIME_DIFF, 1)
        durationParameters.insert(0, tmpVar)

    # Or the end
    if durationParameters[-1][1] < fromWavDuration:
        durationParameters.append((durationParameters[-1][1] + PRAAT_TIME_DIFF,
                                   fromWavDuration, 1))

    # Create the praat script for doing duration manipulation
    for stepAmount in stepList:
        durationPointList = []
        for start, end, ratio in durationParameters:
            percentChange = 1 + (ratio - 1) * stepAmount
            durationPointList.append((start, percentChange))
            durationPointList.append((end, percentChange))
        
        outputPrefix = "%s_%0.3g" % (outputName, stepAmount)
        durationTierFN = join(durationTierPath,
                              "%s.DurationTier" % outputPrefix)
        outputWavFN = join(outputPath, "%s.wav" % outputPrefix)
        durationTier = dataio.PointObject2D(durationPointList, dataio.DURATION,
                                            0, fromWavDuration)
        durationTier.save(durationTierFN)
        
        praat_scripts.resynthesizeDuration(praatEXE,
                                           fromWavFN,
                                           durationTierFN,
                                           outputWavFN,
                                           outputMinPitch, outputMaxPitch)
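A minimal usage sketch for changeDuration. The import path, file names, Praat location, and parameter values below are assumptions for illustration only; durationParameters is a list of (start, end, ratio) triples, and each value in stepList scales the ratios toward their full effect.

# Hypothetical usage sketch; the import path, file names, and Praat
# location are assumptions, not part of the example above.
from promo import duration_morph  # assumed module path

# (start, end, ratio) triples: stretch the span 0.5s-1.2s to 150% length
durationParameters = [(0.5, 1.2, 1.5)]

# stepList=[0.5, 1.0] writes two outputs: half the change and the full change
duration_morph.changeDuration("mary1.wav", durationParameters,
                              stepList=[0.5, 1.0], outputName="mary1_dur",
                              outputMinPitch=75, outputMaxPitch=450,
                              praatEXE="/usr/bin/praat")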
Example #2
def changeDuration(fromWavFN, durationParameters, stepList, outputName,
                   outputMinPitch, outputMaxPitch, praatEXE):
    '''
    Uses Praat to morph duration in one file to duration in another

    Praat uses the PSOLA algorithm
    '''

    rootPath = os.path.split(fromWavFN)[0]

    # Prep output directories
    outputPath = join(rootPath, "duration_resynthesized_wavs")
    utils.makeDir(outputPath)

    durationTierPath = join(rootPath, "duration_tiers")
    utils.makeDir(durationTierPath)

    fromWavDuration = audio_scripts.getSoundFileDuration(fromWavFN)

    durationParameters = copy.deepcopy(durationParameters)
    # Pad any gaps with values of 1 (no change in duration)

    # No need to stretch out any pauses at the beginning
    if durationParameters[0][0] != 0:
        tmpVar = (0, durationParameters[0][0] - PRAAT_TIME_DIFF, 1)
        durationParameters.insert(0, tmpVar)

    # Or the end
    if durationParameters[-1][1] < fromWavDuration:
        durationParameters.append(
            (durationParameters[-1][1] + PRAAT_TIME_DIFF, fromWavDuration, 1))

    # Create the praat script for doing duration manipulation
    for stepAmount in stepList:
        durationPointList = []
        for start, end, ratio in durationParameters:
            percentChange = 1 + (ratio - 1) * stepAmount
            durationPointList.append((start, percentChange))
            durationPointList.append((end, percentChange))

        outputPrefix = "%s_%0.3g" % (outputName, stepAmount)
        durationTierFN = join(durationTierPath,
                              "%s.DurationTier" % outputPrefix)
        outputWavFN = join(outputPath, "%s.wav" % outputPrefix)
        durationTier = dataio.PointObject2D(durationPointList, dataio.DURATION,
                                            0, fromWavDuration)
        durationTier.save(durationTierFN)

        praat_scripts.resynthesizeDuration(praatEXE, fromWavFN, durationTierFN,
                                           outputWavFN, outputMinPitch,
                                           outputMaxPitch)
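To make the stepAmount scaling in the inner loop concrete, the following self-contained sketch derives one step's DurationTier points from a single (start, end, ratio) triple; the numbers are illustrative only.

# Illustrative only: how one interpolation step turns a (start, end, ratio)
# triple into DurationTier points, mirroring the loop above.
durationParameters = [(0.5, 1.2, 1.5)]  # stretch this span to 150%
stepAmount = 0.5                         # apply half of the change

durationPointList = []
for start, end, ratio in durationParameters:
    percentChange = 1 + (ratio - 1) * stepAmount  # 1 + 0.5 * 0.5 = 1.25
    durationPointList.append((start, percentChange))
    durationPointList.append((end, percentChange))

print(durationPointList)  # [(0.5, 1.25), (1.2, 1.25)]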
Example #3
def getBareParameters(wavFN):
    wavDuration = audio_scripts.getSoundFileDuration(wavFN)
    return [
        (0, wavDuration, ''),
    ]
Example #4
def getBareParameters(wavFN):
    wavDuration = audio_scripts.getSoundFileDuration(wavFN)
    return [(0, wavDuration, ''), ]
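A small sketch of what getBareParameters returns: a single (start, end, label) triple with an empty label spanning the whole file, i.e. "treat the entire recording as one segment". The file name and duration are assumptions.

# Hypothetical usage; the file name and the resulting duration are assumed.
parameterList = getBareParameters("mary1.wav")
# -> [(0, 2.37, '')] for a recording that is 2.37 seconds long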
Example #5
def f0Morph(fromWavFN,
            pitchPath,
            stepList,
            outputName,
            doPlotPitchSteps,
            fromPitchData,
            toPitchData,
            outputMinPitch,
            outputMaxPitch,
            praatEXE,
            keepPitchRange=False,
            keepAveragePitch=False):
    '''
    Resynthesizes the pitch track from a source to a target wav file

    fromPitchData and toPitchData should be segmented according to the
    portions that you want to morph.  The two lists must have the same
    number of sublists.

    Occurs over a three-step process.

    This function can act as a template for how to use the function
    morph_sequence.morphChunkedDataLists to morph pitch contours or
    other data.
    
    By default, everything is morphed, but it is possible to maintain elements
    of the original speaker's pitch (average pitch and pitch range) by setting
    the appropriate flag.
    '''

    fromDuration = audio_scripts.getSoundFileDuration(fromWavFN)

    # Iterative pitch tier data path
    pitchTierPath = join(pitchPath, "pitchTiers")
    resynthesizedPath = join(pitchPath, "f0_resynthesized_wavs")
    for tmpPath in [pitchTierPath, resynthesizedPath]:
        utils.makeDir(tmpPath)

    # 1. Prepare the data for morphing - acquire the segments to merge
    # (Done elsewhere, with the input fed into this function)

    # 2. Morph the fromData to the toData
    try:
        finalOutputList = morph_sequence.morphChunkedDataLists(
            fromPitchData, toPitchData, stepList)
    except IndexError:
        raise MissingPitchDataException()

    fromPitchData = [row for subList in fromPitchData for row in subList]
    toPitchData = [row for subList in toPitchData for row in subList]

    # 3. Save the pitch data and resynthesize the pitch
    mergedDataList = []
    for i in range(0, len(finalOutputList)):

        outputDataList = finalOutputList[i]

        if keepPitchRange is True:
            outputDataList = morph_sequence.morphRange(outputDataList,
                                                       fromPitchData)

        if keepAveragePitch is True:
            outputDataList = morph_sequence.morphAveragePitch(
                outputDataList, fromPitchData)

        stepOutputName = "%s_%0.3g" % (outputName, stepList[i])
        pitchFNFullPath = join(pitchTierPath, "%s.PitchTier" % stepOutputName)
        outputFN = join(resynthesizedPath, "%s.wav" % stepOutputName)
        pointObj = dataio.PointObject2D(outputDataList, dataio.PITCH, 0,
                                        fromDuration)
        pointObj.save(pitchFNFullPath)

        outputTime, outputVals = list(zip(*outputDataList))
        mergedDataList.append((outputTime, outputVals))

        praat_scripts.resynthesizePitch(praatEXE, fromWavFN, pitchFNFullPath,
                                        outputFN, outputMinPitch,
                                        outputMaxPitch)

    # 4. (Optional) Plot the generated contours
    if doPlotPitchSteps:

        fromTime, fromVals = list(zip(*fromPitchData))
        toTime, toVals = list(zip(*toPitchData))

        plot_morphed_data.plotF0((fromTime, fromVals), (toTime, toVals),
                                 mergedDataList,
                                 join(pitchTierPath, "%s.png" % outputName))
Example #6
def f0Morph(fromWavFN,
            pitchPath,
            stepList,
            outputName,
            doPlotPitchSteps,
            fromPitchData,
            toPitchData,
            outputMinPitch,
            outputMaxPitch,
            praatEXE,
            keepPitchRange=False,
            keepAveragePitch=False,
            sourcePitchDataList=None,
            minIntervalLength=0.3):
    '''
    Resynthesizes the pitch track from a source to a target wav file

    fromPitchData and toPitchData should be segmented according to the
    portions that you want to morph.  The two lists must have the same
    number of sublists.

    Occurs over a three-step process.

    This function can act as a template for how to use the function
    morph_sequence.morphChunkedDataLists to morph pitch contours or
    other data.
    
    By default, everything is morphed, but it is possible to maintain elements
    of the original speaker's pitch (average pitch and pitch range) by setting
    the appropriate flag.
    
    sourcePitchDataList: if passed in, any regions unspecified by
                         fromPitchData will be sampled from this list.  In
                         essence, this allows one to leave segments of
                         the original pitch contour untouched by the
                         morph process.
    '''

    fromDuration = audio_scripts.getSoundFileDuration(fromWavFN)

    # Find source pitch samples that will be mixed in with the target
    # pitch samples later
    nonMorphPitchData = []
    if sourcePitchDataList is not None:
        timeList = sorted(fromPitchData)
        timeList = [(row[0][0], row[-1][0]) for row in timeList]
        endTime = sourcePitchDataList[-1][0]
        invertedTimeList = praatio_utils.invertIntervalList(timeList, endTime)
        invertedTimeList = [(start, stop) for start, stop in invertedTimeList
                            if stop - start > minIntervalLength]

        for start, stop in invertedTimeList:
            pitchList = praatio_utils.getValuesInInterval(
                sourcePitchDataList, start, stop)
            nonMorphPitchData.extend(pitchList)

    # Iterative pitch tier data path
    pitchTierPath = join(pitchPath, "pitchTiers")
    resynthesizedPath = join(pitchPath, "f0_resynthesized_wavs")
    for tmpPath in [pitchTierPath, resynthesizedPath]:
        utils.makeDir(tmpPath)

    # 1. Prepare the data for morphing - acquire the segments to merge
    # (Done elsewhere, with the input fed into this function)

    # 2. Morph the fromData to the toData
    try:
        finalOutputList = morph_sequence.morphChunkedDataLists(
            fromPitchData, toPitchData, stepList)
    except IndexError:
        raise MissingPitchDataException()

    fromPitchData = [row for subList in fromPitchData for row in subList]
    toPitchData = [row for subList in toPitchData for row in subList]

    # 3. Save the pitch data and resynthesize the pitch
    mergedDataList = []
    for i in range(0, len(finalOutputList)):

        outputDataList = finalOutputList[i]

        if keepPitchRange is True:
            outputDataList = morph_sequence.morphRange(outputDataList,
                                                       fromPitchData)

        if keepAveragePitch is True:
            outputDataList = morph_sequence.morphAveragePitch(
                outputDataList, fromPitchData)

        if sourcePitchDataList is not None:
            outputDataList.extend(nonMorphPitchData)
            outputDataList.sort()

        stepOutputName = "%s_%0.3g" % (outputName, stepList[i])
        pitchFNFullPath = join(pitchTierPath, "%s.PitchTier" % stepOutputName)
        outputFN = join(resynthesizedPath, "%s.wav" % stepOutputName)
        pointObj = dataio.PointObject2D(outputDataList, dataio.PITCH, 0,
                                        fromDuration)
        pointObj.save(pitchFNFullPath)

        outputTime, outputVals = zip(*outputDataList)
        mergedDataList.append((outputTime, outputVals))

        praat_scripts.resynthesizePitch(praatEXE, fromWavFN, pitchFNFullPath,
                                        outputFN, outputMinPitch,
                                        outputMaxPitch)

    # 4. (Optional) Plot the generated contours
    if doPlotPitchSteps:

        fromTime, fromVals = zip(*fromPitchData)
        toTime, toVals = zip(*toPitchData)

        plot_morphed_data.plotF0((fromTime, fromVals), (toTime, toVals),
                                 mergedDataList,
                                 join(pitchTierPath, "%s.png" % outputName))
Example #7
def f0Morph(fromWavFN, pitchPath, stepList,
            outputName, doPlotPitchSteps, fromPitchData, toPitchData,
            outputMinPitch, outputMaxPitch, praatEXE, keepPitchRange=False,
            keepAveragePitch=False):
    '''
    Resynthesizes the pitch track from a source to a target wav file

    fromPitchData and toPitchData should be segmented according to the
    portions that you want to morph.  The two lists must have the same
    number of sublists.

    Occurs over a three-step process.

    This function can act as a template for how to use the function
    morph_sequence.morphChunkedDataLists to morph pitch contours or
    other data.
    
    By default, everything is morphed, but it is possible to maintain elements
    of the original speaker's pitch (average pitch and pitch range) by setting
    the appropriate flag.
    '''

    fromDuration = audio_scripts.getSoundFileDuration(fromWavFN)

    # Iterative pitch tier data path
    pitchTierPath = join(pitchPath, "pitchTiers")
    resynthesizedPath = join(pitchPath, "f0_resynthesized_wavs")
    for tmpPath in [pitchTierPath, resynthesizedPath]:
        utils.makeDir(tmpPath)

    # 1. Prepare the data for morphing - acquire the segments to merge
    # (Done elsewhere, with the input fed into this function)
    
    # 2. Morph the fromData to the toData
    try:
        finalOutputList = morph_sequence.morphChunkedDataLists(fromPitchData,
                                                               toPitchData,
                                                               stepList)
    except IndexError:
        raise MissingPitchDataException()

    fromPitchData = [row for subList in fromPitchData for row in subList]
    toPitchData = [row for subList in toPitchData for row in subList]

    # 3. Save the pitch data and resynthesize the pitch
    mergedDataList = []
    for i in range(0, len(finalOutputList)):
        
        outputDataList = finalOutputList[i]
        
        if keepPitchRange is True:
            outputDataList = morph_sequence.morphRange(outputDataList,
                                                       fromPitchData)
            
        if keepAveragePitch is True:
            outputDataList = morph_sequence.morphAveragePitch(outputDataList,
                                                              fromPitchData)
        
        stepOutputName = "%s_%0.3g" % (outputName, stepList[i])
        pitchFNFullPath = join(pitchTierPath, "%s.PitchTier" % stepOutputName)
        outputFN = join(resynthesizedPath, "%s.wav" % stepOutputName)
        pointObj = dataio.PointObject2D(outputDataList, dataio.PITCH,
                                        0, fromDuration)
        pointObj.save(pitchFNFullPath)

        outputTime, outputVals = zip(*outputDataList)
        mergedDataList.append((outputTime, outputVals))
        
        praat_scripts.resynthesizePitch(praatEXE, fromWavFN, pitchFNFullPath,
                                        outputFN, outputMinPitch,
                                        outputMaxPitch)

    # 4. (Optional) Plot the generated contours
    if doPlotPitchSteps:

        fromTime, fromVals = zip(*fromPitchData)
        toTime, toVals = zip(*toPitchData)

        plot_morphed_data.plotF0((fromTime, fromVals),
                                 (toTime, toVals),
                                 mergedDataList,
                                 join(pitchTierPath,
                                      "%s.png" % outputName))