def test_analyze_long_file_formants(acoustic_corpus_path, formants_func):
    """Formant tracks returned by analyze_long_file stay within the segment
    bounds, both without padding and with 0.5 s of padding."""
    segments = [(1, 2, 0)]
    # First run uses the default padding, second run adds half a second;
    # either way every time point in the track must lie inside [1, 2].
    for extra_kwargs in ({}, {'padding': 0.5}):
        output = analyze_long_file(acoustic_corpus_path, segments,
                                   formants_func, **extra_kwargs)
        times = sorted(output[(1, 2, 0)].keys())
        print(times)
        assert all(1 <= t <= 2 for t in times)
Beispiel #2
0
def analyze_pitch(corpus_context, sound_file, stop_check = None):
    """Generate and store a pitch track for every utterance of ``sound_file``.

    The tracker is chosen from ``corpus_context.config.pitch_algorithm``
    ('reaper', 'praat', or the acousticsim default).  Returns early (None)
    when the audio file is missing, a track already exists for this
    discourse/algorithm pair, or the required external binary is unset.
    """
    # Bug fix: the original tested sound_file.filepath for existence but then
    # analyzed sound_file.vowel_filepath.  Check the file we actually read,
    # expanding '~' like the newer implementation in this file does.
    filepath = os.path.expanduser(sound_file.vowel_filepath)
    if not os.path.exists(filepath):
        return
    algorithm = corpus_context.config.pitch_algorithm
    if corpus_context.has_pitch(sound_file.discourse.name, algorithm):
        return  # already computed for this discourse with this algorithm
    if algorithm == 'reaper':
        if getattr(corpus_context.config, 'reaper_path', None) is None:
            return  # reaper requested but no binary configured
        pitch_function = partial(ReaperPitch,
                                 reaper = corpus_context.config.reaper_path,
                                 time_step = 0.01, freq_lims = (50, 500))
    elif algorithm == 'praat':
        if getattr(corpus_context.config, 'praat_path', None) is None:
            return  # praat requested but no binary configured
        pitch_function = partial(PraatPitch,
                                 praatpath = corpus_context.config.praat_path,
                                 time_step = 0.01, freq_lims = (75, 500))
    else:
        pitch_function = partial(ASPitch, time_step = 0.01,
                                 freq_lims = (75, 500),
                                 window_shape = 'gaussian')

    # Collect (begin, end, channel) for every highest-level annotation
    # (presumably utterances) in this discourse.
    atype = corpus_context.hierarchy.highest
    prob_utt = getattr(corpus_context, atype)
    q = corpus_context.query_graph(prob_utt)
    q = q.filter(prob_utt.discourse.name == sound_file.discourse.name)
    q = q.preload(prob_utt.discourse, prob_utt.speaker)
    segments = [(u.begin, u.end, u.channel) for u in q.all()]

    # 1 s of padding gives the tracker context around each segment edge.
    output = analyze_long_file(filepath, segments, pitch_function, padding = 1)

    for k, track in output.items():
        corpus_context.save_pitch(sound_file, track, channel = k[-1],
                                  source = algorithm)
Beispiel #3
0
def analyze_formants(corpus_context, sound_file, stop_check = None):
    """Generate and store formant tracks for every utterance of ``sound_file``.

    Uses Praat when ``corpus_context.config.formant_algorithm`` is 'praat'
    and a Praat binary path is configured, otherwise the acousticsim
    implementation.  Returns early (None) when the audio file is missing,
    tracks already exist, or the Praat path is unset.
    """
    # Bug fix: the original tested sound_file.filepath but then analyzed
    # sound_file.vowel_filepath.  Check the file we actually read, expanding
    # '~' like the newer implementation in this file does.
    filepath = os.path.expanduser(sound_file.vowel_filepath)
    if not os.path.exists(filepath):
        return
    algorithm = corpus_context.config.formant_algorithm
    if corpus_context.has_formants(sound_file.discourse.name, algorithm):
        return  # already computed for this discourse with this algorithm
    if algorithm == 'praat':
        if getattr(corpus_context.config, 'praat_path', None) is None:
            return  # praat requested but no binary configured
        formant_function = partial(PraatFormants,
                                   praatpath = corpus_context.config.praat_path,
                                   max_freq = 5500, num_formants = 5,
                                   win_len = 0.025, time_step = 0.01)
    else:
        formant_function = partial(ASFormants, freq_lims = (0, 5500),
                                   time_step = 0.01, num_formants = 5,
                                   win_len = 0.025,
                                   window_shape = 'gaussian')

    # Collect (begin, end, channel) for every highest-level annotation
    # (presumably utterances) in this discourse.
    atype = corpus_context.hierarchy.highest
    prob_utt = getattr(corpus_context, atype)
    q = corpus_context.query_graph(prob_utt)
    q = q.filter(prob_utt.discourse.name == sound_file.discourse.name)
    # The enumerate index in the original loop was unused; iterate directly.
    segments = [(u.begin, u.end, u.channel) for u in q.all()]

    output = analyze_long_file(filepath, segments, formant_function, padding = 1)
    for k, track in output.items():
        corpus_context.save_formants(sound_file, track, channel = k[-1],
                                     source = algorithm)
    corpus_context.sql_session.flush()
Beispiel #4
0
def analyze_pitch(corpus_context, sound_file, stop_check=None):
    """Compute a pitch track per utterance of ``sound_file`` and persist it.

    Dispatches on ``corpus_context.config.pitch_algorithm`` ('reaper',
    'praat', or the acousticsim fallback).  Silently returns when the audio
    file is absent, a track already exists for this discourse/algorithm, or
    the required external binary path is not configured.
    """
    wav_path = os.path.expanduser(sound_file.vowel_filepath)
    if not os.path.exists(wav_path):
        return
    algorithm = corpus_context.config.pitch_algorithm
    if corpus_context.has_pitch(sound_file.discourse.name, algorithm):
        return
    if algorithm == 'reaper':
        reaper_path = getattr(corpus_context.config, 'reaper_path', None)
        if reaper_path is None:
            return  # reaper requested but no binary configured
        pitch_function = partial(ReaperPitch, reaper=reaper_path,
                                 time_step=0.01, freq_lims=(50, 500))
    elif algorithm == 'praat':
        praat_path = getattr(corpus_context.config, 'praat_path', None)
        if praat_path is None:
            return  # praat requested but no binary configured
        pitch_function = partial(PraatPitch, praatpath=praat_path,
                                 time_step=0.01, freq_lims=(75, 500))
    else:
        pitch_function = partial(ASPitch, time_step=0.01,
                                 freq_lims=(75, 500),
                                 window_shape='gaussian')

    # Query every highest-level annotation (presumably utterances) of this
    # discourse, preloading discourse and speaker relationships.
    annotation_type = corpus_context.hierarchy.highest
    utt = getattr(corpus_context, annotation_type)
    query = corpus_context.query_graph(utt)
    query = query.filter(utt.discourse.name == sound_file.discourse.name)
    query = query.preload(utt.discourse, utt.speaker)
    segments = [(u.begin, u.end, u.channel) for u in query.all()]

    tracks = analyze_long_file(wav_path, segments, pitch_function, padding=1)

    for key, track in tracks.items():
        corpus_context.save_pitch(sound_file, track,
                                  channel=key[-1], source=algorithm)
Beispiel #5
0
def analyze_formants(corpus_context, sound_file, stop_check=None):
    """Compute formant tracks per utterance of ``sound_file`` and save them.

    Uses Praat when ``corpus_context.config.formant_algorithm`` is 'praat'
    and a Praat path is configured; otherwise falls back to the acousticsim
    implementation.  Silently returns when the audio file is absent, tracks
    already exist, or the Praat path is unset.
    """
    wav_path = os.path.expanduser(sound_file.vowel_filepath)
    if not os.path.exists(wav_path):
        return
    algorithm = corpus_context.config.formant_algorithm
    if corpus_context.has_formants(sound_file.discourse.name, algorithm):
        return
    if algorithm == 'praat':
        praat_path = getattr(corpus_context.config, 'praat_path', None)
        if praat_path is None:
            return  # praat requested but no binary configured
        formant_function = partial(PraatFormants, praatpath=praat_path,
                                   max_freq=5500, num_formants=5,
                                   win_len=0.025, time_step=0.01)
    else:
        formant_function = partial(ASFormants, freq_lims=(0, 5500),
                                   time_step=0.01, num_formants=5,
                                   win_len=0.025, window_shape='gaussian')

    # Query every highest-level annotation (presumably utterances) of this
    # discourse and collect (begin, end, channel) triples.
    annotation_type = corpus_context.hierarchy.highest
    utt = getattr(corpus_context, annotation_type)
    query = corpus_context.query_graph(utt)
    query = query.filter(utt.discourse.name == sound_file.discourse.name)
    segments = [(u.begin, u.end, u.channel) for u in query.all()]

    tracks = analyze_long_file(wav_path, segments, formant_function, padding=1)
    for key, track in tracks.items():
        corpus_context.save_formants(sound_file, track,
                                     channel=key[-1], source=algorithm)
    corpus_context.sql_session.flush()