Example #1
def _textgrid2sfs(txtlines, output_path):
    logger = logging.getLogger('mtts')
    textgrid_path = os.path.join(output_path, 'textgrid/mandarin_voice')
    sfs_path = os.path.join(output_path, 'sfs')
    csv_path = os.path.join(output_path, 'csv')
    os.makedirs(sfs_path, exist_ok=True)
    os.makedirs(csv_path, exist_ok=True)

    for line in txtlines:
        numstr, txt = line.split(' ', 1)
        textgrid_file = os.path.join(textgrid_path, numstr + '.TextGrid')
        csv_file = os.path.join(csv_path, numstr + '.csv')
        sfs_file = os.path.join(sfs_path, numstr + '.sfs')

        if os.path.exists(textgrid_file):
            # textgrid to csv
            tgrid = tg.read_textgrid(textgrid_file)
            tg.write_csv(tgrid, csv_file, sep=' ', header=False, meta=False)

            # csv to sfs
            total_list = []
            with open(csv_file) as fid:
                for csv_line in fid:
                    # columns: start, end, name, tier
                    csv_list = csv_line.strip().split(' ')
                    if csv_list[3] == 'phones':
                        total_list.append(_standard_sfs(csv_list))
            with open(sfs_file, 'w') as fid:
                for item in total_list:
                    fid.write(' '.join(item) + '\n')
        else:
            logger.warning('--Miss: %s' % textgrid_file)
            with open(os.path.join(output_path, 'error.log'), 'a+') as fid:
                fid.write('--Miss: %s \n' % textgrid_file)
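A hypothetical driver for the function above, shown only as a sketch: 'labels.txt' and './mtts_output' are placeholder paths, and the "<id> <transcript>" line format is the one implied by the split(' ', 1) call in the snippet.

import logging

logging.basicConfig(level=logging.INFO)

# Each line: "<id> <transcript>", one utterance per line (placeholder file).
with open('labels.txt', encoding='utf-8') as fid:
    txtlines = [line.strip() for line in fid if line.strip()]

_textgrid2sfs(txtlines, './mtts_output')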
Example #2
def analyze_sentiment():
    
    input_file, language, output_file = parse_arguments()

    # Import pattern module corresponding to language
    pattern = importlib.import_module(f'pattern.{language}')

    # Load file
    with open(input_file, 'rb') as f:
        blob = f.read()
    m = magic.Magic(mime_encoding=True)
    encoding = m.from_buffer(blob)
    tgrid = textgrid.read_textgrid(input_file, encoding)
    df = pd.DataFrame(tgrid)

    # Filter for Transcripts, remove empty
    df = df[df['tier'] == 'Transcript']
    df = df[df['name'] != '']

    # Calculate sentiment
    # TODO: figure out how to unpack tuples to multiple columns:
    df['sentiment_polarity'] = df.apply(lambda row: pattern.sentiment(row['name'])[0], axis=1)
    df['sentiment_subjectivity'] = df.apply(lambda row: pattern.sentiment(row['name'])[1], axis=1)

    # Save final result
    print(df.head())
    df.to_csv(output_file, index=False)
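The TODO above asks how to unpack the sentiment tuple into separate columns without calling pattern.sentiment twice per row. One way, sketched here using only the indexing the snippet already relies on, would replace the two apply calls above:

scores = df['name'].apply(pattern.sentiment)            # one call per row
df['sentiment_polarity'] = scores.apply(lambda s: s[0])
df['sentiment_subjectivity'] = scores.apply(lambda s: s[1])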
Example #3
def test_read_short():
    tgrid = textgrid.read_textgrid(example_file2)
    assert len(tgrid) == 1
    entry = tgrid[0]
    assert entry.start == 0
    assert entry.stop == 5.537098932314087
    assert entry.name == "z"
    assert entry.tier == "Mary"
Example #4
def test_read_long():
    tgrid = textgrid.read_textgrid(example_file1)
    assert len(tgrid) == 11
    entry = tgrid[0]
    assert entry.start == 0
    assert entry.stop == 5.537098932314087
    assert entry.name == ""
    entry2 = tgrid[-1]
    assert entry2.name == "cool"
    assert entry2.tier == "bell"
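A minimal sketch built only on the fields these tests assert: each entry returned by read_textgrid exposes .start, .stop, .name and .tier, so pulling out a single tier is a plain comprehension (example_file1 is the same fixture used above).

tgrid = textgrid.read_textgrid(example_file1)
bell_entries = [e for e in tgrid if e.tier == "bell"]
for e in bell_entries:
    print(e.start, e.stop, e.name)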
Example #5
def parse_tg(tgd, ad, uttd, labd):

    problemnums = [
        ".1", ".2", ".3", ".4", ".5", ".6", ".7", ".8", ".9", "1.0", "2.0",
        "3.0", "4.0", "5.0", "6.0", "7.0", "8.0", "9.0"
    ]

    # ------------------------------ CHANGE BASED ON TIER NAMES FOR SPEAKERS ----------------------
    speakers = ["one", "two"]

    counter = 0.101
    for file in sorted(os.listdir(tgd)):
        if file.endswith(".TextGrid"):
            print("Working on: ", file)
            # -----------------------------  CHANGE BASED ON FILE NAME ---------------------------
            dyad = file.split("C0")[1].split("_")[0]

            tg = textgrid.read_textgrid(os.path.join(tgd, file))
            df = pd.DataFrame(tg)

            for speaker in speakers:
                print("     Speaker: ", speaker)
                # -----------------------------  CHANGE BASED ON DYAD vs. MONOLOGUE ---------------------------
                audio_file = os.path.join(ad, dyad + "_" + speaker + ".wav")

                # data = df.loc[df["tier"] == (speaker + " [main]")]
                data = df.loc[df["tier"] == speaker]
                for index, row in data.iterrows():
                    # row["name"] is the text of the utterance
                    if row["name"] != "":
                        write_audio(audio_file, row['start'], row['stop'],
                                    counter, dyad, speaker, uttd)
                        write_lab(row["name"], counter, dyad, speaker, labd)

                        counter = round(counter + .001, 3)

                        for p in problemnums:
                            if str(counter) == p:
                                counter = round(counter + .001, 3)
                                break
                        # Keep bumping the counter until its string form has
                        # three digits after the decimal point, so values like
                        # 0.11 or 0.2, which would break the fixed-width IDs,
                        # are skipped.
                        while abs(decimal.Decimal(
                                str(counter)).as_tuple().exponent) < 3:
                            counter = round(counter + .001, 3)
Example #6
def extract_features(dataframe):
    inputs = []
    target = []
    segment_labels = []

    for i, (file, emotion) in enumerate(dataframe.values):
        script_path = '/scratch/speech/modularProsodyTagger/mod01.praat'
        index = file.rfind('/')
        basename = file[(index + 1):-4]
        directory = file[:60] + basename[:-5] + '/'
        endpoint = '/scratch/speech/textgrids/'
        cmd = 'praat --run {} {} {} {}'.format(script_path, directory,
                                               basename, endpoint)
        #os.system(cmd)

        tgrid_path = endpoint + basename + '_result.TextGrid'
        tgrid = textgrid.read_textgrid(tgrid_path)
        tgrid_df = pd.DataFrame(tgrid)

        indices = []
        labels = []

        sample_rate, data = wavfile.read(file)

        # Collect segment end points (in samples) from the 'silences' tier;
        # stop at the first entry that belongs to a different tier.
        for start, stop, name, tier in tgrid_df.values:
            if tier != 'silences':
                break
            else:
                indices.append(round(stop * sample_rate))
                labels.append(name)

        #data = data.tolist()
        data = [data[i:j] for i, j in zip(([0] + indices)[:-1], indices)]
        #print(data)

        inputs.append(data)
        target.append(encode[emotion])
        segment_labels.append(labels)

        print(i)

    return inputs, target, segment_labels
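A hypothetical call to the function above: the wav path and the 'neu' emotion key are placeholders, the module-level encode mapping is assumed to exist, and the hard-coded path slicing inside the function presumes the original corpus layout. The only point shown here is that each dataframe row must unpack as (file, emotion).

import pandas as pd

df = pd.DataFrame([('/scratch/speech/wavs/example_utt.wav', 'neu')],
                  columns=['file', 'emotion'])
inputs, targets, labels = extract_features(df)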
Example #7
def load_silences(silences_file, time_series):

    # Load file
    with open(silences_file, 'rb') as f:
        blob = f.read()
    m = magic.Magic(mime_encoding=True)
    encoding = m.from_buffer(blob)
    silences_tgrid = textgrid.read_textgrid(silences_file, encoding)
    silences = pd.DataFrame(silences_tgrid)

    # Add column
    def isSilent(timestamp):
        filt = (silences['start'] <= timestamp) & (timestamp <
                                                   silences['stop'])
        silence = silences.loc[filt, 'name']
        if len(silence) > 0:
            return silence.iat[0] == 'silent'
        return True

    time_series['silence'] = time_series.apply(
        lambda row: isSilent(row.timestamp), axis=1)
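Minimal usage sketch: load_silences expects a DataFrame with a 'timestamp' column and adds a boolean 'silence' column to it in place. The file name and timestamps here are placeholders.

import pandas as pd

time_series = pd.DataFrame({'timestamp': [0.25, 1.50, 3.75]})
load_silences('recording_silences.TextGrid', time_series)
print(time_series)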
Example #8
def link_uttid_start(tgd):
    problemnums = [
        ".1", ".2", ".3", ".4", ".5", ".6", ".7", ".8", ".9", "1.0", "2.0",
        "3.0", "4.0", "5.0", "6.0", "7.0", "8.0", "9.0"
    ]

    # ------------------------------ CHANGE BASED ON TIER NAMES FOR SPEAKERS ----------------------
    speakers = ["one", "two"]

    for file in sorted(os.listdir(tgd)):
        counter = 0.101
        if file.endswith(".TextGrid"):
            print("Working on: ", file)
            # next two lines take file name and pull out the identifier for the dyad / monologue speaker
            # -----------------------------  CHANGE BASED ON FILE NAME ---------------------------
            dyad = file.split("C0")[1].split("_")[0]
            tg = textgrid.read_textgrid(os.path.join(tgd, file))
            df = pd.DataFrame(tg)
            for speaker in speakers:
                print("     Speaker: ", speaker)
                data = df.loc[df["tier"] == speaker]
                for index, row in data.iterrows():
                    if row["name"] != "":
                        #write_start_to_uttID(row['start'], row['stop'], counter, dyad, speaker)
                        write_len(row["name"], counter, dyad, speaker,
                                  row['start'], row['stop'])

                        counter = round(counter + .001, 3)

                        for p in problemnums:
                            if str(counter) == p:
                                counter = round(counter + .001, 3)
                                break
                        while abs(
                                decimal.Decimal(
                                    str(counter)).as_tuple().exponent) < 3:
                            counter = round(counter + .001, 3)
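Both this function and parse_tg in Example #5 pad the utterance counter so that str(counter) always carries three decimal digits, skipping values such as 0.11 or 0.2 that would collapse to a shorter ID. A sketch of an alternative, not part of the original code: keep an integer counter and zero-pad it when formatting.

counter = 101
for _ in range(3):
    utt_id = '0.%03d' % counter   # '0.101', '0.102', '0.103', ...
    counter += 1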
Example #9
import textgrid as tg

tgrid = tg.read_textgrid(r'E:\Biaobei_Demo\000001.TextGrid', 'Phon')
for entry in tgrid:
    print(entry.name)
print(len(tgrid))
print('Done!')
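A small follow-up sketch that groups entries by tier, using only the .tier and .name fields shown in the other examples on this page; whether the one-argument read_textgrid call matches the module version used in this snippet is an assumption.

from collections import defaultdict

by_tier = defaultdict(list)
for entry in tg.read_textgrid(r'E:\Biaobei_Demo\000001.TextGrid'):
    by_tier[entry.tier].append(entry.name)
for tier, names in by_tier.items():
    print(tier, len(names))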
Example #10
"""
@License :   (C)Copyright 2019-2020
@Desc    :   gather statistics on complex info in TextGrids
"""

import textgrid as tg
import os

input_textgrid_dir = r'E:\006_TTS\biaobei\CMU_Demo'
save_result_file = r'E:\006_TTS\biaobei\CMU_Demo_complex.txt'

results = {}
for file_name in os.listdir(input_textgrid_dir):
    if file_name.find('.TextGrid') != -1:
        print(file_name)
        input_textgrid_file = os.path.join(input_textgrid_dir, file_name)
        ph_tiers = tg.read_textgrid(input_textgrid_file, 'phones')
        wd_tiers = tg.read_textgrid(input_textgrid_file, 'words')
        for wd_tier in wd_tiers:
            cur_word = wd_tier.name
            for punc in ['，', ',', '。', '.']:
                cur_word = cur_word.replace(punc, '')
            cur_phons = []
            for ph_tier in ph_tiers:
                if ph_tier.start >= wd_tier.start and ph_tier.stop <= wd_tier.stop:
                    cur_phons.append(ph_tier.name)
            if cur_word not in results:
                results[cur_word] = [' '.join(cur_phons)]
            else:
                if ' '.join(cur_phons) not in results[cur_word]:
                    results[cur_word].append(' '.join(cur_phons))
with open(save_result_file, 'w', encoding='utf-8') as wid:
    # The original snippet is cut off here; writing each word with its
    # pronunciation variants, one word per line, is an assumed completion.
    for word, pron_list in results.items():
        wid.write(word + '\t' + ' | '.join(pron_list) + '\n')
Example #11
def test_points():
    tgrid = textgrid.read_textgrid(example_file3)
    assert len(tgrid) == 2