Example 1
def mlf2PhonemesAndTsList(inputFileName):
    '''
    Parse the output of alignment in MLF format (with words).
    Output: phonemes with begin and end timestamps.

    TODO: change extension automatically from txt to mlf
    '''

    allLines = loadTextFile(inputFileName)

    listPhonemesAndTs = []
    prevStartTime = -1

    # when reading lines from MLF, skip first 2 and last
    for line in allLines[2:-1]:

        tokens = line.split(" ")

        # HTK timestamps are in units of 100 ns; divide by 10^7 to get seconds
        startTime = float(tokens[0]) / 10000000

        endTime = float(tokens[1]) / 10000000

        # Praat does not allow inserting a new token with the same timestamp.
        # This happens when the previous token was 'sp', so remove it and insert the current one.
        if (prevStartTime == startTime):
            listPhonemesAndTs.pop()

        phoneme = tokens[2].strip()

        listPhonemesAndTs.append([startTime, endTime, phoneme])

        # remember startTime
        prevStartTime = startTime

    return listPhonemesAndTs
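
To illustrate the loop above, here is a minimal sketch on an in-memory MLF fragment; the file content is hypothetical, and the timestamps follow the HTK convention of 100-ns units:

sampleLines = [
    '#!MLF!#',
    '"*/recording.lab"',
    '0 2000000 sil',
    '2000000 4500000 a',
    '4500000 4500000 sp',
    '4500000 6000000 b',
    '.',
]

listPhonemesAndTs = []
prevStartTime = -1
for line in sampleLines[2:-1]:
    tokens = line.split(" ")
    startTime = float(tokens[0]) / 10000000
    endTime = float(tokens[1]) / 10000000
    if prevStartTime == startTime:  # the zero-length 'sp' shares its start with the next token
        listPhonemesAndTs.pop()
    listPhonemesAndTs.append([startTime, endTime, tokens[2].strip()])
    prevStartTime = startTime

print(listPhonemesAndTs)
# [[0.0, 0.2, 'sil'], [0.2, 0.45, 'a'], [0.45, 0.6, 'b']]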
Example 2
    def _loadSyllables(self, pathToSymbTrFile):
             
        allLines = loadTextFile(pathToSymbTrFile)
        
        currSyllable = None
        currSyllTotalDuration = None
        
        # skip the first (header) line
        for i in range(1, len(allLines)):
            
            line = allLines[i]
            line = line.replace(os.linesep, '')  # remove line ending in an os-independent way
            line = line.replace('\r', '')
            
            tokens = line.split("\t")
            if len(tokens) < 12:
                print "line has fewer than 12 tokens, no syllable column"
                sys.exit()
            
            # sanity check: enforce MINIMAL_DURATION_UNIT for embellishments.
            # hack: clamp unreasonably small notes to the minimal duration; it does not matter much
            if float(tokens[7]) > MINIMAL_DURATION_UNIT and tokens[1] == '8':
                tokens[7] = MINIMAL_DURATION_UNIT


            currDuration = float(tokens[6]) / float(tokens[7]) * MINIMAL_DURATION_UNIT
                
            currSyllable, currSyllTotalDuration = self.parseCurrTxtToken(currSyllable, currSyllTotalDuration, tokens, currDuration)
            
            
        #end parsing loop
            
        # store last
        currSyllable.setDuration(currSyllTotalDuration)
        self.listSyllables.append(currSyllable)
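
The duration computation above can be checked in isolation. A minimal sketch, assuming tokens[6] and tokens[7] hold the numerator and denominator of the note value, with a hypothetical MINIMAL_DURATION_UNIT:

MINIMAL_DURATION_UNIT = 64  # hypothetical value

numerator, denominator = '1', '8'  # an eighth note, as read from columns 6 and 7
currDuration = float(numerator) / float(denominator) * MINIMAL_DURATION_UNIT
print(currDuration)  # 8.0 -> an eighth note spans 8 minimal duration units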
Example 3
    def _loadSectionBoundaries(self, URISectionFIle):
        if not os.path.isfile(URISectionFIle):
            sys.exit("no file {}".format(URISectionFIle))

        ext = os.path.splitext(os.path.basename(URISectionFIle))[1]
        if ext == '.tsv':
            allLines = loadTextFile(URISectionFIle)

            for line in allLines[1:]:
                # triples of section name, start note number, end note number
                tokens = line.strip().split("\t")
                if not len(tokens) == 3:
                    sys.exit(
                        "tokens in line {} from file {} are not 3. make sure \\t (tabs) are used"
                        .format(line, URISectionFIle))

                tmpTriplet = tokens[0], int(tokens[1]), int(tokens[2])
                self.sectionboundaries.append(tmpTriplet)
        ######################
        elif ext == '.json':

            with open(URISectionFIle) as b:
                scoreAnno = json.load(b)
            scoreSectionAnnos = scoreAnno['sections']

            for section in scoreSectionAnnos:
                tmpTriplet = section['name'], int(section['startNote']), int(
                    section['endNote'])
                self.sectionboundaries.append(tmpTriplet)
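
For orientation, a minimal sketch of the two section-boundary formats handled above; the section name and note numbers are hypothetical:

# .tsv: a header line, then tab-separated triples 'name<TAB>startNote<TAB>endNote'
sampleTsvLine = "ZEMIN\t1\t24"
tokens = sampleTsvLine.strip().split("\t")
print((tokens[0], int(tokens[1]), int(tokens[2])))  # ('ZEMIN', 1, 24)

# .json: an object with a 'sections' list of {'name', 'startNote', 'endNote'} entries
scoreAnno = {'sections': [{'name': 'ZEMIN', 'startNote': 1, 'endNote': 24}]}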
Example 4
    def _loadSyllables(self, pathToSymbTrFile):

        allLines = loadTextFile(pathToSymbTrFile)

        # skip first line

        for line in allLines[1:]:
            line = line.replace(os.linesep,
                                '')  # remove line ending in an os-independent way
            line = line.replace('\r', '')

            tokens = line.split("\t")

            if len(tokens) == 12:
                # HANGS HERE.
                # skip placeholder dots and instrumental-section labels
                # (possibly followed by trailing spaces, or with an escaped unicode character)
                excludedLabels = (u'.', u'SAZ', u'ARANA\u011eME',
                                  u'ARANA\\u011eME', u'ARANAGME')
                if tokens[11].rstrip() not in excludedLabels:
                    # note number and syllable
                    text = tokens[11].replace('_', ' ')
                    tupleSyllable = int(tokens[0]), text

                    self.listSyllables.append(tupleSyllable)
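
A minimal sketch of that filter on a single hypothetical 12-column row, where column 0 is the note number, column 11 the lyrics cell, and '_' stands for a space:

tokens = ['42'] + [''] * 10 + ['a_man']  # hypothetical 12-column SymbTr row
excludedLabels = (u'.', u'SAZ', u'ARANA\u011eME', u'ARANA\\u011eME', u'ARANAGME')
if tokens[11].rstrip() not in excludedLabels:
    text = tokens[11].replace('_', ' ')
    print((int(tokens[0]), text))  # (42, 'a man')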
Example 5
def readLookupTable(withSynthesis):
    '''
    read the lookup table from file
    '''
    if not withSynthesis:
        lookupTableURI = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'grapheme2METUphonemeLookupTable')
    else:
        lookupTableURI = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'grapheme2METUphonemeLookupTableSYNTH')

    lookupTableRaw = loadTextFile(lookupTableURI)
    lookupTable = dict()
    for lineTable in lookupTableRaw:
        tokens = lineTable.split("\t")
        grapheme = tokens[0]
        if not isinstance(grapheme, unicode):
            grapheme = unicode(grapheme, 'utf-8')
        # entries given as 4 hex digits (e.g. '011e') are raw unicode code points:
        # prepend '\u' and decode the escape sequence
        if len(grapheme) == 4 and grapheme[0].isdigit():
            grapheme = "\u" + grapheme
            grapheme = grapheme.decode('unicode-escape')

        lookupTable[grapheme] = tokens[1].rstrip()

    return lookupTable
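
A minimal sketch of the per-line parsing (Python 2, matching the snippet), with a hypothetical two-entry table; '011e' stands for the code point of 'Ğ':

lookupTable = dict()
for lineTable in [u'a\tA', u'011e\tG']:  # hypothetical tab-separated entries
    tokens = lineTable.split("\t")
    grapheme = tokens[0]
    if len(grapheme) == 4 and grapheme[0].isdigit():
        grapheme = ("\\u" + grapheme).decode('unicode-escape')
    lookupTable[grapheme] = tokens[1].rstrip()

print(lookupTable)  # {u'a': u'A', u'\u011e': u'G'}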
Example 6
    def _loadsectionTimeStamps(self, URILinkedSectionsFile):

        if not os.path.isfile(URILinkedSectionsFile):
            sys.exit("no file {}".format(URILinkedSectionsFile))

        ext = os.path.splitext(os.path.basename(URILinkedSectionsFile))[1]
        if ext == '.txt' or ext == '.tsv':
            lines = loadTextFile(URILinkedSectionsFile)

            for line in lines:
                tokens = line.split()

                self.beginTs.append(float(tokens[0]))
                self.endTs.append(float(tokens[1]))
                self.sectionNamesSequence.append(tokens[2])

                # WORKAROUND for section mapping: read the mapping index from the
                # 4th field of the .annotations file. sanity check:
                # if len(tokens) == 4:
                #     self.sectionIndices.append(int(tokens[3]))
#####################
        elif ext == '.json':

            with open(URILinkedSectionsFile) as b:
                sectionLinks = json.load(b)

            sectionAnnos = sectionLinks['annotations']
            for sectionAnno in sectionAnnos:

                # 'time' values may come wrapped in lists; strip brackets from the string form
                beginTimeStr = str(sectionAnno['time'][0])
                beginTimeStr = beginTimeStr.replace("[", "")
                beginTimeStr = beginTimeStr.replace("]", "")

                endTimeStr = str(sectionAnno['time'][1])
                endTimeStr = endTimeStr.replace("[", "")
                endTimeStr = endTimeStr.replace("]", "")

                self.beginTs.append(float(beginTimeStr))
                self.endTs.append(float(endTimeStr))
                self.sectionNamesSequence.append(str(sectionAnno['name']))
        else:
            sys.exit("section annotation file {} has not know file extension.".
                     format(URILinkedSectionsFile))

        # match automatically section names from sectionLinks to scoreSections
        indices = []
        s1 = []
        for s in self.makamScore.sectionToLyricsMap:
            s1.append(s[0])
        self.sectionIndices = matchSections(s1, self.sectionNamesSequence,
                                            indices)

        if len(self.sectionIndices) != len(self.sectionNamesSequence):
            sys.exit(
                "number of sections and number of matched sections are not the same!")
Example 7
def mlf2WordAndTsList(inputFileName):
    '''
    Parse the output of alignment in MLF format (with words).
    Output: words with begin and end timestamps.
    NOTE: a line has 5 tokens if no -o option is set on HVite.
    TODO: change extension automatically from txt to mlf
    '''

    extractedWordList = []

    LENGTH_TOKENS_NEW_WORD = 5

    allLines = loadTextFile(inputFileName)

    listWordsAndTs = allLines[2:-1]

    currentTokenIndex = 0
    tokens = listWordsAndTs[currentTokenIndex].split(" ")

    while currentTokenIndex < len(listWordsAndTs):

        # get begin ts (HTK timestamps are in 100-ns units; divide by 10^7 for seconds)
        startTime = float(tokens[0]) / 10000000
        wordMETU = tokens[-1].strip()

        # move to next
        prevTokens = tokens
        currentTokenIndex += 1

        # sanity check
        if currentTokenIndex >= len(listWordsAndTs):
            endTime = float(prevTokens[1]) / 10000000
            extractedWordList.append([startTime, endTime, wordMETU])

            break

        tokens = listWordsAndTs[currentTokenIndex].split(" ")

        # fast-forward over phonemes until the end of the word
        while (len(tokens) == LENGTH_TOKENS_NEW_WORD - 1
               and currentTokenIndex < len(listWordsAndTs)):

            # end of word is last phoneme before 'sp'
            if tokens[2] == "sp":
                # move to next
                currentTokenIndex += 1
                if currentTokenIndex < len(listWordsAndTs):
                    tokens = listWordsAndTs[currentTokenIndex].split(" ")

                break

            prevTokens = tokens
            currentTokenIndex += 1
            if currentTokenIndex >= len(listWordsAndTs):
                break
            tokens = listWordsAndTs[currentTokenIndex].split(" ")

        # end of word. after inner while loop
        endTime = float(prevTokens[1]) / 10000000

        extractedWordList.append([startTime, endTime, wordMETU])

    return extractedWordList
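
A minimal sketch of the word-level MLF layout this function expects: a new word starts on a 5-token line (begin, end, phoneme, score, word) and its remaining phonemes follow on 4-token lines until an 'sp'; the content below is hypothetical.

sampleLines = [
    '#!MLF!#',
    '"*/recording.lab"',
    '0 2000000 g -85.0 gel',    # a new word, 'gel', starts on this 5-token line
    '2000000 4000000 e -90.1',
    '4000000 6000000 l -88.2',
    '6000000 6000000 sp -0.0',  # 'sp' closes the word
    '.',
]
# mlf2WordAndTsList on a file with this content would return [[0.0, 0.6, 'gel']]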