Example #1
    def load_textgrid(self, path):
        tg = TextGrid()
        try:
            tg.read(path)
        except ValueError as e:
            raise TextGridError('The file {} could not be parsed: {}'.format(
                path, str(e)))
        return tg
Example #2
def guess_textgrid_format(path):
    """
    Given a directory, tries to guess what format the textgrids are in

    Parameters
    ----------
    path : str
        the path of the directory containing the textgrids

    Returns
    -------
    str or None
        textgrid format or None if file is not textgrid and directory doesn't contain textgrids
    """
    from .inspect import inspect_labbcat, inspect_mfa, inspect_fave
    if os.path.isdir(path):
        counts = {'mfa': 0, 'labbcat': 0, 'fave': 0, None: 0}
        for root, subdirs, files in os.walk(path):
            for f in files:
                if not f.lower().endswith('.textgrid'):
                    continue
                tg_path = os.path.join(root, f)
                tg = TextGrid()
                try:
                    tg.read(tg_path)
                except ValueError as e:
                    raise TextGridError(
                        'The file {} could not be parsed: {}'.format(
                            tg_path, str(e)))

                labbcat_parser = inspect_labbcat(tg_path)
                mfa_parser = inspect_mfa(tg_path)
                fave_parser = inspect_fave(tg_path)
                if labbcat_parser._is_valid(tg):
                    counts['labbcat'] += 1
                elif mfa_parser._is_valid(tg):
                    counts['mfa'] += 1
                elif fave_parser._is_valid(tg):
                    counts['fave'] += 1
                else:
                    counts[None] += 1
        return max(counts.keys(), key=lambda x: counts[x])
    elif path.lower().endswith('.textgrid'):
        tg = TextGrid()
        tg.read(path)
        labbcat_parser = inspect_labbcat(path)
        mfa_parser = inspect_mfa(path)
        fave_parser = inspect_fave(path)
        if labbcat_parser._is_valid(tg):
            return 'labbcat'
        elif mfa_parser._is_valid(tg):
            return 'mfa'
        elif fave_parser._is_valid(tg):
            return 'fave'
    return None
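
For orientation, a minimal usage sketch; the corpus path here is hypothetical and the function is assumed to be importable from the module the code above lives in:

# Hypothetical corpus directory; guess_textgrid_format also accepts a path
# to a single .TextGrid file.
corpus_dir = '/data/my_corpus'
fmt = guess_textgrid_format(corpus_dir)  # 'labbcat', 'mfa', 'fave' or None
if fmt is None:
    print('No recognizable TextGrids found in {}'.format(corpus_dir))
else:
    print('TextGrids appear to be in {} format'.format(fmt))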
Example #3
    def parse_discourse(self, path, types_only=False):
        '''
        Parse a TextGrid file for later importing.

        Parameters
        ----------
        path : str
            Path to TextGrid file
        types_only : bool
            Flag for whether to only save type information, ignoring the token information

        Returns
        -------
        :class:`~polyglotdb.io.discoursedata.DiscourseData`
            Parsed data from the file
        '''
        tg = self.load_textgrid(path)

        if len(tg.tiers) != len(self.annotation_types):
            raise TextGridError(
                "The TextGrid ({}) does not have the same number of interval tiers as the number of annotation types specified."
                .format(path))
        name = os.path.splitext(os.path.split(path)[1])[0]

        if self.speaker_parser is not None:
            speaker = self.speaker_parser.parse_path(path)
        else:
            speaker = None

        for a in self.annotation_types:
            a.reset()
            a.speaker = speaker

        #Parse the tiers
        for i, ti in enumerate(tg.tiers):

            if isinstance(ti, IntervalTier):
                self.annotation_types[i].add(
                    ((x.mark.strip(), x.minTime, x.maxTime) for x in ti))
            else:
                self.annotation_types[i].add(
                    ((x.mark.strip(), x.time) for x in ti))
        pg_annotations = self._parse_annotations(types_only)

        data = DiscourseData(name, pg_annotations, self.hierarchy)
        for a in self.annotation_types:
            a.reset()

        data.wav_path = find_wav_path(path)
        return data
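
The tier loop above only relies on each interval exposing mark, minTime and maxTime, and each point exposing mark and time. A small sketch of those attributes, assuming the textgrid package's IntervalTier/PointTier API as it is used in the parsers above; the tier contents are made up:

# Build one interval tier and one point tier in memory and read them back
# the same way parse_discourse does.
from textgrid import IntervalTier, PointTier

words = IntervalTier('words', minTime=0.0, maxTime=1.0)
words.add(0.0, 0.5, 'hello')

tones = PointTier('tones', minTime=0.0, maxTime=1.0)
tones.add(0.25, 'H*')

print([(x.mark.strip(), x.minTime, x.maxTime) for x in words])  # interval tier
print([(x.mark.strip(), x.time) for x in tones])                # point tier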
Example #4
    def parse_discourse(self, path, types_only=False):
        '''
        Parse a TextGrid file for later importing.

        Parameters
        ----------
        path : str
            Path to TextGrid file
        types_only : bool
            Flag for whether to only save type information, ignoring the token information

        Returns
        -------
        :class:`~polyglotdb.io.discoursedata.DiscourseData`
            Parsed data from the file
        '''
        tg = TextGrid()
        tg.read(path)
        if not self._is_valid(tg):
            raise TextGridError(
                'The file "{}" cannot be parsed by the MFA parser.'.format(path))
        name = os.path.splitext(os.path.split(path)[1])[0]

        if self.speaker_parser is not None:
            speaker = self.speaker_parser.parse_path(path)
        else:
            speaker = None

        for a in self.annotation_types:
            a.reset()
            a.speaker = speaker

        #Parse the tiers
        for i, ti in enumerate(tg.tiers):
            if ti.name == 'words':
                self.annotation_types[0].add(
                    ((x.mark.strip(), x.minTime, x.maxTime) for x in ti))
            elif ti.name == 'phones':
                self.annotation_types[1].add(
                    ((x.mark.strip(), x.minTime, x.maxTime) for x in ti))
        pg_annotations = self._parse_annotations(types_only)

        data = DiscourseData(name, pg_annotations, self.hierarchy)
        for a in self.annotation_types:
            a.reset()

        data.wav_path = find_wav_path(path)
        return data
Example #5
    def parse_discourse(self, path, types_only=False):
        '''
        Parse a TextGrid file for later importing.

        Parameters
        ----------
        path : str
            Path to TextGrid file
        types_only : bool
            Flag for whether to only save type information, ignoring the token information

        Returns
        -------
        :class:`~polyglotdb.io.discoursedata.DiscourseData`
            Parsed data from the file
        '''
        tg = self.load_textgrid(path)
        if not self._is_valid(tg):
            raise TextGridError(
                'The file "{}" cannot be parsed by the LaBB-CAT parser.'.format(path))
        name = os.path.splitext(os.path.split(path)[1])[0]
        # The speaker is always taken from the directory structure here,
        # so the None branch below is never taken
        self.speaker_parser = DirectorySpeakerParser()
        if self.speaker_parser is not None:
            speaker = self.speaker_parser.parse_path(path)
        else:
            speaker = None

        for a in self.annotation_types:
            a.reset()
            a.speaker = speaker

        #Parse the tiers
        for i, ti in enumerate(tg.tiers):
            if ti.name.startswith('transcrip'):
                self.annotation_types[0].add(
                    ((x.mark.strip(), x.minTime, x.maxTime) for x in ti))
            elif ti.name.startswith('segment'):
                self.annotation_types[1].add(
                    ((x.mark.strip(), x.minTime, x.maxTime) for x in ti))
        pg_annotations = self._parse_annotations(types_only)

        data = DiscourseData(name, pg_annotations, self.hierarchy)
        for a in self.annotation_types:
            a.reset()

        data.wav_path = find_wav_path(path)
        return data
Example #6
    def parse_discourse(self, path, types_only=False):
        '''
        Parse a TextGrid file for later importing.

        Parameters
        ----------
        path : str
            Path to TextGrid file
        types_only : bool
            Flag for whether to only save type information, ignoring the token information

        Returns
        -------
        :class:`~polyglotdb.io.discoursedata.DiscourseData`
            Parsed data from the file
        '''
        tg = TextGrid()
        tg.read(path)
        if not self._is_valid(tg):
            raise TextGridError(
                'The file "{}" cannot be parsed by the FAVE parser.'.format(path))
        name = os.path.splitext(os.path.split(path)[1])[0]

        # Stash the configured annotation types; per-speaker tiers are built
        # below and the original list is restored before returning
        dummy = self.annotation_types
        self.annotation_types = []

        #Parse the tiers
        for i, ti in enumerate(tg.tiers):
            try:
                speaker, type = ti.name.split(' - ')
            except ValueError:
                continue
            at = OrthographyTier(type, type)
            at.speaker = speaker
            at.add(((x.mark.strip(), x.minTime, x.maxTime) for x in ti))
            self.annotation_types.append(at)
        pg_annotations = self._parse_annotations(types_only)

        data = DiscourseData(name, pg_annotations, self.hierarchy)
        data.wav_path = find_wav_path(path)

        self.annotation_types = dummy

        return data
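
The loop above assumes the FAVE convention that every tier is named '<speaker> - <type>' (for example 'Sally - word', 'Sally - phone'); any tier whose name does not split cleanly on ' - ' is skipped. A small illustration with made-up tier names:

tier_names = ['Sally - word', 'Sally - phone', 'notes']
for name in tier_names:
    try:
        speaker, tier_type = name.split(' - ')
    except ValueError:
        # 'notes' has no ' - ' separator, so the unpacking fails and
        # the tier is skipped, mirroring the parser above
        continue
    print(speaker, tier_type)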
Example #7
    def load_textgrid(self, path):
        """
        Load a TextGrid file

        Parameters
        ----------
        path : str
            Path to the TextGrid file

        Returns
        -------
        :class:`~textgrid.TextGrid`
            TextGrid object
        """
        tg = TextGrid()
        try:
            tg.read(path)
        except ValueError as e:
            raise TextGridError('The file {} could not be parsed: {}'.format(
                path, str(e)))
        return tg
Example #8
    def parse_discourse(self, path, types_only=False):
        '''
        Parse a TextGrid file for later importing.

        Parameters
        ----------
        path : str
            Path to TextGrid file
        types_only : bool
            Flag for whether to only save type information, ignoring the token information

        Returns
        -------
        :class:`~polyglotdb.io.discoursedata.DiscourseData`
            Parsed data from the file
        '''
        tg = TextGrid()
        try:
            tg.read(path)
        except Exception:
            print('There was an issue parsing {}:'.format(path))
            raise

        multiple_speakers, is_valid = self._is_valid(tg)

        if not is_valid:
            raise TextGridError('This file ({}) cannot be parsed by the {} parser.'.format(path, self.name))
        name = os.path.splitext(os.path.split(path)[1])[0]

        # Format 1
        if not multiple_speakers:
            if self.speaker_parser is not None:
                speaker = self.speaker_parser.parse_path(path)
            else:
                speaker = None

            for a in self.annotation_types:
                a.reset()
                a.speaker = speaker

            # Parse the tiers
            for i, ti in enumerate(tg.tiers):
                if ti.name.lower().startswith(self.word_label):
                    self.annotation_types[0].add(((x.mark.strip(), x.minTime, x.maxTime) for x in ti))
                elif ti.name.lower().startswith(self.phone_label):
                    self.annotation_types[1].add(((x.mark.strip(), x.minTime, x.maxTime) for x in ti))
            pg_annotations = self._parse_annotations(types_only)

            data = DiscourseData(name, pg_annotations, self.hierarchy)
            for a in self.annotation_types:
                a.reset()

        # Format 2
        else:
            dummy = self.annotation_types
            self.annotation_types = []
            wav_path = find_wav_path(path)
            speaker_channel_mapping = {}
            if wav_path is not None:
                n_channels = get_n_channels(wav_path)
                if n_channels > 1:
                    # Figure speaker-channel mapping
                    n_tiers = 0
                    for ti in tg.tiers:
                        try:
                            speaker, type = ti.name.split(' - ')
                        except ValueError:
                            continue
                        n_tiers += 1
                    ind = 0
                    cutoffs = [x / n_channels for x in range(1, n_channels)]
                    for ti in tg.tiers:
                        try:
                            if self.speaker_first:
                                speaker, type = ti.name.split(' - ')
                            else:
                                type, speaker = ti.name.split(' - ')
                        except ValueError:
                            continue
                        if speaker in speaker_channel_mapping:
                            continue
                        for i, c in enumerate(cutoffs):
                            if ind / n_channels < c:
                                speaker_channel_mapping[speaker] = i
                                break
                        else:
                            speaker_channel_mapping[speaker] = i + 1
                        ind += 1

            # Parse the tiers
            for ti in tg.tiers:
                try:
                    if self.speaker_first:
                        speaker, type = ti.name.split(' - ')
                    else:
                        type, speaker = ti.name.split(' - ')
                except ValueError:
                    continue
                if type.lower().startswith(self.word_label):
                    type = 'word'
                elif type.lower().startswith(self.phone_label):
                    type = 'phone'
                if len(ti) == 1 and ti[0].mark.strip() == '':
                    continue
                at = OrthographyTier(type, type)
                at.speaker = speaker
                at.add(((x.mark.strip(), x.minTime, x.maxTime) for x in ti))
                self.annotation_types.append(at)
            pg_annotations = self._parse_annotations(types_only)
            data = DiscourseData(name, pg_annotations, self.hierarchy)
            data.speaker_channel_mapping = speaker_channel_mapping

            self.annotation_types = dummy

        data.wav_path = find_wav_path(path)
        return data
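
The speaker-to-channel assignment above spreads the distinct speakers evenly over the channels of the associated wav file: with n_channels channels the cutoffs are 1/n, 2/n, ..., and each new speaker (in tier order) goes to the channel whose cutoff its ind/n_channels ratio falls below, with any extra speakers landing on the last channel. A standalone sketch of that arithmetic with hypothetical speakers and a stereo file:

# Same cutoff logic as above, isolated from the TextGrid handling.
n_channels = 2
speakers = ['Sally', 'Bob', 'Ann']

speaker_channel_mapping = {}
cutoffs = [x / n_channels for x in range(1, n_channels)]  # [0.5] for stereo
ind = 0
for speaker in speakers:
    if speaker in speaker_channel_mapping:
        continue
    for i, c in enumerate(cutoffs):
        if ind / n_channels < c:
            speaker_channel_mapping[speaker] = i
            break
    else:
        # past every cutoff: assign to the last channel
        speaker_channel_mapping[speaker] = i + 1
    ind += 1

print(speaker_channel_mapping)  # {'Sally': 0, 'Bob': 1, 'Ann': 1}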
Example #9
    def parse_discourse(self, path, types_only=False):
        '''
        Parse a TextGrid file for later importing.

        Parameters
        ----------
        path : str
            Path to TextGrid file
        types_only : bool
            Flag for whether to only save type information, ignoring the token information

        Returns
        -------
        :class:`~polyglotdb.io.discoursedata.DiscourseData`
            Parsed data from the file
        '''
        tg = TextGrid()
        tg.read(path)
        if not self._is_valid(tg):
            raise TextGridError(
                'The file "{}" cannot be parsed by the FAVE parser.'.format(path))
        name = os.path.splitext(os.path.split(path)[1])[0]

        dummy = self.annotation_types
        self.annotation_types = []
        wav_path = find_wav_path(path)
        speaker_channel_mapping = {}
        if wav_path is not None:
            n_channels = get_n_channels(wav_path)
            if n_channels > 1:
                #Figure speaker-channel mapping
                n_tiers = 0
                for ti in tg.tiers:
                    try:
                        speaker, type = ti.name.split(' - ')
                    except ValueError:
                        continue
                    n_tiers += 1
                ind = 0
                cutoffs = [x / n_channels for x in range(1, n_channels)]
                print(cutoffs)
                for ti in tg.tiers:
                    try:
                        speaker, type = ti.name.split(' - ')
                    except ValueError:
                        continue
                    if speaker in speaker_channel_mapping:
                        continue
                    print(ind / n_channels)
                    for i, c in enumerate(cutoffs):
                        print(c)
                        if ind / n_channels < c:
                            speaker_channel_mapping[speaker] = i
                            break
                    else:
                        speaker_channel_mapping[speaker] = i + 1
                    ind += 1

        #Parse the tiers
        for ti in tg.tiers:
            try:
                speaker, type = ti.name.split(' - ')
            except ValueError:
                continue
            if len(ti) == 1 and ti[0].mark.strip() == '':
                continue
            at = OrthographyTier(type, type)
            at.speaker = speaker
            at.add(((x.mark.strip(), x.minTime, x.maxTime) for x in ti))
            self.annotation_types.append(at)
        pg_annotations = self._parse_annotations(types_only)

        data = DiscourseData(name, pg_annotations, self.hierarchy)
        data.speaker_channel_mapping = speaker_channel_mapping
        data.wav_path = wav_path

        self.annotation_types = dummy

        return data
Example #10
    def parse_discourse(self, path, types_only=False):
        """
        Parse a TextGrid file for later importing.

        Parameters
        ----------
        path : str
            Path to TextGrid file
        types_only : bool
            Flag for whether to only save type information, ignoring the token information

        Returns
        -------
        :class:`~polyglotdb.io.discoursedata.DiscourseData`
            Parsed data from the file
        """
        tg = self.load_textgrid(path)

        if len(tg.tiers) != len(self.annotation_tiers):
            raise TextGridError(
                "The TextGrid ({}) does not have the same number of interval tiers as the number of annotation types specified."
                .format(path))
        name = os.path.splitext(os.path.split(path)[1])[0]

        if self.speaker_parser is not None:
            speaker = self.speaker_parser.parse_path(path)
        else:
            speaker = None

        for a in self.annotation_tiers:
            a.reset()
            a.speaker = speaker

        # Parse the tiers
        for i, ti in enumerate(tg.tiers):

            if isinstance(ti, IntervalTier):
                self.annotation_tiers[i].add(
                    ((x.mark.strip(), x.minTime, x.maxTime) for x in ti))
            else:
                self.annotation_tiers[i].add(
                    ((x.mark.strip(), x.time) for x in ti))

        is_empty_textgrid = True

        for t in self.annotation_tiers:
            for interval in t:
                if isinstance(interval, Orthography):
                    if interval.label != "":
                        is_empty_textgrid = False
                        break
                if isinstance(interval, Transcription):
                    if interval._list != []:
                        is_empty_textgrid = False
                        break
        if is_empty_textgrid:
            return None

        pg_annotations = self._parse_annotations(types_only)

        data = DiscourseData(name, pg_annotations, self.hierarchy)
        for a in self.annotation_tiers:
            a.reset()
        data.wav_path = find_wav_path(path)
        return data