Code Example #1
File: __init__.py  Project: dhillonr/db
    def load_speaker(self, uri):
        speaker = Annotation(uri=uri)
        path = self.get_audio_path(uri)
        with open(path, 'r') as fp:
            for line in fp:
                # each line: "start duration name _ _" (whitespace-separated)
                start, duration, name, _, _ = line.strip().split()
                start = float(start)
                end = start + float(duration)
                speaker[Segment(start, end)] = name
        return speaker.smooth()
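The method above fills a pyannote.core Annotation from a whitespace-separated label file. Below is a minimal, self-contained sketch of the same pattern; the uri and the in-memory records are made up, and note that smooth(), used throughout these examples, belongs to an older pyannote.core API whose closest equivalent in current releases appears to be Annotation.support().

from pyannote.core import Annotation, Segment

# made-up records in the "start duration name _ _" format parsed above
lines = [
    "0.0 2.5 alice x x",
    "2.5 1.0 bob x x",
    "3.5 2.0 alice x x",
]

speaker = Annotation(uri='example')
for line in lines:
    start, duration, name, _, _ = line.strip().split()
    start = float(start)
    speaker[Segment(start, start + float(duration))] = name

# sorted list of speaker names found in the annotation
print(speaker.labels())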
Code Example #2
    def run(self):

        # wav file duration
        wav = self.in_wav().path
        with contextlib.closing(wave.open(wav, 'r')) as f:
            frames = f.getnframes()
            rate = f.getframerate()
        duration = frames / float(rate)  # true division (avoids truncation on Python 2)
        extent = Segment(0., duration)

        with self.in_speaker().open('r') as fp:
            speaker = pyannote.core.json.load(fp)

        segmentation = Annotation()
        for segment, _ in speaker.itertracks():
            segmentation[segment] = 'speech'
        segmentation = segmentation.smooth()

        # fill the gaps between speech regions with a 'non_speech' label
        for gap in segmentation.get_timeline().gaps(extent):
            segmentation[gap] = 'non_speech'
        segmentation = segmentation.smooth()

        with self.out_put().open('w') as fp:
            pyannote.core.json.dump(segmentation, fp)
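The duration computation at the top of run() only needs the standard library. A standalone sketch (the helper name and the example path are hypothetical):

import contextlib
import wave

def wav_duration(path):
    """Return the duration of a wav file, in seconds."""
    with contextlib.closing(wave.open(path, 'r')) as f:
        frames = f.getnframes()
        rate = f.getframerate()
    # float() keeps this correct under Python 2's integer division as well
    return frames / float(rate)

# hypothetical file name
# print(wav_duration('recording.wav'))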
Code Example #3
File: thread.py  Project: Key-Zone/pyannote-video
    def __call__(self):

        # list of chronologically sorted list of shots
        graph = self._threads_graph()
        threads = [sorted(cc) for cc in nx.connected_components(graph)]

        annotation = Annotation()
        labelGenerator = getLabelGenerator()

        # chronologically sorted threads (based on their first shot)
        for thread in sorted(threads, key=lambda thread: thread[0]):
            label = next(labelGenerator)
            for shot in thread:
                annotation[shot] = label

        return annotation.smooth()
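The threading step above groups shots via networkx connected components and then labels each group. A toy sketch of that grouping (the graph edges are made up; getLabelGenerator() is a project-specific helper and is replaced here by plain string labels):

import networkx as nx

# made-up similarity graph: nodes are shot indices,
# edges connect visually similar shots
graph = nx.Graph()
graph.add_edges_from([(0, 2), (2, 4), (1, 3)])
graph.add_node(5)  # isolated shot: a thread of its own

# each connected component is one thread, with shots sorted chronologically
threads = [sorted(cc) for cc in nx.connected_components(graph)]

# threads sorted by their first shot, labeled 'thread0', 'thread1', ...
for i, thread in enumerate(sorted(threads, key=lambda thread: thread[0])):
    print('thread%d' % i, thread)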
Code Example #4
    def regression(self, reference, before, after, uem=None, uemified=False):

        _, before, errors_before = self.difference(
            reference, before, uem=uem, uemified=True)

        reference, after, errors_after = self.difference(
            reference, after, uem=uem, uemified=True)

        behaviors = Annotation(uri=reference.uri, modality=reference.modality)

        # common (up-sampled) timeline
        common_timeline = errors_after.get_timeline().union(
            errors_before.get_timeline())
        common_timeline = common_timeline.segmentation()

        # align 'before' errors on common timeline
        B = self._tagger(errors_before, common_timeline)

        # align 'after' errors on common timeline
        A = self._tagger(errors_after, common_timeline)

        for segment in common_timeline:

            old_errors = B.get_labels(segment, unique=False)
            new_errors = A.get_labels(segment, unique=False)

            n1 = len(old_errors)
            n2 = len(new_errors)
            n = max(n1, n2)

            match = np.zeros((n, n), dtype=int)
            for i1, e1 in enumerate(old_errors):
                for i2, e2 in enumerate(new_errors):
                    match[i1, i2] = self._match_errors(e1, e2)

            mapping = self.munkres.compute(2 - match)

            for i1, i2 in mapping:

                if i1 >= n1:
                    track = behaviors.new_track(segment,
                                                candidate=REGRESSION,
                                                prefix=REGRESSION)
                    behaviors[segment, track] = (
                        REGRESSION, None, new_errors[i2])

                elif i2 >= n2:
                    track = behaviors.new_track(segment,
                                                candidate=IMPROVEMENT,
                                                prefix=IMPROVEMENT)
                    behaviors[segment, track] = (
                        IMPROVEMENT, old_errors[i1], None)

                elif old_errors[i1][0] == MATCH_CORRECT:

                    if new_errors[i2][0] == MATCH_CORRECT:
                        track = behaviors.new_track(segment,
                                                    candidate=BOTH_CORRECT,
                                                    prefix=BOTH_CORRECT)
                        behaviors[segment, track] = (
                            BOTH_CORRECT, old_errors[i1], new_errors[i2])

                    else:
                        track = behaviors.new_track(segment,
                                                    candidate=REGRESSION,
                                                    prefix=REGRESSION)
                        behaviors[segment, track] = (
                            REGRESSION, old_errors[i1], new_errors[i2])

                else:

                    if new_errors[i2][0] == MATCH_CORRECT:
                        track = behaviors.new_track(segment,
                                                    candidate=IMPROVEMENT,
                                                    prefix=IMPROVEMENT)
                        behaviors[segment, track] = (
                            IMPROVEMENT, old_errors[i1], new_errors[i2])

                    else:
                        track = behaviors.new_track(segment,
                                                    candidate=BOTH_INCORRECT,
                                                    prefix=BOTH_INCORRECT)
                        behaviors[segment, track] = (
                            BOTH_INCORRECT, old_errors[i1], new_errors[i2])

        behaviors = behaviors.smooth()

        if uemified:
            return reference, before, after, behaviors
        else:
            return behaviors
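The per-segment matching above turns similarity scores into costs (2 - match) and solves the assignment with the munkres package, whose compute() takes a cost matrix and returns (row, column) index pairs. A small sketch of that step with a made-up score matrix:

import numpy as np
from munkres import Munkres

# made-up match scores between two 'before' errors and two 'after' errors
# (higher score = better match)
match = np.array([[2, 0],
                  [1, 2]], dtype=int)

# Munkres minimizes cost, so convert scores to costs as in the code above
cost = (2 - match).tolist()
mapping = Munkres().compute(cost)

for i1, i2 in mapping:
    print('before-error %d paired with after-error %d' % (i1, i2))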
Code Example #5
    def __call__(self, reference, hypothesis):

        if isinstance(reference, Annotation):
            reference = reference.get_timeline()

        if isinstance(hypothesis, Annotation):
            hypothesis = hypothesis.get_timeline()

        # over-segmentation
        over = Timeline(uri=reference.uri)
        prev_r = reference[0]
        intersection = []
        for r, h in reference.co_iter(hypothesis):

            if r != prev_r:
                intersection = sorted(intersection)
                for _, segment in intersection[:-1]:
                    over.add(segment)
                intersection = []
                prev_r = r

            segment = r & h
            intersection.append((segment.duration, segment))

        intersection = sorted(intersection)
        for _, segment in intersection[:-1]:
            over.add(segment)

        # under-segmentation
        under = Timeline(uri=reference.uri)
        prev_h = hypothesis[0]
        intersection = []
        for h, r in hypothesis.co_iter(reference):

            if h != prev_h:
                intersection = sorted(intersection)
                for _, segment in intersection[:-1]:
                    under.add(segment)
                intersection = []
                prev_h = h

            segment = h & r
            intersection.append((segment.duration, segment))

        intersection = sorted(intersection)
        for _, segment in intersection[:-1]:
            under.add(segment)

        # extent
        extent = reference.extent()

        # correct (neither under- nor over-segmented)
        correct = under.union(over).gaps(focus=extent)

        # frontier error (both under- and over-segmented)
        frontier = under.crop(over)

        # under-segmented
        not_over = over.gaps(focus=extent)
        only_under = under.crop(not_over)

        # over-segmented
        not_under = under.gaps(focus=extent)
        only_over = over.crop(not_under)

        status = Annotation(uri=reference.uri)
        for segment in correct:
            status[segment, '_'] = 'correct'
        for segment in frontier:
            status[segment, '_'] = 'frontier'
        for segment in only_over:
            status[segment, '_'] = 'over'
        for segment in only_under:
            status[segment, '_'] = 'under'

        return status.smooth()
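The final classification relies on Timeline set operations: union, gaps and crop. A self-contained sketch of those operations on made-up timelines (the segment boundaries are arbitrary):

from pyannote.core import Segment, Timeline

# made-up over- and under-segmented regions within a 10-second extent
over = Timeline([Segment(2, 3)], uri='example')
under = Timeline([Segment(2, 3), Segment(5, 6)], uri='example')
extent = Segment(0, 10)

# regions that are neither over- nor under-segmented
correct = under.union(over).gaps(extent)

# regions that are both over- and under-segmented
frontier = under.crop(over)

for segment in correct:
    print('correct', segment)
for segment in frontier:
    print('frontier', segment)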