Example #1
0
def random_mixed(supervision_manifest: Pathlike, feature_manifest: Pathlike,
                 output_cut_manifest: Pathlike, snr_range: Tuple[float, float],
                 offset_range: Tuple[float, float]):
    """
    Create a CutSet stored in OUTPUT_CUT_MANIFEST that contains supervision regions from SUPERVISION_MANIFEST
    and features supplied by FEATURE_MANIFEST. It first creates a trivial CutSet, splits it into two equal, randomized
    parts and mixes their features.
    The parameters of the mix are controlled via SNR_RANGE and OFFSET_RANGE.
    """
    # Build a trivial CutSet out of the two input manifests.
    supervisions = SupervisionSet.from_json(supervision_manifest)
    features = FeatureSet.from_json(feature_manifest)
    source_cuts = CutSet.from_manifests(supervisions=supervisions,
                                        features=features)

    # Split into two shuffled halves that will be mixed pair-wise.
    left_cuts, right_cuts = source_cuts.split(num_splits=2, shuffle=True)

    # Draw one SNR and one relative offset per mixed pair.
    num_pairs = len(left_cuts)
    snrs = np.random.uniform(*snr_range, size=num_pairs).tolist()
    relative_offsets = np.random.uniform(*offset_range, size=num_pairs).tolist()

    mixed_cuts = CutSet.from_cuts(
        left.mix(right,
                 offset_other_by=left.duration * rel_offset,
                 snr=snr)
        for left, right, snr, rel_offset
        in zip(left_cuts, right_cuts, snrs, relative_offsets)
    )
    mixed_cuts.to_json(output_cut_manifest)
Example #2
0
def test_mixed_cut_set_serialization(cut_set_with_mixed_cut, format, compressed):
    """Round-trip a CutSet containing a MixedCut through the requested format."""
    # Map each format name to its (writer, reader) pair.
    serde = {
        'yaml': (cut_set_with_mixed_cut.to_yaml, CutSet.from_yaml),
        'json': (cut_set_with_mixed_cut.to_json, CutSet.from_json),
    }
    with NamedTemporaryFile(suffix='.gz' if compressed else '') as f:
        if format in serde:
            write, read = serde[format]
            write(f.name)
            restored = read(f.name)
    assert cut_set_with_mixed_cut == restored
Example #3
0
def mix_sequential(cut_manifests: List[Pathlike], output_cut_manifest: Pathlike):
    """
    Create a CutSet stored in OUTPUT_CUT_MANIFEST by iterating jointly over CUT_MANIFESTS and mixing the Cuts
    on the same positions. E.g. the first output cut is created from the first cuts in each input manifest.
    The mix is performed by summing the features from all Cuts.
    If the CUT_MANIFESTS have different number of Cuts, the mixing ends when the shorter manifest is depleted.
    """
    # Use from_file (rather than from_json) so that any supported manifest format
    # is accepted — consistent with the output being written via to_file, and with
    # the sibling `append` command which uses from_file/to_file.
    cut_sets = [CutSet.from_file(path) for path in cut_manifests]
    # zip stops at the shortest input, which implements the "shorter manifest
    # is depleted" semantics documented above.
    mixed_cut_set = CutSet.from_cuts(mix_cuts(cuts) for cuts in zip(*cut_sets))
    mixed_cut_set.to_file(output_cut_manifest)
Example #4
0
def mix_by_recording_id(cut_manifests: List[Pathlike],
                        output_cut_manifest: Pathlike):
    """
    Create a CutSet stored in OUTPUT_CUT_MANIFEST by matching the Cuts from CUT_MANIFESTS by their recording IDs
    and mixing them together.
    """
    # Pool the cuts from every manifest, then bucket them by recording ID.
    cut_sets = [CutSet.from_json(path) for path in cut_manifests]
    all_cuts = combine(*cut_sets)
    cuts_per_recording = groupby(lambda cut: cut.recording_id, all_cuts)
    # Mix each bucket into a single cut.
    mixed_cut_set = CutSet.from_cuts(
        mix_cuts(cuts) for cuts in cuts_per_recording.values())
    mixed_cut_set.to_json(output_cut_manifest)
Example #5
0
 def _collect_batch(self) -> CutSet:
     """
     Return a sub-CutSet that represents a full batch.
     This is quick, as it does not perform any I/O in the process.

     Cuts are drawn from the underlying CutSet until adding one more would
     exceed ``max_frames`` (or ``max_cuts``, if set). No feature/audio data
     is loaded — only manifest metadata is inspected.

     :raises StopIteration: when the partition is exhausted and no cuts remain.
     """
     num_frames = 0
     batch = []
     while True:
         # Stop condition: we ran past the end of this partition.
         if self.current_idx >= self.partition_end:
             if batch:
                 # Return the partial batch we managed to collect.
                 return CutSet.from_cuts(batch)
             # Nothing left at all - signal the iteration code to stop.
             raise StopIteration()
         # Peek at the next cut without consuming it yet.
         candidate = self.cuts[self.cut_ids[self.current_idx]]
         would_be_frames = num_frames + candidate.num_frames
         would_be_cuts = len(batch) + 1
         fits = would_be_frames <= self.max_frames and (
             self.max_cuts is None or would_be_cuts <= self.max_cuts)
         if fits:
             # Within constraints - accept the cut and keep going.
             num_frames = would_be_frames
             batch.append(candidate)
             self.current_idx += 1
         elif batch:
             # Constraints exceeded and we already have cuts - batch is done.
             break
         else:
             # The very first cut already violates the constraints; warn the
             # user that they might be too tight, but return the cut anyway.
             warnings.warn(
                 "The first cut drawn in batch collection violates the max_frames or max_cuts "
                 "constraints - we'll return it anyway. Consider increasing max_frames/max_cuts."
             )
             batch.append(candidate)
             self.current_idx += 1
     if self.concat_cuts:
         # Optionally merge the collected cuts into fewer, longer ones,
         # capped relative to the first cut's duration.
         batch = concat_cuts(batch,
                             gap=self.concat_cuts_gap,
                             max_duration=self.concat_cuts_duration_factor *
                             batch[0].duration)
     return CutSet.from_cuts(batch)
Example #6
0
def append(
    cut_manifests: List[Pathlike],
    output_cut_manifest: Pathlike,
):
    """
    Create a new CutSet by appending the cuts in CUT_MANIFESTS. CUT_MANIFESTS are iterated position-wise (the
    cuts on i'th position in each manifest are appended to each other).
    The cuts are appended in the order in which they appear in the
    input argument list.
    If CUT_MANIFESTS have different lengths, the script stops once the shortest CutSet is depleted.
    """
    loaded_sets = [CutSet.from_file(p) for p in cut_manifests]
    # zip iterates position-wise and stops at the shortest CutSet.
    appended = CutSet.from_cuts(append_cuts(group) for group in zip(*loaded_sets))
    appended.to_file(output_cut_manifest)
Example #7
0
def cut_set_with_mixed_cut(cut1, cut2):
    # Mix the two cuts: the second track starts 1s later, at 10 dB SNR.
    tracks = [MixTrack(cut=cut1), MixTrack(cut=cut2, offset=1.0, snr=10)]
    mixed_cut = MixedCut(id="mixed-cut-id", tracks=tracks)
    # The returned set holds both source cuts plus the mix, keyed by ID.
    by_id = {}
    for cut in (cut1, cut2, mixed_cut):
        by_id[cut.id] = cut
    return CutSet(by_id)
def k2_cut_set(libri_cut_set):
    # Create a cut set with 4 cuts, one of them having two supervisions
    base = libri_cut_set[0]
    return CutSet.from_cuts([
        base,
        base.with_id('copy-1'),
        base.with_id('copy-2'),
        base.append(base),
    ])
Example #9
0
    def test_make_cuts_from_recordings_features_supervisions(
            self, dummy_recording_set, dummy_feature_set,
            dummy_supervision_set):
        """Cuts built from all three manifests expose recording, supervision and feature info."""
        cut_set = CutSet.from_manifests(recordings=dummy_recording_set,
                                        supervisions=dummy_supervision_set,
                                        features=dummy_feature_set)
        cut1 = cut_set[0]

        # Cut geometry.
        assert cut1.start == 0
        assert cut1.duration == 10.0
        assert cut1.end == 10.0
        assert cut1.channel == 0

        # The single supervision attached to the cut.
        assert len(cut1.supervisions) == 1
        sup = cut1.supervisions[0]
        assert sup.id == 'sup1'
        assert sup.recording_id == 'rec1'
        assert sup.start == 3.0
        assert sup.end == 7.0
        assert sup.channel == 0
        assert sup.text == 'dummy text'

        # Recording-related fields.
        assert cut1.has_recording
        assert cut1.recording == dummy_recording_set.recordings['rec1']
        assert cut1.sampling_rate == 16000
        assert cut1.recording_id == 'rec1'
        assert cut1.num_samples == 160000

        # Feature-related fields.
        assert cut1.has_features
        assert cut1.features == dummy_feature_set.features[0]
        assert cut1.frame_shift == 0.01
        assert cut1.num_frames == 1000
        assert cut1.num_features == 23
        assert cut1.features_type == 'fbank'
Example #10
0
    def test_make_cuts_from_recordings_supervisions(self, dummy_recording_set,
                                                    dummy_supervision_set):
        """Cuts built without a feature manifest have all feature fields empty."""
        cut_set = CutSet.from_manifests(recordings=dummy_recording_set,
                                        supervisions=dummy_supervision_set)
        cut1 = cut_set[0]

        # Cut geometry.
        assert cut1.start == 0
        assert cut1.duration == 10.0
        assert cut1.end == 10.0
        assert cut1.channel == 0

        # The single supervision attached to the cut.
        assert len(cut1.supervisions) == 1
        sup = cut1.supervisions[0]
        assert sup.id == "sup1"
        assert sup.recording_id == "rec1"
        assert sup.start == 3.0
        assert sup.end == 7.0
        assert sup.channel == 0
        assert sup.text == "dummy text"

        # Recording-related fields.
        assert cut1.has_recording
        assert cut1.recording == dummy_recording_set.recordings["rec1"]
        assert cut1.sampling_rate == 16000
        assert cut1.recording_id == "rec1"
        assert cut1.num_samples == 160000

        # No features were provided, so every feature-related field is empty.
        assert not cut1.has_features
        assert cut1.features is None
        assert cut1.frame_shift is None
        assert cut1.num_frames is None
        assert cut1.num_features is None
        assert cut1.features_type is None
Example #11
0
def mixed_audio_cut() -> MixedCut:
    # Fixture: an audio-domain MixedCut loaded from a prepared manifest.
    manifest = CutSet.from_json(
        "test/fixtures/mix_cut_test/overlayed_audio_cut_manifest.json"
    )
    cut = manifest["mixed-cut-id"]
    # Sanity-check the fixture before handing it to tests.
    assert isclose(cut.duration, 14.4)
    return cut
Example #12
0
def test_cut_set_decompose():
    # A 10s cut starting at 5s of the recording, with two supervisions.
    c = dummy_cut(
        0,
        start=5.0,
        duration=10.0,
        supervisions=[
            dummy_supervision(0, start=0.0),
            dummy_supervision(1, start=6.5)
        ],
    )
    assert c.start == 5.0
    assert c.end == 15.0

    recs, sups, feats = CutSet.from_cuts([c]).decompose()

    # Exactly one recording comes out.
    assert isinstance(recs, RecordingSet)
    assert len(recs) == 1
    assert recs[0].id == "dummy-recording-0000"

    # Both supervisions come out, with recording-relative times.
    assert isinstance(sups, SupervisionSet)
    assert len(sups) == 2
    first, second = sups[0], sups[1]
    assert first.id == "dummy-segment-0000"
    assert first.start == 5.0
    assert first.end == 6.0
    assert second.id == "dummy-segment-0001"
    assert second.start == 11.5
    assert second.end == 12.5

    # Exactly one feature manifest entry comes out.
    assert isinstance(feats, FeatureSet)
    assert len(feats) == 1
Example #13
0
def test_trim_to_unsupervised_segments():
    cut_set = CutSet.from_cuts([
        # Yields 3 unsupervised cuts - before first supervision,
        # between sup2 and sup3, and after sup3.
        Cut('cut1', start=0, duration=30, channel=0, supervisions=[
            SupervisionSegment('sup1', 'rec1', start=1.5, duration=8.5),
            SupervisionSegment('sup2', 'rec1', start=10, duration=5),
            SupervisionSegment('sup3', 'rec1', start=20, duration=8),
        ]),
        # Does not yield any "unsupervised" cut.
        Cut('cut2', start=0, duration=30, channel=0, supervisions=[
            SupervisionSegment('sup4', 'rec1', start=0, duration=30),
        ]),
    ])
    unsupervised_cuts = cut_set.trim_to_unsupervised_segments()

    assert len(unsupervised_cuts) == 3

    # Each gap around/between supervisions becomes one supervision-free cut.
    expected = [(0, 1.5), (15, 5), (28, 2)]
    for cut, (start, duration) in zip(unsupervised_cuts, expected):
        assert cut.start == start
        assert cut.duration == duration
        assert cut.supervisions == []
Example #14
0
def test_mix_same_recording_channels():
    # A single two-channel recording, each channel backed by its own file.
    sources = [
        AudioSource('file', channels=[0], source='irrelevant1.wav'),
        AudioSource('file', channels=[1], source='irrelevant2.wav'),
    ]
    recording = Recording('rec',
                          sampling_rate=8000,
                          num_samples=30 * 8000,
                          duration=30,
                          sources=sources)
    cut_set = CutSet.from_cuts([
        Cut('cut1', start=0, duration=30, channel=0, recording=recording),
        Cut('cut2', start=0, duration=30, channel=1, recording=recording)
    ])

    mixed = cut_set.mix_same_recording_channels()
    assert len(mixed) == 1

    # The two per-channel cuts become the two tracks of a single MixedCut.
    result = mixed[0]
    assert isinstance(result, MixedCut)
    assert len(result.tracks) == 2
    assert result.tracks[0].cut == cut_set[0]
    assert result.tracks[1].cut == cut_set[1]
Example #15
0
def mixed_feature_cut() -> MixedCut:
    # Fixture: a feature-domain MixedCut loaded from a prepared manifest.
    manifest = CutSet.from_json(
        'test/fixtures/mix_cut_test/overlayed_cut_manifest.json')
    cut = manifest['mixed-cut-id']
    # Sanity-check the fixture's dimensions before handing it to tests.
    assert cut.num_frames == 1360
    assert isclose(cut.duration, 13.595)
    return cut
Example #16
0
def mixed_overlapping_cut_set():
    """
    Input mixed cut::
        |---------------mixedcut--------------------|
        |--------rec1 0-30s--------|
                     |-------rec2 15-45s--------|
         |---sup1--|         |-----sup3-----|
                 |sup2|
    """
    first = MonoCut(
        'cut1', start=0, duration=30, channel=0,
        recording=Recording(
            id='rec1', sources=[], sampling_rate=16000, num_samples=160000, duration=60.0
        ),
        supervisions=[
            SupervisionSegment('sup1', 'rec1', start=1.5, duration=10.5),
            SupervisionSegment('sup2', 'rec1', start=10, duration=6),
        ]
    )
    second = MonoCut(
        'cut2', start=15, duration=30, channel=0,
        recording=Recording(
            id='rec2', sources=[], sampling_rate=16000, num_samples=160000, duration=60.0
        ),
        supervisions=[
            SupervisionSegment('sup3', 'rec2', start=8, duration=18),
        ]
    )
    # The second cut is shifted by 15s so it overlaps the first one.
    cut_set = CutSet.from_cuts([first.mix(second, offset_other_by=15.0)])
    assert isinstance(cut_set[0], MixedCut)
    return cut_set
Example #17
0
def test_serialize_padded_cut_set(cut_set):
    # cut_set fixture is defined in test/cut/conftest.py
    padded = cut_set.pad(60.1)
    # Padded cuts must survive a JSON round-trip unchanged.
    with NamedTemporaryFile() as f:
        padded.to_json(f.name)
        round_tripped = CutSet.from_json(f.name)
    assert padded == round_tripped
Example #18
0
def to_manifest(items: Iterable[ManifestItem]) -> Optional[Manifest]:
    """
    Take an iterable of data types in Lhotse such as Recording, SupervisonSegment or Cut, and create the manifest of the
    corresponding type. When the iterable is empty, returns None.
    """
    iterator = iter(items)
    try:
        head = next(iterator)
    except StopIteration:
        # Empty input: there is no item to dispatch on.
        return None
    # Re-attach the consumed head so the constructors see the full sequence.
    everything = chain([head], iterator)

    if isinstance(head, Recording):
        return RecordingSet.from_recordings(everything)
    if isinstance(head, SupervisionSegment):
        return SupervisionSet.from_segments(everything)
    if isinstance(head, (Cut, MixedCut)):
        return CutSet.from_cuts(everything)
    if isinstance(head, Features):
        raise ValueError(
            "FeatureSet generic construction from iterable is not possible, as the config information "
            "would have been lost. Call FeatureSet.from_features() directly instead."
        )

    raise ValueError(f"Unknown type of manifest item: {head}")
Example #19
0
def test_mixed_cut_set_prefix(cut_with_relative_paths):
    mixed = cut_with_relative_paths.mix(cut_with_relative_paths)
    cut_set = CutSet.from_cuts([mixed])
    # Prefixing the recording paths must affect every track of the mixed cut.
    for cut in cut_set.with_recording_path_prefix('/data'):
        assert all(
            track.cut.recording.sources[0].source == '/data/audio.wav'
            for track in cut.tracks
        )
    # Likewise for the feature storage paths.
    for cut in cut_set.with_features_path_prefix('/data'):
        assert all(
            track.cut.features.storage_path == '/data/storage_dir'
            for track in cut.tracks
        )
Example #20
0
def pad(cut_manifest: Pathlike, output_cut_manifest: Pathlike,
        duration: Optional[float]):
    """
    Create a new CutSet by padding the cuts in CUT_MANIFEST. The cuts will be right-padded, i.e. the padding
    is placed after the signal ends.
    """
    cuts = CutSet.from_json(cut_manifest)
    padded = cuts.pad(desired_duration=duration)
    padded.to_json(output_cut_manifest)
Example #21
0
def test_store_audio(cut_set):
    # NOTE(review): the `cut_set` fixture argument is immediately shadowed by the
    # manifest loaded below, so the fixture value is never used — confirm whether
    # the parameter can be dropped or the hard-coded path should use the fixture.
    cut_set = CutSet.from_json('test/fixtures/libri/cuts.json')
    with TemporaryDirectory() as tmpdir:
        # Re-materialize the recordings to disk, then verify that the audio
        # round-trips identically and that no cuts were lost.
        stored_cut_set = cut_set.compute_and_store_recordings(tmpdir)
        for cut1, cut2 in zip(cut_set, stored_cut_set):
            samples1 = cut1.load_audio()
            samples2 = cut2.load_audio()
            assert np.array_equal(samples1, samples2)
        assert len(stored_cut_set) == len(cut_set)
Example #22
0
def test_cut_into_windows():
    # The fixture manifest has 2 cuts of 1.54s and 1.6s.
    source_cuts = CutSet.from_json("test/fixtures/ljspeech/cuts.json")
    # Expected window starts within each cut: 0, 0.4, 0.8, 1.2.
    windows = source_cuts.cut_into_windows(duration=0.5, hop=0.4)
    assert [w.start for w in windows] == approx(
        [0, 0.4, 0.8, 1.2, 0, 0.4, 0.8, 1.2])
    assert [w.duration for w in windows] == approx(
        [0.5, 0.5, 0.5, 0.3396371882, 0.5, 0.5, 0.5, 0.39768707483])
Example #23
0
def test_trim_to_supervisions_mixed_cuts():
    def make_recording():
        # Both halves of the mix carry identical recording metadata.
        return Recording(id='rec1',
                         sources=[],
                         sampling_rate=16000,
                         num_samples=160000,
                         duration=10.0)

    first = Cut('cut1', start=0, duration=30, channel=0,
                recording=make_recording(),
                supervisions=[
                    SupervisionSegment('sup1', 'rec1', start=1.5, duration=8.5),
                    SupervisionSegment('sup2', 'rec1', start=10, duration=5),
                    SupervisionSegment('sup3', 'rec1', start=20, duration=8),
                ])
    second = Cut('cut2', start=0, duration=30, channel=0,
                 recording=make_recording(),
                 supervisions=[
                     SupervisionSegment('sup4', 'rec1', start=0, duration=30),
                 ])
    cut_set = CutSet.from_cuts([first.append(second)])
    assert isinstance(cut_set[0], MixedCut)

    cuts = cut_set.trim_to_supervisions()
    assert len(cuts) == 4
    # After "trimming", the MixedCut "decayed" into simple, unmixed cuts, as they did not overlap
    assert all(isinstance(cut, Cut) for cut in cuts)
    assert all(len(cut.supervisions) == 1 for cut in cuts)
    assert all(cut.supervisions[0].start == 0 for cut in cuts)
    # Check that the cuts preserved their start/duration/supervisions after trimming
    expected = [
        (1.5, 8.5, 'sup1'),
        (10, 5, 'sup2'),
        (20, 8, 'sup3'),
        (0, 30, 'sup4'),
    ]
    for cut, (start, duration, sup_id) in zip(cuts, expected):
        assert cut.start == start
        assert cut.duration == duration
        assert cut.supervisions[0].id == sup_id
Example #24
0
def test_mixed_cut_load_features():
    expected_frame_count = 1360
    # Load the feature-domain mix fixture and check its manifest-level shape.
    mixed_cut = CutSet.from_yaml(
        'test/fixtures/mix_cut_test/overlayed_cut_manifest.yml')['mixed-cut-id']
    assert mixed_cut.num_frames == expected_frame_count
    assert isclose(mixed_cut.duration, 13.595)

    # The actually loaded feature matrix must match the declared frame count.
    feats = mixed_cut.load_features()
    assert feats.shape[0] == expected_frame_count
Example #25
0
def test_compute_cmvn_stats():
    cut_set = CutSet.from_json('test/fixtures/libri/cuts.json')
    with NamedTemporaryFile() as f:
        # Compute the stats, persisting them to the temp file at the same time.
        stats = cut_set.compute_global_feature_stats(storage_path=f.name)
        f.flush()
        read_stats = pickle.load(f)
    # Both the in-memory and the persisted stats are per-feature vectors...
    num_features = cut_set[0].num_features
    assert stats['norm_means'].shape == (num_features, )
    assert stats['norm_stds'].shape == (num_features, )
    # ...with identical contents.
    assert (stats['norm_means'] == read_stats['norm_means']).all()
    assert (stats['norm_stds'] == read_stats['norm_stds']).all()
Example #26
0
 def __init__(
     self,
     cuts: CutSet,
     uem: Optional[SupervisionSet] = None,
     min_speaker_dim: Optional[int] = None,
     global_speaker_ids: bool = False,
 ) -> None:
     """
     Validate the input cuts, optionally restrict their supervisions to the
     UEM-scored regions, and prepare the (optional) global speaker-id map.
     """
     super().__init__()
     validate(cuts)
     if not uem:
         self.cuts = cuts
     else:
         # We use the `overlap` method in intervaltree to get overlapping regions
         # between the supervision segments and the UEM segments
         recordings = RecordingSet(
             {c.recording.id: c.recording for c in cuts if c.has_recording}
         )
         # Per-cut interval trees of the UEM segments.
         uem_intervals = CutSet.from_manifests(
             recordings=recordings,
             supervisions=uem,
         ).index_supervisions()
         kept_supervisions = []
         for cut_id, tree in cuts.index_supervisions().items():
             if cut_id not in uem_intervals:
                 # No UEM for this cut: keep all of its supervisions.
                 kept_supervisions.extend(it.data for it in tree)
                 continue
             # Keep supervisions overlapping any UEM region.
             # NOTE(review): trim() is called with the supervision's own
             # begin/end, not clipped to the UEM boundaries — confirm intended.
             kept_supervisions += {
                 it.data.trim(it.end, start=it.begin)
                 for uem_it in uem_intervals[cut_id]
                 for it in tree.overlap(begin=uem_it.begin, end=uem_it.end)
             }
         self.cuts = CutSet.from_manifests(
             recordings=recordings,
             supervisions=SupervisionSet.from_segments(kept_supervisions),
         )
     # Optional global speaker-name -> integer-index mapping.
     self.speakers = (
         {spk: idx for idx, spk in enumerate(self.cuts.speakers)}
         if global_speaker_ids else None
     )
     self.min_speaker_dim = min_speaker_dim
Example #27
0
def test_known_issue_with_overlap():
    r = dummy_recording(0)
    rec = RecordingSet.from_recordings([r])

    # Two supervisions on the same recording: "utt2" (0.5s, starting at 0.2s)
    # lies entirely inside "utt1" (1s, starting at 0s).
    sup = SupervisionSet.from_segments(
        [
            SupervisionSegment(
                id="utt1",
                recording_id=r.id,
                start=0.0,
                duration=1.0,
                channel=0,
                text="Hello",
            ),
            SupervisionSegment(
                id="utt2",
                recording_id=r.id,
                start=0.2,
                duration=0.5,
                channel=0,
                text="World",
            ),
        ]
    )

    cuts = CutSet.from_manifests(recordings=rec, supervisions=sup)
    assert len(cuts) == 1

    # Trimming without keep_overlapping yields one cut per supervision.
    cuts_trim = cuts.trim_to_supervisions(keep_overlapping=False)
    assert len(cuts_trim) == 2

    # Each trimmed cut keeps exactly its own supervision, rebased to start=0.
    expected = [(0, 1, "Hello"), (0.2, 0.5, "World")]
    for cut, (start, duration, text) in zip(cuts_trim, expected):
        assert cut.start == start
        assert cut.duration == duration
        assert len(cut.supervisions) == 1
        seg = cut.supervisions[0]
        assert seg.start == 0
        assert seg.duration == duration
        assert seg.text == text
Example #28
0
 def __getitem__(self, cuts: CutSet) -> Dict[str, torch.Tensor]:
     """Assemble a batch dict (inputs, lengths, voice masks, cuts) from the given cuts."""
     validate(cuts)
     cuts = cuts.sort_by_duration()
     # Apply cut-level transforms first, then input-level transforms.
     for transform in self.cut_transforms:
         cuts = transform(cuts)
     inputs, input_lens = self.input_strategy(cuts)
     for transform in self.input_transforms:
         inputs = transform(inputs)
     batch = {
         "inputs": inputs,
         "input_lens": input_lens,
         "is_voice": self.input_strategy.supervision_masks(cuts),
         "cut": cuts,
     }
     return batch
Example #29
0
def split(manifest: Manifest,
          num_splits: int,
          randomize: bool = False) -> List[Manifest]:
    """Split a manifest into `num_splits` equal parts. The element order can be randomized."""
    num_items = len(manifest)
    if num_splits > num_items:
        raise ValueError(
            f"Cannot split manifest into more chunks ({num_splits}) than its number of items {num_items}"
        )
    # Chunk boundaries: ceil so every item is covered; the last chunk may be shorter.
    chunk_size = int(ceil(num_items / num_splits))
    bounds = []
    for i in range(num_splits):
        begin = i * chunk_size
        bounds.append((begin, min(num_items, begin + chunk_size)))

    def maybe_randomize(items: Iterable[Any]) -> List[Any]:
        ordered = list(items)
        if randomize:
            random.shuffle(ordered)
        return ordered

    # Dispatch on the concrete manifest type; each branch re-wraps the slices.
    if isinstance(manifest, RecordingSet):
        entries = maybe_randomize(manifest.recordings.items())
        return [
            RecordingSet(recordings=dict(entries[b:e])) for b, e in bounds
        ]

    if isinstance(manifest, SupervisionSet):
        entries = maybe_randomize(manifest.segments.items())
        return [
            SupervisionSet(segments=dict(entries[b:e])) for b, e in bounds
        ]

    if isinstance(manifest, FeatureSet):
        entries = maybe_randomize(manifest.features)
        return [
            FeatureSet(features=entries[b:e],
                       feature_extractor=manifest.feature_extractor)
            for b, e in bounds
        ]

    if isinstance(manifest, CutSet):
        entries = maybe_randomize(manifest.cuts.items())
        return [CutSet(cuts=dict(entries[b:e])) for b, e in bounds]

    raise ValueError(f"Unknown type of manifest: {type(manifest)}")
Example #30
0
def DummyManifest(type_: Type, *, begin_id: int, end_id: int) -> Manifest:
    """
    Create a manifest of the requested ``type_`` filled with dummy items whose
    numeric IDs span ``range(begin_id, end_id)``.

    Note: dispatches via ``==`` against the manifest classes, so ``type_`` must
    be exactly one of RecordingSet / SupervisionSet / FeatureSet / CutSet.
    """
    if type_ == RecordingSet:
        return RecordingSet.from_recordings(
            dummy_recording(idx) for idx in range(begin_id, end_id))
    if type_ == SupervisionSet:
        return SupervisionSet.from_segments(
            dummy_supervision(idx) for idx in range(begin_id, end_id))
    if type_ == FeatureSet:
        # noinspection PyTypeChecker
        return FeatureSet.from_features(
            dummy_features(idx) for idx in range(begin_id, end_id))
    if type_ == CutSet:
        # noinspection PyTypeChecker
        return CutSet.from_cuts(
            dummy_cut(idx) for idx in range(begin_id, end_id))