def transform(self, *metadata: Meta) -> Meta: """ Compute metadata for concatenated streams. :param metadata: concatenated streams metadata :returns: Metadata for resulting stream with duration set to a sum of stream durations. Scenes and streams are also concatenated. """ duration = TS(0) scenes = [] streams: List[str] = [] frames: int = 0 for meta in metadata: duration += meta.duration scenes.extend(meta.scenes) for stream in meta.streams: if not streams or streams[-1] != stream: # Add all streams for each concatenated metadata and remove # contiguous duplicates. streams.append(stream) if isinstance(meta, VideoMeta): frames += meta.frames kwargs = dict(duration=duration, scenes=scenes, streams=streams) meta = metadata[0] if isinstance(meta, AudioMeta): # Recompute samples and sampling rate: sampling rate from first # input, samples count corresponds duration. kwargs['samples'] = round(meta.sampling_rate * duration) if isinstance(meta, VideoMeta): # Sum frames count from all input streams kwargs['frames'] = frames return replace(metadata[0], **kwargs)
def transform(self, *metadata: Meta) -> Meta:
    """
    Compute metadata for a stream with reset timestamps.

    :param metadata: single incoming stream metadata.
    :returns: metadata with start moved to zero and duration reduced
        accordingly. Only the RESET_PTS expression is supported.
    """
    meta = metadata[0]
    expr = self.expr.replace(' ', '')
    if expr == self.RESET_PTS:
        duration = meta.duration - meta.start
        return replace(meta, start=TS(0), duration=duration)
    raise NotImplementedError()
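
# A standalone illustration (plain numbers, not library code) of the
# arithmetic performed by the RESET_PTS branch above: a stream trimmed to
# [2.0, 10.0] (start=2.0, duration field holding the trim end 10.0) becomes
# a stream starting at 0.0 with 8.0 seconds of duration.
start, duration = 2.0, 10.0
new_start = 0.0
new_duration = duration - start
assert (new_start, new_duration) == (0.0, 8.0)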
def transform(self, *metadata: Meta) -> Meta: """ Computes metadata for trimmed stream. :param metadata: single incoming stream metadata. :returns: metadata with initial start (this is fixed with SetPTS) and duration set to trim end. Scenes list is intersected with trim interval, scene borders are aligned to trim borders. """ meta = metadata[0] scenes = [] streams: List[str] = [] start = self.start or TS(0) end = min(meta.duration, self.end or TS(0)) for scene in meta.scenes: if scene.stream and (not streams or streams[0] != scene.stream): # Adding an input stream without contiguous duplicates. streams.append(scene.stream) # intersect scene with trim interval start = cast(TS, max(self.start, scene.position)) end = cast(TS, min(self.end, scene.position + scene.duration)) if start < end: # If intersection is not empty, add intersection to resulting # scenes list. # This will allow detecting buffering when multiple scenes are # reordered in same file: input[3:4] + input[1:2] offset = start - scene.position scenes.append( Scene(stream=scene.stream, start=scene.start + offset, position=scene.position + offset, duration=end - start)) kwargs = { 'start': start, 'duration': end, 'scenes': scenes, 'streams': streams } interval = cast(TS, end) - cast(TS, start) if isinstance(meta, AudioMeta): kwargs['samples'] = round(meta.sampling_rate * interval) if isinstance(meta, VideoMeta): kwargs['frames'] = round(meta.frame_rate * interval) return replace(meta, **kwargs)
def assert_ts_equal(self, ts: meta.TS, expected: float) -> None:
    """ Check that ts is a TS instance approximately equal to expected seconds. """
    self.assertIsInstance(ts, meta.TS)
    self.assertAlmostEqual(ts.total_seconds(), expected, places=4)
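
# Hedged usage note: inside a test method of this suite the helper above
# might be called like the illustrative snippet below; result_meta is a
# hypothetical Meta object produced by one of the transforms.
#
#     self.assert_ts_equal(result_meta.start, 0.0)
#     self.assert_ts_equal(result_meta.duration, 10.0)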
def __post_init__(self) -> None:
    # Coerce start and end to TS instances if raw numbers were passed.
    if not isinstance(self.start, (TS, type(None))):
        self.start = TS(self.start)
    if not isinstance(self.end, (TS, type(None))):
        self.end = TS(self.end)
    super().__post_init__()
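
# A standalone sketch (TrimSketch and the TS stand-in are hypothetical, not
# the library definitions) showing the effect of the __post_init__ coercion
# above: numeric start/end values are normalized to TS so later TS
# arithmetic (max/min/subtraction in transform()) stays type-consistent.
from dataclasses import dataclass
from typing import Optional, Union


class TS(float):
    """ Stand-in for the library timestamp type (assumed float-based). """


@dataclass
class TrimSketch:
    start: Optional[Union[TS, float, int]] = None
    end: Optional[Union[TS, float, int]] = None

    def __post_init__(self) -> None:
        if not isinstance(self.start, (TS, type(None))):
            self.start = TS(self.start)
        if not isinstance(self.end, (TS, type(None))):
            self.end = TS(self.end)


t = TrimSketch(start=1, end=2.5)
assert isinstance(t.start, TS) and isinstance(t.end, TS)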