def _processFrames(self, iterable):
    """Crop each incoming video frame by (croptop, cropbottom, cropleft,
    cropright) pixels, preserving pts, time_base and pict_type.

    yuv420p frames are cropped plane-by-plane, with the chroma planes
    cropped at half resolution — this only works when every crop amount
    is even.  Frames that need an odd crop, and frames in any pixel
    format other than rgb24/yuv420p, are converted to rgb24 first.
    """
    for frame in iterable:
        odd_crop = (self.croptop % 2 or self.cropbottom % 2
                    or self.cropleft % 2 or self.cropright % 2)

        # BUG FIX: the original only converted to rgb24 when a crop was
        # odd, so an even crop on any format other than rgb24/yuv420p
        # fell through both branches and raised NameError on `newframe`
        # (or silently re-yielded the previous frame).  Convert every
        # format we cannot crop natively.
        if frame.format.name != "rgb24" and (
                odd_crop or frame.format.name != "yuv420p"):
            frame = frame.to_rgb()

        if frame.format.name == "rgb24":
            # Packed RGB: one (h, w, 3) array, crop both axes directly.
            # `-n if n else None` keeps a zero crop from producing an
            # empty slice (a[x:-0] would be empty).
            A = toNDArray(frame)
            A = A[
                self.croptop:-self.cropbottom if self.cropbottom else None,
                self.cropleft:-self.cropright if self.cropright else None
            ]
            newframe = toVFrame(A, frame.format.name)
        else:
            # yuv420p with even crops: luma at full resolution, chroma
            # planes at half resolution in both dimensions.
            Y, U, V = toNDArray(frame)
            Y = Y[
                self.croptop:-self.cropbottom if self.cropbottom else None,
                self.cropleft:-self.cropright if self.cropright else None
            ]
            U = U[
                self.croptop//2:-self.cropbottom//2
                if self.cropbottom else None,
                self.cropleft//2:-self.cropright//2
                if self.cropright else None
            ]
            V = V[
                self.croptop//2:-self.cropbottom//2
                if self.cropbottom else None,
                self.cropleft//2:-self.cropright//2
                if self.cropright else None
            ]
            newframe = toVFrame((Y, U, V), frame.format.name)

        # Carry timing/keyframe metadata over to the cropped frame.
        newframe.time_base = frame.time_base
        newframe.pts = frame.pts
        newframe.pict_type = frame.pict_type
        yield newframe
def iterFrames(self, start=0, end=None, whence="pts"):
    """Iterate over upstream audio frames, applying the channel mixing
    matrix `self.matrix` (if any) to each frame.

    Frames are first converted to planar float ("fltp").  With a matrix
    M of shape (out_channels, in_channels) and sample data A of shape
    (samples, in_channels), the mixed data is A @ M.T — identical to the
    original `(M * A.T).T` written with the deprecated numpy.matrix
    class, but returning a plain ndarray.
    """
    if self.matrix is not None:
        # numpy.matrix is deprecated; asarray + @ gives the same result.
        M = numpy.asarray(self.matrix, dtype=numpy.float32)
    else:
        M = None

    for frame in self.prev.iterFrames(start, end, whence):
        frame = aconvert(frame, "fltp")
        if M is not None:
            A = toNDArray(frame)
            mixed = A @ M.T  # == (M * A.transpose()).transpose()
            if (self._layout is None
                    and len(frame.layout.channels) == self.channels):
                # No explicit output layout configured and the channel
                # count is unchanged: keep the incoming layout.
                newframe = toAFrame(mixed, layout=frame.layout.name)
            else:
                newframe = toAFrame(mixed, layout=self.layout)
            newframe.rate = frame.rate
            newframe.pts = frame.pts
            newframe.time_base = frame.time_base
            yield newframe
        else:
            # No matrix: pass the (format-converted) frame through.
            yield frame
def _iterFrames(self, duration=None, logfile=None):
    """Yield frames for encoding, optionally truncated to `duration`
    seconds.

    Frames come from `self.filters` when present, otherwise from
    `self.source`.  For audio, the final frame is sliced so the total
    sample count matches `duration` exactly; for video, frames past
    `duration` are dropped.  `logfile` is currently unused here
    (presumably consumed by callers/overrides — TODO confirm).
    """
    source = self.source

    if self.filters:
        if duration is not None:
            frames = self.filters.iterFrames(
                end=int(duration/self.filters.time_base), whence="pts")
        else:
            frames = self.filters.iterFrames()
        rate = self.filters.rate
    elif source is not None:
        if duration is not None:
            frames = source.iterFrames(
                end=int(duration/source.time_base), whence="pts")
        else:
            frames = source.iterFrames()
        rate = source.rate

    if self.type == "audio":
        if duration is None:
            # BUG FIX: the original computed int(rate*duration + 0.5)
            # unconditionally, raising TypeError when duration is None.
            # With no cap, just pass everything through.
            for frame in frames:
                self.container._checkpause()
                yield frame
            return

        samples = 0
        N = int(rate*duration + 0.5)  # total samples to emit

        for frame in frames:
            self.container._checkpause()

            if samples + frame.samples == N:
                # This frame lands exactly on the boundary.
                yield frame
                break
            elif samples + frame.samples > N:
                # Slice the final frame down to the remaining samples,
                # preserving its timing metadata.
                pts = frame.pts
                time_base = frame.time_base
                A = toNDArray(frame)
                frame = toAFrame(A[:N - samples], frame.layout.name)
                frame.pts = pts
                frame.time_base = time_base
                frame.rate = rate
                yield frame
                break

            yield frame
            samples += frame.samples
    else:
        for frame in frames:
            self.container._checkpause()
            # BUG FIX: the original condition was
            # `duration is not None and pts*tb < duration`, which
            # yielded NOTHING when no duration was given.
            if (duration is None
                    or frame.pts*frame.time_base < duration):
                yield frame
def _processFrames(self, iterable):
    """Apply hue/saturation/lightness adjustments to each video frame.

    Per frame: convert RGB -> HSL (vectorized, per pixel), then apply
    the configured adjustments — hue shift ``self.dh`` (degrees),
    saturation scale ``self.sfactor``, lightness gamma ``self.lgamma``
    (applied as 1 - (1-L)**gamma, i.e. a gamma curve on darkness) —
    and convert HSL -> RGB again.  Timing metadata is preserved.
    """
    for frame in iterable:
        # Work in rgb24; convert anything else first.
        if frame.format.name != "rgb24":
            frame = frame.to_rgb()

        # Normalize to [0, 1) (divisor 256 matches the *256 on the way
        # back out) and split into channel planes of shape (h, w).
        A = toNDArray(frame) / 256
        R, G, B = moveaxis(A, 2, 0)

        # Standard RGB->HSL: V = max channel, C = chroma (max - min),
        # L = lightness.
        V = A.max(axis=2)
        C = V - A.min(axis=2)
        L = V - C / 2

        # Hue in degrees, piecewise by which channel is the maximum.
        # The masks are mutually exclusive by construction: each case
        # excludes all earlier ones (boolean * acts as logical AND).
        H = zeros(A.shape[:2], dtype=float64)
        case1 = C == 0                                  # achromatic: H stays 0
        case2 = (V == R) * (~case1)                     # red is max
        case3 = (V == G) * (~case1) * (~case2)          # green is max
        case4 = (V == B) * (~case1) * (~case2) * (~case3)  # blue is max
        H[case2] = (60 * (G[case2] - B[case2]) / C[case2]) % 360
        H[case3] = 60 * (2 + (B[case3] - R[case3]) / C[case3])
        H[case4] = 60 * (4 + (R[case4] - G[case4]) / C[case4])

        # Saturation (HSL definition); only defined for 0 < L < 1,
        # elsewhere it stays 0 to avoid division by zero.
        SL = zeros(A.shape[:2], dtype=float64)
        case5 = (L > 0) * (L < 1)
        SL[case5] = ((V[case5] - L[case5]) / npmin(
            (L[case5], 1 - L[case5]), axis=0))

        # --- Adjustments to HSL go here ---
        H += self.dh          # hue rotation, wrapped to [0, 360)
        H %= 360
        SL *= self.sfactor    # saturation scaling
        L = 1 - (1 - L)**self.lgamma  # lightness gamma curve

        # HSL -> RGB: recompute chroma, then the piecewise-by-sextant
        # reconstruction (H/60 selects one of six hue sectors).
        C = (1 - abs(2 * L - 1)) * SL
        H /= 60
        X = C * (1 - abs(H % 2 - 1))
        case1 = (H <= 1)
        case2 = (1 < H) * (H <= 2)
        case3 = (2 < H) * (H <= 3)
        case4 = (3 < H) * (H <= 4)
        case5 = (4 < H) * (H <= 5)
        case6 = H > 5
        m = L - C / 2  # per-pixel offset added to all channels

        R = zeros(R.shape, dtype=float64)
        G = zeros(G.shape, dtype=float64)
        B = zeros(B.shape, dtype=float64)
        R[case1] = C[case1]
        G[case1] = X[case1]
        R[case2] = X[case2]
        G[case2] = C[case2]
        G[case3] = C[case3]
        B[case3] = X[case3]
        G[case4] = X[case4]
        B[case4] = C[case4]
        B[case5] = C[case5]
        R[case5] = X[case5]
        B[case6] = X[case6]
        R[case6] = C[case6]
        R += m
        G += m
        B += m

        # Back to packed uint8 rgb24, clipped to the valid byte range.
        A = (256 * moveaxis((R, G, B), 0, 2)).clip(min=0, max=255)
        A = uint8(A)
        newframe = toVFrame(A, frame.format.name)

        # Preserve timing/keyframe metadata.
        newframe.time_base = frame.time_base
        newframe.pts = frame.pts
        newframe.pict_type = frame.pict_type
        yield newframe
def iterFrames(self, start=0, end=None, whence=None):
    """Iterate over a slice of the upstream stream, translating the
    caller's (start, end) — given in frames, pts, or seconds per
    `whence` — into the parent stream's coordinates, and re-stamping
    the resulting frames with this slice's own timestamps.

    Video defaults to whence="framenumber"; audio to whence="seconds".
    The small epsilons (0.0005 pts-seconds for video, 0.001 for audio)
    guard against floating-point rounding at slice boundaries —
    presumably tuned empirically; confirm before changing.
    """
    if self.type == "video":
        if whence is None:
            whence = "framenumber"

        # Translate (start, end) into the parent's coordinate system
        # and set up N, a counter of output frame indices used to look
        # up the re-stamped pts in self.pts.
        if whence == "framenumber":
            N = count(start)
            start = self.prev_start + start
            if end is not None and self.endpts is not None:
                end = min(self.prev_start + end, self.prev_end)
            elif end is not None:
                end = self.prev_start + end
            else:
                end = self.prev_end
        elif whence == "pts":
            N = count(self.frameIndexFromPts(start))
            start = (self.startpts - 0.0005) / \
                self.prev.time_base + max(start, 0)
            if end is not None and self.endpts is not None:
                end = min(
                    (self.startpts - 0.0005)/self.prev.time_base + end,
                    (self.endpts - 0.0005)/self.prev.time_base)
            elif end is not None:
                end = (self.startpts - 0.0005)/self.prev.time_base + end
            elif self.endpts is not None:
                end = (self.endpts - 0.0005)/self.prev.time_base
            else:
                end = None
        elif whence == "seconds":
            N = count(self.frameIndexFromPts(start/self.prev.time_base))
            start = self.startpts - 0.0005 + max(start, 0)
            if end is not None and self.endpts is not None:
                end = min(self.startpts - 0.0005 + end,
                          self.endpts - 0.0005)
            elif end is not None:
                end = self.startpts - 0.0005 + end
            elif self.endpts is not None:
                end = self.endpts - 0.0005
            else:
                end = None

        frames = self.prev.iterFrames(start, end, whence)
        pts = self.pts  # per-output-frame pts table for this slice
        for n, frame in zip(N, frames):
            frame.pts = pts[n]
            # Optionally force the first frame of the slice to be a
            # keyframe so the slice is independently decodable.
            if n == 0 and self.firstframekey:
                frame.pict_type = "I"
            yield frame

    elif self.type == "audio":
        if whence is None:
            whence = "seconds"

        # Translate (start, end) into parent coordinates.  The request
        # to the parent starts slightly early (0.001s) so the first
        # partially-overlapping frame is included and can be trimmed.
        if whence == "pts":
            start = self.startpts/self.prev.time_base + max(start, 0)
            if end is not None and self.endpts is not None:
                end = min(self.startpts/self.prev.time_base + end,
                          self.endpts/self.prev.time_base)
            elif end is not None:
                end = self.startpts/self.prev.time_base + end
            elif self.endpts is not None:
                end = self.endpts/self.prev.time_base
            else:
                end = self.prev.duration/self.prev.time_base
            frames = self.prev.iterFrames(
                start - 0.001/self.prev.time_base, end, whence)
        elif whence == "seconds":
            start = self.startpts + max(start, 0)
            if end is not None and self.endpts is not None:
                end = min(self.startpts + end, self.endpts)
            elif end is not None:
                end = self.startpts + end
            elif self.endpts is not None:
                end = self.endpts
            else:
                end = self.prev.duration
            frames = self.prev.iterFrames(start - 0.001, end, whence)

        for frame in frames:
            # n1/n2: number of samples to trim from the head/tail of
            # this frame so it fits inside [start, end].  The +0.0001
            # fudge keeps floor() stable against float rounding.
            if whence == "seconds":
                n1 = int(
                    max(numpy.floor((
                        start - frame.pts*frame.time_base)
                        * frame.rate + 0.0001), 0))
                n2 = int(max(numpy.floor((
                    frame.pts*frame.time_base
                    + frame.samples/frame.rate - end)*frame.rate
                    + 0.0001), 0))
            elif whence == "pts":
                n1 = int(max(numpy.floor(
                    (start*frame.time_base - frame.pts*frame.time_base)
                    * frame.rate + 0.0001), 0))
                n2 = int(max(numpy.floor((
                    frame.pts*frame.time_base + frame.samples
                    / frame.rate - end*frame.time_base)
                    * frame.rate + 0.0001), 0))

            if n1 or n2:
                # Rebuild a trimmed frame; `n1 or None` / `(-n2) or
                # None` avoid the empty a[0:-0] slice when one side
                # needs no trimming.
                pts = frame.pts
                tb = frame.time_base
                r = frame.rate
                A = toNDArray(frame)[n1 or None:(-n2) or None]
                if len(A) == 0:
                    continue  # frame lies entirely outside the slice
                frame = toAFrame(A, layout=frame.layout.name)
                frame.rate = r
                frame.time_base = tb
                # Shift pts into the slice's local timeline, clamped at
                # the slice's local start.
                if whence == "seconds":
                    frame.pts = max(
                        pts - int(self.startpts/tb + 0.001),
                        int((start - self.startpts)/tb + 0.001))
                elif whence == "pts":
                    frame.pts = max(
                        pts - int(self.startpts/tb + 0.001),
                        int(start - self.startpts/tb + 0.001))
            else:
                # Untrimmed frame: just shift pts into local time.
                frame.pts -= int(self.startpts/self.time_base + 0.001)
            yield frame
def iterFrames(self, start=0, end=None, whence="pts"):
    """Cross-fade source1 into source2 over `self.duration` seconds.

    `self.flags` bit 0 disables fading source1 out; bit 1 disables
    fading source2 in.  Video is blended per-frame with linear weights
    by frame index; audio is blended in fixed 1536-sample chunks with
    cos²/sin² weights (which sum to 1, keeping total level constant).
    """
    frames1 = self.source1.iterFrames(start, end, whence)
    frames2 = self.source2.iterFrames(start, end, whence)

    if self.type == "video":
        for frame1, frame2 in zip(frames1, frames2):
            # k: frame index within the fade; the (k+1)/(framecount+2)
            # ramp avoids weights of exactly 0 or 1 at the endpoints.
            k = self.source1.frameIndexFromPts(frame1.pts)
            A = frame1.to_rgb().to_ndarray()
            B = frame2.to_rgb().to_ndarray()
            if not 1 & self.flags:
                A = (1 - (k + 1)/(self.framecount + 2))*A  # fade out
            if not 2 & self.flags:
                B = ((k + 1)/(self.framecount + 2))*B      # fade in
            # +0.5 rounds to nearest; copy(order="C") because
            # VideoFrame.from_ndarray needs a contiguous array.
            C = numpy.uint8((A + B).clip(max=255) + 0.5).copy(order="C")
            newframe = VideoFrame.from_ndarray(C)
            newframe.time_base = frame1.time_base
            newframe.pts = frame1.pts
            # Keep a keyframe if either input had one here.
            if frame1.pict_type == "I" or frame2.pict_type == "I":
                newframe.pict_type = "I"
            yield newframe

    elif self.type == "audio":
        # AA/BB buffer decoded samples from each source so output can
        # be emitted in uniform 1536-sample chunks regardless of the
        # inputs' frame sizes.
        AA = numpy.zeros((0, self.source1.channels),
                         dtype=numpy.float32)
        BB = numpy.zeros((0, self.source2.channels),
                         dtype=numpy.float32)
        T = None  # running output time in seconds; set from first frame
        while True:
            # Refill buffer A up to one chunk (or until source1 ends).
            while len(AA) < 1536:
                try:
                    frame1 = next(frames1)
                except StopIteration:
                    break
                if T is None:
                    T = frame1.pts*frame1.time_base
                frame1 = aconvert(frame1, self.format)
                A = toNDArray(frame1)
                AA = numpy.concatenate((AA, A))
            # Refill buffer B likewise.
            while len(BB) < 1536:
                try:
                    frame2 = next(frames2)
                except StopIteration:
                    break
                frame2 = aconvert(frame2, self.format)
                B = toNDArray(frame2)
                BB = numpy.concatenate((BB, B))

            # Emit min(available, 1536) samples; stop when either
            # source is exhausted and drained.
            N = min(len(AA), len(BB), 1536)
            if N == 0:
                break

            # cos²/sin² crossfade weights at time T (they sum to 1).
            if not 1 & self.flags:
                A = AA[:N]*numpy.cos(T*numpy.pi/2/self.duration)**2
            else:
                A = AA[:N]
            if not 2 & self.flags:
                B = BB[:N]*numpy.sin(T*numpy.pi/2/self.duration)**2
            else:
                B = BB[:N]

            newframe = toAFrame(A + B, layout=self.layout)
            newframe.rate = frame1.rate
            # Re-derive pts from accumulated time T (epsilon guards
            # float truncation in the int() conversion).
            newframe.pts = int(T/frame1.time_base + 0.00001)
            newframe.time_base = frame1.time_base
            yield newframe

            # Advance time and drop the consumed samples.
            T += N/self.rate
            AA = AA[N:]
            BB = BB[N:]