Example no. 1
 def __init__(self, ismask=False):
     Clip.__init__(self)
     self.mask = None
     self.audio = None
     self.pos = lambda t: (0, 0)
     self.relative_pos = False
     self.ismask = ismask
Example no. 2
    def __init__(self, clips):

        Clip.__init__(self)
        self.clips = clips

        ends = [c.end for c in self.clips]
        self.nchannels = max([c.nchannels for c in self.clips])
        if not any([(e is None) for e in ends]):
            self.duration = max(ends)
            self.end = max(ends)

        def make_frame(t):

            played_parts = [c.is_playing(t) for c in self.clips]

            sounds = [
                c.get_frame(t - c.start) * np.array([part]).T
                for c, part in zip(self.clips, played_parts)
                if (part is not False)
            ]

            if isinstance(t, np.ndarray):
                zero = np.zeros((len(t), self.nchannels))

            else:
                zero = np.zeros(self.nchannels)

            return zero + sum(sounds)

        self.make_frame = make_frame
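The constructor above sums, at every time t, the frames of all member clips that are currently playing. A minimal usage sketch, assuming the class is moviepy's CompositeAudioClip (the file names are hypothetical):

from moviepy.editor import AudioFileClip, CompositeAudioClip

voice = AudioFileClip("voice.mp3")                 # plays from t = 0
music = AudioFileClip("music.mp3").set_start(2.0)  # mixed in from t = 2 s

# Each output frame is the sum of the frames of every clip playing at t,
# exactly as make_frame above computes it.
mix = CompositeAudioClip([voice, music])
mix.write_audiofile("mix.mp3", fps=44100)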
Example no. 3
    def __init__(self, array, fps):
        
        Clip.__init__(self)
        self.array = array
        self.fps = fps
        self.duration = 1.0 * len(array) / fps

        def make_frame(t):
            """complicated, but must be able to handle the case where t
            is a list of the form sin(t)"""

            if isinstance(t, np.ndarray):
                array_inds = (self.fps * t).astype(int)
                # valid indices must include 0 and stay inside the array
                in_array = (array_inds >= 0) & (array_inds < len(self.array))
                result = np.zeros((len(t), 2))
                result[in_array] = self.array[array_inds[in_array]]
                return result
            else:
                i = int(self.fps * t)
                if i < 0 or i >= len(self.array):
                    return 0 * self.array[0]
                else:
                    return self.array[i]

        self.make_frame = make_frame
        self.nchannels = len(list(self.get_frame(0)))
Example no. 4
    def __init__(self, array, fps):

        Clip.__init__(self)
        self.array = array
        self.fps = fps
        self.duration = 1.0 * len(array) / fps

        def make_frame(t):
            """complicated, but must be able to handle the case where t
            is a list of the form sin(t)"""

            if isinstance(t, np.ndarray):
                array_inds = np.round(self.fps * t).astype(int)
                in_array = (array_inds >= 0) & (array_inds < len(self.array))
                result = np.zeros((len(t), 2))
                result[in_array] = self.array[array_inds[in_array]]
                return result
            else:
                i = int(self.fps * t)
                if i < 0 or i >= len(self.array):
                    return 0 * self.array[0]
                else:
                    return self.array[i]

        self.make_frame = make_frame
        self.nchannels = len(list(self.get_frame(0)))
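The array-backed constructor above maps a time t to an index into a raw sample buffer. A small sketch of driving it with a NumPy array, assuming the class is moviepy's AudioArrayClip:

import numpy as np
from moviepy.audio.AudioClip import AudioArrayClip

fps = 44100
t = np.linspace(0, 2, 2 * fps)                # 2 seconds of sample times
wave = 0.5 * np.sin(2 * np.pi * 440 * t)      # 440 Hz tone
stereo = np.column_stack([wave, wave])        # shape (n_samples, 2)

clip = AudioArrayClip(stereo, fps=fps)        # duration == len(array) / fps
sample = clip.get_frame(1.0)                  # one stereo sample at t = 1 s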
Example no. 5
    def __init__(self, clips):

        Clip.__init__(self)
        self.clips = clips
        
        ends = [c.end for c in self.clips]
        self.nchannels = max([c.nchannels for c in self.clips])
        if not any([(e is None) for e in ends]):
            self.duration = max(ends)
            self.end = max(ends)

        def make_frame(t):
            
            played_parts = [c.is_playing(t) for c in self.clips]
            
            sounds= [c.get_frame(t - c.start)*np.array([part]).T
                     for c,part in zip(self.clips, played_parts)
                     if (part is not False) ]
                     
            if isinstance(t,np.ndarray):
                zero = np.zeros((len(t),self.nchannels))
                
            else:
                zero = np.zeros(self.nchannels)
                
            return zero + sum(sounds)

        self.make_frame = make_frame
Example no. 6
    def _concatenate(self, clip: Clip, buffer=5):
        if self._transition == 'crossfadein':
            try:
                # Split the already-assembled clip into everything before the
                # crossfade window and its last `buffer` seconds.
                before_cross_part = self._final_clip.subclip(
                    t_end=self._final_clip.duration - buffer)
                before_buffer = self._final_clip.subclip(
                    t_start=self._final_clip.duration - buffer)
                # Split the incoming clip into the part that fades in and the rest.
                after_cross_part = clip.subclip(
                    t_start=self._transition_padding)
                after_buffer = clip.subclip(
                    t_end=self._transition_padding).set_start(
                        buffer - self._transition_padding)
            except OSError:
                # Retry with a longer buffer; give up once it gets unreasonably long.
                if buffer >= 60:
                    raise Exception('Buffer {} is too large'.format(buffer))
                self._concatenate(clip, buffer=buffer * 2)
                return

            # Overlay the fading-in part on the tail of the existing clip,
            # keeping the existing audio under the transition.
            crossfade = CompositeVideoClip(
                [before_buffer,
                 after_buffer.crossfadein(self._transition_padding)],
                use_bgclip=True)
            crossfade = crossfade.set_audio(before_buffer.audio)

            self._final_clip = concatenate_videoclips(
                [before_cross_part, crossfade, after_cross_part])
        else:
            self._final_clip = concatenate_videoclips([self._final_clip, clip])
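The method above belongs to a wrapper class (self._final_clip, self._transition and self._transition_padding are its own attributes): it splits the already-assembled clip, overlays the start of the incoming clip with a crossfadein, and stitches the three parts back together. Roughly the same effect can be sketched with moviepy's public API alone; the file names and padding value below are hypothetical:

from moviepy.editor import VideoFileClip, CompositeVideoClip

padding = 1.0                                  # seconds of overlap
a = VideoFileClip("a.mp4")
b = VideoFileClip("b.mp4")

# Overlay the start of `b` on the tail of `a`, fading `b` in over `padding`.
faded_b = b.set_start(a.duration - padding).crossfadein(padding)
result = CompositeVideoClip([a, faded_b])
result.write_videofile("ab_crossfade.mp4")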
Example no. 7
 def __init__(self, get_frame = None):
     Clip.__init__(self)
     if get_frame:
         self.get_frame = get_frame
         frame0 = self.get_frame(0)
         if hasattr(frame0, '__iter__'):
             self.nchannels = len(list(frame0))
         else:
             self.nchannels = 1
Example no. 8
 def __init__(self, get_frame=None):
     Clip.__init__(self)
     if get_frame:
         self.get_frame = get_frame
         frame0 = self.get_frame(0)
         if hasattr(frame0, '__iter__'):
             self.nchannels = len(list(frame0))
         else:
             self.nchannels = 1
Example no. 9
    def __init__(self, filename, buffersize=200000, nbytes=2, fps=44100):

        Clip.__init__(self)

        self.filename = filename
        self.reader = FFMPEG_AudioReader(filename, fps=fps, nbytes=nbytes,
                                         bufsize=buffersize + 100)
        self.fps = fps
        self.duration = self.reader.duration
        self.end = self.duration

        self.nframes = self.reader.nframes
        # the channel count comes from the reader; gf() below relies on it
        self.nchannels = self.reader.nchannels
        self.buffersize = buffersize
        self.buffer = None
        self._fstart_buffer = 1
        self._buffer_around(1)

        def gf(t):
            if isinstance(t, np.ndarray):
                # lazy implementation, but should not cause problems in
                # 99.99 % of the cases
                result = np.zeros((len(t), 2))
                in_time = (t >= 0) & (t < self.duration)
                inds = (self.fps * t + 1).astype(int)[in_time]
                f_tmin, f_tmax = inds.min(), inds.max()

                # recenter the buffer if the requested frames fall outside it
                if not (0 <= (f_tmin - self._fstart_buffer) < len(self.buffer)):
                    self._buffer_around(f_tmin)
                elif not (0 <= (f_tmax - self._fstart_buffer) < len(self.buffer)):
                    self._buffer_around(f_tmax)

                try:
                    result[in_time] = self.buffer[inds - self._fstart_buffer]
                    return result
                except IndexError as error:
                    print("Error: wrong indices in audio buffer. Maybe"
                          " the buffer is too small.")
                    raise error

            else:
                ind = int(self.fps * t)
                if ind < 0 or ind > self.nframes:  # out of time: return 0
                    return np.zeros(self.nchannels)

                if not (0 <= (ind - self._fstart_buffer) < len(self.buffer)):
                    # out of the buffer: recenter the buffer
                    self._buffer_around(ind)

                # read the frame in the buffer
                return self.buffer[ind - self._fstart_buffer]

        self.get_frame = gf
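The ndarray branch of gf above is the path exercised when many samples are requested at once, e.g. when the soundtrack is exported chunk by chunk. A sketch of that calling pattern, assuming the class is moviepy's AudioFileClip and using a hypothetical file name:

import numpy as np
from moviepy.audio.io.AudioFileClip import AudioFileClip

clip = AudioFileClip("speech.mp3", fps=44100)

# 1024 consecutive sample times starting at t = 2 s: the same kind of
# timestamp array the ndarray branch of gf() is written to handle.
tt = 2.0 + np.arange(1024) / clip.fps
chunk = clip.get_frame(tt)   # array of shape (1024, 2) in this snippet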
Example no. 10
 def __init__(self, make_frame = None, duration=None):
     Clip.__init__(self)
     if make_frame is not None:
         self.make_frame = make_frame
         frame0 = self.get_frame(0)
         if hasattr(frame0, '__iter__'):
             self.nchannels = len(list(frame0))
         else:
             self.nchannels = 1
     if duration is not None:
         self.duration = duration
         self.end = duration
Example no. 11
 def __init__(self, make_frame=None, duration=None):
     Clip.__init__(self)
     if make_frame is not None:
         self.make_frame = make_frame
         frame0 = self.get_frame(0)
         if hasattr(frame0, '__iter__'):
             self.nchannels = len(list(frame0))
         else:
             self.nchannels = 1
     if duration is not None:
         self.duration = duration
         self.end = duration
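This constructor takes an arbitrary frame function plus a duration and infers the channel count from the frame at t = 0. A short usage sketch, assuming the class is moviepy's AudioClip; the 440 Hz tone is just an illustration:

import numpy as np
from moviepy.audio.AudioClip import AudioClip

# make_frame must accept a scalar t or an array of times and return one
# sample (or one row of samples) per channel.
make_frame = lambda t: 0.5 * np.sin(2 * np.pi * 440 * t)   # mono tone

tone = AudioClip(make_frame, duration=2)
print(tone.nchannels)   # 1, detected from get_frame(0) as in the snippet above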
Example no. 12
    def __init__(self, filename, buffersize=200000, nbytes=2, fps=44100):

        Clip.__init__(self)

        self.filename = filename
        self.reader = FFMPEG_AudioReader(filename, fps=fps, nbytes=nbytes)
        self.fps = fps
        self.duration = self.reader.duration
        self.nframes = self.reader.nframes
        self.buffersize = buffersize
        self.buffer = None
        self._fstart_buffer = 1
        self._buffer_around(1)
        self.nchannels = self.reader.nchannels

        def gf(t):
            if isinstance(t, np.ndarray):
                # lazy implementation, but should not cause problems in
                # 99.99 % of the cases
                result = np.zeros((len(t), 2))
                in_time = (t >= 0) & (t < self.duration)
                inds = (self.fps * t + 1).astype(int)[in_time]
                f_tmin, f_tmax = inds.min(), inds.max()

                # recenter the buffer if the requested frames fall outside it
                if not (0 <= (f_tmin - self._fstart_buffer) < len(self.buffer)):
                    self._buffer_around(f_tmin)
                elif not (0 <= (f_tmax - self._fstart_buffer) < len(self.buffer)):
                    self._buffer_around(f_tmax)

                try:
                    result[in_time] = self.buffer[inds - self._fstart_buffer]
                    return result
                except IndexError:
                    print("Error: wrong indices in audio buffer. Maybe"
                          " the buffer is too small.")
                    raise
            else:
                ind = int(self.fps * t) + 1
                if ind < 1 or ind > self.nframes:  # out of time: return 0
                    return np.zeros(self.nchannels)

                if not (0 <= (ind - self._fstart_buffer) < len(self.buffer)):
                    # out of the buffer: recenter the buffer
                    self._buffer_around(ind)

                # read the frame in the buffer
                return self.buffer[ind - self._fstart_buffer]

        self.get_frame = gf
Example no. 13
 def __init__(
     self, make_frame=None, ismask=False, duration=None, has_constant_size=True
 ):
     Clip.__init__(self)
     self.mask = None
     self.audio = None
     self.pos = lambda t: (0, 0)
     self.relative_pos = False
     if make_frame:
         self.make_frame = make_frame
         self.size = self.get_frame(0).shape[:2][::-1]
     self.ismask = ismask
     self.has_constant_size = has_constant_size
     if duration is not None:
         self.duration = duration
         self.end = duration
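The full constructor above also derives the clip size from the first frame. A usage sketch, assuming the class is moviepy's VideoClip; the gradient frame function is just an illustration:

import numpy as np
from moviepy.video.VideoClip import VideoClip

def make_frame(t):
    """Return a 64x256 RGB frame whose blue channel oscillates with t."""
    ramp = np.tile(np.arange(256, dtype=np.uint8), (64, 1))
    blue = np.full_like(ramp, np.uint8(127 + 127 * np.sin(2 * np.pi * t)))
    return np.dstack([ramp, ramp, blue])

clip = VideoClip(make_frame, duration=3)
print(clip.size)   # (256, 64), taken from get_frame(0).shape[:2][::-1]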
Example no. 14
def test_clip_copy(copy_func):
    """Clip must be copied with `.copy()` method, `copy.copy()` and
    `copy.deepcopy()` (same behaviour).
    """
    clip = Clip()
    other_clip = Clip()

    # give every attribute of the original clip a dummy value
    for attr in clip.__dict__:
        setattr(clip, attr, "foo")

    copied_clip = copy_func(clip)

    # assert copied attributes
    for attr in copied_clip.__dict__:
        assert getattr(copied_clip, attr) == getattr(clip, attr)

        # other instances are not edited
        assert getattr(copied_clip, attr) != getattr(other_clip, attr)
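The test receives its copy_func argument from outside, presumably via pytest parametrisation over the three copy flavours named in its docstring. A sketch of that wiring; the decorator arguments and ids are assumptions:

import copy

import pytest

@pytest.mark.parametrize(
    "copy_func",
    (lambda clip: clip.copy(), copy.copy, copy.deepcopy),
    ids=("clip.copy()", "copy.copy()", "copy.deepcopy()"),
)
def test_clip_copy(copy_func):
    ...   # body as shown above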
Example no. 15
    def __init__(self, clips):

        Clip.__init__(self)
        self.clips = clips
        
        ends = [c.end for c in self.clips]
        if not any([(e is None) for e in ends]):
            self.duration = max(ends)

        def gf(t):
            sounds = [c.get_frame(t - c.start)
                      for c in clips if c.is_playing(t)]
            if isinstance(t, np.ndarray):
                zero = np.zeros((len(t), 2))
            else:
                zero = np.zeros(2)
            return zero + sum(sounds)

        self.get_frame = gf
Example no. 16
 def __init__(self):
     Clip.__init__(self)
Example no. 17
 def set_get_frame(self, gf):
     newclip = Clip.set_get_frame(self, gf)
     newclip.size = newclip.get_frame(0).shape[:2][::-1]
     return newclip