Example #1
def condense_clip(clip,
                  silence_threshold=0.1,
                  min_silence_length=0.15,
                  playback_speed=1.1):
    """Cut the silent segments out of a clip and speed up what remains."""

    silence_segments = get_silent_segments(clip, silence_threshold,
                                           min_silence_length)

    spoken_clips = []
    print("Extracting subclips")
    clip_start = 0.0
    for i, seg in enumerate(silence_segments):
        not_silences = clip.subclip(clip_start, seg[0])
        clip_start = seg[1]
        spoken_clips.append(not_silences)
        if i % 50 == 0:
            print(f"extracted subclip {i}")
    print("Concatenating subclips...")
    final_clip = concatenate_videoclips(spoken_clips)
    print("Finished ConcatOp")
    final_clip.fps = 30
    # keep the source audio sample rate; the 30 fps above only applies to the video track
    final_clip.audio.fps = clip.audio.fps
    print("Speeding up clip")
    final_clip = vfx.speedx(final_clip, playback_speed)
    print(f"Length of Shortened Clip: {final_clip.duration}")
    return final_clip
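A minimal usage sketch for condense_clip, assuming get_silent_segments returns (start, end) pairs of silent intervals in seconds; the file names below are placeholders:

from moviepy.editor import VideoFileClip

source = VideoFileClip("lecture.mp4")  # hypothetical input file
condensed = condense_clip(source, silence_threshold=0.1, playback_speed=1.25)
condensed.write_videofile("lecture_condensed.mp4")  # hypothetical output path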
Example #2
def render_clip(frame, filename):
    """
    Render single clip with delayed frame
    """
    vid = speedx(frame, 0.1)
    video_path = os.path.join(data_dir, filename)
    vid.write_videofile(video_path, verbose=False)
Example #3
def render_clip(frames, filename):
    vid = mp.concatenate_videoclips(frames)
    vid = speedx(vid, 0.1)

    final_vid = vid  # mp.clips_array([[legend, vid]])
    fp = os.path.join(data_dir, filename)
    final_vid.write_videofile(fp)
Example #4
    def jumpcut(self, magnitude_threshold_ratio, duration_threshold_in_seconds,
                failure_tolerance_ratio, space_on_edges, silence_part_speed,
                min_loud_part_duration):

        intervals_to_cut = self.audio.get_intervals_to_cut(
            magnitude_threshold_ratio, duration_threshold_in_seconds,
            failure_tolerance_ratio, space_on_edges)
        jumpcutted_clips = []
        previous_stop = 0
        for start, stop in intervals_to_cut:
            clip_before = self.clip.subclip(previous_stop, start)

            if clip_before.duration > min_loud_part_duration:
                jumpcutted_clips.append(clip_before)

            if silence_part_speed is not None:
                silence_clip = self.clip.subclip(start, stop)
                silence_clip = speedx(silence_clip,
                                      silence_part_speed).without_audio()
                jumpcutted_clips.append(silence_clip)

            previous_stop = stop

        # use previous_stop so this also works when intervals_to_cut is empty
        last_clip = self.clip.subclip(previous_stop, self.clip.duration)
        jumpcutted_clips.append(last_clip)

        return concatenate_videoclips(jumpcutted_clips), intervals_to_cut
Example #5
    async def spedup(self, ctx, multi=12):
        import moviepy.video.fx.all as vfx
        os.chdir(os.path.join(path, "sounds"))
        url = await AV.mhwnt(ctx)
        form = url[-3:]
        AV.dwn(url, "base." + form)
        video = VideoFileClip("base." + form)
        video = vfx.speedx(video, multi)
        video.write_videofile("res.mp4")
        try:
            await ctx.send(file=discord.File('res.mp4'))
        except discord.HTTPException:
            await ctx.send("File too large")
Example #6
def render_clip(frames, filename):
    """
    Render the successful and failure episodes as a GIF.
    """
    # vid = mp.concatenate_videoclips(frames)
    vid = speedx(frames, 0.1)
    final_vid = vid  # mp.clips_array([[legend, vid]])

    video_dir = os.path.join(data_dir, 'outcome_videos')
    if not os.path.exists(video_dir):
        os.mkdir(video_dir)
    video_path = os.path.join(video_dir, filename)          
    final_vid.write_gif(video_path)
Example #7
    def jumpcut_silent_parts(self, intervals_to_cut):
        jumpcutted_clips = []
        previous_stop = 0
        for start, stop in tqdm(intervals_to_cut,
                                desc="Cutting silent intervals"):
            clip_before = self.clip.subclip(previous_stop, start)

            if clip_before.duration > self.min_loud_part_duration:
                jumpcutted_clips.append(clip_before)

            if self.silence_part_speed is not None:
                silence_clip = self.clip.subclip(start, stop)
                silence_clip = speedx(silence_clip,
                                      self.silence_part_speed).without_audio()
                jumpcutted_clips.append(silence_clip)

            previous_stop = stop

        # use previous_stop so this also works when intervals_to_cut is empty
        last_clip = self.clip.subclip(previous_stop, self.clip.duration)
        jumpcutted_clips.append(last_clip)
        return jumpcutted_clips
Example #8
def set_video_dur(origin_clip, dur, fps=25):
    """Speed up or slow down origin_clip so it lasts exactly dur seconds, at the given fps."""
    clip = vfx.speedx(origin_clip, final_duration=dur).set_fps(fps)
    return clip
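A brief usage sketch of set_video_dur; the file names are placeholders, and speedx's final_duration argument rescales playback speed so the clip lands on the requested duration:

from moviepy.editor import VideoFileClip

clip = VideoFileClip("broll.mp4")          # hypothetical input file
fitted = set_video_dur(clip, dur=10.0)     # exactly 10 seconds at 25 fps
fitted.write_videofile("broll_10s.mp4")    # hypothetical output path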
def randomizeClip(clip, x):
    """Apply the moviepy vfx effect named by x with randomized parameters.

    If the effect fails or is not recognized, the clip is returned unchanged.
    """
    try:
        if x == "accel_decel":
            dur = random.randint(0, int(np.floor(clip.duration)) * 2)
            if dur == 0:
                dur = None
            a = random.uniform(-1, 1)
            s = random.uniform(0, 100)
            return vfx.accel_decel(clip, new_duration=dur, abruptness=a, soonness=s)
        elif x == "blackwhite":
            return vfx.blackwhite(clip)
        elif x == "blink":
            do = random.randint(0, 10)
            doff = random.randint(0, 10)
            return vfx.blink(clip, d_on=do, d_off=doff)
        elif x == "colorx":
            factor = random.randint(1, 1000)
            return vfx.colorx(clip, factor=factor)
        elif x == "crop":
            return clip
        elif x == "even_size":
            return vfx.even_size(clip)
        elif x == "fadein":
            d = random.randint(0, int(np.floor(clip.duration)))
            i = random.random()
            return vfx.fadein(clip, d, i)
        elif x == "fadeout":
            d = random.randint(0, int(np.floor(clip.duration)))
            i = random.random()
            return vfx.fadeout(clip, d, i)
        elif x == "freeze":
            t = random.randint(0, int(np.floor(clip.duration)))
            td = random.randint(0, int(np.floor(clip.duration)))
            return vfx.freeze(clip, t=t, total_duration=td)
        elif x == "freeze_region":
            return vfx.freeze_region(clip, mask=ImageClip(np.random.rand(clip.size[0], clip.size[1]), ismask=True))
        elif x == "gamma_corr":
            g = random.randint(0, 10)
            return vfx.gamma_corr(clip, g)
        elif x == "headblur":
            pass
        elif x == "invert_colors":
            return vfx.invert_colors(clip)
        elif x == "loop":
            ls = random.randint(0, 10)
            return vfx.loop(clip, n=ls)
        elif x == "lum_contrast":
            return vfx.lum_contrast(clip)
        elif x == "make_loopable":
            ls = random.randint(0, int(np.floor(clip.duration)))
            return vfx.make_loopable(clip, ls)
        elif x == "margin":
            s = clip.size[0] // random.randint(2, 10)
            o = random.random()
            return vfx.margin(clip, left=s, right=s, top=s, bottom=s, opacity=o)
        elif x == "mask_and":
            return vfx.mask_and(clip, ImageClip(np.random.rand(clip.size[1], clip.size[0]), ismask=True))
        elif x == "mask_color":
            thr = random.random()
            return vfx.mask_color(clip, thr=thr)
        elif x == "mask_or":
            return vfx.mask_or(clip, ImageClip(np.random.rand(clip.size[1], clip.size[0]), ismask=True))
        elif x == "mirror_x":
            return vfx.mirror_x(clip)
        elif x == "mirror_y":
            return vfx.mirror_y(clip)
        elif x == "painting":
            s = random.uniform(0, np.floor(clip.duration))
            b = random.randint(0, 100)/1000
            return vfx.painting(clip, saturation=s, black=b)
        elif x == "resize":
            u = random.random()
            return vfx.resize(clip, u)
        elif x == "rotate":
            u = random.uniform(0, 360)
            return vfx.rotate(clip, u)
        elif x == "scroll":
            return clip
        elif x == "speedx":
            u = random.uniform(0, 100)
            return vfx.speedx(clip, u)
        elif x == "supersample":
            g = random.randint(0, 10)
            d = int(clip.duration / 2)
            return vfx.supersample(clip, d, g)
        elif x == "time_mirror":
            return vfx.time_mirror(clip)
        elif x == "time_symmetrize":
            return vfx.time_symmetrize(clip)
        else:
            return clip
    except Exception:
        # if an effect fails (bad parameters, missing optional deps, ...), fall back to the original clip
        return clip
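A minimal sketch of how randomizeClip might be driven, picking one of the effect names handled above at random; the file names are placeholders and the module-level random/np/vfx imports are assumed:

from moviepy.editor import VideoFileClip

effect_names = ["accel_decel", "blackwhite", "blink", "colorx", "fadein", "fadeout",
                "freeze", "invert_colors", "loop", "lum_contrast", "mirror_x",
                "mirror_y", "resize", "rotate", "speedx", "time_mirror"]

clip = VideoFileClip("sample.mp4")  # hypothetical input file
result = randomizeClip(clip, random.choice(effect_names))
result.write_videofile("sample_randomized.mp4")  # hypothetical output path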