Exemple #1
0
def resize_cropped(filename, side):
    """Load the video at *filename*, center-crop it to a (side x side) square,
    and return it as a numpy array shaped (frames, h, w, 3)."""
    source = mp.VideoFileClip(filename)
    (orig_w, orig_h) = source.size

    # Orientation decides which dimension is constrained when rescaling.
    is_landscape = orig_w >= orig_h

    # Both candidate rescalings: one with the height pinned to `side`,
    # one with the width pinned.
    by_height = source.resize(height=side)
    by_width = source.resize(width=side)

    # A landscape input needs the height-constrained clip (width overflows
    # and is cropped away); a portrait input needs the width-constrained one.
    base = by_height if is_landscape else by_width
    (bw, bh) = base.size
    squared = crop(base, width=side, height=side,
                   x_center=bw / 2, y_center=bh / 2)

    # Materialise every frame into one array.
    return np.asarray(list(squared.iter_frames()))
Exemple #2
0
def scheduled_time_scene_transition(schedule, resource_folder_name="res"):
    '''
    Build one video by concatenating the scheduled resources in order.

    params:
    - schedule: a list of tuples of (file name, dur, params); params is a dict
      with optional "part" (subclip start/end) and "crop" (x1, y1, x2, y2 as
      fractions of the clip's width/height) entries
    - resource_folder_name: folder the resource files live in

    Raises FileNotFoundError when a scheduled resource does not exist.
    '''
    clips = []
    for res, dur, params in schedule:
        # EH: use a better way to detect the type of a file
        file_name = os.path.join(resource_folder_name, res)
        if not os.path.exists(file_name):
            print("File not found! {}".format(file_name))
            raise FileNotFoundError()
        file_type = res.split(".")[-1]
        if file_type in ["mov", "mp4", "avi", "flv"]:
            # Reuse the path computed above instead of re-joining it.
            origin_video_clip = mpy.VideoFileClip(file_name, audio=False)
            if params["part"]:
                parts = params["part"]
                origin_video_clip = origin_video_clip.subclip(
                    parts[0], parts[1])
            if params["crop"]:
                w = origin_video_clip.w
                h = origin_video_clip.h
                rect = params["crop"]
                origin_video_clip = vfx.crop(origin_video_clip, w * rect[0],
                                             h * rect[1], w * rect[2],
                                             h * rect[3])
            clips.append(
                set_video_dur(resize_and_fit(origin_video_clip, PREVIEW_SIZE),
                              dur))
        elif file_type in ["jpg", "png", "jpeg"]:
            origin_img_clip = mpy.ImageClip(file_name)
            if params["crop"]:
                w = origin_img_clip.w
                h = origin_img_clip.h
                rect = params["crop"]
                origin_img_clip = vfx.crop(origin_img_clip, w * rect[0],
                                           h * rect[1], w * rect[2],
                                           h * rect[3])
            clips.append(
                set_img_dur(resize_and_fit(origin_img_clip, PREVIEW_SIZE),
                            dur))
        elif file_type in ["txt"]:
            print(res)
            print(file_name)
            # Close the text file promptly instead of leaking the handle.
            with open(file_name) as text_file:
                text = text_file.read()
            origin_txt_clip = mpy.TextClip(
                text,
                color="white",
                font="ArialUnicode",
                fontsize=100).on_color(PREVIEW_SIZE).set_position("center")
            clips.append(
                set_scene_dur(resize_and_fit(origin_txt_clip, PREVIEW_SIZE),
                              dur))

    return mpy.concatenate_videoclips(clips)
Exemple #3
0
def generate_clips(video):
    """Split a raw video into captioned one-minute parts under final_videos/.

    The video is cropped to y2=635, its trailing 31 seconds are dropped, and
    the remainder (when longer than a minute) is cut into 60-second subclips,
    each written out with a "Part N" caption at the top.

    :param video: file name of the source inside raw_videos/
    """
    video_path = "raw_videos/%s" % str(video)
    clip = VideoFileClip(video_path)
    clip = crop(clip, y2=635)
    duration = clip.duration
    clip_start = 0
    clip_end = 60
    number = 1
    # Drop the trailing 31 seconds; for shorter videos keep everything.
    # (The original referenced an undefined `trim` here and crashed with a
    # NameError whenever duration <= 31.)
    if duration > 31:
        clip = clip.subclip(clip_start, duration - 31)
    duration = clip.duration
    temp = None
    if duration > 60:
        parts = duration / 60
        frac, whole = math.modf(parts)

        while clip_end <= duration:
            temp = clip.subclip(clip_start, clip_end)
            text = 'Part %s' % str(number)
            temp_txt = TextClip(text, fontsize=50, color='white')
            temp_txt = temp_txt.set_pos('top').set_duration(clip_end -
                                                            clip_start)
            temp = CompositeVideoClip([temp, temp_txt])
            clip_start += 60
            # The final segment only extends by the fractional remainder.
            if clip_end == (whole * 60):
                clip_end += 60 * frac
            else:
                clip_end += 60
            output = "final_videos/clip%s.mp4" % str(number)
            temp.write_videofile(output)
            number += 1
    clip.close()
    # `temp` only exists when at least one part was produced; the original
    # unconditionally called temp.close() and could raise NameError.
    if temp is not None:
        temp.close()
Exemple #4
0
def cropping(fin, left_up, right_down, fout):
    """Cut the rectangle spanned by *left_up* (x1, y1) and *right_down*
    (x2, y2) out of the video *fin* and write the result to *fout*."""
    source = mpy.VideoFileClip(fin)
    x1, y1 = left_up
    x2, y2 = right_down
    region = crop(source, x1=x1, y1=y1, x2=x2, y2=y2)
    region.write_videofile(fout)
Exemple #5
0
def scheduled_time_scene_transition(schedule, resource_folder_name="res"):
    '''
    Build one video by concatenating the scheduled resources in order.

    params:
    - schedule: a list of tuples of (file name, dur, params); params is a dict
      with optional "part" (subclip start/end) and "crop" (x1, y1, x2, y2 as
      fractions of the clip's width/height) entries
    - resource_folder_name: folder the resource files live in
    '''
    clips = []
    for res, dur, params in schedule:
        # Use the real extension: slicing the last three characters
        # misclassified four-letter extensions such as "jpeg".
        file_type = res.split(".")[-1]
        file_name = os.path.join(resource_folder_name, res)
        if file_type in ["mov", "mp4", "avi", "flv"]:
            origin_video_clip = mpy.VideoFileClip(file_name)
            if params["part"]:
                parts = params["part"]
                origin_video_clip = origin_video_clip.subclip(
                    parts[0], parts[1])
            if params["crop"]:
                w = origin_video_clip.w
                h = origin_video_clip.h
                rect = params["crop"]
                origin_video_clip = vfx.crop(origin_video_clip, w * rect[0],
                                             h * rect[1], w * rect[2],
                                             h * rect[3])
            clips.append(
                set_video_dur(resize_and_fit(origin_video_clip, PREVIEW_SIZE),
                              dur))
        elif file_type in ["jpg", "png", "jpeg"]:
            origin_img_clip = mpy.ImageClip(file_name)
            if params["crop"]:
                w = origin_img_clip.w
                h = origin_img_clip.h
                rect = params["crop"]
                origin_img_clip = vfx.crop(origin_img_clip, w * rect[0],
                                           h * rect[1], w * rect[2],
                                           h * rect[3])
            clips.append(
                set_img_dur(resize_and_fit(origin_img_clip, PREVIEW_SIZE),
                            dur))

    return mpy.concatenate_videoclips(clips)
Exemple #6
0
    def create(self, desired_length):
        """Assemble clips until the total duration reaches *desired_length*,
        tint the result with the configured color overlay, attach the audio
        track, and write the final video (optionally in a TikTok-sized
        vertical crop)."""
        if self.text_boolean == "y":
            self.random_word_screen()

        # Keep adding clips until we have enough footage.
        while self.total_duration < desired_length:
            self.add_clip()

        assembled = mpe.concatenate_videoclips(self.clip_list)
        # Semi-transparent color layer over the whole video.
        overlay = (mpe.ImageClip("assets/colors/" + self.colorEffect + ".png")
                   .resize(self.clip.size)
                   .set_opacity(0.35)
                   .set_duration(self.total_duration))
        assembled = mpe.CompositeVideoClip([assembled, overlay])
        self.audio = self.audio.set_duration(self.total_duration)
        assembled = assembled.set_audio(self.audio)

        if self.resizeForTikTok == "y":
            (w, h) = assembled.size
            # 9:16-ish vertical window centered in the frame.
            if h == 1080:
                crop_w, crop_h = 607.50, 1080
            else:
                crop_w, crop_h = 405, 720
            vertical = crop(assembled,
                            width=crop_w,
                            height=crop_h,
                            x_center=w / 2,
                            y_center=h / 2)
            vertical = vertical.resize(height=crop_h)
            vertical.write_videofile("TikTok_" + self.output_file_name +
                                     ".mp4",
                                     temp_audiofile="temp-audio.m4a",
                                     remove_temp=True,
                                     codec="libx264",
                                     audio_codec="aac")
        else:
            assembled.write_videofile(self.output_file_name + ".mp4",
                                      temp_audiofile="temp-audio.m4a",
                                      remove_temp=True,
                                      codec="libx264",
                                      audio_codec="aac")
def remove_frame(in_fname, out_fname, n_sample_frames=100):
    """Detect the static border around the footage in *in_fname* (from the
    median of *n_sample_frames* sampled frames) and write a copy cropped to
    the inner content box to *out_fname*.

    Returns None and writes nothing when no border is detected.
    """
    median_frame = get_median_frame(get_frames(in_fname, n_sample_frames))
    box = get_frame_box_coords(median_frame)
    if box is None:
        print("No border was detected in {}".format(in_fname))
        return None
    x, y, w, h = box
    source = VideoFileClip(in_fname)
    trimmed = crop(source, x1=x, y1=y, x2=x + w, y2=y + h)
    trimmed.write_videofile(out_fname)
def make_collage(videos0,
                 width,
                 height,
                 collage_folder,
                 savestr,
                 cropvid=True,
                 filext='mp4'):
    """Tile the given videos into width x height grids and write one collage
    file per grid into *collage_folder*, skipping outputs that already exist.

    Cells past the end of *videos0* are padded with a tiny (width=5) copy of
    the last video so every row has the same number of cells.
    """
    os.makedirs(collage_folder, exist_ok=True)
    per_grid = width * height
    n_collages = int(np.ceil(len(videos0) / per_grid))
    for n in range(n_collages):
        start_ix = n * per_grid
        pn = os.path.join(
            collage_folder, f'{savestr}_'
            f'{start_ix}-{start_ix + width*height-1}.{filext}')
        if os.path.exists(pn):
            print(f'EXISTS: {pn}')
            continue

        clips_arr = []
        for row in range(height):
            row_clips = []
            for col in range(width):
                ix = row * width + col + start_ix
                in_range = ix < len(videos0)
                clip = VideoFileClip(videos0[ix] if in_range else videos0[-1])
                if cropvid:
                    clip = crop(clip, x1=100, y1=0, x2=350, y2=190)
                # Out-of-range cells become near-invisible filler clips.
                clip = clip.resize(width=125 if in_range else 5)
                row_clips.append(clip)
            clips_arr.append(row_clips)
        final_clip = clips_array(clips_arr)
        final_clip.write_videofile(pn, codec='libx264')
Exemple #9
0
def watermarkandcrop(path):
    """Crop `border` pixels (module-level global) off every edge of the video
    at *path*, overlay a looping watermark GIF in the bottom-right corner,
    and write the result to "watermarkvideo.mp4". Returns the output path."""
    global border
    source = VideoFileClip(path)
    trimmed = vfx.crop(source,
                       x1=border,
                       y1=border,
                       x2=source.w - border,
                       y2=source.h - border)
    overlay = (VideoFileClip("watermark.gif", has_mask=True)
               .loop()
               .set_duration(source.duration)
               .resize(height=50)
               .margin(right=8, bottom=8, opacity=0)
               .set_pos(("right", "bottom")))
    composed = CompositeVideoClip([trimmed, overlay])
    watermarkpath = "watermarkvideo.mp4"
    composed.write_videofile(watermarkpath, threads=200)

    # Explicitly release the reader resources held by each clip.
    source.close()
    del source

    composed.close()
    del composed
    trimmed.close()
    del trimmed
    return watermarkpath
Exemple #10
0
def saveFileFunc(audio, files, filePath, saveFiles):
    """Persist the uploaded audio and video files, then load each saved
    video, scale its shorter side to 720 and center-crop it to 720x720.

    :param audio: uploaded audio file object (saved as "audio.<ext>" when
        it is an mp3)
    :param files: iterable of uploaded video file objects
    :param filePath: directory the uploads are saved into
    :param saveFiles: list the saved video file names are appended to
    :return: list of dicts with keys "video", "duration", "name", "start",
        "block", sorted by duration (longest first)
    """
    if audio and allowed_file(audio.filename, ["mp3"]):
        filename = audio.filename.rsplit('.', 1)[1]
        audio.save(os.path.join(filePath, "audio." + filename))

    for file in files:
        if file and allowed_file(file.filename, ['mp4', 'webm', 'mov']):
            filename = secure_filename(file.filename)
            saveFiles.append(filename)
            file.save(os.path.join(filePath, filename))

    videos = []

    for file in saveFiles:
        # Reset per iteration: the original reused the previous iteration's
        # clip (or raised NameError on the first file) when loading failed.
        cropped_clip = None
        try:
            videoTmp = VideoFileClip(os.path.join(filePath, file))
            (w, h) = videoTmp.size
            # Scale the shorter side to 720 before cropping the square.
            if w >= h:
                videoTmp = videoTmp.resize(height=720)
            else:
                videoTmp = videoTmp.resize(width=720)

            (w, h) = videoTmp.size
            cropped_clip = crop(videoTmp, width=720, height=720,
                                x_center=w/2, y_center=h/2)
        except KeyError:
            pass

        if cropped_clip is not None:
            videos.append({
                "video": cropped_clip,
                "duration": float(cropped_clip.duration),
                "name": file,
                "start": 0,
                "block": 0,
            })

    videos.sort(key=lambda k: k["duration"], reverse=True)
    return videos
Exemple #11
0
def adjust_clip(driving_video_path, fps, t, target_resolution):
    """Re-encode the driving video at *fps* for at most *t* seconds with
    ffmpeg, center-crop the result to a square, and resize it so its width
    matches target_resolution[0]. Returns the resized clip."""
    # Same path with a "_sparse.mp4" suffix replacing the extension.
    sparser_driving_video = '.'.join(
        driving_video_path.split('.')[:-1]) + '_sparse.mp4'
    stream = ffmpeg.input(driving_video_path).output(
        sparser_driving_video, r=fps, t=t, acodec="aac", vcodec="libx264")
    stream.overwrite_output().run()

    source = VideoFileClip(sparser_driving_video)
    (w, h) = source.size
    side = min(w, h)
    print("og size:", w, h)

    # Largest centered square that fits in the frame.
    squared = crop(source,
                   width=side,
                   height=side,
                   x_center=w / 2,
                   y_center=h / 2)
    return squared.resize(width=target_resolution[0])
Exemple #12
0
def crop_word(found_word: str, start: float, end: float, videoid: str,
              save_loc: Tuple[str]):
    # pylint: disable=too-many-arguments, too-many-locals
    '''crops a video into a small segment, including padding, face detection
    and face bounding.'''
    full_video = VideoFileClip(f'videos/{videoid}.mp4')

    # Pad the word boundaries a little, clamped to the video duration.
    start, end = apply_padding(start, end, 0.25, full_video.duration)

    word_subclip = full_video.subclip(start, end)

    # Detect faces in the first and last frames of the segment.
    start_faces = get_faces(word_subclip.get_frame(t=0))
    end_faces = get_faces(word_subclip.get_frame(t=end - start))

    if len(start_faces) != 1 or len(end_faces) != 1:
        if len(start_faces) == 0 or len(end_faces) == 0:
            print('No faces found in either the start or end frame.')
        else:
            print('Multiple faces found')
        return

    # Exactly one face in both frames: crop to the box bounding both.
    bound_face = get_face_bounds(start_faces[0], end_faces[0])
    final_word = crop(
        word_subclip,
        x1=bound_face['left'],
        x2=bound_face['right'],
        y1=bound_face['top'],
        y2=bound_face['bottom'],
    )
    save_to_file(final_word, found_word, f'{videoid}_{start:.2f}', save_loc)
Exemple #13
0
        def videocut(video, loopinfo, loopcount, audio):
            """Assemble a clip from *video*: when loopinfo != 0 it is
            (loop_start, loop_end, intro_end) and the loop section is repeated
            *loopcount* times after the intro; the result is center-cropped to
            800x800 and, when *audio* is given, rescored with that track."""
            # Muted placeholder track used while looping.
            silent_track = AudioFileClip("./data/audio/lol.mp3")
            silent_track = silent_track.volumex(0)

            if loopinfo != 0:
                intro = VideoFileClip(video).subclip(0, loopinfo[2])
                section = VideoFileClip(video).subclip(loopinfo[0],
                                                       loopinfo[1])
                section = loop(section, loopcount)
                # Total running time: looped section plus the intro length.
                countlooptime = ((loopinfo[1] - loopinfo[0]) * loopcount +
                                 loopinfo[2])
                clip = concatenate_videoclips([intro, section])
            else:
                clip = VideoFileClip(video)

            (w, h) = clip.size
            clip = crop(clip,
                        width=800,
                        height=800,
                        x_center=w / 2,
                        y_center=h / 2)

            if loopinfo != 0:
                clip = clip.set_audio(silent_track)
                clip = clip.subclip(0, countlooptime)

            if audio:
                clip = clip.set_audio(AudioFileClip(audio))
                if loopinfo != 0:
                    clip = clip.subclip(0, countlooptime)
                else:
                    clip = clip.subclip(0, clip.duration)

            return clip
Exemple #14
0
def watermarkandcrop(path):
    """Crop a 30-pixel border off the video at *path*, overlay a looping
    watermark GIF in the bottom-right corner, and write the result to
    'watermarkvideo.mp4'."""
    cv2video = cv2.VideoCapture(path)
    height = cv2video.get(cv2.CAP_PROP_FRAME_HEIGHT)
    width = cv2video.get(cv2.CAP_PROP_FRAME_WIDTH)
    # Release the capture as soon as the dimensions are read.
    cv2video.release()
    print(height)
    print(width)
    height = int(height)
    width = int(width)
    border = 30
    # Bug fix: load the clip from *path* — the original hard-coded
    # "test.mp4" while cropping with the dimensions read from *path*.
    clip = VideoFileClip(path)
    new_clip = vfx.crop(clip,
                        x1=border,
                        y1=border,
                        x2=width - border,
                        y2=height - border)
    watermark = VideoFileClip(
        "watermark.gif", has_mask=True).loop().set_duration(
            clip.duration).resize(height=50).margin(right=8,
                                                    bottom=8,
                                                    opacity=0).set_pos(
                                                        ("right", "bottom"))
    watermark_video = CompositeVideoClip([new_clip, watermark])
    watermark_video.write_videofile('watermarkvideo.mp4', threads=200)
def prepare_video(vid, thumbnail_frame_ts=0.0,
                  max_size=(1080, 1350),
                  aspect_ratios=(4.0 / 5.0, 90.0 / 47.0),
                  max_duration=60.0,
                  save_path=None,
                  skip_reencoding=False,
                  **kwargs):
    """
    Prepares a video file for posting.
    Defaults for size and aspect ratio from https://help.instagram.com/1469029763400082

    :param vid: file path
    :param thumbnail_frame_ts: the frame of clip corresponding to time t (in seconds) to be used as the thumbnail
    :param max_size: tuple of (max_width,  max_height)
    :param aspect_ratios: single float value or tuple of (min_ratio, max_ratio)
    :param max_duration: maximum video duration in seconds
    :param save_path: optional output video file path
    :param skip_reencoding: if set to True, the file will not be re-encoded
        if there are no modifications required. Default: False.
    :param kwargs:
         - **min_size**: tuple of (min_width,  min_height)
         - **progress_bar**: bool flag to show/hide progress bar
         - **save_only**: bool flag to return only the path to the saved video file. Requires save_path be set.
         - **preset**: Sets the time that FFMPEG will spend optimizing the compression.
         Choices are: ultrafast, superfast, veryfast, faster, fast, medium,
         slow, slower, veryslow, placebo. Note that this does not impact
         the quality of the video, only the size of the video file. So
         choose ultrafast when you are in a hurry and file size does not matter.
    :return: tuple of (video content or save path, (width, height), duration, thumbnail JPEG bytes)
    :raises ValueError: on invalid save_path/thumbnail timestamp, too-short
        duration, or an output file over the 50 MB limit
    """
    # Imported lazily so importing this module does not require moviepy.
    from moviepy.video.io.VideoFileClip import VideoFileClip
    from moviepy.video.fx.all import resize, crop

    min_size = kwargs.pop('min_size', (612, 320))
    logger = 'bar' if kwargs.pop('progress_bar', None) else None
    save_only = kwargs.pop('save_only', False)
    preset = kwargs.pop('preset', 'medium')
    if save_only and not save_path:
        raise ValueError('"save_path" cannot be empty.')
    if save_path:
        if not save_path.lower().endswith('.mp4'):
            raise ValueError('You must specify a .mp4 save path')

    vid_is_modified = False     # flag to track if re-encoding can be skipped

    # Work on a local temp copy so remote URLs and local paths share one path.
    temp_video_file = tempfile.NamedTemporaryFile(prefix='ipae_', suffix='.mp4', delete=False)

    if is_remote(vid):
        # Download remote file
        res = requests.get(vid)
        temp_video_file.write(res.content)
        video_src_filename = temp_video_file.name
    else:
        shutil.copyfile(vid, temp_video_file.name)
        video_src_filename = vid

    # Ref: https://github.com/Zulko/moviepy/issues/833#issuecomment-537885162
    with VideoFileClip(temp_video_file.name) as vidclip:

        # Instagram rejects clips shorter than 3 seconds.
        if vidclip.duration < 3 * 1.0:
            raise ValueError('Duration is too short')

        # Trim anything past the allowed maximum duration.
        if vidclip.duration > max_duration * 1.0:
            vidclip = vidclip.subclip(0, max_duration)
            vid_is_modified = True

        if thumbnail_frame_ts > vidclip.duration:
            raise ValueError('Invalid thumbnail frame')

        # Crop to an acceptable aspect ratio when needed.
        if aspect_ratios:
            crop_box = calc_crop(aspect_ratios, vidclip.size)
            if crop_box:
                vidclip = crop(vidclip, x1=crop_box[0], y1=crop_box[1], x2=crop_box[2], y2=crop_box[3])
                vid_is_modified = True

        # Scale into the [min_size, max_size] bounds when needed.
        if max_size or min_size:
            new_size = calc_resize(max_size, vidclip.size, min_size=min_size)
            if new_size:
                vidclip = resize(vidclip, newsize=new_size)
                vid_is_modified = True

        temp_vid_output_file = tempfile.NamedTemporaryFile(prefix='ipae_', suffix='.mp4', delete=False)
        if vid_is_modified or not skip_reencoding:
            # write out
            vidclip.write_videofile(
                temp_vid_output_file.name, codec='libx264', audio=True, audio_codec='aac',
                verbose=False, logger=logger, preset=preset, remove_temp=True)
        else:
            # no reencoding
            shutil.copyfile(video_src_filename, temp_vid_output_file.name)

        if save_path:
            shutil.copyfile(temp_vid_output_file.name, save_path)

        # Temp thumbnail img filename
        temp_thumbnail_file = tempfile.NamedTemporaryFile(prefix='ipae_', suffix='.jpg', delete=False)
        vidclip.save_frame(temp_thumbnail_file.name, t=thumbnail_frame_ts)

        video_duration = vidclip.duration
        video_size = vidclip.size

        # Reads via the still-open handle; the frame was written to its path.
        video_thumbnail_content = temp_thumbnail_file.read()

        if not save_only:
            video_content_len = os.path.getsize(temp_vid_output_file.name)
            video_content = temp_vid_output_file.read()
        else:
            video_content_len = os.path.getsize(save_path)
            video_content = save_path    # return the file path instead

        # Instagram's upload limit (~50 MB).
        if video_content_len > 50 * 1024 * 1000:
            raise ValueError('Video file is too big.')

        return video_content, video_size, video_duration, video_thumbnail_content
Exemple #16
0
# Parametrise a torus: c is the distance from the tube centre to the torus
# centre, a is the tube radius. `n` (grid resolution) is defined elsewhere
# in the file — TODO confirm its value where this is used.
theta = np.linspace(0, 2. * np.pi, n)
phi = np.linspace(0, 2. * np.pi, n)
theta, phi = np.meshgrid(theta, phi)
c, a = 2, 1
x = (c + a * np.cos(theta)) * np.cos(phi)
y = (c + a * np.cos(theta)) * np.sin(phi)
z = a * np.sin(theta)


def make_frame(t):
    """Redraw both torus axes for time *t* (rotating the view over time)
    and return the matplotlib figure as an RGB frame array."""
    for axis in (ax1, ax2):
        axis.clear()
        axis.grid(False)
        axis.axis("off")
        axis.plot_surface(x, y, z, rstride=5, cstride=5, edgecolors='w')
        axis.set_zlim(-3, 3)
        # First axis rotates azimuthally, the second rotates in elevation.
        if axis == ax1:
            axis.view_init(15, int(30 * t))
        else:
            axis.view_init(int(30 * t), 36)
    return mplfig_to_npimage(fig)


# Render a 6-second clip of the rotating torus, crop a 1200x600 window
# around (900, 700), and export the result as mp4.
animation = VideoClip(make_frame, duration=6)
#animation.preview(fps=10)
#animation.show(1.5, interactive=True)
final = crop(animation, x_center=900, y_center=700, width=1200, height=600)
#final.show(1.5, interactive=True)
#final.preview(fps=5)
final.write_videofile('torus.mp4', fps=30)
Exemple #17
0
import moviepy.editor as mpy
import PIL
from moviepy.video.fx.all import crop

file_path = "C:/Users/masho/Desktop/work/python/Python/lib/movie/Café_22728.mp4"  # path of the video to edit
save_path = "C:/Users/masho/Desktop/work/python/Python/lib/movie/sample.mp4"  # output path for the cropped video

# Cut a 1000x600 window centred on the frame's upper-left quarter point
# (w/4, h/4) and write it out.
clip = mpy.VideoFileClip(file_path)
(w, h) = clip.size
cropped_clip = crop(clip,
                    width=1000,
                    height=600,
                    x_center=w / 4,
                    y_center=h / 4)

cropped_clip.write_videofile(save_path)
Exemple #18
0
def generate_video_matrix(vid_files,
                          titles=None,
                          out_file=None,
                          columns=None,
                          fps=None,
                          crop_rect=None):
    """Arrange videos into a grid with a caption row under each video row
    and write the combined video out.

    :param vid_files: list of video file paths, or already-open clip objects
    :param titles: caption per video; defaults to the file basenames
    :param out_file: output path (default "combined.mp4")
    :param columns: videos per row; a single row when None
    :param fps: frame rate passed to write_videofile
    :param crop_rect: optional (x1, y1, width, height) applied to every clip
    """
    os.environ["IMAGEMAGICK_BINARY"] = glob.glob(
        r"C:\Program Files\ImageMagick-*\magick.exe")[0]
    try:
        import moviepy
    except ImportError:
        # Install on demand. Invoke pip through the current interpreter with
        # an argument list instead of the original bare shell string.
        import sys
        subprocess.call([sys.executable, "-m", "pip", "install", "moviepy"])

    from moviepy.editor import VideoFileClip, TextClip, ColorClip, clips_array, vfx
    import numpy as np
    from moviepy.video.fx.all import crop

    if out_file is None:
        out_file = "combined.mp4"

    if type(vid_files[0]) == str:
        vid_clips = [
            VideoFileClip(x, resize_algorithm="fast_bilinear")
            for x in vid_files
        ]
    else:
        vid_clips = vid_files
    max_h = np.max([x.h for x in vid_clips])

    # Scale every clip to the tallest clip's height.
    vid_clips = [x.fx(vfx.resize, max_h / x.h) for x in vid_clips]
    if crop_rect:
        vid_clips = [
            crop(
                x,
                x1=crop_rect[0],
                y1=crop_rect[1],
                width=crop_rect[2],
                height=crop_rect[3],
            ) for x in vid_clips
        ]

    vid_clips = [v.margin(2) for v in vid_clips]

    # All cells must share one duration for clips_array.
    min_duration = np.min([v.duration for v in vid_clips])
    print("Set duration to min of all videos: %i" % min_duration)
    vid_clips = [v.set_duration(min_duration) for v in vid_clips]

    def create_text_clip(text, dura):
        # Caption clip shown under each video cell.
        return TextClip(text,
                        font="Verdana",
                        fontsize=max_h / 20,
                        color="white").set_duration(dura)

    if titles is None:
        titles = [os.path.splitext(os.path.basename(x))[0] for x in vid_files]
    text_clips = [create_text_clip(x, min_duration) for x in titles]

    arr = []
    if columns is not None:
        for i in range(0, len(vid_clips), columns):
            arr.append(vid_clips[i:i + columns])
            arr.append(text_clips[i:i + columns])

        # Pad the last video+caption rows so every row has `columns` cells.
        remainder = len(vid_clips) % columns
        if remainder != 0:
            remainder = columns - remainder
            blank_clip = ColorClip((1, 1), color=(0, 0, 0), duration=0)
            arr[-1].extend([blank_clip] * remainder)
            arr[-2].extend([blank_clip] * remainder)

    else:
        arr.append(vid_clips)
        arr.append(text_clips)

    final = clips_array(arr)

    final.write_videofile(out_file, fps=fps)
Exemple #19
0
# configure the reader to only replay the clip for the a duration of 2 seconds between `00:10` and
# `00:12`.
import datetime
from stonesoup.reader.video import VideoClipReader
# Replay only the two-second window between 00:10 and 00:12.
start_time = datetime.timedelta(minutes=0, seconds=10)
end_time = datetime.timedelta(minutes=0, seconds=12)
frame_reader = VideoClipReader(VIDEO_PATH, start_time, end_time)

# %%
# It is also possible to apply clip transformations and effects, as per the
# `MoviePy documentation <https://zulko.github.io/moviepy/getting_started/effects.html>`_.
# The underlying MoviePy :class:`~VideoFileClip` instance can be accessed through the
# :attr:`~.VideoClipReader.clip` class property. For example, we can crop out 100 pixels from
# the top and left of the frames, as they are read by the reader, as shown below.
from moviepy.video.fx import all
# Positional 100, 100 map to crop's x1, y1 (left and top offsets).
frame_reader.clip = all.crop(frame_reader.clip, 100, 100)
num_frames = len(list(frame_reader.clip.iter_frames()))

# %%
# FFmpegVideoStreamReader
# ***********************
# For reference purposes, we also include here an example of how to build a
# :class:`~.FFmpegVideoStreamReader`. Let's assume that we have a camera which broadcasts its feed
# through a public RTSP stream, under the URL ``rtsp://192.168.55.10:554/stream``. We can build a
# :class:`~.FFmpegVideoStreamReader` object to read frames from this stream as follows:
#
# .. code:: python
#
#   in_opts = {'threads': 1, 'fflags': 'nobuffer'}
#   out_opts = {'format': 'rawvideo', 'pix_fmt': 'bgr24'}
#   stream_url = 'rtsp://192.168.55.10:554/stream'
Exemple #20
0

# it will pixelated gif because of ffmpeg
clip = VideoFileClip(source_path)
fps = clip.fps
sub_clip1 = clip.subclip(t_start=10, t_end=20)
sub_clip1 = sub_clip1.resize(width=500)
sub_clip1.write_gif(output_path, program='ffmpeg', fps=fps)
# ffmpeg will give small size gifs compared to imageio(default)
# but imageio will not give pixelated gif.


# solution:- make mp4 from this subclip and then do the gif part.
# >>> but very large file. <<<
# output_path = os.path.join(GIF_DIR, 'sample2.gif')
# output_path2 = os.path.join(GIF_DIR, 'sample.mp4')
# sub_clip2 = clip.subclip(t_start=10, t_end=20)
# sub_clip2.write_videofile(output_path2)
# sub_clip_path = os.path.join(GIF_DIR, 'sample.mp4')
# sub_clip2.write_gif(output_path, program='ffmpeg', fps=fps)


# creating a cropped clip
width, height = clip.size
sub_clip3 = clip.subclip(t_start=10, t_end=20)
cropped_clip = crop(sub_clip3, width=320, height=320,
                    x_center=width / 2, y_center=height / 2)

output_path = os.path.join(GIF_DIR, 'sample3.gif')
cropped_clip.write_gif(output_path, program='ffmpeg', fps=fps)
Exemple #21
0
# load frame
# NOTE(review): fps, DEBUG_FLAG and all *_overlay / width_* / windowsize
# variables are defined elsewhere in the file — confirm before running
# this section in isolation.
clip = VideoFileClip("tdf2018-20.mp4").without_audio().set_fps(fps)

# save frame for reference pixel finding
if DEBUG_FLAG:
    clip.save_frame("frame_riding.png", t='03:06:30')
    clip.save_frame("frame_finish.png", t='03:07:00')

substart = '03:26:00'
subend = '04:26:00'
#%%
subclip = clip.subclip(substart, subend)
# crop subframes
# The broadcast name/team overlay rectangle within the frame.
overlay_clip = crop(subclip,
                    x1=x1_overlay,
                    y1=y1_overlay,
                    x2=x1_overlay + width_overlay,
                    y2=y1_overlay + height_overlay)

# crop in subframes
# Sub-regions of the overlay: rider name (left), team (right), and a small
# sample window used for color probing.
name_clip = crop(overlay_clip, x2=width_name)
team_clip = crop(overlay_clip, x1=width_overlay - width_team, x2=width_overlay)
colorpixel_clip = crop(overlay_clip, x2=width_name)
pixelcolor = crop(name_clip,
                  x1=width_name - 2 * windowsize,
                  y1=round(height_overlay / 2) - round(windowsize / 2),
                  x2=width_name - windowsize,
                  y2=round(height_overlay / 2) + round(windowsize / 2))
whitecolor = crop(
    subclip,
    x1=x1_overlay + width_overlay + width_white_pad,
Exemple #22
0
def run_cloning(source_image_path,
                driving_video_path,
                result_video_path=None,
                run_fer=run_fer,
                fps=12,
                t=30,
                resized_clip=None):
    """Animate a still source image with the motion of a driving video.

    Crops both inputs to centered squares at 256x256, runs the first-order
    motion model (``make_animation`` with the module-level ``generator`` and
    ``kp_detector``), re-attaches the driving clip's audio, and writes the
    result to disk.

    Parameters:
    - source_image_path: path to the still image to animate.
    - driving_video_path: path to the video whose motion is transferred.
    - result_video_path: output path; when None, a name is generated under
      ``app.config['UPLOAD_FOLDER']``.
    - run_fer: when truthy, also runs facial-expression recognition on the
      driving frames and embeds the top-3 class names in the generated
      filename. NOTE(review): the default binds the module-level ``run_fer``
      value at definition time.
    - fps / t: frame rate and max duration (seconds) used when re-encoding
      the driving video with ffmpeg.
    - resized_clip: optional pre-cropped/resized moviepy clip; when given,
      the ffmpeg re-encode and crop steps are skipped.

    Returns: the output video's filename (basename only).
    """
    global generator, kp_detector
    target_resolution = (256, 256)

    source_image = imageio.imread(source_image_path)
    source_image = crop_square_center(source_image)

    if resized_clip is None:
        # re-encode the driving video at a sparser frame rate and capped
        # duration; unique name avoids clobbering concurrent runs
        sparser_driving_video = '.'.join(driving_video_path.split('.')[:-1]) \
            + '_' + str(int(time.time())) + '_sparse.mp4'
        (ffmpeg.input(driving_video_path).output(
            sparser_driving_video, r=fps, t=t, acodec="aac",
            vcodec="libx264").overwrite_output().run())

        orig_clip = VideoFileClip(sparser_driving_video)
        (w, h) = orig_clip.size
        new_dim = min(w, h)
        print("og size:", w, h)

        # center-crop to the largest possible square, then scale to 256 wide
        cropped_clip = crop(orig_clip,
                            width=new_dim,
                            height=new_dim,
                            x_center=w / 2,
                            y_center=h / 2)
        resized_clip = cropped_clip.resize(width=target_resolution[0])

    print("resized clip", resized_clip.size)
    resized_clip = resized_clip.set_fps(fps)
    # resized_clip.write_videofile('resizing_test.mp4')

    # NOTE(review): .audio is None for silent clips — set_audio(None) below
    # would then produce a silent output; confirm that is acceptable.
    orig_audio = resized_clip.audio
    driving_video = resized_clip.iter_frames()

    if run_fer:
        classes_by_score = video_to_fer(driving_video)

    # reset driving video (the iterator above was consumed by FER)
    driving_video = resized_clip.iter_frames()
    source_image = resize(source_image, target_resolution)[..., :3]
    driving_video = [
        resize(frame, target_resolution)[..., :3] for frame in driving_video
    ]

    predictions = make_animation(source_image,
                                 driving_video,
                                 generator,
                                 kp_detector,
                                 relative=opt.relative,
                                 adapt_movement_scale=opt.adapt_scale,
                                 cpu=opt.cpu)

    if result_video_path is None:
        # auto-generate a filename, optionally tagged with the top-3 FER classes
        output_filename = 'mind_theft_' + str(int(time.time()))
        if run_fer:
            top3_classes = '_'.join(list(classes_by_score.keys())[:3])
            output_filename += '_' + top3_classes

        output_filename += '.mp4'
        result_video_path = os.path.join(app.config['UPLOAD_FOLDER'],
                                         output_filename)
    else:
        output_filename = result_video_path.split('/')[-1]

    # print('predictions shape', len(predictions), predictions[0].shape, predictions[0].dtype)
    new_video = ImageSequenceClip(
        [img_as_ubyte(frame) for frame in predictions], fps=fps)
    # H x W x 3 format
    new_video = new_video.set_audio(orig_audio)
    new_video.write_videofile(result_video_path, audio_codec='aac')

    return output_filename
Exemple #23
0
mheight = 720

# NOTE(review): `mwidth`, `clips`, `totalDur` and `base` are defined earlier in
# this file, outside this excerpt.
print("\n\n" + str(mwidth) + ", " + str(mheight) + "\n\n")

croppedClips = []
for clip in clips:
    # first scale it up
    croppedClip = clip.resize(width=mwidth)
    if mheight > mwidth:
        # NOTE(review): this resizes from the ORIGINAL `clip`, discarding the
        # width-based resize above, and the condition compares the TARGET
        # dimensions rather than the resized clip's actual height — confirm
        # this is the intended fit logic.
        croppedClip = clip.resize(height=mheight)

    # then crop it down: centered mwidth x mheight window
    (w, h) = croppedClip.size
    croppedClip = vfx.crop(croppedClip,
                           width=mwidth,
                           height=mheight,
                           x_center=w / 2,
                           y_center=h / 2)
    croppedClips.append(croppedClip.set_duration(clip.duration))

# prepare vignette overlay
vignette = ImageClip(
    base.pixabay.downloadPicFromUrl('http://i.stack.imgur.com/UsSV9.png'))
vignette = vignette.resize((mwidth, mheight)).set_duration(totalDur)

print(clips)
# compose the clips and add vignette
concat_clip = concatenate_videoclips(croppedClips,
                                     method='compose').set_duration(totalDur)
final_clip = CompositeVideoClip([concat_clip, vignette]).set_duration(totalDur)
final_clip.write_videofile('test.webm', fps=20, bitrate='15000k')
Exemple #24
0
    # NOTE(review): this fragment is the body of a loop whose header lies
    # outside this excerpt; `i` is initialized there.
    i = i + 29

    # read the video file

    clip = VideoFileClip("thang/thang (" + str(i) + ").mp4")

    # crop to the smallest dimension in square ratio

    w = clip.size[0]
    h = clip.size[1]

    # centered square crop, 5% (1/20) smaller than the short side
    if w > h:
        clip = crop(clip,
                    width=h - (h / 20),
                    height=h - (h / 20),
                    x_center=w / 2,
                    y_center=h / 2)
    else:
        clip = crop(clip,
                    width=w - (w / 20),
                    height=w - (w / 20),
                    x_center=w / 2,
                    y_center=h / 2)

    # resize video to 1080x1080
    # (clip is square here, so fixing the height to 1080 yields 1080x1080)

    clip = clip.resize(height=1080)

    # trim the clip if longer than 60 sec
Exemple #25
0
# Build 10-second GIF excerpts from a sample video: one resized to 500px wide,
# one cropped to a centered 320x320 square.
import os  # explicit import; previously this script relied on `os` leaking
           # out of `from moviepy.editor import *`

from conf import SAMPLE_INPUTS, SAMPLE_OUTPUTS
from moviepy.editor import *  # ImageClip
from PIL import Image
from moviepy.video.fx.all import crop

source_path = os.path.join(SAMPLE_INPUTS, 'sample.mp4')

GIF_DIR = os.path.join(SAMPLE_OUTPUTS, "gifs")
os.makedirs(GIF_DIR, exist_ok=True)

output_path1 = os.path.join(GIF_DIR, 'sample1.gif')
output_path2 = os.path.join(GIF_DIR, 'sample2.gif')

clip = VideoFileClip(source_path)
# reuse the source video's native frame rate for the GIFs
fps = clip.reader.fps
subclip = clip.subclip(10, 20)
subclip = subclip.resize(width=500)
# subclip.write_gif(output_path1, fps=fps, program='ffmpeg')

w, h = clip.size
subclip2 = clip.subclip(10, 20)
# 320x320 square crop centered on the frame
square_cropped_clip = crop(subclip2,
                           width=320,
                           height=320,
                           x_center=w / 2,
                           y_center=h / 2)

# square_cropped_clip.write_gif(output_path2, fps=fps, program='ffmpeg')
Exemple #26
0
import moviepy.video.fx.all as vfx
import os
import gc

n_sessions = 5
var_aux = 0
aux = 0

#for i in range(0,n_sessions):
i = 1
cont = 1
print("Session", i + 1)
dir_videos = 'D:/IEMOCAP_full_release/IEMOCAP/Video'
dir_videos_s = dir_videos + '/Session' + str(i + 1)
files = os.listdir(dir_videos_s)
dir_videos_cropped = 'D:/IEMOCAP_full_release/IEMOCAP/Video_cropped_2/Session' + str(
    i + 1)
#os.mkdir(dir_videos_cropped)
for j in range(len(files)):
    # free clip resources from previous iterations before opening a new file
    gc.collect()
    if j == 2:
        dir_video_s = dir_videos_s + '/' + files[j + var_aux]
        # NOTE(review): `VideoFileClip` is not imported in this excerpt —
        # presumably imported earlier in the file; verify.
        clip = VideoFileClip(dir_video_s)
        # 450,650
        # fixed pixel rectangle isolating one speaker in the IEMOCAP frame
        new_clip_2 = vfx.crop(clip, x1=450, x2=645, y1=0, y2=280)
        video_cropped_2 = dir_videos_cropped + '/2_' + files[
            j + var_aux][:-3] + 'mp4'
        new_clip_2.write_videofile(video_cropped_2, fps=29.97, audio=False)
        new_clip_2.close()
        clip.close()
Exemple #27
0
# Write a 320x320 center-cropped, 10-second GIF excerpt from a sample video.
import os
from conf import SAMPLE_INPUTS, SAMPLE_OUTPUTS
from moviepy.editor import *
from PIL import Image
from moviepy.video.fx.all  import crop


source_path = os.path.join(SAMPLE_INPUTS, 'sample.mp4')

GIF_DIR = os.path.join(SAMPLE_OUTPUTS, "gifs")
os.makedirs(GIF_DIR, exist_ok=True)

salida_ruta1 = os.path.join(GIF_DIR, 'ejemplo1.gif')
salida_ruta2 = os.path.join(GIF_DIR, 'ejemplo2.gif')

clip = VideoFileClip(source_path)
# reuse the source video's native frame rate for the GIF
fps = clip.reader.fps
subclip = clip.subclip(10, 20)
#subclip = subclip.resize(width=320)
#subclip.write_gif(salida_ruta1, fps=20, program='ffmpeg')

w, h = clip.size
subclip2 = clip.subclip(10, 20)
# 320x320 square crop centered on the frame
cuadro_crpped_clip = crop(subclip2, width=320, height=320, x_center=w/2, y_center=h/2)

cuadro_crpped_clip.write_gif(salida_ruta2, fps=fps, program='ffmpeg')
    def split(
        self,
        video_path,
        offset=DEFUALT["offset"],
        duration=DEFUALT["duration"],
        n_frames=DEFUALT["n_frames"],
        frame_size=DEFUALT["frame_size"],
        mutate_end_time=DEFUALT["mutate_end_time"],
    ):
        """Extract a fixed-length window from a video and sample frames/audio.

        The window is [end_time - duration, end_time] where end_time is
        offset + duration, clamped to the video's (floored) length. The clip
        is center-cropped to a square and resized to frame_size x frame_size,
        then `n_frames` frames are sampled at distinct integer timestamps.

        Args:
            video_path: path to the source video file.
            offset: desired window start time in seconds.
            duration: window length in seconds (expected to be an int).
            n_frames: number of distinct frame timestamps to sample.
            frame_size: side length in pixels of the square output frames.
            mutate_end_time: when False, videos too short to fit the window
                at the requested offset are rejected (returns None) instead
                of sliding the window back.

        Returns:
            dict with "frames" (list of frame arrays) and "audios" (list with
            one sound array), or None when the video is too short or frame
            extraction fails.

        Raises:
            ValueError: if `video_path` does not exist.
        """
        if not os.path.isfile(video_path):
            raise ValueError(f"FileNotExisted:{video_path}")

        video_name = Path(video_path).stem  # kept for parity (currently unused)

        # load video
        video = VideoFileClip(video_path)

        # if whole duration size is smaller than duration, just pass.
        if video.duration < duration:
            return None

        end_time = offset + duration

        if mutate_end_time is False:
            if end_time > math.floor(video.duration):
                return None

        # clamp so the window always fits inside the video
        end_time = min(end_time, math.floor(video.duration))
        start_time = end_time - duration

        video = video.subclip(start_time, end_time)
        assert video.duration == duration

        # crop to the largest centered square, then resize
        (w, h) = video.size
        min_size = min(w, h)
        video = crop(video,
                     width=min_size,
                     height=min_size,
                     x_center=w / 2,
                     y_center=h / 2).resize(width=frame_size,
                                            height=frame_size)

        # frame part: sample n_frames distinct integer timestamps
        time_ranges = range(0, duration)
        pick_frame_times = random.sample(time_ranges, n_frames)

        frames = []
        for t in pick_frame_times:
            # was bare `except:` — narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed; behavior is otherwise unchanged
            try:
                frames.append(video.get_frame(t))
            except Exception:
                # fall back to the last timestamp in the window; give up
                # entirely if even that fails
                try:
                    frames.append(video.get_frame(time_ranges[-1]))
                except Exception:
                    return None

        # audio part
        # NOTE(review): video.audio is None for silent clips, which would
        # raise AttributeError here — confirm inputs always carry audio.
        audios = [video.audio.to_soundarray(fps=DEFUALT["sr"])]

        video.close()

        return {
            "frames": frames,
            "audios": audios,
        }
import os.path




#videos_path = '/media/zaigham/Data4/normal_no_human_videos'

# x = glob.glob("/home/zaigham/Desktop/FC test videos/**/*.mp4")
# NOTE(review): `glob`, `mp` (moviepy) and `crop` are not imported in this
# excerpt — presumably imported earlier in the file; verify.
x = glob.glob("/media/zaigham/SSD_1TB/Pohang dataset/normal/*.mp4")
target_dir = '/media/zaigham/SSD_1TB/Pohang dataset/center_cropped/normal/'


#Use os.path.basename(path) to get the filename.




num = 1

# center-crop every video to an h x h square and shrink to 112x112
for i in x:

    clip = mp.VideoFileClip(i)
    (w, h) = clip.size
    cropped_clip = crop(clip, width=h, height=h, x_center=w / 2, y_center=h / 2)
    # cropped_clip.write_videofile(target_dir + 'anomaly' + str(num)+'.mp4')


    clip_resized = cropped_clip.resize((112,112)) # make the height 360px ( According to moviePy documenation The width is then computed so that the width/height ratio is conserved.)
    clip_resized.write_videofile(target_dir + 'anomaly' + str(num)+'.mp4')
    num += 1