Example #1
def side_by_side_videos(test_video_file_path, ref_video_file_path,
                        target_file_path):
    # aiming for 800 x 1200 pixels
    w0 = 800
    h0 = 1200
    desired_ratio = w0 / h0

    clip1 = VideoFileClip(test_video_file_path)
    clip2 = VideoFileClip(ref_video_file_path)

    w1 = clip1.w
    h1 = clip1.h
    if w1 / h1 > desired_ratio:
        # scale width
        clip1 = clip1.resize(w0 / w1)
    else:
        # scale height
        clip1 = clip1.resize(h0 / h1)

    w2 = clip2.w
    h2 = clip2.h
    if w2 / h2 > desired_ratio:
        # scale width (use w2 for the second clip, not w1)
        clip2 = clip2.resize(w0 / w2)
    else:
        # scale height
        clip2 = clip2.resize(h0 / h2)
    video = clips_array([[clip1, clip2]])
    video.write_videofile(target_file_path)
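A minimal usage sketch for the helper above, assuming MoviePy 1.x; the import line and file paths are my additions, not part of the original example:

from moviepy.editor import VideoFileClip, clips_array

side_by_side_videos("test_clip.mp4", "reference_clip.mp4", "side_by_side.mp4")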
Example #2
def process_vid(video, args):
    """
    If video is not valid we will print error and pass
    out_directory must exist!
    """

    out_directory = get_output_directory(args.outdir, video, args)

    try:
        clip = VideoFileClip(video)
    except Exception as e:
        print(f"Unable to open clip{video}: {e}")
        return

    save_file = get_save_file(out_directory, video, args)
    if args.time_start is not None:
        clip = clip.subclip(t_start=args.time_start, t_end=args.time_end)

    if exists(save_file):
        scenes = FramesMatches.load(save_file)
    else:
        scenes = FramesMatches.from_clip(clip.resize(width=120), dist_thr=args.dist_thr, max_d=args.max_d)
        try:
            scenes.save(save_file)
        except Exception as e:
            print(f"Unable to save matches: {e}")

    selected_scenes = scenes.select_scenes(match_thr=args.match_thr, min_time_span=args.min_time_span,
                                           nomatch_thr=args.nomatch_thr, time_distance=args.time_distance)
    selected_scenes.write_gifs(clip.resize(width=450), out_directory)
    optimize_dir(out_directory)
Example #3
def merge_videos(path_source_1, path_source_2, mixed_path):

    clip01 = VideoFileClip(path_source_1)
    clip02 = VideoFileClip(path_source_2)

    clip01 = clip01.resize(0.60)
    clip02 = clip02.resize(0.60)

    final_clip = CompositeVideoClip([clip01.set_position(("left","center")), clip02.set_position(("right","center"))], size=(720, 460))
    final_clip.write_videofile(mixed_path)
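For completeness, a hedged usage sketch; the imports and paths are assumptions rather than part of the original snippet:

from moviepy.editor import VideoFileClip, CompositeVideoClip

merge_videos("left_source.mp4", "right_source.mp4", "merged.mp4")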
Example #4
def process_video(filename, overwrite=False, max_width=1600, max_height=1600, max_file_size=5*1024**2, gifdir='gifs/'):

    gif_name = gifdir + filename + '.gif'

    if isfile(gif_name) and not overwrite:
        print("Skipping " + gif_name + " as it already exists.")
        return
    
    video_file = VideoFileClip(filename)

    try:
        # crop 16:9 sources to 4:3 by keeping the middle 3/4 of the width
        assert_approx_equal(float(video_file.w)/float(video_file.h), 16.0/9.0)
        video_file = video_file.crop(x1=video_file.w/8, x2=7*video_file.w/8)
    except AssertionError:
        print("Not resizing video.")

    if video_file.h > max_height:
        video_file = video_file.resize(height=max_height)

    if video_file.w > max_width:
        video_file = video_file.resize(width=max_width)

    end_image = video_file.to_ImageClip(video_file.end-(1/video_file.fps)).set_duration(0.7)
    
    video_file = concatenate([video_file, end_image])
    fadein_video_file = CompositeVideoClip(
        [video_file,
         (video_file.to_ImageClip()
          .set_duration(0.7)
          .crossfadein(0.4)
          .set_start(video_file.duration-0.7)),
     ]
    )
    
    logo_size = video_file.h/6
    text = ImageClip(
        expanduser("~/dropbox/bslparlour/twitter_logo2.png")).set_duration(
            video_file.duration).resize(width=logo_size).set_pos(
                (video_file.w-logo_size,video_file.h-logo_size))


    composite_video_file = CompositeVideoClip([fadein_video_file, text])
    composite_video_file.write_gif(gif_name,fps=20)

    fuzz_amt = 5
    commands = 'gifsicle "'+gif_name+'" -O3 | convert -fuzz '+str(fuzz_amt)+'% - -ordered-dither o8x8,16 -layers optimize-transparency "'+gif_name+'"'

    process = call(commands, shell=True)

    if getsize(gif_name) > max_file_size:
        process_video(filename,
                      max_height=video_file.h*0.95,
                      overwrite=True,
                      gifdir=gifdir,
                      max_file_size=max_file_size)
Example #5
 def resize_video(self,
                  source: str,
                  destination: str,
                  scale: float = None,
                  width: int = None,
                  height: int = None) -> None:
     video = VideoFileClip(source)
     if scale:
         video = video.resize(scale)
     if width:
         video = video.resize(width=width)
     if height:
         video = video.resize(height=height)
     video.write_videofile(destination)
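When only width or height is passed, MoviePy's resize derives the other dimension so the aspect ratio is preserved. A minimal standalone sketch, assuming MoviePy 1.x (the file names are hypothetical):

from moviepy.editor import VideoFileClip

video = VideoFileClip("input.mp4")        # e.g. 1920x1080
video = video.resize(width=640)           # height becomes 360 automatically
video.write_videofile("input_small.mp4")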
Example #6
def playVideo():
    clip = VideoFileClip(
        'videos/SciFi-teens-p5.mp4'
    )  #, target_resolution=(largeSize[1], largeSize[0]))#, target_resolution=(480,800))
    clip = clip.volumex(0.05)
    clip = clip.resize(width=800)
    clip.preview(fullscreen=True)
Example #7
def get_video(path, with_audio):
    video = VideoFileClip(path).subclip(start_sec, end_sec)
    if not with_audio:
        video = video.without_audio()
    if do_downscale:
        video = video.resize(0.25)
    return video
Example #8
 def get_outputs(self):
     super(OriginalVideoClipJob, self).get_outputs()
     clip = VideoFileClip(
         os.path.join(self.video_location, self.video_filename))
     if self.frame_size is not None:
         clip = clip.resize(self.frame_size)
     return clip
Example #9
def compile_clips_to_video(path):
    output_video_path = path + "/output/"
    allvids = path + "/downloadedfiles/"
    all_file_names = [
        f for f in os.listdir(allvids)
        if os.path.isfile(os.path.join(allvids, f)) and ".mp4" in f
    ]

    #combine all clips to one file
    filename_list = []
    for x in all_file_names:
        out_path = os.path.join(allvids, x)
        print(f"adding file:{out_path}")
        filename_list.append(out_path)

    video_clips = []
    for x in filename_list:
        logging.info(x)
        clip = VideoFileClip(x)
        # clip.size is a list, so compare with a list (a tuple never matches)
        if clip.size != [1280, 720]:
            clip = clip.resize((1280, 720))

        video_clips.append(clip)

    final_clip = concatenate_videoclips(video_clips)
    os.makedirs(os.path.dirname(output_video_path), exist_ok=True)
    final_clip.write_videofile(f"{output_video_path}/exportedvideo.mp4")
Example #10
def main(args):
    parser = argparse.ArgumentParser(
        description='Detecting lane from a dashcam video')

    parser.add_argument('-i',
                        '--input-video',
                        required=True,
                        type=str,
                        help='Path to the input video')
    parser.add_argument(
        '-v',
        '--visualisation',
        required=False,
        type=int,
        help='Visualise backend components of a specific frame at t timestep')
    args = parser.parse_args(args)
    car_vid = VideoFileClip(args.input_video)  #.subclip(40,43)
    output_name = 'output.mp4'
    cal_cam = CameraCalibration('camera_cal/*.jpg', 'camera_cal/cal_pickle.p')
    cal_cam.undistort_img()
    car_vid = car_vid.resize((1280, 720))
    f_vid = car_vid.fl_image(lane_detector)
    f_vid.write_videofile(output_name, audio=False)
    if args.visualisation is not None:
        if args.visualisation <= car_vid.duration:
            visualise(car_vid, args.visualisation)
        else:
            print("Please input timestep less than video duration!!")
Example #11
    async def widepcmd(self, message):
        """.widep <reply to video>
		    Эффект wideputin
		"""
        reply = await message.get_reply_message()
        if not reply:
            await message.edit("А где реплай на видео?")
            return
        await message.edit("<b>Скачиваем...</b>")
        await message.client.download_file(reply.media, "video.mp4")
        subprocess.check_output(
            "ffmpeg -y -i {0} -vn -ar 44100 -ac 2 -ab 192K -f mp3 sound.mp3".
            format("video.mp4"),
            shell=True)
        await message.edit("<b>Wide'им...</b>")
        video = VideoFileClip("video.mp4")
        video.reader.close()
        w, h = video.size
        video = video.resize((w * 2, h // 2))
        await message.edit("<b>Экспортируем...</b>")
        video.write_videofile("result.mp4")
        subprocess.check_output(
            "ffmpeg -y -i workwide/audio/sound.mp3 -i {0} {1}".format(
                "video.mp4", "out.mp4"),
            shell=True)
        await message.edit("<b>Отправляем...</b>")
        await message.client.send_file(message.to_id, "result.mp4")
        await message.delete()
        os.remove("video.mp4")
        os.remove("out.mp4")
        os.remove("sound.mp3")
        os.remove("result.mp4")
Example #12
def main(details_of_video, orientation, background_video_path):

    background_video = VideoFileClip(background_video_path)
    trimmed_square_video = VideoFileClip(os.path.join(details_of_video["relative_path"], "trimmed_square.mp4"))

    # TODO: Check with recording screen at different scale
    # TODO: Check with changing screen size once
    # 30 mins with resizing
    # 15 mins without resizing
    # https://stackoverflow.com/questions/25122740/different-between-s-and-vf-scale-in-ffmpeg-especially-in-two-pass-transc
    resized_raw_square_video = trimmed_square_video.resize((1080, 1080))
    # resized_raw_square_video = raw_square_video

    # TODO: Tidy up
    if orientation == "landscape":
        final_clip = (overlay_clips(resized_raw_square_video, background_video, (420, 0)))

    elif orientation == "portrait":
        final_clip = (overlay_clips(resized_raw_square_video, background_video, (0, 420)))

    else:
        # fail fast; otherwise final_clip would be unbound below
        raise ValueError("orientation not supported")

    final_clip_path = os.path.join(details_of_video["relative_path"], "raw_with_background_" + orientation + ".mp4")
    # final_clip.write_videofile(final_clip_path, fps=60, logger=None)
    final_clip.write_videofile(final_clip_path, threads=4)

    background_video.reader.close()
    trimmed_square_video.reader.close()

    return final_clip_path
Example #13
 def process_package(source: str):
     log.info(''.join(["SOURCE: ", source]))
     main_clip = VideoFileClip(source).margin(10)
     clip2 = main_clip.fx(vfx.mirror_x)
     clip3 = main_clip.fx(vfx.mirror_y)
     clip4 = main_clip.resize(0.60)  # scale to 60% of the original size
     final_clip = clips_array([[main_clip, clip2],
                               [clip3, clip4]])
     final_clip = final_clip.resize(width=480)  # resize returns a new clip
     return final_clip
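A hedged usage sketch for the 2x2 grid helper above, assuming MoviePy 1.x and that `log` is a configured logger (the paths are made up):

import logging
from moviepy.editor import VideoFileClip, clips_array, vfx

log = logging.getLogger(__name__)
process_package("input.mp4").write_videofile("grid.mp4")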
Example #14
def addedVideos():
    clip1 = VideoFileClip("text.mp4").margin(15)
    clip2 = clip1.fx(vfx.mirror_x)  # mirror along the x axis
    clip3 = clip1.fx(vfx.mirror_y)  # mirror along the y axis
    clip4 = clip1.resize(0.6)  # scale uniformly to 0.6x

    final_clip = clips_array([
        [clip1, clip2], [clip3, clip4]
    ])
    final_clip.write_videofile("my_stack.mp4")
Example #15
 def _read_exp_assets(self, assets_path):
     """
     Read expression gif files to dict of images `self.exp_assets'.
     """
     self.exp_assets = dict()
     for k in os.listdir(assets_path):
         assert k.endswith('.mp4')
         video_file = os.path.join(assets_path, k)
         clip = VideoFileClip(video_file, has_mask=True)
         self.exp_assets[k[:-4]] = clip.resize(width=168)
Example #16
def gen_output(url, subreddit):
    if not os.path.exists('D:\\Reddit\\output.mp4'):
        print('output file does not exist')
        transfilepath = ('D:\\Reddit\\transition.mp4')
        intro = ('D:\\Reddit\\intro.mp4')
        outro = ('D:\\Reddit\\outro.mp4')
        no_of_vids = len([
            name for name in os.listdir(watermarkDir)
            if os.path.isfile(os.path.join(watermarkDir, name))
        ])
        print(no_of_vids)
        for file in os.listdir(watermarkDir):
            if file.endswith(".mp4"):
                filePath = os.path.join('D:\\Reddit\\watermark', file)
                print(filePath)
                video = VideoFileClip(filePath)
                w, h = video.size  # size of the clip
                print("width and height: " + str(w) + " " + str(h))
                if h != 1080:
                    video = video.resize(height=1080)
                videos.append(video)
                print("Normal video appended")

        for vid in range(0, no_of_vids):
            print(vid)
            print("rem 2 =", vid % 2)
            if vid % 2 == 1:
                video = VideoFileClip(transfilepath)
                w, h = video.size  # size of the clip
                if h != 1080:
                    video = video.resize(height=1080)
                if w >= 1000:
                    video = video.resize(width=1920)
                videos.insert(vid, video)
                print("Trans video Inserted")

        video = VideoFileClip(intro)
        w, h = video.size  # size of the clip
        if h != 1080:
            video = video.resize(height=1080)
        if w >= 1000:
            video = video.resize(width=1920)
        videos.insert(0, video)

        video = VideoFileClip(outro)
        w, h = video.size  # size of the clip
        if h != 1080:
            video = video.resize(height=1080)
        if w >= 1000:
            video = video.resize(width=1920)
        videos.insert(len(videos), video)

        print(videos)
        final_clip = concatenate_videoclips(videos, method="compose")
        final_clip.to_videofile(os.path.join('D:\\Reddit\\watermark',
                                             "output.mp4"),
                                fps=60,
                                remove_temp=False)
Example #17
    def post(self, instance_id):
        if not self.is_exist(instance_id):
            abort(404)

        if not self.is_allowed(instance_id):
            abort(403)

        uploaded_file = request.files["file"]

        folder_path = thumbnail_utils.get_preview_folder_name(
            "originals",
            instance_id
        )
        extension = uploaded_file.filename[-4:]
        if extension in [".png", ".jpg"]:
            thumbnail_utils.save_file(
                folder_path,
                instance_id,
                uploaded_file,
                size=None
            )
            if extension == ".jpg":
                thumbnail_utils.convert_jpg_to_png(
                    folder_path,
                    instance_id
                )

            thumbnail_utils.generate_preview_variants(instance_id)
            self.emit_app_preview_event(instance_id)

            return thumbnail_utils.get_preview_url_path(instance_id), 201

        elif extension in [".mp4", ".mov"]:
            from moviepy.editor import VideoFileClip
            file_name = "%s%s" % (instance_id, extension)
            folder = thumbnail_utils.create_folder(folder_path)
            file_path = os.path.join(folder, file_name)
            picture_path = os.path.join(folder, "%s.png" % instance_id)
            uploaded_file.save(file_path + '.tmp')
            clip = VideoFileClip(file_path + '.tmp')

            clip = clip.resize(height=720)
            clip.save_frame(picture_path, round(clip.duration / 2))
            thumbnail_utils.generate_preview_variants(instance_id)

            file_name = "%s%s" % (instance_id, extension)
            clip.write_videofile(os.path.join(folder, instance_id + ".mp4"))
            self.emit_app_preview_event(instance_id)

            return {}, 201

        else:
            abort(400, "Wrong file format")
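A minimal sketch of just the thumbnail step above, assuming MoviePy 1.x (the paths are hypothetical):

from moviepy.editor import VideoFileClip

clip = VideoFileClip("upload.mp4").resize(height=720)
clip.save_frame("thumbnail.png", t=round(clip.duration / 2))  # middle frame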
Example #18
def make_sentence(sayer, text):
    word_list = text.lower().split(" ")

    list_dir = os.listdir(s_dir)
    sentences = sorted(list_dir)
    wrote = 0
    cuts_dirs = []
    cuts = []
    not_found = []
    for single in word_list:
        print "Word = " + single
        count = 0
        flag = 0
        for sen in sentences:
            print "Searhing sentence #" + str(count)
            words = os.listdir(s_dir + "/" + sen + "/words")
            words_dir = s_dir + "/" + sen + "/words"
            for word in words:
                if single == word and (single in sen.lower()):
                    cut_single = words_dir + "/" + single + "/0.mp4"
                    cuts_dirs.append(cut_single)
                    flag = 1
                    break
            count += 1
            if flag == 1:
                break

        if flag == 0:
            not_found.append(single)
    if not_found:
        print("Keywords that are not found are: " + str(not_found))
        return
    video_cuts = []
    for file in cuts_dirs:
        print(file)
        video = VideoFileClip(file)
        video = video.resize((1280, 720))
        video_cuts.append(video)
    final = concatenate_videoclips(video_cuts)

    word_text = TextClip(text, fontsize=40, color="white",
                         bg_color="black").set_pos("bottom").set_duration(
                             final.duration)
    final = CompositeVideoClip([final, word_text])
    if not os.path.exists(sayer + "* " + text):
        os.makedirs(sayer + "* " + text, 0o777)
    final.write_videofile(sayer + "* " + text + "/" + text + ".mp4",
                          codec='libx264',
                          audio_codec='aac',
                          temp_audiofile=sayer + "* " + text + "/" + text +
                          ".m4a",
                          remove_temp=True)
Example #19
def make_collage(videos0,
                 width,
                 height,
                 collage_folder,
                 savestr,
                 cropvid=True,
                 filext='mp4'):
    os.makedirs(collage_folder, exist_ok=True)
    n_collages = int(np.ceil(len(videos0) / (width * height)))
    for n in range(n_collages):
        start_ix = n * width * height
        pn = os.path.join(
            collage_folder, f'{savestr}_'
            f'{start_ix}-{start_ix + width*height-1}.{filext}')
        if os.path.exists(pn):
            print(f'EXISTS: {pn}')
            continue

        clips_arr = []
        for h in range(height):
            temp = []
            for w in range(width):
                ix = h * width + w + start_ix
                if ix < len(videos0):
                    clip = VideoFileClip(videos0[ix])
                    if cropvid:
                        clip = crop(clip, x1=100, y1=0, x2=350, y2=190)
                    clip = clip.resize(width=125)
                    temp.append(clip)
                else:
                    # pad the grid with a tiny 5-px-wide placeholder clip
                    clip = VideoFileClip(videos0[-1])
                    if cropvid:
                        clip = crop(clip, x1=100, y1=0, x2=350, y2=190)
                    clip = clip.resize(width=5)
                    temp.append(clip)

            clips_arr.append(temp)
        final_clip = clips_array(clips_arr)
        final_clip.write_videofile(pn, codec='libx264')
Example #20
def normalize_movie(movie_path):
    """
    Turn the movie into a 720p movie file.
    """
    folder_path = os.path.dirname(movie_path)
    file_source_name = os.path.basename(movie_path)
    file_target_name = "%s.mp4" % file_source_name[:-8]
    file_target_path = os.path.join(folder_path, file_target_name)

    movie_clip = VideoFileClip(movie_path)
    movie_clip = movie_clip.resize(height=720)
    movie_clip.write_videofile(file_target_path)
    return file_target_path
Example #21
def Video_Resize(now_path,video_lists,save_path,mp4 = '.mp4',avi = '.avi',flv = '.flv',mkv = '.mkv'):
    save_video_lists = []
    save_img_path_dicts = {}

    # This loop converts each video's resolution
    all_path = os.path.abspath(now_path)
    for video_list in video_lists:
        # str.lstrip strips a character set, not a prefix, so use relpath instead
        video_name = os.path.relpath(video_list, all_path)
        save_video = os.path.abspath(os.path.join(save_path, video_name))
        clip = VideoFileClip(video_list)
        clip = clip.resize(newsize=(416, 416))
        clip.write_videofile(save_video)

    # This loop collects the converted videos into a list
    for dir_item in os.listdir(save_path):
        all_path = os.path.abspath(os.path.join(save_path, dir_item))
        if os.path.isdir(all_path):
            continue
        else:
            if dir_item.endswith(mp4):
                save_video_lists.append(all_path)
            else:
                pass

    # This loop creates a folder for each converted video and maps video -> folder in a dict
    for save_video_list in save_video_lists:
        video_dir_name = os.path.relpath(save_video_list, save_path)
        if video_dir_name.endswith(mp4):
            # drop the '.mp4' extension, then append 'mp4' as a folder suffix
            value = save_video_list[:-len(mp4)] + 'mp4'
            save_img_path_dicts[save_video_list] = value
            video_dir_name = video_dir_name[:-len(mp4)] + 'mp4'
            if is_dir_there(save_path,video_dir_name) is not True:
                mkdir_dir(save_path,video_dir_name)
            else:
                pass
        # elif video_dir_name.endswith(avi):
        #     video_dir_name = video_dir_name.strip(avi) + 'avi'
        #     if is_dir_there(save_path, video_dir_name) is not True:
        #         mkdir_dir(save_path, video_dir_name)
        #     else:
        #         pass
        # elif video_dir_name.endswith(flv):
        #     video_dir_name = video_dir_name.strip(flv) + 'flv'
        #     if is_dir_there(save_path, video_dir_name) is not True:
        #         mkdir_dir(save_path, video_dir_name)
        #     else:
        #         pass
        else:
            continue
    return save_video_lists,save_img_path_dicts
Example #22
class Video:
    def __init__(self, filename, path):
        self.filename = filename
        self.path = path
        self.file = VideoFileClip(path)
    
    def width(self):
        return float(self.file.w)
    
    def height(self):
        return float(self.file.h)
    
    def duration(self):
        return self.file.duration
    
    def resize(self, newsize=None, height=None, width=None, apply_to_mask=True):
        self.file = self.file.resize(newsize=newsize, height=height, width=width, apply_to_mask=apply_to_mask)
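A hedged usage sketch of this small wrapper class, assuming MoviePy 1.x (the path is hypothetical):

video = Video("clip.mp4", "/videos/clip.mp4")
video.resize(width=640)               # mutates self.file; aspect ratio is kept
print(video.width(), video.height())  # reflects the resized clip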
Example #23
 def make_clip_from_filenames(self, start_dt: dt, end_dt: dt, file_list: List[str],
                              trim_files: bool = True, prefix: str = 'motion') -> str:
     """Takes in a list of file paths, determines the cropping necessary
     based on the timerange in the path and downloads the video clip to a temp filepath"""
     clips = []
     for dl_file in file_list:
         clip = VideoFileClip(dl_file)
         if trim_files:
             trim_st, trim_end = self._get_trim_range_from_filename(dl_file, start_dt, end_dt)
             clip = clip.subclip(trim_st, trim_end)
         clip = (clip.resize(self.resize_perc).speedx(self.speed_x))
         # Append to our clips
         clips.append(clip)
     final = concatenate_videoclips(clips, method='compose')
     fpath = os.path.join(self.temp_dir, f'{prefix}_{start_dt:%T}_to_{end_dt:%T}.mp4')
     final.write_videofile(fpath)
     return fpath
Example #24
def play_movie(path, videos, title):
    # check whether the user typed a video .mp4 name or an index
    try:
        parse = re.findall(r"[\w']+", title)
        print("parse = ", parse[-1])
        if parse[-1] != 'mp4':  # if true, the user passed an index
            title = videos[int(
                parse[0]
            )][0]  # videos[index][0] - [0] is the position of the video title

        print("Reproduzindo: " + title)
        pygame.display.set_caption(title)
        clip = VideoFileClip(path + "/" + title)
        # pass newsize: resize ignores width when height is also given
        clipresized = clip.resize(newsize=(500, 400))
        clipresized.preview()
        pygame.quit()
    except Exception:
        print("Video not found")
        input()
Example #25
def display_animation(filename,
                      keypoints,
                      clip_height=768,
                      display_height=240,
                      temp_file='__temp__.avi',
                      include_source_video=True):
    """
    Display a side-by-side animation comprising the skeleton and (optionally) the source
    video.

    Parameters
    ----------
    filename : str
        The video file to read
    keypoints : numpy array
        Array of keypoints in the form [frame,landmark,coords]
    clip_height : int
        Desired clip height (applies to both source video and skeleton, the former is upscaled)
    display_height : int
        Desired display height
    temp_file : str
        Temporary file for transcoding
    include_source_video: bool
        Whether to include the source video
    """
    clip_original = VideoFileClip(filename)
    rescaling_factor = clip_height / clip_original.h
    clip_original = clip_original.resize(height=clip_height)
    keypoints = np.copy(keypoints) * rescaling_factor

    skeleton = Skeleton(target_width=clip_original.w,
                        target_height=clip_original.h)
    skeleton.animate(keypoints,
                     temp_file,
                     fps=len(keypoints) / clip_original.duration)
    clip_skeleton = VideoFileClip(temp_file)
    if include_source_video:
        clip = clips_array([[clip_original, clip_skeleton]])
    else:
        clip = clip_skeleton

    return clip.ipython_display(height=display_height,
                                rd_kwargs=dict(logger=None))
Example #26
def process_playlist(playlist_filename):
    clips = []
    with open(playlist_filename) as fp:
        reader = csv.DictReader(fp)
        for video in reader:
            print('Processing clip: ', video['url'])
            filename = download_video(video['url'])
            if not filename:  # Video or user was deleted
                continue
            clip = VideoFileClip(filename)
            if video['start'] or video['end']:
                clip = clip.subclip(int(video['start']), int(video['end']))
            if clip.w < 720:
                clip = clip.resize(height=720)
            clip = normalize_audio(clip)
            clips.append(clip)
    print('Combining clips:\n', clips)
    concat_videos(clips)
    print('Video Complete.')
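normalize_audio is not shown in this example; a minimal stand-in sketch using MoviePy 1.x's built-in audio_normalize effect (my assumption, not necessarily the author's implementation):

from moviepy.audio.fx.audio_normalize import audio_normalize

def normalize_audio(clip):
    # scale the audio so its loudest sample reaches full volume
    return clip.fx(audio_normalize)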
Example #27
def load_video(filename, params_substitute=None):
    # To use in external scripts
    if params_substitute is not None:
        params = params_substitute
    # Load videos (fps = 30)
    clip = VideoFileClip(filename)

    # Resize to new_size x new_size
    clip_resized = clip.resize(newsize=(params['new_size'], params['new_size']))

    # Downsample
    downsampled_frames, _ = utils.downsample_video(clip_resized, params, save_downsampled=False)

    # Frames colour conversion
    frame_hsv_arr = []
    for frame in downsampled_frames:
        frame_hsv = color.rgb2hsv(frame)
        frame_hsv_arr.append(frame_hsv)
    return frame_hsv_arr
Example #28
def dump_frames(video_path, output_directory, frames_per_second):
    """Dump frames at frames_per_second from a video to output_directory.

    If frames_per_second is None, the clip's fps attribute is used instead."""
    clip = VideoFileClip(video_path)
    if clip.rotation == 90:
        # work around rotation metadata: swap width/height and clear the flag
        clip = clip.resize(clip.size[::-1])
        clip.rotation = 0
    info_path = '{}/info.json'.format(output_directory)
    name_format = '{}/frame%04d.png'.format(output_directory)

    if frames_per_second is None:
        frames_per_second = clip.fps
    frames_already_dumped_helper = lambda: \
            frames_already_dumped(video_path, output_directory,
                                  frames_per_second, info_path,
                                  name_format, clip.duration)
    if frames_already_dumped_helper():
        logging.info('Frames for {} exist, skipping...'.format(video_path))
        return

    successfully_wrote_images = False
    try:
        # name_format already includes output_directory (formatted above)
        clip.write_images_sequence(name_format, fps=frames_per_second)
        successfully_wrote_images = True
    except Exception as e:
        logging.error("Failed to dump images for %s", video_path)
        logging.error(e)

    if successfully_wrote_images:
        info = {
            'frames_per_second': frames_per_second,
            'input_video_path': os.path.abspath(video_path)
        }
        with open(info_path, 'w') as info_file:
            json.dump(info, info_file)

        if not frames_already_dumped_helper():
            logging.error(
                "Images for {} don't seem to be dumped properly!".format(
                    video_path))
Example #29
def make_vid():
	#code to concatenate video clips
	#go into videos folder, iterate through file names
	#print("made it")
	filename_list = []
	for filename in glob.iglob('videos/*.mp4'):
		filename_list.append(filename)

	video_clips = []
	for x in filename_list:
		clip = VideoFileClip(x)
		if clip.size != [1280, 720]:  # clip.size is a list, so compare with a list
			clip = clip.resize((1280, 720))

		video_clips.append(clip)

	final_clip = concatenate_videoclips(video_clips)
	final_clip.write_videofile("exportedvideo.mp4")

Example #30
def addHeader():

    # intro clip: source file, frame width, frame height
    clip1 = VideoFileClip("头条号片头.mp4")
    pre_width = 1444
    pre_height = 820

    path = os.path.abspath('.')
    filenames = os.listdir('movie/')  # source video directory
    for filename in filenames:
        clip2 = VideoFileClip(path + '/movie/' + filename)
        width_temp = clip2.size[0]  # target video frame width
        height_temp = clip2.size[1]
        rate1 = pre_width / width_temp
        rate2 = pre_height / height_temp
        rate = rate1 if (rate1 > rate2) else rate2  # pick the larger scale factor
        # shrink a fresh copy each pass; reassigning clip1 would compound resizes
        intro = clip1.resize(1 / rate)
        final_clip = concatenate_videoclips([intro, clip2])
        final_clip.resize(newsize=(width_temp,
                                   height_temp)).write_videofile('movie1/' +
                                                                 filename)
Example #31
def changesize(inputname, outputname):
    ship = VideoFileClip(inputname)
    w, h = ship.size
    w_r = w / 720
    h_r = h / 1280
    minr = min(w_r, h_r)
    new_w = int(w / minr)
    new_h = int(h / minr)
    clip = ship.resize([new_w, new_h])
    jianqie = clip.crop(x_center=int(new_w / 2), y_center=int(new_h / 2), width=720, height=1280)
    end = ship.duration
    if end > 20:
        shijian = jianqie.subclip(t_start=end / 2, t_end=end / 2 + 10)
    elif 10 <= end <= 20:
        shijian = jianqie.subclip(t_start=0, t_end=10)
    else:
        shijian = jianqie.subclip(t_start=0, t_end=end)
    patha, filename = os.path.split(outputname)
    if not os.path.exists(patha):
        os.makedirs(patha)
    shijian.write_videofile(outputname)
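A quick pure-Python check of the fill-then-crop math above (no MoviePy needed; the sample dimensions are made up):

w, h = 1920, 1080
minr = min(w / 720, h / 1280)           # the limiting scale factor
new_w, new_h = int(w / minr), int(h / minr)
assert new_w >= 720 and new_h >= 1280   # scaled frame covers the 720x1280 crop
print(new_w, new_h)                     # -> 2275 1280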
Example #32
def process_video(filename, video_height=480, overwrite=False):

    gif_name = 'gifs/' + filename + '.gif'

    if isfile(gif_name) and not overwrite:
        print("Skipping " + gif_name + " as it already exists.")
        return
    
    video_file = VideoFileClip(filename)

    try:
        # crop 16:9 sources to 4:3 by keeping the middle 3/4 of the width
        assert_approx_equal(float(video_file.w)/float(video_file.h), 16.0/9.0)
        video_file = video_file.crop(x1=video_file.w/8, x2=7*video_file.w/8)
    except AssertionError:
        print("Not resizing video.")


    video_file = video_file.resize(height=video_height)

    end_image = video_file.to_ImageClip(0).set_duration(0.7)
    
    video_file = concatenate([video_file, end_image])

    logo_size = video_height/6
    text = ImageClip(expanduser("~/dropbox/bslparlour/twitter_logo2.png")).set_duration(video_file.duration).resize(width=logo_size).set_pos((video_file.w-logo_size,video_file.h-logo_size))


    composite_video_file = CompositeVideoClip([video_file, text])
    composite_video_file.write_gif(gif_name,fps=20)

    fuzz_amt = 5
    commands = 'gifsicle "'+gif_name+'" -O3 | convert -fuzz '+str(fuzz_amt)+'% - -ordered-dither o8x8,16 -layers optimize-transparency "'+gif_name+'"'

    process = call(commands, shell=True)

    if getsize(gif_name) > 5*1024**2:
        process_video(filename, video_height=video_height*0.75, overwrite=True)
Example #33
from kelpy.tobii.TobiiSprite import *
from random import shuffle  # shuffle() is used below for PROBABILITIES
import csv
import glob
import imageio
imageio.plugins.ffmpeg.download()
from moviepy.editor import VideoFileClip

BGCOLOR = (0,0,0)
IMAGE_SCALE = .8
BOX_SCALE = 1
MAX_DISPLAY_TIME =100
TRIAL = 1

BOXES = []
clip = VideoFileClip(kstimulus('gifs/babylaugh.mov'))
clip = clip.resize(height=800, width=1300)
print(kstimulus('gifs/babylaugh.mov'))
for filename in glob.glob('../../kelpy/stimuli/boximages/*'):
    im=filename
    BOXES.append(im)


OBJECTS = []
for filename in glob.glob('../../kelpy/stimuli/socialstim/*'):
    im=filename
    OBJECTS.append(im)

PROBABILITIES = [0.1, 0.25, 0.5, 0.75, 0.9]
shuffle(PROBABILITIES)

use_tobii_sim = True #toggles between using the tobii simulator or the actual tobii controller
Example #34
import random
import urllib
import sys
import os
import json
from bs4 import BeautifulSoup
import requests
from moviepy.editor import VideoFileClip, CompositeVideoClip, ImageClip, vfx
from moviepy.video.fx.resize import resize
from PIL import Image, ImageDraw, ImageFont

fname = sys.argv[1]

clip = VideoFileClip(fname)
# clip.write_gif('t0.gif', fps=15, loop=1)
# clip.speedx(1.8).write_gif('t1.gif', fps=7, loop=1)
# clip.speedx(1.8).write_gif('t2.gif', fps=5, loop=1)
# clip.speedx(2.4).write_gif('t3.gif', fps=4, loop=1)
# clip.speedx(2.3).write_gif('t4.gif', fps=4, loop=1)
# clip.speedx(2.2).write_gif('t5.gif', fps=4, loop=1)
# clip.speedx(2.1).write_gif('t6.gif', fps=4, loop=1)
# clip.speedx(2.4).write_gif('t7.gif', fps=5, loop=1)
clip = clip.resize(.8)  # resize returns a new clip; assign it
clip.speedx(1.8).write_gif('t8.gif', fps=7, loop=1)
# clip.speedx(1.8).write_gif('t4.gif', fps=5, loop=1, opt='wu')
# clip.speedx(1.7).write_gif('t2.gif', fps=7, loop=1, opt='wu')
# clip.speedx(1.7).write_gif('t3.gif', fps=7, loop=1, program='ffmpeg')
# clip.speedx(1.7).write_gif('t4.gif', fps=7, loop=1, program='ImageMagick', opt='optimizeplus', fuzz=10)
Example #35
File: pitch.py Project: jaflo/misc
def poop(source, destination, midi_file, stretch, fadeout, rebuild, max_stack):
    """
    Create multiple pitchshifted versions of source video and arrange them to
    the pattern of the midi_file, also arrange the video if multiple notes play
    at the same time.
    """

    print "Reading input files"
    video = VideoFileClip(source, audio=False)
    """
    Non-main tracks are 30% the size of the main and have a white border and a
    margin around them.
    """
    smaller = video.resize(0.3)\
        .margin(mar=2, color=3*[255])\
        .margin(mar=8, opacity=0)
    audio = AudioFileClip(source, fps=44100)
    mid = MidiFile(midi_file)
    ignoredtracks = ["Percussion", "Bass"]

    print "Analysing MIDI file"
    notes = []   # the number of messages in each track
    lowest = 127 # will contain the lowest note
    highest = 0  # will contain the highest note
    for i, track in enumerate(mid.tracks):
        notes.append(0)
        #if track.name in ignoredtracks: continue
        for message in track:
            if message.type == "note_on":
                lowest = min(lowest, message.note)
                highest = max(highest, message.note)
                notes[-1] += 1
    """
    The main track is the one featured in the center. It is probably the one
    with the most notes. Also record the lowest, highest, and average note to
    generate the appropriate pitches.
    """
    maintrack = max(enumerate(notes), key=lambda x: x[1])[0]
    midpitch = int((lowest+highest)/2)
    print "Main track is probably", str(maintrack)+":", mid.tracks[maintrack].name
    mid.tracks.insert(0, mid.tracks.pop(maintrack)) # move main track to front
    notes.insert(0, notes.pop(maintrack)) # move main note count to front
    print(sum(notes), "notes ranging from", lowest, "to", highest, "centering around", midpitch)

    print "Transposing audio"
    sound = audio.to_soundarray(fps=44100) # source, original audio
    tones = range(lowest-midpitch, highest-midpitch) # the range of pitches we need
    pitches = [] # this will contain the final AudioFileClips
    if not os.path.exists("pitches/"):
        print "Creating folder for audio files"
        os.makedirs("pitches/")
    for n in tones:
        """
        Pitches only need to be generated if they do not already exist or if
        we force the creation of new ones. Save them in order in pitches.
        """
        name = "pitches/"+source+"_"+str(n)+".mp3"
        if not os.path.isfile(name) or rebuild:
            print "Transposing pitch", n
            splitshift(sound, n).write_audiofile(name)
        pitches.append(AudioFileClip(name, fps=44100))

    print "Adding video clips"
    clips = [video.set_duration(1)] # to set the video size
    positions = [("left", "bottom"), ("right", "bottom"), ("left", "top"),
        ("right", "top"), ("center", "bottom"), ("center", "top"),
        ("left", "center"), ("right", "center")] # non-main tracks
    """
    curpos is the current corner position on the screen and changes with each track.
    cache is used to make a unique file name whenever a new temporary file is created.
    endtime will be used at the end to set the end TextClip. It is the latest time any clip ends.
    """
    curpos = -2
    cache = endtime = 0
    for i, track in enumerate(mid.tracks):
        #if track.name in ignoredtracks: continue
        print("Processing {} notes: {}".format(notes[i], track.name))
        t = 1.0 # not 0 because we added one second of original video for size
        opennotes = [] # will contain all notes that are still playing
        curpos += 1
        for message in track:
            if not isinstance(message, MetaMessage):
                message.time *= stretch
                t += message.time
                if message.type == "note_on":
                    """
                    Add a video clip with the appropriate starting time and
                    pitch. Also add an entry to opennotes (we don't know when
                    the note ends yet).
                    """
                    part = video
                    mainvid = i == 0  # and len(opennotes) == 0
                    if not mainvid: part = smaller
                    part = part\
                        .set_audio(pitches[min(len(pitches)-1, max(0, message.note-lowest))])\
                        .set_start(t/1000)
                    opennotes.append((message.note, len(clips), t))
                    """
                    If this isn't the main track, the video will be smaller and
                    placed at the edge. We'll get a position for each track.
                    If there is more than one video playing in this track, it
                    will be placed slighly closer to the center.
                    """
                    if not mainvid:
                        stackheight = 6
                        part = part.set_position(positions[curpos % len(positions)])
                    clips.append(part)
                elif message.type == "note_off":
                    reference = message.note
                    index = 0
                    """
                    Find the note that ended in opennotes using the note.
                    Get the index and start time, remove it from opennotes.
                    """
                    for note in reversed(opennotes):
                        n, j, d = note
                        if n == reference:
                            index = j
                            opennotes.remove(note)
                            break
                    """
                    Get the clip for the open note, set its time to the
                    difference between time now and start time. Have it fade out
                    and update the endtime if needed.
                    """
                    clips[index] = clips[index].set_duration((t-d)/1000+fadeout)
                    clips[index] = clips[index].crossfadeout(fadeout)
                    endtime = max(endtime, t/1000+fadeout)
                if len(clips) == max_stack:
                    """
                    To save some memory, the clips in memory are emptied
                    whenever they reach a certain size. All clips that are closed
                    are merged into one file on disk.
                    """
                    upuntil = len(clips) # the first open note
                    if len(opennotes) > 0: _, upuntil, _ = opennotes[0]
                    stillopen = clips[upuntil:]
                    print "Stack reached", len(clips), "clips, merging", upuntil
                    """
                    Save a temporary file to disk with all clips we can safely
                    discard from clips.
                    """
                    newcache = destination+".temporary"+str(cache)+".mp4"
                    CompositeVideoClip(clips[:upuntil]).write_videofile(newcache)
                    cache += 1
                    """
                    Shift all opennotes' indices down by the number of clips
                    merged and saved to disk. Set clips to be the new, merged
                    clip and any leftover clips.
                    """
                    for i, note in enumerate(opennotes):
                        n, j, d = note
                        opennotes[i] = (n, j-upuntil+1, d)
                    clips = [VideoFileClip(newcache)]+stillopen

    end = TextClip("pitch.py", font="Arial", color="white", fontsize=70)\
        .set_pos("center")\
        .set_duration(1)\
        .set_start(endtime)
    clips.append(end) # add an ending frame

    """
    Combine all leftover clips, write them to the final file and remove
    temporary files created before.
    """
    print "Combining", len(clips), "clips"
    final = CompositeVideoClip(clips).set_start(1)
    final.write_videofile(destination)
    clips = []
    if cache == 1:
        print("Removing one temporary file")
    elif cache > 1:
        print("Removing", cache, "temporary files")
    for i in range(0, cache):
        os.remove(destination+".temporary"+str(i)+".mp4")