Ejemplo n.º 1
0
    def generate_video_part(self, txnId):
        """Render frame 4: background with "from"/"to" calendar icons and a
        caption, narrated by generated text-to-speech audio.

        Args:
            txnId: transaction id used to namespace the temporary asset files.

        Returns:
            tuple: (CompositeVideoClip for this frame, frame index 4).
        """
        # /var/task is the deployed bundle root (presumably AWS Lambda).
        if not self.config.LOCAL:
            os.chdir("/var/task/")
        W, H = self.config.VIDEO_SIZE
        # Render the "from" and "to" calendar images to disk for this txn.
        calendar(self.config, 'fifrom', txnId)
        calendar(self.config, 'fito', txnId)
        bgImage = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX + "bg_7.png")
        calendar_from_logo = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX_WRITE + 'fifrom-' + txnId + '.png'). \
            set_position((W / 2 - 170, H / 4)).resize(width=self.config.ICON_SIZE)
        calendar_to_logo = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX_WRITE + 'fito-' + txnId + '.png'). \
            set_position((W / 2 + 80, H / 4)).resize(width=self.config.ICON_SIZE)
        # Synthesize the narration, then load it to size the clip duration.
        self.text_to_speech(self.fill_text(self.fill_text(Frame4.lang_map.get('audio4'), 1), 2), Frame4.lang_map.get('lan'), txnId)
        audioclip = AudioFileClip(self.config.SB_AUDIO_PATH_PREFIX + "audio-" + txnId + "-4.mp3")
        Frame4.map['text4'] = self.fill_text(self.fill_text(Frame4.lang_map.get('text4'), 1), 2)
        # Render the caption to an image file, then load it as a clip.
        straight_text(Frame4.map['text4'], Frame4.lang_map.get('font'), Frame4.lang_map.get('fontsize4'), txnId, 4, self.config)
        text = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX_WRITE + '-text-4-' + txnId + '.png')
        video = mpy.CompositeVideoClip(
            [
                bgImage,
                calendar_from_logo,
                calendar_to_logo,
                text.set_position(('center', calendar_to_logo.size[1] + 20)),
            ],
            size=self.config.VIDEO_SIZE). \
            on_color(
            color=self.config.WHITE,
            col_opacity=1).set_duration(audioclip.duration)

        new_audioclip = CompositeAudioClip([audioclip])
        video.audio = new_audioclip
        # Clean up the per-transaction temp files.
        # NOTE(review): AudioFileClip streams from disk lazily — confirm
        # deleting the mp3 before the final render does not break writing.
        os.remove(self.config.SB_AUDIO_PATH_PREFIX + 'audio-' + txnId + '-4.mp3')
        os.remove(self.config.SB_LOGO_PATH_PREFIX_WRITE + '-text-4-' + txnId + '.png')
        os.remove(self.config.SB_LOGO_PATH_PREFIX_WRITE + 'fifrom-' + txnId + '.png')
        os.remove(self.config.SB_LOGO_PATH_PREFIX_WRITE + 'fito-' + txnId + '.png')
        return video, 4
Ejemplo n.º 2
0
 def generate_video_part(self, txnId):
     """Render frame 5: background with a "type" icon and a caption, narrated
     by generated text-to-speech audio.

     Args:
         txnId: transaction id used to namespace the temporary asset files.

     Returns:
         tuple: (CompositeVideoClip for this frame, frame index 5).
     """
     # /var/task is the deployed bundle root (presumably AWS Lambda).
     if not self.config.LOCAL:
         os.chdir("/var/task/")
     W, H = self.config.VIDEO_SIZE
     bgImage = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX + "bg_5.png")
     # Icon chosen from the input's "type" field via the image lookup table.
     type_logo = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX + self.image_map.get(self.input_map.get("type"))). \
         set_position((W/2-50, H/5)).resize(width=self.config.ICON_SIZE)
     # Synthesize the narration, then load it to size the clip duration.
     self.text_to_speech(self.fill_text(self.fill_text(Frame5.lang_map.get('audio5'), 1), 2), Frame5.lang_map.get('lan'), txnId)
     audioclip = AudioFileClip(self.config.SB_AUDIO_PATH_PREFIX + "audio-" + txnId + "-5.mp3")
     Frame5.map['text5'] = self.fill_text(self.fill_text(Frame5.lang_map.get('text5'), 1), 2)
     # Render the caption to an image file, then load it as a clip.
     straight_text(Frame5.map['text5'], Frame5.lang_map.get('font'), Frame5.lang_map.get('fontsize5'), txnId, 5, self.config)
     text = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX_WRITE+'-text-5-' + txnId+'.png')
     video = mpy.CompositeVideoClip(
         [
             bgImage,
             type_logo,
             text.set_position(('center', type_logo.size[1] + 40)),
         ],
         size=self.config.VIDEO_SIZE). \
         on_color(
         color=self.config.WHITE,
         col_opacity=1).set_duration(audioclip.duration)
     new_audioclip = CompositeAudioClip([audioclip])
     video.audio = new_audioclip
     # Clean up the per-transaction temp files.
     os.remove(self.config.SB_AUDIO_PATH_PREFIX + 'audio-' + txnId + '-5.mp3')
     os.remove(self.config.SB_LOGO_PATH_PREFIX_WRITE+'-text-5-' + txnId+'.png')
     return video, 5
Ejemplo n.º 3
0
    def generate_video_part(self, txnId):
        """Render frame 1: FIU logo, an animated arrow clip, and an account
        icon over the background, with a narrated caption.

        Args:
            txnId: transaction id used to namespace the temporary asset files.

        Returns:
            tuple: (CompositeVideoClip for this frame, frame index 1).
        """
        # /var/task is the deployed bundle root (presumably AWS Lambda).
        if not self.config.LOCAL:
            os.chdir("/var/task/")
        W, H = self.config.VIDEO_SIZE
        bgImage = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX + "bg_1.png")
        fiu_logo = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX + self.image_map.get(self.input_map.get("fiu"))). \
            set_position((W/2-150, H/5)).resize(height=self.config.ICON_SIZE)
        # The arrow is an animated clip, hence VideoFileClip rather than ImageClip.
        arrow_gif = VideoFileClip(self.config.SB_LOGO_PATH_PREFIX + self.image_map.get("left_arrow")). \
            set_position((W/2-30, H/5)).resize(height=self.config.ICON_SIZE)
        account_logo = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX + self.image_map.get(self.input_map.get("account")[0])). \
            set_position((W/2+100, H/5)).resize(height=self.config.ICON_SIZE)
        # Synthesize the narration, then load it to size the clip duration.
        self.text_to_speech(self.fill_text(self.fill_text(Frame1.lang_map.get('audio1'), 1), 2), Frame1.lang_map.get('lan'), txnId)
        audioclip = AudioFileClip(self.config.SB_AUDIO_PATH_PREFIX + "audio-" + txnId + "-1.mp3")
        Frame1.map['text1'] = self.fill_text(self.fill_text(Frame1.lang_map.get('text1'), 1), 2)
        # Render the caption to an image file, then load it as a clip.
        straight_text(Frame1.map['text1'], Frame1.lang_map.get('font'), Frame1.lang_map.get('fontsize1'), txnId, 1, self.config)
        text = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX_WRITE+'-text-1-' + txnId+'.png')
        video = mpy.CompositeVideoClip(
            [
                bgImage,
                fiu_logo,
                arrow_gif,
                account_logo,
                text.set_position(('center', fiu_logo.size[1] + 40)),
            ],
            size=self.config.VIDEO_SIZE). \
            on_color(
            color=self.config.WHITE,
            col_opacity=1).set_duration(audioclip.duration)

        new_audioclip = CompositeAudioClip([audioclip])
        video.audio = new_audioclip
        # Clean up the per-transaction temp files.
        os.remove(self.config.SB_AUDIO_PATH_PREFIX + 'audio-' + txnId + '-1.mp3')
        os.remove(self.config.SB_LOGO_PATH_PREFIX_WRITE+'-text-1-' + txnId+'.png')
        return video, 1
Ejemplo n.º 4
0
    def generate_video_part(self, txnId):
        """Render frame 2: background plus the FIP (bank) logos — a single
        logo, or a combined image when several banks are given — with a
        narrated caption.

        Args:
            txnId: transaction id used to namespace the temporary asset files.

        Returns:
            tuple: (CompositeVideoClip for this frame, frame index 2).
        """
        # /var/task is the deployed bundle root (presumably AWS Lambda).
        if not self.config.LOCAL:
            os.chdir("/var/task/")
        W, H = self.config.VIDEO_SIZE
        bgImage = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX + "bg_2.png")
        fipList = self.input_map.get("fip")

        fip_x_position = 0
        fip_y_position = 0
        fip_img_path = ''

        # 2**len(fipList) shrinks the y offset as more banks are listed.
        # NOTE(review): exponential scaling looks deliberate but unusual —
        # confirm the intended vertical placement for 3+ banks.
        num_images_pow = pow(2, len(fipList))

        if len(fipList) == 1:
            # Single bank: use its logo directly.
            fip_x_position = W / 4 - self.config.BANK_ICON_SIZE / 3
            fip_img_path = self.config.SB_LOGO_PATH_PREFIX + self.image_map.get(
                fipList[0])
            fip_y_position = H / 4
            single = True
        else:
            # Multiple banks: stitch the logos into one temp image on disk.
            fip_x_position = W / 6 - self.config.BANK_ICON_SIZE / 3
            fip_y_position = int(H / num_images_pow)
            self.concatenate_images(fipList, txnId)
            fip_img_path = self.config.SB_LOGO_PATH_PREFIX_WRITE + 'combined-' + txnId + '-banks.png'
            single = False

        # Two logos per row, so height grows with ceil(n / 2) rows.
        height_final_image = self.config.BANK_ICON_SIZE * int(
            math.ceil(len(fipList) / 2))

        fip_logo = mpy.ImageClip(fip_img_path).set_position(
            (fip_x_position, fip_y_position)).resize(height=height_final_image)
        # Synthesize the narration, then load it to size the clip duration.
        self.text_to_speech(self.fill_text(Frame2.lang_map.get('audio2')),
                            Frame2.lang_map.get('lan'), txnId)
        audioclip = AudioFileClip(self.config.SB_AUDIO_PATH_PREFIX + "audio-" +
                                  txnId + "-2.mp3")
        Frame2.map['text2'] = self.fill_text(Frame2.lang_map.get('text2'))
        # Render the caption to an image file, then load it as a clip.
        straight_text(Frame2.map['text2'], Frame2.lang_map.get('font'),
                      Frame2.lang_map.get('fontsize2'), txnId, 2, self.config)
        text = mpy.ImageClip(self.config.SB_LOGO_PATH_PREFIX_WRITE +
                             '-text-2-' + txnId + '.png')
        video = mpy.CompositeVideoClip(
            [
                bgImage,
                fip_logo,
                text.set_position((W / 5 + 50, H / 5)),
            ],
            size=self.config.VIDEO_SIZE). \
            on_color(
            color=self.config.WHITE,
            col_opacity=1).set_duration(audioclip.duration)
        new_audioclip = CompositeAudioClip([audioclip])
        video.audio = new_audioclip
        # Clean up the per-transaction temp files; the combined banks image
        # only exists in the multi-bank branch.
        os.remove(self.config.SB_AUDIO_PATH_PREFIX + 'audio-' + txnId +
                  '-2.mp3')
        os.remove(self.config.SB_LOGO_PATH_PREFIX_WRITE + '-text-2-' + txnId +
                  '.png')
        if not single:
            os.remove(fip_img_path)
        return video, 2
Ejemplo n.º 5
0
def scheduled_time_scene_transition(schedule, resource_folder_name="res"):
    '''
    Build one video by rendering each scheduled resource for its duration
    and concatenating the scenes in order.

    params:
    - schedule: a list of (file name, dur, params) triples. The original
      docstring claimed 2-tuples but the loop always unpacks three values.
      params is a mapping with optional entries:
        "part": (start, end) subclip bounds for video resources;
        "crop": (x1, y1, x2, y2) as fractions of width/height.
    - resource_folder_name: folder containing the resource files.

    Returns the concatenated moviepy clip.

    Raises FileNotFoundError (with the missing path in the message) when a
    scheduled resource does not exist.
    '''
    clips = []
    for res, dur, params in schedule:
        file_name = os.path.join(resource_folder_name, res)
        if not os.path.exists(file_name):
            # Carry the path in the exception instead of printing it.
            raise FileNotFoundError(file_name)
        # EH: use a better way to detect the type of a file
        file_type = res.split(".")[-1]
        if file_type in ["mov", "mp4", "avi", "flv"]:
            origin_video_clip = mpy.VideoFileClip(file_name, audio=False)
            if params["part"]:
                parts = params["part"]
                origin_video_clip = origin_video_clip.subclip(
                    parts[0], parts[1])
            if params["crop"]:
                # Crop rect is given as fractions of the clip dimensions.
                w = origin_video_clip.w
                h = origin_video_clip.h
                rect = params["crop"]
                origin_video_clip = vfx.crop(origin_video_clip, w * rect[0],
                                             h * rect[1], w * rect[2],
                                             h * rect[3])
            clips.append(
                set_video_dur(resize_and_fit(origin_video_clip, PREVIEW_SIZE),
                              dur))
        elif file_type in ["jpg", "png", "jpeg"]:
            origin_img_clip = mpy.ImageClip(file_name)
            if params["crop"]:
                w = origin_img_clip.w
                h = origin_img_clip.h
                rect = params["crop"]
                origin_img_clip = vfx.crop(origin_img_clip, w * rect[0],
                                           h * rect[1], w * rect[2],
                                           h * rect[3])
            clips.append(
                set_img_dur(resize_and_fit(origin_img_clip, PREVIEW_SIZE),
                            dur))
        elif file_type in ["txt"]:
            # Text files become a centered white title card.
            origin_txt_clip = mpy.TextClip(
                open(file_name).read(),
                color="white",
                font="ArialUnicode",
                fontsize=100).on_color(PREVIEW_SIZE).set_position("center")
            clips.append(
                set_scene_dur(resize_and_fit(origin_txt_clip, PREVIEW_SIZE),
                              dur))

    return mpy.concatenate_videoclips(clips)
Ejemplo n.º 6
0
def create_example(id):
    """Compose the score page for example `id` together with its interval
    note labels.

    The from/to notes appear at t=1; the in-between notes cascade in from
    t=2, spaced evenly down the right edge of the frame.
    """
    score_page = mpy.ImageClip('score/cropped-score-page{}.png'.format(id + 1))
    score_page = score_page.set_start(0).set_position((0, 'center'))

    lower_note = mpy.TextClip(INTERVAL_FROM[id][0], **STYLE['note'])
    lower_note = lower_note.set_start(1).set_position(('right', 'bottom'))
    upper_note = mpy.TextClip(INTERVAL_TO[id][0], **STYLE['note'])
    upper_note = upper_note.set_start(1).set_position(('right', 'top'))

    count = len(BETWEEN_NOTES[id])
    # Even vertical gap so `count` labels plus the two endpoints fill the frame.
    gap = int(
        (EXAMPLE_HEIGHT - (count + 2) * FONT_BOX['note'][1]) / (count + 1))
    cascade = []
    for i, note in enumerate(reversed(BETWEEN_NOTES[id])):
        label = mpy.TextClip(note, **STYLE['note_between'])
        label = label.set_start(2 + (count - i) * 0.1)
        label = label.set_position(
            ('right', FONT_BOX['note'][1] * (i + 1) + gap * (i + 1)))
        cascade.append(label)

    return mpy.CompositeVideoClip(
        [score_page, lower_note, upper_note] + cascade,
        size=(EXAMPLE_WIDTH, EXAMPLE_HEIGHT),
    )
Ejemplo n.º 7
0
def watermark_vid():
    """Stamp a randomly chosen logo onto files/to_watermark_vid.mp4.

    The logo is scaled relative to the video width (module-level
    `width_scale`), padded from the bottom-left corner, and the result is
    written to files/watermarked_vid.mp4.

    Returns:
        (duration, width, height) of the source video.
    """
    video = mp.VideoFileClip("files/to_watermark_vid.mp4")
    logo_path = 'files/' + choice(LOGOS)

    logo_w, logo_h = Image.open(logo_path).size
    vid_w, vid_h = video.size
    pad = int(vid_w / (width_scale * 2.7))
    # Preserve the logo's aspect ratio at the scaled width.
    new_w = int(vid_w / width_scale)
    new_h = int(logo_h * (vid_w / width_scale) / logo_w)

    logo = mp.ImageClip(logo_path).set_duration(video.duration)
    logo = logo.resize(width=new_w, height=new_h)
    logo = logo.margin(left=pad, bottom=pad, opacity=0)
    logo = logo.set_pos(("left", "bottom"))

    final = mp.CompositeVideoClip([video, logo])
    final.subclip(0, video.duration).write_videofile(
        "files/watermarked_vid.mp4")
    return video.duration, video.size[0], video.size[1]
 def to_videoClip(self):
     """Convert the stored grayscale frames into a moviepy clip at self.fps."""
     rgb_frames = gray2rgb(self.imgs)
     frame_dur = 1 / self.fps
     frame_clips = [med.ImageClip(frame).set_duration(frame_dur)
                    for frame in rgb_frames]
     movie = med.concatenate_videoclips(frame_clips, method='compose')
     return movie.set_fps(self.fps)
Ejemplo n.º 9
0
def make_draw_gif(frameList, num):
    """Build a 24 fps clip from `frameList`, overlaying the meme artwork
    selected by `num` onto every frame.

    Args:
        frameList: iterable of PIL images (anything np.array accepts).
        num: meme index 0-6; any other value leaves frames unmodified,
            matching the original if/elif chain's fall-through.

    Returns:
        The concatenated moviepy clip.
    """
    # Dispatch table replaces the original 7-way if/elif chain; hoisted out
    # of the loop so the lookup happens once per call.
    overlays = {
        0: (Path("memes/barrington/bdraw.png"), barr_origin),
        1: (Path("memes/marius/draw.png"), marius_origin),
        2: (Path("memes/tim/tdraw.png"), tim_origin),
        3: (Path("memes/sheldraw.png"), shel_origin),
        4: (Path("memes/lan/lan-draw.png"), lan_origin),
        5: (Path("memes/hand.png"), hand_origin),
        6: (Path("memes/lan/landrew.png"), landrew_origin),
    }
    overlay = overlays.get(num)

    frameLength = 1.0 / 24.0
    imageClipLists = []
    for frame in frameList:
        if overlay is not None:
            frame = overlay_image(frame, overlay[0], overlay[1])
        clip = mp.ImageClip(np.array(frame)).set_duration(frameLength)
        imageClipLists.append(clip)
    concatClip = mp.concatenate_videoclips(imageClipLists, method="compose")
    return concatClip
Ejemplo n.º 10
0
def text_overlay(top_text, bottom_text, vid_file_path, output_vid_file_path):
    """Burn top/bottom caption text onto a video.

    Renders the captions to a transparent PNG sized like the video, overlays
    it centered for the whole duration, and writes the result.

    Args:
        top_text, bottom_text: caption strings.
        vid_file_path: source video path.
        output_vid_file_path: destination video path.
    """
    import moviepy.editor as mp  # imported lazily so the GUI opens fast

    vid_dims = vid_edit_utils.get_vid_dims(vid_file_path)

    # Render the caption image with the same dimensions as the video.
    make_transparent_text_img(top_text, bottom_text, vid_dims, 'text.png')

    video = mp.VideoFileClip(vid_file_path)

    text_img_overlay = (
        mp.ImageClip('text.png').set_duration(video.duration)
        .margin(right=0, top=0, opacity=0)  # (optional) logo-border padding
        .set_pos(("center", "center")))

    final = mp.CompositeVideoClip([video, text_img_overlay])
    final.write_videofile(output_vid_file_path)

    # Release the ffmpeg reader processes explicitly. A silent source has no
    # audio object, so guard before touching its reader (the original would
    # raise AttributeError on videos without an audio track).
    video.reader.close()
    if video.audio is not None:
        video.audio.reader.close_proc()
# vid_h =
Ejemplo n.º 11
0
def render(files, dirs, mixer=95, portat=0, music=False):
    """Watermark a short clip, optionally mix in background music, append the
    outro, and write "rendered_<files>" as mp4 into `dirs`.

    Args:
        files: input video file name inside `dirs`.
        dirs: working directory prefix (callers pass it with its separator).
        mixer: percentage of the original audio kept when music is mixed in.
        portat: the string "portat" selects a portrait 360x480 resize;
            anything else scales to a 480px width.
        music: base name of an mp3 under videoassets/, or falsy for none.

    Returns:
        The rendered file name, or False when the clip exceeds 120 seconds.
    """
    videofile = mp.VideoFileClip(dirs + files)
    if portat == "portat":
        video = videofile.resize((360, 480))
    else:
        video = videofile.resize(width=480)
    if video.duration <= 120:
        # Truthiness replaces the original `music != False` check and also
        # skips an empty string, which would have built a broken asset path.
        if music:
            musics = mp.AudioFileClip(f"videoassets/{music}.mp3")
            video = video.set_audio(mp.CompositeAudioClip([musics.volumex(
                1 - mixer/100), video.audio.volumex(mixer/100)]).set_duration(video.duration))
        intro = mp.VideoFileClip("videoassets/quickskitsoliveintro.mp4").resize(
            video.size
        )
        logoimage = mp.ImageClip("videoassets/logo.png")
        logo = (
            logoimage.set_duration(video.duration)
            .resize(height=40)
            .margin(right=50, bottom=50, opacity=0)
            .set_pos(("right", "bottom"))
        )
        final = mp.CompositeVideoClip([video, logo])
        final = mp.concatenate_videoclips([final, intro.fadein(1).fadeout(1)])

        newformat = changeformat("rendered_" + files, ".mp4")
        final.write_videofile(dirs + newformat, fps=20,
                              codec="mpeg4", audio_codec='aac', threads=4)
        # Release the source readers once the render has finished.
        intro.close()
        logoimage.close()
        videofile.close()
        if music:
            musics.close()
        return newformat
    else:
        return False
Ejemplo n.º 12
0
def test_save():
    """Persist the posted base64 PNG and wrap it in a 2-second test video."""
    img_data = request.form.get("imgData")
    # Strip the data-URI prefix ("data:image/png;base64,...") before decoding.
    payload = img_data.split(",")[1].strip()
    print("png size", len(payload))

    decoded = base64.urlsafe_b64decode(payload.encode('utf-8'))
    with open("test.png", "wb") as fout:
        fout.write(decoded)

    # Turn the single saved frame into a short mp4.
    clips = [mp.ImageClip(m).set_duration(2) for m in ['test.png']]
    concat_clip = mp.concatenate_videoclips(clips, method="compose")
    concat_clip.write_videofile("test.mp4", fps=24)

    return json.dumps({"success": True, "msg": ""})
Ejemplo n.º 13
0
    def overlayWatermark(self, dur, watermarkPath):
        """Build a watermark clip pinned to the bottom-left corner, padded by
        25px and lasting `dur` seconds."""
        clip = moviepy.ImageClip(watermarkPath)
        clip = clip.set_position(("left", "bottom"))
        clip = clip.margin(left=25, bottom=25, opacity=0)
        return clip.set_duration(dur)
Ejemplo n.º 14
0
    def _create_faceplate_clip(self, var="_FaceplateImage"):
        """Return a still clip of the faceplate image (attribute named by
        `var`) lasting the wallpaper instance's duration."""
        faceplate = getattr(self, var)
        duration = self._WpInst.getDuration()
        return mpy.ImageClip(faceplate).set_duration(duration)
Ejemplo n.º 15
0
def avMerge(song, photo):
    """Mux a still photo with an audio track into an mp4 named after the song.

    Args:
        song: pathlib.Path of the audio file (its .stem names the output).
        photo: path of the still image.

    Writes <save_dir>/<song stem>.mp4 at 24 fps with module-level fade
    durations applied.
    """
    # str() replaces the original single-interpolation f-strings — same
    # conversion, stated explicitly.
    video = mpy.ImageClip(str(photo))
    audio = mpy.AudioFileClip(str(song))
    video = (video.set_audio(audio)
             .set_duration(audio.duration)
             .fadein(fadein_dur)
             .fadeout(fadeout_dur))
    video.write_videofile(str(pathlib.PurePath(str(save_dir), str(song.stem))) + ".mp4", fps=24)
Ejemplo n.º 16
0
 def make_video(self, image: io.BytesIO, user_id: int):
     """Render `image` over the cog's audio track and write the result to the
     user's output file at 30 fps."""
     base_path = cog_data_path(self)
     audio_clip = editor.AudioFileClip(str(base_path / AUDIO_FILE_NAME))
     frame = imageio.imread(image)
     still: editor.ImageClip = editor.ImageClip(frame)
     still = still.set_duration(audio_clip.duration).set_audio(audio_clip)
     target = f"{base_path / 'output'}/{user_id}{OUTPUT_FILE_EXT}"
     still.write_videofile(target, fps=30)
Ejemplo n.º 17
0
def main():
    """Render a power/heart-rate overlay clip from a .FIT activity file.

    Reads the FIT records into a DataFrame, draws per-second power and
    heart-rate digits onto the base image, and writes a ProRes clip at 1 fps.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", type=str, required=True, help="Input .FIT file")
    parser.add_argument("--output", type=str, default="out.mov", help="Output video clip file")
    parser.add_argument("--duration", type=int, default=None, help="video clip duration")
    args = parser.parse_args()

    fit = FitFile(args.input)
    df = FitDataFrame(fit).as_df()
    df = df.fillna(0)
    # np.int was removed in NumPy 1.24; the builtin int is the documented
    # replacement and produces the same integer dtype here.
    df['power'] = df['power'].astype(int)
    df['heart_rate'] = df['heart_rate'].astype(int)

    def make_frame(t):
        # Draw the current second's power (large, white) and heart rate
        # (small, black) digits right-to-left onto a transparent surface.
        s = math.floor(t)
        surface = gz.Surface(1280, 720)
        xy = [158, 54]
        for c in reversed(list(str(df.iloc[s]['power']))):
            power = gz.text(c, "Helvetica Neue", 83, fontweight="bold",
                            xy=xy, fill=(1, 1, 1), v_align="center", h_align="center")
            power.draw(surface)
            xy[0] -= 45
        xy = [170, 115]
        for c in reversed(list(str(df.iloc[s]['heart_rate']))):
            hr = gz.text(c, "Helvetica Neue", 38, fontweight="bold",
                         xy=xy, fill=(0, 0, 0), v_align="center", h_align="center")
            hr.draw(surface)
            xy[0] -= 22
        return surface.get_npimage(transparent=True)

    # Default to one frame per FIT record unless --duration caps it.
    duration = len(df.index)
    if args.duration is not None:
        duration = args.duration

    background = mpy.ImageClip("base-clip.png", duration=duration)
    bg_mask = mpy.ImageClip("base-clip.png", duration=duration, ismask=True, fromalpha=True)

    # Split the RGBA overlay into a color clip plus an alpha mask clip.
    data = mpy.VideoClip(lambda t: make_frame(t)[:, :, :3], duration=duration)
    data_mask = mpy.VideoClip(lambda t: make_frame(t)[:, :, 3] / 255.0, duration=duration, ismask=True)
    data = data.set_mask(data_mask)

    clip = mpy.CompositeVideoClip(clips=[background, data], size=(1280, 720), use_bgclip=True)
    clip = clip.set_mask(bg_mask)
    clip = clip.set_duration(duration)
    clip.write_videofile(args.output, codec="prores_ks", fps=1, withmask=True)
Ejemplo n.º 18
0
def make_training_video(folder_dir):
    """Stitch the folder's .png frames (sorted by name, 1s each) into
    movie.mp4 and return it wrapped as a Video."""
    png_paths = sorted(
        os.path.join(folder_dir, name)
        for name in os.listdir(folder_dir)
        if name.endswith('.png')
    )
    stills = [mpy.ImageClip(p).set_duration(1) for p in png_paths]
    movie = mpy.concatenate_videoclips(stills, method="compose")
    movie.write_videofile("movie.mp4", fps=15)
    return Video("movie.mp4")
Ejemplo n.º 19
0
def _extract_images_random(source_path, data_label, folder_name,
                           nr_videos, nr_frames,
                           storage_data_path=storage_data_path):
    r"""Extract `nr_frames` random frames from up to `nr_videos` randomly
        chosen videos under `source_path`, and save each selection as a new
        1 fps video (plus the matching subset of its '-tool.json' labels)
        under `storage_data_path/data_label/folder_name`."""
    # Define target_path
    target_path = os.path.join(storage_data_path, data_label, folder_name)

    # Create directories if not existing
    if not os.path.isdir(target_path):
        os.makedirs(target_path)

    # Filenames have the form 'videoXX.mp4'; skip macOS '._' resource forks.
    filenames = [x.split('.')[0] for x in os.listdir(source_path) if '._' not in x
                 and '.mp4' in x]

    # Reset nr_videos if needed
    nr_videos = len(filenames) if nr_videos > len(filenames) else nr_videos

    # Select random filenames from list based on nr_videos
    if nr_videos != len(filenames):
            filenames = random.sample(filenames, nr_videos)

    for idx, filename in enumerate(filenames):
        msg = "Loading the .mp4 videos and selecting random frames: "
        msg += str(idx + 1) + " of " + str(len(filenames)) + "."
        print (msg, end = "\r")
        # Extract the video
        video = mpy.VideoFileClip(os.path.join(source_path, filename+'.mp4'))
        # Calculate the number of frames in the video
        # Substract 2 since the dataset has been downsamlped from 25 fps to 1 fps and the
        # number of labels is exactly two frames shorter than the video.
        # --> 2 times video transformation with indexing from 1 results in a difference of 2.
        frames = int(video.fps * video.duration)-2
        # Get list of random frame IDs
        if nr_frames >= frames:
            random_frames_idx = list(range(1, frames))
        else:
            random_frames_idx = random.sample(range(1, frames), nr_frames)
        # Load labels
        with open(os.path.join(source_path, filename+'-tool.json'), 'r') as fp:
            labels = json.load(fp)
        labels_keys = list(labels.keys())
        labels_values = list(labels.values())
        label_dict = dict()
        # Select random frames from video and from labels
        random_frames = list()
        for frame_id in random_frames_idx:
            # NOTE(review): `frame_id-1 * video.fps` parses as
            # frame_id - (1 * video.fps), NOT (frame_id - 1) * fps. At the
            # dataset's stated 1 fps both forms agree, but confirm the
            # intent before reusing this with other frame rates.
            random_frames.append(mpy.ImageClip(video.get_frame(frame_id-1 * video.fps)).set_duration(1))
            label_dict[labels_keys[frame_id-1]] = labels_values[frame_id-1]

        # Save random frames as video
        random_video = mpy.concatenate_videoclips(random_frames, method="compose")
        random_video.write_videofile(join_path([target_path, filename+'.mp4']), fps=1)
        # Save corresponding labels
        with open(os.path.join(target_path, filename+'-tool.json'), 'w') as fp:
            json.dump(label_dict, fp, sort_keys=False, indent=4)
Ejemplo n.º 20
0
    def build_movie(self, total_secs=54, w=850, h=480):
        """Assemble the main animation: a 2-second course cover followed by
        the course pictures spread evenly over `total_secs` seconds.

        Args:
            total_secs: total duration budget for the picture sequence
                (54 seconds by default).
            w, h: frame size every clip is resized to.

        Returns:
            list of moviepy clips with start times already assigned.
        """
        print('正在生成主动画……', end='')
        pics = self.read_pics()
        # Each picture gets an equal share of the time budget.
        drtn = total_secs / len(pics)
        clips = []
        cover = mpy.ImageClip(
            os.path.join(self.src_dir, self.crs_name, self.crs_name[4:] +
                         '.jpg')).set_fps(25).set_duration(2).resize((w, h))
        clips.append(cover)
        # enumerate replaces the original hand-maintained `n` counter;
        # pictures start back-to-back after the 2-second cover.
        for n, fn in enumerate(pics):
            img = mpy.ImageClip(fn).set_fps(25).set_duration(drtn).resize(
                (w, h)).set_start(2 + drtn * n)
            clips.append(img)

        print('完成')
        return clips
Ejemplo n.º 21
0
    def setNeedle(self, path=None, filename="needle.png", var="BaseNeedle"):
        """Load the needle base image and store it on attribute `var` as an
        ImageClip for further processing.

        `path` falls back to the instance's path prefix when omitted; it is
        concatenated directly with `filename`, as the original did.
        """
        if path is None:
            path = self._PathPrefix

        needle_clip = mpy.ImageClip(path + filename)
        setattr(self, var, needle_clip)
Ejemplo n.º 22
0
def save_video_file_from_images(img_arr,
                                vid_fname,
                                duration=2,
                                fps=30,
                                logger=None):
    """Write the images in `img_arr` out as a single video file.

    Args:
        img_arr: sequence of frames (anything moviepy's ImageClip accepts).
        vid_fname: output video file name.
        duration: seconds each image is shown.
        fps: output frame rate.
        logger: destination for the completion message; defaults to a stdout
            logger. (The original evaluated Logger.get_stdout_logger() in the
            signature, i.e. once at import time as a module-load side effect.)

    Returns:
        True on completion.
    """
    if logger is None:
        logger = Logger.get_stdout_logger()
    clips = [mpy.ImageClip(m).set_duration(duration) for m in img_arr]
    concat_clip = mpy.concatenate_videoclips(clips, method='compose')
    concat_clip.write_videofile(vid_fname, fps=fps)
    logger.info(" # save video file from {:d} images, {}.".format(
        len(img_arr), vid_fname))
    return True
Ejemplo n.º 23
0
def addLogo():
    """Overlay logo.png (400px tall, padded top-right) on test.mp4 for its
    whole duration and save the result as test1.mp4."""
    video = mp.VideoFileClip("test.mp4")

    logo = mp.ImageClip("logo.png")
    logo = logo.set_duration(video.duration)
    logo = logo.resize(height=400)
    logo = logo.margin(right=8, top=8, opacity=0)
    logo = logo.set_pos(("right", "top"))

    branded = mp.CompositeVideoClip([video, logo])
    branded.write_videofile("test1.mp4")
Ejemplo n.º 24
0
def image_to_rotate_clip(image_path):
    """Turn a still image into a short rotate-and-zoom clip that plays
    forward and then mirrored backward."""
    base = mpe.ImageClip(image_path)
    spinning = base.add_mask().fx(
        mpe.vfx.rotate, lambda t: 4 * t, expand=False)
    spinning = spinning.set_duration(4).resize(lambda t: 1 + 0.115 * t)
    forward = mpe.CompositeVideoClip([spinning.set_pos("center")])
    # Mirror before trimming, exactly as the original did.
    backward = forward.fx(mpe.vfx.time_mirror)
    forward = forward.subclip(0.5, -0.5)
    backward = backward.subclip(0.5, -0.5)
    return mpe.concatenate_videoclips([forward, backward])
Ejemplo n.º 25
0
def video_img(path, length):
    """Write the image at `path` out as a 10 fps still video, <path>.mp4.

    Args:
        path: image file path; the output is written next to it.
        length: clip duration in seconds (any value float() accepts).

    Returns:
        "OKVID" on success, or the exception message string on failure —
        the string-result protocol is kept for existing callers.
    """
    try:
        length = float(length)
        # write_videofile returns None, so the original's `img = ...`
        # binding was dead; call for the side effect only.
        mpy.ImageClip(path).set_duration(length).write_videofile(
            path + ".mp4", fps=10)
    except Exception as e:
        return str(e)

    return "OKVID"
Ejemplo n.º 26
0
    def cut_every_movies(self, movie, start_time, end_time, targetname):
        """Stamp the module-level logo onto `movie`, cut out the span
        [start_time, end_time], and write it to `targetname`."""
        video = mp.VideoFileClip(movie)
        print(logo_path)

        logo = mp.ImageClip(logo_path).set_duration(video.duration)
        logo = logo.resize(height=75)
        logo = logo.margin(right=10, top=10, opacity=0)
        logo = logo.set_pos(("right", "bottom"))

        watermarked = mp.CompositeVideoClip([video, logo])
        segment = watermarked.subclip(start_time, end_time)
        segment.write_videofile(targetname, audio_codec='aac')
Ejemplo n.º 27
0
def audio_to_video_with_static_image(image_path, image_name, audio_path,
                                     audio_name, video_path, video_name):
    """Render <image_name>.jpg as a still video backed by <audio_name>.mp3.

    Returns the full path of the written <video_name>.mp4.
    """
    soundtrack = mpy.AudioFileClip(os.path.join(audio_path, f'{audio_name}.mp3'))
    still = mpy.ImageClip(os.path.join(image_path, f'{image_name}.jpg'))
    still = still.set_duration(soundtrack.duration).set_audio(soundtrack)

    out_path = os.path.join(video_path, f'{video_name}.mp4')
    # has to have audio_codec='aac', otherwise no sound
    # see https://stackoverflow.com/questions/40445885/no-audio-when-adding-mp3-to-videofileclip-moviepy
    still.write_videofile(out_path, audio_codec='aac', fps=24)
    return out_path
Ejemplo n.º 28
0
def add_mosac(video,start_time_s,end_time_s,img_path,postition_x_y):
    """Overlay the image at `img_path` (e.g. a mosaic/blur patch) onto
    `video` at position `postition_x_y`, visible only between
    `start_time_s` and `end_time_s`. Returns the composite clip."""
    logo = (mp.ImageClip(img_path)
            .set_duration(video.duration)  # overlay clip length
            # .resize(height=100)  # overlay height, proportional scaling
            .margin(left=0, top=0, opacity=1) # overlay margin and opacity
            # .set_pos(("left","top")))
            .set_pos(postition_x_y))  # overlay position
    # Overlay start time.
    # NOTE(review): assigns .start/.end directly instead of using moviepy's
    # documented set_start()/set_end() — confirm direct attribute assignment
    # keeps the composite's timing consistent.
    logo.start = start_time_s
    # Overlay end time.
    logo.end=end_time_s
    result = mp.CompositeVideoClip([video, logo])
    return result
Ejemplo n.º 29
0
def f2mp4(path, fps):
    """
    Convert an input file (.tif stack or any video) to an H264 high-quality mp4.
    Inputs: filepath, desired output fps
    Output: file in the same folder named '..._<fps>.mp4'
    """

    print("==============================================")
    print("Convert file to MP4!")
    # NOTE(review): pathout is built from the fps argument; the .tif branch
    # below re-prompts for fps afterwards, so the name may not match the
    # actual rate — confirm which is intended.
    pathout = path[:-4] + '_' + str(fps) + '.mp4'
    if path.endswith('.tif'):
        #        import tiffile
        im = tiffile.imread(path)
        nFrames, h, w = im.shape
        fps = int(input("Enter desired fps: "))
        dur = 1 / fps
        clip = []
        print("---------------------")
        print("Read TIF file!")
        # Grayscale pages become RGB still clips, one per frame.
        for i in tqdm.tqdm(range(nFrames)):
            fr = cv2.cvtColor(im[i], cv2.COLOR_GRAY2RGB)
            clip.append(mp.ImageClip(fr).set_duration(dur))
        video = mp.concatenate_videoclips(
            clip, method="compose",
            ismask=False)  #ismask=True to make grayscale

    else:
        video = mp.VideoFileClip(path)
        fpsIn = int(video.fps)
        if fps != fpsIn:
            print("Conflict in fps! \n", "[0] Use fps of input file;\n",
                  "[1] Use desired fps w/o speedup;\n",
                  "[2] Use desired fps w/ speedup:")
            k = input('Input your selection: ')
            # BUG FIX: input() returns a string; the original compared it to
            # the ints 2 and 0, so neither branch could ever execute.
            if k == '2':
                sf = fps / fpsIn
                video = video.fx(mp.vfx.speedx, sf)
            elif k == '0':
                fps = fpsIn
        video.reader.close()  # To fix handel error problem
    print("---------------------")

    print("Save to mp4!")
    video.write_videofile(pathout,
                          fps=fps,
                          codec='libx264',
                          bitrate='32 M',
                          preset='ultrafast')

    print("==============================================")
    print("MP4 convertion Done!")
Ejemplo n.º 30
0
def procesar_video(name, id, filtro):
    """Process an uploaded video: stamp the logo, swap in the stock audio,
    blend an overlay effect via ffmpeg, grab a thumbnail, and move the
    results into the uploads folder.

    Args:
        name: uploaded file name inside ./uploads/ (removed at the end).
        id: output base name for the processed video and thumbnail.
        filtro: effect file name under ./recursos/ blended over the video.
    """
    VIDEO_FPS = 20
    VIDEO_CODEC = 'libx264'
    FOLDER_FINAL = './uploads/'
    LOGO = "./recursos/logo.jpg"
    AUDIO = "./recursos/audio.mp3"
    EFECT = "./recursos/" + filtro

    vid_title = id
    #video = mp.VideoFileClip("./video1.mov").set_start(0).audio_fadein(1).audio_fadeout(1).fadein(1).fadeout(1)
    video = mp.VideoFileClip(FOLDER_FINAL + name).set_start(0).audio_fadein(
        0).audio_fadeout(0).fadein(0).fadeout(0)
    # Clips are capped at 15 seconds.
    TIME = video.duration if video.duration <= 15 else 15
    W, H = video.size

    logo = (
        mp.ImageClip(LOGO).set_duration(TIME).resize(
            height=65)  # if you need to resize...
        .margin(left=10, top=10, opacity=0).set_pos(("left", "top")))

    # The original soundtrack is replaced entirely with the stock audio.
    audio = mp.AudioFileClip(AUDIO).audio_fadein(0).audio_fadeout(0)
    video = video.set_audio(audio.set_duration(TIME))
    final = mp.CompositeVideoClip([video, logo])
    # NOTE(review): `progress_bar` was removed in newer moviepy releases
    # (superseded by `logger`) — confirm the pinned moviepy version.
    final.subclip(0, TIME).write_videofile('./procesar/' + vid_title +
                                           "ready.mp4",
                                           fps=VIDEO_FPS,
                                           codec=VIDEO_CODEC,
                                           verbose=False,
                                           progress_bar=True)

    # Blend the effect clip over the video with ffmpeg's overlay blend mode.
    cmd = [
        "ffmpeg", "-i", "./procesar/" + vid_title + "ready.mp4", "-i", EFECT,
        "-filter_complex",
        "[0:v] scale={w}:{h}[a]; [1:v] scale={w}:{h}[b]; [a][b] blend=all_mode='overlay':all_opacity=0.7"
        .format(w=W, h=H), "-t", "0:{time}".format(time=TIME),
        "{title}.mp4".format(title='./procesar/' + vid_title), "-y"
    ]

    # Grab a thumbnail frame one second into the processed video.
    cmd2 = [
        "ffmpeg", "-i", './procesar/' + vid_title + ".mp4", "-ss", "00:00:01",
        "-vframes", "1", "{title}.jpg".format(title='./procesar/' + vid_title)
    ]

    #subprocess.run(cmd)
    subprocess.call(cmd)
    subprocess.call(cmd2)
    # Publish the results, then clean up the intermediate render and the
    # original upload.
    shutil.move('./procesar/' + vid_title + ".mp4", FOLDER_FINAL)
    shutil.move('./procesar/' + vid_title + ".jpg", FOLDER_FINAL)
    os.remove('./procesar/' + vid_title + "ready.mp4")
    os.remove(FOLDER_FINAL + name)