Example #1
    def gen_mv(self):
        # generates the music video combining gif and audio
        def make_frame(t):
            # pick the screen for time t, clamping so the last sample stays in range
            ind = min(int(t // self.period), len(self.screens) - 1)
            frame = self.screens[ind]
            # broadcast the grayscale frame (H, W, 1) against (1, 1, 3) to get RGB
            return frame[:, :, None] * np.ones((1, 1, 3), np.uint8)

        total_duration = len(self.screens)*self.period # in seconds
        animation = VideoClip(make_frame, duration=total_duration)

        # attach the audio track and render the final video
        audio = AudioFileClip(self.mfile)

        animation.set_audio(audio).write_videofile("test.mp4", fps=20)
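
A minimal, self-contained sketch of the same pattern, assuming the moviepy 1.x API and synthetic grayscale screens (no audio track, so it runs without any media file):

import numpy as np
from moviepy.editor import VideoClip

period = 0.5                                # seconds each screen is shown
screens = [np.full((90, 160), v, np.uint8)  # synthetic grayscale frames
           for v in (0, 128, 255)]

def make_frame(t):
    ind = min(int(t // period), len(screens) - 1)
    # (H, W, 1) * (1, 1, 3) broadcasts the single channel to RGB
    return screens[ind][:, :, None] * np.ones((1, 1, 3), np.uint8)

clip = VideoClip(make_frame, duration=len(screens) * period)
clip.write_videofile("test.mp4", fps=20)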
Example #2
def demo(filename, tracking, output, t_start=0., t_end=None, shift=0.,
         labels=None, landmark=None, height=200):

    # parse label file: each line is "<identifier> <label>"
    if labels is not None:
        with open(labels, 'r') as f:
            labels = {}  # replace the path string with the parsed mapping
            for line in f:
                identifier, label = line.strip().split()
                labels[int(identifier)] = label

    video = Video(filename)

    import os
    os.environ['IMAGEIO_FFMPEG_EXE'] = 'ffmpeg'

    from moviepy.editor import VideoClip, AudioFileClip

    make_frame = get_make_frame(video, tracking, landmark=landmark,
                                labels=labels, height=height, shift=shift)
    video_clip = VideoClip(make_frame, duration=video.duration)
    audio_clip = AudioFileClip(filename)
    clip = video_clip.set_audio(audio_clip)

    if t_end is None:
        t_end = video.duration

    # render only the requested [t_start, t_end] window
    clip.subclip(t_start, t_end).write_videofile(output, fps=video.frame_rate)
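
The trim-and-export tail of this function stands on its own; here is a sketch of the same subclip pattern using moviepy's stock VideoFileClip reader instead of the custom Video/get_make_frame pair (file names and times are hypothetical):

from moviepy.editor import VideoFileClip

clip = VideoFileClip("talk.mp4")  # hypothetical input; it keeps its own audio track
t_start, t_end = 10.0, 30.0       # seconds
clip.subclip(t_start, t_end).write_videofile("talk_excerpt.mp4", fps=clip.fps)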
Example #3
def video():
    snd = AudioFileClip("space.mp3")
    # the clip runs for one thirtieth of the audio's duration
    clip = VideoClip(c.animation, duration=snd.duration / 30.)

    clip = clip.set_audio(snd).set_duration(snd.duration / 30.)
    clip.write_videofile('cam.mp4', fps=24)
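
Note that set_audio does not alter the video clip's duration, which is presumably why the duration is pinned again with set_duration after the audio is attached.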
Example #4
def create_videoclip(frames, duration, frame_rate, audio_in=None):
    """
    Create a VideoClip object
    :param frames: a iterator returning numpy frame objects
    :param duration: Duration of clip in seconds
    :param audio_in: file name of audio file, or None
    :return:
    """
    def make_frame(t):
        nonlocal current_frame
        nonlocal current_frame_index
        required_frame_index = int(t * frame_rate)
        # advance through the iterator until we reach the frame for time t
        while required_frame_index > current_frame_index:
            current_frame = next(frames)
            current_frame_index += 1
        # copy the first three channels, dropping any alpha channel
        rgb_frame = np.empty(
            (current_frame.shape[0], current_frame.shape[1], 3),
            dtype=np.uint8)
        rgb_frame[:, :] = current_frame[:, :, 0:3]
        return rgb_frame

    current_frame = next(frames)
    current_frame_index = 0
    video_clip = VideoClip(make_frame, duration=duration)
    if audio_in:
        print("Adding audio clip", audio_in)
        audio_clip = AudioFileClip(audio_in).subclip(0, duration)
        video_clip = video_clip.set_audio(audio_clip)
    return video_clip
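
A usage sketch for the function above, assuming moviepy 1.x plus the numpy import the snippet itself omits; the generator yields RGBA frames, which is exactly what the [:, :, 0:3] slice strips back down to RGB:

import numpy as np

def frame_source(n, h=64, w=64):
    # hypothetical generator of RGBA frames
    for i in range(n):
        frame = np.zeros((h, w, 4), np.uint8)
        frame[..., 0] = (i * 8) % 256  # animate the red channel
        frame[..., 3] = 255            # opaque alpha, discarded by the slice
        yield frame

fps = 24
n_frames = 48
clip = create_videoclip(frame_source(n_frames), n_frames / fps, fps)
clip.write_videofile("generated.mp4", fps=fps)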
Example #5
    def save_video(self,
                   video: VideoClip,
                   video_filename: str,
                   output_appendix: str = "_output") -> None:
        """Save the resulting video.

        :param video: the processed VideoClip
        :param video_filename: original file name
        :param output_appendix: appendix to add to file
        :return: None
        """
        output_filename = append_to_file_name(video_filename, output_appendix)
        video.set_audio(self.audio)
        video.write_videofile(
            output_filename,
            codec="libx264",
            audio_codec="aac",
            temp_audiofile=output_filename + ".tmp",
            remove_temp=True,
            fps=self.video.fps,
        )
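
moviepy clip methods such as set_audio and set_duration return modified copies rather than mutating the clip in place; the original code discarded the new clip and would have written the video without its audio track, which is why the line above reassigns the result.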
Example #6
def run_inference(
    request: types.Request,
    credentials: HTTPAuthorizationCredentials = security.http_credentials,
):
    print("************ Getting avatar image...")
    avatar, _ = handle_image_request(request.avatar)
    avatar = cv2.resize(avatar, model_input_size)
    if avatar.ndim == 2:
        avatar = np.tile(avatar[..., None], [1, 1, 3])
    print("************ Done!")

    print("************* Setting avatar image to model ...")
    model.set_source_image(avatar)
    print("************ Done!")

    print("************* Getting video frames ...")
    video_bytes = base64.b64decode(request.video.content)
    video_frames = list(io.bytes2video(video_bytes, fps=request.fps))
    print("************ Done!")

    video_name = uuid.uuid4().hex
    io.write_fn(f"app/static/{video_name}_orig.webm", video_bytes)

    video_path = f"app/static/{video_name}.mp4"

    print("************* Getting audio Object ...")
    audio = io.get_audio_obj(video_bytes)
    print("************ Done!")

    bbox = model.get_face_bbox(video_frames[0])

    print("************* Getting transform video ...")
    output_frames = model_funs.generate_video(
        model,
        video_frames,
        merge=request.merge,
        axis=request.axis,
        verbose=True,
        model_input_size=model_input_size,
        horizontal_flip=request.flip,
        relative=request.transferFace,
        # relative=True,
        crop_bbox=bbox,
        # debug=False,
    )
    model.reset_frames()
    print("************ Done!")

    print("************* Getting video in moviepy ...")

    def gen_video(t):
        # clamp the index so t == duration maps to the last frame
        return output_frames[min(int(t * request.fps), len(output_frames) - 1)]

    video = VideoClip(gen_video, duration=len(output_frames) / request.fps)
    print("************ Done!")

    # Audio and video durations can differ slightly, so clamp both to the shorter one
    final_duration = min(video.duration, audio.duration)
    video = video.set_duration(final_duration)
    audio = audio.set_duration(final_duration)

    print("************* Setting audio to video ...")
    video = video.set_audio(audio)
    print("************ Done!")

    print("************* Saving and decoding video ...")
    video.write_videofile(video_path, fps=request.fps)

    video_bytes = io.read_fn(video_path)
    result = base64.b64encode(video_bytes).decode()
    print("************ Done!")

    return Response(video=types.Video(content=result))
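
The duration-reconciliation step above is worth isolating, since decoded audio and synthesized video rarely come out exactly the same length; a sketch with synthetic inputs (moviepy 1.x API, hypothetical audio file):

import numpy as np
from moviepy.editor import VideoClip, AudioFileClip

fps = 25
frames = [np.zeros((64, 64, 3), np.uint8) for _ in range(100)]

video = VideoClip(lambda t: frames[min(int(t * fps), len(frames) - 1)],
                  duration=len(frames) / fps)
audio = AudioFileClip("voice.mp3")           # hypothetical audio source

final = min(video.duration, audio.duration)  # clamp both to the shorter track
video = video.set_duration(final).set_audio(audio.set_duration(final))
video.write_videofile("out.mp4", fps=fps)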
Example #7
def make_frame(t):
    # index of the first timeline entry at or after t
    idx = [i for i in range(len(time_sum) - 1) if t <= time_sum[i]][0]

    # cross-fade weights between the two neighbouring stills
    delta_fade = time_sum[idx] - time_sum[idx - 1]
    fade_to = (t - time_sum[idx - 1]) / delta_fade  # fraction of the next image
    fade_from = 1 - fade_to                         # fraction of the previous image

    frame0 = fade_from * cv2.imread(jpegs[idx // 2], cv2.IMREAD_COLOR)
    frame1 = fade_to * cv2.imread(jpegs[idx // 2 + 1], cv2.IMREAD_COLOR)

    # blend, convert the float result back to uint8, and reorder BGR -> RGB
    blended = (frame0 + frame1).astype('uint8')
    return cv2.cvtColor(blended, cv2.COLOR_BGR2RGB)

clip = VideoClip(make_frame, duration=time_sum[-1])  # clip spans the whole timeline

if audio_on:
    audio = AudioFileClip("audioclip.mp3")
    audio = audio.set_duration(time_sum[-1])
    clip = clip.set_audio(audio)

clip.write_videofile("my_animation_%sfps_dummy.mp4" %fps, fps=fps) # export as video
#clip.write_gif("my_animation.gif", fps=24) # export as GIF
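
This snippet assumes two names defined elsewhere: jpegs, a sorted list of image paths, and time_sum, a cumulative timeline with two intervals per neighbouring image pair (which is what the idx // 2 lookup implies). One way such a timeline could be built, purely as an illustration:

import glob
import numpy as np

jpegs = sorted(glob.glob("stills/*.jpg"))  # hypothetical image directory
interval = 1.5                             # seconds per timeline interval
# two intervals per consecutive image pair, matching the idx // 2 lookup
time_sum = np.cumsum([interval] * (2 * (len(jpegs) - 1)))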