Example #1
0
 def write_frames(self, frames: List[np.ndarray], filepath: str, audio: CompositeAudioClip) -> str:
     """Write *frames* to *filepath* as an h264-encoded .mp4 with the given audio track.

     Args:
         frames: the ordered RGB frames to encode.
         filepath: destination path of the rendered .mp4 file.
         audio: the audio track to attach to the rendered clip.

     Returns:
         The filepath that was written.
     """
     vclip = ImageSequenceClip(frames, fps=self.fps)
     # BUG FIX: moviepy's set_duration() returns a new clip rather than
     # mutating in place; the original discarded the return value, so the
     # audio's duration was never actually trimmed to the video's length.
     vclip.audio = audio.set_duration(vclip.duration)
     vclip.write_videofile(filepath, codec='libx264', fps=self.fps)
     return filepath
Example #2
0
    def develop_drawn_clip(org_clip: VideoFileClip, sq_frames: List[float], all_frames: List[np.ndarray],
                           buffer_s: float = 1) -> VideoClip:
        """Calculates subclip start and end time, creates a subclip to reference.
        Combines the drawn frames (with buffer) before transforming into a video clip
        Adds original clip's audio to the video containing drawn frames.

        Args:
            org_clip: the original clip to leverage audio, duration data from
            sq_frames: the sequence of frames that have motion annotations draw in them
            all_frames: the full list of frames that we'll be slicing
            buffer_s: the seconds of buffer to add before and after the motion area
        """
        duration = org_clip.duration
        tot_frames = len(all_frames)
        # Number of padding frames to include on each side of the motion area
        buffer_fr = int(org_clip.fps * buffer_s)

        # BUG FIX: the original used sq_frames[0] directly, never subtracting
        # buffer_fr, so no leading buffer was ever applied despite the docstring
        # promising buffer "before and after" the motion area.
        # Frame positions are cast to int because sq_frames is List[float] and
        # list slicing below requires integer indices.
        st_with_buffer = int(sq_frames[0]) - buffer_fr
        end_with_buffer = int(sq_frames[-1]) + buffer_fr

        # Clamp the buffered positions to the valid frame range
        start_frame_pos = max(st_with_buffer, 0)
        end_frame_pos = min(end_with_buffer, tot_frames - 1)

        # Convert frame positions to timestamps within the original clip
        start_t = ((start_frame_pos / tot_frames) * duration)
        end_t = ((end_frame_pos / tot_frames) * duration)

        # Cut the original clip to fit the buffer
        cut_clip = org_clip.subclip(start_t, end_t)

        # Generate the sequence of drawn clips
        drawn_clip = ImageSequenceClip(all_frames[start_frame_pos:end_frame_pos], fps=org_clip.fps)
        if drawn_clip.duration != cut_clip.duration:
            # Cut the tail off the drawn clip to match the cut_clip.
            drawn_clip = drawn_clip.subclip(0, end_t - start_t)
        # Make the drawn clip a VideoClip by concatenating it with only itself. Add original clip's audio
        drawn_clip = concatenate_videoclips([drawn_clip])
        drawn_clip.audio = cut_clip.audio
        return drawn_clip
Example #3
0
    # NOTE(review): this is the tail of a frame-processing loop whose header is
    # above this chunk; img/img1/img2/img3, path, and the i* counters are
    # defined there. img3 is saved under index i2 here -- presumably the
    # earlier (unseen) lines save img/img1/img2 under preceding indices;
    # verify the image-to-index pairing against the loop header.
    img3.save(path+str(i2)+".jpg", "JPEG")

    # Record the three frame paths produced this iteration, in playback order.
    # Only three paths are appended although four images are closed below --
    # TODO confirm that is intentional (img3 may be a working/overlap frame).
    output_array_.append(path+str(i)+".jpg")
    output_array_.append(path+str(i1)+".jpg")
    output_array_.append(path+str(i2)+".jpg")

    # Release the PIL image handles before the next iteration.
    img.close()
    img1.close()
    img2.close()
    img3.close()

    # Advance the three frame indices by a stride of 3; i3 counts iterations.
    i += 3
    i1 += 3
    i2 += 3
    i3 += 1

    # Stop once the iteration counter or any frame index reaches the end.
    if i3 == frame_count-1 or i >= frame_count or i1 >= frame_count or i2 >= frame_count:
        done = True


# Build the output video lazily from the saved frame files
# (load_images=False keeps them on disk instead of in memory).
output_video = ImageSequenceClip(
    output_array_, fps=video.fps, load_images=False)

# Match the source video's duration and carry its audio track over.
output_video.duration = video.duration

output_video.audio = video.audio
output_video.write_videofile("UnCompressed.mp4", fps=video.fps)

# Clean up the frame directory and recreate it for the next run.
# NOTE(review): assumes output_array_path == "frames" -- the directory removed
# and the directory recreated should match; confirm against the definition of
# output_array_path earlier in the file.
shutil.rmtree(output_array_path)
os.mkdir("frames")
Example #4
0
    # Derive a deterministic image filename from the frame's hash.
    name = hex(hash(frame)).replace('0x', '') + '.png'

    frame.image.save(name)
    frames.append(name)

    # BUG FIX: the original f-string carried a stray trailing apostrophe
    # ("...audio...'"), printing an unbalanced quote in the status message.
    print(f"Generating '{post.title}' audio...")

    voice = tts.TTS.Post(post)
    # Reuse the frame's hash so each .mp3 pairs with its matching .png.
    name = hex(hash(frame)).replace('0x', '') + '.mp3'

    voice.tts.save(name)
    audio.append(name)

print(frames, audio)


print('Rendering video...')
clips = []

# Render one intermediate clip per (image, narration) pair.
# NOTE: the loop variable was renamed from `audio` to `audio_path` -- the
# original shadowed the `audio` list it was zipping over, which only worked
# because zip() had already captured the list before the first rebind.
for img, audio_path in zip(frames, audio):
    clip = ImageSequenceClip([img], fps = 24)
    audio_clip = AudioFileClip(audio_path)

    # BUG FIX: a single-image ImageSequenceClip at 24 fps lasts only 1/24 s,
    # which truncated the narration; stretch the clip to the audio's length.
    clip = clip.set_duration(audio_clip.duration)
    clip.audio = audio_clip
    name = 'CLIP_' + hex(hash(img)).replace('0x', '') + '.mp4'
    clips.append(name)

    clip.write_videofile(name)
    # BUG FIX: release the ffmpeg reader behind the audio clip; the original
    # leaked one open file handle per rendered clip.
    audio_clip.close()

# Stitch the intermediate clips into the final video, then close the source
# readers once the final render is on disk (the original never closed them).
source_clips = [VideoFileClip(e) for e in clips]
clip = concatenate_videoclips(source_clips)
clip.write_videofile('final.mp4')
for source in source_clips:
    source.close()