Example #1
    def _generate_from_frames(self, frames: List[Frame], fps: float):
        # Collect the raw image arrays from the frames.
        images = [f.image for f in frames]

        # Build a video clip from the image sequence at the requested frame rate.
        clip = ImageSequenceClip(images, fps=fps)

        # Attach the slice of the source audio that matches the frame range.
        audio_clip = self.audio.subclip(frames[0].timestamp,
                                        frames[-1].timestamp)
        new_clip = clip.set_audio(audio_clip)

        return new_clip
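
The method above assumes a surrounding class that holds the source audio, plus a Frame type pairing an image with its timestamp. A minimal sketch of that assumed context, using illustrative names (Frame, ClipGenerator) and the moviepy 1.x API matching the subclip/set_audio calls above; none of this is taken from the original project:

from dataclasses import dataclass

import numpy as np
from moviepy.editor import AudioFileClip


@dataclass
class Frame:
    image: np.ndarray   # H x W x 3 frame
    timestamp: float    # position of the frame within the source audio, in seconds


class ClipGenerator:
    def __init__(self, audio_path: str):
        # Audio track that _generate_from_frames() cuts matching subclips from.
        self.audio = AudioFileClip(audio_path)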
Example #2

import numpy as np
from moviepy.audio.AudioClip import AudioArrayClip
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip

# DataList is a project-specific alias for a list of h x w x 3 numpy arrays.


def merge_images_and_audio(images: DataList, audio: np.ndarray,
                           video_duration: float, sound_hz: int,
                           video_name: str):
    """
    Creates a video with sound from an image list and an audio track.

    Args:
        images: List of images, each an h x w x 3 numpy array.
        audio: Numpy array holding the sound samples, of shape (N, 1) for mono
            or (N, 2) for stereo.
        video_duration: Duration of the video in seconds (should match the audio).
        sound_hz: Sample rate of the audio in Hz.
        video_name: Filename of the resulting video file.
    """
    # TODO: there is still a problem with the audio here.
    # The audio should always contain two channels,
    # and the sample rate should then work for both mono and stereo input.

    # Give every image an equal share of the total duration.
    clip = ImageSequenceClip(images,
                             durations=[video_duration / len(images)] *
                             len(images))
    # Reshape to (N, 2); note this only succeeds if the array is already stereo-shaped.
    s = audio.reshape((len(audio), 2))
    audio = AudioArrayClip(s, sound_hz)
    clip = clip.set_audio(audio)
    clip.write_videofile(video_name, fps=len(images) / video_duration)
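
The TODO above flags that mono input breaks the reshape. One way to address it, not taken from the original project, is to normalise the samples to two channels before building the AudioArrayClip; to_stereo below is a hypothetical helper sketching that idea (assuming moviepy 1.x):

import numpy as np
from moviepy.audio.AudioClip import AudioArrayClip


def to_stereo(audio: np.ndarray) -> np.ndarray:
    # Hypothetical helper: return an (N, 2) array for mono or stereo input.
    audio = np.asarray(audio)
    if audio.ndim == 1:          # mono, shape (N,): duplicate the channel
        return np.column_stack([audio, audio])
    if audio.shape[1] == 1:      # mono, shape (N, 1): duplicate the channel
        return np.repeat(audio, 2, axis=1)
    return audio                 # already (N, 2)

With such a helper, the reshape line could become audio = AudioArrayClip(to_stereo(audio), fps=sound_hz), leaving stereo input untouched while duplicating a single mono channel.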