Example #1
    def render(self, name, start_offset, duration, fps=30, audio=None):
        """
        render object
        :param name: file to save as
        :param start_offset: where to start
        :param duration: how long the clip will be
        :param fps: default: 30
        :param audio: audio file path
        :return: None. saves file
        """
        self.offset = start_offset
        clip = mpy.VideoClip(self.make_frame, duration=duration * self.speed).speedx(self.speed)
        if audio is not None:
            subprocess.run(["ffmpeg", "-loglevel", "quiet", "-i", audio, "-filter:a",
                            f"atempo={self.speed}", "-vn", f"{audio}.mp3"])
            audio = f"{audio}.mp3"
            audio_start_offset = start_offset
            acl = mpy.AudioFileClip(audio)
            # pad with silence before and after so the audio spans the full clip
            blnk = mpy.AudioClip(lambda x: 0, duration=self.minimum / 1000)
            aftr = max(0, (duration + audio_start_offset) - acl.duration)
            ablnk = mpy.AudioClip(lambda x: 0, duration=aftr)
            snd = mpy.concatenate_audioclips([blnk, acl, ablnk])
            clip = clip.set_audio(snd.subclip(audio_start_offset, duration + audio_start_offset))
            # remove(audio)

        if name.endswith(".gif"):
            clip.write_gif(name, fps=fps)
            # re-compress the GIF in place with gifsicle
            subprocess.run(
                ["../gifsicle-1.82.1-lossy/mac/gifsicle", "-O3", "--lossy=30", "-o",
                 name, name])
        else:
            clip.write_videofile(name, fps=fps)
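
The core move above is padding the AudioFileClip with blank AudioClips so it spans the requested window before set_audio. A minimal stand-alone sketch of that idiom, assuming hypothetical inputs video.mp4 and voice.mp3 and a fixed 2-second lead-in:

import moviepy.editor as mpy

video = mpy.VideoFileClip("video.mp4")    # hypothetical input video
voice = mpy.AudioFileClip("voice.mp3")    # hypothetical narration track
parts = [mpy.AudioClip(lambda t: 0, duration=2), voice]  # 2 s of leading silence
tail = video.duration - (2 + voice.duration)
if tail > 0:
    parts.append(mpy.AudioClip(lambda t: 0, duration=tail))  # trailing silence
audio = mpy.concatenate_audioclips(parts)
video = video.set_audio(audio.subclip(0, video.duration))
video.write_videofile("with_voice.mp4", fps=30)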
Example #2
def change_audio(dir, vid_file, cap_file):
    video = mp.VideoFileClip(dir + vid_file)

    with open(dir + cap_file) as f:
        j = json.loads(f.read())
        starts = list(map(lambda c: float(c['start']), j))

    nbrClips = len(starts)

    # load one MP3 per caption and keep only the first 80% of each
    clips = list(map(lambda s: mp.AudioFileClip("./media/tmp/{}.mp3".format(s)), range(nbrClips)))
    clips = list(map(lambda c: c.subclip(0, c.duration*0.8), clips))

    # print(starts)

    current = 0

    # zero-valued frame used to synthesize silence between caption clips
    make_frame = lambda t: [0]
    padded_clips = []
    for i in range(nbrClips):
        if current < starts[i]:
            # insert silence up to the caption's start time, then append the clip
            padded_clips.append(mp.AudioClip(make_frame, duration=starts[i] - current))
            current = starts[i]
            padded_clips.append(clips[i])
            current += clips[i].duration

    concat = mp.concatenate_audioclips(padded_clips)
    concat.write_audiofile(dir + 'audio.mp3')
    video.write_videofile('media/ready/' + vid_file, audio = dir + 'audio.mp3')
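
Each caption's audio (./media/tmp/<index>.mp3) is positioned at its start time by inserting zero-valued AudioClip spacers; a caption whose start time has already been passed is skipped. The combined track is written to disk and attached through the audio argument of write_videofile.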
Example #3
 def save_to_file(self, filename):
     scipy.io.wavfile.write(filename, int(AudioBlock.SampleRate),
                            self.samples)
     return
     # unreachable: this moviepy-based export is kept only for reference
     clip = movie_editor.AudioClip(self.make_audio_frame,
                                   duration=self.get_time_duration())
     #clip = movie_editor.AudioArrayClip(self.samples, fps=AudioBlock.SampleRate)
     clip.write_audiofile(filename, fps=int(AudioBlock.SampleRate))
     del clip
Example #4
 def get_audio_clip(self):
     if not os.path.isfile(self.filename):
         audio_clip = movie_editor.AudioClip(self._blank_clip_make_frame,
                                             duration=1)
         audio_clip = audio_clip.set_fps(AudioBlock.SampleRate)
         return audio_clip
     audioclip = movie_editor.AudioFileClip(self.filename)
     if self.preload and audioclip.duration > self.MAX_DURATION_SECONDS:
         audioclip = audioclip.set_duration(self.MAX_DURATION_SECONDS)
     return audioclip
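
If the source file does not exist, get_audio_clip falls back to a one-second blank AudioClip at the block's sample rate, so callers always receive a usable clip; preloaded files longer than MAX_DURATION_SECONDS are truncated to that limit.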
Example #5
        def pad_effect(start, effect, duration):
            # return a stereo AudioClip of `duration` seconds that is silent
            # except for `effect`, whose audio is placed at time `start`
            def pad_effect_frame(times, start=start, effect=effect):
                if type(times) == int: return [0, 0]
                return [
                    effect.get_frame(time - start)
                    if start < time < start + effect.duration else [0, 0]
                    for time in times
                ]

            return editor.AudioClip(pad_effect_frame, duration=duration)
Example #6
def make_video(profile, name='test', bbox=None, plot_type='profile_plot'):
    """
    plot_type .. 'power_plot', 'profile_plot', 'peaks_plot'
    """
    if bbox is None: bbox = Bbox(profile.spectrum, end_freq=500.)
    registry = ['power_plot', 'profile_plot', 'peaks_plot']
    if plot_type == 'all': plot_type = registry
    if type(plot_type) is not str:
        for ptype in plot_type:
            make_video(profile, name, bbox, ptype)
        return
    import moviepy.editor as mpy  # deferred import in case moviepy is not installed
    from moviepy.video.io.bindings import mplfig_to_npimage
    sound = profile.spectrum.sound
    # normalize in case very quiet
    sound_array = sound.data / (2 * np.abs(sound.data).max())

    def video_fn(t):
        """make one frame of video"""
        t += bbox.start_time
        fn = getattr(profile, plot_type)
        fig = fn(t=t, bbox=bbox)
        return mplfig_to_npimage(fig)

    def audio_fn(t):
        """make one "frame" of audio"""
        t += bbox.start_time
        if type(t) is int:
            i = t * sound.samplerate
        elif type(t) is float:
            i = int(t * sound.samplerate)
        else:
            i = (t * sound.samplerate).astype(int)
        return sound_array[i]

    duration = bbox.end_time - bbox.start_time
    video_clip = mpy.VideoClip(video_fn, duration=duration)
    audio_clip = mpy.AudioClip(audio_fn, duration=duration)
    animation = video_clip.set_audio(audio_clip)
    animation.write_videofile(name + '_' + plot_type + '.avi',
                              codec='libx264',
                              fps=24)  # codec='mpeg4'
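
Here the soundtrack is synthesized directly from the sample array: audio_fn converts the requested time(s) into sample indices at sound.samplerate, so the AudioClip replays the original recording in sync with the matplotlib frames, and the up-front division by twice the peak keeps the level at half scale.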
Example #7
 def add_beep_audio(self) -> None:
     """Add a single tone as audio track."""
     tone = med.AudioClip(make_sin, duration=self.clip.duration)
     self.clip = self.clip.set_audio(tone)
Example #8
 def add_silence(self, duration_s: float) -> None:
     """Add a silence of a certain duration the an audio clip."""
     silence_clip = med.AudioClip(silence, duration=duration_s)
     self.clip = med.concatenate_audioclips([silence_clip, self.clip])
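
Neither make_sin nor silence is defined in these two snippets; a plausible pair of frame functions (my assumption: a 440 Hz stereo tone and stereo silence) would be:

import numpy as np

def make_sin(t):
    # hypothetical: 440 Hz tone, one (left, right) pair per requested time
    tone = 0.5 * np.sin(2 * np.pi * 440 * t)
    return np.stack([tone, tone], axis=-1) if np.ndim(t) else [tone, tone]

def silence(t):
    # hypothetical: zero-valued stereo frame(s)
    return np.zeros((len(np.atleast_1d(t)), 2)) if np.ndim(t) else [0.0, 0.0]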
Example #9
def get_effects(video, effect):

    if 'image' in effect:
        clip = (mp.ImageClip(effect['image']['file_or_url']))

        clip = clip.set_start(effect['duration']['start'])
        clip = clip.set_duration(effect['duration']['end'] -
                                 effect['duration']['start'])
        clip = clip.set_pos(
            (effect['position']['width'], effect['position']['height']))

        if effect.get('fade', {}).get('in'):
            clip = clip.crossfadein(effect['fade']['in'])

        if effect.get('fade', {}).get('out'):
            clip = clip.crossfadeout(effect['fade']['out'])

        if effect.get('position', {}).get('rotate'):
            clip = clip.rotate(effect['position']['rotate'])

        yield clip

    if 'text' in effect:

        # Requires working installation of ImageMagick
        try:
            clip = mp.TextClip(txt=effect['text']['message'],
                               color=effect['text'].get('color', '#666666'),
                               font=effect['text'].get('font', 'Courier'),
                               fontsize=effect['text'].get('size', 12),
                               align=effect['text'].get('align', 'center'),
                               kerning=effect['text'].get('kerning', 0))

        # Alternate method using Pillow: no need to set a position here, the
        # text is already placed within the generated image
        except Exception:
            clip = (mp.ImageClip(get_text_image(effect)))

        clip = clip.set_start(effect['duration']['start'])
        clip = clip.set_duration(effect['duration']['end'] -
                                 effect['duration']['start'])
        clip = clip.set_pos(
            (effect['position']['width'], effect['position']['height']))

        if effect.get('fade', {}).get('in'):
            clip = clip.crossfadein(effect['fade']['in'])

        if effect.get('fade', {}).get('out'):
            clip = clip.crossfadeout(effect['fade']['out'])

        if effect.get('position', {}).get('rotate'):
            clip = clip.rotate(effect['position']['rotate'])

        yield clip

    elif 'audio' in effect:
        clip = (mp.AudioFileClip(effect['audio']['file_or_url']))

        clip = clip.set_start(effect['duration']['start'])
        clip = clip.set_duration(effect['duration']['end'] -
                                 effect['duration']['start'])

        yield clip
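
One way to consume this generator, purely as an illustration (the effect dict layout is inferred from the keys accessed above), is to composite the yielded clips over a base video:

import moviepy.editor as mp

base = mp.VideoFileClip("input.mp4")               # assumed input file
effect = {"image": {"file_or_url": "logo.png"},    # assumed effect specification
          "duration": {"start": 1.0, "end": 4.0},
          "position": {"width": 20, "height": 20}}
overlays = list(get_effects(base, effect))
final = mp.CompositeVideoClip([base] + overlays)
final.write_videofile("output.mp4", fps=base.fps)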
Example #10
def main():
    extensions = ['ogv', 'mov']
    trial = ['326', '160']
    folders = [
        '../video_302_20210514-143755-6691',
        '../video_305_20210514-141912-2142'
    ]
    filter_bad_frames = [True, False]
    for index_trial in [0, 1]:
        results_df = pd.read_csv(
            f'{folders[index_trial]}/structured_output.csv')
        results_df = results_df[results_df['trial'] == float(
            trial[index_trial])]
        table_et_pt1 = ASC2CSV(
            f'{folders[index_trial]}/et{trial[index_trial]}.asc')
        table_et_pt2 = ASC2CSV(
            f'{folders[index_trial]}/et{trial[index_trial]}pt2.asc')
        list_of_videos = []
        for screen in range(1, 13):
            results_df_this_screen = results_df[results_df['screen'] == screen]
            video_filename = f'{folders[index_trial]}/recorded_screen_{screen}_trial_{trial[index_trial]}_0001.{extensions[index_trial]}'
            if os.path.isfile(video_filename):
                my_clip = mpe.VideoFileClip(video_filename)
                if screen in [2, 4, 7, 9]:
                    start_video = results_df_this_screen[
                        results_df_this_screen['title'] ==
                        'start_video_recording']['timestamp'].values[
                            0] * 24 * 60 * 60

                    # the only screen with audio is screen 2
                    if screen == 2:
                        table_et_2 = table_et_pt1.copy()
                        start_video_2 = start_video
                        my_clip = fl(
                            my_clip, lambda get_frame, t: scroll(
                                get_frame, t, table_et_2, start_video_2),
                            filter_bad_frames[index_trial])
                        delay_audio = results_df_this_screen[
                            results_df_this_screen['title'] ==
                            'start_audio_recording']['timestamp'].values[
                                0] * 24 * 60 * 60 - start_video

                        # generate the audio from the timestamped transcription
                        if use_digital_audio:
                            full_audio = AudioSegment.empty()
                            previous_end = 0
                            with open(
                                    f'{folders[index_trial]}/{trial[index_trial]}_joined.json',
                                    'r') as f:
                                table_text = json.load(f)['timestamps']
                            with open(
                                    f'{folders[index_trial]}/{trial[index_trial]}_trim.json',
                                    'r') as f:
                                b = json.load(f)
                            trim_value = float(b['start_trim']) / 1000
                            for row in table_text:
                                print(row[1])
                                print(trim_value)

                                # row[1] is the timestamp for the start of the word,
                                # row[2] the timestamp for the end of the word
                                row[1] += trim_value
                                row[2] += trim_value
                                print(row[1])

                                # if the start and end of a word are at the same time, it was not
                                # captured by the original transcription, so we use it only in the
                                # subtitles, not in the audio
                                if row[1] == row[2]:
                                    continue

                                # text to speech
                                tts = SaveTTSFile('create_video_temp.wav')
                                tts.start(
                                    row[0].replace('.', 'period').replace(
                                        ',', 'comma').replace('/', 'slash'),
                                    row[1], row[2])
                                # wait up to ~10 s for the TTS engine to write the file
                                for i in range(10):
                                    if not os.path.exists(
                                            './create_video_temp.wav'):
                                        time.sleep(1)
                                    else:
                                        break
                                else:
                                    # the file never appeared within the retry window
                                    assert False, 'TTS output file was not created'
                                del (tts)

                                # add silence between words if they did not end/start at the same time
                                if row[1] > previous_end:
                                    full_audio += AudioSegment.silent(
                                        duration=(row[1] - previous_end) *
                                        1000)
                                print(full_audio.duration_seconds)
                                print(row[1])
                                assert (abs(full_audio.duration_seconds -
                                            row[1]) < 0.002)

                                # change the duration of the word sound to the time
                                # it took the radiologist to say it
                                word_audio = AudioSegment.from_file(
                                    'create_video_temp.wav', format="wav")
                                word_audio = stretch_audio(
                                    word_audio, 'create_video_temp.wav',
                                    word_audio.duration_seconds /
                                    (row[2] - row[1]))

                                full_audio += word_audio
                                assert (abs(full_audio.duration_seconds -
                                            row[2]) < 0.002)
                                previous_end = row[2]
                            full_audio.export("create_video_temp.wav",
                                              format="wav")
                            audio_background = mpe.AudioFileClip(
                                'create_video_temp.wav')
                            os.remove('./create_video_temp.wav')
                        else:
                            audio_background = mpe.AudioFileClip(
                                f'{folders[index_trial]}/{trial[index_trial]}.wav'
                            )
                            # delay_audio = round(delay_audio*my_clip.fps)/my_clip.fps
                        if delay_audio > 0:
                            null_audio = mpe.AudioClip(lambda t: 0,
                                                       duration=delay_audio)
                            audio_background = mpe.concatenate_audioclips(
                                [null_audio, audio_background])
                            delay_audio = 0
                        delay_end_video = my_clip.duration - audio_background.duration
                        if delay_end_video > 0:
                            null_audio = mpe.AudioClip(
                                lambda t: 0, duration=delay_end_video)
                            audio_background = mpe.concatenate_audioclips(
                                [audio_background, null_audio])
                            delay_end_video = 0
                        audio_background.write_audiofile('temp_crop_audio.wav')
                        trim_audio('temp_crop_audio.wav', -delay_audio,
                                   -delay_end_video)
                        audio_background = mpe.AudioFileClip(
                            'temp_crop_audio.wav')

                    else:
                        if screen == 4:
                            table_et_this_screen_4 = table_et_pt2[
                                table_et_pt2['index_edf'] == 0]
                            start_video_4 = start_video
                            my_clip = fl(
                                my_clip, lambda get_frame, t: scroll(
                                    get_frame, t, table_et_this_screen_4,
                                    start_video_4),
                                filter_bad_frames[index_trial])
                        if screen == 7:
                            table_et_this_screen_7 = table_et_pt2[
                                table_et_pt2['index_edf'] == 1]
                            start_video_7 = start_video
                            my_clip = fl(
                                my_clip, lambda get_frame, t: scroll(
                                    get_frame, t, table_et_this_screen_7,
                                    start_video_7),
                                filter_bad_frames[index_trial])
                        if screen == 9:
                            table_et_this_screen_9 = table_et_pt2[
                                table_et_pt2['index_edf'] == 2]
                            start_video_9 = start_video
                            my_clip = fl(
                                my_clip, lambda get_frame, t: scroll(
                                    get_frame, t, table_et_this_screen_9,
                                    start_video_9),
                                filter_bad_frames[index_trial])
                else:
                    my_clip = fl(my_clip, clean,
                                 filter_bad_frames[index_trial])
                if screen != 2:
                    audio_background = mpe.AudioClip(lambda t: 0,
                                                     duration=my_clip.duration)
                my_clip = my_clip.set_audio(audio_background)
                list_of_videos.append(my_clip)
        final = mpe.concatenate_videoclips(list_of_videos)
        final.write_videofile(f"movie_{extensions[index_trial]}.mp4",
                              audio_codec='aac',
                              codec="libx264",
                              temp_audiofile='temp-audio.m4a',
                              remove_temp=True,
                              fps=30,
                              bitrate="5000k")
        os.remove('./create_video_temp.wav')
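
Note that in this last example every screen other than screen 2 is given a zero-valued AudioClip matching its duration, presumably so that each segment passed to concatenate_videoclips carries an audio track and the final soundtrack stays aligned.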