Example #1
def vid_from_media(vid_path, media, song_path, titles, img_duration=8):
    clips = []
    print "sequencing media..."
    for m in media:
        print m.path
        if good_file(m.path):
            try:
                if is_img(m.path):
                    new_clip = ImageClip(m.path)
                    new_clip.fps = 1.0 / img_duration
                    new_clip.duration = img_duration
                else:
                    new_clip = VideoFileClip(m.path)
                text = m.title if titles else None
                new_clip = format_clip(new_clip, text)
                clips.append(new_clip)
            except Exception as err:
                print("COULDN'T CREATE CLIP BECAUSE: " + str(err))
        else:
            print('CORRUPT FILE FOUND: ' + m.path + ', skipping.')
    vid = concatenate_videoclips(clips)
    print(song_path)
    audio = AudioFileClip(song_path)
    audio_loops = int(vid.duration / audio.duration) + 1  #times to loop audio
    audio = concatenate_audioclips([audio] * audio_loops)
    print(audio.duration)
    print(vid.duration)
    audio = audio.set_duration(vid.duration)
    vid = vid.set_audio(audio)
    print "writing video..."
    vid.write_videofile(vid_path, progress_bar=False, preset='ultrafast')
    return abspath(vid_path)
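Note: the manual loop count above (integer ratio plus one, then set_duration to trim) can also be done with moviepy's built-in audio_loop effect. A minimal sketch, assuming moviepy 1.x; the file name and target length are hypothetical:

from moviepy.editor import AudioFileClip, afx

audio = AudioFileClip("song.mp3")  # hypothetical input
# loop the track until it covers the video's duration, instead of concatenating copies by hand
audio = audio.fx(afx.audio_loop, duration=120)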
Example #2
    def render(self, name, start_offset, duration, fps=30, audio=None):
        """
        render object
        :param name: file to save as
        :param start_offset: where to start
        :param duration: how long the clip will be
        :param fps: default: 30
        :param audio: audio file path
        :return: None. saves file
        """
        self.offset = start_offset
        clip = mpy.VideoClip(self.make_frame, duration=duration * self.speed).speedx(self.speed)
        if audio is not None:
            subprocess.run(["ffmpeg", "-loglevel", "quiet", "-i", audio, "-filter:a",
                            f"atempo={self.speed}", "-vn", f"{audio}.mp3"])
            audio = f"{audio}.mp3"
            audio_start_offset = start_offset
            acl = mpy.AudioFileClip(audio)
            blnk = mpy.AudioClip(lambda x: 0, duration=self.minimum / 1000)
            aftr = max(0, (duration + audio_start_offset) - acl.duration)
            ablnk = mpy.AudioClip(lambda x: 0, duration=aftr)
            snd = mpy.concatenate_audioclips([blnk, acl, ablnk])
            clip = clip.set_audio(snd.subclip(audio_start_offset, duration + audio_start_offset))
            # remove(audio)

        if name.endswith(".gif"):
            clip.write_gif(name, fps=fps)
            subprocess.run(
                ["../gifsicle-1.82.1-lossy/mac/gifsicle", "-O3", "--lossy=30", "-o",
                 name, name])
        else:
            clip.write_videofile(name, fps=fps)
Example #3
    def combine(self, other: 'Moviepy', other_first: bool = False,  # type: ignore
                crossfade_duration: float = 0) -> None:
        """Combines this video stream with another stream"""
        self.reader_refs += other.reader_refs
        clips = [other.clip, self.clip] if other_first else [self.clip, other.clip]

        if self.has_video and other.has_video:
            if crossfade_duration == 0:
                self.clip = med.concatenate_videoclips(clips)
            else:
                # Have clips[1] start while clips[0] is not finished yet
                clips[1] = clips[1].set_start(max(0, clips[0].duration - crossfade_duration))
                clips[1] = clips[1].fx(transfx.crossfadein, crossfade_duration)
                self.clip = med.CompositeVideoClip([clips[0], clips[1]])
                # TODO: consider calling set_duration?
                self.clip.duration = clips[0].duration + clips[1].duration - crossfade_duration
        else:
            if crossfade_duration == 0:
                assert self.has_video is False and other.has_video is False
                self.clip = med.concatenate_audioclips(clips)
            else:
                # Audio crossfade in: start earlier, fade in with normal audio_fadein effect.
                clips[1] = clips[1].set_start(max(0, clips[0].duration - crossfade_duration))
                clips[1] = clips[1].fx(afx.audio_fadein, crossfade_duration)
                self.clip = med.CompositeAudioClip([clips[0], clips[1]])
                self.clip.duration = clips[0].duration + clips[1].duration - crossfade_duration
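The same crossfade pattern works outside this wrapper class with plain moviepy calls. A minimal sketch, assuming moviepy 1.x; the input file names are hypothetical:

from moviepy.editor import VideoFileClip, CompositeVideoClip, transfx

fade = 1.0  # crossfade duration, in seconds
a = VideoFileClip("a.mp4")  # hypothetical inputs
b = VideoFileClip("b.mp4")
# start b before a ends, fade it in, then composite the overlapping clips
b = b.set_start(max(0, a.duration - fade)).fx(transfx.crossfadein, fade)
merged = CompositeVideoClip([a, b])
merged.duration = a.duration + b.duration - fade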
Example #4
def change_audio(dir, vid_file, cap_file):
    video = mp.VideoFileClip(dir + vid_file)

    with open(dir + cap_file) as f:
        j = json.loads(f.read())
        starts = list(map(lambda c: float(c['start']), j))

    nbrClips = len(starts)

    clips = list(map(lambda s: mp.AudioFileClip("./media/tmp/{}.mp3".format(s)), range(nbrClips)))
    clips = list(map(lambda c: c.subclip(0, c.duration*0.8), clips))

    # print(starts)

    current = 0

    make_frame = lambda t: [0]
    padded_clips = []
    for i in range(nbrClips):
        if current < starts[i]:
            # pad with silence up to this clip's start time
            padded_clips.append(mp.AudioClip(make_frame, duration=starts[i] - current))
            current = starts[i]
        padded_clips.append(clips[i])
        current += clips[i].duration

    concat = mp.concatenate_audioclips(padded_clips)
    concat.write_audiofile(dir + 'audio.mp3')
    video.write_videofile('media/ready/' + vid_file, audio = dir + 'audio.mp3')
Example #5
	def _compose_buffer(self):
		audio = concatenate_audioclips(self.sounds)
		video = CompositeVideoClip(self.images, 
			size=(self.w, self.h)).set_duration(audio.duration)
		video = video.set_audio(audio)
		self.clips.append(video)
		self.sounds, self.images = [], []
		self._push_image(self.background_image)
Example #6
def tracks():
    return concatenate_audioclips([
        AudioFileClip(assets(MODULE_NAME, 'track1.mp3'))
            .fx(afx.volumex, 0.5),
        AudioFileClip(assets(MODULE_NAME, 'track2.mp3'))
            .fx(afx.volumex, 0.5),
        AudioFileClip(assets(MODULE_NAME, 'track3.mp3'))
            .fx(afx.volumex, 0.5),
    ])
Example #7
File: editor.py Project: tnoff/hathor
def commercial_remove(input_file, output_file, non_commercial_intervals, verbose=True):
    '''Remove commercials from audio file
       input_file: audio file to remove commercials from
       output_file: output path of new audio file with no commercials
       non_commercial_intervals: (start, end) intervals to keep, e.g. the result of a commercial_identify call
    '''
    audio_clip = AudioFileClip(input_file)
    clips = []
    for start, end in non_commercial_intervals:
        clips.append(audio_clip.subclip(start, end))
    final = concatenate_audioclips(clips)
    final.write_audiofile(output_file, verbose=verbose)
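A short usage sketch; the file names and intervals here are hypothetical:

# keep 0-95 s and 154-300 s, dropping the commercial break in between
commercial_remove("episode.mp3", "episode_clean.mp3", [(0, 95), (154, 300)])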
Example #8
def get_audiocut(url, verbose=False, duration=None):
    """
    Given an "audio cut" url, return a moviepy's AudioClip instance with the cut
    """

    if verbose:
        print('Retrieving {}'.format(url))

    pq = PyQuery(url)
    seconds = pq('li.audio_seconds').text()
    if duration is None:
        duration = float(pq('li.audio_duration').text())
    station = pq('li.audio_station').text()
    base_url = pq('li.audio_base_url').text()

    start_folder = int(seconds[:6])
    chunks = []
    while True:
        chunks_url = "{}/server/get_chunks/{}/{:d}/".format(base_url, station, start_folder)
        if verbose:
            print('Getting chunks index {}'.format(chunks_url))
        chunks_json = requests.get(chunks_url, headers=HEADERS).json()[str(start_folder)]
        for chunk_data in chunks_json['chunks']:
            # set the base_url if it isn't defined
            chunk_data['base_url'] = chunk_data.get('base_url', chunks_json['baseURL'])
            chunks.append(chunk_data)
        c = chunks[-1]
        if c['start'] + c['length'] > float(seconds) + float(duration):
            break
        # if the last chunk isn't in this index, get the next one
        start_folder += 1

    if verbose:
        print('Need to download {} chunks'.format(len(chunks)))
        print('Looking for first chunk')
    for i, c in enumerate(chunks):
        if c['start'] + c['length'] > float(seconds):
            first_chunk = i
            break
    if verbose:
        print('Looking for last chunk')
    for i, c in enumerate(chunks[first_chunk:]):
        if c['start'] + c['length'] > float(seconds) + float(duration):
            last_chunk = min(len(chunks), first_chunk + i + 1)
            break

    audios = [get_mp3(chunk, verbose=verbose) for chunk in chunks[first_chunk:last_chunk]]
    start_offset = float(seconds) - chunks[first_chunk]['start']
    cut = concatenate_audioclips(audios)
    cut = cut.subclip(start_offset, start_offset + float(duration))
    return cut
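For context, a minimal call might look like this, reusing the sample audiocut URL shown in Example #18:

cut = get_audiocut('http://radiocut.fm/audiocut/macri-gato/', verbose=True)
cut.write_audiofile('macri-gato.mp3')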
Example #9
def get_audiocut(url, verbose=False):
    """
    Given an "audio cut" url, return a moviepy's AudioClip instance with the cut
    """

    if verbose:
        print('Retrieving {}'.format(url))

    pq = PyQuery(url)
    seconds = pq('li.audio_seconds').text()

    duration = pq('li.audio_duration').text()
    station = pq('li.audio_station').text()
    base_url = pq('li.audio_base_url').text()

    start_folder = int(seconds[:6])
    chunks = []
    while True:
        chunks_url = "{}/server/get_chunks/{}/{:d}/".format(base_url, station, start_folder)
        if verbose:
            print('Getting chunks index {}'.format(chunks_url))
        chunks_json = requests.get(chunks_url).json()[str(start_folder)]
        for chunk_data in chunks_json['chunks']:
            # set the base_url if it isn't defined
            chunk_data['base_url'] = chunk_data.get('base_url', chunks_json['baseURL'])
            chunks.append(chunk_data)
        c = chunks[-1]
        if c['start'] + c['length'] > float(seconds) + float(duration):
            break
        # if the last chunk isn't in this index, get the next one
        start_folder += 1

    if verbose:
        print(len(chunks))
        print('Looking for first chunk')
    for i, c in enumerate(chunks):
        if c['start'] + c['length'] > float(seconds):
            first_chunk = i
            break
    if verbose:
        print('Looking for last chunk')
    for i, c in enumerate(chunks[first_chunk:]):
        if c['start'] + c['length'] > float(seconds) + float(duration):
            last_chunk = min(len(chunks), first_chunk + i + 1)
            break

    audios = [get_mp3(chunk, verbose=verbose) for chunk in chunks[first_chunk:last_chunk]]
    start_offset = float(seconds) - chunks[first_chunk]['start']
    cut = concatenate_audioclips(audios)
    cut = cut.subclip(start_offset, start_offset + float(duration))
    return cut
Example #10
    def clips(self):
        print("clips start")
        filelist = os.listdir(AUDIO_PATH)
        audiolist = []
        if filelist == []:
            print("no audio file found")
            return
        print(filelist)
        for file_temp in filelist:
            audiolist.append(AudioFileClip("%s/%s" % (AUDIO_PATH, file_temp)))

        final_clip = concatenate_audioclips(audiolist)
        final_clip.write_audiofile("static/abc.wav")
        self.write_message("/static/abc.wav")
        print("clips end")
Example #11
def main():
    from docopt import docopt
    arguments = docopt(__doc__, version=__version__)

    url = arguments['<url>'].partition('#')[0]
    duration = None
    output_filename = ''
    is_audiocut = re.match(AUDIOCUT_PATTERN, url)
    is_podcast = re.match(PODCAST_PATTERN, url)
    is_radiostation = re.match(RADIOSTATION_PATTERN, url)
    is_show = re.match(SHOW_PATTERN, url)
    if not any([is_audiocut, is_podcast, is_radiostation, is_show]):
        print(NOT_VALID_MSG)
        sys.exit(1)

    if not url.endswith('/'):
        url += '/'
    verbose = bool(arguments['--verbose'])

    if is_podcast:
        urls = get_urls_from_podcast(url, verbose)
    elif is_show:
        urls, duration, output_filename = get_show(url, is_show.group(1), verbose)
    else:
        urls = [url]

    duration = arguments['--duration'] or duration
    if duration is not None:
        duration = int(duration)

    audioclips = [get_audiocut(url, verbose, duration) for url in urls]
    background = arguments['--background']
    extension = 'mp4' if background else 'mp3'

    if arguments['--join'] or is_audiocut:
        audioclips = [concatenate_audioclips(audioclips)]
        output_filenames = output_file_names(
            [url],
            given_filename=arguments['<output-file-name>'],
            extension=extension)
    else:
        output_filenames = output_file_names(
            urls,
            given_filename=arguments['<output-file-name>'] or output_filename,
            extension=extension)

    for clip, filename in zip(audioclips, output_filenames):
        write_output(clip, filename, background, verbose=verbose)
Example #13
def radiocut(url, output_file_name=None):

    print('Retrieving {}'.format(url))


    pq = PyQuery(url)
    seconds = pq('li.audio_seconds').text()

    duration = pq('li.audio_duration').text()
    station = pq('li.audio_station').text()
    base_url = pq('li.audio_base_url').text()

    start_folder = int(seconds[:6])
    chunks = []
    while True:
        chunks_url = "{}/server/get_chunks/{}/{:d}/".format(base_url, station, start_folder)
        print('Getting chunks index {}'.format(chunks_url))
        chunks_json = requests.get(chunks_url).json()[str(start_folder)]
        for chunk_data in chunks_json['chunks']:
            # set the base_url if it isn't defined
            chunk_data['base_url'] = chunk_data.get('base_url', chunks_json['baseURL'])
            chunks.append(chunk_data)
        c = chunks[-1]
        if c['start'] + c['length'] > float(seconds) + float(duration):
            break
        # if the last chunk isn't in this index, get the next one
        start_folder += 1

    print(len(chunks))
    print('Looking for first chunk')
    for i, c in enumerate(chunks):
        if c['start'] + c['length'] > float(seconds):
            first_chunk = i
            break
    print('Looking for last chunk')
    for i, c in enumerate(chunks[first_chunk:]):
        if c['start'] + c['length'] > float(seconds) + float(duration):
            last_chunk = min(len(chunks), first_chunk + i + 1)
            break

    audios = [get_mp3(chunk) for chunk in chunks[first_chunk:last_chunk]]
    start_offset = float(seconds) - chunks[first_chunk]['start']
    cut = concatenate_audioclips(audios)
    cut = cut.subclip(start_offset, start_offset + float(duration))
    if output_file_name is None:
        output_file_name = url.split('/')[-2] + '.mp3'
    cut.write_audiofile(str(output_file_name))
Example #14
def write_audio_of_videos_in_parts(name,
                                   num_parts,
                                   video_folder="./videos/",
                                   audio_folder="./audios/",
                                   audio_prefix="audio_",
                                   lazy_update=True):
    all_parts_folder = f"{audio_folder}{name}/"
    get_subpart_path = lambda num_part: get_path(
        all_parts_folder, f"{audio_prefix}{num_parts}_part_", num_part, "mp3")
    os.makedirs(all_parts_folder,
                exist_ok=True)  # create the folder if it doesn't already exist
    total_audio_path = get_path(all_parts_folder, audio_prefix, name, "mp3")
    if os.path.exists(total_audio_path) and lazy_update:
        print(
            f"The audio: {total_audio_path} already exists and lazy_update is set to {lazy_update}, so we are skipping this."
        )
    else:
        print(get_subpart_path(1))
        list_all_clips = []
        all_elements = os.listdir(f"{video_folder}{name}/")
        only_mp4 = list(
            filter(lambda name: name.endswith(".mp4"),
                   all_elements))  #only keeping the videos
        sorted_videos = sorted(
            only_mp4, key=extract_number_from_video
        )  # ordering with the number, not with the str rep
        # 'video_2' goes before 'video_10'
        print(
            f"Found {len(sorted_videos)} videos, now trying to combine all audios!\n"
        )
        for sub_video in sorted_videos:
            print(".", end="")  #just to keep track of where we are
            subclip = AudioFileClip(f"{video_folder}{name}/{sub_video}")
            list_all_clips.append(subclip)
        clips_combined = concatenate_audioclips(list_all_clips)
        print()
        print("Clips combined, now writing to {}".format(total_audio_path))
        clips_combined.write_audiofile(total_audio_path)
    print(f"\nNow trying to divide the audio in {num_parts} parts \n"
          )  #TODO: check if the parts already exist
    divide_audio(total_audio_path, num_parts, all_parts_folder, audio_prefix,
                 lazy_update)
    audio = AudioFileClip(total_audio_path)
    every_part_duration = (audio.duration) / num_parts
    audio.close()
    return every_part_duration
Example #15
def audio_cut_at(filename, cuts):
    """Cuts audio based on (start,end) tuples

    Args:
        filename (string): the name of the file from which parts are to be cut
        cuts (list): list of (start, end) tuples; each tuple is a segment to keep, both start and end are float values in seconds

    Returns:
        moviepy AudioClip
    """
    audio_to_cut = AudioFileClip(filename)
    final_audio = audio_to_cut.subclip(cuts[0][0], cuts[0][1])
    for i in range(1, len(cuts)):
        a = audio_to_cut.subclip(cuts[i][0], cuts[i][1])
        final_audio = concatenate_audioclips([final_audio, a])
    final_audio.write_audiofile('done_proc.wav')
    return final_audio
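A short usage sketch; the file name and cut points are hypothetical:

# keep 0.0-5.5 s and 10.0-12.25 s of the input; the result is also written to done_proc.wav
final = audio_cut_at("input.wav", [(0.0, 5.5), (10.0, 12.25)])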
Example #16
def create_bg_music(music_fp, time_len):
    """
    create an audio with the give time duration. If `time_len` is less than 
    the time-length of the music, cut it from the beginning;
    if `time_len` is greater than music's duration, loop it until satisfied.

    music_fp: file path of music
    time_len: the wanted duration, in seconds
    """
    audio = mpe.AudioFileClip(music_fp)
    if audio.duration < time_len:
        n = int(time_len / audio.duration)
        audio_clips = [audio for _ in range(n + 1)]
        combined_audio = mpe.concatenate_audioclips(audio_clips)
        return combined_audio
        return combined_audio
    else:
        return audio.subclip(0, time_len)
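A quick usage sketch; the file path and target length are hypothetical. Note that the loop branch returns n+1 whole copies, so the result can overshoot time_len and the caller may still want to trim:

bg = create_bg_music("theme.mp3", 90.0)  # hypothetical 90-second target
bg = bg.set_duration(90.0)  # trim the looped copy back to the target length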
Example #17
    def convert_imgs_to_movie(self, **kwargs) -> None:
        uid = kwargs['UID']
        slide_images = list(
            filter(
                lambda x: x['step'] ==
                f'{StepName.CONVERT_SLIDES.value}_convert_pdf_to_imgs',
                kwargs['dependent_results']))[0]['results']
        slide_sounds = list(
            filter(
                lambda x: x['step'] == f'{StepName.GET_TTS.value}_convert_tts',
                kwargs['dependent_results']))[0]['results']
        sld_clips = []
        t = 0
        for (imgkey, imgval), (sndkey, sndvals) in zip(slide_images.items(),
                                                       slide_sounds.items()):
            audio_clips = []
            t_a = 0
            for snd in sndvals:
                _audio = mpy.AudioFileClip(snd)
                audio_clips.append(_audio.set_start(t_a))
                # advance the start time by the current audio clip's length
                t_a += _audio.duration

            sld_audio = mpy.concatenate_audioclips(audio_clips)
            sld = (
                mpy.ImageClip(imgval).set_duration(
                    sld_audio.duration
                )  # using the fx library to effortlessly transform the video clip # .on_color(size=DIM, color=dark_grey)
                .set_fps(
                    5
                )  # if we want to use transition we would need to increase fps to > 24
                .set_audio(sld_audio))
            sld_clips.append(sld.set_start(t))
            # account for current compound clip length
            t += sld.duration
        video = mpy.CompositeVideoClip(sld_clips)

        # prepare target dir
        dest_path = os.path.join('.', kwargs['MOV_DIR'])
        if not os.path.isdir(dest_path):
            os.makedirs(dest_path)

        video_path = os.path.join(dest_path, f'{uid}.mp4')
        video.write_videofile(video_path, threads=4, logger=None)
Example #18
def main():
    from docopt import docopt
    arguments = docopt(__doc__, version=__version__)

    url = arguments['<audiocut_or_podcast>'].partition('#')[0]
    is_audiocut = re.match(AUDIOCUT_PATTERN, url)
    is_podcast = re.match(PODCAST_PATTERN, url)
    if not is_audiocut and not is_podcast:
        print("""The given URL is not a valid audiocut or podcast from radiocut.fm.
Examples:
    - http://radiocut.fm/audiocut/macri-gato/
    - http://radiocut.fm/pdc/tin_nqn_/test
""")
        sys.exit(1)
    if is_audiocut and not url.endswith('/'):
        url += '/'
    verbose = bool(arguments['--verbose'])

    if is_podcast:
        urls = get_urls_from_podcast(url, verbose)

    else:
        urls = [url]

    audioclips = [get_audiocut(url, verbose) for url in urls]
    background = arguments['--background']
    extension = 'mp4' if background else 'mp3'

    if arguments['--join'] or is_audiocut:
        audioclips = [concatenate_audioclips(audioclips)]
        output_filenames = output_file_names(
            [url],
            given_filename=arguments['<output-file-name>'],
            extension=extension)
    else:
        output_filenames = output_file_names(
            urls,
            given_filename=arguments['<output-file-name>'],
            extension=extension)

    for clip, filename in zip(audioclips, output_filenames):
        write_output(clip, filename, background, verbose=verbose)
Example #19
    def concat_video(self, files, outfile):
        from moviepy import editor
        audio_formats = ["mp3", "wav", "m4a"]
        is_audio = False
        for fmt in audio_formats:
            if fmt in files[0]:
                is_audio = True
        clips = []
        if is_audio:
            for file in files:
                clip = editor.AudioFileClip(file)
                clips.append(clip)
            audioclips = editor.concatenate_audioclips(clips)
            audioclips.write_audiofile(outfile)
        else:
            for file in files:
                clip = editor.VideoFileClip(file)
                clips.append(clip)
            videoclips = editor.concatenate_videoclips(clips)
            videoclips.write_videofile(outfile)
Example #20
def ttsVideo(subreddit="entitledparents", filter="month", limit=5):
    deleteTTS()
    for filename in os.listdir("tts/final/"):
        file_path = os.path.join("tts/final/", filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
        except Exception as e:
            print("Failed to delete %s. Reason: %s" % (file_path, e))

    path = "./tts/"
    videos = []
    for submission in reddit.subreddit(subreddit).hot(limit=limit):

        if len(submission.selftext) > 100:
            ttsMerge(submission)
            cliparray = []
            audioarray = []
            for file in os.listdir(path + "videos/"):
                cliparray.append(VideoFileClip(path + "videos/" + file))

            for file in os.listdir(path + "audio/"):
                audioarray.append(AudioFileClip(path + "audio/" + file))

            audio = concatenate_audioclips(audioarray)

            final_clip = concatenate_videoclips(cliparray, method="compose")
            final_clip.audio = audio
            final_clip.write_videofile(
                path + "final/" + submission.id + ".mp4",
                temp_audiofile="tts/tmp/tmp_audio.mp3",
            )
            deleteTTS()

    for file in os.listdir(path + "final/"):
        videos.append(VideoFileClip(path + "final/" + file))

    final_clip = concatenate_videoclips(videos, method="compose")
    final_clip.write_videofile("output2.mp4", temp_audiofile="tmp/tmp_audio.mp3")
Example #21
    def create_audio_file(self):
        """ 
        Reads all the mp3 files in the comment_files directory and converts them to AudioFileClips, also sets up the transition audio.
        Loops through the comment names and appends them to a new list, whenever a comment and it replies ends a transition is added. The length of each clip is also recorded to
        a dictionary. All audio clips are combined and outputted to all.mp3.
        """
        all_comments = [
            AudioFileClip(mp3_file)
            for mp3_file in glob.glob("temp_files/comment_files/*.mp3")
        ]
        transition = AudioFileClip(r"transitions/bar_transition.mp3")
        self.all_comments_names = [
            name for name in glob.glob("temp_files/comment_files/*.mp3")
        ]

        all_comments_final = []
        self.lendict = {}
        title = AudioFileClip('temp_files/title/title.mp3')
        self.title_dur = title.duration
        all_comments_final.append(title)
        all_comments_final.append(transition)
        count = 0
        # Make list with [title, transition, comment_top, comment_second, comment_third, transition, etc]
        for comment_count, indiv in enumerate(all_comments):
            comment_num = self.all_comments_names[comment_count].split('$')[1]
            all_comments_final.append(indiv)
            self.lendict[comment_num + str(count)] = indiv.duration
            count += 1
            if count % self.num_comments_dict[comment_num] == 0:
                self.lendict[comment_num +
                             str(count - 1)] = indiv.duration + 0.5
                count = 0
                all_comments_final.append(transition)

        self.status = "Writing Audio"
        print("Writing Audio")
        audio_concat = concatenate_audioclips(all_comments_final)
        audio_concat.write_audiofile("comments/all.mp3", 44100)
Example #22
def concat_audio():
    print(
        "You have selected concatenate-audio! A folder of audio files must be provided. Audio files will be concatenated alphabetically or randomly if specified!"
    )
    print("Make sure you only have audio-files in folder!")
    random1 = input("Audio concatenated randomly? (yes || no) : ")
    randomly = False
    if (equals(random1, 'yes')):
        randomly = True
    name = input(
        "Enter the name you want your file to be named (Do not include file ext): "
    )
    if (name == ""):
        print("No name was given so the file will be named default")
        name = "default"
    folderPath = input("Enter path to your folder of audio files : ")
    if (path.exists(folderPath) == False):
        print("Invalid Path")
        return -1
    folderPath += '/'
    try:
        files = os.listdir(folderPath)
        for i in range(len(files)):
            files[i] = folderPath + files[i]
        proc_files = []
        currExt = ''
        for i in files:
            if (isAudio(i)):
                currExt = getExt(i)
                proc_files.append(AudioFileClip(i))
            else:
                print(i + " was dropped as it was not a audio file")
        concat_audio = mp.concatenate_audioclips(proc_files)
        concat_audio.write_audiofile(name + currExt)
    except:
        print("An error occurred :(")
Example #23
def prepare_background_music(video_length):
    background_songs = os.listdir(music_dir)
    song_rng = np.random.randint(0, len(background_songs))
    background_song = mpy.AudioFileClip(music_dir + '/' +
                                        background_songs[song_rng])
    background_audio = background_song

    extend_audio_factor = int(
        video_length / background_audio.duration
    ) + 1  # number of times to repeat audio in order to have the correct length

    if video_length > background_audio.duration:
        print('extending audiotrack to match video.')
        for i in range(0, extend_audio_factor):
            song_rng = np.random.randint(0, len(background_songs))
            background_song = mpy.AudioFileClip(music_dir + '/' +
                                                background_songs[song_rng])
            background_audio = mpy.concatenate_audioclips(
                [background_audio, background_song])

    background_audio = background_audio.set_duration(video_length)
    background_audio = volumex(background_audio, 0.1)

    return background_audio
        print "[MoviePy] Cutting audio and video up..."
        notification = True
    randArr.append(random.randint(startSec, endSec))
    clipArr.append(VideoFileClip(video).subclip(randArr[x], randArr[x] + 1))
    soundArr.append(AudioFileClip(video).subclip(randArr[x], randArr[x] + 1))
    sFXArr.append((soundArr[x].audio_fadein(0.01).audio_fadeout(0.01)))
    startSec = endSec
    endSec += oneFifteenth

# Combine Video Clips
# final_clip = concatenate_videoclips([clipArr[0], clipArr[1], clipArr[2], clipArr[3], clipArr[4], clipArr[5], clipArr[6], clipArr[7], clipArr[8], clipArr[9], clipArr[10], clipArr[11], clipArr[12], clipArr[13], clipArr[14]])
final_clip = concatenate_videoclips(clipArr)

final_clip_rot = final_clip.rotate(90)

# Combine Audio Clips
# final_audio = concatenate_audioclips([sFXArr[0], sFXArr[1], sFXArr[2], sFXArr[3], sFXArr[4], sFXArr[5], sFXArr[6], sFXArr[7], sFXArr[8], sFXArr[9], sFXArr[10], sFXArr[11], sFXArr[12], sFXArr[13], sFXArr[14]])
final_audio = concatenate_audioclips(sFXArr)

# Stitch Audio and Video Together
combClip = final_clip_rot.set_audio(final_audio)

# Write Video File to Disk
combClip.write_videofile("my_concatenation.mp4",
                         codec='libx264',
                         audio_codec='aac',
                         temp_audiofile='temp-audio.m4a',
                         remove_temp=True)

if deleteTmp == True:
    os.remove(tempName)
Example #25
                    r = requests.get(videoUrl + '/audio/zho/' + fileAudio)
                    with open(join(baseName, fileAudio), 'wb') as f:
                        f.write(r.content)
                FFmpeg(global_options='-y',
                       inputs={
                           join(baseName, fileAudio): None
                       },
                       outputs={
                           join(baseName, fileAudio + '.wav'): None
                       }).run()
                clips.append(
                    AudioFileClip(join(baseName, fileAudio + '.wav'),
                                  fps=48000))

            # Concat audio clips and save in one PCM wave format file
            audioClip = concatenate_audioclips(clips)
            audioClip.write_audiofile(baseName + '.wav')
            audioClip.close()

    # Get list of subtitles in VTT format and save them
    if not exists(baseName + '.srt'):
        sentences = []
        subtitleIndex = m3u8.load(videoUrl + '/subtitles/zho/prog_index.m3u8')
        if subtitleIndex.files:
            for fileSeq in subtitleIndex.files:
                sleep(randint(2, 3))
                print(videoUrl + '/subtitles/zho/' + fileSeq)
                r = requests.get(videoUrl + '/subtitles/zho/' + fileSeq)
                sentences = sentences + vtt.parse_auto_sub(r.text)
                with open(join(baseName, fileSeq), 'w') as f:
                    f.write(r.text)
Example #26
def render_video(user, send_end=None, compress_render=False, chunk_render=False):
    """
    User: String -> The ID of the project (User is just a hangover from previous builds)
    compress_render: Bool -> Set to true if you want this function to return a quick render
    """
    try:
        if chunk_render:
            chunk.get_chunk(sherpaUtils.open_proj(user), user, 1)
        else:
            log_name = datetime.now().strftime("%Y.%m.%d-%H-%M-%S") + "_render_service_instance_id_{}.log".format(user)

            # Collecting garbage to clear out memory
            gc.collect()

            # Creating a logging instance for testing
            log_file_name = os.path.join(
                Config.BASE_DIR,
                Config.LOGS_LOCATION,
                Config.RENDER_LOGS, 
                log_name
            )

            logging.basicConfig(
                level=logging.DEBUG, 
                format='%(asctime)s %(levelname)-8s %(message)s',
                datefmt='%Y-%m-%d %H:%M:%S',
                filename=log_file_name)
            logging.debug("Beginning render instance of project id {}".format(user))

            # For logging
            start_time = time.time()

            # Finished timeline video
            video_list = []

            # Top audio timeline
            top_audio = []

            # Define current length of video, in terms of the 'main' timeline
            cutaway_timeline = 0

            # Look for the json file in the project folder
            try:
                json_file = sherpaUtils.open_proj(user)
            except FileNotFoundError as e:
                logging.error("File or folder cannot be found")
                logging.error(e)
                results = "Render exited without error [Unable to find file or folder]", 0        
                if send_end is not None:
                    send_end.send(results)
                return results


            # If a file can be found, but no edit data exists in the file
            if not json_file['CutAwayFootage'] and not json_file['InterviewFootage']:
                logging.error("This project seems to have no edit data recorded. Exiting render session")
                results = "Render exited without error [No edit data exists in JSON]", 0        
                if send_end is not None:
                    send_end.send(results)            
                return results


            # Get timeline lengths
            cutaway_timeline_length = round(sherpaUtils.calculate_timeline_length(json_file['CutAwayFootage']), 2)
            interview_timeline_length = round(sherpaUtils.calculate_timeline_length(json_file['InterviewFootage']), 2)

            logging.debug("Cutaway length: {}s      Interview length: {}s".format(cutaway_timeline_length, interview_timeline_length))

            # Find the smallest timeline length
            smallest_timeline = sherpaUtils.order_picker(cutaway_timeline_length, interview_timeline_length)

            if smallest_timeline == "CutAwayFootage":
                if not json_file['CutAwayFootage']:
                    logging.debug("Cutaways is empty, making interview line the new cutaway line")
                    json_file['CutAwayFootage'] = json_file['InterviewFootage']
                    json_file['InterviewFootage'] = dict()
                    cutaway_timeline_length = round(sherpaUtils.calculate_timeline_length(json_file['CutAwayFootage']), 2)
                    interview_timeline_length = round(sherpaUtils.calculate_timeline_length(json_file['InterviewFootage']), 2)        
                    smallest_timeline = sherpaUtils.order_picker(cutaway_timeline_length, interview_timeline_length)
                logging.debug("Smallest timeline is currently the Cut Away Timeline, correcting timelines as necessary")
                blank_no = 1

            # While the smallest timeline is the cut away timeline
            # TODO: THIS ISSUE MAY ONLY OCCUR IF THE CUTAWAY TIMELINE IS SHORTER THAN THE TOP TIMELINE
            while smallest_timeline == 'CutAwayFootage':
                if blank_no > 100:
                    logging.debug("There's something wrong with the blank placement for {}. Terminating project".format(user))            
                    results = "Fatal error, blank placement is in infinite loop", 99        
                    if send_end is not None:
                        send_end.send(results)
                    return results

                # Calculate the length of the blank that should be playing at the smallest timeline 
                current_interview_clip = sherpaUtils.current_interview_footage(
                    json_file, 
                    cutaway_timeline_length
                )[0]

                # Calculate when the clip on the interview timeline should be playing (globally), and return the length that the blank clip should be
                blank_len = sherpaUtils.calculate_time_at_clip(
                    current_interview_clip['Meta'], 
                    json_file['InterviewFootage'], 
                    timeline_len=cutaway_timeline_length
                )

                # Creating a blank clip to insert into time
                blank_name = "end_of_line_blank_" + str(blank_no)

                end_of_line_blank = {
                blank_name: {
                        "Meta": {
                            "name": blank_name,
                            "startTime": 0,
                            "endTime": blank_len,
                            "audioLevel": 1,
                            "order": len(json_file[smallest_timeline])+1,
                            "clipType": "Blank"
                        },
                        "edit": {

                        }
                    }
                }

                blank_no += 1
                logging.debug(blank_name + ":")
                logging.debug(end_of_line_blank)
                # Insert it into the timeline
                json_file[smallest_timeline].update(end_of_line_blank)

                # Update the length
                cutaway_timeline_length = round((cutaway_timeline_length+blank_len),2)
                logging.debug("Cutaway length: {}, Inteview length: {}".format(cutaway_timeline_length, interview_timeline_length))
                    
                smallest_timeline = sherpaUtils.order_picker(cutaway_timeline_length, interview_timeline_length)

            # Automated all the clips - Run through all the cutaway footage
            for clip_name in json_file['CutAwayFootage']:

                logging.debug(clip_name + ":")
                logging.debug("Cutaway Timeline: {}".format(cutaway_timeline))

                # Initialise clip data first
                clip_data = json_file['CutAwayFootage'][clip_name]

                clip_type = clip_data['Meta'].get('clipType')

                # If its a cutaway, just generate the clip and add a caption if it exists
                if clip_type == "CutAway" or clip_type == "Interview":
                    logging.debug(clip_name + " is a cutaway.")
                    clip = generateEffects.generate_clip(clip_data=clip_data['Meta'], user=user, compressed=compress_render or chunk_render)
                    # Generate caption data
                    logging.debug("Generating audio for {}".format(clip_name))
                    clip = generateEffects.better_generate_text_caption(clip, clip_data['edit'], compressed=compress_render or chunk_render)
                    logging.debug("Inserting audio for clip '{}'     Clip Audio is {}   Audio length is {}".format(clip_name, clip.audio, clip.duration))
                    top_audio.insert(clip_data['Meta'].get('order'), clip.audio)

                # Generate image
                elif clip_type == "Image":
                    logging.debug(clip_name + " is an image.")
                    clip = generateEffects.generate_image_clip(clip_data['Meta'], user)            
                    logging.debug("Generating audio for {}".format(clip_name))
                    clip = generateEffects.better_generate_text_caption(clip, clip_data['edit'], compressed=compress_render or chunk_render)
                    logging.debug("Inserting audio for clip '{}'     Clip Audio is {}   Audio length is {}".format(clip_name, clip.audio, clip.duration))
                    top_audio.insert(clip_data['Meta'].get('order'), clip.audio)

                # If it's a blank
                elif clip_type == "Blank":
                    # These values are used later in the blank process
                    some_filler = False
                    total_insert_length = 0                            
                    logging.debug("Inserting audio for blank '{}'".format(clip_name))
                    top_audio.insert(clip_data['Meta'].get('order'), generateEffects.get_blank_audio(clip_data))
                    # We need to see if we can find any clips to replace the blank with
                    try:
                        logging.debug(clip_name + " is a blank.")
                        # We need to find the clip that should be playing in the interview timeline
                        cutaway_blank_len = sherpaUtils.calculate_clip_length(clip_data['Meta'])

                        # Gets clip on interview timeline that should be playing, as well as its start time on the interview timeline
                        relevant_interview_clip_data, interview_start_time = sherpaUtils.current_interview_footage(
                            data=json_file,
                            clip_timeline=cutaway_timeline
                        )

                        # rounding for simple calculation
                        interview_start_time = round(interview_start_time, 2)

                        # Set metadata for clip rendering and order for timeline insert
                        interview_clip_meta_data = relevant_interview_clip_data['Meta']
                        interview_clip_ord = interview_clip_meta_data.get('order')

                        # Difference between the current time in the video, and the start time of the interview clip
                        dif = round(cutaway_timeline-interview_start_time, 2)

                        
                        logging.debug("Interview clip starts at {}, Blank clip starts at {}, so difference is {}".format(
                            interview_start_time,
                            cutaway_timeline,
                            dif)
                        )

                        # Define clip length
                        clip_dur = sherpaUtils.calculate_clip_length(clip_data['Meta'])

                        sub_clip_start = (interview_clip_meta_data.get('startTime')) + dif
                        # Get end of clip or end of blank, whichever comes first
                        sub_clip_end = min(
                            ((interview_clip_meta_data.get('startTime')) + dif + clip_dur), 
                            interview_clip_meta_data.get('endTime')
                        )

                        # Round data off for simple calculation 
                        sub_clip_start = round(sub_clip_start, 2)
                        sub_clip_end = round(sub_clip_end, 2)

                        logging.debug("Sub clip starts at {}, ends at {}".format(sub_clip_start, sub_clip_end))

                        sub_clip_length = sub_clip_end - sub_clip_start
                        total_insert_length += sub_clip_length

                        interview_clip_type = interview_clip_meta_data.get('clipType')

                        # Create video clip from data found above
                        # Audio is not needed, we will insert it later
                        if interview_clip_type == "Interview":
                            logging.debug("Replacing blank {} with interview clip {}".format(
                                clip_data['Meta'].get('name'),
                                interview_clip_meta_data.get('name')
                            ))
                            # Create clip with parameterised start and end times
                            clip = generateEffects.generate_clip(
                                clip_data=interview_clip_meta_data,
                                user=user,
                                start=sub_clip_start,
                                end=sub_clip_end,
                                compressed=compress_render
                            )

                            clip = generateEffects.better_generate_text_caption(clip, relevant_interview_clip_data['edit'], compressed=compress_render or chunk_render)

                        # Blanks from the cutaway can be placed instead
                        elif interview_clip_type == "Blank":
                            
                            logging.debug("Replacing blank {} with interview blank {}".format(
                                clip_data['Meta'].get('name'),
                                interview_clip_meta_data.get('name')
                            ))
                            clip = generateEffects.generate_blank(interview_clip_meta_data, start=sub_clip_start, end=sub_clip_end, compressed=compress_render or chunk_render)
                            clip = generateEffects.better_generate_text_caption(clip, relevant_interview_clip_data['edit'], compressed=compress_render or chunk_render)

                        # TODO: Careful here, rounding could cause issues
                        total_insert_length = round(total_insert_length, 2)
                        logging.debug("Insert lenght: {}".format(total_insert_length))

                        # If the blank length is longer than the length of the videos being inserted
                        while not isclose(total_insert_length, cutaway_blank_len):
                            some_filler = True
                            logging.debug("Clip length didn't suffice for blank, adding more files as necessary")

                            time_to_fill = cutaway_blank_len - total_insert_length

                            time_to_fill = round(time_to_fill, 2)

                            logging.debug("Time left to fill is {}".format(time_to_fill))

                            interview_clip_ord += 1

                            next_clip_data = sherpaUtils.give_clip_order(interview_clip_ord, json_file['InterviewFootage'])

                            # Clip should be the same size as the time to fill if possible
                            # But it's also possible that the next clip isn't big enough either
                            # So we'll need to go further on
                            # To stop bugs, we need to set our end time as either the time left to fill, or the length of the clip
                            end_time = round(min(
                                next_clip_data['Meta'].get('startTime') + time_to_fill,
                                sherpaUtils.calculate_clip_length(next_clip_data['Meta'])
                            ), 2)

                            logging.debug("End time for clip is {}".format(end_time))


                            if next_clip_data['Meta'].get('clipType') == "Interview":
                                next_clip = generateEffects.generate_clip(
                                    clip_data=next_clip_data['Meta'],
                                    end=next_clip_data['Meta'].get('startTime')+end_time,
                                    user=user,
                                    compressed=compress_render or chunk_render
                                )
            
                                next_clip = generateEffects.better_generate_text_caption(next_clip, next_clip_data['edit'], compressed=compress_render or chunk_render)
            
                            elif next_clip_data['Meta'].get('clipType') == "Blank":
                                next_clip = generateEffects.generate_blank(next_clip_data['Meta'], end=end_time, compressed=compress_render or chunk_render)
                                next_clip = generateEffects.better_generate_text_caption(next_clip, next_clip_data['edit'], compressed=compress_render or chunk_render)

                            total_insert_length += next_clip.duration
                            logging.debug("Total insert length {}".format(total_insert_length))

                            clip = concatenate_videoclips([clip, next_clip])
                        logging.debug("Blank clip '{}' has been replaced with interview clips as necessary".format(clip_name))

                    # No clip can be found, generate the clip from the blank data in the cutaway timeline
                    except TypeError:
                        if some_filler:
                            logging.debug("Some suitable clips have been found from the interview clip, but a discrepency has still occured")
                            logging.debug("{}s of footage failed to be found in the interview footage".format(time_to_fill))
                            logging.debug("Inserting interview clips that have been found.")
                        if some_filler == False:
                            logging.error("TypeError in render - No clip found to replace blank '{}'".format(clip_data['Meta'].get("name")))
                            logging.debug("Rendering out blank file found in cutaway timeline instead")
                            clip = generateEffects.generate_blank(clip_data['Meta'], compressed=compress_render or chunk_render)            
                            logging.debug("Generating audio for {}".format(clip_name))
                            clip = generateEffects.better_generate_text_caption(clip, clip_data['edit'], compressed=compress_render or chunk_render)


                # Insert clip into correct position in array
                logging.debug("Inserted clip '{}' into pos {}.".format(clip_name, clip_data['Meta'].get('order')-1))

                cutaway_timeline = round((cutaway_timeline+clip.duration), 2)
                video_list.insert(clip_data['Meta'].get('order')-1, clip)

            # Video list
            logging.debug("Video list:")
            logging.debug(video_list)

            # Create audio from the interview Footage
            bottom_audio = generateEffects.interview_audio_builder(interview_data=json_file['InterviewFootage'], user=user)

            # We need to insert the intro if it exists
            if os.path.exists(os.path.join(attach_dir, user, "intro.mp4")):
                logging.debug("Intro clip found")
                logging.error("WE ARE CURRENTLY NOT IMPLEMENTING INTROS")
                """       
                intro_clip = generateEffects.create_intro_clip(user, compressed=compress_render or chunk_render)
                video_list.insert(0, intro_clip)
                logging.debug("Inserting audio for clip '{}'     Clip Audio is {}   Audio length is {}".format(intro_clip, intro_clip.audio, intro_clip.duration))
                top_audio.insert(0, intro_clip.audio)
                bottom_audio.insert(0, intro_clip.audio)"""
            else:
                logging.error("No intro clip found, continuing")

            # Concatenate the clips together
            top_audio = concatenate_audioclips(top_audio)    
            logging.debug("Top audio len: {}".format(round(top_audio.duration, 2)))

                
            # Try adding the music if it exists
            logging.debug("Attempting to add music...")
            try:
                music_data = json_file['MusicTrackURL']
                music_audio_lvl = float(json_file['MusicAudioLevel'])
                music = generateEffects.open_music(music_data, music_audio_lvl, cutaway_timeline)
                # If the video is longer than the music, replay it
                if music.duration > cutaway_timeline:
                    music = CompositeAudioClip([music, generateEffects.open_music(music_data, music_audio_lvl, cutaway_timeline - music.duration)])
                top_audio = CompositeAudioClip([top_audio, music])
                logging.debug("Music added successfully")
            except Exception as e:
                logging.debug("Exception occured in render - during music audio building:")
                logging.debug(e)
                finished_audio = top_audio

            # Try adding the voice over 
            logging.debug("Attempting to add voice over...")
            try:
                voice_data = json_file['VoiceTrackURL']
                voice_audio_lvl = float(json_file['VoiceoverAudioLevel'])
                voice = generateEffects.open_voice(voice_data, voice_audio_lvl, user)
                top_audio = CompositeAudioClip([top_audio, voice])
                logging.debug("Music added successfully")
            except Exception as e:
                logging.debug("Exception occured in render - during voiceover audio building:")
                logging.debug(e)
                finished_audio = top_audio

            # Try concatenating the top and bottom audio lines together
            logging.debug("Attepting to add interview audio...")
            try:
                bottom_audio = concatenate_audioclips(bottom_audio)    
                logging.debug("Bottom audio len: {}".format(round(bottom_audio.duration, 2)))
                finished_audio = CompositeAudioClip([top_audio, bottom_audio])
                logging.debug("Interview audio addedd successfully")
            except Exception as e:
                logging.debug("Exception occured in render - during interview audio building:")
                logging.debug(e)
                finished_audio = top_audio

            logging.debug("Finished audio len: {}".format(round(finished_audio.duration, 2)))

            # Concatenate the video files together
            finished_video = concatenate_videoclips(video_list)
            finished_video = finished_video.set_audio(finished_audio)


            # Defining path here is cleaner
            vid_name = user + "_com_preview_edited.mp4" if compress_render else user + "_edited.mp4"
            vid_dir = os.path.join(attach_dir, user, vid_name)


            logging.debug("Rendering {} clip(s) together, of total length {}.".format(len(video_list), round(finished_video.duration, 2)))
            logging.debug("Writing '{}' to {}".format(vid_name, vid_dir))

            logging.debug("Videos placed in {} seconds".format(time.time() - start_time))

            # Render the finished project out into an mp4
            if chunk_render:
                if finished_video.duration<Config.PREVIEW_CHUNK_LENGTH:
                        logging.debug("Rendering Video as it's smaller than chunk length")
                        vid_dir = os.path.join(attach_dir, user, user + "_com_chunk_0_edited.mp4")
                        finished_video.write_videofile(
                            vid_dir,
                            threads=8,
                            preset="ultrafast",
                            bitrate="1000k",
                            audio_codec="aac",
                            remove_temp=True,
                            fps=24
                        )
                        results = "Chunk Rendered Successfully", 1
                        if send_end is not None:
                            send_end.send(results)            
                        return results

                logging.debug("Running chunk render instance")
                # Get 10 second chunks of videos
                logging.debug("Splitting video up into 10s chunks.")
                
                # Initialising variables
                finished_dur = round(finished_video.duration, 2)
                chunk_len = Config.PREVIEW_CHUNK_LENGTH
                preview_chunks = []
                playtime = 0

                # Getting segment amount (rounded up to account for the section that doesn't fit within the chunk length)
                segment_no = ceil(finished_dur/chunk_len)
                # hangover segment

                logging.debug("Video duration: {}s  /{}s = {} segments      full segments: {}".format(finished_dur, chunk_len, finished_dur/chunk_len, segment_no))

                for i in range(segment_no):
                    preview_clip = finished_video.subclip(playtime, min(playtime + chunk_len, finished_dur))
                    logging.debug("Clip is currently from {} to {}".format(playtime, round(min(playtime + chunk_len, finished_dur), 2)))

                    playtime += chunk_len
                    logging.debug("Segment {} is {}s long".format(i, round(preview_clip.duration, 2)))
                    preview_clip.fps = 24
                    if preview_clip.duration < chunk_len / 2:
                        logging.debug("Clip is shorter than {}s, so appending it to the last clip instead.".format(chunk_len / 2))
                        # keep playback order: previous chunk first, then the short tail
                        preview_clip = concatenate_videoclips([preview_chunks[-1], preview_clip])
                        del preview_chunks[-1]
                    preview_chunks.append(preview_clip)


                
                logging.debug("Preview chunk list: ")
                logging.debug(preview_chunks)

                logging.debug("Rendering out {} videos in {}s chunks".format(len(preview_chunks), chunk_len))

                
                for chunk_index, video in enumerate(preview_chunks):
                    try:
                        vid_name = user + "_com_chunk_" + str(chunk_index) + "_edited.mp4"
                        vid_dir = os.path.join(attach_dir, user, vid_name)

                        logging.debug("Rendering {} at time {}s".format(vid_name, (time.time() - start_time)))
                        video.write_videofile(
                            vid_dir,
                            threads=8,
                            preset="ultrafast",
                            bitrate="1000k",
                            audio_codec="aac",
                            remove_temp=True,
                            fps=24
                        )
                        results = "Chunk {} Rendered Successfully".format(str(preview_chunks.index(video))), 1
                        results = "Chunk 1 Rendered Successfully", 1
                        if send_end is not None:
                            send_end.send(results)            
                    except:
                        logging.error("Fatal error occured while writing video - Chunk Render")
                        logging.exception("")
                        logging.error("Exiting program without writing video file correctly")                
                        results = "Video not rendered [ERROR OCCURED, VIEW LOGS '{}' FOR MORE DETAILS]".format(log_name), 99
                        if send_end is not None:
                            send_end.send(results)            
                        return results
                #results = "Video Rendered Successfully", 1
                logging.debug("File '{}' successfully written to {}".format(vid_name, vid_dir))
                logging.debug("Completed in {} seconds".format(time.time() - start_time))
                logging.debug("Closing render instance - Chunk")            
                if send_end is not None:
                    send_end.send(results)            
                return results

                    
            if compress_render:
                logging.debug("Running compress render instance")
                try:
                    finished_video.write_videofile(
                        vid_dir,
                        threads=8,
                        bitrate="1000k",
                        audio_codec="aac",
                        remove_temp=True,
                        fps=24
                    )        
                    results = "Video Rendered Successfully", 1
                    logging.debug("File '{}' successfully written to {}".format(vid_name, vid_dir))
                    logging.debug("Completed in {} seconds".format(time.time() - start_time))
                    logging.debug("Closing render instance - Compress")
                    if send_end is not None:
                        send_end.send(results)            
                    return results
                except:
                    logging.error("Fatal error occured while writing video - Compressed Render")
                    logging.exception("")
                    logging.error("Exiting program without writing video file correctly")
                    results = "Video not rendered [ERROR OCCURED, VIEW LOGS '{}' FOR MORE DETAILS]".format(log_name), 99
                    if send_end is not None:
                        send_end.send(results)            
                    return results
            else:
                logging.debug("Running full render instance")
                try:
                    logging.debug("Rendering {}".format(vid_name))
                    finished_video.write_videofile(            
                        vid_dir,
                        threads=8,
                        audio_codec="aac",
                        bitrate="8000k",
                        remove_temp=True,
                        fps=24
                    )        
                    results = "Video Rendered Successfully", 1
                    logging.debug("File '{}' successfully written to {}".format(vid_name, vid_dir))
                    logging.debug("Completed in {} seconds".format(time.time() - start_time))
                    logging.debug("Closing render instance - Full")
                    if send_end is not None:
                        send_end.send(results)            
                    return results

                except:
                    logging.error("Fatal error occured while writing video - Full Render")
                    logging.exception("")
                    logging.error("Exiting program without writing video file correctly")
                    results = "Video not rendered [ERROR OCCURED, VIEW LOGS '{}' FOR MORE DETAILS]".format(log_name), 99
                    if send_end is not None:
                        send_end.send(results)            
                    return results                

    except:
        logging.error("An unknown error has occured, causing video render instance to crash:")
        logging.exception("")
        results = "Unforseen error has occured [Contact admin]", 99      
        if send_end is not None:
            send_end.send(results)            
        return results
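
The chunk-render branch above walks the finished video in fixed-length windows and folds a too-short tail into the previous chunk. A minimal sketch of just that splitting step, assuming a MoviePy clip and a chunk length in seconds (the helper name split_into_chunks is ours, not from the snippet):

from math import ceil
from moviepy.editor import concatenate_videoclips

def split_into_chunks(clip, chunk_len):
    """Split `clip` into subclips of `chunk_len` seconds; a tail shorter
    than half a chunk is merged into the previous subclip."""
    duration = round(clip.duration, 2)
    chunks = []
    playtime = 0
    for _ in range(ceil(duration / chunk_len)):
        piece = clip.subclip(playtime, min(playtime + chunk_len, duration))
        playtime += chunk_len
        if chunks and piece.duration < chunk_len / 2:
            # fold the short tail into the previous chunk, preserving playback order
            piece = concatenate_videoclips([chunks.pop(), piece])
        chunks.append(piece)
    return chunks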
Example #27
def main():
    extensions = ['ogv', 'mov']
    trial = ['326', '160']
    folders = [
        '../video_302_20210514-143755-6691',
        '../video_305_20210514-141912-2142'
    ]
    filter_bad_frames = [True, False]
    for index_trial in [0, 1]:
        results_df = pd.read_csv(
            f'{folders[index_trial]}/structured_output.csv')
        results_df = results_df[results_df['trial'] == float(
            trial[index_trial])]
        table_et_pt1 = ASC2CSV(
            f'{folders[index_trial]}/et{trial[index_trial]}.asc')
        table_et_pt2 = ASC2CSV(
            f'{folders[index_trial]}/et{trial[index_trial]}pt2.asc')
        list_of_videos = []
        for screen in range(1, 13):
            results_df_this_screen = results_df[results_df['screen'] == screen]
            video_filename = f'{folders[index_trial]}/recorded_screen_{screen}_trial_{trial[index_trial]}_0001.{extensions[index_trial]}'
            if os.path.isfile(video_filename):
                my_clip = mpe.VideoFileClip(video_filename)
                if screen in [2, 4, 7, 9]:
                    start_video = results_df_this_screen[
                        results_df_this_screen['title'] ==
                        'start_video_recording']['timestamp'].values[
                            0] * 24 * 60 * 60

                    #the only screen with audio is screen 2
                    if screen == 2:
                        table_et_2 = table_et_pt1.copy()
                        start_video_2 = start_video
                        my_clip = fl(
                            my_clip, lambda get_frame, t: scroll(
                                get_frame, t, table_et_2, start_video_2),
                            filter_bad_frames[index_trial])
                        delay_audio = results_df_this_screen[
                            results_df_this_screen['title'] ==
                            'start_audio_recording']['timestamp'].values[
                                0] * 24 * 60 * 60 - start_video

                        #generate the audio from the timestamped transcription
                        if use_digital_audio:
                            full_audio = AudioSegment.empty()
                            previous_end = 0
                            with open(
                                    f'{folders[index_trial]}/{trial[index_trial]}_joined.json',
                                    'r') as f:
                                table_text = json.load(f)['timestamps']
                            with open(
                                    f'{folders[index_trial]}/{trial[index_trial]}_trim.json',
                                    'r') as f:
                                b = json.load(f)
                            trim_value = float(b['start_trim']) / 1000
                            for row in table_text:
                                print(row[1])
                                print(trim_value)

                                #row[1] is the timestamp for the start of the word, and row[2] the timestamp for the end of the word
                                row[1] += trim_value
                                row[2] += trim_value
                                print(row[1])

                                # if start and end of the word are at the same time, it was not captured by the original transcription, so we do not use it in the audio, only in subtitle
                                if row[1] == row[2]:
                                    continue

                                # text to speech
                                tts = SaveTTSFile('create_video_temp.wav')
                                tts.start(
                                    row[0].replace('.', 'period').replace(
                                        ',', 'comma').replace('/', 'slash'),
                                    row[1], row[2])
                                for i in range(10):
                                    if not os.path.exists(
                                            './create_video_temp.wav'):
                                        time.sleep(1)
                                    else:
                                        break
                                else:
                                    # loop finished without the TTS file ever appearing
                                    assert False, 'create_video_temp.wav was never created'
                                del tts

                                # add silence between words if they did not end/start at the same time
                                if row[1] > previous_end:
                                    full_audio += AudioSegment.silent(
                                        duration=(row[1] - previous_end) *
                                        1000)
                                print(full_audio.duration_seconds)
                                print(row[1])
                                assert (abs(full_audio.duration_seconds -
                                            row[1]) < 0.002)

                                #change the duration of the word sound to the duration it took for the radiologist to say it
                                word_audio = AudioSegment.from_file(
                                    'create_video_temp.wav', format="wav")
                                word_audio = stretch_audio(
                                    word_audio, 'create_video_temp.wav',
                                    word_audio.duration_seconds /
                                    (row[2] - row[1]))

                                full_audio += word_audio
                                assert (abs(full_audio.duration_seconds -
                                            row[2]) < 0.002)
                                previous_end = row[2]
                            full_audio.export("create_video_temp.wav",
                                              format="wav")
                            audio_background = mpe.AudioFileClip(
                                'create_video_temp.wav')
                            os.remove('./create_video_temp.wav')
                        else:
                            audio_background = mpe.AudioFileClip(
                                f'{folders[index_trial]}/{trial[index_trial]}.wav'
                            )
                            # delay_audio = round(delay_audio*my_clip.fps)/my_clip.fps
                        if delay_audio > 0:
                            null_audio = mpe.AudioClip(lambda t: 0,
                                                       duration=delay_audio)
                            audio_background = mpe.concatenate_audioclips(
                                [null_audio, audio_background])
                            delay_audio = 0
                        delay_end_video = my_clip.duration - audio_background.duration
                        if delay_end_video > 0:
                            null_audio = mpe.AudioClip(
                                lambda t: 0, duration=delay_end_video)
                            audio_background = mpe.concatenate_audioclips(
                                [audio_background, null_audio])
                            delay_end_video = 0
                        audio_background.write_audiofile('temp_crop_audio.wav')
                        trim_audio('temp_crop_audio.wav', -delay_audio,
                                   -delay_end_video)
                        audio_background = mpe.AudioFileClip(
                            'temp_crop_audio.wav')

                    else:
                        if screen == 4:
                            table_et_this_screen_4 = table_et_pt2[
                                table_et_pt2['index_edf'] == 0]
                            start_video_4 = start_video
                            my_clip = fl(
                                my_clip, lambda get_frame, t: scroll(
                                    get_frame, t, table_et_this_screen_4,
                                    start_video_4),
                                filter_bad_frames[index_trial])
                        if screen == 7:
                            table_et_this_screen_7 = table_et_pt2[
                                table_et_pt2['index_edf'] == 1]
                            start_video_7 = start_video
                            my_clip = fl(
                                my_clip, lambda get_frame, t: scroll(
                                    get_frame, t, table_et_this_screen_7,
                                    start_video_7),
                                filter_bad_frames[index_trial])
                        if screen == 9:
                            table_et_this_screen_9 = table_et_pt2[
                                table_et_pt2['index_edf'] == 2]
                            start_video_9 = start_video
                            my_clip = fl(
                                my_clip, lambda get_frame, t: scroll(
                                    get_frame, t, table_et_this_screen_9,
                                    start_video_9),
                                filter_bad_frames[index_trial])
                else:
                    my_clip = fl(my_clip, clean,
                                 filter_bad_frames[index_trial])
                if screen != 2:
                    audio_background = mpe.AudioClip(lambda t: 0,
                                                     duration=my_clip.duration)
                my_clip = my_clip.set_audio(audio_background)
                list_of_videos.append(my_clip)
        final = mpe.concatenate_videoclips(list_of_videos)
        final.write_videofile(f"movie_{extensions[index_trial]}.mp4",
                              audio_codec='aac',
                              codec="libx264",
                              temp_audiofile='temp-audio.m4a',
                              remove_temp=True,
                              fps=30,
                              bitrate="5000k")
        os.remove('./create_video_temp.wav')
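
The silence padding for screen 2's audio above follows a reusable pattern: prepend silence to absorb a positive start delay, then append silence to cover the remaining video tail. A condensed sketch of just that pattern (the function name pad_audio_to_video is an assumption, not part of the snippet):

import moviepy.editor as mpe

def pad_audio_to_video(audio, video_duration, start_delay):
    """Pad `audio` with leading/trailing silence so it starts at
    `start_delay` seconds and ends at `video_duration` seconds."""
    if start_delay > 0:
        lead = mpe.AudioClip(lambda t: 0, duration=start_delay)
        audio = mpe.concatenate_audioclips([lead, audio])
    tail = video_duration - audio.duration
    if tail > 0:
        trail = mpe.AudioClip(lambda t: 0, duration=tail)
        audio = mpe.concatenate_audioclips([audio, trail])
    return audio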
Example #28
    def suppressAudioPortions(self, videoIndex, videoFileName,
                              downloadVideoInfoDic):
        mp4FilePathName = os.path.join(self.targetAudioDir, videoFileName)
        suppressStartEndSecondsLists = downloadVideoInfoDic.getSuppressStartEndSecondsListsForVideoIndex(
            videoIndex)
        suppressFrameNb = len(suppressStartEndSecondsLists)
        videoAudioFrame = mp.AudioFileClip(mp4FilePathName)
        duration = videoAudioFrame.duration
        clips = []
        keptStartEndSecondsLists = []

        msgText = '\nsuppressing portions of "{}" ...\n'.format(videoFileName)
        self.audioController.displayMessage(msgText)

        for extractIdx in range(suppressFrameNb + 1):
            if extractIdx == 0:
                timeFrameEndValue = suppressStartEndSecondsLists[0][0]

                if timeFrameEndValue == 0:
                    # the suppressed section starts at 0, so appending a 0-0 time frame would be nonsensical
                    continue

                extractStartEndSecondsList = [0, timeFrameEndValue]
                keptStartEndSecondsLists.append(extractStartEndSecondsList)
                clips.append(
                    self.extractClip(videoAudioFrame,
                                     extractStartEndSecondsList))
            elif extractIdx == suppressFrameNb:
                extractStartEndSecondsList = [
                    suppressStartEndSecondsLists[extractIdx - 1][1], duration
                ]

                if extractStartEndSecondsList[0] == 'end':
                    suppressStartEndSecondsLists[extractIdx - 1][1] = duration
                    continue

                keptStartEndSecondsLists.append(extractStartEndSecondsList)
                clips.append(
                    self.extractClip(videoAudioFrame,
                                     extractStartEndSecondsList))
            else:
                extractStartEndSecondsList = [
                    suppressStartEndSecondsLists[extractIdx - 1][1],
                    suppressStartEndSecondsLists[extractIdx][0]
                ]
                keptStartEndSecondsLists.append(extractStartEndSecondsList)
                clips.append(
                    self.extractClip(videoAudioFrame,
                                     extractStartEndSecondsList))

        clip = mp.concatenate_audioclips(clips)
        mp3FileName = os.path.splitext(videoFileName)[0] + '_s.mp3'
        mp3FilePathName = os.path.join(self.targetAudioDir, mp3FileName)
        clip.write_audiofile(mp3FilePathName)
        clip.close()
        videoAudioFrame.close()
        HHMMSS_suppressedTimeFramesList = self.convertStartEndSecondsListsTo_HHMMSS_TimeFramesList(
            suppressStartEndSecondsLists)
        HHMMSS_keptTimeFramesList = self.convertStartEndSecondsListsTo_HHMMSS_TimeFramesList(
            keptStartEndSecondsLists)
        downloadVideoInfoDic.addSuppressedFileInfoForVideoIndex(
            videoIndex, mp3FileName, HHMMSS_suppressedTimeFramesList,
            HHMMSS_keptTimeFramesList)

        self.displayFramesMsg('\ttime frames suppressed:',
                              HHMMSS_suppressedTimeFramesList)
        self.displayFramesMsg('\n\ttime frames kept:',
                              HHMMSS_keptTimeFramesList)
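
suppressAudioPortions keeps the complement of the suppressed [start, end] ranges. Stripped of the bookkeeping, the core idea looks like this sketch (keep_complement is our name; the ranges are assumed sorted, non-overlapping, and numeric):

import moviepy.editor as mp

def keep_complement(audio_path, suppress_ranges, out_path):
    """Write `audio_path` minus the given [start, end] second ranges."""
    audio = mp.AudioFileClip(audio_path)
    kept, cursor = [], 0
    for start, end in suppress_ranges:
        if start > cursor:
            kept.append(audio.subclip(cursor, start))
        cursor = end
    if cursor < audio.duration:
        kept.append(audio.subclip(cursor, audio.duration))
    mp.concatenate_audioclips(kept).write_audiofile(out_path)
    audio.close()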
Example #29
        text2.draw(surface)
        return surface.get_npimage()

    # make video
    text = mpy.VideoClip(render_text, duration=DURATION)

    # load audio files to add them to video
    silence_half = mpy.AudioFileClip("0.5-second-of-silence.mp3")
    silence_sec = mpy.AudioFileClip("1-second-of-silence.mp3")
    video_audio1 = mpy.AudioFileClip("untranslated.mp3")
    video_audio2 = mpy.AudioFileClip("translated.mp3")
    video_audio3 = mpy.AudioFileClip("untranslated_fast.mp3")

    # put all sounds together
    video_audio = mpy.concatenate_audioclips([
        silence_half, video_audio1, silence_half, video_audio2, silence_half,
        video_audio3, silence_sec
    ])

    video = mpy.CompositeVideoClip([text.set_position('center')],
                                   size=VIDEO_SIZE).on_color(
                                       color=WHITE,
                                       col_opacity=1).set_audio(video_audio)

    video.write_videofile("video-" + str(v_counter) + ".mp4", fps=10)
    v_counter = v_counter + 1

# combine all videos into one
clips = []

for i in range(1, len(word_list) + 1):
    clips.append(mpy.VideoFileClip("video-" + str(i) + ".mp4"))
Example #30
    def add_silence(self, duration_s: float) -> None:
        """Prepend a silence of a certain duration to the audio clip."""
        silence_clip = med.AudioClip(silence, duration=duration_s)
        self.clip = med.concatenate_audioclips([silence_clip, self.clip])
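
Here `silence` must be a frame function mapping time to amplitude; it is not shown in the snippet. A self-contained version of the same idea (the silence helper and the prepend_silence wrapper are assumptions):

import moviepy.editor as med

def silence(t):
    # zero amplitude at every time t
    return 0

def prepend_silence(clip, duration_s):
    """Return `clip` with `duration_s` seconds of silence in front."""
    silence_clip = med.AudioClip(silence, duration=duration_s)
    return med.concatenate_audioclips([silence_clip, clip])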
Example #31
    print(mapContentImg)
    print(title)
    videos = []
    for i in range(len(mapContentImg)):
        audiopath, duration = news2video.text2Audio(list(mapContentImg.keys())[i],
                                                    "audio-%d" % i)
        videopath = news2video.generateVideo(list(mapContentImg.values())[i],
                                             duration, "%d" % i)
        videopath = news2video.addAudio2Video(videopath, audiopath)
        videos.append(mpe.VideoFileClip(videopath))
        #print(mapContentImg.keys()[i])
        #print(mapContentImg.values()[i])

    if len(videos) > 1:
        finalclip = mpe.concatenate_videoclips(videos)
        finalclip.to_videofile(news2video.imgroot + "final.mp4",
                               fps=1,
                               remove_temp=False)
    sys.exit(1)
    audiopath, duration = news2video.text2Audio(texts)
    print("Audio Generate over, name:%s duration:%d" % (audiopath, duration))
    videopath = news2video.generateVideo(pics, duration)
    print("Video Generate over, name:%s duration:%d" % (videopath, duration))
    #videopath = news2video.addText2Video(videopath, texts, title)
    print("Text Adding over, name:%s duration:%d" % (videopath, duration))
    videopath = news2video.addAudio2Video(videopath, audiopath)
    print("Audio Adding over, name:%s duration:%d" % (videopath, duration))
    #audiopath = news2video.text2Audio('欢迎使用百度语音只能服务')
    #news2video.changePicSize(r'E:\testimg\IMG_1615.JPG', 540, 720)
    #videopath = news2video.addAudio2Video(videopath, r'E:\test-res\test.mp3')
Example #32
    def repeat(self) -> None:
        """Concatenate the video or audio stream with itself to double its length."""
        if self.has_video:
            self.clip = med.concatenate_videoclips([self.clip, self.clip])
        else:
            self.clip = med.concatenate_audioclips([self.clip, self.clip])
Example #33
    video = combined_clip

#audio
audio_files = []

for i in os.listdir():
    if i.endswith(".mp3") or i.endswith(".wav"):
        audio_files.append(i)

print("Audio files loaded are: " + str(audio_files))

for i, clip in enumerate(audio_files):
    audio_files[i] = AudioFileClip(clip)

# Concatenate audio tracks into one audio clip
combined_audio = concatenate_audioclips(audio_files)

#Set Duration of audioclip
background_audio = combined_audio.set_duration(video.duration)

# combine the video's own audio with the background audio track
video_audio = video.audio
print(background_audio)
print(video_audio)
final_audio = CompositeAudioClip([background_audio, video_audio])
final_clip = video.set_audio(final_audio)

#render
print("Composition successful. Rendering!")
final_clip.write_videofile(output_name, fps=fr, logger=None)
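
Note that set_duration above only trims (or nominally extends) the combined track; if the audio files together are shorter than the video, a hedged alternative is to loop them first, as in this sketch (loop_audio_to is our name):

from moviepy.editor import concatenate_audioclips

def loop_audio_to(audio, duration):
    """Repeat `audio` end-to-end until it covers `duration` seconds, then trim."""
    loops = int(duration / audio.duration) + 1
    return concatenate_audioclips([audio] * loops).set_duration(duration)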
Example #34
def concatenate_audio_moviepy(audio_clip_paths, output_path):
    """Concatenates several audio files into one audio file using MoviePy
    and saves it to `output_path`. Note that the extension (mp3, etc.) must be included in `output_path`."""
    clips = [AudioFileClip(c) for c in audio_clip_paths]
    final_clip = concatenate_audioclips(clips)
    final_clip.write_audiofile(output_path)
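
A hypothetical invocation (the file names are placeholders):

concatenate_audio_moviepy(["intro.mp3", "speech.mp3", "outro.mp3"], "episode.mp3")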