Example #1
 def write_frames(self, frames: List[np.ndarray], filepath: str, audio: CompositeAudioClip) -> str:
     """Writes the frames to a given .mp4 filepath (h264 codec)"""
     vclip = ImageSequenceClip(frames, fps=self.fps)
     audio = audio.set_duration(vclip.duration)  # set_duration returns a new clip rather than mutating in place
     vclip.audio = audio
     vclip.write_videofile(filepath, codec='libx264', fps=self.fps)
     return filepath
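A minimal usage sketch for the method above, assuming it lives on a writer class that exposes an fps attribute; FrameWriter and voiceover.mp3 are hypothetical names:

import numpy as np
from moviepy.editor import AudioFileClip, CompositeAudioClip

frames = [np.zeros((64, 64, 3), dtype=np.uint8) for _ in range(24)]  # one second of black frames at 24 fps
audio = CompositeAudioClip([AudioFileClip("voiceover.mp3")])  # hypothetical audio file
# writer = FrameWriter(fps=24)  # hypothetical class exposing .fps
# writer.write_frames(frames, "out.mp4", audio)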
Example #2
def createVideo(title, script, publishTime):
    intro = VideoFileClip("clips/intro.mp4")
    body = VideoFileClip("clips/body.mp4")
    loop = VideoFileClip("clips/loop.mp4")
    outro = VideoFileClip("clips/outro.mp4")

    titleMP3 = ''.join(e for e in title if e.isalnum())
    titleMP3 = titleMP3 + ".mp3"

    audioclip = AudioFileClip(titleMP3)

    scriptLen = audioclip.duration
    loopLen = loop.duration

    multiplier = scriptLen / loopLen

    new_audioclip = CompositeAudioClip([body.audio, audioclip])
    body.audio = new_audioclip

    x = [intro, body]
    multiplier = multiplier - 1
    while multiplier > 0:
        x.append(loop)
        multiplier = multiplier - 1

    x.append(outro)
    final_clip = concatenate_videoclips(x)

    titleMP4 = ''.join(e for e in title if e.isalnum())
    titleMP4 = titleMP4 + ".mp4"

    final_clip.write_videofile(titleMP4)
    uploadVid(title, script, titleMP4, publishTime)
Example #3
def video_render(txt_file, image_file, sound_file, save_file):
        import re  # used for word counting below; missing from the original snippet
        from moviepy.editor import ImageClip
        from moviepy.editor import CompositeVideoClip
        from moviepy.editor import CompositeAudioClip
        from moviepy.editor import TextClip
        from moviepy.editor import AudioFileClip
        from moviepy.editor import concatenate
        from moviepy.config import change_settings
        change_settings({"IMAGEMAGICK_BINARY": "/usr/local/bin/convert"})
        text=[]
        
        with open(txt_file,'r') as file:
            for lines in file:
                if lines!="\n":
                    text.append(lines.rstrip('\n'))
        durs=[]
        for i in text:            
            res = len(re.findall(r'\w+', i)) 
            if res/2>3:
                durs.append(res/2)
            else:
                durs.append(3)
        total_duration=sum(durs)
        
        a_clip = AudioFileClip(sound_file)
        if a_clip.duration<total_duration:
            new_audioclip = CompositeAudioClip([a_clip, a_clip.set_start(a_clip.duration-1)]).set_duration(total_duration+3)
        else:
            new_audioclip=a_clip.set_duration(total_duration+3)
        
        screen=(1920,1080)
        clip_list = []
        for string, duration in zip(text, durs):
            try:
                txt_clip = TextClip(string, fontsize = 70, color = 'white', method='caption',size=screen ).set_duration(duration).set_pos('center')
                clip_list.append(txt_clip)
            except UnicodeEncodeError:
                txt_clip = TextClip("Issue with text", fontsize = 70, color = 'white').set_duration(2) 
                clip_list.append(txt_clip)
        
        final_text_clip = concatenate(clip_list, method = "compose").set_start(3)  
            
        v_clip = ImageClip(image_file).set_duration(total_duration+3)
        video=CompositeVideoClip([v_clip, final_text_clip])
        # video = video.set_audio(AudioFileClip('sound/Serenity (1).mp3'))
        video = video.set_audio(new_audioclip)
        video.write_videofile(save_file, 
                              codec='libx264',
                              fps=10, 
                              threads=4,
                              audio_codec='aac', 
                              temp_audiofile='temp-audio.m4a', 
                              remove_temp=True
                              )
Example #4
def outro():
    # moviepy's file readers expect string paths, so convert the Path objects with str()
    outroimg = str(pathlib.Path(RESOURCES + "/images/outputMoment.jpg"))
    audio = AudioFileClip(str(pathlib.Path(RESOURCES + "/sounds/outroaud.wav")))
    music = AudioFileClip(str(pathlib.Path(RESOURCES + "/sounds/jazz_lounge.mp3")))
    final_audio = CompositeAudioClip([audio, music])
    outro = ImageClip(outroimg)
    outro = outro.set_fps(24)
    outro = outro.set_audio(final_audio)
    outro = outro.set_duration(30)
    outro.write_videofile(str(pathlib.Path(RESOURCES + "/vids/outro.mp4")))
Example #5
 def __add_audio_to_video__(self, file_path, duration, config):
     print(file_path)
     my_clip = VideoFileClip(file_path)
     audio_background = AudioFileClip('downloaded/audio.mp3').subclip(
         0, duration - 1)
     final_audio = CompositeAudioClip([audio_background])
     final_clip = my_clip.set_audio(final_audio)
     result_path = "tmp/" + self.__next_index__() + ".mp4"
     final_clip.write_videofile(result_path)
     return result_path
Example #6
def add_audio_to_video(video_file_path, audio_file_path, target_file_path):
    videoclip = VideoFileClip(video_file_path)
    audioclip = AudioFileClip(audio_file_path)
    audioclip = audioclip.subclip(0, videoclip.duration)

    new_audioclip = CompositeAudioClip([audioclip])
    # videoclip.audio = new_audioclip
    videoclip2 = videoclip.set_audio(new_audioclip)
    videoclip2.write_videofile(target_file_path,
                               codec="libx264",
                               audio_codec="aac")
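Usage is a single call; a hedged sketch with placeholder file names. Note that set_audio returns a new clip, which is why the example assigns the result to videoclip2 instead of mutating videoclip.audio in place.

add_audio_to_video("input.mp4", "music.mp3", "output.mp4")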
Example #7
def add_background_audio(audio_clip):
    """
    :param audio_clip: 최종 영상 오디오 클립
    :return: 배경음 삽입된 오디오 클립
    """

    # Access audio file
    back_audio = AudioFileClip(settings.BASE_DIR + '/core/back_audios/' + get_selected_music() + '.wav')

    # New audio file
    new_audio_clip = CompositeAudioClip([audio_clip.fx(volumex, 7), back_audio.fx(volumex, 1)])

    return new_audio_clip
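volumex multiplies waveform amplitude, so the factor of 7 strongly boosts the voice track over the background music (and can clip if the source is already loud). A minimal sketch of attaching the mixed track to a video, assuming final.mp4 exists and add_background_audio is importable:

from moviepy.editor import VideoFileClip

video = VideoFileClip("final.mp4")
video = video.set_audio(add_background_audio(video.audio))
video.write_videofile("final_with_music.mp4", audio_codec="aac")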
Example #8
def renderVideos(data):
    cliparr = []

    for entry in data:
        c = VideoFileClip(f"raw_videos/{data[entry]}",
                          target_resolution=(1080, 1920))
        t = TextClip(entry, fontsize=50, color='white')
        # position is (x, y) as fractions of the frame size
        t = t.set_position((0.1, 0.8), relative=True).set_duration(c.duration)
        c = CompositeVideoClip([c, t])

        cliparr.append(c)

    final_clip = concatenate_videoclips(cliparr, method='compose')
    final_clip = final_clip.fx(volumex, 0.3)
    audio_background = AudioFileClip(BACKGROUND_MUSIC_PATH).set_duration(
        final_clip.duration)
    final_audio = CompositeAudioClip([final_clip.audio, audio_background])
    ret_clip = final_clip.set_audio(final_audio)
    return ret_clip
Example #9
def addAudioToVideo(name):
    try:
        os.chdir(os.path.join(settings.BASE_DIR, r"dataset/" + name))
        print(os.listdir())
        audiofile = AudioFileClip('audio.mp3')
        bgaudiofile = AudioFileClip(os.path.join(settings.BASE_DIR, r"bg.mp3"))
        videoclip = VideoFileClip("mygeneratedvideo.mp4")
        new_audioclip = CompositeAudioClip([audiofile, bgaudiofile])
        # use the mixed track; the original called set_audio(audiofile), leaving the composite unused
        videoclip = videoclip.set_audio(new_audioclip)
        videoclip = videoclip.subclip(0, audiofile.duration)
        videoclip = videoclip.speedx(factor=1.1)

        clip = VideoFileClip(
            'https://github.com/mashuk999/green-screen-video-python/blob/main/greenscreen.mp4?raw=true'
        )
        # at least one copy, so concatenate_videoclips always gets a non-empty list
        maskedclipDurationMultiplier = max(1, int(videoclip.duration // clip.duration))
        maskedClipList = []
        for iterator in range(maskedclipDurationMultiplier):
            maskedClipList.append(clip)
        #Adding Anchor
        clip = concatenate_videoclips(maskedClipList)
        masked_clip = clip.fx(vfx.mask_color,
                              color=[109, 246, 16],
                              thr=80,
                              s=5)
        masked_clip = masked_clip.resize(videoclip.size).set_pos(
            ('center', 'bottom'))
        final_clip = CompositeVideoClip([videoclip, masked_clip])
        final_clip = final_clip.resize((460, 720))
        # videoclip = videoclip.fx(speedx, 1.3)
        os.chdir(os.path.join(settings.BASE_DIR, ""))
        final_clip.write_videofile("test" + ".mp4")
    except Exception as e:
        print('Error in addAudioToVideo:')
        print(e)
Example #10
output = "video/" + VIDEO_SUB_PATH + "/slideshow.mp4"
clip0 = VideoFileClip("video/" + VIDEO_SUB_PATH + "/slide0.mp4")
# clip1 = VideoFileClip("video/"+VIDEO_SUB_PATH+"/slide1.mp4")
clip2 = VideoFileClip("video/" + VIDEO_SUB_PATH + "/slide2.mp4")
# clip3 = VideoFileClip("video/"+VIDEO_SUB_PATH+"/slide3.mp4")
clip4 = VideoFileClip("video/" + VIDEO_SUB_PATH + "/slide4.mp4")
clip5 = VideoFileClip("video/" + VIDEO_SUB_PATH + "/slide5.mp4")
clip6 = VideoFileClip("video/" + VIDEO_SUB_PATH + "/slide6.mp4")
clip7 = VideoFileClip("video/" + VIDEO_SUB_PATH + "/slide7.mp4")
# clip8 = VideoFileClip("video/"+VIDEO_SUB_PATH+"/slide6_1.mp4")
# clip9 = VideoFileClip("video/"+VIDEO_SUB_PATH+"/slide7_1.mp4")
# clip10 = VideoFileClip("video/"+VIDEO_SUB_PATH+"/slide8.mp4")
clip11 = VideoFileClip("video/" + VIDEO_SUB_PATH + "/slide10.mp4")

audio_background = AudioFileClip(BG_AUDIO)
final_audio = CompositeAudioClip([audio_background])

# slide1 = CompositeVideoClip([clip1.fx( transfx.crossfadein, delay)])
slide2 = CompositeVideoClip([clip2.fx(transfx.slide_in, delay, 'bottom')])
slide3 = CompositeVideoClip([clip11.fx(transfx.crossfadeout, 2)])
slide4 = CompositeVideoClip([clip4.fx(transfx.slide_out, delay, 'left')])
slide5 = CompositeVideoClip([clip5.fx(transfx.crossfadein, delay)])
slide6 = CompositeVideoClip([clip6.fx(transfx.crossfadein, delay)])
slide7 = CompositeVideoClip([clip7.fx(transfx.crossfadein, delay)])
# slide8 = CompositeVideoClip([clip8.fx( transfx.slide_in, delay, 'right')])
# slide9 = CompositeVideoClip([clip9.fx( transfx.crossfadein, delay)])
# slide10 = CompositeVideoClip([clip10.fx( transfx.crossfadein, delay)])
# slided_clips = concatenate([clip0, slide1, slide2, slide3, slide4, slide6, slide7, slide8, slide9, slide5], padding=-delay, method="compose")
# slided_clips = concatenate([clip0, slide1, slide2, slide6, slide7, slide8, slide9, slide10, slide4, slide3, slide5], padding=-delay, method="compose").set_audio(final_audio)
slided_clips = concatenate(
    [clip0, slide2, slide6, slide7, slide4, slide3, slide5],
    padding=-delay, method="compose").set_audio(final_audio)
Example #11
# parse the arguments
args = parser.parse_args()
video_file = args.video_file
audio_file = args.audio_file
start = args.start
end = args.end
composite = args.composite
volume_factor = args.volume_factor
# print the passed parameters, just for logging
print(vars(args))
# load the video
video_clip = VideoFileClip(video_file)
# load the audio
audio_clip = AudioFileClip(audio_file)
# use the volume factor to increase/decrease volume
audio_clip = audio_clip.volumex(volume_factor)
# if end is not set, use video clip's end
if not end:
    end = video_clip.end
# make sure the audio clip is no longer than the video clip
# by setting the start & end of the audio clip to the `start` and `end` parameters
audio_clip = audio_clip.subclip(start, end)
# composite with the existing audio in the video if composite parameter is set
if composite:
    final_audio = CompositeAudioClip([video_clip.audio, audio_clip])
else:
    final_audio = audio_clip
# add the final audio to the video
final_clip = video_clip.set_audio(final_audio)
# save the final clip
final_clip.write_videofile("final.mp4")
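The snippet references a parser defined earlier in the script; a minimal sketch of what it presumably looks like, with argument names inferred from the attributes accessed above (help strings and defaults are guesses):

import argparse

parser = argparse.ArgumentParser(description="Add an audio track to a video")
parser.add_argument("video_file", help="video file to edit")
parser.add_argument("audio_file", help="audio track to add")
parser.add_argument("-s", "--start", type=float, default=0, help="start time of the audio (seconds)")
parser.add_argument("-e", "--end", type=float, default=None, help="end time of the audio (seconds)")
parser.add_argument("-c", "--composite", action="store_true", help="mix with the video's existing audio instead of replacing it")
parser.add_argument("-v", "--volume-factor", type=float, default=1.0, help="volume multiplier for the added track")

argparse converts the dash in --volume-factor to an underscore, so it is read back as args.volume_factor.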
Example #12
final_audio_dir = os.path.join(mixed_audio_dir, 'final-audio.mp3')
final_video_dir = os.path.join(mixed_audio_dir, 'final-video.mp4')

video_clip = VideoFileClip(source_video_path)
original_audio = video_clip.audio
# extracting the original audio
original_audio.write_audiofile(og_audio_dir)

# another audio file to clip
background_audio_clip = AudioFileClip(source_audio_path)
# making subclip of this of same length of video clip
bg_music = background_audio_clip.subclip(t_start=0, t_end=video_clip.duration)

# now we want the background music to be low
bg_music = bg_music.volumex(0.10)  # 10% of its original volume
# or bg_music = bg_music.fx(afx.volumex, 0.10)

final_audio = CompositeAudioClip([original_audio, bg_music])
final_audio.write_audiofile(final_audio_dir, fps=original_audio.fps)

final_clip = video_clip.set_audio(final_audio)
final_clip.write_videofile(final_video_dir)

# if error in audio file:
# new_audio = AudioFileClip(final_audio_path)
# final_clip = video_clip.set_audio(new_audio)

# if above not work (it is for mp4)
# final_clip.write_videofile(final_video_dir, codec='libx264',
#                            audio_codec='aac')
Example #13
video = combined_clip

#audio
audio_files = []

for i in os.listdir():
    if i.endswith(".mp3") or i.endswith(".wav"):
        audio_files.append(i)

print("Audio files loaded are: " + str(audio_files))

for i, clip in enumerate(audio_files):
    audio_files[i] = AudioFileClip(clip)

#Concatenate audio tracks into one audio clip
combined_audio = concatenate_audioclips(audio_files)

#Set Duration of audioclip
background_audio = combined_audio.set_duration(video.duration)

#combine videos' audio and audio track
video_audio = video.audio
print(background_audio)
print(video_audio)
final_audio = CompositeAudioClip([background_audio, video_audio])
final_clip = video.set_audio(final_audio)

#render
print("Composition successful. Rendering!")
final_clip.write_videofile(output_name, fps=fr, logger=None)
Example #14
def get_chunk(user,
              send_end=None,
              compress_render=False,
              chunk_render=False,
              chunk_number=0,
              all_clips=True):
    try:
        start_time_count = time.time()
        log_name = datetime.now().strftime(
            "%Y.%m.%d-%H-%M-%S"
        ) + "_chunk_service_instance_id_{}_TESTING.log".format(user)

        # Look for the json file in the project folder
        try:
            json_data = sherpaUtils.open_proj(user)
        except FileNotFoundError as e:
            logging.error("File or folder cannot be found")
            logging.error(e)
            results = "Render exited without error [Unable to find file or folder]", 0
            if send_end is not None:
                send_end.send(results)
            return results

        # If a file can be found, but no edit data exists in the file
        if not json_data['CutAwayFootage'] and not json_data[
                'InterviewFootage']:
            logging.error(
                "This project seems to have no edit data recorded. Exiting render session"
            )
            results = "Render exited without error [No edit data exists in JSON]", 0
            if send_end is not None:
                send_end.send(results)
            return results

        # Collecting garbage to clear out memory
        gc.collect()

        # Creating a logging instance for testing
        log_file_name = os.path.join(Config.BASE_DIR, Config.LOGS_LOCATION,
                                     Config.RENDER_LOGS, log_name)

        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s %(levelname)-8s %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S',
                            filename=log_file_name)
        logging.debug(
            "Beginning render instance of project id {}".format(user))

        global_frames = generateEffects.get_fps(user)
        logging.debug("Global fps has been set to {}".format(global_frames))

        vid_type = json_data['VideoType']
        one_to_one = vid_type == "Square"

        # Get timeline lengths
        cutaway_timeline_length = round(
            sherpaUtils.calculate_timeline_length(json_data['CutAwayFootage']),
            2)
        interview_timeline_length = round(
            sherpaUtils.calculate_timeline_length(
                json_data['InterviewFootage']), 2)

        logging.debug("Cutaway length: {}s      Interview length: {}s".format(
            cutaway_timeline_length, interview_timeline_length))

        # Find the smallest timeline length
        smallest_timeline = sherpaUtils.order_picker(
            cutaway_timeline_length, interview_timeline_length)

        if smallest_timeline == "CutAwayFootage":
            if not json_data['CutAwayFootage']:
                logging.debug(
                    "Cutaways is empty, making interview line the new cutaway line"
                )
                json_data['CutAwayFootage'] = json_data['InterviewFootage']
                json_data['InterviewFootage'] = dict()
                cutaway_timeline_length = round(
                    sherpaUtils.calculate_timeline_length(
                        json_data['CutAwayFootage']), 2)
                interview_timeline_length = round(
                    sherpaUtils.calculate_timeline_length(
                        json_data['InterviewFootage']), 2)
                smallest_timeline = sherpaUtils.order_picker(
                    cutaway_timeline_length, interview_timeline_length)
            logging.debug(
                "Smallest timeline is currently the Cut Away Timeline, correcting timelines as necessary"
            )
            blank_no = 1

        # While the smallest timeline is the cut away timeline
        # TODO: THIS ISSUE MAY ONLY OCCUR IF THE CUTAWAY TIMELINE IS SHORTER THAN THE TOP TIMELINE
        while smallest_timeline == 'CutAwayFootage':
            if blank_no > 100:
                logging.debug(
                    "There's something wrong with the blank placement for {}. Terminating project"
                    .format(user))
                results = "Fatal error, blank placement is in infinite loop", 99
                if send_end is not None:
                    send_end.send(results)
                return results

            # Calculate the length of the blank that should be playing at the smallest timeline
            current_interview_clip = sherpaUtils.current_interview_footage(
                json_data, cutaway_timeline_length)[0]

            # Calculate when the clip on the interview timeline should be playing (globally), and return the length that the blank clip should be
            blank_len = round(
                sherpaUtils.calculate_time_at_clip(
                    current_interview_clip['Meta'],
                    json_data['InterviewFootage'],
                    timeline_len=cutaway_timeline_length), 2)

            # Creating a blank clip to insert into time
            blank_name = "end_of_line_blank_" + str(blank_no)

            end_of_line_blank = {
                blank_name: {
                    "Meta": {
                        "name": blank_name,
                        "startTime": 0,
                        "endTime": blank_len,
                        "audioLevel": 1,
                        "order": len(json_data[smallest_timeline]) + 1,
                        "clipType": "Blank"
                    },
                    "edit": {}
                }
            }

            blank_no += 1
            logging.debug(blank_name + ":")
            logging.debug(end_of_line_blank)
            # Insert it into the timeline
            json_data[smallest_timeline].update(end_of_line_blank)

            # Update the length
            cutaway_timeline_length = round(
                (cutaway_timeline_length + blank_len), 2)
            logging.debug("Cutaway length: {}, Inteview length: {}".format(
                cutaway_timeline_length, interview_timeline_length))

            smallest_timeline = sherpaUtils.order_picker(
                cutaway_timeline_length, interview_timeline_length)

        blank_replace = True

        #print(json_data)
        #print(sherpaUtils.calculate_timeline_length(json_data['CutAwayFootage']))

        if not json_data['CutAwayFootage']:
            logging.debug("Only interview exists")
            json_data['CutAwayFootage'] = json_data['InterviewFootage']
            json_data['InterviewFootage'] = dict()
            blank_replace = False

        if not json_data['InterviewFootage']:
            logging.debug("Only CutAway Exists")
            blank_replace = False

        if blank_replace:
            #blank replacement
            full_context_start = 0
            for clip in json_data['CutAwayFootage']:
                full_context_end = round(
                    full_context_start + sherpaUtils.calculate_clip_length(
                        json_data['CutAwayFootage'][clip]['Meta']), 2)
                json_data['CutAwayFootage'][clip]['Meta'][
                    'fullContextStart'] = full_context_start
                json_data['CutAwayFootage'][clip]['Meta'][
                    'fullContextEnd'] = full_context_end
                full_context_start = full_context_end

            full_context_start = 0
            for clip in json_data['InterviewFootage']:
                full_context_end = round(
                    full_context_start + sherpaUtils.calculate_clip_length(
                        json_data['InterviewFootage'][clip]['Meta']), 2)
                json_data['InterviewFootage'][clip]['Meta'][
                    'fullContextStart'] = full_context_start
                json_data['InterviewFootage'][clip]['Meta'][
                    'fullContextEnd'] = full_context_end
                full_context_start = full_context_end

            logging.debug(
                "Full context start and end for all clips calculated")
            print(json_data)

            cp = copy.deepcopy(json_data['CutAwayFootage'])

            for clip in cp:
                # If there's any blank, clean the whole thing up
                if json_data['CutAwayFootage'][clip]['Meta'].get(
                        'clipType') == "Blank":
                    if not json_data['CutAwayFootage'][clip]['edit']:
                        blank_start = json_data['CutAwayFootage'][clip][
                            'Meta']['fullContextStart']
                        blank_end = json_data['CutAwayFootage'][clip]['Meta'][
                            'fullContextEnd']

                        print("BLANK START ", blank_start)
                        print("BLANK END ", blank_end)

                        interview_clip = sherpaUtils.blank_json_replace(
                            blank_start, blank_end, json_data,
                            json_data['CutAwayFootage'][clip])
                        print(interview_clip)
                        if interview_clip is not None:
                            logging.debug(
                                "Blank instance has interview clips playing below it"
                            )
                            if isinstance(interview_clip, list):
                                # Update all the orders from the blank onwards
                                #amnt = (len(interview_clip) - 1)
                                #json_data['CutAwayFootage'] = sherpaUtils.update_order(json_data['CutAwayFootage'], json_data['CutAwayFootage'][clip]['Meta']['order'], amnt)
                                print(interview_clip[0])
                                json_data['CutAwayFootage'][
                                    clip] = interview_clip[0]
                                interview_clip.pop(0)
                                pos = 0
                                count = 9999
                                for _item in interview_clip:
                                    clip_name = str(count)
                                    json_data['CutAwayFootage'][
                                        clip_name] = interview_clip[pos]
                                    pos += 1
                                    count += 1
                            else:
                                json_data['CutAwayFootage'][
                                    clip] = interview_clip

        print(json_data)
        full_context_start = 0
        full_context_end = 0
        for clip in json_data['CutAwayFootage']:
            full_context_end = round(
                full_context_start + sherpaUtils.calculate_clip_length(
                    json_data['CutAwayFootage'][clip]['Meta']), 2)
            #print("START: ", full_context_start)
            #print("END: ", full_context_end)
            json_data['CutAwayFootage'][clip]['Meta'][
                'fullContextStart'] = full_context_start
            json_data['CutAwayFootage'][clip]['Meta'][
                'fullContextEnd'] = full_context_end
            full_context_start = full_context_end
        #print("After")
        print(json_data)
        logging.debug("JSON data is now: ")
        logging.debug(json_data)
        video_list = []
        top_audio = []
        cutaway_timeline = 0
        logging.debug("Clips ready in: {}".format(time.time() -
                                                  start_time_count))
        print("Clips ready in: ", time.time() - start_time_count)
        for clip_name in json_data['CutAwayFootage']:
            logging.debug(clip_name + ":")
            logging.debug("Cutaway Timeline: {}".format(cutaway_timeline))

            # Initialise clip data first
            clip_data = json_data['CutAwayFootage'][clip_name]

            clip_type = clip_data['Meta'].get('clipType')

            # If its a cutaway, just generate the clip and add a caption if it exists
            if clip_type == "CutAway" or clip_type == "Interview":
                logging.debug(clip_name + " is a cutaway.")
                clip = generateEffects.generate_clip(
                    clip_data=clip_data['Meta'],
                    user=user,
                    compressed=compress_render or chunk_render,
                    render_type=one_to_one)
                # Generate caption data
                logging.debug("Generating audio for {}".format(clip_name))
                clip = generateEffects.better_generate_text_caption(
                    clip,
                    clip_data['edit'],
                    compressed=compress_render or chunk_render,
                    render_type=one_to_one)
                logging.debug(
                    "Inserting audio for clip '{}'     Clip Audio is {}   Audio length is {}"
                    .format(clip_name, clip.audio, clip.duration))
                top_audio.append(clip.audio)

            # Generate image
            elif clip_type == "Image":
                logging.debug(clip_name + " is an image.")
                clip = generateEffects.generate_image_clip(
                    clip_data['Meta'], user)
                logging.debug("Generating audio for {}".format(clip_name))
                clip = generateEffects.better_generate_text_caption(
                    clip,
                    clip_data['edit'],
                    compressed=compress_render or chunk_render,
                    render_type=one_to_one)
                logging.debug(
                    "Inserting audio for clip '{}'     Clip Audio is {}   Audio length is {}"
                    .format(clip_name, clip.audio, clip.duration))
                top_audio.append(clip.audio)

            # If it's a blank
            elif clip_type == "Blank" or clip_type == "CustomBlank":
                logging.debug(clip_name + " is a Blank.")
                clip = generateEffects.generate_blank(
                    clip_data['Meta'],
                    compressed=compress_render or chunk_render,
                    render_type=one_to_one)
                logging.debug("Generating audio for {}".format(clip_name))
                clip = generateEffects.better_generate_text_caption(
                    clip,
                    clip_data['edit'],
                    compressed=compress_render or chunk_render,
                    render_type=one_to_one)
                logging.debug(
                    "Inserting audio for clip '{}'     Clip Audio is {}   Audio length is {}"
                    .format(clip_name, clip.audio, clip.duration))
                top_audio.append(clip.audio)

            # Insert clip into correct position in array
            logging.debug("Inserted clip '{}' into pos {}.".format(
                clip_name, clip_data['Meta'].get('order') - 1))

            cutaway_timeline = round((cutaway_timeline + clip.duration), 2)
            video_list.append(clip)

        logging.debug("Final Cutaway Timeline: {}".format(cutaway_timeline))

        # Video list
        logging.debug("Video list:")
        logging.debug(video_list)

        # Create audio from the interview Footage
        bottom_audio = generateEffects.interview_audio_builder(
            interview_data=json_data['InterviewFootage'], user=user)

        # We need to insert the intro if it exists
        # If its a chunk render, ignore input
        if not chunk_render:
            if os.path.exists(os.path.join(attach_dir, user, 'Template.json')):
                logging.debug("Creating intro, this may take some time")
                try:
                    intro, transparent = getAndProcessTemplate.getandprocesstemplate(
                        user)
                    if transparent:
                        logging.debug("Transparent intro")
                        logging.debug(
                            "Creating composite of intro and first clip in render"
                        )
                        # First clip has been removed from project
                        first_clip = video_list.pop(0)
                        # Create composite, and insert back into project
                        intro = CompositeVideoClip([first_clip, intro])
                        logging.debug(
                            "Replacing first clip in project with intro")
                        video_list.insert(0, intro)
                        # No sound adding necessary, transparent intros take sound from first clip
                    else:
                        logging.debug("Opaque intro")
                        intro.audio = AudioFileClip(
                            os.path.join(resource_path,
                                         'silence.mp3')).set_duration(
                                             intro.duration)
                        video_list.insert(0, intro)
                        top_audio.insert(0, intro.audio)
                except:
                    logging.error("Error occured during intro generation")
                    logging.exception('')
            else:
                logging.debug(
                    "Intro json data can't be found, continuing render service"
                )

        # Concatenate the clips together
        top_audio = concatenate_audioclips(top_audio)
        logging.debug("Top audio len: {}".format(round(top_audio.duration, 2)))

        # Try adding the music if it exists
        logging.debug("Attempting to add music...")
        try:
            music_data = json_data['MusicTrackURL']
            music_audio_lvl = float(json_data['MusicAudioLevel'])
            music = generateEffects.open_music(music_data, music_audio_lvl,
                                               cutaway_timeline)
            # If the video is longer than the music, replay the music to cover the remainder
            if music.duration < cutaway_timeline:
                music = CompositeAudioClip([
                    music,
                    generateEffects.open_music(
                        music_data, music_audio_lvl,
                        cutaway_timeline - music.duration)
                ])
            top_audio = CompositeAudioClip([music, top_audio])
            logging.debug("Music added successfully")
        except Exception as e:
            logging.debug(
                "Exception occured in render - during music audio building:")
            logging.debug(e)
            finished_audio = top_audio

        # Try adding the voice over
        logging.debug("Attempting to add voice over...")
        try:
            voice_data = json_data['VoiceTrackURL']
            voice_audio_lvl = float(json_data['VoiceoverAudioLevel'])
            voice = generateEffects.open_voice(voice_data, voice_audio_lvl,
                                               user)
            if voice.duration > cutaway_timeline:
                voice = voice.set_end(cutaway_timeline)
            voice = voice.audio_fadeout(2)
            top_audio = CompositeAudioClip([voice, top_audio])
            logging.debug("Music added successfully")
        except Exception as e:
            logging.debug(
                "Exception occured in render - during voiceover audio building:"
            )
            logging.debug(e)
            finished_audio = top_audio

        # Try concatenating the top and bottom audio lines together
        logging.debug("Attepting to add interview audio...")
        try:
            bottom_audio = concatenate_audioclips(bottom_audio)
            logging.debug("Bottom audio len: {}".format(
                round(bottom_audio.duration, 2)))
            finished_audio = CompositeAudioClip([top_audio, bottom_audio])
            logging.debug("Interview audio addedd successfully")
        except Exception as e:
            logging.debug(
                "Exception occured in render - during interview audio building:"
            )
            logging.debug(e)
            finished_audio = top_audio

        logging.debug("Finished audio len: {}".format(
            round(finished_audio.duration, 2)))

        # Concatenate the video files together
        finished_video = concatenate_videoclips(video_list)
        finished_video = finished_video.set_audio(finished_audio)

        try:
            if not chunk_render:
                if one_to_one:
                    logging.debug("Defunct. Square render handled elsewhere.")
                    #insta_rez = (round(1080*0.44357), round(1080*0.44357)) if compress_render else (1080, 1080)
                    #logging.debug("Resizing video to {}".format(insta_rez))
                    #finished_video = crop(finished_video, x1=0, y1=0, x2=insta_rez[0], y2=insta_rez[1])
        except:
            logging.error("Error occured in Square Video resize")
            logging.exception("")

        # Defining path here is cleaner
        vid_name = user + "_com_chunk_edited_TESTING.mp4"
        vid_dir = os.path.join(attach_dir, user, vid_name)

        logging.debug(
            "Rendering {} clip(s) together, of total length {}.".format(
                len(video_list), round(finished_video.duration, 2)))
        logging.debug("Writing '{}' to {}".format(vid_name, vid_dir))

        logging.debug("Videos placed in {} seconds".format(time.time() -
                                                           start_time_count))

        if chunk_render:
            if all_clips:
                chunk_len = Config.PREVIEW_CHUNK_LENGTH
                start_time = 0
                end_time = start_time + chunk_len
                finished_dur = round(finished_video.duration, 2)
                preview_chunks = []
                segment_no = ceil(finished_dur / chunk_len)
                # the final (hangover) segment may be shorter than chunk_len

                logging.debug(
                    "Video duration: {}s  /{}s = {} segments      full segments: {}"
                    .format(finished_dur, chunk_len, finished_dur / chunk_len,
                            segment_no))

                for i in range(segment_no):
                    preview_clip = finished_video.subclip(
                        start_time, min(start_time + chunk_len, finished_dur))
                    logging.debug("Clip is currently from {} to {}".format(
                        start_time,
                        round(min(start_time + chunk_len, finished_dur), 2)))

                    start_time += chunk_len
                    logging.debug("Segment {} is {}s long".format(
                        i, round(preview_clip.duration, 2)))
                    logging.debug("Global framerate: {}".format(global_frames))
                    preview_clip.fps = global_frames
                    if preview_clip.duration < chunk_len / 2:
                        logging.debug(
                            "Clip is smaller than {}s, so appending it to last clip instead."
                            .format(chunk_len / 2))
                        preview_clip = concatenate_videoclips(
                            [preview_clip, preview_chunks[-1]])
                        del preview_chunks[-1]
                    preview_chunks.append(preview_clip)

                logging.debug("Preview chunk list: ")
                logging.debug(preview_chunks)

                logging.debug("Rendering out {} videos in {}s chunks".format(
                    len(preview_chunks), chunk_len))

                for video in preview_chunks:
                    try:
                        vid_name = user + "_com_chunk_" + str(
                            preview_chunks.index(video)) + "_edited.mp4"
                        vid_dir = os.path.join(attach_dir, user, vid_name)
                        logging.debug(
                            "Global framerate: {}".format(global_frames))
                        logging.debug("Rendering {} at time {}s".format(
                            vid_name, (time.time() - start_time_count)))
                        video.write_videofile(vid_dir,
                                              threads=8,
                                              preset="ultrafast",
                                              bitrate="1000k",
                                              audio_codec="aac",
                                              remove_temp=True,
                                              fps=global_frames)
                        results = "Chunk {} Rendered Successfully".format(
                            str(preview_chunks.index(video))), 1
                        results = "Chunk 1 Rendered Successfully", 1
                        if send_end is not None:
                            send_end.send(results)
                    except:
                        logging.error(
                            "Fatal error occured while writing video - Chunk Render"
                        )
                        logging.exception("")
                        logging.error(
                            "Exiting program without writing video file correctly"
                        )
                        results = "Video not rendered [ERROR OCCURED, VIEW LOGS '{}' FOR MORE DETAILS]".format(
                            log_name), 99
                        if send_end is not None:
                            send_end.send(results)
                        return results
                #results = "Video Rendered Successfully", 1

                logging.debug("All chunks rendered to {}".format(vid_dir))
                logging.debug(
                    "Completed in {} seconds".format(time.time() -
                                                     start_time_count))
                logging.debug("Closing render instance - Chunk")
                if send_end is not None:
                    send_end.send(results)
                return results

            else:
                start_time = chunk_number * Config.PREVIEW_CHUNK_LENGTH
                end_time = min(start_time + Config.PREVIEW_CHUNK_LENGTH,
                               finished_video.duration)

                vid_name = user + "_com_chunk_" + str(
                    chunk_number) + "_edited.mp4"
                vid_dir = os.path.join(attach_dir, user, vid_name)

                finished_video = finished_video.subclip(
                    start_time, min(end_time, finished_video.end))
                finished_video.write_videofile(vid_dir,
                                               threads=8,
                                               preset="ultrafast",
                                               bitrate="1000k",
                                               audio_codec="aac",
                                               remove_temp=True,
                                               fps=global_frames)
                print(("Done in {} seconds".format(time.time() -
                                                   start_time_count)))
                logging.debug("Done in {} seconds".format(time.time() -
                                                          start_time_count))
                logging.debug("File '{}' successfully written to {}".format(
                    vid_name, vid_dir))
                logging.debug("Closing render instance - Chunk")
                results = "Chunk {} Rendered Successfully".format(
                    chunk_number), 1
                if send_end is not None:
                    send_end.send(results)
                return results

        if compress_render:
            logging.debug("Running compress render instance")
            try:
                vid_name = user + "_com_preview_edited.mp4"
                vid_dir = os.path.join(attach_dir, user, vid_name)
                logging.debug("Global framerate: {}".format(global_frames))
                finished_video.write_videofile(vid_dir,
                                               threads=8,
                                               bitrate="1000k",
                                               audio_codec="aac",
                                               remove_temp=True,
                                               fps=global_frames)
                results = "Video Rendered Successfully", 1
                logging.debug("File '{}' successfully written to {}".format(
                    vid_name, vid_dir))
                logging.debug(
                    "Completed in {} seconds".format(time.time() -
                                                     start_time_count))
                logging.debug("Closing render instance - Compress")
                if send_end is not None:
                    send_end.send(results)
                return results
            except:
                logging.error(
                    "Fatal error occured while writing video - Compressed Render"
                )
                logging.exception("")
                logging.error(
                    "Exiting program without writing video file correctly")
                results = "Video not rendered [ERROR OCCURED, VIEW LOGS '{}' FOR MORE DETAILS]".format(
                    log_name), 99
                if send_end is not None:
                    send_end.send(results)
                return results
        else:
            logging.debug("Running full render instance")
            try:
                vid_name = user + "_edited.mp4"
                vid_dir = os.path.join(attach_dir, user, vid_name)
                logging.debug("Rendering {}".format(vid_name))
                logging.debug("Global framerate: {}".format(global_frames))
                finished_video.write_videofile(vid_dir,
                                               threads=8,
                                               audio_codec="aac",
                                               bitrate="6000k",
                                               remove_temp=True,
                                               fps=global_frames)
                results = "Video Rendered Successfully", 1
                logging.debug("File '{}' successfully written to {}".format(
                    vid_name, vid_dir))
                logging.debug(
                    "Completed in {} seconds".format(time.time() -
                                                     start_time_count))
                logging.debug("Closing render instance - Full")
                if send_end is not None:
                    send_end.send(results)
                return results

            except:
                logging.error(
                    "Fatal error occured while writing video - Full Render")
                logging.exception("")
                logging.error(
                    "Exiting program without writing video file correctly")
                results = "Video not rendered [ERROR OCCURED, VIEW LOGS '{}' FOR MORE DETAILS]".format(
                    log_name), 99
                if send_end is not None:
                    send_end.send(results)
                return results
    except:
        logging.error(
            "Unknown Fatal Error occured during render instance of '{}'".
            format(user))
        logging.exception("")
        logging.error("Exiting program without writing video file correctly")
        results = "Video not rendered [ERROR OCCURED, VIEW LOGS '{}' FOR MORE DETAILS]".format(
            log_name), 99
        if send_end is not None:
            send_end.send(results)
        return results
Example #15
# Afterwards we can also add background music to the video; moviepy is the go-to package for video processing
video = VideoFileClip("image/ball.mp4")
print(video.duration)
audio = AudioFileClip("sound/bg_music.ogg")
audio = audio.subclip(0, video.duration)
print(audio.duration)
# Note: don't merge a video and an audio track of different lengths directly. The total
# length follows the longer one: if the video is longer, the tail plays silent; if the
# audio is longer, the video reports the longer duration but crashes partway through playback.
video = video.set_audio(audio)
video.write_videofile("image/ball2.mp4")

# A more flexible approach is to cut the video into subclips, mix sound into each
# subclip's audio and set it as the background, then concatenate the clips back together.
# For example, starting from ball2.mp4, mix in laughter at the 10s mark.
video = VideoFileClip("image/ball2.mp4")
video_clip1 = video.subclip(0, 10)
video_clip2 = video.subclip(10, video.duration)

laugh = AudioFileClip("sound/laugh.wav")

audio_clip1 = video_clip1.audio
audio_clip2 = video_clip2.audio

# Note: the two audio tracks being mixed don't need to be the same length
audio_clip2 = CompositeAudioClip([audio_clip2, laugh])

video_clip2 = video_clip2.set_audio(audio_clip2)

video = concatenate_videoclips([video_clip1, video_clip2])
video.write_videofile("image/ball3.mp4")
Example #16
 def join_audios(self, audio_1: str, audio_2: str,
                 destination: str) -> None:
     audio_clip_1 = AudioFileClip(audio_1)
     audio_clip_2 = AudioFileClip(audio_2)
     new_audio_clip = CompositeAudioClip([audio_clip_1, audio_clip_2])
     new_audio_clip.write_audiofile(destination, fps=44100)
Example #17
# pip install moviepy

from moviepy.editor import VideoFileClip, concatenate_videoclips, vfx, AudioFileClip, afx, CompositeAudioClip

clip1 = VideoFileClip("one.mp4").subclip(10,20).fx(vfx.fadein, 1).fx(vfx.fadeout, 1)
clip2 = VideoFileClip("two.mp4").subclip(10,20).fx(vfx.fadein, 1).fx(vfx.fadeout, 1)
clip3 = VideoFileClip("one.mp4").subclip(20,30).fx(vfx.fadein, 1).fx(vfx.fadeout, 1)
clip4 = VideoFileClip("one.mp4").subclip(10,20).fx(vfx.colorx, 1.5)\
    .fx(vfx.lum_contrast, 0, 50, 128)

audio = AudioFileClip("intro.mp4").fx(afx.audio_fadein, 1).fx(afx.volumex, 0.1)

combined = concatenate_videoclips([clip1, clip2, clip3, clip4])
combined.audio = CompositeAudioClip([audio])
combined.write_videofile("combined.mp4")
Example #18
def downloadRedditVideos(subreddit, time=1000, filter="month", output="output.mp4"):
    for filename in os.listdir("videos/"):
        file_path = os.path.join("videos/", filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
        except Exception as e:
            print("Failed to delete %s. Reason: %s" % (file_path, e))

    addTime = 0
    i = 0
    if subreddit == 1:
        subreddit = "perfectlycutscreams"
    elif subreddit == 2:
        subreddit = "watchpeopledieinside"
    elif subreddit == 3:
        subreddit = "contagiouslaughter"
    elif subreddit == 4:
        subreddit = "livestreamfail"
    elif subreddit == 5:
        subreddit = "whatcouldgowrong"

    for submission in reddit.subreddit(subreddit).hot(limit=500):
        if submission.media is not None:
            if (
                "https://clips.twitch.tv/" in submission.url
                and "tt_" not in submission.url
            ):
                if addTime < time:
                    dl_clip(submission.url, str(i).rjust(2, "0"))
                    videoD = VideoFileClip(
                        "videos/" + str(i).rjust(2, "0") + ".mp4"
                    ).duration
                    addTime += videoD
                    i += 1
            elif "reddit_video" in submission.media:
                if (
                    addTime < time
                    and submission.media["reddit_video"]["duration"] < 200
                ):
                    video = submission.media["reddit_video"]["fallback_url"]
                    v = requests.get(video)

                    open("tmp/video.mp4", "wb").write(v.content)
                    a = requests.get(re.sub("[^/]*$", "audio", video, 1))
                    if a.status_code != 200:
                        b = requests.get(re.sub("[^/]*$", "DASH_audio.mp4", video, 1))
                        if b.status_code != 200:
                            open("videos/" + str(i).rjust(2, "0") + ".mp4", "wb").write(
                                v.content
                            )
                        else:
                            open("tmp/audio.mp4", "wb").write(b.content)
                            combined = VideoFileClip("tmp/video.mp4")
                            combined.audio = CompositeAudioClip(
                                [AudioFileClip("tmp/audio.mp4")]
                            )
                            combined.write_videofile(
                                "videos/" + str(i).rjust(2, "0") + ".mp4",
                                temp_audiofile="tmp/tmp_audio.mp3",
                            )

                    else:
                        open("tmp/audio.mp4", "wb").write(a.content)
                        combined = VideoFileClip("tmp/video.mp4")
                        combined.audio = CompositeAudioClip(
                            [AudioFileClip("tmp/audio.mp4")]
                        )
                        combined.write_videofile(
                            "videos/" + str(i).rjust(2, "0") + ".mp4",
                            temp_audiofile="tmp/tmp_audio.mp3",
                        )

                    os.unlink("tmp/video.mp4")
                    os.unlink("tmp/audio.mp4")

                    addTime += submission.media["reddit_video"]["duration"]
                    print("Video seconds: " + str(addTime))
                    i += 1
Example #19
        segment_index += 1
    else:
        raise NotImplementedError('The segment type ' +
                                  song_segments[segment_index][1] +
                                  ' is not supported')

# for s in range(len(showcase_segments)):
#     if s == 0:
#         downtime_length = showcase_segments[s][0] - (period * 0.6)
#     else:
#         downtime_length = (showcase_segments[s][0] - showcase_segments[s - 1][1]) - (period * 0.6)
#     exposition_clip_files = random.sample(clip_files, math.floor(downtime_length / 3.5) )
#     for exp_clip_file in exposition_clip_files:
#         clip = VideoFileClip(join(clip_folder, exp_clip_file))
#         clip = clip.set_duration(downtime_length / len(exposition_clip_files))
#         clips.append(clip)
#         print('Downtime Clip Added!')
#
#     offset = clip_index
#     for i in range(math.floor((showcase_segments[s][1] - showcase_segments[s][0]) / period)):
#         clip = VideoFileClip(join(clip_folder, clip_files[i + offset]))
#         clip = clip.subclip(5 - (period * .6), 5 + (period * .4))
#         clips.append(clip)
#         print('Showcase Clip Added!')
#         clip_index += 1

output = concatenate_videoclips(videos)
output = output.set_audio(CompositeAudioClip([output.audio, song]))
# final_clip = final_clip.set_audio(AudioFileClip(song_file))
output.write_videofile('./output2.mp4')
Example #20
def render_video(user, send_end=None, compress_render=False, chunk_render=False):
    """
    User: String -> The ID of the project (User is just a hangover from previous builds)
    compress_render: Bool -> Set to true if you want this function to return a quick render
    """
    try:
        if chunk_render:
            chunk.get_chunk(sherpaUtils.open_proj(user), user, 1)
        else:
            log_name = datetime.now().strftime("%Y.%m.%d-%H-%M-%S") + "_render_service_instance_id_{}.log".format(user)

            # Collecting garbage to clear out memory
            gc.collect()

            # Creating a logging instance for testing
            log_file_name = os.path.join(
                Config.BASE_DIR,
                Config.LOGS_LOCATION,
                Config.RENDER_LOGS, 
                log_name
            )

            logging.basicConfig(
                level=logging.DEBUG, 
                format='%(asctime)s %(levelname)-8s %(message)s',
                datefmt='%Y-%m-%d %H:%M:%S',
                filename=log_file_name)
            logging.debug("Beginning render instance of project id {}".format(user))

            # For logging
            start_time = time.time()

            # Finished timeline video
            video_list = []

            # Top audio timeline
            top_audio = []

            # Define current length of video, in terms of the 'main' timeline
            cutaway_timeline = 0

            # Look for the json file in the project folder
            try:
                json_file = sherpaUtils.open_proj(user)
            except FileNotFoundError as e:
                logging.error("File or folder cannot be found")
                logging.error(e)
                results = "Render exited without error [Unable to find file or folder]", 0        
                if send_end is not None:
                    send_end.send(results)
                return results


            # If a file can be found, but no edit data exists in the file
            if not json_file['CutAwayFootage'] and not json_file['InterviewFootage']:
                logging.error("This project seems to have no edit data recorded. Exiting render session")
                results = "Render exited without error [No edit data exists in JSON]", 0        
                if send_end is not None:
                    send_end.send(results)            
                return results


            # Get timeline lengths
            cutaway_timeline_length = round(sherpaUtils.calculate_timeline_length(json_file['CutAwayFootage']), 2)
            interview_timeline_length = round(sherpaUtils.calculate_timeline_length(json_file['InterviewFootage']), 2)

            logging.debug("Cutaway length: {}s      Interview length: {}s".format(cutaway_timeline_length, interview_timeline_length))

            # Find the smallest timeline length
            smallest_timeline = sherpaUtils.order_picker(cutaway_timeline_length, interview_timeline_length)

            if smallest_timeline == "CutAwayFootage":
                if not json_file['CutAwayFootage']:
                    logging.debug("Cutaways is empty, making interview line the new cutaway line")
                    json_file['CutAwayFootage'] = json_file['InterviewFootage']
                    json_file['InterviewFootage'] = dict()
                    cutaway_timeline_length = round(sherpaUtils.calculate_timeline_length(json_file['CutAwayFootage']), 2)
                    interview_timeline_length = round(sherpaUtils.calculate_timeline_length(json_file['InterviewFootage']), 2)        
                    smallest_timeline = sherpaUtils.order_picker(cutaway_timeline_length, interview_timeline_length)
                logging.debug("Smallest timeline is currently the Cut Away Timeline, correcting timelines as necessary")
                blank_no = 1

            # While the smallest timeline is the cut away timeline
            # TODO: THIS ISSUE MAY ONLY OCCUR IF THE CUTAWAY TIMELINE IS SHORTER THAN THE TOP TIMELINE
            while smallest_timeline == 'CutAwayFootage':
                if blank_no > 100:
                    logging.debug("There's something wrong with the blank placement for {}. Terminating project".format(user))            
                    results = "Fatal error, blank placement is in infinite loop", 99        
                    if send_end is not None:
                        send_end.send(results)
                    return results

                # Calculate the length of the blank that should be playing at the smallest timeline 
                current_interview_clip = sherpaUtils.current_interview_footage(
                    json_file, 
                    cutaway_timeline_length
                )[0]

                # Calculate when the clip on the interview timeline should be playing (globally), and return the length that the blank clip should be
                blank_len = sherpaUtils.calculate_time_at_clip(
                    current_interview_clip['Meta'], 
                    json_file['InterviewFootage'], 
                    timeline_len=cutaway_timeline_length
                )

                # Creating a blank clip to insert into time
                blank_name = "end_of_line_blank_" + str(blank_no)

                end_of_line_blank = {
                    blank_name: {
                        "Meta": {
                            "name": blank_name,
                            "startTime": 0,
                            "endTime": blank_len,
                            "audioLevel": 1,
                            "order": len(json_file[smallest_timeline]) + 1,
                            "clipType": "Blank"
                        },
                        "edit": {}
                    }
                }

                blank_no += 1
                logging.debug(blank_name + ":")
                logging.debug(end_of_line_blank)
                # Insert it into the timeline
                json_file[smallest_timeline].update(end_of_line_blank)

                # Update the length
                cutaway_timeline_length = round((cutaway_timeline_length+blank_len),2)
                logging.debug("Cutaway length: {}, Inteview length: {}".format(cutaway_timeline_length, interview_timeline_length))
                    
                smallest_timeline = sherpaUtils.order_picker(cutaway_timeline_length, interview_timeline_length)

            # Assemble all the clips - run through all the cutaway footage
            for clip_name in json_file['CutAwayFootage']:

                logging.debug(clip_name + ":")
                logging.debug("Cutaway Timeline: {}".format(cutaway_timeline))

                # Initialise clip data first
                clip_data = json_file['CutAwayFootage'][clip_name]

                clip_type = clip_data['Meta'].get('clipType')

                # If its a cutaway, just generate the clip and add a caption if it exists
                if clip_type == "CutAway" or clip_type == "Interview":
                    logging.debug(clip_name + " is a cutaway.")
                    clip = generateEffects.generate_clip(clip_data=clip_data['Meta'], user=user, compressed=compress_render or chunk_render)
                    # Generate caption data
                    logging.debug("Generating audio for {}".format(clip_name))
                    clip = generateEffects.better_generate_text_caption(clip, clip_data['edit'], compressed=compress_render or chunk_render)
                    logging.debug("Inserting audio for clip '{}'     Clip Audio is {}   Audio length is {}".format(clip_name, clip.audio, clip.duration))
                    top_audio.insert(clip_data['Meta'].get('order'), clip.audio)
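                    # top_audio is a plain list at this point; it is concatenated into a single track after this loop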

                # Generate image
                elif clip_type == "Image":
                    logging.debug(clip_name + " is an image.")
                    clip = generateEffects.generate_image_clip(clip_data['Meta'], user)            
                    logging.debug("Generating audio for {}".format(clip_name))
                    clip = generateEffects.better_generate_text_caption(clip, clip_data['edit'], compressed=compress_render or chunk_render)
                    logging.debug("Inserting audio for clip '{}'     Clip Audio is {}   Audio length is {}".format(clip_name, clip.audio, clip.duration))
                    top_audio.insert(clip_data['Meta'].get('order'), clip.audio)

                # If it's a blank
                elif clip_type == "Blank":
                    # These values are used later in the blank process
                    some_filler = False
                    total_insert_length = 0                            
                    logging.debug("Inserting audio for blank '{}'".format(clip_name))
                    top_audio.insert(clip_data['Meta'].get('order'), generateEffects.get_blank_audio(clip_data))
                    # We need to see if we can find any clips to replace the blank with
                    try:
                        logging.debug(clip_name + " is a blank.")
                        # We need to find the clip that should be playing in the interview timeline
                        cutaway_blank_len = sherpaUtils.calculate_clip_length(clip_data['Meta'])

                        # Gets clip on interview timeline that should be playing, as well as its start time on the interview timeline
                        relevant_interview_clip_data, interview_start_time = sherpaUtils.current_interview_footage(
                            data=json_file,
                            clip_timeline=cutaway_timeline
                        )

                        # rounding for simple calculation
                        interview_start_time = round(interview_start_time, 2)

                        # Set metadata for clip rendering and order for timeline insert
                        interview_clip_meta_data = relevant_interview_clip_data['Meta']
                        interview_clip_ord = interview_clip_meta_data.get('order')

                        # Difference between the current time in the video, and the start time of the interview clip
                        dif = round(cutaway_timeline-interview_start_time, 2)

                        
                        logging.debug("Interview clip starts at {}, Blank clip starts at {}, so difference is {}".format(
                            interview_start_time,
                            cutaway_timeline,
                            dif)
                        )

                        # Define clip length
                        clip_dur = sherpaUtils.calculate_clip_length(clip_data['Meta'])

                        sub_clip_start = (interview_clip_meta_data.get('startTime')) + dif
                        # Get end of clip or end of blank, whichever comes first
                        sub_clip_end = min(
                            ((interview_clip_meta_data.get('startTime')) + dif + clip_dur), 
                            interview_clip_meta_data.get('endTime')
                        )
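                        # e.g. an interview clip trimmed to 3s-10s that starts at 4s globally, with a blank starting at 6s (dif = 2) and lasting 3s, maps to the subclip from 5s to min(8s, 10s) = 8s (illustrative numbers)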

                        # Round data off for simple calculation 
                        sub_clip_start = round(sub_clip_start, 2)
                        sub_clip_end = round(sub_clip_end, 2)

                        logging.debug("Sub clip starts at {}, ends at {}".format(sub_clip_start, sub_clip_end))

                        sub_clip_length = sub_clip_end - sub_clip_start
                        total_insert_length += sub_clip_length

                        interview_clip_type = interview_clip_meta_data.get('clipType')

                        # Create video clip from data found above
                        # Audio is not needed, we will insert it later
                        if interview_clip_type == "Interview":
                            logging.debug("Replacing blank {} with interview clip {}".format(
                                clip_data['Meta'].get('name'),
                                interview_clip_meta_data.get('name')
                            ))
                            # Create clip with parameterised start and end times
                            clip = generateEffects.generate_clip(
                                clip_data=interview_clip_meta_data,
                                user=user,
                                start=sub_clip_start,
                                end=sub_clip_end,
                                compressed=compress_render or chunk_render
                            )

                            clip = generateEffects.better_generate_text_caption(clip, relevant_interview_clip_data['edit'], compressed=compress_render or chunk_render)

                        # Blanks from the cutaway can be placed instead
                        elif interview_clip_type == "Blank":
                            
                            logging.debug("Replacing blank {} with interview blank {}".format(
                                clip_data['Meta'].get('name'),
                                interview_clip_meta_data.get('name')
                            ))
                            clip = generateEffects.generate_blank(interview_clip_meta_data, start=sub_clip_start, end=sub_clip_end, compressed=compress_render or chunk_render)
                            clip = generateEffects.better_generate_text_caption(clip, relevant_interview_clip_data['edit'], compressed=compress_render or chunk_render)

                        # TODO: Careful here, rounding could cause issues
                        total_insert_length = round(total_insert_length, 2)
                        logging.debug("Insert lenght: {}".format(total_insert_length))

                        # If the blank length is longer than the length of the videos being inserted
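                        # isclose() tolerates the float drift introduced by the repeated rounding above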
                        while not isclose(total_insert_length, cutaway_blank_len):
                            some_filler = True
                            logging.debug("Clip length didn't suffice for blank, adding more files as necessary")

                            time_to_fill = cutaway_blank_len - total_insert_length

                            time_to_fill = round(time_to_fill, 2)

                            logging.debug("Time left to fill is {}".format(time_to_fill))

                            interview_clip_ord += 1

                            next_clip_data = sherpaUtils.give_clip_order(interview_clip_ord, json_file['InterviewFootage'])

                            # The clip should be the same size as the time to fill if possible,
                            # but the next clip might not be big enough either,
                            # so we may need to continue on to further clips.
                            # To stop bugs, we set our end time as either the time left to fill or the length of the clip
                            end_time = round(min(
                                next_clip_data['Meta'].get('startTime') + time_to_fill,
                                sherpaUtils.calculate_clip_length(next_clip_data['Meta'])
                            ), 2)

                            logging.debug("End time for clip is {}".format(end_time))


                            if next_clip_data['Meta'].get('clipType') == "Interview":
                                next_clip = generateEffects.generate_clip(
                                    clip_data=next_clip_data['Meta'],
                                    end=next_clip_data['Meta'].get('startTime')+end_time,
                                    user=user,
                                    compressed=compress_render or chunk_render
                                )
            
                                next_clip = generateEffects.better_generate_text_caption(next_clip, next_clip_data['edit'], compressed=compress_render or chunk_render)
            
                            elif next_clip_data['Meta'].get('clipType') == "Blank":
                                next_clip = generateEffects.generate_blank(next_clip_data['Meta'], end=end_time, compressed=compress_render or chunk_render)
                                next_clip = generateEffects.better_generate_text_caption(next_clip, next_clip_data['edit'], compressed=compress_render or chunk_render)

                            total_insert_length += next_clip.duration
                            logging.debug("Total insert length {}".format(total_insert_length))

                            clip = concatenate_videoclips([clip, next_clip])
                        logging.debug("Blank clip '{}' has been replaced with interview clips as necessary".format(clip_name))

                    # No clip can be found, generate the clip from the blank data in the cutaway timeline
                    except TypeError:
                        if some_filler:
                            logging.debug("Some suitable clips have been found from the interview clip, but a discrepancy has still occurred")
                            logging.debug("{}s of footage failed to be found in the interview footage".format(time_to_fill))
                            logging.debug("Inserting interview clips that have been found.")
                        else:
                            logging.error("TypeError in render - No clip found to replace blank '{}'".format(clip_data['Meta'].get("name")))
                            logging.debug("Rendering out blank file found in cutaway timeline instead")
                            clip = generateEffects.generate_blank(clip_data['Meta'], compressed=compress_render or chunk_render)
                            logging.debug("Generating audio for {}".format(clip_name))
                            clip = generateEffects.better_generate_text_caption(clip, clip_data['edit'], compressed=compress_render or chunk_render)


                # Insert the clip into its correct position in the video list
                logging.debug("Inserting clip '{}' into pos {}.".format(clip_name, clip_data['Meta'].get('order')-1))

                cutaway_timeline = round((cutaway_timeline+clip.duration), 2)
                video_list.insert(clip_data['Meta'].get('order')-1, clip)

            # Video list
            logging.debug("Video list:")
            logging.debug(video_list)

            # Create audio from the interview footage
            bottom_audio = generateEffects.interview_audio_builder(interview_data=json_file['InterviewFootage'], user=user)

            # We need to insert the intro if it exists
            if os.path.exists(os.path.join(attach_dir, user, "intro.mp4")):
                logging.debug("Intro clip found")
                logging.error("WE ARE CURRENTLY NOT IMPLEMENTING INTROS")
                """       
                intro_clip = generateEffects.create_intro_clip(user, compressed=compress_render or chunk_render)
                video_list.insert(0, intro_clip)
                logging.debug("Inserting audio for clip '{}'     Clip Audio is {}   Audio length is {}".format(intro_clip, intro_clip.audio, intro_clip.duration))
                top_audio.insert(0, intro_clip.audio)
                bottom_audio.insert(0, intro_clip.audio)"""
            else:
                logging.error("No intro clip found, continuing")

            # Concatenate the clips together
            top_audio = concatenate_audioclips(top_audio)    
            logging.debug("Top audio len: {}".format(round(top_audio.duration, 2)))

                
            # Try adding the music if it exists
            logging.debug("Attempting to add music...")
            try:
                music_data = json_file['MusicTrackURL']
                music_audio_lvl = float(json_file['MusicAudioLevel'])
                music = generateEffects.open_music(music_data, music_audio_lvl, cutaway_timeline)
                # If the video is longer than the music, replay the track for the remaining time
                if music.duration < cutaway_timeline:
                    music = CompositeAudioClip([music, generateEffects.open_music(music_data, music_audio_lvl, cutaway_timeline - music.duration)])
                top_audio = CompositeAudioClip([top_audio, music])
                logging.debug("Music added successfully")
            except Exception as e:
                logging.debug("Exception occured in render - during music audio building:")
                logging.debug(e)
                finished_audio = top_audio

            # Try adding the voice over 
            logging.debug("Attempting to add voice over...")
            try:
                voice_data = json_file['VoiceTrackURL']
                voice_audio_lvl = float(json_file['VoiceoverAudioLevel'])
                voice = generateEffects.open_voice(voice_data, voice_audio_lvl, user)
                top_audio = CompositeAudioClip([top_audio, voice])
                logging.debug("Music added successfully")
            except Exception as e:
                logging.debug("Exception occured in render - during voiceover audio building:")
                logging.debug(e)
                finished_audio = top_audio

            # Try concatenating the top and bottom audio lines together
            logging.debug("Attepting to add interview audio...")
            try:
                bottom_audio = concatenate_audioclips(bottom_audio)    
                logging.debug("Bottom audio len: {}".format(round(bottom_audio.duration, 2)))
                finished_audio = CompositeAudioClip([top_audio, bottom_audio])
                logging.debug("Interview audio addedd successfully")
            except Exception as e:
                logging.debug("Exception occured in render - during interview audio building:")
                logging.debug(e)
                finished_audio = top_audio

            logging.debug("Finished audio len: {}".format(round(finished_audio.duration, 2)))

            # Concatenate the video files together
            finished_video = concatenate_videoclips(video_list)
            finished_video = finished_video.set_audio(finished_audio)


            # Defining path here is cleaner
            vid_name = user + "_com_preview_edited.mp4" if compress_render else user + "_edited.mp4"
            vid_dir = os.path.join(attach_dir, user, vid_name)


            logging.debug("Rendering {} clip(s) together, of total length {}.".format(len(video_list), round(finished_video.duration, 2)))
            logging.debug("Writing '{}' to {}".format(vid_name, vid_dir))

            logging.debug("Videos placed in {} seconds".format(time.time() - start_time))

            # Render the finished project out into an mp4
            if chunk_render:
                if finished_video.duration < Config.PREVIEW_CHUNK_LENGTH:
                    logging.debug("Rendering video as it's smaller than chunk length")
                    vid_dir = os.path.join(attach_dir, user, user + "_com_chunk_0_edited.mp4")
                    finished_video.write_videofile(
                        vid_dir,
                        threads=8,
                        preset="ultrafast",
                        bitrate="1000k",
                        audio_codec="aac",
                        remove_temp=True,
                        fps=24
                    )
                    results = "Chunk Rendered Successfully", 1
                    if send_end is not None:
                        send_end.send(results)
                    return results

                logging.debug("Running chunk render instance")
                # Get 10 second chunks of videos
                logging.debug("Splitting video up into 10s chunks.")
                
                # Initialising variables
                finished_dur = round(finished_video.duration, 2)
                chunk_len = Config.PREVIEW_CHUNK_LENGTH
                preview_chunks = []
                playtime = 0

                # Getting segment amount (rounded up to account for the section that doesn't fit within a full chunk length)
                segment_no = ceil(finished_dur / chunk_len)
                # The final segment holds whatever doesn't fill a full chunk

                logging.debug("Video duration: {}s  /{}s = {} segments      full segments: {}".format(finished_dur, chunk_len, finished_dur/chunk_len, segment_no))

                # Cut the video into chunk_len-second segments
                for i in range(segment_no):
                    preview_clip = finished_video.subclip(playtime, min(playtime+chunk_len, finished_dur))
                    logging.debug("Clip is currently from {} to {}".format(playtime, round(min(playtime+chunk_len, finished_dur), 2)))

                    playtime += chunk_len
                    logging.debug("Segment {} is {}s long".format(i, round(preview_clip.duration, 2)))
                    preview_clip.fps = 24
                    if preview_clip.duration < chunk_len / 2:
                        logging.debug("Clip is smaller than {}s, so appending it to the last clip instead.".format(chunk_len / 2))
                        # Keep chronological order: the previous chunk plays before its short tail
                        preview_clip = concatenate_videoclips([preview_chunks[-1], preview_clip])
                        del preview_chunks[-1]
                    preview_chunks.append(preview_clip)
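                # e.g. a 47s video with 10s chunks yields 10+10+10+10+7; a 3s tail would instead be merged into the previous chunk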


                
                logging.debug("Preview chunk list: ")
                logging.debug(preview_chunks)

                logging.debug("Rendering out {} videos in {}s chunks".format(len(preview_chunks), chunk_len))

                
                for chunk_no, video in enumerate(preview_chunks):
                    try:
                        vid_name = user + "_com_chunk_" + str(chunk_no) + "_edited.mp4"
                        vid_dir = os.path.join(attach_dir, user, vid_name)

                        logging.debug("Rendering {} at time {}s".format(vid_name, (time.time() - start_time)))
                        video.write_videofile(
                            vid_dir,
                            threads=8,
                            preset="ultrafast",
                            bitrate="1000k",
                            audio_codec="aac",
                            remove_temp=True,
                            fps=24
                        )
                        results = "Chunk {} Rendered Successfully".format(chunk_no), 1
                        if send_end is not None:
                            send_end.send(results)
                    except Exception:
                        logging.error("Fatal error occured while writing video - Chunk Render")
                        logging.exception("")
                        logging.error("Exiting program without writing video file correctly")                
                        results = "Video not rendered [ERROR OCCURED, VIEW LOGS '{}' FOR MORE DETAILS]".format(log_name), 99
                        if send_end is not None:
                            send_end.send(results)            
                        return results
                #results = "Video Rendered Successfully", 1
                logging.debug("File '{}' successfully written to {}".format(vid_name, vid_dir))
                logging.debug("Completed in {} seconds".format(time.time() - start_time))
                logging.debug("Closing render instance - Chunk")            
                if send_end is not None:
                    send_end.send(results)            
                return results

                    
            if compress_render:
                logging.debug("Running compress render instance")
                try:
                    finished_video.write_videofile(
                        vid_dir,
                        threads=8,
                        bitrate="1000k",
                        audio_codec="aac",
                        remove_temp=True,
                        fps=24
                    )        
                    results = "Video Rendered Successfully", 1
                    logging.debug("File '{}' successfully written to {}".format(vid_name, vid_dir))
                    logging.debug("Completed in {} seconds".format(time.time() - start_time))
                    logging.debug("Closing render instance - Compress")
                    if send_end is not None:
                        send_end.send(results)            
                    return results
                except Exception:
                    logging.error("Fatal error occured while writing video - Compressed Render")
                    logging.exception("")
                    logging.error("Exiting program without writing video file correctly")
                    results = "Video not rendered [ERROR OCCURED, VIEW LOGS '{}' FOR MORE DETAILS]".format(log_name), 99
                    if send_end is not None:
                        send_end.send(results)            
                    return results
            else:
                logging.debug("Running full render instance")
                try:
                    logging.debug("Rendering {}".format(vid_name))
                    finished_video.write_videofile(            
                        vid_dir,
                        threads=8,
                        audio_codec="aac",
                        bitrate="8000k",
                        remove_temp=True,
                        fps=24
                    )        
                    results = "Video Rendered Successfully", 1
                    logging.debug("File '{}' successfully written to {}".format(vid_name, vid_dir))
                    logging.debug("Completed in {} seconds".format(time.time() - start_time))
                    logging.debug("Closing render instance - Full")
                    if send_end is not None:
                        send_end.send(results)            
                    return results

                except Exception:
                    logging.error("Fatal error occured while writing video - Full Render")
                    logging.exception("")
                    logging.error("Exiting program without writing video file correctly")
                    results = "Video not rendered [ERROR OCCURED, VIEW LOGS '{}' FOR MORE DETAILS]".format(log_name), 99
                    if send_end is not None:
                        send_end.send(results)            
                    return results                

    except Exception:
        logging.error("An unknown error has occured, causing video render instance to crash:")
        logging.exception("")
        results = "Unforseen error has occured [Contact admin]", 99      
        if send_end is not None:
            send_end.send(results)            
        return results
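
The music-looping branch above (replaying the track when it runs short of the video) can be factored into a small standalone helper. A minimal sketch using only moviepy; loop_audio_to_length is an illustrative name, not part of the original project:

from math import ceil
from moviepy.editor import AudioFileClip, concatenate_audioclips

def loop_audio_to_length(audio_path, target_duration):
    # Hypothetical helper: repeat a track until it covers target_duration seconds
    clip = AudioFileClip(audio_path)
    if clip.duration >= target_duration:
        return clip.subclip(0, target_duration)
    repeats = ceil(target_duration / clip.duration)
    # Concatenate whole copies, then trim the overshoot
    return concatenate_audioclips([clip] * repeats).subclip(0, target_duration)
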
Exemplo n.º 21
0
    def main(self):
        #setting the thread and tts for title
        self.submission = self.reddit.submission(id=self.thread)
        self.submission.comment_sort = 'top'
        # If the submission is stickied or not a text post, then return nothing
        if self.submission.stickied or not self.submission.is_self:
            print("Error! Invalid submission")
            return
        tts = gTTS(text=self.submission.title, lang=choice(self.english))
        tts.save('temp_files/title/title.mp3')

        self.engine = pyttsx3.init()
        self.voices = self.engine.getProperty('voices')
        # Making male voice more common
        self.weights = [
            0.7 if "DAVID" in voice.id else 0.15 for voice in self.voices
        ]
        self.engine.setProperty('rate', 150)
        self.status = "Getting Comments"
        print("Getting Comments")
        self.draw_title()

        for count, self.top_level_comment in enumerate(
                tqdm(self.submission.comments[:self.total_num_comments],
                     leave=False)):
            try:
                self.top_level_comment.body = self.remove_urls(
                    self.top_level_comment.body)
                if self.valid_comment(1000, 600,
                                      top=True):  #valid_comment(score, len)
                    self.second_level_comment = self.top_level_comment.replies[
                        0]
                    self.second_level_comment.body = self.remove_urls(
                        self.second_level_comment.body)
                    self.third_level_comment = self.second_level_comment.replies[
                        0]
                    self.third_level_comment.body = self.remove_urls(
                        self.third_level_comment.body)

                    if self.valid_comment((self.top_level_comment.score // 10),
                                          400,
                                          second=True):
                        if self.valid_comment(
                            (self.top_level_comment.score // 12),
                                200,
                                third=True):
                            self.make_comments(count, third=True)
                            self.num_comments_dict[str(count)] = 3
                        else:
                            self.make_comments(count, second=True)
                            self.num_comments_dict[str(count)] = 2
                    else:
                        self.make_comments(count, top=True)
                        self.num_comments_dict[str(count)] = 1

                elif self.valid_comment(4000, 1200, top=True):
                    self.make_comments(count, top=True)
                    self.num_comments_dict[str(count)] = 1
            except Exception:
                # Skip comments that lack the expected reply structure
                pass

        self.create_audio_file()
        self.create_video_file()
        music = glob.glob("music/*.mp3")
        music_choice = choice(music)
        audio_foreground = AudioFileClip('comments/all.mp3')
        audio_background = AudioFileClip(music_choice).volumex(0.12)

        audio_ratio = ceil(audio_foreground.duration /
                           audio_background.duration)
        audio_concat = concatenate_audioclips([audio_background] * audio_ratio)
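        # e.g. 95s of narration over a 30s track needs ceil(95/30) = 4 copies of the background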
        final_audio = CompositeAudioClip([audio_foreground, audio_concat])

        self.status = "Writing Video"
        print("Writing Video")
        final_audio = final_audio.set_end(audio_foreground.duration + 1)
        final = self.concat_clip.set_audio(final_audio)
        final.write_videofile(f"{self.submission.id}.mp4", fps=24, threads=4)

        #clearing the temp directory
        for file in glob.glob("temp_files/Images/*.png"):
            os.remove(file)
        for file in glob.glob("temp_files/comment_files/*.mp3"):
            os.remove(file)
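
The chunk-splitting loop in the render example earlier can likewise be expressed as a standalone function. A minimal sketch assuming moviepy; split_into_chunks is an illustrative name, and the merge rule (a tail shorter than half a chunk is folded into the previous chunk) mirrors that render code:

from moviepy.editor import concatenate_videoclips

def split_into_chunks(video, chunk_len=10):
    # Hypothetical helper: cut a clip into chunk_len-second pieces,
    # merging a short tail into the chunk before it
    chunks = []
    t = 0
    while t < video.duration:
        piece = video.subclip(t, min(t + chunk_len, video.duration))
        if chunks and piece.duration < chunk_len / 2:
            chunks[-1] = concatenate_videoclips([chunks[-1], piece])
        else:
            chunks.append(piece)
        t += chunk_len
    return chunks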