def merge_audio_and_video(self):
        sc_file_name = self.screen_recorder.file_name
        # ad_file_name = self.audio_recorder.file_name  # original audio file name
        ad_file_name = self.audio_recorder.file_name + "..pr.wav"  # audio produced by the deep-learning model
        output_file_name = self.output_file_name

        while True:
            if not self.modeling and os.path.isfile(ad_file_name):
                audio_clip = mpe.AudioFileClip(ad_file_name)
                video_clip = mpe.VideoFileClip(sc_file_name)  # the recorded video file
                new_audio_clip = mpe.CompositeAudioClip([audio_clip])
                video_clip.audio = new_audio_clip  # replace the video's audio track
                video_clip.write_videofile(output_file_name,
                                           codec="png")  # write the final video file

                # delete the temporary recording files
                os.remove(self.file_name + "temp.wav")
                os.remove(self.file_name + "temp.avi")
                os.remove(self.audio_recorder.file_name + "..pr.wav")
                os.remove(self.audio_recorder.file_name + "..pr.png")
                os.remove(self.audio_recorder.file_name + "..lr.wav")
                os.remove(self.audio_recorder.file_name + "..lr.png")
                os.remove(self.audio_recorder.file_name + "..hr.wav")
                os.remove(self.audio_recorder.file_name + "..hr.png")

                print("merge success")
                break  # bug fix: exit the polling loop once the merge is done
Example 2
    def combine(self, other: 'Moviepy', other_first: bool = False,  # type: ignore
                crossfade_duration: float = 0) -> None:
        """Combines this video stream with another stream"""
        self.reader_refs += other.reader_refs
        clips = [other.clip, self.clip] if other_first else [self.clip, other.clip]

        if self.has_video and other.has_video:
            if crossfade_duration == 0:
                self.clip = med.concatenate_videoclips(clips)
            else:
                # Have clips[1] start while clips[0] is not finished yet
                clips[1] = clips[1].set_start(max(0, clips[0].duration - crossfade_duration))
                clips[1] = clips[1].fx(transfx.crossfadein, crossfade_duration)
                self.clip = med.CompositeVideoClip([clips[0], clips[1]])
                # TODO: consider calling set_duration?
                self.clip.duration = clips[0].duration + clips[1].duration - crossfade_duration
        else:
            if crossfade_duration == 0:
                assert self.has_video is False and other.has_video is False
                self.clip = med.concatenate_audioclips(clips)
            else:
                # Audio crossfade in: start earlier, fade in with normal audio_fadein effect.
                clips[1] = clips[1].set_start(max(0, clips[0].duration - crossfade_duration))
                clips[1] = clips[1].fx(afx.audio_fadein, crossfade_duration)
                self.clip = med.CompositeAudioClip([clips[0], clips[1]])
                self.clip.duration = clips[0].duration + clips[1].duration - crossfade_duration
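As a usage sketch of the crossfade branch above (clip paths are hypothetical), the set_start/crossfadein/CompositeVideoClip sequence looks like this when applied to two plain video files:

import moviepy.editor as med
import moviepy.video.compositing.transitions as transfx

first = med.VideoFileClip("a.mp4")
second = med.VideoFileClip("b.mp4")
crossfade = 1.0

# Start the second clip before the first one ends, fading it in over the overlap.
second = second.set_start(max(0, first.duration - crossfade))
second = second.fx(transfx.crossfadein, crossfade)

combined = med.CompositeVideoClip([first, second])
combined.duration = first.duration + second.duration - crossfade
combined.write_videofile("crossfaded.mp4")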
Example 3
def overlay_meme_audio(clip_audio, freeze_time, tbc_duration):
    meme_audio = jaudio.get_meme_audio(freeze_time, tbc_duration)
    overlayed_audio = mp.CompositeAudioClip(
        [meme_audio.volumex(1.2),
         clip_audio.volumex(0.6)])

    return overlayed_audio
Example 4
def render(files, dirs, mixer=95, portat=0, music=False):
    videofile = mp.VideoFileClip(dirs + files)
    if portat == "portat":
        video = videofile.resize((360, 480))
    else:
        video = videofile.resize(width=480)
    if video.duration <= 120:
        if music:
            musics = mp.AudioFileClip(f"videoassets/{music}.mp3")
            video = video.set_audio(mp.CompositeAudioClip([musics.volumex(
                1 - mixer/100), video.audio.volumex(mixer/100)]).set_duration(video.duration))
        intro = mp.VideoFileClip("videoassets/quickskitsoliveintro.mp4").resize(
            video.size
        )
        logoimage = mp.ImageClip("videoassets/logo.png")
        logo = (
            logoimage.set_duration(video.duration)
            .resize(height=40)
            .margin(right=50, bottom=50, opacity=0)
            .set_pos(("right", "bottom"))
        )
        final = mp.CompositeVideoClip([video, logo])
        final = mp.concatenate_videoclips([final, intro.fadein(1).fadeout(1)])

        newformat = changeformat("rendered_" + files, ".mp4")
        final.write_videofile(dirs + newformat, fps=20,
                              codec="mpeg4", audio_codec='aac', threads=4)
        intro.close()
        logoimage.close()
        videofile.close()
        if music:
            musics.close()
        return newformat
    else:
        return False
Example 5
def convert(videos, sounds, dest, tduration=30, mduration=30):
    acc = 0
    clips = []

    for v, s in zip(videos, sounds):
        c = read_clip(v)
        if c.duration >= mduration:
            continue
        c = process_clip(c, s)
        acc += c.duration
        clips.append(c)
        if acc > tduration:
            break

    end_clip = mp.TextClip("FIN",
                           fontsize=100,
                           color="white",
                           font="garamond",
                           method="caption").set_duration(3)
    clips.append(end_clip)

    output = mp.concatenate_videoclips(clips, method="compose")
    music = audio_loop.audio_loop(
        volumex.volumex(mp.AudioFileClip("bgm.mp3").set_start(0), 0.2),
        duration=output.duration)

    new_audio = mp.CompositeAudioClip([music, output.audio])
    output = output.set_audio(new_audio)
    output.write_videofile(dest + "/" + "output.mp4")
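The looped background-music pattern used above is worth isolating. A minimal sketch, assuming hypothetical asset names and a source video that has its own audio track:

import moviepy.editor as mp
from moviepy.audio.fx.all import audio_loop, volumex

video = mp.VideoFileClip("montage.mp4")
# Loop quiet background music for the full length of the video.
bgm = audio_loop(volumex(mp.AudioFileClip("bgm.mp3"), 0.2), duration=video.duration)
# Assumes video.audio is not None; see the None-check sketch after Example 20.
video = video.set_audio(mp.CompositeAudioClip([bgm, video.audio]))
video.write_videofile("montage_with_bgm.mp4")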
Example 6
    def translate_video(self, url, native_lng, lng="english"):
        #video, audio = self.retrieve_video_and_audio(url)
        audio = {"content": self.get_audio(url)}

        full_transcript = self.split_transcript(
            self.get_transcript(audio, native_lng))
        translated_transcript = []
        for line in full_transcript:
            translated_transcript.append(self.translate(line, lng))

        translated_audio = None
        for i in range(len(full_transcript)):
            native_line = full_transcript[i]
            translated_line = translated_transcript[i]
            speed_factor = self.get_speed_factor(native_line, translated_line)
            if not translated_audio:
                translated_audio = self.text_to_audio(
                    translated_line, lng, speed_factor=speed_factor)
            else:
                translated_audio = translated_audio + self.text_to_audio(
                    translated_line, lng, speed_factor=speed_factor)

        with open('output.mp3', 'wb') as out:
            out.write(translated_audio)

        my_clip = videoclip  # assumes `videoclip` was obtained earlier, e.g. via the commented-out retrieval above
        audio_background = mpe.AudioFileClip('output.mp3')
        final_audio = mpe.CompositeAudioClip([audio_background])
        final_clip = my_clip.set_audio(final_audio)
        final_clip.write_videofile("result.mp4")
Example 7
 def put_bgm(self, finalclip, endvideo_duration=4):
     print('Adding background music... ', end='')
     BGM = mpy.AudioFileClip(
         self.bgm).set_duration(finalclip.duration - endvideo_duration).fx(
             afx.audio_fadeout, 0.8)
     final_audio = mpy.CompositeAudioClip([BGM, finalclip.audio])
     mix = finalclip.set_audio(final_audio)
     print('done')
     return mix
Example 8
def concat_videos(videos, bg_music, new_fp):
    videos_clip = [mpe.VideoFileClip(v) for v in videos]
    combined_clip = mpe.concatenate_videoclips(videos_clip)

    ori_audio_clip = combined_clip.audio
    # bg_audio_clip = create_bg_music(bg_music, combined_clip.duration)
    bg_audio_clip = mpe.AudioFileClip(bg_music)
    new_audio_clip = mpe.CompositeAudioClip([ori_audio_clip, bg_audio_clip])
    combined_clip = combined_clip.set_audio(new_audio_clip)  # set_audio returns a new clip, so reassign it
    combined_clip.write_videofile(new_fp)
Example 9
def audio_recorder(final_file_name, temp_video_file, temp_audio_file):

    q = queue.Queue()

    def callback(indata, frames, time, status):
        q.put(indata.copy())

    device_info = query_devices(0, 'input')
    samplerate = int(device_info['default_samplerate'])
    initial_time = time()
    with SoundFile("Recorded Videos/" + temp_audio_file + ".wav",
                   mode='x',
                   samplerate=samplerate,
                   channels=2) as file:
        with InputStream(samplerate=samplerate,
                         device=0,
                         channels=2,
                         callback=callback):
            while not stop:
                file.write(q.get())

    print("счет1 ", count1)
    print("time()-initial_time = ", time() - initial_time)
    fps_real = count1 / (time() - initial_time)
    processing_condition_showing("Обработка")
    print("Действительный fps ", fps_real)

    changing_fps(fps_real, temp_video_file)

    #merging
    sound = AudioSegment.from_wav("Recorded Videos/" + temp_audio_file +
                                  ".wav")
    sound.export("Recorded Videos/" + temp_audio_file + ".mp3")

    clip = mpe.VideoFileClip("Recorded Videos/" + temp_video_file +
                             "_corrected.avi")
    audio = mpe.AudioFileClip("Recorded Videos/" + temp_audio_file + ".mp3")

    final_audio = mpe.CompositeAudioClip([audio])
    final_file = clip.set_audio(final_audio)

    final_file.write_videofile("Recorded Videos/" + final_file_name + ".mp4")

    print("Удаление временных файлов")
    if isfile("Recorded Videos/" + temp_audio_file + ".mp3"):
        remove("Recorded Videos/" + temp_audio_file + ".mp3")
    if isfile("Recorded Videos/" + temp_audio_file + ".wav"):
        remove("Recorded Videos/" + temp_audio_file + ".wav")
    if isfile("Recorded Videos/" + temp_video_file + ".avi"):
        remove("Recorded Videos/" + temp_video_file + ".avi")

    if isfile("Recorded Videos/" + temp_video_file + "_corrected.avi"):
        remove("Recorded Videos/" + temp_video_file + "_corrected.avi")
    processing_condition_showing("done")
Example 10
 def addAudio2Video(self, videopath, audiopath):
     video = mpe.VideoFileClip(videopath)
     print("video duration:%d" % (video.duration))
     audio = mpe.AudioFileClip(audiopath)
     subaudio = audio.subclip(0, video.duration)
     print(video.audio)
     finalAudio = mpe.CompositeAudioClip([subaudio])
     #finalAudio = subaudio
     finalVideo = video.set_audio(finalAudio)
     audioVideo = videopath[:-4] + "-withaudio.mp4"
     finalVideo.write_videofile(audioVideo)
     return audioVideo
Example 11
def sync_videos(data_start, video_start, data_ani_path):

    #print(data_start)
    #print(video_start)

    data_start_datetime = datetime.strptime(data_start, '%Y-%m-%d %H:%M:%S')
    video_start_datetime = datetime.strptime(video_start, '%Y-%m-%d %H:%M:%S')

    delta = (data_start_datetime - video_start_datetime).total_seconds()
    #print(delta)

    image_list = []
    #append file names from Output_Images as strings to image_list
    for root, dirs, files in os.walk(data_ani_path):
        image_list += glob.glob(os.path.join(root, '*png'))
        image_list += glob.glob(os.path.join(root, '*jpg'))
        image_list += glob.glob(os.path.join(root, '*jpeg'))

    #sort list by file name
    image_list.sort()  #first alphabetically
    image_list.sort(key=len)  #second by filename length

    data_clip = mpe.ImageSequenceClip(image_list, fps=1)
    video_clip = mpe.VideoFileClip("trim_test.mp4")
    # prevent moviepy from automatically converting portrait to landscape
    if video_clip.rotation == 90:
        video_clip = video_clip.resize(video_clip.size[::-1])
        video_clip.rotation = 0

    if delta >= 0:  #if data starts after video
        # relevant attributes for composition
        test = data_clip.set_start(delta, change_end=True)
        #test.end += delta # this didn't seem to work
        test.duration += delta  # this worked!

        final_clip = video_array(test, video_clip, video_clip.audio)
    else:  #if data starts before video
        # relevant attributes for composition
        delta = -delta
        test = video_clip.set_start(delta, change_end=True)
        test.duration += delta
        test_audio = video_clip.audio.set_start(delta, change_end=True)
        # force moviepy to register set_start from previous line
        test_audio = mpe.CompositeAudioClip([test_audio])
        #test_audio.duration += delta
        final_clip = video_array(data_clip, test, test_audio)

    final_clip.write_videofile("sync_test.mp4",
                               logger=None,
                               preset='ultrafast')
    """
Example 12
def fix_audio():
    audio_path = os.path.join(os.path.dirname(__file__), "input", "audio.mp3")
    source_clip = mpe.VideoFileClip(
        os.path.join(os.path.dirname(__file__), "input", FILE_NAME))
    source_clip.audio.write_audiofile(audio_path)
    videoclip = mpe.VideoFileClip(
        os.path.join(os.path.dirname(__file__), "output", "output.avi"))
    audio_background = mpe.AudioFileClip(audio_path)

    new_audioclip = mpe.CompositeAudioClip([audio_background])
    videoclip.audio = new_audioclip
    videoclip.write_videofile(
        os.path.join(os.path.dirname(__file__), "output", "final.mp4"))
Example 13
def audio_list_combine(audio):
    audio_clip_list = list()
    # We add all the audiofiles to a list
    for audio_file in audio:
        if isinstance(audio_file, str):
            temp_audio_clip = mpy.AudioFileClip(audio_file)
        elif isinstance(audio_file, mpy.AudioClip):
            # isinstance (rather than `type is`) also accepts AudioClip
            # subclasses such as AudioFileClip and CompositeAudioClip.
            temp_audio_clip = audio_file
        else:
            raise TypeError(
                "Elements of the `audio` list must be filename strings "
                "or moviepy AudioClip instances."
            )

        audio_clip_list.append(temp_audio_clip)
    # We then merge the audiofiles
    audio_clip = mpy.CompositeAudioClip(audio_clip_list)
    return audio_clip
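A quick usage sketch for this helper; with the isinstance fix it accepts both filename strings and ready-made clips (file names hypothetical):

import moviepy.editor as mpy

voice = mpy.AudioFileClip("voiceover.mp3")
mixed = audio_list_combine(["bgm.mp3", voice])  # strings and clips may be mixed freely
# Pass fps explicitly; a CompositeAudioClip may not carry one of its own.
mixed.write_audiofile("mixed.wav", fps=44100)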
Example 14
def generate_video():

    # Parse script
    script = ""
    with open("script.txt", "r") as f:
        script = f.read()

    parsed_script = parse_script(script)
    print("Script parsed.")

    # Generate audio
    if "audio.wav" in os.listdir("."):
        print("Audio detected.")
        chunks = get_chunks(pdb.AudioSegment.from_wav("audio.wav"))
        print("Audio chunks generated.")
        sum(chunks).export("audio_track.wav", "wav")
        # Export to file first, then match
        match_audio(parsed_script, chunks)

    # Generate subtitles
    subtitle_clip = generate_subtitles_clip(parsed_script)
    print("Subtitles generated.")

    # Generate scenes
    schedule = get_scene_transition_schedule(parsed_script)
    video_clip = scheduled_time_scene_transition(schedule)
    print("Scene generated.")

    # Generate the video
    main_clip = mpy.CompositeVideoClip([video_clip, subtitle_clip])

    # Add the audio track
    if "audio_track.wav" in os.listdir("."):
        audio_clip = mpy.AudioFileClip('audio_track.wav')
        if "BGM.mp3" in os.listdir(".") or "BGM.flac" in os.listdir("."):
            audio_clip = mpy.CompositeAudioClip([
                mpy.AudioFileClip('audio_track.wav'),
                mpy.AudioFileClip("BGM.mp3").volumex(0.15)
            ])
        main_clip = main_clip.set_audio(
            audio_clip.set_duration(main_clip.duration))

    # Write the video
    main_clip.write_videofile("output.mp4")
Example 15
def clipIt(vod, momentTime, sample_window, VOD_ID=None, suspenseSound=None):
    """
    returns vfx clip with fade
    """

    dt_sample_window = datetime.timedelta(0, sample_window)
    dt_sample_window_end = datetime.timedelta(0, 10)  # bug fix: a bare int cannot be added to a datetime below

    startTime = (momentTime - dt_sample_window).strftime(TIME_FORMAT)

    endTime = (momentTime + dt_sample_window_end).strftime(TIME_FORMAT)
    print(f"Found most engaged moment at: {startTime} to {endTime}", )

    clip = vod.subclip(startTime, endTime)

    # Add watermark
    if VOD_ID:
        txt_clip = mpy.TextClip(f"twitch.tv/videos/{VOD_ID}",
                                fontsize=14,
                                color="white")
        txt_clip = txt_clip.set_pos("bottom").set_duration(sample_window)
        clip = mpy.CompositeVideoClip([clip, txt_clip])

    # Add fade in and fade out
    FADE_DURATION = 3
    clip = vfx.fadeout(clip, FADE_DURATION)
    clip = vfx.fadein(clip, FADE_DURATION)

    if suspenseSound:
        # fade in some audio sound
        audioclip = mpy.AudioFileClip(suspenseSound).set_duration(
            sample_window)

        audioclip = afx.audio_fadeout(audioclip, FADE_DURATION)
        audioclip = afx.audio_fadein(audioclip, round(FADE_DURATION * 2))

        clipAudio = mpy.CompositeAudioClip([clip.audio, audioclip])
        clip.audio = clipAudio

    return clip
Example 16
def process_clip(clip, sound):
    base_clip = clip
    base_clip = resize.resize(base_clip, width=640, height=480)
    title_tts = mp.AudioFileClip(sound.dest + "/title" + sound.filename +
                                 ".mp3").set_start(0)
    title_clip = mp.TextClip(sound.title,
                             fontsize=50,
                             color="white",
                             font="garamond",
                             method="caption",
                             size=(base_clip.size[0], None))
    title_clip = title_clip.on_color(
        size=(title_clip.size[0] + 10, title_clip.size[1] + 10),
        col_opacity=0.5).set_duration(
            title_tts.duration).set_position("center")
    title_clip = fadein.fadein(title_clip, 0.2, (255, 255, 255))

    comment_tts = mp.AudioFileClip(sound.dest + "/comment" + sound.filename +
                                   ".mp3")
    comment_time = max(
        min(base_clip.duration - comment_tts.duration, base_clip.duration / 2),
        title_tts.duration + 1)
    comment_clip = mp.TextClip(sound.top_comment,
                               fontsize=20,
                               color="white",
                               method="caption",
                               size=(base_clip.size[0],
                                     None)).on_color(col_opacity=0.5)
    comment_clip = comment_clip.set_duration(
        comment_tts.duration).set_start(comment_time).set_position("bottom")
    comment_clip = fadein.fadein(comment_clip, 0.2, (255, 255, 255))
    comment_tts = comment_tts.set_start(comment_time)

    audio = mp.CompositeAudioClip([title_tts, comment_tts])
    base_clip = loop.loop(base_clip,
                          duration=max(base_clip.duration, audio.duration + 1))
    newclip = mp.CompositeVideoClip([base_clip, title_clip, comment_clip])
    return newclip.set_audio(audio)
Example 17
        print('Error 404. Please try again.')

print('>> File "' + file + '" chosen.')

start_time = time.time()

v = mp.VideoFileClip('./' + file)
final = v.duration - 0.1

song = './jjtbcroundabout.mp3'
riff_time = 44.944

start_song = final - riff_time
audioclip = mp.AudioFileClip(song)
audioclip = audioclip.set_start(t=start_song)
fa = mp.CompositeAudioClip([audioclip, v.audio])

thumb = v.save_frame('./jjtbcthumbnail.jpg', t=final)
tg = Image.open('./jjtbcthumbnail.jpg').convert('L')
tinted = ImageOps.colorize(tg, black='#1e1a12', white='#bfb196')
tinted.save('./jjtbcthumbnail.jpg')
finalfr = mp.ImageClip('./jjtbcthumbnail.jpg',
                       duration=(audioclip.duration -
                                 riff_time)).set_start(final)

bcarrow = mp.ImageClip('./jjtbcarrow.png').set_duration(10).resize(
    width=50).margin(left=10, bottom=10).set_pos(('left', 'bottom'))
tbcarrow = mp.ImageClip('./jjtbcarrow.png')
vidwid = v.w
print('>>>> Width = ' + str(vidwid))
Example 18
            source_img.paste(tick, (x + 190, y + h + 110))
        if (ans == str(4)):

            rightd = Image.open('./main/rightd.png').convert("RGBA")
            rightd.thumbnail(button_size_1)
            right = ImageDraw.Draw(rightd)
            right.text((50, 10), text_o1[3], font=font)
            source_img.paste(rightd, ans_4)
            source_img.paste(tick,
                             (int(width / 2 + x + 10) + 180, y + h + 110))

        timer_end_img = Image.open('./main/timeisup.png').convert("RGBA")
        timer_end_img.thumbnail((50, 50))  # presumably meant to resize the image just opened, not `timer_img`
        source_img.paste(timer_end_img, (int(width / 2 - 100), y + h + 200))

        #source_img.save("./images/12.png", "PNG")
        #source_img.save("./images/13.png", "PNG")
        #source_img.save("./images/14.png", "PNG")
        source_img.save("file.png", "PNG")
        img = cv2.imread("file.png")
        for i in range(24 * 3):
            out.write(img)

out.release()
file.close()

my_clip = mpe.VideoFileClip('final_output.mp4')
audio_background = mpe.AudioFileClip('speech.mp3')
final_audio = mpe.CompositeAudioClip([my_clip.audio, audio_background])
final_clip = my_clip.set_audio(final_audio)
Example 19
    YouBOT_session_data['post_data'])

v, a, up_img, down_img, bottom_bar_img, auth, corner_logo, trans_clips, ups_text, t, = prepare_all_comment_clips(
    post_data['comments'].to_list(), post_data['replies'].to_list(),
    start_time)

ov, oa, ot, outro_image = prepare_outro_clips(t)
background = mpy.ColorClip(
    (width, height), color=(26, 26, 27)).set_duration(t + outro_image.duration)

combined = mpy.CompositeVideoClip([background] + corner_logo + v + [iv] +
                                  [ov] + [auth_text] + [intro_image] +
                                  [outro_image] + up_img + down_img +
                                  bottom_bar_img + auth + ups_text +
                                  trans_clips).set_audio(
                                      mpy.CompositeAudioClip(a + [ia] + [oa]))

background_music = prepare_background_music(combined.duration)

final_audio = mpy.CompositeAudioClip([combined.audio, background_music])
combined = combined.set_audio(final_audio)

# title = subreddit_name + ', asks: ' + YouBOT_session_data['post_data'].title

tags = ['reddit', 'askreddit', 'toadfilms', 'updoot',
        'stories']  # manual tags: reddit,askreddit,toadfilms,updoot,stories
description = 'Sub for the some of the best AskReddit content freshly handpicked for you!'
title = prepare_title_for_win10_file(YouBOT_session_data['post_data'].title)
path = r'releases\\'
file_path = path + title + r'\\'
Example 20
final = v.duration - 0.1  # Remove 0.1s from the end of the clip so the freeze frame doesn't loop back to the first frame

# ~ Roundabout song ~
song = './songs/roundabout_long.mp3'
riff_time = 44.944

start_song = final - riff_time
audioclip = mp.AudioFileClip(song)
audioclip = audioclip.set_start(
    t=start_song)  # Time at which song should start so riff is at end

# ~ v1.2 edit - Adding Mute/No Audio function ~
#v = v.set_audio('')

try:
    fa = mp.CompositeAudioClip([audioclip, v.audio
                                ])  # If video contains audio, merge with song
except AttributeError:
    fa = mp.CompositeAudioClip([audioclip])  # Else skip

# ~ Video Freeze Frame ~
# Create Sepia image from last frame using PIL
thumb = v.save_frame('thumbnail.jpg', t=final)
tg = Image.open('thumbnail.jpg').convert('L')  # Convert image to grayscale
tinted = ImageOps.colorize(tg, black='#1e1a12',
                           white='#bfb196')  # Tinting sepia tones
tinted.save('thumbnail.jpg')

finalfr = mp.ImageClip('thumbnail.jpg',
                       duration=(audioclip.duration - riff_time)).set_start(
                           final)  # Open tinted frame as freeze frame
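The try/except above guards against source videos that have no audio track. An explicit None check expresses the same fallback more directly; a sketch, with hypothetical names:

import moviepy.editor as mp

def mix_with_song(video, song_clip):
    """Layer a song over the video's own audio, tolerating silent sources."""
    if video.audio is not None:
        return mp.CompositeAudioClip([song_clip, video.audio])
    return mp.CompositeAudioClip([song_clip])

# fa = mix_with_song(v, audioclip) would replace the try/except above.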
Example 21
import moviepy.editor as mp

inp = 'in/f.mp4'
out = 'out/f.webm'
aud = 'aud/1.WAV'
video = mp.VideoFileClip(inp)
if video.rotation == 90:
    video = video.resize(video.size[::-1])
    video.rotation = 0

logo = (
    mp.ImageClip("logo.png").set_duration(video.duration).resize(
        height=50)  # if you need to resize...
    .margin(right=8, top=8, opacity=0)  # (optional) logo-border padding
    .set_pos(("right", "top")))

if aud != '':
    audioclip = mp.AudioFileClip(aud)
    new_audioclip = mp.CompositeAudioClip([audioclip])
    video.audio = new_audioclip

final = mp.CompositeVideoClip([video, logo])
final.write_videofile(out)
Example 22
    a = np.linspace(
        0, 2 * PI,
        N_SQUARES)[:-1]  ## Polar angles of the centers of the small squares

    ## Parametric n-gon equation; see
    ## https://tpfto.wordpress.com/2011/09/15/parametric-equations-for-regular-and-reuleaux-polygons/
    r1 = np.cos(PI / N_GON) / (np.cos(a - (PI / N_GON) *
                                      (2 * np.floor((N_GON * a) /
                                                    (2 * PI)) + 1)))
    r2 = np.cos(PI / (N_GON + 2)) / (np.cos(a - (PI / (N_GON + 2)) *
                                            (2 * np.floor(((N_GON + 2) * a) /
                                                          (2 * PI)) + 1)))

    d1 = np.cumsum(np.sqrt(((r1[1:] - r1[:-1])**2)))
    d1 = [0] + list(d1 / (d1.max()) + 1e-10)
    d2 = np.cumsum(np.sqrt(((r2[1:] - r2[:-1])**2)))
    d2 = [0] + list(d2 / (d2.max()) + 1e-10)
    P1 = list(zip(r1, a, d1))
    P2 = list(zip(r2, a, d2))

    videoclip = mpe.VideoClip(make_frame=make_frame, duration=PSEUDO_DURATION)
    if EXTENSION == 'gif':
        videoclip.write_gif(output_file,
                            fps=FPS,
                            program='ImageMagick',
                            opt='OptimizePlus')
    else:
        _audioclip = mpe.AudioFileClip(AUDIO_FILE)
        audioclip = mpe.CompositeAudioClip([_audioclip])
        videoclip.audio = audioclip
        videoclip.write_videofile(output_file, fps=FPS)
Example 23

            # (the cv2.VideoWriter(...) call these arguments belong to is truncated in the source)
                                  isColor=True,
                                  frameSize=size)
            for i in range(len(frame_array)):
                # writing to a image array
                out.write(frame_array[i])
            out.release()
            time.sleep(2)

            print(pathVideoOut + "\\" +
                  filename.split('.txt')[0].split("\\")[-1] + '.mp4')
            my_clip = mpe.VideoFileClip(
                filename=pathVideoOut + "\\" +
                filename.split('.txt')[0].split("\\")[-1] + '_v.mp4')
            # `audio_background` and `bg_music` are assumed to be AudioFileClips
            # prepared earlier in this (truncated) script.
            final_audio = mpe.CompositeAudioClip([audio_background, bg_music])
            # final_clip = CompositeVideoClip([my_clip, subtitles.set_position(('center', 'bottom'))])
            final_clip = my_clip.set_audio(final_audio)
            path_f_video = pathVideoOut + "\\" + filename.split(
                '.txt')[0].split("\\")[-1] + '.mp4'
            final_clip.write_videofile(path_f_video,
                                       bitrate='50000k',
                                       fps=FPS,
                                       audio_bitrate='3000k',
                                       codec='mpeg4',
                                       preset="placebo",
                                       threads=4)
            time.sleep(1)
            v_len = video_to_frames(path_f_video)
            if v_len == 0:
                null_video_counter += 1
Example 24
def main(argv):
    inputfile = ''
    keypass = ''
    try:
        opts, args = getopt.getopt(argv, "hi:k:", ["ifile=", "key="])
    except getopt.GetoptError:
        print('in.py -i <inputfile> -k <user_key>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('in.py -i <inputfile> -k <user_key>')
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-k", "--key"):
            keypass = arg


#    print(inputfile)
#    print(keypass)

    inp = inputfile
    if inp == '':
        inp = 'in/f.mp4'
    out = 'out/' + keypass + '_' + dt_stamp + '.webm'
    # aud = 'aud/' + rand_aud +'.WAV'
    aud = 'aud/' + random.choice(os.listdir("aud/"))

    logu = 'logo.png'

    video = mp.VideoFileClip(inp)
    if video.rotation == 90:  # guard restored so only portrait videos get transposed
        video = video.resize(video.size[::-1])
        video.rotation = 0

    logo = (
        mp.ImageClip(logu).set_duration(video.duration).resize(
            height=50)  # if you need to resize...
        .margin(right=8, top=8, opacity=0)  # (optional) logo-border padding
        .set_pos(("right", "top")))

    if aud != '':
        audioclip = mp.AudioFileClip(aud).set_duration(video.duration)
        new_audioclip = mp.CompositeAudioClip([audioclip])
        video.audio = new_audioclip

    final = mp.CompositeVideoClip([video, logo])
    final.write_videofile(out)

    url = 'https://xxxxxx.com.au?auth=' + keypass + '&handle=stream'
    print('Posting Data To ' + url)

    userdata = {
        "loc": out,
        "stamp": dt_stamp,
        "Auth": keypass,
        "handle": "stream"
    }
    resp = requests.post(url)  # NOTE: `userdata` above is never sent; see the commented-out variants below

    #    files = {'file': open(out, 'rb')}
    #    userdata = {"loc": out, "stamp": dt_stamp, "Auth": keypass, "handle": "stream"}
    #    resp = requests.post(url, files=files, params=userdata)
    #    r = requests.get(url, headers={"Auth":keypass, "handle":"stream"})

    print('Call Response:')
    print(resp)
Example 25
def start():
    browsebutton.config(state='disabled')
    mvar = 'JoJoTBCfying in progress. Please wait...'
    messvar.set(mvar)

    ## JoJoTBCfier code:

    file = re.findall(r'.+(\/.+)$', filename)[0][1:]
    start_time = time.time()

    ## Editing video

    # ~ Video clip ~

    v = mp.VideoFileClip(filename)
    final = v.duration - 0.1  # Remove 0.1s from the end of the clip so the freeze frame doesn't loop back to the first frame

    # ~ Roundabout song ~
    mvar = 'Adding roundabout.mp3...'
    messvar.set(mvar)

    song = 'roundabout.mp3'
    riff_time = 44.944

    start_song = final - riff_time
    audioclip = mp.AudioFileClip(song)
    audioclip = audioclip.set_start(
        t=start_song)  # Time at which song should start so riff is at end

    # ~ v1.2 edit - Adding Mute/No Audio function ~
    #v = v.set_audio('')

    try:
        fa = mp.CompositeAudioClip(
            [audioclip, v.audio])  # If video contains audio, merge with song
    except AttributeError:
        fa = mp.CompositeAudioClip([audioclip])  # Else just add audio

    # ~ Video Freeze Frame ~
    mvar = 'Creating freeze frame...'
    messvar.set(mvar)

    # Create Sepia image from last frame using PIL
    thumb = v.save_frame('thumbnail.jpg', t=final)
    tg = Image.open('thumbnail.jpg').convert('L')  # Convert image to grayscale
    tinted = ImageOps.colorize(tg, black='#1e1a12',
                               white='#bfb196')  # Tinting sepia tones
    tinted.save('thumbnail.jpg')

    finalfr = mp.ImageClip(
        'thumbnail.jpg', duration=(audioclip.duration - riff_time)).set_start(
            final)  # Open tinted frame as freeze frame

    # TBC arrow slide in
    mvar = 'Adding To Be Continued arrow...'
    messvar.set(mvar)

    tbcarrow = mp.ImageClip('tbcarrow.png')
    vidwid, vidhei = v.w, v.h
    print('>> WidthxHeight = ' + str(vidwid) + 'x' + str(vidhei))

    tbcarrow = tbcarrow.resize(
        width=(vidwid * 0.4))  # Resizing arrow to 40% of video width

    # ~ Converting to .mp4 ~
    mvar = 'Converting to .mp4...'
    messvar.set(mvar)

    extindex = file.rfind('.')  # finding final . for extension
    file = str(file[0:extindex]) + '.mp4'  # replacing extension with .mp4

    #  ~ Exporting video ~
    mvar = 'Exporting video...'
    messvar.set(mvar)

    fv = mp.CompositeVideoClip([
        v, finalfr,
        tbcarrow.set_pos(
            ('left',
             'bottom')).set_start(final).set_duration(audioclip.duration -
                                                      riff_time)
    ])  #add tbc arrow
    fva = fv.set_audio(fa).set_end(fv.duration - 0.1)
    fva = fva.set_fps(fps=30)
    fva.write_videofile(f'./jojofied/jojofied_{file}')

    # ~ Log File ~
    mvar = 'Writing log file...'
    messvar.set(mvar)

    now = datetime.now()
    dt = now.strftime('%a %d/%m/%y %I:%M:%S %p')

    lf = open('log.txt', 'a+')
    fpath = f'./jojofied/jojofied_{file}'
    lf.write(
        f'\n\n{dt}\n>> File: {file}\n>> Location: {fpath}\n>> Video Length: {fva.duration}\n>> Time Taken: {time.time()-start_time}'
    )
    lf.close()

    if len(fpath) >= 55:  # bug fix: len({fpath}) measured a one-element set, which is always 1
        mvar = f'Done. Video output at {fpath[:55]}...'
    else:
        mvar = f'Done. Video output at {fpath}'
    messvar.set(mvar)

    # ~ Resetting GUI ~
    startbutton.config(state='disabled')
    browsebutton.config(state='normal')
Example 26
def create(request):
    if request.method == 'POST':
        data = request.POST['parag']
        paragraph = data
        text = data.replace('\n', '')
        data = text
        for k in text.split("\n"):
            text2 = re.sub(r"[^a-zA-Z0-9&]+", ' ', k)
        text = text2
        tokens = [t for t in text.split()]
        sr = stopwords.words('english')
        clean_tokens = tokens[:]
        for token in tokens:
            if token in stopwords.words('english'):
                clean_tokens.remove(token)
        freq = nltk.FreqDist(clean_tokens)

        s = [(k, freq[k]) for k in sorted(freq, key=freq.get, reverse=True)]
        title = s[0][0]
        search_queries = [
            sorted(freq.items(), key=lambda kv:
                   (kv[1], kv[0]), reverse=True)[0][0] + "  " +
            sorted(freq.items(), key=lambda kv:
                   (kv[1], kv[0]), reverse=True)[1][0]
        ]
        for query in search_queries:
            downloadimages(query, title)

        stop_words = stopwords.words('english')
        summarize_text = []
        # Step 1 - Read text and split it
        article = data.split(". ")
        sentences = []
        sentences_list = ''
        count_sentence = 0
        for sentence in article:
            count_sentence = count_sentence + 1
            sentences.append(sentence.replace("[^a-zA-Z]", " ").split(" "))
        sentences.pop()
        top_n = int(count_sentence / 3)
        # Step 2 - Generate similarity matrix across sentences
        sentence_similarity_martix = build_similarity_matrix(
            sentences, stop_words)
        # Step 3 - Rank sentences in similarity matrix
        sentence_similarity_graph = nx.from_numpy_array(
            sentence_similarity_martix)
        scores = nx.pagerank(sentence_similarity_graph)
        # Step 4 - Sort the rank and pick top sentences
        ranked_sentence = sorted(
            ((scores[i], s) for i, s in enumerate(sentences)), reverse=True)
        for i in range(top_n):
            summarize_text.append(" ".join(ranked_sentence[i][1]))
        # Step 5 - Finally, output the summarized text
        m = 1
        # Driver Code
        with open("visualizer/input/op.tsv", "w") as text_file:
            text_file.write("content" + "\t" + "val" + '\n')
            for i in summarize_text:
                sentences_list = sentences_list + i
                search_queries.append(i)
                text_file.write(i + "\t" + str(m) + '\n')
                m = m + 1
        emotion = predict()
        for query in search_queries:
            review = re.sub('[^a-zA-Z]', ' ', query)
            review = review.lower()
            review = review.split()
            ps = PorterStemmer()
            review = [
                ps.stem(word) for word in review
                if not word in set(stopwords.words('english'))
            ]
            review = ' '.join(review)
            downloadimages(review, title)
        img_array = []
        for filename in glob.glob('visualizer/images/' + title + '/*.jpg'):
            img = cv2.imread(filename)
            height, width, layers = img.shape
            size = (width, height)
            img_array.append(img)

        out = cv2.VideoWriter('visualizer/output/project.avi',
                              cv2.VideoWriter_fourcc(*'DIVX'), 0.2, size)
        for i in range(len(img_array)):
            img = img_array[i]
            img = cv2.resize(img, size)
            out.write(img)
        out.release()
        folder = 'visualizer/images/' + title + '/'
        for the_file in os.listdir(folder):
            file_path = os.path.join(folder, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
                #elif os.path.isdir(file_path): shutil.rmtree(file_path)
            except Exception as e:
                print(e)
        textClip = gTTS(text=sentences_list, lang=language, slow=False)
        textClip.save("visualizer/output/voice.mp3")
        audioclip = mpe.AudioFileClip("visualizer/output/voice.mp3")
        my_clip = mpe.VideoFileClip('visualizer/output/project.avi')
        audio_background = mpe.AudioFileClip('visualizer/emotions/' + emotion +
                                             '.mp3')
        new_audioclip = mpe.CompositeAudioClip(
            [audio_background.volumex(0.08),
             audioclip.volumex(1)])

        final_audio = mpe.CompositeAudioClip([new_audioclip])
        audio = mpe.afx.audio_loop(final_audio, duration=audioclip.duration)
        final_clip = my_clip.set_audio(audio)
        final_clip.write_videofile("visualizer/output/" + title + '.mp4')
        data = title
        file_path = 'visualizer/output/' + data + '.mp4'
        video = Video()
        video.data = paragraph
        video.name = data
        video.videofile = file_path
        video.save()
        return redirect(video.videofile.url)

    if request.method == 'GET':
        return render(request, 'index.html')
Example 27
    def expMovieIn60s(self):
        print('Editing...\n')
        w, h = 850, 480
        self.w = w
        self.h = h
        clips = []
        n = 0

        drtn = 56 / len(self.lst) / (1 - self.k)
        self.drtn = drtn
        crstime = drtn * self.k
        for p in self.lst:
            fn = os.path.join(self.pth, p)
            if n == 0:
                _img = mpy.ImageClip(fn).set_fps(25).set_duration(drtn).resize(
                    (w, h))
                clips.append(_img)
                n += 1
            else:
                _img = mpy.ImageClip(fn).set_fps(25).set_duration(drtn).resize(
                    (w, h)).crossfadein(crstime).set_start(
                        (drtn - crstime) * n)
                clips.append(_img)
                n += 1

        # compute the running time of the main part, used for the logo overlay
        drt = 0
        for c in clips:
            drt = drt + (c.duration - crstime)
        subtitle_drt = drt - clips[0].duration

        # append the ending clip
        print('Processing the ending clip...\n')
        endvideo = mpy.VideoFileClip(
            self.endV, target_resolution=(h, w)).set_start(
                (drtn - crstime) * n)  # note: target_resolution is (h, w), height first, an easy trap
        clips.append(endvideo)

        print('Processing subtitles...\n')

        clips = self.put_cover_text(clips)

        logo=mpy.ImageClip(r"I:\大智小超\公共素材\图片类\00大智小超科学实验室商标.png") \
            .set_fps(25).set_duration(drt).resize((80,48)).set_position((740,420)) \
            .crossfadein(crstime).set_start(0)
        clips.append(logo)

        print('Compositing the video...\n')
        finalclip = mpy.CompositeVideoClip(clips)

        # add background music
        print('Adding background music...\n')
        BGM = mpy.AudioFileClip(
            self.bgm).set_duration(finalclip.duration - endvideo.duration).fx(
                afx.audio_fadeout, 0.8)
        final_audio = mpy.CompositeAudioClip([BGM, finalclip.audio])
        mix = finalclip.set_audio(final_audio)
        totalTime = finalclip.duration

        out = os.path.join(self.pth, self.consName,
                           self.consName + '_搭建视频_forced_60s.mp4')
        print('Exporting video: {}...\n'.format(out))
        mix.write_videofile(out)
        self.killProcess()
        print('Done')
Example 28
    def export_mv(self, w=1280, h=720, bgm_src='default'):
        crs_info = self.read_crs_info()

        clip_02_src = os.path.join(self.pic_dir, self.crs_name,
                                   self.crs_name + '_clip_02.mp4')
        _clip_02_src = mpy.VideoFileClip(clip_02_src,
                                         target_resolution=(h, w)).set_start(0)
        target_sec = 56 - int(_clip_02_src.duration)

        if _clip_02_src.duration > 44:
            print('The second clip is longer than 44 seconds; trim it first.')
            sys.exit(0)

        building_ani_src = os.path.join(
            'i:\\乐高\\图纸', self.crs_name,
            self.crs_name + '_building_animation_only.mp4')
        if os.path.exists(building_ani_src):
            print('A build animation already exists in the directory; it will be used for the merged video.')
            clip_01_src = os.path.join(
                self.pic_dir, self.crs_name,
                self.crs_name + '_building_animation_only.mp4')
            _clip_01 = mpy.VideoFileClip(clip_01_src,
                                         target_resolution=(h, w)).set_start(0)
            acc_clip_01 = _clip_01.fl_time(
                lambda t: _clip_01.duration / target_sec * t,
                apply_to=['mask', 'audio'])
            clip_01 = acc_clip_01.set_duration(target_sec)
        else:
            print('No build animation found; generating the build-animation sequence...')
            building_ani = BuildAnimation(crs_name=self.crs_name, save_yn='no')
            # _clip_01=building_ani.exp_building_movie(exptype='part',total_sec_for_part=target_sec)
            clip_01 = building_ani.exp_building_movie(
                exptype='part', total_sec_for_part=target_sec)

        # target_sec=10

        # acc_clip_01 = _clip_01.fl_time(lambda t:  _clip_01.duration/target_sec*t, apply_to=['mask', 'audio'])
        # clip_01=acc_clip_01.set_duration(target_sec)

        clip_02 = mpy.VideoFileClip(
            clip_02_src, target_resolution=(h, w)).set_start(clip_01.duration)

        # bg_time=int(clip_01.duration+clip_02.duration)-2*clip_01.duration/_clip_01.duration
        bg_time = int(clip_01.duration + clip_02.duration) - 2
        bg = mpy.ColorClip((430, 720),
                           color=(0, 0, 0),
                           ismask=False,
                           duration=bg_time).set_opacity(0.5).set_position(
                               (850, 0)).set_start(2)

        bg_left = mpy.ColorClip((300, 56),
                                color=(51, 149, 255),
                                ismask=False,
                                duration=bg_time).set_position(
                                    (275, 15)).set_start(2)

        txt_left = '科学机器人课'
        txt_title = self.crs_name[4:]
        txt_tool = '教具:' + crs_info['教具'].values.tolist()[0]
        txt_big_klg = '课程知识点'
        txt_klg = crs_info['知识点'].values.tolist()[0].split('\n')

        clip_left = mpy.TextClip(txt_left,
                                 fontsize=40,
                                 font='j:/fonts/hongMengHei.ttf',
                                 color='#ffffff').set_position(
                                     (310,
                                      18)).set_duration(bg_time).set_start(2)
        clip_title = mpy.TextClip(
            txt_title,
            fontsize=54,
            font='j:/fonts/yousheTitleHei.ttf',
            color='#ffff00').set_position(
                (int(430 / 2) - int(len(txt_title) * 54 / 2) + 860,
                 22)).set_duration(bg_time).set_start(2)
        clip_tool = mpy.TextClip(
            txt_tool,
            fontsize=26,
            font='j:/fonts/yousheTitleHei.ttf',
            color='#ffff00').set_position(
                (int(430 / 2) - int(len(txt_tool) * 26 / 2) + 880,
                 110)).set_duration(bg_time).set_start(2)
        clip_big_klg = mpy.TextClip(
            txt_big_klg,
            fontsize=46,
            font='j:/fonts/HYXinHaiXingKaiW.ttf',
            color='#ffffff').set_position(
                (int(430 / 2) - int(len(txt_big_klg) * 46 / 2) + 850,
                 350)).set_duration(bg_time).set_start(2)
        clip_logo = mpy.ImageClip(self.logo_src).set_fps(25).set_position(
            (20, 650)).set_duration(56).resize(
                (110, int(110 * 253 / 425))).set_start(0)

        clips = [
            clip_01, clip_02, bg, clip_logo, clip_title, clip_tool,
            clip_big_klg
        ]

        for n, text in enumerate(txt_klg):
            clip_klg = mpy.TextClip(
                text,
                fontsize=30,
                font='j:/fonts/HYXinHaiXingKaiW.ttf',
                color='#ffffff',
                align='West').set_position(
                    (890, 430 + n * 48)).set_duration(bg_time).set_start(2)
            clips.append(clip_klg)

        clip_end = mpy.VideoFileClip(
            self.end_clip_src, target_resolution=(h, w)).set_start(bg_time + 2)

        clips_rear = [bg_left, clip_left, clip_end]
        clips.extend(clips_rear)
        finalclip = mpy.CompositeVideoClip(clips)

        if bgm_src == 'default':
            bgm = mpy.AudioFileClip(
                self.bgm_src).set_duration(finalclip.duration -
                                           clip_end.duration).fx(
                                               afx.audio_fadeout, 0.8)
        else:
            bgm = mpy.AudioFileClip(bgm_src).set_duration(
                finalclip.duration - clip_end.duration).fx(
                    afx.audio_fadeout, 0.8)
        final_audio = mpy.CompositeAudioClip([bgm, finalclip.audio])
        mix = finalclip.set_audio(final_audio)

        out_mv = os.path.join(self.pic_dir, self.crs_name,
                              self.crs_name + '_视频号.mp4')
        mix.write_videofile(out_mv)
Example 29
from user_info import username
import moviepy.editor as mpe
user = username()
user_mp3 = user + '.mp3'
clip = mpe.VideoFileClip("stock/video.mp4")
audio_bg = mpe.AudioFileClip(f'user/{user_mp3}')
final_audio = mpe.CompositeAudioClip([audio_bg])
final_clip = clip.set_audio(final_audio)
user_output = user + '.mp4'
final_clip.write_videofile(f'user/output/{user_output}')
Example 30
import moviepy.editor as mpe
clip = mpe.VideoFileClip("C:/laragon/www/GG/1.avi")
audio_bg = mpe.AudioFileClip("C:/laragon/www/GG/main.mp3")
final_audio = mpe.CompositeAudioClip([audio_bg, clip.audio])
final_clip = clip.set_audio(final_audio)
final_clip.write_videofile("out/output.mp4")