# Example 1
def video_song(song_name):
    """Download, trim, and upload a Reginaldo Rossi video for *song_name*.

    Searches YouTube, downloads the first result as a progressive 360p mp4,
    cuts seconds 10-55 with audio/video fades, writes the result, and
    uploads it as a tweet video.

    Returns:
        dict with keys "media" (the upload handle) and "link" (the source
        YouTube URL) on success, or False on failure.
    """
    # Grab the first search result; entries appear to be (title, url)
    # pairs, judging by the [0][1] indexing — TODO confirm against
    # find_three_links.
    searches = find_three_links(f"reginaldo rossi {song_name}")
    link = searches[0][1]

    if not link:
        return False

    yt = YouTube(link)
    input_video = "data/videos/clip.mp4"
    output_video = "data/videos/clip2.mp4"

    clip = None
    try:
        # .first() returns None when no stream matches the filter, which
        # raises AttributeError on .download() — handled below.
        yt.streams.filter(
            progressive=True, res='360p',
            file_extension='mp4').first().download(filename=input_video)

        # Cut the vid (10s-55s), add fade in (2s) and fade out (5s) on
        # both video and audio, then save it.
        clip = VideoFileClip(input_video).subclip(10, 55)
        clip = fadein(clip, duration=2)
        clip = audio_fadein(clip, duration=2)
        clip = fadeout(clip, duration=5)
        clip = audio_fadeout(clip, duration=5)
        clip.write_videofile(output_video, codec="libx264", audio_codec="aac")

        media = api.media_upload(filename=output_video,
                                 media_category="tweet_video")
        return {"media": media, "link": link}
    except AttributeError as e:
        # No matching stream (or a broken pipeline step): log and signal
        # failure explicitly — previously this fell through and returned
        # None instead of False like the early-exit path above.
        print(e)
        return False
    finally:
        # Release the underlying ffmpeg reader processes/file handles.
        if clip is not None:
            clip.close()
# Example 2
def clipIt(vod, momentTime, sample_window, VOD_ID=None, suspenseSound=None):
    """Cut a highlight clip around *momentTime* with fades and extras.

    Args:
        vod: source clip supporting .subclip (moviepy VideoClip).
        momentTime: datetime of the most engaged moment.
        sample_window: seconds of footage to keep before the moment.
        VOD_ID: optional Twitch VOD id; when given, a watermark with the
            VOD URL is composited at the bottom of the clip.
        suspenseSound: optional path to an audio file mixed over the clip
            audio with its own fades.

    Returns:
        The decorated moviepy clip.
    """
    dt_sample_window = datetime.timedelta(0, sample_window)
    # BUG FIX: was the bare int 10; datetime + int raises TypeError, so
    # the fixed 10-second tail must be a timedelta to be added below.
    dt_sample_window_end = datetime.timedelta(0, 10)

    startTime = (momentTime - dt_sample_window).strftime(TIME_FORMAT)

    endTime = (momentTime + dt_sample_window_end).strftime(TIME_FORMAT)
    print(f"Found most engaged moment at: {startTime} to {endTime}", )

    clip = vod.subclip(startTime, endTime)

    # Add watermark pointing back at the source VOD.
    if VOD_ID:
        txt_clip = mpy.TextClip(f"twitch.tv/videos/{VOD_ID}",
                                fontsize=14,
                                color="white")
        txt_clip = txt_clip.set_pos("bottom").set_duration(sample_window)
        clip = mpy.CompositeVideoClip([clip, txt_clip])

    # Add fade in and fade out
    FADE_DURATION = 3
    clip = vfx.fadeout(clip, FADE_DURATION)
    clip = vfx.fadein(clip, FADE_DURATION)

    if suspenseSound:
        # Fade in some suspense audio and mix it with the clip's own track.
        audioclip = mpy.AudioFileClip(suspenseSound).set_duration(
            sample_window)

        audioclip = afx.audio_fadeout(audioclip, FADE_DURATION)
        audioclip = afx.audio_fadein(audioclip, round(FADE_DURATION * 2))

        clipAudio = mpy.CompositeAudioClip([clip.audio, audioclip])
        clip.audio = clipAudio

    return clip
# Example 3
def run(adsjson):
    """Assemble the full promo video from the ads described in *adsjson*.

    The json file lists, per ad:
      url: link to the image
      tag: how the user interacted with the ad —
        0: did not click and did not buy
        1: did not click but bought the product anyway
        2: clicked but did not buy
        3: clicked and bought
    Writes the finished video to "holy.mp4".
    """
    ads = readAds(adsjson)

    # Normalise every ad image for the 640x480 template video by pasting
    # the original onto a blank background of that size.
    processed = [
        {'data': pil_adprocess(ad['data']), 'tag': ad['tag']}
        for ad in ads
    ]

    # Per-section durations (seconds) used to sample the final clip.
    _INTRO_DURATION = 4
    _ADS_DURATION = 10
    _TEXT_DURATION = 15
    _END_DURATION = 4
    # Total length of the created video.
    _DURATION = _INTRO_DURATION + _ADS_DURATION + _TEXT_DURATION + _END_DURATION

    # 1. Intro clip: text effect composited over its mask, see info().
    txt_clip, maskclip = intro(_INTRO_DURATION)
    intro_clip = CompositeVideoClip([maskclip, txt_clip])

    # 2. Group the processed images by tag (0-3); other tags are ignored.
    grouped = {tag: [] for tag in range(4)}
    for item in processed:
        if item['tag'] in grouped:
            grouped[item['tag']].append(np.array(item['data']))

    # Caption text, duration, and effect for each tag group:
    #   tag0 — ads the user totally ignored
    #   tag1 — user bought elsewhere, not via the ad
    #   tag2 — user clicked but never purchased
    #   tag3 — ads the user appreciated
    caption_specs = [
        ('love me plz', 3, ef_cascade),
        ('We got cheaper ones!', 3.5, ef_vortex),
        ('87/100 Interested', 4.5, ef_arrive),
        ('Thank you for these', 4, ef_vortex),
    ]
    fulltagclips = []
    for tag, (text, dur, effect) in enumerate(caption_specs):
        fulltagclips.append(simpleTextClip(text, 50, 'center', dur, effect))
        fulltagclips.extend(tagclip(grouped[tag]))

    # Closing card: a 'SEE YA' text clip fading out over the end section.
    byeclip = fadeout(
        simpleTextClip('SEE YA', 50, 'center', _END_DURATION, ef_vortex),
        duration=_END_DURATION)

    # Concatenate intro, captioned ad groups, and the goodbye card.
    final = concatenate_videoclips([intro_clip] + fulltagclips + [byeclip])

    # Background track trimmed to the video length, with a 4 s fade out.
    audio = audio_fadeout(
        AudioFileClip("sources/facebook.mp3").subclip(0, _DURATION),
        duration=4)
    final = final.set_audio(audio)

    # Write the result to a file (many options available !)
    final.write_videofile("holy.mp4", fps=24, codec='libx264', audio_codec='aac',
                          temp_audiofile='bgTMP.m4a', remove_temp=True)
# Example 4
# -------------------------------------------------------------
# BACKGROUND AUDIO
# Locate the first non-temporary mp3 in the media folder and
# prepare it as the background track.
# -------------------------------------------------------------
os.chdir(MEDIA)
try:
    # next() stops at the first match and lets us raise a clear error,
    # instead of the opaque IndexError the old [...][0] produced when
    # no matching file existed.
    filename: str = next(
        name
        for name in glob.iglob("**.mp3")
        if "TEMP" not in name
    )
except StopIteration:
    raise FileNotFoundError(f"No background .mp3 found in {MEDIA}") from None
audio: editor.AudioFileClip = editor.AudioFileClip(filename)
# Halve the volume so the background sits under the main track.
audio: editor.AudioFileClip = afx.volumex(audio, factor=0.5)

# -------------------------------------------------------------
# AUDIO TRANSFORMATIONS
# Applying different audio effects to the background audio.
# -------------------------------------------------------------
audio: editor.AudioFileClip = afx.audio_fadein(audio, duration=1)
audio: editor.AudioFileClip = afx.audio_fadeout(audio, duration=1)

# -------------------------------------------------------------
# AUDIO LOOP
# Creating an infinite loop of the audio so that it is enough
# to cover the video.
# TODO: In future versions, multiple audios should be supported.
# -------------------------------------------------------------
audio: editor.AudioFileClip = afx.audio_loop(audio, nloops=100)

# -------------------------------------------------------------
# AUDIO VIDEO
# Sanity check: the looped background must outlast the video's
# own audio track before the two are combined.
# -------------------------------------------------------------
if final.audio.duration > audio.duration:
    raise RuntimeError("Video is longer than the Audio:", final.audio.duration, audio.duration)