Esempio n. 1
0
def make_video(video_paths, output_path="result.mp4", fade_duration=1):
    """
    Concatenate the given video files into one clip and write it to disk.

    Each source clip is loaded at 1080x1920, given a video and audio
    fade-in/fade-out, and the clips are joined with a half-second black
    transition frame between them.

    Parameters
    ----------
    video_paths : iterable of str
        Paths of the source video files, in playback order.
    output_path : str
        Destination file for the rendered video (default "result.mp4").
    fade_duration : float
        Length in seconds of each fade (default 1).

    Returns
    -------
    The concatenated clip that was written out.
    """
    videos = []
    for path in video_paths:
        clip = VideoFileClip(path, target_resolution=(1080, 1920))
        # Apply video fades first, then audio fades, at both ends of the clip.
        clip = fadeout(clip, fade_duration)
        clip = fadein(clip, fade_duration)
        clip = audio_fadeout(clip, fade_duration)
        clip = audio_fadein(clip, fade_duration)
        videos.append(clip)

    # Half-second of black used as the transition between consecutive clips.
    black_image = ColorClip((1920, 1080), color=[0, 0, 0], duration=0.5)
    result_clip = concat(videos, transition=black_image, method="compose")
    result_clip.write_videofile(output_path, fps=60, preset="ultrafast")
    return result_clip
Esempio n. 2
0
def getIntroVid(vidTitle, origCropFrac, sampleHeight, origVidDir, username):
    """
    Build an intro clip: the intro video is split into three equal parts,
    each overlaid with a caption (title, blank spacer, attribution), then
    the parts are concatenated with a 1-second fade out and fade in.

    Parameters
    ----------
    vidTitle : str
        Title text shown over the first third of the intro.
    origCropFrac : float
        Fraction cropped from top and bottom of the frame.
    sampleHeight : int
        Reference frame height the crop is computed against.
    origVidDir : str
        Path of the source intro video.
    username : str
        Attribution name; masked as "******" when empty.

    Returns
    -------
    The composed intro clip.
    """
    if not username:
        username = "******"
    # One caption and font size per third of the intro.
    titleParts = [vidTitle,
                  " ",
                  "by \n " + username]
    sizes = [50,
             30,
             80]

    introVid = VideoFileClip(origVidDir)
    (w, h) = introVid.size
    length = introVid.duration

    # Split the intro into three equal time windows.
    times = [[0, int(length/3)],
             [int(length/3), int(length*2/3)],
             [int(length*2/3), int(length)]]

    introVid = introVid.crop(height=int(round((1 - origCropFrac*2) * sampleHeight, 0)), width = w, x_center=w/2, y_center=h/2)
    clips = []
    for (start, end), title, size in zip(times, titleParts, sizes):
        # int() truncation above can push an end past the real duration; clamp it.
        end = min(end, introVid.duration)
        vidClip = introVid.subclip(start, end)
        text = TextClip(title, font="Amiri-Bold", fontsize=size, color="white", align = 'center').set_position(("center",0.3), relative=True)
        text_clip = mpe.CompositeVideoClip([vidClip, text]).set_duration(vidClip.duration)
        clips.append(text_clip)

    final_clip = concatenate_videoclips(clips, method='compose')

    # 1-second fades at the end and the start of the assembled intro.
    final_clip1 = fadeout(final_clip, 1, final_color=None)

    final_clip2 = fadein(final_clip1, 1, initial_color=None)

    return final_clip2
Esempio n. 3
0
def test_fadeout():
    """A half-second fadeout on the test video renders without errors."""
    source = get_test_video()
    faded = fadeout(source, 0.5)
    faded.write_videofile(os.path.join(TMP_DIR, "fadeout1.webm"))
    close_all_clips(locals())
Esempio n. 4
0
def test_fadeout():
    """Fading out a 5-second subclip of the sample video writes a webm file."""
    source = VideoFileClip("media/big_buck_bunny_0_30.webm").subclip(0, 5)
    with source as sample:
        faded = fadeout(sample, 1)
        faded.write_videofile(os.path.join(TMP_DIR, "fadeout1.webm"))
Esempio n. 5
0
def test_fadeout():
    """fadeout produces a clip that can be encoded to webm."""
    target = os.path.join(TMP_DIR, "fadeout1.webm")
    with VideoFileClip("media/big_buck_bunny_0_30.webm").subclip(0, 5) as clip:
        fadeout(clip, 1).write_videofile(target)
Esempio n. 6
0
def run(adsjson):
    """
    Assemble "holy.mp4" from a set of ad images.

    The result is: an animated intro, then — for each ad tag 0..3 — a
    caption clip followed by that tag's ad image clips, then a fading
    'SEE YA' outro, all backed by a faded-out music track.

    Parameters
    ----------
    adsjson : path to the json file describing the ads (read by readAds).
    """
    # read the ads from the json file, which contains:
    # url: the link to the image
    # tag: the type of the ad
    #   type 0: the user did not click on it and buy from the ad
    #   type 1: the user did not click on it but buy the product of the ad
    #   type 2: the user clicked on it but did not buy the product of the ad
    #   type 3: the user clicked on it and did buy the product of the ad
    img_list = readAds(adsjson)

    # Paste each ad onto a blank 640x480 background so every image matches
    # the template video's frame size.
    good_imgs = [{'data': pil_adprocess(i['data']), 'tag': i['tag']}
                 for i in img_list]

    # Duration of each section of the final clip (seconds).
    _INTRO_DURATION = 4
    _ADS_DURATION = 10
    _TEXT_DURATION = 15
    _END_DURATION = 4
    # Full duration of the created video.
    _DURATION = _INTRO_DURATION + _ADS_DURATION + _TEXT_DURATION + _END_DURATION

    # 1. Intro clip with a text effect (see intro()), composed over its mask.
    txt_clip, maskclip = intro(_INTRO_DURATION)
    intro_clip = CompositeVideoClip([maskclip, txt_clip])

    # 2. Group the ad images by tag. Images with any other tag are dropped,
    # matching the behavior of the original if/elif chain.
    tag_groups = {0: [], 1: [], 2: [], 3: []}
    for img in good_imgs:
        if img['tag'] in tag_groups:
            tag_groups[img['tag']].append(np.array(img['data']))

    # One caption (text, duration, effect) per tag, in tag order:
    #   tag 0 - ad the user totally ignored
    #   tag 1 - user bought the product, but not via the ad
    #   tag 2 - user clicked but never purchased
    #   tag 3 - user clicked and purchased
    captions = [
        ('love me plz', 3, ef_cascade),
        ('We got cheaper ones!', 3.5, ef_vortex),
        ('87/100 Interested', 4.5, ef_arrive),
        ('Thank you for these', 4, ef_vortex),
    ]
    fulltagclips = []
    for tag, (text, duration, effect) in enumerate(captions):
        fulltagclips.append(simpleTextClip(text, 50, 'center', duration, effect))
        fulltagclips.extend(tagclip(tag_groups[tag]))

    # Bye clip, a 'SEE YA' fade-out text clip.
    byeclip = fadeout(simpleTextClip('SEE YA', 50, 'center', _END_DURATION,
                                     ef_vortex),
                      duration=_END_DURATION)
    concat_clip = [intro_clip] + fulltagclips + [byeclip]

    # Concatenate all the clips.
    final = concatenate_videoclips(concat_clip)
    # Read the audio, trimmed to the video length, with a 4 s fade out.
    audio = audio_fadeout(AudioFileClip("sources/facebook.mp3").subclip(
        0, _DURATION),
                          duration=4)
    final = final.set_audio(audio)
    # Write the result to a file (many options available!).
    final.write_videofile("holy.mp4", fps=24, codec='libx264', audio_codec='aac',
                          temp_audiofile='bgTMP.m4a', remove_temp=True)
Esempio n. 7
0
 def video_fade_out(self, source: str, destination: str,
                    duration: float) -> None:
     """Write *source* to *destination* with a *duration*-second fade out."""
     faded = fadeout(VideoFileClip(source), duration)
     faded.write_videofile(destination)
Esempio n. 8
0
def genPMVs(PMV, dot, sampleHeight, sampleWidth, pythonDir):
    """
    Download a music track and source videos, detect loudness peaks in the
    audio, cut the videos at those points, and render the assembled PMV
    (with optional intro, crop and fades) to PMV.outDir.

    Parameters
    ----------
    PMV : settings object carrying the whole job configuration
        (musicDir, musicURL, musicType, vidDir, videoURLs, trimSong,
        songStart, songEnd, granularity, sd_scale, nSplits, min_length,
        resize, flipBool, origVidScale, originalCrop, origCropFrac,
        startTime, addIntro, introVidDir, userName, vidName, outDir,
        randomise) -- exact schema defined elsewhere in the project.
    dot : str
        File-extension separator, presumably "." -- TODO confirm.
    sampleHeight, sampleWidth : int
        Target frame dimensions used for resizing and cropping.
    pythonDir : str
        Path to the python interpreter used to spawn downloadVid.py.
    """
    #    file
    #    outDir

    # youtube-dl options: save the music under its title, forced to .mp4.
    ydl_opts = {
        'outtmpl': PMV.musicDir + '%(title)s' + '.mp4',
        'format': 'best',
        'playlist': 'no'
    }

    # Download the music track; keep its title to locate the file later.
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.cache.remove()
        info_dict = ydl.extract_info(PMV.musicURL, download=False)
        print('downloading?', PMV.musicURL)
        ydl.download([PMV.musicURL])
        musicName = info_dict.get('title', None)

    # Download each source video in a separate subprocess (downloadVid.py).
    for vid in PMV.videoURLs:
        print(vid)
        subprocess.call([pythonDir, r"downloadVid.py", PMV.vidDir, vid])

    vidDir = PMV.vidDir

    # An 'mp4' music source means an original music video exists for reuse.
    if PMV.musicType == 'mp4':
        originalVidBool = True
    elif PMV.musicType == 'mp3':
        originalVidBool = False
    else:
        originalVidBool = False

    print(AudioSegment.ffmpeg)

    ################################

    # NOTE(review): the extension is hard-coded to 'mp4' here; the trailing
    # comment suggests PMV.musicType was once used instead.
    mp3_dir = PMV.musicDir + musicName + dot + 'mp4'  # PMV.musicType
    ###############################

    # Derive the output file name from the user name and music title.
    if len(PMV.vidName) == 0:
        PMV.vidName = PMV.userName + ' PMV - ' + musicName
    elif len(PMV.userName) > 0:
        PMV.vidName = PMV.vidName + ' - ' + PMV.userName

    file_out = PMV.outDir + PMV.vidName + dot + 'mp4'  #  filetypeout

    print(mp3_dir)

    # Load the audio twice: moviepy clip for the final soundtrack, pydub
    # AudioSegment for raw-sample analysis below.
    audioclip = AudioFileClip(mp3_dir)

    print('error1')  # progress marker (debug print), not an actual error

    sound = AudioSegment.from_file(mp3_dir, 'mp4')
    print('error2')

    # Optionally trim both audio representations to the same window
    # (pydub slices in milliseconds, moviepy in seconds).
    if PMV.trimSong == True:
        sound = sound[PMV.songStart * 1000:PMV.songEnd * 1000]
        audioclip = audioclip.subclip(PMV.songStart, PMV.songEnd)

    print('error3')
    tenSecs = 10 * 1000  # pydub works in milliseconds
    first_10_seconds = sound[:tenSecs]

    ten_data = first_10_seconds._data

    # NOTE(review): np.fromstring is deprecated; np.frombuffer is the
    # drop-in replacement for this usage.
    first_ten_data = np.fromstring(ten_data, dtype=np.int16)
    new_ten_data = np.absolute(first_ten_data)

    # %% Music Data

    # Samples per second, measured from the first 10 s of audio.
    bitrate = len(new_ten_data) / 10  # raw data to 1 s
    print(bitrate)
    # Samples per analysis bucket at the configured granularity.
    ratio = int(round(bitrate * PMV.granularity, 0))
    raw_data = sound._data

    first_data = np.fromstring(raw_data, dtype=np.int16)
    new_data = np.absolute(first_data)

    # Bucket the waveform and diff neighbouring buckets, then pick the
    # loudness jumps that become the video cut points.
    reshaped_data = reshapeData(new_data, ratio)

    diff_data = getElementDiff(reshaped_data)

    result = getHighValues2(reshaped_data, diff_data, PMV.sd_scale,
                            PMV.nSplits, PMV.granularity, PMV.min_length)

    print('List of Indices of maximum element :', len(result))

    print(result)

    print(vidDir)

    # Append the end of the song as the final cut point.
    result.append(len(first_data) / ratio)

    videosIn = list()
    iVids = 0

    # Collect up to 120 .mp4 files from the video directory.
    for f in listdir(vidDir):
        # NOTE(review): 'isfile' is the bare function object here, which is
        # always truthy -- probably meant isfile(join(vidDir, f)).
        if isfile:
            if f.endswith(".mp4"):
                if iVids < 120:
                    videosIn.append(f)
                iVids = iVids + 1

    videoData = list()

    # Wrap each file in a Video record; custom start/end filled in later.
    for i in videosIn:
        videoData.append(
            Video(name=i, customStart=0, customEnd=0, directory=vidDir))
        print(i)

    nInVids = len(videosIn)

    # Weight the original music video by adding it multiple times,
    # proportionally to origVidScale.
    iOrig = 0
    origVidName = musicName + dot + PMV.musicType
    if originalVidBool == True:
        while iOrig <= nInVids * PMV.origVidScale:
            videoData.append(
                Video(name=origVidName,
                      customStart=0,
                      customEnd=0,
                      directory=PMV.musicDir))
            iOrig = iOrig + 1

    nVideos = len(videoData)

    videos = [0] * nVideos  # placeholder slots, replaced with clips below

    # Load every clip (optionally resized and mirrored) and record the
    # usable start/end window for each one.
    i = 0
    while i < nVideos:
        if PMV.resize == True:
            vidTemp = VideoFileClip(
                videoData[i].directory + videoData[i].name).resize(
                    width=sampleWidth)  #(sampleWidth, sampleHeight))
            # Never mirror the original music video itself.
            if PMV.flipBool == True and videoData[i].name != origVidName:
                vidTemp2 = mirror_x(vidTemp)
            else:
                vidTemp2 = vidTemp
            videos[i] = vidTemp2
        else:
            videos[i] = VideoFileClip(vidDir + videoData[i].name)

        print('name', 'duration', 'customStart', 'customEnd')

        if originalVidBool == True and videoData[i].name == origVidName:
            # The original video's window follows the (possibly trimmed) song.
            if PMV.trimSong == True:
                videoData[i].customEnd = PMV.songEnd
                videoData[i].customStart = PMV.songStart
            else:
                videoData[i].customEnd = videos[i].duration
                videoData[i].customStart = 0  #
            print(videoData[i].name, videos[i].duration,
                  videoData[i].customStart, videoData[i].customEnd,
                  'Original Video')
        else:
            # Ordinary clips skip the first startTime seconds and the last
            # 40 seconds -- presumably to avoid intros/outros; TODO confirm.
            customStart = PMV.startTime
            subtractEnd = 40

            videoData[i].customEnd = videos[i].duration - subtractEnd
            videoData[i].customStart = customStart
            print(videoData[i].name, videos[i].duration,
                  videoData[i].customStart, videoData[i].customEnd)

        i = i + 1

    # Cut the clips at the detected loudness-change points.
    clips = videoSplits(result, videos, videoData, first_data, bitrate,
                        PMV.granularity, PMV.randomise, origVidName)

    # Rendering occasionally fails with OSError; retry up to 3 times.
    for attempt in range(3):
        try:

            print('stage 1')
            finalVideo = concatenate_videoclips(clips, method='compose')

            # Optional letterbox crop of the assembled video.
            if PMV.originalCrop == True:
                (w, h) = finalVideo.size
                print(PMV.origCropFrac, int(sampleHeight * PMV.origCropFrac),
                      int((1 - PMV.origCropFrac) * sampleHeight), w, h)
                finalVideo = finalVideo.crop(height=int(
                    round((1 - PMV.origCropFrac * 2) * sampleHeight, 0)),
                                             width=w,
                                             x_center=w / 2,
                                             y_center=h / 2)

            print('stage 2')
            # NOTE(review): volumex returns a new clip; the result is
            # discarded here, so this call has no effect as written.
            finalVideo.volumex(0)

            print('stage 3')
            # Replace the (intended-muted) audio with the downloaded track.
            finalVideo2 = finalVideo.set_audio(audioclip)

            print('stage 4')
            # One-second fade out then fade in on the assembled video.
            finalVideo2a = fadeout(finalVideo2, 1, final_color=None)

            finalVideo2c = fadein(finalVideo2a, 1, initial_color=None)

            # Optionally prepend the generated title intro.
            if PMV.addIntro:
                introVideo = getIntroVid(PMV.vidName, PMV.origCropFrac,
                                         sampleHeight, PMV.introVidDir,
                                         PMV.userName)
                finalVideo3 = concatenate_videoclips(
                    [introVideo, finalVideo2c], method='compose')
            else:
                finalVideo3 = finalVideo2c

            print('stage 4')  # NOTE(review): duplicate 'stage 4' marker

            print('stage 5')

            finalVideo3.write_videofile(file_out, threads=4, fps=25)
        except OSError as OSErrorMessage:
            print("OSError retrying - Attempt: ", attempt)
            print(OSErrorMessage)
            pass
        else:
            break

    print('stage 6')
    # Close the clip readers explicitly to release file handles.
    # NOTE(review): `del videos[i]` shifts the list while i advances, so
    # this loop only visits every other element -- half the readers are
    # never closed.
    i = 0
    while i < len(videos):
        videos[i].reader.close()
        del videos[i].reader
        del videos[i]
        i = i + 1

    del audioclip.reader
    del audioclip

    print('Finished!')
Esempio n. 9
0
def test_fadeout():
    """Rendering a faded test clip to webm completes cleanly."""
    original = get_test_video()
    faded_clip = fadeout(original, 0.5)
    target = os.path.join(TMP_DIR, "fadeout1.webm")
    faded_clip.write_videofile(target)
    close_all_clips(locals())