Example #1
import os
from selenium import webdriver
from moviepy.editor import ImageClip, ColorClip, CompositeVideoClip

def main(url, output):
    driver = webdriver.Chrome()
    remote_url = url
    driver.get(remote_url)
    
    png = chrome_takeFullScreenshot(driver)  # full-page screenshot helper defined elsewhere in the source (see the sketch below)
    with open("website_image.png", 'wb') as f:
        f.write(png)

    driver.close()

    clip = ImageClip('website_image.png')
    
    video_width = int(clip.size[0] + 800)   # pad the screenshot width to leave a background border
    video_height = int(video_width / 1.5)   # keep a roughly 3:2 frame

    bg_clip = ColorClip(size=(video_width, video_height), color=[228, 220, 220])

    scroll_speed = 180  # pixels scrolled per second
    total_duration = (clip.h - 800) / scroll_speed

    # Crop an 800 px tall window out of the screenshot; the window slides down over time.
    fl = lambda gf, t: gf(t)[int(scroll_speed * t):int(scroll_speed * t) + 800, :]
    clip = clip.fl(fl, apply_to=['mask'])

    video = CompositeVideoClip([bg_clip, clip.set_pos("center")])
    video.duration = total_duration
    if not output.endswith('.mp4'):
        output += '.mp4'
    video.write_videofile(output, fps=26)
    os.remove('website_image.png')
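The chrome_takeFullScreenshot helper used above is defined elsewhere in the source file and is not shown here. A minimal sketch of such a helper, assuming Selenium's Chrome driver and its execute_cdp_cmd method (this approach is an assumption, not the author's code):

import base64
from selenium import webdriver

def chrome_takeFullScreenshot(driver):
    # Hypothetical implementation: capture the whole page (not just the viewport)
    # through the Chrome DevTools Protocol and return raw PNG bytes.
    metrics = driver.execute_cdp_cmd("Page.getLayoutMetrics", {})
    width = int(metrics["contentSize"]["width"])
    height = int(metrics["contentSize"]["height"])
    driver.execute_cdp_cmd("Emulation.setDeviceMetricsOverride", {
        "mobile": False,
        "width": width,
        "height": height,
        "deviceScaleFactor": 1,
    })
    shot = driver.execute_cdp_cmd("Page.captureScreenshot",
                                  {"format": "png", "fromSurface": True})
    return base64.b64decode(shot["data"])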
Example #2
def chart_highlights(week, position):
    # Relies on module-level names defined elsewhere in the source:
    # duration, position_image_size, settings and os.path.join.
    gains = ['airplay_gain', 'stream_gain', 'digital_gain', 'highest_ranking_debut']

    top_offset = 0
    n_movies = 0
    gainer_movies = []
    for gain in gains:
        if getattr(week, gain) == position:
            top_offset = n_movies * 50
            n_movies += 1
            gainer_image = ImageClip(
                join(settings.IMAGE_ASSETS, "{}.png".format(gain))
            ).set_duration(duration)

            gainer_movies.append(gainer_image.set_pos(
                lambda t, top_offset=top_offset:
                    (min(10, -position_image_size['x'] + t * 400),
                     (1080 - 20 - position_image_size['y'] - 55 - top_offset,
                      int(1060 - position_image_size['y'] + 380 * t - 380 * 13))[t > 13])))
    return gainer_movies
Example #3
def generate_video(test=True):
    # Relies on module-level names defined elsewhere in the source: the moviepy imports,
    # os.path.join, Django settings and the Week model, plus duration, position_image_size,
    # change_image_size and lower_third_size.

    video_list = []
    sub_video = subscribers_video()
    video_list.append(sub_video)
    week = Week.objects.all()[0]
    for i, position in enumerate(week.position_set.all()):
        if i == 2 and test:
            break  # in test mode, only render the first two positions
        if i == 50:
            break  # cap the video at the top 50 positions

        video = VideoFileClip(join(settings.VIDEO_ASSETS,
                                   "{} - {}.mp4".format(position.song.name,
                                                        position.song.artist))).set_duration(10)
        # video = audio_fadeout(video, 2)

        graph = (ImageClip(join(settings.IMAGES, "graph{}.png".format(position.position))).
                 set_duration(duration))

        graph = graph.set_pos(lambda t: (
            (max(1445, 1800 - t * 700), (5, int(20 - 400*t + 400*13.2))[t > 13.2])))

        ####
        w, h = video.size

        position_image = ImageClip(
            join(settings.IMAGES, "pos{}.png".format(position.position))
        ).set_duration(duration)

        change_image = ImageClip(
            join(settings.IMAGES, "change{}.png".format(position.position))
        ).set_duration(duration)

        lower_third_image = ImageClip(
            join(settings.IMAGES, "lower_third{}.png".format(position.position))
        ).set_duration(duration)

        # I am *not* explaining the formula; work it out if you can/want to.
        # txt_mov = txt_col.set_pos(lambda t: (max(w/30, int(w-0.5*w*t)), max(5*h/6, int(100*t))) )
        txt_mov = position_image.set_pos(
            lambda t: (min(0, -position_image_size['x'] + t * 400),
                       (1080 - 20 - position_image_size['y'],
                        int(1060 - position_image_size['y'] + 380*t - 380*13))[t > 13]))

        change_image_mov = change_image.set_pos(
            lambda t: (min(change_image_size['x'], -position_image_size['x'] + t * 700),
                       (1080 - 20 - position_image_size['y'],
                        int(1060 - position_image_size['y'] + 400*t - 400*13.2))[t > 13.2]))

        lower_third_mov = lower_third_image.set_pos(
            lambda t: (min(change_image_size['x'] + position_image_size['y'], -lower_third_size['x'] + t * 2500),
                       (1080 - 20 - lower_third_size['y'],
                        int(1060 - lower_third_size['y'] + 430*t - 430*13.4))[t > 13.4]))

        gainer_mov = chart_highlights(week, position.position)
        final = CompositeVideoClip([video, lower_third_mov, change_image_mov,
                                    txt_mov, graph] + gainer_mov, size=(1920, 1080)).fadeout(0.2)
        video_list.append(final)

    FINAL = concatenate_videoclips(list(reversed(video_list)))
    FINAL.write_videofile(join(settings.VIDEOS, "billboard_top_50_this_week.mp4"),
                          fps=24, codec='libx264')
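The positioning lambdas in Examples #2 and #3 rely on the tuple-indexing idiom (a, b)[condition]: since False indexes as 0 and True as 1, the expression yields a before the time threshold and b after it. A small standalone illustration (the numbers are made up, not taken from the examples above):

t = 5
print((100, 100 - 380 * (t - 13))[t > 13])  # 100: before the 13-second mark the first value is used
t = 14
print((100, 100 - 380 * (t - 13))[t > 13])  # -280: after 13 seconds the clip slides upward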
Example #4
from moviepy.editor import AudioFileClip, ImageClip, TextClip, CompositeVideoClip

songClip = AudioFileClip("./Dekhte_Dekhte.mp3") # load the song as an audio clip
lrcFile = open('./Dekhte_Dekhte.lrc', 'r')
lines = lrcFile.readlines() # read the lyrics

bgImageClip = ImageClip('./img/background.jpg') # background image for the video

beginTimeStamp = '00:00.00'
firstLyricTimeStamp = lines[5][1:9]
songDuration = lines[-1][1:9]
screensize = (1920, 1080)
lastLineFlag = 0

fillerText = TextClip('Dekhte Dekhte', color='white', font="Amiri-Bold", kerning=5, fontsize=100) # text to display before the lyrics start
fillerDuration = timeDiff(firstLyricTimeStamp, beginTimeStamp) # time before lyrics start
fillerVideoClip = CompositeVideoClip( [bgImageClip.set_pos('center').set_duration(fillerDuration), fillerText.set_pos('center').set_duration(fillerDuration)], size=screensize) # video clip where filler text is displayed on background image
videoClips = [fillerVideoClip] # add the video clip to a list

for i in range(5, len(lines)): #start reading lyrics
    lyricStrip = lines[i][10:]
    currentLineTimeStamp = lines[i][:10][1:-1]
    print(currentLineTimeStamp)
    origLyric = lyricStrip.split('!!')[0].lstrip().rstrip() # lyric strip in original language
    engLyric = lyricStrip.split('!!')[1].lstrip().rstrip() # lyric strip in translated language
    displayText = origLyric + '\n' + '\n' + '*******************' + '\n' + engLyric # text to display at each time step
    LyricClip = TextClip(displayText, color='white', font="Amiri-Bold", kerning=5, fontsize=60) # text clip for the combined lyrics
    if (lines[i+1] == '\n'):
        duration = timeDiff(songDuration, currentLineTimeStamp) # calculate time period to display lyric
        lastLineFlag = 1 # if last lyric is reached
    else:
        nextLineTimeStamp = lines[i+1][:10][1:-1]
        duration = timeDiff(nextLineTimeStamp, currentLineTimeStamp) # display until the next lyric line starts
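The lyrics snippet above depends on a timeDiff helper that is not shown. A minimal sketch, assuming LRC timestamps of the form MM:SS.xx and a return value in seconds (both assumptions, since the original helper is not included here):

def timeDiff(later, earlier):
    # Hypothetical helper: convert "MM:SS.xx" timestamps to seconds and return the difference.
    def to_seconds(stamp):
        minutes, seconds = stamp.split(':')
        return int(minutes) * 60 + float(seconds)
    return to_seconds(later) - to_seconds(earlier)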
Example #5
PEP-0008, with exceptions made for the purposes of improving book
formatting. Example code is provided "as is".
Permissions
============
In general, you may use the code we've provided with this book in your
programs. You do not need to contact us for permission unless you're
reproducing a significant portion of the code and using it in educational
distributions. Examples:
* Writing an educational program or book that uses several chunks of code from
    this course requires permission.
* Selling or distributing a digital package of material taken from this
    book does require permission.
* Answering a question by citing this book and quoting example code does not
    require permission.
Attributions usually include the title, author, publisher and an ISBN. For
example, "Practical Python Projects, by Yasoob Khalid. Copyright 2020 Yasoob."
If you feel your use of code examples falls outside fair use or the permission
given here, please contact me at [email protected].
"""

from moviepy.editor import ImageClip, ColorClip, CompositeVideoClip

clip = ImageClip('website_image.png')  # full-page screenshot saved earlier
bg_clip = ColorClip(size=(1600, 1000), color=[228, 220, 220])  # light grey background canvas
scroll_speed = 180  # pixels scrolled per second
total_duration = (clip.h - 800) / scroll_speed

# Crop an 800 px tall window out of the screenshot; the window slides down over time.
fl = lambda gf, t: gf(t)[int(scroll_speed * t):int(scroll_speed * t) + 800, :]
clip = clip.fl(fl, apply_to=['mask'])

video = CompositeVideoClip([bg_clip, clip.set_pos("center")])
video.duration = total_duration
video.write_videofile("movie.mp4", fps=26)