Exemple #1
0
def compose(timestamps):
    """Download the video segment for every timestamped word and stitch
    them all into supercut.mp4.

    Args:
        timestamps: dict mapping video id -> list of word dicts, each
            with 'start' and 'end' times in seconds.
    """
    pool = Pool(processes=5)

    # Resolve each video id to a downloadable url, in parallel.
    url_pairs = pool.map(get_vid_url, timestamps.keys())
    urls = {vid: url for vid, url in url_pairs if url}

    # Build one (url, start, end, outname) download job per word.
    to_download = []
    index = 0
    for vid in timestamps:
        if vid not in urls:
            continue

        for word in timestamps[vid]:
            clip_start = word['start']
            # Small tail so the word isn't clipped mid-utterance.
            clip_end = word['end'] + 0.02
            outname = str(index).zfill(4) + '.mp4'
            to_download.append((urls[vid], clip_start, clip_end, outname))
            index += 1

    # Download all segments in parallel, then stitch them in order.
    clipnames = pool.starmap(download_segment, to_download)
    clips = [Clip(name) for name in clipnames]

    comp = Composition(clips, singletrack=True)
    comp.save('supercut.mp4')
Exemple #2
0
def make_video():
    """Stitch every clip in mergeSequence (with tag-based fades and a
    glow) into a timestamped render/videoMergeClassic*.mp4.

    Relies on the module-level mergeSequence, beginning, ending and
    randomSounds collections.
    """
    clips = []

    for video in mergeSequence:
        filename = video["path"]
        clip = Clip(filename, start=video['start'], end=video['end'])

        if "abstract" in video["tags"]:
            clip.fadein(1)
            clip.fadeout(1.5)
        # BUG FIX: these were `for video in beginning:` (etc.), which
        # shadowed the outer loop variable and applied the fade once per
        # element of each list. Membership tests match the sibling
        # make_video(tag) implementation.
        if video in beginning:
            clip.fadein(3)
        if video in ending:
            clip.fadeout(1.5)
        if video in randomSounds:
            clip.fadein(1)
            clip.fadeout(1.5)

        clip.glow()
        clips.append(clip)
        print(mergeSequence[0]["id"])

    composition = Composition(clips, singletrack=True, width=800, height=800)
    videoName = "render/videoMergeClassic" + datetime.datetime.now().strftime("%Y%m%d%H%M%S") + ".mp4"
    composition.save(videoName)
Exemple #3
0
def compose_hmms():
    """Collect every 'mmm' utterance found in the transcript JSON files
    and stitch the matching video segments into hmm.mp4."""
    # Gather (jsonfile, start, end) for each timestamped "mmm".
    eats = []
    for jsonfile in glob(BASE + '*.json'):
        with open(jsonfile, 'r') as infile:
            words = json.load(infile)['words']
            eats.extend(
                (jsonfile, w['start'], w['end'])
                for w in words
                if w['word'].lower() == 'mmm' and 'end' in w and 'start' in w
            )

    clips = []
    for i, (jsonfile, start, end) in enumerate(eats):
        vid = jsonfile.replace('.json', '')
        # Cap each segment at one second, measured back from its end.
        if end - start > 1:
            start = end - 1
        # Drop segments too short to be useful.
        if end - start < 0.1:
            continue
        print(vid, start, end)
        clips.append(Clip(vid, start=start, end=end))

    outname = 'hmm.mp4'
    comp = Composition(clips, singletrack=True)
    comp.save(outname)
Exemple #4
0
def compose_silences(min_silence=2, max_silence=4.0, total=None, randomize=True, outname='silences.mp4'):
    """Find the gaps between consecutive words in the transcript JSON
    files and stitch the matching video segments into one video.

    Args:
        min_silence: shortest gap (seconds) to keep.
        max_silence: longest gap (seconds) to keep.
        total: if given, keep at most this many silences.
        randomize: shuffle the silences before trimming/stitching.
        outname: output video filename.
    """
    silences = []
    for jsonfile in glob(BASE + '*.json'):
        with open(jsonfile, 'r') as infile:
            words = json.load(infile)['words']
            # Look at each consecutive word pair; skip pairs whose
            # timing data is missing or malformed.
            for first, second in zip(words, words[1:]):
                try:
                    gap = second['start'] - first['end']
                    if min_silence <= gap <= max_silence:
                        silences.append((jsonfile, first['end'], second['start']))
                except Exception:
                    continue

    if randomize:
        random.shuffle(silences)

    if total is not None:
        silences = silences[0:total]

    clips = []
    for jsonfile, start, end in silences:
        vid = jsonfile.replace('.json', '')
        print(vid, start, end)
        clips.append(Clip(vid, start=start, end=end))

    comp = Composition(clips, singletrack=True)
    comp.save(outname)
Exemple #5
0
def make_video():
    """Stitch the clips in mergeSequence (with tag-based fades and a
    glow) into mergeVideo3_nature4.mp4.

    Relies on the module-level mergeSequence, beginning, ending and
    randomSounds collections.
    """
    clips = []

    for video in mergeSequence:
        filename = video["path"]
        clip = Clip(filename, start=video['start'], end=video['end'])
        if "abstract" in video["tags"]:
            clip.fadein(1)
            clip.fadeout(1.5)
        # BUG FIX: these were `for video in beginning:` (etc.), which
        # shadowed the loop variable and repeated the fade once per list
        # element; membership tests match the sibling make_video(tag).
        if video in beginning:
            clip.fadein(3)
        if video in ending:
            clip.fadeout(1.5)
        if video in randomSounds:
            clip.fadein(1)
            clip.fadeout(1.5)

        clip.glow()
        clips.append(clip)
        # Parenthesized so this works on both Python 2 and 3.
        print(mergeSequence[0]["id"])

    composition = Composition(clips, singletrack=True, width=800, height=800)
    composition.save('mergeVideo3_nature4.mp4')
def compose_with_vidpy(
    maxduration=60,
    thresh=0.2,
    fade=0.3,
    duration=4,
    sections=3,
    padding=0.5,
    outname="home.mp4",
):
    """Assemble a montage of randomly chosen shots from the videos/
    directory, using pre-computed *.shots.json boundary scores.

    Args:
        maxduration: stop adding clips once the timeline reaches this length.
        thresh: minimum shot-change score for a boundary to count.
        fade: fade-in length; consecutive clips overlap by this much.
        duration: maximum clip length (minus padding).
        sections: unused; kept for interface compatibility.
        padding: seconds trimmed from each end of a shot.
        outname: output video filename.
    """
    shots = {}

    for f in glob("videos/*.shots.json"):
        with open(f, "r") as infile:
            data = json.load(infile)

        f = f.replace(".shots.json", "")

        # Shot boundaries whose change score clears the threshold.
        # (Removed a dead duplicate assignment that built (file, time)
        # tuples and was immediately overwritten, plus the unused
        # `allshots` accumulator.)
        boundaries = [d["time"] for d in data if d["score"] > thresh]

        # Turn boundary times into (start, end) shot intervals.
        shots[f] = []
        for i, end in enumerate(boundaries):
            start = boundaries[i - 1] if i > 0 else 0
            shots[f].append((start, end))

    offset = 0
    clips = []
    while offset < maxduration:
        filename = random.choice(list(shots.keys()))
        # Skip videos with too few detected shots.
        if len(shots[filename]) < 5:
            continue
        start, end = random.choice(shots[filename])
        start += padding
        end -= padding
        dur = min(end - start, duration - padding)

        clip = Clip(filename, start=start, end=start + dur, offset=offset)
        # Slow zoom from full frame out to 150%.
        clip.zoompan([0, 0, "100%", "100%"], ["-25%", "-25%", "150%", "150%"],
                     0, 100)
        clip.fadein(fade)
        # Overlap the next clip with this one's fade.
        offset += dur - fade
        clips.append(clip)

    comp = Composition(clips)
    comp.save(outname)
Exemple #7
0
    def save(self, filename, **kwargs):
        '''Render this clip on its own to a video file.

        Args:
            filename (str): Destination path for the rendered video.
            kwargs: Forwarded to Composition (width, height, fps, bgcolor)
        '''
        # Imported here to avoid a circular import at module load time.
        from vidpy import Composition
        Composition([self], **kwargs).save(filename)
Exemple #8
0
def make_video():
    """Pick a random question, fetch an image for one of its nouns, and
    render a two-second video of the image with the question text
    spinning over it (saved as lol.mp4).
    """
    randomq = get_random_question()
    nouns = get_nouns(randomq)
    # Parenthesized prints work on both Python 2 and 3 (the originals
    # were Python-2-only print statements).
    print(randomq)
    print(nouns)
    saved_image = get_image(random.choice(nouns))
    print(saved_image)
    clip = Clip(saved_image, start=0, end=2)
    text = Text(randomq, start=0, end=2)
    text.spin(20)
    comp = Composition([clip, text])
    comp.save('lol.mp4')
Exemple #9
0
def compose_right():
    """Shuffle all cut right-side recordings and save them stitched
    end-to-end as a single video."""
    list_videos_right = glob('./recorded_videos/recorded_videos_cut/recorded_videos_cut_right/*.mp4')
    clips_right = [Clip(video) for video in list_videos_right]

    print("Composing Right Videos")
    shuffle(clips_right)

    # singletrack plays the clips one after another
    composition = Composition(clips_right, singletrack=True)
    composition.save(COMPOSITION_CROPPED_RIGHT + '/composition_right/videos_right.mp4')
def make_video(tag):
    """Stitch the clips in mergeSequence (with tag-based fades and a
    glow) into a timestamped .mov file named after `tag`, then announce
    the render over OSC to a local listener.

    Args:
        tag: string embedded in the output filename.
    """
    clips = []

    for video in mergeSequence:
        filename = video["path"]
        clip = Clip(filename, start=video['start'], end=video['end'])

        # Fades depend on which tag lists this video appears in.
        if "abstract" in video["tags"]:
            clip.fadein(1)
            clip.fadeout(1.5)
        if video in beginning:
            clip.fadein(3)
        if video in ending:
            clip.fadeout(1.5)
        if video in randomSounds:
            clip.fadein(1)
            clip.fadeout(1.5)

        clip.glow()
        clips.append(clip)

    composition = Composition(clips, singletrack=True, width=800, height=800)
    videoName = "render/video_" + tag + datetime.datetime.now().strftime(
        "%Y%m%d%H%M%S") + ".mov"
    composition.save(videoName)

    # Notify a local OSC listener that the render is done.
    client = udp_client.UDPClient("127.0.0.1", 8000)
    print("testing message")

    msg = osc_message_builder.OscMessageBuilder(address="/video")
    msg.add_arg("nature")
    msg = msg.build()
    client.send(msg)
Exemple #11
0
def compose(lines, outname='cut.mp4'):
    """Find each phrase of `lines` in the transcript JSON files and
    stitch the matching video segments into a single cut.

    Args:
        lines: iterable of phrases to search for.
        outname: output video filename.

    (Removed the unused `vids` dict -- it was only referenced by
    commented-out code -- along with the dead commented code itself.)
    """
    # jsonfile -> list of word dicts for every transcript.
    alllines = {}
    for jsonfile in glob(BASE + '*.json'):
        with open(jsonfile, 'r') as infile:
            alllines[jsonfile] = json.load(infile)['words']

    # Collect unique (jsonfile, start, end) matches, preserving order.
    segments = []
    for l in lines:
        for r in find_in_json(l, alllines):
            if r not in segments:
                segments.append(r)

    clips = []
    for jsonfile, start, end in segments:
        vid = jsonfile.replace('.json', '')
        print(vid, start, end)
        clips.append(Clip(vid, start=start, end=end))

    comp = Composition(clips, singletrack=True)
    comp.save(outname)
Exemple #12
0
def main(filenames=None):
    """Build a "cooking show" supercut: one segment per grammatical
    category, stitched together into cookingshow.mp4.

    Args:
        filenames: video files to use; when None, a random recipe from
            recipes.txt is downloaded and videos/*.mp4 is used instead.

    Returns:
        The final output filename.
    """
    query = ''

    if filenames is None:
        # Pick a random non-empty recipe line and fetch videos for it.
        with open('recipes.txt', 'r') as infile:
            recipes = [r.strip() for r in infile.readlines()]
        recipes = [r for r in recipes if r != '']
        query = random.choice(recipes)
        download(query)
        filenames = glob('videos/*.mp4')

    # (category, POS-tag pattern or word alternation) pairs.
    video = [
        ('ingredients', '(<DT>? <CD>? <JJ>? <NN|NNS>+ <IN> <NN|NNS>+)'),
        ('simple_ingredients', '(<JJ> <NN|NNS>)'),
        ('simple_ingredients2', '(<JJ>? <NN> <IN> <JJ>? <NN|NNS>)'),
        ('instructions', '(<RB>? <VB> <DT>? <JJ>? <NN|NNS> <RB>?)'),
        ('instructions2', '(<VB> <PRP> <RB>? <NN|NNS>?)'),
        ('delicious', 'delicious|incredible|wonderful|amazing'),
        ('hmm', 'hmm|mmm|yum'),
    ]

    # Render one segment file per category, then collect it as a clip.
    clips = []
    for cat, pat in video:
        segment_name = cat + '.mp4'
        compose_clip(filenames, segment_name, pat)
        clips.append(Clip(segment_name))

    comp = Composition(clips, singletrack=True)
    finalname = 'cookingshow.mp4'
    comp.save(finalname)
    return finalname
Exemple #13
0
    randomWidth = vidWidth + random.uniform(-delta, delta)
    randomHeight = vidHeight + random.uniform(-delta, delta)
    clip.position(x=randomX, y=randomY, w=randomWidth, h=randomHeight)

    # fade in for 0.1 second
    clip.fadein(0.1)

     # repeat the clip three times
    clip.repeat(3)

    # start the clip based on existing clips
    # add a random offset
    clip.set_offset(len(clips)*random.random())

    # add clip to list of clips
    clips.append(clip)

    # increment the x and y position
    x += vidWidth
    # if x is outside of canvas
    if x > canvasWidth:
        # update y
        y += vidHeight
        # reset x
        x = 0


# lay all the positioned clips onto a single canvas
grid = Composition(clips, width=canvasWidth, height=canvasHeight)
# save to hard disk
grid.save('gridRandom.mp4')
from vidpy import Clip, Composition

# source videos: rain footage layered over N2.mov
video1 = "files/rainTest.mp4"
video2 = "files/N2.mov"

clips = []

# key out the rain clip's background: first an automatic pass with a
# wide tolerance, then an explicit cyan key
clip1 = Clip(video1)
clip1.chroma(amount=5.8)
clip1.chroma(color="#00FFFF")

clip2 = Clip(video2)

# base video underneath, keyed rain on top
clips = [clip2, clip1]

comp = Composition(clips, bgcolor='#ffffff')
comp.save('output/test.mp4')
comp.preview()
Exemple #15
0
# import vidpy module
from vidpy import Clip, Composition

# source video on disk
video = "videos/video5.mp4"

# list that will hold every staggered copy
clips = []

for i in range(10):
    clip = Clip(video)

    # key out black; amount controls the tolerance
    clip.chroma(amount=0.2, color="#000000")

    # stagger each copy half a second after the previous one
    clip.set_offset(i * 0.5)

    # shift each copy horizontally
    clip.position(x=(i * 10) - 30)

    # loop the footage three times
    clip.repeat(3)

    clips.append(clip)

comp = Composition(clips, bgcolor='#ff4dff', duration=4)
comp.save('chroma.mp4')
## example based on Stitch Videos example from VidPy website
## https://antiboredom.github.io/vidpy/examples.html

# import vidpy module
from vidpy import Clip, Composition
# import random module
from random import random

# load the three source clips
clip1 = Clip('videos/video1.mp4')
clip2 = Clip('videos/video2.mp4')
clip3 = Clip('videos/video3.mp4')

clips = [clip1, clip2, clip3]

# trim each clip to a random sub-span of itself
for clip in clips:
    # random start somewhere in the clip
    spanStart = random() * clip.duration
    # random end between that start and the clip's end
    spanEnd = spanStart + random() * (clip.duration - spanStart)
    clip.cut(start=spanStart, end=spanEnd)

# play the trimmed clips back to back
stiched = Composition(clips, singletrack=True)
stiched.save('randomStitch.mp4')
# iterate through every video
# NOTE(review): the loop variable `video` is never used in the body --
# each iteration builds a clip from `vid`, which must be defined
# elsewhere; confirm whether Clip(video, ...) was intended.
for video in range(videos):
    # create a clip with a random duration of up to ten seconds
    clip = Clip(vid, start=0, end=random.random()*10)

    # mute it
    clip.volume(0)

    # pick random key from the dictionary
    f = random.choice(list(filters.keys()))

    # retrieve the arguments from the dictionary
    args = filters[f]

    # apply the chosen filter method, with its arguments if it has any
    if len(args) > 0:
        getattr(clip, f)(*args)
    else:
        getattr(clip, f)()

    # add the text of the filter
    # clip.text(f, olcolor='#000000', outline=10)

    # append clip to the clips list
    clips.append(clip)

# play the filtered clips back to back
comp = Composition(clips, singletrack=True)

# save file to hard drive
comp.save('filtersRandom.mp4')
Exemple #18
0
## example based on Stitch Videos example from VidPy website
## https://antiboredom.github.io/vidpy/examples.html

# import vidpy module
from vidpy import Clip, Composition

# load the three source clips into a list
clips = []
clip1 = Clip('videos/video1.mp4')
clips.append(clip1)
clip2 = Clip('videos/video2.mp4')
clips.append(clip2)
clip3 = Clip('videos/video3.mp4')
clips.append(clip3)

# play them back to back and save the result
stiched = Composition(clips, singletrack=True)
stiched.save('allVids.mp4')
Exemple #19
0
    color = random.choice(colors)
    outline_color = random.choice(colors)
    outline_size = random.randint(3, 8)
    weight = random.randint(100, 1000)
    style = random.choice(styles)

    # put the text in a bounding box that fills 90% of the screen
    # (x, y, width, height)
    boundingbox = ('10%', '10%', '80%', '80%')

    # create a one second text clip with random parameters
    text = Text(line,
                end=1,
                color=color,
                font=font,
                style=style,
                weight=weight,
                olcolor=outline_color,
                outline=outline_size,
                size=200,
                bbox=boundingbox,
                pad=50)

    # add a glow for some reason!
    text.glow(1)

    clips.append(text)

# play all the text clips back to back on a 720p, 30fps canvas
comp = Composition(clips, singletrack=True, width=1280, height=720, fps=30)
comp.save('manifesto.mp4')
Exemple #20
0
from vidpy import Clip, Composition

# load the three hand videos
clip1 = Clip('videos/hand1.mp4')
clip2 = Clip('videos/hand2.mp4')
clip3 = Clip('videos/hand3.mp4')

clips = [clip1, clip2, clip3]


# play the full clips back to back
stiched = Composition(clips, singletrack=True)
stiched.save('allvids.mp4')

# then trim every clip to its first second and stitch again
for clip in clips:
    clip.cut(start=0, end=1)

stiched = Composition(clips, singletrack=True)
stiched.save('firstsecond.mp4')
Exemple #21
0
    'softglow': [],
}

clips = []
for name in filters:
    # one-second, muted copy of the source video
    clip = Clip(vid, start=0, end=1)
    clip.volume(0)

    # look up the filter's arguments and apply it by name
    args = filters[name]
    if args:
        getattr(clip, name)(*args)
    else:
        getattr(clip, name)()

    # label the clip with the filter name
    clip.text(name, olcolor='#000000', outline=10)

    clips.append(clip)

# play the filtered copies back to back
comp = Composition(clips, singletrack=True)

# write the result to disk
comp.save('filters.mp4')
Exemple #22
0
from vidpy import Clip, Composition

video = 'videos/hand1.mp4'

clips = []

for i in range(5):
    clip = Clip(video)

    # automatic chroma key on the background
    clip.chroma(amount=0.2)

    # each copy starts half a second after the previous one
    clip.set_offset(i * 0.5)

    # spread the copies horizontally across the frame
    clip.position(x=(i * 100) - 300)

    # loop each copy three times
    clip.repeat(3)

    clips.append(clip)

comp = Composition(clips, bgcolor='#ff4dff', duration=4)
comp.save('chroma_overlay.mp4')
Exemple #23
0
# three echoing copies of the LOTR clip, staggered 0.7s apart
for i in range(3):
    clips.append(Clip('lotr_late2.mp4', offset=16.4 + (i * 0.7), start=0, end=7))

# seven tightly staggered copies of the holodeck clip
for i in range(7):
    clips.append(Clip('holodeck_b.mp4', offset=18.5 + (i * 0.2), start=18, end=20))

# three copies of the Jobs clip, half a second apart
for i in range(3):
    clips.append(Clip('jobs_640_480_1.mp4',
                      offset=19.4 + i * 0.5,
                      start=0.3,
                      end=6))

# three copies of the Sheliak clip, two seconds apart
for i in range(3):
    clips.append(Clip('sheliak_636_480_1.mp4',
                      offset=19.3 + i * 2,
                      start=1.87,
                      end=2.9))

# layer everything on multiple tracks and render
comp = Composition(clips)
comp.save('screens_doors_men_alienmen_mix.mp4')
clips = [clip1, clip2, clip3]

# total footage budget in seconds
# try also 10 seconds, 1 second, 0.1 second
remainingTime = 60

# Cut a random span out of each clip without exceeding the remaining
# time budget. (Rewritten to iterate the clips directly instead of
# indexing with range(len(clips)) -- behavior is unchanged.)
for clip in clips:
    # random start point within the clip
    randomStart = random() * clip.duration
    # random amount of the remaining budget to spend
    randomDuration = random() * remainingTime
    # if the desired span fits inside the clip, use it
    if randomStart + randomDuration < clip.duration:
        randomEnd = randomStart + randomDuration
    # otherwise cut to the end of the clip
    else:
        randomEnd = clip.duration

    # subtract the footage actually used from the budget
    remainingTime = remainingTime - (randomEnd - randomStart)

    # apply the cut
    clip.cut(start=randomStart, end=randomEnd)

# play all the cuts back to back
stiched = Composition(clips, singletrack=True)
stiched.save('randomMinute.mp4')
from vidpy import Clip, Composition

video1 = 'sunset/sunset1_small.mp4'

clips = []

# key out the sunset clip: a near-exact automatic pass, then an
# explicit key color
clip1 = Clip(video1)
clip1.chroma(amount=0.001)
clip1.chroma(color="#9187ff")

clips = [clip1]

comp = Composition(clips, bgcolor='#FF87DC')
comp.save('sunsetlayer1.mp4')
comp.preview()
Exemple #26
0
y = 0

clips = []

# tile copies of the video across the canvas, row by row
while y < canvas_height:
    clip = Clip(video)

    # place this copy at the current grid cell
    clip.position(x=x, y=y, w=vid_width, h=vid_height)

    # half-second fade in
    clip.fadein(0.5)

    # loop the footage three times
    clip.repeat(3)

    # stagger each copy slightly after the ones already placed
    clip.set_offset(0.1 * len(clips))

    clips.append(clip)

    # advance to the next cell; wrap to the next row past the right edge
    x = x + vid_width
    if x > canvas_width:
        x = 0
        y = y + vid_height

grid = Composition(clips, width=canvas_width, height=canvas_height)
grid.save('grid.mp4')
Exemple #27
0
import time
from vidpy import Clip, Composition

# pick a random hand video
hands = glob.glob('hands/*.mp4')
hand = random.choice(hands)
# Parenthesized so this works on both Python 2 and 3 (was a
# Python-2-only print statement).
print(hand)

clips = []
x = -400
start = 0

backgrounds = ['#2cffff', '#fc282a', '#fcdb2a', '#2452d8']

# six chroma-keyed copies, marching rightward half a second apart
for i in range(0, 6):
    clip = Clip(hand)
    clip.chroma()
    clip.set_offset(start)
    clip.position(x=x)
    clip.fadein(0.2)
    clip.fadeout(0.2)

    clips.append(clip)

    start += 0.5
    x += 150

composition = Composition(clips, bgcolor=random.choice(backgrounds))

outname = 'coolhands_{}.mp4'.format(int(time.time()))
composition.save(outname)
# jitter amount used for randomness
delta = 5

# number of copies to layer
numberCopies = 100

for i in range(0, numberCopies):
    clip = Clip(video)

    # key out black; amount controls the tolerance
    clip.chroma(amount=0.2, color="#000000")

    # each copy starts half a second after the previous one
    clip.set_offset(i * 0.5)

    # random-ish horizontal placement for each copy
    clip.position(x=(i * 10 * random.random()) - 30)

    # loop the footage three times
    clip.repeat(3)

    clips.append(clip)

# layer every copy over a pink background for ten seconds
comp = Composition(clips, bgcolor='#ff4dff', duration=10.0)
comp.save('chromaRandom.mp4')
Exemple #29
0
def compose_clip(filenames, outname, pat):
    """Build a captioned supercut of every phrase matching `pat`.

    For each video in `filenames`, the matching .en.vtt auto-subtitle
    file is parsed into word-level timestamps. `pat` is either a
    part-of-speech pattern (contains '<', matched against the transcript
    via pos_regex_matches) or a '|'-separated list of literal words.
    Each occurrence becomes a subtitled clip; clips are rendered in
    chunks of 60 and the chunk files are then concatenated into
    `outname`.

    Args:
        filenames: video paths whose .en.vtt subtitles will be searched.
        outname: output video filename (also used for temp chunk names).
        pat: POS-tag pattern or 'word|word|...' alternation.

    Returns:
        outname.
    """
    timestamps = []

    for f in filenames:
        # Read the auto-subtitle track; skip videos without a usable one.
        try:
            with open(f.replace('.mp4', '.en.vtt'), 'r') as infile:
                data = infile.read()
            sentences = vtt.parse_auto_sub(data)
        except:
            continue

        # Word-level timing is required to locate phrases.
        if 'words' not in sentences[0]:
            continue

        if '<' in pat:
            # Treat pat as a POS regex over the whole transcript.
            text = ' '.join([s['text'] for s in sentences])
            doc = nlp(text)

            results = pos_regex_matches(doc, pat)
            results = [r.string.lower().strip() for r in results]
            # Drop phrases containing apostrophes.
            results = [r for r in results if "'" not in r]
            results = list(set(results))
        else:
            # Treat pat as a literal word alternation.
            results = pat.split('|')

        # Flatten sentence words into one list for index lookups.
        allwords = []
        for s in sentences:
            allwords += s['words']

        justwords = [w['word'].lower().strip() for w in allwords]

        # Record (file, phrase, start, end) for every occurrence.
        for r in results:
            # print(r)
            indices = find_sub_list(r.split(' '), justwords)
            for i in indices:
                start = allwords[i[0]]['start']
                # small tail so the last word isn't clipped
                end = allwords[i[1]]['end'] + 0.2
                timestamps.append((f, r, start, end))

    # Sort by phrase so identical phrases play consecutively.
    timestamps = sorted(timestamps, key=lambda x: x[1])
    clips = []
    '''create with vidpy'''
    for f, r, start, end in timestamps:
        print(r)
        clip = Clip(f, start=start, end=end)
        # Caption the clip with the matched phrase.
        clip.text(r,
                  font='Courgette',
                  size=60,
                  valign='bottom',
                  color='#FFFF00',
                  bbox=('1%', '1%', '98%', '98%'))
        clips.append(clip)

    # Render in chunks of 60 clips, then stitch the chunk files.
    tmpclips = []
    for i, chunk in enumerate(chunker(clips, 60)):
        tmpname = outname + '.tmp.{}.mp4'.format(str(i).zfill(5))
        comp = Composition(chunk, singletrack=True)
        comp.save(tmpname)
        tmpclips.append(tmpname)

    comp = Composition([Clip(t) for t in tmpclips], singletrack=True)
    comp.save(outname)

    # Clean up the temporary chunk files.
    for tmpname in tmpclips:
        os.remove(tmpname)
    '''create with ffmpeg'''
    # clipnum = 0
    # for f, r, start, end in timestamps:
    #     tempout = 'shots/' + str(clipnum).zfill(6) + '.mp4'
    #     fontfile = os.path.abspath("Courgette-Regular.ttf")
    #     args = '''ffmpeg -hide_banner -loglevel panic -y -i {} -ss {} -t {} -strict -2 -vf drawtext="fontfile={}:text='{}': fontcolor=yellow: fontsize=30: x=(w-text_w)/2: y=(h-text_h-20)" {}'''.format(f, start, end-start, fontfile, r, tempout)
    #     args = '''ffmpeg -hide_banner -loglevel panic -y -i {} -ss {} -t {} -strict -2 {}'''.format(f, start, end-start, tempout)
    #     with Popen(args, shell=True):
    #         pass
    #     clipnum += 1
    '''create clips with vidpy'''
    # clipnum = 0
    # for f, r, start, end in timestamps:
    #     print(r)
    #     tempout = 'shots/' + str(clipnum).zfill(6) + '.mp4'
    #     clip = Clip(f, start=start, end=end)
    #     clip.text(r, font='Courgette', size=60, valign='bottom', color='#FFFF00', bbox=('1%', '1%', '98%', '98%'))
    #     Composition([clip]).save(tempout, silent=True)
    #     clipnum += 1

    return outname
Exemple #30
0
    style = random.choice(styles)

    # put the text in a bounding box that fills 90% of the screen
    # (x, y, width, height)
    boundingbox = ('10%', '10%', '80%', '80%')

    # create a one second text clip with random parameters
    text = Text(line,
                end=1,
                color=color,
                font=font,
                style=style,
                weight=weight,
                olcolor=outline_color,
                outline=outline_size,
                size=200,
                bbox=boundingbox,
                pad=50)

    # add a glow for some reason!
    text.glow(1)

    # add text
    clips.append(text)

# play all the text clips back to back on a 720p, 30fps canvas
comp = Composition(clips, singletrack=True, width=1280, height=720, fps=30)

# save file to hard drive
comp.save('text.mp4')