def test_speed(self):
    """speed() should wrap the producer in an mlt timewarp prefix."""
    slow = Clip('video.mp4').speed(.5)
    self.assertEqual(str(slow), '-track timewarp:0.5:video.mp4 in=":0.000000"')
    fast = Clip('video.mp4').speed(2)
    self.assertEqual(str(fast), '-track timewarp:2:video.mp4 in=":0.000000"')
def test_multitrack_composition(self):
    """Two clips on separate tracks each get composite+mix transitions onto track 0."""
    config.MELT_BINARY = 'melt'
    first = Clip('video.mp4')
    second = Clip('video2.mp4')
    composition = Composition([first, second])
    expected = (
        'melt -track color:#000000 out=0 '
        '-track video.mp4 in=":0.000000" '
        '-track video2.mp4 in=":0.000000" '
        '-transition composite distort=1 a_track=0 b_track=1 '
        '-transition mix a_track=0 b_track=1 '
        '-transition composite distort=1 a_track=0 b_track=2 '
        '-transition mix a_track=0 b_track=2'
    )
    self.assertEqual(str(composition), expected)
def test_basic_singletrack_composition(self):
    """singletrack=True puts both producers on one track with a single transition pair."""
    config.MELT_BINARY = 'melt'
    first = Clip('video.mp4')
    second = Clip('video2.mp4')
    composition = Composition([first, second], singletrack=True)
    expected = (
        'melt -track color:#000000 out=0 '
        '-track video.mp4 in=":0.000000" '
        'video2.mp4 in=":0.000000" '
        '-transition composite distort=1 a_track=0 b_track=1'
    )
    self.assertEqual(str(composition), expected)
def cut_right():
    """Crop the right side off each resized video and save the result.

    Relies on module-level ``list_videos`` and the ``COMPOSITION_*`` path
    constants defined elsewhere in the file.
    """
    for source in list_videos:
        # the filename up to the first ':' becomes the output name
        outname = path.basename(source).split(':')[0] + ".mp4"
        resized_path = path.join(COMPOSITION_DESTINATION, outname)
        print("Cutting Right")
        cropped = Clip(resized_path)
        # 1280/2 kept as float division to preserve the original argument value
        cropped.fx('crop', {'right': 1280 / 2})
        Composition([cropped]).save(path.join(COMPOSITION_CROPPED_RIGHT, outname))
def test_cut(self):
    """cut() maps start/end/duration onto melt in/out timestamps."""
    self.assertEqual(
        str(Clip('video.mp4').cut(start=1.1, end=3)),
        '-track video.mp4 in=":1.100000" out=":3.000000"')
    self.assertEqual(
        str(Clip('video.mp4').cut(start=1.1)),
        '-track video.mp4 in=":1.100000"')
    self.assertEqual(
        str(Clip('video.mp4').cut(end=2)),
        '-track video.mp4 in=":0.000000" out=":2.000000"')
    # duration is added to start to compute the out point
    self.assertEqual(
        str(Clip('video.mp4').cut(start=1, duration=3)),
        '-track video.mp4 in=":1.000000" out=":4.000000"')
def compose_silences(min_silence=2, max_silence=4.0, total=None, randomize=True, outname='silences.mp4'):
    """Stitch together the silent gaps between consecutive transcript words.

    Scans every ``BASE/*.json`` transcript, collects gaps whose length is in
    ``[min_silence, max_silence]`` seconds, optionally shuffles and truncates
    them to ``total``, then renders the segments back to back into ``outname``.

    Fixed: the blanket ``except Exception as e: continue`` is narrowed to the
    exceptions a missing/None timestamp can actually raise, so genuine bugs
    are no longer silently swallowed.
    """
    silences = []
    for jsonfile in glob(BASE + '*.json'):
        with open(jsonfile, 'r') as infile:
            words = json.load(infile)['words']
        for a, b in zip(words, words[1:]):
            # words without usable timing info are skipped
            try:
                dist = b['start'] - a['end']
            except (KeyError, TypeError):
                continue
            if min_silence <= dist <= max_silence:
                silences.append((jsonfile, a['end'], b['start']))
    if randomize:
        random.shuffle(silences)
    if total is not None:
        silences = silences[:total]
    clips = []
    for jsonfile, start, end in silences:
        vid = jsonfile.replace('.json', '')
        print(vid, start, end)
        clips.append(Clip(vid, start=start, end=end))
    comp = Composition(clips, singletrack=True)
    comp.save(outname)
def compose(timestamps):
    """Download every timestamped word segment in parallel and stitch a supercut.

    ``timestamps`` maps video ids to lists of word dicts with 'start'/'end'.
    """
    pool = Pool(processes=5)
    resolved = pool.map(get_vid_url, timestamps.keys())
    # keep only the ids whose URL lookup succeeded
    urls = {vid: url for vid, url in resolved if url}

    to_download = []
    index = 0
    for vid, words in timestamps.items():
        if vid not in urls:
            continue
        for word in words:
            start = word['start']
            end = word['end'] + 0.02  # small tail so the word isn't clipped
            outname = str(index).zfill(4) + '.mp4'
            to_download.append((urls[vid], start, end, outname))
            index += 1

    clipnames = pool.starmap(download_segment, to_download)
    comp = Composition([Clip(name) for name in clipnames], singletrack=True)
    comp.save('supercut.mp4')
def compose_hmms():
    """Collect every 'mmm' utterance from the transcripts and stitch them together."""
    matches = []
    for jsonfile in glob(BASE + '*.json'):
        with open(jsonfile, 'r') as infile:
            words = json.load(infile)['words']
        for word in words:
            if word['word'].lower() == 'mmm' and 'end' in word and 'start' in word:
                matches.append((jsonfile, word['start'], word['end']))

    clips = []
    for jsonfile, start, end in matches:
        vid = jsonfile.replace('.json', '')
        if end - start > 1:
            start = end - 1  # cap each utterance at one second
        if end - start < 0.1:
            continue         # skip vanishingly short hits
        print(vid, start, end)
        clips.append(Clip(vid, start=start, end=end))

    comp = Composition(clips, singletrack=True)
    comp.save('hmm.mp4')
def test_profie(self):
    """Clip should expose duration, frame count, fps and dimensions from the file."""
    # NOTE(review): name looks like a typo for test_profile; kept so the test id
    # doesn't change.
    probe = Clip(os.path.realpath('demos/videos/hand1.mp4'))
    self.assertAlmostEqual(probe.original_duration, 4.18000)
    self.assertAlmostEqual(probe.duration, 4.18000)
    self.assertEqual(probe.total_frames, 251)
    self.assertEqual(probe.original_fps, 60)
    self.assertEqual(probe.width, 1280)
    self.assertEqual(probe.height, 720)
def test_args(self):
    """Constructor kwargs map onto melt track arguments."""
    self.assertEqual(str(Clip('video.mp4')), '-track video.mp4 in=":0.000000"')
    self.assertEqual(str(Clip('video.mp4', start=1.5)),
                     '-track video.mp4 in=":1.500000"')
    self.assertEqual(str(Clip('video.mp4', start=1.1, end=3)),
                     '-track video.mp4 in=":1.100000" out=":3.000000"')
    # offset inserts a leading blank before the producer
    self.assertEqual(str(Clip('video.mp4', offset=2)),
                     '-track -blank :2.000000 video.mp4 in=":0.000000"')
    # unknown kwargs become quoted key="value" properties
    rendered = str(Clip('video.mp4', somearg=5, anotherarg="hi"))
    self.assertTrue(' somearg="5"' in rendered)
    self.assertTrue(' anotherarg="hi"' in rendered)
def make_video():
    """Pick a random question, fetch an image for one of its nouns, and
    overlay the spinning question text on it for two seconds.

    Fixed: the original used Python 2 ``print x`` statements, which are a
    SyntaxError under Python 3; they are now ``print()`` calls.
    """
    randomq = get_random_question()
    nouns = get_nouns(randomq)
    print(randomq)
    print(nouns)
    saved_image = get_image(random.choice(nouns))
    print(saved_image)
    clip = Clip(saved_image, start=0, end=2)
    text = Text(randomq, start=0, end=2)
    text.spin(20)
    comp = Composition([clip, text])
    comp.save('lol.mp4')
def compose_right():
    """Shuffle all right-cropped recordings and stitch them into one video."""
    sources = glob('./recorded_videos/recorded_videos_cut/recorded_videos_cut_right/*.mp4')
    right_clips = [Clip(video) for video in sources]
    print("Composing Right Videos")
    shuffle(right_clips)
    # singletrack=True plays the clips back to back rather than layered
    composition = Composition(right_clips, singletrack=True)
    composition.save(COMPOSITION_CROPPED_RIGHT + '/composition_right/videos_right.mp4')
def compose_with_vidpy(
    maxduration=60,
    thresh=0.2,
    fade=0.3,
    duration=4,
    sections=3,
    padding=0.5,
    outname="home.mp4",
):
    """Assemble a montage of randomly chosen detected shots into ``outname``.

    Each ``videos/*.shots.json`` file lists shot-change scores; consecutive
    change times above ``thresh`` delimit shots. Random shots are layered with
    a slow zoom and crossfade until ``maxduration`` seconds are filled.

    Fixes: removed a dead first assignment to the shot list (it was
    immediately overwritten) and the unused ``allshots`` accumulator; the
    picking loop now draws only from files with at least 5 shots and stops
    if none qualify, instead of spinning forever.
    (``sections`` is kept for interface compatibility; it was never used.)
    """
    shots = {}
    for f in glob("videos/*.shots.json"):
        with open(f, "r") as infile:
            data = json.load(infile)
        f = f.replace(".shots.json", "")
        cut_times = [d["time"] for d in data if d["score"] > thresh]
        shots[f] = []
        for i, end in enumerate(cut_times):
            start = cut_times[i - 1] if i > 0 else 0
            shots[f].append((start, end))

    # only files with a reasonable number of detected shots are usable
    eligible = [name for name, segs in shots.items() if len(segs) >= 5]

    offset = 0
    clips = []
    while offset < maxduration and eligible:
        filename = random.choice(eligible)
        start, end = random.choice(shots[filename])
        start += padding
        end -= padding
        dur = min(end - start, duration - padding)
        clip = Clip(filename, start=start, end=start + dur, offset=offset)
        # slow zoom from full frame out to 150%
        clip.zoompan([0, 0, "100%", "100%"], ["-25%", "-25%", "150%", "150%"], 0, 100)
        clip.fadein(fade)
        offset += dur - fade
        clips.append(clip)

    comp = Composition(clips)
    comp.save(outname)
def test_xml(self):
    """Composition.xml() should reflect auto-detected and overridden profiles."""
    hand1 = Clip('demos/videos/hand1.mp4')
    hand2 = Clip('demos/videos/hand2.mp4')

    # defaults: profile comes from the source video (1280x720 @ 60fps, 251 frames)
    tree = fromstring(Composition([hand1, hand2]).xml())
    profile = tree.find('profile')
    self.assertEqual(profile.get('width'), '1280')
    self.assertEqual(profile.get('height'), '720')
    self.assertEqual(profile.get('frame_rate_num'), '60')
    for node in ('tractor', 'producer', './playlist/entry'):
        self.assertEqual(tree.find(node).get('out'), '250')

    # explicit fps/width/height/duration override the detected profile
    tree = fromstring(
        Composition([hand1, hand2], duration=10, width=100, height=50, fps=30).xml())
    profile = tree.find('profile')
    self.assertEqual(profile.get('width'), '100')
    self.assertEqual(profile.get('height'), '50')
    self.assertEqual(profile.get('frame_rate_num'), '30')
    for node in ('tractor', 'producer', './playlist/entry'):
        self.assertEqual(tree.find(node).get('out'), ':10.000000')

    # singletrack: out point is the total frame count of both clips
    tree = fromstring(Composition([hand1, hand2], singletrack=True).xml())
    for node in ('tractor', 'producer', './playlist/entry'):
        self.assertEqual(tree.find(node).get('out'), '396')
def compose(lines, outname='cut.mp4'):
    """Find each phrase in ``lines`` across the BASE transcripts and stitch
    all matching (deduplicated) segments into ``outname``.

    NOTE(review): the original was whitespace-mangled; the dedupe list being
    iterated after the search loop is the inferred structure — confirm
    against the upstream file.
    """
    transcripts = {}
    for jsonfile in glob(BASE + '*.json'):
        with open(jsonfile, 'r') as infile:
            transcripts[jsonfile] = json.load(infile)['words']

    # gather (jsonfile, start, end) hits for every query line, without duplicates
    segments = []
    for query in lines:
        for hit in find_in_json(query, transcripts):
            if hit not in segments:
                segments.append(hit)

    clips = []
    for jsonfile, start, end in segments:
        vid = jsonfile.replace('.json', '')
        print(vid, start, end)
        clips.append(Clip(vid, start=start, end=end))

    comp = Composition(clips, singletrack=True)
    comp.save(outname)
def main(filenames=None):
    """Generate a cooking-show supercut from downloaded recipe videos.

    When ``filenames`` is None, a random recipe from recipes.txt is
    downloaded first. One segment is composed per (category, pattern) pair
    and the segments are concatenated into cookingshow.mp4.
    """
    query = ''
    if filenames is None:
        with open('recipes.txt', 'r') as infile:
            recipes = [r.strip() for r in infile.readlines()]
        recipes = [r for r in recipes if r != '']
        query = random.choice(recipes)
        download(query)
        filenames = glob('videos/*.mp4')

    # each entry is (category, POS-tag pattern or '|'-separated word list)
    sections = [
        ('ingredients', '(<DT>? <CD>? <JJ>? <NN|NNS>+ <IN> <NN|NNS>+)'),
        ('simple_ingredients', '(<JJ> <NN|NNS>)'),
        ('simple_ingredients2', '(<JJ>? <NN> <IN> <JJ>? <NN|NNS>)'),
        ('instructions', '(<RB>? <VB> <DT>? <JJ>? <NN|NNS> <RB>?)'),
        ('instructions2', '(<VB> <PRP> <RB>? <NN|NNS>?)'),
        ('delicious', 'delicious|incredible|wonderful|amazing'),
        ('hmm', 'hmm|mmm|yum'),
    ]

    clips = []
    for category, pattern in sections:
        outname = category + '.mp4'
        compose_clip(filenames, outname, pattern)
        clips.append(Clip(outname))

    comp = Composition(clips, singletrack=True)
    finalname = 'cookingshow.mp4'
    comp.save(finalname)
    return finalname
def test_fx(self):
    """fx() appends -attach-track (or -attach-clip in singletrack mode)."""
    filtered = Clip('video.mp4').fx('somefx')
    self.assertEqual(
        str(filtered),
        '-track video.mp4 in=":0.000000" -attach-track somefx')

    filtered = Clip('video.mp4').fx('somefx')
    self.assertEqual(
        ' '.join(filtered.args(singletrack=True)),
        'video.mp4 in=":0.000000" -attach-clip somefx')

    # filter parameters render as quoted key="value" pairs
    rendered = str(Clip('video.mp4').fx('somefx', {'param1': 2, 'param2': 'hello'}))
    self.assertTrue('-track video.mp4 in=":0.000000" -attach-track somefx' in rendered)
    self.assertTrue(' param1="2"' in rendered)
    self.assertTrue(' param2="hello"' in rendered)
def extract_shots(thresh=0.2, duration=4, padding=0.5):
    """Render one zoom-panned clip per detected shot into shots/.

    Shot boundaries come from videos/*.shots.json score files; segments
    shorter than one second or already rendered are skipped.
    """
    for jsonpath in glob('videos/*.shots.json'):
        with open(jsonpath, 'r') as infile:
            data = json.load(infile)
        source = jsonpath.replace('.shots.json', '')
        cut_times = [d['time'] for d in data if d['score'] > thresh]
        for i, cut in enumerate(cut_times):
            start = cut_times[i - 1] if i > 0 else 0
            end = cut
            start += padding
            end -= padding
            end = min(end, start + duration - padding)
            if end - start < 1:
                continue  # too short to keep
            print(start, end)
            outname = 'shots/{}_{}_{}.mp4'.format(
                source.replace('videos/', ''), start, end)
            if os.path.exists(outname):
                continue  # already rendered on a previous run
            clip = Clip(source, start=start, end=end)
            # slow zoom from full frame out to 150%
            clip.zoompan([0, 0, '100%', '100%'],
                         ['-25%', '-25%', '150%', '150%'], 0, 100)
            clip.save(outname)
from vidpy import Clip, Composition

# Key the background out of a sunset video and lay it over a pink canvas.
SUNSET = 'sunset/sunset1_small.mp4'

layer = Clip(SUNSET)
# attempt to automatically remove the background color,
# then additionally key the specific purple tone
layer.chroma(amount=0.001)
layer.chroma(color="#9187ff")

comp = Composition([layer], bgcolor='#FF87DC')
comp.save('sunsetlayer1.mp4')
comp.preview()
from vidpy import Clip, Composition

# Layer five staggered, chroma-keyed copies of the same hand video.
VIDEO = 'videos/hand1.mp4'

clips = []
for i in range(5):
    layer = Clip(VIDEO)
    # attempt to automatically remove the background color
    # (pass color='#00ff00' to key a specific color instead)
    layer.chroma(amount=0.2)
    # start each copy half a second after the previous one
    layer.set_offset(i * 0.5)
    # slide each copy 100px to the right, starting at x=-300
    layer.position(x=(i * 100) - 300)
    # loop the clip 3 times
    layer.repeat(3)
    clips.append(layer)

comp = Composition(clips, bgcolor='#ff4dff', duration=4)
comp.save('chroma_overlay.mp4')
import glob

from vidpy import Clip, Composition, Text

# Stack every face*.mp4 at half opacity, each starting half a second
# after the previous, then caption the result and preview it.
clips = []
offset = 0
for filename in glob.glob("face*.mp4"):
    face = Clip(filename)
    face.set_offset(offset)
    face.repeat(20)
    face.opacity(.5)
    clips.append(face)
    offset += .5

caption = Text("Your faces look cool!", font="Comic Sans MS", color="#ff0000")
clips.append(caption)

comp = Composition(clips)
comp.preview()
# start position x = 0 y = 0 # variable for introducing randomness delta = 5 # empty list of clips clips = [] # while y is inside of canvas while y < canvasHeight: # create a clip clip = Clip(video) # set clip position # add random offsets to each variable randomX = x + random.uniform(-delta, delta) randomY = y + random.uniform(-delta, delta) randomWidth = vidWidth + random.uniform(-delta, delta) randomHeight = vidHeight + random.uniform(-delta, delta) clip.position(x=randomX, y=randomY, w=randomWidth, h=randomHeight) # fade in for 0.1 second clip.fadein(0.1) # repeat the clip three times clip.repeat(3)
from vidpy import Clip, Composition

# Preview hand2.mp4 slowed to one tenth of its normal speed.
slowed = Clip('videos/hand2.mp4')
slowed.speed(0.1)
slowed.preview()
def make_video():
    """Preview the merge sequence as one continuous video, fading clips
    according to their tags and position in the sequence.

    Depends on module-level mergeSequence / beginning / ending /
    randomSounds collections.
    """
    clips = [Clip('black.mp4')]  # black lead-in
    for video in mergeSequence:
        filename = video["path"]
        print(filename)
        clip = Clip(filename, start=video['start'], end=video['end'])
        if "abstract" in video["tags"]:
            clip.fadein(1)
            clip.fadeout(1.5)
        if video in beginning:
            clip.fadein(3)
        if video in ending:
            clip.fadeout(1.5)
        if video in randomSounds:
            clip.fadein(1)
            clip.fadeout(1.5)
            clip.glow()
        clips.append(clip)
    print(mergeSequence[0]["id"])

    composition = Composition(clips, singletrack=True)
    # output name is computed even though only preview() is active
    videoName = ("render/videoMergeClassic"
                 + datetime.datetime.now().strftime("%Y%m%d%H%M%S") + ".mp4")
    # composition.save(videoName)
    composition.preview()
def make_video():
    """Stitch the merge sequence into an 800x800 video and save it.

    Fixed: the membership checks against beginning/ending/randomSounds were
    written as ``for video in ...:`` loops, which clobbered the outer loop
    variable and applied the fades to every clip unconditionally; they are
    now ``if video in ...:`` tests, matching the sibling make_video
    implementation elsewhere in this codebase.
    """
    clips = []
    for video in mergeSequence:
        filename = video["path"]
        clip = Clip(filename, start=video['start'], end=video['end'])
        if "abstract" in video["tags"]:
            clip.fadein(1)
            clip.fadeout(1.5)
        if video in beginning:
            clip.fadein(3)
        if video in ending:
            clip.fadeout(1.5)
        if video in randomSounds:
            clip.fadein(1)
            clip.fadeout(1.5)
            clip.glow()
        clips.append(clip)
    print(mergeSequence[0]["id"])

    composition = Composition(clips, singletrack=True, width=800, height=800)
    videoName = ("render/videoMergeClassic"
                 + datetime.datetime.now().strftime("%Y%m%d%H%M%S") + ".mp4")
    composition.save(videoName)
# Tail of a filter table: each key is a Clip method name mapped to the
# positional arguments it should be called with (empty list = no args).
# NOTE(review): the opening of this dict — and the `vid` variable used
# below — are defined above this excerpt and are not visible here.
'invert': [],
'extract_color': [],
'glow': [],
'softglow': [],
}

# empty list of clips
clips = []

# number of videos to be stitched
videos = 10

# iterate through every video (index itself is unused)
for video in range(videos):
    # create a clip with a random duration of up to ten seconds
    clip = Clip(vid, start=0, end=random.random()*10)

    # mute it
    clip.volume(0)

    # pick random key from the dictionary
    f = random.choice(list(filters.keys()))

    # retrieve the arguments from the dictionary
    args = filters[f]

    # call the chosen filter method by name, passing arguments when present
    if len(args) > 0:
        getattr(clip, f)(*args)
    else:
        getattr(clip, f)()
import glob
import random
import time

from vidpy import Clip, Composition

# Overlay six staggered, chroma-keyed copies of a random hand video on a
# randomly colored background.
#
# Fixes: the original used a Python 2 ``print hand`` statement (SyntaxError
# on Python 3) and used glob.glob without importing glob.

hands = glob.glob('hands/*.mp4')
hand = random.choice(hands)
print(hand)

clips = []
x = -400
start = 0
backgrounds = ['#2cffff', '#fc282a', '#fcdb2a', '#2452d8']

for i in range(0, 6):  # i takes the values 0 through 5
    clip = Clip(hand)
    clip.chroma()
    clip.set_offset(start)
    clip.position(x=x)
    clip.fadein(0.2)
    clip.fadeout(0.2)
    clips.append(clip)
    start += 0.5
    x += 150

composition = Composition(clips, bgcolor=random.choice(backgrounds))
outname = 'coolhands_{}.mp4'.format(int(time.time()))
composition.save(outname)
from vidpy import Clip, Composition

# Play two trimmed clips back to back and preview the result.
first = Clip("videos/output.mp4", start=1, end=5)
second = Clip("videos/zuck.mp4", start=0, end=6)

comp = Composition([first, second], singletrack=True)
comp.preview()
def compose_clip(filenames, outname, pat):
    """Build a captioned supercut of every occurrence of ``pat`` in ``filenames``.

    ``pat`` is either a textacy POS-tag pattern (contains '<') applied to the
    whole transcript, or a '|'-separated list of literal words. Matching
    segments are captioned, rendered in chunks of 60 clips, stitched into
    ``outname``, and the chunk files removed. Returns ``outname``.

    Fixes: the bare ``except:`` around subtitle loading now catches only
    ``Exception`` (so KeyboardInterrupt/SystemExit aren't swallowed), and
    files whose parsed subtitle list is empty no longer raise IndexError.
    """
    timestamps = []
    for f in filenames:
        # best effort: skip videos with missing or unparseable subtitles
        try:
            with open(f.replace('.mp4', '.en.vtt'), 'r') as infile:
                data = infile.read()
            sentences = vtt.parse_auto_sub(data)
        except Exception:
            continue
        if not sentences or 'words' not in sentences[0]:
            continue

        if '<' in pat:
            # POS-tag pattern: match against the full transcript text
            text = ' '.join([s['text'] for s in sentences])
            doc = nlp(text)
            results = pos_regex_matches(doc, pat)
            results = [r.string.lower().strip() for r in results]
            results = [r for r in results if "'" not in r]
            results = list(set(results))
        else:
            results = pat.split('|')

        allwords = []
        for s in sentences:
            allwords += s['words']
        justwords = [w['word'].lower().strip() for w in allwords]

        for r in results:
            for span in find_sub_list(r.split(' '), justwords):
                start = allwords[span[0]]['start']
                end = allwords[span[1]]['end'] + 0.2  # small tail after the phrase
                timestamps.append((f, r, start, end))

    # group identical phrases together in the output
    timestamps = sorted(timestamps, key=lambda x: x[1])

    clips = []
    for f, r, start, end in timestamps:
        print(r)
        clip = Clip(f, start=start, end=end)
        clip.text(r, font='Courgette', size=60, valign='bottom',
                  color='#FFFF00', bbox=('1%', '1%', '98%', '98%'))
        clips.append(clip)

    # render in chunks of 60 clips to keep each melt invocation manageable,
    # then stitch the chunk files together and clean them up
    tmpclips = []
    for i, chunk in enumerate(chunker(clips, 60)):
        tmpname = outname + '.tmp.{}.mp4'.format(str(i).zfill(5))
        Composition(chunk, singletrack=True).save(tmpname)
        tmpclips.append(tmpname)

    comp = Composition([Clip(t) for t in tmpclips], singletrack=True)
    comp.save(outname)
    for tmpname in tmpclips:
        os.remove(tmpname)

    return outname
## example based on Stitch Videos example from VidPy website
## https://antiboredom.github.io/vidpy/examples.html

from vidpy import Clip, Composition

# load the three source videos
clips = [Clip('videos/video{}.mp4'.format(n)) for n in (1, 2, 3)]

# singletrack=True concatenates the clips end to end
stiched = Composition(clips, singletrack=True)
stiched.save('allVids.mp4')