def __init__(self, input_filename_a, input_filename_b, output_filename):
    """Synchronizes slavebundle on masterbundle, writes to outbundle"""
    # Load the master bundle first; its video settings drive everything else.
    self.master = video.loadav(input_filename_a)
    # Re-encode the slave so it shares the master's video settings.
    slave_path = video.convertmov(input_filename_b,
                                  settings=self.master.video.settings)
    self.slave = video.loadav(slave_path)
    # Output destination (kept under both historical attribute names).
    self.out = output_filename
    self.output_filename = output_filename
    # Audio tracks and their segment analyses, used for synchronization.
    self.input_a = self.master.audio
    self.input_b = self.slave.audio
    self.segs_a = self.input_a.analysis.segments
    self.segs_b = self.input_b.analysis.segments
def main(input_filename, output_filename):
    """Render a video built from the first child quantum of every bar.

    input_filename -- local media path, or an http(s) YouTube URL
    output_filename -- path the assembled video is saved to
    """
    # Accept https as well as http; YouTube URLs are https these days.
    if input_filename.startswith(("http://", "https://")):
        av = video.loadavfromyoutube(input_filename)
    else:
        av = video.loadav(input_filename)
    collect = audio.AudioQuantumList()
    for bar in av.audio.analysis.bars:
        children = bar.children()
        # Guard: a bar with no children would have raised IndexError.
        if children:
            collect.append(children[0])
    out = video.getpieces(av, collect)
    out.save(output_filename)
def main(toReverse, inputFilename, outputFilename):
    """Reverse a video along its rhythmic units and save the result.

    toReverse -- 'tatums' or 'beats': which analysis units to reverse
    inputFilename -- local media path, or an http(s) YouTube URL
    outputFilename -- path the reversed video is saved to

    Raises ValueError if toReverse is not a recognized unit name
    (the original code fell through to a confusing NameError).
    """
    # Accept https as well as http; YouTube URLs are https these days.
    if inputFilename.startswith(("http://", "https://")):
        av = video.loadavfromyoutube(inputFilename)
    else:
        av = video.loadav(inputFilename)
    if toReverse == 'tatums':
        chunks = av.audio.analysis.tatums
    elif toReverse == 'beats':
        chunks = av.audio.analysis.beats
    else:
        raise ValueError("toReverse must be 'tatums' or 'beats', got %r"
                         % (toReverse,))
    chunks.reverse()
    out = video.getpieces(av, chunks)
    out.save(outputFilename)
def __init__(self, tracks): self.videos = [] for track in tracks: if 'youtube.com' in track: #assume it's a youtube url try: self.videos.append(video.loadavfromyoutube(track)) except: print 'unable to fetch', track else: self.videos.append(video.loadav(track)) self.collectvid = video.EditableFrames(settings=self.videos[0].video.settings) self.collectaudio = audio.AudioQuantumList()
def main(infile, outfile, choices=4):
    """Remix a video by walking its beats, hopping between timbre-similar
    segments inside each section, and render the chosen beat sequence.

    infile  -- local media path, or an http:// YouTube URL
    outfile -- path the remixed video is saved to
    choices -- how many nearest-timbre candidates to pick randomly among
    """
    if infile.startswith("http://"):
        av = video.loadavfromyoutube(infile)
    else:
        av = video.loadav(infile)
    # Beats per bar, per the analysis' time signature.
    meter = av.audio.analysis.time_signature['value']
    fade_in = av.audio.analysis.end_of_fade_in
    fade_out = av.audio.analysis.start_of_fade_out
    # Only work with sections inside the non-fade portion of the track.
    sections = av.audio.analysis.sections.that(overlap_range(fade_in, fade_out))
    outchunks = audio.AudioQuantumList()
    for section in sections:
        print str(section) + ":"
        beats = av.audio.analysis.beats.that(are_contained_by(section))
        segments = av.audio.analysis.segments.that(overlap(section))
        num_bars = len(section.children())
        print "\t", len(beats), "beats,", len(segments), "segments"
        # Skip sections too short to hold even one full bar of beats.
        if len(beats) < meter:
            continue
        # b[m] = the section's beats at bar position m;
        # segstarts[m] = segments that overlap the starts of those beats.
        b = []
        segstarts = []
        for m in range(meter):
            b.append(beats.that(are_beat_number(m)))
            segstarts.append(segments.that(overlap_starts_of(b[m])))
        if not b:
            continue
        elif not b[0]:
            continue
        # Start the walk at the section's first downbeat.
        now = b[0][0]
        for x in range(0, num_bars * meter):
            beat = x % meter
            next_beat = (x + 1) % meter
            # Segment under the current beat's end point.
            # NOTE(review): assumes some segment contains now.end —
            # an empty match here would raise IndexError.
            now_end_segment = segments.that(contain_point(now.end))[0]
            # Candidate next segments, nearest in timbre first.
            next_candidates = segstarts[next_beat].ordered_by(timbre_distance_from(now_end_segment))
            if not next_candidates:
                continue
            # Pick randomly among the `choices` best candidates.
            next_choice = next_candidates[random.randrange(min(choices, len(next_candidates)))]
            # NOTE(review): assumes a beat starts during the chosen
            # segment — an empty match here would raise IndexError.
            next = b[next_beat].that(start_during(next_choice))[0]
            outchunks.append(now)
            print "\t" + now.context_string()
            now = next
    out = video.getpieces(av, outchunks)
    out.save(outfile)
def main(infile, directory, outfile):
    """Cut video clips from `directory` to the beats of `infile`'s audio.

    infile    -- audio file whose beat analysis drives the edit
    directory -- folder scanned for media files to draw clips from
    outfile   -- path the synchronized A/V result is saved to

    Raises ValueError if the directory holds no usable media files
    (the original code raised an opaque IndexError on av[0]).
    """
    afile = audio.LocalAudioFile(infile)
    av = []
    media_exts = ('mp3', 'aif', 'aiff', 'aifc', 'wav', 'mpg', 'flv', 'mov')
    for f in os.listdir(directory):
        # collect the files; skip names without an extension — the
        # original rsplit(...)[1] raised IndexError on dotless names.
        parts = f.rsplit('.', 1)
        if len(parts) == 2 and parts[1].lower() in media_exts:
            av.append(video.loadav(os.path.join(directory, f)))
    if not av:
        raise ValueError('no usable media files found in %r' % (directory,))
    num_files = len(av)
    # not sure the best way to handle these settings
    newv = video.EditableFrames(settings=av[0].video.settings)
    print >> sys.stderr, "Assembling beats.",
    for i, beat in enumerate(afile.analysis.beats):
        print >> sys.stderr, '.',
        # Round-robin through the source clips, one clip per beat.
        vid = av[i % num_files]
        if beat.end > vid.audio.duration:
            # Beat runs past the end of this clip; skip it.
            # (original comment: do something smart)
            continue
        newv += vid.video[beat]
    outav = video.SynchronizedAV(audio=afile, video=newv)
    outav.save(outfile)
def __init__(self, input_filename, output_filename):
    """Load the input A/V file, keep its audio segments, and remember
    where the result should eventually be written."""
    self.av = video.loadav(input_filename)
    self.output_filename = output_filename
    # Segment analysis of the loaded file's audio track.
    self.segs = self.av.audio.analysis.segments