# Echo Nest Remix imports; the module paths assume the echonest.remix package
# layout (older releases expose these as echonest.audio and echonest.video).
import echonest.remix.audio as audio
import echonest.remix.video as video


def main(input_filename, output_filename):
    # Load the audio/video pair, fetching from YouTube when given a URL.
    if input_filename.startswith("http://"):
        av = video.loadavfromyoutube(input_filename)
    else:
        av = video.loadav(input_filename)
    # Keep only the first child (the first beat) of every bar.
    collect = audio.AudioQuantumList()
    for bar in av.audio.analysis.bars:
        collect.append(bar.children()[0])
    out = video.getpieces(av, collect)
    out.save(output_filename)
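# A minimal command-line driver sketch, assuming the function above lives in a
# standalone script invoked as "python <script>.py <input> <output>"; the usage
# message and argument handling here are illustrative additions, not part of
# the excerpt above.
if __name__ == '__main__':
    import sys
    try:
        input_filename = sys.argv[1]
        output_filename = sys.argv[2]
    except IndexError:
        print 'usage: python <script>.py <input_filename> <output_filename>'
        sys.exit(-1)
    main(input_filename, output_filename)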
# The video module path assumes the echonest.remix package layout.
import echonest.remix.video as video


def main(toReverse, inputFilename, outputFilename):
    # Load the audio/video pair, fetching from YouTube when given a URL.
    if inputFilename.startswith("http://"):
        av = video.loadavfromyoutube(inputFilename)
    else:
        av = video.loadav(inputFilename)
    # Reverse the requested rhythmic unit: either tatums or beats.
    if toReverse == 'tatums':
        chunks = av.audio.analysis.tatums
    elif toReverse == 'beats':
        chunks = av.audio.analysis.beats
    else:
        raise ValueError("toReverse must be 'tatums' or 'beats'")
    chunks.reverse()
    out = video.getpieces(av, chunks)
    out.save(outputFilename)
import random

import echonest.remix.audio as audio
import echonest.remix.video as video
# Selection and sorting helpers (overlap_range, are_contained_by, overlap,
# are_beat_number, overlap_starts_of, contain_point, start_during,
# timbre_distance_from) come from the remix selection and sorting modules;
# the exact package paths depend on the installed remix version.
from echonest.remix.selection import *
from echonest.remix.sorting import *


def main(infile, outfile, choices=4):
    if infile.startswith("http://"):
        av = video.loadavfromyoutube(infile)
    else:
        av = video.loadav(infile)

    meter = av.audio.analysis.time_signature['value']
    fade_in = av.audio.analysis.end_of_fade_in
    fade_out = av.audio.analysis.start_of_fade_out
    sections = av.audio.analysis.sections.that(overlap_range(fade_in, fade_out))
    outchunks = audio.AudioQuantumList()

    for section in sections:
        print str(section) + ":"
        beats = av.audio.analysis.beats.that(are_contained_by(section))
        segments = av.audio.analysis.segments.that(overlap(section))
        num_bars = len(section.children())
        print "\t", len(beats), "beats,", len(segments), "segments"
        if len(beats) < meter:
            continue

        # Group the section's beats by their position within the bar, and
        # collect the segments that start on each of those beat positions.
        b = []
        segstarts = []
        for m in range(meter):
            b.append(beats.that(are_beat_number(m)))
            segstarts.append(segments.that(overlap_starts_of(b[m])))

        if not b:
            continue
        elif not b[0]:
            continue

        # Walk the section beat by beat, each time jumping to one of the
        # timbrally closest candidates for the next beat position.
        now = b[0][0]
        for x in range(0, num_bars * meter):
            beat = x % meter
            next_beat = (x + 1) % meter
            now_end_segment = segments.that(contain_point(now.end))[0]
            next_candidates = segstarts[next_beat].ordered_by(
                timbre_distance_from(now_end_segment))
            if not next_candidates:
                continue
            next_choice = next_candidates[random.randrange(min(choices, len(next_candidates)))]
            next = b[next_beat].that(start_during(next_choice))[0]
            outchunks.append(now)
            print "\t" + now.context_string()
            now = next

    out = video.getpieces(av, outchunks)
    out.save(outfile)
def run(self):
    st = modify.Modify()  # created for duration matching; unused in this excerpt
    collect = audio.AudioQuantumList()
    for a in self.segs:
        seg_index = a.absolute_context()[0]
        # Distance from this segment to every other; blank out the segment's
        # own entry so it cannot match itself, then take the closest match.
        distances = self.get_distance_from(a)
        distances[seg_index] = sys.maxint
        match_index = distances.index(min(distances))
        match = self.segs[match_index]
        print seg_index, match_index
        # make the length of the new seg match the length
        # of the old seg
        collect.append(match)
    out = video.getpieces(self.av, collect)
    out.save(self.output_filename)
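# run() above is a method excerpt; the sketch below shows one plausible shape
# for its containing class. The class name, the __init__ signature, and the
# feature distance in get_distance_from are assumptions made for illustration;
# only self.av, self.segs, self.output_filename, and get_distance_from() are
# implied by the excerpt. Module paths again assume the echonest.remix layout.
import sys

import numpy
import echonest.remix.audio as audio
import echonest.remix.video as video
import echonest.remix.modify as modify


class SegmentSwapper(object):
    def __init__(self, input_filename, output_filename):
        self.av = video.loadav(input_filename)
        self.segs = self.av.audio.analysis.segments
        self.output_filename = output_filename

    def get_distance_from(self, seg):
        # Squared distance over the 12-dimensional timbre vectors, as a
        # stand-in for whatever feature distance the original class uses.
        return [float(numpy.sum(numpy.square(numpy.array(seg.timbre) -
                                             numpy.array(other.timbre))))
                for other in self.segs]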