Example #1
File: vone.py  Project: MechanisM/remix
def main(input_filename, output_filename):
    # load the AV from a YouTube URL or a local file
    if input_filename.startswith("http://"):
        av = video.loadavfromyoutube(input_filename)
    else:
        av = video.loadav(input_filename)
    # collect the first child (typically the first beat) of every bar
    collect = audio.AudioQuantumList()
    for bar in av.audio.analysis.bars:
        collect.append(bar.children()[0])
    # cut the synchronized video down to the collected quanta and save it
    out = video.getpieces(av, collect)
    out.save(output_filename)
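The snippet above relies on the video and audio modules of the echonest remix package being imported at module level (the exact import path may differ between forks of the project). A minimal command-line driver, sketched here as an assumption rather than copied from vone.py, could look like this:

if __name__ == '__main__':
    import sys
    try:
        input_filename = sys.argv[1]
        output_filename = sys.argv[2]
    except IndexError:
        print("usage: python vone.py <input_file_or_youtube_url> <output_file>")
        sys.exit(-1)
    main(input_filename, output_filename)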
Example #2
File: vreverse.py  Project: MechanisM/remix
def main(toReverse, inputFilename, outputFilename):
    # load the AV from a YouTube URL or a local file
    if inputFilename.startswith("http://"):
        av = video.loadavfromyoutube(inputFilename)
    else:
        av = video.loadav(inputFilename)
    # pick which analysis unit to reverse: tatums or beats
    if toReverse == 'tatums':
        chunks = av.audio.analysis.tatums
    elif toReverse == 'beats':
        chunks = av.audio.analysis.beats
    # play the chunks back in reverse order
    chunks.reverse()
    out = video.getpieces(av, chunks)
    out.save(outputFilename)
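Note that chunks is assigned only when toReverse is 'tatums' or 'beats', so any driver should restrict the first argument to those two values. A hedged sketch of such a driver (the argument handling is an assumption, not taken from vreverse.py):

if __name__ == '__main__':
    import sys
    try:
        to_reverse, input_filename, output_filename = sys.argv[1:4]
    except ValueError:
        print("usage: python vreverse.py <tatums|beats> <input_file_or_youtube_url> <output_file>")
        sys.exit(-1)
    if to_reverse not in ('tatums', 'beats'):
        print("first argument must be 'tatums' or 'beats'")
        sys.exit(-1)
    main(to_reverse, input_filename, output_filename)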
Example #3
def __init__(self, tracks):
    self.videos = []
    for track in tracks:
        if 'youtube.com' in track:
            # assume it's a youtube url
            try:
                self.videos.append(video.loadavfromyoutube(track))
            except:
                print 'unable to fetch', track
        else:
            self.videos.append(video.loadav(track))
    self.collectvid = video.EditableFrames(settings=self.videos[0].video.settings)
    self.collectaudio = audio.AudioQuantumList()
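This __init__ belongs to a class whose name is not shown in the snippet. The usage sketch below invents a class name (Mashup) and a track list purely for illustration; it shows how the constructor accepts a mix of local files and YouTube URLs:

# hypothetical usage; "Mashup" stands in for the unnamed class above
tracks = [
    "local_clip.mp4",
    "http://www.youtube.com/watch?v=VIDEO_ID",
]
mashup = Mashup(tracks)
print(len(mashup.videos))  # number of clips that loaded successfully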
Example #4
File: vdissoc.py  Project: MechanisM/remix
def main(infile, outfile, choices=4):
    if infile.startswith("http://"):
        av = video.loadavfromyoutube(infile)
    else:
        av = video.loadav(infile)
    
    meter = av.audio.analysis.time_signature['value']
    fade_in = av.audio.analysis.end_of_fade_in
    fade_out = av.audio.analysis.start_of_fade_out
    sections = av.audio.analysis.sections.that(overlap_range(fade_in, fade_out))
    outchunks = audio.AudioQuantumList()

    for section in sections:
        print str(section) + ":"
        beats = av.audio.analysis.beats.that(are_contained_by(section))
        segments = av.audio.analysis.segments.that(overlap(section))
        num_bars = len(section.children())
        
        print "\t", len(beats), "beats,", len(segments), "segments"
        if len(beats) < meter:
            continue
        
        b = []
        segstarts = []
        for m in range(meter):
            b.append(beats.that(are_beat_number(m)))
            segstarts.append(segments.that(overlap_starts_of(b[m])))
        
        if not b:
            continue
        elif not b[0]:
            continue
        
        now = b[0][0]
        
        for x in range(0, num_bars * meter):
            beat = x % meter
            next_beat = (x + 1) % meter
            now_end_segment = segments.that(contain_point(now.end))[0]
            next_candidates = segstarts[next_beat].ordered_by(timbre_distance_from(now_end_segment))
            if not next_candidates:
                continue
            next_choice = next_candidates[random.randrange(min(choices, len(next_candidates)))]
            next = b[next_beat].that(start_during(next_choice))[0]
            outchunks.append(now)
            print "\t" + now.context_string()
            now = next
    
    out = video.getpieces(av, outchunks)
    out.save(outfile)
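The filter helpers used here (overlap_range, are_contained_by, overlap, are_beat_number, overlap_starts_of, contain_point, start_during) and the ordering helper timbre_distance_from are typically star-imported from the remix selection and sorting modules in examples like this, and random must also be imported. A possible driver, with argument handling that is assumed rather than copied from vdissoc.py:

if __name__ == '__main__':
    import sys
    try:
        infile = sys.argv[1]
        outfile = sys.argv[2]
    except IndexError:
        print("usage: python vdissoc.py <input_file_or_youtube_url> <output_file> [choices]")
        sys.exit(-1)
    choices = int(sys.argv[3]) if len(sys.argv) > 3 else 4
    main(infile, outfile, choices)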
Example #5
def main(input_filename):
    output_filename = input_filename.split("=",1)[1]
    output_dir = "../assets/{}/".format(output_filename)
    mkdir(output_dir)

    #Download youtube video
    youtube_seq = video.loadavfromyoutube(input_filename)

    #Save audio
    youtube_seq.audio.encode(output_dir+output_filename+".mp3")

    #Make output json for slices
    filename = output_dir + output_filename + "_slices.json"
    slices = open(filename, "w")
    #separate variables for video and audio part
    av_video = video.ImageSequence(youtube_seq.video)
    av_audio = youtube_seq.audio

    beats = []
    frameLocs = []
    masterWidth = 10000
    masterHeight = 10000
    for beat in av_audio.analysis.beats:
        #pull thumbnail for beat beginning and halfway through beat
        index1 = av_video.indexvoodo(beat)
        #index2 = av_video.indexvoodo(beat.start+0.5*beat.duration)
        #av_video.renderframe(index1,output_dir+"frames/")
        frameLoc = av_video.files[int(index1.start*av_video.settings.fps)]
        frame = Image.open(frameLoc)
        frame.load()
        frameLocs.append(frameLoc)
        beats.append({'start':beat.start, 'duration':beat.duration})
    json.dump({"beats":beats}, slices, sort_keys=True, indent=4)
    slices.close()
    final_image = Image.new("RGB", (10000, 10000))
    xOffset = 0
    yOffset = 0
    width = 0
    height = 0
    for frameLoc in frameLocs:
        temp_frame = Image.open(frameLoc)
        temp_frame.thumbnail((145,145))
        temp_frame.load()
        final_image.paste(temp_frame,(xOffset,yOffset))
        width = temp_frame.size[0]
        xOffset += width
        if xOffset > masterWidth:
            xOffset = 0
            height = temp_frame.size[1]
            yOffset += height
        if yOffset > masterHeight:
            raise IndexError("Too many images to fit on canvas")
    final_image.save(output_dir+output_filename+".jpg")

    majDict = {0:"C",1:"C#",2:"D",3:"D#",4:"E",5:"F",
            6:"F#",7:"G",8:"G#",9:"A",10:"A#",11:"B"}
    minDict = {0:"cm",1:"c#m",2:"dm",3:"d#m",4:"em",5:"fm",
            6:"f#m",7:"gm",8:"g#m",9:"am",10:"a#m",11:"bm"}

    #write info
    filename = output_dir + output_filename + "_info.json"
    info = open(filename, "w")
    keycode = av_audio.analysis.key['value']
    mode = av_audio.analysis.mode['value']
    try:
        if mode == 0:
            key = minDict[keycode]
        elif mode == 1:
            key = majDict[keycode]
        else:
            raise
    except:
        key = "unknown"
    tempo = av_audio.analysis.tempo['value']
    duration = av_audio.analysis.duration
    time_signature = av_audio.analysis.time_signature['value']
    json.dump({ "width":width, "height":height, "key":key, "tempo":tempo,
                "duration":duration, "time_signature":time_signature}, info,
                sort_keys = True, indent=4)
    info.close()
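This script expects a YouTube URL containing '=' (for example ...watch?v=VIDEO_ID), since the output name is taken from the text after the first '='. It also assumes mkdir (from os), json, Image (from PIL), and the remix video module are imported at module level. A minimal driver, again a sketch rather than part of the original file:

if __name__ == '__main__':
    import sys
    try:
        input_filename = sys.argv[1]
    except IndexError:
        print("usage: %s <youtube_url_with_v=VIDEO_ID>" % sys.argv[0])
        sys.exit(-1)
    main(input_filename)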