Example #1
def main(units, inputFile, outputFile):
    audiofile = audio.LocalAudioFile(inputFile)
    tonic = audiofile.analysis.key['value']

    chunks = getattr(audiofile.analysis, units)  # e.g. "bars", "beats", or "tatums"

    # Get the segments
    all_segments = audiofile.analysis.segments

    # Find tonic segments
    tonic_segments = audio.AudioQuantumList(kind="segment")
    for segment in all_segments:
        pitches = segment.pitches
        if pitches.index(max(pitches)) == tonic:
            tonic_segments.append(segment)

    # Find each chunk that matches each segment
    out_chunks = audio.AudioQuantumList(kind=units)
    for chunk in chunks:
        for segment in tonic_segments:
            if segment.start <= chunk.start <= segment.end:
                out_chunks.append(chunk)
                break

    out = audio.getpieces(audiofile, out_chunks)
    out.encode(outputFile)
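The example defines main but omits its entry point; a minimal driver might look like this (a sketch: the script name, the echonest.remix.audio import path, and the argument order are assumptions not shown in the original).

import sys
import echonest.remix.audio as audio  # import path varies across Remix versions

if __name__ == '__main__':
    try:
        units, input_file, output_file = sys.argv[1], sys.argv[2], sys.argv[3]
    except IndexError:
        print >> sys.stderr, "usage: python tonic.py <units> <input> <output>"
        sys.exit(1)
    main(units, input_file, output_file)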
Example #2
def createAllNotes(directory):
    # createSemitones and addOctave are helper functions defined elsewhere
    allNotes = audio.AudioQuantumList()
    semitones = audio.AudioQuantumList()
    createSemitones(directory, semitones)
    for i in range(4):
        addOctave(semitones, i, allNotes)
    for i in range(1, 3):
        addOctave(semitones, i * -1, allNotes)
    for i in range(len(allNotes)):
        note = audio.AudioQuantumList()
        note.append(allNotes[i])
        out = audio.assemble(note)
        out.encode(str(i) + ".mp3")
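createSemitones and addOctave are helpers defined elsewhere in this source. A plausible sketch of addOctave, assuming modify.Modify offers octave pitch-shifting (shiftPitchOctaves is an assumption, not a confirmed Remix API):

import echonest.remix.modify as modify  # import path varies across Remix versions

def addOctave(semitones, octave, allNotes):
    # hypothetical: transpose each reference semitone by whole octaves
    st = modify.Modify()
    for note in semitones:
        allNotes.append(st.shiftPitchOctaves(note, octave))  # assumed method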
Example #3
def main(num_beats, directory, outfile):

    aud = []
    ff = os.listdir(directory)
    for f in ff:
        # collect the files
        # [-1] is safe even for file names without a dot
        if f.rsplit('.', 1)[-1].lower() in ['mp3', 'aif', 'aiff', 'aifc', 'wav']:
            aud.append(audio.LocalAudioFile(os.path.join(directory, f)))
            # mind the rate limit

    num_files = len(aud)
    x = audio.AudioQuantumList()

    print >> sys.stderr, "Assembling beats.",
    for w in range(num_beats):
        print >> sys.stderr, '.',
        ssong = aud[w % num_files].analysis
        s = ssong.beats[w % len(ssong.beats)]
        tsong = aud[(w - 1) % num_files].analysis
        t = tsong.beats[w % len(tsong.beats)]

        x.append(audio.Simultaneous([s, t]))

    print >> sys.stderr, "\nStarting rendering pass..."

    then = time.time()
    # x.render() calls render_sequentially() with no arguments; that method then
    #  calls itself with contextual arguments for each source, for each
    #  AudioQuantum. It's a lot of tree-walking, but each source file gets
    #  loaded once (and takes itself out of memory when its rendering pass
    #  finishes).
    x.render().encode(outfile)

    print >> sys.stderr, "%f sec for rendering" % (time.time() - then, )
Example #4
def main(input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    bars = audiofile.analysis.bars
    collect = audio.AudioQuantumList()
    for bar in bars:
        collect.append(bar.children()[0])
    out = audio.getpieces(audiofile, collect)
    out.encode(output_filename)
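bar.children()[0] raises IndexError when a bar has no children; Example #12 below guards the same access with try/except. A defensive variant of the loop (a sketch):

for bar in bars:
    try:
        collect.append(bar.children()[0])
    except IndexError:
        pass  # skip bars the analysis left empty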
Example #5
def main(input_filename, output_filename):
    if input_filename.startswith("http://"):
        av = video.loadavfromyoutube(input_filename)
    else:
        av = video.loadav(input_filename)
    collect = audio.AudioQuantumList()
    for bar in av.audio.analysis.bars:
        collect.append(bar.children()[0])
    out = video.getpieces(av, collect)
    out.save(output_filename)
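The video pipeline mirrors the audio one: video.getpieces instead of audio.getpieces, and save() instead of encode(). A driver sketch (the script name and argument order are assumptions):

import sys
import echonest.remix.audio as audio
import echonest.remix.video as video  # import paths vary across Remix versions

if __name__ == '__main__':
    # hypothetical usage: python vone.py <input file or YouTube URL> <output>
    main(sys.argv[1], sys.argv[2])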
Example #6
def main(directory, input_filename, output_filename):
    semitones = []
    # noteList[octave][pitch]: 6 octaves x 12 pitch classes of note samples
    noteList = [[0 for x in range(12)] for x in range(6)]
    collect = audio.AudioQuantumList()
    final = audio.AudioQuantumList()
    #createAllNotes()
    initNoteList(noteList)
    print len(noteList)
    print noteList[0][0].analysis.segments.timbre
    audiofile = audio.LocalAudioFile(input_filename)
    songSegments = audiofile.analysis.segments
    bmp = 10000.0  # best-match pitch distance so far
    bmpi = 0       # pitch index of the best match
    bmt = 10000.0  # best-match timbre distance so far
    bmti = 0       # octave index of the best match
    #print len(songSegments)
    for i in range(len(songSegments)):
        for j in range(12):
            noteSegments = noteList[0][j].analysis.segments
            pDist = distFinder.cosine(
                songSegments[i].pitches,
                noteSegments[len(noteSegments) / 2].pitches)
            if pDist < bmp:
                bmp = pDist
                bmpi = j
        for k in range(6):
            noteSegments = noteList[k][bmpi].analysis.segments
            tDist = distFinder.cosine(
                songSegments[i].timbre[1],
                noteSegments[len(noteSegments) / 2].timbre[1])
            if tDist < bmt:
                bmt = tDist
                bmti = k
        print str(100 * i / len(songSegments)) + '%'
        matchDuration(noteList[bmti][bmpi].analysis.segments, songSegments[i],
                      collect)
        bmp = 10000.0
        bmt = 10000.0
    out = audio.assemble(collect)
    out.encode(output_filename)
Example #7
def main(input_filename, output_filename):
    # load audio file
    audiofile = audio.LocalAudioFile(input_filename)
    # get the beats (Audio Quanta)
    beats = audiofile.analysis.beats
    # create a new empty list of Audio Quanta
    collect = audio.AudioQuantumList()
    # add the first tatum of each beat, in sequence
    for beat in beats:
        # beat.children() are the tatums in this beat;
        # beat.children()[0] is the first tatum
        collect.append(beat.children()[0])
    # Get the raw audio data for the audio quanta and store them in 'out'
    out = audio.getpieces(audiofile, collect)
    # encode the raw audio as the appropriate file type (using en-ffmpeg)
    out.encode(output_filename)
Example #8
def main(infile, outfile, choices=4):
    if infile.startswith("http://"):
        av = video.loadavfromyoutube(infile)
    else:
        av = video.loadav(infile)

    meter = av.audio.analysis.time_signature['value']
    sections = av.audio.analysis.sections
    output = audio.AudioQuantumList()

    for section in sections:
        beats = []
        bars = section.children()
        for bar in bars:
            beats.extend(bar.children())
    
        if not bars or not beats:
            continue

        beat_array = []
        for m in range(meter):
            metered_beats = []
            for i, b in enumerate(beats):
                if i % meter == m:
                    metered_beats.append(b)
            beat_array.append(metered_beats)

        # Always start with the first beat
        output.append(beat_array[0][0])
        for x in range(1, len(bars) * meter):
            meter_index = x % meter
            next_candidates = beat_array[meter_index]

            def sorting_function(chunk, target_chunk=output[-1]):
                timbre = chunk.mean_timbre()
                target_timbre = target_chunk.mean_timbre()
                timbre_distance = numpy.linalg.norm(numpy.array(timbre) - numpy.array(target_timbre))
                return timbre_distance

            next_candidates = sorted(next_candidates, key=sorting_function)
            next_index = random.randint(0, min(choices, len(next_candidates) - 1))
            output.append(next_candidates[next_index])
    
    out = video.getpieces(av, output)
    out.save(outfile)
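The sorting key is plain Euclidean distance in timbre space; in isolation it looks like this (a sketch, assuming mean_timbre() returns a 12-element vector as the code above implies):

import numpy

def timbre_distance(a, b):
    # smaller distance means the two quanta sound more alike
    return numpy.linalg.norm(
        numpy.array(a.mean_timbre()) - numpy.array(b.mean_timbre()))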
Example #9
    def run(self):
        st = modify.Modify()
        collect = audio.AudioQuantumList()
        for a in self.segs:
            seg_index = a.absolute_context()[0]

            distances = self.get_distance_from(a)

            # exclude the segment itself so it can't match itself
            distances[seg_index] = sys.maxint

            match_index = distances.index(min(distances))
            match = self.segs[match_index]
            print seg_index, match_index
            # the Modify instance above was presumably meant to stretch the
            # match to the original segment's length; this version appends
            # the match unchanged
            collect.append(match)
        out = video.getpieces(self.av, collect)
        out.save(self.output_filename)
Example #10
def main(input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    '''
    This line got the bars of the song in the previous version:
    bars = audiofile.analysis.bars
    
    Now, this line gets the beats in the song:
    '''
    beats = audiofile.analysis.beats
    collect = audio.AudioQuantumList()
    '''
    This loop got the first beat in each bar and appended them to a list:
    for bar in bars:
        collect.append(bar.children()[0])
        
    Now, this loop gets the first tatum in each beat (a beat's children are tatums) and appends them to the list:
    '''
    for b in beats:
        collect.append(b.children()[0])
    out = audio.getpieces(audiofile, collect)
    out.encode(output_filename)
Example #11
def main(input_filename, output_filename):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(input_filename)

    # This gets a list of every bar in the track.
    # You can manipulate this just like any other Python list!
    bars = audiofile.analysis.bars

    # This makes a new list of "AudioQuantums".
    # Each one is a discrete chunk of audio: a bar, a beat, etc.
    collect = audio.AudioQuantumList()

    # This loop puts the first item in the children of each bar into the new list.
    # A bar's children are beats!  Simple as that.
    for bar in bars:
        collect.append(bar.children()[0])

    # This assembles the pieces of audio defined in collect from the analyzed audio file.
    out = audio.getpieces(audiofile, collect)

    # This writes the newly created audio to the given file.
    out.encode(output_filename)
Example #12
def main(input_filename, output_filename):
    audiofile = audio.LocalAudioFile(input_filename)
    bars = audiofile.analysis.bars
    collect = audio.AudioQuantumList()
    count = 0
    for bar in bars:
        try:
            beat = bar.children()[1]
            beat_audio = beat.render()
            # stretch the second beat of every third bar by 20%
            ratio = 1.2 if count == 1 else 1.0
            scaled_beat = dirac.timeScale(beat_audio.data, ratio)
            ts = audio.AudioData(ndarray=scaled_beat,
                                 shape=scaled_beat.shape,
                                 sampleRate=audiofile.sampleRate,
                                 numChannels=scaled_beat.shape[1])
            collect.append(ts)
            count = (count + 1) % 3
        except IndexError:
            pass
    out = audio.assemble(collect, numChannels=2)
    out.encode(output_filename)
Example #13
def main(infile, outfile, choices=4, bars=40):
    audiofile = audio.LocalAudioFile(infile)
    meter = audiofile.analysis.time_signature['value']
    fade_in = audiofile.analysis.end_of_fade_in
    fade_out = audiofile.analysis.start_of_fade_out

    beats = []
    for b in audiofile.analysis.beats:
        # keep only beats between the end of the fade-in and the start of the fade-out
        if b.start > fade_in and b.end < fade_out:
            beats.append(b)
    output = audio.AudioQuantumList()

    beat_array = []
    for m in range(meter):
        metered_beats = []
        for i, b in enumerate(beats):
            if i % meter == m:
                metered_beats.append(b)
        beat_array.append(metered_beats)

    # Always start with the first beat
    output.append(beat_array[0][0])
    for x in range(1, bars * meter):
        meter_index = x % meter
        next_candidates = beat_array[meter_index]

        def sorting_function(chunk, target_chunk=output[-1]):
            pitches = chunk.mean_pitches()
            target_pitches = target_chunk.mean_pitches()
            pitch_distance = numpy.linalg.norm(
                numpy.array(pitches) - numpy.array(target_pitches))
            return pitch_distance

        next_candidates = sorted(next_candidates, key=sorting_function)
        next_index = random.randint(0, min(choices, len(next_candidates) - 1))
        output.append(next_candidates[next_index])

    out = audio.getpieces(audiofile, output)
    out.encode(outfile)
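beat_array groups beats by their position within the bar, so beat_array[0] holds every downbeat. The same grouping with plain ints, for illustration:

meter = 4
beats = range(12)  # stand-ins for twelve beat objects
beat_array = [[b for i, b in enumerate(beats) if i % meter == m]
              for m in range(meter)]
print beat_array[0]  # [0, 4, 8] -- the downbeats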
Example #14
def main(units, inputFile, outputFile):
    audiofile = audio.LocalAudioFile(inputFile)
    collect = audio.AudioQuantumList()
    if not audiofile.analysis.bars:
        print "No bars found in this analysis!"
        print "No output."
        sys.exit(-1)
    for b in audiofile.analysis.bars[0:-1]:
        # all but the last beat
        collect.extend(b.children()[0:-1])
        if units.startswith("tatum"):
            # all but the last half (round down) of the last beat
            half = -(len(b.children()[-1].children()) // 2)
            collect.extend(b.children()[-1].children()[0:half])
    # endings were rough, so leave everything after the start
    # of the final bar intact:
    last = audio.AudioQuantum(
        audiofile.analysis.bars[-1].start,
        audiofile.analysis.duration - audiofile.analysis.bars[-1].start)
    collect.append(last)
    out = audio.getpieces(audiofile, collect)
    out.encode(outputFile)
Example #15
def get_canonical_segments(audio_file):
    """For each bar, iterate through that bar's segments
    and calculate the relative start times. Make sure we don't
    include the same segment twice."""
    bars = audio_file.analysis.bars
    seen_segments = set()
    canonical_segments = audio.AudioQuantumList()
    for bar in bars:
        for segment in bar.segments:
            abs_context = segment.absolute_context()
            segment_index = abs_context[0]
            if segment_index in seen_segments:
                continue
            relative_start_time = segment.start - bar.start
            # the following condition probably isn't necessary, because
            # the condition above will catch it. but just in case
            if relative_start_time < 0:
                continue
            segment.relative_start_time = relative_start_time
            canonical_segments.append(segment)
            seen_segments.add(segment_index)
    print 'that yielded %i canonical segments' % len(canonical_segments)
    return canonical_segments
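A usage sketch (the file name is hypothetical):

audio_file = audio.LocalAudioFile("track.mp3")
segments = get_canonical_segments(audio_file)
for seg in segments[:4]:
    # each canonical segment now carries its start time relative to its bar
    print seg.relative_start_time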
Example #16
def main(units, inputFile, outputFile):
    # This takes your input track, sends it to the analyzer, and returns the results.
    audiofile = audio.LocalAudioFile(inputFile)

    # This makes a new list of "AudioQuantums".
    # Each one is a discrete chunk of audio: a bar, a beat, etc.
    collect = audio.AudioQuantumList()

    # If the analysis can't find any bars, stop!
    # (This might happen with really ambient music)
    if not audiofile.analysis.bars:
        print "No bars found in this analysis!"
        print "No output."
        sys.exit(-1)

    # This loop puts all but the last of each bar into the new list!
    for b in audiofile.analysis.bars[0:-1]:
        collect.extend(b.children()[0:-1])

        # If we're using tatums instead of beats, we want all but the last half (round down) of the last beat
        # A tatum is the smallest rhythmic subdivision of a beat -- http://en.wikipedia.org/wiki/Tatum_grid
        if units.startswith("tatum"):
            half = -(len(b.children()[-1].children()) // 2)
            collect.extend(b.children()[-1].children()[0:half])

    # Endings were rough, so leave everything after the start of the final bar intact:
    last = audio.AudioQuantum(
        audiofile.analysis.bars[-1].start,
        audiofile.analysis.duration - audiofile.analysis.bars[-1].start)
    collect.append(last)

    # This assembles the pieces of audio defined in collect from the analyzed audio file.
    out = audio.getpieces(audiofile, collect)

    # This writes the newly created audio to the given file.
    out.encode(outputFile)
Example #17
def main(num_beats, directory, outfile):
    # this script uses two special effects: TimeTruncateLength (since we only
    #  ever make AudioQuanta shorter) and LevelDB for volume changes
    aud = []
    ff = os.listdir(directory)
    for f in ff:
        # collect the files
        # [-1] is safe even for file names without a dot
        if f.rsplit('.', 1)[-1].lower() in ['mp3', 'aif', 'aiff', 'aifc', 'wav']:
            # the new defer kwarg doesn't load the audio until needed
            filename = os.path.join(directory, f)
            aud.append(audio.LocalAudioFile(filename, defer=True))
        # mind the rate limit

    num_files = len(aud)

    print >> sys.stderr, "Sorting files by key..."
    # sort by key signature: with enough files, it'll sound
    # like it's always going up in pitch
    aud.sort(key=keysig)

    x = audio.AudioQuantumList()

    print >> sys.stderr, "Assembling beats.",
    for w in range(num_beats):
        print >> sys.stderr, '.',

        # cycle through the audio files (with a different period)
        ssong = aud[w % (num_files - 1)].analysis

        # cycle through the beats
        s = ssong.beats[w % len(ssong.beats)]

        # run an accelerando, and truncate the beats if the reference is shorter
        new_dur = pow(2, -3.0 * w / num_beats)
        if new_dur < s.duration:
            shorter = audio.TimeTruncateLength(new_dur)
            s = shorter(s)

        # sort-of normalize based on overall track volume with a crescendo
        level_change = audio.LevelDB(-15 - ssong.loudness +
                                     (6.0 * w / num_beats))
        s = level_change(s)

        # cycle through the songs (out of phase with 's')
        tsong = aud[(w - 1) % num_files].analysis

        # cycle through the beats (out of phase)
        t = tsong.beats[w % len(tsong.beats)]

        # have a more dramatic volume change
        level_change = audio.LevelDB(-18 - tsong.loudness +
                                     (9.0 * w / num_beats))
        t = level_change(t)
        # also note that there will be significant overlap of the un-truncated 't' beats
        #  by the end, making things louder overall

        # glue the two beats together
        x.append(audio.Simultaneous([s, t]))

    print >> sys.stderr, "\nStarting rendering pass..."

    then = time.time()
    # x.render() calls render_sequentially() with no arguments; that method then
    #  calls itself with contextual arguments for each source, for each
    #  AudioQuantum. It's a lot of tree-walking, but each source file gets
    #  loaded once (and takes itself out of memory when its rendering pass
    #  finishes).
    x.render().encode(outfile)

    print >> sys.stderr, "%f sec for rendering" % (time.time() - then, )

    print >> sys.stderr, "Outputting XML: each source makes an API call for its metadata."
Example #18
def main(input_filename):
    audiofile = audio.LocalAudioFile(input_filename)

    if granularity == "segment":
        all_audio = audiofile.analysis.segments
    elif granularity == "tatum":
        all_audio = audiofile.analysis.tatums
    elif granularity == "beat":
        all_audio = audiofile.analysis.beats
    elif granularity == "bar":
        all_audio = audiofile.analysis.bars

    all_segments = audiofile.analysis.segments

    output_text_filename = "%ss.timbre" % granularity  # e.g. "beats.timbre"
    f = open(output_text_filename, 'w')
    counter = 0
    for chunk in all_audio:
        output_filename = "%s_%s.wav" % (granularity, counter)
        counter = counter + 1

        collect = audio.AudioQuantumList()
        collect.append(chunk)
        out = audio.getpieces(audiofile, collect)
        out.encode(output_filename)

        # Now I need to write things
        # I am going to take timbre values 1 through 6, as 0 is just amplitude.
        temp_timbre = []
        if granularity == "segment":
            temp_timbre = [
                chunk.timbre[1:7]
            ]  # This is needed to make things work with the numpy array stuff

        # Work out how to get averages here
        # There must be a better way to get segments from an audioQuanta...
        if granularity != "segment":
            for segment in all_segments:
                if segment.start >= chunk.start and segment.start < chunk.get_end(
                ):
                    temp_timbre.append(segment.timbre[1:7])
                elif segment.start > chunk.get_end():
                    break
            # This is if we have no segments that starts in the chunk
            if not temp_timbre:
                for segment in all_segments:
                    if segment.start < chunk.start and segment.end > chunk.get_end(
                    ):
                        temp_timbre.append(segment.timbre[1:7])
                        break

        temp_timbre = numpy.array(temp_timbre)
        if temp_timbre.size == 0:
            temp_timbre = numpy.array([[0, 0, 0, 0, 0, 0]])
        timbre_list = list(temp_timbre.mean(axis=0))
        timbre_list = [str(math.floor(t)) for t in timbre_list]

        # Yes, I am writing one number per line.  Shhh.  ChucK's string reading abilities are awful
        for timbre in timbre_list:
            f.write("%s\n" % timbre)

    f.close()
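granularity is read as a module-level global, so a driver must set it before calling main (a sketch; the script name and command-line layout are assumptions):

import sys

if __name__ == '__main__':
    # hypothetical usage: python chop.py <input> [segment|tatum|beat|bar]
    granularity = sys.argv[2] if len(sys.argv) > 2 else "beat"
    main(sys.argv[1])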