Example 1
  def shift_segment(self, segment, amount):
    # filenames
    tmp_root = "/tmp/%s" % self.checksum[-3:]
    tmp_base = "%s/%s/" % (tmp_root, self.checksum)
    tmp_filename = self.checksum[:6] + str(segment.absolute_context()[0])
    tmp_path = tmp_base + tmp_filename + '.wav'
    shifted_path = tmp_base + tmp_filename + '_shifted.wav'

    # make sure the temp directory exists
    if not os.path.exists(tmp_base):
      os.makedirs(tmp_base)
    
    # write the segment out
    tmp_file = segment.encode(tmp_path)

    # shift it
    subprocess.call([
      'rubberband',
      '--pitch', str(amount),
      tmp_path,
      shifted_path])

    # put it back into a segment
    new_data = AudioData(shifted_path)
    new_data.load()

    # clean up the files
    # os.removedirs(tmp_root)  # disabled: assumes no collisions, and could delete something else in there

    sys.stdout.flush()

    return new_data 
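
A hedged usage sketch: assuming this method belongs to a wrapper object whose checksum identifies the track and whose analysis exposes segments (echonest-remix style), and that the rubberband command-line tool is on the PATH, a caller might write:

    # Hypothetical usage -- names are illustrative, not from the source:
    # shifted = self.shift_segment(self.analysis.segments[0], 3)
    # writes the segment under /tmp, shifts it up three semitones with the
    # rubberband CLI, and returns the result as a new AudioData.
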
Example 2
def do_work(track, options):
    
    verbose = bool(options.verbose)
    
    # This gets the swing factor and clamps it to [-0.9, +0.9]
    swing = float(options.swing)
    if swing < -0.9: swing = -0.9
    if swing > +0.9: swing = +0.9
    
    # If there's no swing, return the original tune
    if swing == 0:
        return Playback(track, 0, track.analysis.duration)
    
    # This gets the beats and the sample offset where the first beat starts
    beats = track.analysis.beats
    offset = int(beats[0].start * track.sampleRate)

    # compute rates
    rates = []
    # This is where the magic happens:
    # For each beat, compute how much to stretch / compress each half of each beat
    for beat in beats[:-1]:
        # This adds swing:
        if 0 < swing:
            rate1 = 1+swing
            dur = beat.duration/2.0
            stretch = dur * rate1
            rate2 = (beat.duration-stretch)/dur
        # This removes swing
        else:
            rate1 = 1 / (1+abs(swing))
            dur = (beat.duration/2.0) / rate1
            stretch = dur * rate1
            rate2 = (beat.duration-stretch)/(beat.duration-dur)
        # This builds the list of swing rates for each beat
        start1 = int(beat.start * track.sampleRate)
        start2 = int((beat.start+dur) * track.sampleRate)
        rates.append((start1-offset, rate1))
        rates.append((start2-offset, rate2))
        if verbose:
            args = (beats.index(beat), dur, beat.duration-dur, stretch, beat.duration-stretch)
            print "Beat %d - split [%.3f|%.3f] - stretch [%.3f|%.3f] seconds" % args
    
    # This gets all the audio, from the start of the first beat to the start of the last beat
    vecin = track.data[offset:int(beats[-1].start * track.sampleRate),:]
    # This block does the time stretching
    if verbose: 
        print "\nTime stretching..."
    # Dirac is a timestretching tool that comes with remix.
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)
    # This builds the timestretched AudioData object
    ts = AudioData(ndarray=vecout, shape=vecout.shape, 
                    sampleRate=track.sampleRate, numChannels=vecout.shape[1], 
                    verbose=verbose)
    # Create playback objects (just a collection of audio) for the first and last beat
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start, track.analysis.duration-beats[-1].start)
    
    # Return the first beat, the timestretched beats, and the last beat
    return [pb1, ts, pb2]
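
To make the arithmetic concrete: with swing = 0.5 and a one-second beat, the loop above produces rate1 = 1.5 and rate2 = 0.5, so the first half of the beat stretches from 0.5 s to 0.75 s while the second half shrinks to 0.25 s; the beat keeps its total length but gains a shuffled, triplet-like feel. A minimal sketch of that computation:

    # Worked example of the swing > 0 branch above.
    swing = 0.5
    duration = 1.0                             # a one-second beat
    rate1 = 1 + swing                          # 1.5: lengthen the first half
    dur = duration / 2.0                       # 0.5 s
    stretch = dur * rate1                      # 0.75 s after stretching
    rate2 = (duration - stretch) / dur         # 0.5: compress the second half
    assert stretch + dur * rate2 == duration   # the beat keeps its length
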
Example 3
    def render(self):
        t1, t2 = map(make_stereo, (self.t1.get(), self.t2.get()))
        vecout = crossfade(t1.data, t2.data, self.mode)
        audio_out = AudioData(ndarray=vecout,
                              shape=vecout.shape,
                              sampleRate=t1.sampleRate,
                              numChannels=vecout.shape[1])
        return audio_out
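
make_stereo is not shown in this snippet. A plausible sketch of what it must do so that crossfade receives two arrays of matching shape (an assumption about the helper, not the library's actual code):

    from numpy import column_stack

    def make_stereo(audio):
        # Assumed behavior: promote mono AudioData to two identical channels.
        if audio.data.ndim == 1 or audio.data.shape[1] == 1:
            mono = audio.data.reshape((-1,))
            audio.data = column_stack((mono, mono))
            audio.numChannels = 2
        return audio
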
Example 4
    def __init__(self, filename, analysis=None):
        AudioData.__init__(self, filename=filename, verbose=True, defer=False, sampleRate=None, numChannels=None)
        try:
            tempanalysis = cPickle.loads(analysis)
        except Exception:
            track_md5 = hashlib.md5(file(self.filename, 'rb').read()).hexdigest()

            print >> sys.stderr, "Computed MD5 of file is " + track_md5
            try:
                print >> sys.stderr, "Probing for existing analysis"
                tempanalysis = AudioAnalysis(track_md5)
            except Exception:
                print >> sys.stderr, "Analysis not found. Uploading..."
                tempanalysis = AudioAnalysis(filename)

        self.analysis = tempanalysis
        self.analysis.source = self
        self.is_local = False
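
Typical usage, assuming this __init__ belongs to a LocalAudioFile-style class (the class name is an assumption): because the file is probed by MD5 first, constructing the same file twice reuses the cached analysis instead of re-uploading it.

    # Hypothetical usage -- class name assumed:
    # track = LocalAudioFile("song.mp3")
    # print track.analysis.tempo['value']   # analysis fetched or computed above
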
Example 5
def do_work(track, options):

    verbose = bool(options.verbose)

    # swing factor
    swing = float(options.swing)
    if swing < -0.9: swing = -0.9
    if swing > +0.9: swing = +0.9

    if swing == 0:
        return Playback(track, 0, track.analysis.duration)

    beats = track.analysis.beats
    offset = int(beats[0].start * track.sampleRate)

    # compute rates
    rates = []
    for beat in beats[:-1]:
        # add swing
        if 0 < swing:
            rate1 = 1 + swing
            dur = beat.duration / 2.0
            stretch = dur * rate1
            rate2 = (beat.duration - stretch) / dur
        # remove swing
        else:
            rate1 = 1 / (1 + abs(swing))
            dur = (beat.duration / 2.0) / rate1
            stretch = dur * rate1
            rate2 = (beat.duration - stretch) / (beat.duration - dur)
        # build list of rates
        start1 = int(beat.start * track.sampleRate)
        start2 = int((beat.start + dur) * track.sampleRate)
        rates.append((start1 - offset, rate1))
        rates.append((start2 - offset, rate2))
        if verbose:
            args = (beats.index(beat), dur, beat.duration - dur, stretch,
                    beat.duration - stretch)
            print "Beat %d - split [%.3f|%.3f] - stretch [%.3f|%.3f] seconds" % args

    # get audio
    vecin = track.data[offset:int(beats[-1].start * track.sampleRate), :]
    # time stretch
    if verbose:
        print "\nTime stretching..."
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)
    # build the time-stretched AudioData object
    ts = AudioData(ndarray=vecout,
                   shape=vecout.shape,
                   sampleRate=track.sampleRate,
                   numChannels=vecout.shape[1],
                   verbose=verbose)
    # initial and final playback
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start,
                   track.analysis.duration - beats[-1].start)

    return [pb1, ts, pb2]
Example 6
    def stretch(self, t, l):
        """Time-stretch track t according to l, a list of (start, duration)
        pairs; self.durations holds the matching target duration for each pair."""
        signal_start = int(l[0][0] * t.sampleRate)
        signal_duration = int((sum(l[-1]) - l[0][0]) * t.sampleRate)
        vecin = t.data[signal_start:signal_start + signal_duration, :]

        rates = []
        for i in xrange(len(l)):
            rate = (int(l[i][0] * t.sampleRate) - signal_start,
                    self.durations[i] / l[i][1])
            rates.append(rate)

        vecout = dirac.timeScale(vecin, rates, t.sampleRate, 0)
        if hasattr(t, 'gain'):
            vecout = limit(multiply(vecout, float32(t.gain)))

        audio_out = AudioData(ndarray=vecout,
                              shape=vecout.shape,
                              sampleRate=t.sampleRate,
                              numChannels=vecout.shape[1])
        return audio_out
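
A hedged usage sketch, reading l as (onset, duration) pairs and self.durations as the matching target durations, so each rate is target length over source length:

    # Hypothetical call, assuming beats from an echonest-remix analysis:
    # pairs = [(b.start, b.duration) for b in t.analysis.beats[:4]]
    # self.durations = [0.5] * 4         # squeeze each beat to half a second
    # out = self.stretch(t, pairs)       # rate[i] = 0.5 / pairs[i][1]
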
Example 7
def do_work(track, options):

    # manage options
    verbose = bool(options.verbose)
    low_tempo = float(options.low)
    high_tempo = float(options.high)
    rate_tempo = float(options.rate)
    rubato = float(options.rubato)
    tempo = float(options.tempo)

    # no acceleration requested: fall back to a fixed tempo
    if rate_tempo == 0:
        if tempo == 0:
            low_tempo = track.analysis.tempo['value']
            high_tempo = low_tempo
        else:
            low_tempo = tempo
            high_tempo = tempo

    rates = []
    count = min(max(0, int(options.offset)), 1)
    beats = track.analysis.beats
    offset = int(beats[0].start * track.sampleRate)

    # for every beat
    for beat in beats[:-1]:

        # get a tempo, particularly for accelerando
        target_tempo = select_tempo(beats.index(beat), len(beats), low_tempo,
                                    high_tempo, rate_tempo)

        # calculate rates
        if count == 0:
            dur = beat.duration / 2.0
            rate1 = 60.0 / (target_tempo * dur)
            stretch = dur * rate1
            rate2 = rate1 + rubato
        elif count == 1:
            rate1 = 60.0 / (target_tempo * beat.duration)

        # add a change of rate at a given time
        start1 = int(beat.start * track.sampleRate)
        rates.append((start1 - offset, rate1))
        if count == 0:
            start2 = int((beat.start + dur) * track.sampleRate)
            rates.append((start2 - offset, rate2))

        # show on screen
        if verbose:
            if count == 0:
                args = (beats.index(beat), count, beat.duration, dur * rate1,
                        dur * rate2, 60.0 / (dur * rate1),
                        60.0 / (dur * rate2))
                print "Beat %d (%d) | stretch %.3f sec into [%.3f|%.3f] sec | tempo = [%d|%d] bpm" % args
            elif count == 1:
                args = (beats.index(beat), count, beat.duration,
                        beat.duration * rate1, 60.0 / (beat.duration * rate1))
                print "Beat %d (%d) | stretch %.3f sec into %.3f sec | tempo = %d bpm" % args

        count = (count + 1) % 2

    # get audio
    vecin = track.data[offset:int(beats[-1].start * track.sampleRate), :]

    # time stretch
    if verbose:
        print "\nTime stretching..."
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)

    # build timestretch AudioData object
    ts = AudioData(ndarray=vecout,
                   shape=vecout.shape,
                   sampleRate=track.sampleRate,
                   numChannels=vecout.shape[1],
                   verbose=verbose)

    # initial and final playback
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start,
                   track.analysis.duration - beats[-1].start)

    return [pb1, ts, pb2]
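
As a sanity check on the rate formula (assuming, as the swing example does, that dirac.timeScale treats each rate as a duration scaling factor): for a 0.6-second beat and a 120 bpm target in the count == 1 case,

    # Worked example of the whole-beat rate (count == 1).
    target_tempo = 120.0                        # bpm
    duration = 0.6                              # seconds, analyzed beat length
    rate1 = 60.0 / (target_tempo * duration)    # ~0.833
    new_duration = duration * rate1             # 0.5 s: one beat at 120 bpm
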
Example 8
    def update(self, *args):
        from random import random, choice
        print "play", self.curr_md5, "beat", self.curr_beat
        cursor_ = args[0]
        branch_cursor_ = args[1]
        last_branch_cursor_ = args[2]
        self.curr_player.play(self.curr_laf.analysis.beats[self.curr_beat])
        self.curr_beat = (self.curr_beat + 1) % len(
            self.curr_laf.analysis.beats)
        # get candidates
        candidates = self.all_edges[self.curr_md5].get(self.curr_beat, [])
        candidates = [
            candidates[i] for i in range(len(candidates))
            if candidates[i][0] < self.threshold
        ]
        # restrict to local branches if we just changed songs and are resetting the data structures
        if self.thread is not None:
            if self.thread.ejecting():
                candidates = [
                    candidates[i] for i in range(len(candidates))
                    if candidates[i][1] == self.curr_md5
                ]
        branched = False
        if len(candidates) > 0:
            print len(
                candidates
            ), "branch candidates, prob =", self.curr_branch_probability
            # print candidates
            # flip a coin
            if random() < self.curr_branch_probability:
                # Render the beat so its audio data is available for the crossfade
                first_rendered_beat = self.curr_laf.analysis.beats[
                    self.curr_beat].render()
                print "Branch!!!"
                branch = choice(candidates)
                changed_song = branch[1] != self.curr_md5
                self.last_branch[0] = [
                    self.curr_beat + self.start_beats[self.curr_md5]
                ]
                self.curr_md5 = branch[1]
                self.curr_beat = branch[2]
                self.curr_player = self.aq_players[self.curr_md5]
                self.curr_laf = self.local_audio[self.curr_md5]
                self.curr_branch_probability = self.min_branch_probability
                self.last_branch[1] = [
                    self.curr_beat + self.start_beats[self.curr_md5]
                ]
                branched = True

                # Render the next beat as well
                second_rendered_beat = self.curr_laf.analysis.beats[
                    self.curr_beat].render()

                # Trim both beats to the length of the shorter one
                min_len = min(first_rendered_beat.data.shape[0],
                              second_rendered_beat.data.shape[0])
                first = first_rendered_beat.data[0:min_len, :]
                second = second_rendered_beat.data[0:min_len, :]

                # Crossfade between the two equal-length beats
                third = crossfade(first, second, 'linear')

                # If the first beat is longer, the crossfade itself is the output
                if first_rendered_beat.data.shape[
                        0] > second_rendered_beat.data.shape[0]:
                    audio_out = AudioData(
                        ndarray=third,
                        shape=third.shape,
                        sampleRate=first_rendered_beat.sampleRate,
                        numChannels=third.shape[1])
                # If the second beat is longer, keep its tail beyond the crossfade
                else:
                    # The crossfade replaces the first part of the second beat
                    second_rendered_beat.data[0:min_len, :] = third
                    audio_out = AudioData(
                        ndarray=second_rendered_beat.data,
                        shape=second_rendered_beat.data.shape,
                        sampleRate=first_rendered_beat.sampleRate,
                        numChannels=second_rendered_beat.data.shape[1])

                self.curr_player.play_audio_data(audio_out)
                self.curr_beat = (self.curr_beat + 1) % len(
                    self.curr_laf.analysis.beats)

                if changed_song:
                    print "********** Changed song **********"
                    # signal that the data loading thread should reset
                    self.last_branch = [self.curr_beat, self.curr_beat]
                    if self.thread is not None:
                        self.thread.eject(self.curr_md5)

            else:
                self.curr_branch_probability = min(
                    self.max_branch_probability, self.curr_branch_probability +
                    self.step_branch_probability)
        #self.curr_player.play(self.curr_laf.analysis.beats[self.curr_beat])
        #self.curr_beat = (self.curr_beat + 1) % len(self.curr_laf.analysis.beats)
        # update cursor
        t0 = self.curr_beat + self.start_beats[self.curr_md5]
        cursor_.set_xdata(t0)
        cursor_.set_ydata(t0)

        if len(candidates) > 0:
            from numpy import vstack, repeat, array
            t0 = repeat(t0, len(candidates), 0)
            t1 = array([self.start_beats[c[1]] for c in candidates]) + array(
                [c[2] for c in candidates])
            branch_x = vstack((t0, t0, t1, t1, t0)).T.reshape((-1, 1))
            branch_y = vstack((t0, t1, t1, t0, t0)).T.reshape((-1, 1))
            branch_cursor_.set_xdata(branch_x)
            branch_cursor_.set_ydata(branch_y)
            self.ghost = 1
        elif self.ghost >= 4:
            branch_cursor_.set_xdata([])
            branch_cursor_.set_ydata([])
        else:
            self.ghost += 1

        if branched:
            if self.last_branch[0] < self.last_branch[1]:
                last_branch_cursor_.set_color('green')
            else:
                last_branch_cursor_.set_color('red')
            last_branch_x = [self.last_branch[i] for i in [0, 1, 1]]
            last_branch_y = [self.last_branch[i] for i in [0, 0, 1]]
            last_branch_cursor_.set_xdata(last_branch_x)
            last_branch_cursor_.set_ydata(last_branch_y)

        args[0].figure.canvas.draw()
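
For reference, a minimal sketch of the length-matched linear crossfade this method depends on; the real crossfade helper may differ, so treat this as an assumption about its behavior:

    from numpy import linspace

    def linear_crossfade(first, second):
        # Mix two equal-length sample arrays with complementary 0 -> 1 ramps.
        n = first.shape[0]
        ramp = linspace(0.0, 1.0, n).reshape((n, 1))
        return first * (1.0 - ramp) + second * ramp
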
Example 9
def do_work(track, options):

    # This manages the various input options
    verbose = bool(options.verbose)
    low_tempo = float(options.low)
    high_tempo = float(options.high)
    rate_tempo = float(options.rate)
    rubato = float(options.rubato)
    tempo = float(options.tempo)

    # This sets the tempo and applies acceleration, if any
    if rate_tempo == 0:
        if tempo == 0:
            low_tempo = track.analysis.tempo['value']
            high_tempo = low_tempo
        else:
            low_tempo = tempo
            high_tempo = tempo

    rates = []
    count = min(max(0, int(options.offset)), 1)
    beats = track.analysis.beats
    offset = int(beats[0].start * track.sampleRate)

    # For every beat, we get a tempo, and apply a time stretch
    for beat in beats[:-1]:

        # Get a tempo for the beat
        target_tempo = select_tempo(beats.index(beat), len(beats), low_tempo,
                                    high_tempo, rate_tempo)

        # Calculate rates for time stretching each beat.
        if count == 0:
            dur = beat.duration / 2.0
            rate1 = 60.0 / (target_tempo * dur)
            stretch = dur * rate1
            rate2 = rate1 + rubato
        elif count == 1:
            rate1 = 60.0 / (target_tempo * beat.duration)

        # Add a change of rate at a given time
        start1 = int(beat.start * track.sampleRate)
        rates.append((start1 - offset, rate1))
        if count == 0:
            start2 = int((beat.start + dur) * track.sampleRate)
            rates.append((start2 - offset, rate2))

        # This prints what's happening, if verbose mode is on.
        if verbose:
            if count == 0:
                args = (beats.index(beat), count, beat.duration, dur * rate1,
                        dur * rate2, 60.0 / (dur * rate1),
                        60.0 / (dur * rate2))
                print "Beat %d (%d) | stretch %.3f sec into [%.3f|%.3f] sec | tempo = [%d|%d] bpm" % args
            elif count == 1:
                args = (beats.index(beat), count, beat.duration,
                        beat.duration * rate1, 60.0 / (beat.duration * rate1))
                print "Beat %d (%d) | stretch %.3f sec into %.3f sec | tempo = %d bpm" % args

        count = (count + 1) % 2

    # This gets the audio
    vecin = track.data[offset:int(beats[-1].start * track.sampleRate), :]

    # This does the time stretch
    if verbose:
        print "\nTime stretching..."
    # Dirac is a timestretching tool that comes with remix.
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)

    # This builds the timestretched AudioData object
    ts = AudioData(ndarray=vecout,
                   shape=vecout.shape,
                   sampleRate=track.sampleRate,
                   numChannels=vecout.shape[1],
                   verbose=verbose)

    # Create playback objects (just a collection of audio) for the first and last beat
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start,
                   track.analysis.duration - beats[-1].start)

    # Return the first beat, the timestretched beats, and the last beat
    return [pb1, ts, pb2]