# Example 1
def terminate(dur_intro, middle, dur_outro, duration, lgh):
    """Merge intro/outro into the action list and trim any excess length.

    Args:
        dur_intro: duration (sec) of the intro to merge into the actions.
        middle: non-empty list of actions; its endpoints are mutated in place.
        dur_outro: duration (sec) of the outro to merge into the actions.
        duration: target total duration (sec) when length adjustment is on.
        lgh: when falsy, skip the length adjustment entirely.

    Returns:
        The combined (and possibly trimmed) list of actions, ending with a
        Fadeout when trimming occurred.
    """
    # merge intro: extend an initial Playback backwards, otherwise prepend one
    if isinstance(middle[0], Playback):
        middle[0].start = 0
        middle[0].duration += dur_intro
        start = []
    else:
        start = [Playback(middle[0].track, 0, dur_intro)]
    # merge outro: extend a final Playback, otherwise append one after it
    if isinstance(middle[-1], Playback):
        middle[-1].duration += dur_outro
        end = []
    else:
        end = [Playback(middle[-1].track, middle[-1].start + middle[-1].duration, dur_outro)]
    # combine
    actions = start + middle + end
    if not lgh:
        return actions
    excess = sum(inst.duration for inst in actions) - duration
    if excess == 0:
        return actions
    # trim the end with fadeout
    if actions[-1].duration <= FADE_OUT + excess:
        # the last action is fully consumed by the fade: drop it entirely
        start = actions[-1].start
        dur = FADE_OUT
        # pop() removes the last element; remove() would match the FIRST
        # action comparing equal to it, which may not be the last one
        actions.pop()
    else:
        # shorten the last action and fade out over its tail
        actions[-1].duration -= FADE_OUT + excess
        start = actions[-1].start + actions[-1].duration
        dur = FADE_OUT
    actions.append(Fadeout(middle[0].track, start, dur))
    return actions
# Example 2
def initialize(track, inter, transition):
    """Find the initial cursor location and build the opening actions.

    Args:
        track: analyzed track with a populated ``resampled`` dict.
        inter: duration (sec) of the initial straight playback.
        transition: duration (sec) reserved for the upcoming transition.

    Returns:
        [Fadein, Playback] covering the start of the track.
    """
    mat = track.resampled['matrix']
    markers = getattr(track.analysis, track.resampled['rate'])

    try:
        # compute duration of matrix
        mat_dur = markers[track.resampled['index'] + rows(
            mat)].start - markers[track.resampled['index']].start
        start = (mat_dur - inter - transition - FADE_IN) / 2
        dur = start + FADE_IN + inter
        # move cursor to transition marker
        duration, track.resampled['cursor'] = move_cursor(track, dur, 0)
        # work backwards to find the exact locations of initial fade in and playback sections
        fi = Fadein(
            track,
            markers[track.resampled['index'] + track.resampled['cursor']].start
            - inter - FADE_IN, FADE_IN)
        pb = Playback(
            track,
            markers[track.resampled['index'] + track.resampled['cursor']].start
            - inter, inter)
    except Exception:
        # fall back to a fixed fade-in at the very start of the track
        # (was a bare except, which also swallowed KeyboardInterrupt/SystemExit)
        track.resampled['cursor'] = FADE_IN + inter
        fi = Fadein(track, 0, FADE_IN)
        pb = Playback(track, FADE_IN, inter)

    return [fi, pb]
# Example 3
def do_work(track, options):

    verbose = bool(options.verbose)

    # swing factor
    swing = float(options.swing)
    if swing < -0.9: swing = -0.9
    if swing > +0.9: swing = +0.9

    if swing == 0:
        return Playback(track, 0, track.analysis.duration)

    beats = track.analysis.beats
    offset = int(beats[0].start * track.sampleRate)

    # compute rates
    rates = []
    for beat in beats[:-1]:
        # put swing
        if 0 < swing:
            rate1 = 1 + swing
            dur = beat.duration / 2.0
            stretch = dur * rate1
            rate2 = (beat.duration - stretch) / dur
        # remove swing
        else:
            rate1 = 1 / (1 + abs(swing))
            dur = (beat.duration / 2.0) / rate1
            stretch = dur * rate1
            rate2 = (beat.duration - stretch) / (beat.duration - dur)
        # build list of rates
        start1 = int(beat.start * track.sampleRate)
        start2 = int((beat.start + dur) * track.sampleRate)
        rates.append((start1 - offset, rate1))
        rates.append((start2 - offset, rate2))
        if verbose:
            args = (beats.index(beat), dur, beat.duration - dur, stretch,
                    beat.duration - stretch)
            print "Beat %d — split [%.3f|%.3f] — stretch [%.3f|%.3f] seconds" % args

    # get audio
    vecin = track.data[offset:int(beats[-1].start * track.sampleRate), :]
    # time stretch
    if verbose:
        print "\nTime stretching..."
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)
    # build timestretch AudioData object
    ts = AudioData(ndarray=vecout,
                   shape=vecout.shape,
                   sampleRate=track.sampleRate,
                   numChannels=vecout.shape[1],
                   verbose=verbose)
    # initial and final playback
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start,
                   track.analysis.duration - beats[-1].start)

    return [pb1, ts, pb2]
# Example 4
def make_crossfade(track1, track2, inter):
    """Crossfade out of track1 into track2, then play track2 for a while.

    Updates track2's resampled cursor as a side effect and returns
    [Crossfade, Playback].
    """
    rs1 = track1.resampled
    rs2 = track2.resampled
    beats1 = getattr(track1.analysis, rs1['rate'])

    # where the fade leaves track1: a marker position, or the raw cursor
    # when there are too few markers to search
    if len(beats1) < MIN_SEARCH:
        fade_out_point = rs1['cursor']
    else:
        fade_out_point = beats1[rs1['index'] + rs1['cursor']].start

    # center the (inter + two fades) window inside track2, clamped at 0
    fade_in_point = max((track2.analysis.duration - (inter + 2 * X_FADE)) / 2, 0)
    beats2 = getattr(track2.analysis, rs2['rate'])

    if len(beats2) < MIN_SEARCH:
        rs2['cursor'] = fade_in_point + X_FADE + inter
        play_dur = min(track2.analysis.duration - 2 * X_FADE, inter)
    else:
        _, rs2['cursor'] = move_cursor(track2, fade_in_point + X_FADE + inter, 0)
        play_dur = beats2[rs2['index'] + rs2['cursor']].start - X_FADE - fade_in_point

    return [Crossfade((track1, track2), (fade_out_point, fade_in_point), X_FADE),
            Playback(track2, fade_in_point + X_FADE, play_dur)]
# Example 5
def make_transition(track1, track2, inter, transition):
    """Build a beat-matched transition from track1 into track2.

    Falls back to a plain crossfade when either track has too few markers
    or when the alignment step fails.

    Args:
        track1: outgoing track.
        track2: incoming track.
        inter: duration (sec) of straight playback after the transition.
        transition: requested transition duration (sec).

    Returns:
        [Crossmatch, Playback], or the result of make_crossfade().
    """
    # the minimal transition is 2 markers
    # the minimal inter is 0 sec
    markers1 = getattr(track1.analysis, track1.resampled['rate'])
    markers2 = getattr(track2.analysis, track2.resampled['rate'])

    if len(markers1) < MIN_SEARCH or len(markers2) < MIN_SEARCH:
        return make_crossfade(track1, track2, inter)

    # though the minimal transition is 2 markers, the alignment is on at least 3 seconds
    mat1 = get_mat_out(track1, max(transition, MIN_ALIGN_DURATION))
    mat2 = get_mat_in(track2, max(transition, MIN_ALIGN_DURATION), inter)

    try:
        loc, n, rate1, rate2 = align(track1, track2, mat1, mat2)
    except Exception:
        # alignment can fail on degenerate input; fall back to a crossfade
        # (was a bare except, which also swallowed KeyboardInterrupt/SystemExit)
        return make_crossfade(track1, track2, inter)

    if transition < MIN_ALIGN_DURATION:
        duration, cursor = move_cursor(track2, transition, loc)
        n = max(cursor - loc, MIN_MARKERS)

    xm = make_crossmatch(track1, track2, rate1, rate2, loc, n)
    # loc and n are both in terms of potentially upsampled data.
    # Divide by rate here to get end_crossmatch in terms of the original data.
    end_crossmatch = (loc + n) / rate2

    # make sure the requested inter fits before the end of track2
    if markers2[-1].start < markers2[end_crossmatch].start + inter + transition:
        inter = max(markers2[-1].start - transition, 0)

    # move_cursor sets the cursor properly for subsequent operations, and gives us duration.
    dur, track2.resampled['cursor'] = move_cursor(track2, inter, end_crossmatch)
    pb = Playback(track2, sum(xm.l2[-1]), dur)

    return [xm, pb]
# Example 6
def make_jumps(path, track):
    """Convert a jump path into a list of Playback and Jump actions.

    Args:
        path: non-empty sequence of (source, target, data) triples where
              data is a dict with 'source', 'target' and 'duration' keys.
        track: audio track the actions refer to.

    Returns:
        List of Playback/Jump actions covering the path.
    """
    actions = []
    source = path[0][0]
    # `target` is re-assigned on every iteration; initialize defensively
    target = source
    for p in path:
        # adjacent markers: no jump needed, just extend the current playback
        # (the original expressed this with `raise` + a bare except)
        if p[2]['target'] - p[2]['source'] == 1:
            target = p[1]
            continue
        target = p[0]
        if 0 < target - source:
            actions.append(Playback(track, source, target - source))
        actions.append(Jump(track, p[0], p[1], p[2]['duration']))
        source = p[1]
    # flush the trailing playback, if any
    if 0 < target - source:
        actions.append(Playback(track, source, target - source))
    return actions
# Example 7
def start_mix(t1, t2, xfade, fadeonly):
    """
    Returns playback of the first track faded or beatmatched with the second
    """
    mismatch = check_tempo_mismatch(t1, t2)
    # crossfade when requested OR when the tempi are too far apart to
    # beatmatch -- consistent with end_mix() and fade_and_play(); the
    # original computed `mismatch` but never used it here
    if fadeonly or mismatch:
        (crossfade_t12, end_t1, start_t2) = cross_fade_match(t1, t2, xfade)
    else:
        (xmatch_t12, end_t1, start_t2) = beatmatch(t1, t2, xfade)
    return Playback(t1, 0, end_t1)
# Example 8
def end_mix(t2, t3, xfade, fadeonly):
    """Mix out of the second-to-last track into the last one.

    Returns (fade_action, Playback) where the playback starts where the
    final fade ends.
    """
    # crossfade when requested or when the tempi don't allow beatmatching
    use_crossfade = fadeonly or check_tempo_mismatch(t2, t3)
    matcher = cross_fade_match if use_crossfade else beatmatch
    fade, end_t2, start_t3 = matcher(t2, t3, xfade)
    return (fade, Playback(t3, start_t3, t3.analysis.duration))
# Example 9
def one_loop(graph, track, mode='shortest'):
    """Build a single playback + jump loop from the jump graph.

    Args:
        graph: jump graph to search.
        track: audio track the actions refer to.
        mode: 'longest' picks the first (longest) jump; anything else
              picks the shortest backward jump.

    Returns:
        [Playback, Jump] forming one loop, or [] when no loop exists.
    """
    jumps = get_jumps(graph, mode='backward')
    if not jumps:
        return []
    loop = None
    if mode == 'longest':
        loop = jumps[0]
    else:
        # scan from the shortest jump for one that goes backwards
        jumps.reverse()
        for jump in jumps:
            if jump[1] < jump[0]:
                loop = jump
                break
    if loop is None:
        return []
    # Let's capture a bit of the attack
    OFFSET = 0.025  # 25 ms
    pb = Playback(track, loop[1] - OFFSET, loop[0] - loop[1])
    jp = Jump(track, loop[0] - OFFSET, loop[1] - OFFSET, loop[2]['duration'])
    return [pb, jp]
# Example 10
def fade_and_play(t1, t2, t3, xfade, fadeonly=False):
    """Fade from track 1 into track 2 and play track 2 through.

    Uses a crossmatch (beatmatch) unless `fadeonly` is set or the tempo
    difference between t1 and t2 is too great, in which case it
    crossfades.  Track 3 is needed only to determine where playback of
    track 2 must stop.  Returns (fade_action, Playback).
    """
    # pick the mixing strategy once and use it for both boundaries
    if fadeonly or check_tempo_mismatch(t1, t2):
        matcher = cross_fade_match
    else:
        matcher = beatmatch

    fade, _end_t1, start_t2 = matcher(t1, t2, xfade)
    _fade_23, end_t2, _start_t3 = matcher(t2, t3, xfade)

    return (fade, Playback(t2, start_t2, end_t2 - start_t2))
# Example 11
def do_work(track, options):
    """Time-stretch a track toward a target tempo, optionally with a
    tempo ramp (accelerando) and a rubato offset on every second half-beat.

    Returns [initial Playback, time-stretched AudioData, final Playback].

    NOTE(review): options.offset appears to select whether the first beat
    is processed as two half-beats (0) or as one whole beat (1) -- confirm
    against callers.
    """

    # manage options
    verbose = bool(options.verbose)
    low_tempo = float(options.low)
    high_tempo = float(options.high)
    rate_tempo = float(options.rate)
    rubato = float(options.rubato)
    tempo = float(options.tempo)

    # acceleration or not
    # with no tempo ramp, collapse the range to a single target tempo:
    # the track's own detected tempo, or the explicitly requested one
    if rate_tempo == 0:
        if tempo == 0:
            low_tempo = track.analysis.tempo['value']
            high_tempo = low_tempo
        else:
            low_tempo = tempo
            high_tempo = tempo

    rates = []
    # count alternates 0/1 per beat: 0 = emit two rates (half-beats, with
    # rubato on the second half), 1 = emit a single rate for the whole beat
    count = min(max(0, int(options.offset)), 1)
    beats = track.analysis.beats
    # sample index of the first beat; rate positions below are relative to it
    offset = int(beats[0].start * track.sampleRate)

    # for every beat
    for beat in beats[:-1]:

        # get a tempo, particularly for accelerando
        target_tempo = select_tempo(beats.index(beat), len(beats), low_tempo,
                                    high_tempo, rate_tempo)

        # calculate rates
        if count == 0:
            # split the beat: first half at rate1, second half at rate1+rubato
            dur = beat.duration / 2.0
            rate1 = 60.0 / (target_tempo * dur)
            stretch = dur * rate1
            rate2 = rate1 + rubato
        elif count == 1:
            # one rate across the whole beat
            rate1 = 60.0 / (target_tempo * beat.duration)

        # add a change of rate at a given time
        start1 = int(beat.start * track.sampleRate)
        rates.append((start1 - offset, rate1))
        if count == 0:
            # second rate change at the half-beat boundary
            start2 = int((beat.start + dur) * track.sampleRate)
            rates.append((start2 - offset, rate2))

        # show on screen
        if verbose:
            if count == 0:
                args = (beats.index(beat), count, beat.duration, dur * rate1,
                        dur * rate2, 60.0 / (dur * rate1),
                        60.0 / (dur * rate2))
                print "Beat %d (%d) | stretch %.3f sec into [%.3f|%.3f] sec | tempo = [%d|%d] bpm" % args
            elif count == 1:
                args = (beats.index(beat), count, beat.duration,
                        beat.duration * rate1, 60.0 / (beat.duration * rate1))
                print "Beat %d (%d) | stretch %.3f sec into %.3f sec | tempo = %d bpm" % args

        count = (count + 1) % 2

    # get audio
    # everything between the first and the last beat markers
    vecin = track.data[offset:int(beats[-1].start * track.sampleRate), :]

    # time stretch
    if verbose:
        print "\nTime stretching..."
    vecout = dirac.timeScale(vecin, rates, track.sampleRate, 0)

    # build timestretch AudioData object
    ts = AudioData(ndarray=vecout,
                   shape=vecout.shape,
                   sampleRate=track.sampleRate,
                   numChannels=vecout.shape[1],
                   verbose=verbose)

    # initial and final playback
    # untouched audio before the first beat and after the last beat
    pb1 = Playback(track, 0, beats[0].start)
    pb2 = Playback(track, beats[-1].start,
                   track.analysis.duration - beats[-1].start)

    return [pb1, ts, pb2]