Example 1
def get_loops(fileobj, output_temp, inter=8.0, trans=2.0):
    track = LocalAudioFile(fileobj.name)
    tracks = [track, track, track] # 3 of em!

    valid = []
    # compute resampled and normalized matrices
    for track in tracks:
        track.resampled = resample_features(track, rate='beats')
        track.resampled['matrix'] = timbre_whiten(track.resampled['matrix'])
        # remove tracks that are too small
        if is_valid(track, inter, trans):
            valid.append(track)
        # for compatibility, we make mono tracks stereo
        track = make_stereo(track)
    tracks = valid

    if len(tracks) < 1: return []
    # Initial transition. Should contain 2 instructions: fadein, and playback.
    start = initialize(tracks[0], inter, trans)

    # Middle transitions. Should each contain 2 instructions: crossmatch, playback.
    middle = []
    for (t1, t2) in tuples(tracks):
        middle.extend(make_transition(t1, t2, inter, trans))

    # Last chunk. Should contain 1 instruction: fadeout.
    end = terminate(tracks[-1], FADE_OUT)
    actions = start + middle + end
    
    # output_temp = tempfile.NamedTemporaryFile(mode="w+b", suffix=".mp3")
    render(actions, output_temp.name, False)
    # Do it again
    new_one = audio.LocalAudioFile(output_temp.name)
    analysis = json.loads(urllib.urlopen(new_one.analysis.pyechonest_track.analysis_url).read())
    return (output_temp.name, analysis)
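
A minimal driver for get_loops might look like the following sketch; the input path is hypothetical, and note that the function returns an empty list (rather than a tuple) when the track is too short to pass is_valid:

import tempfile

output_temp = tempfile.NamedTemporaryFile(mode="w+b", suffix=".mp3")
fileobj = open("song.mp3", "rb")  # hypothetical input file
result = get_loops(fileobj, output_temp)
if result:
    path, analysis = result
    print "rendered loop at %s" % path
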
Example 2
def fruity_loops(fileobj, output_temp):
    options = {
        'plot': None,
        'force': None,
        'verbose': None,
        'graph': None,
        'infinite': True,
        'length': None,
        'minimum': 16,
        'longest': None,
        'duration': 10,
        'pickle': None,
        'shortest': False
    }
    args = [fileobj.name]
    track = LocalAudioFile(args[0], verbose=False)
    # this is where the work takes place
    actions = earworm.do_work(track, options)
    render(actions, output_temp.name, verbose=False)
    new_one = audio.LocalAudioFile(output_temp.name)
    analysis = json.loads(
        urllib.urlopen(new_one.analysis.pyechonest_track.analysis_url).read())
    return (output_temp.name, analysis)
Example 3
def main():
    # This sets up a parser for the various input options
    usage = "usage: %s [options] <one_single_mp3>" % sys.argv[0]
    parser = OptionParser(usage=usage)
    parser.add_option("-o",
                      "--offset",
                      default=0,
                      help="offset where to start counting")
    parser.add_option("-l", "--low", default=100, help="low tempo")
    parser.add_option("-H", "--high", default=192, help="high tempo")
    parser.add_option("-r",
                      "--rate",
                      default=0,
                      help="acceleration rate (try 30)")
    parser.add_option("-R",
                      "--rubato",
                      default=0,
                      help="rubato on second beat (try 0.2)")
    parser.add_option("-t",
                      "--tempo",
                      default=0,
                      help="target tempo (try 160)")
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      help="show results on screen")

    # If we don't have enough options, exit!
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        return -1

    verbose = options.verbose

    # This gets the analysis for this file
    track = LocalAudioFile(args[0], verbose=verbose)

    if verbose:
        print "Waltzifying..."

    # This is where the work takes place
    actions = do_work(track, options)

    if verbose:
        display_actions(actions)

    # This makes the new name for the output file
    name = os.path.splitext(os.path.basename(args[0]))
    name = str(name[0] + '_waltz_%d' % int(options.offset) + '.mp3')

    if verbose:
        print "Rendering... %s" % name

    # This renders the audio out to the output file
    render(actions, name, verbose=verbose)
    if verbose:
        print "Success!"
    return 1
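
One detail worth noting in these OptionParser-based mains: values supplied on the command line arrive as strings (no type= is given to add_option), which is why the code casts with int(options.offset) before use. A small standalone illustration, with a hypothetical argument list:

from optparse import OptionParser

p = OptionParser()
p.add_option("-o", "--offset", default=0)
(opts, args) = p.parse_args(["-o", "4", "song.mp3"])
print type(opts.offset)  # <type 'str'> from the CLI; the int default 0 is used only when -o is absent
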
Example 4
def main():
    usage = "usage: %s [options] <one_single_mp3>" % sys.argv[0]
    parser = OptionParser(usage=usage)
    parser.add_option("-d", "--duration", default=DEF_DUR, help="target duration (argument in seconds) default=600")
    parser.add_option("-m", "--minimum", default=MIN_JUMP, help="minimal loop size (in beats) default=8")
    parser.add_option("-i", "--infinite", action="store_true", help="generate an infinite loop (outputs a wav file)")
    parser.add_option("-l", "--length", action="store_true", help="length must be accurate")
    parser.add_option("-k", "--pickle", action="store_true", help="output graph as a pickle object")
    parser.add_option("-g", "--graph", action="store_true", help="output graph as a gml text file")
    parser.add_option("-p", "--plot", action="store_true", help="output graph as png image")
    parser.add_option("-f", "--force", action="store_true", help="force (re)computing the graph")
    parser.add_option("-S", "--shortest", action="store_true", help="output the shortest loop")
    parser.add_option("-L", "--longest", action="store_true", help="output the longest loop")
    parser.add_option("-v", "--verbose", action="store_true", help="show results on screen")

    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        return -1

    track = None
    if len(args) == 2:
        track = find_track(args[0], args[1])
        if not track:
            print "Couldn't find %s by %s" % (args[0], args[1])
            return 1
    else:
        mp3 = args[0]

        if os.path.exists(mp3 + '.json'):
            track = AnalyzedAudioFile(mp3)
        else:
            track = LocalAudioFile(mp3)

    # this is where the work takes place
    actions = do_work(track, options)

    if options.verbose:
        display_actions(actions)

    print "Output Duration = %.3f sec" % sum(act.duration for act in actions)

    # Send to renderer
    name = os.path.splitext(os.path.basename(args[0]))

    # Output wav for loops in order to remain sample accurate
    if options.infinite:
        name = name[0]+'_'+str(int(options.duration))+'_loop.wav'
    elif options.shortest:
        name = name[0]+'_'+str(int(sum(act.duration for act in actions)))+'_shortest.wav'
    elif options.longest:
        name = name[0]+'_'+str(int(sum(act.duration for act in actions)))+'_longest.wav'
    else:
        name = name[0]+'_'+str(int(options.duration))+'.mp3'

    print "Rendering..."
    render(actions, name)
    return 1
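
Several of these scripts build the output name via os.path.splitext(os.path.basename(args[0])) and then index name[0]; that works because splitext returns a (root, ext) tuple:

import os.path

print os.path.splitext(os.path.basename("/music/song.mp3"))  # ('song', '.mp3')
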
Example 5
    def render(self, mp3_filename):
        # Check that we have loaded track from Echo Nest
        if self.track is None:
            self.load_track(True)

        # NOTE to shorten/lengthen refer to compute_path() in earworm.py
        # renders full length of song
        actions = [Playback(self.track, min(self.graph.nodes()), max(self.graph.nodes()))]
        render(actions, mp3_filename)
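
Example 5 shows the render pipeline at its simplest: a list of actions in, one audio file out. A minimal standalone sketch, assuming the echonest.remix imports used elsewhere on this page and a hypothetical input file:

from echonest.remix import audio
from echonest.remix.action import Playback, render

track = audio.LocalAudioFile("song.mp3")  # hypothetical input
actions = [Playback(track, 0.0, 30.0)]    # play the first 30 seconds
render(actions, "excerpt.mp3")
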
Example 6
def main():
    options, args = get_options(warn=True)
    actions = do_work(args, options)
    
    if options.verbose:
        display_actions(actions)
    print "Output Duration = %.3f sec" % sum(act.duration for act in actions)
    
    # Send to renderer
    print "Rendering..."
    render(actions, 'capsule.mp3')
    return 1
Example 7
def fruity_loops(fileobj, output_temp):
    options = {'plot': None, 'force': None, 'verbose': None, 'graph': None, 'infinite': True, 'length': None, 'minimum': 16, 'longest': None, 'duration': 10, 'pickle': None, 'shortest': False}
    args = [fileobj.name]
    track = LocalAudioFile(args[0], verbose=False)
    # this is where the work takes place
    actions = earworm.do_work(track, options)
    render(actions, output_temp.name, verbose=False)
    new_one = audio.LocalAudioFile(output_temp.name)
    analysis = json.loads(urllib.urlopen(new_one.analysis.pyechonest_track.analysis_url).read())
    return (output_temp.name, analysis)
Example 8
def main():
    usage = "usage: %s [options] <one_single_mp3>" % sys.argv[0]

    parser = OptionParser(usage=usage)
    parser.add_option("-p", "--pattern", default="1", help="tempo pattern, default 1 (every beat at same tempo)\
    	Each beat will be sped up by a factor of the corresponding digit in the pattern.\
    	1122 will take each four beats, and squish the last two (making a waltz)\
    	12 will squish every alternating beat (long swing, depending on the song)\
    	Much crazier is possible. Also note that the beat detection is sometimes off/ not aligned with bars.\
    	Use -d with \"1111\" to find out what four beats will be grouped at a time.\"\
    	")
    parser.add_option("-s", "--slowdown", default=1, help="General factor of slowdown")
    parser.add_option("-d", "--debug", action="store_true", help="General factor of slowdown")
    parser.add_option("--downbeat", default=-1, help="Downbeat index in the pattern.")
    parser.add_option("--downbeat_file", default=0, help="File to use for downbeat sound.")
    parser.add_option("--downbeat_offset", default=0, help="Amount of seconds to shift beat back.")
    parser.add_option("-v", "--verbose", action="store_true", help="show results on screen")
    
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        return -1
    
    track = None
    mp3 = args[0]
    
    if os.path.exists(mp3 + '.json'):
        track = AnalyzedAudioFile(mp3)
    else:
        track = LocalAudioFile(mp3)
    
    # this is where the work takes place
    actions = do_work(track, options)
    if options.verbose:
        display_actions(actions)

    # Send to renderer
    name = os.path.splitext(os.path.basename(args[0]))
    beat_signature = options.pattern
    if float(options.slowdown) != 1:
        beat_signature = beat_signature + "_" + options.slowdown
    name = name[0]+'_'+beat_signature+'.mp3'
    name = name.replace(' ','')

    print "Rendering..."
    render(actions, name)
    return 1
Example 9
def generateCrossmatch(localAudioFiles, beatMarkers, filenames, beats):
    actions = []
    for i in range(len(beatMarkers)-1): 
        cm = action.Crossmatch((localAudioFiles[i], localAudioFiles[i+1]), 
            ([(b.start, b.duration) for b in beats[i][beatMarkers[i][1] - 
            MIX_LENGTH:beatMarkers[i][1]]],[(b.start, b.duration) 
            for b in beats[i+1][beatMarkers[i+1][0]:beatMarkers[i+1][0]+
            MIX_LENGTH]]))
        actions.append(cm)
    for i in range(len(beatMarkers)): 
        startBeat = beats[i][beatMarkers[i][0]+MIX_LENGTH]
        endBeat = beats[i][beatMarkers[i][1]-MIX_LENGTH]
        actions.insert(2*i, action.Playback(localAudioFiles[i], 
            startBeat.start, endBeat.start-startBeat.start))
    try:
        action.render([action.Fadein(localAudioFiles[0],beats[0]
            [beatMarkers[0][0]].start,beats[0][beatMarkers[0][0]+MIX_LENGTH].
            start-beats[0][beatMarkers[0][0]].start)],"000 fade in")
    except: pass # boundary error, so no fade-in will be generated for this playlist
    for i in range(len(actions)/2):
        index = str(i + 1).zfill(3)
        try:
            action.render([actions[2*i],actions[2*i+1]], index)
        except: print filenames[i]                        
    index = str(len(filenames)).zfill(3)
    action.render([actions[-1]], index)
    try:
        action.render([action.Fadeout(localAudioFiles[-1],beats[-1][beatMarkers[-1][1]-
            MIX_LENGTH].start,beats[-1][beatMarkers[-1][1]].start-beats[-1]
            [beatMarkers[-1][1]-MIX_LENGTH].start)], "999 fade out")
    except: pass # boundary error, so omit fade-out from playlist
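
Crossmatch, as used above, takes a pair of tracks plus a pair of equal-length (start, duration) beat lists: the tail of the outgoing track and the head of the incoming one. A stripped-down sketch of a single beat-matched transition, assuming the echonest.remix API and hypothetical inputs:

from echonest.remix import audio, action

t1 = audio.LocalAudioFile("outgoing.mp3")  # hypothetical inputs
t2 = audio.LocalAudioFile("incoming.mp3")
MIX_LENGTH = 8  # hypothetical value for the MIX_LENGTH constant used above

out_tail = [(b.start, b.duration) for b in t1.analysis.beats[-MIX_LENGTH:]]
in_head = [(b.start, b.duration) for b in t2.analysis.beats[:MIX_LENGTH]]
action.render([action.Crossmatch((t1, t2), (out_tail, in_head))], "transition.mp3")
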
Example 10
def generateCrossmatch(localAudioFiles, beatMarkers, filenames, beats):
    actions = []
    for i in range(len(beatMarkers)-1): 
        cm = action.Crossmatch((localAudioFiles[i], localAudioFiles[i+1]), 
            ([(b.start, b.duration) for b in beats[i][beatMarkers[i][1] - 
            MIX_LENGTH:beatMarkers[i][1]]],[(b.start, b.duration) 
            for b in beats[i+1][beatMarkers[i+1][0]:beatMarkers[i+1][0]+
            MIX_LENGTH]]))
        actions.append(cm)
    for i in range(len(beatMarkers)): 
        startBeat = beats[i][beatMarkers[i][0]+MIX_LENGTH]
        endBeat = beats[i][beatMarkers[i][1]-MIX_LENGTH]
        actions.insert(2*i, action.Playback(localAudioFiles[i], 
            startBeat.start, endBeat.start-startBeat.start))
    action.render([action.Fadein(localAudioFiles[0],beats[0]
        [beatMarkers[0][0]].start,beats[0][beatMarkers[0][0]+MIX_LENGTH].
        start-beats[0][beatMarkers[0][0]].start)],"000 fade in")
    for i in range(len(actions)/2):
        index = str(i + 1).zfill(3)
        try:
            action.render([actions[2*i],actions[2*i+1]], index + " " + filenames[i])
        except: print filenames[i]                        
    index = str(len(filenames)).zfill(3)
    action.render([actions[-1]], index + " " + filenames[-1])
    action.render([action.Fadeout(localAudioFiles[-1],beats[-1][beatMarkers[-1][1]-
        MIX_LENGTH].start,beats[-1][beatMarkers[-1][1]].start-beats[-1]
        [beatMarkers[-1][1]-MIX_LENGTH].start)], "999 fade out")
Example 11
def main():
#	m = mix("mp3/output1.mp3", "mp3/coldenergy.mp3", 0)
#	print " * rendering"
#	render(m, 'outA.mp3')
#
#	m = mix("mp3/coldenergy.mp3", "mp3/1998.mp3", 967)
#	print " * rendering"
#	render(m, 'outB.mp3')
#
#	m = mix("mp3/1998.mp3", "mp3/milano.mp3", 575)
#	print " * rendering"
#	render(m, 'outC.mp3')
    m = mix(sys.argv[1], sys.argv[2], 0)
    print " * rendering"
    render(m, sys.argv[3])
Example 12
def main():
    usage = "usage: %s [options] <one_single_mp3>" % sys.argv[0]

    parser = OptionParser(usage=usage)
    parser.add_option("-p", "--pattern", default="1", help="tempo pattern, default 1 (every beat at same tempo)\
    	Each beat will be sped up by a factor of the corresponding digit in the pattern. 0 will drop a beat\
    	1122 will take each four beats, and squish the last two (making a waltz)\
    	12 will squish every alternating beat (long swing, depending on the song)\
        1110 will drop every 4th beat\
    	Much crazier is possible. Also note that the beat detection is sometimes off/ not aligned with bars.\
    	Use -d with \"1111\" to find out what four beats will be grouped at a time.\"\
    	")
    parser.add_option("-s", "--slowdown", default=1, help="General factor of slowdown")
    parser.add_option("-f", "--format", default="mp3", help="Output format (e.g. mp3, wav)")
    parser.add_option("-d", "--debug", action="store_true", help="General factor of slowdown")
    parser.add_option("-v", "--verbose", action="store_true", help="show results on screen")
    
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        return -1
    
    mp3 = args[0]
    track = LocalAudioFile(mp3)
    
    # this is where the work takes place
    actions = do_work(track, options)
    
    if options.verbose:
        display_actions(actions)
    
    # Send to renderer
    name = os.path.splitext(os.path.basename(args[0]))
    beat_signature = options.pattern
    if float(options.slowdown) != 1:
        beat_signature = beat_signature + "_" + options.slowdown
    name = name[0]+'_'+beat_signature+'.'+options.format
    name = name.replace(' ','')
    
    print "Rendering..."
    render(actions, name)
    return 1
Example 13
def main():
    # This sets up a parser for the various input options
    usage = "usage: %s [options] <one_single_mp3>" % sys.argv[0]
    parser = OptionParser(usage=usage)
    parser.add_option("-o", "--offset", default=0, help="offset where to start counting")
    parser.add_option("-l", "--low", default=100, help="low tempo")
    parser.add_option("-H", "--high", default=192, help="high tempo")
    parser.add_option("-r", "--rate", default=0, help="acceleration rate (try 30)")
    parser.add_option("-R", "--rubato", default=0, help="rubato on second beat (try 0.2)")
    parser.add_option("-t", "--tempo", default=0, help="target tempo (try 160)")
    parser.add_option("-v", "--verbose", action="store_true", help="show results on screen")
    
    # If we don't have enough options, exit!
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        return -1
    
    verbose = options.verbose

    # This gets the analysis for this file
    track = LocalAudioFile(args[0], verbose=verbose)
    
    if verbose:
        print "Waltzifying..."

    # This is where the work takes place
    actions = do_work(track, options)

    if verbose:
        display_actions(actions)
    
    # This makes the new name for the output file
    name = os.path.splitext(os.path.basename(args[0]))
    name = str(name[0] + '_waltz_%d' % int(options.offset) +'.mp3')
    
    if verbose:
        print "Rendering... %s" % name

    # This renders the audio out to the output file
    render(actions, name, verbose=verbose)
    if verbose:
        print "Success!"
    return 1
Example 14
def main():
    usage = "usage: %s [options] <one_single_mp3>" % sys.argv[0]
    parser = OptionParser(usage=usage)
    parser.add_option("-o", "--offset", default=0, help="offset where to start counting")
    parser.add_option("-l", "--low", default=100, help="low tempo")
    parser.add_option("-H", "--high", default=192, help="high tempo")
    parser.add_option("-r", "--rate", default=0, help="acceleration rate (try 30)")
    parser.add_option("-R", "--rubato", default=0, help="rubato on second beat (try 0.2)")
    parser.add_option("-t", "--tempo", default=0, help="target tempo (try 160)")
    parser.add_option("-v", "--verbose", action="store_true", help="show results on screen")

    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        return -1

    verbose = options.verbose

    # get Echo Nest analysis for this file
    track = LocalAudioFile(args[0], verbose=verbose)

    if verbose:
        print "Waltzifying..."

    # this is where the work takes place
    actions = do_work(track, options)

    if verbose:
        display_actions(actions)

    # new name
    name = os.path.splitext(os.path.basename(args[0]))
    name = str(name[0] + "_waltz_%d" % int(options.offset) + ".mp3")

    if verbose:
        print "Rendering... %s" % name

    # send to renderer
    render(actions, name, verbose=verbose)

    if verbose:
        print "Success!"

    return 1
Example 15
def main():
	usage = "usage: %s [options] <one_single_mp3>" % sys.argv[0]
	parser = OptionParser(usage=usage)
	parser.add_option("-w", "--waltz", default=2, help="where to put the extra beat, value of 1, 2, or 3, default=2")
	parser.add_option("-v", "--verbose", action="store_true", help="show results on screen")
	
	(options, args) = parser.parse_args()
	if len(args) < 1:
		parser.print_help()
		return -1
	
	verbose = options.verbose
	
	track = LocalAudioFile(args[0], verbose=verbose)
	if verbose:
		print "Computing waltz . . ."
		
	# this is where the work takes place
	actions = do_work(track, options)

	if verbose:
		display_actions(actions)

	if verbose:
		print "Song is in %s/4 time" % int(track.analysis.time_signature['value'])
	
	# Send to renderer
	name = os.path.splitext(os.path.basename(args[0]))

	name = name[0] + '_waltz_b' + str(int(options.waltz)) + '.mp3'
	name = name.replace(' ','') 
	name = os.path.join(os.getcwd(), name) # TODO: use sys.path[0] instead of getcwd()?

	if verbose:
		print "Rendering... %s" % name
        
	render(actions, name, verbose=verbose)
	if verbose:
		print "Success!"
	return 1
Example 16
def main():
    # This sets up a parser for the various options
    usage = "usage: %s [options] <one_single_mp3>" % sys.argv[0]
    parser = OptionParser(usage=usage)
    parser.add_option("-s", "--swing", default=0.33, help="swing factor default=0.33")
    parser.add_option("-v", "--verbose", action="store_true", help="show results on screen")
    
    # If we don't have enough options, exit!
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        return -1
    
    # Set up the track and verbose-ness
    verbose = options.verbose
    track = LocalAudioFile(args[0], verbose=verbose)
    if verbose:
        print "Computing swing . . ."

    # This is where the work takes place
    actions = do_work(track, options)
    
    if verbose:
        display_actions(actions)
    
    # This renders the audio out to the new file
    name = os.path.splitext(os.path.basename(args[0]))
    sign = ('-','+')[float(options.swing) >= 0]
    name = name[0] + '_swing' + sign + str(int(abs(float(options.swing))*100)) +'.mp3'
    name = name.replace(' ','') 
    name = os.path.join(os.getcwd(), name) # TODO: use sys.path[0] instead of getcwd()?
    
    if verbose:
        print "Rendering... %s" % name
    render(actions, name, verbose=verbose)
    if verbose:
        print "Success!"
    return 1
Example 17
def get_loops(fileobj, output_temp, inter=8.0, trans=2.0):
    track = LocalAudioFile(fileobj.name)
    tracks = [track, track, track]  # 3 of em!

    valid = []
    # compute resampled and normalized matrices
    for track in tracks:
        track.resampled = resample_features(track, rate='beats')
        track.resampled['matrix'] = timbre_whiten(track.resampled['matrix'])
        # remove tracks that are too small
        if is_valid(track, inter, trans):
            valid.append(track)
        # for compatibility, we make mono tracks stereo
        track = make_stereo(track)
    tracks = valid

    if len(tracks) < 1: return []
    # Initial transition. Should contain 2 instructions: fadein, and playback.
    start = initialize(tracks[0], inter, trans)

    # Middle transitions. Should each contain 2 instructions: crossmatch, playback.
    middle = []
    for (t1, t2) in tuples(tracks):
        middle.extend(make_transition(t1, t2, inter, trans))

    # Last chunk. Should contain 1 instruction: fadeout.
    end = terminate(tracks[-1], FADE_OUT)
    actions = start + middle + end

    # output_temp = tempfile.NamedTemporaryFile(mode="w+b", suffix=".mp3")
    render(actions, output_temp.name, False)
    # Do it again
    new_one = audio.LocalAudioFile(output_temp.name)
    analysis = json.loads(
        urllib.urlopen(new_one.analysis.pyechonest_track.analysis_url).read())
    return (output_temp.name, analysis)
Example 18
def main():
    usage = "usage: %s [options] <one_single_mp3>" % sys.argv[0]
    parser = OptionParser(usage=usage)
    parser.add_option("-d",
                      "--duration",
                      default=DEF_DUR,
                      help="target duration (argument in seconds) default=600")
    parser.add_option("-m",
                      "--minimum",
                      default=MIN_JUMP,
                      help="minimal loop size (in beats) default=8")
    parser.add_option("-i",
                      "--infinite",
                      action="store_true",
                      help="generate an infinite loop (outputs a wav file)")
    parser.add_option("-l",
                      "--length",
                      action="store_true",
                      help="length must be accurate")
    parser.add_option("-k",
                      "--pickle",
                      action="store_true",
                      help="output graph as a pickle object")
    parser.add_option("-g",
                      "--graph",
                      action="store_true",
                      help="output graph as a gml text file")
    parser.add_option("-p",
                      "--plot",
                      action="store_true",
                      help="output graph as png image")
    parser.add_option("-f",
                      "--force",
                      action="store_true",
                      help="force (re)computing the graph")
    parser.add_option("-S",
                      "--shortest",
                      action="store_true",
                      help="output the shortest loop")
    parser.add_option("-L",
                      "--longest",
                      action="store_true",
                      help="output the longest loop")
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      help="show results on screen")

    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        return -1

    verbose = options.verbose
    track = LocalAudioFile(args[0], verbose=verbose)

    # this is where the work takes place
    actions = do_work(track, options)

    if verbose:
        display_actions(actions)
        print "Output Duration = %.3f sec" % sum(act.duration
                                                 for act in actions)

    # Send to renderer
    name = os.path.splitext(os.path.basename(args[0]))

    # Output wav for loops in order to remain sample accurate
    if options.infinite:
        name = name[0] + '_' + str(int(options.duration)) + '_loop.wav'
    elif options.shortest:
        name = name[0] + '_' + str(int(sum(
            act.duration for act in actions))) + '_shortest.wav'
    elif options.longest:
        name = name[0] + '_' + str(int(sum(
            act.duration for act in actions))) + '_longest.wav'
    else:
        name = name[0] + '_' + str(int(options.duration)) + '.mp3'

    if options.verbose:
        print "Rendering..."
    render(actions, name, verbose=verbose)
    return 1
Example 19
def save_mixing_result(m, filename):
    render(m, filename)
Example 20
def mashComponents(localAudioFiles, loudnessMarkers):
    instSegments = localAudioFiles[0].analysis.segments  # This is the base track
    vocalSegments = localAudioFiles[1].analysis.segments  # This is the overlay track
    instBeats = localAudioFiles[0].analysis.beats[loudnessMarkers[0][0]:
                                                  loudnessMarkers[0][1]]
    vocalBeats = localAudioFiles[1].analysis.beats[loudnessMarkers[1][0]:
                                                   loudnessMarkers[1][1]]
    pitches = meanPitches(instSegments,instBeats)
    timbre = meanTimbre(instSegments,instBeats)
    sections = localAudioFiles[1].analysis.sections  # This is the new lead vocal layer
    sections = sections.that(selection.are_contained_by_range(
            vocalBeats[0].start, vocalBeats[-1].start+vocalBeats[-1].duration))
    if len(sections) == 0: sections = localAudioFiles[1].analysis.sections[2:-2]
    pyplot.figure(0,(16,9))
    image = numpy.array(pitches)
    image = numpy.concatenate((image,numpy.array(timbre)),axis = 1)
    image = numpy.concatenate((image,numpy.array(meanLoudness(instSegments,instBeats))),
                              axis = 1)
    """ Now image contains chromatic, timbral, and loudness information"""
    sectBeats = getSectBeats(sections[0]) # get beats that comprise a specific section
    template = numpy.array(meanPitches(vocalSegments,sectBeats))
    template = numpy.concatenate((template,numpy.array(
                                meanTimbre(vocalSegments,sectBeats))),axis=1)
    template = numpy.concatenate((template,numpy.array(
                                meanLoudness(vocalSegments,sectBeats))),axis = 1)
    im = feature.match_template(image,template,pad_input=True)
    maxValues = [] #tuples of x coord, y coord, correlation, and section len(in secs)
    ij = numpy.unravel_index(numpy.argmax(im), im.shape)
    x, y = ij[::-1]
    maxValues.append((numpy.argmax(im),x,y,sections[0].duration))
    for i in range(len(sections)-1):
        sectBeats = getSectBeats(sections[i+1])
        template = numpy.array(meanPitches(vocalSegments,sectBeats))
        template = numpy.concatenate((template,numpy.array(
                                meanTimbre(vocalSegments,sectBeats))), axis=1)
        template = numpy.concatenate((template,numpy.array(
                                meanLoudness(vocalSegments,sectBeats))),axis = 1)
        match = feature.match_template(image,template,pad_input=True)
        ij = numpy.unravel_index(numpy.argmax(match), match.shape)
        x, y = ij[::-1]
        maxValues.append((numpy.argmax(match),
                          TEMPLATE_WIDTH*i+x,y,sections[i+1].duration))
        im = numpy.concatenate((im,match),axis = 1)
    maxValues.sort()
    maxValues.reverse()
    try:
        count = 0
        while(maxValues[count][3] < 10.0): # choose a section longer than 10 secs
            count += 1
        x = maxValues[count][1]
        y = maxValues[count][2]
    except:        
        print "exception in mashComponents..."
        ij = numpy.unravel_index(numpy.argmax(im), im.shape)
        x, y = ij[::-1]
    pyplot.imshow(im, cmap = pyplot.get_cmap('gray'), aspect = 'auto')
    pyplot.plot(x,y,'o',markeredgecolor='r',markerfacecolor='none',markersize=15)
    pyplot.show()
    sectionBeats = getSectBeats(sections[x/TEMPLATE_WIDTH])
    print "len(sectionBeats): ", len(sectionBeats)
    print "len(instBeats): ", len(instBeats)
    print "y: ", y
    y = instBeats[y].absolute_context()[0]
    instBeats = localAudioFiles[0].analysis.beats 
    matchingBeats = instBeats[(y-len(sectionBeats)/2):(y+len(sectionBeats)/2)]
    print "len(matchingBeats): ", len(matchingBeats)
    matchingBeats = matchingBeats[-len(sectionBeats):]
    print "len(matchingBeats): ", len(matchingBeats)
    """ Check to make sure lengths of beat lists are equal... """
    if len(matchingBeats) != len(sectionBeats):
        print "len(matchingBeats) != len(sectionBeats). For now, I will just truncate..."
        print "len(matchingBeats): ", len(matchingBeats)
        print "len(sectionBeats): ", len(sectionBeats)
        if len(matchingBeats) > len(sectionBeats):matchingBeats = matchingBeats[
                                                            :len(sectionBeats)]
        else: sectionBeats = sectionBeats[:len(matchingBeats)]
    """ I have to make sure sectionBeats and matchingBeats are similarly aligned
        within their group, aka bar of four beats. I will add a beat to the beginning
        of matchingBeats until that condition is met. I re-initialize instBeats and
        vocalBeats, because now I want to include the areas outside of those marked
        off by AutomaticDJ for fade ins and fade outs."""
    vocalBeats = localAudioFiles[1].analysis.beats
    while(matchingBeats[0].local_context()[0] != sectionBeats[0].local_context()[0]):
        matchingBeats.insert(0,instBeats[matchingBeats[0].absolute_context()[0]-1])
        sectionBeats.append(vocalBeats[sectionBeats[-1].absolute_context()[0]+1])
    """ Check to make sure lengths of beat lists are equal... """
    if len(matchingBeats) != len(sectionBeats):
        print "len(matchingBeats) != len(sectionBeats) at the second checkpoint."
        print "This should not be the case. The while loop must not be adding beats"
        print "to both lists equally."
        print "len(matchingBeats): ", len(matchingBeats)
        print "len(sectionBeats): ", len(sectionBeats)
        sys.exit()
    """ Next, I will use the beats around the designated beats above to transition into
    and out of the mashup. """
    XLEN = 4 # number of beats in crossmatch
    if(matchingBeats[0].absolute_context()[0] < XLEN or
       len(instBeats) - matchingBeats[-1].absolute_context()[0] - 1 < XLEN or
       sectionBeats[0].absolute_context()[0] < XLEN or
       len(vocalBeats) - sectionBeats[-1].absolute_context()[0] - 1 < XLEN):
        XLEN -= 1
    BUFFERLEN = 12 # number of beats before and after crossmatches
    while(matchingBeats[0].absolute_context()[0] < BUFFERLEN+XLEN or
       len(instBeats) - matchingBeats[-1].absolute_context()[0] - 1 < BUFFERLEN+XLEN or
       sectionBeats[0].absolute_context()[0] < BUFFERLEN+XLEN or
       len(vocalBeats) - sectionBeats[-1].absolute_context()[0] - 1 < BUFFERLEN+XLEN):
        BUFFERLEN -= 1
    try:
        """ These are the 4 beats before matchingBeats. These are the four beats of the
        instrumental track that preclude the mashed section. """
        b4beatsI = instBeats[matchingBeats[0].absolute_context()[0]-XLEN:
                            matchingBeats[0].absolute_context()[0]]
        """ These are the 4 beats after matchingBeats. These are the four beats of the
        instrumental track that follow the mashed section. """
        afterbeatsI = instBeats[matchingBeats[-1].absolute_context()[0]+1:
                            matchingBeats[-1].absolute_context()[0]+1+XLEN]
        if(len(b4beatsI) != len(afterbeatsI)):
            print "The lengths of b4beatsI and afterbeatsI are not equal."
        """ These are the 16 beats before the 4-beat crossmatch into matchingBeats. """
        preBufferBeats = instBeats[matchingBeats[0].absolute_context()[0]-BUFFERLEN-XLEN:
                                            matchingBeats[0].absolute_context()[0]-XLEN]
        """ These are the 16 beats before the 4-beat crossmatch into matchingBeats. """
        postBufferBeats = instBeats[matchingBeats[-1].absolute_context()[0]+1+XLEN:
                                matchingBeats[-1].absolute_context()[0]+1+XLEN+BUFFERLEN]
        if(len(preBufferBeats) != len(postBufferBeats)):
            print "The lengths of preBufferBeats and postBufferBeats are not equal."
            print "len(preBufferBeats): ", len(preBufferBeats)
            print "len(postBufferBeats): ", len(postBufferBeats)
            print matchingBeats[-1].absolute_context()[0]
            print len(instBeats)
            sys.exit()
        """ These are the 4 beats before matchingBeats. These are the four beats of the
        new vocal track that preclude the mashed section. """
        b4beatsV = vocalBeats[sectionBeats[0].absolute_context()[0]-XLEN:
                            sectionBeats[0].absolute_context()[0]]
        """ These are the 4 beats after matchingBeats. These are the four beats of the 
        new vocal track that follow the mashed section. """
        afterbeatsV = vocalBeats[sectionBeats[-1].absolute_context()[0]+1:
                            sectionBeats[-1].absolute_context()[0]+1+XLEN]
        if len(b4beatsV) != len(afterbeatsV):
            print "The lengths of b4beatsV and afterbeatsV are not equal."
            sys.exit()
    except: 
        print "exception in 4 beat try block."
        sys.exit()
    """ vocData: An AudioData object for the new vocal data that will be overlaid. 
        instData: An AudioData object for the base instrumental track. 
        originalVocData: An AudioData object of the original vocal to accompany 
            the new one. 
        vocalMix: An AudioData of both vocal tracks mixed together, in order to 
            keep the overall vocal loudness approximately constant. 
        mix: An AudioData of the instrumental track and combined vocals
            mixed together. """
    vocData = audio.getpieces(localAudioFiles[3],b4beatsV+sectionBeats+afterbeatsV)
    instData = audio.getpieces(localAudioFiles[2],b4beatsI+matchingBeats+afterbeatsI)
    if instData.data.shape[0] >= vocData.data.shape[0]: 
        mix = audio.megamix([instData, vocData])
    else: 
        mix = audio.megamix([vocData, instData]) # the longer data set has to go first.
    mix.encode('mix.mp3')
    vocData.encode('vocData.mp3')
    """ Now, make a similar mix for before the mashed sections..."""
    instData = audio.getpieces(localAudioFiles[2], preBufferBeats + b4beatsI)
    vocData = audio.getpieces(localAudioFiles[4], preBufferBeats + b4beatsI)
    premix = audio.megamix([instData, vocData])
    """ ...and another mix for after the mashed sections."""
    instData = audio.getpieces(localAudioFiles[2], afterbeatsI + postBufferBeats)
    vocData = audio.getpieces(localAudioFiles[4], afterbeatsI + postBufferBeats)
    postmix = audio.megamix([instData, vocData])
    """ Now, I have three AudioData objects, mix, premix, and postmix, that overlap by
    four beats. I will build Crossmatch objects from the overlapping regions, and three 
    Playback objects for the areas that are not in transition. """
    action.make_stereo(premix)
    action.make_stereo(mix)
    action.make_stereo(postmix)
    preBuffdur = sum([p.duration for p in preBufferBeats]) # duration of preBufferBeats
    playback1 = action.Playback(premix,0.0,preBuffdur)
    b4dur = sum([p.duration for p in b4beatsI]) # duration of b4beatsI
    crossfade1 = action.Crossfade((premix,mix),(preBuffdur,0.0),b4dur) 
    abdur = sum([p.duration for p in afterbeatsI])
    playback2 = action.Playback(mix,b4dur,mix.duration - b4dur - abdur)
    crossfade2 = action.Crossfade((mix,postmix),(mix.duration - abdur,0.0),abdur) 
    playback3 = action.Playback(postmix,abdur,sum([p.duration for p in postBufferBeats]))
    action.render([playback1,crossfade1,playback2,crossfade2,playback3], 'mashup.mp3')
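The tail of mashComponents stitches its three mixes together with an overlapping Playback/Crossfade chain. In isolation the pattern looks like the following; a hedged sketch assuming the echonest.remix API, hypothetical input files, and a fixed two-second overlap:

from echonest.remix import audio, action

a = audio.LocalAudioFile("first.mp3")  # hypothetical inputs
b = audio.LocalAudioFile("second.mp3")
overlap = 2.0  # seconds shared by the two pieces

seq = [
    action.Playback(a, 0.0, a.duration - overlap),
    action.Crossfade((a, b), (a.duration - overlap, 0.0), overlap),
    action.Playback(b, overlap, b.duration - overlap),
]
action.render(seq, "crossfaded.mp3")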
Example 21
def generateFaderSlam(localAudioFiles, beatMarkers, filenames):
    actions = [action.Playback(laf, b[0].start, (b[1].start-b[0].start)) for laf,b in zip(localAudioFiles,beatMarkers)]
    for i in range(len(actions)): 
        action.render([actions[i]],str(i) + " " + filenames[i])