Exemplo n.º 1
0
def _show_one(audio_file):
    "given an audio file, print out the artist, title and some audio attributes of the song"
    print 'File:        ', audio_file
    pytrack = track.track_from_filename(audio_file)
    print 'Artist:      ', pytrack.artist if hasattr(pytrack, 'artist') else 'Unknown'
    print 'Title:       ', pytrack.title if hasattr(pytrack, 'title') else 'Unknown'
    print 'Track ID:    ', pytrack.id
    print 'Tempo:       ', pytrack.tempo
    print 'Energy:       %1.3f %s' % (pytrack.energy, _bar(pytrack.energy))
    if not pytrack.valence:
        # Track hasn't had latest attributes computed. Force an upload.
        pytrack = track.track_from_filename(audio_file, force_upload=True)
    print 'Valence:      %1.3f %s' % (pytrack.valence, _bar(pytrack.valence)) 
    print 'Acousticness: %1.3f %s' % (pytrack.acousticness, _bar(pytrack.acousticness))
    print
Exemplo n.º 2
0
def generate_loopback(trans_one, trans_two, mp3_list, index_in_list, delay, compare_tempo):
    """Find the best (dest, src) segment pair to loop a song back on itself.

    trans_one / trans_two are (first, second) segment-index pairs for the
    previous and next transitions.  Searches every segment before the next
    transition start against every segment after the previous transition end,
    minimising twosongshift.compare_segments distance, and returns the best
    (i, j) pair.
    """
    _, one_second = trans_one
    two_first, _ = trans_two

    dest_range = range(0,two_first)
    
    t = track.track_from_filename(mp3_list[index_in_list])
    t.get_analysis()

    if delay:
        # Throttle to stay under the Echo Nest API rate limit.
        print "Waiting 3 seconds"
        time.sleep(3)
    
    src_range = range(one_second, len(t.segments))

    # Seed "best" with the distance from the song start to the loop point.
    best_trans = (0,one_second)
    best_dist = twosongshift.compare_segments(t.segments[0],t.segments[one_second], compare_tempo)

    for i in dest_range:
        for j in src_range:
            if (i != j): #this check may not be necessary
                new_dist = twosongshift.compare_segments(t.segments[i],t.segments[j],compare_tempo)
                if (best_dist > new_dist):
                    best_dist = new_dist
                    best_trans = (i,j)

    return best_trans
Exemplo n.º 3
0
	def generateControlFile(self, inputFile):
		"""Write a control file of "<time> <midi_note> <velocity>" chord events
		for barbershopping, based on the Echo Nest key/duration analysis of
		*inputFile*.  Returns the control file name."""
		controlFilename = 'control-' + self.barbershopID + '.txt'
		self.logger.log('Control file for barbershopping: ' + controlFilename)

		# Upload original audio to The Echo Nest for analysis
		uploadedTrack = track.track_from_filename(inputFile)
		self.logger.log('Echo nest says original tack duration is ' + str(uploadedTrack.duration))
		self.logger.log('Echo nest says original tack in key: ' + str(uploadedTrack.key) + ' (confidence: ' + str(uploadedTrack.key_confidence) + ')')
		track_length = math.floor(uploadedTrack.duration)  # NOTE(review): float in Python 2
		key_offset = uploadedTrack.key - 6  # center detected key around 0 (-6..5)
		
		# Generate chord progression
		with open(controlFilename, 'w+') as controlFile:
			# Come up with a good algorithm here later...
			# Opening chord at t~0 (MIDI 60/64/67/71), velocity 117.
			controlFile.write("%f %i %i\n" % (0.000001, key_offset + 60, 117))
			controlFile.write("%f %i %i\n" % (0.000001, key_offset + 64, 117))
			controlFile.write("%f %i %i\n" % (0.000001, key_offset + 67, 117))
			controlFile.write("%f %i %i\n" % (0.000001, key_offset + 71, 117))
			# NOTE(review): track_length/2 is a float here (math.floor returns
			# float in py2), and key_offset is added twice per note below —
			# confirm both are intentional.
			for t in xrange(1, track_length/2):
				controlFile.write("%f %i %i\n" % (t * 2.0, key_offset + 60 + t + key_offset, 117))
				controlFile.write("%f %i %i\n" % (t * 2.0, key_offset + 64 + t + key_offset, 117))
				controlFile.write("%f %i %i\n" % (t * 2.0, key_offset + 67 + t + key_offset, 117))
				controlFile.write("%f %i %i\n" % (t * 2.0, key_offset + 71 + t + key_offset, 117))

		return controlFilename
Exemplo n.º 4
0
def segmentFile(filename, clipNameBase, segLoc):
	"""Split an mp3 into one clip per Echo Nest section.

	Writes <segLoc><clipNameBase>_<i>.mp3 for each detected section.
	"""

	# Run Echonest analyzer 
	t = track.track_from_filename(filename, 'mp3')
	t.get_analysis()

	# Cumulative section end times (seconds) from echonest
	sectionDurations = [s['duration'] for s in t.sections]
	sections = np.cumsum(sectionDurations)

	# Read in audio
	audioInfo = MP3Handler().fileInfo(filename)
	audio, sr = MP3Handler().read(filename, 1)
	
	# Convert section end times to sample offsets
	sectionSamples = np.round(np.multiply(sections,sr)).astype("int")

	# Write each section's samples into its own clip file
	for i in range(sectionSamples.shape[0]):

		if i == 0:
			audioClip = audio[:sectionSamples[i]]
		else:
			audioClip = audio[sectionSamples[i-1]:sectionSamples[i]]

		clipName = "{}{}_{}.mp3".format(segLoc, clipNameBase, i)

		# NOTE(review): sr*2 as the write rate looks odd — confirm the
		# sample-rate argument MP3Handler.write expects.
		MP3Handler().write(audioClip, clipName, sr*2)
Exemplo n.º 5
0
def _show_one(audio_file):
    """Print artist, title and selected audio attributes for one audio file."""
    print('File:        ', audio_file)
    pytrack = track.track_from_filename(audio_file)
    print('Artist:      ', getattr(pytrack, 'artist', 'Unknown'))
    print('Title:       ', getattr(pytrack, 'title', 'Unknown'))
    print('Track ID:    ', pytrack.id)
    print('Tempo:       ', pytrack.tempo)
    print('Energy:       %1.3f %s' % (pytrack.energy, _bar(pytrack.energy)))
    if not pytrack.valence:
        # Stale analysis: force a re-upload so the newer attributes get computed.
        pytrack = track.track_from_filename(audio_file, force_upload=True)
    print('Valence:      %1.3f %s' % (pytrack.valence, _bar(pytrack.valence)))
    print('Acousticness: %1.3f %s\n' %
          (pytrack.acousticness, _bar(pytrack.acousticness)))
Exemplo n.º 6
0
def show_tempo(mp3):
    "given an mp3, print out the artist, title and tempo of the song"
    pytrack = track.track_from_filename(mp3)
    print 'File:  ', mp3
    print 'Artist:', pytrack.artist if hasattr(pytrack, 'artist') else 'Unknown'
    print 'Title: ', pytrack.title if hasattr(pytrack, 'title') else 'Unknown'
    print 'Tempo: ', pytrack.tempo 
    print
Exemplo n.º 7
0
def show_tempo(mp3):
    "given an mp3, print out the artist, title and tempo of the song"
    pytrack = track.track_from_filename(mp3)
    print 'File:  ', mp3
    # Fall back to 'Unknown' when Echo Nest did not identify the metadata.
    print 'Artist:', pytrack.artist if hasattr(pytrack,
                                               'artist') else 'Unknown'
    print 'Title: ', pytrack.title if hasattr(pytrack, 'title') else 'Unknown'
    print 'Tempo: ', pytrack.tempo
    print
    def analysis_worker(self, filename, callback, user_data):
        """Load (or fetch and cache) the Echo Nest analysis for *filename*,
        then invoke callback(track, *user_data) if a callback was given."""
        track = self.__load_from_cache(filename)

        if not track:
            # Cache miss: upload the file, run the analysis and cache it.
            track = echotrack.track_from_filename(filename)
            track.get_analysis()
            self.__save_to_cache(filename, track)

        if (callback):
            callback(track, *user_data)
Exemplo n.º 9
0
def get_transition(first_filename, second_filename, ratio, delay, compare_tempo):
    """Find the most similar segment pair between two songs.

    Only the middle portion of each song is searched: *ratio* (0.0-1.0)
    controls how far from each song's midpoint the search extends.
    Returns (first_index, second_index, distance).  Exits the process on an
    out-of-range ratio.
    """
    #set up the 2 files for analysis
    track_one = track.track_from_filename(first_filename)
    track_one.get_analysis()

    track_two = track.track_from_filename(second_filename)
    track_two.get_analysis()

    if (ratio > 1.0 or ratio < 0.0):
        print "Error: ratio must be between 0.0 and 1.0"
        sys.exit(-1)

    # Search windows centered on each song's middle segment.
    first_middle = len(track_one.segments)/2
    second_middle = len(track_two.segments)/2

    first_start = int(first_middle - (first_middle * ratio))
    first_end = int(first_middle + (first_middle * ratio))

    second_start = int(second_middle - (second_middle * ratio))
    second_end = int(second_middle + (second_middle * ratio))

    #compare each segment in the first file to each segment in the second file
    comparisons = []
    for i in range(first_start,first_end):
        appender = []
        for j in range(second_start,second_end):
            compare = compare_segments(track_one.segments[i],track_two.segments[j], compare_tempo)
            appender.append(compare)
        comparisons.append(appender)

    (first_low,second_low) = (0,0)

    # Pick the pair with the smallest comparison distance.
    for i in range(0,len(comparisons)):
        for j in range(0,len(comparisons[i])):
            if comparisons[i][j] < comparisons[first_low][second_low]:
                first_low = i
                second_low = j
    if delay:
        # Throttle to respect the Echo Nest API rate limit.
        print "Waiting 6 seconds"
        time.sleep(6)

    return (first_low+first_start,second_low+second_start,comparisons[first_low][second_low])
Exemplo n.º 10
0
def perform_echonest_analysis(filename):
    """
	Perform the analysis of an audio file with echonest.
	"""
    analysis = {}
    print 'Computing echonest\'s descritors for ' + filename + "..."
    pytrack = echo_track.track_from_filename(filename)
    pytrack.get_analysis()
    analysis = echonest_formatter(pytrack)
    print 'Done'
    return analysis
Exemplo n.º 11
0
 def parse(self):
     """Collect strongly-voiced pitch classes per segment with their start
     times; returns zip(times, notes)."""
     note_names = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
     self.notes = []
     self.times = []
     analyzed = track.track_from_filename(self.path)
     for seg in getattr(analyzed, 'segments'):
         # Keep only pitch classes voiced above the 0.8 confidence threshold.
         strong = [name for name, weight in zip(note_names, seg['pitches']) if weight > 0.8]
         self.notes.append(strong)
         self.times.append(seg['start'])
     return zip(self.times, self.notes)
Exemplo n.º 12
0
def testfiles():
	"""Upload every short (<700s) audio file to the Echo Nest and print its
	track id; long files are only reported."""
	import pyechonest.track
	for file in get_many_mp3(status=0):
		if file.track_details['length'] < 700:
			print("Name: {} Length: {}".format(file.filename, file.track_details['length']))
			# FIX: the old `track = track.track_from_filename(...)` read the
			# local name `track` before it was ever bound (the module was
			# imported as pyechonest.track), raising UnboundLocalError.
			uploaded = pyechonest.track.track_from_filename('audio/' + file.filename, force_upload=True)
			print(uploaded.id)
		else:
			print("BIG ONE - Name: {} Length: {}".format(file.filename, file.track_details['length']))
Exemplo n.º 13
0
def testfiles():
	"""Upload every short (<700s) audio file to the Echo Nest and print its
	track id; long files are only reported."""
	import pyechonest.track
	for file in get_many_mp3(status=0):
		if file.track_details['length'] < 700:
			print("Name: {} Length: {}".format(file.filename, file.track_details['length']))
			# FIX: the old `track = track.track_from_filename(...)` read the
			# local name `track` before it was ever bound (the module was
			# imported as pyechonest.track), raising UnboundLocalError.
			uploaded = pyechonest.track.track_from_filename('audio/' + file.filename, force_upload=True)
			print(uploaded.id)
		else:
			print("BIG ONE - Name: {} Length: {}".format(file.filename, file.track_details['length']))
Exemplo n.º 14
0
def fetch_data(filepaths, result=None, overwrite=False, checkpoint_file=''):
    """
    Parameters
    ----------
    filepaths : list
        Collection of audio files on disk to query against the EchoNest API.
    result : dict, or None
        Dictionary to add info; will create if None.
    overwrite : bool, default=False
        If False, will skip any keys contained in `result`.
    checkpoint_file : str, or None
        Path to write results as they are accumulated; ignored if empty.

    Returns
    -------
    result : dict
        Map of filebases to metadata.
    """
    throttle = Throttle()
    # presumably marks the start of the rate-limit window — see Throttle
    throttle.touch()
    if result is None:
        result = dict()

    filepaths = set(filepaths)
    while filepaths:
        fpath = filepaths.pop()
        key = futil.filebase(fpath)
        # If we've already got data and we're not overwriting, move on.
        if key in result and not overwrite:
            print "[%s] %4d: '%s'" % (time.asctime(), len(filepaths), key)
            continue
        try:
            # Otherwise, let's make some requests.
            print "[%s] %4d: '%s'" % (time.asctime(), len(filepaths), key)
            track = T.track_from_filename(fpath)
            if track:
                result[key] = extract_info(track)
            # Rewrite the full checkpoint after every successful request.
            if checkpoint_file:
                with open(checkpoint_file, 'w') as fp:
                    json.dump(result, fp, indent=2)
            throttle.wait()
        except T.util.EchoNestAPIError as err:
            if err.http_status == 429:
                # Rate limited: back off, then retry this file later.
                print "You got rate limited braah ... hang on."
                throttle.wait(10)
                filepaths.add(fpath)
            elif err.http_status >= 500:
                print "Server error; moving on, dropping key: %s" % key
        except socket.error as err:
            # Transient network failure: retry this file after a pause.
            print "Socket Error %s" % err
            filepaths.add(fpath)
            throttle.wait(10)
    return result
Exemplo n.º 15
0
def fetch_data(filepaths, result=None, overwrite=False, checkpoint_file=''):
    """
    Parameters
    ----------
    filepaths : list
        Collection of audio files on disk to query against the EchoNest API.
    result : dict, or None
        Dictionary to add info; will create if None.
    overwrite : bool, default=False
        If False, will skip any keys contained in `result`.
    checkpoint_file : str, or None
        Path to write results as they are accumulated; ignored if empty.

    Returns
    -------
    result : dict
        Map of filebases to metadata.
    """
    throttle = Throttle()
    # presumably marks the start of the rate-limit window — see Throttle
    throttle.touch()
    if result is None:
        result = dict()

    filepaths = set(filepaths)
    while filepaths:
        fpath = filepaths.pop()
        key = futil.filebase(fpath)
        # If we've already got data and we're not overwriting, move on.
        if key in result and not overwrite:
            print "[%s] %4d: '%s'" % (time.asctime(), len(filepaths), key)
            continue
        try:
            # Otherwise, let's make some requests.
            print "[%s] %4d: '%s'" % (time.asctime(), len(filepaths), key)
            track = T.track_from_filename(fpath)
            if track:
                result[key] = extract_info(track)
            # Rewrite the full checkpoint after every successful request.
            if checkpoint_file:
                with open(checkpoint_file, 'w') as fp:
                    json.dump(result, fp, indent=2)
            throttle.wait()
        except T.util.EchoNestAPIError as err:
            if err.http_status == 429:
                # Rate limited: back off, then retry this file later.
                print "You got rate limited braah ... hang on."
                throttle.wait(10)
                filepaths.add(fpath)
            elif err.http_status >= 500:
                print "Server error; moving on, dropping key: %s" % key
        except socket.error as err:
            # Transient network failure: retry this file after a pause.
            print "Socket Error %s" % err
            filepaths.add(fpath)
            throttle.wait(10)
    return result
Exemplo n.º 16
0
def getAttributes(localAudioFile):
    """Upload a local audio file for Echo Nest analysis and dump its segment
    features to ./input/json/<stem>.json."""
    import os

    t = track.track_from_filename(localAudioFile)
    t.get_analysis()

    # FIX: os.path.splitext strips only the final extension, so filenames
    # that contain dots (e.g. "my.song.mp3") keep their full stem; the old
    # split('.')[0] truncated at the first dot.
    title = os.path.splitext(localAudioFile)[0]
    output = "./input/json/" + title + ".json"

    with open(output, 'w') as o:
        json.dump({'features': t.segments}, o, indent=4)
Exemplo n.º 17
0
def fingerprint(file):
  """Print the Echo Nest fingerprint and audio attributes of *file*."""
  t = track.track_from_filename(file)
  t.get_analysis()
  print "Fingerprint:",   t.echoprintstring
  print "Acousticness:",  t.acousticness
  print "Danceability:",  t.danceability
  print "Energy:",        t.energy
  print "Liveness:",      t.liveness
  print "Loudness:",      t.loudness
  # NOTE(review): this prints "minor" when mode is truthy — confirm that
  # matches the analyzer's mode encoding.
  print "Mode:",          "minor" if t.mode else "major"
  print "Speechiness:",   t.speechiness
  print "Tempo:",         t.tempo
  print "Time Signature", t.time_signature
  print "Valence:",       t.valence
Exemplo n.º 18
0
def main(mp3_list, transition_ratio, segment_temp_change_limit, output_file, delay, compare_tempo, algorithm):
    """Stitch the songs in mp3_list into one continuous mix.

    Analyzes every song, picks transition segment pairs, inserts loopbacks
    where a transition would move backwards, tempo-shifts across each
    transition and writes the assembled audio to *output_file*.
    """
    track_analysis = []
    for i in range(0,len(mp3_list)):
        track_analysis.append( (track.track_from_filename(mp3_list[i])))

    for t in track_analysis:
        t.get_analysis()

    print "continuing..."
    #Reorders mp3_list and generates the transitions
    transitions, mp3_list = generate_transitions(mp3_list, transition_ratio, delay, compare_tempo, algorithm, track_analysis)

    print mp3_list
    print transitions

    #generate the array of audio quantums
    first_index, _ = transitions[0]
    collects = []
    collects.append(beatshift.tempo_shift(mp3_list[0],(0,first_index),segment_temp_change_limit,mp3_list[1],delay))

    for i in range(1,len(transitions)):
        end_segment, _ = transitions[i]
        _, start_segment = transitions[i-1]

        if (start_segment >= end_segment): #if loopback needed
            # The previous transition lands at/after this one starts, so loop
            # the song back to an earlier segment before transitioning out.
            loop_trans = generate_loopback(transitions[i-1],transitions[i],mp3_list,i,delay,compare_tempo)
            start_trans, end_trans = loop_trans

            collects.append(song_loopback(start_segment, end_trans, mp3_list[i],delay))

            start_segment = start_trans
        print mp3_list[i]
        print mp3_list[i+1]
        print (start_segment, end_segment)
        collects.append(beatshift.tempo_shift(mp3_list[i],(start_segment,end_segment),segment_temp_change_limit,mp3_list[i+1],delay))

    # Append the tail of the final song unmodified.
    _, last_index = transitions[len(transitions)-1]
    last_song = audio.LocalAudioFile(mp3_list[len(mp3_list)-1])

    col_append = []
    for i in range(last_index, len(last_song.analysis.segments)):
        col_append.append(last_song.analysis.segments[i].render())

    collects.append(col_append)

    #write to file
    #the sum(collects, []) takes the list of lists of quantum and converts it
    #to a single list of quantums
    out = audio.assemble(sum(collects, []), numChannels=2)
    out.encode(output_file)
Exemplo n.º 19
0
def tempo_shift(input_filename, seg_range, shift_length, second_song, delay):
    """Render the segments of *input_filename* in *seg_range*, gradually
    tempo-shifting the last *shift_length* segments toward *second_song*'s
    tempo.  Returns the list of rendered audio chunks."""
    t1 = track.track_from_filename(input_filename)
    t2 = track.track_from_filename(second_song)

    start_range, end_range = seg_range

    # Never shift over more segments than the range actually contains.
    shift_length = min(shift_length, end_range - start_range)
    shift_magnitude = t2.tempo - t1.tempo

    # Per-segment BPM step across the shift window.
    beat_increment = (1.0*shift_magnitude)/shift_length
    beat_ratio = 1.0
    beat_count = 0

    audiofile = audio.LocalAudioFile(input_filename)
    beats = audiofile.analysis.segments
    collect = []

    for i in range(start_range, end_range):
        if (i > (end_range - shift_length)):
            # Inside the shift window: interpolate toward the target tempo.
            desired_bpm = beat_increment * (i - (end_range - shift_length)) + t1.tempo
            beat_ratio = t1.tempo/desired_bpm

        beat_audio = beats[i].render()

        if (beat_ratio == 1.0):
            collect.append(beat_audio)
        else:
            # Time-stretch the segment to hit the interpolated tempo.
            scaled_beat = dirac.timeScale(beat_audio.data, beat_ratio)
            ts = audio.AudioData(ndarray=scaled_beat, shape=scaled_beat.shape,
                    sampleRate=audiofile.sampleRate, numChannels=scaled_beat.shape[1])
            collect.append(ts)
    if delay:
        # Throttle to respect the Echo Nest API rate limit.
        print "Waiting 9 seconds"
        time.sleep(9)

    return collect
Exemplo n.º 20
0
def getTopTen(artist_uri, spotify):
    songs = []
    results = spotify.artist_top_tracks(artist_uri)
    for t in results[u'tracks'][:10]:
        url = t[u'preview_url']
        req2 = urllib2.Request(url)
        response = urllib2.urlopen(req2)
        data = response.read()
        f = NamedTemporaryFile(suffix = ".mp3")
        f.write(data)
        songs.append((t['name'], audio.LocalAudioFile(f.name)))
        t1 = track.track_from_filename(f.name)
        print 'track:' + t['name'] + "   key:" + str(t1.key) + " tempo:" + str(t1.tempo) + " mode:" + str(t1.mode)
        f.close()
    return songs
Exemplo n.º 21
0
def getTopTen(artist_uri, spotify):
    songs = []
    results = spotify.artist_top_tracks(artist_uri)
    for t in results[u'tracks'][:10]:
        url = t[u'preview_url']
        req2 = urllib2.Request(url)
        response = urllib2.urlopen(req2)
        data = response.read()
        f = NamedTemporaryFile(suffix=".mp3")
        f.write(data)
        songs.append((t['name'], audio.LocalAudioFile(f.name)))
        t1 = track.track_from_filename(f.name)
        print 'track:' + t['name'] + "   key:" + str(t1.key) + " tempo:" + str(
            t1.tempo) + " mode:" + str(t1.mode)
        f.close()
    return songs
Exemplo n.º 22
0
def track_with_file(filename, mp3=None, track_id=None):
    """Return an Echo Nest track whose segment dicts are annotated with the
    raw audio frames read from *filename* via audiolab.

    mp3/track_id optionally select the Echo Nest analysis source; each
    segment dict gains a "raw" key holding its frames.
    """
    if track_id:
        nest_track = track.track_from_id(track_id)
    else:
        nest_track = track.track_from_filename(mp3 or filename)
    audio_track = audiolab.Sndfile(filename)
    nest_track.samplerate = rate = audio_track.samplerate
    nest_track.nframes = audio_track.nframes
    cur_frame = 0
    for seg in nest_track.segments:
        # NOTE(review): these frame counts are floats (rate * seconds) —
        # confirm audiolab's read_frames accepts non-integer counts.
        num_frames = rate * seg["duration"]
        if cur_frame != seg["start"] * rate:
            # Include any gap between the previous read position and this
            # segment's start so the file position stays in sync.
            num_frames += seg["start"] * rate - cur_frame
        if cur_frame + num_frames > audio_track.nframes:
            # Clamp the final read to the end of the file.
            num_frames = audio_track.nframes - cur_frame
        seg["raw"] = audio_track.read_frames(num_frames)
        cur_frame += num_frames
    return nest_track
def echonest(file_path):
    """
    """

    analysis = {}

    print 'Retrieving features from the EchoNest analyzer...'

    try: 
        track = en_tr.track_from_filename(filename = file_path, force_upload = True)
        track.get_analysis()
        analysis = echonest_formater(track)
    except:
        print "Error retrieving EchoNest features"
        return {}

    print 'Done'
    return analysis
Exemplo n.º 24
0
def generateControlFile(inputFile, barbershopID):
  """Write a short test control file of "<time> <midi_note> <velocity>"
  triples walking up in whole tones from the track's detected key.
  Returns the control file name."""
  controlFilename = "control-"+ barbershopID + ".txt"
  
  uploadedTrack = track.track_from_filename(inputFile)
  key_offset = uploadedTrack.key - 6  # center detected key around 0 (-6..5)
  #audioKeyConfidence = uploadedTrack.key_confidence
  #audioBarCount = uploadedTrack.bars.count()
  
  # generate chord progression
  with open(controlFilename, "w+") as controlFile:

    # test, go up in whole tones
    for t in xrange(1,4):
      controlFile.write("%f %i %i\n" % (t*2.0, key_offset + 54 + 3 * t, 117 ))
      controlFile.write("%f %i %i\n" % (t*2.0, key_offset + 59 + 3 * t, 117 ))
      controlFile.write("%f %i %i\n" % (t*2.0, key_offset + 64 + 3 * t, 117 ))
  
  return controlFilename
Exemplo n.º 25
0
    def POST(self, *args):
        """Return (and lazily compute/cache) the Echo Nest analysis for the
        uploaded file identified by the 'file_id' (md5) request parameter.

        Raises FileNotFoundVE if no audio exists for that id.
        """
        input_params = web.input()
        file_id = input_params.get('file_id')
        file_path = get_audio_file_path_for_md5(file_id)
        if not os.path.exists(file_path):
            raise visualizer_exceptions.FileNotFoundVE(file_id)

        # ok, file exists, do we have analysis already?
        analysis_file_path = get_analysis_file_path_for_md5(file_id)
        if not os.path.exists(analysis_file_path):
            # First request for this file: analyze and cache as JSON on disk.
            track_obj = track.track_from_filename(file_path)
            track_obj = clean_track(track_obj.__dict__)
            with open(analysis_file_path, 'w') as new_analysis_file:
                json.dump(track_obj, new_analysis_file)

        # Serve from the on-disk cache (just written, or from a prior call).
        with open(analysis_file_path) as analysis_file:
            analysis_dict = json.load(analysis_file)

        ca = clean_analysis(analysis_dict)
        return json.dumps({'analysis':ca})
Exemplo n.º 26
0
def generateControlFile(inputFile, barbershopID):
    """Write a short test control file of "<time> <midi_note> <velocity>"
    triples walking up in whole tones from the track's detected key.
    Returns the control file name."""
    controlFilename = "control-" + barbershopID + ".txt"

    uploadedTrack = track.track_from_filename(inputFile)
    key_offset = uploadedTrack.key - 6  # center detected key around 0 (-6..5)
    #audioKeyConfidence = uploadedTrack.key_confidence
    #audioBarCount = uploadedTrack.bars.count()

    # generate chord progression
    with open(controlFilename, "w+") as controlFile:

        # test, go up in whole tones
        for t in xrange(1, 4):
            controlFile.write("%f %i %i\n" %
                              (t * 2.0, key_offset + 54 + 3 * t, 117))
            controlFile.write("%f %i %i\n" %
                              (t * 2.0, key_offset + 59 + 3 * t, 117))
            controlFile.write("%f %i %i\n" %
                              (t * 2.0, key_offset + 64 + 3 * t, 117))

    return controlFilename
Exemplo n.º 27
0
    def populate(self, path):
        """Fill this object's audio attributes from an Echo Nest analysis of
        the file at *path*.

        Raises TypeError for a non-string path and ValueError if the file
        does not exist.
        """
        if not isinstance(path, str):
            raise TypeError('path must be type string')
        if not os.path.isfile(path): raise ValueError('file does not exist')

        t = track.track_from_filename(path)
        # sometimes tracks don't have data filled in
        try:
            self.title = t.title
            self.artist = t.artist
            self.album = t.release
        except AttributeError: pass

        # Copy the numeric analysis attributes across verbatim.
        self.duration = t.duration
        self.bitrate = t.bitrate
        self.key = t.key
        self.key_conf = t.key_confidence
        self.mode = t.mode
        self.mode_conf = t.mode_confidence
        self.time_sig = t.time_signature
        self.time_sig_conf = t.time_signature_confidence
        self.tempo = t.tempo
        self.tempo_conf = t.tempo_confidence
        self.loudness = t.loudness
        self.end_fade_in = t.end_of_fade_in
        self.start_fade_out = t.start_of_fade_out
        self.energy = t.energy
        self.danceability = t.danceability
        self.speechiness = t.speechiness

        # try to get the last sections for cue points
        # NOTE(review): pop() consumes t.sections from the end, so cue points
        # are collected last-section-first.
        try:
            for i in range(self.num_cue_points):
                self.cue_points.append(t.sections.pop()['start'])
        # there weren't enough, use what we have
        except IndexError: pass
Exemplo n.º 28
0
    def generateControlFile(self, inputFile):
        """Write a control file of "<time> <midi_note> <velocity>" chord
        events for barbershopping, based on the Echo Nest key/duration
        analysis of *inputFile*.  Returns the control file name."""
        controlFilename = 'control-' + self.barbershopID + '.txt'
        self.logger.log('Control file for barbershopping: ' + controlFilename)

        # Upload original audio to The Echo Nest for analysis
        uploadedTrack = track.track_from_filename(inputFile)
        self.logger.log('Echo nest says original tack duration is ' +
                        str(uploadedTrack.duration))
        self.logger.log('Echo nest says original tack in key: ' +
                        str(uploadedTrack.key) + ' (confidence: ' +
                        str(uploadedTrack.key_confidence) + ')')
        track_length = math.floor(uploadedTrack.duration)  # NOTE(review): float in Python 2
        key_offset = uploadedTrack.key - 6  # center detected key around 0 (-6..5)

        # Generate chord progression
        with open(controlFilename, 'w+') as controlFile:
            # Come up with a good algorithm here later...
            # Opening chord at t~0 (MIDI 60/64/67/71), velocity 117.
            controlFile.write("%f %i %i\n" % (0.000001, key_offset + 60, 117))
            controlFile.write("%f %i %i\n" % (0.000001, key_offset + 64, 117))
            controlFile.write("%f %i %i\n" % (0.000001, key_offset + 67, 117))
            controlFile.write("%f %i %i\n" % (0.000001, key_offset + 71, 117))
            # NOTE(review): track_length / 2 is a float here (math.floor
            # returns float in py2), and key_offset is added twice per note
            # below — confirm both are intentional.
            for t in xrange(1, track_length / 2):
                controlFile.write(
                    "%f %i %i\n" %
                    (t * 2.0, key_offset + 60 + t + key_offset, 117))
                controlFile.write(
                    "%f %i %i\n" %
                    (t * 2.0, key_offset + 64 + t + key_offset, 117))
                controlFile.write(
                    "%f %i %i\n" %
                    (t * 2.0, key_offset + 67 + t + key_offset, 117))
                controlFile.write(
                    "%f %i %i\n" %
                    (t * 2.0, key_offset + 71 + t + key_offset, 117))

        return controlFilename
Exemplo n.º 29
0
def create_track_from_path(path):
    try:
        return track.track_from_filename(path)
    except util.EchoNestAPIError:
        print "Could not create track from file"
Exemplo n.º 30
0
def convert_one_song(audiofile,
                     output,
                     mbconnect=None,
                     verbose=0,
                     DESTROYAUDIO=False):
    """
    PRINCIPAL FUNCTION
    Converts one given audio file to hdf5 format (saved in 'output')
    by uploading it to The Echo Nest API
    INPUT
         audiofile   - path to a typical audio file (wav, mp3, ...)
            output   - nonexisting hdf5 path
         mbconnect   - if not None, open connection to musicbrainz server
           verbose   - if >0 display more information
      DESTROYAUDIO   - Careful! deletes audio file if everything went well
    RETURN
       1 if we think a song is created, 0 otherwise
    """
    # inputs + sanity checks
    if not os.path.exists(audiofile):
        # FIX: was `songfile`, an undefined name that raised NameError on
        # exactly the path this branch is meant to report.
        print(('ERROR: song file does not exist:', audiofile))
        return 0
    if os.path.exists(output):
        print(('ERROR: hdf5 output file already exist:', output,
               ', delete or choose new path'))
        return 0
    # get EN track / song / artist for that song
    if verbose > 0: print(('get analysis for file:', audiofile))
    track = trackEN.track_from_filename(audiofile)
    song_id = track.song_id
    song = songEN.Song(song_id)
    if verbose > 0: print(('found song:', song.title, '(', song_id, ')'))
    artist_id = song.artist_id
    artist = artistEN.Artist(artist_id)
    if verbose > 0: print(('found artist:', artist.name, '(', artist_id, ')'))
    # hack to fill missing values
    try:
        track.foreign_id
    except AttributeError:
        track.__setattr__('foreign_id', '')
        if verbose > 0: print('no track foreign_id found')
    try:
        track.foreign_release_id
    except AttributeError:
        track.__setattr__('foreign_release_id', '')
        if verbose > 0: print('no track foreign_release_id found')
    # create HDF5 file
    if verbose > 0: print(('create HDF5 file:', output))
    HDF5.create_song_file(output, force=False)
    # fill hdf5 file from track
    if verbose > 0:
        if mbconnect is None:
            print('fill HDF5 file with info from track/song/artist')
        else:
            print(
                'fill HDF5 file with info from track/song/artist/musicbrainz')
    h5 = HDF5.open_h5_file_append(output)
    HDF5.fill_hdf5_from_artist(h5, artist)
    HDF5.fill_hdf5_from_song(h5, song)
    HDF5.fill_hdf5_from_track(h5, track)
    if mbconnect is not None:
        HDF5.fill_hdf5_from_musicbrainz(h5, mbconnect)
    h5.close()
    # done
    if DESTROYAUDIO:
        if verbose > 0: print(('We remove audio file:', audiofile))
        os.remove(audiofile)
    return 1
Exemplo n.º 31
0
#!/usr/bin/env python

import sys
import os.path
import random
from pyechonest import config
# NOTE(review): hard-coded API key; prefer the ECHO_NEST_API_KEY env var.
config.ECHO_NEST_API_KEY = "CVBUZHWXOPIYE6ZRD"

from pyechonest import track

SONG = "TestSongs/We R Who We R/We R Who We R.mp3"

# Analyze the test song and warn about low-confidence dance attributes.
tinfo = track.track_from_filename(SONG)
if tinfo.danceability < 0.5:
	print "Warning: song might not be fun to dance to (danceability {0}%)".format(tinfo.danceability*100)
if tinfo.tempo_confidence < 0.5:
	print "Warning: song tempo might be incorrectly detected, steps might not be on the beat or may gradually become off-beat (confidence {0}%).".format(tinfo.tempo_confidence*100)

print "Dancability:", tinfo.danceability
dir(tinfo)  # NOTE(review): result discarded — debugging leftover?
print "Title:", tinfo  # NOTE(review): prints the track object, not its title

class SMFile:
    """Writer for a StepMania .sm file that echoes every line to stdout."""

    def __init__(self, smout):
        # Keep both the open handle and the path for later reference.
        self.smout = open(smout, 'w')
        self.smname = smout

    def whl(self, string):
        """Write *string* terminated by ";\n" to stdout and to the file."""
        line = string + ";\n"
        sys.stdout.write(line)
        self.smout.write(line)
Exemplo n.º 32
0
def get_track(filename):
    """Return the Echo Nest track analysis for *filename*.

    Raises IOError if the file cannot be opened (preserves the original
    pre-check semantics).
    """
    # FIX: the original `f = open(filename)` was never closed or used,
    # leaking a file handle.  Probe readability without leaking.
    with open(filename):
        pass
    return track.track_from_filename(filename)
Exemplo n.º 33
0
 def get_echonest(self):
     """Return the Echo Nest track object for this object's filename."""
     echonest_track = track_from_filename(self.filename)
     return echonest_track
Exemplo n.º 34
0
# Uncomment to set the API key explicitly. Otherwise Pyechonest will
# look in the ECHO_NEST_API_KEY environment variable for the key.
#from pyechonest import config
#config.ECHO_NEST_API_KEY='YOUR API KEY'

from pyechonest import track

c_major_file = track.track_from_filename('c_major.mp3', filetype='mp3')
c_major_md5 = track.track_from_md5(c_major_file.md5)
c_major_ra_md5 = track.track_from_reanalyzing_md5(c_major_file.md5)

for c_major in (c_major_md5, c_major_ra_md5):
    print 'track ID: %s' % c_major.id
    print 'segment pitches:'
    print('%8s    ' + '%-4s ' * 12) % ('start', 'C', 'C#', 'D', 'D#', 'E', 'F',
                                       'F#', 'G', 'G#', 'A', 'A#', 'B')
    for a_dict in c_major.segments:
        print('%8.2f    ' + '%4.2f ' * 12) % (
            (a_dict['start'], ) + tuple(a_dict['pitches']))
    print ''
Exemplo n.º 35
0
    try:
        artistObject= artist.Artist(urllib.unquote(artistString).decode('utf8'))
        images=artistObject.get_images()
        l=len(images)
        k=random.randint(0,l-1)
        return images[k]['url']
    except:
       return 'http://images.wikia.com/theslenderman/images/c/ce/Question-mark-face.jpg'


def getArtistBio(artistString):
    """Return the first Echo Nest biography for the (URL-quoted) artist name,
    reduced to an ASCII-only string."""
    artistObject= artist.Artist(urllib.unquote(artistString).decode('utf8'))
    bios=artistObject.get_biographies()
    return bios[0]['text'].encode('ascii','ignore')

#print getSimilarArtists('skrillex')
# Demo: fetch the full Echo Nest section analysis for a local mp3 via its
# analysis_url, print each section, then play the file with pygame.
mp3='Bassnectar.mp3'
pytrack = track.track_from_filename(mp3)
response=urllib2.urlopen(pytrack.analysis_url)
jsonOutput=json.loads(response.read())
for i in range(len(jsonOutput['sections'])):
    print jsonOutput['sections'][i]
    print ""
#print urllib2.get_data()
#print jsonOutput
pygame.mixer.init()
pygame.mixer.music.set_volume(0.7)
pygame.mixer.music.load(mp3)
pygame.mixer.music.play()

Exemplo n.º 36
0
import requests
import json
import os


mongo_url = 'https://api.mongolab.com/api/1/databases/echonest/collections/acapella2?apiKey=a4-xIcHUAl_iBb9iqoq8pClCmk2DplxI'
path = './audio'
config.ECHO_NEST_API_KEY="LKME7OQAVE5RXMYGG"

headers = {'content-type': 'application/json'}

for file_name in os.listdir(path):
	print "SONG " + file_name
	if file_name.endswith('.mp3'):
		file_path = './audio/' + file_name
		song = track.track_from_filename(file_path, force_upload=False)

		params = {}
		params['song_name'] = file_name
		params['id'] = song.id
		for segment in  song.segments:
			# print segment.keys()
			params['timbre'] = segment['timbre']
			params['pitches'] = segment['pitches']
			params['duration'] = segment['duration']
			params['loudness_start'] = segment['loudness_start']
			params['loudness_max_time'] = segment['loudness_max_time']
			params['loudness_max'] = segment['loudness_max']

			requests.post(mongo_url, data=json.dumps(params), headers= headers)
def aquireTrackInfo(filepath,loadedAudio):
    """Fetch Echo Nest analysis for *filepath* (no local metadata present)
    and attach it to *loadedAudio* via setTrackInfo."""
    print('Retrieving...')
    analyzed = track.track_from_filename(filepath)
    setTrackInfo(loadedAudio, analyzed, filepath)
Exemplo n.º 38
0
def convert_one_song(audiofile,output,mbconnect=None,verbose=0,DESTROYAUDIO=False):
    """
    PRINCIPAL FUNCTION
    Converts one given audio file to hdf5 format (saved in 'output')
    by uploading it to The Echo Nest API
    INPUT
         audiofile   - path to a typical audio file (wav, mp3, ...)
            output   - nonexisting hdf5 path
         mbconnect   - if not None, open connection to musicbrainz server
           verbose   - if >0 display more information
      DESTROYAUDIO   - Careful! deletes audio file if everything went well
    RETURN
       1 if we think a song is created, 0 otherwise
    """
    # inputs + sanity checks
    if not os.path.exists(audiofile):
        # BUGFIX: previously printed the undefined name 'songfile', which
        # raised a NameError on this error path instead of returning 0.
        print('ERROR: song file does not exist:',audiofile)
        return 0
    if os.path.exists(output):
        print('ERROR: hdf5 output file already exist:',output,', delete or choose new path')
        return 0
    # get EN track / song / artist for that song
    if verbose>0: print('get analysis for file:',audiofile)
    track = trackEN.track_from_filename(audiofile)
    song_id = track.song_id
    song = songEN.Song(song_id)
    if verbose>0: print('found song:',song.title,'(',song_id,')')
    artist_id = song.artist_id
    artist = artistEN.Artist(artist_id)
    if verbose>0: print('found artist:',artist.name,'(',artist_id,')')
    # hack to fill values the API sometimes omits, so the HDF5 fill below
    # never trips on a missing attribute
    for attr in ('foreign_id', 'foreign_release_id'):
        if not hasattr(track, attr):
            setattr(track, attr, '')
            if verbose>0: print('no track %s found' % attr)
    # create HDF5 file
    if verbose>0: print('create HDF5 file:',output)
    HDF5.create_song_file(output,force=False)
    # fill hdf5 file from track
    if verbose>0:
        if mbconnect is None:
            print('fill HDF5 file with info from track/song/artist')
        else:
            print('fill HDF5 file with info from track/song/artist/musicbrainz')
    h5 = HDF5.open_h5_file_append(output)
    try:
        HDF5.fill_hdf5_from_artist(h5,artist)
        HDF5.fill_hdf5_from_song(h5,song)
        HDF5.fill_hdf5_from_track(h5,track)
        if mbconnect is not None:
            HDF5.fill_hdf5_from_musicbrainz(h5,mbconnect)
    finally:
        # release the HDF5 handle even if a fill step raises (was leaked before)
        h5.close()
    # done
    if DESTROYAUDIO:
        if verbose>0: print('We remove audio file:',audiofile)
        os.remove(audiofile)
    return 1
Exemplo n.º 39
0
# Collect every .mp3 under `path` (files/path are defined earlier in the file).
for dirpath, dirnames, filenames in os.walk(path):
    for filename in [f for f in filenames if f.endswith(".mp3")]:
        files.append(os.path.join(dirpath, filename))

all_files = []
count = 0

# Read local tags, then analyze each file with the Echo Nest, retrying
# failed API calls once a minute.
for filename in files:
    metadata_reader = MetadataReader(filename=filename)
    metadata = metadata_reader()
    track_title = metadata[0]
    track_artist = metadata[1]
    track_year = metadata[6]
    while True:
        try:
            pytrack = echo_track.track_from_filename(filename)
            pytrack.get_analysis()
            break
        except Exception:
            # BUGFIX: was a bare 'except:', which also caught
            # KeyboardInterrupt/SystemExit and made the retry loop
            # impossible to break out of with Ctrl-C.
            print "Error encountered"
            time.sleep(60)
    echonest_id = pytrack.id
    count += 1
    print "Files analyzed: ", count
    echonest_id = echonest_id.decode('utf-8')
    all_files.append({
        "title": track_title,
        "artist": track_artist,
        "year": track_year,
        "echonest_id": echonest_id
    })
# Uncomment to set the API key explicitly. Otherwise Pyechonest will
# look in the ECHO_NEST_API_KEY environment variable for the key.
#from pyechonest import config
#config.ECHO_NEST_API_KEY='YOUR API KEY'

from pyechonest import track

c_major_file = track.track_from_filename('c_major.mp3', filetype='mp3')
c_major_md5 = track.track_from_md5(c_major_file.md5)
c_major_ra_md5 = track.track_from_reanalyzing_md5(c_major_file.md5)

for c_major in (c_major_md5, c_major_ra_md5):
    print 'track ID: %s' % c_major.id
    print 'segment pitches:'
    print ('%8s    ' + '%-4s ' * 12) % ('start', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
    for a_dict in c_major.segments:
        print ('%8.2f    ' + '%4.2f ' * 12) % ((a_dict['start'], ) + tuple(a_dict['pitches']))
    print ''