Example No. 1
def uploadToEcho(filenames):
    retry=[]
    successTracks = []
    i=0
    if not os.path.isdir('echoNestUploads'):
        os.mkdir('echoNestUploads')
     
    for f in filenames:
        time = t.localtime()
        time = str(time.tm_hour) +'_' + str(time.tm_min) + '_' + str(time.tm_sec)
        try:
            print 'uploading...', f
            rTrack = track.track_from_file(open(f, 'rb'), 'mp3')  # open the MP3 in binary mode
            if rTrack:
                print rTrack.tempo
                print rTrack.time_signature
                print 'gather track info'
                trackInfo = [f, rTrack.tempo, rTrack.time_signature, rTrack.id]
                successTracks.append(trackInfo)
                pickle.dump(trackInfo, open('echoNestUploads/'+str(i)+'-'+time+'.pkl', 'w'))
                try:
                    print 'saving id3'
                    tag = d3.load(f).tag
                    tag.bpm = rTrack.tempo
                    tag.copyright_url = 'echo'
                    tag.save()  # write the updated tag back to the file
                except:
                    print 'id3 failed'
        except:
            print "couldn't upload"
            retry.append(f)
        i+=1

    return successTracks, retry
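The snippet above relies on imports that the listing does not show; a plausible header, assuming `t` is the standard `time` module and `d3` is the `eyed3` tagging library, would be:

import os
import pickle
import time as t

import eyed3 as d3              # assumed alias for the ID3 tagging library
from pyechonest import track    # Echo Nest track API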
Example No. 2
def AnalyzeMP3File(file):
    f = open(file)
    theTrack = track.track_from_file(f, "mp3")
    theTrack.get_analysis()
    trackObj = models.trackInfo(theTrack, file)
    try:
        print theTrack.title
    except:
        print "unknown track"
    trackObj.save()
    f.close()
Example No. 3
def get_timbre(wav_dir):
    # capture timbre, pitches, loudness, etc. from the Echo Nest
    config.ECHO_NEST_API_KEY = "BPQ7TEP9JXXDVIXA5"  # daleloogn my api key
    f = open(wav_dir)
    print "process%s====================================================================" % wav_dir
    # t = track.track_from_file(f, 'wav')
    t = track.track_from_file(f, 'wav', 256, force_upload=True)
    # without force_upload the upload times out on the socket
    t.get_analysis()
    segments = t.segments  # list of dicts :timing,pitch,loudness and timbre for each segment
    timbre = from_segments_get_timbre(wav_dir, segments)
    return timbre
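The helper `from_segments_get_timbre` is defined elsewhere in that project; a minimal sketch, assuming it simply collects the 12-dimensional timbre vector of every segment, could be:

def from_segments_get_timbre(wav_dir, segments):
    # Hypothetical reconstruction: pull the 12 timbre coefficients
    # out of each Echo Nest segment dict.
    return [segment['timbre'] for segment in segments]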
Example No. 4
def get_timbre_pitches_loudness(wav_dir):
    # capture timbre, pitches, loudness, etc. from the Echo Nest
    config.ECHO_NEST_API_KEY = "BPQ7TEP9JXXDVIXA5"  # daleloogn my api key
    f = open(wav_dir)
    print "process:============ %s =============" % wav_dir
    t = track.track_from_file(f, 'wav', 256, force_upload=True)
    t.get_analysis()
    segments = t.segments  # list of dicts :timing,pitch,loudness and timbre for each segment
    timbre_pitches_loudness = from_segments_get_timbre_pitch_etal(wav_dir, segments)
    timbre_pitches_loudness_file_txt = open('timbre_pitches_loudness_file.txt', 'a')
    timbre_pitches_loudness_file_txt.write(wav_dir + '\r\n')
    timbre_pitches_loudness_file_txt.write(str(timbre_pitches_loudness))
    timbre_pitches_loudness_file_txt.close()
    return segments
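Likewise, `from_segments_get_timbre_pitch_etal` is not shown; a sketch that mirrors the inline per-segment concatenation used in the later examples (field order assumed) might be:

def from_segments_get_timbre_pitch_etal(wav_dir, segments):
    # Hypothetical reconstruction: one flat feature vector per segment,
    # built from timbre, pitches and the three loudness fields.
    features = []
    for seg in segments:
        features.append(seg['timbre'] + seg['pitches'] +
                        [seg['loudness_start'],
                         seg['loudness_max_time'],
                         seg['loudness_max']])
    return features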
Example No. 5
def main():
    f = open("sonata.mp3")
    t = track.track_from_file(f, "mp3")
    print t.key
    print t.title
    t.get_analysis()
    # t.sections is a list of dicts describing the larger sections of a song
    #print dir(t)
    print t.sections

    beatList = [(x['start'], x['start']+x['duration']) for x in t.beats]
    print beatList
    while myTime < 212:
      time.sleep(0.05)
      checkBeat(beatList)
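`myTime` and `checkBeat` come from the rest of that script; under the assumption that `myTime` is a module-level playback clock advanced alongside the 0.05 s sleep, the missing pieces might look roughly like:

myTime = 0.0  # hypothetical module-level playback clock, in seconds

def checkBeat(beatList):
    # Hypothetical helper: advance the clock and report whether it
    # currently falls inside any (start, end) beat interval.
    global myTime
    myTime += 0.05
    for start, end in beatList:
        if start <= myTime <= end:
            print "beat at", myTime
            return True
    return False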
Example No. 6
def fetch_en_annotations(filename):
    songname = filename[:-4] if filename.endswith('.mp3') else filename  # strip only the trailing '.mp3' extension
    print songname, "to json annotation"
    # from pyechonest import artist
    # bk = artist.Artist('bikini kill')
    # print "Artists similar to: %s:" % (bk.name,)
    # for similar_artist in bk.similar: print "\t%s" % (similar_artist.name,)

    f = open(mypath + filename)
    t = track.track_from_file(f, 'mp3')
    outfile = songname + '.json'
    print t.analysis_url
    myurl = t.analysis_url

    annotation = subprocess.check_output(["curl", myurl])
    # ipdb.set_trace()

    open(outfile, 'w').write(annotation)  # curl already returned the raw JSON document
    pickle.dump(t, open(songname + ".pickle", "w"))
Example No. 7
def fetch_en_annotations(filename):
    songname = filename[:-4] if filename.endswith('.mp3') else filename  # strip only the trailing '.mp3' extension
    print songname, "to json annotation"
    # from pyechonest import artist
    # bk = artist.Artist('bikini kill')
    # print "Artists similar to: %s:" % (bk.name,)
    # for similar_artist in bk.similar: print "\t%s" % (similar_artist.name,)

    f = open(mypath + filename)
    t = track.track_from_file(f, 'mp3')
    outfile = songname + '.json'
    print t.analysis_url
    myurl = t.analysis_url

    annotation = subprocess.check_output(["curl", myurl])
    # ipdb.set_trace()

    open(outfile, 'w').write(annotation)  # curl already returned the raw JSON document
    pickle.dump(t, open(songname + ".pickle", "w"))
Example No. 8
def songDetails (songName):
  global count
  print songName
  if re.search(r'\.[mM][pP]3$', songName):    # check whether the file is an MP3
    startTime = time.time()
    f = open(musicPath+songName)
    t = track.track_from_file(f, 'mp3', 30, False)
    endTime = time.time()
    print songName
    print t.id
    print t.artist
    print t.duration
    print t.title
    print "Search Time : "+str(endTime-startTime)
    print "----------------------"
    fo = open("foo.txt", "a")
    fo.write( songName+"\n")
    fo.write( t.title+"\n")
    fo.write( t.artist+"\n")
    fo.write( "------------\n")
    # close the opened file
    fo.close()
  count+=1
Example No. 9
File: views.py Project: jkal/sndidx
def upload(request):
    """
    Handle the upload of the query file and store it in Mongo.
    """
    f = request.FILES.get('files[]')
    k = request.POST.get('k', 1)

    # Send file to Echonest and request full analysis.
    # Only MP3 supported for now.
    # TODO: This is a bit unstable with large files.
    try:
        tr = track.track_from_file(f, 'mp3')
        tr.get_analysis()
    except EchoNestException:
        return HttpResponse(status=500)

    # Create object to store.
    q = {
        'id': tr.id,
        'md5': tr.md5,
        'bitrate': tr.bitrate,
        'codestring': tr.codestring,
        'duration': tr.duration,
        'energy': tr.energy,
        'segments': tr.segments,
    }

    queries.insert(q)
    redirect_to = reverse('results', args=[tr.md5]) + '?k=' + str(k)  # k may be an int when the POST parameter is absent

    response = JSONResponse([{
        'name': f.name,
        'url': redirect_to,
    }], {}, response_mimetype(request))
    response['Content-Disposition'] = 'inline; filename=file.json'

    return response
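`queries` is a MongoDB collection configured elsewhere in the project; a minimal pymongo setup (database and collection names assumed) might look like:

from pymongo import MongoClient

# Hypothetical setup for the `queries` collection used by the view above.
client = MongoClient('localhost', 27017)
queries = client['sndidx']['queries']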
Example No. 10
def get_timbre_pitches_loudness(wav_dir):
    turn_api()
    f = open(wav_dir)
    print "process%s====================================================================" % wav_dir
    # t = track.track_from_file(f, 'wav')
    t = track.track_from_file(f, 'wav', 256, force_upload=True)
    # without force_upload the upload times out on the socket
    t.get_analysis()
    segments = t.segments  # list of dicts :timing,pitch,loudness and timbre for each segment
    timbre_pitches_loudness = []
    # flag_test = 1
    for segments_item in segments:
        timbre = segments_item['timbre']
        pitches = segments_item['pitches']
        loudness_start = segments_item['loudness_start']
        loudness_max_time = segments_item['loudness_max_time']
        loudness_max = segments_item['loudness_max']
        segments_item_union = timbre + pitches + [loudness_start, loudness_max_time, loudness_max]
        timbre_pitches_loudness.append(segments_item_union)
    timbre_pitches_loudness_file_txt = open('timbre_pitches_loudness_file_txt', 'a')
    timbre_pitches_loudness_file_txt.write('\r\n' + wav_dir + '\r\n')
    timbre_pitches_loudness_file_txt.write(str(timbre_pitches_loudness))  # file.write() needs a string, not a list
    timbre_pitches_loudness_file_txt.close()
    return timbre_pitches_loudness
Example No. 11
def get_timbre_pitches_loudness(wav_dir):
    turn_api()
    f = open(wav_dir)
    print "process%s====================================================================" % wav_dir
    # t = track.track_from_file(f, 'wav')
    t = track.track_from_file(f, "wav", 256, force_upload=True)
    # without force_upload the upload times out on the socket
    t.get_analysis()
    segments = t.segments  # list of dicts :timing,pitch,loudness and timbre for each segment
    timbre_pitches_loudness = []
    # flag_test = 1
    for segments_item in segments:
        timbre = segments_item["timbre"]
        pitches = segments_item["pitches"]
        loudness_start = segments_item["loudness_start"]
        loudness_max_time = segments_item["loudness_max_time"]
        loudness_max = segments_item["loudness_max"]
        segments_item_union = timbre + pitches + [loudness_start, loudness_max_time, loudness_max]
        timbre_pitches_loudness.append(segments_item_union)
    timbre_pitches_loudness_file_txt = open("timbre_pitches_loudness_file.txt", "a")
    timbre_pitches_loudness_file_txt.write("\r\n" + wav_dir + "\r\n")
    timbre_pitches_loudness_file_txt.write(str(timbre_pitches_loudness))
    timbre_pitches_loudness_file_txt.close()
    return timbre_pitches_loudness
Example No. 12
                print e

        if not t:
            try:
                f = open(item.master.path)
                md5 = md5_for_file(f)
                log.debug("query by md5: %s" % md5)
                t = track.track_from_md5(md5)
            except EchoNestAPIError, e:
                print e

        if not t:
            try:
                log.debug("query by file: %s" % item.master.path)
                f = open(item.master.path)
                t = track.track_from_file(f, "mp3")
            except EchoNestAPIError, e:
                print e

        if t:
            item.echonest_id = t.id

            t.get_analysis()

            # print t
            print t.id
            print t.analysis_url
            print
            print "danceability:      %s" % t.danceability
            print "energy:            %s" % t.energy
            print "key:               %s" % t.key
Example No. 13
        if not t:
            try:
                f = open(item.master.path)
                md5 = md5_for_file(f);
                log.debug('query by md5: %s' % md5)
                t = track.track_from_md5(md5)
            except EchoNestAPIError, e:
                print e


        if not t:
            try:
                log.debug('query by file: %s' % item.master.path)
                f = open(item.master.path)
                t = track.track_from_file(f, 'mp3')
            except EchoNestAPIError, e:
                print e


        if t:
            item.echonest_id = t.id

            t.get_analysis()

            #print t
            print t.id
            print t.analysis_url
            print
            print 'danceability:      %s' % t.danceability
            print 'energy:            %s' % t.energy
Example No. 14
def get_attr(fp, pathstring, db):
    try:
        track = track_from_file(fp, 'mp3')
    except Exception:
        return

    # all initial values are set to None
    beatsavg = None
    beatsdev = None
    barsavg = None
    barsdev = None
    sectionsavg = None
    sectionsdev = None
    sectionscount = None
    segmentsavg = None
    segmentsdev = None
    tatumsavg = None
    tatumsdev = None
    tatumscount = None
    thisartist = None
    thisdanceability = None
    thisduration = None
    thisfadein = None
    thisenergy = None
    thiskey = None
    thiskeyconfidence = None
    thisliveness = None
    thisloudness = None
    thismode = None
    thismodeconfidence = None
    thisoffset = None
    thisspeechiness = None
    thisfadeout = None
    thistempo = None
    thistempoconfidence = None
    thistimesig = None
    thistimesigcon = None
    thistitle = None
    thisloudnessmaxaverage = None
    thisloudnessmaxdeviation = None
    thisloudnessmaxdifferential = None
    thisloudnessmaxtimeaverage = None
    thisloudnessmaxtimedeviation = None
    thisloudnessmaxtimedifferential = None
    thisloudnessstartaverage = None
    thisloudnessstartdeviation = None
    thisloudnessstartdifferential = None

    thistimbreaverage = [None, None, None, None, None, None, None, None, None, None, None, None]
    thistimbredev = [None, None, None, None, None, None, None, None, None, None, None, None]
    thistimbrediff = [None, None, None, None, None, None, None, None, None, None, None, None]
    thispitchaverage = [None, None, None, None, None, None, None, None, None, None, None, None]
    thispitchdev = [None, None, None, None, None, None, None, None, None, None, None, None]
    thispitchdiff = [None, None, None, None, None, None, None, None, None, None, None, None]
    thispitchratioa = [None, None, None, None, None, None, None, None, None, None, None, None]
    thispitchratiob = [None, None, None, None, None, None, None, None, None, None, None, None]

    # features present in the track information are placed in the corresponding variable holders
    ## many of these call functions to process the data further
    if hasattr(track, 'beats') and len(track.beats) > 0:
        beatsavg = get_average(track.beats, 'duration')
        beatsdev = get_deviation(track.beats, 'duration', beatsavg)
    if hasattr(track, 'bars') and len(track.bars) > 0:
        barsavg = get_average(track.bars, 'duration')
        barsdev = get_deviation(track.bars, 'duration', barsavg)
    if hasattr(track, 'sections') and len(track.sections) > 0:
        sectionsavg = get_average(track.sections, 'duration')
        sectionsdev = get_deviation(track.sections, 'duration', sectionsavg)
        sectionscount = len(track.sections)
    if hasattr(track, 'segments') and len(track.segments) > 0:
        segmentsavg = get_average(track.segments, 'duration')
        segmentsdev = get_deviation(track.segments, 'duration', segmentsavg)
    if hasattr(track, 'tatums') and len(track.tatums) > 0:
        tatumsavg = get_average(track.tatums, 'duration')
        tatumsdev = get_deviation(track.tatums, 'duration', tatumsavg)
        tatumscount = len(track.tatums)

    if hasattr(track, 'artist'):
        thisartist = str(track.artist)
    if hasattr(track, 'danceability'):
        thisdanceability = track.danceability
    if hasattr(track, 'duration'):
        thisduration = track.duration
    if hasattr(track, 'end_of_fade_in'):
        thisfadein = track.end_of_fade_in
    if hasattr(track, 'energy'):
        thisenergy = track.energy
    if hasattr(track, 'key'):
        thiskey = track.key
    if hasattr(track, 'key_confidence'):
        thiskeyconfidence = track.key_confidence
    if hasattr(track, 'liveness'):
        thisliveness = track.liveness
    if hasattr(track, 'loudness'):
        thisloudness = track.loudness
    if hasattr(track, 'mode'):
        thismode = track.mode
    if hasattr(track, 'mode_confidence'):
        thismodeconfidence = track.mode_confidence
    if hasattr(track, 'offset_seconds'):
        thisoffset = track.offset_seconds
    if hasattr(track, 'speechiness'):
        thisspeechiness = track.speechiness
    if hasattr(track, 'start_of_fade_out'):
        thisfadeout = track.start_of_fade_out
    if hasattr(track, 'tempo'):
        thistempo = track.tempo
    if hasattr(track, 'tempo_confidence'):
        thistempoconfidence = track.tempo_confidence
    if hasattr(track, 'time_signature'):
        thistimesig = track.time_signature
    if hasattr(track, 'time_signature_confidence'):
        thistimesigcon = track.time_signature_confidence
    if hasattr(track, 'title'):
        thistitle = str(track.title)

    # segment data (individual notes) are extracted and processed into aggregate values for the track
    if hasattr(track, 'segments') and len(track.segments) != 0:
        loudness_maxarray = []
        loudness_max_timearray = []
        loudness_startarray = []
        timbrearrays = [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]
        pitcharrays = [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]

        for k in track.segments:
            loudness_maxarray.append(k['loudness_max'])
            loudness_max_timearray.append(k['loudness_max_time'])
            loudness_startarray.append(k['loudness_start'])
            
            i = 0
            while i < 12:
                timbrearrays[i].append(k['timbre'][i])
                i += 1
            i = 0
            while i < 12:
                pitcharrays[i].append(k['pitches'][i])
                i += 1

        avg = get_aver(loudness_maxarray)
        thisloudnessmaxaverage = avg
        thisloudnessmaxdeviation = get_devi(loudness_maxarray, avg)
        thisloudnessmaxdifferential = get_differential(loudness_maxarray)
        
        avg = get_aver(loudness_max_timearray)
        thisloudnessmaxtimeaverage = avg
        thisloudnessmaxtimedeviation = get_devi(loudness_max_timearray, avg)
        thisloudnessmaxtimedifferential =  get_differential(loudness_max_timearray)
        
        avg = get_aver(loudness_startarray)
        thisloudnessstartaverage = avg
        thisloudnessstartdeviation = get_devi(loudness_startarray, avg)
        thisloudnessstartdifferential = get_differential(loudness_startarray)
        
        i = 0
        while i < 12:
            avg = get_aver(timbrearrays[i])
            thistimbreaverage[i] = avg
            thistimbredev[i] = get_devi(timbrearrays[i], avg)
            thistimbrediff[i] = get_differential(timbrearrays[i])
            avg = get_aver(pitcharrays[i])
            thispitchaverage[i] = avg
            thispitchdev[i] = get_devi(pitcharrays[i], avg)
            thispitchdiff[i] = get_differential(pitcharrays[i])
            thispitchratioa[i] = get_ratio(pitcharrays[i], .5)
            thispitchratiob[i] = get_ratio(pitcharrays[i], 1)
            i += 1
        

    # a dictionary is created with all the features present, which can then be read into a DB
    song = { commonPath: pathstring,
             commonTitle: thistitle,
             commonArtist: thisartist,
             songBeatAverage.name: beatsavg,
             songBeatDeviation.name: beatsdev,
             songBarsAverage.name: barsavg,
             songBarsDeviation.name: barsdev,
             songDanceability.name: thisdanceability,
             songDuration.name: thisduration,
             songEndOfFadeIn.name: thisfadein,
             songEnergy.name: thisenergy,
             songKey.name: thiskey,
             songKeyConfidence.name: thiskeyconfidence,
             songLiveness.name: thisliveness,
             songLoudness.name: thisloudness,
             songMode.name: thismode,
             songModeConfidence.name: thismodeconfidence,
             songOffsetSeconds.name: thisoffset,
             songSectionsAverage.name: sectionsavg,
             songSectionsDeviation.name: sectionsdev,
             songSectionsCount.name: sectionscount,
             songSpeechiness.name: thisspeechiness,
             songStartOfFadeOut.name: thisfadeout,
             songTatumsAverage.name: tatumsavg,
             songTatumsDeviation.name: tatumsdev,
             songTatumsCount.name: tatumscount,
             songTempo.name: thistempo,
             songTempoConfidence.name: thistempoconfidence,
             songTimeSignature.name: thistimesig,
             songTimeSignatureConfidence.name: thistimesigcon,
             songLoudnessMaxAverage.name: thisloudnessmaxaverage,
             songLoudnessMaxDeviation.name: thisloudnessmaxdeviation,
             songLoudnessMaxDifferential.name: thisloudnessmaxdifferential,
             songLoudnessMaxTimeAverage.name: thisloudnessmaxtimeaverage,
             songLoudnessMaxTimeDeviation.name: thisloudnessmaxtimedeviation,
             songLoudnessMaxTimeDifferential.name: thisloudnessmaxtimedifferential,
             songLoudnessStartAverage.name: thisloudnessstartaverage,
             songLoudnessStartDeviation.name: thisloudnessstartdeviation,
             songLoudnessStartDifferential.name: thisloudnessstartdifferential,
             songTimbre1Average.name: thistimbreaverage[0],
             songTimbre1Dev.name: thistimbredev[0],
             songTimbre1Diff.name: thistimbrediff[0],
             songTimbre2Average.name: thistimbreaverage[1],
             songTimbre2Dev.name: thistimbredev[1],
             songTimbre2Diff.name: thistimbrediff[1],
             songTimbre3Average.name: thistimbreaverage[2],
             songTimbre3Dev.name: thistimbredev[2],
             songTimbre3Diff.name: thistimbrediff[2],
             songTimbre4Average.name: thistimbreaverage[3],
             songTimbre4Dev.name: thistimbredev[3],
             songTimbre4Diff.name: thistimbrediff[3],
             songTimbre5Average.name: thistimbreaverage[4],
             songTimbre5Dev.name: thistimbredev[4],
             songTimbre5Diff.name: thistimbrediff[4],
             songTimbre6Average.name: thistimbreaverage[5],
             songTimbre6Dev.name: thistimbredev[5],
             songTimbre6Diff.name: thistimbrediff[5],
             songTimbre7Average.name: thistimbreaverage[6],
             songTimbre7Dev.name: thistimbredev[6],
             songTimbre7Diff.name: thistimbrediff[6],
             songTimbre8Average.name: thistimbreaverage[7],
             songTimbre8Dev.name: thistimbredev[7],
             songTimbre8Diff.name: thistimbrediff[7],
             songTimbre9Average.name: thistimbreaverage[8],
             songTimbre9Dev.name: thistimbredev[8],
             songTimbre9Diff.name: thistimbrediff[8],
             songTimbre10Average.name: thistimbreaverage[9],
             songTimbre10Dev.name: thistimbredev[9],
             songTimbre10Diff.name: thistimbrediff[9],
             songTimbre11Average.name: thistimbreaverage[10],
             songTimbre11Dev.name: thistimbredev[10],
             songTimbre11Diff.name: thistimbrediff[10],
             songTimbre12Average.name: thistimbreaverage[11],
             songTimbre12Dev.name: thistimbredev[11],
             songTimbre12Diff.name: thistimbrediff[11],
             songPitch1Average.name: thispitchaverage[0],
             songPitch1Dev.name: thispitchdev[0],
             songPitch1Diff.name: thispitchdiff[0],
             songPitch2Average.name: thispitchaverage[1],
             songPitch2Dev.name: thispitchdev[1],
             songPitch2Diff.name: thispitchdiff[1],
             songPitch3Average.name: thispitchaverage[2],
             songPitch3Dev.name: thispitchdev[2],
             songPitch3Diff.name: thispitchdiff[2],
             songPitch4Average.name: thispitchaverage[3],
             songPitch4Dev.name: thispitchdev[3],
             songPitch4Diff.name: thispitchdiff[3],
             songPitch5Average.name: thispitchaverage[4],
             songPitch5Dev.name: thispitchdev[4],
             songPitch5Diff.name: thispitchdiff[4],
             songPitch6Average.name: thispitchaverage[5],
             songPitch6Dev.name: thispitchdev[5],
             songPitch6Diff.name: thispitchdiff[5],
             songPitch7Average.name: thispitchaverage[6],
             songPitch7Dev.name: thispitchdev[6],
             songPitch7Diff.name: thispitchdiff[6],
             songPitch8Average.name: thispitchaverage[7],
             songPitch8Dev.name: thispitchdev[7],
             songPitch8Diff.name: thispitchdiff[7],
             songPitch9Average.name: thispitchaverage[8],
             songPitch9Dev.name: thispitchdev[8],
             songPitch9Diff.name: thispitchdiff[8],
             songPitch10Average.name: thispitchaverage[9],
             songPitch10Dev.name: thispitchdev[9],
             songPitch10Diff.name: thispitchdiff[9],
             songPitch11Average.name: thispitchaverage[10],
             songPitch11Dev.name: thispitchdev[10],
             songPitch11Diff.name: thispitchdiff[10],
             songPitch12Average.name: thispitchaverage[11],
             songPitch12Dev.name: thispitchdev[11],
             songPitch12Diff.name: thispitchdiff[11],
             songPitch1Ratioa.name: thispitchratioa[0],
             songPitch2Ratioa.name: thispitchratioa[1],
             songPitch3Ratioa.name: thispitchratioa[2],
             songPitch4Ratioa.name: thispitchratioa[3],
             songPitch5Ratioa.name: thispitchratioa[4],
             songPitch6Ratioa.name: thispitchratioa[5],
             songPitch7Ratioa.name: thispitchratioa[6],
             songPitch8Ratioa.name: thispitchratioa[7],
             songPitch9Ratioa.name: thispitchratioa[8],
             songPitch10Ratioa.name: thispitchratioa[9],
             songPitch11Ratioa.name: thispitchratioa[10],
             songPitch12Ratioa.name: thispitchratioa[11],
             songPitch1Ratiob.name: thispitchratiob[0],
             songPitch2Ratiob.name: thispitchratiob[1],
             songPitch3Ratiob.name: thispitchratiob[2],
             songPitch4Ratiob.name: thispitchratiob[3],
             songPitch5Ratiob.name: thispitchratiob[4],
             songPitch6Ratiob.name: thispitchratiob[5],
             songPitch7Ratiob.name: thispitchratiob[6],
             songPitch8Ratiob.name: thispitchratiob[7],
             songPitch9Ratiob.name: thispitchratiob[8],
             songPitch10Ratiob.name: thispitchratiob[9],
             songPitch11Ratiob.name: thispitchratiob[10],
             songPitch12Ratiob.name: thispitchratiob[11]}

 
    ## calls a function to place these attributes in the DB
    db.add_song(song)
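The aggregate helpers (`get_average`, `get_deviation`, `get_aver`, `get_devi`, `get_ratio`, `get_differential`) are defined elsewhere in the project. Sketches of a few of them, assuming they compute a mean, a population standard deviation and the fraction of values at or above a threshold, might be:

import math

def get_aver(values):
    # Assumed behaviour: arithmetic mean of a list of numbers.
    return sum(values) / float(len(values))

def get_devi(values, avg):
    # Assumed behaviour: population standard deviation around avg.
    return math.sqrt(sum((v - avg) ** 2 for v in values) / float(len(values)))

def get_ratio(values, threshold):
    # Assumed behaviour: fraction of values at or above the threshold
    # (Echo Nest pitch values lie between 0 and 1).
    return sum(1 for v in values if v >= threshold) / float(len(values))

`get_average` and `get_deviation` presumably do the same over one key of a list of dicts; `get_differential` is not reconstructed here.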
Example No. 15
def get_attr(fp, pathstring, db):
    try:
        track = track_from_file(fp, 'mp3')
    except Exception:
        return

    # all initial values are set to None
    beatsavg = None
    beatsdev = None
    barsavg = None
    barsdev = None
    sectionsavg = None
    sectionsdev = None
    sectionscount = None
    segmentsavg = None
    segmentsdev = None
    tatumsavg = None
    tatumsdev = None
    tatumscount = None
    thisartist = None
    thisdanceability = None
    thisduration = None
    thisfadein = None
    thisenergy = None
    thiskey = None
    thiskeyconfidence = None
    thisliveness = None
    thisloudness = None
    thismode = None
    thismodeconfidence = None
    thisoffset = None
    thisspeechiness = None
    thisfadeout = None
    thistempo = None
    thistempoconfidence = None
    thistimesig = None
    thistimesigcon = None
    thistitle = None
    thisloudnessmaxaverage = None
    thisloudnessmaxdeviation = None
    thisloudnessmaxdifferential = None
    thisloudnessmaxtimeaverage = None
    thisloudnessmaxtimedeviation = None
    thisloudnessmaxtimedifferential = None
    thisloudnessstartaverage = None
    thisloudnessstartdeviation = None
    thisloudnessstartdifferential = None

    thistimbreaverage = [
        None, None, None, None, None, None, None, None, None, None, None, None
    ]
    thistimbredev = [
        None, None, None, None, None, None, None, None, None, None, None, None
    ]
    thistimbrediff = [
        None, None, None, None, None, None, None, None, None, None, None, None
    ]
    thispitchaverage = [
        None, None, None, None, None, None, None, None, None, None, None, None
    ]
    thispitchdev = [
        None, None, None, None, None, None, None, None, None, None, None, None
    ]
    thispitchdiff = [
        None, None, None, None, None, None, None, None, None, None, None, None
    ]
    thispitchratioa = [
        None, None, None, None, None, None, None, None, None, None, None, None
    ]
    thispitchratiob = [
        None, None, None, None, None, None, None, None, None, None, None, None
    ]

    # features present in the track information are placed in the corresponding variable holders
    ## many of these call functions to process the data further
    if hasattr(track, 'beats') and len(track.beats) > 0:
        beatsavg = get_average(track.beats, 'duration')
        beatsdev = get_deviation(track.beats, 'duration', beatsavg)
    if hasattr(track, 'bars') and len(track.bars) > 0:
        barsavg = get_average(track.bars, 'duration')
        barsdev = get_deviation(track.bars, 'duration', barsavg)
    if hasattr(track, 'sections') and len(track.sections) > 0:
        sectionsavg = get_average(track.sections, 'duration')
        sectionsdev = get_deviation(track.sections, 'duration', sectionsavg)
        sectionscount = len(track.sections)
    if hasattr(track, 'segments') and len(track.segments) > 0:
        segmentsavg = get_average(track.segments, 'duration')
        segmentsdev = get_deviation(track.segments, 'duration', segmentsavg)
    if hasattr(track, 'tatums') and len(track.tatums) > 0:
        tatumsavg = get_average(track.tatums, 'duration')
        tatumsdev = get_deviation(track.tatums, 'duration', tatumsavg)
        tatumscount = len(track.tatums)

    if hasattr(track, 'artist'):
        thisartist = str(track.artist)
    if hasattr(track, 'danceability'):
        thisdanceability = track.danceability
    if hasattr(track, 'duration'):
        thisduration = track.duration
    if hasattr(track, 'end_of_fade_in'):
        thisfadein = track.end_of_fade_in
    if hasattr(track, 'energy'):
        thisenergy = track.energy
    if hasattr(track, 'key'):
        thiskey = track.key
    if hasattr(track, 'key_confidence'):
        thiskeyconfidence = track.key_confidence
    if hasattr(track, 'liveness'):
        thisliveness = track.liveness
    if hasattr(track, 'loudness'):
        thisloudness = track.loudness
    if hasattr(track, 'mode'):
        thismode = track.mode
    if hasattr(track, 'mode_confidence'):
        thismodeconfidence = track.mode_confidence
    if hasattr(track, 'offset_seconds'):
        thisoffset = track.offset_seconds
    if hasattr(track, 'speechiness'):
        thisspeechiness = track.speechiness
    if hasattr(track, 'start_of_fade_out'):
        thisfadeout = track.start_of_fade_out
    if hasattr(track, 'tempo'):
        thistempo = track.tempo
    if hasattr(track, 'tempo_confidence'):
        thistempoconfidence = track.tempo_confidence
    if hasattr(track, 'time_signature'):
        thistimesig = track.time_signature
    if hasattr(track, 'time_signature_confidence'):
        thistimesigcon = track.time_signature_confidence
    if hasattr(track, 'title'):
        thistitle = str(track.title)

    # segment data (individual notes) are extracted and processed into aggregate values for the track
    if hasattr(track, 'segments') and len(track.segments) != 0:
        loudness_maxarray = []
        loudness_max_timearray = []
        loudness_startarray = []
        timbrearrays = [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]
        pitcharrays = [],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]

        for k in track.segments:
            loudness_maxarray.append(k['loudness_max'])
            loudness_max_timearray.append(k['loudness_max_time'])
            loudness_startarray.append(k['loudness_start'])

            i = 0
            while i < 12:
                timbrearrays[i].append(k['timbre'][i])
                i += 1
            i = 0
            while i < 12:
                pitcharrays[i].append(k['pitches'][i])
                i += 1

        avg = get_aver(loudness_maxarray)
        thisloudnessmaxaverage = avg
        thisloudnessmaxdeviation = get_devi(loudness_maxarray, avg)
        thisloudnessmaxdifferential = get_differential(loudness_maxarray)

        avg = get_aver(loudness_max_timearray)
        thisloudnessmaxtimeaverage = avg
        thisloudnessmaxtimedeviation = get_devi(loudness_max_timearray, avg)
        thisloudnessmaxtimedifferential = get_differential(
            loudness_max_timearray)

        avg = get_aver(loudness_startarray)
        thisloudnessstartaverage = avg
        thisloudnessstartdeviation = get_devi(loudness_startarray, avg)
        thisloudnessstartdifferential = get_differential(loudness_startarray)

        i = 0
        while i < 12:
            avg = get_aver(timbrearrays[i])
            thistimbreaverage[i] = avg
            thistimbredev[i] = get_devi(timbrearrays[i], avg)
            thistimbrediff[i] = get_differential(timbrearrays[i])
            avg = get_aver(pitcharrays[i])
            thispitchaverage[i] = avg
            thispitchdev[i] = get_devi(pitcharrays[i], avg)
            thispitchdiff[i] = get_differential(pitcharrays[i])
            thispitchratioa[i] = get_ratio(pitcharrays[i], .5)
            thispitchratiob[i] = get_ratio(pitcharrays[i], 1)
            i += 1

    # a dictionary is created with all the features present, which can then be read into a DB
    song = {
        commonPath: pathstring,
        commonTitle: thistitle,
        commonArtist: thisartist,
        songBeatAverage.name: beatsavg,
        songBeatDeviation.name: beatsdev,
        songBarsAverage.name: barsavg,
        songBarsDeviation.name: barsdev,
        songDanceability.name: thisdanceability,
        songDuration.name: thisduration,
        songEndOfFadeIn.name: thisfadein,
        songEnergy.name: thisenergy,
        songKey.name: thiskey,
        songKeyConfidence.name: thiskeyconfidence,
        songLiveness.name: thisliveness,
        songLoudness.name: thisloudness,
        songMode.name: thismode,
        songModeConfidence.name: thismodeconfidence,
        songOffsetSeconds.name: thisoffset,
        songSectionsAverage.name: sectionsavg,
        songSectionsDeviation.name: sectionsdev,
        songSectionsCount.name: sectionscount,
        songSpeechiness.name: thisspeechiness,
        songStartOfFadeOut.name: thisfadeout,
        songTatumsAverage.name: tatumsavg,
        songTatumsDeviation.name: tatumsdev,
        songTatumsCount.name: tatumscount,
        songTempo.name: thistempo,
        songTempoConfidence.name: thistempoconfidence,
        songTimeSignature.name: thistimesig,
        songTimeSignatureConfidence.name: thistimesigcon,
        songLoudnessMaxAverage.name: thisloudnessmaxaverage,
        songLoudnessMaxDeviation.name: thisloudnessmaxdeviation,
        songLoudnessMaxDifferential.name: thisloudnessmaxdifferential,
        songLoudnessMaxTimeAverage.name: thisloudnessmaxtimeaverage,
        songLoudnessMaxTimeDeviation.name: thisloudnessmaxtimedeviation,
        songLoudnessMaxTimeDifferential.name: thisloudnessmaxtimedifferential,
        songLoudnessStartAverage.name: thisloudnessstartaverage,
        songLoudnessStartDeviation.name: thisloudnessstartdeviation,
        songLoudnessStartDifferential.name: thisloudnessstartdifferential,
        songTimbre1Average.name: thistimbreaverage[0],
        songTimbre1Dev.name: thistimbredev[0],
        songTimbre1Diff.name: thistimbrediff[0],
        songTimbre2Average.name: thistimbreaverage[1],
        songTimbre2Dev.name: thistimbredev[1],
        songTimbre2Diff.name: thistimbrediff[1],
        songTimbre3Average.name: thistimbreaverage[2],
        songTimbre3Dev.name: thistimbredev[2],
        songTimbre3Diff.name: thistimbrediff[2],
        songTimbre4Average.name: thistimbreaverage[3],
        songTimbre4Dev.name: thistimbredev[3],
        songTimbre4Diff.name: thistimbrediff[3],
        songTimbre5Average.name: thistimbreaverage[4],
        songTimbre5Dev.name: thistimbredev[4],
        songTimbre5Diff.name: thistimbrediff[4],
        songTimbre6Average.name: thistimbreaverage[5],
        songTimbre6Dev.name: thistimbredev[5],
        songTimbre6Diff.name: thistimbrediff[5],
        songTimbre7Average.name: thistimbreaverage[6],
        songTimbre7Dev.name: thistimbredev[6],
        songTimbre7Diff.name: thistimbrediff[6],
        songTimbre8Average.name: thistimbreaverage[7],
        songTimbre8Dev.name: thistimbredev[7],
        songTimbre8Diff.name: thistimbrediff[7],
        songTimbre9Average.name: thistimbreaverage[8],
        songTimbre9Dev.name: thistimbredev[8],
        songTimbre9Diff.name: thistimbrediff[8],
        songTimbre10Average.name: thistimbreaverage[9],
        songTimbre10Dev.name: thistimbredev[9],
        songTimbre10Diff.name: thistimbrediff[9],
        songTimbre11Average.name: thistimbreaverage[10],
        songTimbre11Dev.name: thistimbredev[10],
        songTimbre11Diff.name: thistimbrediff[10],
        songTimbre12Average.name: thistimbreaverage[11],
        songTimbre12Dev.name: thistimbredev[11],
        songTimbre12Diff.name: thistimbrediff[11],
        songPitch1Average.name: thispitchaverage[0],
        songPitch1Dev.name: thispitchdev[0],
        songPitch1Diff.name: thispitchdiff[0],
        songPitch2Average.name: thispitchaverage[1],
        songPitch2Dev.name: thispitchdev[1],
        songPitch2Diff.name: thispitchdiff[1],
        songPitch3Average.name: thispitchaverage[2],
        songPitch3Dev.name: thispitchdev[2],
        songPitch3Diff.name: thispitchdiff[2],
        songPitch4Average.name: thispitchaverage[3],
        songPitch4Dev.name: thispitchdev[3],
        songPitch4Diff.name: thispitchdiff[3],
        songPitch5Average.name: thispitchaverage[4],
        songPitch5Dev.name: thispitchdev[4],
        songPitch5Diff.name: thispitchdiff[4],
        songPitch6Average.name: thispitchaverage[5],
        songPitch6Dev.name: thispitchdev[5],
        songPitch6Diff.name: thispitchdiff[5],
        songPitch7Average.name: thispitchaverage[6],
        songPitch7Dev.name: thispitchdev[6],
        songPitch7Diff.name: thispitchdiff[6],
        songPitch8Average.name: thispitchaverage[7],
        songPitch8Dev.name: thispitchdev[7],
        songPitch8Diff.name: thispitchdiff[7],
        songPitch9Average.name: thispitchaverage[8],
        songPitch9Dev.name: thispitchdev[8],
        songPitch9Diff.name: thispitchdiff[8],
        songPitch10Average.name: thispitchaverage[9],
        songPitch10Dev.name: thispitchdev[9],
        songPitch10Diff.name: thispitchdiff[9],
        songPitch11Average.name: thispitchaverage[10],
        songPitch11Dev.name: thispitchdev[10],
        songPitch11Diff.name: thispitchdiff[10],
        songPitch12Average.name: thispitchaverage[11],
        songPitch12Dev.name: thispitchdev[11],
        songPitch12Diff.name: thispitchdiff[11],
        songPitch1Ratioa.name: thispitchratioa[0],
        songPitch2Ratioa.name: thispitchratioa[1],
        songPitch3Ratioa.name: thispitchratioa[2],
        songPitch4Ratioa.name: thispitchratioa[3],
        songPitch5Ratioa.name: thispitchratioa[4],
        songPitch6Ratioa.name: thispitchratioa[5],
        songPitch7Ratioa.name: thispitchratioa[6],
        songPitch8Ratioa.name: thispitchratioa[7],
        songPitch9Ratioa.name: thispitchratioa[8],
        songPitch10Ratioa.name: thispitchratioa[9],
        songPitch11Ratioa.name: thispitchratioa[10],
        songPitch12Ratioa.name: thispitchratioa[11],
        songPitch1Ratiob.name: thispitchratiob[0],
        songPitch2Ratiob.name: thispitchratiob[1],
        songPitch3Ratiob.name: thispitchratiob[2],
        songPitch4Ratiob.name: thispitchratiob[3],
        songPitch5Ratiob.name: thispitchratiob[4],
        songPitch6Ratiob.name: thispitchratiob[5],
        songPitch7Ratiob.name: thispitchratiob[6],
        songPitch8Ratiob.name: thispitchratiob[7],
        songPitch9Ratiob.name: thispitchratiob[8],
        songPitch10Ratiob.name: thispitchratiob[9],
        songPitch11Ratiob.name: thispitchratiob[10],
        songPitch12Ratiob.name: thispitchratiob[11]
    }

    ## calls a function to place these attributes in the DB
    db.add_song(song)
Example No. 16
    def run(self):

        log.info('maintenance walker')
        log.info('action: %s' % self.action)

        if self.action == 'check_media':

            from alibrary.models import Media


            if self.id:
                items = Media.objects.filter(id=self.id)
            else:
                items = Media.objects.filter()[0:self.limit]

            for item in items:

                delete = False

                if not item.master:
                    log.info('no master for: %s' % item)
                    if self.delete_missing:
                        log.info('delete item: %s' % item)
                        delete = True

                else:
                    log.debug('got master for: %s' % item)
                    log.debug('path: %s' % item.master.path)
                    #print item.master.path

                    if os.path.isfile(item.master.path):
                        size = os.path.getsize(item.master.path)
                        log.debug('filesize: %s' % size)
                        if size < 10:
                            log.debug('size too small or zero > delete: %s' % size)
                            delete = True

                    else:
                        log.debug('file does not exist')
                        delete = True

                if delete and self.delete_missing:
                    log.info('delete item: %s' % item)
                    item.delete()

                if not delete:
                    item.status = 1
                    item.save()


        if self.action == 'echonest_media':

            from alibrary.models import Media

            if self.id:
                items = Media.objects.filter(id=self.id)
            else:
                items = Media.objects.filter(echonest_id=None)[0:self.limit]

            for item in items:
                log.info('analyze: %s' % item)
                item.echonest_analyze()


        if self.action == 'clean_playlists':

            from alibrary.models import PlaylistItem
            from alibrary.models import Media

            items = PlaylistItem.objects.all()

            for item in items:
                log.info('clean: %s' % item.pk)
                if not item.content_object:
                    log.info('no content object > delete: %s' % item.pk)
                    item.delete()
                # m = Media.objects.get(pk=item.)


        if self.action == 'self_check_playlists':

            from alibrary.models.playlistmodels import self_check_playlists

            # reset
            # ps = Playlist.objects.all()
            # ps.update(status=1)

            self_check_playlists()


        if self.action == 'degrade_playlists':
            from alibrary.models.playlistmodels import Playlist
            ps = Playlist.objects.filter(type='broadcast').exclude(status=1)

            ps.update(type='playlist', status=1)




        if self.action == 'map_tags':

            from alibrary.models import Media


            if self.id:
                items = Media.objects.filter(id=self.id)
            else:
                items = Media.objects.filter()[0:self.limit]

            for item in items:

                if item.tags.count() < 1:
                    print item
                    print 'map release tags'
                    if item.release and item.release.tags.count() > 0:
                        item.tags = item.release.tags
                        item.save()




        if self.action == 'echonest_media__':

            from alibrary.models import Media
            from pyechonest.util import EchoNestAPIError
            from pyechonest import track
            from pyechonest import config as echonest_config
            echonest_config.ECHO_NEST_API_KEY=ECHONEST_API_KEY

            if self.id:
                items = Media.objects.filter(id=self.id)
            else:
                items = Media.objects.filter(status=0)[0:self.limit]

            for item in items:
                log.info('analyze: %s' % item)




                #md5 = '96fa0180d225f14e9f8cbfffbf5eb81d'

                t = None

                if item.echonest_id:
                    try:
                        log.debug('query by echonest id: %s' % item.echonest_id)
                        t = track.track_from_id(item.echonest_id)
                    except EchoNestAPIError, e:
                        print e

                if not t:
                    try:
                        f = open(item.master.path)
                        md5 = md5_for_file(f);
                        log.debug('query by md5: %s' % md5)
                        t = track.track_from_md5(md5)
                    except EchoNestAPIError, e:
                        print e


                if not t:
                    try:
                        log.debug('query by file: %s' % item.master.path)
                        f = open(item.master.path)
                        t = track.track_from_file(f, 'mp3')
                    except EchoNestAPIError, e:
                        print e
Example No. 17
import sys

import midi                      # python-midi, provides read_midifile()
from pyechonest import artist
from pyechonest import config
from pyechonest import track

midifilename = sys.argv[1]
audiofilename = sys.argv[2]

try:
    config.ECHO_NEST_API_KEY = sys.argv[3]
except:
    config.ECHO_NEST_API_KEY = raw_input('Enter your Echo Nest API key: ')

midifile=midi.read_midifile(midifilename)
audiofile=file(audiofilename)
 

echoTrack = track.track_from_file(audiofile,audiofilename[-3:])
audioSegments = echoTrack.segments
audioSegments = sorted(audioSegments,key=lambda x:x['start'])

events = reduce(lambda x,y :x+y,midifile)
events = sorted(events,key=lambda x: x.msdelay)



t = 0
audioSegments_ix = 0
events_ix=0


while True:
    try:
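The listing cuts off inside the `while True:` loop; a purely hypothetical continuation that walks the two time-sorted streams in step (assuming `msdelay` is in milliseconds and segment `start` times are in seconds) could look like:

while audioSegments_ix < len(audioSegments) and events_ix < len(events):
    seg_start = audioSegments[audioSegments_ix]['start']
    evt_time = events[events_ix].msdelay / 1000.0
    if seg_start <= evt_time:
        # the next thing in time is an audio segment
        print "segment at", seg_start
        audioSegments_ix += 1
    else:
        # the next thing in time is a MIDI event
        print "midi event at", evt_time
        events_ix += 1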
Example No. 18
    except OSError, e:
        print e
        sys.exit()

    print "Recursively gathering all filenames..."
    file_list = get_file_list(dir_list, dir_name)

    for filename in file_list:
        print "Opening file:", filename
        dir_path, file_name = os.path.split(filename)
        if len(file_name) <= 8:
            ext = filename.split('.')[-1]
            f = open(filename)
            print "Getting track from Echo Nest..."
            try:
                music = track.track_from_file(f, ext)
            except socket.error, e:
                print 'Socket Error:', e
                print 'Sleeping...'
                time.sleep(60)
                print 'Continuing...'
                continue
            except echo_util.EchoNestAPIError, e:
                print 'API Error:', e
                print 'Sleeping...'
                time.sleep(60)
                print 'Continuing...'
                continue
            if hasattr(music, 'title'):
                new_title = music.title.replace(' ', '')
                new_filename = '%s/%s.%s' % (dir_path, new_title, ext)
Example No. 19
import pyechonest
from pyechonest import config
from pyechonest.track import track_from_file
import pickle
import numpy as np
import pylab as pl
import os

api_key = "DMSARFFYEZKLFL9LE"

config.ECHO_NEST_API_KEY = api_key

fpath = "02-Heir-Apparent.mp3"

if not os.path.exists("track.p"):
    with open(fpath, 'rb') as fin:
        track = track_from_file(fin, 'mp3')
    pickle.dump(track, open('track.p', 'wb'))
else:
    track = pickle.load(open('track.p', 'rb'))

t = [seg['start'] for seg in track.segments]
loud = [seg['loudness_max'] for seg in track.segments]

pl.plot(t, loud)
#pl.xticks(t,np.arange(len(t)))
pl.xlabel("Time Segment")
pl.ylabel("Loudness")
pl.show()
Example No. 20
def get_beats(filename):
    audio_file = open(filename)
    track_analysis = track.track_from_file(audio_file, 'mp3')
    return track_analysis
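A short usage sketch for the function above (the file name is made up):

analysis = get_beats('some_song.mp3')       # hypothetical file name
print analysis.tempo                        # summary fields are available right away
analysis.get_analysis()                     # fetch the detailed analysis
print len(analysis.beats), 'beats detected'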
Example No. 21
import pyechonest 
from pyechonest import config
from pyechonest.track import track_from_file
import pickle
import numpy as np
import pylab as pl
import os


api_key = "DMSARFFYEZKLFL9LE"

config.ECHO_NEST_API_KEY = api_key

fpath = "02-Heir-Apparent.mp3"

if not os.path.exists("track.p"):
    with open(fpath, 'rb') as fin:
        track = track_from_file(fin, 'mp3')
    pickle.dump(track, open('track.p', 'wb'))
else:
    track = pickle.load(open('track.p', 'rb'))

t = [seg['start'] for seg in track.segments]
loud = [seg['loudness_max'] for seg in track.segments]

pl.plot(t,loud)
#pl.xticks(t,np.arange(len(t)))
pl.xlabel("Time Segment")
pl.ylabel("Loudness")
pl.show()
Example No. 22
config.ECHO_NEST_API_KEY="5TSYCVEZEIQ9R3HEO"
#os.environ['ECHONEST_API_KEY'] = config.ECHO_NEST_API_KEY

# the goal is to extract feature information for the learning subset of the RWC audio samples
input_dir = '/home/manu/workspace/databases/genres/jazz/'
output_dir = '/home/manu/workspace/databases/genres/jazz/hdf5/'

# Single file is working; now loop over all files in the learning directory
#from pyechonest import track
for audiofile in features.get_filepaths(input_dir, ext='.au'):
    print "Starting work on ", audiofile    
    output = output_dir + os.path.splitext(os.path.split(audiofile)[-1])[0] + '.h5'
    if os.path.exists(output):
        continue
    file_object = open(audiofile)
    curtrack = track.track_from_file(file_object, 'au', force_upload=True)
#
    HDF5.create_song_file(output,force=False)
    h5 = HDF5.open_h5_file_append(output)
    # HACK we need to fill missing values
    curtrack.__setattr__('foreign_id','')
    curtrack.__setattr__('foreign_release_id','')
    curtrack.__setattr__('audio_md5','')
    HDF5.fill_hdf5_from_track(h5,curtrack)
    h5.close()
    del h5
    
# first testing on a single song
#audiofile = input_dir + 'rwc-g-m01_1.wav'
#output = output_dir + 'rwc-g-m01_1.h5'